diff --git a/CMakeLists.txt b/CMakeLists.txt
index b3b5438a7..d62c8f99f 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -3,6 +3,7 @@ cmake_minimum_required(VERSION 3.21)
 project(Ollama C CXX)
 
 include(CheckLanguage)
+include(GNUInstallDirs)
 
 find_package(Threads REQUIRED)
 
@@ -51,7 +52,7 @@ include_directories(${CMAKE_CURRENT_SOURCE_DIR}/ml/backend/ggml/ggml/src/include
 include_directories(${CMAKE_CURRENT_SOURCE_DIR}/ml/backend/ggml/ggml/src/ggml-cpu)
 include_directories(${CMAKE_CURRENT_SOURCE_DIR}/ml/backend/ggml/ggml/src/ggml-cpu/amx)
 
-add_compile_definitions(NDEBUG)
+add_compile_definitions(NDEBUG GGML_VERSION=0x0 GGML_COMMIT=0x0)
 
 set(GGML_CPU ON)
 add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/ml/backend/ggml/ggml/src)
diff --git a/Makefile.sync b/Makefile.sync
index 711667c98..0bfb70e07 100644
--- a/Makefile.sync
+++ b/Makefile.sync
@@ -1,6 +1,6 @@
-UPSTREAM=https://github.com/ggerganov/llama.cpp.git
+UPSTREAM=https://github.com/ggml-org/llama.cpp.git
 WORKDIR=llama/vendor
-FETCH_HEAD=de4c07f93783a1a96456a44dc16b9db538ee1618
+FETCH_HEAD=e54d41befcc1575f4c898c5ff4ef43970cead75f
 
 .PHONY: help
 help:
@@ -12,7 +12,7 @@ help:
	@echo "   clean         Clean local repository"
	@echo
	@echo "Example:"
-	@echo "   make -f $(lastword $(MAKEFILE_LIST)) clean sync"
+	@echo "   make -f $(lastword $(MAKEFILE_LIST)) clean apply-patches sync"
 
 .PHONY: sync
 sync: llama/build-info.cpp ml/backend/ggml/ggml/src/ggml-metal/ggml-metal-embed.metal
@@ -24,12 +24,12 @@ ml/backend/ggml/ggml/src/ggml-metal/ggml-metal-embed.metal: ml/backend/ggml/ggml
	go generate ./$(@D)
 
 .PHONY: llama/llama.cpp
-llama/llama.cpp: llama/vendor/
-	rsync -arvzc -f "merge $@/.rsync-filter" $< $@
+llama/llama.cpp: llama/vendor
+	rsync -arvzc --delete -f "include LICENSE" -f "merge $@/.rsync-filter" $(addprefix $<,/LICENSE /) $@
 
 .PHONY: ml/backend/ggml/ggml
-ml/backend/ggml/ggml: llama/vendor/ggml/
-	rsync -arvzc -f "merge $@/.rsync-filter" $< $@
+ml/backend/ggml/ggml: llama/vendor
+	rsync -arvzc --delete -f "include LICENSE" -f "merge $@/.rsync-filter" $(addprefix $<,/LICENSE /ggml/) $@
 
 PATCHES=$(wildcard llama/patches/*.patch)
 PATCHED=$(join $(dir $(PATCHES)), $(addsuffix ed, $(addprefix ., $(notdir $(PATCHES)))))
@@ -39,7 +39,15 @@ PATCHED=$(join $(dir $(PATCHES)), $(addsuffix ed, $(addprefix ., $(notdir $(PATC
 apply-patches: $(PATCHED)
 
 llama/patches/.%.patched: llama/patches/%.patch
-	@if git -c user.name=nobody -c 'user.email=<>' -C $(WORKDIR) am -3 $(realpath $<); then touch $@; else git -C $(WORKDIR) am --abort; exit 1; fi
+	@if git -c user.name=nobody -c 'user.email=<>' -C $(WORKDIR) am -3 $(realpath $<); then \
+		touch $@; \
+	else \
+		echo "Patch failed. Resolve any conflicts then continue."; \
+		echo "1. Run 'git -C $(WORKDIR) am --continue'"; \
+		echo "2. Run 'make -f $(lastword $(MAKEFILE_LIST)) format-patches'"; \
+		echo "3. Run 'make -f $(lastword $(MAKEFILE_LIST)) clean apply-patches'"; \
+		exit 1; \
+	fi
 
 .PHONY: checkout
 checkout: $(WORKDIR)
@@ -60,4 +68,5 @@ format-patches: llama/patches
 
 .PHONE: clean
 clean: checkout
+	@git -C $(WORKDIR) am --abort || true
	$(RM) llama/patches/.*.patched
diff --git a/convert/reader.go b/convert/reader.go
index 367e91a29..907d2a9ef 100644
--- a/convert/reader.go
+++ b/convert/reader.go
@@ -39,6 +39,7 @@ const (
 
 func (t tensorBase) Kind() uint32 {
	if strings.HasSuffix(t.name, ".ffn_gate_inp.weight") ||
+		strings.HasSuffix(t.name, ".bias") ||
		t.name == "token_types.weight" ||
		t.name == "v.positional_embedding_vlm" ||
		t.name == "v.tile_position_embd.weight" ||
diff --git a/fs/ggml/ggml.go b/fs/ggml/ggml.go
index 45d58bc47..008d1e0fc 100644
--- a/fs/ggml/ggml.go
+++ b/fs/ggml/ggml.go
@@ -180,7 +180,7 @@ func (kv KV) OllamaEngineRequired() bool {
		"llama4",
		"mllama",
		"qwen25vl",
-		"gptoss",
+		"gptoss", "gpt-oss",
	}, kv.Architecture())
 }
 
@@ -328,7 +328,7 @@ func (t TensorType) TypeSize() uint64 {
		return 2 + blockSize/2
	case TensorTypeQ4_1:
		return 2 + 2 + blockSize/2
-	case TensorTypeMXFP4:
+	case TensorTypeMXFP4, 39:
		return 1 + blockSize/2
	case TensorTypeQ5_0:
		return 2 + 4 + blockSize/2
@@ -665,7 +665,7 @@ func (f GGML) GraphSize(context, batch uint64, numParallel int, kvCacheType stri
				4*qkvBias.Shape[0],
			)
		}
-	case "gptoss":
+	case "gptoss", "gpt-oss":
		kv = make([]uint64, f.KV().BlockCount())
		for i := range kv {
			kv[i] = uint64(float64((embeddingHeadsK+embeddingHeadsV)*headsKV) * bytesPerElement)
@@ -675,8 +675,7 @@ func (f GGML) GraphSize(context, batch uint64, numParallel int, kvCacheType stri
				kv[i] *= context
			}
		}
-		fullOffload = 4 * f.KV().HeadCountMax() / cmp.Or(f.KV().HeadCountKVMin(), 1) * kvTotal / 6
-		partialOffload = fullOffload
+		partialOffload = 2 * f.KV().HeadCountMax() / cmp.Or(f.KV().HeadCountKVMin(), 1) * kvTotal / 6
	}
 
	return
@@ -761,10 +760,6 @@ func (f GGML) SupportsFlashAttention() bool {
		return false
	}
 
-	if f.KV().Architecture() == "gptoss" {
-		return false
-	}
-
	// Check head counts match and are non-zero
	headCountK := f.KV().EmbeddingHeadCountK()
	headCountV := f.KV().EmbeddingHeadCountV()
diff --git a/llama/build-info.cpp b/llama/build-info.cpp
index afef6b85c..18c449611 100644
--- a/llama/build-info.cpp
+++ b/llama/build-info.cpp
@@ -1,4 +1,4 @@
 int LLAMA_BUILD_NUMBER = 0;
-char const *LLAMA_COMMIT = "de4c07f93783a1a96456a44dc16b9db538ee1618";
+char const *LLAMA_COMMIT = "e54d41befcc1575f4c898c5ff4ef43970cead75f";
 char const *LLAMA_COMPILER = "";
 char const *LLAMA_BUILD_TARGET = "";
diff --git a/llama/llama.cpp/.rsync-filter b/llama/llama.cpp/.rsync-filter
index 1f81b0075..650d9463b 100644
--- a/llama/llama.cpp/.rsync-filter
+++ b/llama/llama.cpp/.rsync-filter
@@ -1,23 +1,32 @@
-protect **/*.go
-include common/
-include common/base64.*
-include common/common.*
-include common/json-schema-to-grammar.*
-include common/json.*
-include common/log.*
-include common/sampling.*
-include common/stb_image.*
-include include/
-include include/llama.*
-include include/llama-*.*
-include tools/
-include tools/mtmd/
-include tools/mtmd/clip.*
-include tools/mtmd/clip-impl.*
-include tools/mtmd/llava.*
-include src/
-include src/llama.*
-include src/llama-*.*
-include src/unicode-data.*
-include src/unicode.*
-exclude *
+protect .rsync-filter
+protect *.go
+include /common/
+include /common/base64.*
+include /common/common.*
+include /common/json-schema-to-grammar.*
+include /common/json.*
+include /common/log.*
+include /common/sampling.*
+include /include/
+include /include/llama.*
+include /include/llama-*.*
+include /tools/
+include /tools/mtmd/
+include /tools/mtmd/*.h
+include /tools/mtmd/clip.cpp
+include /tools/mtmd/mtmd.cpp
+include /tools/mtmd/mtmd-audio.cpp
+include /tools/mtmd/mtmd-helper.cpp
+include /src/
+include /src/llama.*
+include /src/llama-*.*
+include /src/unicode-data.*
+include /src/unicode.*
+include /vendor/
+include /vendor/miniaudio/
+include /vendor/miniaudio/*.h
+include /vendor/nlohmann/
+include /vendor/nlohmann/*.hpp
+include /vendor/stb/
+include /vendor/stb/*.h
+hide *
diff --git a/llama/llama.cpp/common/common.cpp b/llama/llama.cpp/common/common.cpp
index 2b1d8da59..c6962d1d1 100644
--- a/llama/llama.cpp/common/common.cpp
+++ b/llama/llama.cpp/common/common.cpp
@@ -203,6 +203,7 @@ bool set_process_priority(enum ggml_sched_priority prio) {
 
     DWORD p = NORMAL_PRIORITY_CLASS;
     switch (prio) {
+        case GGML_SCHED_PRIO_LOW:      p = BELOW_NORMAL_PRIORITY_CLASS; break;
         case GGML_SCHED_PRIO_NORMAL:   p = NORMAL_PRIORITY_CLASS;       break;
         case GGML_SCHED_PRIO_MEDIUM:   p = ABOVE_NORMAL_PRIORITY_CLASS; break;
         case GGML_SCHED_PRIO_HIGH:     p = HIGH_PRIORITY_CLASS;         break;
@@ -228,6 +229,7 @@ bool set_process_priority(enum ggml_sched_priority prio) {
 
     int p = 0;
     switch (prio) {
+        case GGML_SCHED_PRIO_LOW:      p =  5;  break;
         case GGML_SCHED_PRIO_NORMAL:   p =  0;  break;
         case GGML_SCHED_PRIO_MEDIUM:   p = -5;  break;
         case GGML_SCHED_PRIO_HIGH:     p = -10; break;
@@ -443,9 +445,37 @@ void string_replace_all(std::string & s, const std::string & search, const std::
     s = std::move(builder);
 }
 
+bool string_ends_with(const std::string_view & str, const std::string_view & suffix) {
+    return str.size() >= suffix.size() && str.compare(str.size()-suffix.size(), suffix.size(), suffix) == 0;
+}
+
+bool string_remove_suffix(std::string & str, const std::string_view & suffix) {
+    bool has_suffix = string_ends_with(str, suffix);
+    if (has_suffix) {
+        str = str.substr(0, str.size() - suffix.size());
+    }
+    return has_suffix;
+}
+
+size_t string_find_partial_stop(const std::string_view & str, const std::string_view & stop) {
+    if (!str.empty() && !stop.empty()) {
+        const char text_last_char = str.back();
+        for (int64_t char_index = stop.size() - 1; char_index >= 0; char_index--) {
+            if (stop[char_index] == text_last_char) {
+                const auto current_partial = stop.substr(0, char_index + 1);
+                if (string_ends_with(str, current_partial)) {
+                    return str.size() - char_index - 1;
+                }
+            }
+        }
+    }
+
+    return std::string::npos;
+}
+
 std::string regex_escape(const std::string & s) {
     static const std::regex special_chars("[.^$|()*+?\\[\\]{}\\\\]");
-    return std::regex_replace(s, special_chars, "\\$0");
+    return std::regex_replace(s, special_chars, "\\$&");
 }
 
 std::string string_join(const std::vector<std::string> & values, const std::string & separator) {
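Review note: the new `string_find_partial_stop` helper returns the offset where a *partial* stop sequence begins at the tail of the text, or `npos` if none. A minimal sketch of how it composes in a streaming loop — the definitions are copied verbatim from the hunk above so the example compiles standalone; the `pending` buffer and stop string are hypothetical:

```cpp
// Sketch: flushing streamed output while holding back a possible stop-sequence prefix.
#include <iostream>
#include <string>
#include <string_view>

static bool string_ends_with(const std::string_view & str, const std::string_view & suffix) {
    return str.size() >= suffix.size() && str.compare(str.size()-suffix.size(), suffix.size(), suffix) == 0;
}

static size_t string_find_partial_stop(const std::string_view & str, const std::string_view & stop) {
    if (!str.empty() && !stop.empty()) {
        const char text_last_char = str.back();
        for (int64_t char_index = stop.size() - 1; char_index >= 0; char_index--) {
            if (stop[char_index] == text_last_char) {
                const auto current_partial = stop.substr(0, char_index + 1);
                if (string_ends_with(str, current_partial)) {
                    return str.size() - char_index - 1;
                }
            }
        }
    }
    return std::string::npos;
}

int main() {
    const std::string stop = "</s>";
    std::string pending = "Hello</";   // a stop sequence may arrive split across token chunks

    // npos means no partial stop: the whole buffer is safe to emit.
    // Otherwise, emit only the text before the partial match and keep the tail buffered.
    size_t pos = string_find_partial_stop(pending, stop);
    if (pos != std::string::npos) {
        std::cout << pending.substr(0, pos);   // prints "Hello"
        pending.erase(0, pos);                 // keeps "</" until more text arrives
    }
}
```

Note also the `regex_escape` change in the same hunk: `$0` is not a valid backreference for `std::regex_replace`; `$&` is the ECMAScript token for "the whole match", so escaping now actually re-inserts the matched special character.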
@@ -685,11 +715,17 @@ bool fs_validate_filename(const std::string & filename) {
     // disable C++17 deprecation warning for std::codecvt_utf8
 #    pragma clang diagnostic push
 #    pragma clang diagnostic ignored "-Wdeprecated-declarations"
+#elif defined(__GNUC__)
+#    pragma GCC diagnostic push
+#    pragma GCC diagnostic ignored "-Wdeprecated-declarations"
 #endif
+
     std::wstring_convert<std::codecvt_utf8<char32_t>, char32_t> converter;
 
 #if defined(__clang__)
 #    pragma clang diagnostic pop
+#elif defined(__GNUC__)
+#    pragma GCC diagnostic pop
 #endif
 
     filename_utf32 = converter.from_bytes(filename);
@@ -746,6 +782,9 @@ bool fs_validate_filename(const std::string & filename) {
     return true;
 }
 
+#include
+
+
 // returns true if successful, false otherwise
 bool fs_create_directory_with_parents(const std::string & path) {
 #ifdef _WIN32
@@ -763,9 +802,16 @@ bool fs_create_directory_with_parents(const std::string & path) {
 
         // process path from front to back, procedurally creating directories
         while ((pos_slash = path.find('\\', pos_slash)) != std::string::npos) {
             const std::wstring subpath = wpath.substr(0, pos_slash);
-            const wchar_t * test = subpath.c_str();
 
-            const bool success = CreateDirectoryW(test, NULL);
+            pos_slash += 1;
+
+            // skip the drive letter, in some systems it can return an access denied error
+            if (subpath.length() == 2 && subpath[1] == ':') {
+                continue;
+            }
+
+            const bool success = CreateDirectoryW(subpath.c_str(), NULL);
+
             if (!success) {
                 const DWORD error = GetLastError();
 
@@ -779,8 +825,6 @@ bool fs_create_directory_with_parents(const std::string & path) {
                     return false;
                 }
             }
-
-            pos_slash += 1;
         }
 
         return true;
@@ -830,7 +874,7 @@ std::string fs_get_cache_directory() {
     if (getenv("LLAMA_CACHE")) {
         cache_directory = std::getenv("LLAMA_CACHE");
     } else {
-#if defined(__linux__) || defined(__FreeBSD__) || defined(_AIX)
+#if defined(__linux__) || defined(__FreeBSD__) || defined(_AIX) || defined(__OpenBSD__)
         if (std::getenv("XDG_CACHE_HOME")) {
             cache_directory = std::getenv("XDG_CACHE_HOME");
         } else {
@@ -876,31 +920,6 @@ struct common_init_result common_init_from_params(common_params & params) {
 
     const llama_vocab * vocab = llama_model_get_vocab(model);
 
-    if (params.reranking) {
-        bool ok = true;
-
-        if (llama_vocab_bos(vocab) == LLAMA_TOKEN_NULL) {
-            LOG_WRN("%s: warning: vocab does not have a BOS token, reranking will not work\n", __func__);
-            ok = false;
-        }
-
-        if (llama_vocab_eos(vocab) == LLAMA_TOKEN_NULL) {
-            LOG_WRN("%s: warning: vocab does not have an EOS token, reranking will not work\n", __func__);
-            ok = false;
-        }
-
-        if (llama_vocab_sep(vocab) == LLAMA_TOKEN_NULL) {
-            LOG_WRN("%s: warning: vocab does not have a SEP token, reranking will not work\n", __func__);
-            ok = false;
-        }
-
-        if (!ok) {
-            llama_model_free(model);
-
-            return iparams;
-        }
-    }
-
     auto cparams = common_context_params_to_llama(params);
 
     llama_context * lctx = llama_init_from_model(model, cparams);
@@ -910,7 +929,7 @@ struct common_init_result common_init_from_params(common_params & params) {
         return iparams;
     }
 
-    if (params.ctx_shift && !llama_kv_self_can_shift(lctx)) {
+    if (params.ctx_shift && !llama_memory_can_shift(llama_get_memory(lctx))) {
         LOG_WRN("%s: KV cache shifting is not supported for this context, disabling KV cache shifting\n", __func__);
         params.ctx_shift = false;
     }
@@ -942,6 +961,35 @@ struct common_init_result common_init_from_params(common_params & params) {
         }
     }
 
+    if (llama_pooling_type(lctx) == LLAMA_POOLING_TYPE_RANK) {
+        bool ok = true;
+
+        if (llama_vocab_bos(vocab) == LLAMA_TOKEN_NULL) {
+            LOG_WRN("%s: warning: vocab does not have a BOS token, reranking will not work\n", __func__);
+            ok = false;
+        }
+
+        bool has_eos = llama_vocab_eos(vocab) != LLAMA_TOKEN_NULL;
+        bool has_sep = llama_vocab_sep(vocab) != LLAMA_TOKEN_NULL;
+
+        if (!has_eos && !has_sep) {
+            LOG_WRN("%s: warning: vocab does not have an EOS token or SEP token, reranking will not work\n", __func__);
+            ok = false;
+        } else if (!has_eos) {
+            LOG_WRN("%s: warning: vocab does not have an EOS token, using SEP token as fallback\n", __func__);
+        } else if (!has_sep) {
+            LOG_WRN("%s: warning: vocab does not have a SEP token, reranking will not work\n", __func__);
+            ok = false;
+        }
+
+        if (!ok) {
+            llama_free(lctx);
+            llama_model_free(model);
+
+            return iparams;
+        }
+    }
+
     // load and optionally apply lora adapters
     for (auto & la : params.lora_adapters) {
         llama_adapter_lora_ptr lora;
@@ -966,15 +1014,21 @@ struct common_init_result common_init_from_params(common_params & params) {
         params.sampling.ignore_eos = false;
     }
 
-    if (params.sampling.ignore_eos) {
-        for (llama_token i = 0; i < llama_vocab_n_tokens(vocab); i++) {
-            if (llama_vocab_is_eog(vocab, i)) {
-                LOG_INF("%s: added %s logit bias = %f\n", __func__, common_token_to_piece(lctx, i).c_str(), -INFINITY);
-                params.sampling.logit_bias.push_back({i, -INFINITY});
-            }
+    // initialize once
+    for (llama_token i = 0; i < llama_vocab_n_tokens(vocab); i++) {
+        if (llama_vocab_is_eog(vocab, i)) {
+            LOG_INF("%s: added %s logit bias = %f\n", __func__, common_token_to_piece(lctx, i).c_str(), -INFINITY);
+            params.sampling.logit_bias_eog.push_back({i, -INFINITY});
         }
     }
 
+    if (params.sampling.ignore_eos) {
+        // add EOG biases to the active set of logit biases
+        params.sampling.logit_bias.insert(
+                params.sampling.logit_bias.end(),
+                params.sampling.logit_bias_eog.begin(), params.sampling.logit_bias_eog.end());
+    }
+
     if (params.sampling.penalty_last_n == -1) {
         LOG_INF("%s: setting penalty_last_n to ctx_size = %d\n", __func__, llama_n_ctx(lctx));
         params.sampling.penalty_last_n = llama_n_ctx(lctx);
@@ -1017,7 +1071,7 @@ struct common_init_result common_init_from_params(common_params & params) {
         if (llama_model_has_decoder(model)) {
             llama_decode(lctx, llama_batch_get_one(tmp.data(), std::min(tmp.size(), (size_t) params.n_batch)));
         }
-        llama_kv_self_clear(lctx);
+        llama_memory_clear(llama_get_memory(lctx), true);
         llama_synchronize(lctx);
         llama_perf_context_reset(lctx);
         llama_set_warmup(lctx, false);
@@ -1068,6 +1122,7 @@ struct llama_model_params common_model_params_to_llama(common_params & params) {
     mparams.use_mmap        = params.use_mmap;
     mparams.use_mlock       = params.use_mlock;
     mparams.check_tensors   = params.check_tensors;
+    mparams.use_extra_bufts = !params.no_extra_bufts;
 
     if (params.kv_overrides.empty()) {
         mparams.kv_overrides = NULL;
@@ -1083,6 +1138,9 @@ struct llama_model_params common_model_params_to_llama(common_params & params) {
         mparams.tensor_buft_overrides = params.tensor_buft_overrides.data();
     }
 
+    mparams.progress_callback           = params.load_progress_callback;
+    mparams.progress_callback_user_data = params.load_progress_callback_user_data;
+
     return mparams;
 }
 
@@ -1114,11 +1172,8 @@ struct llama_context_params common_context_params_to_llama(const common_params &
     cparams.flash_attn        = params.flash_attn;
     cparams.no_perf           = params.no_perf;
     cparams.op_offload        = !params.no_op_offload;
-
-    if (params.reranking) {
-        cparams.embeddings   = true;
-        cparams.pooling_type = LLAMA_POOLING_TYPE_RANK;
-    }
+    cparams.swa_full          = params.swa_full;
+    cparams.kv_unified        = params.kv_unified;
 
     cparams.type_k = params.cache_type_k;
     cparams.type_v = params.cache_type_v;
@@ -1252,6 +1307,9 @@ std::vector<llama_token> common_tokenize(
     int n_tokens = text.length() + 2 * add_special;
     std::vector<llama_token> result(n_tokens);
     n_tokens = llama_tokenize(vocab, text.data(), text.length(), result.data(), result.size(), add_special, parse_special);
+    if (n_tokens == std::numeric_limits<int32_t>::min()) {
+        throw std::runtime_error("Tokenization failed: input text too large, tokenization result exceeds int32_t limit");
+    }
     if (n_tokens < 0) {
         result.resize(-n_tokens);
         int check = llama_tokenize(vocab, text.data(), text.length(), result.data(), result.size(), add_special, parse_special);
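Review note: the hunk above turns `llama_tokenize`'s new `INT32_MIN` overflow sentinel into an exception. A hedged sketch of the same resize-and-retry pattern against the raw C API, assuming `vocab` came from `llama_model_get_vocab()` on a loaded model (names here are illustrative, not part of this patch):

```cpp
// Two-pass tokenization: negative return = required buffer size; INT32_MIN = overflow.
#include <cstdint>
#include <stdexcept>
#include <string>
#include <vector>
#include "llama.h"

std::vector<llama_token> tokenize_checked(const llama_vocab * vocab, const std::string & text) {
    std::vector<llama_token> result(text.length() + 2);   // rough upper bound incl. BOS/EOS
    int32_t n = llama_tokenize(vocab, text.data(), (int32_t) text.length(),
                               result.data(), (int32_t) result.size(),
                               /*add_special=*/true, /*parse_special=*/false);
    if (n == INT32_MIN) {
        // new sentinel: the token count itself does not fit in int32_t
        throw std::runtime_error("input too large to tokenize");
    }
    if (n < 0) {
        // buffer was too small: -n is the required size, retry once
        result.resize(-n);
        n = llama_tokenize(vocab, text.data(), (int32_t) text.length(),
                           result.data(), (int32_t) result.size(), true, false);
    }
    result.resize(n);
    return result;
}
```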
@@ -1306,81 +1364,6 @@ std::string common_detokenize(const struct llama_vocab * vocab, const std::vecto
     return text;
 }
 
-//
-// KV cache utils
-//
-
-void common_kv_cache_dump_view(const llama_kv_cache_view & view, int row_size) {
-    static const char slot_chars[] = ".123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz+";
-
-    printf("=== Dumping KV cache. total cells %d, max sequences per cell %d, populated cells %d, total tokens in cache %d, largest empty slot=%d @ %d",
-        view.n_cells, view.n_seq_max, view.used_cells, view.token_count, view.max_contiguous, view.max_contiguous_idx);
-
-    llama_kv_cache_view_cell * c_curr = view.cells;
-    llama_seq_id * cs_curr = view.cells_sequences;
-
-    for (int i = 0; i < view.n_cells; i++, c_curr++, cs_curr += view.n_seq_max) {
-        if (i % row_size == 0) {
-            printf("\n%5d: ", i);
-        }
-        int seq_count = 0;
-        for (int j = 0; j < view.n_seq_max; j++) {
-            if (cs_curr[j] >= 0) { seq_count++; }
-        }
-        putchar(slot_chars[std::min(sizeof(slot_chars) - 2, size_t(seq_count))]);
-    }
-
-    printf("\n=== Done dumping\n");
-}
-
-void common_kv_cache_dump_view_seqs(const llama_kv_cache_view & view, int row_size) {
-    static const char slot_chars[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz";
-
-    printf("=== Dumping KV cache. total cells %d, max sequences per cell %d, populated cells %d, total tokens in cache %d, largest empty slot=%d @ %d\n",
-        view.n_cells, view.n_seq_max, view.used_cells, view.token_count, view.max_contiguous, view.max_contiguous_idx);
-
-    std::unordered_map<llama_seq_id, size_t> seqs;
-    llama_kv_cache_view_cell * c_curr = view.cells;
-    llama_seq_id * cs_curr = view.cells_sequences;
-
-    for (int i = 0; i < view.n_cells; i++, c_curr++, cs_curr += view.n_seq_max) {
-        for (int j = 0; j < view.n_seq_max; j++) {
-            if (cs_curr[j] < 0) { continue; }
-            if (seqs.find(cs_curr[j]) == seqs.end()) {
-                if (seqs.size() + 1 >= sizeof(slot_chars)) { break; }
-                const size_t sz = seqs.size();
-                seqs[cs_curr[j]] = sz;
-            }
-        }
-        if (seqs.size() + 1 >= sizeof(slot_chars)) { break; }
-    }
-
-    printf("=== Sequence legend: ");
-    for (const auto & it : seqs) {
-        printf("%zu=%d, ", it.second, it.first);
-    }
-    printf("'+'=other sequence ids");
-
-    c_curr = view.cells;
-    cs_curr = view.cells_sequences;
-    for (int i = 0; i < view.n_cells; i++, c_curr++, cs_curr += view.n_seq_max) {
-        if (i % row_size == 0) {
-            printf("\n%5d: ", i);
-        }
-        for (int j = 0; j < view.n_seq_max; j++) {
-            if (cs_curr[j] >= 0) {
-                const auto & it = seqs.find(cs_curr[j]);
-                putchar(it != seqs.end() ? int(slot_chars[it->second]) : '+');
-            } else {
-                putchar('.');
-            }
-        }
-        putchar(' ');
-    }
-
-    printf("\n=== Done dumping\n");
-}
-
 //
 // Embedding utils
 //
diff --git a/llama/llama.cpp/common/common.go b/llama/llama.cpp/common/common.go
index ebbb738f2..e11bb1679 100644
--- a/llama/llama.cpp/common/common.go
+++ b/llama/llama.cpp/common/common.go
@@ -1,6 +1,6 @@
 package common
 
-// #cgo CXXFLAGS: -std=c++11
-// #cgo CPPFLAGS: -I${SRCDIR}/../include
+// #cgo CXXFLAGS: -std=c++17
+// #cgo CPPFLAGS: -I${SRCDIR}/../include -I${SRCDIR}/../vendor
 // #cgo CPPFLAGS: -I${SRCDIR}/../../../ml/backend/ggml/ggml/include
 import "C"
diff --git a/llama/llama.cpp/common/common.h b/llama/llama.cpp/common/common.h
index dea34267c..5eab199af 100644
--- a/llama/llama.cpp/common/common.h
+++ b/llama/llama.cpp/common/common.h
@@ -6,7 +6,9 @@
 
 #include
 #include
+#include
 #include
+#include
 #include
 
 #ifdef _WIN32
@@ -75,10 +77,11 @@ enum llama_example {
     LLAMA_EXAMPLE_SERVER,
     LLAMA_EXAMPLE_CVECTOR_GENERATOR,
     LLAMA_EXAMPLE_EXPORT_LORA,
-    LLAMA_EXAMPLE_LLAVA,
+    LLAMA_EXAMPLE_MTMD,
     LLAMA_EXAMPLE_LOOKUP,
     LLAMA_EXAMPLE_PARALLEL,
     LLAMA_EXAMPLE_TTS,
+    LLAMA_EXAMPLE_DIFFUSION,
 
     LLAMA_EXAMPLE_COUNT,
 };
@@ -114,7 +117,7 @@ enum common_grammar_trigger_type {
     COMMON_GRAMMAR_TRIGGER_TYPE_TOKEN,
     COMMON_GRAMMAR_TRIGGER_TYPE_WORD,
     COMMON_GRAMMAR_TRIGGER_TYPE_PATTERN,
-    COMMON_GRAMMAR_TRIGGER_TYPE_PATTERN_START,
+    COMMON_GRAMMAR_TRIGGER_TYPE_PATTERN_FULL,
 };
 
 struct common_grammar_trigger {
@@ -175,7 +178,8 @@ struct common_params_sampling {
     std::vector<common_grammar_trigger> grammar_triggers; // optional triggers (for lazy grammars)
     std::set<llama_token> preserved_tokens;
 
-    std::vector<llama_logit_bias> logit_bias; // logit biases to apply
+    std::vector<llama_logit_bias> logit_bias;     // logit biases to apply
+    std::vector<llama_logit_bias> logit_bias_eog; // pre-calculated logit biases for EOG tokens
 
     // print the parameters into a string
     std::string print() const;
@@ -197,6 +201,10 @@ struct common_params_speculative {
     int32_t n_gpu_layers =    -1; // number of layers to store in VRAM for the draft model (-1 - use default)
     float   p_split      =  0.1f; // speculative decoding split probability
     float   p_min        = 0.75f; // minimum speculative decoding probability (greedy)
+    std::vector<std::pair<std::string, std::string>> replacements; // main to speculative model replacements
+
+    ggml_type cache_type_k = GGML_TYPE_F16; // KV cache data type for the K
+    ggml_type cache_type_v = GGML_TYPE_F16; // KV cache data type for the V
 
     struct cpu_params cpuparams;
     struct cpu_params cpuparams_batch;
@@ -212,9 +220,26 @@ struct common_params_vocoder {
     bool use_guide_tokens = false; // enable guide tokens to improve TTS accuracy // NOLINT
 };
 
+struct common_params_diffusion {
+    int32_t steps       = 128;
+    bool    visual_mode = false;
+
+    float   eps          = 0; // epsilon for timesteps
+    int32_t block_length = 0; // block length for generation
+
+    int32_t algorithm = 4;    // default algorithm: low-confidence
+    float   alg_temp  = 0.0f; // algorithm temperature
+
+    float   cfg_scale        = 0;     // classifier-free guidance scale
+    bool    add_gumbel_noise = false; // add gumbel noise to the logits if temp > 0.0
+};
+
 enum common_reasoning_format {
     COMMON_REASONING_FORMAT_NONE,
-    COMMON_REASONING_FORMAT_DEEPSEEK, // Extract thinking tag contents and return as `message.reasoning_content`
+    COMMON_REASONING_FORMAT_AUTO,
+    COMMON_REASONING_FORMAT_DEEPSEEK_LEGACY, // Extract thinking tag contents and return as `message.reasoning_content`, or leave inline in <think> tags in stream mode
+    COMMON_REASONING_FORMAT_DEEPSEEK,        // Extract thinking tag contents and return as `message.reasoning_content`, including in streaming deltas.
+    COMMON_REASONING_FORMAT_GRANITE,         // Extract thinking tag contents and return as `message.reasoning_content`, including in streaming deltas.
 };
 
 struct common_params {
@@ -262,6 +287,7 @@ struct common_params {
     struct common_params_sampling    sampling;
     struct common_params_speculative speculative;
     struct common_params_vocoder     vocoder;
+    struct common_params_diffusion   diffusion;
 
     struct common_params_model model;
 
@@ -290,6 +316,7 @@ struct common_params {
     int32_t verbosity                  = 0;
     int32_t control_vector_layer_start = -1; // layer range for control vector
     int32_t control_vector_layer_end   = -1; // layer range for control vector
+    bool    offline                    = false;
 
     int32_t ppl_stride      = 0; // stride for perplexity calculations. If left at 0, the pre-existing approach will be used.
     int32_t ppl_output_type = 0; // = 0 -> ppl output is as usual, = 1 -> ppl output is num_tokens, ppl, one per line
@@ -322,17 +349,19 @@ struct common_params {
     bool flash_attn        = false; // flash attention
     bool no_perf           = false; // disable performance metrics
     bool ctx_shift         = true;  // context shift on inifinite text generation
+    bool swa_full          = false; // use full-size SWA cache (https://github.com/ggml-org/llama.cpp/pull/13194#issuecomment-2868343055)
+    bool kv_unified        = false; // enable unified KV cache
 
     bool input_prefix_bos  = false; // prefix BOS to user inputs, preceding input_prefix
     bool use_mmap          = true;  // use mmap for faster loads
     bool use_mlock         = false; // use mlock to keep model in memory
     bool verbose_prompt    = false; // print prompt tokens before generation
     bool display_prompt    = true;  // print prompt before generation
-    bool dump_kv_cache     = false; // dump the KV cache contents for debugging purposes
     bool no_kv_offload     = false; // disable KV offloading
     bool warmup            = true;  // warmup run
     bool check_tensors     = false; // validate tensor data
     bool no_op_offload     = false; // globally disable offload host tensor operations to device
+    bool no_extra_bufts    = false; // disable extra buffer types (used for weight repacking)
 
     bool single_turn = false; // single turn chat conversation
 
@@ -352,7 +381,7 @@ struct common_params {
     int32_t embd_normalize = 2;    // normalisation for embeddings (-1=none, 0=max absolute int16, 1=taxicab, 2=euclidean, >2=p-norm)
     std::string embd_out   = "";   // empty = default, "array" = [[],[]...], "json" = openai style, "json+" = same "json" + cosine similarity matrix
     std::string embd_sep   = "\n"; // separator of embeddings
-    bool reranking         = false; // enable reranking support on server
+    std::string cls_sep    = "\t"; // separator of classification sequences
 
     // server params
     int32_t port           = 8080; // server listens on this network port
@@ -363,16 +392,21 @@ struct common_params {
 
     std::string hostname      = "127.0.0.1";
     std::string public_path   = ""; // NOLINT
+    std::string api_prefix    = ""; // NOLINT
     std::string chat_template = ""; // NOLINT
     bool use_jinja = false; // NOLINT
     bool enable_chat_template = true;
-    common_reasoning_format reasoning_format = COMMON_REASONING_FORMAT_DEEPSEEK;
+    common_reasoning_format reasoning_format = COMMON_REASONING_FORMAT_AUTO;
+    int reasoning_budget = -1;
+    bool prefill_assistant = true; // if true, any trailing assistant message will be prefilled into the response
 
     std::vector<std::string> api_keys;
 
     std::string ssl_file_key  = ""; // NOLINT
     std::string ssl_file_cert = ""; // NOLINT
 
+    std::map<std::string, std::string> default_template_kwargs;
+
     // "advanced" endpoints are disabled by default for better security
     bool webui            = true;
     bool endpoint_slots   = false;
@@ -407,10 +441,12 @@ struct common_params {
     int32_t n_out_freq  = 10; // output the imatrix every n_out_freq iterations
     int32_t n_save_freq = 0;  // save the imatrix every n_save_freq iterations
     int32_t i_chunk     = 0;  // start processing from this chunk
+    int8_t  imat_dat    = 0;  // whether the legacy imatrix.dat format should be output (gguf <= 0 < dat)
 
-    bool process_output = false; // collect data for the output tensor
-    bool compute_ppl    = true;  // whether to compute perplexity
-    bool parse_special  = false; // whether to parse special tokens during imatrix tokenization
+    bool process_output  = false; // collect data for the output tensor
+    bool compute_ppl     = true;  // whether to compute perplexity
+    bool show_statistics = false; // show imatrix statistics per tensor
+    bool parse_special   = false; // whether to parse special tokens during imatrix tokenization
 
     // cvector-generator params
     int n_pca_batch = 100;
@@ -426,6 +462,11 @@ struct common_params {
 
     // common params
     std::string out_file; // output filename for all example programs
+
+    // optional callback for model loading progress and cancellation:
+    // called with a progress value between 0.0 and 1.0.
+    // return false from callback to abort model loading or true to continue
+    llama_progress_callback load_progress_callback = NULL;
+    void * load_progress_callback_user_data = NULL;
 };
 
 // call once at the start of a program if it uses libcommon
@@ -503,10 +544,10 @@ static bool string_starts_with(const std::string & str,
     return str.rfind(prefix, 0) == 0;
 }
 
-static bool string_ends_with(const std::string & str,
-                             const std::string & suffix) { // While we wait for C++20's std::string::ends_with...
-    return str.size() >= suffix.size() && str.compare(str.size()-suffix.size(), suffix.size(), suffix) == 0;
-}
+// While we wait for C++20's std::string::ends_with...
+bool string_ends_with(const std::string_view & str, const std::string_view & suffix);
+bool string_remove_suffix(std::string & str, const std::string_view & suffix);
+size_t string_find_partial_stop(const std::string_view & str, const std::string_view & stop);
 
 bool string_parse_kv_override(const char * data, std::vector<llama_model_kv_override> & overrides);
 void string_process_escapes(std::string & input);
@@ -615,16 +656,6 @@ std::string common_detokenize(
         const std::vector<llama_token> & tokens,
                                   bool   special = true);
 
-//
-// KV cache utils
-//
-
-// Dump the KV cache view with the number of sequences per cell.
-void common_kv_cache_dump_view(const llama_kv_cache_view & view, int row_size = 80);
-
-// Dump the KV cache view showing individual sequences in each cell (long output).
-void common_kv_cache_dump_view_seqs(const llama_kv_cache_view & view, int row_size = 40);
-
 //
 // Embedding utils
 //
diff --git a/llama/llama.cpp/common/json-schema-to-grammar.cpp b/llama/llama.cpp/common/json-schema-to-grammar.cpp
index 656b3ecaa..98b8280fd 100644
--- a/llama/llama.cpp/common/json-schema-to-grammar.cpp
+++ b/llama/llama.cpp/common/json-schema-to-grammar.cpp
@@ -1,8 +1,9 @@
 #include "json-schema-to-grammar.h"
 #include "common.h"
 
+#include <nlohmann/json.hpp>
+
 #include
-#include
 #include
 #include
 #include
@@ -40,49 +41,6 @@ static std::string build_repetition(const std::string & item_rule, int min_items
     return result;
 }
 
-/* Minimalistic replacement for std::string_view, which is only available from C++17 onwards */
-class string_view {
-    const std::string & _str;
-    const size_t _start;
-    const size_t _end;
-public:
-    string_view(const std::string & str, size_t start = 0, size_t end = std::string::npos) : _str(str), _start(start), _end(end == std::string::npos ? str.length() : end) {}
-
-    size_t size() const {
-        return _end - _start;
-    }
-
-    size_t length() const {
-        return size();
-    }
-
-    operator std::string() const {
-        return str();
-    }
-
-    std::string str() const {
-        return _str.substr(_start, _end - _start);
-    }
-
-    string_view substr(size_t pos, size_t len = std::string::npos) const {
-        return string_view(_str, _start + pos, len == std::string::npos ? _end : _start + pos + len);
-    }
-
-    char operator[](size_t pos) const {
-        auto index = _start + pos;
-        if (index >= _end) {
-            throw std::out_of_range("string_view index out of range");
-        }
-        return _str[_start + pos];
-    }
-
-    bool operator==(const string_view & other) const {
-        std::string this_str = *this;
-        std::string other_str = other;
-        return this_str == other_str;
-    }
-};
-
 static void _build_min_max_int(int min_value, int max_value, std::stringstream & out, int decimals_left = 16, bool top_level = true) {
     auto has_min = min_value != std::numeric_limits<int>::min();
     auto has_max = max_value != std::numeric_limits<int>::max();
@@ -111,14 +69,14 @@ static void _build_min_max_int(int min_value, int max_value, std::stringstream &
             }
             out << "}";
         };
-    std::function<void(const string_view &, const string_view &)> uniform_range =
-        [&](const string_view & from, const string_view & to) {
+    std::function<void(const std::string_view &, const std::string_view &)> uniform_range =
+        [&](const std::string_view & from, const std::string_view & to) {
             size_t i = 0;
             while (i < from.length() && i < to.length() && from[i] == to[i]) {
                 i++;
             }
             if (i > 0) {
-                out << "\"" << from.substr(0, i).str() << "\"";
+                out << "\"" << from.substr(0, i) << "\"";
             }
             if (i < from.length() && i < to.length()) {
                 if (i > 0) {
diff --git a/llama/llama.cpp/common/json-schema-to-grammar.h b/llama/llama.cpp/common/json-schema-to-grammar.h
index 4613f5d9f..362991b54 100644
--- a/llama/llama.cpp/common/json-schema-to-grammar.h
+++ b/llama/llama.cpp/common/json-schema-to-grammar.h
@@ -1,9 +1,9 @@
 #pragma once
 
-#include "ggml.h"
-// Change JSON_ASSERT from assert() to GGML_ASSERT:
-#define JSON_ASSERT GGML_ASSERT
-#include "json.hpp"
+#include <nlohmann/json_fwd.hpp>
+
+#include <functional>
+#include <string>
 
 std::string json_schema_to_grammar(const nlohmann::ordered_json & schema,
     bool force_gbnf = false);
diff --git a/llama/llama.cpp/common/sampling.cpp b/llama/llama.cpp/common/sampling.cpp
index 28705e24c..9c04d35fd 100644
--- a/llama/llama.cpp/common/sampling.cpp
+++ b/llama/llama.cpp/common/sampling.cpp
@@ -161,7 +161,7 @@ struct common_sampler * common_sampler_init(const struct llama_model * model, co
         GGML_ABORT("llguidance (cmake -DLLAMA_LLGUIDANCE=ON) is not enabled");
 #endif // LLAMA_USE_LLGUIDANCE
     } else {
-        std::vector<std::string> patterns_at_start;
+        std::vector<std::string> trigger_patterns;
         std::vector<std::string> patterns_anywhere;
         std::vector<llama_token> trigger_tokens;
         for (const auto & trigger : params.grammar_triggers) {
@@ -173,10 +173,13 @@ struct common_sampler * common_sampler_init(const struct llama_model * model, co
                     break;
                 }
                 case COMMON_GRAMMAR_TRIGGER_TYPE_PATTERN:
-                case COMMON_GRAMMAR_TRIGGER_TYPE_PATTERN_START:
                 {
-                    const auto & pattern = trigger.value;
-                    (trigger.type == COMMON_GRAMMAR_TRIGGER_TYPE_PATTERN_START ? patterns_at_start : patterns_anywhere).push_back(pattern);
+                    patterns_anywhere.push_back(trigger.value);
+                    break;
+                }
+                case COMMON_GRAMMAR_TRIGGER_TYPE_PATTERN_FULL:
+                {
+                    trigger_patterns.push_back(trigger.value);
                     break;
                 }
                 case COMMON_GRAMMAR_TRIGGER_TYPE_TOKEN:
@@ -190,10 +193,6 @@ struct common_sampler * common_sampler_init(const struct llama_model * model, co
             }
         }
 
-        std::vector<std::string> trigger_patterns;
-        if (!patterns_at_start.empty()) {
-            trigger_patterns.push_back("^(" + string_join(patterns_at_start, "|") + ")[\\s\\S]*");
-        }
         if (!patterns_anywhere.empty()) {
             trigger_patterns.push_back("^[\\s\\S]*?(" + string_join(patterns_anywhere, "|") + ")[\\s\\S]*");
         }
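Review note: the rename from PATTERN_START to PATTERN_FULL changes the contract — a full pattern is used as-is and must itself span the entire generated text, while plain PATTERN values still get wrapped so they can fire anywhere. A hedged sketch of just that mapping (the `join` helper is a stand-in for common.cpp's `string_join`):

```cpp
// How the two trigger kinds now compile into anchored regex alternatives.
#include <string>
#include <vector>

static std::string join(const std::vector<std::string> & v, const std::string & sep) {
    std::string out;
    for (size_t i = 0; i < v.size(); i++) { if (i) out += sep; out += v[i]; }
    return out;
}

std::vector<std::string> build_trigger_patterns(
        const std::vector<std::string> & patterns_anywhere,
        const std::vector<std::string> & patterns_full) {
    // PATTERN_FULL values pass through untouched: each must match the whole output.
    std::vector<std::string> trigger_patterns = patterns_full;
    if (!patterns_anywhere.empty()) {
        // PATTERN values get a lazy skip-anything prefix so they can match anywhere.
        trigger_patterns.push_back("^[\\s\\S]*?(" + join(patterns_anywhere, "|") + ")[\\s\\S]*");
    }
    return trigger_patterns;
}
```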
diff --git a/llama/llama.cpp/include/llama.h b/llama/llama.cpp/include/llama.h
index abedebdb7..545e957e5 100644
--- a/llama/llama.cpp/include/llama.h
+++ b/llama/llama.cpp/include/llama.h
@@ -61,59 +61,23 @@ extern "C" {
     struct llama_model;
     struct llama_context;
     struct llama_sampler;
-    struct llama_kv_cache;
+
+    typedef struct llama_memory_i * llama_memory_t;
+
+    struct llama_kv_cache; // DEPRECATED (use llama_memory instead)
 
     typedef int32_t llama_pos;
     typedef int32_t llama_token;
     typedef int32_t llama_seq_id;
 
     enum llama_vocab_type {
-        LLAMA_VOCAB_TYPE_NONE = 0, // For models without vocab
-        LLAMA_VOCAB_TYPE_SPM  = 1, // LLaMA tokenizer based on byte-level BPE with byte fallback
-        LLAMA_VOCAB_TYPE_BPE  = 2, // GPT-2 tokenizer based on byte-level BPE
-        LLAMA_VOCAB_TYPE_WPM  = 3, // BERT tokenizer based on WordPiece
-        LLAMA_VOCAB_TYPE_UGM  = 4, // T5 tokenizer based on Unigram
-        LLAMA_VOCAB_TYPE_RWKV = 5, // RWKV tokenizer based on greedy tokenization
-    };
-
-    // pre-tokenization types
-    enum llama_vocab_pre_type {
-        LLAMA_VOCAB_PRE_TYPE_DEFAULT        = 0,
-        LLAMA_VOCAB_PRE_TYPE_LLAMA3         = 1,
-        LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_LLM   = 2,
-        LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_CODER = 3,
-        LLAMA_VOCAB_PRE_TYPE_FALCON         = 4,
-        LLAMA_VOCAB_PRE_TYPE_MPT            = 5,
-        LLAMA_VOCAB_PRE_TYPE_STARCODER      = 6,
-        LLAMA_VOCAB_PRE_TYPE_GPT2           = 7,
-        LLAMA_VOCAB_PRE_TYPE_REFACT         = 8,
-        LLAMA_VOCAB_PRE_TYPE_COMMAND_R      = 9,
-        LLAMA_VOCAB_PRE_TYPE_STABLELM2      = 10,
-        LLAMA_VOCAB_PRE_TYPE_QWEN2          = 11,
-        LLAMA_VOCAB_PRE_TYPE_OLMO           = 12,
-        LLAMA_VOCAB_PRE_TYPE_DBRX           = 13,
-        LLAMA_VOCAB_PRE_TYPE_SMAUG          = 14,
-        LLAMA_VOCAB_PRE_TYPE_PORO           = 15,
-        LLAMA_VOCAB_PRE_TYPE_CHATGLM3       = 16,
-        LLAMA_VOCAB_PRE_TYPE_CHATGLM4       = 17,
-        LLAMA_VOCAB_PRE_TYPE_VIKING         = 18,
-        LLAMA_VOCAB_PRE_TYPE_JAIS           = 19,
-        LLAMA_VOCAB_PRE_TYPE_TEKKEN         = 20,
-        LLAMA_VOCAB_PRE_TYPE_SMOLLM         = 21,
-        LLAMA_VOCAB_PRE_TYPE_CODESHELL      = 22,
-        LLAMA_VOCAB_PRE_TYPE_BLOOM          = 23,
-        LLAMA_VOCAB_PRE_TYPE_GPT3_FINNISH   = 24,
-        LLAMA_VOCAB_PRE_TYPE_EXAONE         = 25,
-        LLAMA_VOCAB_PRE_TYPE_CHAMELEON      = 26,
-        LLAMA_VOCAB_PRE_TYPE_MINERVA        = 27,
-        LLAMA_VOCAB_PRE_TYPE_DEEPSEEK3_LLM  = 28,
-        LLAMA_VOCAB_PRE_TYPE_GPT4O          = 29,
-        LLAMA_VOCAB_PRE_TYPE_SUPERBPE       = 30,
-        LLAMA_VOCAB_PRE_TYPE_TRILLION       = 31,
-        LLAMA_VOCAB_PRE_TYPE_BAILINGMOE     = 32,
-        LLAMA_VOCAB_PRE_TYPE_LLAMA4         = 33,
-        LLAMA_VOCAB_PRE_TYPE_PIXTRAL        = 34,
-        LLAMA_VOCAB_PRE_TYPE_SEED_CODER     = 35,
+        LLAMA_VOCAB_TYPE_NONE   = 0, // For models without vocab
+        LLAMA_VOCAB_TYPE_SPM    = 1, // LLaMA tokenizer based on byte-level BPE with byte fallback
+        LLAMA_VOCAB_TYPE_BPE    = 2, // GPT-2 tokenizer based on byte-level BPE
+        LLAMA_VOCAB_TYPE_WPM    = 3, // BERT tokenizer based on WordPiece
+        LLAMA_VOCAB_TYPE_UGM    = 4, // T5 tokenizer based on Unigram
+        LLAMA_VOCAB_TYPE_RWKV   = 5, // RWKV tokenizer based on greedy tokenization
+        LLAMA_VOCAB_TYPE_PLAMO2 = 6, // PLaMo-2 tokenizer based on Aho-Corasick with dynamic programming
     };
 
     enum llama_rope_type {
@@ -188,6 +152,7 @@ extern "C" {
         //LLAMA_FTYPE_MOSTLY_Q4_0_8_8 = 35, // removed from gguf files, use Q4_0 and runtime repack
         LLAMA_FTYPE_MOSTLY_TQ1_0         = 36, // except 1d tensors
         LLAMA_FTYPE_MOSTLY_TQ2_0         = 37, // except 1d tensors
+        LLAMA_FTYPE_MOSTLY_MXFP4_MOE     = 38, // except 1d tensors
 
         LLAMA_FTYPE_GUESSED = 1024, // not specified in the model file
     };
@@ -240,18 +205,21 @@ extern "C" {
 
     typedef bool (*llama_progress_callback)(float progress, void * user_data);
 
-    // Input data for llama_decode
+    // Input data for llama_encode/llama_decode
     // A llama_batch object can contain input about one or many sequences
     // The provided arrays (i.e. token, embd, pos, etc.) must have size of n_tokens
     //
     // - token  : the token ids of the input (used when embd is NULL)
     // - embd   : token embeddings (i.e. float vector of size n_embd) (used when token is NULL)
     // - pos    : the positions of the respective token in the sequence
-    //            (if set to NULL, the token position will be tracked automatically by llama_decode)
+    //            (if set to NULL, the token position will be tracked automatically by llama_encode/llama_decode)
     // - seq_id : the sequence to which the respective token belongs
    //            (if set to NULL, the sequence ID will be assumed to be 0)
     // - logits : if zero, the logits (and/or the embeddings) for the respective token will not be output
-    //            (if set to NULL, only the logits for last token will be returned)
+    //            (if set to NULL:
+    //               - if embeddings: all tokens are output
+    //               - if not:        only the last token is output
+    //            )
     //
     typedef struct llama_batch {
         int32_t n_tokens;
@@ -261,7 +229,7 @@ extern "C" {
         llama_pos    *  pos;
         int32_t      *  n_seq_id;
         llama_seq_id ** seq_id;
-        int8_t       *  logits; // TODO: rename this to "output"
+        int8_t       *  logits;   // TODO: rename this to "output"
     } llama_batch;
 
     enum llama_model_kv_override_type {
@@ -317,10 +285,11 @@ extern "C" {
         const struct llama_model_kv_override * kv_overrides;
 
         // Keep the booleans together to avoid misalignment during copy-by-value.
-        bool vocab_only;    // only load the vocabulary, no weights
-        bool use_mmap;      // use mmap if possible
-        bool use_mlock;     // force system to keep model in RAM
-        bool check_tensors; // validate model tensor data
+        bool vocab_only;      // only load the vocabulary, no weights
+        bool use_mmap;        // use mmap if possible
+        bool use_mlock;       // force system to keep model in RAM
+        bool check_tensors;   // validate model tensor data
+        bool use_extra_bufts; // use extra buffer types (used for weight repacking)
     };
 
     // NOTE: changing the default values of parameters marked as [EXPERIMENTAL] may cause crashes or incorrect results in certain configurations
@@ -345,7 +314,7 @@ extern "C" {
         float     yarn_beta_fast;   // YaRN low correction dim
         float     yarn_beta_slow;   // YaRN high correction dim
         uint32_t  yarn_orig_ctx;    // YaRN original context size
-        float     defrag_thold;     // defragment the KV cache if holes/size > thold, < 0 disabled (default)
+        float     defrag_thold;     // defragment the KV cache if holes/size > thold, <= 0 disabled (default)
 
         ggml_backend_sched_eval_callback cb_eval;
         void * cb_eval_user_data;
@@ -361,10 +330,16 @@ extern "C" {
         // Keep the booleans together and at the end of the struct to avoid misalignment during copy-by-value.
         bool embeddings;  // if true, extract embeddings (together with logits)
-        bool offload_kqv; // whether to offload the KQV ops (including the KV cache) to GPU
-        bool flash_attn;  // whether to use flash attention [EXPERIMENTAL]
-        bool no_perf;     // whether to measure performance timings
-        bool op_offload;  // whether to offload host tensor operations to device
+        bool offload_kqv; // offload the KQV ops (including the KV cache) to GPU
+        bool flash_attn;  // use flash attention [EXPERIMENTAL]
+        bool no_perf;     // measure performance timings
+        bool op_offload;  // offload host tensor operations to device
+        bool swa_full;    // use full-size SWA cache (https://github.com/ggml-org/llama.cpp/pull/13194#issuecomment-2868343055)
+                          // NOTE: setting to false when n_seq_max > 1 can cause bad performance in some cases
+                          //       ref: https://github.com/ggml-org/llama.cpp/pull/13845#issuecomment-2924800573
+        bool kv_unified;  // use a unified buffer across the input sequences when computing the attention
+                          // try to disable when n_seq_max > 1 for improved performance when the sequences do not share a large prefix
+                          //   ref: https://github.com/ggml-org/llama.cpp/pull/14363
     };
 
     // model quantization parameters
@@ -381,6 +356,7 @@ extern "C" {
         void * imatrix;      // pointer to importance matrix data
         void * kv_overrides; // pointer to vector containing overrides
         void * tensor_types; // pointer to vector containing tensor types
+        void * prune_layers; // pointer to vector containing layer indices to prune
     } llama_model_quantize_params;
 
     typedef struct llama_logit_bias {
@@ -470,6 +446,7 @@ extern "C" {
     LLAMA_API int64_t llama_time_us(void);
 
     LLAMA_API size_t llama_max_devices(void);
+    LLAMA_API size_t llama_max_parallel_sequences(void);
 
     LLAMA_API bool llama_supports_mmap       (void);
     LLAMA_API bool llama_supports_mlock      (void);
@@ -489,9 +466,11 @@ extern "C" {
     DEPRECATED(LLAMA_API int32_t llama_n_vocab (const struct llama_vocab * vocab), "use llama_vocab_n_tokens instead");
 
     LLAMA_API const struct llama_model * llama_get_model   (const struct llama_context * ctx);
-    LLAMA_API struct llama_kv_cache *    llama_get_kv_self (      struct llama_context * ctx);
+    LLAMA_API           llama_memory_t   llama_get_memory  (const struct llama_context * ctx);
     LLAMA_API    enum llama_pooling_type llama_pooling_type(const struct llama_context * ctx); // TODO: rename to llama_get_pooling_type
 
+    DEPRECATED(LLAMA_API struct llama_kv_cache * llama_get_kv_self(struct llama_context * ctx), "use llama_get_memory instead");
+
     LLAMA_API const struct llama_vocab * llama_model_get_vocab(const struct llama_model * model);
     LLAMA_API enum llama_rope_type       llama_model_rope_type(const struct llama_model * model);
 
@@ -500,10 +479,18 @@ extern "C" {
     LLAMA_API int32_t llama_model_n_layer   (const struct llama_model * model);
     LLAMA_API int32_t llama_model_n_head    (const struct llama_model * model);
     LLAMA_API int32_t llama_model_n_head_kv (const struct llama_model * model);
+    LLAMA_API int32_t llama_model_n_swa     (const struct llama_model * model);
 
     // Get the model's RoPE frequency scaling factor
     LLAMA_API float llama_model_rope_freq_scale_train(const struct llama_model * model);
 
+    // Returns the number of classifier outputs (only valid for classifier models)
+    // Undefined behavior for non-classifier models
+    LLAMA_API uint32_t llama_model_n_cls_out(const struct llama_model * model);
+
+    // Returns label of classifier output by index ( 1`
+    // p0 < 0 : [0,  p1]
+    // p1 < 0 : [p0, inf)
+    LLAMA_API void llama_memory_seq_div(
+            llama_memory_t mem,
+              llama_seq_id seq_id,
+                 llama_pos p0,
+                 llama_pos p1,
+                       int d);
+
+    // Returns the smallest position present in the memory for the specified sequence
+    // This is typically non-zero only for SWA caches
+    // Note that all positions in the range [pos_min, pos_max] are guaranteed to be present in the memory
+    // Return -1 if the sequence is empty
+    LLAMA_API llama_pos llama_memory_seq_pos_min(
+            llama_memory_t mem,
+              llama_seq_id seq_id);
+
+    // Returns the largest position present in the memory for the specified sequence
+    // Note that all positions in the range [pos_min, pos_max] are guaranteed to be present in the memory
+    // Return -1 if the sequence is empty
+    LLAMA_API llama_pos llama_memory_seq_pos_max(
+            llama_memory_t mem,
+              llama_seq_id seq_id);
+
+    // Check if the memory supports shifting
+    LLAMA_API bool llama_memory_can_shift(llama_memory_t mem);
+
+    //
+    // KV cache for self-attention (TODO: deprecate in favor of llama_memory)
+    //
+
+    // Returns the number of tokens in the KV cache (slow, use only for debug)
+    // If a KV cell has multiple sequences assigned to it, it will be counted multiple times
+    DEPRECATED(LLAMA_API int32_t llama_kv_self_n_tokens(const struct llama_context * ctx),
+            "Use llama_kv_self_seq_pos_max() and llama_kv_self_seq_pos_min() instead (https://github.com/ggml-org/llama.cpp/issues/13793)");
+
+    // Returns the number of used KV cells (i.e. have at least one sequence assigned to them)
+    DEPRECATED(LLAMA_API int32_t llama_kv_self_used_cells(const struct llama_context * ctx),
+            "Use llama_kv_self_seq_pos_max() and llama_kv_self_seq_pos_min() instead (https://github.com/ggml-org/llama.cpp/issues/13793)");
+
+    // Clear the KV cache - both cell info is erased and KV data is zeroed
+    DEPRECATED(LLAMA_API void llama_kv_self_clear(
+            struct llama_context * ctx),
+            "Use llama_memory_clear() instead");
+
+    // Removes all tokens that belong to the specified sequence and have positions in [p0, p1)
+    // Returns false if a partial sequence cannot be removed. Removing a whole sequence never fails
+    // seq_id < 0 : match any sequence
+    // p0 < 0     : [0,  p1]
+    // p1 < 0     : [p0, inf)
+    DEPRECATED(LLAMA_API bool llama_kv_self_seq_rm(
             struct llama_context * ctx,
                     llama_seq_id   seq_id,
                        llama_pos   p0,
-                       llama_pos   p1);
+                       llama_pos   p1),
+            "Use llama_memory_seq_rm() instead");
 
     // Copy all tokens that belong to the specified sequence to another sequence
     // Note that this does not allocate extra KV cache memory - it simply assigns the tokens to the new sequence
     // p0 < 0 : [0,  p1]
     // p1 < 0 : [p0, inf)
-    LLAMA_API void llama_kv_self_seq_cp(
+    DEPRECATED(LLAMA_API void llama_kv_self_seq_cp(
             struct llama_context * ctx,
                     llama_seq_id   seq_id_src,
                     llama_seq_id   seq_id_dst,
                        llama_pos   p0,
-                       llama_pos   p1);
+                       llama_pos   p1),
+            "Use llama_memory_seq_cp() instead");
 
     // Removes all tokens that do not belong to the specified sequence
-    LLAMA_API void llama_kv_self_seq_keep(
+    DEPRECATED(LLAMA_API void llama_kv_self_seq_keep(
             struct llama_context * ctx,
-                    llama_seq_id   seq_id);
+                    llama_seq_id   seq_id),
+            "Use llama_memory_seq_keep() instead");
 
     // Adds relative position "delta" to all tokens that belong to the specified sequence and have positions in [p0, p1)
     // If the KV cache is RoPEd, the KV data is updated accordingly:
     //   - lazily on next llama_decode()
-    //   - explicitly with llama_kv_self_update()
     // p0 < 0 : [0,  p1]
     // p1 < 0 : [p0, inf)
-    LLAMA_API void llama_kv_self_seq_add(
-            struct llama_context * ctx,
-                    llama_seq_id   seq_id,
-                       llama_pos   p0,
-                       llama_pos   p1,
-                       llama_pos   delta);
-
-    // Integer division of the positions by factor of `d > 1`
-    // If the KV cache is RoPEd, the KV data is updated accordingly:
-    //   - lazily on next llama_decode()
-    //   - explicitly with llama_kv_self_update()
-    // p0 < 0 : [0,  p1]
-    // p1 < 0 : [p0, inf)
-    LLAMA_API void llama_kv_self_seq_div(
-            struct llama_context * ctx,
-                    llama_seq_id   seq_id,
-                       llama_pos   p0,
-                       llama_pos   p1,
-                             int   d);
-
-    // Returns the largest position present in the KV cache for the specified sequence
-    LLAMA_API llama_pos llama_kv_self_seq_pos_max(
-            struct llama_context * ctx,
-                    llama_seq_id   seq_id);
-
-    // Defragment the KV cache
-    // This will be applied:
-    //   - lazily on next llama_decode()
-    //   - explicitly with llama_kv_self_update()
-    LLAMA_API void llama_kv_self_defrag(struct llama_context * ctx);
-
-    // Check if the context supports KV cache shifting
-    LLAMA_API bool llama_kv_self_can_shift(const struct llama_context * ctx);
-
-    // Apply the KV cache updates (such as K-shifts, defragmentation, etc.)
-    LLAMA_API void llama_kv_self_update(struct llama_context * ctx);
-
-    DEPRECATED(LLAMA_API void llama_kv_cache_clear(
-            struct llama_context * ctx),
-        "use llama_kv_self_clear instead");
-
-    DEPRECATED(LLAMA_API bool llama_kv_cache_seq_rm(
-            struct llama_context * ctx,
-                    llama_seq_id   seq_id,
-                       llama_pos   p0,
-                       llama_pos   p1),
-        "use llama_kv_self_seq_rm instead");
-
-    DEPRECATED(LLAMA_API void llama_kv_cache_seq_cp(
-            struct llama_context * ctx,
-                    llama_seq_id   seq_id_src,
-                    llama_seq_id   seq_id_dst,
-                       llama_pos   p0,
-                       llama_pos   p1),
-        "use llama_kv_self_seq_cp instead");
-
-    DEPRECATED(LLAMA_API void llama_kv_cache_seq_keep(
-            struct llama_context * ctx,
-                    llama_seq_id   seq_id),
-        "use llama_kv_self_seq_keep instead");
-
-    DEPRECATED(LLAMA_API void llama_kv_cache_seq_add(
+    DEPRECATED(LLAMA_API void llama_kv_self_seq_add(
             struct llama_context * ctx,
                     llama_seq_id   seq_id,
                        llama_pos   p0,
                        llama_pos   p1,
                        llama_pos   delta),
-        "use llama_kv_self_seq_add instead");
+            "Use llama_memory_seq_add() instead");
 
-    DEPRECATED(LLAMA_API void llama_kv_cache_seq_div(
+    // Integer division of the positions by factor of `d > 1`
+    // If the KV cache is RoPEd, the KV data is updated accordingly:
+    //   - lazily on next llama_decode()
+    // p0 < 0 : [0,  p1]
+    // p1 < 0 : [p0, inf)
+    DEPRECATED(LLAMA_API void llama_kv_self_seq_div(
             struct llama_context * ctx,
                     llama_seq_id   seq_id,
                        llama_pos   p0,
                        llama_pos   p1,
                              int   d),
-        "use llama_kv_self_seq_div instead");
+            "Use llama_memory_seq_div() instead");
 
-    DEPRECATED(LLAMA_API llama_pos llama_kv_cache_seq_pos_max(
+    // Returns the smallest position present in the KV cache for the specified sequence
+    // This is typically non-zero only for SWA caches
+    // Note that all positions in the range [pos_min, pos_max] are guaranteed to be present in the KV cache
+    // Return -1 if the sequence is empty
+    DEPRECATED(LLAMA_API llama_pos llama_kv_self_seq_pos_min(
             struct llama_context * ctx,
                     llama_seq_id   seq_id),
-        "use llama_kv_self_seq_pos_max instead");
+            "Use llama_memory_seq_pos_min() instead");
 
-    DEPRECATED(LLAMA_API void llama_kv_cache_defrag(struct llama_context * ctx),
-        "use llama_kv_self_defrag instead");
+    // Returns the largest position present in the KV cache for the specified sequence
+    // Note that all positions in the range [pos_min, pos_max] are guaranteed to be present in the KV cache
+    // Return -1 if the sequence is empty
+    DEPRECATED(LLAMA_API llama_pos llama_kv_self_seq_pos_max(
+            struct llama_context * ctx,
+                    llama_seq_id   seq_id),
+            "Use llama_memory_seq_pos_max() instead");
 
-    DEPRECATED(LLAMA_API bool llama_kv_cache_can_shift(const struct llama_context * ctx),
-        "use llama_kv_self_can_shift instead");
+    // Defragment the KV cache
+    // This will be applied:
+    //   - lazily on next llama_decode()
+    DEPRECATED(LLAMA_API void llama_kv_self_defrag(struct llama_context * ctx),
+            "simply remove this call, the context will automatically decide when to do a defragmentation based on 'defrag_thold'");
 
-    DEPRECATED(LLAMA_API void llama_kv_cache_update(struct llama_context * ctx),
-        "use llama_kv_self_update instead");
+    // Check if the context supports KV cache shifting
+    DEPRECATED(LLAMA_API bool llama_kv_self_can_shift(const struct llama_context * ctx),
+            "use llama_memory_can_shift() instead");
 
+    // Apply the KV cache updates (such as K-shifts, defragmentation, etc.)
+    DEPRECATED(LLAMA_API void llama_kv_self_update(struct llama_context * ctx),
+            "simply remove this call, updates are applied lazily on the next llama_decode()");
 
     //
     // State / sessions
     //
 
     // Returns the *actual* size in bytes of the state
-    // (logits, embedding and kv_cache)
+    // (logits, embedding and memory)
     // Only use when saving the state, not when restoring it, otherwise the size may be too small.
     LLAMA_API size_t llama_state_get_size(struct llama_context * ctx);
     LLAMA_API DEPRECATED(size_t llama_get_state_size(struct llama_context * ctx),
@@ -863,12 +833,12 @@ extern "C" {
                            size_t   n_token_count),
         "use llama_state_save_file instead");
 
-    // Get the exact size needed to copy the KV cache of a single sequence
+    // Get the exact size needed to copy the state of a single sequence
     LLAMA_API size_t llama_state_seq_get_size(
             struct llama_context * ctx,
                     llama_seq_id   seq_id);
 
-    // Copy the KV cache of a single sequence into the specified buffer
+    // Copy the state of a single sequence into the specified buffer
     LLAMA_API size_t llama_state_seq_get_data(
             struct llama_context * ctx,
                          uint8_t * dst,
@@ -934,18 +904,23 @@ extern "C" {
     // For encode-decoder contexts, processes the batch using the encoder.
     // Can store the encoder output internally for later use by the decoder's cross-attention layers.
     //   0 - success
-    // < 0 - error. the KV cache state is restored to the state before this call
+    // < 0 - error. the memory state is restored to the state before this call
     LLAMA_API int32_t llama_encode(
             struct llama_context * ctx,
               struct llama_batch   batch);
 
     // Process a batch of tokens.
-    // Requires KV cache.
+    // Requires the context to have a memory.
     // For encode-decoder contexts, processes the batch using the decoder.
     // Positive return values does not mean a fatal error, but rather a warning.
-    //   0 - success
-    //   1 - could not find a KV slot for the batch (try reducing the size of the batch or increase the context)
-    // < 0 - error. the KV cache state is restored to the state before this call
+    // Upon fatal-error or abort, the ubatches that managed to be been processed will remain in the memory state of the context
+    //   To handle this correctly, query the memory state using llama_memory_seq_pos_min() and llama_memory_seq_pos_max()
+    // Upon other return values, the memory state is restored to the state before this call
+    //    0 - success
+    //    1 - could not find a KV slot for the batch (try reducing the size of the batch or increase the context)
+    //    2 - aborted (processed ubatches will remain in the context's memory)
+    //   -1 - invalid input batch
+    // < -1 - fatal error (processed ubatches will remain in the context's memory)
     LLAMA_API int32_t llama_decode(
             struct llama_context * ctx,
              struct llama_batch   batch);
@@ -961,8 +936,8 @@ extern "C" {
     // Get the number of threads used for prompt and batch processing (multiple token).
     LLAMA_API int32_t llama_n_threads_batch(struct llama_context * ctx);
 
-    // Set whether the model is in embeddings mode or not
-    // If true, embeddings will be returned but logits will not
+    // Set whether the context outputs embeddings or not
+    // TODO: rename to avoid confusion with llama_get_embeddings()
     LLAMA_API void llama_set_embeddings(struct llama_context * ctx, bool embeddings);
 
     // Set whether to use causal attention or not
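Review note: the new `llama_decode` contract above no longer guarantees rollback on every failure — on abort (2) or fatal error (< -1), already-processed ubatches stay in the context's memory. A hedged sketch of a caller that follows this contract, assuming `ctx` and `batch` are already set up; `llama_memory_seq_rm` is the replacement named in the deprecation messages above:

```cpp
// Handle the documented llama_decode return codes and clean up partial state.
#include <cstdio>
#include "llama.h"

bool decode_checked(llama_context * ctx, llama_batch batch, llama_seq_id seq) {
    const int32_t ret = llama_decode(ctx, batch);
    if (ret == 0) {
        return true;
    }
    if (ret == 1) {
        // recoverable: memory state was restored before returning
        fprintf(stderr, "no KV slot: shrink the batch or grow the context\n");
        return false;
    }
    // aborted (2) or fatal (< -1): processed ubatches remain in memory,
    // so query what actually landed and then drop the partial sequence
    llama_memory_t mem = llama_get_memory(ctx);
    const llama_pos p0 = llama_memory_seq_pos_min(mem, seq);
    const llama_pos p1 = llama_memory_seq_pos_max(mem, seq);
    fprintf(stderr, "decode failed (%d), positions [%d, %d] remain in memory\n", ret, p0, p1);
    llama_memory_seq_rm(mem, seq, -1, -1);   // p0/p1 < 0 means the whole range
    return false;
}
```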
@@ -986,6 +961,7 @@ extern "C" {
     // in the order they have appeared in the batch.
     // Rows: number of tokens for which llama_batch.logits[i] != 0
     // Cols: n_vocab
+    // TODO: deprecate in favor of llama_get_logits_ith() (ref: https://github.com/ggml-org/llama.cpp/pull/14853#issuecomment-3113143522)
     LLAMA_API float * llama_get_logits(struct llama_context * ctx);
 
     // Logits for the ith token. For positive indices, Equivalent to:
@@ -1000,6 +976,7 @@ extern "C" {
     // in the order they have appeared in the batch.
     // shape: [n_outputs*n_embd]
     // Otherwise, returns NULL.
+    // TODO: deprecate in favor of llama_get_embeddings_ith() (ref: https://github.com/ggml-org/llama.cpp/pull/14853#issuecomment-3113143522)
     LLAMA_API float * llama_get_embeddings(struct llama_context * ctx);
 
     // Get the embeddings for the ith token. For positive indices, Equivalent to:
@@ -1011,7 +988,7 @@ extern "C" {
 
     // Get the embeddings for a sequence id
     // Returns NULL if pooling_type is LLAMA_POOLING_TYPE_NONE
-    // when pooling_type == LLAMA_POOLING_TYPE_RANK, returns float[1] with the rank of the sequence
+    // when pooling_type == LLAMA_POOLING_TYPE_RANK, returns float[n_cls_out] with the rank(s) of the sequence
     // otherwise: float[n_embd] (1-dimensional)
     LLAMA_API float * llama_get_embeddings_seq(struct llama_context * ctx, llama_seq_id seq_id);
 
@@ -1038,9 +1015,11 @@ extern "C" {
     LLAMA_API llama_token llama_vocab_sep(const struct llama_vocab * vocab); // sentence separator
     LLAMA_API llama_token llama_vocab_nl (const struct llama_vocab * vocab); // next-line
     LLAMA_API llama_token llama_vocab_pad(const struct llama_vocab * vocab); // padding
+    LLAMA_API llama_token llama_vocab_mask(const struct llama_vocab * vocab); // mask
 
     LLAMA_API bool llama_vocab_get_add_bos(const struct llama_vocab * vocab);
     LLAMA_API bool llama_vocab_get_add_eos(const struct llama_vocab * vocab);
+    LLAMA_API bool llama_vocab_get_add_sep(const struct llama_vocab * vocab);
 
     LLAMA_API llama_token llama_vocab_fim_pre(const struct llama_vocab * vocab);
     LLAMA_API llama_token llama_vocab_fim_suf(const struct llama_vocab * vocab);
@@ -1084,6 +1063,7 @@ extern "C" {
     /// @param tokens The tokens pointer must be large enough to hold the resulting tokens.
     /// @return Returns the number of tokens on success, no more than n_tokens_max
     /// @return Returns a negative number on failure - the number of tokens that would have been returned
+    /// @return Returns INT32_MIN on overflow (e.g., tokenization result size exceeds int32_t limit)
     /// @param add_special Allow to add BOS and EOS tokens if model is configured to do so.
     /// @param parse_special Allow tokenizing special and/or control tokens which otherwise are not exposed and treated
     ///                      as plaintext. Does not insert a leading space.
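Review note: with `LLAMA_POOLING_TYPE_RANK`, `llama_get_embeddings_seq()` now yields one float per classifier head (`float[n_cls_out]`) instead of always a single score. A hedged sketch of reading them, assuming `ctx` holds a classifier/reranker model and sequence `seq` has already been decoded with rank pooling:

```cpp
// Read per-head rank scores under the updated n_cls_out contract.
#include <cstdio>
#include "llama.h"

void print_rank_scores(llama_context * ctx, llama_seq_id seq) {
    const llama_model * model = llama_get_model(ctx);
    const uint32_t n_out = llama_model_n_cls_out(model);   // number of classifier outputs

    const float * scores = llama_get_embeddings_seq(ctx, seq);
    if (!scores) {
        fprintf(stderr, "no pooled output for sequence %d\n", seq);
        return;
    }
    for (uint32_t i = 0; i < n_out; i++) {
        printf("head %u: %f\n", i, scores[i]);
    }
}
```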
diff --git a/llama/llama.cpp/src/llama-arch.cpp b/llama/llama.cpp/src/llama-arch.cpp
index 5ab3f5722..4b2856466 100644
--- a/llama/llama.cpp/src/llama-arch.cpp
+++ b/llama/llama.cpp/src/llama-arch.cpp
@@ -20,6 +20,7 @@ static const std::map<llm_arch, const char *> LLM_ARCH_NAMES = {
     { LLM_ARCH_BERT,            "bert"            },
     { LLM_ARCH_NOMIC_BERT,      "nomic-bert"      },
     { LLM_ARCH_NOMIC_BERT_MOE,  "nomic-bert-moe"  },
+    { LLM_ARCH_NEO_BERT,        "neo-bert"        },
     { LLM_ARCH_JINA_BERT_V2,    "jina-bert-v2"    },
     { LLM_ARCH_BLOOM,           "bloom"           },
     { LLM_ARCH_STABLELM,        "stablelm"        },
@@ -33,6 +34,7 @@ static const std::map<llm_arch, const char *> LLM_ARCH_NAMES = {
     { LLM_ARCH_PHI3,            "phi3"            },
     { LLM_ARCH_PHIMOE,          "phimoe"          },
     { LLM_ARCH_PLAMO,           "plamo"           },
+    { LLM_ARCH_PLAMO2,          "plamo2"          },
     { LLM_ARCH_CODESHELL,       "codeshell"       },
     { LLM_ARCH_ORION,           "orion"           },
     { LLM_ARCH_INTERNLM2,       "internlm2"       },
@@ -41,8 +43,12 @@ static const std::map<llm_arch, const char *> LLM_ARCH_NAMES = {
     { LLM_ARCH_GEMMA,           "gemma"           },
     { LLM_ARCH_GEMMA2,          "gemma2"          },
     { LLM_ARCH_GEMMA3,          "gemma3"          },
+    { LLM_ARCH_GEMMA3N,         "gemma3n"         },
     { LLM_ARCH_STARCODER2,      "starcoder2"      },
     { LLM_ARCH_MAMBA,           "mamba"           },
+    { LLM_ARCH_MAMBA2,          "mamba2"          },
+    { LLM_ARCH_JAMBA,           "jamba"           },
+    { LLM_ARCH_FALCON_H1,       "falcon-h1"       },
     { LLM_ARCH_XVERSE,          "xverse"          },
     { LLM_ARCH_COMMAND_R,       "command-r"       },
     { LLM_ARCH_COHERE2,         "cohere2"         },
@@ -56,23 +62,38 @@ static const std::map<llm_arch, const char *> LLM_ARCH_NAMES = {
     { LLM_ARCH_DEEPSEEK2,       "deepseek2"       },
     { LLM_ARCH_CHATGLM,         "chatglm"         },
     { LLM_ARCH_GLM4,            "glm4"            },
+    { LLM_ARCH_GLM4_MOE,        "glm4moe"         },
     { LLM_ARCH_BITNET,          "bitnet"          },
     { LLM_ARCH_T5,              "t5"              },
     { LLM_ARCH_T5ENCODER,       "t5encoder"       },
     { LLM_ARCH_JAIS,            "jais"            },
     { LLM_ARCH_NEMOTRON,        "nemotron"        },
     { LLM_ARCH_EXAONE,          "exaone"          },
+    { LLM_ARCH_EXAONE4,         "exaone4"         },
     { LLM_ARCH_RWKV6,           "rwkv6"           },
     { LLM_ARCH_RWKV6QWEN2,      "rwkv6qwen2"      },
     { LLM_ARCH_RWKV7,           "rwkv7"           },
     { LLM_ARCH_ARWKV7,          "arwkv7"          },
     { LLM_ARCH_GRANITE,         "granite"         },
     { LLM_ARCH_GRANITE_MOE,     "granitemoe"      },
+    { LLM_ARCH_GRANITE_HYBRID,  "granitehybrid"   },
     { LLM_ARCH_CHAMELEON,       "chameleon"       },
     { LLM_ARCH_SOLAR,           "solar"           },
     { LLM_ARCH_WAVTOKENIZER_DEC, "wavtokenizer-dec" },
     { LLM_ARCH_PLM,             "plm"             },
     { LLM_ARCH_BAILINGMOE,      "bailingmoe"      },
+    { LLM_ARCH_DOTS1,           "dots1"           },
+    { LLM_ARCH_ARCEE,           "arcee"           },
+    { LLM_ARCH_ERNIE4_5,        "ernie4_5"        },
+    { LLM_ARCH_ERNIE4_5_MOE,    "ernie4_5-moe"    },
+    { LLM_ARCH_HUNYUAN_MOE,     "hunyuan-moe"     },
+    { LLM_ARCH_HUNYUAN_DENSE,   "hunyuan-dense"   },
+    { LLM_ARCH_SMOLLM3,         "smollm3"         },
+    { LLM_ARCH_OPENAI_MOE,      "gpt-oss"         },
+    { LLM_ARCH_LFM2,            "lfm2"            },
+    { LLM_ARCH_DREAM,           "dream"           },
+    { LLM_ARCH_SMALLTHINKER,    "smallthinker"    },
+    { LLM_ARCH_LLADA,           "llada"           },
     { LLM_ARCH_UNKNOWN,         "(unknown)"       },
 };
 
@@ -109,6 +130,7 @@ static const std::map<llm_kv, const char *> LLM_KV_NAMES = {
     { LLM_KV_EXPERT_WEIGHTS_NORM,       "%s.expert_weights_norm"       },
     { LLM_KV_EXPERT_GATING_FUNC,        "%s.expert_gating_func"        },
     { LLM_KV_MOE_EVERY_N_LAYERS,        "%s.moe_every_n_layers"        },
+    { LLM_KV_NEXTN_PREDICT_LAYERS,      "%s.nextn_predict_layers"      },
     { LLM_KV_POOLING_TYPE,              "%s.pooling_type"              },
     { LLM_KV_LOGIT_SCALE,               "%s.logit_scale"               },
     { LLM_KV_DECODER_START_TOKEN_ID,    "%s.decoder_start_token_id"    },
@@ -166,6 +188,7 @@ static const std::map<llm_kv, const char *> LLM_KV_NAMES = {
     { LLM_KV_SSM_INNER_SIZE,     "%s.ssm.inner_size"     },
     { LLM_KV_SSM_STATE_SIZE,     "%s.ssm.state_size"     },
     { LLM_KV_SSM_TIME_STEP_RANK, "%s.ssm.time_step_rank" },
+    { LLM_KV_SSM_GROUP_COUNT,    "%s.ssm.group_count"    },
     { LLM_KV_SSM_DT_B_C_RMS,     "%s.ssm.dt_b_c_rms"     },
     { LLM_KV_WKV_HEAD_SIZE, 
"%s.wkv.head_size" }, @@ -176,6 +199,10 @@ static const std::map LLM_KV_NAMES = { { LLM_KV_CONVNEXT_EMBEDDING_LENGTH, "%s.convnext.embedding_length" }, { LLM_KV_CONVNEXT_BLOCK_COUNT, "%s.convnext.block_count" }, + { LLM_KV_CLASSIFIER_OUTPUT_LABELS, "%s.classifier.output_labels" }, + + { LLM_KV_SHORTCONV_L_CACHE, "%s.shortconv.l_cache" }, + { LLM_KV_TOKENIZER_MODEL, "tokenizer.ggml.model" }, { LLM_KV_TOKENIZER_PRE, "tokenizer.ggml.pre" }, { LLM_KV_TOKENIZER_LIST, "tokenizer.ggml.tokens" }, @@ -194,13 +221,13 @@ static const std::map LLM_KV_NAMES = { { LLM_KV_TOKENIZER_MASK_ID, "tokenizer.ggml.mask_token_id" }, { LLM_KV_TOKENIZER_ADD_BOS, "tokenizer.ggml.add_bos_token" }, { LLM_KV_TOKENIZER_ADD_EOS, "tokenizer.ggml.add_eos_token" }, + { LLM_KV_TOKENIZER_ADD_SEP, "tokenizer.ggml.add_sep_token" }, { LLM_KV_TOKENIZER_ADD_PREFIX, "tokenizer.ggml.add_space_prefix" }, { LLM_KV_TOKENIZER_REMOVE_EXTRA_WS, "tokenizer.ggml.remove_extra_whitespaces" }, { LLM_KV_TOKENIZER_PRECOMPILED_CHARSMAP, "tokenizer.ggml.precompiled_charsmap" }, { LLM_KV_TOKENIZER_HF_JSON, "tokenizer.huggingface.json" }, { LLM_KV_TOKENIZER_RWKV, "tokenizer.rwkv.world" }, { LLM_KV_TOKENIZER_CHAT_TEMPLATE, "tokenizer.chat_template" }, - { LLM_KV_TOKENIZER_CHAT_TEMPLATE_N, "tokenizer.chat_template.%s" }, { LLM_KV_TOKENIZER_FIM_PRE_ID, "tokenizer.ggml.fim_pre_token_id" }, { LLM_KV_TOKENIZER_FIM_SUF_ID, "tokenizer.ggml.fim_suf_token_id" }, { LLM_KV_TOKENIZER_FIM_MID_ID, "tokenizer.ggml.fim_mid_token_id" }, @@ -244,6 +271,24 @@ static const std::map> LLM_TENSOR_N { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" }, }, }, + { + LLM_ARCH_ARCEE, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ROPE_FREQS, "rope_freqs" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + }, + }, { LLM_ARCH_LLAMA4, { @@ -450,6 +495,7 @@ static const std::map> LLM_TENSOR_N { LLM_TENSOR_TOKEN_TYPES, "token_types" }, { LLM_TENSOR_POS_EMBD, "position_embd" }, { LLM_TENSOR_ATTN_OUT_NORM, "blk.%d.attn_output_norm" }, + { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" }, { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, @@ -494,6 +540,21 @@ static const std::map> LLM_TENSOR_N { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" }, }, }, + { + LLM_ARCH_NEO_BERT, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + { LLM_TENSOR_ENC_OUTPUT_NORM, "enc.output_norm" }, + { LLM_TENSOR_CLS, "cls" }, + { LLM_TENSOR_CLS_OUT, "cls.output" }, + }, + }, { LLM_ARCH_JINA_BERT_V2, { @@ -735,6 +796,36 @@ static const std::map> LLM_TENSOR_N { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, }, }, + { + LLM_ARCH_PLAMO2, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ROPE_FREQS, "rope_freqs" }, + { LLM_TENSOR_ATTN_NORM, 
"blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" }, + { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" }, + { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + { LLM_TENSOR_SSM_IN, "blk.%d.ssm_in" }, + { LLM_TENSOR_SSM_CONV1D, "blk.%d.ssm_conv1d" }, + { LLM_TENSOR_SSM_X, "blk.%d.ssm_x" }, + { LLM_TENSOR_SSM_DT, "blk.%d.ssm_dt" }, + { LLM_TENSOR_SSM_A, "blk.%d.ssm_a" }, + { LLM_TENSOR_SSM_D, "blk.%d.ssm_d" }, + { LLM_TENSOR_SSM_OUT, "blk.%d.ssm_out" }, + { LLM_TENSOR_SSM_DT_NORM, "blk.%d.ssm_dt_norm" }, + { LLM_TENSOR_SSM_B_NORM, "blk.%d.ssm_b_norm" }, + { LLM_TENSOR_SSM_C_NORM, "blk.%d.ssm_c_norm" }, + { LLM_TENSOR_ATTN_POST_NORM, "blk.%d.post_attention_norm" }, + { LLM_TENSOR_FFN_POST_NORM, "blk.%d.post_ffw_norm" }, + }, + }, { LLM_ARCH_CODESHELL, { @@ -894,6 +985,42 @@ static const std::map> LLM_TENSOR_N { LLM_TENSOR_FFN_POST_NORM, "blk.%d.post_ffw_norm" }, }, }, + { + LLM_ARCH_GEMMA3N, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_ATTN_POST_NORM, "blk.%d.post_attention_norm" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + { LLM_TENSOR_FFN_POST_NORM, "blk.%d.post_ffw_norm" }, + { LLM_TENSOR_PER_LAYER_TOKEN_EMBD, "per_layer_token_embd" }, + { LLM_TENSOR_PER_LAYER_MODEL_PROJ, "per_layer_model_proj" }, + { LLM_TENSOR_PER_LAYER_PROJ_NORM, "per_layer_proj_norm" }, + { LLM_TENSOR_ALTUP_UNEMBD_PROJ, "altup_unembd_proj" }, + { LLM_TENSOR_ALTUP_PROJ, "altup_proj" }, + { LLM_TENSOR_PER_LAYER_INP_GATE, "blk.%d.inp_gate" }, + { LLM_TENSOR_PER_LAYER_PROJ, "blk.%d.proj" }, + { LLM_TENSOR_PER_LAYER_POST_NORM, "blk.%d.post_norm" }, + { LLM_TENSOR_ALTUP_CORRECT_COEF, "blk.%d.altup_correct_coef" }, + { LLM_TENSOR_ALTUP_CORRECT_SCALE, "blk.%d.altup_correct_scale" }, + { LLM_TENSOR_ALTUP_PREDICT_COEF, "blk.%d.altup_predict_coef" }, + { LLM_TENSOR_ALTUP_ROUTER, "blk.%d.altup_router" }, + { LLM_TENSOR_ALTUP_ROUTER_NORM, "blk.%d.altup_router_norm" }, + { LLM_TENSOR_LAUREL_L, "blk.%d.laurel_l" }, + { LLM_TENSOR_LAUREL_R, "blk.%d.laurel_r" }, + { LLM_TENSOR_LAUREL_POST_NORM, "blk.%d.laurel_post_norm" }, + }, + }, { LLM_ARCH_STARCODER2, { @@ -928,6 +1055,77 @@ static const std::map> LLM_TENSOR_N { LLM_TENSOR_SSM_OUT, "blk.%d.ssm_out" }, }, }, + { + LLM_ARCH_MAMBA2, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_SSM_IN, "blk.%d.ssm_in" }, + { LLM_TENSOR_SSM_CONV1D, "blk.%d.ssm_conv1d" }, + { LLM_TENSOR_SSM_DT, "blk.%d.ssm_dt" }, + { LLM_TENSOR_SSM_A, "blk.%d.ssm_a" }, + { LLM_TENSOR_SSM_D, "blk.%d.ssm_d" }, + { LLM_TENSOR_SSM_NORM, "blk.%d.ssm_norm" }, + { LLM_TENSOR_SSM_OUT, "blk.%d.ssm_out" }, + }, + }, + { + LLM_ARCH_JAMBA, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, 
"output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_SSM_IN, "blk.%d.ssm_in" }, + { LLM_TENSOR_SSM_CONV1D, "blk.%d.ssm_conv1d" }, + { LLM_TENSOR_SSM_X, "blk.%d.ssm_x" }, + { LLM_TENSOR_SSM_DT, "blk.%d.ssm_dt" }, + { LLM_TENSOR_SSM_DT_NORM, "blk.%d.ssm_dt_norm" }, + { LLM_TENSOR_SSM_A, "blk.%d.ssm_a" }, + { LLM_TENSOR_SSM_B_NORM, "blk.%d.ssm_b_norm" }, + { LLM_TENSOR_SSM_C_NORM, "blk.%d.ssm_c_norm" }, + { LLM_TENSOR_SSM_D, "blk.%d.ssm_d" }, + { LLM_TENSOR_SSM_OUT, "blk.%d.ssm_out" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" }, + { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" }, + { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" }, + }, + }, + { + LLM_ARCH_FALCON_H1, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_SSM_IN, "blk.%d.ssm_in" }, + { LLM_TENSOR_SSM_CONV1D, "blk.%d.ssm_conv1d" }, + { LLM_TENSOR_SSM_DT, "blk.%d.ssm_dt" }, + { LLM_TENSOR_SSM_A, "blk.%d.ssm_a" }, + { LLM_TENSOR_SSM_D, "blk.%d.ssm_d" }, + { LLM_TENSOR_SSM_NORM, "blk.%d.ssm_norm" }, + { LLM_TENSOR_SSM_OUT, "blk.%d.ssm_out" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + }, + }, { LLM_ARCH_XVERSE, { @@ -1198,6 +1396,40 @@ static const std::map> LLM_TENSOR_N { LLM_TENSOR_FFN_POST_NORM, "blk.%d.post_ffw_norm" }, }, }, + { + LLM_ARCH_GLM4_MOE, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_POST_NORM, "blk.%d.post_attention_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" }, + { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" }, + { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" }, + { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" }, + { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" }, + { LLM_TENSOR_FFN_GATE_SHEXP, "blk.%d.ffn_gate_shexp" }, + { LLM_TENSOR_FFN_DOWN_SHEXP, "blk.%d.ffn_down_shexp" }, + { LLM_TENSOR_FFN_UP_SHEXP, "blk.%d.ffn_up_shexp" }, + { LLM_TENSOR_FFN_EXP_PROBS_B, "blk.%d.exp_probs_b" }, + // NextN/MTP tensors - preserved but unused (in final layer, dynamic layer number) + { LLM_TENSOR_NEXTN_EH_PROJ, "blk.%d.nextn.eh_proj" }, + { LLM_TENSOR_NEXTN_EMBED_TOKENS, "blk.%d.nextn.embed_tokens" }, + { LLM_TENSOR_NEXTN_ENORM, "blk.%d.nextn.enorm" }, + { LLM_TENSOR_NEXTN_HNORM, 
"blk.%d.nextn.hnorm" }, + { LLM_TENSOR_NEXTN_SHARED_HEAD_HEAD, "blk.%d.nextn.shared_head_head" }, + { LLM_TENSOR_NEXTN_SHARED_HEAD_NORM, "blk.%d.nextn.shared_head_norm" }, + }, + }, { LLM_ARCH_BITNET, { @@ -1321,6 +1553,26 @@ static const std::map> LLM_TENSOR_N { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, }, }, + { + LLM_ARCH_EXAONE4, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ROPE_FREQS, "rope_freqs" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_ATTN_POST_NORM, "blk.%d.post_attention_norm" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + { LLM_TENSOR_FFN_POST_NORM, "blk.%d.post_ffw_norm" }, + } + }, { LLM_ARCH_RWKV6, { @@ -1483,6 +1735,46 @@ static const std::map> LLM_TENSOR_N { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" }, { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" }, { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" }, + { LLM_TENSOR_FFN_GATE_SHEXP, "blk.%d.ffn_gate_shexp" }, + { LLM_TENSOR_FFN_DOWN_SHEXP, "blk.%d.ffn_down_shexp" }, + { LLM_TENSOR_FFN_UP_SHEXP, "blk.%d.ffn_up_shexp" }, + }, + }, + { + LLM_ARCH_GRANITE_HYBRID, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + // mamba(2) ssm layers + { LLM_TENSOR_SSM_IN, "blk.%d.ssm_in" }, + { LLM_TENSOR_SSM_CONV1D, "blk.%d.ssm_conv1d" }, + { LLM_TENSOR_SSM_DT, "blk.%d.ssm_dt" }, + { LLM_TENSOR_SSM_A, "blk.%d.ssm_a" }, + { LLM_TENSOR_SSM_D, "blk.%d.ssm_d" }, + { LLM_TENSOR_SSM_NORM, "blk.%d.ssm_norm" }, + { LLM_TENSOR_SSM_OUT, "blk.%d.ssm_out" }, + // attention layers + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + // dense FFN + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + // moe FFN + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" }, + { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" }, + { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" }, + { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" }, + // shared expert + { LLM_TENSOR_FFN_GATE_SHEXP, "blk.%d.ffn_gate_shexp" }, + { LLM_TENSOR_FFN_DOWN_SHEXP, "blk.%d.ffn_down_shexp" }, + { LLM_TENSOR_FFN_UP_SHEXP, "blk.%d.ffn_up_shexp" }, }, }, { @@ -1570,6 +1862,231 @@ static const std::map> LLM_TENSOR_N { LLM_TENSOR_FFN_UP_SHEXP, "blk.%d.ffn_up_shexp" }, }, }, + { + LLM_ARCH_DOTS1, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { 
LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" }, + { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" }, + { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" }, + { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" }, + { LLM_TENSOR_FFN_GATE_INP_SHEXP, "blk.%d.ffn_gate_inp_shexp" }, + { LLM_TENSOR_FFN_GATE_SHEXP, "blk.%d.ffn_gate_shexp" }, + { LLM_TENSOR_FFN_DOWN_SHEXP, "blk.%d.ffn_down_shexp" }, + { LLM_TENSOR_FFN_UP_SHEXP, "blk.%d.ffn_up_shexp" }, + { LLM_TENSOR_FFN_EXP_PROBS_B, "blk.%d.exp_probs_b" }, + } + }, + { + LLM_ARCH_ERNIE4_5, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + }, + }, + { + LLM_ARCH_ERNIE4_5_MOE, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" }, + { LLM_TENSOR_FFN_GATE_SHEXP, "blk.%d.ffn_gate_shexp" }, + { LLM_TENSOR_FFN_DOWN_SHEXP, "blk.%d.ffn_down_shexp" }, + { LLM_TENSOR_FFN_UP_SHEXP, "blk.%d.ffn_up_shexp" }, + { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" }, + { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" }, + { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" }, + { LLM_TENSOR_FFN_EXP_PROBS_B, "blk.%d.exp_probs_b" }, + }, + }, + { + LLM_ARCH_HUNYUAN_MOE, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE_SHEXP, "blk.%d.ffn_gate_shexp" }, + { LLM_TENSOR_FFN_DOWN_SHEXP, "blk.%d.ffn_down_shexp" }, + { LLM_TENSOR_FFN_UP_SHEXP, "blk.%d.ffn_up_shexp" }, + { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" }, + { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" }, + { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" }, + }, + }, + { + LLM_ARCH_HUNYUAN_DENSE, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { 
LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + + }, + }, + { + LLM_ARCH_SMOLLM3, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + }, + }, + { + LLM_ARCH_OPENAI_MOE, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_POST_NORM, "blk.%d.post_attention_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_ATTN_SINKS, "blk.%d.attn_sinks" }, + { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" }, + { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" }, + { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" }, + { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" }, + }, + }, + { + LLM_ARCH_LFM2, + { + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" }, + { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + { LLM_TENSOR_SHORTCONV_CONV, "blk.%d.shortconv.conv" }, + { LLM_TENSOR_SHORTCONV_INPROJ, "blk.%d.shortconv.in_proj" }, + { LLM_TENSOR_SHORTCONV_OUTPROJ, "blk.%d.shortconv.out_proj" }, + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" }, + } + }, + { + LLM_ARCH_SMALLTHINKER, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" }, + { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" }, + { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" }, + { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" } + }, + }, + { + LLM_ARCH_DREAM, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { 
LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + }, + }, + { + LLM_ARCH_LLADA, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + }, + }, { LLM_ARCH_UNKNOWN, { @@ -1609,6 +2126,7 @@ static const std::map LLM_TENSOR_INFOS = { {LLM_TENSOR_ATTN_KV_B, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, {LLM_TENSOR_ATTN_K_B, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, {LLM_TENSOR_ATTN_V_B, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_ATTN_SINKS, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_SCALE}}, {LLM_TENSOR_DEC_ATTN_Q, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, {LLM_TENSOR_DEC_ATTN_K, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, {LLM_TENSOR_DEC_ATTN_V, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, @@ -1654,7 +2172,11 @@ static const std::map LLM_TENSOR_INFOS = { {LLM_TENSOR_FFN_ACT, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_DIV}}, {LLM_TENSOR_SSM_CONV1D, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_SSM_CONV}}, {LLM_TENSOR_SSM_A, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_SSM_SCAN}}, + {LLM_TENSOR_SSM_DT_NORM, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, + {LLM_TENSOR_SSM_B_NORM, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, + {LLM_TENSOR_SSM_C_NORM, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, {LLM_TENSOR_SSM_D, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, + {LLM_TENSOR_SSM_NORM, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, {LLM_TENSOR_TIME_MIX_LERP_X, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, {LLM_TENSOR_TIME_MIX_LN, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, {LLM_TENSOR_CHANNEL_MIX_LERP_K, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, @@ -1698,6 +2220,23 @@ static const std::map LLM_TENSOR_INFOS = { {LLM_TENSOR_FFN_GATE_EXPS, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT_ID}}, {LLM_TENSOR_FFN_UP_EXPS, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT_ID}}, {LLM_TENSOR_FFN_EXP_PROBS_B, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}}, + // altup / laurel (gemma 3n) + {LLM_TENSOR_PER_LAYER_TOKEN_EMBD, {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_GET_ROWS}}, + {LLM_TENSOR_PER_LAYER_MODEL_PROJ, {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_PER_LAYER_PROJ_NORM, {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL}}, + {LLM_TENSOR_ALTUP_PROJ, {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_ALTUP_UNEMBD_PROJ, {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_PER_LAYER_INP_GATE, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_PER_LAYER_PROJ, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_PER_LAYER_POST_NORM, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, + {LLM_TENSOR_ALTUP_CORRECT_COEF, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_ALTUP_CORRECT_SCALE, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, + {LLM_TENSOR_ALTUP_PREDICT_COEF, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_ALTUP_ROUTER, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_ALTUP_ROUTER_NORM, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, + {LLM_TENSOR_LAUREL_L, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + 
{LLM_TENSOR_LAUREL_R, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_LAUREL_POST_NORM, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, // this tensor is loaded for T5, but never used {LLM_TENSOR_DEC_CROSS_ATTN_REL_B, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_NONE}}, {LLM_TENSOR_BSKCN_TV, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, @@ -1717,13 +2256,30 @@ static const std::map LLM_TENSOR_INFOS = { {LLM_TENSOR_CONVNEXT_PW1, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, {LLM_TENSOR_CONVNEXT_PW2, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, {LLM_TENSOR_CONVNEXT_GAMMA, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, + {LLM_TENSOR_SHORTCONV_CONV, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_SSM_CONV}}, + {LLM_TENSOR_SHORTCONV_INPROJ, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_SHORTCONV_OUTPROJ, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + // NextN/MTP tensors are currently ignored (reserved for future MTP support) + // These tensors only exist in the last layer(s) and are treated as output tensors + {LLM_TENSOR_NEXTN_EH_PROJ, {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_NEXTN_EMBED_TOKENS, {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_GET_ROWS}}, + {LLM_TENSOR_NEXTN_ENORM, {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_GET_ROWS}}, + {LLM_TENSOR_NEXTN_HNORM, {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL}}, + {LLM_TENSOR_NEXTN_SHARED_HEAD_HEAD, {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_NEXTN_SHARED_HEAD_NORM, {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL}}, }; LLM_KV::LLM_KV(llm_arch arch, const char * suffix) : arch(arch), suffix(suffix) {} std::string LLM_KV::operator()(llm_kv kv) const { - return suffix ? ::format(LLM_KV_NAMES.at(kv), LLM_ARCH_NAMES.at(arch), suffix) - : ::format(LLM_KV_NAMES.at(kv), LLM_ARCH_NAMES.at(arch)); + std::string name = ::format(LLM_KV_NAMES.at(kv), LLM_ARCH_NAMES.at(arch)); + + if (suffix != nullptr) { + name += "."; + name += suffix; + } + + return name; } std::string LLM_TN_IMPL::str() const { @@ -1762,3 +2318,40 @@ llm_arch llm_arch_from_string(const std::string & name) { const llm_tensor_info & llm_tensor_info_for(llm_tensor tensor) { return LLM_TENSOR_INFOS.at(tensor); } + +bool llm_arch_is_recurrent(const llm_arch & arch) { + switch (arch) { + case LLM_ARCH_MAMBA: + case LLM_ARCH_MAMBA2: + case LLM_ARCH_RWKV6: + case LLM_ARCH_RWKV6QWEN2: + case LLM_ARCH_RWKV7: + case LLM_ARCH_ARWKV7: + return true; + default: + return false; + } +} + +bool llm_arch_is_hybrid(const llm_arch & arch) { + switch (arch) { + case LLM_ARCH_JAMBA: + case LLM_ARCH_FALCON_H1: + case LLM_ARCH_PLAMO2: + case LLM_ARCH_GRANITE_HYBRID: + case LLM_ARCH_LFM2: + return true; + default: + return false; + } +} + +bool llm_arch_is_diffusion(const llm_arch & arch) { + switch (arch) { + case LLM_ARCH_DREAM: + case LLM_ARCH_LLADA: + return true; + default: + return false; + } +} diff --git a/llama/llama.cpp/src/llama-arch.h b/llama/llama.cpp/src/llama-arch.h index 525c1b7d4..3ea994c70 100644 --- a/llama/llama.cpp/src/llama-arch.h +++ b/llama/llama.cpp/src/llama-arch.h @@ -24,6 +24,7 @@ enum llm_arch { LLM_ARCH_BERT, LLM_ARCH_NOMIC_BERT, LLM_ARCH_NOMIC_BERT_MOE, + LLM_ARCH_NEO_BERT, LLM_ARCH_JINA_BERT_V2, LLM_ARCH_BLOOM, LLM_ARCH_STABLELM, @@ -37,6 +38,7 @@ enum llm_arch { LLM_ARCH_PHI3, LLM_ARCH_PHIMOE, LLM_ARCH_PLAMO, + LLM_ARCH_PLAMO2, LLM_ARCH_CODESHELL, LLM_ARCH_ORION, LLM_ARCH_INTERNLM2, @@ -45,8 +47,12 @@ enum llm_arch { LLM_ARCH_GEMMA, LLM_ARCH_GEMMA2, LLM_ARCH_GEMMA3, + LLM_ARCH_GEMMA3N, LLM_ARCH_STARCODER2, LLM_ARCH_MAMBA, + LLM_ARCH_MAMBA2, + LLM_ARCH_JAMBA, + 
LLM_ARCH_FALCON_H1, LLM_ARCH_XVERSE, LLM_ARCH_COMMAND_R, LLM_ARCH_COHERE2, @@ -60,23 +66,38 @@ enum llm_arch { LLM_ARCH_DEEPSEEK2, LLM_ARCH_CHATGLM, LLM_ARCH_GLM4, + LLM_ARCH_GLM4_MOE, LLM_ARCH_BITNET, LLM_ARCH_T5, LLM_ARCH_T5ENCODER, LLM_ARCH_JAIS, LLM_ARCH_NEMOTRON, LLM_ARCH_EXAONE, + LLM_ARCH_EXAONE4, LLM_ARCH_RWKV6, LLM_ARCH_RWKV6QWEN2, LLM_ARCH_RWKV7, LLM_ARCH_ARWKV7, LLM_ARCH_GRANITE, LLM_ARCH_GRANITE_MOE, + LLM_ARCH_GRANITE_HYBRID, LLM_ARCH_CHAMELEON, LLM_ARCH_SOLAR, LLM_ARCH_WAVTOKENIZER_DEC, LLM_ARCH_PLM, LLM_ARCH_BAILINGMOE, + LLM_ARCH_DOTS1, + LLM_ARCH_ARCEE, + LLM_ARCH_ERNIE4_5, + LLM_ARCH_ERNIE4_5_MOE, + LLM_ARCH_HUNYUAN_MOE, + LLM_ARCH_HUNYUAN_DENSE, + LLM_ARCH_SMOLLM3, + LLM_ARCH_OPENAI_MOE, + LLM_ARCH_LFM2, + LLM_ARCH_DREAM, + LLM_ARCH_SMALLTHINKER, + LLM_ARCH_LLADA, LLM_ARCH_UNKNOWN, }; @@ -113,6 +134,7 @@ enum llm_kv { LLM_KV_EXPERT_WEIGHTS_NORM, LLM_KV_EXPERT_GATING_FUNC, LLM_KV_MOE_EVERY_N_LAYERS, + LLM_KV_NEXTN_PREDICT_LAYERS, LLM_KV_POOLING_TYPE, LLM_KV_LOGIT_SCALE, LLM_KV_DECODER_START_TOKEN_ID, @@ -170,6 +192,7 @@ enum llm_kv { LLM_KV_SSM_CONV_KERNEL, LLM_KV_SSM_STATE_SIZE, LLM_KV_SSM_TIME_STEP_RANK, + LLM_KV_SSM_GROUP_COUNT, LLM_KV_SSM_DT_B_C_RMS, LLM_KV_WKV_HEAD_SIZE, @@ -192,13 +215,13 @@ enum llm_kv { LLM_KV_TOKENIZER_MASK_ID, LLM_KV_TOKENIZER_ADD_BOS, LLM_KV_TOKENIZER_ADD_EOS, + LLM_KV_TOKENIZER_ADD_SEP, LLM_KV_TOKENIZER_ADD_PREFIX, LLM_KV_TOKENIZER_REMOVE_EXTRA_WS, LLM_KV_TOKENIZER_PRECOMPILED_CHARSMAP, LLM_KV_TOKENIZER_HF_JSON, LLM_KV_TOKENIZER_RWKV, LLM_KV_TOKENIZER_CHAT_TEMPLATE, - LLM_KV_TOKENIZER_CHAT_TEMPLATE_N, LLM_KV_TOKENIZER_FIM_PRE_ID, LLM_KV_TOKENIZER_FIM_SUF_ID, LLM_KV_TOKENIZER_FIM_MID_ID, @@ -215,6 +238,10 @@ enum llm_kv { LLM_KV_CONVNEXT_EMBEDDING_LENGTH, LLM_KV_CONVNEXT_BLOCK_COUNT, + LLM_KV_CLASSIFIER_OUTPUT_LABELS, + + LLM_KV_SHORTCONV_L_CACHE, + // deprecated: LLM_KV_TOKENIZER_PREFIX_ID, LLM_KV_TOKENIZER_SUFFIX_ID, @@ -241,6 +268,7 @@ enum llm_tensor { LLM_TENSOR_ATTN_OUT_NORM, LLM_TENSOR_ATTN_POST_NORM, LLM_TENSOR_ATTN_ROT_EMBD, + LLM_TENSOR_ATTN_SINKS, LLM_TENSOR_FFN_GATE_INP, LLM_TENSOR_FFN_GATE_INP_SHEXP, LLM_TENSOR_FFN_NORM, @@ -265,12 +293,32 @@ enum llm_tensor { LLM_TENSOR_LAYER_OUT_NORM, LLM_TENSOR_POST_ATTN_NORM, LLM_TENSOR_POST_MLP_NORM, + LLM_TENSOR_PER_LAYER_TOKEN_EMBD, // gemma3n + LLM_TENSOR_PER_LAYER_MODEL_PROJ, // gemma3n + LLM_TENSOR_PER_LAYER_INP_GATE, // gemma3n + LLM_TENSOR_PER_LAYER_PROJ, // gemma3n + LLM_TENSOR_PER_LAYER_PROJ_NORM, // gemma3n + LLM_TENSOR_PER_LAYER_POST_NORM, // gemma3n + LLM_TENSOR_ALTUP_PROJ, // gemma3n + LLM_TENSOR_ALTUP_UNEMBD_PROJ, // gemma3n + LLM_TENSOR_ALTUP_CORRECT_COEF, // gemma3n + LLM_TENSOR_ALTUP_CORRECT_SCALE, // gemma3n + LLM_TENSOR_ALTUP_PREDICT_COEF, // gemma3n + LLM_TENSOR_ALTUP_ROUTER, // gemma3n + LLM_TENSOR_ALTUP_ROUTER_NORM, // gemma3n + LLM_TENSOR_LAUREL_L, // gemma3n + LLM_TENSOR_LAUREL_R, // gemma3n + LLM_TENSOR_LAUREL_POST_NORM, // gemma3n LLM_TENSOR_SSM_IN, LLM_TENSOR_SSM_CONV1D, LLM_TENSOR_SSM_X, LLM_TENSOR_SSM_DT, + LLM_TENSOR_SSM_DT_NORM, LLM_TENSOR_SSM_A, + LLM_TENSOR_SSM_B_NORM, + LLM_TENSOR_SSM_C_NORM, LLM_TENSOR_SSM_D, + LLM_TENSOR_SSM_NORM, LLM_TENSOR_SSM_OUT, LLM_TENSOR_TIME_MIX_W0, LLM_TENSOR_TIME_MIX_W1, @@ -365,6 +413,15 @@ enum llm_tensor { LLM_TENSOR_POS_NET_ATTN_K, LLM_TENSOR_POS_NET_ATTN_V, LLM_TENSOR_POS_NET_ATTN_OUT, + LLM_TENSOR_SHORTCONV_CONV, + LLM_TENSOR_SHORTCONV_INPROJ, + LLM_TENSOR_SHORTCONV_OUTPROJ, + LLM_TENSOR_NEXTN_EH_PROJ, + LLM_TENSOR_NEXTN_EMBED_TOKENS, + LLM_TENSOR_NEXTN_ENORM, + LLM_TENSOR_NEXTN_HNORM, + LLM_TENSOR_NEXTN_SHARED_HEAD_HEAD, + 
LLM_TENSOR_NEXTN_SHARED_HEAD_NORM,
 };
 
 enum llm_tensor_layer {
@@ -438,3 +495,7 @@ const char * llm_arch_name(llm_arch arch);
 llm_arch llm_arch_from_string(const std::string & name);
 
 const llm_tensor_info & llm_tensor_info_for(llm_tensor tensor);
+
+bool llm_arch_is_recurrent(const llm_arch & arch);
+bool llm_arch_is_hybrid   (const llm_arch & arch);
+bool llm_arch_is_diffusion(const llm_arch & arch);
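The three predicates declared at the end of llama-arch.h (and implemented in llama-arch.cpp above) classify architectures by the kind of per-sequence memory they need. A hedged sketch of the dispatch they enable follows; memory_kind and pick_memory_kind are hypothetical names for illustration, not llama.cpp API:

    // Illustrative sketch, not part of the vendored diff.
    enum class memory_kind { kv_cache, recurrent, hybrid, none };

    static memory_kind pick_memory_kind(llm_arch arch) {
        if (llm_arch_is_hybrid(arch)) {
            return memory_kind::hybrid;    // e.g. jamba, falcon-h1: KV cache plus recurrent state
        }
        if (llm_arch_is_recurrent(arch)) {
            return memory_kind::recurrent; // e.g. mamba2, rwkv7: recurrent state only
        }
        if (llm_arch_is_diffusion(arch)) {
            return memory_kind::none;      // e.g. dream, llada: decoded without a causal KV cache
        }
        return memory_kind::kv_cache;      // default transformer path
    }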
diff --git a/llama/llama.cpp/src/llama-batch.cpp b/llama/llama.cpp/src/llama-batch.cpp
index a88b2fe30..8698d89ac 100644
--- a/llama/llama.cpp/src/llama-batch.cpp
+++ b/llama/llama.cpp/src/llama-batch.cpp
@@ -1,292 +1,75 @@
 #include "llama-batch.h"
+#include "llama-impl.h"
+#include "llama-vocab.h"
+#include "llama-memory.h"
+
+#include <cassert>
 #include <cstring>
 #include <algorithm>
+#include <sstream>
 
-llama_ubatch llama_sbatch::reserve_ubatch(size_t n_ubatch, bool has_embd) {
-    // clear empty sequences
-    // the previous ubatch is assumed to be gone,
-    // so nothing should refer to values in these sequences anymore.
-    for (size_t i = seq.size(); i-- > 0;) {
-        if (seq[i].length == 0) {
-            seq.pop_back();
-        } else {
-            break;
-        }
+llama_batch_allocr::llama_batch_allocr(uint32_t n_pos_per_embd) : n_pos_per_embd(n_pos_per_embd) {
+    const char * LLAMA_BATCH_DEBUG = getenv("LLAMA_BATCH_DEBUG");
+    debug = LLAMA_BATCH_DEBUG ? atoi(LLAMA_BATCH_DEBUG) : 0;
+
+    seq_pos.resize(LLAMA_MAX_SEQ);
+    seq_cpl.resize(LLAMA_MAX_SEQ);
+    for (auto & cur : seq_cpl) {
+        cur.resize(LLAMA_MAX_SEQ);
     }
-    ubatch_token.resize(!has_embd ? n_ubatch : 0);
-    ubatch_embd.resize(has_embd ? n_embd * n_ubatch : 0);
-    ubatch_pos.resize(n_ubatch);
-    ubatch_n_seq_id.resize(n_ubatch);
-    ubatch_seq_id.resize(n_ubatch);
-    ubatch_output.resize(n_ubatch);
-    llama_ubatch ubatch = {
-        /*equal_seqs   =*/ true,
-        /*n_tokens     =*/ 0,
-        /*n_seq_tokens =*/ 0,
-        /*n_seqs       =*/ 0,
-        /*token        =*/ !has_embd ? ubatch_token.data() : nullptr,
-        /*embd         =*/ has_embd ? ubatch_embd.data() : nullptr,
-        /*pos          =*/ ubatch_pos.data(),
-        /*n_seq_id     =*/ ubatch_n_seq_id.data(),
-        /*seq_id       =*/ ubatch_seq_id.data(),
-        /*output       =*/ ubatch_output.data(),
-    };
-    return ubatch;
+
+    seq_idx.resize(LLAMA_MAX_SEQ, -1);
 }
 
-void llama_sbatch::add_seq_to_ubatch(llama_ubatch & ubatch, llama_sbatch_seq & seq, size_t length) {
-    GGML_ASSERT(batch != nullptr);
-    GGML_ASSERT(length <= seq.length);
-    // Can only add sequences of equal lengths to a batch,
-    // otherwise it isn't clear to which sequence a token belongs
-    GGML_ASSERT(seq.n_seq_id == 0 || ubatch.n_seqs == 0 || length == (size_t) ubatch.n_tokens / ubatch.n_seqs);
-    GGML_ASSERT((seq.n_seq_id != 0) == ubatch.equal_seqs);
-    // NOTE: loops are separated for cache-friendliness
-    if (batch->token) {
-        if (ubatch.equal_seqs) {
-            for (size_t i = 0; i < length; ++i) {
-                ubatch.token[ubatch.n_tokens + i] = batch->token[ids[seq.offset + i]];
-            }
-        } else {
-            // simple split
-            ubatch.token = batch->token + seq.offset;
-        }
-    } else {
-        ubatch.token = nullptr;
-    }
-    if (batch->embd) {
-        if (ubatch.equal_seqs) {
-            for (size_t i = 0; i < length; ++i) {
-                memcpy(
-                    ubatch.embd + (n_embd * (ubatch.n_tokens + i)),
-                    batch->embd + (n_embd * ids[seq.offset + i]),
-                    n_embd * sizeof(float)
-                );
-            }
-        } else {
-            // simple split
-            ubatch.embd = batch->embd + (n_embd * seq.offset);
-        }
-    } else {
-        ubatch.embd = nullptr;
-    }
-    if (ubatch.equal_seqs) {
-        for (size_t i = 0; i < length; ++i) {
-            ubatch.pos[ubatch.n_tokens + i] = batch->pos[ids[seq.offset + i]];
-        }
-    } else {
-        // simple split
-        ubatch.pos = batch->pos + seq.offset;
-    }
-    if (ubatch.equal_seqs) {
-        ubatch.n_seq_id[ubatch.n_seqs] = seq.n_seq_id;
-        if (seq.seq_id) {
-            ubatch.seq_id[ubatch.n_seqs] = seq.seq_id;
-        }
-    } else {
-        // simple split
-        if (batch->n_seq_id) {
-            ubatch.n_seq_id = batch->n_seq_id + seq.offset;
-        } else {
-            for (size_t i = 0; i < length; ++i) {
-                ubatch.n_seq_id[ubatch.n_seqs + i] = 1;
-            }
-        }
-        if (batch->seq_id) {
-            ubatch.seq_id = batch->seq_id + seq.offset;
-        }
-    }
-    if (logits_all) {
-        for (size_t i = 0; i < length; ++i) {
-            ubatch.output[ubatch.n_tokens + i] = 1;
-            out_ids.push_back(ids[seq.offset + i]);
-        }
-    } else if (batch->logits) {
-        if (ubatch.equal_seqs) {
-            for (size_t i = 0; i < length; ++i) {
-                size_t id = ids[seq.offset + i];
-                int8_t is_output = batch->logits[id];
-                ubatch.output[ubatch.n_tokens + i] = is_output;
-                if (is_output) { out_ids.push_back(id); }
-            }
-        } else {
-            // simple split
-            ubatch.output = batch->logits + seq.offset;
-            for (size_t i = 0; i < length; ++i) {
-                if (ubatch.output[i] != 0) { out_ids.push_back(seq.offset + i); }
-            }
-        }
-    } else {
-        // only get last output
-        for (size_t i = 0; i < length; ++i) {
-            size_t id = ids[seq.offset + i];
-            int8_t is_last = id == ids.size() - 1;
-            ubatch.output[ubatch.n_tokens + i] = is_last;
-            if (is_last) { out_ids.push_back(id); }
-        }
-    }
-    if (ubatch.n_tokens == 0 && ubatch.n_seqs == 0) {
-        ubatch.n_seq_tokens = ubatch.equal_seqs ? length : 1;
-    }
-    ubatch.n_tokens += length;
-    ubatch.n_seqs += ubatch.equal_seqs ? 
1 : length; // virtual sequences for simple splits - seq.offset += length; - seq.length -= length; - n_tokens -= length; - GGML_ASSERT(ubatch.n_tokens == ubatch.n_seq_tokens * ubatch.n_seqs); -} +bool llama_batch_allocr::init( + const llama_batch & batch_inp, + const llama_vocab & vocab, + const llama_memory_i * memory, + uint32_t n_embd, + uint32_t n_seq_max, + bool output_all) { + clear(); -llama_ubatch llama_sbatch::split_simple(size_t n_ubatch) { - n_ubatch = n_tokens < n_ubatch ? n_tokens : n_ubatch; - llama_ubatch ubatch = reserve_ubatch(n_ubatch, /* has_embd */ batch->embd != nullptr); - ubatch.equal_seqs = false; - if (!seq.empty()) { - llama_sbatch_seq & s = seq[0]; - size_t length = s.length < n_ubatch ? s.length : n_ubatch; - GGML_ASSERT(seq.size() == 1 && s.n_seq_id == 0); // don't mix with other splits - add_seq_to_ubatch(ubatch, s, length); - } - return ubatch; -} + batch = batch_inp; -llama_ubatch llama_sbatch::split_equal(size_t n_ubatch) { - n_ubatch = n_tokens < n_ubatch ? n_tokens : n_ubatch; - llama_ubatch ubatch = reserve_ubatch(n_ubatch, /* has_embd */ batch->embd != nullptr); - if (!seq.empty()) { - size_t length = 0; - size_t n_tokens_in_ubatch = 0; - GGML_ASSERT(seq[0].n_seq_id > 0); // should not be mixed with simple splits - // smallest first, because it's easier to split this way; - // starting from the end to pop in constant time. - for (size_t i = seq.size(); i-- > 0;) { - llama_sbatch_seq & s = seq[i]; - GGML_ASSERT(s.length > 0); - if (length == 0) { - length = s.length < n_ubatch ? s.length : n_ubatch; - } - add_seq_to_ubatch(ubatch, s, length); - n_tokens_in_ubatch += length; - // shared prompts can't be mixed with any of their sequences, - // so it's safer to compute them in their own ubatch - if (s.n_seq_id > 1) { break; } - // stop when there isn't enough space for another sequence - if (length + n_tokens_in_ubatch > n_ubatch) { break; } - } - } - return ubatch; -} + this->vocab = &vocab; -llama_ubatch llama_sbatch::split_seq(size_t n_ubatch) { - n_ubatch = n_tokens < n_ubatch ? n_tokens : n_ubatch; - llama_ubatch ubatch = reserve_ubatch(n_ubatch, /* has_embd */ batch->embd != nullptr); - if (!seq.empty()) { - llama_sbatch_seq & s = seq[seq.size() - 1]; - size_t length = s.length < n_ubatch ? s.length : n_ubatch; - GGML_ASSERT(s.n_seq_id > 0); // should not be mixed with simple splits - add_seq_to_ubatch(ubatch, s, length); - } - return ubatch; -} - -llama_sbatch::llama_sbatch(const llama_batch & batch, size_t n_embd, bool simple_split, bool logits_all) { - GGML_ASSERT(batch.n_tokens >= 0); - this->batch = &batch; - this->n_embd = n_embd; - this->logits_all = logits_all; - - n_tokens = batch.n_tokens; - ids.resize(n_tokens); - out_ids.clear(); - // TODO: reserve out_ids and seq - - for (size_t i = 0; i < n_tokens; ++i) { - ids[i] = i; - } - - if (simple_split) { - seq.resize(1); - llama_sbatch_seq & s = seq[0]; - s.n_seq_id = 0; - s.seq_id = nullptr; - s.offset = 0; - s.length = n_tokens; - return; - } - - std::sort(ids.begin(), ids.end(), - [&batch](size_t a, size_t b) { - int32_t n_seq_a = batch.n_seq_id ? batch.n_seq_id[a] : 1; - int32_t n_seq_b = batch.n_seq_id ? 
batch.n_seq_id[b] : 1; - // sort by seq_id, then by pos - if (n_seq_a == n_seq_b) { - if (batch.seq_id) { - for (int32_t i = 0; i < n_seq_a; ++i) { - llama_seq_id seq_id_a = batch.seq_id[a][i]; - llama_seq_id seq_id_b = batch.seq_id[b][i]; - // smaller seq_ids go first - if (seq_id_a != seq_id_b) { - return seq_id_a < seq_id_b; - } - } - } - // when all else is equal, sort by pos - if (batch.pos) { - return batch.pos[a] < batch.pos[b]; - } - // no pos, sort by id - return a < b; - } - // shared prompts go first - return n_seq_a > n_seq_b; - } - ); - - // init seq - llama_sbatch_seq * last_seq = nullptr; - - for (size_t i = 0; i < n_tokens; ++i) { - const size_t bi = ids[i]; - const int32_t n_seqs = batch.n_seq_id[bi]; - llama_seq_id * seq_ids = batch.seq_id[bi]; - if (last_seq != nullptr) { - bool same = n_seqs == last_seq->n_seq_id; - for (int32_t j = 0; same && j < n_seqs; ++j) { - if (seq_ids[j] != last_seq->seq_id[j]) { - same = false; - } - } - if (same) { - last_seq->length += 1; - continue; - } - } - llama_sbatch_seq new_seq = {n_seqs, seq_ids, i, 1}; - seq.push_back(new_seq); - last_seq = &seq.back(); - } - - // keep shared prompts first at the end, then sort by length descending. - std::sort(seq.begin(), seq.end(), - [](llama_sbatch_seq & a, llama_sbatch_seq & b) { - if (a.n_seq_id == b.n_seq_id) { - return a.length > b.length; - } - return a.n_seq_id < b.n_seq_id; - } - ); -} - -llama_batch_allocr::llama_batch_allocr(struct llama_batch in_batch, llama_pos p0) { - batch = in_batch; GGML_ASSERT(batch.n_tokens > 0); - if (!batch.pos) { - pos.resize(batch.n_tokens); - for (int32_t i = 0; i < batch.n_tokens; i++) { - pos[i] = i + p0; - } - batch.pos = pos.data(); + + // + // validate input batch + // + + if (n_seq_max > LLAMA_MAX_SEQ) { + LLAMA_LOG_ERROR("%s: n_seq_max = %d > %d\n", __func__, n_seq_max, LLAMA_MAX_SEQ); + return false; } + + if (batch.token) { + for (int32_t i = 0; i < batch.n_tokens; ++i) { + if (batch.token[i] < 0 || (uint32_t) batch.token[i] >= vocab.n_tokens()) { + LLAMA_LOG_ERROR("%s: invalid token[%d] = %d\n", __func__, i, batch.token[i]); + return false; + } + } + } + + if (batch.seq_id) { + for (int32_t i = 0; i < batch.n_tokens; ++i) { + for (int32_t s = 0; s < batch.n_seq_id[i]; ++s) { + if (batch.seq_id && (batch.seq_id[i][s] < 0 || batch.seq_id[i][s] >= (llama_seq_id) n_seq_max)) { + LLAMA_LOG_ERROR("%s: invalid seq_id[%d][%d] = %d >= %d\n", __func__, i, s, batch.seq_id[i][s], (llama_seq_id) n_seq_max); + return false; + } + } + } + } + + // + // auto-generate missing fields + // + if (!batch.n_seq_id) { n_seq_id.resize(batch.n_tokens); for (int32_t i = 0; i < batch.n_tokens; i++) { @@ -294,6 +77,7 @@ llama_batch_allocr::llama_batch_allocr(struct llama_batch in_batch, llama_pos p0 } batch.n_seq_id = n_seq_id.data(); } + if (!batch.seq_id) { seq_id.resize(batch.n_tokens + 1); seq_id[batch.n_tokens] = NULL; @@ -302,10 +86,729 @@ llama_batch_allocr::llama_batch_allocr(struct llama_batch in_batch, llama_pos p0 } batch.seq_id = seq_id.data(); } + + if (!batch.pos) { + pos.resize(batch.n_tokens); + + // initialize the starting position for each sequence based on the positions in the memory + llama_pos p0[LLAMA_MAX_SEQ]; + for (uint32_t s = 0; s < n_seq_max; ++s) { + if (!memory) { + // if no memory -> start from 0 + p0[s] = 0; + } else { + p0[s] = memory->seq_pos_max(s) + 1; + } + } + + for (int32_t i = 0; i < batch.n_tokens; i++) { + const llama_seq_id seq_id = batch.seq_id[i][0]; + + pos[i] = p0[seq_id]; + + // update the starting position for all sequences 
that are assigned to this token
+            for (int32_t s = 0; s < batch.n_seq_id[i]; ++s) {
+                const llama_seq_id seq_id = batch.seq_id[i][s];
+
+                p0[seq_id] = pos[i] + 1;
+            }
+        }
+
+        batch.pos = pos.data();
+    }
+
     if (!batch.logits) {
-        logits.resize(batch.n_tokens);
-        logits[logits.size() - 1] = true;
-        batch.logits = logits.data();
+        if (output_all) {
+            // return the output for all tokens
+            output.resize(batch.n_tokens, true);
+        } else {
+            // return the output only for the last token
+            output.resize(batch.n_tokens, false);
+            output[output.size() - 1] = true;
+        }
+
+        batch.logits = output.data();
+    } else if (output_all) {
+        bool warn = false;
+
+        for (int32_t i = 0; i < batch.n_tokens; ++i) {
+            if (batch.logits[i] == 0) {
+                warn = true;
+            }
+        }
+
+        if (warn) {
+            LLAMA_LOG_WARN("%s: embeddings required but some input tokens were not marked as outputs -> overriding\n", __func__);
+
+            output.resize(batch.n_tokens, true);
+            batch.logits = output.data();
+        }
+    }
+
+    //
+    // compute stats
+    //
+
+    this->n_embd = n_embd;
+    this->n_seq_max = n_seq_max;
+
+    // count the outputs in this batch
+    for (int32_t i = 0; i < batch.n_tokens; ++i) {
+        n_outputs += batch.logits[i] != 0;
+    }
+
+    has_cpl = false;
+
+    // determine coupled sequences
+    // these are pairs of sequences that have at least one token in the input batch that is assigned to both of them
+    for (int32_t i = 0; i < batch.n_tokens; ++i) {
+        const llama_seq_id s0 = batch.seq_id[i][0];
+
+        for (int32_t s = 0; s < batch.n_seq_id[i]; ++s) {
+            const llama_seq_id s1 = batch.seq_id[i][s];
+
+            seq_pos[s1].insert(batch.pos[i]);
+
+            if (s > 0) {
+                // mark that sequence s1 is coupled to s0
+                seq_cpl[s1][s0] = true;
+
+                // note: tracking the other way around is not necessary for now
+                //seq_cpl[s0][s1] = true;
+
+                has_cpl = true;
+            }
+        }
+    }
+
+    // precompute the sequence sets for each token and determine the unique sequence ids that participate in the batch
+    {
+        seq_set_t seq_set_unq;
+
+        for (int32_t i = 0; i < batch.n_tokens; ++i) {
+            seq_set_t cur;
+            for (int32_t s = 0; s < batch.n_seq_id[i]; ++s) {
+                const llama_seq_id seq_id = batch.seq_id[i][s];
+
+                cur        .set(seq_id);
+                seq_set_unq.set(seq_id);
+            }
+
+            seq_set.push_back(cur);
+            seq_set_map[cur].push_back(i);
+        }
+
+        for (uint32_t s = 0; s < n_seq_max; ++s) {
+            if (seq_set_unq.test(s)) {
+                seq_idx[s] = seq_id_unq.size();
+                seq_id_unq.push_back(s);
+            }
+        }
+    }
+
+    if (debug > 0) {
+        LLAMA_LOG_DEBUG("%s: input batch info:\n", __func__);
+
+        llama_ubatch ubatch {
+            /*.b_equal_seqs =*/ false,
+            /*.n_tokens     =*/ (uint32_t) batch.n_tokens,
+            /*.n_seq_tokens =*/ (uint32_t) 1,
+            /*.n_seqs       =*/ (uint32_t) batch.n_tokens,
+            /*.n_seqs_unq   =*/ (uint32_t) this->seq_id_unq.size(),
+            /*.token        =*/ batch.token,
+            /*.embd         =*/ batch.embd,
+            /*.pos          =*/ batch.pos,
+            /*.n_seq_id     =*/ batch.n_seq_id,
+            /*.seq_id       =*/ batch.seq_id,
+            /*.seq_id_unq   =*/ this->seq_id_unq.data(),
+            /*.seq_idx      =*/ this->seq_idx.data(),
+            /*.output       =*/ batch.logits,
+            /*.data         =*/ {},
+        };
+
+        ubatch_print(ubatch, debug);
+
+        LLAMA_LOG_DEBUG("%s: seq = [\n", __func__);
+        for (int s0 = 0; s0 < (int) seq_pos.size(); ++s0) {
+            if (seq_pos[s0].empty()) {
+                continue;
+            }
+
+            std::stringstream ss;
+            for (int s1 = 0; s1 < (int) seq_cpl[s0].size(); ++s1) {
+                if (seq_cpl[s0][s1]) {
+                    ss << s1 << " ";
+                }
+            }
+
+            LLAMA_LOG_DEBUG("%s:   %4d: pos = [%4d, %4d], cpl = %s\n",
+                    __func__, s0, seq_pos_min(s0), seq_pos_max(s0), ss.str().empty() ? "-" : ss.str().c_str());
"-" : ss.str().c_str()); + } + LLAMA_LOG_DEBUG("%s: ]\n", __func__); + } + + // + // consistency checks + // + + for (uint32_t s = 0; s < n_seq_max; ++s) { + if (seq_pos[s].empty()) { + continue; + } + + const llama_pos p0 = memory ? memory->seq_pos_max(s) : -1; + + if (p0 >= 0) { + bool ok = true; + + if (batch.token) { + if (seq_pos_min(s) != p0 + 1) { + ok = false; + } + } else { + assert(batch.embd); + + // for embeddings (typically used as vision input), we allow them to have repeating positions + // ref: https://github.com/ggml-org/llama.cpp/issues/13694#issuecomment-2983871762 + if (seq_pos_min(s) != p0 && seq_pos_min(s) != p0 + 1) { + ok = false; + } + } + + if (!ok) { + LLAMA_LOG_ERROR( + "%s: the tokens of sequence %d in the input batch have inconsistent sequence positions:\n" + " - the last position stored in the memory module of the context (i.e. the KV cache) for sequence %d is X = %d\n" + " - the tokens for sequence %d in the input batch have a starting position of Y = %d\n" + " it is required that the sequence positions remain consecutive: Y = X + 1\n", + __func__, s, s, p0, s, seq_pos_min(s)); + + return false; + } + } + + if (seq_pos_max(s) - seq_pos_min(s) + 1 > (int) seq_pos[s].size()) { + LLAMA_LOG_ERROR("%s: sequence %d positions are not continuous\n", __func__, s); + return false; + } + } + + if (memory) { + for (uint32_t s0 = 0; s0 < n_seq_max; ++s0) { + for (uint32_t s1 = 0; s1 < n_seq_max; ++s1) { + if (seq_cpl[s0][s1]) { + if (memory->seq_pos_min(s0) != memory->seq_pos_min(s1) || + memory->seq_pos_max(s0) != memory->seq_pos_max(s1)) { + LLAMA_LOG_ERROR("%s: sequence %d is coupled to %d in the input batch, but have divereged\n", __func__, s0, s1); + return false; + } + } + } + } + } + + // disallow partial sequence sub-sets: + // + // invalid: x + // i: 0 1 2 ... + // --------------------------------------- + // seq_id[i][0]: 0 0 1 + // seq_id[i][1]: 1 1 2 + // seq_id[i][2]: 2 + // + // disallow decreasing sequence positions: + // + // invalid: x + // i: 0 1 2 3 4 5 6 ... 
+    //   ---------------------------------------
+    //       pos[i]:    4   5   0   1   6   2   3
+    //   seq_id[i][0]:  0   0   1   1   0   1   0
+    //
+    {
+        seq_set_t cur_seq_set[LLAMA_MAX_SEQ];
+        for (uint32_t s = 0; s < n_seq_max; ++s) {
+            cur_seq_set[s].set();
+        }
+
+        llama_pos cur_seq_pos[LLAMA_MAX_SEQ];
+        for (uint32_t s = 0; s < n_seq_max; ++s) {
+            cur_seq_pos[s] = -1;
+        }
+
+        for (int32_t i = 0; i < batch.n_tokens; ++i) {
+            const llama_pos pos = batch.pos[i];
+
+            for (int32_t s = 0; s < batch.n_seq_id[i]; ++s) {
+                const llama_seq_id seq_id = batch.seq_id[i][s];
+
+                cur_seq_set[seq_id] &= seq_set[i];
+
+                if (cur_seq_set[seq_id].none()) {
+                    LLAMA_LOG_ERROR("%s: sequence %d belongs to incompatible sequence sets (not allowed)\n", __func__, seq_id);
+                    return false;
+                }
+
+                if (pos < cur_seq_pos[seq_id]) {
+                    LLAMA_LOG_ERROR("%s: sequence %d positions are decreasing (not allowed)\n", __func__, seq_id);
+                    return false;
+                }
+            }
+        }
+    }
+
+    split_reset();
+
+    return true;
+}
+
+llama_ubatch llama_batch_allocr::ubatch_reserve(uint32_t n_seq_tokens, uint32_t n_seqs) {
+    const uint32_t n_tokens = n_seq_tokens*n_seqs;
+
+    clear();
+    split_reset();
+
+    auto udata = std::make_shared<llama_ubatch::data_t>();
+
+    udata->token     .resize(n_tokens);
+    udata->embd      .clear();
+    udata->pos       .resize(n_tokens);
+    udata->n_seq_id  .resize(n_tokens);
+    udata->seq_id    .resize(n_tokens);
+    udata->seq_id_unq.resize(0);
+    udata->seq_idx   .resize(LLAMA_MAX_SEQ, -1);
+    udata->output    .resize(n_tokens);
+
+    for (uint32_t s = 0; s < n_seqs; ++s) {
+        udata->seq_idx[s] = s;
+        udata->seq_id_unq.push_back(s);
+    }
+
+    llama_ubatch res {
+        /*.b_equal_seqs =*/ true,
+        /*.n_tokens     =*/ n_tokens,
+        /*.n_seq_tokens =*/ n_seq_tokens,
+        /*.n_seqs       =*/ n_seqs,
+        /*.n_seqs_unq   =*/ n_seqs,
+
+        /*.token        =*/ udata->token.data(),
+        /*.embd         =*/ nullptr,
+        /*.pos          =*/ udata->pos.data(),
+        /*.n_seq_id     =*/ udata->n_seq_id.data(),
+        /*.seq_id       =*/ udata->seq_id.data(),
+        /*.seq_id_unq   =*/ udata->seq_id_unq.data(),
+        /*.seq_idx      =*/ udata->seq_idx.data(),
+        /*.output       =*/ udata->output.data(),
+        /*.data         =*/ std::move(udata),
+    };
+
+    return res;
+}
+
+const llama_batch & llama_batch_allocr::get_batch() const {
+    return batch;
+}
+
+uint32_t llama_batch_allocr::get_n_tokens() const {
+    return batch.n_tokens;
+}
+
+uint32_t llama_batch_allocr::get_n_outputs() const {
+    return n_outputs;
+}
+
+uint32_t llama_batch_allocr::get_n_used() const {
+    return n_used;
+}
+
+std::vector<int32_t> & llama_batch_allocr::get_out_ids() {
+    return out_ids;
+}
+
+llama_pos llama_batch_allocr::seq_pos_min(llama_seq_id seq_id) const {
+    return seq_pos[seq_id].empty() ? -1 : *seq_pos[seq_id].begin();
+}
+
+llama_pos llama_batch_allocr::seq_pos_max(llama_seq_id seq_id) const {
+    return seq_pos[seq_id].empty() ? -1 : *seq_pos[seq_id].rbegin();
+}
+
+void llama_batch_allocr::split_reset() {
+    out_ids.clear();
+
+    n_used = 0;
+
+    used.clear();
+    used.resize(get_n_tokens(), false);
+}
+
+llama_ubatch llama_batch_allocr::split_simple(uint32_t n_ubatch) {
+    // find the first unused token
+    uint32_t cur_idx = 0;
+    while (cur_idx < used.size() && used[cur_idx]) {
+        ++cur_idx;
+    }
+
+    // we are done
+    if (cur_idx >= used.size()) {
+        return {};
+    }
+
+    std::vector<int32_t> idxs;
+
+    while (true) {
+        idxs.push_back(cur_idx);
+
+        used[cur_idx] = true;
+        ++n_used;
+
+        ++cur_idx;
+
+        if (cur_idx >= used.size()) {
+            break;
+        }
+
+        if (idxs.size() >= n_ubatch) {
+            break;
+        }
+    }
+
+    return ubatch_add(idxs, idxs.size(), false);
+}
+
+llama_ubatch llama_batch_allocr::split_equal(uint32_t n_ubatch, bool sequential) {
+    if (sequential && has_cpl) {
+        LLAMA_LOG_ERROR("%s: sequential split is not supported when there are coupled sequences in the input batch\n", __func__);
+
+        return {};
+    }
+
+    std::vector<seq_set_t> cur_seq_set;
+
+    llama_seq_id last_seq_id = -1;
+
+    // determine the non-overlapping sequence sets participating in this ubatch
+    for (int32_t i = 0; i < batch.n_tokens; ++i) {
+        if (used[i]) {
+            continue;
+        }
+
+        bool add = true;
+
+        for (uint32_t s = 0; s < cur_seq_set.size(); ++s) {
+            // no overlap with existing sequence sets:
+            if (!(cur_seq_set[s] & seq_set[i]).none()) {
+                add = false;
+                break;
+            }
+        }
+
+        // accept only increasing sequence ids
+        if (sequential) {
+            add = add && (cur_seq_set.empty() || batch.seq_id[i][0] == last_seq_id + 1);
+        }
+
+        if (add) {
+            cur_seq_set.push_back(seq_set[i]);
+
+            last_seq_id = batch.seq_id[i][0];
+
+            if (cur_seq_set.size() > n_ubatch) {
+                break;
+            }
+        }
+    }
+
+    const uint32_t n_seqs = cur_seq_set.size();
+
+    // we are done
+    if (n_seqs == 0) {
+        return {};
+    }
+
+    // the current batch index of each sequence set
+    std::vector<int32_t> cur_idx(n_seqs, 0);
+
+    for (uint32_t s = 0; s < n_seqs; ++s) {
+        while (used[seq_set_map[cur_seq_set[s]][cur_idx[s]]]) {
+            ++cur_idx[s];
+        }
+    }
+
+    // the list of batch indices for each sequence set
+    // at the end we will concat these to get the final ubatch
+    std::vector<std::vector<int32_t>> idxs_per_seq(n_seqs);
+
+    while (true) {
+        // we can only add new n_seq_tokens tokens if all the sequence sets have at least one more unused token and
+        // if we haven't reached n_ubatch
+        bool can_expand = true;
+
+        for (uint32_t s = 0; s < n_seqs; ++s) {
+            if (cur_idx[s] >= (int32_t) seq_set_map[cur_seq_set[s]].size()) {
+                can_expand = false;
+                break;
+            }
+        }
+
+        if (!can_expand) {
+            break;
+        }
+
+        for (uint32_t s = 0; s < n_seqs; ++s) {
+            const int32_t idx = seq_set_map[cur_seq_set[s]][cur_idx[s]];
+
+            idxs_per_seq[s].push_back(idx);
+
+            used[idx] = true;
+            ++n_used;
+
+            ++cur_idx[s];
+        }
+
+        if ((idxs_per_seq[0].size() + 1)*n_seqs > n_ubatch) {
+            break;
+        }
+    }
+
+    // concat the per-sequence-set lists
+    std::vector<int32_t> idxs;
+
+    for (uint32_t s = 0; s < n_seqs; ++s) {
+        idxs.insert(idxs.end(), idxs_per_seq[s].begin(), idxs_per_seq[s].end());
+    }
+
+    return ubatch_add(idxs, n_seqs, true);
+}
+
+llama_ubatch llama_batch_allocr::split_seq(uint32_t n_ubatch) {
+    // find the first unused token
+    uint32_t cur_idx = 0;
+    while (cur_idx < used.size() && used[cur_idx]) {
+        ++cur_idx;
+    }
+
+    // we are done
+    if (cur_idx >= used.size()) {
+        return {};
+    }
+
+    // this is the starting sequence set
+    // we allow adding tokens only if their sequence set is a subset of the current sequence set
+    auto cur_seq_set = seq_set[cur_idx];
+
+    std::vector<int32_t> idxs;
+
+    while (true) {
+        idxs.push_back(cur_idx);
+
+        used[cur_idx] = true;
+        ++n_used;
+
+        if (idxs.size() >= n_ubatch) {
+            break;
+        }
+
+        do {
+            ++cur_idx;
+        } while (cur_idx < get_n_tokens() && (used[cur_idx] || ((cur_seq_set & seq_set[cur_idx]) != seq_set[cur_idx])));
+
+        if (cur_idx == get_n_tokens()) {
+            break;
+        }
+
+        cur_seq_set = seq_set[cur_idx];
+    }
+
+    return ubatch_add(idxs, 1, true);
+}
+
+void llama_batch_allocr::clear() {
+    n_outputs = 0;
+
+    batch = {};
+
+    pos       .clear();
+    n_seq_id  .clear();
+    seq_id    .clear();
+    seq_id_unq.clear();
+    output    .clear();
+
+    for (auto & cur : seq_pos) {
+        cur.clear();
+    }
+
+    for (auto & cur : seq_cpl) {
+        std::fill(cur.begin(), cur.end(), false);
+    }
+
+    seq_set.clear();
+
+    seq_set_map.clear();
+
+    std::fill(seq_idx.begin(), seq_idx.end(), -1);
+}
+
+llama_ubatch llama_batch_allocr::ubatch_add(const std::vector<int32_t> & idxs, uint32_t n_seqs, bool equal_seqs) {
+    const uint32_t n_tokens = idxs.size();
+
+    assert(n_tokens%n_seqs == 0);
+
+    auto udata = std::make_shared<llama_ubatch::data_t>();
+
+    const int32_t n_pos_cur = batch.embd ? n_pos_per_embd : 1;
+
+    const int64_t n_embd_all = batch.embd ? (int64_t) n_tokens*n_embd : 0;
+    const int64_t n_pos_all  = (int64_t) n_tokens*n_pos_cur;
+
+    udata->token     .resize(n_tokens);
+    udata->embd      .resize(n_embd_all);
+    udata->pos       .resize(n_pos_all);
+    udata->n_seq_id  .resize(n_tokens);
+    udata->seq_id    .resize(n_tokens);
+    udata->seq_id_unq.resize(0);
+    udata->seq_idx   .resize(LLAMA_MAX_SEQ, -1);
+    udata->output    .resize(n_tokens);
+
+    seq_set_t seq_set_unq;
+
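+    // gather the selected tokens from the input batch into the contiguous ubatch buffers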
+    for (size_t i = 0; i < idxs.size(); ++i) {
+        if (batch.token) {
+            udata->token[i] = batch.token[idxs[i]];
+        }
+
+        if (batch.embd) {
+            memcpy(udata->embd.data() + i*n_embd, batch.embd + (int64_t) idxs[i]*n_embd, n_embd*sizeof(float));
+        }
+
+        for (int j = 0; j < n_pos_cur; ++j) {
+            udata->pos[j*n_tokens + i] = batch.pos[j*batch.n_tokens + idxs[i]];
+        }
+
+        udata->n_seq_id[i] = batch.n_seq_id[idxs[i]];
+        udata->seq_id[i]   = batch.seq_id[idxs[i]];
+        udata->output[i]   = batch.logits[idxs[i]];
+
+        for (int s = 0; s < udata->n_seq_id[i]; ++s) {
+            seq_set_unq.set(udata->seq_id[i][s]);
+        }
+
+        if (udata->output[i]) {
+            out_ids.push_back(idxs[i]);
+        }
+    }
+
+    for (uint32_t s = 0; s < n_seq_max; ++s) {
+        if (seq_set_unq.test(s)) {
+            udata->seq_idx[s] = udata->seq_id_unq.size();
+            udata->seq_id_unq.push_back(s);
+        }
+    }
+
+    llama_ubatch res {
+        /*.b_equal_seqs =*/ equal_seqs,
+        /*.n_tokens     =*/ n_tokens,
+        /*.n_seq_tokens =*/ n_tokens/n_seqs,
+        /*.n_seqs       =*/ n_seqs,
+        /*.n_seqs_unq   =*/ (uint32_t) udata->seq_id_unq.size(),
+
+        /*.token        =*/ batch.token ? udata->token.data() : nullptr,
+        /*.embd         =*/ batch.embd ? udata->embd.data() : nullptr,
+        /*.pos          =*/ udata->pos.data(),
+        /*.n_seq_id     =*/ udata->n_seq_id.data(),
+        /*.seq_id       =*/ udata->seq_id.data(),
+        /*.seq_id_unq   =*/ udata->seq_id_unq.data(),
+        /*.seq_idx      =*/ udata->seq_idx.data(),
+        /*.output       =*/ udata->output.data(),
+        /*.data         =*/ std::move(udata),
+    };
+
+    if (debug > 0) {
+        LLAMA_LOG_DEBUG("%s: added ubatch to split:\n", __func__);
+
+        ubatch_print(res, debug);
+    }
+
+    return res;
+}
+
+void llama_batch_allocr::ubatch_print(const llama_ubatch & ubatch, int debug) {
+    if (debug > 0) {
+        LLAMA_LOG_DEBUG("%s: equal_seqs   = %d\n", __func__, ubatch.equal_seqs());
+        LLAMA_LOG_DEBUG("%s: n_tokens     = %d\n", __func__, ubatch.n_tokens);
+        LLAMA_LOG_DEBUG("%s: n_seq_tokens = %d\n", __func__, ubatch.n_seq_tokens);
+        LLAMA_LOG_DEBUG("%s: n_seqs       = %d\n", __func__, ubatch.n_seqs);
+        LLAMA_LOG_DEBUG("%s: n_seqs_unq   = %d\n", __func__, ubatch.n_seqs_unq);
+
+        std::stringstream ss_seq_id_unq;
+        std::stringstream ss_seq_idx;
+
+        ss_seq_id_unq << "[ ";
+        ss_seq_idx    << "[";
+
+        for (uint32_t s = 0; s < ubatch.n_seqs_unq; ++s) {
+            ss_seq_id_unq << ubatch.seq_id_unq[s] << " ";
+        }
+
+        for (uint32_t s = 0; s < LLAMA_MAX_SEQ; ++s) {
+            if (ubatch.seq_idx[s] >= 0) {
+                ss_seq_idx << ubatch.seq_idx[s]%10;
+            } else {
+                ss_seq_idx << ".";
+            }
+        }
+
+        ss_seq_id_unq << "]";
+        ss_seq_idx    << "]";
+
+        LLAMA_LOG_DEBUG("%s: token      = %p\n", __func__, (void *) ubatch.token);
+        LLAMA_LOG_DEBUG("%s: embd       = %p\n", __func__, (void *) ubatch.embd);
+        LLAMA_LOG_DEBUG("%s: pos        = %p\n", __func__, (void *) ubatch.pos);
+        LLAMA_LOG_DEBUG("%s: n_seq_id   = %p\n", __func__, (void *) ubatch.n_seq_id);
+        LLAMA_LOG_DEBUG("%s: seq_id     = %p\n", __func__, (void *) ubatch.seq_id);
+        LLAMA_LOG_DEBUG("%s: seq_id_unq = %s\n", __func__, ss_seq_id_unq.str().c_str());
+        LLAMA_LOG_DEBUG("%s: seq_idx    = %s\n", __func__, ss_seq_idx.str().c_str());
+        LLAMA_LOG_DEBUG("%s: output     = %p\n", __func__, (void *) ubatch.output);
+        LLAMA_LOG_DEBUG("%s: n_outputs  = %d\n", __func__, n_outputs);
+
+        if (debug > 1) {
+            int seq_id_max = 0;
+            for (uint32_t i = 0; i < ubatch.n_tokens; ++i) {
+                for (int s = 0; s < ubatch.n_seq_id[i]; ++s) {
+                    seq_id_max = std::max(seq_id_max, ubatch.seq_id[i][s]);
+                }
+            }
+            ++seq_id_max;
+
+            LLAMA_LOG_DEBUG("%s: token = [\n", __func__);
+            for (uint32_t i = 0; i < ubatch.n_tokens; ++i) {
+                std::vector<int8_t> seq_id(seq_id_max);
+
+                for (int s = 0; s < ubatch.n_seq_id[i]; ++s) {
+                    seq_id[ubatch.seq_id[i][s]] = 1;
+                }
+
+                std::stringstream ss;
+                for (int s = 0; s < seq_id_max; ++s) {
+                    if (seq_id[s]) {
+                        ss << s%10;
+                    } else {
+                        ss << ".";
+                    }
+                }
+
+                if (ubatch.token) {
+                    LLAMA_LOG_DEBUG("%s:  %4d: id = %6d (%16s), pos = %4d, n_seq_id = %2d, seq_id = [%s], output = %d\n",
+                            __func__, i, ubatch.token[i], vocab->token_to_piece(ubatch.token[i]).c_str(),
+                            ubatch.pos[i], ubatch.n_seq_id[i], ss.str().c_str(), ubatch.output[i]);
+                } else {
+                    LLAMA_LOG_DEBUG("%s:  %4d: [embd], pos = %4d, n_seq_id = %2d, seq_id = [%s], output = %d\n",
+                            __func__, i, ubatch.pos[i], ubatch.n_seq_id[i], ss.str().c_str(), ubatch.output[i]);
+                }
+            }
+            LLAMA_LOG_DEBUG("%s: ]\n", __func__);
+        }
+    }
+}
+
@@ -317,25 +820,25 @@ struct llama_batch llama_batch_get_one(
              llama_token * tokens,
                  int32_t   n_tokens) {
     return {
-        /*n_tokens       =*/ n_tokens,
-        /*tokens         =*/ tokens,
-        /*embd           =*/ nullptr,
-        /*pos            =*/ nullptr,
-        /*n_seq_id       =*/ nullptr,
-        /*seq_id         =*/ nullptr,
-        /*logits         =*/ nullptr,
+        /*n_tokens =*/ n_tokens,
+        /*tokens   =*/ tokens,
+        /*embd     =*/ nullptr,
+        /*pos      =*/ nullptr,
+        /*n_seq_id =*/ nullptr,
+        /*seq_id   =*/ nullptr,
+        /*logits   =*/ nullptr,
     };
 }
 
 struct llama_batch llama_batch_init(int32_t n_tokens_alloc, int32_t embd, int32_t n_seq_max) {
     llama_batch batch = {
-        /*n_tokens       =*/ 0,
-        /*tokens         =*/ nullptr,
-        /*embd           =*/ nullptr,
-        /*pos            =*/ nullptr,
-        /*n_seq_id       =*/ nullptr,
-        /*seq_id         =*/ nullptr,
-        /*logits         =*/ nullptr,
+        /*n_tokens =*/ 0,
+        /*tokens   =*/ nullptr,
+        /*embd     =*/ nullptr,
+        /*pos      =*/ nullptr,
+        /*n_seq_id =*/ nullptr,
+        /*seq_id   =*/ nullptr,
+        /*logits   =*/ nullptr,
     };
 
     if (embd) {
diff --git a/llama/llama.cpp/src/llama-batch.h b/llama/llama.cpp/src/llama-batch.h
index 6305051b6..d563adc66 100644
--- a/llama/llama.cpp/src/llama-batch.h
+++ b/llama/llama.cpp/src/llama-batch.h
@@ -2,88 +2,159 @@
 
 #include "llama.h"
 
+#include "llama-cparams.h"
+
 #include <array>
 #include <vector>
+#include <set>
+#include <bitset>
+#include <unordered_map>
+#include <memory>
 
-// very similar to llama_batch,
-// but has more metadata about sequences
+// keep this struct lightweight
 struct llama_ubatch {
-    bool equal_seqs;
+    bool equal_seqs() const {
+        return b_equal_seqs != 0;
+    }
+
+    uint32_t b_equal_seqs; // note: this is a boolean, but we use a uint32_t for alignment
+                           //       otherwise address sanitizer complains
     // TODO: whole_seqs for embeddings?
 
-    uint32_t n_tokens;     // total tokens (n_seq_tokens * n_seqs)
-    uint32_t n_seq_tokens; // tokens per sequence
-    uint32_t n_seqs;
+    uint32_t n_tokens;     // total tokens (n_seq_tokens * n_seqs)
+    uint32_t n_seq_tokens; // tokens per sequence set
+    uint32_t n_seqs;       // sequence sets in the ubatch
+    uint32_t n_seqs_unq;   // unique sequence ids in the ubatch
 
-    llama_token  *  token;    // [n_tokens]
-    float        *  embd;     // [n_embd, n_tokens]
-    llama_pos    *  pos;      // [n_tokens]
-    int32_t      *  n_seq_id; // [n_seqs]
-    llama_seq_id ** seq_id;   // [n_seqs]
-    int8_t       *  output;   // [n_tokens]
+    // seq_id_unq: unique sequence ids in the ubatch
+    // seq_idx:    indices of the unique sequence ids in the ubatch in [0, n_seqs_unq)
+    //             used for extracting sequence pooled embeddings
+
+    //                          // size               | idx | val
+    llama_token  *  token;      // [n_tokens]         | i   | id, token
+    float        *  embd;       // [n_embd, n_tokens] | i   | embd
+    llama_pos    *  pos;        // [n_tokens]         | i   | pos
+    int32_t      *  n_seq_id;   // [n_tokens]         | i   | -
+    llama_seq_id ** seq_id;     // [n_tokens]         | s   | s0, s1, seq_id
+    llama_seq_id *  seq_id_unq; // [n_seqs_unq]       | s   | seq_id
+    int32_t      *  seq_idx;    // [LLAMA_MAX_SEQ]    | -   | seq_idx
+    int8_t       *  output;     // [n_tokens]         | i   | -
+
+    struct data_t {
+        std::vector<llama_token>    token;
+        std::vector<float>          embd;
+        std::vector<llama_pos>      pos;
+        std::vector<int32_t>        n_seq_id;
+        std::vector<llama_seq_id *> seq_id;
+        std::vector<llama_seq_id>   seq_id_unq;
+        std::vector<int32_t>        seq_idx;
+        std::vector<int8_t>         output;
+    };
+
+    // the llama_ubatch pointers above point to this data if set. otherwise - points to non-owning data
+    std::shared_ptr<data_t> data;
 };
 
-struct llama_sbatch_seq {
-    int32_t n_seq_id;
+// a helper for sanitizing, fulfilling and splitting a batch
+class llama_batch_allocr {
+public:
+    llama_batch_allocr(uint32_t n_pos_per_embd);
 
-    llama_seq_id * seq_id;
+    // sanitize and auto-gen missing data in the input batch
+    // memory is optional. if provided, it will be used to check for sequence continuity and to determine the positions
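+    // returns false if the batch is invalid (e.g. out-of-range token ids, decreasing positions, incompatible sequence sets)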
+    bool init(
+            const llama_batch & batch_inp,
+            const llama_vocab & vocab,
+            const llama_memory_i * memory,
+            uint32_t n_embd,
+            uint32_t n_seq_max,
+            bool output_all);
 
-    size_t offset;
-    size_t length;
-};
+    const llama_batch & get_batch() const;
 
-// sequence-length-aware batch splitting
-struct llama_sbatch {
-    // tokens left in this batch
-    size_t n_tokens;
+    uint32_t get_n_tokens()  const;
+    uint32_t get_n_outputs() const;
+    uint32_t get_n_used()    const;
 
-    size_t n_embd;
+    // the array of output indices in the order they were encountered during the ubatch splitting
+    std::vector<int32_t> & get_out_ids();
 
-    bool logits_all; // TODO: remove once lctx.logits_all is removed too
+    // min/max positions of each sequence in the current ubatch
+    llama_pos seq_pos_min(llama_seq_id seq_id) const;
+    llama_pos seq_pos_max(llama_seq_id seq_id) const;
 
-    // sorted indices into the batch
-    std::vector<int64_t> ids;
-    // batch indices of the output
-    std::vector<int64_t> out_ids;
-    std::vector<llama_sbatch_seq> seq;
+    // call once before splitting the batch to reset the internal state
+    void split_reset();
 
-    const llama_batch * batch = nullptr;
+    // simple split, unknown number of sequence sets of unequal lengths
+    llama_ubatch split_simple(uint32_t n_ubatch);
 
-    // buffers for the ubatch
-    std::vector<llama_token>    ubatch_token;
-    std::vector<float>          ubatch_embd;
-    std::vector<llama_pos>      ubatch_pos;
-    std::vector<int32_t>        ubatch_n_seq_id;
-    std::vector<llama_seq_id *> ubatch_seq_id;
-    std::vector<int8_t>         ubatch_output;
+    // make ubatches of equal-length sequence sets
+    // if sequential == true, the tokens in the ubatch will have increasing sequential sequence ids
+    llama_ubatch split_equal(uint32_t n_ubatch, bool sequential);
 
-    llama_ubatch reserve_ubatch(size_t n_ubatch, bool has_embd = false);
+    // sequence-set-wise split - each ubatch contains a single sequence-set
+    llama_ubatch split_seq(uint32_t n_ubatch);
 
-    void add_seq_to_ubatch(llama_ubatch & ubatch, llama_sbatch_seq & seq, size_t length);
+    // a helper method for creating a well-defined ubatch of tokens
+    // TODO: support embeddings if needed in the future
+    llama_ubatch ubatch_reserve(uint32_t n_seq_tokens, uint32_t n_seqs);
 
-    // simple split, unknown number of sequences of unequal lengths
-    llama_ubatch split_simple(size_t n_ubatch);
+private:
+    void clear();
 
-    // make batches of equal-length sequences
-    llama_ubatch split_equal(size_t n_ubatch);
+    // create the next ubatch based on the provided batch indices (idxs) and the number of sequence sets (n_seqs)
+    // return llama_ubatch.n_tokens == 0 if the entire batch was consumed
+    llama_ubatch ubatch_add(const std::vector<int32_t> & idxs, uint32_t n_seqs, bool equal_seqs);
 
-    // sequence-wise split
-    llama_ubatch split_seq(size_t n_ubatch);
+    // for debugging, start with LLAMA_BATCH_DEBUG=2
+    void ubatch_print(const llama_ubatch & ubatch, int debug);
 
-    llama_sbatch() = default;
-    llama_sbatch(const llama_batch & batch, size_t n_embd, bool simple_split = false, bool logits_all = false);
-};
+    llama_batch batch;
 
-// temporary allocate memory for the input batch if needed
-struct llama_batch_allocr {
-    struct llama_batch batch;
+    // only for debugging purposes
+    const llama_vocab * vocab;
+
+    // TODO: this is more of a temporary solution until we have a better way to handle multiple positions per token/embd
+    //       ref: https://github.com/ggml-org/llama.cpp/issues/13694#issuecomment-2983871762
+    const uint32_t n_pos_per_embd;
+
+    uint32_t n_embd;
+    uint32_t n_seq_max;
+    uint32_t n_outputs;
 
     std::array<llama_seq_id, 1> seq_id_0 = { 0 }; // default sequence id
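+
+    // per-token data, used to fulfill fields that are missing from the input batch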
     std::vector<llama_pos>      pos;
     std::vector<int32_t>        n_seq_id;
     std::vector<llama_seq_id *> seq_id;
-    std::vector<int8_t>         logits;
+    std::vector<llama_seq_id>   seq_id_unq;
+    std::vector<int32_t>        seq_idx;
+    std::vector<int8_t>         output;
 
-    // optionally fulfill the batch returned by llama_batch_get_one
-    llama_batch_allocr(struct llama_batch in_batch, llama_pos p0);
+    using pos_set_t = std::set<llama_pos>;
+    using seq_cpl_t = std::vector<bool>;
+
+    // helper flag to quickly determine if there are any coupled sequences in the batch
+    bool has_cpl = false;
+
+    std::vector<pos_set_t> seq_pos; // seq_pos[s]: the set of positions in sequence s
+    std::vector<seq_cpl_t> seq_cpl; // seq_cpl[s0][s1]: if sequence s0 is coupled to sequence s1
+
+    using idx_vec_t = std::vector<int32_t>;
+    using seq_set_t = std::bitset<LLAMA_MAX_SEQ>;
+
+    std::vector<seq_set_t> seq_set; // seq_set[i]: the sequence set of token i
+
+    std::unordered_map<seq_set_t, idx_vec_t> seq_set_map; // the indices at which the sequence set appears
+
+    // batch indices of the output
+    std::vector<int32_t> out_ids;
+
+    uint32_t n_used;
+
+    // used[i] indicates if token i has already been used in a previous ubatch
+    std::vector<bool> used;
+
+    int debug;
 };
diff --git a/llama/llama.cpp/src/llama-chat.cpp b/llama/llama.cpp/src/llama-chat.cpp
index d12743e6b..0a96a9a57 100644
--- a/llama/llama.cpp/src/llama-chat.cpp
+++ b/llama/llama.cpp/src/llama-chat.cpp
@@ -56,6 +56,7 @@ static const std::map<std::string, llm_chat_template> LLM_CHAT_TEMPLATES = {
     { "glmedge",           LLM_CHAT_TEMPLATE_GLMEDGE        },
     { "minicpm",           LLM_CHAT_TEMPLATE_MINICPM        },
     { "exaone3",           LLM_CHAT_TEMPLATE_EXAONE_3       },
+    { "exaone4",           LLM_CHAT_TEMPLATE_EXAONE_4       },
     { "rwkv-world",        LLM_CHAT_TEMPLATE_RWKV_WORLD     },
     { "granite",           LLM_CHAT_TEMPLATE_GRANITE        },
     { "gigachat",          LLM_CHAT_TEMPLATE_GIGACHAT       },
@@ -64,6 +65,10 @@ static const std::map<std::string, llm_chat_template> LLM_CHAT_TEMPLATES = {
     { "bailing",           LLM_CHAT_TEMPLATE_BAILING        },
     { "llama4",            LLM_CHAT_TEMPLATE_LLAMA4         },
    { "smolvlm",           LLM_CHAT_TEMPLATE_SMOLVLM        },
+    { "hunyuan-moe",       LLM_CHAT_TEMPLATE_HUNYUAN_MOE    },
+    { "gpt-oss",           LLM_CHAT_TEMPLATE_OPENAI_MOE     },
+    { "hunyuan-dense",     LLM_CHAT_TEMPLATE_HUNYUAN_DENSE  },
+    { "kimi-k2",           LLM_CHAT_TEMPLATE_KIMI_K2        },
 };
 
 llm_chat_template llm_chat_template_from_str(const std::string & name) {
@@ -166,10 +171,13 @@ llm_chat_template llm_chat_detect_template(const std::string & tmpl) {
     } else if (tmpl_contains(LU8("<|Assistant|>")) && tmpl_contains(LU8("<|User|>")) && tmpl_contains(LU8("<|end▁of▁sentence|>"))) {
         return LLM_CHAT_TEMPLATE_DEEPSEEK_3;
     } else if (tmpl_contains("[|system|]") && tmpl_contains("[|assistant|]") && tmpl_contains("[|endofturn|]")) {
+        if (tmpl_contains("[|tool|]")) {
+            return LLM_CHAT_TEMPLATE_EXAONE_4;
+        }
         // ref: https://huggingface.co/LGAI-EXAONE/EXAONE-3.0-7.8B-Instruct/discussions/8#66bae61b1893d14ee8ed85bb
         // EXAONE-3.0-7.8B-Instruct
         return LLM_CHAT_TEMPLATE_EXAONE_3;
-    } else if (tmpl_contains("rwkv-world")) {
+    } else if (tmpl_contains("rwkv-world") || tmpl_contains("{{- 'User: ' + message['content']|trim + '\\n\\n' -}}")) {
         return LLM_CHAT_TEMPLATE_RWKV_WORLD;
     } else if (tmpl_contains("<|start_of_role|>")) {
         return LLM_CHAT_TEMPLATE_GRANITE;
@@ -183,6 +191,16 @@ llm_chat_template llm_chat_detect_template(const std::string & tmpl) {
         return LLM_CHAT_TEMPLATE_BAILING;
     } else if (tmpl_contains("<|header_start|>") && tmpl_contains("<|header_end|>")) {
         return LLM_CHAT_TEMPLATE_LLAMA4;
+    } else if (tmpl_contains("<|endofuserprompt|>")) {
+        return LLM_CHAT_TEMPLATE_DOTS1;
+    } else if (tmpl_contains("<|extra_0|>") && tmpl_contains("<|extra_4|>")) {
+        return LLM_CHAT_TEMPLATE_HUNYUAN_MOE;
+    } else if (tmpl_contains("<|start|>") && tmpl_contains("<|channel|>")) {
+        return LLM_CHAT_TEMPLATE_OPENAI_MOE;
+    } else if
(tmpl_contains("<|hy_Assistant|>") && tmpl_contains("<|hy_place▁holder▁no▁3|>")) { + return LLM_CHAT_TEMPLATE_HUNYUAN_DENSE; + } else if (tmpl_contains("<|im_assistant|>assistant<|im_middle|>")) { + return LLM_CHAT_TEMPLATE_KIMI_K2; } return LLM_CHAT_TEMPLATE_UNKNOWN; } @@ -331,7 +349,7 @@ int32_t llm_chat_apply_template( std::string role(message->role); if (role == "system") { // there is no system message for gemma, but we will merge it with user prompt, so nothing is broken - system_prompt = trim(message->content); + system_prompt += trim(message->content); continue; } // in gemma, "assistant" is "model" @@ -353,7 +371,7 @@ int32_t llm_chat_apply_template( std::string role(message->role); if (role == "system") { // there is no system message support, we will merge it with user prompt - system_prompt = message->content; + system_prompt += message->content; continue; } else if (role == "user") { ss << "Human: "; @@ -524,14 +542,35 @@ int32_t llm_chat_apply_template( if (add_ass) { ss << "[|assistant|]"; } - } else if (tmpl == LLM_CHAT_TEMPLATE_RWKV_WORLD) { - // this template requires the model to have "\n\n" as EOT token + } else if (tmpl == LLM_CHAT_TEMPLATE_EXAONE_4) { for (auto message : chat) { std::string role(message->role); - if (role == "user") { - ss << "User: " << message->content << "\n\nAssistant:"; - } else { - ss << message->content << "\n\n"; + if (role == "system") { + ss << "[|system|]" << trim(message->content) << "[|endofturn|]\n"; + } else if (role == "user") { + ss << "[|user|]" << trim(message->content) << "\n"; + } else if (role == "assistant") { + ss << "[|assistant|]" << trim(message->content) << "[|endofturn|]\n"; + } else if (role == "tool") { + ss << "[|tool|]" << trim(message->content) << "[|endofturn|]\n"; + } + } + if (add_ass) { + ss << "[|assistant|]"; + } + } else if (tmpl == LLM_CHAT_TEMPLATE_RWKV_WORLD) { + // this template requires the model to have "\n\n" as EOT token + for (size_t i = 0; i < chat.size(); i++) { + std::string role(chat[i]->role); + if (role == "system") { + ss << "System: " << trim(chat[i]->content) << "\n\n"; + } else if (role == "user") { + ss << "User: " << trim(chat[i]->content) << "\n\n"; + if (i == chat.size() - 1) { + ss << "Assistant:"; + } + } else if (role == "assistant") { + ss << "Assistant: " << trim(chat[i]->content) << "\n\n"; } } } else if (tmpl == LLM_CHAT_TEMPLATE_GRANITE) { @@ -586,8 +625,6 @@ int32_t llm_chat_apply_template( } else if (tmpl == LLM_CHAT_TEMPLATE_YANDEX) { // Yandex template ("\n\n" is defined as EOT token) - ss << ""; - for (size_t i = 0; i < chat.size(); i++) { std::string role(chat[i]->role); if (role == "user") { @@ -643,6 +680,78 @@ int32_t llm_chat_apply_template( if (add_ass) { ss << "Assistant:"; } + } else if (tmpl == LLM_CHAT_TEMPLATE_DOTS1) { + // dots.llm1.inst (DOTS1) + for (auto message : chat) { + std::string role(message->role); + if (role == "system") { + ss << "<|system|>" << message->content << "<|endofsystem|>"; + } else if (role == "user") { + ss << "<|userprompt|>" << message->content << "<|endofuserprompt|>"; + } else { + ss << "<|response|>" << message->content << "<|endofresponse|>"; + } + } + if (add_ass) { + ss << "<|response|>"; + } + } else if (tmpl == LLM_CHAT_TEMPLATE_HUNYUAN_MOE) { + // tencent/Hunyuan-A13B-Instruct + for (auto message : chat) { + std::string role(message->role); + if (role == "system") { + ss << "<|startoftext|>" << message->content << "<|extra_4|>"; + } else if (role == "assistant") { + ss << message->content << "<|eos|>"; + } else { + ss << 
"<|startoftext|>" << message->content << "<|extra_0|>"; + } + } + } else if (tmpl == LLM_CHAT_TEMPLATE_OPENAI_MOE) { + // OpenAI MoE (based on Harmony chat template) + for (auto message : chat) { + std::string role(message->role); + ss << "<|start|>" << role << "<|message|>" << message->content; + ss << (role == "assistant" ? "<|return|>" : "<|end|>"); + } + if (add_ass) { + ss << "<|start|>assistant"; + } + } else if (tmpl == LLM_CHAT_TEMPLATE_HUNYUAN_DENSE) { + // tencent/Hunyuan-4B-Instruct + for (size_t i = 0; i < chat.size(); i++) { + std::string role(chat[i]->role); + if (i == 0) { + if (role == "system") { + ss << chat[i]->content << "<|hy_place▁holder▁no▁3|>"; + } + } + + if (role == "assistant") { + ss << "<|hy_Assistant|>" << chat[i]->content << "<|hy_place▁holder▁no▁2|>"; + } else if (role == "user") { + ss << "<|hy_User|>" << chat[i]->content << "<|hy_Assistant|>"; + } + } + } else if (tmpl == LLM_CHAT_TEMPLATE_KIMI_K2) { + // moonshotai/Kimi-K2-Instruct + for (auto message : chat) { + std::string role(message->role); + if (role == "system") { + ss << "<|im_system|>system<|im_middle|>"; + } else if (role == "user") { + ss << "<|im_user|>user<|im_middle|>"; + } else if (role == "assistant") { + ss << "<|im_assistant|>assistant<|im_middle|>"; + } else if (role == "tool") { + ss << "<|im_system|>tool<|im_middle|>"; + } + + ss << message->content << "<|im_end|>"; + } + if (add_ass) { + ss << "<|im_assistant|>assistant<|im_middle|>"; + } } else { // template not supported return -1; diff --git a/llama/llama.cpp/src/llama-chat.h b/llama/llama.cpp/src/llama-chat.h index db24ade21..35a943856 100644 --- a/llama/llama.cpp/src/llama-chat.h +++ b/llama/llama.cpp/src/llama-chat.h @@ -35,6 +35,7 @@ enum llm_chat_template { LLM_CHAT_TEMPLATE_GLMEDGE, LLM_CHAT_TEMPLATE_MINICPM, LLM_CHAT_TEMPLATE_EXAONE_3, + LLM_CHAT_TEMPLATE_EXAONE_4, LLM_CHAT_TEMPLATE_RWKV_WORLD, LLM_CHAT_TEMPLATE_GRANITE, LLM_CHAT_TEMPLATE_GIGACHAT, @@ -43,6 +44,11 @@ enum llm_chat_template { LLM_CHAT_TEMPLATE_BAILING, LLM_CHAT_TEMPLATE_LLAMA4, LLM_CHAT_TEMPLATE_SMOLVLM, + LLM_CHAT_TEMPLATE_DOTS1, + LLM_CHAT_TEMPLATE_HUNYUAN_MOE, + LLM_CHAT_TEMPLATE_OPENAI_MOE, + LLM_CHAT_TEMPLATE_HUNYUAN_DENSE, + LLM_CHAT_TEMPLATE_KIMI_K2, LLM_CHAT_TEMPLATE_UNKNOWN, }; diff --git a/llama/llama.cpp/src/llama-context.cpp b/llama/llama.cpp/src/llama-context.cpp index 1f3a39564..26a5cf9c3 100644 --- a/llama/llama.cpp/src/llama-context.cpp +++ b/llama/llama.cpp/src/llama-context.cpp @@ -1,14 +1,16 @@ #include "llama-context.h" #include "llama-impl.h" +#include "llama-batch.h" #include "llama-io.h" +#include "llama-memory.h" #include "llama-mmap.h" #include "llama-model.h" -#include "llama-kv-cache.h" -#include -#include #include +#include +#include +#include // // llama_context @@ -17,7 +19,8 @@ llama_context::llama_context( const llama_model & model, llama_context_params params) : - model(model) { + model(model), + balloc(std::make_unique(model.hparams.n_pos_per_embd())) { LLAMA_LOG_INFO("%s: constructing llama_context\n", __func__); t_start_us = model.t_start_us; @@ -25,7 +28,11 @@ llama_context::llama_context( const auto & hparams = model.hparams; - cparams.n_seq_max = std::max(1u, params.n_seq_max); + cparams.n_seq_max = std::max(1u, params.n_seq_max); + if (cparams.n_seq_max > LLAMA_MAX_SEQ) { + throw std::runtime_error("n_seq_max must be <= " + std::to_string(LLAMA_MAX_SEQ)); + } + cparams.n_threads = params.n_threads; cparams.n_threads_batch = params.n_threads_batch; cparams.yarn_ext_factor = params.yarn_ext_factor; @@ -91,9 +98,29 @@ 
llama_context::llama_context( LLAMA_LOG_WARN("%s: n_batch is less than GGML_KQ_MASK_PAD - increasing to %d\n", __func__, GGML_KQ_MASK_PAD); cparams.n_batch = GGML_KQ_MASK_PAD; } - cparams.n_ubatch = std::min(cparams.n_batch, params.n_ubatch == 0 ? params.n_batch : params.n_ubatch); + cparams.op_offload = params.op_offload; + cparams.kv_unified = params.kv_unified; + + { + const char * LLAMA_SET_ROWS = getenv("LLAMA_SET_ROWS"); + supports_set_rows = LLAMA_SET_ROWS ? (atoi(LLAMA_SET_ROWS) != 0) : supports_set_rows; + + if (!supports_set_rows && !cparams.kv_unified) { + LLAMA_LOG_WARN("%s: non-unified KV cache requires ggml_set_rows() - forcing unified KV cache\n", __func__); + cparams.kv_unified = true; + } + } + + { + const char * LLAMA_GRAPH_REUSE_DISABLE = getenv("LLAMA_GRAPH_REUSE_DISABLE"); + graph_reuse_disable = LLAMA_GRAPH_REUSE_DISABLE ? (atoi(LLAMA_GRAPH_REUSE_DISABLE) != 0) : graph_reuse_disable; + + if (graph_reuse_disable) { + LLAMA_LOG_WARN("%s: graph reuse disabled\n", __func__); + } + } const uint32_t n_ctx_per_seq = cparams.n_ctx / cparams.n_seq_max; @@ -104,6 +131,7 @@ llama_context::llama_context( LLAMA_LOG_INFO("%s: n_ubatch = %u\n", __func__, cparams.n_ubatch); LLAMA_LOG_INFO("%s: causal_attn = %d\n", __func__, cparams.causal_attn); LLAMA_LOG_INFO("%s: flash_attn = %d\n", __func__, cparams.flash_attn); + LLAMA_LOG_INFO("%s: kv_unified = %s\n", __func__, cparams.kv_unified ? "true" : "false"); LLAMA_LOG_INFO("%s: freq_base = %.1f\n", __func__, cparams.rope_freq_base); LLAMA_LOG_INFO("%s: freq_scale = %g\n", __func__, cparams.rope_freq_scale); @@ -117,6 +145,11 @@ llama_context::llama_context( __func__, n_ctx_per_seq, hparams.n_ctx_train); } + if (!params.swa_full && cparams.n_seq_max > 1 && hparams.is_swa_any()) { + LLAMA_LOG_WARN("%s: requested n_seq_max (%u) > 1, but swa_full is not enabled -- performance may be degraded: %s\n", + __func__, cparams.n_seq_max, "https://github.com/ggml-org/llama.cpp/pull/13845#issuecomment-2924800573"); + } + if (!hparams.vocab_only) { // GPU backends for (auto * dev : model.devices) { @@ -176,8 +209,9 @@ llama_context::llama_context( // init the memory module if (!hparams.vocab_only) { llama_memory_params params_mem = { - /*.type_k =*/ params.type_k, - /*.type_v =*/ params.type_v, + /*.type_k =*/ params.type_k, + /*.type_v =*/ params.type_v, + /*.swa_full =*/ params.swa_full, }; memory.reset(model.create_memory(params_mem, cparams)); @@ -213,8 +247,8 @@ llama_context::llama_context( LLAMA_LOG_DEBUG("%s: max_nodes = %zu\n", __func__, max_nodes); - // buffer used to store the computation graph and the tensor meta data - buf_compute_meta.resize(ggml_tensor_overhead()*max_nodes + ggml_graph_overhead_custom(max_nodes, false)); + gf_res_prev.reset(new llm_graph_result(max_nodes)); + gf_res_reserve.reset(new llm_graph_result(max_nodes)); // TODO: move these checks to ggml_backend_sched // enabling pipeline parallelism in the scheduler increases memory usage, so it is only done when necessary @@ -253,15 +287,9 @@ llama_context::llama_context( // reserve worst-case graph if (!hparams.vocab_only && memory) { - const uint32_t n_seqs = 1; // TODO: worst-case number of sequences + const uint32_t n_seqs = cparams.kv_unified ? 
1 : cparams.n_seq_max;
         const uint32_t n_tokens = std::min(cparams.n_ctx, cparams.n_ubatch);
 
-        llama_token token = model.vocab.token_bos(); // not actually used by llama_build_graph, but required to choose between token and embedding inputs graph
-
-        // restore later
-        // TODO: something cleaner
-        const auto n_outputs_save = n_outputs;
-
         LLAMA_LOG_DEBUG("%s: worst-case: n_tokens = %d, n_seqs = %d, n_outputs = %d\n", __func__, n_tokens, n_seqs, n_outputs);
 
         int n_splits_pp = -1;
@@ -271,25 +299,18 @@ llama_context::llama_context(
         int n_nodes_tg = -1;
 
         // simulate full KV cache
-        llama_kv_cache * kv_self = static_cast<llama_kv_cache *>(memory.get());
-        kv_self->set_full();
+        const auto mctx = memory->init_full();
+        if (!mctx) {
+            throw std::runtime_error("failed to initialize KV cache");
+        }
 
         cross.v_embd.clear();
 
-        // reserve pp graph first so that buffers are only allocated once
+        // reserve pp (prompt processing) graph first so that buffers are only allocated once
         {
-            llama_ubatch ubatch_pp = { true, n_tokens, n_tokens / n_seqs, n_seqs, &token, nullptr, nullptr, nullptr, nullptr, nullptr};
-
-            // max number of outputs
-            n_outputs = ubatch_pp.n_tokens;
-
-            LLAMA_LOG_DEBUG("%s: reserving graph for n_tokens = %d, n_seqs = %d\n", __func__, ubatch_pp.n_tokens, ubatch_pp.n_seqs);
-
-            auto * gf = graph_init();
-            graph_build(ctx_compute.get(), gf, ubatch_pp, LLM_GRAPH_TYPE_DEFAULT);
-
-            if (!ggml_backend_sched_reserve(sched.get(), gf)) {
+            auto * gf = graph_reserve(n_tokens, n_seqs, n_tokens, mctx.get());
+            if (!gf) {
                 throw std::runtime_error("failed to allocate compute pp buffers");
             }
 
@@ -297,18 +318,10 @@ llama_context::llama_context(
             n_nodes_pp = ggml_graph_n_nodes(gf);
         }
 
-        // reserve with tg graph to get the number of splits and nodes
+        // reserve with tg (token generation) graph to get the number of splits and nodes
         {
-            llama_ubatch ubatch_tg = { true, 1, 1, n_seqs, &token, nullptr, nullptr, nullptr, nullptr, nullptr};
-
-            n_outputs = ubatch_tg.n_tokens;
-
-            LLAMA_LOG_DEBUG("%s: reserving graph for n_tokens = %d, n_seqs = %d\n", __func__, ubatch_tg.n_tokens, ubatch_tg.n_seqs);
-
-            auto * gf = graph_init();
-            graph_build(ctx_compute.get(), gf, ubatch_tg, LLM_GRAPH_TYPE_DEFAULT);
-
-            if (!ggml_backend_sched_reserve(sched.get(), gf)) {
+            auto * gf = graph_reserve(n_seqs, n_seqs, n_seqs, mctx.get());
+            if (!gf) {
                 throw std::runtime_error("failed to allocate compute tg buffers");
            }
 
@@ -318,22 +331,16 @@ llama_context::llama_context(
 
         // reserve again with pp graph to avoid ggml-alloc reallocations during inference
         {
-            llama_ubatch ubatch_pp = { true, n_tokens, n_tokens / n_seqs, n_seqs, &token, nullptr, nullptr, nullptr, nullptr, nullptr};
-
-            n_outputs = ubatch_pp.n_tokens;
-
-            LLAMA_LOG_DEBUG("%s: reserving graph for n_tokens = %d, n_seqs = %d\n", __func__, ubatch_pp.n_tokens, ubatch_pp.n_seqs);
-
-            auto * gf = graph_init();
-            graph_build(ctx_compute.get(), gf, ubatch_pp, LLM_GRAPH_TYPE_DEFAULT);
-
-            if (!ggml_backend_sched_reserve(sched.get(), gf)) {
+            // TODO: not sure if the following graph would be the worst case for multi-stream KV caches:
+            //
+            //   auto * gf = graph_reserve(n_tokens, 1, n_tokens, mctx.get());
+            //
+            auto * gf = graph_reserve(n_tokens, n_seqs, n_tokens, mctx.get());
+            if (!gf) {
                 throw std::runtime_error("failed to allocate compute pp buffers");
             }
         }
 
-        n_outputs = n_outputs_save;
-
         for (size_t i = 0; i < backend_ptrs.size(); ++i) {
             ggml_backend_t backend = backend_ptrs[i];
             ggml_backend_buffer_type_t buft = backend_buft[i];
@@ -405,10 +412,6 @@ ggml_backend_sched_t llama_context::get_sched() const {
     return sched.get();
 }
 
-ggml_context * llama_context::get_ctx_compute() const {
-    return ctx_compute.get();
-}
-
 uint32_t llama_context::n_ctx() const {
     return cparams.n_ctx;
 }
@@ -437,46 +440,76 @@ uint32_t llama_context::n_threads_batch() const {
     return cparams.n_threads_batch;
 }
 
-llama_kv_cache * llama_context::get_kv_self() {
-    llama_kv_cache * kv_self = static_cast<llama_kv_cache *>(memory.get());
-    return kv_self;
+llama_memory_t llama_context::get_memory() const {
+    return memory.get();
 }
 
-const llama_kv_cache * llama_context::get_kv_self() const {
-    llama_kv_cache * kv_self = static_cast<llama_kv_cache *>(memory.get());
-    return kv_self;
+// deprecated
+void llama_context::kv_self_defrag_sched() {
+    if (!memory) {
+        return;
+    }
+
+    memory_force_optimize = true;
 }
 
-void llama_context::kv_self_update() {
-    bool need_reserve = false;
+// deprecated
+bool llama_context::kv_self_update(bool optimize) {
+    if (!memory) {
+        return false;
+    }
 
-    llama_kv_cache * kv_self = static_cast<llama_kv_cache *>(memory.get());
+    {
+        // TODO: remove in the future
+        optimize |= memory_force_optimize;
+        memory_force_optimize = false;
 
-    need_reserve = kv_self->update(*this);
+        const auto mctx = memory->init_update(this, optimize);
+        switch (mctx->get_status()) {
+            case LLAMA_MEMORY_STATUS_SUCCESS:
+                {
+                    // noop
+                } break;
+            case LLAMA_MEMORY_STATUS_NO_UPDATE:
+                {
+                    // no updates need to be performed
+                    return false;
+                }
+            case LLAMA_MEMORY_STATUS_FAILED_PREPARE:
+            case LLAMA_MEMORY_STATUS_FAILED_COMPUTE:
+                {
+                    LLAMA_LOG_ERROR("%s: failed to prepare memory update\n", __func__);
+                    return false;
+                }
+        }
 
-    // reserve a worst case graph if needed
-    if (need_reserve) {
-        LLAMA_LOG_DEBUG("%s: reserving a worst case graph\n", __func__);
+        // reset the previous graph result to make sure that it won't be reused
+        // TODO: change the mctx->apply() to return information if a graph reserve is needed
+        //       reset the graph result only if the memory module did reset the scheduler
+        gf_res_prev->reset();
 
-        // build worst-case graph
-        uint32_t n_seqs = 1; // TODO: worst-case number of sequences
-        uint32_t n_tokens = std::min(cparams.n_ctx, cparams.n_ubatch);
-
-        // simulate full KV cache
-        kv_self->set_full();
-
-        llama_token token = model.vocab.token_bos(); // not actually used by llama_build_graph, but required to choose between token and embedding inputs graph
-        llama_ubatch ubatch = { true, n_tokens, n_tokens / n_seqs, n_seqs, &token, nullptr, nullptr, nullptr, nullptr, nullptr};
-
-        auto * gf = graph_init();
-        graph_build(ctx_compute.get(), gf, ubatch, LLM_GRAPH_TYPE_DEFAULT);
-
-        // initialize scheduler with the worst-case graph
-        ggml_backend_sched_reset(sched.get());
-        if (!ggml_backend_sched_reserve(sched.get(), gf)) {
-            LLAMA_LOG_ERROR("%s: failed to allocate compute buffers\n", __func__);
+        if (!mctx->apply()) {
+            LLAMA_LOG_ERROR("%s: failed to apply memory update\n", __func__);
         }
     }
+
+    // if the memory module did any computation, we have to reserve a new worst-case graph
+    {
+        const auto mctx = memory->init_full();
+        if (!mctx) {
+            throw std::runtime_error("failed to initialize memory context");
+        }
+
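+        // worst-case ubatch: a single sequence set when the KV cache is unified, otherwise one per sequence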
+        const uint32_t n_seqs   = cparams.kv_unified ? 1 : cparams.n_seq_max;
+        const uint32_t n_tokens = std::min(cparams.n_ctx, cparams.n_ubatch);
+
+        auto * gf = graph_reserve(n_tokens, n_seqs, n_tokens, mctx.get());
+        if (!gf) {
+            LLAMA_LOG_ERROR("%s: failed to reserve graph after the memory update\n", __func__);
+        }
+    }
+
+    return true;
 }
 
 enum llama_pooling_type llama_context::pooling_type() const {
@@ -484,11 +517,15 @@ enum llama_pooling_type llama_context::pooling_type() const {
 }
 
 float * llama_context::get_logits() {
+    output_reorder();
+
     return logits;
 }
 
 float * llama_context::get_logits_ith(int32_t i) {
-    int32_t j = -1;
+    int64_t j = -1;
+
+    output_reorder();
 
     try {
         if (logits == nullptr) {
@@ -511,7 +548,7 @@ float * llama_context::get_logits_ith(int32_t i) {
         }
         if (j >= n_outputs) {
             // This should not happen
-            throw std::runtime_error(format("corrupt output buffer (j=%d, n_outputs=%d)", j, n_outputs));
+            throw std::runtime_error(format("corrupt output buffer (j=%" PRId64 ", n_outputs=%d)", j, n_outputs));
         }
 
         return logits + j*model.vocab.n_tokens();
@@ -526,11 +563,15 @@ float * llama_context::get_logits_ith(int32_t i) {
 }
 
 float * llama_context::get_embeddings() {
+    output_reorder();
+
     return embd;
 }
 
 float * llama_context::get_embeddings_ith(int32_t i) {
-    int32_t j = -1;
+    int64_t j = -1;
+
+    output_reorder();
 
     try {
         if (embd == nullptr) {
@@ -553,7 +594,7 @@ float * llama_context::get_embeddings_ith(int32_t i) {
         }
         if (j >= n_outputs) {
             // This should not happen
-            throw std::runtime_error(format("corrupt output buffer (j=%d, n_outputs=%d)", j, n_outputs));
+            throw std::runtime_error(format("corrupt output buffer (j=%" PRId64 ", n_outputs=%d)", j, n_outputs));
         }
 
         return embd + j*model.hparams.n_embd;
@@ -670,66 +711,119 @@ bool llama_context::apply_adapter_cvec(
     return cvec.apply(model, data, len, n_embd, il_start, il_end);
 }
 
-int llama_context::encode(llama_batch & inp_batch) {
-    if (inp_batch.n_tokens == 0) {
+llm_graph_result * llama_context::process_ubatch(const llama_ubatch & ubatch, llm_graph_type gtype, llama_memory_context_i * mctx, ggml_status & ret) {
+    if (mctx && !mctx->apply()) {
+        LLAMA_LOG_ERROR("%s: failed to apply memory context\n", __func__);
+        ret = GGML_STATUS_FAILED;
+        return nullptr;
+    }
+
+    auto * res = gf_res_prev.get();
+    auto * gf  = res->get_gf();
+
+    // the new graph parameters
+    // in order to correctly reuse a graph, its full topology has to be uniquely determined by these parameters
+    const auto gparams = graph_params(res, ubatch, mctx, gtype);
+
+    if (!graph_reuse_disable && res->can_reuse(gparams)) {
+        //LLAMA_LOG_DEBUG("%s: reusing previous graph\n", __func__);
+
+        n_reused++;
+    } else {
+        res->reset();
+
+        ggml_backend_sched_reset(sched.get());
+        ggml_backend_sched_set_eval_callback(sched.get(), cparams.cb_eval, cparams.cb_eval_user_data);
+
+        //const auto t_start_us = ggml_time_us();
+
+        gf = model.build_graph(gparams);
+
+        //LLAMA_LOG_INFO("graph build time: %.3f ms\n", (ggml_time_us() - t_start_us)/1000.0);
+
+        if (!gf) {
+            LLAMA_LOG_ERROR("%s: failed to initialize graph\n", __func__);
+            ret = GGML_STATUS_FAILED;
+            return nullptr;
+        }
+
+        if (!ggml_backend_sched_alloc_graph(sched.get(), gf)) {
+            LLAMA_LOG_ERROR("%s: failed to allocate graph\n", __func__);
+            ret = GGML_STATUS_ALLOC_FAILED;
+            return nullptr;
+        }
+    }
+
+    // set the input data for the input tensors
+    {
+        //const auto t_start_us = ggml_time_us();
+
+        res->set_inputs(&ubatch);
+
+        //LLAMA_LOG_INFO("graph set inputs time: %.3f ms\n", (ggml_time_us() - t_start_us)/1000.0);
+    }
+
+    const auto status = graph_compute(res->get_gf(), 
ubatch.n_tokens > 1); + if (status != GGML_STATUS_SUCCESS) { + LLAMA_LOG_ERROR("%s: failed to compute graph, compute status: %d\n", __func__, status); + ret = status; + return nullptr; + } + + ret = GGML_STATUS_SUCCESS; + + return res; +} + +int llama_context::encode(const llama_batch & batch_inp) { + GGML_ASSERT((!batch_inp.token && batch_inp.embd) || (batch_inp.token && !batch_inp.embd)); // NOLINT + + if (batch_inp.n_tokens == 0) { LLAMA_LOG_ERROR("%s: n_tokens == 0\n", __func__); return -1; } - // temporary allocate memory for the input batch if needed - // note: during encode, we always pass the full sequence starting from pos = 0 - llama_batch_allocr batch_allocr(inp_batch, inp_batch.pos ? -1 : 0); - - const llama_batch & batch = batch_allocr.batch; - const int32_t n_tokens = batch.n_tokens; - const auto & hparams = model.hparams; - GGML_ASSERT((!batch.token && batch.embd) || (batch.token && !batch.embd)); // NOLINT + const int64_t n_embd = hparams.n_embd; + const int64_t n_vocab = model.vocab.n_tokens(); - if (batch.token) { - for (int32_t i = 0; i < n_tokens; ++i) { - if (batch.token[i] < 0 || (uint32_t) batch.token[i] >= model.vocab.n_tokens()) { - LLAMA_LOG_ERROR("%s: invalid token[%d] = %d\n", __func__, i, batch.token[i]); - return -1; - } - } + // note: during encode, we always pass the full sequence starting from pos = 0 + if (!balloc->init(batch_inp, model.vocab, nullptr, n_embd, cparams.kv_unified ? LLAMA_MAX_SEQ : cparams.n_seq_max, true)) { + LLAMA_LOG_ERROR("%s: failed to initialize batch\n", __func__); + return -1; } + const uint32_t n_tokens = balloc->get_n_tokens(); + + // [TAG_NO_CACHE_PAD] + // TODO: add new split mode where we pad the input sequences so that ubatch.equal_seqs == true + const llama_ubatch ubatch = balloc->split_simple(n_tokens); + // micro-batching is not possible for non-causal encoding, so we process the batch in a single shot - GGML_ASSERT(cparams.n_ubatch >= (uint32_t) n_tokens && "encoder requires n_ubatch >= n_tokens"); + GGML_ASSERT(cparams.n_ubatch >= n_tokens && "encoder requires n_ubatch >= n_tokens"); if (t_compute_start_us == 0) { t_compute_start_us = ggml_time_us(); } + // TODO: this clear of the buffer can easily be forgotten - need something better embd_seq.clear(); n_queued_tokens += n_tokens; - const int64_t n_embd = hparams.n_embd; - - llama_sbatch sbatch = llama_sbatch(batch, n_embd, /* simple_split */ true, /* logits_all */ true); - - const llama_ubatch ubatch = sbatch.split_simple(n_tokens); - // reserve output buffer if (output_reserve(n_tokens) < n_tokens) { LLAMA_LOG_ERROR("%s: could not reserve space for batch with %u outputs\n", __func__, n_tokens); return -2; }; - for (int32_t i = 0; i < n_tokens; ++i) { + for (uint32_t i = 0; i < n_tokens; ++i) { output_ids[i] = i; } n_outputs = n_tokens; - //batch_manager->prepare(ubatch); - - ggml_backend_sched_reset(sched.get()); - ggml_backend_sched_set_eval_callback(sched.get(), cparams.cb_eval, cparams.cb_eval_user_data); - const auto causal_attn_org = cparams.causal_attn; // always use non-causal attention for encoder graphs @@ -737,32 +831,34 @@ int llama_context::encode(llama_batch & inp_batch) { // ref: https://github.com/ggml-org/llama.cpp/pull/12181#issuecomment-2730451223 cparams.causal_attn = false; - auto * gf = graph_init(); - auto res = graph_build(ctx_compute.get(), gf, ubatch, LLM_GRAPH_TYPE_ENCODER); - - ggml_backend_sched_alloc_graph(sched.get(), gf); - - res->set_inputs(&ubatch); + ggml_status status; + const auto * res = process_ubatch(ubatch, LLM_GRAPH_TYPE_ENCODER, 
nullptr, status); cparams.causal_attn = causal_attn_org; - const auto compute_status = graph_compute(gf, n_tokens > 1); - switch (compute_status) { - case GGML_STATUS_SUCCESS: - break; - case GGML_STATUS_ABORTED: - return 2; - case GGML_STATUS_ALLOC_FAILED: - return -2; - case GGML_STATUS_FAILED: - default: - return -3; + if (!res) { + switch (status) { + case GGML_STATUS_ABORTED: return 2; + case GGML_STATUS_ALLOC_FAILED: return -2; + case GGML_STATUS_FAILED: return -3; + case GGML_STATUS_SUCCESS: GGML_ABORT("should not happen"); + } } + auto * t_logits = res->get_logits(); auto * t_embd = res->get_embd_pooled() ? res->get_embd_pooled() : res->get_embd(); + // extract logits + if (logits && t_logits) { + ggml_backend_t backend_res = ggml_backend_sched_get_tensor_backend(sched.get(), t_logits); + GGML_ASSERT(backend_res != nullptr); + GGML_ASSERT(logits != nullptr); + + ggml_backend_tensor_get_async(backend_res, t_logits, logits, 0, n_tokens*n_vocab*sizeof(float)); + } + // extract embeddings - if (t_embd) { + if (embd && t_embd) { ggml_backend_t backend_embd = ggml_backend_sched_get_tensor_backend(sched.get(), t_embd); GGML_ASSERT(backend_embd != nullptr); @@ -781,31 +877,28 @@ int llama_context::encode(llama_batch & inp_batch) { { // extract sequence embeddings auto & embd_seq_out = embd_seq; - embd_seq_out.clear(); - GGML_ASSERT(!ubatch.equal_seqs); // TODO: handle equal splits + for (uint32_t s = 0; s < ubatch.n_seqs_unq; ++s) { + const llama_seq_id seq_id = ubatch.seq_id_unq[s]; + const int32_t seq_idx = ubatch.seq_idx[seq_id]; - for (int32_t i = 0; i < n_tokens; i++) { - const llama_seq_id seq_id = ubatch.seq_id[i][0]; - if (embd_seq_out.find(seq_id) != embd_seq_out.end()) { - continue; - } embd_seq_out[seq_id].resize(n_embd); - ggml_backend_tensor_get_async(backend_embd, t_embd, embd_seq_out[seq_id].data(), (n_embd*seq_id)*sizeof(float), n_embd*sizeof(float)); + ggml_backend_tensor_get_async(backend_embd, t_embd, embd_seq_out[seq_id].data(), (n_embd*seq_idx)*sizeof(float), n_embd*sizeof(float)); } } break; case LLAMA_POOLING_TYPE_RANK: { - // extract the rerank score - a single float per sequence + // extract the rerank score - n_cls_out floats per sequence auto & embd_seq_out = embd_seq; - for (uint32_t s = 0; s < ubatch.n_seqs; ++s) { - const llama_seq_id seq_id = ubatch.seq_id[s][0]; - if (embd_seq_out.find(seq_id) != embd_seq_out.end()) { - continue; - } - embd_seq_out[seq_id].resize(1); - ggml_backend_tensor_get_async(backend_embd, t_embd, embd_seq_out[seq_id].data(), (seq_id)*sizeof(float), sizeof(float)); + const uint32_t n_cls_out = hparams.n_cls_out; + + for (uint32_t s = 0; s < ubatch.n_seqs_unq; ++s) { + const llama_seq_id seq_id = ubatch.seq_id_unq[s]; + const int32_t seq_idx = ubatch.seq_idx[seq_id]; + + embd_seq_out[seq_id].resize(n_cls_out); + ggml_backend_tensor_get_async(backend_embd, t_embd, embd_seq_out[seq_id].data(), (n_cls_out*seq_idx)*sizeof(float), n_cls_out*sizeof(float)); } } break; case LLAMA_POOLING_TYPE_UNSPECIFIED: @@ -815,9 +908,11 @@ int llama_context::encode(llama_batch & inp_batch) { } } - // Reset state for the next token before backend sync, to allow the CPU activities in the reset to - // overlap with device computation. - ggml_backend_sched_reset(sched.get()); + if (!supports_set_rows) { + // Reset state for the next token before backend sync, to allow the CPU activities in the reset to + // overlap with device computation. 
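+        // (when ggml_set_rows() is supported, the scheduler state is kept so the next graph can be reused)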
+        ggml_backend_sched_reset(sched.get());
+    }
 
     // TODO: hacky solution
     if (model.arch == LLM_ARCH_T5 && t_embd) {
@@ -830,12 +925,16 @@ int llama_context::encode(llama_batch & inp_batch) {
         cross.v_embd.resize(cross.n_embd*cross.n_enc);
         memcpy(cross.v_embd.data(), embd, ggml_nbytes(t_embd));
 
+        const auto & batch = balloc->get_batch();
+
         // remember the sequence ids used during the encoding - needed for cross attention later
         cross.seq_ids_enc.resize(n_tokens);
-        for (int32_t i = 0; i < n_tokens; i++) {
+        for (uint32_t i = 0; i < n_tokens; i++) {
             cross.seq_ids_enc[i].clear();
-            for (int s = 0; s < ubatch.n_seq_id[i]; s++) {
-                llama_seq_id seq_id = ubatch.seq_id[i][s];
+
+            for (int s = 0; s < batch.n_seq_id[i]; s++) {
+                const llama_seq_id seq_id = batch.seq_id[i][s];
+
                 cross.seq_ids_enc[i].insert(seq_id);
             }
         }
@@ -844,43 +943,42 @@ int llama_context::encode(llama_batch & inp_batch) {
     return 0;
 }
 
-int llama_context::decode(llama_batch & inp_batch) {
+int llama_context::decode(const llama_batch & batch_inp) {
+    GGML_ASSERT((!batch_inp.token && batch_inp.embd) || (batch_inp.token && !batch_inp.embd)); // NOLINT
+
     if (!memory) {
-        LLAMA_LOG_WARN("%s: cannot decode batches with this context (use llama_encode() instead)\n", __func__);
-        return encode(inp_batch);
+        LLAMA_LOG_DEBUG("%s: cannot decode batches with this context (calling encode() instead)\n", __func__);
+        return encode(batch_inp);
     }
 
-    if (inp_batch.n_tokens == 0) {
+    if (batch_inp.n_tokens == 0) {
         LLAMA_LOG_ERROR("%s: n_tokens == 0\n", __func__);
         return -1;
     }
 
-    llama_kv_cache * kv_self = static_cast<llama_kv_cache *>(memory.get());
-
-    // temporary allocate memory for the input batch if needed
-    // TODO: this is incorrect for multiple sequences because get_pos_max() is the maximum across all sequences
-    llama_batch_allocr batch_allocr(inp_batch, inp_batch.pos ? -1 : kv_self->get_pos_max() + 1);
-
-    const llama_batch & batch = batch_allocr.batch;
-
     const auto & vocab   = model.vocab;
     const auto & hparams = model.hparams;
 
-    const int32_t n_vocab = vocab.n_tokens();
+    const int64_t n_vocab = vocab.n_tokens();
+    const int64_t n_embd  = hparams.n_embd;
 
-    const int64_t n_tokens_all = batch.n_tokens;
-    const int64_t n_embd       = hparams.n_embd;
+    // when computing embeddings, all tokens are output
+    const bool output_all = cparams.embeddings;
 
-    llama_kv_cache_guard kv_guard(kv_self);
+    if (!balloc->init(batch_inp, vocab, memory.get(), n_embd, cparams.kv_unified ? 
LLAMA_MAX_SEQ : cparams.n_seq_max, output_all)) { + LLAMA_LOG_ERROR("%s: failed to initialize batch\n", __func__); + return -1; + } - GGML_ASSERT((!batch.token && batch.embd) || (batch.token && !batch.embd)); // NOLINT + const uint32_t n_tokens_all = balloc->get_n_tokens(); + const uint32_t n_outputs_all = balloc->get_n_outputs(); - if (batch.token) { - for (int64_t i = 0; i < n_tokens_all; ++i) { - if (batch.token[i] < 0 || (uint32_t) batch.token[i] >= model.vocab.n_tokens()) { - LLAMA_LOG_ERROR("%s: invalid token[%" PRId64 "] = %d\n", __func__, i, batch.token[i]); - throw std::runtime_error("invalid token"); - } + if (output_all) { + // require that all tokens are output + if (n_outputs_all != n_tokens_all) { + LLAMA_LOG_ERROR("%s: pooled embedding requires that all tokens are output (n_outputs_all = %d, n_tokens_all = %d)\n", + __func__, n_outputs_all, n_tokens_all); + return -1; } } @@ -893,49 +991,78 @@ int llama_context::decode(llama_batch & inp_batch) { } n_queued_tokens += n_tokens_all; - // this indicates we are doing pooled embedding, so we ignore batch.logits and output all tokens - const bool embd_pooled = cparams.embeddings && cparams.pooling_type != LLAMA_POOLING_TYPE_NONE; - + // TODO: this clear of the buffer can easily be forgotten - need something better embd_seq.clear(); + output_swaps.clear(); - int64_t n_outputs_all = 0; + bool did_optimize = false; - // count outputs - if (batch.logits) { - for (uint32_t i = 0; i < n_tokens_all; ++i) { - n_outputs_all += batch.logits[i] != 0; + // handle any pending defrags/shifts + kv_self_update(false); + + llama_memory_context_ptr mctx; + + while (true) { + mctx = memory->init_batch(*balloc, cparams.n_ubatch, output_all); + if (!mctx) { + return -2; } - } else if (embd_pooled) { - n_outputs_all = n_tokens_all; - } else { - // keep last output only - n_outputs_all = 1; - } - llama_sbatch sbatch = kv_self->sbatch_init(batch, /* logits_all */ n_outputs_all == n_tokens_all); + switch (mctx->get_status()) { + case LLAMA_MEMORY_STATUS_SUCCESS: + { + } break; + case LLAMA_MEMORY_STATUS_NO_UPDATE: + { + LLAMA_LOG_ERROR("%s: unexpected memory context status: %d\n", __func__, mctx->get_status()); + + return -2; + } + case LLAMA_MEMORY_STATUS_FAILED_PREPARE: + { + if (!did_optimize) { + did_optimize = true; + + if (kv_self_update(true)) { + LLAMA_LOG_DEBUG("%s: retrying batch size %d after cache optimization\n", __func__, balloc->get_n_tokens()); + + continue; + } + } + + LLAMA_LOG_WARN("%s: failed to find a memory slot for batch of size %d\n", __func__, balloc->get_n_tokens()); + + return 1; + } + case LLAMA_MEMORY_STATUS_FAILED_COMPUTE: + { + LLAMA_LOG_ERROR("%s: compute failed while preparing batch of size %d\n", __func__, balloc->get_n_tokens()); + + return -2; + } + } + + break; + } // reserve output buffer if (output_reserve(n_outputs_all) < n_outputs_all) { - LLAMA_LOG_ERROR("%s: could not reserve space for batch with %" PRId64 " outputs\n", __func__, n_outputs_all); + LLAMA_LOG_ERROR("%s: could not reserve space for batch with %d outputs\n", __func__, n_outputs_all); return -2; }; - // handle any pending defrags/shifts - kv_self_update(); - int64_t n_outputs_prev = 0; - while (sbatch.n_tokens > 0) { - llama_ubatch ubatch = kv_self->ubatch_next(sbatch, cparams.n_ubatch, embd_pooled); + do { + const auto & ubatch = mctx->get_ubatch(); - // count the outputs in this u_batch + // count the outputs in this ubatch { int32_t n_outputs_new = 0; if (n_outputs_all == n_tokens_all) { n_outputs_new = ubatch.n_tokens; } else { - 
GGML_ASSERT(ubatch.output);
                for (uint32_t i = 0; i < ubatch.n_tokens; i++) {
                    n_outputs_new += (int32_t) (ubatch.output[i] != 0);
                }
@@ -945,38 +1072,37 @@ int llama_context::decode(llama_batch & inp_batch) {
             n_outputs = n_outputs_new;
         }
 
-        // find KV slot
-        if (!kv_self->find_slot(ubatch)) {
-            kv_self->defrag_sched(-1.0f);
-            kv_self->update(*this);
-            if (!kv_self->find_slot(ubatch)) {
-                LLAMA_LOG_WARN("%s: failed to find KV cache slot for ubatch of size %d\n", __func__, ubatch.n_tokens);
-                return 1;
+        ggml_status status;
+        const auto * res = process_ubatch(ubatch, LLM_GRAPH_TYPE_DECODER, mctx.get(), status);
+
+        if (!res) {
+            // the last ubatch failed or was aborted -> remove all positions of that ubatch from the KV cache
+            llama_pos pos_min[LLAMA_MAX_SEQ];
+            for (int s = 0; s < LLAMA_MAX_SEQ; ++s) {
+                pos_min[s] = std::numeric_limits<llama_pos>::max();
             }
-        }
 
-        ggml_backend_sched_reset(sched.get());
-        ggml_backend_sched_set_eval_callback(sched.get(), cparams.cb_eval, cparams.cb_eval_user_data);
+            for (uint32_t i = 0; i < ubatch.n_tokens; ++i) {
+                const auto & seq_id = ubatch.seq_id[i][0];
 
-        auto * gf = graph_init();
-        auto res = graph_build(ctx_compute.get(), gf, ubatch, LLM_GRAPH_TYPE_DECODER);
+                pos_min[seq_id] = std::min(pos_min[seq_id], ubatch.pos[i]);
+            }
 
-        // LLAMA_LOG_INFO("graph build time: %.3f ms (%d nodes, %d leafs)\n", (ggml_time_us() - t_start_us)/1000.0, gf->n_nodes, gf->n_leafs);
+            for (int s = 0; s < LLAMA_MAX_SEQ; ++s) {
+                if (pos_min[s] == std::numeric_limits<llama_pos>::max()) {
+                    continue;
+                }
 
-        ggml_backend_sched_alloc_graph(sched.get(), gf);
+                LLAMA_LOG_WARN("%s: removing KV cache entries for seq_id = %d, pos = [%d, +inf)\n", __func__, s, pos_min[s]);
 
-        res->set_inputs(&ubatch);
+                memory->seq_rm(s, pos_min[s], -1);
+            }
 
-        const auto compute_status = graph_compute(gf, ubatch.n_tokens > 1);
-        if (compute_status != GGML_STATUS_SUCCESS) {
-            switch (compute_status) {
-                case GGML_STATUS_ABORTED:
-                    return 2;
-                case GGML_STATUS_ALLOC_FAILED:
-                    return -2;
-                case GGML_STATUS_FAILED:
-                default:
-                    return -3;
+            switch (status) {
+                case GGML_STATUS_ABORTED:      return 2;
+                case GGML_STATUS_ALLOC_FAILED: return -2;
+                case GGML_STATUS_FAILED:       return -3;
+                case GGML_STATUS_SUCCESS:      GGML_ABORT("should not happen");
             }
         }
 
@@ -985,7 +1111,7 @@ int llama_context::decode(llama_batch & inp_batch) {
         //    ggml_graph_dump_dot(gf, NULL, "llama.dot");
         //}
 
-        auto * t_logits = cparams.causal_attn ? res->get_logits() : nullptr;
+        auto * t_logits = res->get_logits();
         auto * t_embd   = cparams.embeddings ? 
res->get_embd() : nullptr; if (t_embd && res->get_embd_pooled()) { @@ -1032,27 +1158,27 @@ int llama_context::decode(llama_batch & inp_batch) { // extract sequence embeddings (cleared before processing each batch) auto & embd_seq_out = embd_seq; - for (uint32_t s = 0; s < ubatch.n_seqs; ++s) { - const llama_seq_id seq_id = ubatch.seq_id[s][0]; - if (embd_seq_out.find(seq_id) != embd_seq_out.end()) { - continue; - } + for (uint32_t s = 0; s < ubatch.n_seqs_unq; ++s) { + const llama_seq_id seq_id = ubatch.seq_id_unq[s]; + const int32_t seq_idx = ubatch.seq_idx[seq_id]; + embd_seq_out[seq_id].resize(n_embd); - ggml_backend_tensor_get_async(backend_embd, t_embd, embd_seq_out[seq_id].data(), (n_embd*seq_id)*sizeof(float), n_embd*sizeof(float)); + ggml_backend_tensor_get_async(backend_embd, t_embd, embd_seq_out[seq_id].data(), (n_embd*seq_idx)*sizeof(float), n_embd*sizeof(float)); } } break; case LLAMA_POOLING_TYPE_RANK: { - // extract the rerank score - a single float per sequence + // extract the rerank score - n_cls_out floats per sequence auto & embd_seq_out = embd_seq; - for (uint32_t s = 0; s < ubatch.n_seqs; ++s) { - const llama_seq_id seq_id = ubatch.seq_id[s][0]; - if (embd_seq_out.find(seq_id) != embd_seq_out.end()) { - continue; - } - embd_seq_out[seq_id].resize(1); - ggml_backend_tensor_get_async(backend_embd, t_embd, embd_seq_out[seq_id].data(), (seq_id)*sizeof(float), sizeof(float)); + const uint32_t n_cls_out = hparams.n_cls_out; + + for (uint32_t s = 0; s < ubatch.n_seqs_unq; ++s) { + const llama_seq_id seq_id = ubatch.seq_id_unq[s]; + const int32_t seq_idx = ubatch.seq_idx[seq_id]; + + embd_seq_out[seq_id].resize(n_cls_out); + ggml_backend_tensor_get_async(backend_embd, t_embd, embd_seq_out[seq_id].data(), (n_cls_out*seq_idx)*sizeof(float), n_cls_out*sizeof(float)); } } break; case LLAMA_POOLING_TYPE_UNSPECIFIED: @@ -1063,23 +1189,20 @@ int llama_context::decode(llama_batch & inp_batch) { } n_outputs_prev += n_outputs; - } - - // finalize the batch processing - kv_guard.commit(); + } while (mctx->next()); // set to total number of outputs in the batch, for use in llama_get_logits_ith n_outputs = n_outputs_all; // set output mappings - { + if (n_outputs > 0) { bool sorted_output = true; - auto & out_ids = sbatch.out_ids; + auto & out_ids = balloc->get_out_ids(); - GGML_ASSERT(out_ids.size() == (size_t) n_outputs_all); + GGML_ASSERT(out_ids.size() == (size_t) n_outputs); - for (int64_t i = 0; i < n_outputs_all; ++i) { + for (int64_t i = 0; i < n_outputs; ++i) { int64_t out_id = out_ids[i]; output_ids[out_id] = i; if (out_id != i) { @@ -1090,35 +1213,29 @@ int llama_context::decode(llama_batch & inp_batch) { // make the outputs have the same order they had in the user-provided batch // note: this is mostly relevant for recurrent models atm if (!sorted_output) { - const uint32_t n_vocab = model.vocab.n_tokens(); - const uint32_t n_embd = model.hparams.n_embd; - GGML_ASSERT((size_t) n_outputs == out_ids.size()); // TODO: is there something more efficient which also minimizes swaps? 
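+            // note: the swaps are recorded here and applied lazily on the first logits/embeddings access (see output_reorder())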
// selection sort, to minimize swaps (from https://en.wikipedia.org/wiki/Selection_sort) - for (int32_t i = 0; i < n_outputs - 1; ++i) { - int32_t j_min = i; - for (int32_t j = i + 1; j < n_outputs; ++j) { + for (uint32_t i = 0; i < n_outputs - 1; ++i) { + uint32_t j_min = i; + for (uint32_t j = i + 1; j < n_outputs; ++j) { if (out_ids[j] < out_ids[j_min]) { j_min = j; } } - if (j_min == i) { continue; } + if (j_min == i) { + continue; + } std::swap(out_ids[i], out_ids[j_min]); - if (logits_size > 0) { - for (uint32_t k = 0; k < n_vocab; k++) { - std::swap(logits[i*n_vocab + k], logits[j_min*n_vocab + k]); - } - } - if (embd_size > 0) { - for (uint32_t k = 0; k < n_embd; k++) { - std::swap(embd[i*n_embd + k], embd[j_min*n_embd + k]); - } - } + + // remember the swaps and apply them lazily upon logits/embeddings access + output_swaps.push_back({ i, j_min }); } + std::fill(output_ids.begin(), output_ids.end(), -1); - for (int32_t i = 0; i < n_outputs; ++i) { + + for (uint32_t i = 0; i < n_outputs; ++i) { output_ids[out_ids[i]] = i; } } @@ -1127,15 +1244,12 @@ int llama_context::decode(llama_batch & inp_batch) { // wait for the computation to finish (automatically done when obtaining the model output) //synchronize(); - // decide if we need to defrag the kv cache - if (cparams.defrag_thold > 0.0f) { - kv_self->defrag_sched(cparams.defrag_thold); + if (!supports_set_rows) { + // Reset state for the next token before backend sync, to allow the CPU activities in the reset to + // overlap with device computation. + ggml_backend_sched_reset(sched.get()); } - // Reset state for the next token before backend sync, to allow the CPU activities in the reset to - // overlap with device computation. - ggml_backend_sched_reset(sched.get()); - return 0; } @@ -1143,7 +1257,7 @@ int llama_context::decode(llama_batch & inp_batch) { // output // -int32_t llama_context::output_reserve(int32_t n_outputs) { +uint32_t llama_context::output_reserve(int32_t n_outputs) { const auto & hparams = model.hparams; const auto & vocab = model.vocab; @@ -1153,9 +1267,8 @@ int32_t llama_context::output_reserve(int32_t n_outputs) { const auto n_vocab = vocab.n_tokens(); const auto n_embd = hparams.n_embd; - // TODO: use a per-batch flag for logits presence instead - bool has_logits = cparams.causal_attn; - bool has_embd = cparams.embeddings && (cparams.pooling_type == LLAMA_POOLING_TYPE_NONE); + bool has_logits = true; + bool has_embd = cparams.embeddings; // TODO: hacky enc-dec support if (model.arch == LLM_ARCH_T5) { @@ -1209,53 +1322,111 @@ int32_t llama_context::output_reserve(int32_t n_outputs) { // set all ids as invalid (negative) std::fill(output_ids.begin(), output_ids.end(), -1); - this->n_outputs = 0; - this->n_outputs_max = n_outputs_max; + this->n_outputs = 0; return n_outputs_max; } +void llama_context::output_reorder() { + const uint64_t n_vocab = model.vocab.n_tokens(); + const uint64_t n_embd = model.hparams.n_embd; + + for (size_t s = 0; s < output_swaps.size(); ++s) { + const uint64_t i0 = output_swaps[s].i0; + const uint64_t i1 = output_swaps[s].i1; + + if (logits_size > 0) { + for (uint64_t k = 0; k < n_vocab; k++) { + std::swap(logits[i0*n_vocab + k], logits[i1*n_vocab + k]); + } + } + + if (embd_size > 0) { + for (uint64_t k = 0; k < n_embd; k++) { + std::swap(embd[i0*n_embd + k], embd[i1*n_embd + k]); + } + } + } + + output_swaps.clear(); +} + // // graph // -int32_t llama_context::graph_max_nodes() const { - return std::max(65536, 5*model.n_tensors()); +uint32_t llama_context::graph_max_nodes() const { 
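+    // budget ~8 graph nodes per model tensor, with a floor of 1024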
+ return std::max(1024u, 8u*model.n_tensors()); } -ggml_cgraph * llama_context::graph_init() { - ggml_init_params params = { - /*.mem_size =*/ buf_compute_meta.size(), - /*.mem_buffer =*/ buf_compute_meta.data(), - /*.no_alloc =*/ true, +llm_graph_result * llama_context::get_gf_res_reserve() const { + return static_cast<llm_graph_result *>(gf_res_reserve.get()); +} + +ggml_cgraph * llama_context::graph_reserve(uint32_t n_tokens, uint32_t n_seqs, uint32_t n_outputs, const llama_memory_context_i * mctx) { + LLAMA_LOG_DEBUG("%s: reserving a graph for ubatch with n_tokens = %4u, n_seqs = %2u, n_outputs = %4u\n", __func__, n_tokens, n_seqs, n_outputs); + + if (n_tokens % n_seqs != 0) { + n_tokens = ((n_tokens + (n_seqs - 1)) / n_seqs) * n_seqs; // round to next multiple of n_seqs + n_outputs = std::min(n_outputs, n_tokens); + + LLAMA_LOG_DEBUG("%s: making n_tokens a multiple of n_seqs - n_tokens = %u, n_seqs = %u, n_outputs = %u\n", __func__, n_tokens, n_seqs, n_outputs); + } + + ggml_backend_sched_reset(sched.get()); + + // when the scheduler is reset, we cannot reuse the old graph, so we reset the previous graph result to prevent that + gf_res_prev->reset(); + + // store the n_outputs as it is, and restore it afterwards + // TODO: not sure if needed, might simplify in the future by removing this + const auto save_n_outputs = this->n_outputs; + + this->n_outputs = n_outputs; + + llama_batch_allocr balloc(model.hparams.n_pos_per_embd()); + llama_ubatch ubatch = balloc.ubatch_reserve(n_tokens/n_seqs, n_seqs); + + auto * res = gf_res_reserve.get(); + + const auto gparams = graph_params(res, ubatch, mctx, LLM_GRAPH_TYPE_DEFAULT); + + res->reset(); + + auto * gf = model.build_graph(gparams); + + this->n_outputs = save_n_outputs; + + // initialize scheduler with the specified graph + if (!ggml_backend_sched_reserve(sched.get(), gf)) { + LLAMA_LOG_ERROR("%s: failed to allocate compute buffers\n", __func__); + return nullptr; + } + + return gf; +} + +llm_graph_params llama_context::graph_params( + llm_graph_result * res, + const llama_ubatch & ubatch, + const llama_memory_context_i * mctx, + llm_graph_type gtype) const { + return { + /*.arch =*/ model.arch, + /*.hparams =*/ model.hparams, + /*.cparams =*/ cparams, + /*.ubatch =*/ ubatch, + /*.gtype =*/ gtype, + /*.sched =*/ sched.get(), + /*.backend_cpu =*/ backend_cpu, + /*.cvec =*/ &cvec, + /*.loras =*/ &loras, + /*.mctx =*/ mctx, + /*.cross =*/ &cross, + /*.n_outputs =*/ n_outputs, + /*.cb =*/ graph_get_cb(), + /*.res =*/ res, }; - - ctx_compute.reset(ggml_init(params)); - - return ggml_new_graph_custom(ctx_compute.get(), graph_max_nodes(), false); -} - -llm_graph_result_ptr llama_context::graph_build( - ggml_context * ctx, - ggml_cgraph * gf, - const llama_ubatch & ubatch, - llm_graph_type gtype) { - return model.build_graph( - { - /*.ctx =*/ ctx, - /*.arch =*/ model.arch, - /*.hparams =*/ model.hparams, - /*.cparams =*/ cparams, - /*.ubatch =*/ ubatch, - /*.sched =*/ sched.get(), - /*.backend_cpu =*/ backend_cpu, - /*.cvec =*/ &cvec, - /*.loras =*/ &loras, - /*.memory =*/ memory.get(), - /*.cross =*/ &cross, - /*.n_outputs =*/ n_outputs, - /*.cb =*/ graph_get_cb(), - }, gf, gtype); } ggml_status llama_context::graph_compute( @@ -1660,14 +1831,12 @@ size_t llama_context::state_write_data(llama_io_write_i & io) { std::vector w_output_pos; - GGML_ASSERT(n_outputs <= n_outputs_max); - w_output_pos.resize(n_outputs); // build a more compact representation of the output ids for (size_t i = 0; i < n_batch(); ++i) { // map an output id to a position in the batch - int32_t
pos = output_ids[i]; + int64_t pos = output_ids[i]; if (pos >= 0) { GGML_ASSERT(pos < n_outputs); w_output_pos[pos] = i; @@ -1707,10 +1876,10 @@ size_t llama_context::state_write_data(llama_io_write_i & io) { } } - LLAMA_LOG_DEBUG("%s: - writing KV self\n", __func__); - llama_kv_cache * kv_self = static_cast(memory.get()); - - kv_self->state_write(io); + if (memory != nullptr) { + LLAMA_LOG_DEBUG("%s: - writing KV self\n", __func__); + memory->state_write(io); + } return io.n_bytes(); } @@ -1796,9 +1965,7 @@ size_t llama_context::state_read_data(llama_io_read_i & io) { if (memory) { LLAMA_LOG_DEBUG("%s: - reading KV self\n", __func__); - llama_kv_cache * kv_self = static_cast(memory.get()); - - kv_self->state_read(io); + memory->state_read(io); } return io.n_bytes(); @@ -1808,9 +1975,7 @@ size_t llama_context::state_seq_write_data(llama_io_write_i & io, llama_seq_id s GGML_UNUSED(seq_id); if (memory) { - llama_kv_cache * kv_self = static_cast(memory.get()); - - kv_self->state_write(io, seq_id); + memory->state_write(io, seq_id); } return io.n_bytes(); @@ -1820,9 +1985,7 @@ size_t llama_context::state_seq_read_data(llama_io_read_i & io, llama_seq_id seq GGML_UNUSED(seq_id); if (memory) { - llama_kv_cache * kv_self = static_cast(memory.get()); - - kv_self->state_read(io, seq_id); + memory->state_read(io, seq_id); } return io.n_bytes(); @@ -1841,6 +2004,7 @@ llama_perf_context_data llama_context::perf_get_data() const { data.t_eval_ms = 1e-3 * t_eval_us; data.n_p_eval = std::max(1, n_p_eval); data.n_eval = std::max(1, n_eval); + data.n_reused = std::max(0, n_reused); return data; } @@ -1849,6 +2013,7 @@ void llama_context::perf_reset() { t_start_us = ggml_time_us(); t_eval_us = n_eval = 0; t_p_eval_us = n_p_eval = 0; + n_reused = 0; } // @@ -1927,10 +2092,7 @@ void llama_context::opt_epoch_iter( const uint32_t n_batch = std::min(this->n_batch(), n_ctx); const uint32_t n_ubatch = std::min(this->n_ubatch(), n_batch); - llama_kv_cache * kv_self = static_cast(memory.get()); - - kv_self->clear(); - llama_kv_cache_guard kv_guard(kv_self); + memory->clear(true); for (uint32_t pos_ctx = 0; pos_ctx < n_ctx; pos_ctx += n_batch) { batch.n_tokens = n_batch; @@ -1942,42 +2104,49 @@ void llama_context::opt_epoch_iter( batch.logits [pos_batch] = true; } - const auto n_tokens_all = batch.n_tokens; + if (!balloc->init(batch, model.vocab, nullptr, model.hparams.n_embd, cparams.kv_unified ? 
LLAMA_MAX_SEQ : cparams.n_seq_max, true)) { + LLAMA_LOG_ERROR("%s: failed to initialize batch\n", __func__); + return; + } + + const uint32_t n_tokens_all = balloc->get_n_tokens(); n_queued_tokens += n_tokens_all; - // this indicates we are doing pooled embedding, so we ignore batch.logits and output all tokens - const bool embd_pooled = cparams.embeddings && cparams.pooling_type != LLAMA_POOLING_TYPE_NONE; - embd_seq.clear(); - int64_t n_outputs_all = n_tokens_all; + uint32_t n_outputs_all = n_tokens_all; - llama_sbatch sbatch = kv_self->sbatch_init(batch, /*logits_all =*/ true); + auto mctx = memory->init_batch(*balloc, cparams.n_ubatch, true); + if (!mctx || mctx->get_status() != LLAMA_MEMORY_STATUS_SUCCESS) { + LLAMA_LOG_ERROR("%s: could not initialize batch\n", __func__); + break; + } // reserve output buffer if (output_reserve(n_outputs_all) < n_outputs_all) { - LLAMA_LOG_ERROR("%s: could not reserve space for batch with %" PRId64 " outputs\n", __func__, n_outputs_all); + LLAMA_LOG_ERROR("%s: could not reserve space for batch with %d outputs\n", __func__, n_outputs_all); GGML_ABORT("TODO: handle this error"); }; - for (uint32_t pos_batch = 0; pos_batch < n_batch; pos_batch += n_ubatch) { - llama_ubatch ubatch = kv_self->ubatch_next(sbatch, cparams.n_ubatch, embd_pooled); + uint32_t pos_batch = 0; + do { + const auto & ubatch = mctx->get_ubatch(); n_outputs = ubatch.n_tokens; - // TODO: not sure if this is needed - if (!kv_self->find_slot(ubatch)) { - kv_self->defrag_sched(-1.0f); - kv_self->update(*this); - if (!kv_self->find_slot(ubatch)) { - LLAMA_LOG_WARN("%s: failed to find KV cache slot for ubatch of size %d\n", __func__, ubatch.n_tokens); - GGML_ABORT("TODO: handle this error"); - } + if (!mctx->apply()) { + LLAMA_LOG_ERROR("%s: failed to update the memory context\n", __func__); + break; } - auto * gf = graph_init(); - auto res = graph_build(ctx_compute.get(), gf, ubatch, LLM_GRAPH_TYPE_DEFAULT); + auto * res = gf_res_prev.get(); + + const auto gparams = graph_params(res, ubatch, mctx.get(), LLM_GRAPH_TYPE_DEFAULT); + + res->reset(); + + auto * gf = model.build_graph(gparams); struct ggml_context * ctx_compute_opt; { @@ -1992,6 +2161,7 @@ void llama_context::opt_epoch_iter( } ggml_opt_prepare_alloc(opt_ctx, ctx_compute_opt, gf, res->get_tokens(), res->get_logits()); ggml_opt_alloc(opt_ctx, train); + res->set_inputs(&ubatch); { struct ggml_tensor * labels = ggml_opt_labels(opt_ctx); @@ -2009,10 +2179,10 @@ void llama_context::opt_epoch_iter( callback(train, opt_ctx, dataset, result, idata_in_loop + (pos_ctx + pos_batch)/n_ubatch + 1, ndata_in_loop, t_loop_start); } ggml_free(ctx_compute_opt); - } - } - kv_guard.commit(); + pos_batch += ubatch.n_tokens; + } while (mctx->next()); + } } void llama_context::opt_epoch( @@ -2097,6 +2267,8 @@ llama_context_params llama_context_default_params() { /*.flash_attn =*/ false, /*.no_perf =*/ true, /*.op_offload =*/ true, + /*.swa_full =*/ true, + /*.kv_unified =*/ false, }; return result; @@ -2171,12 +2343,14 @@ const llama_model * llama_get_model(const llama_context * ctx) { return &ctx->get_model(); } +// deprecated llama_kv_cache * llama_get_kv_self(llama_context * ctx) { - return ctx->get_kv_self(); + return dynamic_cast(ctx->get_memory()); } +// deprecated void llama_kv_self_update(llama_context * ctx) { - ctx->kv_self_update(); + ctx->kv_self_update(false); } enum llama_pooling_type llama_pooling_type(const llama_context * ctx) { @@ -2292,27 +2466,108 @@ int32_t llama_apply_adapter_cvec( } // -// kv cache view +// memory // 
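Editor's note: the hunk below drops the kv-cache-view helpers in favor of the null-safe llama_memory_* wrappers. A minimal usage sketch against only the signatures introduced in this patch (llama_get_memory, llama_memory_seq_pos_min/_max, llama_memory_seq_rm); the half-open [p0, p1) removal range is an assumption here, and context setup is elided:

#include "llama.h"

// keep only the newest `keep_last` positions of one sequence via the new memory API;
// every llama_memory_* wrapper below is a safe no-op when the handle is null
static void trim_sequence(llama_context * ctx, llama_seq_id seq, llama_pos keep_last) {
    llama_memory_t mem = llama_get_memory(ctx);

    const llama_pos p_min = llama_memory_seq_pos_min(mem, seq); // -1 when the sequence is empty
    const llama_pos p_max = llama_memory_seq_pos_max(mem, seq);

    if (p_min >= 0 && p_max - p_min + 1 > keep_last) {
        llama_memory_seq_rm(mem, seq, p_min, p_max - keep_last + 1); // assumed [p0, p1)
    }
}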
-llama_kv_cache_view llama_kv_cache_view_init(const llama_context * ctx, int32_t n_seq_max) { - const auto * kv = ctx->get_kv_self(); - if (kv == nullptr) { - LLAMA_LOG_WARN("%s: the context does not have a KV cache\n", __func__); - return {}; - } - - return llama_kv_cache_view_init(*kv, n_seq_max); +llama_memory_t llama_get_memory(const struct llama_context * ctx) { + return ctx->get_memory(); } -void llama_kv_cache_view_update(const llama_context * ctx, llama_kv_cache_view * view) { - const auto * kv = ctx->get_kv_self(); - if (kv == nullptr) { - LLAMA_LOG_WARN("%s: the context does not have a KV cache\n", __func__); +void llama_memory_clear(llama_memory_t mem, bool data) { + if (!mem) { return; } - llama_kv_cache_view_update(view, kv); + mem->clear(data); +} + +bool llama_memory_seq_rm( + llama_memory_t mem, + llama_seq_id seq_id, + llama_pos p0, + llama_pos p1) { + if (!mem) { + return true; + } + + return mem->seq_rm(seq_id, p0, p1); +} + +void llama_memory_seq_cp( + llama_memory_t mem, + llama_seq_id seq_id_src, + llama_seq_id seq_id_dst, + llama_pos p0, + llama_pos p1) { + if (!mem) { + return; + } + + mem->seq_cp(seq_id_src, seq_id_dst, p0, p1); +} + +void llama_memory_seq_keep( + llama_memory_t mem, + llama_seq_id seq_id) { + if (!mem) { + return; + } + + mem->seq_keep(seq_id); +} + +void llama_memory_seq_add( + llama_memory_t mem, + llama_seq_id seq_id, + llama_pos p0, + llama_pos p1, + llama_pos delta) { + if (!mem) { + return; + } + + mem->seq_add(seq_id, p0, p1, delta); +} + +void llama_memory_seq_div( + llama_memory_t mem, + llama_seq_id seq_id, + llama_pos p0, + llama_pos p1, + int d) { + if (!mem) { + return; + } + + mem->seq_div(seq_id, p0, p1, d); +} + +llama_pos llama_memory_seq_pos_min( + llama_memory_t mem, + llama_seq_id seq_id) { + if (!mem) { + return -1; + } + + return mem->seq_pos_min(seq_id); +} + +llama_pos llama_memory_seq_pos_max( + llama_memory_t mem, + llama_seq_id seq_id) { + if (!mem) { + return -1; + } + + return mem->seq_pos_max(seq_id); +} + +bool llama_memory_can_shift(llama_memory_t mem) { + if (!mem) { + return false; + } + + return mem->get_can_shift(); } // @@ -2320,203 +2575,161 @@ void llama_kv_cache_view_update(const llama_context * ctx, llama_kv_cache_view * // // deprecated -int32_t llama_get_kv_cache_token_count(const llama_context * ctx) { - return llama_kv_self_n_tokens(ctx); -} - int32_t llama_kv_self_n_tokens(const llama_context * ctx) { - const auto * kv = ctx->get_kv_self(); + const auto * kv = llama_get_memory(ctx); if (!kv) { return 0; } - return kv->get_n_tokens(); + int32_t res = 0; + + for (uint32_t s = 0; s < ctx->get_cparams().n_seq_max; s++) { + const llama_pos p0 = kv->seq_pos_min(s); + const llama_pos p1 = kv->seq_pos_max(s); + + if (p0 >= 0) { + res += (p1 - p0) + 1; + } + } + + return res; } // deprecated -int32_t llama_get_kv_cache_used_cells(const llama_context * ctx) { - return llama_kv_self_used_cells(ctx); -} - +// note: this is the same as above - will be removed anyway, so it's ok int32_t llama_kv_self_used_cells(const llama_context * ctx) { - const auto * kv = ctx->get_kv_self(); + const auto * kv = llama_get_memory(ctx); if (!kv) { return 0; } - return kv->get_used_cells(); + int32_t res = 0; + + for (uint32_t s = 0; s < ctx->get_cparams().n_seq_max; s++) { + const llama_pos p0 = kv->seq_pos_min(s); + const llama_pos p1 = kv->seq_pos_max(s); + + if (p0 >= 0) { + res += (p1 - p0) + 1; + } + } + + return res; } // deprecated -void llama_kv_cache_clear(llama_context * ctx) { - llama_kv_self_clear(ctx); -} - void 
llama_kv_self_clear(llama_context * ctx) { - auto * kv = ctx->get_kv_self(); + auto * kv = llama_get_memory(ctx); if (!kv) { return; } - kv->clear(); + llama_memory_clear(kv, true); } // deprecated -bool llama_kv_cache_seq_rm( - llama_context * ctx, - llama_seq_id seq_id, - llama_pos p0, - llama_pos p1) { - return llama_kv_self_seq_rm(ctx, seq_id, p0, p1); -} - bool llama_kv_self_seq_rm( llama_context * ctx, llama_seq_id seq_id, llama_pos p0, llama_pos p1) { - auto * kv = ctx->get_kv_self(); + auto * kv = llama_get_memory(ctx); if (!kv) { return true; } - return kv->seq_rm(seq_id, p0, p1); + return llama_memory_seq_rm(kv, seq_id, p0, p1); } // deprecated -void llama_kv_cache_seq_cp( - llama_context * ctx, - llama_seq_id seq_id_src, - llama_seq_id seq_id_dst, - llama_pos p0, - llama_pos p1) { - llama_kv_self_seq_cp(ctx, seq_id_src, seq_id_dst, p0, p1); -} - void llama_kv_self_seq_cp( llama_context * ctx, llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) { - auto * kv = ctx->get_kv_self(); + auto * kv = llama_get_memory(ctx); if (!kv) { return; } - kv->seq_cp(seq_id_src, seq_id_dst, p0, p1); + llama_memory_seq_cp(kv, seq_id_src, seq_id_dst, p0, p1); } // deprecated -void llama_kv_cache_seq_keep( - llama_context * ctx, - llama_seq_id seq_id) { - llama_kv_self_seq_keep(ctx, seq_id); -} - void llama_kv_self_seq_keep(llama_context * ctx, llama_seq_id seq_id) { - auto * kv = ctx->get_kv_self(); + auto * kv = llama_get_memory(ctx); if (!kv) { return; } - kv->seq_keep(seq_id); + llama_memory_seq_keep(kv, seq_id); } // deprecated -void llama_kv_cache_seq_add( - llama_context * ctx, - llama_seq_id seq_id, - llama_pos p0, - llama_pos p1, - llama_pos delta) { - llama_kv_self_seq_add(ctx, seq_id, p0, p1, delta); -} - void llama_kv_self_seq_add( llama_context * ctx, llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos delta) { - auto * kv = ctx->get_kv_self(); + auto * kv = llama_get_memory(ctx); if (!kv) { return; } - kv->seq_add(seq_id, p0, p1, delta); + llama_memory_seq_add(kv, seq_id, p0, p1, delta); } // deprecated -void llama_kv_cache_seq_div( - llama_context * ctx, - llama_seq_id seq_id, - llama_pos p0, - llama_pos p1, - int d) { - llama_kv_self_seq_div(ctx, seq_id, p0, p1, d); -} - void llama_kv_self_seq_div( llama_context * ctx, llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) { - auto * kv = ctx->get_kv_self(); + auto * kv = llama_get_memory(ctx); if (!kv) { return; } - kv->seq_div(seq_id, p0, p1, d); + llama_memory_seq_div(kv, seq_id, p0, p1, d); } // deprecated -llama_pos llama_kv_cache_seq_pos_max(llama_context * ctx, llama_seq_id seq_id) { - return llama_kv_self_seq_pos_max(ctx, seq_id); +llama_pos llama_kv_self_seq_pos_min(llama_context * ctx, llama_seq_id seq_id) { + auto * kv = llama_get_memory(ctx); + if (!kv) { + return -1; + } + + return llama_memory_seq_pos_min(kv, seq_id); } +// deprecated llama_pos llama_kv_self_seq_pos_max(llama_context * ctx, llama_seq_id seq_id) { - const auto * kv = ctx->get_kv_self(); + auto * kv = llama_get_memory(ctx); if (!kv) { - return 0; + return -1; } - return kv->seq_pos_max(seq_id); + return llama_memory_seq_pos_max(kv, seq_id); } // deprecated -void llama_kv_cache_defrag(llama_context * ctx) { - llama_kv_self_defrag(ctx); -} - void llama_kv_self_defrag(llama_context * ctx) { - auto * kv = ctx->get_kv_self(); - if (!kv) { - return; - } - // force defrag - kv->defrag_sched(-1.0f); + ctx->kv_self_defrag_sched(); } // deprecated -bool llama_kv_cache_can_shift(const llama_context * ctx) { - return 
llama_kv_self_can_shift(ctx); -} - bool llama_kv_self_can_shift(const llama_context * ctx) { - const auto * kv = ctx->get_kv_self(); + auto * kv = llama_get_memory(ctx); if (!kv) { return false; } - return kv->get_can_shift(); -} - -// deprecated -void llama_kv_cache_update(llama_context * ctx) { - llama_kv_self_update(ctx); + return llama_memory_can_shift(kv); } // llama state API @@ -2642,7 +2855,7 @@ int32_t llama_decode( llama_context * ctx, llama_batch batch) { const int ret = ctx->decode(batch); - if (ret != 0) { + if (ret != 0 && ret != 1) { LLAMA_LOG_ERROR("%s: failed to decode, ret = %d\n", __func__, ret); } @@ -2676,6 +2889,7 @@ void llama_perf_context_print(const llama_context * ctx) { LLAMA_LOG_INFO("%s: eval time = %10.2f ms / %5d runs (%8.2f ms per token, %8.2f tokens per second)\n", __func__, data.t_eval_ms, data.n_eval, data.t_eval_ms / data.n_eval, 1e3 / data.t_eval_ms * data.n_eval); LLAMA_LOG_INFO("%s: total time = %10.2f ms / %5d tokens\n", __func__, (t_end_ms - data.t_start_ms), (data.n_p_eval + data.n_eval)); + LLAMA_LOG_INFO("%s: graphs reused = %10d\n", __func__, data.n_reused); } void llama_perf_context_reset(llama_context * ctx) { diff --git a/llama/llama.cpp/src/llama-context.h b/llama/llama.cpp/src/llama-context.h index 0264e9371..25c143d56 100644 --- a/llama/llama.cpp/src/llama-context.h +++ b/llama/llama.cpp/src/llama-context.h @@ -1,11 +1,9 @@ #pragma once #include "llama.h" -#include "llama-batch.h" #include "llama-cparams.h" #include "llama-graph.h" #include "llama-adapter.h" -#include "llama-kv-cache.h" #include "ggml-cpp.h" #include "ggml-opt.h" @@ -14,11 +12,14 @@ #include struct llama_model; -struct llama_kv_cache; +class llama_batch_allocr; class llama_io_read_i; class llama_io_write_i; +struct llama_memory_i; +struct llama_memory_context_i; + struct llama_context { // init scheduler and compute buffers, reserve worst-case graphs llama_context( @@ -34,8 +35,6 @@ struct llama_context { ggml_backend_sched_t get_sched() const; - ggml_context * get_ctx_compute() const; - uint32_t n_ctx() const; uint32_t n_ctx_per_seq() const; uint32_t n_batch() const; @@ -45,10 +44,12 @@ struct llama_context { uint32_t n_threads() const; uint32_t n_threads_batch() const; - llama_kv_cache * get_kv_self(); - const llama_kv_cache * get_kv_self() const; + llama_memory_t get_memory() const; - void kv_self_update(); + // return true if the KV cache was updated + // TODO: remove + bool kv_self_update(bool optimize); + void kv_self_defrag_sched(); enum llama_pooling_type pooling_type() const; @@ -89,8 +90,18 @@ struct llama_context { int32_t il_start, int32_t il_end); - int encode(llama_batch & inp_batch); - int decode(llama_batch & inp_batch); + // process a single ubatch with a specific graph type + // if memory_context is provided, it will be applied first to the context's memory + // ret contains the status of the graph computation + // returns nullptr only if ret != GGML_STATUS_SUCCESS + llm_graph_result * process_ubatch( + const llama_ubatch & ubatch, + llm_graph_type gtype, + llama_memory_context_i * mctx, + ggml_status & ret); + + int encode(const llama_batch & batch_inp); + int decode(const llama_batch & batch_inp); // // state save/load @@ -168,29 +179,32 @@ private: // Make sure enough space is available for outputs. // Returns max number of outputs for which space was reserved.
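Editor's note: the reservation contract documented above is what callers in this patch rely on; a sketch of the expected call pattern, mirroring opt_epoch_iter() earlier in this diff:

// reserve the output buffer; a result smaller than the request means allocation failed
if (output_reserve(n_outputs_all) < (uint32_t) n_outputs_all) {
    LLAMA_LOG_ERROR("%s: could not reserve space for batch with %d outputs\n", __func__, n_outputs_all);
    GGML_ABORT("TODO: handle this error");
}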
- int32_t output_reserve(int32_t n_outputs); + uint32_t output_reserve(int32_t n_outputs); + + void output_reorder(); // // graph // public: - int32_t graph_max_nodes() const; + uint32_t graph_max_nodes() const; - // zero-out inputs and create the ctx_compute for the compute graph - ggml_cgraph * graph_init(); + // can reuse the llm_graph_result instance of the context (for example to update a memory module) + llm_graph_result * get_gf_res_reserve() const; // returns the result of ggml_backend_sched_graph_compute_async execution - ggml_status graph_compute( - ggml_cgraph * gf, - bool batched); + ggml_status graph_compute(ggml_cgraph * gf, bool batched); + + // reserve a graph with a dummy ubatch of the specified size + ggml_cgraph * graph_reserve(uint32_t n_tokens, uint32_t n_seqs, uint32_t n_outputs, const llama_memory_context_i * mctx); private: - llm_graph_result_ptr graph_build( - ggml_context * ctx, - ggml_cgraph * gf, - const llama_ubatch & ubatch, - llm_graph_type gtype); + llm_graph_params graph_params( + llm_graph_result * res, + const llama_ubatch & ubatch, + const llama_memory_context_i * mctx, + llm_graph_type gtype) const; llm_graph_cb graph_get_cb() const; @@ -215,6 +229,9 @@ private: std::unique_ptr memory; + // TODO: temporary, until the llama_kv_self_defrag() API is removed + bool memory_force_optimize = false; + // decode output (2-dimensional array: [n_outputs][n_vocab]) size_t logits_size = 0; // capacity (of floats) for logits float * logits = nullptr; @@ -228,18 +245,25 @@ private: // populated only when pooling_type != LLAMA_POOLING_TYPE_NONE std::map> embd_seq; - int32_t n_outputs = 0; // number of actually-used outputs in the current ubatch or last logical batch - int32_t n_outputs_max = 0; // capacity (of tokens positions) for the output buffers + // reuse the batch_allocr to avoid unnecessary memory allocations + std::unique_ptr balloc; + + uint32_t n_outputs = 0; // number of actually-used outputs in the current ubatch or last logical batch std::vector output_ids; // map batch token positions to ids of the logits and embd buffers + struct swap_info { + uint32_t i0; + uint32_t i1; + }; + + std::vector output_swaps; + ggml_backend_sched_ptr sched; ggml_backend_t backend_cpu = nullptr; std::vector backends; - ggml_context_ptr ctx_compute; - // training ggml_opt_context_t opt_ctx = nullptr; @@ -255,14 +279,21 @@ private: std::vector backend_ptrs; std::vector backend_buft; - // memory buffers used to evaluate the model - std::vector buf_compute_meta; + llm_graph_result_ptr gf_res_prev; + llm_graph_result_ptr gf_res_reserve; // host buffer for the model output (logits and embeddings) ggml_backend_buffer_ptr buf_output; bool has_evaluated_once = false; + // env: LLAMA_SET_ROWS (temporary) + // ref: https://github.com/ggml-org/llama.cpp/pull/14285 + bool supports_set_rows = true; + + // env: LLAMA_GRAPH_REUSE_DISABLE + bool graph_reuse_disable = false; + // perf mutable int64_t t_start_us = 0; mutable int64_t t_load_us = 0; @@ -274,4 +305,6 @@ private: mutable int32_t n_p_eval = 0; // number of tokens in eval calls for the prompt (with batch size > 1) mutable int32_t n_eval = 0; // number of eval calls + + mutable int32_t n_reused = 0; // number of times the previous graph was reused }; diff --git a/llama/llama.cpp/src/llama-cparams.cpp b/llama/llama.cpp/src/llama-cparams.cpp index 28369be36..a3e7a37ee 100644 --- a/llama/llama.cpp/src/llama-cparams.cpp +++ b/llama/llama.cpp/src/llama-cparams.cpp @@ -1 +1,5 @@ #include "llama-cparams.h" + +size_t 
llama_max_parallel_sequences(void) { + return LLAMA_MAX_SEQ; +} diff --git a/llama/llama.cpp/src/llama-cparams.h b/llama/llama.cpp/src/llama-cparams.h index 246fa5777..38750affc 100644 --- a/llama/llama.cpp/src/llama-cparams.h +++ b/llama/llama.cpp/src/llama-cparams.h @@ -4,13 +4,15 @@ #include +#define LLAMA_MAX_SEQ 64 + struct llama_cparams { uint32_t n_ctx; // context size used during inference uint32_t n_batch; uint32_t n_ubatch; uint32_t n_seq_max; - int n_threads; // number of threads to use for generation - int n_threads_batch; // number of threads to use for batch processing + int32_t n_threads; // number of threads to use for generation + int32_t n_threads_batch; // number of threads to use for batch processing float rope_freq_base; float rope_freq_scale; @@ -31,6 +33,7 @@ struct llama_cparams { bool no_perf; bool warmup; bool op_offload; + bool kv_unified; enum llama_pooling_type pooling_type; diff --git a/llama/llama.cpp/src/llama-grammar.cpp b/llama/llama.cpp/src/llama-grammar.cpp index 60d582362..b51cee090 100644 --- a/llama/llama.cpp/src/llama-grammar.cpp +++ b/llama/llama.cpp/src/llama-grammar.cpp @@ -1186,8 +1186,18 @@ void llama_grammar_accept_impl(struct llama_grammar & grammar, llama_token token for (const auto & trigger_pattern : grammar.trigger_patterns) { if (std::regex_match(grammar.trigger_buffer, match, trigger_pattern.regex)) { grammar.awaiting_trigger = false; - // get from the first match to the end of the string - auto constrained_str = grammar.trigger_buffer.substr(match.position(1)); + // get from the first matched capturing group to the end of the string + size_t start = std::string::npos; + for (auto i = 1u; i < match.size(); i++) { + if (match.length(i) > 0) { + start = match.position(i); + break; + } + } + if (start == std::string::npos) { + start = match.position(0); + } + auto constrained_str = grammar.trigger_buffer.substr(start); // std::string constrained_str(match[1].first, grammar.trigger_buffer.end()); grammar.trigger_buffer.clear(); llama_grammar_accept_str(grammar, constrained_str); diff --git a/llama/llama.cpp/src/llama-graph.cpp b/llama/llama.cpp/src/llama-graph.cpp index b0e3f6359..053c72d6d 100644 --- a/llama/llama.cpp/src/llama-graph.cpp +++ b/llama/llama.cpp/src/llama-graph.cpp @@ -3,39 +3,16 @@ #include "llama-impl.h" #include "llama-batch.h" #include "llama-cparams.h" -#include "llama-kv-cache.h" + +#include "llama-kv-cache-unified.h" +#include "llama-kv-cache-unified-iswa.h" +#include "llama-memory-hybrid.h" +#include "llama-memory-recurrent.h" #include #include #include -static int32_t llama_relative_position_bucket(llama_pos x, llama_pos y, uint64_t n_buckets, bool bidirectional) { - // TODO move to hparams if a T5 variant appears that uses a different value - const int64_t max_distance = 128; - - if (bidirectional) { - n_buckets >>= 1; - } - - const int64_t max_exact = n_buckets >> 1; - - int32_t relative_position = x - y; - int32_t relative_bucket = 0; - - if (bidirectional) { - relative_bucket += (relative_position > 0) * n_buckets; - relative_position = abs(relative_position); - } else { - relative_position = -std::min(relative_position, 0); - } - - int32_t relative_position_if_large = floorf(max_exact + logf(1.0 * relative_position / max_exact) * (n_buckets - max_exact) / log(1.0 * max_distance / max_exact)); - relative_position_if_large = std::min(relative_position_if_large, n_buckets - 1); - relative_bucket += (relative_position < max_exact ? 
relative_position : relative_position_if_large); - - return relative_bucket; -} - void llm_graph_input_embd::set_input(const llama_ubatch * ubatch) { if (ubatch->token) { const int64_t n_tokens = ubatch->n_tokens; @@ -51,6 +28,15 @@ void llm_graph_input_embd::set_input(const llama_ubatch * ubatch) { } } +bool llm_graph_input_embd::can_reuse(const llm_graph_params & params) { + bool res = true; + + res &= (!tokens && !params.ubatch.token) || (tokens && tokens->ne[0] == params.ubatch.n_tokens); + res &= (!embd && !params.ubatch.embd) || (embd && embd->ne[0] == params.ubatch.n_tokens); + + return res; +} + void llm_graph_input_pos::set_input(const llama_ubatch * ubatch) { if (ubatch->pos && pos) { const int64_t n_tokens = ubatch->n_tokens; @@ -73,6 +59,14 @@ void llm_graph_input_pos::set_input(const llama_ubatch * ubatch) { } } +bool llm_graph_input_pos::can_reuse(const llm_graph_params & params) { + bool res = true; + + res &= pos->ne[0] == params.ubatch.n_tokens; + + return res; +} + void llm_graph_input_attn_temp::set_input(const llama_ubatch * ubatch) { if (ubatch->pos && attn_scale) { const int64_t n_tokens = ubatch->n_tokens; @@ -94,7 +88,7 @@ void llm_graph_input_pos_bucket::set_input(const llama_ubatch * ubatch) { const int64_t n_tokens = ubatch->n_tokens; GGML_ASSERT(ggml_backend_buffer_is_host(pos_bucket->buffer)); - GGML_ASSERT(!ubatch->equal_seqs); // TODO: use ubatch->n_seqs instead of failing + GGML_ASSERT(!ubatch->equal_seqs()); // TODO: use ubatch->n_seqs instead of failing int32_t * data = (int32_t *) pos_bucket->data; @@ -110,197 +104,146 @@ void llm_graph_input_pos_bucket::set_input(const llama_ubatch * ubatch) { void llm_graph_input_pos_bucket_kv::set_input(const llama_ubatch * ubatch) { if (pos_bucket) { - const int64_t n_tokens = ubatch->n_tokens; - - GGML_ASSERT(ggml_backend_buffer_is_host(pos_bucket->buffer)); - GGML_ASSERT(!ubatch->equal_seqs); // TODO: use ubatch->n_seqs instead of failing - - int32_t * data = (int32_t *) pos_bucket->data; - - const int64_t n_kv = kv_self->n; - - for (int h = 0; h < 1; ++h) { - for (int j = 0; j < n_tokens; ++j) { - for (int i = 0; i < n_kv; ++i) { - data[h*(n_kv*n_tokens) + j*n_kv + i] = llama_relative_position_bucket(kv_self->cells[i].pos, ubatch->pos[j], hparams.n_rel_attn_bkts, false); - } - } - } + mctx->set_input_pos_bucket(pos_bucket, ubatch); } } void llm_graph_input_out_ids::set_input(const llama_ubatch * ubatch) { - if (hparams.causal_attn || cparams.pooling_type == LLAMA_POOLING_TYPE_NONE) { - //GGML_ASSERT(out_ids && "every model that can must skip unused outputs"); + GGML_ASSERT(out_ids); - if (!out_ids) { - LLAMA_LOG_WARN("%s: 'out_ids' is not created\n", __func__); - } else { - const int64_t n_tokens = ubatch->n_tokens; + const int64_t n_tokens = ubatch->n_tokens; - GGML_ASSERT(ggml_backend_buffer_is_host(out_ids->buffer)); - int32_t * data = (int32_t *) out_ids->data; + GGML_ASSERT(ggml_backend_buffer_is_host(out_ids->buffer)); + int32_t * data = (int32_t *) out_ids->data; - if (n_outputs == n_tokens) { - for (int i = 0; i < n_tokens; ++i) { - data[i] = i; - } - } else if (ubatch->output) { - int32_t n_outputs = 0; - for (int i = 0; i < n_tokens; ++i) { - if (ubatch->output[i]) { - data[n_outputs++] = i; - } - } - // the graph needs to have been passed the correct number of outputs - GGML_ASSERT(n_outputs == n_outputs); - } else if (n_outputs == 1) { - // only keep last output - data[0] = n_tokens - 1; - } else { - GGML_ASSERT(n_outputs == 0); - } + if (n_outputs == n_tokens) { + for (int i = 0; i < n_tokens; ++i) { + 
data[i] = i; + } + + return; + } + + GGML_ASSERT(ubatch->output); + + int n_outputs = 0; + + for (int i = 0; i < n_tokens; ++i) { + if (ubatch->output[i]) { + data[n_outputs++] = i; } } } +bool llm_graph_input_out_ids::can_reuse(const llm_graph_params & params) { + bool res = true; + + res &= n_outputs == params.n_outputs; + + return res; +} + void llm_graph_input_mean::set_input(const llama_ubatch * ubatch) { if (cparams.embeddings && cparams.pooling_type == LLAMA_POOLING_TYPE_MEAN) { const int64_t n_tokens = ubatch->n_tokens; const int64_t n_seq_tokens = ubatch->n_seq_tokens; - const int64_t n_seqs = ubatch->n_seqs; + const int64_t n_seqs_unq = ubatch->n_seqs_unq; GGML_ASSERT(mean); GGML_ASSERT(ggml_backend_buffer_is_host(mean->buffer)); float * data = (float *) mean->data; - memset(mean->data, 0, n_tokens * n_tokens * ggml_element_size(mean)); + memset(mean->data, 0, n_tokens*n_seqs_unq*ggml_element_size(mean)); - std::vector sum(n_tokens, 0); + std::vector sums(n_seqs_unq, 0); + for (int i = 0; i < n_tokens; i += n_seq_tokens) { + for (int s = 0; s < ubatch->n_seq_id[i]; ++s) { + const llama_seq_id seq_id = ubatch->seq_id[i][s]; + const int32_t seq_idx = ubatch->seq_idx[seq_id]; - for (int s = 0; s < n_seqs; ++s) { - const llama_seq_id seq_id = ubatch->seq_id[s][0]; - - // TODO: adapt limits to n_seqs when ubatch->equal_seqs is true - GGML_ASSERT(seq_id < n_tokens && "seq_id cannot be larger than n_tokens with pooling_type == MEAN"); - - sum[seq_id] += ubatch->n_seq_tokens; - } - - std::vector div(n_tokens, 0.0f); - for (int i = 0; i < n_tokens; ++i) { - const uint64_t s = sum[i]; - if (s > 0) { - div[i] = 1.0f/float(s); + sums[seq_idx] += ubatch->n_seq_tokens; } } - for (int s = 0; s < n_seqs; ++s) { - const llama_seq_id seq_id = ubatch->seq_id[s][0]; + std::vector div(n_seqs_unq, 0.0f); + for (int s = 0; s < n_seqs_unq; ++s) { + const uint64_t sum = sums[s]; + if (sum > 0) { + div[s] = 1.0f/float(sum); + } + } - for (int i = 0; i < n_seq_tokens; ++i) { - data[seq_id*n_tokens + s*n_seq_tokens + i] = div[seq_id]; + for (int i = 0; i < n_tokens; i += n_seq_tokens) { + for (int s = 0; s < ubatch->n_seq_id[i]; ++s) { + const llama_seq_id seq_id = ubatch->seq_id[i][s]; + const int32_t seq_idx = ubatch->seq_idx[seq_id]; + + for (int j = 0; j < n_seq_tokens; ++j) { + data[seq_idx*n_tokens + i + j] = div[seq_idx]; + } } } } } void llm_graph_input_cls::set_input(const llama_ubatch * ubatch) { + const int64_t n_tokens = ubatch->n_tokens; + const int64_t n_seqs_unq = ubatch->n_seqs_unq; + if (cparams.embeddings && ( - cparams.pooling_type == LLAMA_POOLING_TYPE_CLS || - cparams.pooling_type == LLAMA_POOLING_TYPE_RANK)) { - const int64_t n_tokens = ubatch->n_tokens; - const int64_t n_seq_tokens = ubatch->n_seq_tokens; - const int64_t n_seqs = ubatch->n_seqs; - + cparams.pooling_type == LLAMA_POOLING_TYPE_CLS || + cparams.pooling_type == LLAMA_POOLING_TYPE_RANK || + cparams.pooling_type == LLAMA_POOLING_TYPE_LAST + )) { GGML_ASSERT(cls); GGML_ASSERT(ggml_backend_buffer_is_host(cls->buffer)); uint32_t * data = (uint32_t *) cls->data; - memset(cls->data, 0, n_tokens * ggml_element_size(cls)); + memset(cls->data, 0, n_seqs_unq*ggml_element_size(cls)); - for (int s = 0; s < n_seqs; ++s) { - const llama_seq_id seq_id = ubatch->seq_id[s][0]; + std::vector target_pos(n_seqs_unq, -1); + std::vector target_row(n_seqs_unq, -1); - // TODO: adapt limits to n_seqs when ubatch->equal_seqs is true - GGML_ASSERT(seq_id < n_tokens && "seq_id cannot be larger than n_tokens with pooling_type == CLS or RANK"); - - for 
(int i = 0; i < n_seq_tokens; ++i) { - const llama_pos pos = ubatch->pos[s*n_seq_tokens + i]; - - if (pos == 0) { - data[seq_id] = s*n_seq_tokens + i; - } - } - } - } - - if (cparams.embeddings && cparams.pooling_type == LLAMA_POOLING_TYPE_LAST) { - const int64_t n_tokens = ubatch->n_tokens; - const int64_t n_seq_tokens = ubatch->n_seq_tokens; - const int64_t n_seqs = ubatch->n_seqs; - - GGML_ASSERT(cls); - GGML_ASSERT(ggml_backend_buffer_is_host(cls->buffer)); - - uint32_t * data = (uint32_t *) cls->data; - memset(cls->data, 0, n_tokens * ggml_element_size(cls)); - - std::vector last_pos(n_tokens, -1); - std::vector last_row(n_tokens, -1); - - for (int s = 0; s < n_seqs; ++s) { - const llama_seq_id seq_id = ubatch->seq_id[s][0]; - - // TODO: adapt limits to n_seqs when ubatch->equal_seqs is true - GGML_ASSERT(seq_id < n_tokens && "seq_id cannot be larger than n_tokens with pooling_type == LAST"); - - for (int i = 0; i < n_seq_tokens; ++i) { - const llama_pos pos = ubatch->pos[s*n_seq_tokens + i]; - - if (pos >= last_pos[seq_id]) { - last_pos[seq_id] = pos; - last_row[seq_id] = s*n_seq_tokens + i; - } - } - } + bool last = cparams.pooling_type == LLAMA_POOLING_TYPE_LAST; for (int i = 0; i < n_tokens; ++i) { - if (last_row[i] >= 0) { - data[i] = last_row[i]; + const llama_pos pos = ubatch->pos[i]; + + for (int s = 0; s < ubatch->n_seq_id[i]; ++s) { + const llama_seq_id seq_id = ubatch->seq_id[i][s]; + const int32_t seq_idx = ubatch->seq_idx[seq_id]; + + if ( + (target_pos[seq_idx] == -1) || + ( last && pos >= target_pos[seq_idx]) || + (!last && pos < target_pos[seq_idx]) + ) { + target_pos[seq_idx] = pos; + target_row[seq_idx] = i; + } + } + } + + for (int s = 0; s < n_seqs_unq; ++s) { + if (target_row[s] >= 0) { + data[s] = target_row[s]; } } } } -void llm_graph_input_s_copy::set_input(const llama_ubatch * ubatch) { +void llm_graph_input_rs::set_input(const llama_ubatch * ubatch) { GGML_UNUSED(ubatch); - const int64_t n_kv = kv_self->n; + const int64_t n_rs = mctx->get_n_rs(); if (s_copy) { GGML_ASSERT(ggml_backend_buffer_is_host(s_copy->buffer)); int32_t * data = (int32_t *) s_copy->data; // assuming copy destinations ALWAYS happen ONLY on the cells between head and head+n - for (uint32_t i = 0; i < n_kv; ++i) { - data[i] = kv_self->s_copy(i); - } - } -} - -void llm_graph_input_s_mask::set_input(const llama_ubatch * ubatch) { - GGML_UNUSED(ubatch); - - const int64_t n_kv = kv_self->n; - - if (s_mask) { - GGML_ASSERT(ggml_backend_buffer_is_host(s_mask->buffer)); - float * data = (float *) s_mask->data; - - // clear unused states - for (int i = 0; i < n_kv; ++i) { - data[i] = kv_self->s_mask(i); + for (uint32_t i = 0; i < n_rs; ++i) { + data[i] = mctx->s_copy(i); } } } @@ -316,222 +259,228 @@ void llm_graph_input_cross_embd::set_input(const llama_ubatch * ubatch) { } void llm_graph_input_attn_no_cache::set_input(const llama_ubatch * ubatch) { - if (kq_mask) { - if (cparams.causal_attn) { - const int64_t n_kv = ubatch->n_tokens; - const int64_t n_tokens = ubatch->n_tokens; - const int64_t n_seq_tokens = ubatch->n_seq_tokens; - const int64_t n_seqs = ubatch->n_seqs; + const int64_t n_kv = ubatch->n_tokens; + const int64_t n_tokens = ubatch->n_tokens; - GGML_ASSERT(ggml_backend_buffer_is_host(kq_mask->buffer)); - float * data = (float *) kq_mask->data; + GGML_ASSERT(kq_mask); + GGML_ASSERT(ggml_backend_buffer_is_host(kq_mask->buffer)); - for (int h = 0; h < 1; ++h) { - for (int s1 = 0; s1 < n_seqs; ++s1) { - const llama_seq_id seq_id = ubatch->seq_id[s1][0]; + float * data = (float *) 
kq_mask->data; - for (int j = 0; j < n_seq_tokens; ++j) { - const int32_t tj = s1*n_seq_tokens + j; + for (int h = 0; h < 1; ++h) { + for (int i1 = 0; i1 < n_tokens; ++i1) { + const llama_seq_id s1 = ubatch->seq_id[i1][0]; - for (int s0 = 0; s0 < n_seqs; ++s0) { - for (int i = 0; i < n_seq_tokens; ++i) { - const int32_t ti = s0*n_seq_tokens + i; - float f = -INFINITY; + for (int i0 = 0; i0 < n_tokens; ++i0) { + float f = -INFINITY; - for (int s = 0; s < ubatch->n_seq_id[s0]; ++s) { - if (ubatch->seq_id[s0][s] == seq_id && ubatch->pos[ti] <= ubatch->pos[tj]) { - if (hparams.use_alibi) { - f = -std::abs(ubatch->pos[ti] - ubatch->pos[tj]); - } else { - f = 0.0f; - } - break; - } - } + for (int s = 0; s < ubatch->n_seq_id[i0]; ++s) { + const llama_seq_id s0 = ubatch->seq_id[i0][0]; - data[h*(n_kv*n_tokens) + tj*n_kv + ti] = f; - } + // TODO: reimplement this like in llama_kv_cache_unified + if (s0 == s1 && (!cparams.causal_attn || ubatch->pos[i0] <= ubatch->pos[i1])) { + if (hparams.use_alibi) { + f = -std::abs(ubatch->pos[i0] - ubatch->pos[i1]); + } else { + f = 0.0f; } + break; } } - } - } else { - const int64_t n_tokens = ubatch->n_tokens; - const int64_t n_seq_tokens = ubatch->n_seq_tokens; - const int64_t n_seqs = ubatch->n_seqs; - const int64_t n_stride = ubatch->n_tokens; - GGML_ASSERT(ggml_backend_buffer_is_host(kq_mask->buffer)); - - float * data = (float *) kq_mask->data; - - for (int h = 0; h < 1; ++h) { - for (int s1 = 0; s1 < n_seqs; ++s1) { - const llama_seq_id seq_id = ubatch->seq_id[s1][0]; - - for (int j = 0; j < n_seq_tokens; ++j) { - const int32_t tj = s1*n_seq_tokens + j; - - for (int s0 = 0; s0 < n_seqs; ++s0) { - for (int i = 0; i < n_seq_tokens; ++i) { - const int32_t ti = s0*n_seq_tokens + i; - float f = -INFINITY; - - for (int s = 0; s < ubatch->n_seq_id[s0]; ++s) { - if (ubatch->seq_id[s0][s] == seq_id) { - if (hparams.use_alibi) { - f = -std::abs(ubatch->pos[ti] - ubatch->pos[tj]); - } else { - f = 0.0f; - } - break; - } - } - - data[h*(n_tokens*n_tokens) + tj*n_stride + ti] = f; - } - } - - for (int i = n_tokens; i < n_stride; ++i) { - data[h*(n_tokens*n_tokens) + tj*n_stride + i] = -INFINITY; - } - } - } + data[h*(n_kv*n_tokens) + i1*n_kv + i0] = f; } } } } void llm_graph_input_attn_kv_unified::set_input(const llama_ubatch * ubatch) { - if (self_kq_mask || self_kq_mask_swa) { - const int64_t n_kv = kv_self->n; - const int64_t n_tokens = ubatch->n_tokens; - const int64_t n_seq_tokens = ubatch->n_seq_tokens; - const int64_t n_seqs = ubatch->n_seqs; + mctx->set_input_k_idxs(self_k_idxs, ubatch); + mctx->set_input_v_idxs(self_v_idxs, ubatch); - float * data = nullptr; - float * data_swa = nullptr; + mctx->set_input_kq_mask(self_kq_mask, ubatch, cparams.causal_attn); +} - if (self_kq_mask) { - GGML_ASSERT(ggml_backend_buffer_is_host(self_kq_mask->buffer)); - data = (float *) self_kq_mask->data; +bool llm_graph_input_attn_kv_unified::can_reuse(const llm_graph_params & params) { + const auto * mctx = static_cast(params.mctx); + + this->mctx = mctx; + + bool res = true; + + res &= self_k_idxs->ne[0] == params.ubatch.n_tokens; + //res &= self_v_idxs->ne[0] == params.ubatch.n_tokens; // TODO: need to move this to the unified cache and check there + + res &= self_kq_mask->ne[0] == mctx->get_n_kv(); + res &= self_kq_mask->ne[1] == GGML_PAD(params.ubatch.n_tokens, GGML_KQ_MASK_PAD); + + res &= mctx->get_supports_set_rows(); // TODO: tmp + + return res; +} + +void llm_graph_input_attn_kv_unified_iswa::set_input(const llama_ubatch * ubatch) { + 
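// (editor's note) this set_input wires the same per-ubatch inputs twice: once against
// the base (full-context) cache and once against the sliding-window (SWA) cache, so
// each ubatch carries both a full KQ mask and an SWA mask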
mctx->get_base()->set_input_k_idxs(self_k_idxs, ubatch); + mctx->get_base()->set_input_v_idxs(self_v_idxs, ubatch); + + mctx->get_base()->set_input_kq_mask(self_kq_mask, ubatch, cparams.causal_attn); + + mctx->get_swa()->set_input_k_idxs(self_k_idxs_swa, ubatch); + mctx->get_swa()->set_input_v_idxs(self_v_idxs_swa, ubatch); + + mctx->get_swa()->set_input_kq_mask(self_kq_mask_swa, ubatch, cparams.causal_attn); +} + +bool llm_graph_input_attn_kv_unified_iswa::can_reuse(const llm_graph_params & params) { + const auto * mctx = static_cast(params.mctx); + + this->mctx = mctx; + + bool res = true; + + res &= self_k_idxs->ne[0] == params.ubatch.n_tokens; + //res &= self_v_idxs->ne[0] == params.ubatch.n_tokens; // TODO: need to move this to the unified cache and check there + + res &= self_k_idxs_swa->ne[0] == params.ubatch.n_tokens; + //res &= self_v_idxs_swa->ne[0] == params.ubatch.n_tokens; // TODO: need to move this to the unified cache and check there + + res &= self_kq_mask->ne[0] == mctx->get_base()->get_n_kv(); + res &= self_kq_mask->ne[1] == GGML_PAD(params.ubatch.n_tokens, GGML_KQ_MASK_PAD); + + res &= self_kq_mask_swa->ne[0] == mctx->get_swa()->get_n_kv(); + res &= self_kq_mask_swa->ne[1] == GGML_PAD(params.ubatch.n_tokens, GGML_KQ_MASK_PAD); + + res &= mctx->get_base()->get_supports_set_rows(); // TODO: tmp + + return res; +} + +void llm_graph_input_attn_cross::set_input(const llama_ubatch * ubatch) { + GGML_ASSERT(cross_kq_mask); + + const int64_t n_enc = cross_kq_mask->ne[0]; + const int64_t n_tokens = ubatch->n_tokens; + + GGML_ASSERT(ggml_backend_buffer_is_host(cross_kq_mask->buffer)); + GGML_ASSERT(!ubatch->equal_seqs()); // TODO: use ubatch->n_seqs instead of failing + + float * data = (float *) cross_kq_mask->data; + + for (int h = 0; h < 1; ++h) { + for (int i = 0; i < n_tokens; ++i) { + for (int j = 0; j < n_enc; ++j) { + float f = -INFINITY; + + for (int s = 0; s < ubatch->n_seq_id[i]; ++s) { + const llama_seq_id seq_id = ubatch->seq_id[i][s]; + + if (cross->seq_ids_enc[j].find(seq_id) != cross->seq_ids_enc[j].end()) { + f = 0.0f; + } + } + + data[h*(n_enc*n_tokens) + i*n_enc + j] = f; + } } - if (self_kq_mask_swa) { - GGML_ASSERT(ggml_backend_buffer_is_host(self_kq_mask_swa->buffer)); - data_swa = (float *) self_kq_mask_swa->data; - } - - // Use only the previous KV cells of the correct sequence for each token of the ubatch. - // It's assumed that if a token in the batch has multiple sequences, they are equivalent. 
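Editor's note: the removed block below built this mask inline; as a compact reference, a self-contained sketch of the same per-cell rule (not part of the patch):

#include <cmath>
#include <cstdint>
#include <limits>

// additive attention-mask value for one (KV cell, batch token) pair, following the
// removed loop below: mask out the wrong sequence, and future positions when causal
static float kq_mask_value(bool same_seq, int32_t cell_pos, int32_t tok_pos,
                           bool causal_attn, bool use_alibi) {
    if (!same_seq || (causal_attn && cell_pos > tok_pos)) {
        return -std::numeric_limits<float>::infinity();
    }
    return use_alibi ? -std::abs(float(cell_pos - tok_pos)) : 0.0f;
}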
- // Example with a cache of 10 tokens, 2 tokens populated in cache and 3 tokens in batch: - // Causal mask: - // xxx------- - // xxxx------ - // xxxxx----- - // Non-causal mask: - // xxxxx----- - // xxxxx----- - // xxxxx----- - // To visualize the mask, see https://github.com/ggml-org/llama.cpp/pull/12615 - for (int h = 0; h < 1; ++h) { - for (int s = 0; s < n_seqs; ++s) { - const llama_seq_id seq_id = ubatch->seq_id[s][0]; - - for (int j = 0; j < n_seq_tokens; ++j) { - const llama_pos pos = ubatch->pos[s*n_seq_tokens + j]; - for (int i = 0; i < n_kv; ++i) { - float f; - // mask the token if: - if (!kv_self->cells[i].has_seq_id(seq_id) // not the correct sequence - || (cparams.causal_attn && kv_self->cells[i].pos > pos) // for causal, mask future tokens - ) { - f = -INFINITY; - } else { - if (hparams.use_alibi) { - f = -std::abs(kv_self->cells[i].pos - pos); - } else { - f = 0.0f; - } - } - - if (data) { - data[h*(n_kv*n_tokens) + s*(n_kv*n_seq_tokens) + j*n_kv + i] = f; - } - - // may need to cut off old tokens for sliding window - // TODO @ngxson : we are currently re-using the swa logic to store the chunked mask, we should rename SWA to something more generic like "aux mask" - if (data_swa) { - if (hparams.n_attn_chunk) { - llama_pos pos_chunk_start = (pos / hparams.n_attn_chunk) * hparams.n_attn_chunk; - if (kv_self->cells[i].pos < pos_chunk_start || pos < pos_chunk_start) { - f = -INFINITY; - } - } else { - if (pos - kv_self->cells[i].pos >= (int32_t)hparams.n_swa) { - f = -INFINITY; - } - } - data_swa[h*(n_kv*n_tokens) + s*(n_kv*n_seq_tokens) + j*n_kv + i] = f; - } - } - } - } - - // mask padded tokens - if (data) { - for (int i = n_tokens; i < GGML_PAD(n_tokens, GGML_KQ_MASK_PAD); ++i) { - for (int j = 0; j < n_kv; ++j) { - data[h*(n_kv*n_tokens) + i*n_kv + j] = -INFINITY; - } - } - } - - // mask padded tokens - if (data_swa) { - for (int i = n_tokens; i < GGML_PAD(n_tokens, GGML_KQ_MASK_PAD); ++i) { - for (int j = 0; j < n_kv; ++j) { - data_swa[h*(n_kv*n_tokens) + i*n_kv + j] = -INFINITY; - } - } + for (int i = n_tokens; i < GGML_PAD(n_tokens, GGML_KQ_MASK_PAD); ++i) { + for (int j = 0; j < n_enc; ++j) { + data[h*(n_enc*n_tokens) + i*n_enc + j] = -INFINITY; } } } } -void llm_graph_input_attn_cross::set_input(const llama_ubatch * ubatch) { - if (cross_kq_mask) { - const int64_t n_enc = cross_kq_mask->ne[0]; - const int64_t n_tokens = ubatch->n_tokens; +void llm_graph_input_mem_hybrid::set_input(const llama_ubatch * ubatch) { + inp_attn->set_input(ubatch); + inp_rs->set_input(ubatch); +} - GGML_ASSERT(ggml_backend_buffer_is_host(cross_kq_mask->buffer)); - GGML_ASSERT(!ubatch->equal_seqs); // TODO: use ubatch->n_seqs instead of failing +// +// llm_graph_result +// - float * data = (float *) cross_kq_mask->data; +llm_graph_result::llm_graph_result(int64_t max_nodes) : max_nodes(max_nodes) { + reset(); - for (int h = 0; h < 1; ++h) { - for (int j = 0; j < n_tokens; ++j) { - for (int i = 0; i < n_enc; ++i) { - float f = -INFINITY; - for (int s = 0; s < ubatch->n_seq_id[j]; ++s) { - const llama_seq_id seq_id = ubatch->seq_id[j][s]; - if (cross->seq_ids_enc[i].find(seq_id) != cross->seq_ids_enc[i].end()) { - f = 0.0f; - } - } - data[h*(n_enc*n_tokens) + j*n_enc + i] = f; - } - } + const char * LLAMA_GRAPH_RESULT_DEBUG = getenv("LLAMA_GRAPH_RESULT_DEBUG"); + debug = LLAMA_GRAPH_RESULT_DEBUG ? 
atoi(LLAMA_GRAPH_RESULT_DEBUG) : 0; +} - for (int i = n_tokens; i < GGML_PAD(n_tokens, GGML_KQ_MASK_PAD); ++i) { - for (int j = 0; j < n_enc; ++j) { - data[h*(n_enc*n_tokens) + i*n_enc + j] = -INFINITY; - } - } - } +int64_t llm_graph_result::get_max_nodes() const { + return max_nodes; +} + +void llm_graph_result::reset() { + t_tokens = nullptr; + t_logits = nullptr; + t_embd = nullptr; + t_embd_pooled = nullptr; + + params = {}; + + inputs.clear(); + + buf_compute_meta.resize(ggml_tensor_overhead()*max_nodes + ggml_graph_overhead_custom(max_nodes, false)); + + ggml_init_params params = { + /*.mem_size =*/ buf_compute_meta.size(), + /*.mem_buffer =*/ buf_compute_meta.data(), + /*.no_alloc =*/ true, + }; + + ctx_compute.reset(ggml_init(params)); + + gf = ggml_new_graph_custom(ctx_compute.get(), max_nodes, false); +} + +void llm_graph_result::set_inputs(const llama_ubatch * ubatch) { + for (auto & input : inputs) { + input->set_input(ubatch); } } +bool llm_graph_result::can_reuse(const llm_graph_params & params) { + if (!this->params.allow_reuse(params)) { + if (debug > 1) { + LLAMA_LOG_DEBUG("%s: cannot reuse graph due to incompatible graph parameters\n", __func__); + } + + return false; + } + + if (debug > 1) { + LLAMA_LOG_DEBUG("%s: checking compatibility of %d inputs:\n", __func__, (int) inputs.size()); + } + + bool res = true; + + for (auto & input : inputs) { + const bool cur = input->can_reuse(params); + + if (debug > 1) { + LLAMA_LOG_DEBUG("%s: can_reuse = %d\n", "placeholder", cur); + } + + res = res && cur; + } + + if (debug > 0) { + LLAMA_LOG_DEBUG("%s: can reuse graph = %d\n", __func__, res); + } + + return res; +} + +llm_graph_input_i * llm_graph_result::add_input(llm_graph_input_ptr input) { + inputs.emplace_back(std::move(input)); + return inputs.back().get(); +} + +void llm_graph_result::set_params(const llm_graph_params & params) { + this->params = params; +} + // // llm_graph_context // @@ -545,7 +494,6 @@ llm_graph_context::llm_graph_context(const llm_graph_params & params) : n_layer (hparams.n_layer), n_rot (hparams.n_rot), n_ctx (cparams.n_ctx), - n_ctx_per_seq (cparams.n_ctx / cparams.n_seq_max), n_head (hparams.n_head()), n_head_kv (hparams.n_head_kv()), n_embd_head_k (hparams.n_embd_head_k), @@ -567,21 +515,19 @@ llm_graph_context::llm_graph_context(const llm_graph_params & params) : n_ctx_orig (cparams.n_ctx_orig_yarn), pooling_type (cparams.pooling_type), rope_type (hparams.rope_type), - ctx0 (params.ctx), sched (params.sched), backend_cpu (params.backend_cpu), cvec (params.cvec), loras (params.loras), - memory (params.memory), + mctx (params.mctx), cross (params.cross), cb_func (params.cb), - res (std::make_unique()) { + res (params.res), + ctx0 (res->get_ctx()), + gf (res->get_gf()) { + res->set_params(params); } -int64_t llm_graph_context::n_pos_per_embd() const { - return arch == LLM_ARCH_QWEN2VL ? 
4 : 1; -} - void llm_graph_context::cb(ggml_tensor * cur, const char * name, int il) const { if (cb_func) { cb_func(ubatch, cur, name, il); @@ -741,12 +687,20 @@ ggml_tensor * llm_graph_context::build_ffn( switch (type_op) { case LLM_FFN_SILU: - { + if (gate && type_gate == LLM_FFN_PAR) { + cur = ggml_swiglu_split(ctx0, cur, tmp); + cb(cur, "ffn_swiglu", il); + type_gate = LLM_FFN_SEQ; + } else { cur = ggml_silu(ctx0, cur); cb(cur, "ffn_silu", il); } break; case LLM_FFN_GELU: - { + if (gate && type_gate == LLM_FFN_PAR) { + cur = ggml_geglu_split(ctx0, cur, tmp); + cb(cur, "ffn_geglu", il); + type_gate = LLM_FFN_SEQ; + } else { cur = ggml_gelu(ctx0, cur); cb(cur, "ffn_gelu", il); if (act_scales != NULL) { @@ -755,7 +709,11 @@ ggml_tensor * llm_graph_context::build_ffn( } } break; case LLM_FFN_RELU: - { + if (gate && type_gate == LLM_FFN_PAR) { + cur = ggml_reglu_split(ctx0, cur, tmp); + cb(cur, "ffn_reglu", il); + type_gate = LLM_FFN_SEQ; + } else { cur = ggml_relu(ctx0, cur); cb(cur, "ffn_relu", il); } break; @@ -769,17 +727,21 @@ ggml_tensor * llm_graph_context::build_ffn( } break; case LLM_FFN_SWIGLU: { - // Project to 4h. If using swiglu double the output width, see https://arxiv.org/pdf/2002.05202.pdf - int64_t split_point = cur->ne[0] / 2; - ggml_tensor * x0 = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, split_point, cur->ne[1], cur->nb[1], 0)); - ggml_tensor * x1 = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, split_point, cur->ne[1], cur->nb[1], split_point * ggml_element_size(cur))); - - x0 = ggml_silu(ctx0, x0); - cb(cur, "ffn_silu", il); - - cur = ggml_mul(ctx0, x0, x1); - cb(cur, "ffn_mul", il); + cur = ggml_swiglu(ctx0, cur); + cb(cur, "ffn_swiglu", il); } break; + case LLM_FFN_GEGLU: + { + cur = ggml_geglu(ctx0, cur); + cb(cur, "ffn_geglu", il); + } break; + case LLM_FFN_REGLU: + { + cur = ggml_reglu(ctx0, cur); + cb(cur, "ffn_reglu", il); + } break; + default: + GGML_ABORT("fatal error"); } if (gate && type_gate == LLM_FFN_PAR) { @@ -789,8 +751,8 @@ ggml_tensor * llm_graph_context::build_ffn( if (down) { cur = build_lora_mm(down, cur); - if (arch == LLM_ARCH_GLM4) { - // GLM4 seems to have numerical issues with half-precision accumulators + if (arch == LLM_ARCH_GLM4 || arch == LLM_ARCH_GLM4_MOE) { + // GLM4 and GLM4_MOE seem to have numerical issues with half-precision accumulators ggml_mul_mat_set_prec(cur, GGML_PREC_F32); } } @@ -825,13 +787,64 @@ ggml_tensor * llm_graph_context::build_moe_ffn( bool scale_w, float w_scale, llama_expert_gating_func_type gating_op, - int il) const { + int il, + ggml_tensor * probs_in) const { + return build_moe_ffn( + cur, + gate_inp, /* gate_inp_b */ nullptr, + up_exps, /* up_exps_b */ nullptr, + gate_exps, /* gate_exps_b */ nullptr, + down_exps, /* down_exps_b */ nullptr, + exp_probs_b, + n_expert, + n_expert_used, + type_op, + norm_w, + scale_w, + w_scale, + gating_op, + il, + probs_in + ); +} + +ggml_tensor * llm_graph_context::build_moe_ffn( + ggml_tensor * cur, + ggml_tensor * gate_inp, + ggml_tensor * gate_inp_b, + ggml_tensor * up_exps, + ggml_tensor * up_exps_b, + ggml_tensor * gate_exps, + ggml_tensor * gate_exps_b, + ggml_tensor * down_exps, + ggml_tensor * down_exps_b, + ggml_tensor * exp_probs_b, + int64_t n_expert, + int64_t n_expert_used, + llm_ffn_op_type type_op, + bool norm_w, + bool scale_w, + float w_scale, + llama_expert_gating_func_type gating_op, + int il, + ggml_tensor * probs_in) const { const int64_t n_embd = cur->ne[0]; const int64_t n_tokens = cur->ne[1]; const bool weight_before_ffn = arch == LLM_ARCH_LLAMA4; // for llama4, 
we apply the sigmoid-ed weights before the FFN - ggml_tensor * logits = build_lora_mm(gate_inp, cur); // [n_expert, n_tokens] - cb(logits, "ffn_moe_logits", il); + ggml_tensor * logits = nullptr; + + if (probs_in == nullptr) { + logits = build_lora_mm(gate_inp, cur); // [n_expert, n_tokens] + cb(logits, "ffn_moe_logits", il); + } else { + logits = probs_in; + } + + if (gate_inp_b) { + logits = ggml_add(ctx0, logits, gate_inp_b); + cb(logits, "ffn_moe_logits_biased", il); + } ggml_tensor * probs = nullptr; switch (gating_op) { @@ -843,6 +856,10 @@ ggml_tensor * llm_graph_context::build_moe_ffn( { probs = ggml_sigmoid(ctx0, logits); // [n_expert, n_tokens] } break; + case LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX_WEIGHT: + { + probs = logits; // [n_expert, n_tokens] + } break; default: GGML_ABORT("fatal error"); } @@ -871,6 +888,13 @@ ggml_tensor * llm_graph_context::build_moe_ffn( ggml_reshape_3d(ctx0, probs, 1, n_expert, n_tokens), selected_experts); // [1, n_expert_used, n_tokens] cb(weights, "ffn_moe_weights", il); + if (gating_op == LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX_WEIGHT) { + weights = ggml_reshape_2d(ctx0, weights, n_expert_used, n_tokens); + weights = ggml_soft_max(ctx0, weights); // [n_expert_used, n_tokens] + weights = ggml_reshape_3d(ctx0, weights, 1, n_expert_used, n_tokens); + cb(weights, "ffn_moe_weights_softmax", il); + } + if (norm_w) { weights = ggml_reshape_2d(ctx0, weights, n_expert_used, n_tokens); @@ -890,9 +914,8 @@ ggml_tensor * llm_graph_context::build_moe_ffn( cur = ggml_reshape_3d(ctx0, cur, n_embd, 1, n_tokens); if (weight_before_ffn) { - // TODO: this is a workaround as we don't yet have a repeat op that takes custom dim (ggml_repeat_4d) - ggml_tensor * repeated = ggml_new_tensor_3d(ctx0, cur->type, n_embd, n_expert_used, n_tokens); - repeated = ggml_repeat(ctx0, cur, repeated); // [n_embd, n_expert_used, n_tokens] + // repeat cur to [n_embd, n_expert_used, n_tokens] + ggml_tensor * repeated = ggml_repeat_4d(ctx0, cur, n_embd, n_expert_used, n_tokens, 1); cur = ggml_mul(ctx0, repeated, weights); cb(cur, "ffn_moe_weighted", il); } @@ -900,6 +923,11 @@ ggml_tensor * llm_graph_context::build_moe_ffn( ggml_tensor * up = build_lora_mm_id(up_exps, cur, selected_experts); // [n_ff, n_expert_used, n_tokens] cb(up, "ffn_moe_up", il); + if (up_exps_b) { + up = ggml_add_id(ctx0, up, up_exps_b, selected_experts); + cb(up, "ffn_moe_up_biased", il); + } + ggml_tensor * experts = nullptr; if (gate_exps) { cur = build_lora_mm_id(gate_exps, cur, selected_experts); // [n_ff, n_expert_used, n_tokens] @@ -908,48 +936,83 @@ ggml_tensor * llm_graph_context::build_moe_ffn( cur = up; } + if (gate_exps_b) { + cur = ggml_add_id(ctx0, cur, gate_exps_b, selected_experts); + cb(cur, "ffn_moe_gate_biased", il); + } + switch (type_op) { case LLM_FFN_SILU: - { + if (gate_exps) { + cur = ggml_swiglu_split(ctx0, cur, up); + cb(cur, "ffn_moe_swiglu", il); + } else { cur = ggml_silu(ctx0, cur); cb(cur, "ffn_moe_silu", il); } break; case LLM_FFN_GELU: - { + if (gate_exps) { + cur = ggml_geglu_split(ctx0, cur, up); + cb(cur, "ffn_moe_geglu", il); + } else { cur = ggml_gelu(ctx0, cur); cb(cur, "ffn_moe_gelu", il); } break; + case LLM_FFN_SWIGLU_OAI_MOE: + { + // TODO: move to hparams? 
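// (editor's sketch, hedged) assuming the gpt-oss-style clamped SwiGLU, ggml_swiglu_oai
// computes per element, with gate g, linear branch u, and the alpha/limit constants
// set just below:
//   g' = min(g, limit);  u' = clamp(u, -limit, limit)
//   out = (u' + 1) * g' * sigmoid(alpha * g')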
+ constexpr float alpha = 1.702f; + constexpr float limit = 7.0f; + cur = ggml_swiglu_oai(ctx0, cur, up, alpha, limit); + cb(cur, "ffn_moe_swiglu_oai", il); + } break; + case LLM_FFN_RELU: + if (gate_exps) { + cur = ggml_reglu_split(ctx0, cur, up); + cb(cur, "ffn_moe_reglu", il); + } else { + cur = ggml_relu(ctx0, cur); + cb(cur, "ffn_moe_relu", il); + } break; default: GGML_ABORT("fatal error"); } - if (gate_exps) { - cur = ggml_mul(ctx0, cur, up); // [n_ff, n_expert_used, n_tokens] - cb(cur, "ffn_moe_gate_par", il); - } - experts = build_lora_mm_id(down_exps, cur, selected_experts); // [n_embd, n_expert_used, n_tokens] cb(experts, "ffn_moe_down", il); + if (down_exps_b) { + experts = ggml_add_id(ctx0, experts, down_exps_b, selected_experts); + cb(experts, "ffn_moe_down_biased", il); + } + if (!weight_before_ffn) { experts = ggml_mul(ctx0, experts, weights); cb(cur, "ffn_moe_weighted", il); } - // aggregate experts - ggml_tensor * moe_out = nullptr; - for (int i = 0; i < n_expert_used; ++i) { - ggml_tensor * cur_expert = ggml_view_2d(ctx0, experts, n_embd, n_tokens, - experts->nb[2], i*experts->nb[1]); + ggml_tensor * cur_experts[LLAMA_MAX_EXPERTS] = { nullptr }; - if (i == 0) { - moe_out = cur_expert; - } else { - moe_out = ggml_add(ctx0, moe_out, cur_expert); - } + assert(n_expert_used > 0); + + // order the views before the adds + for (uint32_t i = 0; i < hparams.n_expert_used; ++i) { + cur_experts[i] = ggml_view_2d(ctx0, experts, n_embd, n_tokens, experts->nb[2], i*experts->nb[1]); + + ggml_build_forward_expand(gf, cur_experts[i]); } - if (n_expert_used == 1) { + // aggregate experts + // note: here we explicitly use hparams.n_expert_used instead of n_expert_used + // to avoid potentially a large number of add nodes during warmup + // ref: https://github.com/ggml-org/llama.cpp/pull/14753 + ggml_tensor * moe_out = cur_experts[0]; + + for (uint32_t i = 1; i < hparams.n_expert_used; ++i) { + moe_out = ggml_add(ctx0, moe_out, cur_experts[i]); + } + + if (hparams.n_expert_used == 1) { // avoid returning a non-contiguous tensor moe_out = ggml_cont(ctx0, moe_out); } @@ -1012,11 +1075,11 @@ ggml_tensor * llm_graph_context::build_inp_embd(ggml_tensor * tok_embd) const { } ggml_tensor * llm_graph_context::build_inp_pos() const { - auto inp = std::make_unique<llm_graph_input_pos>(n_pos_per_embd()); + auto inp = std::make_unique<llm_graph_input_pos>(hparams.n_pos_per_embd()); auto & cur = inp->pos; - cur = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens*n_pos_per_embd()); + cur = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, (int64_t)n_tokens*hparams.n_pos_per_embd()); ggml_set_input(cur); res->add_input(std::move(inp)); @@ -1039,6 +1102,14 @@ ggml_tensor * llm_graph_context::build_inp_attn_scale() const { } ggml_tensor * llm_graph_context::build_inp_out_ids() const { + // note: when all tokens are output, we could skip this optimization to spare the ggml_get_rows() calls, + // but this would make the graph topology depend on the number of output tokens, which can interfere with + // features that require constant topology such as pipeline parallelism + // ref: https://github.com/ggml-org/llama.cpp/pull/14275#issuecomment-2987424471 + //if (n_outputs < n_tokens) { + // return nullptr; + //} + auto inp = std::make_unique<llm_graph_input_out_ids>(hparams, cparams, n_outputs); auto & cur = inp->out_ids; @@ -1056,7 +1127,7 @@ ggml_tensor * llm_graph_context::build_inp_mean() const { auto & cur = inp->mean; - cur = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_tokens, n_tokens); + cur = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_tokens, ubatch.n_seqs_unq); ggml_set_input(cur);
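// (editor's note, hedged) with this change the mean tensor acts as a pooling matrix of
// shape [n_tokens, n_seqs_unq] rather than [n_tokens, n_tokens]: row s holds 1/len(s)
// at the token positions of sequence s (filled by llm_graph_input_mean::set_input
// above), so multiplying the token embeddings by it yields one averaged vector per
// unique sequence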
res->add_input(std::move(inp)); @@ -1069,41 +1140,7 @@ ggml_tensor * llm_graph_context::build_inp_cls() const { auto & cur = inp->cls; - cur = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens); - ggml_set_input(cur); - - res->add_input(std::move(inp)); - - return cur; -} - -ggml_tensor * llm_graph_context::build_inp_s_copy() const { - const llama_kv_cache_recurrent * kv_self = static_cast(memory); - - auto inp = std::make_unique(kv_self); - - const auto n_kv = kv_self->n; - - auto & cur = inp->s_copy; - - cur = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_kv); - ggml_set_input(cur); - - res->add_input(std::move(inp)); - - return cur; -} - -ggml_tensor * llm_graph_context::build_inp_s_mask() const { - const llama_kv_cache_recurrent * kv_self = static_cast(memory); - - auto inp = std::make_unique(kv_self); - - const auto n_kv = kv_self->n; - - auto & cur = inp->s_mask; - - cur = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, 1, n_kv); + cur = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, ubatch.n_seqs_unq); ggml_set_input(cur); res->add_input(std::move(inp)); @@ -1149,11 +1186,11 @@ ggml_tensor * llm_graph_context::build_inp_pos_bucket_enc() const { } ggml_tensor * llm_graph_context::build_inp_pos_bucket_dec() const { - const llama_kv_cache_unified * kv_self = static_cast(memory); + const auto * mctx_cur = static_cast(mctx); - auto inp = std::make_unique(hparams, kv_self); + auto inp = std::make_unique(hparams, mctx_cur); - const auto n_kv = kv_self->n; + const auto n_kv = mctx_cur->get_n_kv(); auto & cur = inp->pos_bucket; @@ -1181,27 +1218,26 @@ ggml_tensor * llm_graph_context::build_pos_bias(ggml_tensor * pos_bucket, ggml_t } ggml_tensor * llm_graph_context::build_attn_mha( - ggml_cgraph * gf, ggml_tensor * q, ggml_tensor * k, ggml_tensor * v, ggml_tensor * kq_b, ggml_tensor * kq_mask, ggml_tensor * v_mla, - bool v_trans, + ggml_tensor * sinks, float kq_scale) const { - //const int64_t n_embd_k_gqa = hparams.n_embd_k_gqa(il); - //const int64_t n_embd_v_gqa = hparams.n_embd_v_gqa(il); + const bool v_trans = v->nb[1] > v->nb[2]; - //const int64_t n_head = hparams.n_head(il); - //const int64_t n_head_kv = hparams.n_head_kv(il); + // split the batch into streams if needed + const auto n_stream = k->ne[3]; - //const auto & n_embd_head_k = hparams.n_embd_head_k; - //const auto & n_embd_head_v = hparams.n_embd_head_v; + q = ggml_reshape_4d(ctx0, q, q->ne[0], q->ne[1], q->ne[2]/n_stream, n_stream); - const auto n_tokens = q->ne[1]; - const auto n_head = q->ne[2]; - const auto n_kv = k->ne[1]; + q = ggml_permute(ctx0, q, 0, 2, 1, 3); + k = ggml_permute(ctx0, k, 0, 2, 1, 3); + v = ggml_permute(ctx0, v, 0, 2, 1, 3); + + const auto n_kv = k->ne[1]; ggml_tensor * cur; @@ -1225,7 +1261,8 @@ ggml_tensor * llm_graph_context::build_attn_mha( cur = ggml_flash_attn_ext(ctx0, q, k, v, kq_mask, kq_scale, hparams.f_max_alibi_bias, hparams.attn_soft_cap ? 
hparams.f_attn_logit_softcapping : 0.0f); - ggml_flash_attn_ext_set_prec(cur, GGML_PREC_F32); + ggml_flash_attn_ext_add_sinks(cur, sinks); + ggml_flash_attn_ext_set_prec (cur, GGML_PREC_F32); if (v_mla) { #if 0 @@ -1243,7 +1280,7 @@ ggml_tensor * llm_graph_context::build_attn_mha( #endif } - cur = ggml_reshape_2d(ctx0, cur, cur->ne[0]*n_head, n_tokens); + cur = ggml_reshape_2d(ctx0, cur, cur->ne[0]*cur->ne[1], cur->ne[2]*cur->ne[3]); } else { ggml_tensor * kq = ggml_mul_mat(ctx0, k, q); @@ -1273,6 +1310,7 @@ ggml_tensor * llm_graph_context::build_attn_mha( } kq = ggml_soft_max_ext(ctx0, kq, kq_mask, kq_scale, hparams.f_max_alibi_bias); + ggml_soft_max_add_sinks(kq, sinks); if (!v_trans) { // note: avoid this branch @@ -1288,7 +1326,8 @@ ggml_tensor * llm_graph_context::build_attn_mha( cur = ggml_permute(ctx0, kqv, 0, 2, 1, 3); - cur = ggml_cont_2d(ctx0, cur, cur->ne[0]*n_head, n_tokens); + // recombine streams + cur = ggml_cont_2d(ctx0, cur, cur->ne[0]*cur->ne[1], cur->ne[2]*cur->ne[3]); if (!cparams.offload_kqv) { // all nodes between the KV store and the attention output are run on the CPU @@ -1305,8 +1344,7 @@ llm_graph_input_attn_no_cache * llm_graph_context::build_attn_inp_no_cache() con auto inp = std::make_unique(hparams, cparams); // note: there is no KV cache, so the number of KV values is equal to the number of tokens in the batch - inp->kq_mask = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_tokens, GGML_PAD(n_tokens, GGML_KQ_MASK_PAD)); - //cb(inp_kq_mask, "KQ_mask", -1); + inp->kq_mask = ggml_new_tensor_4d(ctx0, GGML_TYPE_F32, n_tokens, GGML_PAD(n_tokens, GGML_KQ_MASK_PAD), 1, 1); ggml_set_input(inp->kq_mask); inp->kq_mask_cnv = cparams.flash_attn ? ggml_cast(ctx0, inp->kq_mask, GGML_TYPE_F16) : inp->kq_mask; @@ -1316,7 +1354,6 @@ llm_graph_input_attn_no_cache * llm_graph_context::build_attn_inp_no_cache() con ggml_tensor * llm_graph_context::build_attn( llm_graph_input_attn_no_cache * inp, - ggml_cgraph * gf, ggml_tensor * wo, ggml_tensor * wo_b, ggml_tensor * q_cur, @@ -1336,17 +1373,15 @@ ggml_tensor * llm_graph_context::build_attn( const auto & kq_mask = inp->get_kq_mask(); - ggml_tensor * q = ggml_permute(ctx0, q_cur, 0, 2, 1, 3); - //cb(q, "q", il); + // [TAG_NO_CACHE_PAD] + // TODO: if ubatch.equal_seqs() == true, we can split the three tensors below into ubatch.n_seqs_unq streams + assert(!ubatch.equal_seqs()); - ggml_tensor * k = ggml_permute(ctx0, k_cur, 0, 2, 1, 3); - //cb(k, "k", il); - - ggml_tensor * v = ggml_permute(ctx0, v_cur, 0, 2, 1, 3); - //cb(k, "v", il); - - ggml_tensor * cur = build_attn_mha(gf, q, k, v, kq_b, kq_mask, v_mla, false, kq_scale); + ggml_tensor * q = q_cur; + ggml_tensor * k = k_cur; + ggml_tensor * v = v_cur; + ggml_tensor * cur = build_attn_mha(q, k, v, kq_b, kq_mask, v_mla, nullptr, kq_scale); cb(cur, "kqv_out", il); if (wo) { @@ -1364,35 +1399,44 @@ ggml_tensor * llm_graph_context::build_attn( return cur; } -llm_graph_input_attn_kv_unified * llm_graph_context::build_attn_inp_kv_unified() const { - const llama_kv_cache_unified * kv_self = static_cast(memory); +static std::unique_ptr build_attn_inp_kv_unified_impl( + ggml_context * ctx0, + const llama_ubatch & ubatch, + const llama_hparams & hparams, + const llama_cparams & cparams, + const llama_kv_cache_unified_context * mctx_cur) { - auto inp = std::make_unique(hparams, cparams, kv_self); + auto inp = std::make_unique(hparams, cparams, mctx_cur); - const auto n_kv = kv_self->n; + { + GGML_ASSERT(hparams.swa_type == LLAMA_SWA_TYPE_NONE && "Use llama_kv_cache_unified_iswa for SWA"); - 
inp->self_kq_mask = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_kv, GGML_PAD(n_tokens, GGML_KQ_MASK_PAD)); - //cb(inp->self_kq_mask, "KQ_mask", -1); - ggml_set_input(inp->self_kq_mask); + const auto n_kv = mctx_cur->get_n_kv(); + const auto n_tokens = ubatch.n_tokens; + const auto n_stream = cparams.kv_unified ? 1 : ubatch.n_seqs_unq; - inp->self_kq_mask_cnv = cparams.flash_attn ? ggml_cast(ctx0, inp->self_kq_mask, GGML_TYPE_F16) : inp->self_kq_mask; + inp->self_k_idxs = mctx_cur->build_input_k_idxs(ctx0, ubatch); + inp->self_v_idxs = mctx_cur->build_input_v_idxs(ctx0, ubatch); - if (hparams.n_swa_pattern > 1) { - GGML_ASSERT(hparams.n_swa > 0); + inp->self_kq_mask = ggml_new_tensor_4d(ctx0, GGML_TYPE_F32, n_kv, GGML_PAD(n_tokens/n_stream, GGML_KQ_MASK_PAD), 1, n_stream); + ggml_set_input(inp->self_kq_mask); - inp->self_kq_mask_swa = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_kv, GGML_PAD(n_tokens, GGML_KQ_MASK_PAD)); - //cb(inp->self_kq_mask_swa, "KQ_mask_swa", -1); - ggml_set_input(inp->self_kq_mask_swa); - - inp->self_kq_mask_swa_cnv = cparams.flash_attn ? ggml_cast(ctx0, inp->self_kq_mask_swa, GGML_TYPE_F16) : inp->self_kq_mask_swa; + inp->self_kq_mask_cnv = cparams.flash_attn ? ggml_cast(ctx0, inp->self_kq_mask, GGML_TYPE_F16) : inp->self_kq_mask; } + return inp; +} + +llm_graph_input_attn_kv_unified * llm_graph_context::build_attn_inp_kv_unified() const { + const auto * mctx_cur = static_cast(mctx); + + auto inp = build_attn_inp_kv_unified_impl(ctx0, ubatch, hparams, cparams, mctx_cur); + return (llm_graph_input_attn_kv_unified *) res->add_input(std::move(inp)); } ggml_tensor * llm_graph_context::build_attn( llm_graph_input_attn_kv_unified * inp, - ggml_cgraph * gf, ggml_tensor * wo, ggml_tensor * wo_b, ggml_tensor * q_cur, @@ -1408,82 +1452,116 @@ ggml_tensor * llm_graph_context::build_attn( ggml_build_forward_expand(gf, k_cur); ggml_build_forward_expand(gf, v_cur); - const llama_kv_cache_unified * kv_self = static_cast(memory); - const auto & n_ctx = cparams.n_ctx; - - const int64_t n_embd_k_gqa = hparams.n_embd_k_gqa(il); - const int64_t n_embd_v_gqa = hparams.n_embd_v_gqa(il); - - const auto n_tokens = q_cur->ne[2]; - - const bool v_trans = !cparams.flash_attn; + const auto * mctx_cur = inp->mctx; // store to KV cache { - const auto kv_head = kv_self->head; + const auto & k_idxs = inp->get_k_idxs(); + const auto & v_idxs = inp->get_v_idxs(); - GGML_ASSERT(kv_self->size == n_ctx); - - ggml_tensor * k_cache_view = ggml_view_1d(ctx0, kv_self->k_l[il], n_tokens*n_embd_k_gqa, ggml_row_size(kv_self->k_l[il]->type, n_embd_k_gqa)*kv_head); - //cb(k_cache_view, "k_cache_view", il); - - // note: storing RoPE-ed version of K in the KV cache - ggml_build_forward_expand(gf, ggml_cpy(ctx0, k_cur, k_cache_view)); - - v_cur = ggml_reshape_2d(ctx0, v_cur, n_embd_v_gqa, n_tokens); - - ggml_tensor * v_cache_view = nullptr; - - if (!v_trans) { - v_cache_view = ggml_view_1d(ctx0, kv_self->v_l[il], n_tokens*n_embd_v_gqa, ggml_row_size(kv_self->v_l[il]->type, n_embd_v_gqa)*kv_head); - } else { - // note: the V cache is transposed when not using flash attention - v_cache_view = ggml_view_2d(ctx0, kv_self->v_l[il], n_tokens, n_embd_v_gqa, - ( n_ctx)*ggml_element_size(kv_self->v_l[il]), - (kv_head)*ggml_element_size(kv_self->v_l[il])); - - v_cur = ggml_transpose(ctx0, v_cur); - } - //cb(v_cache_view, "v_cache_view", il); - - ggml_build_forward_expand(gf, ggml_cpy(ctx0, v_cur, v_cache_view)); + ggml_build_forward_expand(gf, mctx_cur->cpy_k(ctx0, k_cur, k_idxs, il)); + ggml_build_forward_expand(gf, 
mctx_cur->cpy_v(ctx0, v_cur, v_idxs, il)); } + const auto & kq_mask = inp->get_kq_mask(); + + ggml_tensor * q = q_cur; + ggml_tensor * k = mctx_cur->get_k(ctx0, il); + ggml_tensor * v = mctx_cur->get_v(ctx0, il); + + ggml_tensor * cur = build_attn_mha(q, k, v, kq_b, kq_mask, v_mla, nullptr, kq_scale); + cb(cur, "kqv_out", il); + + if (wo) { + cur = build_lora_mm(wo, cur); + if (arch == LLM_ARCH_GLM4 || arch == LLM_ARCH_GLM4_MOE) { + // GLM4 and GLM4_MOE seem to have numerical issues with half-precision accumulators + ggml_mul_mat_set_prec(cur, GGML_PREC_F32); + } + } + + if (wo_b) { + cur = ggml_add(ctx0, cur, wo_b); + } + + return cur; +} + +ggml_tensor * llm_graph_context::build_attn( + llm_graph_input_attn_kv_unified_iswa * inp, + ggml_tensor * wo, + ggml_tensor * wo_b, + ggml_tensor * q_cur, + ggml_tensor * k_cur, + ggml_tensor * v_cur, + ggml_tensor * kq_b, + ggml_tensor * v_mla, + float kq_scale, + int il) const { + return build_attn_with_sinks( + inp, + wo, + wo_b, + q_cur, + k_cur, + v_cur, + kq_b, + v_mla, + nullptr, + kq_scale, + il); +} + +ggml_tensor * llm_graph_context::build_attn_with_sinks( + llm_graph_input_attn_kv_unified_iswa * inp, + ggml_tensor * wo, + ggml_tensor * wo_b, + ggml_tensor * q_cur, + ggml_tensor * k_cur, + ggml_tensor * v_cur, + ggml_tensor * kq_b, + ggml_tensor * v_mla, + ggml_tensor * sinks, + float kq_scale, + int il) const { + // these nodes are added to the graph together so that they are not reordered + // by doing so, the number of splits in the graph is reduced + ggml_build_forward_expand(gf, q_cur); + + if (k_cur) { + ggml_build_forward_expand(gf, k_cur); + } + + if (v_cur) { + ggml_build_forward_expand(gf, v_cur); + } + + const auto * mctx_iswa = inp->mctx; + const bool is_swa = hparams.is_swa(il); + const auto * mctx_cur = is_swa ? mctx_iswa->get_swa() : mctx_iswa->get_base(); + + // optionally store to KV cache + if (k_cur) { + const auto & k_idxs = is_swa ? inp->get_k_idxs_swa() : inp->get_k_idxs(); + + ggml_build_forward_expand(gf, mctx_cur->cpy_k(ctx0, k_cur, k_idxs, il)); + } + + if (v_cur) { + const auto & v_idxs = is_swa ? inp->get_v_idxs_swa() : inp->get_v_idxs(); + + ggml_build_forward_expand(gf, mctx_cur->cpy_v(ctx0, v_cur, v_idxs, il)); + } + const auto & kq_mask = is_swa ? inp->get_kq_mask_swa() : inp->get_kq_mask(); - const auto n_kv = kv_self->n; + ggml_tensor * q = q_cur; + ggml_tensor * k = mctx_cur->get_k(ctx0, il); + ggml_tensor * v = mctx_cur->get_v(ctx0, il); - const int64_t n_head_kv = hparams.n_head_kv(il); - - const auto & n_embd_head_k = hparams.n_embd_head_k; - const auto & n_embd_head_v = hparams.n_embd_head_v; - - ggml_tensor * q = ggml_permute(ctx0, q_cur, 0, 2, 1, 3); - //cb(q, "q", il); - - ggml_tensor * k = - ggml_view_3d(ctx0, kv_self->k_l[il], - n_embd_head_k, n_kv, n_head_kv, - ggml_row_size(kv_self->k_l[il]->type, n_embd_k_gqa), - ggml_row_size(kv_self->k_l[il]->type, n_embd_head_k), - 0); - //cb(k, "k", il); - - ggml_tensor * v = !v_trans ? 
- ggml_view_3d(ctx0, kv_self->v_l[il], - n_embd_head_v, n_kv, n_head_kv, - ggml_row_size(kv_self->v_l[il]->type, n_embd_v_gqa), - ggml_row_size(kv_self->v_l[il]->type, n_embd_head_v), - 0) : - ggml_view_3d(ctx0, kv_self->v_l[il], - n_kv, n_embd_head_v, n_head_kv, - ggml_element_size(kv_self->v_l[il])*n_ctx, - ggml_element_size(kv_self->v_l[il])*n_ctx*n_embd_head_v, - 0); - - ggml_tensor * cur = build_attn_mha(gf, q, k, v, kq_b, kq_mask, v_mla, v_trans, kq_scale); + ggml_tensor * cur = build_attn_mha(q, k, v, kq_b, kq_mask, v_mla, sinks, kq_scale); cb(cur, "kqv_out", il); if (wo) { @@ -1506,7 +1584,7 @@ llm_graph_input_attn_cross * llm_graph_context::build_attn_inp_cross() const { const int32_t n_enc = !cross->v_embd.empty() ? cross->n_enc : hparams.n_ctx_train; - inp->cross_kq_mask = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_enc, GGML_PAD(n_tokens, GGML_KQ_MASK_PAD)); + inp->cross_kq_mask = ggml_new_tensor_4d(ctx0, GGML_TYPE_F32, n_enc, GGML_PAD(n_tokens, GGML_KQ_MASK_PAD), 1, 1); ggml_set_input(inp->cross_kq_mask); inp->cross_kq_mask_cnv = cparams.flash_attn ? ggml_cast(ctx0, inp->cross_kq_mask, GGML_TYPE_F16) : inp->cross_kq_mask; @@ -1516,7 +1594,6 @@ llm_graph_input_attn_cross * llm_graph_context::build_attn_inp_cross() const { ggml_tensor * llm_graph_context::build_attn( llm_graph_input_attn_cross * inp, - ggml_cgraph * gf, ggml_tensor * wo, ggml_tensor * wo_b, ggml_tensor * q_cur, @@ -1534,17 +1611,11 @@ ggml_tensor * llm_graph_context::build_attn( const auto & kq_mask = inp->get_kq_mask_cross(); - ggml_tensor * q = ggml_permute(ctx0, q_cur, 0, 2, 1, 3); - //cb(q, "q", il); - - ggml_tensor * k = ggml_permute(ctx0, k_cur, 0, 2, 1, 3); - //cb(k, "k", il); - - ggml_tensor * v = ggml_permute(ctx0, v_cur, 0, 2, 1, 3); - //cb(k, "v", il); - - ggml_tensor * cur = build_attn_mha(gf, q, k, v, kq_b, kq_mask, v_mla, false, kq_scale); + ggml_tensor * q = q_cur; + ggml_tensor * k = k_cur; + ggml_tensor * v = v_cur; + ggml_tensor * cur = build_attn_mha(q, k, v, kq_b, kq_mask, v_mla, nullptr, kq_scale); cb(cur, "kqv_out", il); if (wo) { @@ -1562,56 +1633,135 @@ ggml_tensor * llm_graph_context::build_attn( return cur; } -ggml_tensor * llm_graph_context::build_copy_mask_state( - ggml_cgraph * gf, - ggml_tensor * s, - ggml_tensor * state_copy, - ggml_tensor * state_mask, - int32_t n_state, - int32_t n_seqs) const { - const llama_kv_cache_recurrent * kv_self = static_cast(memory); +// TODO: maybe separate the inner implementation into a separate function +// like with the non-sliding window equivalent +// once sliding-window hybrid caches are a thing. +llm_graph_input_attn_kv_unified_iswa * llm_graph_context::build_attn_inp_kv_unified_iswa() const { + const auto * mctx_cur = static_cast(mctx); - const auto n_kv = kv_self->n; - const auto kv_head = kv_self->head; + auto inp = std::make_unique(hparams, cparams, mctx_cur); - ggml_tensor * states = ggml_reshape_2d(ctx0, s, n_state, kv_self->size); + const auto n_stream = cparams.kv_unified ? 1 : ubatch.n_seqs_unq; + + { + const auto n_kv = mctx_cur->get_base()->get_n_kv(); + + inp->self_k_idxs = mctx_cur->get_base()->build_input_k_idxs(ctx0, ubatch); + inp->self_v_idxs = mctx_cur->get_base()->build_input_v_idxs(ctx0, ubatch); + + inp->self_kq_mask = ggml_new_tensor_4d(ctx0, GGML_TYPE_F32, n_kv, GGML_PAD(n_tokens/n_stream, GGML_KQ_MASK_PAD), 1, n_stream); + ggml_set_input(inp->self_kq_mask); + + inp->self_kq_mask_cnv = cparams.flash_attn ? 
ggml_cast(ctx0, inp->self_kq_mask, GGML_TYPE_F16) : inp->self_kq_mask; + } + + { + GGML_ASSERT(hparams.swa_type != LLAMA_SWA_TYPE_NONE && "Use llama_kv_cache_unified for non-SWA"); + + const auto n_kv = mctx_cur->get_swa()->get_n_kv(); + + inp->self_k_idxs_swa = mctx_cur->get_swa()->build_input_k_idxs(ctx0, ubatch); + inp->self_v_idxs_swa = mctx_cur->get_swa()->build_input_v_idxs(ctx0, ubatch); + + inp->self_kq_mask_swa = ggml_new_tensor_4d(ctx0, GGML_TYPE_F32, n_kv, GGML_PAD(n_tokens/n_stream, GGML_KQ_MASK_PAD), 1, n_stream); + ggml_set_input(inp->self_kq_mask_swa); + + inp->self_kq_mask_swa_cnv = cparams.flash_attn ? ggml_cast(ctx0, inp->self_kq_mask_swa, GGML_TYPE_F16) : inp->self_kq_mask_swa; + } + + return (llm_graph_input_attn_kv_unified_iswa *) res->add_input(std::move(inp)); +} + +ggml_tensor * llm_graph_context::build_rs( + ggml_tensor * s, + ggml_tensor * state_copy_main, + ggml_tensor * state_copy_extra, + int32_t state_size, + int32_t n_seqs, + uint32_t n_rs, + uint32_t rs_head, + uint32_t rs_size, + int32_t rs_zero, + const llm_graph_get_rows_fn & get_state_rows) const { + + ggml_tensor * states = ggml_reshape_2d(ctx0, s, state_size, rs_size); + + // Clear a single state which will then be copied to the other cleared states. + // Note that this is a no-op when the view is zero-sized. + ggml_tensor * state_zero = ggml_view_1d(ctx0, states, state_size*(rs_zero >= 0), rs_zero*states->nb[1]*(rs_zero >= 0)); + ggml_build_forward_expand(gf, ggml_scale_inplace(ctx0, state_zero, 0)); // copy states - // NOTE: assuming the copy destinations are ALL contained between kv_head and kv_head + n_kv - // this shrinks the tensors's ne[1] to n_kv - states = ggml_get_rows(ctx0, states, state_copy); + // NOTE: assuming the copy destinations are ALL contained between rs_head and rs_head + n_rs + // {state_size, rs_size} -> {state_size, n_seqs} + ggml_tensor * output_states = get_state_rows(ctx0, states, state_copy_main); + ggml_build_forward_expand(gf, output_states); - // clear states of sequences which are starting at the beginning of this batch - // FIXME: zero-out NANs? 
- states = ggml_mul(ctx0, states, state_mask); - - // copy states which won't be changed further (between n_seqs and n_kv) + // copy extra states which won't be changed further (between n_seqs and n_rs) + ggml_tensor * states_extra = ggml_get_rows(ctx0, states, state_copy_extra); ggml_build_forward_expand(gf, ggml_cpy(ctx0, - ggml_view_1d(ctx0, states, n_state*(n_kv - n_seqs), (n_seqs )*n_state*ggml_element_size(states)), - ggml_view_1d(ctx0, s, n_state*(n_kv - n_seqs), (kv_head + n_seqs)*n_state*ggml_element_size(s)))); + states_extra, + ggml_view_1d(ctx0, s, state_size*(n_rs - n_seqs), (rs_head + n_seqs)*state_size*ggml_element_size(s)))); - // the part of the states that will be used and modified - return ggml_view_2d(ctx0, states, n_state, n_seqs, states->nb[1], 0); + return output_states; +} + +static std::unique_ptr build_rs_inp_impl( + ggml_context * ctx0, + const llama_ubatch & ubatch, + const llama_memory_recurrent_context * mctx_cur) { + + auto inp = std::make_unique(mctx_cur); + + const int64_t n_rs = mctx_cur->get_n_rs(); + const int64_t n_seqs = ubatch.n_seqs; + + inp->s_copy = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_rs); + ggml_set_input(inp->s_copy); + + inp->s_copy_main = ggml_view_1d(ctx0, inp->s_copy, n_seqs, 0); + inp->s_copy_extra = ggml_view_1d(ctx0, inp->s_copy, n_rs - n_seqs, n_seqs * inp->s_copy->nb[0]); + + return inp; +} + +llm_graph_input_rs * llm_graph_context::build_rs_inp() const { + const auto * mctx_cur = static_cast(mctx); + + auto inp = build_rs_inp_impl(ctx0, ubatch, mctx_cur); + + return (llm_graph_input_rs *) res->add_input(std::move(inp)); +} + +ggml_tensor * llm_graph_context::build_rs( + llm_graph_input_rs * inp, + ggml_tensor * s, + int32_t state_size, + int32_t n_seqs, + const llm_graph_get_rows_fn & get_state_rows) const { + const auto * kv_state = inp->mctx; + + return build_rs(s, inp->s_copy_main, inp->s_copy_extra, state_size, n_seqs, + kv_state->get_n_rs(), kv_state->get_head(), kv_state->get_size(), kv_state->get_rs_z(), + get_state_rows); } ggml_tensor * llm_graph_context::build_rwkv_token_shift_load( - ggml_cgraph * gf, - ggml_tensor * state_copy, - ggml_tensor * state_mask, - const llama_ubatch & ubatch, - int il) const { - const llama_kv_cache_recurrent * kv_self = static_cast(memory); + llm_graph_input_rs * inp, + const llama_ubatch & ubatch, + int il) const { + const auto * mctx_cur = static_cast(mctx); const auto token_shift_count = hparams.token_shift_count; const int64_t n_seqs = ubatch.n_seqs; - ggml_tensor * token_shift_all = kv_self->k_l[il]; + ggml_tensor * token_shift_all = mctx_cur->get_r_l(il); - ggml_tensor * token_shift = build_copy_mask_state( - gf, token_shift_all, state_copy, state_mask, - hparams.n_embd_k_s(), n_seqs); + ggml_tensor * token_shift = build_rs( + inp, token_shift_all, + hparams.n_embd_r(), n_seqs); token_shift = ggml_reshape_3d(ctx0, token_shift, hparams.n_embd, token_shift_count, n_seqs); @@ -1622,24 +1772,34 @@ ggml_tensor * llm_graph_context::build_rwkv_token_shift_store( ggml_tensor * token_shift, const llama_ubatch & ubatch, int il) const { - const llama_kv_cache_recurrent * kv_self = static_cast(memory); + const auto * mctx_cur = static_cast(mctx); const auto token_shift_count = hparams.token_shift_count; const auto n_embd = hparams.n_embd; const int64_t n_seqs = ubatch.n_seqs; - const auto kv_head = kv_self->head; + const auto kv_head = mctx_cur->get_head(); return ggml_cpy( ctx0, ggml_view_1d(ctx0, token_shift, n_embd * n_seqs * token_shift_count, 0), - ggml_view_1d(ctx0, kv_self->k_l[il], 
hparams.n_embd_k_s() * n_seqs, hparams.n_embd_k_s() * kv_head * ggml_element_size(kv_self->k_l[il])) + ggml_view_1d(ctx0, mctx_cur->get_r_l(il), hparams.n_embd_r()*n_seqs, hparams.n_embd_r()*kv_head*ggml_element_size(mctx_cur->get_r_l(il))) ); } +llm_graph_input_mem_hybrid * llm_graph_context::build_inp_mem_hybrid() const { + const auto * mctx_cur = static_cast(mctx); + + auto inp_rs = build_rs_inp_impl(ctx0, ubatch, mctx_cur->get_recr()); + auto inp_attn = build_attn_inp_kv_unified_impl(ctx0, ubatch, hparams, cparams, mctx_cur->get_attn()); + + auto inp = std::make_unique(std::move(inp_attn), std::move(inp_rs), mctx_cur); + + return (llm_graph_input_mem_hybrid *) res->add_input(std::move(inp)); +} + void llm_graph_context::build_pooling( - ggml_cgraph * gf, ggml_tensor * cls, ggml_tensor * cls_b, ggml_tensor * cls_out, @@ -1685,20 +1845,32 @@ void llm_graph_context::build_pooling( ggml_tensor * inp_cls = build_inp_cls(); inp = ggml_get_rows(ctx0, inp, inp_cls); - // classification head - // https://github.com/huggingface/transformers/blob/5af7d41e49bbfc8319f462eb45253dcb3863dfb7/src/transformers/models/roberta/modeling_roberta.py#L1566 - GGML_ASSERT(cls != nullptr); - GGML_ASSERT(cls_b != nullptr); + if (cls) { + // classification head + // https://github.com/huggingface/transformers/blob/5af7d41e49bbfc8319f462eb45253dcb3863dfb7/src/transformers/models/roberta/modeling_roberta.py#L1566 + cur = ggml_mul_mat(ctx0, cls, inp); + if (cls_b) { + cur = ggml_add(ctx0, cur, cls_b); + } + cur = ggml_tanh(ctx0, cur); - cur = ggml_add (ctx0, ggml_mul_mat(ctx0, cls, inp), cls_b); - cur = ggml_tanh(ctx0, cur); - - // some models don't have `cls_out`, for example: https://huggingface.co/jinaai/jina-reranker-v1-tiny-en - // https://huggingface.co/jinaai/jina-reranker-v1-tiny-en/blob/cb5347e43979c3084a890e3f99491952603ae1b7/modeling_bert.py#L884-L896 - if (cls_out) { - GGML_ASSERT(cls_out_b != nullptr); - - cur = ggml_add (ctx0, ggml_mul_mat(ctx0, cls_out, cur), cls_out_b); + // some models don't have `cls_out`, for example: https://huggingface.co/jinaai/jina-reranker-v1-tiny-en + // https://huggingface.co/jinaai/jina-reranker-v1-tiny-en/blob/cb5347e43979c3084a890e3f99491952603ae1b7/modeling_bert.py#L884-L896 + if (cls_out) { + cur = ggml_mul_mat(ctx0, cls_out, cur); + if (cls_out_b) { + cur = ggml_add(ctx0, cur, cls_out_b); + } + } + } else if (cls_out) { + // Single layer classification head (direct projection) + // https://github.com/huggingface/transformers/blob/f4fc42216cd56ab6b68270bf80d811614d8d59e4/src/transformers/models/bert/modeling_bert.py#L1476 + cur = ggml_mul_mat(ctx0, cls_out, inp); + if (cls_out_b) { + cur = ggml_add(ctx0, cur, cls_out_b); + } + } else { + GGML_ABORT("RANK pooling requires either cls+cls_b or cls_out+cls_out_b"); } } break; default: @@ -1712,3 +1884,30 @@ void llm_graph_context::build_pooling( ggml_build_forward_expand(gf, cur); } + +int32_t llama_relative_position_bucket(llama_pos x, llama_pos y, uint64_t n_buckets, bool bidirectional) { + // TODO move to hparams if a T5 variant appears that uses a different value + const int64_t max_distance = 128; + + if (bidirectional) { + n_buckets >>= 1; + } + + const int64_t max_exact = n_buckets >> 1; + + int32_t relative_position = x - y; + int32_t relative_bucket = 0; + + if (bidirectional) { + relative_bucket += (relative_position > 0) * n_buckets; + relative_position = abs(relative_position); + } else { + relative_position = -std::min(relative_position, 0); + } + + int32_t relative_position_if_large = floorf(max_exact + 
logf(1.0 * relative_position / max_exact) * (n_buckets - max_exact) / log(1.0 * max_distance / max_exact));
+    relative_position_if_large = std::min(relative_position_if_large, n_buckets - 1);
+    relative_bucket += (relative_position < max_exact ? relative_position : relative_position_if_large);
+
+    return relative_bucket;
+}
diff --git a/llama/llama.cpp/src/llama-graph.h b/llama/llama.cpp/src/llama-graph.h
index 832a8c09f..6ff49de3a 100644
--- a/llama/llama.cpp/src/llama-graph.h
+++ b/llama/llama.cpp/src/llama-graph.h
@@ -1,6 +1,7 @@
 #pragma once

 #include "llama-arch.h"
+#include "llama-batch.h"
 #include "llama-hparams.h"
 #include "llama-adapter.h"

@@ -14,12 +15,14 @@ struct ggml_cgraph;
 struct ggml_context;
 struct ggml_tensor;

-struct llama_ubatch;
 struct llama_cparams;

-class llama_memory_i;
-class llama_kv_cache_unified;
-class llama_kv_cache_recurrent;
+struct llama_memory_context_i;
+
+class llama_kv_cache_unified_context;
+class llama_kv_cache_unified_iswa_context;
+class llama_memory_recurrent_context;
+class llama_memory_hybrid_context;

 // certain models (typically multi-modal) can produce different types of graphs
 enum llm_graph_type {
@@ -34,6 +37,9 @@ enum llm_ffn_op_type {
     LLM_FFN_RELU,
     LLM_FFN_RELU_SQR,
     LLM_FFN_SWIGLU,
+    LLM_FFN_GEGLU,
+    LLM_FFN_REGLU,
+    LLM_FFN_SWIGLU_OAI_MOE,
 };

 enum llm_ffn_gate_type {
@@ -64,6 +70,8 @@ struct llama_cross {
     std::vector<std::set<llama_seq_id>> seq_ids_enc;
 };

+struct llm_graph_params;
+
 //
 // llm_graph_input
 //
@@ -73,11 +81,19 @@ public:
     virtual ~llm_graph_input_i() = default;

     virtual void set_input(const llama_ubatch * ubatch) = 0;
+
+    // return true if the resulting input tensors using the provided graph parameters would be
+    // the same as the previous input tensors that we have currently stored in the object
+    virtual bool can_reuse(const llm_graph_params & params) {
+        // returning false here by default will prevent reusing the graph if the check
+        // for the input type has not been implemented yet
+        GGML_UNUSED(params);
+        return false;
+    }
 };

 using llm_graph_input_ptr = std::unique_ptr<llm_graph_input_i>;

-
 class llm_graph_input_embd : public llm_graph_input_i {
 public:
     llm_graph_input_embd() = default;
@@ -85,20 +101,24 @@ public:

     void set_input(const llama_ubatch * ubatch) override;

+    bool can_reuse(const llm_graph_params & params) override;
+
     ggml_tensor * tokens = nullptr; // I32 [n_batch]
     ggml_tensor * embd   = nullptr; // F32 [n_embd, n_batch]
 };

 class llm_graph_input_pos : public llm_graph_input_i {
 public:
-    llm_graph_input_pos(int64_t n_pos_per_embd) : n_pos_per_embd(n_pos_per_embd) {}
+    llm_graph_input_pos(uint32_t n_pos_per_embd) : n_pos_per_embd(n_pos_per_embd) {}
     virtual ~llm_graph_input_pos() = default;

     void set_input(const llama_ubatch * ubatch) override;

+    bool can_reuse(const llm_graph_params & params) override;
+
     ggml_tensor * pos = nullptr; // I32 [n_batch]

-    const int64_t n_pos_per_embd = 1;
+    const uint32_t n_pos_per_embd = 1;
 };

 // temperature tuning, used by llama4
@@ -125,22 +145,23 @@ public:

     ggml_tensor * pos_bucket = nullptr; // I32 [n_batch, n_batch]

-    const llama_hparams & hparams;
+    const llama_hparams hparams;
 };

 class llm_graph_input_pos_bucket_kv : public llm_graph_input_i {
 public:
     llm_graph_input_pos_bucket_kv(
             const llama_hparams & hparams,
-            const llama_kv_cache_unified * kv_self) : hparams(hparams), kv_self(kv_self) {}
+            const llama_kv_cache_unified_context * mctx) : hparams(hparams), mctx(mctx) {}
     virtual ~llm_graph_input_pos_bucket_kv() = default;

     void set_input(const llama_ubatch * ubatch) override;

     ggml_tensor * pos_bucket = nullptr; 
// I32 [n_kv, n_batch] - const llama_hparams & hparams; - const llama_kv_cache_unified * kv_self; + const llama_hparams hparams; + + const llama_kv_cache_unified_context * mctx; }; class llm_graph_input_out_ids : public llm_graph_input_i { @@ -148,17 +169,19 @@ public: llm_graph_input_out_ids( const llama_hparams & hparams, const llama_cparams & cparams, - int32_t n_outputs) : hparams(hparams), cparams(cparams), n_outputs(n_outputs) {} + uint32_t n_outputs) : hparams(hparams), cparams(cparams), n_outputs(n_outputs) {} virtual ~llm_graph_input_out_ids() = default; void set_input(const llama_ubatch * ubatch) override; + bool can_reuse(const llm_graph_params & params) override; + ggml_tensor * out_ids; // I32 [n_outputs] - const llama_hparams & hparams; - const llama_cparams & cparams; + const llama_hparams hparams; + const llama_cparams cparams; - const int32_t n_outputs; + const uint32_t n_outputs; }; class llm_graph_input_mean : public llm_graph_input_i { @@ -170,7 +193,7 @@ public: ggml_tensor * mean; // F32 [n_batch, n_batch] - const llama_cparams & cparams; + const llama_cparams cparams; }; class llm_graph_input_cls : public llm_graph_input_i { @@ -182,31 +205,24 @@ public: ggml_tensor * cls; // I32 [n_batch] - const llama_cparams & cparams; + const llama_cparams cparams; }; -class llm_graph_input_s_copy : public llm_graph_input_i { +class llm_graph_input_rs : public llm_graph_input_i { public: - llm_graph_input_s_copy(const llama_kv_cache_recurrent * kv_self) : kv_self(kv_self) {} - virtual ~llm_graph_input_s_copy() = default; + llm_graph_input_rs(const llama_memory_recurrent_context * mctx) : mctx(mctx) {} + virtual ~llm_graph_input_rs() = default; void set_input(const llama_ubatch * ubatch) override; - ggml_tensor * s_copy; // I32 [kv_size] + ggml_tensor * s_copy; // I32 [n_rs] - const llama_kv_cache_recurrent * kv_self; -}; + // views of s_copy, computed once per graph + // and shared across layers which use build_rs + ggml_tensor * s_copy_main; // I32 [n_seqs] + ggml_tensor * s_copy_extra; // I32 [n_rs - n_seqs] -class llm_graph_input_s_mask : public llm_graph_input_i { -public: - llm_graph_input_s_mask(const llama_kv_cache_recurrent * kv_self) : kv_self(kv_self) {} - virtual ~llm_graph_input_s_mask() = default; - - void set_input(const llama_ubatch * ubatch) override; - - ggml_tensor * s_mask; // F32 [1, n_kv] - - const llama_kv_cache_recurrent * kv_self; + const llama_memory_recurrent_context * mctx; }; class llm_graph_input_cross_embd : public llm_graph_input_i { @@ -234,11 +250,11 @@ public: ggml_tensor * get_kq_mask() const { return kq_mask_cnv; } - ggml_tensor * kq_mask = nullptr; // F32 [n_tokens, n_batch] - ggml_tensor * kq_mask_cnv = nullptr; // [n_tokens, n_batch] + ggml_tensor * kq_mask = nullptr; // F32 [n_tokens, n_batch, 1, 1] + ggml_tensor * kq_mask_cnv = nullptr; // [n_tokens, n_batch, 1, 1] - const llama_hparams & hparams; - const llama_cparams & cparams; + const llama_hparams hparams; + const llama_cparams cparams; }; class llm_graph_input_attn_kv_unified : public llm_graph_input_i { @@ -246,27 +262,75 @@ public: llm_graph_input_attn_kv_unified( const llama_hparams & hparams, const llama_cparams & cparams, - const llama_kv_cache_unified * kv_self) : + const llama_kv_cache_unified_context * mctx) : hparams(hparams), cparams(cparams), - kv_self(kv_self) { + mctx(mctx) { } ~llm_graph_input_attn_kv_unified() = default; void set_input(const llama_ubatch * ubatch) override; + bool can_reuse(const llm_graph_params & params) override; + + ggml_tensor * get_k_idxs() const 
{ return self_k_idxs; } + ggml_tensor * get_v_idxs() const { return self_v_idxs; } + + ggml_tensor * get_kq_mask() const { return self_kq_mask_cnv; } + + ggml_tensor * self_k_idxs = nullptr; // I64 [n_batch] + ggml_tensor * self_v_idxs = nullptr; // I64 [n_batch] or [n_batch*n_embd_v_gqa] + + ggml_tensor * self_kq_mask = nullptr; // F32 [n_kv, n_batch/n_stream, 1, n_stream] + ggml_tensor * self_kq_mask_cnv = nullptr; // [n_kv, n_batch/n_stream, 1, n_stream] + + // note: these have to be copies because in order to be able to reuse a graph, its inputs + // need to carry these parameters with them. otherwise, they can point to freed + // llm_graph_params from a previous batch, causing stack-use-after-return + const llama_hparams hparams; + const llama_cparams cparams; + + const llama_kv_cache_unified_context * mctx; +}; + +class llm_graph_input_attn_kv_unified_iswa : public llm_graph_input_i { +public: + llm_graph_input_attn_kv_unified_iswa( + const llama_hparams & hparams, + const llama_cparams & cparams, + const llama_kv_cache_unified_iswa_context * mctx) : + hparams(hparams), + cparams(cparams), + mctx(mctx) { + } + ~llm_graph_input_attn_kv_unified_iswa() = default; + + void set_input(const llama_ubatch * ubatch) override; + + bool can_reuse(const llm_graph_params & params) override; + + ggml_tensor * get_k_idxs() const { return self_k_idxs; } + ggml_tensor * get_v_idxs() const { return self_v_idxs; } + ggml_tensor * get_k_idxs_swa() const { return self_k_idxs_swa; } + ggml_tensor * get_v_idxs_swa() const { return self_v_idxs_swa; } + ggml_tensor * get_kq_mask() const { return self_kq_mask_cnv; } ggml_tensor * get_kq_mask_swa() const { return self_kq_mask_swa_cnv; } - ggml_tensor * self_kq_mask = nullptr; // F32 [n_kv, n_batch] - ggml_tensor * self_kq_mask_cnv = nullptr; // [n_kv, n_batch] - ggml_tensor * self_kq_mask_swa = nullptr; // F32 [n_kv, n_batch] - ggml_tensor * self_kq_mask_swa_cnv = nullptr; // [n_kv, n_batch] + ggml_tensor * self_k_idxs = nullptr; // I64 [n_batch] + ggml_tensor * self_v_idxs = nullptr; // I64 [n_batch] or [n_batch*n_embd_v_gqa] + ggml_tensor * self_k_idxs_swa = nullptr; // I64 [n_batch] + ggml_tensor * self_v_idxs_swa = nullptr; // I64 [n_batch] or [n_batch*n_embd_v_gqa] - const llama_hparams & hparams; - const llama_cparams & cparams; + ggml_tensor * self_kq_mask = nullptr; // F32 [n_kv, n_batch/n_stream, 1, n_stream] + ggml_tensor * self_kq_mask_cnv = nullptr; // [n_kv, n_batch/n_stream, 1, n_stream] + ggml_tensor * self_kq_mask_swa = nullptr; // F32 [n_kv, n_batch/n_stream, 1, n_stream] + ggml_tensor * self_kq_mask_swa_cnv = nullptr; // [n_kv, n_batch/n_stream, 1, n_stream] - const llama_kv_cache_unified * kv_self; + const llama_hparams hparams; + const llama_cparams cparams; + + const llama_kv_cache_unified_iswa_context * mctx; }; class llm_graph_input_attn_cross : public llm_graph_input_i { @@ -278,12 +342,34 @@ public: ggml_tensor * get_kq_mask_cross() const { return cross_kq_mask_cnv; } - ggml_tensor * cross_kq_mask = nullptr; // F32 [n_outputs_enc, n_batch] - ggml_tensor * cross_kq_mask_cnv = nullptr; // F32 [n_outputs_enc, n_batch] + ggml_tensor * cross_kq_mask = nullptr; // F32 [n_outputs_enc, n_batch, 1, 1] + ggml_tensor * cross_kq_mask_cnv = nullptr; // F32 [n_outputs_enc, n_batch, 1, 1] const llama_cross * cross = nullptr; }; +class llm_graph_input_mem_hybrid : public llm_graph_input_i { +public: + llm_graph_input_mem_hybrid( + std::unique_ptr inp_attn, + std::unique_ptr inp_rs, + const llama_memory_hybrid_context * mctx) : + 
        inp_attn(std::move(inp_attn)),
+        inp_rs(std::move(inp_rs)),
+        mctx(mctx) { }
+    virtual ~llm_graph_input_mem_hybrid() = default;
+
+    void set_input(const llama_ubatch * ubatch) override;
+
+    std::unique_ptr<llm_graph_input_attn_kv_unified> inp_attn;
+    std::unique_ptr<llm_graph_input_rs>              inp_rs;
+
+    llm_graph_input_attn_kv_unified * get_attn() const { return inp_attn.get(); }
+    llm_graph_input_rs * get_recr() const { return inp_rs.get(); }
+
+    const llama_memory_hybrid_context * mctx;
+};
+
 //
 // llm_graph_result
 //

@@ -294,40 +380,110 @@ public:
 // along with the input tensors, the object also provides commonly used output tensors, such as logits, embeddings, etc.
 // these are used by the llama_context to extract the relevant data, based on the compute parameters

-class llm_graph_result_i {
-public:
-    virtual ~llm_graph_result_i() = default;
+// callback that allows us to apply custom logic to each tensor (e.g. ggml-alloc, offloading, etc.)
+using llm_graph_cb = std::function<void(const llama_ubatch & ubatch, ggml_tensor * cur, const char * name, int il)>;

-    virtual ggml_tensor * get_tokens()      = 0;
-    virtual ggml_tensor * get_logits()      = 0;
-    virtual ggml_tensor * get_embd()        = 0;
-    virtual ggml_tensor * get_embd_pooled() = 0;
+class llm_graph_result;

-    virtual void set_inputs(const llama_ubatch * ubatch) = 0;
+struct llm_graph_params {
+    llm_arch arch = LLM_ARCH_UNKNOWN;
+
+    llama_hparams hparams;
+    llama_cparams cparams;
+
+    llama_ubatch ubatch; // note: intentionally make a copy
+
+    llm_graph_type gtype;
+
+    ggml_backend_sched_t sched;
+    ggml_backend_t backend_cpu;
+
+    const llama_adapter_cvec     * cvec;
+    const llama_adapter_loras    * loras;
+    const llama_memory_context_i * mctx;
+    const llama_cross            * cross;
+
+    uint32_t n_outputs;
+
+    llm_graph_cb cb;
+
+    llm_graph_result * res;
+
+    // return true if the "other" params would result in a graph with the same topology as with the current params
+    // having the same topology allows us to reuse the graph in some cases
+    bool allow_reuse(const llm_graph_params & other) const {
+        // first check the ubatch
+        bool can_reuse_ubatch =
+            ubatch.equal_seqs() == other.ubatch.equal_seqs() &&
+            ubatch.n_tokens     == other.ubatch.n_tokens     &&
+            ubatch.n_seq_tokens == other.ubatch.n_seq_tokens &&
+            ubatch.n_seqs       == other.ubatch.n_seqs       &&
+            ubatch.n_seqs_unq   == other.ubatch.n_seqs_unq   &&
+            (
+                (!ubatch.token && !other.ubatch.token) ||
+                (!ubatch.embd  && !other.ubatch.embd)
+            );
+
+        // when we split the batch using "equal_seqs" we have to verify that the participating sequences are the same
+        // this is because the set of attention streams would be different for different sequences
+        if (can_reuse_ubatch && ubatch.equal_seqs()) {
+            if (!ubatch.data) {
+                // if the old ubatch does not own its data, then we cannot guarantee that it is still alive, and
+                // therefore we cannot perform the sequence id check. 
normally should never happen + can_reuse_ubatch = false; + } else { + for (uint32_t s = 0; s < ubatch.n_seqs_unq; ++s) { + can_reuse_ubatch &= ubatch.seq_id_unq[s] == other.ubatch.seq_id_unq[s]; + } + } + } + + if (!can_reuse_ubatch) { + return false; + } + + return + cparams.embeddings == other.cparams.embeddings && + cparams.causal_attn == other.cparams.causal_attn && + arch == other.arch && + gtype == other.gtype && + cvec == other.cvec && + loras == other.loras && + cross == other.cross && + n_outputs == other.n_outputs; + } }; -using llm_graph_result_ptr = std::unique_ptr; - - -class llm_graph_result : public llm_graph_result_i { +class llm_graph_result { public: + llm_graph_result(int64_t max_nodes); + virtual ~llm_graph_result() = default; - ggml_tensor * get_tokens() override { return t_tokens; } - ggml_tensor * get_logits() override { return t_logits; } - ggml_tensor * get_embd() override { return t_embd; } - ggml_tensor * get_embd_pooled() override { return t_embd_pooled; } + ggml_tensor * get_tokens() const { return t_tokens; } + ggml_tensor * get_logits() const { return t_logits; } + ggml_tensor * get_embd() const { return t_embd; } + ggml_tensor * get_embd_pooled() const { return t_embd_pooled; } - void set_inputs(const llama_ubatch * ubatch) override { - for (auto & input : inputs) { - input->set_input(ubatch); - } - } + ggml_cgraph * get_gf() const { return gf; } + ggml_context * get_ctx() const { return ctx_compute.get(); } - llm_graph_input_i * add_input(llm_graph_input_ptr input) { - inputs.emplace_back(std::move(input)); - return inputs.back().get(); - } + int64_t get_max_nodes() const; + + void reset(); + + void set_inputs(const llama_ubatch * ubatch); + + // try to update the existing graph result using the new graph parameters in order to reuse it + // this can only be done if we determine that the resulting graph using the new graph parameters + // would be identical to the existing graph. in that case, we simply have to update the memory + // contexts of the input tensors of the graph and we can reuse it for another computation + // return true if the graph was updated and can be reused + bool can_reuse(const llm_graph_params & params); + + llm_graph_input_i * add_input(llm_graph_input_ptr input); + + void set_params(const llm_graph_params & params); // important graph nodes ggml_tensor * t_tokens = nullptr; @@ -336,36 +492,34 @@ public: ggml_tensor * t_embd_pooled = nullptr; std::vector inputs; + + ggml_context_ptr ctx_compute; + + // memory buffers used to evaluate the model + std::vector buf_compute_meta; + + ggml_cgraph * gf; + + int64_t max_nodes; + +private: + // keep a copy of the previous graph parameters + // we will use this to determine whether the graph can be reused by comparing them with the new parameters + // note: these are updated after constructing the new graph + llm_graph_params params; + + // env: LLAMA_GRAPH_RESULT_DEBUG + int debug = 0; }; +using llm_graph_result_ptr = std::unique_ptr; + // // llm_graph_context // -// callback that allows us to apply custom logic to each tensor (e.g. ggml-alloc, offloading, etc.) 
-using llm_graph_cb = std::function; - -struct llm_graph_params { - ggml_context * ctx; - - const llm_arch arch; - - const llama_hparams & hparams; - const llama_cparams & cparams; - const llama_ubatch & ubatch; - - ggml_backend_sched_t sched; - ggml_backend_t backend_cpu; - - const llama_adapter_cvec * cvec; - const llama_adapter_loras * loras; - const llama_memory_i * memory; - const llama_cross * cross; - - int32_t n_outputs; - - const llm_graph_cb & cb; -}; +// used in build_rs to properly order writes and avoid unnecessary copies +using llm_graph_get_rows_fn = std::function; struct llm_graph_context { const llm_arch arch; @@ -378,7 +532,6 @@ struct llm_graph_context { const int64_t n_layer; const int64_t n_rot; const int64_t n_ctx; // user-specified context size (can be different from n_ctx_train) - const int64_t n_ctx_per_seq; const int64_t n_head; const int64_t n_head_kv; const int64_t n_embd_head_k; @@ -397,31 +550,31 @@ struct llm_graph_context { const float norm_eps; const float norm_rms_eps; - const int32_t n_tokens; - const int32_t n_outputs; + const int64_t n_tokens; + const int64_t n_outputs; const int32_t n_ctx_orig; // yarn const enum llama_pooling_type pooling_type; const enum llama_rope_type rope_type; - ggml_context * ctx0 = nullptr; - ggml_backend_sched_t sched; ggml_backend_t backend_cpu; // TODO: needed by build_attn_mha, figure out a way to remove? - const llama_adapter_cvec * cvec; - const llama_adapter_loras * loras; - const llama_memory_i * memory; - const llama_cross * cross; + const llama_adapter_cvec * cvec; + const llama_adapter_loras * loras; + const llama_memory_context_i * mctx; + const llama_cross * cross; const llm_graph_cb & cb_func; - std::unique_ptr res; + llm_graph_result * res; + + ggml_context * ctx0 = nullptr; + ggml_cgraph * gf = nullptr; llm_graph_context(const llm_graph_params & params); - - int64_t n_pos_per_embd() const; + virtual ~llm_graph_context() = default; void cb(ggml_tensor * cur, const char * name, int il) const; @@ -467,6 +620,7 @@ struct llm_graph_context { llm_ffn_gate_type type_gate, int il) const; + // build MoE FFN without bias tensors ggml_tensor * build_moe_ffn( ggml_tensor * cur, ggml_tensor * gate_inp, @@ -481,7 +635,29 @@ struct llm_graph_context { bool scale_w, float w_scale, llama_expert_gating_func_type gating_op, - int il) const; + int il, + ggml_tensor * probs_in = nullptr) const; + + ggml_tensor * build_moe_ffn( + ggml_tensor * cur, + ggml_tensor * gate_inp, + ggml_tensor * gate_inp_b, + ggml_tensor * up_exps, + ggml_tensor * up_exps_b, + ggml_tensor * gate_exps, + ggml_tensor * gate_exps_b, + ggml_tensor * down_exps, + ggml_tensor * down_exps_b, + ggml_tensor * exp_probs_b, + int64_t n_expert, + int64_t n_expert_used, + llm_ffn_op_type type_op, + bool norm_w, + bool scale_w, + float w_scale, + llama_expert_gating_func_type gating_op, + int il, + ggml_tensor * probs_in = nullptr) const; // // inputs @@ -493,8 +669,6 @@ struct llm_graph_context { ggml_tensor * build_inp_out_ids() const; ggml_tensor * build_inp_mean() const; ggml_tensor * build_inp_cls() const; - ggml_tensor * build_inp_s_copy() const; - ggml_tensor * build_inp_s_mask() const; ggml_tensor * build_inp_cross_embd() const; ggml_tensor * build_inp_pos_bucket_enc() const; @@ -506,21 +680,19 @@ struct llm_graph_context { // ggml_tensor * build_attn_mha( - ggml_cgraph * gf, - ggml_tensor * q, // [n_embd_head_q, n_tokens, n_head_q] - ggml_tensor * k, // [n_embd_head_k, n_tokens, n_head_k] - ggml_tensor * v, // [n_embd_head_v, n_tokens, n_head_v] (v_trans == 
false) + ggml_tensor * q, // [n_embd_head_q, n_head_q, n_tokens] + ggml_tensor * k, // [n_embd_head_k, n_head_k, n_tokens] + ggml_tensor * v, // [n_embd_head_v, n_head_v, n_tokens] (v_trans == false) ggml_tensor * kq_b, ggml_tensor * kq_mask, - ggml_tensor * v_mla, // [n_embd_head_v_mla, n_embd_head_v, n_head_v] - bool v_trans, + ggml_tensor * sinks, + ggml_tensor * v_mla, // [n_embd_head_v_mla, n_embd_head_v, n_head_v] float kq_scale) const; llm_graph_input_attn_no_cache * build_attn_inp_no_cache() const; ggml_tensor * build_attn( llm_graph_input_attn_no_cache * inp, - ggml_cgraph * gf, ggml_tensor * wo, ggml_tensor * wo_b, ggml_tensor * q_cur, // [n_embd_head_q, n_head_q, n_tokens] @@ -535,7 +707,6 @@ struct llm_graph_context { ggml_tensor * build_attn( llm_graph_input_attn_kv_unified * inp, - ggml_cgraph * gf, ggml_tensor * wo, ggml_tensor * wo_b, ggml_tensor * q_cur, // [n_embd_head_q, n_head_q, n_tokens] @@ -546,11 +717,39 @@ struct llm_graph_context { float kq_scale, int il) const; + llm_graph_input_attn_kv_unified_iswa * build_attn_inp_kv_unified_iswa() const; + + // note: if k_cur or v_cur are not provided, they will not be stored in the memory + ggml_tensor * build_attn( + llm_graph_input_attn_kv_unified_iswa * inp, + ggml_tensor * wo, + ggml_tensor * wo_b, + ggml_tensor * q_cur, // [n_embd_head_q, n_head_q, n_tokens] + ggml_tensor * k_cur, // [n_embd_head_k, n_head_k, n_tokens] optional + ggml_tensor * v_cur, // [n_embd_head_v, n_head_v, n_tokens] optional + ggml_tensor * kq_b, + ggml_tensor * v_mla, // [n_embd_head_v_mla, n_embd_head_v, n_head_v] + float kq_scale, + int il) const; + + // TODO: temporary to keep the diff small. after the code is public will refactor to simplify this + ggml_tensor * build_attn_with_sinks( + llm_graph_input_attn_kv_unified_iswa * inp, + ggml_tensor * wo, + ggml_tensor * wo_b, + ggml_tensor * q_cur, // [n_embd_head_q, n_head_q, n_tokens] + ggml_tensor * k_cur, // [n_embd_head_k, n_head_k, n_tokens] optional + ggml_tensor * v_cur, // [n_embd_head_v, n_head_v, n_tokens] optional + ggml_tensor * kq_b, + ggml_tensor * v_mla, // [n_embd_head_v_mla, n_embd_head_v, n_head_v] + ggml_tensor * sinks, // [n_head_q] + float kq_scale, + int il) const; + llm_graph_input_attn_cross * build_attn_inp_cross() const; ggml_tensor * build_attn( llm_graph_input_attn_cross * inp, - ggml_cgraph * gf, ggml_tensor * wo, ggml_tensor * wo_b, ggml_tensor * q_cur, // [n_embd_head_q, n_head_q, n_tokens] @@ -565,34 +764,57 @@ struct llm_graph_context { // recurrent // - ggml_tensor * build_copy_mask_state( - ggml_cgraph * gf, - ggml_tensor * s, - ggml_tensor * state_copy, - ggml_tensor * state_mask, - int32_t n_state, - int32_t n_seqs) const; + // TODO: move this implementation to llama_memory_recurrent. + // this is analogous to llama_kv_cache_unified::cpy_k / cpy_v + // when moving, avoid passing `ggml_cgraph` - only pass `ggml_context`. would likely need to split the + // implementation in 2 separate methods. 
the goal is to avoid calling `ggml_build_forward_expand` in + // `llama_memory_recurrent` + ggml_tensor * build_rs( + ggml_tensor * s, + ggml_tensor * state_copy_main, + ggml_tensor * state_copy_extra, + int32_t state_size, + int32_t n_seqs, + uint32_t n_rs, + uint32_t rs_head, + uint32_t rs_size, + int32_t rs_zero, + const llm_graph_get_rows_fn & get_state_rows = ggml_get_rows) const; + + llm_graph_input_rs * build_rs_inp() const; + + ggml_tensor * build_rs( + llm_graph_input_rs * inp, + ggml_tensor * s, + int32_t state_size, + int32_t n_seqs, + const llm_graph_get_rows_fn & get_state_rows = ggml_get_rows) const; ggml_tensor * build_rwkv_token_shift_load( - ggml_cgraph * gf, - ggml_tensor * state_copy, - ggml_tensor * state_mask, - const llama_ubatch & ubatch, - int il) const; + llm_graph_input_rs * inp, + const llama_ubatch & ubatch, + int il) const; ggml_tensor * build_rwkv_token_shift_store( ggml_tensor * token_shift, const llama_ubatch & ubatch, int il) const; + // + // hybrid + // + + llm_graph_input_mem_hybrid * build_inp_mem_hybrid() const; // // pooling // void build_pooling( - ggml_cgraph * gf, ggml_tensor * cls, ggml_tensor * cls_b, ggml_tensor * cls_out, ggml_tensor * cls_out_b) const; }; + +// TODO: better name +int32_t llama_relative_position_bucket(llama_pos x, llama_pos y, uint64_t n_buckets, bool bidirectional); diff --git a/llama/llama.cpp/src/llama-hparams.cpp b/llama/llama.cpp/src/llama-hparams.cpp index 8a6679601..35fc054f1 100644 --- a/llama/llama.cpp/src/llama-hparams.cpp +++ b/llama/llama.cpp/src/llama-hparams.cpp @@ -2,6 +2,28 @@ #include "ggml.h" +void llama_hparams::set_swa_pattern(uint32_t n_pattern, bool dense_first) { + if (dense_first) { + for (uint32_t il = 0; il < n_layer; ++il) { + swa_layers[il] = n_pattern == 0 || (il % n_pattern != 0); + } + } else { + for (uint32_t il = 0; il < n_layer; ++il) { + swa_layers[il] = n_pattern == 0 || (il % n_pattern < (n_pattern - 1)); + } + } +} + +bool llama_hparams::is_swa_any() const { + for (uint32_t il = 0; il < n_layer; ++il) { + if (swa_layers[il]) { + return true; + } + } + + return false; +} + uint32_t llama_hparams::n_head(uint32_t il) const { if (il < n_layer) { return n_head_arr[il]; @@ -49,18 +71,64 @@ uint32_t llama_hparams::n_embd_v_gqa(uint32_t il) const { return n_embd_head_v * n_head_kv; } -uint32_t llama_hparams::n_embd_k_s() const { +bool llama_hparams::is_n_embd_k_gqa_variable() const { + const uint32_t val = n_embd_k_gqa(); + for (uint32_t il = 0; il < n_layer; ++il) { + if (val != n_embd_k_gqa(il)) { + return true; + } + } + + return false; +} + +bool llama_hparams::is_n_embd_v_gqa_variable() const { + const uint32_t val = n_embd_v_gqa(); + for (uint32_t il = 0; il < n_layer; ++il) { + if (val != n_embd_v_gqa(il)) { + return true; + } + } + + return false; +} + +uint32_t llama_hparams::n_embd_k_gqa_max() const { + uint32_t val = n_embd_k_gqa(); + for (uint32_t il = 0; il < n_layer; ++il) { + val = std::max(val, n_embd_k_gqa(il)); + } + + return val; +} + +uint32_t llama_hparams::n_embd_v_gqa_max() const { + uint32_t val = n_embd_v_gqa(); + for (uint32_t il = 0; il < n_layer; ++il) { + val = std::max(val, n_embd_v_gqa(il)); + } + + return val; +} + +uint32_t llama_hparams::n_embd_r() const { if (wkv_head_size != 0) { // for RWKV models return token_shift_count * n_embd; } + if (n_shortconv_l_cache != 0) { + // for LFM2 models + return n_embd * (n_shortconv_l_cache - 1); + } + // TODO: maybe support other convolution strides than 1 // NOTE: since the first column of the conv_state is shifted out each 
time, it's not actually needed - return (ssm_d_conv > 0 ? ssm_d_conv - 1 : 0) * ssm_d_inner; + // Corresponds to Mamba's conv_states size + return (ssm_d_conv > 0 ? ssm_d_conv - 1 : 0) * (ssm_d_inner + 2*ssm_n_group*ssm_d_state); } -uint32_t llama_hparams::n_embd_v_s() const { +uint32_t llama_hparams::n_embd_s() const { if (wkv_head_size != 0) { // corresponds to RWKV's wkv_states size return n_embd * wkv_head_size; @@ -70,6 +138,14 @@ uint32_t llama_hparams::n_embd_v_s() const { return ssm_d_state * ssm_d_inner; } +bool llama_hparams::is_recurrent(uint32_t il) const { + return recurrent_layer_arr[il]; +} + +uint32_t llama_hparams::n_pos_per_embd() const { + return rope_type == LLAMA_ROPE_TYPE_MROPE ? 4 : 1; +} + bool llama_hparams::n_bskcn(uint32_t n, uint32_t il) const { if (il < n_layer) { return n_bskcn_arr[n][il] > 0; @@ -80,7 +156,7 @@ bool llama_hparams::n_bskcn(uint32_t n, uint32_t il) const { bool llama_hparams::is_swa(uint32_t il) const { if (il < n_layer) { - return n_swa > 0 && n_swa_pattern > 0 && il % n_swa_pattern < (n_swa_pattern - 1); + return swa_layers[il]; } GGML_ABORT("fatal error"); diff --git a/llama/llama.cpp/src/llama-hparams.h b/llama/llama.cpp/src/llama-hparams.h index 48dce4071..29bd90565 100644 --- a/llama/llama.cpp/src/llama-hparams.h +++ b/llama/llama.cpp/src/llama-hparams.h @@ -6,12 +6,19 @@ // bump if necessary #define LLAMA_MAX_LAYERS 512 -#define LLAMA_MAX_EXPERTS 256 // DeepSeekV3 +#define LLAMA_MAX_EXPERTS 384 // Kimi-K2 enum llama_expert_gating_func_type { - LLAMA_EXPERT_GATING_FUNC_TYPE_NONE = 0, - LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX = 1, - LLAMA_EXPERT_GATING_FUNC_TYPE_SIGMOID = 2, + LLAMA_EXPERT_GATING_FUNC_TYPE_NONE = 0, + LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX = 1, + LLAMA_EXPERT_GATING_FUNC_TYPE_SIGMOID = 2, + LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX_WEIGHT = 3, // applied to the router weights instead of the logits +}; + +enum llama_swa_type { + LLAMA_SWA_TYPE_NONE = 0, + LLAMA_SWA_TYPE_STANDARD = 1, + LLAMA_SWA_TYPE_CHUNKED = 2, }; struct llama_hparams_posnet { @@ -35,8 +42,6 @@ struct llama_hparams { uint32_t n_embd_features = 0; uint32_t n_layer; uint32_t n_rot; - uint32_t n_swa = 0; // sliding window attention (SWA) - uint32_t n_swa_pattern = 1; // by default, all layers use non-sliding-window attention uint32_t n_embd_head_k; // dimension of keys (d_k). d_q is assumed to be the same, but there are n_head q heads, and only n_head_kv k-v heads uint32_t n_embd_head_v; // dimension of values (d_v) aka n_embd_head uint32_t n_expert = 0; @@ -51,6 +56,8 @@ struct llama_hparams { struct llama_hparams_posnet posnet; struct llama_hparams_convnext convnext; + uint32_t n_shortconv_l_cache = 0; + std::array n_head_arr; std::array n_head_kv_arr; std::array n_ff_arr; @@ -69,6 +76,7 @@ struct llama_hparams { bool expert_weights_norm = false; uint32_t expert_gating_func = LLAMA_EXPERT_GATING_FUNC_TYPE_NONE; uint32_t moe_every_n_layers = 0; + uint32_t nextn_predict_layers = 0; float f_norm_eps; float f_norm_rms_eps; @@ -94,15 +102,28 @@ struct llama_hparams { float rope_freq_scale_train; float rope_freq_scale_train_swa; uint32_t n_ctx_orig_yarn; - float rope_yarn_log_mul; + float rope_yarn_log_mul = 0.0f; std::array rope_sections; + // Sliding Window Attention (SWA) + llama_swa_type swa_type = LLAMA_SWA_TYPE_NONE; + // the size of the sliding window (0 - no SWA) + uint32_t n_swa = 0; + // if swa_layers[il] == true, then layer il is SWA + // if swa_layers[il] == false, then layer il is dense (i.e. 
non-SWA)
+    // by default, all layers are dense
+    std::array<bool, LLAMA_MAX_LAYERS> swa_layers;
+
     // for State Space Models
     uint32_t ssm_d_conv  = 0;
     uint32_t ssm_d_inner = 0;
     uint32_t ssm_d_state = 0;
     uint32_t ssm_dt_rank = 0;
+    uint32_t ssm_n_group = 0;
+
+    // for hybrid state space models
+    std::array<bool, LLAMA_MAX_LAYERS> recurrent_layer_arr;

     bool ssm_dt_b_c_rms = false;

@@ -118,15 +139,23 @@ struct llama_hparams {
     bool causal_attn   = true;
     bool use_alibi     = false;
     bool attn_soft_cap = false;
+    bool use_kq_norm   = true;

+    // for Classifiers
+    uint32_t n_cls_out = 1;
+
+    // llama4 smallthinker
     uint32_t n_moe_layer_step        = 0;
-    bool     use_kq_norm             = true;
-    uint32_t n_attn_chunk            = 0;
-    // values below seems to be fixed on llama4
     uint32_t n_no_rope_layer_step    = 4;
     uint32_t n_attn_temp_floor_scale = 8192;
     float    f_attn_temp_scale       = 0.1;

+    // gemma3n altup
+    uint32_t n_altup      = 4; // altup_num_inputs
+    uint32_t i_altup_act  = 0; // altup_active_idx
+    uint32_t laurel_rank  = 64;
+    uint32_t n_embd_altup = 256;
+
     // needed by encoder-decoder models (e.g. T5, FLAN-T5)
     // ref: https://github.com/ggerganov/llama.cpp/pull/8141
     llama_token dec_start_token_id = LLAMA_TOKEN_NULL;
@@ -135,6 +164,30 @@ struct llama_hparams {
     enum llama_rope_type         rope_type               = LLAMA_ROPE_TYPE_NONE;
     enum llama_rope_scaling_type rope_scaling_type_train = LLAMA_ROPE_SCALING_TYPE_NONE;

+    // this value n_pattern means that every nth layer is dense (i.e. non-SWA)
+    // dense_first means whether the pattern starts with a dense layer
+    // note that if n_pattern == 0, all layers are SWA
+    //           if n_pattern == 1, all layers are dense
+    // example 1: n_pattern = 3, dense_first = false
+    //   il == 0: swa
+    //   il == 1: swa
+    //   il == 2: dense
+    //   il == 3: swa
+    //   il == 4: swa
+    //   il == 5: dense
+    //   il == 6: swa
+    //   etc ...
+    // example 2: n_pattern = 2, dense_first = true
+    //   il == 0: dense
+    //   il == 1: swa
+    //   il == 2: dense
+    //   il == 3: swa
+    //   etc ...
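+    // for reference: per set_swa_pattern() in llama-hparams.cpp above,
+    //   dense_first == false gives swa_layers[il] = (il % n_pattern < n_pattern - 1)
+    //   dense_first == true  gives swa_layers[il] = (il % n_pattern != 0)
+    // (plus the n_pattern == 0 special case); both examples above follow from this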
+ void set_swa_pattern(uint32_t n_pattern, bool dense_first = false); + + // return true if one of the layers is SWA + bool is_swa_any() const; + uint32_t n_head(uint32_t il = 0) const; uint32_t n_head_kv(uint32_t il = 0) const; @@ -149,12 +202,25 @@ struct llama_hparams { // dimension of value embeddings across all k-v heads uint32_t n_embd_v_gqa(uint32_t il = 0) const; + // true if any layer has a different n_embd_k_gqa/n_embd_v_gqa + bool is_n_embd_k_gqa_variable() const; + bool is_n_embd_v_gqa_variable() const; + + // return the maximum n_embd_k_gqa/n_embd_v_gqa across all layers + uint32_t n_embd_k_gqa_max() const; + uint32_t n_embd_v_gqa_max() const; + // dimension of the rolling state embeddings // corresponds to Mamba's conv_states size or RWKV's token_shift states size - uint32_t n_embd_k_s() const; + uint32_t n_embd_r() const; // dimension of the recurrent state embeddings - uint32_t n_embd_v_s() const; + uint32_t n_embd_s() const; + + // whether or not the given layer is recurrent (for hybrid models) + bool is_recurrent(uint32_t il) const; + + uint32_t n_pos_per_embd() const; // Block skip connection bool n_bskcn(uint32_t n, uint32_t il) const; diff --git a/llama/llama.cpp/src/llama-kv-cache-unified-iswa.cpp b/llama/llama.cpp/src/llama-kv-cache-unified-iswa.cpp new file mode 100644 index 000000000..01d27fb4d --- /dev/null +++ b/llama/llama.cpp/src/llama-kv-cache-unified-iswa.cpp @@ -0,0 +1,295 @@ +#include "llama-kv-cache-unified-iswa.h" + +#include "llama-impl.h" +#include "llama-batch.h" +#include "llama-model.h" + +#include +#include + +// +// llama_kv_cache_unified_iswa +// + +llama_kv_cache_unified_iswa::llama_kv_cache_unified_iswa( + const llama_model & model, + ggml_type type_k, + ggml_type type_v, + bool v_trans, + bool offload, + bool swa_full, + bool unified, + uint32_t kv_size, + uint32_t n_seq_max, + uint32_t n_ubatch, + uint32_t n_pad) : hparams(model.hparams), unified(unified) { + llama_kv_cache_unified::layer_filter_cb filter_base = [&](int32_t il) { return !model.hparams.is_swa(il); }; + llama_kv_cache_unified::layer_filter_cb filter_swa = [&](int32_t il) { return model.hparams.is_swa(il); }; + + const uint32_t size_base = kv_size; + + uint32_t size_swa = std::min(size_base, GGML_PAD(hparams.n_swa*(unified ? 
n_seq_max : 1) + n_ubatch, n_pad)); + + // when using full-size SWA cache, we set the SWA cache size to be equal to the base cache size + if (swa_full) { + LLAMA_LOG_WARN("%s: using full-size SWA cache (ref: %s)\n", + __func__, "https://github.com/ggml-org/llama.cpp/pull/13194#issuecomment-2868343055"); + + size_swa = size_base; + } + + LLAMA_LOG_INFO("%s: creating non-SWA KV cache, size = %u cells\n", __func__, size_base); + + kv_base = std::make_unique( + model, std::move(filter_base), type_k, type_v, + v_trans, offload, unified, size_base, n_seq_max, n_pad, + 0, LLAMA_SWA_TYPE_NONE); + + LLAMA_LOG_INFO("%s: creating SWA KV cache, size = %u cells\n", __func__, size_swa); + + kv_swa = std::make_unique( + model, std::move(filter_swa), type_k, type_v, + v_trans, offload, unified, size_swa, n_seq_max, n_pad, + hparams.n_swa, hparams.swa_type); +} + +void llama_kv_cache_unified_iswa::clear(bool data) { + kv_base->clear(data); + kv_swa ->clear(data); +} + +bool llama_kv_cache_unified_iswa::seq_rm(llama_seq_id seq_id, llama_pos p0, llama_pos p1) { + bool res = true; + + res = res & kv_base->seq_rm(seq_id, p0, p1); + res = res & kv_swa ->seq_rm(seq_id, p0, p1); + + return res; +} + +void llama_kv_cache_unified_iswa::seq_cp(llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) { + kv_base->seq_cp(seq_id_src, seq_id_dst, p0, p1); + kv_swa ->seq_cp(seq_id_src, seq_id_dst, p0, p1); +} + +void llama_kv_cache_unified_iswa::seq_keep(llama_seq_id seq_id) { + kv_base->seq_keep(seq_id); + kv_swa ->seq_keep(seq_id); +} + +void llama_kv_cache_unified_iswa::seq_add(llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos shift) { + kv_base->seq_add(seq_id, p0, p1, shift); + kv_swa ->seq_add(seq_id, p0, p1, shift); +} + +void llama_kv_cache_unified_iswa::seq_div(llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) { + kv_base->seq_div(seq_id, p0, p1, d); + kv_swa ->seq_div(seq_id, p0, p1, d); +} + +llama_pos llama_kv_cache_unified_iswa::seq_pos_min(llama_seq_id seq_id) const { + // the base cache is a superset of the SWA cache, so we can just check the SWA cache + return kv_swa->seq_pos_min(seq_id); +} + +llama_pos llama_kv_cache_unified_iswa::seq_pos_max(llama_seq_id seq_id) const { + return kv_swa->seq_pos_max(seq_id); +} + +llama_memory_context_ptr llama_kv_cache_unified_iswa::init_batch(llama_batch_allocr & balloc, uint32_t n_ubatch, bool embd_all) { + GGML_UNUSED(embd_all); + + // first try simple split + do { + if (!unified) { + // requires equal splits, so we skip the simple split + break; + } + + balloc.split_reset(); + + std::vector ubatches; + while (true) { + auto ubatch = balloc.split_simple(n_ubatch); + + if (ubatch.n_tokens == 0) { + break; + } + + ubatches.push_back(std::move(ubatch)); // NOLINT + } + + if (balloc.get_n_used() < balloc.get_n_tokens()) { + // failed to find a suitable split + break; + } + + auto sinfos_base = kv_base->prepare(ubatches); + if (sinfos_base.empty()) { + break; + } + + auto sinfos_swa = kv_swa->prepare(ubatches); + if (sinfos_swa.empty()) { + break; + } + + assert(sinfos_base.size() == sinfos_swa.size()); + + return std::make_unique( + this, std::move(sinfos_base), std::move(sinfos_swa), std::move(ubatches)); + } while (false); + + // if it fails, try equal split + do { + balloc.split_reset(); + + std::vector ubatches; + while (true) { + auto ubatch = balloc.split_equal(n_ubatch, !unified); + + if (ubatch.n_tokens == 0) { + break; + } + + ubatches.push_back(std::move(ubatch)); // NOLINT + } + + if (balloc.get_n_used() < 
balloc.get_n_tokens()) { + // failed to find a suitable split + break; + } + + auto sinfos_base = kv_base->prepare(ubatches); + if (sinfos_base.empty()) { + break; + } + + auto sinfos_swa = kv_swa->prepare(ubatches); + if (sinfos_swa.empty()) { + break; + } + + assert(sinfos_base.size() == sinfos_swa.size()); + + return std::make_unique( + this, std::move(sinfos_base), std::move(sinfos_swa), std::move(ubatches)); + } while (false); + + // TODO: if we fail again, we should attempt different splitting strategies + // but to do that properly, we first have to refactor the batches to be more flexible + + return std::make_unique(LLAMA_MEMORY_STATUS_FAILED_PREPARE); +} + +llama_memory_context_ptr llama_kv_cache_unified_iswa::init_full() { + return std::make_unique(this); +} + +llama_memory_context_ptr llama_kv_cache_unified_iswa::init_update(llama_context * lctx, bool optimize) { + return std::make_unique(this, lctx, optimize); +} + +bool llama_kv_cache_unified_iswa::get_can_shift() const { + return kv_base->get_size() == kv_swa->get_size(); +} + +void llama_kv_cache_unified_iswa::state_write(llama_io_write_i & io, llama_seq_id seq_id) const { + kv_base->state_write(io, seq_id); + kv_swa ->state_write(io, seq_id); +} + +void llama_kv_cache_unified_iswa::state_read(llama_io_read_i & io, llama_seq_id seq_id) { + kv_base->state_read(io, seq_id); + kv_swa ->state_read(io, seq_id); +} + +llama_kv_cache_unified * llama_kv_cache_unified_iswa::get_base() const { + return kv_base.get(); +} + +llama_kv_cache_unified * llama_kv_cache_unified_iswa::get_swa() const { + return kv_swa.get(); +} + +// +// llama_kv_cache_unified_iswa_context +// + +llama_kv_cache_unified_iswa_context::llama_kv_cache_unified_iswa_context(llama_memory_status status) : status(status) {} + +llama_kv_cache_unified_iswa_context::llama_kv_cache_unified_iswa_context( + llama_kv_cache_unified_iswa * kv) : + ctx_base(kv->get_base()->init_full()), + ctx_swa (kv->get_swa ()->init_full()), + status(llama_memory_status_combine(ctx_base->get_status(), ctx_swa->get_status())) { +} + +llama_kv_cache_unified_iswa_context::llama_kv_cache_unified_iswa_context( + llama_kv_cache_unified_iswa * kv, + llama_context * lctx, + bool optimize) : + ctx_base(kv->get_base()->init_update(lctx, optimize)), + ctx_swa (kv->get_swa ()->init_update(lctx, optimize)), + status(llama_memory_status_combine(ctx_base->get_status(), ctx_swa->get_status())) { +} + +llama_kv_cache_unified_iswa_context::llama_kv_cache_unified_iswa_context( + llama_kv_cache_unified_iswa * kv, + slot_info_vec_t sinfos_base, + slot_info_vec_t sinfos_swa, + std::vector ubatches) : + ubatches(std::move(ubatches)), + // note: here we copy the ubatches. 
not sure if this is ideal
+    ctx_base(new llama_kv_cache_unified_context(kv->get_base(), std::move(sinfos_base), this->ubatches)),
+    ctx_swa (new llama_kv_cache_unified_context(kv->get_swa (), std::move(sinfos_swa), this->ubatches)),
+    status(llama_memory_status_combine(ctx_base->get_status(), ctx_swa->get_status())) {
+}
+
+llama_kv_cache_unified_iswa_context::~llama_kv_cache_unified_iswa_context() = default;
+
+bool llama_kv_cache_unified_iswa_context::next() {
+    assert(status == LLAMA_MEMORY_STATUS_SUCCESS);
+
+    ctx_base->next();
+    ctx_swa ->next();
+
+    if (++i_next >= ubatches.size()) {
+        return false;
+    }
+
+    return true;
+}
+
+bool llama_kv_cache_unified_iswa_context::apply() {
+    assert(!llama_memory_status_is_fail(status));
+
+    bool res = true;
+
+    res = res & ctx_base->apply();
+    res = res & ctx_swa ->apply();
+
+    return res;
+}
+
+llama_memory_status llama_kv_cache_unified_iswa_context::get_status() const {
+    return status;
+}
+
+const llama_ubatch & llama_kv_cache_unified_iswa_context::get_ubatch() const {
+    assert(status == LLAMA_MEMORY_STATUS_SUCCESS);
+
+    return ubatches[i_next];
+}
+
+const llama_kv_cache_unified_context * llama_kv_cache_unified_iswa_context::get_base() const {
+    assert(status == LLAMA_MEMORY_STATUS_SUCCESS);
+
+    return static_cast<const llama_kv_cache_unified_context *>(ctx_base.get());
+}
+
+const llama_kv_cache_unified_context * llama_kv_cache_unified_iswa_context::get_swa() const {
+    assert(status == LLAMA_MEMORY_STATUS_SUCCESS);
+
+    return static_cast<const llama_kv_cache_unified_context *>(ctx_swa.get());
+}
diff --git a/llama/llama.cpp/src/llama-kv-cache-unified-iswa.h b/llama/llama.cpp/src/llama-kv-cache-unified-iswa.h
new file mode 100644
index 000000000..d2650dadd
--- /dev/null
+++ b/llama/llama.cpp/src/llama-kv-cache-unified-iswa.h
@@ -0,0 +1,133 @@
+#pragma once
+
+#include "llama-kv-cache-unified.h"
+
+#include <vector>
+
+//
+// llama_kv_cache_unified_iswa
+//
+
+// utilizes two instances of llama_kv_cache_unified
+// the first instance is for the non-SWA layers of the model and the second instance is for the SWA layers
+
+class llama_kv_cache_unified_iswa : public llama_memory_i {
+public:
+    llama_kv_cache_unified_iswa(
+            const llama_model & model,
+            ggml_type type_k,
+            ggml_type type_v,
+            bool v_trans,
+            bool offload,
+            bool swa_full,
+            bool unified,
+            uint32_t kv_size,
+            uint32_t n_seq_max,
+            uint32_t n_ubatch,
+            uint32_t n_pad);
+
+    ~llama_kv_cache_unified_iswa() = default;
+
+    //
+    // llama_memory_i
+    //
+
+    llama_memory_context_ptr init_batch(
+            llama_batch_allocr & balloc,
+            uint32_t n_ubatch,
+            bool embd_all) override;
+
+    llama_memory_context_ptr init_full() override;
+
+    llama_memory_context_ptr init_update(llama_context * lctx, bool optimize) override;
+
+    bool get_can_shift() const override;
+
+    void clear(bool data) override;
+
+    bool seq_rm  (llama_seq_id seq_id, llama_pos p0, llama_pos p1) override;
+    void seq_cp  (llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) override;
+    void seq_keep(llama_seq_id seq_id) override;
+    void seq_add (llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos shift) override;
+    void seq_div (llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) override;
+
+    llama_pos seq_pos_min(llama_seq_id seq_id) const override;
+    llama_pos seq_pos_max(llama_seq_id seq_id) const override;
+
+    // state write/load
+
+    void state_write(llama_io_write_i & io, llama_seq_id seq_id = -1) const override;
+    void state_read (llama_io_read_i & io, llama_seq_id seq_id = -1) override;
+
+    //
+    // llama_kv_cache_unified_iswa specific API
+    //
+
+    llama_kv_cache_unified * get_base() const;
+    llama_kv_cache_unified * get_swa () const;
+
+private:
+    const llama_hparams & hparams;
+
+    const bool unified;
+
+    std::unique_ptr<llama_kv_cache_unified> kv_base;
+    std::unique_ptr<llama_kv_cache_unified> kv_swa;
+};
+
+class llama_kv_cache_unified_iswa_context : public llama_memory_context_i {
+public:
+    using slot_info_vec_t = llama_kv_cache_unified::slot_info_vec_t;
+
+    // used for errors
+    llama_kv_cache_unified_iswa_context(llama_memory_status status);
+
+    // used to create a full-cache context
+    llama_kv_cache_unified_iswa_context(
+            llama_kv_cache_unified_iswa * kv);
+
+    // used to create an update context
+    llama_kv_cache_unified_iswa_context(
+            llama_kv_cache_unified_iswa * kv,
+            llama_context * lctx,
+            bool optimize);
+
+    // used to create a batch processing context from a batch
+    llama_kv_cache_unified_iswa_context(
+            llama_kv_cache_unified_iswa * kv,
+            slot_info_vec_t sinfos_base,
+            slot_info_vec_t sinfos_swa,
+            std::vector<llama_ubatch> ubatches);
+
+    virtual ~llama_kv_cache_unified_iswa_context();
+
+    //
+    // llama_memory_context_i
+    //
+
+    bool next() override;
+    bool apply() override;
+
+    llama_memory_status get_status() const override;
+    const llama_ubatch & get_ubatch() const override;
+
+    //
+    // llama_kv_cache_unified_iswa_context specific API
+    //
+
+    const llama_kv_cache_unified_context * get_base() const;
+    const llama_kv_cache_unified_context * get_swa() const;
+
+private:
+    //llama_kv_cache_unified_iswa * kv;
+
+    // the index of the next ubatch to process
+    size_t i_next = 0;
+
+    std::vector<llama_ubatch> ubatches;
+
+    const llama_memory_context_ptr ctx_base;
+    const llama_memory_context_ptr ctx_swa;
+
+    const llama_memory_status status;
+};
diff --git a/llama/llama.cpp/src/llama-kv-cache-unified.cpp b/llama/llama.cpp/src/llama-kv-cache-unified.cpp
new file mode 100644
index 000000000..e539142e6
--- /dev/null
+++ b/llama/llama.cpp/src/llama-kv-cache-unified.cpp
@@ -0,0 +1,2390 @@
+#include "llama-kv-cache-unified.h"
+
+#include "llama-impl.h"
+#include "llama-io.h"
+#include "llama-model.h"
+#include "llama-context.h"
+
+#include <algorithm>
+#include <cassert>
+#include <cmath>
+#include <limits>
+#include <map>
+#include <stdexcept>
+
+//
+// llama_kv_cache_unified
+//
+
+llama_kv_cache_unified::llama_kv_cache_unified(
+        const llama_model & model,
+        layer_filter_cb && filter,
+        ggml_type type_k,
+        ggml_type type_v,
+        bool v_trans,
+        bool offload,
+        bool unified,
+        uint32_t kv_size,
+        uint32_t n_seq_max,
+        uint32_t n_pad,
+        uint32_t n_swa,
+        llama_swa_type swa_type) :
+    model(model), hparams(model.hparams), v_trans(v_trans),
+    n_seq_max(n_seq_max), n_stream(unified ? 
1 : n_seq_max), n_pad(n_pad), n_swa(n_swa), swa_type(swa_type) { + + GGML_ASSERT(kv_size % n_pad == 0); + + // TODO: this is temporary until we support passing reuse layer filters [KV_REUSE] + auto n_layer_cache = hparams.n_layer; + if (model.arch == LLM_ARCH_GEMMA3N) { + n_layer_cache = 20; + } + if (model.arch == LLM_ARCH_GLM4_MOE) { + // GLM-4.5: Only process up to last layer, skip final NextN layer + n_layer_cache = hparams.n_layer - hparams.nextn_predict_layers; + } + + // create a context for each buffer type + std::map ctx_map; + auto ctx_for_buft = [&](ggml_backend_buffer_type_t buft) -> ggml_context * { + auto it = ctx_map.find(buft); + if (it == ctx_map.end()) { + ggml_init_params params = { + /*.mem_size =*/ size_t(2u*(1 + n_stream)*n_layer_cache*ggml_tensor_overhead()), + /*.mem_buffer =*/ NULL, + /*.no_alloc =*/ true, + }; + + ggml_context * ctx = ggml_init(params); + if (!ctx) { + return nullptr; + } + + ctx_map[buft] = ctx; + ctxs.emplace_back(ctx); + + return ctx; + } + + return it->second; + }; + + GGML_ASSERT(n_stream == 1 || n_stream == n_seq_max); + + v_heads.resize(n_stream); + for (uint32_t s = 0; s < n_stream; ++s) { + v_heads[s] = 0; + } + + v_cells.resize(n_stream); + for (uint32_t s = 0; s < n_stream; ++s) { + v_cells[s].resize(kv_size); + } + + // by default, all sequence ids are mapped to the 0th stream + seq_to_stream.resize(LLAMA_MAX_SEQ, 0); + + if (n_stream > 1) { + seq_to_stream.resize(n_stream, 0); + for (uint32_t s = 0; s < n_stream; ++s) { + seq_to_stream[s] = s; + } + } + + // [TAG_V_CACHE_VARIABLE] + if (v_trans && hparams.is_n_embd_v_gqa_variable()) { + LLAMA_LOG_WARN("%s: the V embeddings have different sizes across layers and FA is not enabled - padding V cache to %d\n", + __func__, hparams.n_embd_v_gqa_max()); + } + + for (uint32_t il = 0; il < n_layer_cache; il++) { + if (filter && !filter(il)) { + LLAMA_LOG_DEBUG("%s: layer %3d: skipped\n", __func__, il); + continue; + } + + // [TAG_V_CACHE_VARIABLE] + const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il); + const uint32_t n_embd_v_gqa = !v_trans ? 
hparams.n_embd_v_gqa(il) : hparams.n_embd_v_gqa_max(); + + const char * dev_name = "CPU"; + + ggml_backend_buffer_type_t buft = ggml_backend_cpu_buffer_type(); + + if (offload) { + auto * dev = model.dev_layer(il); + buft = ggml_backend_dev_buffer_type(dev); + + dev_name = ggml_backend_dev_name(dev); + } + + LLAMA_LOG_DEBUG("%s: layer %3d: dev = %s\n", __func__, il, dev_name); + + ggml_context * ctx = ctx_for_buft(buft); + if (!ctx) { + throw std::runtime_error("failed to create ggml context for kv cache"); + } + + ggml_tensor * k; + ggml_tensor * v; + + k = ggml_new_tensor_3d(ctx, type_k, n_embd_k_gqa, kv_size, n_stream); + v = ggml_new_tensor_3d(ctx, type_v, n_embd_v_gqa, kv_size, n_stream); + + ggml_format_name(k, "cache_k_l%d", il); + ggml_format_name(v, "cache_v_l%d", il); + + std::vector k_stream; + std::vector v_stream; + + for (uint32_t s = 0; s < n_stream; ++s) { + k_stream.push_back(ggml_view_2d(ctx, k, n_embd_k_gqa, kv_size, k->nb[1], s*k->nb[2])); + v_stream.push_back(ggml_view_2d(ctx, v, n_embd_v_gqa, kv_size, v->nb[1], s*v->nb[2])); + } + + map_layer_ids[il] = layers.size(); + + layers.push_back({ il, k, v, k_stream, v_stream, }); + } + + // TODO: this is temporary until we support passing reuse layer filters [KV_REUSE] + if (model.arch == LLM_ARCH_GEMMA3N) { + LLAMA_LOG_DEBUG("%s: GEMMA3N: reuse layers [%d, %d]\n", __func__, n_layer_cache, hparams.n_layer - 1); + + for (uint32_t il = n_layer_cache; il < hparams.n_layer; il++) { + if (filter && !filter(il)) { + LLAMA_LOG_DEBUG("%s: layer %3d: skipped\n", __func__, il); + continue; + } + + const bool is_swa = hparams.is_swa(il); + const uint32_t il_reuse = n_layer_cache - (is_swa ? 2 : 1); + + GGML_ASSERT(map_layer_ids.find(il_reuse) != map_layer_ids.end()); + map_layer_ids[il] = map_layer_ids[il_reuse]; + + LLAMA_LOG_DEBUG("%s: layer %3d: reuse layer %d, isw = %d\n", __func__, il, il_reuse, is_swa); + } + } + + // allocate tensors and initialize the buffers to avoid NaNs in the padding + for (auto it : ctx_map) { + auto * buft = it.first; + auto * ctx = it.second; + + ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx, buft); + if (!buf) { + throw std::runtime_error("failed to allocate buffer for kv cache"); + } + + LLAMA_LOG_INFO("%s: %10s KV buffer size = %8.2f MiB\n", __func__, ggml_backend_buffer_name(buf), ggml_backend_buffer_get_size(buf)/1024.0/1024.0); + + ggml_backend_buffer_clear(buf, 0); + bufs.emplace_back(buf); + } + + { + const size_t memory_size_k = size_k_bytes(); + const size_t memory_size_v = size_v_bytes(); + + LLAMA_LOG_INFO("%s: size = %7.2f MiB (%6u cells, %3d layers, %2u/%u seqs), K (%s): %7.2f MiB, V (%s): %7.2f MiB\n", __func__, + (float)(memory_size_k + memory_size_v) / (1024.0f * 1024.0f), kv_size, (int) layers.size(), n_seq_max, n_stream, + ggml_type_name(type_k), (float)memory_size_k / (1024.0f * 1024.0f), + ggml_type_name(type_v), (float)memory_size_v / (1024.0f * 1024.0f)); + } + + const char * LLAMA_KV_CACHE_DEBUG = getenv("LLAMA_KV_CACHE_DEBUG"); + debug = LLAMA_KV_CACHE_DEBUG ? atoi(LLAMA_KV_CACHE_DEBUG) : 0; + + const char * LLAMA_SET_ROWS = getenv("LLAMA_SET_ROWS"); + supports_set_rows = LLAMA_SET_ROWS ? 
atoi(LLAMA_SET_ROWS) != 0 : supports_set_rows; + + if (!supports_set_rows) { + // ref: https://github.com/ggml-org/llama.cpp/pull/14363 + GGML_ASSERT(unified && "cannot use non-unified KV cache without ggml_set_rows() support"); + } + + if (!supports_set_rows) { + LLAMA_LOG_WARN("%s: LLAMA_SET_ROWS=0, using old ggml_cpy() method for backwards compatibility\n", __func__); + } +} + +void llama_kv_cache_unified::clear(bool data) { + for (uint32_t s = 0; s < n_stream; ++s) { + v_cells[s].reset(); + v_heads[s] = 0; + } + + if (data) { + for (auto & buf : bufs) { + ggml_backend_buffer_clear(buf.get(), 0); + } + } +} + +bool llama_kv_cache_unified::seq_rm(llama_seq_id seq_id, llama_pos p0, llama_pos p1) { + GGML_ASSERT(seq_id >= 0 && (size_t) seq_id < seq_to_stream.size()); + + auto & cells = v_cells[seq_to_stream[seq_id]]; + auto & head = v_heads[seq_to_stream[seq_id]]; + + uint32_t new_head = cells.size(); + + if (p0 < 0) { + p0 = 0; + } + + if (p1 < 0) { + p1 = std::numeric_limits::max(); + } + + if (seq_id >= 0) { + for (uint32_t i = 0; i < cells.size(); ++i) { + if (!cells.pos_in(i, p0, p1)) { + continue; + } + + if (cells.seq_has(i, seq_id) && cells.seq_rm(i, seq_id)) { + if (new_head == cells.size()) { + new_head = i; + } + } + } + } else { + // match any sequence + for (uint32_t i = 0; i < cells.size(); ++i) { + if (!cells.pos_in(i, p0, p1)) { + continue; + } + + cells.rm(i); + + if (new_head == cells.size()) { + new_head = i; + } + } + } + + // If we freed up a slot, set head to it so searching can start there. + if (new_head != cells.size() && new_head < head) { + head = new_head; + } + + return true; +} + +void llama_kv_cache_unified::seq_cp(llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) { + GGML_ASSERT(seq_id_src >= 0 && (size_t) seq_id_src < seq_to_stream.size()); + GGML_ASSERT(seq_id_dst >= 0 && (size_t) seq_id_dst < seq_to_stream.size()); + + const auto s0 = seq_to_stream[seq_id_src]; + const auto s1 = seq_to_stream[seq_id_dst]; + + if (s0 == s1) { + // since both sequences are in the same stream, no data copy is necessary + // we just have to update the cells meta data + + auto & cells = v_cells[s0]; + + if (seq_id_src == seq_id_dst) { + return; + } + + if (p0 < 0) { + p0 = 0; + } + + if (p1 < 0) { + p1 = std::numeric_limits::max(); + } + + for (uint32_t i = 0; i < cells.size(); ++i) { + if (!cells.pos_in(i, p0, p1)) { + continue; + } + + if (cells.seq_has(i, seq_id_src)) { + cells.seq_add(i, seq_id_dst); + } + } + + return; + } + + // cross-stream sequence copies require to copy the actual buffer data + + bool is_full = true; + + if (p0 > 0 && p0 + 1 < (int) get_size()) { + is_full = false; + } + + if (p1 > 0 && p1 + 1 < (int) get_size()) { + is_full = false; + } + + GGML_ASSERT(is_full && "seq_cp() is only supported for full KV buffers"); + + // enqueue the copy operation - the buffer copy will be performed during the next update + sc_info.ssrc.push_back(s0); + sc_info.sdst.push_back(s1); + + v_cells[s1].reset(); + for (uint32_t i = 0; i < v_cells[s0].size(); ++i) { + if (v_cells[s0].seq_has(i, seq_id_src)) { + llama_pos pos = v_cells[s0].pos_get(i); + llama_pos shift = v_cells[s0].get_shift(i); + + if (shift != 0) { + pos -= shift; + assert(pos >= 0); + } + + v_cells[s1].pos_set(i, pos); + v_cells[s1].seq_add(i, seq_id_dst); + + if (shift != 0) { + v_cells[s1].pos_add(i, shift); + } + } + } + + v_heads[s1] = v_heads[s0]; + + //for (uint32_t s = 0; s < n_stream; ++s) { + // LLAMA_LOG_WARN("%s: seq %d: min = %d, max = %d\n", __func__, s, 
v_cells[s].seq_pos_min(s), v_cells[s].seq_pos_max(s)); + //} +} + +void llama_kv_cache_unified::seq_keep(llama_seq_id seq_id) { + GGML_ASSERT(seq_id >= 0 && (size_t) seq_id < seq_to_stream.size()); + + auto & cells = v_cells[seq_to_stream[seq_id]]; + auto & head = v_heads[seq_to_stream[seq_id]]; + + uint32_t new_head = cells.size(); + + for (uint32_t i = 0; i < cells.size(); ++i) { + if (cells.seq_keep(i, seq_id)) { + if (new_head == cells.size()) { + new_head = i; + } + } + } + + // If we freed up a slot, set head to it so searching can start there. + if (new_head != cells.size() && new_head < head) { + head = new_head; + } +} + +void llama_kv_cache_unified::seq_add(llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos shift) { + GGML_ASSERT(seq_id >= 0 && (size_t) seq_id < seq_to_stream.size()); + + auto & cells = v_cells[seq_to_stream[seq_id]]; + auto & head = v_heads[seq_to_stream[seq_id]]; + + if (shift == 0) { + return; + } + + uint32_t new_head = cells.size(); + + if (p0 < 0) { + p0 = 0; + } + + if (p1 < 0) { + p1 = std::numeric_limits::max(); + } + + // If there is no range then return early to avoid looping over all cells. + if (p0 == p1) { + return; + } + + for (uint32_t i = 0; i < cells.size(); ++i) { + if (!cells.pos_in(i, p0, p1)) { + continue; + } + + if (cells.seq_has(i, seq_id)) { + if (cells.pos_add(i, shift)) { + if (new_head == cells.size()) { + new_head = i; + } + } + } + } + + // If we freed up a slot, set head to it so searching can start there. + // Otherwise we just start the next search from the beginning. + head = new_head != cells.size() ? new_head : 0; +} + +void llama_kv_cache_unified::seq_div(llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) { + GGML_ASSERT(seq_id >= 0 && (size_t) seq_id < seq_to_stream.size()); + + auto & cells = v_cells[seq_to_stream[seq_id]]; + + if (d == 1) { + return; + } + + if (p0 < 0) { + p0 = 0; + } + + if (p1 < 0) { + p1 = std::numeric_limits::max(); + } + + // If there is no range then return early to avoid looping over the cache. + if (p0 == p1) { + return; + } + + for (uint32_t i = 0; i < cells.size(); ++i) { + if (!cells.pos_in(i, p0, p1)) { + continue; + } + + if (cells.seq_has(i, seq_id)) { + cells.pos_div(i, d); + } + } +} + +llama_pos llama_kv_cache_unified::seq_pos_min(llama_seq_id seq_id) const { + GGML_ASSERT(seq_id >= 0 && (size_t) seq_id < seq_to_stream.size()); + + const auto & cells = v_cells[seq_to_stream[seq_id]]; + + return cells.seq_pos_min(seq_id); +} + +llama_pos llama_kv_cache_unified::seq_pos_max(llama_seq_id seq_id) const { + GGML_ASSERT(seq_id >= 0 && (size_t) seq_id < seq_to_stream.size()); + + const auto & cells = v_cells[seq_to_stream[seq_id]]; + + return cells.seq_pos_max(seq_id); +} + +llama_memory_context_ptr llama_kv_cache_unified::init_batch( + llama_batch_allocr & balloc, + uint32_t n_ubatch, + bool embd_all) { + GGML_UNUSED(embd_all); + + do { + balloc.split_reset(); + + std::vector ubatches; + while (true) { + auto ubatch = n_stream == 1 ? 
balloc.split_simple(n_ubatch) : balloc.split_equal(n_ubatch, true);
+
+            if (ubatch.n_tokens == 0) {
+                break;
+            }
+
+            ubatches.push_back(std::move(ubatch)); // NOLINT
+        }
+
+        if (balloc.get_n_used() < balloc.get_n_tokens()) {
+            // failed to find a suitable split
+            break;
+        }
+
+        auto sinfos = prepare(ubatches);
+        if (sinfos.empty()) {
+            break;
+        }
+
+        return std::make_unique<llama_kv_cache_unified_context>(
+                this, std::move(sinfos), std::move(ubatches));
+    } while (false);
+
+    return std::make_unique<llama_kv_cache_unified_context>(LLAMA_MEMORY_STATUS_FAILED_PREPARE);
+}
+
+llama_memory_context_ptr llama_kv_cache_unified::init_full() {
+    return std::make_unique<llama_kv_cache_unified_context>(this);
+}
+
+llama_memory_context_ptr llama_kv_cache_unified::init_update(llama_context * lctx, bool optimize) {
+    bool do_shift = get_has_shift();
+
+    defrag_info dinfo;
+
+    // see if we need to defrag
+    if (n_stream == 1) {
+        // note : for now do not consider defrag for n_stream > 1
+        const auto & cells = v_cells[seq_to_stream[0]];
+
+        bool do_defrag = optimize;
+
+        const auto thold = lctx->get_cparams().defrag_thold;
+
+        if (!do_defrag && thold > 0.0f) {
+            const auto n_kv = cells.used_max_p1();
+
+            // - do not defrag small contexts (i.e. < 2048 tokens)
+            // - count the padding towards the number of used tokens
+            const float fragmentation = n_kv >= 2048 ? std::max(0.0f, 1.0f - (float(cells.get_used() + n_pad)/n_kv)) : 0.0f;
+
+            if (fragmentation > thold) {
+                LLAMA_LOG_DEBUG("%s: fragmentation: %.2f - requesting defrag\n", __func__, fragmentation);
+
+                do_defrag = true;
+            }
+        }
+
+        if (do_defrag) {
+            dinfo = defrag_prepare(lctx->graph_max_nodes());
+        }
+    }
+
+    return std::make_unique<llama_kv_cache_unified_context>(this, lctx, do_shift, std::move(dinfo), std::move(sc_info));
+}
+
+llama_kv_cache_unified::slot_info_vec_t llama_kv_cache_unified::prepare(const std::vector<llama_ubatch> & ubatches) {
+    llama_kv_cache_unified::slot_info_vec_t res;
+
+    struct state_t {
+        slot_info sinfo; // slot info for the ubatch
+
+        std::vector<uint32_t> v_heads_old; // old positions of the heads, before placing the ubatch
+
+        std::vector<llama_kv_cells_unified> v_cells; // copy of the old cells, before placing the ubatch
+    };
+
+    // remember the old state of the cells so we can restore it in the end
+    std::vector<state_t> states;
+
+    bool success = true;
+
+    for (const auto & ubatch : ubatches) {
+        // non-continuous slots require support for ggml_set_rows()
+        const bool cont = supports_set_rows ? false : true;
+
+        // only find a suitable slot for the ubatch. don't modify the cells yet
+        const auto sinfo_new = find_slot(ubatch, cont);
+        if (sinfo_new.empty()) {
+            success = false;
+            break;
+        }
+
+        // remember the position that we found
+        res.push_back(sinfo_new);
+
+        // store the old state of the cells in the recovery stack
+        {
+            state_t state = { sinfo_new, v_heads, {} };
+
+            for (uint32_t s = 0; s < sinfo_new.n_stream(); ++s) {
+                auto & cells = v_cells[sinfo_new.strm[s]];
+
+                state.v_cells.push_back(cells.cp(sinfo_new.idxs[s]));
+            }
+
+            states.push_back(std::move(state));
+        }
+
+        // now emplace the ubatch
+        apply_ubatch(sinfo_new, ubatch);
+    }
+
+    GGML_ASSERT(!states.empty() || !success);
+
+    // iterate backwards and restore the cells to their original state
+    for (auto it = states.rbegin(); it != states.rend(); ++it) {
+        const auto & sinfo = it->sinfo;
+
+        for (uint32_t s = 0; s < sinfo.n_stream(); ++s) {
+            auto & cells = v_cells[sinfo.strm[s]];
+            auto & head = v_heads[sinfo.strm[s]];
+
+            cells.set(sinfo.idxs[s], it->v_cells[s]);
+            head = it->v_heads_old[s];
+        }
+    }
+
+    if (!success) {
+        return {};
+    }
+
+    return res;
+}
+
+bool llama_kv_cache_unified::update(llama_context * lctx, bool do_shift, const defrag_info & dinfo, const stream_copy_info & sc_info) {
+    bool updated = false;
+
+    auto * sched = lctx->get_sched();
+
+    if (!sc_info.empty()) {
+        assert(n_stream > 1 && "stream copy should never happen with a single stream");
+
+        llama_synchronize(lctx);
+
+        const size_t n_copy = sc_info.ssrc.size();
+
+        for (size_t i = 0; i < n_copy; ++i) {
+            const auto ssrc = sc_info.ssrc[i];
+            const auto sdst = sc_info.sdst[i];
+
+            assert(ssrc < n_stream);
+            assert(sdst < n_stream);
+
+            LLAMA_LOG_DEBUG("%s: copying KV buffer: stream %d to stream %d\n", __func__, ssrc, sdst);
+
+            assert(ssrc != sdst);
+
+            for (uint32_t il = 0; il < layers.size(); ++il) {
+                const auto & layer = layers[il];
+
+                ggml_backend_tensor_copy(layer.k_stream[ssrc], layer.k_stream[sdst]);
+                ggml_backend_tensor_copy(layer.v_stream[ssrc], layer.v_stream[sdst]);
+            }
+        }
+    }
+
+    if (do_shift) {
+        if (!get_can_shift()) {
+            GGML_ABORT("The current KV cache / model configuration does not support K-shift");
+        }
+
+        LLAMA_LOG_DEBUG("%s: applying K-shift\n", __func__);
+
+        // apply K-shift if needed
+        if (hparams.rope_type != LLAMA_ROPE_TYPE_NONE) {
+            ggml_backend_sched_reset(sched);
+
+            auto * res = lctx->get_gf_res_reserve();
+
+            res->reset();
+
+            auto * gf = build_graph_shift(res, lctx);
+            if (!ggml_backend_sched_alloc_graph(sched, gf)) {
+                LLAMA_LOG_ERROR("%s: failed to allocate compute graph for K-shift\n", __func__);
+                return updated;
+            }
+
+            res->set_inputs(nullptr);
+
+            if (lctx->graph_compute(gf, false) != GGML_STATUS_SUCCESS) {
+                LLAMA_LOG_ERROR("%s: failed to compute K-shift\n", __func__);
+                return updated;
+            }
+
+            updated = true;
+        }
+
+        for (uint32_t s = 0; s < n_stream; ++s) {
+            auto & cells = v_cells[s];
+
+            cells.reset_shift();
+        }
+    }
+
+    if (!dinfo.empty()) {
+        LLAMA_LOG_DEBUG("%s: defragmenting KV cache\n", __func__);
+
+        // note: for now do not consider defrag for n_stream > 1
+        auto & cells = v_cells[seq_to_stream[0]];
+        auto & head = v_heads[seq_to_stream[0]];
+
+        // apply moves:
+        {
+            const auto n_kv = dinfo.ids.size();
+
+            for (uint32_t i = 0; i < n_kv; ++i) {
+                assert(dinfo.ids[i] <= n_kv);
+
+                if (dinfo.ids[i] == n_kv || dinfo.ids[i] == i) {
+                    continue;
+                }
+
+                cells.mv(i, dinfo.ids[i]);
+            }
+
+            // reset the head so we can find the first free slot during the next ubatch
+            head = 0;
+        }
+
+        ggml_backend_sched_reset(sched);
+
+        auto * res = lctx->get_gf_res_reserve();
+
+
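+        // note (sketch of the flow below, mirroring the K-shift path above):
+        // reset the reserved graph result, build the defrag copy graph,
+        // allocate it on the scheduler, set its inputs, then compute; on any
+        // failure we return early, with `updated` reporting prior stages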
res->reset(); + + auto * gf = build_graph_defrag(res, lctx, dinfo); + if (!ggml_backend_sched_alloc_graph(sched, gf)) { + LLAMA_LOG_ERROR("%s: failed to allocate compute graph for defrag\n", __func__); + return updated; + } + + res->set_inputs(nullptr); + + if (lctx->graph_compute(gf, false) != GGML_STATUS_SUCCESS) { + LLAMA_LOG_ERROR("%s: failed to compute defrag\n", __func__); + return updated; + } + + updated = true; + } + + return updated; +} + +llama_kv_cache_unified::slot_info llama_kv_cache_unified::find_slot(const llama_ubatch & ubatch, bool cont) const { + if (debug > 0) { + const auto & cells = v_cells[seq_to_stream[1]]; + + const uint32_t head_cur = v_heads[1]; + + LLAMA_LOG_DEBUG("%s: n = %5d, used = %5d, head = %5d, size = %5d, n_swa = %5d\n", + __func__, cells.used_max_p1(), cells.get_used(), head_cur, get_size(), n_swa); + + if ((debug == 2 && n_swa > 0) || debug > 2) { + std::string ss; + for (uint32_t i = 0; i < cells.size(); ++i) { + if (cells.is_empty(i)) { + ss += '.'; + } else { + assert(cells.seq_count(i) >= 1); + + if (cells.seq_count(i) == 1) { + ss += std::to_string(cells.seq_get(i)); + } else { + ss += 'M'; + } + } + if (i%256 == 255) { + ss += " *"; + ss += '\n'; + } + } + LLAMA_LOG_DEBUG("\n%s\n", ss.c_str()); + } + + if ((debug == 2 && n_swa > 0) || debug > 2) { + std::string ss; + for (uint32_t i = 0; i < cells.size(); ++i) { + std::string cur; + if (cells.is_empty(i)) { + cur = '.'; + } else { + cur = std::to_string(cells.pos_get(i)); + } + const int n = cur.size(); + for (int j = 0; j < 5 - n; ++j) { + cur += ' '; + } + ss += cur; + if (i%256 == 255) { + ss += " *"; + } + if (i%64 == 63) { + ss += '\n'; + } + } + LLAMA_LOG_DEBUG("\n%s\n", ss.c_str()); + } + + for (int s = 0; s < LLAMA_MAX_SEQ; ++s) { + if (cells.seq_pos_min(s) < 0) { + continue; + } + + LLAMA_LOG_DEBUG("%s: min[%d] = %5d, max[%d] = %5d\n", __func__, s, cells.seq_pos_min(s), s, cells.seq_pos_max(s)); + } + } + + uint32_t n_tokens = ubatch.n_tokens; + uint32_t n_seqs = 1; + + if (n_stream > 1) { + GGML_ASSERT(n_tokens % ubatch.n_seqs_unq == 0); + + n_seqs = ubatch.n_seqs_unq; + n_tokens = n_tokens / n_seqs; + } + + slot_info res = { + /*.s0 =*/ LLAMA_MAX_SEQ, + /*.s1 =*/ 0, + /*.strm =*/ { }, + /*.idxs =*/ { }, + }; + + res.resize(n_seqs); + + for (uint32_t s = 0; s < n_seqs; ++s) { + const auto seq_id = ubatch.seq_id_unq[s]; + + if (n_stream > 1) { + GGML_ASSERT(ubatch.n_seq_id[s*n_tokens] == 1); + GGML_ASSERT(ubatch.seq_id [s*n_tokens][0] == seq_id); + } + + res.s0 = std::min(res.s0, seq_to_stream[seq_id]); + res.s1 = std::max(res.s1, seq_to_stream[seq_id]); + + res.strm[s] = seq_to_stream[seq_id]; + res.idxs[s].reserve(n_tokens); + + const auto & cells = v_cells[seq_to_stream[seq_id]]; + + uint32_t head_cur = v_heads[seq_to_stream[seq_id]]; + + // if we have enough unused cells before the current head -> + // better to start searching from the beginning of the cache, hoping to fill it + if (head_cur > cells.get_used() + 2*n_tokens) { + head_cur = 0; + } + + if (n_tokens > cells.size()) { + LLAMA_LOG_ERROR("%s: n_tokens = %d > size = %u\n", __func__, n_tokens, cells.size()); + return { }; + } + + uint32_t n_tested = 0; + + // for continuous slots, we test that all tokens in the ubatch fit, starting from the current head + // for non-continuous slots, we test the tokens one by one + const uint32_t n_test = cont ? 
n_tokens : 1; + + while (true) { + if (head_cur + n_test > cells.size()) { + n_tested += cells.size() - head_cur; + head_cur = 0; + continue; + } + + for (uint32_t i = 0; i < n_test; i++) { + const auto idx = head_cur; + + head_cur++; + n_tested++; + + //const llama_pos pos = ubatch.pos[i]; + //const llama_seq_id seq_id = ubatch.seq_id[i][0]; + + // can we use this cell? either: + // - the cell is empty + // - the cell is occupied only by one sequence: + // - (disabled) mask causally, if the sequence is the same as the one we are inserting + // - mask SWA, using current max pos for that sequence in the cache + // always insert in the cell with minimum pos + bool can_use = cells.is_empty(idx); + + if (!can_use && cells.seq_count(idx) == 1) { + const llama_pos pos_cell = cells.pos_get(idx); + + // (disabled) causal mask + // note: it's better to purge any "future" tokens beforehand + //if (cells.seq_has(idx, seq_id)) { + // can_use = pos_cell >= pos; + //} + + if (!can_use) { + const llama_seq_id seq_id_cell = cells.seq_get(idx); + + // SWA mask + if (is_masked_swa(pos_cell, cells.seq_pos_max(seq_id_cell) + 1)) { + can_use = true; + } + } + } + + if (can_use) { + res.idxs[s].push_back(idx); + } else { + if (cont) { + break; + } + } + } + + if (res.idxs[s].size() == n_tokens) { + break; + } + + if (cont) { + res.idxs[s].clear(); + } + + if (n_tested >= cells.size()) { + //LLAMA_LOG_ERROR("%s: failed to find a slot for %d tokens\n", __func__, n_tokens); + return { }; + } + } + + // we didn't find a suitable slot - return empty result + if (res.idxs[s].size() < n_tokens) { + return { }; + } + } + + assert(res.s1 >= res.s0); + + return res; +} + +void llama_kv_cache_unified::apply_ubatch(const slot_info & sinfo, const llama_ubatch & ubatch) { + // keep track of the max sequence position that we would overwrite with this ubatch + // for non-SWA cache, this would be always empty + llama_seq_id seq_pos_max_rm[LLAMA_MAX_SEQ]; + for (uint32_t s = 0; s < LLAMA_MAX_SEQ; ++s) { + seq_pos_max_rm[s] = -1; + } + + assert(ubatch.n_tokens == sinfo.n_stream()*sinfo.size()); + + for (uint32_t s = 0; s < sinfo.n_stream(); ++s) { + for (uint32_t ii = 0; ii < sinfo.size(); ++ii) { + const uint32_t i = s*sinfo.size() + ii; + + auto & cells = v_cells[sinfo.strm[s]]; + + const auto idx = sinfo.idxs[s][ii]; + + if (!cells.is_empty(idx)) { + assert(cells.seq_count(idx) == 1); + + const llama_seq_id seq_id = cells.seq_get(idx); + const llama_pos pos = cells.pos_get(idx); + + seq_pos_max_rm[seq_id] = std::max(seq_pos_max_rm[seq_id], pos); + + cells.rm(idx); + } + + cells.pos_set(idx, ubatch.pos[i]); + + for (int32_t s = 0; s < ubatch.n_seq_id[i]; s++) { + cells.seq_add(idx, ubatch.seq_id[i][s]); + } + } + } + + // note: we want to preserve the invariant that all positions between [pos_min, pos_max] for each sequence + // will be present in the cache. 
so we have to purge any position which is less than those we would overwrite + // ref: https://github.com/ggml-org/llama.cpp/pull/13746#issuecomment-2916057092 + for (uint32_t s = 0; s < LLAMA_MAX_SEQ; ++s) { + if (seq_pos_max_rm[s] == -1) { + continue; + } + + GGML_ASSERT(s < seq_to_stream.size()); + + auto & cells = v_cells[seq_to_stream[s]]; + + if (cells.seq_pos_min(s) <= seq_pos_max_rm[s]) { + LLAMA_LOG_DEBUG("%s: purging positions [%d, %d] of sequence %d from KV cache\n", + __func__, cells.seq_pos_min(s), seq_pos_max_rm[s], s); + + seq_rm(s, cells.seq_pos_min(s), seq_pos_max_rm[s] + 1); + } + } + + // move the head at the end of the slot + for (uint32_t s = 0; s < sinfo.n_stream(); ++s) { + auto & head = v_heads[sinfo.strm[s]]; + + head = sinfo.idxs[s].back() + 1; + } +} + +bool llama_kv_cache_unified::get_can_shift() const { + return true; +} + +uint32_t llama_kv_cache_unified::get_size() const { + const auto & cells = v_cells[seq_to_stream[0]]; + + return cells.size(); +} + +uint32_t llama_kv_cache_unified::get_n_stream() const { + return n_stream; +} + +bool llama_kv_cache_unified::get_has_shift() const { + bool result = false; + + for (uint32_t s = 0; s < n_stream; ++s) { + result |= v_cells[s].get_has_shift(); + } + + return result; +} + +uint32_t llama_kv_cache_unified::get_n_kv() const { + uint32_t result = 0; + + for (uint32_t s = 0; s < n_stream; ++s) { + const auto & cells = v_cells[s]; + + result = std::max(std::min(cells.size(), std::max(n_pad, GGML_PAD(cells.used_max_p1(), n_pad))), result); + } + + return result; +} + +bool llama_kv_cache_unified::get_supports_set_rows() const { + return supports_set_rows; +} + +ggml_tensor * llama_kv_cache_unified::get_k(ggml_context * ctx, int32_t il, uint32_t n_kv, const slot_info & sinfo) const { + const int32_t ikv = map_layer_ids.at(il); + + auto * k = layers[ikv].k; + + const uint64_t kv_size = get_size(); + const uint64_t n_embd_k_gqa = k->ne[0]; + + assert(n_embd_k_gqa == hparams.n_embd_k_gqa(il)); + + const uint32_t ns = sinfo.s1 - sinfo.s0 + 1; + + return ggml_view_4d(ctx, k, + hparams.n_embd_head_k, hparams.n_head_kv(il), n_kv, ns, + ggml_row_size(k->type, hparams.n_embd_head_k), + ggml_row_size(k->type, n_embd_k_gqa), + ggml_row_size(k->type, n_embd_k_gqa*kv_size), + ggml_row_size(k->type, n_embd_k_gqa*kv_size)*sinfo.s0); +} + +ggml_tensor * llama_kv_cache_unified::get_v(ggml_context * ctx, int32_t il, uint32_t n_kv, const slot_info & sinfo) const { + const int32_t ikv = map_layer_ids.at(il); + + auto * v = layers[ikv].v; + + const uint64_t kv_size = get_size(); + const uint64_t n_embd_v_gqa = v->ne[0]; + + // [TAG_V_CACHE_VARIABLE] + assert(n_embd_v_gqa >= hparams.n_embd_v_gqa(il)); + + const uint32_t ns = sinfo.s1 - sinfo.s0 + 1; + + if (!v_trans) { + // note: v->nb[1] <= v->nb[2] + return ggml_view_4d(ctx, v, + hparams.n_embd_head_v, hparams.n_head_kv(il), n_kv, ns, + ggml_row_size(v->type, hparams.n_embd_head_v), // v->nb[1] + ggml_row_size(v->type, n_embd_v_gqa), // v->nb[2] + ggml_row_size(v->type, n_embd_v_gqa*kv_size), // v->nb[3] + ggml_row_size(v->type, n_embd_v_gqa*kv_size)*sinfo.s0); + } + + // note: v->nb[1] > v->nb[2] + return ggml_view_4d(ctx, v, + n_kv, hparams.n_head_kv(il), hparams.n_embd_head_v, ns, + ggml_row_size(v->type, kv_size*hparams.n_embd_head_v), // v->nb[1] + ggml_row_size(v->type, kv_size), // v->nb[2] + ggml_row_size(v->type, kv_size*n_embd_v_gqa), // v->nb[3] + ggml_row_size(v->type, kv_size*n_embd_v_gqa)*sinfo.s0); +} + +ggml_tensor * llama_kv_cache_unified::cpy_k(ggml_context * ctx, 
ggml_tensor * k_cur, ggml_tensor * k_idxs, int32_t il, const slot_info & sinfo) const { + const int32_t ikv = map_layer_ids.at(il); + + auto * k = layers[ikv].k; + + const int64_t n_embd_k_gqa = k->ne[0]; + const int64_t n_tokens = k_cur->ne[2]; + + k_cur = ggml_reshape_2d(ctx, k_cur, k->ne[0], n_tokens); + + if (k_idxs && supports_set_rows) { + if (k->ne[2] > 1) { + k = ggml_reshape_2d(ctx, k, k->ne[0], k->ne[1]*k->ne[2]); + } + + return ggml_set_rows(ctx, k, k_cur, k_idxs); + } + + // TODO: fallback to old ggml_cpy() method for backwards compatibility + // will be removed when ggml_set_rows() is adopted by all backends + + GGML_ASSERT(n_stream == 1 && "n_stream > 1 not supported without LLAMA_SET_ROWS"); + + ggml_tensor * k_view = ggml_view_1d(ctx, k, + n_tokens*n_embd_k_gqa, + ggml_row_size(k->type, n_embd_k_gqa)*sinfo.head()); + + return ggml_cpy(ctx, k_cur, k_view); +} + +ggml_tensor * llama_kv_cache_unified::cpy_v(ggml_context * ctx, ggml_tensor * v_cur, ggml_tensor * v_idxs, int32_t il, const slot_info & sinfo) const { + const int32_t ikv = map_layer_ids.at(il); + + auto * v = layers[ikv].v; + + const int64_t n_embd_v_gqa = v_cur->ne[0]*v_cur->ne[1]; + const int64_t n_tokens = v_cur->ne[2]; + + v_cur = ggml_reshape_2d(ctx, v_cur, n_embd_v_gqa, n_tokens); + + if (v_idxs && supports_set_rows) { + if (!v_trans) { + if (v->ne[2] > 1) { + v = ggml_reshape_2d(ctx, v, v->ne[0], v->ne[1]*v->ne[2]); + } + + return ggml_set_rows(ctx, v, v_cur, v_idxs); + } + + // [TAG_V_CACHE_VARIABLE] + if (n_embd_v_gqa < v->ne[0]) { + v_cur = ggml_pad(ctx, v_cur, v->ne[0] - n_embd_v_gqa, 0, 0, 0); + } + + // the row becomes a single element + ggml_tensor * v_view = ggml_reshape_2d(ctx, v, 1, v->ne[0]*v->ne[1]*v->ne[2]); + + v_cur = ggml_reshape_2d(ctx, v_cur, 1, v_cur->ne[0]*v_cur->ne[1]); + + return ggml_set_rows(ctx, v_view, v_cur, v_idxs); + } + + // TODO: fallback to old ggml_cpy() method for backwards compatibility + // will be removed when ggml_set_rows() is adopted by all backends + + GGML_ASSERT(n_stream == 1 && "n_stream > 1 not supported without LLAMA_SET_ROWS"); + + ggml_tensor * v_view = nullptr; + + if (!v_trans) { + v_view = ggml_view_1d(ctx, v, + n_tokens*n_embd_v_gqa, + ggml_row_size(v->type, n_embd_v_gqa)*sinfo.head()); + } else { + v_cur = ggml_transpose(ctx, v_cur); + + v_view = ggml_view_2d(ctx, v, n_tokens, n_embd_v_gqa, + (v->ne[1] )*ggml_element_size(v), + (sinfo.head())*ggml_element_size(v)); + } + + return ggml_cpy(ctx, v_cur, v_view); +} + +ggml_tensor * llama_kv_cache_unified::build_input_k_idxs(ggml_context * ctx, const llama_ubatch & ubatch) const { + const uint32_t n_tokens = ubatch.n_tokens; + + ggml_tensor * k_idxs = ggml_new_tensor_1d(ctx, GGML_TYPE_I64, n_tokens); + + ggml_set_input(k_idxs); + + return k_idxs; +} + +ggml_tensor * llama_kv_cache_unified::build_input_v_idxs(ggml_context * ctx, const llama_ubatch & ubatch) const { + const uint32_t n_tokens = ubatch.n_tokens; + + ggml_tensor * v_idxs; + + if (!v_trans) { + v_idxs = ggml_new_tensor_1d(ctx, GGML_TYPE_I64, n_tokens); + } else { + v_idxs = ggml_new_tensor_1d(ctx, GGML_TYPE_I64, n_tokens*hparams.n_embd_v_gqa_max()); + } + + ggml_set_input(v_idxs); + + return v_idxs; +} + +void llama_kv_cache_unified::set_input_k_idxs(ggml_tensor * dst, const llama_ubatch * ubatch, const slot_info & sinfo) const { + if (!supports_set_rows) { + return; + } + + const uint32_t n_tokens = ubatch->n_tokens; + GGML_ASSERT(n_tokens == (int64_t) sinfo.size()*sinfo.n_stream()); + + GGML_ASSERT(ggml_backend_buffer_is_host(dst->buffer)); + 
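+    // each destination row written below is an absolute row index into the K
+    // tensor flattened across streams, i.e. row = strm*kv_size + cell_index,
+    // matching the 2D reshape that cpy_k() applies before ggml_set_rows();
+    // e.g. with kv_size = 1024, stream 1 / cell 7 maps to row 1*1024 + 7 = 1031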
int64_t * data = (int64_t *) dst->data; + + for (uint32_t s = 0; s < sinfo.n_stream(); ++s) { + const int64_t offs = sinfo.strm[s]*get_size(); + + for (uint32_t i = 0; i < sinfo.size(); ++i) { + data[s*sinfo.size() + i] = offs + sinfo.idxs[s][i]; + } + } +} + +void llama_kv_cache_unified::set_input_v_idxs(ggml_tensor * dst, const llama_ubatch * ubatch, const slot_info & sinfo) const { + if (!supports_set_rows) { + return; + } + + const uint32_t n_tokens = ubatch->n_tokens; + GGML_ASSERT(n_tokens == (int64_t) sinfo.size()*sinfo.n_stream()); + + GGML_ASSERT(ggml_backend_buffer_is_host(dst->buffer)); + int64_t * data = (int64_t *) dst->data; + + if (!v_trans) { + for (uint32_t s = 0; s < sinfo.n_stream(); ++s) { + const int64_t offs = sinfo.strm[s]*get_size(); + + for (uint32_t i = 0; i < sinfo.size(); ++i) { + data[s*sinfo.size() + i] = offs + sinfo.idxs[s][i]; + } + } + } else { + // note: the V cache is transposed when not using flash attention + const int64_t kv_size = get_size(); + + const int64_t n_embd_v_gqa = hparams.n_embd_v_gqa_max(); + + for (uint32_t s = 0; s < sinfo.n_stream(); ++s) { + const int64_t offs = sinfo.strm[s]*kv_size*n_embd_v_gqa; + + for (uint32_t i = 0; i < sinfo.size(); ++i) { + for (uint32_t j = 0; j < n_embd_v_gqa; ++j) { + data[s*sinfo.size()*n_embd_v_gqa + i*n_embd_v_gqa + j] = offs + j*kv_size + sinfo.idxs[s][i]; + } + } + } + } +} + +void llama_kv_cache_unified::set_input_k_shift(ggml_tensor * dst) const { + GGML_ASSERT(ggml_backend_buffer_is_host(dst->buffer)); + + int32_t * data = (int32_t *) dst->data; + + for (uint32_t s = 0; s < n_stream; ++s) { + const auto & cells = v_cells[s]; + + for (uint32_t i = 0; i < cells.size(); ++i) { + data[s*cells.size() + i] = cells.is_empty(i) ? 0 : cells.get_shift(i); + } + } +} + +void llama_kv_cache_unified::set_input_kq_mask(ggml_tensor * dst, const llama_ubatch * ubatch, bool causal_attn) const { + const uint32_t n_tokens = ubatch->n_tokens; + + GGML_ASSERT(ggml_backend_buffer_is_host(dst->buffer)); + float * data = (float *) dst->data; + + const int64_t n_kv = dst->ne[0]; + const int64_t n_stream = dst->ne[3]; // num streams in the current ubatch + + GGML_ASSERT(n_tokens%n_stream == 0); + + // n_tps == n_tokens_per_stream + const int64_t n_tps = n_tokens/n_stream; + const int64_t n_tps_pad = GGML_PAD(n_tps, GGML_KQ_MASK_PAD); + + std::fill(data, data + ggml_nelements(dst), -INFINITY); + + // Use only the previous KV cells of the correct sequence for each token of the ubatch. + // It's assumed that if a token in the batch has multiple sequences, they are equivalent. 
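+    // In predicate form (sketch): cell j is visible to batch token i iff
+    //   !empty(j) && seq_has(j, seq_id_i)
+    //     && (!causal_attn || pos(j) <= pos(i))
+    //     && !is_masked_swa(pos(j), pos(i))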
+ // Example with a cache of 10 tokens, 2 tokens populated in cache and 3 tokens in batch: + // Causal mask: + // xxx------- + // xxxx------ + // xxxxx----- + // Non-causal mask: + // xxxxx----- + // xxxxx----- + // xxxxx----- + // To visualize the mask, see https://github.com/ggml-org/llama.cpp/pull/12615 + // TODO: optimize this section + for (uint32_t h = 0; h < 1; ++h) { + for (uint32_t s = 0; s < n_stream; ++s) { + for (uint32_t ii = 0; ii < n_tps; ++ii) { + const uint32_t i = s*n_tps + ii; + + const llama_seq_id seq_id = ubatch->seq_id[i][0]; + + const auto & cells = v_cells[seq_to_stream[seq_id]]; + + const llama_pos p1 = ubatch->pos[i]; + + const uint64_t idst = n_kv*(h*n_stream*n_tps_pad + s*n_tps_pad + ii); + + for (uint32_t j = 0; j < n_kv; ++j) { + if (cells.is_empty(j)) { + continue; + } + + // mask the token if not the same sequence + if (!cells.seq_has(j, seq_id)) { + continue; + } + + const llama_pos p0 = cells.pos_get(j); + + // mask future tokens + if (causal_attn && p0 > p1) { + continue; + } + + // apply SWA if any + if (is_masked_swa(p0, p1)) { + continue; + } + + data[idst + j] = hparams.use_alibi ? -std::abs(p0 - p1) : 0.0f; + } + } + } + } +} + +void llama_kv_cache_unified::set_input_pos_bucket(ggml_tensor * dst, const llama_ubatch * ubatch) const { + const int64_t n_tokens = ubatch->n_tokens; + + GGML_ASSERT(n_stream == 1 && "TODO: support multiple streams"); + const auto & cells = v_cells[0]; + + GGML_ASSERT(ggml_backend_buffer_is_host(dst->buffer)); + GGML_ASSERT(!ubatch->equal_seqs()); // TODO: use ubatch->n_seqs instead of failing + + int32_t * data = (int32_t *) dst->data; + + const int32_t n_kv = dst->ne[0]; + + for (int h = 0; h < 1; ++h) { + for (int i = 0; i < n_tokens; ++i) { + for (int j = 0; j < n_kv; ++j) { + // the position when the cells is empty is irrelevant - it will be masked out later in the attention + const llama_pos p0 = cells.is_empty(j) ? -1 : cells.pos_get(j); + + data[h*(n_kv*n_tokens) + i*n_kv + j] = llama_relative_position_bucket(p0, ubatch->pos[i], hparams.n_rel_attn_bkts, false); + } + } + } +} + +size_t llama_kv_cache_unified::total_size() const { + size_t size = 0; + + for (const auto & buf : bufs) { + size += ggml_backend_buffer_get_size(buf.get()); + } + + return size; +} + +size_t llama_kv_cache_unified::size_k_bytes() const { + size_t size_k_bytes = 0; + + for (const auto & layer : layers) { + size_k_bytes += ggml_nbytes(layer.k); + } + + return size_k_bytes; +} + +size_t llama_kv_cache_unified::size_v_bytes() const { + size_t size_v_bytes = 0; + + for (const auto & layer : layers) { + size_v_bytes += ggml_nbytes(layer.v); + } + + return size_v_bytes; +} + +ggml_tensor * llama_kv_cache_unified::build_rope_shift( + const llama_cparams & cparams, + ggml_context * ctx, + ggml_tensor * cur, + ggml_tensor * shift, + ggml_tensor * factors, + float freq_base, + float freq_scale) const { + const auto & n_ctx_orig = cparams.n_ctx_orig_yarn; + + const auto & yarn_ext_factor = cparams.yarn_ext_factor; + const auto & yarn_beta_fast = cparams.yarn_beta_fast; + const auto & yarn_beta_slow = cparams.yarn_beta_slow; + + const auto & n_rot = hparams.n_rot; + const auto & rope_type = hparams.rope_type == LLAMA_ROPE_TYPE_MROPE + // @ngxson : this is a workaround + // for M-RoPE, we want to rotate the whole vector when doing KV shift + // a normal RoPE should work, we just need to use the correct ordering + // ref: https://github.com/ggml-org/llama.cpp/pull/13870 + ? 
LLAMA_ROPE_TYPE_NEOX + : hparams.rope_type; + + // See llm_build_deepseek2() for why attn_factor has to be scaled for YaRN RoPE to work correctly. + // See https://github.com/ggerganov/llama.cpp/discussions/7416 for detailed explanation. + const float yarn_attn_factor = model.arch == LLM_ARCH_DEEPSEEK2 + ? 1.0f / (1.0f + 0.1f * logf(1.0f / freq_scale)) + : cparams.yarn_attn_factor; + + ggml_tensor * tmp; + + if (ggml_is_quantized(cur->type)) { + // dequantize to f32 -> RoPE -> quantize back + tmp = ggml_cast(ctx, cur, GGML_TYPE_F32); + + tmp = ggml_rope_ext(ctx, tmp, + shift, factors, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + yarn_ext_factor, yarn_attn_factor, yarn_beta_fast, yarn_beta_slow); + + tmp = ggml_cpy(ctx, tmp, cur); + } else { + // we rotate only the first n_rot dimensions + tmp = ggml_rope_ext_inplace(ctx, cur, + shift, factors, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + yarn_ext_factor, yarn_attn_factor, yarn_beta_fast, yarn_beta_slow); + } + + return tmp; +} + +class llm_graph_input_k_shift : public llm_graph_input_i { +public: + llm_graph_input_k_shift(const llama_kv_cache_unified * kv_self) : kv_self(kv_self) {} + virtual ~llm_graph_input_k_shift() = default; + + void set_input(const llama_ubatch * ubatch) override; + + ggml_tensor * k_shift; // I32 [kv_size*n_stream] + + const llama_kv_cache_unified * kv_self; +}; + +void llm_graph_input_k_shift::set_input(const llama_ubatch * ubatch) { + GGML_UNUSED(ubatch); + + if (k_shift) { + kv_self->set_input_k_shift(k_shift); + } +} + +ggml_cgraph * llama_kv_cache_unified::build_graph_shift(llm_graph_result * res, llama_context * lctx) const { + auto * ctx = res->get_ctx(); + auto * gf = res->get_gf(); + + const auto & n_embd_head_k = hparams.n_embd_head_k; + //const auto & n_embd_head_v = hparams.n_embd_head_v; + + auto inp = std::make_unique(this); + + inp->k_shift = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, (int64_t) get_size()*n_stream); + ggml_set_input(inp->k_shift); + + const auto & cparams = lctx->get_cparams(); + + for (const auto & layer : layers) { + const uint32_t il = layer.il; + + const int64_t n_head_kv = hparams.n_head_kv(il); + const int64_t n_embd_k_gqa = hparams.n_embd_k_gqa(il); + + const float freq_base_l = model.get_rope_freq_base (cparams, il); + const float freq_scale_l = model.get_rope_freq_scale(cparams, il); + + ggml_tensor * rope_factors = model.get_rope_factors(cparams, il); + + ggml_tensor * k = + ggml_view_3d(ctx, layer.k, + n_embd_head_k, n_head_kv, get_size()*n_stream, + ggml_row_size(layer.k->type, n_embd_head_k), + ggml_row_size(layer.k->type, n_embd_k_gqa), + 0); + + ggml_tensor * cur = build_rope_shift(cparams, ctx, k, inp->k_shift, rope_factors, freq_base_l, freq_scale_l); + + ggml_build_forward_expand(gf, cur); + } + + res->add_input(std::move(inp)); + + return gf; +} + +ggml_cgraph * llama_kv_cache_unified::build_graph_defrag( + llm_graph_result * res, + llama_context * lctx, + const defrag_info & dinfo) const { + auto * ctx = res->get_ctx(); + auto * gf = res->get_gf(); + + GGML_ASSERT(n_stream == 1 && "n_stream > 1 does not support defrag"); + + const auto & cells = v_cells[0]; + + const auto & ids = dinfo.ids; + + const auto & cparams = lctx->get_cparams(); + +#if 0 + // CPU defrag + // + // TODO: optimizations are possible: + // - multiple threads + // - avoid copying to the host memory when already there + // + // likely not worth the effort, as we have ggml_graph based defrag + // + + const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(); + const uint32_t 
n_embd_v_gqa = hparams.n_embd_v_gqa(); + + const uint32_t kv_size = size; + + std::vector buf_k; + std::vector buf_v; + + for (uint32_t il = 0; il < n_layer; ++il) { + const size_t k_size_row = ggml_row_size(k_l[il]->type, n_embd_k_gqa); + const size_t k_size = ggml_row_size(k_l[il]->type, n_embd_k_gqa*kv_size); + + const size_t v_size_el = ggml_type_size(v_l[il]->type); + const size_t v_size = ggml_row_size (v_l[il]->type, n_embd_v_gqa*kv_size); + + buf_k.resize(k_size); + buf_v.resize(v_size); + + ggml_backend_tensor_get(k_l[il], buf_k.data(), 0, buf_k.size()); + ggml_backend_tensor_get(v_l[il], buf_v.data(), 0, buf_v.size()); + + // batch move [i, i+nm) to [id, id+nm) + // note: cells can move only to a lower index + for (uint32_t i = 0; i < n_kv; ++i) { + const uint32_t id = ids[i]; + + if (i == id || id == n_kv) { + continue; + } + + uint32_t nm = 1; + + while (i + nm < n_kv && ids[i + nm] == id + nm) { + nm++; + } + + // move keys + { + const int64_t os = i*k_size_row; + const int64_t od = id*k_size_row; + + memcpy(buf_k.data() + od, buf_k.data() + os, nm*k_size_row); + } + + // move values (note: they are transposed) + { + const int64_t os = i; + const int64_t od = id; + + for (uint32_t j = 0; j < n_embd_v_gqa; ++j) { + memcpy(buf_v.data() + (od + j*kv_size)*v_size_el, buf_v.data() + (os + j*kv_size)*v_size_el, nm*v_size_el); + } + } + + i += nm - 1; + } + + ggml_backend_tensor_set(k_l[il], buf_k.data(), 0, buf_k.size()); + ggml_backend_tensor_set(v_l[il], buf_v.data(), 0, buf_v.size()); + } +#else + for (uint32_t i = 0; i < ids.size(); ++i) { + const uint32_t id = ids[i]; + + if (i == id || id == ids.size()) { + continue; + } + + uint32_t nm = 1; + + while (i + nm < ids.size() && ids[i + nm] == id + nm) { + nm++; + } + + for (const auto & layer : layers) { + const uint32_t il = layer.il; + + const int64_t n_embd_k_gqa = hparams.n_embd_k_gqa(il); + const int64_t n_embd_v_gqa = hparams.n_embd_v_gqa(il); + + ggml_tensor * view_k_src = ggml_view_2d(ctx, layer.k, + n_embd_k_gqa, nm, + ggml_row_size(layer.k->type, n_embd_k_gqa), + ggml_row_size(layer.k->type, n_embd_k_gqa*i)); + + ggml_tensor * view_k_dst = ggml_view_2d(ctx, layer.k, + n_embd_k_gqa, nm, + ggml_row_size(layer.k->type, n_embd_k_gqa), + ggml_row_size(layer.k->type, n_embd_k_gqa*id)); + + ggml_tensor * view_v_src; + ggml_tensor * view_v_dst; + + if (cparams.flash_attn) { + // NOTE: the V cache is not transposed when using flash attention + view_v_src = ggml_view_2d(ctx, layer.v, + n_embd_v_gqa, nm, + ggml_row_size(layer.v->type, n_embd_v_gqa), + ggml_row_size(layer.v->type, n_embd_v_gqa*i)); + + view_v_dst = ggml_view_2d(ctx, layer.v, + n_embd_v_gqa, nm, + ggml_row_size(layer.v->type, n_embd_v_gqa), + ggml_row_size(layer.v->type, n_embd_v_gqa*id)); + } else { + view_v_src = ggml_view_2d(ctx, layer.v, + nm, n_embd_v_gqa, + ggml_row_size(layer.v->type, cells.size()), + ggml_row_size(layer.v->type, i)); + + view_v_dst = ggml_view_2d(ctx, layer.v, + nm, n_embd_v_gqa, + ggml_row_size(layer.v->type, cells.size()), + ggml_row_size(layer.v->type, id)); + } + + ggml_build_forward_expand(gf, ggml_cpy(ctx, view_k_src, view_k_dst)); + ggml_build_forward_expand(gf, ggml_cpy(ctx, view_v_src, view_v_dst)); + } + + i += nm - 1; + } + + //LLAMA_LOG_INFO("gf->n_nodes = %d\n", gf->n_nodes); +#endif + + return gf; +} + +llama_kv_cache_unified::defrag_info llama_kv_cache_unified::defrag_prepare(int32_t n_max_nodes) const { + GGML_ASSERT(n_stream == 1 && "n_stream > 1 does not support defrag"); + + const auto & cells = v_cells[0]; + + const 
+llama_kv_cache_unified::defrag_info llama_kv_cache_unified::defrag_prepare(int32_t n_max_nodes) const {
+    GGML_ASSERT(n_stream == 1 && "n_stream > 1 does not support defrag");
+
+    const auto & cells = v_cells[0];
+
+    const uint32_t n_layer = layers.size();
+
+    const uint32_t n_kv   = cells.used_max_p1();
+    const uint32_t n_used = cells.get_used();
+
+    assert(n_used <= n_kv);
+
+    //const int64_t t_start = ggml_time_us();
+
+    // number of cells moved
+    uint32_t n_moves = 0;
+
+    // each move requires 6*n_layer tensors (see graph_build_kv_self_defrag)
+    //   - source view, destination view, copy operation
+    //   - x2 for keys and values
+    //const uint32_t max_moves = max_nodes()/(6*n_layer);
+    // TODO: tmp fix https://github.com/ggerganov/llama.cpp/issues/6685#issuecomment-2057579516
+    const uint32_t max_moves = (n_max_nodes - 2*n_layer)/(6*n_layer);
+
+    // determine which KV cells to move where
+    defrag_info res;
+    auto & ids = res.ids;
+
+    ids.resize(n_kv, n_kv);
+
+    for (uint32_t i0 = 0; i0 < n_used; ++i0) {
+        if (!cells.is_empty(i0)) {
+            ids[i0] = i0;
+
+            continue;
+        }
+
+        // found a hole - fill it with data from the end of the cache
+
+        uint32_t nh = 1;
+
+        // determine the size of the hole
+        while (i0 + nh < n_used && cells.is_empty(i0 + nh)) {
+            nh++;
+        }
+
+        uint32_t nf = 0;
+        uint32_t is = n_kv - 1;
+
+        // starting from the end, find nh non-empty cells
+        for (; is > i0; --is) {
+            if (cells.is_empty(is) || ids[is] != n_kv) {
+                continue;
+            }
+
+            // non-empty cell which is not yet moved
+            nf++;
+
+            if (nf == nh) {
+                break;
+            }
+        }
+
+        // this can only happen if `n_used` is not accurate, which would be a bug
+        GGML_ASSERT(nf == nh && "KV defrag bug: nf != nh");
+
+        nf = 0;
+
+        uint32_t i1 = is;
+
+        // are we moving a continuous block of memory?
+        bool cont = false;
+
+        // should we stop searching for the next move?
+        bool stop = false;
+
+        // go back and move the nf cells to the hole
+        for (; i1 < n_kv; ++i1) {
+            if (cells.is_empty(i1) || ids[i1] != n_kv) {
+                if (n_moves == max_moves) {
+                    stop = true;
+                    break;
+                }
+
+                cont = false;
+                continue;
+            }
+
+            // this cell goes to (i0 + nf)
+            ids[i1] = i0 + nf;
+
+            if (!cont) {
+                n_moves++;
+                cont = true;
+            }
+
+            nf++;
+
+            if (nf == nh) {
+                break;
+            }
+        }
+
+        if (stop || n_moves == max_moves) {
+            break;
+        }
+
+        //LLAMA_LOG_INFO("(tmp log) KV defrag: move [%u, %u) to [%u, %u)\n", is, i1 + 1, i0, i0 + nh);
+
+        i0 += nh - 1;
+    }
+
+    if (n_moves == 0) {
+        return {};
+    }
+
+    LLAMA_LOG_DEBUG("%s: (tmp log) KV defrag cell moves: %u\n", __func__, n_moves);
+
+    LLAMA_LOG_DEBUG("%s: expected gf nodes: %u\n", __func__, 6*n_moves*n_layer);
+
+    return res;
+}
+
+bool llama_kv_cache_unified::is_masked_swa(llama_pos p0, llama_pos p1) const {
+    assert(p0 >= 0 && p1 >= 0);
+
+    switch (swa_type) {
+        case LLAMA_SWA_TYPE_NONE:
+            {
+            } break;
+        case LLAMA_SWA_TYPE_STANDARD:
+            {
+                if (p1 - p0 >= (int32_t) n_swa) {
+                    return true;
+                }
+            } break;
+        case LLAMA_SWA_TYPE_CHUNKED:
+            {
+                const llama_pos pos_chunk_start = (p1 / n_swa) * n_swa;
+
+                if (p0 < pos_chunk_start) {
+                    return true;
+                }
+            } break;
+    }
+
+    return false;
+}
+
+void llama_kv_cache_unified::state_write(llama_io_write_i & io, llama_seq_id seq_id) const {
+    io.write(&n_stream, sizeof(n_stream));
+
+    for (uint32_t s = 0; s < n_stream; ++s) {
+        cell_ranges_t cr { s, {} };
+
+        uint32_t cell_count = 0;
+
+        const auto & cells = v_cells[s];
+
+        // Count the number of cells with the specified seq_id
+        // Find all the ranges of cells with this seq id (or all, when -1)
+        uint32_t cell_range_begin = cells.size();
+
+        for (uint32_t i = 0; i < cells.size(); ++i) {
+            if (!cells.is_empty(i) && (seq_id == -1 || cells.seq_has(i, seq_id))) {
+                ++cell_count;
+                if (cell_range_begin == cells.size()) {
+                    cell_range_begin = i;
+                }
+            } else {
+                if (cell_range_begin != cells.size()) {
+                    cr.data.emplace_back(cell_range_begin, i);
+                    cell_range_begin = cells.size();
+                }
+            }
+        }
+
+        if (cell_range_begin != cells.size()) {
+            cr.data.emplace_back(cell_range_begin, cells.size());
+        }
+
+        // DEBUG CHECK: Sum of cell counts in ranges should equal the total cell count
+        uint32_t cell_count_check = 0;
+        for (const auto & range : cr.data) {
+            cell_count_check += range.second - range.first;
+        }
+        GGML_ASSERT(cell_count == cell_count_check);
+
+        io.write(&cell_count, sizeof(cell_count));
+
+        // skip empty streams
+        if (cell_count == 0) {
+            continue;
+        }
+
+        state_write_meta(io, cr, seq_id);
+        state_write_data(io, cr);
+    }
+}
+
+void llama_kv_cache_unified::state_read(llama_io_read_i & io, llama_seq_id seq_id) {
+    GGML_ASSERT(seq_id == -1 || (seq_id >= 0 && (size_t) seq_id < seq_to_stream.size()));
+
+    uint32_t n_stream_cur;
+    io.read_to(&n_stream_cur, sizeof(n_stream_cur));
+    if (n_stream_cur != n_stream) {
+        throw std::runtime_error("n_stream mismatch");
+    }
+
+    for (uint32_t s = 0; s < n_stream; ++s) {
+        uint32_t cell_count;
+        io.read_to(&cell_count, sizeof(cell_count));
+
+        if (cell_count == 0) {
+            continue;
+        }
+
+        const uint32_t strm = seq_id == -1 ? s : seq_to_stream[seq_id];
+
+        bool res = true;
+        res = res && state_read_meta(io, strm, cell_count, seq_id);
+        res = res && state_read_data(io, strm, cell_count);
+
+        if (!res) {
+            if (seq_id == -1) {
+                clear(true);
+            } else {
+                seq_rm(seq_id, -1, -1);
+            }
+            throw std::runtime_error("failed to restore kv cache");
+        }
+    }
+}
+
+void llama_kv_cache_unified::state_write_meta(llama_io_write_i & io, const cell_ranges_t & cr, llama_seq_id seq_id) const {
+    const auto & cells = v_cells[cr.strm];
+
+    for (const auto & range : cr.data) {
+        for (uint32_t i = range.first; i < range.second; ++i) {
+            std::vector<llama_seq_id> seq_ids;
+
+            for (llama_seq_id cur = 0; cur < (int) n_seq_max; ++cur) {
+                if (cur == seq_id || seq_id == -1) {
+                    if (cells.seq_has(i, cur)) {
+                        seq_ids.push_back(cur);
+                    }
+                }
+            }
+
+            const llama_pos pos      = cells.pos_get(i);
+            const uint32_t  n_seq_id = seq_ids.size();
+
+            io.write(&pos,      sizeof(pos));
+            io.write(&n_seq_id, sizeof(n_seq_id));
+
+            for (const auto & seq_id : seq_ids) {
+                io.write(&seq_id, sizeof(seq_id));
+            }
+        }
+    }
+}
+
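`state_write_meta()` emits a flat `(pos, n_seq_id, seq_id...)` record per saved cell. A minimal external reader for one such record could look like the sketch below; the struct and helper are illustrative, not part of the patch, and assume the writer's little-endian in-memory layout (`llama_pos` and `llama_seq_id` are both `int32_t`):

```cpp
#include <cstdint>
#include <cstdio>
#include <vector>

struct cell_meta {
    int32_t pos;                  // llama_pos
    std::vector<int32_t> seq_ids; // llama_seq_id
};

// Reads one per-cell metadata record from a state stream (error handling elided).
static cell_meta read_cell_meta(FILE * f) {
    cell_meta m{};
    uint32_t n_seq_id = 0;
    fread(&m.pos,    sizeof(m.pos),    1, f);
    fread(&n_seq_id, sizeof(n_seq_id), 1, f);
    m.seq_ids.resize(n_seq_id);
    fread(m.seq_ids.data(), sizeof(int32_t), n_seq_id, f);
    return m;
}
```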
+void llama_kv_cache_unified::state_write_data(llama_io_write_i & io, const cell_ranges_t & cr) const {
+    const auto & cells = v_cells[cr.strm];
+
+    const uint32_t v_trans = this->v_trans ? 1 : 0;
+    const uint32_t n_layer = layers.size();
+
+    io.write(&v_trans, sizeof(v_trans));
+    io.write(&n_layer, sizeof(n_layer));
+
+    std::vector<uint8_t> tmp_buf;
+
+    // Iterate and write all the keys first, each row is a cell
+    // Get whole range at a time
+    for (const auto & layer : layers) {
+        const uint32_t il = layer.il;
+
+        const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il);
+
+        auto * k = layer.k_stream[cr.strm];
+
+        // Write key type
+        const int32_t k_type_i = (int32_t) k->type;
+        io.write(&k_type_i, sizeof(k_type_i));
+
+        // Write row size of key
+        const uint64_t k_size_row = ggml_row_size(k->type, n_embd_k_gqa);
+        io.write(&k_size_row, sizeof(k_size_row));
+
+        // Read each range of cells of k_size length each into tmp_buf and write out
+        for (const auto & range : cr.data) {
+            const size_t range_size = range.second - range.first;
+            const size_t buf_size = range_size * k_size_row;
+            io.write_tensor(k, range.first * k_size_row, buf_size);
+        }
+    }
+
+    if (!v_trans) {
+        for (const auto & layer : layers) {
+            const uint32_t il = layer.il;
+
+            const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il);
+
+            auto * v = layer.v_stream[cr.strm];
+
+            // Write value type
+            const int32_t v_type_i = (int32_t) v->type;
+            io.write(&v_type_i, sizeof(v_type_i));
+
+            // Write row size of value
+            const uint64_t v_size_row = ggml_row_size(v->type, n_embd_v_gqa);
+            io.write(&v_size_row, sizeof(v_size_row));
+
+            // Read each range of cells of v_size length each into tmp_buf and write out
+            for (const auto & range : cr.data) {
+                const size_t range_size = range.second - range.first;
+                const size_t buf_size = range_size * v_size_row;
+                io.write_tensor(v, range.first * v_size_row, buf_size);
+            }
+        }
+    } else {
+        // When v is transposed, we also need the element size and get the element ranges from each row
+        const uint32_t kv_size = cells.size();
+
+        for (const auto & layer : layers) {
+            const uint32_t il = layer.il;
+
+            const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il);
+
+            auto * v = layer.v_stream[cr.strm];
+
+            // Write value type
+            const int32_t v_type_i = (int32_t) v->type;
+            io.write(&v_type_i, sizeof(v_type_i));
+
+            // Write element size
+            const uint32_t v_size_el = ggml_type_size(v->type);
+            io.write(&v_size_el, sizeof(v_size_el));
+
+            // Write GQA embedding size
+            io.write(&n_embd_v_gqa, sizeof(n_embd_v_gqa));
+
+            // For each row, we get the element values of each cell
+            for (uint32_t j = 0; j < n_embd_v_gqa; ++j) {
+                // Read each range of cells of v_size_el length each into tmp_buf and write out
+                for (const auto & range : cr.data) {
+                    const size_t range_size = range.second - range.first;
+                    const size_t src_offset = (range.first + j * kv_size) * v_size_el;
+                    const size_t buf_size = range_size * v_size_el;
+                    io.write_tensor(v, src_offset, buf_size);
+                }
+            }
+        }
+    }
+}
+
+bool llama_kv_cache_unified::state_read_meta(llama_io_read_i & io, uint32_t strm, uint32_t cell_count, llama_seq_id dest_seq_id) {
+    auto & cells = v_cells[strm];
+    auto & head  = v_heads[strm];
+
+    if (dest_seq_id != -1) {
+        // single sequence
+        seq_rm(dest_seq_id, -1, -1);
+
+        llama_batch_allocr balloc(hparams.n_pos_per_embd());
+
+        llama_ubatch ubatch = balloc.ubatch_reserve(cell_count, 1);
+
+        ubatch.seq_id_unq[0] = dest_seq_id;
+
+        for (uint32_t i = 0; i < cell_count; ++i) {
+            llama_pos pos;
+            uint32_t  n_seq_id;
+
+            io.read_to(&pos,      sizeof(pos));
+            io.read_to(&n_seq_id, sizeof(n_seq_id));
+
+            if (n_seq_id != 1) {
+                LLAMA_LOG_ERROR("%s: invalid seq_id-agnostic kv cell\n", __func__);
+                return false;
+            }
+
+            // read the sequence id,
but directly discard it - we will use dest_seq_id instead + { + llama_seq_id seq_id; + io.read_to(&seq_id, sizeof(seq_id)); + } + + ubatch.pos[i] = pos; + ubatch.n_seq_id[i] = n_seq_id; + ubatch.seq_id[i] = &dest_seq_id; + } + + const auto sinfo = find_slot(ubatch, true); + if (sinfo.empty()) { + LLAMA_LOG_ERROR("%s: failed to find available cells in kv cache\n", __func__); + return false; + } + + apply_ubatch(sinfo, ubatch); + + const auto head_cur = sinfo.head(); + + // keep the head at the old position because we will read the KV data into it in state_read_data() + head = head_cur; + + LLAMA_LOG_DEBUG("%s: head_cur = %d, head = %d, cell_count = %d, dest_seq_id = %d\n", __func__, head_cur, head, cell_count, dest_seq_id); + + // DEBUG CHECK: head_cur should be our first cell, head_cur + cell_count - 1 should be our last cell (verify seq_id and pos values) + // Assume that this is one contiguous block of cells + GGML_ASSERT(head_cur + cell_count <= cells.size()); + GGML_ASSERT(cells.pos_get(head_cur) == ubatch.pos[0]); + GGML_ASSERT(cells.pos_get(head_cur + cell_count - 1) == ubatch.pos[cell_count - 1]); + GGML_ASSERT(cells.seq_has(head_cur, dest_seq_id)); + GGML_ASSERT(cells.seq_has(head_cur + cell_count - 1, dest_seq_id)); + } else { + // whole KV cache restore + + if (cell_count > cells.size()) { + LLAMA_LOG_ERROR("%s: not enough cells in kv cache\n", __func__); + return false; + } + + clear(true); + + for (uint32_t i = 0; i < cell_count; ++i) { + llama_pos pos; + uint32_t n_seq_id; + + io.read_to(&pos, sizeof(pos)); + io.read_to(&n_seq_id, sizeof(n_seq_id)); + + cells.pos_set(i, pos); + + for (uint32_t j = 0; j < n_seq_id; ++j) { + llama_seq_id seq_id; + io.read_to(&seq_id, sizeof(seq_id)); + + if (seq_id < 0 || (uint32_t) seq_id >= n_seq_max) { + LLAMA_LOG_ERROR("%s: invalid seq_id, %d is out of range [0, %u)\n", __func__, seq_id, n_seq_max); + return false; + } + + cells.seq_add(i, seq_id); + } + } + + head = 0; + } + + return true; +} + +bool llama_kv_cache_unified::state_read_data(llama_io_read_i & io, uint32_t strm, uint32_t cell_count) { + auto & cells = v_cells[strm]; + auto & head = v_heads[strm]; + + uint32_t v_trans; + uint32_t n_layer; + + io.read_to(&v_trans, sizeof(v_trans)); + io.read_to(&n_layer, sizeof(n_layer)); + + if (n_layer != layers.size()) { + LLAMA_LOG_ERROR("%s: mismatched layer count (%u instead of %u)\n", __func__, n_layer, (uint32_t) layers.size()); + return false; + } + + if (cell_count > cells.size()) { + LLAMA_LOG_ERROR("%s: not enough cells in kv cache to restore state (%u > %u)\n", __func__, cell_count, cells.size()); + return false; + } + + if (this->v_trans != (bool) v_trans) { + LLAMA_LOG_ERROR("%s: incompatible V transposition\n", __func__); + return false; + } + + // For each layer, read the keys for each cell, one row is one cell, read as one contiguous block + for (const auto & layer : layers) { + const uint32_t il = layer.il; + + const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il); + + auto * k = layer.k_stream[strm]; + + // Read type of key + int32_t k_type_i_ref; + io.read_to(&k_type_i_ref, sizeof(k_type_i_ref)); + const int32_t k_type_i = (int32_t) k->type; + if (k_type_i != k_type_i_ref) { + LLAMA_LOG_ERROR("%s: mismatched key type (%d != %d, layer %d)\n", __func__, k_type_i, k_type_i_ref, il); + return false; + } + + // Read row size of key + uint64_t k_size_row_ref; + io.read_to(&k_size_row_ref, sizeof(k_size_row_ref)); + const size_t k_size_row = ggml_row_size(k->type, n_embd_k_gqa); + if (k_size_row != k_size_row_ref) { + 
LLAMA_LOG_ERROR("%s: mismatched key row size (%zu != %zu, layer %d)\n", __func__, k_size_row, (size_t) k_size_row_ref, il); + return false; + } + + if (cell_count) { + // Read and set the keys for the whole cell range + ggml_backend_tensor_set(k, io.read(cell_count * k_size_row), head * k_size_row, cell_count * k_size_row); + } + } + + if (!this->v_trans) { + for (const auto & layer : layers) { + const uint32_t il = layer.il; + + const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il); + + auto * v = layer.v_stream[strm]; + + // Read type of value + int32_t v_type_i_ref; + io.read_to(&v_type_i_ref, sizeof(v_type_i_ref)); + const int32_t v_type_i = (int32_t) v->type; + if (v_type_i != v_type_i_ref) { + LLAMA_LOG_ERROR("%s: mismatched value type (%d != %d, layer %d)\n", __func__, v_type_i, v_type_i_ref, il); + return false; + } + + // Read row size of value + uint64_t v_size_row_ref; + io.read_to(&v_size_row_ref, sizeof(v_size_row_ref)); + const size_t v_size_row = ggml_row_size(v->type, n_embd_v_gqa); + if (v_size_row != v_size_row_ref) { + LLAMA_LOG_ERROR("%s: mismatched value row size (%zu != %zu, layer %d)\n", __func__, v_size_row, (size_t) v_size_row_ref, il); + return false; + } + + if (cell_count) { + // Read and set the values for the whole cell range + ggml_backend_tensor_set(v, io.read(cell_count * v_size_row), head * v_size_row, cell_count * v_size_row); + } + } + } else { + // For each layer, read the values for each cell (transposed) + for (const auto & layer : layers) { + const uint32_t il = layer.il; + + const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il); + + auto * v = layer.v_stream[strm]; + + // Read type of value + int32_t v_type_i_ref; + io.read_to(&v_type_i_ref, sizeof(v_type_i_ref)); + const int32_t v_type_i = (int32_t) v->type; + if (v_type_i != v_type_i_ref) { + LLAMA_LOG_ERROR("%s: mismatched value type (%d != %d, layer %d)\n", __func__, v_type_i, v_type_i_ref, il); + return false; + } + + // Read element size of value + uint32_t v_size_el_ref; + io.read_to(&v_size_el_ref, sizeof(v_size_el_ref)); + const size_t v_size_el = ggml_type_size(v->type); + if (v_size_el != v_size_el_ref) { + LLAMA_LOG_ERROR("%s: mismatched value element size (%zu != %zu, layer %d)\n", __func__, v_size_el, (size_t) v_size_el_ref, il); + return false; + } + + // Read GQA embedding size + uint32_t n_embd_v_gqa_ref; + io.read_to(&n_embd_v_gqa_ref, sizeof(n_embd_v_gqa_ref)); + if (n_embd_v_gqa != n_embd_v_gqa_ref) { + LLAMA_LOG_ERROR("%s: mismatched GQA embedding size (%u != %u, layer %d)\n", __func__, n_embd_v_gqa, n_embd_v_gqa_ref, il); + return false; + } + + if (cell_count) { + // For each row in the transposed matrix, read the values for the whole cell range + for (uint32_t j = 0; j < n_embd_v_gqa; ++j) { + const size_t dst_offset = (head + j * cells.size()) * v_size_el; + ggml_backend_tensor_set(v, io.read(cell_count * v_size_el), dst_offset, cell_count * v_size_el); + } + } + } + } + + return true; +} + +// +// llama_kv_cache_unified_context +// + +llama_kv_cache_unified_context::llama_kv_cache_unified_context(llama_memory_status status) : status(status) {} + +llama_kv_cache_unified_context::llama_kv_cache_unified_context( + llama_kv_cache_unified * kv) : status(LLAMA_MEMORY_STATUS_SUCCESS), kv(kv) { + n_kv = kv->get_size(); + + const uint32_t n_stream = kv->get_n_stream(); + + // create a dummy slot info - the actual data is irrelevant. 
we just need to build the graph
+    sinfos.resize(1);
+    sinfos[0].s0 = 0;
+    sinfos[0].s1 = n_stream - 1;
+    sinfos[0].idxs.resize(n_stream);
+    for (uint32_t s = 0; s < n_stream; ++s) {
+        sinfos[0].strm.push_back(s);
+        sinfos[0].idxs[s].resize(1, 0);
+    }
+}
+
+llama_kv_cache_unified_context::llama_kv_cache_unified_context(
+        llama_kv_cache_unified * kv,
+        llama_context * lctx,
+        bool do_shift,
+        defrag_info dinfo,
+        stream_copy_info sc_info) : status(LLAMA_MEMORY_STATUS_SUCCESS), kv(kv), lctx(lctx), do_shift(do_shift), dinfo(std::move(dinfo)), sc_info(std::move(sc_info)) {
+    if (!do_shift && this->dinfo.empty() && this->sc_info.empty()) {
+        status = LLAMA_MEMORY_STATUS_NO_UPDATE;
+    }
+}
+
+llama_kv_cache_unified_context::llama_kv_cache_unified_context(
+        llama_kv_cache_unified * kv,
+        llama_kv_cache_unified::slot_info_vec_t sinfos,
+        std::vector<llama_ubatch> ubatches) : status(LLAMA_MEMORY_STATUS_SUCCESS), kv(kv), sinfos(std::move(sinfos)), ubatches(std::move(ubatches)) {
+}
+
+llama_kv_cache_unified_context::~llama_kv_cache_unified_context() = default;
+
+bool llama_kv_cache_unified_context::next() {
+    assert(status == LLAMA_MEMORY_STATUS_SUCCESS);
+
+    if (++i_cur >= ubatches.size()) {
+        return false;
+    }
+
+    return true;
+}
+
+bool llama_kv_cache_unified_context::apply() {
+    assert(!llama_memory_status_is_fail(status));
+
+    // no ubatches -> this is a KV cache update
+    if (ubatches.empty()) {
+        kv->update(lctx, do_shift, dinfo, sc_info);
+
+        return true;
+    }
+
+    kv->apply_ubatch(sinfos[i_cur], ubatches[i_cur]);
+
+    n_kv = kv->get_n_kv();
+
+    return true;
+}
+
+llama_memory_status llama_kv_cache_unified_context::get_status() const {
+    return status;
+}
+
+const llama_ubatch & llama_kv_cache_unified_context::get_ubatch() const {
+    assert(status == LLAMA_MEMORY_STATUS_SUCCESS);
+
+    return ubatches[i_cur];
+}
+
+uint32_t llama_kv_cache_unified_context::get_n_kv() const {
+    return n_kv;
+}
+
+bool llama_kv_cache_unified_context::get_supports_set_rows() const {
+    return kv->get_supports_set_rows();
+}
+
+ggml_tensor * llama_kv_cache_unified_context::get_k(ggml_context * ctx, int32_t il) const {
+    return kv->get_k(ctx, il, n_kv, sinfos[i_cur]);
+}
+
+ggml_tensor * llama_kv_cache_unified_context::get_v(ggml_context * ctx, int32_t il) const {
+    return kv->get_v(ctx, il, n_kv, sinfos[i_cur]);
+}
+
+ggml_tensor * llama_kv_cache_unified_context::cpy_k(ggml_context * ctx, ggml_tensor * k_cur, ggml_tensor * k_idxs, int32_t il) const {
+    return kv->cpy_k(ctx, k_cur, k_idxs, il, sinfos[i_cur]);
+}
+
+ggml_tensor * llama_kv_cache_unified_context::cpy_v(ggml_context * ctx, ggml_tensor * v_cur, ggml_tensor * v_idxs, int32_t il) const {
+    return kv->cpy_v(ctx, v_cur, v_idxs, il, sinfos[i_cur]);
+}
+
+ggml_tensor * llama_kv_cache_unified_context::build_input_k_idxs(ggml_context * ctx, const llama_ubatch & ubatch) const {
+    return kv->build_input_k_idxs(ctx, ubatch);
+}
+
+ggml_tensor * llama_kv_cache_unified_context::build_input_v_idxs(ggml_context * ctx, const llama_ubatch & ubatch) const {
+    return kv->build_input_v_idxs(ctx, ubatch);
+}
+
+void llama_kv_cache_unified_context::set_input_k_shift(ggml_tensor * dst) const {
+    kv->set_input_k_shift(dst);
+}
+
+void llama_kv_cache_unified_context::set_input_k_idxs(ggml_tensor * dst, const llama_ubatch * ubatch) const {
+    kv->set_input_k_idxs(dst, ubatch, sinfos[i_cur]);
+}
+
+void llama_kv_cache_unified_context::set_input_v_idxs(ggml_tensor * dst, const llama_ubatch * ubatch) const {
+    kv->set_input_v_idxs(dst, ubatch, sinfos[i_cur]);
+}
+
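An illustrative driver for the context above (an assumption about caller usage, inferred from the `llama_memory_context_i` interface, not code from this patch): a context produced by `init_batch()` holds one or more ubatches; `apply()` inserts the current one and `next()` advances to the following one.

```cpp
static bool drive(llama_kv_cache_unified_context & mctx) {
    if (mctx.get_status() != LLAMA_MEMORY_STATUS_SUCCESS) {
        return false; // failed, or LLAMA_MEMORY_STATUS_NO_UPDATE: nothing to apply
    }
    do {
        if (!mctx.apply()) { // write the current ubatch into its KV cells
            return false;
        }
        // ... build and compute the graph for mctx.get_ubatch() here ...
    } while (mctx.next()); // false once every ubatch has been processed
    return true;
}
```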
+void llama_kv_cache_unified_context::set_input_kq_mask(ggml_tensor * dst, const llama_ubatch * ubatch, bool causal_attn) const {
+    kv->set_input_kq_mask(dst, ubatch, causal_attn);
+}
+
+void llama_kv_cache_unified_context::set_input_pos_bucket(ggml_tensor * dst, const llama_ubatch * ubatch) const {
+    kv->set_input_pos_bucket(dst, ubatch);
+}
+
+uint32_t llama_kv_cache_unified::get_padding(const llama_cparams & cparams) {
+    // the FA kernels require padding to avoid extra runtime boundary checks
+    return cparams.flash_attn ? 256u : 32u;
+}
diff --git a/llama/llama.cpp/src/llama-kv-cache-unified.h b/llama/llama.cpp/src/llama-kv-cache-unified.h
new file mode 100644
index 000000000..342a67596
--- /dev/null
+++ b/llama/llama.cpp/src/llama-kv-cache-unified.h
@@ -0,0 +1,399 @@
+#pragma once
+
+#include "llama-batch.h"
+#include "llama-graph.h"
+#include "llama-kv-cells.h"
+#include "llama-memory.h"
+
+#include <unordered_map>
+#include <vector>
+
+struct llama_cparams;
+struct llama_hparams;
+struct llama_model;
+struct llama_context;
+
+//
+// llama_kv_cache_unified
+//
+
+class llama_kv_cache_unified : public llama_memory_i {
+public:
+    static uint32_t get_padding(const llama_cparams & cparams);
+
+    // this callback is used to filter out layers that should not be included in the cache
+    using layer_filter_cb = std::function<bool(int32_t il)>;
+
+    struct defrag_info {
+        bool empty() const {
+            return ids.empty();
+        }
+
+        // contains information about which cell moves where:
+        //  - cell i moves to ids[i]
+        //  - if ids[i] == i || ids[i] == ids.size(), then cell i is not moved
+        std::vector<uint32_t> ids;
+    };
+
+    struct stream_copy_info {
+        bool empty() const {
+            assert(ssrc.size() == sdst.size());
+            return ssrc.empty();
+        }
+
+        std::vector<uint32_t> ssrc;
+        std::vector<uint32_t> sdst;
+    };
+
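For illustration, a populated `stream_copy_info` might look like the sketch below (stream ids invented; the element-wise pairing of `ssrc[i]` with `sdst[i]` is inferred from the size assertion above):

```cpp
#include "llama-kv-cache-unified.h" // this header

static llama_kv_cache_unified::stream_copy_info make_example_sc_info() {
    // stream 2 was copied into stream 0, and stream 3 into stream 1
    llama_kv_cache_unified::stream_copy_info sc;
    sc.ssrc = {2, 3}; // source stream ids
    sc.sdst = {0, 1}; // destination stream ids
    return sc;        // sc.empty() == false -> the next update() applies the copies
}
```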
+    // for each ubatch, create a slot_info that contains information about where the ubatch should be inserted in the
+    // KV cells. for example, cell indices for each token, such that: token[i] -> goes to cells[idxs[i]]
+    struct slot_info {
+        // data for ggml_set_rows
+        using idx_vec_t = std::vector<uint32_t>;
+
+        // number of streams: ns = s1 - s0 + 1
+        llama_seq_id s0;
+        llama_seq_id s1;
+
+        std::vector<llama_seq_id> strm; // [ns]
+        std::vector<idx_vec_t>    idxs; // [ns]
+
+        uint32_t head() const {
+            GGML_ASSERT(idxs.size() == 1);
+            GGML_ASSERT(!idxs[0].empty());
+
+            return idxs[0][0];
+        }
+
+        void resize(size_t n) {
+            strm.resize(n);
+            idxs.resize(n);
+        }
+
+        size_t size() const {
+            GGML_ASSERT(idxs.size() == strm.size());
+            GGML_ASSERT(!idxs.empty());
+
+            return idxs[0].size();
+        }
+
+        size_t n_stream() const {
+            return strm.size();
+        }
+
+        bool empty() const {
+            return idxs.empty();
+        }
+
+        void clear() {
+            idxs.clear();
+        }
+    };
+
+    using slot_info_vec_t = std::vector<slot_info>;
+
+    llama_kv_cache_unified(
+            const llama_model & model,
+            layer_filter_cb && filter,
+            ggml_type type_k,
+            ggml_type type_v,
+            bool v_trans,
+            bool offload,
+            bool unified,
+            uint32_t kv_size,
+            uint32_t n_seq_max,
+            uint32_t n_pad,
+            uint32_t n_swa,
+            llama_swa_type swa_type);
+
+    ~llama_kv_cache_unified() = default;
+
+    //
+    // llama_memory_i
+    //
+
+    llama_memory_context_ptr init_batch(
+            llama_batch_allocr & balloc,
+            uint32_t n_ubatch,
+            bool embd_all) override;
+
+    llama_memory_context_ptr init_full() override;
+
+    llama_memory_context_ptr init_update(llama_context * lctx, bool optimize) override;
+
+    bool get_can_shift() const override;
+
+    void clear(bool data) override;
+
+    bool seq_rm  (llama_seq_id seq_id, llama_pos p0, llama_pos p1) override;
+    void seq_cp  (llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) override;
+    void seq_keep(llama_seq_id seq_id) override;
+    void seq_add (llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos shift) override;
+    void seq_div (llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) override;
+
+    llama_pos seq_pos_min(llama_seq_id seq_id) const override;
+    llama_pos seq_pos_max(llama_seq_id seq_id) const override;
+
+    // state write/load
+
+    void state_write(llama_io_write_i & io, llama_seq_id seq_id = -1) const override;
+    void state_read (llama_io_read_i  & io, llama_seq_id seq_id = -1) override;
+
+    //
+    // llama_kv_cache_unified specific API
+    //
+
+    uint32_t get_size() const;
+    uint32_t get_n_stream() const;
+
+    bool get_has_shift() const;
+
+    //
+    // graph_build API
+    //
+
+    uint32_t get_n_kv() const;
+
+    // TODO: temporary
+    bool get_supports_set_rows() const;
+
+    // get views of the current state of the cache
+    ggml_tensor * get_k(ggml_context * ctx, int32_t il, uint32_t n_kv, const slot_info & sinfo) const;
+    ggml_tensor * get_v(ggml_context * ctx, int32_t il, uint32_t n_kv, const slot_info & sinfo) const;
+
+    // store k_cur and v_cur in the cache based on the provided head location
+    ggml_tensor * cpy_k(ggml_context * ctx, ggml_tensor * k_cur, ggml_tensor * k_idxs, int32_t il, const slot_info & sinfo) const;
+    ggml_tensor * cpy_v(ggml_context * ctx, ggml_tensor * v_cur, ggml_tensor * v_idxs, int32_t il, const slot_info & sinfo) const;
+
+    //
+    // preparation API
+    //
+
+    // find places for the provided ubatches in the cache, returns the slot infos
+    // return empty vector on failure
+    slot_info_vec_t prepare(const std::vector<llama_ubatch> & ubatches);
+
+    bool update(llama_context * lctx, bool do_shift, const defrag_info & dinfo, const stream_copy_info & sc_info);
+
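A hypothetical value (cell numbers invented) of what `find_slot()` below might return for a 3-token ubatch placed in a single-stream cache at cells 12..14:

```cpp
#include "llama-kv-cache-unified.h" // this header

static llama_kv_cache_unified::slot_info example_sinfo() {
    llama_kv_cache_unified::slot_info sinfo;
    sinfo.s0   = 0;
    sinfo.s1   = 0;
    sinfo.strm = {0};            // one stream
    sinfo.idxs = {{12, 13, 14}}; // one idx_vec_t per stream: token[i] -> cells[idxs[0][i]]
    return sinfo;                // sinfo.head() == 12, sinfo.size() == 3
}
```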
+    // find a slot of kv cells that can hold the ubatch
+    // if cont == true, then the slot must be continuous
+    // return empty slot_info on failure
+    slot_info find_slot(const llama_ubatch & ubatch, bool cont) const;
+
+    // emplace the ubatch context into slot: [sinfo.idxs[0...ubatch.n_tokens - 1]]
+    void apply_ubatch(const slot_info & sinfo, const llama_ubatch & ubatch);
+
+    //
+    // input API
+    //
+
+    ggml_tensor * build_input_k_idxs(ggml_context * ctx, const llama_ubatch & ubatch) const;
+    ggml_tensor * build_input_v_idxs(ggml_context * ctx, const llama_ubatch & ubatch) const;
+
+    void set_input_k_idxs(ggml_tensor * dst, const llama_ubatch * ubatch, const slot_info & sinfo) const;
+    void set_input_v_idxs(ggml_tensor * dst, const llama_ubatch * ubatch, const slot_info & sinfo) const;
+
+    void set_input_k_shift(ggml_tensor * dst) const;
+
+    void set_input_kq_mask   (ggml_tensor * dst, const llama_ubatch * ubatch, bool causal_attn) const;
+    void set_input_pos_bucket(ggml_tensor * dst, const llama_ubatch * ubatch) const;
+
+private:
+    const llama_model & model;
+    const llama_hparams & hparams;
+
+    struct kv_layer {
+        // layer index in the model
+        // note: can be different from the layer index in the KV cache
+        uint32_t il;
+
+        ggml_tensor * k;
+        ggml_tensor * v;
+
+        std::vector<ggml_tensor *> k_stream;
+        std::vector<ggml_tensor *> v_stream;
+    };
+
+    bool v_trans = true; // the value tensor is transposed
+
+    const uint32_t n_seq_max = 1;
+    const uint32_t n_stream  = 1;
+
+    // required padding
+    const uint32_t n_pad = 1;
+
+    // SWA
+    const uint32_t n_swa = 0;
+
+    // env: LLAMA_KV_CACHE_DEBUG
+    int debug = 0;
+
+    // env: LLAMA_SET_ROWS (temporary)
+    // ref: https://github.com/ggml-org/llama.cpp/pull/14285
+    bool supports_set_rows = true;
+
+    const llama_swa_type swa_type = LLAMA_SWA_TYPE_NONE;
+
+    std::vector<ggml_context_ptr>        ctxs;
+    std::vector<ggml_backend_buffer_ptr> bufs;
+
+    // the current index from where we start searching for a free slot in the ring buffer of KV cells (see find_slot())
+    // note: this is not part of the KV state and it's only used to speed-up the find_slot() method
+    std::vector<uint32_t> v_heads;
+
+    std::vector<llama_kv_cells_unified> v_cells;
+
+    // maps from a sequence id to a stream id
+    std::vector<uint32_t> seq_to_stream;
+
+    // pending stream copies that will be applied during the next update
+    stream_copy_info sc_info;
+
+    std::vector<kv_layer> layers;
+
+    // model layer id -> KV cache layer id
+    std::unordered_map<int32_t, int32_t> map_layer_ids;
+
+    // return non-empty vector if cells have been moved
+    defrag_info defrag_prepare(int32_t n_max_nodes) const;
+
+    size_t total_size() const;
+
+    size_t size_k_bytes() const;
+    size_t size_v_bytes() const;
+
+    bool is_masked_swa(llama_pos p0, llama_pos p1) const;
+
+    ggml_tensor * build_rope_shift(
+            const llama_cparams & cparams,
+            ggml_context * ctx,
+            ggml_tensor * cur,
+            ggml_tensor * shift,
+            ggml_tensor * factors,
+            float freq_base,
+            float freq_scale) const;
+
+    ggml_cgraph * build_graph_shift(
+            llm_graph_result * res,
+            llama_context * lctx) const;
+
+    ggml_cgraph * build_graph_defrag(
+            llm_graph_result * res,
+            llama_context * lctx,
+            const defrag_info & dinfo) const;
+
+    struct cell_ranges_t {
+        uint32_t strm;
+
+        std::vector<std::pair<uint32_t, uint32_t>> data; // ranges, from inclusive, to exclusive
+    };
+
+    void state_write_meta(llama_io_write_i & io, const cell_ranges_t & cr, llama_seq_id seq_id = -1) const;
+    void state_write_data(llama_io_write_i & io, const cell_ranges_t & cr) const;
+
+    bool state_read_meta(llama_io_read_i & io, uint32_t strm, uint32_t cell_count, llama_seq_id dest_seq_id = -1);
+    bool state_read_data(llama_io_read_i & io, uint32_t strm, uint32_t cell_count);
+};
+
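A hypothetical construction showing the `layer_filter_cb` hook in use (parameter values invented; assumes `hparams.is_swa()` and `hparams.n_swa` as used elsewhere in the tree): a second, SWA-only cache that keeps just the layers the model marks as sliding-window.

```cpp
#include "llama-kv-cache-unified.h"
#include "llama-model.h"

static llama_memory_i * make_swa_cache(const llama_model & model) {
    return new llama_kv_cache_unified(
        model,
        [&model](int32_t il) { return model.hparams.is_swa(il); }, // layer filter
        GGML_TYPE_F16, GGML_TYPE_F16, // type_k, type_v
        /*v_trans   =*/ true,
        /*offload   =*/ true,
        /*unified   =*/ true,
        /*kv_size   =*/ 4096,
        /*n_seq_max =*/ 1,
        /*n_pad     =*/ 32,
        /*n_swa     =*/ model.hparams.n_swa,
        LLAMA_SWA_TYPE_STANDARD);
}
```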
+class llama_kv_cache_unified_context : public llama_memory_context_i {
+public:
+    // some shorthands
+    using slot_info_vec_t  = llama_kv_cache_unified::slot_info_vec_t;
+    using defrag_info      = llama_kv_cache_unified::defrag_info;
+    using stream_copy_info = llama_kv_cache_unified::stream_copy_info;
+
+    // used for errors
+    llama_kv_cache_unified_context(llama_memory_status status);
+
+    // used to create a full-cache context
+    llama_kv_cache_unified_context(
+            llama_kv_cache_unified * kv);
+
+    // used to create an update context
+    llama_kv_cache_unified_context(
+            llama_kv_cache_unified * kv,
+            llama_context * lctx,
+            bool do_shift,
+            defrag_info dinfo,
+            stream_copy_info sc_info);
+
+    // used to create a batch processing context from a batch
+    llama_kv_cache_unified_context(
+            llama_kv_cache_unified * kv,
+            slot_info_vec_t sinfos,
+            std::vector<llama_ubatch> ubatches);
+
+    virtual ~llama_kv_cache_unified_context();
+
+    //
+    // llama_memory_context_i
+    //
+
+    bool next()  override;
+    bool apply() override;
+
+    llama_memory_status  get_status() const override;
+    const llama_ubatch & get_ubatch() const override;
+
+    //
+    // llama_kv_cache_unified_context specific API
+    //
+
+    uint32_t get_n_kv() const;
+
+    // TODO: temporary
+    bool get_supports_set_rows() const;
+
+    // get views of the current state of the cache
+    ggml_tensor * get_k(ggml_context * ctx, int32_t il) const;
+    ggml_tensor * get_v(ggml_context * ctx, int32_t il) const;
+
+    // store k_cur and v_cur in the cache based on the provided head location
+    ggml_tensor * cpy_k(ggml_context * ctx, ggml_tensor * k_cur, ggml_tensor * k_idxs, int32_t il) const;
+    ggml_tensor * cpy_v(ggml_context * ctx, ggml_tensor * v_cur, ggml_tensor * v_idxs, int32_t il) const;
+
+    ggml_tensor * build_input_k_idxs(ggml_context * ctx, const llama_ubatch & ubatch) const;
+    ggml_tensor * build_input_v_idxs(ggml_context * ctx, const llama_ubatch & ubatch) const;
+
+    void set_input_k_idxs(ggml_tensor * dst, const llama_ubatch * ubatch) const;
+    void set_input_v_idxs(ggml_tensor * dst, const llama_ubatch * ubatch) const;
+
+    void set_input_k_shift   (ggml_tensor * dst) const;
+    void set_input_kq_mask   (ggml_tensor * dst, const llama_ubatch * ubatch, bool causal_attn) const;
+    void set_input_pos_bucket(ggml_tensor * dst, const llama_ubatch * ubatch) const;
+
+private:
+    llama_memory_status status;
+
+    llama_kv_cache_unified * kv;
+    llama_context * lctx;
+
+    //
+    // update context
+    //
+
+    bool do_shift = false;
+
+    defrag_info dinfo;
+
+    stream_copy_info sc_info;
+
+    //
+    // batch processing context
+    //
+
+    // the index of the cur ubatch to process
+    size_t i_cur = 0;
+
+    slot_info_vec_t sinfos;
+
+    std::vector<llama_ubatch> ubatches;
+
+    //
+    // data needed for building the compute graph for the current ubatch:
+    //
+
+    // a heuristic, to avoid attending the full cache if it is not yet utilized
+    // as the cache gets filled, the benefit from this heuristic disappears
+    int32_t n_kv;
+};
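A worked example of the `get_padding()` rule (256 with flash attention, else 32; the rule is unchanged between the new file above and the deleted file below): a requested context size is rounded up to a multiple of the padding before the cache is allocated.

```cpp
#include <cstdint>
#include <cstdio>

int main() {
    const uint32_t n_ctx   = 8000;
    const uint32_t padding = 256; // flash_attn enabled
    // equivalent to GGML_PAD() for a power-of-two padding
    const uint32_t padded  = ((n_ctx + padding - 1) / padding) * padding;
    printf("%u -> %u\n", n_ctx, padded); // 8000 -> 8192
}
```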
diff --git a/llama/llama.cpp/src/llama-kv-cache.cpp b/llama/llama.cpp/src/llama-kv-cache.cpp
deleted file mode 100644
index 60e67b036..000000000
--- a/llama/llama.cpp/src/llama-kv-cache.cpp
+++ /dev/null
@@ -1,2451 +0,0 @@
-#include "llama-kv-cache.h"
-
-#include "llama-impl.h"
-#include "llama-batch.h"
-#include "llama-cparams.h"
-#include "llama-model.h"
-#include "llama-context.h"
-
-#include <algorithm>
-#include <cassert>
-#include <cmath>
-#include <limits>
-#include <map>
-#include <stdexcept>
-
-//
-// llama_kv_cache_unified
-//
-
-uint32_t llama_kv_cache_unified::get_padding(const llama_cparams & cparams) {
-    // the FA kernels require padding to avoid extra runtime boundary checks
-    return cparams.flash_attn ? 256u : 32u;
-}
-
-llama_kv_cache_unified::llama_kv_cache_unified(
-        const llama_model & model,
-        ggml_type type_k,
-        ggml_type type_v,
-        bool v_trans,
-        bool offload,
-        uint32_t kv_size,
-        uint32_t padding) : model(model), hparams(model.hparams), v_trans(v_trans), padding(padding) {
-    const int32_t n_layer = hparams.n_layer;
-
-    has_shift = false;
-    can_shift = true;
-
-    LLAMA_LOG_INFO("%s: kv_size = %d, type_k = '%s', type_v = '%s', n_layer = %d, can_shift = %d, padding = %d\n",
-            __func__, kv_size, ggml_type_name(type_k), ggml_type_name(type_v), n_layer, can_shift, padding);
-
-    GGML_ASSERT(kv_size % padding == 0 && "kv_size must be a multiple of padding");
-
-    head = 0;
-    size = kv_size;
-    used = 0;
-
-    this->type_k = type_k;
-    this->type_v = type_v;
-
-    cells.clear();
-    cells.resize(kv_size);
-
-    // create a context for each buffer type
-    std::map<ggml_backend_buffer_type_t, ggml_context *> ctx_map;
-    auto ctx_for_buft = [&](ggml_backend_buffer_type_t buft) -> ggml_context * {
-        auto it = ctx_map.find(buft);
-        if (it == ctx_map.end()) {
-            ggml_init_params params = {
-                /*.mem_size   =*/ size_t(2u*n_layer*ggml_tensor_overhead()),
-                /*.mem_buffer =*/ NULL,
-                /*.no_alloc   =*/ true,
-            };
-
-            ggml_context * ctx = ggml_init(params);
-            if (!ctx) {
-                return nullptr;
-            }
-
-            ctx_map[buft] = ctx;
-            ctxs.emplace_back(ctx);
-
-            return ctx;
-        }
-
-        return it->second;
-    };
-
-    k_l.reserve(n_layer);
-    v_l.reserve(n_layer);
-
-    for (int i = 0; i < n_layer; i++) {
-        const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(i) + hparams.n_embd_k_s();
-        const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(i) + hparams.n_embd_v_s();
-
-        const char * dev_name = "CPU";
-
-        ggml_backend_buffer_type_t buft = ggml_backend_cpu_buffer_type();
-
-        if (offload) {
-            auto * dev = model.dev_layer(i);
-            buft = ggml_backend_dev_buffer_type(dev);
-
-            dev_name = ggml_backend_dev_name(dev);
-        }
-
-        LLAMA_LOG_DEBUG("%s: layer %3d: dev = %s\n", __func__, i, dev_name);
-
-        ggml_context * ctx = ctx_for_buft(buft);
-        if (!ctx) {
-            throw std::runtime_error("failed to create ggml context for kv cache");
-        }
-
-        ggml_tensor * k = ggml_new_tensor_1d(ctx, type_k, n_embd_k_gqa*kv_size);
-        ggml_tensor * v = ggml_new_tensor_1d(ctx, type_v, n_embd_v_gqa*kv_size);
-        ggml_format_name(k, "cache_k_l%d", i);
-        ggml_format_name(v, "cache_v_l%d", i);
-        k_l.push_back(k);
-        v_l.push_back(v);
-    }
-
-    // allocate tensors and initialize the buffers to avoid NaNs in the padding
-    for (auto it : ctx_map) {
-        auto * buft = it.first;
-        auto * ctx  = it.second;
-
-        ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx, buft);
-        if (!buf) {
-            throw std::runtime_error("failed to allocate buffer for kv cache");
-        }
-        ggml_backend_buffer_clear(buf, 0);
-        LLAMA_LOG_INFO("%s: %10s KV buffer size = %8.2f MiB\n", __func__, ggml_backend_buffer_name(buf), ggml_backend_buffer_get_size(buf)/1024.0/1024.0);
-        bufs.emplace_back(buf);
-    }
-
-    {
-        const size_t memory_size_k = size_k_bytes();
-        const size_t memory_size_v = size_v_bytes();
-
-        LLAMA_LOG_INFO("%s: KV self size  = %7.2f MiB, K (%s): %7.2f MiB, V (%s): %7.2f MiB\n", __func__,
-                (float)(memory_size_k + memory_size_v) / (1024.0f * 1024.0f),
-                ggml_type_name(type_k), (float)memory_size_k / (1024.0f * 1024.0f),
-                ggml_type_name(type_v), (float)memory_size_v / (1024.0f * 1024.0f));
-    }
-}
-
-void llama_kv_cache_unified::clear() {
-    for (int32_t i = 0; i < (int32_t) size; ++i) {
-        cells[i].pos = -1;
-        cells[i].seq_id.clear();
-    }
-    head = 0;
-    used = 0;
-
-    for (auto & buf : bufs) {
-        ggml_backend_buffer_clear(buf.get(), 0);
-    }
-}
-
-bool llama_kv_cache_unified::seq_rm(llama_seq_id seq_id, llama_pos p0, llama_pos p1) {
-    uint32_t new_head = size;
-
-    if (p0 < 0) {
-        p0 = 0;
-    }
-
-    if (p1 < 0) {
-        p1 = std::numeric_limits<llama_pos>::max();
-    }
-
-    for (uint32_t i = 0; i < size; ++i) {
-        if (cells[i].pos >= p0 && cells[i].pos < p1) {
-            if (seq_id < 0) {
-                cells[i].seq_id.clear();
-            } else if (cells[i].has_seq_id(seq_id)) {
-                cells[i].seq_id.erase(seq_id);
-            } else {
-                continue;
-            }
-            if (cells[i].is_empty()) {
-                // keep count of the number of used cells
-                if (cells[i].pos >= 0) {
-                    used--;
-                }
-
-                cells[i].pos = -1;
-
-                if (new_head == size) {
-                    new_head = i;
-                }
-            }
-        }
-    }
-
-    // If we freed up a slot, set head to it so searching can start there.
-    if (new_head != size && new_head < head) {
-        head = new_head;
-    }
-
-    return true;
-}
-
-void llama_kv_cache_unified::seq_cp(llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) {
-    if (seq_id_src == seq_id_dst) {
-        return;
-    }
-
-    if (p0 < 0) {
-        p0 = 0;
-    }
-
-    if (p1 < 0) {
-        p1 = std::numeric_limits<llama_pos>::max();
-    }
-
-    // otherwise, this is the KV of a Transformer-like model
-    head = 0;
-
-    for (uint32_t i = 0; i < size; ++i) {
-        if (cells[i].has_seq_id(seq_id_src) && cells[i].pos >= p0 && cells[i].pos < p1) {
-            cells[i].seq_id.insert(seq_id_dst);
-        }
-    }
-}
-
-void llama_kv_cache_unified::seq_keep(llama_seq_id seq_id) {
-    uint32_t new_head = size;
-
-    for (uint32_t i = 0; i < size; ++i) {
-        if (!cells[i].has_seq_id(seq_id)) {
-            if (cells[i].pos >= 0) {
-                used--;
-            }
-
-            cells[i].pos = -1;
-            cells[i].seq_id.clear();
-
-            if (new_head == size){
-                new_head = i;
-            }
-        } else {
-            cells[i].seq_id.clear();
-            cells[i].seq_id.insert(seq_id);
-        }
-    }
-
-    // If we freed up a slot, set head to it so searching can start there.
-    if (new_head != size && new_head < head) {
-        head = new_head;
-    }
-}
-
-void llama_kv_cache_unified::seq_add(llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos delta) {
-    if (delta == 0) {
-        return;
-    }
-
-    uint32_t new_head = size;
-
-    if (p0 < 0) {
-        p0 = 0;
-    }
-
-    if (p1 < 0) {
-        p1 = std::numeric_limits<llama_pos>::max();
-    }
-
-    // If there is no range then return early to avoid looping over the cache.
-    if (p0 == p1) {
-        return;
-    }
-
-    for (uint32_t i = 0; i < size; ++i) {
-        if (cells[i].has_seq_id(seq_id) && cells[i].pos >= p0 && cells[i].pos < p1) {
-            has_shift = true;
-            cells[i].pos   += delta;
-            cells[i].delta += delta;
-
-            if (cells[i].pos < 0) {
-                if (!cells[i].is_empty()) {
-                    used--;
-                }
-                cells[i].pos = -1;
-                cells[i].seq_id.clear();
-                if (new_head == size) {
-                    new_head = i;
-                }
-            }
-        }
-    }
-
-    // If we freed up a slot, set head to it so searching can start there.
-    // Otherwise we just start the next search from the beginning.
-    head = new_head != size ? new_head : 0;
-}
-
-void llama_kv_cache_unified::seq_div(llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) {
-    if (d == 1) {
-        return;
-    }
-
-    if (p0 < 0) {
-        p0 = 0;
-    }
-
-    if (p1 < 0) {
-        p1 = std::numeric_limits<llama_pos>::max();
-    }
-
-    // If there is no range then return early to avoid looping over the cache.
-    if (p0 == p1) {
-        return;
-    }
-
-    for (uint32_t i = 0; i < size; ++i) {
-        if (cells[i].has_seq_id(seq_id) && cells[i].pos >= p0 && cells[i].pos < p1) {
-            has_shift = true;
-
-            {
-                llama_pos p_old = cells[i].pos;
-                cells[i].pos   /= d;
-                cells[i].delta += cells[i].pos - p_old;
-            }
-        }
-    }
-}
-
-llama_pos llama_kv_cache_unified::seq_pos_max(llama_seq_id seq_id) const {
-    llama_pos result = 0;
-
-    for (uint32_t i = 0; i < size; ++i) {
-        if (cells[i].has_seq_id(seq_id)) {
-            result = std::max(result, cells[i].pos);
-        }
-    }
-
-    return result;
-}
-
-void llama_kv_cache_unified::restore() {
-    if (pending.ranges.empty()) {
-        return;
-    }
-
-    uint32_t new_head = size;
-
-    for (auto & range : pending.ranges) {
-        for (uint32_t i = range.c0; i < range.c1; ++i) {
-            cells[i].seq_id.clear();
-
-            // keep count of the number of used cells
-            if (cells[i].pos >= 0) {
-                used--;
-            }
-
-            cells[i].pos = -1;
-        }
-
-        new_head = std::min(new_head, range.c0);
-    }
-
-    if (new_head != size && new_head < head) {
-        head = new_head;
-    }
-}
-
-void llama_kv_cache_unified::commit() {
-    if (pending.ranges.empty()) {
-        LLAMA_LOG_WARN("%s: no pending KV cache updates to commit - might indicate a bug (ref: %s)\n",
-                __func__, "https://github.com/ggml-org/llama.cpp/pull/12695");
-        return;
-    }
-
-    pending.ranges.clear();
-}
-
-bool llama_kv_cache_unified::update(llama_context & lctx) {
-    auto * sched = lctx.get_sched();
-
-    if (has_shift) {
-        if (!get_can_shift()) {
-            GGML_ABORT("The current KV cache / model configuration does not support K-shift");
-        }
-
-        LLAMA_LOG_DEBUG("%s: applying K-shift\n", __func__);
-
-        // apply K-shift if needed
-        if (hparams.rope_type != LLAMA_ROPE_TYPE_NONE) {
-            ggml_backend_sched_reset(sched);
-
-            auto * gf = lctx.graph_init();
-
-            auto res = build_graph_shift(lctx.get_cparams(), lctx.get_ctx_compute(), gf);
-
-            ggml_backend_sched_alloc_graph(sched, gf);
-
-            res->set_inputs(nullptr);
-
-            lctx.graph_compute(gf, false);
-        }
-
-        {
-            has_shift = false;
-
-            for (uint32_t i = 0; i < size; ++i) {
-                cells[i].delta = 0;
-            }
-        }
-    }
-
-    if (do_defrag) {
-        LLAMA_LOG_DEBUG("%s: defragmenting KV cache\n", __func__);
-        const uint32_t n_max_nodes = lctx.graph_max_nodes();
-        const uint32_t max_moves = (n_max_nodes - 2*model.hparams.n_layer)/(6*model.hparams.n_layer);
-        if (!defrag_prepare(n_max_nodes)) {
-            LLAMA_LOG_ERROR("%s: failed to prepare defragmentation\n", __func__);
-            return false;
-        }
-
-        for (std::size_t i = 0; i < defrag_info.moves.size(); i += max_moves) {
-            std::vector<llama_kv_defrag_move> chunk;
-            auto end = std::min(i + max_moves, defrag_info.moves.size());
-            chunk.assign(defrag_info.moves.begin() + i, defrag_info.moves.begin() + end);
-
-            ggml_backend_sched_reset(sched);
-
-            auto * gf = lctx.graph_init();
-
-            auto res = build_graph_defrag(lctx.get_cparams(), lctx.get_ctx_compute(), gf, chunk);
-
-            ggml_backend_sched_alloc_graph(sched, gf);
-
-            res->set_inputs(nullptr);
-
-            lctx.graph_compute(gf, false);
-        }
-
-        do_defrag = false;
-    }
-
-    // we never need to reserve a worst case graph
-    return false;
-}
-
-void llama_kv_cache_unified::defrag_sched(float thold) {
-    //  - do not defrag small contexts (i.e. < 2048 tokens)
-    //  - count the padding towards the number of used tokens
-    const float fragmentation = n >= 2048 ? 
std::max(0.0f, 1.0f - (float(used + padding)/n)) : 0.0f; - - // queue defragmentation for next llama_kv_cache_update - if (fragmentation > thold) { - LLAMA_LOG_DEBUG("%s: fragmentation: %.2f - requesting defrag\n", __func__, fragmentation); - - do_defrag = true; - } -} - -void llama_kv_cache_unified::set_full() { - n = size; -} - -llama_sbatch llama_kv_cache_unified::sbatch_init( - const llama_batch & batch, - bool logits_all) { - return llama_sbatch(batch, hparams.n_embd, true, logits_all); -} - -llama_ubatch llama_kv_cache_unified::ubatch_next( - llama_sbatch & sbatch, - uint32_t n_ubatch, - bool embd_pooled) const { - GGML_UNUSED(embd_pooled); - return sbatch.split_simple(n_ubatch); -} - -bool llama_kv_cache_unified::find_slot( - const llama_ubatch & ubatch) { - const uint32_t n_tokens = ubatch.n_tokens; - const uint32_t n_seqs = ubatch.n_seqs; - const uint32_t n_seq_tokens = ubatch.n_seq_tokens; - - // if we have enough unused cells before the current head -> - // better to start searching from the beginning of the cache, hoping to fill it - if (head > used + 2*ubatch.n_tokens) { - head = 0; - } - - // otherwise, one cell per token. - - if (n_tokens > size) { - LLAMA_LOG_ERROR("%s: n_tokens = %d > size = %d\n", __func__, n_tokens, size); - return false; - } - - uint32_t n_tested = 0; - - while (true) { - if (head + n_tokens > size) { - n_tested += size - head; - head = 0; - continue; - } - - bool found = true; - for (uint32_t i = 0; i < n_tokens; i++) { - if (cells[head + i].pos >= 0) { - found = false; - head += i + 1; - n_tested += i + 1; - break; - } - } - - if (found) { - break; - } - - if (n_tested >= size) { - //LLAMA_LOG_ERROR("%s: failed to find a slot for %d tokens\n", __func__, n_tokens); - return false; - } - } - - for (uint32_t s = 0; s < n_seqs; s++) { - for (uint32_t i = 0; i < n_seq_tokens; ++i) { - uint32_t k = s*n_seq_tokens + i; - cells[head + k].pos = ubatch.pos[k]; - - for (int32_t j = 0; j < ubatch.n_seq_id[s]; j++) { - cells[head + k].seq_id.insert(ubatch.seq_id[s][j]); - } - } - } - - used += n_tokens; - - pending.ranges.push_back({head, head + n_tokens}); - - // a heuristic, to avoid attending the full cache if it is not yet utilized - // after enough generations, the benefit from this heuristic disappears - // if we start defragmenting the cache, the benefit from this will be more important - n = std::min(size, std::max(padding, GGML_PAD(cell_max(), padding))); - - //printf("n = %5d, used = %5d, head = %5d\n", n, used, head); - - return true; -} - -int32_t llama_kv_cache_unified::get_n_tokens() const { - int32_t result = 0; - - for (uint32_t i = 0; i < size; i++) { - result += cells[i].seq_id.size(); - } - - return result; -} - -int32_t llama_kv_cache_unified::get_used_cells() const { - return used; -} - -bool llama_kv_cache_unified::get_can_shift() const { - return can_shift; -} - -llama_pos llama_kv_cache_unified::get_pos_max() const { - llama_pos pos_max = -1; - for (const auto & cell : cells) { - pos_max = std::max(pos_max, cell.pos); - } - - return pos_max; -} - -size_t llama_kv_cache_unified::total_size() const { - size_t size = 0; - for (const auto & buf : bufs) { - size += ggml_backend_buffer_get_size(buf.get()); - } - - return size; -} - -size_t llama_kv_cache_unified::size_k_bytes() const { - size_t size_k_bytes = 0; - - for (const auto & k : k_l) { - size_k_bytes += ggml_nbytes(k); - } - - return size_k_bytes; -} - -size_t llama_kv_cache_unified::size_v_bytes() const { - size_t size_v_bytes = 0; - - for (const auto & v : v_l) { - size_v_bytes += 
ggml_nbytes(v);
-    }
-
-    return size_v_bytes;
-}
-
-ggml_tensor * llama_kv_cache_unified::build_rope_shift(
-        const llama_cparams & cparams,
-        ggml_context * ctx,
-        ggml_tensor * cur,
-        ggml_tensor * shift,
-        ggml_tensor * factors,
-        float freq_base,
-        float freq_scale) const {
-    const auto & n_ctx_orig = cparams.n_ctx_orig_yarn;
-
-    const auto & yarn_ext_factor = cparams.yarn_ext_factor;
-    const auto & yarn_beta_fast  = cparams.yarn_beta_fast;
-    const auto & yarn_beta_slow  = cparams.yarn_beta_slow;
-
-    const auto & n_rot     = hparams.n_rot;
-    const auto & rope_type = hparams.rope_type;
-
-    // See llm_build_deepseek2() for why attn_factor has to be scaled for YaRN RoPE to work correctly.
-    // See https://github.com/ggerganov/llama.cpp/discussions/7416 for detailed explanation.
-    const float yarn_attn_factor = model.arch == LLM_ARCH_DEEPSEEK2 ? 1.0f / (1.0f + 0.1f * logf(1.0f / freq_scale)) : cparams.yarn_attn_factor;
-
-    ggml_tensor * tmp;
-
-    if (ggml_is_quantized(cur->type)) {
-        // dequantize to f32 -> RoPE -> quantize back
-        tmp = ggml_cast(ctx, cur, GGML_TYPE_F32);
-
-        tmp = ggml_rope_ext(ctx, tmp,
-                shift, factors, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
-                yarn_ext_factor, yarn_attn_factor, yarn_beta_fast, yarn_beta_slow);
-
-        tmp = ggml_cpy(ctx, tmp, cur);
-    } else {
-        // we rotate only the first n_rot dimensions
-        tmp = ggml_rope_ext_inplace(ctx, cur,
-                shift, factors, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
-                yarn_ext_factor, yarn_attn_factor, yarn_beta_fast, yarn_beta_slow);
-    }
-
-    return tmp;
-}
-
-class llm_graph_input_k_shift : public llm_graph_input_i {
-public:
-    llm_graph_input_k_shift(const llama_kv_cache_unified * kv_self) : kv_self(kv_self) {}
-    virtual ~llm_graph_input_k_shift() = default;
-
-    void set_input(const llama_ubatch * ubatch) override;
-
-    ggml_tensor * k_shift; // I32 [kv_size]
-
-    const llama_kv_cache_unified * kv_self;
-};
-
-void llm_graph_input_k_shift::set_input(const llama_ubatch * ubatch) {
-    GGML_UNUSED(ubatch);
-
-    if (k_shift) {
-        assert(ggml_backend_buffer_is_host(k_shift->buffer));
-
-        int32_t * data = (int32_t *) k_shift->data;
-
-        for (uint32_t i = 0; i < kv_self->size; ++i) {
-            data[i] = kv_self->cells[i].delta;
-        }
-    }
-}
-
-llm_graph_result_ptr llama_kv_cache_unified::build_graph_shift(
-        const llama_cparams & cparams,
-        ggml_context * ctx,
-        ggml_cgraph * gf) const {
-    auto res = std::make_unique<llm_graph_result>();
-
-    const auto & n_layer = hparams.n_layer;
-
-    const auto & n_embd_head_k = hparams.n_embd_head_k;
-  //const auto & n_embd_head_v = hparams.n_embd_head_v;
-
-    const uint32_t n_ctx_per_seq = cparams.n_ctx / cparams.n_seq_max;
-
-    //GGML_ASSERT(kv_self->size == n_ctx);
-
-    auto inp = std::make_unique<llm_graph_input_k_shift>(this);
-
-    inp->k_shift = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, cparams.n_ctx);
-    ggml_set_input(inp->k_shift);
-
-    for (uint32_t il = 0; il < n_layer; ++il) {
-        const int64_t n_head_kv    = hparams.n_head_kv(il);
-        const int64_t n_embd_k_gqa = hparams.n_embd_k_gqa(il);
-
-        const bool is_swa = hparams.is_swa(il);
-
-        // note: the swa rope params could become part of the cparams in the future
-        //       if we decide to make them configurable, like the non-sliding ones
-        const float freq_base_l  = is_swa ? hparams.rope_freq_base_train_swa  : cparams.rope_freq_base;
-        const float freq_scale_l = is_swa ? hparams.rope_freq_scale_train_swa : cparams.rope_freq_scale;
-
-        ggml_tensor * rope_factors = model.get_rope_factors(n_ctx_per_seq, il);
-
-        ggml_tensor * k =
-            ggml_view_3d(ctx, k_l[il],
-                n_embd_head_k, n_head_kv, size,
-                ggml_row_size(k_l[il]->type, n_embd_head_k),
-                ggml_row_size(k_l[il]->type, n_embd_k_gqa),
-                0);
-
-        ggml_tensor * cur = build_rope_shift(cparams, ctx, k, inp->k_shift, rope_factors, freq_base_l, freq_scale_l);
-
-        ggml_build_forward_expand(gf, cur);
-    }
-
-    res->add_input(std::move(inp));
-
-    return res;
-}
-
-llm_graph_result_ptr llama_kv_cache_unified::build_graph_defrag(
-        const llama_cparams & cparams,
-        ggml_context * ctx,
-        ggml_cgraph * gf,
-        const std::vector<llama_kv_defrag_move> & moves) const {
-    auto res = std::make_unique<llm_graph_result>();
-
-#if 0
-    // CPU defrag
-    //
-    // TODO: optimizations are possible:
-    //       - multiple threads
-    //       - avoid copying to the host memory when already there
-    //
-    // likely not worth the effort, as we have ggml_graph based defrag
-    //
-
-    const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa();
-    const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa();
-
-    const uint32_t kv_size = size;
-
-    std::vector<uint8_t> buf_k;
-    std::vector<uint8_t> buf_v;
-
-    for (uint32_t il = 0; il < n_layer; ++il) {
-        const size_t k_size_row = ggml_row_size(k_l[il]->type, n_embd_k_gqa);
-        const size_t k_size     = ggml_row_size(k_l[il]->type, n_embd_k_gqa*kv_size);
-
-        const size_t v_size_el = ggml_type_size(v_l[il]->type);
-        const size_t v_size    = ggml_row_size (v_l[il]->type, n_embd_v_gqa*kv_size);
-
-        buf_k.resize(k_size);
-        buf_v.resize(v_size);
-
-        ggml_backend_tensor_get(k_l[il], buf_k.data(), 0, buf_k.size());
-        ggml_backend_tensor_get(v_l[il], buf_v.data(), 0, buf_v.size());
-
-        // batch move [i, i+nm) to [id, id+nm)
-        // note: cells can move only to a lower index
-        for (uint32_t i = 0; i < n_kv; ++i) {
-            const uint32_t id = ids[i];
-
-            if (i == id || id == n_kv) {
-                continue;
-            }
-
-            uint32_t nm = 1;
-
-            while (i + nm < n_kv && ids[i + nm] == id + nm) {
-                nm++;
-            }
-
-            // move keys
-            {
-                const int64_t os = i*k_size_row;
-                const int64_t od = id*k_size_row;
-
-                memcpy(buf_k.data() + od, buf_k.data() + os, nm*k_size_row);
-            }
-
-            // move values (note: they are transposed)
-            {
-                const int64_t os = i;
-                const int64_t od = id;
-
-                for (uint32_t j = 0; j < n_embd_v_gqa; ++j) {
-                    memcpy(buf_v.data() + (od + j*kv_size)*v_size_el, buf_v.data() + (os + j*kv_size)*v_size_el, nm*v_size_el);
-                }
-            }
-
-            i += nm - 1;
-        }
-
-        ggml_backend_tensor_set(k_l[il], buf_k.data(), 0, buf_k.size());
-        ggml_backend_tensor_set(v_l[il], buf_v.data(), 0, buf_v.size());
-    }
-#else
-    for (const auto & move : moves) {
-        for (uint32_t il = 0; il < hparams.n_layer; ++il) { // NOLINT
-            const int64_t n_embd_k_gqa = hparams.n_embd_k_gqa(il);
-            const int64_t n_embd_v_gqa = hparams.n_embd_v_gqa(il);
-
-            ggml_tensor * view_k_src = ggml_view_2d(ctx, k_l[il],
-                    n_embd_k_gqa, move.len,
-                    ggml_row_size(k_l[il]->type, n_embd_k_gqa),
-                    ggml_row_size(k_l[il]->type, n_embd_k_gqa*move.src));
-
-            ggml_tensor * view_k_dst = ggml_view_2d(ctx, k_l[il],
-                    n_embd_k_gqa, move.len,
-                    ggml_row_size(k_l[il]->type, n_embd_k_gqa),
-                    ggml_row_size(k_l[il]->type, n_embd_k_gqa*move.dst));
-
-            ggml_tensor * view_v_src;
-            ggml_tensor * view_v_dst;
-
-            if (cparams.flash_attn) {
-                // NOTE: the V cache is not transposed when using flash attention
-                view_v_src = ggml_view_2d(ctx, v_l[il],
-                        n_embd_v_gqa, move.len,
-                        ggml_row_size(v_l[il]->type, n_embd_v_gqa),
-                        ggml_row_size(v_l[il]->type, n_embd_v_gqa*move.dst));
-
-                view_v_dst = ggml_view_2d(ctx, v_l[il],
-                        move.len, n_embd_v_gqa,
-                        ggml_row_size(v_l[il]->type, n_embd_v_gqa),
-                        ggml_row_size(v_l[il]->type, move.src));
-            } else {
-                view_v_src = ggml_view_2d(ctx, v_l[il],
-                        move.len, n_embd_v_gqa,
-                        ggml_row_size(v_l[il]->type, size),
-                        ggml_row_size(v_l[il]->type, move.src));
-
-                view_v_dst = ggml_view_2d(ctx, v_l[il],
-                        move.len, n_embd_v_gqa,
-                        ggml_row_size(v_l[il]->type, size),
-                        ggml_row_size(v_l[il]->type, move.dst));
-            }
-
-            ggml_build_forward_expand(gf, ggml_cpy(ctx, view_k_src, view_k_dst));
-            ggml_build_forward_expand(gf, ggml_cpy(ctx, view_v_src, view_v_dst));
-        }
-    }
-
-    //LLAMA_LOG_INFO("gf->n_nodes = %d\n", gf->n_nodes);
-#endif
-
-    return res;
-}
-
-bool llama_kv_cache_unified::defrag_prepare(int32_t n_max_nodes) {
-    const uint32_t n_layer = hparams.n_layer;
-
-    const uint32_t n_kv   = cell_max();
-    const uint32_t n_used = used;
-
-    assert(n_used <= n_kv);
-
-    defrag_info.moves.clear();
-
-    // determine which KV cells to move where
-    //
-    //  cell i moves to ids[i]
-    //
-    //  if ids[i] == i || ids[i] == n_kv, then cell i is not moved
-    //
-    std::vector<uint32_t> ids(n_kv, n_kv);
-
-    for (uint32_t i0 = 0; i0 < n_used; ++i0) {
-        const auto & cell0 = cells[i0];
-
-        if (!cell0.is_empty()) {
-            ids[i0] = i0;
-
-            continue;
-        }
-
-        // found a hole - fill it with data from the end of the cache
-
-        uint32_t nh = 1;
-
-        // determine the size of the hole
-        while (i0 + nh < n_used && cells[i0 + nh].is_empty()) {
-            nh++;
-        }
-
-        uint32_t nf = 0;
-        uint32_t is = n_kv - 1;
-
-        // starting from the end, find nh non-empty cells
-        for (; is > i0; --is) {
-            const auto & cell1 = cells[is];
-
-            if (cell1.is_empty() || ids[is] != n_kv) {
-                continue;
-            }
-
-            // non-empty cell which is not yet moved
-            nf++;
-
-            if (nf == nh) {
-                break;
-            }
-        }
-
-        // this can only happen if `n_used` is not accurate, which would be a bug
-        GGML_ASSERT(nf == nh && "KV defrag bug: nf != nh");
-
-        nf = 0;
-
-        uint32_t i1 = is;
-
-        // are we moving a continuous block of memory?
-        bool cont = false;
-
-        // go back and move the nf cells to the hole
-        for (; i1 < n_kv; ++i1) {
-            auto & cell1 = cells[i1];
-
-            if (cell1.is_empty() || ids[i1] != n_kv) {
-                cont = false;
-                continue;
-            }
-
-            // this cell goes to (i0 + nf)
-            ids[i1] = i0 + nf;
-
-            // move the cell meta data
-            cells[i0 + nf] = cell1;
-
-            // clear the old cell and move the head there
-            cell1 = kv_cell();
-            head = n_used;
-
-            if (!cont) {
-                defrag_info.moves.push_back({i1, i0 + nf, 1});
-                cont = true;
-            } else {
-                defrag_info.moves.back().len++;
-            }
-
-            nf++;
-
-            if (nf == nh) {
-                break;
-            }
-        }
-
-        //LLAMA_LOG_INFO("(tmp log) KV defrag: move [%u, %u) to [%u, %u)\n", is, i1 + 1, i0, i0 + nh);
-
-        i0 += nh - 1;
-    }
-
-    if (defrag_info.moves.size() == 0) {
-        return false;
-    }
-
-    // LLAMA_LOG_DEBUG("(tmp log) KV defrag cell moves: %u\n", n_moves);
-
-    return true;
-}
-
-uint32_t llama_kv_cache_unified::cell_max() const {
-    for (uint32_t i = size; i > 0; --i) {
-        const kv_cell & cell = cells[i - 1];
-
-        if (cell.pos >= 0 && !cell.is_empty()) {
-            return i;
-        }
-    }
-
-    return 0;
-}
-
-void llama_kv_cache_unified::state_write(llama_io_write_i & io, llama_seq_id seq_id) const {
-    std::vector<std::pair<uint32_t, uint32_t>> cell_ranges; // ranges, from inclusive, to exclusive
-    uint32_t cell_count = 0;
-
-    // Count the number of cells with the specified seq_id
-    // Find all the ranges of cells with this seq id (or all, when -1)
-    uint32_t cell_range_begin = size;
-    for (uint32_t i = 0; i < size; ++i) {
-        const auto & cell = cells[i];
-        if ((seq_id == -1 && !cell.is_empty()) || cell.has_seq_id(seq_id)) {
-            ++cell_count;
-            if (cell_range_begin == size) {
-                cell_range_begin = i;
-            }
-        } else {
-            if (cell_range_begin != size) {
-                cell_ranges.emplace_back(cell_range_begin, i);
-                cell_range_begin = size;
-            }
-        }
-    }
-    if (cell_range_begin != size) {
-        cell_ranges.emplace_back(cell_range_begin, size);
-    }
-
-    // DEBUG CHECK: Sum of cell counts in ranges should equal the total cell count
-    uint32_t cell_count_check = 0;
-    for (const auto & range : cell_ranges) {
-        cell_count_check += range.second - range.first;
-    }
-    GGML_ASSERT(cell_count == cell_count_check);
-
-    io.write(&cell_count, sizeof(cell_count));
-
-    state_write_meta(io, cell_ranges, seq_id);
-    state_write_data(io, cell_ranges);
-}
-
-void llama_kv_cache_unified::state_read(llama_io_read_i & io, llama_seq_id seq_id) {
-    uint32_t cell_count;
-    io.read_to(&cell_count, sizeof(cell_count));
-
-    bool res = true;
-    res = res && state_read_meta(io, cell_count, seq_id);
-    res = res && state_read_data(io, cell_count);
-
-    if (!res) {
-        if (seq_id == -1) {
-            clear();
-        } else {
-            seq_rm(seq_id, -1, -1);
-        }
-        throw std::runtime_error("failed to restore kv cache");
-    }
-}
-
-void llama_kv_cache_unified::state_write_meta(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges, llama_seq_id seq_id) const {
-    for (const auto & range : cell_ranges) {
-        for (uint32_t i = range.first; i < range.second; ++i) {
-            const auto & cell = cells[i];
-            const llama_pos pos      = cell.pos;
-            const uint32_t  n_seq_id = seq_id == -1 ? cell.seq_id.size() : 0;
-
-            io.write(&pos,      sizeof(pos));
-            io.write(&n_seq_id, sizeof(n_seq_id));
-
-            if (n_seq_id) {
-                for (auto seq_id : cell.seq_id) {
-                    io.write(&seq_id, sizeof(seq_id));
-                }
-            }
-        }
-    }
-}
-
1 : 0; - const uint32_t n_layer = hparams.n_layer; - - io.write(&v_trans, sizeof(v_trans)); - io.write(&n_layer, sizeof(n_layer)); - - std::vector tmp_buf; - - // Iterate and write all the keys first, each row is a cell - // Get whole range at a time - for (uint32_t il = 0; il < n_layer; ++il) { - const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il) + hparams.n_embd_k_s(); - - // Write key type - const int32_t k_type_i = (int32_t)k_l[il]->type; - io.write(&k_type_i, sizeof(k_type_i)); - - // Write row size of key - const uint64_t k_size_row = ggml_row_size(k_l[il]->type, n_embd_k_gqa); - io.write(&k_size_row, sizeof(k_size_row)); - - // Read each range of cells of k_size length each into tmp_buf and write out - for (const auto & range : cell_ranges) { - const size_t range_size = range.second - range.first; - const size_t buf_size = range_size * k_size_row; - io.write_tensor(k_l[il], range.first * k_size_row, buf_size); - } - } - - if (!v_trans) { - for (uint32_t il = 0; il < n_layer; ++il) { - const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s(); - - // Write value type - const int32_t v_type_i = (int32_t)v_l[il]->type; - io.write(&v_type_i, sizeof(v_type_i)); - - // Write row size of value - const uint64_t v_size_row = ggml_row_size(v_l[il]->type, n_embd_v_gqa); - io.write(&v_size_row, sizeof(v_size_row)); - - // Read each range of cells of v_size length each into tmp_buf and write out - for (const auto & range : cell_ranges) { - const size_t range_size = range.second - range.first; - const size_t buf_size = range_size * v_size_row; - io.write_tensor(v_l[il], range.first * v_size_row, buf_size); - } - } - } else { - // When v is transposed, we also need the element size and get the element ranges from each row - const uint32_t kv_size = size; - for (uint32_t il = 0; il < n_layer; ++il) { - const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s(); - - // Write value type - const int32_t v_type_i = (int32_t)v_l[il]->type; - io.write(&v_type_i, sizeof(v_type_i)); - - // Write element size - const uint32_t v_size_el = ggml_type_size(v_l[il]->type); - io.write(&v_size_el, sizeof(v_size_el)); - - // Write GQA embedding size - io.write(&n_embd_v_gqa, sizeof(n_embd_v_gqa)); - - // For each row, we get the element values of each cell - for (uint32_t j = 0; j < n_embd_v_gqa; ++j) { - // Read each range of cells of v_size_el length each into tmp_buf and write out - for (const auto & range : cell_ranges) { - const size_t range_size = range.second - range.first; - const size_t src_offset = (range.first + j * kv_size) * v_size_el; - const size_t buf_size = range_size * v_size_el; - io.write_tensor(v_l[il], src_offset, buf_size); - } - } - } - } -} - -bool llama_kv_cache_unified::state_read_meta(llama_io_read_i & io, uint32_t cell_count, llama_seq_id dest_seq_id) { - if (dest_seq_id != -1) { - // single sequence - - seq_rm(dest_seq_id, -1, -1); - - llama_sbatch sbatch; - llama_ubatch batch = sbatch.reserve_ubatch(cell_count, /* has_embd */ false); - - batch.n_tokens = cell_count; - batch.n_seq_tokens = cell_count; - batch.n_seqs = 1; - - for (uint32_t i = 0; i < cell_count; ++i) { - llama_pos pos; - uint32_t n_seq_id; - - io.read_to(&pos, sizeof(pos)); - io.read_to(&n_seq_id, sizeof(n_seq_id)); - - if (n_seq_id != 0) { - LLAMA_LOG_ERROR("%s: invalid seq_id-agnostic kv cell\n", __func__); - return false; - } - - batch.pos[i] = pos; - } - batch.n_seq_id[0] = 1; - batch.seq_id[0] = &dest_seq_id; - if (!find_slot(batch)) { - LLAMA_LOG_ERROR("%s: failed to find 
available cells in kv cache\n", __func__); - return false; - } - commit(); - - // DEBUG CHECK: kv.head should be our first cell, kv.head + cell_count - 1 should be our last cell (verify seq_id and pos values) - // Assume that this is one contiguous block of cells - GGML_ASSERT(head + cell_count <= size); - GGML_ASSERT(cells[head].pos == batch.pos[0]); - GGML_ASSERT(cells[head + cell_count - 1].pos == batch.pos[cell_count - 1]); - GGML_ASSERT(cells[head].has_seq_id(dest_seq_id)); - GGML_ASSERT(cells[head + cell_count - 1].has_seq_id(dest_seq_id)); - } else { - // whole KV cache restore - - if (cell_count > size) { - LLAMA_LOG_ERROR("%s: not enough cells in kv cache\n", __func__); - return false; - } - - clear(); - - for (uint32_t i = 0; i < cell_count; ++i) { - kv_cell & cell = cells[i]; - - llama_pos pos; - uint32_t n_seq_id; - - io.read_to(&pos, sizeof(pos)); - io.read_to(&n_seq_id, sizeof(n_seq_id)); - - cell.pos = pos; - - for (uint32_t j = 0; j < n_seq_id; ++j) { - llama_seq_id seq_id; - io.read_to(&seq_id, sizeof(seq_id)); - - // TODO: llama_kv_cache_unified should have a notion of max sequences - //if (seq_id < 0 || (uint32_t) seq_id >= llama_n_seq_max(ctx)) { - if (seq_id < 0) { - //LLAMA_LOG_ERROR("%s: invalid seq_id, %d is out of range [0, %u)\n", __func__, seq_id, llama_n_seq_max(ctx)); - LLAMA_LOG_ERROR("%s: invalid seq_id, %d is out of range [0, inf)\n", __func__, seq_id); - return false; - } - - cell.seq_id.insert(seq_id); - } - } - - head = 0; - used = cell_count; - } - - return true; -} - -bool llama_kv_cache_unified::state_read_data(llama_io_read_i & io, uint32_t cell_count) { - uint32_t v_trans; - uint32_t n_layer; - io.read_to(&v_trans, sizeof(v_trans)); - io.read_to(&n_layer, sizeof(n_layer)); - - if (n_layer != hparams.n_layer) { - LLAMA_LOG_ERROR("%s: mismatched layer count (%u instead of %u)\n", __func__, n_layer, hparams.n_layer); - return false; - } - if (cell_count > size) { - LLAMA_LOG_ERROR("%s: not enough cells in kv cache to restore state (%u > %u)\n", __func__, cell_count, size); - return false; - } - if (this->v_trans != (bool) v_trans) { - LLAMA_LOG_ERROR("%s: incompatible V transposition\n", __func__); - return false; - } - - // For each layer, read the keys for each cell, one row is one cell, read as one contiguous block - for (uint32_t il = 0; il < n_layer; ++il) { - const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il) + hparams.n_embd_k_s(); - - // Read type of key - int32_t k_type_i_ref; - io.read_to(&k_type_i_ref, sizeof(k_type_i_ref)); - const int32_t k_type_i = (int32_t) k_l[il]->type; - if (k_type_i != k_type_i_ref) { - LLAMA_LOG_ERROR("%s: mismatched key type (%d != %d, layer %d)\n", __func__, k_type_i, k_type_i_ref, il); - return false; - } - - // Read row size of key - uint64_t k_size_row_ref; - io.read_to(&k_size_row_ref, sizeof(k_size_row_ref)); - const size_t k_size_row = ggml_row_size(k_l[il]->type, n_embd_k_gqa); - if (k_size_row != k_size_row_ref) { - LLAMA_LOG_ERROR("%s: mismatched key row size (%zu != %zu, layer %d)\n", __func__, k_size_row, (size_t) k_size_row_ref, il); - return false; - } - - if (cell_count) { - // Read and set the keys for the whole cell range - ggml_backend_tensor_set(k_l[il], io.read(cell_count * k_size_row), head * k_size_row, cell_count * k_size_row); - } - } - - if (!this->v_trans) { - for (uint32_t il = 0; il < n_layer; ++il) { - const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s(); - - // Read type of value - int32_t v_type_i_ref; - io.read_to(&v_type_i_ref, sizeof(v_type_i_ref)); - 
const int32_t v_type_i = (int32_t)v_l[il]->type; - if (v_type_i != v_type_i_ref) { - LLAMA_LOG_ERROR("%s: mismatched value type (%d != %d, layer %d)\n", __func__, v_type_i, v_type_i_ref, il); - return false; - } - - // Read row size of value - uint64_t v_size_row_ref; - io.read_to(&v_size_row_ref, sizeof(v_size_row_ref)); - const size_t v_size_row = ggml_row_size(v_l[il]->type, n_embd_v_gqa); - if (v_size_row != v_size_row_ref) { - LLAMA_LOG_ERROR("%s: mismatched value row size (%zu != %zu, layer %d)\n", __func__, v_size_row, (size_t) v_size_row_ref, il); - return false; - } - - if (cell_count) { - // Read and set the values for the whole cell range - ggml_backend_tensor_set(v_l[il], io.read(cell_count * v_size_row), head * v_size_row, cell_count * v_size_row); - } - } - } else { - // For each layer, read the values for each cell (transposed) - for (uint32_t il = 0; il < n_layer; ++il) { - const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s(); - - // Read type of value - int32_t v_type_i_ref; - io.read_to(&v_type_i_ref, sizeof(v_type_i_ref)); - const int32_t v_type_i = (int32_t)v_l[il]->type; - if (v_type_i != v_type_i_ref) { - LLAMA_LOG_ERROR("%s: mismatched value type (%d != %d, layer %d)\n", __func__, v_type_i, v_type_i_ref, il); - return false; - } - - // Read element size of value - uint32_t v_size_el_ref; - io.read_to(&v_size_el_ref, sizeof(v_size_el_ref)); - const size_t v_size_el = ggml_type_size(v_l[il]->type); - if (v_size_el != v_size_el_ref) { - LLAMA_LOG_ERROR("%s: mismatched value element size (%zu != %zu, layer %d)\n", __func__, v_size_el, (size_t) v_size_el_ref, il); - return false; - } - - // Read GQA embedding size - uint32_t n_embd_v_gqa_ref; - io.read_to(&n_embd_v_gqa_ref, sizeof(n_embd_v_gqa_ref)); - if (n_embd_v_gqa != n_embd_v_gqa_ref) { - LLAMA_LOG_ERROR("%s: mismatched GQA embedding size (%u != %u, layer %d)\n", __func__, n_embd_v_gqa, n_embd_v_gqa_ref, il); - return false; - } - - if (cell_count) { - // For each row in the transposed matrix, read the values for the whole cell range - for (uint32_t j = 0; j < n_embd_v_gqa; ++j) { - const size_t dst_offset = (head + j * size) * v_size_el; - ggml_backend_tensor_set(v_l[il], io.read(cell_count * v_size_el), dst_offset, cell_count * v_size_el); - } - } - } - } - - return true; -} - -// -// llama_kv_cache_recurrent -// - -llama_kv_cache_recurrent::llama_kv_cache_recurrent( - const llama_model & model, - ggml_type type_k, - ggml_type type_v, - bool offload, - uint32_t kv_size) : hparams(model.hparams) { - const int32_t n_layer = hparams.n_layer; - - LLAMA_LOG_INFO("%s: kv_size = %d, type_k = '%s', type_v = '%s', n_layer = %d\n", - __func__, kv_size, ggml_type_name(type_k), ggml_type_name(type_v), n_layer); - - head = 0; - size = kv_size; - used = 0; - - this->type_k = type_k; - this->type_v = type_v; - - cells.clear(); - cells.resize(kv_size); - - // create a context for each buffer type - std::map ctx_map; - auto ctx_for_buft = [&](ggml_backend_buffer_type_t buft) -> ggml_context * { - auto it = ctx_map.find(buft); - if (it == ctx_map.end()) { - ggml_init_params params = { - /*.mem_size =*/ size_t(2u*n_layer*ggml_tensor_overhead()), - /*.mem_buffer =*/ NULL, - /*.no_alloc =*/ true, - }; - - ggml_context * ctx = ggml_init(params); - if (!ctx) { - return nullptr; - } - - ctx_map[buft] = ctx; - ctxs.emplace_back(ctx); - - return ctx; - } - - return it->second; - }; - - k_l.reserve(n_layer); - v_l.reserve(n_layer); - - for (int i = 0; i < n_layer; i++) { - const uint32_t n_embd_k_gqa = 
hparams.n_embd_k_gqa(i) + hparams.n_embd_k_s(); - const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(i) + hparams.n_embd_v_s(); - - const char * dev_name = "CPU"; - - ggml_backend_buffer_type_t buft = ggml_backend_cpu_buffer_type(); - - if (offload) { - auto * dev = model.dev_layer(i); - buft = ggml_backend_dev_buffer_type(dev); - - dev_name = ggml_backend_dev_name(dev); - } - - LLAMA_LOG_DEBUG("%s, layer %3d: dev = %s\n", __func__, i, dev_name); - - ggml_context * ctx = ctx_for_buft(buft); - if (!ctx) { - throw std::runtime_error("failed to create ggml context for kv cache"); - } - - ggml_tensor * k = ggml_new_tensor_1d(ctx, type_k, n_embd_k_gqa*kv_size); - ggml_tensor * v = ggml_new_tensor_1d(ctx, type_v, n_embd_v_gqa*kv_size); - ggml_format_name(k, "cache_k_l%d", i); - ggml_format_name(v, "cache_v_l%d", i); - k_l.push_back(k); - v_l.push_back(v); - } - - // allocate tensors and initialize the buffers to avoid NaNs in the padding - for (auto it : ctx_map) { - auto * buft = it.first; - auto * ctx = it.second; - - ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx, buft); - if (!buf) { - throw std::runtime_error("failed to allocate buffer for kv cache"); - } - ggml_backend_buffer_clear(buf, 0); - LLAMA_LOG_INFO("%s: %10s KV buffer size = %8.2f MiB\n", __func__, ggml_backend_buffer_name(buf), ggml_backend_buffer_get_size(buf)/1024.0/1024.0); - bufs.emplace_back(buf); - } - - { - const size_t memory_size_k = size_k_bytes(); - const size_t memory_size_v = size_v_bytes(); - - LLAMA_LOG_INFO("%s: KV self size = %7.2f MiB, K (%s): %7.2f MiB, V (%s): %7.2f MiB\n", __func__, - (float)(memory_size_k + memory_size_v) / (1024.0f * 1024.0f), - ggml_type_name(type_k), (float)memory_size_k / (1024.0f * 1024.0f), - ggml_type_name(type_v), (float)memory_size_v / (1024.0f * 1024.0f)); - } -} - -void llama_kv_cache_recurrent::clear() { - for (int32_t i = 0; i < (int32_t) size; ++i) { - cells[i].pos = -1; - cells[i].seq_id.clear(); - cells[i].src = -1; - cells[i].tail = -1; - } - head = 0; - used = 0; - - for (auto & buf : bufs) { - ggml_backend_buffer_clear(buf.get(), 0); - } -} - -bool llama_kv_cache_recurrent::seq_rm(llama_seq_id seq_id, llama_pos p0, llama_pos p1) { - uint32_t new_head = size; - - if (p0 < 0) { - p0 = 0; - } - - if (p1 < 0) { - p1 = std::numeric_limits::max(); - } - - // models like Mamba or RWKV can't have a state partially erased - if (seq_id >= (int64_t) size) { - // could be fatal - return false; - } - if (0 <= seq_id) { - int32_t & tail_id = cells[seq_id].tail; - if (tail_id >= 0) { - const kv_cell & cell = cells[tail_id]; - // partial intersection is invalid - if ((0 < p0 && p0 <= cell.pos) || (0 < p1 && p1 <= cell.pos)) { - return false; - } - // invalidate tails which will be cleared - if (p0 <= cell.pos && cell.pos < p1) { - tail_id = -1; - } - } - } else { - // seq_id is negative, then the range should include everything or nothing - if (p0 != p1 && (p0 != 0 || p1 != std::numeric_limits::max())) { - return false; - } - } - - for (uint32_t i = 0; i < size; ++i) { - if (cells[i].pos >= p0 && cells[i].pos < p1) { - if (seq_id < 0) { - cells[i].seq_id.clear(); - } else if (cells[i].has_seq_id(seq_id)) { - cells[i].seq_id.erase(seq_id); - } else { - continue; - } - if (cells[i].is_empty()) { - // keep count of the number of used cells - if (cells[i].pos >= 0) { - used--; - } - cells[i].pos = -1; - cells[i].src = -1; - if (new_head == size) { - new_head = i; - } - } - } - } - - // If we freed up a slot, set head to it so searching can start there. 
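// (illustrative note, not part of the original source) e.g. with size = 8 and
// head = 6, emptying cell 2 sets new_head = 2 < head, so the next find_slot()
// scan starts from the first known-free cell instead of walking past the
// still-occupied middle of the cache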
- if (new_head != size && new_head < head) { - head = new_head; - } - - return true; -} - -void llama_kv_cache_recurrent::seq_cp(llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) { - if (seq_id_src == seq_id_dst) { - return; - } - - if (p0 < 0) { - p0 = 0; - } - - if (p1 < 0) { - p1 = std::numeric_limits::max(); - } - - if ((uint32_t) seq_id_dst < size && (uint32_t) seq_id_src < size) { - kv_cell & tail_src = cells[seq_id_src]; - kv_cell & tail_dst = cells[seq_id_dst]; - if (tail_dst.tail >= 0) { - // clear destination seq_id if it wasn't empty - kv_cell & cell_dst = cells[tail_dst.tail]; - - cell_dst.seq_id.erase(seq_id_dst); - tail_dst.tail = -1; - if (cell_dst.seq_id.empty()) { - cell_dst.pos = -1; - cell_dst.src = -1; - used -= 1; - } - } - if (tail_src.tail >= 0) { - kv_cell & cell_src = cells[tail_src.tail]; - - cell_src.seq_id.insert(seq_id_dst); - tail_dst.tail = tail_src.tail; - } - } -} - -void llama_kv_cache_recurrent::seq_keep(llama_seq_id seq_id) { - uint32_t new_head = size; - - for (uint32_t i = 0; i < size; ++i) { - if ((llama_seq_id) i != seq_id) { - cells[i].tail = -1; - } - - if (!cells[i].has_seq_id(seq_id)) { - if (cells[i].pos >= 0) { - used--; - } - - cells[i].pos = -1; - cells[i].src = -1; - cells[i].seq_id.clear(); - - if (new_head == size){ - new_head = i; - } - } else { - cells[i].seq_id.clear(); - cells[i].seq_id.insert(seq_id); - } - } - - // If we freed up a slot, set head to it so searching can start there. - if (new_head != size && new_head < head) { - head = new_head; - } -} - -void llama_kv_cache_recurrent::seq_add(llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos delta) { - if (delta == 0) { - return; - } - - if (p0 < 0) { - p0 = 0; - } - - if (p1 < 0) { - p1 = std::numeric_limits::max(); - } - - // If there is no range then return early to avoid looping over the - if (p0 == p1) { - return; - } - - // for Mamba-like or RWKV models, only the pos needs to be shifted - if (0 <= seq_id && seq_id < (int64_t) size) { - const int32_t tail_id = cells[seq_id].tail; - if (tail_id >= 0) { - kv_cell & cell = cells[tail_id]; - if (cell.has_seq_id(seq_id) && p0 <= cell.pos && cell.pos < p1) { - cell.pos += delta; - } - } - } -} - -void llama_kv_cache_recurrent::seq_div(llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) { - if (d == 1) { - return; - } - - if (p0 < 0) { - p0 = 0; - } - - if (p1 < 0) { - p1 = std::numeric_limits::max(); - } - - // If there is no range then return early to avoid looping over the cache. 
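// (illustrative example, not part of the original source) in this recurrent
// cache only the tail cell of a sequence carries a position, so seq_add() /
// seq_div() touch at most one cell; e.g. with the tail of seq 0 at pos 9:
//
//   seq_add(0, 0, 10, 5);  // tail pos: 9 -> 14  (9 lies inside [0, 10))
//   seq_div(0, 0, 15, 2);  // tail pos: 14 -> 7  (14 lies inside [0, 15))
//
// positions outside [p0, p1) are left untouched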
- if (p0 == p1) { - return; - } - - // for Mamba-like or RWKV models, only the pos needs to be changed - if (0 <= seq_id && seq_id < (int64_t) size) { - const int32_t tail_id = cells[seq_id].tail; - if (tail_id >= 0) { - kv_cell & cell = cells[tail_id]; - if (cell.has_seq_id(seq_id) && p0 <= cell.pos && cell.pos < p1) { - cell.pos /= d; - } - } - } -} - -llama_pos llama_kv_cache_recurrent::seq_pos_max(llama_seq_id seq_id) const { - llama_pos result = 0; - - for (uint32_t i = 0; i < size; ++i) { - if (cells[i].has_seq_id(seq_id)) { - result = std::max(result, cells[i].pos); - } - } - - return result; -} - -void llama_kv_cache_recurrent::restore() { - if (pending.ranges.empty()) { - return; - } - - seq_rm(-1, -1, -1); -} - -void llama_kv_cache_recurrent::commit() { - pending.ranges.clear(); -} - -bool llama_kv_cache_recurrent::update(llama_context & lctx) { - GGML_UNUSED(lctx); - return false; -} - -void llama_kv_cache_recurrent::defrag_sched(float thold) { - GGML_UNUSED(thold); - // noop -} - -void llama_kv_cache_recurrent::set_full() { - n = size; -} - -llama_sbatch llama_kv_cache_recurrent::sbatch_init( - const llama_batch & batch, - bool logits_all) { - return llama_sbatch(batch, hparams.n_embd, false, logits_all); -} - -llama_ubatch llama_kv_cache_recurrent::ubatch_next(llama_sbatch & sbatch, uint32_t n_ubatch, bool embd_pooled) const { - if (embd_pooled) { - // Pooled embeddings cannot be split across ubatches (yet) - return sbatch.split_seq(n_ubatch); - } - - return sbatch.split_equal(n_ubatch); -} - -bool llama_kv_cache_recurrent::find_slot( - const llama_ubatch & ubatch) { - const uint32_t n_tokens = ubatch.n_tokens; - const uint32_t n_seqs = ubatch.n_seqs; - - const uint32_t n_seq_tokens = ubatch.n_seq_tokens; - - // if we have enough unused cells before the current head -> - // better to start searching from the beginning of the cache, hoping to fill it - if (head > used + 2*n_tokens) { - head = 0; - } - - // For recurrent state architectures (like Mamba or RWKV), - // each cache cell can store the state for a whole sequence. - // A slot should be always be contiguous. - - // can only process batches with an equal number of new tokens in each sequence - GGML_ASSERT(ubatch.equal_seqs); - - int32_t min = size - 1; - int32_t max = 0; - - // everything should fit if all seq_ids are smaller than the max - for (uint32_t s = 0; s < n_seqs; ++s) { - const uint32_t n_seq_id = ubatch.n_seq_id[s]; - for (uint32_t j = 0; j < n_seq_id; ++j) { - const llama_seq_id seq_id = ubatch.seq_id[s][j]; - - if (seq_id < 0 || (uint32_t) seq_id >= size) { - // too big seq_id - // TODO: would it be possible to resize the cache instead? 
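// (illustrative note, not part of the original source) in this cache the cell
// index doubles as the seq_id slot: cells[seq_id].tail points at the cell
// holding that sequence's state, so a cache of size 4 can only ever serve
// seq_ids 0..3, and the check below rejects seq_id = 7 instead of growing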
- LLAMA_LOG_ERROR("%s: seq_id=%d >= n_seq_max=%d Try using a bigger --parallel value\n", __func__, seq_id, size); - return false; - } - if (j > 0) { - kv_cell & seq = cells[seq_id]; - if (seq.tail >= 0) { - kv_cell & cell = cells[seq.tail]; - // clear cells from seq_ids that become shared - // (should not normally happen, but let's handle it anyway) - cell.seq_id.erase(seq_id); - seq.tail = -1; - if (cell.seq_id.empty()) { - cell.pos = -1; - cell.src = -1; - used -= 1; - } - } - } - } - } - -#ifndef NDEBUG - { - std::vector tails_verif; - tails_verif.assign(size, -1); - for (uint32_t i = 0; i < size; ++i) { - kv_cell & cell = cells[i]; - for (llama_seq_id seq_id : cell.seq_id) { - if (tails_verif[seq_id] != -1) { - LLAMA_LOG_ERROR("%s: duplicate tail for seq_id %d in cell %d and %d\n", __func__, seq_id, i, tails_verif[seq_id]); - } - tails_verif[seq_id] = i; - } - } - for (uint32_t i = 0; i < size; ++i) { - if (tails_verif[i] != cells[i].tail) { - LLAMA_LOG_ERROR("%s: wrong tail for seq_id %d, (%d instead of %d)\n", __func__, i, cells[i].tail, tails_verif[i]); - } - } - } -#endif - - // find next empty cell - uint32_t next_empty_cell = head; - - for (uint32_t i = 0; i < size; ++i) { - if (next_empty_cell >= size) { next_empty_cell -= size; } - kv_cell & cell = cells[next_empty_cell]; - if (cell.is_empty()) { break; } - next_empty_cell += 1; - } - - // find usable cell range - for (uint32_t s = 0; s < n_seqs; ++s) { - const llama_seq_id seq_id = ubatch.seq_id[s][0]; - kv_cell & seq_meta = cells[seq_id]; - bool has_cell = false; - if (seq_meta.tail >= 0) { - kv_cell & cell = cells[seq_meta.tail]; - GGML_ASSERT(cell.has_seq_id(seq_id)); - // does this seq_id "own" the cell? - if (cell.seq_id.size() == 1) { has_cell = true; } - } - if (!has_cell) { - kv_cell & empty_cell = cells[next_empty_cell]; - GGML_ASSERT(empty_cell.is_empty()); - // copy old tail into the empty cell - if (seq_meta.tail >= 0) { - kv_cell & orig_cell = cells[seq_meta.tail]; - empty_cell.pos = orig_cell.pos; - empty_cell.src = orig_cell.src; - orig_cell.seq_id.erase(seq_id); - empty_cell.seq_id.insert(seq_id); // will be overwritten - } - seq_meta.tail = next_empty_cell; - // find next empty cell - if (s + 1 < n_seqs) { - next_empty_cell += 1; - for (uint32_t i = 0; i < size; ++i) { - if (next_empty_cell >= size) { next_empty_cell -= size; } - kv_cell & cell = cells[next_empty_cell]; - if (cell.is_empty()) { break; } - next_empty_cell += 1; - } - } - } - if (min > seq_meta.tail) { min = seq_meta.tail; } - if (max < seq_meta.tail) { max = seq_meta.tail; } - } - - // gather and re-order - for (uint32_t s = 0; s < n_seqs; ++s) { - int32_t dst_id = s + min; - int32_t src_id = cells[ubatch.seq_id[s][0]].tail; - if (dst_id != src_id) { - kv_cell & dst_cell = cells[dst_id]; - kv_cell & src_cell = cells[src_id]; - - std::swap(dst_cell.pos, src_cell.pos); - std::swap(dst_cell.src, src_cell.src); - std::swap(dst_cell.seq_id, src_cell.seq_id); - - // swap tails (assuming they NEVER overlap) - for (const llama_seq_id seq_id : src_cell.seq_id) { - cells[seq_id].tail = src_id; - } - for (const llama_seq_id seq_id : dst_cell.seq_id) { - cells[seq_id].tail = dst_id; - } - } - } - - // update the pos of the used seqs - for (uint32_t s = 0; s < n_seqs; ++s) { - const llama_pos last_pos = ubatch.pos[n_seq_tokens * s + n_seq_tokens - 1]; - int32_t cell_id = s + min; - kv_cell & cell = cells[cell_id]; - - if (cell.pos >= 0 && last_pos != cell.pos + (llama_pos) n_seq_tokens) { - // What should happen when the pos backtracks or skips a value? 
- // Clearing the state mid-batch would require special-casing which isn't done. - LLAMA_LOG_WARN("%s: non-consecutive token position %d after %d for sequence %d with %u new tokens\n", - __func__, last_pos, cell.pos, ubatch.seq_id[s][0], n_seq_tokens); - } - cell.pos = last_pos; - cell.seq_id.clear(); - for (int32_t j = 0; j < ubatch.n_seq_id[s]; ++j) { - const llama_seq_id seq_id = ubatch.seq_id[s][j]; - cell.seq_id.insert(seq_id); - cells[seq_id].tail = cell_id; - } - } - - // allow getting the range of used cells, from head to head + n - head = min; - n = max - min + 1; - used = std::count_if(cells.begin(), cells.end(), - [](const kv_cell & cell){ return !cell.is_empty(); }); - - // sanity check - return n >= n_seqs; -} - -int32_t llama_kv_cache_recurrent::get_n_tokens() const { - int32_t result = 0; - - for (uint32_t i = 0; i < size; i++) { - result += cells[i].seq_id.size(); - } - - return result; -} - -int32_t llama_kv_cache_recurrent::get_used_cells() const { - return used; -} - -llama_pos llama_kv_cache_recurrent::get_pos_max() const { - llama_pos pos_max = -1; - for (const auto & cell : cells) { - pos_max = std::max(pos_max, cell.pos); - } - - return pos_max; -} - -bool llama_kv_cache_recurrent::get_can_shift() const { - return false; -} - -int32_t llama_kv_cache_recurrent::s_copy(int i) const { - const uint32_t cell_id = i + head; - - ////////////////////////////////////////////// - // TODO: this should not mutate the KV cache ! - kv_cell & cell = const_cast(cells[cell_id]); - - // prevent out-of-bound sources - if (cell.src < 0 || (uint32_t) cell.src >= size) { - cell.src = cell_id; - } - - int32_t res = cell.src; - - // TODO: do not mutate the KV cache - // ensure copy only happens once - if (cell.src != (int32_t) cell_id) { - cell.src = cell_id; - } - - return res; -} - -float llama_kv_cache_recurrent::s_mask(int i) const { - const uint32_t cell_id = i + head; - - ////////////////////////////////////////////// - // TODO: this should not mutate the KV cache ! 
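// (illustrative note, not part of the original source) s_copy(i) and s_mask(i)
// below feed graph inputs for recurrent-state handling: s_copy(i) returns the
// cell whose state slot head + i should start from (and then self-points so
// the copy happens only once), while s_mask(i) returns 0.0f for cells whose
// state must be treated as cleared, letting the graph zero it before use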
- kv_cell & cell = const_cast(cells[cell_id]); - - float res = (float) (cell.src >= 0); - - // only clear once - if (cell.src < 0) { - cell.src = cell_id; - } - - return res; -} - -uint32_t llama_kv_cache_recurrent::cell_max() const { - for (uint32_t i = size; i > 0; --i) { - const kv_cell & cell = cells[i - 1]; - - if (cell.pos >= 0 && !cell.is_empty()) { - return i; - } - } - - return 0; -} - -size_t llama_kv_cache_recurrent::total_size() const { - size_t size = 0; - for (const auto & buf : bufs) { - size += ggml_backend_buffer_get_size(buf.get()); - } - - return size; -} - -size_t llama_kv_cache_recurrent::size_k_bytes() const { - size_t size_k_bytes = 0; - - for (const auto & k : k_l) { - size_k_bytes += ggml_nbytes(k); - } - - return size_k_bytes; -} - -size_t llama_kv_cache_recurrent::size_v_bytes() const { - size_t size_v_bytes = 0; - - for (const auto & v : v_l) { - size_v_bytes += ggml_nbytes(v); - } - - return size_v_bytes; -} - -void llama_kv_cache_recurrent::state_write(llama_io_write_i & io, llama_seq_id seq_id) const { - std::vector> cell_ranges; // ranges, from inclusive, to exclusive - uint32_t cell_count = 0; - - // Count the number of cells with the specified seq_id - // Find all the ranges of cells with this seq id (or all, when -1) - uint32_t cell_range_begin = size; - for (uint32_t i = 0; i < size; ++i) { - const auto & cell = cells[i]; - if ((seq_id == -1 && !cell.is_empty()) || cell.has_seq_id(seq_id)) { - ++cell_count; - if (cell_range_begin == size) { - cell_range_begin = i; - } - } else { - if (cell_range_begin != size) { - cell_ranges.emplace_back(cell_range_begin, i); - cell_range_begin = size; - } - } - } - if (cell_range_begin != size) { - cell_ranges.emplace_back(cell_range_begin, size); - } - - // DEBUG CHECK: Sum of cell counts in ranges should equal the total cell count - uint32_t cell_count_check = 0; - for (const auto & range : cell_ranges) { - cell_count_check += range.second - range.first; - } - GGML_ASSERT(cell_count == cell_count_check); - - io.write(&cell_count, sizeof(cell_count)); - - state_write_meta(io, cell_ranges, seq_id); - state_write_data(io, cell_ranges); -} - -void llama_kv_cache_recurrent::state_read(llama_io_read_i & io, llama_seq_id seq_id) { - uint32_t cell_count; - io.read_to(&cell_count, sizeof(cell_count)); - - bool res = true; - res = res && state_read_meta(io, cell_count, seq_id); - res = res && state_read_data(io, cell_count); - - if (!res) { - if (seq_id == -1) { - clear(); - } else { - seq_rm(seq_id, -1, -1); - } - throw std::runtime_error("failed to restore kv cache"); - } -} - -void llama_kv_cache_recurrent::state_write_meta(llama_io_write_i & io, const std::vector> & cell_ranges, llama_seq_id seq_id) const { - for (const auto & range : cell_ranges) { - for (uint32_t i = range.first; i < range.second; ++i) { - const auto & cell = cells[i]; - const llama_pos pos = cell.pos; - const uint32_t n_seq_id = seq_id == -1 ? 
cell.seq_id.size() : 0; - - io.write(&pos, sizeof(pos)); - io.write(&n_seq_id, sizeof(n_seq_id)); - - if (n_seq_id) { - for (auto seq_id : cell.seq_id) { - io.write(&seq_id, sizeof(seq_id)); - } - } - } - } -} - -void llama_kv_cache_recurrent::state_write_data(llama_io_write_i & io, const std::vector> & cell_ranges) const { - const uint32_t v_trans = 0; - const uint32_t n_layer = hparams.n_layer; - - io.write(&v_trans, sizeof(v_trans)); - io.write(&n_layer, sizeof(n_layer)); - - std::vector tmp_buf; - - // Iterate and write all the keys first, each row is a cell - // Get whole range at a time - for (uint32_t il = 0; il < n_layer; ++il) { - const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il) + hparams.n_embd_k_s(); - - // Write key type - const int32_t k_type_i = (int32_t)k_l[il]->type; - io.write(&k_type_i, sizeof(k_type_i)); - - // Write row size of key - const uint64_t k_size_row = ggml_row_size(k_l[il]->type, n_embd_k_gqa); - io.write(&k_size_row, sizeof(k_size_row)); - - // Read each range of cells of k_size length each into tmp_buf and write out - for (const auto & range : cell_ranges) { - const size_t range_size = range.second - range.first; - const size_t buf_size = range_size * k_size_row; - io.write_tensor(k_l[il], range.first * k_size_row, buf_size); - } - } - - if (!v_trans) { - for (uint32_t il = 0; il < n_layer; ++il) { - const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s(); - - // Write value type - const int32_t v_type_i = (int32_t)v_l[il]->type; - io.write(&v_type_i, sizeof(v_type_i)); - - // Write row size of value - const uint64_t v_size_row = ggml_row_size(v_l[il]->type, n_embd_v_gqa); - io.write(&v_size_row, sizeof(v_size_row)); - - // Read each range of cells of v_size length each into tmp_buf and write out - for (const auto & range : cell_ranges) { - const size_t range_size = range.second - range.first; - const size_t buf_size = range_size * v_size_row; - io.write_tensor(v_l[il], range.first * v_size_row, buf_size); - } - } - } else { - // When v is transposed, we also need the element size and get the element ranges from each row - const uint32_t kv_size = size; - for (uint32_t il = 0; il < n_layer; ++il) { - const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s(); - - // Write value type - const int32_t v_type_i = (int32_t)v_l[il]->type; - io.write(&v_type_i, sizeof(v_type_i)); - - // Write element size - const uint32_t v_size_el = ggml_type_size(v_l[il]->type); - io.write(&v_size_el, sizeof(v_size_el)); - - // Write GQA embedding size - io.write(&n_embd_v_gqa, sizeof(n_embd_v_gqa)); - - // For each row, we get the element values of each cell - for (uint32_t j = 0; j < n_embd_v_gqa; ++j) { - // Read each range of cells of v_size_el length each into tmp_buf and write out - for (const auto & range : cell_ranges) { - const size_t range_size = range.second - range.first; - const size_t src_offset = (range.first + j * kv_size) * v_size_el; - const size_t buf_size = range_size * v_size_el; - io.write_tensor(v_l[il], src_offset, buf_size); - } - } - } - } -} - -bool llama_kv_cache_recurrent::state_read_meta(llama_io_read_i & io, uint32_t cell_count, llama_seq_id dest_seq_id) { - if (dest_seq_id != -1) { - // single sequence - - seq_rm(dest_seq_id, -1, -1); - - llama_sbatch sbatch; - llama_ubatch batch = sbatch.reserve_ubatch(cell_count, /* has_embd */ false); - - batch.n_tokens = cell_count; - batch.n_seq_tokens = cell_count; - batch.n_seqs = 1; - - for (uint32_t i = 0; i < cell_count; ++i) { - llama_pos pos; - uint32_t 
n_seq_id; - - io.read_to(&pos, sizeof(pos)); - io.read_to(&n_seq_id, sizeof(n_seq_id)); - - if (n_seq_id != 0) { - LLAMA_LOG_ERROR("%s: invalid seq_id-agnostic kv cell\n", __func__); - return false; - } - - batch.pos[i] = pos; - } - batch.n_seq_id[0] = 1; - batch.seq_id[0] = &dest_seq_id; - if (!find_slot(batch)) { - LLAMA_LOG_ERROR("%s: failed to find available cells in kv cache\n", __func__); - return false; - } - commit(); - - // DEBUG CHECK: kv.head should be our first cell, kv.head + cell_count - 1 should be our last cell (verify seq_id and pos values) - // Assume that this is one contiguous block of cells - GGML_ASSERT(head + cell_count <= size); - GGML_ASSERT(cells[head].pos == batch.pos[0]); - GGML_ASSERT(cells[head + cell_count - 1].pos == batch.pos[cell_count - 1]); - GGML_ASSERT(cells[head].has_seq_id(dest_seq_id)); - GGML_ASSERT(cells[head + cell_count - 1].has_seq_id(dest_seq_id)); - } else { - // whole KV cache restore - - if (cell_count > size) { - LLAMA_LOG_ERROR("%s: not enough cells in kv cache\n", __func__); - return false; - } - - clear(); - - for (uint32_t i = 0; i < cell_count; ++i) { - kv_cell & cell = cells[i]; - - llama_pos pos; - uint32_t n_seq_id; - - io.read_to(&pos, sizeof(pos)); - io.read_to(&n_seq_id, sizeof(n_seq_id)); - - cell.pos = pos; - - for (uint32_t j = 0; j < n_seq_id; ++j) { - llama_seq_id seq_id; - io.read_to(&seq_id, sizeof(seq_id)); - - // TODO: llama_kv_cache_recurrent should have a notion of max sequences - //if (seq_id < 0 || (uint32_t) seq_id >= llama_n_seq_max(ctx)) { - if (seq_id < 0) { - //LLAMA_LOG_ERROR("%s: invalid seq_id, %d is out of range [0, %u)\n", __func__, seq_id, llama_n_seq_max(ctx)); - LLAMA_LOG_ERROR("%s: invalid seq_id, %d is out of range [0, inf)\n", __func__, seq_id); - return false; - } - - cell.seq_id.insert(seq_id); - - int32_t & tail = cells[seq_id].tail; - if (tail != -1) { - LLAMA_LOG_ERROR("%s: duplicate tail for seq_id %d in cell %d and %d\n", __func__, seq_id, i, tail); - return false; - } - tail = i; - } - } - - head = 0; - used = cell_count; - } - - for (uint32_t i = 0; i < cell_count; ++i) { - uint32_t cell_id = head + i; - // make sure the recurrent states will keep their restored state - cells[cell_id].src = cell_id; - } - - return true; -} - -bool llama_kv_cache_recurrent::state_read_data(llama_io_read_i & io, uint32_t cell_count) { - uint32_t v_trans; - uint32_t n_layer; - io.read_to(&v_trans, sizeof(v_trans)); - io.read_to(&n_layer, sizeof(n_layer)); - - if (n_layer != hparams.n_layer) { - LLAMA_LOG_ERROR("%s: mismatched layer count (%u instead of %u)\n", __func__, n_layer, hparams.n_layer); - return false; - } - if (cell_count > size) { - LLAMA_LOG_ERROR("%s: not enough cells in kv cache to restore state (%u > %u)\n", __func__, cell_count, size); - return false; - } - if (false != (bool) v_trans) { - LLAMA_LOG_ERROR("%s: incompatible V transposition\n", __func__); - return false; - } - - // For each layer, read the keys for each cell, one row is one cell, read as one contiguous block - for (uint32_t il = 0; il < n_layer; ++il) { - const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il) + hparams.n_embd_k_s(); - - // Read type of key - int32_t k_type_i_ref; - io.read_to(&k_type_i_ref, sizeof(k_type_i_ref)); - const int32_t k_type_i = (int32_t) k_l[il]->type; - if (k_type_i != k_type_i_ref) { - LLAMA_LOG_ERROR("%s: mismatched key type (%d != %d, layer %d)\n", __func__, k_type_i, k_type_i_ref, il); - return false; - } - - // Read row size of key - uint64_t k_size_row_ref; - io.read_to(&k_size_row_ref, 
sizeof(k_size_row_ref)); - const size_t k_size_row = ggml_row_size(k_l[il]->type, n_embd_k_gqa); - if (k_size_row != k_size_row_ref) { - LLAMA_LOG_ERROR("%s: mismatched key row size (%zu != %zu, layer %d)\n", __func__, k_size_row, (size_t) k_size_row_ref, il); - return false; - } - - if (cell_count) { - // Read and set the keys for the whole cell range - ggml_backend_tensor_set(k_l[il], io.read(cell_count * k_size_row), head * k_size_row, cell_count * k_size_row); - } - } - - if (!v_trans) { - for (uint32_t il = 0; il < n_layer; ++il) { - const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s(); - - // Read type of value - int32_t v_type_i_ref; - io.read_to(&v_type_i_ref, sizeof(v_type_i_ref)); - const int32_t v_type_i = (int32_t)v_l[il]->type; - if (v_type_i != v_type_i_ref) { - LLAMA_LOG_ERROR("%s: mismatched value type (%d != %d, layer %d)\n", __func__, v_type_i, v_type_i_ref, il); - return false; - } - - // Read row size of value - uint64_t v_size_row_ref; - io.read_to(&v_size_row_ref, sizeof(v_size_row_ref)); - const size_t v_size_row = ggml_row_size(v_l[il]->type, n_embd_v_gqa); - if (v_size_row != v_size_row_ref) { - LLAMA_LOG_ERROR("%s: mismatched value row size (%zu != %zu, layer %d)\n", __func__, v_size_row, (size_t) v_size_row_ref, il); - return false; - } - - if (cell_count) { - // Read and set the values for the whole cell range - ggml_backend_tensor_set(v_l[il], io.read(cell_count * v_size_row), head * v_size_row, cell_count * v_size_row); - } - } - } else { - // For each layer, read the values for each cell (transposed) - for (uint32_t il = 0; il < n_layer; ++il) { - const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s(); - - // Read type of value - int32_t v_type_i_ref; - io.read_to(&v_type_i_ref, sizeof(v_type_i_ref)); - const int32_t v_type_i = (int32_t)v_l[il]->type; - if (v_type_i != v_type_i_ref) { - LLAMA_LOG_ERROR("%s: mismatched value type (%d != %d, layer %d)\n", __func__, v_type_i, v_type_i_ref, il); - return false; - } - - // Read element size of value - uint32_t v_size_el_ref; - io.read_to(&v_size_el_ref, sizeof(v_size_el_ref)); - const size_t v_size_el = ggml_type_size(v_l[il]->type); - if (v_size_el != v_size_el_ref) { - LLAMA_LOG_ERROR("%s: mismatched value element size (%zu != %zu, layer %d)\n", __func__, v_size_el, (size_t) v_size_el_ref, il); - return false; - } - - // Read GQA embedding size - uint32_t n_embd_v_gqa_ref; - io.read_to(&n_embd_v_gqa_ref, sizeof(n_embd_v_gqa_ref)); - if (n_embd_v_gqa != n_embd_v_gqa_ref) { - LLAMA_LOG_ERROR("%s: mismatched GQA embedding size (%u != %u, layer %d)\n", __func__, n_embd_v_gqa, n_embd_v_gqa_ref, il); - return false; - } - - if (cell_count) { - // For each row in the transposed matrix, read the values for the whole cell range - for (uint32_t j = 0; j < n_embd_v_gqa; ++j) { - const size_t dst_offset = (head + j * size) * v_size_el; - ggml_backend_tensor_set(v_l[il], io.read(cell_count * v_size_el), dst_offset, cell_count * v_size_el); - } - } - } - } - - return true; -} - -// -// kv cache view -// - -llama_kv_cache_view llama_kv_cache_view_init(const llama_kv_cache & kv, int32_t n_seq_max) { - llama_kv_cache_view result = { - /*.n_cells = */ 0, - /*.n_seq_max = */ n_seq_max, - /*.token_count = */ 0, - /*.used_cells = */ kv.get_used_cells(), - /*.max_contiguous = */ 0, - /*.max_contiguous_idx = */ -1, - /*.cells = */ nullptr, - /*.cells_sequences = */ nullptr, - }; - - return result; -} - -void llama_kv_cache_view_free(llama_kv_cache_view * view) { - if (view->cells != 
nullptr) {
-        free(view->cells);
-        view->cells = nullptr;
-    }
-    if (view->cells_sequences != nullptr) {
-        free(view->cells_sequences);
-        view->cells_sequences = nullptr;
-    }
-}
-
-void llama_kv_cache_view_update(llama_kv_cache_view * view, const llama_kv_cache * kv) {
-    // TODO: rework this in the future, for now quick hack
-    const llama_kv_cache_unified * kvu = dynamic_cast<const llama_kv_cache_unified *>(kv);
-    if (kvu == nullptr) {
-        LLAMA_LOG_ERROR("%s: the kv_cache_view currently works only with llama_kv_cache_unified\n", __func__);
-        return;
-    }
-
-    if (uint32_t(view->n_cells) < kvu->size || view->cells == nullptr) {
-        view->n_cells = int32_t(kvu->size);
-        void * p = realloc(view->cells, sizeof(llama_kv_cache_view_cell) * view->n_cells);
-        GGML_ASSERT(p != nullptr && "Failed to alloc kv_cache_view cells");
-        view->cells = (llama_kv_cache_view_cell *)p;
-        p = realloc(view->cells_sequences, sizeof(llama_seq_id) * view->n_seq_max * view->n_cells);
-        GGML_ASSERT(p != nullptr && "Failed to alloc kv_cache_view cells sequences");
-        view->cells_sequences = (llama_seq_id *)p;
-    }
-
-    const std::vector<llama_kv_cache_unified::kv_cell> & kv_cells = kvu->cells;
-    llama_kv_cache_view_cell * c_curr = view->cells;
-    llama_seq_id * cs_curr = view->cells_sequences;
-    int32_t used_cells = 0;
-    int32_t token_count = 0;
-    int32_t curr_contig_idx = -1;
-    uint32_t max_contig = 0;
-    int32_t max_contig_idx = -1;
-
-    for (int32_t i = 0; i < int32_t(kvu->size); i++, c_curr++, cs_curr += view->n_seq_max) {
-        const size_t curr_size = kv_cells[i].seq_id.size();
-        token_count += curr_size;
-        c_curr->pos = kv_cells[i].pos + kv_cells[i].delta;
-
-        if (curr_size > 0) {
-            if (curr_contig_idx >= 0 && uint32_t(i - curr_contig_idx) > max_contig) {
-                max_contig = i - curr_contig_idx;
-                max_contig_idx = curr_contig_idx;
-            }
-            curr_contig_idx = -1;
-        } else if (curr_contig_idx < 0) {
-            curr_contig_idx = i;
-        }
-
-        int seq_idx = 0;
-        for (const llama_seq_id it : kv_cells[i].seq_id) {
-            if (seq_idx >= view->n_seq_max) {
-                break;
-            }
-            cs_curr[seq_idx] = it;
-            seq_idx++;
-        }
-        if (seq_idx != 0) {
-            used_cells++;
-        }
-        for (; seq_idx < view->n_seq_max; seq_idx++) {
-            cs_curr[seq_idx] = -1;
-        }
-    }
-    if (curr_contig_idx >= 0 && kv_cells.size() - curr_contig_idx > max_contig) {
-        max_contig_idx = curr_contig_idx;
-        max_contig = kv_cells.size() - curr_contig_idx;
-    }
-    view->max_contiguous = max_contig;
-    view->max_contiguous_idx = max_contig_idx;
-    view->token_count = token_count;
-    view->used_cells = used_cells;
-    if (uint32_t(used_cells) != kvu->used) {
-        LLAMA_LOG_ERROR("%s: used cells mismatch. kv_cache says %d but we calculated %d\n",
-            __func__, kvu->used, used_cells);
-    }
-}
diff --git a/llama/llama.cpp/src/llama-kv-cache.h b/llama/llama.cpp/src/llama-kv-cache.h
deleted file mode 100644
index 928b97125..000000000
--- a/llama/llama.cpp/src/llama-kv-cache.h
+++ /dev/null
@@ -1,413 +0,0 @@
-#pragma once
-
-#include "llama.h"
-#include "llama-io.h"
-#include "llama-graph.h"
-#include "llama-memory.h"
-
-#include "ggml-cpp.h"
-
-#include <set>
-#include <vector>
-
-struct llama_cparams;
-struct llama_hparams;
-struct llama_ubatch;
-struct llama_sbatch;
-struct llama_model;
-struct llama_context;
-
-struct llama_kv_cache : public llama_memory_i {
-    virtual ~llama_kv_cache() = default;
-
-    // call if batch processing fails - restores the cache state
-    virtual void restore() = 0;
-
-    // call after successful batch processing - clears any pending state
-    virtual void commit() = 0;
-
-    // process any pending defrag/shift/etc.
operations - // optionally call once before processing a new batch - virtual bool update(llama_context & lctx) = 0; - - // schedule a defrag if the fragmentation threshold is exceeded. otherwise, do nothing - virtual void defrag_sched(float thold) = 0; - - // simulate full cache, used for allocating worst-case compute buffers - virtual void set_full() = 0; - - // - // batch processing - // - - virtual llama_sbatch sbatch_init(const llama_batch & batch, bool logits_all) = 0; - - // different KV caches require different batch splitting strategies - virtual llama_ubatch ubatch_next(llama_sbatch & sbatch, uint32_t n_ubatch, bool embd_pooled) const = 0; - - // find an empty slot of size "n_tokens" in the cache - virtual bool find_slot(const llama_ubatch & batch) = 0; - - // getters - virtual int32_t get_n_tokens() const = 0; - virtual int32_t get_used_cells() const = 0; // TODO: remove, this is too-specific to the unified cache - virtual llama_pos get_pos_max() const = 0; - virtual bool get_can_shift() const = 0; - - bool get_can_edit() const override { return get_can_shift(); } - - // - // state write/read - // - - virtual void state_write(llama_io_write_i & io, llama_seq_id seq_id = -1) const = 0; - virtual void state_read (llama_io_read_i & io, llama_seq_id seq_id = -1) = 0; -}; - -// -// llama_kv_cache_guard -// - -struct llama_kv_cache_guard { - llama_kv_cache_guard(llama_kv_cache * kv) : kv(kv) {} - - ~llama_kv_cache_guard() { - kv->restore(); - } - - void commit() { - kv->commit(); - } - -private: - llama_kv_cache * kv; -}; - -// block of KV slots to move when defragging -struct llama_kv_defrag_move { - uint32_t src; - uint32_t dst; - uint32_t len; -}; - -// -// llama_kv_cache_unified -// - -// TODO: add notion of max sequences -class llama_kv_cache_unified : public llama_kv_cache { -public: - struct kv_cell { - llama_pos pos = -1; - llama_pos delta = 0; - - std::set seq_id; - - bool has_seq_id(const llama_seq_id & id) const { - return seq_id.find(id) != seq_id.end(); - } - - bool is_empty() const { - return seq_id.empty(); - } - - bool is_same_seq(const kv_cell & other) const { - return seq_id == other.seq_id; - } - }; - - static uint32_t get_padding(const llama_cparams & cparams); - - llama_kv_cache_unified( - const llama_model & model, - ggml_type type_k, - ggml_type type_v, - bool v_trans, - bool offload, - uint32_t kv_size, - uint32_t padding); - - ~llama_kv_cache_unified() = default; - - // - // llama_memory_i - // - - void clear() override; - - bool seq_rm (llama_seq_id seq_id, llama_pos p0, llama_pos p1) override; - void seq_cp (llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) override; - void seq_keep(llama_seq_id seq_id) override; - void seq_add (llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos delta) override; - void seq_div (llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) override; - - llama_pos seq_pos_max(llama_seq_id seq_id) const override; - - // - // llama_kv_cache - // - - void restore() override; - void commit() override; - - bool update(llama_context & ctx) override; - - void defrag_sched(float thold) override; - - void set_full() override; - - llama_sbatch sbatch_init(const llama_batch & batch, bool logits_all) override; - - llama_ubatch ubatch_next(llama_sbatch & sbatch, uint32_t n_ubatch, bool embd_pooled) const override; - - // updates the cache head - // Note: On success, it's important that cache.head points - // to the first cell of the slot. 
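// (illustrative sketch, not part of the original source) how find_slot() and
// the llama_kv_cache_guard above are meant to combine in a decode step;
// restore() in the destructor undoes pending cell updates unless commit() ran:
//
//   llama_kv_cache_guard kvg(kv);
//   if (!kv->find_slot(ubatch)) {
//       return 1;      // guard restores the previous cache state on exit
//   }
//   ... build and compute the graph ...
//   kvg.commit();      // keep the newly written cells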
-    bool find_slot(const llama_ubatch & batch) override;
-
-    int32_t get_n_tokens() const override;
-    int32_t get_used_cells() const override;
-
-    // TODO: better data structures to reduce the cost of this operation
-    llama_pos get_pos_max() const override;
-
-    bool get_can_shift() const override;
-
-    // state write/load
-
-    void state_write(llama_io_write_i & io, llama_seq_id seq_id = -1) const override;
-    void state_read (llama_io_read_i & io, llama_seq_id seq_id = -1) override;
-
-    // Note: The value of head isn't only used to optimize searching
-    // for a free KV slot. llama_decode_impl also uses it, so it
-    // cannot be freely changed after a slot has been allocated.
-    uint32_t head = 0;
-    uint32_t size = 0;
-    uint32_t used = 0; // used cells (i.e. at least one seq_id)
-
-    // computed before each graph build
-    uint32_t n = 0;
-
-    std::vector<kv_cell> cells;
-
-    std::vector<ggml_tensor *> k_l; // per layer
-    std::vector<ggml_tensor *> v_l;
-
-private:
-    const llama_model & model;
-    const llama_hparams & hparams;
-
-    bool has_shift = false;
-    bool do_defrag = false;
-
-    bool v_trans = true; // the value tensor is transposed
-    bool can_shift = false;
-
-    // required padding
-    uint32_t padding = 1;
-
-    ggml_type type_k = GGML_TYPE_F16;
-    ggml_type type_v = GGML_TYPE_F16;
-
-    std::vector<ggml_context_ptr> ctxs;
-    std::vector<ggml_backend_buffer_ptr> bufs;
-
-    // defrag
-    struct {
-        std::vector<llama_kv_defrag_move> moves;
-    } defrag_info;
-
-    // return true if cells have been moved
-    bool defrag_prepare(int32_t n_max_nodes);
-
-    // commit/restore cache
-    struct slot_range {
-        uint32_t c0 = 0; // note: these are cell indices, not sequence positions
-        uint32_t c1 = 0;
-    };
-
-    // pending cell updates that are not yet committed
-    struct {
-        std::vector<slot_range> ranges;
-    } pending;
-
-    // find how many cells are currently in use
-    uint32_t cell_max() const;
-
-    size_t total_size() const;
-
-    size_t size_k_bytes() const;
-    size_t size_v_bytes() const;
-
-    ggml_tensor * build_rope_shift(
-        const llama_cparams & cparams,
-        ggml_context * ctx,
-        ggml_tensor * cur,
-        ggml_tensor * shift,
-        ggml_tensor * factors,
-        float freq_base,
-        float freq_scale) const;
-
-    llm_graph_result_ptr build_graph_shift(
-        const llama_cparams & cparams,
-        ggml_context * ctx,
-        ggml_cgraph * gf) const;
-
-    llm_graph_result_ptr build_graph_defrag(
-        const llama_cparams & cparams,
-        ggml_context * ctx,
-        ggml_cgraph * gf,
-        const std::vector<llama_kv_defrag_move> & moves) const;
-
-    void state_write_meta(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges, llama_seq_id seq_id = -1) const;
-    void state_write_data(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges) const;
-
-    bool state_read_meta(llama_io_read_i & io, uint32_t cell_count, llama_seq_id dest_seq_id = -1);
-    bool state_read_data(llama_io_read_i & io, uint32_t cell_count);
-};
-
-//
-// llama_kv_cache_recurrent
-//
-
-class llama_kv_cache_recurrent : public llama_kv_cache {
-public:
-    struct kv_cell {
-        llama_pos pos = -1;
-        int32_t src = -1; // used to copy states
-        int32_t tail = -1;
-
-        std::set<llama_seq_id> seq_id;
-
-        bool has_seq_id(const llama_seq_id & id) const {
-            return seq_id.find(id) != seq_id.end();
-        }
-
-        bool is_empty() const {
-            return seq_id.empty();
-        }
-
-        bool is_same_seq(const kv_cell & other) const {
-            return seq_id == other.seq_id;
-        }
-    };
-
-    llama_kv_cache_recurrent(
-        const llama_model & model,
-        ggml_type type_k,
-        ggml_type type_v,
-        bool offload,
-        uint32_t kv_size);
-
-    ~llama_kv_cache_recurrent() = default;
-
-    //
-    // llama_memory_i
-    //
-
-    void clear() override;
-
-    bool seq_rm  (llama_seq_id seq_id, llama_pos p0, llama_pos p1) override;
-    void seq_cp  (llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) override;
-    void seq_keep(llama_seq_id seq_id) override;
-    void seq_add (llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos delta) override;
-    void seq_div (llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) override;
-
-    llama_pos seq_pos_max(llama_seq_id seq_id) const override;
-
-    //
-    // llama_kv_cache
-    //
-
-    void restore() override;
-    void commit() override;
-
-    bool update(llama_context & lctx) override;
-
-    void defrag_sched(float thold) override;
-
-    void set_full() override;
-
-    llama_sbatch sbatch_init(const llama_batch & batch, bool logits_all) override;
-
-    llama_ubatch ubatch_next(llama_sbatch & sbatch, uint32_t n_ubatch, bool embd_pooled) const override;
-
-    bool find_slot(const llama_ubatch & batch) override;
-
-    int32_t get_n_tokens() const override;
-    int32_t get_used_cells() const override;
-
-    // TODO: better data structures to reduce the cost of this operation
-    llama_pos get_pos_max() const override;
-
-    bool get_can_shift() const override;
-
-    // TODO: temporary methods - they are not really const as they do const_cast<>, fix this
-    int32_t s_copy(int i) const;
-    float s_mask(int i) const;
-
-    // state write/load
-
-    void state_write(llama_io_write_i & io, llama_seq_id seq_id = -1) const override;
-    void state_read (llama_io_read_i & io, llama_seq_id seq_id = -1) override;
-
-    // Note: The value of head isn't only used to optimize searching
-    // for a free KV slot. llama_decode_impl also uses it, so it
-    // cannot be freely changed after a slot has been allocated.
-    uint32_t head = 0;
-    uint32_t size = 0;
-    uint32_t used = 0; // used cells (i.e. at least one seq_id)
-
-    // computed before each graph build
-    uint32_t n = 0;
-
-    std::vector<kv_cell> cells;
-
-    std::vector<ggml_tensor *> k_l; // per layer
-    std::vector<ggml_tensor *> v_l;
-
-private:
-    //const llama_model & model;
-    const llama_hparams & hparams;
-
-    // commit/restore cache
-    // TODO: rework for recurrent cache
-    struct slot_range {
-        uint32_t c0 = 0; // note: these are cell indices, not sequence positions
-        uint32_t c1 = 0;
-    };
-
-    // pending cell updates that are not yet committed
-    struct {
-        std::vector<slot_range> ranges;
-    } pending;
-
-    ggml_type type_k = GGML_TYPE_F16;
-    ggml_type type_v = GGML_TYPE_F16;
-
-    std::vector<ggml_context_ptr> ctxs;
-    std::vector<ggml_backend_buffer_ptr> bufs;
-
-    // find how many cells are currently in use
-    uint32_t cell_max() const;
-
-    size_t total_size() const;
-
-    size_t size_k_bytes() const;
-    size_t size_v_bytes() const;
-
-    void state_write_meta(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges, llama_seq_id seq_id = -1) const;
-    void state_write_data(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges) const;
-
-    bool state_read_meta(llama_io_read_i & io, uint32_t cell_count, llama_seq_id dest_seq_id = -1);
-    bool state_read_data(llama_io_read_i & io, uint32_t cell_count);
-};
-
-
-//
-// kv cache view
-//
-
-llama_kv_cache_view llama_kv_cache_view_init(const llama_kv_cache & kv, int32_t n_seq_max);
-
-void llama_kv_cache_view_update(llama_kv_cache_view * view, const llama_kv_cache * kv);
diff --git a/llama/llama.cpp/src/llama-kv-cells.h b/llama/llama.cpp/src/llama-kv-cells.h
new file mode 100644
index 000000000..0d0dd316f
--- /dev/null
+++ b/llama/llama.cpp/src/llama-kv-cells.h
@@ -0,0 +1,491 @@
+#pragma once
+
+#include "llama.h"
+#include "llama-cparams.h"
+
+#include <bitset>
+#include <cassert>
+#include <vector>
+#include <map>
+#include <set>
+
+// meta information about KV cells that can be part of multiple sequences at the same time
+// TODO: add unit tests
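// (illustrative example, not part of the original source) intended lifecycle
// of this container, assuming a small cache of 8 cells and a single sequence:
//
//   llama_kv_cells_unified cells;
//   cells.resize(8);
//   cells.pos_set(0, 0);     // place position 0 in cell 0 ...
//   cells.seq_add(0, 0);     // ... and tag it with seq_id 0
//   cells.pos_set(1, 1);
//   cells.seq_add(1, 0);
//   cells.seq_rm(1, 0);      // last seq_id removed -> cell 1 becomes empty
//   // now: cells.get_used() == 1, cells.seq_pos_max(0) == 0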
+class llama_kv_cells_unified {
+public:
+    void reset() {
+        for (uint32_t i = 0; i < pos.size(); ++i) {
+            pos[i] = -1;
+            shift[i] = 0;
+            seq[i].reset();
+        }
+
+        has_shift = false;
+
+        used.clear();
+
+        for (uint32_t s = 0; s < LLAMA_MAX_SEQ; ++s) {
+            seq_pos[s].clear();
+        }
+    }
+
+    void reset_shift() {
+        has_shift = false;
+
+        for (uint32_t i = 0; i < shift.size(); ++i) {
+            shift[i] = 0;
+        }
+    }
+
+    uint32_t size() const {
+        return pos.size();
+    }
+
+    void resize(uint32_t n) {
+        pos.resize(n);
+        shift.resize(n);
+        seq.resize(n);
+
+        reset();
+    }
+
+    bool is_empty(uint32_t i) const {
+        assert(i < pos.size());
+        assert((pos[i] < 0 && pos[i] == -1) || pos[i] >= 0);
+
+        return pos[i] == -1;
+    }
+
+    uint32_t get_used() const {
+        return used.size();
+    }
+
+    // the index of the first cell that is used
+    // return 0 if no cells are used
+    uint32_t used_min() const {
+        return used.empty() ? 0 : *used.begin();
+    }
+
+    // the index of the last cell that is used + 1
+    // return 0 if no cells are used
+    uint32_t used_max_p1() const {
+        return used.empty() ? 0 : *used.rbegin() + 1;
+    }
+
+    bool get_has_shift() const {
+        return has_shift;
+    }
+
+    // move cell isrc to idst (used during defrag)
+    void mv(uint32_t isrc, uint32_t idst) {
+        assert(isrc < pos.size());
+        assert(idst < pos.size());
+
+        assert(pos[idst] == -1);
+        assert(pos[isrc] != -1);
+
+        pos  [idst] = pos  [isrc];
+        shift[idst] = shift[isrc];
+        seq  [idst] = seq  [isrc];
+
+        pos  [isrc] = -1;
+        shift[isrc] = 0;
+        seq  [isrc].reset();
+
+        used.erase (isrc);
+        used.insert(idst);
+    }
+
+    // copy the state of cells [i, i + n) (used for save/restore the state of the cells)
+    llama_kv_cells_unified cp(uint32_t i, uint32_t n) const {
+        assert(i + n <= pos.size());
+
+        llama_kv_cells_unified res;
+
+        res.resize(n);
+
+        for (uint32_t j = 0; j < n; ++j) {
+            const auto idx = i + j;
+
+            res.pos[j] = pos[idx];
+            res.seq[j] = seq[idx];
+
+            assert(shift[idx] == 0);
+        }
+
+        return res;
+    }
+
+    // copy the state of cells [idxs[0], idxs[1], ..., idxs[idxs.size() - 1])
+    llama_kv_cells_unified cp(const std::vector<uint32_t> & idxs) const {
+        llama_kv_cells_unified res;
+
+        res.resize(idxs.size());
+
+        for (uint32_t j = 0; j < idxs.size(); ++j) {
+            const auto idx = idxs[j];
+
+            res.pos[j] = pos[idx];
+            res.seq[j] = seq[idx];
+
+            assert(shift[idx] == 0);
+        }
+
+        return res;
+    }
+
+    // set the state of cells [i, i + other.pos.size()) (used for save/restore the state of the cells)
+    void set(uint32_t i, const llama_kv_cells_unified & other) {
+        assert(i + other.pos.size() <= pos.size());
+
+        for (uint32_t j = 0; j < other.pos.size(); ++j) {
+            const auto idx = i + j;
+
+            if (pos[idx] == -1 && other.pos[j] != -1) {
+                used.insert(i + j);
+            }
+
+            if (pos[idx] != -1 && other.pos[j] == -1) {
+                used.erase(i + j);
+            }
+
+            if (pos[idx] != -1) {
+                seq_pos_rm(i + j);
+            }
+
+            pos[idx] = other.pos[j];
+            seq[idx] = other.seq[j];
+
+            if (pos[idx] != -1) {
+                seq_pos_add(i + j);
+            }
+
+            assert(shift[idx] == 0);
+        }
+    }
+
+    // set the state of cells [idxs[0], idxs[1], ..., idxs[idxs.size() - 1])
+    void set(const std::vector<uint32_t> & idxs, const llama_kv_cells_unified & other) {
+        assert(idxs.size() == other.pos.size());
+
+        for (uint32_t j = 0; j < other.pos.size(); ++j) {
+            const auto idx = idxs[j];
+
+            if (pos[idx] == -1 && other.pos[j] != -1) {
+                used.insert(idx);
+            }
+
+            if (pos[idx] != -1 && other.pos[j] == -1) {
+                used.erase(idx);
+            }
+
+            if (pos[idx] != -1) {
+                seq_pos_rm(idx);
+            }
+
+            pos[idx] = other.pos[j];
+            seq[idx] = other.seq[j];
+
+            if (pos[idx] != -1) {
+                seq_pos_add(idx);
+            }
+
+            
assert(shift[idx] == 0); + } + } + + // clear a non-empty cell + void rm(uint32_t i) { + assert(i < pos.size()); + assert(pos[i] != -1); + + seq_pos_rm(i); + seq[i].reset(); + + pos[i] = -1; + shift[i] = 0; + + used.erase(i); + } + + // note: call only if the cell has seq_id + // return true if the cell becomes empty + bool seq_rm(uint32_t i, llama_seq_id seq_id) { + assert(i < pos.size()); + assert(seq[i].test(seq_id)); + assert(pos[i] != -1); + assert(seq_id >= 0); + + seq[i].reset(seq_id); + seq_pos_dec(seq_id, pos[i]); + + if (seq[i].none()) { + pos[i] = -1; + shift[i] = 0; + + used.erase(i); + + return true; + } + + return false; + } + + // return true if the cell becomes empty (i.e. it did not contain seq_id before the call) + bool seq_keep(uint32_t i, llama_seq_id seq_id) { + assert(i < pos.size()); + + if (seq[i].test(seq_id)) { + seq_pos_rm(i); + seq[i].reset(); + + seq[i].set(seq_id); + seq_pos_inc(seq_id, pos[i]); + + return false; + } + + if (seq[i].any()) { + seq_pos_rm(i); + seq[i].reset(); + + pos[i] = -1; + shift[i] = 0; + + used.erase(i); + + return true; + } + + assert(pos[i] == -1); + + return false; + } + + // number of different sequences in the cell + int seq_count(uint32_t i) const { + assert(i < pos.size()); + assert(pos[i] != -1); + + return seq[i].count(); + } + + // check if the cell contains seq_id + bool seq_has(uint32_t i, llama_seq_id seq_id) const { + assert(i < pos.size()); + assert(seq_id >= 0); + + return seq[i].test(seq_id); + } + + // note: call only if the cell is not empty and the seq_id is not in the cell + void seq_add(uint32_t i, llama_seq_id seq_id) { + assert(i < pos.size()); + assert(pos[i] != -1); + assert(!seq[i].test(seq_id)); + + seq[i].set(seq_id); + seq_pos_inc(seq_id, pos[i]); + } + + // return the sequence id of this cell + // note: call only for cells with exactly one sequence + llama_seq_id seq_get(uint32_t i) const { + assert(seq[i].count() == 1); + + for (int s = 0; s < LLAMA_MAX_SEQ; ++s) { + if (seq[i].test(s)) { + return s; + } + } + + return -1; + } + + // the minimum position of sequence seq_id currently present in any of the cells + // return -1 if the sequence is not present + llama_pos seq_pos_min(llama_seq_id seq_id) const { + assert(seq_id >= 0); + assert(seq_id < LLAMA_MAX_SEQ); + + if (seq_pos[seq_id].empty()) { + return -1; + } + + assert(seq_pos[seq_id].begin()->second > 0); + + return seq_pos[seq_id].begin()->first; + } + + // the maximum position of sequence seq_id currently present in any of the cells + // return -1 if the sequence is not present + llama_pos seq_pos_max(llama_seq_id seq_id) const { + assert(seq_id >= 0); + assert(seq_id < LLAMA_MAX_SEQ); + + if (seq_pos[seq_id].empty()) { + return -1; + } + + assert(seq_pos[seq_id].rbegin()->second > 0); + + return seq_pos[seq_id].rbegin()->first; + } + + // note: call only if the cell is not empty + llama_pos pos_get(uint32_t i) const { + assert(i < pos.size()); + assert(pos[i] != -1); + + return pos[i]; + } + + // note: call only if the cell is not empty + llama_pos get_shift(uint32_t i) const { + assert(i < pos.size()); + assert(pos[i] != -1); + + return shift[i]; + } + + // check if a cell is not empty and its position is within [p0, p1) + bool pos_in(uint32_t i, llama_pos p0, llama_pos p1) const { + assert(i < pos.size()); + + return pos[i] >= p0 && pos[i] < p1; + } + + // set the position of an empty cell + // does not modify "has_shift" + // note: call only if the cell is empty + void pos_set(uint32_t i, llama_pos p) { + assert(i < pos.size()); + assert(pos[i] 
== -1);
+        assert(seq[i].none());
+
+        pos[i] = p;
+
+        used.insert(i);
+    }
+
+    // pos[i] = pos[i] + d
+    // sets "has_shift" to true
+    // note: call only if the cell is not empty
+    bool pos_add(uint32_t i, llama_pos d) {
+        assert(i < pos.size());
+        assert(pos[i] != -1);
+
+        seq_pos_rm(i);
+
+        pos[i]   += d;
+        shift[i] += d;
+
+        has_shift = true;
+
+        if (pos[i] < 0) {
+            seq[i].reset();
+            pos[i] = -1;
+            shift[i] = 0;
+
+            used.erase(i);
+
+            return true;
+        }
+
+        seq_pos_add(i);
+
+        return false;
+    }
+
+    // pos[i] = pos[i] / d
+    // sets "has_shift" to true
+    // note: call only if the cell is not empty
+    void pos_div(uint32_t i, int d) {
+        assert(i < pos.size());
+        assert(pos[i] != -1);
+
+        const llama_pos p_old = pos[i];
+
+        seq_pos_rm(i);
+
+        pos[i]   /= d;
+        shift[i] += p_old - pos[i];
+
+        seq_pos_add(i);
+
+        has_shift = true;
+    }
+
+private:
+    bool has_shift = false;
+
+    // set of indices of used cells (i.e. pos[i] != -1, allowed to not have any seq_id)
+    std::set<uint32_t> used;
+
+    std::vector<llama_pos> pos;
+
+    // this array accumulates any applied shifts to the pos array since the last reset_shift() call
+    // this is used to queue multiple updates to the pos array, which in the end can be applied in one go:
+    //
+    //   cells.pos_add(x, shift_x);
+    //   cells.pos_div(y, shift_y);
+    //   ...
+    //
+    //   if (cells.has_shift()) {
+    //       for (int i = 0; i < n; ++i) {
+    //           auto shift_i = cells.get_shift(i);
+    //           ...
+    //       }
+    //       cells.reset_shift();
+    //   }
+    //
+    std::vector<llama_pos> shift;
+
+    using seq_set_t = std::bitset<LLAMA_MAX_SEQ>;
+
+    // the bitset seq[i] tells us which sequences are currently occupying the i-th cell
+    std::vector<seq_set_t> seq;
+
+    // the set seq_pos[s][p] tells us how many times the position p is currently present for sequence s
+    // if the position p is not present, seq_pos[s][p] is not set
+    // this way seq_pos[s].begin() and seq_pos[s].rbegin() give us the min/max positions currently in the cache
+    //
+    // note that we cannot use an std::set because in some cases a position can occur more than once for the same seq:
+    //   - during performing a cache reuse via (rm + add)
+    //   - some vision models have input embeddings with repeating positions
+    //
+    std::map<llama_pos, int> seq_pos[LLAMA_MAX_SEQ];
+
+    // helper functions for updating `seq_pos`, one cell at a time:
+
+    void seq_pos_dec(llama_seq_id s, llama_pos p) {
+        auto it = seq_pos[s].find(p);
+        assert(it != seq_pos[s].end());
+
+        if (--it->second == 0) {
+            seq_pos[s].erase(it);
+        }
+    }
+
+    void seq_pos_inc(llama_seq_id s, llama_pos p) {
+        seq_pos[s][p]++;
+    }
+
+    // remove cell i
+    void seq_pos_rm(uint32_t i) {
+        for (int s = 0; s < LLAMA_MAX_SEQ; ++s) {
+            if (seq[i].test(s)) {
+                seq_pos_dec(s, pos[i]);
+            }
+        }
+    }
+
+    // add cell i
+    void seq_pos_add(uint32_t i) {
+        for (int s = 0; s < LLAMA_MAX_SEQ; ++s) {
+            if (seq[i].test(s)) {
+                seq_pos_inc(s, pos[i]);
+            }
+        }
+    }
+};
diff --git a/llama/llama.cpp/src/llama-memory-hybrid.cpp b/llama/llama.cpp/src/llama-memory-hybrid.cpp
new file mode 100644
index 000000000..e98b4e354
--- /dev/null
+++ b/llama/llama.cpp/src/llama-memory-hybrid.cpp
@@ -0,0 +1,253 @@
+#include "llama-memory-hybrid.h"
+
+#include "llama-impl.h"
+#include "llama-model.h"
+#include "llama-context.h"
+
+//
+// llama_memory_hybrid
+//
+
+llama_memory_hybrid::llama_memory_hybrid(
+        const llama_model & model,
+        /* attn */
+        ggml_type type_k,
+        ggml_type type_v,
+        bool v_trans,
+        uint32_t kv_size,
+        uint32_t n_pad,
+        uint32_t n_swa,
+        llama_swa_type swa_type,
+        /* recurrent */
+        ggml_type type_r,
+        ggml_type type_s,
+        uint32_t rs_size,
+        /* common */
+        uint32_t n_seq_max,
+        bool offload,
+        bool unified,
+        /* layer filters */
+        layer_filter_cb && filter_attn,
+        layer_filter_cb && filter_recr) :
+    hparams(model.hparams),
+    mem_attn(new llama_kv_cache_unified(
+        model,
+        filter_attn == nullptr ?
+            [&](int32_t il) { return !hparams.is_recurrent(il); }
+            : filter_attn,
+        type_k,
+        type_v,
+        v_trans,
+        offload,
+        unified,
+        kv_size,
+        n_seq_max,
+        n_pad,
+        n_swa,
+        swa_type
+    )),
+    mem_recr(new llama_memory_recurrent(
+        model,
+        filter_recr == nullptr ?
+            [&](int32_t il) { return hparams.is_recurrent(il); }
+            : filter_recr,
+        type_r,
+        type_s,
+        offload,
+        rs_size,
+        n_seq_max
+    )) {}
+
+llama_memory_context_ptr llama_memory_hybrid::init_batch(llama_batch_allocr & balloc, uint32_t n_ubatch, bool embd_all) {
+    do {
+        balloc.split_reset();
+
+        // follow the recurrent pattern for creating the ubatch splits
+        std::vector<llama_ubatch> ubatches;
+
+        while (true) {
+            llama_ubatch ubatch;
+
+            if (embd_all) {
+                // if all tokens are output, split by sequence
+                ubatch = balloc.split_seq(n_ubatch);
+            } else {
+                ubatch = balloc.split_equal(n_ubatch, false);
+            }
+
+            if (ubatch.n_tokens == 0) {
+                break;
+            }
+
+            ubatches.push_back(std::move(ubatch)); // NOLINT
+        }
+
+        if (balloc.get_n_used() < balloc.get_n_tokens()) {
+            // failed to find a suitable split
+            break;
+        }
+
+        // prepare the recurrent batches first
+        if (!mem_recr->prepare(ubatches)) {
+            // TODO: will the recurrent cache be in an undefined context at this point?
+            LLAMA_LOG_ERROR("%s: failed to prepare recurrent ubatches\n", __func__);
+            return std::make_unique<llama_memory_hybrid_context>(LLAMA_MEMORY_STATUS_FAILED_PREPARE);
+        }
+
+        // prepare the attention cache
+        auto heads_attn = mem_attn->prepare(ubatches);
+        if (heads_attn.empty()) {
+            LLAMA_LOG_ERROR("%s: failed to prepare attention ubatches\n", __func__);
+            return std::make_unique<llama_memory_hybrid_context>(LLAMA_MEMORY_STATUS_FAILED_PREPARE);
+        }
+
+        return std::make_unique<llama_memory_hybrid_context>(
+            this, std::move(heads_attn), std::move(ubatches));
+    } while(false);
+
+    return std::make_unique<llama_memory_hybrid_context>(LLAMA_MEMORY_STATUS_FAILED_PREPARE);
+}
+
+llama_memory_context_ptr llama_memory_hybrid::init_full() {
+    return std::make_unique<llama_memory_hybrid_context>(this);
+}
+
+llama_memory_context_ptr llama_memory_hybrid::init_update(llama_context * lctx, bool optimize) {
+    return std::make_unique<llama_memory_hybrid_context>(this, lctx, optimize);
+}
+
+bool llama_memory_hybrid::get_can_shift() const {
+    // Shifting is trivially supported for recurrent
+    return mem_attn->get_can_shift();
+}
+
+void llama_memory_hybrid::clear(bool data) {
+    mem_attn->clear(data);
+    mem_recr->clear(data);
+}
+
+bool llama_memory_hybrid::seq_rm(llama_seq_id seq_id, llama_pos p0, llama_pos p1) {
+    // Try removing from the recurrent cache first since it may fail. If it does
+    // fail, the cache will not have been mutated.
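+    // (illustrative note: a recurrent state cannot be partially erased, so a
+    //  range [p0, p1) covering only part of a sequence is rejected by
+    //  mem_recr->seq_rm() before the attention cache is touched; only ranges
+    //  the recurrent cache accepts ever reach mem_attn->seq_rm())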
+ if (!mem_recr->seq_rm(seq_id, p0, p1)) { + return false; + } + return mem_attn->seq_rm(seq_id, p0, p1); +} + +void llama_memory_hybrid::seq_cp(llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) { + mem_attn->seq_cp(seq_id_src, seq_id_dst, p0, p1); + mem_recr->seq_cp(seq_id_src, seq_id_dst, p0, p1); +} + +void llama_memory_hybrid::seq_keep(llama_seq_id seq_id) { + mem_attn->seq_keep(seq_id); + mem_recr->seq_keep(seq_id); +} + +void llama_memory_hybrid::seq_add(llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos shift) { + mem_attn->seq_add(seq_id, p0, p1, shift); + mem_recr->seq_add(seq_id, p0, p1, shift); +} + +void llama_memory_hybrid::seq_div(llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) { + mem_attn->seq_div(seq_id, p0, p1, d); + mem_recr->seq_div(seq_id, p0, p1, d); +} + +llama_pos llama_memory_hybrid::seq_pos_min(llama_seq_id seq_id) const { + // the min of the total cache is the max of the two caches' min values + return std::max(mem_attn->seq_pos_min(seq_id), mem_recr->seq_pos_min(seq_id)); +} + +llama_pos llama_memory_hybrid::seq_pos_max(llama_seq_id seq_id) const { + // the max of the total cache is the min of the two caches' max values + return std::min(mem_attn->seq_pos_max(seq_id), mem_recr->seq_pos_max(seq_id)); +} + +void llama_memory_hybrid::state_write(llama_io_write_i & io, llama_seq_id seq_id) const { + mem_attn->state_write(io, seq_id); + mem_recr->state_write(io, seq_id); +} + +void llama_memory_hybrid::state_read(llama_io_read_i & io, llama_seq_id seq_id) { + mem_attn->state_read(io, seq_id); + mem_recr->state_read(io, seq_id); +} + +llama_kv_cache_unified * llama_memory_hybrid::get_mem_attn() const { + return mem_attn.get(); +} + +llama_memory_recurrent * llama_memory_hybrid::get_mem_recr() const { + return mem_recr.get(); +} + +llama_memory_hybrid_context::llama_memory_hybrid_context(llama_memory_status status) : status(status) {} + +llama_memory_hybrid_context::llama_memory_hybrid_context(llama_memory_hybrid * mem) : + ctx_attn(mem->get_mem_attn()->init_full()), + ctx_recr(mem->get_mem_recr()->init_full()), + status(llama_memory_status_combine(ctx_attn->get_status(), ctx_recr->get_status())) { +} + +llama_memory_hybrid_context::llama_memory_hybrid_context( + llama_memory_hybrid * mem, + llama_context * lctx, + bool optimize) : + ctx_attn(mem->get_mem_attn()->init_update(lctx, optimize)), + ctx_recr(mem->get_mem_recr()->init_update(lctx, optimize)), + status(llama_memory_status_combine(ctx_attn->get_status(), ctx_recr->get_status())) { +} + +llama_memory_hybrid_context::llama_memory_hybrid_context( + llama_memory_hybrid * mem, + slot_info_vec_t sinfos_attn, + std::vector ubatches) : + ubatches(std::move(ubatches)), + // note: here we copy the ubatches. 
not sure if this is ideal
+    ctx_attn(new llama_kv_cache_unified_context(mem->get_mem_attn(), std::move(sinfos_attn), this->ubatches)),
+    ctx_recr(new llama_memory_recurrent_context(mem->get_mem_recr(), this->ubatches)),
+    status(llama_memory_status_combine(ctx_attn->get_status(), ctx_recr->get_status())) {
+}
+
+bool llama_memory_hybrid_context::next() {
+    assert(status == LLAMA_MEMORY_STATUS_SUCCESS);
+
+    ctx_attn->next();
+    ctx_recr->next();
+
+    if (++i_next >= ubatches.size()) {
+        return false;
+    }
+
+    return true;
+}
+
+bool llama_memory_hybrid_context::apply() {
+    assert(!llama_memory_status_is_fail(status));
+
+    bool res = true;
+
+    res = res & ctx_attn->apply();
+    res = res & ctx_recr->apply();
+
+    return res;
+}
+
+llama_memory_status llama_memory_hybrid_context::get_status() const {
+    return status;
+}
+
+const llama_ubatch & llama_memory_hybrid_context::get_ubatch() const {
+    assert(status == LLAMA_MEMORY_STATUS_SUCCESS);
+    return ubatches[i_next];
+}
+
+const llama_kv_cache_unified_context * llama_memory_hybrid_context::get_attn() const {
+    return static_cast<const llama_kv_cache_unified_context *>(ctx_attn.get());
+}
+
+const llama_memory_recurrent_context * llama_memory_hybrid_context::get_recr() const {
+    return static_cast<const llama_memory_recurrent_context *>(ctx_recr.get());
+}
diff --git a/llama/llama.cpp/src/llama-memory-hybrid.h b/llama/llama.cpp/src/llama-memory-hybrid.h
new file mode 100644
index 000000000..c2d56cd54
--- /dev/null
+++ b/llama/llama.cpp/src/llama-memory-hybrid.h
@@ -0,0 +1,141 @@
+#pragma once
+
+#include "llama-batch.h"
+#include "llama-graph.h"
+#include "llama-kv-cache-unified.h"
+#include "llama-memory.h"
+#include "llama-memory-recurrent.h"
+
+#include <memory>
+#include <vector>
+
+//
+// llama_memory_hybrid
+//
+
+// utilizes instances of llama_memory_recurrent and llama_kv_cache_unified to
+// support models where each layer may be either attention-based or recurrent
+
+class llama_memory_hybrid : public llama_memory_i {
+public:
+
+    // this callback is used to filter out layers that should not be included in the cache
+    using layer_filter_cb = std::function<bool(int32_t il)>;
+
+    llama_memory_hybrid(
+        const llama_model & model,
+        /* attn */
+        ggml_type type_k,
+        ggml_type type_v,
+        bool v_trans,
+        uint32_t kv_size,
+        uint32_t n_pad,
+        uint32_t n_swa,
+        llama_swa_type swa_type,
+        /* recurrent */
+        ggml_type type_r,
+        ggml_type type_s,
+        uint32_t rs_size,
+        /* common */
+        uint32_t n_seq_max,
+        bool offload,
+        bool unified,
+        /* layer filters */
+        layer_filter_cb && filter_attn = nullptr,
+        layer_filter_cb && filter_recr = nullptr);
+
+    ~llama_memory_hybrid() = default;
+
+    //
+    // llama_memory_i
+    //
+
+    llama_memory_context_ptr init_batch(
+            llama_batch_allocr & balloc,
+            uint32_t n_ubatch,
+            bool embd_all) override;
+
+    llama_memory_context_ptr init_full() override;
+
+    llama_memory_context_ptr init_update(llama_context * lctx, bool optimize) override;
+
+    bool get_can_shift() const override;
+
+    void clear(bool data) override;
+
+    bool seq_rm  (llama_seq_id seq_id, llama_pos p0, llama_pos p1) override;
+    void seq_cp  (llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) override;
+    void seq_keep(llama_seq_id seq_id) override;
+    void seq_add (llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos shift) override;
+    void seq_div (llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) override;
+
+    llama_pos seq_pos_min(llama_seq_id seq_id) const override;
+    llama_pos seq_pos_max(llama_seq_id seq_id) const override;
+
+    // state write/load
+
+    void state_write(llama_io_write_i & io, llama_seq_id seq_id = -1) const override;
+    void state_read (llama_io_read_i & io, llama_seq_id seq_id = -1) override;
+
+    //
+    // llama_memory_hybrid specific API
+    //
+
+    llama_kv_cache_unified * get_mem_attn() const;
+    llama_memory_recurrent * get_mem_recr() const;
+
+private:
+    const llama_hparams & hparams;
+
+    const std::unique_ptr<llama_kv_cache_unified> mem_attn;
+    const std::unique_ptr<llama_memory_recurrent> mem_recr;
+};
+
+class llama_memory_hybrid_context : public llama_memory_context_i {
+public:
+    using slot_info_vec_t = llama_kv_cache_unified::slot_info_vec_t;
+
+    // init failure
+    explicit llama_memory_hybrid_context(llama_memory_status status);
+
+    // init full
+    explicit llama_memory_hybrid_context(llama_memory_hybrid * mem);
+
+    // init update
+    explicit llama_memory_hybrid_context(
+        llama_memory_hybrid * mem,
+        llama_context * lctx,
+        bool optimize);
+
+    // init success
+    llama_memory_hybrid_context(
+        llama_memory_hybrid * mem,
+        slot_info_vec_t sinfos_attn,
+        std::vector<llama_ubatch> ubatches);
+
+    ~llama_memory_hybrid_context() = default;
+
+    bool next()  override;
+    bool apply() override;
+
+    llama_memory_status  get_status() const override;
+    const llama_ubatch & get_ubatch() const override;
+
+    //
+    // llama_memory_hybrid_context
+    //
+
+    const llama_kv_cache_unified_context * get_attn() const;
+    const llama_memory_recurrent_context * get_recr() const;
+
+private:
+    // the index of the next ubatch to process
+    size_t i_next = 0;
+
+    std::vector<llama_ubatch> ubatches;
+
+    const llama_memory_context_ptr ctx_attn;
+    const llama_memory_context_ptr ctx_recr;
+
+    const llama_memory_status status;
+};
diff --git a/llama/llama.cpp/src/llama-memory-recurrent.cpp b/llama/llama.cpp/src/llama-memory-recurrent.cpp
new file mode 100644
index 000000000..c0c2ec084
--- /dev/null
+++ b/llama/llama.cpp/src/llama-memory-recurrent.cpp
@@ -0,0 +1,1142 @@
+#include "llama-memory-recurrent.h"
+
+#include "llama-impl.h"
+#include "llama-io.h"
+#include "llama-batch.h"
+#include "llama-model.h"
+
+#include <algorithm>
+#include <cassert>
+#include <limits>
+#include <map>
+#include <stdexcept>
+
+//
+// llama_memory_recurrent
+//
+
+llama_memory_recurrent::llama_memory_recurrent(
+        const llama_model & model,
+        layer_filter_cb && filter,
+        ggml_type type_r,
+        ggml_type type_s,
+        bool offload,
+        uint32_t mem_size,
+        uint32_t n_seq_max) : hparams(model.hparams), n_seq_max(n_seq_max) {
+    const int32_t n_layer = hparams.n_layer;
+
+    head = 0;
+    size = mem_size;
+    used = 0;
+
+    cells.clear();
+    cells.resize(mem_size);
+
+    // create a context for each buffer type
+    std::map<ggml_backend_buffer_type_t, ggml_context *> ctx_map;
+    auto ctx_for_buft = [&](ggml_backend_buffer_type_t buft) -> ggml_context * {
+        auto it = ctx_map.find(buft);
+        if (it == ctx_map.end()) {
+            ggml_init_params params = {
+                /*.mem_size   =*/ size_t(2u*n_layer*ggml_tensor_overhead()),
+                /*.mem_buffer =*/ NULL,
+                /*.no_alloc   =*/ true,
+            };
+
+            ggml_context * ctx = ggml_init(params);
+            if (!ctx) {
+                return nullptr;
+            }
+
+            ctx_map[buft] = ctx;
+            ctxs.emplace_back(ctx);
+
+            return ctx;
+        }
+
+        return it->second;
+    };
+
+    r_l.resize(n_layer);
+    s_l.resize(n_layer);
+
+    for (int i = 0; i < n_layer; i++) {
+        if (filter && !filter(i)) {
+            LLAMA_LOG_DEBUG("%s: layer %3d: skipped\n", __func__, i);
+            continue;
+        }
+
+        const char * dev_name = "CPU";
+
+        ggml_backend_buffer_type_t buft = ggml_backend_cpu_buffer_type();
+
+        if (offload) {
+            auto * dev = model.dev_layer(i);
+            buft = ggml_backend_dev_buffer_type(dev);
+
+            dev_name = ggml_backend_dev_name(dev);
+        }
+
+        LLAMA_LOG_DEBUG("%s, layer %3d: dev = %s\n", __func__, i, dev_name);
+
+        ggml_context * ctx = ctx_for_buft(buft);
+        if (!ctx) {
+            throw std::runtime_error("failed to create ggml context 
for rs cache"); + } + + ggml_tensor * r = ggml_new_tensor_1d(ctx, type_r, hparams.n_embd_r()*mem_size); + ggml_tensor * s = ggml_new_tensor_1d(ctx, type_s, hparams.n_embd_s()*mem_size); + ggml_format_name(r, "cache_r_l%d", i); + ggml_format_name(s, "cache_s_l%d", i); + r_l[i] = r; + s_l[i] = s; + } + + // allocate tensors and initialize the buffers to avoid NaNs in the padding + for (auto it : ctx_map) { + auto * buft = it.first; + auto * ctx = it.second; + + ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx, buft); + if (!buf) { + throw std::runtime_error("failed to allocate buffer for rs cache"); + } + ggml_backend_buffer_clear(buf, 0); + LLAMA_LOG_INFO("%s: %10s RS buffer size = %8.2f MiB\n", __func__, ggml_backend_buffer_name(buf), ggml_backend_buffer_get_size(buf)/1024.0/1024.0); + bufs.emplace_back(buf); + } + + { + const size_t memory_size_r = size_r_bytes(); + const size_t memory_size_s = size_s_bytes(); + + LLAMA_LOG_INFO("%s: size = %7.2f MiB (%6u cells, %3d layers, %2u seqs), R (%s): %7.2f MiB, S (%s): %7.2f MiB\n", __func__, + (float)(memory_size_r + memory_size_s) / (1024.0f * 1024.0f), mem_size, n_layer, n_seq_max, + ggml_type_name(type_r), (float)memory_size_r / (1024.0f * 1024.0f), + ggml_type_name(type_s), (float)memory_size_s / (1024.0f * 1024.0f)); + } +} + +void llama_memory_recurrent::clear(bool data) { + for (int32_t i = 0; i < (int32_t) size; ++i) { + cells[i].pos = -1; + cells[i].seq_id.clear(); + cells[i].src = -1; + cells[i].tail = -1; + } + + head = 0; + used = 0; + + if (data) { + for (auto & buf : bufs) { + ggml_backend_buffer_clear(buf.get(), 0); + } + } +} + +bool llama_memory_recurrent::seq_rm(llama_seq_id seq_id, llama_pos p0, llama_pos p1) { + uint32_t new_head = size; + + if (p0 < 0) { + p0 = 0; + } + + if (p1 < 0) { + p1 = std::numeric_limits::max(); + } + + // models like Mamba or RWKV can't have a state partially erased + if (seq_id >= (int64_t) size) { + // could be fatal + return false; + } + if (0 <= seq_id) { + int32_t & tail_id = cells[seq_id].tail; + if (tail_id >= 0) { + const auto & cell = cells[tail_id]; + // partial intersection is invalid + if ((0 < p0 && p0 <= cell.pos) || (0 < p1 && p1 <= cell.pos)) { + return false; + } + // invalidate tails which will be cleared + if (p0 <= cell.pos && cell.pos < p1) { + tail_id = -1; + } + } + } else { + // seq_id is negative, then the range should include everything or nothing + if (p0 != p1 && (p0 != 0 || p1 != std::numeric_limits::max())) { + return false; + } + } + + for (uint32_t i = 0; i < size; ++i) { + if (cells[i].pos >= p0 && cells[i].pos < p1) { + if (seq_id < 0) { + cells[i].seq_id.clear(); + } else if (cells[i].has_seq_id(seq_id)) { + cells[i].seq_id.erase(seq_id); + } else { + continue; + } + if (cells[i].is_empty()) { + // keep count of the number of used cells + if (cells[i].pos >= 0) { + used--; + } + cells[i].pos = -1; + cells[i].src = -1; + if (new_head == size) { + new_head = i; + } + } + } + } + + // If we freed up a slot, set head to it so searching can start there. 
+    if (new_head != size && new_head < head) {
+        head = new_head;
+    }
+
+    return true;
+}
+
+void llama_memory_recurrent::seq_cp(llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) {
+    if (seq_id_src == seq_id_dst) {
+        return;
+    }
+
+    if (p0 < 0) {
+        p0 = 0;
+    }
+
+    if (p1 < 0) {
+        p1 = std::numeric_limits<llama_pos>::max();
+    }
+
+    if ((uint32_t) seq_id_dst < size && (uint32_t) seq_id_src < size) {
+        auto & tail_src = cells[seq_id_src];
+        auto & tail_dst = cells[seq_id_dst];
+        if (tail_dst.tail >= 0) {
+            // clear destination seq_id if it wasn't empty
+            auto & cell_dst = cells[tail_dst.tail];
+
+            cell_dst.seq_id.erase(seq_id_dst);
+            tail_dst.tail = -1;
+            if (cell_dst.seq_id.empty()) {
+                cell_dst.pos = -1;
+                cell_dst.src = -1;
+                used -= 1;
+            }
+        }
+        if (tail_src.tail >= 0) {
+            auto & cell_src = cells[tail_src.tail];
+
+            cell_src.seq_id.insert(seq_id_dst);
+            tail_dst.tail = tail_src.tail;
+        }
+    }
+}
+
+void llama_memory_recurrent::seq_keep(llama_seq_id seq_id) {
+    uint32_t new_head = size;
+
+    for (uint32_t i = 0; i < size; ++i) {
+        if ((llama_seq_id) i != seq_id) {
+            cells[i].tail = -1;
+        }
+
+        if (!cells[i].has_seq_id(seq_id)) {
+            if (cells[i].pos >= 0) {
+                used--;
+            }
+
+            cells[i].pos = -1;
+            cells[i].src = -1;
+            cells[i].seq_id.clear();
+
+            if (new_head == size){
+                new_head = i;
+            }
+        } else {
+            cells[i].seq_id.clear();
+            cells[i].seq_id.insert(seq_id);
+        }
+    }
+
+    // If we freed up a slot, set head to it so searching can start there.
+    if (new_head != size && new_head < head) {
+        head = new_head;
+    }
+}
+
+void llama_memory_recurrent::seq_add(llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos shift) {
+    if (shift == 0) {
+        return;
+    }
+
+    if (p0 < 0) {
+        p0 = 0;
+    }
+
+    if (p1 < 0) {
+        p1 = std::numeric_limits<llama_pos>::max();
+    }
+
+    // If there is no range then return early to avoid looping over the cache.
+    if (p0 == p1) {
+        return;
+    }
+
+    // for Mamba-like or RWKV models, only the pos needs to be shifted
+    if (0 <= seq_id && seq_id < (int64_t) size) {
+        const int32_t tail_id = cells[seq_id].tail;
+        if (tail_id >= 0) {
+            auto & cell = cells[tail_id];
+            if (cell.has_seq_id(seq_id) && p0 <= cell.pos && cell.pos < p1) {
+                cell.pos += shift;
+            }
+        }
+    }
+}
+
+void llama_memory_recurrent::seq_div(llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) {
+    if (d == 1) {
+        return;
+    }
+
+    if (p0 < 0) {
+        p0 = 0;
+    }
+
+    if (p1 < 0) {
+        p1 = std::numeric_limits<llama_pos>::max();
+    }
+
+    // If there is no range then return early to avoid looping over the cache.
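+    // (note: the range is half-open, so p0 == p1 denotes an empty range;
+    //  e.g. seq_div(seq_id, 5, 5, 2) leaves all positions untouched)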
+    if (p0 == p1) {
+        return;
+    }
+
+    // for Mamba-like or RWKV models, only the pos needs to be changed
+    if (0 <= seq_id && seq_id < (int64_t) size) {
+        const int32_t tail_id = cells[seq_id].tail;
+        if (tail_id >= 0) {
+            auto & cell = cells[tail_id];
+            if (cell.has_seq_id(seq_id) && p0 <= cell.pos && cell.pos < p1) {
+                cell.pos /= d;
+            }
+        }
+    }
+}
+
+llama_pos llama_memory_recurrent::seq_pos_min(llama_seq_id seq_id) const {
+    llama_pos result = std::numeric_limits<llama_pos>::max();
+
+    for (uint32_t i = 0; i < size; ++i) {
+        if (cells[i].has_seq_id(seq_id)) {
+            result = std::min(result, cells[i].pos);
+        }
+    }
+
+    if (result == std::numeric_limits<llama_pos>::max()) {
+        result = -1;
+    }
+
+    return result;
+}
+
+llama_pos llama_memory_recurrent::seq_pos_max(llama_seq_id seq_id) const {
+    llama_pos result = -1;
+
+    for (uint32_t i = 0; i < size; ++i) {
+        if (cells[i].has_seq_id(seq_id)) {
+            result = std::max(result, cells[i].pos);
+        }
+    }
+
+    return result;
+}
+
+llama_memory_context_ptr llama_memory_recurrent::init_batch(llama_batch_allocr & balloc, uint32_t n_ubatch, bool embd_all) {
+    do {
+        balloc.split_reset();
+
+        std::vector<llama_ubatch> ubatches;
+        while (true) {
+            llama_ubatch ubatch;
+
+            if (embd_all) {
+                // if all tokens are output, split by sequence
+                ubatch = balloc.split_seq(n_ubatch);
+            } else {
+                ubatch = balloc.split_equal(n_ubatch, false);
+            }
+
+            if (ubatch.n_tokens == 0) {
+                break;
+            }
+
+            ubatches.push_back(std::move(ubatch)); // NOLINT
+        }
+
+        if (balloc.get_n_used() < balloc.get_n_tokens()) {
+            // failed to find a suitable split
+            break;
+        }
+
+        if (!prepare(ubatches)) {
+            break;
+        }
+
+        return std::make_unique<llama_memory_recurrent_context>(this, std::move(ubatches));
+    } while (false);
+
+    return std::make_unique<llama_memory_recurrent_context>(LLAMA_MEMORY_STATUS_FAILED_PREPARE);
+}
+
+llama_memory_context_ptr llama_memory_recurrent::init_full() {
+    return std::make_unique<llama_memory_recurrent_context>(this);
+}
+
+llama_memory_context_ptr llama_memory_recurrent::init_update(llama_context * lctx, bool optimize) {
+    GGML_UNUSED(lctx);
+    GGML_UNUSED(optimize);
+
+    return std::make_unique<llama_memory_recurrent_context>(LLAMA_MEMORY_STATUS_NO_UPDATE);
+}
+
+bool llama_memory_recurrent::prepare(const std::vector<llama_ubatch> & ubatches) {
+    // simply remember the full state because it is very small for this type of cache
+    // TODO: optimize
+    auto org_cells = cells;
+    auto org_used = used;
+    auto org_head = head;
+
+    bool success = true;
+
+    for (const auto & ubatch : ubatches) {
+        if (!find_slot(ubatch)) {
+            success = false;
+            break;
+        }
+    }
+
+    // restore the original state
+    cells = std::move(org_cells);
+    used = org_used;
+    head = org_head;
+
+    return success;
+}
+
+bool llama_memory_recurrent::find_slot(const llama_ubatch & ubatch) {
+    const uint32_t n_seq_tokens = ubatch.n_seq_tokens;
+    const uint32_t n_seqs       = ubatch.n_seqs;
+
+    // if we have enough unused cells before the current head ->
+    //   better to start searching from the beginning of the cache, hoping to fill it
+    if (head > used + 2*n_seqs) {
+        head = 0;
+    }
+
+    // For recurrent state architectures (like Mamba or RWKV),
+    // each cache cell can store the state for a whole sequence.
+    // A slot should always be contiguous.
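+    // (illustrative example: if a ubatch touches sequences {0, 3, 7} whose
+    //  tail cells are scattered, the code below computes a [min, max] cell
+    //  range and swaps states into it so the slot ends up contiguous)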
+ + // can only process batches with an equal number of new tokens in each sequence + GGML_ASSERT(ubatch.equal_seqs()); + + int32_t min = size - 1; + int32_t max = 0; + + // everything should fit if all seq_ids are smaller than the max + for (uint32_t s = 0; s < n_seqs; ++s) { + const uint32_t i = s*n_seq_tokens; // first token of sequence set s + const uint32_t n_seq_id = ubatch.n_seq_id[i]; + + for (uint32_t j = 0; j < n_seq_id; ++j) { + const llama_seq_id seq_id = ubatch.seq_id[i][j]; + + if (seq_id < 0 || (uint32_t) seq_id >= size) { + // too big seq_id + // TODO: would it be possible to resize the cache instead? + LLAMA_LOG_ERROR("%s: seq_id=%d >= n_seq_max=%u Try using a bigger --parallel value\n", __func__, seq_id, n_seq_max); + return false; + } + if (j > 0) { + auto & seq = cells[seq_id]; + if (seq.tail >= 0) { + auto & cell = cells[seq.tail]; + // clear cells from seq_ids that become shared + // (should not normally happen, but let's handle it anyway) + cell.seq_id.erase(seq_id); + seq.tail = -1; + if (cell.seq_id.empty()) { + cell.pos = -1; + cell.src = -1; + used -= 1; + } + } + } + } + } + +#ifndef NDEBUG + { + std::vector tails_verif; + tails_verif.assign(size, -1); + for (uint32_t i = 0; i < size; ++i) { + auto & cell = cells[i]; + for (llama_seq_id seq_id : cell.seq_id) { + if (tails_verif[seq_id] != -1) { + LLAMA_LOG_ERROR("%s: duplicate tail for seq_id %d in cell %d and %d\n", __func__, seq_id, i, tails_verif[seq_id]); + } + tails_verif[seq_id] = i; + } + } + for (uint32_t i = 0; i < size; ++i) { + if (tails_verif[i] != cells[i].tail) { + LLAMA_LOG_ERROR("%s: wrong tail for seq_id %d, (%d instead of %d)\n", __func__, i, cells[i].tail, tails_verif[i]); + } + } + } +#endif + + // find next empty cell + uint32_t next_empty_cell = head; + + for (uint32_t i = 0; i < size; ++i) { + if (next_empty_cell >= size) { next_empty_cell -= size; } + auto & cell = cells[next_empty_cell]; + if (cell.is_empty()) { break; } + next_empty_cell += 1; + } + + // find usable cell range + for (uint32_t s = 0; s < n_seqs; ++s) { + const uint32_t i = s*n_seq_tokens; + const llama_seq_id seq_id = ubatch.seq_id[i][0]; + auto & seq_meta = cells[seq_id]; + bool has_cell = false; + if (seq_meta.tail >= 0) { + auto & cell = cells[seq_meta.tail]; + GGML_ASSERT(cell.has_seq_id(seq_id)); + // does this seq_id "own" the cell? 
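+            // (i.e. the cell is "owned" when exactly one sequence references
+            //  it; a shared cell must first be copied into an empty cell)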
+ if (cell.seq_id.size() == 1) { has_cell = true; } + } + if (!has_cell) { + auto & empty_cell = cells[next_empty_cell]; + GGML_ASSERT(empty_cell.is_empty()); + // copy old tail into the empty cell + if (seq_meta.tail >= 0) { + auto & orig_cell = cells[seq_meta.tail]; + empty_cell.pos = orig_cell.pos; + empty_cell.src = orig_cell.src; + orig_cell.seq_id.erase(seq_id); + empty_cell.seq_id.insert(seq_id); // will be overwritten + GGML_ASSERT(!orig_cell.is_empty()); // has at least one remaining seq_id + } + seq_meta.tail = next_empty_cell; + // find next empty cell + if (s + 1 < n_seqs) { + for (uint32_t j = 0; j < size; ++j) { + next_empty_cell += 1; + if (next_empty_cell >= size) { next_empty_cell -= size; } + auto & cell = cells[next_empty_cell]; + if (cell.is_empty()) { break; } + } + } + } + if (min > seq_meta.tail) { min = seq_meta.tail; } + if (max < seq_meta.tail) { max = seq_meta.tail; } + } + + // gather and re-order + for (uint32_t s = 0; s < n_seqs; ++s) { + const uint32_t i = s*n_seq_tokens; + const int32_t dst_id = s + min; + const int32_t src_id = cells[ubatch.seq_id[i][0]].tail; + if (dst_id != src_id) { + auto & dst_cell = cells[dst_id]; + auto & src_cell = cells[src_id]; + + std::swap(dst_cell.pos, src_cell.pos); + std::swap(dst_cell.src, src_cell.src); + std::swap(dst_cell.seq_id, src_cell.seq_id); + + // swap tails + for (uint32_t j = 0; j < size; ++j) { + int32_t & tail = cells[j].tail; + if (tail == src_id) { + tail = dst_id; + } else if (tail == dst_id) { + tail = src_id; + } + } + } + } + + // update the pos of the used seqs + for (uint32_t s = 0; s < n_seqs; ++s) { + const uint32_t i = s*n_seq_tokens; + const llama_pos last_pos = ubatch.pos[i + n_seq_tokens - 1]; + const int32_t cell_id = s + min; + auto & cell = cells[cell_id]; + + if (cell.pos >= 0 && last_pos != cell.pos + (llama_pos) n_seq_tokens) { + // What should happen when the pos backtracks or skips a value? + // Clearing the state mid-batch would require special-casing which isn't done. 
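+            // (note: only the bookkeeping pos is overwritten below; the stored
+            //  recurrent state tensors are carried over as-is)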
+ LLAMA_LOG_WARN("%s: non-consecutive token position %d after %d for sequence %d with %u new tokens\n", + __func__, last_pos, cell.pos, ubatch.seq_id[i][0], n_seq_tokens); + } + cell.pos = last_pos; + cell.seq_id.clear(); + for (int32_t j = 0; j < ubatch.n_seq_id[i]; ++j) { + const llama_seq_id seq_id = ubatch.seq_id[i][j]; + cell.seq_id.insert(seq_id); + cells[seq_id].tail = cell_id; + } + } + + // Find first cell without src refs, to use as the zero-ed state + { + // TODO: bake-in src refcounts in the cell metadata + std::vector refcounts(size, 0); + for (size_t i = 0; i < size; ++i) { + const int32_t src = cells[i].src; + if (src >= 0) { + refcounts[src] += 1; + } + } + + rs_z = -1; + for (int i = min; i <= max; ++i) { + if (refcounts[i] == 0) { + rs_z = i; + break; + } + } + + for (int i = min; i <= max; ++i) { + if (cells[i].src < 0) { + GGML_ASSERT(rs_z >= 0); + cells[i].src0 = rs_z; + } else { + // Stage the source ids for all used cells to allow correct seq_* behavior + // and still make these values available when setting the inputs + cells[i].src0 = cells[i].src; + } + cells[i].src = i; // avoid moving or clearing twice + } + } + + // allow getting the range of used cells, from head to head + n + head = min; + n = max - min + 1; + used = std::count_if(cells.begin(), cells.end(), + [](const mem_cell & cell){ return !cell.is_empty(); }); + + // sanity check + return n >= n_seqs; +} + +bool llama_memory_recurrent::get_can_shift() const { + // shifting the pos is trivial for recurrent models + return true; +} + +size_t llama_memory_recurrent::total_size() const { + size_t size = 0; + for (const auto & buf : bufs) { + size += ggml_backend_buffer_get_size(buf.get()); + } + + return size; +} + +size_t llama_memory_recurrent::size_r_bytes() const { + size_t size_r_bytes = 0; + + for (const auto & r : r_l) { + if (r != nullptr) { + size_r_bytes += ggml_nbytes(r); + } + } + + return size_r_bytes; +} + +size_t llama_memory_recurrent::size_s_bytes() const { + size_t size_s_bytes = 0; + + for (const auto & s : s_l) { + if (s != nullptr) { + size_s_bytes += ggml_nbytes(s); + } + } + + return size_s_bytes; +} + +void llama_memory_recurrent::state_write(llama_io_write_i & io, llama_seq_id seq_id) const { + std::vector> cell_ranges; // ranges, from inclusive, to exclusive + uint32_t cell_count = 0; + + // Count the number of cells with the specified seq_id + // Find all the ranges of cells with this seq id (or all, when -1) + uint32_t cell_range_begin = size; + for (uint32_t i = 0; i < size; ++i) { + const auto & cell = cells[i]; + if ((seq_id == -1 && !cell.is_empty()) || cell.has_seq_id(seq_id)) { + ++cell_count; + if (cell_range_begin == size) { + cell_range_begin = i; + } + } else { + if (cell_range_begin != size) { + cell_ranges.emplace_back(cell_range_begin, i); + cell_range_begin = size; + } + } + } + if (cell_range_begin != size) { + cell_ranges.emplace_back(cell_range_begin, size); + } + + // DEBUG CHECK: Sum of cell counts in ranges should equal the total cell count + uint32_t cell_count_check = 0; + for (const auto & range : cell_ranges) { + cell_count_check += range.second - range.first; + } + GGML_ASSERT(cell_count == cell_count_check); + + io.write(&cell_count, sizeof(cell_count)); + + state_write_meta(io, cell_ranges, seq_id); + state_write_data(io, cell_ranges); +} + +void llama_memory_recurrent::state_read(llama_io_read_i & io, llama_seq_id seq_id) { + uint32_t cell_count; + io.read_to(&cell_count, sizeof(cell_count)); + + bool res = true; + + res = res && state_read_meta(io, 
cell_count, seq_id); + res = res && state_read_data(io, cell_count); + + if (!res) { + if (seq_id == -1) { + clear(true); + } else { + seq_rm(seq_id, -1, -1); + } + throw std::runtime_error("failed to restore kv cache"); + } +} + +void llama_memory_recurrent::state_write_meta(llama_io_write_i & io, const std::vector> & cell_ranges, llama_seq_id seq_id) const { + for (const auto & range : cell_ranges) { + for (uint32_t i = range.first; i < range.second; ++i) { + const auto & cell = cells[i]; + const llama_pos pos = cell.pos; + const uint32_t n_seq_id = seq_id == -1 ? cell.seq_id.size() : 0; + + io.write(&pos, sizeof(pos)); + io.write(&n_seq_id, sizeof(n_seq_id)); + + if (n_seq_id) { + for (auto seq_id : cell.seq_id) { + io.write(&seq_id, sizeof(seq_id)); + } + } + } + } +} + +void llama_memory_recurrent::state_write_data(llama_io_write_i & io, const std::vector> & cell_ranges) const { + const uint32_t s_trans = 0; + const uint32_t n_layer = hparams.n_layer; + + io.write(&s_trans, sizeof(s_trans)); + io.write(&n_layer, sizeof(n_layer)); + + std::vector tmp_buf; + + // Iterate and write all the keys first, each row is a cell + // Get whole range at a time + for (uint32_t il = 0; il < n_layer; ++il) { + // skip null layers (read_data will handle this by checking "r_l" and "s_l" for null) + if (r_l[il] == nullptr) continue; + + // Write key type + const int32_t r_type_i = (int32_t)r_l[il]->type; + io.write(&r_type_i, sizeof(r_type_i)); + + // Write row size of key + const uint64_t r_size_row = ggml_row_size(r_l[il]->type, hparams.n_embd_r()); + io.write(&r_size_row, sizeof(r_size_row)); + + // Read each range of cells of k_size length each into tmp_buf and write out + for (const auto & range : cell_ranges) { + const size_t range_size = range.second - range.first; + const size_t buf_size = range_size * r_size_row; + io.write_tensor(r_l[il], range.first * r_size_row, buf_size); + } + } + + if (!s_trans) { + for (uint32_t il = 0; il < n_layer; ++il) { + // skip null layers (read_data will handle this by checking "r_l" and "s_l" for null) + if (s_l[il] == nullptr) continue; + + // Write value type + const int32_t s_type_i = (int32_t)s_l[il]->type; + io.write(&s_type_i, sizeof(s_type_i)); + + // Write row size of value + const uint64_t s_size_row = ggml_row_size(s_l[il]->type, hparams.n_embd_s()); + io.write(&s_size_row, sizeof(s_size_row)); + + // Read each range of cells of s_size length each into tmp_buf and write out + for (const auto & range : cell_ranges) { + const size_t range_size = range.second - range.first; + const size_t buf_size = range_size * s_size_row; + io.write_tensor(s_l[il], range.first * s_size_row, buf_size); + } + } + } else { + // When v is transposed, we also need the element size and get the element ranges from each row + const uint32_t mem_size = size; + for (uint32_t il = 0; il < n_layer; ++il) { + // skip null layers (read_data will handle this by checking "r_l" and "s_l" for null) + if (s_l[il] == nullptr) continue; + + const uint32_t n_embd_s = hparams.n_embd_s(); + + // Write value type + const int32_t s_type_i = (int32_t)s_l[il]->type; + io.write(&s_type_i, sizeof(s_type_i)); + + // Write element size + const uint32_t s_size_el = ggml_type_size(s_l[il]->type); + io.write(&s_size_el, sizeof(s_size_el)); + + // Write GQA embedding size + io.write(&n_embd_s, sizeof(n_embd_s)); + + // For each row, we get the element values of each cell + for (uint32_t j = 0; j < n_embd_s; ++j) { + // Read each range of cells of v_size_el length each into tmp_buf and write out + for 
(const auto & range : cell_ranges) { + const size_t range_size = range.second - range.first; + const size_t src_offset = (range.first + j * mem_size) * s_size_el; + const size_t buf_size = range_size * s_size_el; + io.write_tensor(s_l[il], src_offset, buf_size); + } + } + } + } +} + +bool llama_memory_recurrent::state_read_meta(llama_io_read_i & io, uint32_t cell_count, llama_seq_id dest_seq_id) { + if (dest_seq_id != -1) { + // single sequence + + seq_rm(dest_seq_id, -1, -1); + + llama_batch_allocr balloc(hparams.n_pos_per_embd()); + + llama_ubatch ubatch = balloc.ubatch_reserve(cell_count, 1); + + for (uint32_t i = 0; i < cell_count; ++i) { + llama_pos pos; + uint32_t n_seq_id; + + io.read_to(&pos, sizeof(pos)); + io.read_to(&n_seq_id, sizeof(n_seq_id)); + + if (n_seq_id != 0) { + LLAMA_LOG_ERROR("%s: invalid seq_id-agnostic kv cell\n", __func__); + return false; + } + + ubatch.pos[i] = pos; + } + ubatch.n_seq_id[0] = 1; + ubatch.seq_id[0] = &dest_seq_id; + + if (!find_slot(ubatch)) { + LLAMA_LOG_ERROR("%s: failed to find available cells in kv cache\n", __func__); + return false; + } + + // DEBUG CHECK: kv.head should be our first cell, kv.head + cell_count - 1 should be our last cell (verify seq_id and pos values) + // Assume that this is one contiguous block of cells + GGML_ASSERT(head + cell_count <= size); + GGML_ASSERT(cells[head].pos == ubatch.pos[0]); + GGML_ASSERT(cells[head + cell_count - 1].pos == ubatch.pos[cell_count - 1]); + GGML_ASSERT(cells[head].has_seq_id(dest_seq_id)); + GGML_ASSERT(cells[head + cell_count - 1].has_seq_id(dest_seq_id)); + } else { + // whole KV cache restore + + if (cell_count > size) { + LLAMA_LOG_ERROR("%s: not enough cells in kv cache\n", __func__); + return false; + } + + clear(true); + + for (uint32_t i = 0; i < cell_count; ++i) { + auto & cell = cells[i]; + + llama_pos pos; + uint32_t n_seq_id; + + io.read_to(&pos, sizeof(pos)); + io.read_to(&n_seq_id, sizeof(n_seq_id)); + + cell.pos = pos; + + for (uint32_t j = 0; j < n_seq_id; ++j) { + llama_seq_id seq_id; + io.read_to(&seq_id, sizeof(seq_id)); + + // TODO: llama_memory_recurrent should have a notion of max sequences + //if (seq_id < 0 || (uint32_t) seq_id >= llama_n_seq_max(ctx)) { + if (seq_id < 0) { + //LLAMA_LOG_ERROR("%s: invalid seq_id, %d is out of range [0, %u)\n", __func__, seq_id, llama_n_seq_max(ctx)); + LLAMA_LOG_ERROR("%s: invalid seq_id, %d is out of range [0, inf)\n", __func__, seq_id); + return false; + } + + cell.seq_id.insert(seq_id); + + int32_t & tail = cells[seq_id].tail; + if (tail != -1) { + LLAMA_LOG_ERROR("%s: duplicate tail for seq_id %d in cell %d and %d\n", __func__, seq_id, i, tail); + return false; + } + tail = i; + } + } + + head = 0; + used = cell_count; + } + + for (uint32_t i = 0; i < cell_count; ++i) { + uint32_t cell_id = head + i; + // make sure the recurrent states will keep their restored state + cells[cell_id].src = cell_id; + } + + return true; +} + +bool llama_memory_recurrent::state_read_data(llama_io_read_i & io, uint32_t cell_count) { + uint32_t s_trans; + uint32_t n_layer; + io.read_to(&s_trans, sizeof(s_trans)); + io.read_to(&n_layer, sizeof(n_layer)); + + if (n_layer != hparams.n_layer) { + LLAMA_LOG_ERROR("%s: mismatched layer count (%u instead of %u)\n", __func__, n_layer, hparams.n_layer); + return false; + } + if (cell_count > size) { + LLAMA_LOG_ERROR("%s: not enough cells in kv cache to restore state (%u > %u)\n", __func__, cell_count, size); + return false; + } + if (false != (bool) s_trans) { + LLAMA_LOG_ERROR("%s: incompatible s 
transposition\n", __func__); + return false; + } + + // For each layer, read the keys for each cell, one row is one cell, read as one contiguous block + for (uint32_t il = 0; il < n_layer; ++il) { + // skip null layers + if (r_l[il] == nullptr) continue; + + // Read type of key + int32_t r_type_i_ref; + io.read_to(&r_type_i_ref, sizeof(r_type_i_ref)); + const int32_t r_type_i = (int32_t) r_l[il]->type; + if (r_type_i != r_type_i_ref) { + LLAMA_LOG_ERROR("%s: mismatched r type (%d != %d, layer %d)\n", __func__, r_type_i, r_type_i_ref, il); + return false; + } + + // Read row size of key + uint64_t r_size_row_ref; + io.read_to(&r_size_row_ref, sizeof(r_size_row_ref)); + const size_t r_size_row = ggml_row_size(r_l[il]->type, hparams.n_embd_r()); + if (r_size_row != r_size_row_ref) { + LLAMA_LOG_ERROR("%s: mismatched r row size (%zu != %zu, layer %d)\n", __func__, r_size_row, (size_t) r_size_row_ref, il); + return false; + } + + if (cell_count) { + // Read and set the keys for the whole cell range + ggml_backend_tensor_set(r_l[il], io.read(cell_count * r_size_row), head * r_size_row, cell_count * r_size_row); + } + } + + if (!s_trans) { + for (uint32_t il = 0; il < n_layer; ++il) { + // skip null layers + if (s_l[il] == nullptr) continue; + + // Read type of value + int32_t s_type_i_ref; + io.read_to(&s_type_i_ref, sizeof(s_type_i_ref)); + const int32_t s_type_i = (int32_t)s_l[il]->type; + + if (s_type_i != s_type_i_ref) { + LLAMA_LOG_ERROR("%s: mismatched s type (%d != %d, layer %d)\n", __func__, s_type_i, s_type_i_ref, il); + return false; + } + + // Read row size of value + uint64_t s_size_row_ref; + io.read_to(&s_size_row_ref, sizeof(s_size_row_ref)); + const size_t s_size_row = ggml_row_size(s_l[il]->type, hparams.n_embd_s()); + if (s_size_row != s_size_row_ref) { + LLAMA_LOG_ERROR("%s: mismatched s row size (%zu != %zu, layer %d)\n", __func__, s_size_row, (size_t) s_size_row_ref, il); + return false; + } + + if (cell_count) { + // Read and set the values for the whole cell range + ggml_backend_tensor_set(s_l[il], io.read(cell_count * s_size_row), head * s_size_row, cell_count * s_size_row); + } + } + } else { + // For each layer, read the values for each cell (transposed) + for (uint32_t il = 0; il < n_layer; ++il) { + // skip null layers + if (s_l[il] == nullptr) continue; + + const uint32_t n_embd_s = hparams.n_embd_s(); + + // Read type of value + int32_t s_type_i_ref; + io.read_to(&s_type_i_ref, sizeof(s_type_i_ref)); + const int32_t s_type_i = (int32_t)s_l[il]->type; + if (s_type_i != s_type_i_ref) { + LLAMA_LOG_ERROR("%s: mismatched s type (%d != %d, layer %d)\n", __func__, s_type_i, s_type_i_ref, il); + return false; + } + + // Read element size of value + uint32_t s_size_el_ref; + io.read_to(&s_size_el_ref, sizeof(s_size_el_ref)); + const size_t s_size_el = ggml_type_size(s_l[il]->type); + if (s_size_el != s_size_el_ref) { + LLAMA_LOG_ERROR("%s: mismatched s element size (%zu != %zu, layer %d)\n", __func__, s_size_el, (size_t) s_size_el_ref, il); + return false; + } + + // Read state embedding size + uint32_t n_embd_s_ref; + io.read_to(&n_embd_s_ref, sizeof(n_embd_s_ref)); + if (n_embd_s != n_embd_s_ref) { + LLAMA_LOG_ERROR("%s: mismatched s embedding size (%u != %u, layer %d)\n", __func__, n_embd_s, n_embd_s_ref, il); + return false; + } + + if (cell_count) { + // For each row in the transposed matrix, read the values for the whole cell range + for (uint32_t j = 0; j < n_embd_s; ++j) { + const size_t dst_offset = (head + j * size) * s_size_el; + ggml_backend_tensor_set(s_l[il], 
io.read(cell_count * s_size_el), dst_offset, cell_count * s_size_el); + } + } + } + } + + return true; +} + +// +// llama_memory_recurrent_context +// + +llama_memory_recurrent_context::llama_memory_recurrent_context(llama_memory_status status) : status(status) {} + +llama_memory_recurrent_context::llama_memory_recurrent_context( + llama_memory_recurrent * mem) : status(LLAMA_MEMORY_STATUS_SUCCESS), mem(mem), is_full(true) { +} + +llama_memory_recurrent_context::llama_memory_recurrent_context( + llama_memory_recurrent * mem, + std::vector ubatches) : status(LLAMA_MEMORY_STATUS_SUCCESS), mem(mem), ubatches(std::move(ubatches)) {} + +llama_memory_recurrent_context::~llama_memory_recurrent_context() = default; + +bool llama_memory_recurrent_context::next() { + assert(status == LLAMA_MEMORY_STATUS_SUCCESS); + + if (++i_next >= ubatches.size()) { + return false; + } + + return true; +} + +bool llama_memory_recurrent_context::apply() { + assert(!llama_memory_status_is_fail(status)); + + // no ubatches -> this is an update + if (ubatches.empty()) { + // recurrent cache never performs updates + assert(status == LLAMA_MEMORY_STATUS_NO_UPDATE); + + return true; + } + + mem->find_slot(ubatches[i_next]); + + return true; +} + +llama_memory_status llama_memory_recurrent_context::get_status() const { + return status; +} + +const llama_ubatch & llama_memory_recurrent_context::get_ubatch() const { + assert(status == LLAMA_MEMORY_STATUS_SUCCESS); + + return ubatches[i_next]; +} + +uint32_t llama_memory_recurrent_context::get_n_rs() const { + return is_full ? mem->size : mem->n; +} + +uint32_t llama_memory_recurrent_context::get_head() const { + return is_full ? 0 : mem->head; +} + +int32_t llama_memory_recurrent_context::get_rs_z() const { + return is_full ? 0 : mem->rs_z; +} + +uint32_t llama_memory_recurrent_context::get_size() const { + return mem->size; +} + +ggml_tensor * llama_memory_recurrent_context::get_r_l(int32_t il) const { + return mem->r_l[il]; +} + +ggml_tensor * llama_memory_recurrent_context::get_s_l(int32_t il) const { + return mem->s_l[il]; +} + +int32_t llama_memory_recurrent_context::s_copy(int i) const { + return mem->cells[i + mem->head].src0; +} diff --git a/llama/llama.cpp/src/llama-memory-recurrent.h b/llama/llama.cpp/src/llama-memory-recurrent.h new file mode 100644 index 000000000..4d094f9a0 --- /dev/null +++ b/llama/llama.cpp/src/llama-memory-recurrent.h @@ -0,0 +1,183 @@ +#pragma once + +#include "llama-batch.h" +#include "llama-graph.h" +#include "llama-memory.h" + +#include +#include + +// +// llama_memory_recurrent +// + +// TODO: extract the cache state used for graph computation into llama_memory_recurrent_context_i +// see the implementation of llama_kv_cache_unified_context_i for an example how to do it +class llama_memory_recurrent : public llama_memory_i { +public: + + // this callback is used to filter out layers that should not be included in the cache + using layer_filter_cb = std::function; + + llama_memory_recurrent( + const llama_model & model, + layer_filter_cb && filter, + ggml_type type_r, + ggml_type type_s, + bool offload, + uint32_t mem_size, + uint32_t n_seq_max); + + ~llama_memory_recurrent() = default; + + // + // llama_memory_i + // + + llama_memory_context_ptr init_batch( + llama_batch_allocr & balloc, + uint32_t n_ubatch, + bool embd_all) override; + + llama_memory_context_ptr init_full() override; + + llama_memory_context_ptr init_update(llama_context * lctx, bool optimize) override; + + void clear(bool data) override; + + bool seq_rm (llama_seq_id 
seq_id, llama_pos p0, llama_pos p1) override;
+    void seq_cp  (llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) override;
+    void seq_keep(llama_seq_id seq_id) override;
+    void seq_add (llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos shift) override;
+    void seq_div (llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) override;
+
+    llama_pos seq_pos_min(llama_seq_id seq_id) const override;
+    llama_pos seq_pos_max(llama_seq_id seq_id) const override;
+
+    bool prepare(const std::vector<llama_ubatch> & ubatches);
+
+    // find a contiguous slot of memory cells and emplace the ubatch there
+    bool find_slot(const llama_ubatch & ubatch);
+
+    bool get_can_shift() const override;
+
+    // state write/load
+
+    void state_write(llama_io_write_i & io, llama_seq_id seq_id = -1) const override;
+    void state_read (llama_io_read_i  & io, llama_seq_id seq_id = -1) override;
+
+    uint32_t head = 0; // the location where the batch will be placed in the cache (see find_slot())
+    uint32_t size = 0; // total number of cells, shared across all sequences
+    uint32_t used = 0; // used cells (i.e. at least one seq_id)
+
+    // computed before each graph build
+    uint32_t n = 0;
+
+    // first zero-ed state
+    int32_t rs_z = -1;
+
+    // TODO: optimize for recurrent state needs
+    struct mem_cell {
+        llama_pos pos  = -1;
+        int32_t   src  = -1; // used to know where states should be copied from
+        int32_t   src0 = -1; // like src, but only used when setting the inputs (allowing to copy once)
+        int32_t   tail = -1;
+
+        std::set<llama_seq_id> seq_id;
+
+        bool has_seq_id(const llama_seq_id & id) const {
+            return seq_id.find(id) != seq_id.end();
+        }
+
+        bool is_empty() const {
+            return seq_id.empty();
+        }
+
+        bool is_same_seq(const mem_cell & other) const {
+            return seq_id == other.seq_id;
+        }
+    };
+
+    std::vector<mem_cell> cells;
+
+    // per layer
+    std::vector<ggml_tensor *> r_l;
+    std::vector<ggml_tensor *> s_l;
+
+private:
+    //const llama_model & model;
+    const llama_hparams & hparams;
+
+    const uint32_t n_seq_max = 1;
+
+    std::vector<ggml_context_ptr>        ctxs;
+    std::vector<ggml_backend_buffer_ptr> bufs;
+
+    size_t total_size() const;
+
+    size_t size_r_bytes() const;
+    size_t size_s_bytes() const;
+
+    void state_write_meta(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges, llama_seq_id seq_id = -1) const;
+    void state_write_data(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges) const;
+
+    bool state_read_meta(llama_io_read_i & io, uint32_t cell_count, llama_seq_id dest_seq_id = -1);
+    bool state_read_data(llama_io_read_i & io, uint32_t cell_count);
+};
+
+class llama_memory_recurrent_context : public llama_memory_context_i {
+public:
+    // used for errors
+    llama_memory_recurrent_context(llama_memory_status status);
+
+    // used to create a full-cache or update context
+    llama_memory_recurrent_context(
+            llama_memory_recurrent * mem);
+
+    // used to create a batch processing context from a batch
+    llama_memory_recurrent_context(
+            llama_memory_recurrent * mem,
+            std::vector<llama_ubatch> ubatches);
+
+    virtual ~llama_memory_recurrent_context();
+
+    //
+    // llama_memory_context_i
+    //
+
+    bool next()  override;
+    bool apply() override;
+
+    llama_memory_status  get_status() const override;
+    const llama_ubatch & get_ubatch() const override;
+
+    //
+    // llama_memory_recurrent_context specific API
+    //
+
+    uint32_t get_n_rs() const;
+    uint32_t get_head() const;
+    int32_t  get_rs_z() const;
+    uint32_t get_size() const;
+
+    ggml_tensor * get_r_l(int32_t il) const;
+    ggml_tensor * get_s_l(int32_t il) const;
+
+    int32_t s_copy(int i) const;
+
+private:
+    const llama_memory_status status;
+
+    llama_memory_recurrent * mem;
+
size_t i_next = 0; + + std::vector ubatches; + + // + // data needed for building the compute graph for the current ubatch: + // TODO: extract all the state like `head` and `n` here + // + + const bool is_full = false; +}; diff --git a/llama/llama.cpp/src/llama-memory.cpp b/llama/llama.cpp/src/llama-memory.cpp index 10173253e..ca6844c32 100644 --- a/llama/llama.cpp/src/llama-memory.cpp +++ b/llama/llama.cpp/src/llama-memory.cpp @@ -1 +1,59 @@ #include "llama-memory.h" + +llama_memory_status llama_memory_status_combine(llama_memory_status s0, llama_memory_status s1) { + bool has_update = false; + + switch (s0) { + case LLAMA_MEMORY_STATUS_SUCCESS: + { + has_update = true; + break; + } + case LLAMA_MEMORY_STATUS_NO_UPDATE: + { + break; + } + case LLAMA_MEMORY_STATUS_FAILED_PREPARE: + case LLAMA_MEMORY_STATUS_FAILED_COMPUTE: + { + return s0; + } + } + + switch (s1) { + case LLAMA_MEMORY_STATUS_SUCCESS: + { + has_update = true; + break; + } + case LLAMA_MEMORY_STATUS_NO_UPDATE: + { + break; + } + case LLAMA_MEMORY_STATUS_FAILED_PREPARE: + case LLAMA_MEMORY_STATUS_FAILED_COMPUTE: + { + return s1; + } + } + + // if either status has an update, then the combined status has an update + return has_update ? LLAMA_MEMORY_STATUS_SUCCESS : LLAMA_MEMORY_STATUS_NO_UPDATE; +} + +bool llama_memory_status_is_fail(llama_memory_status status) { + switch (status) { + case LLAMA_MEMORY_STATUS_SUCCESS: + case LLAMA_MEMORY_STATUS_NO_UPDATE: + { + return false; + } + case LLAMA_MEMORY_STATUS_FAILED_PREPARE: + case LLAMA_MEMORY_STATUS_FAILED_COMPUTE: + { + return true; + } + } + + return false; +} diff --git a/llama/llama.cpp/src/llama-memory.h b/llama/llama.cpp/src/llama-memory.h index c7412d591..e8ba336e8 100644 --- a/llama/llama.cpp/src/llama-memory.h +++ b/llama/llama.cpp/src/llama-memory.h @@ -2,30 +2,115 @@ #include "llama.h" +#include + +struct llama_ubatch; + +class llama_batch_allocr; + +class llama_io_write_i; +class llama_io_read_i; + struct llama_memory_params { // kv cache ggml_type type_k; ggml_type type_v; - // parameters for other types of memory - // ... + // use full-size SWA cache + bool swa_full; }; +enum llama_memory_status { + LLAMA_MEMORY_STATUS_SUCCESS = 0, + LLAMA_MEMORY_STATUS_NO_UPDATE, + LLAMA_MEMORY_STATUS_FAILED_PREPARE, + LLAMA_MEMORY_STATUS_FAILED_COMPUTE, +}; + +// helper function for combining the status of two memory contexts +// useful for implementing hybrid memory types (e.g. iSWA) +llama_memory_status llama_memory_status_combine(llama_memory_status s0, llama_memory_status s1); + +// helper function for checking if a memory status indicates a failure +bool llama_memory_status_is_fail(llama_memory_status status); + +// the interface for managing the memory context during batch processing +// this interface is implemented per memory type. see: +// - llama_kv_cache_unified_context +// - llama_kv_cache_unified_iswa_context +// ... 
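+// (illustrative usage sketch, not part of the interface docs; `process` is a
+//  hypothetical per-ubatch callback:
+//
+//      auto mctx = mem->init_batch(balloc, n_ubatch, embd_all);
+//      if (mctx->get_status() == LLAMA_MEMORY_STATUS_SUCCESS) {
+//          do {
+//              mctx->apply();               // the only call that mutates memory
+//              process(mctx->get_ubatch());
+//          } while (mctx->next());
+//      }
+// )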
+// +// the only method that should mutate the memory and the memory context is llama_memory_i::apply() +struct llama_memory_context_i { + virtual ~llama_memory_context_i() = default; + + // consume the current ubatch from the context and proceed to the next one + // return false if we are done + virtual bool next() = 0; + + // apply the memory state for the current ubatch to the memory object + // return false on failure + virtual bool apply() = 0; + + // get the current ubatch + virtual const llama_ubatch & get_ubatch() const = 0; + + // get the status of the memory context - used for error handling and checking if any updates would be applied + virtual llama_memory_status get_status() const = 0; +}; + +using llama_memory_context_ptr = std::unique_ptr; + // general concept of LLM memory // the KV cache is a type of LLM memory, but there can be other types -class llama_memory_i { -public: +struct llama_memory_i { virtual ~llama_memory_i() = default; - virtual void clear() = 0; + // split the input batch into a set of ubatches and verify that they can fit into the cache + // return a context object containing the ubatches and memory state required to process them + // check the llama_memory_context_i::get_status() for the result + virtual llama_memory_context_ptr init_batch( + llama_batch_allocr & balloc, + uint32_t n_ubatch, + bool embd_all) = 0; + + // simulate full cache, used for allocating worst-case compute buffers + virtual llama_memory_context_ptr init_full() = 0; + + // prepare for any pending memory updates, such as shifts, defrags, etc. + // status == LLAMA_MEMORY_STATUS_NO_UPDATE if there is nothing to update + virtual llama_memory_context_ptr init_update(llama_context * lctx, bool optimize) = 0; + + // getters + virtual bool get_can_shift() const = 0; + + // + // ops + // + + // if data == true, the data buffers will also be cleared together with the metadata + virtual void clear(bool data) = 0; virtual bool seq_rm (llama_seq_id seq_id, llama_pos p0, llama_pos p1) = 0; virtual void seq_cp (llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) = 0; virtual void seq_keep(llama_seq_id seq_id) = 0; - virtual void seq_add (llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos delta) = 0; + virtual void seq_add (llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos shift) = 0; virtual void seq_div (llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) = 0; + virtual llama_pos seq_pos_min(llama_seq_id seq_id) const = 0; virtual llama_pos seq_pos_max(llama_seq_id seq_id) const = 0; - virtual bool get_can_edit() const = 0; + // + // state write/read + // + + virtual void state_write(llama_io_write_i & io, llama_seq_id seq_id = -1) const = 0; + virtual void state_read (llama_io_read_i & io, llama_seq_id seq_id = -1) = 0; +}; + +using llama_memory_ptr = std::unique_ptr; + +// TODO: temporary until the llama_kv_cache is removed from the public API +struct llama_kv_cache : public llama_memory_i { + virtual ~llama_kv_cache() = default; }; diff --git a/llama/llama.cpp/src/llama-mmap.cpp b/llama/llama.cpp/src/llama-mmap.cpp index 9da97f1bc..47497cf95 100644 --- a/llama/llama.cpp/src/llama-mmap.cpp +++ b/llama/llama.cpp/src/llama-mmap.cpp @@ -401,7 +401,7 @@ struct llama_mmap::impl { } } #else - throw std::runtime_error("PrefetchVirtualMemory unavailable"); + LLAMA_LOG_DEBUG("skipping PrefetchVirtualMemory because _WIN32_WINNT < 0x602\n"); #endif } } diff --git a/llama/llama.cpp/src/llama-model-loader.cpp b/llama/llama.cpp/src/llama-model-loader.cpp index 
diff --git a/llama/llama.cpp/src/llama-model-loader.cpp b/llama/llama.cpp/src/llama-model-loader.cpp
index 7f6617fac..7eab9b68a 100644
--- a/llama/llama.cpp/src/llama-model-loader.cpp
+++ b/llama/llama.cpp/src/llama-model-loader.cpp
@@ -35,6 +35,7 @@ static std::string llama_model_ftype_name(llama_ftype ftype) {
         case LLAMA_FTYPE_MOSTLY_Q5_0:     return "Q5_0";
         case LLAMA_FTYPE_MOSTLY_Q5_1:     return "Q5_1";
         case LLAMA_FTYPE_MOSTLY_Q8_0:     return "Q8_0";
+        case LLAMA_FTYPE_MOSTLY_MXFP4_MOE: return "MXFP4 MoE";
        case LLAMA_FTYPE_MOSTLY_Q2_K:     return "Q2_K - Medium";
         case LLAMA_FTYPE_MOSTLY_Q2_K_S:   return "Q2_K - Small";
         case LLAMA_FTYPE_MOSTLY_Q3_K_S:   return "Q3_K - Small";
@@ -288,9 +289,10 @@ namespace GGUFMeta {
 
     template<typename T>
     bool llama_model_loader::get_arr(const std::string & key, std::vector<T> & result, bool required) {
-        const int kid = gguf_find_key(meta.get(), key.c_str());
+        const gguf_context * ctx = meta.get();
+        const int kid = gguf_find_key(ctx, key.c_str());
 
-        if (kid < 0 || gguf_get_kv_type(meta.get(), kid) != GGUF_TYPE_ARRAY) {
+        if (kid < 0 || gguf_get_kv_type(ctx, kid) != GGUF_TYPE_ARRAY) {
             if (required) {
                 throw std::runtime_error(format("array key not found in model: %s", key.c_str()));
             }
@@ -298,28 +300,40 @@ namespace GGUFMeta {
         }
 
         struct GGUFMeta::ArrayInfo arr_info =
-            GGUFMeta::GKV<GGUFMeta::ArrayInfo>::get_kv(meta.get(), kid);
+            GGUFMeta::GKV<GGUFMeta::ArrayInfo>::get_kv(ctx, kid);
 
         switch (arr_info.gt) {
             case GGUF_TYPE_UINT32:
-            case GGUF_TYPE_INT32:   GGML_ASSERT((std::is_same<T,  int32_t>::value) ||
-                                                (std::is_same<T, uint32_t>::value)); break;
-            case GGUF_TYPE_FLOAT32: GGML_ASSERT((std::is_same<T, float>::value)); break;
+            case GGUF_TYPE_INT32:   GGML_ASSERT((std::is_same<T,  int32_t>::value) ||
+                                                (std::is_same<T, uint32_t>::value));    break;
+            case GGUF_TYPE_FLOAT32: GGML_ASSERT((std::is_same<T, float>::value));       break;
+            case GGUF_TYPE_STRING:  GGML_ASSERT((std::is_same<T, std::string>::value)); break;
             default:
-                throw std::runtime_error(format("%s is not a float32/uint32/int32 array", key.c_str()));
+                throw std::runtime_error(format("%s is not a string/float32/uint32/int32 array", key.c_str()));
         }
 
-        result.resize(arr_info.length);
-        result.assign((const T*)arr_info.data, (const T *)arr_info.data + arr_info.length);
+        if constexpr (std::is_same<T, std::string>::value) {
+            const size_t n_items = gguf_get_arr_n(ctx, kid);
+            result.clear();
+
+            for (size_t i = 0; i < n_items; i++) {
+                const T value = gguf_get_arr_str(ctx, kid, i);
+                result.emplace_back(value);
+            }
+        } else {
+            result.resize(arr_info.length);
+            result.assign((const T*)arr_info.data, (const T *)arr_info.data + arr_info.length);
+        }
 
         return true;
     }
 
     template<typename T, size_t N_MAX>
     bool llama_model_loader::get_arr(const std::string & key, std::array<T, N_MAX> & result, bool required) {
-        const int kid = gguf_find_key(meta.get(), key.c_str());
+        const gguf_context * ctx = meta.get();
+        const int kid = gguf_find_key(ctx, key.c_str());
 
-        if (kid < 0 || gguf_get_kv_type(meta.get(), kid) != GGUF_TYPE_ARRAY) {
+        if (kid < 0 || gguf_get_kv_type(ctx, kid) != GGUF_TYPE_ARRAY) {
             if (required) {
                 throw std::runtime_error(format("array key not found in model: %s", key.c_str()));
             }
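The string branch added above hinges on C++17 `if constexpr`: the raw pointer-cast path is never instantiated when T is std::string, so the same template compiles for both element kinds. A minimal self-contained illustration of that dispatch pattern (read_array and its raw-pointer argument layout are invented for the example):

#include <cstddef>
#include <string>
#include <type_traits>
#include <vector>

template <typename T>
std::vector<T> read_array(const void * data, size_t n) {
    std::vector<T> out;
    if constexpr (std::is_same<T, std::string>::value) {
        // strings need per-element conversion (gguf_get_arr_str in the real loader)
        const char * const * strs = static_cast<const char * const *>(data);
        out.assign(strs, strs + n);
    } else {
        // trivially copyable elements can be read straight out of the buffer
        const T * p = static_cast<const T *>(data);
        out.assign(p, p + n);
    }
    return out;
}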
@@ -327,22 +341,32 @@
         }
 
         struct GGUFMeta::ArrayInfo arr_info =
-            GGUFMeta::GKV<GGUFMeta::ArrayInfo>::get_kv(meta.get(), kid);
+            GGUFMeta::GKV<GGUFMeta::ArrayInfo>::get_kv(ctx, kid);
 
         switch (arr_info.gt) {
             case GGUF_TYPE_UINT32:
-            case GGUF_TYPE_INT32:   GGML_ASSERT((std::is_same<T,  int32_t>::value) ||
-                                                (std::is_same<T, uint32_t>::value)); break;
-            case GGUF_TYPE_FLOAT32: GGML_ASSERT((std::is_same<T, float>::value)); break;
+            case GGUF_TYPE_INT32:   GGML_ASSERT((std::is_same<T,  int32_t>::value) ||
+                                                (std::is_same<T, uint32_t>::value));    break;
+            case GGUF_TYPE_FLOAT32: GGML_ASSERT((std::is_same<T, float>::value));       break;
+            case GGUF_TYPE_STRING:  GGML_ASSERT((std::is_same<T, std::string>::value)); break;
             default:
-                throw std::runtime_error(format("%s is not a float32/uint32/int32 array", key.c_str()));
+                throw std::runtime_error(format("%s is not a string/float32/uint32/int32 array", key.c_str()));
         }
 
         if (arr_info.length > N_MAX) {
             throw std::runtime_error(format("array length %u for key %s exceeds max %u", (uint32_t) arr_info.length, key.c_str(), (uint32_t) N_MAX));
         }
 
-        std::copy((const T*)arr_info.data, (const T *)arr_info.data + arr_info.length, result.begin());
+        if constexpr (std::is_same<T, std::string>::value) {
+            const size_t n_items = gguf_get_arr_n(ctx, kid);
+
+            for (size_t i = 0; i < n_items; i++) {
+                const T value = gguf_get_arr_str(ctx, kid, i);
+                result[i] = value;
+            }
+        } else {
+            std::copy((const T*)arr_info.data, (const T *)arr_info.data + arr_info.length, result.begin());
+        }
 
         return true;
     }
 
@@ -352,6 +376,8 @@
         return get_arr(llm_kv(kid), result, required);
     }
 
+    template bool llama_model_loader::get_arr<std::vector<std::string>>(enum llm_kv kid, std::vector<std::string> & result, bool required);
+
     template<typename T>
     bool llama_model_loader::get_key(const std::string & key, T & result, bool required) {
         auto it = kv_overrides.find(key);
@@ -470,7 +496,7 @@ llama_model_loader::llama_model_loader(
     meta.reset(gguf_init_from_file(fname.c_str(), params));
     if (!meta) {
-        throw std::runtime_error(format("%s: failed to load model from %s\n", __func__, fname.c_str()));
+        throw std::runtime_error(format("%s: failed to load model from %s", __func__, fname.c_str()));
     }
 
     get_key(llm_kv(LLM_KV_GENERAL_ARCHITECTURE), arch_name, false);
@@ -529,7 +555,7 @@ llama_model_loader::llama_model_loader(
             };
             gguf_context_ptr ctx_gguf { gguf_init_from_file(fname_split, split_params) };
             if (!ctx_gguf) {
-                throw std::runtime_error(format("%s: failed to load GGUF split from %s\n", __func__, fname_split));
+                throw std::runtime_error(format("%s: failed to load GGUF split from %s", __func__, fname_split));
             }
 
             // check idx
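The init_mappings hunk just below replaces a hard failure with a defensive capability probe: a missing CPU device, a missing backend registry, or a missing "ggml_backend_cpu_is_numa" symbol now all degrade to is_numa = false instead of throwing. The same lookup pattern, condensed into a standalone helper; this mirrors the hunk rather than adding anything new, and the exact headers providing ggml_is_numa's declaration are assumed:

#include "ggml-backend.h"
#include "ggml-cpu.h" // assumed location of ggml_is_numa's declaration

static bool probe_is_numa() {
    bool is_numa = false;

    ggml_backend_dev_t dev = ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU);
    if (dev) {
        ggml_backend_reg_t reg = ggml_backend_dev_backend_reg(dev);
        // optional capability: an absent symbol simply means "not NUMA-aware"
        auto * is_numa_fn = (decltype(ggml_is_numa) *)
            ggml_backend_reg_get_proc_address(reg, "ggml_backend_cpu_is_numa");
        if (is_numa_fn) {
            is_numa = is_numa_fn();
        }
    }

    return is_numa;
}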
@@ -823,13 +849,18 @@ void llama_model_loader::init_mappings(bool prefetch, llama_mlocks * mlock_mmaps
     mappings.reserve(files.size());
     mmaps_used.reserve(files.size());
     for (const auto & file : files) {
-        auto * reg = ggml_backend_dev_backend_reg(ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU));
-        if (!reg) {
-            throw std::runtime_error(format("%s: no CPU backend found", __func__));
+        bool is_numa = false;
+
+        auto * dev = ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU);
+        if (dev) {
+            auto * reg = ggml_backend_dev_backend_reg(dev);
+            auto * is_numa_fn = (decltype(ggml_is_numa) *) ggml_backend_reg_get_proc_address(reg, "ggml_backend_cpu_is_numa");
+            if (is_numa_fn) {
+                is_numa = is_numa_fn();
+            }
         }
 
-        auto * is_numa_fn = (decltype(ggml_is_numa) *) ggml_backend_reg_get_proc_address(reg, "ggml_backend_cpu_is_numa");
-        std::unique_ptr<llama_mmap> mapping = std::make_unique<llama_mmap>(file.get(), prefetch ? -1 : 0, is_numa_fn());
+        std::unique_ptr<llama_mmap> mapping = std::make_unique<llama_mmap>(file.get(), prefetch ? -1 : 0, is_numa);
         mmaps_used.emplace_back(mapping->size(), 0);
         if (mlock_mmaps) {
             std::unique_ptr<llama_mlock> mlock_mmap(new llama_mlock());
diff --git a/llama/llama.cpp/src/llama-model-loader.h b/llama/llama.cpp/src/llama-model-loader.h
index 0f52b011b..c9189f6cb 100644
--- a/llama/llama.cpp/src/llama-model-loader.h
+++ b/llama/llama.cpp/src/llama-model-loader.h
@@ -58,8 +58,9 @@ struct llama_model_loader {
         }
     };
 
-    static const int TENSOR_NOT_REQUIRED = 1;
-    static const int TENSOR_DUPLICATED   = 2;
+    static const int TENSOR_NOT_REQUIRED = 1 << 0;
+    static const int TENSOR_DUPLICATED   = 1 << 1;
+    static const int TENSOR_SKIP         = 1 << 2;
 
     int n_kv      = 0;
     int n_tensors = 0;
diff --git a/llama/llama.cpp/src/llama-model-saver.cpp b/llama/llama.cpp/src/llama-model-saver.cpp
index a70b98923..563823dc3 100644
--- a/llama/llama.cpp/src/llama-model-saver.cpp
+++ b/llama/llama.cpp/src/llama-model-saver.cpp
@@ -228,6 +228,7 @@ void llama_model_saver::add_kv_from_model() {
     // add_kv(LLM_KV_TOKENIZER_MASK_ID, ???);
     add_kv(LLM_KV_TOKENIZER_ADD_BOS, vocab.get_add_bos());
     add_kv(LLM_KV_TOKENIZER_ADD_EOS, vocab.get_add_eos());
+    add_kv(LLM_KV_TOKENIZER_ADD_SEP, vocab.get_add_sep());
     add_kv(LLM_KV_TOKENIZER_ADD_PREFIX, vocab.get_add_space_prefix());
     add_kv(LLM_KV_TOKENIZER_REMOVE_EXTRA_WS, vocab.get_remove_extra_whitespaces());
     add_kv(LLM_KV_TOKENIZER_PRECOMPILED_CHARSMAP, vocab.get_precompiled_charsmap());
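TENSOR_NOT_REQUIRED and TENSOR_DUPLICATED were already combined as flags by callers (later in this diff, e.g. TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0)), so the model-loader.h change above rewrites them as explicit powers of two before adding TENSOR_SKIP as a third, independent bit. A tiny self-contained check of the intended usage:

#include <cassert>

static const int TENSOR_NOT_REQUIRED = 1 << 0;
static const int TENSOR_DUPLICATED   = 1 << 1;
static const int TENSOR_SKIP         = 1 << 2;

int main() {
    const int flags = TENSOR_NOT_REQUIRED | TENSOR_DUPLICATED;
    assert((flags & TENSOR_DUPLICATED) != 0);
    assert((flags & TENSOR_SKIP)       == 0); // the skip bit stays independent of the others
    return 0;
}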
"E2B"; + case LLM_TYPE_E4B: return "E4B"; default: return "?B"; } } @@ -175,6 +192,13 @@ static bool weight_buft_supported(const llama_hparams & hparams, ggml_tensor * w ggml_tensor * a = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, w->ne[0], w->ne[1], w->ne[2], w->ne[3]); op_tensor = ggml_add(ctx, a, w); } break; + case GGML_OP_ADD_ID: + { + int n_expert_used = hparams.n_expert_used; + ggml_tensor * a = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, w->ne[0], n_expert_used, 512); + ggml_tensor * c = ggml_new_tensor_2d(ctx, GGML_TYPE_I32, n_expert_used, 512); + op_tensor = ggml_add_id(ctx, a, w, c); + } break; case GGML_OP_MUL: { ggml_tensor * a = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, w->ne[0], w->ne[1], w->ne[2], w->ne[3]); @@ -200,23 +224,27 @@ static bool weight_buft_supported(const llama_hparams & hparams, ggml_tensor * w } break; case GGML_OP_SSM_CONV: { - // FIXME - ggml_tensor * conv_x = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, 12345, w->ne[1], 6789); + const int64_t n_seq_tokens = 512; + const int64_t n_seqs = 3; + ggml_tensor * conv_x = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, w->ne[0] - 1 + n_seq_tokens, w->ne[1], n_seqs); op_tensor = ggml_ssm_conv(ctx, conv_x, w); } break; case GGML_OP_SSM_SCAN: { - // FIXME - const int64_t d_state = w->ne[0]; - const int64_t d_inner = w->ne[1]; + // w is ssm_a, which is used to distinguish Mamba-1 and Mamba-2 + const int64_t d_state = w->ne[0] == 1 ? hparams.ssm_d_state : w->ne[0]; + const int64_t n_head = w->ne[1]; + const int64_t head_dim = hparams.ssm_d_inner / n_head; + const int64_t n_group = hparams.ssm_n_group ? hparams.ssm_n_group : 1; const int64_t n_seq_tokens = 512; - const int64_t n_seqs = 1; - ggml_tensor * s = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, d_state, d_inner, n_seqs); - ggml_tensor * x = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, d_inner, n_seq_tokens, n_seqs); - ggml_tensor * dt = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, d_inner, n_seq_tokens, n_seqs); - ggml_tensor * B = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, d_state, n_seq_tokens, n_seqs); - ggml_tensor * C = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, d_state, n_seq_tokens, n_seqs); - op_tensor = ggml_ssm_scan(ctx, s, x, dt, w, B, C); + const int64_t n_seqs = 3; + ggml_tensor * s = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, d_state, head_dim, n_head, n_seqs); + ggml_tensor * x = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, head_dim, n_head, n_seq_tokens, n_seqs); + ggml_tensor * dt = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, n_head, n_seq_tokens, n_seqs); + ggml_tensor * B = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, d_state, n_group, n_seq_tokens, n_seqs); + ggml_tensor * C = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, d_state, n_group, n_seq_tokens, n_seqs); + ggml_tensor * ids = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, n_seqs); + op_tensor = ggml_ssm_scan(ctx, s, x, dt, w, B, C, ids); } break; case GGML_OP_RWKV_WKV6: { @@ -239,6 +267,10 @@ static bool weight_buft_supported(const llama_hparams & hparams, ggml_tensor * w ggml_tensor * b = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, n_embd, w->ne[1], 1, 1); op_tensor = ggml_im2col(ctx, w, b, 1, 0, 0, 0, 1, 0, false, GGML_TYPE_F16); } break; + case GGML_OP_SCALE: + { + op_tensor = ggml_scale(ctx, w, 1.0f); + } break; default: GGML_ABORT("%s: missing test for op %s for tensor %s", __func__, ggml_op_name(op), w->name); } @@ -271,7 +303,7 @@ static ggml_backend_buffer_type_t select_weight_buft(const llama_hparams & hpara } // CPU: ACCEL -> GPU host -> CPU extra -> CPU -static buft_list_t make_cpu_buft_list(const std::vector & devices) { +static buft_list_t 
@@ -271,7 +303,7 @@ static ggml_backend_buffer_type_t select_weight_buft(const llama_hparams & hpara
 }
 
 // CPU: ACCEL -> GPU host -> CPU extra -> CPU
-static buft_list_t make_cpu_buft_list(const std::vector<ggml_backend_dev_t> & devices) {
+static buft_list_t make_cpu_buft_list(const std::vector<ggml_backend_dev_t> & devices, bool use_extra_bufts) {
     buft_list_t buft_list;
 
     // add ACCEL buffer types
@@ -300,21 +332,22 @@ static buft_list_t make_cpu_buft_list(const std::vector<ggml_backend_dev_t> & de
         }
     }
 
-    // add extra buffer types, only if no GPU device is present
-    // ref: https://github.com/ggml-org/llama.cpp/issues/12481#issuecomment-2743136094
-    auto * cpu_dev = ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU);
-    if (cpu_dev == nullptr) {
-        throw std::runtime_error(format("%s: no CPU backend found", __func__));
-    }
+    // add extra buffer types
+    if (use_extra_bufts) {
+        auto * cpu_dev = ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU);
+        if (cpu_dev == nullptr) {
+            throw std::runtime_error(format("%s: no CPU backend found", __func__));
+        }
 
-    auto * cpu_reg = ggml_backend_dev_backend_reg(cpu_dev);
-    auto ggml_backend_dev_get_extra_bufts_fn = (ggml_backend_dev_get_extra_bufts_t)
-        ggml_backend_reg_get_proc_address(cpu_reg, "ggml_backend_dev_get_extra_bufts");
-    if (ggml_backend_dev_get_extra_bufts_fn) {
-        ggml_backend_buffer_type_t * extra_bufts = ggml_backend_dev_get_extra_bufts_fn(cpu_dev);
-        while (extra_bufts && *extra_bufts) {
-            buft_list.emplace_back(cpu_dev, *extra_bufts);
-            ++extra_bufts;
+        auto * cpu_reg = ggml_backend_dev_backend_reg(cpu_dev);
+        auto ggml_backend_dev_get_extra_bufts_fn = (ggml_backend_dev_get_extra_bufts_t)
+            ggml_backend_reg_get_proc_address(cpu_reg, "ggml_backend_dev_get_extra_bufts");
+        if (ggml_backend_dev_get_extra_bufts_fn) {
+            ggml_backend_buffer_type_t * extra_bufts = ggml_backend_dev_get_extra_bufts_fn(cpu_dev);
+            while (extra_bufts && *extra_bufts) {
+                buft_list.emplace_back(cpu_dev, *extra_bufts);
+                ++extra_bufts;
+            }
         }
     }
 
@@ -463,10 +496,17 @@ void llama_model::load_hparams(llama_model_loader & ml) {
         GGML_ASSERT(hparams.n_expert_used == 0);
     }
 
-    // zero-out the array hparams
     std::fill(hparams.n_head_arr.begin(),    hparams.n_head_arr.end(),    0);
     std::fill(hparams.n_head_kv_arr.begin(), hparams.n_head_kv_arr.end(), 0);
     std::fill(hparams.n_ff_arr.begin(),      hparams.n_ff_arr.end(),      0);
+    std::fill(
+        hparams.recurrent_layer_arr.begin(),
+        hparams.recurrent_layer_arr.end(),
+        llm_arch_is_recurrent(ml.get_arch()));
+
+    std::fill(hparams.rope_sections.begin(), hparams.rope_sections.end(), 0);
+
+    std::fill(hparams.swa_layers.begin(), hparams.swa_layers.end(), 0);
 
     ml.get_key_or_arr(LLM_KV_FEED_FORWARD_LENGTH,  hparams.n_ff_arr,   hparams.n_layer, false);
     ml.get_key_or_arr(LLM_KV_ATTENTION_HEAD_COUNT, hparams.n_head_arr, hparams.n_layer, false);
@@ -537,6 +577,12 @@ void llama_model::load_hparams(llama_model_loader & ml) {
     uint32_t n_vocab = 0;
     ml.get_key(LLM_KV_VOCAB_SIZE, n_vocab, false) || ml.get_arr_n(LLM_KV_TOKENIZER_LIST, n_vocab, false);
 
+    // for classifier models
+    ml.get_arr(LLM_KV_CLASSIFIER_OUTPUT_LABELS, classifier_labels, false);
+    if (!classifier_labels.empty()) {
+        hparams.n_cls_out = classifier_labels.size();
+    }
+
     // arch-specific KVs
     switch (arch) {
         case LLM_ARCH_LLAMA:
@@ -555,6 +601,7 @@
                         case 22: type = LLM_TYPE_1B; break;
                         case 26: type = LLM_TYPE_3B; break;
                         case 28: type = LLM_TYPE_3B; break; // Llama 3.2 3B
+                        case 30: type = LLM_TYPE_256M; break; // smoldocling 256M
                         // granite uses a vocab with len 49152
                         case 32: type = n_vocab == 49152 ? LLM_TYPE_3B : (n_vocab < 40000 ?
LLM_TYPE_7B : LLM_TYPE_8B); break; case 36: type = LLM_TYPE_8B; break; // granite @@ -571,9 +618,10 @@ void llama_model::load_hparams(llama_model_loader & ml) { ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH, hparams.n_ff_exp); ml.get_key(LLM_KV_INTERLEAVE_MOE_LAYER_STEP, hparams.n_moe_layer_step); - hparams.n_swa_pattern = 4; // pattern: 3 chunked - 1 full - hparams.n_attn_chunk = 8192; // should this be a gguf kv? currently it's the same for Scout and Maverick - hparams.n_swa = 1; // TODO @ngxson : this is added to trigger the SWA branch (we store the chunked attn mask in the SWA tensor), will need to clean this up later + + hparams.swa_type = LLAMA_SWA_TYPE_CHUNKED; + hparams.n_swa = 8192; // should this be a gguf kv? currently it's the same for Scout and Maverick + hparams.set_swa_pattern(4); // pattern: 3 chunked - 1 full switch (hparams.n_expert) { case 16: type = LLM_TYPE_17B_16E; break; @@ -585,6 +633,16 @@ void llama_model::load_hparams(llama_model_loader & ml) { hparams.use_kq_norm = false; } } break; + case LLM_ARCH_ARCEE: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + + // Arcee uses the same structure as Llama + switch (hparams.n_layer) { + case 36: type = LLM_TYPE_4B; break; + default: type = LLM_TYPE_UNKNOWN; + } + } break; case LLM_ARCH_DECI: { ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); @@ -602,6 +660,9 @@ void llama_model::load_hparams(llama_model_loader & ml) { ml.get_key(LLM_KV_RESIDUAL_SCALE, hparams.f_residual_scale); ml.get_key(LLM_KV_LOGIT_SCALE, hparams.f_logit_scale); + // MiniCPM uses rope by default, unlike Granite which uses it as a switch + hparams.rope_finetuned = true; + switch (hparams.n_layer) { case 52: type = LLM_TYPE_1B; break; case 40: type = LLM_TYPE_2B; break; @@ -725,6 +786,16 @@ void llama_model::load_hparams(llama_model_loader & ml) { } } } break; + case LLM_ARCH_NEO_BERT: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + ml.get_key(LLM_KV_ATTENTION_CAUSAL, hparams.causal_attn); + ml.get_key(LLM_KV_POOLING_TYPE, hparams.pooling_type); + + if (hparams.n_layer == 28) { + type = LLM_TYPE_250M; + } + } break; case LLM_ARCH_BLOOM: { ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps); @@ -797,6 +868,36 @@ void llama_model::load_hparams(llama_model_loader & ml) { default: type = LLM_TYPE_UNKNOWN; } } break; + case LLM_ARCH_DREAM: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + // Dream models are primarily 7B with 28 layers + switch (hparams.n_layer) { + case 28: + type = LLM_TYPE_7B; + break; + default: + type = LLM_TYPE_UNKNOWN; + } + // Set non-causal attention for diffusion models + hparams.causal_attn = false; + } + break; + case LLM_ARCH_LLADA: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + // LLaDA-8B has 32 layers, similar to LLaMA but for diffusion + switch (hparams.n_layer) { + case 32: + type = LLM_TYPE_8B; + break; + default: + type = LLM_TYPE_UNKNOWN; + } + // Set non-causal attention for diffusion models + hparams.causal_attn = false; + } + break; case LLM_ARCH_QWEN2MOE: { ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH, hparams.n_ff_exp, false); @@ -811,6 +912,7 @@ void llama_model::load_hparams(llama_model_loader & ml) { } break; case LLM_ARCH_QWEN3: { + ml.get_key(LLM_KV_POOLING_TYPE, hparams.pooling_type, false); ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); switch 
(hparams.n_layer) { case 28: type = hparams.n_embd == 1024 ? LLM_TYPE_0_6B : LLM_TYPE_1_7B; break; @@ -852,22 +954,17 @@ void llama_model::load_hparams(llama_model_loader & ml) { default: type = LLM_TYPE_UNKNOWN; } - // for backward compatibility ; see: https://github.com/ggerganov/llama.cpp/pull/8931 - if ((hparams.n_layer == 32 || hparams.n_layer == 40) && hparams.n_ctx_train == 4096) { - // default value for Phi-3-mini-4k-instruct and Phi-3-medium-4k-instruct - hparams.n_swa = 2047; - } else if (hparams.n_layer == 32 && hparams.n_head_kv(0) == 32 && hparams.n_ctx_train == 131072) { - // default value for Phi-3-mini-128k-instruct - // note: this seems incorrect because the window is bigger than the train context? - hparams.n_swa = 262144; - } else if (hparams.n_layer == 40 && hparams.n_ctx_train == 131072) { - // default value for Phi-3-medium-128k-instruct - // note: this seems incorrect because the window is equal to the train context? - hparams.n_swa = 131072; - } - bool found_swa = ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW, hparams.n_swa, false); - if (!found_swa && hparams.n_swa == 0) { - throw std::runtime_error("invalid value for sliding_window"); + const bool found_swa = ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW, hparams.n_swa, false); + + if (found_swa && hparams.n_swa > 0) { + LLAMA_LOG_WARN("%s: Phi SWA is currently disabled - results might be suboptimal for some models (see %s)\n", + __func__, "https://github.com/ggml-org/llama.cpp/pull/13676"); + + // TODO: fix conversion scripts to correctly populate `n_swa` and `n_swa_pattern` + hparams.swa_type = LLAMA_SWA_TYPE_NONE; + + hparams.n_swa = 0; + hparams.set_swa_pattern(1); } } break; case LLM_ARCH_PHIMOE: @@ -888,6 +985,33 @@ void llama_model::load_hparams(llama_model_loader & ml) { default: type = LLM_TYPE_UNKNOWN; } } break; + case LLM_ARCH_PLAMO2: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + + // Load Mamba SSM parameters + ml.get_key(LLM_KV_SSM_CONV_KERNEL, hparams.ssm_d_conv); + ml.get_key(LLM_KV_SSM_INNER_SIZE, hparams.ssm_d_inner); + ml.get_key(LLM_KV_SSM_STATE_SIZE, hparams.ssm_d_state); + ml.get_key(LLM_KV_SSM_TIME_STEP_RANK, hparams.ssm_dt_rank); + ml.get_key(LLM_KV_SSM_GROUP_COUNT, hparams.ssm_n_group); + + for (uint32_t i = 0; i < hparams.n_layer; ++i) { + hparams.recurrent_layer_arr[i] = hparams.n_head_kv(i) == 0; + } + + switch (hparams.n_layer) { + case 16: type = LLM_TYPE_1B; break; + case 32: + if (hparams.n_embd == 2048) { + type = LLM_TYPE_2B; + } else if (hparams.n_embd == 4096) { + type = LLM_TYPE_8B; + } + break; + default: type = LLM_TYPE_UNKNOWN; + } + } break; case LLM_ARCH_GPT2: { ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps); @@ -937,8 +1061,9 @@ void llama_model::load_hparams(llama_model_loader & ml) { } break; case LLM_ARCH_GEMMA2: { + hparams.swa_type = LLAMA_SWA_TYPE_STANDARD; hparams.n_swa = 4096; // default value of gemma 2 - hparams.n_swa_pattern = 2; + hparams.set_swa_pattern(2); hparams.attn_soft_cap = true; ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW, hparams.n_swa, false); @@ -952,10 +1077,16 @@ void llama_model::load_hparams(llama_model_loader & ml) { case 46: type = LLM_TYPE_27B; break; default: type = LLM_TYPE_UNKNOWN; } + + // ref: https://github.com/google/gemma_pytorch/blob/014acb7ac4563a5f77c76d7ff98f31b568c16508/gemma/config.py#L173 + hparams.f_attention_scale = type == LLM_TYPE_27B + ? 
1.0f / std::sqrt(float(hparams.n_embd / hparams.n_head(0)))
+                : 1.0f / std::sqrt(float(hparams.n_embd_head_k));
             } break;
         case LLM_ARCH_GEMMA3:
             {
-                hparams.n_swa_pattern = 6;
+                hparams.swa_type = LLAMA_SWA_TYPE_STANDARD;
+                hparams.set_swa_pattern(6);
 
                 hparams.rope_freq_base_train_swa  = 10000.0f;
                 hparams.rope_freq_scale_train_swa = 1.0f;
@@ -971,10 +1102,29 @@
                     default: type = LLM_TYPE_UNKNOWN;
                 }
 
+                // ref: https://github.com/google/gemma_pytorch/blob/014acb7ac4563a5f77c76d7ff98f31b568c16508/gemma/config.py#L289
                 hparams.f_attention_scale = type == LLM_TYPE_27B
                     ? 1.0f / std::sqrt(float(hparams.n_embd / hparams.n_head(0)))
                     : 1.0f / std::sqrt(float(hparams.n_embd_head_k));
             } break;
+        case LLM_ARCH_GEMMA3N:
+            {
+                hparams.swa_type = LLAMA_SWA_TYPE_STANDARD;
+                hparams.set_swa_pattern(5);
+
+                hparams.rope_freq_base_train_swa  = 10000.0f;
+                hparams.rope_freq_scale_train_swa = 1.0f;
+                hparams.f_attention_scale = 1.0f;
+
+                ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW, hparams.n_swa);
+                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
+
+                switch (hparams.n_layer) {
+                    case 30: type = LLM_TYPE_E2B; break;
+                    case 35: type = LLM_TYPE_E4B; break;
+                    default: type = LLM_TYPE_UNKNOWN;
+                }
+            } break;
         case LLM_ARCH_STARCODER2:
             {
                 ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
@@ -1018,6 +1168,58 @@
                     default: type = LLM_TYPE_UNKNOWN;
                 }
             } break;
+        case LLM_ARCH_MAMBA2:
+            {
+                ml.get_key(LLM_KV_SSM_CONV_KERNEL,    hparams.ssm_d_conv);
+                ml.get_key(LLM_KV_SSM_INNER_SIZE,     hparams.ssm_d_inner);
+                ml.get_key(LLM_KV_SSM_STATE_SIZE,     hparams.ssm_d_state);
+                ml.get_key(LLM_KV_SSM_TIME_STEP_RANK, hparams.ssm_dt_rank);
+                ml.get_key(LLM_KV_SSM_GROUP_COUNT,    hparams.ssm_n_group);
+
+                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
+
+                switch (hparams.n_layer) {
+                    case 24:
+                        switch (hparams.n_embd) {
+                            case 768: type = LLM_TYPE_SMALL; break;
+                            default: type = LLM_TYPE_UNKNOWN;
+                        } break;
+                    case 48:
+                        switch (hparams.n_embd) {
+                            case 1024: type = LLM_TYPE_MEDIUM; break;
+                            case 1536: type = LLM_TYPE_LARGE; break;
+                            case 2048: type = LLM_TYPE_XL; break;
+                            default: type = LLM_TYPE_UNKNOWN;
+                        } break;
+                    case 64:
+                        switch (hparams.n_embd) {
+                            case 2560: type = LLM_TYPE_3B; break;
+                            case 4096: type = LLM_TYPE_7B; break;
+                            default: type = LLM_TYPE_UNKNOWN;
+                        } break;
+                    default: type = LLM_TYPE_UNKNOWN;
+                }
+            } break;
+        case LLM_ARCH_JAMBA:
+            {
+                ml.get_key(LLM_KV_SSM_CONV_KERNEL,    hparams.ssm_d_conv);
+                ml.get_key(LLM_KV_SSM_INNER_SIZE,     hparams.ssm_d_inner);
+                ml.get_key(LLM_KV_SSM_STATE_SIZE,     hparams.ssm_d_state);
+                ml.get_key(LLM_KV_SSM_TIME_STEP_RANK, hparams.ssm_dt_rank);
+
+                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
+
+                for (uint32_t i = 0; i < hparams.n_layer; ++i) {
+                    hparams.recurrent_layer_arr[i] = hparams.n_head_kv(i) == 0;
+                }
+
+                switch (hparams.n_layer) {
+                    // TODO: Jamba layers are a bit heterogeneous, so naming this is hard.
+ case 12: // 900M 8x???M + case 32: // 51B 16x?B + default: type = LLM_TYPE_UNKNOWN; + } + } break; case LLM_ARCH_XVERSE: { ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); @@ -1039,7 +1241,8 @@ void llama_model::load_hparams(llama_model_loader & ml) { } break; case LLM_ARCH_COHERE2: { - hparams.n_swa_pattern = 4; + hparams.swa_type = LLAMA_SWA_TYPE_STANDARD; + hparams.set_swa_pattern(4); ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW, hparams.n_swa); ml.get_key(LLM_KV_LOGIT_SCALE, hparams.f_logit_scale); @@ -1196,7 +1399,7 @@ void llama_model::load_hparams(llama_model_loader & ml) { // that have no expert_gating_func model parameter set hparams.expert_gating_func = LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX; } - ml.get_key(LLM_KV_ROPE_SCALING_YARN_LOG_MUL, hparams.rope_yarn_log_mul); + ml.get_key(LLM_KV_ROPE_SCALING_YARN_LOG_MUL, hparams.rope_yarn_log_mul, false); switch (hparams.n_layer) { case 27: type = LLM_TYPE_16B; break; @@ -1244,6 +1447,34 @@ void llama_model::load_hparams(llama_model_loader & ml) { default: type = LLM_TYPE_UNKNOWN; } } break; + case LLM_ARCH_GLM4_MOE: + { + ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH, hparams.n_ff_exp); + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + + // MoE parameters + ml.get_key(LLM_KV_EXPERT_COUNT, hparams.n_expert); + ml.get_key(LLM_KV_EXPERT_USED_COUNT, hparams.n_expert_used); + ml.get_key(LLM_KV_EXPERT_SHARED_COUNT, hparams.n_expert_shared); + ml.get_key(LLM_KV_LEADING_DENSE_BLOCK_COUNT, hparams.n_layer_dense_lead, false); + ml.get_key(LLM_KV_EXPERT_WEIGHTS_SCALE, hparams.expert_weights_scale); + ml.get_key(LLM_KV_EXPERT_WEIGHTS_NORM, hparams.expert_weights_norm, false); + + // Expert gating function (GLM-4.5 uses sigmoid) + ml.get_key(LLM_KV_EXPERT_GATING_FUNC, hparams.expert_gating_func, false); + if (hparams.expert_gating_func == LLAMA_EXPERT_GATING_FUNC_TYPE_NONE) { + hparams.expert_gating_func = LLAMA_EXPERT_GATING_FUNC_TYPE_SIGMOID; + } + + // NextN/MTP parameters + ml.get_key(LLM_KV_NEXTN_PREDICT_LAYERS, hparams.nextn_predict_layers, false); + + switch (hparams.n_layer) { + case 47: type = LLM_TYPE_106B_A12B; break; // GLM-4.5-Air (46 layers + 1 NextN layer) + case 93: type = LLM_TYPE_355B_A32B; break; // GLM-4.5 (92 layers + 1 NextN layer) + default: type = LLM_TYPE_UNKNOWN; + } + } break; case LLM_ARCH_BITNET: { ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); @@ -1320,6 +1551,23 @@ void llama_model::load_hparams(llama_model_loader & ml) { default: type = LLM_TYPE_UNKNOWN; } } break; + case LLM_ARCH_EXAONE4: + { + if (hparams.n_layer == 64) { // 32B + hparams.swa_type = LLAMA_SWA_TYPE_STANDARD; + hparams.n_swa = 4096; + hparams.set_swa_pattern(4); + } + + ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW, hparams.n_swa, false); + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + + switch (hparams.n_layer) { + case 30: type = LLM_TYPE_1_2B; break; + case 64: type = LLM_TYPE_32B; break; + default: type = LLM_TYPE_UNKNOWN; + } + } break; case LLM_ARCH_RWKV6: case LLM_ARCH_RWKV6QWEN2: { @@ -1357,7 +1605,11 @@ void llama_model::load_hparams(llama_model_loader & ml) { ml.get_key(LLM_KV_TOKEN_SHIFT_COUNT, hparams.token_shift_count, false); switch (hparams.n_layer) { - case 12: type = LLM_TYPE_190M; break; + case 12: + switch (hparams.n_embd) { + case 768: type = LLM_TYPE_190M; break; + default: type = LLM_TYPE_UNKNOWN; + } break; case 24: switch (hparams.n_embd) { case 1024: type = LLM_TYPE_450M; break; @@ -1370,7 +1622,17 @@ void 
llama_model::load_hparams(llama_model_loader & ml) { case 3584: type = LLM_TYPE_7B; break; default: type = LLM_TYPE_UNKNOWN; } break; - case 32: type = LLM_TYPE_2_9B; break; // RWKV-7-World + case 32: + switch (hparams.n_embd) { + case 2560: type = LLM_TYPE_2_9B; break; + case 4096: type = LLM_TYPE_7B; break; + default: type = LLM_TYPE_UNKNOWN; + } break; + case 61: + switch (hparams.n_embd) { + case 4096: type = LLM_TYPE_14B; break; + default: type = LLM_TYPE_UNKNOWN; + } break; default: type = LLM_TYPE_UNKNOWN; } } break; @@ -1383,12 +1645,54 @@ void llama_model::load_hparams(llama_model_loader & ml) { ml.get_key(LLM_KV_EMBEDDING_SCALE, hparams.f_embedding_scale); ml.get_key(LLM_KV_ATTENTION_SCALE, hparams.f_attention_scale); + // Granite uses rope_finetuned as a switch for rope, so default to true + bool rope_finetuned = true; + ml.get_key(LLM_KV_ROPE_SCALING_FINETUNED, rope_finetuned, false); + hparams.rope_finetuned = rope_finetuned; + switch (hparams.n_layer) { case 32: type = LLM_TYPE_3B; break; case 40: type = LLM_TYPE_3B; break; // Add additional layer/vocab/etc checks here for other model sizes default: type = LLM_TYPE_UNKNOWN; } + + // For Granite MoE Shared + ml.get_key(LLM_KV_EXPERT_SHARED_FEED_FORWARD_LENGTH, hparams.n_ff_shexp, /* required */ false); + } break; + case LLM_ARCH_GRANITE_HYBRID: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + ml.get_key(LLM_KV_LOGIT_SCALE, hparams.f_logit_scale, /* required */ false); + ml.get_key(LLM_KV_RESIDUAL_SCALE, hparams.f_residual_scale, /* required */ false); + ml.get_key(LLM_KV_EMBEDDING_SCALE, hparams.f_embedding_scale, /* required */ false); + ml.get_key(LLM_KV_ATTENTION_SCALE, hparams.f_attention_scale, /* required */ false); + + ml.get_key(LLM_KV_SSM_CONV_KERNEL, hparams.ssm_d_conv); + ml.get_key(LLM_KV_SSM_INNER_SIZE, hparams.ssm_d_inner); + ml.get_key(LLM_KV_SSM_STATE_SIZE, hparams.ssm_d_state); + ml.get_key(LLM_KV_SSM_TIME_STEP_RANK, hparams.ssm_dt_rank); + ml.get_key(LLM_KV_SSM_GROUP_COUNT, hparams.ssm_n_group); + + // Granite uses rope_finetuned as a switch for rope, so default to true + bool rope_finetuned = true; + ml.get_key(LLM_KV_ROPE_SCALING_FINETUNED, rope_finetuned, false); + hparams.rope_finetuned = rope_finetuned; + + // A layer is recurrent IFF the n_head_kv value is set to 0 + for (uint32_t i = 0; i < hparams.n_layer; ++i) { + hparams.recurrent_layer_arr[i] = hparams.n_head_kv(i) == 0; + } + + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + + switch (hparams.n_layer) { + // TODO: Add llm type label (not sure this is useful) + default: type = LLM_TYPE_UNKNOWN; + } + + // For Granite MoE Shared + ml.get_key(LLM_KV_EXPERT_SHARED_FEED_FORWARD_LENGTH, hparams.n_ff_shexp, /* required */ false); } break; case LLM_ARCH_CHAMELEON: { @@ -1439,6 +1743,150 @@ void llama_model::load_hparams(llama_model_loader & ml) { default: type = LLM_TYPE_UNKNOWN; } } break; + case LLM_ARCH_DOTS1: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + ml.get_key(LLM_KV_LEADING_DENSE_BLOCK_COUNT, hparams.n_layer_dense_lead); + ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH, hparams.n_ff_exp); + ml.get_key(LLM_KV_EXPERT_SHARED_COUNT, hparams.n_expert_shared); + ml.get_key(LLM_KV_EXPERT_WEIGHTS_SCALE, hparams.expert_weights_scale); + ml.get_key(LLM_KV_EXPERT_WEIGHTS_NORM, hparams.expert_weights_norm, false); + ml.get_key(LLM_KV_EXPERT_GATING_FUNC, hparams.expert_gating_func, false); + switch (hparams.n_layer) { + case 62: type = LLM_TYPE_142B; break; + default: 
type = LLM_TYPE_UNKNOWN; + } + } break; + case LLM_ARCH_ERNIE4_5: + case LLM_ARCH_ERNIE4_5_MOE: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + if (arch == LLM_ARCH_ERNIE4_5_MOE) { + ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH, hparams.n_ff_exp); + ml.get_key(LLM_KV_EXPERT_SHARED_FEED_FORWARD_LENGTH, hparams.n_ff_shexp, false); + ml.get_key(LLM_KV_INTERLEAVE_MOE_LAYER_STEP, hparams.n_moe_layer_step); + ml.get_key(LLM_KV_LEADING_DENSE_BLOCK_COUNT, hparams.n_layer_dense_lead); + } + + switch (hparams.n_layer) { + case 18: type = LLM_TYPE_0_3B; break; + case 28: type = LLM_TYPE_21B_A3B; break; + case 54: type = LLM_TYPE_300B_A47B; break; + default: type = LLM_TYPE_UNKNOWN; + } + } break; + case LLM_ARCH_FALCON_H1: + { + // Common parameters + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + + // SSM parameters + ml.get_key(LLM_KV_SSM_CONV_KERNEL, hparams.ssm_d_conv); + ml.get_key(LLM_KV_SSM_INNER_SIZE, hparams.ssm_d_inner); + ml.get_key(LLM_KV_SSM_STATE_SIZE, hparams.ssm_d_state); + ml.get_key(LLM_KV_SSM_TIME_STEP_RANK, hparams.ssm_dt_rank); + ml.get_key(LLM_KV_SSM_GROUP_COUNT, hparams.ssm_n_group); + + std::fill(hparams.recurrent_layer_arr.begin(), hparams.recurrent_layer_arr.end(), true); + + switch (hparams.n_layer) { + case 36: + type = LLM_TYPE_0_5B; break; + case 24: + type = LLM_TYPE_1_5B; break; + case 66: + type = LLM_TYPE_1B; break; + case 32: + type = LLM_TYPE_3B; break; + case 44: + type = LLM_TYPE_7B; break; + case 72: + type = LLM_TYPE_34B; break; + default: + type = LLM_TYPE_UNKNOWN; + } + } break; + case LLM_ARCH_HUNYUAN_MOE: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH, hparams.n_ff_exp); + ml.get_key(LLM_KV_EXPERT_SHARED_FEED_FORWARD_LENGTH, hparams.n_ff_shexp); + + switch (hparams.n_layer) { + case 32: type = LLM_TYPE_A13B; break; + default: type = LLM_TYPE_UNKNOWN; + } + } break; + case LLM_ARCH_HUNYUAN_DENSE: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + + switch (hparams.n_embd) { + case 1024: type = LLM_TYPE_0_5B; break; + case 2048: type = LLM_TYPE_1_8B; break; + case 3072: type = LLM_TYPE_4B; break; + case 4096: type = LLM_TYPE_7B; break; + default: type = LLM_TYPE_UNKNOWN; + } + } break; + case LLM_ARCH_SMOLLM3: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + hparams.n_no_rope_layer_step = 4; + + switch (hparams.n_layer) { + case 36: type = LLM_TYPE_3B; break; + default: type = LLM_TYPE_UNKNOWN; + } + } break; + case LLM_ARCH_OPENAI_MOE: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH, hparams.n_ff_exp); + ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW, hparams.n_swa); + + hparams.swa_type = LLAMA_SWA_TYPE_STANDARD; + hparams.set_swa_pattern(2); + + // TODO: switch (hparams.n_layer) + } break; + case LLM_ARCH_LFM2: + { + ml.get_key(LLM_KV_SHORTCONV_L_CACHE, hparams.n_shortconv_l_cache); + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + for (uint32_t il = 0; il < hparams.n_layer; ++il) { + hparams.recurrent_layer_arr[il] = hparams.n_head_kv(il) == 0; + } + switch (hparams.n_embd) { + case 1024: type = LLM_TYPE_350M; break; + case 1536: type = LLM_TYPE_700M; break; + case 2048: type = LLM_TYPE_1_2B; break; + default: type = LLM_TYPE_UNKNOWN; + } + } break; + case LLM_ARCH_SMALLTHINKER: + { + const bool found_swa = ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW, hparams.n_swa, 
false); + + if (found_swa && hparams.n_swa > 0) { + hparams.swa_type = LLAMA_SWA_TYPE_STANDARD; + hparams.n_swa = 4096; + hparams.set_swa_pattern(4, true); + } else { + hparams.swa_type = LLAMA_SWA_TYPE_NONE; + hparams.n_no_rope_layer_step = hparams.n_layer; + } + + ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH, hparams.n_ff_exp, false); + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + ml.get_key(LLM_KV_EXPERT_GATING_FUNC, hparams.expert_gating_func, false); + + switch (hparams.n_layer) { + case 32: type = LLM_TYPE_4B; break; + case 52: type = LLM_TYPE_20B; break; + default: type = LLM_TYPE_UNKNOWN; + } + } break; default: throw std::runtime_error("unsupported model architecture"); } @@ -1472,7 +1920,7 @@ bool llama_model::load_tensors(llama_model_loader & ml) { LLAMA_LOG_INFO("%s: loading model tensors, this can take a while... (mmap = %s)\n", __func__, ml.use_mmap ? "true" : "false"); // build a list of buffer types for the CPU and GPU devices - pimpl->cpu_buft_list = make_cpu_buft_list(devices); + pimpl->cpu_buft_list = make_cpu_buft_list(devices, params.use_extra_bufts); for (auto * dev : devices) { buft_list_t buft_list = make_gpu_buft_list(dev, split_mode, tensor_split); // add CPU buffer types as a fallback @@ -1568,6 +2016,7 @@ bool llama_model::load_tensors(llama_model_loader & ml) { const auto TENSOR_DUPLICATED = llama_model_loader::TENSOR_DUPLICATED; const auto TENSOR_NOT_REQUIRED = llama_model_loader::TENSOR_NOT_REQUIRED; + const auto TENSOR_SKIP = llama_model_loader::TENSOR_SKIP; // create tensors for the weights { @@ -1623,7 +2072,7 @@ bool llama_model::load_tensors(llama_model_loader & ml) { } // skip unused tensors - if (info.op == GGML_OP_NONE) { + if (info.op == GGML_OP_NONE || flags & TENSOR_SKIP) { const size_t nbytes = ggml_nbytes(t_meta); LLAMA_LOG_WARN("model has unused tensor %s (size = %zu bytes) -- ignoring\n", tn.str().c_str(), nbytes); @@ -1633,11 +2082,15 @@ bool llama_model::load_tensors(llama_model_loader & ml) { return nullptr; } - // tensors with "bias" suffix are always used with GGML_OP_ADD + // tensors with "bias" suffix are always used with GGML_OP_ADD or GGML_OP_ADD_ID ggml_op op; bool bias = tn.suffix != nullptr && strcmp(tn.suffix, "bias") == 0; if (bias) { - op = GGML_OP_ADD; + if (info.op == GGML_OP_MUL_MAT_ID) { + op = GGML_OP_ADD_ID; + } else { + op = GGML_OP_ADD; + } } else { op = info.op; } @@ -1677,7 +2130,13 @@ bool llama_model::load_tensors(llama_model_loader & ml) { for (const auto * overrides = ml.tensor_buft_overrides; overrides->pattern != nullptr; ++overrides) { std::regex pattern(overrides->pattern); if (std::regex_search(tensor_name, pattern)) { - buft = overrides->buft; + if (overrides->buft == ggml_backend_cpu_buffer_type()) { + // when overriding to a CPU buffer, consider the extra buffer types + buft = select_weight_buft(hparams, t_meta, op, pimpl->cpu_buft_list); + } else { + buft = overrides->buft; + } + LLAMA_LOG_DEBUG("tensor %s (%zu MiB %s) buffer type overridden to %s\n", tensor_name.c_str(), ggml_nbytes(t_meta) / 1024 / 1024, ggml_type_name(t_meta->type), @@ -1787,9 +2246,63 @@ bool llama_model::load_tensors(llama_model_loader & ml) { layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd, n_ff, n_expert}, TENSOR_NOT_REQUIRED); layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), { n_ff, n_embd, n_expert}, 0); layer.ffn_up_exps = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS, "weight", i), {n_embd, n_ff, n_expert}, 0); + + // For Granite MoE 
Shared + if (hparams.n_ff_shexp > 0) { + layer.ffn_gate_shexp = create_tensor(tn(LLM_TENSOR_FFN_GATE_SHEXP, "weight", i), {n_embd, hparams.n_ff_shexp}, 0); + layer.ffn_up_shexp = create_tensor(tn(LLM_TENSOR_FFN_UP_SHEXP, "weight", i), {n_embd, hparams.n_ff_shexp}, 0); + layer.ffn_down_shexp = create_tensor(tn(LLM_TENSOR_FFN_DOWN_SHEXP, "weight", i), {hparams.n_ff_shexp, n_embd}, 0); + } } } } break; + case LLM_ARCH_LLADA: + { + tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), { n_embd, n_vocab }, 0); + + // output + output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), { n_embd }, 0); + output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), { n_embd, n_vocab }, TENSOR_NOT_REQUIRED); + + // if output is NULL, init from the input tok embed + if (output == NULL) { + output = + create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), { n_embd, n_vocab }, TENSOR_DUPLICATED); + } + + for (int i = 0; i < n_layer; ++i) { + auto & layer = layers[i]; + + layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), { n_embd }, 0); + + // Use separate Q, K, V projections without bias, matching LLaDALlamaBlock + layer.wq = + create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), { n_embd, n_embd_head_k * n_head }, 0); + layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), { n_embd, n_embd_k_gqa }, 0); + layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), { n_embd, n_embd_v_gqa }, 0); + // No bias for QKV projections as per config: include_bias=false, include_qkv_bias=false + layer.wo = + create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), { n_embd_head_k * n_head, n_embd }, 0); + layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), { n_embd }, TENSOR_NOT_REQUIRED); + + layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), { n_embd }, 0); + + layer.rope_freqs = create_tensor(tn(LLM_TENSOR_ROPE_FREQS, "weight", i), { n_rot / 2 }, + TENSOR_NOT_REQUIRED | (i != 0 ? 
TENSOR_DUPLICATED : 0)); + + layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), { n_embd, n_ff }, 0); + layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd }, 0); + layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), { n_embd, n_ff }, 0); + + // optional MLP bias + layer.ffn_gate_b = + create_tensor(tn(LLM_TENSOR_FFN_GATE, "bias", i), { n_ff }, TENSOR_NOT_REQUIRED); + layer.ffn_down_b = + create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i), { n_embd }, TENSOR_NOT_REQUIRED); + layer.ffn_up_b = create_tensor(tn(LLM_TENSOR_FFN_UP, "bias", i), { n_ff }, TENSOR_NOT_REQUIRED); + } + } + break; case LLM_ARCH_LLAMA4: { tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0); @@ -2116,7 +2629,7 @@ bool llama_model::load_tensors(llama_model_loader & ml) { case LLM_ARCH_NOMIC_BERT_MOE: { tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0); - type_embd = create_tensor(tn(LLM_TENSOR_TOKEN_TYPES, "weight"), {n_embd, n_token_types}, 0); + type_embd = create_tensor(tn(LLM_TENSOR_TOKEN_TYPES, "weight"), {n_embd, n_token_types}, TENSOR_NOT_REQUIRED); if (arch == LLM_ARCH_BERT) { pos_embd = create_tensor(tn(LLM_TENSOR_POS_EMBD, "weight"), {n_embd, n_ctx_train}, 0); @@ -2124,8 +2637,8 @@ bool llama_model::load_tensors(llama_model_loader & ml) { cls = create_tensor(tn(LLM_TENSOR_CLS, "weight"), {n_embd, n_embd}, TENSOR_NOT_REQUIRED); cls_b = create_tensor(tn(LLM_TENSOR_CLS, "bias"), {n_embd}, TENSOR_NOT_REQUIRED); - cls_out = create_tensor(tn(LLM_TENSOR_CLS_OUT, "weight"), {n_embd, 1}, TENSOR_NOT_REQUIRED); - cls_out_b = create_tensor(tn(LLM_TENSOR_CLS_OUT, "bias"), {1}, TENSOR_NOT_REQUIRED); + cls_out = create_tensor(tn(LLM_TENSOR_CLS_OUT, "weight"), {n_embd, hparams.n_cls_out}, TENSOR_NOT_REQUIRED); + cls_out_b = create_tensor(tn(LLM_TENSOR_CLS_OUT, "bias"), {hparams.n_cls_out}, TENSOR_NOT_REQUIRED); } tok_norm = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "weight"), {n_embd}, 0); @@ -2134,7 +2647,10 @@ bool llama_model::load_tensors(llama_model_loader & ml) { for (int i = 0; i < n_layer; ++i) { auto & layer = layers[i]; - if (arch == LLM_ARCH_BERT) { + layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, TENSOR_NOT_REQUIRED); + layer.bqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}, TENSOR_NOT_REQUIRED); + + if (!layer.wqkv) { layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0); layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd}, 0); @@ -2143,12 +2659,6 @@ bool llama_model::load_tensors(llama_model_loader & ml) { layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0); layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa}, 0); - } else { - layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, 0); - } - - if (arch == LLM_ARCH_NOMIC_BERT_MOE) { - layer.bqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}, 0); } layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0); @@ -2178,6 +2688,32 @@ bool llama_model::load_tensors(llama_model_loader & ml) { layer.layer_out_norm_b = create_tensor(tn(LLM_TENSOR_LAYER_OUT_NORM, "bias", i), {n_embd}, 0); } } break; + case LLM_ARCH_NEO_BERT: + { + tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0); + + cls = create_tensor(tn(LLM_TENSOR_CLS, "weight"), 
{n_embd, n_embd}, TENSOR_NOT_REQUIRED); + cls_b = create_tensor(tn(LLM_TENSOR_CLS, "bias"), {n_embd}, TENSOR_NOT_REQUIRED); + + cls_out = create_tensor(tn(LLM_TENSOR_CLS_OUT, "weight"), {n_embd, hparams.n_cls_out}, TENSOR_NOT_REQUIRED); + cls_out_b = create_tensor(tn(LLM_TENSOR_CLS_OUT, "bias"), {hparams.n_cls_out}, TENSOR_NOT_REQUIRED); + + output_norm_enc = create_tensor(tn(LLM_TENSOR_ENC_OUTPUT_NORM, "weight"), {n_embd}, 0); + + for (int i = 0; i < n_layer; ++i) { + auto & layer = layers[i]; + + layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0); + + layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, 0); + layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0); + + layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0); + + layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff*2}, 0); + layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0); + } + } break; case LLM_ARCH_JINA_BERT_V2: { tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0); // word_embeddings @@ -2215,8 +2751,8 @@ bool llama_model::load_tensors(llama_model_loader & ml) { layer.attn_norm_2 = create_tensor(tn(LLM_TENSOR_ATTN_NORM_2, "weight", i), {n_embd}, TENSOR_NOT_REQUIRED); layer.attn_norm_2_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM_2, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED); - layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0); - layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0); + layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, TENSOR_NOT_REQUIRED); + layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, layer.ffn_gate ? 
n_ff : n_ff * 2}, 0); layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0); layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, 0); @@ -2372,12 +2908,14 @@ bool llama_model::load_tensors(llama_model_loader & ml) { } break; case LLM_ARCH_QWEN2: case LLM_ARCH_QWEN2VL: + case LLM_ARCH_DREAM: { tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0); // output output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0); output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED); + output_b = create_tensor(tn(LLM_TENSOR_OUTPUT, "bias"), {n_vocab}, TENSOR_NOT_REQUIRED); // if output is NULL, init from the input tok embed if (output == NULL) { output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED); @@ -2492,7 +3030,11 @@ bool llama_model::load_tensors(llama_model_loader & ml) { // output output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0); - output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0); + output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED); + // if output is NULL, init from the input tok embed + if (output == NULL) { + output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED); + } for (int i = 0; i < n_layer; ++i) { auto & layer = layers[i]; @@ -2663,6 +3205,73 @@ bool llama_model::load_tensors(llama_model_loader & ml) { layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0); } } break; + case LLM_ARCH_PLAMO2: + { + const uint32_t d_conv = hparams.ssm_d_conv; + const uint32_t d_state = hparams.ssm_d_state; + const uint32_t num_heads = hparams.ssm_dt_rank; + const uint32_t intermediate_size = hparams.ssm_d_inner; + const uint32_t head_dim = intermediate_size / num_heads; + const uint32_t qk_dim = head_dim; + const uint32_t v_dim = head_dim; + const int64_t num_attention_heads = hparams.n_head(); + const int64_t q_num_heads = num_attention_heads; + const int64_t dt_dim = std::max(64, int(hparams.n_embd / 16)); + + tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0); + + // output + output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0); + output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED); + // if output is NULL, init from the input tok embed + if (output == NULL) { + output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED); + } + + for (int i = 0; i < n_layer; ++i) { + auto & layer = layers[i]; + bool is_mamba_layer = hparams.is_recurrent(i); + + layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0); + + if (is_mamba_layer) { + layer.ssm_in = create_tensor(tn(LLM_TENSOR_SSM_IN, "weight", i), {n_embd, 2 * intermediate_size}, 0); + layer.ssm_conv1d = create_tensor(tn(LLM_TENSOR_SSM_CONV1D, "weight", i), {d_conv, intermediate_size}, 0); + + layer.ssm_x = create_tensor(tn(LLM_TENSOR_SSM_X, "weight", i), {intermediate_size, dt_dim + 2*d_state}, 0); + layer.ssm_dt = create_tensor(tn(LLM_TENSOR_SSM_DT, "weight", i), {dt_dim, num_heads}, 0); + layer.ssm_dt_b = create_tensor(tn(LLM_TENSOR_SSM_DT, "bias", i), {num_heads}, 0); + + layer.ssm_a = create_tensor(tn(LLM_TENSOR_SSM_A, i), {num_heads}, 0); + layer.ssm_d = create_tensor(tn(LLM_TENSOR_SSM_D, i), {num_heads}, 0); + + 
layer.ssm_out = create_tensor(tn(LLM_TENSOR_SSM_OUT, "weight", i), {intermediate_size, n_embd}, 0);
+
+                        layer.ssm_dt_norm = create_tensor(tn(LLM_TENSOR_SSM_DT_NORM, i), {dt_dim}, 0);
+                        layer.ssm_b_norm = create_tensor(tn(LLM_TENSOR_SSM_B_NORM, i), {d_state}, 0);
+                        layer.ssm_c_norm = create_tensor(tn(LLM_TENSOR_SSM_C_NORM, i), {d_state}, 0);
+                    } else {
+                        const int64_t num_key_value_heads = hparams.n_head_kv(i);
+                        const int64_t k_num_heads = num_key_value_heads;
+                        const int64_t v_num_heads = num_key_value_heads;
+                        const int64_t q_proj_dim = q_num_heads * qk_dim;
+                        const int64_t k_proj_dim = k_num_heads * qk_dim;
+                        const int64_t v_proj_dim = v_num_heads * v_dim;
+
+                        layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, q_proj_dim + k_proj_dim + v_proj_dim}, 0);
+                        layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {head_dim, num_attention_heads}, 0);
+                        layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {head_dim, k_num_heads}, 0);
+                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {q_num_heads * v_dim, n_embd}, 0);
+                    }
+
+                    // All layers have post-attention norm, FFN norm, and FFN tensors
+                    layer.attn_post_norm = create_tensor(tn(LLM_TENSOR_ATTN_POST_NORM, i), {n_embd}, 0);
+                    layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
+                    layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
+                    layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff * 2}, 0);
+                    layer.ffn_post_norm = create_tensor(tn(LLM_TENSOR_FFN_POST_NORM, i), {n_embd}, 0);
+                }
+            } break;
         case LLM_ARCH_GPT2:
             {
                 tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
@@ -2871,6 +3480,62 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
                     layer.ffn_post_norm = create_tensor(tn(LLM_TENSOR_FFN_POST_NORM, "weight", i), {n_embd}, 0);
                 }
             } break;
+        case LLM_ARCH_GEMMA3N:
+            {
+                const int64_t n_altup      = hparams.n_altup;
+                const int64_t laurel_rank  = hparams.laurel_rank;
+                const int64_t n_embd_altup = hparams.n_embd_altup;
+
+                output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
+                // if output is NULL, init from the input tok embed
+                if (output == NULL) {
+                    output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
+                }
+
+                tok_embd           = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
+                tok_embd_per_layer = create_tensor(tn(LLM_TENSOR_PER_LAYER_TOKEN_EMBD, "weight"), {n_embd_altup * n_layer, n_vocab}, 0);
+
+                altup_proj           = create_tensor(tn(LLM_TENSOR_ALTUP_PROJ, "weight"), {n_embd, n_embd, n_altup - 1}, 0);
+                altup_unembd_proj    = create_tensor(tn(LLM_TENSOR_ALTUP_UNEMBD_PROJ, "weight"), {n_embd, n_embd, n_altup - 1}, 0);
+                per_layer_model_proj = create_tensor(tn(LLM_TENSOR_PER_LAYER_MODEL_PROJ, "weight"), {n_embd, n_embd_altup * n_layer}, 0);
+                per_layer_proj_norm  = create_tensor(tn(LLM_TENSOR_PER_LAYER_PROJ_NORM, "weight"), {n_embd_altup}, 0);
+
+                output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
+
+                for (int i = 0; i < n_layer; ++i) {
+                    auto & layer = layers[i];
+
+                    layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
+
+                    layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
+                    layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_k_gqa}, 0);
+                    layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_v_gqa}, 0);
+                    layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);
+
+                    layer.attn_q_norm    = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd_head_k}, 0);
+                    layer.attn_k_norm    = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd_head_k}, 0);
+                    layer.attn_post_norm = create_tensor(tn(LLM_TENSOR_ATTN_POST_NORM, "weight", i), {n_embd}, 0);
+
+                    layer.ffn_norm      = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
+                    layer.ffn_gate      = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
+                    layer.ffn_up        = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
+                    layer.ffn_down      = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0);
+                    layer.ffn_post_norm = create_tensor(tn(LLM_TENSOR_FFN_POST_NORM, "weight", i), {n_embd}, 0);
+
+                    // altup & laurel
+                    layer.per_layer_inp_gate  = create_tensor(tn(LLM_TENSOR_PER_LAYER_INP_GATE, "weight", i), {n_embd, n_embd_altup}, 0);
+                    layer.per_layer_proj      = create_tensor(tn(LLM_TENSOR_PER_LAYER_PROJ, "weight", i), {n_embd_altup, n_embd}, 0);
+                    layer.per_layer_post_norm = create_tensor(tn(LLM_TENSOR_PER_LAYER_POST_NORM, "weight", i), {n_embd}, 0);
+                    layer.altup_correct_coef  = create_tensor(tn(LLM_TENSOR_ALTUP_CORRECT_COEF, "weight", i), {n_altup, n_altup}, 0);
+                    layer.altup_correct_scale = create_tensor(tn(LLM_TENSOR_ALTUP_CORRECT_SCALE, "weight", i), {n_embd}, 0);
+                    layer.altup_predict_coef  = create_tensor(tn(LLM_TENSOR_ALTUP_PREDICT_COEF, "weight", i), {n_altup, n_altup * n_altup}, 0);
+                    layer.altup_router        = create_tensor(tn(LLM_TENSOR_ALTUP_ROUTER, "weight", i), {n_embd, n_altup}, 0);
+                    layer.altup_router_norm   = create_tensor(tn(LLM_TENSOR_ALTUP_ROUTER_NORM, "weight", i), {n_embd}, 0);
+                    layer.laurel_l            = create_tensor(tn(LLM_TENSOR_LAUREL_L, "weight", i), {n_embd, laurel_rank}, 0);
+                    layer.laurel_r            = create_tensor(tn(LLM_TENSOR_LAUREL_R, "weight", i), {laurel_rank, n_embd}, 0);
+                    layer.laurel_post_norm    = create_tensor(tn(LLM_TENSOR_LAUREL_POST_NORM, "weight", i), {n_embd}, 0);
+                }
+            } break;
         case LLM_ARCH_STARCODER2:
             {
                 tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
@@ -2960,6 +3625,228 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
                     layer.ssm_out = create_tensor(tn(LLM_TENSOR_SSM_OUT, "weight", i), {d_inner, n_embd}, 0);
                 }
             } break;
+        case LLM_ARCH_MAMBA2:
+            {
+                const int64_t d_conv    = hparams.ssm_d_conv;
+                const int64_t d_inner   = hparams.ssm_d_inner;
+                const int64_t d_state   = hparams.ssm_d_state;
+                const int64_t n_head    = hparams.ssm_dt_rank;
+                const int64_t n_group   = hparams.ssm_n_group;
+                const int64_t d_in_proj = 2*d_inner + 2*n_group*d_state + n_head;
+
+                // only an expansion factor of 2 is supported for now
+                GGML_ASSERT(2 * n_embd == d_inner);
+
+                tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
+
+                // output
+                {
+                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
+
+                    output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
+                    // if output is NULL, init from the input tok embed, duplicated to allow offloading
+                    if (output == NULL) {
+                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
+                    }
+                }
+
+                for (int i = 0; i < n_layer; ++i) {
+                    auto & layer = layers[i];
+
+                    // norm
+                    layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
+
+                    layer.ssm_in = create_tensor(tn(LLM_TENSOR_SSM_IN, "weight", i), {n_embd, d_in_proj}, 0);
+
+                    layer.ssm_conv1d   = create_tensor(tn(LLM_TENSOR_SSM_CONV1D, "weight", i), {d_conv, d_inner + 2*n_group*d_state}, 0);
+                    layer.ssm_conv1d_b = create_tensor(tn(LLM_TENSOR_SSM_CONV1D, "bias", i), {d_inner + 2*n_group*d_state}, 0);
+
+                    layer.ssm_dt_b = create_tensor(tn(LLM_TENSOR_SSM_DT, "bias", i), {n_head}, 0);
+
+                    // no "weight" suffix for these
+                    layer.ssm_a = create_tensor(tn(LLM_TENSOR_SSM_A, i), {1, n_head}, 0);
+                    layer.ssm_d = create_tensor(tn(LLM_TENSOR_SSM_D, i), {1, n_head}, 0);
+
+                    layer.ssm_norm = create_tensor(tn(LLM_TENSOR_SSM_NORM, "weight", i), {d_inner / n_group, n_group}, 0);
+
+                    // out_proj
+                    layer.ssm_out = create_tensor(tn(LLM_TENSOR_SSM_OUT, "weight", i), {d_inner, n_embd}, 0);
+                }
+            } break;
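// [illustrative aside, not part of the patch] The fused Mamba-2 projection
// above packs several logical tensors into one matmul. A minimal sketch of the
// packing arithmetic, assuming the conventional {z, x, B, C, dt} order (the
// offset names are illustrative, not taken from this patch):
//
//     const int64_t d_in_proj = 2*d_inner + 2*n_group*d_state + n_head;
//     const int64_t off_z  = 0;                        // gate,       d_inner wide
//     const int64_t off_x  = d_inner;                  // conv input, d_inner wide
//     const int64_t off_B  = 2*d_inner;                // SSM B,      n_group*d_state wide
//     const int64_t off_C  = off_B + n_group*d_state;  // SSM C,      n_group*d_state wide
//     const int64_t off_dt = off_C + n_group*d_state;  // step sizes, n_head wide
//
// Note that the ssm_conv1d width (d_inner + 2*n_group*d_state) covers exactly
// the {x, B, C} slice, which is what the short convolution runs over.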
+        case LLM_ARCH_JAMBA:
+            {
+                const int64_t d_conv  = hparams.ssm_d_conv;
+                const int64_t d_inner = hparams.ssm_d_inner;
+                const int64_t d_state = hparams.ssm_d_state;
+                const int64_t dt_rank = hparams.ssm_dt_rank;
+
+                // only an expansion factor of 2 is supported for now
+                GGML_ASSERT(2 * n_embd == d_inner);
+
+                tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
+
+                // output
+                {
+                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
+
+                    output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
+                    // if output is NULL, init from the input tok embed, duplicated to allow offloading
+                    if (output == NULL) {
+                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
+                    }
+                }
+
+                for (int i = 0; i < n_layer; ++i) {
+                    const int64_t n_head_kv   = hparams.n_head_kv(i);
+                    const int64_t n_embd_gqa  = hparams.n_embd_v_gqa(i);
+
+                    auto & layer = layers[i];
+
+                    // norm
+                    layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
+
+                    if (n_head_kv == 0) {
+                        // Mamba layer
+                        layer.ssm_in = create_tensor(tn(LLM_TENSOR_SSM_IN, "weight", i), {n_embd, 2*d_inner}, 0);
+
+                        layer.ssm_conv1d   = create_tensor(tn(LLM_TENSOR_SSM_CONV1D, "weight", i), {d_conv, d_inner}, 0);
+                        layer.ssm_conv1d_b = create_tensor(tn(LLM_TENSOR_SSM_CONV1D, "bias", i), {d_inner}, 0);
+
+                        layer.ssm_x = create_tensor(tn(LLM_TENSOR_SSM_X, "weight", i), {d_inner, dt_rank + 2*d_state}, 0);
+
+                        layer.ssm_dt_norm = create_tensor(tn(LLM_TENSOR_SSM_DT_NORM, "weight", i), {dt_rank}, 0);
+
+                        layer.ssm_dt   = create_tensor(tn(LLM_TENSOR_SSM_DT, "weight", i), {dt_rank, d_inner}, 0);
+                        layer.ssm_dt_b = create_tensor(tn(LLM_TENSOR_SSM_DT, "bias", i), {d_inner}, 0);
+
+                        layer.ssm_b_norm = create_tensor(tn(LLM_TENSOR_SSM_B_NORM, "weight", i), {d_state}, 0);
+                        layer.ssm_c_norm = create_tensor(tn(LLM_TENSOR_SSM_C_NORM, "weight", i), {d_state}, 0);
+
+                        // no "weight" suffix for these
+                        layer.ssm_a = create_tensor(tn(LLM_TENSOR_SSM_A, i), {d_state, d_inner}, 0);
+                        layer.ssm_d = create_tensor(tn(LLM_TENSOR_SSM_D, i), {d_inner}, 0);
+
+                        // out_proj
+                        layer.ssm_out = create_tensor(tn(LLM_TENSOR_SSM_OUT, "weight", i), {d_inner, n_embd}, 0);
+                    } else {
+                        // Attention layers
+
+                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0);
+                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0);
+                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0);
+                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
+                    }
+
+                    layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
+
+                    layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, TENSOR_NOT_REQUIRED);
+
+                    if (layer.ffn_gate_inp) {
+                        // MoE
+                        layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd, n_ff, n_expert}, 0);
+                        layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff, n_embd, n_expert}, 0);
+                        layer.ffn_up_exps   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS, "weight", i), {n_embd, n_ff, n_expert}, 0);
+                    } else {
+                        // FFN (no MoE)
+                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
+                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
+                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
+                    }
+                }
+            } break;
+        case LLM_ARCH_GRANITE_HYBRID:
+            {
+                // mamba2 Mixer SSM params
+                // NOTE: int64_t for tensor dimensions
+                const int64_t d_conv     = hparams.ssm_d_conv;
+                const int64_t d_inner    = hparams.ssm_d_inner;
+                const int64_t d_state    = hparams.ssm_d_state;
+                const int64_t n_ssm_head = hparams.ssm_dt_rank;
+                const int64_t n_group    = hparams.ssm_n_group;
+                const int64_t d_in_proj  = 2*d_inner + 2*n_group*d_state + n_ssm_head;
+
+                // only an expansion factor of 2 is supported for now
+                GGML_ASSERT(2 * n_embd == d_inner);
+
+                // embeddings
+                tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
+
+                // output
+                {
+                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
+                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
+                    // if output is NULL, init from the input tok embed, duplicated to allow offloading
+                    if (output == NULL) {
+                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
+                    }
+                }
+
+                for (int i = 0; i < n_layer; ++i) {
+                    auto & layer = layers[i];
+
+                    // norm
+                    layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
+
+                    if (hparams.is_recurrent(i)) {
+                        // ssm layers
+                        layer.ssm_in = create_tensor(tn(LLM_TENSOR_SSM_IN, "weight", i), {n_embd, d_in_proj}, 0);
+
+                        layer.ssm_conv1d   = create_tensor(tn(LLM_TENSOR_SSM_CONV1D, "weight", i), {d_conv, d_inner + 2*n_group*d_state}, 0);
+                        layer.ssm_conv1d_b = create_tensor(tn(LLM_TENSOR_SSM_CONV1D, "bias", i), {d_inner + 2*n_group*d_state}, TENSOR_NOT_REQUIRED);
+
+                        layer.ssm_dt_b = create_tensor(tn(LLM_TENSOR_SSM_DT, "bias", i), {n_ssm_head}, 0);
+
+                        // no "weight" suffix for these
+                        layer.ssm_a = create_tensor(tn(LLM_TENSOR_SSM_A, i), {1, n_ssm_head}, 0);
+                        layer.ssm_d = create_tensor(tn(LLM_TENSOR_SSM_D, i), {1, n_ssm_head}, 0);
+
+                        layer.ssm_norm = create_tensor(tn(LLM_TENSOR_SSM_NORM, "weight", i), {d_inner / n_group, n_group}, 0);
+
+                        // out_proj
+                        layer.ssm_out = create_tensor(tn(LLM_TENSOR_SSM_OUT, "weight", i), {d_inner, n_embd}, 0);
+                    } else {
+                        // attention layers (with optional bias)
+                        const int64_t n_head_i        = hparams.n_head(i);
+                        const int64_t n_embd_k_gqa_i  = hparams.n_embd_k_gqa(i);
+                        const int64_t n_embd_v_gqa_i  = hparams.n_embd_v_gqa(i);
+                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd_head_k * n_head_i}, 0);
+                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_k_gqa_i}, 0);
+                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_v_gqa_i}, 0);
+                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head_i, n_embd}, 0);
+                        layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED);
+                        layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_k_gqa_i}, TENSOR_NOT_REQUIRED);
+                        layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_v_gqa_i}, TENSOR_NOT_REQUIRED);
+                        layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED);
+                    }
+
+                    // feed forward (w/ optional biases)
+                    if (n_expert > 0) {
+                        // MoE FFN
+                        layer.ffn_norm      = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
+                        layer.rope_freqs    = create_tensor(tn(LLM_TENSOR_ROPE_FREQS, "weight", i), {n_rot/2}, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
+                        layer.ffn_gate_inp  = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, 0);
+                        layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd, n_ff, n_expert}, TENSOR_NOT_REQUIRED);
+                        layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), { n_ff, n_embd, n_expert}, 0);
+                        layer.ffn_up_exps   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS, "weight", i), {n_embd, n_ff, n_expert}, 0);
+
+                        // For Granite MoE Shared
+                        if (hparams.n_ff_shexp > 0) {
+                            layer.ffn_gate_shexp = create_tensor(tn(LLM_TENSOR_FFN_GATE_SHEXP, "weight", i), {n_embd, hparams.n_ff_shexp}, 0);
+                            layer.ffn_up_shexp   = create_tensor(tn(LLM_TENSOR_FFN_UP_SHEXP, "weight", i), {n_embd, hparams.n_ff_shexp}, 0);
+                            layer.ffn_down_shexp = create_tensor(tn(LLM_TENSOR_FFN_DOWN_SHEXP, "weight", i), {hparams.n_ff_shexp, n_embd}, 0);
+                        }
+                    } else {
+                        layer.ffn_norm   = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
+                        layer.rope_freqs = create_tensor(tn(LLM_TENSOR_ROPE_FREQS, "weight", i), {n_rot/2}, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
+                        layer.ffn_gate   = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
+                        layer.ffn_down   = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0);
+                        layer.ffn_up     = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
+                        layer.ffn_gate_b = create_tensor(tn(LLM_TENSOR_FFN_GATE, "bias", i), {n_ff}, TENSOR_NOT_REQUIRED);
+                        layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED);
+                        layer.ffn_up_b   = create_tensor(tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, TENSOR_NOT_REQUIRED);
+                    }
+                }
+            } break;
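// [illustrative aside, not part of the patch] The expansion-factor assert in
// the hybrid cases above pins d_inner to exactly 2*n_embd. With hypothetical
// sizes n_embd = 2048, n_group = 1, d_state = 128, n_ssm_head = 64:
//
//     d_inner   = 2*2048                = 4096
//     d_in_proj = 2*4096 + 2*1*128 + 64 = 8512
//
// so a checkpoint exported with any other expansion factor fails to load here,
// and hparams.is_recurrent(i) decides per layer whether the SSM or the
// attention tensor set is allocated.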
         case LLM_ARCH_XVERSE:
             {
                 tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
@@ -3612,6 +4499,105 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
                     layer.ffn_post_norm = create_tensor(tn(LLM_TENSOR_FFN_POST_NORM, "weight", i), {n_embd}, 0);
                 }
             } break;
+        case LLM_ARCH_GLM4_MOE:
+            {
+                const int64_t n_expert        = hparams.n_expert;
+                const int64_t n_expert_used   = hparams.n_expert_used;
+                const int64_t n_expert_shared = hparams.n_expert_shared;
+
+                GGML_ASSERT(hparams.n_expert > 0 && "n_expert must be > 0 for GLM4_MOE MoE layers");
+                GGML_ASSERT(hparams.n_expert_used > 0 && "n_expert_used must be > 0 for GLM4_MOE MoE layers");
+
+                tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), { n_embd, n_vocab }, 0);
+
+                // output
+                output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), { n_embd }, 0);
+                output      = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), { n_embd, n_vocab }, TENSOR_NOT_REQUIRED);
+                // if output is NULL, init from the input tok embed
+                if (output == NULL) {
+                    output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), { n_embd, n_vocab }, TENSOR_DUPLICATED);
+                }
+
+                // Load ALL tensors including NextN layer to satisfy total tensor count
+                // but only PROCESS up to last layer (skipping final NextN layer) in forward pass
+                for (int i = 0; i < n_layer; ++i) {
+                    int flags = 0;
+                    if (hparams.nextn_predict_layers > 0 && static_cast<uint32_t>(i) >= n_layer - hparams.nextn_predict_layers) {
+                        // skip all tensors in the NextN layers
+                        flags |= TENSOR_SKIP;
+                    }
+
+                    auto & layer = layers[i];
+
+                    layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), { n_embd }, flags);
+
+                    // GLM-style attention with bias terms
+                    layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), { n_embd, n_embd_head_k * n_head }, flags);
+                    layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), { n_embd, n_embd_k_gqa }, flags);
+                    layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), { n_embd, n_embd_v_gqa }, flags);
+                    layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "bias", i), { n_embd_head_k * n_head }, flags);
+                    layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K, "bias", i), { n_embd_k_gqa }, flags);
+                    layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V, "bias", i), { n_embd_v_gqa }, flags);
+
+                    layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), { n_embd_head_k * n_head, n_embd }, flags);
+
+                    // K/Q norm tensors (optional for GLM-4.5 355B variant)
+                    layer.attn_q_norm = create_tensor(
+                        tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), { n_embd_head_k }, TENSOR_NOT_REQUIRED | flags);
+                    layer.attn_k_norm = create_tensor(
+                        tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), { n_embd_head_k }, TENSOR_NOT_REQUIRED | flags);
+
+                    layer.attn_post_norm = create_tensor(tn(LLM_TENSOR_ATTN_POST_NORM, "weight", i), { n_embd }, flags);
+
+                    // Check if this layer uses MoE or dense FFN based on n_layer_dense_lead
+                    // GLM 4.5 uses hybrid architecture: layer 0 is dense, layers 1+ are MoE
+                    const bool use_moe = (static_cast<uint32_t>(i) >= hparams.n_layer_dense_lead);
+
+                    if (use_moe) {
+                        // MoE layers
+                        layer.ffn_gate_inp =
+                            create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), { n_embd, n_expert }, flags);
+                        layer.ffn_exp_probs_b = create_tensor(tn(LLM_TENSOR_FFN_EXP_PROBS_B, "bias", i), { n_expert }, flags);
+
+                        // MoE branch
+                        const int64_t n_ff_exp = hparams.n_ff_exp ? hparams.n_ff_exp : n_ff / n_expert_used;
+
+                        layer.ffn_gate_exps = create_tensor(
+                            tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), { n_embd, n_ff_exp, n_expert }, flags);
+                        layer.ffn_down_exps = create_tensor(
+                            tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), { n_ff_exp, n_embd, n_expert }, flags);
+                        layer.ffn_up_exps = create_tensor(
+                            tn(LLM_TENSOR_FFN_UP_EXPS, "weight", i), { n_embd, n_ff_exp, n_expert }, flags);
+
+                        // Shared expert
+                        if (n_expert_shared > 0) {
+                            const int64_t n_ff_shexp = n_ff_exp * n_expert_shared;
+                            layer.ffn_gate_shexp = create_tensor(
+                                tn(LLM_TENSOR_FFN_GATE_SHEXP, "weight", i), { n_embd, n_ff_shexp }, flags);
+                            layer.ffn_down_shexp = create_tensor(
+                                tn(LLM_TENSOR_FFN_DOWN_SHEXP, "weight", i), { n_ff_shexp, n_embd }, flags);
+                            layer.ffn_up_shexp = create_tensor(
+                                tn(LLM_TENSOR_FFN_UP_SHEXP, "weight", i), { n_embd, n_ff_shexp }, flags);
+                        }
+                    } else {
+                        // Dense layers (first k layers) - GLM uses separate gate/up projections
+                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), { n_embd, n_ff }, flags);
+                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd }, flags);
+                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), { n_embd, n_ff }, flags);
+                    }
+
+                    // NextN/MTP tensors (preserved but unused) - conditionally load for last nextn_predict_layers
+                    if (hparams.nextn_predict_layers > 0 && static_cast<uint32_t>(i) >= n_layer - hparams.nextn_predict_layers) {
+                        layer.nextn.eh_proj          = create_tensor(tn(LLM_TENSOR_NEXTN_EH_PROJ, "weight", i), { 2 * n_embd, n_embd }, flags);
+                        layer.nextn.embed_tokens     = create_tensor(tn(LLM_TENSOR_NEXTN_EMBED_TOKENS, "weight", i), { n_embd, n_vocab }, flags);
+                        layer.nextn.enorm            = create_tensor(tn(LLM_TENSOR_NEXTN_ENORM, "weight", i), { n_embd }, flags);
+                        layer.nextn.hnorm            = create_tensor(tn(LLM_TENSOR_NEXTN_HNORM, "weight", i), { n_embd }, flags);
+                        layer.nextn.shared_head_head = create_tensor(tn(LLM_TENSOR_NEXTN_SHARED_HEAD_HEAD, "weight", i), { n_embd, n_vocab }, flags);
+                        layer.nextn.shared_head_norm = create_tensor(tn(LLM_TENSOR_NEXTN_SHARED_HEAD_NORM, "weight", i), { n_embd }, flags);
+                    }
+                }
+            }
+            break;
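// [illustrative aside, not part of the patch] Which indices the TENSOR_SKIP
// branch above selects, as a standalone helper (hypothetical name, mirrors the
// condition in the loop):
//
//     static bool is_nextn_layer(uint32_t i, uint32_t n_layer, uint32_t nextn) {
//         return nextn > 0 && i >= n_layer - nextn;
//     }
//
// e.g. with n_layer = 47 and nextn_predict_layers = 1 (made-up numbers), only
// layer 46 is loaded-but-skipped; its NextN/MTP tensors still count toward the
// checkpoint's expected total tensor count.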
         case LLM_ARCH_NEMOTRON:
             {
                 tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
@@ -3679,6 +4665,39 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
                     layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
                 }
             } break;
+        case LLM_ARCH_EXAONE4:
+            {
+                tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
+
+                // output
+                output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
+                output      = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
+
+                // if output is NULL, init from the input tok embed
+                if (output == NULL) {
+                    output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
+                }
+
+                for (int i = 0; i < n_layer; ++i) {
+                    auto & layer = layers[i];
+
+                    layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
+                    layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_k_gqa}, 0);
+                    layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_v_gqa}, 0);
+                    layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
+
+                    layer.rope_freqs = create_tensor(tn(LLM_TENSOR_ROPE_FREQS, "weight", i), {n_rot/2}, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
+
+                    layer.attn_post_norm = create_tensor(tn(LLM_TENSOR_ATTN_POST_NORM, "weight", i), {n_embd}, 0);
+                    layer.attn_q_norm    = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd_head_k}, 0);
+                    layer.attn_k_norm    = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd_head_k}, 0);
+
+                    layer.ffn_gate      = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
+                    layer.ffn_down      = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0);
+                    layer.ffn_up        = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
+                    layer.ffn_post_norm = create_tensor(tn(LLM_TENSOR_FFN_POST_NORM, "weight", i), {n_embd}, 0);
+                }
+            } break;
         case LLM_ARCH_RWKV6:
             {
                 tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
@@ -4138,6 +5157,427 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
                         layer.ffn_up_shexp   = create_tensor(tn(LLM_TENSOR_FFN_UP_SHEXP, "weight", i), {n_embd, n_ff_exp * n_expert_shared}, 0);
                     }
                 }
             } break;
+        case LLM_ARCH_DOTS1:
+            {
+                const int64_t n_ff_exp        = hparams.n_ff_exp;
+                const int64_t n_expert_shared = hparams.n_expert_shared;
+
+                tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
+
+                output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
+                output      = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0);
+
+                for (int i = 0; i < n_layer; ++i) {
+                    auto & layer = layers[i];
+
+                    layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
+
+                    layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
+                    layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
+                    layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
+                    layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);
+
+                    layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd_head_k}, 0);
+                    layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd_head_k}, 0);
+
+                    layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
+
+                    if (i < (int) hparams.n_layer_dense_lead) {
+                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
+                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0);
+                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
+                    } else {
+                        layer.ffn_gate_inp    = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, 0);
+                        layer.ffn_exp_probs_b = create_tensor(tn(LLM_TENSOR_FFN_EXP_PROBS_B, "bias", i), {n_expert}, TENSOR_NOT_REQUIRED);
+
+                        if (n_expert == 0) {
+                            throw std::runtime_error("n_expert must be > 0");
+                        }
+                        if (n_expert_used == 0) {
+                            throw std::runtime_error("n_expert_used must be > 0");
+                        }
+
+                        // MoE branch
+                        layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), { n_embd, n_ff_exp, n_expert}, 0);
+                        layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff_exp, n_embd, n_expert}, 0);
+                        layer.ffn_up_exps   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS, "weight", i), { n_embd, n_ff_exp, n_expert}, 0);
+
+                        // Shared expert branch
+                        layer.ffn_gate_shexp = create_tensor(tn(LLM_TENSOR_FFN_GATE_SHEXP, "weight", i), {n_embd, n_ff_exp * n_expert_shared}, 0);
+                        layer.ffn_down_shexp = create_tensor(tn(LLM_TENSOR_FFN_DOWN_SHEXP, "weight", i), { n_ff_exp * n_expert_shared, n_embd}, 0);
+                        layer.ffn_up_shexp   = create_tensor(tn(LLM_TENSOR_FFN_UP_SHEXP, "weight", i), {n_embd, n_ff_exp * n_expert_shared}, 0);
+                    }
+                }
+            } break;
+        case LLM_ARCH_ARCEE:
+            {
+                tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
+
+                // output
+                output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
+                output      = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
+
+                // if output is NULL, init from the input tok embed
+                if (output == NULL) {
+                    output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
+                }
+
+                for (int i = 0; i < n_layer; ++i) {
+                    auto & layer = layers[i];
+
+                    layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
+
+                    layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
+                    layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_k_gqa}, 0);
+                    layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_v_gqa}, 0);
+                    layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);
+
+                    layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
+
+                    layer.rope_freqs = create_tensor(tn(LLM_TENSOR_ROPE_FREQS, "weight", i), {n_rot/2}, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
+
+                    layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0);
+                    layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
+                }
+            } break;
+        case LLM_ARCH_ERNIE4_5:
+        case LLM_ARCH_ERNIE4_5_MOE:
+            {
+                tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
+
+                // output
+                output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
+                output      = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
+                // if output is NULL, init from the input tok embed
+                if (output == NULL) {
+                    output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
+                }
+
+                for (int i = 0; i < n_layer; ++i) {
+                    auto & layer = layers[i];
+
+                    layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
+
+                    layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
+                    layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0);
+                    layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0);
+                    layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);
+
+                    // optional bias tensors
+                    layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED);
+                    layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED);
+                    layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED);
+                    layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED);
+
+                    layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
+
+                    if (arch == LLM_ARCH_ERNIE4_5_MOE && static_cast<uint32_t>(i) >= hparams.n_layer_dense_lead) { // MoE layers
+                        int n_ff_exp = hparams.n_ff_exp;
+
+                        layer.ffn_gate_inp    = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, 0);
+                        layer.ffn_exp_probs_b = create_tensor(tn(LLM_TENSOR_FFN_EXP_PROBS_B, "bias", i), {n_expert}, TENSOR_NOT_REQUIRED);
+                        layer.ffn_gate_exps   = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd, n_ff_exp, n_expert}, TENSOR_NOT_REQUIRED);
+                        layer.ffn_down_exps   = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), { n_ff_exp, n_embd, n_expert}, 0);
+                        layer.ffn_up_exps     = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS, "weight", i), {n_embd, n_ff_exp, n_expert}, 0);
+
+                        // Shared expert (if present)
+                        if (hparams.n_ff_shexp > 0) {
+                            layer.ffn_gate_shexp = create_tensor(tn(LLM_TENSOR_FFN_GATE_SHEXP, "weight", i), { n_embd, hparams.n_ff_shexp}, 0);
+                            layer.ffn_down_shexp = create_tensor(tn(LLM_TENSOR_FFN_DOWN_SHEXP, "weight", i), {hparams.n_ff_shexp, n_embd }, 0);
+                            layer.ffn_up_shexp   = create_tensor(tn(LLM_TENSOR_FFN_UP_SHEXP, "weight", i), { n_embd, hparams.n_ff_shexp}, 0);
+                        }
+                    } else { // Dense layers
+                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
+                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0);
+                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
+                    }
+                }
+            } break;
+        case LLM_ARCH_FALCON_H1:
+            {
+                // Common
+                const int64_t hidden_size = hparams.n_embd; // hidden_size
+
+                // mamba2 Mixer SSM params
+                const int64_t ssm_conv_kernel_size  = hparams.ssm_d_conv;  // ssm_conv_kernel_size
+                const int64_t ssm_n_groups          = hparams.ssm_n_group; // ssm_n_groups
+                const int64_t ssm_state_size        = hparams.ssm_d_state; // ssm_state_size
+                const int64_t ssm_intermediate_size = hparams.ssm_d_inner; // TODO expand
+                const int64_t ssm_num_heads         = hparams.ssm_dt_rank; // ssm_num_heads
+                const int64_t ssm_conv_dim          = ssm_intermediate_size + 2 * ssm_n_groups * ssm_state_size;
+                const int64_t ssm_projection_size   = ssm_intermediate_size + ssm_conv_dim + ssm_num_heads;
+
+                // attn params
+                const int64_t attn_num_attention_head = hparams.n_head(0); // rename to: attn_num_attention_head
+                const int64_t attn_num_key_value_head = hparams.n_head_kv(0);
+
+                // ffn params
+                const int64_t ffn_intermediate_size = hparams.n_ff(0);
+
+                // embeddings
+                tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {hidden_size, n_vocab}, 0);
+
+                // output
+                output      = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {hidden_size, n_vocab}, TENSOR_NOT_REQUIRED);
+                output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {hidden_size}, 0);
+
+                // if output is NULL, init from the input tok embed
+                if (output == NULL) {
+                    output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {hidden_size, n_vocab}, TENSOR_DUPLICATED);
+                }
+
+                for (int i = 0; i < n_layer; ++i) {
+                    auto & layer = layers[i];
+
+                    /*SSM LAYERS*/
+                    // ssm in
+                    layer.ssm_in = create_tensor(tn(LLM_TENSOR_SSM_IN, "weight", i), {hidden_size, ssm_projection_size}, 0);
+                    // ssm 1d conv
+                    layer.ssm_conv1d   = create_tensor(tn(LLM_TENSOR_SSM_CONV1D, "weight", i), {ssm_conv_kernel_size, ssm_conv_dim}, 0);
+                    layer.ssm_conv1d_b = create_tensor(tn(LLM_TENSOR_SSM_CONV1D, "bias", i), {ssm_conv_dim}, TENSOR_NOT_REQUIRED);
+                    // ssm_dt
+                    layer.ssm_dt_b = create_tensor(tn(LLM_TENSOR_SSM_DT, "bias", i), {ssm_num_heads}, 0);
+                    // no "weight" suffix for these
+                    layer.ssm_a = create_tensor(tn(LLM_TENSOR_SSM_A, i), {1, ssm_num_heads}, 0);
+                    layer.ssm_d = create_tensor(tn(LLM_TENSOR_SSM_D, i), {1, ssm_num_heads}, 0);
+                    // ssm_norm
+                    layer.ssm_norm = create_tensor(tn(LLM_TENSOR_SSM_NORM, "weight", i), {ssm_intermediate_size / ssm_n_groups, ssm_n_groups}, TENSOR_NOT_REQUIRED);
+                    // out_proj
+                    layer.ssm_out = create_tensor(tn(LLM_TENSOR_SSM_OUT, "weight", i), {ssm_intermediate_size, hidden_size}, 0);
+
+                    /*ATTENTION LAYERS*/
+                    // attention layers (with optional bias)
+                    layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {hidden_size, n_embd_head_k * attn_num_attention_head}, 0);
+                    layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {hidden_size, attn_num_key_value_head * n_embd_head_k}, 0);
+                    layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {hidden_size, attn_num_key_value_head * n_embd_head_v}, 0);
+                    layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * attn_num_attention_head, hidden_size}, 0);
+                    layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "bias", i), {hidden_size}, TENSOR_NOT_REQUIRED);
+                    layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K, "bias", i), {attn_num_key_value_head * n_embd_head_k}, TENSOR_NOT_REQUIRED);
+                    layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V, "bias", i), {attn_num_key_value_head * n_embd_head_v}, TENSOR_NOT_REQUIRED);
+                    layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), {hidden_size}, TENSOR_NOT_REQUIRED);
+                    layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {hidden_size}, 0);
+
+
+                    // feed forward (w/ optional biases)
+                    layer.ffn_norm   = create_tensor(tn(LLM_TENSOR_FFN_NORM, i), {hidden_size}, 0);
+                    layer.rope_freqs = create_tensor(tn(LLM_TENSOR_ROPE_FREQS, "weight", i), {n_rot/2}, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
+                    layer.ffn_gate   = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {hidden_size, ffn_intermediate_size}, 0);
+                    layer.ffn_down   = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { ffn_intermediate_size, hidden_size}, 0);
+                    layer.ffn_up     = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {hidden_size, ffn_intermediate_size}, 0);
+
+                    layer.ffn_gate_b = create_tensor(tn(LLM_TENSOR_FFN_GATE, "bias", i), {ffn_intermediate_size}, TENSOR_NOT_REQUIRED);
+                    layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i), {hidden_size}, TENSOR_NOT_REQUIRED);
+                    layer.ffn_up_b   = create_tensor(tn(LLM_TENSOR_FFN_UP, "bias", i), {ffn_intermediate_size}, TENSOR_NOT_REQUIRED);
+                }
+            } break;
+        case LLM_ARCH_HUNYUAN_MOE:
+            {
+                tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
+
+                // output
+                output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
+                output      = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
+                // if output is NULL, init from the input tok embed
+                if (output == NULL) {
+                    output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
+                }
+
+                for (int i = 0; i < n_layer; ++i) {
+                    auto & layer = layers[i];
+
+                    layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
+
+                    layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
+                    layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_k_gqa}, 0);
+                    layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_v_gqa}, 0);
+                    layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);
+
+                    layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd_head_k}, 0);
+                    layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd_head_k}, 0);
+
+                    layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
+
+                    layer.ffn_gate_inp  = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, 0);
+                    layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd, n_ff, n_expert}, 0);
+                    layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), { n_ff, n_embd, n_expert}, 0);
+                    layer.ffn_up_exps   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS, "weight", i), {n_embd, n_ff, n_expert}, 0);
+
+                    layer.ffn_gate_shexp = create_tensor(tn(LLM_TENSOR_FFN_GATE_SHEXP, "weight", i), {n_embd, hparams.n_ff_shexp}, 0);
+                    layer.ffn_up_shexp   = create_tensor(tn(LLM_TENSOR_FFN_UP_SHEXP, "weight", i), {n_embd, hparams.n_ff_shexp}, 0);
+                    layer.ffn_down_shexp = create_tensor(tn(LLM_TENSOR_FFN_DOWN_SHEXP, "weight", i), {hparams.n_ff_shexp, n_embd}, 0);
+                }
+            } break;
+        case LLM_ARCH_HUNYUAN_DENSE:
+            {
+                tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
+
+                // output
+                output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
+                output      = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
+                // if output is NULL, init from the input tok embed
+                if (output == NULL) {
+                    output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
+                }
+
+                for (int i = 0; i < n_layer; ++i) {
+                    auto & layer = layers[i];
+
+                    layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
+
+                    layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
+                    layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_k_gqa}, 0);
+                    layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_v_gqa}, 0);
+                    layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);
+
+                    layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd_head_k}, 0);
+                    layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd_head_k}, 0);
+
+                    layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
+
+                    layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
+                    layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0);
+                    layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
+
+                }
+            } break;
+        case LLM_ARCH_SMOLLM3:
+            {
+                tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
+
+                // output
+                output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
+                output      = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
+
+                // if output is NULL, init from the input tok embed
+                if (output == NULL) {
+                    output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
+                }
+
+                for (int i = 0; i < n_layer; ++i) {
+                    auto & layer = layers[i];
+
+                    layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
+
+                    layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
+                    layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_k_gqa}, 0);
+                    layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_v_gqa}, 0);
+                    layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);
+
+                    layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
+                    layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
+                    layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0);
+                    layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
+                }
+            } break;
+        case LLM_ARCH_OPENAI_MOE:
+            {
+                const int64_t n_ff_exp = hparams.n_ff_exp;
+
+                tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
+
+                // output
+                output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
+                output      = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0);
+
+                for (int i = 0; i < n_layer; ++i) {
+                    auto & layer = layers[i];
+
+                    layer.attn_norm      = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
+                    layer.attn_post_norm = create_tensor(tn(LLM_TENSOR_ATTN_POST_NORM, "weight", i), {n_embd}, 0);
+
+                    layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_head * n_rot}, 0);
+                    layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_head_kv * n_rot}, 0);
+                    layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_head_kv * n_rot}, 0);
+                    layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_head * n_rot, n_embd}, 0);
+
+                    layer.attn_sinks = create_tensor(tn(LLM_TENSOR_ATTN_SINKS, "weight", i), {n_head}, 0);
+
+                    layer.ffn_gate_inp  = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), { n_embd, n_expert}, 0);
+                    layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), { n_embd, n_ff_exp, n_expert}, 0);
+                    layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff_exp, n_embd, n_expert}, 0);
+                    layer.ffn_up_exps   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS, "weight", i), { n_embd, n_ff_exp, n_expert}, 0);
+
+                    // bias
+                    layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_head * n_rot}, 0);
+                    layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K, "bias", i), {n_head_kv * n_rot}, 0);
+                    layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V, "bias", i), {n_head_kv * n_rot}, 0);
+                    layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, 0);
+
+                    layer.ffn_gate_inp_b  = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "bias", i), {n_expert}, 0);
+                    layer.ffn_gate_exps_b = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "bias", i), {n_ff_exp, n_expert}, 0);
+                    layer.ffn_down_exps_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "bias", i), { n_embd, n_expert}, 0);
+                    layer.ffn_up_exps_b   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS, "bias", i), {n_ff_exp, n_expert}, 0);
+                }
+            } break;
+        case LLM_ARCH_LFM2:
+            {
+                tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
+                tok_norm = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "weight"), {n_embd}, 0);
+
+                for (int i = 0; i < n_layer; ++i) {
+                    auto & layer = layers[i];
+                    // ffn is same for transformer and conv layers
+                    layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
+                    layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
+                    layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0);
+                    layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
+
+                    // for operator_norm
+                    layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
+
+                    if (!hparams.is_recurrent(i)) {
+                        layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd_head_k}, 0);
+                        layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd_head_k}, 0);
+                        GGML_ASSERT(n_embd_v_gqa == n_embd_k_gqa);
+
+                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0);
+                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, hparams.n_embd_k_gqa(i)}, 0);
+                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, hparams.n_embd_v_gqa(i)}, 0);
+
+                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
+                    } else {
+                        layer.shortconv.conv     = create_tensor(tn(LLM_TENSOR_SHORTCONV_CONV, "weight", i), {hparams.n_shortconv_l_cache, n_embd}, 0);
+                        layer.shortconv.in_proj  = create_tensor(tn(LLM_TENSOR_SHORTCONV_INPROJ, "weight", i), {n_embd, 3 * n_embd}, 0);
+                        layer.shortconv.out_proj = create_tensor(tn(LLM_TENSOR_SHORTCONV_OUTPROJ, "weight", i), {n_embd, n_embd}, 0);
+                    }
+                }
+            } break;
+        case LLM_ARCH_SMALLTHINKER:
+            {
+                tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), { n_embd, n_vocab }, 0);
+
+                // output
+                output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), { n_embd }, 0);
+                output      = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
+
+                // if output is NULL, init from the input tok embed
+                if (output == NULL) {
+                    output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
+                }
+
+                for (int i = 0; i < n_layer; ++i) {
+                    auto & layer = layers[i];
+
+                    layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), { n_embd }, 0);
+
+                    layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), { n_embd, n_embd_head_k * n_head }, 0);
+                    layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), { n_embd, n_embd_gqa }, 0);
+                    layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), { n_embd, n_embd_gqa }, 0);
+                    layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), { n_embd_head_k * n_head, n_embd }, 0);
+
+                    layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), { n_embd }, 0);
+
+                    GGML_ASSERT(n_expert > 0 && "n_expert must be > 0 for SMALLTHINKER");
+                    GGML_ASSERT(n_expert_used > 0 && "n_expert_used must be > 0 for SMALLTHINKER");
+
+                    // MoE branch
+                    const int64_t n_ff_exp = hparams.n_ff_exp;
+                    layer.ffn_gate_inp  = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), { n_embd, n_expert }, 0);
+                    layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), { n_embd, n_ff_exp, n_expert }, 0);
+                    layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), { n_ff_exp, n_embd, n_expert }, 0);
+                    layer.ffn_up_exps   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS, "weight", i), { n_embd, n_ff_exp, n_expert }, 0);
+                }
+            } break;
         default:
             throw std::runtime_error("unknown architecture");
     }
@@ -4354,7 +5794,7 @@ void llama_model::print_info() const {
         LLAMA_LOG_INFO("%s: n_head_kv        = %s\n", __func__, print_f([&](uint32_t il) { return hparams.n_head_kv(il); }, hparams.n_layer).c_str());
         LLAMA_LOG_INFO("%s: n_rot            = %u\n", __func__, hparams.n_rot);
         LLAMA_LOG_INFO("%s: n_swa            = %u\n", __func__, hparams.n_swa);
-        LLAMA_LOG_INFO("%s: n_swa_pattern    = %u\n", __func__, hparams.n_swa_pattern);
+        LLAMA_LOG_INFO("%s: is_swa_any       = %u\n", __func__, hparams.is_swa_any());
         LLAMA_LOG_INFO("%s: n_embd_head_k    = %u\n", __func__, hparams.n_embd_head_k);
        
 LLAMA_LOG_INFO("%s: n_embd_head_v    = %u\n", __func__, hparams.n_embd_head_v);
         LLAMA_LOG_INFO("%s: n_gqa            = %s\n", __func__, print_f([&](uint32_t il) { return hparams.n_gqa(il); }, hparams.n_layer).c_str());
@@ -4377,10 +5817,27 @@ void llama_model::print_info() const {
LLAMA_LOG_INFO("%s: freq_scale_train = %g\n", __func__, hparams.rope_freq_scale_train); LLAMA_LOG_INFO("%s: n_ctx_orig_yarn = %u\n", __func__, hparams.n_ctx_orig_yarn); LLAMA_LOG_INFO("%s: rope_finetuned = %s\n", __func__, hparams.rope_finetuned ? "yes" : "unknown"); + if (!classifier_labels.empty()) { + LLAMA_LOG_INFO("%s: n_cls_out = %u\n", __func__, hparams.n_cls_out); + + size_t i = 0; + for (auto label : classifier_labels) { + LLAMA_LOG_INFO("%s: cls_label[%2zu] = %s\n", __func__, i++, label.c_str()); + } + } + } + + if (arch == LLM_ARCH_MAMBA || + arch == LLM_ARCH_MAMBA2 || + arch == LLM_ARCH_JAMBA || + arch == LLM_ARCH_FALCON_H1 || + arch == LLM_ARCH_PLAMO2 || + arch == LLM_ARCH_GRANITE_HYBRID) { LLAMA_LOG_INFO("%s: ssm_d_conv = %u\n", __func__, hparams.ssm_d_conv); LLAMA_LOG_INFO("%s: ssm_d_inner = %u\n", __func__, hparams.ssm_d_inner); LLAMA_LOG_INFO("%s: ssm_d_state = %u\n", __func__, hparams.ssm_d_state); LLAMA_LOG_INFO("%s: ssm_dt_rank = %u\n", __func__, hparams.ssm_dt_rank); + LLAMA_LOG_INFO("%s: ssm_n_group = %u\n", __func__, hparams.ssm_n_group); LLAMA_LOG_INFO("%s: ssm_dt_b_c_rms = %d\n", __func__, hparams.ssm_dt_b_c_rms); } @@ -4424,14 +5881,18 @@ void llama_model::print_info() const { LLAMA_LOG_INFO("%s: n_ff_shexp = %d\n", __func__, hparams.n_ff_shexp); } - if (arch == LLM_ARCH_QWEN3MOE) { + if (arch == LLM_ARCH_QWEN3MOE || arch == LLM_ARCH_OPENAI_MOE) { LLAMA_LOG_INFO("%s: n_ff_exp = %d\n", __func__, hparams.n_ff_exp); } - if (arch == LLM_ARCH_MINICPM || arch == LLM_ARCH_GRANITE || arch == LLM_ARCH_GRANITE_MOE) { + if (arch == LLM_ARCH_MINICPM || + arch == LLM_ARCH_GRANITE || + arch == LLM_ARCH_GRANITE_MOE || + arch == LLM_ARCH_GRANITE_HYBRID) { LLAMA_LOG_INFO("%s: f_embedding_scale = %f\n", __func__, hparams.f_embedding_scale); LLAMA_LOG_INFO("%s: f_residual_scale = %f\n", __func__, hparams.f_residual_scale); LLAMA_LOG_INFO("%s: f_attention_scale = %f\n", __func__, hparams.f_attention_scale); + LLAMA_LOG_INFO("%s: n_ff_shexp = %d\n", __func__, hparams.n_ff_shexp); } if (arch == LLM_ARCH_BAILINGMOE) { @@ -4442,6 +5903,11 @@ void llama_model::print_info() const { LLAMA_LOG_INFO("%s: expert_weights_norm = %d\n", __func__, hparams.expert_weights_norm); } + if (arch == LLM_ARCH_SMALLTHINKER) { + LLAMA_LOG_INFO("%s: n_ff_exp = %d\n", __func__, hparams.n_ff_exp); + LLAMA_LOG_INFO("%s: expert_gating_func = %s\n", __func__, llama_expert_gating_func_name((llama_expert_gating_func_type) hparams.expert_gating_func)); + } + vocab.print_info(); } @@ -4519,7 +5985,17 @@ const ggml_tensor * llama_model::get_tensor(const char * name) const { return it->second; } -ggml_tensor * llama_model::get_rope_factors(uint32_t n_ctx_per_seq, int il) const { +float llama_model::get_rope_freq_base (const llama_cparams & cparams, int il) const { + return hparams.is_swa(il) ? hparams.rope_freq_base_train_swa : cparams.rope_freq_base; +} + +float llama_model::get_rope_freq_scale(const llama_cparams & cparams, int il) const { + return hparams.is_swa(il) ? 
hparams.rope_freq_scale_train_swa : cparams.rope_freq_scale; +} + +ggml_tensor * llama_model::get_rope_factors(const llama_cparams & cparams, int il) const { + const uint32_t n_ctx_per_seq = cparams.n_ctx / cparams.n_seq_max; + // choose long/short freq factors based on the context size if (layers[il].rope_freqs != nullptr) { return layers[il].rope_freqs; @@ -4533,7 +6009,163 @@ ggml_tensor * llama_model::get_rope_factors(uint32_t n_ctx_per_seq, int il) cons } struct llm_build_llama : public llm_graph_context { - llm_build_llama(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + llm_build_llama(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) { + const int64_t n_embd_head = hparams.n_embd_head_v; + + GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); + GGML_ASSERT(n_embd_head == hparams.n_rot); + + ggml_tensor * cur; + ggml_tensor * inpL; + + inpL = build_inp_embd(model.tok_embd); + + // inp_pos - contains the positions + ggml_tensor * inp_pos = build_inp_pos(); + + auto * inp_attn = build_attn_inp_kv_unified(); + + const float kq_scale = hparams.f_attention_scale == 0.0f ? 1.0f/sqrtf(float(n_embd_head)) : hparams.f_attention_scale; + + ggml_tensor * inp_out_ids = build_inp_out_ids(); + + for (int il = 0; il < n_layer; ++il) { + ggml_tensor * inpSA = inpL; + + // norm + cur = build_norm(inpL, + model.layers[il].attn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "attn_norm", il); + + // self-attention + { + // rope freq factors for llama3; may return nullptr for llama2 and other models + ggml_tensor * rope_factors = model.get_rope_factors(cparams, il); + + // compute Q and K and RoPE them + ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur); + cb(Qcur, "Qcur", il); + if (model.layers[il].bq) { + Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq); + cb(Qcur, "Qcur", il); + } + + ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur); + cb(Kcur, "Kcur", il); + if (model.layers[il].bk) { + Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk); + cb(Kcur, "Kcur", il); + } + + ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur); + cb(Vcur, "Vcur", il); + if (model.layers[il].bv) { + Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv); + cb(Vcur, "Vcur", il); + } + + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); + + Qcur = ggml_rope_ext( + ctx0, Qcur, inp_pos, rope_factors, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + + Kcur = ggml_rope_ext( + ctx0, Kcur, inp_pos, rope_factors, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + + cb(Qcur, "Qcur", il); + cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); + + cur = build_attn(inp_attn, + model.layers[il].wo, model.layers[il].bo, + Qcur, Kcur, Vcur, nullptr, nullptr, kq_scale, il); + cb(cur, "attn_out", il); + } + + if (il == n_layer - 1 && inp_out_ids) { + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); + } + + ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); + cb(ffn_inp, "ffn_inp", il); + + // feed-forward network (non-MoE) + if (model.layers[il].ffn_gate_inp == nullptr) { + + cur = build_norm(ffn_inp, + model.layers[il].ffn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "ffn_norm", il); + + cur = build_ffn(cur, + 
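// [illustrative aside, not part of the patch] The two rope helpers above let
// graph builders pick RoPE parameters per layer instead of per model: SWA
// layers fall back to the values the model was trained with, full-attention
// layers keep the user-configured context parameters. A standalone mirror of
// the selection (types simplified, names hypothetical):
//
//     float rope_freq_base_for_layer(bool is_swa_layer,
//                                    float train_swa_base, float cfg_base) {
//         return is_swa_layer ? train_swa_base : cfg_base;
//     }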
@@ -4533,7 +6009,163 @@ ggml_tensor * llama_model::get_rope_factors(uint32_t n_ctx_per_seq, int il) cons
 }
 
 struct llm_build_llama : public llm_graph_context {
-    llm_build_llama(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) {
+    llm_build_llama(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) {
+        const int64_t n_embd_head = hparams.n_embd_head_v;
+
+        GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
+        GGML_ASSERT(n_embd_head == hparams.n_rot);
+
+        ggml_tensor * cur;
+        ggml_tensor * inpL;
+
+        inpL = build_inp_embd(model.tok_embd);
+
+        // inp_pos - contains the positions
+        ggml_tensor * inp_pos = build_inp_pos();
+
+        auto * inp_attn = build_attn_inp_kv_unified();
+
+        const float kq_scale = hparams.f_attention_scale == 0.0f ? 1.0f/sqrtf(float(n_embd_head)) : hparams.f_attention_scale;
+
+        ggml_tensor * inp_out_ids = build_inp_out_ids();
+
+        for (int il = 0; il < n_layer; ++il) {
+            ggml_tensor * inpSA = inpL;
+
+            // norm
+            cur = build_norm(inpL,
+                    model.layers[il].attn_norm, NULL,
+                    LLM_NORM_RMS, il);
+            cb(cur, "attn_norm", il);
+
+            // self-attention
+            {
+                // rope freq factors for llama3; may return nullptr for llama2 and other models
+                ggml_tensor * rope_factors = model.get_rope_factors(cparams, il);
+
+                // compute Q and K and RoPE them
+                ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
+                cb(Qcur, "Qcur", il);
+                if (model.layers[il].bq) {
+                    Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
+                    cb(Qcur, "Qcur", il);
+                }
+
+                ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
+                cb(Kcur, "Kcur", il);
+                if (model.layers[il].bk) {
+                    Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
+                    cb(Kcur, "Kcur", il);
+                }
+
+                ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
+                cb(Vcur, "Vcur", il);
+                if (model.layers[il].bv) {
+                    Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
+                    cb(Vcur, "Vcur", il);
+                }
+
+                Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head,    n_tokens);
+                Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
+                Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
+
+                Qcur = ggml_rope_ext(
+                        ctx0, Qcur, inp_pos, rope_factors,
+                        n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
+                        ext_factor, attn_factor, beta_fast, beta_slow
+                        );
+
+                Kcur = ggml_rope_ext(
+                        ctx0, Kcur, inp_pos, rope_factors,
+                        n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
+                        ext_factor, attn_factor, beta_fast, beta_slow
+                        );
+
+                cb(Qcur, "Qcur", il);
+                cb(Kcur, "Kcur", il);
+                cb(Vcur, "Vcur", il);
+
+                cur = build_attn(inp_attn,
+                        model.layers[il].wo, model.layers[il].bo,
+                        Qcur, Kcur, Vcur, nullptr, nullptr, kq_scale, il);
+                cb(cur, "attn_out", il);
+            }
+
+            if (il == n_layer - 1 && inp_out_ids) {
+                cur   = ggml_get_rows(ctx0, cur, inp_out_ids);
+                inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
+            }
+
+            ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
+            cb(ffn_inp, "ffn_inp", il);
+
+            // feed-forward network (non-MoE)
+            if (model.layers[il].ffn_gate_inp == nullptr) {
+
+                cur = build_norm(ffn_inp,
+                        model.layers[il].ffn_norm, NULL,
+                        LLM_NORM_RMS, il);
+                cb(cur, "ffn_norm", il);
+
+                cur = build_ffn(cur,
+                        model.layers[il].ffn_up,   model.layers[il].ffn_up_b,   NULL,
+                        model.layers[il].ffn_gate, model.layers[il].ffn_gate_b, NULL,
+                        model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL,
+                        NULL,
+                        LLM_FFN_SILU, LLM_FFN_PAR, il);
+                cb(cur, "ffn_out", il);
+            } else {
+                // MoE branch
+                cur = build_norm(ffn_inp,
+                        model.layers[il].ffn_norm, NULL,
+                        LLM_NORM_RMS, il);
+                cb(cur, "ffn_norm", il);
+
+                cur = build_moe_ffn(cur,
+                        model.layers[il].ffn_gate_inp,
+                        model.layers[il].ffn_up_exps,
+                        model.layers[il].ffn_gate_exps,
+                        model.layers[il].ffn_down_exps,
+                        nullptr,
+                        n_expert, n_expert_used,
+                        LLM_FFN_SILU, true,
+                        false, 0.0,
+                        LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX,
+                        il);
+                cb(cur, "ffn_moe_out", il);
+            }
+
+            cur = ggml_add(ctx0, cur, ffn_inp);
+            cb(cur, "ffn_out", il);
+
+            cur = build_cvec(cur, il);
+            cb(cur, "l_out", il);
+
+            // input for next layer
+            inpL = cur;
+        }
+
+        cur = inpL;
+
+        cur = build_norm(cur,
+                model.output_norm, NULL,
+                LLM_NORM_RMS, -1);
+
+        cb(cur, "result_norm", -1);
+        res->t_embd = cur;
+
+        // lm_head
+        cur = build_lora_mm(model.output, cur);
+
+        cb(cur, "result_output", -1);
+        res->t_logits = cur;
+
+        ggml_build_forward_expand(gf, cur);
+    }
+};
+
+struct llm_build_llama_iswa : public llm_graph_context {
+    llm_build_llama_iswa(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) {
         const int64_t n_embd_head = hparams.n_embd_head_v;
 
         GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
@@ -4549,19 +6181,18 @@ struct llm_build_llama : public llm_graph_context {
         // temperature tuning
         ggml_tensor * inp_attn_scale = nullptr;
-        if (arch == LLM_ARCH_LLAMA4) {
-            inp_attn_scale = build_inp_attn_scale();
-        }
+        inp_attn_scale = build_inp_attn_scale();
 
-        auto * inp_attn = build_attn_inp_kv_unified();
+        auto * inp_attn = build_attn_inp_kv_unified_iswa();
 
         const float kq_scale = hparams.f_attention_scale == 0.0f ? 1.0f/sqrtf(float(n_embd_head)) : hparams.f_attention_scale;
+
+        ggml_tensor * inp_out_ids = build_inp_out_ids();
+
         for (int il = 0; il < n_layer; ++il) {
             ggml_tensor * inpSA = inpL;
 
-            bool use_rope = arch == LLM_ARCH_LLAMA4
-                ? (il + 1) % hparams.n_no_rope_layer_step != 0
-                : true;
+            const bool use_rope = (il + 1) % hparams.n_no_rope_layer_step != 0;
 
             // norm
             cur = build_norm(inpL,
@@ -4572,7 +6203,7 @@ struct llm_build_llama : public llm_graph_context {
             // self-attention
             {
                 // rope freq factors for llama3; may return nullptr for llama2 and other models
-                ggml_tensor * rope_factors = model.get_rope_factors(n_ctx_per_seq, il);
+                ggml_tensor * rope_factors = model.get_rope_factors(cparams, il);
 
                 // compute Q and K and RoPE them
                 ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
@@ -4620,7 +6251,7 @@ struct llm_build_llama : public llm_graph_context {
                 cb(Kcur, "Kcur", il);
                 cb(Vcur, "Vcur", il);
 
-                if (arch == LLM_ARCH_LLAMA4 && use_rope && hparams.use_kq_norm) {
+                if (use_rope && hparams.use_kq_norm) {
                     // Llama4TextL2Norm
                     Qcur = ggml_rms_norm(ctx0, Qcur, hparams.f_norm_rms_eps);
                     Kcur = ggml_rms_norm(ctx0, Kcur, hparams.f_norm_rms_eps);
@@ -4628,30 +6259,22 @@ struct llm_build_llama : public llm_graph_context {
                     cb(Kcur, "Kcur_normed", il);
                 }
 
-                cur = build_attn(inp_attn, gf,
+                cur = build_attn(inp_attn,
                         model.layers[il].wo, model.layers[il].bo,
                         Qcur, Kcur, Vcur, nullptr, nullptr, kq_scale, il);
                 cb(cur, "attn_out", il);
             }
 
-            if (il == n_layer - 1) {
-                // skip computing output for unused tokens
-                ggml_tensor * inp_out_ids = build_inp_out_ids();
+            if (il == n_layer - 1 && inp_out_ids) {
                 cur   = ggml_get_rows(ctx0, cur, inp_out_ids);
                 inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
             }
 
-            // For Granite architecture
-            if (hparams.f_residual_scale) {
-                cur = ggml_scale(ctx0, cur, hparams.f_residual_scale);
-            }
-
             ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
             cb(ffn_inp, "ffn_inp", il);
 
             // feed-forward network (non-MoE)
             if (model.layers[il].ffn_gate_inp == nullptr) {
-
                 cur = build_norm(ffn_inp,
                         model.layers[il].ffn_norm, NULL,
                         LLM_NORM_RMS, il);
@@ -4664,9 +6287,7 @@ struct llm_build_llama : public llm_graph_context {
                         NULL,
                         LLM_FFN_SILU, LLM_FFN_PAR, il);
                 cb(cur, "ffn_out", il);
-
-            } else if (arch == LLM_ARCH_LLAMA4) {
-                // llama4 MoE
+            } else {
                 ggml_tensor * ffn_inp_normed = build_norm(ffn_inp,
                         model.layers[il].ffn_norm, NULL,
                         LLM_NORM_RMS, il);
@@ -4695,31 +6316,6 @@ struct llm_build_llama : public llm_graph_context {
 
                 cur = ggml_add(ctx0, moe_out, shexp_out);
                 cb(cur, "ffn_moe_out_merged", il);
-
-            } else {
-                // MoE branch
-                cur = build_norm(ffn_inp,
-                        model.layers[il].ffn_norm, NULL,
-                        LLM_NORM_RMS, il);
-                cb(cur, "ffn_norm", il);
-
-                cur = build_moe_ffn(cur,
-                        model.layers[il].ffn_gate_inp,
-                        model.layers[il].ffn_up_exps,
-                        model.layers[il].ffn_gate_exps,
-                        model.layers[il].ffn_down_exps,
-                        nullptr,
-                        n_expert, n_expert_used,
-                        LLM_FFN_SILU, true,
-                        false, 0.0,
-                        LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX,
-                        il);
-                cb(cur, "ffn_moe_out", il);
-            }
-
-            // For Granite architecture
-            if (hparams.f_residual_scale) {
-                cur = ggml_scale(ctx0, cur, hparams.f_residual_scale);
             }
 
             cur = ggml_add(ctx0, cur, ffn_inp);
@@ -4744,11 +6340,6 @@ struct llm_build_llama : public llm_graph_context {
         // lm_head
         cur = build_lora_mm(model.output, cur);
 
-        // For Granite architecture
-        if (hparams.f_logit_scale) {
-            cur = ggml_scale(ctx0, cur, 1.0f / hparams.f_logit_scale);
-        }
-
         cb(cur, "result_output", -1);
         res->t_logits = cur;
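// [illustrative aside, not part of the patch] The recurring change in the
// builders below hoists inp_out_ids out of the per-layer loop and null-checks
// it, instead of creating it inside the last layer. Sketch of the pattern:
//
//     ggml_tensor * inp_out_ids = build_inp_out_ids();   // once, before the loop
//     for (int il = 0; il < n_layer; ++il) {
//         // ... attention / ffn ...
//         if (il == n_layer - 1 && inp_out_ids) {        // prune only when needed
//             cur   = ggml_get_rows(ctx0, cur,   inp_out_ids);
//             inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
//         }
//     }
//
// ggml_get_rows keeps just the rows whose logits/embeddings were actually
// requested, so the final norm and lm_head matmuls run on fewer tokens.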
params) : llm_graph_context(params) { const int64_t n_embd_head = hparams.n_embd_head_v; GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); @@ -4774,6 +6365,9 @@ struct llm_build_deci : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified(); const float kq_scale = hparams.f_attention_scale == 0.0f ? 1.0f/sqrtf(float(n_embd_head)) : hparams.f_attention_scale; + + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; const int64_t n_head_kv = hparams.n_head_kv(il); @@ -4798,7 +6392,7 @@ struct llm_build_deci : public llm_graph_context { } else if (n_head > 0) { // self-attention // rope freq factors for llama3; may return nullptr for llama2 and other models - ggml_tensor * rope_factors = model.get_rope_factors(n_ctx_per_seq, il); + ggml_tensor * rope_factors = model.get_rope_factors(cparams, il); // compute Q and K and RoPE them ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur); @@ -4842,14 +6436,12 @@ struct llm_build_deci : public llm_graph_context { cb(Kcur, "Kcur", il); cb(Vcur, "Vcur", il); - cur = build_attn(inp_attn, gf, + cur = build_attn(inp_attn, model.layers[il].wo, model.layers[il].bo, Qcur, Kcur, Vcur, nullptr, nullptr, kq_scale, il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); } @@ -4859,11 +6451,6 @@ struct llm_build_deci : public llm_graph_context { continue; } - // For Granite architecture - if (hparams.f_residual_scale) { - cur = ggml_scale(ctx0, cur, hparams.f_residual_scale); - } - // modified to support attention-free layer of Llama-3_1-Nemotron-51B ggml_tensor * ffn_inp = cur; if (n_head > 0) { @@ -4887,11 +6474,6 @@ struct llm_build_deci : public llm_graph_context { cb(cur, "ffn_out", il); } - // For Granite architecture - if (hparams.f_residual_scale) { - cur = ggml_scale(ctx0, cur, hparams.f_residual_scale); - } - cur = ggml_add(ctx0, cur, ffn_inp); cb(cur, "ffn_out", il); @@ -4914,11 +6496,6 @@ struct llm_build_deci : public llm_graph_context { // lm_head cur = build_lora_mm(model.output, cur); - // For Granite architecture - if (hparams.f_logit_scale) { - cur = ggml_scale(ctx0, cur, 1.0f / hparams.f_logit_scale); - } - cb(cur, "result_output", -1); res->t_logits = cur; @@ -4927,7 +6504,7 @@ struct llm_build_deci : public llm_graph_context { }; struct llm_build_baichuan : public llm_graph_context { - llm_build_baichuan(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + llm_build_baichuan(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) { const int64_t n_embd_head = hparams.n_embd_head_v; GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); @@ -4943,6 +6520,8 @@ struct llm_build_baichuan : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified(); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -4989,14 +6568,12 @@ struct llm_build_baichuan : public llm_graph_context { cb(Kcur, "Kcur", il); cb(Vcur, "Vcur", il); - cur = build_attn(inp_attn, gf, + cur = build_attn(inp_attn, model.layers[il].wo, NULL, Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = 
build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); } @@ -5049,7 +6626,7 @@ struct llm_build_baichuan : public llm_graph_context { }; struct llm_build_xverse : public llm_graph_context { - llm_build_xverse(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + llm_build_xverse(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) { const int64_t n_embd_head = hparams.n_embd_head_v; GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); @@ -5065,6 +6642,8 @@ struct llm_build_xverse : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified(); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -5104,14 +6683,12 @@ struct llm_build_xverse : public llm_graph_context { cb(Kcur, "Kcur", il); cb(Vcur, "Vcur", il); - cur = build_attn(inp_attn, gf, + cur = build_attn(inp_attn, model.layers[il].wo, NULL, Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); } @@ -5162,7 +6739,7 @@ struct llm_build_xverse : public llm_graph_context { }; struct llm_build_falcon : public llm_graph_context { - llm_build_falcon(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + llm_build_falcon(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) { const int64_t n_embd_head = hparams.n_embd_head_v; const int64_t n_embd_gqa = hparams.n_embd_v_gqa(); @@ -5179,6 +6756,8 @@ struct llm_build_falcon : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified(); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { ggml_tensor * attn_norm; @@ -5204,12 +6783,10 @@ struct llm_build_falcon : public llm_graph_context { cur = build_lora_mm(model.layers[il].wqkv, cur); cb(cur, "wqkv", il); - ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd))); - ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd))); + ggml_tensor * Qcur = ggml_view_3d(ctx0, cur, n_embd_head, n_head, n_tokens, n_embd_head*sizeof(float), cur->nb[1], 0*sizeof(float)*(n_embd)); + ggml_tensor * Kcur = ggml_view_3d(ctx0, cur, n_embd_head, n_head_kv, n_tokens, n_embd_head*sizeof(float), cur->nb[1], 1*sizeof(float)*(n_embd)); ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa))); - Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); - Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); // using mode = 2 for neox mode @@ -5229,14 +6806,12 @@ struct llm_build_falcon : public llm_graph_context { cb(Kcur, "Kcur", il); cb(Vcur, "Vcur", il); - cur = build_attn(inp_attn, gf, + cur = build_attn(inp_attn, model.layers[il].wo, NULL, Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - 
ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); attn_norm = ggml_get_rows(ctx0, attn_norm, inp_out_ids); @@ -5286,7 +6861,7 @@ struct llm_build_falcon : public llm_graph_context { }; struct llm_build_grok : public llm_graph_context { - llm_build_grok(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + llm_build_grok(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) { const int64_t n_embd_head = hparams.n_embd_head_v; GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); @@ -5305,6 +6880,8 @@ struct llm_build_grok : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified(); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -5359,14 +6936,12 @@ struct llm_build_grok : public llm_graph_context { cb(Kcur, "Kcur", il); cb(Vcur, "Vcur", il); - cur = build_attn(inp_attn, gf, + cur = build_attn(inp_attn, model.layers[il].wo, model.layers[il].bo, Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f, il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); } @@ -5448,7 +7023,7 @@ struct llm_build_grok : public llm_graph_context { }; struct llm_build_dbrx : public llm_graph_context { - llm_build_dbrx(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + llm_build_dbrx(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) { const int64_t n_embd_head = hparams.n_embd_head_v; const int64_t n_embd_gqa = hparams.n_embd_v_gqa(); @@ -5465,6 +7040,8 @@ struct llm_build_dbrx : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified(); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -5486,12 +7063,10 @@ struct llm_build_dbrx : public llm_graph_context { cur = ggml_clamp(ctx0, cur, -hparams.f_clamp_kqv, hparams.f_clamp_kqv); cb(cur, "wqkv_clamped", il); - Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd))); - Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd))); + Qcur = ggml_view_3d(ctx0, cur, n_embd_head, n_head, n_tokens, n_embd_head*sizeof(float), cur->nb[1], 0*sizeof(float)*(n_embd)); + Kcur = ggml_view_3d(ctx0, cur, n_embd_head, n_head_kv, n_tokens, n_embd_head*sizeof(float), cur->nb[1], 1*sizeof(float)*(n_embd)); Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa))); - Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); - Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); Qcur = ggml_rope_ext( @@ -5510,14 +7085,12 @@ struct llm_build_dbrx : public llm_graph_context { cb(Kcur, "Kcur", il); cb(Vcur, "Vcur", il); - cur = build_attn(inp_attn, gf, + cur = build_attn(inp_attn, model.layers[il].wo, NULL, Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - 
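/*
 * Several hunks (falcon, dbrx, phi2, qwen, codeshell, ...) replace the pair
 * ggml_cont(ggml_view_2d(...)) + ggml_reshape_3d(...) with a single strided
 * ggml_view_3d over the fused QKV output, saving a copy. Each token's output
 * row is laid out as [ Q (n_embd) | K (n_embd_gqa) | V (n_embd_gqa) ], so Q
 * and K can be addressed in place. A sketch with a hypothetical helper name:
 *
 *     static ggml_tensor * qkv_head_view(ggml_context * ctx, ggml_tensor * qkv,
 *                                        int64_t n_embd_head, int64_t n_heads,
 *                                        int64_t n_tokens, size_t byte_offset) {
 *         return ggml_view_3d(ctx, qkv, n_embd_head, n_heads, n_tokens,
 *                             n_embd_head*sizeof(float), // stride between heads
 *                             qkv->nb[1],                // stride between tokens
 *                             byte_offset);              // 0 for Q, n_embd*sizeof(float) for K
 *     }
 *
 * V keeps its ggml_cont() in these hunks, presumably because its consumers
 * still expect a contiguous tensor at this point in the graph.
 */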
ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); } @@ -5575,7 +7148,7 @@ struct llm_build_dbrx : public llm_graph_context { }; struct llm_build_starcoder : public llm_graph_context { - llm_build_starcoder(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + llm_build_starcoder(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) { const int64_t n_embd_head = hparams.n_embd_head_v; const int64_t n_embd_gqa = hparams.n_embd_v_gqa(); @@ -5597,6 +7170,8 @@ struct llm_build_starcoder : public llm_graph_context { inpL = ggml_add(ctx0, inpL, pos); cb(inpL, "inpL", -1); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { cur = build_norm(inpL, model.layers[il].attn_norm, @@ -5624,14 +7199,12 @@ struct llm_build_starcoder : public llm_graph_context { cb(Kcur, "Kcur", il); cb(Vcur, "Vcur", il); - cur = build_attn(inp_attn, gf, + cur = build_attn(inp_attn, model.layers[il].wo, model.layers[il].bo, Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); } @@ -5684,7 +7257,7 @@ struct llm_build_starcoder : public llm_graph_context { }; struct llm_build_refact : public llm_graph_context { - llm_build_refact(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + llm_build_refact(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) { const int64_t n_embd_head = hparams.n_embd_head_v; GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); @@ -5696,6 +7269,8 @@ struct llm_build_refact : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified(); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -5723,14 +7298,12 @@ struct llm_build_refact : public llm_graph_context { cb(Kcur, "Kcur", il); cb(Vcur, "Vcur", il); - cur = build_attn(inp_attn, gf, + cur = build_attn(inp_attn, model.layers[il].wo, NULL, Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); } @@ -5783,7 +7356,7 @@ struct llm_build_refact : public llm_graph_context { }; struct llm_build_bert : public llm_graph_context { - llm_build_bert(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + llm_build_bert(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) { const int64_t n_embd_head = hparams.n_embd_head_v; const int64_t n_embd_gqa = hparams.n_embd_v_gqa(); @@ -5801,8 +7374,10 @@ struct llm_build_bert : public llm_graph_context { inpL = build_inp_embd(model.tok_embd); // token types are hardcoded to zero ("Sentence A") - ggml_tensor * type_row0 = ggml_view_1d(ctx0, model.type_embd, n_embd, 0); - inpL = ggml_add(ctx0, inpL, type_row0); + if (model.type_embd) { + ggml_tensor * 
type_row0 = ggml_view_1d(ctx0, model.type_embd, n_embd, 0); + inpL = ggml_add(ctx0, inpL, type_row0); + } if (model.arch == LLM_ARCH_BERT) { inpL = ggml_add(ctx0, ggml_get_rows(ctx0, model.pos_embd, inp_pos), inpL); } @@ -5814,17 +7389,34 @@ struct llm_build_bert : public llm_graph_context { auto * inp_attn = build_attn_inp_no_cache(); - // iterate layers + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { ggml_tensor * cur = inpL; - ggml_tensor * Qcur; - ggml_tensor * Kcur; - ggml_tensor * Vcur; + { + ggml_tensor * Qcur; + ggml_tensor * Kcur; + ggml_tensor * Vcur; - // self-attention - if (model.arch == LLM_ARCH_BERT || model.arch == LLM_ARCH_JINA_BERT_V2) { - Qcur = ggml_add(ctx0, build_lora_mm(model.layers[il].wq, cur), model.layers[il].bq); + // self-attention + if (model.layers[il].wqkv) { + cur = build_lora_mm(model.layers[il].wqkv, cur); + cb(cur, "wqkv", il); + + if (model.layers[il].bqkv) { + cur = ggml_add(ctx0, cur, model.layers[il].bqkv); + cb(cur, "bqkv", il); + } + + Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd))); + Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd))); + Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa))); + } else { + Qcur = ggml_add(ctx0, build_lora_mm(model.layers[il].wq, cur), model.layers[il].bq); + Kcur = ggml_add(ctx0, build_lora_mm(model.layers[il].wk, cur), model.layers[il].bk); + Vcur = ggml_add(ctx0, build_lora_mm(model.layers[il].wv, cur), model.layers[il].bv); + } if (model.layers[il].attn_q_norm) { Qcur = build_norm(Qcur, @@ -5833,8 +7425,6 @@ struct llm_build_bert : public llm_graph_context { LLM_NORM, il); } - Kcur = ggml_add(ctx0, build_lora_mm(model.layers[il].wk, cur), model.layers[il].bk); - if (model.layers[il].attn_k_norm) { Kcur = build_norm(Kcur, model.layers[il].attn_k_norm, @@ -5842,54 +7432,36 @@ struct llm_build_bert : public llm_graph_context { LLM_NORM, il); } - Vcur = ggml_add(ctx0, build_lora_mm(model.layers[il].wv, cur), model.layers[il].bv); - Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); - } else { - // compute Q and K and RoPE them - cur = build_lora_mm(model.layers[il].wqkv, cur); - cb(cur, "wqkv", il); - if (model.arch == LLM_ARCH_NOMIC_BERT_MOE) { - cur = ggml_add(ctx0, cur, model.layers[il].bqkv); - cb(cur, "bqkv", il); + // RoPE + if (model.arch == LLM_ARCH_NOMIC_BERT || model.arch == LLM_ARCH_NOMIC_BERT_MOE) { + Qcur = ggml_rope_ext( + ctx0, Qcur, inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + + Kcur = ggml_rope_ext( + ctx0, Kcur, inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); } - Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd))); - Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd))); - Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa))); + cb(Qcur, "Qcur", il); + cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); - Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); - Kcur = ggml_reshape_3d(ctx0, 
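/*
 * The BERT rework above folds several checkpoint layouts into one builder:
 * the token-type embedding is added only when model.type_embd exists, and the
 * attention path is chosen by which tensors are present (fused wqkv with an
 * optional bqkv bias vs. separate wq/wk/wv) instead of by hard-coded
 * architecture checks. Sketch of the dispatch shape:
 *
 *     if (model.layers[il].wqkv) {
 *         cur = build_lora_mm(model.layers[il].wqkv, cur);
 *         if (model.layers[il].bqkv) { // bias only if the checkpoint ships one
 *             cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
 *         }
 *         // ... split Q/K/V views out of cur ...
 *     } else {
 *         // separate projections; this path assumes biases are present
 *         Qcur = ggml_add(ctx0, build_lora_mm(model.layers[il].wq, cur), model.layers[il].bq);
 *     }
 */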
Kcur, n_embd_head, n_head_kv, n_tokens); - Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); - - Qcur = ggml_rope_ext( - ctx0, Qcur, inp_pos, nullptr, - n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow - ); - - Kcur = ggml_rope_ext( - ctx0, Kcur, inp_pos, nullptr, - n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow - ); + cur = build_attn(inp_attn, + model.layers[il].wo, model.layers[il].bo, + Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); + cb(cur, "kqv_out", il); } - cb(Qcur, "Qcur", il); - cb(Kcur, "Kcur", il); - cb(Vcur, "Vcur", il); - - cur = build_attn(inp_attn, gf, - model.layers[il].wo, model.layers[il].bo, - Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); - cb(cur, "kqv_out", il); - - if (il == n_layer - 1 && pooling_type == LLAMA_POOLING_TYPE_NONE) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); } @@ -5938,7 +7510,7 @@ struct llm_build_bert : public llm_graph_context { model.layers[il].ffn_gate, NULL, NULL, model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL, NULL, - LLM_FFN_GELU, LLM_FFN_PAR, il); + model.layers[il].ffn_gate ? LLM_FFN_GELU : LLM_FFN_GEGLU, LLM_FFN_PAR, il); cb(cur, "ffn_out", il); } else { cur = build_ffn(cur, @@ -5969,8 +7541,118 @@ struct llm_build_bert : public llm_graph_context { } }; +struct llm_build_neo_bert : public llm_graph_context { + llm_build_neo_bert(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) { + const int64_t n_embd_head = hparams.n_embd_head_v; + const int64_t n_embd_gqa = hparams.n_embd_v_gqa(); + + GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); + + ggml_tensor * cur; + ggml_tensor * inpL; + ggml_tensor * inp_pos = build_inp_pos(); + + // construct input embeddings (token, type, position) + inpL = build_inp_embd(model.tok_embd); + cb(inpL, "inp_embd", -1); + + auto * inp_attn = build_attn_inp_no_cache(); + + ggml_tensor * inp_out_ids = build_inp_out_ids(); + + for (int il = 0; il < n_layer; ++il) { + ggml_tensor * cur = inpL; + + // pre-norm + cur = build_norm(inpL, + model.layers[il].attn_norm, NULL, + LLM_NORM_RMS, il); + + { + ggml_tensor * Qcur; + ggml_tensor * Kcur; + ggml_tensor * Vcur; + + // self-attention + cur = build_lora_mm(model.layers[il].wqkv, cur); + cb(cur, "wqkv", il); + + Qcur = ggml_view_3d(ctx0, cur, n_embd_head, n_head, n_tokens, n_embd_head*sizeof(float), cur->nb[1], 0*sizeof(float)*(n_embd)); + Kcur = ggml_view_3d(ctx0, cur, n_embd_head, n_head_kv, n_tokens, n_embd_head*sizeof(float), cur->nb[1], 1*sizeof(float)*(n_embd)); + Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa))); + + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); + + // RoPE + Qcur = ggml_rope_ext( + ctx0, Qcur, inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + + Kcur = ggml_rope_ext( + ctx0, Kcur, inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + + cb(Qcur, "Qcur", il); + cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); + + cur = build_attn(inp_attn, + model.layers[il].wo, nullptr, + Qcur, Kcur, Vcur, 
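/*
 * The FFN hunk above keys the activation off the gate tensor:
 * ffn_gate ? LLM_FFN_GELU : LLM_FFN_GEGLU. With no separate gate weight,
 * ffn_up is taken to hold a fused up+gate projection and GEGLU splits its
 * output in half. A sketch of that split (upstream does this inside
 * build_ffn; the order of the halves here is an assumption):
 *
 *     ggml_tensor * up_gate = ggml_mul_mat(ctx0, ffn_up, cur); // [2*n_ff, n_tokens]
 *     ggml_tensor * up   = ggml_view_2d(ctx0, up_gate, n_ff, n_tokens, up_gate->nb[1], 0);
 *     ggml_tensor * gate = ggml_view_2d(ctx0, up_gate, n_ff, n_tokens, up_gate->nb[1],
 *                                       n_ff*ggml_element_size(up_gate));
 *     cur = ggml_mul(ctx0, ggml_gelu(ctx0, ggml_cont(ctx0, gate)), up);
 */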
nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); + cb(cur, "kqv_out", il); + } + + if (il == n_layer - 1 && inp_out_ids) { + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); + } + + // re-add the layer input + cur = ggml_add(ctx0, cur, inpL); + + ggml_tensor * ffn_inp = cur; + cb(ffn_inp, "ffn_inp", il); + + // pre-norm + cur = build_norm(ffn_inp, + model.layers[il].ffn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "ffn_norm", il); + + // feed-forward network + cur = build_ffn(cur, + model.layers[il].ffn_up, + NULL, NULL, NULL, NULL, NULL, + model.layers[il].ffn_down, + NULL, NULL, NULL, + LLM_FFN_SWIGLU, LLM_FFN_SEQ, il); + + // attentions bypass the intermediate layer + cur = ggml_add(ctx0, cur, ffn_inp); + + // input for next layer + inpL = cur; + } + + cur = inpL; + + cur = build_norm(cur, + model.output_norm_enc, NULL, + LLM_NORM_RMS, -1); + + cb(cur, "result_embd", -1); + res->t_embd = cur; + + ggml_build_forward_expand(gf, cur); + } +}; + struct llm_build_bloom : public llm_graph_context { - llm_build_bloom(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + llm_build_bloom(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) { const int64_t n_embd_head = hparams.n_embd_head_v; const int64_t n_embd_gqa = hparams.n_embd_v_gqa(); @@ -5989,6 +7671,8 @@ struct llm_build_bloom : public llm_graph_context { LLM_NORM, -1); cb(inpL, "inp_norm", -1); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { cur = build_norm(inpL, model.layers[il].attn_norm, @@ -6016,14 +7700,12 @@ struct llm_build_bloom : public llm_graph_context { cb(Kcur, "Kcur", il); cb(Vcur, "Vcur", il); - cur = build_attn(inp_attn, gf, + cur = build_attn(inp_attn, model.layers[il].wo, model.layers[il].bo, Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); } @@ -6076,7 +7758,7 @@ struct llm_build_bloom : public llm_graph_context { }; struct llm_build_mpt : public llm_graph_context { - llm_build_mpt(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + llm_build_mpt(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) { const int64_t n_embd_head = hparams.n_embd_head_v; const int64_t n_embd_gqa = hparams.n_embd_v_gqa(); @@ -6100,6 +7782,8 @@ struct llm_build_mpt : public llm_graph_context { cb(inpL, "inpL", -1); } + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { ggml_tensor * attn_norm; @@ -6126,8 +7810,8 @@ struct llm_build_mpt : public llm_graph_context { cb(cur, "wqkv_clamped", il); } - ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd))); - ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd))); + ggml_tensor * Qcur = ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)); + ggml_tensor * Kcur = ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)); ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 
1*sizeof(float)*(n_embd + n_embd_gqa))); cb(Qcur, "Qcur", il); @@ -6147,6 +7831,12 @@ struct llm_build_mpt : public llm_graph_context { model.layers[il].attn_k_norm_b, LLM_NORM, il); cb(Kcur, "Kcur", il); + } else { + Qcur = ggml_cont(ctx0, Qcur); + cb(Qcur, "Qcur", il); + + Kcur = ggml_cont(ctx0, Kcur); + cb(Kcur, "Kcur", il); } Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); @@ -6157,14 +7847,12 @@ struct llm_build_mpt : public llm_graph_context { cb(Kcur, "Kcur", il); cb(Vcur, "Vcur", il); - cur = build_attn(inp_attn, gf, + cur = build_attn(inp_attn, model.layers[il].wo, model.layers[il].bo, Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); } @@ -6218,7 +7906,7 @@ struct llm_build_mpt : public llm_graph_context { }; struct llm_build_stablelm : public llm_graph_context { - llm_build_stablelm(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + llm_build_stablelm(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) { const int64_t n_embd_head = hparams.n_embd_head_v; GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); @@ -6233,6 +7921,8 @@ struct llm_build_stablelm : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified(); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { // norm cur = build_norm(inpL, @@ -6303,14 +7993,12 @@ struct llm_build_stablelm : public llm_graph_context { cb(Kcur, "Kcur", il); cb(Vcur, "Vcur", il); - cur = build_attn(inp_attn, gf, + cur = build_attn(inp_attn, model.layers[il].wo, NULL, Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); @@ -6370,7 +8058,7 @@ struct llm_build_stablelm : public llm_graph_context { }; struct llm_build_qwen : public llm_graph_context { - llm_build_qwen(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + llm_build_qwen(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) { const int64_t n_embd_head = hparams.n_embd_head_v; GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); @@ -6385,6 +8073,8 @@ struct llm_build_qwen : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified(); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -6401,12 +8091,10 @@ struct llm_build_qwen : public llm_graph_context { cur = ggml_add(ctx0, cur, model.layers[il].bqkv); cb(cur, "bqkv", il); - ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd))); - ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd))); + ggml_tensor * Qcur = ggml_view_3d(ctx0, cur, n_embd_head, n_head, n_tokens, n_embd_head*sizeof(float), cur->nb[1], 0*sizeof(float)*(n_embd)); + ggml_tensor * Kcur = ggml_view_3d(ctx0, cur, 
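/*
 * The MPT hunk above applies the no-copy idea conditionally: Q and K start as
 * plain views, and the explicit ggml_cont() moves into the branch where no
 * q/k layernorm runs, presumably because build_norm() already materializes a
 * fresh tensor. The shape of that logic:
 *
 *     ggml_tensor * q = ggml_view_2d(ctx0, qkv, n_embd, n_tokens, qkv->nb[1], 0);
 *     if (model.layers[il].attn_q_norm) {
 *         q = build_norm(q, model.layers[il].attn_q_norm,
 *                        model.layers[il].attn_q_norm_b, LLM_NORM, il);
 *     } else {
 *         q = ggml_cont(ctx0, q); // no norm, so make the view contiguous here
 *     }
 */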
n_embd_head, n_head_kv, n_tokens, n_embd_head*sizeof(float), cur->nb[1], 1*sizeof(float)*(n_embd)); ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 2*sizeof(float)*(n_embd))); - Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); - Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); // using mode = 2 for neox mode @@ -6426,14 +8114,12 @@ struct llm_build_qwen : public llm_graph_context { cb(Kcur, "Kcur", il); cb(Vcur, "Vcur", il); - cur = build_attn(inp_attn, gf, + cur = build_attn(inp_attn, model.layers[il].wo, NULL, Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); } @@ -6486,7 +8172,7 @@ struct llm_build_qwen : public llm_graph_context { }; struct llm_build_qwen2 : public llm_graph_context { - llm_build_qwen2(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + llm_build_qwen2(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) { const int64_t n_embd_head = hparams.n_embd_head_v; GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); @@ -6502,6 +8188,8 @@ struct llm_build_qwen2 : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified(); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -6546,14 +8234,12 @@ struct llm_build_qwen2 : public llm_graph_context { cb(Kcur, "Kcur", il); cb(Vcur, "Vcur", il); - cur = build_attn(inp_attn, gf, + cur = build_attn(inp_attn, model.layers[il].wo, model.layers[il].bo, Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); } @@ -6596,6 +8282,213 @@ struct llm_build_qwen2 : public llm_graph_context { // lm_head cur = build_lora_mm(model.output, cur); + if (model.output_b != nullptr) { + cur = ggml_add(ctx0, cur, model.output_b); + } + + cb(cur, "result_output", -1); + res->t_logits = cur; + + ggml_build_forward_expand(gf, cur); + } +}; + +struct llm_build_dream : public llm_graph_context { + llm_build_dream(const llama_model & model, const llm_graph_params & params) : + llm_graph_context(params) { + //copied from qwen2 + const int64_t n_embd_head = hparams.n_embd_head_v; + + GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); + GGML_ASSERT(n_embd_head == hparams.n_rot); + + ggml_tensor * cur; + ggml_tensor * inpL; + + inpL = build_inp_embd(model.tok_embd); + + // inp_pos - contains the positions + ggml_tensor * inp_pos = build_inp_pos(); + + auto * inp_attn = build_attn_inp_no_cache(); + + ggml_tensor * inp_out_ids = build_inp_out_ids(); + + for (int il = 0; il < n_layer; ++il) { + ggml_tensor * inpSA = inpL; + + // norm + cur = build_norm(inpL, model.layers[il].attn_norm, NULL, LLM_NORM_RMS, il); + cb(cur, "attn_norm", il); + + // self-attention + { + // compute Q and K and RoPE them + ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur); + Qcur = ggml_add(ctx0, Qcur, 
model.layers[il].bq); + cb(Qcur, "Qcur", il); + + ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur); + Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk); + cb(Kcur, "Kcur", il); + + ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur); + Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv); + cb(Vcur, "Vcur", il); + + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); + + Qcur = ggml_rope_ext(ctx0, Qcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow); + + Kcur = ggml_rope_ext(ctx0, Kcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow); + + cb(Qcur, "Qcur", il); + cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); + + cur = build_attn(inp_attn, model.layers[il].wo, model.layers[il].bo, Qcur, Kcur, Vcur, nullptr, + nullptr, 1.0f / sqrtf(float(n_embd_head)), il); + } + + if (il == n_layer - 1 && inp_out_ids) { + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); + } + + ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); + cb(ffn_inp, "ffn_inp", il); + + // feed-forward network + cur = build_norm(ffn_inp, model.layers[il].ffn_norm, NULL, LLM_NORM_RMS, il); + cb(cur, "ffn_norm", il); + + cur = build_ffn(cur, model.layers[il].ffn_up, NULL, NULL, model.layers[il].ffn_gate, NULL, NULL, + model.layers[il].ffn_down, NULL, NULL, NULL, LLM_FFN_SILU, LLM_FFN_PAR, il); + cb(cur, "ffn_out", il); + + cur = ggml_add(ctx0, cur, ffn_inp); + + cur = build_cvec(cur, il); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; + } + + cur = inpL; + + cur = build_norm(cur, model.output_norm, NULL, LLM_NORM_RMS, -1); + + cb(cur, "result_norm", -1); + res->t_embd = cur; + + // lm_head + cur = build_lora_mm(model.output, cur); + + cb(cur, "result_output", -1); + res->t_logits = cur; + + ggml_build_forward_expand(gf, cur); + } +}; + +struct llm_build_llada : public llm_graph_context { + llm_build_llada(const llama_model & model, const llm_graph_params & params) : + llm_graph_context(params) { + // LLaDA is similar to LLaMA but uses non-causal attention for diffusion + const int64_t n_embd_head = hparams.n_embd_head_v; + + GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); + GGML_ASSERT(n_embd_head == hparams.n_rot); + + ggml_tensor * cur; + ggml_tensor * inpL; + + inpL = build_inp_embd(model.tok_embd); + + // inp_pos - contains the positions + ggml_tensor * inp_pos = build_inp_pos(); + + // Non-causal attention for diffusion + auto * inp_attn = build_attn_inp_no_cache(); + + ggml_tensor * inp_out_ids = build_inp_out_ids(); + + for (int il = 0; il < n_layer; ++il) { + ggml_tensor * inpSA = inpL; + + // norm + cur = build_norm(inpL, model.layers[il].attn_norm, NULL, LLM_NORM_RMS, il); + cb(cur, "attn_norm", il); + + // self-attention + { + // compute separate Q, K, V projections without bias, matching LLaDALlamaBlock + ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur); + ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur); + ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur); + + cb(Qcur, "Qcur", il); + cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); + + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + Vcur = ggml_reshape_3d(ctx0, Vcur, 
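/*
 * llm_build_dream and llm_build_llada are new builders for diffusion language
 * models. Both reuse the qwen2/llama layer structure but take their attention
 * input from build_attn_inp_no_cache(), the same non-causal, cache-free path
 * the BERT-style encoders use, since diffusion decoding re-scores whole token
 * blocks instead of extending a causal prefix. The one-line contrast:
 *
 *     auto * inp_dec = build_attn_inp_kv_unified(); // causal decoder, cached K/V
 *     auto * inp_dif = build_attn_inp_no_cache();   // bidirectional, nothing cached
 */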
n_embd_head, n_head_kv, n_tokens); + + Qcur = ggml_rope_ext(ctx0, Qcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow); + + Kcur = ggml_rope_ext(ctx0, Kcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow); + + cb(Qcur, "Qcur", il); + cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); + + cur = build_attn(inp_attn, model.layers[il].wo, NULL, Qcur, Kcur, Vcur, nullptr, nullptr, + 1.0f / sqrtf(float(n_embd_head)), il); + } + + if (il == n_layer - 1 && inp_out_ids) { + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); + } + + ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); + cb(ffn_inp, "ffn_inp", il); + + // feed-forward network + cur = build_norm(ffn_inp, model.layers[il].ffn_norm, NULL, LLM_NORM_RMS, il); + cb(cur, "ffn_norm", il); + + cur = build_ffn(cur, model.layers[il].ffn_up, NULL, NULL, model.layers[il].ffn_gate, NULL, NULL, + model.layers[il].ffn_down, NULL, NULL, NULL, LLM_FFN_SILU, LLM_FFN_PAR, il); + cb(cur, "ffn_out", il); + + cur = ggml_add(ctx0, cur, ffn_inp); + + cur = build_cvec(cur, il); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; + } + + cur = inpL; + + cur = build_norm(cur, model.output_norm, NULL, LLM_NORM_RMS, -1); + + cb(cur, "result_norm", -1); + res->t_embd = cur; + + // lm_head + cur = build_lora_mm(model.output, cur); + cb(cur, "result_output", -1); res->t_logits = cur; @@ -6604,7 +8497,7 @@ struct llm_build_qwen2 : public llm_graph_context { }; struct llm_build_qwen2vl : public llm_graph_context { - llm_build_qwen2vl(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + llm_build_qwen2vl(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) { const int64_t n_embd_head = hparams.n_embd_head_v; GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); @@ -6623,6 +8516,8 @@ struct llm_build_qwen2vl : public llm_graph_context { int sections[4]; std::copy(std::begin(hparams.rope_sections), std::begin(hparams.rope_sections) + 4, sections); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -6667,14 +8562,12 @@ struct llm_build_qwen2vl : public llm_graph_context { cb(Kcur, "Kcur", il); cb(Vcur, "Vcur", il); - cur = build_attn(inp_attn, gf, + cur = build_attn(inp_attn, model.layers[il].wo, model.layers[il].bo, Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); } @@ -6725,7 +8618,7 @@ struct llm_build_qwen2vl : public llm_graph_context { }; struct llm_build_qwen2moe : public llm_graph_context { - llm_build_qwen2moe(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + llm_build_qwen2moe(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) { const int64_t n_embd_head = hparams.n_embd_head_v; GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); @@ -6741,6 +8634,8 @@ struct llm_build_qwen2moe : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified(); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; 
++il) { ggml_tensor * inpSA = inpL; @@ -6794,14 +8689,12 @@ struct llm_build_qwen2moe : public llm_graph_context { cb(Kcur, "Kcur", il); cb(Vcur, "Vcur", il); - cur = build_attn(inp_attn, gf, + cur = build_attn(inp_attn, model.layers[il].wo, model.layers[il].bo, Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); } @@ -6884,7 +8777,7 @@ struct llm_build_qwen2moe : public llm_graph_context { }; struct llm_build_qwen3 : public llm_graph_context { - llm_build_qwen3(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + llm_build_qwen3(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) { const int64_t n_embd_head = hparams.n_embd_head_v; GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); @@ -6900,6 +8793,8 @@ struct llm_build_qwen3 : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified(); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -6947,14 +8842,12 @@ struct llm_build_qwen3 : public llm_graph_context { cb(Kcur, "Kcur", il); cb(Vcur, "Vcur", il); - cur = build_attn(inp_attn, gf, + cur = build_attn(inp_attn, model.layers[il].wo, model.layers[il].bo, Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); } @@ -7005,7 +8898,7 @@ struct llm_build_qwen3 : public llm_graph_context { }; struct llm_build_qwen3moe : public llm_graph_context { - llm_build_qwen3moe(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + llm_build_qwen3moe(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) { const int64_t n_embd_head = hparams.n_embd_head_v; GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); @@ -7021,6 +8914,8 @@ struct llm_build_qwen3moe : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified(); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -7068,14 +8963,12 @@ struct llm_build_qwen3moe : public llm_graph_context { cb(Kcur, "Kcur", il); cb(Vcur, "Vcur", il); - cur = build_attn(inp_attn, gf, + cur = build_attn(inp_attn, model.layers[il].wo, model.layers[il].bo, Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); } @@ -7133,7 +9026,7 @@ struct llm_build_qwen3moe : public llm_graph_context { }; struct llm_build_phi2 : public llm_graph_context { - llm_build_phi2(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + llm_build_phi2(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) { const int64_t n_embd_head = 
hparams.n_embd_head_v; const int64_t n_embd_gqa = hparams.n_embd_v_gqa(); @@ -7151,6 +9044,8 @@ struct llm_build_phi2 : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified(); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { attn_norm_output = build_norm(inpL, model.layers[il].attn_norm, @@ -7171,21 +9066,21 @@ struct llm_build_phi2 : public llm_graph_context { cur = ggml_add(ctx0, cur, model.layers[il].bqkv); cb(cur, "bqkv", il); - Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd))); - Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd))); + Qcur = ggml_view_3d(ctx0, cur, n_embd_head, n_head, n_tokens, n_embd_head*sizeof(float), cur->nb[1], 0*sizeof(float)*(n_embd)); + Kcur = ggml_view_3d(ctx0, cur, n_embd_head, n_head_kv, n_tokens, n_embd_head*sizeof(float), cur->nb[1], 1*sizeof(float)*(n_embd)); Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa))); } else { Qcur = ggml_add(ctx0, build_lora_mm(model.layers[il].wq, attn_norm_output), model.layers[il].bq); Kcur = ggml_add(ctx0, build_lora_mm(model.layers[il].wk, attn_norm_output), model.layers[il].bk); Vcur = ggml_add(ctx0, build_lora_mm(model.layers[il].wv, attn_norm_output), model.layers[il].bv); + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); } cb(Qcur, "Qcur", il); cb(Kcur, "Kcur", il); cb(Vcur, "Vcur", il); - Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); - Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); Qcur = ggml_rope_ext( @@ -7208,14 +9103,12 @@ struct llm_build_phi2 : public llm_graph_context { // ref: https://github.com/ml-explore/mlx-examples/blob/08e862336ade809bc37d1035f94b359e7d1a5152/phi2/phi2.py#L64-L66 Qcur = ggml_scale(ctx0, Qcur, 1.0f/sqrtf(float(n_embd_head))); - cur = build_attn(inp_attn, gf, + cur = build_attn(inp_attn, model.layers[il].wo, model.layers[il].bo, Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f, il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); attn_norm_output = ggml_get_rows(ctx0, attn_norm_output, inp_out_ids); @@ -7262,8 +9155,9 @@ struct llm_build_phi2 : public llm_graph_context { } }; +template<bool iswa> struct llm_build_phi3 : public llm_graph_context { - llm_build_phi3(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + llm_build_phi3(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) { const int64_t n_embd_head = hparams.n_embd_head_v; const int64_t n_embd_gqa = hparams.n_embd_v_gqa(); @@ -7277,7 +9171,16 @@ struct llm_build_phi3 : public llm_graph_context { // inp_pos - contains the positions ggml_tensor * inp_pos = build_inp_pos(); - auto * inp_attn = build_attn_inp_kv_unified(); + using inp_attn_type = std::conditional_t<iswa, llm_graph_input_attn_kv_unified_iswa, llm_graph_input_attn_kv_unified>; + inp_attn_type * inp_attn = nullptr; + + if constexpr (iswa) { + inp_attn = build_attn_inp_kv_unified_iswa(); + } else { + inp_attn = build_attn_inp_kv_unified(); + } + + ggml_tensor * inp_out_ids = build_inp_out_ids(); for (int il = 0; il < n_layer; 
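/*
 * llm_build_phi3 becomes a template over a bool iswa parameter so the
 * sliding-window and full-attention variants share one body while the
 * attention-input type is fixed at compile time via std::conditional_t.
 * A sketch of the instantiation this implies at the graph-selection site
 * (hypothetical surrounding code, mirroring how templated builders are
 * usually dispatched):
 *
 *     if (hparams.swa_type != LLAMA_SWA_TYPE_NONE) {
 *         llm = std::make_unique<llm_build_phi3<true>>(model, params);
 *     } else {
 *         llm = std::make_unique<llm_build_phi3<false>>(model, params);
 *     }
 */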
++il) { auto * residual = inpL; @@ -7285,7 +9188,7 @@ struct llm_build_phi3 : public llm_graph_context { // self-attention { // rope freq factors for 128k context - ggml_tensor * rope_factors = model.get_rope_factors(n_ctx_per_seq, il); + ggml_tensor * rope_factors = model.get_rope_factors(cparams, il); ggml_tensor* attn_norm_output = build_norm(inpL, model.layers[il].attn_norm, @@ -7301,21 +9204,21 @@ struct llm_build_phi3 : public llm_graph_context { cur = build_lora_mm(model.layers[il].wqkv, attn_norm_output); cb(cur, "wqkv", il); - Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0 * sizeof(float) * (n_embd))); - Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1 * sizeof(float) * (n_embd))); + Qcur = ggml_view_3d(ctx0, cur, n_embd_head, n_head, n_tokens, n_embd_head * sizeof(float), cur->nb[1], 0 * sizeof(float) * (n_embd)); + Kcur = ggml_view_3d(ctx0, cur, n_embd_head, n_head_kv, n_tokens, n_embd_head * sizeof(float), cur->nb[1], 1 * sizeof(float) * (n_embd)); Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1 * sizeof(float) * (n_embd + n_embd_gqa))); } else { Qcur = ggml_add(ctx0, build_lora_mm(model.layers[il].wq, attn_norm_output), model.layers[il].bq); Kcur = ggml_add(ctx0, build_lora_mm(model.layers[il].wk, attn_norm_output), model.layers[il].bk); Vcur = ggml_add(ctx0, build_lora_mm(model.layers[il].wv, attn_norm_output), model.layers[il].bv); + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); } cb(Qcur, "Qcur", il); cb(Kcur, "Kcur", il); cb(Vcur, "Vcur", il); - Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); - Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); Qcur = ggml_rope_ext( @@ -7337,14 +9240,12 @@ struct llm_build_phi3 : public llm_graph_context { Qcur = ggml_scale(ctx0, Qcur, 1.0f / sqrtf(float(n_embd_head))); cb(Qcur, "Qcur", il); - cur = build_attn(inp_attn, gf, + cur = build_attn(inp_attn, model.layers[il].wo, model.layers[il].bo, Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f, il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor* inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); residual = ggml_get_rows(ctx0, residual, inp_out_ids); } @@ -7414,7 +9315,7 @@ struct llm_build_phi3 : public llm_graph_context { }; struct llm_build_plamo : public llm_graph_context { - llm_build_plamo(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + llm_build_plamo(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) { const int64_t n_embd_head = hparams.n_embd_head_v; GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); @@ -7430,15 +9331,16 @@ struct llm_build_plamo : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified(); - for (int il = 0; il < n_layer; ++il) { + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { // norm cur = build_norm(inpL, model.layers[il].attn_norm, NULL, LLM_NORM_RMS, il); cb(cur, "attn_norm", il); - ggml_tensor * attention_norm = cur; + ggml_tensor * sa_inp = cur; // self-attention { @@ -7472,22 +9374,21 @@ struct llm_build_plamo : public llm_graph_context { cb(Kcur, "Kcur", il); cb(Vcur, "Vcur", il); - cur = 
build_attn(inp_attn, gf, + cur = build_attn(inp_attn, model.layers[il].wo, NULL, Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); } - ggml_tensor * sa_out = cur; - cur = attention_norm; - - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); - sa_out = ggml_get_rows(ctx0, sa_out, inp_out_ids); + sa_inp = ggml_get_rows(ctx0, sa_inp, inp_out_ids); inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); } + ggml_tensor * sa_out = cur; + + cur = sa_inp; + // feed-forward network { cur = build_ffn(cur, @@ -7529,7 +9430,7 @@ struct llm_build_plamo : public llm_graph_context { }; struct llm_build_gpt2 : public llm_graph_context { - llm_build_gpt2(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + llm_build_gpt2(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) { const int64_t n_embd_head = hparams.n_embd_head_v; const int64_t n_embd_gqa = hparams.n_embd_v_gqa(); @@ -7552,6 +9453,8 @@ struct llm_build_gpt2 : public llm_graph_context { inpL = ggml_add(ctx0, inpL, pos); cb(inpL, "inpL", -1); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { cur = build_norm(inpL, model.layers[il].attn_norm, @@ -7579,14 +9482,12 @@ struct llm_build_gpt2 : public llm_graph_context { Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); - cur = build_attn(inp_attn, gf, + cur = build_attn(inp_attn, model.layers[il].wo, model.layers[il].bo, Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); } @@ -7639,7 +9540,7 @@ struct llm_build_gpt2 : public llm_graph_context { }; struct llm_build_codeshell : public llm_graph_context { - llm_build_codeshell(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + llm_build_codeshell(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) { const int64_t n_embd_head = hparams.n_embd_head_v; const int64_t n_embd_gqa = hparams.n_embd_v_gqa(); @@ -7656,6 +9557,8 @@ struct llm_build_codeshell : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified(); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { cur = build_norm(inpL, model.layers[il].attn_norm, @@ -7671,12 +9574,10 @@ struct llm_build_codeshell : public llm_graph_context { cur = ggml_add(ctx0, cur, model.layers[il].bqkv); cb(cur, "bqkv", il); - ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd))); - ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd))); + ggml_tensor * Qcur = ggml_view_3d(ctx0, cur, n_embd_head, n_head, n_tokens, n_embd_head*sizeof(float), cur->nb[1], 0*sizeof(float)*(n_embd)); + ggml_tensor * Kcur = ggml_view_3d(ctx0, cur, n_embd_head, n_head_kv, n_tokens, n_embd_head*sizeof(float), cur->nb[1], 1*sizeof(float)*(n_embd)); ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, 
n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa))); - Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); - Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); Qcur = ggml_rope_ext( @@ -7695,14 +9596,12 @@ struct llm_build_codeshell : public llm_graph_context { cb(Kcur, "Kcur", il); cb(Vcur, "Vcur", il); - cur = build_attn(inp_attn, gf, + cur = build_attn(inp_attn, model.layers[il].wo, model.layers[il].bo, Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); } @@ -7755,134 +9654,7 @@ struct llm_build_codeshell : public llm_graph_context { }; struct llm_build_orion : public llm_graph_context { - llm_build_orion(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { - const int64_t n_embd_head = hparams.n_embd_head_v; - - GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); - GGML_ASSERT(n_embd_head == hparams.n_rot); - - ggml_tensor * cur; - ggml_tensor * inpL; - - inpL = build_inp_embd(model.tok_embd); - - // inp_pos - contains the positions - ggml_tensor * inp_pos = build_inp_pos(); - - auto * inp_attn = build_attn_inp_kv_unified(); - - for (int il = 0; il < n_layer; ++il) { - ggml_tensor * inpSA = inpL; - - // norm - cur = build_norm(inpL, - model.layers[il].attn_norm, model.layers[il].attn_norm_b, - LLM_NORM, il); - cb(cur, "attn_norm", il); - - // self-attention - { - // compute Q and K and RoPE them - ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur); - cb(Qcur, "Qcur", il); - // if (model.layers[il].bq) { - // Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq); - // cb(Qcur, "Qcur", il); - // } - - ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur); - cb(Kcur, "Kcur", il); - // if (model.layers[il].bk) { - // Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk); - // cb(Kcur, "Kcur", il); - // } - - ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur); - cb(Vcur, "Vcur", il); - // if (model.layers[il].bv) { - // Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv); - // cb(Vcur, "Vcur", il); - // } - - Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); - Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); - Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); - - Qcur = ggml_rope_ext( - ctx0, Qcur, inp_pos, nullptr, - n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow - ); - - Kcur = ggml_rope_ext( - ctx0, Kcur, inp_pos, nullptr, - n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow - ); - - cb(Qcur, "Qcur", il); - cb(Kcur, "Kcur", il); - cb(Vcur, "Vcur", il); - - cur = build_attn(inp_attn, gf, - model.layers[il].wo, NULL, - Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); - } - - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); - cur = ggml_get_rows(ctx0, cur, inp_out_ids); - inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); - } - - ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); - cb(ffn_inp, "ffn_inp", il); - - // feed-forward network - cur = build_norm(ffn_inp, - 
model.layers[il].ffn_norm, model.layers[il].ffn_norm_b, - LLM_NORM, il); - cb(cur, "ffn_norm", il); - - cur = build_ffn(cur, - model.layers[il].ffn_up, NULL, NULL, - model.layers[il].ffn_gate, NULL, NULL, - model.layers[il].ffn_down, NULL, NULL, - NULL, - LLM_FFN_SILU, LLM_FFN_PAR, il); - cb(cur, "ffn_out", il); - - cur = ggml_add(ctx0, cur, ffn_inp); - - cur = build_cvec(cur, il); - cb(cur, "l_out", il); - - // input for next layer - inpL = cur; - } - - cur = inpL; - - cur = build_norm(cur, - model.output_norm, model.output_norm_b, - LLM_NORM, -1); - - cb(cur, "result_norm", -1); - res->t_embd = cur; - - // lm_head - cur = build_lora_mm(model.output, cur); - - cb(cur, "result_output", -1); - res->t_logits = cur; - - ggml_build_forward_expand(gf, cur); - } -}; - -struct llm_build_internlm2 : public llm_graph_context { - llm_build_internlm2(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + llm_build_orion(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) { const int64_t n_embd_head = hparams.n_embd_head_v; GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); @@ -7898,6 +9670,135 @@ struct llm_build_internlm2 : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified(); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + + for (int il = 0; il < n_layer; ++il) { + ggml_tensor * inpSA = inpL; + + // norm + cur = build_norm(inpL, + model.layers[il].attn_norm, model.layers[il].attn_norm_b, + LLM_NORM, il); + cb(cur, "attn_norm", il); + + // self-attention + { + // compute Q and K and RoPE them + ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur); + cb(Qcur, "Qcur", il); + // if (model.layers[il].bq) { + // Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq); + // cb(Qcur, "Qcur", il); + // } + + ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur); + cb(Kcur, "Kcur", il); + // if (model.layers[il].bk) { + // Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk); + // cb(Kcur, "Kcur", il); + // } + + ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur); + cb(Vcur, "Vcur", il); + // if (model.layers[il].bv) { + // Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv); + // cb(Vcur, "Vcur", il); + // } + + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); + + Qcur = ggml_rope_ext( + ctx0, Qcur, inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + + Kcur = ggml_rope_ext( + ctx0, Kcur, inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + + cb(Qcur, "Qcur", il); + cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); + + cur = build_attn(inp_attn, + model.layers[il].wo, NULL, + Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); + } + + if (il == n_layer - 1 && inp_out_ids) { + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); + } + + ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); + cb(ffn_inp, "ffn_inp", il); + + // feed-forward network + cur = build_norm(ffn_inp, + model.layers[il].ffn_norm, model.layers[il].ffn_norm_b, + LLM_NORM, il); + cb(cur, "ffn_norm", il); + + cur = build_ffn(cur, + model.layers[il].ffn_up, NULL, NULL, + model.layers[il].ffn_gate, NULL, NULL, + model.layers[il].ffn_down, NULL, NULL, + 
NULL, + LLM_FFN_SILU, LLM_FFN_PAR, il); + cb(cur, "ffn_out", il); + + cur = ggml_add(ctx0, cur, ffn_inp); + + cur = build_cvec(cur, il); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; + } + + cur = inpL; + + cur = build_norm(cur, + model.output_norm, model.output_norm_b, + LLM_NORM, -1); + + cb(cur, "result_norm", -1); + res->t_embd = cur; + + // lm_head + cur = build_lora_mm(model.output, cur); + + cb(cur, "result_output", -1); + res->t_logits = cur; + + ggml_build_forward_expand(gf, cur); + } +}; + +struct llm_build_internlm2 : public llm_graph_context { + llm_build_internlm2(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) { + const int64_t n_embd_head = hparams.n_embd_head_v; + + GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); + GGML_ASSERT(n_embd_head == hparams.n_rot); + + ggml_tensor * cur; + ggml_tensor * inpL; + + inpL = build_inp_embd(model.tok_embd); + + // inp_pos - contains the positions + ggml_tensor * inp_pos = build_inp_pos(); + + auto * inp_attn = build_attn_inp_kv_unified(); + + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -7951,14 +9852,12 @@ struct llm_build_internlm2 : public llm_graph_context { cb(Kcur, "Kcur", il); cb(Vcur, "Vcur", il); - cur = build_attn(inp_attn, gf, + cur = build_attn(inp_attn, model.layers[il].wo, model.layers[il].bo, Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); } @@ -8009,7 +9908,7 @@ struct llm_build_internlm2 : public llm_graph_context { }; struct llm_build_minicpm3 : public llm_graph_context { - llm_build_minicpm3(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + llm_build_minicpm3(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) { //TODO: if the model varies, these parameters need to be read from the model const int64_t n_embd_base = 256; const float scale_embd = 12.0f; @@ -8034,10 +9933,12 @@ struct llm_build_minicpm3 : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified(); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; - ggml_tensor * rope_factors = model.get_rope_factors(n_ctx_per_seq, il); + ggml_tensor * rope_factors = model.get_rope_factors(cparams, il); // norm cur = build_norm(inpL, @@ -8092,8 +9993,6 @@ struct llm_build_minicpm3 : public llm_graph_context { ggml_row_size(kv_pe_compresseed->type, kv_lora_rank)); cb(k_pe, "k_pe", il); - // TODO: the CUDA backend used to not support non-cont. (RMS) norm, investigate removing ggml_cont - kv_compressed = ggml_cont(ctx0, kv_compressed); kv_compressed = build_norm(kv_compressed, model.layers[il].attn_kv_a_norm, NULL, LLM_NORM_RMS, il); @@ -8120,12 +10019,6 @@ struct llm_build_minicpm3 : public llm_graph_context { v_states = ggml_cont(ctx0, v_states); cb(v_states, "v_states", il); - v_states = ggml_view_2d(ctx0, v_states, hparams.n_embd_head_v * n_head, n_tokens, - ggml_row_size(kv->type, hparams.n_embd_head_v * n_head), - 0); - cb(v_states, "v_states", il); - - q_pe = ggml_cont(ctx0, q_pe); // TODO: the CUDA backend used to not support non-cont. 
RoPE, investigate removing this q_pe = ggml_rope_ext( ctx0, q_pe, inp_pos, rope_factors, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, @@ -8134,7 +10027,6 @@ struct llm_build_minicpm3 : public llm_graph_context { cb(q_pe, "q_pe", il); // shared RoPE key - k_pe = ggml_cont(ctx0, k_pe); // TODO: the CUDA backend used to not support non-cont. RoPE, investigate removing this k_pe = ggml_rope_ext( ctx0, k_pe, inp_pos, rope_factors, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, @@ -8148,20 +10040,18 @@ struct llm_build_minicpm3 : public llm_graph_context { ggml_tensor * k_states = ggml_concat(ctx0, k_nope, ggml_repeat(ctx0, k_pe, q_pe), 0); cb(k_states, "k_states", il); - cur = build_attn(inp_attn, gf, + cur = build_attn(inp_attn, model.layers[il].wo, NULL, q_states, k_states, v_states, nullptr, nullptr, kq_scale, il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); } // scale_res - scale the hidden states for residual connection - const float scale_res = scale_depth/sqrtf(float(n_layer)); + const float scale_res = scale_depth/sqrtf(float(n_layer)); // TODO: is this correct? cur = ggml_scale(ctx0, cur, scale_res); cb(cur, "hidden_scaled", il); @@ -8222,7 +10112,7 @@ struct llm_build_minicpm3 : public llm_graph_context { }; struct llm_build_gemma : public llm_graph_context { - llm_build_gemma(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + llm_build_gemma(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) { const int64_t n_embd_head = hparams.n_embd_head_v; ggml_tensor * cur; @@ -8238,6 +10128,8 @@ struct llm_build_gemma : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified(); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { // norm cur = build_norm(inpL, @@ -8278,14 +10170,12 @@ struct llm_build_gemma : public llm_graph_context { Qcur = ggml_scale(ctx0, Qcur, 1.0f / sqrtf(float(n_embd_head))); cb(Qcur, "Qcur_scaled", il); - cur = build_attn(inp_attn, gf, + cur = build_attn(inp_attn, model.layers[il].wo, NULL, Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f, il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); } @@ -8337,8 +10227,8 @@ struct llm_build_gemma : public llm_graph_context { } }; -struct llm_build_gemma2 : public llm_graph_context { - llm_build_gemma2(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { +struct llm_build_gemma2_iswa : public llm_graph_context { + llm_build_gemma2_iswa(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) { const int64_t n_embd_head = hparams.n_embd_head_k; ggml_tensor * cur; @@ -8352,7 +10242,9 @@ struct llm_build_gemma2 : public llm_graph_context { // inp_pos - contains the positions ggml_tensor * inp_pos = build_inp_pos(); - auto * inp_attn = build_attn_inp_kv_unified(); + auto * inp_attn = build_attn_inp_kv_unified_iswa(); + + ggml_tensor * inp_out_ids = build_inp_out_ids(); for (int il = 0; il < n_layer; ++il) { // norm @@ -8391,32 +10283,23 @@ struct llm_build_gemma2 : public 
llm_graph_context { cb(Kcur, "Kcur", il); cb(Vcur, "Vcur", il); - // ref: https://github.com/google/gemma_pytorch/commit/03e657582d17cb5a8617ebf333c1c16f3694670e - switch (model.type) { - case LLM_TYPE_2B: - case LLM_TYPE_9B: - case LLM_TYPE_27B: Qcur = ggml_scale(ctx0, Qcur, 1.0f / sqrtf(float(n_embd_head))); break; - default: GGML_ABORT("fatal error"); - }; - cb(Qcur, "Qcur_scaled", il); + Qcur = ggml_scale(ctx0, Qcur, hparams.f_attention_scale); - cur = build_attn(inp_attn, gf, + cur = build_attn(inp_attn, model.layers[il].wo, NULL, Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f, il); } + if (il == n_layer - 1 && inp_out_ids) { + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); + } + cur = build_norm(cur, model.layers[il].attn_post_norm, NULL, LLM_NORM_RMS, il); cb(cur, "attn_post_norm", il); - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); - cur = ggml_get_rows(ctx0, cur, inp_out_ids); - inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); - } - ggml_tensor * sa_out = ggml_add(ctx0, cur, inpL); cb(sa_out, "sa_out", il); @@ -8474,8 +10357,8 @@ struct llm_build_gemma2 : public llm_graph_context { } }; -struct llm_build_gemma3 : public llm_graph_context { - llm_build_gemma3(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { +struct llm_build_gemma3_iswa : public llm_graph_context { + llm_build_gemma3_iswa(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) { const int64_t n_embd_head = hparams.n_embd_head_k; ggml_tensor * cur; @@ -8493,13 +10376,13 @@ struct llm_build_gemma3 : public llm_graph_context { ggml_tensor * inp_pos = build_inp_pos(); // TODO: is causal == true correct? might need some changes - auto * inp_attn = build_attn_inp_kv_unified(); + auto * inp_attn = build_attn_inp_kv_unified_iswa(); + + ggml_tensor * inp_out_ids = build_inp_out_ids(); for (int il = 0; il < n_layer; ++il) { - const bool is_swa = hparams.is_swa(il); - - const float freq_base_l = is_swa ? hparams.rope_freq_base_train_swa : cparams.rope_freq_base; - const float freq_scale_l = is_swa ? 
hparams.rope_freq_scale_train_swa : cparams.rope_freq_scale; + const float freq_base_l = model.get_rope_freq_base (cparams, il); + const float freq_scale_l = model.get_rope_freq_scale(cparams, il); // norm cur = build_norm(inpL, model.layers[il].attn_norm, NULL, LLM_NORM_RMS, il); @@ -8541,9 +10424,17 @@ struct llm_build_gemma3 : public llm_graph_context { cb(Kcur, "Kcur", il); cb(Vcur, "Vcur", il); - cur = build_attn(inp_attn, gf, + // ref: https://github.com/google/gemma_pytorch/blob/014acb7ac4563a5f77c76d7ff98f31b568c16508/gemma/model.py#L315 + Qcur = ggml_scale(ctx0, Qcur, hparams.f_attention_scale); + + cur = build_attn(inp_attn, model.layers[il].wo, NULL, - Qcur, Kcur, Vcur, nullptr, nullptr, hparams.f_attention_scale, il); + Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f, il); + } + + if (il == n_layer - 1 && inp_out_ids) { + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); } cur = build_norm(cur, @@ -8551,13 +10442,6 @@ struct llm_build_gemma3 : public llm_graph_context { LLM_NORM_RMS, il); cb(cur, "attn_post_norm", il); - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); - cur = ggml_get_rows(ctx0, cur, inp_out_ids); - inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); - } - ggml_tensor * sa_out = ggml_add(ctx0, cur, inpL); cb(sa_out, "sa_out", il); @@ -8610,9 +10494,433 @@ struct llm_build_gemma3 : public llm_graph_context { } }; +struct llm_build_gemma3n_iswa : public llm_graph_context { + const llama_model & model; + + const int64_t n_embd_head; + const int64_t n_embd_altup; + const int64_t n_altup; + const int i_altup_act; + const int n_layer_kv = 20; // number of layers having KV [KV_REUSE] + const int n_layer_sparsity = 10; // number of layers using activation sparsity + const float f_sparsity_std_mul = 1.6448533535003662f; // std_multiplier = normal_dist.icdf(0.95) + + llm_build_gemma3n_iswa(const llama_model & model, const llm_graph_params & params) + : llm_graph_context(params), + model(model), + n_embd_head(model.hparams.n_embd_head_k), + n_embd_altup(model.hparams.n_embd_altup), + n_altup(model.hparams.n_altup), + i_altup_act(model.hparams.i_altup_act) { + ggml_tensor * cur; + ggml_tensor * inpL; + + inpL = build_inp_embd(model.tok_embd); + + // important: do not normalize weights for raw embeddings input (i.e. encoded image embeddings) + if (ubatch.token) { + inpL = ggml_scale(ctx0, inpL, sqrtf(n_embd)); + cb(inpL, "inp_scaled", -1); + } + + // inp_pos - contains the positions + ggml_tensor * inp_pos = build_inp_pos(); + + // TODO: is causal == true correct?
might need some changes + auto * inp_attn = build_attn_inp_kv_unified_iswa(); + + // inp_per_layer shape: [n_embd_altup, n_tokens, n_layer] + ggml_tensor * inp_per_layer = project_per_layer_inputs(inpL, get_per_layer_inputs()); + + // inpL now has only 1 altup, project it to the rest of the altups + // these "added" altups will be concatenated to the last dim of inpL + { + ggml_tensor * target_magnitude = calc_magnitude(inpL); + ggml_tensor * inp_repeated = ggml_repeat_4d(ctx0, inpL, n_embd, n_tokens, n_altup - 1, 1); + ggml_tensor * altup_added = ggml_mul_mat(ctx0, model.altup_proj, inp_repeated); // shape: [n_embd, n_tokens, n_altup - 1] + ggml_tensor * new_magnitude = calc_magnitude(altup_added); + altup_added = ggml_div(ctx0, + ggml_mul(ctx0, altup_added, target_magnitude), + new_magnitude); + inpL = ggml_concat(ctx0, inpL, altup_added, 2); // shape: [n_embd, n_tokens, n_altup] + cb(inpL, "inp_stacked", -1); + } + + // inpL now has shape: [n_embd, n_tokens, n_altup] + // inp_per_layer now has shape: [n_embd_altup, n_tokens, n_layer] + + for (int il = 0; il < n_layer; ++il) { + // this block is made to closely resemble the Gemma3p5DecoderLayer in the python code + const bool has_kv = (il < n_layer_kv); + + const float freq_base_l = model.get_rope_freq_base (cparams, il); + const float freq_scale_l = model.get_rope_freq_scale(cparams, il); + + ggml_tensor * cur = inpL; // [n_embd, n_tokens, n_altup] + ggml_tensor * predictions = altup_predict(cur, il); // [n_embd, n_tokens, n_altup] + + // predicted value will go through self-attention and laurel + ggml_tensor * active_prediction = view_2d_slice(predictions, i_altup_act); // [n_embd, n_tokens] + cur = active_prediction; + cb(cur, "active_prediction", il); + + // norm + cur = build_norm(cur, model.layers[il].attn_norm, NULL, LLM_NORM_RMS, il); + cb(cur, "attn_norm", il); + + // laurel + ggml_tensor * laurel_out = laurel(cur, il); // [n_embd, n_tokens] + + // self-attention + if (has_kv) { + // compute Q and K and RoPE them + ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur); + cb(Qcur, "Qcur", il); + + ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur); + cb(Kcur, "Kcur", il); + + ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur); + cb(Vcur, "Vcur", il); + + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); + + Qcur = build_norm(Qcur, model.layers[il].attn_q_norm, NULL, LLM_NORM_RMS, il); + Kcur = build_norm(Kcur, model.layers[il].attn_k_norm, NULL, LLM_NORM_RMS, il); + Vcur = ggml_rms_norm(ctx0, Vcur, hparams.f_norm_rms_eps); + + cb(Qcur, "Qcur_normed", il); + cb(Kcur, "Kcur_normed", il); + cb(Vcur, "Vcur_normed", il); + + Qcur = ggml_rope_ext( + ctx0, Qcur, inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base_l, freq_scale_l, + ext_factor, attn_factor, beta_fast, beta_slow); + + Kcur = ggml_rope_ext( + ctx0, Kcur, inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base_l, freq_scale_l, + ext_factor, attn_factor, beta_fast, beta_slow); + + cb(Qcur, "Qcur_pos", il); + cb(Kcur, "Kcur_pos", il); + + cur = build_attn(inp_attn, + model.layers[il].wo, NULL, + Qcur, Kcur, Vcur, nullptr, nullptr, hparams.f_attention_scale, il); + } else { + // no KV layers + ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur); + cb(Qcur, "Qcur", il); + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + + Qcur = build_norm(Qcur,
model.layers[il].attn_q_norm, NULL, LLM_NORM_RMS, il); + cb(Qcur, "Qcur_normed", il); + + Qcur = ggml_rope_ext( + ctx0, Qcur, inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base_l, freq_scale_l, + ext_factor, attn_factor, beta_fast, beta_slow); + cb(Qcur, "Qcur_pos", il); + + cur = build_attn(inp_attn, + model.layers[il].wo, NULL, + Qcur, nullptr, nullptr, nullptr, nullptr, hparams.f_attention_scale, il); + } + + cur = build_norm(cur, + model.layers[il].attn_post_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "attn_post_norm", il); + + cur = ggml_add(ctx0, cur, active_prediction); // [n_embd, n_tokens] + cb(cur, "attn_gated", il); + + ggml_tensor * attn_laurel = ggml_scale(ctx0, + ggml_add(ctx0, cur, laurel_out), + 1.0f / sqrtf(2.0f)); // [n_embd, n_tokens] + cb(attn_laurel, "attn_laurel", il); + + cur = build_norm(attn_laurel, + model.layers[il].ffn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "ffn_norm", il); + + // feed-forward network + { + ggml_tensor * up_proj = build_lora_mm(model.layers[il].ffn_up, cur); + ggml_tensor * gate_proj = build_lora_mm(model.layers[il].ffn_gate, cur); + + if (il < n_layer_sparsity) { + // apply activation sparsity + gate_proj = gaussian_topk(gate_proj); + } + gate_proj = ggml_gelu(ctx0, gate_proj); + + cur = ggml_mul(ctx0, up_proj, gate_proj); + cur = build_lora_mm(model.layers[il].ffn_down, cur); + cb(cur, "ffn_out", il); + } + + cur = build_norm(cur, + model.layers[il].ffn_post_norm, NULL, + LLM_NORM_RMS, -1); + cb(cur, "ffn_post_norm", il); + + ggml_tensor * attn_ffw_laurel_gated = ggml_add(ctx0, cur, attn_laurel); // [n_embd, n_tokens] + cb(attn_ffw_laurel_gated, "attn_ffw_laurel_gated", il); + + ggml_tensor * corrected = altup_correct(predictions, attn_ffw_laurel_gated, il); // [n_embd, n_tokens, n_altup] + + ggml_tensor * first_prediction; // [n_embd, n_tokens] + { + first_prediction = view_2d_slice(corrected, i_altup_act); // [n_embd, n_tokens] + first_prediction = ggml_mul(ctx0, first_prediction, model.layers[il].altup_correct_scale); + first_prediction = build_lora_mm(model.layers[il].per_layer_inp_gate, first_prediction); + first_prediction = ggml_gelu(ctx0, first_prediction); // [n_embd_altup, n_tokens] + cb(first_prediction, "first_prediction_gated", il); + ggml_tensor * inp_this_layer = view_2d_slice(inp_per_layer, il); // [n_embd_altup, n_tokens] + first_prediction = ggml_mul(ctx0, first_prediction, inp_this_layer); // [n_embd_altup, n_tokens] + cb(first_prediction, "first_prediction_scaled", il); + + first_prediction = build_lora_mm(model.layers[il].per_layer_proj, first_prediction); // [n_embd, n_tokens] + first_prediction = build_norm(first_prediction, + model.layers[il].per_layer_post_norm, NULL, + LLM_NORM_RMS, il); + cb(first_prediction, "first_prediction_out", il); + } + + // equivalent to python code: corrected_predictions[1:] += first_prediction + { + ggml_tensor * slice_first = view_2d_slice(corrected, 0); + ggml_tensor * slice_rest = ggml_view_3d(ctx0, corrected, n_embd, n_tokens, n_altup - 1, + ggml_row_size(corrected->type, n_embd), + ggml_row_size(corrected->type, n_embd*n_tokens), + n_embd*n_tokens*ggml_element_size(corrected)); + ggml_tensor * tmp = ggml_add(ctx0, slice_rest, first_prediction); // [n_embd, n_tokens, n_altup - 1] + corrected = ggml_concat(ctx0, slice_first, tmp, 2); // [n_embd, n_tokens, n_altup] + } + + cur = corrected; // [n_embd, n_tokens, n_altup] + cur = build_cvec(cur, il); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; + } + + cur = inpL; // [n_embd, n_tokens, n_altup] + + // cur 
now has multiple altup(s), we want to merge them back to 1 altup + { + ggml_tensor * target_magnitude = calc_magnitude(view_2d_slice(cur, i_altup_act)); // [n_embd, n_tokens] + // do a view to skip the first slice (active altup) + ggml_tensor * alt_slice = ggml_view_3d(ctx0, cur, n_embd, n_tokens, n_altup - 1, + ggml_row_size(cur->type, n_embd), + ggml_row_size(cur->type, n_embd*n_tokens), + n_embd*n_tokens*ggml_element_size(cur)); + ggml_tensor * altup_unembd = ggml_mul_mat(ctx0, model.altup_unembd_proj, alt_slice); // shape: [n_embd, n_tokens, n_altup - 1] + ggml_tensor * new_magnitude = calc_magnitude(altup_unembd); + altup_unembd = ggml_div(ctx0, + ggml_mul(ctx0, altup_unembd, target_magnitude), + new_magnitude); + cb(altup_unembd, "altup_unembd", -1); + + // equivalent to torch.mean(hidden_states, dim=0) + cur = view_2d_slice(cur, 0); // [n_embd, n_tokens] + for (int i = 0; i < n_altup - 1; ++i) { + cur = ggml_add(ctx0, cur, view_2d_slice(altup_unembd, i)); + } + cur = ggml_scale(ctx0, cur, 1.0f / float(n_altup)); // [n_embd, n_tokens] + cb(cur, "unembd_merged", -1); + } + + // cur now has shape: [n_embd, n_tokens] + + // TODO: move this to right after the last KV layer + { + // skip computing output for unused tokens + ggml_tensor * inp_out_ids = build_inp_out_ids(); + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + } + + cur = build_norm(cur, + model.output_norm, NULL, + LLM_NORM_RMS, -1); + + cb(cur, "result_norm", -1); + res->t_embd = cur; + + cur = build_lora_mm(model.output, cur); + + { + // final logit soft-capping + cur = ggml_scale(ctx0, cur, 1.0f / hparams.f_final_logit_softcapping); + cur = ggml_tanh(ctx0, cur); + cur = ggml_scale(ctx0, cur, hparams.f_final_logit_softcapping); + } + + cb(cur, "result_output", -1); + res->t_logits = cur; + + ggml_build_forward_expand(gf, cur); + } + + ggml_tensor * calc_magnitude(ggml_tensor * x) { + return ggml_sqrt(ctx0, ggml_sum_rows(ctx0, ggml_sqr(ctx0, x))); + } + + // get 2D slice view from a 3D tensor, the idx corresponds to the 3rd dim + ggml_tensor * view_2d_slice(ggml_tensor * x, int idx) { + GGML_ASSERT(idx < (int)x->ne[2]); + return ggml_view_2d(ctx0, x, x->ne[0], x->ne[1], + ggml_row_size(x->type, x->ne[0]), + idx * x->ne[0] * x->ne[1] * ggml_element_size(x)); + } + + // equivalent to get_per_layer_inputs() in python code + // output shape: [n_embd_altup, n_layer, n_tokens] + ggml_tensor * get_per_layer_inputs() { + auto inp = std::make_unique(); + ggml_tensor * inp_per_layer; + if (ubatch.token) { + inp->tokens = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, ubatch.n_tokens); + ggml_set_input(inp->tokens); + res->t_tokens = inp->tokens; + inp_per_layer = ggml_get_rows(ctx0, model.tok_embd_per_layer, inp->tokens); + inp_per_layer = ggml_reshape_3d(ctx0, inp_per_layer, n_embd_altup, n_layer, n_tokens); + inp_per_layer = ggml_scale(ctx0, inp_per_layer, sqrtf((float)n_embd_altup)); + cb(inp_per_layer, "inp_per_layer_selected", -1); + } else { + GGML_ABORT("TODO: support embd input"); + } + res->add_input(std::move(inp)); + return inp_per_layer; + } + + // equivalent to project_per_layer_inputs() in python code + // this calculates the per-layer inputs, so the final tensor shape will have n_layer as the last dim + // output shape: [n_embd_altup, n_tokens, n_layer] + ggml_tensor * project_per_layer_inputs(ggml_tensor * inputs_embeds, ggml_tensor * inp_per_layer) { + const float per_layer_projection_scale = 1.0f / sqrtf((float)n_embd); + const float per_layer_input_scale = 1.0f / sqrtf(2.0f); + + ggml_tensor * per_layer_proj = 
ggml_mul_mat(ctx0, model.per_layer_model_proj, inputs_embeds); + per_layer_proj = ggml_scale(ctx0, per_layer_proj, per_layer_projection_scale); + per_layer_proj = ggml_reshape_3d(ctx0, per_layer_proj, n_embd_altup, n_layer, n_tokens); + per_layer_proj = build_norm(per_layer_proj, + model.per_layer_proj_norm, NULL, + LLM_NORM_RMS, -1); // [n_embd_altup, n_layer, n_tokens] + cb(per_layer_proj, "per_layer_proj", -1); + + inp_per_layer = ggml_add(ctx0, inp_per_layer, per_layer_proj); + inp_per_layer = ggml_scale(ctx0, inp_per_layer, per_layer_input_scale); + cb(inp_per_layer, "inp_per_layer", -1); + + // permute to shape: [n_embd_altup, n_tokens, n_layer] + inp_per_layer = ggml_cont(ctx0, ggml_permute(ctx0, inp_per_layer, 0, 2, 1, 3)); + return inp_per_layer; + } + + // input cur shape: [n_embd, n_tokens] + // output shape: [n_embd, n_tokens] + ggml_tensor * laurel(ggml_tensor * cur, int il) { + ggml_tensor * tmp = cur; + tmp = build_lora_mm(model.layers[il].laurel_l, tmp); + tmp = build_lora_mm(model.layers[il].laurel_r, tmp); + tmp = build_norm(tmp, model.layers[il].laurel_post_norm, NULL, LLM_NORM_RMS, il); + tmp = ggml_add(ctx0, tmp, cur); + cb(tmp, "laurel_out", il); + return tmp; + } + + // input x shape: [n_embd, n_tokens] + // output shape: [n_embd, n_tokens] + ggml_tensor * gaussian_topk(ggml_tensor * x) { + ggml_tensor * mean = ggml_mean(ctx0, x); + ggml_tensor * std = ggml_sqrt(ctx0, ggml_scale(ctx0, + ggml_sum_rows(ctx0, ggml_sqr(ctx0, ggml_sub(ctx0, x, mean))), + 1.0f / (float)(x->ne[0] - 1) + )); + ggml_tensor * cutoff_x = ggml_add(ctx0, mean, ggml_scale(ctx0, std, f_sparsity_std_mul)); + return ggml_relu(ctx0, ggml_sub(ctx0, x, cutoff_x)); + } + + // + // altup functions + // + + // equivalent to compute_router_modalities() in python code + // input x shape: [n_embd, n_tokens] + // output shape: [n_altup, n_tokens] + ggml_tensor * altup_compute_router_modalities(ggml_tensor * x, int il) { + ggml_tensor * router_inputs = build_norm(x, + model.layers[il].altup_router_norm, NULL, + LLM_NORM_RMS, il); + + // router_input_scale + router_inputs = ggml_scale(ctx0, router_inputs, 1.0f / (float)n_embd); + + ggml_tensor * output = ggml_mul_mat(ctx0, model.layers[il].altup_router, router_inputs); + return ggml_tanh(ctx0, output); // [n_altup, n_tokens] + } + + // input cur shape: [n_embd, n_tokens, n_altup] + // output shape: [n_embd, n_tokens, n_altup] + ggml_tensor * altup_predict(ggml_tensor * cur, int il) { + ggml_tensor * activated = view_2d_slice(cur, i_altup_act); // [n_embd, n_tokens] + ggml_tensor * modalities = altup_compute_router_modalities(activated, il); // [n_altup, n_tokens] + cb(modalities, "modalities", il); + + ggml_tensor * all_coefs = build_lora_mm(model.layers[il].altup_predict_coef, modalities); + cb(all_coefs, "all_coefs", il); + // the first dim now has n_altup^2 elements; reshape it to 2D (so we end up with a 3D tensor) + all_coefs = ggml_reshape_3d(ctx0, all_coefs, n_altup, n_altup, n_tokens); + + // permute to [n_altup, n_embd, n_tokens] + ggml_tensor * cur_permuted = ggml_cont(ctx0, ggml_permute(ctx0, cur, 1, 2, 0, 3)); + ggml_tensor * predictions = ggml_mul_mat(ctx0, cur_permuted, all_coefs); // [n_altup, n_embd, n_tokens] + + // final shape must be the same as cur: [n_embd, n_tokens, n_altup] + predictions = ggml_cont(ctx0, ggml_permute(ctx0, predictions, 0, 2, 1, 3)); + predictions = ggml_add(ctx0, predictions, cur); + cb(predictions, "predictions", il); + + return predictions; + } + + // input predictions shape: [n_embd, n_tokens, n_altup] + // input
activated shape: [n_embd, n_tokens] + // output shape: [n_embd, n_tokens, n_altup] + ggml_tensor * altup_correct(ggml_tensor * predictions, ggml_tensor * activated, int il) { + ggml_tensor * modalities = altup_compute_router_modalities(activated, il); // [n_altup, n_tokens] + cb(modalities, "modalities", il); + + ggml_tensor * active_prediction = view_2d_slice(predictions, i_altup_act); + ggml_tensor * innovation = ggml_sub(ctx0, activated, active_prediction); // [n_embd, n_tokens] + cb(innovation, "innovation", il); + + ggml_tensor * all_coefs = build_lora_mm(model.layers[il].altup_correct_coef, modalities); // [n_altup, n_tokens] + all_coefs = ggml_scale_bias(ctx0, all_coefs, 1.0f, 1.0f); // + 1.0 + cb(all_coefs, "all_coefs", il); + all_coefs = ggml_cont(ctx0, ggml_transpose(ctx0, all_coefs)); // [n_tokens, n_altup] + all_coefs = ggml_reshape_3d(ctx0, all_coefs, 1, n_tokens, n_altup); // [1, n_tokens, n_altup] + + innovation = ggml_repeat_4d(ctx0, innovation, n_embd, n_tokens, n_altup, 1); + ggml_tensor * corrected = ggml_mul(ctx0, innovation, all_coefs); // [n_embd, n_tokens, n_altup] + corrected = ggml_add(ctx0, corrected, predictions); // [n_embd, n_tokens, n_altup] + cb(corrected, "corrected", il); + + return corrected; + } +}; + // TODO: move up next to build_starcoder struct llm_build_starcoder2 : public llm_graph_context { - llm_build_starcoder2(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + llm_build_starcoder2(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) { const int64_t n_embd_head = hparams.n_embd_head_v; GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); @@ -8628,6 +10936,8 @@ struct llm_build_starcoder2 : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified(); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -8681,14 +10991,12 @@ struct llm_build_starcoder2 : public llm_graph_context { cb(Kcur, "Kcur", il); cb(Vcur, "Vcur", il); - cur = build_attn(inp_attn, gf, + cur = build_attn(inp_attn, model.layers[il].wo, model.layers[il].bo, Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); } @@ -8739,109 +11047,49 @@ struct llm_build_starcoder2 : public llm_graph_context { } }; -struct llm_build_mamba : public llm_graph_context { - const llama_model & model; +struct llm_graph_context_mamba : public llm_graph_context { + llm_graph_context_mamba(const llm_graph_params & params) : llm_graph_context(params) {} - llm_build_mamba(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params), model(model) { - ggml_tensor * cur; - ggml_tensor * inpL; - - // {n_embd, n_tokens} - inpL = build_inp_embd(model.tok_embd); - - ggml_tensor * state_copy = build_inp_s_copy(); - ggml_tensor * state_mask = build_inp_s_mask(); - - for (int il = 0; il < n_layer; ++il) { - // norm - cur = build_norm(inpL, - model.layers[il].attn_norm, NULL, - LLM_NORM_RMS, il); - cb(cur, "attn_norm", il); - - //cur = build_mamba_layer(gf, cur, state_copy, state_mask, il); - cur = build_mamba_layer(gf, cur, state_copy, state_mask, ubatch, il); - - if (il == n_layer - 1) { - // skip computing output for unused 
tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); - cur = ggml_get_rows(ctx0, cur, inp_out_ids); - inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); - } - - // residual - cur = ggml_add(ctx0, cur, inpL); - - cur = build_cvec(cur, il); - cb(cur, "l_out", il); - - // input for next layer - inpL = cur; - } - - // final rmsnorm - cur = build_norm(inpL, - model.output_norm, NULL, - LLM_NORM_RMS, -1); - - cb(cur, "result_norm", -1); - res->t_embd = cur; - - // lm_head - cur = build_lora_mm(model.output, cur); - - cb(cur, "result_output", -1); - res->t_logits = cur; - - ggml_build_forward_expand(gf, cur); - } - - // TODO: split ggml_tensor * build_mamba_layer( - ggml_cgraph * gf, - ggml_tensor * cur, - ggml_tensor * state_copy, - ggml_tensor * state_mask, - const llama_ubatch & ubatch, - int il) const { - const llama_kv_cache_recurrent * kv_self = static_cast(memory); + llm_graph_input_rs * inp, + ggml_tensor * cur, + const llama_model & model, + const llama_ubatch & ubatch, + int il) { - const auto kv_head = kv_self->head; + const auto * mctx_cur = inp->mctx; + + const auto kv_head = mctx_cur->get_head(); + + const auto & layer = model.layers[il]; const int64_t d_conv = hparams.ssm_d_conv; const int64_t d_inner = hparams.ssm_d_inner; const int64_t d_state = hparams.ssm_d_state; const int64_t dt_rank = hparams.ssm_dt_rank; + const int64_t n_head = d_inner; + const int64_t head_dim = 1; const int64_t n_seqs = ubatch.n_seqs; // Some variants of Mamba arch (e.g. FalconMamba do apply layer norm on B and Dt layers) const bool ssm_dt_b_c_rms = hparams.ssm_dt_b_c_rms; - // Use the same RMS norm as the final layer norm - const float norm_rms_eps = hparams.f_norm_rms_eps; const int64_t n_seq_tokens = ubatch.n_seq_tokens; GGML_ASSERT(n_seqs != 0); - GGML_ASSERT(ubatch.equal_seqs); + GGML_ASSERT(ubatch.equal_seqs()); GGML_ASSERT(ubatch.n_tokens == n_seq_tokens * n_seqs); - ggml_tensor * conv_states_all = kv_self->k_l[il]; - ggml_tensor * ssm_states_all = kv_self->v_l[il]; + ggml_tensor * conv_states_all = mctx_cur->get_r_l(il); + ggml_tensor * ssm_states_all = mctx_cur->get_s_l(il); - // (ab)using the KV cache to store the states - ggml_tensor * conv = build_copy_mask_state( - gf, conv_states_all, state_copy, state_mask, - hparams.n_embd_k_s(), n_seqs); + ggml_tensor * conv = build_rs(inp, conv_states_all, hparams.n_embd_r(), n_seqs); conv = ggml_reshape_3d(ctx0, conv, d_conv - 1, d_inner, n_seqs); - ggml_tensor * ssm = build_copy_mask_state( - gf, ssm_states_all, state_copy, state_mask, - hparams.n_embd_v_s(), n_seqs); - ssm = ggml_reshape_3d(ctx0, ssm, d_state, d_inner, n_seqs); // {n_embd, n_tokens} => {n_embd, n_seq_tokens, n_seqs} cur = ggml_reshape_3d(ctx0, cur, cur->ne[0], n_seq_tokens, n_seqs); // {n_embd, 2*d_inner} @ {n_embd, n_seq_tokens, n_seqs} => {2*d_inner, n_seq_tokens, n_seqs} - ggml_tensor * xz = build_lora_mm(model.layers[il].ssm_in, cur); + ggml_tensor * xz = build_lora_mm(layer.ssm_in, cur); // split the above in two // => {d_inner, n_seq_tokens, n_seqs} ggml_tensor * x = ggml_view_3d(ctx0, xz, d_inner, xz->ne[1], xz->ne[2], xz->nb[1], xz->nb[2], 0); @@ -8870,10 +11118,10 @@ struct llm_build_mamba : public llm_graph_context { // then permute away the ne[0] dimension, // and then you're left with the resulting x tensor. // For simultaneous sequences, all sequences need to have the same length. 
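// ---- editor's sketch (illustration only, not part of the patch) ----
// The comment above describes ggml_ssm_conv as a per-channel causal sliding dot
// product. A minimal reference under an assumed dense [channel][time] float
// layout (ggml's real strides differ); all names here are local to this sketch:
#include <vector>
static std::vector<float> ssm_conv_ref(const std::vector<float> & conv_x, // [d_inner][d_conv-1+n_t]
                                       const std::vector<float> & w,      // [d_inner][d_conv]
                                       int d_conv, int d_inner, int n_t) {
    std::vector<float> y(d_inner * n_t, 0.0f);   // [d_inner][n_t]
    for (int c = 0; c < d_inner; ++c) {          // every channel has its own small filter
        for (int t = 0; t < n_t; ++t) {          // causal window ending at token t
            float acc = 0.0f;
            for (int k = 0; k < d_conv; ++k) {
                acc += conv_x[c*(d_conv - 1 + n_t) + t + k] * w[c*d_conv + k];
            }
            y[c*n_t + t] = acc;
        }
    }
    return y;
}
// ---- end sketch ----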
- x = ggml_ssm_conv(ctx0, conv_x, model.layers[il].ssm_conv1d); + x = ggml_ssm_conv(ctx0, conv_x, layer.ssm_conv1d); // bias - x = ggml_add(ctx0, x, model.layers[il].ssm_conv1d_b); + x = ggml_add(ctx0, x, layer.ssm_conv1d_b); x = ggml_silu(ctx0, x); } @@ -8881,41 +11129,185 @@ struct llm_build_mamba : public llm_graph_context { // ssm { // {d_inner, dt_rank + 2*d_state} @ {d_inner, n_seq_tokens, n_seqs} => {dt_rank + 2*d_state, n_seq_tokens, n_seqs} - ggml_tensor * x_db = build_lora_mm(model.layers[il].ssm_x, x); + ggml_tensor * x_db = build_lora_mm(layer.ssm_x, x); // split ggml_tensor * dt = ggml_view_3d(ctx0, x_db, dt_rank, n_seq_tokens, n_seqs, x_db->nb[1], x_db->nb[2], 0); - ggml_tensor * B = ggml_view_3d(ctx0, x_db, d_state, n_seq_tokens, n_seqs, x_db->nb[1], x_db->nb[2], ggml_element_size(x_db)*dt_rank); - ggml_tensor * C = ggml_view_3d(ctx0, x_db, d_state, n_seq_tokens, n_seqs, x_db->nb[1], x_db->nb[2], ggml_element_size(x_db)*(dt_rank+d_state)); + ggml_tensor * B = ggml_view_4d(ctx0, x_db, d_state, /* n_group */ 1, n_seq_tokens, n_seqs, d_state*x_db->nb[0], x_db->nb[1], x_db->nb[2], ggml_element_size(x_db)*dt_rank); + ggml_tensor * C = ggml_view_4d(ctx0, x_db, d_state, /* n_group */ 1, n_seq_tokens, n_seqs, d_state*x_db->nb[0], x_db->nb[1], x_db->nb[2], ggml_element_size(x_db)*(dt_rank+d_state)); - // Some Mamba variants (e.g. FalconMamba) apply RMS norm in B, C & Dt layers - if (ssm_dt_b_c_rms) { - dt = ggml_rms_norm(ctx0, dt, norm_rms_eps); - B = ggml_rms_norm(ctx0, B, norm_rms_eps); - C = ggml_rms_norm(ctx0, C, norm_rms_eps); + // Some Mamba variants (e.g. FalconMamba, Jamba) apply RMS norm in B, C & Dt layers + if (ssm_dt_b_c_rms || (layer.ssm_dt_norm && layer.ssm_b_norm && layer.ssm_c_norm)) { + dt = build_norm(dt, layer.ssm_dt_norm, NULL, LLM_NORM_RMS, il); + B = build_norm(B, layer.ssm_b_norm, NULL, LLM_NORM_RMS, il); + C = build_norm(C, layer.ssm_c_norm, NULL, LLM_NORM_RMS, il); } // {dt_rank, d_inner} @ {dt_rank, n_seq_tokens, n_seqs} => {d_inner, n_seq_tokens, n_seqs} - dt = build_lora_mm(model.layers[il].ssm_dt, dt); - dt = ggml_add(ctx0, dt, model.layers[il].ssm_dt_b); + dt = build_lora_mm(layer.ssm_dt, dt); + dt = ggml_add(ctx0, dt, layer.ssm_dt_b); - // Custom operator to optimize the parallel associative scan - // as described in the Annex D of the Mamba paper. - // => {d_inner, n_seq_tokens, n_seqs} and {d_state, d_inner, n_seqs} - ggml_tensor * y_ssm = ggml_ssm_scan(ctx0, ssm, x, dt, model.layers[il].ssm_a, B, C); + cur = x; + x = ggml_reshape_4d(ctx0, x, head_dim, n_head, n_seq_tokens, n_seqs); + + ggml_tensor * A = layer.ssm_a; + + // use the states and the indices provided by build_recurrent_state + // (this is necessary in order to properly use the states before they are overwritten, + // while avoiding to make unnecessary copies of the states) + auto get_ssm_rows = [&](ggml_context * ctx, ggml_tensor * states, ggml_tensor * ids) { + ggml_tensor * ssm = ggml_reshape_4d(ctx, states, d_state, head_dim, n_head, mctx_cur->get_size()); + + // Custom operator to optimize the parallel associative scan + // as described in the Annex D of the Mamba paper. 
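// ---- editor's sketch (illustration only, not part of the patch) ----
// Sequential form of the recurrence that ggml_ssm_scan fuses (the kernel
// evaluates it as a parallel associative scan). Written for a single channel;
// a per-state decay vector A is a simplification of Mamba's [d_inner, d_state]
// decay, and dt is assumed already bias-added as in the code above. Names are
// local to this sketch:
#include <cmath>
#include <vector>
static std::vector<float> ssm_scan_ref(std::vector<float> & h,        // carried state [d_state]
                                       const std::vector<float> & x,  // inputs [n_t]
                                       const std::vector<float> & dt, // step sizes [n_t]
                                       const std::vector<float> & A,  // decays [d_state], negative
                                       const std::vector<float> & B,  // [n_t][d_state]
                                       const std::vector<float> & C,  // [n_t][d_state]
                                       int d_state, int n_t) {
    std::vector<float> y(n_t, 0.0f);
    for (int t = 0; t < n_t; ++t) {
        for (int s = 0; s < d_state; ++s) {
            // zero-order-hold discretization: h <- exp(dt*A)*h + dt*B*x
            h[s] = std::exp(dt[t]*A[s])*h[s] + dt[t]*B[t*d_state + s]*x[t];
            y[t] += C[t*d_state + s]*h[s];       // y_t = C_t . h_t
        }
    }
    return y; // the D*x skip term and SiLU gating are applied after the scan, as below
}
// ---- end sketch ----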
+ // => {d_inner, n_seq_tokens, n_seqs} and {d_state, d_inner, n_seqs} + return ggml_ssm_scan(ctx, ssm, x, dt, A, B, C, ids); + }; + + ggml_tensor * y_ssm = build_rs(inp, ssm_states_all, hparams.n_embd_s(), ubatch.n_seqs, get_ssm_rows); // store last states ggml_build_forward_expand(gf, ggml_cpy(ctx0, - ggml_view_1d(ctx0, y_ssm, d_state*d_inner*n_seqs, x->nb[3]), + ggml_view_1d(ctx0, y_ssm, d_state*d_inner*n_seqs, x->nb[3]*x->ne[3]), ggml_view_1d(ctx0, ssm_states_all, d_state*d_inner*n_seqs, kv_head*d_state*d_inner*ggml_element_size(ssm_states_all)))); - ggml_tensor * y = ggml_view_3d(ctx0, y_ssm, d_inner, n_seq_tokens, n_seqs, x->nb[1], x->nb[2], 0); + ggml_tensor * y = ggml_view_3d(ctx0, y_ssm, d_inner, n_seq_tokens, n_seqs, x->nb[2], x->nb[3], 0); + + // TODO: skip computing output earlier for unused tokens + + y = ggml_add(ctx0, y, ggml_mul(ctx0, cur, layer.ssm_d)); + y = ggml_swiglu_split(ctx0, ggml_cont(ctx0, z), y); + + // {d_inner, n_embd} @ {d_inner, n_seq_tokens, n_seqs} => {n_embd, n_seq_tokens, n_seqs} + cur = build_lora_mm(layer.ssm_out, y); + } + + // {n_embd, n_seq_tokens, n_seqs} => {n_embd, n_tokens} + cur = ggml_reshape_2d(ctx0, cur, cur->ne[0], n_seq_tokens * n_seqs); + + return cur; + } + + ggml_tensor * build_mamba2_layer( + llm_graph_input_rs * inp, + ggml_tensor * cur, + const llama_model & model, + const llama_ubatch & ubatch, + int il) const { + + const auto * mctx_cur = inp->mctx; + + const auto kv_head = mctx_cur->get_head(); + + const int64_t d_conv = hparams.ssm_d_conv; + const int64_t d_inner = hparams.ssm_d_inner; + const int64_t d_state = hparams.ssm_d_state; + const int64_t n_head = hparams.ssm_dt_rank; + const int64_t head_dim = d_inner / n_head; + const int64_t n_group = hparams.ssm_n_group; + const int64_t n_seqs = ubatch.n_seqs; + + const int64_t n_seq_tokens = ubatch.n_seq_tokens; + + GGML_ASSERT(n_seqs != 0); + GGML_ASSERT(ubatch.equal_seqs()); + GGML_ASSERT(ubatch.n_tokens == n_seq_tokens * n_seqs); + + ggml_tensor * conv_states_all = mctx_cur->get_r_l(il); + ggml_tensor * ssm_states_all = mctx_cur->get_s_l(il); + + ggml_tensor * conv = build_rs(inp, conv_states_all, hparams.n_embd_r(), n_seqs); + conv = ggml_reshape_3d(ctx0, conv, d_conv - 1, d_inner + 2*n_group*d_state, n_seqs); + + // {n_embd, n_tokens} => {n_embd, n_seq_tokens, n_seqs} + cur = ggml_reshape_3d(ctx0, cur, cur->ne[0], n_seq_tokens, n_seqs); + + // d_in_proj = 2 * self.d_inner + 2 * self.ngroups * self.d_state + self.nheads + + // {n_embd, d_in_proj} @ {n_embd, n_seq_tokens, n_seqs} => {d_in_proj, n_seq_tokens, n_seqs} + ggml_tensor * zxBCdt = build_lora_mm(model.layers[il].ssm_in, cur); + + // split the above in three + ggml_tensor * z = ggml_view_4d(ctx0, zxBCdt, head_dim, n_head, n_seq_tokens, n_seqs, head_dim*zxBCdt->nb[0], zxBCdt->nb[1], zxBCdt->nb[2], 0); + ggml_tensor * xBC = ggml_view_3d(ctx0, zxBCdt, d_inner + 2*n_group*d_state, n_seq_tokens, n_seqs, zxBCdt->nb[1], zxBCdt->nb[2], d_inner*ggml_element_size(zxBCdt)); + ggml_tensor * dt = ggml_view_3d(ctx0, zxBCdt, n_head, n_seq_tokens, n_seqs, zxBCdt->nb[1], zxBCdt->nb[2], (2*d_inner + 2*n_group*d_state)*ggml_element_size(zxBCdt)); + + // conv + { + // => {d_conv - 1 + n_seq_tokens, d_inner + 2*n_group*d_state, n_seqs} + ggml_tensor * conv_x = ggml_concat(ctx0, conv, ggml_transpose(ctx0, xBC), 0); + + // copy last (d_conv - 1) columns back into the state cache + ggml_tensor * last_conv = ggml_view_3d(ctx0, conv_x, d_conv - 1, d_inner + 2*n_group*d_state, n_seqs, conv_x->nb[1], conv_x->nb[2], n_seq_tokens*(conv_x->nb[0])); + + 
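// ---- editor's sketch (illustration only, not part of the patch) ----
// The ggml_cpy below persists the rolling convolution window: after consuming
// n_seq_tokens new inputs, the cache slot for a sequence must hold the last
// (d_conv - 1) columns of conv_x, so the next ubatch can rebuild its causal
// window. Per channel, under a flat layout assumed only for this sketch:
#include <vector>
// conv_x_ch: one channel's timeline, [ d_conv-1 carried samples | n_t new samples ]
static std::vector<float> carry_conv_state(const std::vector<float> & conv_x_ch, int d_conv) {
    // keep the trailing (d_conv - 1) samples; they seed the next ubatch's window
    return std::vector<float>(conv_x_ch.end() - (d_conv - 1), conv_x_ch.end());
}
// ---- end sketch ----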
ggml_build_forward_expand(gf, + ggml_cpy(ctx0, last_conv, + ggml_view_1d(ctx0, conv_states_all, + (d_conv - 1)*(d_inner + 2*n_group*d_state)*(n_seqs), + kv_head*(d_conv - 1)*(d_inner + 2*n_group*d_state)*ggml_element_size(conv_states_all)))); + + // 1D convolution + // The equivalent is to make a self-overlapping view of conv_x + // over d_conv columns at each stride in the 3rd dimension, + // then element-wise multiply that with the conv1d weight, + // then sum the elements of each row, + // (the last two steps are a dot product over rows (also doable with mul_mat)) + // then permute away the ne[0] dimension, + // and then you're left with the resulting x tensor. + // For simultaneous sequences, all sequences need to have the same length. + xBC = ggml_ssm_conv(ctx0, conv_x, model.layers[il].ssm_conv1d); + + // bias + xBC = ggml_add(ctx0, xBC, model.layers[il].ssm_conv1d_b); + + xBC = ggml_silu(ctx0, xBC); + } + + // ssm + { + // These correspond to V K Q in SSM/attention duality + ggml_tensor * x = ggml_view_4d(ctx0, xBC, head_dim, n_head, n_seq_tokens, n_seqs, head_dim*xBC->nb[0], xBC->nb[1], xBC->nb[2], 0); + ggml_tensor * B = ggml_view_4d(ctx0, xBC, d_state, n_group, n_seq_tokens, n_seqs, d_state*xBC->nb[0], xBC->nb[1], xBC->nb[2], d_inner*ggml_element_size(xBC)); + ggml_tensor * C = ggml_view_4d(ctx0, xBC, d_state, n_group, n_seq_tokens, n_seqs, d_state*xBC->nb[0], xBC->nb[1], xBC->nb[2], (d_inner + n_group*d_state)*ggml_element_size(xBC)); + + // {n_head, n_seq_tokens, n_seqs} + dt = ggml_add(ctx0, ggml_cont(ctx0, dt), model.layers[il].ssm_dt_b); + + ggml_tensor * A = model.layers[il].ssm_a; + + // use the states and the indices provided by build_recurrent_state + // (this is necessary in order to properly use the states before they are overwritten, + // while avoiding to make unnecessary copies of the states) + auto get_ssm_rows = [&](ggml_context * ctx, ggml_tensor * states, ggml_tensor * ids) { + ggml_tensor * ssm = ggml_reshape_4d(ctx, states, d_state, head_dim, n_head, mctx_cur->get_size()); + + // TODO: use semistructured matrices to implement state-space duality + // => {d_inner, n_seq_tokens, n_seqs} and {d_state, d_inner, n_seqs} + return ggml_ssm_scan(ctx, ssm, x, dt, A, B, C, ids); + }; + + ggml_tensor * y_ssm = build_rs(inp, ssm_states_all, hparams.n_embd_s(), ubatch.n_seqs, get_ssm_rows); + + // store last states + ggml_build_forward_expand(gf, + ggml_cpy(ctx0, + ggml_view_1d(ctx0, y_ssm, d_state*d_inner*n_seqs, ggml_nelements(x)*x->nb[0]), + ggml_view_1d(ctx0, ssm_states_all, d_state*d_inner*n_seqs, kv_head*d_state*d_inner*ggml_element_size(ssm_states_all)))); + + ggml_tensor * y = ggml_view_4d(ctx0, y_ssm, head_dim, n_head, n_seq_tokens, n_seqs, x->nb[1], n_head*x->nb[1], n_seq_tokens*n_head*x->nb[1], 0); // TODO: skip computing output earlier for unused tokens - // {d_inner, n_seq_tokens, n_seqs} * {d_inner} => {d_inner, n_seq_tokens, n_seqs} y = ggml_add(ctx0, y, ggml_mul(ctx0, x, model.layers[il].ssm_d)); - y = ggml_mul(ctx0, y, ggml_silu(ctx0, ggml_cont(ctx0, z))); + y = ggml_swiglu_split(ctx0, ggml_cont(ctx0, z), y); + + // grouped RMS norm + if (model.layers[il].ssm_norm) { + y = ggml_reshape_4d(ctx0, y, d_inner / n_group, n_group, n_seq_tokens, n_seqs); + y = build_norm(y, model.layers[il].ssm_norm, NULL, LLM_NORM_RMS, il); + } + + y = ggml_reshape_3d(ctx0, y, d_inner, n_seq_tokens, n_seqs); // {d_inner, n_embd} @ {d_inner, n_seq_tokens, n_seqs} => {n_embd, n_seq_tokens, n_seqs} cur = build_lora_mm(model.layers[il].ssm_out, y); @@ -8923,14 +11315,180 @@ struct 
llm_build_mamba : public llm_graph_context { // {n_embd, n_seq_tokens, n_seqs} => {n_embd, n_tokens} cur = ggml_reshape_2d(ctx0, cur, cur->ne[0], n_seq_tokens * n_seqs); - //cb(cur, "mamba_out", il); + cb(cur, "mamba_out", il); return cur; } }; +struct llm_build_mamba : public llm_graph_context_mamba { + llm_build_mamba(const llama_model & model, const llm_graph_params & params) : llm_graph_context_mamba(params) { + ggml_tensor * cur; + ggml_tensor * inpL; + + // {n_embd, n_tokens} + inpL = build_inp_embd(model.tok_embd); + + auto * rs_inp = build_rs_inp(); + + ggml_tensor * inp_out_ids = build_inp_out_ids(); + + for (int il = 0; il < n_layer; ++il) { + // norm + cur = build_norm(inpL, + model.layers[il].attn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "attn_norm", il); + + if (model.arch == LLM_ARCH_MAMBA2) { + cur = build_mamba2_layer(rs_inp, cur, model, ubatch, il); + } else { + cur = build_mamba_layer(rs_inp, cur, model, ubatch, il); + } + + if (il == n_layer - 1 && inp_out_ids) { + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); + } + + // residual + cur = ggml_add(ctx0, cur, inpL); + + cur = build_cvec(cur, il); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; + } + + // final rmsnorm + cur = build_norm(inpL, model.output_norm, NULL, LLM_NORM_RMS, -1); + + cb(cur, "result_norm", -1); + res->t_embd = cur; + + // lm_head + cur = build_lora_mm(model.output, cur); + + cb(cur, "result_output", -1); + res->t_logits = cur; + + ggml_build_forward_expand(gf, cur); + } + +}; + +struct llm_build_jamba : public llm_graph_context_mamba { + llm_build_jamba(const llama_model & model, const llm_graph_params & params) : llm_graph_context_mamba(params) { + const int64_t n_embd_head = hparams.n_embd_head_v; + + ggml_tensor * cur; + ggml_tensor * inpL; + + // {n_embd, n_tokens} + inpL = build_inp_embd(model.tok_embd); + + auto * inp_hybrid = build_inp_mem_hybrid(); + + ggml_tensor * inp_out_ids = build_inp_out_ids(); + + for (int il = 0; il < n_layer; ++il) { + const int64_t n_head_kv = hparams.n_head_kv(il); + + cur = build_norm(inpL, model.layers[il].attn_norm, NULL, LLM_NORM_RMS, il); + cb(cur, "attn_norm", il); + + if (n_head_kv == 0) { + cur = build_mamba_layer(inp_hybrid->get_recr(), cur, model, ubatch, il); + } else { + // Attention + + struct ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur); + struct ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur); + struct ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur); + + cb(Qcur, "Qcur", il); + cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); + + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); + + cb(Qcur, "Qcur", il); + cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); + + // No RoPE :) + cur = build_attn(inp_hybrid->get_attn(), model.layers[il].wo, NULL, Qcur, Kcur, Vcur, NULL, NULL, 1.0f/sqrtf(float(n_embd_head)), il); + } + + if (il == n_layer - 1 && inp_out_ids) { + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); + } + + // residual + struct ggml_tensor * ffn_inp = ggml_add(ctx0, inpL, cur); + cb(cur, "ffn_inp", il); + + cur = build_norm(ffn_inp, model.layers[il].ffn_norm, NULL, LLM_NORM_RMS, il); + cb(cur, "ffn_norm", il); + + // feed-forward network + if (model.layers[il].ffn_gate_inp == nullptr) { + // FFN + cur = build_ffn(cur, + 
model.layers[il].ffn_up, NULL, NULL, + model.layers[il].ffn_gate, NULL, NULL, + model.layers[il].ffn_down, NULL, NULL, + NULL, + LLM_FFN_SILU, LLM_FFN_PAR, il); + cb(cur, "ffn_out", il); + } else { + // MoE branch + cur = build_moe_ffn(cur, + model.layers[il].ffn_gate_inp, + model.layers[il].ffn_up_exps, + model.layers[il].ffn_gate_exps, + model.layers[il].ffn_down_exps, + nullptr, + n_expert, n_expert_used, + LLM_FFN_SILU, false, + false, 0.0, + LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX, + il); + cb(cur, "ffn_moe_out", il); + } + + // residual + cur = ggml_add(ctx0, ffn_inp, cur); + + cur = build_cvec(cur, il); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; + } + + // final rmsnorm + cur = build_norm(inpL, model.output_norm, NULL, LLM_NORM_RMS, -1); + + cb(cur, "result_norm", -1); + res->t_embd = cur; + + // lm_head + cur = build_lora_mm(model.output, cur); + + cb(cur, "result_output", -1); + res->t_logits = cur; + + ggml_build_forward_expand(gf, cur); + } +}; + struct llm_build_command_r : public llm_graph_context { - llm_build_command_r(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + llm_build_command_r(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) { const int64_t n_embd_head = hparams.n_embd_head_v; GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); @@ -8947,13 +11505,15 @@ struct llm_build_command_r : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified(); - for (int il = 0; il < n_layer; ++il) { + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { // norm cur = build_norm(inpL, model.layers[il].attn_norm, NULL, LLM_NORM, il); cb(cur, "attn_norm", il); + ggml_tensor * ffn_inp = cur; // self-attention @@ -9016,14 +11576,12 @@ struct llm_build_command_r : public llm_graph_context { cb(Kcur, "Kcur", il); cb(Vcur, "Vcur", il); - cur = build_attn(inp_attn, gf, + cur = build_attn(inp_attn, model.layers[il].wo, model.layers[il].bo, Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); ffn_inp = ggml_get_rows(ctx0, ffn_inp, inp_out_ids); @@ -9076,8 +11634,8 @@ struct llm_build_command_r : public llm_graph_context { } }; -struct llm_build_cohere2 : public llm_graph_context { - llm_build_cohere2(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { +struct llm_build_cohere2_iswa : public llm_graph_context { + llm_build_cohere2_iswa(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) { const int64_t n_embd_head = hparams.n_embd_head_v; GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); @@ -9092,7 +11650,9 @@ struct llm_build_cohere2 : public llm_graph_context { // inp_pos - contains the positions ggml_tensor * inp_pos = build_inp_pos(); - auto * inp_attn = build_attn_inp_kv_unified(); + auto * inp_attn = build_attn_inp_kv_unified_iswa(); + + ggml_tensor * inp_out_ids = build_inp_out_ids(); for (int il = 0; il < n_layer; ++il) { const bool is_swa = hparams.is_swa(il); @@ -9105,7 +11665,7 @@ struct llm_build_cohere2 : public llm_graph_context { // self-attention { // rope freq factors for 128k context - ggml_tensor * rope_factors = 
model.get_rope_factors(n_ctx_per_seq, il); + ggml_tensor * rope_factors = model.get_rope_factors(cparams, il); // compute Q and K and RoPE them ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur); @@ -9151,14 +11711,12 @@ struct llm_build_cohere2 : public llm_graph_context { cb(Kcur, "Kcur", il); cb(Vcur, "Vcur", il); - cur = build_attn(inp_attn, gf, + cur = build_attn(inp_attn, model.layers[il].wo, model.layers[il].bo, Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); ffn_inp = ggml_get_rows(ctx0, ffn_inp, inp_out_ids); @@ -9213,7 +11771,7 @@ struct llm_build_cohere2 : public llm_graph_context { // * removed bias // * removed MoE struct llm_build_olmo : public llm_graph_context { - llm_build_olmo(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + llm_build_olmo(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) { const int64_t n_embd_head = hparams.n_embd_head_v; GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); @@ -9229,6 +11787,8 @@ struct llm_build_olmo : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified(); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -9282,14 +11842,12 @@ struct llm_build_olmo : public llm_graph_context { cb(Kcur, "Kcur", il); cb(Vcur, "Vcur", il); - cur = build_attn(inp_attn, gf, + cur = build_attn(inp_attn, model.layers[il].wo, nullptr, Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); } @@ -9341,7 +11899,7 @@ struct llm_build_olmo : public llm_graph_context { }; struct llm_build_olmo2 : public llm_graph_context { - llm_build_olmo2(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + llm_build_olmo2(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) { const int64_t n_embd_head = hparams.n_embd_head_v; GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); @@ -9357,6 +11915,8 @@ struct llm_build_olmo2 : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified(); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -9402,23 +11962,21 @@ struct llm_build_olmo2 : public llm_graph_context { cb(Kcur, "Kcur", il); cb(Vcur, "Vcur", il); - cur = build_attn(inp_attn, gf, + cur = build_attn(inp_attn, model.layers[il].wo, NULL, Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); } + if (il == n_layer - 1 && inp_out_ids) { + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); + } + cur = build_norm(cur, model.layers[il].attn_post_norm, NULL, LLM_NORM_RMS, il); cb(cur, "attn_post_norm", il); - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); - cur = ggml_get_rows(ctx0, cur, inp_out_ids); - inpSA = ggml_get_rows(ctx0, 
inpSA, inp_out_ids); - } - ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); cb(ffn_inp, "ffn_inp", il); @@ -9470,7 +12028,7 @@ struct llm_build_olmo2 : public llm_graph_context { // * removed bias // * added q, k norm struct llm_build_olmoe : public llm_graph_context { - llm_build_olmoe(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + llm_build_olmoe(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) { const int64_t n_embd_head = hparams.n_embd_head_v; GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); @@ -9486,6 +12044,8 @@ struct llm_build_olmoe : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified(); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -9535,14 +12095,12 @@ struct llm_build_olmoe : public llm_graph_context { cb(Kcur, "Kcur", il); cb(Vcur, "Vcur", il); - cur = build_attn(inp_attn, gf, + cur = build_attn(inp_attn, model.layers[il].wo, NULL, Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); } @@ -9598,7 +12156,7 @@ struct llm_build_olmoe : public llm_graph_context { }; struct llm_build_openelm : public llm_graph_context { - llm_build_openelm(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + llm_build_openelm(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) { const int64_t n_embd_head = hparams.n_embd_head_v; GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); @@ -9612,6 +12170,8 @@ struct llm_build_openelm : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified(); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { const int64_t n_head = hparams.n_head(il); const int64_t n_head_kv = hparams.n_head_kv(il); @@ -9633,10 +12193,10 @@ struct llm_build_openelm : public llm_graph_context { cur = ggml_reshape_3d(ctx0, cur, n_embd_head_k, n_head_qkv, n_tokens); - ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_3d(ctx0, cur, n_embd_head, n_head, n_tokens, cur->nb[1], cur->nb[2], 0)); + ggml_tensor * Qcur = ggml_view_3d(ctx0, cur, n_embd_head, n_head, n_tokens, cur->nb[1], cur->nb[2], 0); cb(Qcur, "Qcur", il); - ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_3d(ctx0, cur, n_embd_head, n_head_kv, n_tokens, cur->nb[1], cur->nb[2], cur->nb[1]*n_head)); + ggml_tensor * Kcur = ggml_view_3d(ctx0, cur, n_embd_head, n_head_kv, n_tokens, cur->nb[1], cur->nb[2], cur->nb[1]*n_head); cb(Kcur, "Kcur", il); ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_3d(ctx0, cur, n_embd_head, n_head_kv, n_tokens, cur->nb[1], cur->nb[2], cur->nb[1]*(n_head+n_head_kv))); @@ -9668,16 +12228,14 @@ struct llm_build_openelm : public llm_graph_context { cb(Kcur, "Kcur", il); cb(Qcur, "Vcur", il); - cur = build_attn(inp_attn, gf, + cur = build_attn(inp_attn, model.layers[il].wo, NULL, Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { residual = ggml_get_rows(ctx0, residual, inp_out_ids); - cur = ggml_get_rows(ctx0, 
cur, inp_out_ids); + cur = ggml_get_rows(ctx0, cur, inp_out_ids); } ggml_tensor * ffn_inp = ggml_add(ctx0, residual, cur); @@ -9727,7 +12285,7 @@ struct llm_build_openelm : public llm_graph_context { }; struct llm_build_gptneox : public llm_graph_context { - llm_build_gptneox(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + llm_build_gptneox(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) { const int64_t n_embd_head = hparams.n_embd_head_v; const int64_t n_embd_gqa = hparams.n_embd_v_gqa(); @@ -9743,6 +12301,8 @@ struct llm_build_gptneox : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified(); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { cur = build_norm(inpL, model.layers[il].attn_norm, @@ -9758,12 +12318,10 @@ struct llm_build_gptneox : public llm_graph_context { cur = ggml_add(ctx0, cur, model.layers[il].bqkv); cb(cur, "bqkv", il); - ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd))); - ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd))); + ggml_tensor * Qcur = ggml_view_3d(ctx0, cur, n_embd_head, n_head, n_tokens, n_embd_head*sizeof(float), cur->nb[1], 0*sizeof(float)*(n_embd)); + ggml_tensor * Kcur = ggml_view_3d(ctx0, cur, n_embd_head, n_head_kv, n_tokens, n_embd_head*sizeof(float), cur->nb[1], 1*sizeof(float)*(n_embd)); ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa))); - Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); - Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); Qcur = ggml_rope_ext( @@ -9782,14 +12340,12 @@ struct llm_build_gptneox : public llm_graph_context { cb(Kcur, "Kcur", il); cb(Vcur, "Vcur", il); - cur = build_attn(inp_attn, gf, + cur = build_attn(inp_attn, model.layers[il].wo, model.layers[il].bo, Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); } @@ -9875,7 +12431,7 @@ struct llm_build_gptneox : public llm_graph_context { }; struct llm_build_arctic : public llm_graph_context { - llm_build_arctic(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + llm_build_arctic(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) { const int64_t n_embd_head = hparams.n_embd_head_v; GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); @@ -9891,6 +12447,8 @@ struct llm_build_arctic : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified(); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -9932,14 +12490,12 @@ struct llm_build_arctic : public llm_graph_context { cb(Kcur, "Kcur", il); cb(Vcur, "Vcur", il); - cur = build_attn(inp_attn, gf, + cur = build_attn(inp_attn, model.layers[il].wo, NULL, Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - 
ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); } @@ -10013,7 +12569,7 @@ struct llm_build_arctic : public llm_graph_context { }; struct llm_build_deepseek : public llm_graph_context { - llm_build_deepseek(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + llm_build_deepseek(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) { const int64_t n_embd_head = hparams.n_embd_head_v; GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); @@ -10031,6 +12587,8 @@ struct llm_build_deepseek : public llm_graph_context { const float kq_scale = hparams.f_attention_scale == 0.0f ? 1.0f/sqrtf(float(n_embd_head)) : hparams.f_attention_scale; + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -10043,7 +12601,7 @@ struct llm_build_deepseek : public llm_graph_context { // self-attention { // rope freq factors for llama3; may return nullptr for llama2 and other models - ggml_tensor * rope_factors = model.get_rope_factors(n_ctx_per_seq, il); + ggml_tensor * rope_factors = model.get_rope_factors(cparams, il); // compute Q and K and RoPE them ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur); @@ -10087,19 +12645,16 @@ struct llm_build_deepseek : public llm_graph_context { cb(Kcur, "Kcur", il); cb(Vcur, "Vcur", il); - cur = build_attn(inp_attn, gf, + cur = build_attn(inp_attn, model.layers[il].wo, model.layers[il].bo, Qcur, Kcur, Vcur, nullptr, nullptr, kq_scale, il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); } - ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); cb(ffn_inp, "ffn_inp", il); @@ -10176,7 +12731,7 @@ struct llm_build_deepseek : public llm_graph_context { }; struct llm_build_deepseek2 : public llm_graph_context { - llm_build_deepseek2(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + llm_build_deepseek2(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) { bool is_lite = (hparams.n_layer == 27); const bool is_mla = (hparams.n_embd_head_k_mla != 0 && hparams.n_embd_head_v_mla != 0); @@ -10207,6 +12762,8 @@ struct llm_build_deepseek2 : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified(); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -10316,7 +12873,7 @@ struct llm_build_deepseek2 : public llm_graph_context { cb(Vcur, "Vcur", il); // note: MLA with the absorption optimzation converts into MQA (ie: GQA with 1 group) - cur = build_attn(inp_attn, gf, + cur = build_attn(inp_attn, model.layers[il].wo, NULL, Qcur, Kcur, Vcur, nullptr, model.layers[il].wv_b, kq_scale, il); } else { @@ -10350,15 +12907,13 @@ struct llm_build_deepseek2 : public llm_graph_context { cb(Kcur, "Kcur", il); // note: MLA without the absorption optimization converts into MHA (ie: GQA with full n_head groups) - cur = build_attn(inp_attn, gf, + cur = build_attn(inp_attn, model.layers[il].wo, NULL, Qcur, Kcur, Vcur, nullptr, nullptr, kq_scale, il); } } - if (il == n_layer - 1) { - // skip computing output for unused tokens - 
ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); } @@ -10439,7 +12994,7 @@ struct llm_build_deepseek2 : public llm_graph_context { }; struct llm_build_bitnet : public llm_graph_context { - llm_build_bitnet(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + llm_build_bitnet(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) { const int64_t n_embd_head = hparams.n_embd_head_v; GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); @@ -10454,6 +13009,8 @@ struct llm_build_bitnet : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified(); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -10517,7 +13074,7 @@ struct llm_build_bitnet : public llm_graph_context { cb(Kcur, "Kcur", il); cb(Vcur, "Vcur", il); - cur = build_attn(inp_attn, gf, + cur = build_attn(inp_attn, NULL, NULL, Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); @@ -10536,9 +13093,7 @@ struct llm_build_bitnet : public llm_graph_context { cb(cur, "attn_o_out", il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); } @@ -10599,7 +13154,7 @@ struct llm_build_bitnet : public llm_graph_context { }; struct llm_build_t5_enc : public llm_graph_context { - llm_build_t5_enc(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + llm_build_t5_enc(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) { const int64_t n_embd_head = hparams.n_embd_head_v; GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); @@ -10613,6 +13168,8 @@ struct llm_build_t5_enc : public llm_graph_context { auto * inp_attn = build_attn_inp_no_cache(); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -10640,15 +13197,13 @@ struct llm_build_t5_enc : public llm_graph_context { ggml_tensor * attn_rel_b = model.layers[il].attn_rel_b_enc ? 
model.layers[il].attn_rel_b_enc : model.layers[0].attn_rel_b_enc; ggml_tensor * kq_b = build_pos_bias(pos_bucket_enc, attn_rel_b); - cur = build_attn(inp_attn, gf, + cur = build_attn(inp_attn, model.layers[il].wo_enc, nullptr, Qcur, Kcur, Vcur, kq_b, nullptr, 1.0f, il); cb(cur, "kqv_out", il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); } @@ -10700,7 +13255,7 @@ struct llm_build_t5_enc : public llm_graph_context { }; struct llm_build_t5_dec : public llm_graph_context { - llm_build_t5_dec(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + llm_build_t5_dec(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) { const int64_t n_embd_head = hparams.n_embd_head_v; //const int64_t n_embd_gqa = hparams.n_embd_v_gqa(); @@ -10719,6 +13274,8 @@ struct llm_build_t5_dec : public llm_graph_context { auto * inp_attn_self = build_attn_inp_kv_unified(); auto * inp_attn_cross = build_attn_inp_cross(); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -10746,7 +13303,7 @@ struct llm_build_t5_dec : public llm_graph_context { ggml_tensor * attn_rel_b = model.layers[il].attn_rel_b ? model.layers[il].attn_rel_b : model.layers[0].attn_rel_b; ggml_tensor * kq_b = build_pos_bias(pos_bucket_dec, attn_rel_b); - cur = build_attn(inp_attn_self, gf, + cur = build_attn(inp_attn_self, model.layers[il].wo, model.layers[il].bo, Qcur, Kcur, Vcur, kq_b, nullptr, 1.0f, il); cb(cur, "kqv_out", il); @@ -10778,7 +13335,7 @@ struct llm_build_t5_dec : public llm_graph_context { Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_outputs_enc); Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_outputs_enc); - cur = build_attn(inp_attn_cross, gf, + cur = build_attn(inp_attn_cross, model.layers[il].wo_cross, nullptr, Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f, il); cb(cur, "kqv_out", il); @@ -10810,11 +13367,8 @@ struct llm_build_t5_dec : public llm_graph_context { //cb(cur, "kqv_out", il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); - inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); inpCA = ggml_get_rows(ctx0, inpCA, inp_out_ids); } @@ -10871,7 +13425,7 @@ struct llm_build_t5_dec : public llm_graph_context { }; struct llm_build_jais : public llm_graph_context { - llm_build_jais(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + llm_build_jais(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) { const int64_t n_embd_head = hparams.n_embd_head_v; const int64_t n_embd_gqa = hparams.n_embd_v_gqa(); @@ -10884,6 +13438,8 @@ struct llm_build_jais : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified(); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { cur = build_norm(inpL, model.layers[il].attn_norm, @@ -10911,14 +13467,12 @@ struct llm_build_jais : public llm_graph_context { Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); - cur = 
build_attn(inp_attn, gf, + cur = build_attn(inp_attn, model.layers[il].wo, model.layers[il].bo, Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/float(n_embd_head), il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); } @@ -10966,7 +13520,7 @@ struct llm_build_jais : public llm_graph_context { }; struct llm_build_chatglm : public llm_graph_context { - llm_build_chatglm(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + llm_build_chatglm(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) { const int64_t n_embd_head = hparams.n_embd_head_v; const int64_t n_embd_gqa = hparams.n_embd_v_gqa(); @@ -10982,6 +13536,8 @@ struct llm_build_chatglm : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified(); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -11010,6 +13566,8 @@ struct llm_build_chatglm : public llm_graph_context { if (model.layers[il].bv) { Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv); } + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); } else { cur = build_lora_mm(model.layers[il].wqkv, cur); cb(cur, "wqkv", il); @@ -11017,13 +13575,11 @@ struct llm_build_chatglm : public llm_graph_context { cur = ggml_add(ctx0, cur, model.layers[il].bqkv); cb(cur, "bqkv", il); } - Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd))); - Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd))); + Qcur = ggml_view_3d(ctx0, cur, n_embd_head, n_head, n_tokens, n_embd_head*sizeof(float), cur->nb[1], 0*sizeof(float)*(n_embd)); + Kcur = ggml_view_3d(ctx0, cur, n_embd_head, n_head_kv, n_tokens, n_embd_head*sizeof(float), cur->nb[1], 1*sizeof(float)*(n_embd)); Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa))); } - Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); - Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); //printf("freq_base: %f freq_scale: %f ext_factor: %f attn_factor: %f\n", freq_base, freq_scale, ext_factor, attn_factor); @@ -11043,14 +13599,12 @@ struct llm_build_chatglm : public llm_graph_context { cb(Kcur, "Kcur", il); cb(Vcur, "Vcur", il); - cur = build_attn(inp_attn, gf, + cur = build_attn(inp_attn, model.layers[il].wo, NULL, Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); } @@ -11099,7 +13653,7 @@ struct llm_build_chatglm : public llm_graph_context { }; struct llm_build_glm4 : public llm_graph_context { - llm_build_glm4(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + llm_build_glm4(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) { const int64_t n_embd_head = 
hparams.n_embd_head_v; const int64_t n_embd_gqa = hparams.n_embd_v_gqa(); @@ -11115,6 +13669,8 @@ struct llm_build_glm4 : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified(); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -11144,6 +13700,8 @@ struct llm_build_glm4 : public llm_graph_context { if (model.layers[il].bv) { Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv); } + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); } else { cur = build_lora_mm(model.layers[il].wqkv, cur); cb(cur, "wqkv", il); @@ -11151,13 +13709,11 @@ struct llm_build_glm4 : public llm_graph_context { cur = ggml_add(ctx0, cur, model.layers[il].bqkv); cb(cur, "bqkv", il); } - Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd))); - Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd))); + Qcur = ggml_view_3d(ctx0, cur, n_embd_head, n_head, n_tokens, n_embd_head*sizeof(float), cur->nb[1], 0*sizeof(float)*(n_embd)); + Kcur = ggml_view_3d(ctx0, cur, n_embd_head, n_head_kv, n_tokens, n_embd_head*sizeof(float), cur->nb[1], 1*sizeof(float)*(n_embd)); Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa))); } - Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); - Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); Qcur = ggml_rope_ext( @@ -11176,14 +13732,12 @@ struct llm_build_glm4 : public llm_graph_context { cb(Kcur, "Kcur", il); cb(Vcur, "Vcur", il); - cur = build_attn(inp_attn, gf, + cur = build_attn(inp_attn, model.layers[il].wo, NULL, Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); } @@ -11249,8 +13803,167 @@ struct llm_build_glm4 : public llm_graph_context { } }; +struct llm_build_glm4_moe : public llm_graph_context { + llm_build_glm4_moe(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) { + const int64_t n_embd_head = hparams.n_embd_head_v; + + GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); + + ggml_tensor * cur; + ggml_tensor * inpL; + + inpL = build_inp_embd(model.tok_embd); + + // inp_pos - contains the positions + ggml_tensor * inp_pos = build_inp_pos(); + + auto * inp_attn = build_attn_inp_kv_unified(); + + ggml_tensor * inp_out_ids = build_inp_out_ids(); + + // Only process up to last layer (skip final NextN layer) + // Final layer tensors are loaded but not processed in forward pass + const int n_transformer_layers = n_layer - hparams.nextn_predict_layers; + for (int il = 0; il < n_transformer_layers; ++il) { + ggml_tensor * inpSA = inpL; + + // Pre-attention norm + cur = build_norm(inpL, model.layers[il].attn_norm, NULL, LLM_NORM_RMS, il); + cb(cur, "attn_norm", il); + + // self-attention + { + ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur); + if (model.layers[il].bq) { + Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq); + } + cb(Qcur, "Qcur", il); + + ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur); + if 
(model.layers[il].bk) { + Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk); + } + cb(Kcur, "Kcur", il); + + ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur); + if (model.layers[il].bv) { + Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv); + } + cb(Vcur, "Vcur", il); + + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); + + // Apply Q/K norm if available (GLM-4.5 355B variant) + if (model.layers[il].attn_q_norm) { + Qcur = build_norm(Qcur, model.layers[il].attn_q_norm, NULL, LLM_NORM_RMS, il); + cb(Qcur, "Qcur_normed", il); + } + if (model.layers[il].attn_k_norm) { + Kcur = build_norm(Kcur, model.layers[il].attn_k_norm, NULL, LLM_NORM_RMS, il); + cb(Kcur, "Kcur_normed", il); + } + + Qcur = ggml_rope_ext( + ctx0, Qcur, inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + + Kcur = ggml_rope_ext( + ctx0, Kcur, inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + + cb(Qcur, "Qcur", il); + cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); + + cur = build_attn(inp_attn, + model.layers[il].wo, NULL, + Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); + } + + if (il == n_transformer_layers - 1 && inp_out_ids) { + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); + } + + ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); + cb(ffn_inp, "ffn_inp", il); + + // Post-attention norm + cur = build_norm(ffn_inp, model.layers[il].attn_post_norm, NULL, LLM_NORM_RMS, il); + cb(cur, "post_attn_norm", il); + + // Check if this is a dense layer (n_layer_dense_lead=1, so layer 0 is dense) + if (static_cast<uint32_t>(il) < hparams.n_layer_dense_lead) { + // Dense FFN layer + cur = build_ffn(cur, + model.layers[il].ffn_up, NULL, NULL, + model.layers[il].ffn_gate, NULL, NULL, + model.layers[il].ffn_down, NULL, NULL, + NULL, + LLM_FFN_SILU, LLM_FFN_PAR, il); + cb(cur, "ffn_out", il); + } else { + // Process routed experts using existing MoE infrastructure + ggml_tensor * routed_out = build_moe_ffn(cur, + model.layers[il].ffn_gate_inp, + model.layers[il].ffn_up_exps, + model.layers[il].ffn_gate_exps, + model.layers[il].ffn_down_exps, + model.layers[il].ffn_exp_probs_b, + n_expert, n_expert_used, + LLM_FFN_SILU, hparams.expert_weights_norm, + true, hparams.expert_weights_scale, + (llama_expert_gating_func_type) hparams.expert_gating_func, + il); + cb(routed_out, "ffn_moe_out", il); + + // Process shared expert on original input + ggml_tensor * shared_out = build_ffn(cur, + model.layers[il].ffn_up_shexp, NULL, NULL, + model.layers[il].ffn_gate_shexp, NULL, NULL, + model.layers[il].ffn_down_shexp, NULL, NULL, + NULL, + LLM_FFN_SILU, LLM_FFN_PAR, il); + cb(shared_out, "ffn_shexp_out", il); + + // Final output: routed_output + shared_output + cur = ggml_add(ctx0, routed_out, shared_out); + cb(cur, "ffn_out", il); + } + + cur = ggml_add(ctx0, cur, ffn_inp); + + cur = build_cvec(cur, il); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; + } + + cur = inpL; + cur = build_norm(cur, model.output_norm, NULL, LLM_NORM_RMS, -1); + + cb(cur, "result_norm", -1); + res->t_embd = cur; + + // lm_head + cur = build_lora_mm(model.output, cur); + + cb(cur, "result_output", -1); + res->t_logits = cur; + + ggml_build_forward_expand(gf, cur); + } +}; + 
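// --- editor's note (not part of the upstream patch) -------------------------
// The llm_build_glm4_moe graph added above uses the routed-plus-shared MoE
// pattern: the first hparams.n_layer_dense_lead layers run an ordinary dense
// FFN, and every later layer adds the output of the top-k routed experts to
// the output of one always-on shared expert computed from the same input.
// Below is a minimal, self-contained C++ sketch of that combine rule only;
// dense_ffn, routed_moe, and shared_ffn are invented placeholders standing in
// for the build_ffn / build_moe_ffn tensor graphs, not llama.cpp APIs.

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

using Vec = std::vector<float>;

// Placeholder layer bodies; in the patch the real work is done by build_ffn
// and build_moe_ffn over ggml tensors.
static Vec dense_ffn(const Vec & x)  { Vec y(x); for (float & v : y) v *= 2.0f; return y; }
static Vec routed_moe(const Vec & x) { Vec y(x); for (float & v : y) v *= 0.5f; return y; }
static Vec shared_ffn(const Vec & x) { Vec y(x); for (float & v : y) v += 1.0f; return y; }

// Per-layer FFN choice: dense for the lead layers (layer 0 for GLM-4.5),
// routed experts plus the shared expert everywhere else.
static Vec ffn_layer(const Vec & x, uint32_t il, uint32_t n_layer_dense_lead) {
    if (il < n_layer_dense_lead) {
        return dense_ffn(x);
    }
    Vec out          = routed_moe(x);   // experts selected by the router (ffn_gate_inp)
    const Vec shared = shared_ffn(x);   // shared expert sees the original input
    for (size_t i = 0; i < out.size(); ++i) {
        out[i] += shared[i];            // mirrors cur = ggml_add(ctx0, routed_out, shared_out)
    }
    return out;
}

int main() {
    Vec h = {1.0f, 2.0f};
    for (uint32_t il = 0; il < 3; ++il) {
        h = ffn_layer(h, il, /*n_layer_dense_lead=*/1);
        std::printf("layer %u: %.2f %.2f\n", il, h[0], h[1]);
    }
    return 0;
}
// The sketch mirrors only the control flow; dimensions, gating scores, and
// expert weighting are handled by build_moe_ffn in the actual graph.
// ---------------------------------------------------------------------------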
struct llm_build_nemotron : public llm_graph_context { - llm_build_nemotron(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + llm_build_nemotron(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) { const int64_t n_embd_head = hparams.n_embd_head_v; GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); @@ -11266,6 +13979,8 @@ struct llm_build_nemotron : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified(); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -11320,14 +14035,12 @@ struct llm_build_nemotron : public llm_graph_context { cb(Kcur, "Kcur", il); cb(Vcur, "Vcur", il); - cur = build_attn(inp_attn, gf, + cur = build_attn(inp_attn, model.layers[il].wo, model.layers[il].bo, Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); } @@ -11379,7 +14092,7 @@ }; struct llm_build_exaone : public llm_graph_context { - llm_build_exaone(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + llm_build_exaone(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) { const int64_t n_embd_head = hparams.n_embd_head_v; GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); @@ -11395,6 +14108,8 @@ struct llm_build_exaone : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified(); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -11407,7 +14122,7 @@ struct llm_build_exaone : public llm_graph_context { // self-attention { // rope freq factors for llama3; may return nullptr for llama2 and other models - ggml_tensor * rope_factors = model.get_rope_factors(n_ctx_per_seq, il); + ggml_tensor * rope_factors = model.get_rope_factors(cparams, il); // compute Q and K and RoPE them ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur); @@ -11451,14 +14166,12 @@ struct llm_build_exaone : public llm_graph_context { cb(Kcur, "Kcur", il); cb(Vcur, "Vcur", il); - cur = build_attn(inp_attn, gf, + cur = build_attn(inp_attn, model.layers[il].wo, model.layers[il].bo, Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); } @@ -11509,6 +14222,142 @@ } }; +template <bool iswa> +struct llm_build_exaone4 : public llm_graph_context { + llm_build_exaone4(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) { + const int64_t n_embd_head = hparams.n_embd_head_k; + + GGML_ASSERT(n_embd_head == hparams.n_embd_head_v); + GGML_ASSERT(n_embd_head == hparams.n_rot); + + ggml_tensor * cur; + ggml_tensor * inpL; + + inpL = build_inp_embd(model.tok_embd); + + // inp_pos - contains the positions + ggml_tensor * inp_pos = build_inp_pos(); + + using inp_attn_type = std::conditional_t<iswa, llm_graph_input_attn_kv_unified_iswa, llm_graph_input_attn_kv_unified>; + inp_attn_type * 
inp_attn = nullptr; + + if constexpr (iswa) { + inp_attn = build_attn_inp_kv_unified_iswa(); + } else { + inp_attn = build_attn_inp_kv_unified(); + } + + ggml_tensor * inp_out_ids = build_inp_out_ids(); + + for (int il = 0; il < n_layer; ++il) { + ggml_tensor * inpSA = inpL; + + // use RoPE for SWA layers or non-SWA models + const bool use_rope = hparams.is_swa(il) || hparams.swa_type == LLAMA_SWA_TYPE_NONE; + + cur = inpL; + + // self-attention + { + ggml_tensor * rope_factors = model.get_rope_factors(cparams, il); + + ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur); + cb(Qcur, "Qcur", il); + + ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur); + cb(Kcur, "Kcur", il); + + ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur); + cb(Vcur, "Vcur", il); + + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); + + Qcur = build_norm(Qcur, model.layers[il].attn_q_norm, NULL, LLM_NORM_RMS, il); + Kcur = build_norm(Kcur, model.layers[il].attn_k_norm, NULL, LLM_NORM_RMS, il); + cb(Qcur, "Qcur_normed", il); + cb(Kcur, "Kcur_normed", il); + + if (use_rope) { + Qcur = ggml_rope_ext( + ctx0, Qcur, inp_pos, rope_factors, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + + Kcur = ggml_rope_ext( + ctx0, Kcur, inp_pos, rope_factors, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + } + + cb(Qcur, "Qcur", il); + cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); + + cur = build_attn(inp_attn, + model.layers[il].wo, NULL, + Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); + cb(cur, "attn_out", il); + } + + if (il == n_layer - 1 && inp_out_ids) { + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); + } + + cur = build_norm(cur, + model.layers[il].attn_post_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "attn_post_norm", il); + + ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); + cb(ffn_inp, "ffn_inp", il); + + // feed-forward network + cur = build_ffn(ffn_inp, + model.layers[il].ffn_up, NULL, NULL, + model.layers[il].ffn_gate, NULL, NULL, + model.layers[il].ffn_down, NULL, NULL, + NULL, + LLM_FFN_SILU, LLM_FFN_PAR, il); + cb(cur, "ffn_out", il); + + cur = build_norm(cur, + model.layers[il].ffn_post_norm, NULL, + LLM_NORM_RMS, -1); + cb(cur, "ffn_post_norm", -1); + + cur = ggml_add(ctx0, cur, ffn_inp); + + cur = build_cvec(cur, il); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; + } + + cur = inpL; + + cur = build_norm(cur, + model.output_norm, NULL, + LLM_NORM_RMS, -1); + + cb(cur, "result_norm", -1); + res->t_embd = cur; + + // lm_head + cur = build_lora_mm(model.output, cur); + + cb(cur, "result_output", -1); + res->t_logits = cur; + + ggml_build_forward_expand(gf, cur); + } +}; + struct llm_build_rwkv6_base : public llm_graph_context { const llama_model & model; @@ -11545,14 +14394,12 @@ } ggml_tensor * build_rwkv6_time_mix( - ggml_cgraph * gf, + llm_graph_input_rs * inp, ggml_tensor * cur, ggml_tensor * x_prev, - ggml_tensor * state_copy, - ggml_tensor * state_mask, const llama_ubatch & ubatch, int il) const { - const llama_kv_cache_recurrent * kv_self = static_cast<const llama_kv_cache_recurrent *>(memory); + const auto * mctx_cur = static_cast<const llama_memory_recurrent_context *>(mctx); const auto n_tokens = ubatch.n_tokens; const 
auto n_seqs = ubatch.n_seqs; @@ -11562,7 +14409,7 @@ struct llm_build_rwkv6_base : public llm_graph_context { const auto n_head = n_embd / head_size; const auto n_head_kv = hparams.n_head_kv(il); - const auto kv_head = kv_self->head; + const auto kv_head = mctx_cur->get_head(); const auto & layer = model.layers[il]; @@ -11673,9 +14520,9 @@ struct llm_build_rwkv6_base : public llm_graph_context { k = ggml_sub(ctx0, k, ggml_mul(ctx0, k, w)); } - ggml_tensor * wkv_state = build_copy_mask_state( - gf, kv_self->v_l[il], state_copy, state_mask, - hparams.n_embd_v_s(), n_seqs); + ggml_tensor * wkv_state = build_rs( + inp, mctx_cur->get_s_l(il), + hparams.n_embd_s(), n_seqs); ggml_tensor * wkv_output; if (is_qrwkv) { @@ -11693,9 +14540,9 @@ struct llm_build_rwkv6_base : public llm_graph_context { wkv_state, ggml_view_1d( ctx0, - kv_self->v_l[il], - hparams.n_embd_v_s() * n_seqs, - hparams.n_embd_v_s() * kv_head * ggml_element_size(kv_self->v_l[il]) + mctx_cur->get_s_l(il), + hparams.n_embd_s() * n_seqs, + hparams.n_embd_s() * kv_head * ggml_element_size(mctx_cur->get_s_l(il)) ) ) ); @@ -11720,7 +14567,7 @@ struct llm_build_rwkv6_base : public llm_graph_context { }; struct llm_build_rwkv6 : public llm_build_rwkv6_base { - llm_build_rwkv6(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_build_rwkv6_base(model, params) { + llm_build_rwkv6(const llama_model & model, const llm_graph_params & params) : llm_build_rwkv6_base(model, params) { GGML_ASSERT(hparams.token_shift_count == 2); ggml_tensor * cur; @@ -11729,20 +14576,19 @@ struct llm_build_rwkv6 : public llm_build_rwkv6_base { inpL = build_inp_embd(model.tok_embd); inpL = build_norm(inpL, model.tok_norm, model.tok_norm_b, LLM_NORM, -1); - ggml_tensor * state_copy = build_inp_s_copy(); - ggml_tensor * state_mask = build_inp_s_mask(); + auto * rs_inp = build_rs_inp(); const auto n_embd = hparams.n_embd; const auto n_seq_tokens = ubatch.n_seq_tokens; const auto n_seqs = ubatch.n_seqs; + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { const llama_layer * layer = &model.layers[il]; inpL = ggml_reshape_3d(ctx0, inpL, n_embd, n_seq_tokens, n_seqs); - ggml_tensor * token_shift = build_rwkv_token_shift_load( - gf, state_copy, state_mask, ubatch, il - ); + ggml_tensor * token_shift = build_rwkv_token_shift_load(rs_inp, ubatch, il); ggml_tensor * att_shift = ggml_view_3d(ctx0, token_shift, n_embd, 1, n_seqs, token_shift->nb[1], token_shift->nb[2], 0); ggml_tensor * ffn_shift = ggml_view_3d(ctx0, token_shift, n_embd, 1, n_seqs, token_shift->nb[1], token_shift->nb[2], n_embd * ggml_element_size(token_shift)); @@ -11757,7 +14603,7 @@ struct llm_build_rwkv6 : public llm_build_rwkv6_base { 1 ); - cur = build_rwkv6_time_mix(gf, att_norm, x_prev, state_copy, state_mask, ubatch, il); + cur = build_rwkv6_time_mix(rs_inp, att_norm, x_prev, ubatch, il); ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL); cb(ffn_inp, "ffn_inp", il); @@ -11779,13 +14625,16 @@ struct llm_build_rwkv6 : public llm_build_rwkv6_base { ); ggml_build_forward_expand(gf, build_rwkv_token_shift_store(token_shift, ubatch, il)); - if (il == n_layer - 1) { - // skip computing output for unused tokens - struct ggml_tensor * inp_out_ids = build_inp_out_ids(); - ffn_inp = ggml_get_rows(ctx0, ggml_reshape_2d(ctx0, ffn_inp, n_embd, n_tokens), inp_out_ids); - ffn_norm = ggml_get_rows(ctx0, ggml_reshape_2d(ctx0, ffn_norm, n_embd, n_tokens), inp_out_ids); - x_prev = ggml_get_rows(ctx0, ggml_reshape_2d(ctx0, x_prev, n_embd, n_tokens), 
inp_out_ids); - cur = ggml_get_rows(ctx0, ggml_reshape_2d(ctx0, cur, n_embd, n_tokens), inp_out_ids); + ffn_inp = ggml_reshape_2d(ctx0, ffn_inp, n_embd, n_tokens); + ffn_norm = ggml_reshape_2d(ctx0, ffn_norm, n_embd, n_tokens); + x_prev = ggml_reshape_2d(ctx0, x_prev, n_embd, n_tokens); + cur = ggml_reshape_2d(ctx0, cur, n_embd, n_tokens); + + if (il == n_layer - 1 && inp_out_ids) { + ffn_inp = ggml_get_rows(ctx0, ffn_inp, inp_out_ids); + ffn_norm = ggml_get_rows(ctx0, ffn_norm, inp_out_ids); + x_prev = ggml_get_rows(ctx0, x_prev, inp_out_ids); + cur = ggml_get_rows(ctx0, cur, inp_out_ids); } cur = build_rwkv6_channel_mix(layer, ffn_norm, x_prev, LLM_ARCH_RWKV6); @@ -11819,28 +14668,27 @@ struct llm_build_rwkv6 : public llm_build_rwkv6_base { // ref: https://huggingface.co/recursal/QRWKV6-32B-Instruct-Preview-v0.1/blob/main/modeling_rwkv6qwen2.py struct llm_build_rwkv6qwen2 : public llm_build_rwkv6_base { - llm_build_rwkv6qwen2(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_build_rwkv6_base(model, params) { - GGML_ASSERT(n_embd == hparams.n_embd_k_s()); + llm_build_rwkv6qwen2(const llama_model & model, const llm_graph_params & params) : llm_build_rwkv6_base(model, params) { + GGML_ASSERT(n_embd == hparams.n_embd_r()); ggml_tensor * cur; ggml_tensor * inpL; inpL = build_inp_embd(model.tok_embd); - ggml_tensor * state_copy = build_inp_s_copy(); - ggml_tensor * state_mask = build_inp_s_mask(); + auto * rs_inp = build_rs_inp(); const auto n_embd = hparams.n_embd; const auto n_seq_tokens = ubatch.n_seq_tokens; const auto n_seqs = ubatch.n_seqs; + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { const llama_layer * layer = &model.layers[il]; inpL = ggml_reshape_3d(ctx0, inpL, n_embd, n_seq_tokens, n_seqs); - ggml_tensor * token_shift = build_rwkv_token_shift_load( - gf, state_copy, state_mask, ubatch, il - ); + ggml_tensor * token_shift = build_rwkv_token_shift_load(rs_inp, ubatch, il); ggml_tensor * att_norm = build_norm(inpL, layer->attn_norm, layer->attn_norm_b, LLM_NORM_RMS, il); cb(att_norm, "attn_norm", il); @@ -11852,7 +14700,7 @@ struct llm_build_rwkv6qwen2 : public llm_build_rwkv6_base { 1 ); - cur = build_rwkv6_time_mix(gf, att_norm, x_prev, state_copy, state_mask, ubatch, il); + cur = build_rwkv6_time_mix(rs_inp, att_norm, x_prev, ubatch, il); token_shift = ggml_view_3d(ctx0, att_norm, n_embd, 1, n_seqs, att_norm->nb[1], att_norm->nb[2], (n_seq_tokens-1)*n_embd*ggml_element_size(att_norm)); ggml_build_forward_expand(gf, build_rwkv_token_shift_store(token_shift, ubatch, il)); @@ -11860,11 +14708,12 @@ struct llm_build_rwkv6qwen2 : public llm_build_rwkv6_base { ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL); cb(ffn_inp, "ffn_inp", il); - if (il == n_layer - 1) { - // skip computing output for unused tokens - struct ggml_tensor * inp_out_ids = build_inp_out_ids(); - cur = ggml_get_rows(ctx0, ggml_reshape_2d(ctx0, cur, n_embd, n_tokens), inp_out_ids); - ffn_inp = ggml_get_rows(ctx0, ggml_reshape_2d(ctx0, ffn_inp, n_embd, n_tokens), inp_out_ids); + cur = ggml_reshape_2d(ctx0, cur, n_embd, n_tokens); + ffn_inp = ggml_reshape_2d(ctx0, ffn_inp, n_embd, n_tokens); + + if (il == n_layer - 1 && inp_out_ids) { + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + ffn_inp = ggml_get_rows(ctx0, ffn_inp, inp_out_ids); } // feed-forward network @@ -11940,15 +14789,13 @@ struct llm_build_rwkv7_base : public llm_graph_context { } ggml_tensor * build_rwkv7_time_mix( - ggml_cgraph * gf, + llm_graph_input_rs * inp, ggml_tensor * 
cur, ggml_tensor * x_prev, - ggml_tensor * state_copy, - ggml_tensor * state_mask, ggml_tensor *& first_layer_value, const llama_ubatch & ubatch, int il) const { - const llama_kv_cache_recurrent * kv_self = static_cast<const llama_kv_cache_recurrent *>(memory); + const auto * mctx_cur = static_cast<const llama_memory_recurrent_context *>(mctx); const auto n_tokens = ubatch.n_tokens; const auto n_seqs = ubatch.n_seqs; @@ -11957,7 +14804,7 @@ const auto head_count = n_embd / head_size; const auto n_seq_tokens = ubatch.n_seq_tokens; - const auto kv_head = kv_self->head; + const auto kv_head = mctx_cur->get_head(); const auto & layer = model.layers[il]; @@ -12027,9 +14874,9 @@ v = ggml_reshape_3d(ctx0, v, head_size, head_count, n_tokens); a = ggml_reshape_3d(ctx0, a, head_size, head_count, n_tokens); - ggml_tensor * wkv_state = build_copy_mask_state( - gf, kv_self->v_l[il], state_copy, state_mask, - hparams.n_embd_v_s(), n_seqs); + ggml_tensor * wkv_state = build_rs( + inp, mctx_cur->get_s_l(il), + hparams.n_embd_s(), n_seqs); ggml_tensor * wkv_output = ggml_rwkv_wkv7(ctx0, r, w, k, v, ggml_neg(ctx0, kk), ggml_mul(ctx0, kk, a), wkv_state); cur = ggml_view_1d(ctx0, wkv_output, n_embd * n_tokens, 0); @@ -12042,9 +14889,9 @@ wkv_state, ggml_view_1d( ctx0, - kv_self->v_l[il], - hparams.n_embd_v_s() * n_seqs, - hparams.n_embd_v_s() * kv_head * ggml_element_size(kv_self->v_l[il]) + mctx_cur->get_s_l(il), + hparams.n_embd_s() * n_seqs, + hparams.n_embd_s() * kv_head * ggml_element_size(mctx_cur->get_s_l(il)) ) ) ); @@ -12075,7 +14922,7 @@ }; struct llm_build_rwkv7 : public llm_build_rwkv7_base { - llm_build_rwkv7(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_build_rwkv7_base(model, params) { + llm_build_rwkv7(const llama_model & model, const llm_graph_params & params) : llm_build_rwkv7_base(model, params) { GGML_ASSERT(hparams.token_shift_count == 2); ggml_tensor * cur; @@ -12085,20 +14932,19 @@ inpL = build_inp_embd(model.tok_embd); inpL = build_norm(inpL, model.tok_norm, model.tok_norm_b, LLM_NORM, -1); - ggml_tensor * state_copy = build_inp_s_copy(); - ggml_tensor * state_mask = build_inp_s_mask(); + auto * rs_inp = build_rs_inp(); const auto n_embd = hparams.n_embd; const auto n_seq_tokens = ubatch.n_seq_tokens; const auto n_seqs = ubatch.n_seqs; + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { const llama_layer * layer = &model.layers[il]; inpL = ggml_reshape_3d(ctx0, inpL, n_embd, n_seq_tokens, n_seqs); - ggml_tensor * token_shift = build_rwkv_token_shift_load( - gf, state_copy, state_mask, ubatch, il - ); + ggml_tensor * token_shift = build_rwkv_token_shift_load(rs_inp, ubatch, il); ggml_tensor * att_shift = ggml_view_3d(ctx0, token_shift, n_embd, 1, n_seqs, token_shift->nb[1], token_shift->nb[2], 0); ggml_tensor * ffn_shift = ggml_view_3d(ctx0, token_shift, n_embd, 1, n_seqs, token_shift->nb[1], token_shift->nb[2], n_embd * ggml_element_size(token_shift)); @@ -12113,7 +14959,7 @@ 1 ); - cur = build_rwkv7_time_mix(gf, att_norm, x_prev, state_copy, state_mask, v_first, ubatch, il); + cur = build_rwkv7_time_mix(rs_inp, att_norm, x_prev, v_first, ubatch, il); ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL); cb(ffn_inp, "ffn_inp", il); @@ -12135,12 
+14981,14 @@ struct llm_build_rwkv7 : public llm_build_rwkv7_base { ); ggml_build_forward_expand(gf, build_rwkv_token_shift_store(token_shift, ubatch, il)); - if (il == n_layer - 1) { - // skip computing output for unused tokens - struct ggml_tensor * inp_out_ids = build_inp_out_ids(); - ffn_inp = ggml_get_rows(ctx0, ggml_reshape_2d(ctx0, ffn_inp, n_embd, n_tokens), inp_out_ids); - ffn_norm = ggml_get_rows(ctx0, ggml_reshape_2d(ctx0, ffn_norm, n_embd, n_tokens), inp_out_ids); - x_prev = ggml_get_rows(ctx0, ggml_reshape_2d(ctx0, x_prev, n_embd, n_tokens), inp_out_ids); + ffn_inp = ggml_reshape_2d(ctx0, ffn_inp, n_embd, n_tokens); + ffn_norm = ggml_reshape_2d(ctx0, ffn_norm, n_embd, n_tokens); + x_prev = ggml_reshape_2d(ctx0, x_prev, n_embd, n_tokens); + + if (il == n_layer - 1 && inp_out_ids) { + ffn_inp = ggml_get_rows(ctx0, ffn_inp, inp_out_ids); + ffn_norm = ggml_get_rows(ctx0, ffn_norm, inp_out_ids); + x_prev = ggml_get_rows(ctx0, x_prev, inp_out_ids); } cur = build_rwkv7_channel_mix(layer, ffn_norm, x_prev, LLM_ARCH_RWKV7); @@ -12170,8 +15018,8 @@ struct llm_build_rwkv7 : public llm_build_rwkv7_base { struct llm_build_arwkv7 : public llm_build_rwkv7_base { - llm_build_arwkv7(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_build_rwkv7_base(model, params) { - GGML_ASSERT(n_embd == hparams.n_embd_k_s()); + llm_build_arwkv7(const llama_model & model, const llm_graph_params & params) : llm_build_rwkv7_base(model, params) { + GGML_ASSERT(n_embd == hparams.n_embd_r()); ggml_tensor * cur; ggml_tensor * inpL; @@ -12179,20 +15027,19 @@ struct llm_build_arwkv7 : public llm_build_rwkv7_base { inpL = build_inp_embd(model.tok_embd); - ggml_tensor * state_copy = build_inp_s_copy(); - ggml_tensor * state_mask = build_inp_s_mask(); + auto * rs_inp = build_rs_inp(); const auto n_embd = hparams.n_embd; const auto n_seq_tokens = ubatch.n_seq_tokens; const auto n_seqs = ubatch.n_seqs; + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { const llama_layer * layer = &model.layers[il]; inpL = ggml_reshape_3d(ctx0, inpL, n_embd, n_seq_tokens, n_seqs); - ggml_tensor * token_shift = build_rwkv_token_shift_load( - gf, state_copy, state_mask, ubatch, il - ); + ggml_tensor * token_shift = build_rwkv_token_shift_load(rs_inp, ubatch, il); ggml_tensor * att_norm = build_norm(inpL, layer->attn_norm, layer->attn_norm_b, LLM_NORM_RMS, il); cb(att_norm, "attn_norm", il); @@ -12204,7 +15051,7 @@ struct llm_build_arwkv7 : public llm_build_rwkv7_base { 1 ); - cur = build_rwkv7_time_mix(gf, att_norm, x_prev, state_copy, state_mask, v_first, ubatch, il); + cur = build_rwkv7_time_mix(rs_inp, att_norm, x_prev, v_first, ubatch, il); token_shift = ggml_view_3d(ctx0, att_norm, n_embd, 1, n_seqs, att_norm->nb[1], att_norm->nb[2], (n_seq_tokens-1)*n_embd*ggml_element_size(att_norm)); ggml_build_forward_expand(gf, build_rwkv_token_shift_store(token_shift, ubatch, il)); @@ -12212,11 +15059,12 @@ struct llm_build_arwkv7 : public llm_build_rwkv7_base { ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL); cb(ffn_inp, "ffn_inp", il); - if (il == n_layer - 1) { - // skip computing output for unused tokens - struct ggml_tensor * inp_out_ids = build_inp_out_ids(); - cur = ggml_get_rows(ctx0, ggml_reshape_2d(ctx0, cur, n_embd, n_tokens), inp_out_ids); - ffn_inp = ggml_get_rows(ctx0, ggml_reshape_2d(ctx0, ffn_inp, n_embd, n_tokens), inp_out_ids); + cur = ggml_reshape_2d(ctx0, cur, n_embd, n_tokens); + ffn_inp = ggml_reshape_2d(ctx0, ffn_inp, n_embd, n_tokens); + + if (il 
== n_layer - 1 && inp_out_ids) { + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + ffn_inp = ggml_get_rows(ctx0, ffn_inp, inp_out_ids); } // feed-forward network @@ -12257,6 +15105,598 @@ struct llm_build_arwkv7 : public llm_build_rwkv7_base { } }; +struct llm_build_granite : public llm_graph_context { + llm_build_granite( + const llama_model & model, + const llm_graph_params & params) + : llm_graph_context(params) { + + const int64_t n_embd_head = hparams.n_embd_head_v; + + GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); + GGML_ASSERT(n_embd_head == hparams.n_rot); + + ggml_tensor * cur; + ggml_tensor * inpL; + + inpL = build_inp_embd(model.tok_embd); + + // inp_pos - built only if rope enabled + ggml_tensor * inp_pos = nullptr; + if (hparams.rope_finetuned) { + inp_pos = build_inp_pos(); + } + + auto * inp_attn = build_attn_inp_kv_unified(); + + ggml_tensor * inp_out_ids = build_inp_out_ids(); + + for (int il = 0; il < n_layer; ++il) { + ggml_tensor * inpSA = inpL; + + // norm + cur = build_norm(inpL, + model.layers[il].attn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "attn_norm", il); + + // self-attention + cur = build_attention_layer( + cur, inp_pos, inp_attn, + model, n_embd_head, il); + + if (il == n_layer - 1 && inp_out_ids) { + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); + } + + // ffn + cur = build_layer_ffn(cur, inpSA, model, il); + + // input for next layer + inpL = cur; + } + + cur = inpL; + + cur = build_norm(cur, + model.output_norm, NULL, + LLM_NORM_RMS, -1); + + cb(cur, "result_norm", -1); + res->t_embd = cur; + + // lm_head + cur = build_lora_mm(model.output, cur); + + // For Granite architectures - scale logits + cur = ggml_scale(ctx0, cur, 1.0f / hparams.f_logit_scale); + cb(cur, "result_output", -1); + res->t_logits = cur; + + ggml_build_forward_expand(gf, cur); + } + + ggml_tensor * build_attention_layer( + ggml_tensor * cur, + ggml_tensor * inp_pos, + llm_graph_input_attn_kv_unified * inp_attn, + const llama_model & model, + const int64_t n_embd_head, + const int il) { + + // compute Q and K and (optionally) RoPE them + ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur); + cb(Qcur, "Qcur", il); + if (model.layers[il].bq) { + Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq); + cb(Qcur, "Qcur", il); + } + + ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur); + cb(Kcur, "Kcur", il); + if (model.layers[il].bk) { + Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk); + cb(Kcur, "Kcur", il); + } + + ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur); + cb(Vcur, "Vcur", il); + if (model.layers[il].bv) { + Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv); + cb(Vcur, "Vcur", il); + } + + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, hparams.n_head(il), n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, hparams.n_head_kv(il), n_tokens); + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, hparams.n_head_kv(il), n_tokens); + + const bool use_rope = hparams.rope_finetuned; + if (use_rope) { + ggml_tensor * rope_factors = model.get_rope_factors(cparams, il); + Qcur = ggml_rope_ext( + ctx0, Qcur, inp_pos, rope_factors, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + + Kcur = ggml_rope_ext( + ctx0, Kcur, inp_pos, rope_factors, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + } + + cb(Qcur, "Qcur", il); + cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); + + const float kq_scale 
= hparams.f_attention_scale == 0.0f ? 1.0f/sqrtf(float(n_embd_head)) : hparams.f_attention_scale; + cur = build_attn(inp_attn, + model.layers[il].wo, model.layers[il].bo, + Qcur, Kcur, Vcur, nullptr, nullptr, kq_scale, il); + cb(cur, "attn_out", il); + return cur; + } + + ggml_tensor * build_layer_ffn( + ggml_tensor * cur, + ggml_tensor * inpSA, + const llama_model & model, + const int il) { + + // For Granite architectures - scale residual + if (hparams.f_residual_scale) { + cur = ggml_scale(ctx0, cur, hparams.f_residual_scale); + } + ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); + cb(ffn_inp, "ffn_inp", il); + + // feed-forward network (non-MoE) + if (model.layers[il].ffn_gate_inp == nullptr) { + + cur = build_norm(ffn_inp, + model.layers[il].ffn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "ffn_norm", il); + + cur = build_ffn(cur, + model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL, + model.layers[il].ffn_gate, model.layers[il].ffn_gate_b, NULL, + model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL, + NULL, + LLM_FFN_SILU, LLM_FFN_PAR, il); + cb(cur, "ffn_out", il); + + } else { + // MoE branch + cur = build_norm(ffn_inp, + model.layers[il].ffn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "ffn_norm", il); + + ggml_tensor * moe_out = build_moe_ffn(cur, + model.layers[il].ffn_gate_inp, + model.layers[il].ffn_up_exps, + model.layers[il].ffn_gate_exps, + model.layers[il].ffn_down_exps, + nullptr, + n_expert, n_expert_used, + LLM_FFN_SILU, true, + false, 0.0, + LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX, + il); + cb(moe_out, "ffn_moe_out", il); + + // For Granite MoE Shared + if (hparams.n_ff_shexp > 0) { + ggml_tensor * ffn_shexp = build_ffn(cur, + model.layers[il].ffn_up_shexp, NULL, NULL, + model.layers[il].ffn_gate_shexp, NULL, NULL, + model.layers[il].ffn_down_shexp, NULL, NULL, + NULL, + LLM_FFN_SILU, LLM_FFN_PAR, il); + cb(ffn_shexp, "ffn_shexp", il); + + cur = ggml_add(ctx0, moe_out, ffn_shexp); + cb(cur, "ffn_out", il); + } else { + cur = moe_out; + } + } + + // For Granite architectures - scale residual + if (hparams.f_residual_scale) { + cur = ggml_scale(ctx0, cur, hparams.f_residual_scale); + } + cur = ggml_add(ctx0, cur, ffn_inp); + cb(cur, "ffn_out", il); + + cur = build_cvec(cur, il); + cb(cur, "l_out", il); + + return cur; + } +}; + +struct llm_build_granite_hybrid : public llm_graph_context_mamba { + llm_build_granite_hybrid( + const llama_model & model, + const llm_graph_params & params) : + llm_graph_context_mamba(params) { + + const int64_t n_embd_head = hparams.n_embd_head_v; + GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); + + ggml_tensor * cur; + ggml_tensor * inpL; + + inpL = build_inp_embd(model.tok_embd); + + auto * inp = build_inp_mem_hybrid(); + + ggml_tensor * inp_out_ids = build_inp_out_ids(); + + // Positional embeddings populated if rope enabled + ggml_tensor * inp_pos = nullptr; + if (hparams.rope_finetuned) { + inp_pos = build_inp_pos(); + } + + for (int il = 0; il < n_layer; ++il) { + struct ggml_tensor * inpSA = inpL; + + // norm + cur = build_norm(inpL, + model.layers[il].attn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "attn_norm", il); + + if (hparams.is_recurrent(il)) { + // ssm layer // + cur = build_mamba2_layer(inp->get_recr(), cur, model, ubatch, il); + } else { + // attention layer // + cur = build_attention_layer( + cur, inp_pos, inp->get_attn(), model, + n_embd_head, il); + } + + if (il == n_layer - 1 && inp_out_ids) { + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); + } + + // ffn 
+ cur = build_layer_ffn(cur, inpSA, model, il); + + // input for next layer + inpL = cur; + } + + cur = inpL; + + cur = build_norm(cur, + model.output_norm, NULL, + LLM_NORM_RMS, -1); + + cb(cur, "result_norm", -1); + res->t_embd = cur; + + // lm_head + cur = build_lora_mm(model.output, cur); + + // For Granite architectures - scale logits + if (hparams.f_logit_scale) { + cur = ggml_scale(ctx0, cur, 1.0f / hparams.f_logit_scale); + } + cb(cur, "result_output", -1); + res->t_logits = cur; + + ggml_build_forward_expand(gf, cur); + } + + ggml_tensor * build_attention_layer( + ggml_tensor * cur, + ggml_tensor * inp_pos, + llm_graph_input_attn_kv_unified * inp_attn, + const llama_model & model, + const int64_t n_embd_head, + const int il) { + + // compute Q and K and (optionally) RoPE them + ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur); + cb(Qcur, "Qcur", il); + if (model.layers[il].bq) { + Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq); + cb(Qcur, "Qcur", il); + } + + ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur); + cb(Kcur, "Kcur", il); + if (model.layers[il].bk) { + Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk); + cb(Kcur, "Kcur", il); + } + + ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur); + cb(Vcur, "Vcur", il); + if (model.layers[il].bv) { + Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv); + cb(Vcur, "Vcur", il); + } + + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, hparams.n_head(il), n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, hparams.n_head_kv(il), n_tokens); + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, hparams.n_head_kv(il), n_tokens); + + const bool use_rope = hparams.rope_finetuned; + if (use_rope) { + ggml_tensor * rope_factors = model.get_rope_factors(cparams, il); + Qcur = ggml_rope_ext( + ctx0, Qcur, inp_pos, rope_factors, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + + Kcur = ggml_rope_ext( + ctx0, Kcur, inp_pos, rope_factors, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + } + + cb(Qcur, "Qcur", il); + cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); + + const float kq_scale = hparams.f_attention_scale == 0.0f ? 
1.0f/sqrtf(float(n_embd_head)) : hparams.f_attention_scale; + cur = build_attn(inp_attn, + model.layers[il].wo, model.layers[il].bo, + Qcur, Kcur, Vcur, nullptr, nullptr, kq_scale, il); + cb(cur, "attn_out", il); + return cur; + } + + ggml_tensor * build_layer_ffn( + ggml_tensor * cur, + ggml_tensor * inpSA, + const llama_model & model, + const int il) { + + // For Granite architectures - scale residual + if (hparams.f_residual_scale) { + cur = ggml_scale(ctx0, cur, hparams.f_residual_scale); + } + ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); + cb(ffn_inp, "ffn_inp", il); + + // feed-forward network (non-MoE) + if (model.layers[il].ffn_gate_inp == nullptr) { + + cur = build_norm(ffn_inp, + model.layers[il].ffn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "ffn_norm", il); + + cur = build_ffn(cur, + model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL, + model.layers[il].ffn_gate, model.layers[il].ffn_gate_b, NULL, + model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL, + NULL, + LLM_FFN_SILU, LLM_FFN_PAR, il); + cb(cur, "ffn_out", il); + + } else { + // MoE branch + cur = build_norm(ffn_inp, + model.layers[il].ffn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "ffn_norm", il); + + ggml_tensor * moe_out = build_moe_ffn(cur, + model.layers[il].ffn_gate_inp, + model.layers[il].ffn_up_exps, + model.layers[il].ffn_gate_exps, + model.layers[il].ffn_down_exps, + nullptr, + n_expert, n_expert_used, + LLM_FFN_SILU, true, + false, 0.0, + LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX, + il); + cb(moe_out, "ffn_moe_out", il); + + // For Granite MoE Shared + if (hparams.n_ff_shexp > 0) { + ggml_tensor * ffn_shexp = build_ffn(cur, + model.layers[il].ffn_up_shexp, NULL, NULL, + model.layers[il].ffn_gate_shexp, NULL, NULL, + model.layers[il].ffn_down_shexp, NULL, NULL, + NULL, + LLM_FFN_SILU, LLM_FFN_PAR, il); + cb(ffn_shexp, "ffn_shexp", il); + + cur = ggml_add(ctx0, moe_out, ffn_shexp); + cb(cur, "ffn_out", il); + } else { + cur = moe_out; + } + } + + // For Granite architectures - scale residual + if (hparams.f_residual_scale) { + cur = ggml_scale(ctx0, cur, hparams.f_residual_scale); + } + cur = ggml_add(ctx0, cur, ffn_inp); + cb(cur, "ffn_out", il); + + cur = build_cvec(cur, il); + cb(cur, "l_out", il); + + return cur; + } +}; + +struct llm_build_solar : public llm_graph_context { + llm_build_solar(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) { + const int64_t n_embd_head = hparams.n_embd_head_v; + GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); + GGML_ASSERT(n_embd_head == hparams.n_rot); + + struct ggml_tensor * cur; + struct ggml_tensor * inpL; + + inpL = build_inp_embd(model.tok_embd); + + // inp_pos - contains the positions + struct ggml_tensor * inp_pos = build_inp_pos(); + + // KQ_mask (mask for 1 head, it will be broadcasted to all heads) + auto * inp_attn = build_attn_inp_kv_unified(); + + const float kq_scale = hparams.f_attention_scale == 0.0f ? 
1.0f/sqrtf(float(n_embd_head)) : hparams.f_attention_scale; + + struct ggml_tensor * bskcn_1; + struct ggml_tensor * bskcn_2; + + for (int il = 0; il < n_layer; ++il) { + struct ggml_tensor * inpSA = inpL; + + if (hparams.n_bskcn(0, il)) { + bskcn_1 = inpSA; + } + + if (hparams.n_bskcn(1, il)) { + bskcn_2 = inpSA; + } + + if (hparams.n_bskcn(2, il)) { + inpSA = ggml_add( + ctx0, + ggml_mul(ctx0, bskcn_1, ggml_view_1d(ctx0, model.layers[il].bskcn_tv, 1, 0)), + ggml_mul(ctx0, inpSA, ggml_view_1d(ctx0, model.layers[il].bskcn_tv, 1, ggml_element_size(model.layers[il].bskcn_tv)))); + } + + if (hparams.n_bskcn(3, il)) { + inpSA = ggml_add( + ctx0, + ggml_mul(ctx0, bskcn_2, ggml_view_1d(ctx0, model.layers[il].bskcn_tv, 1, 0)), + ggml_mul(ctx0, inpSA, ggml_view_1d(ctx0, model.layers[il].bskcn_tv, 1, ggml_element_size(model.layers[il].bskcn_tv)))); + } + + // norm + cur = build_norm(inpL, + model.layers[il].attn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "attn_norm", il); + + // self-attention + { + // rope freq factors for llama3; may return nullptr for llama2 and other models + ggml_tensor * rope_factors = model.get_rope_factors(cparams, il); + + // compute Q and K and RoPE them + ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur); + cb(Qcur, "Qcur", il); + if (model.layers[il].bq) { + Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq); + cb(Qcur, "Qcur", il); + } + + ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur); + cb(Kcur, "Kcur", il); + if (model.layers[il].bk) { + Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk); + cb(Kcur, "Kcur", il); + } + + ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur); + cb(Vcur, "Vcur", il); + if (model.layers[il].bv) { + Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv); + cb(Vcur, "Vcur", il); + } + + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); + + Qcur = ggml_rope_ext( + ctx0, Qcur, inp_pos, rope_factors, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + + Kcur = ggml_rope_ext( + ctx0, Kcur, inp_pos, rope_factors, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + + cb(Qcur, "Qcur", il); + cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); + + cur = build_attn(inp_attn, + model.layers[il].wo, model.layers[il].bo, + Qcur, Kcur, Vcur, nullptr, nullptr, kq_scale, il); + cb(cur, "attn_out", il); + } + + if (il == n_layer - 1) { + // skip computing output for unused tokens + ggml_tensor * inp_out_ids = build_inp_out_ids(); + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); + } + + ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); + cb(ffn_inp, "ffn_inp", il); + + // feed-forward network + cur = build_norm(ffn_inp, + model.layers[il].ffn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "ffn_norm", il); + + cur = build_ffn(cur, + model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL, + model.layers[il].ffn_gate, model.layers[il].ffn_gate_b, NULL, + model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL, + NULL, + LLM_FFN_SILU, LLM_FFN_PAR, il); + cb(cur, "ffn_out", il); + + cur = ggml_add(ctx0, cur, ffn_inp); + cb(cur, "ffn_out", il); + + cur = build_cvec(cur, il); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; + } + + cur = inpL; + + cur = build_norm(cur, + model.output_norm, NULL, + 
LLM_NORM_RMS, -1); + + cb(cur, "result_norm", -1); + res->t_embd = cur; + + // lm_head + cur = build_lora_mm(model.output, cur); + + cb(cur, "result_output", -1); + res->t_logits = cur; + + ggml_build_forward_expand(gf, cur); + } +}; + // ref: https://github.com/facebookresearch/chameleon // based on the original build_llama() function, changes: // * qk-norm @@ -12264,7 +15704,7 @@ struct llm_build_arwkv7 : public llm_build_rwkv7_base { // * removed bias // * removed MoE struct llm_build_chameleon : public llm_graph_context { - llm_build_chameleon(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + llm_build_chameleon(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) { const int64_t n_embd_head = hparams.n_embd_head_v; GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); @@ -12280,6 +15720,8 @@ struct llm_build_chameleon : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified(); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -12353,24 +15795,22 @@ struct llm_build_chameleon : public llm_graph_context { cb(Kcur, "Kcur", il); cb(Vcur, "Vcur", il); - cur = build_attn(inp_attn, gf, + cur = build_attn(inp_attn, model.layers[il].wo, nullptr, Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); - - if (hparams.swin_norm) { - cur = build_norm(cur, - model.layers[il].attn_norm, NULL, - LLM_NORM_RMS, il); - } } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); } + if (hparams.swin_norm) { + cur = build_norm(cur, + model.layers[il].attn_norm, NULL, + LLM_NORM_RMS, il); + } + ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); cb(ffn_inp, "ffn_inp", il); @@ -12440,167 +15880,8 @@ struct llm_build_chameleon : public llm_graph_context { } }; -struct llm_build_solar : public llm_graph_context { - llm_build_solar(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { - const int64_t n_embd_head = hparams.n_embd_head_v; - GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); - GGML_ASSERT(n_embd_head == hparams.n_rot); - - struct ggml_tensor * cur; - struct ggml_tensor * inpL; - - inpL = build_inp_embd(model.tok_embd); - - // inp_pos - contains the positions - struct ggml_tensor * inp_pos = build_inp_pos(); - - // KQ_mask (mask for 1 head, it will be broadcasted to all heads) - auto * inp_attn = build_attn_inp_kv_unified(); - - const float kq_scale = hparams.f_attention_scale == 0.0f ? 
1.0f/sqrtf(float(n_embd_head)) : hparams.f_attention_scale; - - struct ggml_tensor * bskcn_1; - struct ggml_tensor * bskcn_2; - - for (int il = 0; il < n_layer; ++il) { - struct ggml_tensor * inpSA = inpL; - - if (hparams.n_bskcn(0, il)) { - bskcn_1 = inpSA; - } - - if (hparams.n_bskcn(1, il)) { - bskcn_2 = inpSA; - } - - if (hparams.n_bskcn(2, il)) { - inpSA = ggml_add( - ctx0, - ggml_mul(ctx0, bskcn_1, ggml_view_1d(ctx0, model.layers[il].bskcn_tv, 1, 0)), - ggml_mul(ctx0, inpSA, ggml_view_1d(ctx0, model.layers[il].bskcn_tv, 1, ggml_element_size(model.layers[il].bskcn_tv)))); - } - - if (hparams.n_bskcn(3, il)) { - inpSA = ggml_add( - ctx0, - ggml_mul(ctx0, bskcn_2, ggml_view_1d(ctx0, model.layers[il].bskcn_tv, 1, 0)), - ggml_mul(ctx0, inpSA, ggml_view_1d(ctx0, model.layers[il].bskcn_tv, 1, ggml_element_size(model.layers[il].bskcn_tv)))); - } - - // norm - cur = build_norm(inpL, - model.layers[il].attn_norm, NULL, - LLM_NORM_RMS, il); - cb(cur, "attn_norm", il); - - // self-attention - { - // rope freq factors for llama3; may return nullptr for llama2 and other models - ggml_tensor * rope_factors = model.get_rope_factors(n_ctx_per_seq, il); - - // compute Q and K and RoPE them - ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur); - cb(Qcur, "Qcur", il); - if (model.layers[il].bq) { - Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq); - cb(Qcur, "Qcur", il); - } - - ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur); - cb(Kcur, "Kcur", il); - if (model.layers[il].bk) { - Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk); - cb(Kcur, "Kcur", il); - } - - ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur); - cb(Vcur, "Vcur", il); - if (model.layers[il].bv) { - Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv); - cb(Vcur, "Vcur", il); - } - - Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); - Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); - Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); - - Qcur = ggml_rope_ext( - ctx0, Qcur, inp_pos, rope_factors, - n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow - ); - - Kcur = ggml_rope_ext( - ctx0, Kcur, inp_pos, rope_factors, - n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow - ); - - cb(Qcur, "Qcur", il); - cb(Kcur, "Kcur", il); - cb(Vcur, "Vcur", il); - - cur = build_attn(inp_attn, gf, - model.layers[il].wo, model.layers[il].bo, - Qcur, Kcur, Vcur, nullptr, nullptr, kq_scale, il); - cb(cur, "attn_out", il); - } - - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); - cur = ggml_get_rows(ctx0, cur, inp_out_ids); - inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); - } - - ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); - cb(ffn_inp, "ffn_inp", il); - - // feed-forward network - cur = build_norm(ffn_inp, - model.layers[il].ffn_norm, NULL, - LLM_NORM_RMS, il); - cb(cur, "ffn_norm", il); - - cur = build_ffn(cur, - model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL, - model.layers[il].ffn_gate, model.layers[il].ffn_gate_b, NULL, - model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL, - NULL, - LLM_FFN_SILU, LLM_FFN_PAR, il); - cb(cur, "ffn_out", il); - - cur = ggml_add(ctx0, cur, ffn_inp); - cb(cur, "ffn_out", il); - - cur = build_cvec(cur, il); - cb(cur, "l_out", il); - - // input for next layer - inpL = cur; - } - - cur = inpL; - - cur = build_norm(cur, - model.output_norm, NULL, 
- LLM_NORM_RMS, -1); - - cb(cur, "result_norm", -1); - res->t_embd = cur; - - // lm_head - cur = build_lora_mm(model.output, cur); - - cb(cur, "result_output", -1); - res->t_logits = cur; - - ggml_build_forward_expand(gf, cur); - } -}; - struct llm_build_wavtokenizer_dec : public llm_graph_context { - llm_build_wavtokenizer_dec(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + llm_build_wavtokenizer_dec(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) { ggml_tensor * cur; ggml_tensor * inpL; @@ -12752,7 +16033,7 @@ struct llm_build_wavtokenizer_dec : public llm_graph_context { }; struct llm_build_plm : public llm_graph_context { - llm_build_plm(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + llm_build_plm(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) { const float kq_scale = 1.0f/sqrtf(float(hparams.n_embd_head_k)); const uint32_t n_embd_head_qk_rope = hparams.n_rot; @@ -12770,6 +16051,8 @@ struct llm_build_plm : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified(); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -12868,14 +16151,12 @@ struct llm_build_plm : public llm_graph_context { ggml_tensor * k_states = ggml_concat(ctx0, k_nope, ggml_repeat(ctx0, k_pe, q_pe), 0); cb(k_states, "k_states", il); - cur = build_attn(inp_attn, gf, + cur = build_attn(inp_attn, model.layers[il].wo, NULL, q_states, k_states, v_states, nullptr, nullptr, kq_scale, il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); } @@ -12924,7 +16205,7 @@ struct llm_build_plm : public llm_graph_context { }; struct llm_build_bailingmoe : public llm_graph_context { - llm_build_bailingmoe(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { + llm_build_bailingmoe(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) { ggml_tensor * cur; ggml_tensor * inpL; @@ -12935,6 +16216,8 @@ struct llm_build_bailingmoe : public llm_graph_context { auto * inp_attn = build_attn_inp_kv_unified(); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + for (int il = 0; il < n_layer; ++il) { ggml_tensor * inpSA = inpL; @@ -12947,7 +16230,7 @@ struct llm_build_bailingmoe : public llm_graph_context { // self-attention { // rope freq factors for llama3; may return nullptr for llama2 and other models - ggml_tensor * rope_factors = model.get_rope_factors(n_ctx_per_seq, il); + ggml_tensor * rope_factors = model.get_rope_factors(cparams, il); // compute Q and K and RoPE them ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur); @@ -12991,14 +16274,12 @@ struct llm_build_bailingmoe : public llm_graph_context { cb(Kcur, "Kcur", il); cb(Vcur, "Vcur", il); - cur = build_attn(inp_attn, gf, + cur = build_attn(inp_attn, model.layers[il].wo, model.layers[il].bo, Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_rot)), il); } - if (il == n_layer - 1) { - // skip computing output for unused tokens - ggml_tensor * inp_out_ids = build_inp_out_ids(); + if (il == n_layer - 1 && inp_out_ids) { cur = ggml_get_rows(ctx0, cur, inp_out_ids); inpSA = ggml_get_rows(ctx0, 
inpSA, inp_out_ids); } @@ -13067,256 +16348,2239 @@ struct llm_build_bailingmoe : public llm_graph_context { } }; +struct llm_build_dots1 : public llm_graph_context { + llm_build_dots1(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) { + const int64_t n_embd_head = hparams.n_embd_head_v; + + GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); + GGML_ASSERT(n_embd_head == hparams.n_rot); + + ggml_tensor * cur; + ggml_tensor * inpL; + + inpL = build_inp_embd(model.tok_embd); + + // inp_pos - contains the positions + ggml_tensor * inp_pos = build_inp_pos(); + + auto * inp_attn = build_attn_inp_kv_unified(); + + ggml_tensor * inp_out_ids = build_inp_out_ids(); + + for (int il = 0; il < n_layer; ++il) { + ggml_tensor * inpSA = inpL; + + // norm + cur = build_norm(inpL, + model.layers[il].attn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "attn_norm", il); + + // self_attention + { + // compute Q and K and RoPE them + ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur); + cb(Qcur, "Qcur", il); + + ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur); + cb(Kcur, "Kcur", il); + + ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur); + cb(Vcur, "Vcur", il); + + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); + + Qcur = build_norm(Qcur, model.layers[il].attn_q_norm, NULL, LLM_NORM_RMS, il); + cb(Qcur, "Qcur_normed", il); + + Qcur = ggml_rope_ext( + ctx0, Qcur, inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + + Kcur = build_norm(Kcur, model.layers[il].attn_k_norm, NULL, LLM_NORM_RMS, il); + cb(Kcur, "Kcur_normed", il); + + Kcur = ggml_rope_ext( + ctx0, Kcur, inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + + cb(Qcur, "Qcur", il); + cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); + + cur = build_attn(inp_attn, + model.layers[il].wo, model.layers[il].bo, + Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); + } + + if (il == n_layer - 1 && inp_out_ids) { + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); + } + + ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); + cb(ffn_inp, "ffn_inp", il); + + // MoE branch + cur = build_norm(ffn_inp, + model.layers[il].ffn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "ffn_norm", il); + + if ((uint32_t) il < hparams.n_layer_dense_lead) { + cur = build_ffn(cur, + model.layers[il].ffn_up, NULL, NULL, + model.layers[il].ffn_gate, NULL, NULL, + model.layers[il].ffn_down, NULL, NULL, + NULL, + LLM_FFN_SILU, LLM_FFN_PAR, il); + cb(cur, "ffn_out", il); + } else { + ggml_tensor * moe_out = + build_moe_ffn(cur, + model.layers[il].ffn_gate_inp, + model.layers[il].ffn_up_exps, + model.layers[il].ffn_gate_exps, + model.layers[il].ffn_down_exps, + model.layers[il].ffn_exp_probs_b, + n_expert, n_expert_used, + LLM_FFN_SILU, hparams.expert_weights_norm, + true, hparams.expert_weights_scale, + (llama_expert_gating_func_type) hparams.expert_gating_func, + il); + cb(moe_out, "ffn_moe_out", il); + + { + ggml_tensor * ffn_shexp = build_ffn(cur, + model.layers[il].ffn_up_shexp, NULL, NULL, + model.layers[il].ffn_gate_shexp, NULL, NULL, + model.layers[il].ffn_down_shexp, NULL, NULL, + NULL, + LLM_FFN_SILU, LLM_FFN_PAR, il); + cb(ffn_shexp, 
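// Editor's note: the dots1 FFN above sums a routed MoE output with an always-on
// shared expert (cur = moe_out + ffn_shexp). A minimal standalone scalar sketch
// of that combination; toy_* names are invented for illustration, and plain
// renormalized top-k softmax is only one plausible gating (the real code picks
// the gating function from hparams.expert_gating_func):
#include <algorithm>
#include <cmath>
#include <vector>

// one routed step for a single token: top-k softmax over router logits,
// weighted sum of the selected expert outputs, plus the shared expert
float toy_moe_with_shared(const std::vector<float> & router_logits,
                          const std::vector<float> & expert_outs,
                          float shared_out, int k) {
    std::vector<int> idx(router_logits.size());
    for (size_t i = 0; i < idx.size(); ++i) idx[i] = (int) i;
    std::partial_sort(idx.begin(), idx.begin() + k, idx.end(),
        [&](int a, int b) { return router_logits[a] > router_logits[b]; });
    float denom = 0.0f, out = 0.0f;
    for (int i = 0; i < k; ++i) denom += std::exp(router_logits[idx[i]]);
    for (int i = 0; i < k; ++i) {
        const float w = std::exp(router_logits[idx[i]]) / denom; // renormalized top-k weight
        out += w * expert_outs[idx[i]];
    }
    return out + shared_out; // the shared expert bypasses the router entirely
}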
"ffn_shexp", il); + + cur = ggml_add(ctx0, moe_out, ffn_shexp); + cb(cur, "ffn_out", il); + } + } + + cur = ggml_add(ctx0, cur, ffn_inp); + + cur = build_cvec(cur, il); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; + } + + cur = inpL; + + cur = build_norm(cur, + model.output_norm, NULL, + LLM_NORM_RMS, -1); + + cb(cur, "result_norm", -1); + res->t_embd = cur; + + // lm_head + cur = build_lora_mm(model.output, cur); + + cb(cur, "result_output", -1); + res->t_logits = cur; + + ggml_build_forward_expand(gf, cur); + } +}; + +struct llm_build_ernie4_5 : public llm_graph_context { + llm_build_ernie4_5(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) { + const int64_t n_embd_head = hparams.n_embd_head_v; + + GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); + GGML_ASSERT(n_embd_head == hparams.n_rot); + + ggml_tensor * cur; + ggml_tensor * inpL; + + inpL = build_inp_embd(model.tok_embd); + + // inp_pos - contains the positions + ggml_tensor * inp_pos = build_inp_pos(); + + auto * inp_attn = build_attn_inp_kv_unified(); + + for (int il = 0; il < n_layer; ++il) { + ggml_tensor * inpSA = inpL; + + // norm + { + cur = build_norm(inpL, + model.layers[il].attn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "attn_norm", il); + } + + // self-attention + { + ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur); + cb(Qcur, "Qcur", il); + if (model.layers[il].bq) { + Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq); + cb(Qcur, "Qcur", il); + } + + ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur); + cb(Kcur, "Kcur", il); + if (model.layers[il].bk) { + Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk); + cb(Kcur, "Kcur", il); + } + + ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur); + cb(Vcur, "Vcur", il); + if (model.layers[il].bv) { + Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv); + cb(Vcur, "Vcur", il); + } + + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); + + Qcur = ggml_rope_ext( + ctx0, Qcur, inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + + Kcur = ggml_rope_ext( + ctx0, Kcur, inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + + cb(Qcur, "Qcur", il); + cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); + + cur = build_attn(inp_attn, + model.layers[il].wo, NULL, + Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); + } + + if (il == n_layer - 1) { + // skip computing output for unused tokens + ggml_tensor * inp_out_ids = build_inp_out_ids(); + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); + } + + ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); + cb(ffn_inp, "ffn_inp", il); + + // feed-forward network + { + cur = build_norm(ffn_inp, + model.layers[il].ffn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "ffn_norm", il); + + cur = build_ffn(cur, + model.layers[il].ffn_up, NULL, NULL, + model.layers[il].ffn_gate, NULL, NULL, + model.layers[il].ffn_down, NULL, NULL, + NULL, + LLM_FFN_SILU, LLM_FFN_PAR, il); + cb(cur, "ffn_out", il); + } + + cur = ggml_add(ctx0, cur, ffn_inp); + + cur = build_cvec(cur, il); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; + } + + cur = inpL; + + cur = build_norm(cur, + model.output_norm, 
NULL, + LLM_NORM_RMS, -1); + + cb(cur, "result_norm", -1); + res->t_embd = cur; + + // lm_head + cur = build_lora_mm(model.output, cur); + + cb(cur, "result_output", -1); + res->t_logits = cur; + + ggml_build_forward_expand(gf, cur); + } +}; + +struct llm_build_ernie4_5_moe : public llm_graph_context { + llm_build_ernie4_5_moe(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) { + const int64_t n_embd_head = hparams.n_embd_head_v; + + GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); + GGML_ASSERT(n_embd_head == hparams.n_rot); + + ggml_tensor * cur; + ggml_tensor * inpL; + + inpL = build_inp_embd(model.tok_embd); + + // inp_pos - contains the positions + ggml_tensor * inp_pos = build_inp_pos(); + + auto * inp_attn = build_attn_inp_kv_unified(); + + ggml_tensor * inp_out_ids = build_inp_out_ids(); + + GGML_ASSERT(hparams.n_moe_layer_step > 0 && "Ernie 4.5 MoE requires n_moe_layer_step > 0"); + for (int il = 0; il < n_layer; ++il) { + ggml_tensor * inpSA = inpL; + // norm + { + cur = build_norm(inpL, + model.layers[il].attn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "attn_norm", il); + } + + // self-attention + { + // compute Q and K and RoPE them + ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur); + cb(Qcur, "Qcur", il); + if (model.layers[il].bq) { + Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq); + cb(Qcur, "Qcur", il); + } + + ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur); + cb(Kcur, "Kcur", il); + if (model.layers[il].bk) { + Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk); + cb(Kcur, "Kcur", il); + } + + ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur); + cb(Vcur, "Vcur", il); + if (model.layers[il].bv) { + Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv); + cb(Vcur, "Vcur", il); + } + + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); + + Qcur = ggml_rope_ext( + ctx0, Qcur, inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + + Kcur = ggml_rope_ext( + ctx0, Kcur, inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + + cb(Qcur, "Qcur", il); + cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); + + cur = build_attn(inp_attn, + model.layers[il].wo, NULL, + Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); + cb(cur, "attn_out", il); + } + + if (il == n_layer - 1 && inp_out_ids) { + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); + } + + ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); + cb(ffn_inp, "ffn_inp", il); + + // feed-forward network + bool is_moe_layer = static_cast<uint32_t>(il) >= hparams.n_layer_dense_lead && (il + 1) % hparams.n_moe_layer_step == 0; + + if (!is_moe_layer) { + cur = build_norm(ffn_inp, + model.layers[il].ffn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "ffn_norm", il); + + cur = build_ffn(cur, + model.layers[il].ffn_up, NULL, NULL, + model.layers[il].ffn_gate, NULL, NULL, + model.layers[il].ffn_down, NULL, NULL, + NULL, + LLM_FFN_SILU, LLM_FFN_PAR, il); + cb(cur, "ffn_out", il); + } else { + // MoE branch + cur = build_norm(ffn_inp, + model.layers[il].ffn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "ffn_norm", il); + + ggml_tensor * moe_out = build_moe_ffn(cur, + model.layers[il].ffn_gate_inp, +
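// Editor's note: with the <uint32_t> cast restored above, the Ernie 4.5 MoE
// dense/MoE interleave is easy to check in isolation. A small runnable sketch;
// the parameter values are hypothetical, only the predicate mirrors the code:
#include <cstdint>
#include <cstdio>

static bool toy_is_moe_layer(int il, uint32_t n_dense_lead, uint32_t n_step) {
    return (uint32_t) il >= n_dense_lead && (il + 1) % n_step == 0;
}

int main() {
    // e.g. 1 dense lead layer, MoE every 2nd layer: layers 1, 3, 5, ... are MoE
    for (int il = 0; il < 6; ++il) {
        std::printf("layer %d: %s\n", il, toy_is_moe_layer(il, 1, 2) ? "moe" : "dense");
    }
    return 0;
}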
model.layers[il].ffn_up_exps, + model.layers[il].ffn_gate_exps, + model.layers[il].ffn_down_exps, + model.layers[il].ffn_exp_probs_b, + n_expert, n_expert_used, + LLM_FFN_SILU, true, + false, 0.0, + LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX, + il); + cb(moe_out, "ffn_moe_out", il); + + // Shared expert (if present) + if (hparams.n_ff_shexp > 0) { + ggml_tensor * ffn_shexp = build_ffn(cur, + model.layers[il].ffn_up_shexp, NULL, NULL, + model.layers[il].ffn_gate_shexp, NULL, NULL, + model.layers[il].ffn_down_shexp, NULL, NULL, + NULL, + LLM_FFN_SILU, LLM_FFN_PAR, il); + cb(ffn_shexp, "ffn_shexp", il); + + cur = ggml_add(ctx0, moe_out, ffn_shexp); + } else { + cur = moe_out; + } + cb(cur, "ffn_out", il); + } + + cur = ggml_add(ctx0, cur, ffn_inp); + cb(cur, "ffn_out", il); + + cur = build_cvec(cur, il); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; + } + + cur = inpL; + + cur = build_norm(cur, + model.output_norm, NULL, + LLM_NORM_RMS, -1); + + cb(cur, "result_norm", -1); + res->t_embd = cur; + + // lm_head + cur = build_lora_mm(model.output, cur); + + cb(cur, "result_output", -1); + res->t_logits = cur; + + ggml_build_forward_expand(gf, cur); + } +}; + +struct llm_build_falcon_h1 : public llm_graph_context_mamba { + llm_build_falcon_h1(const llama_model & model, const llm_graph_params & params) : llm_graph_context_mamba(params) { + const int64_t n_embd_head = hparams.n_embd_head_v; + + ggml_tensor * cur; + ggml_tensor * inpL; + + inpL = build_inp_embd(model.tok_embd); + + // inp_pos - contains the positions + ggml_tensor * inp_pos = build_inp_pos(); + + // Build the inputs in the recurrent & kv cache + auto * inp = build_inp_mem_hybrid(); + + const float kq_scale = hparams.f_attention_scale == 0.0f ? 1.0f/sqrtf(float(n_embd_head)) : hparams.f_attention_scale; + + ggml_tensor * inp_out_ids = build_inp_out_ids(); + + for (int il = 0; il < n_layer; ++il) { + ggml_tensor * inpSA = inpL; + + cur = build_norm(inpL, + model.layers[il].attn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "attn_norm", il); + + // self-attention + ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur); + cb(Qcur, "Qcur", il); + + ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur); + cb(Kcur, "Kcur", il); + + ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur); + cb(Vcur, "Vcur", il); + + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); + + Qcur = ggml_rope_ext( + ctx0, Qcur, inp_pos, nullptr, + n_rot, hparams.rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow); + + Kcur = ggml_rope_ext( + ctx0, Kcur, inp_pos, nullptr, + n_rot, hparams.rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + + cb(Qcur, "Qcur-post-rope", il); + cb(Kcur, "Kcur-post-rope", il); + cb(Vcur, "Vcur-post-rope", il); + + ggml_tensor * attn_out = build_attn(inp->get_attn(), + model.layers[il].wo, NULL, + Qcur, Kcur, Vcur, nullptr, nullptr, kq_scale, il); + cb(attn_out, "attn_out", il); + + cur = build_norm(inpL, + model.layers[il].attn_norm, NULL, + LLM_NORM_RMS, il); + // Mamba2 layer + cb(cur, "ssm_in", il); + + ggml_tensor * ssm_out = build_mamba2_layer(inp->get_recr(), cur, model, ubatch, il); + cb(ssm_out, "ssm_out", il); + + // // Aggregation + cur = ggml_add(ctx0, attn_out, ssm_out); + inpSA = ggml_add(ctx0, cur, inpSA); + cb(cur, "layer_out", il); + + if 
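// Editor's note: unlike a sequential hybrid, the Falcon-H1 layer above runs
// attention and the Mamba2 mixer in parallel on the same RMS-normed input and
// sums the two branch outputs before the residual (cur = attn_out + ssm_out).
// A scalar sketch of that dataflow; the toy_* type and the identity stand-in
// for the norm are invented for illustration:
#include <functional>

struct toy_h1_layer {
    std::function<float(float)> attn, ssm; // stand-ins for the two mixers
    float forward(float x) const {
        const float h = x;                 // residual (inpSA above)
        const float n = norm(x);           // attn_norm is applied once per branch above
        return h + (attn(n) + ssm(n));     // parallel branches, then aggregate
    }
    static float norm(float x) { return x; } // identity stand-in for RMS norm
};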
(il == n_layer - 1 && inp_out_ids) { + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); + } + + ggml_tensor * ffn_inp = inpSA; + cb(ffn_inp, "ffn_inp", il); + + // feed-forward network + cur = build_norm(ffn_inp, + model.layers[il].ffn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "ffn_norm", il); + + cur = build_ffn(cur, + model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL, + model.layers[il].ffn_gate, model.layers[il].ffn_gate_b, NULL, + model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL, + NULL, + LLM_FFN_SILU, LLM_FFN_PAR, il); + cb(cur, "ffn_out", il); + + cur = ggml_add(ctx0, cur, inpSA); + + cur = build_cvec(cur, il); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; + } + + cur = inpL; + + cur = build_norm(cur, + model.output_norm, NULL, + LLM_NORM_RMS, -1); + + cb(cur, "result_norm", -1); + res->t_embd = cur; + + // lm_head + cur = build_lora_mm(model.output, cur); + + cb(cur, "result_output", -1); + res->t_logits = cur; + + ggml_build_forward_expand(gf, cur); + } +}; + +struct llm_build_plamo2 : public llm_graph_context_mamba { + llm_build_plamo2(const llama_model & model, const llm_graph_params & params) : llm_graph_context_mamba(params) { + ggml_tensor * cur; + ggml_tensor * inpL; + + // {n_embd, n_tokens} + inpL = build_inp_embd(model.tok_embd); + cb(inpL, "embedding_output", -1); + + ggml_tensor * inp_pos = build_inp_pos(); + + auto * inp_hybrid = build_inp_mem_hybrid(); + + ggml_tensor * inp_out_ids = build_inp_out_ids(); + + for (int il = 0; il < n_layer; ++il) { + ggml_tensor * residual = inpL; + + // ggml_graph_add_node(gf, model.layers[il].attn_norm); + // cb(model.layers[il].attn_norm, "attn_norm", il); + + // pre_mixer_norm + cur = build_norm(inpL, model.layers[il].attn_norm, NULL, LLM_NORM_RMS, il); + + // check if this layer is Mamba or Attention + bool is_mamba_layer = hparams.is_recurrent(il); + + if (is_mamba_layer) { + // PLaMo-2 Mamba layer + cur = build_plamo2_mamba_layer(inp_hybrid->get_recr(), cur, model, ubatch, il); + } else { + // PLaMo-2 Attention layer + cur = build_plamo2_attn_layer(inp_hybrid->get_attn(), inp_pos, cur, model, il); + } + + // post_mixer_norm + cur = build_norm(cur, model.layers[il].attn_post_norm, NULL, LLM_NORM_RMS, il); + cb(cur, "attn_post_norm", il); + + // residual connection + cur = ggml_add(ctx0, cur, residual); + cb(cur, "attn_residual", il); + residual = cur; + + // pre-ffn norm + cur = build_norm(cur, model.layers[il].ffn_norm, NULL, LLM_NORM_RMS, il); + cb(cur, "ffn_pre_norm", il); + + // feed-forward network + cur = build_ffn(cur, + model.layers[il].ffn_up, NULL, NULL, + NULL, NULL, NULL, + model.layers[il].ffn_down, NULL, NULL, + NULL, + LLM_FFN_SWIGLU, LLM_FFN_SEQ, il); + cb(cur, "ffn_out", il); + + // post ffn norm + cur = build_norm(cur, model.layers[il].ffn_post_norm, NULL, LLM_NORM_RMS, il); + cb(cur, "ffn_post_norm", il); + + if (il == n_layer - 1 && inp_out_ids) { + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + residual = ggml_get_rows(ctx0, residual, inp_out_ids); + } + + // residual connection + cur = ggml_add(ctx0, cur, residual); + cb(cur, "ffn_residual", il); + + inpL = cur; + } + + cur = inpL; + + // final norm + cur = build_norm(cur, model.output_norm, NULL, LLM_NORM_RMS, -1); + cb(cur, "result_norm", -1); + + // lm_head + cur = build_lora_mm(model.output, cur); + cb(cur, "result_output", -1); + + // Explicitly mark as output tensor to ensure proper backend assignment + ggml_set_output(cur); + + res->t_logits = cur; + + 
ggml_build_forward_expand(gf, cur); + } + +private: + ggml_tensor * build_plamo2_attn_layer( + llm_graph_input_attn_kv_unified * inp, + ggml_tensor * inp_pos, + ggml_tensor * cur, + const llama_model & model, + int il) { + + // self-attention + { + // PLaMo-2 uses combined QKV tensor + ggml_tensor * qkv = build_lora_mm(model.layers[il].wqkv, cur); + cb(qkv, "wqkv", il); + + // split QKV tensor into Q, K, V + const int64_t n_embd_head_q = hparams.n_embd_head_k; + const int64_t n_embd_head_k = hparams.n_embd_head_k; + const int64_t n_embd_head_v = hparams.n_embd_head_v; + int32_t n_head_kv = hparams.n_head_kv(il); + + const int64_t q_offset = 0; + const int64_t k_offset = n_embd_head_q * n_head; + const int64_t v_offset = k_offset + n_embd_head_k * n_head_kv; + + ggml_tensor * Qcur = ggml_view_3d(ctx0, qkv, n_embd_head_q, n_head, n_tokens, n_embd_head_q * sizeof(float), qkv->nb[1], q_offset * ggml_element_size(qkv)); + ggml_tensor * Kcur = ggml_view_3d(ctx0, qkv, n_embd_head_k, n_head_kv, n_tokens, n_embd_head_k * sizeof(float), qkv->nb[1], k_offset * ggml_element_size(qkv)); + ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, qkv, n_embd_head_v * n_head_kv, n_tokens, qkv->nb[1], v_offset * ggml_element_size(qkv))); + + cb(Qcur, "Qcur", il); + cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); + + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head_v, n_head_kv, n_tokens); + + Qcur = build_norm(Qcur, model.layers[il].attn_q_norm, NULL, LLM_NORM_RMS, il); + cb(Qcur, "Qcur_normed", il); + + Qcur = ggml_rope_ext( + ctx0, Qcur, inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + + Kcur = build_norm(Kcur, model.layers[il].attn_k_norm, NULL, LLM_NORM_RMS, il); + cb(Kcur, "Kcur_normed", il); + + Kcur = ggml_rope_ext( + ctx0, Kcur, inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + + cur = build_attn(inp, model.layers[il].wo, NULL, Qcur, Kcur, Vcur, NULL, NULL, 1.0f/sqrtf(float(n_embd_head_v)), il); + } + + cb(cur, "attn_out", il); + + return cur; + } + + ggml_tensor * build_plamo2_mamba_layer( + llm_graph_input_rs * inp, + ggml_tensor * cur, + const llama_model & model, + const llama_ubatch & ubatch, + int il) { + + const auto * mctx_cur = inp->mctx; + + const auto kv_head = mctx_cur->get_head(); + + const int64_t d_conv = hparams.ssm_d_conv; + const int64_t d_inner = hparams.ssm_d_inner; + const int64_t d_state = hparams.ssm_d_state; + const int64_t n_heads = hparams.ssm_dt_rank; + const int64_t head_dim = d_inner / n_heads; + const int64_t n_group = hparams.ssm_n_group; + const int64_t n_seqs = ubatch.n_seqs; + + const int64_t n_seq_tokens = ubatch.n_seq_tokens; + + GGML_ASSERT(n_seqs != 0); + GGML_ASSERT(ubatch.equal_seqs()); + GGML_ASSERT(ubatch.n_tokens == n_seq_tokens * n_seqs); + + ggml_tensor * conv_states_all = mctx_cur->get_r_l(il); + ggml_tensor * ssm_states_all = mctx_cur->get_s_l(il); + + ggml_tensor * conv = build_rs(inp, conv_states_all, hparams.n_embd_r(), n_seqs); + conv = ggml_reshape_3d(ctx0, conv, d_conv - 1, d_inner + 2*n_group*d_state, n_seqs); + + // {n_embd, n_tokens} => {n_embd, n_seq_tokens, n_seqs} + cur = ggml_reshape_3d(ctx0, cur, cur->ne[0], n_seq_tokens, n_seqs); + + // in_proj: {n_embd, 2*d_inner} @ {n_embd, n_seq_tokens, n_seqs} => {2*d_inner, n_seq_tokens, n_seqs} + ggml_tensor * zx = build_lora_mm(model.layers[il].ssm_in, cur); + cb(zx, "mamba_in_proj", il); + // {8192, 5, 1, 1} -> {8192, 1, 5, 1} + zx = 
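// Editor's note: the PLaMo-2 fused-QKV split above is three strided views at
// increasing column offsets into one projection output. A sketch of just the
// offset arithmetic, with concrete but hypothetical head sizes:
#include <cstdint>
#include <cstdio>

int main() {
    const int64_t head_dim = 128, n_head = 32, n_head_kv = 4;
    const int64_t q_off = 0;
    const int64_t k_off = head_dim * n_head;            // Q occupies the first columns
    const int64_t v_off = k_off + head_dim * n_head_kv; // K follows, then V
    std::printf("q=%lld k=%lld v=%lld total=%lld\n",
                (long long) q_off, (long long) k_off, (long long) v_off,
                (long long) (v_off + head_dim * n_head_kv));
    return 0;
}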
ggml_permute(ctx0, zx, 0, 2, 1, 3); + zx = ggml_cont(ctx0, zx); + zx = ggml_reshape_4d(ctx0, zx, head_dim * 2, n_heads, n_seq_tokens, n_seqs); + cb(zx, "mamba_in_proj_out", il); + + // split into z and x + // => {head_dim * n_heads, n_seq_tokens, n_seqs} + ggml_tensor * x = ggml_view_4d(ctx0, zx, head_dim, n_heads, n_seq_tokens, n_seqs, zx->nb[1], zx->nb[2], zx->nb[3], head_dim*ggml_element_size(zx)); + x = ggml_cont(ctx0, x); + x = ggml_reshape_3d(ctx0, x, head_dim * n_heads, n_seq_tokens, n_seqs); + // x = ggml_permute(ctx0, x, 0, 2, 1, 3); + cb(x, "mamba_x_split", il); + + ggml_tensor * z = ggml_view_4d(ctx0, zx, head_dim, n_heads, n_seq_tokens, n_seqs, zx->nb[1], zx->nb[2], zx->nb[3], 0); + cb(z, "mamba_z_split", il); + + // conv1d + { + // => {d_conv - 1 + n_seq_tokens, d_inner, n_seqs} + ggml_tensor * conv_x = ggml_concat(ctx0, conv, ggml_transpose(ctx0, x), 0); + cb(conv_x, "mamba_conv1d_input", il); + + // copy last (d_conv - 1) columns back into the state cache + ggml_tensor * last_conv = ggml_view_3d(ctx0, conv_x, d_conv - 1, d_inner, n_seqs, + conv_x->nb[1], conv_x->nb[2], n_seq_tokens*(conv_x->nb[0])); + + ggml_build_forward_expand(gf, + ggml_cpy(ctx0, last_conv, + ggml_view_1d(ctx0, conv_states_all, + (d_conv - 1)*(d_inner + 2*n_group*d_state)*(n_seqs), + kv_head*(d_conv - 1)*(d_inner + 2*n_group*d_state)*ggml_element_size(conv_states_all)))); + cb(conv_states_all, "mamba_conv1d_state", il); + + // 1D convolution + x = ggml_ssm_conv(ctx0, conv_x, model.layers[il].ssm_conv1d); + cb(x, "mamba_conv1d", il); + + x = ggml_silu(ctx0, x); + cb(x, "mamba_conv1d_silu", il); + } + + // SSM + { + // bcdt_proj: {d_inner, dt_rank + 2*d_state} @ {d_inner, n_seq_tokens, n_seqs} => {dt_rank + 2*d_state, n_seq_tokens, n_seqs} + ggml_tensor * x_bcdt = build_lora_mm(model.layers[il].ssm_x, x); + cb(x_bcdt, "mamba_bcdt_proj", il); + + // split into dt, B, C + const int64_t dt_dim = std::max(64, int(hparams.n_embd / 16)); + ggml_tensor * B = ggml_view_3d(ctx0, x_bcdt, d_state, n_seq_tokens, n_seqs, x_bcdt->nb[1], x_bcdt->nb[2], 0); + ggml_tensor * C = ggml_view_3d(ctx0, x_bcdt, d_state, n_seq_tokens, n_seqs, x_bcdt->nb[1], x_bcdt->nb[2], ggml_element_size(x_bcdt)*d_state); + ggml_tensor * dt = ggml_view_3d(ctx0, x_bcdt, dt_dim, n_seq_tokens, n_seqs, x_bcdt->nb[1], x_bcdt->nb[2], ggml_element_size(x_bcdt)*(2*d_state)); + cb(B, "mamba_B_raw", il); + cb(C, "mamba_C_raw", il); + cb(dt, "mamba_dt_raw", il); + + // Apply RMS norm to dt, B, C (PLaMo-2 specific) + B = build_norm(B, model.layers[il].ssm_b_norm, NULL, LLM_NORM_RMS, il); + C = build_norm(C, model.layers[il].ssm_c_norm, NULL, LLM_NORM_RMS, il); + dt = build_norm(dt, model.layers[il].ssm_dt_norm, NULL, LLM_NORM_RMS, il); + cb(B, "mamba_B_normed", il); + cb(C, "mamba_C_normed", il); + cb(dt, "mamba_dt_normed", il); + + // dt_proj: {dt_rank, d_inner} @ {dt_rank, n_seq_tokens, n_seqs} => {d_inner, n_seq_tokens, n_seqs} + dt = build_lora_mm(model.layers[il].ssm_dt, dt); + dt = ggml_add(ctx0, dt, model.layers[il].ssm_dt_b); + cb(dt, "mamba_dt_proj", il); + + ggml_tensor * A = ggml_reshape_2d(ctx0, model.layers[il].ssm_a, 1, n_heads); + cb(A, "mamba_A", il); + + x = ggml_view_4d(ctx0, x, head_dim, n_heads, n_seq_tokens, n_seqs, head_dim * ggml_element_size(x), head_dim * n_heads * ggml_element_size(x), head_dim * n_heads * n_seq_tokens * ggml_element_size(x), 0); + B = ggml_view_4d(ctx0, B, d_state, 1, n_seq_tokens, n_seqs, d_state * B->nb[0], B->nb[1], B->nb[2], 0); + C = ggml_view_4d(ctx0, C, d_state, 1, n_seq_tokens, n_seqs, d_state * C->nb[0], 
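// Editor's note: the x_bcdt split above packs [B | C | dt] along dim 0, so the
// view offsets are 0, d_state, and 2*d_state, with dt taking dt_dim columns.
// Worked numbers for the dt_dim formula used above (n_embd/d_state values are
// hypothetical):
#include <algorithm>
#include <cstdio>

int main() {
    const int n_embd = 4096, d_state = 64;
    const int dt_dim = std::max(64, n_embd / 16); // 256 here
    std::printf("B at 0, C at %d, dt at %d, row width %d\n",
                d_state, 2 * d_state, 2 * d_state + dt_dim);
    return 0;
}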
C->nb[1], C->nb[2], 0); + + // use the states and the indices provided by build_recurrent_state + // (this is necessary in order to properly use the states before they are overwritten, + // while avoiding to make unnecessary copies of the states) + auto get_ssm_rows = [&](ggml_context * ctx, ggml_tensor * states, ggml_tensor * ids) { + ggml_tensor * ssm = ggml_reshape_4d(ctx, states, d_state, head_dim, n_heads, mctx_cur->get_size()); + + // Custom operator to optimize the parallel associative scan + // as described in the Annex D of the Mamba paper. + // => {d_inner, n_seq_tokens, n_seqs} and {d_state, d_inner, n_seqs} + return ggml_ssm_scan(ctx, ssm, x, dt, A, B, C, ids); + }; + + ggml_tensor * y_ssm = build_rs(inp, ssm_states_all, hparams.n_embd_s(), ubatch.n_seqs, get_ssm_rows); + cb(y_ssm, "mamba_ssm_scan", il); + + // store last states + ggml_build_forward_expand(gf, + ggml_cpy(ctx0, + ggml_view_1d(ctx0, y_ssm, n_heads*head_dim*d_state*n_seqs, n_heads*head_dim*n_seq_tokens*n_seqs*ggml_element_size(y_ssm)), + ggml_view_1d(ctx0, ssm_states_all, n_heads*head_dim*d_state*n_seqs, kv_head*n_seqs*n_heads*head_dim*d_state*ggml_element_size(ssm_states_all)))); + cb(ssm_states_all, "mamba_ssm_states", il); + + ggml_tensor * y = ggml_view_4d(ctx0, y_ssm, head_dim, n_heads, n_seq_tokens, n_seqs, head_dim * ggml_element_size(x), head_dim * n_heads * ggml_element_size(x), head_dim * n_heads * n_seq_tokens * ggml_element_size(x), 0); + cb(y, "mamba_y_view", il); + + // Add D parameter and apply gating with z + // {d_inner, n_seq_tokens, n_seqs} * {d_inner} => {d_inner, n_seq_tokens, n_seqs} + ggml_tensor * D = ggml_reshape_2d(ctx0, model.layers[il].ssm_d, 1, n_heads); + y = ggml_add(ctx0, y, ggml_mul(ctx0, x, D)); + cb(y, "mamba_y_add_d", il); + + y = ggml_swiglu_split(ctx0, ggml_cont(ctx0, z), y); + cb(y, "mamba_y_swiglu_z", il); + + // out_proj: {d_inner, n_embd} @ {d_inner, n_seq_tokens, n_seqs} => {n_embd, n_seq_tokens, n_seqs} + y = ggml_view_3d(ctx0, y, head_dim * n_heads, n_seq_tokens, n_seqs, y->nb[2], y->nb[3], 0); + cur = build_lora_mm(model.layers[il].ssm_out, y); + cb(cur, "mamba_out_proj", il); + } + + // {n_embd, n_seq_tokens, n_seqs} => {n_embd, n_tokens} + cur = ggml_reshape_2d(ctx0, cur, cur->ne[0], n_seq_tokens * n_seqs); + cb(cur, "mamba_out", il); + + return cur; + } +}; + +struct llm_build_arcee : public llm_graph_context { + llm_build_arcee(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) { + const int64_t n_embd_head = hparams.n_embd_head_v; + + GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); + GGML_ASSERT(n_embd_head == hparams.n_rot); + + ggml_tensor * cur; + ggml_tensor * inpL; + + inpL = build_inp_embd(model.tok_embd); + + // inp_pos - contains the positions + ggml_tensor * inp_pos = build_inp_pos(); + + auto * inp_attn = build_attn_inp_kv_unified(); + + const float kq_scale = hparams.f_attention_scale == 0.0f ? 
1.0f/sqrtf(float(n_embd_head)) : hparams.f_attention_scale; + + ggml_tensor * inp_out_ids = build_inp_out_ids(); + + for (int il = 0; il < n_layer; ++il) { + ggml_tensor * inpSA = inpL; + + // norm + cur = build_norm(inpL, + model.layers[il].attn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "attn_norm", il); + + // self-attention + { + // rope freq factors for llama3; may return nullptr for llama2 and other models + ggml_tensor * rope_factors = model.get_rope_factors(cparams, il); + + // compute Q and K and RoPE them + ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur); + cb(Qcur, "Qcur", il); + if (model.layers[il].bq) { + Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq); + cb(Qcur, "Qcur", il); + } + + ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur); + cb(Kcur, "Kcur", il); + if (model.layers[il].bk) { + Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk); + cb(Kcur, "Kcur", il); + } + + ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur); + cb(Vcur, "Vcur", il); + if (model.layers[il].bv) { + Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv); + cb(Vcur, "Vcur", il); + } + + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); + + Qcur = ggml_rope_ext( + ctx0, Qcur, inp_pos, rope_factors, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + + Kcur = ggml_rope_ext( + ctx0, Kcur, inp_pos, rope_factors, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + + cb(Qcur, "Qcur", il); + cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); + + cur = build_attn(inp_attn, + model.layers[il].wo, model.layers[il].bo, + Qcur, Kcur, Vcur, nullptr, nullptr, kq_scale, il); + cb(cur, "attn_out", il); + } + + if (il == n_layer - 1 && inp_out_ids) { + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); + } + + ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); + cb(ffn_inp, "ffn_inp", il); + + // feed-forward network + // ARCEE uses relu^2 instead of silu + cur = build_norm(ffn_inp, + model.layers[il].ffn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "ffn_norm", il); + + cur = build_ffn(cur, + model.layers[il].ffn_up, NULL, NULL, + NULL, NULL, NULL, + model.layers[il].ffn_down, NULL, NULL, + NULL, + LLM_FFN_RELU_SQR, LLM_FFN_SEQ, il); + cb(cur, "ffn_out", il); + + cur = ggml_add(ctx0, cur, ffn_inp); + cb(cur, "ffn_out", il); + + cur = build_cvec(cur, il); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; + } + + cur = inpL; + + cur = build_norm(cur, + model.output_norm, NULL, + LLM_NORM_RMS, -1); + + cb(cur, "result_norm", -1); + res->t_embd = cur; + + // lm_head + cur = build_lora_mm(model.output, cur); + + cb(cur, "result_output", -1); + res->t_logits = cur; + + ggml_build_forward_expand(gf, cur); + } +}; + +struct llm_build_hunyuan_moe : public llm_graph_context { + llm_build_hunyuan_moe(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) { + const int64_t n_embd_head = hparams.n_embd_head_v; + + GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); + GGML_ASSERT(n_embd_head == hparams.n_rot); + + ggml_tensor * cur; + ggml_tensor * inpL; + + inpL = build_inp_embd(model.tok_embd); + + // inp_pos - contains the positions + ggml_tensor * inp_pos = build_inp_pos(); + + auto * inp_attn = build_attn_inp_kv_unified(); + + const float 
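// Editor's note: the Arcee FFN above uses LLM_FFN_RELU_SQR in a plain
// up->act->down stack (LLM_FFN_SEQ, no gate branch), i.e. squared ReLU,
// relu(x)^2, as the activation. A scalar sketch of that activation:
#include <algorithm>

inline float toy_relu_sqr(float x) {
    const float r = std::max(0.0f, x);
    return r * r;
}
// usage: y = W_down * toy_relu_sqr(W_up * x), applied elementwise to the
// hidden units (weights elided; toy_relu_sqr is an invented helper name)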
kq_scale = 1.0f / sqrtf(float(n_embd_head)); + + ggml_tensor * inp_out_ids = build_inp_out_ids(); + + for (int il = 0; il < n_layer; ++il) { + ggml_tensor * inpSA = inpL; + + // norm + cur = build_norm(inpL, + model.layers[il].attn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "attn_norm", il); + + // self-attention + { + // rope freq factors for llama3; may return nullptr for llama2 and other models + ggml_tensor * rope_factors = model.get_rope_factors(cparams, il); + + // compute Q and K and RoPE them + ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur); + cb(Qcur, "Qcur", il); + if (model.layers[il].bq) { + Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq); + cb(Qcur, "Qcur", il); + } + + ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur); + cb(Kcur, "Kcur", il); + if (model.layers[il].bk) { + Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk); + cb(Kcur, "Kcur", il); + } + + ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur); + cb(Vcur, "Vcur", il); + if (model.layers[il].bv) { + Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv); + cb(Vcur, "Vcur", il); + } + + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); + + Qcur = ggml_rope_ext( + ctx0, Qcur, inp_pos, rope_factors, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + + cb(Qcur, "Qcur", il); + cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); + + Kcur = ggml_rope_ext( + ctx0, Kcur, inp_pos, rope_factors, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + + Kcur = build_norm(Kcur, + model.layers[il].attn_k_norm, nullptr, + LLM_NORM_RMS, il); + cb(Kcur, "Kcur_norm", il); + + Qcur = build_norm(Qcur, + model.layers[il].attn_q_norm, nullptr, + LLM_NORM_RMS, il); + cb(Qcur, "Qcur_norm", il); + + cur = build_attn(inp_attn, + model.layers[il].wo, model.layers[il].bo, + Qcur, Kcur, Vcur, nullptr, nullptr, kq_scale, il); + cb(cur, "attn_out", il); + } + + if (il == n_layer - 1 && inp_out_ids) { + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); + } + + ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); + cb(ffn_inp, "ffn_inp", il); + + cur = build_norm(ffn_inp, + model.layers[il].ffn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "ffn_norm", il); + + // feed-forward network (non-MoE) + ggml_tensor * cur_mlp = build_ffn(cur, + model.layers[il].ffn_up_shexp, NULL, NULL, + model.layers[il].ffn_gate_shexp, NULL, NULL, + model.layers[il].ffn_down_shexp, NULL, NULL, + NULL, + LLM_FFN_SILU, LLM_FFN_PAR, il); + cb(cur_mlp, "ffn_mlp", il); + + // MoE branch + ggml_tensor * cur_moe = build_moe_ffn(cur, + model.layers[il].ffn_gate_inp, + model.layers[il].ffn_up_exps, + model.layers[il].ffn_gate_exps, + model.layers[il].ffn_down_exps, + nullptr, + n_expert, n_expert_used, + LLM_FFN_SILU, + true, // norm_topk_prob + false, + 0.0, + LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX, + il); + cb(cur_moe, "ffn_moe_out", il); + + ggml_tensor * ffn_out = ggml_add(ctx0, cur_moe, cur_mlp); + cb(ffn_out, "ffn_out", il); + + cur = ggml_add(ctx0, ffn_out, ffn_inp); + + cur = build_cvec(cur, il); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; + } + + cur = inpL; + + cur = build_norm(cur, + model.output_norm, NULL, + LLM_NORM_RMS, -1); + + cb(cur, "result_norm", -1); + res->t_embd = cur; + + // lm_head + cur = 
build_lora_mm(model.output, cur); + cb(cur, "result_output", -1); + res->t_logits = cur; + + ggml_build_forward_expand(gf, cur); + } +}; + +struct llm_build_hunyuan_dense : public llm_graph_context { + llm_build_hunyuan_dense(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) { + const int64_t n_embd_head = hparams.n_embd_head_v; + + GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); + GGML_ASSERT(n_embd_head == hparams.n_rot); + + ggml_tensor * cur; + ggml_tensor * inpL; + + inpL = build_inp_embd(model.tok_embd); + + // inp_pos - contains the positions + ggml_tensor * inp_pos = build_inp_pos(); + + auto * inp_attn = build_attn_inp_kv_unified(); + + const float kq_scale = 1.0f / sqrtf(float(n_embd_head)); + + ggml_tensor * inp_out_ids = build_inp_out_ids(); + + for (int il = 0; il < n_layer; ++il) { + ggml_tensor * inpSA = inpL; + + // norm + cur = build_norm(inpL, + model.layers[il].attn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "attn_norm", il); + // self-attention + { + // rope freq factors for llama3; may return nullptr for llama2 and other models + ggml_tensor * rope_factors = model.get_rope_factors(cparams, il); + + // compute Q and K and RoPE them + ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur); + cb(Qcur, "Qcur", il); + if (model.layers[il].bq) { + Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq); + cb(Qcur, "Qcur", il); + } + + ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur); + cb(Kcur, "Kcur", il); + if (model.layers[il].bk) { + Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk); + cb(Kcur, "Kcur", il); + } + + ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur); + cb(Vcur, "Vcur", il); + if (model.layers[il].bv) { + Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv); + cb(Vcur, "Vcur", il); + } + + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); + + Qcur = ggml_rope_ext( + ctx0, Qcur, inp_pos, rope_factors, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + + cb(Qcur, "Qcur", il); + cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); + + Kcur = ggml_rope_ext( + ctx0, Kcur, inp_pos, rope_factors, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + + Kcur = build_norm(Kcur, + model.layers[il].attn_k_norm, nullptr, + LLM_NORM_RMS, il); + cb(Kcur, "Kcur_norm", il); + + Qcur = build_norm(Qcur, + model.layers[il].attn_q_norm, nullptr, + LLM_NORM_RMS, il); + cb(Qcur, "Qcur_norm", il); + + cur = build_attn(inp_attn, + model.layers[il].wo, model.layers[il].bo, + Qcur, Kcur, Vcur, nullptr, nullptr, kq_scale, il); + cb(cur, "attn_out", il); + } + + if (il == n_layer - 1 && inp_out_ids) { + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); + } + + ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); + cb(ffn_inp, "ffn_inp", il); + + cur = build_norm(ffn_inp, + model.layers[il].ffn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "ffn_norm", il); + // feed-forward network (non-MoE) + ggml_tensor * cur_mlp = build_ffn(cur, + model.layers[il].ffn_up, NULL, NULL, + model.layers[il].ffn_gate, NULL, NULL, + model.layers[il].ffn_down, NULL, NULL, + NULL, + LLM_FFN_SILU, LLM_FFN_PAR, il); + cb(cur_mlp, "ffn_out", il); + + cur = ggml_add(ctx0, cur_mlp, ffn_inp); + + cur = build_cvec(cur, il); + cb(cur, "l_out", il); + + // 
input for next layer + inpL = cur; + } + cur = inpL; + + cur = build_norm(cur, + model.output_norm, NULL, + LLM_NORM_RMS, -1); + + cb(cur, "result_norm", -1); + res->t_embd = cur; + // lm_head + cur = build_lora_mm(model.output, cur); + cb(cur, "result_output", -1); + res->t_logits = cur; + + ggml_build_forward_expand(gf, cur); + } +}; + +struct llm_build_smollm3 : public llm_graph_context { + llm_build_smollm3(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) { + const int64_t n_embd_head = hparams.n_embd_head_v; + + GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); + GGML_ASSERT(n_embd_head == hparams.n_rot); + + ggml_tensor * cur; + ggml_tensor * inpL; + + inpL = build_inp_embd(model.tok_embd); + + // inp_pos - contains the positions + ggml_tensor * inp_pos = build_inp_pos(); + + auto * inp_attn = build_attn_inp_kv_unified(); + + const float kq_scale = hparams.f_attention_scale == 0.0f ? 1.0f/sqrtf(float(n_embd_head)) : hparams.f_attention_scale; + + ggml_tensor * inp_out_ids = build_inp_out_ids(); + + for (int il = 0; il < n_layer; ++il) { + ggml_tensor * inpSA = inpL; + + const bool use_rope = (il + 1) % hparams.n_no_rope_layer_step != 0; + + // norm + cur = build_norm(inpL, + model.layers[il].attn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "attn_norm", il); + + // self-attention + { + // compute Q and K and RoPE them + ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur); + cb(Qcur, "Qcur", il); + if (model.layers[il].bq) { + Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq); + cb(Qcur, "Qcur", il); + } + + ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur); + cb(Kcur, "Kcur", il); + if (model.layers[il].bk) { + Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk); + cb(Kcur, "Kcur", il); + } + + ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur); + cb(Vcur, "Vcur", il); + if (model.layers[il].bv) { + Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv); + cb(Vcur, "Vcur", il); + } + + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); + + if (use_rope) { + Qcur = ggml_rope_ext( + ctx0, Qcur, inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + + Kcur = ggml_rope_ext( + ctx0, Kcur, inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + } + + cb(Qcur, "Qcur", il); + cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); + + cur = build_attn(inp_attn, + model.layers[il].wo, model.layers[il].bo, + Qcur, Kcur, Vcur, nullptr, nullptr, kq_scale, il); + cb(cur, "attn_out", il); + } + + if (il == n_layer - 1 && inp_out_ids) { + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); + } + + ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); + cb(ffn_inp, "ffn_inp", il); + + // feed-forward network + { + cur = build_norm(ffn_inp, + model.layers[il].ffn_norm, NULL, + LLM_NORM_RMS, il); + cb(cur, "ffn_norm", il); + + cur = build_ffn(cur, + model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL, + model.layers[il].ffn_gate, model.layers[il].ffn_gate_b, NULL, + model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL, + NULL, + LLM_FFN_SILU, LLM_FFN_PAR, il); + cb(cur, "ffn_out", il); + } + + cur = ggml_add(ctx0, cur, ffn_inp); + cb(cur, "ffn_out", il); + + cur = build_cvec(cur, il); + cb(cur, 
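// Editor's note: SmolLM3 above drops RoPE on every n_no_rope_layer_step-th
// layer ("NoPE"). A runnable sketch of the use_rope predicate; the step value
// of 4 is illustrative, not taken from the model config:
#include <cstdio>

int main() {
    const int n_no_rope_layer_step = 4;
    for (int il = 0; il < 8; ++il) {
        const bool use_rope = (il + 1) % n_no_rope_layer_step != 0;
        std::printf("layer %d: %s\n", il, use_rope ? "rope" : "nope");
    }
    return 0; // with step 4: layers 3, 7, ... skip RoPE
}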
"l_out", il); + + // input for next layer + inpL = cur; + } + + cur = inpL; + + cur = build_norm(cur, + model.output_norm, NULL, + LLM_NORM_RMS, -1); + + cb(cur, "result_norm", -1); + res->t_embd = cur; + + // lm_head + cur = build_lora_mm(model.output, cur); + + cb(cur, "result_output", -1); + res->t_logits = cur; + + ggml_build_forward_expand(gf, cur); + } +}; + +struct llm_build_openai_moe_iswa : public llm_graph_context { + llm_build_openai_moe_iswa(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) { + ggml_tensor * cur; + ggml_tensor * inpL; + + inpL = build_inp_embd(model.tok_embd); + + // inp_pos - contains the positions + ggml_tensor * inp_pos = build_inp_pos(); + + auto * inp_attn = build_attn_inp_kv_unified_iswa(); + + for (int il = 0; il < n_layer; ++il) { + ggml_tensor * inpSA = inpL; + + // norm + cur = build_norm(inpL, + model.layers[il].attn_norm, nullptr, + LLM_NORM_RMS, il); + cb(cur, "attn_norm", il); + + // self-attention + { + // compute Q and K and RoPE them + ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur); + cb(Qcur, "Qcur", il); + if (model.layers[il].bq) { + Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq); + cb(Qcur, "Qcur", il); + } + + ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur); + cb(Kcur, "Kcur", il); + if (model.layers[il].bk) { + Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk); + cb(Kcur, "Kcur", il); + } + + ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur); + cb(Vcur, "Vcur", il); + if (model.layers[il].bv) { + Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv); + cb(Vcur, "Vcur", il); + } + + Qcur = ggml_reshape_3d(ctx0, Qcur, n_rot, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_rot, n_head_kv, n_tokens); + Vcur = ggml_reshape_3d(ctx0, Vcur, n_rot, n_head_kv, n_tokens); + + Qcur = ggml_rope_ext( + ctx0, Qcur, inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + + Kcur = ggml_rope_ext( + ctx0, Kcur, inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + + cb(Qcur, "Qcur", il); + cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); + + cur = build_attn_with_sinks(inp_attn, + model.layers[il].wo, model.layers[il].bo, + Qcur, Kcur, Vcur, nullptr, nullptr, model.layers[il].attn_sinks, 1.0f/sqrtf(float(n_rot)), il); + + cb(cur, "attn_out", il); + } + + if (il == n_layer - 1) { + // skip computing output for unused tokens + ggml_tensor * inp_out_ids = build_inp_out_ids(); + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); + } + + ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); + cb(ffn_inp, "ffn_inp", il); + + cur = ffn_inp; + cur = build_norm(cur, + model.layers[il].attn_post_norm, nullptr, + LLM_NORM_RMS, il); + cb(cur, "attn_post_norm", il); + + // MoE branch + cur = build_moe_ffn(cur, + model.layers[il].ffn_gate_inp, model.layers[il].ffn_gate_inp_b, + model.layers[il].ffn_up_exps, model.layers[il].ffn_up_exps_b, + model.layers[il].ffn_gate_exps, model.layers[il].ffn_gate_exps_b, + model.layers[il].ffn_down_exps, model.layers[il].ffn_down_exps_b, + nullptr, + n_expert, n_expert_used, + LLM_FFN_SWIGLU_OAI_MOE, false, + false, 0.0, + LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX_WEIGHT, + il); + cb(cur, "ffn_moe_out", il); + + cur = ggml_add(ctx0, cur, ffn_inp); + + cur = build_cvec(cur, il); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; + } + + cur = inpL; + + cur = 
build_norm(cur, + model.output_norm, NULL, + LLM_NORM_RMS, -1); + + cb(cur, "result_norm", -1); + res->t_embd = cur; + + // lm_head + cur = build_lora_mm(model.output, cur); + + cb(cur, "result_output", -1); + res->t_logits = cur; + + ggml_build_forward_expand(gf, cur); + } +}; + +struct llm_build_lfm2 : public llm_graph_context { + const llama_model & model; + + llm_build_lfm2(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params), model(model) { + + ggml_tensor * cur = build_inp_embd(model.tok_embd); + cb(cur, "model.embed_tokens", -1); + + ggml_tensor * inp_pos = build_inp_pos(); + auto * inp_hybrid = build_inp_mem_hybrid(); + ggml_tensor * inp_out_ids = build_inp_out_ids(); + + for (int il = 0; il < n_layer; ++il) { + auto * prev_cur = cur; + cur = build_norm(cur, model.layers[il].attn_norm, NULL, LLM_NORM_RMS, il); + cb(cur, "model.layers.{}.operator_norm", il); + + cur = hparams.is_recurrent(il) ? + build_shortconv_block(cur, inp_hybrid->get_recr(), il) : + build_attn_block(cur, inp_pos, inp_hybrid->get_attn(), il) ; + + if (il == n_layer - 1 && inp_out_ids) { + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + prev_cur = ggml_get_rows(ctx0, prev_cur, inp_out_ids); + } + + cur = ggml_add(ctx0, prev_cur, cur); + cur = ggml_add(ctx0, cur, build_feed_forward(cur, il)); + } + + cur = build_norm(cur, model.tok_norm, NULL, LLM_NORM_RMS, -1); + cb(cur, "model.embedding_norm", -1); + res->t_embd = cur; + + // lm_head is tied with embeddings + cur = build_lora_mm(model.tok_embd, cur); + cb(cur, "lm_head", -1); + + res->t_logits = cur; + + ggml_build_forward_expand(gf, cur); + } + + ggml_tensor * build_feed_forward(ggml_tensor * cur, + int il) const { + cur = build_norm(cur, model.layers[il].ffn_norm, NULL, LLM_NORM_RMS, il); + cb(cur, "model.layers.{}.ffn_norm", il); + + GGML_ASSERT(!model.layers[il].ffn_up_b); + GGML_ASSERT(!model.layers[il].ffn_gate_b); + GGML_ASSERT(!model.layers[il].ffn_down_b); + cur = build_ffn(cur, + model.layers[il].ffn_up, NULL, NULL, + model.layers[il].ffn_gate, NULL, NULL, + model.layers[il].ffn_down, NULL, NULL, + NULL, + LLM_FFN_SILU, LLM_FFN_PAR, il); + cb(cur, "model.layers.{}.feed_forward.w2", il); + + return cur; + } + + ggml_tensor * build_attn_block(ggml_tensor * cur, + ggml_tensor * inp_pos, + llm_graph_input_attn_kv_unified * inp_attn, + int il) const { + GGML_ASSERT(hparams.n_embd_v_gqa(il) == hparams.n_embd_k_gqa(il)); + auto const n_embd_head = hparams.n_embd_head_v; + auto const n_head_kv = hparams.n_head_kv(il); + + auto * q = build_lora_mm(model.layers[il].wq, cur); + cb(q, "model.layers.{}.self_attn.q_proj", il); + auto * k = build_lora_mm(model.layers[il].wk, cur); + cb(k, "model.layers.{}.self_attn.k_proj", il); + auto * v = build_lora_mm(model.layers[il].wv, cur); + cb(v, "model.layers.{}.self_attn.v_proj", il); + + q = ggml_reshape_3d(ctx0, q, n_embd_head, n_head, n_tokens); + k = ggml_reshape_3d(ctx0, k, n_embd_head, n_head_kv, n_tokens); + v = ggml_reshape_3d(ctx0, v, n_embd_head, n_head_kv, n_tokens); + + // qk norm + q = build_norm(q, model.layers[il].attn_q_norm, NULL, LLM_NORM_RMS, il); + cb(q, "model.layers.{}.self_attn.q_layernorm", il); + k = build_norm(k, model.layers[il].attn_k_norm, NULL, LLM_NORM_RMS, il); + cb(k, "model.layers.{}.self_attn.k_layernorm", il); + + // RoPE + q = ggml_rope_ext( + ctx0, q, inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + k = ggml_rope_ext( + ctx0, k, inp_pos, nullptr, + n_rot, rope_type, 
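// Editor's note: the gpt-oss builder above (llm_build_openai_moe_iswa) passes
// model.layers[il].attn_sinks into build_attn_with_sinks. As I understand the
// sink mechanism, the per-head sink logit joins the softmax normalization but
// carries no value row, so a head can shed attention mass onto "nowhere".
// A scalar sketch of such a softmax (toy function, not the ggml kernel):
#include <algorithm>
#include <cmath>
#include <vector>

std::vector<float> toy_softmax_with_sink(const std::vector<float> & scores, float sink) {
    float m = sink;
    for (float s : scores) m = std::max(m, s);
    float denom = std::exp(sink - m); // the sink enters only the denominator
    for (float s : scores) denom += std::exp(s - m);
    std::vector<float> p;
    for (float s : scores) p.push_back(std::exp(s - m) / denom);
    return p; // probabilities sum to < 1; the gap is the mass on the sink
}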
n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + + cur = build_attn(inp_attn, model.layers[il].wo, NULL, + q, k, v, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); + + cb(cur, "model.layers.{}.self_attn.out_proj", il); + + return cur; + } + + ggml_tensor * build_shortconv_block(ggml_tensor * cur, + llm_graph_input_rs * inp_recr, + int il) { + const auto * mctx_cur = static_cast<const llama_memory_hybrid_context *>(mctx)->get_recr(); + const uint32_t kv_head = mctx_cur->get_head(); + const int64_t n_seq_tokens = ubatch.n_seq_tokens; + const int64_t n_seqs = ubatch.n_seqs; + GGML_ASSERT(n_seqs != 0); + GGML_ASSERT(ubatch.equal_seqs()); + GGML_ASSERT(ubatch.n_tokens == n_seq_tokens * n_seqs); + + GGML_ASSERT(hparams.n_shortconv_l_cache > 1); + const uint32_t d_conv = hparams.n_shortconv_l_cache - 1; + + // {n_embd, n_tokens} => {n_embd, n_seq_tokens, n_seqs} + cur = ggml_reshape_3d(ctx0, cur, cur->ne[0], n_seq_tokens, n_seqs); + + auto * bcx = build_lora_mm(model.layers[il].shortconv.in_proj, cur); + cb(bcx, "model.layers.{}.conv.in_proj", il); + + constexpr auto n_chunks = 3; + GGML_ASSERT(bcx->ne[0] % n_chunks == 0); + auto const chunk_size = bcx->ne[0] / n_chunks; + auto * b = ggml_view_3d(ctx0, bcx, chunk_size, bcx->ne[1], bcx->ne[2], bcx->nb[1], bcx->nb[2], 0*chunk_size*ggml_element_size(bcx)); + auto * c = ggml_view_3d(ctx0, bcx, chunk_size, bcx->ne[1], bcx->ne[2], bcx->nb[1], bcx->nb[2], 1*chunk_size*ggml_element_size(bcx)); + auto * x = ggml_view_3d(ctx0, bcx, chunk_size, bcx->ne[1], bcx->ne[2], bcx->nb[1], bcx->nb[2], 2*chunk_size*ggml_element_size(bcx)); + + auto * bx = ggml_transpose(ctx0, ggml_mul(ctx0, b, x)); + + // read conv state + auto * conv_state = mctx_cur->get_r_l(il); + auto * conv_rs = build_rs(inp_recr, conv_state, hparams.n_embd_r(), n_seqs); + auto * conv = ggml_reshape_3d(ctx0, conv_rs, d_conv, hparams.n_embd, n_seqs); + + bx = ggml_concat(ctx0, conv, bx, 0); + GGML_ASSERT(bx->ne[0] > conv->ne[0]); + + // last d_conv columns is a new conv state + auto * new_conv = ggml_view_3d(ctx0, bx, conv->ne[0], bx->ne[1], bx->ne[2], bx->nb[1], bx->nb[2], (bx->ne[0] - conv->ne[0])*ggml_element_size(bx)); + GGML_ASSERT(ggml_are_same_shape(conv, new_conv)); + + // write new conv state + ggml_build_forward_expand( + gf, + ggml_cpy( + ctx0, + new_conv, + ggml_view_1d( + ctx0, + conv_state, + ggml_nelements(new_conv), + kv_head*d_conv*n_embd*ggml_element_size(new_conv) + ) + ) + ); + + auto * conv_kernel = model.layers[il].shortconv.conv; + auto * conv_out = ggml_ssm_conv(ctx0, bx, conv_kernel); + cb(conv_out, "model.layers.{}.conv.conv", il); + + auto * y = ggml_mul(ctx0, c, conv_out); + y = build_lora_mm(model.layers[il].shortconv.out_proj, y); + cb(y, "model.layers.{}.conv.out_proj", il); + // {n_embd, n_seq_tokens, n_seqs} => {n_embd, n_tokens} + y = ggml_reshape_2d(ctx0, y, y->ne[0], n_seq_tokens * n_seqs); + + return y; + } +}; + +template <bool iswa> +struct llm_build_smallthinker : public llm_graph_context{ + llm_build_smallthinker(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params){ + const int64_t n_embd_head = hparams.n_embd_head_v; + + GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); + GGML_ASSERT(n_embd_head == hparams.n_rot); + + ggml_tensor * cur; + ggml_tensor * inpL; + + inpL = build_inp_embd(model.tok_embd); + + // inp_pos - contains the positions + ggml_tensor * inp_pos = build_inp_pos(); + + using inp_attn_type = std::conditional_t<iswa, llm_graph_input_attn_kv_unified_iswa, llm_graph_input_attn_kv_unified>; + inp_attn_type * inp_attn = nullptr; + + if constexpr (iswa) { + inp_attn =
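// Editor's note: the LFM2 short-conv block above is a depthwise causal
// convolution whose last d_conv inputs per channel are cached between ubatches
// (the ggml_cpy into conv_state). A minimal scalar sketch of one channel;
// toy_shortconv_step is an invented helper, not the ggml kernel:
#include <cstddef>
#include <vector>

// state holds the previous d_conv inputs; kernel has d_conv + 1 taps
float toy_shortconv_step(std::vector<float> & state, const std::vector<float> & kernel, float x) {
    float y = kernel.back() * x;
    for (size_t i = 0; i < state.size(); ++i) y += kernel[i] * state[i];
    state.erase(state.begin());
    state.push_back(x); // shift the rolling state, mirroring the conv-cache copy above
    return y;
}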
build_attn_inp_kv_unified_iswa(); + } else { + inp_attn = build_attn_inp_kv_unified(); + } + + ggml_tensor * inp_out_ids = build_inp_out_ids(); + + for (int il = 0; il < n_layer; ++il) { + ggml_tensor * inpSA = inpL; + ggml_tensor * probs = nullptr; + + probs = build_lora_mm(model.layers[il].ffn_gate_inp, inpL); // [n_expert, n_tokens] + cb(probs, "ffn_moe_logits", il); + + // norm + cur = build_norm(inpL,model.layers[il].attn_norm, NULL, LLM_NORM_RMS, il); + cb(cur, "attn_norm", il); + + // self_attention + { + // compute Q and K and RoPE them + struct ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur); + cb(Qcur, "Qcur", il); + + struct ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur); + cb(Kcur, "Kcur", il); + + struct ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur); + cb(Vcur, "Vcur", il); + + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens); + + if (hparams.n_no_rope_layer_step == n_layer || il % hparams.n_no_rope_layer_step != 0) { + Qcur = ggml_rope_ext(ctx0, Qcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow); + + Kcur = ggml_rope_ext(ctx0, Kcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow); + } + + cb(Qcur, "Qcur", il); + cb(Kcur, "Kcur", il); + + cur = build_attn(inp_attn, + model.layers[il].wo, model.layers[il].bo, + Qcur, Kcur, Vcur, nullptr, nullptr, 1.0f / sqrtf(float(n_embd_head)), il); + } + + if (il == n_layer - 1 && inp_out_ids) { + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); + probs = ggml_get_rows(ctx0, probs, inp_out_ids); + } + + ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); + cb(ffn_inp, "ffn_inp", il); + + // MoE branch + cur = build_norm(ffn_inp, model.layers[il].ffn_norm, NULL, LLM_NORM_RMS, il); + cb(cur, "ffn_norm", il); + + ggml_tensor * ffn_out = + build_moe_ffn(cur, + nullptr, + model.layers[il].ffn_up_exps, + model.layers[il].ffn_gate_exps, + model.layers[il].ffn_down_exps, + nullptr, + n_expert, n_expert_used, + LLM_FFN_RELU, true, + false, 0.0, + static_cast<llama_expert_gating_func_type>(hparams.expert_gating_func), + il, probs); + + cb(ffn_out, "ffn_out", il); + cur = ffn_out; + + cur = ggml_add(ctx0, cur, ffn_inp); + cur = build_cvec(cur, il); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; + } + + cur = inpL; + + cur = build_norm(cur, model.output_norm, NULL, LLM_NORM_RMS, -1); + cb(cur, "result_norm", -1); + + // lm_head + cur = build_lora_mm(model.output, cur); + cb(cur, "result_output", -1); + res->t_logits = cur; + + ggml_build_forward_expand(gf, cur); + } +}; + llama_memory_i * llama_model::create_memory(const llama_memory_params & params, llama_cparams & cparams) const { llama_memory_i * res; switch (arch) { + // Models that need specific instantiation should be handled in the + // switch statement case LLM_ARCH_BERT: case LLM_ARCH_JINA_BERT_V2: case LLM_ARCH_NOMIC_BERT: case LLM_ARCH_NOMIC_BERT_MOE: + case LLM_ARCH_NEO_BERT: + case LLM_ARCH_WAVTOKENIZER_DEC: + case LLM_ARCH_DREAM: + case LLM_ARCH_LLADA: { res = nullptr; } break; - case LLM_ARCH_MAMBA: - case LLM_ARCH_RWKV6: - case LLM_ARCH_RWKV6QWEN2: - case LLM_ARCH_RWKV7: - case LLM_ARCH_ARWKV7: - { - res = new llama_kv_cache_recurrent( - *this, - GGML_TYPE_F32, - GGML_TYPE_F32, - cparams.offload_kqv, -
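// Editor's note: with the template parameter restored above, SmallThinker
// selects its SWA or non-SWA graph-input type at compile time. The same
// std::conditional_t pattern in isolation, with toy types standing in for the
// two llm_graph_input_attn_kv_unified* classes:
#include <type_traits>

struct toy_inp_full {};
struct toy_inp_iswa {};

template <bool iswa>
struct toy_builder {
    using inp_attn_type = std::conditional_t<iswa, toy_inp_iswa, toy_inp_full>;
    inp_attn_type * inp_attn = nullptr; // only the selected type exists here
};

static_assert(std::is_same_v<toy_builder<true>::inp_attn_type,  toy_inp_iswa>);
static_assert(std::is_same_v<toy_builder<false>::inp_attn_type, toy_inp_full>);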
std::max((uint32_t) 1, cparams.n_seq_max)); - } break; + // Models that need standard caching should rely on recurrent/hybrid + // checks default: { - const auto padding = llama_kv_cache_unified::get_padding(cparams); + if (llm_arch_is_recurrent(arch)) { + res = new llama_memory_recurrent( + *this, + nullptr, + GGML_TYPE_F32, + GGML_TYPE_F32, + cparams.offload_kqv, + std::max((uint32_t) 1, cparams.n_seq_max), + cparams.n_seq_max); + } else if (llm_arch_is_hybrid(arch)) { + const auto padding = llama_kv_cache_unified::get_padding(cparams); - cparams.n_ctx = GGML_PAD(cparams.n_ctx, padding); + cparams.n_ctx = GGML_PAD(cparams.n_ctx, padding); - LLAMA_LOG_DEBUG("%s: n_ctx = %u (padded)\n", __func__, cparams.n_ctx); + res = new llama_memory_hybrid( + /* model */ *this, + /* attn_type_k */ params.type_k, + /* attn_type_v */ params.type_v, + /* attn_v_trans */ !cparams.flash_attn, + /* attn_kv_size */ cparams.n_ctx, + /* attn_n_pad */ padding, + /* attn_n_swa */ hparams.n_swa, + /* attn_swa_type */ hparams.swa_type, + /* recurrent_type_k */ GGML_TYPE_F32, + /* recurrent_type_v */ GGML_TYPE_F32, + /* recurrent_kv_size */ std::max((uint32_t) 1, cparams.n_seq_max), + /* n_seq_max */ cparams.n_seq_max, + /* offload */ cparams.offload_kqv, + /* unified */ cparams.kv_unified, + /* filter_attn */ (arch == LLM_ARCH_FALCON_H1) ? [&](int32_t) { return true; } : (llama_memory_hybrid::layer_filter_cb)nullptr, + /* filter_recr */ (arch == LLM_ARCH_FALCON_H1) ? [&](int32_t) { return true; } : (llama_memory_hybrid::layer_filter_cb)nullptr); + } else { + const auto padding = llama_kv_cache_unified::get_padding(cparams); - res = new llama_kv_cache_unified( - *this, - params.type_k, - params.type_v, - !cparams.flash_attn, - cparams.offload_kqv, - cparams.n_ctx, - padding); + uint32_t n_ctx_per_stream = cparams.n_ctx; + + if (!cparams.kv_unified) { + n_ctx_per_stream = (cparams.n_ctx + cparams.n_seq_max - 1)/cparams.n_seq_max; + n_ctx_per_stream = GGML_PAD(n_ctx_per_stream, padding); + + cparams.n_ctx = n_ctx_per_stream*cparams.n_seq_max; + } else { + n_ctx_per_stream = GGML_PAD(n_ctx_per_stream, padding); + + cparams.n_ctx = n_ctx_per_stream; + } + + LLAMA_LOG_DEBUG("%s: n_ctx = %u (padded)\n", __func__, cparams.n_ctx); + + if (hparams.swa_type != LLAMA_SWA_TYPE_NONE) { + GGML_ASSERT(hparams.is_swa_any()); + + res = new llama_kv_cache_unified_iswa( + *this, + params.type_k, + params.type_v, + !cparams.flash_attn, + cparams.offload_kqv, + params.swa_full, + cparams.kv_unified, + n_ctx_per_stream, + cparams.n_seq_max, + cparams.n_ubatch, + padding); + } else { + GGML_ASSERT(!hparams.is_swa_any()); + + res = new llama_kv_cache_unified( + *this, + nullptr, + params.type_k, + params.type_v, + !cparams.flash_attn, + cparams.offload_kqv, + cparams.kv_unified, + n_ctx_per_stream, + cparams.n_seq_max, + padding, + hparams.n_swa, + hparams.swa_type); + } + } } } return res; } -llm_graph_result_ptr llama_model::build_graph( - const llm_graph_params & params, - ggml_cgraph * gf, - llm_graph_type type) const { +ggml_cgraph * llama_model::build_graph(const llm_graph_params & params) const { std::unique_ptr llm; switch (arch) { case LLM_ARCH_LLAMA: - case LLM_ARCH_LLAMA4: - case LLM_ARCH_MINICPM: - case LLM_ARCH_GRANITE: - case LLM_ARCH_GRANITE_MOE: { - llm = std::make_unique(*this, params, gf); + llm = std::make_unique(*this, params); + } break; + case LLM_ARCH_LLAMA4: + { + llm = std::make_unique(*this, params); } break; case LLM_ARCH_DECI: { - llm = std::make_unique(*this, params, gf); + llm = std::make_unique(*this, 
params); } break; case LLM_ARCH_BAICHUAN: { - llm = std::make_unique(*this, params, gf); + llm = std::make_unique(*this, params); } break; case LLM_ARCH_FALCON: { - llm = std::make_unique(*this, params, gf); + llm = std::make_unique(*this, params); } break; case LLM_ARCH_GROK: { - llm = std::make_unique(*this, params, gf); + llm = std::make_unique(*this, params); } break; case LLM_ARCH_STARCODER: { - llm = std::make_unique(*this, params, gf); + llm = std::make_unique(*this, params); } break; case LLM_ARCH_REFACT: { - llm = std::make_unique(*this, params, gf); + llm = std::make_unique(*this, params); } break; case LLM_ARCH_BERT: case LLM_ARCH_JINA_BERT_V2: case LLM_ARCH_NOMIC_BERT: case LLM_ARCH_NOMIC_BERT_MOE: { - llm = std::make_unique(*this, params, gf); + llm = std::make_unique(*this, params); + } break; + case LLM_ARCH_NEO_BERT: + { + llm = std::make_unique(*this, params); } break; case LLM_ARCH_BLOOM: { - llm = std::make_unique(*this, params, gf); + llm = std::make_unique(*this, params); } break; case LLM_ARCH_MPT: { - llm = std::make_unique(*this, params, gf); + llm = std::make_unique(*this, params); } break; case LLM_ARCH_STABLELM: { - llm = std::make_unique(*this, params, gf); + llm = std::make_unique(*this, params); } break; case LLM_ARCH_QWEN: { - llm = std::make_unique(*this, params, gf); + llm = std::make_unique(*this, params); } break; case LLM_ARCH_QWEN2: { - llm = std::make_unique(*this, params, gf); + llm = std::make_unique(*this, params); } break; + case LLM_ARCH_DREAM: + { + llm = std::make_unique(*this, params); + } + break; + case LLM_ARCH_LLADA: + { + llm = std::make_unique(*this, params); + } + break; case LLM_ARCH_QWEN2VL: { - llm = std::make_unique(*this, params, gf); + llm = std::make_unique(*this, params); } break; case LLM_ARCH_QWEN2MOE: { - llm = std::make_unique(*this, params, gf); + llm = std::make_unique(*this, params); } break; case LLM_ARCH_QWEN3: { - llm = std::make_unique(*this, params, gf); + llm = std::make_unique(*this, params); } break; case LLM_ARCH_QWEN3MOE: { - llm = std::make_unique(*this, params, gf); + llm = std::make_unique(*this, params); } break; case LLM_ARCH_PHI2: { - llm = std::make_unique(*this, params, gf); + llm = std::make_unique(*this, params); } break; case LLM_ARCH_PHI3: case LLM_ARCH_PHIMOE: { - llm = std::make_unique(*this, params, gf); + if (hparams.swa_type != LLAMA_SWA_TYPE_NONE) { + llm = std::make_unique> (*this, params); + } else { + llm = std::make_unique>(*this, params); + } } break; case LLM_ARCH_PLAMO: { - llm = std::make_unique(*this, params, gf); + llm = std::make_unique(*this, params); + } break; + case LLM_ARCH_PLAMO2: + { + llm = std::make_unique(*this, params); } break; case LLM_ARCH_GPT2: { - llm = std::make_unique(*this, params, gf); + llm = std::make_unique(*this, params); } break; case LLM_ARCH_CODESHELL: { - llm = std::make_unique(*this, params, gf); + llm = std::make_unique(*this, params); } break; case LLM_ARCH_ORION: { - llm = std::make_unique(*this, params, gf); + llm = std::make_unique(*this, params); } break; case LLM_ARCH_INTERNLM2: { - llm = std::make_unique(*this, params, gf); + llm = std::make_unique(*this, params); } break; case LLM_ARCH_MINICPM3: { - llm = std::make_unique(*this, params, gf); + llm = std::make_unique(*this, params); } break; case LLM_ARCH_GEMMA: { - llm = std::make_unique(*this, params, gf); + llm = std::make_unique(*this, params); } break; case LLM_ARCH_GEMMA2: { - llm = std::make_unique(*this, params, gf); + llm = std::make_unique(*this, params); } break; case LLM_ARCH_GEMMA3: { 
- llm = std::make_unique(*this, params, gf); + llm = std::make_unique(*this, params); + } break; + case LLM_ARCH_GEMMA3N: + { + llm = std::make_unique(*this, params); } break; case LLM_ARCH_STARCODER2: { - llm = std::make_unique(*this, params, gf); + llm = std::make_unique(*this, params); } break; case LLM_ARCH_MAMBA: + case LLM_ARCH_MAMBA2: { - llm = std::make_unique(*this, params, gf); + llm = std::make_unique(*this, params); + } break; + case LLM_ARCH_JAMBA: + { + llm = std::make_unique(*this, params); } break; case LLM_ARCH_XVERSE: { - llm = std::make_unique(*this, params, gf); + llm = std::make_unique(*this, params); } break; case LLM_ARCH_COMMAND_R: { - llm = std::make_unique(*this, params, gf); + llm = std::make_unique(*this, params); } break; case LLM_ARCH_COHERE2: { - llm = std::make_unique(*this, params, gf); + llm = std::make_unique(*this, params); } break; case LLM_ARCH_DBRX: { - llm = std::make_unique(*this, params, gf); + llm = std::make_unique(*this, params); } break; case LLM_ARCH_OLMO: { - llm = std::make_unique(*this, params, gf); + llm = std::make_unique(*this, params); } break; case LLM_ARCH_OLMO2: { - llm = std::make_unique(*this, params, gf); + llm = std::make_unique(*this, params); } break; case LLM_ARCH_OLMOE: { - llm = std::make_unique(*this, params, gf); + llm = std::make_unique(*this, params); } break; case LLM_ARCH_OPENELM: { - llm = std::make_unique(*this, params, gf); + llm = std::make_unique(*this, params); } break; case LLM_ARCH_GPTNEOX: { - llm = std::make_unique(*this, params, gf); + llm = std::make_unique(*this, params); } break; case LLM_ARCH_ARCTIC: { - llm = std::make_unique(*this, params, gf); + llm = std::make_unique(*this, params); } break; case LLM_ARCH_DEEPSEEK: { - llm = std::make_unique(*this, params, gf); + llm = std::make_unique(*this, params); } break; case LLM_ARCH_DEEPSEEK2: { - llm = std::make_unique(*this, params, gf); + llm = std::make_unique(*this, params); } break; case LLM_ARCH_CHATGLM: { - llm = std::make_unique(*this, params, gf); + llm = std::make_unique(*this, params); } break; case LLM_ARCH_GLM4: { - llm = std::make_unique(*this, params, gf); + llm = std::make_unique(*this, params); + } break; + case LLM_ARCH_GLM4_MOE: + { + llm = std::make_unique(*this, params); } break; case LLM_ARCH_BITNET: { - llm = std::make_unique(*this, params, gf); + llm = std::make_unique(*this, params); } break; case LLM_ARCH_T5: { - switch (type) { + switch (params.gtype) { case LLM_GRAPH_TYPE_ENCODER: - llm = std::make_unique(*this, params, gf); + llm = std::make_unique(*this, params); break; case LLM_GRAPH_TYPE_DEFAULT: case LLM_GRAPH_TYPE_DECODER: - llm = std::make_unique(*this, params, gf); + llm = std::make_unique(*this, params); break; default: GGML_ABORT("invalid graph type"); @@ -13324,65 +18588,131 @@ llm_graph_result_ptr llama_model::build_graph( } break; case LLM_ARCH_T5ENCODER: { - llm = std::make_unique(*this, params, gf); + llm = std::make_unique(*this, params); } break; case LLM_ARCH_JAIS: { - llm = std::make_unique(*this, params, gf); + llm = std::make_unique(*this, params); } break; case LLM_ARCH_NEMOTRON: { - llm = std::make_unique(*this, params, gf); + llm = std::make_unique(*this, params); } break; case LLM_ARCH_EXAONE: { - llm = std::make_unique(*this, params, gf); + llm = std::make_unique(*this, params); + } break; + case LLM_ARCH_EXAONE4: + { + if (hparams.swa_type == LLAMA_SWA_TYPE_STANDARD) { + llm = std::make_unique>(*this, params); + } else { + llm = std::make_unique>(*this, params); + } } break; case LLM_ARCH_RWKV6: { - llm 
= std::make_unique(*this, params, gf); + llm = std::make_unique(*this, params); } break; case LLM_ARCH_RWKV6QWEN2: { - llm = std::make_unique(*this, params, gf); + llm = std::make_unique(*this, params); } break; case LLM_ARCH_RWKV7: { - llm = std::make_unique(*this, params, gf); + llm = std::make_unique(*this, params); } break; case LLM_ARCH_ARWKV7: { - llm = std::make_unique(*this, params, gf); + llm = std::make_unique(*this, params); + } break; + case LLM_ARCH_GRANITE: + case LLM_ARCH_GRANITE_MOE: + case LLM_ARCH_MINICPM: + { + llm = std::make_unique(*this, params); + } break; + case LLM_ARCH_GRANITE_HYBRID: + { + llm = std::make_unique(*this, params); } break; case LLM_ARCH_CHAMELEON: { - llm = std::make_unique(*this, params, gf); + llm = std::make_unique(*this, params); } break; case LLM_ARCH_SOLAR: { - llm = std::make_unique(*this, params, gf); + llm = std::make_unique(*this, params); } break; case LLM_ARCH_WAVTOKENIZER_DEC: { - llm = std::make_unique(*this, params, gf); + llm = std::make_unique(*this, params); } break; case LLM_ARCH_PLM: { - llm = std::make_unique(*this, params, gf); + llm = std::make_unique(*this, params); } break; case LLM_ARCH_BAILINGMOE: { - llm = std::make_unique(*this, params, gf); + llm = std::make_unique(*this, params); + } break; + case LLM_ARCH_DOTS1: + { + llm = std::make_unique(*this, params); + } break; + case LLM_ARCH_ARCEE: + { + llm = std::make_unique(*this, params); + } break; + case LLM_ARCH_ERNIE4_5: + { + llm = std::make_unique(*this, params); + } break; + case LLM_ARCH_ERNIE4_5_MOE: + { + llm = std::make_unique(*this, params); + } break; + case LLM_ARCH_HUNYUAN_MOE: + { + llm = std::make_unique(*this, params); + } break; + case LLM_ARCH_HUNYUAN_DENSE: + { + llm = std::make_unique(*this, params); + } break; + case LLM_ARCH_SMOLLM3: + { + llm = std::make_unique(*this, params); + } break; + case LLM_ARCH_OPENAI_MOE: + { + llm = std::make_unique(*this, params); + } break; + case LLM_ARCH_FALCON_H1: + { + llm = std::make_unique(*this, params); + } break; + case LLM_ARCH_LFM2: + { + llm = std::make_unique(*this, params); + } break; + case LLM_ARCH_SMALLTHINKER: + { + if (hparams.swa_type == LLAMA_SWA_TYPE_STANDARD) { + llm = std::make_unique> (*this, params); + } else { + llm = std::make_unique>(*this, params); + } } break; default: GGML_ABORT("fatal error"); } // add on pooling layer - llm->build_pooling(gf, cls, cls_b, cls_out, cls_out_b); + llm->build_pooling(cls, cls_b, cls_out, cls_out_b); - return std::move(llm->res); + return llm->res->get_gf(); } // @@ -13404,6 +18734,7 @@ llama_model_params llama_model_default_params() { /*.use_mmap =*/ true, /*.use_mlock =*/ false, /*.check_tensors =*/ false, + /*.use_extra_bufts =*/ true, }; #ifdef GGML_USE_METAL @@ -13446,6 +18777,22 @@ int32_t llama_model_n_head_kv(const llama_model * model) { return model->hparams.n_head_kv(); } +int32_t llama_model_n_swa(const llama_model * model) { + return model->hparams.n_swa; +} + +uint32_t llama_model_n_cls_out(const struct llama_model * model) { + return model->hparams.n_cls_out; +} + +const char * llama_model_cls_label(const struct llama_model * model, uint32_t i) { + if (i < model->classifier_labels.size()) { + return model->classifier_labels[i].c_str(); + } + + return nullptr; +} + // deprecated int32_t llama_n_ctx_train(const llama_model * model) { return llama_model_n_ctx_train(model); @@ -13475,6 +18822,8 @@ llama_rope_type llama_model_rope_type(const llama_model * model) { case LLM_ARCH_REFACT: case LLM_ARCH_BLOOM: case LLM_ARCH_MAMBA: + case 
LLM_ARCH_MAMBA2: + case LLM_ARCH_JAMBA: case LLM_ARCH_JINA_BERT_V2: case LLM_ARCH_T5: case LLM_ARCH_T5ENCODER: @@ -13488,6 +18837,7 @@ llama_rope_type llama_model_rope_type(const llama_model * model) { // use what we call a normal RoPE, operating on pairs of consecutive head values case LLM_ARCH_LLAMA: + case LLM_ARCH_LLADA: case LLM_ARCH_LLAMA4: case LLM_ARCH_DECI: case LLM_ARCH_BAICHUAN: @@ -13506,13 +18856,20 @@ llama_rope_type llama_model_rope_type(const llama_model * model) { case LLM_ARCH_GLM4: case LLM_ARCH_GRANITE: case LLM_ARCH_GRANITE_MOE: + case LLM_ARCH_GRANITE_HYBRID: case LLM_ARCH_CHAMELEON: case LLM_ARCH_SOLAR: case LLM_ARCH_BAILINGMOE: + case LLM_ARCH_NEO_BERT: + case LLM_ARCH_SMOLLM3: + case LLM_ARCH_ARCEE: + case LLM_ARCH_ERNIE4_5: + case LLM_ARCH_ERNIE4_5_MOE: return LLAMA_ROPE_TYPE_NORM; // the pairs of head values are offset by n_rot/2 case LLM_ARCH_FALCON: + case LLM_ARCH_FALCON_H1: case LLM_ARCH_GROK: case LLM_ARCH_DBRX: case LLM_ARCH_BERT: @@ -13522,6 +18879,7 @@ llama_rope_type llama_model_rope_type(const llama_model * model) { case LLM_ARCH_BITNET: case LLM_ARCH_QWEN: case LLM_ARCH_QWEN2: + case LLM_ARCH_DREAM: case LLM_ARCH_QWEN2MOE: case LLM_ARCH_QWEN3: case LLM_ARCH_QWEN3MOE: @@ -13531,9 +18889,11 @@ llama_rope_type llama_model_rope_type(const llama_model * model) { case LLM_ARCH_PHI3: case LLM_ARCH_PHIMOE: case LLM_ARCH_PLAMO: + case LLM_ARCH_PLAMO2: case LLM_ARCH_GEMMA: case LLM_ARCH_GEMMA2: case LLM_ARCH_GEMMA3: + case LLM_ARCH_GEMMA3N: case LLM_ARCH_STARCODER2: case LLM_ARCH_OPENELM: case LLM_ARCH_GPTNEOX: @@ -13541,7 +18901,15 @@ llama_rope_type llama_model_rope_type(const llama_model * model) { case LLM_ARCH_ORION: case LLM_ARCH_NEMOTRON: case LLM_ARCH_EXAONE: + case LLM_ARCH_EXAONE4: case LLM_ARCH_MINICPM3: + case LLM_ARCH_DOTS1: + case LLM_ARCH_HUNYUAN_MOE: + case LLM_ARCH_OPENAI_MOE: + case LLM_ARCH_HUNYUAN_DENSE: + case LLM_ARCH_LFM2: + case LLM_ARCH_SMALLTHINKER: + case LLM_ARCH_GLM4_MOE: return LLAMA_ROPE_TYPE_NEOX; case LLM_ARCH_QWEN2VL: @@ -13607,7 +18975,7 @@ uint64_t llama_model_size(const llama_model * model) { } const char * llama_model_chat_template(const llama_model * model, const char * name) { - const auto key = name ? LLM_KV(model->arch, name)(LLM_KV_TOKENIZER_CHAT_TEMPLATE_N) + const auto key = name ? 
LLM_KV(model->arch, name)(LLM_KV_TOKENIZER_CHAT_TEMPLATE) : LLM_KV(model->arch)(LLM_KV_TOKENIZER_CHAT_TEMPLATE); const auto & it = model->gguf_kv.find(key); if (it == model->gguf_kv.end()) { @@ -13615,7 +18983,7 @@ const char * llama_model_chat_template(const llama_model * model, const char * n // do not extend this list unless absolutely necessary // Mistral-Small-2503 does not have built-in chat template llama_vocab_pre_type pre_type = model->vocab.get_pre_type(); - if (pre_type == LLAMA_VOCAB_PRE_TYPE_TEKKEN && model->layers.size() == 40) { + if (!name && pre_type == LLAMA_VOCAB_PRE_TYPE_TEKKEN && model->layers.size() == 40) { return "mistral-v7-tekken"; } @@ -13649,14 +19017,11 @@ llama_token llama_model_decoder_start_token(const llama_model * model) { } bool llama_model_is_recurrent(const llama_model * model) { - switch (model->arch) { - case LLM_ARCH_MAMBA: return true; - case LLM_ARCH_RWKV6: return true; - case LLM_ARCH_RWKV6QWEN2: return true; - case LLM_ARCH_RWKV7: return true; - case LLM_ARCH_ARWKV7: return true; - default: return false; - } + return llm_arch_is_recurrent(model->arch); +} + +bool llama_model_is_diffusion(const llama_model * model) { + return llm_arch_is_diffusion(model->arch); } const std::vector> & llama_internal_get_tensor_map(const llama_model * model) { diff --git a/llama/llama.cpp/src/llama-model.h b/llama/llama.cpp/src/llama-model.h index 43746c7dd..099645332 100644 --- a/llama/llama.cpp/src/llama-model.h +++ b/llama/llama.cpp/src/llama-model.h @@ -32,16 +32,21 @@ enum llm_type { LLM_TYPE_190M, LLM_TYPE_220M, LLM_TYPE_250M, + LLM_TYPE_256M, LLM_TYPE_270M, LLM_TYPE_335M, + LLM_TYPE_350M, LLM_TYPE_410M, LLM_TYPE_450M, LLM_TYPE_475M, + LLM_TYPE_700M, LLM_TYPE_770M, LLM_TYPE_780M, + LLM_TYPE_0_3B, LLM_TYPE_0_5B, LLM_TYPE_0_6B, LLM_TYPE_1B, + LLM_TYPE_1_2B, LLM_TYPE_1_3B, LLM_TYPE_1_4B, LLM_TYPE_1_5B, @@ -74,6 +79,7 @@ enum llm_type { LLM_TYPE_40B, LLM_TYPE_65B, LLM_TYPE_70B, + LLM_TYPE_142B, LLM_TYPE_236B, LLM_TYPE_290B, LLM_TYPE_314B, @@ -93,8 +99,15 @@ enum llm_type { LLM_TYPE_57B_A14B, LLM_TYPE_17B_16E, // llama4 Scout LLM_TYPE_17B_128E, // llama4 Maverick + LLM_TYPE_A13B, + LLM_TYPE_21B_A3B, // Ernie MoE small LLM_TYPE_30B_A3B, + LLM_TYPE_106B_A12B, // GLM-4.5-Air LLM_TYPE_235B_A22B, + LLM_TYPE_300B_A47B, // Ernie MoE big + LLM_TYPE_355B_A32B, // GLM-4.5 + LLM_TYPE_E2B, + LLM_TYPE_E4B, }; std::string llama_rope_scaling_type_name(llama_rope_scaling_type rope_scaling_type); @@ -150,6 +163,21 @@ struct llama_layer_convnext { struct ggml_tensor * gamma = nullptr; }; +struct llama_layer_shortconv { + struct ggml_tensor * in_proj = nullptr; + struct ggml_tensor * conv = nullptr; + struct ggml_tensor * out_proj = nullptr; +}; + +struct llama_layer_nextn { + struct ggml_tensor * eh_proj = nullptr; + struct ggml_tensor * embed_tokens = nullptr; + struct ggml_tensor * enorm = nullptr; + struct ggml_tensor * hnorm = nullptr; + struct ggml_tensor * shared_head_head = nullptr; + struct ggml_tensor * shared_head_norm = nullptr; +}; + struct llama_layer { // normalization struct ggml_tensor * attn_norm = nullptr; @@ -169,6 +197,10 @@ struct llama_layer { struct ggml_tensor * ffn_sub_norm = nullptr; struct ggml_tensor * attn_norm_cross = nullptr; struct ggml_tensor * attn_norm_enc = nullptr; + struct ggml_tensor * ssm_norm = nullptr; + struct ggml_tensor * ssm_dt_norm = nullptr; + struct ggml_tensor * ssm_b_norm = nullptr; + struct ggml_tensor * ssm_c_norm = nullptr; // attention struct ggml_tensor * wq = nullptr; @@ -221,10 +253,14 @@ struct llama_layer { struct 
ggml_tensor * ffn_up_enc = nullptr; // ff MoE - struct ggml_tensor * ffn_gate_inp = nullptr; - struct ggml_tensor * ffn_gate_exps = nullptr; - struct ggml_tensor * ffn_down_exps = nullptr; - struct ggml_tensor * ffn_up_exps = nullptr; + struct ggml_tensor * ffn_gate_inp = nullptr; + struct ggml_tensor * ffn_gate_exps = nullptr; + struct ggml_tensor * ffn_down_exps = nullptr; + struct ggml_tensor * ffn_up_exps = nullptr; + struct ggml_tensor * ffn_gate_inp_b = nullptr; + struct ggml_tensor * ffn_gate_exps_b = nullptr; + struct ggml_tensor * ffn_down_exps_b = nullptr; + struct ggml_tensor * ffn_up_exps_b = nullptr; // ff shared expert (shexp) struct ggml_tensor * ffn_gate_inp_shexp = nullptr; @@ -316,11 +352,31 @@ struct llama_layer { struct ggml_tensor * ffn_up_scale = nullptr; struct ggml_tensor * ffn_down_scale = nullptr; + // altup & laurel + struct ggml_tensor * per_layer_inp_gate = nullptr; + struct ggml_tensor * per_layer_proj = nullptr; + struct ggml_tensor * per_layer_post_norm = nullptr; + struct ggml_tensor * altup_correct_coef = nullptr; + struct ggml_tensor * altup_correct_scale = nullptr; + struct ggml_tensor * altup_predict_coef = nullptr; + struct ggml_tensor * altup_router = nullptr; + struct ggml_tensor * altup_router_norm = nullptr; + struct ggml_tensor * laurel_l = nullptr; + struct ggml_tensor * laurel_r = nullptr; + struct ggml_tensor * laurel_post_norm = nullptr; + + // openai-moe + struct ggml_tensor * attn_sinks = nullptr; + struct ggml_tensor * bskcn_tv = nullptr; struct llama_layer_posnet posnet; struct llama_layer_convnext convnext; + + struct llama_layer_shortconv shortconv; + + struct llama_layer_nextn nextn; }; struct llama_model { @@ -332,6 +388,9 @@ struct llama_model { llama_hparams hparams = {}; llama_vocab vocab; + // for classifier models + std::vector classifier_labels; + struct ggml_tensor * tok_embd = nullptr; struct ggml_tensor * type_embd = nullptr; struct ggml_tensor * pos_embd = nullptr; @@ -353,6 +412,13 @@ struct llama_model { struct ggml_tensor * conv1d = nullptr; struct ggml_tensor * conv1d_b = nullptr; + // gemma3n altup + struct ggml_tensor * tok_embd_per_layer = nullptr; + struct ggml_tensor * altup_proj = nullptr; + struct ggml_tensor * altup_unembd_proj = nullptr; + struct ggml_tensor * per_layer_model_proj = nullptr; + struct ggml_tensor * per_layer_proj_norm = nullptr; + std::vector layers; llama_model_params params; @@ -401,17 +467,17 @@ struct llama_model { const struct ggml_tensor * get_tensor(const char * name) const; - ggml_tensor * get_rope_factors(uint32_t n_ctx_per_seq, int il) const; + float get_rope_freq_base (const llama_cparams & cparams, int il) const; + float get_rope_freq_scale(const llama_cparams & cparams, int il) const; + + ggml_tensor * get_rope_factors(const llama_cparams & cparams, int il) const; // note: can mutate `cparams` // TODO: move this to new llm_arch_model_i interface llama_memory_i * create_memory(const llama_memory_params & params, llama_cparams & cparams) const; // TODO: move this to new llm_arch_model_i interface - llm_graph_result_ptr build_graph( - const llm_graph_params & params, - ggml_cgraph * gf, - llm_graph_type type) const; + ggml_cgraph * build_graph(const llm_graph_params & params) const; private: struct impl; diff --git a/llama/llama.cpp/src/llama-quant.cpp b/llama/llama.cpp/src/llama-quant.cpp index 820d5128e..1d0361cc1 100644 --- a/llama/llama.cpp/src/llama-quant.cpp +++ b/llama/llama.cpp/src/llama-quant.cpp @@ -1,5 +1,4 @@ #include "llama-quant.h" - #include "llama-impl.h" #include 
"llama-model.h" #include "llama-model-loader.h" @@ -14,6 +13,12 @@ #include #include +// Quantization types. Changes to this struct must be replicated in quantize.cpp +struct tensor_quantization { + std::string name; + ggml_type quant = GGML_TYPE_COUNT; +}; + static void zeros(std::ofstream & file, size_t n) { char zero = 0; for (size_t i = 0; i < n; ++i) { @@ -21,6 +26,56 @@ static void zeros(std::ofstream & file, size_t n) { } } +static std::string remap_layer(const std::string & orig_name, const std::vector & prune, std::map & mapped, int & next_id) { + if (prune.empty()) { + return orig_name; + } + + static const std::regex pattern(R"(blk\.(\d+)\.)"); + if (std::smatch match; std::regex_search(orig_name, match, pattern)) { + const int blk = std::stoi(match[1]); + std::string new_name = orig_name; + + if (mapped.count(blk)) { + // Already mapped, do nothing + } else if (std::find(prune.begin(), prune.end(), blk) != prune.end()) { + mapped[blk] = ""; + } else if (blk < prune.front()) { + mapped[blk] = std::to_string(blk); + next_id = blk + 1; + } else { + mapped[blk] = std::to_string(next_id); + ++next_id; + } + + return mapped[blk].empty() ? mapped[blk] : new_name.replace(match.position(1), match.length(1), mapped[blk]); + } + + return orig_name; +} + +static std::string remap_imatrix (const std::string & orig_name, const std::map & mapped) { + if (mapped.empty()) { + return orig_name; + } + + static const std::regex pattern(R"(blk\.(\d+)\.)"); + if (std::smatch match; std::regex_search(orig_name, match, pattern)) { + const std::string blk(match[1]); + std::string new_name = orig_name; + + for (const auto & p : mapped) { + if (p.second == blk) { + LLAMA_LOG_DEBUG("(blk.%d imatrix) ", p.first); + return new_name.replace(match.position(1), match.length(1), std::to_string(p.first)); + } + } + GGML_ABORT("\n%s: imatrix mapping error for %s\n", __func__, orig_name.c_str()); + } + + return orig_name; +} + struct quantize_state_impl { const llama_model & model; const llama_model_quantize_params * params; @@ -48,12 +103,6 @@ struct quantize_state_impl { {} }; -// changes to this struct must be replicated in quantize.cpp -struct tensor_quantization { - std::string name; - ggml_type quant = GGML_TYPE_COUNT; -}; - static void llama_tensor_dequantize_impl( ggml_tensor * tensor, std::vector> & output, std::vector & workers, const size_t nelements, const int nthread @@ -162,7 +211,10 @@ static ggml_type llama_tensor_get_type(quantize_state_impl & qs, ggml_type new_t const int64_t nx = tensor->ne[0]; const int64_t qk_k = ggml_blck_size(new_type); - if (arch == LLM_ARCH_FALCON || nx % qk_k != 0) { + if (ftype == LLAMA_FTYPE_MOSTLY_MXFP4_MOE) { + new_type = GGML_TYPE_Q8_0; + } + else if (arch == LLM_ARCH_FALCON || nx % qk_k != 0) { new_type = GGML_TYPE_Q8_0; } else if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_XXS || ftype == LLAMA_FTYPE_MOSTLY_IQ2_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS || @@ -174,7 +226,15 @@ static ggml_type llama_tensor_get_type(quantize_state_impl & qs, ggml_type new_t new_type = GGML_TYPE_Q6_K; } } - } else if (name == "token_embd.weight") { + } else if (ftype == LLAMA_FTYPE_MOSTLY_MXFP4_MOE) { + // MoE tensors -> MXFP4 + // other tensors -> Q8_0 + if (tensor->ne[2] > 1) { + new_type = GGML_TYPE_MXFP4; + } else { + new_type = GGML_TYPE_Q8_0; + } + } else if (name == "token_embd.weight" || name == "per_layer_token_embd.weight") { if (qs.params->token_embedding_type < GGML_TYPE_COUNT) { new_type = qs.params->token_embedding_type; } else { @@ -484,6 +544,8 @@ static void 
llama_model_quantize_impl(const std::string & fname_inp, const std:: case LLAMA_FTYPE_MOSTLY_BF16: default_type = GGML_TYPE_BF16; break; case LLAMA_FTYPE_ALL_F32: default_type = GGML_TYPE_F32; break; + case LLAMA_FTYPE_MOSTLY_MXFP4_MOE: default_type = GGML_TYPE_MXFP4; break; + // K-quants case LLAMA_FTYPE_MOSTLY_Q2_K_S: case LLAMA_FTYPE_MOSTLY_Q2_K: default_type = GGML_TYPE_Q2_K; break; @@ -568,6 +630,11 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std:: const size_t align = GGUF_DEFAULT_ALIGNMENT; gguf_context_ptr ctx_out { gguf_init_empty() }; + std::vector prune_list = {}; + if (params->prune_layers) { + prune_list = *static_cast *>(params->prune_layers); + } + // copy the KV pairs from the input file gguf_set_kv (ctx_out.get(), ml.meta.get()); gguf_set_val_u32(ctx_out.get(), "general.quantization_version", GGML_QNT_VERSION); // TODO: use LLM_KV @@ -585,7 +652,8 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std:: if (o.tag == LLAMA_KV_OVERRIDE_TYPE_FLOAT) { gguf_set_val_f32(ctx_out.get(), o.key, o.val_f64); } else if (o.tag == LLAMA_KV_OVERRIDE_TYPE_INT) { - gguf_set_val_i32(ctx_out.get(), o.key, o.val_i64); + // Setting type to UINT32. See https://github.com/ggml-org/llama.cpp/pull/14182 for context + gguf_set_val_u32(ctx_out.get(), o.key, (uint32_t)abs(o.val_i64)); } else if (o.tag == LLAMA_KV_OVERRIDE_TYPE_BOOL) { gguf_set_val_bool(ctx_out.get(), o.key, o.val_bool); } else if (o.tag == LLAMA_KV_OVERRIDE_TYPE_STR) { @@ -596,12 +664,32 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std:: } } + std::map mapped; + int blk_id = 0; + int pruned_attention_w = 0; + // make a list of weights std::vector tensors; tensors.reserve(ml.weights_map.size()); for (const auto & it : ml.weights_map) { + const std::string remapped_name(remap_layer(it.first, prune_list, mapped, blk_id)); + if (remapped_name.empty()) { + if (it.first.find("attn_v.weight") != std::string::npos || + it.first.find("attn_qkv.weight") != std::string::npos || + it.first.find("attn_kv_b.weight") != std::string::npos) { + pruned_attention_w++; + } + LLAMA_LOG_DEBUG("%s: pruning tensor %s\n", __func__, it.first.c_str()); + continue; + } else if (remapped_name != it.first) { + ggml_set_name(it.second.tensor, remapped_name.c_str()); + LLAMA_LOG_DEBUG("%s: tensor %s remapped to %s\n", __func__, it.first.c_str(), ggml_get_name(it.second.tensor)); + } tensors.push_back(&it.second); } + if (!prune_list.empty()) { + gguf_set_val_u32(ctx_out.get(), ml.llm_kv(LLM_KV_BLOCK_COUNT).c_str(), blk_id); + } // keep_split requires that the weights are sorted by split index if (params->keep_split) { @@ -639,7 +727,7 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std:: if (llama_model_has_encoder(&model)) { n_attn_layer *= 3; } - GGML_ASSERT((qs.n_attention_wv == n_attn_layer) && "n_attention_wv is unexpected"); + GGML_ASSERT((qs.n_attention_wv == n_attn_layer - pruned_attention_w) && "n_attention_wv is unexpected"); } size_t total_size_org = 0; @@ -680,7 +768,7 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std:: for (size_t i = 0; i < ctx_outs.size(); ++i) { gguf_set_val_u16(ctx_outs[i].get(), ml.llm_kv(LLM_KV_SPLIT_NO).c_str(), i); gguf_set_val_u16(ctx_outs[i].get(), ml.llm_kv(LLM_KV_SPLIT_COUNT).c_str(), n_split); - gguf_set_val_i32(ctx_outs[i].get(), ml.llm_kv(LLM_KV_SPLIT_TENSORS_COUNT).c_str(), ml.n_tensors); + gguf_set_val_i32(ctx_outs[i].get(), 
ml.llm_kv(LLM_KV_SPLIT_TENSORS_COUNT).c_str(), (int32_t)tensors.size()); } } @@ -755,6 +843,13 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std:: // NOTE: can't use LLM_TN here because the layer number is not known quantize &= name.find("ffn_gate_inp.weight") == std::string::npos; + // these are very small (e.g. 4x4) + quantize &= name.find("altup") == std::string::npos; + quantize &= name.find("laurel") == std::string::npos; + + // these are not too big so keep them as it is + quantize &= name.find("per_layer_model_proj") == std::string::npos; + // do not quantize positional embeddings and token types (BERT) quantize &= name != LLM_TN(model.arch)(LLM_TENSOR_POS_EMBD, "weight"); quantize &= name != LLM_TN(model.arch)(LLM_TENSOR_TOKEN_TYPES, "weight"); @@ -762,6 +857,7 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std:: // do not quantize Mamba's small yet 2D weights // NOTE: can't use LLM_TN here because the layer number is not known quantize &= name.find("ssm_conv1d.weight") == std::string::npos; + quantize &= name.find("shortconv.conv.weight") == std::string::npos; // do not quantize RWKV's small yet 2D weights quantize &= name.find("time_mix_first.weight") == std::string::npos; @@ -792,17 +888,18 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std:: // get more optimal quantization type based on the tensor shape, layer, etc. if (!params->pure && ggml_is_quantized(default_type)) { + int fallback = qs.n_fallback; new_type = llama_tensor_get_type(qs, new_type, tensor, ftype); - // unless the user specifies a type - if (params->tensor_types) { + // unless the user specifies a type, and the tensor geometry will not require fallback quantisation + if (params->tensor_types && qs.n_fallback - fallback == 0) { const std::vector & tensor_types = *static_cast *>(params->tensor_types); + const std::string tensor_name(tensor->name); for (const auto & [tname, qtype] : tensor_types) { - if (std::regex pattern(tname); std::regex_search(tensor->name, pattern)) { - if (qtype != new_type) { - LLAMA_LOG_DEBUG("(overriding %s -> %s), ", ggml_type_name(new_type), ggml_type_name(qtype)); + if (std::regex pattern(tname); std::regex_search(tensor_name, pattern)) { + if (qtype != new_type) { + LLAMA_LOG_DEBUG("(overriding %s) ", ggml_type_name(new_type)); + new_type = qtype; // if two or more types are specified for the same tensor, the last match wins } - new_type = qtype; - break; } } } @@ -829,7 +926,7 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std:: const float * imatrix = nullptr; if (imatrix_data) { - auto it = imatrix_data->find(tensor->name); + auto it = imatrix_data->find(remap_imatrix(tensor->name, mapped)); if (it == imatrix_data->end()) { LLAMA_LOG_INFO("\n====== %s: did not find weights for %s\n", __func__, tensor->name); } else { @@ -900,6 +997,29 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std:: const float * imatrix_03 = imatrix ? 
imatrix + i03 * n_per_row : nullptr; new_size += llama_tensor_quantize_impl(new_type, f32_data_03, new_data_03, chunk_size, nrows, n_per_row, imatrix_03, workers, nthread_use); + + // TODO: temporary sanity check that the F16 -> MXFP4 is lossless +#if 0 + if (new_type == GGML_TYPE_MXFP4) { + auto * x = f32_data_03; + + //LLAMA_LOG_INFO("nrows = %d, n_per_row = %d\n", nrows, n_per_row); + std::vector deq(nrows*n_per_row); + const ggml_type_traits * qtype = ggml_get_type_traits(new_type); + qtype->to_float(new_data_03, deq.data(), deq.size()); + + double err = 0.0f; + for (int i = 0; i < (int) deq.size(); ++i) { + err += fabsf(deq[i] - x[i]); + //if (fabsf(deq[i] - x[i]) > 0.00001 && i < 256) { + if (deq[i] != x[i]) { + LLAMA_LOG_INFO("deq[%d] = %f, x[%d] = %f\n", i, deq[i], i, x[i]); + } + } + //LLAMA_LOG_INFO("err = %f\n", err); + GGML_ASSERT(err == 0.00000); + } +#endif } LLAMA_LOG_INFO("size = %8.2f MiB -> %8.2f MiB\n", ggml_nbytes(tensor)/1024.0/1024.0, new_size/1024.0/1024.0); } @@ -944,6 +1064,7 @@ llama_model_quantize_params llama_model_quantize_default_params() { /*.imatrix =*/ nullptr, /*.kv_overrides =*/ nullptr, /*.tensor_type =*/ nullptr, + /*.prune_layers =*/ nullptr }; return result; diff --git a/llama/llama.cpp/src/llama-sampling.cpp b/llama/llama.cpp/src/llama-sampling.cpp index 15a10ca8c..11f93f426 100644 --- a/llama/llama.cpp/src/llama-sampling.cpp +++ b/llama/llama.cpp/src/llama-sampling.cpp @@ -798,7 +798,7 @@ static void llama_sampler_min_p_apply(struct llama_sampler * smpl, llama_token_d } // if we have enough values the operation was a success - if (filtered_tokens.size() >= ctx->min_keep) { + if (!filtered_tokens.empty() && filtered_tokens.size() >= ctx->min_keep) { memcpy(cur_p->data, filtered_tokens.data(), filtered_tokens.size()*sizeof(llama_token_data)); cur_p->size = filtered_tokens.size(); min_p_applied = true; @@ -909,7 +909,7 @@ static void llama_sampler_typical_apply(struct llama_sampler * smpl, llama_token cum_sum += cur_p->data[idx].p; // Check if the running sum is greater than typical or if we have kept at least min_keep tokens - if (cum_sum > ctx->p && i >= ctx->min_keep - 1) { + if (cum_sum > ctx->p && (ctx->min_keep == 0 || i >= ctx->min_keep - 1)) { last_idx = i + 1; break; } diff --git a/llama/llama.cpp/src/llama-vocab.cpp b/llama/llama.cpp/src/llama-vocab.cpp index 9f5fd57b8..fa388b03a 100644 --- a/llama/llama.cpp/src/llama-vocab.cpp +++ b/llama/llama.cpp/src/llama-vocab.cpp @@ -9,16 +9,17 @@ #include #include +#include #include -#include +#include #include #include #include +#include #include #include #include #include -#include // // helpers @@ -306,6 +307,7 @@ struct llm_tokenizer_bpe : llm_tokenizer { }; break; case LLAMA_VOCAB_PRE_TYPE_DEEPSEEK3_LLM: + case LLAMA_VOCAB_PRE_TYPE_HUNYUAN_DENSE: regex_exprs = { "\\p{N}{1,3}", "[一-龥぀-ゟ゠-ヿ]+", @@ -351,6 +353,7 @@ struct llm_tokenizer_bpe : llm_tokenizer { break; case LLAMA_VOCAB_PRE_TYPE_STABLELM2: case LLAMA_VOCAB_PRE_TYPE_QWEN2: + case LLAMA_VOCAB_PRE_TYPE_HUNYUAN: regex_exprs = { // original regex from tokenizer.json // "(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+" @@ -403,6 +406,13 @@ struct llm_tokenizer_bpe : llm_tokenizer { "[^\\r\\n\\p{L}\\p{N}]?((?=[\\p{L}])([^a-z]))*((?=[\\p{L}])([^A-Z]))+(?:'[sS]|'[tT]|'[rR][eE]|'[vV][eE]|'[mM]|'[lL][lL]|'[dD])?|[^\\r\\n\\p{L}\\p{N}]?((?=[\\p{L}])([^a-z]))+((?=[\\p{L}])([^A-Z]))*(?:'[sS]|'[tT]|'[rR][eE]|'[vV][eE]|'[mM]|'[lL][lL]|'[dD])?|\\p{N}{1,3}| 
?[^\\s\\p{L}\\p{N}]+[\\r\\n/]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+", }; break; + case LLAMA_VOCAB_PRE_TYPE_KIMI_K2: + regex_exprs = { + // K2 trigger pattern - this will activate the custom K2 handler in unicode.cpp + // The custom handler implements all K2 patterns with proper Han character exclusion + "\\p{Han}+", + }; + break; case LLAMA_VOCAB_PRE_TYPE_SUPERBPE: regex_exprs = { "\\p{N}+", @@ -835,7 +845,7 @@ struct llm_tokenizer_ugm_session { } // initialize score_sum to -FLT_MAX so it will be always lower than sums of token scores - std::vector tokenization_results(input_len + 1, {vocab.token_unk(), 0, -FLT_MAX}); + std::vector tokenization_results(input_len + 1, {vocab.token_unk(), 0, -DBL_MAX}); // at the beginning tokenization score is zero tokenization_results[0] = { vocab.token_unk(), 0, 0 }; @@ -867,7 +877,7 @@ struct llm_tokenizer_ugm_session { const double challenger_score = current_best.score_sum + token_score; struct best_tokenization & current_champ = tokenization_results[prefix_offset]; if (challenger_score > current_champ.score_sum) { - struct best_tokenization challenger = { token_id, input_offset, (float) challenger_score }; + struct best_tokenization challenger = { token_id, input_offset, challenger_score }; current_champ = challenger; } } @@ -881,7 +891,7 @@ struct llm_tokenizer_ugm_session { prefix_offset = input_offset + n_utf8_code_units; struct best_tokenization & current_champ = tokenization_results[prefix_offset]; if (challenger_score > current_champ.score_sum) { - struct best_tokenization challenger = { vocab.token_unk(), input_offset, (float) challenger_score }; + struct best_tokenization challenger = { vocab.token_unk(), input_offset, challenger_score }; current_champ = challenger; } } @@ -1007,7 +1017,7 @@ private: struct best_tokenization { llama_token token_id; size_t input_offset; - float score_sum; + double score_sum; }; struct normalization_result normalize_prefix(const std::string & input, size_t input_offset) { @@ -1195,6 +1205,284 @@ private: const llm_tokenizer_rwkv & tokenizer; }; +struct llm_tokenizer_plamo2 : llm_tokenizer { + llm_tokenizer_plamo2(const llama_vocab & vocab) { + build(vocab); + } + + void build(const llama_vocab & vocab) { + // Reset internal structures + tokens_.clear(); + bytes_.assign(256, 0); + to_suffix_id_.clear(); + table_.clear(); + + // Build token list and byte mapping + std::unordered_map suffix_to_score; + std::unordered_map token_to_id; + + for (size_t token_id = 0; token_id < vocab.n_tokens(); ++token_id) { + const auto & entry = vocab.get_token_data(token_id); + tokens_.push_back(entry.text); + token_to_id[entry.text] = static_cast(token_id); + + // Handle byte tokens + if (vocab.is_byte(token_id)) { + if (entry.text.length() == 6 && entry.text.substr(0, 3) == "<0x" && entry.text.back() == '>') { + std::string hex_str = entry.text.substr(3, 2); + int byte_val = std::stoi(hex_str, nullptr, 16); + bytes_[byte_val] = static_cast(token_id); + } + continue; + } + + // Add token and all its suffixes to suffix_to_score + suffix_to_score[entry.text] = entry.score; + + // Extract suffixes character by character (UTF-8 aware) + std::vector cpts = unicode_cpts_from_utf8(entry.text); + for (size_t i = 1; i < cpts.size(); ++i) { + std::string suffix; + for (size_t j = i; j < cpts.size(); ++j) { + suffix += unicode_cpt_to_utf8(cpts[j]); + } + if (suffix_to_score.find(suffix) == suffix_to_score.end()) { + suffix_to_score[suffix] = std::numeric_limits::quiet_NaN(); + } + } + } + + // Check that all byte tokens are set + for (int i = 0; i 
< 256; ++i) { + if (bytes_[i] == 0) { + throw std::runtime_error("Byte token for <0x" + std::to_string(i) + "> is not set"); + } + } + + // Build suffix list in lexicographical order of reversed strings + std::vector suffixes; + for (const auto & pair : suffix_to_score) { + suffixes.push_back(pair.first); + } + suffixes.push_back(""); // Empty suffix + + std::sort(suffixes.begin(), suffixes.end(), [](const std::string & a, const std::string & b) { + std::string rev_a(a.rbegin(), a.rend()); + std::string rev_b(b.rbegin(), b.rend()); + return rev_a < rev_b; + }); + + // Build suffix_to_id and to_suffix_id_ + std::unordered_map suffix_to_id; + int32_t num_pieces = 0; + + for (const auto & suffix : suffixes) { + suffix_to_id[suffix] = num_pieces; + if (!suffix.empty()) { + std::vector cpts = unicode_cpts_from_utf8(suffix); + + std::string remaining; + for (size_t i = 1; i < cpts.size(); ++i) { + remaining += unicode_cpt_to_utf8(cpts[i]); + } + + int64_t piece_code = (static_cast(cpts[0]) << 32) | suffix_to_id[remaining]; + to_suffix_id_[piece_code] = num_pieces; + + // Count number of pieces for this suffix + int32_t pieces_for_suffix = 1; // sentinel row + for (int32_t piece_length = static_cast(cpts.size()); piece_length > 0; --piece_length) { + std::string piece; + for (int32_t i = 0; i < piece_length; ++i) { + piece += unicode_cpt_to_utf8(cpts[i]); + } + if (suffix_to_score.find(piece) != suffix_to_score.end()) { + pieces_for_suffix++; + } + } + num_pieces += pieces_for_suffix; + } else { + num_pieces++; // Empty suffix contributes one piece (sentinel row) + } + } + + // Build flattened table + table_.resize(num_pieces, std::vector(4, 0)); + int32_t table_idx = 0; + + for (const auto & suffix : suffixes) { + // Add all prefixes of the suffix to the table (in decreasing order of length) + std::vector cpts = unicode_cpts_from_utf8(suffix); + for (int32_t piece_length = static_cast(cpts.size()); piece_length > 0; --piece_length) { + std::string piece; + for (int32_t i = 0; i < piece_length; ++i) { + piece += unicode_cpt_to_utf8(cpts[i]); + } + + auto score_it = suffix_to_score.find(piece); + if (score_it == suffix_to_score.end()) { + continue; + } + + table_[table_idx][TABLE_PIECE_LENGTH] = piece_length; + auto token_it = token_to_id.find(piece); + table_[table_idx][TABLE_TOKEN_ID] = (token_it != token_to_id.end()) ? token_it->second : -1; + + float score = score_it->second; + table_[table_idx][TABLE_SCORE] = std::isfinite(score) ? 
+ static_cast(std::round(score * 1e4)) : INVALID_SCORE; + table_[table_idx][TABLE_PIECE_ID] = suffix_to_id[piece]; + + table_idx++; + } + + // Add sentinel row + table_[table_idx][TABLE_PIECE_LENGTH] = 1; + table_[table_idx][TABLE_TOKEN_ID] = -1; + table_[table_idx][TABLE_SCORE] = UNKNOWN_SCORE; + table_idx++; + } + } + + std::vector encode(const std::string & text) const { + std::vector unicode_data = unicode_cpts_from_utf8(text); + // Skip the first code point if it is a BOM (Byte Order Mark) + if (!unicode_data.empty() && unicode_data[0] == 0xFEFF) { + unicode_data.erase(unicode_data.begin()); + } + + if (unicode_data.empty()) { + return {}; + } + + const size_t data_len = unicode_data.size(); + + // Initialize scores array (dynamic programming) + std::vector scores(data_len + 1, static_cast(1) << 60); + scores[data_len] = 0; + + // Path array to track best tokenization + std::vector> path(data_len + 1, std::vector(3, 0)); + + int32_t suffix_id = 0; + + // Process from end to beginning + for (int i = static_cast(data_len) - 1; i >= 0; --i) { + uint32_t c = unicode_data[i]; + + // Find next suffix ID + for (size_t p = suffix_id; p < table_.size(); ++p) { + int64_t piece_code = (static_cast(c) << 32) | table_[p][TABLE_PIECE_ID]; + auto it = to_suffix_id_.find(piece_code); + suffix_id = (it != to_suffix_id_.end()) ? it->second : 0; + + if (suffix_id > 0 || table_[p][TABLE_SCORE] == UNKNOWN_SCORE) { + break; + } + } + + // Update best path + for (size_t p = suffix_id; p < table_.size(); ++p) { + int32_t score = table_[p][TABLE_SCORE]; + if (score > INVALID_SCORE) { + int32_t piece_length = table_[p][TABLE_PIECE_LENGTH]; + int64_t s = scores[i + piece_length] - score; + + if (s < scores[i]) { + scores[i] = s; + path[i][PATH_TOKEN_LENGTH] = piece_length; + path[i][PATH_TOKEN_ID] = table_[p][TABLE_TOKEN_ID]; + path[i][PATH_NUM_TOKENS] = path[i + piece_length][PATH_NUM_TOKENS] + 1; + + if (score == UNKNOWN_SCORE) { + // Add UTF-8 byte count + path[i][PATH_NUM_TOKENS] += (c >= 0x80) + (c >= 0x800) + (c >= 0x10000); + } + } + } + + if (score == UNKNOWN_SCORE) { + break; + } + } + } + + // Decode the best path + std::vector token_ids; + token_ids.reserve(path[0][PATH_NUM_TOKENS]); + + int pos = 0; + while (pos < static_cast(data_len)) { + if (path[pos][PATH_TOKEN_ID] >= 0) { + token_ids.push_back(path[pos][PATH_TOKEN_ID]); + } else { + // Fall back to byte tokens + uint32_t c = unicode_data[pos]; + int s = 1 + (c >= 0x80) + (c >= 0x800) + (c >= 0x10000); + + for (int i = 0; i < s; ++i) { + uint8_t b; + if (s == 1) { + b = c; + } else { + if (i == 0) { + b = (0xF00 >> s) & 0xFF; + } else { + b = 0x80; + } + } + token_ids.push_back(bytes_[b | ((c >> ((s - i - 1) * 6)) & 0x3F)]); + } + } + + assert(path[pos][PATH_TOKEN_LENGTH] > 0); + pos += path[pos][PATH_TOKEN_LENGTH]; + } + + return token_ids; + } +private: + // Constants for table structure + static constexpr int32_t TABLE_PIECE_LENGTH = 0; + static constexpr int32_t TABLE_TOKEN_ID = 1; + static constexpr int32_t TABLE_SCORE = 2; + static constexpr int32_t TABLE_PIECE_ID = 3; + + // Constants for path array + static constexpr int32_t PATH_TOKEN_LENGTH = 0; + static constexpr int32_t PATH_TOKEN_ID = 1; + static constexpr int32_t PATH_NUM_TOKENS = 2; + + // Score constants + static constexpr int32_t INVALID_SCORE = -20000000; + static constexpr int32_t UNKNOWN_SCORE = -10000000; + + // List of tokens in the vocabulary + std::vector tokens_; + + // Mapping from byte code point to token ID (for byte fallback) + std::vector bytes_; + + // Mapping from 
piece code to suffix ID + std::unordered_map to_suffix_id_; + + // Flattened table representing the Trie structure + // Each row contains: [piece_length, token_id, score, piece_id] + std::vector> table_; +}; + +struct llm_tokenizer_plamo2_session { + llm_tokenizer_plamo2_session(const llm_tokenizer_plamo2 & tokenizer) : tokenizer(tokenizer) {} + + void tokenize(const std::string & text, std::vector & output) { + std::vector tokens = tokenizer.encode(text); + output.insert(output.end(), tokens.begin(), tokens.end()); + } + +private: + const llm_tokenizer_plamo2 & tokenizer; +}; + // // impl // @@ -1269,6 +1557,7 @@ struct llama_vocab::impl { bool add_space_prefix = false; bool add_bos = false; bool add_eos = false; + bool add_sep = false; bool ignore_merges = false; bool clean_spaces = false; // clean_up_tokenization_spaces bool remove_extra_whitespaces = false; @@ -1421,6 +1710,8 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) { special_sep_id = 102; special_pad_id = 0; special_mask_id = 103; + + add_sep = true; } else if (tokenizer_model == "gpt2") { type = LLAMA_VOCAB_TYPE_BPE; @@ -1493,6 +1784,16 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) { special_unk_id = LLAMA_TOKEN_NULL; special_sep_id = LLAMA_TOKEN_NULL; special_pad_id = LLAMA_TOKEN_NULL; + } else if (tokenizer_model == "plamo2") { + type = LLAMA_VOCAB_TYPE_PLAMO2; + + // PLaMo-2 default special tokens (these will be overridden by model config) + special_bos_id = 1; // <|plamo:bos|> + special_eos_id = 2; // <|plamo:eos|> + special_unk_id = 0; // <|plamo:unk|> + special_sep_id = LLAMA_TOKEN_NULL; + special_pad_id = 3; // <|plamo:pad|> + special_mask_id = LLAMA_TOKEN_NULL; } else { throw std::runtime_error(format("unknown tokenizer: '%s'", tokenizer_model.c_str())); } @@ -1508,7 +1809,10 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) { tokenizer_pre == "llama-v3" || tokenizer_pre == "llama-bpe"|| tokenizer_pre == "falcon3" || - tokenizer_pre == "pixtral") { + tokenizer_pre == "falcon-h1" || + tokenizer_pre == "pixtral" || + tokenizer_pre == "midm-2.0" || + tokenizer_pre == "lfm2") { pre_type = LLAMA_VOCAB_PRE_TYPE_LLAMA3; ignore_merges = true; add_bos = true; @@ -1539,12 +1843,17 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) { tokenizer_pre == "jina-es" || tokenizer_pre == "jina-de" || tokenizer_pre == "gigachat" || - tokenizer_pre == "jina-v1-en" || tokenizer_pre == "jina-v2-es" || tokenizer_pre == "jina-v2-de" || + tokenizer_pre == "a.x-4.0" || + tokenizer_pre == "mellum") { + pre_type = LLAMA_VOCAB_PRE_TYPE_GPT2; + } else if ( + tokenizer_pre == "jina-v1-en" || tokenizer_pre == "jina-v2-code" || tokenizer_pre == "roberta-bpe") { pre_type = LLAMA_VOCAB_PRE_TYPE_GPT2; + add_sep = true; } else if ( tokenizer_pre == "refact") { pre_type = LLAMA_VOCAB_PRE_TYPE_REFACT; @@ -1607,6 +1916,9 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) { } else if ( tokenizer_pre == "exaone") { pre_type = LLAMA_VOCAB_PRE_TYPE_EXAONE; + } else if ( + tokenizer_pre == "exaone4") { + pre_type = LLAMA_VOCAB_PRE_TYPE_GPT2; } else if ( tokenizer_pre == "chameleon") { pre_type = LLAMA_VOCAB_PRE_TYPE_CHAMELEON; @@ -1639,6 +1951,18 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) { tokenizer_pre == "seed-coder") { pre_type = LLAMA_VOCAB_PRE_TYPE_SEED_CODER; clean_spaces = false; + } else if ( + tokenizer_pre == "hunyuan") { + pre_type = LLAMA_VOCAB_PRE_TYPE_HUNYUAN; + clean_spaces = false; + 
} else if ( + tokenizer_pre == "hunyuan-dense") { + pre_type = LLAMA_VOCAB_PRE_TYPE_HUNYUAN_DENSE; + clean_spaces = false; + } else if ( + tokenizer_pre == "kimi-k2") { + pre_type = LLAMA_VOCAB_PRE_TYPE_KIMI_K2; + clean_spaces = false; } else { LLAMA_LOG_WARN("%s: missing or unrecognized pre-tokenizer type, using: 'default'\n", __func__); pre_type = LLAMA_VOCAB_PRE_TYPE_DEFAULT; @@ -1655,6 +1979,7 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) { clean_spaces = true; add_bos = true; add_eos = false; + add_sep = true; } else if (type == LLAMA_VOCAB_TYPE_UGM) { pre_type = LLAMA_VOCAB_PRE_TYPE_DEFAULT; add_bos = false; @@ -1791,7 +2116,7 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) { } } - // Handle add_bos and add_eos + // Handle add_bos, add_eos and add_sep { bool temp = true; @@ -1801,6 +2126,9 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) { if (ml.get_key(LLM_KV_TOKENIZER_ADD_EOS, temp, false)) { add_eos = temp; } + if (ml.get_key(LLM_KV_TOKENIZER_ADD_SEP, temp, false)) { + add_sep = temp; + } } // auto-detect special tokens by text @@ -1819,6 +2147,7 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) { || t.first == "" || t.first == "_" || t.first == "<|end▁of▁sentence|>" // DeepSeek + || t.first == "" // smoldocling ) { special_eot_id = t.second; if ((id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) { @@ -1852,6 +2181,7 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) { || t.first == "<|fim▁begin|>" // DeepSeek || t.first == "
"
                         || t.first == "▁
"          // CodeLlama
+                        || t.first == "<|code_prefix|>" // GLM-4.5
                         ) {
                     special_fim_pre_id = t.second;
                     if ((id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
@@ -1871,6 +2201,7 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
                         || t.first == "<|fim▁hole|>" // DeepSeek
                         || t.first == ""
                         || t.first == "▁"         // CodeLlama
+                        || t.first == "<|code_suffix|>" // GLM-4.5
                         ) {
                     special_fim_suf_id = t.second;
                     if ((id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
@@ -1890,6 +2221,7 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
                         || t.first == "<|fim▁end|>"  // DeepSeek
                         || t.first == ""
                         || t.first == "▁"         // CodeLlama
+                        || t.first == "<|code_middle|>" // GLM-4.5
                         ) {
                     special_fim_mid_id = t.second;
                     if ((id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
@@ -1972,11 +2304,15 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
                     || t.first == "<|eot_id|>"
                     || t.first == "<|im_end|>"
                     || t.first == "<|end|>"
+                    || t.first == "<|return|>" // o200k_harmony
+                    || t.first == "<|call|>"   // o200k_harmony
                     || t.first == ""
                     || t.first == "<|endoftext|>"
                     || t.first == "<|eom_id|>"
                     || t.first == ""
                     || t.first == "_"
+                    || t.first == "<|end_of_text|>"
+                    || t.first == "<end_of_utterance>" // smoldocling
                ) {
                 special_eog_ids.insert(t.second);
                 if ((id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
@@ -1993,6 +2329,13 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
             }
         }
 
+        // @ngxson : quick hack for gpt-oss, always render these tokens
+        for (const auto & t : token_to_id) {
+            if (t.first == "<|channel|>" || t.first == "<|message|>" || t.first == "<|start|>") {
+                id_to_token[t.second].attr = LLAMA_TOKEN_ATTR_USER_DEFINED;
+            }
+        }
+
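Reviewer note on the attribute flip above: detokenizers generally hide CONTROL tokens unless special rendering is requested, but always emit USER_DEFINED tokens as literal text, which is what the gpt-oss (harmony) format needs. A minimal self-contained sketch of that rendering rule — `token_attr` and `render_piece` are hypothetical names for illustration, not the library's API:

```cpp
#include <cstdint>
#include <string>

// Hypothetical mirror of the per-token attribute check a detokenizer applies.
enum token_attr : uint32_t {
    ATTR_CONTROL      = 1u << 0, // hidden unless special rendering is enabled
    ATTR_USER_DEFINED = 1u << 1, // always rendered as literal text
};

// Returns the text to emit for a token, or "" when it should be hidden.
static std::string render_piece(const std::string & text, uint32_t attr, bool render_special) {
    if ((attr & ATTR_CONTROL) && !render_special) {
        return ""; // control tokens are dropped in normal decoding
    }
    return text;   // user-defined (and ordinary) tokens always come through
}
```

Forcing `<|channel|>`, `<|message|>` and `<|start|>` to USER_DEFINED therefore keeps them visible to a harmony-format parser even when the caller did not ask for special-token rendering.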
         // sanity checks
         if (special_eos_id != LLAMA_TOKEN_NULL && special_eog_ids.count(special_eos_id) == 0) {
             special_eog_ids.insert(special_eos_id);
@@ -2008,6 +2351,36 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
             special_eog_ids.insert(special_eom_id);
             LLAMA_LOG_WARN("%s: special_eom_id is not in special_eog_ids - the tokenizer config may be incorrect\n", __func__);
         }
+
+        // TODO: workaround for o200k_harmony tokenizer: the "<|end|>" token should not be EOG
+        //       we don't have a good way to detect this, so for now, if we have "<|return|>" and "<|call|>" tokens,
+        //       we remove the "<|end|>" token from the EOG list
+        {
+            bool has_return = false;
+            bool has_call   = false;
+            bool has_end    = false;
+
+            llama_token end_id = LLAMA_TOKEN_NULL;
+
+            LLAMA_LOG_INFO("%s: printing all EOG tokens:\n", __func__);
+            for (auto tid : special_eog_ids) {
+                LLAMA_LOG_INFO("%s:   - %d ('%s')\n", __func__, tid, id_to_token[tid].text.c_str());
+
+                if (id_to_token[tid].text == "<|return|>") {
+                    has_return = true;
+                } else if (id_to_token[tid].text == "<|call|>") {
+                    has_call = true;
+                } else if (id_to_token[tid].text == "<|end|>") {
+                    has_end = true;
+                    end_id = tid;
+                }
+            }
+
+            if (has_return && has_call && has_end) {
+                special_eog_ids.erase(end_id);
+                LLAMA_LOG_WARN("%s: special_eog_ids contains both '<|return|>' and '<|call|>' tokens, removing '<|end|>' token from EOG list\n", __func__);
+            }
+        }
     }
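Isolated from the vocab loader, the workaround above reduces to a small set operation. A sketch under the same assumptions, using a plain `std::set<std::string>` of EOG token texts rather than token ids:

```cpp
#include <set>
#include <string>

// If the vocab defines the harmony stop tokens <|return|> and <|call|>,
// then <|end|> acts as a message separator rather than end-of-generation,
// so it must be dropped from the EOG set.
static void prune_harmony_eog(std::set<std::string> & eog) {
    if (eog.count("<|return|>") && eog.count("<|call|>")) {
        eog.erase("<|end|>"); // no-op when <|end|> is not present
    }
}
```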
 
     // build special tokens cache
@@ -2049,9 +2422,9 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
     //NOTE: Per token attributes are missing from the GGUF file.
     //TODO: Extract attributes from GGUF file.
     {
-        auto _contains_any = [] (const std::string & str, const std::vector<std::string> & substrs) -> bool {
+        auto _contains_any = [] (const std::string & str, const std::vector<const char *> & substrs) -> bool {
             for (const auto & substr : substrs) {
-                if (str.find(substr) < std::string::npos) {
+                if (str.find(substr) != std::string::npos) {
                     return true;
                 }
             }
@@ -2070,9 +2443,11 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
 
         std::string model_name;
         std::string tokenizer_pre;
+        std::string general_arch;
 
         ml.get_key(LLM_KV_GENERAL_NAME,  model_name,    false);
         ml.get_key(LLM_KV_TOKENIZER_PRE, tokenizer_pre, false);
+        ml.get_key(LLM_KV_GENERAL_ARCHITECTURE, general_arch, false);
 
         // model name to lowercase
         std::transform(model_name.begin(), model_name.end(), model_name.begin(),
@@ -2081,9 +2456,16 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
             }
         );
 
-        // set attributes by model/tokenizer name
-        if (_contains_any(tokenizer_pre, {"jina-v2-de", "jina-v2-es", "jina-v2-code"})) {
-            _set_token_attr("<mask>", LLAMA_TOKEN_ATTR_LSTRIP, true);
+        // set attributes by model/tokenizer/architecture name
+        if (false
+                || _contains_any(tokenizer_pre, {"jina-v2-de", "jina-v2-es", "jina-v2-code"})
+                || _contains_any(general_arch, {"nomic-bert-moe"})
+           ) {
+            if (token_to_id.count("<mask>") == 0) {
+                LLAMA_LOG_WARN("%s: Mask token is missing in vocab, please reconvert model!\n", __func__);
+            } else {
+                _set_token_attr("<mask>", LLAMA_TOKEN_ATTR_LSTRIP, true);
+            }
         } else if (_contains_any(model_name, {"phi-3", "phi3"})) {
             for (auto id : cache_special_tokens) {
                 _set_tokenid_attr(id, LLAMA_TOKEN_ATTR_RSTRIP, true);
@@ -2104,13 +2486,14 @@ enum llama_vocab_type llama_vocab::impl::get_type() const {
 
 std::string llama_vocab::impl::type_name() const{
     switch (type) {
-        case LLAMA_VOCAB_TYPE_NONE: return "no vocab";
-        case LLAMA_VOCAB_TYPE_SPM:  return "SPM";
-        case LLAMA_VOCAB_TYPE_BPE:  return "BPE";
-        case LLAMA_VOCAB_TYPE_WPM:  return "WPM";
-        case LLAMA_VOCAB_TYPE_UGM:  return "UGM";
-        case LLAMA_VOCAB_TYPE_RWKV: return "RWKV";
-        default:                    return "unknown";
+        case LLAMA_VOCAB_TYPE_NONE:   return "no vocab";
+        case LLAMA_VOCAB_TYPE_SPM:    return "SPM";
+        case LLAMA_VOCAB_TYPE_BPE:    return "BPE";
+        case LLAMA_VOCAB_TYPE_WPM:    return "WPM";
+        case LLAMA_VOCAB_TYPE_UGM:    return "UGM";
+        case LLAMA_VOCAB_TYPE_RWKV:   return "RWKV";
+        case LLAMA_VOCAB_TYPE_PLAMO2: return "PLaMo2";
+        default:                      return "unknown";
     }
 }
 
@@ -2193,6 +2576,9 @@ void llama_vocab::impl::init_tokenizer(enum llama_vocab_type type) {
         case LLAMA_VOCAB_TYPE_RWKV:
             tokenizer = std::make_unique<llm_tokenizer_rwkv>(vocab);
             break;
+        case LLAMA_VOCAB_TYPE_PLAMO2:
+            tokenizer = std::make_unique<llm_tokenizer_plamo2>(vocab);
+            break;
         default:
             GGML_ABORT("unsupported vocab type");
     }
@@ -2525,6 +2911,23 @@ std::vector<llama_token> llama_vocab::impl::tokenize(
                     if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT) {
                         std::string text = fragment.raw_text.substr(fragment.offset, fragment.length);
 
+#ifdef PRETOKENIZERDEBUG
+                        LLAMA_LOG_WARN("TT: (%ld %ld %ld) '%s'\n", text.length(), fragment.offset, fragment.length, text.c_str());
+#endif
+
+                        session.tokenize(text, output);
+                    } else { // if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_TOKEN)
+                        output.push_back(fragment.token);
+                    }
+                }
+            } break;
+        case LLAMA_VOCAB_TYPE_PLAMO2:
+            {
+                llm_tokenizer_plamo2_session session(*static_cast<const llm_tokenizer_plamo2 *>(tokenizer.get()));
+                for (const auto & fragment : fragment_buffer) {
+                    if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT) {
+                        std::string text = fragment.raw_text.substr(fragment.offset, fragment.length);
+
 #ifdef PRETOKENIZERDEBUG
                         LLAMA_LOG_WARN("TT: (%ld %ld %ld) '%s'\n", text.length(), fragment.offset, fragment.length, text.c_str());
 #endif
@@ -2553,6 +2956,10 @@ int32_t llama_vocab::impl::token_to_piece(llama_token token, char * buf, int32_t
     // copy piece chars to output text buffer
     // skip up to 'lstrip' leading spaces before copying
     auto _try_copy = [=] (const char * token, size_t size) -> int32_t {
+        if (size >= static_cast<size_t>(std::numeric_limits<int32_t>::max())) {
+            GGML_ABORT("invalid token size: %zu exceeds int32_t limit", size);
+        }
+
         for (int32_t i = 0; i < lstrip && size && *token == ' '; ++i) {
             token++;
             size--;
@@ -2619,6 +3026,24 @@ int32_t llama_vocab::impl::token_to_piece(llama_token token, char * buf, int32_t
                 memcpy(buf, result.data(), result.size());
                 return (int)result.size();
             }
+            case LLAMA_VOCAB_TYPE_PLAMO2: {
+                // PLaMo-2 uses similar token handling as BPE/SPM
+                if (vocab.is_byte(token)) {
+                    // Handle byte tokens like <0xXX>
+                    if (token_text.length() == 6 && token_text.substr(0, 3) == "<0x" && token_text.back() == '>') {
+                        int hex_val = std::stoi(token_text.substr(3, 2), nullptr, 16);
+                        if (length < 1) {
+                            return -1;
+                        }
+                        buf[0] = static_cast<char>(hex_val);
+                        return 1;
+                    }
+                }
+
+                // Normal token - just copy the text
+                std::string result = token_text;
+                return _try_copy(result.data(), result.size());
+            }
             default:
                 GGML_ABORT("fatal error");
         }
@@ -2749,26 +3174,26 @@ void llama_vocab::impl::print_info() const {
     LLAMA_LOG_INFO("%s: n_merges         = %u\n",     __func__, (uint32_t) bpe_ranks.size());
 
     // special tokens
-    if (special_bos_id  != LLAMA_TOKEN_NULL)    { LLAMA_LOG_INFO( "%s: BOS token        = %d '%s'\n", __func__, special_bos_id,     id_to_token[special_bos_id].text.c_str() );  }
-    if (special_eos_id  != LLAMA_TOKEN_NULL)    { LLAMA_LOG_INFO( "%s: EOS token        = %d '%s'\n", __func__, special_eos_id,     id_to_token[special_eos_id].text.c_str() );  }
-    if (special_eot_id  != LLAMA_TOKEN_NULL)    { LLAMA_LOG_INFO( "%s: EOT token        = %d '%s'\n", __func__, special_eot_id,     id_to_token[special_eot_id].text.c_str() );  }
-    if (special_eom_id  != LLAMA_TOKEN_NULL)    { LLAMA_LOG_INFO( "%s: EOM token        = %d '%s'\n", __func__, special_eom_id,     id_to_token[special_eom_id].text.c_str() );  }
-    if (special_unk_id  != LLAMA_TOKEN_NULL)    { LLAMA_LOG_INFO( "%s: UNK token        = %d '%s'\n", __func__, special_unk_id,     id_to_token[special_unk_id].text.c_str() );  }
-    if (special_sep_id  != LLAMA_TOKEN_NULL)    { LLAMA_LOG_INFO( "%s: SEP token        = %d '%s'\n", __func__, special_sep_id,     id_to_token[special_sep_id].text.c_str() );  }
-    if (special_pad_id  != LLAMA_TOKEN_NULL)    { LLAMA_LOG_INFO( "%s: PAD token        = %d '%s'\n", __func__, special_pad_id,     id_to_token[special_pad_id].text.c_str() );  }
-    if (special_mask_id != LLAMA_TOKEN_NULL)    { LLAMA_LOG_INFO( "%s: MASK token       = %d '%s'\n", __func__, special_mask_id,    id_to_token[special_mask_id].text.c_str() ); }
+    if (special_bos_id  != LLAMA_TOKEN_NULL)    { LLAMA_LOG_INFO( "%s: BOS token        = %d '%s'\n", __func__, special_bos_id,     id_to_token.at(special_bos_id).text.c_str() );  }
+    if (special_eos_id  != LLAMA_TOKEN_NULL)    { LLAMA_LOG_INFO( "%s: EOS token        = %d '%s'\n", __func__, special_eos_id,     id_to_token.at(special_eos_id).text.c_str() );  }
+    if (special_eot_id  != LLAMA_TOKEN_NULL)    { LLAMA_LOG_INFO( "%s: EOT token        = %d '%s'\n", __func__, special_eot_id,     id_to_token.at(special_eot_id).text.c_str() );  }
+    if (special_eom_id  != LLAMA_TOKEN_NULL)    { LLAMA_LOG_INFO( "%s: EOM token        = %d '%s'\n", __func__, special_eom_id,     id_to_token.at(special_eom_id).text.c_str() );  }
+    if (special_unk_id  != LLAMA_TOKEN_NULL)    { LLAMA_LOG_INFO( "%s: UNK token        = %d '%s'\n", __func__, special_unk_id,     id_to_token.at(special_unk_id).text.c_str() );  }
+    if (special_sep_id  != LLAMA_TOKEN_NULL)    { LLAMA_LOG_INFO( "%s: SEP token        = %d '%s'\n", __func__, special_sep_id,     id_to_token.at(special_sep_id).text.c_str() );  }
+    if (special_pad_id  != LLAMA_TOKEN_NULL)    { LLAMA_LOG_INFO( "%s: PAD token        = %d '%s'\n", __func__, special_pad_id,     id_to_token.at(special_pad_id).text.c_str() );  }
+    if (special_mask_id != LLAMA_TOKEN_NULL)    { LLAMA_LOG_INFO( "%s: MASK token       = %d '%s'\n", __func__, special_mask_id,    id_to_token.at(special_mask_id).text.c_str() ); }
 
-    if (linefeed_id != LLAMA_TOKEN_NULL)        { LLAMA_LOG_INFO( "%s: LF token         = %d '%s'\n", __func__, linefeed_id,        id_to_token[linefeed_id].text.c_str() ); }
+    if (linefeed_id != LLAMA_TOKEN_NULL)        { LLAMA_LOG_INFO( "%s: LF token         = %d '%s'\n", __func__, linefeed_id,        id_to_token.at(linefeed_id).text.c_str() ); }
 
-    if (special_fim_pre_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: FIM PRE token    = %d '%s'\n", __func__, special_fim_pre_id, id_to_token[special_fim_pre_id].text.c_str() ); }
-    if (special_fim_suf_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: FIM SUF token    = %d '%s'\n", __func__, special_fim_suf_id, id_to_token[special_fim_suf_id].text.c_str() ); }
-    if (special_fim_mid_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: FIM MID token    = %d '%s'\n", __func__, special_fim_mid_id, id_to_token[special_fim_mid_id].text.c_str() ); }
-    if (special_fim_pad_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: FIM PAD token    = %d '%s'\n", __func__, special_fim_pad_id, id_to_token[special_fim_pad_id].text.c_str() ); }
-    if (special_fim_rep_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: FIM REP token    = %d '%s'\n", __func__, special_fim_rep_id, id_to_token[special_fim_rep_id].text.c_str() ); }
-    if (special_fim_sep_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: FIM SEP token    = %d '%s'\n", __func__, special_fim_sep_id, id_to_token[special_fim_sep_id].text.c_str() ); }
+    if (special_fim_pre_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: FIM PRE token    = %d '%s'\n", __func__, special_fim_pre_id, id_to_token.at(special_fim_pre_id).text.c_str() ); }
+    if (special_fim_suf_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: FIM SUF token    = %d '%s'\n", __func__, special_fim_suf_id, id_to_token.at(special_fim_suf_id).text.c_str() ); }
+    if (special_fim_mid_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: FIM MID token    = %d '%s'\n", __func__, special_fim_mid_id, id_to_token.at(special_fim_mid_id).text.c_str() ); }
+    if (special_fim_pad_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: FIM PAD token    = %d '%s'\n", __func__, special_fim_pad_id, id_to_token.at(special_fim_pad_id).text.c_str() ); }
+    if (special_fim_rep_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: FIM REP token    = %d '%s'\n", __func__, special_fim_rep_id, id_to_token.at(special_fim_rep_id).text.c_str() ); }
+    if (special_fim_sep_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: FIM SEP token    = %d '%s'\n", __func__, special_fim_sep_id, id_to_token.at(special_fim_sep_id).text.c_str() ); }
 
     for (const auto & id : special_eog_ids) {
-        LLAMA_LOG_INFO( "%s: EOG token        = %d '%s'\n", __func__, id, id_to_token[id].text.c_str() );
+        LLAMA_LOG_INFO( "%s: EOG token        = %d '%s'\n", __func__, id, id_to_token.at(id).text.c_str() );
     }
 
     LLAMA_LOG_INFO("%s: max token length = %d\n", __func__, max_token_len);
@@ -2863,6 +3288,12 @@ llama_token llama_vocab::byte_to_token(uint8_t ch) const {
         case LLAMA_VOCAB_TYPE_BPE: {
             return pimpl->token_to_id.at(unicode_byte_to_utf8(ch));
         }
+        case LLAMA_VOCAB_TYPE_PLAMO2: {
+            // PLaMo-2 uses byte tokens in format <0xXX>
+            char hex_str[8];
+            snprintf(hex_str, sizeof(hex_str), "<0x%02X>", ch);
+            return pimpl->token_to_id.at(hex_str);
+        }
         default:
             GGML_ABORT("fatal error");
     }
@@ -2964,6 +3395,10 @@ llama_token llama_vocab::token_fim_sep() const {
     return pimpl->special_fim_sep_id;
 }
 
+llama_token llama_vocab::token_mask() const {
+    return pimpl->special_mask_id;
+}
+
 bool llama_vocab::get_add_space_prefix() const {
     return pimpl->add_space_prefix;
 }
@@ -2976,6 +3411,10 @@ bool llama_vocab::get_add_eos() const {
     return pimpl->add_eos;
 }
 
+bool llama_vocab::get_add_sep() const {
+    return pimpl->add_sep;
+}
+
 bool llama_vocab::get_ignore_merges() const {
     return pimpl->ignore_merges;
 }
@@ -3036,6 +3475,11 @@ int32_t llama_vocab::tokenize(
                         bool   add_special,
                         bool   parse_special) const {
     auto res = tokenize(std::string(text, text_len), add_special, parse_special);
+    if (res.size() >= static_cast<size_t>(std::numeric_limits<int32_t>::max())) {
+        LLAMA_LOG_ERROR("%s: tokenization result size %zu exceeds int32_t limit\n", __func__, res.size());
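+        // INT32_MIN is a distinct overflow sentinel; the "too many tokens" path below returns -(int) res.size()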
+        return std::numeric_limits<int32_t>::min();
+    }
+
     if (n_tokens_max < (int) res.size()) {
         // LLAMA_LOG_ERROR("%s: too many tokens\n", __func__);
         return -((int) res.size());
@@ -3167,6 +3611,10 @@ bool llama_vocab_get_add_eos(const struct llama_vocab * vocab) {
     return vocab->get_add_eos();
 }
 
+bool llama_vocab_get_add_sep(const struct llama_vocab * vocab) {
+    return vocab->get_add_sep();
+}
+
 llama_token llama_vocab_fim_pre(const struct llama_vocab * vocab) {
     return vocab->token_fim_pre();
 }
@@ -3191,6 +3639,10 @@ llama_token llama_vocab_fim_sep(const struct llama_vocab * vocab) {
     return vocab->token_fim_sep();
 }
 
+llama_token llama_vocab_mask(const struct llama_vocab* vocab) {
+    return vocab->token_mask();
+}
+
 // deprecated
 const char * llama_token_get_text(const struct llama_vocab * vocab, llama_token token) {
     return llama_vocab_get_text(vocab, token);
@@ -3327,4 +3779,3 @@ int32_t llama_detokenize(
                         bool   unparse_special) {
     return vocab->detokenize(tokens, n_tokens, text, text_len_max, remove_special, unparse_special);
 }
-
diff --git a/llama/llama.cpp/src/llama-vocab.h b/llama/llama.cpp/src/llama-vocab.h
index daa6cf308..61b812421 100644
--- a/llama/llama.cpp/src/llama-vocab.h
+++ b/llama/llama.cpp/src/llama-vocab.h
@@ -6,6 +6,49 @@
 #include <vector>
 #include <memory>
 
+// pre-tokenization types
+enum llama_vocab_pre_type {
+    LLAMA_VOCAB_PRE_TYPE_DEFAULT        = 0,
+    LLAMA_VOCAB_PRE_TYPE_LLAMA3         = 1,
+    LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_LLM   = 2,
+    LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_CODER = 3,
+    LLAMA_VOCAB_PRE_TYPE_FALCON         = 4,
+    LLAMA_VOCAB_PRE_TYPE_MPT            = 5,
+    LLAMA_VOCAB_PRE_TYPE_STARCODER      = 6,
+    LLAMA_VOCAB_PRE_TYPE_GPT2           = 7,
+    LLAMA_VOCAB_PRE_TYPE_REFACT         = 8,
+    LLAMA_VOCAB_PRE_TYPE_COMMAND_R      = 9,
+    LLAMA_VOCAB_PRE_TYPE_STABLELM2      = 10,
+    LLAMA_VOCAB_PRE_TYPE_QWEN2          = 11,
+    LLAMA_VOCAB_PRE_TYPE_OLMO           = 12,
+    LLAMA_VOCAB_PRE_TYPE_DBRX           = 13,
+    LLAMA_VOCAB_PRE_TYPE_SMAUG          = 14,
+    LLAMA_VOCAB_PRE_TYPE_PORO           = 15,
+    LLAMA_VOCAB_PRE_TYPE_CHATGLM3       = 16,
+    LLAMA_VOCAB_PRE_TYPE_CHATGLM4       = 17,
+    LLAMA_VOCAB_PRE_TYPE_VIKING         = 18,
+    LLAMA_VOCAB_PRE_TYPE_JAIS           = 19,
+    LLAMA_VOCAB_PRE_TYPE_TEKKEN         = 20,
+    LLAMA_VOCAB_PRE_TYPE_SMOLLM         = 21,
+    LLAMA_VOCAB_PRE_TYPE_CODESHELL      = 22,
+    LLAMA_VOCAB_PRE_TYPE_BLOOM          = 23,
+    LLAMA_VOCAB_PRE_TYPE_GPT3_FINNISH   = 24,
+    LLAMA_VOCAB_PRE_TYPE_EXAONE         = 25,
+    LLAMA_VOCAB_PRE_TYPE_CHAMELEON      = 26,
+    LLAMA_VOCAB_PRE_TYPE_MINERVA        = 27,
+    LLAMA_VOCAB_PRE_TYPE_DEEPSEEK3_LLM  = 28,
+    LLAMA_VOCAB_PRE_TYPE_GPT4O          = 29,
+    LLAMA_VOCAB_PRE_TYPE_SUPERBPE       = 30,
+    LLAMA_VOCAB_PRE_TYPE_TRILLION       = 31,
+    LLAMA_VOCAB_PRE_TYPE_BAILINGMOE     = 32,
+    LLAMA_VOCAB_PRE_TYPE_LLAMA4         = 33,
+    LLAMA_VOCAB_PRE_TYPE_PIXTRAL        = 34,
+    LLAMA_VOCAB_PRE_TYPE_SEED_CODER     = 35,
+    LLAMA_VOCAB_PRE_TYPE_HUNYUAN        = 36,
+    LLAMA_VOCAB_PRE_TYPE_KIMI_K2        = 37,
+    LLAMA_VOCAB_PRE_TYPE_HUNYUAN_DENSE  = 38,
+};
+
 struct LLM_KV;
 struct llama_model_loader;
 
@@ -59,6 +102,7 @@ struct llama_vocab {
     llama_token token_sep() const;
     llama_token token_nl () const;
     llama_token token_pad() const;
+    llama_token token_mask() const;
 
     llama_token token_prefix() const;
     llama_token token_middle() const;
@@ -74,6 +118,7 @@ struct llama_vocab {
     bool get_add_space_prefix          () const;
     bool get_add_bos                   () const;
     bool get_add_eos                   () const;
+    bool get_add_sep                   () const;
     bool get_ignore_merges             () const;
     bool get_clean_spaces              () const;
     bool get_remove_extra_whitespaces  () const;
diff --git a/llama/llama.cpp/src/llama.cpp b/llama/llama.cpp/src/llama.cpp
index 9fdddf7b0..34906cdb6 100644
--- a/llama/llama.cpp/src/llama.cpp
+++ b/llama/llama.cpp/src/llama.cpp
@@ -140,6 +140,11 @@ static struct llama_model * llama_model_load_from_file_impl(
         struct llama_model_params params) {
     ggml_time_init();
 
+    if (!params.vocab_only && ggml_backend_reg_count() == 0) {
+        LLAMA_LOG_ERROR("%s: no backends are loaded. hint: use ggml_backend_load() or ggml_backend_load_all() to load a backend before calling this function\n", __func__);
+        return nullptr;
+    }
+
     unsigned cur_percentage = 0;
     if (params.progress_callback == NULL) {
         params.progress_callback_user_data = &cur_percentage;
@@ -193,14 +198,18 @@ static struct llama_model * llama_model_load_from_file_impl(
 
     // if using single GPU mode, remove all except the main GPU
     if (params.split_mode == LLAMA_SPLIT_MODE_NONE) {
-        if (params.main_gpu < 0 || params.main_gpu >= (int)model->devices.size()) {
-            LLAMA_LOG_ERROR("%s: invalid value for main_gpu: %d (available devices: %d)\n", __func__, params.main_gpu, (int)model->devices.size());
-            llama_model_free(model);
-            return nullptr;
+        if (params.main_gpu < 0) {
+            model->devices.clear();
+        } else {
+            if (params.main_gpu >= (int)model->devices.size()) {
+                LLAMA_LOG_ERROR("%s: invalid value for main_gpu: %d (available devices: %zu)\n", __func__, params.main_gpu, model->devices.size());
+                llama_model_free(model);
+                return nullptr;
+            }
+            ggml_backend_dev_t main_gpu = model->devices[params.main_gpu];
+            model->devices.clear();
+            model->devices.push_back(main_gpu);
         }
-        ggml_backend_dev_t main_gpu = model->devices[params.main_gpu];
-        model->devices.clear();
-        model->devices.push_back(main_gpu);
     }
 
     for (auto * dev : model->devices) {
diff --git a/llama/llama.cpp/src/unicode.cpp b/llama/llama.cpp/src/unicode.cpp
index 73cb2b1a2..ce336a228 100644
--- a/llama/llama.cpp/src/unicode.cpp
+++ b/llama/llama.cpp/src/unicode.cpp
@@ -224,12 +224,17 @@ static inline std::wstring unicode_wstring_from_utf8(const std::string & s) {
     // disable C++17 deprecation warning for std::codecvt_utf8
 #    pragma clang diagnostic push
 #    pragma clang diagnostic ignored "-Wdeprecated-declarations"
+#elif defined(__GNUC__)
+#    pragma GCC diagnostic push
+#    pragma GCC diagnostic ignored "-Wdeprecated-declarations"
 #endif
 
     std::wstring_convert<std::codecvt_utf8<wchar_t>> conv;
 
 #if defined(__clang__)
 #    pragma clang diagnostic pop
+#elif defined(__GNUC__)
+#    pragma GCC diagnostic pop
 #endif
 
     return conv.from_bytes(s);
@@ -573,6 +578,178 @@ static std::vector<size_t> unicode_regex_split_stl(const std::string & text, con
     return bpe_offsets;
 }
 
+// K2 system regex patterns (from tokenization_kimi.py):
+// [\p{Han}]+|[^\r\n\p{L}\p{N}]?[\p{Lu}\p{Lt}\p{Lm}\p{Lo}\p{M}&&[^\p{Han}]]*[\p{Ll}\p{Lm}\p{Lo}\p{M}&&[^\p{Han}]]+(?i:'s|'t|'re|'ve|'m|'ll|'d)?|[^\r\n\p{L}\p{N}]?[\p{Lu}\p{Lt}\p{Lm}\p{Lo}\p{M}&&[^\p{Han}]]+[\p{Ll}\p{Lm}\p{Lo}\p{M}&&[^\p{Han}]]*(?i:'s|'t|'re|'ve|'m|'ll|'d)?|\p{N}{1,3}| ?[^\s\p{L}\p{N}]+[\r\n]*|\s*[\r\n]+|\s+(?!\S)|\s+
+static std::vector<size_t> unicode_regex_split_custom_kimi_k2(const std::string & text, const std::vector<size_t> & offsets) {
+    std::vector<size_t> bpe_offsets;
+    bpe_offsets.reserve(offsets.size());
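+    // note: offsets are segment lengths in code points (not byte positions), matching the other custom splitters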
+
+    const auto cpts = unicode_cpts_from_utf8(text);
+
+    size_t start = 0;
+    for (auto offset : offsets) {
+        const size_t offset_ini = start;
+        const size_t offset_end = start + offset;
+        assert(offset_end <= cpts.size());
+        start = offset_end;
+
+        static const uint32_t OUT_OF_RANGE = 0xFFFFFFFF;
+        auto _get_cpt = [&] (const size_t pos) -> uint32_t {
+            return (offset_ini <= pos && pos < offset_end) ? cpts[pos] : OUT_OF_RANGE;
+        };
+
+        auto _get_flags = [&] (const size_t pos) -> unicode_cpt_flags {
+            return (offset_ini <= pos && pos < offset_end) ? unicode_cpt_flags_from_cpt(cpts[pos]) : unicode_cpt_flags{};
+        };
+
+        size_t _prev_end = offset_ini;
+        auto _add_token = [&] (const size_t end) -> size_t {
+            assert(_prev_end <= end && end <= offset_end);
+            size_t len = end - _prev_end;
+            if (len > 0) {
+                bpe_offsets.push_back(len);
+            }
+            _prev_end = end;
+            return len;
+        };
+
+        for (size_t pos = offset_ini; pos < offset_end; /*pos++*/ ) {
+            const uint32_t cpt = _get_cpt(pos);
+            const auto flags = _get_flags(pos);
+
+            // Pattern 1: [\p{Han}]+ (Chinese characters)
+            if (unicode_cpt_is_han(cpt)) {
+                while (unicode_cpt_is_han(_get_cpt(pos))) {
+                    pos++;
+                }
+                _add_token(pos);
+                continue;
+            }
+
+            // Pattern 2 & 3: Letter words excluding Han characters with optional contractions
+            // [^\r\n\p{L}\p{N}]?[\p{Lu}\p{Lt}\p{Lm}\p{Lo}\p{M}&&[^\p{Han}]]*[\p{Ll}\p{Lm}\p{Lo}\p{M}&&[^\p{Han}]]+(?:'s|'t|'re|'ve|'m|'ll|'d)?
+            // [^\r\n\p{L}\p{N}]?[\p{Lu}\p{Lt}\p{Lm}\p{Lo}\p{M}&&[^\p{Han}]]+[\p{Ll}\p{Lm}\p{Lo}\p{M}&&[^\p{Han}]]*(?:'s|'t|'re|'ve|'m|'ll|'d)?
+            // Check if current char is a letter OR if current char could be a leading char and next char is a letter
+            bool is_letter_pattern = (flags.is_letter && !unicode_cpt_is_han(cpt)) ||
+                                     (!(cpt == '\r' || cpt == '\n' || flags.is_letter || flags.is_number) &&
+                                      _get_flags(pos + 1).is_letter && !unicode_cpt_is_han(_get_cpt(pos + 1)));
+
+            if (is_letter_pattern) {
+                // Handle optional leading non-letter/non-number character
+                bool has_leading_char = false;
+                if (!(cpt == '\r' || cpt == '\n' || flags.is_letter || flags.is_number)) {
+                    has_leading_char = true;
+                    pos++;
+                }
+
+                // Match letter sequence (excluding Han characters)
+                bool has_letters = false;
+                while (_get_flags(pos).is_letter && !unicode_cpt_is_han(_get_cpt(pos))) {
+                    has_letters = true;
+                    pos++;
+                }
+
+                // Only proceed if we found letters (after potentially skipping leading char)
+                if (has_letters || (!has_leading_char && _get_flags(pos).is_letter && !unicode_cpt_is_han(_get_cpt(pos)))) {
+                    if (!has_letters) pos++; // consume the first letter if we didn't already
+
+                    // Continue consuming letters
+                    while (_get_flags(pos).is_letter && !unicode_cpt_is_han(_get_cpt(pos))) {
+                        pos++;
+                    }
+
+                    // Check for optional contractions (?:'s|'t|'re|'ve|'m|'ll|'d)
+                    if (_get_cpt(pos) == '\'' && pos + 1 < offset_end) {
+                        uint32_t cpt_next = unicode_tolower(_get_cpt(pos + 1));
+                        if (cpt_next == 's' || cpt_next == 't' || cpt_next == 'm' || cpt_next == 'd') {
+                            pos += 2;
+                        } else if (pos + 2 < offset_end) {
+                            uint32_t cpt_next_next = unicode_tolower(_get_cpt(pos + 2));
+                            if ((cpt_next == 'r' && cpt_next_next == 'e') ||
+                                (cpt_next == 'v' && cpt_next_next == 'e') ||
+                                (cpt_next == 'l' && cpt_next_next == 'l')) {
+                                pos += 3;
+                            }
+                        }
+                    }
+
+                    _add_token(pos);
+                    continue;
+                } else if (has_leading_char) {
+                    // We consumed a leading char but found no letters, backtrack
+                    pos--;
+                }
+            }
+
+            // Pattern 4: \p{N}{1,3} (numbers 1-3 digits)
+            if (flags.is_number) {
+                size_t ini = pos;
+                while (_get_flags(pos).is_number) {
+                    if (++pos - ini >= 3) {
+                        _add_token(pos);
+                        ini = pos;
+                    }
+                }
+                _add_token(pos);
+                continue;
+            }
+
+            // Pattern 5:  ?[^\s\p{L}\p{N}]+[\r\n]* (optional space + non-word chars + optional newlines)
+            auto flags2 = (cpt == ' ' ? _get_flags(pos + 1) : flags);
+            if (!(flags2.is_whitespace || flags2.is_letter || flags2.is_number) && flags2.as_uint()) {
+                pos += (cpt == ' ');
+                while (!(flags2.is_whitespace || flags2.is_letter || flags2.is_number) && flags2.as_uint()) {
+                    flags2 = _get_flags(++pos);
+                }
+                // Match optional [\r\n]*
+                uint32_t cpt2 = _get_cpt(pos);
+                while (cpt2 == '\r' || cpt2 == '\n') {
+                    cpt2 = _get_cpt(++pos);
+                }
+                _add_token(pos);
+                continue;
+            }
+
+            // Count whitespace characters
+            size_t num_whitespaces = 0;
+            size_t last_end_r_or_n = 0;
+            while (_get_flags(pos + num_whitespaces).is_whitespace) {
+                uint32_t cpt2 = _get_cpt(pos + num_whitespaces);
+                if (cpt2 == '\r' || cpt2 == '\n') {
+                    last_end_r_or_n = pos + num_whitespaces + 1;
+                }
+                num_whitespaces++;
+            }
+
+            // Pattern 6: \s*[\r\n]+ (whitespace with newlines)
+            if (last_end_r_or_n > 0) {
+                pos = last_end_r_or_n;
+                _add_token(pos);
+                continue;
+            }
+
+            // Pattern 7: \s+(?!\S) (trailing whitespace)
+            if (num_whitespaces > 1 && _get_cpt(pos + num_whitespaces) != OUT_OF_RANGE) {
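+                // leave the last whitespace character for the next token, emulating the (?!\S) negative lookahead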
+                pos += num_whitespaces - 1;
+                _add_token(pos);
+                continue;
+            }
+
+            // Pattern 8: \s+ (general whitespace)
+            if (num_whitespaces > 0) {
+                pos += num_whitespaces;
+                _add_token(pos);
+                continue;
+            }
+
+            // No matches - consume single character
+            _add_token(++pos);
+        }
+    }
+
+    return bpe_offsets;
+}
+
 static std::vector<size_t> unicode_regex_split_custom(const std::string & text, const std::string & regex_expr, const std::vector<size_t> & offsets) {
     std::vector<size_t> bpe_offsets;
 
@@ -583,6 +760,9 @@ static std::vector<size_t> unicode_regex_split_custom(const std::string & text,
             regex_expr == "(?:'[sS]|'[tT]|'[rR][eE]|'[vV][eE]|'[mM]|'[lL][lL]|'[dD])|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}{1,3}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+") {
 
         bpe_offsets = unicode_regex_split_custom_llama3(text, offsets);
+    } else if (regex_expr == "\\p{Han}+") {
+        // K2's first pattern - handle all K2 patterns together
+        bpe_offsets = unicode_regex_split_custom_kimi_k2(text, offsets);
     }
 
     return bpe_offsets;
@@ -688,6 +868,38 @@ uint32_t unicode_tolower(uint32_t cpt) {
     return cpt;  // Return the original code point if no lowercase mapping is found
 }
 
+bool unicode_cpt_is_han(uint32_t cpt) {
+    // Han character ranges (Chinese/CJK characters)
+    // CJK Unified Ideographs (most common)
+    if (cpt >= 0x4E00 && cpt <= 0x9FFF) return true;
+
+    // CJK Extension A
+    if (cpt >= 0x3400 && cpt <= 0x4DBF) return true;
+
+    // CJK Extension B
+    if (cpt >= 0x20000 && cpt <= 0x2A6DF) return true;
+
+    // CJK Extension C
+    if (cpt >= 0x2A700 && cpt <= 0x2B73F) return true;
+
+    // CJK Extension D
+    if (cpt >= 0x2B740 && cpt <= 0x2B81F) return true;
+
+    // CJK Extension E
+    if (cpt >= 0x2B820 && cpt <= 0x2CEAF) return true;
+
+    // CJK Extension F
+    if (cpt >= 0x2CEB0 && cpt <= 0x2EBEF) return true;
+
+    // CJK Compatibility Ideographs
+    if (cpt >= 0xF900 && cpt <= 0xFAFF) return true;
+
+    // CJK Compatibility Ideographs Supplement
+    if (cpt >= 0x2F800 && cpt <= 0x2FA1F) return true;
+
+    return false;
+}
+
 std::vector<std::string> unicode_regex_split(const std::string & text, const std::vector<std::string> & regex_exprs) {
     // unicode categories
     static const std::map<std::string, int> k_ucat_enum = {
diff --git a/llama/llama.cpp/src/unicode.h b/llama/llama.cpp/src/unicode.h
index c27098df7..0a5fa2a78 100644
--- a/llama/llama.cpp/src/unicode.h
+++ b/llama/llama.cpp/src/unicode.h
@@ -63,4 +63,6 @@ uint8_t     unicode_utf8_to_byte(const std::string & utf8);
 
 uint32_t unicode_tolower(uint32_t cpt);
 
+bool unicode_cpt_is_han(uint32_t cpt);
+
 std::vector<std::string> unicode_regex_split(const std::string & text, const std::vector<std::string> & regex_exprs);
diff --git a/llama/llama.cpp/tools/mtmd/clip-impl.h b/llama/llama.cpp/tools/mtmd/clip-impl.h
index 23036ba72..c8822dcf5 100644
--- a/llama/llama.cpp/tools/mtmd/clip-impl.h
+++ b/llama/llama.cpp/tools/mtmd/clip-impl.h
@@ -4,6 +4,7 @@
 
 #include <climits>
 #include <cstdarg>
+#include <cinttypes>
 #include <string>
 #include <map>
 #include <sstream>
@@ -15,22 +16,26 @@
 #define KEY_FTYPE               "general.file_type"
 #define KEY_NAME                "general.name"
 #define KEY_DESCRIPTION         "general.description"
-#define KEY_MINICPMV_VERSION    "clip.minicpmv_version"
+#define KEY_PROJ_TYPE           "clip.projector_type"
+#define KEY_HAS_AUDIO_ENC       "clip.has_audio_encoder"
+#define KEY_HAS_VISION_ENC      "clip.has_vision_encoder"
 #define KEY_USE_GELU            "clip.use_gelu"
 #define KEY_USE_SILU            "clip.use_silu"
-#define KEY_N_EMBD              "clip.vision.embedding_length"
-#define KEY_N_FF                "clip.vision.feed_forward_length"
-#define KEY_N_BLOCK             "clip.vision.block_count"
-#define KEY_N_HEAD              "clip.vision.attention.head_count"
-#define KEY_LAYER_NORM_EPS      "clip.vision.attention.layer_norm_epsilon"
-#define KEY_PROJ_DIM            "clip.vision.projection_dim"
+
+#define KEY_N_EMBD              "clip.%s.embedding_length"
+#define KEY_N_FF                "clip.%s.feed_forward_length"
+#define KEY_N_BLOCK             "clip.%s.block_count"
+#define KEY_PROJ_DIM            "clip.%s.projection_dim"
+#define KEY_N_HEAD              "clip.%s.attention.head_count"
+#define KEY_LAYER_NORM_EPS      "clip.%s.attention.layer_norm_epsilon"
+
+// vision-specific
 #define KEY_IMAGE_SIZE          "clip.vision.image_size"
 #define KEY_PATCH_SIZE          "clip.vision.patch_size"
 #define KEY_IMAGE_MEAN          "clip.vision.image_mean"
 #define KEY_IMAGE_STD           "clip.vision.image_std"
 #define KEY_FEATURE_LAYER       "clip.vision.feature_layer"
 #define KEY_PROJ_SCALE_FACTOR   "clip.vision.projector.scale_factor"
-#define KEY_PROJ_TYPE           "clip.projector_type"
 #define KEY_SPATIAL_MERGE_SIZE  "clip.vision.spatial_merge_size"
 
 #define KEY_MM_PATCH_MERGE_TYPE   "clip.vision.mm_patch_merge_type"
@@ -38,6 +43,11 @@
 #define KEY_IMAGE_CROP_RESOLUTION "clip.vision.image_crop_resolution"
 #define KEY_WIN_ATTN_PATTERN      "clip.vision.n_wa_pattern"
 #define KEY_ATTN_WINDOW_SIZE      "clip.vision.window_size"
+#define KEY_MINICPMV_VERSION      "clip.minicpmv_version"
+
+// audio-specific
+#define KEY_A_NUM_MEL_BINS      "clip.audio.num_mel_bins"
+#define KEY_A_PROJ_STACK_FACTOR "clip.audio.projector.stack_factor"
 
 
 //
@@ -94,6 +104,13 @@
 #define TN_GLM_ADAPTER_GATE     "adapter.linear.gate.%s"
 #define TN_GLM_ADAPTER_D_4H_2_H "adapter.linear.dense_4h_to_h.%s"
 
+// ultravox
+#define TN_CONV1D       "a.conv1d.%d.%s"
+#define TN_MM_AUDIO_MLP "mm.a.mlp.%d.%s"
+#define TN_MM_AUDIO_FC  "mm.a.fc.%s" // fully connected layer
+#define TN_MM_NORM_PRE  "mm.a.norm_pre.%s"
+#define TN_MM_NORM_MID  "mm.a.norm_mid.%s"
+
 // align x to upper multiple of n
 #define CLIP_ALIGN(x, n) ((((x) + (n) - 1) / (n)) * (n))
 
@@ -109,7 +126,12 @@ enum projector_type {
     PROJECTOR_TYPE_IDEFICS3,
     PROJECTOR_TYPE_PIXTRAL,
     PROJECTOR_TYPE_QWEN25VL,
+    PROJECTOR_TYPE_ULTRAVOX,
     PROJECTOR_TYPE_INTERNVL,
+    PROJECTOR_TYPE_LLAMA4,
+    PROJECTOR_TYPE_QWEN2A,
+    PROJECTOR_TYPE_QWEN25O, // will be replaced by QWEN2A or QWEN25VL depending on clip_ctx
+    PROJECTOR_TYPE_VOXTRAL,
     PROJECTOR_TYPE_UNKNOWN,
 };
 
 static std::map<projector_type, std::string> PROJECTOR_TYPE_NAMES = {
     { PROJECTOR_TYPE_GEMMA3,    "gemma3"},
     { PROJECTOR_TYPE_IDEFICS3,  "idefics3"},
     { PROJECTOR_TYPE_PIXTRAL,   "pixtral"},
+    { PROJECTOR_TYPE_ULTRAVOX,  "ultravox"},
     { PROJECTOR_TYPE_INTERNVL,  "internvl"},
+    { PROJECTOR_TYPE_LLAMA4,    "llama4"},
+    { PROJECTOR_TYPE_QWEN2A,    "qwen2a"},
+    { PROJECTOR_TYPE_QWEN25O,   "qwen2.5o"},
+    { PROJECTOR_TYPE_VOXTRAL,   "voxtral"},
 };
 
 static projector_type clip_projector_type_from_string(const std::string & str) {
@@ -144,8 +171,10 @@ struct clip_image_u8 {
     std::vector<uint8_t> buf;
 };
 
-// RGB float32 image (NHWC)
-// Memory layout: RGBRGBRGB...
+// For images, buf.size() == nx*ny*3
+//     Memory layout: RGBRGBRGB...
+// For audio, only one channel is used, buf.size() == nx*ny
+//     nx will be n_frames and ny will be n_mel
 struct clip_image_f32 {
     int nx;
     int ny;
@@ -239,9 +268,20 @@ struct clip_image_u8_batch {
 
 struct clip_image_f32_batch {
     std::vector<clip_image_f32_ptr> entries;
+    bool is_audio = false;
+
+    // for llava-uhd style models, we need to know the grid size
+    // note: entries.size() == grid_x * grid_y + 1 (one overview image)
+    int grid_x = 0;
+    int grid_y = 0;
 
     clip_image_f32_batch clone() const {
-        clip_image_f32_batch new_batch;
+        clip_image_f32_batch new_batch{
+            /* entries  */ {},
+            /* is_audio */ is_audio,
+            /* grid_x   */ grid_x,
+            /* grid_y   */ grid_y,
+        };
         new_batch.entries.reserve(entries.size());
         for (const auto & entry : entries) {
             new_batch.entries.emplace_back(new clip_image_f32(*entry));
@@ -358,6 +398,70 @@ static std::string gguf_kv_to_str(const struct gguf_context * ctx_gguf, int i) {
     }
 }
 
+//
+// debugging
+//
+
+static void print_tensor_shape(ggml_tensor * t) {
+    printf("%s.shape = [", t->name);
+    for (int i = 0; i < ggml_n_dims(t); ++i) {
+        printf("%" PRId64, t->ne[i]);
+        if (i < ggml_n_dims(t) - 1) {
+            printf(", ");
+        }
+    }
+    printf("]\n");
+}
+
+static void print_tensor_data(ggml_tensor * t, uint8_t * data, int64_t n) {
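+    // print the tensor as nested lists, eliding the middle of any dimension longer than 2*n elements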
+    ggml_type type = t->type;
+    int64_t * ne = t->ne;
+    size_t * nb = t->nb;
+    for (int64_t i3 = 0; i3 < ne[3]; i3++) {
+        printf("%s.data: [\n", t->name);
+        for (int64_t i2 = 0; i2 < ne[2]; i2++) {
+            if (i2 == n && ne[2] > 2*n) {
+                printf("     ..., \n");
+                i2 = ne[2] - n;
+            }
+            printf("     [\n");
+            for (int64_t i1 = 0; i1 < ne[1]; i1++) {
+                if (i1 == n && ne[1] > 2*n) {
+                    printf("      ..., \n");
+                    i1 = ne[1] - n;
+                }
+                printf("      [");
+                for (int64_t i0 = 0; i0 < ne[0]; i0++) {
+                    if (i0 == n && ne[0] > 2*n) {
+                        printf("..., ");
+                        i0 = ne[0] - n;
+                    }
+                    size_t i = i3 * nb[3] + i2 * nb[2] + i1 * nb[1] + i0 * nb[0];
+                    float v;
+                    if (type == GGML_TYPE_F16) {
+                        v = ggml_fp16_to_fp32(*(ggml_fp16_t *) &data[i]);
+                    } else if (type == GGML_TYPE_F32) {
+                        v = *(float *) &data[i];
+                    } else if (type == GGML_TYPE_I32) {
+                        v = (float) *(int32_t *) &data[i];
+                    } else if (type == GGML_TYPE_I16) {
+                        v = (float) *(int16_t *) &data[i];
+                    } else if (type == GGML_TYPE_I8) {
+                        v = (float) *(int8_t *) &data[i];
+                    } else {
+                        GGML_ABORT("fatal error");
+                    }
+                    printf("%8.4f", v);
+                    if (i0 < ne[0] - 1) printf(", ");
+                }
+                printf("],\n");
+            }
+            printf("     ],\n");
+        }
+        printf("    ]\n");
+    }
+}
+
 //
 // API used internally with mtmd
 //
diff --git a/llama/llama.cpp/tools/mtmd/clip.cpp b/llama/llama.cpp/tools/mtmd/clip.cpp
index cdd8ca44e..f4f69cfc4 100644
--- a/llama/llama.cpp/tools/mtmd/clip.cpp
+++ b/llama/llama.cpp/tools/mtmd/clip.cpp
@@ -11,9 +11,6 @@
 #include "ggml-backend.h"
 #include "gguf.h"
 
-#define STB_IMAGE_IMPLEMENTATION
-#include "stb_image.h"
-
 #include <cassert>
 #include <cmath>
 #include <cstdlib>
@@ -48,6 +45,7 @@ struct clip_logger_state g_logger_state = {GGML_LOG_LEVEL_CONT, clip_log_callbac
 
 enum ffn_op_type {
     FFN_GELU,
+    FFN_GELU_ERF,
     FFN_SILU,
     FFN_GELU_QUICK,
 };
@@ -187,9 +185,13 @@ struct clip_hparams {
     int32_t n_layer;
     int32_t proj_scale_factor = 0; // idefics3
 
+    float image_mean[3];
+    float image_std[3];
+
     // for models using dynamic image size, we need to have a smaller image size to warmup
     // otherwise, the user will get OOM every time they load the model
     int32_t warmup_image_size = 0;
+    int32_t warmup_audio_size = 3000;
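+    // 3000 mel frames == 30 seconds of audio at the whisper preprocessor's 100 frames per second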
 
     ffn_op_type ffn_op = FFN_GELU;
 
@@ -198,12 +200,20 @@ struct clip_hparams {
     float eps = 1e-6;
     float rope_theta = 0.0;
 
-    std::vector<int32_t> image_grid_pinpoints;
+    std::vector<clip_image_size> image_res_candidates; // for llava-uhd style models
     int32_t image_crop_resolution;
     std::unordered_set<int32_t> vision_feature_layer;
     int32_t attn_window_size = 0;
     int32_t n_wa_pattern = 0;
     int32_t spatial_merge_size = 0;
+
+    // audio
+    int32_t n_mel_bins = 0; // whisper preprocessor
+    int32_t proj_stack_factor = 0; // ultravox
+
+    // legacy
+    bool has_llava_projector = false;
+    int minicpmv_version = 0;
 };
 
 struct clip_layer {
@@ -241,8 +251,10 @@ struct clip_layer {
     ggml_tensor * ls_2_w = nullptr;
 };
 
-struct clip_vision_model {
-    struct clip_hparams hparams;
+struct clip_model {
+    clip_modality modality = CLIP_MODALITY_VISION;
+    projector_type proj_type = PROJECTOR_TYPE_MLP;
+    clip_hparams hparams;
 
     // embeddings
     ggml_tensor * class_embedding = nullptr;
@@ -259,7 +271,9 @@ struct clip_vision_model {
     ggml_tensor * post_ln_w;
     ggml_tensor * post_ln_b;
 
-    ggml_tensor * projection;
+    ggml_tensor * projection; // TODO: rename it to fc (fully connected layer)
+    ggml_tensor * mm_fc_w;
+    ggml_tensor * mm_fc_b;
 
     // LLaVA projection
     ggml_tensor * mm_input_norm_w = nullptr;
@@ -345,17 +359,28 @@ struct clip_vision_model {
     // pixtral
     ggml_tensor * token_embd_img_break = nullptr;
     ggml_tensor * mm_patch_merger_w = nullptr;
+
+    // ultravox / whisper encoder
+    ggml_tensor * conv1d_1_w = nullptr;
+    ggml_tensor * conv1d_1_b = nullptr;
+    ggml_tensor * conv1d_2_w = nullptr;
+    ggml_tensor * conv1d_2_b = nullptr;
+    ggml_tensor * mm_norm_pre_w = nullptr;
+    ggml_tensor * mm_norm_mid_w = nullptr;
+
+    bool audio_has_avgpool() const {
+        return proj_type == PROJECTOR_TYPE_QWEN2A
+            || proj_type == PROJECTOR_TYPE_VOXTRAL;
+    }
+
+    bool audio_has_stack_frames() const {
+        return proj_type == PROJECTOR_TYPE_ULTRAVOX
+            || proj_type == PROJECTOR_TYPE_VOXTRAL;
+    }
 };
 
 struct clip_ctx {
-    bool has_llava_projector = false;
-    int minicpmv_version = 0;
-
-    struct clip_vision_model vision_model;
-    projector_type proj_type = PROJECTOR_TYPE_MLP;
-
-    float image_mean[3];
-    float image_std[3];
+    clip_model model;
 
     gguf_context_ptr ctx_gguf;
     ggml_context_ptr ctx_data;
@@ -365,23 +390,35 @@ struct clip_ctx {
     std::vector<ggml_backend_t> backend_ptrs;
     std::vector<ggml_backend_buffer_type_t> backend_buft;
 
-    ggml_backend_t backend;
-    ggml_backend_t backend_cpu;
+    ggml_backend_t backend = nullptr;
+    ggml_backend_t backend_cpu = nullptr;
     ggml_backend_buffer_ptr buf;
 
     int max_nodes = 8192;
     ggml_backend_sched_ptr sched;
 
-    clip_image_size load_image_size;
+    // for debugging
+    bool debug_graph = false;
+    std::vector<ggml_tensor *> debug_print_tensors;
 
     clip_ctx(clip_context_params & ctx_params) {
+        debug_graph = std::getenv("MTMD_DEBUG_GRAPH") != nullptr;
         backend_cpu = ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_CPU, nullptr);
         if (!backend_cpu) {
             throw std::runtime_error("failed to initialize CPU backend");
         }
-        backend = ctx_params.use_gpu
-                    ? ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_GPU, nullptr)
-                    : nullptr;
+        if (ctx_params.use_gpu) {
+            auto backend_name = std::getenv("MTMD_BACKEND_DEVICE");
+            if (backend_name != nullptr) {
+                backend = ggml_backend_init_by_name(backend_name, nullptr);
+                if (!backend) {
+                    LOG_WRN("%s: Warning: Failed to initialize \"%s\" backend, falling back to default GPU backend\n", __func__, backend_name);
+                }
+            }
+            if (!backend) {
+                backend = ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_GPU, nullptr);
+            }
+        }
 
         if (backend) {
             LOG_INF("%s: CLIP using %s backend\n", __func__, ggml_backend_name(backend));
@@ -406,11 +443,16 @@ struct clip_ctx {
             ggml_backend_free(backend_cpu);
         }
     }
+
+    // this function is added so that we don't change too much of the existing code
+    projector_type proj_type() const {
+        return model.proj_type;
+    }
 };
 
 struct clip_graph {
     clip_ctx * ctx;
-    const clip_vision_model & model;
+    const clip_model & model;
     const clip_hparams & hparams;
 
     // we only support single image per batch
@@ -433,7 +475,7 @@ struct clip_graph {
 
     clip_graph(clip_ctx * ctx, const clip_image_f32 & img) :
             ctx(ctx),
-            model(ctx->vision_model),
+            model(ctx->model),
             hparams(model.hparams),
             img(img),
             patch_size(hparams.patch_size),
@@ -453,7 +495,7 @@ struct clip_graph {
         };
         ctx0_ptr.reset(ggml_init(params));
         ctx0 = ctx0_ptr.get();
-        gf = ggml_new_graph(ctx0);
+        gf = ggml_new_graph_custom(ctx0, ctx->max_nodes, false);
     }
 
     ggml_cgraph * build_siglip() {
@@ -465,7 +507,7 @@ struct clip_graph {
                                 model.position_embeddings,
                                 nullptr);
 
-        if (ctx->proj_type == PROJECTOR_TYPE_GEMMA3) {
+        if (ctx->proj_type() == PROJECTOR_TYPE_GEMMA3) {
             const int batch_size = 1;
             GGML_ASSERT(n_patches_x == n_patches_y);
             const int patches_per_image = n_patches_x;
@@ -488,7 +530,7 @@ struct clip_graph {
                 ggml_cont(ctx0, ggml_transpose(ctx0, model.mm_input_proj_w)),
                 cur);
 
-        } else if (ctx->proj_type == PROJECTOR_TYPE_IDEFICS3) {
+        } else if (ctx->proj_type() == PROJECTOR_TYPE_IDEFICS3) {
             // https://github.com/huggingface/transformers/blob/0a950e0bbe1ed58d5401a6b547af19f15f0c195e/src/transformers/models/idefics3/modeling_idefics3.py#L578
 
             const int scale_factor = model.hparams.proj_scale_factor;
@@ -535,7 +577,7 @@ struct clip_graph {
         ggml_set_input(pos_w);
 
         auto add_pos = [&](ggml_tensor * cur, const clip_layer &) {
-            return build_rope_2d(ctx0, cur, pos_h, pos_w, hparams.rope_theta);
+            return build_rope_2d(ctx0, cur, pos_h, pos_w, hparams.rope_theta, true);
         };
 
         ggml_tensor * inp = build_inp();
@@ -622,7 +664,7 @@ struct clip_graph {
         const int n_pos            = n_patches;
         const int num_position_ids = n_pos * 4; // m-rope requires 4 dim per position
 
-        norm_type norm_t = ctx->proj_type == PROJECTOR_TYPE_QWEN25VL
+        norm_type norm_t = ctx->proj_type() == PROJECTOR_TYPE_QWEN25VL
             ? NORM_TYPE_RMS // qwen 2.5 vl
             : NORM_TYPE_NORMAL; // qwen 2 vl
 
@@ -838,11 +880,17 @@ struct clip_graph {
             const int d_head = 128;
             int n_head = n_embd/d_head;
             int num_query = 96;
-            if (ctx->minicpmv_version == 2) {
+            if (ctx->model.hparams.minicpmv_version == 2) {
+                // MiniCPM-V 2.5
                 num_query = 96;
-            } else if (ctx->minicpmv_version == 3) {
+            } else if (ctx->model.hparams.minicpmv_version == 3) {
+                // MiniCPM-V 2.6
                 num_query = 64;
-            } else if (ctx->minicpmv_version == 4) {
+            } else if (ctx->model.hparams.minicpmv_version == 4) {
+                // MiniCPM-o 2.6
+                num_query = 64;
+            } else if (ctx->model.hparams.minicpmv_version == 5) {
+                // MiniCPM-V 4.0
                 num_query = 64;
             }
 
@@ -949,6 +997,101 @@ struct clip_graph {
         return gf;
     }
 
+    ggml_cgraph * build_llama4() {
+        GGML_ASSERT(model.class_embedding != nullptr);
+        GGML_ASSERT(model.position_embeddings != nullptr);
+
+        const int n_pos = n_patches + 1; // +1 for [CLS]
+
+        // 2D input positions
+        ggml_tensor * pos_h = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_pos);
+        ggml_set_name(pos_h, "pos_h");
+        ggml_set_input(pos_h);
+
+        ggml_tensor * pos_w = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_pos);
+        ggml_set_name(pos_w, "pos_w");
+        ggml_set_input(pos_w);
+
+        ggml_tensor * inp = build_inp_raw();
+
+        // Llama4UnfoldConvolution
+        {
+            ggml_tensor * kernel = ggml_reshape_4d(ctx0, model.patch_embeddings_0,
+                                                    patch_size, patch_size, 3, n_embd);
+            inp = ggml_im2col(ctx0, kernel, inp, patch_size, patch_size, 0, 0, 1, 1, true, inp->type);
+            inp = ggml_mul_mat(ctx0, model.patch_embeddings_0, inp);
+            inp = ggml_reshape_2d(ctx0, inp, n_embd, n_patches);
+            cb(inp, "patch_conv", -1);
+        }
+
+        // add CLS token
+        inp = ggml_concat(ctx0, inp, model.class_embedding, 1);
+
+        // build ViT with 2D position embeddings
+        auto add_pos = [&](ggml_tensor * cur, const clip_layer &) {
+            // first half is X axis and second half is Y axis
+            // ref: https://github.com/huggingface/transformers/blob/40a493c7ed4f19f08eadb0639cf26d49bfa5e180/src/transformers/models/llama4/modeling_llama4.py#L1312
+            // ref: https://github.com/Blaizzy/mlx-vlm/blob/a57156aa87b33cca6e5ee6cfc14dd4ef8f611be6/mlx_vlm/models/llama4/vision.py#L441
+            return build_rope_2d(ctx0, cur, pos_w, pos_h, hparams.rope_theta, false);
+        };
+        ggml_tensor * cur = build_vit(
+                                inp, n_pos,
+                                NORM_TYPE_NORMAL,
+                                hparams.ffn_op,
+                                model.position_embeddings,
+                                add_pos);
+
+        // remove CLS token
+        cur = ggml_view_2d(ctx0, cur,
+            n_embd, n_patches,
+            ggml_row_size(cur->type, n_embd), 0);
+
+        // pixel shuffle
+        // based on Llama4VisionPixelShuffleMLP
+        // https://github.com/huggingface/transformers/blob/2932f318a20d9e54cc7aea052e040164d85de7d6/src/transformers/models/llama4/modeling_llama4.py#L1151
+        {
+            const int scale_factor = model.hparams.proj_scale_factor;
+            const int bsz = 1; // batch size, always 1 for now since we don't support batching
+            GGML_ASSERT(scale_factor > 0);
+            GGML_ASSERT(n_patches_x == n_patches_y); // llama4 only supports square images
+            cur = ggml_reshape_4d(ctx0, cur,
+                n_embd * scale_factor,
+                n_patches_x / scale_factor,
+                n_patches_y,
+                bsz);
+            cur = ggml_permute(ctx0, cur, 0, 2, 1, 3);
+            cur = ggml_reshape_4d(ctx0, ggml_cont(ctx0, cur),
+                n_embd * scale_factor * scale_factor,
+                n_patches_x / scale_factor,
+                n_patches_y / scale_factor,
+                bsz);
+            cur = ggml_permute(ctx0, cur, 0, 2, 1, 3);
+            // flatten to 2D
+            cur = ggml_reshape_2d(ctx0, ggml_cont(ctx0, cur),
+                n_embd * scale_factor * scale_factor,
+                n_patches / scale_factor / scale_factor);
+            cb(cur, "pixel_shuffle", -1);
+        }
+
+        // based on Llama4VisionMLP2 (always uses GELU activation, no bias)
+        {
+            cur = ggml_mul_mat(ctx0, model.mm_model_mlp_1_w, cur);
+            cur = ggml_gelu(ctx0, cur);
+            cur = ggml_mul_mat(ctx0, model.mm_model_mlp_2_w, cur);
+            cur = ggml_gelu(ctx0, cur);
+            cb(cur, "adapter_mlp", -1);
+        }
+
+        // Llama4MultiModalProjector
+        cur = ggml_mul_mat(ctx0, model.mm_model_proj, cur);
+        cb(cur, "projected", -1);
+
+        // build the graph
+        ggml_build_forward_expand(gf, cur);
+
+        return gf;
+    }
+
     // this graph is used by llava, granite and glm
     // due to having embedding_stack (used by granite), we cannot reuse build_vit
     ggml_cgraph * build_llava() {
@@ -964,7 +1107,7 @@ struct clip_graph {
             int il_last = hparams.n_layer - 1;
             int deepest_feature_layer = -1;
 
-            if (ctx->proj_type == PROJECTOR_TYPE_MINICPMV || ctx->proj_type == PROJECTOR_TYPE_GLM_EDGE) {
+            if (ctx->proj_type() == PROJECTOR_TYPE_MINICPMV || ctx->proj_type() == PROJECTOR_TYPE_GLM_EDGE) {
                 il_last += 1;
             }
 
@@ -1098,7 +1241,7 @@ struct clip_graph {
         }
 
         // llava projector (also used by granite)
-        if (ctx->has_llava_projector) {
+        if (ctx->model.hparams.has_llava_projector) {
             embeddings = ggml_reshape_2d(ctx0, embeddings, embeddings->ne[0], embeddings->ne[1]);
 
             ggml_tensor * patches = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_patches);
@@ -1112,7 +1255,7 @@ struct clip_graph {
             // print_tensor_info(embeddings, "embeddings");
 
             // llava projector
-            if (ctx->proj_type == PROJECTOR_TYPE_MLP) {
+            if (ctx->proj_type() == PROJECTOR_TYPE_MLP) {
                 embeddings = ggml_mul_mat(ctx0, model.mm_0_w, embeddings);
                 embeddings = ggml_add(ctx0, embeddings, model.mm_0_b);
 
@@ -1122,7 +1265,7 @@ struct clip_graph {
                     embeddings = ggml_add(ctx0, embeddings, model.mm_2_b);
                 }
             }
-            else if (ctx->proj_type == PROJECTOR_TYPE_MLP_NORM) {
+            else if (ctx->proj_type() == PROJECTOR_TYPE_MLP_NORM) {
                 embeddings = ggml_mul_mat(ctx0, model.mm_0_w, embeddings);
                 embeddings = ggml_add(ctx0, embeddings, model.mm_0_b);
                 // ggml_tensor_printf(embeddings, "mm_0_w",0,true,false);
@@ -1143,7 +1286,7 @@ struct clip_graph {
                 embeddings = ggml_add(ctx0, ggml_mul(ctx0, embeddings, model.mm_4_w),
                                     model.mm_4_b);
             }
-            else if (ctx->proj_type == PROJECTOR_TYPE_LDP) {
+            else if (ctx->proj_type() == PROJECTOR_TYPE_LDP) {
                 // MobileVLM projector
                 int n_patch = 24;
                 ggml_tensor * mlp_1 = ggml_mul_mat(ctx0, model.mm_model_mlp_1_w, embeddings);
@@ -1253,7 +1396,7 @@ struct clip_graph {
                 }
                 embeddings = block_1;
             }
-            else if (ctx->proj_type == PROJECTOR_TYPE_LDPV2)
+            else if (ctx->proj_type() == PROJECTOR_TYPE_LDPV2)
             {
                 int n_patch = 24;
                 ggml_tensor * mlp_0 = ggml_mul_mat(ctx0, model.mm_model_mlp_0_w, embeddings);
@@ -1283,7 +1426,7 @@ struct clip_graph {
         }
 
         // glm projector
-        else if (ctx->proj_type == PROJECTOR_TYPE_GLM_EDGE) {
+        else if (ctx->proj_type() == PROJECTOR_TYPE_GLM_EDGE) {
             size_t gridsz = (size_t)sqrt(embeddings->ne[1]);
             embeddings = ggml_cont(ctx0, ggml_permute(ctx0,embeddings,1,0,2,3));
             embeddings = ggml_reshape_3d(ctx0, embeddings, gridsz, gridsz, embeddings->ne[1]);
@@ -1300,8 +1443,7 @@ struct clip_graph {
                 ggml_tensor * x = embeddings;
                 embeddings = ggml_mul_mat(ctx0, model.mm_model_mlp_2_w, embeddings);
                 x = ggml_mul_mat(ctx0, model.mm_model_mlp_1_w,x);
-                embeddings = ggml_silu_inplace(ctx0, embeddings);
-                embeddings = ggml_mul(ctx0, embeddings,x);
+                embeddings = ggml_swiglu_split(ctx0, embeddings, x);
                 embeddings = ggml_mul_mat(ctx0, model.mm_model_mlp_3_w, embeddings);
             }
             // arrangement of BOI/EOI token embeddings
@@ -1323,16 +1465,124 @@ struct clip_graph {
         return gf;
     }
 
+    // whisper encoder with custom projector
+    ggml_cgraph * build_whisper_enc() {
+        const int n_frames = img.nx;
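+        // the second conv1d below has stride 2, halving the time axis, so only n_frames / 2 positions are needed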
+        const int n_pos    = n_frames / 2;
+        GGML_ASSERT(model.position_embeddings->ne[1] >= n_pos);
+
+        ggml_tensor * inp = build_inp_raw(1);
+
+        // conv1d block
+        {
+            // convolution + gelu
+            ggml_tensor * cur = ggml_conv_1d_ph(ctx0, model.conv1d_1_w, inp, 1, 1);
+            cur = ggml_add(ctx0, cur, model.conv1d_1_b);
+
+            cur = ggml_gelu_erf(ctx0, cur);
+
+            cur = ggml_conv_1d_ph(ctx0, model.conv1d_2_w, cur, 2, 1);
+            cur = ggml_add(ctx0, cur, model.conv1d_2_b);
+
+            cur = ggml_gelu_erf(ctx0, cur);
+            // transpose
+            inp = ggml_cont(ctx0, ggml_transpose(ctx0, cur));
+            cb(inp, "after_conv1d", -1);
+        }
+
+        // sanity check (only check one layer, but it should be the same for all)
+        GGML_ASSERT(model.layers[0].ln_1_w && model.layers[0].ln_1_b);
+        GGML_ASSERT(model.layers[0].ln_2_w && model.layers[0].ln_2_b);
+        GGML_ASSERT(model.layers[0].q_b);
+        GGML_ASSERT(model.layers[0].v_b);
+        GGML_ASSERT(!model.layers[0].k_b); // no bias for k
+        GGML_ASSERT(model.post_ln_w && model.post_ln_b);
+
+        ggml_tensor * pos_embd_selected = ggml_view_2d(
+            ctx0, model.position_embeddings,
+            model.position_embeddings->ne[0], n_pos,
+            model.position_embeddings->nb[1], 0
+        );
+        ggml_tensor * cur = build_vit(
+                                inp, n_pos,
+                                NORM_TYPE_NORMAL,
+                                hparams.ffn_op,
+                                pos_embd_selected,
+                                nullptr);
+
+        cb(cur, "after_transformer", -1);
+
+        if (model.audio_has_stack_frames()) {
+            // StackAudioFrames
+            // https://huggingface.co/fixie-ai/ultravox-v0_5-llama-3_2-1b/blob/main/ultravox_model.py
+            int64_t stride = n_embd * hparams.proj_stack_factor;
+            int64_t padded_len = GGML_PAD(ggml_nelements(cur), stride);
+            int64_t pad = padded_len - ggml_nelements(cur);
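+            // pad the flattened sequence so it splits evenly into groups of proj_stack_factor frames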
+            if (pad > 0) {
+                cur = ggml_view_1d(ctx0, cur, ggml_nelements(cur), 0);
+                cur = ggml_pad(ctx0, cur, pad, 0, 0, 0);
+            }
+            cur = ggml_view_2d(ctx0, cur, stride, padded_len / stride,
+                                ggml_row_size(cur->type, stride), 0);
+            cb(cur, "after_stacked", -1);
+        }
+
+        if (ctx->proj_type() == PROJECTOR_TYPE_ULTRAVOX) {
+            // UltravoxProjector
+            // pre-norm
+            cur = ggml_rms_norm(ctx0, cur, 1e-6);
+            cur = ggml_mul(ctx0, cur, model.mm_norm_pre_w);
+
+            // ffn in
+            cur = ggml_mul_mat(ctx0, model.mm_1_w, cur);
+
+            // swiglu
+            // see SwiGLU in ultravox_model.py: the second half is passed through silu, not the first half
+            cur = ggml_swiglu_swapped(ctx0, cur);
+
+            // mid-norm
+            cur = ggml_rms_norm(ctx0, cur, 1e-6);
+            cur = ggml_mul(ctx0, cur, model.mm_norm_mid_w);
+
+            // ffn out
+            cur = ggml_mul_mat(ctx0, model.mm_2_w, cur);
+
+        } else if (ctx->proj_type() == PROJECTOR_TYPE_QWEN2A) {
+            // projector
+            cur = ggml_mul_mat(ctx0, model.mm_fc_w, cur);
+            cur = ggml_add(ctx0, cur, model.mm_fc_b);
+
+        } else if (ctx->proj_type() == PROJECTOR_TYPE_VOXTRAL) {
+            // projector
+            cur = ggml_mul_mat(ctx0, model.mm_1_w, cur);
+            cur = ggml_gelu_erf(ctx0, cur);
+            cur = ggml_mul_mat(ctx0, model.mm_2_w, cur);
+
+        } else {
+            GGML_ABORT("%s: unknown projector type", __func__);
+        }
+
+        cb(cur, "projected", -1);
+
+        ggml_build_forward_expand(gf, cur);
+
+        return gf;
+    }
+
 private:
     //
     // utility functions
     //
 
-    void cb(ggml_tensor * cur, const char * name, int il) const {
-        // TODO: implement this
-        GGML_UNUSED(cur);
-        GGML_UNUSED(name);
-        GGML_UNUSED(il);
+    void cb(ggml_tensor * cur0, const char * name, int il) const {
+        if (ctx->debug_graph) {
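+            // copy the activation into a named output tensor so it can be read back after graph execution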
+            ggml_tensor * cur = ggml_cpy(ctx0, cur0, ggml_dup_tensor(ctx0, cur0));
+            std::string cur_name = il >= 0 ? std::string(name) + "_" + std::to_string(il) : name;
+            ggml_set_name(cur, cur_name.c_str());
+            ggml_set_output(cur);
+            ggml_build_forward_expand(gf, cur);
+            ctx->debug_print_tensors.push_back(cur);
+        }
     }
 
     // build vision transformer (ViT) cgraph
@@ -1452,6 +1702,16 @@ private:
             inpL = cur;
         }
 
+        if (ctx->model.audio_has_avgpool()) {
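+            // 2x average pooling along the sequence axis; ggml_pool_1d pools dim 0, hence the transpose/cont pairs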
+            ggml_tensor * cur = inpL;
+            cur = ggml_transpose(ctx0, cur);
+            cur = ggml_cont(ctx0, cur);
+            cur = ggml_pool_1d(ctx0, cur, GGML_OP_POOL_AVG, 2, 2, 0);
+            cur = ggml_transpose(ctx0, cur);
+            cur = ggml_cont(ctx0, cur);
+            inpL = cur;
+        }
+
         // post-layernorm
         if (model.post_ln_w) {
             inpL = build_norm(inpL, model.post_ln_w, model.post_ln_b, norm_t, eps, -1);
@@ -1473,8 +1733,8 @@ private:
         return inp;
     }
 
-    ggml_tensor * build_inp_raw() {
-        ggml_tensor * inp_raw = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, img.nx, img.ny, 3);
+    ggml_tensor * build_inp_raw(int channels = 3) {
+        ggml_tensor * inp_raw = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, img.nx, img.ny, channels);
         ggml_set_name(inp_raw, "inp_raw");
         ggml_set_input(inp_raw);
         return inp_raw;
@@ -1541,28 +1801,40 @@ private:
             cur = tmp;
         }
 
+        // we only support parallel ffn for now
         switch (type_op) {
             case FFN_SILU:
-                {
+                if (gate) {
+                    cur = ggml_swiglu_split(ctx0, cur, tmp);
+                    cb(cur, "ffn_swiglu", il);
+                } else {
                     cur = ggml_silu(ctx0, cur);
                     cb(cur, "ffn_silu", il);
                 } break;
             case FFN_GELU:
-                {
+                if (gate) {
+                    cur = ggml_geglu_split(ctx0, cur, tmp);
+                    cb(cur, "ffn_geglu", il);
+                } else {
                     cur = ggml_gelu(ctx0, cur);
                     cb(cur, "ffn_gelu", il);
                 } break;
-            case FFN_GELU_QUICK:
-                {
-                    cur = ggml_gelu_quick(ctx0, cur);
-                    cb(cur, "ffn_relu", il);
+            case FFN_GELU_ERF:
+                if (gate) {
+                    cur = ggml_geglu_erf_split(ctx0, cur, tmp);
+                    cb(cur, "ffn_geglu_erf", il);
+                } else {
+                    cur = ggml_gelu_erf(ctx0, cur);
+                    cb(cur, "ffn_gelu_erf", il);
+                } break;
+            case FFN_GELU_QUICK:
+                if (gate) {
+                    cur = ggml_geglu_quick_split(ctx0, cur, tmp);
+                    cb(cur, "ffn_geglu_quick", il);
+                } else {
+                    cur = ggml_gelu_quick(ctx0, cur);
+                    cb(cur, "ffn_gelu_quick", il);
                 } break;
-        }
-
-        // we only support parallel ffn for now
-        if (gate) {
-            cur = ggml_mul(ctx0, cur, tmp);
-            cb(cur, "ffn_gate_par", il);
         }
 
         if (down) {
@@ -1643,9 +1915,10 @@ private:
     static ggml_tensor * build_rope_2d(
         ggml_context * ctx0,
         ggml_tensor * cur,
-        ggml_tensor * pos_h,
-        ggml_tensor * pos_w,
-        const float freq_base
+        ggml_tensor * pos_a, // first half
+        ggml_tensor * pos_b, // second half
+        const float freq_base,
+        const bool interleave_freq
     ) {
         const int64_t n_dim  = cur->ne[0];
         const int64_t n_head = cur->ne[1];
@@ -1659,7 +1932,9 @@ private:
         //  ^ don't ask me why, it's math! -2(2i) / n_dim == -2i / (n_dim/2)
         // then for the second half, we use freq_scale to shift the inv_freq
         //  ^ why? replace (2i) with (2i+1) in the above equation
-        const float freq_scale_odd = std::pow(freq_base, (float)-2/n_dim);
+        const float freq_scale_odd = interleave_freq
+                                    ? std::pow(freq_base, (float)-2/n_dim)
+                                    : 1.0;
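+        //  ^ when interleave_freq is false, both halves reuse the same inv_freq (no odd/even split)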
 
         // first half
         ggml_tensor * first;
@@ -1672,7 +1947,7 @@ private:
             first = ggml_rope_ext(
                 ctx0,
                 first,
-                pos_h,      // positions
+                pos_a,      // positions
                 nullptr,    // freq factors
                 n_dim/2,    // n_dims
                 0, 0, freq_base,
@@ -1692,7 +1967,7 @@ private:
             second = ggml_rope_ext(
                 ctx0,
                 second,
-                pos_w,      // positions
+                pos_b,      // positions
                 nullptr,    // freq factors
                 n_dim/2,    // n_dims
                 0, 0, freq_base,
@@ -1713,7 +1988,7 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32
 
     ggml_cgraph * res;
 
-    switch (ctx->proj_type) {
+    switch (ctx->proj_type()) {
         case PROJECTOR_TYPE_GEMMA3:
         case PROJECTOR_TYPE_IDEFICS3:
             {
@@ -1736,6 +2011,16 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32
             {
                 res = graph.build_internvl();
             } break;
+        case PROJECTOR_TYPE_LLAMA4:
+            {
+                res = graph.build_llama4();
+            } break;
+        case PROJECTOR_TYPE_ULTRAVOX:
+        case PROJECTOR_TYPE_VOXTRAL:
+        case PROJECTOR_TYPE_QWEN2A:
+            {
+                res = graph.build_whisper_enc();
+            } break;
         default:
             {
                 res = graph.build_llava();
@@ -1748,13 +2033,15 @@ struct clip_model_loader {
     ggml_context_ptr ctx_meta;
     gguf_context_ptr ctx_gguf;
 
-    clip_ctx & ctx_clip;
     std::string fname;
 
     size_t model_size = 0; // in bytes
 
-    // TODO @ngxson : we should not pass clip_ctx here, it should be clip_vision_model
-    clip_model_loader(const char * fname, clip_ctx & ctx_clip) : ctx_clip(ctx_clip), fname(fname) {
+    bool has_vision = false;
+    bool has_audio  = false;
+
+    // TODO @ngxson : we should not pass clip_ctx here, it should be clip_model
+    clip_model_loader(const char * fname) : fname(fname) {
         struct ggml_context * meta = nullptr;
 
         struct gguf_init_params params = {
@@ -1786,6 +2073,19 @@ struct clip_model_loader {
             LOG_INF("\n");
         }
 
+        // modalities
+        {
+            get_bool(KEY_HAS_VISION_ENC, has_vision, false);
+            get_bool(KEY_HAS_AUDIO_ENC,  has_audio,  false);
+
+            if (has_vision) {
+                LOG_INF("%s: has vision encoder\n", __func__);
+            }
+            if (has_audio) {
+                LOG_INF("%s: has audio encoder\n", __func__);
+            }
+        }
+
         // tensors
         {
             for (int i = 0; i < n_tensors; ++i) {
@@ -1801,44 +2101,85 @@ struct clip_model_loader {
         }
     }
 
-    void load_hparams() {
-        auto & hparams = ctx_clip.vision_model.hparams;
+    void load_hparams(clip_model & model, clip_modality modality) {
+        auto & hparams = model.hparams;
         std::string log_ffn_op; // for logging
 
+        // sanity check
+        if (modality == CLIP_MODALITY_VISION) {
+            GGML_ASSERT(has_vision);
+        } else if (modality == CLIP_MODALITY_AUDIO) {
+            GGML_ASSERT(has_audio);
+        }
+        model.modality = modality;
+
         // projector type
         std::string proj_type;
         {
             get_string(KEY_PROJ_TYPE, proj_type, false);
             if (!proj_type.empty()) {
-                ctx_clip.proj_type = clip_projector_type_from_string(proj_type);
+                model.proj_type = clip_projector_type_from_string(proj_type);
             }
-            if (ctx_clip.proj_type == PROJECTOR_TYPE_UNKNOWN) {
+            if (model.proj_type == PROJECTOR_TYPE_UNKNOWN) {
                 throw std::runtime_error(string_format("%s: unknown projector type: %s\n", __func__, proj_type.c_str()));
             }
+
+            // correct arch for multimodal models
+            if (model.proj_type == PROJECTOR_TYPE_QWEN25O) {
+                model.proj_type = modality == CLIP_MODALITY_VISION
+                                    ? PROJECTOR_TYPE_QWEN25VL
+                                    : PROJECTOR_TYPE_QWEN2A;
+            }
         }
 
+        const bool is_vision = model.modality == CLIP_MODALITY_VISION;
+        const bool is_audio  = model.modality == CLIP_MODALITY_AUDIO;
+
         // other hparams
         {
-            get_i32(KEY_MINICPMV_VERSION, ctx_clip.minicpmv_version, false); // legacy
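+            // the same hparam key templates are shared across modalities, namespaced by the "vision"/"audio" prefix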
+            const char * prefix = is_vision ? "vision" : "audio";
+            get_u32(string_format(KEY_N_EMBD,         prefix), hparams.n_embd);
+            get_u32(string_format(KEY_N_HEAD,         prefix), hparams.n_head);
+            get_u32(string_format(KEY_N_FF,           prefix), hparams.n_ff);
+            get_u32(string_format(KEY_N_BLOCK,        prefix), hparams.n_layer);
+            get_u32(string_format(KEY_PROJ_DIM,       prefix), hparams.projection_dim);
+            get_f32(string_format(KEY_LAYER_NORM_EPS, prefix), hparams.eps);
 
-            get_u32(KEY_N_EMBD,         hparams.n_embd);
-            get_u32(KEY_N_HEAD,         hparams.n_head);
-            get_u32(KEY_N_FF,           hparams.n_ff);
-            get_u32(KEY_N_BLOCK,        hparams.n_layer);
-            get_u32(KEY_PROJ_DIM,       hparams.projection_dim);
-            get_f32(KEY_LAYER_NORM_EPS, hparams.eps);
-            get_u32(KEY_IMAGE_SIZE,     hparams.image_size);
-            get_u32(KEY_PATCH_SIZE,     hparams.patch_size);
-            get_u32(KEY_IMAGE_CROP_RESOLUTION,    hparams.image_crop_resolution, false);
-            get_arr_int(KEY_IMAGE_GRID_PINPOINTS, hparams.image_grid_pinpoints, false);
+            if (is_vision) {
+                get_u32(KEY_IMAGE_SIZE, hparams.image_size);
+                get_u32(KEY_PATCH_SIZE, hparams.patch_size);
+                get_u32(KEY_IMAGE_CROP_RESOLUTION, hparams.image_crop_resolution, false);
+                get_i32(KEY_MINICPMV_VERSION, hparams.minicpmv_version, false); // legacy
+
+            } else if (is_audio) {
+                get_u32(KEY_A_NUM_MEL_BINS, hparams.n_mel_bins);
+
+            } else {
+                GGML_ASSERT(false && "unknown modality");
+            }
+
+            // for pinpoints, we need to convert them into a list of resolution candidates
+            {
+                std::vector<int> pinpoints;
+                get_arr_int(KEY_IMAGE_GRID_PINPOINTS, pinpoints, false);
+                if (!pinpoints.empty()) {
+                    for (size_t i = 0; i < pinpoints.size(); i += 2) {
+                        hparams.image_res_candidates.push_back({
+                            pinpoints[i],
+                            pinpoints[i+1],
+                        });
+                    }
+                }
+            }
 
             // default warmup value
             hparams.warmup_image_size = hparams.image_size;
 
-            ctx_clip.has_llava_projector = ctx_clip.proj_type == PROJECTOR_TYPE_MLP
-                                        || ctx_clip.proj_type == PROJECTOR_TYPE_MLP_NORM
-                                        || ctx_clip.proj_type == PROJECTOR_TYPE_LDP
-                                        || ctx_clip.proj_type == PROJECTOR_TYPE_LDPV2;
+            hparams.has_llava_projector = model.proj_type == PROJECTOR_TYPE_MLP
+                                       || model.proj_type == PROJECTOR_TYPE_MLP_NORM
+                                       || model.proj_type == PROJECTOR_TYPE_LDP
+                                       || model.proj_type == PROJECTOR_TYPE_LDPV2;
 
             {
                 bool use_gelu = false;
@@ -1868,7 +2209,7 @@ struct clip_model_loader {
                 }
             }
 
-            {
+            if (is_vision) {
                 int idx_mean = gguf_find_key(ctx_gguf.get(), KEY_IMAGE_MEAN);
                 int idx_std  = gguf_find_key(ctx_gguf.get(), KEY_IMAGE_STD);
                 GGML_ASSERT(idx_mean >= 0 && "image_mean not found");
@@ -1876,8 +2217,8 @@ struct clip_model_loader {
                 const float * mean_data = (const float *) gguf_get_arr_data(ctx_gguf.get(), idx_mean);
                 const float * std_data  = (const float *) gguf_get_arr_data(ctx_gguf.get(), idx_std);
                 for (int i = 0; i < 3; ++i) {
-                    ctx_clip.image_mean[i] = mean_data[i];
-                    ctx_clip.image_std[i]  = std_data[i];
+                    hparams.image_mean[i] = mean_data[i];
+                    hparams.image_std[i]  = std_data[i];
                 }
             }
 
@@ -1894,11 +2235,11 @@ struct clip_model_loader {
             }
 
             // model-specific params
-            switch (ctx_clip.proj_type) {
+            switch (model.proj_type) {
                 case PROJECTOR_TYPE_MINICPMV:
                     {
-                        if (ctx_clip.minicpmv_version == 0) {
-                            ctx_clip.minicpmv_version = 2; // default to 2 if not set
+                        if (hparams.minicpmv_version == 0) {
+                            hparams.minicpmv_version = 2; // default to 2 if not set
                         }
                     } break;
                 case PROJECTOR_TYPE_IDEFICS3:
@@ -1910,6 +2251,9 @@ struct clip_model_loader {
                     {
                         hparams.rope_theta = 10000.0f;
                         hparams.warmup_image_size = hparams.patch_size * 8;
+                        // Mistral Small 2506 needs 1024x1024 image size cap to prevent OOM
+                        // ref: https://github.com/ggml-org/llama.cpp/issues/14310
+                        hparams.image_size = 1024;
                         get_u32(KEY_SPATIAL_MERGE_SIZE, hparams.spatial_merge_size, false);
                     } break;
                 case PROJECTOR_TYPE_GEMMA3:
@@ -1939,6 +2283,25 @@ struct clip_model_loader {
                         hparams.warmup_image_size = hparams.patch_size * 8;
                         get_u32(KEY_WIN_ATTN_PATTERN, hparams.n_wa_pattern);
                     } break;
+                case PROJECTOR_TYPE_LLAMA4:
+                    {
+                        hparams.rope_theta = 10000.0f;
+                        get_u32(KEY_PROJ_SCALE_FACTOR, hparams.proj_scale_factor);
+                        set_llava_uhd_res_candidates(model, 3);
+                    } break;
+                case PROJECTOR_TYPE_ULTRAVOX:
+                case PROJECTOR_TYPE_QWEN2A:
+                case PROJECTOR_TYPE_VOXTRAL:
+                    {
+                        bool require_stack = model.proj_type == PROJECTOR_TYPE_ULTRAVOX ||
+                                             model.proj_type == PROJECTOR_TYPE_VOXTRAL;
+                        get_u32(KEY_A_PROJ_STACK_FACTOR, hparams.proj_stack_factor, require_stack);
+                        if (hparams.n_mel_bins != 128) {
+                            throw std::runtime_error(string_format("%s: only 128 mel bins are supported for ultravox\n", __func__));
+                        }
+                        hparams.ffn_op = FFN_GELU_ERF;
+                        log_ffn_op = "gelu_erf"; // temporary solution for logging
+                    } break;
                 default:
                     break;
             }
@@ -1948,25 +2311,36 @@ struct clip_model_loader {
             LOG_INF("%s: n_head:             %d\n", __func__, hparams.n_head);
             LOG_INF("%s: n_ff:               %d\n", __func__, hparams.n_ff);
             LOG_INF("%s: n_layer:            %d\n", __func__, hparams.n_layer);
-            LOG_INF("%s: projection_dim:     %d\n", __func__, hparams.projection_dim);
-            LOG_INF("%s: image_size:         %d\n", __func__, hparams.image_size);
-            LOG_INF("%s: patch_size:         %d\n", __func__, hparams.patch_size);
-            LOG_INF("\n");
-            LOG_INF("%s: has_llava_proj:     %d\n", __func__, ctx_clip.has_llava_projector);
-            LOG_INF("%s: minicpmv_version:   %d\n", __func__, ctx_clip.minicpmv_version);
-            LOG_INF("%s: proj_scale_factor:  %d\n", __func__, hparams.proj_scale_factor);
-            LOG_INF("%s: n_wa_pattern:       %d\n", __func__, hparams.n_wa_pattern);
             LOG_INF("%s: ffn_op:             %s\n", __func__, log_ffn_op.c_str());
+            LOG_INF("%s: projection_dim:     %d\n", __func__, hparams.projection_dim);
+            if (is_vision) {
+                LOG_INF("\n--- vision hparams ---\n");
+                LOG_INF("%s: image_size:         %d\n", __func__, hparams.image_size);
+                LOG_INF("%s: patch_size:         %d\n", __func__, hparams.patch_size);
+                LOG_INF("%s: has_llava_proj:     %d\n", __func__, hparams.has_llava_projector);
+                LOG_INF("%s: minicpmv_version:   %d\n", __func__, hparams.minicpmv_version);
+                LOG_INF("%s: proj_scale_factor:  %d\n", __func__, hparams.proj_scale_factor);
+                LOG_INF("%s: n_wa_pattern:       %d\n", __func__, hparams.n_wa_pattern);
+            } else if (is_audio) {
+                LOG_INF("\n--- audio hparams ---\n");
+                LOG_INF("%s: n_mel_bins:         %d\n", __func__, hparams.n_mel_bins);
+                LOG_INF("%s: proj_stack_factor:  %d\n", __func__, hparams.proj_stack_factor);
+            }
+            LOG_INF("\n");
             LOG_INF("%s: model size:         %.2f MiB\n", __func__, model_size / 1024.0 / 1024.0);
             LOG_INF("%s: metadata size:      %.2f MiB\n", __func__, ggml_get_mem_size(ctx_meta.get()) / 1024.0 / 1024.0);
         }
     }
 
-    void load_tensors() {
-        auto & hparams = ctx_clip.vision_model.hparams;
+    void load_tensors(clip_ctx & ctx_clip) {
+        auto & model = ctx_clip.model;
+        auto & hparams = model.hparams;
         std::map<std::string, size_t> tensor_offset;
         std::vector<ggml_tensor *> tensors_to_load;
 
+        // TODO @ngxson : support both audio and video in the future
+        const char * prefix = model.modality == CLIP_MODALITY_AUDIO ? "a" : "v";
+
         // get offsets
         for (int64_t i = 0; i < gguf_get_n_tensors(ctx_gguf.get()); ++i) {
             const char * name = gguf_get_tensor_name(ctx_gguf.get(), i);
@@ -1975,7 +2349,7 @@ struct clip_model_loader {
 
         // create data context
         struct ggml_init_params params = {
-            /*.mem_size =*/ (gguf_get_n_tensors(ctx_gguf.get()) + 1) * ggml_tensor_overhead(),
+            /*.mem_size =*/ static_cast<size_t>(gguf_get_n_tensors(ctx_gguf.get()) + 1) * ggml_tensor_overhead(),
             /*.mem_buffer =*/ NULL,
             /*.no_alloc =*/ true,
         };
@@ -2000,51 +2374,49 @@ struct clip_model_loader {
             return cur;
         };
 
-        auto & vision_model = ctx_clip.vision_model;
+        model.class_embedding = get_tensor(TN_CLASS_EMBD, false);
 
-        vision_model.class_embedding = get_tensor(TN_CLASS_EMBD, false);
+        model.pre_ln_w = get_tensor(string_format(TN_LN_PRE, prefix, "weight"), false);
+        model.pre_ln_b = get_tensor(string_format(TN_LN_PRE, prefix, "bias"),   false);
 
-        vision_model.pre_ln_w = get_tensor(string_format(TN_LN_PRE, "v", "weight"), false);
-        vision_model.pre_ln_b = get_tensor(string_format(TN_LN_PRE, "v", "bias"),   false);
+        model.post_ln_w = get_tensor(string_format(TN_LN_POST, prefix, "weight"), false);
+        model.post_ln_b = get_tensor(string_format(TN_LN_POST, prefix, "bias"),   false);
 
-        vision_model.post_ln_w = get_tensor(string_format(TN_LN_POST, "v", "weight"), false);
-        vision_model.post_ln_b = get_tensor(string_format(TN_LN_POST, "v", "bias"),   false);
+        model.patch_bias = get_tensor(TN_PATCH_BIAS, false);
+        model.patch_embeddings_0 = get_tensor(TN_PATCH_EMBD,   false);
+        model.patch_embeddings_1 = get_tensor(TN_PATCH_EMBD_1, false);
 
-        vision_model.patch_bias = get_tensor(TN_PATCH_BIAS, false);
-        vision_model.patch_embeddings_0 = get_tensor(TN_PATCH_EMBD,   false);
-        vision_model.patch_embeddings_1 = get_tensor(TN_PATCH_EMBD_1, false);
-
-        vision_model.position_embeddings = get_tensor(string_format(TN_POS_EMBD, "v"), false);
+        model.position_embeddings = get_tensor(string_format(TN_POS_EMBD, prefix), false);
 
         // layers
-        vision_model.layers.resize(hparams.n_layer);
+        model.layers.resize(hparams.n_layer);
         for (int il = 0; il < hparams.n_layer; ++il) {
-            auto & layer = vision_model.layers[il];
-            layer.k_w    = get_tensor(string_format(TN_ATTN_K,      "v", il, "weight"));
-            layer.q_w    = get_tensor(string_format(TN_ATTN_Q,      "v", il, "weight"));
-            layer.v_w    = get_tensor(string_format(TN_ATTN_V,      "v", il, "weight"));
-            layer.o_w    = get_tensor(string_format(TN_ATTN_OUTPUT, "v", il, "weight"));
-            layer.k_norm = get_tensor(string_format(TN_ATTN_K_NORM, "v", il, "weight"), false);
-            layer.q_norm = get_tensor(string_format(TN_ATTN_Q_NORM, "v", il, "weight"), false);
-            layer.ln_1_w = get_tensor(string_format(TN_LN_1,        "v", il, "weight"), false);
-            layer.ln_2_w = get_tensor(string_format(TN_LN_2,        "v", il, "weight"), false);
-            layer.ls_1_w = get_tensor(string_format(TN_LS_1,        "v", il, "weight"), false); // no bias
-            layer.ls_2_w = get_tensor(string_format(TN_LS_2,        "v", il, "weight"), false); // no bias
+            auto & layer = model.layers[il];
+            layer.k_w    = get_tensor(string_format(TN_ATTN_K,      prefix, il, "weight"));
+            layer.q_w    = get_tensor(string_format(TN_ATTN_Q,      prefix, il, "weight"));
+            layer.v_w    = get_tensor(string_format(TN_ATTN_V,      prefix, il, "weight"));
+            layer.o_w    = get_tensor(string_format(TN_ATTN_OUTPUT, prefix, il, "weight"));
+            layer.k_norm = get_tensor(string_format(TN_ATTN_K_NORM, prefix, il, "weight"), false);
+            layer.q_norm = get_tensor(string_format(TN_ATTN_Q_NORM, prefix, il, "weight"), false);
+            layer.ln_1_w = get_tensor(string_format(TN_LN_1,        prefix, il, "weight"), false);
+            layer.ln_2_w = get_tensor(string_format(TN_LN_2,        prefix, il, "weight"), false);
+            layer.ls_1_w = get_tensor(string_format(TN_LS_1,        prefix, il, "weight"), false); // no bias
+            layer.ls_2_w = get_tensor(string_format(TN_LS_2,        prefix, il, "weight"), false); // no bias
 
-            layer.k_b    = get_tensor(string_format(TN_ATTN_K,      "v", il, "bias"), false);
-            layer.q_b    = get_tensor(string_format(TN_ATTN_Q,      "v", il, "bias"), false);
-            layer.v_b    = get_tensor(string_format(TN_ATTN_V,      "v", il, "bias"), false);
-            layer.o_b    = get_tensor(string_format(TN_ATTN_OUTPUT, "v", il, "bias"), false);
-            layer.ln_1_b = get_tensor(string_format(TN_LN_1,        "v", il, "bias"), false);
-            layer.ln_2_b = get_tensor(string_format(TN_LN_2,        "v", il, "bias"), false);
+            layer.k_b    = get_tensor(string_format(TN_ATTN_K,      prefix, il, "bias"), false);
+            layer.q_b    = get_tensor(string_format(TN_ATTN_Q,      prefix, il, "bias"), false);
+            layer.v_b    = get_tensor(string_format(TN_ATTN_V,      prefix, il, "bias"), false);
+            layer.o_b    = get_tensor(string_format(TN_ATTN_OUTPUT, prefix, il, "bias"), false);
+            layer.ln_1_b = get_tensor(string_format(TN_LN_1,        prefix, il, "bias"), false);
+            layer.ln_2_b = get_tensor(string_format(TN_LN_2,        prefix, il, "bias"), false);
 
             // ffn
-            layer.ff_up_w   = get_tensor(string_format(TN_FFN_UP,   "v", il, "weight"));
-            layer.ff_up_b   = get_tensor(string_format(TN_FFN_UP,   "v", il, "bias"),   false);
-            layer.ff_gate_w = get_tensor(string_format(TN_FFN_GATE, "v", il, "weight"), false);
-            layer.ff_gate_b = get_tensor(string_format(TN_FFN_GATE, "v", il, "bias"),   false);
-            layer.ff_down_w = get_tensor(string_format(TN_FFN_DOWN, "v", il, "weight"));
-            layer.ff_down_b = get_tensor(string_format(TN_FFN_DOWN, "v", il, "bias"),   false);
+            layer.ff_up_w   = get_tensor(string_format(TN_FFN_UP,   prefix, il, "weight"));
+            layer.ff_up_b   = get_tensor(string_format(TN_FFN_UP,   prefix, il, "bias"),   false);
+            layer.ff_gate_w = get_tensor(string_format(TN_FFN_GATE, prefix, il, "weight"), false);
+            layer.ff_gate_b = get_tensor(string_format(TN_FFN_GATE, prefix, il, "bias"),   false);
+            layer.ff_down_w = get_tensor(string_format(TN_FFN_DOWN, prefix, il, "weight"));
+            layer.ff_down_b = get_tensor(string_format(TN_FFN_DOWN, prefix, il, "bias"),   false);
 
             // some models were exported with legacy (incorrect) naming, which is quite messy; let's fix it here
             // note: Qwen models converted from the old surgery script have n_ff = 0, so we cannot use n_ff to check!
@@ -2060,140 +2432,175 @@ struct clip_model_loader {
             }
         }
 
-        switch (ctx_clip.proj_type) {
+        switch (model.proj_type) {
             case PROJECTOR_TYPE_MLP:
             case PROJECTOR_TYPE_MLP_NORM:
                 {
                     // LLaVA projection
-                    vision_model.mm_0_w = get_tensor(string_format(TN_LLAVA_PROJ, 0, "weight"), false);
-                    vision_model.mm_0_b = get_tensor(string_format(TN_LLAVA_PROJ, 0, "bias"), false);
+                    model.mm_0_w = get_tensor(string_format(TN_LLAVA_PROJ, 0, "weight"), false);
+                    model.mm_0_b = get_tensor(string_format(TN_LLAVA_PROJ, 0, "bias"), false);
                     // Yi-type llava
-                    vision_model.mm_1_w = get_tensor(string_format(TN_LLAVA_PROJ, 1, "weight"), false);
-                    vision_model.mm_1_b = get_tensor(string_format(TN_LLAVA_PROJ, 1, "bias"), false);
+                    model.mm_1_w = get_tensor(string_format(TN_LLAVA_PROJ, 1, "weight"), false);
+                    model.mm_1_b = get_tensor(string_format(TN_LLAVA_PROJ, 1, "bias"), false);
                     // missing in Yi-type llava
-                    vision_model.mm_2_w = get_tensor(string_format(TN_LLAVA_PROJ, 2, "weight"), false);
-                    vision_model.mm_2_b = get_tensor(string_format(TN_LLAVA_PROJ, 2, "bias"), false);
+                    model.mm_2_w = get_tensor(string_format(TN_LLAVA_PROJ, 2, "weight"), false);
+                    model.mm_2_b = get_tensor(string_format(TN_LLAVA_PROJ, 2, "bias"), false);
                     // Yi-type llava
-                    vision_model.mm_3_w = get_tensor(string_format(TN_LLAVA_PROJ, 3, "weight"), false);
-                    vision_model.mm_3_b = get_tensor(string_format(TN_LLAVA_PROJ, 3, "bias"), false);
-                    vision_model.mm_4_w = get_tensor(string_format(TN_LLAVA_PROJ, 4, "weight"), false);
-                    vision_model.mm_4_b = get_tensor(string_format(TN_LLAVA_PROJ, 4, "bias"), false);
-                    if (vision_model.mm_3_w) {
+                    model.mm_3_w = get_tensor(string_format(TN_LLAVA_PROJ, 3, "weight"), false);
+                    model.mm_3_b = get_tensor(string_format(TN_LLAVA_PROJ, 3, "bias"), false);
+                    model.mm_4_w = get_tensor(string_format(TN_LLAVA_PROJ, 4, "weight"), false);
+                    model.mm_4_b = get_tensor(string_format(TN_LLAVA_PROJ, 4, "bias"), false);
+                    if (model.mm_3_w) {
                         // TODO: this is a hack to support Yi-type llava
-                        ctx_clip.proj_type = PROJECTOR_TYPE_MLP_NORM;
+                        model.proj_type = PROJECTOR_TYPE_MLP_NORM;
                     }
-                    vision_model.image_newline = get_tensor(TN_IMAGE_NEWLINE, false);
+                    model.image_newline = get_tensor(TN_IMAGE_NEWLINE, false);
                 } break;
             case PROJECTOR_TYPE_LDP:
                 {
                     // MobileVLM projection
-                    vision_model.mm_model_mlp_1_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 1, "weight"));
-                    vision_model.mm_model_mlp_1_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 1, "bias"));
-                    vision_model.mm_model_mlp_3_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 3, "weight"));
-                    vision_model.mm_model_mlp_3_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 3, "bias"));
-                    vision_model.mm_model_block_1_block_0_0_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 0, "0.weight"));
-                    vision_model.mm_model_block_1_block_0_1_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 0, "1.weight"));
-                    vision_model.mm_model_block_1_block_0_1_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 0, "1.bias"));
-                    vision_model.mm_model_block_1_block_1_fc1_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 1, "fc1.weight"));
-                    vision_model.mm_model_block_1_block_1_fc1_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 1, "fc1.bias"));
-                    vision_model.mm_model_block_1_block_1_fc2_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 1, "fc2.weight"));
-                    vision_model.mm_model_block_1_block_1_fc2_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 1, "fc2.bias"));
-                    vision_model.mm_model_block_1_block_2_0_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 2, "0.weight"));
-                    vision_model.mm_model_block_1_block_2_1_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 2, "1.weight"));
-                    vision_model.mm_model_block_1_block_2_1_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 2, "1.bias"));
-                    vision_model.mm_model_block_2_block_0_0_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 0, "0.weight"));
-                    vision_model.mm_model_block_2_block_0_1_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 0, "1.weight"));
-                    vision_model.mm_model_block_2_block_0_1_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 0, "1.bias"));
-                    vision_model.mm_model_block_2_block_1_fc1_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 1, "fc1.weight"));
-                    vision_model.mm_model_block_2_block_1_fc1_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 1, "fc1.bias"));
-                    vision_model.mm_model_block_2_block_1_fc2_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 1, "fc2.weight"));
-                    vision_model.mm_model_block_2_block_1_fc2_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 1, "fc2.bias"));
-                    vision_model.mm_model_block_2_block_2_0_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 2, "0.weight"));
-                    vision_model.mm_model_block_2_block_2_1_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 2, "1.weight"));
-                    vision_model.mm_model_block_2_block_2_1_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 2, "1.bias"));
+                    model.mm_model_mlp_1_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 1, "weight"));
+                    model.mm_model_mlp_1_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 1, "bias"));
+                    model.mm_model_mlp_3_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 3, "weight"));
+                    model.mm_model_mlp_3_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 3, "bias"));
+                    model.mm_model_block_1_block_0_0_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 0, "0.weight"));
+                    model.mm_model_block_1_block_0_1_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 0, "1.weight"));
+                    model.mm_model_block_1_block_0_1_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 0, "1.bias"));
+                    model.mm_model_block_1_block_1_fc1_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 1, "fc1.weight"));
+                    model.mm_model_block_1_block_1_fc1_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 1, "fc1.bias"));
+                    model.mm_model_block_1_block_1_fc2_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 1, "fc2.weight"));
+                    model.mm_model_block_1_block_1_fc2_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 1, "fc2.bias"));
+                    model.mm_model_block_1_block_2_0_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 2, "0.weight"));
+                    model.mm_model_block_1_block_2_1_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 2, "1.weight"));
+                    model.mm_model_block_1_block_2_1_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 1, 2, "1.bias"));
+                    model.mm_model_block_2_block_0_0_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 0, "0.weight"));
+                    model.mm_model_block_2_block_0_1_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 0, "1.weight"));
+                    model.mm_model_block_2_block_0_1_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 0, "1.bias"));
+                    model.mm_model_block_2_block_1_fc1_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 1, "fc1.weight"));
+                    model.mm_model_block_2_block_1_fc1_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 1, "fc1.bias"));
+                    model.mm_model_block_2_block_1_fc2_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 1, "fc2.weight"));
+                    model.mm_model_block_2_block_1_fc2_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 1, "fc2.bias"));
+                    model.mm_model_block_2_block_2_0_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 2, "0.weight"));
+                    model.mm_model_block_2_block_2_1_w = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 2, "1.weight"));
+                    model.mm_model_block_2_block_2_1_b = get_tensor(string_format(TN_MVLM_PROJ_BLOCK, 2, 2, "1.bias"));
                 } break;
             case PROJECTOR_TYPE_LDPV2:
                 {
                     // MobileVLM_V2 projection
-                    vision_model.mm_model_mlp_0_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 0, "weight"));
-                    vision_model.mm_model_mlp_0_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 0, "bias"));
-                    vision_model.mm_model_mlp_2_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 2, "weight"));
-                    vision_model.mm_model_mlp_2_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 2, "bias"));
-                    vision_model.mm_model_peg_0_w = get_tensor(string_format(TN_MVLM_PROJ_PEG, 0, "weight"));
-                    vision_model.mm_model_peg_0_b = get_tensor(string_format(TN_MVLM_PROJ_PEG, 0, "bias"));
+                    model.mm_model_mlp_0_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 0, "weight"));
+                    model.mm_model_mlp_0_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 0, "bias"));
+                    model.mm_model_mlp_2_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 2, "weight"));
+                    model.mm_model_mlp_2_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 2, "bias"));
+                    model.mm_model_peg_0_w = get_tensor(string_format(TN_MVLM_PROJ_PEG, 0, "weight"));
+                    model.mm_model_peg_0_b = get_tensor(string_format(TN_MVLM_PROJ_PEG, 0, "bias"));
                 } break;
             case PROJECTOR_TYPE_MINICPMV:
                 {
-                    // vision_model.mm_model_pos_embed = get_tensor(new_clip->ctx_data, TN_MINICPMV_POS_EMBD);
-                    vision_model.mm_model_pos_embed_k = get_tensor(TN_MINICPMV_POS_EMBD_K);
-                    vision_model.mm_model_query = get_tensor(TN_MINICPMV_QUERY);
-                    vision_model.mm_model_proj = get_tensor(TN_MINICPMV_PROJ);
-                    vision_model.mm_model_kv_proj = get_tensor(TN_MINICPMV_KV_PROJ);
-                    vision_model.mm_model_attn_q_w = get_tensor(string_format(TN_MINICPMV_ATTN, "q", "weight"));
-                    vision_model.mm_model_attn_k_w = get_tensor(string_format(TN_MINICPMV_ATTN, "k", "weight"));
-                    vision_model.mm_model_attn_v_w = get_tensor(string_format(TN_MINICPMV_ATTN, "v", "weight"));
-                    vision_model.mm_model_attn_q_b = get_tensor(string_format(TN_MINICPMV_ATTN, "q", "bias"));
-                    vision_model.mm_model_attn_k_b = get_tensor(string_format(TN_MINICPMV_ATTN, "k", "bias"));
-                    vision_model.mm_model_attn_v_b = get_tensor(string_format(TN_MINICPMV_ATTN, "v", "bias"));
-                    vision_model.mm_model_attn_o_w = get_tensor(string_format(TN_MINICPMV_ATTN, "out", "weight"));
-                    vision_model.mm_model_attn_o_b = get_tensor(string_format(TN_MINICPMV_ATTN, "out", "bias"));
-                    vision_model.mm_model_ln_q_w = get_tensor(string_format(TN_MINICPMV_LN, "q", "weight"));
-                    vision_model.mm_model_ln_q_b = get_tensor(string_format(TN_MINICPMV_LN, "q", "bias"));
-                    vision_model.mm_model_ln_kv_w = get_tensor(string_format(TN_MINICPMV_LN, "kv", "weight"));
-                    vision_model.mm_model_ln_kv_b = get_tensor(string_format(TN_MINICPMV_LN, "kv", "bias"));
-                    vision_model.mm_model_ln_post_w = get_tensor(string_format(TN_MINICPMV_LN, "post", "weight"));
-                    vision_model.mm_model_ln_post_b = get_tensor(string_format(TN_MINICPMV_LN, "post", "bias"));
+                    // model.mm_model_pos_embed = get_tensor(new_clip->ctx_data, TN_MINICPMV_POS_EMBD);
+                    model.mm_model_pos_embed_k = get_tensor(TN_MINICPMV_POS_EMBD_K);
+                    model.mm_model_query = get_tensor(TN_MINICPMV_QUERY);
+                    model.mm_model_proj = get_tensor(TN_MINICPMV_PROJ);
+                    model.mm_model_kv_proj = get_tensor(TN_MINICPMV_KV_PROJ);
+                    model.mm_model_attn_q_w = get_tensor(string_format(TN_MINICPMV_ATTN, "q", "weight"));
+                    model.mm_model_attn_k_w = get_tensor(string_format(TN_MINICPMV_ATTN, "k", "weight"));
+                    model.mm_model_attn_v_w = get_tensor(string_format(TN_MINICPMV_ATTN, "v", "weight"));
+                    model.mm_model_attn_q_b = get_tensor(string_format(TN_MINICPMV_ATTN, "q", "bias"));
+                    model.mm_model_attn_k_b = get_tensor(string_format(TN_MINICPMV_ATTN, "k", "bias"));
+                    model.mm_model_attn_v_b = get_tensor(string_format(TN_MINICPMV_ATTN, "v", "bias"));
+                    model.mm_model_attn_o_w = get_tensor(string_format(TN_MINICPMV_ATTN, "out", "weight"));
+                    model.mm_model_attn_o_b = get_tensor(string_format(TN_MINICPMV_ATTN, "out", "bias"));
+                    model.mm_model_ln_q_w = get_tensor(string_format(TN_MINICPMV_LN, "q", "weight"));
+                    model.mm_model_ln_q_b = get_tensor(string_format(TN_MINICPMV_LN, "q", "bias"));
+                    model.mm_model_ln_kv_w = get_tensor(string_format(TN_MINICPMV_LN, "kv", "weight"));
+                    model.mm_model_ln_kv_b = get_tensor(string_format(TN_MINICPMV_LN, "kv", "bias"));
+                    model.mm_model_ln_post_w = get_tensor(string_format(TN_MINICPMV_LN, "post", "weight"));
+                    model.mm_model_ln_post_b = get_tensor(string_format(TN_MINICPMV_LN, "post", "bias"));
                 } break;
             case PROJECTOR_TYPE_GLM_EDGE:
                 {
-                    vision_model.mm_model_adapter_conv_w = get_tensor(string_format(TN_GLM_ADAPER_CONV, "weight"));
-                    vision_model.mm_model_adapter_conv_b = get_tensor(string_format(TN_GLM_ADAPER_CONV, "bias"));
-                    vision_model.mm_model_mlp_0_w = get_tensor(string_format(TN_GLM_ADAPTER_LINEAR, "weight"));
-                    vision_model.mm_model_ln_q_w = get_tensor(string_format(TN_GLM_ADAPTER_NORM_1, "weight"));
-                    vision_model.mm_model_ln_q_b = get_tensor(string_format(TN_GLM_ADAPTER_NORM_1, "bias"));
-                    vision_model.mm_model_mlp_1_w = get_tensor(string_format(TN_GLM_ADAPTER_D_H_2_4H, "weight"));
-                    vision_model.mm_model_mlp_2_w = get_tensor(string_format(TN_GLM_ADAPTER_GATE, "weight"));
-                    vision_model.mm_model_mlp_3_w = get_tensor(string_format(TN_GLM_ADAPTER_D_4H_2_H, "weight"));
-                    vision_model.mm_glm_tok_boi = get_tensor(string_format(TN_TOK_GLM_BOI, "weight"));
-                    vision_model.mm_glm_tok_eoi = get_tensor(string_format(TN_TOK_GLM_EOI, "weight"));
+                    model.mm_model_adapter_conv_w = get_tensor(string_format(TN_GLM_ADAPER_CONV, "weight"));
+                    model.mm_model_adapter_conv_b = get_tensor(string_format(TN_GLM_ADAPER_CONV, "bias"));
+                    model.mm_model_mlp_0_w = get_tensor(string_format(TN_GLM_ADAPTER_LINEAR, "weight"));
+                    model.mm_model_ln_q_w = get_tensor(string_format(TN_GLM_ADAPTER_NORM_1, "weight"));
+                    model.mm_model_ln_q_b = get_tensor(string_format(TN_GLM_ADAPTER_NORM_1, "bias"));
+                    model.mm_model_mlp_1_w = get_tensor(string_format(TN_GLM_ADAPTER_D_H_2_4H, "weight"));
+                    model.mm_model_mlp_2_w = get_tensor(string_format(TN_GLM_ADAPTER_GATE, "weight"));
+                    model.mm_model_mlp_3_w = get_tensor(string_format(TN_GLM_ADAPTER_D_4H_2_H, "weight"));
+                    model.mm_glm_tok_boi = get_tensor(string_format(TN_TOK_GLM_BOI, "weight"));
+                    model.mm_glm_tok_eoi = get_tensor(string_format(TN_TOK_GLM_EOI, "weight"));
                 } break;
             case PROJECTOR_TYPE_QWEN2VL:
             case PROJECTOR_TYPE_QWEN25VL:
                 {
-                    vision_model.mm_0_w = get_tensor(string_format(TN_LLAVA_PROJ, 0, "weight"));
-                    vision_model.mm_0_b = get_tensor(string_format(TN_LLAVA_PROJ, 0, "bias"));
-                    vision_model.mm_1_w = get_tensor(string_format(TN_LLAVA_PROJ, 2, "weight"));
-                    vision_model.mm_1_b = get_tensor(string_format(TN_LLAVA_PROJ, 2, "bias"));
+                    model.mm_0_w = get_tensor(string_format(TN_LLAVA_PROJ, 0, "weight"));
+                    model.mm_0_b = get_tensor(string_format(TN_LLAVA_PROJ, 0, "bias"));
+                    model.mm_1_w = get_tensor(string_format(TN_LLAVA_PROJ, 2, "weight"));
+                    model.mm_1_b = get_tensor(string_format(TN_LLAVA_PROJ, 2, "bias"));
                 } break;
             case PROJECTOR_TYPE_GEMMA3:
                 {
-                    vision_model.mm_input_proj_w = get_tensor(TN_MM_INP_PROJ);
-                    vision_model.mm_soft_emb_norm_w = get_tensor(TN_MM_SOFT_EMB_N);
+                    model.mm_input_proj_w = get_tensor(TN_MM_INP_PROJ);
+                    model.mm_soft_emb_norm_w = get_tensor(TN_MM_SOFT_EMB_N);
                 } break;
             case PROJECTOR_TYPE_IDEFICS3:
                 {
-                    vision_model.projection = get_tensor(TN_MM_PROJECTOR);
+                    model.projection = get_tensor(TN_MM_PROJECTOR);
                 } break;
             case PROJECTOR_TYPE_PIXTRAL:
                 {
-                    vision_model.mm_1_w = get_tensor(string_format(TN_LLAVA_PROJ, 1, "weight"));
-                    vision_model.mm_1_b = get_tensor(string_format(TN_LLAVA_PROJ, 1, "bias"), false);
-                    vision_model.mm_2_w = get_tensor(string_format(TN_LLAVA_PROJ, 2, "weight"));
-                    vision_model.mm_2_b = get_tensor(string_format(TN_LLAVA_PROJ, 2, "bias"), false);
+                    model.mm_1_w = get_tensor(string_format(TN_LLAVA_PROJ, 1, "weight"));
+                    model.mm_1_b = get_tensor(string_format(TN_LLAVA_PROJ, 1, "bias"), false);
+                    model.mm_2_w = get_tensor(string_format(TN_LLAVA_PROJ, 2, "weight"));
+                    model.mm_2_b = get_tensor(string_format(TN_LLAVA_PROJ, 2, "bias"), false);
                     // [IMG_BREAK] token embedding
-                    vision_model.token_embd_img_break = get_tensor(TN_TOK_IMG_BREAK);
+                    model.token_embd_img_break = get_tensor(TN_TOK_IMG_BREAK);
                     // for mistral small 3.1
-                    vision_model.mm_input_norm_w   = get_tensor(TN_MM_INP_NORM,     false);
-                    vision_model.mm_patch_merger_w = get_tensor(TN_MM_PATCH_MERGER, false);
+                    model.mm_input_norm_w   = get_tensor(TN_MM_INP_NORM,     false);
+                    model.mm_patch_merger_w = get_tensor(TN_MM_PATCH_MERGER, false);
+                } break;
+            case PROJECTOR_TYPE_ULTRAVOX:
+                {
+                    model.conv1d_1_w = get_tensor(string_format(TN_CONV1D, 1, "weight"));
+                    model.conv1d_1_b = get_tensor(string_format(TN_CONV1D, 1, "bias"));
+                    model.conv1d_2_w = get_tensor(string_format(TN_CONV1D, 2, "weight"));
+                    model.conv1d_2_b = get_tensor(string_format(TN_CONV1D, 2, "bias"));
+                    model.mm_1_w = get_tensor(string_format(TN_MM_AUDIO_MLP, 1, "weight"));
+                    model.mm_2_w = get_tensor(string_format(TN_MM_AUDIO_MLP, 2, "weight"));
+                    model.mm_norm_pre_w = get_tensor(string_format(TN_MM_NORM_PRE, "weight"));
+                    model.mm_norm_mid_w = get_tensor(string_format(TN_MM_NORM_MID, "weight"));
+                } break;
+            case PROJECTOR_TYPE_QWEN2A:
+                {
+                    model.conv1d_1_w = get_tensor(string_format(TN_CONV1D, 1, "weight"));
+                    model.conv1d_1_b = get_tensor(string_format(TN_CONV1D, 1, "bias"));
+                    model.conv1d_2_w = get_tensor(string_format(TN_CONV1D, 2, "weight"));
+                    model.conv1d_2_b = get_tensor(string_format(TN_CONV1D, 2, "bias"));
+                    model.mm_fc_w = get_tensor(string_format(TN_MM_AUDIO_FC, "weight"));
+                    model.mm_fc_b = get_tensor(string_format(TN_MM_AUDIO_FC, "bias"));
+                } break;
+            case PROJECTOR_TYPE_VOXTRAL:
+                {
+                    model.conv1d_1_w = get_tensor(string_format(TN_CONV1D, 1, "weight"));
+                    model.conv1d_1_b = get_tensor(string_format(TN_CONV1D, 1, "bias"));
+                    model.conv1d_2_w = get_tensor(string_format(TN_CONV1D, 2, "weight"));
+                    model.conv1d_2_b = get_tensor(string_format(TN_CONV1D, 2, "bias"));
+                    model.mm_1_w = get_tensor(string_format(TN_MM_AUDIO_MLP, 1, "weight"));
+                    model.mm_2_w = get_tensor(string_format(TN_MM_AUDIO_MLP, 2, "weight"));
                 } break;
             case PROJECTOR_TYPE_INTERNVL:
                 {
-                    vision_model.mm_0_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 0, "weight"));
-                    vision_model.mm_0_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 0, "bias"));
-                    vision_model.mm_1_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 1, "weight"));
-                    vision_model.mm_1_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 1, "bias"));
-                    vision_model.mm_3_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 3, "weight"));
-                    vision_model.mm_3_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 3, "bias"));
+                    model.mm_0_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 0, "weight"));
+                    model.mm_0_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 0, "bias"));
+                    model.mm_1_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 1, "weight"));
+                    model.mm_1_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 1, "bias"));
+                    model.mm_3_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 3, "weight"));
+                    model.mm_3_b = get_tensor(string_format(TN_MVLM_PROJ_MLP, 3, "bias"));
+                } break;
+            case PROJECTOR_TYPE_LLAMA4:
+                {
+                    model.mm_model_proj    = get_tensor(TN_MM_PROJECTOR);
+                    model.mm_model_mlp_1_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 1, "weight"));
+                    model.mm_model_mlp_2_w = get_tensor(string_format(TN_MVLM_PROJ_MLP, 2, "weight"));
                 } break;
             default:
                 GGML_ASSERT(false && "unknown projector type");
@@ -2262,15 +2669,20 @@ struct clip_model_loader {
         }
     }
 
-    void alloc_compute_meta() {
+    void alloc_compute_meta(clip_ctx & ctx_clip) {
+        const auto & hparams = ctx_clip.model.hparams;
         ctx_clip.buf_compute_meta.resize(ctx_clip.max_nodes * ggml_tensor_overhead() + ggml_graph_overhead());
 
         // create a fake batch
         clip_image_f32_batch batch;
         clip_image_f32_ptr img(clip_image_f32_init());
-        img->nx = ctx_clip.vision_model.hparams.warmup_image_size;
-        img->ny = ctx_clip.vision_model.hparams.warmup_image_size;
-        img->buf.resize(img->nx * img->ny * 3);
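+        // dummy dimensions only: a square warmup image for vision, a warmup-length mel spectrogram for audio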
+        if (ctx_clip.model.modality == CLIP_MODALITY_VISION) {
+            img->nx = hparams.warmup_image_size;
+            img->ny = hparams.warmup_image_size;
+        } else {
+            img->nx = hparams.warmup_audio_size;
+            img->ny = hparams.n_mel_bins;
+        }
         batch.entries.push_back(std::move(img));
 
         ggml_cgraph * gf = clip_image_build_graph(&ctx_clip, batch);
@@ -2346,41 +2758,57 @@ struct clip_model_loader {
             output[i] = values[i];
         }
     }
+
+    void set_llava_uhd_res_candidates(clip_model & model, const int max_patches_per_side) {
+        auto & hparams = model.hparams;
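+        // enumerate every WxH slice grid up to max_patches_per_side per axis, skipping the 1x1 base resolution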
+        for (int x = 1; x <= max_patches_per_side; x++) {
+            for (int y = 1; y <= max_patches_per_side; y++) {
+                if (x == 1 && y == 1) {
+                    continue; // skip the first point
+                }
+                hparams.image_res_candidates.push_back(clip_image_size{
+                    x*hparams.image_size,
+                    y*hparams.image_size,
+                });
+            }
+        }
+    }
 };
 
-// read and create ggml_context containing the tensors and their data
-struct clip_ctx * clip_model_load(const char * fname, const int verbosity) {
-    return clip_init(fname, clip_context_params{
-        /* use_gpu */   true,
-        /* verbosity */ static_cast<ggml_log_level>(verbosity),
-    });
-}
-
-struct clip_ctx * clip_init(const char * fname, struct clip_context_params ctx_params) {
+struct clip_init_result clip_init(const char * fname, struct clip_context_params ctx_params) {
     g_logger_state.verbosity_thold = ctx_params.verbosity;
-    clip_ctx * ctx_clip = nullptr;
+    clip_ctx * ctx_vision = nullptr;
+    clip_ctx * ctx_audio = nullptr;
 
     try {
-        ctx_clip = new clip_ctx(ctx_params);
-        clip_model_loader loader(fname, *ctx_clip);
-        loader.load_hparams();
-        loader.load_tensors();
-        loader.alloc_compute_meta();
+        clip_model_loader loader(fname);
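+        // a single GGUF may carry both encoders; build an independent clip_ctx per modality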
+
+        if (loader.has_vision) {
+            ctx_vision = new clip_ctx(ctx_params);
+            loader.load_hparams(ctx_vision->model, CLIP_MODALITY_VISION);
+            loader.load_tensors(*ctx_vision);
+            loader.alloc_compute_meta(*ctx_vision);
+        }
+
+        if (loader.has_audio) {
+            ctx_audio = new clip_ctx(ctx_params);
+            loader.load_hparams(ctx_audio->model, CLIP_MODALITY_AUDIO);
+            loader.load_tensors(*ctx_audio);
+            loader.alloc_compute_meta(*ctx_audio);
+        }
+
     } catch (const std::exception & e) {
         LOG_ERR("%s: failed to load model '%s': %s\n", __func__, fname, e.what());
-        delete ctx_clip;
-        return nullptr;
+        if (ctx_vision) {
+            delete ctx_vision;
+        }
+        if (ctx_audio) {
+            delete ctx_audio;
+        }
+        return {nullptr, nullptr};
     }
 
-    return ctx_clip;
-}
-
-void clip_add_load_image_size(struct clip_ctx * ctx_clip, struct clip_image_size * load_image_size) {
-    ctx_clip->load_image_size = *load_image_size; // copy
-}
-
-struct clip_image_size * clip_get_load_image_size(struct clip_ctx * ctx_clip) {
-    return &ctx_clip->load_image_size;
+    return {ctx_vision, ctx_audio};
 }
 
 struct clip_image_size * clip_image_size_init() {
@@ -2454,30 +2882,6 @@ void clip_build_img_from_pixels(const unsigned char * rgb_pixels, int nx, int ny
     memcpy(img->buf.data(), rgb_pixels, img->buf.size());
 }
 
-bool clip_image_load_from_file(const char * fname, clip_image_u8 * img) {
-    int nx, ny, nc;
-    auto * data = stbi_load(fname, &nx, &ny, &nc, 3);
-    if (!data) {
-        LOG_ERR("%s: failed to load image '%s'\n", __func__, fname);
-        return false;
-    }
-    clip_build_img_from_pixels(data, nx, ny, img);
-    stbi_image_free(data);
-    return true;
-}
-
-bool clip_image_load_from_bytes(const unsigned char * bytes, size_t bytes_length, struct clip_image_u8 * img) {
-    int nx, ny, nc;
-    auto * data = stbi_load_from_memory(bytes, bytes_length, &nx, &ny, &nc, 3);
-    if (!data) {
-        LOG_ERR("%s: failed to decode image bytes\n", __func__);
-        return false;
-    }
-    clip_build_img_from_pixels(data, nx, ny, img);
-    stbi_image_free(data);
-    return true;
-}
-
 // Normalize image to float32 - careful with pytorch .to(model.device, dtype=torch.float16) - this sometimes reduces precision (32>16>32), sometimes not
 static void normalize_image_u8_to_f32(const clip_image_u8 & src, clip_image_f32 & dst, const float mean[3], const float std[3]) {
     dst.nx = src.nx;
@@ -2723,36 +3127,41 @@ struct llava_uhd {
         bool padding_refined = false;  // if true, refine image will be padded to the grid size (e.g. llava-1.6)
     };
 
-    static int get_max_slices(struct clip_ctx * ctx) {
-        if (clip_is_minicpmv(ctx)) {
-            return 9;
-        }
-        return 0;
-    }
-
     static slice_instructions get_slice_instructions(struct clip_ctx * ctx, const clip_image_size & original_size) {
         slice_instructions res;
         const int patch_size      = clip_get_patch_size(ctx);
         const int slice_size      = clip_get_image_size(ctx);
-        const int max_slice_nums  = get_max_slices(ctx);
         const int original_width  = original_size.width;
         const int original_height = original_size.height;
-        const float log_ratio = log((float)original_width / original_height);
-        const float ratio = (float)original_width * original_height / (slice_size * slice_size);
-        const int multiple = fmin(ceil(ratio), max_slice_nums);
-        const bool has_slices = (multiple > 1);
-        const bool has_pinpoints = !ctx->vision_model.hparams.image_grid_pinpoints.empty();
+
+        const bool has_slices    = original_size.width > slice_size || original_size.height > slice_size;
+        const bool has_pinpoints = !ctx->model.hparams.image_res_candidates.empty();
+
+        if (!has_slices) {
+            // skip slicing logic
+            res.overview_size = clip_image_size{slice_size, slice_size};
+            res.refined_size  = clip_image_size{0, 0};
+            res.grid_size     = clip_image_size{0, 0};
+
+            return res;
+        }
 
         if (has_pinpoints) {
             // has pinpoints, use them to calculate the grid size (e.g. llava-1.6)
             auto refine_size = llava_uhd::select_best_resolution(
-                ctx->vision_model.hparams.image_grid_pinpoints,
-                original_size);
+                original_size,
+                ctx->model.hparams.image_res_candidates);
             res.overview_size   = clip_image_size{slice_size, slice_size};
             res.refined_size    = refine_size;
             res.grid_size       = clip_image_size{0, 0};
             res.padding_refined = true;
 
+            LOG_DBG("%s: using pinpoints for slicing\n", __func__);
+            LOG_DBG("%s: original size: %d x %d, overview size: %d x %d, refined size: %d x %d\n",
+                    __func__, original_width, original_height,
+                    res.overview_size.width, res.overview_size.height,
+                    res.refined_size.width,  res.refined_size.height);
+
             for (int y = 0; y < refine_size.height; y += slice_size) {
                 for (int x = 0; x < refine_size.width; x += slice_size) {
                     slice_coordinates slice;
@@ -2761,13 +3170,16 @@ struct llava_uhd {
                     slice.size.width  = std::min(slice_size, refine_size.width  - x);
                     slice.size.height = std::min(slice_size, refine_size.height - y);
                     res.slices.push_back(slice);
-                    if (x == 0) {
-                        res.grid_size.width++;
-                    }
+                    LOG_DBG("%s: slice %d: x=%d, y=%d, size=%dx%d\n",
+                            __func__, (int)res.slices.size() - 1,
+                            slice.x, slice.y, slice.size.width, slice.size.height);
                 }
-                res.grid_size.height++;
             }
 
+            res.grid_size.height = refine_size.height / slice_size;
+            res.grid_size.width  = refine_size.width  / slice_size;
+            LOG_DBG("%s: grid size: %d x %d\n", __func__, res.grid_size.width, res.grid_size.height);
+
             return res;
         }
 
@@ -2776,17 +3188,23 @@ struct llava_uhd {
         auto best_size    = get_best_resize(original_size, slice_size, patch_size, !has_slices);
         res.overview_size = best_size;
 
-        if (!has_slices) {
-            // skip slicing logic
-            res.refined_size = clip_image_size{0, 0};
-            res.grid_size    = clip_image_size{0, 0};
+        {
+            const int max_slice_nums = 9; // TODO: this is only used by minicpmv, maybe remove it
+            const float log_ratio = log((float)original_width / original_height);
+            const float ratio = (float)original_width * original_height / (slice_size * slice_size);
+            const int multiple = fmin(ceil(ratio), max_slice_nums);
 
-        } else {
             auto best_grid   = get_best_grid(max_slice_nums, multiple, log_ratio);
             auto refine_size = get_refine_size(original_size, best_grid, slice_size, patch_size, true);
             res.grid_size    = best_grid;
             res.refined_size = refine_size;
 
+            LOG_DBG("%s: original size: %d x %d, overview size: %d x %d, refined size: %d x %d, grid size: %d x %d\n",
+                    __func__, original_width, original_height,
+                    res.overview_size.width, res.overview_size.height,
+                    res.refined_size.width, res.refined_size.height,
+                    res.grid_size.width, res.grid_size.height);
+
             int width  = refine_size.width;
             int height = refine_size.height;
             int grid_x = int(width  / best_grid.width);
@@ -2803,7 +3221,9 @@ struct llava_uhd {
                     slice.size.width  = grid_x;
                     slice.size.height = grid_y;
                     res.slices.push_back(slice);
-                    // LOG_INF("slice %d: %d %d %d %d\n", ic, patches_i, patches_j, grid_x, grid_y);
+                    LOG_DBG("%s: slice %d: x=%d, y=%d, size=%dx%d\n",
+                            __func__, (int)res.slices.size() - 1,
+                            slice.x, slice.y, slice.size.width, slice.size.height);
                 }
             }
         }
@@ -2861,48 +3281,55 @@ private:
         return res;
     }
 
+    static clip_image_size resize_maintain_aspect_ratio(const clip_image_size & orig, const clip_image_size & target_max) {
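+        // scale so the result fits within target_max while keeping the aspect ratio of orig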
+        float scale_width  = static_cast<float>(target_max.width)  / orig.width;
+        float scale_height = static_cast<float>(target_max.height) / orig.height;
+        float scale = std::min(scale_width, scale_height);
+        return clip_image_size{
+            static_cast<int>(orig.width  * scale),
+            static_cast<int>(orig.height * scale),
+        };
+    }
+
     /**
      * Selects the best resolution from a list of possible resolutions based on the original size.
      *
+     * For example, when given a list of resolutions:
+     *  - 100x100
+     *  - 200x100
+     *  - 100x200
+     *  - 200x200
+     *
+     * For an input image of size 111x200, 100x200 is the best fit (least wasted area).
+     *
      * @param original_size The original size of the image
      * @param possible_resolutions A list of possible resolutions
      * @return The best fit resolution
      */
     static clip_image_size select_best_resolution(const clip_image_size & original_size, const std::vector<clip_image_size> & possible_resolutions) {
-        int original_width = original_size.width;
-        int original_height = original_size.height;
         clip_image_size best_fit;
+        int min_wasted_area = std::numeric_limits<int>::max();
         int max_effective_resolution = 0;
-        int min_wasted_resolution = std::numeric_limits<int>::max();
 
-        for (const auto & resolution : possible_resolutions) {
-            int width  = resolution.width;
-            int height = resolution.height;
-            float scale = std::min(static_cast<float>(width) / original_width, static_cast<float>(height) / original_height);
-            int downscaled_width  = static_cast<int>(original_width * scale);
-            int downscaled_height = static_cast<int>(original_height * scale);
-            int effective_resolution = std::min(downscaled_width * downscaled_height, original_width * original_height);
-            int wasted_resolution = (width * height) - effective_resolution;
-            // LOG_INF("resolution: %d %d, scale: %f, downscaled: %d %d, effective: %d, wasted: %d\n", width, height, scale, downscaled_width, downscaled_height, effective_resolution, wasted_resolution);
-            if (effective_resolution > max_effective_resolution || (effective_resolution == max_effective_resolution && wasted_resolution < min_wasted_resolution)) {
+        for (const clip_image_size & candidate : possible_resolutions) {
+            auto target_size = resize_maintain_aspect_ratio(original_size, candidate);
+            int effective_resolution = std::min(
+                target_size.width * target_size.height,
+                original_size.width * original_size.height);
+            int wasted_area = (candidate.width * candidate.height) - effective_resolution;
+
+            if (effective_resolution > max_effective_resolution || (effective_resolution == max_effective_resolution && wasted_area < min_wasted_area)) {
                 max_effective_resolution = effective_resolution;
-                min_wasted_resolution = wasted_resolution;
-                best_fit = resolution;
+                min_wasted_area = wasted_area;
+                best_fit = candidate;
             }
+
+            LOG_DBG("%s: candidate: %d x %d, target: %d x %d, wasted: %d, effective: %d\n", __func__, candidate.width, candidate.height, target_size.width, target_size.height, wasted_area, effective_resolution);
         }
 
         return best_fit;
     }
 
-    // used by llava 1.6 with custom list of pinpoints
-    static clip_image_size select_best_resolution(const std::vector<int> & pinpoints, const clip_image_size & original_size) {
-        std::vector possible_resolutions;
-        for (size_t i = 0; i < pinpoints.size(); i += 2) {
-            possible_resolutions.push_back(clip_image_size{pinpoints[i], pinpoints[i+1]});
-        }
-        return select_best_resolution(original_size, possible_resolutions);
-    }
-
     static int ensure_divide(int length, int patch_size) {
         return std::max(static_cast<int>(std::round(static_cast<float>(length) / patch_size) * patch_size), patch_size);
     }
@@ -2963,18 +3390,12 @@ private:
     }
 };
 
-// TODO @ngxson : deprecate the load_image_size singleton pattern
-int clip_uhd_num_image_embeds_col(struct clip_ctx * ctx_clip) {
-    const auto inst = llava_uhd::get_slice_instructions(ctx_clip, ctx_clip->load_image_size);
-    return inst.grid_size.width;
-}
-
 // returns the normalized float tensor for llava-1.5, for spatial_unpad with anyres processing for llava-1.6 it returns the normalized image patch tensors as a vector
 // res_imgs memory is being allocated here, previous allocations will be freed if found
 bool clip_image_preprocess(struct clip_ctx * ctx, const clip_image_u8 * img, struct clip_image_f32_batch * res_imgs) {
     clip_image_size original_size{img->nx, img->ny};
     bool pad_to_square = true;
-    auto & params = ctx->vision_model.hparams;
+    auto & params = ctx->model.hparams;
     // The model config actually contains all we need to decide on how to preprocess, here we automatically switch to the new llava-1.6 preprocessing
     if (params.mm_patch_merge_type == PATCH_MERGE_SPATIAL_UNPAD) {
         pad_to_square = false;
@@ -2987,12 +3408,15 @@ bool clip_image_preprocess(struct clip_ctx * ctx, const clip_image_u8 * img, str
         for (size_t i = 0; i < imgs.size(); ++i) {
             // clip_image_save_to_bmp(*imgs[i], "slice_" + std::to_string(i) + ".bmp");
             clip_image_f32_ptr res(clip_image_f32_init());
-            normalize_image_u8_to_f32(*imgs[i], *res, ctx->image_mean, ctx->image_std);
+            normalize_image_u8_to_f32(*imgs[i], *res, params.image_mean, params.image_std);
             res_imgs->entries.push_back(std::move(res));
         }
+
+        res_imgs->grid_x = inst.grid_size.width;
+        res_imgs->grid_y = inst.grid_size.height;
         return true;
-    }
-    else if (ctx->proj_type == PROJECTOR_TYPE_QWEN2VL || ctx->proj_type == PROJECTOR_TYPE_QWEN25VL) {
+
+    } else if (ctx->proj_type() == PROJECTOR_TYPE_QWEN2VL || ctx->proj_type() == PROJECTOR_TYPE_QWEN25VL) {
         clip_image_u8 resized;
         auto patch_size = params.patch_size * 2;
         auto new_size = image_manipulation::calc_size_preserved_ratio(original_size, patch_size, params.image_size);
@@ -3000,33 +3424,49 @@ bool clip_image_preprocess(struct clip_ctx * ctx, const clip_image_u8 * img, str
 
         clip_image_f32_ptr img_f32(clip_image_f32_init());
         // clip_image_f32_ptr res(clip_image_f32_init());
-        normalize_image_u8_to_f32(resized, *img_f32, ctx->image_mean, ctx->image_std);
+        normalize_image_u8_to_f32(resized, *img_f32, params.image_mean, params.image_std);
         // res_imgs->data[0] = *res;
         res_imgs->entries.push_back(std::move(img_f32));
         return true;
     }
-    else if (ctx->proj_type == PROJECTOR_TYPE_GLM_EDGE
-            || ctx->proj_type == PROJECTOR_TYPE_GEMMA3
-            || ctx->proj_type == PROJECTOR_TYPE_IDEFICS3
-            || ctx->proj_type == PROJECTOR_TYPE_INTERNVL // TODO @ngxson : support dynamic resolution
+    else if (ctx->proj_type() == PROJECTOR_TYPE_GLM_EDGE
+            || ctx->proj_type() == PROJECTOR_TYPE_GEMMA3
+            || ctx->proj_type() == PROJECTOR_TYPE_IDEFICS3
+            || ctx->proj_type() == PROJECTOR_TYPE_INTERNVL // TODO @ngxson : support dynamic resolution
     ) {
         clip_image_u8 resized_image;
         int sz = params.image_size;
         image_manipulation::resize_and_pad_image(*img, resized_image, {sz, sz});
         clip_image_f32_ptr img_f32(clip_image_f32_init());
         //clip_image_save_to_bmp(resized_image, "resized.bmp");
-        normalize_image_u8_to_f32(resized_image, *img_f32, ctx->image_mean, ctx->image_std);
+        normalize_image_u8_to_f32(resized_image, *img_f32, params.image_mean, params.image_std);
         res_imgs->entries.push_back(std::move(img_f32));
         return true;
-    }
-    else if (ctx->proj_type == PROJECTOR_TYPE_PIXTRAL) {
+
+    } else if (ctx->proj_type() == PROJECTOR_TYPE_PIXTRAL) {
         clip_image_u8 resized_image;
         auto new_size = image_manipulation::calc_size_preserved_ratio(original_size, params.patch_size, params.image_size);
         image_manipulation::bilinear_resize(*img, resized_image, new_size.width, new_size.height);
         clip_image_f32_ptr img_f32(clip_image_f32_init());
-        normalize_image_u8_to_f32(resized_image, *img_f32, ctx->image_mean, ctx->image_std);
+        normalize_image_u8_to_f32(resized_image, *img_f32, params.image_mean, params.image_std);
         res_imgs->entries.push_back(std::move(img_f32));
         return true;
+
+    } else if (ctx->proj_type() == PROJECTOR_TYPE_LLAMA4) {
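+        // anyres-style slicing: pick the best fit from the model's resolution candidates, then cut into tiles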
+        GGML_ASSERT(!params.image_res_candidates.empty());
+        auto const inst = llava_uhd::get_slice_instructions(ctx, original_size);
+        std::vector<clip_image_u8_ptr> imgs = llava_uhd::slice_image(img, inst);
+
+        for (size_t i = 0; i < imgs.size(); ++i) {
+            clip_image_f32_ptr res(clip_image_f32_init());
+            normalize_image_u8_to_f32(*imgs[i], *res, params.image_mean, params.image_std);
+            res_imgs->entries.push_back(std::move(res));
+        }
+
+        res_imgs->grid_x = inst.grid_size.width;
+        res_imgs->grid_y = inst.grid_size.height;
+        return true;
+
     }
 
     // the logic below is to pad the shorter side to the longer side with a background color: rgb(122, 116, 104)
@@ -3049,11 +3489,11 @@ bool clip_image_preprocess(struct clip_ctx * ctx, const clip_image_u8 * img, str
         image_manipulation::resize_and_pad_image(*img, *temp, clip_image_size{params.image_size, params.image_size}, pad_color);
 
         clip_image_f32_ptr res(clip_image_f32_init());
-        normalize_image_u8_to_f32(*temp, *res, ctx->image_mean, ctx->image_std);
+        normalize_image_u8_to_f32(*temp, *res, params.image_mean, params.image_std);
         res_imgs->entries.push_back(std::move(res));
         return true;
 
-    } else if (!params.image_grid_pinpoints.empty()) {
+    } else if (!params.image_res_candidates.empty()) {
         // "spatial_unpad" with "anyres" processing for llava-1.6
         auto const inst = llava_uhd::get_slice_instructions(ctx, original_size);
         std::vector<clip_image_u8_ptr> imgs = llava_uhd::slice_image(img, inst);
@@ -3061,7 +3501,7 @@ bool clip_image_preprocess(struct clip_ctx * ctx, const clip_image_u8 * img, str
         for (size_t i = 0; i < imgs.size(); ++i) {
             // clip_image_save_to_bmp(*imgs[i], "slice_" + std::to_string(i) + ".bmp");
             clip_image_f32_ptr res(clip_image_f32_init());
-            normalize_image_u8_to_f32(*imgs[i], *res, ctx->image_mean, ctx->image_std);
+            normalize_image_u8_to_f32(*imgs[i], *res, params.image_mean, params.image_std);
             res_imgs->entries.push_back(std::move(res));
         }
 
@@ -3073,7 +3513,7 @@ bool clip_image_preprocess(struct clip_ctx * ctx, const clip_image_u8 * img, str
 }
 
 ggml_tensor * clip_get_newline_tensor(const struct clip_ctx * ctx) {
-    return ctx->vision_model.image_newline;
+    return ctx->model.image_newline;
 }
 
 void clip_free(clip_ctx * ctx) {
@@ -3085,8 +3525,8 @@ void clip_free(clip_ctx * ctx) {
 
 // deprecated
 size_t clip_embd_nbytes(const struct clip_ctx * ctx) {
-    const int32_t nx = ctx->vision_model.hparams.image_size;
-    const int32_t ny = ctx->vision_model.hparams.image_size;
+    const int32_t nx = ctx->model.hparams.image_size;
+    const int32_t ny = ctx->model.hparams.image_size;
     return clip_embd_nbytes_by_img(ctx, nx, ny);
 }
 
@@ -3098,107 +3538,139 @@ size_t clip_embd_nbytes_by_img(const struct clip_ctx * ctx, int img_w, int img_h
 }
 
 int32_t clip_get_image_size(const struct clip_ctx * ctx) {
-    return ctx->vision_model.hparams.image_size;
+    return ctx->model.hparams.image_size;
 }
 
 int32_t clip_get_patch_size(const struct clip_ctx * ctx) {
-    return ctx->vision_model.hparams.patch_size;
+    return ctx->model.hparams.patch_size;
 }
 
 int32_t clip_get_hidden_size(const struct clip_ctx * ctx) {
-    return ctx->vision_model.hparams.n_embd;
+    return ctx->model.hparams.n_embd;
 }
 
 const char * clip_patch_merge_type(const struct clip_ctx * ctx) {
-    return ctx->vision_model.hparams.mm_patch_merge_type == PATCH_MERGE_SPATIAL_UNPAD ? "spatial_unpad" : "flat";
-}
-
-const int32_t * clip_image_grid(const struct clip_ctx * ctx) {
-    if (ctx->vision_model.hparams.image_grid_pinpoints.size()) {
-        return &ctx->vision_model.hparams.image_grid_pinpoints.front();
-    }
-    return nullptr;
-}
-
-size_t get_clip_image_grid_size(const struct clip_ctx * ctx) {
-    return ctx->vision_model.hparams.image_grid_pinpoints.size();
-}
-
-// deprecated
-int clip_n_patches(const struct clip_ctx * ctx) {
-    clip_image_f32 img;
-    img.nx = ctx->vision_model.hparams.image_size;
-    img.ny = ctx->vision_model.hparams.image_size;
-    return clip_n_output_tokens(ctx, &img);
-}
-
-// deprecated
-int clip_n_patches_by_img(const struct clip_ctx * ctx, struct clip_image_f32 * img) {
-    return clip_n_output_tokens(ctx, img);
+    return ctx->model.hparams.mm_patch_merge_type == PATCH_MERGE_SPATIAL_UNPAD ? "spatial_unpad" : "flat";
 }
 
 int clip_n_output_tokens_x(const struct clip_ctx * ctx, struct clip_image_f32 * img) {
-    const auto & params = ctx->vision_model.hparams;
+    const auto & params = ctx->model.hparams;
     const int n_total = clip_n_output_tokens(ctx, img);
-    if (ctx->proj_type == PROJECTOR_TYPE_QWEN2VL || ctx->proj_type == PROJECTOR_TYPE_QWEN25VL) {
+    if (ctx->proj_type() == PROJECTOR_TYPE_QWEN2VL || ctx->proj_type() == PROJECTOR_TYPE_QWEN25VL) {
         return img->nx / (params.patch_size * 2) + (int)(img->nx % params.patch_size > 0);
     }
     return n_total;
 }
 
 int clip_n_output_tokens_y(const struct clip_ctx * ctx, struct clip_image_f32 * img) {
-    const auto & params = ctx->vision_model.hparams;
-    if (ctx->proj_type == PROJECTOR_TYPE_QWEN2VL || ctx->proj_type == PROJECTOR_TYPE_QWEN25VL) {
+    const auto & params = ctx->model.hparams;
+    if (ctx->proj_type() == PROJECTOR_TYPE_QWEN2VL || ctx->proj_type() == PROJECTOR_TYPE_QWEN25VL) {
         return img->ny / (params.patch_size * 2) + (int)(img->ny % params.patch_size > 0);
     }
     return 1;
 }
 
 int clip_n_output_tokens(const struct clip_ctx * ctx, struct clip_image_f32 * img) {
-    const auto & params = ctx->vision_model.hparams;
+    const auto & params = ctx->model.hparams;
 
-    int n_patches = (params.image_size / params.patch_size) * (params.image_size / params.patch_size);
+    // only for models using fixed size square images
+    int n_patches_sq = (params.image_size / params.patch_size) * (params.image_size / params.patch_size);
 
-    if (ctx->proj_type == PROJECTOR_TYPE_LDP
-            || ctx->proj_type == PROJECTOR_TYPE_LDPV2
-            || ctx->proj_type == PROJECTOR_TYPE_GLM_EDGE) {
-        n_patches /= 4;
-        if (ctx->vision_model.mm_glm_tok_boi) {
-            n_patches += 2; // for BOI and EOI token embeddings
-        }
-    } else if (ctx->proj_type == PROJECTOR_TYPE_MINICPMV) {
-        if (ctx->minicpmv_version == 2) {
-            n_patches = 96;
-        }
-        else if (ctx->minicpmv_version == 3) {
-            n_patches = 64;
-        }
-        else if (ctx->minicpmv_version == 4) {
-            n_patches = 64;
-        }
-        else {
-            GGML_ABORT("Unknown minicpmv version");
-        }
-    } else if (ctx->proj_type == PROJECTOR_TYPE_QWEN2VL || ctx->proj_type == PROJECTOR_TYPE_QWEN25VL) {
-        int patch_size = params.patch_size * 2;
-        int x_patch = img->nx / patch_size + (int)(img->nx % patch_size > 0);
-        int y_patch = img->ny / patch_size + (int)(img->ny % patch_size > 0);
-        n_patches = x_patch * y_patch;
-    } else if (ctx->proj_type == PROJECTOR_TYPE_GEMMA3) {
-        int n_per_side = params.image_size / params.patch_size;
-        int n_per_side_2d_pool = n_per_side / params.proj_scale_factor;
-        n_patches = n_per_side_2d_pool * n_per_side_2d_pool;
-    } else if (ctx->proj_type == PROJECTOR_TYPE_IDEFICS3 || ctx->proj_type == PROJECTOR_TYPE_INTERNVL) {
-        // both W and H are divided by proj_scale_factor
-        n_patches /= (params.proj_scale_factor * params.proj_scale_factor);
-    } else if (ctx->proj_type == PROJECTOR_TYPE_PIXTRAL) {
-        int n_merge = params.spatial_merge_size;
-        int n_patches_x = img->nx / params.patch_size / (n_merge > 0 ? n_merge : 1);
-        int n_patches_y = img->ny / params.patch_size / (n_merge > 0 ? n_merge : 1);
-        n_patches = n_patches_y*n_patches_x + n_patches_y - 1; // + one [IMG_BREAK] per row, except the last row
+    projector_type proj = ctx->proj_type();
+
+    switch (proj) {
+        case PROJECTOR_TYPE_MLP:
+        case PROJECTOR_TYPE_MLP_NORM:
+            {
+                // do nothing
+            } break;
+        case PROJECTOR_TYPE_LDP:
+        case PROJECTOR_TYPE_LDPV2:
+        case PROJECTOR_TYPE_GLM_EDGE:
+            {
+                n_patches_sq /= 4;
+                if (ctx->model.mm_glm_tok_boi) {
+                    n_patches_sq += 2; // for BOI and EOI token embeddings
+                }
+            } break;
+        case PROJECTOR_TYPE_MINICPMV:
+            {
+                if (params.minicpmv_version == 2) {
+                    // MiniCPM-V 2.5
+                    n_patches_sq = 96;
+                } else if (params.minicpmv_version == 3) {
+                    // MiniCPM-V 2.6
+                    n_patches_sq = 64;
+                } else if (params.minicpmv_version == 4) {
+                    // MiniCPM-o 2.6
+                    n_patches_sq = 64;
+                } else if (params.minicpmv_version == 5) {
+                    // MiniCPM-V 4.0
+                    n_patches_sq = 64;
+                } else {
+                    GGML_ABORT("Unknown minicpmv version");
+                }
+            } break;
+        case PROJECTOR_TYPE_QWEN2VL:
+        case PROJECTOR_TYPE_QWEN25VL:
+            {
+                // dynamic size
+                int patch_size = params.patch_size * 2;
+                int x_patch = img->nx / patch_size + (int)(img->nx % patch_size > 0);
+                int y_patch = img->ny / patch_size + (int)(img->ny % patch_size > 0);
+                n_patches_sq = x_patch * y_patch;
+            } break;
+        case PROJECTOR_TYPE_GEMMA3:
+            {
+                int n_per_side = params.image_size / params.patch_size;
+                int n_per_side_2d_pool = n_per_side / params.proj_scale_factor;
+                n_patches_sq = n_per_side_2d_pool * n_per_side_2d_pool;
+            } break;
+        case PROJECTOR_TYPE_IDEFICS3:
+        case PROJECTOR_TYPE_INTERNVL:
+            {
+                // both W and H are divided by proj_scale_factor
+                n_patches_sq /= (params.proj_scale_factor * params.proj_scale_factor);
+            } break;
+        case PROJECTOR_TYPE_PIXTRAL:
+            {
+                // dynamic size
+                int n_merge = params.spatial_merge_size;
+                int n_patches_x = img->nx / params.patch_size / (n_merge > 0 ? n_merge : 1);
+                int n_patches_y = img->ny / params.patch_size / (n_merge > 0 ? n_merge : 1);
+                n_patches_sq = n_patches_y * n_patches_x + n_patches_y - 1; // + one [IMG_BREAK] per row, except the last row
+            } break;
+        case PROJECTOR_TYPE_LLAMA4:
+            {
+                int scale_factor = ctx->model.hparams.proj_scale_factor;
+                n_patches_sq /= (scale_factor * scale_factor);
+            } break;
+        case PROJECTOR_TYPE_VOXTRAL:
+        case PROJECTOR_TYPE_ULTRAVOX:
+        case PROJECTOR_TYPE_QWEN2A:
+            {
+                n_patches_sq = img->nx;
+
+                const int proj_stack_factor = ctx->model.hparams.proj_stack_factor;
+                if (ctx->model.audio_has_stack_frames()) {
+                    GGML_ASSERT(proj_stack_factor > 0);
+                    const int n_len = CLIP_ALIGN(n_patches_sq, proj_stack_factor);
+                    n_patches_sq = n_len / proj_stack_factor;
+                }
+
+                // whisper downscales the input tokens by half after conv1d
+                n_patches_sq /= 2;
+
+                if (ctx->model.audio_has_avgpool()) {
+                    // divide by 2 because of nn.AvgPool1d(2, stride=2)
+                    n_patches_sq /= 2;
+                }
+            } break;
+        default:
+            GGML_ABORT("unsupported projector type");
     }
 
-    return n_patches;
+    return n_patches_sq;
 }
 
 static std::vector<std::vector<std::vector<float>>> get_1d_sincos_pos_embed_from_grid_new(int embed_dim, const std::vector<std::vector<float>> & pos) {
@@ -3307,12 +3779,13 @@ bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_ima
     }
 
     // build the inference graph
+    ctx->debug_print_tensors.clear();
     ggml_backend_sched_reset(ctx->sched.get());
     ggml_cgraph * gf = clip_image_build_graph(ctx, imgs);
     ggml_backend_sched_alloc_graph(ctx->sched.get(), gf);
 
     // set inputs
-    const auto & model   = ctx->vision_model;
+    const auto & model   = ctx->model;
     const auto & hparams = model.hparams;
 
     const int image_size_width  = imgs.entries[0]->nx;
@@ -3321,8 +3794,8 @@ bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_ima
     const int patch_size    = hparams.patch_size;
     const int num_patches   = ((image_size_width / patch_size) * (image_size_height / patch_size));
     const int n_pos = num_patches + (model.class_embedding ? 1 : 0);
-    const int pos_w = ctx->load_image_size.width  / patch_size;
-    const int pos_h = ctx->load_image_size.height / patch_size;
+    const int pos_w = image_size_width  / patch_size;
+    const int pos_h = image_size_height / patch_size;
 
     const bool use_window_attn = hparams.n_wa_pattern > 0; // for qwen2.5vl
 
@@ -3352,7 +3825,7 @@ bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_ima
     };
 
     // set input pixel values
-    {
+    if (!imgs.is_audio) {
         size_t nelem = 0;
         for (const auto & img : imgs.entries) {
             nelem += img->nx * img->ny * 3;
@@ -3389,10 +3862,20 @@ bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_ima
             }
         }
         set_input_f32("inp_raw", inp_raw);
+
+    } else {
+        // audio input
+        GGML_ASSERT(imgs.entries.size() == 1);
+        const auto & mel_inp = imgs.entries[0];
+        const int n_step = mel_inp->nx;
+        const int n_mel  = mel_inp->ny;
+        std::vector<float> inp_raw(n_step * n_mel);
+        std::memcpy(inp_raw.data(), mel_inp->buf.data(), n_step * n_mel * sizeof(float));
+        set_input_f32("inp_raw", inp_raw);
     }
 
     // set input per projector
-    switch (ctx->proj_type) {
+    switch (ctx->model.proj_type) {
         case PROJECTOR_TYPE_MINICPMV:
             {
                 // inspired from siglip:
@@ -3585,9 +4068,29 @@ bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_ima
         case PROJECTOR_TYPE_GEMMA3:
         case PROJECTOR_TYPE_IDEFICS3:
         case PROJECTOR_TYPE_INTERNVL:
+        case PROJECTOR_TYPE_QWEN2A:
+        case PROJECTOR_TYPE_ULTRAVOX:
+        case PROJECTOR_TYPE_VOXTRAL:
             {
                 // do nothing
             } break;
+        case PROJECTOR_TYPE_LLAMA4:
+            {
+                // set the 2D positions
+                int n_patches_per_col = image_size_width / patch_size;
+                std::vector<int32_t> pos_data(num_patches + 1, 0); // +1 for the [CLS] token
+                // last pos is always kept 0, it's for CLS
+                // dimension H
+                for (int i = 0; i < num_patches; i++) {
+                    pos_data[i] = (i / n_patches_per_col) + 1;
+                }
+                set_input_i32("pos_h", pos_data);
+                // dimension W
+                for (int i = 0; i < num_patches; i++) {
+                    pos_data[i] = (i % n_patches_per_col) + 1;
+                }
+                set_input_i32("pos_w", pos_data);
+            } break;
         default:
             GGML_ABORT("Unknown projector type");
     }
@@ -3608,6 +4111,18 @@ bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_ima
         return false;
     }
 
+    // print debug nodes
+    if (ctx->debug_graph) {
+        LOG_INF("\n\n---\n\n");
+        LOG_INF("\n\nDebug graph:\n\n");
+        for (ggml_tensor * t : ctx->debug_print_tensors) {
+            std::vector<uint8_t> data(ggml_nbytes(t));
+            ggml_backend_tensor_get(t, data.data(), 0, ggml_nbytes(t));
+            print_tensor_shape(t);
+            print_tensor_data(t, data.data(), 3);
+        }
+    }
+
     // the last node is the embedding tensor
     ggml_tensor * embeddings = ggml_graph_node(gf, -1);
 
@@ -3615,7 +4130,7 @@ bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_ima
     const int n_tokens_out = embeddings->ne[1];
     const int expected_n_tokens_out = clip_n_output_tokens(ctx, imgs.entries[0].get());
     if (n_tokens_out != expected_n_tokens_out) {
-        LOG_ERR("%s: expected %d tokens, got %d\n", __func__, expected_n_tokens_out, n_tokens_out);
+        LOG_ERR("%s: expected output %d tokens, got %d\n", __func__, expected_n_tokens_out, n_tokens_out);
         GGML_ABORT("Invalid number of output tokens");
     }
 
@@ -3625,198 +4140,92 @@ bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_ima
     return true;
 }
 
-bool clip_model_quantize(const char * fname_inp, const char * fname_out, const int itype) {
-    assert(itype < GGML_TYPE_COUNT);
-    ggml_type type = static_cast<ggml_type>(itype);
-
-    auto * ctx_clip = clip_init(fname_inp, clip_context_params{
-        /* use_gpu */   false,
-        /* verbosity */ GGML_LOG_LEVEL_ERROR,
-    });
-
-    const auto & ctx_src = ctx_clip->ctx_gguf.get();
-    const auto & ctx_data = ctx_clip->ctx_data.get();
-
-    auto * ctx_out = gguf_init_empty();
-    gguf_set_kv(ctx_out, ctx_src);
-    gguf_set_val_u32(ctx_out, "general.quantization_version", GGML_QNT_VERSION);
-    gguf_set_val_u32(ctx_out, "general.file_type", itype);
-
-    auto fout = std::ofstream(fname_out, std::ios::binary);
-
-    const int n_tensors = gguf_get_n_tensors(ctx_src);
-
-    for (int i = 0; i < n_tensors; ++i) {
-        const char * name = gguf_get_tensor_name(ctx_src, i);
-        ggml_tensor * cur = ggml_get_tensor(ctx_data, name);
-        gguf_add_tensor(ctx_out, cur);
-    }
-
-    const size_t meta_size = gguf_get_meta_size(ctx_out);
-    for (size_t i = 0; i < meta_size; ++i) {
-        fout.put(0);
-    }
-
-    // regexes of tensor names to be quantized
-    const std::vector<std::string> k_names = {
-        ".*weight",
-    };
-
-    std::vector<uint8_t> work(512);
-    std::vector<float> conv_buf(512);
-    size_t total_size_org = 0;
-    size_t total_size_new = 0;
-
-    for (int i = 0; i < n_tensors; ++i) {
-        const std::string name = gguf_get_tensor_name(ctx_src, i);
-        ggml_tensor * cur = ggml_get_tensor(ctx_data, name.c_str());
-
-        enum ggml_type new_type;
-        void * new_data;
-        size_t new_size;
-
-        bool quantize = false;
-        for (const auto & s : k_names) {
-            if (std::regex_match(name, std::regex(s))) {
-                quantize = true;
-                break;
-            }
-        }
-
-        // quantize only 2D tensors and bigger than block size
-        quantize &= (ggml_n_dims(cur) == 2) && cur->ne[0] > ggml_blck_size(type);
-
-        if (quantize) {
-            new_type = type;
-            if (new_type >= GGML_TYPE_Q2_K && name.find("embd") != std::string::npos) {
-                new_type = GGML_TYPE_Q8_0; // ggml_get_rows needs non K type
-                // LOG_ERR("%s: quantizing %s to %s\n", __func__, name.c_str(), ggml_type_name(new_type));
-            }
-            const size_t n_elms = ggml_nelements(cur);
-            float * f32_data;
-
-            switch (cur->type) {
-            case GGML_TYPE_F32:
-                f32_data = (float *)cur->data;
-                break;
-            case GGML_TYPE_F16:
-                if (conv_buf.size() < n_elms) {
-                    conv_buf.resize(n_elms);
-                }
-                for (size_t j = 0; j < n_elms; ++j) {
-                    conv_buf[j] = ggml_fp16_to_fp32(((ggml_fp16_t *)cur->data)[j]);
-                }
-                f32_data = (float *)conv_buf.data();
-                break;
-            default:
-                LOG_ERR("%s: Please use an input file in f32 or f16\n", __func__);
-                gguf_free(ctx_out);
-                return false;
-            }
-
-            if (work.size() < n_elms * 4) {
-                work.resize(n_elms * 4);
-            }
-            new_data = work.data();
-
-            new_size = ggml_quantize_chunk(new_type, f32_data, new_data, 0, n_elms/cur->ne[0], cur->ne[0], nullptr);
-        } else {
-            new_type = cur->type;
-            new_data = cur->data;
-            new_size = ggml_nbytes(cur);
-        }
-        const size_t orig_size = ggml_nbytes(cur);
-        total_size_org += orig_size;
-        total_size_new += new_size;
-        gguf_set_tensor_type(ctx_out, name.c_str(), new_type);
-        GGML_ASSERT(gguf_get_tensor_size(ctx_out, gguf_find_tensor(ctx_out, name.c_str())) == new_size);
-        gguf_set_tensor_data(ctx_out, name.c_str(), new_data);
-        fout.write((const char *)new_data, new_size);
-        size_t pad = GGML_PAD(new_size, gguf_get_alignment(ctx_out)) - new_size;
-        for (size_t j = 0; j < pad; ++j) {
-            fout.put(0);
-        }
-
-        LOG_INF("%s: n_dims = %d | quantize=%d | size = %f MB -> %f MB\n", name.c_str(), ggml_n_dims(cur), quantize,
-               orig_size / 1024.0 / 1024.0, new_size / 1024.0 / 1024.0);
-    }
-
-    // go back to beginning of file and write the updated metadata
-    fout.seekp(0, std::ios::beg);
-    std::vector<uint8_t> meta(meta_size);
-    gguf_get_meta_data(ctx_out, meta.data());
-    fout.write((const char *)meta.data(), meta_size);
-
-    fout.close();
-
-    clip_free(ctx_clip);
-    gguf_free(ctx_out);
-
-    {
-        LOG_INF("%s: original  size = %8.2f MB\n", __func__, total_size_org / 1024.0 / 1024.0);
-        LOG_INF("%s: quantized size = %8.2f MB\n", __func__, total_size_new / 1024.0 / 1024.0);
-    }
-
-    return true;
-}
-
 int clip_n_mmproj_embd(const struct clip_ctx * ctx) {
-    switch (ctx->proj_type) {
+    const auto & hparams = ctx->model.hparams;
+    switch (ctx->model.proj_type) {
         case PROJECTOR_TYPE_LDP:
-            return ctx->vision_model.mm_model_block_1_block_2_1_b->ne[0];
+            return ctx->model.mm_model_block_1_block_2_1_b->ne[0];
         case PROJECTOR_TYPE_LDPV2:
-            return ctx->vision_model.mm_model_peg_0_b->ne[0];
+            return ctx->model.mm_model_peg_0_b->ne[0];
         case PROJECTOR_TYPE_MLP:
         case PROJECTOR_TYPE_PIXTRAL:
-            return ctx->vision_model.mm_2_w->ne[1];
+            return ctx->model.mm_2_w->ne[1];
         case PROJECTOR_TYPE_MLP_NORM:
-            return ctx->vision_model.mm_3_b->ne[0];
+            return ctx->model.mm_3_b->ne[0];
         case PROJECTOR_TYPE_MINICPMV:
-            if (ctx->minicpmv_version == 2) {
+            if (hparams.minicpmv_version == 2) {
+                // MiniCPM-V 2.5
                 return 4096;
-            } else if (ctx->minicpmv_version == 3) {
+            } else if (hparams.minicpmv_version == 3) {
+                // MiniCPM-V 2.6
                 return 3584;
-            } else if (ctx->minicpmv_version == 4) {
+            } else if (hparams.minicpmv_version == 4) {
+                // MiniCPM-o 2.6
                 return 3584;
+            } else if (hparams.minicpmv_version == 5) {
+                // MiniCPM-V 4.0
+                return 2560;
             }
             GGML_ABORT("Unknown minicpmv version");
         case PROJECTOR_TYPE_GLM_EDGE:
-            return ctx->vision_model.mm_model_mlp_3_w->ne[1];
+            return ctx->model.mm_model_mlp_3_w->ne[1];
         case PROJECTOR_TYPE_QWEN2VL:
         case PROJECTOR_TYPE_QWEN25VL:
-            return ctx->vision_model.mm_1_b->ne[0];
+            return ctx->model.mm_1_b->ne[0];
         case PROJECTOR_TYPE_GEMMA3:
-            return ctx->vision_model.mm_input_proj_w->ne[0];
+            return ctx->model.mm_input_proj_w->ne[0];
         case PROJECTOR_TYPE_IDEFICS3:
-            return ctx->vision_model.projection->ne[1];
+            return ctx->model.projection->ne[1];
+        case PROJECTOR_TYPE_ULTRAVOX:
+        case PROJECTOR_TYPE_VOXTRAL:
+            return ctx->model.mm_2_w->ne[1];
         case PROJECTOR_TYPE_INTERNVL:
-            return ctx->vision_model.mm_3_w->ne[1];
+            return ctx->model.mm_3_w->ne[1];
+        case PROJECTOR_TYPE_LLAMA4:
+            return ctx->model.mm_model_proj->ne[1];
+        case PROJECTOR_TYPE_QWEN2A:
+            return ctx->model.mm_fc_w->ne[1];
         default:
             GGML_ABORT("Unknown projector type");
     }
 }
 
 int clip_is_minicpmv(const struct clip_ctx * ctx) {
-    if (ctx->proj_type == PROJECTOR_TYPE_MINICPMV) {
-        return ctx->minicpmv_version;
+    if (ctx->proj_type() == PROJECTOR_TYPE_MINICPMV) {
+        return ctx->model.hparams.minicpmv_version;
     }
     return 0;
 }
 
 bool clip_is_glm(const struct clip_ctx * ctx) {
-    return ctx->proj_type == PROJECTOR_TYPE_GLM_EDGE;
+    return ctx->proj_type() == PROJECTOR_TYPE_GLM_EDGE;
 }
 
 bool clip_is_qwen2vl(const struct clip_ctx * ctx) {
-    return ctx->proj_type == PROJECTOR_TYPE_QWEN2VL || ctx->proj_type == PROJECTOR_TYPE_QWEN25VL;
+    return ctx->proj_type() == PROJECTOR_TYPE_QWEN2VL
+        || ctx->proj_type() == PROJECTOR_TYPE_QWEN25VL;
 }
 
 bool clip_is_llava(const struct clip_ctx * ctx) {
-    return ctx->has_llava_projector;
+    return ctx->model.hparams.has_llava_projector;
 }
 
 bool clip_is_gemma3(const struct clip_ctx * ctx) {
-    return ctx->proj_type == PROJECTOR_TYPE_GEMMA3;
+    return ctx->proj_type() == PROJECTOR_TYPE_GEMMA3;
+}
+
+bool clip_has_vision_encoder(const struct clip_ctx * ctx) {
+    return ctx->model.modality == CLIP_MODALITY_VISION;
+}
+
+bool clip_has_audio_encoder(const struct clip_ctx * ctx) {
+    return ctx->model.modality == CLIP_MODALITY_AUDIO;
+}
+
+bool clip_has_whisper_encoder(const struct clip_ctx * ctx) {
+    return ctx->proj_type() == PROJECTOR_TYPE_ULTRAVOX
+        || ctx->proj_type() == PROJECTOR_TYPE_QWEN2A
+        || ctx->proj_type() == PROJECTOR_TYPE_VOXTRAL;
 }
 
 bool clip_encode_float_image (struct clip_ctx * ctx, int n_threads, float * img, int h, int w, float * vec) {
@@ -3837,5 +4246,16 @@ bool clip_encode_float_image (struct clip_ctx * ctx, int n_threads, float * img,
 //
 
 projector_type clip_get_projector_type(const struct clip_ctx * ctx) {
-    return ctx->proj_type;
+    return ctx->proj_type();
+}
+
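+// wrap a mel spectrogram as a pseudo-image so it can reuse the image batch path:
+// nx carries the time frames, ny the mel bins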
+void clip_image_f32_batch_add_mel(struct clip_image_f32_batch * batch, int n_mel, int n_frames, float * mel) {
+    clip_image_f32 * audio = new clip_image_f32;
+    audio->nx = n_frames;
+    audio->ny = n_mel;
+    audio->buf.resize(n_frames * n_mel);
+    std::memcpy(audio->buf.data(), mel, n_frames * n_mel * sizeof(float));
+
+    batch->entries.push_back(clip_image_f32_ptr(audio));
+    batch->is_audio = true;
 }
diff --git a/llama/llama.cpp/tools/mtmd/clip.h b/llama/llama.cpp/tools/mtmd/clip.h
index 0b0eb0295..08f3efb7b 100644
--- a/llama/llama.cpp/tools/mtmd/clip.h
+++ b/llama/llama.cpp/tools/mtmd/clip.h
@@ -1,27 +1,10 @@
-#ifndef CLIP_H
-#define CLIP_H
+#pragma once
 
 #include "ggml.h"
 #include <stddef.h>
 #include <stdint.h>
 
-#ifdef LLAMA_SHARED
-#    if defined(_WIN32) && !defined(__MINGW32__)
-#        ifdef LLAMA_BUILD
-#            define CLIP_API __declspec(dllexport)
-#        else
-#            define CLIP_API __declspec(dllimport)
-#        endif
-#    else
-#        define CLIP_API __attribute__ ((visibility ("default")))
-#    endif
-#else
-#    define CLIP_API
-#endif
-
-#ifdef __cplusplus
-extern "C" {
-#endif
+// !!! Internal header, to be used by mtmd only !!!
 
 struct clip_ctx;
 
@@ -34,102 +17,95 @@ struct clip_image_f32;
 struct clip_image_u8_batch;
 struct clip_image_f32_batch;
 
+enum clip_modality {
+    CLIP_MODALITY_VISION,
+    CLIP_MODALITY_AUDIO,
+};
+
 struct clip_context_params {
     bool use_gpu;
     enum ggml_log_level verbosity;
 };
 
-// deprecated, use clip_init
-CLIP_API struct clip_ctx * clip_model_load(const char * fname, int verbosity);
+struct clip_init_result {
+    struct clip_ctx * ctx_v; // vision context
+    struct clip_ctx * ctx_a; // audio context
+};
 
-CLIP_API struct clip_ctx * clip_init(const char * fname, struct clip_context_params ctx_params);
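+// either context may be NULL if the file lacks that modality; on load failure both are NULL.
+// each non-NULL context is released with clip_free()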
+struct clip_init_result clip_init(const char * fname, struct clip_context_params ctx_params);
 
-CLIP_API void clip_free(struct clip_ctx * ctx);
+void clip_free(struct clip_ctx * ctx);
 
-CLIP_API size_t clip_embd_nbytes(const struct clip_ctx * ctx);
-CLIP_API size_t clip_embd_nbytes_by_img(const struct clip_ctx * ctx, int img_w, int img_h);
+size_t clip_embd_nbytes(const struct clip_ctx * ctx);
+size_t clip_embd_nbytes_by_img(const struct clip_ctx * ctx, int img_w, int img_h);
 
-CLIP_API int32_t clip_get_image_size (const struct clip_ctx * ctx);
-CLIP_API int32_t clip_get_patch_size (const struct clip_ctx * ctx);
-CLIP_API int32_t clip_get_hidden_size(const struct clip_ctx * ctx);
+int32_t clip_get_image_size (const struct clip_ctx * ctx);
+int32_t clip_get_patch_size (const struct clip_ctx * ctx);
+int32_t clip_get_hidden_size(const struct clip_ctx * ctx);
 
 // TODO: should be enum, not string
-CLIP_API const char * clip_patch_merge_type(const struct clip_ctx * ctx);
+const char * clip_patch_merge_type(const struct clip_ctx * ctx);
 
-CLIP_API const int32_t * clip_image_grid(const struct clip_ctx * ctx);
-CLIP_API size_t get_clip_image_grid_size(const struct clip_ctx * ctx);
-
-GGML_DEPRECATED(CLIP_API int clip_n_patches(const struct clip_ctx * ctx),
-    "use clip_n_output_tokens instead");
-GGML_DEPRECATED(CLIP_API int clip_n_patches_by_img(const struct clip_ctx * ctx, struct clip_image_f32 * img),
-    "use clip_n_output_tokens instead");
-
-CLIP_API int clip_n_output_tokens(const struct clip_ctx * ctx, struct clip_image_f32 * img);
+int clip_n_output_tokens(const struct clip_ctx * ctx, struct clip_image_f32 * img);
 
 // for M-RoPE, this will be the number of token positions in X and Y directions
 // for other models, X will be the total number of tokens and Y will be 1
-CLIP_API int clip_n_output_tokens_x(const struct clip_ctx * ctx, struct clip_image_f32 * img);
-CLIP_API int clip_n_output_tokens_y(const struct clip_ctx * ctx, struct clip_image_f32 * img);
+int clip_n_output_tokens_x(const struct clip_ctx * ctx, struct clip_image_f32 * img);
+int clip_n_output_tokens_y(const struct clip_ctx * ctx, struct clip_image_f32 * img);
 
 // this should be equal to the embedding dimension of the text model
-CLIP_API int clip_n_mmproj_embd(const struct clip_ctx * ctx);
+int clip_n_mmproj_embd(const struct clip_ctx * ctx);
 
-CLIP_API int clip_uhd_num_image_embeds_col(struct clip_ctx * ctx_clip);
-CLIP_API void clip_add_load_image_size(struct clip_ctx * ctx_clip, struct clip_image_size * load_image_size);
-CLIP_API struct clip_image_size * clip_get_load_image_size(struct clip_ctx * ctx_clip);
-
-CLIP_API struct clip_image_size      * clip_image_size_init(void);
-CLIP_API struct clip_image_u8        * clip_image_u8_init (void);
-CLIP_API struct clip_image_f32       * clip_image_f32_init(void);
-CLIP_API struct clip_image_f32_batch * clip_image_f32_batch_init(void); // only used by libllava
+struct clip_image_size      * clip_image_size_init(void);
+struct clip_image_u8        * clip_image_u8_init (void);
+struct clip_image_f32       * clip_image_f32_init(void);
+struct clip_image_f32_batch * clip_image_f32_batch_init(void); // only used by libllava
 
 // nx, ny are the output image dimensions
-CLIP_API unsigned char * clip_image_u8_get_data(struct clip_image_u8 * img, uint32_t * nx, uint32_t * ny);
+unsigned char * clip_image_u8_get_data(struct clip_image_u8 * img, uint32_t * nx, uint32_t * ny);
 
-CLIP_API void clip_image_size_free (struct clip_image_size * img_size);
-CLIP_API void clip_image_u8_free (struct clip_image_u8  * img);
-CLIP_API void clip_image_f32_free(struct clip_image_f32 * img);
-CLIP_API void clip_image_u8_batch_free (struct clip_image_u8_batch  * batch);
-CLIP_API void clip_image_f32_batch_free(struct clip_image_f32_batch * batch);
+void clip_image_size_free (struct clip_image_size * img_size);
+void clip_image_u8_free (struct clip_image_u8  * img);
+void clip_image_f32_free(struct clip_image_f32 * img);
+void clip_image_u8_batch_free (struct clip_image_u8_batch  * batch);
+void clip_image_f32_batch_free(struct clip_image_f32_batch * batch);
 
 // use for accessing underlay data of clip_image_f32_batch
-CLIP_API size_t clip_image_f32_batch_n_images(const struct clip_image_f32_batch * batch); // equivalent to batch->size()
-CLIP_API size_t clip_image_f32_batch_nx(const struct clip_image_f32_batch * batch, int idx); // equivalent to batch[idx]->nx
-CLIP_API size_t clip_image_f32_batch_ny(const struct clip_image_f32_batch * batch, int idx); // equivalent to batch[idx]->ny
-CLIP_API struct clip_image_f32 * clip_image_f32_get_img(const struct clip_image_f32_batch * batch, int idx); // equivalent to batch[idx]->data
+size_t clip_image_f32_batch_n_images(const struct clip_image_f32_batch * batch); // equivalent to batch->size()
+size_t clip_image_f32_batch_nx(const struct clip_image_f32_batch * batch, int idx); // equivalent to batch[idx]->nx
+size_t clip_image_f32_batch_ny(const struct clip_image_f32_batch * batch, int idx); // equivalent to batch[idx]->ny
+struct clip_image_f32 * clip_image_f32_get_img(const struct clip_image_f32_batch * batch, int idx); // equivalent to batch[idx]->data
 
 /**
  * Build image from pixels decoded by other libraries instead of stb_image.h for better performance.
  * The memory layout is RGBRGBRGB..., input buffer length must be 3*nx*ny bytes
  */
-CLIP_API void clip_build_img_from_pixels(const unsigned char * rgb_pixels, int nx, int ny, struct clip_image_u8 * img);
+void clip_build_img_from_pixels(const unsigned char * rgb_pixels, int nx, int ny, struct clip_image_u8 * img);
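+// typical use with an externally decoded buffer `rgb` of 3*nx*ny bytes:
+//   clip_image_u8 * img = clip_image_u8_init();
+//   clip_build_img_from_pixels(rgb, nx, ny, img);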
 
-CLIP_API bool clip_image_load_from_file(const char * fname, struct clip_image_u8 * img);
+bool clip_image_load_from_file(const char * fname, struct clip_image_u8 * img);
 
 /** interpret bytes as an image file with length bytes_length, and use the result to populate img */
-CLIP_API bool clip_image_load_from_bytes(const unsigned char * bytes, size_t bytes_length, struct clip_image_u8 * img);
+bool clip_image_load_from_bytes(const unsigned char * bytes, size_t bytes_length, struct clip_image_u8 * img);
 
 /** preprocess img and store the result in res_imgs, pad_to_square may be overridden to false depending on model configuration */
-CLIP_API bool clip_image_preprocess(struct clip_ctx * ctx, const struct clip_image_u8 * img, struct clip_image_f32_batch * res_imgs );
+bool clip_image_preprocess(struct clip_ctx * ctx, const struct clip_image_u8 * img, struct clip_image_f32_batch * res_imgs );
 
-CLIP_API struct ggml_tensor * clip_get_newline_tensor(const struct clip_ctx * ctx);
+struct ggml_tensor * clip_get_newline_tensor(const struct clip_ctx * ctx);
 
-CLIP_API bool clip_image_encode      (struct clip_ctx * ctx, int n_threads, struct clip_image_f32 * img, float * vec);
-CLIP_API bool clip_image_batch_encode(struct clip_ctx * ctx, int n_threads, const struct clip_image_f32_batch * imgs, float * vec);
+bool clip_image_encode      (struct clip_ctx * ctx, int n_threads, struct clip_image_f32 * img, float * vec);
+bool clip_image_batch_encode(struct clip_ctx * ctx, int n_threads, const struct clip_image_f32_batch * imgs, float * vec);
 
-CLIP_API bool clip_model_quantize(const char * fname_inp, const char * fname_out, int itype);
+int clip_is_minicpmv(const struct clip_ctx * ctx);
+bool clip_is_glm(const struct clip_ctx * ctx);
+bool clip_is_qwen2vl(const struct clip_ctx * ctx);
+bool clip_is_llava(const struct clip_ctx * ctx);
+bool clip_is_gemma3(const struct clip_ctx * ctx);
 
-CLIP_API int clip_is_minicpmv(const struct clip_ctx * ctx);
-CLIP_API bool clip_is_glm(const struct clip_ctx * ctx);
-CLIP_API bool clip_is_qwen2vl(const struct clip_ctx * ctx);
-CLIP_API bool clip_is_llava(const struct clip_ctx * ctx);
-CLIP_API bool clip_is_gemma3(const struct clip_ctx * ctx);
+bool clip_encode_float_image (struct clip_ctx * ctx, int n_threads, float * img, int h, int w, float * vec);
 
-CLIP_API bool clip_encode_float_image (struct clip_ctx * ctx, int n_threads, float * img, int h, int w, float * vec);
+// used for audio input
+void clip_image_f32_batch_add_mel(struct clip_image_f32_batch * batch, int n_mel, int n_frames, float * mel);
 
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif // CLIP_H
+bool clip_has_vision_encoder(const struct clip_ctx * ctx);
+bool clip_has_audio_encoder(const struct clip_ctx * ctx);
+bool clip_has_whisper_encoder(const struct clip_ctx * ctx);
diff --git a/llama/llama.cpp/tools/mtmd/llava.cpp b/llama/llama.cpp/tools/mtmd/llava.cpp
deleted file mode 100644
index ebef8b3c1..000000000
--- a/llama/llama.cpp/tools/mtmd/llava.cpp
+++ /dev/null
@@ -1,591 +0,0 @@
-#include "clip.h"
-#include "llava.h"
-
-#include "llama.h"
-#include "ggml-cpp.h"
-
-#include <algorithm>
-#include <cerrno>
-#include <cstdio>
-#include <cstdlib>
-#include <cstring>
-#include <limits>
-#include <numeric>
-#include <vector>
-
-#if defined(LLAVA_LOG_OFF)
-#   define LOG_INF(...)
-#   define LOG_WRN(...)
-#   define LOG_ERR(...)
-#   define LOG_DBG(...)
-#else // defined(LLAVA_LOG_OFF)
-#   define LOG_INF(...) do { fprintf(stdout, __VA_ARGS__); } while (0)
-#   define LOG_WRN(...) do { fprintf(stderr, __VA_ARGS__); } while (0)
-#   define LOG_ERR(...) do { fprintf(stderr, __VA_ARGS__); } while (0)
-#   define LOG_DBG(...) do { fprintf(stdout, __VA_ARGS__); } while (0)
-#endif // defined(LLAVA_LOG_OFF)
-
-// RGB uint8 image
-struct clip_image_u8 {
-    int nx;
-    int ny;
-
-    std::vector<uint8_t> buf;
-};
-
-// RGB float32 image (NHWC)
-// Memory layout: RGBRGBRGB...
-struct clip_image_f32 {
-    int nx;
-    int ny;
-
-    std::vector<float> buf;
-};
-
-struct clip_image_grid_shape {
-    int first;
-    int second;
-};
-
-// convenience cpp wrapper
-struct clip_image_f32_batch_deleter {
-    void operator()(clip_image_f32_batch * val) { clip_image_f32_batch_free(val); }
-};
-typedef std::unique_ptr<clip_image_f32_batch, clip_image_f32_batch_deleter> clip_image_f32_batch_ptr;
-
-struct clip_image_size_deleter {
-    void operator()(clip_image_size * val) { clip_image_size_free(val); }
-};
-typedef std::unique_ptr<clip_image_size, clip_image_size_deleter> clip_image_size_ptr;
-
-/**
- * Selects the best resolution from a list of possible resolutions based on the original size.
- *
- * @param original_size The original size of the image in the format (width, height).
- * @param possible_resolutions A list of possible resolutions in the format [(width1, height1), (width2, height2), ...].
- * @return The best fit resolution in the format (width, height).
- */
-static std::pair<int, int> select_best_resolution(const std::pair<int, int>& original_size, const std::vector<std::pair<int, int>>& possible_resolutions) {
-    int original_width  = original_size.first;
-    int original_height = original_size.second;
-
-    std::pair<int, int> best_fit;
-    int max_effective_resolution = 0;
-    int min_wasted_resolution = std::numeric_limits<int>::max();
-
-    for (const auto& resolution : possible_resolutions) {
-        int width = resolution.first;
-        int height = resolution.second;
-        float scale = std::min(static_cast<float>(width) / original_width, static_cast<float>(height) / original_height);
-        int downscaled_width  = static_cast<int>(original_width * scale);
-        int downscaled_height = static_cast<int>(original_height * scale);
-        int effective_resolution = std::min(downscaled_width * downscaled_height, original_width * original_height);
-        int wasted_resolution = (width * height) - effective_resolution;
-        // LOG_DBG("resolution: %d %d, scale: %f, downscaled: %d %d, effective: %d, wasted: %d\n", width, height, scale, downscaled_width, downscaled_height, effective_resolution, wasted_resolution);
-        if (effective_resolution > max_effective_resolution || (effective_resolution == max_effective_resolution && wasted_resolution < min_wasted_resolution)) {
-            max_effective_resolution = effective_resolution;
-            min_wasted_resolution = wasted_resolution;
-            best_fit = resolution;
-        }
-    }
-
-    return best_fit;
-}
-
-/**
- * @brief Get the anyres image grid shape object
- *
- * @param image_size
- * @param grid_pinpoints
- * @param image_patch_size
- * @return clip_image_grid_shape
- */
-static struct clip_image_grid_shape get_anyres_image_grid_shape(const std::pair<int, int> & image_size, const std::vector<std::pair<int, int>> & grid_pinpoints, int image_patch_size) {
-    /**
-        Conversion from gguf flat array to vector:
-        std::vector<std::pair<int, int>> possible_resolutions;
-        for (int i = 0; i < 32 && params.image_grid_pinpoints[i] != 0; i+=2) {
-            possible_resolutions.push_back({params.image_grid_pinpoints[i], params.image_grid_pinpoints[i+1]});
-        }
-     */
-    auto best_resolution = select_best_resolution(image_size, grid_pinpoints);
-    return {best_resolution.first / image_patch_size, best_resolution.second / image_patch_size};
-}
-
-// Take the image segments in a grid configuration and return the embeddings and the number of embeddings into preallocated memory (image_embd_out)
-static bool clip_llava_handle_patches(clip_ctx * ctx_clip, std::vector<float *> & image_embd_v, struct clip_image_grid_shape grid_shape, float * image_embd_out, int * n_img_pos_out, clip_image_f32 * img_input) {
-    struct {
-        struct ggml_context * ctx;
-    } model;
-
-    const int32_t image_size = clip_get_image_size(ctx_clip);
-    const int32_t patch_size = clip_get_patch_size(ctx_clip);
-
-    int32_t num_patches_per_side = image_size / patch_size; // 336 / 14 = 24 - used for embedding-patching boxes (24*24 = 576 patches)
-
-    int num_patches_width  = grid_shape.first;  // grid 1-4
-    int num_patches_height = grid_shape.second; // grid 1-4
-
-    const size_t num_images = num_patches_width * num_patches_height + 1;
-
-    // TODO: size calculation is not calculated - it's only tens of MB
-    size_t ctx_size = 0;
-
-    {
-        ctx_size += clip_embd_nbytes(ctx_clip) * num_images * 8; // image_features
-        ctx_size += 1024*1024 * ggml_type_size(GGML_TYPE_F32);
-    }
-
-    struct ggml_init_params params {
-        /*.mem_size   =*/ ctx_size,
-        /*.mem_buffer =*/ NULL,
-        /*.no_alloc   =*/ false, // NOTE: this should be false when using the legacy API
-    };
-
-    // Python reference code for full unpad:
-    /*
-        base_image_feature = image_feature[0]
-        image_feature = image_feature[1:]
-        image_feature = image_feature.permute(4, 0, 2, 1, 3).contiguous()
-        image_feature = image_feature.flatten(1, 2).flatten(2, 3)
-        image_feature = unpad_image(image_feature, image_sizes[image_idx])
-        image_feature = torch.cat((
-            image_feature,
-            self.model.image_newline[:, None, None].expand(*image_feature.shape[:-1], 1)
-        ), dim=-1)
-        image_feature = image_feature.flatten(1, 2).transpose(0, 1)
-        image_feature = torch.cat((base_image_feature, image_feature), dim=0)
-    */
-    // We now have two options: unpad or no unpad. Unpad removes tokens for faster llm eval.
-    // In terms of result quality it appears to make no difference, so we'll start with the easier approach given 5D tensors are not supported in ggml yet.
-    // Without unpad we have to split the sub-image embeddings into patches of 24 features each and permute them.
-    // Once all images are processed, the base_image_features are prepended without any changes.
-
-    // Pytorch reference simplified, modified for ggml compatibility - confirmed identical output in python (for a 2x2 grid image (676x676 scaling))
-    /*
-        image_feature = image_feature.view(2, 2, 24, 24, 4096)
-        image_feature = image_feature.permute(0, 2, 1, 3, 4).contiguous()
-        image_feature = image_feature.view(2, 24, 2, 24, 4096)
-        image_feature = image_feature.flatten(0, 3)
-
-        // Reshape to 4D tensor by merging the last two dimensions
-        image_feature = image_feature.view(2, 2, 24, 24*4096)
-        image_feature = image_feature.permute(0, 2, 1, 3).contiguous()
-        image_feature = image_feature.view(-1, 4096)
-    */
-
-    model.ctx = ggml_init(params);
-
-    struct ggml_tensor * image_features = ggml_new_tensor_3d(model.ctx, GGML_TYPE_F32, clip_n_mmproj_embd(ctx_clip), clip_n_output_tokens(ctx_clip, img_input), num_images - 1); // example: 4096 x 576 x 4
-    // ggml_tensor_printf(image_features,"image_features",__LINE__,false,false);
-    // fill it with the image embeddings, ignoring the base
-    for (size_t i = 1; i < num_images; i++) {
-        size_t offset = (i-1) * clip_embd_nbytes(ctx_clip);
-        memcpy((uint8_t *)(image_features->data) + offset, image_embd_v[i], clip_embd_nbytes(ctx_clip));
-    }
-
-    struct ggml_cgraph  * gf = ggml_new_graph(model.ctx);
-    size_t size_ele = ggml_type_size(GGML_TYPE_F32);
-
-    struct ggml_tensor *image_features_patchview = ggml_view_4d(model.ctx, image_features,
-                                                                num_patches_per_side * clip_n_mmproj_embd(ctx_clip),
-                                                                num_patches_per_side,
-                                                                num_patches_width,
-                                                                num_patches_height,
-                                                                size_ele * num_patches_per_side * clip_n_mmproj_embd(ctx_clip),
-                                                                size_ele * num_patches_per_side * clip_n_mmproj_embd(ctx_clip) * num_patches_per_side,
-                                                                size_ele * num_patches_per_side * clip_n_mmproj_embd(ctx_clip) * num_patches_per_side * num_patches_width, 0);
-    // ggml_tensor_printf(image_features_patchview,"image_features_patchview",__LINE__,false,false);
-    struct ggml_tensor *permuted_cont = ggml_cont(model.ctx, ggml_permute(model.ctx, image_features_patchview, 0, 2, 1, 3));
-    /**
-     At the end of each row we have to add the row_end embeddings, which are the same as the newline embeddings
-         image_feature = torch.cat((
-        image_feature,
-        self.model.image_newline[:, None, None].expand(*image_feature.shape[:-1], 1).to(image_feature.device)
-    ), dim=-1)
-     *
-     */
-
-    // ggml_tensor_printf(permuted_cont,"permuted_cont",__LINE__,false,false);
-    struct ggml_tensor *flatten = ggml_view_2d(model.ctx, permuted_cont, clip_n_mmproj_embd(ctx_clip), num_patches_height * num_patches_width * num_patches_per_side * num_patches_per_side,  size_ele * clip_n_mmproj_embd(ctx_clip), 0);
-    // ggml_tensor_printf(flatten,"flatten",__LINE__,false,false);
-    ggml_build_forward_expand(gf, flatten);
-
-    ggml_backend_ptr backend { ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_CPU, nullptr) };
-    GGML_ASSERT(backend != nullptr && "failed to initialize CPU backend");
-    ggml_backend_graph_compute(backend.get(), gf);
-
-    struct ggml_tensor* result = ggml_graph_node(gf, -1);
-
-    memcpy(image_embd_out, image_embd_v[0], clip_embd_nbytes(ctx_clip)); // main image as global context
-    // append without newline tokens (default behavior in llava_arch when not using unpad ):
-    memcpy(image_embd_out + clip_n_output_tokens(ctx_clip, img_input) * clip_n_mmproj_embd(ctx_clip), (float*)result->data, clip_embd_nbytes(ctx_clip) * (num_images-1)); // grid patches
-    *n_img_pos_out = static_cast<int>(result->ne[1]+clip_n_output_tokens(ctx_clip, img_input));
-
-    // Debug: Test single segments
-    // Current findings: sending base image, sending a segment embedding all works similar to python
-    // However, permuted embeddings do not work yet (stride issue?)
-    // memcpy(image_embd_out, image_embd_v[0], clip_embd_nbytes(ctx_clip)); // main image as context
-    // memcpy(image_embd_out, (float*)prepared_cont->data, clip_embd_nbytes(ctx_clip)); // main image as context
-    // *n_img_pos_out=576;
-
-    ggml_free(model.ctx);
-    return true;
-}
-
-static clip_image_f32 * reshape_by_patch(clip_image_f32 * image, int patch_size) {
-    int width = image->nx;
-    int height = image->ny;
-    int num_patches = (height / patch_size) * (width / patch_size);
-    clip_image_f32 * patch = clip_image_f32_init();
-    patch->nx = patch_size * num_patches;
-    patch->ny = patch_size;
-    patch->buf.resize(3 * patch->nx * patch->ny);
-
-    int patch_index = 0;
-
-    for (int i = 0; i < height; i += patch_size) {
-        for (int j = 0; j < width; j += patch_size) {
-            for (int pi = 0; pi < patch_size; ++pi) {
-                for (int pj = 0; pj < patch_size; ++pj) {
-                    int input_index = ((i + pi) * width + (j + pj)) * 3;
-                    int output_index = (pi * patch_size * num_patches + patch_index * patch_size + pj) * 3;
-                    patch->buf[output_index] = image->buf[input_index];
-                    patch->buf[output_index+1] = image->buf[input_index+1];
-                    patch->buf[output_index+2] = image->buf[input_index+2];
-                }
-            }
-            patch_index++;
-        }
-    }
-    return patch;
-}
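
The index math above flattens each patch into a wide strip: patch k occupies columns [k*patch_size, (k+1)*patch_size) of a (patch_size*num_patches) x patch_size output image. A minimal sketch of the mapping for an assumed 4x4 input with patch_size = 2, channels omitted:

    #include <cstdio>

    int main() {
        const int width = 4, height = 4, patch_size = 2;
        int patch_index = 0;
        for (int i = 0; i < height; i += patch_size) {
            for (int j = 0; j < width; j += patch_size) {
                for (int pi = 0; pi < patch_size; ++pi) {
                    for (int pj = 0; pj < patch_size; ++pj) {
                        // source pixel (row, col) -> destination pixel (row, col)
                        printf("in(%d,%d) -> out(%d,%d)\n",
                               i + pi, j + pj,
                               pi, patch_index * patch_size + pj);
                    }
                }
                patch_index++;
            }
        }
        return 0;
    }
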
-
-static bool encode_image_with_clip(clip_ctx * ctx_clip, int n_threads, const clip_image_u8 * img, float * image_embd, int * n_img_pos) {
-    // std::vector<clip_image_f32 *> img_res_v; // format VectN x H x W x RGB (N x 336 x 336 x 3), so interleaved RGB - different to the python implementation which is N x 3 x 336 x 336
-    clip_image_f32_batch_ptr img_res_v(clip_image_f32_batch_init());
-    if (!clip_image_preprocess(ctx_clip, img, img_res_v.get())) {
-        LOG_ERR("%s: unable to preprocess image\n", __func__);
-        return false;
-    }
-
-    const int64_t t_img_enc_start_us = ggml_time_us();
-
-    const char * mm_patch_merge_type = clip_patch_merge_type(ctx_clip);
-
-    const size_t n_imgs = clip_image_f32_batch_n_images(img_res_v.get());
-
-    if (clip_is_minicpmv(ctx_clip) || clip_is_qwen2vl(ctx_clip)) {
-        std::vector<float *> image_embd_v;
-        image_embd_v.resize(n_imgs);
-        clip_image_size load_image_size;
-
-        for (size_t i = 0; i < n_imgs; i++) {
-            const int64_t t_img_enc_step_start_us = ggml_time_us();
-            int nx = clip_image_f32_batch_nx(img_res_v.get(), i);
-            int ny = clip_image_f32_batch_ny(img_res_v.get(), i);
-            image_embd_v[i] = (float *)malloc(clip_embd_nbytes_by_img(ctx_clip, nx, ny));
-            int patch_size = 14;
-            load_image_size.width = nx;
-            load_image_size.height = ny;
-            clip_add_load_image_size(ctx_clip, &load_image_size);
-
-            bool encoded = false;
-            clip_image_f32 * img_res = clip_image_f32_get_img(img_res_v.get(), i);
-            if (clip_is_qwen2vl(ctx_clip)) {
-                encoded = clip_image_encode(ctx_clip, n_threads, img_res, image_embd_v[i]);
-            }
-            else {
-                encoded = clip_image_encode(ctx_clip, n_threads, reshape_by_patch(img_res, patch_size), image_embd_v[i]);
-            }
-
-            if (!encoded) {
-                LOG_ERR("Unable to encode image - spatial_unpad - subimage %d of %d\n", (int) i+1, (int) n_imgs);
-                return false;
-            }
-            const int64_t t_img_enc_step_batch_us = ggml_time_us();
-            LOG_INF("%s: step %d of %d encoded in %8.2f ms\n", __func__, (int)i+1, (int)n_imgs, (t_img_enc_step_batch_us - t_img_enc_step_start_us) / 1000.0);
-        }
-        const int64_t t_img_enc_batch_us = ggml_time_us();
-        LOG_INF("%s: all %d segments encoded in %8.2f ms\n", __func__, (int)n_imgs, (t_img_enc_batch_us - t_img_enc_start_us) / 1000.0);
-
-        int n_img_pos_out = 0;
-        for (size_t i = 0; i < image_embd_v.size(); i++) {
-            int nx = clip_image_f32_batch_nx(img_res_v.get(), i);
-            int ny = clip_image_f32_batch_ny(img_res_v.get(), i);
-            clip_image_f32 * img_res = clip_image_f32_get_img(img_res_v.get(), i);
-            std::memcpy(
-                image_embd + n_img_pos_out * clip_n_mmproj_embd(ctx_clip),
-                image_embd_v[i],
-                clip_embd_nbytes_by_img(ctx_clip, nx, ny));
-            n_img_pos_out += clip_n_output_tokens(ctx_clip, img_res);
-        }
-        *n_img_pos = n_img_pos_out;
-        for (size_t i = 0; i < image_embd_v.size(); i++) {
-            free(image_embd_v[i]);
-        }
-        image_embd_v.clear();
-        load_image_size.width = img->nx;
-        load_image_size.height = img->ny;
-        clip_add_load_image_size(ctx_clip, &load_image_size);
-        LOG_INF("%s: load_image_size %d %d\n", __func__, load_image_size.width, load_image_size.height);
-    }
-    else if (clip_is_glm(ctx_clip)){
-        struct clip_image_size * load_image_size = clip_image_size_init();
-        load_image_size->width  = clip_image_f32_batch_nx(img_res_v.get(), 0);
-        load_image_size->height = clip_image_f32_batch_ny(img_res_v.get(), 0);
-        clip_add_load_image_size(ctx_clip, load_image_size);
-
-        clip_image_f32 * img_res = clip_image_f32_get_img(img_res_v.get(), 0);
-        bool encoded = clip_image_encode(ctx_clip, n_threads, img_res, image_embd);
-        int pos = int(load_image_size->width/clip_get_patch_size(ctx_clip)/2);
-        *n_img_pos = (pos * pos + 2);
-        if (!encoded){
-            LOG_ERR("Unable to encode image \n");
-            return false;
-        }
-    }
-    else if (strcmp(mm_patch_merge_type, "spatial_unpad") != 0) {
-        // flat / default llava-1.5 type embedding
-        clip_image_f32 * img_res = clip_image_f32_get_img(img_res_v.get(), 0);
-        *n_img_pos = clip_n_output_tokens(ctx_clip, img_res);
-        bool encoded = clip_image_encode(ctx_clip, n_threads, img_res, image_embd); // image_embd shape is 576 x 4096
-        if (!encoded) {
-            LOG_ERR("Unable to encode image\n");
-
-            return false;
-        }
-    }
-    else {
-        // spatial_unpad llava-1.6 type embedding
-        // TODO: CLIP needs batching support - in HF the llm projection is separate after encoding, which might be a solution to quickly get batching working
-        std::vector<float *> image_embd_v;
-        image_embd_v.resize(n_imgs);
-        for (size_t i = 0; i < n_imgs; i++) {
-            clip_image_f32 * img_res = clip_image_f32_get_img(img_res_v.get(), i);
-            image_embd_v[i] = (float *)malloc(clip_embd_nbytes(ctx_clip)); // 576 patches * 4096 embeddings * 4 bytes = 9437184
-            const bool encoded = clip_image_encode(ctx_clip, n_threads, img_res, image_embd_v[i]); // image data is in 3x336x336 format and will be converted to 336x336x3 inside
-            if (!encoded) {
-                LOG_ERR("Unable to encode image - spatial_unpad - subimage %d of %d\n", (int) i+1, (int) n_imgs);
-                return false;
-            }
-        }
-        const int64_t t_img_enc_batch_us = ggml_time_us();
-        LOG_INF("%s: %d segments encoded in %8.2f ms\n", __func__, (int)n_imgs, (t_img_enc_batch_us - t_img_enc_start_us) / 1000.0);
-
-        const int32_t * image_grid = clip_image_grid(ctx_clip);
-        const size_t num_gridpoints = get_clip_image_grid_size(ctx_clip);
-
-        std::vector<std::pair<int, int>> grid_pinpoints;
-        for (size_t i = 0; i < num_gridpoints; i += 2) {
-            grid_pinpoints.push_back({image_grid[i], image_grid[i+1]});
-        }
-
-        const int32_t image_size = clip_get_image_size(ctx_clip);
-
-        struct clip_image_grid_shape grid_shape = get_anyres_image_grid_shape({img->nx,img->ny}, grid_pinpoints, image_size);
-
-        int n_img_pos_out;
-        clip_image_f32 * img_input = clip_image_f32_get_img(img_res_v.get(), 0);
-        clip_llava_handle_patches(ctx_clip, image_embd_v, grid_shape, image_embd, &n_img_pos_out, img_input);
-        *n_img_pos = n_img_pos_out;
-
-        for (size_t i = 0; i < image_embd_v.size(); i++) {
-            free(image_embd_v[i]);
-        }
-        image_embd_v.clear();
-
-        // debug image/segment/normalization content:
-        // clip_image_u8 * tmp = clip_image_u8_init();
-        // clip_image_convert_f32_to_u8(*image_feature, *tmp);
-        // clip_image_save_to_bmp(*tmp, "image_feature.bmp");
-    }
-
-    LOG_INF("%s: image embedding created: %d tokens\n", __func__, *n_img_pos);
-
-    const int64_t t_img_enc_end_us = ggml_time_us();
-    float t_img_enc_ms = (t_img_enc_end_us - t_img_enc_start_us) / 1000.0;
-
-    LOG_INF("\n%s: image encoded in %8.2f ms by CLIP (%8.2f ms per image patch)\n", __func__, t_img_enc_ms, t_img_enc_ms / *n_img_pos);
-
-    return true;
-}
-
-bool llava_validate_embed_size(const llama_context * ctx_llama, const clip_ctx * ctx_clip) {
-        // make sure that the correct mmproj was used, i.e., compare apples to apples
-    int n_llama_embd = llama_model_n_embd(llama_get_model(ctx_llama));
-    auto n_image_embd = clip_n_mmproj_embd(ctx_clip);
-    if (n_image_embd != n_llama_embd) {
-        LOG_ERR("%s: embedding dim of the multimodal projector (%d) is not equal to that of LLaMA (%d). Make sure that you use the correct mmproj file.\n", __func__, n_image_embd, n_llama_embd);
-        return false;
-    }
-    return true;
-}
-
-bool llava_image_embed_make_with_clip_img(clip_ctx * ctx_clip, int n_threads, const clip_image_u8 * img, float ** image_embd_out, int * n_img_pos_out) {
-    // Granite vision uses up to 10 patches + base patch
-    int num_max_patches = 11;
-    if (clip_is_minicpmv(ctx_clip)) {
-        num_max_patches = 10;
-    }
-    if (clip_is_glm(ctx_clip)) {
-        num_max_patches = 1;
-    }
-    float * image_embd;
-    if (clip_is_qwen2vl(ctx_clip)) {
-        // qwen2vl doesn't split the image into chunks, so `num_max_patches` is not needed.
-        image_embd = (float *)malloc(clip_embd_nbytes_by_img(ctx_clip, img->nx, img->ny));
-    } else {
-        image_embd = (float *)malloc(clip_embd_nbytes(ctx_clip)*num_max_patches); // TODO: base on gridsize/llava model
-    }
-    if (!image_embd) {
-        LOG_ERR("Unable to allocate memory for image embeddings\n");
-        return false;
-    }
-
-    int n_img_pos;
-    if (!encode_image_with_clip(ctx_clip, n_threads, img, image_embd, &n_img_pos)) {
-        LOG_ERR("%s: cannot encode image, aborting\n", __func__);
-        free(image_embd);
-        return false;
-    }
-    *image_embd_out = image_embd;
-    *n_img_pos_out = n_img_pos;
-
-    return true;
-}
-
-struct llava_embd_batch {
-    std::vector<llama_pos>      pos;
-    std::vector<int32_t>        n_seq_id;
-    std::vector<llama_seq_id>   seq_id_0;
-    std::vector<llama_seq_id *> seq_ids;
-    std::vector<int8_t>         logits;
-    llama_batch batch;
-    llava_embd_batch(float * embd, int32_t n_tokens, llama_pos pos_0, llama_seq_id seq_id) {
-        pos     .resize(n_tokens);
-        n_seq_id.resize(n_tokens);
-        seq_ids .resize(n_tokens + 1);
-        logits  .resize(n_tokens);
-        seq_id_0.resize(1);
-        seq_id_0[0] = seq_id;
-        seq_ids [n_tokens] = nullptr;
-        batch = {
-            /*n_tokens       =*/ n_tokens,
-            /*tokens         =*/ nullptr,
-            /*embd           =*/ embd,
-            /*pos            =*/ pos.data(),
-            /*n_seq_id       =*/ n_seq_id.data(),
-            /*seq_id         =*/ seq_ids.data(),
-            /*logits         =*/ logits.data(),
-        };
-        for (int i = 0; i < n_tokens; i++) {
-            batch.pos     [i] = pos_0 + i;
-            batch.n_seq_id[i] = 1;
-            batch.seq_id  [i] = seq_id_0.data();
-            batch.logits  [i] = false;
-        }
-    }
-};
-
-bool llava_eval_image_embed(llama_context * ctx_llama, const struct llava_image_embed * image_embed, int n_batch, int * n_past) {
-    int n_embd  = llama_model_n_embd(llama_get_model(ctx_llama));
-
-    for (int i = 0; i < image_embed->n_image_pos; i += n_batch) {
-        int n_eval = image_embed->n_image_pos - i;
-        if (n_eval > n_batch) {
-            n_eval = n_batch;
-        }
-        float * embd = image_embed->embed+i*n_embd;
-        llava_embd_batch llava_batch = llava_embd_batch(embd, n_eval, *n_past, 0);
-        if (llama_decode(ctx_llama, llava_batch.batch)) {
-            LOG_ERR("%s : failed to eval\n", __func__);
-            return false;
-        }
-        *n_past += n_eval;
-    }
-    return true;
-}
-
-struct llava_image_embed * llava_image_embed_make_with_bytes(struct clip_ctx * ctx_clip, int n_threads, const unsigned char * image_bytes, int image_bytes_length) {
-    clip_image_u8 * img = clip_image_u8_init();
-    if (!clip_image_load_from_bytes(image_bytes, image_bytes_length, img)) {
-        clip_image_u8_free(img);
-        LOG_ERR("%s: can't load image from bytes, is it a valid image?", __func__);
-        return NULL;
-    }
-
-    float* image_embed = NULL;
-    int n_image_pos = 0;
-    bool image_embed_result = llava_image_embed_make_with_clip_img(ctx_clip, n_threads, img, &image_embed, &n_image_pos);
-    if (!image_embed_result) {
-        clip_image_u8_free(img);
-        LOG_ERR("%s: couldn't embed the image\n", __func__);
-        return NULL;
-    }
-
-    clip_image_u8_free(img);
-    auto result = (llava_image_embed*)malloc(sizeof(llava_image_embed));
-    result->embed = image_embed;
-    result->n_image_pos = n_image_pos;
-    return result;
-}
-
-static bool load_file_to_bytes(const char* path, unsigned char** bytesOut, long *sizeOut) {
-    auto file = fopen(path, "rb");
-    if (file == NULL) {
-        LOG_ERR("%s: can't read file %s\n", __func__, path);
-        return false;
-    }
-
-    fseek(file, 0, SEEK_END);
-    auto fileSize = ftell(file);
-    fseek(file, 0, SEEK_SET);
-
-    auto buffer = (unsigned char *)malloc(fileSize); // Allocate memory to hold the file data
-    if (buffer == NULL) {
-        LOG_ERR("%s: failed to alloc %ld bytes for file %s\n", __func__, fileSize, path);
-        perror("Memory allocation error");
-        fclose(file);
-        return false;
-    }
-    errno = 0;
-    size_t ret = fread(buffer, 1, fileSize, file); // Read the file into the buffer
-    if (ferror(file)) {
-        LOG_ERR("read error: %s", strerror(errno));
-        free(buffer);
-        fclose(file);
-        return false;
-    }
-    if (ret != (size_t) fileSize) {
-        LOG_ERR("unexpectedly reached end of file");
-        free(buffer);
-        fclose(file);
-        return false;
-    }
-    fclose(file); // Close the file
-
-    *bytesOut = buffer;
-    *sizeOut = fileSize;
-    return true;
-}
-
-struct llava_image_embed * llava_image_embed_make_with_filename(struct clip_ctx * ctx_clip, int n_threads, const char * image_path) {
-    unsigned char* image_bytes;
-    long image_bytes_length;
-    auto loaded = load_file_to_bytes(image_path, &image_bytes, &image_bytes_length);
-    if (!loaded) {
-        LOG_ERR("%s: failed to load %s\n", __func__, image_path);
-        return NULL;
-    }
-
-    llava_image_embed *embed = llava_image_embed_make_with_bytes(ctx_clip, n_threads, image_bytes, image_bytes_length);
-    free(image_bytes);
-
-    return embed;
-}
-
-void llava_image_embed_free(struct llava_image_embed * embed) {
-    free(embed->embed);
-    free(embed);
-}
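
Taken together, the functions removed here formed llama.cpp's legacy llava embedding API, superseded by the mtmd files added below. A hedged usage sketch of the old call sequence (the helper is hypothetical; context setup and logging elided):

    #include "llava.h"

    static bool describe_image(struct clip_ctx * ctx_clip, struct llama_context * ctx_llama,
                               const char * path, int n_batch, int * n_past) {
        // the mmproj and LLM embedding sizes must agree
        if (!llava_validate_embed_size(ctx_llama, ctx_clip)) {
            return false;
        }
        struct llava_image_embed * embed =
            llava_image_embed_make_with_filename(ctx_clip, /*n_threads=*/4, path);
        if (!embed) {
            return false;
        }
        // decode the image embeddings into the context, advancing *n_past
        bool ok = llava_eval_image_embed(ctx_llama, embed, n_batch, n_past);
        llava_image_embed_free(embed);
        return ok;
    }
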
diff --git a/llama/llama.cpp/tools/mtmd/llava.h b/llama/llama.cpp/tools/mtmd/llava.h
deleted file mode 100644
index b6feb3027..000000000
--- a/llama/llama.cpp/tools/mtmd/llava.h
+++ /dev/null
@@ -1,49 +0,0 @@
-#ifndef LLAVA_H
-#define LLAVA_H
-
-#include "ggml.h"
-
-#ifdef LLAMA_SHARED
-#    if defined(_WIN32) && !defined(__MINGW32__)
-#        ifdef LLAMA_BUILD
-#            define LLAVA_API __declspec(dllexport)
-#        else
-#            define LLAVA_API __declspec(dllimport)
-#        endif
-#    else
-#        define LLAVA_API __attribute__ ((visibility ("default")))
-#    endif
-#else
-#    define LLAVA_API
-#endif
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-struct clip_ctx;
-struct llava_image_embed {
-    float * embed;
-    int n_image_pos;
-};
-
-/** sanity check for clip <-> llava embed size match */
-LLAVA_API bool llava_validate_embed_size(const struct llama_context * ctx_llama, const struct clip_ctx * ctx_clip);
-
-LLAVA_API bool llava_image_embed_make_with_clip_img(struct clip_ctx * ctx_clip, int n_threads, const struct clip_image_u8 * img, float ** image_embd_out, int * n_img_pos_out);
-
-/** build an image embed from image file bytes */
-LLAVA_API struct llava_image_embed * llava_image_embed_make_with_bytes(struct clip_ctx * ctx_clip, int n_threads, const unsigned char * image_bytes, int image_bytes_length);
-/** build an image embed from a path to an image filename */
-LLAVA_API struct llava_image_embed * llava_image_embed_make_with_filename(struct clip_ctx * ctx_clip, int n_threads, const char * image_path);
-/** free an embedding made with llava_image_embed_make_* */
-LLAVA_API void llava_image_embed_free(struct llava_image_embed * embed);
-
-/** write the image represented by embed into the llama context with batch size n_batch, starting at context pos n_past. on completion, n_past points to the next position in the context after the image embed. */
-LLAVA_API bool llava_eval_image_embed(struct llama_context * ctx_llama, const struct llava_image_embed * embed, int n_batch, int * n_past);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/llama/llama.cpp/tools/mtmd/mtmd-audio.cpp b/llama/llama.cpp/tools/mtmd/mtmd-audio.cpp
new file mode 100644
index 000000000..84bdc2777
--- /dev/null
+++ b/llama/llama.cpp/tools/mtmd/mtmd-audio.cpp
@@ -0,0 +1,769 @@
+#define _USE_MATH_DEFINES // for M_PI
+#include "mtmd-audio.h"
+
+#include <algorithm>
+#include <cmath>
+#include <cstdint>
+#include <cstring>
+#include <fstream>
+#include <thread>
+#include <vector>
+
+// most of the code here is copied from whisper.cpp
+
+// align x to upper multiple of n
+#define _ALIGN(x, n) ((((x) + (n) - 1) / (n)) * (n))
+
+namespace whisper_preprocessor {
+
+#define SIN_COS_N_COUNT WHISPER_N_FFT
+namespace {
+struct whisper_global_cache {
+    // In FFT, we frequently use sine and cosine operations with the same values.
+    // We can use precalculated values to speed up the process.
+    float sin_vals[SIN_COS_N_COUNT];
+    float cos_vals[SIN_COS_N_COUNT];
+
+    // Hann window (Use cosf to eliminate difference)
+    // ref: https://pytorch.org/docs/stable/generated/torch.hann_window.html
+    // ref: https://github.com/openai/whisper/blob/main/whisper/audio.py#L147
+    float hann_window[WHISPER_N_FFT];
+
+    whisper_global_cache() {
+        fill_sin_cos_table();
+        fill_hann_window(sizeof(hann_window)/sizeof(hann_window[0]), true, hann_window);
+    }
+
+    void fill_sin_cos_table() {
+        for (int i = 0; i < SIN_COS_N_COUNT; i++) {
+            double theta = (2 * M_PI * i) / SIN_COS_N_COUNT;
+            sin_vals[i] = sinf(theta);
+            cos_vals[i] = cosf(theta);
+        }
+    }
+
+    void fill_hann_window(int length, bool periodic, float * output) {
+        int offset = -1;
+        if (periodic) {
+            offset = 0;
+        }
+        for (int i = 0; i < length; i++) {
+            output[i] = 0.5 * (1.0 - cosf((2.0 * M_PI * i) / (length + offset)));
+        }
+    }
+} global_cache;
+}
+
+// naive Discrete Fourier Transform
+// input is real-valued
+// output is complex-valued
+static void dft(const float* in, int N, float* out) {
+    const int sin_cos_step = SIN_COS_N_COUNT / N;
+
+    for (int k = 0; k < N; k++) {
+        float re = 0;
+        float im = 0;
+
+        for (int n = 0; n < N; n++) {
+            int idx = (k * n * sin_cos_step) % (SIN_COS_N_COUNT); // t = 2*M_PI*k*n/N
+            re += in[n]*global_cache.cos_vals[idx]; // cos(t)
+            im -= in[n]*global_cache.sin_vals[idx]; // sin(t)
+        }
+
+        out[k*2 + 0] = re;
+        out[k*2 + 1] = im;
+    }
+}
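
The idx mapping above is just t = 2*pi*k*n/N sampled from the precomputed tables. A self-contained cross-check using direct sinf/cosf calls instead of the cache (an illustration, not part of the patch), fed a unit impulse whose spectrum is 1 + 0i at every bin:

    #define _USE_MATH_DEFINES
    #include <cmath>
    #include <cstdio>

    int main() {
        const int N = 8;
        float in[8] = {1.0f}; // unit impulse; remaining samples are zero
        for (int k = 0; k < N; k++) {
            float re = 0.0f, im = 0.0f;
            for (int n = 0; n < N; n++) {
                float t = 2.0f * (float)M_PI * k * n / N;
                re += in[n] * cosf(t);
                im -= in[n] * sinf(t);
            }
            printf("X[%d] = %.3f %+.3fi\n", k, re, im); // prints 1.000 +0.000i
        }
        return 0;
    }
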
+
+// Cooley-Tukey FFT
+// poor man's implementation - use something better
+// input is real-valued
+// output is complex-valued
+static void fft(float* in, int N, float* out) {
+    if (N == 1) {
+        out[0] = in[0];
+        out[1] = 0;
+        return;
+    }
+
+    const int half_N = N / 2;
+    if (N - half_N*2 == 1) {
+        dft(in, N, out);
+        return;
+    }
+
+    float* even = in + N;
+    for (int i = 0; i < half_N; ++i) {
+        even[i]= in[2*i];
+    }
+    float* even_fft = out + 2 * N;
+    fft(even, half_N, even_fft);
+
+    float* odd = even;
+    for (int i = 0; i < half_N; ++i) {
+        odd[i] = in[2*i + 1];
+    }
+    float* odd_fft = even_fft + N;
+    fft(odd, half_N, odd_fft);
+
+    const int sin_cos_step = SIN_COS_N_COUNT / N;
+    for (int k = 0; k < half_N; k++) {
+        int idx = k * sin_cos_step; // t = 2*M_PI*k/N
+        float re = global_cache.cos_vals[idx]; // cos(t)
+        float im = -global_cache.sin_vals[idx]; // sin(t)
+
+        float re_odd = odd_fft[2*k + 0];
+        float im_odd = odd_fft[2*k + 1];
+
+        out[2*k + 0] = even_fft[2*k + 0] + re*re_odd - im*im_odd;
+        out[2*k + 1] = even_fft[2*k + 1] + re*im_odd + im*re_odd;
+
+        out[2*(k + half_N) + 0] = even_fft[2*k + 0] - re*re_odd + im*im_odd;
+        out[2*(k + half_N) + 1] = even_fft[2*k + 1] - re*im_odd - im*re_odd;
+    }
+}
+
+static void log_mel_spectrogram_worker_thread(int ith, const float * hann, const std::vector<float> & samples,
+                                              int n_samples, int frame_size, int frame_step, int n_threads,
+                                              const whisper_filters & filters, whisper_mel & mel) {
+    std::vector<float> fft_in(frame_size * 2, 0.0);
+    std::vector<float> fft_out(frame_size * 2 * 2 * 2);
+
+    int n_fft = filters.n_fft;
+    int i = ith;
+
+    // make sure n_fft == 1 + (WHISPER_N_FFT / 2), bin_0 to bin_nyquist
+    WHISPER_ASSERT(n_fft == 1 + (frame_size / 2));
+
+    // calculate FFT only when fft_in are not all zero
+    for (; i < std::min(n_samples / frame_step + 1, mel.n_len); i += n_threads) {
+        const int offset = i * frame_step;
+
+        // apply Hann window (~10% faster)
+        for (int j = 0; j < std::min(frame_size, n_samples - offset); j++) {
+            fft_in[j] = hann[j] * samples[offset + j];
+        }
+
+        // fill the rest with zeros
+        if (n_samples - offset < frame_size) {
+            std::fill(fft_in.begin() + (n_samples - offset), fft_in.end(), 0.0);
+        }
+
+        // FFT
+        fft(fft_in.data(), frame_size, fft_out.data());
+
+        // Calculate modulus^2 of complex numbers
+        // Use pow(fft_out[2 * j + 0], 2) + pow(fft_out[2 * j + 1], 2) causes inference quality problem? Interesting.
+        for (int j = 0; j < n_fft; j++) {
+            fft_out[j] = (fft_out[2 * j + 0] * fft_out[2 * j + 0] + fft_out[2 * j + 1] * fft_out[2 * j + 1]);
+        }
+
+        // mel spectrogram
+        for (int j = 0; j < mel.n_mel; j++) {
+            double sum = 0.0;
+            // unroll loop (suggested by GH user @lunixbochs)
+            int k = 0;
+            for (k = 0; k < n_fft - 3; k += 4) {
+                sum +=
+                        fft_out[k + 0] * filters.data[j * n_fft + k + 0] +
+                        fft_out[k + 1] * filters.data[j * n_fft + k + 1] +
+                        fft_out[k + 2] * filters.data[j * n_fft + k + 2] +
+                        fft_out[k + 3] * filters.data[j * n_fft + k + 3];
+            }
+            // handle n_fft remainder
+            for (; k < n_fft; k++) {
+                sum += fft_out[k] * filters.data[j * n_fft + k];
+            }
+            sum = log10(std::max(sum, 1e-10));
+            mel.data[j * mel.n_len + i] = sum;
+        }
+    }
+
+    // Otherwise fft_out are all zero
+    double sum = log10(1e-10);
+    for (; i < mel.n_len; i += n_threads) {
+        for (int j = 0; j < mel.n_mel; j++) {
+            mel.data[j * mel.n_len + i] = sum;
+        }
+    }
+}
+
+// ref: https://github.com/openai/whisper/blob/main/whisper/audio.py#L110-L157
+static bool log_mel_spectrogram(
+        const float * samples,
+        const int   n_samples,
+        const int   /*sample_rate*/,
+        const int   frame_size,
+        const int   frame_step,
+        const int   n_mel,
+        const int   n_threads,
+        const whisper_filters & filters,
+        const bool   debug,
+        whisper_mel & mel) {
+    //const int64_t t_start_us = ggml_time_us();
+
+    // Hann window
+    WHISPER_ASSERT(frame_size == WHISPER_N_FFT && "Unsupported frame_size");
+    const float * hann = global_cache.hann_window;
+
+    // Calculate the length of padding
+    int64_t stage_1_pad = WHISPER_SAMPLE_RATE * 30;
+    int64_t stage_2_pad = frame_size / 2;
+
+    // Initialize a vector and copy data from C array to it.
+    std::vector<float> samples_padded;
+    samples_padded.resize(n_samples + stage_1_pad + stage_2_pad * 2);
+    std::copy(samples, samples + n_samples, samples_padded.begin() + stage_2_pad);
+
+    // pad 30 seconds of zeros at the end of audio (480,000 samples) + reflective pad 200 samples at the end of audio
+    std::fill(samples_padded.begin() + n_samples + stage_2_pad, samples_padded.begin() + n_samples + stage_1_pad + 2 * stage_2_pad, 0);
+
+    // reflective pad 200 samples at the beginning of audio
+    std::reverse_copy(samples + 1, samples + 1 + stage_2_pad, samples_padded.begin());
+
+    mel.n_mel     = n_mel;
+    // https://github.com/pytorch/pytorch/blob/main/aten/src/ATen/native/SpectralOps.cpp#L936
+    // Calculate number of frames + remove the last frame
+    mel.n_len     = (samples_padded.size() - frame_size) / frame_step;
+    // Calculate semi-padded sample length to ensure compatibility
+    mel.n_len_org = 1 + (n_samples + stage_2_pad - frame_size) / frame_step;
+    mel.data.resize(mel.n_mel * mel.n_len);
+
+    {
+        std::vector<std::thread> workers(n_threads - 1);
+        for (int iw = 0; iw < n_threads - 1; ++iw) {
+            workers[iw] = std::thread(
+                    log_mel_spectrogram_worker_thread, iw + 1, hann, std::cref(samples_padded),
+                    n_samples + stage_2_pad, frame_size, frame_step, n_threads,
+                    std::cref(filters), std::ref(mel));
+        }
+
+        // main thread
+        log_mel_spectrogram_worker_thread(0, hann, samples_padded, n_samples + stage_2_pad, frame_size, frame_step, n_threads, filters, mel);
+
+        for (int iw = 0; iw < n_threads - 1; ++iw) {
+            workers[iw].join();
+        }
+    }
+
+    // clamping and normalization
+    double mmax = -1e20;
+    for (int i = 0; i < mel.n_mel*mel.n_len; i++) {
+        if (mel.data[i] > mmax) {
+            mmax = mel.data[i];
+        }
+    }
+
+    mmax -= 8.0;
+
+    for (int i = 0; i < mel.n_mel*mel.n_len; i++) {
+        if (mel.data[i] < mmax) {
+            mel.data[i] = mmax;
+        }
+
+        mel.data[i] = (mel.data[i] + 4.0)/4.0;
+    }
+
+    // Dump log_mel_spectrogram
+    if (debug) {
+        std::ofstream outFile("log_mel_spectrogram.json");
+        outFile << "[";
+        for (uint64_t i = 0; i < mel.data.size() - 1; i++) {
+            outFile << mel.data[i] << ", ";
+        }
+        outFile << mel.data[mel.data.size() - 1] << "]";
+        outFile.close();
+    }
+
+    return true;
+}
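
To make the padding arithmetic concrete, a small sketch for a hypothetical 10-second clip at 16 kHz, with the WHISPER_* constants inlined:

    #include <cstdio>

    int main() {
        const int sample_rate = 16000, n_fft = 400, hop = 160;
        const int n_samples   = 10 * sample_rate;        // 160000
        const int stage_1_pad = sample_rate * 30;        // 30 s of zeros
        const int stage_2_pad = n_fft / 2;               // 200 reflective samples
        const int padded      = n_samples + stage_1_pad + 2 * stage_2_pad; // 640400
        printf("n_len     = %d\n", (padded - n_fft) / hop);                      // 4000
        printf("n_len_org = %d\n", 1 + (n_samples + stage_2_pad - n_fft) / hop); // 999
        return 0;
    }

Of those 4000 frames, preprocess_audio below keeps one full 3000-frame (30 s) chunk and drops the padded remainder.
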
+
+bool preprocess_audio(
+        const float * samples,
+        size_t n_samples,
+        const whisper_filters & filters,
+        std::vector<whisper_mel> & output) {
+
+    if (n_samples == 0) {
+        // empty audio
+        return false;
+    }
+
+    whisper_mel out_full;
+    bool ok = log_mel_spectrogram(
+                samples,
+                n_samples,
+                COMMON_SAMPLE_RATE,
+                WHISPER_N_FFT,
+                WHISPER_HOP_LENGTH,
+                filters.n_mel,
+                4, // n_threads
+                filters,
+                false, // debug
+                out_full);
+    if (!ok) {
+        return false;
+    }
+
+    // because the cgraph in clip.cpp only accepts 3000 frames each, we need to split the mel
+    // we always expect the mel to have 3000 silent frames at the end
+    // printf("n_len %d\n", out_full.n_len);
+    const size_t frames_per_chunk = 3000;
+    GGML_ASSERT((size_t)out_full.n_len > frames_per_chunk);
+    for (size_t off = 0; off < (size_t)out_full.n_len; off += frames_per_chunk) {
+        int n_len = std::min(frames_per_chunk, (size_t)out_full.n_len - off);
+        if ((size_t)n_len < frames_per_chunk) {
+            break; // last incomplete chunk will always be a padded chunk, safe to ignore
+        }
+
+        whisper_mel out_chunk;
+        out_chunk.n_len     = n_len;
+        out_chunk.n_mel     = out_full.n_mel;
+        out_chunk.n_len_org = out_full.n_mel; // unused
+        out_chunk.data.reserve(out_chunk.n_mel * out_chunk.n_len);
+
+        for (int i = 0; i < out_full.n_mel; i++) {
+            auto src = out_full.data.begin() + i*out_full.n_len + off;
+            out_chunk.data.insert(out_chunk.data.end(), src, src + frames_per_chunk);
+        }
+
+        output.push_back(std::move(out_chunk));
+    }
+
+    return true;
+}
+
+} // namespace whisper_preprocessor
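
An end-to-end sketch of the preprocessor paired with the precalculated 128-bin filter bank defined in the next section (synthetic sine input for illustration; real callers pass decoded PCM):

    #include "mtmd-audio.h"

    #include <cmath>
    #include <cstdio>
    #include <vector>

    int main() {
        std::vector<float> pcm(WHISPER_SAMPLE_RATE); // 1 s of 16 kHz audio
        for (size_t i = 0; i < pcm.size(); ++i) {
            pcm[i] = sinf(2.0f * 3.14159265f * 440.0f * (float)i / WHISPER_SAMPLE_RATE);
        }
        const auto filters = whisper_precalc_filters::get_128_bins();
        std::vector<whisper_preprocessor::whisper_mel> chunks;
        if (!whisper_preprocessor::preprocess_audio(pcm.data(), pcm.size(), filters, chunks)) {
            return 1;
        }
        // one fixed 3000-frame (30 s) mel window of 128 bins for this input
        printf("chunks %zu, n_mel %d, n_len %d\n", chunks.size(), chunks[0].n_mel, chunks[0].n_len);
        return 0;
    }
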
+
+
+// precalculated mel filter banks
+// values are multiplied by 1000.0 to save space, and will be divided by 1000.0 in the end of the function
+//
+// generated from python code:
+//
+// from numpy import load
+// data = load('mel_filters.npz')
+// lst = data.files
+// for item in lst:
+//   print(item)
+//   print(data[item].shape)
+//   n_mel = data[item].shape[0]
+//   n_fft = data[item].shape[1]
+//   for i, row in enumerate(data[item]):
+//     for j, val in enumerate(row):
+//       val = val * 1000.0
+//       if val != 0:
+//         print(f"data[{i*n_fft + j}] = {val:.6f};")
+
+namespace whisper_precalc_filters {
+
+whisper_preprocessor::whisper_filters get_128_bins() {
+    whisper_preprocessor::whisper_filters filters;
+    filters.n_mel = 128;
+    filters.n_fft = 201;
+    std::vector<float> data(filters.n_mel * filters.n_fft, 0.0f);
+
+    data[1] = 12.37398665;
+    data[202] = 30.39256483;
+    data[404] = 24.74797331;
+    data[605] = 18.01857911;
+    data[807] = 37.12195903;
+    data[1008] = 5.64459199;
+    data[1009] = 6.72939420;
+    data[1210] = 36.03715822;
+    data[1412] = 19.10337992;
+    data[1613] = 23.66316877;
+    data[1815] = 31.47736564;
+    data[2016] = 11.28918398;
+    data[2017] = 1.08480197;
+    data[2218] = 41.68175161;
+    data[2420] = 13.45878839;
+    data[2621] = 29.30776216;
+    data[2823] = 25.83277412;
+    data[3024] = 16.93377644;
+    data[3226] = 38.20675984;
+    data[3427] = 4.55979025;
+    data[3428] = 7.81419594;
+    data[3629] = 34.95235741;
+    data[3831] = 20.18818259;
+    data[4032] = 22.57836796;
+    data[4234] = 32.56217018;
+    data[4435] = 10.20438317;
+    data[4436] = 2.16960395;
+    data[4637] = 40.59694707;
+    data[4839] = 14.54358920;
+    data[5040] = 28.22295949;
+    data[5242] = 26.91757679;
+    data[5443] = 15.84897563;
+    data[5645] = 39.29156065;
+    data[5846] = 3.47498828;
+    data[5847] = 8.89899861;
+    data[6048] = 33.86755288;
+    data[6250] = 21.27298526;
+    data[6451] = 21.49356715;
+    data[6653] = 33.64697099;
+    data[6854] = 9.11958050;
+    data[6855] = 3.25440569;
+    data[7056] = 39.51214626;
+    data[7258] = 15.62839188;
+    data[7459] = 27.13815868;
+    data[7661] = 28.00237760;
+    data[7862] = 14.76417296;
+    data[8064] = 40.37636518;
+    data[8265] = 2.38068704;
+    data[8266] = 10.20263787;
+    data[8467] = 31.61146119;
+    data[8669] = 24.54700135;
+    data[8870] = 15.32919332;
+    data[8871] = 1.66583748;
+    data[9072] = 36.72905266;
+    data[9274] = 20.09709924;
+    data[9475] = 16.93102531;
+    data[9476] = 2.90265540;
+    data[9677] = 32.84499049;
+    data[9879] = 23.52004871;
+    data[10080] = 11.03894413;
+    data[10081] = 10.72582975;
+    data[10282] = 22.71829173;
+    data[10484] = 32.27872774;
+    data[10685] = 0.11626833;
+    data[10686] = 22.85348251;
+    data[10887] = 8.56344029;
+    data[10888] = 14.97978810;
+    data[11089] = 15.51398356;
+    data[11090] = 8.51490628;
+    data[11291] = 21.10680379;
+    data[11292] = 3.32652032;
+    data[11493] = 25.47064796;
+    data[11695] = 27.35907957;
+    data[11896] = 0.65853616;
+    data[11897] = 23.83812517;
+    data[12098] = 3.44359246;
+    data[12099] = 21.22455277;
+    data[12300] = 5.35842171;
+    data[12301] = 19.42555793;
+    data[12502] = 6.49324711;
+    data[12503] = 18.35542172;
+    data[12704] = 6.93138083;
+    data[12705] = 17.93504693;
+    data[12906] = 6.74968259;
+    data[12907] = 18.09151843;
+    data[13108] = 6.01899112;
+    data[13109] = 18.75767298;
+    data[13310] = 4.80452832;
+    data[13311] = 19.87172849;
+    data[13512] = 3.16627859;
+    data[13513] = 21.37690969;
+    data[13514] = 1.25317345;
+    data[13714] = 1.15934468;
+    data[13715] = 20.80361731;
+    data[13716] = 4.04486805;
+    data[13917] = 17.55363122;
+    data[13918] = 7.08320038;
+    data[14119] = 14.07538634;
+    data[14120] = 10.32655034;
+    data[14321] = 10.40921453;
+    data[14322] = 13.73696327;
+    data[14523] = 6.59187697;
+    data[14524] = 17.27988198;
+    data[14525] = 1.46804214;
+    data[14725] = 2.65681883;
+    data[14726] = 18.09193194;
+    data[14727] = 5.85655728;
+    data[14928] = 13.34277913;
+    data[14929] = 10.28267574;
+    data[15130] = 8.56800377;
+    data[15131] = 14.72230814;
+    data[15132] = 1.04039861;
+    data[15332] = 3.79085587;
+    data[15333] = 17.14678481;
+    data[15334] = 6.11609267;
+    data[15535] = 11.75929047;
+    data[15536] = 11.13393717;
+    data[15737] = 6.43857848;
+    data[15738] = 16.07806236;
+    data[15739] = 4.23917221;
+    data[15939] = 1.19989377;
+    data[15940] = 12.75671553;
+    data[15941] = 9.65298992;
+    data[16142] = 7.06935255;
+    data[16143] = 14.94054683;
+    data[16144] = 4.19024844;
+    data[16344] = 1.51483389;
+    data[16345] = 12.00899947;
+    data[16346] = 9.84823331;
+    data[16547] = 6.10224018;
+    data[16548] = 15.33857174;
+    data[16549] = 5.57676842;
+    data[16749] = 0.36827257;
+    data[16750] = 9.89749376;
+    data[16751] = 11.35340426;
+    data[16752] = 2.05122307;
+    data[16952] = 3.89297144;
+    data[16953] = 12.97352277;
+    data[16954] = 8.06631614;
+    data[17155] = 6.74493238;
+    data[17156] = 13.85874674;
+    data[17157] = 5.41190524;
+    data[17357] = 0.74220158;
+    data[17358] = 8.98779090;
+    data[17359] = 11.37871388;
+    data[17360] = 3.32958088;
+    data[17560] = 2.82313535;
+    data[17561] = 10.68049297;
+    data[17562] = 9.43340641;
+    data[17563] = 1.76325557;
+    data[17763] = 4.39018616;
+    data[17764] = 11.87758986;
+    data[17765] = 7.97005836;
+    data[17766] = 0.66104700;
+    data[17966] = 5.49466675;
+    data[17967] = 12.62953598;
+    data[17968] = 6.93987962;
+    data[18169] = 6.18401915;
+    data[18170] = 12.93473132;
+    data[18171] = 6.29778765;
+    data[18371] = 0.02325210;
+    data[18372] = 6.50206627;
+    data[18373] = 12.32661773;
+    data[18374] = 6.00216538;
+    data[18574] = 0.31548753;
+    data[18575] = 6.48925547;
+    data[18576] = 12.04130240;
+    data[18577] = 6.01462880;
+    data[18777] = 0.29979556;
+    data[18778] = 6.18288014;
+    data[18779] = 12.04272825;
+    data[18780] = 6.29981188;
+    data[18781] = 0.55689598;
+    data[18980] = 0.01120471;
+    data[18981] = 5.61729167;
+    data[18982] = 11.22337859;
+    data[18983] = 6.82516303;
+    data[18984] = 1.35264499;
+    data[19184] = 4.82410006;
+    data[19185] = 10.16623247;
+    data[19186] = 7.56075513;
+    data[19187] = 2.34590308;
+    data[19387] = 3.83235747;
+    data[19388] = 8.92296247;
+    data[19389] = 8.47910438;
+    data[19390] = 3.50978645;
+    data[19590] = 2.66873185;
+    data[19591] = 7.51965167;
+    data[19592] = 9.55500547;
+    data[19593] = 4.81966138;
+    data[19594] = 0.08431751;
+    data[19793] = 1.35767367;
+    data[19794] = 5.98019501;
+    data[19795] = 10.60271543;
+    data[19796] = 6.25298498;
+    data[19797] = 1.74059917;
+    data[19997] = 4.32644226;
+    data[19998] = 8.73131864;
+    data[19999] = 7.78916525;
+    data[20000] = 3.48923868;
+    data[20200] = 2.57835095;
+    data[20201] = 6.77582854;
+    data[20202] = 9.40941647;
+    data[20203] = 5.31194592;
+    data[20204] = 1.21447595;
+    data[20403] = 0.75411191;
+    data[20404] = 4.75395704;
+    data[20405] = 8.75380263;
+    data[20406] = 7.19209015;
+    data[20407] = 3.28754401;
+    data[20607] = 2.68179690;
+    data[20608] = 6.49331464;
+    data[20609] = 9.11457930;
+    data[20610] = 5.39387390;
+    data[20611] = 1.67316827;
+    data[20810] = 0.57394296;
+    data[20811] = 4.20600036;
+    data[20812] = 7.83805829;
+    data[20813] = 7.52023002;
+    data[20814] = 3.97470826;
+    data[20815] = 0.42918732;
+    data[21014] = 1.90464477;
+    data[21015] = 5.36569161;
+    data[21016] = 8.82673822;
+    data[21017] = 6.27609482;
+    data[21018] = 2.89750961;
+    data[21218] = 2.89885257;
+    data[21219] = 6.19694078;
+    data[21220] = 8.56699049;
+    data[21221] = 5.34748193;
+    data[21222] = 2.12797290;
+    data[21421] = 0.44750227;
+    data[21422] = 3.59030394;
+    data[21423] = 6.73310598;
+    data[21424] = 7.77023612;
+    data[21425] = 4.70231380;
+    data[21426] = 1.63439126;
+    data[21625] = 1.01536023;
+    data[21626] = 4.01018746;
+    data[21627] = 7.00501446;
+    data[21628] = 7.23442994;
+    data[21629] = 4.31095669;
+    data[21630] = 1.38748321;
+    data[21829] = 1.33348850;
+    data[21830] = 4.18730825;
+    data[21831] = 7.04112789;
+    data[21832] = 6.93188375;
+    data[21833] = 4.14605811;
+    data[21834] = 1.36023236;
+    data[22033] = 1.42879714;
+    data[22034] = 4.14824858;
+    data[22035] = 6.86769979;
+    data[22036] = 6.83705276;
+    data[22037] = 4.18239459;
+    data[22038] = 1.52773573;
+    data[22237] = 1.32610439;
+    data[22238] = 3.91751388;
+    data[22239] = 6.50892360;
+    data[22240] = 6.92639686;
+    data[22241] = 4.39672917;
+    data[22242] = 1.86706171;
+    data[22441] = 1.04827771;
+    data[22442] = 3.51767405;
+    data[22443] = 5.98707050;
+    data[22444] = 7.17824046;
+    data[22445] = 4.76767914;
+    data[22446] = 2.35711760;
+    data[22645] = 0.61636406;
+    data[22646] = 2.96949223;
+    data[22647] = 5.32262027;
+    data[22648] = 7.57265091;
+    data[22649] = 5.27558755;
+    data[22650] = 2.97852419;
+    data[22651] = 0.68146095;
+    data[22849] = 0.04971400;
+    data[22850] = 2.29204819;
+    data[22851] = 4.53438237;
+    data[22852] = 6.77671656;
+    data[22853] = 5.90240723;
+    data[22854] = 3.71349836;
+    data[22855] = 1.52458926;
+    data[23054] = 1.50285335;
+    data[23055] = 3.63961048;
+    data[23056] = 5.77636715;
+    data[23057] = 6.63159089;
+    data[23058] = 4.54574358;
+    data[23059] = 2.45989650;
+    data[23060] = 0.37404924;
+    data[23258] = 0.61795861;
+    data[23259] = 2.65410915;
+    data[23260] = 4.69025923;
+    data[23261] = 6.72641024;
+    data[23262] = 5.46034705;
+    data[23263] = 3.47270933;
+    data[23264] = 1.48507138;
+    data[23463] = 1.59233576;
+    data[23464] = 3.53261665;
+    data[23465] = 5.47289755;
+    data[23466] = 6.44368259;
+    data[23467] = 4.54962999;
+    data[23468] = 2.65557761;
+    data[23469] = 0.76152512;
+    data[23667] = 0.46749352;
+    data[23668] = 2.31641904;
+    data[23669] = 4.16534441;
+    data[23670] = 6.01426978;
+    data[23671] = 5.67844696;
+    data[23672] = 3.87357362;
+    data[23673] = 2.06870004;
+    data[23674] = 0.26382666;
+    data[23872] = 1.05349103;
+    data[23873] = 2.81536230;
+    data[23874] = 4.57723346;
+    data[23875] = 6.33910485;
+    data[23876] = 5.12815686;
+    data[23877] = 3.40826320;
+    data[23878] = 1.68837002;
+    data[24077] = 1.43350090;
+    data[24078] = 3.11241671;
+    data[24079] = 4.79133241;
+    data[24080] = 6.40943693;
+    data[24081] = 4.77052201;
+    data[24082] = 3.13160778;
+    data[24083] = 1.49269309;
+    data[24281] = 0.02932359;
+    data[24282] = 1.62918994;
+    data[24283] = 3.22905602;
+    data[24284] = 4.82892245;
+    data[24285] = 6.14671456;
+    data[24286] = 4.58496623;
+    data[24287] = 3.02321767;
+    data[24288] = 1.46146910;
+    data[24486] = 0.13601698;
+    data[24487] = 1.66055572;
+    data[24488] = 3.18509457;
+    data[24489] = 4.70963307;
+    data[24490] = 6.04072399;
+    data[24491] = 4.55250870;
+    data[24492] = 3.06429295;
+    data[24493] = 1.57607743;
+    data[24494] = 0.08786193;
+    data[24691] = 0.09328097;
+    data[24692] = 1.54603878;
+    data[24693] = 2.99879676;
+    data[24694] = 4.45155473;
+    data[24695] = 5.90431225;
+    data[24696] = 4.65566106;
+    data[24697] = 3.23751615;
+    data[24698] = 1.81937125;
+    data[24699] = 0.40122634;
+    data[24897] = 1.30262633;
+    data[24898] = 2.68698297;
+    data[24899] = 4.07133950;
+    data[24900] = 5.45569602;
+    data[24901] = 4.87832492;
+    data[24902] = 3.52695142;
+    data[24903] = 2.17557792;
+    data[24904] = 0.82420459;
+    data[25102] = 0.94595028;
+    data[25103] = 2.26512621;
+    data[25104] = 3.58430226;
+    data[25105] = 4.90347855;
+    data[25106] = 5.20569785;
+    data[25107] = 3.91795207;
+    data[25108] = 2.63020652;
+    data[25109] = 1.34246063;
+    data[25110] = 0.05471494;
+    data[25307] = 0.49037894;
+    data[25308] = 1.74744334;
+    data[25309] = 3.00450763;
+    data[25310] = 4.26157191;
+    data[25311] = 5.51863620;
+    data[25312] = 4.39707236;
+    data[25313] = 3.16995848;
+    data[25314] = 1.94284460;
+    data[25315] = 0.71573065;
+    data[25513] = 1.14698056;
+    data[25514] = 2.34485767;
+    data[25515] = 3.54273478;
+    data[25516] = 4.74061165;
+    data[25517] = 4.95198462;
+    data[25518] = 3.78264743;
+    data[25519] = 2.61331047;
+    data[25520] = 1.44397374;
+    data[25521] = 0.27463681;
+    data[25718] = 0.47569509;
+    data[25719] = 1.61717169;
+    data[25720] = 2.75864848;
+    data[25721] = 3.90012516;
+    data[25722] = 5.04160160;
+    data[25723] = 4.45712078;
+    data[25724] = 3.34284059;
+    data[25725] = 2.22856039;
+    data[25726] = 1.11428020;
+
+    for (auto & val : data) {
+        val /= 1000.0f;
+    }
+
+    filters.data = std::move(data);
+    return filters;
+}
+
+} // namespace whisper_precalc_filters
diff --git a/llama/llama.cpp/tools/mtmd/mtmd-audio.h b/llama/llama.cpp/tools/mtmd/mtmd-audio.h
new file mode 100644
index 000000000..b7b940aff
--- /dev/null
+++ b/llama/llama.cpp/tools/mtmd/mtmd-audio.h
@@ -0,0 +1,47 @@
+#pragma once
+
+#include "ggml.h"
+
+#include <cstddef>
+#include <cstdint>
+#include <vector>
+
+#define WHISPER_ASSERT GGML_ASSERT
+
+#define WHISPER_SAMPLE_RATE 16000
+#define WHISPER_N_FFT       400
+#define WHISPER_HOP_LENGTH  160
+#define WHISPER_CHUNK_SIZE  30
+
+#define COMMON_SAMPLE_RATE 16000
+
+namespace whisper_preprocessor {
+
+struct whisper_mel {
+    int n_len;
+    int n_len_org;
+    int n_mel;
+
+    std::vector<float> data;
+};
+
+struct whisper_filters {
+    int32_t n_mel;
+    int32_t n_fft;
+
+    std::vector<float> data;
+};
+
+bool preprocess_audio(
+        const float * samples,
+        size_t n_samples,
+        const whisper_filters & filters,
+        std::vector<whisper_mel> & output);
+
+} // namespace whisper_preprocessor
+
+namespace whisper_precalc_filters {
+
+whisper_preprocessor::whisper_filters get_128_bins();
+
+} // namespace whisper_precalc_filters
diff --git a/llama/llama.cpp/tools/mtmd/mtmd-helper.cpp b/llama/llama.cpp/tools/mtmd/mtmd-helper.cpp
new file mode 100644
index 000000000..686f42f39
--- /dev/null
+++ b/llama/llama.cpp/tools/mtmd/mtmd-helper.cpp
@@ -0,0 +1,460 @@
+// fix problem with std::min and std::max
+#if defined(_WIN32)
+#define WIN32_LEAN_AND_MEAN
+#ifndef NOMINMAX
+#   define NOMINMAX
+#endif
+#include <windows.h>
+#endif
+
+#include "mtmd.h"
+#include "mtmd-helper.h"
+#include "llama.h"
+
+#include <algorithm>
+#include <cinttypes>
+#include <vector>
+
+//#define MTMD_AUDIO_DEBUG
+
+#define MINIAUDIO_IMPLEMENTATION
+#ifndef MTMD_AUDIO_DEBUG
+#   define MA_NO_ENCODING
+#endif
+#define MA_NO_DEVICE_IO
+#define MA_NO_RESOURCE_MANAGER
+#define MA_NO_NODE_GRAPH
+#define MA_NO_ENGINE
+#define MA_NO_GENERATION
+#define MA_API static
+#include "miniaudio/miniaudio.h"
+
+#define STB_IMAGE_IMPLEMENTATION
+#include "stb/stb_image.h"
+
+#define LOG_INF(...) fprintf(stdout, __VA_ARGS__)
+#define LOG_ERR(...) fprintf(stderr, __VA_ARGS__)
+
+size_t mtmd_helper_get_n_tokens(const mtmd_input_chunks * chunks) {
+    size_t n_tokens = 0;
+    for (size_t i = 0; i < mtmd_input_chunks_size(chunks); i++) {
+        auto chunk = mtmd_input_chunks_get(chunks, i);
+        n_tokens += mtmd_input_chunk_get_n_tokens(chunk);
+    }
+    return n_tokens;
+}
+
+llama_pos mtmd_helper_get_n_pos(const mtmd_input_chunks * chunks) {
+    llama_pos n_pos = 0;
+    for (size_t i = 0; i < mtmd_input_chunks_size(chunks); i++) {
+        auto chunk = mtmd_input_chunks_get(chunks, i);
+        n_pos += mtmd_input_chunk_get_n_pos(chunk);
+    }
+    return n_pos;
+}
+
+// helper struct to make working with embd batch easier
+// note: this will be removed after llama_batch_ext refactoring
+struct decode_embd_batch {
+    int n_pos_per_embd;
+    int n_mmproj_embd;
+    std::vector<llama_pos>      pos;
+    std::vector<llama_pos>      pos_view; // used by mrope
+    std::vector<int32_t>        n_seq_id;
+    std::vector<llama_seq_id>   seq_id_0;
+    std::vector<llama_seq_id *> seq_ids;
+    std::vector<int8_t>         logits;
+    llama_batch batch;
+    decode_embd_batch(float * embd, int32_t n_tokens, int n_pos_per_embd, int n_mmproj_embd) : n_pos_per_embd(n_pos_per_embd), n_mmproj_embd(n_mmproj_embd) {
+        pos     .resize(n_tokens * n_pos_per_embd);
+        n_seq_id.resize(n_tokens);
+        seq_ids .resize(n_tokens + 1);
+        logits  .resize(n_tokens);
+        seq_id_0.resize(1);
+        seq_ids [n_tokens] = nullptr;
+        batch = {
+            /*n_tokens       =*/ n_tokens,
+            /*tokens         =*/ nullptr,
+            /*embd           =*/ embd,
+            /*pos            =*/ pos.data(),
+            /*n_seq_id       =*/ n_seq_id.data(),
+            /*seq_id         =*/ seq_ids.data(),
+            /*logits         =*/ logits.data(),
+        };
+    }
+
+    void set_position_normal(llama_pos pos_0, llama_seq_id seq_id) {
+        seq_id_0[0] = seq_id;
+        for (int i = 0; i < batch.n_tokens; i++) {
+            batch.pos     [i] = pos_0 + i;
+            batch.n_seq_id[i] = 1;
+            batch.seq_id  [i] = seq_id_0.data();
+            batch.logits  [i] = false;
+        }
+    }
+
+    // M-RoPE for image
+    void set_position_mrope_2d(llama_pos pos_0, int nx, int ny, llama_seq_id seq_id) {
+        GGML_ASSERT(n_pos_per_embd == 4);
+        seq_id_0[0] = seq_id;
+        for (int y = 0; y < ny; y++) {
+            for (int x = 0; x < nx; x++) {
+                int i = y * nx + x;
+                pos[i                     ] = pos_0;
+                pos[i + batch.n_tokens    ] = pos_0 + y;
+                pos[i + batch.n_tokens * 2] = pos_0 + x;
+                pos[i + batch.n_tokens * 3] = 0; // last pos dim is unused
+            }
+        }
+        for (int i = 0; i < batch.n_tokens; i++) {
+            batch.n_seq_id[i] = 1;
+            batch.seq_id  [i] = seq_id_0.data();
+            batch.logits  [i] = false;
+        }
+    }
+
+    // M-RoPE for audio
+    void set_position_mrope_1d(llama_pos pos_0, llama_seq_id seq_id) {
+        GGML_ASSERT(n_pos_per_embd == 4);
+        seq_id_0[0] = seq_id;
+        for (int i = 0; i < batch.n_tokens; i++) {
+            pos[i                     ] = pos_0 + i;
+            pos[i + batch.n_tokens    ] = pos_0 + i;
+            pos[i + batch.n_tokens * 2] = pos_0 + i;
+            pos[i + batch.n_tokens * 3] = 0; // last pos dim is unused
+        }
+        for (int i = 0; i < batch.n_tokens; i++) {
+            batch.n_seq_id[i] = 1;
+            batch.seq_id  [i] = seq_id_0.data();
+            batch.logits  [i] = false;
+        }
+    }
+
+    llama_batch get_view(int offset, int n_tokens) {
+        llama_pos * pos_ptr;
+        pos_view.clear();
+        pos_view.reserve(n_tokens * n_pos_per_embd);
+        if (n_pos_per_embd > 1) {
+            // mrope
+            // for example, with layout of src: 1234...1234...1234...1234...
+            //       offset 2 will give us dst: 34...34...34...34...
+            for (int i = 0; i < n_pos_per_embd; i++) {
+                // assume n_tokens is less than or equal to batch.n_tokens
+                // batch.n_tokens is number of **total** tokens
+                // n_tokens is number of viewed token
+                size_t src_idx = i * batch.n_tokens + offset;
+                pos_view.insert(pos_view.end(),
+                    pos.data() + src_idx,
+                    pos.data() + src_idx + n_tokens);
+            }
+            pos_ptr = pos_view.data();
+        } else {
+            // normal
+            pos_ptr = pos.data() + offset;
+        }
+        return {
+            /*n_tokens       =*/ n_tokens,
+            /*tokens         =*/ nullptr,
+            /*embd           =*/ batch.embd     + offset * n_mmproj_embd,
+            /*pos            =*/ pos_ptr,
+            /*n_seq_id       =*/ batch.n_seq_id + offset,
+            /*seq_id         =*/ batch.seq_id   + offset,
+            /*logits         =*/ batch.logits   + offset,
+        };
+    }
+};
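
The M-RoPE path stores four position planes of n_tokens entries each: a constant "time" index, then the row index, then the column index, with the fourth plane unused. A standalone sketch of the layout set_position_mrope_2d produces for a hypothetical 3x2 token grid starting at pos_0 = 10:

    #include <cstdio>
    #include <vector>

    int main() {
        const int nx = 3, ny = 2, n_tokens = nx * ny, pos_0 = 10;
        std::vector<int> pos(n_tokens * 4, 0);
        for (int y = 0; y < ny; y++) {
            for (int x = 0; x < nx; x++) {
                int i = y * nx + x;
                pos[i]                = pos_0;     // plane 0: time, constant per image
                pos[i + n_tokens]     = pos_0 + y; // plane 1: row
                pos[i + n_tokens * 2] = pos_0 + x; // plane 2: column
            }
        }
        // prints: 10 10 10 10 10 10 / 10 10 10 11 11 11 / 10 11 12 10 11 12 / 0 0 0 0 0 0
        for (int p = 0; p < 4; p++) {
            for (int i = 0; i < n_tokens; i++) printf("%d ", pos[p * n_tokens + i]);
            printf("\n");
        }
        return 0;
    }

This per-plane layout is also why get_view above has to re-slice each plane separately when building a sub-batch view at a token offset.
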
+
+// Helper function for decoding an image whose embeddings have already been calculated
+int32_t mtmd_helper_decode_image_chunk(
+        mtmd_context * ctx,
+        struct llama_context * lctx,
+        const mtmd_input_chunk * chunk,
+        float * encoded_embd,
+        llama_pos n_past,
+        llama_seq_id seq_id,
+        int32_t n_batch,
+        llama_pos * new_n_past) {
+    auto chunk_type = mtmd_input_chunk_get_type(chunk);
+    const char * name = chunk_type == MTMD_INPUT_CHUNK_TYPE_IMAGE ? "image" : "audio";
+    if (chunk_type == MTMD_INPUT_CHUNK_TYPE_TEXT) {
+        LOG_ERR("failed to decode chunk: input chunk not of image/audio type\n");
+        return -1;
+    }
+
+    const llama_model * model = llama_get_model(lctx);
+    int n_mmproj_embd = llama_model_n_embd(model);
+    int n_pos_per_embd = mtmd_decode_use_mrope(ctx) ? 4 : 1;
+
+    int32_t n_tokens = mtmd_input_chunk_get_n_tokens(chunk);
+    int32_t i_batch = 0;
+    int32_t n_img_batches = GGML_PAD(n_tokens, n_batch) / n_batch;
+    decode_embd_batch batch_embd(encoded_embd, n_tokens, n_pos_per_embd, n_mmproj_embd);
+
+    if (mtmd_decode_use_mrope(ctx)) {
+        if (chunk_type == MTMD_INPUT_CHUNK_TYPE_IMAGE) {
+            const auto image_tokens = mtmd_input_chunk_get_tokens_image(chunk);
+            if (!image_tokens) {
+                LOG_ERR("failed to decode chunk: image tokens are null\n");
+                return -1;
+            }
+            const int nx = mtmd_image_tokens_get_nx(image_tokens);
+            const int ny = mtmd_image_tokens_get_ny(image_tokens);
+            batch_embd.set_position_mrope_2d(n_past, nx, ny, seq_id);
+        } else if (chunk_type == MTMD_INPUT_CHUNK_TYPE_AUDIO) {
+            batch_embd.set_position_mrope_1d(n_past, seq_id);
+        } else {
+            GGML_ABORT("invalid chunk type for M-RoPE");
+        }
+    } else {
+        batch_embd.set_position_normal(n_past, seq_id);
+    }
+
+    if (mtmd_decode_use_non_causal(ctx)) {
+        llama_set_causal_attn(lctx, false);
+        // TODO @ngxson : need to make sure only one image is processed at a time, and n_ubatch must be enough to hold the image
+    }
+
+    while (i_batch < n_img_batches) { // split into batches
+        int pos_offset = i_batch*n_batch;
+        int n_tokens_batch = std::min(n_batch, n_tokens - pos_offset);
+        llama_batch batch_embd_view = batch_embd.get_view(pos_offset, n_tokens_batch);
+
+        LOG_INF("decoding %s batch %d/%d, n_tokens_batch = %d\n", name, i_batch+1, n_img_batches, n_tokens_batch);
+
+        int64_t t1 = ggml_time_ms();
+        int32_t ret = llama_decode(lctx, batch_embd_view);
+        if (ret != 0) {
+            LOG_ERR("failed to decode %s\n", name);
+            llama_set_causal_attn(lctx, true); // restore causal attn
+            return ret;
+        }
+
+        LOG_INF("%s decoded (batch %d/%d) in %" PRId64 " ms\n", name, i_batch+1, n_img_batches, ggml_time_ms() - t1);
+
+        i_batch++;
+    }
+
+    n_past += mtmd_input_chunk_get_n_pos(chunk);
+    *new_n_past = n_past;
+
+    if (mtmd_decode_use_non_causal(ctx)) {
+        llama_set_causal_attn(lctx, true);
+    }
+    return 0;
+}
+
+int32_t mtmd_helper_eval_chunk_single(mtmd_context * ctx,
+        struct llama_context * lctx,
+        const mtmd_input_chunk * chunk,
+        llama_pos n_past,
+        llama_seq_id seq_id,
+        int32_t n_batch,
+        bool logits_last,
+        llama_pos * new_n_past) {
+    int32_t ret;
+    llama_batch text_batch = llama_batch_init(n_batch, 0, 1);
+    auto chunk_type = mtmd_input_chunk_get_type(chunk);
+
+    if (chunk_type == MTMD_INPUT_CHUNK_TYPE_TEXT) {
+        size_t n_tokens;
+        const auto tokens = mtmd_input_chunk_get_tokens_text(chunk, &n_tokens);
+        // LOG_INF("decoding text chunk, n_tokens = %zu\n", n_tokens);
+        size_t i = 0;
+        while (i < n_tokens) { // split into batches
+            text_batch.n_tokens = 0; // clear the batch
+            for (; i < n_tokens && text_batch.n_tokens < n_batch; i++) {
+                int32_t j = text_batch.n_tokens;
+                text_batch.token   [j]    = tokens[i];
+                text_batch.pos     [j]    = n_past++;
+                text_batch.n_seq_id[j]    = 1;
+                text_batch.seq_id  [j][0] = seq_id;
+                text_batch.logits  [j]    = false;
+
+                text_batch.n_tokens++;
+            }
+            bool is_last_token = (i == n_tokens);
+            if (logits_last && is_last_token) {
+                text_batch.logits[text_batch.n_tokens - 1] = true;
+            }
+            ret = llama_decode(lctx, text_batch);
+            if (ret != 0) {
+                LOG_ERR("failed to decode text\n");
+                llama_batch_free(text_batch);
+                return ret;
+            }
+            *new_n_past += text_batch.n_tokens;
+        }
+
+    } else if (chunk_type == MTMD_INPUT_CHUNK_TYPE_IMAGE || chunk_type == MTMD_INPUT_CHUNK_TYPE_AUDIO) {
+        const char * name = chunk_type == MTMD_INPUT_CHUNK_TYPE_IMAGE ? "image" : "audio";
+        int64_t t0 = ggml_time_ms();
+
+        LOG_INF("encoding %s slice...\n", name);
+
+        ret = mtmd_encode_chunk(ctx, chunk);
+        if (ret != 0) {
+            LOG_ERR("failed to encode %s slice\n", name);
+            llama_batch_free(text_batch);
+            return ret;
+        }
+
+        LOG_INF("%s slice encoded in %" PRId64 " ms\n", name, ggml_time_ms() - t0);
+
+        float * embd = mtmd_get_output_embd(ctx);
+        ret = mtmd_helper_decode_image_chunk(ctx, lctx, chunk, embd, n_past, seq_id, n_batch, new_n_past);
+        if (ret != 0) {
+            LOG_ERR("failed to decode %s\n", name);
+            llama_batch_free(text_batch);
+            return ret;
+        }
+    } else {
+        GGML_ABORT("chunk type not supported");
+    }
+
+    llama_batch_free(text_batch);
+    return 0;
+}
+
+int32_t mtmd_helper_eval_chunks(mtmd_context * ctx,
+                                struct llama_context * lctx,
+                                const mtmd_input_chunks * chunks,
+                                llama_pos n_past,
+                                llama_seq_id seq_id,
+                                int32_t n_batch,
+                                bool logits_last,
+                                llama_pos * new_n_past) {
+    size_t n_chunks = mtmd_input_chunks_size(chunks);
+    if (n_chunks == 0) {
+        LOG_ERR("no chunks to eval\n");
+        return 0;
+    }
+
+    for (size_t i = 0; i < n_chunks; i++) {
+        bool chunk_logits_last = (i == n_chunks - 1) && logits_last;
+        auto chunk = mtmd_input_chunks_get(chunks, i);
+
+        int32_t res = mtmd_helper_eval_chunk_single(ctx, lctx, chunk, n_past, seq_id, n_batch, chunk_logits_last, &n_past);
+        if (res != 0) {
+            LOG_ERR("failed to eval chunk %zu\n", i);
+            return res;
+        }
+        *new_n_past = n_past;
+    }
+
+    return 0;
+}
+
+namespace audio_helpers {
+
+static bool is_audio_file(const char * buf, size_t len) {
+    if (len < 12) {
+        return false;
+    }
+
+    // RIFF ref: https://en.wikipedia.org/wiki/Resource_Interchange_File_Format
+    // WAV ref: https://www.mmsp.ece.mcgill.ca/Documents/AudioFormats/WAVE/WAVE.html
+    bool is_wav = memcmp(buf, "RIFF", 4) == 0 && memcmp(buf + 8, "WAVE", 4) == 0;
+    bool is_mp3 = len >= 3 && (
+        memcmp(buf, "ID3", 3) == 0 ||
+        // Check for MPEG sync word (simplified check)
+        ((unsigned char)buf[0] == 0xFF && ((unsigned char)buf[1] & 0xE0) == 0xE0)
+    );
+    bool is_flac = memcmp(buf, "fLaC", 4) == 0;
+
+    return is_wav || is_mp3 || is_flac;
+}
+
+// decodes an audio buffer into mono PCM f32 at the target sample rate; returns true on success
+static bool decode_audio_from_buf(const unsigned char * buf_in, size_t len, int target_sampler_rate, std::vector<float> & pcmf32_mono) {
+    ma_result result;
+    const int channels = 1;
+    ma_decoder_config decoder_config = ma_decoder_config_init(ma_format_f32, channels, target_sampler_rate);
+    ma_decoder decoder;
+
+    result = ma_decoder_init_memory(buf_in, len, &decoder_config, &decoder);
+    if (result != MA_SUCCESS) {
+        return false;
+    }
+
+    ma_uint64 frame_count;
+    ma_uint64 frames_read;
+    result = ma_decoder_get_length_in_pcm_frames(&decoder, &frame_count);
+    if (result != MA_SUCCESS) {
+        ma_decoder_uninit(&decoder);
+        return false;
+    }
+
+    pcmf32_mono.resize(frame_count);
+    result = ma_decoder_read_pcm_frames(&decoder, pcmf32_mono.data(), frame_count, &frames_read);
+    if (result != MA_SUCCESS) {
+        ma_decoder_uninit(&decoder);
+        return false;
+    }
+
+#ifdef MTMD_AUDIO_DEBUG
+    // save audio to wav file
+    ma_encoder_config config = ma_encoder_config_init(ma_encoding_format_wav, ma_format_f32, 1, target_sampler_rate);
+    ma_encoder encoder;
+    ma_encoder_init_file("output.wav", &config, &encoder);
+    ma_encoder_write_pcm_frames(&encoder, pcmf32_mono.data(), pcmf32_mono.size(), &frames_read);
+    ma_encoder_uninit(&encoder);
+#endif
+
+    ma_decoder_uninit(&decoder);
+    return true;
+}
+
+} // namespace audio_helpers
+
+mtmd_bitmap * mtmd_helper_bitmap_init_from_buf(mtmd_context * ctx, const unsigned char * buf, size_t len) {
+    if (audio_helpers::is_audio_file((const char *)buf, len)) {
+        std::vector<float> pcmf32;
+        int bitrate = mtmd_get_audio_bitrate(ctx);
+        if (bitrate < 0) {
+            LOG_ERR("This model does not support audio input\n");
+            return nullptr;
+        }
+        if (!audio_helpers::decode_audio_from_buf(buf, len, bitrate, pcmf32)) {
+            LOG_ERR("Unable to read WAV audio file from buffer\n");
+            return nullptr;
+        }
+        return mtmd_bitmap_init_from_audio(pcmf32.size(), pcmf32.data());
+    }
+
+    // otherwise, we assume it's an image
+    mtmd_bitmap * result = nullptr;
+    {
+        int nx, ny, nc;
+        auto * data = stbi_load_from_memory(buf, len, &nx, &ny, &nc, 3);
+        if (!data) {
+            LOG_ERR("%s: failed to decode image bytes\n", __func__);
+            return nullptr;
+        }
+        result = mtmd_bitmap_init(nx, ny, data);
+        stbi_image_free(data);
+    }
+    return result;
+}
+
+mtmd_bitmap * mtmd_helper_bitmap_init_from_file(mtmd_context * ctx, const char * fname) {
+    std::vector<unsigned char> buf;
+    FILE * f = fopen(fname, "rb");
+    if (!f) {
+        LOG_ERR("Unable to open file %s: %s\n", fname, strerror(errno));
+        return nullptr;
+    }
+
+    fseek(f, 0, SEEK_END);
+    long file_size = ftell(f);
+    fseek(f, 0, SEEK_SET);
+    buf.resize(file_size);
+
+    size_t n_read = fread(buf.data(), 1, file_size, f);
+    fclose(f);
+    if (n_read != (size_t)file_size) {
+        LOG_ERR("Failed to read entire file %s", fname);
+        return nullptr;
+    }
+
+    return mtmd_helper_bitmap_init_from_buf(ctx, buf.data(), buf.size());
+}
diff --git a/llama/llama.cpp/tools/mtmd/mtmd-helper.h b/llama/llama.cpp/tools/mtmd/mtmd-helper.h
new file mode 100644
index 000000000..5c0edc693
--- /dev/null
+++ b/llama/llama.cpp/tools/mtmd/mtmd-helper.h
@@ -0,0 +1,91 @@
+#ifndef MTMD_HELPER_H
+#define MTMD_HELPER_H
+
+#include "ggml.h"
+#include "llama.h"
+#include "mtmd.h"
+
+#include <stddef.h>
+#include <stdint.h>
+#include <stdbool.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+//
+// libmtmd helper functions
+//
+// Please note that these helpers are not guaranteed to be stable.
+// BREAKING CHANGES are expected.
+//
+
+// helper function to construct a mtmd_bitmap from a file
+// it calls mtmd_helper_bitmap_init_from_buf() internally
+// returns nullptr on failure
+// this function is thread-safe
+MTMD_API mtmd_bitmap * mtmd_helper_bitmap_init_from_file(mtmd_context * ctx, const char * fname);
+
+// helper function to construct a mtmd_bitmap from a buffer containing a file
+// supported formats:
+//     image: formats supported by stb_image: jpg, png, bmp, gif, etc.
+//     audio: formats supported by miniaudio: wav, mp3, flac
+// note: audio files will be auto-detected based on magic bytes
+// returns nullptr on failure
+// this function is thread-safe
+MTMD_API mtmd_bitmap * mtmd_helper_bitmap_init_from_buf(mtmd_context * ctx, const unsigned char * buf, size_t len);
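+
+// A minimal usage sketch (not normative), assuming `ctx` was created with
+// mtmd_init_from_file(); the file name is an arbitrary example:
+//
+//     mtmd_bitmap * bmp = mtmd_helper_bitmap_init_from_file(ctx, "input.png");
+//     if (bmp) {
+//         // ... pass the bitmap to mtmd_tokenize(), then ...
+//         mtmd_bitmap_free(bmp);
+//     }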
+
+// helper to count the total number of tokens from a list of chunks, useful to keep track of KV cache
+MTMD_API size_t mtmd_helper_get_n_tokens(const mtmd_input_chunks * chunks);
+
+// helper to count the total number of positions from a list of chunks, useful to keep track of n_past
+// normally, n_pos is equal to n_tokens, but for M-RoPE it is different
+MTMD_API llama_pos mtmd_helper_get_n_pos(const mtmd_input_chunks * chunks);
+
+// helper function that automatically:
+// 1. run llama_decode() on text chunks
+// 2. run mtmd_encode() on image chunks, then mtmd_get_output_embd() and then llama_decode()
+// if any of the mtmd_encode() or llama_decode() calls return non-zero, stop and forward the error
+// otherwise, returns 0 on success
+// this function is NOT thread-safe
+MTMD_API int32_t mtmd_helper_eval_chunks(mtmd_context * ctx,
+                                         struct llama_context * lctx,
+                                         const mtmd_input_chunks * chunks,
+                                         llama_pos n_past,
+                                         llama_seq_id seq_id,
+                                         int32_t n_batch,
+                                         bool logits_last,
+                                         llama_pos * new_n_past);
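+
+// A minimal sketch of the intended call sequence, assuming `chunks` came from
+// mtmd_tokenize() and `lctx`/`n_batch` were set up by the caller:
+//
+//     llama_pos n_past = 0;
+//     if (mtmd_helper_eval_chunks(ctx, lctx, chunks,
+//                                 /*n_past=*/ 0, /*seq_id=*/ 0, n_batch,
+//                                 /*logits_last=*/ true, &n_past) != 0) {
+//         // error code forwarded from mtmd_encode()/llama_decode()
+//     }
+//     // logits for the last token are now available via llama_get_logits(lctx)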
+
+// works like mtmd_helper_eval_chunks(), but only for a single chunk
+// this function is NOT thread-safe
+MTMD_API int32_t mtmd_helper_eval_chunk_single(mtmd_context * ctx,
+                                               struct llama_context * lctx,
+                                               const mtmd_input_chunk * chunk,
+                                               llama_pos n_past,
+                                               llama_seq_id seq_id,
+                                               int32_t n_batch,
+                                               bool logits_last,
+                                               llama_pos * new_n_past);
+
+// helper function to decode an image whose embeddings have already been calculated
+// this helper will handle batching and pre/post decoding setup (for ex. gemma 3 requires non-causal attention)
+// ret 0 on success, -1 on chunk not being a valid image chunk, 1 on decode failure
+MTMD_API int32_t mtmd_helper_decode_image_chunk(mtmd_context * ctx,
+                                                struct llama_context * lctx,
+                                                const mtmd_input_chunk * chunk,
+                                                float * encoded_embd,
+                                                llama_pos n_past,
+                                                llama_seq_id seq_id,
+                                                int32_t n_batch,
+                                                llama_pos * new_n_past);
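+
+// A sketch of manual use (assumptions: the chunk was already encoded with
+// mtmd_encode_chunk(), so mtmd_get_output_embd() returns its embeddings):
+//
+//     float * embd = mtmd_get_output_embd(ctx);
+//     llama_pos new_n_past = 0;
+//     int32_t ret = mtmd_helper_decode_image_chunk(ctx, lctx, chunk, embd,
+//                                                  n_past, seq_id, n_batch, &new_n_past);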
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+//
+// C++ wrappers
+//
+
+#endif
diff --git a/llama/llama.cpp/tools/mtmd/mtmd.cpp b/llama/llama.cpp/tools/mtmd/mtmd.cpp
new file mode 100644
index 000000000..6f70f7f40
--- /dev/null
+++ b/llama/llama.cpp/tools/mtmd/mtmd.cpp
@@ -0,0 +1,1076 @@
+#include "clip.h"
+#include "clip-impl.h"
+#include "mtmd.h"
+#include "mtmd-audio.h"
+
+#include "llama.h"
+
+#include <algorithm>
+#include <cerrno>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+#include <limits>
+#include <vector>
+
+// represents raw image data, layout is RGBRGBRGB...
+// length of data must be nx * ny * 3
+struct mtmd_bitmap {
+    uint32_t nx;
+    uint32_t ny;
+    std::vector<unsigned char> data;
+    std::string id; // optional user-defined id, for ex: can be set to image hash, useful for KV cache tracking
+    bool is_audio = false; // true if the bitmap is audio
+};
+
+struct mtmd_image_tokens {
+    uint32_t nx; // number of tokens in x direction
+    uint32_t ny; // number of tokens in y direction
+    bool use_mrope_pos = false; // use M-RoPE position counting (the whole image is 1 temporal position)
+    uint32_t n_tokens() const { return nx * ny; }
+    clip_image_f32_batch batch_f32; // preprocessed image patches
+    std::string id; // optional user-defined ID, useful for KV cache tracking
+
+    mtmd_image_tokens clone() {
+        return mtmd_image_tokens{
+            nx,
+            ny,
+            use_mrope_pos,
+            batch_f32.clone(),
+            id
+        };
+    }
+};
+using mtmd_image_tokens_ptr = std::unique_ptr<mtmd_image_tokens>;
+
+struct mtmd_audio_tokens {
+    uint32_t n_tokens; // number of tokens
+    clip_image_f32_batch batch_f32; // preprocessed image patches
+    std::string id; // optional user-defined ID, useful for KV cache tracking
+
+    mtmd_audio_tokens clone() {
+        return mtmd_audio_tokens{
+            n_tokens,
+            batch_f32.clone(),
+            id
+        };
+    }
+};
+using mtmd_audio_tokens_ptr = std::unique_ptr<mtmd_audio_tokens>;
+
+struct mtmd_input_chunk {
+    mtmd_input_chunk_type type;
+    std::vector<llama_token> tokens_text;
+    mtmd_image_tokens_ptr tokens_image;
+    mtmd_audio_tokens_ptr tokens_audio;
+};
+
+struct mtmd_input_chunks {
+    std::vector<mtmd_input_chunk> entries;
+};
+
+// slice template, used by some llava-uhd models to correctly place the special tokens around image embeddings
+// models not having it (llava-1.6) will process embeddings without any special tokens in-between
+enum mtmd_slice_tmpl {
+    MTMD_SLICE_TMPL_NONE,
+    MTMD_SLICE_TMPL_MINICPMV_2_5,
+    MTMD_SLICE_TMPL_MINICPMV_2_6,
+    MTMD_SLICE_TMPL_LLAMA4,
+    // TODO @ngxson : add support for idefics (SmolVLM)
+};
+
+mtmd_input_text* mtmd_input_text_init(const char * text, bool add_special, bool parse_special) {
+    return new mtmd_input_text{text, add_special, parse_special};
+}
+
+void mtmd_input_text_free(mtmd_input_text* input_text) {
+    if (input_text) {
+        delete input_text;
+    }
+}
+
+const char * mtmd_default_marker() {
+    return "<__media__>";
+}
+
+mtmd_context_params mtmd_context_params_default() {
+    mtmd_context_params params;
+    params.use_gpu = true;
+    params.print_timings = true;
+    params.n_threads = 4;
+    params.verbosity = GGML_LOG_LEVEL_INFO;
+    params.image_marker = MTMD_DEFAULT_IMAGE_MARKER;
+    params.media_marker = mtmd_default_marker();
+    return params;
+}
+
+struct mtmd_context {
+    struct clip_ctx * ctx_v; // vision
+    struct clip_ctx * ctx_a; // audio
+    const struct llama_model * text_model;
+    std::vector<float> image_embd_v; // image embedding vector
+
+    bool print_timings;
+    int n_threads;
+    std::string media_marker;
+    const int n_embd_text;
+
+    // these are not tokens, but strings used to mark the beginning and end of image/audio embeddings
+    std::string img_beg;
+    std::string img_end;
+    std::string aud_beg;
+    std::string aud_end;
+
+    // for llava-uhd style models, we need special tokens in-between slices
+    // minicpmv calls them "slices", llama 4 calls them "tiles"
+    mtmd_slice_tmpl slice_tmpl    = MTMD_SLICE_TMPL_NONE;
+    llama_token tok_ov_img_start  = LLAMA_TOKEN_NULL; // overview image
+    llama_token tok_ov_img_end    = LLAMA_TOKEN_NULL; // overview image
+    llama_token tok_slices_start  = LLAMA_TOKEN_NULL; // start of all slices
+    llama_token tok_slices_end    = LLAMA_TOKEN_NULL; // end of all slices
+    llama_token tok_sli_img_start = LLAMA_TOKEN_NULL; // single slice start
+    llama_token tok_sli_img_end   = LLAMA_TOKEN_NULL; // single slice end
+    llama_token tok_sli_img_mid   = LLAMA_TOKEN_NULL; // between 2 slices
+    llama_token tok_row_end       = LLAMA_TOKEN_NULL; // end of row
+    bool        tok_row_end_trail = false;
+    bool        ov_img_first      = false;
+
+    bool use_mrope = false; // for Qwen2VL, we need to use M-RoPE
+
+    // for whisper, we pre-calculate the mel filter bank
+    whisper_preprocessor::whisper_filters w_filters;
+
+    // TODO @ngxson : add timings
+
+    mtmd_context(const char * mmproj_fname,
+                   const llama_model * text_model,
+                   const mtmd_context_params & ctx_params) :
+        text_model   (text_model),
+        print_timings(ctx_params.print_timings),
+        n_threads    (ctx_params.n_threads),
+        media_marker (ctx_params.media_marker),
+        n_embd_text  (llama_model_n_embd(text_model))
+    {
+        if (std::string(ctx_params.image_marker) != MTMD_DEFAULT_IMAGE_MARKER) {
+            throw std::runtime_error("custom image_marker is not supported anymore, use media_marker instead");
+        }
+
+        if (media_marker.empty()) {
+            throw std::runtime_error("media_marker must not be empty");
+        }
+
+        clip_context_params ctx_clip_params;
+        ctx_clip_params.use_gpu   = ctx_params.use_gpu;
+        ctx_clip_params.verbosity = ctx_params.verbosity;
+        auto res = clip_init(mmproj_fname, ctx_clip_params);
+        ctx_v = res.ctx_v;
+        ctx_a = res.ctx_a;
+        if (!ctx_v && !ctx_a) {
+            throw std::runtime_error(string_format("Failed to load CLIP model from %s\n", mmproj_fname));
+        }
+
+        // if both vision and audio mmproj are present, we need to validate their n_embd
+        if (ctx_v && ctx_a) {
+            int n_embd_v = clip_n_mmproj_embd(ctx_v);
+            int n_embd_a = clip_n_mmproj_embd(ctx_a);
+            if (n_embd_v != n_embd_a) {
+                throw std::runtime_error(string_format(
+                    "mismatch between vision and audio mmproj (n_embd_v = %d, n_embd_a = %d)\n",
+                    n_embd_v, n_embd_a));
+            }
+        }
+
+        // since we have already validated the n_embd of vision and audio mmproj,
+        // we can safely assume that they are the same
+        int n_embd_clip = clip_n_mmproj_embd(ctx_v ? ctx_v : ctx_a);
+        if (n_embd_text != n_embd_clip) {
+            throw std::runtime_error(string_format(
+                "mismatch between text model (n_embd = %d) and mmproj (n_embd = %d)\n"
+                "hint: you may be using wrong mmproj\n",
+                n_embd_text, n_embd_clip));
+        }
+        if (ctx_v) {
+            init_vision();
+        }
+        if (ctx_a) {
+            init_audio();
+        }
+    }
+
+    void init_vision() {
+        GGML_ASSERT(ctx_v != nullptr);
+        use_mrope = clip_is_qwen2vl(ctx_v);
+
+        projector_type proj = clip_get_projector_type(ctx_v);
+        int minicpmv_version = clip_is_minicpmv(ctx_v);
+        if (minicpmv_version == 2) {
+            // minicpmv 2.5 format:
+            // <image> (overview) </image><slice><image> (slice) </image><image> (slice) </image>\n ... </slice>
+            slice_tmpl        = MTMD_SLICE_TMPL_MINICPMV_2_5;
+            tok_ov_img_start  = lookup_token("<image>");
+            tok_ov_img_end    = lookup_token("</image>");
+            tok_slices_start  = lookup_token("<slice>");
+            tok_slices_end    = lookup_token("</slice>");
+            tok_sli_img_start = tok_ov_img_start;
+            tok_sli_img_end   = tok_ov_img_end;
+            tok_row_end       = lookup_token("\n");
+            tok_row_end_trail = false; // no trailing end-of-row token
+            ov_img_first      = true;
+
+        } else if (minicpmv_version == 3 || minicpmv_version == 4 || minicpmv_version == 5) {
+            // minicpmv 2.6 format:
+            // <image> (overview) </image><slice> (slice) </slice><slice> (slice) </slice>\n ...
+            slice_tmpl        = MTMD_SLICE_TMPL_MINICPMV_2_6;
+            tok_ov_img_start  = lookup_token("<image>");
+            tok_ov_img_end    = lookup_token("</image>");
+            tok_sli_img_start = lookup_token("<slice>");
+            tok_sli_img_end   = lookup_token("</slice>");
+            tok_row_end       = lookup_token("\n");
+            tok_row_end_trail = false; // no trailing end-of-row token
+            ov_img_first      = true;
+
+        } else if (minicpmv_version != 0) {
+            GGML_ASSERT(false && "unsupported minicpmv version");
+        } else if (proj == PROJECTOR_TYPE_LLAMA4) {
+            // llama 4 format:
+            // <|image_start|>
+            //     (slice) <|tile_x_separator|> (slice) <|tile_x_separator|> ... <|tile_y_separator|>
+            //     (slice) <|tile_x_separator|> (slice) <|tile_x_separator|> ... <|tile_y_separator|>
+            //     ... <|tile_y_separator|>   <-- trailing end-of-row token
+            // <|image|> (overview)           <-- overview image is last
+            // <|image_end|>
+            slice_tmpl        = MTMD_SLICE_TMPL_LLAMA4;
+            tok_ov_img_start  = lookup_token("<|image|>");
+            tok_sli_img_mid   = lookup_token("<|tile_x_separator|>");
+            tok_row_end       = lookup_token("<|tile_y_separator|>");
+            tok_row_end_trail = true; // add trailing end-of-row token
+            ov_img_first      = false; // overview image is last
+        }
+
+        // set boi/eoi
+        if (proj == PROJECTOR_TYPE_GEMMA3) {
+            // <start_of_image> ... (image embeddings) ... <end_of_image>
+            img_beg = "<start_of_image>";
+            img_end = "<end_of_image>";
+
+        } else if (proj == PROJECTOR_TYPE_IDEFICS3) {
+            // https://github.com/huggingface/transformers/blob/a42ba80fa520c784c8f11a973ca9034e5f859b79/src/transformers/models/idefics3/processing_idefics3.py#L192-L215
+            img_beg = "";
+            img_end = "";
+
+        } else if (proj == PROJECTOR_TYPE_PIXTRAL) {
+            // https://github.com/huggingface/transformers/blob/1cd110c6cb6a6237614130c470e9a902dbc1a4bd/docs/source/en/model_doc/pixtral.md
+            img_end = "[IMG_END]";
+
+        } else if (proj == PROJECTOR_TYPE_QWEN2VL || proj == PROJECTOR_TYPE_QWEN25VL) {
+            // <|vision_start|> ... (image embeddings) ... <|vision_end|>
+            img_beg = "<|vision_start|>";
+            img_end = "<|vision_end|>";
+
+        } else if (proj == PROJECTOR_TYPE_LLAMA4) {
+            // (more details in mtmd_context constructor)
+            img_beg = "<|image_start|>";
+            img_end = "<|image_end|>";
+            LOG_WRN("%s: llama 4 vision is known to have degraded quality:\n"
+                    "    https://github.com/ggml-org/llama.cpp/pull/13282\n", __func__);
+
+        } else if (proj == PROJECTOR_TYPE_INTERNVL) {
+            // <img> ... (image embeddings) ... </img>
+            img_beg = "<img>";
+            img_end = "</img>";
+
+        }
+    }
+
+    void init_audio() {
+        GGML_ASSERT(ctx_a != nullptr);
+        projector_type proj = clip_get_projector_type(ctx_a);
+
+        if (clip_has_whisper_encoder(ctx_a)) {
+            // TODO @ngxson : check if model n_mel is 128 or 80
+            w_filters = whisper_precalc_filters::get_128_bins();
+        }
+
+        LOG_WRN("%s: audio input is in experimental stage and may have reduced quality:\n"
+                "    https://github.com/ggml-org/llama.cpp/discussions/13759\n", __func__);
+
+        if (proj == PROJECTOR_TYPE_QWEN2A) {
+            // <|audio_bos|> ... (embeddings) ... <|audio_eos|>
+            aud_beg = "<|audio_bos|>";
+            aud_end = "<|audio_eos|>";
+
+        } else if (proj == PROJECTOR_TYPE_ULTRAVOX) {
+            // [BEGIN_AUDIO] ... (embeddings) ...
+            aud_beg = "[BEGIN_AUDIO]";
+
+        }
+    }
+
+    // get clip ctx based on chunk type
+    clip_ctx * get_clip_ctx(const mtmd_input_chunk * chunk) const {
+        if (chunk->type == MTMD_INPUT_CHUNK_TYPE_IMAGE) {
+            return ctx_v;
+        } else if (chunk->type == MTMD_INPUT_CHUNK_TYPE_AUDIO) {
+            return ctx_a;
+        }
+        GGML_ABORT("unknown chunk type");
+    }
+
+    projector_type proj_type_v() const {
+        return ctx_v ? clip_get_projector_type(ctx_v) : PROJECTOR_TYPE_UNKNOWN;
+    }
+
+    projector_type proj_type_a() const {
+        return ctx_a ? clip_get_projector_type(ctx_a) : PROJECTOR_TYPE_UNKNOWN;
+    }
+
+    ~mtmd_context() {
+        clip_free(ctx_a);
+        clip_free(ctx_v);
+    }
+
+private:
+    llama_token lookup_token(const std::string & token_text) {
+        const llama_vocab * vocab = llama_model_get_vocab(text_model);
+        const int n_vocab = llama_vocab_n_tokens(vocab);
+        for (int i = 0; i < n_vocab; i++) {
+            if (token_to_piece(vocab, i, true) == token_text) {
+                return i;
+            }
+        }
+        return LLAMA_TOKEN_NULL;
+    }
+
+    std::string token_to_piece(const llama_vocab * vocab, llama_token token, bool special) {
+        std::string piece;
+        piece.resize(piece.capacity());  // using string internal cache, 15 bytes + '\0'
+        const int n_chars = llama_token_to_piece(vocab, token, &piece[0], piece.size(), 0, special);
+        if (n_chars < 0) {
+            piece.resize(-n_chars);
+            int check = llama_token_to_piece(vocab, token, &piece[0], piece.size(), 0, special);
+            GGML_ASSERT(check == -n_chars);
+        } else {
+            piece.resize(n_chars);
+        }
+        return piece;
+    }
+};
+
+mtmd_context * mtmd_init_from_file(const char * mmproj_fname,
+        const struct llama_model * text_model,
+        const struct mtmd_context_params ctx_params) {
+    try {
+        return new mtmd_context(mmproj_fname, text_model, ctx_params);
+    } catch (const std::exception & e) {
+        LOG_ERR("%s: error: %s\n", __func__, e.what());
+        return nullptr;
+    }
+}
+
+void mtmd_free(mtmd_context * ctx) {
+    if (ctx) {
+        delete ctx;
+    }
+}
+
+struct mtmd_tokenizer {
+    mtmd_context * ctx;
+    std::vector<const mtmd_bitmap *> bitmaps;
+
+    std::string input_text;
+    bool add_special;
+    bool parse_special;
+    const llama_vocab * vocab;
+
+    mtmd_input_chunks cur;
+
+    mtmd_tokenizer(mtmd_context * ctx,
+            const mtmd_input_text * text,
+            const mtmd_bitmap ** bitmaps,
+            size_t n_bitmaps) : ctx(ctx), bitmaps(bitmaps, bitmaps + n_bitmaps) {
+        add_special   = text->add_special;
+        parse_special = text->parse_special;
+        input_text    = text->text;
+        vocab         = llama_model_get_vocab(ctx->text_model);
+
+        // for compatibility, we convert image marker to media marker
+        string_replace_all(input_text, MTMD_DEFAULT_IMAGE_MARKER, ctx->media_marker);
+    }
+
+    int32_t tokenize(mtmd_input_chunks * output) {
+        cur.entries.clear();
+        std::vector<std::string> parts = split_text(input_text, ctx->media_marker);
+        size_t i_bm = 0; // index of the current bitmap
+        for (auto & part : parts) {
+            if (part == ctx->media_marker) {
+                // this is a marker, we should add the next bitmap
+                if (i_bm >= bitmaps.size()) {
+                    LOG_ERR("%s: error: number of bitmaps (%zu) does not match number of markers (%zu)\n",
+                            __func__, bitmaps.size(), parts.size() - 1);
+                    return 1;
+                }
+                const mtmd_bitmap * bitmap = bitmaps[i_bm++];
+                int32_t res = add_media(bitmap);
+                if (res != 0) {
+                    return res;
+                }
+            } else {
+                // this is a text part, we should add it as text
+                add_text(part, parse_special);
+            }
+        }
+
+        if (add_special && llama_vocab_get_add_bos(vocab)) {
+            // if first chunk is text, we add BOS token to first text chunk
+            // otherwise, create a new text chunk with BOS token
+            if (!cur.entries.empty() && cur.entries[0].type == MTMD_INPUT_CHUNK_TYPE_TEXT) {
+                // add BOS token to the beginning of first text chunk
+                cur.entries[0].tokens_text.insert(cur.entries[0].tokens_text.begin(), llama_vocab_bos(vocab));
+            } else {
+                // create a new text chunk with BOS token at the beginning
+                mtmd_input_chunk bos_chunk{
+                    MTMD_INPUT_CHUNK_TYPE_TEXT,
+                    {llama_vocab_bos(vocab)},
+                    nullptr, // image tokens
+                    nullptr, // audio tokens
+                };
+                cur.entries.insert(cur.entries.begin(), std::move(bos_chunk));
+            }
+        }
+
+        if (add_special && llama_vocab_get_add_eos(vocab)) {
+            // if last chunk is text, we add EOS token to it
+            add_text({llama_vocab_eos(vocab)});
+        }
+
+        if (i_bm != bitmaps.size()) {
+            LOG_ERR("%s: error: number of bitmaps (%zu) does not match number of markers (%zu)\n",
+                    __func__, bitmaps.size(), parts.size() - 1);
+            return 1;
+        }
+
+        *output = std::move(cur);
+
+        return 0;
+    }
+
+    void add_text(const std::string & txt, bool parse_special) {
+        LOG_DBG("%s: %s\n", __func__, txt.c_str());
+        auto tokens = mtmd_tokenize_text_internal(vocab, txt, /* add_special */ false, parse_special);
+        add_text(tokens);
+    }
+
+    void add_text(const std::vector<llama_token> & tokens) {
+        if (tokens.empty()) {
+            return;
+        }
+        // if last entry is also a text chunk, add tokens to it instead of creating new chunk
+        if (!cur.entries.empty() && cur.entries.back().type == MTMD_INPUT_CHUNK_TYPE_TEXT) {
+            cur.entries.back().tokens_text.insert(
+                                            cur.entries.back().tokens_text.end(),
+                                            tokens.begin(),
+                                            tokens.end());
+        } else {
+            mtmd_input_chunk chunk{
+                MTMD_INPUT_CHUNK_TYPE_TEXT,
+                tokens,
+                nullptr, // image tokens
+                nullptr, // audio tokens
+            };
+            cur.entries.emplace_back(std::move(chunk));
+        }
+    }
+
+    int32_t add_media(const mtmd_bitmap * bitmap) {
+        if (!bitmap->is_audio) {
+            // handle image
+
+            if (!ctx->ctx_v) {
+                LOG_ERR("%s: error: model does not support vision input\n", __func__);
+                return 2;
+            }
+
+            if (!ctx->img_beg.empty()) {
+                add_text(ctx->img_beg, true); // add image begin token
+            }
+
+            // convert mtmd_bitmap to clip_image_u8
+            clip_image_u8_ptr img_u8(clip_image_u8_init());
+            img_u8->nx = bitmap->nx;
+            img_u8->ny = bitmap->ny;
+            img_u8->buf.resize(bitmap->data.size());
+            std::memcpy(img_u8->buf.data(), bitmap->data.data(), img_u8->nx * img_u8->ny * 3);
+
+            // preprocess image
+            clip_image_f32_batch batch_f32;
+            bool ok = clip_image_preprocess(ctx->ctx_v, img_u8.get(), &batch_f32);
+            if (!ok) {
+                LOG_ERR("Unable to preprocess image\n");
+                return 2;
+            }
+
+            // handle llava-uhd style preprocessing
+            if (
+                ctx->slice_tmpl == MTMD_SLICE_TMPL_MINICPMV_2_5
+                || ctx->slice_tmpl == MTMD_SLICE_TMPL_MINICPMV_2_6
+                || ctx->slice_tmpl == MTMD_SLICE_TMPL_LLAMA4
+            ) {
+                const int n_col = batch_f32.grid_x;
+                const int n_row = batch_f32.grid_y;
+                // split batch into chunks of single images
+                // NOTE: batch_f32 will be invalidated after this call
+                auto chunks = split_batch_to_chunk(std::move(batch_f32), bitmap->id);
+                GGML_ASSERT(chunks.size() > 0);
+
+                auto ov_chunk = std::move(chunks.front());
+                chunks.erase(chunks.begin());
+
+                // add overview image (first)
+                if (ctx->ov_img_first) {
+                    if (ctx->tok_ov_img_start != LLAMA_TOKEN_NULL) {
+                        add_text({ctx->tok_ov_img_start});
+                    }
+                    cur.entries.emplace_back(std::move(ov_chunk));
+                    if (ctx->tok_ov_img_end != LLAMA_TOKEN_NULL) {
+                        add_text({ctx->tok_ov_img_end});
+                    }
+                }
+
+                // add slices (or tiles)
+                if (!chunks.empty()) {
+                    GGML_ASSERT((int)chunks.size() == n_row * n_col);
+                    if (ctx->tok_slices_start != LLAMA_TOKEN_NULL) {
+                        add_text({ctx->tok_slices_start});
+                    }
+                    for (int y = 0; y < n_row; y++) {
+                        for (int x = 0; x < n_col; x++) {
+                            const bool is_last_in_row = (x == n_col - 1);
+                            if (ctx->tok_sli_img_start != LLAMA_TOKEN_NULL) {
+                                add_text({ctx->tok_sli_img_start});
+                            }
+                            cur.entries.emplace_back(std::move(chunks[y * n_col + x]));
+                            if (ctx->tok_sli_img_end != LLAMA_TOKEN_NULL) {
+                                add_text({ctx->tok_sli_img_end});
+                            }
+                            if (!is_last_in_row && ctx->tok_sli_img_mid != LLAMA_TOKEN_NULL) {
+                                add_text({ctx->tok_sli_img_mid});
+                            }
+                        }
+                        if ((y != n_row - 1 || ctx->tok_row_end_trail) && ctx->tok_row_end != LLAMA_TOKEN_NULL) {
+                            add_text({ctx->tok_row_end});
+                        }
+                    }
+                    if (ctx->tok_slices_end != LLAMA_TOKEN_NULL) {
+                        add_text({ctx->tok_slices_end});
+                    }
+                }
+
+                // add overview image (last)
+                if (!ctx->ov_img_first) {
+                    if (ctx->tok_ov_img_start != LLAMA_TOKEN_NULL) {
+                        add_text({ctx->tok_ov_img_start});
+                    }
+                    cur.entries.emplace_back(std::move(ov_chunk));
+                    if (ctx->tok_ov_img_end != LLAMA_TOKEN_NULL) {
+                        add_text({ctx->tok_ov_img_end});
+                    }
+                }
+
+            } else {
+                size_t n_tokens = 0;
+                for (const auto & entry : batch_f32.entries) {
+                    n_tokens += clip_n_output_tokens(ctx->ctx_v, entry.get());
+                }
+
+                mtmd_image_tokens_ptr image_tokens(new mtmd_image_tokens);
+                if (ctx->use_mrope) {
+                    // for Qwen2VL, we need this information for M-RoPE decoding positions
+                    image_tokens->nx = clip_n_output_tokens_x(ctx->ctx_v, batch_f32.entries[0].get());
+                    image_tokens->ny = clip_n_output_tokens_y(ctx->ctx_v, batch_f32.entries[0].get());
+                    image_tokens->use_mrope_pos = true;
+                } else {
+                    // other models, we only need the total number of tokens
+                    image_tokens->nx = n_tokens;
+                    image_tokens->ny = 1;
+                }
+                image_tokens->batch_f32 = std::move(batch_f32);
+                image_tokens->id = bitmap->id; // optional
+
+                LOG_DBG("image_tokens->nx = %d\n", image_tokens->nx);
+                LOG_DBG("image_tokens->ny = %d\n", image_tokens->ny);
+                LOG_DBG("batch_f32 size = %d\n", (int)image_tokens->batch_f32.entries.size());
+
+                mtmd_input_chunk chunk{
+                    MTMD_INPUT_CHUNK_TYPE_IMAGE,
+                    {}, // text tokens
+                    std::move(image_tokens),
+                    nullptr, // audio tokens
+                };
+                cur.entries.emplace_back(std::move(chunk));
+            }
+
+            if (!ctx->img_end.empty()) {
+                add_text(ctx->img_end, true); // add image end token
+            }
+
+        } else {
+            // handle audio
+
+            if (!ctx->ctx_a) {
+                LOG_ERR("%s: error: model does not support audio input\n", __func__);
+                return 2;
+            }
+
+            if (bitmap->data.size() == 0) {
+                LOG_ERR("%s: error: empty audio data\n", __func__);
+                return 2;
+            }
+
+            if (!ctx->aud_beg.empty()) {
+                add_text(ctx->aud_beg, true); // add audio begin token
+            }
+
+            // preprocess audio
+            GGML_ASSERT(ctx->w_filters.n_mel); // make sure we have filter preloaded
+            std::vector<whisper_preprocessor::whisper_mel> mel_spec_chunks;
+            const float * samples = (const float *)bitmap->data.data();
+            size_t n_samples = bitmap->data.size() / sizeof(float);
+            bool ok = whisper_preprocessor::preprocess_audio(samples, n_samples, ctx->w_filters, mel_spec_chunks);
+            if (!ok) {
+                LOG_ERR("Unable to preprocess audio\n");
+                return 2;
+            }
+
+            // consider each mel_spec as a separate audio chunk
+            // TODO: maybe support batching, but this may come with memory cost
+            for (auto & mel_spec : mel_spec_chunks) {
+                clip_image_f32_ptr mel_f32(clip_image_f32_init());
+                mel_f32->nx  = mel_spec.n_len;
+                mel_f32->ny  = mel_spec.n_mel;
+                mel_f32->buf = std::move(mel_spec.data);
+                size_t n_tokens = clip_n_output_tokens(ctx->ctx_a, mel_f32.get());
+
+                clip_image_f32_batch batch_f32;
+                batch_f32.is_audio = true;
+                batch_f32.entries.push_back(std::move(mel_f32));
+
+                mtmd_audio_tokens_ptr audio_tokens(new mtmd_audio_tokens);
+                audio_tokens->n_tokens = n_tokens;
+                audio_tokens->batch_f32 = std::move(batch_f32);
+                audio_tokens->id = bitmap->id; // optional
+
+                LOG_DBG("audio_tokens->n_tokens = %d\n", audio_tokens->n_tokens);
+
+                mtmd_input_chunk chunk{
+                    MTMD_INPUT_CHUNK_TYPE_AUDIO,
+                    {}, // text tokens
+                    nullptr, // image tokens
+                    std::move(audio_tokens),
+                };
+                cur.entries.emplace_back(std::move(chunk));
+            }
+
+            if (!ctx->aud_end.empty()) {
+                add_text(ctx->aud_end, true); // add audio end token
+            }
+        }
+
+        return 0;
+    }
+
+    std::vector<mtmd_input_chunk> split_batch_to_chunk(clip_image_f32_batch && batch_f32, const std::string & id) {
+        std::vector<mtmd_input_chunk> chunks;
+
+        for (auto & entry : batch_f32.entries) {
+            mtmd_image_tokens_ptr image_tokens(new mtmd_image_tokens);
+            image_tokens->nx = clip_n_output_tokens(ctx->ctx_v, entry.get());
+            image_tokens->ny = 1;
+            image_tokens->batch_f32.entries.push_back(std::move(entry));
+            image_tokens->id = id;
+
+            mtmd_input_chunk chunk{
+                MTMD_INPUT_CHUNK_TYPE_IMAGE,
+                {}, // text tokens
+                std::move(image_tokens),
+                nullptr, // audio tokens
+            };
+            chunks.emplace_back(std::move(chunk));
+        }
+
+        return chunks;
+    }
+
+    // for example: "a <__media__> b <__media__> c" --> "a", "<__media__>", "b", "<__media__>", "c"
+    static std::vector<std::string> split_text(const std::string & input, const std::string & delimiter) {
+        std::vector<std::string> result;
+        if (input.empty()) {
+            return result;
+        }
+        size_t start = 0;
+        size_t pos = 0;
+        while ((pos = input.find(delimiter, start)) != std::string::npos) {
+            if (pos > start) {
+                result.push_back(input.substr(start, pos - start));
+            }
+            result.push_back(delimiter);
+            start = pos + delimiter.length();
+        }
+        if (start < input.length()) {
+            result.push_back(input.substr(start));
+        }
+        return result;
+    }
+
+    // copied from common_tokenize
+    static std::vector<llama_token> mtmd_tokenize_text_internal(
+        const struct llama_vocab * vocab,
+               const std::string & text,
+                            bool   add_special,
+                            bool   parse_special) {
+        // upper limit for the number of tokens
+        int n_tokens = text.length() + 2 * add_special;
+        std::vector<llama_token> result(n_tokens);
+        n_tokens = llama_tokenize(vocab, text.data(), text.length(), result.data(), result.size(), add_special, parse_special);
+        if (n_tokens < 0) {
+            result.resize(-n_tokens);
+            int check = llama_tokenize(vocab, text.data(), text.length(), result.data(), result.size(), add_special, parse_special);
+            GGML_ASSERT(check == -n_tokens);
+        } else {
+            result.resize(n_tokens);
+        }
+        return result;
+    }
+};
+
+int32_t mtmd_tokenize(mtmd_context * ctx,
+            mtmd_input_chunks * output,
+            const mtmd_input_text * text,
+            const mtmd_bitmap ** bitmaps,
+            size_t n_bitmaps) {
+    mtmd_tokenizer tokenizer(ctx, text, bitmaps, n_bitmaps);
+    return tokenizer.tokenize(output);
+}
+
+int32_t mtmd_encode_chunk(mtmd_context * ctx, const mtmd_input_chunk * chunk) {
+    if (chunk->type == MTMD_INPUT_CHUNK_TYPE_TEXT) {
+        LOG_WRN("mtmd_encode_chunk has no effect for text chunks\n");
+        return 0;
+    } else if (chunk->type == MTMD_INPUT_CHUNK_TYPE_IMAGE) {
+        if (!ctx->ctx_v) {
+            LOG_ERR("%s: model does not support vision input\n", __func__);
+            return 1;
+        }
+        return mtmd_encode(ctx, chunk->tokens_image.get());
+    } else if (chunk->type == MTMD_INPUT_CHUNK_TYPE_AUDIO) {
+        if (!ctx->ctx_a) {
+            LOG_ERR("%s: model does not support audio input\n", __func__);
+            return 1;
+        }
+        int n_mmproj_embd = ctx->n_embd_text;
+        ctx->image_embd_v.resize(chunk->tokens_audio->n_tokens * n_mmproj_embd);
+        bool ok = clip_image_batch_encode(
+            ctx->ctx_a,
+            ctx->n_threads,
+            &chunk->tokens_audio->batch_f32,
+            ctx->image_embd_v.data());
+        return ok ? 0 : 1;
+    }
+
+    LOG_ERR("%s: unknown chunk type %d\n", __func__, (int)chunk->type);
+    return 1;
+}
+
+int32_t mtmd_encode(mtmd_context * ctx, const mtmd_image_tokens * image_tokens) {
+    clip_ctx * ctx_clip = ctx->ctx_v;
+    if (!ctx_clip) {
+        LOG_ERR("%s: this API does not support non-vision input, please use mtmd_encode_chunk instead\n", __func__);
+        return 1;
+    }
+    int n_mmproj_embd = clip_n_mmproj_embd(ctx_clip);
+    ctx->image_embd_v.resize(image_tokens->n_tokens() * n_mmproj_embd);
+    bool ok = false;
+
+    if (clip_is_llava(ctx_clip) || clip_is_minicpmv(ctx_clip) || clip_is_glm(ctx_clip)) {
+        // TODO @ngxson : llava does not support batched encoding ; this should be fixed inside clip_image_batch_encode()
+        const auto & entries = image_tokens->batch_f32.entries;
+        for (size_t i = 0; i < entries.size(); i++) {
+            int n_tokens_per_image = clip_n_output_tokens(ctx_clip, entries[i].get());
+            ok = clip_image_encode(
+                ctx_clip,
+                ctx->n_threads,
+                entries[i].get(),
+                ctx->image_embd_v.data() + i*n_mmproj_embd*n_tokens_per_image);
+        }
+    } else {
+        ok = clip_image_batch_encode(
+            ctx_clip,
+            ctx->n_threads,
+            &image_tokens->batch_f32,
+            ctx->image_embd_v.data());
+    }
+
+    return ok ? 0 : 1;
+}
+
+float * mtmd_get_output_embd(mtmd_context * ctx) {
+    return ctx->image_embd_v.data();
+}
+
+bool mtmd_decode_use_non_causal(mtmd_context * ctx) {
+    if (ctx->ctx_v && clip_get_projector_type(ctx->ctx_v) == PROJECTOR_TYPE_GEMMA3) {
+        return true;
+    }
+    return false;
+}
+
+bool mtmd_decode_use_mrope(mtmd_context * ctx) {
+    return ctx->use_mrope;
+}
+
+bool mtmd_support_vision(mtmd_context * ctx) {
+    return ctx->ctx_v != nullptr;
+}
+
+bool mtmd_support_audio(mtmd_context * ctx) {
+    return ctx->ctx_a != nullptr;
+}
+
+int mtmd_get_audio_bitrate(mtmd_context * ctx) {
+    if (!ctx->ctx_a) {
+        return -1;
+    }
+    // for now, we assume that all audio models have the same bitrate
+    return 16000; // 16kHz
+}
+
+//
+// public API functions
+//
+
+// mtmd_bitmap
+
+mtmd_bitmap * mtmd_bitmap_init(uint32_t nx,
+                               uint32_t ny,
+                               const unsigned char * data) {
+    mtmd_bitmap * bitmap = new mtmd_bitmap;
+    bitmap->nx = nx;
+    bitmap->ny = ny;
+    size_t data_size = (size_t)nx * ny * 3;
+    bitmap->data.resize(data_size);
+    std::memcpy(bitmap->data.data(), data, data_size);
+    return bitmap;
+}
+
+mtmd_bitmap * mtmd_bitmap_init_from_audio(size_t n_samples,
+                                          const float * data) {
+    mtmd_bitmap * bitmap = new mtmd_bitmap;
+    bitmap->nx = n_samples;
+    bitmap->ny = 1;
+    bitmap->is_audio = true;
+    size_t data_size = n_samples * sizeof(float);
+    bitmap->data.resize(data_size);
+    std::memcpy(bitmap->data.data(), data, data_size);
+    return bitmap;
+}
+
+uint32_t mtmd_bitmap_get_nx(const mtmd_bitmap * bitmap) {
+    return bitmap->nx;
+}
+
+uint32_t mtmd_bitmap_get_ny(const mtmd_bitmap * bitmap) {
+    return bitmap->ny;
+}
+
+const unsigned char * mtmd_bitmap_get_data(const mtmd_bitmap * bitmap) {
+    return bitmap->data.data();
+}
+
+size_t mtmd_bitmap_get_n_bytes(const mtmd_bitmap * bitmap) {
+    return bitmap->data.size();
+}
+
+bool mtmd_bitmap_is_audio(const mtmd_bitmap * bitmap) {
+    return bitmap->is_audio;
+}
+
+const char * mtmd_bitmap_get_id(const mtmd_bitmap * bitmap) {
+    return bitmap->id.c_str();
+}
+
+void mtmd_bitmap_set_id(mtmd_bitmap * bitmap, const char * id) {
+    if (id) {
+        bitmap->id = std::string(id);
+    } else {
+        bitmap->id.clear();
+    }
+}
+
+void mtmd_bitmap_free(mtmd_bitmap * bitmap) {
+    if (bitmap) {
+        delete bitmap;
+    }
+}
+
+// mtmd_input_chunks
+
+mtmd_input_chunks * mtmd_input_chunks_init() {
+    return new mtmd_input_chunks;
+}
+
+size_t mtmd_input_chunks_size(const mtmd_input_chunks * chunks) {
+    return chunks->entries.size();
+}
+
+const mtmd_input_chunk * mtmd_input_chunks_get(const mtmd_input_chunks * chunks, size_t idx) {
+    if (idx >= chunks->entries.size()) {
+        return nullptr;
+    }
+    return &chunks->entries[idx];
+}
+
+void mtmd_input_chunks_free(mtmd_input_chunks * chunks) {
+    if (chunks) {
+        delete chunks;
+    }
+}
+
+// mtmd_input_chunk
+
+enum mtmd_input_chunk_type mtmd_input_chunk_get_type(const mtmd_input_chunk * chunk) {
+    return chunk->type;
+}
+
+const llama_token * mtmd_input_chunk_get_tokens_text(const mtmd_input_chunk * chunk, size_t * n_tokens_output) {
+    if (chunk->type == MTMD_INPUT_CHUNK_TYPE_TEXT) {
+        *n_tokens_output = chunk->tokens_text.size();
+        return chunk->tokens_text.data();
+    }
+    *n_tokens_output = 0;
+    return nullptr;
+}
+
+const mtmd_image_tokens * mtmd_input_chunk_get_tokens_image(const mtmd_input_chunk * chunk) {
+    if (chunk->type == MTMD_INPUT_CHUNK_TYPE_IMAGE) {
+        return chunk->tokens_image.get();
+    }
+    return nullptr;
+}
+
+size_t mtmd_input_chunk_get_n_tokens(const mtmd_input_chunk * chunk) {
+    if (chunk->type == MTMD_INPUT_CHUNK_TYPE_TEXT) {
+        return chunk->tokens_text.size();
+    } else if (chunk->type == MTMD_INPUT_CHUNK_TYPE_IMAGE) {
+        return mtmd_image_tokens_get_n_tokens(chunk->tokens_image.get());
+    } else if (chunk->type == MTMD_INPUT_CHUNK_TYPE_AUDIO) {
+        return chunk->tokens_audio->n_tokens;
+    } else {
+        GGML_ABORT("invalid chunk type");
+    }
+}
+
+llama_pos mtmd_input_chunk_get_n_pos(const mtmd_input_chunk * chunk) {
+    if (chunk->type == MTMD_INPUT_CHUNK_TYPE_TEXT) {
+        return chunk->tokens_text.size();
+    } else if (chunk->type == MTMD_INPUT_CHUNK_TYPE_IMAGE) {
+        return mtmd_image_tokens_get_n_pos(chunk->tokens_image.get());
+    } else if (chunk->type == MTMD_INPUT_CHUNK_TYPE_AUDIO) {
+        return chunk->tokens_audio->n_tokens;
+    } else {
+        GGML_ABORT("invalid chunk type");
+    }
+}
+
+const char * mtmd_input_chunk_get_id(const mtmd_input_chunk * chunk) {
+    if (chunk->type == MTMD_INPUT_CHUNK_TYPE_IMAGE) {
+        return chunk->tokens_image->id.c_str();
+    } else if (chunk->type == MTMD_INPUT_CHUNK_TYPE_AUDIO) {
+        return chunk->tokens_audio->id.c_str();
+    }
+    return nullptr;
+}
+
+mtmd_input_chunk * mtmd_input_chunk_copy(const mtmd_input_chunk * chunk) {
+    mtmd_input_chunk * copy = new mtmd_input_chunk{
+        chunk->type,
+        chunk->tokens_text,
+        nullptr,
+        nullptr,
+    };
+    if (chunk->tokens_image) {
+        // copy the image tokens
+        copy->tokens_image = mtmd_image_tokens_ptr(new mtmd_image_tokens());
+        *copy->tokens_image = chunk->tokens_image->clone();
+    }
+    if (chunk->tokens_audio) {
+        // copy the audio tokens
+        copy->tokens_audio = mtmd_audio_tokens_ptr(new mtmd_audio_tokens());
+        *copy->tokens_audio = chunk->tokens_audio->clone();
+    }
+    return copy;
+}
+
+void mtmd_input_chunk_free(mtmd_input_chunk * chunk) {
+    if (chunk) {
+        delete chunk;
+    }
+}
+
+// mtmd_image_tokens
+
+size_t mtmd_image_tokens_get_n_tokens(const mtmd_image_tokens * image_tokens) {
+    return image_tokens->n_tokens();
+}
+
+size_t mtmd_image_tokens_get_nx(const mtmd_image_tokens * image_tokens) {
+    return image_tokens->nx;
+}
+
+size_t mtmd_image_tokens_get_ny(const mtmd_image_tokens * image_tokens) {
+    return image_tokens->ny;
+}
+
+const char * mtmd_image_tokens_get_id(const mtmd_image_tokens * image_tokens) {
+    return image_tokens->id.c_str();
+}
+
+llama_pos mtmd_image_tokens_get_n_pos(const mtmd_image_tokens * image_tokens) {
+    if (image_tokens->use_mrope_pos) {
+        return 1; // for M-RoPE, the whole image is 1 in temporal dimension
+    }
+    return image_tokens->n_tokens();
+}
+
+// test function
+
+mtmd_input_chunks * mtmd_test_create_input_chunks() {
+    mtmd_input_chunks * chunks = mtmd_input_chunks_init();
+    if (!chunks) {
+        return nullptr;
+    }
+
+    // create a text chunk
+    std::vector<llama_token> tokens_text = { 1, 2, 3, 4, 5 };
+    mtmd_input_chunk chunk_text{
+        MTMD_INPUT_CHUNK_TYPE_TEXT,
+        std::move(tokens_text),
+        nullptr, // image tokens
+        nullptr, // audio tokens
+    };
+    chunks->entries.emplace_back(std::move(chunk_text));
+
+    // create an image chunk
+    mtmd_image_tokens_ptr image_tokens(new mtmd_image_tokens);
+    image_tokens->nx = 4;
+    image_tokens->ny = 4;
+    image_tokens->batch_f32.entries.resize(16);
+    image_tokens->id = "image_1";
+    mtmd_input_chunk chunk_image{
+        MTMD_INPUT_CHUNK_TYPE_IMAGE,
+        {}, // text tokens
+        std::move(image_tokens),
+        nullptr, // audio tokens
+    };
+    chunks->entries.emplace_back(std::move(chunk_image));
+
+    return chunks;
+}
diff --git a/llama/llama.cpp/tools/mtmd/mtmd.go b/llama/llama.cpp/tools/mtmd/mtmd.go
index 064790368..63212135b 100644
--- a/llama/llama.cpp/tools/mtmd/mtmd.go
+++ b/llama/llama.cpp/tools/mtmd/mtmd.go
@@ -1,6 +1,6 @@
 package mtmd
 
-// #cgo CXXFLAGS: -std=c++11
-// #cgo CPPFLAGS: -I${SRCDIR}/../../include -I${SRCDIR}/../../common
+// #cgo CXXFLAGS: -std=c++17
+// #cgo CPPFLAGS: -I${SRCDIR}/../../include -I${SRCDIR}/../../common -I${SRCDIR}/../../vendor
 // #cgo CPPFLAGS: -I${SRCDIR}/../../../../ml/backend/ggml/ggml/include
 import "C"
diff --git a/llama/llama.cpp/tools/mtmd/mtmd.h b/llama/llama.cpp/tools/mtmd/mtmd.h
new file mode 100644
index 000000000..cf287224b
--- /dev/null
+++ b/llama/llama.cpp/tools/mtmd/mtmd.h
@@ -0,0 +1,301 @@
+#ifndef MTMD_H
+#define MTMD_H
+
+#include "ggml.h"
+#include "llama.h"
+
+#include <stddef.h>
+#include <stdint.h>
+#include <stdbool.h>
+
+#ifdef __cplusplus
+#include <string>
+#include <vector>
+#include <cstring>
+#include <memory>
+#endif
+
+/**
+ * libmtmd: A library for multimodal support in llama.cpp.
+ *
+ * WARNING: This API is experimental and subject to many BREAKING CHANGES.
+ *          Issues related to API usage may receive lower priority support.
+ *
+ * For the usage, see an example in mtmd-cli.cpp
+ */
+
+#ifdef LLAMA_SHARED
+#    if defined(_WIN32) && !defined(__MINGW32__)
+#        ifdef LLAMA_BUILD
+#            define MTMD_API __declspec(dllexport)
+#        else
+#            define MTMD_API __declspec(dllimport)
+#        endif
+#    else
+#        define MTMD_API __attribute__ ((visibility ("default")))
+#    endif
+#else
+#    define MTMD_API
+#endif
+
+// deprecated marker, use mtmd_default_marker() instead
+#define MTMD_DEFAULT_IMAGE_MARKER "<__image__>"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+enum mtmd_input_chunk_type {
+    MTMD_INPUT_CHUNK_TYPE_TEXT,
+    MTMD_INPUT_CHUNK_TYPE_IMAGE,
+    MTMD_INPUT_CHUNK_TYPE_AUDIO,
+};
+
+// opaque types
+struct mtmd_context;
+struct mtmd_bitmap;
+struct mtmd_image_tokens;
+struct mtmd_input_chunk;
+struct mtmd_input_chunks;
+
+struct mtmd_input_text {
+    const char * text;
+    bool add_special;
+    bool parse_special;
+};
+
+//
+// C API
+//
+
+typedef struct mtmd_context      mtmd_context;
+typedef struct mtmd_bitmap       mtmd_bitmap;
+typedef struct mtmd_image_tokens mtmd_image_tokens;
+typedef struct mtmd_input_chunk  mtmd_input_chunk;
+typedef struct mtmd_input_chunks mtmd_input_chunks;
+typedef struct mtmd_input_text   mtmd_input_text;
+
+MTMD_API mtmd_input_text* mtmd_input_text_init(const char * text, bool add_special, bool parse_special);
+MTMD_API void mtmd_input_text_free(mtmd_input_text* input_text);
+
+struct mtmd_context_params {
+    bool use_gpu;
+    bool print_timings;
+    int n_threads;
+    enum ggml_log_level verbosity;
+    const char * image_marker; // deprecated, use media_marker instead
+    const char * media_marker;
+};
+
+MTMD_API const char * mtmd_default_marker(void);
+
+MTMD_API struct mtmd_context_params mtmd_context_params_default(void);
+
+// initialize the mtmd context
+// return nullptr on failure
+MTMD_API mtmd_context * mtmd_init_from_file(const char * mmproj_fname,
+                                            const struct llama_model * text_model,
+                                            const struct mtmd_context_params ctx_params);
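+
+// A minimal initialization sketch, assuming `model` was loaded beforehand and
+// "mmproj.gguf" (an arbitrary example path) is a matching projector file:
+//
+//     mtmd_context_params params = mtmd_context_params_default();
+//     mtmd_context * ctx = mtmd_init_from_file("mmproj.gguf", model, params);
+//     if (!ctx) { /* failed to load the projector */ }
+//     // ... use ctx ...
+//     mtmd_free(ctx);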
+
+MTMD_API void mtmd_free(mtmd_context * ctx);
+
+// whether we need to set non-causal mask before llama_decode
+MTMD_API bool mtmd_decode_use_non_causal(mtmd_context * ctx);
+
+// whether the current model use M-RoPE for llama_decode
+MTMD_API bool mtmd_decode_use_mrope(mtmd_context * ctx);
+
+// whether the current model supports vision input
+MTMD_API bool mtmd_support_vision(mtmd_context * ctx);
+
+// whether the current model supports audio input
+MTMD_API bool mtmd_support_audio(mtmd_context * ctx);
+
+// get audio bitrate in Hz, for example 16000 for Whisper
+// return -1 if audio is not supported
+MTMD_API int mtmd_get_audio_bitrate(mtmd_context * ctx);
+
+// mtmd_bitmap
+//
+// if bitmap is image:
+//     length of data must be nx * ny * 3
+//     the data is in RGBRGBRGB... format
+// if bitmap is audio:
+//     length of data must be n_samples * sizeof(float)
+//     the data is in float format (PCM F32)
+MTMD_API mtmd_bitmap *         mtmd_bitmap_init           (uint32_t nx, uint32_t ny, const unsigned char * data);
+MTMD_API mtmd_bitmap *         mtmd_bitmap_init_from_audio(size_t n_samples,         const float         * data);
+MTMD_API uint32_t              mtmd_bitmap_get_nx     (const mtmd_bitmap * bitmap);
+MTMD_API uint32_t              mtmd_bitmap_get_ny     (const mtmd_bitmap * bitmap);
+MTMD_API const unsigned char * mtmd_bitmap_get_data   (const mtmd_bitmap * bitmap);
+MTMD_API size_t                mtmd_bitmap_get_n_bytes(const mtmd_bitmap * bitmap);
+MTMD_API bool                  mtmd_bitmap_is_audio   (const mtmd_bitmap * bitmap);
+MTMD_API void                  mtmd_bitmap_free       (mtmd_bitmap * bitmap);
+// bitmap ID is optional, but useful for KV cache tracking
+// these getters/setters are dedicated functions, so you can for example calculate the hash of the image based on mtmd_bitmap_get_data()
+MTMD_API const char * mtmd_bitmap_get_id(const mtmd_bitmap * bitmap);
+MTMD_API void         mtmd_bitmap_set_id(mtmd_bitmap * bitmap, const char * id);
+
+
+// mtmd_input_chunks
+//
+// this is simply a list of mtmd_input_chunk
+// the elements can only be populated via mtmd_tokenize()
+MTMD_API mtmd_input_chunks *      mtmd_input_chunks_init(void);
+MTMD_API size_t                   mtmd_input_chunks_size(const mtmd_input_chunks * chunks);
+MTMD_API const mtmd_input_chunk * mtmd_input_chunks_get (const mtmd_input_chunks * chunks, size_t idx);
+MTMD_API void                     mtmd_input_chunks_free(mtmd_input_chunks * chunks);
+
+// mtmd_input_chunk
+//
+// the instance will be constructed via mtmd_tokenize()
+// it will be freed along with mtmd_input_chunks
+MTMD_API enum mtmd_input_chunk_type mtmd_input_chunk_get_type        (const mtmd_input_chunk * chunk);
+MTMD_API const llama_token *        mtmd_input_chunk_get_tokens_text (const mtmd_input_chunk * chunk, size_t * n_tokens_output);
+MTMD_API const mtmd_image_tokens *  mtmd_input_chunk_get_tokens_image(const mtmd_input_chunk * chunk);
+MTMD_API size_t                     mtmd_input_chunk_get_n_tokens    (const mtmd_input_chunk * chunk);
+// returns nullptr for ID on text chunk
+MTMD_API const char *               mtmd_input_chunk_get_id          (const mtmd_input_chunk * chunk);
+// number of temporal positions (always 1 for M-RoPE, n_tokens otherwise)
+MTMD_API llama_pos                  mtmd_input_chunk_get_n_pos       (const mtmd_input_chunk * chunk);
+
+// in case you want to use custom logic to handle the chunk (i.e. KV cache management)
+// you can move the chunk ownership to your own code by copying it
+// remember to free the chunk when you are done with it
+MTMD_API mtmd_input_chunk * mtmd_input_chunk_copy(const mtmd_input_chunk * chunk);
+MTMD_API void               mtmd_input_chunk_free(mtmd_input_chunk * chunk);
+
+
+// mtmd_image_tokens
+//
+// the instance will be constructed via mtmd_tokenize()
+// it will be freed along with mtmd_input_chunk
+MTMD_API size_t       mtmd_image_tokens_get_n_tokens(const mtmd_image_tokens * image_tokens); // TODO: deprecate
+MTMD_API size_t       mtmd_image_tokens_get_nx      (const mtmd_image_tokens * image_tokens);
+MTMD_API size_t       mtmd_image_tokens_get_ny      (const mtmd_image_tokens * image_tokens);
+MTMD_API const char * mtmd_image_tokens_get_id      (const mtmd_image_tokens * image_tokens); // TODO: deprecate
+// number of temporal positions (always 1 for M-RoPE, n_tokens otherwise)
+MTMD_API llama_pos    mtmd_image_tokens_get_n_pos   (const mtmd_image_tokens * image_tokens); // TODO: deprecate
+
+// tokenize an input text prompt and a list of bitmaps (images/audio)
+// the prompt must have the input image marker (default: "<__media__>") in it
+// the default marker is defined by mtmd_default_marker()
+// the marker will be replaced with the image/audio chunk
+// for example:
+//   "here is an image: <__media__>\ndescribe it in detail."
+//   this will give 3 chunks:
+//   1. "here is an image: "
+//   2. (image/audio tokens)
+//   3. "\ndescribe it in detail."
+// number of bitmaps must be equal to the number of markers in the prompt
+// this function is thread-safe (shared ctx)
+// return values:
+//   0 on success
+//   1 on number of bitmaps not matching the number of markers
+//   2 on image preprocessing error
+MTMD_API int32_t mtmd_tokenize(mtmd_context * ctx,
+                               mtmd_input_chunks * output,
+                               const mtmd_input_text * text,
+                               const mtmd_bitmap ** bitmaps,
+                               size_t n_bitmaps);
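+
+// a minimal usage sketch (illustrative only; "mmproj.gguf" and the prompt are
+// placeholders, and `bmp` stands in for a bitmap created with mtmd_bitmap_init()):
+//
+//   mtmd_context_params params = mtmd_context_params_default();
+//   mtmd_context * mctx = mtmd_init_from_file("mmproj.gguf", text_model, params);
+//   mtmd_input_text text = { "describe this: <__media__>", /*add_special*/ true, /*parse_special*/ true };
+//   mtmd_input_chunks * chunks = mtmd_input_chunks_init();
+//   const mtmd_bitmap * bitmaps[] = { bmp };   // one bitmap per marker in the prompt
+//   if (mtmd_tokenize(mctx, chunks, &text, bitmaps, 1) != 0) { /* handle error */ }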
+
+// returns 0 on success
+// TODO: deprecate
+MTMD_API int32_t mtmd_encode(mtmd_context * ctx,
+                             const mtmd_image_tokens * image_tokens);
+
+// returns 0 on success
+MTMD_API int32_t mtmd_encode_chunk(mtmd_context * ctx,
+                                   const mtmd_input_chunk * chunk);
+
+// get output embeddings from the last encode pass
+// the reading size (in bytes) is equal to:
+// llama_model_n_embd(model) * mtmd_input_chunk_get_n_tokens(chunk) * sizeof(float)
+MTMD_API float * mtmd_get_output_embd(mtmd_context * ctx);
+
+/////////////////////////////////////////
+
+// test function, to be used in test-mtmd-c-api.c
+MTMD_API mtmd_input_chunks * mtmd_test_create_input_chunks(void);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+//
+// C++ wrappers
+//
+
+#ifdef __cplusplus
+
+namespace mtmd {
+
+struct mtmd_context_deleter {
+    void operator()(mtmd_context * val) { mtmd_free(val); }
+};
+using context_ptr = std::unique_ptr<mtmd_context, mtmd_context_deleter>;
+
+struct mtmd_bitmap_deleter {
+    void operator()(mtmd_bitmap * val) { mtmd_bitmap_free(val); }
+};
+using bitmap_ptr = std::unique_ptr<mtmd_bitmap, mtmd_bitmap_deleter>;
+
+struct mtmd_input_chunks_deleter {
+    void operator()(mtmd_input_chunks * val) { mtmd_input_chunks_free(val); }
+};
+using input_chunks_ptr = std::unique_ptr<mtmd_input_chunks, mtmd_input_chunks_deleter>;
+
+struct mtmd_input_chunk_deleter {
+    void operator()(mtmd_input_chunk * val) { mtmd_input_chunk_free(val); }
+};
+using input_chunk_ptr = std::unique_ptr<mtmd_input_chunk, mtmd_input_chunk_deleter>;
+
+struct bitmap {
+    bitmap_ptr ptr;
+    bitmap() : ptr(nullptr) {}
+    bitmap(mtmd_bitmap * bitmap) : ptr(bitmap) {}
+    bitmap(bitmap && other) noexcept : ptr(std::move(other.ptr)) {}
+    bitmap(uint32_t nx, uint32_t ny, const unsigned char * data) {
+        ptr.reset(mtmd_bitmap_init(nx, ny, data));
+    }
+    ~bitmap() = default;
+    uint32_t nx() { return mtmd_bitmap_get_nx(ptr.get()); }
+    uint32_t ny() { return mtmd_bitmap_get_ny(ptr.get()); }
+    const unsigned char * data() { return mtmd_bitmap_get_data(ptr.get()); }
+    size_t n_bytes() { return mtmd_bitmap_get_n_bytes(ptr.get()); }
+    std::string id() { return mtmd_bitmap_get_id(ptr.get()); }
+    void set_id(const char * id) { mtmd_bitmap_set_id(ptr.get(), id); }
+};
+
+struct bitmaps {
+    std::vector<bitmap> entries;
+    ~bitmaps() = default;
+    // return list of pointers to mtmd_bitmap
+    // example:
+    //   auto bitmaps_c_ptr = bitmaps.c_ptr();
+    //   int32_t res = mtmd_tokenize(... bitmaps_c_ptr.data(), bitmaps_c_ptr.size());
+    std::vector<const mtmd_bitmap *> c_ptr() {
+        std::vector<const mtmd_bitmap *> res(entries.size());
+        for (size_t i = 0; i < entries.size(); i++) {
+            res[i] = entries[i].ptr.get();
+        }
+        return res;
+    }
+};
+
+struct input_chunks {
+    input_chunks_ptr ptr;
+    input_chunks() = default;
+    input_chunks(mtmd_input_chunks * chunks) : ptr(chunks) {}
+    ~input_chunks() = default;
+    size_t size() { return mtmd_input_chunks_size(ptr.get()); }
+    const mtmd_input_chunk * operator[](size_t idx) {
+        return mtmd_input_chunks_get(ptr.get(), idx);
+    }
+};
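+
+// usage sketch for the wrappers above (illustrative only; `ctx` and `text` are
+// assumed to exist, and nx/ny/rgb describe a caller-provided RGB image):
+//
+//   mtmd::bitmaps bmps;
+//   bmps.entries.emplace_back(mtmd_bitmap_init(nx, ny, rgb));
+//   mtmd::input_chunks chunks(mtmd_input_chunks_init());
+//   auto bitmaps_c_ptr = bmps.c_ptr();
+//   int32_t res = mtmd_tokenize(ctx, chunks.ptr.get(), &text,
+//                               bitmaps_c_ptr.data(), bitmaps_c_ptr.size());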
+
+} // namespace mtmd
+
+#endif
+
+#endif
diff --git a/llama/llama.cpp/vendor/miniaudio/miniaudio.h b/llama/llama.cpp/vendor/miniaudio/miniaudio.h
new file mode 100644
index 000000000..c74bebeb3
--- /dev/null
+++ b/llama/llama.cpp/vendor/miniaudio/miniaudio.h
@@ -0,0 +1,93468 @@
+/*
+Audio playback and capture library. Choice of public domain or MIT-0. See license statements at the end of this file.
+miniaudio - v0.11.22 - 2025-02-24
+
+David Reid - mackron@gmail.com
+
+Website:       https://miniaud.io
+Documentation: https://miniaud.io/docs
+GitHub:        https://github.com/mackron/miniaudio
+*/
+
+/*
+1. Introduction
+===============
+To use miniaudio, include "miniaudio.h":
+
+    ```c
+    #include "miniaudio.h"
+    ```
+
+The implementation is contained in "miniaudio.c". Just compile this like any other source file. You
+can include miniaudio.c if you want to compile your project as a single translation unit:
+
+    ```c
+    #include "miniaudio.c"
+    ```
+
+miniaudio includes both low level and high level APIs. The low level API is good for those who want
+to do all of their mixing themselves and only require a light weight interface to the underlying
+audio device. The high level API is good for those who have complex mixing and effect requirements.
+
+In miniaudio, objects are transparent structures. Unlike many other libraries, there are no handles
+to opaque objects which means you need to allocate memory for objects yourself. In the examples
+presented in this documentation you will often see objects declared on the stack. You need to be
+careful when translating these examples to your own code so that you don't accidentally declare
+your objects on the stack and then cause them to become invalid once the function returns. In
+addition, you must ensure the memory addresses of your objects remain the same throughout their
+lifetimes. You therefore cannot make copies of your objects.
+
+A config/init pattern is used throughout the entire library. The idea is that you set up a config
+object and pass that into the initialization routine. The advantage to this system is that the
+config object can be initialized with logical defaults and new properties added to it without
+breaking the API. The config object can be allocated on the stack and does not need to be
+maintained after initialization of the corresponding object.
+
+
+1.1. Low Level API
+------------------
+The low level API gives you access to the raw audio data of an audio device. It supports playback,
+capture, full-duplex and loopback (WASAPI only). You can enumerate over devices to determine which
+physical device(s) you want to connect to.
+
+The low level API uses the concept of a "device" as the abstraction for physical devices. The idea
+is that you choose a physical device to emit or capture audio from, and then move data to/from the
+device when miniaudio tells you to. Data is delivered to and from devices asynchronously via a
+callback which you specify when initializing the device.
+
+When initializing the device you first need to configure it. The device configuration allows you to
+specify things like the format of the data delivered via the callback, the size of the internal
+buffer and the ID of the device you want to emit or capture audio from.
+
+Once you have the device configuration set up you can initialize the device. When initializing a
+device you need to allocate memory for the device object beforehand. This gives the application
+complete control over how the memory is allocated. In the example below we initialize a playback
+device on the stack, but you could allocate it on the heap if that suits your situation better.
+
+    ```c
+    void data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount)
+    {
+        // In playback mode copy data to pOutput. In capture mode read data from pInput. In full-duplex mode, both
+        // pOutput and pInput will be valid and you can move data from pInput into pOutput. Never process more than
+        // frameCount frames.
+    }
+
+    int main()
+    {
+        ma_device_config config = ma_device_config_init(ma_device_type_playback);
+        config.playback.format   = ma_format_f32;   // Set to ma_format_unknown to use the device's native format.
+        config.playback.channels = 2;               // Set to 0 to use the device's native channel count.
+        config.sampleRate        = 48000;           // Set to 0 to use the device's native sample rate.
+        config.dataCallback      = data_callback;   // This function will be called when miniaudio needs more data.
+        config.pUserData         = pMyCustomData;   // Can be accessed from the device object (device.pUserData).
+
+        ma_device device;
+        if (ma_device_init(NULL, &config, &device) != MA_SUCCESS) {
+            return -1;  // Failed to initialize the device.
+        }
+
+        ma_device_start(&device);     // The device is sleeping by default so you'll need to start it manually.
+
+        // Do something here. Probably your program's main loop.
+
+        ma_device_uninit(&device);
+        return 0;
+    }
+    ```
+
+In the example above, `data_callback()` is where audio data is written and read from the device.
+The idea is in playback mode you cause sound to be emitted from the speakers by writing audio data
+to the output buffer (`pOutput` in the example). In capture mode you read data from the input
+buffer (`pInput`) to extract sound captured by the microphone. The `frameCount` parameter tells you
+how many frames can be written to the output buffer and read from the input buffer. A "frame" is
+one sample for each channel. For example, in a stereo stream (2 channels), one frame is 2
+samples: one for the left, one for the right. The channel count is defined by the device config.
+The size in bytes of an individual sample is defined by the sample format which is also specified
+in the device config. Multi-channel audio data is always interleaved, which means the samples for
+each frame are stored next to each other in memory. For example, in a stereo stream the first pair
+of samples will be the left and right samples for the first frame, the second pair of samples will
+be the left and right samples for the second frame, etc.
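+
+To make the indexing concrete, here is a sketch (assuming `ma_format_f32` and 2 channels, as in
+the config above) of how a callback would address the individual samples of each interleaved
+frame:
+
+    ```c
+    void data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount)
+    {
+        float* pOut = (float*)pOutput;      // Valid because the playback format is ma_format_f32.
+        for (ma_uint32 iFrame = 0; iFrame < frameCount; iFrame += 1) {
+            pOut[iFrame*2 + 0] = 0.0f;      // Left sample of this frame.
+            pOut[iFrame*2 + 1] = 0.0f;      // Right sample of this frame.
+        }
+        (void)pDevice; (void)pInput;
+    }
+    ```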
+
+The configuration of the device is defined by the `ma_device_config` structure. The config object
+is always initialized with `ma_device_config_init()`. It's important to always initialize the
+config with this function as it initializes it with logical defaults and ensures your program
+doesn't break when new members are added to the `ma_device_config` structure. The example above
+uses a fairly simple and standard device configuration. The call to `ma_device_config_init()` takes
+a single parameter, which is whether or not the device is a playback, capture, duplex or loopback
+device (loopback devices are not supported on all backends). The `config.playback.format` member
+sets the sample format which can be one of the following (all formats are native-endian):
+
+    +---------------+----------------------------------------+---------------------------+
+    | Symbol        | Description                            | Range                     |
+    +---------------+----------------------------------------+---------------------------+
+    | ma_format_f32 | 32-bit floating point                  | [-1, 1]                   |
+    | ma_format_s16 | 16-bit signed integer                  | [-32768, 32767]           |
+    | ma_format_s24 | 24-bit signed integer (tightly packed) | [-8388608, 8388607]       |
+    | ma_format_s32 | 32-bit signed integer                  | [-2147483648, 2147483647] |
+    | ma_format_u8  | 8-bit unsigned integer                 | [0, 255]                  |
+    +---------------+----------------------------------------+---------------------------+
+
+The `config.playback.channels` member sets the number of channels to use with the device. The
+channel count cannot exceed MA_MAX_CHANNELS. The `config.sampleRate` member sets the sample rate
+(which must be the same for both playback and capture in full-duplex configurations). This is
+usually set to 44100 or 48000, but can be set to anything. It's recommended to keep this between
+8000 and 384000, however.
+
+Note that leaving the format, channel count and/or sample rate at their default values will result
+in the internal device's native configuration being used which is useful if you want to avoid the
+overhead of miniaudio's automatic data conversion.
+
+In addition to the sample format, channel count and sample rate, the data callback and user data
+pointer are also set via the config. The user data pointer is not passed into the callback as a
+parameter, but is instead set to the `pUserData` member of `ma_device` which you can access
+directly since all miniaudio structures are transparent.
+
+Initializing the device is done with `ma_device_init()`. This will return a result code telling you
+what went wrong, if anything. On success it will return `MA_SUCCESS`. After initialization is
+complete the device will be in a stopped state. To start it, use `ma_device_start()`.
+Uninitializing the device will stop it, which is what the example above does, but you can also stop
+the device with `ma_device_stop()`. To resume the device simply call `ma_device_start()` again.
+Note that it's important to never stop or start the device from inside the callback. This will
+result in a deadlock. Instead you set a variable or signal an event indicating that the device
+needs to stop and handle it in a different thread. The following APIs must never be called inside
+the callback:
+
+    ```c
+    ma_device_init()
+    ma_device_init_ex()
+    ma_device_uninit()
+    ma_device_start()
+    ma_device_stop()
+    ```
+
+You must never try uninitializing and reinitializing a device inside the callback. You must also
+never try to stop and start it from inside the callback. There are a few other things you shouldn't
+do in the callback depending on your requirements, however this isn't so much a thread-safety
+thing, but rather a real-time processing thing which is beyond the scope of this introduction.
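+
+One way to structure this (a sketch; `g_wantStop` is a stand-in for whatever signaling mechanism
+you prefer, and should be a proper atomic in real code) is to raise a flag in the callback and act
+on it from another thread:
+
+    ```c
+    volatile ma_bool32 g_wantStop = MA_FALSE;
+
+    void data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount)
+    {
+        if (/* some condition */ 0) {
+            g_wantStop = MA_TRUE;   // Never call ma_device_stop() from here.
+        }
+    }
+
+    // Elsewhere, such as your program's main loop:
+    if (g_wantStop) {
+        ma_device_stop(&device);
+    }
+    ```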
+
+The example above demonstrates the initialization of a playback device, but it works exactly the
+same for capture. All you need to do is change the device type from `ma_device_type_playback` to
+`ma_device_type_capture` when setting up the config, like so:
+
+    ```c
+    ma_device_config config = ma_device_config_init(ma_device_type_capture);
+    config.capture.format   = MY_FORMAT;
+    config.capture.channels = MY_CHANNEL_COUNT;
+    ```
+
+In the data callback you just read from the input buffer (`pInput` in the example above) and leave
+the output buffer alone (it will be set to NULL when the device type is set to
+`ma_device_type_capture`).
+
+These are the available device types and how you should handle the buffers in the callback:
+
+    +-------------------------+--------------------------------------------------------+
+    | Device Type             | Callback Behavior                                      |
+    +-------------------------+--------------------------------------------------------+
+    | ma_device_type_playback | Write to output buffer, leave input buffer untouched.  |
+    | ma_device_type_capture  | Read from input buffer, leave output buffer untouched. |
+    | ma_device_type_duplex   | Read from input buffer, write to output buffer.        |
+    | ma_device_type_loopback | Read from input buffer, leave output buffer untouched. |
+    +-------------------------+--------------------------------------------------------+
+
+You will notice in the example above that the sample format and channel count are specified
+separately for playback and capture. This is to support different data formats between the playback
+and capture devices in a full-duplex system. An example may be that you want to capture audio data
+as a monaural stream (one channel), but output sound to a stereo speaker system. Note that if you
+use different formats between playback and capture in a full-duplex configuration you will need to
+convert the data yourself. There are functions available to help you do this which will be
+explained later.
+
+The example above did not specify a physical device to connect to which means it will use the
+operating system's default device. If you have multiple physical devices connected and you want to
+use a specific one you will need to specify the device ID in the configuration, like so:
+
+    ```c
+    config.playback.pDeviceID = pMyPlaybackDeviceID;    // Only if requesting a playback or duplex device.
+    config.capture.pDeviceID = pMyCaptureDeviceID;      // Only if requesting a capture, duplex or loopback device.
+    ```
+
+To retrieve the device ID you will need to perform device enumeration, however this requires the
+use of a new concept called the "context". Conceptually speaking the context sits above the device.
+There is one context to many devices. The purpose of the context is to represent the backend at a
+more global level and to perform operations outside the scope of an individual device. Mainly it is
+used for performing run-time linking against backend libraries, initializing backends and
+enumerating devices. The example below shows how to enumerate devices.
+
+    ```c
+    ma_context context;
+    if (ma_context_init(NULL, 0, NULL, &context) != MA_SUCCESS) {
+        // Error.
+    }
+
+    ma_device_info* pPlaybackInfos;
+    ma_uint32 playbackCount;
+    ma_device_info* pCaptureInfos;
+    ma_uint32 captureCount;
+    if (ma_context_get_devices(&context, &pPlaybackInfos, &playbackCount, &pCaptureInfos, &captureCount) != MA_SUCCESS) {
+        // Error.
+    }
+
+    // Loop over each device info and do something with it. Here we just print the name with their index. You may want
+    // to give the user the opportunity to choose which device they'd prefer.
+    for (ma_uint32 iDevice = 0; iDevice < playbackCount; iDevice += 1) {
+        printf("%d - %s\n", iDevice, pPlaybackInfos[iDevice].name);
+    }
+
+    ma_device_config config = ma_device_config_init(ma_device_type_playback);
+    config.playback.pDeviceID = &pPlaybackInfos[chosenPlaybackDeviceIndex].id;
+    config.playback.format    = MY_FORMAT;
+    config.playback.channels  = MY_CHANNEL_COUNT;
+    config.sampleRate         = MY_SAMPLE_RATE;
+    config.dataCallback       = data_callback;
+    config.pUserData          = pMyCustomData;
+
+    ma_device device;
+    if (ma_device_init(&context, &config, &device) != MA_SUCCESS) {
+        // Error
+    }
+
+    ...
+
+    ma_device_uninit(&device);
+    ma_context_uninit(&context);
+    ```
+
+The first thing we do in this example is initialize a `ma_context` object with `ma_context_init()`.
+The first parameter is a pointer to a list of `ma_backend` values which are used to override the
+default backend priorities. When this is NULL, as in this example, miniaudio's default priorities
+are used. The second parameter is the number of backends listed in the array pointed to by the
+first parameter. The third parameter is a pointer to a `ma_context_config` object which can be
+NULL, in which case defaults are used. The context configuration is used for setting the logging
+callback, custom memory allocation callbacks, user-defined data and some backend-specific
+configurations.
+
+Once the context has been initialized you can enumerate devices. In the example above we use the
+simpler `ma_context_get_devices()`, however you can also use a callback for handling devices by
+using `ma_context_enumerate_devices()`. When using `ma_context_get_devices()` you provide a pointer
+to a pointer that will, upon output, be set to a pointer to a buffer containing a list of
+`ma_device_info` structures. You also provide a pointer to an unsigned integer that will receive
+the number of items in the returned buffer. Do not free the returned buffers as their memory is
+managed internally by miniaudio.
+
+The `ma_device_info` structure contains an `id` member which is the ID you pass to the device
+config. It also contains the name of the device which is useful for presenting a list of devices
+to the user via the UI.
+
+When creating your own context you will want to pass it to `ma_device_init()` when initializing the
+device. Passing in NULL, like we do in the first example, will result in miniaudio creating the
+context for you, which you don't want to do since you've already created a context. Note that
+internally the context is only tracked by its pointer, which means you must not change the location
+of the `ma_context` object. If this is an issue, consider using `malloc()` to allocate memory for
+the context.
+
+
+1.2. High Level API
+-------------------
+The high level API consists of three main parts:
+
+  * Resource management for loading and streaming sounds.
+  * A node graph for advanced mixing and effect processing.
+  * A high level "engine" that wraps around the resource manager and node graph.
+
+The resource manager (`ma_resource_manager`) is used for loading sounds. It supports loading sounds
+fully into memory and also streaming. It will also deal with reference counting for you which
+avoids the same sound being loaded multiple times.
+
+The node graph is used for mixing and effect processing. The idea is that you connect a number of
+nodes into the graph by connecting each node's outputs to another node's inputs. Each node can
+implement its own effect. By chaining nodes together, advanced mixing and effect processing can
+be achieved.
+
+The engine encapsulates both the resource manager and the node graph to create a simple, easy to
+use high level API. The resource manager and node graph APIs are covered in more detail in later
+sections of this manual.
+
+The code below shows how you can initialize an engine using its default configuration.
+
+    ```c
+    ma_result result;
+    ma_engine engine;
+
+    result = ma_engine_init(NULL, &engine);
+    if (result != MA_SUCCESS) {
+        return result;  // Failed to initialize the engine.
+    }
+    ```
+
+This creates an engine instance which will initialize a device internally which you can access with
+`ma_engine_get_device()`. It will also initialize a resource manager for you which can be accessed
+with `ma_engine_get_resource_manager()`. The engine itself is a node graph (`ma_node_graph`) which
+means you can pass a pointer to the engine object into any of the `ma_node_graph` APIs (with a
+cast). Alternatively, you can use `ma_engine_get_node_graph()` instead of a cast.
+
+Note that all objects in miniaudio, including the `ma_engine` object in the example above, are
+transparent structures. There are no handles to opaque structures in miniaudio which means you need
+to be mindful of how you declare them. In the example above we are declaring it on the stack, but
+this will result in the struct being invalidated once the function encapsulating it returns. If
+allocating the engine on the heap is more appropriate, you can easily do so with a standard call
+to `malloc()` or whatever heap allocation routine you like:
+
+    ```c
+    ma_engine* pEngine = malloc(sizeof(*pEngine));
+    ```
+
+The `ma_engine` API uses the same config/init pattern used all throughout miniaudio. To configure
+an engine, you can fill out a `ma_engine_config` object and pass it into the first parameter of
+`ma_engine_init()`:
+
+    ```c
+    ma_result result;
+    ma_engine engine;
+    ma_engine_config engineConfig;
+
+    engineConfig = ma_engine_config_init();
+    engineConfig.pResourceManager = &myCustomResourceManager;   // <-- Initialized as some earlier stage.
+
+    result = ma_engine_init(&engineConfig, &engine);
+    if (result != MA_SUCCESS) {
+        return result;
+    }
+    ```
+
+This creates an engine instance using a custom config. In this particular example it's showing how
+you can specify a custom resource manager rather than having the engine initialize one internally.
+This is particularly useful if you want to have multiple engines share the same resource manager.
+
+The engine must be uninitialized with `ma_engine_uninit()` when it's no longer needed.
+
+By default the engine will be started, but nothing will be playing because no sounds have been
+initialized. The easiest but least flexible way of playing a sound is like so:
+
+    ```c
+    ma_engine_play_sound(&engine, "my_sound.wav", NULL);
+    ```
+
+This plays what miniaudio calls an "inline" sound. It plays the sound once, and then puts the
+internal sound up for recycling. The last parameter is used to specify which sound group the sound
+should be associated with which will be explained later. This particular way of playing a sound is
+simple, but lacks flexibility and features. A more flexible way of playing a sound is to first
+initialize a sound:
+
+    ```c
+    ma_result result;
+    ma_sound sound;
+
+    result = ma_sound_init_from_file(&engine, "my_sound.wav", 0, NULL, NULL, &sound);
+    if (result != MA_SUCCESS) {
+        return result;
+    }
+
+    ma_sound_start(&sound);
+    ```
+
+This returns a `ma_sound` object which represents a single instance of the specified sound file. If
+you want to play the same file multiple times simultaneously, you need to create one sound for each
+instance.
+
+Sounds should be uninitialized with `ma_sound_uninit()`.
+
+Sounds are not started by default. Start a sound with `ma_sound_start()` and stop it with
+`ma_sound_stop()`. When a sound is stopped, it is not rewound to the start. Use
+`ma_sound_seek_to_pcm_frame(&sound, 0)` to seek back to the start of a sound. By default, starting
+and stopping sounds happens immediately, but sometimes it might be convenient to schedule the sound
+to be started and/or stopped at a specific time. This can be done with the following functions:
+
+    ```c
+    ma_sound_set_start_time_in_pcm_frames()
+    ma_sound_set_start_time_in_milliseconds()
+    ma_sound_set_stop_time_in_pcm_frames()
+    ma_sound_set_stop_time_in_milliseconds()
+    ```
+
+The start/stop time needs to be specified based on the absolute timer which is controlled by the
+engine. The current global time in PCM frames can be retrieved with
+`ma_engine_get_time_in_pcm_frames()`. The engine's global time can be changed with
+`ma_engine_set_time_in_pcm_frames()` for synchronization purposes if required. Note that scheduling
+a start time still requires an explicit call to `ma_sound_start()` before anything will play:
+
+    ```c
+    ma_sound_set_start_time_in_pcm_frames(&sound, ma_engine_get_time_in_pcm_frames(&engine) + (ma_engine_get_sample_rate(&engine) * 2));
+    ma_sound_start(&sound);
+    ```
+
+The third parameter of `ma_sound_init_from_file()` is a set of flags that control how the sound will
+be loaded and a few options on which features should be enabled for that sound. By default, the sound
+is synchronously loaded fully into memory straight from the file system without any kind of
+decoding. If you want to decode the sound before storing it in memory, you need to specify the
+`MA_SOUND_FLAG_DECODE` flag. This is useful if you want to incur the cost of decoding at an earlier
+stage, such as a loading stage. Without this option, decoding will happen dynamically at mixing
+time which might be too expensive on the audio thread.
+
+If you want to load the sound asynchronously, you can specify the `MA_SOUND_FLAG_ASYNC` flag. This
+will result in `ma_sound_init_from_file()` returning quickly, but the sound will not start playing
+until the sound has had some audio decoded.
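+
+For example, to pre-decode a sound and load it without blocking (a sketch; the flags can be
+combined with a bitwise OR):
+
+    ```c
+    ma_sound sound;
+    ma_result result = ma_sound_init_from_file(&engine, "my_sound.wav",
+        MA_SOUND_FLAG_DECODE | MA_SOUND_FLAG_ASYNC, NULL, NULL, &sound);
+    if (result != MA_SUCCESS) {
+        return result;
+    }
+    ```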
+
+The fourth parameter is a pointer to a sound group. A sound group is used as a mechanism to organise
+sounds into groups which have their own effect processing and volume control. An example is a game
+which might have separate groups for sfx, voice and music. Each of these groups have their own
+independent volume control. Use `ma_sound_group_init()` or `ma_sound_group_init_ex()` to initialize
+a sound group.
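+
+A sketch of grouping (assuming an initialized engine; "sfx.wav" is a placeholder):
+
+    ```c
+    ma_sound_group sfxGroup;
+    ma_sound_group_init(&engine, 0, NULL, &sfxGroup);   // 0 = default flags, NULL = no parent group.
+
+    ma_sound sound;
+    ma_sound_init_from_file(&engine, "sfx.wav", 0, &sfxGroup, NULL, &sound);
+
+    ma_sound_group_set_volume(&sfxGroup, 0.5f);         // Affects every sound in the group.
+    ```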
+
+Sounds and sound groups are nodes in the engine's node graph and can be plugged into any `ma_node`
+API. This makes it possible to connect sounds and sound groups to effect nodes to produce complex
+effect chains.
+
+A sound can have its volume changed with `ma_sound_set_volume()`. If you prefer decibel volume
+control you can use `ma_volume_db_to_linear()` to convert from decibel representation to linear.
+
+Panning and pitching is supported with `ma_sound_set_pan()` and `ma_sound_set_pitch()`. If you know
+a sound will never have its pitch changed with `ma_sound_set_pitch()` or via the doppler effect,
+you can specify the `MA_SOUND_FLAG_NO_PITCH` flag when initializing the sound for an optimization.
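+
+A sketch combining these controls (the values are arbitrary):
+
+    ```c
+    ma_sound_set_volume(&sound, ma_volume_db_to_linear(-6.0f));  // -6dB, converted to linear.
+    ma_sound_set_pan   (&sound, -0.25f);                         // Slightly to the left.
+    ma_sound_set_pitch (&sound,  1.5f);                          // Raise the pitch by 50%.
+    ```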
+
+By default, sounds and sound groups have spatialization enabled. If you don't ever want to
+spatialize your sounds, initialize the sound with the `MA_SOUND_FLAG_NO_SPATIALIZATION` flag. The
+spatialization model is fairly simple and is roughly at feature parity with OpenAL. HRTF and
+environmental occlusion are not currently supported, but planned for the future. The supported
+features include:
+
+  * Sound and listener positioning and orientation with cones
+  * Attenuation models: none, inverse, linear and exponential
+  * Doppler effect
+
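+A positioning sketch (illustrative values; listener 0 at the origin and a sound placed 10 units
+to its right):
+
+    ```c
+    ma_engine_listener_set_position(&engine, 0, 0.0f, 0.0f, 0.0f);
+    ma_sound_set_position(&sound, 10.0f, 0.0f, 0.0f);
+    ma_sound_set_attenuation_model(&sound, ma_attenuation_model_inverse);
+    ```
+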
+Sounds can be faded in and out with `ma_sound_set_fade_in_pcm_frames()`.
+
+To check if a sound is currently playing, you can use `ma_sound_is_playing()`. To check if a sound
+is at the end, use `ma_sound_at_end()`. Looping of a sound can be controlled with
+`ma_sound_set_looping()`. Use `ma_sound_is_looping()` to check whether or not the sound is looping.
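+
+A sketch of looping and fading in over two seconds (the fade length is expressed in PCM frames,
+so it is derived from the engine's sample rate here):
+
+    ```c
+    ma_sound_set_looping(&sound, MA_TRUE);
+    ma_sound_set_fade_in_pcm_frames(&sound, 0.0f, 1.0f, ma_engine_get_sample_rate(&engine) * 2);
+    ma_sound_start(&sound);
+    ```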
+
+
+
+2. Building
+===========
+miniaudio should work cleanly out of the box without the need to download or install any
+dependencies. See below for platform-specific details.
+
+Note that GCC and Clang require `-msse2`, `-mavx2`, etc. for SIMD optimizations.
+
+If you get errors about undefined references to `__sync_val_compare_and_swap_8`, `__atomic_load_8`,
+etc. you need to link with `-latomic`.
+
+
+2.1. Windows
+------------
+The Windows build should compile cleanly on all popular compilers without the need to configure any
+include paths nor link to any libraries.
+
+The UWP build may require linking to mmdevapi.lib if you get errors about an unresolved external
+symbol for `ActivateAudioInterfaceAsync()`.
+
+
+2.2. macOS and iOS
+------------------
+The macOS build should compile cleanly without the need to download any dependencies nor link to
+any libraries or frameworks. The iOS build needs to be compiled as Objective-C and will need to
+link the relevant frameworks but should compile cleanly out of the box with Xcode. Compiling
+through the command line requires linking to `-lpthread` and `-lm`.
+
+Due to the way miniaudio links to frameworks at runtime, your application may not pass Apple's
+notarization process. To fix this there are two options. The first is to compile with
+`-DMA_NO_RUNTIME_LINKING` which in turn will require linking with
+`-framework CoreFoundation -framework CoreAudio -framework AudioToolbox`. If you get errors about
+AudioToolbox, try with `-framework AudioUnit` instead. You may get this when using older versions
+of iOS. Alternatively, if you would rather keep using runtime linking you can add the following to
+your entitlements.xcent file:
+
+    ```
+    <key>com.apple.security.cs.allow-dyld-environment-variables</key>
+    <true/>
+    <key>com.apple.security.cs.allow-unsigned-executable-memory</key>
+    <true/>
+    ```
+
+See this discussion for more info: https://github.com/mackron/miniaudio/issues/203.
+
+
+2.3. Linux
+----------
+The Linux build only requires linking to `-ldl`, `-lpthread` and `-lm`. You do not need any
+development packages. You may need to link with `-latomic` if you're compiling for 32-bit ARM.
+
+
+2.4. BSD
+--------
+The BSD build only requires linking to `-lpthread` and `-lm`. NetBSD uses audio(4), OpenBSD uses
+sndio and FreeBSD uses OSS. You may need to link with `-latomic` if you're compiling for 32-bit
+ARM.
+
+
+2.5. Android
+------------
+AAudio is the highest priority backend on Android. This should work out of the box without needing
+any kind of compiler configuration. Support for AAudio starts with Android 8 which means older
+versions will fall back to OpenSL|ES which requires API level 16+.
+
+There have been reports that the OpenSL|ES backend fails to initialize on some Android based
+devices due to `dlopen()` failing to open "libOpenSLES.so". If this happens on your platform
+you'll need to disable run-time linking with `MA_NO_RUNTIME_LINKING` and link with -lOpenSLES.
+
+
+2.6. Emscripten
+---------------
+The Emscripten build emits Web Audio JavaScript directly and should compile cleanly out of the box.
+You cannot use `-std=c*` compiler flags, nor `-ansi`.
+
+You can enable the use of AudioWorklets by defining `MA_ENABLE_AUDIO_WORKLETS` and then compiling
+with the following options:
+
+    -sAUDIO_WORKLET=1 -sWASM_WORKERS=1 -sASYNCIFY
+
+An example for compiling with AudioWorklet support might look like this:
+
+    emcc program.c -o bin/program.html -DMA_ENABLE_AUDIO_WORKLETS -sAUDIO_WORKLET=1 -sWASM_WORKERS=1 -sASYNCIFY
+
+To run locally, you'll need to use emrun:
+
+    emrun bin/program.html
+
+
+
+2.7. Build Options
+------------------
+`#define` these options before including miniaudio.c, or pass them as compiler flags:
+
+    +----------------------------------+--------------------------------------------------------------------+
+    | Option                           | Description                                                        |
+    +----------------------------------+--------------------------------------------------------------------+
+    | MA_NO_WASAPI                     | Disables the WASAPI backend.                                       |
+    +----------------------------------+--------------------------------------------------------------------+
+    | MA_NO_DSOUND                     | Disables the DirectSound backend.                                  |
+    +----------------------------------+--------------------------------------------------------------------+
+    | MA_NO_WINMM                      | Disables the WinMM backend.                                        |
+    +----------------------------------+--------------------------------------------------------------------+
+    | MA_NO_ALSA                       | Disables the ALSA backend.                                         |
+    +----------------------------------+--------------------------------------------------------------------+
+    | MA_NO_PULSEAUDIO                 | Disables the PulseAudio backend.                                   |
+    +----------------------------------+--------------------------------------------------------------------+
+    | MA_NO_JACK                       | Disables the JACK backend.                                         |
+    +----------------------------------+--------------------------------------------------------------------+
+    | MA_NO_COREAUDIO                  | Disables the Core Audio backend.                                   |
+    +----------------------------------+--------------------------------------------------------------------+
+    | MA_NO_SNDIO                      | Disables the sndio backend.                                        |
+    +----------------------------------+--------------------------------------------------------------------+
+    | MA_NO_AUDIO4                     | Disables the audio(4) backend.                                     |
+    +----------------------------------+--------------------------------------------------------------------+
+    | MA_NO_OSS                        | Disables the OSS backend.                                          |
+    +----------------------------------+--------------------------------------------------------------------+
+    | MA_NO_AAUDIO                     | Disables the AAudio backend.                                       |
+    +----------------------------------+--------------------------------------------------------------------+
+    | MA_NO_OPENSL                     | Disables the OpenSL|ES backend.                                    |
+    +----------------------------------+--------------------------------------------------------------------+
+    | MA_NO_WEBAUDIO                   | Disables the Web Audio backend.                                    |
+    +----------------------------------+--------------------------------------------------------------------+
+    | MA_NO_CUSTOM                     | Disables support for custom backends.                              |
+    +----------------------------------+--------------------------------------------------------------------+
+    | MA_NO_NULL                       | Disables the null backend.                                         |
+    +----------------------------------+--------------------------------------------------------------------+
+    | MA_ENABLE_ONLY_SPECIFIC_BACKENDS | Disables all backends by default and requires `MA_ENABLE_*` to     |
+    |                                  | enable specific backends.                                          |
+    +----------------------------------+--------------------------------------------------------------------+
+    | MA_ENABLE_WASAPI                 | Used in conjunction with MA_ENABLE_ONLY_SPECIFIC_BACKENDS to       |
+    |                                  | enable the WASAPI backend.                                         |
+    +----------------------------------+--------------------------------------------------------------------+
+    | MA_ENABLE_DSOUND                 | Used in conjunction with MA_ENABLE_ONLY_SPECIFIC_BACKENDS to       |
+    |                                  | enable the DirectSound backend.                                    |
+    +----------------------------------+--------------------------------------------------------------------+
+    | MA_ENABLE_WINMM                  | Used in conjunction with MA_ENABLE_ONLY_SPECIFIC_BACKENDS to       |
+    |                                  | enable the WinMM backend.                                          |
+    +----------------------------------+--------------------------------------------------------------------+
+    | MA_ENABLE_ALSA                   | Used in conjunction with MA_ENABLE_ONLY_SPECIFIC_BACKENDS to       |
+    |                                  | enable the ALSA backend.                                           |
+    +----------------------------------+--------------------------------------------------------------------+
+    | MA_ENABLE_PULSEAUDIO             | Used in conjunction with MA_ENABLE_ONLY_SPECIFIC_BACKENDS to       |
+    |                                  | enable the PulseAudio backend.                                     |
+    +----------------------------------+--------------------------------------------------------------------+
+    | MA_ENABLE_JACK                   | Used in conjunction with MA_ENABLE_ONLY_SPECIFIC_BACKENDS to       |
+    |                                  | enable the JACK backend.                                           |
+    +----------------------------------+--------------------------------------------------------------------+
+    | MA_ENABLE_COREAUDIO              | Used in conjunction with MA_ENABLE_ONLY_SPECIFIC_BACKENDS to       |
+    |                                  | enable the Core Audio backend.                                     |
+    +----------------------------------+--------------------------------------------------------------------+
+    | MA_ENABLE_SNDIO                  | Used in conjunction with MA_ENABLE_ONLY_SPECIFIC_BACKENDS to       |
+    |                                  | enable the sndio backend.                                          |
+    +----------------------------------+--------------------------------------------------------------------+
+    | MA_ENABLE_AUDIO4                 | Used in conjunction with MA_ENABLE_ONLY_SPECIFIC_BACKENDS to       |
+    |                                  | enable the audio(4) backend.                                       |
+    +----------------------------------+--------------------------------------------------------------------+
+    | MA_ENABLE_OSS                    | Used in conjunction with MA_ENABLE_ONLY_SPECIFIC_BACKENDS to       |
+    |                                  | enable the OSS backend.                                            |
+    +----------------------------------+--------------------------------------------------------------------+
+    | MA_ENABLE_AAUDIO                 | Used in conjunction with MA_ENABLE_ONLY_SPECIFIC_BACKENDS to       |
+    |                                  | enable the AAudio backend.                                         |
+    +----------------------------------+--------------------------------------------------------------------+
+    | MA_ENABLE_OPENSL                 | Used in conjunction with MA_ENABLE_ONLY_SPECIFIC_BACKENDS to       |
+    |                                  | enable the OpenSL|ES backend.                                      |
+    +----------------------------------+--------------------------------------------------------------------+
+    | MA_ENABLE_WEBAUDIO               | Used in conjunction with MA_ENABLE_ONLY_SPECIFIC_BACKENDS to       |
+    |                                  | enable the Web Audio backend.                                      |
+    +----------------------------------+--------------------------------------------------------------------+
+    | MA_ENABLE_CUSTOM                 | Used in conjunction with MA_ENABLE_ONLY_SPECIFIC_BACKENDS to       |
+    |                                  | enable custom backends.                                            |
+    +----------------------------------+--------------------------------------------------------------------+
+    | MA_ENABLE_NULL                   | Used in conjunction with MA_ENABLE_ONLY_SPECIFIC_BACKENDS to       |
+    |                                  | enable the null backend.                                           |
+    +----------------------------------+--------------------------------------------------------------------+
+    | MA_NO_DECODING                   | Disables decoding APIs.                                            |
+    +----------------------------------+--------------------------------------------------------------------+
+    | MA_NO_ENCODING                   | Disables encoding APIs.                                            |
+    +----------------------------------+--------------------------------------------------------------------+
+    | MA_NO_WAV                        | Disables the built-in WAV decoder and encoder.                     |
+    +----------------------------------+--------------------------------------------------------------------+
+    | MA_NO_FLAC                       | Disables the built-in FLAC decoder.                                |
+    +----------------------------------+--------------------------------------------------------------------+
+    | MA_NO_MP3                        | Disables the built-in MP3 decoder.                                 |
+    +----------------------------------+--------------------------------------------------------------------+
+    | MA_NO_DEVICE_IO                  | Disables playback and recording. This will disable `ma_context`    |
+    |                                  | and `ma_device` APIs. This is useful if you only want to use       |
+    |                                  | miniaudio's data conversion and/or decoding APIs.                  |
+    +----------------------------------+--------------------------------------------------------------------+
+    | MA_NO_RESOURCE_MANAGER           | Disables the resource manager. When using the engine this will     |
+    |                                  | also disable the following functions:                              |
+    |                                  |                                                                    |
+    |                                  | ```                                                                |
+    |                                  | ma_sound_init_from_file()                                          |
+    |                                  | ma_sound_init_from_file_w()                                        |
+    |                                  | ma_sound_init_copy()                                               |
+    |                                  | ma_engine_play_sound_ex()                                          |
+    |                                  | ma_engine_play_sound()                                             |
+    |                                  | ```                                                                |
+    |                                  |                                                                    |
+    |                                  | The only way to initialize a `ma_sound` object is to initialize it |
+    |                                  | from a data source.                                                |
+    +----------------------------------+--------------------------------------------------------------------+
+    | MA_NO_NODE_GRAPH                 | Disables the node graph API. This will also disable the engine API |
+    |                                  | because it depends on the node graph.                              |
+    +----------------------------------+--------------------------------------------------------------------+
+    | MA_NO_ENGINE                     | Disables the engine API.                                           |
+    +----------------------------------+--------------------------------------------------------------------+
+    | MA_NO_THREADING                  | Disables the `ma_thread`, `ma_mutex`, `ma_semaphore` and           |
+    |                                  | `ma_event` APIs. This option is useful if you only need to use     |
+    |                                  | miniaudio for data conversion, decoding and/or encoding. Some      |
+    |                                  | families of APIs require threading which means the following       |
+    |                                  | options must also be set:                                          |
+    |                                  |                                                                    |
+    |                                  |     ```                                                            |
+    |                                  |     MA_NO_DEVICE_IO                                                |
+    |                                  |     ```                                                            |
+    +----------------------------------+--------------------------------------------------------------------+
+    | MA_NO_GENERATION                 | Disables generation APIs such as `ma_waveform` and `ma_noise`.     |
+    +----------------------------------+--------------------------------------------------------------------+
+    | MA_NO_SSE2                       | Disables SSE2 optimizations.                                       |
+    +----------------------------------+--------------------------------------------------------------------+
+    | MA_NO_AVX2                       | Disables AVX2 optimizations.                                       |
+    +----------------------------------+--------------------------------------------------------------------+
+    | MA_NO_NEON                       | Disables NEON optimizations.                                       |
+    +----------------------------------+--------------------------------------------------------------------+
+    | MA_NO_RUNTIME_LINKING            | Disables runtime linking. This is useful for passing Apple's       |
+    |                                  | notarization process. When enabling this, you may need to avoid    |
+    |                                  | using `-std=c89` or `-std=c99` on Linux builds or else you may end |
+    |                                  | up with compilation errors due to conflicts with `timespec` and    |
+    |                                  | `timeval` data types.                                              |
+    |                                  |                                                                    |
+    |                                  | You may need to enable this if your target platform does not allow |
+    |                                  | runtime linking via `dlopen()`.                                    |
+    +----------------------------------+--------------------------------------------------------------------+
+    | MA_USE_STDINT                    | (Pass this in as a compiler flag. Do not `#define` this before     |
+    |                                  | miniaudio.c) Forces the use of stdint.h for sized types.           |
+    +----------------------------------+--------------------------------------------------------------------+
+    | MA_DEBUG_OUTPUT                  | Enable `printf()` output of debug logs (`MA_LOG_LEVEL_DEBUG`).     |
+    +----------------------------------+--------------------------------------------------------------------+
+    | MA_COINIT_VALUE                  | Windows only. The value to pass to internal calls to               |
+    |                                  | `CoInitializeEx()`. Defaults to `COINIT_MULTITHREADED`.            |
+    +----------------------------------+--------------------------------------------------------------------+
+    | MA_FORCE_UWP                     | Windows only. Affects only the WASAPI backend. Will force the      |
+    |                                  | WASAPI backend to use the UWP code path instead of the regular     |
+    |                                  | desktop path. This is normally auto-detected and should rarely     |
+    |                                  | need to be set explicitly, but can be useful for debugging.        |
+    +----------------------------------+--------------------------------------------------------------------+
+    | MA_ON_THREAD_ENTRY               | Defines some code that will be executed as soon as an internal     |
+    |                                  | miniaudio-managed thread is created. This will be the first thing  |
+    |                                  | to be executed by the thread entry point.                          |
+    +----------------------------------+--------------------------------------------------------------------+
+    | MA_ON_THREAD_EXIT                | Defines some code that will be executed from the entry point of an |
+    |                                  | internal miniaudio-managed thread upon exit. This will be the last |
+    |                                  | thing to be executed before the thread's entry point exits.        |
+    +----------------------------------+--------------------------------------------------------------------+
+    | MA_THREAD_DEFAULT_STACK_SIZE     | If set, specifies the default stack size used by miniaudio-managed |
+    |                                  | threads.                                                           |
+    +----------------------------------+--------------------------------------------------------------------+
+    | MA_API                           | Controls how public APIs should be decorated. Default is `extern`. |
+    +----------------------------------+--------------------------------------------------------------------+
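+
+For example, a build that strips the built-in FLAC and MP3 decoders might define the options in
+the translation unit that compiles miniaudio (a sketch):
+
+    ```c
+    #define MA_NO_FLAC
+    #define MA_NO_MP3
+    #include "miniaudio.c"
+    ```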
+
+
+3. Definitions
+==============
+This section defines common terms used throughout miniaudio. Unfortunately there is often ambiguity
+in the use of terms throughout the audio space, so this section is intended to clarify how miniaudio
+uses each term.
+
+3.1. Sample
+-----------
+A sample is a single unit of audio data. If the sample format is f32, then one sample is one 32-bit
+floating point number.
+
+3.2. Frame / PCM Frame
+----------------------
+A frame is a group of samples equal to the number of channels. For a stereo stream a frame is 2
+samples, a mono frame is 1 sample, a 5.1 surround sound frame is 6 samples, etc. The terms "frame"
+and "PCM frame" are the same thing in miniaudio. Note that this is different to a compressed frame.
+If ever miniaudio needs to refer to a compressed frame, such as a FLAC frame, it will always
+clarify what it's referring to with something like "FLAC frame".
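+
+As a quick illustration (a sketch, not part of the definitions above; `format`, `channels` and
+`frameCount` are assumed to be known), converting between frames, samples and bytes looks like
+this:
+
+    ```c
+    ma_uint64 sampleCount = frameCount * channels;                                  // Samples = frames * channels.
+    ma_uint64 byteCount   = frameCount * ma_get_bytes_per_frame(format, channels);  // Bytes = frames * bytes per frame.
+    ```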
+
+3.3. Channel
+------------
+A stream of monaural audio that is emitted from an individual speaker in a speaker system, or
+received from an individual microphone in a microphone system. A stereo stream has two channels (a
+left channel, and a right channel), a 5.1 surround sound system has 6 channels, etc. Some audio
+systems refer to a channel as a complex audio stream that's mixed with other channels to produce
+the final mix - this is completely different to miniaudio's use of the term "channel" and should
+not be confused.
+
+3.4. Sample Rate
+----------------
+The sample rate in miniaudio is always expressed in Hz, such as 44100, 48000, etc. It's the number
+of PCM frames that are processed per second.
+
+3.5. Formats
+------------
+Throughout miniaudio you will see references to different sample formats:
+
+    +---------------+----------------------------------------+---------------------------+
+    | Symbol        | Description                            | Range                     |
+    +---------------+----------------------------------------+---------------------------+
+    | ma_format_f32 | 32-bit floating point                  | [-1, 1]                   |
+    | ma_format_s16 | 16-bit signed integer                  | [-32768, 32767]           |
+    | ma_format_s24 | 24-bit signed integer (tightly packed) | [-8388608, 8388607]       |
+    | ma_format_s32 | 32-bit signed integer                  | [-2147483648, 2147483647] |
+    | ma_format_u8  | 8-bit unsigned integer                 | [0, 255]                  |
+    +---------------+----------------------------------------+---------------------------+
+
+All formats are native-endian.
+
+
+
+4. Data Sources
+===============
+The data source abstraction in miniaudio is used for retrieving audio data from some source. A few
+examples include `ma_decoder`, `ma_noise` and `ma_waveform`. You will need to be familiar with data
+sources in order to make sense of some of the higher level concepts in miniaudio.
+
+The `ma_data_source` API is a generic interface for reading from a data source. Any object that
+implements the data source interface can be plugged into any `ma_data_source` function.
+
+To read data from a data source:
+
+    ```c
+    ma_result result;
+    ma_uint64 framesRead;
+
+    result = ma_data_source_read_pcm_frames(pDataSource, pFramesOut, frameCount, &framesRead);
+    if (result != MA_SUCCESS) {
+        return result;  // Failed to read data from the data source.
+    }
+    ```
+
+If you don't need the number of frames that were successfully read, you can pass in `NULL` for the
+`pFramesRead` parameter. If the number of frames read is less than the number of frames requested,
+it means the end of the data source has been reached. `MA_AT_END` will be returned only when the
+number of frames read is 0.
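+
+As an illustrative sketch (not part of the formal API description), a typical loop that drains a
+data source to the end might look like this:
+
+    ```c
+    for (;;) {
+        ma_uint64 framesRead;
+        result = ma_data_source_read_pcm_frames(pDataSource, pFramesOut, frameCount, &framesRead);
+
+        // ... consume framesRead frames from pFramesOut here ...
+
+        if (result == MA_AT_END) {
+            break;          // Reached the end. framesRead will be 0 here.
+        }
+        if (result != MA_SUCCESS) {
+            return result;  // Some other error.
+        }
+    }
+    ```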
+
+When calling any data source function, with the exception of `ma_data_source_init()` and
+`ma_data_source_uninit()`, you can pass in any object that implements a data source. For example,
+you could plug in a decoder like so:
+
+    ```c
+    ma_result result;
+    ma_uint64 framesRead;
+    ma_decoder decoder;   // <-- This would be initialized with `ma_decoder_init_*()`.
+
+    result = ma_data_source_read_pcm_frames(&decoder, pFramesOut, frameCount, &framesRead);
+    if (result != MA_SUCCESS) {
+        return result;  // Failed to read data from the decoder.
+    }
+    ```
+
+If you want to seek forward you can pass in `NULL` to the `pFramesOut` parameter. Alternatively you
+can use `ma_data_source_seek_pcm_frames()`.
+
+To seek to a specific PCM frame:
+
+    ```c
+    result = ma_data_source_seek_to_pcm_frame(pDataSource, frameIndex);
+    if (result != MA_SUCCESS) {
+        return result;  // Failed to seek to PCM frame.
+    }
+    ```
+
+You can retrieve the total length of a data source in PCM frames, but note that some data sources
+may not have the notion of a length, such as noise and waveforms, and others may just not have a
+way of determining the length such as some decoders. To retrieve the length:
+
+    ```c
+    ma_uint64 length;
+
+    result = ma_data_source_get_length_in_pcm_frames(pDataSource, &length);
+    if (result != MA_SUCCESS) {
+        return result;  // Failed to retrieve the length.
+    }
+    ```
+
+Care should be taken when retrieving the length of a data source where the underlying decoder is
+pulling data from a data stream with an undefined length, such as internet radio or some kind of
+broadcast. If you do this, `ma_data_source_get_length_in_pcm_frames()` may never return.
+
+The current position of the cursor in PCM frames can also be retrieved:
+
+    ```c
+    ma_uint64 cursor;
+
+    result = ma_data_source_get_cursor_in_pcm_frames(pDataSource, &cursor);
+    if (result != MA_SUCCESS) {
+        return result;  // Failed to retrieve the cursor.
+    }
+    ```
+
+You will often need to know the data format that will be returned after reading. This can be
+retrieved like so:
+
+    ```c
+    ma_format format;
+    ma_uint32 channels;
+    ma_uint32 sampleRate;
+    ma_channel channelMap[MA_MAX_CHANNELS];
+
+    result = ma_data_source_get_data_format(pDataSource, &format, &channels, &sampleRate, channelMap, MA_MAX_CHANNELS);
+    if (result != MA_SUCCESS) {
+        return result;  // Failed to retrieve data format.
+    }
+    ```
+
+If you do not need a specific data format property, just pass in NULL to the respective parameter.
+
+There may be cases where you want to implement something like a sound bank where you only want to
+read data within a certain range of the underlying data. To do this you can use a range:
+
+    ```c
+    result = ma_data_source_set_range_in_pcm_frames(pDataSource, rangeBegInFrames, rangeEndInFrames);
+    if (result != MA_SUCCESS) {
+        return result;  // Failed to set the range.
+    }
+    ```
+
+This is useful if you have a sound bank where many sounds are stored in the same file and you want
+the data source to only play one of those sub-sounds. Note that once the range is set, everything
+that takes a position, such as cursors and loop points, should always be relative to the start of
+the range. When the range is set, any previously defined loop point will be reset.
+
+Custom loop points can also be used with data sources. By default, data sources will loop after
+they reach the end of the data source, but if you need to loop at a specific location, you can do
+the following:
+
+    ```c
+    result = ma_data_source_set_loop_point_in_pcm_frames(pDataSource, loopBegInFrames, loopEndInFrames);
+    if (result != MA_SUCCESS) {
+        return result;  // Failed to set the loop point.
+    }
+    ```
+
+The loop point is relative to the current range.
+
+It's sometimes useful to chain data sources together so that a seamless transition can be achieved.
+To do this, you can use chaining:
+
+    ```c
+    ma_decoder decoder1;
+    ma_decoder decoder2;
+
+    // ... initialize decoders with ma_decoder_init_*() ...
+
+    result = ma_data_source_set_next(&decoder1, &decoder2);
+    if (result != MA_SUCCESS) {
+        return result;  // Failed to set the next data source.
+    }
+
+    result = ma_data_source_read_pcm_frames(&decoder1, pFramesOut, frameCount, pFramesRead);
+    if (result != MA_SUCCESS) {
+        return result;  // Failed to read from the decoder.
+    }
+    ```
+
+In the example above we're using decoders. When reading from a chain, you always want to read from
+the top level data source in the chain. In the example above, `decoder1` is the top level data
+source in the chain. When `decoder1` reaches the end, `decoder2` will start seamlessly without any
+gaps.
+
+Note that when looping is enabled, only the current data source will be looped. You can loop the
+entire chain by linking in a loop like so:
+
+    ```c
+    ma_data_source_set_next(&decoder1, &decoder2);  // decoder1 -> decoder2
+    ma_data_source_set_next(&decoder2, &decoder1);  // decoder2 -> decoder1 (loop back to the start).
+    ```
+
+Note that setting up chaining is not thread safe, so care needs to be taken if you're dynamically
+changing links while the audio thread is in the middle of reading.
+
+Do not use `ma_decoder_seek_to_pcm_frame()` as a means to reuse a data source to play multiple
+instances of the same sound simultaneously. This can be extremely inefficient depending on the type
+of data source and can result in glitching due to subtle changes to the state of internal filters.
+Instead, initialize multiple data sources for each instance.
+
+
+4.1. Custom Data Sources
+------------------------
+You can implement a custom data source by implementing the functions in `ma_data_source_vtable`.
+Your custom object must have `ma_data_source_base` as its first member:
+
+    ```c
+    struct my_data_source
+    {
+        ma_data_source_base base;
+        ...
+    };
+    ```
+
+In your initialization routine, you need to call `ma_data_source_init()` in order to set up the
+base object (`ma_data_source_base`):
+
+    ```c
+    static ma_result my_data_source_read(ma_data_source* pDataSource, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead)
+    {
+        // Read data here. Output in the same format returned by my_data_source_get_data_format().
+    }
+
+    static ma_result my_data_source_seek(ma_data_source* pDataSource, ma_uint64 frameIndex)
+    {
+        // Seek to a specific PCM frame here. Return MA_NOT_IMPLEMENTED if seeking is not supported.
+    }
+
+    static ma_result my_data_source_get_data_format(ma_data_source* pDataSource, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate, ma_channel* pChannelMap, size_t channelMapCap)
+    {
+        // Return the format of the data here.
+    }
+
+    static ma_result my_data_source_get_cursor(ma_data_source* pDataSource, ma_uint64* pCursor)
+    {
+        // Retrieve the current position of the cursor here. Return MA_NOT_IMPLEMENTED and set *pCursor to 0 if there is no notion of a cursor.
+    }
+
+    static ma_result my_data_source_get_length(ma_data_source* pDataSource, ma_uint64* pLength)
+    {
+        // Retrieve the length in PCM frames here. Return MA_NOT_IMPLEMENTED and set *pLength to 0 if there is no notion of a length or if the length is unknown.
+    }
+
+    static ma_data_source_vtable g_my_data_source_vtable =
+    {
+        my_data_source_read,
+        my_data_source_seek,
+        my_data_source_get_data_format,
+        my_data_source_get_cursor,
+        my_data_source_get_length
+    };
+
+    ma_result my_data_source_init(my_data_source* pMyDataSource)
+    {
+        ma_result result;
+        ma_data_source_config baseConfig;
+
+        baseConfig = ma_data_source_config_init();
+        baseConfig.vtable = &g_my_data_source_vtable;
+
+        result = ma_data_source_init(&baseConfig, &pMyDataSource->base);
+        if (result != MA_SUCCESS) {
+            return result;
+        }
+
+        // ... do the initialization of your custom data source here ...
+
+        return MA_SUCCESS;
+    }
+
+    void my_data_source_uninit(my_data_source* pMyDataSource)
+    {
+        // ... do the uninitialization of your custom data source here ...
+
+        // You must uninitialize the base data source.
+        ma_data_source_uninit(&pMyDataSource->base);
+    }
+    ```
+
+Note that `ma_data_source_init()` and `ma_data_source_uninit()` are never called directly outside
+of the custom data source. It's up to the custom data source itself to call these within its own
+init/uninit functions.
+
+
+
+5. Engine
+=========
+The `ma_engine` API is a high level API for managing and mixing sounds and effect processing. The
+`ma_engine` object encapsulates a resource manager and a node graph, both of which will be
+explained in more detail later.
+
+Sounds are called `ma_sound` and are created from an engine. Sounds can be associated with a mixing
+group called `ma_sound_group` which are also created from the engine. Both `ma_sound` and
+`ma_sound_group` objects are nodes within the engine's node graph.
+
+When the engine is initialized, it will normally create a device internally. If you would rather
+manage the device yourself, you can do so and just pass a pointer to it via the engine config when
+you initialize the engine. You can also just use the engine without a device, which again can be
+configured via the engine config.
+
+The most basic way to initialize the engine is with a default config, like so:
+
+    ```c
+    ma_result result;
+    ma_engine engine;
+
+    result = ma_engine_init(NULL, &engine);
+    if (result != MA_SUCCESS) {
+        return result;  // Failed to initialize the engine.
+    }
+    ```
+
+This will result in the engine initializing a playback device using the operating system's default
+device. This will be sufficient for many use cases, but if you need more flexibility you'll want to
+configure the engine with an engine config:
+
+    ```c
+    ma_result result;
+    ma_engine engine;
+    ma_engine_config engineConfig;
+
+    engineConfig = ma_engine_config_init();
+    engineConfig.pDevice = &myDevice;
+
+    result = ma_engine_init(&engineConfig, &engine);
+    if (result != MA_SUCCESS) {
+        return result;  // Failed to initialize the engine.
+    }
+    ```
+
+In the example above we're passing in a pre-initialized device. Since the caller is the one in
+control of the device's data callback, it's their responsibility to manually call
+`ma_engine_read_pcm_frames()` from inside their data callback:
+
+    ```c
+    void playback_data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount)
+    {
+        ma_engine_read_pcm_frames(&g_Engine, pOutput, frameCount, NULL);
+    }
+    ```
+
+You can also use the engine independent of a device entirely:
+
+    ```c
+    ma_result result;
+    ma_engine engine;
+    ma_engine_config engineConfig;
+
+    engineConfig = ma_engine_config_init();
+    engineConfig.noDevice   = MA_TRUE;
+    engineConfig.channels   = 2;        // Must be set when not using a device.
+    engineConfig.sampleRate = 48000;    // Must be set when not using a device.
+
+    result = ma_engine_init(&engineConfig, &engine);
+    if (result != MA_SUCCESS) {
+        return result;  // Failed to initialize the engine.
+    }
+    ```
+
+Note that when you're not using a device, you must set the channel count and sample rate in the
+config or else miniaudio won't know what to use (miniaudio will use the device to determine this
+normally). When not using a device, you need to use `ma_engine_read_pcm_frames()` to process audio
+data from the engine. This kind of setup is useful if you want to do something like offline
+processing or want to use a different audio system for playback such as SDL.
+
+When a sound is loaded it goes through a resource manager. By default the engine will initialize a
+resource manager internally, but you can also specify a pre-initialized resource manager:
+
+    ```c
+    ma_result result;
+    ma_engine engine1;
+    ma_engine engine2;
+    ma_engine_config engineConfig;
+
+    engineConfig = ma_engine_config_init();
+    engineConfig.pResourceManager = &myResourceManager;
+
+    ma_engine_init(&engineConfig, &engine1);
+    ma_engine_init(&engineConfig, &engine2);
+    ```
+
+In this example we are initializing two engines, both of which are sharing the same resource
+manager. This is especially useful for saving memory when loading the same file across multiple
+engines. If you were not to use a shared resource manager, each engine instance would use its own,
+which would result in any sounds used by both engines being loaded twice. By using a shared
+resource manager, each sound would only be loaded once. Using multiple engines is useful when you
+need to output to multiple playback devices, such as in a local multiplayer game where each player
+is using their own set of headphones.
+
+By default an engine will be in a started state. To make it so the engine is not automatically
+started you can configure it as such:
+
+    ```c
+    engineConfig.noAutoStart = MA_TRUE;
+
+    // The engine will need to be started manually.
+    ma_engine_start(&engine);
+
+    // Later on the engine can be stopped with ma_engine_stop().
+    ma_engine_stop(&engine);
+    ```
+
+The concept of starting or stopping an engine is only relevant when using the engine with a
+device. Attempting to start or stop an engine that is not associated with a device will result in
+`MA_INVALID_OPERATION`.
+
+The master volume of the engine can be controlled with `ma_engine_set_volume()` which takes a
+linear scale, with 0 resulting in silence and anything above 1 resulting in amplification. If you
+prefer decibel based volume control, use `ma_volume_db_to_linear()` to convert from dB to linear.
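+
+For example (a minimal sketch):
+
+    ```c
+    ma_engine_set_volume(&engine, 0.5f);                           // Half volume on a linear scale.
+    ma_engine_set_volume(&engine, ma_volume_db_to_linear(-6.0f));  // Roughly the same, expressed in dB.
+    ```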
+
+When a sound is spatialized, it is done so relative to a listener. An engine can be configured to
+have multiple listeners which can be configured via the config:
+
+    ```c
+    engineConfig.listenerCount = 2;
+    ```
+
+The maximum number of listeners is restricted to `MA_ENGINE_MAX_LISTENERS`. By default, when a
+sound is spatialized, it will be done so relative to the closest listener. You can also pin a sound
+to a specific listener which will be explained later. Listeners have a position, direction, cone,
+and velocity (for doppler effect). A listener is referenced by an index, the meaning of which is up
+to the caller (the index is 0 based and cannot go beyond the listener count, minus 1). The
+position, direction and velocity are all specified in absolute terms:
+
+    ```c
+    ma_engine_listener_set_position(&engine, listenerIndex, worldPosX, worldPosY, worldPosZ);
+    ```
+
+The direction of the listener represents its forward vector. The listener's up vector can also be
+specified and defaults to +1 on the Y axis.
+
+    ```c
+    ma_engine_listener_set_direction(&engine, listenerIndex, forwardX, forwardY, forwardZ);
+    ma_engine_listener_set_world_up(&engine, listenerIndex, 0, 1, 0);
+    ```
+
+The engine supports directional attenuation. The listener can have a cone that controls how sound
+attenuated based on the listener's direction. When a sound is between the inner and outer cones, it
+will be attenuated between 1 and the cone's outer gain:
+
+    ```c
+    ma_engine_listener_set_cone(&engine, listenerIndex, innerAngleInRadians, outerAngleInRadians, outerGain);
+    ```
+
+When a sound is inside the inner cone, no directional attenuation is applied. When the sound is
+outside of the outer cone, the attenuation will be set to `outerGain` in the example above. When
+the sound is in between the inner and outer cones, the attenuation will be interpolated between 1
+and the outer gain.
+
+The engine's coordinate system follows the OpenGL coordinate system where positive X points right,
+positive Y points up and negative Z points forward.
+
+The simplest and least flexible way to play a sound is like so:
+
+    ```c
+    ma_engine_play_sound(&engine, "my_sound.wav", pGroup);
+    ```
+
+This is a "fire and forget" style of function. The engine will manage the `ma_sound` object
+internally. When the sound finishes playing, it'll be put up for recycling. For more flexibility
+you'll want to initialize a sound object:
+
+    ```c
+    ma_sound sound;
+
+    result = ma_sound_init_from_file(&engine, "my_sound.wav", flags, pGroup, NULL, &sound);
+    if (result != MA_SUCCESS) {
+        return result;  // Failed to load sound.
+    }
+    ```
+
+Sounds need to be uninitialized with `ma_sound_uninit()`.
+
+The example above loads a sound from a file. If the resource manager has been disabled you will not
+be able to use this function and instead you'll need to initialize a sound directly from a data
+source:
+
+    ```c
+    ma_sound sound;
+
+    result = ma_sound_init_from_data_source(&engine, &dataSource, flags, pGroup, &sound);
+    if (result != MA_SUCCESS) {
+        return result;
+    }
+    ```
+
+Each `ma_sound` object represents a single instance of the sound. If you want to play the same
+sound multiple times at the same time, you need to initialize a separate `ma_sound` object.
+
+For the most flexibility when initializing sounds, use `ma_sound_init_ex()`. This uses miniaudio's
+standard config/init pattern:
+
+    ```c
+    ma_sound sound;
+    ma_sound_config soundConfig;
+
+    soundConfig = ma_sound_config_init();
+    soundConfig.pFilePath   = NULL; // Set this to load from a file path.
+    soundConfig.pDataSource = NULL; // Set this to initialize from an existing data source.
+    soundConfig.pInitialAttachment = &someNodeInTheNodeGraph;
+    soundConfig.initialAttachmentInputBusIndex = 0;
+    soundConfig.channelsIn  = 1;
+    soundConfig.channelsOut = 0;    // Set to 0 to use the engine's native channel count.
+
+    result = ma_sound_init_ex(&soundConfig, &sound);
+    if (result != MA_SUCCESS) {
+        return result;
+    }
+    ```
+
+In the example above, the sound is being initialized with neither a file nor a data source. This is
+valid, in which case the sound acts as a node in the middle of the node graph. This means you can
+connect other sounds to this sound and allow it to act like a sound group. Indeed, this is exactly
+what a `ma_sound_group` is.
+
+When loading a sound, you specify a set of flags that control how the sound is loaded and what
+features are enabled for that sound. When no flags are set, the sound will be fully loaded into
+memory in exactly the same format as how it's stored on the file system. The resource manager will
+allocate a block of memory and then load the file directly into it. When reading audio data, it
+will be decoded dynamically on the fly. In order to save processing time on the audio thread, it
+might be beneficial to pre-decode the sound. You can do this with the `MA_SOUND_FLAG_DECODE` flag:
+
+    ```c
+    ma_sound_init_from_file(&engine, "my_sound.wav", MA_SOUND_FLAG_DECODE, pGroup, NULL, &sound);
+    ```
+
+By default, sounds will be loaded synchronously, meaning `ma_sound_init_*()` will not return until
+the sound has been fully loaded. If this is prohibitive you can instead load sounds asynchronously
+by specifying the `MA_SOUND_FLAG_ASYNC` flag:
+
+    ```c
+    ma_sound_init_from_file(&engine, "my_sound.wav", MA_SOUND_FLAG_DECODE | MA_SOUND_FLAG_ASYNC, pGroup, NULL, &sound);
+    ```
+
+This will result in `ma_sound_init_*()` returning quickly, but the sound won't yet have been fully
+loaded. When you start the sound, it won't output anything until some audio is available. When the
+`MA_SOUND_FLAG_DECODE` flag is also specified, the sound will start outputting audio before it has
+been fully decoded.
+
+If you need to wait for an asynchronously loaded sound to be fully loaded, you can use a fence. A
+fence in miniaudio is a simple synchronization mechanism that blocks until its internal
+counter hits zero. You can specify a fence like so:
+
+    ```c
+    ma_result result;
+    ma_fence fence;
+    ma_sound sounds[4];
+
+    result = ma_fence_init(&fence);
+    if (result != MA_SUCCESS) {
+        return result;
+    }
+
+    // Load some sounds asynchronously.
+    for (int iSound = 0; iSound < 4; iSound += 1) {
+        ma_sound_init_from_file(&engine, mySoundFilesPaths[iSound], MA_SOUND_FLAG_DECODE | MA_SOUND_FLAG_ASYNC, pGroup, &fence, &sounds[iSound]);
+    }
+
+    // ... do some other stuff here in the mean time ...
+
+    // Wait for all sounds to finish loading.
+    ma_fence_wait(&fence);
+    ```
+
+If loading the entire sound into memory is prohibitive, you can also configure the engine to stream
+the audio data:
+
+    ```c
+    ma_sound_init_from_file(&engine, "my_sound.wav", MA_SOUND_FLAG_STREAM, pGroup, NULL, &sound);
+    ```
+
+When streaming sounds, 2 seconds worth of audio data is stored in memory. Although it should work
+fine, it's inefficient to use streaming for short sounds. Streaming is useful for things like music
+tracks in games.
+
+When loading a sound from a file path, the engine will reference count the file to prevent it from
+being loaded if it's already in memory. When you uninitialize a sound, the reference count will be
+decremented, and if it hits zero, the sound will be unloaded from memory. This reference counting
+system is not used for streams. The engine will use a 64-bit hash of the file name when comparing
+file paths which means there's a small chance you might encounter a name collision. If this is an
+issue, you'll need to use a different name for one of the colliding file paths, or just not load
+from files and instead load from a data source.
+
+You can use `ma_sound_init_copy()` to initialize a copy of another sound. Note, however, that this
+only works for sounds that were initialized with `ma_sound_init_from_file()` and without the
+`MA_SOUND_FLAG_STREAM` flag.
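+
+For example (a sketch; assumes `sound` was initialized with `ma_sound_init_from_file()`):
+
+    ```c
+    ma_sound soundCopy;
+
+    result = ma_sound_init_copy(&engine, &sound, flags, pGroup, &soundCopy);
+    if (result != MA_SUCCESS) {
+        return result;  // Failed to initialize a copy of the sound.
+    }
+    ```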
+
+When you initialize a sound, if you specify a sound group the sound will be attached to that group
+automatically. If you set it to NULL, it will be automatically attached to the engine's endpoint.
+If you would instead rather leave the sound unattached by default, you can specify the
+`MA_SOUND_FLAG_NO_DEFAULT_ATTACHMENT` flag. This is useful if you want to set up a complex node
+graph.
+
+Sounds are not started by default. To start a sound, use `ma_sound_start()`. Stop a sound with
+`ma_sound_stop()`.
+
+Sounds can have their volume controlled with `ma_sound_set_volume()` in the same way as the
+engine's master volume.
+
+Sounds support stereo panning and pitching. Set the pan with `ma_sound_set_pan()`. Setting the pan
+to 0 will result in an unpanned sound. Setting it to -1 will shift everything to the left, whereas
++1 will shift it to the right. The pitch can be controlled with `ma_sound_set_pitch()`. A larger
+value will result in a higher pitch. The pitch must be greater than 0.
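+
+Putting the playback controls above together (a sketch; assumes `sound` has been initialized as
+shown earlier):
+
+    ```c
+    ma_sound_set_volume(&sound, 0.8f);  // Slightly quieter than full volume.
+    ma_sound_set_pan(&sound, -0.25f);   // Shift the sound slightly to the left.
+    ma_sound_set_pitch(&sound, 1.5f);   // Raise the pitch. Must be greater than 0.
+
+    ma_sound_start(&sound);
+
+    // ... sometime later ...
+
+    ma_sound_stop(&sound);
+    ```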
+
+The engine supports 3D spatialization of sounds. By default sounds will have spatialization
+enabled, but if a sound does not need to be spatialized it's best to disable it. There are two ways
+to disable spatialization of a sound:
+
+    ```c
+    // Disable spatialization at initialization time via a flag:
+    ma_sound_init_from_file(&engine, "my_sound.wav", MA_SOUND_FLAG_NO_SPATIALIZATION, NULL, NULL, &sound);
+
+    // Dynamically disable or enable spatialization post-initialization:
+    ma_sound_set_spatialization_enabled(&sound, isSpatializationEnabled);
+    ```
+
+By default sounds will be spatialized based on the closest listener. If a sound should always be
+spatialized relative to a specific listener it can be pinned to one:
+
+    ```c
+    ma_sound_set_pinned_listener_index(&sound, listenerIndex);
+    ```
+
+Like listeners, sounds have a position. By default, the position of a sound is in absolute space,
+but it can be changed to be relative to a listener:
+
+    ```c
+    ma_sound_set_positioning(&sound, ma_positioning_relative);
+    ```
+
+Note that relative positioning of a sound only makes sense if there is either only one listener, or
+the sound is pinned to a specific listener. To set the position of a sound:
+
+    ```c
+    ma_sound_set_position(&sound, posX, posY, posZ);
+    ```
+
+The direction works the same way as a listener and represents the sound's forward direction:
+
+    ```c
+    ma_sound_set_direction(&sound, forwardX, forwardY, forwardZ);
+    ```
+
+Sounds also have a cone for controlling directional attenuation. This works exactly the same as
+listeners:
+
+    ```c
+    ma_sound_set_cone(&sound, innerAngleInRadians, outerAngleInRadians, outerGain);
+    ```
+
+The velocity of a sound is used for doppler effect and can be set as such:
+
+    ```c
+    ma_sound_set_velocity(&sound, velocityX, velocityY, velocityZ);
+    ```
+
+The engine supports different attenuation models which can be configured on a per-sound basis. By
+default the attenuation model is set to `ma_attenuation_model_inverse` which is the equivalent to
+OpenAL's `AL_INVERSE_DISTANCE_CLAMPED`. Configure the attenuation model like so:
+
+    ```c
+    ma_sound_set_attenuation_model(&sound, ma_attenuation_model_inverse);
+    ```
+
+The supported attenuation models include the following:
+
+    +----------------------------------+----------------------------------------------+
+    | ma_attenuation_model_none        | No distance attenuation.                     |
+    +----------------------------------+----------------------------------------------+
+    | ma_attenuation_model_inverse     | Equivalent to `AL_INVERSE_DISTANCE_CLAMPED`. |
+    +----------------------------------+----------------------------------------------+
+    | ma_attenuation_model_linear      | Linear attenuation.                          |
+    +----------------------------------+----------------------------------------------+
+    | ma_attenuation_model_exponential | Exponential attenuation.                     |
+    +----------------------------------+----------------------------------------------+
+
+To control how quickly a sound rolls off as it moves away from the listener, you need to configure
+the rolloff:
+
+    ```c
+    ma_sound_set_rolloff(&sound, rolloff);
+    ```
+
+You can control the minimum and maximum gain to apply from spatialization:
+
+    ```c
+    ma_sound_set_min_gain(&sound, minGain);
+    ma_sound_set_max_gain(&sound, maxGain);
+    ```
+
+Likewise, in the calculation of attenuation, you can control the minimum and maximum distances for
+the attenuation calculation. This is useful if you want to ensure sounds don't drop below a certain
+volume after the listener moves further away, and to have sounds play at maximum volume when the
+listener is within a certain distance:
+
+    ```c
+    ma_sound_set_min_distance(&sound, minDistance);
+    ma_sound_set_max_distance(&sound, maxDistance);
+    ```
+
+The engine's spatialization system supports doppler effect. The doppler factor can be configured on
+a per-sound basis like so:
+
+    ```c
+    ma_sound_set_doppler_factor(&sound, dopplerFactor);
+    ```
+
+You can fade sounds in and out with `ma_sound_set_fade_in_pcm_frames()` and
+`ma_sound_set_fade_in_milliseconds()`. Set the volume to -1 to use the current volume as the
+starting volume:
+
+    ```c
+    // Fade in over 1 second.
+    ma_sound_set_fade_in_milliseconds(&sound, 0, 1, 1000);
+
+    // ... sometime later ...
+
+    // Fade out over 1 second, starting from the current volume.
+    ma_sound_set_fade_in_milliseconds(&sound, -1, 0, 1000);
+    ```
+
+By default sounds will start immediately, but sometimes for timing and synchronization purposes it
+can be useful to schedule a sound to start or stop:
+
+    ```c
+    // Start the sound in 1 second from now.
+    ma_sound_set_start_time_in_pcm_frames(&sound, ma_engine_get_time_in_pcm_frames(&engine) + (ma_engine_get_sample_rate(&engine) * 1));
+
+    // Stop the sound in 2 seconds from now.
+    ma_sound_set_stop_time_in_pcm_frames(&sound, ma_engine_get_time_in_pcm_frames(&engine) + (ma_engine_get_sample_rate(&engine) * 2));
+    ```
+
+Note that scheduling a start time still requires an explicit call to `ma_sound_start()` before
+anything will play.
+
+The time is specified in global time which is controlled by the engine. You can get the engine's
+current time with `ma_engine_get_time_in_pcm_frames()`. The engine's global time is incremented
+automatically as audio data is read, but it can be reset with `ma_engine_set_time_in_pcm_frames()`
+in case it needs to be resynchronized for some reason.
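+
+For example (a sketch):
+
+    ```c
+    ma_uint64 engineTime = ma_engine_get_time_in_pcm_frames(&engine);
+
+    // Reset the engine's global clock, for example to resynchronize it with something else.
+    ma_engine_set_time_in_pcm_frames(&engine, 0);
+    ```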
+
+To determine whether or not a sound is currently playing, use `ma_sound_is_playing()`. This will
+take the scheduled start and stop times into account.
+
+Whether or not a sound should loop can be controlled with `ma_sound_set_looping()`. Sounds will not
+be looping by default. Use `ma_sound_is_looping()` to determine whether or not a sound is looping.
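+
+For example (a sketch):
+
+    ```c
+    ma_sound_set_looping(&sound, MA_TRUE);
+
+    if (ma_sound_is_looping(&sound)) {
+        // The sound will loop back to the start when it reaches the end.
+    }
+    ```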
+
+Use `ma_sound_at_end()` to determine whether or not a sound is currently at the end. For a looping
+sound this should never return true. Alternatively, you can configure a callback that will be fired
+when the sound reaches the end. Note that the callback is fired from the audio thread which means
+you cannot uninitialize the sound from within the callback. To set the callback you can use
+`ma_sound_set_end_callback()`. Alternatively, if you're using `ma_sound_init_ex()`, you can pass it
+into the config like so:
+
+    ```c
+    soundConfig.endCallback = my_end_callback;
+    soundConfig.pEndCallbackUserData = pMyEndCallbackUserData;
+    ```
+
+The end callback is declared like so:
+
+    ```c
+    void my_end_callback(void* pUserData, ma_sound* pSound)
+    {
+        ...
+    }
+    ```
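+
+If you're not using `ma_sound_init_ex()`, the same callback can be hooked up after initialization
+with `ma_sound_set_end_callback()` (a sketch):
+
+    ```c
+    result = ma_sound_set_end_callback(&sound, my_end_callback, pMyEndCallbackUserData);
+    if (result != MA_SUCCESS) {
+        return result;  // Failed to set the end callback.
+    }
+    ```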
+
+Internally a sound wraps around a data source. Some APIs exist to control the underlying data
+source, mainly for convenience:
+
+    ```c
+    ma_sound_seek_to_pcm_frame(&sound, frameIndex);
+    ma_sound_get_data_format(&sound, &format, &channels, &sampleRate, pChannelMap, channelMapCapacity);
+    ma_sound_get_cursor_in_pcm_frames(&sound, &cursor);
+    ma_sound_get_length_in_pcm_frames(&sound, &length);
+    ```
+
+Sound groups have the same API as sounds, only they are called `ma_sound_group`, and since they do
+not have any notion of a data source, anything relating to a data source is unavailable.
+
+Internally, sound data is loaded via the `ma_decoder` API which means by default it only supports
+file formats that have built-in support in miniaudio. You can extend this to support any kind of
+file format through the use of custom decoders. To do this you'll need to use a self-managed
+resource manager and configure it appropriately. See the "Resource Management" section below for
+details on how to set this up.
+
+
+6. Resource Management
+======================
+Many programs will want to manage sound resources for things such as reference counting and
+streaming. This is supported by miniaudio via the `ma_resource_manager` API.
+
+The resource manager is mainly responsible for the following:
+
+  * Loading of sound files into memory with reference counting.
+  * Streaming of sound data.
+
+When loading a sound file, the resource manager will give you back a `ma_data_source` compatible
+object called `ma_resource_manager_data_source`. This object can be passed into any
+`ma_data_source` API which is how you can read and seek audio data. When loading a sound file, you
+specify whether or not you want the sound to be fully loaded into memory (and optionally
+pre-decoded) or streamed. When loading into memory, you can also specify whether or not you want
+the data to be loaded asynchronously.
+
+The example below shows how to initialize a resource manager using its default configuration:
+
+    ```c
+    ma_resource_manager_config config;
+    ma_resource_manager resourceManager;
+
+    config = ma_resource_manager_config_init();
+    result = ma_resource_manager_init(&config, &resourceManager);
+    if (result != MA_SUCCESS) {
+        ma_device_uninit(&device);
+        printf("Failed to initialize the resource manager.");
+        return -1;
+    }
+    ```
+
+You can configure the format, channels and sample rate of the decoded audio data. By default it
+will use the file's native data format, but you can configure it to use a consistent format. This
+is useful for offloading the cost of data conversion to load time rather than dynamically
+converting at mixing time. To do this, you configure the decoded format, channels and sample rate
+like the code below:
+
+    ```c
+    config = ma_resource_manager_config_init();
+    config.decodedFormat     = device.playback.format;
+    config.decodedChannels   = device.playback.channels;
+    config.decodedSampleRate = device.sampleRate;
+    ```
+
+In the code above, the resource manager will be configured so that any decoded audio data will be
+pre-converted at load time to the device's native data format. If instead you used defaults and
+the data format of the file did not match the device's data format, you would need to convert the
+data at mixing time which may be prohibitive in high-performance and large scale scenarios like
+games.
+
+Internally the resource manager uses the `ma_decoder` API to load sounds. This means by default it
+only supports decoders that are built into miniaudio. It's possible to support additional encoding
+formats through the use of custom decoders. To do so, pass your `ma_decoding_backend_vtable`
+vtables into the resource manager config:
+
+    ```c
+    ma_decoding_backend_vtable* pCustomBackendVTables[] =
+    {
+        &g_ma_decoding_backend_vtable_libvorbis,
+        &g_ma_decoding_backend_vtable_libopus
+    };
+
+    ...
+
+    resourceManagerConfig.ppCustomDecodingBackendVTables = pCustomBackendVTables;
+    resourceManagerConfig.customDecodingBackendCount     = sizeof(pCustomBackendVTables) / sizeof(pCustomBackendVTables[0]);
+    resourceManagerConfig.pCustomDecodingBackendUserData = NULL;
+    ```
+
+This system can allow you to support any kind of file format. See the "Decoding" section for
+details on how to implement custom decoders. The miniaudio repository includes examples for Opus
+via libopus and libopusfile and Vorbis via libvorbis and libvorbisfile.
+
+Asynchronicity is achieved via a job system. When an operation needs to be performed, such as the
+decoding of a page, a job will be posted to a queue which will then be processed by a job thread.
+By default there will be only one job thread running, but this can be configured, like so:
+
+    ```c
+    config = ma_resource_manager_config_init();
+    config.jobThreadCount = MY_JOB_THREAD_COUNT;
+    ```
+
+By default job threads are managed internally by the resource manager, however you can also
+self-manage your job threads if, for example, you want to integrate the job processing into your
+existing job infrastructure, or if you simply don't like the way the resource manager does it. To
+do this, just set the job thread count to 0 and process jobs manually. To process jobs, you first
+need to retrieve a job using `ma_resource_manager_next_job()` and then process it using
+`ma_job_process()`:
+
+    ```c
+    config = ma_resource_manager_config_init();
+    config.jobThreadCount = 0;                            // Don't manage any job threads internally.
+    config.flags = MA_RESOURCE_MANAGER_FLAG_NON_BLOCKING; // Optional. Makes `ma_resource_manager_next_job()` non-blocking.
+
+    // ... Initialize your custom job threads ...
+
+    void my_custom_job_thread(...)
+    {
+        for (;;) {
+            ma_job job;
+            ma_result result = ma_resource_manager_next_job(pMyResourceManager, &job);
+            if (result != MA_SUCCESS) {
+                if (result == MA_NO_DATA_AVAILABLE) {
+                    // No jobs are available. Keep going. Will only get this if the resource manager was initialized
+                    // with MA_RESOURCE_MANAGER_FLAG_NON_BLOCKING.
+                    continue;
+                } else if (result == MA_CANCELLED) {
+                    // MA_JOB_TYPE_QUIT was posted. Exit.
+                    break;
+                } else {
+                    // Some other error occurred.
+                    break;
+                }
+            }
+
+            ma_job_process(&job);
+        }
+    }
+    ```
+
+In the example above, the `MA_JOB_TYPE_QUIT` event is used as the termination
+indicator, but you can use whatever you would like to terminate the thread. The call to
+`ma_resource_manager_next_job()` is blocking by default, but can be configured to be non-blocking
+by initializing the resource manager with the `MA_RESOURCE_MANAGER_FLAG_NON_BLOCKING` configuration
+flag. Note that the `MA_JOB_TYPE_QUIT` will never be removed from the job queue. This
+is to give every thread the opportunity to catch the event and terminate naturally.
+
+When loading a file, it's sometimes convenient to be able to customize how files are opened and
+read instead of using standard `fopen()`, `fclose()`, etc. which is what miniaudio will use by
+default. This can be done by setting `pVFS` member of the resource manager's config:
+
+    ```c
+    // Initialize your custom VFS object. See documentation for VFS for information on how to do this.
+    my_custom_vfs vfs = my_custom_vfs_init();
+
+    config = ma_resource_manager_config_init();
+    config.pVFS = &vfs;
+    ```
+
+This is particularly useful in programs like games where you want to read straight from an archive
+rather than the normal file system. If you do not specify a custom VFS, the resource manager will
+use the operating system's normal file operations.
+
+To load a sound file and create a data source, call `ma_resource_manager_data_source_init()`. When
+loading a sound you need to specify the file path and options for how the sounds should be loaded.
+By default a sound will be loaded synchronously. The returned data source is owned by the caller
+which means the caller is responsible for the allocation and freeing of the data source. Below is
+an example for initializing a data source:
+
+    ```c
+    ma_resource_manager_data_source dataSource;
+    ma_result result = ma_resource_manager_data_source_init(pResourceManager, pFilePath, flags, &dataSource);
+    if (result != MA_SUCCESS) {
+        // Error.
+    }
+
+    // ...
+
+    // A ma_resource_manager_data_source object is compatible with the `ma_data_source` API. To read data, just call
+    // the `ma_data_source_read_pcm_frames()` like you would with any normal data source.
+    result = ma_data_source_read_pcm_frames(&dataSource, pDecodedData, frameCount, &framesRead);
+    if (result != MA_SUCCESS) {
+        // Failed to read PCM frames.
+    }
+
+    // ...
+
+    ma_resource_manager_data_source_uninit(&dataSource);
+    ```
+
+The `flags` parameter specifies how you want to perform loading of the sound file. It can be a
+combination of the following flags:
+
+    ```
+    MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_STREAM
+    MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_DECODE
+    MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_ASYNC
+    MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_WAIT_INIT
+    MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_LOOPING
+    ```
+
+When no flags are specified (set to 0), the sound will be fully loaded into memory, but not
+decoded, meaning the raw file data will be stored in memory, and then dynamically decoded when
+`ma_data_source_read_pcm_frames()` is called. To instead decode the audio data before storing it in
+memory, use the `MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_DECODE` flag. By default, the sound file will
+be loaded synchronously, meaning `ma_resource_manager_data_source_init()` will only return after
+the entire file has been loaded. This is good for simplicity, but can be prohibitively slow. You
+can instead load the sound asynchronously using the `MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_ASYNC` flag.
+This will result in `ma_resource_manager_data_source_init()` returning quickly, but no data will be
+returned by `ma_data_source_read_pcm_frames()` until some data is available. When no data is
+available because the asynchronous decoding hasn't caught up, `MA_BUSY` will be returned by
+`ma_data_source_read_pcm_frames()`.
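+
+A sketch of how a caller might handle this (assumes `format` and `channels` are known from
+`ma_data_source_get_data_format()`):
+
+    ```c
+    result = ma_data_source_read_pcm_frames(&dataSource, pFramesOut, frameCount, &framesRead);
+    if (result == MA_BUSY) {
+        // Decoding hasn't caught up yet. Output silence and try again on the next iteration.
+        ma_silence_pcm_frames(pFramesOut, frameCount, format, channels);
+    }
+    ```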
+
+For large sounds, it's often prohibitive to store the entire file in memory. To mitigate this, you
+can instead stream audio data which you can do by specifying the
+`MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_STREAM` flag. When streaming, data will be decoded in 1
+second pages. When a new page needs to be decoded, a job will be posted to the job queue and then
+subsequently processed in a job thread.
+
+The `MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_LOOPING` flag can be used so that the sound will loop
+when it reaches the end by default. It's recommended you use this flag when you want to have a
+looping streaming sound. If you try loading a very short sound as a stream, you will get a glitch.
+This is because the resource manager needs to pre-fill the initial buffer at initialization time,
+and if you don't specify the `MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_LOOPING` flag, the resource
+manager will assume the sound is not looping and will stop filling the buffer when it reaches the
+end, therefore resulting in a discontinuous buffer.
+
+For in-memory sounds, reference counting is used to ensure the data is loaded only once. This means
+multiple calls to `ma_resource_manager_data_source_init()` with the same file path will result in
+the file data only being loaded once. Each call to `ma_resource_manager_data_source_init()` must be
+matched up with a call to `ma_resource_manager_data_source_uninit()`. Sometimes it can be useful
+for a program to register self-managed raw audio data and associate it with a file path. Use the
+`ma_resource_manager_register_*()` and `ma_resource_manager_unregister_*()` APIs to do this.
+`ma_resource_manager_register_decoded_data()` is used to associate a pointer to raw, self-managed
+decoded audio data in the specified data format with the specified name. Likewise,
+`ma_resource_manager_register_encoded_data()` is used to associate a pointer to raw self-managed
+encoded audio data (the raw file data) with the specified name. Note that these names need not be
+actual file paths. When `ma_resource_manager_data_source_init()` is called (without the
+`MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_STREAM` flag), the resource manager will look for these
+explicitly registered data buffers and, if found, will use it as the backing data for the data
+source. Note that the resource manager does *not* make a copy of this data so it is up to the
+caller to ensure the pointer stays valid for its lifetime. Use
+`ma_resource_manager_unregister_data()` to unregister the self-managed data. You can also use
+`ma_resource_manager_register_file()` and `ma_resource_manager_unregister_file()` to register and
+unregister a file. It does not make sense to use the `MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_STREAM`
+flag with a self-managed data pointer.
+
+
+6.1. Asynchronous Loading and Synchronization
+---------------------------------------------
+When loading asynchronously, it can be useful to poll whether or not loading has finished. Use
+`ma_resource_manager_data_source_result()` to determine this. For in-memory sounds, this will
+return `MA_SUCCESS` when the file has been *entirely* decoded. If the sound is still being decoded,
+`MA_BUSY` will be returned. Otherwise, some other error code will be returned if the sound failed
+to load. For streaming data sources, `MA_SUCCESS` will be returned when the first page has been
+decoded and the sound is ready to be played. If the first page is still being decoded, `MA_BUSY`
+will be returned. Otherwise, some other error code will be returned if the sound failed to load.
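+
+Polling might look like this (a sketch):
+
+    ```c
+    result = ma_resource_manager_data_source_result(&dataSource);
+    if (result == MA_BUSY) {
+        // Still loading or decoding.
+    } else if (result == MA_SUCCESS) {
+        // Fully loaded (or, for streams, the first page has been decoded).
+    } else {
+        // Loading failed with an error.
+    }
+    ```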
+
+In addition to polling, you can also use a simple synchronization object called a "fence" to wait
+for asynchronously loaded sounds to finish. This is called `ma_fence`. The advantage to using a
+fence is that it can be used to wait for a group of sounds to finish loading rather than waiting
+for sounds on an individual basis. There are two stages to loading a sound:
+
+  * Initialization of the internal decoder; and
+  * Completion of decoding of the file (the file is fully decoded)
+
+You can specify separate fences for each of the different stages. Waiting for the initialization
+of the internal decoder is important for when you need to know the sample format, channels and
+sample rate of the file.
+
+The example below shows how you could use a fence when loading a number of sounds:
+
+    ```c
+    // This fence will be released when all sounds are finished loading entirely.
+    ma_fence fence;
+    ma_fence_init(&fence);
+
+    // This will be passed into the initialization routine for each sound.
+    ma_resource_manager_pipeline_notifications notifications = ma_resource_manager_pipeline_notifications_init();
+    notifications.done.pFence = &fence;
+
+    // Now load a bunch of sounds:
+    for (iSound = 0; iSound < soundCount; iSound += 1) {
+        ma_resource_manager_data_source_init(pResourceManager, pSoundFilePaths[iSound], flags, &notifications, &pSoundSources[iSound]);
+    }
+
+    // ... DO SOMETHING ELSE WHILE SOUNDS ARE LOADING ...
+
+    // Wait for loading of sounds to finish.
+    ma_fence_wait(&fence);
+    ```
+
+In the example above we used a fence for waiting until the entire file has been fully decoded. If
+you only need to wait for the initialization of the internal decoder to complete, you can use the
+`init` member of the `ma_resource_manager_pipeline_notifications` object:
+
+    ```c
+    notifications.init.pFence = &fence;
+    ```
+
+If a fence is not appropriate for your situation, you can instead use a callback that is fired on
+an individual sound basis. This is done in a very similar way to fences:
+
+    ```c
+    typedef struct
+    {
+        ma_async_notification_callbacks cb;
+        void* pMyData;
+    } my_notification;
+
+    void my_notification_callback(ma_async_notification* pNotification)
+    {
+        my_notification* pMyNotification = (my_notification*)pNotification;
+
+        // Do something in response to the sound finishing loading.
+    }
+
+    ...
+
+    my_notification myCallback;
+    myCallback.cb.onSignal = my_notification_callback;
+    myCallback.pMyData     = pMyData;
+
+    ma_resource_manager_pipeline_notifications notifications = ma_resource_manager_pipeline_notifications_init();
+    notifications.done.pNotification = &myCallback;
+
+    ma_resource_manager_data_source_init(pResourceManager, "my_sound.wav", flags, &notifications, &mySound);
+    ```
+
+In the example above we just extend the `ma_async_notification_callbacks` object and pass an
+instantiation into the `ma_resource_manager_pipeline_notifications` in the same way as we did with
+the fence, only we set `pNotification` instead of `pFence`. You can set both of these at the same
+time and they should both work as expected. If using the `pNotification` system, you need to ensure
+your `ma_async_notification_callbacks` object stays valid.
+
+
+
+6.2. Resource Manager Implementation Details
+--------------------------------------------
+Resources are managed in two main ways:
+
+  * By storing the entire sound inside an in-memory buffer (referred to as a data buffer)
+  * By streaming audio data on the fly (referred to as a data stream)
+
+A resource managed data source (`ma_resource_manager_data_source`) encapsulates a data buffer or
+data stream, depending on whether or not the data source was initialized with the
+`MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_STREAM` flag. If so, it will make use of a
+`ma_resource_manager_data_stream` object. Otherwise it will use a `ma_resource_manager_data_buffer`
+object. Both of these objects are data sources which means they can be used with any
+`ma_data_source_*()` API.
+
+Another major feature of the resource manager is the ability to asynchronously decode audio files.
+This relieves the audio thread of time-consuming decoding which can negatively affect scalability
+due to the audio thread needing to complete its work extremely quickly to avoid glitching.
+Asynchronous decoding is achieved through a job system. There is a central multi-producer,
+multi-consumer, fixed-capacity job queue. When some asynchronous work needs to be done, a job is
+posted to the queue which is then read by a job thread. The number of job threads can be
+configured for improved scalability, and job threads can all run in parallel without needing to
+worry about the order of execution (how this is achieved is explained below).
+
+When a sound is being loaded asynchronously, playback can begin before the sound has been fully
+decoded. This enables the application to start playback of the sound quickly, while at the same
+time allowing the resource manager to keep loading in the background. Since there may be fewer
+threads than the number of sounds being loaded at a given time, a simple scheduling system is used
+to keep decoding time balanced and fair. The resource manager solves this by splitting decoding
+into chunks called pages. By default, each page is 1 second long. When a page has been decoded, a
+new job will be posted to start decoding the next page. By dividing up decoding into pages, an
+individual sound shouldn't ever delay every other sound from having their first page decoded. Of
+course, when loading many sounds at the same time, there will always be an amount of time required
+to process jobs in the queue so in heavy load situations there will still be some delay. To
+determine if a data source is ready to have some frames read, use
+`ma_resource_manager_data_source_get_available_frames()`. This will return the number of frames
+available starting from the current position.
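+
+For example (a sketch):
+
+    ```c
+    ma_uint64 availableFrames;
+
+    result = ma_resource_manager_data_source_get_available_frames(&dataSource, &availableFrames);
+    if (result == MA_SUCCESS && availableFrames >= frameCount) {
+        // Enough frames have been decoded to service a read of frameCount frames without waiting.
+    }
+    ```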
+
+
+6.2.1. Job Queue
+----------------
+The resource manager uses a job queue which is multi-producer, multi-consumer, and fixed-capacity.
+This job queue is not currently lock-free, and instead uses a spinlock to achieve thread-safety.
+Only a fixed number of jobs can be allocated and inserted into the queue which is done through a
+lock-free data structure for allocating an index into a fixed-sized array, with reference counting
+for mitigation of the ABA problem. The reference count is 32-bit.
+
+For many types of jobs it's important that they execute in a specific order. In these cases, jobs
+are executed serially. For the resource manager, serial execution of jobs is only required on a
+per-object basis (per data buffer or per data stream). Each of these objects stores an execution
+counter. When a job is posted it is associated with an execution counter. When the job is
+processed, it checks if the execution counter of the job equals the execution counter of the
+owning object and if so, processes the job. If the counters are not equal, the job will be posted
+back onto the job queue for later processing. When the job finishes processing, the execution
+counter of the owning object is incremented. This system means that no matter how many job threads are
+executing, decoding of an individual sound will always get processed serially. The advantage to
+having multiple threads comes into play when loading multiple sounds at the same time.
+
+The resource manager's job queue is not 100% lock-free and will use a spinlock to achieve
+thread-safety for a very small section of code. This is only relevant when the resource manager
+uses more than one job thread. If only using a single job thread, which is the default, the
+lock should never actually wait in practice. The amount of time spent locking should be quite
+short, but it's something to be aware of for those who have pedantic lock-free requirements and
+need to use more than one job thread. There are plans to remove this lock in a future version.
+
+In addition, posting a job will release a semaphore, which on Win32 is implemented with
+`ReleaseSemaphore` and on POSIX platforms via a condition variable:
+
+    ```c
+    pthread_mutex_lock(&pSemaphore->lock);
+    {
+        pSemaphore->value += 1;
+        pthread_cond_signal(&pSemaphore->cond);
+    }
+    pthread_mutex_unlock(&pSemaphore->lock);
+    ```
+
+Again, this is relevant for those with strict lock-free requirements in the audio thread. To avoid
+this, you can use non-blocking mode (via the `MA_JOB_QUEUE_FLAG_NON_BLOCKING`
+flag) and implement your own job processing routine (see the "Resource Manager" section above for
+details on how to do this).
+
+
+
+6.2.2. Data Buffers
+-------------------
+When the `MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_STREAM` flag is excluded at initialization time, the
+resource manager will try to load the data into an in-memory data buffer. Before doing so, however,
+it will first check if the specified file is already loaded. If so, it will increment a reference
+counter and just use the already loaded data. This saves both time and memory. When the data buffer
+is uninitialized, the reference counter will be decremented. If the counter hits zero, the file
+will be unloaded. This is a detail to keep in mind because it could result in excessive loading and
+unloading of a sound. For example, the following sequence will result in a file being loaded twice,
+one load after the other:
+
+    ```c
+    ma_resource_manager_data_source_init(pResourceManager, "my_file", ..., &myDataBuffer0); // Refcount = 1. Initial load.
+    ma_resource_manager_data_source_uninit(&myDataBuffer0);                                 // Refcount = 0. Unloaded.
+
+    ma_resource_manager_data_source_init(pResourceManager, "my_file", ..., &myDataBuffer1); // Refcount = 1. Reloaded because previous uninit() unloaded it.
+    ma_resource_manager_data_source_uninit(&myDataBuffer1);                                 // Refcount = 0. Unloaded.
+    ```
+
+A binary search tree (BST) is used for storing data buffers as it offers a good balance between
+efficiency and simplicity. The key of the BST is a 64-bit hash of the file path that was passed
+into `ma_resource_manager_data_source_init()`. The advantage of using a hash is that it saves
+memory over storing the entire path, has faster comparisons, and results in a mostly balanced BST
+due to the random nature of the hash. The disadvantages are that file names are case-sensitive and
+there's a small chance of name collisions. If case-sensitivity is an issue, you should normalize
+your file names to upper- or lower-case before initializing your data sources. If name collisions
+become an issue, you'll need to rename one of the colliding files or else not use the
+resource manager.
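+
+For example, a trivial way to normalize ASCII paths to lower case before handing them to the
+resource manager (plain C, nothing miniaudio-specific):
+
+    ```c
+    #include <ctype.h>
+
+    static void to_lower_ascii(char* p)
+    {
+        for (; *p != '\0'; p += 1) {
+            *p = (char)tolower((unsigned char)*p);
+        }
+    }
+    ```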
+
+When a sound file has not already been loaded and the `MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_ASYNC`
+flag is excluded, the file will be decoded synchronously by the calling thread. There are two
+options for controlling how the audio is stored in the data buffer - encoded or decoded. When the
+`MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_DECODE` option is excluded, the raw file data will be stored
+in memory. Otherwise the sound will be decoded before storing it in memory. Synchronous loading is
+a very simple and standard process of simply adding an item to the BST, allocating a block of
+memory and then decoding (if `MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_DECODE` is specified).
+
+When the `MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_ASYNC` flag is specified, loading of the data buffer
+is done asynchronously. In this case, a job is posted to the queue to start loading and then the
+function immediately returns, setting an internal result code to `MA_BUSY`. This result code is
+returned when the program calls `ma_resource_manager_data_source_result()`. When decoding has fully
+completed, `MA_SUCCESS` will be returned. This can be used to know if loading has fully completed.
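+
+A minimal polling sketch (the data source name is hypothetical):
+
+    ```c
+    ma_result result = ma_resource_manager_data_source_result(&myDataSource);
+    if (result == MA_BUSY) {
+        // Still loading.
+    } else if (result == MA_SUCCESS) {
+        // Loading has fully completed.
+    } else {
+        // Loading failed.
+    }
+    ```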
+
+When loading asynchronously, a single job is posted to the queue of the type
+`MA_JOB_TYPE_RESOURCE_MANAGER_LOAD_DATA_BUFFER_NODE`. This involves making a copy of the file path and
+associating it with the job. When the job is processed by the job thread, it will first load the file
+using the VFS associated with the resource manager. When using a custom VFS, it's important that it
+be completely thread-safe because it will be used from one or more job threads at the same time.
+Individual files should only ever be accessed by one thread at a time, however. After opening the
+file via the VFS, the job will determine whether or not the file is being decoded. If not, it
+simply allocates a block of memory and loads the raw file contents into it and returns. On the
+other hand, when the file is being decoded, it will first allocate a decoder on the heap and
+initialize it. Then it will check if the length of the file is known. If so it will allocate a
+block of memory to store the decoded output and initialize it to silence. If the size is unknown,
+it will allocate room for one page. After memory has been allocated, the first page will be
+decoded. If the sound is shorter than a page, the result code will be set to `MA_SUCCESS` and the
+completion event will be signalled, at which point loading is complete. If, however, there is more to
+decode, a job with the code `MA_JOB_TYPE_RESOURCE_MANAGER_PAGE_DATA_BUFFER_NODE` is posted. This job
+will decode the next page and perform the same process if it reaches the end. If there is more to
+decode, the job will post another `MA_JOB_TYPE_RESOURCE_MANAGER_PAGE_DATA_BUFFER_NODE` job which will
+keep on happening until the sound has been fully decoded. For sounds of an unknown length, each
+page will be linked together as a linked list. Internally this is implemented via the
+`ma_paged_audio_buffer` object.
+
+
+6.2.3. Data Streams
+-------------------
+Data streams only ever store two pages worth of data for each instance. They are most useful for
+large sounds like music tracks in games that would consume too much memory if fully decoded in
+memory. Once every frame in a page has been read, a job will be posted to load the next page
+from the VFS.
+
+For data streams, the `MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_ASYNC` flag will determine whether or
+not initialization of the data source waits until the two pages have been decoded. When unset,
+`ma_resource_manager_data_source_init()` will wait until the two pages have been loaded, otherwise
+it will return immediately.
+
+When frames are read from a data stream using `ma_resource_manager_data_source_read_pcm_frames()`,
+`MA_BUSY` will be returned if there are no frames available. If there are some frames available,
+but less than the number requested, `MA_SUCCESS` will be returned, but the actual number of frames
+read will be less than the number requested. Due to the asynchronous nature of data streams,
+seeking is also asynchronous. If the data stream is in the middle of a seek, `MA_BUSY` will be
+returned when trying to read frames.
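+
+A sketch of a read that tolerates this behaviour (names are hypothetical):
+
+    ```c
+    ma_uint64 framesRead;
+    ma_result result = ma_resource_manager_data_source_read_pcm_frames(&myDataStream, pFramesOut, frameCount, &framesRead);
+    if (result == MA_BUSY) {
+        // No frames available yet (still loading, or mid-seek). Consider outputting silence.
+    } else if (result == MA_SUCCESS && framesRead < frameCount) {
+        // Fewer frames than requested were available.
+    }
+    ```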
+
+When `ma_resource_manager_data_source_read_pcm_frames()` results in a page getting fully consumed
+a job is posted to load the next page. This will be posted from the same thread that called
+`ma_resource_manager_data_source_read_pcm_frames()`.
+
+Data streams are uninitialized by posting a job to the queue, but the function won't return until
+that job has been processed. The reason for this is that the caller owns the data stream object and
+therefore miniaudio needs to ensure everything completes before handing back control to the caller.
+Also, if the data stream is uninitialized while pages are in the middle of decoding, they must
+complete before any underlying objects are destroyed; the job system handles this cleanly.
+
+Note that when a new page needs to be loaded, a job will be posted to the resource manager's job
+thread from the audio thread. You must keep in mind the details mentioned in the "Job Queue"
+section above regarding locking when posting an event if you require a strictly lock-free audio
+thread.
+
+
+
+7. Node Graph
+=============
+miniaudio's routing infrastructure follows a node graph paradigm. The idea is that you create a
+node whose outputs are attached to inputs of another node, thereby creating a graph. There are
+different types of nodes, with each node in the graph processing input data to produce output,
+which is then fed through the chain. Each node in the graph can apply its own custom effects. At
+the start of the graph will usually be one or more data source nodes which have no inputs and
+instead pull their data from a data source. At the end of the graph is an endpoint which represents
+the end of the chain and is where the final output is ultimately extracted from.
+
+Each node has a number of input buses and a number of output buses. An output bus from a node is
+attached to an input bus of another. Multiple nodes can connect their output buses to another
+node's input bus, in which case their outputs will be mixed before processing by the node. Below is
+a diagram that illustrates a hypothetical node graph setup:
+
+    ```
+    >>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Data flows left to right >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
+
+    +---------------+                              +-----------------+
+    | Data Source 1 =----+    +----------+    +----= Low Pass Filter =----+
+    +---------------+    |    |          =----+    +-----------------+    |    +----------+
+                         +----= Splitter |                                +----= ENDPOINT |
+    +---------------+    |    |          =----+    +-----------------+    |    +----------+
+    | Data Source 2 =----+    +----------+    +----=  Echo / Delay   =----+
+    +---------------+                              +-----------------+
+    ```
+
+The above graph starts with two data sources whose outputs are attached to the input of a
+splitter node. It's at this point that the two data sources are mixed. After mixing, the splitter
+performs its processing routine and produces two outputs, each a simple duplicate of the input
+stream. One output is attached to a low pass filter, whereas the other output is attached to an
+echo/delay. The outputs of the low pass filter and the echo are attached to the endpoint, and
+since they're both connected to the same input bus, they'll be mixed.
+
+Each input bus must be configured to accept the same number of channels, but the number of channels
+used by input buses can be different to the number of channels for output buses in which case
+miniaudio will automatically convert the input data to the output channel count before processing.
+The number of channels of an output bus of one node must match the channel count of the input bus
+it's attached to. The channel counts cannot be changed after the node has been initialized. If you
+attempt to attach an output bus to an input bus with a different channel count, attachment will
+fail.
+
+To use a node graph, you first need to initialize a `ma_node_graph` object. This is essentially a
+container around the entire graph. The `ma_node_graph` object is required for some thread-safety
+issues which will be explained later. A `ma_node_graph` object is initialized using miniaudio's
+standard config/init system:
+
+    ```c
+    ma_node_graph_config nodeGraphConfig = ma_node_graph_config_init(myChannelCount);
+
+    result = ma_node_graph_init(&nodeGraphConfig, NULL, &nodeGraph);    // Second parameter is a pointer to allocation callbacks.
+    if (result != MA_SUCCESS) {
+        // Failed to initialize node graph.
+    }
+    ```
+
+When you initialize the node graph, you're specifying the channel count of the endpoint. The
+endpoint is a special node which has one input bus and one output bus, both of which have the
+same channel count, which is specified in the config. Any nodes that connect directly to the
+endpoint must be configured such that their output buses have the same channel count. When you read
+audio data from the node graph, it'll have the channel count you specified in the config. To read
+data from the graph:
+
+    ```c
+    ma_uint64 framesRead;
+    result = ma_node_graph_read_pcm_frames(&nodeGraph, pFramesOut, frameCount, &framesRead);
+    if (result != MA_SUCCESS) {
+        // Failed to read data from the node graph.
+    }
+    ```
+
+When you read audio data, miniaudio starts at the node graph's endpoint node which then pulls in
+data from its input attachments, which in turn recursively pull in data from their inputs, and so
+on. At the start of the graph there will be some kind of data source node which will have zero
+inputs and will instead read directly from a data source. The base nodes don't literally need to
+read from a `ma_data_source` object, but they will always have some kind of underlying object that
+sources some kind of audio. The `ma_data_source_node` node can be used to read from a
+`ma_data_source`. Data is always in floating-point format and in the number of channels you
+specified when the graph was initialized. The sample rate is defined by the underlying data sources.
+It's up to you to ensure they use a consistent and appropriate sample rate.
+
+The `ma_node` API is designed to allow custom nodes to be implemented with relative ease, but
+miniaudio includes a few stock nodes for common functionality. This is how you would initialize
+`ma_data_source_node`, a stock node that reads directly from a data source:
+
+    ```c
+    ma_data_source_node_config config = ma_data_source_node_config_init(pMyDataSource);
+
+    ma_data_source_node dataSourceNode;
+    result = ma_data_source_node_init(&nodeGraph, &config, NULL, &dataSourceNode);
+    if (result != MA_SUCCESS) {
+        // Failed to create data source node.
+    }
+    ```
+
+The data source node will use the data source's output channel count to determine the channel
+count of the output bus. There will be 1 output bus and 0 input buses (data will be drawn
+directly from the data source). The data source must output to floating-point (`ma_format_f32`)
+or else an error will be
+returned from `ma_data_source_node_init()`.
+
+By default the node will not be attached to the graph. To do so, use `ma_node_attach_output_bus()`:
+
+    ```c
+    result = ma_node_attach_output_bus(&dataSourceNode, 0, ma_node_graph_get_endpoint(&nodeGraph), 0);
+    if (result != MA_SUCCESS) {
+        // Failed to attach node.
+    }
+    ```
+
+The code above connects the data source node directly to the endpoint. Since the data source node
+has only a single output bus, the index will always be 0. Likewise, the endpoint only has a single
+input bus which means the input bus index will also always be 0.
+
+To detach a specific output bus, use `ma_node_detach_output_bus()`. To detach all output buses, use
+`ma_node_detach_all_output_buses()`. If you want to just move the output bus from one attachment to
+another, you do not need to detach first. You can just call `ma_node_attach_output_bus()` and it'll
+deal with it for you.
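+
+For example, moving the data source node's output from the endpoint over to input bus 0 of some
+hypothetical effect node is a single call:
+
+    ```c
+    ma_node_attach_output_bus(&dataSourceNode, 0, &myEffectNode, 0);    // Implicitly detaches from the endpoint.
+    ```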
+
+Less frequently you may want to create a specialized node. This will be a node where you implement
+your own processing callback to apply a custom effect of some kind. This is similar to initializing
+one of the stock node types, only this time you need to specify a pointer to a vtable containing a
+pointer to the processing function and the number of input and output buses. Example:
+
+    ```c
+    static void my_custom_node_process_pcm_frames(ma_node* pNode, const float** ppFramesIn, ma_uint32* pFrameCountIn, float** ppFramesOut, ma_uint32* pFrameCountOut)
+    {
+        // Do some processing of ppFramesIn (one stream of audio data per input bus)
+        const float* pFramesIn_0 = ppFramesIn[0]; // Input bus @ index 0.
+        const float* pFramesIn_1 = ppFramesIn[1]; // Input bus @ index 1.
+        float* pFramesOut_0 = ppFramesOut[0];     // Output bus @ index 0.
+
+        // Do some processing. On input, `pFrameCountIn` will be the number of input frames in each
+        // buffer in `ppFramesIn` and `pFrameCountOut` will be the capacity of each of the buffers
+        // in `ppFramesOut`. On output, `pFrameCountIn` should be set to the number of input frames
+        // your node consumed and `pFrameCountOut` should be set the number of output frames that
+        // were produced.
+        //
+        // You should process as many frames as you can. If your effect consumes input frames at the
+        // same rate as output frames (always the case, unless you're doing resampling), you need
+        // only look at `ppFramesOut` and process that exact number of frames. If you're doing
+        // resampling, you'll need to be sure to set both `pFrameCountIn` and `pFrameCountOut`
+        // properly.
+    }
+
+    static ma_node_vtable my_custom_node_vtable =
+    {
+        my_custom_node_process_pcm_frames, // The function that will be called to process your custom node. This is where you'd implement your effect processing.
+        NULL,   // Optional. A callback for calculating the number of input frames that are required to process a specified number of output frames.
+        2,      // 2 input buses.
+        1,      // 1 output bus.
+        0       // Default flags.
+    };
+
+    ...
+
+    // Each bus needs to have a channel count specified. To do this you need to specify the channel
+    // counts in an array and then pass that into the node config.
+    ma_uint32 inputChannels[2];     // Equal in size to the number of input buses specified in the vtable.
+    ma_uint32 outputChannels[1];    // Equal in size to the number of output buses specified in the vtable.
+
+    inputChannels[0]  = channelsIn;
+    inputChannels[1]  = channelsIn;
+    outputChannels[0] = channelsOut;
+
+    ma_node_config nodeConfig = ma_node_config_init();
+    nodeConfig.vtable          = &my_custom_node_vtable;
+    nodeConfig.pInputChannels  = inputChannels;
+    nodeConfig.pOutputChannels = outputChannels;
+
+    ma_node_base node;
+    result = ma_node_init(&nodeGraph, &nodeConfig, NULL, &node);
+    if (result != MA_SUCCESS) {
+        // Failed to initialize node.
+    }
+    ```
+
+When initializing a custom node, as in the code above, you'll normally just place your vtable in
+static space. The number of input and output buses are specified as part of the vtable. If you need
+a variable number of buses on a per-node basis, the vtable should have the relevant bus count set
+to `MA_NODE_BUS_COUNT_UNKNOWN`. In this case, the bus count should be set in the node config:
+
+    ```c
+    static ma_node_vtable my_custom_node_vtable =
+    {
+        my_custom_node_process_pcm_frames, // The function that will be called to process your custom node. This is where you'd implement your effect processing.
+        NULL,   // Optional. A callback for calculating the number of input frames that are required to process a specified number of output frames.
+        MA_NODE_BUS_COUNT_UNKNOWN,  // The number of input buses is determined on a per-node basis.
+        1,      // 1 output bus.
+        0       // Default flags.
+    };
+
+    ...
+
+    ma_node_config nodeConfig = ma_node_config_init();
+    nodeConfig.vtable          = &my_custom_node_vtable;
+    nodeConfig.inputBusCount   = myBusCount;        // <-- Since the vtable specifies MA_NODE_BUS_COUNT_UNKNOWN, the input bus count should be set here.
+    nodeConfig.pInputChannels  = inputChannels;     // <-- Make sure there are nodeConfig.inputBusCount elements in this array.
+    nodeConfig.pOutputChannels = outputChannels;    // <-- The vtable specifies 1 output bus, so there must be 1 element in this array.
+    ```
+
+In the above example it's important to never set the `inputBusCount` and `outputBusCount` members
+to anything other than their defaults if the vtable specifies an explicit count. They can only be
+set if the vtable specifies `MA_NODE_BUS_COUNT_UNKNOWN` for the relevant bus count.
+
+Most often you'll want to create a structure to encapsulate your node with some extra data. You
+need to make sure the `ma_node_base` object is your first member of the structure:
+
+    ```c
+    typedef struct
+    {
+        ma_node_base base; // <-- Make sure this is always the first member.
+        float someCustomData;
+    } my_custom_node;
+    ```
+
+By doing this, your object will be compatible with all `ma_node` APIs and you can attach it to the
+graph just like any other node.
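+
+Initialization then works the same as for any other node. A sketch, reusing `nodeConfig` from the
+earlier example:
+
+    ```c
+    my_custom_node node;
+    node.someCustomData = 0.0f;
+
+    result = ma_node_init(&nodeGraph, &nodeConfig, NULL, &node.base);
+    if (result != MA_SUCCESS) {
+        // Failed to initialize node.
+    }
+
+    // Because `base` is the first member, the wrapper can be passed directly to ma_node APIs.
+    ma_node_attach_output_bus(&node, 0, ma_node_graph_get_endpoint(&nodeGraph), 0);
+    ```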
+
+In the custom processing callback (`my_custom_node_process_pcm_frames()` in the example above), the
+number of channels for each bus is what was specified by the config when the node was initialized
+with `ma_node_init()`. In addition, all attachments to each of the input buses will have been
+pre-mixed by miniaudio. The config allows you to specify different channel counts for each
+individual input and output bus. It's up to the effect to handle this appropriately and, if it
+can't, return an error from its initialization routine.
+
+Custom nodes can be assigned some flags to describe their behaviour. These are set via the vtable
+and include the following:
+
+    +-----------------------------------------+---------------------------------------------------+
+    | Flag Name                               | Description                                       |
+    +-----------------------------------------+---------------------------------------------------+
+    | MA_NODE_FLAG_PASSTHROUGH                | Useful for nodes that do not do any kind of audio |
+    |                                         | processing, but are instead used for tracking     |
+    |                                         | time, handling events, etc. Also used by the      |
+    |                                         | internal endpoint node. It reads directly from    |
+    |                                         | the input bus to the output bus. Nodes with this  |
+    |                                         | flag must have exactly 1 input bus and 1 output   |
+    |                                         | bus, and both buses must have the same channel    |
+    |                                         | counts.                                           |
+    +-----------------------------------------+---------------------------------------------------+
+    | MA_NODE_FLAG_CONTINUOUS_PROCESSING      | Causes the processing callback to be called even  |
+    |                                         | when no data is available to be read from input   |
+    |                                         | attachments. When a node has at least one input   |
+    |                                         | bus, but there are no inputs attached or the      |
+    |                                         | inputs do not deliver any data, the node's        |
+    |                                         | processing callback will not get fired. This flag |
+    |                                         | will make it so the callback is always fired      |
+    |                                         | regardless of whether or not any input data is    |
+    |                                         | received. This is useful for effects like         |
+    |                                         | echoes where there will be a tail of audio data   |
+    |                                         | that still needs to be processed even when the    |
+    |                                         | original data sources have reached their ends. It |
+    |                                         | may also be useful for nodes that must always     |
+    |                                         | have their processing callback fired when there   |
+    |                                         | are no inputs attached.                           |
+    +-----------------------------------------+---------------------------------------------------+
+    | MA_NODE_FLAG_ALLOW_NULL_INPUT           | Used in conjunction with                          |
+    |                                         | `MA_NODE_FLAG_CONTINUOUS_PROCESSING`. When this   |
+    |                                         | is set, the `ppFramesIn` parameter of the         |
+    |                                         | processing callback will be set to NULL when      |
+    |                                         | no input frames are available. When this is       |
+    |                                         | unset, silence will be posted to the processing   |
+    |                                         | callback.                                         |
+    +-----------------------------------------+---------------------------------------------------+
+    | MA_NODE_FLAG_DIFFERENT_PROCESSING_RATES | Used to tell miniaudio that input and output      |
+    |                                         | frames are processed at different rates. You      |
+    |                                         | should set this for any nodes that perform        |
+    |                                         | resampling.                                       |
+    +-----------------------------------------+---------------------------------------------------+
+    | MA_NODE_FLAG_SILENT_OUTPUT              | Used to tell miniaudio that a node produces only  |
+    |                                         | silent output. This is useful for nodes where you |
+    |                                         | don't want the output to contribute to the final  |
+    |                                         | mix. An example might be if you want to split     |
+    |                                         | your stream and have one branch output to a file. |
+    |                                         | When using this flag, you should avoid writing to |
+    |                                         | the output buffer of the node's processing        |
+    |                                         | callback because miniaudio will ignore it anyway. |
+    +-----------------------------------------+---------------------------------------------------+
+
+
+If you need to make a copy of an audio stream for effect processing you can use a splitter node
+called `ma_splitter_node`. This has 1 input bus and splits the stream into 2 output buses. You
+can use it like this:
+
+    ```c
+    ma_splitter_node_config splitterNodeConfig = ma_splitter_node_config_init(channels);
+
+    ma_splitter_node splitterNode;
+    result = ma_splitter_node_init(&nodeGraph, &splitterNodeConfig, NULL, &splitterNode);
+    if (result != MA_SUCCESS) {
+        // Failed to create node.
+    }
+
+    // Attach your output buses to two different input buses (can be on two different nodes).
+    ma_node_attach_output_bus(&splitterNode, 0, ma_node_graph_get_endpoint(&nodeGraph), 0); // Attach directly to the endpoint.
+    ma_node_attach_output_bus(&splitterNode, 1, &myEffectNode,                          0); // Attach to input bus 0 of some effect node.
+    ```
+
+The volume of an output bus can be configured on a per-bus basis:
+
+    ```c
+    ma_node_set_output_bus_volume(&splitterNode, 0, 0.5f);
+    ma_node_set_output_bus_volume(&splitterNode, 1, 0.5f);
+    ```
+
+In the code above we're using the splitter node from before and changing the volume of each of the
+copied streams.
+
+You can start and stop a node with the following:
+
+    ```c
+    ma_node_set_state(&splitterNode, ma_node_state_started);    // The default state.
+    ma_node_set_state(&splitterNode, ma_node_state_stopped);
+    ```
+
+By default the node is in a started state, but since it won't yet be connected to anything it
+won't actually be invoked by the node graph until it's connected. When you stop a node, data will
+not be read from any of its input connections. You can use this property to stop a group of
+sounds atomically.
+
+You can configure the initial state of a node in its config:
+
+    ```c
+    nodeConfig.initialState = ma_node_state_stopped;
+    ```
+
+Note that for the stock specialized nodes, all of their configs will have a `nodeConfig` member
+which is the config to use with the base node. This is where the initial state can be configured
+for specialized nodes:
+
+    ```c
+    dataSourceNodeConfig.nodeConfig.initialState = ma_node_state_stopped;
+    ```
+
+When using a specialized node like `ma_data_source_node` or `ma_splitter_node`, be sure to not
+modify the `vtable` member of the `nodeConfig` object.
+
+
+7.1. Timing
+-----------
+The node graph supports starting and stopping nodes at scheduled times. This is especially useful
+for data source nodes where you want to get the node set up, but only start playback at a specific
+time. There are two clocks: local and global.
+
+A local clock is per-node, whereas the global clock is per graph. Scheduling starts and stops can
+only be done based on the global clock because the local clock will not be running while the node
+is stopped. The global clock advances whenever `ma_node_graph_read_pcm_frames()` is called. On the
+other hand, the local clock only advances when the node's processing callback is fired, and is
+advanced based on the output frame count.
+
+To retrieve the global time, use `ma_node_graph_get_time()`. The global time can be set with
+`ma_node_graph_set_time()` which might be useful if you want to do seeking on a global timeline.
+Getting and setting the local time is similar. Use `ma_node_get_time()` to retrieve the local time,
+and `ma_node_set_time()` to set the local time. The global and local times will be advanced by the
+audio thread, so care should be taken to avoid data races. Ideally you should avoid calling these
+outside of the node processing callbacks which are always run on the audio thread.
+
+There is basic support for scheduling the starting and stopping of nodes. You can only schedule one
+start and one stop at a time. This is mainly intended for putting nodes into a started or stopped
+state in a frame-exact manner. Without this mechanism, starting and stopping of a node is limited
+to the resolution of a call to `ma_node_graph_read_pcm_frames()` which would typically be in blocks
+of several milliseconds. The following APIs can be used for scheduling node states:
+
+    ```c
+    ma_node_set_state_time()
+    ma_node_get_state_time()
+    ```
+
+The time is absolute and must be based on the global clock. An example is below:
+
+    ```c
+    ma_node_set_state_time(&myNode, ma_node_state_started, sampleRate*1);   // Start at the 1 second mark.
+    ma_node_set_state_time(&myNode, ma_node_state_stopped, sampleRate*5);   // Stop at the 5 second mark.
+    ```
+
+Below is an example of changing the state using a relative time:
+
+    ```c
+    ma_node_set_state_time(&myNode, ma_node_state_started, sampleRate*1 + ma_node_graph_get_time(&myNodeGraph));
+    ma_node_set_state_time(&myNode, ma_node_state_stopped, sampleRate*5 + ma_node_graph_get_time(&myNodeGraph));
+    ```
+
+Note that due to the nature of multi-threading the times may not be 100% exact. If this is an
+issue, consider scheduling state changes from within a processing callback. An idea might be to
+have some kind of passthrough trigger node that is used specifically for tracking time and handling
+events.
+
+
+
+7.2. Thread Safety and Locking
+------------------------------
+When processing audio, it's ideal not to have any kind of locking in the audio thread. Since it's
+expected that `ma_node_graph_read_pcm_frames()` would be run on the audio thread, it does so
+without the use of any locks. This section discusses the implementation used by miniaudio and goes
+over some of the compromises employed by miniaudio to achieve this goal. Note that the current
+implementation may not be ideal - feedback and critiques are most welcome.
+
+The node graph API is not *entirely* lock-free. Only `ma_node_graph_read_pcm_frames()` is expected
+to be lock-free. Attachment, detachment and uninitialization of nodes use locks to simplify the
+implementation, but are crafted in a way such that such locking is not required when reading audio
+data from the graph. Locking in these areas is achieved by means of spinlocks.
+
+The main complication with keeping `ma_node_graph_read_pcm_frames()` lock-free stems from the fact
+that a node can be uninitialized, and its memory potentially freed, while in the middle of being
+processed on the audio thread. There are times when the audio thread will be referencing a node,
+which means the uninitialization process of a node needs to make sure it delays returning until the
+audio thread is finished so that control is not handed back to the caller thereby giving them a
+chance to free the node's memory.
+
+When the audio thread is processing a node, it does so by reading from each of the output buses of
+the node. In order for a node to process data for one of its output buses, it needs to read from
+each of its input buses, and so on and so forth. It follows that once all output buses of a node
+are detached, the node as a whole will be disconnected and no further processing will occur unless
+its output buses are reattached, which won't be happening when the node is being uninitialized.
+By having `ma_node_detach_output_bus()` wait until the audio thread is finished with it, we can
+simplify a few things, at the expense of making `ma_node_detach_output_bus()` a bit slower. By
+doing this, the implementation of `ma_node_uninit()` becomes trivial - just detach all output
+buses, followed by each of the attachments to each of its input buses, and then do any final clean
+up.
+
+With the above design, the worst-case scenario is `ma_node_detach_output_bus()` taking as long as
+it takes to process the output bus being detached. This will happen if it's called at just the
+wrong moment where the audio thread has just iterated it and has just started processing. The
+caller of `ma_node_detach_output_bus()` will stall until the audio thread is finished, which
+includes the cost of recursively processing its inputs. This is the biggest compromise made with
+the approach taken by miniaudio for its lock-free processing system. The cost of detaching nodes
+earlier in the pipeline (data sources, for example) will be cheaper than the cost of detaching
+higher level nodes, such as some kind of final post-processing endpoint. If you need to do mass
+detachments, detach starting from the lowest level nodes and work your way towards the final
+endpoint node (but don't try detaching the node graph's endpoint). If the audio thread is not
+running, detachment will be fast regardless of order. The reason nodes need to wait for their
+input attachments to complete is the potential for desyncs between data sources. If a node were
+to terminate processing midway through processing its inputs, there's a chance that some of the
+underlying data sources will have been read but others not, resulting in a potential
+desynchronization when detaching and reattaching higher-level nodes. A possible solution to this
+is an option to terminate processing before processing all input attachments when detaching,
+which should be fairly simple.
+
+Another compromise, albeit less significant, is locking when attaching and detaching nodes. This
+locking is achieved by means of a spinlock in order to reduce memory overhead. A lock is present
+for each input bus and output bus. When an output bus is connected to an input bus, both the output
+bus and input bus are locked. This locking is specifically for attaching and detaching across
+different threads and does not affect `ma_node_graph_read_pcm_frames()` in any way. The locking and
+unlocking is mostly self-explanatory, but a slightly less intuitive aspect comes into it when
+considering that iterating over attachments must not break as a result of attaching or detaching a
+node while iteration is occurring.
+
+Attaching and detaching are both quite simple. When an output bus of a node is attached to an input
+bus of another node, it's added to a linked list. Basically, an input bus is a linked list, where
+each item in the list is an output bus. We have some intentional (and convenient) restrictions on
+what can be done with the linked list in order to simplify the implementation. First of all, whenever
+something needs to iterate over the list, it must do so in a forward direction. Backwards iteration
+is not supported. Also, items can only be added to the start of the list.
+
+The linked list is a doubly-linked list where each item in the list (an output bus) holds a pointer
+to the next item in the list, and another to the previous item. A pointer to the previous item is
+only required for fast detachment of the node - it is never used in iteration. This is an
+important property because it means from the perspective of iteration, attaching and detaching of
+an item can be done with a single atomic assignment. This is exploited by both the attachment and
+detachment process. When attaching the node, the first thing that is done is the setting of the
+local "next" and "previous" pointers of the node. After that, the item is "attached" to the list
+by simply performing an atomic exchange with the head pointer. After that, the node is "attached"
+to the list from the perspective of iteration. Even though the "previous" pointer of the next item
+hasn't yet been set, from the perspective of iteration it's been attached because iteration will
+only be happening in a forward direction which means the "previous" pointer won't actually ever get
+used. The same general process applies to detachment. See `ma_node_attach_output_bus()` and
+`ma_node_detach_output_bus()` for the implementation of this mechanism.
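+
+The following sketch illustrates the iteration-safe insertion described above using C11 atomics.
+The types are hypothetical simplifications, and concurrent attachers are assumed to be serialized
+by the per-bus spinlocks mentioned earlier, so only readers run concurrently with this code:
+
+    ```c
+    #include <stdatomic.h>
+
+    typedef struct output_bus output_bus;
+    struct output_bus {
+        output_bus* pNext;
+        output_bus* pPrev;  /* Only needed for fast detachment; never used in iteration. */
+    };
+
+    typedef struct {
+        _Atomic(output_bus*) pHead;
+    } input_bus;
+
+    static void attach(input_bus* pInput, output_bus* pOutput)
+    {
+        /* Set the local pointers first so any iterator that observes the new
+           head always sees a fully-formed "next" pointer. */
+        pOutput->pPrev = NULL;
+        pOutput->pNext = atomic_load(&pInput->pHead);
+
+        /* Publish with a single atomic exchange of the head pointer. */
+        (void)atomic_exchange(&pInput->pHead, pOutput);
+    }
+    ```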
+
+
+
+8. Decoding
+===========
+The `ma_decoder` API is used for reading audio files. Decoders are completely decoupled from
+devices and can be used independently. Built-in support is included for the following formats:
+
+    +---------+
+    | Format  |
+    +---------+
+    | WAV     |
+    | MP3     |
+    | FLAC    |
+    +---------+
+
+You can disable the built-in decoders by specifying one or more of the following options before the
+miniaudio implementation:
+
+    ```c
+    #define MA_NO_WAV
+    #define MA_NO_MP3
+    #define MA_NO_FLAC
+    ```
+
+miniaudio supports the ability to plug in custom decoders. See the section below for details on how
+to use custom decoders.
+
+A decoder can be initialized from a file with `ma_decoder_init_file()`, a block of memory with
+`ma_decoder_init_memory()`, or from data delivered via callbacks with `ma_decoder_init()`. Here is
+an example for loading a decoder from a file:
+
+    ```c
+    ma_decoder decoder;
+    ma_result result = ma_decoder_init_file("MySong.mp3", NULL, &decoder);
+    if (result != MA_SUCCESS) {
+        return false;   // An error occurred.
+    }
+
+    ...
+
+    ma_decoder_uninit(&decoder);
+    ```
+
+When initializing a decoder, you can optionally pass in a pointer to a `ma_decoder_config` object
+(the `NULL` argument in the example above) which allows you to configure the output format, channel
+count, sample rate and channel map:
+
+    ```c
+    ma_decoder_config config = ma_decoder_config_init(ma_format_f32, 2, 48000);
+    ```
+
+When passing in `NULL` for the decoder config in `ma_decoder_init*()`, the output format will be the
+same as that defined by the decoding backend.
+
+Data is read from the decoder as PCM frames using `ma_decoder_read_pcm_frames()`, which outputs
+the number of PCM frames actually read. If this is less than the requested number of PCM frames
+it means you've reached the end. The return value will be `MA_AT_END` if no samples have been
+read and the end has been reached.
+
+    ```c
+    ma_result result = ma_decoder_read_pcm_frames(pDecoder, pFrames, framesToRead, &framesRead);
+    if (framesRead < framesToRead) {
+        // Reached the end.
+    }
+    ```
+
+You can also seek to a specific frame like so:
+
+    ```c
+    ma_result result = ma_decoder_seek_to_pcm_frame(pDecoder, targetFrame);
+    if (result != MA_SUCCESS) {
+        return false;   // An error occurred.
+    }
+    ```
+
+If you want to loop back to the start, you can simply seek back to the first PCM frame:
+
+    ```c
+    ma_decoder_seek_to_pcm_frame(pDecoder, 0);
+    ```
+
+When loading a decoder, miniaudio uses a trial and error technique to find the appropriate decoding
+backend. This can be unnecessarily inefficient if the type is already known. In this case you can
+use the `encodingFormat` member of the decoder config to specify the encoding format you want
+to decode:
+
+    ```c
+    decoderConfig.encodingFormat = ma_encoding_format_wav;
+    ```
+
+See the `ma_encoding_format` enum for possible encoding formats.
+
+The `ma_decoder_init_file()` API will try using the file extension to determine which decoding
+backend to prefer.
+
+
+8.1. Custom Decoders
+--------------------
+It's possible to implement a custom decoder and plug it into miniaudio. This is extremely useful
+when you want to use the `ma_decoder` API, but need to support an encoding format that's not one of
+the stock formats supported by miniaudio. This can be put to particularly good use when using the
+`ma_engine` and/or `ma_resource_manager` APIs because they use `ma_decoder` internally. If, for
+example, you wanted to support Opus, you can do so with a custom decoder (there is a reference
+Opus decoder in the "extras" folder of the miniaudio repository which uses libopus + libopusfile).
+
+A custom decoder must implement a data source. A vtable called `ma_decoding_backend_vtable` needs
+to be implemented which is then passed into the decoder config:
+
+    ```c
+    ma_decoding_backend_vtable* pCustomBackendVTables[] =
+    {
+        &g_ma_decoding_backend_vtable_libvorbis,
+        &g_ma_decoding_backend_vtable_libopus
+    };
+
+    ...
+
+    decoderConfig = ma_decoder_config_init_default();
+    decoderConfig.pCustomBackendUserData = NULL;
+    decoderConfig.ppCustomBackendVTables = pCustomBackendVTables;
+    decoderConfig.customBackendCount     = sizeof(pCustomBackendVTables) / sizeof(pCustomBackendVTables[0]);
+    ```
+
+The `ma_decoding_backend_vtable` vtable has the following functions:
+
+    ```
+    onInit
+    onInitFile
+    onInitFileW
+    onInitMemory
+    onUninit
+    ```
+
+There are only two functions that must be implemented - `onInit` and `onUninit`. The other
+functions can be implemented as a small optimization when loading from a file path or memory. If
+these are not specified, miniaudio will deal with it for you via a generic implementation.
+
+When you initialize a custom data source (by implementing the `onInit` function in the vtable) you
+will need to output a pointer to a `ma_data_source` which implements your custom decoder. See the
+section about data sources for details on how to implement this. Alternatively, see the
+"custom_decoders" example in the miniaudio repository.
+
+The `onInit` function takes a pointer to some callbacks for the purpose of reading raw audio data
+from some arbitrary source. You'll use these functions to read from the raw data and perform the
+decoding. When you call them, you will pass in the `pReadSeekTellUserData` pointer to the relevant
+parameter.
+
+The `pConfig` parameter in `onInit` can be used to configure the backend if appropriate. It's only
+used as a hint and can be ignored. However, if any of the properties are relevant to your decoder,
+an optimal implementation will handle the relevant properties appropriately.
+
+If memory allocation is required, it should be done so via the specified allocation callbacks if
+possible (the `pAllocationCallbacks` parameter).
+
+If an error occurs when initializing the decoder, you should leave `ppBackend` unset, or set to
+NULL, and make sure everything is cleaned up appropriately and an appropriate result code returned.
+When multiple custom backends are specified, miniaudio will cycle through the vtables in the order
+they're listed in the array that's passed into the decoder config so it's important that your
+initialization routine is clean.
+
+When a decoder is uninitialized, the `onUninit` callback will be fired which will give you an
+opportunity to clean up any internal data.
+
+
+
+9. Encoding
+===========
+The `ma_encoder` API is used for writing audio files. The only supported output format is WAV.
+This can be disabled by specifying the following option before the implementation of miniaudio:
+
+    ```c
+    #define MA_NO_WAV
+    ```
+
+An encoder can be initialized to write to a file with `ma_encoder_init_file()` or from data
+delivered via callbacks with `ma_encoder_init()`. Below is an example for initializing an encoder
+to output to a file.
+
+    ```c
+    ma_encoder_config config = ma_encoder_config_init(ma_encoding_format_wav, FORMAT, CHANNELS, SAMPLE_RATE);
+    ma_encoder encoder;
+    ma_result result = ma_encoder_init_file("my_file.wav", &config, &encoder);
+    if (result != MA_SUCCESS) {
+        // Error
+    }
+
+    ...
+
+    ma_encoder_uninit(&encoder);
+    ```
+
+When initializing an encoder you must specify a config which is initialized with
+`ma_encoder_config_init()`. Here you must specify the file type, the output sample format, output
+channel count and output sample rate. The following file types are supported:
+
+    +------------------------+-------------+
+    | Enum                   | Description |
+    +------------------------+-------------+
+    | ma_encoding_format_wav | WAV         |
+    +------------------------+-------------+
+
+If the format, channel count or sample rate is not supported by the output file type an error will
+be returned. The encoder will not perform data conversion so you will need to convert it before
+outputting any audio data. To output audio data, use `ma_encoder_write_pcm_frames()`, like in the
+example below:
+
+    ```c
+    ma_uint64 framesWritten;
+    result = ma_encoder_write_pcm_frames(&encoder, pPCMFramesToWrite, framesToWrite, &framesWritten);
+    if (result != MA_SUCCESS) {
+        ... handle error ...
+    }
+    ```
+
+The `framesWritten` variable will contain the number of PCM frames that were actually written.
+This parameter is optional and you can pass in `NULL` if you don't need it.
+
+Encoders must be uninitialized with `ma_encoder_uninit()`.
+
+
+
+10. Data Conversion
+===================
+A data conversion API is included with miniaudio which supports the majority of data conversion
+requirements. This supports conversion between sample formats, channel counts (with channel
+mapping) and sample rates.
+
+
+10.1. Sample Format Conversion
+------------------------------
+Conversion between sample formats is achieved with the `ma_pcm_*_to_*()`, `ma_pcm_convert()` and
+`ma_convert_pcm_frames_format()` APIs. Use `ma_pcm_*_to_*()` to convert between two specific
+formats. Use `ma_pcm_convert()` to convert based on a `ma_format` variable. Use
+`ma_convert_pcm_frames_format()` to convert PCM frames where you want to specify the frame count
+and channel count as a variable instead of the total sample count.
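+
+For example, converting a buffer of interleaved s16 samples to f32 might look like this (buffer
+names and counts are hypothetical):
+
+    ```c
+    ma_pcm_s16_to_f32(pFramesF32, pFramesS16, frameCount * channels, ma_dither_mode_none);
+
+    // Or, equivalently, in terms of frames rather than samples:
+    ma_convert_pcm_frames_format(pFramesF32, ma_format_f32, pFramesS16, ma_format_s16, frameCount, channels, ma_dither_mode_none);
+    ```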
+
+
+10.1.1. Dithering
+-----------------
+Dithering can be set using the `ditherMode` parameter.
+
+The different dithering modes include the following, in order of efficiency:
+
+    +-----------+--------------------------+
+    | Type      | Enum Token               |
+    +-----------+--------------------------+
+    | None      | ma_dither_mode_none      |
+    | Rectangle | ma_dither_mode_rectangle |
+    | Triangle  | ma_dither_mode_triangle  |
+    +-----------+--------------------------+
+
+Note that even if the dither mode is set to something other than `ma_dither_mode_none`, it will be
+ignored for conversions where dithering is not needed. Dithering is available for the following
+conversions:
+
+    ```
+    s16 -> u8
+    s24 -> u8
+    s32 -> u8
+    f32 -> u8
+    s24 -> s16
+    s32 -> s16
+    f32 -> s16
+    ```
+
+Note that it is not an error to pass something other than `ma_dither_mode_none` for conversions
+where dither is not used. It will just be ignored.
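+
+For example, a dithered f32 to s16 conversion might look like this (buffer names hypothetical):
+
+    ```c
+    ma_convert_pcm_frames_format(pFramesS16, ma_format_s16, pFramesF32, ma_format_f32, frameCount, channels, ma_dither_mode_triangle);
+    ```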
+
+
+
+10.2. Channel Conversion
+------------------------
+Channel conversion is used for channel rearrangement and conversion from one channel count to
+another. The `ma_channel_converter` API is used for channel conversion. Below is an example of
+initializing a simple channel converter which converts from mono to stereo.
+
+    ```c
+    ma_channel_converter_config config = ma_channel_converter_config_init(
+        ma_format_s16,                  // Sample format
+        1,                              // Input channels
+        NULL,                           // Input channel map
+        2,                              // Output channels
+        NULL,                           // Output channel map
+        ma_channel_mix_mode_default);   // The mixing algorithm to use when combining channels.
+
+    result = ma_channel_converter_init(&config, NULL, &converter);
+    if (result != MA_SUCCESS) {
+        // Error.
+    }
+    ```
+
+To perform the conversion simply call `ma_channel_converter_process_pcm_frames()` like so:
+
+    ```c
+    ma_result result = ma_channel_converter_process_pcm_frames(&converter, pFramesOut, pFramesIn, frameCount);
+    if (result != MA_SUCCESS) {
+        // Error.
+    }
+    ```
+
+It is up to the caller to ensure the output buffer is large enough to accommodate the new PCM
+frames.
+
+Input and output PCM frames are always interleaved. Deinterleaved layouts are not supported.
+
+
+10.2.1. Channel Mapping
+-----------------------
+In addition to converting from one channel count to another, like the example above, the channel
+converter can also be used to rearrange channels. When initializing the channel converter, you can
+optionally pass in channel maps for both the input and output frames. If the channel counts are the
+same, and each channel map contains the same channel positions with the exception that they're in
+a different order, a simple shuffling of the channels will be performed. If, however, there is not
+a 1:1 mapping of channel positions, or the channel counts differ, the input channels will be mixed
+based on a mixing mode which is specified when initializing the `ma_channel_converter_config`
+object.
+
+When converting from mono to multi-channel, the mono channel is simply copied to each output
+channel. When going the other way around, the audio of the input channels is simply averaged and
+copied to the mono channel.
+
+In more complicated cases blending is used. The `ma_channel_mix_mode_simple` mode will drop excess
+channels and silence extra channels. For example, converting from 4 to 2 channels, the 3rd and 4th
+channels will be dropped, whereas converting from 2 to 4 channels will put silence into the 3rd and
+4th channels.
+
+The `ma_channel_mix_mode_rectangle` mode uses spatial locality based on a rectangle to compute a
+simple distribution between input and output. Imagine sitting in the middle of a room, with
+speakers on the walls representing channel positions. The `MA_CHANNEL_FRONT_LEFT` position can be
+thought of as being in the corner of the front and left walls.
+
+Finally, the `ma_channel_mix_mode_custom_weights` mode can be used to use custom user-defined
+weights. Custom weights can be passed in as the last parameter of
+`ma_channel_converter_config_init()`.
+
+Predefined channel maps can be retrieved with `ma_channel_map_init_standard()`. This takes a
+`ma_standard_channel_map` enum as its first parameter, which can be one of the following:
+
+    +-----------------------------------+-----------------------------------------------------------+
+    | Name                              | Description                                               |
+    +-----------------------------------+-----------------------------------------------------------+
+    | ma_standard_channel_map_default   | Default channel map used by miniaudio. See below.         |
+    | ma_standard_channel_map_microsoft | Channel map used by Microsoft's bitfield channel maps.    |
+    | ma_standard_channel_map_alsa      | Default ALSA channel map.                                 |
+    | ma_standard_channel_map_rfc3551   | RFC 3551. Based on AIFF.                                  |
+    | ma_standard_channel_map_flac      | FLAC channel map.                                         |
+    | ma_standard_channel_map_vorbis    | Vorbis channel map.                                       |
+    | ma_standard_channel_map_sound4    | FreeBSD's sound(4).                                       |
+    | ma_standard_channel_map_sndio     | sndio channel map. http://www.sndio.org/tips.html.        |
+    | ma_standard_channel_map_webaudio  | https://webaudio.github.io/web-audio-api/#ChannelOrdering |
+    +-----------------------------------+-----------------------------------------------------------+
+
+Below are the channel maps used by default in miniaudio (`ma_standard_channel_map_default`):
+
+    +---------------+---------------------------------+
+    | Channel Count | Mapping                         |
+    +---------------+---------------------------------+
+    | 1 (Mono)      | 0: MA_CHANNEL_MONO              |
+    +---------------+---------------------------------+
+    | 2 (Stereo)    | 0: MA_CHANNEL_FRONT_LEFT   
| + | | 1: MA_CHANNEL_FRONT_RIGHT | + +---------------+---------------------------------+ + | 3 | 0: MA_CHANNEL_FRONT_LEFT
| + | | 1: MA_CHANNEL_FRONT_RIGHT
| + | | 2: MA_CHANNEL_FRONT_CENTER | + +---------------+---------------------------------+ + | 4 (Surround) | 0: MA_CHANNEL_FRONT_LEFT
| + | | 1: MA_CHANNEL_FRONT_RIGHT
| + | | 2: MA_CHANNEL_FRONT_CENTER
| + | | 3: MA_CHANNEL_BACK_CENTER | + +---------------+---------------------------------+ + | 5 | 0: MA_CHANNEL_FRONT_LEFT
| + | | 1: MA_CHANNEL_FRONT_RIGHT
| + | | 2: MA_CHANNEL_FRONT_CENTER
| + | | 3: MA_CHANNEL_BACK_LEFT
| + | | 4: MA_CHANNEL_BACK_RIGHT | + +---------------+---------------------------------+ + | 6 (5.1) | 0: MA_CHANNEL_FRONT_LEFT
| + | | 1: MA_CHANNEL_FRONT_RIGHT
| + | | 2: MA_CHANNEL_FRONT_CENTER
| + | | 3: MA_CHANNEL_LFE
| + | | 4: MA_CHANNEL_SIDE_LEFT
| + | | 5: MA_CHANNEL_SIDE_RIGHT | + +---------------+---------------------------------+ + | 7 | 0: MA_CHANNEL_FRONT_LEFT
| + | | 1: MA_CHANNEL_FRONT_RIGHT
| + | | 2: MA_CHANNEL_FRONT_CENTER
| + | | 3: MA_CHANNEL_LFE
| + | | 4: MA_CHANNEL_BACK_CENTER
| + | | 4: MA_CHANNEL_SIDE_LEFT
| + | | 5: MA_CHANNEL_SIDE_RIGHT | + +---------------+---------------------------------+ + | 8 (7.1) | 0: MA_CHANNEL_FRONT_LEFT
| + | | 1: MA_CHANNEL_FRONT_RIGHT
| + | | 2: MA_CHANNEL_FRONT_CENTER
| + | | 3: MA_CHANNEL_LFE
| + | | 4: MA_CHANNEL_BACK_LEFT
| + | | 5: MA_CHANNEL_BACK_RIGHT
| + | | 6: MA_CHANNEL_SIDE_LEFT
| + | | 7: MA_CHANNEL_SIDE_RIGHT | + +---------------+---------------------------------+ + | Other | All channels set to 0. This | + | | is equivalent to the same | + | | mapping as the device. | + +---------------+---------------------------------+ + + + +10.3. Resampling +---------------- +Resampling is achieved with the `ma_resampler` object. To create a resampler object, do something +like the following: + + ```c + ma_resampler_config config = ma_resampler_config_init( + ma_format_s16, + channels, + sampleRateIn, + sampleRateOut, + ma_resample_algorithm_linear); + + ma_resampler resampler; + ma_result result = ma_resampler_init(&config, NULL, &resampler); + if (result != MA_SUCCESS) { + // An error occurred... + } + ``` + +Do the following to uninitialize the resampler: + + ```c + ma_resampler_uninit(&resampler); + ``` + +The following example shows how data can be processed + + ```c + ma_uint64 frameCountIn = 1000; + ma_uint64 frameCountOut = 2000; + ma_result result = ma_resampler_process_pcm_frames(&resampler, pFramesIn, &frameCountIn, pFramesOut, &frameCountOut); + if (result != MA_SUCCESS) { + // An error occurred... + } + + // At this point, frameCountIn contains the number of input frames that were consumed and frameCountOut contains the + // number of output frames written. + ``` + +To initialize the resampler you first need to set up a config (`ma_resampler_config`) with +`ma_resampler_config_init()`. You need to specify the sample format you want to use, the number of +channels, the input and output sample rate, and the algorithm. + +The sample format can be either `ma_format_s16` or `ma_format_f32`. If you need a different format +you will need to perform pre- and post-conversions yourself where necessary. Note that the format +is the same for both input and output. The format cannot be changed after initialization. + +The resampler supports multiple channels and is always interleaved (both input and output). The +channel count cannot be changed after initialization. + +The sample rates can be anything other than zero, and are always specified in hertz. They should be +set to something like 44100, etc. The sample rate is the only configuration property that can be +changed after initialization. + +The miniaudio resampler has built-in support for the following algorithms: + + +-----------+------------------------------+ + | Algorithm | Enum Token | + +-----------+------------------------------+ + | Linear | ma_resample_algorithm_linear | + | Custom | ma_resample_algorithm_custom | + +-----------+------------------------------+ + +The algorithm cannot be changed after initialization. + +Processing always happens on a per PCM frame basis and always assumes interleaved input and output. +De-interleaved processing is not supported. To process frames, use +`ma_resampler_process_pcm_frames()`. On input, this function takes the number of output frames you +can fit in the output buffer and the number of input frames contained in the input buffer. On +output these variables contain the number of output frames that were written to the output buffer +and the number of input frames that were consumed in the process. You can pass in NULL for the +input buffer in which case it will be treated as an infinitely large buffer of zeros. The output +buffer can also be NULL, in which case the processing will be treated as seek. + +The sample rate can be changed dynamically on the fly. 
You can change this with explicit sample +rates with `ma_resampler_set_rate()` and also with a decimal ratio with +`ma_resampler_set_rate_ratio()`. The ratio is in/out. + +Sometimes it's useful to know exactly how many input frames will be required to output a specific +number of frames. You can calculate this with `ma_resampler_get_required_input_frame_count()`. +Likewise, it's sometimes useful to know exactly how many frames would be output given a certain +number of input frames. You can do this with `ma_resampler_get_expected_output_frame_count()`. + +Due to the nature of how resampling works, the resampler introduces some latency. This can be +retrieved in terms of both the input rate and the output rate with +`ma_resampler_get_input_latency()` and `ma_resampler_get_output_latency()`. + + +10.3.1. Resampling Algorithms +----------------------------- +The choice of resampling algorithm depends on your situation and requirements. + + +10.3.1.1. Linear Resampling +--------------------------- +The linear resampler is the fastest, but comes at the expense of poorer quality. There is, however, +some control over the quality of the linear resampler which may make it a suitable option depending +on your requirements. + +The linear resampler performs low-pass filtering before or after downsampling or upsampling, +depending on the sample rates you're converting between. When decreasing the sample rate, the +low-pass filter will be applied before downsampling. When increasing the rate it will be performed +after upsampling. By default a fourth order low-pass filter will be applied. This can be configured +via the `lpfOrder` configuration variable. Setting this to 0 will disable filtering. + +The low-pass filter has a cutoff frequency which defaults to half the sample rate of the lowest of +the input and output sample rates (Nyquist Frequency). + +The API for the linear resampler is the same as the main resampler API, only it's called +`ma_linear_resampler`. + + +10.3.2. Custom Resamplers +------------------------- +You can implement a custom resampler by using the `ma_resample_algorithm_custom` resampling +algorithm and setting a vtable in the resampler config: + + ```c + ma_resampler_config config = ma_resampler_config_init(..., ma_resample_algorithm_custom); + config.pBackendVTable = &g_customResamplerVTable; + ``` + +Custom resamplers are useful if the stock algorithms are not appropriate for your use case. You +need to implement the required functions in `ma_resampling_backend_vtable`. Note that not all +functions in the vtable need to be implemented, but if it's possible to implement, they should be. + +You can use the `ma_linear_resampler` object for an example on how to implement the vtable. The +`onGetHeapSize` callback is used to calculate the size of any internal heap allocation the custom +resampler will need to make given the supplied config. When you initialize the resampler via the +`onInit` callback, you'll be given a pointer to a heap allocation which is where you should store +the heap allocated data. You should not free this data in `onUninit` because miniaudio will manage +it for you. + +The `onProcess` callback is where the actual resampling takes place. On input, `pFrameCountIn` +points to a variable containing the number of frames in the `pFramesIn` buffer and +`pFrameCountOut` points to a variable containing the capacity in frames of the `pFramesOut` buffer. 
+
+Sometimes it's useful to know exactly how many input frames will be required to output a specific
+number of frames. You can calculate this with `ma_resampler_get_required_input_frame_count()`.
+Likewise, it's sometimes useful to know exactly how many frames would be output given a certain
+number of input frames. You can do this with `ma_resampler_get_expected_output_frame_count()`.
+
+Due to the nature of how resampling works, the resampler introduces some latency. This can be
+retrieved in terms of both the input rate and the output rate with
+`ma_resampler_get_input_latency()` and `ma_resampler_get_output_latency()`.
+
+
+10.3.1. Resampling Algorithms
+-----------------------------
+The choice of resampling algorithm depends on your situation and requirements.
+
+
+10.3.1.1. Linear Resampling
+---------------------------
+The linear resampler is the fastest, but comes at the expense of poorer quality. There is, however,
+some control over the quality of the linear resampler which may make it a suitable option depending
+on your requirements.
+
+The linear resampler performs low-pass filtering before or after downsampling or upsampling,
+depending on the sample rates you're converting between. When decreasing the sample rate, the
+low-pass filter will be applied before downsampling. When increasing the rate it will be performed
+after upsampling. By default a fourth order low-pass filter will be applied. This can be configured
+via the `lpfOrder` configuration variable. Setting this to 0 will disable filtering.
+
+The low-pass filter has a cutoff frequency which defaults to half the sample rate of the lowest of
+the input and output sample rates (Nyquist Frequency).
+
+The API for the linear resampler is the same as the main resampler API, only it's called
+`ma_linear_resampler`.
+
+
+10.3.2. Custom Resamplers
+-------------------------
+You can implement a custom resampler by using the `ma_resample_algorithm_custom` resampling
+algorithm and setting a vtable in the resampler config:
+
+    ```c
+    ma_resampler_config config = ma_resampler_config_init(..., ma_resample_algorithm_custom);
+    config.pBackendVTable = &g_customResamplerVTable;
+    ```
+
+Custom resamplers are useful if the stock algorithms are not appropriate for your use case. You
+need to implement the required functions in `ma_resampling_backend_vtable`. Note that not all
+functions in the vtable need to be implemented, but those that can be implemented should be.
+
+You can use the `ma_linear_resampler` object as an example of how to implement the vtable. The
+`onGetHeapSize` callback is used to calculate the size of any internal heap allocation the custom
+resampler will need to make given the supplied config. When you initialize the resampler via the
+`onInit` callback, you'll be given a pointer to a heap allocation which is where you should store
+the heap allocated data. You should not free this data in `onUninit` because miniaudio will manage
+it for you.
+
+The `onProcess` callback is where the actual resampling takes place. On input, `pFrameCountIn`
+points to a variable containing the number of frames in the `pFramesIn` buffer and
+`pFrameCountOut` points to a variable containing the capacity in frames of the `pFramesOut` buffer.
+On output, `pFrameCountIn` should be set to the number of input frames that were fully consumed,
+whereas `pFrameCountOut` should be set to the number of frames that were written to `pFramesOut`.
+
+The `onSetRate` callback is optional and is used for dynamically changing the sample rate. If
+dynamic rate changes are not supported, you can set this callback to NULL.
+
+The `onGetInputLatency` and `onGetOutputLatency` functions are used for retrieving the latency in
+input and output rates respectively. These can be NULL, in which case the latency will be treated
+as 0.
+
+The `onGetRequiredInputFrameCount` callback is used to give miniaudio a hint as to how many input
+frames are required to be available to produce the given number of output frames. Likewise, the
+`onGetExpectedOutputFrameCount` callback is used to determine how many output frames will be
+produced given the specified number of input frames. miniaudio will use these as a hint, but they
+are optional and can be set to NULL if you're unable to implement them.
+
+
+
+10.4. General Data Conversion
+-----------------------------
+The `ma_data_converter` API can be used to wrap sample format conversion, channel conversion and
+resampling into one operation. This is what miniaudio uses internally to convert between the format
+requested when the device was initialized and the format of the backend's native device. The API
+for general data conversion is very similar to the resampling API. Create a `ma_data_converter`
+object like this:
+
+    ```c
+    ma_data_converter_config config = ma_data_converter_config_init(
+        inputFormat,
+        outputFormat,
+        inputChannels,
+        outputChannels,
+        inputSampleRate,
+        outputSampleRate
+    );
+
+    ma_data_converter converter;
+    ma_result result = ma_data_converter_init(&config, NULL, &converter);
+    if (result != MA_SUCCESS) {
+        // An error occurred...
+    }
+    ```
+
+In the example above we use `ma_data_converter_config_init()` to initialize the config, however
+there are many more properties that can be configured, such as channel maps and resampling quality.
+Something like the following may be more suitable depending on your requirements:
+
+    ```c
+    ma_data_converter_config config = ma_data_converter_config_init_default();
+    config.formatIn = inputFormat;
+    config.formatOut = outputFormat;
+    config.channelsIn = inputChannels;
+    config.channelsOut = outputChannels;
+    config.sampleRateIn = inputSampleRate;
+    config.sampleRateOut = outputSampleRate;
+    ma_channel_map_init_standard(ma_standard_channel_map_flac, config.channelMapIn, sizeof(config.channelMapIn)/sizeof(config.channelMapIn[0]), config.channelCountIn);
+    config.resampling.linear.lpfOrder = MA_MAX_FILTER_ORDER;
+    ```
+
+Do the following to uninitialize the data converter:
+
+    ```c
+    ma_data_converter_uninit(&converter, NULL);
+    ```
+
+The following example shows how data can be processed:
+
+    ```c
+    ma_uint64 frameCountIn  = 1000;
+    ma_uint64 frameCountOut = 2000;
+    ma_result result = ma_data_converter_process_pcm_frames(&converter, pFramesIn, &frameCountIn, pFramesOut, &frameCountOut);
+    if (result != MA_SUCCESS) {
+        // An error occurred...
+    }
+
+    // At this point, frameCountIn contains the number of input frames that were consumed and frameCountOut contains the number
+    // of output frames written.
+    ```
+
+The data converter supports multiple channels and is always interleaved (both input and output).
+The channel count cannot be changed after initialization.
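+
+Putting these pieces together, a minimal sketch that converts an in-memory stereo stream from
+s16/44100 to f32/48000 (the buffer names and frame counts are hypothetical):
+
+    ```c
+    ma_data_converter_config config = ma_data_converter_config_init(
+        ma_format_s16, /* Input format. */
+        ma_format_f32, /* Output format. */
+        2,             /* Input channels. */
+        2,             /* Output channels. */
+        44100,         /* Input sample rate. */
+        48000);        /* Output sample rate. */
+
+    ma_data_converter converter;
+    if (ma_data_converter_init(&config, NULL, &converter) == MA_SUCCESS) {
+        ma_uint64 frameCountIn  = inputFrameCount;     /* Frames available in pFramesIn. */
+        ma_uint64 frameCountOut = outputFrameCapacity; /* Capacity of pFramesOut. */
+        ma_data_converter_process_pcm_frames(&converter, pFramesIn, &frameCountIn, pFramesOut, &frameCountOut);
+
+        ma_data_converter_uninit(&converter, NULL);
+    }
+    ```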
+
+Sample rates can be anything other than zero, and are always specified in hertz. They should be set
+to something like 44100, etc. The sample rate is the only configuration property that can be
+changed after initialization, but only if the `resampling.allowDynamicSampleRate` member of
+`ma_data_converter_config` is set to `MA_TRUE`. To change the sample rate, use
+`ma_data_converter_set_rate()` or `ma_data_converter_set_rate_ratio()`. The ratio must be in/out.
+The resampling algorithm cannot be changed after initialization.
+
+Processing always happens on a per PCM frame basis and always assumes interleaved input and output.
+De-interleaved processing is not supported. To process frames, use
+`ma_data_converter_process_pcm_frames()`. On input, this function takes the number of output frames
+you can fit in the output buffer and the number of input frames contained in the input buffer. On
+output these variables contain the number of output frames that were written to the output buffer
+and the number of input frames that were consumed in the process. You can pass in NULL for the
+input buffer in which case it will be treated as an infinitely large buffer of zeros. The output
+buffer can also be NULL, in which case the processing will be treated as seek.
+
+Sometimes it's useful to know exactly how many input frames will be required to output a specific
+number of frames. You can calculate this with `ma_data_converter_get_required_input_frame_count()`.
+Likewise, it's sometimes useful to know exactly how many frames would be output given a certain
+number of input frames. You can do this with `ma_data_converter_get_expected_output_frame_count()`.
+
+Due to the nature of how resampling works, the data converter introduces some latency if resampling
+is required. This can be retrieved in terms of both the input rate and the output rate with
+`ma_data_converter_get_input_latency()` and `ma_data_converter_get_output_latency()`.
+
+
+
+11. Filtering
+=============
+
+11.1. Biquad Filtering
+----------------------
+Biquad filtering is achieved with the `ma_biquad` API. Example:
+
+    ```c
+    ma_biquad_config config = ma_biquad_config_init(ma_format_f32, channels, b0, b1, b2, a0, a1, a2);
+
+    ma_biquad biquad;
+    ma_result result = ma_biquad_init(&config, NULL, &biquad);
+    if (result != MA_SUCCESS) {
+        // Error.
+    }
+
+    ...
+
+    ma_biquad_process_pcm_frames(&biquad, pFramesOut, pFramesIn, frameCount);
+    ```
+
+Biquad filtering is implemented using transposed direct form 2. The numerator coefficients are b0,
+b1 and b2, and the denominator coefficients are a0, a1 and a2. The a0 coefficient is required and
+coefficients must not be pre-normalized.
+
+Supported formats are `ma_format_s16` and `ma_format_f32`. If you need to use a different format
+you need to convert it yourself beforehand. When using `ma_format_s16` the biquad filter will use
+fixed point arithmetic. When using `ma_format_f32`, floating point arithmetic will be used.
+
+Input and output frames are always interleaved.
+
+Filtering can be applied in-place by passing in the same pointer for both the input and output
+buffers, like so:
+
+    ```c
+    ma_biquad_process_pcm_frames(&biquad, pMyData, pMyData, frameCount);
+    ```
+
+If you need to change the values of the coefficients, but maintain the values in the registers you
+can do so with `ma_biquad_reinit()`. This is useful if you need to change the properties of the
+filter while keeping the values of registers valid to avoid glitching. Do not use
+`ma_biquad_init()` for this as it will do a full initialization which involves clearing the
+registers to 0. Note that changing the format or channel count after initialization is invalid and
+will result in an error.
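+
+For instance, a sketch that swaps in a new set of coefficients while preserving the running state
+(the coefficient values are placeholders):
+
+    ```c
+    // Same format and channel count as the original filter; only the coefficients change.
+    ma_biquad_config newConfig = ma_biquad_config_init(ma_format_f32, channels, b0, b1, b2, a0, a1, a2);
+
+    ma_result result = ma_biquad_reinit(&newConfig, &biquad);
+    if (result != MA_SUCCESS) {
+        // Error.
+    }
+    ```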
+
+
+11.2. Low-Pass Filtering
+------------------------
+Low-pass filtering is achieved with the following APIs:
+
+    +---------+------------------------------------------+
+    | API     | Description                              |
+    +---------+------------------------------------------+
+    | ma_lpf1 | First order low-pass filter              |
+    | ma_lpf2 | Second order low-pass filter             |
+    | ma_lpf  | High order low-pass filter (Butterworth) |
+    +---------+------------------------------------------+
+
+Low-pass filter example:
+
+    ```c
+    ma_lpf_config config = ma_lpf_config_init(ma_format_f32, channels, sampleRate, cutoffFrequency, order);
+    ma_result result = ma_lpf_init(&config, NULL, &lpf);
+    if (result != MA_SUCCESS) {
+        // Error.
+    }
+
+    ...
+
+    ma_lpf_process_pcm_frames(&lpf, pFramesOut, pFramesIn, frameCount);
+    ```
+
+Supported formats are `ma_format_s16` and `ma_format_f32`. If you need to use a different format
+you need to convert it yourself beforehand. Input and output frames are always interleaved.
+
+Filtering can be applied in-place by passing in the same pointer for both the input and output
+buffers, like so:
+
+    ```c
+    ma_lpf_process_pcm_frames(&lpf, pMyData, pMyData, frameCount);
+    ```
+
+The maximum filter order is limited to `MA_MAX_FILTER_ORDER` which is set to 8. If you need more,
+you can chain first and second order filters together.
+
+    ```c
+    for (iFilter = 0; iFilter < filterCount; iFilter += 1) {
+        ma_lpf2_process_pcm_frames(&lpf2[iFilter], pMyData, pMyData, frameCount);
+    }
+    ```
+
+If you need to change the configuration of the filter, but need to maintain the state of internal
+registers you can do so with `ma_lpf_reinit()`. This may be useful if you need to change the sample
+rate and/or cutoff frequency dynamically while maintaining smooth transitions. Note that changing
+the format or channel count after initialization is invalid and will result in an error.
+
+The `ma_lpf` object supports a configurable order, but if you only need a first order filter you
+may want to consider using `ma_lpf1`. Likewise, if you only need a second order filter you can use
+`ma_lpf2`. The advantage of this is that they're lighter weight and a bit more efficient.
+
+If an even filter order is specified, a series of second order filters will be processed in a
+chain. If an odd filter order is specified, a first order filter will be applied, followed by a
+series of second order filters in a chain.
+
+
+11.3. High-Pass Filtering
+-------------------------
+High-pass filtering is achieved with the following APIs:
+
+    +---------+-------------------------------------------+
+    | API     | Description                               |
+    +---------+-------------------------------------------+
+    | ma_hpf1 | First order high-pass filter              |
+    | ma_hpf2 | Second order high-pass filter             |
+    | ma_hpf  | High order high-pass filter (Butterworth) |
+    +---------+-------------------------------------------+
+
+High-pass filters work exactly the same as low-pass filters, only the APIs are called `ma_hpf1`,
+`ma_hpf2` and `ma_hpf`. See example code for low-pass filters for example usage.
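+
+For reference, a high-pass analogue of the low-pass example above, mirroring the same parameters
+(a sketch):
+
+    ```c
+    ma_hpf_config config = ma_hpf_config_init(ma_format_f32, channels, sampleRate, cutoffFrequency, order);
+
+    ma_hpf hpf;
+    ma_result result = ma_hpf_init(&config, NULL, &hpf);
+    if (result != MA_SUCCESS) {
+        // Error.
+    }
+
+    ma_hpf_process_pcm_frames(&hpf, pFramesOut, pFramesIn, frameCount);
+    ```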
+
+
+11.4. Band-Pass Filtering
+-------------------------
+Band-pass filtering is achieved with the following APIs:
+
+    +---------+-------------------------------+
+    | API     | Description                   |
+    +---------+-------------------------------+
+    | ma_bpf2 | Second order band-pass filter |
+    | ma_bpf  | High order band-pass filter   |
+    +---------+-------------------------------+
+
+Band-pass filters work exactly the same as low-pass filters, only the APIs are called `ma_bpf2` and
+`ma_bpf`. See example code for low-pass filters for example usage. Note that the order for
+band-pass filters must be an even number which means there is no first order band-pass filter,
+unlike low-pass and high-pass filters.
+
+
+11.5. Notch Filtering
+---------------------
+Notch filtering is achieved with the following APIs:
+
+    +-----------+------------------------------------------+
+    | API       | Description                              |
+    +-----------+------------------------------------------+
+    | ma_notch2 | Second order notching filter             |
+    +-----------+------------------------------------------+
+
+
+11.6. Peaking EQ Filtering
+--------------------------
+Peaking filtering is achieved with the following APIs:
+
+    +----------+------------------------------------------+
+    | API      | Description                              |
+    +----------+------------------------------------------+
+    | ma_peak2 | Second order peaking filter              |
+    +----------+------------------------------------------+
+
+
+11.7. Low Shelf Filtering
+-------------------------
+Low shelf filtering is achieved with the following APIs:
+
+    +-------------+------------------------------------------+
+    | API         | Description                              |
+    +-------------+------------------------------------------+
+    | ma_loshelf2 | Second order low shelf filter            |
+    +-------------+------------------------------------------+
+
+Where a high-pass filter is used to eliminate lower frequencies, a low shelf filter can be used to
+just turn them down rather than eliminate them entirely.
+
+
+11.8. High Shelf Filtering
+--------------------------
+High shelf filtering is achieved with the following APIs:
+
+    +-------------+------------------------------------------+
+    | API         | Description                              |
+    +-------------+------------------------------------------+
+    | ma_hishelf2 | Second order high shelf filter           |
+    +-------------+------------------------------------------+
+
+The high shelf filter has the same API as the low shelf filter, only you would use `ma_hishelf2`
+instead of `ma_loshelf2`. Where a low shelf filter is used to adjust the volume of low frequencies,
+the high shelf filter does the same thing for high frequencies.
+
+
+
+
+12. Waveform and Noise Generation
+=================================
+
+12.1. Waveforms
+---------------
+miniaudio supports generation of sine, square, triangle and sawtooth waveforms. This is achieved
+with the `ma_waveform` API. Example:
+
+    ```c
+    ma_waveform_config config = ma_waveform_config_init(
+        FORMAT,
+        CHANNELS,
+        SAMPLE_RATE,
+        ma_waveform_type_sine,
+        amplitude,
+        frequency);
+
+    ma_waveform waveform;
+    ma_result result = ma_waveform_init(&config, &waveform);
+    if (result != MA_SUCCESS) {
+        // Error.
+    }
+
+    ...
+
+    ma_waveform_read_pcm_frames(&waveform, pOutput, frameCount, NULL);
+    ```
+
+The amplitude, frequency, type, and sample rate can be changed dynamically with
+`ma_waveform_set_amplitude()`, `ma_waveform_set_frequency()`, `ma_waveform_set_type()`, and
+`ma_waveform_set_sample_rate()` respectively.
+
+You can invert the waveform by setting the amplitude to a negative value. You can use this to
+control whether or not a sawtooth has a positive or negative ramp, for example.
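+
+For example, a sketch that retunes a running waveform and then inverts it (the values are
+arbitrary, and the setter signatures are assumed to take the waveform followed by the new value):
+
+    ```c
+    // Retune to 440 Hz and halve the volume.
+    ma_waveform_set_frequency(&waveform, 440);
+    ma_waveform_set_amplitude(&waveform, 0.5);
+
+    // Invert by negating the amplitude. For a sawtooth this flips the ramp direction.
+    ma_waveform_set_amplitude(&waveform, -0.5);
+    ```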
+
+Below are the supported waveform types:
+
+    +---------------------------+
+    | Enum Name                 |
+    +---------------------------+
+    | ma_waveform_type_sine     |
+    | ma_waveform_type_square   |
+    | ma_waveform_type_triangle |
+    | ma_waveform_type_sawtooth |
+    +---------------------------+
+
+
+
+12.2. Noise
+-----------
+miniaudio supports generation of white, pink and Brownian noise via the `ma_noise` API. Example:
+
+    ```c
+    ma_noise_config config = ma_noise_config_init(
+        FORMAT,
+        CHANNELS,
+        ma_noise_type_white,
+        SEED,
+        amplitude);
+
+    ma_noise noise;
+    ma_result result = ma_noise_init(&config, NULL, &noise);
+    if (result != MA_SUCCESS) {
+        // Error.
+    }
+
+    ...
+
+    ma_noise_read_pcm_frames(&noise, pOutput, frameCount, NULL);
+    ```
+
+The noise API uses simple LCG random number generation. It supports a custom seed which is useful
+for things like automated testing requiring reproducibility. Setting the seed to zero will default
+to `MA_DEFAULT_LCG_SEED`.
+
+The amplitude and seed can be changed dynamically with `ma_noise_set_amplitude()` and
+`ma_noise_set_seed()` respectively.
+
+By default, the noise API will use different values for different channels. So, for example, the
+left side in a stereo stream will be different to the right side. To instead have each channel use
+the same random value, set the `duplicateChannels` member of the noise config to true, like so:
+
+    ```c
+    config.duplicateChannels = MA_TRUE;
+    ```
+
+Below are the supported noise types:
+
+    +------------------------+
+    | Enum Name              |
+    +------------------------+
+    | ma_noise_type_white    |
+    | ma_noise_type_pink     |
+    | ma_noise_type_brownian |
+    +------------------------+
+
+
+
+13. Audio Buffers
+=================
+miniaudio supports reading from a buffer of raw audio data via the `ma_audio_buffer` API. This can
+read from memory that's managed by the application, but can also handle the memory management for
+you internally. Memory management is flexible and should support most use cases.
+
+Audio buffers are initialized using the standard configuration system used everywhere in miniaudio:
+
+    ```c
+    ma_audio_buffer_config config = ma_audio_buffer_config_init(
+        format,
+        channels,
+        sizeInFrames,
+        pExistingData,
+        &allocationCallbacks);
+
+    ma_audio_buffer buffer;
+    ma_result result = ma_audio_buffer_init(&config, &buffer);
+    if (result != MA_SUCCESS) {
+        // Error.
+    }
+
+    ...
+
+    ma_audio_buffer_uninit(&buffer);
+    ```
+
+In the example above, the memory pointed to by `pExistingData` will *not* be copied and is how an
+application can do self-managed memory allocation. If you would rather make a copy of the data, use
+`ma_audio_buffer_init_copy()`. To uninitialize the buffer, use `ma_audio_buffer_uninit()`.
+
+Sometimes it can be convenient to allocate the memory for the `ma_audio_buffer` structure and the
+raw audio data in a contiguous block of memory. That is, the raw audio data will be located
+immediately after the `ma_audio_buffer` structure. To do this, use
+`ma_audio_buffer_alloc_and_init()`:
+
+    ```c
+    ma_audio_buffer_config config = ma_audio_buffer_config_init(
+        format,
+        channels,
+        sizeInFrames,
+        pExistingData,
+        &allocationCallbacks);
+
+    ma_audio_buffer* pBuffer;
+    ma_result result = ma_audio_buffer_alloc_and_init(&config, &pBuffer);
+    if (result != MA_SUCCESS) {
+        // Error
+    }
+
+    ...
+
+    ma_audio_buffer_uninit_and_free(pBuffer);
+    ```
+
+If you initialize the buffer with `ma_audio_buffer_alloc_and_init()` you should uninitialize it
+with `ma_audio_buffer_uninit_and_free()`. In the example above, the memory pointed to by
+`pExistingData` will be copied into the buffer, which is contrary to the behavior of
+`ma_audio_buffer_init()`.
+
+An audio buffer has a playback cursor just like a decoder. As you read frames from the buffer, the
+cursor moves forward. The last parameter (`loop`) can be used to determine if the buffer should
+loop. The return value is the number of frames actually read. If this is less than the number of
+frames requested it means the end has been reached. This should never happen if the `loop`
+parameter is set to true. If you want to manually loop back to the start, you can do so with
+`ma_audio_buffer_seek_to_pcm_frame(pAudioBuffer, 0)`. Below is an example for reading data from an
+audio buffer.
+
+    ```c
+    ma_uint64 framesRead = ma_audio_buffer_read_pcm_frames(pAudioBuffer, pFramesOut, desiredFrameCount, isLooping);
+    if (framesRead < desiredFrameCount) {
+        // If not looping, this means the end has been reached. This should never happen in looping mode with valid input.
+    }
+    ```
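+
+To loop manually rather than via the `loop` parameter, you can rewind whenever the end is reached
+(a sketch using the calls described above):
+
+    ```c
+    ma_uint64 framesRead = ma_audio_buffer_read_pcm_frames(pAudioBuffer, pFramesOut, desiredFrameCount, MA_FALSE);
+    if (framesRead < desiredFrameCount) {
+        // The end has been reached. Rewind to the start to loop manually.
+        ma_audio_buffer_seek_to_pcm_frame(pAudioBuffer, 0);
+    }
+    ```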
+
+Sometimes you may want to avoid the cost of data movement between the internal buffer and the
+output buffer. Instead you can use memory mapping to retrieve a pointer to a segment of data:
+
+    ```c
+    void* pMappedFrames;
+    ma_uint64 frameCount = frameCountToTryMapping;
+    ma_result result = ma_audio_buffer_map(pAudioBuffer, &pMappedFrames, &frameCount);
+    if (result == MA_SUCCESS) {
+        // Map was successful. The value in frameCount will be how many frames were _actually_ mapped, which may be
+        // less due to the end of the buffer being reached.
+        ma_copy_pcm_frames(pFramesOut, pMappedFrames, frameCount, pAudioBuffer->format, pAudioBuffer->channels);
+
+        // You must unmap the buffer.
+        ma_audio_buffer_unmap(pAudioBuffer, frameCount);
+    }
+    ```
+
+When you use memory mapping, the read cursor is incremented by the frame count passed in to
+`ma_audio_buffer_unmap()`. If you decide not to process every frame you can pass in a value smaller
+than the value returned by `ma_audio_buffer_map()`. The disadvantage to using memory mapping is
+that it does not handle looping for you. You can determine if the buffer is at the end for the
+purpose of looping with `ma_audio_buffer_at_end()` or by inspecting the return value of
+`ma_audio_buffer_unmap()` and checking if it equals `MA_AT_END`. You should not treat `MA_AT_END`
+as an error when returned by `ma_audio_buffer_unmap()`.
+
+
+
+14. Ring Buffers
+================
+miniaudio supports lock free (single producer, single consumer) ring buffers which are exposed via
+the `ma_rb` and `ma_pcm_rb` APIs. The `ma_rb` API operates on bytes, whereas the `ma_pcm_rb`
+operates on PCM frames. They are otherwise identical as `ma_pcm_rb` is just a wrapper around
+`ma_rb`.
+
+Unlike most other APIs in miniaudio, ring buffers support both interleaved and deinterleaved
+streams. The caller can also allocate their own backing memory for the ring buffer to use
+internally for added flexibility. Otherwise the ring buffer will manage its internal memory for
+you.
+
+The examples below use the PCM frame variant of the ring buffer since that's most likely the one
+you will want to use. To initialize a ring buffer, do something like the following:
+
+    ```c
+    ma_pcm_rb rb;
+    ma_result result = ma_pcm_rb_init(FORMAT, CHANNELS, BUFFER_SIZE_IN_FRAMES, NULL, NULL, &rb);
+    if (result != MA_SUCCESS) {
+        // Error
+    }
+    ```
+
+The `ma_pcm_rb_init()` function takes the sample format and channel count as parameters because
+it's the PCM variant of the ring buffer API. For the regular ring buffer that operates on bytes you
+would call `ma_rb_init()` which leaves these out and just takes the size of the buffer in bytes
+instead of frames. The fourth parameter is an optional pre-allocated buffer and the fifth parameter
+is a pointer to a `ma_allocation_callbacks` structure for custom memory allocation routines.
+Passing in `NULL` for this results in `MA_MALLOC()` and `MA_FREE()` being used.
+
+Use `ma_pcm_rb_init_ex()` if you need a deinterleaved buffer. The data for each sub-buffer is
+offset from each other based on the stride. To manage your sub-buffers you can use
+`ma_pcm_rb_get_subbuffer_stride()`, `ma_pcm_rb_get_subbuffer_offset()` and
+`ma_pcm_rb_get_subbuffer_ptr()`.
+
+Use `ma_pcm_rb_acquire_read()` and `ma_pcm_rb_acquire_write()` to retrieve a pointer to a section
+of the ring buffer. You specify the number of frames you need, and on output it will be set to what
+was actually acquired. If the read or write pointer is positioned such that the number of frames
+requested will require a loop, it will be clamped to the end of the buffer. Therefore, the number
+of frames you're given may be less than the number you requested.
+
+After calling `ma_pcm_rb_acquire_read()` or `ma_pcm_rb_acquire_write()`, you do your work on the
+buffer and then "commit" it with `ma_pcm_rb_commit_read()` or `ma_pcm_rb_commit_write()`. This is
+where the read/write pointers are updated. When you commit you need to pass in the buffer that was
+returned by the earlier call to `ma_pcm_rb_acquire_read()` or `ma_pcm_rb_acquire_write()` and is
+only used for validation. The number of frames passed to `ma_pcm_rb_commit_read()` and
+`ma_pcm_rb_commit_write()` is what's used to increment the pointers, and can be less than what was
+originally requested.
+
+If you want to correct for drift between the write pointer and the read pointer you can use a
+combination of `ma_pcm_rb_pointer_distance()`, `ma_pcm_rb_seek_read()` and
+`ma_pcm_rb_seek_write()`. Note that you can only move the pointers forward, and you should only
+move the read pointer forward via the consumer thread, and the write pointer forward by the
+producer thread. If there is too much space between the pointers, move the read pointer forward. If
+there is too little space between the pointers, move the write pointer forward.
+
+You can use a ring buffer at the byte level instead of the PCM frame level by using the `ma_rb`
+API. This is exactly the same, only you will use the `ma_rb` functions instead of `ma_pcm_rb` and
+instead of frame counts you will pass around byte counts.
+
+The maximum size of the buffer in bytes is `0x7FFFFFFF-(MA_SIMD_ALIGNMENT-1)` due to the most
+significant bit being used to encode a loop flag and the internally managed buffers always being
+aligned to `MA_SIMD_ALIGNMENT`.
+
+Note that the ring buffer is only thread safe when used by a single consumer thread and single
+producer thread.
+
+
+
+15. Backends
+============
+The following backends are supported by miniaudio. These are listed in order of default priority.
+When no backend is specified when initializing a context or device, miniaudio will attempt to use
+each of these backends in the order listed in the table below.
+
+Note that backends that are not usable by the build target will not be included in the build. For
+example, ALSA, which is specific to Linux, will not be included in the Windows build.
+
+    +-------------+-----------------------+--------------------------------------------------------+
+    | Name        | Enum Name             | Supported Operating Systems                            |
+    +-------------+-----------------------+--------------------------------------------------------+
+    | WASAPI      | ma_backend_wasapi     | Windows Vista+                                         |
+    | DirectSound | ma_backend_dsound     | Windows XP+                                            |
+    | WinMM       | ma_backend_winmm      | Windows 95+                                            |
+    | Core Audio  | ma_backend_coreaudio  | macOS, iOS                                             |
+    | sndio       | ma_backend_sndio      | OpenBSD                                                |
+    | audio(4)    | ma_backend_audio4     | NetBSD, OpenBSD                                        |
+    | OSS         | ma_backend_oss        | FreeBSD                                                |
+    | PulseAudio  | ma_backend_pulseaudio | Cross Platform (disabled on Windows, BSD and Android)  |
+    | ALSA        | ma_backend_alsa       | Linux                                                  |
+    | JACK        | ma_backend_jack       | Cross Platform (disabled on BSD and Android)           |
+    | AAudio      | ma_backend_aaudio     | Android 8+                                             |
+    | OpenSL ES   | ma_backend_opensl     | Android (API level 16+)                                |
+    | Web Audio   | ma_backend_webaudio   | Web (via Emscripten)                                   |
+    | Custom      | ma_backend_custom     | Cross Platform                                         |
+    | Null        | ma_backend_null       | Cross Platform (not used on Web)                       |
+    +-------------+-----------------------+--------------------------------------------------------+
+
+Some backends have nuances you may want to be aware of.
+
+15.1. WASAPI
+------------
+- Low-latency shared mode will be disabled when using an application-defined sample rate which is
+  different to the device's native sample rate. To work around this, set `wasapi.noAutoConvertSRC`
+  to true in the device config. This is due to IAudioClient3_InitializeSharedAudioStream() failing
+  when the `AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM` flag is specified. Setting wasapi.noAutoConvertSRC
+  will result in miniaudio's internal resampler being used instead which will in turn enable the
+  use of low-latency shared mode.
+
+15.2. PulseAudio
+----------------
+- If you experience bad glitching/noise on Arch Linux, consider this fix from the Arch wiki:
+  https://wiki.archlinux.org/index.php/PulseAudio/Troubleshooting#Glitches,_skips_or_crackling.
+  Alternatively, consider using a different backend such as ALSA.
+
+15.3. Android
+-------------
+- To capture audio on Android, remember to add the RECORD_AUDIO permission to your manifest:
+  `<uses-permission android:name="android.permission.RECORD_AUDIO" />`
+- With OpenSL|ES, only a single ma_context can be active at any given time. This is due to a
+  limitation with OpenSL|ES.
+- With AAudio, only default devices are enumerated. This is due to AAudio not having an enumeration
+  API (devices are enumerated through Java). You can however perform your own device enumeration
+  through Java and then set the ID in the ma_device_id structure (ma_device_id.aaudio) and pass it
+  to ma_device_init().
+- The backend API will perform resampling where possible. The reason for this as opposed to using
+  miniaudio's built-in resampler is to take advantage of any potential device-specific
+  optimizations the driver may implement.
+
+BSD
+---
+- The sndio backend is currently only enabled on OpenBSD builds.
+- The audio(4) backend is supported on OpenBSD, but you may need to disable sndiod before you can
+  use it.
+
+15.4. UWP
+---------
+- UWP only supports default playback and capture devices.
+- UWP requires the Microphone capability to be enabled in the application's manifest (Package.appxmanifest):
+
+    ```
+    <Package ...>
+        ...
+        <Capabilities>
+            <DeviceCapability Name="microphone" />
+        </Capabilities>
+    </Package>
+    ```
+
+15.5. Web Audio / Emscripten
+----------------------------
+- You cannot use `-std=c*` compiler flags, nor `-ansi`. This only applies to the Emscripten build.
+- The first time a context is initialized it will create a global object called "miniaudio" whose
+  primary purpose is to act as a factory for device objects.
+- Currently the Web Audio backend uses ScriptProcessorNodes, but this may need to change later as
+  they've been deprecated.
+- Google has implemented a policy in their browsers that prevents automatic media output without
+  first receiving some kind of user input. The following web page has additional details:
+  https://developers.google.com/web/updates/2017/09/autoplay-policy-changes. Starting the device
+  may fail if you try to start playback without first handling some kind of user input.
+
+
+
+16. Optimization Tips
+=====================
+See below for some tips on improving performance.
+
+16.1. Low Level API
+-------------------
+- In the data callback, if your data is already clipped prior to copying it into the output buffer,
+  set the `noClip` config option in the device config to true. This will disable miniaudio's
+  built-in clipping function.
+- By default, miniaudio will pre-silence the data callback's output buffer. If you know that you
+  will always write valid data to the output buffer you can disable pre-silencing by setting the
+  `noPreSilence` config option in the device config to true.
+
+16.2. High Level API
+--------------------
+- If a sound does not require doppler or pitch shifting, consider disabling pitching by
+  initializing the sound with the `MA_SOUND_FLAG_NO_PITCH` flag.
+- If a sound does not require spatialization, disable it by initializing the sound with the
+  `MA_SOUND_FLAG_NO_SPATIALIZATION` flag. It can be re-enabled again post-initialization with
+  `ma_sound_set_spatialization_enabled()`.
+- If you know all of your sounds will always be the same sample rate, set the engine's sample
+  rate to match that of the sounds. Likewise, if you're using a self-managed resource manager,
+  consider setting the decoded sample rate to match your sounds. By configuring everything to
+  use a consistent sample rate, sample rate conversion can be avoided.
+
+
+
+17. Miscellaneous Notes
+=======================
+- Automatic stream routing is enabled on a per-backend basis. Support is explicitly enabled for
+  WASAPI and Core Audio, however other backends such as PulseAudio may naturally support it, though
+  not all have been tested.
+- When compiling with VC6 and earlier, decoding is restricted to files less than 2GB in size. This
+  is due to 64-bit file APIs not being available.
+*/
+
+#ifndef miniaudio_h
+#define miniaudio_h
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define MA_STRINGIFY(x)     #x
+#define MA_XSTRINGIFY(x)    MA_STRINGIFY(x)
+
+#define MA_VERSION_MAJOR    0
+#define MA_VERSION_MINOR    11
+#define MA_VERSION_REVISION 22
+#define MA_VERSION_STRING   MA_XSTRINGIFY(MA_VERSION_MAJOR) "." MA_XSTRINGIFY(MA_VERSION_MINOR) "." MA_XSTRINGIFY(MA_VERSION_REVISION)
+
+#if defined(_MSC_VER) && !defined(__clang__)
+    #pragma warning(push)
+    #pragma warning(disable:4201)   /* nonstandard extension used: nameless struct/union */
+    #pragma warning(disable:4214)   /* nonstandard extension used: bit field types other than int */
+    #pragma warning(disable:4324)   /* structure was padded due to alignment specifier */
+#elif defined(__clang__) || (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)))
+    #pragma GCC diagnostic push
+    #pragma GCC diagnostic ignored "-Wpedantic" /* For ISO C99 doesn't support unnamed structs/unions [-Wpedantic] */
+    #if defined(__clang__)
+        #pragma GCC diagnostic ignored "-Wc11-extensions" /* anonymous unions are a C11 extension */
+    #endif
+#endif
+
+
+#if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__)) || defined(_M_X64) || defined(__ia64) || defined(_M_IA64) || defined(__aarch64__) || defined(_M_ARM64) || defined(__powerpc64__) || defined(__ppc64__)
+    #define MA_SIZEOF_PTR   8
+#else
+    #define MA_SIZEOF_PTR   4
+#endif
+
+#include <stddef.h> /* For size_t. */
+
+/* Sized types. */
+#if defined(MA_USE_STDINT)
+    #include <stdint.h>
+    typedef int8_t   ma_int8;
+    typedef uint8_t  ma_uint8;
+    typedef int16_t  ma_int16;
+    typedef uint16_t ma_uint16;
+    typedef int32_t  ma_int32;
+    typedef uint32_t ma_uint32;
+    typedef int64_t  ma_int64;
+    typedef uint64_t ma_uint64;
+#else
+    typedef   signed char          ma_int8;
+    typedef unsigned char          ma_uint8;
+    typedef   signed short         ma_int16;
+    typedef unsigned short         ma_uint16;
+    typedef   signed int           ma_int32;
+    typedef unsigned int           ma_uint32;
+    #if defined(_MSC_VER) && !defined(__clang__)
+        typedef   signed __int64   ma_int64;
+        typedef unsigned __int64   ma_uint64;
+    #else
+        #if defined(__clang__) || (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)))
+            #pragma GCC diagnostic push
+            #pragma GCC diagnostic ignored "-Wlong-long"
+            #if defined(__clang__)
+                #pragma GCC diagnostic ignored "-Wc++11-long-long"
+            #endif
+        #endif
+        typedef   signed long long ma_int64;
+        typedef unsigned long long ma_uint64;
+        #if defined(__clang__) || (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)))
+            #pragma GCC diagnostic pop
+        #endif
+    #endif
+#endif  /* MA_USE_STDINT */
+
+#if MA_SIZEOF_PTR == 8
+    typedef ma_uint64 ma_uintptr;
+#else
+    typedef ma_uint32 ma_uintptr;
+#endif
+
+typedef ma_uint8    ma_bool8;
+typedef ma_uint32   ma_bool32;
+#define MA_TRUE     1
+#define MA_FALSE    0
+
+/* These float types are not used universally by miniaudio. It's to simplify some macro expansion for atomic types. */
+typedef float       ma_float;
+typedef double      ma_double;
+
+typedef void* ma_handle;
+typedef void* ma_ptr;
+
+/*
+ma_proc is annoying because when compiling with GCC we get pedantic warnings about converting
+between `void*` and `void (*)()`. We can't use `void (*)()` with MSVC however, because we'll get
+warning C4191 about "type cast between incompatible function types". To work around this I'm going
+to use a different data type depending on the compiler.
+*/
+#if defined(__GNUC__)
+typedef void (*ma_proc)(void);
+#else
+typedef void* ma_proc;
+#endif
+
+#if defined(_MSC_VER) && !defined(_WCHAR_T_DEFINED)
+typedef ma_uint16 wchar_t;
+#endif
+
+/* Define NULL for some compilers. */
+#ifndef NULL
+#define NULL 0
+#endif
+
+#if defined(SIZE_MAX)
+    #define MA_SIZE_MAX SIZE_MAX
+#else
+    #define MA_SIZE_MAX 0xFFFFFFFF  /* When SIZE_MAX is not defined by the standard library just default to the maximum 32-bit unsigned integer. */
+#endif
+
+
+/* Platform/backend detection. */
+#if defined(_WIN32) || defined(__COSMOPOLITAN__)
+    #define MA_WIN32
+    #if defined(MA_FORCE_UWP) || (defined(WINAPI_FAMILY) && ((defined(WINAPI_FAMILY_PC_APP) && WINAPI_FAMILY == WINAPI_FAMILY_PC_APP) || (defined(WINAPI_FAMILY_PHONE_APP) && WINAPI_FAMILY == WINAPI_FAMILY_PHONE_APP)))
+        #define MA_WIN32_UWP
+    #elif defined(WINAPI_FAMILY) && (defined(WINAPI_FAMILY_GAMES) && WINAPI_FAMILY == WINAPI_FAMILY_GAMES)
+        #define MA_WIN32_GDK
+    #else
+        #define MA_WIN32_DESKTOP
+    #endif
+#endif
+#if !defined(_WIN32)    /* If it's not Win32, assume POSIX. */
+    #define MA_POSIX
+
+    /*
+    Use the MA_NO_PTHREAD_IN_HEADER option at your own risk. This is intentionally undocumented.
+    You can use this to avoid including pthread.h in the header section. The downside is that it
+    results in some fixed sized structures being declared for the various types that are used in
+    miniaudio. The risk here is that these types might be too small for a given platform. This
+    risk is yours to take and no support will be offered if you enable this option.
+    */
+    #ifndef MA_NO_PTHREAD_IN_HEADER
+        #include <pthread.h>    /* Unfortunate #include, but needed for pthread_t, pthread_mutex_t and pthread_cond_t types. */
+        typedef pthread_t       ma_pthread_t;
+        typedef pthread_mutex_t ma_pthread_mutex_t;
+        typedef pthread_cond_t  ma_pthread_cond_t;
+    #else
+        typedef ma_uintptr ma_pthread_t;
+        typedef union ma_pthread_mutex_t { char __data[40]; ma_uint64 __alignment; } ma_pthread_mutex_t;
+        typedef union ma_pthread_cond_t  { char __data[48]; ma_uint64 __alignment; } ma_pthread_cond_t;
+    #endif
+
+    #if defined(__unix__)
+        #define MA_UNIX
+    #endif
+    #if defined(__linux__)
+        #define MA_LINUX
+    #endif
+    #if defined(__APPLE__)
+        #define MA_APPLE
+    #endif
+    #if defined(__DragonFly__) || defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__)
+        #define MA_BSD
+    #endif
+    #if defined(__ANDROID__)
+        #define MA_ANDROID
+    #endif
+    #if defined(__EMSCRIPTEN__)
+        #define MA_EMSCRIPTEN
+    #endif
+    #if defined(__ORBIS__)
+        #define MA_ORBIS
+    #endif
+    #if defined(__PROSPERO__)
+        #define MA_PROSPERO
+    #endif
+    #if defined(__NX__)
+        #define MA_NX
+    #endif
+    #if defined(__BEOS__) || defined(__HAIKU__)
+        #define MA_BEOS
+    #endif
+    #if defined(__HAIKU__)
+        #define MA_HAIKU
+    #endif
+#endif
+
+#if defined(__has_c_attribute)
+    #if __has_c_attribute(fallthrough)
+        #define MA_FALLTHROUGH [[fallthrough]]
+    #endif
+#endif
+#if !defined(MA_FALLTHROUGH) && defined(__has_attribute) && (defined(__clang__) || defined(__GNUC__))
+    #if __has_attribute(fallthrough)
+        #define MA_FALLTHROUGH __attribute__((fallthrough))
+    #endif
+#endif
+#if !defined(MA_FALLTHROUGH)
+    #define MA_FALLTHROUGH ((void)0)
+#endif
+
+#ifdef _MSC_VER
+    #define MA_INLINE __forceinline
+
+    /* noinline was introduced in Visual Studio 2005. */
+    #if _MSC_VER >= 1400
+        #define MA_NO_INLINE __declspec(noinline)
+    #else
+        #define MA_NO_INLINE
+    #endif
+#elif defined(__GNUC__)
+    /*
+    I've had a bug report where GCC is emitting warnings about functions possibly not being inlineable. This warning happens when
+    the __attribute__((always_inline)) attribute is defined without an "inline" statement. I think therefore there must be some
+    case where "__inline__" is not always defined, thus the compiler emitting these warnings. When using -std=c89 or -ansi on the
+    command line, we cannot use the "inline" keyword and instead need to use "__inline__". In an attempt to work around this issue
+    I am using "__inline__" only when we're compiling in strict ANSI mode.
+    */
+    #if defined(__STRICT_ANSI__)
+        #define MA_GNUC_INLINE_HINT __inline__
+    #else
+        #define MA_GNUC_INLINE_HINT inline
+    #endif
+
+    #if (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 2)) || defined(__clang__)
+        #define MA_INLINE MA_GNUC_INLINE_HINT __attribute__((always_inline))
+        #define MA_NO_INLINE __attribute__((noinline))
+    #else
+        #define MA_INLINE MA_GNUC_INLINE_HINT
+        #define MA_NO_INLINE __attribute__((noinline))
+    #endif
+#elif defined(__WATCOMC__)
+    #define MA_INLINE __inline
+    #define MA_NO_INLINE
+#else
+    #define MA_INLINE
+    #define MA_NO_INLINE
+#endif
+
+/* MA_DLL is not officially supported. You're on your own if you want to use this. */
+#if defined(MA_DLL)
+    #if defined(_WIN32)
+        #define MA_DLL_IMPORT  __declspec(dllimport)
+        #define MA_DLL_EXPORT  __declspec(dllexport)
+        #define MA_DLL_PRIVATE static
+    #else
+        #if defined(__GNUC__) && __GNUC__ >= 4
+            #define MA_DLL_IMPORT  __attribute__((visibility("default")))
+            #define MA_DLL_EXPORT  __attribute__((visibility("default")))
+            #define MA_DLL_PRIVATE __attribute__((visibility("hidden")))
+        #else
+            #define MA_DLL_IMPORT
+            #define MA_DLL_EXPORT
+            #define MA_DLL_PRIVATE static
+        #endif
+    #endif
+#endif
+
+#if !defined(MA_API)
+    #if defined(MA_DLL)
+        #if defined(MINIAUDIO_IMPLEMENTATION) || defined(MA_IMPLEMENTATION)
+            #define MA_API MA_DLL_EXPORT
+        #else
+            #define MA_API MA_DLL_IMPORT
+        #endif
+    #else
+        #define MA_API extern
+    #endif
+#endif
+
+#if !defined(MA_STATIC)
+    #if defined(MA_DLL)
+        #define MA_PRIVATE MA_DLL_PRIVATE
+    #else
+        #define MA_PRIVATE static
+    #endif
+#endif
+
+
+/* SIMD alignment in bytes. Currently set to 32 bytes in preparation for future AVX optimizations. */
+#define MA_SIMD_ALIGNMENT 32
+
+/*
+Special wchar_t type to ensure any structures in the public sections that reference it have a
+consistent size across all platforms.
+
+On Windows, wchar_t is 2 bytes, whereas everywhere else it's 4 bytes. Since Windows likes to use
+wchar_t for its IDs, we need a special explicitly sized wchar type that is always 2 bytes on all
+platforms.
+*/
+#if !defined(MA_POSIX) && defined(MA_WIN32)
+typedef wchar_t ma_wchar_win32;
+#else
+typedef ma_uint16 ma_wchar_win32;
+#endif
+
+
+
+/*
+Logging Levels
+==============
+Log levels are only used to give logging callbacks some context as to the severity of a log message
+so they can do filtering. All log levels will be posted to registered logging callbacks. If you
+don't want to output a certain log level you can discriminate against the log level in the callback.
+
+MA_LOG_LEVEL_DEBUG
+    Used for debugging. Useful for debug and test builds, but should be disabled in release builds.
+
+MA_LOG_LEVEL_INFO
+    Informational logging. Useful for debugging. This will never be called from within the data
+    callback.
+
+MA_LOG_LEVEL_WARNING
+    Warnings. You should enable this in your development builds and action them when encountered.
+    These logs usually indicate a potential problem or misconfiguration, but still allow you to keep
+    running. This will never be called from within the data callback.
+
+MA_LOG_LEVEL_ERROR
+    Error logging. This will be fired when an operation fails and is subsequently aborted. This can
+    be fired from within the data callback, in which case the device will be stopped. You should
+    always have this log level enabled.
+*/ +typedef enum +{ + MA_LOG_LEVEL_DEBUG = 4, + MA_LOG_LEVEL_INFO = 3, + MA_LOG_LEVEL_WARNING = 2, + MA_LOG_LEVEL_ERROR = 1 +} ma_log_level; + +/* +Variables needing to be accessed atomically should be declared with this macro for two reasons: + + 1) It allows people who read the code to identify a variable as such; and + 2) It forces alignment on platforms where it's required or optimal. + +Note that for x86/64, alignment is not strictly necessary, but does have some performance +implications. Where supported by the compiler, alignment will be used, but otherwise if the CPU +architecture does not require it, it will simply leave it unaligned. This is the case with old +versions of Visual Studio, which I've confirmed with at least VC6. +*/ +#if !defined(_MSC_VER) && defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) + #include + #define MA_ATOMIC(alignment, type) _Alignas(alignment) type +#else + #if defined(__GNUC__) + /* GCC-style compilers. */ + #define MA_ATOMIC(alignment, type) type __attribute__((aligned(alignment))) + #elif defined(_MSC_VER) && _MSC_VER > 1200 /* 1200 = VC6. Alignment not supported, but not necessary because x86 is the only supported target. */ + /* MSVC. */ + #define MA_ATOMIC(alignment, type) __declspec(align(alignment)) type + #else + /* Other compilers. */ + #define MA_ATOMIC(alignment, type) type + #endif +#endif + +typedef struct ma_context ma_context; +typedef struct ma_device ma_device; + +typedef ma_uint8 ma_channel; +typedef enum +{ + MA_CHANNEL_NONE = 0, + MA_CHANNEL_MONO = 1, + MA_CHANNEL_FRONT_LEFT = 2, + MA_CHANNEL_FRONT_RIGHT = 3, + MA_CHANNEL_FRONT_CENTER = 4, + MA_CHANNEL_LFE = 5, + MA_CHANNEL_BACK_LEFT = 6, + MA_CHANNEL_BACK_RIGHT = 7, + MA_CHANNEL_FRONT_LEFT_CENTER = 8, + MA_CHANNEL_FRONT_RIGHT_CENTER = 9, + MA_CHANNEL_BACK_CENTER = 10, + MA_CHANNEL_SIDE_LEFT = 11, + MA_CHANNEL_SIDE_RIGHT = 12, + MA_CHANNEL_TOP_CENTER = 13, + MA_CHANNEL_TOP_FRONT_LEFT = 14, + MA_CHANNEL_TOP_FRONT_CENTER = 15, + MA_CHANNEL_TOP_FRONT_RIGHT = 16, + MA_CHANNEL_TOP_BACK_LEFT = 17, + MA_CHANNEL_TOP_BACK_CENTER = 18, + MA_CHANNEL_TOP_BACK_RIGHT = 19, + MA_CHANNEL_AUX_0 = 20, + MA_CHANNEL_AUX_1 = 21, + MA_CHANNEL_AUX_2 = 22, + MA_CHANNEL_AUX_3 = 23, + MA_CHANNEL_AUX_4 = 24, + MA_CHANNEL_AUX_5 = 25, + MA_CHANNEL_AUX_6 = 26, + MA_CHANNEL_AUX_7 = 27, + MA_CHANNEL_AUX_8 = 28, + MA_CHANNEL_AUX_9 = 29, + MA_CHANNEL_AUX_10 = 30, + MA_CHANNEL_AUX_11 = 31, + MA_CHANNEL_AUX_12 = 32, + MA_CHANNEL_AUX_13 = 33, + MA_CHANNEL_AUX_14 = 34, + MA_CHANNEL_AUX_15 = 35, + MA_CHANNEL_AUX_16 = 36, + MA_CHANNEL_AUX_17 = 37, + MA_CHANNEL_AUX_18 = 38, + MA_CHANNEL_AUX_19 = 39, + MA_CHANNEL_AUX_20 = 40, + MA_CHANNEL_AUX_21 = 41, + MA_CHANNEL_AUX_22 = 42, + MA_CHANNEL_AUX_23 = 43, + MA_CHANNEL_AUX_24 = 44, + MA_CHANNEL_AUX_25 = 45, + MA_CHANNEL_AUX_26 = 46, + MA_CHANNEL_AUX_27 = 47, + MA_CHANNEL_AUX_28 = 48, + MA_CHANNEL_AUX_29 = 49, + MA_CHANNEL_AUX_30 = 50, + MA_CHANNEL_AUX_31 = 51, + MA_CHANNEL_LEFT = MA_CHANNEL_FRONT_LEFT, + MA_CHANNEL_RIGHT = MA_CHANNEL_FRONT_RIGHT, + MA_CHANNEL_POSITION_COUNT = (MA_CHANNEL_AUX_31 + 1) +} _ma_channel_position; /* Do not use `_ma_channel_position` directly. Use `ma_channel` instead. */ + +typedef enum +{ + MA_SUCCESS = 0, + MA_ERROR = -1, /* A generic error. 
*/ + MA_INVALID_ARGS = -2, + MA_INVALID_OPERATION = -3, + MA_OUT_OF_MEMORY = -4, + MA_OUT_OF_RANGE = -5, + MA_ACCESS_DENIED = -6, + MA_DOES_NOT_EXIST = -7, + MA_ALREADY_EXISTS = -8, + MA_TOO_MANY_OPEN_FILES = -9, + MA_INVALID_FILE = -10, + MA_TOO_BIG = -11, + MA_PATH_TOO_LONG = -12, + MA_NAME_TOO_LONG = -13, + MA_NOT_DIRECTORY = -14, + MA_IS_DIRECTORY = -15, + MA_DIRECTORY_NOT_EMPTY = -16, + MA_AT_END = -17, + MA_NO_SPACE = -18, + MA_BUSY = -19, + MA_IO_ERROR = -20, + MA_INTERRUPT = -21, + MA_UNAVAILABLE = -22, + MA_ALREADY_IN_USE = -23, + MA_BAD_ADDRESS = -24, + MA_BAD_SEEK = -25, + MA_BAD_PIPE = -26, + MA_DEADLOCK = -27, + MA_TOO_MANY_LINKS = -28, + MA_NOT_IMPLEMENTED = -29, + MA_NO_MESSAGE = -30, + MA_BAD_MESSAGE = -31, + MA_NO_DATA_AVAILABLE = -32, + MA_INVALID_DATA = -33, + MA_TIMEOUT = -34, + MA_NO_NETWORK = -35, + MA_NOT_UNIQUE = -36, + MA_NOT_SOCKET = -37, + MA_NO_ADDRESS = -38, + MA_BAD_PROTOCOL = -39, + MA_PROTOCOL_UNAVAILABLE = -40, + MA_PROTOCOL_NOT_SUPPORTED = -41, + MA_PROTOCOL_FAMILY_NOT_SUPPORTED = -42, + MA_ADDRESS_FAMILY_NOT_SUPPORTED = -43, + MA_SOCKET_NOT_SUPPORTED = -44, + MA_CONNECTION_RESET = -45, + MA_ALREADY_CONNECTED = -46, + MA_NOT_CONNECTED = -47, + MA_CONNECTION_REFUSED = -48, + MA_NO_HOST = -49, + MA_IN_PROGRESS = -50, + MA_CANCELLED = -51, + MA_MEMORY_ALREADY_MAPPED = -52, + + /* General non-standard errors. */ + MA_CRC_MISMATCH = -100, + + /* General miniaudio-specific errors. */ + MA_FORMAT_NOT_SUPPORTED = -200, + MA_DEVICE_TYPE_NOT_SUPPORTED = -201, + MA_SHARE_MODE_NOT_SUPPORTED = -202, + MA_NO_BACKEND = -203, + MA_NO_DEVICE = -204, + MA_API_NOT_FOUND = -205, + MA_INVALID_DEVICE_CONFIG = -206, + MA_LOOP = -207, + MA_BACKEND_NOT_ENABLED = -208, + + /* State errors. */ + MA_DEVICE_NOT_INITIALIZED = -300, + MA_DEVICE_ALREADY_INITIALIZED = -301, + MA_DEVICE_NOT_STARTED = -302, + MA_DEVICE_NOT_STOPPED = -303, + + /* Operation errors. */ + MA_FAILED_TO_INIT_BACKEND = -400, + MA_FAILED_TO_OPEN_BACKEND_DEVICE = -401, + MA_FAILED_TO_START_BACKEND_DEVICE = -402, + MA_FAILED_TO_STOP_BACKEND_DEVICE = -403 +} ma_result; + + +#define MA_MIN_CHANNELS 1 +#ifndef MA_MAX_CHANNELS +#define MA_MAX_CHANNELS 254 +#endif + +#ifndef MA_MAX_FILTER_ORDER +#define MA_MAX_FILTER_ORDER 8 +#endif + +typedef enum +{ + ma_stream_format_pcm = 0 +} ma_stream_format; + +typedef enum +{ + ma_stream_layout_interleaved = 0, + ma_stream_layout_deinterleaved +} ma_stream_layout; + +typedef enum +{ + ma_dither_mode_none = 0, + ma_dither_mode_rectangle, + ma_dither_mode_triangle +} ma_dither_mode; + +typedef enum +{ + /* + I like to keep these explicitly defined because they're used as a key into a lookup table. When items are + added to this, make sure there are no gaps and that they're added to the lookup table in ma_get_bytes_per_sample(). + */ + ma_format_unknown = 0, /* Mainly used for indicating an error, but also used as the default for the output format for decoders. */ + ma_format_u8 = 1, + ma_format_s16 = 2, /* Seems to be the most widely supported format. */ + ma_format_s24 = 3, /* Tightly packed. 3 bytes per sample. */ + ma_format_s32 = 4, + ma_format_f32 = 5, + ma_format_count +} ma_format; + +typedef enum +{ + /* Standard rates need to be in priority order. 
*/ + ma_standard_sample_rate_48000 = 48000, /* Most common */ + ma_standard_sample_rate_44100 = 44100, + + ma_standard_sample_rate_32000 = 32000, /* Lows */ + ma_standard_sample_rate_24000 = 24000, + ma_standard_sample_rate_22050 = 22050, + + ma_standard_sample_rate_88200 = 88200, /* Highs */ + ma_standard_sample_rate_96000 = 96000, + ma_standard_sample_rate_176400 = 176400, + ma_standard_sample_rate_192000 = 192000, + + ma_standard_sample_rate_16000 = 16000, /* Extreme lows */ + ma_standard_sample_rate_11025 = 11025, + ma_standard_sample_rate_8000 = 8000, + + ma_standard_sample_rate_352800 = 352800, /* Extreme highs */ + ma_standard_sample_rate_384000 = 384000, + + ma_standard_sample_rate_min = ma_standard_sample_rate_8000, + ma_standard_sample_rate_max = ma_standard_sample_rate_384000, + ma_standard_sample_rate_count = 14 /* Need to maintain the count manually. Make sure this is updated if items are added to enum. */ +} ma_standard_sample_rate; + + +typedef enum +{ + ma_channel_mix_mode_rectangular = 0, /* Simple averaging based on the plane(s) the channel is sitting on. */ + ma_channel_mix_mode_simple, /* Drop excess channels; zeroed out extra channels. */ + ma_channel_mix_mode_custom_weights, /* Use custom weights specified in ma_channel_converter_config. */ + ma_channel_mix_mode_default = ma_channel_mix_mode_rectangular +} ma_channel_mix_mode; + +typedef enum +{ + ma_standard_channel_map_microsoft, + ma_standard_channel_map_alsa, + ma_standard_channel_map_rfc3551, /* Based off AIFF. */ + ma_standard_channel_map_flac, + ma_standard_channel_map_vorbis, + ma_standard_channel_map_sound4, /* FreeBSD's sound(4). */ + ma_standard_channel_map_sndio, /* www.sndio.org/tips.html */ + ma_standard_channel_map_webaudio = ma_standard_channel_map_flac, /* https://webaudio.github.io/web-audio-api/#ChannelOrdering. Only 1, 2, 4 and 6 channels are defined, but can fill in the gaps with logical assumptions. */ + ma_standard_channel_map_default = ma_standard_channel_map_microsoft +} ma_standard_channel_map; + +typedef enum +{ + ma_performance_profile_low_latency = 0, + ma_performance_profile_conservative +} ma_performance_profile; + + +typedef struct +{ + void* pUserData; + void* (* onMalloc)(size_t sz, void* pUserData); + void* (* onRealloc)(void* p, size_t sz, void* pUserData); + void (* onFree)(void* p, void* pUserData); +} ma_allocation_callbacks; + +typedef struct +{ + ma_int32 state; +} ma_lcg; + + +/* +Atomics. + +These are typesafe structures to prevent errors as a result of forgetting to reference variables atomically. It's too +easy to introduce subtle bugs where you accidentally do a regular assignment instead of an atomic load/store, etc. By +using a struct we can enforce the use of atomics at compile time. + +These types are declared in the header section because we need to reference them in structs below, but functions for +using them are only exposed in the implementation section. I do not want these to be part of the public API. + +There's a few downsides to this system. The first is that you need to declare a new struct for each type. Below are +some macros to help with the declarations. They will be named like so: + + ma_atomic_uint32 - atomic ma_uint32 + ma_atomic_int32 - atomic ma_int32 + ma_atomic_uint64 - atomic ma_uint64 + ma_atomic_float - atomic float + ma_atomic_bool32 - atomic ma_bool32 + +The other downside is that atomic pointers are extremely messy. You need to declare a new struct for each specific +type of pointer you need to make atomic. 
For example, an atomic ma_node* will look like this:
+
+    MA_ATOMIC_SAFE_TYPE_IMPL_PTR(node)
+
+Which will declare a type struct that's named like so:
+
+    ma_atomic_ptr_node
+
+Functions to use the atomic types are declared in the implementation section. All atomic functions are prefixed with
+the name of the struct. For example:
+
+    ma_atomic_uint32_set() - Atomic store of ma_uint32
+    ma_atomic_uint32_get() - Atomic load of ma_uint32
+    etc.
+
+For pointer types it's the same, which makes them a bit messy to use due to the length of each function name, but in
+return you get type safety and enforcement of atomic operations.
+*/
+#define MA_ATOMIC_SAFE_TYPE_DECL(c89TypeExtension, typeSize, type) \
+    typedef struct \
+    { \
+        MA_ATOMIC(typeSize, ma_##type) value; \
+    } ma_atomic_##type; \
+
+#define MA_ATOMIC_SAFE_TYPE_DECL_PTR(type) \
+    typedef struct \
+    { \
+        MA_ATOMIC(MA_SIZEOF_PTR, ma_##type*) value; \
+    } ma_atomic_ptr_##type; \
+
+MA_ATOMIC_SAFE_TYPE_DECL(32,  4, uint32)
+MA_ATOMIC_SAFE_TYPE_DECL(i32, 4, int32)
+MA_ATOMIC_SAFE_TYPE_DECL(64,  8, uint64)
+MA_ATOMIC_SAFE_TYPE_DECL(f32, 4, float)
+MA_ATOMIC_SAFE_TYPE_DECL(32,  4, bool32)
+
+
+/* Spinlocks are 32-bit for compatibility reasons. */
+typedef ma_uint32 ma_spinlock;
+
+#ifndef MA_NO_THREADING
+    /* Thread priorities should be ordered such that the default priority of the worker thread is 0. */
+    typedef enum
+    {
+        ma_thread_priority_idle     = -5,
+        ma_thread_priority_lowest   = -4,
+        ma_thread_priority_low      = -3,
+        ma_thread_priority_normal   = -2,
+        ma_thread_priority_high     = -1,
+        ma_thread_priority_highest  =  0,
+        ma_thread_priority_realtime =  1,
+        ma_thread_priority_default  =  0
+    } ma_thread_priority;
+
+    #if defined(MA_POSIX)
+        typedef ma_pthread_t ma_thread;
+    #elif defined(MA_WIN32)
+        typedef ma_handle ma_thread;
+    #endif
+
+    #if defined(MA_POSIX)
+        typedef ma_pthread_mutex_t ma_mutex;
+    #elif defined(MA_WIN32)
+        typedef ma_handle ma_mutex;
+    #endif
+
+    #if defined(MA_POSIX)
+        typedef struct
+        {
+            ma_uint32 value;
+            ma_pthread_mutex_t lock;
+            ma_pthread_cond_t cond;
+        } ma_event;
+    #elif defined(MA_WIN32)
+        typedef ma_handle ma_event;
+    #endif
+
+    #if defined(MA_POSIX)
+        typedef struct
+        {
+            int value;
+            ma_pthread_mutex_t lock;
+            ma_pthread_cond_t cond;
+        } ma_semaphore;
+    #elif defined(MA_WIN32)
+        typedef ma_handle ma_semaphore;
+    #endif
+#else
+    /* MA_NO_THREADING is set which means threading is disabled. Threading is required by some API families. If any of these are enabled we need to throw an error. */
+    #ifndef MA_NO_DEVICE_IO
+        #error "MA_NO_THREADING cannot be used without MA_NO_DEVICE_IO";
+    #endif
+#endif  /* MA_NO_THREADING */
+
+
+/*
+Retrieves the version of miniaudio as separated integers. Each component can be NULL if it's not required.
+*/
+MA_API void ma_version(ma_uint32* pMajor, ma_uint32* pMinor, ma_uint32* pRevision);
+
+/*
+Retrieves the version of miniaudio as a string which can be useful for logging purposes.
+*/
+MA_API const char* ma_version_string(void);
+
+
+/**************************************************************************************************************************************************************
+
+Logging
+
+**************************************************************************************************************************************************************/
+#include <stdarg.h> /* For va_list. 
*/ + +#if defined(__has_attribute) + #if __has_attribute(format) + #define MA_ATTRIBUTE_FORMAT(fmt, va) __attribute__((format(printf, fmt, va))) + #endif +#endif +#ifndef MA_ATTRIBUTE_FORMAT +#define MA_ATTRIBUTE_FORMAT(fmt, va) +#endif + +#ifndef MA_MAX_LOG_CALLBACKS +#define MA_MAX_LOG_CALLBACKS 4 +#endif + + +/* +The callback for handling log messages. + + +Parameters +---------- +pUserData (in) + The user data pointer that was passed into ma_log_register_callback(). + +logLevel (in) + The log level. This can be one of the following: + + +----------------------+ + | Log Level | + +----------------------+ + | MA_LOG_LEVEL_DEBUG | + | MA_LOG_LEVEL_INFO | + | MA_LOG_LEVEL_WARNING | + | MA_LOG_LEVEL_ERROR | + +----------------------+ + +pMessage (in) + The log message. +*/ +typedef void (* ma_log_callback_proc)(void* pUserData, ma_uint32 level, const char* pMessage); + +typedef struct +{ + ma_log_callback_proc onLog; + void* pUserData; +} ma_log_callback; + +MA_API ma_log_callback ma_log_callback_init(ma_log_callback_proc onLog, void* pUserData); + + +typedef struct +{ + ma_log_callback callbacks[MA_MAX_LOG_CALLBACKS]; + ma_uint32 callbackCount; + ma_allocation_callbacks allocationCallbacks; /* Need to store these persistently because ma_log_postv() might need to allocate a buffer on the heap. */ +#ifndef MA_NO_THREADING + ma_mutex lock; /* For thread safety just to make it easier and safer for the logging implementation. */ +#endif +} ma_log; + +MA_API ma_result ma_log_init(const ma_allocation_callbacks* pAllocationCallbacks, ma_log* pLog); +MA_API void ma_log_uninit(ma_log* pLog); +MA_API ma_result ma_log_register_callback(ma_log* pLog, ma_log_callback callback); +MA_API ma_result ma_log_unregister_callback(ma_log* pLog, ma_log_callback callback); +MA_API ma_result ma_log_post(ma_log* pLog, ma_uint32 level, const char* pMessage); +MA_API ma_result ma_log_postv(ma_log* pLog, ma_uint32 level, const char* pFormat, va_list args); +MA_API ma_result ma_log_postf(ma_log* pLog, ma_uint32 level, const char* pFormat, ...) MA_ATTRIBUTE_FORMAT(3, 4); + + +/************************************************************************************************************************************************************** + +Biquad Filtering + +**************************************************************************************************************************************************************/ +typedef union +{ + float f32; + ma_int32 s32; +} ma_biquad_coefficient; + +typedef struct +{ + ma_format format; + ma_uint32 channels; + double b0; + double b1; + double b2; + double a0; + double a1; + double a2; +} ma_biquad_config; + +MA_API ma_biquad_config ma_biquad_config_init(ma_format format, ma_uint32 channels, double b0, double b1, double b2, double a0, double a1, double a2); + +typedef struct +{ + ma_format format; + ma_uint32 channels; + ma_biquad_coefficient b0; + ma_biquad_coefficient b1; + ma_biquad_coefficient b2; + ma_biquad_coefficient a1; + ma_biquad_coefficient a2; + ma_biquad_coefficient* pR1; + ma_biquad_coefficient* pR2; + + /* Memory management. 
*/ + void* _pHeap; + ma_bool32 _ownsHeap; +} ma_biquad; + +MA_API ma_result ma_biquad_get_heap_size(const ma_biquad_config* pConfig, size_t* pHeapSizeInBytes); +MA_API ma_result ma_biquad_init_preallocated(const ma_biquad_config* pConfig, void* pHeap, ma_biquad* pBQ); +MA_API ma_result ma_biquad_init(const ma_biquad_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_biquad* pBQ); +MA_API void ma_biquad_uninit(ma_biquad* pBQ, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API ma_result ma_biquad_reinit(const ma_biquad_config* pConfig, ma_biquad* pBQ); +MA_API ma_result ma_biquad_clear_cache(ma_biquad* pBQ); +MA_API ma_result ma_biquad_process_pcm_frames(ma_biquad* pBQ, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount); +MA_API ma_uint32 ma_biquad_get_latency(const ma_biquad* pBQ); + + +/************************************************************************************************************************************************************** + +Low-Pass Filtering + +**************************************************************************************************************************************************************/ +typedef struct +{ + ma_format format; + ma_uint32 channels; + ma_uint32 sampleRate; + double cutoffFrequency; + double q; +} ma_lpf1_config, ma_lpf2_config; + +MA_API ma_lpf1_config ma_lpf1_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double cutoffFrequency); +MA_API ma_lpf2_config ma_lpf2_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double cutoffFrequency, double q); + +typedef struct +{ + ma_format format; + ma_uint32 channels; + ma_biquad_coefficient a; + ma_biquad_coefficient* pR1; + + /* Memory management. */ + void* _pHeap; + ma_bool32 _ownsHeap; +} ma_lpf1; + +MA_API ma_result ma_lpf1_get_heap_size(const ma_lpf1_config* pConfig, size_t* pHeapSizeInBytes); +MA_API ma_result ma_lpf1_init_preallocated(const ma_lpf1_config* pConfig, void* pHeap, ma_lpf1* pLPF); +MA_API ma_result ma_lpf1_init(const ma_lpf1_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_lpf1* pLPF); +MA_API void ma_lpf1_uninit(ma_lpf1* pLPF, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API ma_result ma_lpf1_reinit(const ma_lpf1_config* pConfig, ma_lpf1* pLPF); +MA_API ma_result ma_lpf1_clear_cache(ma_lpf1* pLPF); +MA_API ma_result ma_lpf1_process_pcm_frames(ma_lpf1* pLPF, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount); +MA_API ma_uint32 ma_lpf1_get_latency(const ma_lpf1* pLPF); + +typedef struct +{ + ma_biquad bq; /* The second order low-pass filter is implemented as a biquad filter. 
*/ +} ma_lpf2; + +MA_API ma_result ma_lpf2_get_heap_size(const ma_lpf2_config* pConfig, size_t* pHeapSizeInBytes); +MA_API ma_result ma_lpf2_init_preallocated(const ma_lpf2_config* pConfig, void* pHeap, ma_lpf2* pHPF); +MA_API ma_result ma_lpf2_init(const ma_lpf2_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_lpf2* pLPF); +MA_API void ma_lpf2_uninit(ma_lpf2* pLPF, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API ma_result ma_lpf2_reinit(const ma_lpf2_config* pConfig, ma_lpf2* pLPF); +MA_API ma_result ma_lpf2_clear_cache(ma_lpf2* pLPF); +MA_API ma_result ma_lpf2_process_pcm_frames(ma_lpf2* pLPF, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount); +MA_API ma_uint32 ma_lpf2_get_latency(const ma_lpf2* pLPF); + + +typedef struct +{ + ma_format format; + ma_uint32 channels; + ma_uint32 sampleRate; + double cutoffFrequency; + ma_uint32 order; /* If set to 0, will be treated as a passthrough (no filtering will be applied). */ +} ma_lpf_config; + +MA_API ma_lpf_config ma_lpf_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double cutoffFrequency, ma_uint32 order); + +typedef struct +{ + ma_format format; + ma_uint32 channels; + ma_uint32 sampleRate; + ma_uint32 lpf1Count; + ma_uint32 lpf2Count; + ma_lpf1* pLPF1; + ma_lpf2* pLPF2; + + /* Memory management. */ + void* _pHeap; + ma_bool32 _ownsHeap; +} ma_lpf; + +MA_API ma_result ma_lpf_get_heap_size(const ma_lpf_config* pConfig, size_t* pHeapSizeInBytes); +MA_API ma_result ma_lpf_init_preallocated(const ma_lpf_config* pConfig, void* pHeap, ma_lpf* pLPF); +MA_API ma_result ma_lpf_init(const ma_lpf_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_lpf* pLPF); +MA_API void ma_lpf_uninit(ma_lpf* pLPF, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API ma_result ma_lpf_reinit(const ma_lpf_config* pConfig, ma_lpf* pLPF); +MA_API ma_result ma_lpf_clear_cache(ma_lpf* pLPF); +MA_API ma_result ma_lpf_process_pcm_frames(ma_lpf* pLPF, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount); +MA_API ma_uint32 ma_lpf_get_latency(const ma_lpf* pLPF); + + +/************************************************************************************************************************************************************** + +High-Pass Filtering + +**************************************************************************************************************************************************************/ +typedef struct +{ + ma_format format; + ma_uint32 channels; + ma_uint32 sampleRate; + double cutoffFrequency; + double q; +} ma_hpf1_config, ma_hpf2_config; + +MA_API ma_hpf1_config ma_hpf1_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double cutoffFrequency); +MA_API ma_hpf2_config ma_hpf2_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double cutoffFrequency, double q); + +typedef struct +{ + ma_format format; + ma_uint32 channels; + ma_biquad_coefficient a; + ma_biquad_coefficient* pR1; + + /* Memory management. 
*/ + void* _pHeap; + ma_bool32 _ownsHeap; +} ma_hpf1; + +MA_API ma_result ma_hpf1_get_heap_size(const ma_hpf1_config* pConfig, size_t* pHeapSizeInBytes); +MA_API ma_result ma_hpf1_init_preallocated(const ma_hpf1_config* pConfig, void* pHeap, ma_hpf1* pLPF); +MA_API ma_result ma_hpf1_init(const ma_hpf1_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_hpf1* pHPF); +MA_API void ma_hpf1_uninit(ma_hpf1* pHPF, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API ma_result ma_hpf1_reinit(const ma_hpf1_config* pConfig, ma_hpf1* pHPF); +MA_API ma_result ma_hpf1_process_pcm_frames(ma_hpf1* pHPF, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount); +MA_API ma_uint32 ma_hpf1_get_latency(const ma_hpf1* pHPF); + +typedef struct +{ + ma_biquad bq; /* The second order high-pass filter is implemented as a biquad filter. */ +} ma_hpf2; + +MA_API ma_result ma_hpf2_get_heap_size(const ma_hpf2_config* pConfig, size_t* pHeapSizeInBytes); +MA_API ma_result ma_hpf2_init_preallocated(const ma_hpf2_config* pConfig, void* pHeap, ma_hpf2* pHPF); +MA_API ma_result ma_hpf2_init(const ma_hpf2_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_hpf2* pHPF); +MA_API void ma_hpf2_uninit(ma_hpf2* pHPF, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API ma_result ma_hpf2_reinit(const ma_hpf2_config* pConfig, ma_hpf2* pHPF); +MA_API ma_result ma_hpf2_process_pcm_frames(ma_hpf2* pHPF, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount); +MA_API ma_uint32 ma_hpf2_get_latency(const ma_hpf2* pHPF); + + +typedef struct +{ + ma_format format; + ma_uint32 channels; + ma_uint32 sampleRate; + double cutoffFrequency; + ma_uint32 order; /* If set to 0, will be treated as a passthrough (no filtering will be applied). */ +} ma_hpf_config; + +MA_API ma_hpf_config ma_hpf_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double cutoffFrequency, ma_uint32 order); + +typedef struct +{ + ma_format format; + ma_uint32 channels; + ma_uint32 sampleRate; + ma_uint32 hpf1Count; + ma_uint32 hpf2Count; + ma_hpf1* pHPF1; + ma_hpf2* pHPF2; + + /* Memory management. 
*/ + void* _pHeap; + ma_bool32 _ownsHeap; +} ma_hpf; + +MA_API ma_result ma_hpf_get_heap_size(const ma_hpf_config* pConfig, size_t* pHeapSizeInBytes); +MA_API ma_result ma_hpf_init_preallocated(const ma_hpf_config* pConfig, void* pHeap, ma_hpf* pLPF); +MA_API ma_result ma_hpf_init(const ma_hpf_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_hpf* pHPF); +MA_API void ma_hpf_uninit(ma_hpf* pHPF, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API ma_result ma_hpf_reinit(const ma_hpf_config* pConfig, ma_hpf* pHPF); +MA_API ma_result ma_hpf_process_pcm_frames(ma_hpf* pHPF, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount); +MA_API ma_uint32 ma_hpf_get_latency(const ma_hpf* pHPF); + + +/************************************************************************************************************************************************************** + +Band-Pass Filtering + +**************************************************************************************************************************************************************/ +typedef struct +{ + ma_format format; + ma_uint32 channels; + ma_uint32 sampleRate; + double cutoffFrequency; + double q; +} ma_bpf2_config; + +MA_API ma_bpf2_config ma_bpf2_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double cutoffFrequency, double q); + +typedef struct +{ + ma_biquad bq; /* The second order band-pass filter is implemented as a biquad filter. */ +} ma_bpf2; + +MA_API ma_result ma_bpf2_get_heap_size(const ma_bpf2_config* pConfig, size_t* pHeapSizeInBytes); +MA_API ma_result ma_bpf2_init_preallocated(const ma_bpf2_config* pConfig, void* pHeap, ma_bpf2* pBPF); +MA_API ma_result ma_bpf2_init(const ma_bpf2_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_bpf2* pBPF); +MA_API void ma_bpf2_uninit(ma_bpf2* pBPF, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API ma_result ma_bpf2_reinit(const ma_bpf2_config* pConfig, ma_bpf2* pBPF); +MA_API ma_result ma_bpf2_process_pcm_frames(ma_bpf2* pBPF, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount); +MA_API ma_uint32 ma_bpf2_get_latency(const ma_bpf2* pBPF); + + +typedef struct +{ + ma_format format; + ma_uint32 channels; + ma_uint32 sampleRate; + double cutoffFrequency; + ma_uint32 order; /* If set to 0, will be treated as a passthrough (no filtering will be applied). */ +} ma_bpf_config; + +MA_API ma_bpf_config ma_bpf_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double cutoffFrequency, ma_uint32 order); + +typedef struct +{ + ma_format format; + ma_uint32 channels; + ma_uint32 bpf2Count; + ma_bpf2* pBPF2; + + /* Memory management. 
*/ + void* _pHeap; + ma_bool32 _ownsHeap; +} ma_bpf; + +MA_API ma_result ma_bpf_get_heap_size(const ma_bpf_config* pConfig, size_t* pHeapSizeInBytes); +MA_API ma_result ma_bpf_init_preallocated(const ma_bpf_config* pConfig, void* pHeap, ma_bpf* pBPF); +MA_API ma_result ma_bpf_init(const ma_bpf_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_bpf* pBPF); +MA_API void ma_bpf_uninit(ma_bpf* pBPF, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API ma_result ma_bpf_reinit(const ma_bpf_config* pConfig, ma_bpf* pBPF); +MA_API ma_result ma_bpf_process_pcm_frames(ma_bpf* pBPF, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount); +MA_API ma_uint32 ma_bpf_get_latency(const ma_bpf* pBPF); + + +/************************************************************************************************************************************************************** + +Notching Filter + +**************************************************************************************************************************************************************/ +typedef struct +{ + ma_format format; + ma_uint32 channels; + ma_uint32 sampleRate; + double q; + double frequency; +} ma_notch2_config, ma_notch_config; + +MA_API ma_notch2_config ma_notch2_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double q, double frequency); + +typedef struct +{ + ma_biquad bq; +} ma_notch2; + +MA_API ma_result ma_notch2_get_heap_size(const ma_notch2_config* pConfig, size_t* pHeapSizeInBytes); +MA_API ma_result ma_notch2_init_preallocated(const ma_notch2_config* pConfig, void* pHeap, ma_notch2* pFilter); +MA_API ma_result ma_notch2_init(const ma_notch2_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_notch2* pFilter); +MA_API void ma_notch2_uninit(ma_notch2* pFilter, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API ma_result ma_notch2_reinit(const ma_notch2_config* pConfig, ma_notch2* pFilter); +MA_API ma_result ma_notch2_process_pcm_frames(ma_notch2* pFilter, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount); +MA_API ma_uint32 ma_notch2_get_latency(const ma_notch2* pFilter); + + +/************************************************************************************************************************************************************** + +Peaking EQ Filter + +**************************************************************************************************************************************************************/ +typedef struct +{ + ma_format format; + ma_uint32 channels; + ma_uint32 sampleRate; + double gainDB; + double q; + double frequency; +} ma_peak2_config, ma_peak_config; + +MA_API ma_peak2_config ma_peak2_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double gainDB, double q, double frequency); + +typedef struct +{ + ma_biquad bq; +} ma_peak2; + +MA_API ma_result ma_peak2_get_heap_size(const ma_peak2_config* pConfig, size_t* pHeapSizeInBytes); +MA_API ma_result ma_peak2_init_preallocated(const ma_peak2_config* pConfig, void* pHeap, ma_peak2* pFilter); +MA_API ma_result ma_peak2_init(const ma_peak2_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_peak2* pFilter); +MA_API void ma_peak2_uninit(ma_peak2* pFilter, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API ma_result ma_peak2_reinit(const ma_peak2_config* pConfig, ma_peak2* pFilter); +MA_API ma_result ma_peak2_process_pcm_frames(ma_peak2* pFilter, void* pFramesOut, const void* pFramesIn, ma_uint64 
frameCount); +MA_API ma_uint32 ma_peak2_get_latency(const ma_peak2* pFilter); + + +/************************************************************************************************************************************************************** + +Low Shelf Filter + +**************************************************************************************************************************************************************/ +typedef struct +{ + ma_format format; + ma_uint32 channels; + ma_uint32 sampleRate; + double gainDB; + double shelfSlope; + double frequency; +} ma_loshelf2_config, ma_loshelf_config; + +MA_API ma_loshelf2_config ma_loshelf2_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double gainDB, double shelfSlope, double frequency); + +typedef struct +{ + ma_biquad bq; +} ma_loshelf2; + +MA_API ma_result ma_loshelf2_get_heap_size(const ma_loshelf2_config* pConfig, size_t* pHeapSizeInBytes); +MA_API ma_result ma_loshelf2_init_preallocated(const ma_loshelf2_config* pConfig, void* pHeap, ma_loshelf2* pFilter); +MA_API ma_result ma_loshelf2_init(const ma_loshelf2_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_loshelf2* pFilter); +MA_API void ma_loshelf2_uninit(ma_loshelf2* pFilter, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API ma_result ma_loshelf2_reinit(const ma_loshelf2_config* pConfig, ma_loshelf2* pFilter); +MA_API ma_result ma_loshelf2_process_pcm_frames(ma_loshelf2* pFilter, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount); +MA_API ma_uint32 ma_loshelf2_get_latency(const ma_loshelf2* pFilter); + + +/************************************************************************************************************************************************************** + +High Shelf Filter + +**************************************************************************************************************************************************************/ +typedef struct +{ + ma_format format; + ma_uint32 channels; + ma_uint32 sampleRate; + double gainDB; + double shelfSlope; + double frequency; +} ma_hishelf2_config, ma_hishelf_config; + +MA_API ma_hishelf2_config ma_hishelf2_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double gainDB, double shelfSlope, double frequency); + +typedef struct +{ + ma_biquad bq; +} ma_hishelf2; + +MA_API ma_result ma_hishelf2_get_heap_size(const ma_hishelf2_config* pConfig, size_t* pHeapSizeInBytes); +MA_API ma_result ma_hishelf2_init_preallocated(const ma_hishelf2_config* pConfig, void* pHeap, ma_hishelf2* pFilter); +MA_API ma_result ma_hishelf2_init(const ma_hishelf2_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_hishelf2* pFilter); +MA_API void ma_hishelf2_uninit(ma_hishelf2* pFilter, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API ma_result ma_hishelf2_reinit(const ma_hishelf2_config* pConfig, ma_hishelf2* pFilter); +MA_API ma_result ma_hishelf2_process_pcm_frames(ma_hishelf2* pFilter, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount); +MA_API ma_uint32 ma_hishelf2_get_latency(const ma_hishelf2* pFilter); + + + +/* +Delay +*/ +typedef struct +{ + ma_uint32 channels; + ma_uint32 sampleRate; + ma_uint32 delayInFrames; + ma_bool32 delayStart; /* Set to true to delay the start of the output; false otherwise. */ + float wet; /* 0..1. Default = 1. */ + float dry; /* 0..1. Default = 1. */ + float decay; /* 0..1. Default = 0 (no feedback). Feedback decay. Use this for echo. 
*/ +} ma_delay_config; + +MA_API ma_delay_config ma_delay_config_init(ma_uint32 channels, ma_uint32 sampleRate, ma_uint32 delayInFrames, float decay); + + +typedef struct +{ + ma_delay_config config; + ma_uint32 cursor; /* Feedback is written to this cursor. Always equal or in front of the read cursor. */ + ma_uint32 bufferSizeInFrames; + float* pBuffer; +} ma_delay; + +MA_API ma_result ma_delay_init(const ma_delay_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_delay* pDelay); +MA_API void ma_delay_uninit(ma_delay* pDelay, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API ma_result ma_delay_process_pcm_frames(ma_delay* pDelay, void* pFramesOut, const void* pFramesIn, ma_uint32 frameCount); +MA_API void ma_delay_set_wet(ma_delay* pDelay, float value); +MA_API float ma_delay_get_wet(const ma_delay* pDelay); +MA_API void ma_delay_set_dry(ma_delay* pDelay, float value); +MA_API float ma_delay_get_dry(const ma_delay* pDelay); +MA_API void ma_delay_set_decay(ma_delay* pDelay, float value); +MA_API float ma_delay_get_decay(const ma_delay* pDelay); + + +/* Gainer for smooth volume changes. */ +typedef struct +{ + ma_uint32 channels; + ma_uint32 smoothTimeInFrames; +} ma_gainer_config; + +MA_API ma_gainer_config ma_gainer_config_init(ma_uint32 channels, ma_uint32 smoothTimeInFrames); + + +typedef struct +{ + ma_gainer_config config; + ma_uint32 t; + float masterVolume; + float* pOldGains; + float* pNewGains; + + /* Memory management. */ + void* _pHeap; + ma_bool32 _ownsHeap; +} ma_gainer; + +MA_API ma_result ma_gainer_get_heap_size(const ma_gainer_config* pConfig, size_t* pHeapSizeInBytes); +MA_API ma_result ma_gainer_init_preallocated(const ma_gainer_config* pConfig, void* pHeap, ma_gainer* pGainer); +MA_API ma_result ma_gainer_init(const ma_gainer_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_gainer* pGainer); +MA_API void ma_gainer_uninit(ma_gainer* pGainer, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API ma_result ma_gainer_process_pcm_frames(ma_gainer* pGainer, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount); +MA_API ma_result ma_gainer_set_gain(ma_gainer* pGainer, float newGain); +MA_API ma_result ma_gainer_set_gains(ma_gainer* pGainer, float* pNewGains); +MA_API ma_result ma_gainer_set_master_volume(ma_gainer* pGainer, float volume); +MA_API ma_result ma_gainer_get_master_volume(const ma_gainer* pGainer, float* pVolume); + + + +/* Stereo panner. */ +typedef enum +{ + ma_pan_mode_balance = 0, /* Does not blend one side with the other. Technically just a balance. Compatible with other popular audio engines and therefore the default. */ + ma_pan_mode_pan /* A true pan. The sound from one side will "move" to the other side and blend with it. */ +} ma_pan_mode; + +typedef struct +{ + ma_format format; + ma_uint32 channels; + ma_pan_mode mode; + float pan; +} ma_panner_config; + +MA_API ma_panner_config ma_panner_config_init(ma_format format, ma_uint32 channels); + + +typedef struct +{ + ma_format format; + ma_uint32 channels; + ma_pan_mode mode; + float pan; /* -1..1 where 0 is no pan, -1 is left side, +1 is right side. Defaults to 0. 
*/
+} ma_panner;
+
+MA_API ma_result ma_panner_init(const ma_panner_config* pConfig, ma_panner* pPanner);
+MA_API ma_result ma_panner_process_pcm_frames(ma_panner* pPanner, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount);
+MA_API void ma_panner_set_mode(ma_panner* pPanner, ma_pan_mode mode);
+MA_API ma_pan_mode ma_panner_get_mode(const ma_panner* pPanner);
+MA_API void ma_panner_set_pan(ma_panner* pPanner, float pan);
+MA_API float ma_panner_get_pan(const ma_panner* pPanner);
+
+
+
+/* Fader. */
+typedef struct
+{
+    ma_format format;
+    ma_uint32 channels;
+    ma_uint32 sampleRate;
+} ma_fader_config;
+
+MA_API ma_fader_config ma_fader_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate);
+
+typedef struct
+{
+    ma_fader_config config;
+    float volumeBeg;            /* If volumeBeg and volumeEnd are equal to 1, no fading happens (ma_fader_process_pcm_frames() will run as a passthrough). */
+    float volumeEnd;
+    ma_uint64 lengthInFrames;   /* The total length of the fade. */
+    ma_int64  cursorInFrames;   /* The current time in frames. Incremented by ma_fader_process_pcm_frames(). Signed because it'll be offset by startOffsetInFrames in set_fade_ex(). */
+} ma_fader;
+
+MA_API ma_result ma_fader_init(const ma_fader_config* pConfig, ma_fader* pFader);
+MA_API ma_result ma_fader_process_pcm_frames(ma_fader* pFader, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount);
+MA_API void ma_fader_get_data_format(const ma_fader* pFader, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate);
+MA_API void ma_fader_set_fade(ma_fader* pFader, float volumeBeg, float volumeEnd, ma_uint64 lengthInFrames);
+MA_API void ma_fader_set_fade_ex(ma_fader* pFader, float volumeBeg, float volumeEnd, ma_uint64 lengthInFrames, ma_int64 startOffsetInFrames);
+MA_API float ma_fader_get_current_volume(const ma_fader* pFader);
+
+
+
+/* Spatializer. */
+typedef struct
+{
+    float x;
+    float y;
+    float z;
+} ma_vec3f;
+
+typedef struct
+{
+    ma_vec3f v;
+    ma_spinlock lock;
+} ma_atomic_vec3f;
+
+typedef enum
+{
+    ma_attenuation_model_none,          /* No distance attenuation and no spatialization. */
+    ma_attenuation_model_inverse,       /* Equivalent to OpenAL's AL_INVERSE_DISTANCE_CLAMPED. */
+    ma_attenuation_model_linear,        /* Linear attenuation. Equivalent to OpenAL's AL_LINEAR_DISTANCE_CLAMPED. */
+    ma_attenuation_model_exponential    /* Exponential attenuation. Equivalent to OpenAL's AL_EXPONENT_DISTANCE_CLAMPED. */
+} ma_attenuation_model;
+
+typedef enum
+{
+    ma_positioning_absolute,
+    ma_positioning_relative
+} ma_positioning;
+
+typedef enum
+{
+    ma_handedness_right,
+    ma_handedness_left
+} ma_handedness;
+
+
+typedef struct
+{
+    ma_uint32 channelsOut;
+    ma_channel* pChannelMapOut;
+    ma_handedness handedness;   /* Defaults to right. Forward is -1 on the Z axis. In a left handed system, forward is +1 on the Z axis. */
+    float coneInnerAngleInRadians;
+    float coneOuterAngleInRadians;
+    float coneOuterGain;
+    float speedOfSound;
+    ma_vec3f worldUp;
+} ma_spatializer_listener_config;
+
+MA_API ma_spatializer_listener_config ma_spatializer_listener_config_init(ma_uint32 channelsOut);
+
+
+typedef struct
+{
+    ma_spatializer_listener_config config;
+    ma_atomic_vec3f position;  /* The absolute position of the listener. */
+    ma_atomic_vec3f direction; /* The direction the listener is facing. The world up vector is config.worldUp. */
+    ma_atomic_vec3f velocity;
+    ma_bool32 isEnabled;
+
+    /* Memory management. 
*/ + ma_bool32 _ownsHeap; + void* _pHeap; +} ma_spatializer_listener; + +MA_API ma_result ma_spatializer_listener_get_heap_size(const ma_spatializer_listener_config* pConfig, size_t* pHeapSizeInBytes); +MA_API ma_result ma_spatializer_listener_init_preallocated(const ma_spatializer_listener_config* pConfig, void* pHeap, ma_spatializer_listener* pListener); +MA_API ma_result ma_spatializer_listener_init(const ma_spatializer_listener_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_spatializer_listener* pListener); +MA_API void ma_spatializer_listener_uninit(ma_spatializer_listener* pListener, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API ma_channel* ma_spatializer_listener_get_channel_map(ma_spatializer_listener* pListener); +MA_API void ma_spatializer_listener_set_cone(ma_spatializer_listener* pListener, float innerAngleInRadians, float outerAngleInRadians, float outerGain); +MA_API void ma_spatializer_listener_get_cone(const ma_spatializer_listener* pListener, float* pInnerAngleInRadians, float* pOuterAngleInRadians, float* pOuterGain); +MA_API void ma_spatializer_listener_set_position(ma_spatializer_listener* pListener, float x, float y, float z); +MA_API ma_vec3f ma_spatializer_listener_get_position(const ma_spatializer_listener* pListener); +MA_API void ma_spatializer_listener_set_direction(ma_spatializer_listener* pListener, float x, float y, float z); +MA_API ma_vec3f ma_spatializer_listener_get_direction(const ma_spatializer_listener* pListener); +MA_API void ma_spatializer_listener_set_velocity(ma_spatializer_listener* pListener, float x, float y, float z); +MA_API ma_vec3f ma_spatializer_listener_get_velocity(const ma_spatializer_listener* pListener); +MA_API void ma_spatializer_listener_set_speed_of_sound(ma_spatializer_listener* pListener, float speedOfSound); +MA_API float ma_spatializer_listener_get_speed_of_sound(const ma_spatializer_listener* pListener); +MA_API void ma_spatializer_listener_set_world_up(ma_spatializer_listener* pListener, float x, float y, float z); +MA_API ma_vec3f ma_spatializer_listener_get_world_up(const ma_spatializer_listener* pListener); +MA_API void ma_spatializer_listener_set_enabled(ma_spatializer_listener* pListener, ma_bool32 isEnabled); +MA_API ma_bool32 ma_spatializer_listener_is_enabled(const ma_spatializer_listener* pListener); + + +typedef struct +{ + ma_uint32 channelsIn; + ma_uint32 channelsOut; + ma_channel* pChannelMapIn; + ma_attenuation_model attenuationModel; + ma_positioning positioning; + ma_handedness handedness; /* Defaults to right. Forward is -1 on the Z axis. In a left handed system, forward is +1 on the Z axis. */ + float minGain; + float maxGain; + float minDistance; + float maxDistance; + float rolloff; + float coneInnerAngleInRadians; + float coneOuterAngleInRadians; + float coneOuterGain; + float dopplerFactor; /* Set to 0 to disable doppler effect. */ + float directionalAttenuationFactor; /* Set to 0 to disable directional attenuation. */ + float minSpatializationChannelGain; /* The minimal scaling factor to apply to channel gains when accounting for the direction of the sound relative to the listener. Must be in the range of 0..1. Smaller values means more aggressive directional panning, larger values means more subtle directional panning. */ + ma_uint32 gainSmoothTimeInFrames; /* When the gain of a channel changes during spatialization, the transition will be linearly interpolated over this number of frames. 
*/ +} ma_spatializer_config; + +MA_API ma_spatializer_config ma_spatializer_config_init(ma_uint32 channelsIn, ma_uint32 channelsOut); + + +typedef struct +{ + ma_uint32 channelsIn; + ma_uint32 channelsOut; + ma_channel* pChannelMapIn; + ma_attenuation_model attenuationModel; + ma_positioning positioning; + ma_handedness handedness; /* Defaults to right. Forward is -1 on the Z axis. In a left handed system, forward is +1 on the Z axis. */ + float minGain; + float maxGain; + float minDistance; + float maxDistance; + float rolloff; + float coneInnerAngleInRadians; + float coneOuterAngleInRadians; + float coneOuterGain; + float dopplerFactor; /* Set to 0 to disable doppler effect. */ + float directionalAttenuationFactor; /* Set to 0 to disable directional attenuation. */ + ma_uint32 gainSmoothTimeInFrames; /* When the gain of a channel changes during spatialization, the transition will be linearly interpolated over this number of frames. */ + ma_atomic_vec3f position; + ma_atomic_vec3f direction; + ma_atomic_vec3f velocity; /* For doppler effect. */ + float dopplerPitch; /* Will be updated by ma_spatializer_process_pcm_frames() and can be used by higher level functions to apply a pitch shift for doppler effect. */ + float minSpatializationChannelGain; + ma_gainer gainer; /* For smooth gain transitions. */ + float* pNewChannelGainsOut; /* An offset of _pHeap. Used by ma_spatializer_process_pcm_frames() to store new channel gains. The number of elements in this array is equal to config.channelsOut. */ + + /* Memory management. */ + void* _pHeap; + ma_bool32 _ownsHeap; +} ma_spatializer; + +MA_API ma_result ma_spatializer_get_heap_size(const ma_spatializer_config* pConfig, size_t* pHeapSizeInBytes); +MA_API ma_result ma_spatializer_init_preallocated(const ma_spatializer_config* pConfig, void* pHeap, ma_spatializer* pSpatializer); +MA_API ma_result ma_spatializer_init(const ma_spatializer_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_spatializer* pSpatializer); +MA_API void ma_spatializer_uninit(ma_spatializer* pSpatializer, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API ma_result ma_spatializer_process_pcm_frames(ma_spatializer* pSpatializer, ma_spatializer_listener* pListener, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount); +MA_API ma_result ma_spatializer_set_master_volume(ma_spatializer* pSpatializer, float volume); +MA_API ma_result ma_spatializer_get_master_volume(const ma_spatializer* pSpatializer, float* pVolume); +MA_API ma_uint32 ma_spatializer_get_input_channels(const ma_spatializer* pSpatializer); +MA_API ma_uint32 ma_spatializer_get_output_channels(const ma_spatializer* pSpatializer); +MA_API void ma_spatializer_set_attenuation_model(ma_spatializer* pSpatializer, ma_attenuation_model attenuationModel); +MA_API ma_attenuation_model ma_spatializer_get_attenuation_model(const ma_spatializer* pSpatializer); +MA_API void ma_spatializer_set_positioning(ma_spatializer* pSpatializer, ma_positioning positioning); +MA_API ma_positioning ma_spatializer_get_positioning(const ma_spatializer* pSpatializer); +MA_API void ma_spatializer_set_rolloff(ma_spatializer* pSpatializer, float rolloff); +MA_API float ma_spatializer_get_rolloff(const ma_spatializer* pSpatializer); +MA_API void ma_spatializer_set_min_gain(ma_spatializer* pSpatializer, float minGain); +MA_API float ma_spatializer_get_min_gain(const ma_spatializer* pSpatializer); +MA_API void ma_spatializer_set_max_gain(ma_spatializer* pSpatializer, float maxGain); +MA_API float 
ma_spatializer_get_max_gain(const ma_spatializer* pSpatializer);
+MA_API void ma_spatializer_set_min_distance(ma_spatializer* pSpatializer, float minDistance);
+MA_API float ma_spatializer_get_min_distance(const ma_spatializer* pSpatializer);
+MA_API void ma_spatializer_set_max_distance(ma_spatializer* pSpatializer, float maxDistance);
+MA_API float ma_spatializer_get_max_distance(const ma_spatializer* pSpatializer);
+MA_API void ma_spatializer_set_cone(ma_spatializer* pSpatializer, float innerAngleInRadians, float outerAngleInRadians, float outerGain);
+MA_API void ma_spatializer_get_cone(const ma_spatializer* pSpatializer, float* pInnerAngleInRadians, float* pOuterAngleInRadians, float* pOuterGain);
+MA_API void ma_spatializer_set_doppler_factor(ma_spatializer* pSpatializer, float dopplerFactor);
+MA_API float ma_spatializer_get_doppler_factor(const ma_spatializer* pSpatializer);
+MA_API void ma_spatializer_set_directional_attenuation_factor(ma_spatializer* pSpatializer, float directionalAttenuationFactor);
+MA_API float ma_spatializer_get_directional_attenuation_factor(const ma_spatializer* pSpatializer);
+MA_API void ma_spatializer_set_position(ma_spatializer* pSpatializer, float x, float y, float z);
+MA_API ma_vec3f ma_spatializer_get_position(const ma_spatializer* pSpatializer);
+MA_API void ma_spatializer_set_direction(ma_spatializer* pSpatializer, float x, float y, float z);
+MA_API ma_vec3f ma_spatializer_get_direction(const ma_spatializer* pSpatializer);
+MA_API void ma_spatializer_set_velocity(ma_spatializer* pSpatializer, float x, float y, float z);
+MA_API ma_vec3f ma_spatializer_get_velocity(const ma_spatializer* pSpatializer);
+MA_API void ma_spatializer_get_relative_position_and_direction(const ma_spatializer* pSpatializer, const ma_spatializer_listener* pListener, ma_vec3f* pRelativePos, ma_vec3f* pRelativeDir);
+
+
+
+/************************************************************************************************************************************************************
+*************************************************************************************************************************************************************
+
+DATA CONVERSION
+===============
+
+This section contains the APIs for data conversion. You will find everything here for channel mapping, sample format conversion, resampling, etc.
+
+*************************************************************************************************************************************************************
+************************************************************************************************************************************************************/
+
+/**************************************************************************************************************************************************************
+
+Resampling
+
+**************************************************************************************************************************************************************/
+typedef struct
+{
+    ma_format format;
+    ma_uint32 channels;
+    ma_uint32 sampleRateIn;
+    ma_uint32 sampleRateOut;
+    ma_uint32 lpfOrder;         /* The low-pass filter order. Setting this to 0 will disable low-pass filtering. */
+    double lpfNyquistFactor;    /* 0..1. Defaults to 1. 1 = Half the sampling frequency (Nyquist Frequency), 0.5 = Quarter the sampling frequency (half Nyquist Frequency), etc. 
*/ +} ma_linear_resampler_config; + +MA_API ma_linear_resampler_config ma_linear_resampler_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRateIn, ma_uint32 sampleRateOut); + +typedef struct +{ + ma_linear_resampler_config config; + ma_uint32 inAdvanceInt; + ma_uint32 inAdvanceFrac; + ma_uint32 inTimeInt; + ma_uint32 inTimeFrac; + union + { + float* f32; + ma_int16* s16; + } x0; /* The previous input frame. */ + union + { + float* f32; + ma_int16* s16; + } x1; /* The next input frame. */ + ma_lpf lpf; + + /* Memory management. */ + void* _pHeap; + ma_bool32 _ownsHeap; +} ma_linear_resampler; + +MA_API ma_result ma_linear_resampler_get_heap_size(const ma_linear_resampler_config* pConfig, size_t* pHeapSizeInBytes); +MA_API ma_result ma_linear_resampler_init_preallocated(const ma_linear_resampler_config* pConfig, void* pHeap, ma_linear_resampler* pResampler); +MA_API ma_result ma_linear_resampler_init(const ma_linear_resampler_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_linear_resampler* pResampler); +MA_API void ma_linear_resampler_uninit(ma_linear_resampler* pResampler, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API ma_result ma_linear_resampler_process_pcm_frames(ma_linear_resampler* pResampler, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut); +MA_API ma_result ma_linear_resampler_set_rate(ma_linear_resampler* pResampler, ma_uint32 sampleRateIn, ma_uint32 sampleRateOut); +MA_API ma_result ma_linear_resampler_set_rate_ratio(ma_linear_resampler* pResampler, float ratioInOut); +MA_API ma_uint64 ma_linear_resampler_get_input_latency(const ma_linear_resampler* pResampler); +MA_API ma_uint64 ma_linear_resampler_get_output_latency(const ma_linear_resampler* pResampler); +MA_API ma_result ma_linear_resampler_get_required_input_frame_count(const ma_linear_resampler* pResampler, ma_uint64 outputFrameCount, ma_uint64* pInputFrameCount); +MA_API ma_result ma_linear_resampler_get_expected_output_frame_count(const ma_linear_resampler* pResampler, ma_uint64 inputFrameCount, ma_uint64* pOutputFrameCount); +MA_API ma_result ma_linear_resampler_reset(ma_linear_resampler* pResampler); + + +typedef struct ma_resampler_config ma_resampler_config; + +typedef void ma_resampling_backend; +typedef struct +{ + ma_result (* onGetHeapSize )(void* pUserData, const ma_resampler_config* pConfig, size_t* pHeapSizeInBytes); + ma_result (* onInit )(void* pUserData, const ma_resampler_config* pConfig, void* pHeap, ma_resampling_backend** ppBackend); + void (* onUninit )(void* pUserData, ma_resampling_backend* pBackend, const ma_allocation_callbacks* pAllocationCallbacks); + ma_result (* onProcess )(void* pUserData, ma_resampling_backend* pBackend, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut); + ma_result (* onSetRate )(void* pUserData, ma_resampling_backend* pBackend, ma_uint32 sampleRateIn, ma_uint32 sampleRateOut); /* Optional. Rate changes will be disabled. */ + ma_uint64 (* onGetInputLatency )(void* pUserData, const ma_resampling_backend* pBackend); /* Optional. Latency will be reported as 0. */ + ma_uint64 (* onGetOutputLatency )(void* pUserData, const ma_resampling_backend* pBackend); /* Optional. Latency will be reported as 0. */ + ma_result (* onGetRequiredInputFrameCount )(void* pUserData, const ma_resampling_backend* pBackend, ma_uint64 outputFrameCount, ma_uint64* pInputFrameCount); /* Optional. Latency mitigation will be disabled. 
*/
+    ma_result (* onGetExpectedOutputFrameCount)(void* pUserData, const ma_resampling_backend* pBackend, ma_uint64 inputFrameCount, ma_uint64* pOutputFrameCount);   /* Optional. Latency mitigation will be disabled. */
+    ma_result (* onReset                      )(void* pUserData, ma_resampling_backend* pBackend);
+} ma_resampling_backend_vtable;
+
+typedef enum
+{
+    ma_resample_algorithm_linear = 0,   /* Fastest, lowest quality. Optional low-pass filtering. Default. */
+    ma_resample_algorithm_custom,
+} ma_resample_algorithm;
+
+struct ma_resampler_config
+{
+    ma_format format;   /* Must be either ma_format_f32 or ma_format_s16. */
+    ma_uint32 channels;
+    ma_uint32 sampleRateIn;
+    ma_uint32 sampleRateOut;
+    ma_resample_algorithm algorithm;    /* When set to ma_resample_algorithm_custom, pBackendVTable will be used. */
+    ma_resampling_backend_vtable* pBackendVTable;
+    void* pBackendUserData;
+    struct
+    {
+        ma_uint32 lpfOrder;
+    } linear;
+};
+
+MA_API ma_resampler_config ma_resampler_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRateIn, ma_uint32 sampleRateOut, ma_resample_algorithm algorithm);
+
+typedef struct
+{
+    ma_resampling_backend* pBackend;
+    ma_resampling_backend_vtable* pBackendVTable;
+    void* pBackendUserData;
+    ma_format format;
+    ma_uint32 channels;
+    ma_uint32 sampleRateIn;
+    ma_uint32 sampleRateOut;
+    union
+    {
+        ma_linear_resampler linear;
+    } state;    /* State for stock resamplers so we can avoid a malloc. For stock resamplers, pBackend will point here. */
+
+    /* Memory management. */
+    void* _pHeap;
+    ma_bool32 _ownsHeap;
+} ma_resampler;
+
+MA_API ma_result ma_resampler_get_heap_size(const ma_resampler_config* pConfig, size_t* pHeapSizeInBytes);
+MA_API ma_result ma_resampler_init_preallocated(const ma_resampler_config* pConfig, void* pHeap, ma_resampler* pResampler);
+
+/*
+Initializes a new resampler object from a config.
+*/
+MA_API ma_result ma_resampler_init(const ma_resampler_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_resampler* pResampler);
+
+/*
+Uninitializes a resampler.
+*/
+MA_API void ma_resampler_uninit(ma_resampler* pResampler, const ma_allocation_callbacks* pAllocationCallbacks);
+
+/*
+Converts the given input data.
+
+Both the input and output frames must be in the format specified in the config when the resampler was initialized.
+
+On input, [pFrameCountOut] contains the number of output frames to process. On output it contains the number of output frames that
+were actually processed, which may be less than the requested amount which will happen if there's not enough input data. You can use
+ma_resampler_get_expected_output_frame_count() to know how many output frames will be processed for a given number of input frames.
+
+On input, [pFrameCountIn] contains the number of input frames contained in [pFramesIn]. On output it contains the number of whole
+input frames that were actually processed. You can use ma_resampler_get_required_input_frame_count() to know how many input frames
+you should provide for a given number of output frames. [pFramesIn] can be NULL, in which case zeroes will be used instead.
+
+If [pFramesOut] is NULL, a seek is performed. In this case, if [pFrameCountOut] is not NULL it will seek by the specified number of
+output frames. Otherwise, if [pFrameCountOut] is NULL and [pFrameCountIn] is not NULL, it will seek by the specified number of input
+frames. When seeking, [pFramesIn] is allowed to be NULL, in which case the internal timing state will be updated, but no input will be
+processed. 
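+
+As an illustrative sketch (this example is not part of the upstream documentation; the resampler
+object and the pFramesIn/pFramesOut buffers are assumed to have been set up by the caller), a
+typical processing call looks like this:
+
+    ma_uint64 frameCountIn  = 1024;     // Number of frames available in pFramesIn.
+    ma_uint64 frameCountOut = 2048;     // Capacity of pFramesOut, in frames.
+    ma_result result = ma_resampler_process_pcm_frames(&resampler, pFramesIn, &frameCountIn, pFramesOut, &frameCountOut);
+    if (result == MA_SUCCESS) {
+        // frameCountIn now holds the number of input frames consumed; frameCountOut holds the
+        // number of output frames written.
+    }
+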
Note that when seeking with [pFramesIn] set to NULL, any internal filter state will be updated as if zeroes were passed in.
+
+It is an error for [pFramesOut] to be non-NULL and [pFrameCountOut] to be NULL.
+
+It is an error for both [pFrameCountOut] and [pFrameCountIn] to be NULL.
+*/
+MA_API ma_result ma_resampler_process_pcm_frames(ma_resampler* pResampler, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut);
+
+
+/*
+Sets the input and output sample rate.
+*/
+MA_API ma_result ma_resampler_set_rate(ma_resampler* pResampler, ma_uint32 sampleRateIn, ma_uint32 sampleRateOut);
+
+/*
+Sets the input and output sample rate as a ratio.
+
+The ratio is in/out.
+*/
+MA_API ma_result ma_resampler_set_rate_ratio(ma_resampler* pResampler, float ratio);
+
+/*
+Retrieves the latency introduced by the resampler in input frames.
+*/
+MA_API ma_uint64 ma_resampler_get_input_latency(const ma_resampler* pResampler);
+
+/*
+Retrieves the latency introduced by the resampler in output frames.
+*/
+MA_API ma_uint64 ma_resampler_get_output_latency(const ma_resampler* pResampler);
+
+/*
+Calculates the number of whole input frames that would need to be read from the client in order to output the specified
+number of output frames.
+
+The returned value does not include cached input frames. It only returns the number of extra frames that would need to be
+read from the input buffer in order to output the specified number of output frames.
+*/
+MA_API ma_result ma_resampler_get_required_input_frame_count(const ma_resampler* pResampler, ma_uint64 outputFrameCount, ma_uint64* pInputFrameCount);
+
+/*
+Calculates the number of whole output frames that would be output after fully reading and consuming the specified number of
+input frames.
+*/
+MA_API ma_result ma_resampler_get_expected_output_frame_count(const ma_resampler* pResampler, ma_uint64 inputFrameCount, ma_uint64* pOutputFrameCount);
+
+/*
+Resets the resampler's timer and clears its internal cache.
+*/
+MA_API ma_result ma_resampler_reset(ma_resampler* pResampler);
+
+
+/**************************************************************************************************************************************************************
+
+Channel Conversion
+
+**************************************************************************************************************************************************************/
+typedef enum
+{
+    ma_channel_conversion_path_unknown,
+    ma_channel_conversion_path_passthrough,
+    ma_channel_conversion_path_mono_out,    /* Converting to mono. */
+    ma_channel_conversion_path_mono_in,     /* Converting from mono. */
+    ma_channel_conversion_path_shuffle,     /* Simple shuffle. Will use this when all channels are present in both input and output channel maps, but just in a different order. */
+    ma_channel_conversion_path_weights      /* Blended based on weights. */
+} ma_channel_conversion_path;
+
+typedef enum
+{
+    ma_mono_expansion_mode_duplicate = 0,   /* The default. */
+    ma_mono_expansion_mode_average,         /* Average the mono channel across all channels. */
+    ma_mono_expansion_mode_stereo_only,     /* Duplicate to the left and right channels only and ignore the others. 
*/ + ma_mono_expansion_mode_default = ma_mono_expansion_mode_duplicate +} ma_mono_expansion_mode; + +typedef struct +{ + ma_format format; + ma_uint32 channelsIn; + ma_uint32 channelsOut; + const ma_channel* pChannelMapIn; + const ma_channel* pChannelMapOut; + ma_channel_mix_mode mixingMode; + ma_bool32 calculateLFEFromSpatialChannels; /* When an output LFE channel is present, but no input LFE, set to true to set the output LFE to the average of all spatial channels (LR, FR, etc.). Ignored when an input LFE is present. */ + float** ppWeights; /* [in][out]. Only used when mixingMode is set to ma_channel_mix_mode_custom_weights. */ +} ma_channel_converter_config; + +MA_API ma_channel_converter_config ma_channel_converter_config_init(ma_format format, ma_uint32 channelsIn, const ma_channel* pChannelMapIn, ma_uint32 channelsOut, const ma_channel* pChannelMapOut, ma_channel_mix_mode mixingMode); + +typedef struct +{ + ma_format format; + ma_uint32 channelsIn; + ma_uint32 channelsOut; + ma_channel_mix_mode mixingMode; + ma_channel_conversion_path conversionPath; + ma_channel* pChannelMapIn; + ma_channel* pChannelMapOut; + ma_uint8* pShuffleTable; /* Indexed by output channel index. */ + union + { + float** f32; + ma_int32** s16; + } weights; /* [in][out] */ + + /* Memory management. */ + void* _pHeap; + ma_bool32 _ownsHeap; +} ma_channel_converter; + +MA_API ma_result ma_channel_converter_get_heap_size(const ma_channel_converter_config* pConfig, size_t* pHeapSizeInBytes); +MA_API ma_result ma_channel_converter_init_preallocated(const ma_channel_converter_config* pConfig, void* pHeap, ma_channel_converter* pConverter); +MA_API ma_result ma_channel_converter_init(const ma_channel_converter_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_channel_converter* pConverter); +MA_API void ma_channel_converter_uninit(ma_channel_converter* pConverter, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API ma_result ma_channel_converter_process_pcm_frames(ma_channel_converter* pConverter, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount); +MA_API ma_result ma_channel_converter_get_input_channel_map(const ma_channel_converter* pConverter, ma_channel* pChannelMap, size_t channelMapCap); +MA_API ma_result ma_channel_converter_get_output_channel_map(const ma_channel_converter* pConverter, ma_channel* pChannelMap, size_t channelMapCap); + + +/************************************************************************************************************************************************************** + +Data Conversion + +**************************************************************************************************************************************************************/ +typedef struct +{ + ma_format formatIn; + ma_format formatOut; + ma_uint32 channelsIn; + ma_uint32 channelsOut; + ma_uint32 sampleRateIn; + ma_uint32 sampleRateOut; + ma_channel* pChannelMapIn; + ma_channel* pChannelMapOut; + ma_dither_mode ditherMode; + ma_channel_mix_mode channelMixMode; + ma_bool32 calculateLFEFromSpatialChannels; /* When an output LFE channel is present, but no input LFE, set to true to set the output LFE to the average of all spatial channels (LR, FR, etc.). Ignored when an input LFE is present. */ + float** ppChannelWeights; /* [in][out]. Only used when mixingMode is set to ma_channel_mix_mode_custom_weights. 
*/ + ma_bool32 allowDynamicSampleRate; + ma_resampler_config resampling; +} ma_data_converter_config; + +MA_API ma_data_converter_config ma_data_converter_config_init_default(void); +MA_API ma_data_converter_config ma_data_converter_config_init(ma_format formatIn, ma_format formatOut, ma_uint32 channelsIn, ma_uint32 channelsOut, ma_uint32 sampleRateIn, ma_uint32 sampleRateOut); + + +typedef enum +{ + ma_data_converter_execution_path_passthrough, /* No conversion. */ + ma_data_converter_execution_path_format_only, /* Only format conversion. */ + ma_data_converter_execution_path_channels_only, /* Only channel conversion. */ + ma_data_converter_execution_path_resample_only, /* Only resampling. */ + ma_data_converter_execution_path_resample_first, /* All conversions, but resample as the first step. */ + ma_data_converter_execution_path_channels_first /* All conversions, but channels as the first step. */ +} ma_data_converter_execution_path; + +typedef struct +{ + ma_format formatIn; + ma_format formatOut; + ma_uint32 channelsIn; + ma_uint32 channelsOut; + ma_uint32 sampleRateIn; + ma_uint32 sampleRateOut; + ma_dither_mode ditherMode; + ma_data_converter_execution_path executionPath; /* The execution path the data converter will follow when processing. */ + ma_channel_converter channelConverter; + ma_resampler resampler; + ma_bool8 hasPreFormatConversion; + ma_bool8 hasPostFormatConversion; + ma_bool8 hasChannelConverter; + ma_bool8 hasResampler; + ma_bool8 isPassthrough; + + /* Memory management. */ + ma_bool8 _ownsHeap; + void* _pHeap; +} ma_data_converter; + +MA_API ma_result ma_data_converter_get_heap_size(const ma_data_converter_config* pConfig, size_t* pHeapSizeInBytes); +MA_API ma_result ma_data_converter_init_preallocated(const ma_data_converter_config* pConfig, void* pHeap, ma_data_converter* pConverter); +MA_API ma_result ma_data_converter_init(const ma_data_converter_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_data_converter* pConverter); +MA_API void ma_data_converter_uninit(ma_data_converter* pConverter, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API ma_result ma_data_converter_process_pcm_frames(ma_data_converter* pConverter, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut); +MA_API ma_result ma_data_converter_set_rate(ma_data_converter* pConverter, ma_uint32 sampleRateIn, ma_uint32 sampleRateOut); +MA_API ma_result ma_data_converter_set_rate_ratio(ma_data_converter* pConverter, float ratioInOut); +MA_API ma_uint64 ma_data_converter_get_input_latency(const ma_data_converter* pConverter); +MA_API ma_uint64 ma_data_converter_get_output_latency(const ma_data_converter* pConverter); +MA_API ma_result ma_data_converter_get_required_input_frame_count(const ma_data_converter* pConverter, ma_uint64 outputFrameCount, ma_uint64* pInputFrameCount); +MA_API ma_result ma_data_converter_get_expected_output_frame_count(const ma_data_converter* pConverter, ma_uint64 inputFrameCount, ma_uint64* pOutputFrameCount); +MA_API ma_result ma_data_converter_get_input_channel_map(const ma_data_converter* pConverter, ma_channel* pChannelMap, size_t channelMapCap); +MA_API ma_result ma_data_converter_get_output_channel_map(const ma_data_converter* pConverter, ma_channel* pChannelMap, size_t channelMapCap); +MA_API ma_result ma_data_converter_reset(ma_data_converter* pConverter); + + 
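+/*
+As an illustrative sketch (not part of the upstream header; the buffers and frame counts are
+hypothetical and error handling is omitted), a typical use of the data converter declared above
+looks like this:
+
+    ma_data_converter_config config = ma_data_converter_config_init(
+        ma_format_s16, ma_format_f32,   // formatIn, formatOut
+        2, 2,                           // channelsIn, channelsOut
+        44100, 48000);                  // sampleRateIn, sampleRateOut
+
+    ma_data_converter converter;
+    if (ma_data_converter_init(&config, NULL, &converter) == MA_SUCCESS) {
+        ma_uint64 frameCountIn  = 512;      // Frames available in pFramesIn.
+        ma_uint64 frameCountOut = 1024;     // Capacity of pFramesOut, in frames.
+        ma_data_converter_process_pcm_frames(&converter, pFramesIn, &frameCountIn, pFramesOut, &frameCountOut);
+        ma_data_converter_uninit(&converter, NULL);
+    }
+*/
+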
+/************************************************************************************************************************************************************ + +Format Conversion + +************************************************************************************************************************************************************/ +MA_API void ma_pcm_u8_to_s16(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode); +MA_API void ma_pcm_u8_to_s24(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode); +MA_API void ma_pcm_u8_to_s32(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode); +MA_API void ma_pcm_u8_to_f32(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode); +MA_API void ma_pcm_s16_to_u8(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode); +MA_API void ma_pcm_s16_to_s24(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode); +MA_API void ma_pcm_s16_to_s32(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode); +MA_API void ma_pcm_s16_to_f32(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode); +MA_API void ma_pcm_s24_to_u8(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode); +MA_API void ma_pcm_s24_to_s16(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode); +MA_API void ma_pcm_s24_to_s32(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode); +MA_API void ma_pcm_s24_to_f32(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode); +MA_API void ma_pcm_s32_to_u8(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode); +MA_API void ma_pcm_s32_to_s16(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode); +MA_API void ma_pcm_s32_to_s24(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode); +MA_API void ma_pcm_s32_to_f32(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode); +MA_API void ma_pcm_f32_to_u8(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode); +MA_API void ma_pcm_f32_to_s16(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode); +MA_API void ma_pcm_f32_to_s24(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode); +MA_API void ma_pcm_f32_to_s32(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode); +MA_API void ma_pcm_convert(void* pOut, ma_format formatOut, const void* pIn, ma_format formatIn, ma_uint64 sampleCount, ma_dither_mode ditherMode); +MA_API void ma_convert_pcm_frames_format(void* pOut, ma_format formatOut, const void* pIn, ma_format formatIn, ma_uint64 frameCount, ma_uint32 channels, ma_dither_mode ditherMode); + +/* +Deinterleaves an interleaved buffer. +*/ +MA_API void ma_deinterleave_pcm_frames(ma_format format, ma_uint32 channels, ma_uint64 frameCount, const void* pInterleavedPCMFrames, void** ppDeinterleavedPCMFrames); + +/* +Interleaves a group of deinterleaved buffers. 
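+
+As an illustrative sketch (not part of the upstream documentation; the interleaved buffer and frame
+count are hypothetical), a stereo f32 round trip through ma_deinterleave_pcm_frames() and this
+function might look like the following:
+
+    float left[1024];
+    float right[1024];
+    void* ppDeinterleaved[2] = { left, right };
+    ma_deinterleave_pcm_frames(ma_format_f32, 2, 1024, pInterleaved, ppDeinterleaved);
+    ma_interleave_pcm_frames(ma_format_f32, 2, 1024, (const void**)ppDeinterleaved, pInterleaved);
+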
+*/
+MA_API void ma_interleave_pcm_frames(ma_format format, ma_uint32 channels, ma_uint64 frameCount, const void** ppDeinterleavedPCMFrames, void* pInterleavedPCMFrames);
+
+
+/************************************************************************************************************************************************************
+
+Channel Maps
+
+************************************************************************************************************************************************************/
+/*
+This is used in the shuffle table to indicate that the channel index is undefined and should be ignored.
+*/
+#define MA_CHANNEL_INDEX_NULL 255
+
+/*
+Retrieves the channel position of the specified channel in the given channel map.
+
+The pChannelMap parameter can be null, in which case miniaudio's default channel map will be assumed.
+*/
+MA_API ma_channel ma_channel_map_get_channel(const ma_channel* pChannelMap, ma_uint32 channelCount, ma_uint32 channelIndex);
+
+/*
+Initializes a blank channel map.
+
+When a blank channel map is specified anywhere it indicates that the native channel map should be used.
+*/
+MA_API void ma_channel_map_init_blank(ma_channel* pChannelMap, ma_uint32 channels);
+
+/*
+Helper for retrieving a standard channel map.
+
+The output channel map buffer must have a capacity of at least `channelMapCap`.
+*/
+MA_API void ma_channel_map_init_standard(ma_standard_channel_map standardChannelMap, ma_channel* pChannelMap, size_t channelMapCap, ma_uint32 channels);
+
+/*
+Copies a channel map.
+
+Both input and output channel map buffers must have a capacity of at least `channels`.
+*/
+MA_API void ma_channel_map_copy(ma_channel* pOut, const ma_channel* pIn, ma_uint32 channels);
+
+/*
+Copies a channel map if one is specified, otherwise copies the default channel map.
+
+The output buffer must have a capacity of at least `channels`. If not NULL, the input channel map must also have a capacity of at least `channels`.
+*/
+MA_API void ma_channel_map_copy_or_default(ma_channel* pOut, size_t channelMapCapOut, const ma_channel* pIn, ma_uint32 channels);
+
+
+/*
+Determines whether or not a channel map is valid.
+
+A blank channel map is valid (all channels set to MA_CHANNEL_NONE). The way a blank channel map is handled is context specific, but
+is usually treated as a passthrough.
+
+Invalid channel maps:
+  - A channel map with no channels
+  - A channel map with more than one channel and a mono channel
+
+The channel map buffer must have a capacity of at least `channels`.
+*/
+MA_API ma_bool32 ma_channel_map_is_valid(const ma_channel* pChannelMap, ma_uint32 channels);
+
+/*
+Helper for comparing two channel maps for equality.
+
+This assumes the channel count is the same between the two.
+
+Both channel map buffers must have a capacity of at least `channels`.
+*/
+MA_API ma_bool32 ma_channel_map_is_equal(const ma_channel* pChannelMapA, const ma_channel* pChannelMapB, ma_uint32 channels);
+
+/*
+Helper for determining if a channel map is blank (all channels set to MA_CHANNEL_NONE).
+
+The channel map buffer must have a capacity of at least `channels`.
+*/
+MA_API ma_bool32 ma_channel_map_is_blank(const ma_channel* pChannelMap, ma_uint32 channels);
+
+/*
+Helper for determining whether or not a channel is present in the given channel map.
+
+The channel map buffer must have a capacity of at least `channels`. 
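+
+As an illustrative sketch (not part of the upstream documentation; MA_CHANNEL_LFE and
+ma_standard_channel_map_default are declared earlier in this header), checking a standard 5.1 map
+for an LFE channel might look like this:
+
+    ma_channel channelMap[6];
+    ma_channel_map_init_standard(ma_standard_channel_map_default, channelMap, 6, 6);
+    ma_bool32 hasLFE = ma_channel_map_contains_channel_position(6, channelMap, MA_CHANNEL_LFE);
+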
+*/ +MA_API ma_bool32 ma_channel_map_contains_channel_position(ma_uint32 channels, const ma_channel* pChannelMap, ma_channel channelPosition); + +/* +Find a channel position in the given channel map. Returns MA_TRUE if the channel is found; MA_FALSE otherwise. The +index of the channel is output to `pChannelIndex`. + +The channel map buffer must have a capacity of at least `channels`. +*/ +MA_API ma_bool32 ma_channel_map_find_channel_position(ma_uint32 channels, const ma_channel* pChannelMap, ma_channel channelPosition, ma_uint32* pChannelIndex); + +/* +Generates a string representing the given channel map. + +This is for printing and debugging purposes, not serialization/deserialization. + +Returns the length of the string, not including the null terminator. +*/ +MA_API size_t ma_channel_map_to_string(const ma_channel* pChannelMap, ma_uint32 channels, char* pBufferOut, size_t bufferCap); + +/* +Retrieves a human readable version of a channel position. +*/ +MA_API const char* ma_channel_position_to_string(ma_channel channel); + + +/************************************************************************************************************************************************************ + +Conversion Helpers + +************************************************************************************************************************************************************/ + +/* +High-level helper for doing a full format conversion in one go. Returns the number of output frames. Call this with pOut set to NULL to +determine the required size of the output buffer. frameCountOut should be set to the capacity of pOut. If pOut is NULL, frameCountOut is +ignored. + +A return value of 0 indicates an error. + +This function is useful for one-off bulk conversions, but if you're streaming data you should use the ma_data_converter APIs instead. 
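+
+A typical pattern is to call this twice: once with pOut set to NULL to measure, then again to do
+the conversion (illustrative sketch; `pFramesIn` and `frameCountIn` are assumed to be defined by
+the caller and error checking is omitted):
+
+    ma_uint64 frameCountOut = ma_convert_frames(NULL, 0, ma_format_f32, 2, 48000, pFramesIn, frameCountIn, ma_format_s16, 2, 44100);
+    void* pFramesOut = ma_malloc((size_t)(frameCountOut * ma_get_bytes_per_frame(ma_format_f32, 2)), NULL);
+    ma_convert_frames(pFramesOut, frameCountOut, ma_format_f32, 2, 48000, pFramesIn, frameCountIn, ma_format_s16, 2, 44100);
+    ... use pFramesOut, then release it with ma_free(pFramesOut, NULL) ...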
+*/ +MA_API ma_uint64 ma_convert_frames(void* pOut, ma_uint64 frameCountOut, ma_format formatOut, ma_uint32 channelsOut, ma_uint32 sampleRateOut, const void* pIn, ma_uint64 frameCountIn, ma_format formatIn, ma_uint32 channelsIn, ma_uint32 sampleRateIn); +MA_API ma_uint64 ma_convert_frames_ex(void* pOut, ma_uint64 frameCountOut, const void* pIn, ma_uint64 frameCountIn, const ma_data_converter_config* pConfig); + + +/************************************************************************************************************************************************************ + +Data Source + +************************************************************************************************************************************************************/ +typedef void ma_data_source; + +#define MA_DATA_SOURCE_SELF_MANAGED_RANGE_AND_LOOP_POINT 0x00000001 + +typedef struct +{ + ma_result (* onRead)(ma_data_source* pDataSource, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead); + ma_result (* onSeek)(ma_data_source* pDataSource, ma_uint64 frameIndex); + ma_result (* onGetDataFormat)(ma_data_source* pDataSource, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate, ma_channel* pChannelMap, size_t channelMapCap); + ma_result (* onGetCursor)(ma_data_source* pDataSource, ma_uint64* pCursor); + ma_result (* onGetLength)(ma_data_source* pDataSource, ma_uint64* pLength); + ma_result (* onSetLooping)(ma_data_source* pDataSource, ma_bool32 isLooping); + ma_uint32 flags; +} ma_data_source_vtable; + +typedef ma_data_source* (* ma_data_source_get_next_proc)(ma_data_source* pDataSource); + +typedef struct +{ + const ma_data_source_vtable* vtable; +} ma_data_source_config; + +MA_API ma_data_source_config ma_data_source_config_init(void); + + +typedef struct +{ + const ma_data_source_vtable* vtable; + ma_uint64 rangeBegInFrames; + ma_uint64 rangeEndInFrames; /* Set to -1 for unranged (default). */ + ma_uint64 loopBegInFrames; /* Relative to rangeBegInFrames. */ + ma_uint64 loopEndInFrames; /* Relative to rangeBegInFrames. Set to -1 for the end of the range. */ + ma_data_source* pCurrent; /* When non-NULL, the data source being initialized will act as a proxy and will route all operations to pCurrent. Used in conjunction with pNext/onGetNext for seamless chaining. */ + ma_data_source* pNext; /* When set to NULL, onGetNext will be used. */ + ma_data_source_get_next_proc onGetNext; /* Will be used when pNext is NULL. If both are NULL, no next will be used. */ + MA_ATOMIC(4, ma_bool32) isLooping; +} ma_data_source_base; + +MA_API ma_result ma_data_source_init(const ma_data_source_config* pConfig, ma_data_source* pDataSource); +MA_API void ma_data_source_uninit(ma_data_source* pDataSource); +MA_API ma_result ma_data_source_read_pcm_frames(ma_data_source* pDataSource, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead); /* Must support pFramesOut = NULL in which case a forward seek should be performed. */ +MA_API ma_result ma_data_source_seek_pcm_frames(ma_data_source* pDataSource, ma_uint64 frameCount, ma_uint64* pFramesSeeked); /* Can only seek forward. Equivalent to ma_data_source_read_pcm_frames(pDataSource, NULL, frameCount, &framesRead); */ +MA_API ma_result ma_data_source_seek_to_pcm_frame(ma_data_source* pDataSource, ma_uint64 frameIndex); +MA_API ma_result ma_data_source_seek_seconds(ma_data_source* pDataSource, float secondCount, float* pSecondsSeeked); /* Can only seek forward. 
Abstraction to ma_data_source_seek_pcm_frames() */ +MA_API ma_result ma_data_source_seek_to_second(ma_data_source* pDataSource, float seekPointInSeconds); /* Abstraction to ma_data_source_seek_to_pcm_frame() */ +MA_API ma_result ma_data_source_get_data_format(ma_data_source* pDataSource, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate, ma_channel* pChannelMap, size_t channelMapCap); +MA_API ma_result ma_data_source_get_cursor_in_pcm_frames(ma_data_source* pDataSource, ma_uint64* pCursor); +MA_API ma_result ma_data_source_get_length_in_pcm_frames(ma_data_source* pDataSource, ma_uint64* pLength); /* Returns MA_NOT_IMPLEMENTED if the length is unknown or cannot be determined. Decoders can return this. */ +MA_API ma_result ma_data_source_get_cursor_in_seconds(ma_data_source* pDataSource, float* pCursor); +MA_API ma_result ma_data_source_get_length_in_seconds(ma_data_source* pDataSource, float* pLength); +MA_API ma_result ma_data_source_set_looping(ma_data_source* pDataSource, ma_bool32 isLooping); +MA_API ma_bool32 ma_data_source_is_looping(const ma_data_source* pDataSource); +MA_API ma_result ma_data_source_set_range_in_pcm_frames(ma_data_source* pDataSource, ma_uint64 rangeBegInFrames, ma_uint64 rangeEndInFrames); +MA_API void ma_data_source_get_range_in_pcm_frames(const ma_data_source* pDataSource, ma_uint64* pRangeBegInFrames, ma_uint64* pRangeEndInFrames); +MA_API ma_result ma_data_source_set_loop_point_in_pcm_frames(ma_data_source* pDataSource, ma_uint64 loopBegInFrames, ma_uint64 loopEndInFrames); +MA_API void ma_data_source_get_loop_point_in_pcm_frames(const ma_data_source* pDataSource, ma_uint64* pLoopBegInFrames, ma_uint64* pLoopEndInFrames); +MA_API ma_result ma_data_source_set_current(ma_data_source* pDataSource, ma_data_source* pCurrentDataSource); +MA_API ma_data_source* ma_data_source_get_current(const ma_data_source* pDataSource); +MA_API ma_result ma_data_source_set_next(ma_data_source* pDataSource, ma_data_source* pNextDataSource); +MA_API ma_data_source* ma_data_source_get_next(const ma_data_source* pDataSource); +MA_API ma_result ma_data_source_set_next_callback(ma_data_source* pDataSource, ma_data_source_get_next_proc onGetNext); +MA_API ma_data_source_get_next_proc ma_data_source_get_next_callback(const ma_data_source* pDataSource); + + +typedef struct +{ + ma_data_source_base ds; + ma_format format; + ma_uint32 channels; + ma_uint32 sampleRate; + ma_uint64 cursor; + ma_uint64 sizeInFrames; + const void* pData; +} ma_audio_buffer_ref; + +MA_API ma_result ma_audio_buffer_ref_init(ma_format format, ma_uint32 channels, const void* pData, ma_uint64 sizeInFrames, ma_audio_buffer_ref* pAudioBufferRef); +MA_API void ma_audio_buffer_ref_uninit(ma_audio_buffer_ref* pAudioBufferRef); +MA_API ma_result ma_audio_buffer_ref_set_data(ma_audio_buffer_ref* pAudioBufferRef, const void* pData, ma_uint64 sizeInFrames); +MA_API ma_uint64 ma_audio_buffer_ref_read_pcm_frames(ma_audio_buffer_ref* pAudioBufferRef, void* pFramesOut, ma_uint64 frameCount, ma_bool32 loop); +MA_API ma_result ma_audio_buffer_ref_seek_to_pcm_frame(ma_audio_buffer_ref* pAudioBufferRef, ma_uint64 frameIndex); +MA_API ma_result ma_audio_buffer_ref_map(ma_audio_buffer_ref* pAudioBufferRef, void** ppFramesOut, ma_uint64* pFrameCount); +MA_API ma_result ma_audio_buffer_ref_unmap(ma_audio_buffer_ref* pAudioBufferRef, ma_uint64 frameCount); /* Returns MA_AT_END if the end has been reached. This should be considered successful. 
*/
+MA_API ma_bool32 ma_audio_buffer_ref_at_end(const ma_audio_buffer_ref* pAudioBufferRef);
+MA_API ma_result ma_audio_buffer_ref_get_cursor_in_pcm_frames(const ma_audio_buffer_ref* pAudioBufferRef, ma_uint64* pCursor);
+MA_API ma_result ma_audio_buffer_ref_get_length_in_pcm_frames(const ma_audio_buffer_ref* pAudioBufferRef, ma_uint64* pLength);
+MA_API ma_result ma_audio_buffer_ref_get_available_frames(const ma_audio_buffer_ref* pAudioBufferRef, ma_uint64* pAvailableFrames);
+
+
+
+typedef struct
+{
+    ma_format format;
+    ma_uint32 channels;
+    ma_uint32 sampleRate;
+    ma_uint64 sizeInFrames;
+    const void* pData; /* If set to NULL, will allocate a block of memory for you. */
+    ma_allocation_callbacks allocationCallbacks;
+} ma_audio_buffer_config;
+
+MA_API ma_audio_buffer_config ma_audio_buffer_config_init(ma_format format, ma_uint32 channels, ma_uint64 sizeInFrames, const void* pData, const ma_allocation_callbacks* pAllocationCallbacks);
+
+typedef struct
+{
+    ma_audio_buffer_ref ref;
+    ma_allocation_callbacks allocationCallbacks;
+    ma_bool32 ownsData; /* Used to control whether or not miniaudio owns the data buffer. If set to true, pData will be freed in ma_audio_buffer_uninit(). */
+    ma_uint8 _pExtraData[1]; /* For allocating a buffer with the memory located directly after the other memory of the structure. */
+} ma_audio_buffer;
+
+MA_API ma_result ma_audio_buffer_init(const ma_audio_buffer_config* pConfig, ma_audio_buffer* pAudioBuffer);
+MA_API ma_result ma_audio_buffer_init_copy(const ma_audio_buffer_config* pConfig, ma_audio_buffer* pAudioBuffer);
+MA_API ma_result ma_audio_buffer_alloc_and_init(const ma_audio_buffer_config* pConfig, ma_audio_buffer** ppAudioBuffer); /* Always copies the data. Doesn't make sense to use this otherwise. Use ma_audio_buffer_uninit_and_free() to uninit. */
+MA_API void ma_audio_buffer_uninit(ma_audio_buffer* pAudioBuffer);
+MA_API void ma_audio_buffer_uninit_and_free(ma_audio_buffer* pAudioBuffer);
+MA_API ma_uint64 ma_audio_buffer_read_pcm_frames(ma_audio_buffer* pAudioBuffer, void* pFramesOut, ma_uint64 frameCount, ma_bool32 loop);
+MA_API ma_result ma_audio_buffer_seek_to_pcm_frame(ma_audio_buffer* pAudioBuffer, ma_uint64 frameIndex);
+MA_API ma_result ma_audio_buffer_map(ma_audio_buffer* pAudioBuffer, void** ppFramesOut, ma_uint64* pFrameCount);
+MA_API ma_result ma_audio_buffer_unmap(ma_audio_buffer* pAudioBuffer, ma_uint64 frameCount); /* Returns MA_AT_END if the end has been reached. This should be considered successful. */
+MA_API ma_bool32 ma_audio_buffer_at_end(const ma_audio_buffer* pAudioBuffer);
+MA_API ma_result ma_audio_buffer_get_cursor_in_pcm_frames(const ma_audio_buffer* pAudioBuffer, ma_uint64* pCursor);
+MA_API ma_result ma_audio_buffer_get_length_in_pcm_frames(const ma_audio_buffer* pAudioBuffer, ma_uint64* pLength);
+MA_API ma_result ma_audio_buffer_get_available_frames(const ma_audio_buffer* pAudioBuffer, ma_uint64* pAvailableFrames);
+
+
+/*
+Paged Audio Buffer
+==================
+A paged audio buffer is made up of a linked list of pages. It's expandable, but not shrinkable. It
+can be used for cases where audio data is streamed in asynchronously while allowing data to be read
+at the same time.
+
+This is lock-free, but not 100% thread safe. You can append a page and read from the buffer
+simultaneously across different threads, however only one thread at a time can append, and only one
+thread at a time can read and seek.
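+
+As a rough sketch of the intended usage (error handling omitted; `format`, `channels`,
+`pageSizeInFrames` and the audio pointers are assumed to be defined elsewhere):
+
+    ma_paged_audio_buffer_data data;
+    ma_paged_audio_buffer_data_init(format, channels, &data);
+
+    ... streaming thread appends pages as data arrives ...
+    ma_paged_audio_buffer_data_allocate_and_append_page(&data, pageSizeInFrames, pNewAudio, NULL);
+
+    ... reading thread reads through a ma_paged_audio_buffer ...
+    ma_paged_audio_buffer_config config = ma_paged_audio_buffer_config_init(&data);
+    ma_paged_audio_buffer pagedBuffer;
+    ma_paged_audio_buffer_init(&config, &pagedBuffer);
+    ma_paged_audio_buffer_read_pcm_frames(&pagedBuffer, pFramesOut, frameCount, &framesRead);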
+*/ +typedef struct ma_paged_audio_buffer_page ma_paged_audio_buffer_page; +struct ma_paged_audio_buffer_page +{ + MA_ATOMIC(MA_SIZEOF_PTR, ma_paged_audio_buffer_page*) pNext; + ma_uint64 sizeInFrames; + ma_uint8 pAudioData[1]; +}; + +typedef struct +{ + ma_format format; + ma_uint32 channels; + ma_paged_audio_buffer_page head; /* Dummy head for the lock-free algorithm. Always has a size of 0. */ + MA_ATOMIC(MA_SIZEOF_PTR, ma_paged_audio_buffer_page*) pTail; /* Never null. Initially set to &head. */ +} ma_paged_audio_buffer_data; + +MA_API ma_result ma_paged_audio_buffer_data_init(ma_format format, ma_uint32 channels, ma_paged_audio_buffer_data* pData); +MA_API void ma_paged_audio_buffer_data_uninit(ma_paged_audio_buffer_data* pData, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API ma_paged_audio_buffer_page* ma_paged_audio_buffer_data_get_head(ma_paged_audio_buffer_data* pData); +MA_API ma_paged_audio_buffer_page* ma_paged_audio_buffer_data_get_tail(ma_paged_audio_buffer_data* pData); +MA_API ma_result ma_paged_audio_buffer_data_get_length_in_pcm_frames(ma_paged_audio_buffer_data* pData, ma_uint64* pLength); +MA_API ma_result ma_paged_audio_buffer_data_allocate_page(ma_paged_audio_buffer_data* pData, ma_uint64 pageSizeInFrames, const void* pInitialData, const ma_allocation_callbacks* pAllocationCallbacks, ma_paged_audio_buffer_page** ppPage); +MA_API ma_result ma_paged_audio_buffer_data_free_page(ma_paged_audio_buffer_data* pData, ma_paged_audio_buffer_page* pPage, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API ma_result ma_paged_audio_buffer_data_append_page(ma_paged_audio_buffer_data* pData, ma_paged_audio_buffer_page* pPage); +MA_API ma_result ma_paged_audio_buffer_data_allocate_and_append_page(ma_paged_audio_buffer_data* pData, ma_uint32 pageSizeInFrames, const void* pInitialData, const ma_allocation_callbacks* pAllocationCallbacks); + + +typedef struct +{ + ma_paged_audio_buffer_data* pData; /* Must not be null. */ +} ma_paged_audio_buffer_config; + +MA_API ma_paged_audio_buffer_config ma_paged_audio_buffer_config_init(ma_paged_audio_buffer_data* pData); + + +typedef struct +{ + ma_data_source_base ds; + ma_paged_audio_buffer_data* pData; /* Audio data is read from here. Cannot be null. */ + ma_paged_audio_buffer_page* pCurrent; + ma_uint64 relativeCursor; /* Relative to the current page. */ + ma_uint64 absoluteCursor; +} ma_paged_audio_buffer; + +MA_API ma_result ma_paged_audio_buffer_init(const ma_paged_audio_buffer_config* pConfig, ma_paged_audio_buffer* pPagedAudioBuffer); +MA_API void ma_paged_audio_buffer_uninit(ma_paged_audio_buffer* pPagedAudioBuffer); +MA_API ma_result ma_paged_audio_buffer_read_pcm_frames(ma_paged_audio_buffer* pPagedAudioBuffer, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead); /* Returns MA_AT_END if no more pages available. 
*/ +MA_API ma_result ma_paged_audio_buffer_seek_to_pcm_frame(ma_paged_audio_buffer* pPagedAudioBuffer, ma_uint64 frameIndex); +MA_API ma_result ma_paged_audio_buffer_get_cursor_in_pcm_frames(ma_paged_audio_buffer* pPagedAudioBuffer, ma_uint64* pCursor); +MA_API ma_result ma_paged_audio_buffer_get_length_in_pcm_frames(ma_paged_audio_buffer* pPagedAudioBuffer, ma_uint64* pLength); + + + +/************************************************************************************************************************************************************ + +Ring Buffer + +************************************************************************************************************************************************************/ +typedef struct +{ + void* pBuffer; + ma_uint32 subbufferSizeInBytes; + ma_uint32 subbufferCount; + ma_uint32 subbufferStrideInBytes; + MA_ATOMIC(4, ma_uint32) encodedReadOffset; /* Most significant bit is the loop flag. Lower 31 bits contains the actual offset in bytes. Must be used atomically. */ + MA_ATOMIC(4, ma_uint32) encodedWriteOffset; /* Most significant bit is the loop flag. Lower 31 bits contains the actual offset in bytes. Must be used atomically. */ + ma_bool8 ownsBuffer; /* Used to know whether or not miniaudio is responsible for free()-ing the buffer. */ + ma_bool8 clearOnWriteAcquire; /* When set, clears the acquired write buffer before returning from ma_rb_acquire_write(). */ + ma_allocation_callbacks allocationCallbacks; +} ma_rb; + +MA_API ma_result ma_rb_init_ex(size_t subbufferSizeInBytes, size_t subbufferCount, size_t subbufferStrideInBytes, void* pOptionalPreallocatedBuffer, const ma_allocation_callbacks* pAllocationCallbacks, ma_rb* pRB); +MA_API ma_result ma_rb_init(size_t bufferSizeInBytes, void* pOptionalPreallocatedBuffer, const ma_allocation_callbacks* pAllocationCallbacks, ma_rb* pRB); +MA_API void ma_rb_uninit(ma_rb* pRB); +MA_API void ma_rb_reset(ma_rb* pRB); +MA_API ma_result ma_rb_acquire_read(ma_rb* pRB, size_t* pSizeInBytes, void** ppBufferOut); +MA_API ma_result ma_rb_commit_read(ma_rb* pRB, size_t sizeInBytes); +MA_API ma_result ma_rb_acquire_write(ma_rb* pRB, size_t* pSizeInBytes, void** ppBufferOut); +MA_API ma_result ma_rb_commit_write(ma_rb* pRB, size_t sizeInBytes); +MA_API ma_result ma_rb_seek_read(ma_rb* pRB, size_t offsetInBytes); +MA_API ma_result ma_rb_seek_write(ma_rb* pRB, size_t offsetInBytes); +MA_API ma_int32 ma_rb_pointer_distance(ma_rb* pRB); /* Returns the distance between the write pointer and the read pointer. Should never be negative for a correct program. Will return the number of bytes that can be read before the read pointer hits the write pointer. */ +MA_API ma_uint32 ma_rb_available_read(ma_rb* pRB); +MA_API ma_uint32 ma_rb_available_write(ma_rb* pRB); +MA_API size_t ma_rb_get_subbuffer_size(ma_rb* pRB); +MA_API size_t ma_rb_get_subbuffer_stride(ma_rb* pRB); +MA_API size_t ma_rb_get_subbuffer_offset(ma_rb* pRB, size_t subbufferIndex); +MA_API void* ma_rb_get_subbuffer_ptr(ma_rb* pRB, size_t subbufferIndex, void* pBuffer); + + +typedef struct +{ + ma_data_source_base ds; + ma_rb rb; + ma_format format; + ma_uint32 channels; + ma_uint32 sampleRate; /* Not required for the ring buffer itself, but useful for associating the data with some sample rate, particularly for data sources. 
*/
+} ma_pcm_rb;
+
+MA_API ma_result ma_pcm_rb_init_ex(ma_format format, ma_uint32 channels, ma_uint32 subbufferSizeInFrames, ma_uint32 subbufferCount, ma_uint32 subbufferStrideInFrames, void* pOptionalPreallocatedBuffer, const ma_allocation_callbacks* pAllocationCallbacks, ma_pcm_rb* pRB);
+MA_API ma_result ma_pcm_rb_init(ma_format format, ma_uint32 channels, ma_uint32 bufferSizeInFrames, void* pOptionalPreallocatedBuffer, const ma_allocation_callbacks* pAllocationCallbacks, ma_pcm_rb* pRB);
+MA_API void ma_pcm_rb_uninit(ma_pcm_rb* pRB);
+MA_API void ma_pcm_rb_reset(ma_pcm_rb* pRB);
+MA_API ma_result ma_pcm_rb_acquire_read(ma_pcm_rb* pRB, ma_uint32* pSizeInFrames, void** ppBufferOut);
+MA_API ma_result ma_pcm_rb_commit_read(ma_pcm_rb* pRB, ma_uint32 sizeInFrames);
+MA_API ma_result ma_pcm_rb_acquire_write(ma_pcm_rb* pRB, ma_uint32* pSizeInFrames, void** ppBufferOut);
+MA_API ma_result ma_pcm_rb_commit_write(ma_pcm_rb* pRB, ma_uint32 sizeInFrames);
+MA_API ma_result ma_pcm_rb_seek_read(ma_pcm_rb* pRB, ma_uint32 offsetInFrames);
+MA_API ma_result ma_pcm_rb_seek_write(ma_pcm_rb* pRB, ma_uint32 offsetInFrames);
+MA_API ma_int32 ma_pcm_rb_pointer_distance(ma_pcm_rb* pRB); /* Return value is in frames. */
+MA_API ma_uint32 ma_pcm_rb_available_read(ma_pcm_rb* pRB);
+MA_API ma_uint32 ma_pcm_rb_available_write(ma_pcm_rb* pRB);
+MA_API ma_uint32 ma_pcm_rb_get_subbuffer_size(ma_pcm_rb* pRB);
+MA_API ma_uint32 ma_pcm_rb_get_subbuffer_stride(ma_pcm_rb* pRB);
+MA_API ma_uint32 ma_pcm_rb_get_subbuffer_offset(ma_pcm_rb* pRB, ma_uint32 subbufferIndex);
+MA_API void* ma_pcm_rb_get_subbuffer_ptr(ma_pcm_rb* pRB, ma_uint32 subbufferIndex, void* pBuffer);
+MA_API ma_format ma_pcm_rb_get_format(const ma_pcm_rb* pRB);
+MA_API ma_uint32 ma_pcm_rb_get_channels(const ma_pcm_rb* pRB);
+MA_API ma_uint32 ma_pcm_rb_get_sample_rate(const ma_pcm_rb* pRB);
+MA_API void ma_pcm_rb_set_sample_rate(ma_pcm_rb* pRB, ma_uint32 sampleRate);
+
+
+/*
+The idea of the duplex ring buffer is to act as the intermediary buffer when running two asynchronous devices in a duplex setup. The
+capture device writes to it, and then a playback device reads from it.
+
+At the moment this is just a simple naive implementation, but in the future I want to implement some dynamic resampling to seamlessly
+handle desyncs. Note that the API is work in progress and may change at any time in any version.
+
+The size of the buffer is based on the capture side since that's what'll be written to the buffer. It is based on the capture period size
+in frames. The internal sample rate of the capture device is also needed in order to calculate the size.
+*/
+typedef struct
+{
+    ma_pcm_rb rb;
+} ma_duplex_rb;
+
+MA_API ma_result ma_duplex_rb_init(ma_format captureFormat, ma_uint32 captureChannels, ma_uint32 sampleRate, ma_uint32 captureInternalSampleRate, ma_uint32 captureInternalPeriodSizeInFrames, const ma_allocation_callbacks* pAllocationCallbacks, ma_duplex_rb* pRB);
+MA_API ma_result ma_duplex_rb_uninit(ma_duplex_rb* pRB);
+
+
+/************************************************************************************************************************************************************
+
+Miscellaneous Helpers
+
+************************************************************************************************************************************************************/
+/*
+Retrieves a human readable description of the given result code.
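+
+For example (illustrative; `device` is assumed to be a configured ma_device):
+
+    ma_result result = ma_device_start(&device);
+    if (result != MA_SUCCESS) {
+        printf("Failed to start device: %s\n", ma_result_description(result));
+    }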
+*/
+MA_API const char* ma_result_description(ma_result result);
+
+/*
+malloc()
+*/
+MA_API void* ma_malloc(size_t sz, const ma_allocation_callbacks* pAllocationCallbacks);
+
+/*
+calloc()
+*/
+MA_API void* ma_calloc(size_t sz, const ma_allocation_callbacks* pAllocationCallbacks);
+
+/*
+realloc()
+*/
+MA_API void* ma_realloc(void* p, size_t sz, const ma_allocation_callbacks* pAllocationCallbacks);
+
+/*
+free()
+*/
+MA_API void ma_free(void* p, const ma_allocation_callbacks* pAllocationCallbacks);
+
+/*
+Performs an aligned malloc, with the assumption that the alignment is a power of 2.
+*/
+MA_API void* ma_aligned_malloc(size_t sz, size_t alignment, const ma_allocation_callbacks* pAllocationCallbacks);
+
+/*
+Frees an aligned malloc'd buffer.
+*/
+MA_API void ma_aligned_free(void* p, const ma_allocation_callbacks* pAllocationCallbacks);
+
+/*
+Retrieves a friendly name for a format.
+*/
+MA_API const char* ma_get_format_name(ma_format format);
+
+/*
+Blends two frames in floating point format.
+*/
+MA_API void ma_blend_f32(float* pOut, float* pInA, float* pInB, float factor, ma_uint32 channels);
+
+/*
+Retrieves the size of a sample in bytes for the given format.
+
+This API is efficient and is implemented using a lookup table.
+
+Thread Safety: SAFE
+    This API is pure.
+*/
+MA_API ma_uint32 ma_get_bytes_per_sample(ma_format format);
+static MA_INLINE ma_uint32 ma_get_bytes_per_frame(ma_format format, ma_uint32 channels) { return ma_get_bytes_per_sample(format) * channels; }
+
+/*
+Converts a log level to a string.
+*/
+MA_API const char* ma_log_level_to_string(ma_uint32 logLevel);
+
+
+
+
+/************************************************************************************************************************************************************
+
+Synchronization
+
+************************************************************************************************************************************************************/
+/*
+Locks a spinlock.
+*/
+MA_API ma_result ma_spinlock_lock(volatile ma_spinlock* pSpinlock);
+
+/*
+Locks a spinlock, but does not yield() when looping.
+*/
+MA_API ma_result ma_spinlock_lock_noyield(volatile ma_spinlock* pSpinlock);
+
+/*
+Unlocks a spinlock.
+*/
+MA_API ma_result ma_spinlock_unlock(volatile ma_spinlock* pSpinlock);
+
+
+#ifndef MA_NO_THREADING
+
+/*
+Creates a mutex.
+
+A mutex must be created from a valid context. A mutex is initially unlocked.
+*/
+MA_API ma_result ma_mutex_init(ma_mutex* pMutex);
+
+/*
+Deletes a mutex.
+*/
+MA_API void ma_mutex_uninit(ma_mutex* pMutex);
+
+/*
+Locks a mutex with an infinite timeout.
+*/
+MA_API void ma_mutex_lock(ma_mutex* pMutex);
+
+/*
+Unlocks a mutex.
+*/
+MA_API void ma_mutex_unlock(ma_mutex* pMutex);
+
+
+/*
+Initializes an auto-reset event.
+*/
+MA_API ma_result ma_event_init(ma_event* pEvent);
+
+/*
+Uninitializes an auto-reset event.
+*/
+MA_API void ma_event_uninit(ma_event* pEvent);
+
+/*
+Waits for the specified auto-reset event to become signalled.
+*/
+MA_API ma_result ma_event_wait(ma_event* pEvent);
+
+/*
+Signals the specified auto-reset event.
+*/
+MA_API ma_result ma_event_signal(ma_event* pEvent);
+
+
+MA_API ma_result ma_semaphore_init(int initialValue, ma_semaphore* pSemaphore);
+MA_API void ma_semaphore_uninit(ma_semaphore* pSemaphore);
+MA_API ma_result ma_semaphore_wait(ma_semaphore* pSemaphore);
+MA_API ma_result ma_semaphore_release(ma_semaphore* pSemaphore);
+#endif /* MA_NO_THREADING */
+
+
+/*
+Fence
+=====
+This locks while the counter is larger than 0.
Counter can be incremented and decremented by any +thread, but care needs to be taken when waiting. It is possible for one thread to acquire the +fence just as another thread returns from ma_fence_wait(). + +The idea behind a fence is to allow you to wait for a group of operations to complete. When an +operation starts, the counter is incremented which locks the fence. When the operation completes, +the fence will be released which decrements the counter. ma_fence_wait() will block until the +counter hits zero. + +If threading is disabled, ma_fence_wait() will spin on the counter. +*/ +typedef struct +{ +#ifndef MA_NO_THREADING + ma_event e; +#endif + ma_uint32 counter; +} ma_fence; + +MA_API ma_result ma_fence_init(ma_fence* pFence); +MA_API void ma_fence_uninit(ma_fence* pFence); +MA_API ma_result ma_fence_acquire(ma_fence* pFence); /* Increment counter. */ +MA_API ma_result ma_fence_release(ma_fence* pFence); /* Decrement counter. */ +MA_API ma_result ma_fence_wait(ma_fence* pFence); /* Wait for counter to reach 0. */ + + + +/* +Notification callback for asynchronous operations. +*/ +typedef void ma_async_notification; + +typedef struct +{ + void (* onSignal)(ma_async_notification* pNotification); +} ma_async_notification_callbacks; + +MA_API ma_result ma_async_notification_signal(ma_async_notification* pNotification); + + +/* +Simple polling notification. + +This just sets a variable when the notification has been signalled which is then polled with ma_async_notification_poll_is_signalled() +*/ +typedef struct +{ + ma_async_notification_callbacks cb; + ma_bool32 signalled; +} ma_async_notification_poll; + +MA_API ma_result ma_async_notification_poll_init(ma_async_notification_poll* pNotificationPoll); +MA_API ma_bool32 ma_async_notification_poll_is_signalled(const ma_async_notification_poll* pNotificationPoll); + + +/* +Event Notification + +This uses an ma_event. If threading is disabled (MA_NO_THREADING), initialization will fail. +*/ +typedef struct +{ + ma_async_notification_callbacks cb; +#ifndef MA_NO_THREADING + ma_event e; +#endif +} ma_async_notification_event; + +MA_API ma_result ma_async_notification_event_init(ma_async_notification_event* pNotificationEvent); +MA_API ma_result ma_async_notification_event_uninit(ma_async_notification_event* pNotificationEvent); +MA_API ma_result ma_async_notification_event_wait(ma_async_notification_event* pNotificationEvent); +MA_API ma_result ma_async_notification_event_signal(ma_async_notification_event* pNotificationEvent); + + + + +/************************************************************************************************************************************************************ + +Job Queue + +************************************************************************************************************************************************************/ + +/* +Slot Allocator +-------------- +The idea of the slot allocator is for it to be used in conjunction with a fixed sized buffer. You use the slot allocator to allocate an index that can be used +as the insertion point for an object. + +Slots are reference counted to help mitigate the ABA problem in the lock-free queue we use for tracking jobs. + +The slot index is stored in the low 32 bits. 
The reference counter is stored in the high 32 bits:
+
+    +-----------------+-----------------+
+    | 32 Bits         | 32 Bits         |
+    +-----------------+-----------------+
+    | Reference Count | Slot Index      |
+    +-----------------+-----------------+
+*/
+typedef struct
+{
+    ma_uint32 capacity; /* The number of slots to make available. */
+} ma_slot_allocator_config;
+
+MA_API ma_slot_allocator_config ma_slot_allocator_config_init(ma_uint32 capacity);
+
+
+typedef struct
+{
+    MA_ATOMIC(4, ma_uint32) bitfield; /* Must be used atomically because the allocation and freeing routines need to make copies of this which must never be optimized away by the compiler. */
+} ma_slot_allocator_group;
+
+typedef struct
+{
+    ma_slot_allocator_group* pGroups; /* Slots are grouped in chunks of 32. */
+    ma_uint32* pSlots; /* 32 bits for reference counting for ABA mitigation. */
+    ma_uint32 count; /* Allocation count. */
+    ma_uint32 capacity;
+
+    /* Memory management. */
+    ma_bool32 _ownsHeap;
+    void* _pHeap;
+} ma_slot_allocator;
+
+MA_API ma_result ma_slot_allocator_get_heap_size(const ma_slot_allocator_config* pConfig, size_t* pHeapSizeInBytes);
+MA_API ma_result ma_slot_allocator_init_preallocated(const ma_slot_allocator_config* pConfig, void* pHeap, ma_slot_allocator* pAllocator);
+MA_API ma_result ma_slot_allocator_init(const ma_slot_allocator_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_slot_allocator* pAllocator);
+MA_API void ma_slot_allocator_uninit(ma_slot_allocator* pAllocator, const ma_allocation_callbacks* pAllocationCallbacks);
+MA_API ma_result ma_slot_allocator_alloc(ma_slot_allocator* pAllocator, ma_uint64* pSlot);
+MA_API ma_result ma_slot_allocator_free(ma_slot_allocator* pAllocator, ma_uint64 slot);
+
+
+typedef struct ma_job ma_job;
+
+/*
+Callback for processing a job. Each job type will have its own processing callback which will be
+called by ma_job_process().
+*/
+typedef ma_result (* ma_job_proc)(ma_job* pJob);
+
+/* When a job type is added here, a callback needs to be added to "g_jobVTable" in the implementation section. */
+typedef enum
+{
+    /* Miscellaneous. */
+    MA_JOB_TYPE_QUIT = 0,
+    MA_JOB_TYPE_CUSTOM,
+
+    /* Resource Manager. */
+    MA_JOB_TYPE_RESOURCE_MANAGER_LOAD_DATA_BUFFER_NODE,
+    MA_JOB_TYPE_RESOURCE_MANAGER_FREE_DATA_BUFFER_NODE,
+    MA_JOB_TYPE_RESOURCE_MANAGER_PAGE_DATA_BUFFER_NODE,
+    MA_JOB_TYPE_RESOURCE_MANAGER_LOAD_DATA_BUFFER,
+    MA_JOB_TYPE_RESOURCE_MANAGER_FREE_DATA_BUFFER,
+    MA_JOB_TYPE_RESOURCE_MANAGER_LOAD_DATA_STREAM,
+    MA_JOB_TYPE_RESOURCE_MANAGER_FREE_DATA_STREAM,
+    MA_JOB_TYPE_RESOURCE_MANAGER_PAGE_DATA_STREAM,
+    MA_JOB_TYPE_RESOURCE_MANAGER_SEEK_DATA_STREAM,
+
+    /* Device. */
+    MA_JOB_TYPE_DEVICE_AAUDIO_REROUTE,
+
+    /* Count. Must always be last. */
+    MA_JOB_TYPE_COUNT
+} ma_job_type;
+
+struct ma_job
+{
+    union
+    {
+        struct
+        {
+            ma_uint16 code; /* Job type. */
+            ma_uint16 slot; /* Index into a ma_slot_allocator. */
+            ma_uint32 refcount;
+        } breakup;
+        ma_uint64 allocation;
+    } toc; /* 8 bytes. We encode the job code into the slot allocation data to save space. */
+    MA_ATOMIC(8, ma_uint64) next; /* refcount + slot for the next item. Does not include the job code. */
+    ma_uint32 order; /* Execution order. Used to create a data dependency and ensure a job is executed in order. Usage is contextual depending on the job type. */
+
+    union
+    {
+        /* Miscellaneous.
*/ + struct + { + ma_job_proc proc; + ma_uintptr data0; + ma_uintptr data1; + } custom; + + /* Resource Manager */ + union + { + struct + { + /*ma_resource_manager**/ void* pResourceManager; + /*ma_resource_manager_data_buffer_node**/ void* pDataBufferNode; + char* pFilePath; + wchar_t* pFilePathW; + ma_uint32 flags; /* Resource manager data source flags that were used when initializing the data buffer. */ + ma_async_notification* pInitNotification; /* Signalled when the data buffer has been initialized and the format/channels/rate can be retrieved. */ + ma_async_notification* pDoneNotification; /* Signalled when the data buffer has been fully decoded. Will be passed through to MA_JOB_TYPE_RESOURCE_MANAGER_PAGE_DATA_BUFFER_NODE when decoding. */ + ma_fence* pInitFence; /* Released when initialization of the decoder is complete. */ + ma_fence* pDoneFence; /* Released if initialization of the decoder fails. Passed through to PAGE_DATA_BUFFER_NODE untouched if init is successful. */ + } loadDataBufferNode; + struct + { + /*ma_resource_manager**/ void* pResourceManager; + /*ma_resource_manager_data_buffer_node**/ void* pDataBufferNode; + ma_async_notification* pDoneNotification; + ma_fence* pDoneFence; + } freeDataBufferNode; + struct + { + /*ma_resource_manager**/ void* pResourceManager; + /*ma_resource_manager_data_buffer_node**/ void* pDataBufferNode; + /*ma_decoder**/ void* pDecoder; + ma_async_notification* pDoneNotification; /* Signalled when the data buffer has been fully decoded. */ + ma_fence* pDoneFence; /* Passed through from LOAD_DATA_BUFFER_NODE and released when the data buffer completes decoding or an error occurs. */ + } pageDataBufferNode; + + struct + { + /*ma_resource_manager_data_buffer**/ void* pDataBuffer; + ma_async_notification* pInitNotification; /* Signalled when the data buffer has been initialized and the format/channels/rate can be retrieved. */ + ma_async_notification* pDoneNotification; /* Signalled when the data buffer has been fully decoded. */ + ma_fence* pInitFence; /* Released when the data buffer has been initialized and the format/channels/rate can be retrieved. */ + ma_fence* pDoneFence; /* Released when the data buffer has been fully decoded. */ + ma_uint64 rangeBegInPCMFrames; + ma_uint64 rangeEndInPCMFrames; + ma_uint64 loopPointBegInPCMFrames; + ma_uint64 loopPointEndInPCMFrames; + ma_uint32 isLooping; + } loadDataBuffer; + struct + { + /*ma_resource_manager_data_buffer**/ void* pDataBuffer; + ma_async_notification* pDoneNotification; + ma_fence* pDoneFence; + } freeDataBuffer; + + struct + { + /*ma_resource_manager_data_stream**/ void* pDataStream; + char* pFilePath; /* Allocated when the job is posted, freed by the job thread after loading. */ + wchar_t* pFilePathW; /* ^ As above ^. Only used if pFilePath is NULL. */ + ma_uint64 initialSeekPoint; + ma_async_notification* pInitNotification; /* Signalled after the first two pages have been decoded and frames can be read from the stream. */ + ma_fence* pInitFence; + } loadDataStream; + struct + { + /*ma_resource_manager_data_stream**/ void* pDataStream; + ma_async_notification* pDoneNotification; + ma_fence* pDoneFence; + } freeDataStream; + struct + { + /*ma_resource_manager_data_stream**/ void* pDataStream; + ma_uint32 pageIndex; /* The index of the page to decode into. */ + } pageDataStream; + struct + { + /*ma_resource_manager_data_stream**/ void* pDataStream; + ma_uint64 frameIndex; + } seekDataStream; + } resourceManager; + + /* Device. 
*/ + union + { + union + { + struct + { + /*ma_device**/ void* pDevice; + /*ma_device_type*/ ma_uint32 deviceType; + } reroute; + } aaudio; + } device; + } data; +}; + +MA_API ma_job ma_job_init(ma_uint16 code); +MA_API ma_result ma_job_process(ma_job* pJob); + + +/* +When set, ma_job_queue_next() will not wait and no semaphore will be signaled in +ma_job_queue_post(). ma_job_queue_next() will return MA_NO_DATA_AVAILABLE if nothing is available. + +This flag should always be used for platforms that do not support multithreading. +*/ +typedef enum +{ + MA_JOB_QUEUE_FLAG_NON_BLOCKING = 0x00000001 +} ma_job_queue_flags; + +typedef struct +{ + ma_uint32 flags; + ma_uint32 capacity; /* The maximum number of jobs that can fit in the queue at a time. */ +} ma_job_queue_config; + +MA_API ma_job_queue_config ma_job_queue_config_init(ma_uint32 flags, ma_uint32 capacity); + + +typedef struct +{ + ma_uint32 flags; /* Flags passed in at initialization time. */ + ma_uint32 capacity; /* The maximum number of jobs that can fit in the queue at a time. Set by the config. */ + MA_ATOMIC(8, ma_uint64) head; /* The first item in the list. Required for removing from the top of the list. */ + MA_ATOMIC(8, ma_uint64) tail; /* The last item in the list. Required for appending to the end of the list. */ +#ifndef MA_NO_THREADING + ma_semaphore sem; /* Only used when MA_JOB_QUEUE_FLAG_NON_BLOCKING is unset. */ +#endif + ma_slot_allocator allocator; + ma_job* pJobs; +#ifndef MA_USE_EXPERIMENTAL_LOCK_FREE_JOB_QUEUE + ma_spinlock lock; +#endif + + /* Memory management. */ + void* _pHeap; + ma_bool32 _ownsHeap; +} ma_job_queue; + +MA_API ma_result ma_job_queue_get_heap_size(const ma_job_queue_config* pConfig, size_t* pHeapSizeInBytes); +MA_API ma_result ma_job_queue_init_preallocated(const ma_job_queue_config* pConfig, void* pHeap, ma_job_queue* pQueue); +MA_API ma_result ma_job_queue_init(const ma_job_queue_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_job_queue* pQueue); +MA_API void ma_job_queue_uninit(ma_job_queue* pQueue, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API ma_result ma_job_queue_post(ma_job_queue* pQueue, const ma_job* pJob); +MA_API ma_result ma_job_queue_next(ma_job_queue* pQueue, ma_job* pJob); /* Returns MA_CANCELLED if the next job is a quit job. */ + + + +/************************************************************************************************************************************************************ +************************************************************************************************************************************************************* + +DEVICE I/O +========== + +This section contains the APIs for device playback and capture. Here is where you'll find ma_device_init(), etc. + +************************************************************************************************************************************************************* +************************************************************************************************************************************************************/ +#ifndef MA_NO_DEVICE_IO +/* Some backends are only supported on certain platforms. */ +#if defined(MA_WIN32) + #define MA_SUPPORT_WASAPI + + #if defined(MA_WIN32_DESKTOP) /* DirectSound and WinMM backends are only supported on desktops. */ + #define MA_SUPPORT_DSOUND + #define MA_SUPPORT_WINMM + + /* Don't enable JACK here if compiling with Cosmopolitan. It'll be enabled in the Linux section below. 
*/ + #if !defined(__COSMOPOLITAN__) + #define MA_SUPPORT_JACK /* JACK is technically supported on Windows, but I don't know how many people use it in practice... */ + #endif + #endif +#endif +#if defined(MA_UNIX) && !defined(MA_ORBIS) && !defined(MA_PROSPERO) + #if defined(MA_LINUX) + #if !defined(MA_ANDROID) && !defined(__COSMOPOLITAN__) /* ALSA is not supported on Android. */ + #define MA_SUPPORT_ALSA + #endif + #endif + #if !defined(MA_BSD) && !defined(MA_ANDROID) && !defined(MA_EMSCRIPTEN) + #define MA_SUPPORT_PULSEAUDIO + #define MA_SUPPORT_JACK + #endif + #if defined(__OpenBSD__) /* <-- Change this to "#if defined(MA_BSD)" to enable sndio on all BSD flavors. */ + #define MA_SUPPORT_SNDIO /* sndio is only supported on OpenBSD for now. May be expanded later if there's demand. */ + #endif + #if defined(__NetBSD__) || defined(__OpenBSD__) + #define MA_SUPPORT_AUDIO4 /* Only support audio(4) on platforms with known support. */ + #endif + #if defined(__FreeBSD__) || defined(__DragonFly__) + #define MA_SUPPORT_OSS /* Only support OSS on specific platforms with known support. */ + #endif +#endif +#if defined(MA_ANDROID) + #define MA_SUPPORT_AAUDIO + #define MA_SUPPORT_OPENSL +#endif +#if defined(MA_APPLE) + #define MA_SUPPORT_COREAUDIO +#endif +#if defined(MA_EMSCRIPTEN) + #define MA_SUPPORT_WEBAUDIO +#endif + +/* All platforms should support custom backends. */ +#define MA_SUPPORT_CUSTOM + +/* Explicitly disable the Null backend for Emscripten because it uses a background thread which is not properly supported right now. */ +#if !defined(MA_EMSCRIPTEN) +#define MA_SUPPORT_NULL +#endif + + +#if defined(MA_SUPPORT_WASAPI) && !defined(MA_NO_WASAPI) && (!defined(MA_ENABLE_ONLY_SPECIFIC_BACKENDS) || defined(MA_ENABLE_WASAPI)) + #define MA_HAS_WASAPI +#endif +#if defined(MA_SUPPORT_DSOUND) && !defined(MA_NO_DSOUND) && (!defined(MA_ENABLE_ONLY_SPECIFIC_BACKENDS) || defined(MA_ENABLE_DSOUND)) + #define MA_HAS_DSOUND +#endif +#if defined(MA_SUPPORT_WINMM) && !defined(MA_NO_WINMM) && (!defined(MA_ENABLE_ONLY_SPECIFIC_BACKENDS) || defined(MA_ENABLE_WINMM)) + #define MA_HAS_WINMM +#endif +#if defined(MA_SUPPORT_ALSA) && !defined(MA_NO_ALSA) && (!defined(MA_ENABLE_ONLY_SPECIFIC_BACKENDS) || defined(MA_ENABLE_ALSA)) + #define MA_HAS_ALSA +#endif +#if defined(MA_SUPPORT_PULSEAUDIO) && !defined(MA_NO_PULSEAUDIO) && (!defined(MA_ENABLE_ONLY_SPECIFIC_BACKENDS) || defined(MA_ENABLE_PULSEAUDIO)) + #define MA_HAS_PULSEAUDIO +#endif +#if defined(MA_SUPPORT_JACK) && !defined(MA_NO_JACK) && (!defined(MA_ENABLE_ONLY_SPECIFIC_BACKENDS) || defined(MA_ENABLE_JACK)) + #define MA_HAS_JACK +#endif +#if defined(MA_SUPPORT_COREAUDIO) && !defined(MA_NO_COREAUDIO) && (!defined(MA_ENABLE_ONLY_SPECIFIC_BACKENDS) || defined(MA_ENABLE_COREAUDIO)) + #define MA_HAS_COREAUDIO +#endif +#if defined(MA_SUPPORT_SNDIO) && !defined(MA_NO_SNDIO) && (!defined(MA_ENABLE_ONLY_SPECIFIC_BACKENDS) || defined(MA_ENABLE_SNDIO)) + #define MA_HAS_SNDIO +#endif +#if defined(MA_SUPPORT_AUDIO4) && !defined(MA_NO_AUDIO4) && (!defined(MA_ENABLE_ONLY_SPECIFIC_BACKENDS) || defined(MA_ENABLE_AUDIO4)) + #define MA_HAS_AUDIO4 +#endif +#if defined(MA_SUPPORT_OSS) && !defined(MA_NO_OSS) && (!defined(MA_ENABLE_ONLY_SPECIFIC_BACKENDS) || defined(MA_ENABLE_OSS)) + #define MA_HAS_OSS +#endif +#if defined(MA_SUPPORT_AAUDIO) && !defined(MA_NO_AAUDIO) && (!defined(MA_ENABLE_ONLY_SPECIFIC_BACKENDS) || defined(MA_ENABLE_AAUDIO)) + #define MA_HAS_AAUDIO +#endif +#if defined(MA_SUPPORT_OPENSL) && !defined(MA_NO_OPENSL) && (!defined(MA_ENABLE_ONLY_SPECIFIC_BACKENDS) || 
defined(MA_ENABLE_OPENSL))
+    #define MA_HAS_OPENSL
+#endif
+#if defined(MA_SUPPORT_WEBAUDIO) && !defined(MA_NO_WEBAUDIO) && (!defined(MA_ENABLE_ONLY_SPECIFIC_BACKENDS) || defined(MA_ENABLE_WEBAUDIO))
+    #define MA_HAS_WEBAUDIO
+#endif
+#if defined(MA_SUPPORT_CUSTOM) && !defined(MA_NO_CUSTOM) && (!defined(MA_ENABLE_ONLY_SPECIFIC_BACKENDS) || defined(MA_ENABLE_CUSTOM))
+    #define MA_HAS_CUSTOM
+#endif
+#if defined(MA_SUPPORT_NULL) && !defined(MA_NO_NULL) && (!defined(MA_ENABLE_ONLY_SPECIFIC_BACKENDS) || defined(MA_ENABLE_NULL))
+    #define MA_HAS_NULL
+#endif
+
+typedef enum
+{
+    ma_device_state_uninitialized = 0,
+    ma_device_state_stopped = 1, /* The device's default state after initialization. */
+    ma_device_state_started = 2, /* The device is started and is requesting and/or delivering audio data. */
+    ma_device_state_starting = 3, /* Transitioning from a stopped state to started. */
+    ma_device_state_stopping = 4 /* Transitioning from a started state to stopped. */
+} ma_device_state;
+
+MA_ATOMIC_SAFE_TYPE_DECL(i32, 4, device_state)
+
+
+#ifdef MA_SUPPORT_WASAPI
+/* We need an IMMNotificationClient object for WASAPI. */
+typedef struct
+{
+    void* lpVtbl;
+    ma_uint32 counter;
+    ma_device* pDevice;
+} ma_IMMNotificationClient;
+#endif
+
+/* Backend enums must be in priority order. */
+typedef enum
+{
+    ma_backend_wasapi,
+    ma_backend_dsound,
+    ma_backend_winmm,
+    ma_backend_coreaudio,
+    ma_backend_sndio,
+    ma_backend_audio4,
+    ma_backend_oss,
+    ma_backend_pulseaudio,
+    ma_backend_alsa,
+    ma_backend_jack,
+    ma_backend_aaudio,
+    ma_backend_opensl,
+    ma_backend_webaudio,
+    ma_backend_custom, /* <-- Custom backend, with callbacks defined by the context config. */
+    ma_backend_null /* <-- Must always be the last item. Lowest priority, and used as the terminator for backend enumeration. */
+} ma_backend;
+
+#define MA_BACKEND_COUNT (ma_backend_null+1)
+
+
+/*
+Device job thread. This is used by backends that require asynchronous processing of certain
+operations. It is not used by all backends.
+
+The device job thread is made up of a thread and a job queue. You can post a job to the thread with
+ma_device_job_thread_post(). The thread will do the processing of the job.
+*/
+typedef struct
+{
+    ma_bool32 noThread; /* Set this to true if you want to process jobs yourself. */
+    ma_uint32 jobQueueCapacity;
+    ma_uint32 jobQueueFlags;
+} ma_device_job_thread_config;
+
+MA_API ma_device_job_thread_config ma_device_job_thread_config_init(void);
+
+typedef struct
+{
+    ma_thread thread;
+    ma_job_queue jobQueue;
+    ma_bool32 _hasThread;
+} ma_device_job_thread;
+
+MA_API ma_result ma_device_job_thread_init(const ma_device_job_thread_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_device_job_thread* pJobThread);
+MA_API void ma_device_job_thread_uninit(ma_device_job_thread* pJobThread, const ma_allocation_callbacks* pAllocationCallbacks);
+MA_API ma_result ma_device_job_thread_post(ma_device_job_thread* pJobThread, const ma_job* pJob);
+MA_API ma_result ma_device_job_thread_next(ma_device_job_thread* pJobThread, ma_job* pJob);
+
+
+
+/* Device notification types.
*/ +typedef enum +{ + ma_device_notification_type_started, + ma_device_notification_type_stopped, + ma_device_notification_type_rerouted, + ma_device_notification_type_interruption_began, + ma_device_notification_type_interruption_ended, + ma_device_notification_type_unlocked +} ma_device_notification_type; + +typedef struct +{ + ma_device* pDevice; + ma_device_notification_type type; + union + { + struct + { + int _unused; + } started; + struct + { + int _unused; + } stopped; + struct + { + int _unused; + } rerouted; + struct + { + int _unused; + } interruption; + } data; +} ma_device_notification; + +/* +The notification callback for when the application should be notified of a change to the device. + +This callback is used for notifying the application of changes such as when the device has started, +stopped, rerouted or an interruption has occurred. Note that not all backends will post all +notification types. For example, some backends will perform automatic stream routing without any +kind of notification to the host program which means miniaudio will never know about it and will +never be able to fire the rerouted notification. You should keep this in mind when designing your +program. + +The stopped notification will *not* get fired when a device is rerouted. + + +Parameters +---------- +pNotification (in) + A pointer to a structure containing information about the event. Use the `pDevice` member of + this object to retrieve the relevant device. The `type` member can be used to discriminate + against each of the notification types. + + +Remarks +------- +Do not restart or uninitialize the device from the callback. + +Not all notifications will be triggered by all backends, however the started and stopped events +should be reliable for all backends. Some backends do not have a good way to detect device +stoppages due to unplugging the device which may result in the stopped callback not getting +fired. This has been observed with at least one BSD variant. + +The rerouted notification is fired *after* the reroute has occurred. The stopped notification will +*not* get fired when a device is rerouted. The following backends are known to do automatic stream +rerouting, but do not have a way to be notified of the change: + + * DirectSound + +The interruption notifications are used on mobile platforms for detecting when audio is interrupted +due to things like an incoming phone call. Currently this is only implemented on iOS. None of the +Android backends will report this notification. +*/ +typedef void (* ma_device_notification_proc)(const ma_device_notification* pNotification); + + +/* +The callback for processing audio data from the device. + +The data callback is fired by miniaudio whenever the device needs to have more data delivered to a playback device, or when a capture device has some data +available. This is called as soon as the backend asks for more data which means it may be called with inconsistent frame counts. You cannot assume the +callback will be fired with a consistent frame count. + + +Parameters +---------- +pDevice (in) + A pointer to the relevant device. + +pOutput (out) + A pointer to the output buffer that will receive audio data that will later be played back through the speakers. This will be non-null for a playback or + full-duplex device and null for a capture and loopback device. + +pInput (in) + A pointer to the buffer containing input data from a recording device. 
This will be non-null for a capture, full-duplex or loopback device and null for a + playback device. + +frameCount (in) + The number of PCM frames to process. Note that this will not necessarily be equal to what you requested when you initialized the device. The + `periodSizeInFrames` and `periodSizeInMilliseconds` members of the device config are just hints, and are not necessarily exactly what you'll get. You must + not assume this will always be the same value each time the callback is fired. + + +Remarks +------- +You cannot stop and start the device from inside the callback or else you'll get a deadlock. You must also not uninitialize the device from inside the +callback. The following APIs cannot be called from inside the callback: + + ma_device_init() + ma_device_init_ex() + ma_device_uninit() + ma_device_start() + ma_device_stop() + +The proper way to stop the device is to call `ma_device_stop()` from a different thread, normally the main application thread. +*/ +typedef void (* ma_device_data_proc)(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount); + + + + +/* +DEPRECATED. Use ma_device_notification_proc instead. + +The callback for when the device has been stopped. + +This will be called when the device is stopped explicitly with `ma_device_stop()` and also called implicitly when the device is stopped through external forces +such as being unplugged or an internal error occurring. + + +Parameters +---------- +pDevice (in) + A pointer to the device that has just stopped. + + +Remarks +------- +Do not restart or uninitialize the device from the callback. +*/ +typedef void (* ma_stop_proc)(ma_device* pDevice); /* DEPRECATED. Use ma_device_notification_proc instead. */ + +typedef enum +{ + ma_device_type_playback = 1, + ma_device_type_capture = 2, + ma_device_type_duplex = ma_device_type_playback | ma_device_type_capture, /* 3 */ + ma_device_type_loopback = 4 +} ma_device_type; + +typedef enum +{ + ma_share_mode_shared = 0, + ma_share_mode_exclusive +} ma_share_mode; + +/* iOS/tvOS/watchOS session categories. */ +typedef enum +{ + ma_ios_session_category_default = 0, /* AVAudioSessionCategoryPlayAndRecord. */ + ma_ios_session_category_none, /* Leave the session category unchanged. 
*/ + ma_ios_session_category_ambient, /* AVAudioSessionCategoryAmbient */ + ma_ios_session_category_solo_ambient, /* AVAudioSessionCategorySoloAmbient */ + ma_ios_session_category_playback, /* AVAudioSessionCategoryPlayback */ + ma_ios_session_category_record, /* AVAudioSessionCategoryRecord */ + ma_ios_session_category_play_and_record, /* AVAudioSessionCategoryPlayAndRecord */ + ma_ios_session_category_multi_route /* AVAudioSessionCategoryMultiRoute */ +} ma_ios_session_category; + +/* iOS/tvOS/watchOS session category options */ +typedef enum +{ + ma_ios_session_category_option_mix_with_others = 0x01, /* AVAudioSessionCategoryOptionMixWithOthers */ + ma_ios_session_category_option_duck_others = 0x02, /* AVAudioSessionCategoryOptionDuckOthers */ + ma_ios_session_category_option_allow_bluetooth = 0x04, /* AVAudioSessionCategoryOptionAllowBluetooth */ + ma_ios_session_category_option_default_to_speaker = 0x08, /* AVAudioSessionCategoryOptionDefaultToSpeaker */ + ma_ios_session_category_option_interrupt_spoken_audio_and_mix_with_others = 0x11, /* AVAudioSessionCategoryOptionInterruptSpokenAudioAndMixWithOthers */ + ma_ios_session_category_option_allow_bluetooth_a2dp = 0x20, /* AVAudioSessionCategoryOptionAllowBluetoothA2DP */ + ma_ios_session_category_option_allow_air_play = 0x40, /* AVAudioSessionCategoryOptionAllowAirPlay */ +} ma_ios_session_category_option; + +/* OpenSL stream types. */ +typedef enum +{ + ma_opensl_stream_type_default = 0, /* Leaves the stream type unset. */ + ma_opensl_stream_type_voice, /* SL_ANDROID_STREAM_VOICE */ + ma_opensl_stream_type_system, /* SL_ANDROID_STREAM_SYSTEM */ + ma_opensl_stream_type_ring, /* SL_ANDROID_STREAM_RING */ + ma_opensl_stream_type_media, /* SL_ANDROID_STREAM_MEDIA */ + ma_opensl_stream_type_alarm, /* SL_ANDROID_STREAM_ALARM */ + ma_opensl_stream_type_notification /* SL_ANDROID_STREAM_NOTIFICATION */ +} ma_opensl_stream_type; + +/* OpenSL recording presets. */ +typedef enum +{ + ma_opensl_recording_preset_default = 0, /* Leaves the input preset unset. */ + ma_opensl_recording_preset_generic, /* SL_ANDROID_RECORDING_PRESET_GENERIC */ + ma_opensl_recording_preset_camcorder, /* SL_ANDROID_RECORDING_PRESET_CAMCORDER */ + ma_opensl_recording_preset_voice_recognition, /* SL_ANDROID_RECORDING_PRESET_VOICE_RECOGNITION */ + ma_opensl_recording_preset_voice_communication, /* SL_ANDROID_RECORDING_PRESET_VOICE_COMMUNICATION */ + ma_opensl_recording_preset_voice_unprocessed /* SL_ANDROID_RECORDING_PRESET_UNPROCESSED */ +} ma_opensl_recording_preset; + +/* WASAPI audio thread priority characteristics. */ +typedef enum +{ + ma_wasapi_usage_default = 0, + ma_wasapi_usage_games, + ma_wasapi_usage_pro_audio, +} ma_wasapi_usage; + +/* AAudio usage types. */ +typedef enum +{ + ma_aaudio_usage_default = 0, /* Leaves the usage type unset. 
*/ + ma_aaudio_usage_media, /* AAUDIO_USAGE_MEDIA */ + ma_aaudio_usage_voice_communication, /* AAUDIO_USAGE_VOICE_COMMUNICATION */ + ma_aaudio_usage_voice_communication_signalling, /* AAUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING */ + ma_aaudio_usage_alarm, /* AAUDIO_USAGE_ALARM */ + ma_aaudio_usage_notification, /* AAUDIO_USAGE_NOTIFICATION */ + ma_aaudio_usage_notification_ringtone, /* AAUDIO_USAGE_NOTIFICATION_RINGTONE */ + ma_aaudio_usage_notification_event, /* AAUDIO_USAGE_NOTIFICATION_EVENT */ + ma_aaudio_usage_assistance_accessibility, /* AAUDIO_USAGE_ASSISTANCE_ACCESSIBILITY */ + ma_aaudio_usage_assistance_navigation_guidance, /* AAUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE */ + ma_aaudio_usage_assistance_sonification, /* AAUDIO_USAGE_ASSISTANCE_SONIFICATION */ + ma_aaudio_usage_game, /* AAUDIO_USAGE_GAME */ + ma_aaudio_usage_assitant, /* AAUDIO_USAGE_ASSISTANT */ + ma_aaudio_usage_emergency, /* AAUDIO_SYSTEM_USAGE_EMERGENCY */ + ma_aaudio_usage_safety, /* AAUDIO_SYSTEM_USAGE_SAFETY */ + ma_aaudio_usage_vehicle_status, /* AAUDIO_SYSTEM_USAGE_VEHICLE_STATUS */ + ma_aaudio_usage_announcement /* AAUDIO_SYSTEM_USAGE_ANNOUNCEMENT */ +} ma_aaudio_usage; + +/* AAudio content types. */ +typedef enum +{ + ma_aaudio_content_type_default = 0, /* Leaves the content type unset. */ + ma_aaudio_content_type_speech, /* AAUDIO_CONTENT_TYPE_SPEECH */ + ma_aaudio_content_type_music, /* AAUDIO_CONTENT_TYPE_MUSIC */ + ma_aaudio_content_type_movie, /* AAUDIO_CONTENT_TYPE_MOVIE */ + ma_aaudio_content_type_sonification /* AAUDIO_CONTENT_TYPE_SONIFICATION */ +} ma_aaudio_content_type; + +/* AAudio input presets. */ +typedef enum +{ + ma_aaudio_input_preset_default = 0, /* Leaves the input preset unset. */ + ma_aaudio_input_preset_generic, /* AAUDIO_INPUT_PRESET_GENERIC */ + ma_aaudio_input_preset_camcorder, /* AAUDIO_INPUT_PRESET_CAMCORDER */ + ma_aaudio_input_preset_voice_recognition, /* AAUDIO_INPUT_PRESET_VOICE_RECOGNITION */ + ma_aaudio_input_preset_voice_communication, /* AAUDIO_INPUT_PRESET_VOICE_COMMUNICATION */ + ma_aaudio_input_preset_unprocessed, /* AAUDIO_INPUT_PRESET_UNPROCESSED */ + ma_aaudio_input_preset_voice_performance /* AAUDIO_INPUT_PRESET_VOICE_PERFORMANCE */ +} ma_aaudio_input_preset; + +typedef enum +{ + ma_aaudio_allow_capture_default = 0, /* Leaves the allowed capture policy unset. */ + ma_aaudio_allow_capture_by_all, /* AAUDIO_ALLOW_CAPTURE_BY_ALL */ + ma_aaudio_allow_capture_by_system, /* AAUDIO_ALLOW_CAPTURE_BY_SYSTEM */ + ma_aaudio_allow_capture_by_none /* AAUDIO_ALLOW_CAPTURE_BY_NONE */ +} ma_aaudio_allowed_capture_policy; + +typedef union +{ + ma_int64 counter; + double counterD; +} ma_timer; + +typedef union +{ + ma_wchar_win32 wasapi[64]; /* WASAPI uses a wchar_t string for identification. */ + ma_uint8 dsound[16]; /* DirectSound uses a GUID for identification. */ + /*UINT_PTR*/ ma_uint32 winmm; /* When creating a device, WinMM expects a Win32 UINT_PTR for device identification. In practice it's actually just a UINT. */ + char alsa[256]; /* ALSA uses a name string for identification. */ + char pulse[256]; /* PulseAudio uses a name string for identification. */ + int jack; /* JACK always uses default devices. */ + char coreaudio[256]; /* Core Audio uses a string for identification. */ + char sndio[256]; /* "snd/0", etc. */ + char audio4[256]; /* "/dev/audio", etc. */ + char oss[64]; /* "dev/dsp0", etc. "dev/dsp" for the default device. */ + ma_int32 aaudio; /* AAudio uses a 32-bit integer for identification. 
*/ + ma_uint32 opensl; /* OpenSL|ES uses a 32-bit unsigned integer for identification. */ + char webaudio[32]; /* Web Audio always uses default devices for now, but if this changes it'll be a GUID. */ + union + { + int i; + char s[256]; + void* p; + } custom; /* The custom backend could be anything. Give them a few options. */ + int nullbackend; /* The null backend uses an integer for device IDs. */ +} ma_device_id; + +MA_API ma_bool32 ma_device_id_equal(const ma_device_id* pA, const ma_device_id* pB); + + +typedef struct ma_context_config ma_context_config; +typedef struct ma_device_config ma_device_config; +typedef struct ma_backend_callbacks ma_backend_callbacks; + +#define MA_DATA_FORMAT_FLAG_EXCLUSIVE_MODE (1U << 1) /* If set, this is supported in exclusive mode. Otherwise not natively supported by exclusive mode. */ + +#ifndef MA_MAX_DEVICE_NAME_LENGTH +#define MA_MAX_DEVICE_NAME_LENGTH 255 +#endif + +typedef struct +{ + /* Basic info. This is the only information guaranteed to be filled in during device enumeration. */ + ma_device_id id; + char name[MA_MAX_DEVICE_NAME_LENGTH + 1]; /* +1 for null terminator. */ + ma_bool32 isDefault; + + ma_uint32 nativeDataFormatCount; + struct + { + ma_format format; /* Sample format. If set to ma_format_unknown, all sample formats are supported. */ + ma_uint32 channels; /* If set to 0, all channels are supported. */ + ma_uint32 sampleRate; /* If set to 0, all sample rates are supported. */ + ma_uint32 flags; /* A combination of MA_DATA_FORMAT_FLAG_* flags. */ + } nativeDataFormats[/*ma_format_count * ma_standard_sample_rate_count * MA_MAX_CHANNELS*/ 64]; /* Not sure how big to make this. There can be *many* permutations for virtual devices which can support anything. */ +} ma_device_info; + +struct ma_device_config +{ + ma_device_type deviceType; + ma_uint32 sampleRate; + ma_uint32 periodSizeInFrames; + ma_uint32 periodSizeInMilliseconds; + ma_uint32 periods; + ma_performance_profile performanceProfile; + ma_bool8 noPreSilencedOutputBuffer; /* When set to true, the contents of the output buffer passed into the data callback will be left undefined rather than initialized to silence. */ + ma_bool8 noClip; /* When set to true, the contents of the output buffer passed into the data callback will not be clipped after returning. Only applies when the playback sample format is f32. */ + ma_bool8 noDisableDenormals; /* Do not disable denormals when firing the data callback. */ + ma_bool8 noFixedSizedCallback; /* Disables strict fixed-sized data callbacks. Setting this to true will result in the period size being treated only as a hint to the backend. This is an optimization for those who don't need fixed sized callbacks. */ + ma_device_data_proc dataCallback; + ma_device_notification_proc notificationCallback; + ma_stop_proc stopCallback; + void* pUserData; + ma_resampler_config resampling; + struct + { + const ma_device_id* pDeviceID; + ma_format format; + ma_uint32 channels; + ma_channel* pChannelMap; + ma_channel_mix_mode channelMixMode; + ma_bool32 calculateLFEFromSpatialChannels; /* When an output LFE channel is present, but no input LFE, set to true to set the output LFE to the average of all spatial channels (LR, FR, etc.). Ignored when an input LFE is present. 
*/ + ma_share_mode shareMode; + } playback; + struct + { + const ma_device_id* pDeviceID; + ma_format format; + ma_uint32 channels; + ma_channel* pChannelMap; + ma_channel_mix_mode channelMixMode; + ma_bool32 calculateLFEFromSpatialChannels; /* When an output LFE channel is present, but no input LFE, set to true to set the output LFE to the average of all spatial channels (LR, FR, etc.). Ignored when an input LFE is present. */ + ma_share_mode shareMode; + } capture; + + struct + { + ma_wasapi_usage usage; /* When configured, uses Avrt APIs to set the thread characteristics. */ + ma_bool8 noAutoConvertSRC; /* When set to true, disables the use of AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM. */ + ma_bool8 noDefaultQualitySRC; /* When set to true, disables the use of AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY. */ + ma_bool8 noAutoStreamRouting; /* Disables automatic stream routing. */ + ma_bool8 noHardwareOffloading; /* Disables WASAPI's hardware offloading feature. */ + ma_uint32 loopbackProcessID; /* The process ID to include or exclude for loopback mode. Set to 0 to capture audio from all processes. Ignored when an explicit device ID is specified. */ + ma_bool8 loopbackProcessExclude; /* When set to true, excludes the process specified by loopbackProcessID. By default, the process will be included. */ + } wasapi; + struct + { + ma_bool32 noMMap; /* Disables MMap mode. */ + ma_bool32 noAutoFormat; /* Opens the ALSA device with SND_PCM_NO_AUTO_FORMAT. */ + ma_bool32 noAutoChannels; /* Opens the ALSA device with SND_PCM_NO_AUTO_CHANNELS. */ + ma_bool32 noAutoResample; /* Opens the ALSA device with SND_PCM_NO_AUTO_RESAMPLE. */ + } alsa; + struct + { + const char* pStreamNamePlayback; + const char* pStreamNameCapture; + int channelMap; + } pulse; + struct + { + ma_bool32 allowNominalSampleRateChange; /* Desktop only. When enabled, allows changing of the sample rate at the operating system level. */ + } coreaudio; + struct + { + ma_opensl_stream_type streamType; + ma_opensl_recording_preset recordingPreset; + ma_bool32 enableCompatibilityWorkarounds; + } opensl; + struct + { + ma_aaudio_usage usage; + ma_aaudio_content_type contentType; + ma_aaudio_input_preset inputPreset; + ma_aaudio_allowed_capture_policy allowedCapturePolicy; + ma_bool32 noAutoStartAfterReroute; + ma_bool32 enableCompatibilityWorkarounds; + ma_bool32 allowSetBufferCapacity; + } aaudio; +}; + + +/* +The callback for handling device enumeration. This is fired from `ma_context_enumerate_devices()`. + + +Parameters +---------- +pContext (in) + A pointer to the context performing the enumeration. + +deviceType (in) + The type of the device being enumerated. This will always be either `ma_device_type_playback` or `ma_device_type_capture`. + +pInfo (in) + A pointer to a `ma_device_info` containing the ID and name of the enumerated device. Note that this will not include detailed information about the device, + only basic information (ID and name). The reason for this is that it would otherwise require opening the backend device to probe for the information which + is too inefficient. + +pUserData (in) + The user data pointer passed into `ma_context_enumerate_devices()`. +*/ +typedef ma_bool32 (* ma_enum_devices_callback_proc)(ma_context* pContext, ma_device_type deviceType, const ma_device_info* pInfo, void* pUserData); + + +/* +Describes some basic details about a playback or capture device. 
+*/ +typedef struct +{ + const ma_device_id* pDeviceID; + ma_share_mode shareMode; + ma_format format; + ma_uint32 channels; + ma_uint32 sampleRate; + ma_channel channelMap[MA_MAX_CHANNELS]; + ma_uint32 periodSizeInFrames; + ma_uint32 periodSizeInMilliseconds; + ma_uint32 periodCount; +} ma_device_descriptor; + +/* +These are the callbacks required to be implemented for a backend. These callbacks are grouped into two parts: context and device. There is one context +to many devices. A device is created from a context. + +The general flow goes like this: + + 1) A context is created with `onContextInit()` + 1a) Available devices can be enumerated with `onContextEnumerateDevices()` if required. + 1b) Detailed information about a device can be queried with `onContextGetDeviceInfo()` if required. + 2) A device is created from the context that was created in the first step using `onDeviceInit()`, and optionally a device ID that was + selected from device enumeration via `onContextEnumerateDevices()`. + 3) A device is started or stopped with `onDeviceStart()` / `onDeviceStop()` + 4) Data is delivered to and from the device by the backend. This is always done based on the native format returned by the prior call + to `onDeviceInit()`. Conversion between the device's native format and the format requested by the application will be handled by + miniaudio internally. + +Initialization of the context is quite simple. You need to do any necessary initialization of internal objects and then output the +callbacks defined in this structure. + +Once the context has been initialized you can initialize a device. Before doing so, however, the application may want to know which +physical devices are available. This is where `onContextEnumerateDevices()` comes in. This is fairly simple. For each device, fire the +given callback with, at a minimum, the basic information filled out in `ma_device_info`. When the callback returns `MA_FALSE`, enumeration +needs to stop and the `onContextEnumerateDevices()` function returns with a success code. + +Detailed device information can be retrieved from a device ID using `onContextGetDeviceInfo()`. This takes as input the device type and ID, +and on output returns detailed information about the device in `ma_device_info`. The `onContextGetDeviceInfo()` callback must handle the +case when the device ID is NULL, in which case information about the default device needs to be retrieved. + +Once the context has been created and the device ID retrieved (if using anything other than the default device), the device can be created. +This is a little bit more complicated than initialization of the context due to its more complicated configuration. When initializing a +device, a duplex device may be requested. This means a separate data format needs to be specified for both playback and capture. On input, +the data format is set to what the application wants. On output it's set to the native format which should match as closely as possible to +the requested format. The conversion between the format requested by the application and the device's native format will be handled +internally by miniaudio. + +On input, if the sample format is set to `ma_format_unknown`, the backend is free to use whatever sample format it desires, so long as it's +supported by miniaudio. When the channel count is set to 0, the backend should use the device's native channel count. The same applies for +sample rate. 
For the channel map, the default should be used when `ma_channel_map_is_blank()` returns true (all channels set to +`MA_CHANNEL_NONE`). On input, the `periodSizeInFrames` or `periodSizeInMilliseconds` option should always be set. The backend should +inspect both of these variables. If `periodSizeInFrames` is set, it should take priority, otherwise it needs to be derived from the period +size in milliseconds (`periodSizeInMilliseconds`) and the sample rate, keeping in mind that the sample rate may be 0, in which case the +sample rate will need to be determined before calculating the period size in frames. On output, all members of the `ma_device_descriptor` +object should be set to a valid value, except for `periodSizeInMilliseconds` which is optional (`periodSizeInFrames` *must* be set). + +Starting and stopping of the device is done with `onDeviceStart()` and `onDeviceStop()` and should be self-explanatory. If the backend uses +asynchronous reading and writing, `onDeviceStart()` and `onDeviceStop()` should always be implemented. + +The handling of data delivery between the application and the device is the most complicated part of the process. To make this a bit +easier, some helper callbacks are available. If the backend uses a blocking read/write style of API, the `onDeviceRead()` and +`onDeviceWrite()` callbacks can optionally be implemented. These are blocking and work just like reading and writing from a file. If the +backend uses a callback for data delivery, that callback must call `ma_device_handle_backend_data_callback()` from within its callback. +This allows miniaudio to then process any necessary data conversion and then pass it to the miniaudio data callback. + +If the backend requires absolute flexibility with its data delivery, it can optionally implement the `onDeviceDataLoop()` callback +which will allow it to implement the logic that will run on the audio thread. This is much more advanced and is completely optional. + +The audio thread should run data delivery logic in a loop while `ma_device_get_state() == ma_device_state_started` and no errors have been +encountered. Do not start or stop the device here. That will be handled from outside the `onDeviceDataLoop()` callback. + +The invocation of the `onDeviceDataLoop()` callback will be handled by miniaudio. When you start the device, miniaudio will fire this +callback. When the device is stopped, the `ma_device_get_state() == ma_device_state_started` condition will fail and the loop will be terminated +which will then fall through to the part that stops the device. For an example on how to implement the `onDeviceDataLoop()` callback, +look at `ma_device_audio_thread__default_read_write()`. Implement the `onDeviceDataLoopWakeup()` callback if you need a mechanism to +wake up the audio thread. + +If the backend supports an optimized retrieval of device information from an initialized `ma_device` object, it should implement the +`onDeviceGetInfo()` callback. This is optional, in which case it will fall back to `onContextGetDeviceInfo()` which is less efficient. 
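+
+As a rough sketch only (the `my_backend_*` functions below are hypothetical placeholders, not part of miniaudio), an
+`onDeviceDataLoop()` implementation for a playback-only backend might look something like this:
+
+```c
+static ma_result ma_device_data_loop__mybackend(ma_device* pDevice)
+{
+    ma_uint32 periodSizeInFrames = pDevice->playback.internalPeriodSizeInFrames;
+
+    while (ma_device_get_state(pDevice) == ma_device_state_started) {
+        // Hypothetical backend call. Blocks until a period-sized device buffer is available.
+        void* pBuffer = my_backend_acquire_buffer(pDevice, periodSizeInFrames);
+
+        // miniaudio performs any necessary data conversion and fires the application's data callback.
+        ma_device_handle_backend_data_callback(pDevice, pBuffer, NULL, periodSizeInFrames);
+
+        // Hypothetical backend call. Hands the filled buffer back to the device.
+        my_backend_submit_buffer(pDevice, pBuffer, periodSizeInFrames);
+    }
+
+    return MA_SUCCESS;
+}
+```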
+*/
+struct ma_backend_callbacks
+{
+    ma_result (* onContextInit)(ma_context* pContext, const ma_context_config* pConfig, ma_backend_callbacks* pCallbacks);
+    ma_result (* onContextUninit)(ma_context* pContext);
+    ma_result (* onContextEnumerateDevices)(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData);
+    ma_result (* onContextGetDeviceInfo)(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_device_info* pDeviceInfo);
+    ma_result (* onDeviceInit)(ma_device* pDevice, const ma_device_config* pConfig, ma_device_descriptor* pDescriptorPlayback, ma_device_descriptor* pDescriptorCapture);
+    ma_result (* onDeviceUninit)(ma_device* pDevice);
+    ma_result (* onDeviceStart)(ma_device* pDevice);
+    ma_result (* onDeviceStop)(ma_device* pDevice);
+    ma_result (* onDeviceRead)(ma_device* pDevice, void* pFrames, ma_uint32 frameCount, ma_uint32* pFramesRead);
+    ma_result (* onDeviceWrite)(ma_device* pDevice, const void* pFrames, ma_uint32 frameCount, ma_uint32* pFramesWritten);
+    ma_result (* onDeviceDataLoop)(ma_device* pDevice);
+    ma_result (* onDeviceDataLoopWakeup)(ma_device* pDevice);
+    ma_result (* onDeviceGetInfo)(ma_device* pDevice, ma_device_type type, ma_device_info* pDeviceInfo);
+};
+
+struct ma_context_config
+{
+    ma_log* pLog;
+    ma_thread_priority threadPriority;
+    size_t threadStackSize;
+    void* pUserData;
+    ma_allocation_callbacks allocationCallbacks;
+    struct
+    {
+        ma_handle hWnd; /* HWND. Optional window handle to pass into SetCooperativeLevel(). Will default to the foreground window, and if that fails, the desktop window. */
+    } dsound;
+    struct
+    {
+        ma_bool32 useVerboseDeviceEnumeration;
+    } alsa;
+    struct
+    {
+        const char* pApplicationName;
+        const char* pServerName;
+        ma_bool32 tryAutoSpawn; /* Enables autospawning of the PulseAudio daemon if necessary. */
+    } pulse;
+    struct
+    {
+        ma_ios_session_category sessionCategory;
+        ma_uint32 sessionCategoryOptions;
+        ma_bool32 noAudioSessionActivate;   /* iOS only. When set to true, does not perform an explicit [[AVAudioSession sharedInstance] setActive:true] on initialization. */
+        ma_bool32 noAudioSessionDeactivate; /* iOS only. When set to true, does not perform an explicit [[AVAudioSession sharedInstance] setActive:false] on uninitialization. */
+    } coreaudio;
+    struct
+    {
+        const char* pClientName;
+        ma_bool32 tryStartServer;
+    } jack;
+    ma_backend_callbacks custom;
+};
+
+/* WASAPI specific structure for some commands which must run on a common thread due to bugs in WASAPI. */
+typedef struct
+{
+    int code;
+    ma_event* pEvent; /* This will be signalled when the event is complete. */
+    union
+    {
+        struct
+        {
+            int _unused;
+        } quit;
+        struct
+        {
+            ma_device_type deviceType;
+            void* pAudioClient;
+            void** ppAudioClientService;
+            ma_result* pResult; /* The result from creating the audio client service. */
+        } createAudioClient;
+        struct
+        {
+            ma_device* pDevice;
+            ma_device_type deviceType;
+        } releaseAudioClient;
+    } data;
+} ma_context_command__wasapi;
+
+struct ma_context
+{
+    ma_backend_callbacks callbacks;
+    ma_backend backend; /* DirectSound, ALSA, etc. */
+    ma_log* pLog;
+    ma_log log; /* Only used if the log is owned by the context. The pLog member will be set to &log in this case. */
+    ma_thread_priority threadPriority;
+    size_t threadStackSize;
+    void* pUserData;
+    ma_allocation_callbacks allocationCallbacks;
+    ma_mutex deviceEnumLock; /* Used to make ma_context_get_devices() thread safe. */
+    ma_mutex deviceInfoLock; /* Used to make ma_context_get_device_info() thread safe.
*/ + ma_uint32 deviceInfoCapacity; /* Total capacity of pDeviceInfos. */ + ma_uint32 playbackDeviceInfoCount; + ma_uint32 captureDeviceInfoCount; + ma_device_info* pDeviceInfos; /* Playback devices first, then capture. */ + + union + { +#ifdef MA_SUPPORT_WASAPI + struct + { + ma_thread commandThread; + ma_mutex commandLock; + ma_semaphore commandSem; + ma_uint32 commandIndex; + ma_uint32 commandCount; + ma_context_command__wasapi commands[4]; + ma_handle hAvrt; + ma_proc AvSetMmThreadCharacteristicsA; + ma_proc AvRevertMmThreadcharacteristics; + ma_handle hMMDevapi; + ma_proc ActivateAudioInterfaceAsync; + } wasapi; +#endif +#ifdef MA_SUPPORT_DSOUND + struct + { + ma_handle hWnd; /* Can be null. */ + ma_handle hDSoundDLL; + ma_proc DirectSoundCreate; + ma_proc DirectSoundEnumerateA; + ma_proc DirectSoundCaptureCreate; + ma_proc DirectSoundCaptureEnumerateA; + } dsound; +#endif +#ifdef MA_SUPPORT_WINMM + struct + { + ma_handle hWinMM; + ma_proc waveOutGetNumDevs; + ma_proc waveOutGetDevCapsA; + ma_proc waveOutOpen; + ma_proc waveOutClose; + ma_proc waveOutPrepareHeader; + ma_proc waveOutUnprepareHeader; + ma_proc waveOutWrite; + ma_proc waveOutReset; + ma_proc waveInGetNumDevs; + ma_proc waveInGetDevCapsA; + ma_proc waveInOpen; + ma_proc waveInClose; + ma_proc waveInPrepareHeader; + ma_proc waveInUnprepareHeader; + ma_proc waveInAddBuffer; + ma_proc waveInStart; + ma_proc waveInReset; + } winmm; +#endif +#ifdef MA_SUPPORT_ALSA + struct + { + ma_handle asoundSO; + ma_proc snd_pcm_open; + ma_proc snd_pcm_close; + ma_proc snd_pcm_hw_params_sizeof; + ma_proc snd_pcm_hw_params_any; + ma_proc snd_pcm_hw_params_set_format; + ma_proc snd_pcm_hw_params_set_format_first; + ma_proc snd_pcm_hw_params_get_format_mask; + ma_proc snd_pcm_hw_params_set_channels; + ma_proc snd_pcm_hw_params_set_channels_near; + ma_proc snd_pcm_hw_params_set_channels_minmax; + ma_proc snd_pcm_hw_params_set_rate_resample; + ma_proc snd_pcm_hw_params_set_rate; + ma_proc snd_pcm_hw_params_set_rate_near; + ma_proc snd_pcm_hw_params_set_buffer_size_near; + ma_proc snd_pcm_hw_params_set_periods_near; + ma_proc snd_pcm_hw_params_set_access; + ma_proc snd_pcm_hw_params_get_format; + ma_proc snd_pcm_hw_params_get_channels; + ma_proc snd_pcm_hw_params_get_channels_min; + ma_proc snd_pcm_hw_params_get_channels_max; + ma_proc snd_pcm_hw_params_get_rate; + ma_proc snd_pcm_hw_params_get_rate_min; + ma_proc snd_pcm_hw_params_get_rate_max; + ma_proc snd_pcm_hw_params_get_buffer_size; + ma_proc snd_pcm_hw_params_get_periods; + ma_proc snd_pcm_hw_params_get_access; + ma_proc snd_pcm_hw_params_test_format; + ma_proc snd_pcm_hw_params_test_channels; + ma_proc snd_pcm_hw_params_test_rate; + ma_proc snd_pcm_hw_params; + ma_proc snd_pcm_sw_params_sizeof; + ma_proc snd_pcm_sw_params_current; + ma_proc snd_pcm_sw_params_get_boundary; + ma_proc snd_pcm_sw_params_set_avail_min; + ma_proc snd_pcm_sw_params_set_start_threshold; + ma_proc snd_pcm_sw_params_set_stop_threshold; + ma_proc snd_pcm_sw_params; + ma_proc snd_pcm_format_mask_sizeof; + ma_proc snd_pcm_format_mask_test; + ma_proc snd_pcm_get_chmap; + ma_proc snd_pcm_state; + ma_proc snd_pcm_prepare; + ma_proc snd_pcm_start; + ma_proc snd_pcm_drop; + ma_proc snd_pcm_drain; + ma_proc snd_pcm_reset; + ma_proc snd_device_name_hint; + ma_proc snd_device_name_get_hint; + ma_proc snd_card_get_index; + ma_proc snd_device_name_free_hint; + ma_proc snd_pcm_mmap_begin; + ma_proc snd_pcm_mmap_commit; + ma_proc snd_pcm_recover; + ma_proc snd_pcm_readi; + ma_proc snd_pcm_writei; + ma_proc snd_pcm_avail; + 
ma_proc snd_pcm_avail_update; + ma_proc snd_pcm_wait; + ma_proc snd_pcm_nonblock; + ma_proc snd_pcm_info; + ma_proc snd_pcm_info_sizeof; + ma_proc snd_pcm_info_get_name; + ma_proc snd_pcm_poll_descriptors; + ma_proc snd_pcm_poll_descriptors_count; + ma_proc snd_pcm_poll_descriptors_revents; + ma_proc snd_config_update_free_global; + + ma_mutex internalDeviceEnumLock; + ma_bool32 useVerboseDeviceEnumeration; + } alsa; +#endif +#ifdef MA_SUPPORT_PULSEAUDIO + struct + { + ma_handle pulseSO; + ma_proc pa_mainloop_new; + ma_proc pa_mainloop_free; + ma_proc pa_mainloop_quit; + ma_proc pa_mainloop_get_api; + ma_proc pa_mainloop_iterate; + ma_proc pa_mainloop_wakeup; + ma_proc pa_threaded_mainloop_new; + ma_proc pa_threaded_mainloop_free; + ma_proc pa_threaded_mainloop_start; + ma_proc pa_threaded_mainloop_stop; + ma_proc pa_threaded_mainloop_lock; + ma_proc pa_threaded_mainloop_unlock; + ma_proc pa_threaded_mainloop_wait; + ma_proc pa_threaded_mainloop_signal; + ma_proc pa_threaded_mainloop_accept; + ma_proc pa_threaded_mainloop_get_retval; + ma_proc pa_threaded_mainloop_get_api; + ma_proc pa_threaded_mainloop_in_thread; + ma_proc pa_threaded_mainloop_set_name; + ma_proc pa_context_new; + ma_proc pa_context_unref; + ma_proc pa_context_connect; + ma_proc pa_context_disconnect; + ma_proc pa_context_set_state_callback; + ma_proc pa_context_get_state; + ma_proc pa_context_get_sink_info_list; + ma_proc pa_context_get_source_info_list; + ma_proc pa_context_get_sink_info_by_name; + ma_proc pa_context_get_source_info_by_name; + ma_proc pa_operation_unref; + ma_proc pa_operation_get_state; + ma_proc pa_channel_map_init_extend; + ma_proc pa_channel_map_valid; + ma_proc pa_channel_map_compatible; + ma_proc pa_stream_new; + ma_proc pa_stream_unref; + ma_proc pa_stream_connect_playback; + ma_proc pa_stream_connect_record; + ma_proc pa_stream_disconnect; + ma_proc pa_stream_get_state; + ma_proc pa_stream_get_sample_spec; + ma_proc pa_stream_get_channel_map; + ma_proc pa_stream_get_buffer_attr; + ma_proc pa_stream_set_buffer_attr; + ma_proc pa_stream_get_device_name; + ma_proc pa_stream_set_write_callback; + ma_proc pa_stream_set_read_callback; + ma_proc pa_stream_set_suspended_callback; + ma_proc pa_stream_set_moved_callback; + ma_proc pa_stream_is_suspended; + ma_proc pa_stream_flush; + ma_proc pa_stream_drain; + ma_proc pa_stream_is_corked; + ma_proc pa_stream_cork; + ma_proc pa_stream_trigger; + ma_proc pa_stream_begin_write; + ma_proc pa_stream_write; + ma_proc pa_stream_peek; + ma_proc pa_stream_drop; + ma_proc pa_stream_writable_size; + ma_proc pa_stream_readable_size; + + /*pa_mainloop**/ ma_ptr pMainLoop; + /*pa_context**/ ma_ptr pPulseContext; + char* pApplicationName; /* Set when the context is initialized. Used by devices for their local pa_context objects. */ + char* pServerName; /* Set when the context is initialized. Used by devices for their local pa_context objects. 
*/ + } pulse; +#endif +#ifdef MA_SUPPORT_JACK + struct + { + ma_handle jackSO; + ma_proc jack_client_open; + ma_proc jack_client_close; + ma_proc jack_client_name_size; + ma_proc jack_set_process_callback; + ma_proc jack_set_buffer_size_callback; + ma_proc jack_on_shutdown; + ma_proc jack_get_sample_rate; + ma_proc jack_get_buffer_size; + ma_proc jack_get_ports; + ma_proc jack_activate; + ma_proc jack_deactivate; + ma_proc jack_connect; + ma_proc jack_port_register; + ma_proc jack_port_name; + ma_proc jack_port_get_buffer; + ma_proc jack_free; + + char* pClientName; + ma_bool32 tryStartServer; + } jack; +#endif +#ifdef MA_SUPPORT_COREAUDIO + struct + { + ma_handle hCoreFoundation; + ma_proc CFStringGetCString; + ma_proc CFRelease; + + ma_handle hCoreAudio; + ma_proc AudioObjectGetPropertyData; + ma_proc AudioObjectGetPropertyDataSize; + ma_proc AudioObjectSetPropertyData; + ma_proc AudioObjectAddPropertyListener; + ma_proc AudioObjectRemovePropertyListener; + + ma_handle hAudioUnit; /* Could possibly be set to AudioToolbox on later versions of macOS. */ + ma_proc AudioComponentFindNext; + ma_proc AudioComponentInstanceDispose; + ma_proc AudioComponentInstanceNew; + ma_proc AudioOutputUnitStart; + ma_proc AudioOutputUnitStop; + ma_proc AudioUnitAddPropertyListener; + ma_proc AudioUnitGetPropertyInfo; + ma_proc AudioUnitGetProperty; + ma_proc AudioUnitSetProperty; + ma_proc AudioUnitInitialize; + ma_proc AudioUnitRender; + + /*AudioComponent*/ ma_ptr component; + ma_bool32 noAudioSessionDeactivate; /* For tracking whether or not the iOS audio session should be explicitly deactivated. Set from the config in ma_context_init__coreaudio(). */ + } coreaudio; +#endif +#ifdef MA_SUPPORT_SNDIO + struct + { + ma_handle sndioSO; + ma_proc sio_open; + ma_proc sio_close; + ma_proc sio_setpar; + ma_proc sio_getpar; + ma_proc sio_getcap; + ma_proc sio_start; + ma_proc sio_stop; + ma_proc sio_read; + ma_proc sio_write; + ma_proc sio_onmove; + ma_proc sio_nfds; + ma_proc sio_pollfd; + ma_proc sio_revents; + ma_proc sio_eof; + ma_proc sio_setvol; + ma_proc sio_onvol; + ma_proc sio_initpar; + } sndio; +#endif +#ifdef MA_SUPPORT_AUDIO4 + struct + { + int _unused; + } audio4; +#endif +#ifdef MA_SUPPORT_OSS + struct + { + int versionMajor; + int versionMinor; + } oss; +#endif +#ifdef MA_SUPPORT_AAUDIO + struct + { + ma_handle hAAudio; /* libaaudio.so */ + ma_proc AAudio_createStreamBuilder; + ma_proc AAudioStreamBuilder_delete; + ma_proc AAudioStreamBuilder_setDeviceId; + ma_proc AAudioStreamBuilder_setDirection; + ma_proc AAudioStreamBuilder_setSharingMode; + ma_proc AAudioStreamBuilder_setFormat; + ma_proc AAudioStreamBuilder_setChannelCount; + ma_proc AAudioStreamBuilder_setSampleRate; + ma_proc AAudioStreamBuilder_setBufferCapacityInFrames; + ma_proc AAudioStreamBuilder_setFramesPerDataCallback; + ma_proc AAudioStreamBuilder_setDataCallback; + ma_proc AAudioStreamBuilder_setErrorCallback; + ma_proc AAudioStreamBuilder_setPerformanceMode; + ma_proc AAudioStreamBuilder_setUsage; + ma_proc AAudioStreamBuilder_setContentType; + ma_proc AAudioStreamBuilder_setInputPreset; + ma_proc AAudioStreamBuilder_setAllowedCapturePolicy; + ma_proc AAudioStreamBuilder_openStream; + ma_proc AAudioStream_close; + ma_proc AAudioStream_getState; + ma_proc AAudioStream_waitForStateChange; + ma_proc AAudioStream_getFormat; + ma_proc AAudioStream_getChannelCount; + ma_proc AAudioStream_getSampleRate; + ma_proc AAudioStream_getBufferCapacityInFrames; + ma_proc AAudioStream_getFramesPerDataCallback; + ma_proc 
AAudioStream_getFramesPerBurst; + ma_proc AAudioStream_requestStart; + ma_proc AAudioStream_requestStop; + ma_device_job_thread jobThread; /* For processing operations outside of the error callback, specifically device disconnections and rerouting. */ + } aaudio; +#endif +#ifdef MA_SUPPORT_OPENSL + struct + { + ma_handle libOpenSLES; + ma_handle SL_IID_ENGINE; + ma_handle SL_IID_AUDIOIODEVICECAPABILITIES; + ma_handle SL_IID_ANDROIDSIMPLEBUFFERQUEUE; + ma_handle SL_IID_RECORD; + ma_handle SL_IID_PLAY; + ma_handle SL_IID_OUTPUTMIX; + ma_handle SL_IID_ANDROIDCONFIGURATION; + ma_proc slCreateEngine; + } opensl; +#endif +#ifdef MA_SUPPORT_WEBAUDIO + struct + { + int _unused; + } webaudio; +#endif +#ifdef MA_SUPPORT_NULL + struct + { + int _unused; + } null_backend; +#endif + }; + + union + { +#if defined(MA_WIN32) + struct + { + /*HMODULE*/ ma_handle hOle32DLL; + ma_proc CoInitialize; + ma_proc CoInitializeEx; + ma_proc CoUninitialize; + ma_proc CoCreateInstance; + ma_proc CoTaskMemFree; + ma_proc PropVariantClear; + ma_proc StringFromGUID2; + + /*HMODULE*/ ma_handle hUser32DLL; + ma_proc GetForegroundWindow; + ma_proc GetDesktopWindow; + + /*HMODULE*/ ma_handle hAdvapi32DLL; + ma_proc RegOpenKeyExA; + ma_proc RegCloseKey; + ma_proc RegQueryValueExA; + + /*HRESULT*/ long CoInitializeResult; + } win32; +#endif +#ifdef MA_POSIX + struct + { + int _unused; + } posix; +#endif + int _unused; + }; +}; + +struct ma_device +{ + ma_context* pContext; + ma_device_type type; + ma_uint32 sampleRate; + ma_atomic_device_state state; /* The state of the device is variable and can change at any time on any thread. Must be used atomically. */ + ma_device_data_proc onData; /* Set once at initialization time and should not be changed after. */ + ma_device_notification_proc onNotification; /* Set once at initialization time and should not be changed after. */ + ma_stop_proc onStop; /* DEPRECATED. Use the notification callback instead. Set once at initialization time and should not be changed after. */ + void* pUserData; /* Application defined data. */ + ma_mutex startStopLock; + ma_event wakeupEvent; + ma_event startEvent; + ma_event stopEvent; + ma_thread thread; + ma_result workResult; /* This is set by the worker thread after it's finished doing a job. */ + ma_bool8 isOwnerOfContext; /* When set to true, uninitializing the device will also uninitialize the context. Set to true when NULL is passed into ma_device_init(). */ + ma_bool8 noPreSilencedOutputBuffer; + ma_bool8 noClip; + ma_bool8 noDisableDenormals; + ma_bool8 noFixedSizedCallback; + ma_atomic_float masterVolumeFactor; /* Linear 0..1. Can be read and written simultaneously by different threads. Must be used atomically. */ + ma_duplex_rb duplexRB; /* Intermediary buffer for duplex device on asynchronous backends. */ + struct + { + ma_resample_algorithm algorithm; + ma_resampling_backend_vtable* pBackendVTable; + void* pBackendUserData; + struct + { + ma_uint32 lpfOrder; + } linear; + } resampling; + struct + { + ma_device_id* pID; /* Set to NULL if using default ID, otherwise set to the address of "id". */ + ma_device_id id; /* If using an explicit device, will be set to a copy of the ID used for initialization. Otherwise cleared to 0. */ + char name[MA_MAX_DEVICE_NAME_LENGTH + 1]; /* Maybe temporary. Likely to be replaced with a query API. */ + ma_share_mode shareMode; /* Set to whatever was passed in when the device was initialized. 
*/ + ma_format format; + ma_uint32 channels; + ma_channel channelMap[MA_MAX_CHANNELS]; + ma_format internalFormat; + ma_uint32 internalChannels; + ma_uint32 internalSampleRate; + ma_channel internalChannelMap[MA_MAX_CHANNELS]; + ma_uint32 internalPeriodSizeInFrames; + ma_uint32 internalPeriods; + ma_channel_mix_mode channelMixMode; + ma_bool32 calculateLFEFromSpatialChannels; + ma_data_converter converter; + void* pIntermediaryBuffer; /* For implementing fixed sized buffer callbacks. Will be null if using variable sized callbacks. */ + ma_uint32 intermediaryBufferCap; + ma_uint32 intermediaryBufferLen; /* How many valid frames are sitting in the intermediary buffer. */ + void* pInputCache; /* In external format. Can be null. */ + ma_uint64 inputCacheCap; + ma_uint64 inputCacheConsumed; + ma_uint64 inputCacheRemaining; + } playback; + struct + { + ma_device_id* pID; /* Set to NULL if using default ID, otherwise set to the address of "id". */ + ma_device_id id; /* If using an explicit device, will be set to a copy of the ID used for initialization. Otherwise cleared to 0. */ + char name[MA_MAX_DEVICE_NAME_LENGTH + 1]; /* Maybe temporary. Likely to be replaced with a query API. */ + ma_share_mode shareMode; /* Set to whatever was passed in when the device was initialized. */ + ma_format format; + ma_uint32 channels; + ma_channel channelMap[MA_MAX_CHANNELS]; + ma_format internalFormat; + ma_uint32 internalChannels; + ma_uint32 internalSampleRate; + ma_channel internalChannelMap[MA_MAX_CHANNELS]; + ma_uint32 internalPeriodSizeInFrames; + ma_uint32 internalPeriods; + ma_channel_mix_mode channelMixMode; + ma_bool32 calculateLFEFromSpatialChannels; + ma_data_converter converter; + void* pIntermediaryBuffer; /* For implementing fixed sized buffer callbacks. Will be null if using variable sized callbacks. */ + ma_uint32 intermediaryBufferCap; + ma_uint32 intermediaryBufferLen; /* How many valid frames are sitting in the intermediary buffer. */ + } capture; + + union + { +#ifdef MA_SUPPORT_WASAPI + struct + { + /*IAudioClient**/ ma_ptr pAudioClientPlayback; + /*IAudioClient**/ ma_ptr pAudioClientCapture; + /*IAudioRenderClient**/ ma_ptr pRenderClient; + /*IAudioCaptureClient**/ ma_ptr pCaptureClient; + /*IMMDeviceEnumerator**/ ma_ptr pDeviceEnumerator; /* Used for IMMNotificationClient notifications. Required for detecting default device changes. */ + ma_IMMNotificationClient notificationClient; + /*HANDLE*/ ma_handle hEventPlayback; /* Auto reset. Initialized to signaled. */ + /*HANDLE*/ ma_handle hEventCapture; /* Auto reset. Initialized to unsignaled. */ + ma_uint32 actualBufferSizeInFramesPlayback; /* Value from GetBufferSize(). internalPeriodSizeInFrames is not set to the _actual_ buffer size when low-latency shared mode is being used due to the way the IAudioClient3 API works. */ + ma_uint32 actualBufferSizeInFramesCapture; + ma_uint32 originalPeriodSizeInFrames; + ma_uint32 originalPeriodSizeInMilliseconds; + ma_uint32 originalPeriods; + ma_performance_profile originalPerformanceProfile; + ma_uint32 periodSizeInFramesPlayback; + ma_uint32 periodSizeInFramesCapture; + void* pMappedBufferCapture; + ma_uint32 mappedBufferCaptureCap; + ma_uint32 mappedBufferCaptureLen; + void* pMappedBufferPlayback; + ma_uint32 mappedBufferPlaybackCap; + ma_uint32 mappedBufferPlaybackLen; + ma_atomic_bool32 isStartedCapture; /* Can be read and written simultaneously across different threads. Must be used atomically, and must be 32-bit. 
*/ + ma_atomic_bool32 isStartedPlayback; /* Can be read and written simultaneously across different threads. Must be used atomically, and must be 32-bit. */ + ma_uint32 loopbackProcessID; + ma_bool8 loopbackProcessExclude; + ma_bool8 noAutoConvertSRC; /* When set to true, disables the use of AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM. */ + ma_bool8 noDefaultQualitySRC; /* When set to true, disables the use of AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY. */ + ma_bool8 noHardwareOffloading; + ma_bool8 allowCaptureAutoStreamRouting; + ma_bool8 allowPlaybackAutoStreamRouting; + ma_bool8 isDetachedPlayback; + ma_bool8 isDetachedCapture; + ma_wasapi_usage usage; + void* hAvrtHandle; + ma_mutex rerouteLock; + } wasapi; +#endif +#ifdef MA_SUPPORT_DSOUND + struct + { + /*LPDIRECTSOUND*/ ma_ptr pPlayback; + /*LPDIRECTSOUNDBUFFER*/ ma_ptr pPlaybackPrimaryBuffer; + /*LPDIRECTSOUNDBUFFER*/ ma_ptr pPlaybackBuffer; + /*LPDIRECTSOUNDCAPTURE*/ ma_ptr pCapture; + /*LPDIRECTSOUNDCAPTUREBUFFER*/ ma_ptr pCaptureBuffer; + } dsound; +#endif +#ifdef MA_SUPPORT_WINMM + struct + { + /*HWAVEOUT*/ ma_handle hDevicePlayback; + /*HWAVEIN*/ ma_handle hDeviceCapture; + /*HANDLE*/ ma_handle hEventPlayback; + /*HANDLE*/ ma_handle hEventCapture; + ma_uint32 fragmentSizeInFrames; + ma_uint32 iNextHeaderPlayback; /* [0,periods). Used as an index into pWAVEHDRPlayback. */ + ma_uint32 iNextHeaderCapture; /* [0,periods). Used as an index into pWAVEHDRCapture. */ + ma_uint32 headerFramesConsumedPlayback; /* The number of PCM frames consumed in the buffer in pWAVEHEADER[iNextHeader]. */ + ma_uint32 headerFramesConsumedCapture; /* ^^^ */ + /*WAVEHDR**/ ma_uint8* pWAVEHDRPlayback; /* One instantiation for each period. */ + /*WAVEHDR**/ ma_uint8* pWAVEHDRCapture; /* One instantiation for each period. */ + ma_uint8* pIntermediaryBufferPlayback; + ma_uint8* pIntermediaryBufferCapture; + ma_uint8* _pHeapData; /* Used internally and is used for the heap allocated data for the intermediary buffer and the WAVEHDR structures. */ + } winmm; +#endif +#ifdef MA_SUPPORT_ALSA + struct + { + /*snd_pcm_t**/ ma_ptr pPCMPlayback; + /*snd_pcm_t**/ ma_ptr pPCMCapture; + /*struct pollfd**/ void* pPollDescriptorsPlayback; + /*struct pollfd**/ void* pPollDescriptorsCapture; + int pollDescriptorCountPlayback; + int pollDescriptorCountCapture; + int wakeupfdPlayback; /* eventfd for waking up from poll() when the playback device is stopped. */ + int wakeupfdCapture; /* eventfd for waking up from poll() when the capture device is stopped. */ + ma_bool8 isUsingMMapPlayback; + ma_bool8 isUsingMMapCapture; + } alsa; +#endif +#ifdef MA_SUPPORT_PULSEAUDIO + struct + { + /*pa_mainloop**/ ma_ptr pMainLoop; + /*pa_context**/ ma_ptr pPulseContext; + /*pa_stream**/ ma_ptr pStreamPlayback; + /*pa_stream**/ ma_ptr pStreamCapture; + } pulse; +#endif +#ifdef MA_SUPPORT_JACK + struct + { + /*jack_client_t**/ ma_ptr pClient; + /*jack_port_t**/ ma_ptr* ppPortsPlayback; + /*jack_port_t**/ ma_ptr* ppPortsCapture; + float* pIntermediaryBufferPlayback; /* Typed as a float because JACK is always floating point. */ + float* pIntermediaryBufferCapture; + } jack; +#endif +#ifdef MA_SUPPORT_COREAUDIO + struct + { + ma_uint32 deviceObjectIDPlayback; + ma_uint32 deviceObjectIDCapture; + /*AudioUnit*/ ma_ptr audioUnitPlayback; + /*AudioUnit*/ ma_ptr audioUnitCapture; + /*AudioBufferList**/ ma_ptr pAudioBufferList; /* Only used for input devices. */ + ma_uint32 audioBufferCapInFrames; /* Only used for input devices. The capacity in frames of each buffer in pAudioBufferList. 
*/ + ma_event stopEvent; + ma_uint32 originalPeriodSizeInFrames; + ma_uint32 originalPeriodSizeInMilliseconds; + ma_uint32 originalPeriods; + ma_performance_profile originalPerformanceProfile; + ma_bool32 isDefaultPlaybackDevice; + ma_bool32 isDefaultCaptureDevice; + ma_bool32 isSwitchingPlaybackDevice; /* <-- Set to true when the default device has changed and miniaudio is in the process of switching. */ + ma_bool32 isSwitchingCaptureDevice; /* <-- Set to true when the default device has changed and miniaudio is in the process of switching. */ + void* pNotificationHandler; /* Only used on mobile platforms. Obj-C object for handling route changes. */ + } coreaudio; +#endif +#ifdef MA_SUPPORT_SNDIO + struct + { + ma_ptr handlePlayback; + ma_ptr handleCapture; + ma_bool32 isStartedPlayback; + ma_bool32 isStartedCapture; + } sndio; +#endif +#ifdef MA_SUPPORT_AUDIO4 + struct + { + int fdPlayback; + int fdCapture; + } audio4; +#endif +#ifdef MA_SUPPORT_OSS + struct + { + int fdPlayback; + int fdCapture; + } oss; +#endif +#ifdef MA_SUPPORT_AAUDIO + struct + { + /*AAudioStream**/ ma_ptr pStreamPlayback; + /*AAudioStream**/ ma_ptr pStreamCapture; + ma_mutex rerouteLock; + ma_aaudio_usage usage; + ma_aaudio_content_type contentType; + ma_aaudio_input_preset inputPreset; + ma_aaudio_allowed_capture_policy allowedCapturePolicy; + ma_bool32 noAutoStartAfterReroute; + } aaudio; +#endif +#ifdef MA_SUPPORT_OPENSL + struct + { + /*SLObjectItf*/ ma_ptr pOutputMixObj; + /*SLOutputMixItf*/ ma_ptr pOutputMix; + /*SLObjectItf*/ ma_ptr pAudioPlayerObj; + /*SLPlayItf*/ ma_ptr pAudioPlayer; + /*SLObjectItf*/ ma_ptr pAudioRecorderObj; + /*SLRecordItf*/ ma_ptr pAudioRecorder; + /*SLAndroidSimpleBufferQueueItf*/ ma_ptr pBufferQueuePlayback; + /*SLAndroidSimpleBufferQueueItf*/ ma_ptr pBufferQueueCapture; + ma_bool32 isDrainingCapture; + ma_bool32 isDrainingPlayback; + ma_uint32 currentBufferIndexPlayback; + ma_uint32 currentBufferIndexCapture; + ma_uint8* pBufferPlayback; /* This is malloc()'d and is used for storing audio data. Typed as ma_uint8 for easy offsetting. */ + ma_uint8* pBufferCapture; + } opensl; +#endif +#ifdef MA_SUPPORT_WEBAUDIO + struct + { + /* AudioWorklets path. */ + /* EMSCRIPTEN_WEBAUDIO_T */ int audioContext; + /* EMSCRIPTEN_WEBAUDIO_T */ int audioWorklet; + float* pIntermediaryBuffer; + void* pStackBuffer; + ma_result initResult; /* Set to MA_BUSY while initialization is in progress. */ + int deviceIndex; /* We store the device in a list on the JavaScript side. This is used to map our C object to the JS object. */ + } webaudio; +#endif +#ifdef MA_SUPPORT_NULL + struct + { + ma_thread deviceThread; + ma_event operationEvent; + ma_event operationCompletionEvent; + ma_semaphore operationSemaphore; + ma_uint32 operation; + ma_result operationResult; + ma_timer timer; + double priorRunTime; + ma_uint32 currentPeriodFramesRemainingPlayback; + ma_uint32 currentPeriodFramesRemainingCapture; + ma_uint64 lastProcessedFramePlayback; + ma_uint64 lastProcessedFrameCapture; + ma_atomic_bool32 isStarted; /* Read and written by multiple threads. Must be used atomically, and must be 32-bit for compiler compatibility. */ + } null_device; +#endif + }; +}; +#if defined(_MSC_VER) && !defined(__clang__) + #pragma warning(pop) +#elif defined(__clang__) || (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8))) + #pragma GCC diagnostic pop /* For ISO C99 doesn't support unnamed structs/unions [-Wpedantic] */ +#endif + +/* +Initializes a `ma_context_config` object. 
+
+
+Return Value
+------------
+A `ma_context_config` initialized to defaults.
+
+
+Remarks
+-------
+You must always use this to initialize the default state of the `ma_context_config` object. Not using this will result in your program breaking when miniaudio
+is updated and new members are added to `ma_context_config`. It also sets logical defaults.
+
+You can override members of the returned object by changing them directly.
+
+
+See Also
+--------
+ma_context_init()
+*/
+MA_API ma_context_config ma_context_config_init(void);
+
+/*
+Initializes a context.
+
+The context is used for selecting and initializing an appropriate backend and to represent the backend at a more global level than that of an individual
+device. There is one context to many devices, and a device is created from a context. A context is required to enumerate devices.
+
+
+Parameters
+----------
+backends (in, optional)
+    A list of backends to try initializing, in priority order. Can be NULL, in which case it uses default priority order.
+
+backendCount (in, optional)
+    The number of items in `backends`. Ignored if `backends` is NULL.
+
+pConfig (in, optional)
+    The context configuration.
+
+pContext (in)
+    A pointer to the context object being initialized.
+
+
+Return Value
+------------
+MA_SUCCESS if successful; any other error code otherwise.
+
+
+Thread Safety
+-------------
+Unsafe. Do not call this function across multiple threads as some backends read and write to global state.
+
+
+Remarks
+-------
+When `backends` is NULL, the default priority order will be used. Below is a list of backends in priority order:
+
+    |-------------|-----------------------|--------------------------------------------------------|
+    | Name        | Enum Name             | Supported Operating Systems                            |
+    |-------------|-----------------------|--------------------------------------------------------|
+    | WASAPI      | ma_backend_wasapi     | Windows Vista+                                         |
+    | DirectSound | ma_backend_dsound     | Windows XP+                                            |
+    | WinMM       | ma_backend_winmm      | Windows XP+ (may work on older versions, but untested) |
+    | Core Audio  | ma_backend_coreaudio  | macOS, iOS                                             |
+    | ALSA        | ma_backend_alsa       | Linux                                                  |
+    | PulseAudio  | ma_backend_pulseaudio | Cross Platform (disabled on Windows, BSD and Android)  |
+    | JACK        | ma_backend_jack       | Cross Platform (disabled on BSD and Android)           |
+    | sndio       | ma_backend_sndio      | OpenBSD                                                |
+    | audio(4)    | ma_backend_audio4     | NetBSD, OpenBSD                                        |
+    | OSS         | ma_backend_oss        | FreeBSD                                                |
+    | AAudio      | ma_backend_aaudio     | Android 8+                                             |
+    | OpenSL|ES   | ma_backend_opensl     | Android (API level 16+)                                |
+    | Web Audio   | ma_backend_webaudio   | Web (via Emscripten)                                   |
+    | Null        | ma_backend_null       | Cross Platform (not used on Web)                       |
+    |-------------|-----------------------|--------------------------------------------------------|
+
+The context can be configured via the `pConfig` argument. The config object is initialized with `ma_context_config_init()`. Individual configuration settings
+can then be set directly on the structure. Below are the members of the `ma_context_config` object.
+
+    pLog
+        A pointer to the `ma_log` to post log messages to. Can be NULL if the application does not
+        require logging. See the `ma_log` API for details on how to use the logging system.
+
+    threadPriority
+        The desired priority to use for the audio thread.
Allowable values include the following: + + |--------------------------------------| + | Thread Priority | + |--------------------------------------| + | ma_thread_priority_idle | + | ma_thread_priority_lowest | + | ma_thread_priority_low | + | ma_thread_priority_normal | + | ma_thread_priority_high | + | ma_thread_priority_highest (default) | + | ma_thread_priority_realtime | + | ma_thread_priority_default | + |--------------------------------------| + + threadStackSize + The desired size of the stack for the audio thread. Defaults to the operating system's default. + + pUserData + A pointer to application-defined data. This can be accessed from the context object directly such as `context.pUserData`. + + allocationCallbacks + Structure containing custom allocation callbacks. Leaving this at defaults will cause it to use MA_MALLOC, MA_REALLOC and MA_FREE. These allocation + callbacks will be used for anything tied to the context, including devices. + + alsa.useVerboseDeviceEnumeration + ALSA will typically enumerate many different devices which can be intrusive and not user-friendly. To combat this, miniaudio will enumerate only unique + card/device pairs by default. The problem with this is that you lose a bit of flexibility and control. Setting alsa.useVerboseDeviceEnumeration makes + it so the ALSA backend includes all devices. Defaults to false. + + pulse.pApplicationName + PulseAudio only. The application name to use when initializing the PulseAudio context with `pa_context_new()`. + + pulse.pServerName + PulseAudio only. The name of the server to connect to with `pa_context_connect()`. + + pulse.tryAutoSpawn + PulseAudio only. Whether or not to try automatically starting the PulseAudio daemon. Defaults to false. If you set this to true, keep in mind that + miniaudio uses a trial and error method to find the most appropriate backend, and this will result in the PulseAudio daemon starting which may be + intrusive for the end user. + + coreaudio.sessionCategory + iOS only. The session category to use for the shared AudioSession instance. Below is a list of allowable values and their Core Audio equivalents. + + |-----------------------------------------|-------------------------------------| + | miniaudio Token | Core Audio Token | + |-----------------------------------------|-------------------------------------| + | ma_ios_session_category_ambient | AVAudioSessionCategoryAmbient | + | ma_ios_session_category_solo_ambient | AVAudioSessionCategorySoloAmbient | + | ma_ios_session_category_playback | AVAudioSessionCategoryPlayback | + | ma_ios_session_category_record | AVAudioSessionCategoryRecord | + | ma_ios_session_category_play_and_record | AVAudioSessionCategoryPlayAndRecord | + | ma_ios_session_category_multi_route | AVAudioSessionCategoryMultiRoute | + | ma_ios_session_category_none | AVAudioSessionCategoryAmbient | + | ma_ios_session_category_default | AVAudioSessionCategoryAmbient | + |-----------------------------------------|-------------------------------------| + + coreaudio.sessionCategoryOptions + iOS only. Session category options to use with the shared AudioSession instance. Below is a list of allowable values and their Core Audio equivalents. 
+
+        |----------------------------------------------------------------------------|-------------------------------------------------------------------|
+        | miniaudio Token                                                            | Core Audio Token                                                  |
+        |----------------------------------------------------------------------------|-------------------------------------------------------------------|
+        | ma_ios_session_category_option_mix_with_others                             | AVAudioSessionCategoryOptionMixWithOthers                         |
+        | ma_ios_session_category_option_duck_others                                 | AVAudioSessionCategoryOptionDuckOthers                            |
+        | ma_ios_session_category_option_allow_bluetooth                             | AVAudioSessionCategoryOptionAllowBluetooth                        |
+        | ma_ios_session_category_option_default_to_speaker                          | AVAudioSessionCategoryOptionDefaultToSpeaker                      |
+        | ma_ios_session_category_option_interrupt_spoken_audio_and_mix_with_others  | AVAudioSessionCategoryOptionInterruptSpokenAudioAndMixWithOthers  |
+        | ma_ios_session_category_option_allow_bluetooth_a2dp                        | AVAudioSessionCategoryOptionAllowBluetoothA2DP                    |
+        | ma_ios_session_category_option_allow_air_play                              | AVAudioSessionCategoryOptionAllowAirPlay                          |
+        |----------------------------------------------------------------------------|-------------------------------------------------------------------|
+
+    coreaudio.noAudioSessionActivate
+        iOS only. When set to true, does not perform an explicit [[AVAudioSession sharedInstance] setActive:true] on initialization.
+
+    coreaudio.noAudioSessionDeactivate
+        iOS only. When set to true, does not perform an explicit [[AVAudioSession sharedInstance] setActive:false] on uninitialization.
+
+    jack.pClientName
+        The name of the client to pass to `jack_client_open()`.
+
+    jack.tryStartServer
+        Whether or not to try auto-starting the JACK server. Defaults to false.
+
+
+It is recommended that only a single context is active at any given time because it's a bulky data structure which performs run-time linking for the
+relevant backends every time it's initialized.
+
+The location of the context cannot change throughout its lifetime. Consider allocating the `ma_context` object with `malloc()` if this is an issue. The
+reason for this is that a pointer to the context is stored in the `ma_device` structure.
+
+
+Example 1 - Default Initialization
+----------------------------------
+The example below shows how to initialize the context using the default configuration.
+
+```c
+ma_context context;
+ma_result result = ma_context_init(NULL, 0, NULL, &context);
+if (result != MA_SUCCESS) {
+    // Error.
+}
+```
+
+
+Example 2 - Custom Configuration
+--------------------------------
+The example below shows how to initialize the context using custom backend priorities and a custom configuration. In this hypothetical example, the program
+wants to prioritize ALSA over PulseAudio on Linux. They also want to avoid using the WinMM backend on Windows because its latency is too high. They also
+want an error to be returned if no valid backend is available, which they achieve by excluding the Null backend.
+
+For the configuration, the program wants to capture any log messages so they can, for example, route them to a log file and user interface.
+
+```c
+ma_backend backends[] = {
+    ma_backend_alsa,
+    ma_backend_pulseaudio,
+    ma_backend_wasapi,
+    ma_backend_dsound
+};
+
+ma_log log;
+ma_log_init(NULL, &log);
+ma_log_register_callback(&log, ma_log_callback_init(my_log_callback, pMyLogUserData));
+
+ma_context_config config = ma_context_config_init();
+config.pLog = &log; // Specify a custom log object in the config so any logs that are posted from ma_context_init() are captured.
+
+ma_context context;
+ma_result result = ma_context_init(backends, sizeof(backends)/sizeof(backends[0]), &config, &context);
+if (result != MA_SUCCESS) {
+    // Error.
+    if (result == MA_NO_BACKEND) {
+        // Couldn't find an appropriate backend.
+    }
+}
+
+// You could also attach a log callback post-initialization:
+ma_log_register_callback(ma_context_get_log(&context), ma_log_callback_init(my_log_callback, pMyLogUserData));
+```
+
+
+See Also
+--------
+ma_context_config_init()
+ma_context_uninit()
+*/
+MA_API ma_result ma_context_init(const ma_backend backends[], ma_uint32 backendCount, const ma_context_config* pConfig, ma_context* pContext);
+
+/*
+Uninitializes a context.
+
+
+Return Value
+------------
+MA_SUCCESS if successful; any other error code otherwise.
+
+
+Thread Safety
+-------------
+Unsafe. Do not call this function across multiple threads as some backends read and write to global state.
+
+
+Remarks
+-------
+Results are undefined if you call this while any device created by this context is still active.
+
+
+See Also
+--------
+ma_context_init()
+*/
+MA_API ma_result ma_context_uninit(ma_context* pContext);
+
+/*
+Retrieves the size of the ma_context object.
+
+This is mainly for the purpose of bindings to know how much memory to allocate.
+*/
+MA_API size_t ma_context_sizeof(void);
+
+/*
+Retrieves a pointer to the log object associated with this context.
+
+
+Remarks
+-------
+Pass the returned pointer to `ma_log_post()`, `ma_log_postv()` or `ma_log_postf()` to post a log
+message.
+
+You can attach your own logging callback to the log with `ma_log_register_callback()`.
+
+
+Return Value
+------------
+A pointer to the `ma_log` object that the context uses to post log messages. If some error occurs,
+NULL will be returned.
+*/
+MA_API ma_log* ma_context_get_log(ma_context* pContext);
+
+/*
+Enumerates over every device (both playback and capture).
+
+This is a lower-level enumeration function than the easier-to-use `ma_context_get_devices()`. Use `ma_context_enumerate_devices()` if you would rather not incur
+an internal heap allocation, or it simply suits your code better.
+
+Note that this only retrieves the ID and name/description of the device. The reason for only retrieving basic information is that it would otherwise require
+opening the backend device in order to probe it for more detailed information which can be inefficient. Consider using `ma_context_get_device_info()` for this,
+but don't call it from within the enumeration callback.
+
+Returning false from the callback will stop enumeration. Returning true will continue enumeration.
+
+
+Parameters
+----------
+pContext (in)
+    A pointer to the context performing the enumeration.
+
+callback (in)
+    The callback to fire for each enumerated device.
+
+pUserData (in)
+    A pointer to application-defined data passed to the callback.
+
+
+Return Value
+------------
+MA_SUCCESS if successful; any other error code otherwise.
+
+
+Thread Safety
+-------------
+Safe. This is guarded using a simple mutex lock.
+
+
+Remarks
+-------
+Do _not_ assume the first enumerated device of a given type is the default device.
+
+Some backends and platforms may only support default playback and capture devices.
+
+In general, you should not do anything complicated from within the callback. In particular, do not try initializing a device from within the callback. Also,
+do not try to call `ma_context_get_device_info()` from within the callback.
+
+Consider using `ma_context_get_devices()` for a simpler and safer API, albeit at the expense of an internal heap allocation.
+
+
+Example 1 - Simple Enumeration
+------------------------------
+ma_bool32 my_device_enum_callback(ma_context* pContext, ma_device_type deviceType, const ma_device_info* pInfo, void* pUserData)
+{
+    printf("Device Name: %s\n", pInfo->name);
+    return MA_TRUE;
+}
+
+ma_result result = ma_context_enumerate_devices(&context, my_device_enum_callback, pMyUserData);
+if (result != MA_SUCCESS) {
+    // Error.
+}
+
+
+See Also
+--------
+ma_context_get_devices()
+*/
+MA_API ma_result ma_context_enumerate_devices(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData);
+
+/*
+Retrieves basic information about every active playback and/or capture device.
+
+This function will allocate memory internally for the device lists and return pointers to them through the `ppPlaybackDeviceInfos` and `ppCaptureDeviceInfos`
+parameters. If you do not want to incur the overhead of these allocations consider using `ma_context_enumerate_devices()` which will instead use a callback.
+
+Note that this only retrieves the ID and name/description of the device. The reason for only retrieving basic information is that it would otherwise require
+opening the backend device in order to probe it for more detailed information which can be inefficient. Consider using `ma_context_get_device_info()` for this,
+but don't call it from within the enumeration callback.
+
+
+Parameters
+----------
+pContext (in)
+    A pointer to the context performing the enumeration.
+
+ppPlaybackDeviceInfos (out)
+    A pointer to a pointer that will receive the address of a buffer containing the list of `ma_device_info` structures for playback devices.
+
+pPlaybackDeviceCount (out)
+    A pointer to an unsigned integer that will receive the number of playback devices.
+
+ppCaptureDeviceInfos (out)
+    A pointer to a pointer that will receive the address of a buffer containing the list of `ma_device_info` structures for capture devices.
+
+pCaptureDeviceCount (out)
+    A pointer to an unsigned integer that will receive the number of capture devices.
+
+
+Return Value
+------------
+MA_SUCCESS if successful; any other error code otherwise.
+
+
+Thread Safety
+-------------
+Unsafe. Since each call to this function invalidates the pointers from the previous call, you should not be calling this simultaneously across multiple
+threads. Instead, you need to make a copy of the returned data with your own higher level synchronization.
+
+
+Remarks
+-------
+It is _not_ safe to assume the first device in the list is the default device.
+
+You can pass in NULL for the playback or capture lists in which case they'll be ignored.
+
+The returned pointers will become invalid upon the next call to this function, or when the context is uninitialized. Do not free the returned pointers.
+
+
+See Also
+--------
+ma_context_enumerate_devices()
+*/
+MA_API ma_result ma_context_get_devices(ma_context* pContext, ma_device_info** ppPlaybackDeviceInfos, ma_uint32* pPlaybackDeviceCount, ma_device_info** ppCaptureDeviceInfos, ma_uint32* pCaptureDeviceCount);
+
+/*
+Retrieves information about a device of the given type, with the specified ID and share mode.
+
+
+Parameters
+----------
+pContext (in)
+    A pointer to the context performing the query.
+
+deviceType (in)
+    The type of the device being queried. Must be either `ma_device_type_playback` or `ma_device_type_capture`.
+
+pDeviceID (in)
+    The ID of the device being queried.
+
+pDeviceInfo (out)
+    A pointer to the `ma_device_info` structure that will receive the device information.
+
+
+Return Value
+------------
+MA_SUCCESS if successful; any other error code otherwise.
+
+
+Thread Safety
+-------------
+Safe. This is guarded using a simple mutex lock.
+
+
+Remarks
+-------
+Do _not_ call this from within the `ma_context_enumerate_devices()` callback.
+
+It's possible for a device to have different information and capabilities depending on whether or not it's opened in shared or exclusive mode. For example, in
+shared mode, WASAPI always uses floating point samples for mixing, but in exclusive mode it can be anything. Therefore, this function allows you to specify
+which share mode you want information for. Note that not all backends and devices support shared or exclusive mode, in which case this function will fail if
+the requested share mode is unsupported.
+
+This function leaves `pDeviceInfo` unmodified in the event of an error.
+*/
+MA_API ma_result ma_context_get_device_info(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_device_info* pDeviceInfo);
+
+/*
+Determines if the given context supports loopback mode.
+
+
+Parameters
+----------
+pContext (in)
+    A pointer to the context being queried.
+
+
+Return Value
+------------
+MA_TRUE if the context supports loopback mode; MA_FALSE otherwise.
+*/
+MA_API ma_bool32 ma_context_is_loopback_supported(ma_context* pContext);
+
+
+
+/*
+Initializes a device config with default settings.
+
+
+Parameters
+----------
+deviceType (in)
+    The type of the device this config is being initialized for. This must be set to one of the following:
+
+    |-------------------------|
+    | Device Type             |
+    |-------------------------|
+    | ma_device_type_playback |
+    | ma_device_type_capture  |
+    | ma_device_type_duplex   |
+    | ma_device_type_loopback |
+    |-------------------------|
+
+
+Return Value
+------------
+A new device config object with default settings. You will typically want to adjust the config after this function returns. See remarks.
+
+
+Thread Safety
+-------------
+Safe.
+
+
+Callback Safety
+---------------
+Safe, but don't try initializing a device in a callback.
+
+
+Remarks
+-------
+The returned config will be initialized to defaults. You will normally want to customize a few variables before initializing the device. See Example 1 for a
+typical configuration which sets the sample format, channel count, sample rate, data callback and user data. These are usually things you will want to change
+before initializing the device.
+
+See `ma_device_init()` for details on specific configuration options.
+
+
+Example 1 - Simple Configuration
+--------------------------------
+The example below is what a program will typically want to configure for each device at a minimum. Notice how `ma_device_config_init()` is called first, and
+then the returned object is modified directly. This is important because it ensures that your program continues to work as new configuration options are added
+to the `ma_device_config` structure.
+
+```c
+ma_device_config config = ma_device_config_init(ma_device_type_playback);
+config.playback.format = ma_format_f32;
+config.playback.channels = 2;
+config.sampleRate = 48000;
+config.dataCallback = ma_data_callback;
+config.pUserData = pMyUserData;
+```
+
+
+See Also
+--------
+ma_device_init()
+ma_device_init_ex()
+*/
+MA_API ma_device_config ma_device_config_init(ma_device_type deviceType);
+
+
+/*
+Initializes a device.
+
+A device represents a physical audio device. 
The idea is you send or receive audio data from the device to either play it back through a speaker, or capture it
+from a microphone. Whether or not you should send or receive data from the device (or both) depends on the type of device you are initializing which can be
+playback, capture, full-duplex or loopback. (Note that loopback mode is only supported on select backends.) Sending and receiving audio data to and from the
+device is done via a callback which is fired by miniaudio at periodic time intervals.
+
+The frequency at which data is delivered to and from a device depends on the size of its period. The size of the period can be defined in terms of PCM frames
+or milliseconds, whichever is more convenient. Generally speaking, the smaller the period, the lower the latency at the expense of higher CPU usage and
+increased risk of glitching due to the more frequent and granular data delivery intervals. The size of a period will depend on your requirements, but
+miniaudio's defaults should work fine for most scenarios. If you're building a game you should leave this fairly small, whereas if you're building a simple
+media player you can make it larger. Note that the period size you request is actually just a hint - miniaudio will tell the backend what you want, but the
+backend is ultimately responsible for what it gives you. You cannot assume you will get exactly what you ask for.
+
+When delivering data to and from a device you need to make sure it's in the correct format which you can set through the device configuration. You just set the
+format that you want to use and miniaudio will perform all of the necessary conversion for you internally. When delivering data to and from the callback you
+can assume the format is the same as what you requested when you initialized the device. See Remarks for more details on miniaudio's data conversion pipeline.
+
+
+Parameters
+----------
+pContext (in, optional)
+    A pointer to the context that owns the device. This can be null, in which case it creates a default context internally.
+
+pConfig (in)
+    A pointer to the device configuration. Cannot be null. See remarks for details.
+
+pDevice (out)
+    A pointer to the device object being initialized.
+
+
+Return Value
+------------
+MA_SUCCESS if successful; any other error code otherwise.
+
+
+Thread Safety
+-------------
+Unsafe. It is not safe to call this function simultaneously for different devices because some backends depend on and mutate global state. The same applies to
+calling this at the same time as `ma_device_uninit()`.
+
+
+Callback Safety
+---------------
+Unsafe. It is not safe to call this inside any callback.
+
+
+Remarks
+-------
+Setting `pContext` to NULL will result in miniaudio creating a default context internally and is equivalent to passing in a context initialized like so:
+
+    ```c
+    ma_context_init(NULL, 0, NULL, &context);
+    ```
+
+Do not set `pContext` to NULL if you need to open multiple devices. You can, however, use NULL when initializing the first device, and then use
+device.pContext for the initialization of other devices.
+
+The device can be configured via the `pConfig` argument. The config object is initialized with `ma_device_config_init()`. Individual configuration settings can
+then be set directly on the structure. Below are the members of the `ma_device_config` object.
+
+    deviceType
+        Must be `ma_device_type_playback`, `ma_device_type_capture`, `ma_device_type_duplex` or `ma_device_type_loopback`.
+
+    sampleRate
+        The sample rate, in hertz. 
The most common sample rates are 48000 and 44100. Setting this to 0 will use the device's native sample rate.
+
+    periodSizeInFrames
+        The desired size of a period in PCM frames. If this is 0, `periodSizeInMilliseconds` will be used instead. If both are 0 the default buffer size will
+        be used depending on the selected performance profile. This value affects latency. See below for details.
+
+    periodSizeInMilliseconds
+        The desired size of a period in milliseconds. If this is 0, `periodSizeInFrames` will be used instead. If both are 0 the default buffer size will be
+        used depending on the selected performance profile. The value affects latency. See below for details.
+
+    periods
+        The number of periods making up the device's entire buffer. The total buffer size is `periodSizeInFrames` or `periodSizeInMilliseconds` multiplied by
+        this value. This is just a hint as backends will be the ones who ultimately decide how your periods will be configured.
+
+    performanceProfile
+        A hint to miniaudio as to the performance requirements of your program. Can be either `ma_performance_profile_low_latency` (default) or
+        `ma_performance_profile_conservative`. This mainly affects the size of default buffers and can usually be left at its default value.
+
+    noPreSilencedOutputBuffer
+        When set to true, the contents of the output buffer passed into the data callback will be left undefined. When set to false (default), the contents of
+        the output buffer will be cleared to zero. You can use this to avoid the overhead of zeroing out the buffer if you can guarantee that your data
+        callback will write to every sample in the output buffer, or if you are doing your own clearing.
+
+    noClip
+        When set to true, the contents of the output buffer are left alone after returning and it will be left up to the backend itself to decide whether or
+        not to clip. When set to false (default), the contents of the output buffer passed into the data callback will be clipped after returning. This only
+        applies when the playback sample format is f32.
+
+    noDisableDenormals
+        By default, miniaudio will disable denormals when the data callback is called. Setting this to true will prevent the disabling of denormals.
+
+    noFixedSizedCallback
+        Allows miniaudio to fire the data callback with any frame count. When this is set to false (the default), the data callback will be fired with a
+        consistent frame count as specified by `periodSizeInFrames` or `periodSizeInMilliseconds`. When set to true, miniaudio will fire the callback with
+        whatever the backend requests, which could be anything.
+
+    dataCallback
+        The callback to fire whenever data is ready to be delivered to or from the device.
+
+    notificationCallback
+        The callback to fire when something has changed with the device, such as whether or not it has been started or stopped.
+
+    pUserData
+        The user data pointer to use with the device. You can access this directly from the device object like `device.pUserData`.
+
+    resampling.algorithm
+        The resampling algorithm to use when miniaudio needs to perform resampling between the rate specified by `sampleRate` and the device's native rate. The
+        default value is `ma_resample_algorithm_linear`, and the quality can be configured with `resampling.linear.lpfOrder`.
+
+    resampling.pBackendVTable
+        A pointer to an optional vtable that can be used for plugging in a custom resampler.
+
+    resampling.pBackendUserData
+        A pointer that will be passed to callbacks in pBackendVTable. 
+
+    resampling.linear.lpfOrder
+        The linear resampler applies a low-pass filter as part of its processing for anti-aliasing. This setting controls the order of the filter. The higher
+        the value, the better the quality, in general. Setting this to 0 will disable low-pass filtering altogether. The maximum value is
+        `MA_MAX_FILTER_ORDER`. The default value is `min(4, MA_MAX_FILTER_ORDER)`.
+
+    playback.pDeviceID
+        A pointer to a `ma_device_id` structure containing the ID of the playback device to initialize. Setting this to NULL (default) will use the system's
+        default playback device. Retrieve the device ID from the `ma_device_info` structure, which can be retrieved using device enumeration.
+
+    playback.format
+        The sample format to use for playback. When set to `ma_format_unknown` the device's native format will be used. This can be retrieved after
+        initialization from the device object directly with `device.playback.format`.
+
+    playback.channels
+        The number of channels to use for playback. When set to 0 the device's native channel count will be used. This can be retrieved after initialization
+        from the device object directly with `device.playback.channels`.
+
+    playback.pChannelMap
+        The channel map to use for playback. When left empty, the device's native channel map will be used. This can be retrieved after initialization from the
+        device object directly with `device.playback.pChannelMap`. When set, the buffer should contain `channels` items.
+
+    playback.shareMode
+        The preferred share mode to use for playback. Can be either `ma_share_mode_shared` (default) or `ma_share_mode_exclusive`. Note that if you specify
+        exclusive mode, but it's not supported by the backend, initialization will fail. You can then fall back to shared mode if desired by changing this to
+        ma_share_mode_shared and reinitializing.
+
+    capture.pDeviceID
+        A pointer to a `ma_device_id` structure containing the ID of the capture device to initialize. Setting this to NULL (default) will use the system's
+        default capture device. Retrieve the device ID from the `ma_device_info` structure, which can be retrieved using device enumeration.
+
+    capture.format
+        The sample format to use for capture. When set to `ma_format_unknown` the device's native format will be used. This can be retrieved after
+        initialization from the device object directly with `device.capture.format`.
+
+    capture.channels
+        The number of channels to use for capture. When set to 0 the device's native channel count will be used. This can be retrieved after initialization
+        from the device object directly with `device.capture.channels`.
+
+    capture.pChannelMap
+        The channel map to use for capture. When left empty, the device's native channel map will be used. This can be retrieved after initialization from the
+        device object directly with `device.capture.pChannelMap`. When set, the buffer should contain `channels` items.
+
+    capture.shareMode
+        The preferred share mode to use for capture. Can be either `ma_share_mode_shared` (default) or `ma_share_mode_exclusive`. Note that if you specify
+        exclusive mode, but it's not supported by the backend, initialization will fail. You can then fall back to shared mode if desired by changing this to
+        ma_share_mode_shared and reinitializing.
+
+    wasapi.noAutoConvertSRC
+        WASAPI only. When set to true, disables WASAPI's automatic resampling and forces the use of miniaudio's resampler. Defaults to false.
+
+    wasapi.noDefaultQualitySRC
+        WASAPI only. Only used when `wasapi.noAutoConvertSRC` is set to false. 
When set to true, disables the use of `AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY`. + You should usually leave this set to false, which is the default. + + wasapi.noAutoStreamRouting + WASAPI only. When set to true, disables automatic stream routing on the WASAPI backend. Defaults to false. + + wasapi.noHardwareOffloading + WASAPI only. When set to true, disables the use of WASAPI's hardware offloading feature. Defaults to false. + + alsa.noMMap + ALSA only. When set to true, disables MMap mode. Defaults to false. + + alsa.noAutoFormat + ALSA only. When set to true, disables ALSA's automatic format conversion by including the SND_PCM_NO_AUTO_FORMAT flag. Defaults to false. + + alsa.noAutoChannels + ALSA only. When set to true, disables ALSA's automatic channel conversion by including the SND_PCM_NO_AUTO_CHANNELS flag. Defaults to false. + + alsa.noAutoResample + ALSA only. When set to true, disables ALSA's automatic resampling by including the SND_PCM_NO_AUTO_RESAMPLE flag. Defaults to false. + + pulse.pStreamNamePlayback + PulseAudio only. Sets the stream name for playback. + + pulse.pStreamNameCapture + PulseAudio only. Sets the stream name for capture. + + pulse.channelMap + PulseAudio only. Sets the channel map that is requested from PulseAudio. See MA_PA_CHANNEL_MAP_* constants. Defaults to MA_PA_CHANNEL_MAP_AIFF. + + coreaudio.allowNominalSampleRateChange + Core Audio only. Desktop only. When enabled, allows the sample rate of the device to be changed at the operating system level. This + is disabled by default in order to prevent intrusive changes to the user's system. This is useful if you want to use a sample rate + that is known to be natively supported by the hardware thereby avoiding the cost of resampling. When set to true, miniaudio will + find the closest match between the sample rate requested in the device config and the sample rates natively supported by the + hardware. When set to false, the sample rate currently set by the operating system will always be used. + + opensl.streamType + OpenSL only. Explicitly sets the stream type. If left unset (`ma_opensl_stream_type_default`), the + stream type will be left unset. Think of this as the type of audio you're playing. + + opensl.recordingPreset + OpenSL only. Explicitly sets the type of recording your program will be doing. When left + unset, the recording preset will be left unchanged. + + aaudio.usage + AAudio only. Explicitly sets the nature of the audio the program will be consuming. When + left unset, the usage will be left unchanged. + + aaudio.contentType + AAudio only. Sets the content type. When left unset, the content type will be left unchanged. + + aaudio.inputPreset + AAudio only. Explicitly sets the type of recording your program will be doing. When left + unset, the input preset will be left unchanged. + + aaudio.noAutoStartAfterReroute + AAudio only. Controls whether or not the device should be automatically restarted after a + stream reroute. When set to false (default) the device will be restarted automatically; + otherwise the device will be stopped. + + +Once initialized, the device's config is immutable. If you need to change the config you will need to initialize a new device. + +After initializing the device it will be in a stopped state. To start it, use `ma_device_start()`. 
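+
+For reference, the following is a minimal, illustrative sketch of a data callback for a playback device configured with `ma_format_f32` and 2 channels. The
+names are hypothetical; only the callback signature is prescribed by miniaudio.
+
+```c
+void ma_data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount)
+{
+    float* pOut = (float*)pOutput;      // Matches the format/channels requested in the config.
+    ma_uint32 iFrame;
+
+    for (iFrame = 0; iFrame < frameCount; iFrame += 1) {
+        float sample = 0.0f;            // Generate or mix your audio here.
+        pOut[iFrame*2 + 0] = sample;    // Left.
+        pOut[iFrame*2 + 1] = sample;    // Right.
+    }
+
+    (void)pInput;   // Not used for playback devices.
+    (void)pDevice;
+}
+```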
+
+If both `periodSizeInFrames` and `periodSizeInMilliseconds` are set to zero, it will default to `MA_DEFAULT_PERIOD_SIZE_IN_MILLISECONDS_LOW_LATENCY` or
+`MA_DEFAULT_PERIOD_SIZE_IN_MILLISECONDS_CONSERVATIVE`, depending on whether or not `performanceProfile` is set to `ma_performance_profile_low_latency` or
+`ma_performance_profile_conservative`.
+
+If you request exclusive mode and the backend does not support it, an error will be returned. For robustness, you may want to first try initializing the device
+in exclusive mode, and then fall back to shared mode if required. Alternatively, you can just request shared mode (the default if you leave it unset in the
+config), which is the most reliable option. Some backends do not have a practical way of choosing whether the device should be exclusive or not (ALSA,
+for example), in which case it just acts as a hint. Unless you have special requirements, you should avoid exclusive mode as it's intrusive to the user.
+Starting with Windows 10, miniaudio will use low-latency shared mode where possible, which may make exclusive mode unnecessary.
+
+When sending or receiving data to/from a device, miniaudio will internally perform a format conversion to convert between the format specified by the config
+and the format used internally by the backend. If you pass in 0 for the sample format, channel count, sample rate _and_ channel map, data transmission will run
+on an optimized pass-through fast path. You can retrieve the format, channel count and sample rate by inspecting the `playback/capture.format`,
+`playback/capture.channels` and `sampleRate` members of the device object.
+
+When compiling for UWP you must ensure you call this function on the main UI thread because the operating system may need to present the user with a message
+asking for permissions. Please refer to the official documentation for ActivateAudioInterfaceAsync() for more information.
+
+ALSA Specific: When initializing the default device, requesting shared mode will try using the "dmix" device for playback and the "dsnoop" device for capture.
+If these fail it will try falling back to the "hw" device.
+
+
+Example 1 - Simple Initialization
+---------------------------------
+This example shows how to initialize a simple playback device using a standard configuration. If you just need simple playback from the default
+playback device, this is usually all you need.
+
+```c
+ma_device_config config = ma_device_config_init(ma_device_type_playback);
+config.playback.format = ma_format_f32;
+config.playback.channels = 2;
+config.sampleRate = 48000;
+config.dataCallback = ma_data_callback;
+config.pUserData = pMyUserData;
+
+ma_device device;
+ma_result result = ma_device_init(NULL, &config, &device);
+if (result != MA_SUCCESS) {
+    // Error
+}
+```
+
+
+Example 2 - Advanced Initialization
+-----------------------------------
+This example shows how you might do some more advanced initialization. In this hypothetical example we want to control the latency by setting the buffer size
+and period count. We also want to allow the user to choose which device to output from, which means we need a context so we can perform device
+enumeration.
+
+```c
+ma_context context;
+ma_result result = ma_context_init(NULL, 0, NULL, &context);
+if (result != MA_SUCCESS) {
+    // Error
+}
+
+ma_device_info* pPlaybackDeviceInfos;
+ma_uint32 playbackDeviceCount;
+result = ma_context_get_devices(&context, &pPlaybackDeviceInfos, &playbackDeviceCount, NULL, NULL);
+if (result != MA_SUCCESS) {
+    // Error
+}
+
+// ... choose a device from pPlaybackDeviceInfos ...
+
+ma_device_config config = ma_device_config_init(ma_device_type_playback);
+config.playback.pDeviceID = pMyChosenDeviceID; // <-- Get this from the `id` member of one of the `ma_device_info` objects returned by ma_context_get_devices().
+config.playback.format = ma_format_f32;
+config.playback.channels = 2;
+config.sampleRate = 48000;
+config.dataCallback = ma_data_callback;
+config.pUserData = pMyUserData;
+config.periodSizeInMilliseconds = 10;
+config.periods = 3;
+
+ma_device device;
+result = ma_device_init(&context, &config, &device);
+if (result != MA_SUCCESS) {
+    // Error
+}
+```
+
+
+See Also
+--------
+ma_device_config_init()
+ma_device_uninit()
+ma_device_start()
+ma_context_init()
+ma_context_get_devices()
+ma_context_enumerate_devices()
+*/
+MA_API ma_result ma_device_init(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice);
+
+/*
+Initializes a device without a context, with extra parameters for controlling the configuration of the internal self-managed context.
+
+This is the same as `ma_device_init()`, only instead of a context being passed in, the parameters from `ma_context_init()` are passed in. This function
+allows you to configure the internally created context.
+
+
+Parameters
+----------
+backends (in, optional)
+    A list of backends to try initializing, in priority order. Can be NULL, in which case it uses default priority order.
+
+backendCount (in, optional)
+    The number of items in `backends`. Ignored if `backends` is NULL.
+
+pContextConfig (in, optional)
+    The context configuration.
+
+pConfig (in)
+    A pointer to the device configuration. Cannot be null. See remarks for details.
+
+pDevice (out)
+    A pointer to the device object being initialized.
+
+
+Return Value
+------------
+MA_SUCCESS if successful; any other error code otherwise.
+
+
+Thread Safety
+-------------
+Unsafe. It is not safe to call this function simultaneously for different devices because some backends depend on and mutate global state. The same applies to
+calling this at the same time as `ma_device_uninit()`.
+
+
+Callback Safety
+---------------
+Unsafe. It is not safe to call this inside any callback.
+
+
+Remarks
+-------
+You only need to use this function if you want to configure the context differently to its defaults. You should never use this function if you want to manage
+your own context.
+
+See the documentation for `ma_context_init()` for information on the different context configuration options.
+
+
+See Also
+--------
+ma_device_init()
+ma_device_uninit()
+ma_device_config_init()
+ma_context_init()
+*/
+MA_API ma_result ma_device_init_ex(const ma_backend backends[], ma_uint32 backendCount, const ma_context_config* pContextConfig, const ma_device_config* pConfig, ma_device* pDevice);
+
+/*
+Uninitializes a device.
+
+This will explicitly stop the device. You do not need to call `ma_device_stop()` beforehand, but it's harmless if you do.
+
+
+Parameters
+----------
+pDevice (in)
+    A pointer to the device to uninitialize.
+
+
+Return Value
+------------
+Nothing
+
+
+Thread Safety
+-------------
+Unsafe. 
As soon as this API is called, the device should be considered undefined.
+
+
+Callback Safety
+---------------
+Unsafe. It is not safe to call this inside any callback. Doing this will result in a deadlock.
+
+
+See Also
+--------
+ma_device_init()
+ma_device_stop()
+*/
+MA_API void ma_device_uninit(ma_device* pDevice);
+
+
+/*
+Retrieves a pointer to the context that owns the given device.
+*/
+MA_API ma_context* ma_device_get_context(ma_device* pDevice);
+
+/*
+Helper function for retrieving the log object associated with the context that owns this device.
+*/
+MA_API ma_log* ma_device_get_log(ma_device* pDevice);
+
+
+/*
+Retrieves information about the device.
+
+
+Parameters
+----------
+pDevice (in)
+    A pointer to the device whose information is being retrieved.
+
+type (in)
+    The device type. This parameter is required for duplex devices. When retrieving device
+    information, you are doing so for an individual playback or capture device.
+
+pDeviceInfo (out)
+    A pointer to the `ma_device_info` that will receive the device information.
+
+
+Return Value
+------------
+MA_SUCCESS if successful; any other error code otherwise.
+
+
+Thread Safety
+-------------
+Unsafe. This should be considered unsafe because it may be calling into the backend which may or
+may not be safe.
+
+
+Callback Safety
+---------------
+Unsafe. You should avoid calling this in the data callback because it may call into the backend
+which may or may not be safe.
+*/
+MA_API ma_result ma_device_get_info(ma_device* pDevice, ma_device_type type, ma_device_info* pDeviceInfo);
+
+
+/*
+Retrieves the name of the device.
+
+
+Parameters
+----------
+pDevice (in)
+    A pointer to the device whose information is being retrieved.
+
+type (in)
+    The device type. This parameter is required for duplex devices. When retrieving device
+    information, you are doing so for an individual playback or capture device.
+
+pName (out)
+    A pointer to the buffer that will receive the name.
+
+nameCap (in)
+    The capacity of the output buffer, including space for the null terminator.
+
+pLengthNotIncludingNullTerminator (out, optional)
+    A pointer to the variable that will receive the length of the name, not including the null
+    terminator.
+
+
+Return Value
+------------
+MA_SUCCESS if successful; any other error code otherwise.
+
+
+Thread Safety
+-------------
+Unsafe. This should be considered unsafe because it may be calling into the backend which may or
+may not be safe.
+
+
+Callback Safety
+---------------
+Unsafe. You should avoid calling this in the data callback because it may call into the backend
+which may or may not be safe.
+
+
+Remarks
+-------
+If the name does not fully fit into the output buffer, it'll be truncated. You can pass in NULL to
+`pName` if you want to first get the length of the name for the purpose of memory allocation of the
+output buffer. Allocating a buffer of size `MA_MAX_DEVICE_NAME_LENGTH + 1` should be enough for
+most cases and will avoid the inefficiency of calling this function twice.
+
+This is implemented in terms of `ma_device_get_info()`.
+*/
+MA_API ma_result ma_device_get_name(ma_device* pDevice, ma_device_type type, char* pName, size_t nameCap, size_t* pLengthNotIncludingNullTerminator);
+
+
+/*
+Starts the device. For playback devices this begins playback. For capture devices it begins recording.
+
+Use `ma_device_stop()` to stop the device.
+
+
+Parameters
+----------
+pDevice (in)
+    A pointer to the device to start.
+
+
+Return Value
+------------
+MA_SUCCESS if successful; any other error code otherwise.
+
+
+Thread Safety
+-------------
+Safe. It's safe to call this from any thread with the exception of the callback thread.
+
+
+Callback Safety
+---------------
+Unsafe. It is not safe to call this inside any callback.
+
+
+Remarks
+-------
+For a playback device, this will retrieve an initial chunk of audio data from the client before returning. The reason for this is to ensure there is valid
+audio data in the buffer, which needs to be done before the device begins playback.
+
+This API waits until the backend device has been started for real by the worker thread. It also waits on a mutex for thread-safety.
+
+Do not call this in any callback.
+
+
+See Also
+--------
+ma_device_stop()
+*/
+MA_API ma_result ma_device_start(ma_device* pDevice);
+
+/*
+Stops the device. For playback devices this stops playback. For capture devices it stops recording.
+
+Use `ma_device_start()` to start the device again.
+
+
+Parameters
+----------
+pDevice (in)
+    A pointer to the device to stop.
+
+
+Return Value
+------------
+MA_SUCCESS if successful; any other error code otherwise.
+
+
+Thread Safety
+-------------
+Safe. It's safe to call this from any thread with the exception of the callback thread.
+
+
+Callback Safety
+---------------
+Unsafe. It is not safe to call this inside any callback. Doing this will result in a deadlock.
+
+
+Remarks
+-------
+This API needs to wait on the worker thread to stop the backend device properly before returning. It also waits on a mutex for thread-safety. In addition, some
+backends need to wait for the device to finish playback/recording of the current fragment which can take some time (usually proportionate to the buffer size
+that was specified at initialization time).
+
+Backends are required to either pause the stream in-place or drain the buffer if pausing is not possible. The reason for this is that stopping the device and
+then resuming it with ma_device_start() (which you might do when your program loses focus) may result in a situation where those samples are never output to the
+speakers or received from the microphone which can in turn result in de-syncs.
+
+Do not call this in any callback.
+
+
+See Also
+--------
+ma_device_start()
+*/
+MA_API ma_result ma_device_stop(ma_device* pDevice);
+
+/*
+Determines whether or not the device is started.
+
+
+Parameters
+----------
+pDevice (in)
+    A pointer to the device whose start state is being retrieved.
+
+
+Return Value
+------------
+True if the device is started, false otherwise.
+
+
+Thread Safety
+-------------
+Safe. If another thread calls `ma_device_start()` or `ma_device_stop()` at the same time as this function is called, there's a very small chance the return
+value will be out of sync.
+
+
+Callback Safety
+---------------
+Safe. This is implemented as a simple accessor.
+
+
+See Also
+--------
+ma_device_start()
+ma_device_stop()
+*/
+MA_API ma_bool32 ma_device_is_started(const ma_device* pDevice);
+
+
+/*
+Retrieves the state of the device.
+
+
+Parameters
+----------
+pDevice (in)
+    A pointer to the device whose state is being retrieved.
+
+
+Return Value
+------------
+The current state of the device. The return value will be one of the following:
+
+    +-------------------------------+------------------------------------------------------------------------------+
+    | ma_device_state_uninitialized | Will only be returned if the device is in the middle of initialization.      
|
+    +-------------------------------+------------------------------------------------------------------------------+
+    | ma_device_state_stopped       | The device is stopped. The initial state of the device after initialization. |
+    +-------------------------------+------------------------------------------------------------------------------+
+    | ma_device_state_started       | The device is started and is requesting and/or delivering audio data.        |
+    +-------------------------------+------------------------------------------------------------------------------+
+    | ma_device_state_starting      | The device is in the process of starting.                                    |
+    +-------------------------------+------------------------------------------------------------------------------+
+    | ma_device_state_stopping      | The device is in the process of stopping.                                    |
+    +-------------------------------+------------------------------------------------------------------------------+
+
+
+Thread Safety
+-------------
+Safe. This is implemented as a simple accessor. Note that if the device is started or stopped at the same time as this function is called,
+there's a possibility the return value could be out of sync. See remarks.
+
+
+Callback Safety
+---------------
+Safe. This is implemented as a simple accessor.
+
+
+Remarks
+-------
+The general flow of a device's state goes like this:
+
+    ```
+    ma_device_init()  -> ma_device_state_uninitialized -> ma_device_state_stopped
+    ma_device_start() -> ma_device_state_starting      -> ma_device_state_started
+    ma_device_stop()  -> ma_device_state_stopping      -> ma_device_state_stopped
+    ```
+
+When the state of the device is changed with `ma_device_start()` or `ma_device_stop()` at the same time as this function is called, the
+value returned by this function could potentially be out of sync. If this is significant to your program you need to implement your own
+synchronization.
+*/
+MA_API ma_device_state ma_device_get_state(const ma_device* pDevice);
+
+
+/*
+Performs post backend initialization routines for setting up internal data conversion.
+
+This should be called whenever the backend is initialized. The only time this should be called from
+outside of miniaudio is if you're implementing a custom backend, and you would only do it if you
+are reinitializing the backend due to rerouting or for some other reason.
+
+
+Parameters
+----------
+pDevice (in)
+    A pointer to the device.
+
+deviceType (in)
+    The type of the device that was just reinitialized.
+
+pPlaybackDescriptor (in)
+    The descriptor of the playback device containing the internal data format and buffer sizes.
+
+pCaptureDescriptor (in)
+    The descriptor of the capture device containing the internal data format and buffer sizes.
+
+
+Return Value
+------------
+MA_SUCCESS if successful; any other error otherwise.
+
+
+Thread Safety
+-------------
+Unsafe. This will be reinitializing internal data converters which may be in use by another thread.
+
+
+Callback Safety
+---------------
+Unsafe. This will be reinitializing internal data converters which may be in use by the callback.
+
+
+Remarks
+-------
+For a duplex device, you can call this for only one side of the system. This is why the deviceType
+is specified as a parameter rather than deriving it from the device.
+
+You do not need to call this manually unless you are doing a custom backend, in which case you need
+only do it if you're manually performing rerouting or reinitialization.
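+
+
+Example 1 - Custom Backend Reroute (sketch)
+-------------------------------------------
+The following is illustrative only. It assumes a custom backend that has just rerouted its playback
+device, that `ma_device_descriptor` exposes `format`, `channels`, `sampleRate` and
+`periodSizeInFrames` members describing the backend's new internal data format, and that NULL is
+acceptable for the unused capture descriptor.
+
+```c
+ma_device_descriptor descriptor = {0};          // Zero-initialize so unspecified members are well defined.
+descriptor.format             = ma_format_f32;  // The backend's new internal format.
+descriptor.channels           = 2;
+descriptor.sampleRate         = 48000;
+descriptor.periodSizeInFrames = 480;
+
+// Rebuild miniaudio's internal data converters for the playback side only.
+ma_result result = ma_device_post_init(pDevice, ma_device_type_playback, &descriptor, NULL);
+if (result != MA_SUCCESS) {
+    // Error.
+}
+```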
+
+*/
+MA_API ma_result ma_device_post_init(ma_device* pDevice, ma_device_type deviceType, const ma_device_descriptor* pPlaybackDescriptor, const ma_device_descriptor* pCaptureDescriptor);
+
+
+/*
+Sets the master volume factor for the device.
+
+The volume factor must be between 0 (silence) and 1 (full volume). Use `ma_device_set_master_volume_db()` to use decibel notation, where 0 is full volume and
+values less than 0 decrease the volume.
+
+
+Parameters
+----------
+pDevice (in)
+    A pointer to the device whose volume is being set.
+
+volume (in)
+    The new volume factor. Must be >= 0.
+
+
+Return Value
+------------
+MA_SUCCESS if the volume was set successfully.
+MA_INVALID_ARGS if pDevice is NULL.
+MA_INVALID_ARGS if volume is negative.
+
+
+Thread Safety
+-------------
+Safe. This just sets a local member of the device object.
+
+
+Callback Safety
+---------------
+Safe. If you set the volume in the data callback, the data written to the output buffer will have the new volume applied.
+
+
+Remarks
+-------
+This applies the volume factor across all channels.
+
+This does not change the operating system's volume. It only affects the volume for the given `ma_device` object's audio stream.
+
+
+See Also
+--------
+ma_device_get_master_volume()
+ma_device_set_master_volume_db()
+ma_device_get_master_volume_db()
+*/
+MA_API ma_result ma_device_set_master_volume(ma_device* pDevice, float volume);
+
+/*
+Retrieves the master volume factor for the device.
+
+
+Parameters
+----------
+pDevice (in)
+    A pointer to the device whose volume factor is being retrieved.
+
+pVolume (out)
+    A pointer to the variable that will receive the volume factor. The returned value will be in the range of [0, 1].
+
+
+Return Value
+------------
+MA_SUCCESS if successful.
+MA_INVALID_ARGS if pDevice is NULL.
+MA_INVALID_ARGS if pVolume is NULL.
+
+
+Thread Safety
+-------------
+Safe. This is just a simple member retrieval.
+
+
+Callback Safety
+---------------
+Safe.
+
+
+Remarks
+-------
+If an error occurs, `*pVolume` will be set to 0.
+
+
+See Also
+--------
+ma_device_set_master_volume()
+ma_device_set_master_volume_db()
+ma_device_get_master_volume_db()
+*/
+MA_API ma_result ma_device_get_master_volume(ma_device* pDevice, float* pVolume);
+
+/*
+Sets the master volume for the device as gain in decibels.
+
+A gain of 0 is full volume, whereas a gain of < 0 will decrease the volume.
+
+
+Parameters
+----------
+pDevice (in)
+    A pointer to the device whose gain is being set.
+
+gainDB (in)
+    The new volume as gain in decibels. Must be less than or equal to 0, where 0 is full volume and anything less than 0 decreases the volume.
+
+
+Return Value
+------------
+MA_SUCCESS if the volume was set successfully.
+MA_INVALID_ARGS if pDevice is NULL.
+MA_INVALID_ARGS if the gain is > 0.
+
+
+Thread Safety
+-------------
+Safe. This just sets a local member of the device object.
+
+
+Callback Safety
+---------------
+Safe. If you set the volume in the data callback, the data written to the output buffer will have the new volume applied.
+
+
+Remarks
+-------
+This applies the gain across all channels.
+
+This does not change the operating system's volume. It only affects the volume for the given `ma_device` object's audio stream.
+
+
+See Also
+--------
+ma_device_get_master_volume_db()
+ma_device_set_master_volume()
+ma_device_get_master_volume()
+*/
+MA_API ma_result ma_device_set_master_volume_db(ma_device* pDevice, float gainDB);
+
+/*
+Retrieves the master gain in decibels.
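+
+For example, a minimal sketch (assuming an initialized `device`) that halves the output amplitude using decibel notation and then reads the value back:
+
+```c
+ma_device_set_master_volume_db(&device, -6.0f);     // -6dB is roughly half amplitude.
+
+float gainDB;
+ma_result result = ma_device_get_master_volume_db(&device, &gainDB);
+if (result != MA_SUCCESS) {
+    // Error.
+}
+```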
+
+
+Parameters
+----------
+pDevice (in)
+    A pointer to the device whose gain is being retrieved.
+
+pGainDB (out)
+    A pointer to the variable that will receive the gain in decibels. The returned value will be <= 0.
+
+
+Return Value
+------------
+MA_SUCCESS if successful.
+MA_INVALID_ARGS if pDevice is NULL.
+MA_INVALID_ARGS if pGainDB is NULL.
+
+
+Thread Safety
+-------------
+Safe. This is just a simple member retrieval.
+
+
+Callback Safety
+---------------
+Safe.
+
+
+Remarks
+-------
+If an error occurs, `*pGainDB` will be set to 0.
+
+
+See Also
+--------
+ma_device_set_master_volume_db()
+ma_device_set_master_volume()
+ma_device_get_master_volume()
+*/
+MA_API ma_result ma_device_get_master_volume_db(ma_device* pDevice, float* pGainDB);
+
+
+/*
+Called from the data callback of asynchronous backends to allow miniaudio to process the data and fire the miniaudio data callback.
+
+
+Parameters
+----------
+pDevice (in)
+    A pointer to the device that is processing the data callback.
+
+pOutput (out)
+    A pointer to the buffer that will receive the output PCM frame data. On a playback device this must not be NULL. On a duplex device
+    this can be NULL, in which case pInput must not be NULL.
+
+pInput (in)
+    A pointer to the buffer containing input PCM frame data. On a capture device this must not be NULL. On a duplex device this can be
+    NULL, in which case `pOutput` must not be NULL.
+
+frameCount (in)
+    The number of frames being processed.
+
+
+Return Value
+------------
+MA_SUCCESS if successful; any other result code otherwise.
+
+
+Thread Safety
+-------------
+This function should only ever be called from the internal data callback of the backend. It is safe to call this simultaneously between a
+playback and capture device in duplex setups.
+
+
+Callback Safety
+---------------
+Do not call this from the miniaudio data callback. It should only ever be called from the internal data callback of the backend.
+
+
+Remarks
+-------
+If both `pOutput` and `pInput` are NULL, an error will be returned. In duplex scenarios, both `pOutput` and `pInput` can be non-NULL, in
+which case `pInput` will be processed first, followed by `pOutput`.
+
+If you are implementing a custom backend, and that backend uses a callback for data delivery, you'll need to call this from inside that
+callback.
+*/
+MA_API ma_result ma_device_handle_backend_data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount);
+
+
+/*
+Calculates an appropriate buffer size from a descriptor, native sample rate and performance profile.
+
+This function is used by backends for helping determine an appropriately sized buffer to use with
+the device depending on the values of `periodSizeInFrames` and `periodSizeInMilliseconds` in the
+`pDescriptor` object. Since buffer size calculations based on time depend on the sample rate, a
+best guess at the device's native sample rate is also required which is where `nativeSampleRate`
+comes in. In addition, the performance profile is also needed for cases where the period size in
+frames and in milliseconds are both zero.
+
+
+Parameters
+----------
+pDescriptor (in)
+    A pointer to the device descriptor whose `periodSizeInFrames` and `periodSizeInMilliseconds` members
+    will be used for the calculation of the buffer size.
+
+nativeSampleRate (in)
+    The device's native sample rate. This is only ever used when the `periodSizeInFrames` member of
+    `pDescriptor` is zero. 
In this case, `periodSizeInMilliseconds` will be used instead, in which
+    case a sample rate is required to convert to a size in frames.
+
+performanceProfile (in)
+    When both the `periodSizeInFrames` and `periodSizeInMilliseconds` members of `pDescriptor` are
+    zero, miniaudio will fall back to a buffer size based on the performance profile. The profile
+    to use for this calculation is determined by this parameter.
+
+
+Return Value
+------------
+The calculated buffer size in frames.
+
+
+Thread Safety
+-------------
+This is safe so long as nothing modifies `pDescriptor` at the same time. However, this function
+should only ever be called from within the backend's device initialization routine and therefore
+shouldn't have any multithreading concerns.
+
+
+Callback Safety
+---------------
+This is safe to call within the data callback, but there is no reason to ever do this.
+
+
+Remarks
+-------
+If `nativeSampleRate` is zero, this function will fall back to `pDescriptor->sampleRate`. If that
+is also zero, `MA_DEFAULT_SAMPLE_RATE` will be used instead.
+*/
+MA_API ma_uint32 ma_calculate_buffer_size_in_frames_from_descriptor(const ma_device_descriptor* pDescriptor, ma_uint32 nativeSampleRate, ma_performance_profile performanceProfile);
+
+
+
+/*
+Retrieves a friendly name for a backend.
+*/
+MA_API const char* ma_get_backend_name(ma_backend backend);
+
+/*
+Retrieves the backend enum from the given name.
+*/
+MA_API ma_result ma_get_backend_from_name(const char* pBackendName, ma_backend* pBackend);
+
+/*
+Determines whether or not the given backend is enabled by the compilation environment.
+*/
+MA_API ma_bool32 ma_is_backend_enabled(ma_backend backend);
+
+/*
+Retrieves compile-time enabled backends.
+
+
+Parameters
+----------
+pBackends (out, optional)
+    A pointer to the buffer that will receive the enabled backends. Set to NULL to retrieve the backend count. Setting
+    the capacity of the buffer to `MA_BACKEND_COUNT` will guarantee it's large enough for all backends.
+
+backendCap (in)
+    The capacity of the `pBackends` buffer.
+
+pBackendCount (out)
+    A pointer to the variable that will receive the enabled backend count.
+
+
+Return Value
+------------
+MA_SUCCESS if successful.
+MA_INVALID_ARGS if `pBackendCount` is NULL.
+MA_NO_SPACE if the capacity of `pBackends` is not large enough.
+
+If `MA_NO_SPACE` is returned, the `pBackends` buffer will be filled with `*pBackendCount` values.
+
+
+Thread Safety
+-------------
+Safe.
+
+
+Callback Safety
+---------------
+Safe.
+
+
+Remarks
+-------
+If you want to retrieve the number of backends so you can determine the capacity of the `pBackends` buffer, you can call
+this function with `pBackends` set to NULL.
+
+This will also enumerate the null backend. If you don't want to include this you need to check for `ma_backend_null`
+when you enumerate over the returned backends and handle it appropriately. Alternatively, you can disable it at
+compile time with `MA_NO_NULL`.
+
+The returned backends are determined based on compile time settings, not the platform it's currently running on. For
+example, PulseAudio will be returned if it was enabled at compile time, even when the user doesn't actually have
+PulseAudio installed.
+
+
+Example 1
+---------
+The example below retrieves the enabled backends using a fixed-sized buffer allocated on the stack. The buffer is
+given a capacity of `MA_BACKEND_COUNT` which will guarantee it'll be large enough to store all available backends.
+
+Since `MA_BACKEND_COUNT` is always a relatively small value, this should be suitable for most scenarios.
+
+```c
+ma_backend enabledBackends[MA_BACKEND_COUNT];
+size_t enabledBackendCount;
+
+ma_result result = ma_get_enabled_backends(enabledBackends, MA_BACKEND_COUNT, &enabledBackendCount);
+if (result != MA_SUCCESS) {
+    // Failed to retrieve enabled backends. Should never happen in this example since all inputs are valid.
+}
+```
+
+
+See Also
+--------
+ma_is_backend_enabled()
+*/
+MA_API ma_result ma_get_enabled_backends(ma_backend* pBackends, size_t backendCap, size_t* pBackendCount);
+
+/*
+Determines whether or not loopback mode is supported by a backend.
+*/
+MA_API ma_bool32 ma_is_loopback_supported(ma_backend backend);
+
+#endif /* MA_NO_DEVICE_IO */
+
+
+
+/************************************************************************************************************************************************************
+
+Utilities
+
+************************************************************************************************************************************************************/
+
+/*
+Calculates a buffer size in milliseconds (rounded up) from the specified number of frames and sample rate.
+*/
+MA_API ma_uint32 ma_calculate_buffer_size_in_milliseconds_from_frames(ma_uint32 bufferSizeInFrames, ma_uint32 sampleRate);
+
+/*
+Calculates a buffer size in frames from the specified number of milliseconds and sample rate.
+*/
+MA_API ma_uint32 ma_calculate_buffer_size_in_frames_from_milliseconds(ma_uint32 bufferSizeInMilliseconds, ma_uint32 sampleRate);
+
+/*
+Copies PCM frames from one buffer to another.
+*/
+MA_API void ma_copy_pcm_frames(void* dst, const void* src, ma_uint64 frameCount, ma_format format, ma_uint32 channels);
+
+/*
+Copies silent frames into the given buffer.
+
+Remarks
+-------
+For all formats except `ma_format_u8`, the output buffer will be filled with 0. For `ma_format_u8` it will be filled with 128. The reason for this is that it
+makes more sense for the purpose of mixing to initialize it to the center point.
+*/
+MA_API void ma_silence_pcm_frames(void* p, ma_uint64 frameCount, ma_format format, ma_uint32 channels);
+
+
+/*
+Offsets a pointer by the specified number of PCM frames.
+*/
+MA_API void* ma_offset_pcm_frames_ptr(void* p, ma_uint64 offsetInFrames, ma_format format, ma_uint32 channels);
+MA_API const void* ma_offset_pcm_frames_const_ptr(const void* p, ma_uint64 offsetInFrames, ma_format format, ma_uint32 channels);
+static MA_INLINE float* ma_offset_pcm_frames_ptr_f32(float* p, ma_uint64 offsetInFrames, ma_uint32 channels) { return (float*)ma_offset_pcm_frames_ptr((void*)p, offsetInFrames, ma_format_f32, channels); }
+static MA_INLINE const float* ma_offset_pcm_frames_const_ptr_f32(const float* p, ma_uint64 offsetInFrames, ma_uint32 channels) { return (const float*)ma_offset_pcm_frames_const_ptr((const void*)p, offsetInFrames, ma_format_f32, channels); }
+
+
+/*
+Clips samples.
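+
+For example, a minimal sketch clamping interleaved f32 samples to [-1, 1] in-place (assuming in-place operation is permitted here, as it is for the volume
+helpers below):
+
+```c
+// The total sample count is frameCount * channels.
+ma_clip_samples_f32(pSamples, pSamples, (ma_uint64)frameCount * channels);
+```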
+*/ +MA_API void ma_clip_samples_u8(ma_uint8* pDst, const ma_int16* pSrc, ma_uint64 count); +MA_API void ma_clip_samples_s16(ma_int16* pDst, const ma_int32* pSrc, ma_uint64 count); +MA_API void ma_clip_samples_s24(ma_uint8* pDst, const ma_int64* pSrc, ma_uint64 count); +MA_API void ma_clip_samples_s32(ma_int32* pDst, const ma_int64* pSrc, ma_uint64 count); +MA_API void ma_clip_samples_f32(float* pDst, const float* pSrc, ma_uint64 count); +MA_API void ma_clip_pcm_frames(void* pDst, const void* pSrc, ma_uint64 frameCount, ma_format format, ma_uint32 channels); + +/* +Helper for applying a volume factor to samples. + +Note that the source and destination buffers can be the same, in which case it'll perform the operation in-place. +*/ +MA_API void ma_copy_and_apply_volume_factor_u8(ma_uint8* pSamplesOut, const ma_uint8* pSamplesIn, ma_uint64 sampleCount, float factor); +MA_API void ma_copy_and_apply_volume_factor_s16(ma_int16* pSamplesOut, const ma_int16* pSamplesIn, ma_uint64 sampleCount, float factor); +MA_API void ma_copy_and_apply_volume_factor_s24(void* pSamplesOut, const void* pSamplesIn, ma_uint64 sampleCount, float factor); +MA_API void ma_copy_and_apply_volume_factor_s32(ma_int32* pSamplesOut, const ma_int32* pSamplesIn, ma_uint64 sampleCount, float factor); +MA_API void ma_copy_and_apply_volume_factor_f32(float* pSamplesOut, const float* pSamplesIn, ma_uint64 sampleCount, float factor); + +MA_API void ma_apply_volume_factor_u8(ma_uint8* pSamples, ma_uint64 sampleCount, float factor); +MA_API void ma_apply_volume_factor_s16(ma_int16* pSamples, ma_uint64 sampleCount, float factor); +MA_API void ma_apply_volume_factor_s24(void* pSamples, ma_uint64 sampleCount, float factor); +MA_API void ma_apply_volume_factor_s32(ma_int32* pSamples, ma_uint64 sampleCount, float factor); +MA_API void ma_apply_volume_factor_f32(float* pSamples, ma_uint64 sampleCount, float factor); + +MA_API void ma_copy_and_apply_volume_factor_pcm_frames_u8(ma_uint8* pFramesOut, const ma_uint8* pFramesIn, ma_uint64 frameCount, ma_uint32 channels, float factor); +MA_API void ma_copy_and_apply_volume_factor_pcm_frames_s16(ma_int16* pFramesOut, const ma_int16* pFramesIn, ma_uint64 frameCount, ma_uint32 channels, float factor); +MA_API void ma_copy_and_apply_volume_factor_pcm_frames_s24(void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount, ma_uint32 channels, float factor); +MA_API void ma_copy_and_apply_volume_factor_pcm_frames_s32(ma_int32* pFramesOut, const ma_int32* pFramesIn, ma_uint64 frameCount, ma_uint32 channels, float factor); +MA_API void ma_copy_and_apply_volume_factor_pcm_frames_f32(float* pFramesOut, const float* pFramesIn, ma_uint64 frameCount, ma_uint32 channels, float factor); +MA_API void ma_copy_and_apply_volume_factor_pcm_frames(void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount, ma_format format, ma_uint32 channels, float factor); + +MA_API void ma_apply_volume_factor_pcm_frames_u8(ma_uint8* pFrames, ma_uint64 frameCount, ma_uint32 channels, float factor); +MA_API void ma_apply_volume_factor_pcm_frames_s16(ma_int16* pFrames, ma_uint64 frameCount, ma_uint32 channels, float factor); +MA_API void ma_apply_volume_factor_pcm_frames_s24(void* pFrames, ma_uint64 frameCount, ma_uint32 channels, float factor); +MA_API void ma_apply_volume_factor_pcm_frames_s32(ma_int32* pFrames, ma_uint64 frameCount, ma_uint32 channels, float factor); +MA_API void ma_apply_volume_factor_pcm_frames_f32(float* pFrames, ma_uint64 frameCount, ma_uint32 channels, float factor); +MA_API void 
ma_apply_volume_factor_pcm_frames(void* pFrames, ma_uint64 frameCount, ma_format format, ma_uint32 channels, float factor); + +MA_API void ma_copy_and_apply_volume_factor_per_channel_f32(float* pFramesOut, const float* pFramesIn, ma_uint64 frameCount, ma_uint32 channels, float* pChannelGains); + + +MA_API void ma_copy_and_apply_volume_and_clip_samples_u8(ma_uint8* pDst, const ma_int16* pSrc, ma_uint64 count, float volume); +MA_API void ma_copy_and_apply_volume_and_clip_samples_s16(ma_int16* pDst, const ma_int32* pSrc, ma_uint64 count, float volume); +MA_API void ma_copy_and_apply_volume_and_clip_samples_s24(ma_uint8* pDst, const ma_int64* pSrc, ma_uint64 count, float volume); +MA_API void ma_copy_and_apply_volume_and_clip_samples_s32(ma_int32* pDst, const ma_int64* pSrc, ma_uint64 count, float volume); +MA_API void ma_copy_and_apply_volume_and_clip_samples_f32(float* pDst, const float* pSrc, ma_uint64 count, float volume); +MA_API void ma_copy_and_apply_volume_and_clip_pcm_frames(void* pDst, const void* pSrc, ma_uint64 frameCount, ma_format format, ma_uint32 channels, float volume); + + +/* +Helper for converting a linear factor to gain in decibels. +*/ +MA_API float ma_volume_linear_to_db(float factor); + +/* +Helper for converting gain in decibels to a linear factor. +*/ +MA_API float ma_volume_db_to_linear(float gain); + + +/* +Mixes the specified number of frames in floating point format with a volume factor. + +This will run on an optimized path when the volume is equal to 1. +*/ +MA_API ma_result ma_mix_pcm_frames_f32(float* pDst, const float* pSrc, ma_uint64 frameCount, ma_uint32 channels, float volume); + + + + +/************************************************************************************************************************************************************ + +VFS +=== + +The VFS object (virtual file system) is what's used to customize file access. This is useful in cases where stdio FILE* based APIs may not be entirely +appropriate for a given situation. + +************************************************************************************************************************************************************/ +typedef void ma_vfs; +typedef ma_handle ma_vfs_file; + +typedef enum +{ + MA_OPEN_MODE_READ = 0x00000001, + MA_OPEN_MODE_WRITE = 0x00000002 +} ma_open_mode_flags; + +typedef enum +{ + ma_seek_origin_start, + ma_seek_origin_current, + ma_seek_origin_end /* Not used by decoders. 
*/ +} ma_seek_origin; + +typedef struct +{ + ma_uint64 sizeInBytes; +} ma_file_info; + +typedef struct +{ + ma_result (* onOpen) (ma_vfs* pVFS, const char* pFilePath, ma_uint32 openMode, ma_vfs_file* pFile); + ma_result (* onOpenW)(ma_vfs* pVFS, const wchar_t* pFilePath, ma_uint32 openMode, ma_vfs_file* pFile); + ma_result (* onClose)(ma_vfs* pVFS, ma_vfs_file file); + ma_result (* onRead) (ma_vfs* pVFS, ma_vfs_file file, void* pDst, size_t sizeInBytes, size_t* pBytesRead); + ma_result (* onWrite)(ma_vfs* pVFS, ma_vfs_file file, const void* pSrc, size_t sizeInBytes, size_t* pBytesWritten); + ma_result (* onSeek) (ma_vfs* pVFS, ma_vfs_file file, ma_int64 offset, ma_seek_origin origin); + ma_result (* onTell) (ma_vfs* pVFS, ma_vfs_file file, ma_int64* pCursor); + ma_result (* onInfo) (ma_vfs* pVFS, ma_vfs_file file, ma_file_info* pInfo); +} ma_vfs_callbacks; + +MA_API ma_result ma_vfs_open(ma_vfs* pVFS, const char* pFilePath, ma_uint32 openMode, ma_vfs_file* pFile); +MA_API ma_result ma_vfs_open_w(ma_vfs* pVFS, const wchar_t* pFilePath, ma_uint32 openMode, ma_vfs_file* pFile); +MA_API ma_result ma_vfs_close(ma_vfs* pVFS, ma_vfs_file file); +MA_API ma_result ma_vfs_read(ma_vfs* pVFS, ma_vfs_file file, void* pDst, size_t sizeInBytes, size_t* pBytesRead); +MA_API ma_result ma_vfs_write(ma_vfs* pVFS, ma_vfs_file file, const void* pSrc, size_t sizeInBytes, size_t* pBytesWritten); +MA_API ma_result ma_vfs_seek(ma_vfs* pVFS, ma_vfs_file file, ma_int64 offset, ma_seek_origin origin); +MA_API ma_result ma_vfs_tell(ma_vfs* pVFS, ma_vfs_file file, ma_int64* pCursor); +MA_API ma_result ma_vfs_info(ma_vfs* pVFS, ma_vfs_file file, ma_file_info* pInfo); +MA_API ma_result ma_vfs_open_and_read_file(ma_vfs* pVFS, const char* pFilePath, void** ppData, size_t* pSize, const ma_allocation_callbacks* pAllocationCallbacks); + +typedef struct +{ + ma_vfs_callbacks cb; + ma_allocation_callbacks allocationCallbacks; /* Only used for the wchar_t version of open() on non-Windows platforms. */ +} ma_default_vfs; + +MA_API ma_result ma_default_vfs_init(ma_default_vfs* pVFS, const ma_allocation_callbacks* pAllocationCallbacks); + + + +typedef ma_result (* ma_read_proc)(void* pUserData, void* pBufferOut, size_t bytesToRead, size_t* pBytesRead); +typedef ma_result (* ma_seek_proc)(void* pUserData, ma_int64 offset, ma_seek_origin origin); +typedef ma_result (* ma_tell_proc)(void* pUserData, ma_int64* pCursor); + + + +#if !defined(MA_NO_DECODING) || !defined(MA_NO_ENCODING) +typedef enum +{ + ma_encoding_format_unknown = 0, + ma_encoding_format_wav, + ma_encoding_format_flac, + ma_encoding_format_mp3, + ma_encoding_format_vorbis +} ma_encoding_format; +#endif + +/************************************************************************************************************************************************************ + +Decoding +======== + +Decoders are independent of the main device API. Decoding APIs can be called freely inside the device's data callback, but they are not thread safe unless +you do your own synchronization. + +************************************************************************************************************************************************************/ +#ifndef MA_NO_DECODING +typedef struct ma_decoder ma_decoder; + + +typedef struct +{ + ma_format preferredFormat; + ma_uint32 seekPointCount; /* Set to > 0 to generate a seektable if the decoding backend supports it. 
*/ +} ma_decoding_backend_config; + +MA_API ma_decoding_backend_config ma_decoding_backend_config_init(ma_format preferredFormat, ma_uint32 seekPointCount); + + +typedef struct +{ + ma_result (* onInit )(void* pUserData, ma_read_proc onRead, ma_seek_proc onSeek, ma_tell_proc onTell, void* pReadSeekTellUserData, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_data_source** ppBackend); + ma_result (* onInitFile )(void* pUserData, const char* pFilePath, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_data_source** ppBackend); /* Optional. */ + ma_result (* onInitFileW )(void* pUserData, const wchar_t* pFilePath, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_data_source** ppBackend); /* Optional. */ + ma_result (* onInitMemory)(void* pUserData, const void* pData, size_t dataSize, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_data_source** ppBackend); /* Optional. */ + void (* onUninit )(void* pUserData, ma_data_source* pBackend, const ma_allocation_callbacks* pAllocationCallbacks); +} ma_decoding_backend_vtable; + + +typedef ma_result (* ma_decoder_read_proc)(ma_decoder* pDecoder, void* pBufferOut, size_t bytesToRead, size_t* pBytesRead); /* Returns the number of bytes read. */ +typedef ma_result (* ma_decoder_seek_proc)(ma_decoder* pDecoder, ma_int64 byteOffset, ma_seek_origin origin); +typedef ma_result (* ma_decoder_tell_proc)(ma_decoder* pDecoder, ma_int64* pCursor); + +typedef struct +{ + ma_format format; /* Set to 0 or ma_format_unknown to use the stream's internal format. */ + ma_uint32 channels; /* Set to 0 to use the stream's internal channels. */ + ma_uint32 sampleRate; /* Set to 0 to use the stream's internal sample rate. */ + ma_channel* pChannelMap; + ma_channel_mix_mode channelMixMode; + ma_dither_mode ditherMode; + ma_resampler_config resampling; + ma_allocation_callbacks allocationCallbacks; + ma_encoding_format encodingFormat; + ma_uint32 seekPointCount; /* When set to > 0, specifies the number of seek points to use for the generation of a seek table. Not all decoding backends support this. */ + ma_decoding_backend_vtable** ppCustomBackendVTables; + ma_uint32 customBackendCount; + void* pCustomBackendUserData; +} ma_decoder_config; + +struct ma_decoder +{ + ma_data_source_base ds; + ma_data_source* pBackend; /* The decoding backend we'll be pulling data from. */ + const ma_decoding_backend_vtable* pBackendVTable; /* The vtable for the decoding backend. This needs to be stored so we can access the onUninit() callback. */ + void* pBackendUserData; + ma_decoder_read_proc onRead; + ma_decoder_seek_proc onSeek; + ma_decoder_tell_proc onTell; + void* pUserData; + ma_uint64 readPointerInPCMFrames; /* In output sample rate. Used for keeping track of how many frames are available for decoding. */ + ma_format outputFormat; + ma_uint32 outputChannels; + ma_uint32 outputSampleRate; + ma_data_converter converter; /* Data conversion is achieved by running frames through this. */ + void* pInputCache; /* In input format. Can be null if it's not needed. */ + ma_uint64 inputCacheCap; /* The capacity of the input cache. */ + ma_uint64 inputCacheConsumed; /* The number of frames that have been consumed in the cache. Used for determining the next valid frame. */ + ma_uint64 inputCacheRemaining; /* The number of valid frames remaining in the cache. 
*/ + ma_allocation_callbacks allocationCallbacks; + union + { + struct + { + ma_vfs* pVFS; + ma_vfs_file file; + } vfs; + struct + { + const ma_uint8* pData; + size_t dataSize; + size_t currentReadPos; + } memory; /* Only used for decoders that were opened against a block of memory. */ + } data; +}; + +MA_API ma_decoder_config ma_decoder_config_init(ma_format outputFormat, ma_uint32 outputChannels, ma_uint32 outputSampleRate); +MA_API ma_decoder_config ma_decoder_config_init_default(void); + +MA_API ma_result ma_decoder_init(ma_decoder_read_proc onRead, ma_decoder_seek_proc onSeek, void* pUserData, const ma_decoder_config* pConfig, ma_decoder* pDecoder); +MA_API ma_result ma_decoder_init_memory(const void* pData, size_t dataSize, const ma_decoder_config* pConfig, ma_decoder* pDecoder); +MA_API ma_result ma_decoder_init_vfs(ma_vfs* pVFS, const char* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder); +MA_API ma_result ma_decoder_init_vfs_w(ma_vfs* pVFS, const wchar_t* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder); +MA_API ma_result ma_decoder_init_file(const char* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder); +MA_API ma_result ma_decoder_init_file_w(const wchar_t* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder); + +/* +Uninitializes a decoder. +*/ +MA_API ma_result ma_decoder_uninit(ma_decoder* pDecoder); + +/* +Reads PCM frames from the given decoder. + +This is not thread safe without your own synchronization. +*/ +MA_API ma_result ma_decoder_read_pcm_frames(ma_decoder* pDecoder, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead); + +/* +Seeks to a PCM frame based on its absolute index. + +This is not thread safe without your own synchronization. +*/ +MA_API ma_result ma_decoder_seek_to_pcm_frame(ma_decoder* pDecoder, ma_uint64 frameIndex); + +/* +Retrieves the decoder's output data format. +*/ +MA_API ma_result ma_decoder_get_data_format(ma_decoder* pDecoder, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate, ma_channel* pChannelMap, size_t channelMapCap); + +/* +Retrieves the current position of the read cursor in PCM frames. +*/ +MA_API ma_result ma_decoder_get_cursor_in_pcm_frames(ma_decoder* pDecoder, ma_uint64* pCursor); + +/* +Retrieves the length of the decoder in PCM frames. + +Do not call this on streams of an undefined length, such as internet radio. + +If the length is unknown or an error occurs, 0 will be returned. + +This will always return 0 for Vorbis decoders. This is due to a limitation with stb_vorbis in push mode, which is what miniaudio +uses internally. + +For MP3s, this will decode the entire file. Do not call this in time-critical scenarios. + +This function is not thread safe without your own synchronization. +*/ +MA_API ma_result ma_decoder_get_length_in_pcm_frames(ma_decoder* pDecoder, ma_uint64* pLength); + +/* +Retrieves the number of frames that can be read before reaching the end. + +This calls `ma_decoder_get_length_in_pcm_frames()` so you need to be aware of the rules for that function, in +particular ensuring you do not call it on streams of an undefined length, such as internet radio. + +If the total length of the decoder cannot be retrieved, such as with Vorbis decoders, `MA_NOT_IMPLEMENTED` will be +returned. +*/ +MA_API ma_result ma_decoder_get_available_frames(ma_decoder* pDecoder, ma_uint64* pAvailableFrames); + +/* +Helper for opening and decoding a file into a heap-allocated block of memory. Free the returned pointer with ma_free(). 
On input, +pConfig should be set to what you want. On output it will be set to what you got. +*/ +MA_API ma_result ma_decode_from_vfs(ma_vfs* pVFS, const char* pFilePath, ma_decoder_config* pConfig, ma_uint64* pFrameCountOut, void** ppPCMFramesOut); +MA_API ma_result ma_decode_file(const char* pFilePath, ma_decoder_config* pConfig, ma_uint64* pFrameCountOut, void** ppPCMFramesOut); +MA_API ma_result ma_decode_memory(const void* pData, size_t dataSize, ma_decoder_config* pConfig, ma_uint64* pFrameCountOut, void** ppPCMFramesOut); + +#endif /* MA_NO_DECODING */ + + +/************************************************************************************************************************************************************ + +Encoding +======== + +Encoders do not perform any format conversion for you. If your target format does not support the input data format, an error will be returned. + +************************************************************************************************************************************************************/ +#ifndef MA_NO_ENCODING +typedef struct ma_encoder ma_encoder; + +typedef ma_result (* ma_encoder_write_proc) (ma_encoder* pEncoder, const void* pBufferIn, size_t bytesToWrite, size_t* pBytesWritten); +typedef ma_result (* ma_encoder_seek_proc) (ma_encoder* pEncoder, ma_int64 offset, ma_seek_origin origin); +typedef ma_result (* ma_encoder_init_proc) (ma_encoder* pEncoder); +typedef void (* ma_encoder_uninit_proc) (ma_encoder* pEncoder); +typedef ma_result (* ma_encoder_write_pcm_frames_proc)(ma_encoder* pEncoder, const void* pFramesIn, ma_uint64 frameCount, ma_uint64* pFramesWritten); + +typedef struct +{ + ma_encoding_format encodingFormat; + ma_format format; + ma_uint32 channels; + ma_uint32 sampleRate; + ma_allocation_callbacks allocationCallbacks; +} ma_encoder_config; + +MA_API ma_encoder_config ma_encoder_config_init(ma_encoding_format encodingFormat, ma_format format, ma_uint32 channels, ma_uint32 sampleRate); + +struct ma_encoder +{ + ma_encoder_config config; + ma_encoder_write_proc onWrite; + ma_encoder_seek_proc onSeek; + ma_encoder_init_proc onInit; + ma_encoder_uninit_proc onUninit; + ma_encoder_write_pcm_frames_proc onWritePCMFrames; + void* pUserData; + void* pInternalEncoder; + union + { + struct + { + ma_vfs* pVFS; + ma_vfs_file file; + } vfs; + } data; +}; + +MA_API ma_result ma_encoder_init(ma_encoder_write_proc onWrite, ma_encoder_seek_proc onSeek, void* pUserData, const ma_encoder_config* pConfig, ma_encoder* pEncoder); +MA_API ma_result ma_encoder_init_vfs(ma_vfs* pVFS, const char* pFilePath, const ma_encoder_config* pConfig, ma_encoder* pEncoder); +MA_API ma_result ma_encoder_init_vfs_w(ma_vfs* pVFS, const wchar_t* pFilePath, const ma_encoder_config* pConfig, ma_encoder* pEncoder); +MA_API ma_result ma_encoder_init_file(const char* pFilePath, const ma_encoder_config* pConfig, ma_encoder* pEncoder); +MA_API ma_result ma_encoder_init_file_w(const wchar_t* pFilePath, const ma_encoder_config* pConfig, ma_encoder* pEncoder); +MA_API void ma_encoder_uninit(ma_encoder* pEncoder); +MA_API ma_result ma_encoder_write_pcm_frames(ma_encoder* pEncoder, const void* pFramesIn, ma_uint64 frameCount, ma_uint64* pFramesWritten); + +#endif /* MA_NO_ENCODING */ + + +/************************************************************************************************************************************************************ + +Generation + 
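*/
+
+/*
+Editor's sketch (an illustration added here, not part of the vendored miniaudio header): a
+minimal decode-to-WAV transcode loop built only on the decoder and encoder APIs declared above.
+Because encoders perform no format conversion, the decoder is configured to produce exactly the
+format the encoder was initialized with. The file paths, the stereo/44100 settings and the
+1024-frame chunk size are arbitrary assumptions; passing NULL for the optional frames-written
+output is assumed to be acceptable, as with other optional output parameters in this header.
+*/
+#if 0 /* Illustration only; not compiled. */
+static ma_result transcode_to_wav_example(const char* pInPath, const char* pOutPath)
+{
+    ma_decoder decoder;
+    ma_encoder encoder;
+    ma_decoder_config decoderConfig = ma_decoder_config_init(ma_format_f32, 2, 44100);
+    ma_encoder_config encoderConfig = ma_encoder_config_init(ma_encoding_format_wav, ma_format_f32, 2, 44100);
+    ma_result result;
+
+    result = ma_decoder_init_file(pInPath, &decoderConfig, &decoder);
+    if (result != MA_SUCCESS) {
+        return result;
+    }
+
+    result = ma_encoder_init_file(pOutPath, &encoderConfig, &encoder);
+    if (result != MA_SUCCESS) {
+        ma_decoder_uninit(&decoder);
+        return result;
+    }
+
+    for (;;) {
+        float frames[1024 * 2]; /* 1024 frames of interleaved f32 stereo. */
+        ma_uint64 framesRead;
+
+        result = ma_decoder_read_pcm_frames(&decoder, frames, 1024, &framesRead);
+        if (framesRead > 0) {
+            ma_encoder_write_pcm_frames(&encoder, frames, framesRead, NULL);
+        }
+        if (result != MA_SUCCESS || framesRead < 1024) {
+            break; /* End of file (MA_AT_END) or a read error. */
+        }
+    }
+
+    ma_encoder_uninit(&encoder);
+    ma_decoder_uninit(&decoder);
+    return MA_SUCCESS;
+}
+#endif
+
+/* The section banner for what follows resumes here: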
+************************************************************************************************************************************************************/ +#ifndef MA_NO_GENERATION +typedef enum +{ + ma_waveform_type_sine, + ma_waveform_type_square, + ma_waveform_type_triangle, + ma_waveform_type_sawtooth +} ma_waveform_type; + +typedef struct +{ + ma_format format; + ma_uint32 channels; + ma_uint32 sampleRate; + ma_waveform_type type; + double amplitude; + double frequency; +} ma_waveform_config; + +MA_API ma_waveform_config ma_waveform_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, ma_waveform_type type, double amplitude, double frequency); + +typedef struct +{ + ma_data_source_base ds; + ma_waveform_config config; + double advance; + double time; +} ma_waveform; + +MA_API ma_result ma_waveform_init(const ma_waveform_config* pConfig, ma_waveform* pWaveform); +MA_API void ma_waveform_uninit(ma_waveform* pWaveform); +MA_API ma_result ma_waveform_read_pcm_frames(ma_waveform* pWaveform, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead); +MA_API ma_result ma_waveform_seek_to_pcm_frame(ma_waveform* pWaveform, ma_uint64 frameIndex); +MA_API ma_result ma_waveform_set_amplitude(ma_waveform* pWaveform, double amplitude); +MA_API ma_result ma_waveform_set_frequency(ma_waveform* pWaveform, double frequency); +MA_API ma_result ma_waveform_set_type(ma_waveform* pWaveform, ma_waveform_type type); +MA_API ma_result ma_waveform_set_sample_rate(ma_waveform* pWaveform, ma_uint32 sampleRate); + +typedef struct +{ + ma_format format; + ma_uint32 channels; + ma_uint32 sampleRate; + double dutyCycle; + double amplitude; + double frequency; +} ma_pulsewave_config; + +MA_API ma_pulsewave_config ma_pulsewave_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double dutyCycle, double amplitude, double frequency); + +typedef struct +{ + ma_waveform waveform; + ma_pulsewave_config config; +} ma_pulsewave; + +MA_API ma_result ma_pulsewave_init(const ma_pulsewave_config* pConfig, ma_pulsewave* pWaveform); +MA_API void ma_pulsewave_uninit(ma_pulsewave* pWaveform); +MA_API ma_result ma_pulsewave_read_pcm_frames(ma_pulsewave* pWaveform, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead); +MA_API ma_result ma_pulsewave_seek_to_pcm_frame(ma_pulsewave* pWaveform, ma_uint64 frameIndex); +MA_API ma_result ma_pulsewave_set_amplitude(ma_pulsewave* pWaveform, double amplitude); +MA_API ma_result ma_pulsewave_set_frequency(ma_pulsewave* pWaveform, double frequency); +MA_API ma_result ma_pulsewave_set_sample_rate(ma_pulsewave* pWaveform, ma_uint32 sampleRate); +MA_API ma_result ma_pulsewave_set_duty_cycle(ma_pulsewave* pWaveform, double dutyCycle); + +typedef enum +{ + ma_noise_type_white, + ma_noise_type_pink, + ma_noise_type_brownian +} ma_noise_type; + + +typedef struct +{ + ma_format format; + ma_uint32 channels; + ma_noise_type type; + ma_int32 seed; + double amplitude; + ma_bool32 duplicateChannels; +} ma_noise_config; + +MA_API ma_noise_config ma_noise_config_init(ma_format format, ma_uint32 channels, ma_noise_type type, ma_int32 seed, double amplitude); + +typedef struct +{ + ma_data_source_base ds; + ma_noise_config config; + ma_lcg lcg; + union + { + struct + { + double** bin; + double* accumulation; + ma_uint32* counter; + } pink; + struct + { + double* accumulation; + } brownian; + } state; + + /* Memory management. 
*/ + void* _pHeap; + ma_bool32 _ownsHeap; +} ma_noise; + +MA_API ma_result ma_noise_get_heap_size(const ma_noise_config* pConfig, size_t* pHeapSizeInBytes); +MA_API ma_result ma_noise_init_preallocated(const ma_noise_config* pConfig, void* pHeap, ma_noise* pNoise); +MA_API ma_result ma_noise_init(const ma_noise_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_noise* pNoise); +MA_API void ma_noise_uninit(ma_noise* pNoise, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API ma_result ma_noise_read_pcm_frames(ma_noise* pNoise, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead); +MA_API ma_result ma_noise_set_amplitude(ma_noise* pNoise, double amplitude); +MA_API ma_result ma_noise_set_seed(ma_noise* pNoise, ma_int32 seed); +MA_API ma_result ma_noise_set_type(ma_noise* pNoise, ma_noise_type type); + +#endif /* MA_NO_GENERATION */ + + + +/************************************************************************************************************************************************************ + +Resource Manager + +************************************************************************************************************************************************************/ +/* The resource manager cannot be enabled if there is no decoder. */ +#if !defined(MA_NO_RESOURCE_MANAGER) && defined(MA_NO_DECODING) +#define MA_NO_RESOURCE_MANAGER +#endif + +#ifndef MA_NO_RESOURCE_MANAGER +typedef struct ma_resource_manager ma_resource_manager; +typedef struct ma_resource_manager_data_buffer_node ma_resource_manager_data_buffer_node; +typedef struct ma_resource_manager_data_buffer ma_resource_manager_data_buffer; +typedef struct ma_resource_manager_data_stream ma_resource_manager_data_stream; +typedef struct ma_resource_manager_data_source ma_resource_manager_data_source; + +typedef enum +{ + MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_STREAM = 0x00000001, /* When set, does not load the entire data source in memory. Disk I/O will happen on job threads. */ + MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_DECODE = 0x00000002, /* Decode data before storing in memory. When set, decoding is done at the resource manager level rather than the mixing thread. Results in faster mixing, but higher memory usage. */ + MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_ASYNC = 0x00000004, /* When set, the resource manager will load the data source asynchronously. */ + MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_WAIT_INIT = 0x00000008, /* When set, waits for initialization of the underlying data source before returning from ma_resource_manager_data_source_init(). */ + MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_UNKNOWN_LENGTH = 0x00000010, /* Gives the resource manager a hint that the length of the data source is unknown and calling `ma_data_source_get_length_in_pcm_frames()` should be avoided. */ + MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_LOOPING = 0x00000020 /* When set, configures the data source to loop by default. */ +} ma_resource_manager_data_source_flags; + + +/* +Pipeline notifications used by the resource manager. Made up of both an async notification and a fence, both of which are optional. +*/ +typedef struct +{ + ma_async_notification* pNotification; + ma_fence* pFence; +} ma_resource_manager_pipeline_stage_notification; + +typedef struct +{ + ma_resource_manager_pipeline_stage_notification init; /* Initialization of the decoder. */ + ma_resource_manager_pipeline_stage_notification done; /* Decoding fully completed. 
*/ +} ma_resource_manager_pipeline_notifications; + +MA_API ma_resource_manager_pipeline_notifications ma_resource_manager_pipeline_notifications_init(void); + + + +/* BEGIN BACKWARDS COMPATIBILITY */ +/* TODO: Remove this block in version 0.12. */ +#if 1 +#define ma_resource_manager_job ma_job +#define ma_resource_manager_job_init ma_job_init +#define MA_JOB_TYPE_RESOURCE_MANAGER_QUEUE_FLAG_NON_BLOCKING MA_JOB_QUEUE_FLAG_NON_BLOCKING +#define ma_resource_manager_job_queue_config ma_job_queue_config +#define ma_resource_manager_job_queue_config_init ma_job_queue_config_init +#define ma_resource_manager_job_queue ma_job_queue +#define ma_resource_manager_job_queue_get_heap_size ma_job_queue_get_heap_size +#define ma_resource_manager_job_queue_init_preallocated ma_job_queue_init_preallocated +#define ma_resource_manager_job_queue_init ma_job_queue_init +#define ma_resource_manager_job_queue_uninit ma_job_queue_uninit +#define ma_resource_manager_job_queue_post ma_job_queue_post +#define ma_resource_manager_job_queue_next ma_job_queue_next +#endif +/* END BACKWARDS COMPATIBILITY */ + + + + +/* Maximum job thread count will be restricted to this, but this may be removed later and replaced with a heap allocation thereby removing any limitation. */ +#ifndef MA_RESOURCE_MANAGER_MAX_JOB_THREAD_COUNT +#define MA_RESOURCE_MANAGER_MAX_JOB_THREAD_COUNT 64 +#endif + +typedef enum +{ + /* Indicates ma_resource_manager_next_job() should not block. Only valid when the job thread count is 0. */ + MA_RESOURCE_MANAGER_FLAG_NON_BLOCKING = 0x00000001, + + /* Disables any kind of multithreading. Implicitly enables MA_RESOURCE_MANAGER_FLAG_NON_BLOCKING. */ + MA_RESOURCE_MANAGER_FLAG_NO_THREADING = 0x00000002 +} ma_resource_manager_flags; + +typedef struct +{ + const char* pFilePath; + const wchar_t* pFilePathW; + const ma_resource_manager_pipeline_notifications* pNotifications; + ma_uint64 initialSeekPointInPCMFrames; + ma_uint64 rangeBegInPCMFrames; + ma_uint64 rangeEndInPCMFrames; + ma_uint64 loopPointBegInPCMFrames; + ma_uint64 loopPointEndInPCMFrames; + ma_uint32 flags; + ma_bool32 isLooping; /* Deprecated. Use the MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_LOOPING flag in `flags` instead. */ +} ma_resource_manager_data_source_config; + +MA_API ma_resource_manager_data_source_config ma_resource_manager_data_source_config_init(void); + + +typedef enum +{ + ma_resource_manager_data_supply_type_unknown = 0, /* Used for determining whether or not the data supply has been initialized. */ + ma_resource_manager_data_supply_type_encoded, /* Data supply is an encoded buffer. Connector is ma_decoder. */ + ma_resource_manager_data_supply_type_decoded, /* Data supply is a decoded buffer. Connector is ma_audio_buffer. */ + ma_resource_manager_data_supply_type_decoded_paged /* Data supply is a linked list of decoded buffers. Connector is ma_paged_audio_buffer. */ +} ma_resource_manager_data_supply_type; + +typedef struct +{ + MA_ATOMIC(4, ma_resource_manager_data_supply_type) type; /* Read and written from different threads so needs to be accessed atomically. 
*/ + union + { + struct + { + const void* pData; + size_t sizeInBytes; + } encoded; + struct + { + const void* pData; + ma_uint64 totalFrameCount; + ma_uint64 decodedFrameCount; + ma_format format; + ma_uint32 channels; + ma_uint32 sampleRate; + } decoded; + struct + { + ma_paged_audio_buffer_data data; + ma_uint64 decodedFrameCount; + ma_uint32 sampleRate; + } decodedPaged; + } backend; +} ma_resource_manager_data_supply; + +struct ma_resource_manager_data_buffer_node +{ + ma_uint32 hashedName32; /* The hashed name. This is the key. */ + ma_uint32 refCount; + MA_ATOMIC(4, ma_result) result; /* Result from asynchronous loading. When loading set to MA_BUSY. When fully loaded set to MA_SUCCESS. When deleting set to MA_UNAVAILABLE. */ + MA_ATOMIC(4, ma_uint32) executionCounter; /* For allocating execution orders for jobs. */ + MA_ATOMIC(4, ma_uint32) executionPointer; /* For managing the order of execution for asynchronous jobs relating to this object. Incremented as jobs complete processing. */ + ma_bool32 isDataOwnedByResourceManager; /* Set to true when the underlying data buffer was allocated by the resource manager. Set to false if it is owned by the application (via ma_resource_manager_register_*()). */ + ma_resource_manager_data_supply data; + ma_resource_manager_data_buffer_node* pParent; + ma_resource_manager_data_buffer_node* pChildLo; + ma_resource_manager_data_buffer_node* pChildHi; +}; + +struct ma_resource_manager_data_buffer +{ + ma_data_source_base ds; /* Base data source. A data buffer is a data source. */ + ma_resource_manager* pResourceManager; /* A pointer to the resource manager that owns this buffer. */ + ma_resource_manager_data_buffer_node* pNode; /* The data node. This is reference counted and is what supplies the data. */ + ma_uint32 flags; /* The flags that were passed in to initialize the buffer. */ + MA_ATOMIC(4, ma_uint32) executionCounter; /* For allocating execution orders for jobs. */ + MA_ATOMIC(4, ma_uint32) executionPointer; /* For managing the order of execution for asynchronous jobs relating to this object. Incremented as jobs complete processing. */ + ma_uint64 seekTargetInPCMFrames; /* Only updated by the public API. Never written nor read from the job thread. */ + ma_bool32 seekToCursorOnNextRead; /* On the next read we need to seek to the frame cursor. */ + MA_ATOMIC(4, ma_result) result; /* Keeps track of a result of decoding. Set to MA_BUSY while the buffer is still loading. Set to MA_SUCCESS when loading is finished successfully. Otherwise set to some other code. */ + MA_ATOMIC(4, ma_bool32) isLooping; /* Can be read and written by different threads at the same time. Must be used atomically. */ + ma_atomic_bool32 isConnectorInitialized; /* Used for asynchronous loading to ensure we don't try to initialize the connector multiple times while waiting for the node to fully load. */ + union + { + ma_decoder decoder; /* Supply type is ma_resource_manager_data_supply_type_encoded */ + ma_audio_buffer buffer; /* Supply type is ma_resource_manager_data_supply_type_decoded */ + ma_paged_audio_buffer pagedBuffer; /* Supply type is ma_resource_manager_data_supply_type_decoded_paged */ + } connector; /* Connects this object to the node's data supply. */ +}; + +struct ma_resource_manager_data_stream +{ + ma_data_source_base ds; /* Base data source. A data stream is a data source. */ + ma_resource_manager* pResourceManager; /* A pointer to the resource manager that owns this data stream. 
*/ + ma_uint32 flags; /* The flags that were passed in to initialize the stream. */ + ma_decoder decoder; /* Used for filling pages with data. This is only ever accessed by the job thread. The public API should never touch this. */ + ma_bool32 isDecoderInitialized; /* Required for determining whether or not the decoder should be uninitialized in MA_JOB_TYPE_RESOURCE_MANAGER_FREE_DATA_STREAM. */ + ma_uint64 totalLengthInPCMFrames; /* This is calculated when first loaded by the MA_JOB_TYPE_RESOURCE_MANAGER_LOAD_DATA_STREAM. */ + ma_uint32 relativeCursor; /* The playback cursor, relative to the current page. Only ever accessed by the public API. Never accessed by the job thread. */ + MA_ATOMIC(8, ma_uint64) absoluteCursor; /* The playback cursor, in absolute position starting from the start of the file. */ + ma_uint32 currentPageIndex; /* Toggles between 0 and 1. Index 0 is the first half of pPageData. Index 1 is the second half. Only ever accessed by the public API. Never accessed by the job thread. */ + MA_ATOMIC(4, ma_uint32) executionCounter; /* For allocating execution orders for jobs. */ + MA_ATOMIC(4, ma_uint32) executionPointer; /* For managing the order of execution for asynchronous jobs relating to this object. Incremented as jobs complete processing. */ + + /* Written by the public API, read by the job thread. */ + MA_ATOMIC(4, ma_bool32) isLooping; /* Whether or not the stream is looping. It's important to set the looping flag at the data stream level for smooth loop transitions. */ + + /* Written by the job thread, read by the public API. */ + void* pPageData; /* Buffer containing the decoded data of each page. Allocated once at initialization time. */ + MA_ATOMIC(4, ma_uint32) pageFrameCount[2]; /* The number of valid PCM frames in each page. Used to determine the last valid frame. */ + + /* Written and read by both the public API and the job thread. These must be atomic. */ + MA_ATOMIC(4, ma_result) result; /* Result from asynchronous loading. When loading set to MA_BUSY. When initialized set to MA_SUCCESS. When deleting set to MA_UNAVAILABLE. If an error occurs when loading, set to an error code. */ + MA_ATOMIC(4, ma_bool32) isDecoderAtEnd; /* Whether or not the decoder has reached the end. */ + MA_ATOMIC(4, ma_bool32) isPageValid[2]; /* Booleans to indicate whether or not a page is valid. Set to false by the public API, set to true by the job thread. Set to false as the pages are consumed, true when they are filled. */ + MA_ATOMIC(4, ma_bool32) seekCounter; /* When 0, no seeking is being performed. When > 0, a seek is being performed and reading should be delayed with MA_BUSY. */ +}; + +struct ma_resource_manager_data_source +{ + union + { + ma_resource_manager_data_buffer buffer; + ma_resource_manager_data_stream stream; + } backend; /* Must be the first item because we need the first item to be the data source callbacks for the buffer or stream. */ + + ma_uint32 flags; /* The flags that were passed in to ma_resource_manager_data_source_init(). */ + MA_ATOMIC(4, ma_uint32) executionCounter; /* For allocating execution orders for jobs. */ + MA_ATOMIC(4, ma_uint32) executionPointer; /* For managing the order of execution for asynchronous jobs relating to this object. Incremented as jobs complete processing. */ +}; + +typedef struct +{ + ma_allocation_callbacks allocationCallbacks; + ma_log* pLog; + ma_format decodedFormat; /* The decoded format to use. Set to ma_format_unknown (default) to use the file's native format. 
*/ + ma_uint32 decodedChannels; /* The decoded channel count to use. Set to 0 (default) to use the file's native channel count. */ + ma_uint32 decodedSampleRate; /* The decoded sample rate to use. Set to 0 (default) to use the file's native sample rate. */ + ma_uint32 jobThreadCount; /* Set to 0 if you want to self-manage your job threads. Defaults to 1. */ + size_t jobThreadStackSize; + ma_uint32 jobQueueCapacity; /* The maximum number of jobs that can fit in the queue at a time. Defaults to MA_JOB_TYPE_RESOURCE_MANAGER_QUEUE_CAPACITY. Cannot be zero. */ + ma_uint32 flags; + ma_vfs* pVFS; /* Can be NULL in which case defaults will be used. */ + ma_decoding_backend_vtable** ppCustomDecodingBackendVTables; + ma_uint32 customDecodingBackendCount; + void* pCustomDecodingBackendUserData; +} ma_resource_manager_config; + +MA_API ma_resource_manager_config ma_resource_manager_config_init(void); + +struct ma_resource_manager +{ + ma_resource_manager_config config; + ma_resource_manager_data_buffer_node* pRootDataBufferNode; /* The root buffer in the binary tree. */ +#ifndef MA_NO_THREADING + ma_mutex dataBufferBSTLock; /* For synchronizing access to the data buffer binary tree. */ + ma_thread jobThreads[MA_RESOURCE_MANAGER_MAX_JOB_THREAD_COUNT]; /* The threads for executing jobs. */ +#endif + ma_job_queue jobQueue; /* Multi-consumer, multi-producer job queue for managing jobs for asynchronous decoding and streaming. */ + ma_default_vfs defaultVFS; /* Only used if a custom VFS is not specified. */ + ma_log log; /* Only used if no log was specified in the config. */ +}; + +/* Init. */ +MA_API ma_result ma_resource_manager_init(const ma_resource_manager_config* pConfig, ma_resource_manager* pResourceManager); +MA_API void ma_resource_manager_uninit(ma_resource_manager* pResourceManager); +MA_API ma_log* ma_resource_manager_get_log(ma_resource_manager* pResourceManager); + +/* Registration. */ +MA_API ma_result ma_resource_manager_register_file(ma_resource_manager* pResourceManager, const char* pFilePath, ma_uint32 flags); +MA_API ma_result ma_resource_manager_register_file_w(ma_resource_manager* pResourceManager, const wchar_t* pFilePath, ma_uint32 flags); +MA_API ma_result ma_resource_manager_register_decoded_data(ma_resource_manager* pResourceManager, const char* pName, const void* pData, ma_uint64 frameCount, ma_format format, ma_uint32 channels, ma_uint32 sampleRate); /* Does not copy. Increments the reference count if it already exists and returns MA_SUCCESS. */ +MA_API ma_result ma_resource_manager_register_decoded_data_w(ma_resource_manager* pResourceManager, const wchar_t* pName, const void* pData, ma_uint64 frameCount, ma_format format, ma_uint32 channels, ma_uint32 sampleRate); +MA_API ma_result ma_resource_manager_register_encoded_data(ma_resource_manager* pResourceManager, const char* pName, const void* pData, size_t sizeInBytes); /* Does not copy. Increments the reference count if it already exists and returns MA_SUCCESS. 
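*/
+
+/*
+Editor's sketch (illustration only, not part of the vendored header): one plausible way to
+initialize a resource manager using the config declared above. The f32 format, 48kHz rate and
+two job threads are arbitrary assumptions for the example, not recommendations from miniaudio.
+*/
+#if 0 /* Illustration only; not compiled. */
+static ma_result init_resource_manager_example(ma_resource_manager* pRM)
+{
+    ma_resource_manager_config config = ma_resource_manager_config_init();
+    config.decodedFormat     = ma_format_f32; /* Decode everything to f32 so all sounds share one format. */
+    config.decodedSampleRate = 48000;         /* Resample all decoded audio to 48kHz. */
+    config.jobThreadCount    = 2;             /* Two background threads for asynchronous loading and streaming. */
+    return ma_resource_manager_init(&config, pRM);
+}
+#endif
+
+/* Registration continues with the wide-character variants: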
*/ +MA_API ma_result ma_resource_manager_register_encoded_data_w(ma_resource_manager* pResourceManager, const wchar_t* pName, const void* pData, size_t sizeInBytes); +MA_API ma_result ma_resource_manager_unregister_file(ma_resource_manager* pResourceManager, const char* pFilePath); +MA_API ma_result ma_resource_manager_unregister_file_w(ma_resource_manager* pResourceManager, const wchar_t* pFilePath); +MA_API ma_result ma_resource_manager_unregister_data(ma_resource_manager* pResourceManager, const char* pName); +MA_API ma_result ma_resource_manager_unregister_data_w(ma_resource_manager* pResourceManager, const wchar_t* pName); + +/* Data Buffers. */ +MA_API ma_result ma_resource_manager_data_buffer_init_ex(ma_resource_manager* pResourceManager, const ma_resource_manager_data_source_config* pConfig, ma_resource_manager_data_buffer* pDataBuffer); +MA_API ma_result ma_resource_manager_data_buffer_init(ma_resource_manager* pResourceManager, const char* pFilePath, ma_uint32 flags, const ma_resource_manager_pipeline_notifications* pNotifications, ma_resource_manager_data_buffer* pDataBuffer); +MA_API ma_result ma_resource_manager_data_buffer_init_w(ma_resource_manager* pResourceManager, const wchar_t* pFilePath, ma_uint32 flags, const ma_resource_manager_pipeline_notifications* pNotifications, ma_resource_manager_data_buffer* pDataBuffer); +MA_API ma_result ma_resource_manager_data_buffer_init_copy(ma_resource_manager* pResourceManager, const ma_resource_manager_data_buffer* pExistingDataBuffer, ma_resource_manager_data_buffer* pDataBuffer); +MA_API ma_result ma_resource_manager_data_buffer_uninit(ma_resource_manager_data_buffer* pDataBuffer); +MA_API ma_result ma_resource_manager_data_buffer_read_pcm_frames(ma_resource_manager_data_buffer* pDataBuffer, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead); +MA_API ma_result ma_resource_manager_data_buffer_seek_to_pcm_frame(ma_resource_manager_data_buffer* pDataBuffer, ma_uint64 frameIndex); +MA_API ma_result ma_resource_manager_data_buffer_get_data_format(ma_resource_manager_data_buffer* pDataBuffer, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate, ma_channel* pChannelMap, size_t channelMapCap); +MA_API ma_result ma_resource_manager_data_buffer_get_cursor_in_pcm_frames(ma_resource_manager_data_buffer* pDataBuffer, ma_uint64* pCursor); +MA_API ma_result ma_resource_manager_data_buffer_get_length_in_pcm_frames(ma_resource_manager_data_buffer* pDataBuffer, ma_uint64* pLength); +MA_API ma_result ma_resource_manager_data_buffer_result(const ma_resource_manager_data_buffer* pDataBuffer); +MA_API ma_result ma_resource_manager_data_buffer_set_looping(ma_resource_manager_data_buffer* pDataBuffer, ma_bool32 isLooping); +MA_API ma_bool32 ma_resource_manager_data_buffer_is_looping(const ma_resource_manager_data_buffer* pDataBuffer); +MA_API ma_result ma_resource_manager_data_buffer_get_available_frames(ma_resource_manager_data_buffer* pDataBuffer, ma_uint64* pAvailableFrames); + +/* Data Streams. 
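*/
+
+/*
+Editor's sketch (illustration only): opening a long sound as a data stream using the
+declarations that follow. A stream is decoded in pages on the job threads rather than loaded
+whole, which suits music and other long assets. The looping flag and the NULL notifications
+are arbitrary assumptions for the example.
+*/
+#if 0 /* Illustration only; not compiled. */
+static ma_result open_music_stream_example(ma_resource_manager* pRM, const char* pPath, ma_resource_manager_data_stream* pStream)
+{
+    /* Loop by default; no pipeline notifications because we don't need to wait on loading. */
+    return ma_resource_manager_data_stream_init(pRM, pPath, MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_LOOPING, NULL, pStream);
+}
+#endif
+
+/* Data stream declarations: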
*/ +MA_API ma_result ma_resource_manager_data_stream_init_ex(ma_resource_manager* pResourceManager, const ma_resource_manager_data_source_config* pConfig, ma_resource_manager_data_stream* pDataStream); +MA_API ma_result ma_resource_manager_data_stream_init(ma_resource_manager* pResourceManager, const char* pFilePath, ma_uint32 flags, const ma_resource_manager_pipeline_notifications* pNotifications, ma_resource_manager_data_stream* pDataStream); +MA_API ma_result ma_resource_manager_data_stream_init_w(ma_resource_manager* pResourceManager, const wchar_t* pFilePath, ma_uint32 flags, const ma_resource_manager_pipeline_notifications* pNotifications, ma_resource_manager_data_stream* pDataStream); +MA_API ma_result ma_resource_manager_data_stream_uninit(ma_resource_manager_data_stream* pDataStream); +MA_API ma_result ma_resource_manager_data_stream_read_pcm_frames(ma_resource_manager_data_stream* pDataStream, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead); +MA_API ma_result ma_resource_manager_data_stream_seek_to_pcm_frame(ma_resource_manager_data_stream* pDataStream, ma_uint64 frameIndex); +MA_API ma_result ma_resource_manager_data_stream_get_data_format(ma_resource_manager_data_stream* pDataStream, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate, ma_channel* pChannelMap, size_t channelMapCap); +MA_API ma_result ma_resource_manager_data_stream_get_cursor_in_pcm_frames(ma_resource_manager_data_stream* pDataStream, ma_uint64* pCursor); +MA_API ma_result ma_resource_manager_data_stream_get_length_in_pcm_frames(ma_resource_manager_data_stream* pDataStream, ma_uint64* pLength); +MA_API ma_result ma_resource_manager_data_stream_result(const ma_resource_manager_data_stream* pDataStream); +MA_API ma_result ma_resource_manager_data_stream_set_looping(ma_resource_manager_data_stream* pDataStream, ma_bool32 isLooping); +MA_API ma_bool32 ma_resource_manager_data_stream_is_looping(const ma_resource_manager_data_stream* pDataStream); +MA_API ma_result ma_resource_manager_data_stream_get_available_frames(ma_resource_manager_data_stream* pDataStream, ma_uint64* pAvailableFrames); + +/* Data Sources. 
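*/
+
+/*
+Editor's sketch (illustration only): reading through the generic data source API declared
+below, which wraps either a data buffer or a data stream depending on the flags used at init
+time. The 512-frame chunk size and f32/stereo output are arbitrary assumptions (e.g. via the
+config in the sketch further above).
+*/
+#if 0 /* Illustration only; not compiled. */
+static void drain_data_source_example(ma_resource_manager_data_source* pDS)
+{
+    for (;;) {
+        float frames[512 * 2]; /* 512 frames, assuming f32 stereo output. */
+        ma_uint64 framesRead;
+        ma_result result = ma_resource_manager_data_source_read_pcm_frames(pDS, frames, 512, &framesRead);
+        if (result != MA_SUCCESS || framesRead == 0) {
+            break; /* MA_AT_END, MA_BUSY (still loading asynchronously) or an error. */
+        }
+        /* ... consume framesRead frames here ... */
+    }
+}
+#endif
+
+/* Data source declarations: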
*/ +MA_API ma_result ma_resource_manager_data_source_init_ex(ma_resource_manager* pResourceManager, const ma_resource_manager_data_source_config* pConfig, ma_resource_manager_data_source* pDataSource); +MA_API ma_result ma_resource_manager_data_source_init(ma_resource_manager* pResourceManager, const char* pName, ma_uint32 flags, const ma_resource_manager_pipeline_notifications* pNotifications, ma_resource_manager_data_source* pDataSource); +MA_API ma_result ma_resource_manager_data_source_init_w(ma_resource_manager* pResourceManager, const wchar_t* pName, ma_uint32 flags, const ma_resource_manager_pipeline_notifications* pNotifications, ma_resource_manager_data_source* pDataSource); +MA_API ma_result ma_resource_manager_data_source_init_copy(ma_resource_manager* pResourceManager, const ma_resource_manager_data_source* pExistingDataSource, ma_resource_manager_data_source* pDataSource); +MA_API ma_result ma_resource_manager_data_source_uninit(ma_resource_manager_data_source* pDataSource); +MA_API ma_result ma_resource_manager_data_source_read_pcm_frames(ma_resource_manager_data_source* pDataSource, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead); +MA_API ma_result ma_resource_manager_data_source_seek_to_pcm_frame(ma_resource_manager_data_source* pDataSource, ma_uint64 frameIndex); +MA_API ma_result ma_resource_manager_data_source_get_data_format(ma_resource_manager_data_source* pDataSource, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate, ma_channel* pChannelMap, size_t channelMapCap); +MA_API ma_result ma_resource_manager_data_source_get_cursor_in_pcm_frames(ma_resource_manager_data_source* pDataSource, ma_uint64* pCursor); +MA_API ma_result ma_resource_manager_data_source_get_length_in_pcm_frames(ma_resource_manager_data_source* pDataSource, ma_uint64* pLength); +MA_API ma_result ma_resource_manager_data_source_result(const ma_resource_manager_data_source* pDataSource); +MA_API ma_result ma_resource_manager_data_source_set_looping(ma_resource_manager_data_source* pDataSource, ma_bool32 isLooping); +MA_API ma_bool32 ma_resource_manager_data_source_is_looping(const ma_resource_manager_data_source* pDataSource); +MA_API ma_result ma_resource_manager_data_source_get_available_frames(ma_resource_manager_data_source* pDataSource, ma_uint64* pAvailableFrames); + +/* Job management. */ +MA_API ma_result ma_resource_manager_post_job(ma_resource_manager* pResourceManager, const ma_job* pJob); +MA_API ma_result ma_resource_manager_post_job_quit(ma_resource_manager* pResourceManager); /* Helper for posting a quit job. */ +MA_API ma_result ma_resource_manager_next_job(ma_resource_manager* pResourceManager, ma_job* pJob); +MA_API ma_result ma_resource_manager_process_job(ma_resource_manager* pResourceManager, ma_job* pJob); /* DEPRECATED. Use ma_job_process(). Will be removed in version 0.12. */ +MA_API ma_result ma_resource_manager_process_next_job(ma_resource_manager* pResourceManager); /* Returns MA_CANCELLED if a MA_JOB_TYPE_QUIT job is found. In non-blocking mode, returns MA_NO_DATA_AVAILABLE if no jobs are available. */ +#endif /* MA_NO_RESOURCE_MANAGER */ + + + +/************************************************************************************************************************************************************ + +Node Graph + +************************************************************************************************************************************************************/ +#ifndef MA_NO_NODE_GRAPH +/* Must never exceed 254. 
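*/
+
+/*
+Editor's sketch (illustration only): a self-managed job loop for the resource manager section
+above, for when `jobThreadCount` is 0 and jobs are processed on your own thread. The return
+codes follow the comment on ma_resource_manager_process_next_job() above.
+*/
+#if 0 /* Illustration only; not compiled. */
+static void resource_manager_job_loop_example(ma_resource_manager* pRM)
+{
+    for (;;) {
+        ma_result result = ma_resource_manager_process_next_job(pRM);
+        if (result == MA_CANCELLED) {
+            break; /* A MA_JOB_TYPE_QUIT job was processed, e.g. posted via ma_resource_manager_post_job_quit(). */
+        }
+        if (result == MA_NO_DATA_AVAILABLE) {
+            break; /* Non-blocking mode and the queue is empty. */
+        }
+    }
+}
+#endif
+
+/* As the comment above says, this limit must never exceed 254: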
*/ +#ifndef MA_MAX_NODE_BUS_COUNT +#define MA_MAX_NODE_BUS_COUNT 254 +#endif + +/* Used internally by miniaudio for memory management. Must never exceed MA_MAX_NODE_BUS_COUNT. */ +#ifndef MA_MAX_NODE_LOCAL_BUS_COUNT +#define MA_MAX_NODE_LOCAL_BUS_COUNT 2 +#endif + +/* Use this when the bus count is determined by the node instance rather than the vtable. */ +#define MA_NODE_BUS_COUNT_UNKNOWN 255 + + +/* For some internal memory management of ma_node_graph. */ +typedef struct +{ + size_t offset; + size_t sizeInBytes; + unsigned char _data[1]; +} ma_stack; + + +typedef struct ma_node_graph ma_node_graph; +typedef void ma_node; + + +/* Node flags. */ +typedef enum +{ + MA_NODE_FLAG_PASSTHROUGH = 0x00000001, + MA_NODE_FLAG_CONTINUOUS_PROCESSING = 0x00000002, + MA_NODE_FLAG_ALLOW_NULL_INPUT = 0x00000004, + MA_NODE_FLAG_DIFFERENT_PROCESSING_RATES = 0x00000008, + MA_NODE_FLAG_SILENT_OUTPUT = 0x00000010 +} ma_node_flags; + + +/* The playback state of a node. Either started or stopped. */ +typedef enum +{ + ma_node_state_started = 0, + ma_node_state_stopped = 1 +} ma_node_state; + + +typedef struct +{ + /* + Extended processing callback. This callback is used for effects that process input and output + at different rates (i.e. they perform resampling). This is similar to the simple version, only + they take two separate frame counts: one for input, and one for output. + + On input, `pFrameCountOut` is equal to the capacity of the output buffer for each bus, whereas + `pFrameCountIn` will be equal to the number of PCM frames in each of the buffers in `ppFramesIn`. + + On output, set `pFrameCountOut` to the number of PCM frames that were actually output and set + `pFrameCountIn` to the number of input frames that were consumed. + */ + void (* onProcess)(ma_node* pNode, const float** ppFramesIn, ma_uint32* pFrameCountIn, float** ppFramesOut, ma_uint32* pFrameCountOut); + + /* + A callback for retrieving the number of input frames that are required to output the + specified number of output frames. You would only want to implement this when the node performs + resampling. This is optional, even for nodes that perform resampling, but it does offer a + small reduction in latency as it allows miniaudio to calculate the exact number of input frames + to read at a time instead of having to estimate. + */ + ma_result (* onGetRequiredInputFrameCount)(ma_node* pNode, ma_uint32 outputFrameCount, ma_uint32* pInputFrameCount); + + /* + The number of input buses. This is how many sub-buffers will be contained in the `ppFramesIn` + parameters of the callbacks above. + */ + ma_uint8 inputBusCount; + + /* + The number of output buses. This is how many sub-buffers will be contained in the `ppFramesOut` + parameters of the callbacks above. + */ + ma_uint8 outputBusCount; + + /* + Flags describing characteristics of the node. This is currently just a placeholder for some + ideas for later on. + */ + ma_uint32 flags; +} ma_node_vtable; + +typedef struct +{ + const ma_node_vtable* vtable; /* Should never be null. Initialization of the node will fail if so. */ + ma_node_state initialState; /* Defaults to ma_node_state_started. */ + ma_uint32 inputBusCount; /* Only used if the vtable specifies an input bus count of `MA_NODE_BUS_COUNT_UNKNOWN`, otherwise must be set to `MA_NODE_BUS_COUNT_UNKNOWN` (default). */ + ma_uint32 outputBusCount; /* Only used if the vtable specifies an output bus count of `MA_NODE_BUS_COUNT_UNKNOWN`, otherwise must be set to `MA_NODE_BUS_COUNT_UNKNOWN` (default). 
*/ + const ma_uint32* pInputChannels; /* The number of elements is determined by the input bus count as determined by the vtable, or `inputBusCount` if the vtable specifies `MA_NODE_BUS_COUNT_UNKNOWN`. */ + const ma_uint32* pOutputChannels; /* The number of elements is determined by the output bus count as determined by the vtable, or `outputBusCount` if the vtable specifies `MA_NODE_BUS_COUNT_UNKNOWN`. */ +} ma_node_config; + +MA_API ma_node_config ma_node_config_init(void); + + +/* +A node has multiple output buses. An output bus is attached to an input bus as an item in a linked +list. Think of the input bus as a linked list, with the output bus being an item in that list. +*/ +typedef struct ma_node_output_bus ma_node_output_bus; +struct ma_node_output_bus +{ + /* Immutable. */ + ma_node* pNode; /* The node that owns this output bus. The input node. Will be null for dummy head and tail nodes. */ + ma_uint8 outputBusIndex; /* The index of the output bus on pNode that this output bus represents. */ + ma_uint8 channels; /* The number of channels in the audio stream for this bus. */ + + /* Mutable via multiple threads. Must be used atomically. The weird ordering here is for packing reasons. */ + ma_uint8 inputNodeInputBusIndex; /* The index of the input bus on the input. Required for detaching. Will only be used within the spinlock so does not need to be atomic. */ + MA_ATOMIC(4, ma_uint32) flags; /* Some state flags for tracking the read state of the output buffer. A combination of MA_NODE_OUTPUT_BUS_FLAG_*. */ + MA_ATOMIC(4, ma_uint32) refCount; /* Reference count for some thread-safety when detaching. */ + MA_ATOMIC(4, ma_bool32) isAttached; /* This is used to prevent iteration of nodes that are in the middle of being detached. Used for thread safety. */ + MA_ATOMIC(4, ma_spinlock) lock; /* Unfortunate lock, but significantly simplifies the implementation. Required for thread-safe attaching and detaching. */ + MA_ATOMIC(4, float) volume; /* Linear. */ + MA_ATOMIC(MA_SIZEOF_PTR, ma_node_output_bus*) pNext; /* If null, it's the tail node or detached. */ + MA_ATOMIC(MA_SIZEOF_PTR, ma_node_output_bus*) pPrev; /* If null, it's the head node or detached. */ + MA_ATOMIC(MA_SIZEOF_PTR, ma_node*) pInputNode; /* The node that this output bus is attached to. Required for detaching. */ +}; + +/* +A node has multiple input buses. The output buses of one node connect to the input buses of +another. An input bus is essentially just a linked list of output buses. +*/ +typedef struct ma_node_input_bus ma_node_input_bus; +struct ma_node_input_bus +{ + /* Mutable via multiple threads. */ + ma_node_output_bus head; /* Dummy head node for simplifying some lock-free thread-safety stuff. */ + MA_ATOMIC(4, ma_uint32) nextCounter; /* This is used to determine whether or not the input bus is finding the next node in the list. Used for thread safety when detaching output buses. */ + MA_ATOMIC(4, ma_spinlock) lock; /* Unfortunate lock, but significantly simplifies the implementation. Required for thread-safe attaching and detaching. */ + + /* Set once at startup. */ + ma_uint8 channels; /* The number of channels in the audio stream for this bus. */ +}; + + +typedef struct ma_node_base ma_node_base; +struct ma_node_base +{ + /* These variables are set once at startup. */ + ma_node_graph* pNodeGraph; /* The graph this node belongs to. 
*/ + const ma_node_vtable* vtable; + ma_uint32 inputBusCount; + ma_uint32 outputBusCount; + ma_node_input_bus* pInputBuses; + ma_node_output_bus* pOutputBuses; + float* pCachedData; /* Allocated on the heap. Fixed size. Needs to be stored on the heap because reading from output buses is done in separate function calls. */ + ma_uint16 cachedDataCapInFramesPerBus; /* The capacity of the input data cache in frames, per bus. */ + + /* These variables are read and written only from the audio thread. */ + ma_uint16 cachedFrameCountOut; + ma_uint16 cachedFrameCountIn; + ma_uint16 consumedFrameCountIn; + + /* These variables are read and written between different threads. */ + MA_ATOMIC(4, ma_node_state) state; /* When set to stopped, nothing will be read, regardless of the times in stateTimes. */ + MA_ATOMIC(8, ma_uint64) stateTimes[2]; /* Indexed by ma_node_state. Specifies the time based on the global clock that a node should be considered to be in the relevant state. */ + MA_ATOMIC(8, ma_uint64) localTime; /* The node's local clock. This is just a running sum of the number of output frames that have been processed. Can be modified by any thread with `ma_node_set_time()`. */ + + /* Memory management. */ + ma_node_input_bus _inputBuses[MA_MAX_NODE_LOCAL_BUS_COUNT]; + ma_node_output_bus _outputBuses[MA_MAX_NODE_LOCAL_BUS_COUNT]; + void* _pHeap; /* A heap allocation for internal use only. pInputBuses and/or pOutputBuses will point to this if the bus count exceeds MA_MAX_NODE_LOCAL_BUS_COUNT. */ + ma_bool32 _ownsHeap; /* If set to true, the node owns the heap allocation and _pHeap will be freed in ma_node_uninit(). */ +}; + +MA_API ma_result ma_node_get_heap_size(ma_node_graph* pNodeGraph, const ma_node_config* pConfig, size_t* pHeapSizeInBytes); +MA_API ma_result ma_node_init_preallocated(ma_node_graph* pNodeGraph, const ma_node_config* pConfig, void* pHeap, ma_node* pNode); +MA_API ma_result ma_node_init(ma_node_graph* pNodeGraph, const ma_node_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_node* pNode); +MA_API void ma_node_uninit(ma_node* pNode, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API ma_node_graph* ma_node_get_node_graph(const ma_node* pNode); +MA_API ma_uint32 ma_node_get_input_bus_count(const ma_node* pNode); +MA_API ma_uint32 ma_node_get_output_bus_count(const ma_node* pNode); +MA_API ma_uint32 ma_node_get_input_channels(const ma_node* pNode, ma_uint32 inputBusIndex); +MA_API ma_uint32 ma_node_get_output_channels(const ma_node* pNode, ma_uint32 outputBusIndex); +MA_API ma_result ma_node_attach_output_bus(ma_node* pNode, ma_uint32 outputBusIndex, ma_node* pOtherNode, ma_uint32 otherNodeInputBusIndex); +MA_API ma_result ma_node_detach_output_bus(ma_node* pNode, ma_uint32 outputBusIndex); +MA_API ma_result ma_node_detach_all_output_buses(ma_node* pNode); +MA_API ma_result ma_node_set_output_bus_volume(ma_node* pNode, ma_uint32 outputBusIndex, float volume); +MA_API float ma_node_get_output_bus_volume(const ma_node* pNode, ma_uint32 outputBusIndex); +MA_API ma_result ma_node_set_state(ma_node* pNode, ma_node_state state); +MA_API ma_node_state ma_node_get_state(const ma_node* pNode); +MA_API ma_result ma_node_set_state_time(ma_node* pNode, ma_node_state state, ma_uint64 globalTime); +MA_API ma_uint64 ma_node_get_state_time(const ma_node* pNode, ma_node_state state); +MA_API ma_node_state ma_node_get_state_by_time(const ma_node* pNode, ma_uint64 globalTime); +MA_API ma_node_state ma_node_get_state_by_time_range(const ma_node* pNode, ma_uint64 
globalTimeBeg, ma_uint64 globalTimeEnd); +MA_API ma_uint64 ma_node_get_time(const ma_node* pNode); +MA_API ma_result ma_node_set_time(ma_node* pNode, ma_uint64 localTime); + + +typedef struct +{ + ma_uint32 channels; + ma_uint32 processingSizeInFrames; /* This is the preferred processing size for node processing callbacks unless overridden by a node itself. Can be 0 in which case it will be based on the frame count passed into ma_node_graph_read_pcm_frames(), but will not be well defined. */ + size_t preMixStackSizeInBytes; /* Defaults to 512KB per channel. Reducing this will save memory, but the depth of your node graph will be more restricted. */ +} ma_node_graph_config; + +MA_API ma_node_graph_config ma_node_graph_config_init(ma_uint32 channels); + + +struct ma_node_graph +{ + /* Immutable. */ + ma_node_base base; /* The node graph itself is a node so it can be connected as an input to a different node graph. This has zero inputs and calls ma_node_graph_read_pcm_frames() to generate its output. */ + ma_node_base endpoint; /* Special node that all nodes eventually connect to. Data is read from this node in ma_node_graph_read_pcm_frames(). */ + float* pProcessingCache; /* This will be allocated when processingSizeInFrames is non-zero. This is needed because ma_node_graph_read_pcm_frames() can be called with a variable number of frames, and we may need to do some buffering in situations where the caller requests a frame count that's not a multiple of processingSizeInFrames. */ + ma_uint32 processingCacheFramesRemaining; + ma_uint32 processingSizeInFrames; + + /* Read and written by multiple threads. */ + MA_ATOMIC(4, ma_bool32) isReading; + + /* Modified only by the audio thread. */ + ma_stack* pPreMixStack; +}; + +MA_API ma_result ma_node_graph_init(const ma_node_graph_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_node_graph* pNodeGraph); +MA_API void ma_node_graph_uninit(ma_node_graph* pNodeGraph, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API ma_node* ma_node_graph_get_endpoint(ma_node_graph* pNodeGraph); +MA_API ma_result ma_node_graph_read_pcm_frames(ma_node_graph* pNodeGraph, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead); +MA_API ma_uint32 ma_node_graph_get_channels(const ma_node_graph* pNodeGraph); +MA_API ma_uint64 ma_node_graph_get_time(const ma_node_graph* pNodeGraph); +MA_API ma_result ma_node_graph_set_time(ma_node_graph* pNodeGraph, ma_uint64 globalTime); + + + +/* Data source node. 0 input buses, 1 output bus. Used for reading from a data source. */ +typedef struct +{ + ma_node_config nodeConfig; + ma_data_source* pDataSource; +} ma_data_source_node_config; + +MA_API ma_data_source_node_config ma_data_source_node_config_init(ma_data_source* pDataSource); + + +typedef struct +{ + ma_node_base base; + ma_data_source* pDataSource; +} ma_data_source_node; + +MA_API ma_result ma_data_source_node_init(ma_node_graph* pNodeGraph, const ma_data_source_node_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_data_source_node* pDataSourceNode); +MA_API void ma_data_source_node_uninit(ma_data_source_node* pDataSourceNode, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API ma_result ma_data_source_node_set_looping(ma_data_source_node* pDataSourceNode, ma_bool32 isLooping); +MA_API ma_bool32 ma_data_source_node_is_looping(ma_data_source_node* pDataSourceNode); + + +/* Splitter Node. 1 input, many outputs. Used for splitting/copying a stream so it can be used as input to two separate output nodes. 
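*/
+
+/*
+Editor's sketch (illustration only): wiring together the node graph APIs declared above.
+It initializes a graph, attaches a data source node to the graph's endpoint, and pulls frames.
+The stereo channel count and NULL allocation callbacks are arbitrary assumptions, and passing
+NULL for the optional frames-read output is assumed to be acceptable.
+*/
+#if 0 /* Illustration only; not compiled. */
+static ma_result read_through_graph_example(ma_data_source* pDataSource, float* pFramesOut, ma_uint64 frameCount)
+{
+    ma_node_graph graph;
+    ma_node_graph_config graphConfig = ma_node_graph_config_init(2); /* Stereo graph. */
+    ma_data_source_node node;
+    ma_data_source_node_config nodeConfig = ma_data_source_node_config_init(pDataSource);
+    ma_result result;
+
+    result = ma_node_graph_init(&graphConfig, NULL, &graph);
+    if (result != MA_SUCCESS) {
+        return result;
+    }
+
+    result = ma_data_source_node_init(&graph, &nodeConfig, NULL, &node);
+    if (result != MA_SUCCESS) {
+        ma_node_graph_uninit(&graph, NULL);
+        return result;
+    }
+
+    /* Output bus 0 of the node feeds input bus 0 of the graph's endpoint. */
+    ma_node_attach_output_bus(&node, 0, ma_node_graph_get_endpoint(&graph), 0);
+
+    result = ma_node_graph_read_pcm_frames(&graph, pFramesOut, frameCount, NULL);
+
+    ma_data_source_node_uninit(&node, NULL);
+    ma_node_graph_uninit(&graph, NULL);
+    return result;
+}
+#endif
+
+/* Splitter node declarations: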
*/ +typedef struct +{ + ma_node_config nodeConfig; + ma_uint32 channels; + ma_uint32 outputBusCount; +} ma_splitter_node_config; + +MA_API ma_splitter_node_config ma_splitter_node_config_init(ma_uint32 channels); + + +typedef struct +{ + ma_node_base base; +} ma_splitter_node; + +MA_API ma_result ma_splitter_node_init(ma_node_graph* pNodeGraph, const ma_splitter_node_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_splitter_node* pSplitterNode); +MA_API void ma_splitter_node_uninit(ma_splitter_node* pSplitterNode, const ma_allocation_callbacks* pAllocationCallbacks); + + +/* +Biquad Node +*/ +typedef struct +{ + ma_node_config nodeConfig; + ma_biquad_config biquad; +} ma_biquad_node_config; + +MA_API ma_biquad_node_config ma_biquad_node_config_init(ma_uint32 channels, float b0, float b1, float b2, float a0, float a1, float a2); + + +typedef struct +{ + ma_node_base baseNode; + ma_biquad biquad; +} ma_biquad_node; + +MA_API ma_result ma_biquad_node_init(ma_node_graph* pNodeGraph, const ma_biquad_node_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_biquad_node* pNode); +MA_API ma_result ma_biquad_node_reinit(const ma_biquad_config* pConfig, ma_biquad_node* pNode); +MA_API void ma_biquad_node_uninit(ma_biquad_node* pNode, const ma_allocation_callbacks* pAllocationCallbacks); + + +/* +Low Pass Filter Node +*/ +typedef struct +{ + ma_node_config nodeConfig; + ma_lpf_config lpf; +} ma_lpf_node_config; + +MA_API ma_lpf_node_config ma_lpf_node_config_init(ma_uint32 channels, ma_uint32 sampleRate, double cutoffFrequency, ma_uint32 order); + + +typedef struct +{ + ma_node_base baseNode; + ma_lpf lpf; +} ma_lpf_node; + +MA_API ma_result ma_lpf_node_init(ma_node_graph* pNodeGraph, const ma_lpf_node_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_lpf_node* pNode); +MA_API ma_result ma_lpf_node_reinit(const ma_lpf_config* pConfig, ma_lpf_node* pNode); +MA_API void ma_lpf_node_uninit(ma_lpf_node* pNode, const ma_allocation_callbacks* pAllocationCallbacks); + + +/* +High Pass Filter Node +*/ +typedef struct +{ + ma_node_config nodeConfig; + ma_hpf_config hpf; +} ma_hpf_node_config; + +MA_API ma_hpf_node_config ma_hpf_node_config_init(ma_uint32 channels, ma_uint32 sampleRate, double cutoffFrequency, ma_uint32 order); + + +typedef struct +{ + ma_node_base baseNode; + ma_hpf hpf; +} ma_hpf_node; + +MA_API ma_result ma_hpf_node_init(ma_node_graph* pNodeGraph, const ma_hpf_node_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_hpf_node* pNode); +MA_API ma_result ma_hpf_node_reinit(const ma_hpf_config* pConfig, ma_hpf_node* pNode); +MA_API void ma_hpf_node_uninit(ma_hpf_node* pNode, const ma_allocation_callbacks* pAllocationCallbacks); + + +/* +Band Pass Filter Node +*/ +typedef struct +{ + ma_node_config nodeConfig; + ma_bpf_config bpf; +} ma_bpf_node_config; + +MA_API ma_bpf_node_config ma_bpf_node_config_init(ma_uint32 channels, ma_uint32 sampleRate, double cutoffFrequency, ma_uint32 order); + + +typedef struct +{ + ma_node_base baseNode; + ma_bpf bpf; +} ma_bpf_node; + +MA_API ma_result ma_bpf_node_init(ma_node_graph* pNodeGraph, const ma_bpf_node_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_bpf_node* pNode); +MA_API ma_result ma_bpf_node_reinit(const ma_bpf_config* pConfig, ma_bpf_node* pNode); +MA_API void ma_bpf_node_uninit(ma_bpf_node* pNode, const ma_allocation_callbacks* pAllocationCallbacks); + + +/* +Notching Filter Node +*/ +typedef struct +{ + ma_node_config nodeConfig; + 
ma_notch_config notch; +} ma_notch_node_config; + +MA_API ma_notch_node_config ma_notch_node_config_init(ma_uint32 channels, ma_uint32 sampleRate, double q, double frequency); + + +typedef struct +{ + ma_node_base baseNode; + ma_notch2 notch; +} ma_notch_node; + +MA_API ma_result ma_notch_node_init(ma_node_graph* pNodeGraph, const ma_notch_node_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_notch_node* pNode); +MA_API ma_result ma_notch_node_reinit(const ma_notch_config* pConfig, ma_notch_node* pNode); +MA_API void ma_notch_node_uninit(ma_notch_node* pNode, const ma_allocation_callbacks* pAllocationCallbacks); + + +/* +Peaking Filter Node +*/ +typedef struct +{ + ma_node_config nodeConfig; + ma_peak_config peak; +} ma_peak_node_config; + +MA_API ma_peak_node_config ma_peak_node_config_init(ma_uint32 channels, ma_uint32 sampleRate, double gainDB, double q, double frequency); + + +typedef struct +{ + ma_node_base baseNode; + ma_peak2 peak; +} ma_peak_node; + +MA_API ma_result ma_peak_node_init(ma_node_graph* pNodeGraph, const ma_peak_node_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_peak_node* pNode); +MA_API ma_result ma_peak_node_reinit(const ma_peak_config* pConfig, ma_peak_node* pNode); +MA_API void ma_peak_node_uninit(ma_peak_node* pNode, const ma_allocation_callbacks* pAllocationCallbacks); + + +/* +Low Shelf Filter Node +*/ +typedef struct +{ + ma_node_config nodeConfig; + ma_loshelf_config loshelf; +} ma_loshelf_node_config; + +MA_API ma_loshelf_node_config ma_loshelf_node_config_init(ma_uint32 channels, ma_uint32 sampleRate, double gainDB, double q, double frequency); + + +typedef struct +{ + ma_node_base baseNode; + ma_loshelf2 loshelf; +} ma_loshelf_node; + +MA_API ma_result ma_loshelf_node_init(ma_node_graph* pNodeGraph, const ma_loshelf_node_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_loshelf_node* pNode); +MA_API ma_result ma_loshelf_node_reinit(const ma_loshelf_config* pConfig, ma_loshelf_node* pNode); +MA_API void ma_loshelf_node_uninit(ma_loshelf_node* pNode, const ma_allocation_callbacks* pAllocationCallbacks); + + +/* +High Shelf Filter Node +*/ +typedef struct +{ + ma_node_config nodeConfig; + ma_hishelf_config hishelf; +} ma_hishelf_node_config; + +MA_API ma_hishelf_node_config ma_hishelf_node_config_init(ma_uint32 channels, ma_uint32 sampleRate, double gainDB, double q, double frequency); + + +typedef struct +{ + ma_node_base baseNode; + ma_hishelf2 hishelf; +} ma_hishelf_node; + +MA_API ma_result ma_hishelf_node_init(ma_node_graph* pNodeGraph, const ma_hishelf_node_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_hishelf_node* pNode); +MA_API ma_result ma_hishelf_node_reinit(const ma_hishelf_config* pConfig, ma_hishelf_node* pNode); +MA_API void ma_hishelf_node_uninit(ma_hishelf_node* pNode, const ma_allocation_callbacks* pAllocationCallbacks); + + +typedef struct +{ + ma_node_config nodeConfig; + ma_delay_config delay; +} ma_delay_node_config; + +MA_API ma_delay_node_config ma_delay_node_config_init(ma_uint32 channels, ma_uint32 sampleRate, ma_uint32 delayInFrames, float decay); + + +typedef struct +{ + ma_node_base baseNode; + ma_delay delay; +} ma_delay_node; + +MA_API ma_result ma_delay_node_init(ma_node_graph* pNodeGraph, const ma_delay_node_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_delay_node* pDelayNode); +MA_API void ma_delay_node_uninit(ma_delay_node* pDelayNode, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API 
void ma_delay_node_set_wet(ma_delay_node* pDelayNode, float value); +MA_API float ma_delay_node_get_wet(const ma_delay_node* pDelayNode); +MA_API void ma_delay_node_set_dry(ma_delay_node* pDelayNode, float value); +MA_API float ma_delay_node_get_dry(const ma_delay_node* pDelayNode); +MA_API void ma_delay_node_set_decay(ma_delay_node* pDelayNode, float value); +MA_API float ma_delay_node_get_decay(const ma_delay_node* pDelayNode); +#endif /* MA_NO_NODE_GRAPH */ + + +/* SECTION: miniaudio_engine.h */ +/************************************************************************************************************************************************************ + +Engine + +************************************************************************************************************************************************************/ +#if !defined(MA_NO_ENGINE) && !defined(MA_NO_NODE_GRAPH) +typedef struct ma_engine ma_engine; +typedef struct ma_sound ma_sound; + + +/* Sound flags. */ +typedef enum +{ + /* Resource manager flags. */ + MA_SOUND_FLAG_STREAM = 0x00000001, /* MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_STREAM */ + MA_SOUND_FLAG_DECODE = 0x00000002, /* MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_DECODE */ + MA_SOUND_FLAG_ASYNC = 0x00000004, /* MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_ASYNC */ + MA_SOUND_FLAG_WAIT_INIT = 0x00000008, /* MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_WAIT_INIT */ + MA_SOUND_FLAG_UNKNOWN_LENGTH = 0x00000010, /* MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_UNKNOWN_LENGTH */ + MA_SOUND_FLAG_LOOPING = 0x00000020, /* MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_LOOPING */ + + /* ma_sound specific flags. */ + MA_SOUND_FLAG_NO_DEFAULT_ATTACHMENT = 0x00001000, /* Do not attach to the endpoint by default. Useful for when setting up nodes in a complex graph system. */ + MA_SOUND_FLAG_NO_PITCH = 0x00002000, /* Disable pitch shifting with ma_sound_set_pitch() and ma_sound_group_set_pitch(). This is an optimization. */ + MA_SOUND_FLAG_NO_SPATIALIZATION = 0x00004000 /* Disable spatialization. */ +} ma_sound_flags; + +#ifndef MA_ENGINE_MAX_LISTENERS +#define MA_ENGINE_MAX_LISTENERS 4 +#endif + +#define MA_LISTENER_INDEX_CLOSEST ((ma_uint8)-1) + +typedef enum +{ + ma_engine_node_type_sound, + ma_engine_node_type_group +} ma_engine_node_type; + +typedef struct +{ + ma_engine* pEngine; + ma_engine_node_type type; + ma_uint32 channelsIn; + ma_uint32 channelsOut; + ma_uint32 sampleRate; /* Only used when the type is set to ma_engine_node_type_sound. */ + ma_uint32 volumeSmoothTimeInPCMFrames; /* The number of frames to smooth over volume changes. Defaults to 0 in which case no smoothing is used. */ + ma_mono_expansion_mode monoExpansionMode; + ma_bool8 isPitchDisabled; /* Pitching can be explicitly disabled with MA_SOUND_FLAG_NO_PITCH to optimize processing. */ + ma_bool8 isSpatializationDisabled; /* Spatialization can be explicitly disabled with MA_SOUND_FLAG_NO_SPATIALIZATION. */ + ma_uint8 pinnedListenerIndex; /* The index of the listener this node should always use for spatialization. If set to MA_LISTENER_INDEX_CLOSEST the engine will use the closest listener. */ +} ma_engine_node_config; + +MA_API ma_engine_node_config ma_engine_node_config_init(ma_engine* pEngine, ma_engine_node_type type, ma_uint32 flags); + + +/* Base node object for both ma_sound and ma_sound_group. */ +typedef struct +{ + ma_node_base baseNode; /* Must be the first member for compatibility with the ma_node API. */ + ma_engine* pEngine; /* A pointer to the engine. Set based on the value from the config. 
*/
+    ma_uint32 sampleRate;                             /* The sample rate of the input data. For sounds backed by a data source, this will be the data source's sample rate. Otherwise it'll be the engine's sample rate. */
+    ma_uint32 volumeSmoothTimeInPCMFrames;
+    ma_mono_expansion_mode monoExpansionMode;
+    ma_fader fader;
+    ma_linear_resampler resampler;                    /* For pitch shift. */
+    ma_spatializer spatializer;
+    ma_panner panner;
+    ma_gainer volumeGainer;                           /* This will only be used if volumeSmoothTimeInPCMFrames is > 0. */
+    ma_atomic_float volume;                           /* Defaults to 1. */
+    MA_ATOMIC(4, float) pitch;
+    float oldPitch;                                   /* For determining whether or not the resampler needs to be updated to reflect the new pitch. The resampler will be updated on the mixing thread. */
+    float oldDopplerPitch;                            /* For determining whether or not the resampler needs to be updated to take a new doppler pitch into account. */
+    MA_ATOMIC(4, ma_bool32) isPitchDisabled;          /* When set to true, pitching will be disabled which will allow the resampler to be bypassed to save some computation. */
+    MA_ATOMIC(4, ma_bool32) isSpatializationDisabled; /* Set to false by default. When set to true, spatialization will not be applied. */
+    MA_ATOMIC(4, ma_uint32) pinnedListenerIndex;      /* The index of the listener this node should always use for spatialization. If set to MA_LISTENER_INDEX_CLOSEST the engine will use the closest listener. */
+
+    /* When setting a fade, it's not done immediately in ma_sound_set_fade(). It's deferred to the audio thread which means we need to store the settings here. */
+    struct
+    {
+        ma_atomic_float volumeBeg;
+        ma_atomic_float volumeEnd;
+        ma_atomic_uint64 fadeLengthInFrames;          /* <-- Defaults to (~(ma_uint64)0) which is used to indicate that no fade should be applied. */
+        ma_atomic_uint64 absoluteGlobalTimeInFrames;  /* <-- The time to start the fade. */
+    } fadeSettings;
+
+    /* Memory management. */
+    ma_bool8 _ownsHeap;
+    void* _pHeap;
+} ma_engine_node;
+
+MA_API ma_result ma_engine_node_get_heap_size(const ma_engine_node_config* pConfig, size_t* pHeapSizeInBytes);
+MA_API ma_result ma_engine_node_init_preallocated(const ma_engine_node_config* pConfig, void* pHeap, ma_engine_node* pEngineNode);
+MA_API ma_result ma_engine_node_init(const ma_engine_node_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_engine_node* pEngineNode);
+MA_API void ma_engine_node_uninit(ma_engine_node* pEngineNode, const ma_allocation_callbacks* pAllocationCallbacks);
+
+
+#define MA_SOUND_SOURCE_CHANNEL_COUNT 0xFFFFFFFF
+
+/* Callback for when a sound reaches the end. */
+typedef void (* ma_sound_end_proc)(void* pUserData, ma_sound* pSound);
+
+typedef struct
+{
+    const char* pFilePath;                      /* Set this to load from the resource manager. */
+    const wchar_t* pFilePathW;                  /* Set this to load from the resource manager. */
+    ma_data_source* pDataSource;                /* Set this to load from an existing data source. */
+    ma_node* pInitialAttachment;                /* If set, the sound will be attached to an input of this node. This can be set to a ma_sound. If set to NULL, the sound will be attached directly to the endpoint unless MA_SOUND_FLAG_NO_DEFAULT_ATTACHMENT is set in `flags`. */
+    ma_uint32 initialAttachmentInputBusIndex;   /* The index of the input bus of pInitialAttachment to attach the sound to. */
+    ma_uint32 channelsIn;                       /* Ignored if using a data source as input (the data source's channel count will be used always). Otherwise, setting to 0 will cause the engine's channel count to be used.
*/
+    ma_uint32 channelsOut;                      /* Set this to 0 (default) to use the engine's channel count. Set to MA_SOUND_SOURCE_CHANNEL_COUNT to use the data source's channel count (only used if using a data source as input). */
+    ma_mono_expansion_mode monoExpansionMode;   /* Controls how the mono channel should be expanded to other channels when spatialization is disabled on a sound. */
+    ma_uint32 flags;                            /* A combination of MA_SOUND_FLAG_* flags. */
+    ma_uint32 volumeSmoothTimeInPCMFrames;      /* The number of frames to smooth over volume changes. Defaults to 0 in which case no smoothing is used. */
+    ma_uint64 initialSeekPointInPCMFrames;      /* Initializes the sound such that it's seeked to this location by default. */
+    ma_uint64 rangeBegInPCMFrames;
+    ma_uint64 rangeEndInPCMFrames;
+    ma_uint64 loopPointBegInPCMFrames;
+    ma_uint64 loopPointEndInPCMFrames;
+    ma_sound_end_proc endCallback;              /* Fired when the sound reaches the end. Will be fired from the audio thread. Do not restart, uninitialize or otherwise change the state of the sound from here. Instead fire an event or set a variable to indicate to a different thread to change the state of the sound. Will not be fired in response to a scheduled stop with ma_sound_set_stop_time_*(). */
+    void* pEndCallbackUserData;
+#ifndef MA_NO_RESOURCE_MANAGER
+    ma_resource_manager_pipeline_notifications initNotifications;
+#endif
+    ma_fence* pDoneFence;                       /* Deprecated. Use initNotifications instead. Released when the resource manager has finished decoding the entire sound. Not used with streams. */
+    ma_bool32 isLooping;                        /* Deprecated. Use the MA_SOUND_FLAG_LOOPING flag in `flags` instead. */
+} ma_sound_config;
+
+MA_API ma_sound_config ma_sound_config_init(void);                  /* Deprecated. Will be removed in version 0.12. Use ma_sound_config_init_2() instead. */
+MA_API ma_sound_config ma_sound_config_init_2(ma_engine* pEngine);  /* Will be renamed to ma_sound_config_init() in version 0.12. */
+
+struct ma_sound
+{
+    ma_engine_node engineNode;          /* Must be the first member for compatibility with the ma_node API. */
+    ma_data_source* pDataSource;
+    MA_ATOMIC(8, ma_uint64) seekTarget; /* The PCM frame index to seek to in the mixing thread. Set to (~(ma_uint64)0) to not perform any seeking. */
+    MA_ATOMIC(4, ma_bool32) atEnd;
+    ma_sound_end_proc endCallback;
+    void* pEndCallbackUserData;
+    ma_bool8 ownsDataSource;
+
+    /*
+    We're declaring a resource manager data source object here to save us a malloc when loading a
+    sound via the resource manager, which I *think* will be the most common scenario.
+    */
+#ifndef MA_NO_RESOURCE_MANAGER
+    ma_resource_manager_data_source* pResourceManagerDataSource;
+#endif
+};
+
+/* Structure specifically for sounds played with ma_engine_play_sound(). Making this a separate structure to reduce overhead. */
+typedef struct ma_sound_inlined ma_sound_inlined;
+struct ma_sound_inlined
+{
+    ma_sound sound;
+    ma_sound_inlined* pNext;
+    ma_sound_inlined* pPrev;
+};
+
+/* A sound group is just a sound. */
+typedef ma_sound_config ma_sound_group_config;
+typedef ma_sound ma_sound_group;
+
+MA_API ma_sound_group_config ma_sound_group_config_init(void);                  /* Deprecated. Will be removed in version 0.12. Use ma_sound_group_config_init_2() instead. */
+MA_API ma_sound_group_config ma_sound_group_config_init_2(ma_engine* pEngine);  /* Will be renamed to ma_sound_group_config_init() in version 0.12. */
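+
+/*
+Illustrative sketch (not from upstream documentation): initializing a sound through the config API
+above. This assumes an already-initialized `ma_engine` named `engine`; the file name is
+hypothetical and error handling is abbreviated.
+
+    ma_sound_config soundConfig = ma_sound_config_init_2(&engine);
+    soundConfig.pFilePath = "my_sound.wav";
+    soundConfig.flags     = MA_SOUND_FLAG_DECODE;    ( decode up-front instead of streaming )
+
+    ma_sound sound;
+    if (ma_sound_init_ex(&engine, &soundConfig, &sound) != MA_SUCCESS) {
+        ( handle the error )
+    }
+    ma_sound_start(&sound);
+*/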
+
+typedef void (* ma_engine_process_proc)(void* pUserData, float* pFramesOut, ma_uint64 frameCount);
+
+typedef struct
+{
+#if !defined(MA_NO_RESOURCE_MANAGER)
+    ma_resource_manager* pResourceManager;          /* Can be null in which case a resource manager will be created for you. */
+#endif
+#if !defined(MA_NO_DEVICE_IO)
+    ma_context* pContext;
+    ma_device* pDevice;                             /* If set, the caller is responsible for calling ma_engine_data_callback() in the device's data callback. */
+    ma_device_id* pPlaybackDeviceID;                /* The ID of the playback device to use with the default listener. */
+    ma_device_data_proc dataCallback;               /* Can be null. Can be used to provide a custom device data callback. */
+    ma_device_notification_proc notificationCallback;
+#endif
+    ma_log* pLog;                                   /* When set to NULL, will use the context's log. */
+    ma_uint32 listenerCount;                        /* Must be between 1 and MA_ENGINE_MAX_LISTENERS. */
+    ma_uint32 channels;                             /* The number of channels to use when mixing and spatializing. When set to 0, will use the native channel count of the device. */
+    ma_uint32 sampleRate;                           /* The sample rate. When set to 0 will use the native sample rate of the device. */
+    ma_uint32 periodSizeInFrames;                   /* If set to something other than 0, updates will always be exactly this size. The underlying device may be a different size, but from the perspective of the mixer that won't matter. */
+    ma_uint32 periodSizeInMilliseconds;             /* Used if periodSizeInFrames is unset. */
+    ma_uint32 gainSmoothTimeInFrames;               /* The number of frames to interpolate the gain of spatialized sounds across. If set to 0, will use gainSmoothTimeInMilliseconds. */
+    ma_uint32 gainSmoothTimeInMilliseconds;         /* When set to 0, gainSmoothTimeInFrames will be used. If both are set to 0, a default value will be used. */
+    ma_uint32 defaultVolumeSmoothTimeInPCMFrames;   /* Defaults to 0. Controls the default amount of smoothing to apply to volume changes to sounds. High values mean more smoothing at the expense of high latency (will take longer to reach the new volume). */
+    ma_uint32 preMixStackSizeInBytes;               /* A stack is used for internal processing in the node graph. This allows you to configure the size of this stack. Smaller values will reduce the maximum depth of your node graph. You should rarely need to modify this. */
+    ma_allocation_callbacks allocationCallbacks;
+    ma_bool32 noAutoStart;                          /* When set to true, requires an explicit call to ma_engine_start(). This is false by default, meaning the engine will be started automatically in ma_engine_init(). */
+    ma_bool32 noDevice;                             /* When set to true, don't create a default device. ma_engine_read_pcm_frames() can be called manually to read data. */
+    ma_mono_expansion_mode monoExpansionMode;       /* Controls how the mono channel should be expanded to other channels when spatialization is disabled on a sound. */
+    ma_vfs* pResourceManagerVFS;                    /* A pointer to a pre-allocated VFS object to use with the resource manager. This is ignored if pResourceManager is not NULL. */
+    ma_engine_process_proc onProcess;               /* Fired at the end of each call to ma_engine_read_pcm_frames(). For engines that manage their own internal device (the default configuration), this will be fired from the audio thread, and you do not need to call ma_engine_read_pcm_frames() manually in order to trigger this. */
+    void* pProcessUserData;                         /* User data that's passed into onProcess. */
+} ma_engine_config;
+
+MA_API ma_engine_config ma_engine_config_init(void);
+
+
+struct ma_engine
+{
+    ma_node_graph nodeGraph;                        /* An engine is a node graph.
It should be able to be plugged into any ma_node_graph API (with a cast) which means this must be the first member of this struct. */ +#if !defined(MA_NO_RESOURCE_MANAGER) + ma_resource_manager* pResourceManager; +#endif +#if !defined(MA_NO_DEVICE_IO) + ma_device* pDevice; /* Optionally set via the config, otherwise allocated by the engine in ma_engine_init(). */ +#endif + ma_log* pLog; + ma_uint32 sampleRate; + ma_uint32 listenerCount; + ma_spatializer_listener listeners[MA_ENGINE_MAX_LISTENERS]; + ma_allocation_callbacks allocationCallbacks; + ma_bool8 ownsResourceManager; + ma_bool8 ownsDevice; + ma_spinlock inlinedSoundLock; /* For synchronizing access to the inlined sound list. */ + ma_sound_inlined* pInlinedSoundHead; /* The first inlined sound. Inlined sounds are tracked in a linked list. */ + MA_ATOMIC(4, ma_uint32) inlinedSoundCount; /* The total number of allocated inlined sound objects. Used for debugging. */ + ma_uint32 gainSmoothTimeInFrames; /* The number of frames to interpolate the gain of spatialized sounds across. */ + ma_uint32 defaultVolumeSmoothTimeInPCMFrames; + ma_mono_expansion_mode monoExpansionMode; + ma_engine_process_proc onProcess; + void* pProcessUserData; +}; + +MA_API ma_result ma_engine_init(const ma_engine_config* pConfig, ma_engine* pEngine); +MA_API void ma_engine_uninit(ma_engine* pEngine); +MA_API ma_result ma_engine_read_pcm_frames(ma_engine* pEngine, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead); +MA_API ma_node_graph* ma_engine_get_node_graph(ma_engine* pEngine); +#if !defined(MA_NO_RESOURCE_MANAGER) +MA_API ma_resource_manager* ma_engine_get_resource_manager(ma_engine* pEngine); +#endif +MA_API ma_device* ma_engine_get_device(ma_engine* pEngine); +MA_API ma_log* ma_engine_get_log(ma_engine* pEngine); +MA_API ma_node* ma_engine_get_endpoint(ma_engine* pEngine); +MA_API ma_uint64 ma_engine_get_time_in_pcm_frames(const ma_engine* pEngine); +MA_API ma_uint64 ma_engine_get_time_in_milliseconds(const ma_engine* pEngine); +MA_API ma_result ma_engine_set_time_in_pcm_frames(ma_engine* pEngine, ma_uint64 globalTime); +MA_API ma_result ma_engine_set_time_in_milliseconds(ma_engine* pEngine, ma_uint64 globalTime); +MA_API ma_uint64 ma_engine_get_time(const ma_engine* pEngine); /* Deprecated. Use ma_engine_get_time_in_pcm_frames(). Will be removed in version 0.12. */ +MA_API ma_result ma_engine_set_time(ma_engine* pEngine, ma_uint64 globalTime); /* Deprecated. Use ma_engine_set_time_in_pcm_frames(). Will be removed in version 0.12. 
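+
+   As a broader illustration of the APIs above (a hedged sketch, not upstream documentation), the
+   minimal engine lifecycle looks like this, with error handling abbreviated:
+
+       ma_engine engine;
+       if (ma_engine_init(NULL, &engine) != MA_SUCCESS) {   ( NULL config = sensible defaults )
+           ( handle the error )
+       }
+
+       ( ...play sounds, or attach effect nodes between ma_engine_get_node_graph() and
+         ma_engine_get_endpoint()... )
+
+       ma_engine_uninit(&engine);
+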
*/ +MA_API ma_uint32 ma_engine_get_channels(const ma_engine* pEngine); +MA_API ma_uint32 ma_engine_get_sample_rate(const ma_engine* pEngine); + +MA_API ma_result ma_engine_start(ma_engine* pEngine); +MA_API ma_result ma_engine_stop(ma_engine* pEngine); +MA_API ma_result ma_engine_set_volume(ma_engine* pEngine, float volume); +MA_API float ma_engine_get_volume(ma_engine* pEngine); +MA_API ma_result ma_engine_set_gain_db(ma_engine* pEngine, float gainDB); +MA_API float ma_engine_get_gain_db(ma_engine* pEngine); + +MA_API ma_uint32 ma_engine_get_listener_count(const ma_engine* pEngine); +MA_API ma_uint32 ma_engine_find_closest_listener(const ma_engine* pEngine, float absolutePosX, float absolutePosY, float absolutePosZ); +MA_API void ma_engine_listener_set_position(ma_engine* pEngine, ma_uint32 listenerIndex, float x, float y, float z); +MA_API ma_vec3f ma_engine_listener_get_position(const ma_engine* pEngine, ma_uint32 listenerIndex); +MA_API void ma_engine_listener_set_direction(ma_engine* pEngine, ma_uint32 listenerIndex, float x, float y, float z); +MA_API ma_vec3f ma_engine_listener_get_direction(const ma_engine* pEngine, ma_uint32 listenerIndex); +MA_API void ma_engine_listener_set_velocity(ma_engine* pEngine, ma_uint32 listenerIndex, float x, float y, float z); +MA_API ma_vec3f ma_engine_listener_get_velocity(const ma_engine* pEngine, ma_uint32 listenerIndex); +MA_API void ma_engine_listener_set_cone(ma_engine* pEngine, ma_uint32 listenerIndex, float innerAngleInRadians, float outerAngleInRadians, float outerGain); +MA_API void ma_engine_listener_get_cone(const ma_engine* pEngine, ma_uint32 listenerIndex, float* pInnerAngleInRadians, float* pOuterAngleInRadians, float* pOuterGain); +MA_API void ma_engine_listener_set_world_up(ma_engine* pEngine, ma_uint32 listenerIndex, float x, float y, float z); +MA_API ma_vec3f ma_engine_listener_get_world_up(const ma_engine* pEngine, ma_uint32 listenerIndex); +MA_API void ma_engine_listener_set_enabled(ma_engine* pEngine, ma_uint32 listenerIndex, ma_bool32 isEnabled); +MA_API ma_bool32 ma_engine_listener_is_enabled(const ma_engine* pEngine, ma_uint32 listenerIndex); + +#ifndef MA_NO_RESOURCE_MANAGER +MA_API ma_result ma_engine_play_sound_ex(ma_engine* pEngine, const char* pFilePath, ma_node* pNode, ma_uint32 nodeInputBusIndex); +MA_API ma_result ma_engine_play_sound(ma_engine* pEngine, const char* pFilePath, ma_sound_group* pGroup); /* Fire and forget. 
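+
+   For example (an illustrative sketch; the file name is hypothetical and the engine is assumed to
+   already be initialized):
+
+       ma_engine_play_sound(&engine, "ding.wav", NULL);   ( NULL group = attach to the endpoint )
+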
*/ +#endif + +#ifndef MA_NO_RESOURCE_MANAGER +MA_API ma_result ma_sound_init_from_file(ma_engine* pEngine, const char* pFilePath, ma_uint32 flags, ma_sound_group* pGroup, ma_fence* pDoneFence, ma_sound* pSound); +MA_API ma_result ma_sound_init_from_file_w(ma_engine* pEngine, const wchar_t* pFilePath, ma_uint32 flags, ma_sound_group* pGroup, ma_fence* pDoneFence, ma_sound* pSound); +MA_API ma_result ma_sound_init_copy(ma_engine* pEngine, const ma_sound* pExistingSound, ma_uint32 flags, ma_sound_group* pGroup, ma_sound* pSound); +#endif +MA_API ma_result ma_sound_init_from_data_source(ma_engine* pEngine, ma_data_source* pDataSource, ma_uint32 flags, ma_sound_group* pGroup, ma_sound* pSound); +MA_API ma_result ma_sound_init_ex(ma_engine* pEngine, const ma_sound_config* pConfig, ma_sound* pSound); +MA_API void ma_sound_uninit(ma_sound* pSound); +MA_API ma_engine* ma_sound_get_engine(const ma_sound* pSound); +MA_API ma_data_source* ma_sound_get_data_source(const ma_sound* pSound); +MA_API ma_result ma_sound_start(ma_sound* pSound); +MA_API ma_result ma_sound_stop(ma_sound* pSound); +MA_API ma_result ma_sound_stop_with_fade_in_pcm_frames(ma_sound* pSound, ma_uint64 fadeLengthInFrames); /* Will overwrite any scheduled stop and fade. */ +MA_API ma_result ma_sound_stop_with_fade_in_milliseconds(ma_sound* pSound, ma_uint64 fadeLengthInFrames); /* Will overwrite any scheduled stop and fade. */ +MA_API void ma_sound_set_volume(ma_sound* pSound, float volume); +MA_API float ma_sound_get_volume(const ma_sound* pSound); +MA_API void ma_sound_set_pan(ma_sound* pSound, float pan); +MA_API float ma_sound_get_pan(const ma_sound* pSound); +MA_API void ma_sound_set_pan_mode(ma_sound* pSound, ma_pan_mode panMode); +MA_API ma_pan_mode ma_sound_get_pan_mode(const ma_sound* pSound); +MA_API void ma_sound_set_pitch(ma_sound* pSound, float pitch); +MA_API float ma_sound_get_pitch(const ma_sound* pSound); +MA_API void ma_sound_set_spatialization_enabled(ma_sound* pSound, ma_bool32 enabled); +MA_API ma_bool32 ma_sound_is_spatialization_enabled(const ma_sound* pSound); +MA_API void ma_sound_set_pinned_listener_index(ma_sound* pSound, ma_uint32 listenerIndex); +MA_API ma_uint32 ma_sound_get_pinned_listener_index(const ma_sound* pSound); +MA_API ma_uint32 ma_sound_get_listener_index(const ma_sound* pSound); +MA_API ma_vec3f ma_sound_get_direction_to_listener(const ma_sound* pSound); +MA_API void ma_sound_set_position(ma_sound* pSound, float x, float y, float z); +MA_API ma_vec3f ma_sound_get_position(const ma_sound* pSound); +MA_API void ma_sound_set_direction(ma_sound* pSound, float x, float y, float z); +MA_API ma_vec3f ma_sound_get_direction(const ma_sound* pSound); +MA_API void ma_sound_set_velocity(ma_sound* pSound, float x, float y, float z); +MA_API ma_vec3f ma_sound_get_velocity(const ma_sound* pSound); +MA_API void ma_sound_set_attenuation_model(ma_sound* pSound, ma_attenuation_model attenuationModel); +MA_API ma_attenuation_model ma_sound_get_attenuation_model(const ma_sound* pSound); +MA_API void ma_sound_set_positioning(ma_sound* pSound, ma_positioning positioning); +MA_API ma_positioning ma_sound_get_positioning(const ma_sound* pSound); +MA_API void ma_sound_set_rolloff(ma_sound* pSound, float rolloff); +MA_API float ma_sound_get_rolloff(const ma_sound* pSound); +MA_API void ma_sound_set_min_gain(ma_sound* pSound, float minGain); +MA_API float ma_sound_get_min_gain(const ma_sound* pSound); +MA_API void ma_sound_set_max_gain(ma_sound* pSound, float maxGain); +MA_API float ma_sound_get_max_gain(const ma_sound* 
pSound); +MA_API void ma_sound_set_min_distance(ma_sound* pSound, float minDistance); +MA_API float ma_sound_get_min_distance(const ma_sound* pSound); +MA_API void ma_sound_set_max_distance(ma_sound* pSound, float maxDistance); +MA_API float ma_sound_get_max_distance(const ma_sound* pSound); +MA_API void ma_sound_set_cone(ma_sound* pSound, float innerAngleInRadians, float outerAngleInRadians, float outerGain); +MA_API void ma_sound_get_cone(const ma_sound* pSound, float* pInnerAngleInRadians, float* pOuterAngleInRadians, float* pOuterGain); +MA_API void ma_sound_set_doppler_factor(ma_sound* pSound, float dopplerFactor); +MA_API float ma_sound_get_doppler_factor(const ma_sound* pSound); +MA_API void ma_sound_set_directional_attenuation_factor(ma_sound* pSound, float directionalAttenuationFactor); +MA_API float ma_sound_get_directional_attenuation_factor(const ma_sound* pSound); +MA_API void ma_sound_set_fade_in_pcm_frames(ma_sound* pSound, float volumeBeg, float volumeEnd, ma_uint64 fadeLengthInFrames); +MA_API void ma_sound_set_fade_in_milliseconds(ma_sound* pSound, float volumeBeg, float volumeEnd, ma_uint64 fadeLengthInMilliseconds); +MA_API void ma_sound_set_fade_start_in_pcm_frames(ma_sound* pSound, float volumeBeg, float volumeEnd, ma_uint64 fadeLengthInFrames, ma_uint64 absoluteGlobalTimeInFrames); +MA_API void ma_sound_set_fade_start_in_milliseconds(ma_sound* pSound, float volumeBeg, float volumeEnd, ma_uint64 fadeLengthInMilliseconds, ma_uint64 absoluteGlobalTimeInMilliseconds); +MA_API float ma_sound_get_current_fade_volume(const ma_sound* pSound); +MA_API void ma_sound_set_start_time_in_pcm_frames(ma_sound* pSound, ma_uint64 absoluteGlobalTimeInFrames); +MA_API void ma_sound_set_start_time_in_milliseconds(ma_sound* pSound, ma_uint64 absoluteGlobalTimeInMilliseconds); +MA_API void ma_sound_set_stop_time_in_pcm_frames(ma_sound* pSound, ma_uint64 absoluteGlobalTimeInFrames); +MA_API void ma_sound_set_stop_time_in_milliseconds(ma_sound* pSound, ma_uint64 absoluteGlobalTimeInMilliseconds); +MA_API void ma_sound_set_stop_time_with_fade_in_pcm_frames(ma_sound* pSound, ma_uint64 stopAbsoluteGlobalTimeInFrames, ma_uint64 fadeLengthInFrames); +MA_API void ma_sound_set_stop_time_with_fade_in_milliseconds(ma_sound* pSound, ma_uint64 stopAbsoluteGlobalTimeInMilliseconds, ma_uint64 fadeLengthInMilliseconds); +MA_API ma_bool32 ma_sound_is_playing(const ma_sound* pSound); +MA_API ma_uint64 ma_sound_get_time_in_pcm_frames(const ma_sound* pSound); +MA_API ma_uint64 ma_sound_get_time_in_milliseconds(const ma_sound* pSound); +MA_API void ma_sound_set_looping(ma_sound* pSound, ma_bool32 isLooping); +MA_API ma_bool32 ma_sound_is_looping(const ma_sound* pSound); +MA_API ma_bool32 ma_sound_at_end(const ma_sound* pSound); +MA_API ma_result ma_sound_seek_to_pcm_frame(ma_sound* pSound, ma_uint64 frameIndex); /* Just a wrapper around ma_data_source_seek_to_pcm_frame(). 
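+
+   For example (illustrative; assumes `sound` has been initialized), seeking to the five second
+   mark in the sound's own sample rate:
+
+       ma_format format;
+       ma_uint32 channels;
+       ma_uint32 sampleRate;
+       ma_sound_get_data_format(&sound, &format, &channels, &sampleRate, NULL, 0);
+       ma_sound_seek_to_pcm_frame(&sound, (ma_uint64)5 * sampleRate);
+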
*/ +MA_API ma_result ma_sound_seek_to_second(ma_sound* pSound, float seekPointInSeconds); /* Abstraction to ma_sound_seek_to_pcm_frame() */ +MA_API ma_result ma_sound_get_data_format(ma_sound* pSound, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate, ma_channel* pChannelMap, size_t channelMapCap); +MA_API ma_result ma_sound_get_cursor_in_pcm_frames(ma_sound* pSound, ma_uint64* pCursor); +MA_API ma_result ma_sound_get_length_in_pcm_frames(ma_sound* pSound, ma_uint64* pLength); +MA_API ma_result ma_sound_get_cursor_in_seconds(ma_sound* pSound, float* pCursor); +MA_API ma_result ma_sound_get_length_in_seconds(ma_sound* pSound, float* pLength); +MA_API ma_result ma_sound_set_end_callback(ma_sound* pSound, ma_sound_end_proc callback, void* pUserData); + +MA_API ma_result ma_sound_group_init(ma_engine* pEngine, ma_uint32 flags, ma_sound_group* pParentGroup, ma_sound_group* pGroup); +MA_API ma_result ma_sound_group_init_ex(ma_engine* pEngine, const ma_sound_group_config* pConfig, ma_sound_group* pGroup); +MA_API void ma_sound_group_uninit(ma_sound_group* pGroup); +MA_API ma_engine* ma_sound_group_get_engine(const ma_sound_group* pGroup); +MA_API ma_result ma_sound_group_start(ma_sound_group* pGroup); +MA_API ma_result ma_sound_group_stop(ma_sound_group* pGroup); +MA_API void ma_sound_group_set_volume(ma_sound_group* pGroup, float volume); +MA_API float ma_sound_group_get_volume(const ma_sound_group* pGroup); +MA_API void ma_sound_group_set_pan(ma_sound_group* pGroup, float pan); +MA_API float ma_sound_group_get_pan(const ma_sound_group* pGroup); +MA_API void ma_sound_group_set_pan_mode(ma_sound_group* pGroup, ma_pan_mode panMode); +MA_API ma_pan_mode ma_sound_group_get_pan_mode(const ma_sound_group* pGroup); +MA_API void ma_sound_group_set_pitch(ma_sound_group* pGroup, float pitch); +MA_API float ma_sound_group_get_pitch(const ma_sound_group* pGroup); +MA_API void ma_sound_group_set_spatialization_enabled(ma_sound_group* pGroup, ma_bool32 enabled); +MA_API ma_bool32 ma_sound_group_is_spatialization_enabled(const ma_sound_group* pGroup); +MA_API void ma_sound_group_set_pinned_listener_index(ma_sound_group* pGroup, ma_uint32 listenerIndex); +MA_API ma_uint32 ma_sound_group_get_pinned_listener_index(const ma_sound_group* pGroup); +MA_API ma_uint32 ma_sound_group_get_listener_index(const ma_sound_group* pGroup); +MA_API ma_vec3f ma_sound_group_get_direction_to_listener(const ma_sound_group* pGroup); +MA_API void ma_sound_group_set_position(ma_sound_group* pGroup, float x, float y, float z); +MA_API ma_vec3f ma_sound_group_get_position(const ma_sound_group* pGroup); +MA_API void ma_sound_group_set_direction(ma_sound_group* pGroup, float x, float y, float z); +MA_API ma_vec3f ma_sound_group_get_direction(const ma_sound_group* pGroup); +MA_API void ma_sound_group_set_velocity(ma_sound_group* pGroup, float x, float y, float z); +MA_API ma_vec3f ma_sound_group_get_velocity(const ma_sound_group* pGroup); +MA_API void ma_sound_group_set_attenuation_model(ma_sound_group* pGroup, ma_attenuation_model attenuationModel); +MA_API ma_attenuation_model ma_sound_group_get_attenuation_model(const ma_sound_group* pGroup); +MA_API void ma_sound_group_set_positioning(ma_sound_group* pGroup, ma_positioning positioning); +MA_API ma_positioning ma_sound_group_get_positioning(const ma_sound_group* pGroup); +MA_API void ma_sound_group_set_rolloff(ma_sound_group* pGroup, float rolloff); +MA_API float ma_sound_group_get_rolloff(const ma_sound_group* pGroup); +MA_API void 
ma_sound_group_set_min_gain(ma_sound_group* pGroup, float minGain);
+MA_API float ma_sound_group_get_min_gain(const ma_sound_group* pGroup);
+MA_API void ma_sound_group_set_max_gain(ma_sound_group* pGroup, float maxGain);
+MA_API float ma_sound_group_get_max_gain(const ma_sound_group* pGroup);
+MA_API void ma_sound_group_set_min_distance(ma_sound_group* pGroup, float minDistance);
+MA_API float ma_sound_group_get_min_distance(const ma_sound_group* pGroup);
+MA_API void ma_sound_group_set_max_distance(ma_sound_group* pGroup, float maxDistance);
+MA_API float ma_sound_group_get_max_distance(const ma_sound_group* pGroup);
+MA_API void ma_sound_group_set_cone(ma_sound_group* pGroup, float innerAngleInRadians, float outerAngleInRadians, float outerGain);
+MA_API void ma_sound_group_get_cone(const ma_sound_group* pGroup, float* pInnerAngleInRadians, float* pOuterAngleInRadians, float* pOuterGain);
+MA_API void ma_sound_group_set_doppler_factor(ma_sound_group* pGroup, float dopplerFactor);
+MA_API float ma_sound_group_get_doppler_factor(const ma_sound_group* pGroup);
+MA_API void ma_sound_group_set_directional_attenuation_factor(ma_sound_group* pGroup, float directionalAttenuationFactor);
+MA_API float ma_sound_group_get_directional_attenuation_factor(const ma_sound_group* pGroup);
+MA_API void ma_sound_group_set_fade_in_pcm_frames(ma_sound_group* pGroup, float volumeBeg, float volumeEnd, ma_uint64 fadeLengthInFrames);
+MA_API void ma_sound_group_set_fade_in_milliseconds(ma_sound_group* pGroup, float volumeBeg, float volumeEnd, ma_uint64 fadeLengthInMilliseconds);
+MA_API float ma_sound_group_get_current_fade_volume(ma_sound_group* pGroup);
+MA_API void ma_sound_group_set_start_time_in_pcm_frames(ma_sound_group* pGroup, ma_uint64 absoluteGlobalTimeInFrames);
+MA_API void ma_sound_group_set_start_time_in_milliseconds(ma_sound_group* pGroup, ma_uint64 absoluteGlobalTimeInMilliseconds);
+MA_API void ma_sound_group_set_stop_time_in_pcm_frames(ma_sound_group* pGroup, ma_uint64 absoluteGlobalTimeInFrames);
+MA_API void ma_sound_group_set_stop_time_in_milliseconds(ma_sound_group* pGroup, ma_uint64 absoluteGlobalTimeInMilliseconds);
+MA_API ma_bool32 ma_sound_group_is_playing(const ma_sound_group* pGroup);
+MA_API ma_uint64 ma_sound_group_get_time_in_pcm_frames(const ma_sound_group* pGroup);
+#endif  /* MA_NO_ENGINE */
+/* END SECTION: miniaudio_engine.h */
+
+#ifdef __cplusplus
+}
+#endif
+#endif  /* miniaudio_h */
+
+
+/*
+This is for preventing greying out of the implementation section.
+*/
+#if defined(Q_CREATOR_RUN) || defined(__INTELLISENSE__) || defined(__CDT_PARSER__)
+#define MINIAUDIO_IMPLEMENTATION
+#endif
+
+/************************************************************************************************************************************************************
+*************************************************************************************************************************************************************
+
+IMPLEMENTATION
+
+*************************************************************************************************************************************************************
+************************************************************************************************************************************************************/
+#if defined(MINIAUDIO_IMPLEMENTATION) || defined(MA_IMPLEMENTATION)
+#ifndef miniaudio_c
+#define miniaudio_c
+
+#include <assert.h>
+#include <limits.h>     /* For INT_MAX */
+#include <math.h>       /* sin(), etc. */
+#include <stdlib.h>     /* For malloc(), free(), wcstombs(). */
+#include <string.h>     /* For memset() */
+
+#include <stdarg.h>
+#include <stdio.h>
+#if !defined(_MSC_VER) && !defined(__DMC__)
+    #include <strings.h>    /* For strcasecmp(). */
+    #include <wchar.h>      /* For wcslen(), wcsrtombs() */
+#endif
+#ifdef _MSC_VER
+    #include <float.h>      /* For _controlfp_s constants */
+#endif
+
+#if defined(MA_WIN32)
+    #include <windows.h>
+
+    /*
+    There's a possibility that WIN32_LEAN_AND_MEAN has been defined which will exclude some symbols
+    such as STGM_READ and CLSCTX_ALL. We need to check these and define them ourselves if they're
+    unavailable.
+    */
+    #ifndef STGM_READ
+    #define STGM_READ   0x00000000L
+    #endif
+    #ifndef CLSCTX_ALL
+    #define CLSCTX_ALL  23
+    #endif
+
+    /* IUnknown is used by both the WASAPI and DirectSound backends. It's easier to just declare our version here. */
+    typedef struct ma_IUnknown ma_IUnknown;
+#endif
+
+#if !defined(MA_WIN32)
+#include <sched.h>
+#include <sys/time.h>   /* select() (used for ma_sleep()). */
+#include <pthread.h>
+#endif
+
+#ifdef MA_NX
+#include <time.h>       /* For nanosleep() */
+#endif
+
+#include <sys/stat.h>   /* For fstat(), etc. */
+
+#ifdef MA_EMSCRIPTEN
+#include <emscripten/emscripten.h>
+#endif
+
+
+/* Architecture Detection */
+#if !defined(MA_64BIT) && !defined(MA_32BIT)
+#ifdef _WIN32
+#ifdef _WIN64
+#define MA_64BIT
+#else
+#define MA_32BIT
+#endif
+#endif
+#endif
+
+#if !defined(MA_64BIT) && !defined(MA_32BIT)
+#ifdef __GNUC__
+#ifdef __LP64__
+#define MA_64BIT
+#else
+#define MA_32BIT
+#endif
+#endif
+#endif
+
+#if !defined(MA_64BIT) && !defined(MA_32BIT)
+#include <stdint.h>
+#if INTPTR_MAX == INT64_MAX
+#define MA_64BIT
+#else
+#define MA_32BIT
+#endif
+#endif
+
+#if defined(__arm__) || defined(_M_ARM)
+#define MA_ARM32
+#endif
+#if defined(__arm64) || defined(__arm64__) || defined(__aarch64__) || defined(_M_ARM64)
+#define MA_ARM64
+#endif
+
+#if defined(__x86_64__) || defined(_M_X64)
+#define MA_X64
+#elif defined(__i386) || defined(_M_IX86)
+#define MA_X86
+#elif defined(MA_ARM32) || defined(MA_ARM64)
+#define MA_ARM
+#endif
+
+/* Intrinsics Support */
+#if (defined(MA_X64) || defined(MA_X86)) && !defined(__COSMOPOLITAN__)
+    #if defined(_MSC_VER) && !defined(__clang__)
+        /* MSVC. */
+        #if _MSC_VER >= 1400 && !defined(MA_NO_SSE2)    /* 2005 */
+            #define MA_SUPPORT_SSE2
+        #endif
+        /*#if _MSC_VER >= 1600 && !defined(MA_NO_AVX)*/ /* 2010 */
+        /*    #define MA_SUPPORT_AVX*/
+        /*#endif*/
+        #if _MSC_VER >= 1700 && !defined(MA_NO_AVX2)    /* 2012 */
+            #define MA_SUPPORT_AVX2
+        #endif
+    #else
+        /* Assume GNUC-style. */
+        #if defined(__SSE2__) && !defined(MA_NO_SSE2)
+            #define MA_SUPPORT_SSE2
+        #endif
+        /*#if defined(__AVX__) && !defined(MA_NO_AVX)*/
+        /*    #define MA_SUPPORT_AVX*/
+        /*#endif*/
+        #if defined(__AVX2__) && !defined(MA_NO_AVX2)
+            #define MA_SUPPORT_AVX2
+        #endif
+    #endif
+
+    /* If at this point we still haven't determined compiler support for the intrinsics just fall back to __has_include. */
+    #if !defined(__GNUC__) && !defined(__clang__) && defined(__has_include)
+        #if !defined(MA_SUPPORT_SSE2) && !defined(MA_NO_SSE2) && __has_include(<emmintrin.h>)
+            #define MA_SUPPORT_SSE2
+        #endif
+        /*#if !defined(MA_SUPPORT_AVX) && !defined(MA_NO_AVX) && __has_include(<immintrin.h>)*/
+        /*    #define MA_SUPPORT_AVX*/
+        /*#endif*/
+        #if !defined(MA_SUPPORT_AVX2) && !defined(MA_NO_AVX2) && __has_include(<immintrin.h>)
+            #define MA_SUPPORT_AVX2
+        #endif
+    #endif
+
+    #if defined(MA_SUPPORT_AVX2) || defined(MA_SUPPORT_AVX)
+        #include <immintrin.h>
+    #elif defined(MA_SUPPORT_SSE2)
+        #include <emmintrin.h>
+    #endif
+#endif
+
+#if defined(MA_ARM)
+    #if !defined(MA_NO_NEON) && (defined(__ARM_NEON) || defined(__aarch64__) || defined(_M_ARM64))
+        #define MA_SUPPORT_NEON
+        #include <arm_neon.h>
+    #endif
+#endif
+
+/* Begin globally disabled warnings. */
+#if defined(_MSC_VER)
+    #pragma warning(push)
+    #pragma warning(disable:4752)   /* found Intel(R) Advanced Vector Extensions; consider using /arch:AVX */
+    #pragma warning(disable:4049)   /* compiler limit : terminating line number emission */
+#endif
+
+#if defined(MA_X64) || defined(MA_X86)
+    #if defined(_MSC_VER) && !defined(__clang__)
+        #if _MSC_VER >= 1400
+            #include <intrin.h>
+            static MA_INLINE void ma_cpuid(int info[4], int fid)
+            {
+                __cpuid(info, fid);
+            }
+        #else
+            #define MA_NO_CPUID
+        #endif
+
+        #if _MSC_VER >= 1600 && (defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 160040219)
+            static MA_INLINE unsigned __int64 ma_xgetbv(int reg)
+            {
+                return _xgetbv(reg);
+            }
+        #else
+            #define MA_NO_XGETBV
+        #endif
+    #elif (defined(__GNUC__) || defined(__clang__)) && !defined(MA_ANDROID)
+        static MA_INLINE void ma_cpuid(int info[4], int fid)
+        {
+            /*
+            It looks like the -fPIC option uses the ebx register which GCC complains about. We can work around this by just using a different register, the
+            specific register of which I'm letting the compiler decide on. The "k" prefix is used to specify a 32-bit register. The {...} syntax is for
+            supporting different assembly dialects.
+
+            What's basically happening is that we're saving and restoring the ebx register manually.
+            */
+            #if defined(MA_X86) && defined(__PIC__)
+                __asm__ __volatile__ (
+                    "xchg{l} {%%}ebx, %k1;"
+                    "cpuid;"
+                    "xchg{l} {%%}ebx, %k1;"
+                    : "=a"(info[0]), "=&r"(info[1]), "=c"(info[2]), "=d"(info[3]) : "a"(fid), "c"(0)
+                );
+            #else
+                __asm__ __volatile__ (
+                    "cpuid" : "=a"(info[0]), "=b"(info[1]), "=c"(info[2]), "=d"(info[3]) : "a"(fid), "c"(0)
+                );
+            #endif
+        }
+
+        static MA_INLINE ma_uint64 ma_xgetbv(int reg)
+        {
+            unsigned int hi;
+            unsigned int lo;
+
+            __asm__ __volatile__ (
+                "xgetbv" : "=a"(lo), "=d"(hi) : "c"(reg)
+            );
+
+            return ((ma_uint64)hi << 32) | (ma_uint64)lo;
+        }
+    #else
+        #define MA_NO_CPUID
+        #define MA_NO_XGETBV
+    #endif
+#else
+    #define MA_NO_CPUID
+    #define MA_NO_XGETBV
+#endif
+
+static MA_INLINE ma_bool32 ma_has_sse2(void)
+{
+#if defined(MA_SUPPORT_SSE2)
+    #if (defined(MA_X64) || defined(MA_X86)) && !defined(MA_NO_SSE2)
+        #if defined(MA_X64)
+            return MA_TRUE;    /* 64-bit targets always support SSE2. */
+        #elif (defined(_M_IX86_FP) && _M_IX86_FP == 2) || defined(__SSE2__)
+            return MA_TRUE;    /* If the compiler is allowed to freely generate SSE2 code we can assume support. */
+        #else
+            #if defined(MA_NO_CPUID)
+                return MA_FALSE;
+            #else
+                int info[4];
+                ma_cpuid(info, 1);
+                return (info[3] & (1 << 26)) != 0;
+            #endif
+        #endif
+    #else
+        return MA_FALSE;       /* SSE2 is only supported on x86 and x64 architectures. */
+    #endif
+#else
+    return MA_FALSE;           /* No compiler support. */
+#endif
+}
+
+#if 0
+static MA_INLINE ma_bool32 ma_has_avx()
+{
+#if defined(MA_SUPPORT_AVX)
+    #if (defined(MA_X64) || defined(MA_X86)) && !defined(MA_NO_AVX)
+        #if defined(_AVX_) || defined(__AVX__)
+            return MA_TRUE;    /* If the compiler is allowed to freely generate AVX code we can assume support. */
+        #else
+            /* AVX requires both CPU and OS support. */
+            #if defined(MA_NO_CPUID) || defined(MA_NO_XGETBV)
+                return MA_FALSE;
+            #else
+                int info[4];
+                ma_cpuid(info, 1);
+                if (((info[2] & (1 << 27)) != 0) && ((info[2] & (1 << 28)) != 0)) {
+                    ma_uint64 xrc = ma_xgetbv(0);
+                    if ((xrc & 0x06) == 0x06) {
+                        return MA_TRUE;
+                    } else {
+                        return MA_FALSE;
+                    }
+                } else {
+                    return MA_FALSE;
+                }
+            #endif
+        #endif
+    #else
+        return MA_FALSE;       /* AVX is only supported on x86 and x64 architectures. */
+    #endif
+#else
+    return MA_FALSE;           /* No compiler support.
*/ +#endif +} +#endif + +static MA_INLINE ma_bool32 ma_has_avx2(void) +{ +#if defined(MA_SUPPORT_AVX2) + #if (defined(MA_X64) || defined(MA_X86)) && !defined(MA_NO_AVX2) + #if defined(_AVX2_) || defined(__AVX2__) + return MA_TRUE; /* If the compiler is allowed to freely generate AVX2 code we can assume support. */ + #else + /* AVX2 requires both CPU and OS support. */ + #if defined(MA_NO_CPUID) || defined(MA_NO_XGETBV) + return MA_FALSE; + #else + int info1[4]; + int info7[4]; + ma_cpuid(info1, 1); + ma_cpuid(info7, 7); + if (((info1[2] & (1 << 27)) != 0) && ((info7[1] & (1 << 5)) != 0)) { + ma_uint64 xrc = ma_xgetbv(0); + if ((xrc & 0x06) == 0x06) { + return MA_TRUE; + } else { + return MA_FALSE; + } + } else { + return MA_FALSE; + } + #endif + #endif + #else + return MA_FALSE; /* AVX2 is only supported on x86 and x64 architectures. */ + #endif +#else + return MA_FALSE; /* No compiler support. */ +#endif +} + +static MA_INLINE ma_bool32 ma_has_neon(void) +{ +#if defined(MA_SUPPORT_NEON) + #if defined(MA_ARM) && !defined(MA_NO_NEON) + #if (defined(__ARM_NEON) || defined(__aarch64__) || defined(_M_ARM64)) + return MA_TRUE; /* If the compiler is allowed to freely generate NEON code we can assume support. */ + #else + /* TODO: Runtime check. */ + return MA_FALSE; + #endif + #else + return MA_FALSE; /* NEON is only supported on ARM architectures. */ + #endif +#else + return MA_FALSE; /* No compiler support. */ +#endif +} + +#if defined(__has_builtin) + #define MA_COMPILER_HAS_BUILTIN(x) __has_builtin(x) +#else + #define MA_COMPILER_HAS_BUILTIN(x) 0 +#endif + +#ifndef MA_ASSUME + #if MA_COMPILER_HAS_BUILTIN(__builtin_assume) + #define MA_ASSUME(x) __builtin_assume(x) + #elif MA_COMPILER_HAS_BUILTIN(__builtin_unreachable) + #define MA_ASSUME(x) do { if (!(x)) __builtin_unreachable(); } while (0) + #elif defined(_MSC_VER) + #define MA_ASSUME(x) __assume(x) + #else + #define MA_ASSUME(x) (void)(x) + #endif +#endif + +#ifndef MA_RESTRICT + #if defined(__clang__) || defined(__GNUC__) || defined(_MSC_VER) + #define MA_RESTRICT __restrict + #else + #define MA_RESTRICT + #endif +#endif + +#if defined(_MSC_VER) && _MSC_VER >= 1400 + #define MA_HAS_BYTESWAP16_INTRINSIC + #define MA_HAS_BYTESWAP32_INTRINSIC + #define MA_HAS_BYTESWAP64_INTRINSIC +#elif defined(__clang__) + #if MA_COMPILER_HAS_BUILTIN(__builtin_bswap16) + #define MA_HAS_BYTESWAP16_INTRINSIC + #endif + #if MA_COMPILER_HAS_BUILTIN(__builtin_bswap32) + #define MA_HAS_BYTESWAP32_INTRINSIC + #endif + #if MA_COMPILER_HAS_BUILTIN(__builtin_bswap64) + #define MA_HAS_BYTESWAP64_INTRINSIC + #endif +#elif defined(__GNUC__) + #if ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)) + #define MA_HAS_BYTESWAP32_INTRINSIC + #define MA_HAS_BYTESWAP64_INTRINSIC + #endif + #if ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)) + #define MA_HAS_BYTESWAP16_INTRINSIC + #endif +#endif + + +static MA_INLINE ma_bool32 ma_is_little_endian(void) +{ +#if defined(MA_X86) || defined(MA_X64) + return MA_TRUE; +#else + int n = 1; + return (*(char*)&n) == 1; +#endif +} + +static MA_INLINE ma_bool32 ma_is_big_endian(void) +{ + return !ma_is_little_endian(); +} + + +static MA_INLINE ma_uint32 ma_swap_endian_uint32(ma_uint32 n) +{ +#ifdef MA_HAS_BYTESWAP32_INTRINSIC + #if defined(_MSC_VER) + return _byteswap_ulong(n); + #elif defined(__GNUC__) || defined(__clang__) + #if defined(MA_ARM) && (defined(__ARM_ARCH) && __ARM_ARCH >= 6) && !defined(MA_64BIT) /* <-- 64-bit inline assembly has not been tested, so disabling for now. 
*/ + /* Inline assembly optimized implementation for ARM. In my testing, GCC does not generate optimized code with __builtin_bswap32(). */ + ma_uint32 r; + __asm__ __volatile__ ( + #if defined(MA_64BIT) + "rev %w[out], %w[in]" : [out]"=r"(r) : [in]"r"(n) /* <-- This is untested. If someone in the community could test this, that would be appreciated! */ + #else + "rev %[out], %[in]" : [out]"=r"(r) : [in]"r"(n) + #endif + ); + return r; + #else + return __builtin_bswap32(n); + #endif + #else + #error "This compiler does not support the byte swap intrinsic." + #endif +#else + return ((n & 0xFF000000) >> 24) | + ((n & 0x00FF0000) >> 8) | + ((n & 0x0000FF00) << 8) | + ((n & 0x000000FF) << 24); +#endif +} + + +#if !defined(MA_EMSCRIPTEN) +#ifdef MA_WIN32 +static void ma_sleep__win32(ma_uint32 milliseconds) +{ + Sleep((DWORD)milliseconds); +} +#endif +#ifdef MA_POSIX +static void ma_sleep__posix(ma_uint32 milliseconds) +{ +#ifdef MA_EMSCRIPTEN + (void)milliseconds; + MA_ASSERT(MA_FALSE); /* The Emscripten build should never sleep. */ +#else + #if (defined(_POSIX_C_SOURCE) && _POSIX_C_SOURCE >= 199309L) || defined(MA_NX) + struct timespec ts; + ts.tv_sec = milliseconds / 1000; + ts.tv_nsec = milliseconds % 1000 * 1000000; + nanosleep(&ts, NULL); + #else + struct timeval tv; + tv.tv_sec = milliseconds / 1000; + tv.tv_usec = milliseconds % 1000 * 1000; + select(0, NULL, NULL, NULL, &tv); + #endif +#endif +} +#endif + +static MA_INLINE void ma_sleep(ma_uint32 milliseconds) +{ +#ifdef MA_WIN32 + ma_sleep__win32(milliseconds); +#endif +#ifdef MA_POSIX + ma_sleep__posix(milliseconds); +#endif +} +#endif + +static MA_INLINE void ma_yield(void) +{ +#if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64) + /* x86/x64 */ + #if (defined(_MSC_VER) || defined(__WATCOMC__) || defined(__DMC__)) && !defined(__clang__) + #if _MSC_VER >= 1400 + _mm_pause(); + #else + #if defined(__DMC__) + /* Digital Mars does not recognize the PAUSE opcode. Fall back to NOP. */ + __asm nop; + #else + __asm pause; + #endif + #endif + #else + __asm__ __volatile__ ("pause"); + #endif +#elif (defined(__arm__) && defined(__ARM_ARCH) && __ARM_ARCH >= 7) || defined(_M_ARM64) || (defined(_M_ARM) && _M_ARM >= 7) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6T2__) + /* ARM */ + #if defined(_MSC_VER) + /* Apparently there is a __yield() intrinsic that's compatible with ARM, but I cannot find documentation for it nor can I find where it's declared. */ + __yield(); + #else + __asm__ __volatile__ ("yield"); /* ARMv6K/ARMv6T2 and above. */ + #endif +#else + /* Unknown or unsupported architecture. No-op. */ +#endif +} + + +#define MA_MM_DENORMALS_ZERO_MASK 0x0040 +#define MA_MM_FLUSH_ZERO_MASK 0x8000 + +static MA_INLINE unsigned int ma_disable_denormals(void) +{ + unsigned int prevState; + + #if defined(_MSC_VER) + { + /* + Older versions of Visual Studio don't support the "safe" versions of _controlfp_s(). I don't + know which version of Visual Studio first added support for _controlfp_s(), but I do know + that VC6 lacks support. _MSC_VER = 1200 is VC6, but if you get compilation errors on older + versions of Visual Studio, let me know and I'll make the necessary adjustment. 
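+
+    A typical usage pattern (an illustrative sketch, not upstream documentation) brackets a block of
+    floating point processing so that denormals are flushed to zero while it runs:
+
+        unsigned int prevState = ma_disable_denormals();
+        ( ...process audio... )
+        ma_restore_denormals(prevState);
+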
+    */
+    #if _MSC_VER <= 1200
+    {
+        prevState = _statusfp();
+        _controlfp(prevState | _DN_FLUSH, _MCW_DN);
+    }
+    #else
+    {
+        unsigned int unused;
+        _controlfp_s(&prevState, 0, 0);
+        _controlfp_s(&unused, prevState | _DN_FLUSH, _MCW_DN);
+    }
+    #endif
+    }
+    #elif defined(MA_X86) || defined(MA_X64)
+    {
+        #if defined(__SSE2__) && !(defined(__TINYC__) || defined(__WATCOMC__) || defined(__COSMOPOLITAN__))   /* <-- Add compilers that lack support for _mm_getcsr() and _mm_setcsr() to this list. */
+        {
+            prevState = _mm_getcsr();
+            _mm_setcsr(prevState | MA_MM_DENORMALS_ZERO_MASK | MA_MM_FLUSH_ZERO_MASK);
+        }
+        #else
+        {
+            /* x86/64, but no support for _mm_getcsr()/_mm_setcsr(). May need to fall back to inlined assembly here. */
+            prevState = 0;
+        }
+        #endif
+    }
+    #else
+    {
+        /* Unknown or unsupported architecture. No-op. */
+        prevState = 0;
+    }
+    #endif
+
+    return prevState;
+}
+
+static MA_INLINE void ma_restore_denormals(unsigned int prevState)
+{
+    #if defined(_MSC_VER)
+    {
+        /* Older versions of Visual Studio do not support _controlfp_s(). See ma_disable_denormals(). */
+        #if _MSC_VER <= 1200
+        {
+            _controlfp(prevState, _MCW_DN);
+        }
+        #else
+        {
+            unsigned int unused;
+            _controlfp_s(&unused, prevState, _MCW_DN);
+        }
+        #endif
+    }
+    #elif defined(MA_X86) || defined(MA_X64)
+    {
+        #if defined(__SSE2__) && !(defined(__TINYC__) || defined(__WATCOMC__) || defined(__COSMOPOLITAN__))   /* <-- Add compilers that lack support for _mm_getcsr() and _mm_setcsr() to this list. */
+        {
+            _mm_setcsr(prevState);
+        }
+        #else
+        {
+            /* x86/64, but no support for _mm_getcsr()/_mm_setcsr(). May need to fall back to inlined assembly here. */
+            (void)prevState;
+        }
+        #endif
+    }
+    #else
+    {
+        /* Unknown or unsupported architecture. No-op. */
+        (void)prevState;
+    }
+    #endif
+}
+
+
+#ifdef MA_ANDROID
+#include <sys/system_properties.h>
+
+int ma_android_sdk_version()
+{
+    char sdkVersion[PROP_VALUE_MAX + 1] = {0, };
+    if (__system_property_get("ro.build.version.sdk", sdkVersion)) {
+        return atoi(sdkVersion);
+    }
+
+    return 0;
+}
+#endif
+
+
+#ifndef MA_COINIT_VALUE
+#define MA_COINIT_VALUE 0   /* 0 = COINIT_MULTITHREADED */
+#endif
+
+
+#ifndef MA_FLT_MAX
+    #ifdef FLT_MAX
+        #define MA_FLT_MAX FLT_MAX
+    #else
+        #define MA_FLT_MAX 3.402823466e+38F
+    #endif
+#endif
+
+
+#ifndef MA_PI
+#define MA_PI       3.14159265358979323846264f
+#endif
+#ifndef MA_PI_D
+#define MA_PI_D     3.14159265358979323846264
+#endif
+#ifndef MA_TAU
+#define MA_TAU      6.28318530717958647693f
+#endif
+#ifndef MA_TAU_D
+#define MA_TAU_D    6.28318530717958647693
+#endif
+
+
+/* The default format when ma_format_unknown (0) is requested when initializing a device. */
+#ifndef MA_DEFAULT_FORMAT
+#define MA_DEFAULT_FORMAT                                   ma_format_f32
+#endif
+
+/* The default channel count to use when 0 is used when initializing a device. */
+#ifndef MA_DEFAULT_CHANNELS
+#define MA_DEFAULT_CHANNELS                                 2
+#endif
+
+/* The default sample rate to use when 0 is used when initializing a device. */
+#ifndef MA_DEFAULT_SAMPLE_RATE
+#define MA_DEFAULT_SAMPLE_RATE                              48000
+#endif
+
+/* Default periods when none is specified in ma_device_init(). More periods means more work on the CPU. */
+#ifndef MA_DEFAULT_PERIODS
+#define MA_DEFAULT_PERIODS                                  3
+#endif
+
+/* The default period size in milliseconds for low latency mode. */
+#ifndef MA_DEFAULT_PERIOD_SIZE_IN_MILLISECONDS_LOW_LATENCY
+#define MA_DEFAULT_PERIOD_SIZE_IN_MILLISECONDS_LOW_LATENCY  10
+#endif
+
+/* The default period size in milliseconds for conservative mode.
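+   Like the other MA_DEFAULT_* macros here it is guarded with #ifndef, so it can be overridden by
+   defining it before compiling the implementation, e.g.
+   #define MA_DEFAULT_PERIOD_SIZE_IN_MILLISECONDS_CONSERVATIVE 50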
*/ +#ifndef MA_DEFAULT_PERIOD_SIZE_IN_MILLISECONDS_CONSERVATIVE +#define MA_DEFAULT_PERIOD_SIZE_IN_MILLISECONDS_CONSERVATIVE 100 +#endif + +/* The default LPF filter order for linear resampling. Note that this is clamped to MA_MAX_FILTER_ORDER. */ +#ifndef MA_DEFAULT_RESAMPLER_LPF_ORDER + #if MA_MAX_FILTER_ORDER >= 4 + #define MA_DEFAULT_RESAMPLER_LPF_ORDER 4 + #else + #define MA_DEFAULT_RESAMPLER_LPF_ORDER MA_MAX_FILTER_ORDER + #endif +#endif + + +#if defined(__clang__) || (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6))) + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wunused-variable" +#endif + +/* Standard sample rates, in order of priority. */ +static ma_uint32 g_maStandardSampleRatePriorities[] = { + (ma_uint32)ma_standard_sample_rate_48000, + (ma_uint32)ma_standard_sample_rate_44100, + + (ma_uint32)ma_standard_sample_rate_32000, + (ma_uint32)ma_standard_sample_rate_24000, + (ma_uint32)ma_standard_sample_rate_22050, + + (ma_uint32)ma_standard_sample_rate_88200, + (ma_uint32)ma_standard_sample_rate_96000, + (ma_uint32)ma_standard_sample_rate_176400, + (ma_uint32)ma_standard_sample_rate_192000, + + (ma_uint32)ma_standard_sample_rate_16000, + (ma_uint32)ma_standard_sample_rate_11025, + (ma_uint32)ma_standard_sample_rate_8000, + + (ma_uint32)ma_standard_sample_rate_352800, + (ma_uint32)ma_standard_sample_rate_384000 +}; + +static MA_INLINE ma_bool32 ma_is_standard_sample_rate(ma_uint32 sampleRate) +{ + ma_uint32 iSampleRate; + + for (iSampleRate = 0; iSampleRate < sizeof(g_maStandardSampleRatePriorities) / sizeof(g_maStandardSampleRatePriorities[0]); iSampleRate += 1) { + if (g_maStandardSampleRatePriorities[iSampleRate] == sampleRate) { + return MA_TRUE; + } + } + + /* Getting here means the sample rate is not supported. */ + return MA_FALSE; +} + + +static ma_format g_maFormatPriorities[] = { + ma_format_s16, /* Most common */ + ma_format_f32, + + /*ma_format_s24_32,*/ /* Clean alignment */ + ma_format_s32, + + ma_format_s24, /* Unclean alignment */ + + ma_format_u8 /* Low quality */ +}; +#if defined(__clang__) || (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6))) + #pragma GCC diagnostic pop +#endif + + +MA_API void ma_version(ma_uint32* pMajor, ma_uint32* pMinor, ma_uint32* pRevision) +{ + if (pMajor) { + *pMajor = MA_VERSION_MAJOR; + } + + if (pMinor) { + *pMinor = MA_VERSION_MINOR; + } + + if (pRevision) { + *pRevision = MA_VERSION_REVISION; + } +} + +MA_API const char* ma_version_string(void) +{ + return MA_VERSION_STRING; +} + + +/****************************************************************************** + +Standard Library Stuff + +******************************************************************************/ +#ifndef MA_ASSERT +#define MA_ASSERT(condition) assert(condition) +#endif + +#ifndef MA_MALLOC +#define MA_MALLOC(sz) malloc((sz)) +#endif +#ifndef MA_REALLOC +#define MA_REALLOC(p, sz) realloc((p), (sz)) +#endif +#ifndef MA_FREE +#define MA_FREE(p) free((p)) +#endif + +static MA_INLINE void ma_zero_memory_default(void* p, size_t sz) +{ + if (p == NULL) { + MA_ASSERT(sz == 0); /* If this is triggered there's an error with the calling code. 
*/ + return; + } + + if (sz > 0) { + memset(p, 0, sz); + } +} + + +#ifndef MA_ZERO_MEMORY +#define MA_ZERO_MEMORY(p, sz) ma_zero_memory_default((p), (sz)) +#endif +#ifndef MA_COPY_MEMORY +#define MA_COPY_MEMORY(dst, src, sz) memcpy((dst), (src), (sz)) +#endif +#ifndef MA_MOVE_MEMORY +#define MA_MOVE_MEMORY(dst, src, sz) memmove((dst), (src), (sz)) +#endif + +#define MA_ZERO_OBJECT(p) MA_ZERO_MEMORY((p), sizeof(*(p))) + +#define ma_countof(x) (sizeof(x) / sizeof(x[0])) +#define ma_max(x, y) (((x) > (y)) ? (x) : (y)) +#define ma_min(x, y) (((x) < (y)) ? (x) : (y)) +#define ma_abs(x) (((x) > 0) ? (x) : -(x)) +#define ma_clamp(x, lo, hi) (ma_max(lo, ma_min(x, hi))) +#define ma_offset_ptr(p, offset) (((ma_uint8*)(p)) + (offset)) +#define ma_align(x, a) (((x) + ((a)-1)) & ~((a)-1)) +#define ma_align_64(x) ma_align(x, 8) + +#define ma_buffer_frame_capacity(buffer, channels, format) (sizeof(buffer) / ma_get_bytes_per_sample(format) / (channels)) + +static MA_INLINE double ma_sind(double x) +{ + /* TODO: Implement custom sin(x). */ + return sin(x); +} + +static MA_INLINE double ma_expd(double x) +{ + /* TODO: Implement custom exp(x). */ + return exp(x); +} + +static MA_INLINE double ma_logd(double x) +{ + /* TODO: Implement custom log(x). */ + return log(x); +} + +static MA_INLINE double ma_powd(double x, double y) +{ + /* TODO: Implement custom pow(x, y). */ + return pow(x, y); +} + +static MA_INLINE double ma_sqrtd(double x) +{ + /* TODO: Implement custom sqrt(x). */ + return sqrt(x); +} + + +static MA_INLINE float ma_rsqrtf(float x) +{ + #if defined(MA_SUPPORT_SSE2) && !defined(MA_NO_SSE2) && (defined(MA_X64) || (defined(_M_IX86_FP) && _M_IX86_FP == 2) || defined(__SSE2__)) + { + /* + For SSE we can use RSQRTSS. + + This Stack Overflow post suggests that compilers don't necessarily generate optimal code + when using intrinsics: + + https://web.archive.org/web/20221211012522/https://stackoverflow.com/questions/32687079/getting-fewest-instructions-for-rsqrtss-wrapper + + I'm going to do something similar here, but a bit simpler. 
+ */ + #if defined(__GNUC__) || defined(__clang__) + { + float result; + __asm__ __volatile__("rsqrtss %1, %0" : "=x"(result) : "x"(x)); + return result; + } + #else + { + return _mm_cvtss_f32(_mm_rsqrt_ss(_mm_set_ps1(x))); + } + #endif + } + #else + { + return 1 / (float)ma_sqrtd(x); + } + #endif +} + + +static MA_INLINE float ma_sinf(float x) +{ + return (float)ma_sind((float)x); +} + +static MA_INLINE double ma_cosd(double x) +{ + return ma_sind((MA_PI_D*0.5) - x); +} + +static MA_INLINE float ma_cosf(float x) +{ + return (float)ma_cosd((float)x); +} + +static MA_INLINE double ma_log10d(double x) +{ + return ma_logd(x) * 0.43429448190325182765; +} + +static MA_INLINE float ma_powf(float x, float y) +{ + return (float)ma_powd((double)x, (double)y); +} + +static MA_INLINE float ma_log10f(float x) +{ + return (float)ma_log10d((double)x); +} + + +static MA_INLINE double ma_degrees_to_radians(double degrees) +{ + return degrees * 0.01745329252; +} + +static MA_INLINE double ma_radians_to_degrees(double radians) +{ + return radians * 57.295779512896; +} + +static MA_INLINE float ma_degrees_to_radians_f(float degrees) +{ + return degrees * 0.01745329252f; +} + +static MA_INLINE float ma_radians_to_degrees_f(float radians) +{ + return radians * 57.295779512896f; +} + + +/* +Return Values: + 0: Success + 22: EINVAL + 34: ERANGE + +Not using symbolic constants for errors because I want to avoid #including errno.h + +These are marked as no-inline because of some bad code generation by Clang. None of these functions +are used in any performance-critical code within miniaudio. +*/ +MA_API MA_NO_INLINE int ma_strcpy_s(char* dst, size_t dstSizeInBytes, const char* src) +{ + size_t i; + + if (dst == 0) { + return 22; + } + if (dstSizeInBytes == 0) { + return 34; + } + if (src == 0) { + dst[0] = '\0'; + return 22; + } + + for (i = 0; i < dstSizeInBytes && src[i] != '\0'; ++i) { + dst[i] = src[i]; + } + + if (i < dstSizeInBytes) { + dst[i] = '\0'; + return 0; + } + + dst[0] = '\0'; + return 34; +} + +MA_API MA_NO_INLINE int ma_wcscpy_s(wchar_t* dst, size_t dstCap, const wchar_t* src) +{ + size_t i; + + if (dst == 0) { + return 22; + } + if (dstCap == 0) { + return 34; + } + if (src == 0) { + dst[0] = '\0'; + return 22; + } + + for (i = 0; i < dstCap && src[i] != '\0'; ++i) { + dst[i] = src[i]; + } + + if (i < dstCap) { + dst[i] = '\0'; + return 0; + } + + dst[0] = '\0'; + return 34; +} + + +MA_API MA_NO_INLINE int ma_strncpy_s(char* dst, size_t dstSizeInBytes, const char* src, size_t count) +{ + size_t maxcount; + size_t i; + + if (dst == 0) { + return 22; + } + if (dstSizeInBytes == 0) { + return 34; + } + if (src == 0) { + dst[0] = '\0'; + return 22; + } + + maxcount = count; + if (count == ((size_t)-1) || count >= dstSizeInBytes) { /* -1 = _TRUNCATE */ + maxcount = dstSizeInBytes - 1; + } + + for (i = 0; i < maxcount && src[i] != '\0'; ++i) { + dst[i] = src[i]; + } + + if (src[i] == '\0' || i == count || count == ((size_t)-1)) { + dst[i] = '\0'; + return 0; + } + + dst[0] = '\0'; + return 34; +} + +MA_API MA_NO_INLINE int ma_strcat_s(char* dst, size_t dstSizeInBytes, const char* src) +{ + char* dstorig; + + if (dst == 0) { + return 22; + } + if (dstSizeInBytes == 0) { + return 34; + } + if (src == 0) { + dst[0] = '\0'; + return 22; + } + + dstorig = dst; + + while (dstSizeInBytes > 0 && dst[0] != '\0') { + dst += 1; + dstSizeInBytes -= 1; + } + + if (dstSizeInBytes == 0) { + return 22; /* Unterminated. 
*/ + } + + + while (dstSizeInBytes > 0 && src[0] != '\0') { + *dst++ = *src++; + dstSizeInBytes -= 1; + } + + if (dstSizeInBytes > 0) { + dst[0] = '\0'; + } else { + dstorig[0] = '\0'; + return 34; + } + + return 0; +} + +MA_API MA_NO_INLINE int ma_strncat_s(char* dst, size_t dstSizeInBytes, const char* src, size_t count) +{ + char* dstorig; + + if (dst == 0) { + return 22; + } + if (dstSizeInBytes == 0) { + return 34; + } + if (src == 0) { + return 22; + } + + dstorig = dst; + + while (dstSizeInBytes > 0 && dst[0] != '\0') { + dst += 1; + dstSizeInBytes -= 1; + } + + if (dstSizeInBytes == 0) { + return 22; /* Unterminated. */ + } + + + if (count == ((size_t)-1)) { /* _TRUNCATE */ + count = dstSizeInBytes - 1; + } + + while (dstSizeInBytes > 0 && src[0] != '\0' && count > 0) { + *dst++ = *src++; + dstSizeInBytes -= 1; + count -= 1; + } + + if (dstSizeInBytes > 0) { + dst[0] = '\0'; + } else { + dstorig[0] = '\0'; + return 34; + } + + return 0; +} + +MA_API MA_NO_INLINE int ma_itoa_s(int value, char* dst, size_t dstSizeInBytes, int radix) +{ + int sign; + unsigned int valueU; + char* dstEnd; + + if (dst == NULL || dstSizeInBytes == 0) { + return 22; + } + if (radix < 2 || radix > 36) { + dst[0] = '\0'; + return 22; + } + + sign = (value < 0 && radix == 10) ? -1 : 1; /* The negative sign is only used when the base is 10. */ + + if (value < 0) { + valueU = -value; + } else { + valueU = value; + } + + dstEnd = dst; + do + { + int remainder = valueU % radix; + if (remainder > 9) { + *dstEnd = (char)((remainder - 10) + 'a'); + } else { + *dstEnd = (char)(remainder + '0'); + } + + dstEnd += 1; + dstSizeInBytes -= 1; + valueU /= radix; + } while (dstSizeInBytes > 0 && valueU > 0); + + if (dstSizeInBytes == 0) { + dst[0] = '\0'; + return 22; /* Ran out of room in the output buffer. */ + } + + if (sign < 0) { + *dstEnd++ = '-'; + dstSizeInBytes -= 1; + } + + if (dstSizeInBytes == 0) { + dst[0] = '\0'; + return 22; /* Ran out of room in the output buffer. */ + } + + *dstEnd = '\0'; + + + /* At this point the string will be reversed. */ + dstEnd -= 1; + while (dst < dstEnd) { + char temp = *dst; + *dst = *dstEnd; + *dstEnd = temp; + + dst += 1; + dstEnd -= 1; + } + + return 0; +} + +MA_API MA_NO_INLINE int ma_strcmp(const char* str1, const char* str2) +{ + if (str1 == str2) return 0; + + /* These checks differ from the standard implementation. It's not important, but I prefer it just for sanity. 
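+    Specifically, NULL is ordered before any non-NULL string here, whereas passing NULL to the standard strcmp() is undefined behavior.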
*/
+    if (str1 == NULL) return -1;
+    if (str2 == NULL) return 1;
+
+    for (;;) {
+        if (str1[0] == '\0') {
+            break;
+        }
+        if (str1[0] != str2[0]) {
+            break;
+        }
+
+        str1 += 1;
+        str2 += 1;
+    }
+
+    return ((unsigned char*)str1)[0] - ((unsigned char*)str2)[0];
+}
+
+MA_API MA_NO_INLINE int ma_strappend(char* dst, size_t dstSize, const char* srcA, const char* srcB)
+{
+    int result;
+
+    result = ma_strncpy_s(dst, dstSize, srcA, (size_t)-1);
+    if (result != 0) {
+        return result;
+    }
+
+    result = ma_strncat_s(dst, dstSize, srcB, (size_t)-1);
+    if (result != 0) {
+        return result;
+    }
+
+    return result;
+}
+
+MA_API MA_NO_INLINE char* ma_copy_string(const char* src, const ma_allocation_callbacks* pAllocationCallbacks)
+{
+    size_t sz;
+    char* dst;
+
+    if (src == NULL) {
+        return NULL;
+    }
+
+    sz  = strlen(src)+1;
+    dst = (char*)ma_malloc(sz, pAllocationCallbacks);
+    if (dst == NULL) {
+        return NULL;
+    }
+
+    ma_strcpy_s(dst, sz, src);
+
+    return dst;
+}
+
+MA_API MA_NO_INLINE wchar_t* ma_copy_string_w(const wchar_t* src, const ma_allocation_callbacks* pAllocationCallbacks)
+{
+    size_t sz = wcslen(src)+1;
+    wchar_t* dst = (wchar_t*)ma_malloc(sz * sizeof(*dst), pAllocationCallbacks);
+    if (dst == NULL) {
+        return NULL;
+    }
+
+    ma_wcscpy_s(dst, sz, src);
+
+    return dst;
+}
+
+
+
+#include <errno.h>
+static ma_result ma_result_from_errno(int e)
+{
+    if (e == 0) {
+        return MA_SUCCESS;
+    }
+#ifdef EPERM
+    else if (e == EPERM) { return MA_INVALID_OPERATION; }
+#endif
+#ifdef ENOENT
+    else if (e == ENOENT) { return MA_DOES_NOT_EXIST; }
+#endif
+#ifdef ESRCH
+    else if (e == ESRCH) { return MA_DOES_NOT_EXIST; }
+#endif
+#ifdef EINTR
+    else if (e == EINTR) { return MA_INTERRUPT; }
+#endif
+#ifdef EIO
+    else if (e == EIO) { return MA_IO_ERROR; }
+#endif
+#ifdef ENXIO
+    else if (e == ENXIO) { return MA_DOES_NOT_EXIST; }
+#endif
+#ifdef E2BIG
+    else if (e == E2BIG) { return MA_INVALID_ARGS; }
+#endif
+#ifdef ENOEXEC
+    else if (e == ENOEXEC) { return MA_INVALID_FILE; }
+#endif
+#ifdef EBADF
+    else if (e == EBADF) { return MA_INVALID_FILE; }
+#endif
+#ifdef ECHILD
+    else if (e == ECHILD) { return MA_ERROR; }
+#endif
+#ifdef EAGAIN
+    else if (e == EAGAIN) { return MA_UNAVAILABLE; }
+#endif
+#ifdef ENOMEM
+    else if (e == ENOMEM) { return MA_OUT_OF_MEMORY; }
+#endif
+#ifdef EACCES
+    else if (e == EACCES) { return MA_ACCESS_DENIED; }
+#endif
+#ifdef EFAULT
+    else if (e == EFAULT) { return MA_BAD_ADDRESS; }
+#endif
+#ifdef ENOTBLK
+    else if (e == ENOTBLK) { return MA_ERROR; }
+#endif
+#ifdef EBUSY
+    else if (e == EBUSY) { return MA_BUSY; }
+#endif
+#ifdef EEXIST
+    else if (e == EEXIST) { return MA_ALREADY_EXISTS; }
+#endif
+#ifdef EXDEV
+    else if (e == EXDEV) { return MA_ERROR; }
+#endif
+#ifdef ENODEV
+    else if (e == ENODEV) { return MA_DOES_NOT_EXIST; }
+#endif
+#ifdef ENOTDIR
+    else if (e == ENOTDIR) { return MA_NOT_DIRECTORY; }
+#endif
+#ifdef EISDIR
+    else if (e == EISDIR) { return MA_IS_DIRECTORY; }
+#endif
+#ifdef EINVAL
+    else if (e == EINVAL) { return MA_INVALID_ARGS; }
+#endif
+#ifdef ENFILE
+    else if (e == ENFILE) { return MA_TOO_MANY_OPEN_FILES; }
+#endif
+#ifdef EMFILE
+    else if (e == EMFILE) { return MA_TOO_MANY_OPEN_FILES; }
+#endif
+#ifdef ENOTTY
+    else if (e == ENOTTY) { return MA_INVALID_OPERATION; }
+#endif
+#ifdef ETXTBSY
+    else if (e == ETXTBSY) { return MA_BUSY; }
+#endif
+#ifdef EFBIG
+    else if (e == EFBIG) { return MA_TOO_BIG; }
+#endif
+#ifdef ENOSPC
+    else if (e == ENOSPC) { return MA_NO_SPACE; }
+#endif
+#ifdef ESPIPE
+    else if (e == ESPIPE) { return MA_BAD_SEEK; }
+#endif
+#ifdef EROFS
+    else if (e ==
EROFS) { return MA_ACCESS_DENIED; } +#endif +#ifdef EMLINK + else if (e == EMLINK) { return MA_TOO_MANY_LINKS; } +#endif +#ifdef EPIPE + else if (e == EPIPE) { return MA_BAD_PIPE; } +#endif +#ifdef EDOM + else if (e == EDOM) { return MA_OUT_OF_RANGE; } +#endif +#ifdef ERANGE + else if (e == ERANGE) { return MA_OUT_OF_RANGE; } +#endif +#ifdef EDEADLK + else if (e == EDEADLK) { return MA_DEADLOCK; } +#endif +#ifdef ENAMETOOLONG + else if (e == ENAMETOOLONG) { return MA_PATH_TOO_LONG; } +#endif +#ifdef ENOLCK + else if (e == ENOLCK) { return MA_ERROR; } +#endif +#ifdef ENOSYS + else if (e == ENOSYS) { return MA_NOT_IMPLEMENTED; } +#endif +#ifdef ENOTEMPTY + else if (e == ENOTEMPTY) { return MA_DIRECTORY_NOT_EMPTY; } +#endif +#ifdef ELOOP + else if (e == ELOOP) { return MA_TOO_MANY_LINKS; } +#endif +#ifdef ENOMSG + else if (e == ENOMSG) { return MA_NO_MESSAGE; } +#endif +#ifdef EIDRM + else if (e == EIDRM) { return MA_ERROR; } +#endif +#ifdef ECHRNG + else if (e == ECHRNG) { return MA_ERROR; } +#endif +#ifdef EL2NSYNC + else if (e == EL2NSYNC) { return MA_ERROR; } +#endif +#ifdef EL3HLT + else if (e == EL3HLT) { return MA_ERROR; } +#endif +#ifdef EL3RST + else if (e == EL3RST) { return MA_ERROR; } +#endif +#ifdef ELNRNG + else if (e == ELNRNG) { return MA_OUT_OF_RANGE; } +#endif +#ifdef EUNATCH + else if (e == EUNATCH) { return MA_ERROR; } +#endif +#ifdef ENOCSI + else if (e == ENOCSI) { return MA_ERROR; } +#endif +#ifdef EL2HLT + else if (e == EL2HLT) { return MA_ERROR; } +#endif +#ifdef EBADE + else if (e == EBADE) { return MA_ERROR; } +#endif +#ifdef EBADR + else if (e == EBADR) { return MA_ERROR; } +#endif +#ifdef EXFULL + else if (e == EXFULL) { return MA_ERROR; } +#endif +#ifdef ENOANO + else if (e == ENOANO) { return MA_ERROR; } +#endif +#ifdef EBADRQC + else if (e == EBADRQC) { return MA_ERROR; } +#endif +#ifdef EBADSLT + else if (e == EBADSLT) { return MA_ERROR; } +#endif +#ifdef EBFONT + else if (e == EBFONT) { return MA_INVALID_FILE; } +#endif +#ifdef ENOSTR + else if (e == ENOSTR) { return MA_ERROR; } +#endif +#ifdef ENODATA + else if (e == ENODATA) { return MA_NO_DATA_AVAILABLE; } +#endif +#ifdef ETIME + else if (e == ETIME) { return MA_TIMEOUT; } +#endif +#ifdef ENOSR + else if (e == ENOSR) { return MA_NO_DATA_AVAILABLE; } +#endif +#ifdef ENONET + else if (e == ENONET) { return MA_NO_NETWORK; } +#endif +#ifdef ENOPKG + else if (e == ENOPKG) { return MA_ERROR; } +#endif +#ifdef EREMOTE + else if (e == EREMOTE) { return MA_ERROR; } +#endif +#ifdef ENOLINK + else if (e == ENOLINK) { return MA_ERROR; } +#endif +#ifdef EADV + else if (e == EADV) { return MA_ERROR; } +#endif +#ifdef ESRMNT + else if (e == ESRMNT) { return MA_ERROR; } +#endif +#ifdef ECOMM + else if (e == ECOMM) { return MA_ERROR; } +#endif +#ifdef EPROTO + else if (e == EPROTO) { return MA_ERROR; } +#endif +#ifdef EMULTIHOP + else if (e == EMULTIHOP) { return MA_ERROR; } +#endif +#ifdef EDOTDOT + else if (e == EDOTDOT) { return MA_ERROR; } +#endif +#ifdef EBADMSG + else if (e == EBADMSG) { return MA_BAD_MESSAGE; } +#endif +#ifdef EOVERFLOW + else if (e == EOVERFLOW) { return MA_TOO_BIG; } +#endif +#ifdef ENOTUNIQ + else if (e == ENOTUNIQ) { return MA_NOT_UNIQUE; } +#endif +#ifdef EBADFD + else if (e == EBADFD) { return MA_ERROR; } +#endif +#ifdef EREMCHG + else if (e == EREMCHG) { return MA_ERROR; } +#endif +#ifdef ELIBACC + else if (e == ELIBACC) { return MA_ACCESS_DENIED; } +#endif +#ifdef ELIBBAD + else if (e == ELIBBAD) { return MA_INVALID_FILE; } +#endif +#ifdef ELIBSCN + else if (e == ELIBSCN) { return 
MA_INVALID_FILE; } +#endif +#ifdef ELIBMAX + else if (e == ELIBMAX) { return MA_ERROR; } +#endif +#ifdef ELIBEXEC + else if (e == ELIBEXEC) { return MA_ERROR; } +#endif +#ifdef EILSEQ + else if (e == EILSEQ) { return MA_INVALID_DATA; } +#endif +#ifdef ERESTART + else if (e == ERESTART) { return MA_ERROR; } +#endif +#ifdef ESTRPIPE + else if (e == ESTRPIPE) { return MA_ERROR; } +#endif +#ifdef EUSERS + else if (e == EUSERS) { return MA_ERROR; } +#endif +#ifdef ENOTSOCK + else if (e == ENOTSOCK) { return MA_NOT_SOCKET; } +#endif +#ifdef EDESTADDRREQ + else if (e == EDESTADDRREQ) { return MA_NO_ADDRESS; } +#endif +#ifdef EMSGSIZE + else if (e == EMSGSIZE) { return MA_TOO_BIG; } +#endif +#ifdef EPROTOTYPE + else if (e == EPROTOTYPE) { return MA_BAD_PROTOCOL; } +#endif +#ifdef ENOPROTOOPT + else if (e == ENOPROTOOPT) { return MA_PROTOCOL_UNAVAILABLE; } +#endif +#ifdef EPROTONOSUPPORT + else if (e == EPROTONOSUPPORT) { return MA_PROTOCOL_NOT_SUPPORTED; } +#endif +#ifdef ESOCKTNOSUPPORT + else if (e == ESOCKTNOSUPPORT) { return MA_SOCKET_NOT_SUPPORTED; } +#endif +#ifdef EOPNOTSUPP + else if (e == EOPNOTSUPP) { return MA_INVALID_OPERATION; } +#endif +#ifdef EPFNOSUPPORT + else if (e == EPFNOSUPPORT) { return MA_PROTOCOL_FAMILY_NOT_SUPPORTED; } +#endif +#ifdef EAFNOSUPPORT + else if (e == EAFNOSUPPORT) { return MA_ADDRESS_FAMILY_NOT_SUPPORTED; } +#endif +#ifdef EADDRINUSE + else if (e == EADDRINUSE) { return MA_ALREADY_IN_USE; } +#endif +#ifdef EADDRNOTAVAIL + else if (e == EADDRNOTAVAIL) { return MA_ERROR; } +#endif +#ifdef ENETDOWN + else if (e == ENETDOWN) { return MA_NO_NETWORK; } +#endif +#ifdef ENETUNREACH + else if (e == ENETUNREACH) { return MA_NO_NETWORK; } +#endif +#ifdef ENETRESET + else if (e == ENETRESET) { return MA_NO_NETWORK; } +#endif +#ifdef ECONNABORTED + else if (e == ECONNABORTED) { return MA_NO_NETWORK; } +#endif +#ifdef ECONNRESET + else if (e == ECONNRESET) { return MA_CONNECTION_RESET; } +#endif +#ifdef ENOBUFS + else if (e == ENOBUFS) { return MA_NO_SPACE; } +#endif +#ifdef EISCONN + else if (e == EISCONN) { return MA_ALREADY_CONNECTED; } +#endif +#ifdef ENOTCONN + else if (e == ENOTCONN) { return MA_NOT_CONNECTED; } +#endif +#ifdef ESHUTDOWN + else if (e == ESHUTDOWN) { return MA_ERROR; } +#endif +#ifdef ETOOMANYREFS + else if (e == ETOOMANYREFS) { return MA_ERROR; } +#endif +#ifdef ETIMEDOUT + else if (e == ETIMEDOUT) { return MA_TIMEOUT; } +#endif +#ifdef ECONNREFUSED + else if (e == ECONNREFUSED) { return MA_CONNECTION_REFUSED; } +#endif +#ifdef EHOSTDOWN + else if (e == EHOSTDOWN) { return MA_NO_HOST; } +#endif +#ifdef EHOSTUNREACH + else if (e == EHOSTUNREACH) { return MA_NO_HOST; } +#endif +#ifdef EALREADY + else if (e == EALREADY) { return MA_IN_PROGRESS; } +#endif +#ifdef EINPROGRESS + else if (e == EINPROGRESS) { return MA_IN_PROGRESS; } +#endif +#ifdef ESTALE + else if (e == ESTALE) { return MA_INVALID_FILE; } +#endif +#ifdef EUCLEAN + else if (e == EUCLEAN) { return MA_ERROR; } +#endif +#ifdef ENOTNAM + else if (e == ENOTNAM) { return MA_ERROR; } +#endif +#ifdef ENAVAIL + else if (e == ENAVAIL) { return MA_ERROR; } +#endif +#ifdef EISNAM + else if (e == EISNAM) { return MA_ERROR; } +#endif +#ifdef EREMOTEIO + else if (e == EREMOTEIO) { return MA_IO_ERROR; } +#endif +#ifdef EDQUOT + else if (e == EDQUOT) { return MA_NO_SPACE; } +#endif +#ifdef ENOMEDIUM + else if (e == ENOMEDIUM) { return MA_DOES_NOT_EXIST; } +#endif +#ifdef EMEDIUMTYPE + else if (e == EMEDIUMTYPE) { return MA_ERROR; } +#endif +#ifdef ECANCELED + else if (e == ECANCELED) { return MA_CANCELLED; } 
+#endif +#ifdef ENOKEY + else if (e == ENOKEY) { return MA_ERROR; } +#endif +#ifdef EKEYEXPIRED + else if (e == EKEYEXPIRED) { return MA_ERROR; } +#endif +#ifdef EKEYREVOKED + else if (e == EKEYREVOKED) { return MA_ERROR; } +#endif +#ifdef EKEYREJECTED + else if (e == EKEYREJECTED) { return MA_ERROR; } +#endif +#ifdef EOWNERDEAD + else if (e == EOWNERDEAD) { return MA_ERROR; } +#endif +#ifdef ENOTRECOVERABLE + else if (e == ENOTRECOVERABLE) { return MA_ERROR; } +#endif +#ifdef ERFKILL + else if (e == ERFKILL) { return MA_ERROR; } +#endif +#ifdef EHWPOISON + else if (e == EHWPOISON) { return MA_ERROR; } +#endif + else { + return MA_ERROR; + } +} + +MA_API ma_result ma_fopen(FILE** ppFile, const char* pFilePath, const char* pOpenMode) +{ +#if defined(_MSC_VER) && _MSC_VER >= 1400 + errno_t err; +#endif + + if (ppFile != NULL) { + *ppFile = NULL; /* Safety. */ + } + + if (pFilePath == NULL || pOpenMode == NULL || ppFile == NULL) { + return MA_INVALID_ARGS; + } + +#if defined(_MSC_VER) && _MSC_VER >= 1400 + err = fopen_s(ppFile, pFilePath, pOpenMode); + if (err != 0) { + return ma_result_from_errno(err); + } +#else +#if defined(_WIN32) || defined(__APPLE__) + *ppFile = fopen(pFilePath, pOpenMode); +#else + #if defined(_FILE_OFFSET_BITS) && _FILE_OFFSET_BITS == 64 && defined(_LARGEFILE64_SOURCE) + *ppFile = fopen64(pFilePath, pOpenMode); + #else + *ppFile = fopen(pFilePath, pOpenMode); + #endif +#endif + if (*ppFile == NULL) { + ma_result result = ma_result_from_errno(errno); + if (result == MA_SUCCESS) { + result = MA_ERROR; /* Just a safety check to make sure we never ever return success when pFile == NULL. */ + } + + return result; + } +#endif + + return MA_SUCCESS; +} + + + +/* +_wfopen() isn't always available in all compilation environments. + + * Windows only. + * MSVC seems to support it universally as far back as VC6 from what I can tell (haven't checked further back). + * MinGW-64 (both 32- and 64-bit) seems to support it. + * MinGW wraps it in !defined(__STRICT_ANSI__). + * OpenWatcom wraps it in !defined(_NO_EXT_KEYS). + +This can be reviewed as compatibility issues arise. The preference is to use _wfopen_s() and _wfopen() as opposed to the wcsrtombs() +fallback, so if you notice your compiler not detecting this properly I'm happy to look at adding support. +*/ +#if defined(_WIN32) + #if defined(_MSC_VER) || defined(__MINGW64__) || (!defined(__STRICT_ANSI__) && !defined(_NO_EXT_KEYS)) + #define MA_HAS_WFOPEN + #endif +#endif + +MA_API ma_result ma_wfopen(FILE** ppFile, const wchar_t* pFilePath, const wchar_t* pOpenMode, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (ppFile != NULL) { + *ppFile = NULL; /* Safety. */ + } + + if (pFilePath == NULL || pOpenMode == NULL || ppFile == NULL) { + return MA_INVALID_ARGS; + } + +#if defined(MA_HAS_WFOPEN) + { + /* Use _wfopen() on Windows. */ + #if defined(_MSC_VER) && _MSC_VER >= 1400 + errno_t err = _wfopen_s(ppFile, pFilePath, pOpenMode); + if (err != 0) { + return ma_result_from_errno(err); + } + #else + *ppFile = _wfopen(pFilePath, pOpenMode); + if (*ppFile == NULL) { + return ma_result_from_errno(errno); + } + #endif + (void)pAllocationCallbacks; + } +#else + /* + Use fopen() on anything other than Windows. Requires a conversion. This is annoying because fopen() is locale specific. The only real way I can + think of to do this is with wcsrtombs(). Note that wcstombs() is apparently not thread-safe because it uses a static global mbstate_t object for + maintaining state. 
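+    wcsrtombs() avoids that problem by taking the conversion state as an explicit parameter, which is why the code below passes a locally zero-initialized mbstate_t.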
I've checked this with -std=c89 and it works, but if somebody gets a compiler error I'll look into improving compatibility.
+    */
+    {
+        mbstate_t mbs;
+        size_t    lenMB;
+        const wchar_t* pFilePathTemp = pFilePath;
+        char* pFilePathMB = NULL;
+        char pOpenModeMB[32] = {0};
+
+        /* Get the length first. */
+        MA_ZERO_OBJECT(&mbs);
+        lenMB = wcsrtombs(NULL, &pFilePathTemp, 0, &mbs);
+        if (lenMB == (size_t)-1) {
+            return ma_result_from_errno(errno);
+        }
+
+        pFilePathMB = (char*)ma_malloc(lenMB + 1, pAllocationCallbacks);
+        if (pFilePathMB == NULL) {
+            return MA_OUT_OF_MEMORY;
+        }
+
+        pFilePathTemp = pFilePath;
+        MA_ZERO_OBJECT(&mbs);
+        wcsrtombs(pFilePathMB, &pFilePathTemp, lenMB + 1, &mbs);
+
+        /* The open mode should always consist of ASCII characters so we should be able to do a trivial conversion. */
+        {
+            size_t i = 0;
+            for (;;) {
+                if (pOpenMode[i] == 0) {
+                    pOpenModeMB[i] = '\0';
+                    break;
+                }
+
+                pOpenModeMB[i] = (char)pOpenMode[i];
+                i += 1;
+            }
+        }
+
+        *ppFile = fopen(pFilePathMB, pOpenModeMB);
+
+        ma_free(pFilePathMB, pAllocationCallbacks);
+    }
+
+    if (*ppFile == NULL) {
+        return MA_ERROR;
+    }
+#endif
+
+    return MA_SUCCESS;
+}
+
+
+
+static MA_INLINE void ma_copy_memory_64(void* dst, const void* src, ma_uint64 sizeInBytes)
+{
+#if 0xFFFFFFFFFFFFFFFF <= MA_SIZE_MAX
+    MA_COPY_MEMORY(dst, src, (size_t)sizeInBytes);
+#else
+    while (sizeInBytes > 0) {
+        ma_uint64 bytesToCopyNow = sizeInBytes;
+        if (bytesToCopyNow > MA_SIZE_MAX) {
+            bytesToCopyNow = MA_SIZE_MAX;
+        }
+
+        MA_COPY_MEMORY(dst, src, (size_t)bytesToCopyNow); /* Safe cast to size_t. */
+
+        sizeInBytes -= bytesToCopyNow;
+        dst = (      void*)((      ma_uint8*)dst + bytesToCopyNow);
+        src = (const void*)((const ma_uint8*)src + bytesToCopyNow);
+    }
+#endif
+}
+
+static MA_INLINE void ma_zero_memory_64(void* dst, ma_uint64 sizeInBytes)
+{
+#if 0xFFFFFFFFFFFFFFFF <= MA_SIZE_MAX
+    MA_ZERO_MEMORY(dst, (size_t)sizeInBytes);
+#else
+    while (sizeInBytes > 0) {
+        ma_uint64 bytesToZeroNow = sizeInBytes;
+        if (bytesToZeroNow > MA_SIZE_MAX) {
+            bytesToZeroNow = MA_SIZE_MAX;
+        }
+
+        MA_ZERO_MEMORY(dst, (size_t)bytesToZeroNow); /* Safe cast to size_t.
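+           bytesToZeroNow was clamped to MA_SIZE_MAX just above, so this narrowing conversion cannot truncate.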
*/
+
+        sizeInBytes -= bytesToZeroNow;
+        dst = (void*)((ma_uint8*)dst + bytesToZeroNow);
+    }
+#endif
+}
+
+
+/* Thanks to good old Bit Twiddling Hacks for this one: http://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2 */
+static MA_INLINE unsigned int ma_next_power_of_2(unsigned int x)
+{
+    x--;
+    x |= x >> 1;
+    x |= x >> 2;
+    x |= x >> 4;
+    x |= x >> 8;
+    x |= x >> 16;
+    x++;
+
+    return x;
+}
+
+static MA_INLINE unsigned int ma_prev_power_of_2(unsigned int x)
+{
+    return ma_next_power_of_2(x) >> 1;
+}
+
+static MA_INLINE unsigned int ma_round_to_power_of_2(unsigned int x)
+{
+    unsigned int prev = ma_prev_power_of_2(x);
+    unsigned int next = ma_next_power_of_2(x);
+    if ((next - x) > (x - prev)) {
+        return prev;
+    } else {
+        return next;
+    }
+}
+
+static MA_INLINE unsigned int ma_count_set_bits(unsigned int x)
+{
+    unsigned int count = 0;
+    while (x != 0) {
+        if (x & 1) {
+            count += 1;
+        }
+
+        x = x >> 1;
+    }
+
+    return count;
+}
+
+
+
+/**************************************************************************************************************************************************************
+
+Allocation Callbacks
+
+**************************************************************************************************************************************************************/
+static void* ma__malloc_default(size_t sz, void* pUserData)
+{
+    (void)pUserData;
+    return MA_MALLOC(sz);
+}
+
+static void* ma__realloc_default(void* p, size_t sz, void* pUserData)
+{
+    (void)pUserData;
+    return MA_REALLOC(p, sz);
+}
+
+static void ma__free_default(void* p, void* pUserData)
+{
+    (void)pUserData;
+    MA_FREE(p);
+}
+
+static ma_allocation_callbacks ma_allocation_callbacks_init_default(void)
+{
+    ma_allocation_callbacks callbacks;
+    callbacks.pUserData = NULL;
+    callbacks.onMalloc  = ma__malloc_default;
+    callbacks.onRealloc = ma__realloc_default;
+    callbacks.onFree    = ma__free_default;
+
+    return callbacks;
+}
+
+static ma_result ma_allocation_callbacks_init_copy(ma_allocation_callbacks* pDst, const ma_allocation_callbacks* pSrc)
+{
+    if (pDst == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    if (pSrc == NULL) {
+        *pDst = ma_allocation_callbacks_init_default();
+    } else {
+        if (pSrc->pUserData == NULL && pSrc->onFree == NULL && pSrc->onMalloc == NULL && pSrc->onRealloc == NULL) {
+            *pDst = ma_allocation_callbacks_init_default();
+        } else {
+            if (pSrc->onFree == NULL || (pSrc->onMalloc == NULL && pSrc->onRealloc == NULL)) {
+                return MA_INVALID_ARGS; /* Invalid allocation callbacks. */
+            } else {
+                *pDst = *pSrc;
+            }
+        }
+    }
+
+    return MA_SUCCESS;
+}
+
+
+
+
+/**************************************************************************************************************************************************************
+
+Logging
+
+**************************************************************************************************************************************************************/
+MA_API const char* ma_log_level_to_string(ma_uint32 logLevel)
+{
+    switch (logLevel)
+    {
+        case MA_LOG_LEVEL_DEBUG:   return "DEBUG";
+        case MA_LOG_LEVEL_INFO:    return "INFO";
+        case MA_LOG_LEVEL_WARNING: return "WARNING";
+        case MA_LOG_LEVEL_ERROR:   return "ERROR";
+        default:                   return "ERROR";
+    }
+}
+
+#if defined(MA_DEBUG_OUTPUT)
+#if defined(MA_ANDROID)
+    #include <android/log.h>
+#endif
+
+/* Customize this to use a specific tag in __android_log_print() for debug output messages.
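+Because of the #ifndef guard below, the tag can be overridden by defining MA_ANDROID_LOG_TAG before this point, for example on the compiler command line.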
*/ +#ifndef MA_ANDROID_LOG_TAG +#define MA_ANDROID_LOG_TAG "miniaudio" +#endif + +void ma_log_callback_debug(void* pUserData, ma_uint32 level, const char* pMessage) +{ + (void)pUserData; + + /* Special handling for some platforms. */ + #if defined(MA_ANDROID) + { + /* Android. */ + __android_log_print(ANDROID_LOG_DEBUG, MA_ANDROID_LOG_TAG, "%s: %s", ma_log_level_to_string(level), pMessage); + } + #else + { + /* Everything else. */ + printf("%s: %s", ma_log_level_to_string(level), pMessage); + } + #endif +} +#endif + +MA_API ma_log_callback ma_log_callback_init(ma_log_callback_proc onLog, void* pUserData) +{ + ma_log_callback callback; + + MA_ZERO_OBJECT(&callback); + callback.onLog = onLog; + callback.pUserData = pUserData; + + return callback; +} + + +MA_API ma_result ma_log_init(const ma_allocation_callbacks* pAllocationCallbacks, ma_log* pLog) +{ + if (pLog == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pLog); + ma_allocation_callbacks_init_copy(&pLog->allocationCallbacks, pAllocationCallbacks); + + /* We need a mutex for thread safety. */ + #ifndef MA_NO_THREADING + { + ma_result result = ma_mutex_init(&pLog->lock); + if (result != MA_SUCCESS) { + return result; + } + } + #endif + + /* If we're using debug output, enable it. */ + #if defined(MA_DEBUG_OUTPUT) + { + ma_log_register_callback(pLog, ma_log_callback_init(ma_log_callback_debug, NULL)); /* Doesn't really matter if this fails. */ + } + #endif + + return MA_SUCCESS; +} + +MA_API void ma_log_uninit(ma_log* pLog) +{ + if (pLog == NULL) { + return; + } + +#ifndef MA_NO_THREADING + ma_mutex_uninit(&pLog->lock); +#endif +} + +static void ma_log_lock(ma_log* pLog) +{ +#ifndef MA_NO_THREADING + ma_mutex_lock(&pLog->lock); +#else + (void)pLog; +#endif +} + +static void ma_log_unlock(ma_log* pLog) +{ +#ifndef MA_NO_THREADING + ma_mutex_unlock(&pLog->lock); +#else + (void)pLog; +#endif +} + +MA_API ma_result ma_log_register_callback(ma_log* pLog, ma_log_callback callback) +{ + ma_result result = MA_SUCCESS; + + if (pLog == NULL || callback.onLog == NULL) { + return MA_INVALID_ARGS; + } + + ma_log_lock(pLog); + { + if (pLog->callbackCount == ma_countof(pLog->callbacks)) { + result = MA_OUT_OF_MEMORY; /* Reached the maximum allowed log callbacks. */ + } else { + pLog->callbacks[pLog->callbackCount] = callback; + pLog->callbackCount += 1; + } + } + ma_log_unlock(pLog); + + return result; +} + +MA_API ma_result ma_log_unregister_callback(ma_log* pLog, ma_log_callback callback) +{ + if (pLog == NULL) { + return MA_INVALID_ARGS; + } + + ma_log_lock(pLog); + { + ma_uint32 iLog; + for (iLog = 0; iLog < pLog->callbackCount; ) { + if (pLog->callbacks[iLog].onLog == callback.onLog) { + /* Found. Move everything down a slot. */ + ma_uint32 jLog; + for (jLog = iLog; jLog < pLog->callbackCount-1; jLog += 1) { + pLog->callbacks[jLog] = pLog->callbacks[jLog + 1]; + } + + pLog->callbackCount -= 1; + } else { + /* Not found. */ + iLog += 1; + } + } + } + ma_log_unlock(pLog); + + return MA_SUCCESS; +} + +MA_API ma_result ma_log_post(ma_log* pLog, ma_uint32 level, const char* pMessage) +{ + if (pLog == NULL || pMessage == NULL) { + return MA_INVALID_ARGS; + } + + ma_log_lock(pLog); + { + ma_uint32 iLog; + for (iLog = 0; iLog < pLog->callbackCount; iLog += 1) { + if (pLog->callbacks[iLog].onLog) { + pLog->callbacks[iLog].onLog(pLog->callbacks[iLog].pUserData, level, pMessage); + } + } + } + ma_log_unlock(pLog); + + return MA_SUCCESS; +} + + +/* +We need to emulate _vscprintf() for the VC6 build. 
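+_vscprintf() returns the number of characters the corresponding vsprintf() would produce, excluding the null terminator, which lets the buffer be sized ahead of formatting.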
This can be more efficient, but since it's only VC6, and it's just a +logging function, I'm happy to keep this simple. In the VC6 build we can implement this in terms of _vsnprintf(). +*/ +#if defined(_MSC_VER) && _MSC_VER < 1900 +static int ma_vscprintf(const ma_allocation_callbacks* pAllocationCallbacks, const char* format, va_list args) +{ +#if _MSC_VER > 1200 + return _vscprintf(format, args); +#else + int result; + char* pTempBuffer = NULL; + size_t tempBufferCap = 1024; + + if (format == NULL) { + errno = EINVAL; + return -1; + } + + for (;;) { + char* pNewTempBuffer = (char*)ma_realloc(pTempBuffer, tempBufferCap, pAllocationCallbacks); + if (pNewTempBuffer == NULL) { + ma_free(pTempBuffer, pAllocationCallbacks); + errno = ENOMEM; + return -1; /* Out of memory. */ + } + + pTempBuffer = pNewTempBuffer; + + result = _vsnprintf(pTempBuffer, tempBufferCap, format, args); + ma_free(pTempBuffer, NULL); + + if (result != -1) { + break; /* Got it. */ + } + + /* Buffer wasn't big enough. Ideally it'd be nice to use an error code to know the reason for sure, but this is reliable enough. */ + tempBufferCap *= 2; + } + + return result; +#endif +} +#endif + +MA_API ma_result ma_log_postv(ma_log* pLog, ma_uint32 level, const char* pFormat, va_list args) +{ + if (pLog == NULL || pFormat == NULL) { + return MA_INVALID_ARGS; + } + + #if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L) || ((!defined(_MSC_VER) || _MSC_VER >= 1900) && !defined(__STRICT_ANSI__) && !defined(_NO_EXT_KEYS)) || (defined(__cplusplus) && __cplusplus >= 201103L) + { + ma_result result; + int length; + char pFormattedMessageStack[1024]; + char* pFormattedMessageHeap = NULL; + + /* First try formatting into our fixed sized stack allocated buffer. If this is too small we'll fallback to a heap allocation. */ + length = vsnprintf(pFormattedMessageStack, sizeof(pFormattedMessageStack), pFormat, args); + if (length < 0) { + return MA_INVALID_OPERATION; /* An error occurred when trying to convert the buffer. */ + } + + if ((size_t)length < sizeof(pFormattedMessageStack)) { + /* The string was written to the stack. */ + result = ma_log_post(pLog, level, pFormattedMessageStack); + } else { + /* The stack buffer was too small, try the heap. */ + pFormattedMessageHeap = (char*)ma_malloc(length + 1, &pLog->allocationCallbacks); + if (pFormattedMessageHeap == NULL) { + return MA_OUT_OF_MEMORY; + } + + length = vsnprintf(pFormattedMessageHeap, length + 1, pFormat, args); + if (length < 0) { + ma_free(pFormattedMessageHeap, &pLog->allocationCallbacks); + return MA_INVALID_OPERATION; + } + + result = ma_log_post(pLog, level, pFormattedMessageHeap); + ma_free(pFormattedMessageHeap, &pLog->allocationCallbacks); + } + + return result; + } + #else + { + /* + Without snprintf() we need to first measure the string and then heap allocate it. I'm only aware of Visual Studio having support for this without snprintf(), so we'll + need to restrict this branch to Visual Studio. For other compilers we need to just not support formatted logging because I don't want the security risk of overflowing + a fixed sized stack allocated buffer. 
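+    Note that vsnprintf() wasn't standardized until C99, which is why the formatted path above is gated on __STDC_VERSION__ >= 199901L.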
+    */
+    #if defined(_MSC_VER) && _MSC_VER >= 1200 /* 1200 = VC6 */
+    {
+        ma_result result;
+        int formattedLen;
+        char* pFormattedMessage = NULL;
+        va_list args2;
+
+        #if _MSC_VER >= 1800
+        {
+            va_copy(args2, args);
+        }
+        #else
+        {
+            args2 = args;
+        }
+        #endif
+
+        formattedLen = ma_vscprintf(&pLog->allocationCallbacks, pFormat, args2);
+        va_end(args2);
+
+        if (formattedLen <= 0) {
+            return MA_INVALID_OPERATION;
+        }
+
+        pFormattedMessage = (char*)ma_malloc(formattedLen + 1, &pLog->allocationCallbacks);
+        if (pFormattedMessage == NULL) {
+            return MA_OUT_OF_MEMORY;
+        }
+
+        /* We'll get errors on newer versions of Visual Studio if we try to use vsprintf(). */
+        #if _MSC_VER >= 1400 /* 1400 = Visual Studio 2005 */
+        {
+            vsprintf_s(pFormattedMessage, formattedLen + 1, pFormat, args);
+        }
+        #else
+        {
+            vsprintf(pFormattedMessage, pFormat, args);
+        }
+        #endif
+
+        result = ma_log_post(pLog, level, pFormattedMessage);
+        ma_free(pFormattedMessage, &pLog->allocationCallbacks);
+
+        return result;
+    }
+    #else
+    {
+        /* Can't do anything because we don't have a safe way to emulate vsnprintf() without a manual solution. */
+        (void)level;
+        (void)args;
+
+        return MA_INVALID_OPERATION;
+    }
+    #endif
+    }
+    #endif
+}
+
+MA_API ma_result ma_log_postf(ma_log* pLog, ma_uint32 level, const char* pFormat, ...)
+{
+    ma_result result;
+    va_list args;
+
+    if (pLog == NULL || pFormat == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    va_start(args, pFormat);
+    {
+        result = ma_log_postv(pLog, level, pFormat, args);
+    }
+    va_end(args);
+
+    return result;
+}
+
+
+
+static MA_INLINE ma_uint8 ma_clip_u8(ma_int32 x)
+{
+    return (ma_uint8)(ma_clamp(x, -128, 127) + 128);
+}
+
+static MA_INLINE ma_int16 ma_clip_s16(ma_int32 x)
+{
+    return (ma_int16)ma_clamp(x, -32768, 32767);
+}
+
+static MA_INLINE ma_int64 ma_clip_s24(ma_int64 x)
+{
+    return (ma_int64)ma_clamp(x, -8388608, 8388607);
+}
+
+static MA_INLINE ma_int32 ma_clip_s32(ma_int64 x)
+{
+    /* This dance is to silence warnings with -std=c89. A good compiler should be able to optimize this away. */
+    ma_int64 clipMin;
+    ma_int64 clipMax;
+    clipMin = -((ma_int64)2147483647 + 1);
+    clipMax =  (ma_int64)2147483647;
+
+    return (ma_int32)ma_clamp(x, clipMin, clipMax);
+}
+
+static MA_INLINE float ma_clip_f32(float x)
+{
+    if (x < -1) return -1;
+    if (x > +1) return +1;
+    return x;
+}
+
+
+static MA_INLINE float ma_mix_f32(float x, float y, float a)
+{
+    return x*(1-a) + y*a;
+}
+static MA_INLINE float ma_mix_f32_fast(float x, float y, float a)
+{
+    float r0 = (y - x);
+    float r1 = r0*a;
+    return x + r1;
+    /*return x + (y - x)*a;*/
+}
+
+#if defined(MA_SUPPORT_SSE2)
+static MA_INLINE __m128 ma_mix_f32_fast__sse2(__m128 x, __m128 y, __m128 a)
+{
+    return _mm_add_ps(x, _mm_mul_ps(_mm_sub_ps(y, x), a));
+}
+#endif
+#if defined(MA_SUPPORT_AVX2)
+static MA_INLINE __m256 ma_mix_f32_fast__avx2(__m256 x, __m256 y, __m256 a)
+{
+    return _mm256_add_ps(x, _mm256_mul_ps(_mm256_sub_ps(y, x), a));
+}
+#endif
+#if defined(MA_SUPPORT_NEON)
+static MA_INLINE float32x4_t ma_mix_f32_fast__neon(float32x4_t x, float32x4_t y, float32x4_t a)
+{
+    return vaddq_f32(x, vmulq_f32(vsubq_f32(y, x), a));
+}
+#endif
+
+
+static MA_INLINE double ma_mix_f64(double x, double y, double a)
+{
+    return x*(1-a) + y*a;
+}
+static MA_INLINE double ma_mix_f64_fast(double x, double y, double a)
+{
+    return x + (y - x)*a;
+}
+
+static MA_INLINE float ma_scale_to_range_f32(float x, float lo, float hi)
+{
+    return lo + x*(hi-lo);
+}
+
+
+/*
+Greatest common factor using Euclid's algorithm iteratively.
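+As a worked example, gcf(48000, 44100): 48000 mod 44100 = 3900, 44100 mod 3900 = 1200, 3900 mod 1200 = 300,
+1200 mod 300 = 0, giving 300. This is the kind of reduction that turns a sample rate ratio like 48000:44100
+into 160:147.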
+*/ +static MA_INLINE ma_uint32 ma_gcf_u32(ma_uint32 a, ma_uint32 b) +{ + for (;;) { + if (b == 0) { + break; + } else { + ma_uint32 t = a; + a = b; + b = t % a; + } + } + + return a; +} + + +static ma_uint32 ma_ffs_32(ma_uint32 x) +{ + ma_uint32 i; + + /* Just a naive implementation just to get things working for now. Will optimize this later. */ + for (i = 0; i < 32; i += 1) { + if ((x & (1U << i)) != 0) { + return i; + } + } + + return i; +} + +static MA_INLINE ma_int16 ma_float_to_fixed_16(float x) +{ + return (ma_int16)(x * (1 << 8)); +} + + + +/* +Random Number Generation + +miniaudio uses the LCG random number generation algorithm. This is good enough for audio. + +Note that miniaudio's global LCG implementation uses global state which is _not_ thread-local. When this is called across +multiple threads, results will be unpredictable. However, it won't crash and results will still be random enough for +miniaudio's purposes. +*/ +#ifndef MA_DEFAULT_LCG_SEED +#define MA_DEFAULT_LCG_SEED 4321 +#endif + +#define MA_LCG_M 2147483647 +#define MA_LCG_A 48271 +#define MA_LCG_C 0 + +static ma_lcg g_maLCG = {MA_DEFAULT_LCG_SEED}; /* Non-zero initial seed. Use ma_seed() to use an explicit seed. */ + +static MA_INLINE void ma_lcg_seed(ma_lcg* pLCG, ma_int32 seed) +{ + MA_ASSERT(pLCG != NULL); + pLCG->state = seed; +} + +static MA_INLINE ma_int32 ma_lcg_rand_s32(ma_lcg* pLCG) +{ + pLCG->state = (MA_LCG_A * pLCG->state + MA_LCG_C) % MA_LCG_M; + return pLCG->state; +} + +static MA_INLINE ma_uint32 ma_lcg_rand_u32(ma_lcg* pLCG) +{ + return (ma_uint32)ma_lcg_rand_s32(pLCG); +} + +static MA_INLINE ma_int16 ma_lcg_rand_s16(ma_lcg* pLCG) +{ + return (ma_int16)(ma_lcg_rand_s32(pLCG) & 0xFFFF); +} + +static MA_INLINE double ma_lcg_rand_f64(ma_lcg* pLCG) +{ + return ma_lcg_rand_s32(pLCG) / (double)0x7FFFFFFF; +} + +static MA_INLINE float ma_lcg_rand_f32(ma_lcg* pLCG) +{ + return (float)ma_lcg_rand_f64(pLCG); +} + +static MA_INLINE float ma_lcg_rand_range_f32(ma_lcg* pLCG, float lo, float hi) +{ + return ma_scale_to_range_f32(ma_lcg_rand_f32(pLCG), lo, hi); +} + +static MA_INLINE ma_int32 ma_lcg_rand_range_s32(ma_lcg* pLCG, ma_int32 lo, ma_int32 hi) +{ + if (lo == hi) { + return lo; + } + + return lo + ma_lcg_rand_u32(pLCG) / (0xFFFFFFFF / (hi - lo + 1) + 1); +} + + + +static MA_INLINE void ma_seed(ma_int32 seed) +{ + ma_lcg_seed(&g_maLCG, seed); +} + +static MA_INLINE ma_int32 ma_rand_s32(void) +{ + return ma_lcg_rand_s32(&g_maLCG); +} + +static MA_INLINE ma_uint32 ma_rand_u32(void) +{ + return ma_lcg_rand_u32(&g_maLCG); +} + +static MA_INLINE double ma_rand_f64(void) +{ + return ma_lcg_rand_f64(&g_maLCG); +} + +static MA_INLINE float ma_rand_f32(void) +{ + return ma_lcg_rand_f32(&g_maLCG); +} + +static MA_INLINE float ma_rand_range_f32(float lo, float hi) +{ + return ma_lcg_rand_range_f32(&g_maLCG, lo, hi); +} + +static MA_INLINE ma_int32 ma_rand_range_s32(ma_int32 lo, ma_int32 hi) +{ + return ma_lcg_rand_range_s32(&g_maLCG, lo, hi); +} + + +static MA_INLINE float ma_dither_f32_rectangle(float ditherMin, float ditherMax) +{ + return ma_rand_range_f32(ditherMin, ditherMax); +} + +static MA_INLINE float ma_dither_f32_triangle(float ditherMin, float ditherMax) +{ + float a = ma_rand_range_f32(ditherMin, 0); + float b = ma_rand_range_f32(0, ditherMax); + return a + b; +} + +static MA_INLINE float ma_dither_f32(ma_dither_mode ditherMode, float ditherMin, float ditherMax) +{ + if (ditherMode == ma_dither_mode_rectangle) { + return ma_dither_f32_rectangle(ditherMin, ditherMax); + } + if (ditherMode == 
ma_dither_mode_triangle) { + return ma_dither_f32_triangle(ditherMin, ditherMax); + } + + return 0; +} + +static MA_INLINE ma_int32 ma_dither_s32(ma_dither_mode ditherMode, ma_int32 ditherMin, ma_int32 ditherMax) +{ + if (ditherMode == ma_dither_mode_rectangle) { + ma_int32 a = ma_rand_range_s32(ditherMin, ditherMax); + return a; + } + if (ditherMode == ma_dither_mode_triangle) { + ma_int32 a = ma_rand_range_s32(ditherMin, 0); + ma_int32 b = ma_rand_range_s32(0, ditherMax); + return a + b; + } + + return 0; +} + + +/************************************************************************************************************************************************************** + +Atomics + +**************************************************************************************************************************************************************/ +/* c89atomic.h begin */ +#ifndef ma_atomic_h +#if defined(__cplusplus) +extern "C" { +#endif +#if defined(__clang__) || (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6))) + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wlong-long" + #if defined(__clang__) + #pragma GCC diagnostic ignored "-Wc++11-long-long" + #endif +#endif +typedef int ma_atomic_memory_order; +#define MA_ATOMIC_HAS_8 +#define MA_ATOMIC_HAS_16 +#define MA_ATOMIC_HAS_32 +#define MA_ATOMIC_HAS_64 +#if (defined(_MSC_VER) ) || defined(__WATCOMC__) || defined(__DMC__) + #define MA_ATOMIC_MSVC_ARM_INTRINSIC(dst, src, order, intrin, ma_atomicType, msvcType) \ + ma_atomicType result; \ + switch (order) \ + { \ + case ma_atomic_memory_order_relaxed: \ + { \ + result = (ma_atomicType)intrin##_nf((volatile msvcType*)dst, (msvcType)src); \ + } break; \ + case ma_atomic_memory_order_consume: \ + case ma_atomic_memory_order_acquire: \ + { \ + result = (ma_atomicType)intrin##_acq((volatile msvcType*)dst, (msvcType)src); \ + } break; \ + case ma_atomic_memory_order_release: \ + { \ + result = (ma_atomicType)intrin##_rel((volatile msvcType*)dst, (msvcType)src); \ + } break; \ + case ma_atomic_memory_order_acq_rel: \ + case ma_atomic_memory_order_seq_cst: \ + default: \ + { \ + result = (ma_atomicType)intrin((volatile msvcType*)dst, (msvcType)src); \ + } break; \ + } \ + return result; + #define MA_ATOMIC_MSVC_ARM_INTRINSIC_COMPARE_EXCHANGE(ptr, expected, desired, order, intrin, ma_atomicType, msvcType) \ + ma_atomicType result; \ + switch (order) \ + { \ + case ma_atomic_memory_order_relaxed: \ + { \ + result = (ma_atomicType)intrin##_nf((volatile msvcType*)ptr, (msvcType)expected, (msvcType)desired); \ + } break; \ + case ma_atomic_memory_order_consume: \ + case ma_atomic_memory_order_acquire: \ + { \ + result = (ma_atomicType)intrin##_acq((volatile msvcType*)ptr, (msvcType)expected, (msvcType)desired); \ + } break; \ + case ma_atomic_memory_order_release: \ + { \ + result = (ma_atomicType)intrin##_rel((volatile msvcType*)ptr, (msvcType)expected, (msvcType)desired); \ + } break; \ + case ma_atomic_memory_order_acq_rel: \ + case ma_atomic_memory_order_seq_cst: \ + default: \ + { \ + result = (ma_atomicType)intrin((volatile msvcType*)ptr, (msvcType)expected, (msvcType)desired); \ + } break; \ + } \ + return result; + #define ma_atomic_memory_order_relaxed 0 + #define ma_atomic_memory_order_consume 1 + #define ma_atomic_memory_order_acquire 2 + #define ma_atomic_memory_order_release 3 + #define ma_atomic_memory_order_acq_rel 4 + #define ma_atomic_memory_order_seq_cst 5 + #if _MSC_VER < 1600 && defined(MA_X86) + #define MA_ATOMIC_MSVC_USE_INLINED_ASSEMBLY + 
    #endif
+    #if _MSC_VER < 1600
+        #undef MA_ATOMIC_HAS_8
+        #undef MA_ATOMIC_HAS_16
+    #endif
+    #if !defined(MA_ATOMIC_MSVC_USE_INLINED_ASSEMBLY)
+        #include <intrin.h>
+    #endif
+    #if defined(MA_ATOMIC_MSVC_USE_INLINED_ASSEMBLY)
+        #if defined(MA_ATOMIC_HAS_8)
+            static MA_INLINE ma_uint8 __stdcall ma_atomic_compare_and_swap_8(volatile ma_uint8* dst, ma_uint8 expected, ma_uint8 desired)
+            {
+                ma_uint8 result = 0;
+                __asm {
+                    mov ecx, dst
+                    mov al, expected
+                    mov dl, desired
+                    lock cmpxchg [ecx], dl
+                    mov result, al
+                }
+                return result;
+            }
+        #endif
+        #if defined(MA_ATOMIC_HAS_16)
+            static MA_INLINE ma_uint16 __stdcall ma_atomic_compare_and_swap_16(volatile ma_uint16* dst, ma_uint16 expected, ma_uint16 desired)
+            {
+                ma_uint16 result = 0;
+                __asm {
+                    mov ecx, dst
+                    mov ax, expected
+                    mov dx, desired
+                    lock cmpxchg [ecx], dx
+                    mov result, ax
+                }
+                return result;
+            }
+        #endif
+        #if defined(MA_ATOMIC_HAS_32)
+            static MA_INLINE ma_uint32 __stdcall ma_atomic_compare_and_swap_32(volatile ma_uint32* dst, ma_uint32 expected, ma_uint32 desired)
+            {
+                ma_uint32 result = 0;
+                __asm {
+                    mov ecx, dst
+                    mov eax, expected
+                    mov edx, desired
+                    lock cmpxchg [ecx], edx
+                    mov result, eax
+                }
+                return result;
+            }
+        #endif
+        #if defined(MA_ATOMIC_HAS_64)
+            static MA_INLINE ma_uint64 __stdcall ma_atomic_compare_and_swap_64(volatile ma_uint64* dst, ma_uint64 expected, ma_uint64 desired)
+            {
+                ma_uint32 resultEAX = 0;
+                ma_uint32 resultEDX = 0;
+                __asm {
+                    mov esi, dst
+                    mov eax, dword ptr expected
+                    mov edx, dword ptr expected + 4
+                    mov ebx, dword ptr desired
+                    mov ecx, dword ptr desired + 4
+                    lock cmpxchg8b qword ptr [esi]
+                    mov resultEAX, eax
+                    mov resultEDX, edx
+                }
+                return ((ma_uint64)resultEDX << 32) | resultEAX;
+            }
+        #endif
+    #else
+        #if defined(MA_ATOMIC_HAS_8)
+            #define ma_atomic_compare_and_swap_8( dst, expected, desired) (ma_uint8 )_InterlockedCompareExchange8((volatile char*)dst, (char)desired, (char)expected)
+        #endif
+        #if defined(MA_ATOMIC_HAS_16)
+            #define ma_atomic_compare_and_swap_16(dst, expected, desired) (ma_uint16)_InterlockedCompareExchange16((volatile short*)dst, (short)desired, (short)expected)
+        #endif
+        #if defined(MA_ATOMIC_HAS_32)
+            #define ma_atomic_compare_and_swap_32(dst, expected, desired) (ma_uint32)_InterlockedCompareExchange((volatile long*)dst, (long)desired, (long)expected)
+        #endif
+        #if defined(MA_ATOMIC_HAS_64)
+            #define ma_atomic_compare_and_swap_64(dst, expected, desired) (ma_uint64)_InterlockedCompareExchange64((volatile ma_int64*)dst, (ma_int64)desired, (ma_int64)expected)
+        #endif
+    #endif
+    #if defined(MA_ATOMIC_MSVC_USE_INLINED_ASSEMBLY)
+        #if defined(MA_ATOMIC_HAS_8)
+            static MA_INLINE ma_uint8 __stdcall ma_atomic_exchange_explicit_8(volatile ma_uint8* dst, ma_uint8 src, ma_atomic_memory_order order)
+            {
+                ma_uint8 result = 0;
+                (void)order;
+                __asm {
+                    mov ecx, dst
+                    mov al, src
+                    lock xchg [ecx], al
+                    mov result, al
+                }
+                return result;
+            }
+        #endif
+        #if defined(MA_ATOMIC_HAS_16)
+            static MA_INLINE ma_uint16 __stdcall ma_atomic_exchange_explicit_16(volatile ma_uint16* dst, ma_uint16 src, ma_atomic_memory_order order)
+            {
+                ma_uint16 result = 0;
+                (void)order;
+                __asm {
+                    mov ecx, dst
+                    mov ax, src
+                    lock xchg [ecx], ax
+                    mov result, ax
+                }
+                return result;
+            }
+        #endif
+        #if defined(MA_ATOMIC_HAS_32)
+            static MA_INLINE ma_uint32 __stdcall ma_atomic_exchange_explicit_32(volatile ma_uint32* dst, ma_uint32 src, ma_atomic_memory_order order)
+            {
+                ma_uint32 result = 0;
+                (void)order;
+                __asm {
+                    mov ecx, dst
+                    mov eax, src
+                    lock xchg [ecx], eax
+                    mov result, eax
+                }
+                return result;
+ } + #endif + #else + #if defined(MA_ATOMIC_HAS_8) + static MA_INLINE ma_uint8 __stdcall ma_atomic_exchange_explicit_8(volatile ma_uint8* dst, ma_uint8 src, ma_atomic_memory_order order) + { + #if defined(MA_ARM) + MA_ATOMIC_MSVC_ARM_INTRINSIC(dst, src, order, _InterlockedExchange8, ma_uint8, char); + #else + (void)order; + return (ma_uint8)_InterlockedExchange8((volatile char*)dst, (char)src); + #endif + } + #endif + #if defined(MA_ATOMIC_HAS_16) + static MA_INLINE ma_uint16 __stdcall ma_atomic_exchange_explicit_16(volatile ma_uint16* dst, ma_uint16 src, ma_atomic_memory_order order) + { + #if defined(MA_ARM) + MA_ATOMIC_MSVC_ARM_INTRINSIC(dst, src, order, _InterlockedExchange16, ma_uint16, short); + #else + (void)order; + return (ma_uint16)_InterlockedExchange16((volatile short*)dst, (short)src); + #endif + } + #endif + #if defined(MA_ATOMIC_HAS_32) + static MA_INLINE ma_uint32 __stdcall ma_atomic_exchange_explicit_32(volatile ma_uint32* dst, ma_uint32 src, ma_atomic_memory_order order) + { + #if defined(MA_ARM) + MA_ATOMIC_MSVC_ARM_INTRINSIC(dst, src, order, _InterlockedExchange, ma_uint32, long); + #else + (void)order; + return (ma_uint32)_InterlockedExchange((volatile long*)dst, (long)src); + #endif + } + #endif + #if defined(MA_ATOMIC_HAS_64) && defined(MA_64BIT) + static MA_INLINE ma_uint64 __stdcall ma_atomic_exchange_explicit_64(volatile ma_uint64* dst, ma_uint64 src, ma_atomic_memory_order order) + { + #if defined(MA_ARM) + MA_ATOMIC_MSVC_ARM_INTRINSIC(dst, src, order, _InterlockedExchange64, ma_uint64, long long); + #else + (void)order; + return (ma_uint64)_InterlockedExchange64((volatile long long*)dst, (long long)src); + #endif + } + #else + #endif + #endif + #if defined(MA_ATOMIC_HAS_64) && !defined(MA_64BIT) + static MA_INLINE ma_uint64 __stdcall ma_atomic_exchange_explicit_64(volatile ma_uint64* dst, ma_uint64 src, ma_atomic_memory_order order) + { + ma_uint64 oldValue; + do { + oldValue = *dst; + } while (ma_atomic_compare_and_swap_64(dst, oldValue, src) != oldValue); + (void)order; + return oldValue; + } + #endif + #if defined(MA_ATOMIC_MSVC_USE_INLINED_ASSEMBLY) + #if defined(MA_ATOMIC_HAS_8) + static MA_INLINE ma_uint8 __stdcall ma_atomic_fetch_add_explicit_8(volatile ma_uint8* dst, ma_uint8 src, ma_atomic_memory_order order) + { + ma_uint8 result = 0; + (void)order; + __asm { + mov ecx, dst + mov al, src + lock xadd [ecx], al + mov result, al + } + return result; + } + #endif + #if defined(MA_ATOMIC_HAS_16) + static MA_INLINE ma_uint16 __stdcall ma_atomic_fetch_add_explicit_16(volatile ma_uint16* dst, ma_uint16 src, ma_atomic_memory_order order) + { + ma_uint16 result = 0; + (void)order; + __asm { + mov ecx, dst + mov ax, src + lock xadd [ecx], ax + mov result, ax + } + return result; + } + #endif + #if defined(MA_ATOMIC_HAS_32) + static MA_INLINE ma_uint32 __stdcall ma_atomic_fetch_add_explicit_32(volatile ma_uint32* dst, ma_uint32 src, ma_atomic_memory_order order) + { + ma_uint32 result = 0; + (void)order; + __asm { + mov ecx, dst + mov eax, src + lock xadd [ecx], eax + mov result, eax + } + return result; + } + #endif + #else + #if defined(MA_ATOMIC_HAS_8) + static MA_INLINE ma_uint8 __stdcall ma_atomic_fetch_add_explicit_8(volatile ma_uint8* dst, ma_uint8 src, ma_atomic_memory_order order) + { + #if defined(MA_ARM) + MA_ATOMIC_MSVC_ARM_INTRINSIC(dst, src, order, _InterlockedExchangeAdd8, ma_uint8, char); + #else + (void)order; + return (ma_uint8)_InterlockedExchangeAdd8((volatile char*)dst, (char)src); + #endif + } + #endif + #if defined(MA_ATOMIC_HAS_16) + 
static MA_INLINE ma_uint16 __stdcall ma_atomic_fetch_add_explicit_16(volatile ma_uint16* dst, ma_uint16 src, ma_atomic_memory_order order) + { + #if defined(MA_ARM) + MA_ATOMIC_MSVC_ARM_INTRINSIC(dst, src, order, _InterlockedExchangeAdd16, ma_uint16, short); + #else + (void)order; + return (ma_uint16)_InterlockedExchangeAdd16((volatile short*)dst, (short)src); + #endif + } + #endif + #if defined(MA_ATOMIC_HAS_32) + static MA_INLINE ma_uint32 __stdcall ma_atomic_fetch_add_explicit_32(volatile ma_uint32* dst, ma_uint32 src, ma_atomic_memory_order order) + { + #if defined(MA_ARM) + MA_ATOMIC_MSVC_ARM_INTRINSIC(dst, src, order, _InterlockedExchangeAdd, ma_uint32, long); + #else + (void)order; + return (ma_uint32)_InterlockedExchangeAdd((volatile long*)dst, (long)src); + #endif + } + #endif + #if defined(MA_ATOMIC_HAS_64) && defined(MA_64BIT) + static MA_INLINE ma_uint64 __stdcall ma_atomic_fetch_add_explicit_64(volatile ma_uint64* dst, ma_uint64 src, ma_atomic_memory_order order) + { + #if defined(MA_ARM) + MA_ATOMIC_MSVC_ARM_INTRINSIC(dst, src, order, _InterlockedExchangeAdd64, ma_uint64, long long); + #else + (void)order; + return (ma_uint64)_InterlockedExchangeAdd64((volatile long long*)dst, (long long)src); + #endif + } + #else + #endif + #endif + #if defined(MA_ATOMIC_HAS_64) && !defined(MA_64BIT) + static MA_INLINE ma_uint64 __stdcall ma_atomic_fetch_add_explicit_64(volatile ma_uint64* dst, ma_uint64 src, ma_atomic_memory_order order) + { + ma_uint64 oldValue; + ma_uint64 newValue; + do { + oldValue = *dst; + newValue = oldValue + src; + } while (ma_atomic_compare_and_swap_64(dst, oldValue, newValue) != oldValue); + (void)order; + return oldValue; + } + #endif + #if defined(MA_ATOMIC_MSVC_USE_INLINED_ASSEMBLY) + static MA_INLINE void __stdcall ma_atomic_thread_fence(ma_atomic_memory_order order) + { + (void)order; + __asm { + lock add [esp], 0 + } + } + #else + #if defined(MA_X64) + #define ma_atomic_thread_fence(order) __faststorefence(), (void)order + #elif defined(MA_ARM64) + #define ma_atomic_thread_fence(order) __dmb(_ARM64_BARRIER_ISH), (void)order + #else + static MA_INLINE void ma_atomic_thread_fence(ma_atomic_memory_order order) + { + volatile ma_uint32 barrier = 0; + ma_atomic_fetch_add_explicit_32(&barrier, 0, order); + } + #endif + #endif + #define ma_atomic_compiler_fence() ma_atomic_thread_fence(ma_atomic_memory_order_seq_cst) + #define ma_atomic_signal_fence(order) ma_atomic_thread_fence(order) + #if defined(MA_ATOMIC_HAS_8) + static MA_INLINE ma_uint8 ma_atomic_load_explicit_8(volatile const ma_uint8* ptr, ma_atomic_memory_order order) + { + #if defined(MA_ARM) + MA_ATOMIC_MSVC_ARM_INTRINSIC_COMPARE_EXCHANGE(ptr, 0, 0, order, _InterlockedCompareExchange8, ma_uint8, char); + #else + (void)order; + return ma_atomic_compare_and_swap_8((volatile ma_uint8*)ptr, 0, 0); + #endif + } + #endif + #if defined(MA_ATOMIC_HAS_16) + static MA_INLINE ma_uint16 ma_atomic_load_explicit_16(volatile const ma_uint16* ptr, ma_atomic_memory_order order) + { + #if defined(MA_ARM) + MA_ATOMIC_MSVC_ARM_INTRINSIC_COMPARE_EXCHANGE(ptr, 0, 0, order, _InterlockedCompareExchange16, ma_uint16, short); + #else + (void)order; + return ma_atomic_compare_and_swap_16((volatile ma_uint16*)ptr, 0, 0); + #endif + } + #endif + #if defined(MA_ATOMIC_HAS_32) + static MA_INLINE ma_uint32 ma_atomic_load_explicit_32(volatile const ma_uint32* ptr, ma_atomic_memory_order order) + { + #if defined(MA_ARM) + MA_ATOMIC_MSVC_ARM_INTRINSIC_COMPARE_EXCHANGE(ptr, 0, 0, order, _InterlockedCompareExchange, ma_uint32, long); + 
#else + (void)order; + return ma_atomic_compare_and_swap_32((volatile ma_uint32*)ptr, 0, 0); + #endif + } + #endif + #if defined(MA_ATOMIC_HAS_64) + static MA_INLINE ma_uint64 ma_atomic_load_explicit_64(volatile const ma_uint64* ptr, ma_atomic_memory_order order) + { + #if defined(MA_ARM) + MA_ATOMIC_MSVC_ARM_INTRINSIC_COMPARE_EXCHANGE(ptr, 0, 0, order, _InterlockedCompareExchange64, ma_uint64, long long); + #else + (void)order; + return ma_atomic_compare_and_swap_64((volatile ma_uint64*)ptr, 0, 0); + #endif + } + #endif + #if defined(MA_ATOMIC_HAS_8) + #define ma_atomic_store_explicit_8( dst, src, order) (void)ma_atomic_exchange_explicit_8 (dst, src, order) + #endif + #if defined(MA_ATOMIC_HAS_16) + #define ma_atomic_store_explicit_16(dst, src, order) (void)ma_atomic_exchange_explicit_16(dst, src, order) + #endif + #if defined(MA_ATOMIC_HAS_32) + #define ma_atomic_store_explicit_32(dst, src, order) (void)ma_atomic_exchange_explicit_32(dst, src, order) + #endif + #if defined(MA_ATOMIC_HAS_64) + #define ma_atomic_store_explicit_64(dst, src, order) (void)ma_atomic_exchange_explicit_64(dst, src, order) + #endif + #if defined(MA_ATOMIC_HAS_8) + static MA_INLINE ma_uint8 __stdcall ma_atomic_fetch_sub_explicit_8(volatile ma_uint8* dst, ma_uint8 src, ma_atomic_memory_order order) + { + ma_uint8 oldValue; + ma_uint8 newValue; + do { + oldValue = *dst; + newValue = (ma_uint8)(oldValue - src); + } while (ma_atomic_compare_and_swap_8(dst, oldValue, newValue) != oldValue); + (void)order; + return oldValue; + } + #endif + #if defined(MA_ATOMIC_HAS_16) + static MA_INLINE ma_uint16 __stdcall ma_atomic_fetch_sub_explicit_16(volatile ma_uint16* dst, ma_uint16 src, ma_atomic_memory_order order) + { + ma_uint16 oldValue; + ma_uint16 newValue; + do { + oldValue = *dst; + newValue = (ma_uint16)(oldValue - src); + } while (ma_atomic_compare_and_swap_16(dst, oldValue, newValue) != oldValue); + (void)order; + return oldValue; + } + #endif + #if defined(MA_ATOMIC_HAS_32) + static MA_INLINE ma_uint32 __stdcall ma_atomic_fetch_sub_explicit_32(volatile ma_uint32* dst, ma_uint32 src, ma_atomic_memory_order order) + { + ma_uint32 oldValue; + ma_uint32 newValue; + do { + oldValue = *dst; + newValue = oldValue - src; + } while (ma_atomic_compare_and_swap_32(dst, oldValue, newValue) != oldValue); + (void)order; + return oldValue; + } + #endif + #if defined(MA_ATOMIC_HAS_64) + static MA_INLINE ma_uint64 __stdcall ma_atomic_fetch_sub_explicit_64(volatile ma_uint64* dst, ma_uint64 src, ma_atomic_memory_order order) + { + ma_uint64 oldValue; + ma_uint64 newValue; + do { + oldValue = *dst; + newValue = oldValue - src; + } while (ma_atomic_compare_and_swap_64(dst, oldValue, newValue) != oldValue); + (void)order; + return oldValue; + } + #endif + #if defined(MA_ATOMIC_HAS_8) + static MA_INLINE ma_uint8 __stdcall ma_atomic_fetch_and_explicit_8(volatile ma_uint8* dst, ma_uint8 src, ma_atomic_memory_order order) + { + #if defined(MA_ARM) + MA_ATOMIC_MSVC_ARM_INTRINSIC(dst, src, order, _InterlockedAnd8, ma_uint8, char); + #else + ma_uint8 oldValue; + ma_uint8 newValue; + do { + oldValue = *dst; + newValue = (ma_uint8)(oldValue & src); + } while (ma_atomic_compare_and_swap_8(dst, oldValue, newValue) != oldValue); + (void)order; + return oldValue; + #endif + } + #endif + #if defined(MA_ATOMIC_HAS_16) + static MA_INLINE ma_uint16 __stdcall ma_atomic_fetch_and_explicit_16(volatile ma_uint16* dst, ma_uint16 src, ma_atomic_memory_order order) + { + #if defined(MA_ARM) + MA_ATOMIC_MSVC_ARM_INTRINSIC(dst, src, order, _InterlockedAnd16, 
ma_uint16, short); + #else + ma_uint16 oldValue; + ma_uint16 newValue; + do { + oldValue = *dst; + newValue = (ma_uint16)(oldValue & src); + } while (ma_atomic_compare_and_swap_16(dst, oldValue, newValue) != oldValue); + (void)order; + return oldValue; + #endif + } + #endif + #if defined(MA_ATOMIC_HAS_32) + static MA_INLINE ma_uint32 __stdcall ma_atomic_fetch_and_explicit_32(volatile ma_uint32* dst, ma_uint32 src, ma_atomic_memory_order order) + { + #if defined(MA_ARM) + MA_ATOMIC_MSVC_ARM_INTRINSIC(dst, src, order, _InterlockedAnd, ma_uint32, long); + #else + ma_uint32 oldValue; + ma_uint32 newValue; + do { + oldValue = *dst; + newValue = oldValue & src; + } while (ma_atomic_compare_and_swap_32(dst, oldValue, newValue) != oldValue); + (void)order; + return oldValue; + #endif + } + #endif + #if defined(MA_ATOMIC_HAS_64) + static MA_INLINE ma_uint64 __stdcall ma_atomic_fetch_and_explicit_64(volatile ma_uint64* dst, ma_uint64 src, ma_atomic_memory_order order) + { + #if defined(MA_ARM) + MA_ATOMIC_MSVC_ARM_INTRINSIC(dst, src, order, _InterlockedAnd64, ma_uint64, long long); + #else + ma_uint64 oldValue; + ma_uint64 newValue; + do { + oldValue = *dst; + newValue = oldValue & src; + } while (ma_atomic_compare_and_swap_64(dst, oldValue, newValue) != oldValue); + (void)order; + return oldValue; + #endif + } + #endif + #if defined(MA_ATOMIC_HAS_8) + static MA_INLINE ma_uint8 __stdcall ma_atomic_fetch_xor_explicit_8(volatile ma_uint8* dst, ma_uint8 src, ma_atomic_memory_order order) + { + #if defined(MA_ARM) + MA_ATOMIC_MSVC_ARM_INTRINSIC(dst, src, order, _InterlockedXor8, ma_uint8, char); + #else + ma_uint8 oldValue; + ma_uint8 newValue; + do { + oldValue = *dst; + newValue = (ma_uint8)(oldValue ^ src); + } while (ma_atomic_compare_and_swap_8(dst, oldValue, newValue) != oldValue); + (void)order; + return oldValue; + #endif + } + #endif + #if defined(MA_ATOMIC_HAS_16) + static MA_INLINE ma_uint16 __stdcall ma_atomic_fetch_xor_explicit_16(volatile ma_uint16* dst, ma_uint16 src, ma_atomic_memory_order order) + { + #if defined(MA_ARM) + MA_ATOMIC_MSVC_ARM_INTRINSIC(dst, src, order, _InterlockedXor16, ma_uint16, short); + #else + ma_uint16 oldValue; + ma_uint16 newValue; + do { + oldValue = *dst; + newValue = (ma_uint16)(oldValue ^ src); + } while (ma_atomic_compare_and_swap_16(dst, oldValue, newValue) != oldValue); + (void)order; + return oldValue; + #endif + } + #endif + #if defined(MA_ATOMIC_HAS_32) + static MA_INLINE ma_uint32 __stdcall ma_atomic_fetch_xor_explicit_32(volatile ma_uint32* dst, ma_uint32 src, ma_atomic_memory_order order) + { + #if defined(MA_ARM) + MA_ATOMIC_MSVC_ARM_INTRINSIC(dst, src, order, _InterlockedXor, ma_uint32, long); + #else + ma_uint32 oldValue; + ma_uint32 newValue; + do { + oldValue = *dst; + newValue = oldValue ^ src; + } while (ma_atomic_compare_and_swap_32(dst, oldValue, newValue) != oldValue); + (void)order; + return oldValue; + #endif + } + #endif + #if defined(MA_ATOMIC_HAS_64) + static MA_INLINE ma_uint64 __stdcall ma_atomic_fetch_xor_explicit_64(volatile ma_uint64* dst, ma_uint64 src, ma_atomic_memory_order order) + { + #if defined(MA_ARM) + MA_ATOMIC_MSVC_ARM_INTRINSIC(dst, src, order, _InterlockedXor64, ma_uint64, long long); + #else + ma_uint64 oldValue; + ma_uint64 newValue; + do { + oldValue = *dst; + newValue = oldValue ^ src; + } while (ma_atomic_compare_and_swap_64(dst, oldValue, newValue) != oldValue); + (void)order; + return oldValue; + #endif + } + #endif + #if defined(MA_ATOMIC_HAS_8) + static MA_INLINE ma_uint8 __stdcall 
ma_atomic_fetch_or_explicit_8(volatile ma_uint8* dst, ma_uint8 src, ma_atomic_memory_order order) + { + #if defined(MA_ARM) + MA_ATOMIC_MSVC_ARM_INTRINSIC(dst, src, order, _InterlockedOr8, ma_uint8, char); + #else + ma_uint8 oldValue; + ma_uint8 newValue; + do { + oldValue = *dst; + newValue = (ma_uint8)(oldValue | src); + } while (ma_atomic_compare_and_swap_8(dst, oldValue, newValue) != oldValue); + (void)order; + return oldValue; + #endif + } + #endif + #if defined(MA_ATOMIC_HAS_16) + static MA_INLINE ma_uint16 __stdcall ma_atomic_fetch_or_explicit_16(volatile ma_uint16* dst, ma_uint16 src, ma_atomic_memory_order order) + { + #if defined(MA_ARM) + MA_ATOMIC_MSVC_ARM_INTRINSIC(dst, src, order, _InterlockedOr16, ma_uint16, short); + #else + ma_uint16 oldValue; + ma_uint16 newValue; + do { + oldValue = *dst; + newValue = (ma_uint16)(oldValue | src); + } while (ma_atomic_compare_and_swap_16(dst, oldValue, newValue) != oldValue); + (void)order; + return oldValue; + #endif + } + #endif + #if defined(MA_ATOMIC_HAS_32) + static MA_INLINE ma_uint32 __stdcall ma_atomic_fetch_or_explicit_32(volatile ma_uint32* dst, ma_uint32 src, ma_atomic_memory_order order) + { + #if defined(MA_ARM) + MA_ATOMIC_MSVC_ARM_INTRINSIC(dst, src, order, _InterlockedOr, ma_uint32, long); + #else + ma_uint32 oldValue; + ma_uint32 newValue; + do { + oldValue = *dst; + newValue = oldValue | src; + } while (ma_atomic_compare_and_swap_32(dst, oldValue, newValue) != oldValue); + (void)order; + return oldValue; + #endif + } + #endif + #if defined(MA_ATOMIC_HAS_64) + static MA_INLINE ma_uint64 __stdcall ma_atomic_fetch_or_explicit_64(volatile ma_uint64* dst, ma_uint64 src, ma_atomic_memory_order order) + { + #if defined(MA_ARM) + MA_ATOMIC_MSVC_ARM_INTRINSIC(dst, src, order, _InterlockedOr64, ma_uint64, long long); + #else + ma_uint64 oldValue; + ma_uint64 newValue; + do { + oldValue = *dst; + newValue = oldValue | src; + } while (ma_atomic_compare_and_swap_64(dst, oldValue, newValue) != oldValue); + (void)order; + return oldValue; + #endif + } + #endif + #if defined(MA_ATOMIC_HAS_8) + #define ma_atomic_test_and_set_explicit_8( dst, order) ma_atomic_exchange_explicit_8 (dst, 1, order) + #endif + #if defined(MA_ATOMIC_HAS_16) + #define ma_atomic_test_and_set_explicit_16(dst, order) ma_atomic_exchange_explicit_16(dst, 1, order) + #endif + #if defined(MA_ATOMIC_HAS_32) + #define ma_atomic_test_and_set_explicit_32(dst, order) ma_atomic_exchange_explicit_32(dst, 1, order) + #endif + #if defined(MA_ATOMIC_HAS_64) + #define ma_atomic_test_and_set_explicit_64(dst, order) ma_atomic_exchange_explicit_64(dst, 1, order) + #endif + #if defined(MA_ATOMIC_HAS_8) + #define ma_atomic_clear_explicit_8( dst, order) ma_atomic_store_explicit_8 (dst, 0, order) + #endif + #if defined(MA_ATOMIC_HAS_16) + #define ma_atomic_clear_explicit_16(dst, order) ma_atomic_store_explicit_16(dst, 0, order) + #endif + #if defined(MA_ATOMIC_HAS_32) + #define ma_atomic_clear_explicit_32(dst, order) ma_atomic_store_explicit_32(dst, 0, order) + #endif + #if defined(MA_ATOMIC_HAS_64) + #define ma_atomic_clear_explicit_64(dst, order) ma_atomic_store_explicit_64(dst, 0, order) + #endif + #if defined(MA_ATOMIC_HAS_8) + typedef ma_uint8 ma_atomic_flag; + #define ma_atomic_flag_test_and_set_explicit(ptr, order) (ma_bool32)ma_atomic_test_and_set_explicit_8(ptr, order) + #define ma_atomic_flag_clear_explicit(ptr, order) ma_atomic_clear_explicit_8(ptr, order) + #define ma_atomic_flag_load_explicit(ptr, order) ma_atomic_load_explicit_8(ptr, order) + #else + typedef ma_uint32 
ma_atomic_flag; + #define ma_atomic_flag_test_and_set_explicit(ptr, order) (ma_bool32)ma_atomic_test_and_set_explicit_32(ptr, order) + #define ma_atomic_flag_clear_explicit(ptr, order) ma_atomic_clear_explicit_32(ptr, order) + #define ma_atomic_flag_load_explicit(ptr, order) ma_atomic_load_explicit_32(ptr, order) + #endif +#elif defined(__clang__) || (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7))) + #define MA_ATOMIC_HAS_NATIVE_COMPARE_EXCHANGE + #define MA_ATOMIC_HAS_NATIVE_IS_LOCK_FREE + #define ma_atomic_memory_order_relaxed __ATOMIC_RELAXED + #define ma_atomic_memory_order_consume __ATOMIC_CONSUME + #define ma_atomic_memory_order_acquire __ATOMIC_ACQUIRE + #define ma_atomic_memory_order_release __ATOMIC_RELEASE + #define ma_atomic_memory_order_acq_rel __ATOMIC_ACQ_REL + #define ma_atomic_memory_order_seq_cst __ATOMIC_SEQ_CST + #define ma_atomic_compiler_fence() __asm__ __volatile__("":::"memory") + #define ma_atomic_thread_fence(order) __atomic_thread_fence(order) + #define ma_atomic_signal_fence(order) __atomic_signal_fence(order) + #define ma_atomic_is_lock_free_8(ptr) __atomic_is_lock_free(1, ptr) + #define ma_atomic_is_lock_free_16(ptr) __atomic_is_lock_free(2, ptr) + #define ma_atomic_is_lock_free_32(ptr) __atomic_is_lock_free(4, ptr) + #define ma_atomic_is_lock_free_64(ptr) __atomic_is_lock_free(8, ptr) + #define ma_atomic_test_and_set_explicit_8( dst, order) __atomic_exchange_n(dst, 1, order) + #define ma_atomic_test_and_set_explicit_16(dst, order) __atomic_exchange_n(dst, 1, order) + #define ma_atomic_test_and_set_explicit_32(dst, order) __atomic_exchange_n(dst, 1, order) + #define ma_atomic_test_and_set_explicit_64(dst, order) __atomic_exchange_n(dst, 1, order) + #define ma_atomic_clear_explicit_8( dst, order) __atomic_store_n(dst, 0, order) + #define ma_atomic_clear_explicit_16(dst, order) __atomic_store_n(dst, 0, order) + #define ma_atomic_clear_explicit_32(dst, order) __atomic_store_n(dst, 0, order) + #define ma_atomic_clear_explicit_64(dst, order) __atomic_store_n(dst, 0, order) + #define ma_atomic_store_explicit_8( dst, src, order) __atomic_store_n(dst, src, order) + #define ma_atomic_store_explicit_16(dst, src, order) __atomic_store_n(dst, src, order) + #define ma_atomic_store_explicit_32(dst, src, order) __atomic_store_n(dst, src, order) + #define ma_atomic_store_explicit_64(dst, src, order) __atomic_store_n(dst, src, order) + #define ma_atomic_load_explicit_8( dst, order) __atomic_load_n(dst, order) + #define ma_atomic_load_explicit_16(dst, order) __atomic_load_n(dst, order) + #define ma_atomic_load_explicit_32(dst, order) __atomic_load_n(dst, order) + #define ma_atomic_load_explicit_64(dst, order) __atomic_load_n(dst, order) + #define ma_atomic_exchange_explicit_8( dst, src, order) __atomic_exchange_n(dst, src, order) + #define ma_atomic_exchange_explicit_16(dst, src, order) __atomic_exchange_n(dst, src, order) + #define ma_atomic_exchange_explicit_32(dst, src, order) __atomic_exchange_n(dst, src, order) + #define ma_atomic_exchange_explicit_64(dst, src, order) __atomic_exchange_n(dst, src, order) + #define ma_atomic_compare_exchange_strong_explicit_8( dst, expected, desired, successOrder, failureOrder) __atomic_compare_exchange_n(dst, expected, desired, 0, successOrder, failureOrder) + #define ma_atomic_compare_exchange_strong_explicit_16(dst, expected, desired, successOrder, failureOrder) __atomic_compare_exchange_n(dst, expected, desired, 0, successOrder, failureOrder) + #define ma_atomic_compare_exchange_strong_explicit_32(dst, 
expected, desired, successOrder, failureOrder) __atomic_compare_exchange_n(dst, expected, desired, 0, successOrder, failureOrder) + #define ma_atomic_compare_exchange_strong_explicit_64(dst, expected, desired, successOrder, failureOrder) __atomic_compare_exchange_n(dst, expected, desired, 0, successOrder, failureOrder) + #define ma_atomic_compare_exchange_weak_explicit_8( dst, expected, desired, successOrder, failureOrder) __atomic_compare_exchange_n(dst, expected, desired, 1, successOrder, failureOrder) + #define ma_atomic_compare_exchange_weak_explicit_16(dst, expected, desired, successOrder, failureOrder) __atomic_compare_exchange_n(dst, expected, desired, 1, successOrder, failureOrder) + #define ma_atomic_compare_exchange_weak_explicit_32(dst, expected, desired, successOrder, failureOrder) __atomic_compare_exchange_n(dst, expected, desired, 1, successOrder, failureOrder) + #define ma_atomic_compare_exchange_weak_explicit_64(dst, expected, desired, successOrder, failureOrder) __atomic_compare_exchange_n(dst, expected, desired, 1, successOrder, failureOrder) + #define ma_atomic_fetch_add_explicit_8( dst, src, order) __atomic_fetch_add(dst, src, order) + #define ma_atomic_fetch_add_explicit_16(dst, src, order) __atomic_fetch_add(dst, src, order) + #define ma_atomic_fetch_add_explicit_32(dst, src, order) __atomic_fetch_add(dst, src, order) + #define ma_atomic_fetch_add_explicit_64(dst, src, order) __atomic_fetch_add(dst, src, order) + #define ma_atomic_fetch_sub_explicit_8( dst, src, order) __atomic_fetch_sub(dst, src, order) + #define ma_atomic_fetch_sub_explicit_16(dst, src, order) __atomic_fetch_sub(dst, src, order) + #define ma_atomic_fetch_sub_explicit_32(dst, src, order) __atomic_fetch_sub(dst, src, order) + #define ma_atomic_fetch_sub_explicit_64(dst, src, order) __atomic_fetch_sub(dst, src, order) + #define ma_atomic_fetch_or_explicit_8( dst, src, order) __atomic_fetch_or(dst, src, order) + #define ma_atomic_fetch_or_explicit_16(dst, src, order) __atomic_fetch_or(dst, src, order) + #define ma_atomic_fetch_or_explicit_32(dst, src, order) __atomic_fetch_or(dst, src, order) + #define ma_atomic_fetch_or_explicit_64(dst, src, order) __atomic_fetch_or(dst, src, order) + #define ma_atomic_fetch_xor_explicit_8( dst, src, order) __atomic_fetch_xor(dst, src, order) + #define ma_atomic_fetch_xor_explicit_16(dst, src, order) __atomic_fetch_xor(dst, src, order) + #define ma_atomic_fetch_xor_explicit_32(dst, src, order) __atomic_fetch_xor(dst, src, order) + #define ma_atomic_fetch_xor_explicit_64(dst, src, order) __atomic_fetch_xor(dst, src, order) + #define ma_atomic_fetch_and_explicit_8( dst, src, order) __atomic_fetch_and(dst, src, order) + #define ma_atomic_fetch_and_explicit_16(dst, src, order) __atomic_fetch_and(dst, src, order) + #define ma_atomic_fetch_and_explicit_32(dst, src, order) __atomic_fetch_and(dst, src, order) + #define ma_atomic_fetch_and_explicit_64(dst, src, order) __atomic_fetch_and(dst, src, order) + static MA_INLINE ma_uint8 ma_atomic_compare_and_swap_8(volatile ma_uint8* dst, ma_uint8 expected, ma_uint8 desired) + { + __atomic_compare_exchange_n(dst, &expected, desired, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); + return expected; + } + static MA_INLINE ma_uint16 ma_atomic_compare_and_swap_16(volatile ma_uint16* dst, ma_uint16 expected, ma_uint16 desired) + { + __atomic_compare_exchange_n(dst, &expected, desired, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); + return expected; + } + static MA_INLINE ma_uint32 ma_atomic_compare_and_swap_32(volatile ma_uint32* dst, ma_uint32 
expected, ma_uint32 desired) + { + __atomic_compare_exchange_n(dst, &expected, desired, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); + return expected; + } + #if defined(__clang__) + #pragma clang diagnostic push + #if __clang_major__ >= 8 + #pragma clang diagnostic ignored "-Watomic-alignment" + #endif + #endif + static MA_INLINE ma_uint64 ma_atomic_compare_and_swap_64(volatile ma_uint64* dst, ma_uint64 expected, ma_uint64 desired) + { + __atomic_compare_exchange_n(dst, &expected, desired, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); + return expected; + } + #if defined(__clang__) + #pragma clang diagnostic pop + #endif + typedef ma_uint8 ma_atomic_flag; + #define ma_atomic_flag_test_and_set_explicit(dst, order) (ma_bool32)__atomic_test_and_set(dst, order) + #define ma_atomic_flag_clear_explicit(dst, order) __atomic_clear(dst, order) + #define ma_atomic_flag_load_explicit(ptr, order) ma_atomic_load_explicit_8(ptr, order) +#else + #define ma_atomic_memory_order_relaxed 1 + #define ma_atomic_memory_order_consume 2 + #define ma_atomic_memory_order_acquire 3 + #define ma_atomic_memory_order_release 4 + #define ma_atomic_memory_order_acq_rel 5 + #define ma_atomic_memory_order_seq_cst 6 + #define ma_atomic_compiler_fence() __asm__ __volatile__("":::"memory") + #if defined(__GNUC__) + #define ma_atomic_thread_fence(order) __sync_synchronize(), (void)order + static MA_INLINE ma_uint8 ma_atomic_exchange_explicit_8(volatile ma_uint8* dst, ma_uint8 src, ma_atomic_memory_order order) + { + if (order > ma_atomic_memory_order_acquire) { + __sync_synchronize(); + } + return __sync_lock_test_and_set(dst, src); + } + static MA_INLINE ma_uint16 ma_atomic_exchange_explicit_16(volatile ma_uint16* dst, ma_uint16 src, ma_atomic_memory_order order) + { + ma_uint16 oldValue; + do { + oldValue = *dst; + } while (__sync_val_compare_and_swap(dst, oldValue, src) != oldValue); + (void)order; + return oldValue; + } + static MA_INLINE ma_uint32 ma_atomic_exchange_explicit_32(volatile ma_uint32* dst, ma_uint32 src, ma_atomic_memory_order order) + { + ma_uint32 oldValue; + do { + oldValue = *dst; + } while (__sync_val_compare_and_swap(dst, oldValue, src) != oldValue); + (void)order; + return oldValue; + } + static MA_INLINE ma_uint64 ma_atomic_exchange_explicit_64(volatile ma_uint64* dst, ma_uint64 src, ma_atomic_memory_order order) + { + ma_uint64 oldValue; + do { + oldValue = *dst; + } while (__sync_val_compare_and_swap(dst, oldValue, src) != oldValue); + (void)order; + return oldValue; + } + static MA_INLINE ma_uint8 ma_atomic_fetch_add_explicit_8(volatile ma_uint8* dst, ma_uint8 src, ma_atomic_memory_order order) + { + (void)order; + return __sync_fetch_and_add(dst, src); + } + static MA_INLINE ma_uint16 ma_atomic_fetch_add_explicit_16(volatile ma_uint16* dst, ma_uint16 src, ma_atomic_memory_order order) + { + (void)order; + return __sync_fetch_and_add(dst, src); + } + static MA_INLINE ma_uint32 ma_atomic_fetch_add_explicit_32(volatile ma_uint32* dst, ma_uint32 src, ma_atomic_memory_order order) + { + (void)order; + return __sync_fetch_and_add(dst, src); + } + static MA_INLINE ma_uint64 ma_atomic_fetch_add_explicit_64(volatile ma_uint64* dst, ma_uint64 src, ma_atomic_memory_order order) + { + (void)order; + return __sync_fetch_and_add(dst, src); + } + static MA_INLINE ma_uint8 ma_atomic_fetch_sub_explicit_8(volatile ma_uint8* dst, ma_uint8 src, ma_atomic_memory_order order) + { + (void)order; + return __sync_fetch_and_sub(dst, src); + } + static MA_INLINE ma_uint16 ma_atomic_fetch_sub_explicit_16(volatile ma_uint16* dst, 
ma_uint16 src, ma_atomic_memory_order order) + { + (void)order; + return __sync_fetch_and_sub(dst, src); + } + static MA_INLINE ma_uint32 ma_atomic_fetch_sub_explicit_32(volatile ma_uint32* dst, ma_uint32 src, ma_atomic_memory_order order) + { + (void)order; + return __sync_fetch_and_sub(dst, src); + } + static MA_INLINE ma_uint64 ma_atomic_fetch_sub_explicit_64(volatile ma_uint64* dst, ma_uint64 src, ma_atomic_memory_order order) + { + (void)order; + return __sync_fetch_and_sub(dst, src); + } + static MA_INLINE ma_uint8 ma_atomic_fetch_or_explicit_8(volatile ma_uint8* dst, ma_uint8 src, ma_atomic_memory_order order) + { + (void)order; + return __sync_fetch_and_or(dst, src); + } + static MA_INLINE ma_uint16 ma_atomic_fetch_or_explicit_16(volatile ma_uint16* dst, ma_uint16 src, ma_atomic_memory_order order) + { + (void)order; + return __sync_fetch_and_or(dst, src); + } + static MA_INLINE ma_uint32 ma_atomic_fetch_or_explicit_32(volatile ma_uint32* dst, ma_uint32 src, ma_atomic_memory_order order) + { + (void)order; + return __sync_fetch_and_or(dst, src); + } + static MA_INLINE ma_uint64 ma_atomic_fetch_or_explicit_64(volatile ma_uint64* dst, ma_uint64 src, ma_atomic_memory_order order) + { + (void)order; + return __sync_fetch_and_or(dst, src); + } + static MA_INLINE ma_uint8 ma_atomic_fetch_xor_explicit_8(volatile ma_uint8* dst, ma_uint8 src, ma_atomic_memory_order order) + { + (void)order; + return __sync_fetch_and_xor(dst, src); + } + static MA_INLINE ma_uint16 ma_atomic_fetch_xor_explicit_16(volatile ma_uint16* dst, ma_uint16 src, ma_atomic_memory_order order) + { + (void)order; + return __sync_fetch_and_xor(dst, src); + } + static MA_INLINE ma_uint32 ma_atomic_fetch_xor_explicit_32(volatile ma_uint32* dst, ma_uint32 src, ma_atomic_memory_order order) + { + (void)order; + return __sync_fetch_and_xor(dst, src); + } + static MA_INLINE ma_uint64 ma_atomic_fetch_xor_explicit_64(volatile ma_uint64* dst, ma_uint64 src, ma_atomic_memory_order order) + { + (void)order; + return __sync_fetch_and_xor(dst, src); + } + static MA_INLINE ma_uint8 ma_atomic_fetch_and_explicit_8(volatile ma_uint8* dst, ma_uint8 src, ma_atomic_memory_order order) + { + (void)order; + return __sync_fetch_and_and(dst, src); + } + static MA_INLINE ma_uint16 ma_atomic_fetch_and_explicit_16(volatile ma_uint16* dst, ma_uint16 src, ma_atomic_memory_order order) + { + (void)order; + return __sync_fetch_and_and(dst, src); + } + static MA_INLINE ma_uint32 ma_atomic_fetch_and_explicit_32(volatile ma_uint32* dst, ma_uint32 src, ma_atomic_memory_order order) + { + (void)order; + return __sync_fetch_and_and(dst, src); + } + static MA_INLINE ma_uint64 ma_atomic_fetch_and_explicit_64(volatile ma_uint64* dst, ma_uint64 src, ma_atomic_memory_order order) + { + (void)order; + return __sync_fetch_and_and(dst, src); + } + #define ma_atomic_compare_and_swap_8( dst, expected, desired) __sync_val_compare_and_swap(dst, expected, desired) + #define ma_atomic_compare_and_swap_16(dst, expected, desired) __sync_val_compare_and_swap(dst, expected, desired) + #define ma_atomic_compare_and_swap_32(dst, expected, desired) __sync_val_compare_and_swap(dst, expected, desired) + #define ma_atomic_compare_and_swap_64(dst, expected, desired) __sync_val_compare_and_swap(dst, expected, desired) + #else + #if defined(MA_X86) + #define ma_atomic_thread_fence(order) __asm__ __volatile__("lock; addl $0, (%%esp)" ::: "memory", "cc") + #elif defined(MA_X64) + #define ma_atomic_thread_fence(order) __asm__ __volatile__("lock; addq $0, (%%rsp)" ::: "memory", "cc") + 
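+            /* NOTE: a lock-prefixed read-modify-write such as "lock addl $0, (%esp)" acts as a
+               full memory barrier on x86/x64 (StoreLoad included), so it can stand in for MFENCE
+               when no atomic builtins are available; adding 0 to the top of the stack leaves the
+               data itself unchanged. */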
#else + #error Unsupported architecture. Please submit a feature request. + #endif + static MA_INLINE ma_uint8 ma_atomic_compare_and_swap_8(volatile ma_uint8* dst, ma_uint8 expected, ma_uint8 desired) + { + ma_uint8 result; + #if defined(MA_X86) || defined(MA_X64) + __asm__ __volatile__("lock; cmpxchg %3, %0" : "+m"(*dst), "=a"(result) : "a"(expected), "d"(desired) : "cc"); + #else + #error Unsupported architecture. Please submit a feature request. + #endif + return result; + } + static MA_INLINE ma_uint16 ma_atomic_compare_and_swap_16(volatile ma_uint16* dst, ma_uint16 expected, ma_uint16 desired) + { + ma_uint16 result; + #if defined(MA_X86) || defined(MA_X64) + __asm__ __volatile__("lock; cmpxchg %3, %0" : "+m"(*dst), "=a"(result) : "a"(expected), "d"(desired) : "cc"); + #else + #error Unsupported architecture. Please submit a feature request. + #endif + return result; + } + static MA_INLINE ma_uint32 ma_atomic_compare_and_swap_32(volatile ma_uint32* dst, ma_uint32 expected, ma_uint32 desired) + { + ma_uint32 result; + #if defined(MA_X86) || defined(MA_X64) + __asm__ __volatile__("lock; cmpxchg %3, %0" : "+m"(*dst), "=a"(result) : "a"(expected), "d"(desired) : "cc"); + #else + #error Unsupported architecture. Please submit a feature request. + #endif + return result; + } + static MA_INLINE ma_uint64 ma_atomic_compare_and_swap_64(volatile ma_uint64* dst, ma_uint64 expected, ma_uint64 desired) + { + volatile ma_uint64 result; + #if defined(MA_X86) + ma_uint32 resultEAX; + ma_uint32 resultEDX; + __asm__ __volatile__("push %%ebx; xchg %5, %%ebx; lock; cmpxchg8b %0; pop %%ebx" : "+m"(*dst), "=a"(resultEAX), "=d"(resultEDX) : "a"(expected & 0xFFFFFFFF), "d"(expected >> 32), "r"(desired & 0xFFFFFFFF), "c"(desired >> 32) : "cc"); + result = ((ma_uint64)resultEDX << 32) | resultEAX; + #elif defined(MA_X64) + __asm__ __volatile__("lock; cmpxchg %3, %0" : "+m"(*dst), "=a"(result) : "a"(expected), "d"(desired) : "cc"); + #else + #error Unsupported architecture. Please submit a feature request. + #endif + return result; + } + static MA_INLINE ma_uint8 ma_atomic_exchange_explicit_8(volatile ma_uint8* dst, ma_uint8 src, ma_atomic_memory_order order) + { + ma_uint8 result = 0; + (void)order; + #if defined(MA_X86) || defined(MA_X64) + __asm__ __volatile__("lock; xchg %1, %0" : "+m"(*dst), "=a"(result) : "a"(src)); + #else + #error Unsupported architecture. Please submit a feature request. + #endif + return result; + } + static MA_INLINE ma_uint16 ma_atomic_exchange_explicit_16(volatile ma_uint16* dst, ma_uint16 src, ma_atomic_memory_order order) + { + ma_uint16 result = 0; + (void)order; + #if defined(MA_X86) || defined(MA_X64) + __asm__ __volatile__("lock; xchg %1, %0" : "+m"(*dst), "=a"(result) : "a"(src)); + #else + #error Unsupported architecture. Please submit a feature request. + #endif + return result; + } + static MA_INLINE ma_uint32 ma_atomic_exchange_explicit_32(volatile ma_uint32* dst, ma_uint32 src, ma_atomic_memory_order order) + { + ma_uint32 result; + (void)order; + #if defined(MA_X86) || defined(MA_X64) + __asm__ __volatile__("lock; xchg %1, %0" : "+m"(*dst), "=a"(result) : "a"(src)); + #else + #error Unsupported architecture. Please submit a feature request. 
+ #endif + return result; + } + static MA_INLINE ma_uint64 ma_atomic_exchange_explicit_64(volatile ma_uint64* dst, ma_uint64 src, ma_atomic_memory_order order) + { + ma_uint64 result; + (void)order; + #if defined(MA_X86) + do { + result = *dst; + } while (ma_atomic_compare_and_swap_64(dst, result, src) != result); + #elif defined(MA_X64) + __asm__ __volatile__("lock; xchg %1, %0" : "+m"(*dst), "=a"(result) : "a"(src)); + #else + #error Unsupported architecture. Please submit a feature request. + #endif + return result; + } + static MA_INLINE ma_uint8 ma_atomic_fetch_add_explicit_8(volatile ma_uint8* dst, ma_uint8 src, ma_atomic_memory_order order) + { + ma_uint8 result; + (void)order; + #if defined(MA_X86) || defined(MA_X64) + __asm__ __volatile__("lock; xadd %1, %0" : "+m"(*dst), "=a"(result) : "a"(src) : "cc"); + #else + #error Unsupported architecture. Please submit a feature request. + #endif + return result; + } + static MA_INLINE ma_uint16 ma_atomic_fetch_add_explicit_16(volatile ma_uint16* dst, ma_uint16 src, ma_atomic_memory_order order) + { + ma_uint16 result; + (void)order; + #if defined(MA_X86) || defined(MA_X64) + __asm__ __volatile__("lock; xadd %1, %0" : "+m"(*dst), "=a"(result) : "a"(src) : "cc"); + #else + #error Unsupported architecture. Please submit a feature request. + #endif + return result; + } + static MA_INLINE ma_uint32 ma_atomic_fetch_add_explicit_32(volatile ma_uint32* dst, ma_uint32 src, ma_atomic_memory_order order) + { + ma_uint32 result; + (void)order; + #if defined(MA_X86) || defined(MA_X64) + __asm__ __volatile__("lock; xadd %1, %0" : "+m"(*dst), "=a"(result) : "a"(src) : "cc"); + #else + #error Unsupported architecture. Please submit a feature request. + #endif + return result; + } + static MA_INLINE ma_uint64 ma_atomic_fetch_add_explicit_64(volatile ma_uint64* dst, ma_uint64 src, ma_atomic_memory_order order) + { + #if defined(MA_X86) + ma_uint64 oldValue; + ma_uint64 newValue; + (void)order; + do { + oldValue = *dst; + newValue = oldValue + src; + } while (ma_atomic_compare_and_swap_64(dst, oldValue, newValue) != oldValue); + return oldValue; + #elif defined(MA_X64) + ma_uint64 result; + (void)order; + __asm__ __volatile__("lock; xadd %1, %0" : "+m"(*dst), "=a"(result) : "a"(src) : "cc"); + return result; + #endif + } + static MA_INLINE ma_uint8 ma_atomic_fetch_sub_explicit_8(volatile ma_uint8* dst, ma_uint8 src, ma_atomic_memory_order order) + { + ma_uint8 oldValue; + ma_uint8 newValue; + do { + oldValue = *dst; + newValue = (ma_uint8)(oldValue - src); + } while (ma_atomic_compare_and_swap_8(dst, oldValue, newValue) != oldValue); + (void)order; + return oldValue; + } + static MA_INLINE ma_uint16 ma_atomic_fetch_sub_explicit_16(volatile ma_uint16* dst, ma_uint16 src, ma_atomic_memory_order order) + { + ma_uint16 oldValue; + ma_uint16 newValue; + do { + oldValue = *dst; + newValue = (ma_uint16)(oldValue - src); + } while (ma_atomic_compare_and_swap_16(dst, oldValue, newValue) != oldValue); + (void)order; + return oldValue; + } + static MA_INLINE ma_uint32 ma_atomic_fetch_sub_explicit_32(volatile ma_uint32* dst, ma_uint32 src, ma_atomic_memory_order order) + { + ma_uint32 oldValue; + ma_uint32 newValue; + do { + oldValue = *dst; + newValue = oldValue - src; + } while (ma_atomic_compare_and_swap_32(dst, oldValue, newValue) != oldValue); + (void)order; + return oldValue; + } + static MA_INLINE ma_uint64 ma_atomic_fetch_sub_explicit_64(volatile ma_uint64* dst, ma_uint64 src, ma_atomic_memory_order order) + { + ma_uint64 oldValue; + ma_uint64 newValue; + do { 
+ oldValue = *dst; + newValue = oldValue - src; + } while (ma_atomic_compare_and_swap_64(dst, oldValue, newValue) != oldValue); + (void)order; + return oldValue; + } + static MA_INLINE ma_uint8 ma_atomic_fetch_and_explicit_8(volatile ma_uint8* dst, ma_uint8 src, ma_atomic_memory_order order) + { + ma_uint8 oldValue; + ma_uint8 newValue; + do { + oldValue = *dst; + newValue = (ma_uint8)(oldValue & src); + } while (ma_atomic_compare_and_swap_8(dst, oldValue, newValue) != oldValue); + (void)order; + return oldValue; + } + static MA_INLINE ma_uint16 ma_atomic_fetch_and_explicit_16(volatile ma_uint16* dst, ma_uint16 src, ma_atomic_memory_order order) + { + ma_uint16 oldValue; + ma_uint16 newValue; + do { + oldValue = *dst; + newValue = (ma_uint16)(oldValue & src); + } while (ma_atomic_compare_and_swap_16(dst, oldValue, newValue) != oldValue); + (void)order; + return oldValue; + } + static MA_INLINE ma_uint32 ma_atomic_fetch_and_explicit_32(volatile ma_uint32* dst, ma_uint32 src, ma_atomic_memory_order order) + { + ma_uint32 oldValue; + ma_uint32 newValue; + do { + oldValue = *dst; + newValue = oldValue & src; + } while (ma_atomic_compare_and_swap_32(dst, oldValue, newValue) != oldValue); + (void)order; + return oldValue; + } + static MA_INLINE ma_uint64 ma_atomic_fetch_and_explicit_64(volatile ma_uint64* dst, ma_uint64 src, ma_atomic_memory_order order) + { + ma_uint64 oldValue; + ma_uint64 newValue; + do { + oldValue = *dst; + newValue = oldValue & src; + } while (ma_atomic_compare_and_swap_64(dst, oldValue, newValue) != oldValue); + (void)order; + return oldValue; + } + static MA_INLINE ma_uint8 ma_atomic_fetch_xor_explicit_8(volatile ma_uint8* dst, ma_uint8 src, ma_atomic_memory_order order) + { + ma_uint8 oldValue; + ma_uint8 newValue; + do { + oldValue = *dst; + newValue = (ma_uint8)(oldValue ^ src); + } while (ma_atomic_compare_and_swap_8(dst, oldValue, newValue) != oldValue); + (void)order; + return oldValue; + } + static MA_INLINE ma_uint16 ma_atomic_fetch_xor_explicit_16(volatile ma_uint16* dst, ma_uint16 src, ma_atomic_memory_order order) + { + ma_uint16 oldValue; + ma_uint16 newValue; + do { + oldValue = *dst; + newValue = (ma_uint16)(oldValue ^ src); + } while (ma_atomic_compare_and_swap_16(dst, oldValue, newValue) != oldValue); + (void)order; + return oldValue; + } + static MA_INLINE ma_uint32 ma_atomic_fetch_xor_explicit_32(volatile ma_uint32* dst, ma_uint32 src, ma_atomic_memory_order order) + { + ma_uint32 oldValue; + ma_uint32 newValue; + do { + oldValue = *dst; + newValue = oldValue ^ src; + } while (ma_atomic_compare_and_swap_32(dst, oldValue, newValue) != oldValue); + (void)order; + return oldValue; + } + static MA_INLINE ma_uint64 ma_atomic_fetch_xor_explicit_64(volatile ma_uint64* dst, ma_uint64 src, ma_atomic_memory_order order) + { + ma_uint64 oldValue; + ma_uint64 newValue; + do { + oldValue = *dst; + newValue = oldValue ^ src; + } while (ma_atomic_compare_and_swap_64(dst, oldValue, newValue) != oldValue); + (void)order; + return oldValue; + } + static MA_INLINE ma_uint8 ma_atomic_fetch_or_explicit_8(volatile ma_uint8* dst, ma_uint8 src, ma_atomic_memory_order order) + { + ma_uint8 oldValue; + ma_uint8 newValue; + do { + oldValue = *dst; + newValue = (ma_uint8)(oldValue | src); + } while (ma_atomic_compare_and_swap_8(dst, oldValue, newValue) != oldValue); + (void)order; + return oldValue; + } + static MA_INLINE ma_uint16 ma_atomic_fetch_or_explicit_16(volatile ma_uint16* dst, ma_uint16 src, ma_atomic_memory_order order) + { + ma_uint16 oldValue; + ma_uint16 newValue; + 
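+        /* Classic CAS retry loop: snapshot *dst, compute the new value, then publish it only
+           if no other thread wrote to *dst in the meantime; otherwise re-read and try again.
+           The fetch_sub/and/xor/or fallbacks in this branch are all built from
+           ma_atomic_compare_and_swap_* in exactly this way. */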
do { + oldValue = *dst; + newValue = (ma_uint16)(oldValue | src); + } while (ma_atomic_compare_and_swap_16(dst, oldValue, newValue) != oldValue); + (void)order; + return oldValue; + } + static MA_INLINE ma_uint32 ma_atomic_fetch_or_explicit_32(volatile ma_uint32* dst, ma_uint32 src, ma_atomic_memory_order order) + { + ma_uint32 oldValue; + ma_uint32 newValue; + do { + oldValue = *dst; + newValue = oldValue | src; + } while (ma_atomic_compare_and_swap_32(dst, oldValue, newValue) != oldValue); + (void)order; + return oldValue; + } + static MA_INLINE ma_uint64 ma_atomic_fetch_or_explicit_64(volatile ma_uint64* dst, ma_uint64 src, ma_atomic_memory_order order) + { + ma_uint64 oldValue; + ma_uint64 newValue; + do { + oldValue = *dst; + newValue = oldValue | src; + } while (ma_atomic_compare_and_swap_64(dst, oldValue, newValue) != oldValue); + (void)order; + return oldValue; + } + #endif + #define ma_atomic_signal_fence(order) ma_atomic_thread_fence(order) + static MA_INLINE ma_uint8 ma_atomic_load_explicit_8(volatile const ma_uint8* ptr, ma_atomic_memory_order order) + { + (void)order; + return ma_atomic_compare_and_swap_8((ma_uint8*)ptr, 0, 0); + } + static MA_INLINE ma_uint16 ma_atomic_load_explicit_16(volatile const ma_uint16* ptr, ma_atomic_memory_order order) + { + (void)order; + return ma_atomic_compare_and_swap_16((ma_uint16*)ptr, 0, 0); + } + static MA_INLINE ma_uint32 ma_atomic_load_explicit_32(volatile const ma_uint32* ptr, ma_atomic_memory_order order) + { + (void)order; + return ma_atomic_compare_and_swap_32((ma_uint32*)ptr, 0, 0); + } + static MA_INLINE ma_uint64 ma_atomic_load_explicit_64(volatile const ma_uint64* ptr, ma_atomic_memory_order order) + { + (void)order; + return ma_atomic_compare_and_swap_64((ma_uint64*)ptr, 0, 0); + } + #define ma_atomic_store_explicit_8( dst, src, order) (void)ma_atomic_exchange_explicit_8 (dst, src, order) + #define ma_atomic_store_explicit_16(dst, src, order) (void)ma_atomic_exchange_explicit_16(dst, src, order) + #define ma_atomic_store_explicit_32(dst, src, order) (void)ma_atomic_exchange_explicit_32(dst, src, order) + #define ma_atomic_store_explicit_64(dst, src, order) (void)ma_atomic_exchange_explicit_64(dst, src, order) + #define ma_atomic_test_and_set_explicit_8( dst, order) ma_atomic_exchange_explicit_8 (dst, 1, order) + #define ma_atomic_test_and_set_explicit_16(dst, order) ma_atomic_exchange_explicit_16(dst, 1, order) + #define ma_atomic_test_and_set_explicit_32(dst, order) ma_atomic_exchange_explicit_32(dst, 1, order) + #define ma_atomic_test_and_set_explicit_64(dst, order) ma_atomic_exchange_explicit_64(dst, 1, order) + #define ma_atomic_clear_explicit_8( dst, order) ma_atomic_store_explicit_8 (dst, 0, order) + #define ma_atomic_clear_explicit_16(dst, order) ma_atomic_store_explicit_16(dst, 0, order) + #define ma_atomic_clear_explicit_32(dst, order) ma_atomic_store_explicit_32(dst, 0, order) + #define ma_atomic_clear_explicit_64(dst, order) ma_atomic_store_explicit_64(dst, 0, order) + typedef ma_uint8 ma_atomic_flag; + #define ma_atomic_flag_test_and_set_explicit(ptr, order) (ma_bool32)ma_atomic_test_and_set_explicit_8(ptr, order) + #define ma_atomic_flag_clear_explicit(ptr, order) ma_atomic_clear_explicit_8(ptr, order) + #define ma_atomic_flag_load_explicit(ptr, order) ma_atomic_load_explicit_8(ptr, order) +#endif +#if !defined(MA_ATOMIC_HAS_NATIVE_COMPARE_EXCHANGE) + #if defined(MA_ATOMIC_HAS_8) + static MA_INLINE ma_bool32 ma_atomic_compare_exchange_strong_explicit_8(volatile ma_uint8* dst, ma_uint8* expected, ma_uint8 desired, 
ma_atomic_memory_order successOrder, ma_atomic_memory_order failureOrder) + { + ma_uint8 expectedValue; + ma_uint8 result; + (void)successOrder; + (void)failureOrder; + expectedValue = ma_atomic_load_explicit_8(expected, ma_atomic_memory_order_seq_cst); + result = ma_atomic_compare_and_swap_8(dst, expectedValue, desired); + if (result == expectedValue) { + return 1; + } else { + ma_atomic_store_explicit_8(expected, result, failureOrder); + return 0; + } + } + #endif + #if defined(MA_ATOMIC_HAS_16) + static MA_INLINE ma_bool32 ma_atomic_compare_exchange_strong_explicit_16(volatile ma_uint16* dst, ma_uint16* expected, ma_uint16 desired, ma_atomic_memory_order successOrder, ma_atomic_memory_order failureOrder) + { + ma_uint16 expectedValue; + ma_uint16 result; + (void)successOrder; + (void)failureOrder; + expectedValue = ma_atomic_load_explicit_16(expected, ma_atomic_memory_order_seq_cst); + result = ma_atomic_compare_and_swap_16(dst, expectedValue, desired); + if (result == expectedValue) { + return 1; + } else { + ma_atomic_store_explicit_16(expected, result, failureOrder); + return 0; + } + } + #endif + #if defined(MA_ATOMIC_HAS_32) + static MA_INLINE ma_bool32 ma_atomic_compare_exchange_strong_explicit_32(volatile ma_uint32* dst, ma_uint32* expected, ma_uint32 desired, ma_atomic_memory_order successOrder, ma_atomic_memory_order failureOrder) + { + ma_uint32 expectedValue; + ma_uint32 result; + (void)successOrder; + (void)failureOrder; + expectedValue = ma_atomic_load_explicit_32(expected, ma_atomic_memory_order_seq_cst); + result = ma_atomic_compare_and_swap_32(dst, expectedValue, desired); + if (result == expectedValue) { + return 1; + } else { + ma_atomic_store_explicit_32(expected, result, failureOrder); + return 0; + } + } + #endif + #if defined(MA_ATOMIC_HAS_64) + static MA_INLINE ma_bool32 ma_atomic_compare_exchange_strong_explicit_64(volatile ma_uint64* dst, volatile ma_uint64* expected, ma_uint64 desired, ma_atomic_memory_order successOrder, ma_atomic_memory_order failureOrder) + { + ma_uint64 expectedValue; + ma_uint64 result; + (void)successOrder; + (void)failureOrder; + expectedValue = ma_atomic_load_explicit_64(expected, ma_atomic_memory_order_seq_cst); + result = ma_atomic_compare_and_swap_64(dst, expectedValue, desired); + if (result == expectedValue) { + return 1; + } else { + ma_atomic_store_explicit_64(expected, result, failureOrder); + return 0; + } + } + #endif + #define ma_atomic_compare_exchange_weak_explicit_8( dst, expected, desired, successOrder, failureOrder) ma_atomic_compare_exchange_strong_explicit_8 (dst, expected, desired, successOrder, failureOrder) + #define ma_atomic_compare_exchange_weak_explicit_16(dst, expected, desired, successOrder, failureOrder) ma_atomic_compare_exchange_strong_explicit_16(dst, expected, desired, successOrder, failureOrder) + #define ma_atomic_compare_exchange_weak_explicit_32(dst, expected, desired, successOrder, failureOrder) ma_atomic_compare_exchange_strong_explicit_32(dst, expected, desired, successOrder, failureOrder) + #define ma_atomic_compare_exchange_weak_explicit_64(dst, expected, desired, successOrder, failureOrder) ma_atomic_compare_exchange_strong_explicit_64(dst, expected, desired, successOrder, failureOrder) +#endif +#if !defined(MA_ATOMIC_HAS_NATIVE_IS_LOCK_FREE) + static MA_INLINE ma_bool32 ma_atomic_is_lock_free_8(volatile void* ptr) + { + (void)ptr; + return 1; + } + static MA_INLINE ma_bool32 ma_atomic_is_lock_free_16(volatile void* ptr) + { + (void)ptr; + return 1; + } + static MA_INLINE ma_bool32 
ma_atomic_is_lock_free_32(volatile void* ptr) + { + (void)ptr; + return 1; + } + static MA_INLINE ma_bool32 ma_atomic_is_lock_free_64(volatile void* ptr) + { + (void)ptr; + #if defined(MA_64BIT) + return 1; + #else + #if defined(MA_X86) || defined(MA_X64) + return 1; + #else + return 0; + #endif + #endif + } +#endif +#if defined(MA_64BIT) + static MA_INLINE ma_bool32 ma_atomic_is_lock_free_ptr(volatile void** ptr) + { + return ma_atomic_is_lock_free_64((volatile ma_uint64*)ptr); + } + static MA_INLINE void* ma_atomic_load_explicit_ptr(volatile void** ptr, ma_atomic_memory_order order) + { + return (void*)ma_atomic_load_explicit_64((volatile ma_uint64*)ptr, order); + } + static MA_INLINE void ma_atomic_store_explicit_ptr(volatile void** dst, void* src, ma_atomic_memory_order order) + { + ma_atomic_store_explicit_64((volatile ma_uint64*)dst, (ma_uint64)src, order); + } + static MA_INLINE void* ma_atomic_exchange_explicit_ptr(volatile void** dst, void* src, ma_atomic_memory_order order) + { + return (void*)ma_atomic_exchange_explicit_64((volatile ma_uint64*)dst, (ma_uint64)src, order); + } + static MA_INLINE ma_bool32 ma_atomic_compare_exchange_strong_explicit_ptr(volatile void** dst, void** expected, void* desired, ma_atomic_memory_order successOrder, ma_atomic_memory_order failureOrder) + { + return ma_atomic_compare_exchange_strong_explicit_64((volatile ma_uint64*)dst, (ma_uint64*)expected, (ma_uint64)desired, successOrder, failureOrder); + } + static MA_INLINE ma_bool32 ma_atomic_compare_exchange_weak_explicit_ptr(volatile void** dst, void** expected, void* desired, ma_atomic_memory_order successOrder, ma_atomic_memory_order failureOrder) + { + return ma_atomic_compare_exchange_weak_explicit_64((volatile ma_uint64*)dst, (ma_uint64*)expected, (ma_uint64)desired, successOrder, failureOrder); + } + static MA_INLINE void* ma_atomic_compare_and_swap_ptr(volatile void** dst, void* expected, void* desired) + { + return (void*)ma_atomic_compare_and_swap_64((volatile ma_uint64*)dst, (ma_uint64)expected, (ma_uint64)desired); + } +#elif defined(MA_32BIT) + static MA_INLINE ma_bool32 ma_atomic_is_lock_free_ptr(volatile void** ptr) + { + return ma_atomic_is_lock_free_32((volatile ma_uint32*)ptr); + } + static MA_INLINE void* ma_atomic_load_explicit_ptr(volatile void** ptr, ma_atomic_memory_order order) + { + return (void*)ma_atomic_load_explicit_32((volatile ma_uint32*)ptr, order); + } + static MA_INLINE void ma_atomic_store_explicit_ptr(volatile void** dst, void* src, ma_atomic_memory_order order) + { + ma_atomic_store_explicit_32((volatile ma_uint32*)dst, (ma_uint32)src, order); + } + static MA_INLINE void* ma_atomic_exchange_explicit_ptr(volatile void** dst, void* src, ma_atomic_memory_order order) + { + return (void*)ma_atomic_exchange_explicit_32((volatile ma_uint32*)dst, (ma_uint32)src, order); + } + static MA_INLINE ma_bool32 ma_atomic_compare_exchange_strong_explicit_ptr(volatile void** dst, void** expected, void* desired, ma_atomic_memory_order successOrder, ma_atomic_memory_order failureOrder) + { + return ma_atomic_compare_exchange_strong_explicit_32((volatile ma_uint32*)dst, (ma_uint32*)expected, (ma_uint32)desired, successOrder, failureOrder); + } + static MA_INLINE ma_bool32 ma_atomic_compare_exchange_weak_explicit_ptr(volatile void** dst, void** expected, void* desired, ma_atomic_memory_order successOrder, ma_atomic_memory_order failureOrder) + { + return ma_atomic_compare_exchange_weak_explicit_32((volatile ma_uint32*)dst, (ma_uint32*)expected, (ma_uint32)desired, successOrder, 
failureOrder); + } + static MA_INLINE void* ma_atomic_compare_and_swap_ptr(volatile void** dst, void* expected, void* desired) + { + return (void*)ma_atomic_compare_and_swap_32((volatile ma_uint32*)dst, (ma_uint32)expected, (ma_uint32)desired); + } +#else + #error Unsupported architecture. +#endif +#define ma_atomic_flag_test_and_set(ptr) ma_atomic_flag_test_and_set_explicit(ptr, ma_atomic_memory_order_seq_cst) +#define ma_atomic_flag_clear(ptr) ma_atomic_flag_clear_explicit(ptr, ma_atomic_memory_order_seq_cst) +#define ma_atomic_store_ptr(dst, src) ma_atomic_store_explicit_ptr((volatile void**)dst, (void*)src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_load_ptr(ptr) ma_atomic_load_explicit_ptr((volatile void**)ptr, ma_atomic_memory_order_seq_cst) +#define ma_atomic_exchange_ptr(dst, src) ma_atomic_exchange_explicit_ptr((volatile void**)dst, (void*)src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_compare_exchange_strong_ptr(dst, expected, desired) ma_atomic_compare_exchange_strong_explicit_ptr((volatile void**)dst, (void**)expected, (void*)desired, ma_atomic_memory_order_seq_cst, ma_atomic_memory_order_seq_cst) +#define ma_atomic_compare_exchange_weak_ptr(dst, expected, desired) ma_atomic_compare_exchange_weak_explicit_ptr((volatile void**)dst, (void**)expected, (void*)desired, ma_atomic_memory_order_seq_cst, ma_atomic_memory_order_seq_cst) +#define ma_atomic_test_and_set_8( ptr) ma_atomic_test_and_set_explicit_8( ptr, ma_atomic_memory_order_seq_cst) +#define ma_atomic_test_and_set_16(ptr) ma_atomic_test_and_set_explicit_16(ptr, ma_atomic_memory_order_seq_cst) +#define ma_atomic_test_and_set_32(ptr) ma_atomic_test_and_set_explicit_32(ptr, ma_atomic_memory_order_seq_cst) +#define ma_atomic_test_and_set_64(ptr) ma_atomic_test_and_set_explicit_64(ptr, ma_atomic_memory_order_seq_cst) +#define ma_atomic_clear_8( ptr) ma_atomic_clear_explicit_8( ptr, ma_atomic_memory_order_seq_cst) +#define ma_atomic_clear_16(ptr) ma_atomic_clear_explicit_16(ptr, ma_atomic_memory_order_seq_cst) +#define ma_atomic_clear_32(ptr) ma_atomic_clear_explicit_32(ptr, ma_atomic_memory_order_seq_cst) +#define ma_atomic_clear_64(ptr) ma_atomic_clear_explicit_64(ptr, ma_atomic_memory_order_seq_cst) +#define ma_atomic_store_8( dst, src) ma_atomic_store_explicit_8( dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_store_16(dst, src) ma_atomic_store_explicit_16(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_store_32(dst, src) ma_atomic_store_explicit_32(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_store_64(dst, src) ma_atomic_store_explicit_64(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_load_8( ptr) ma_atomic_load_explicit_8( ptr, ma_atomic_memory_order_seq_cst) +#define ma_atomic_load_16(ptr) ma_atomic_load_explicit_16(ptr, ma_atomic_memory_order_seq_cst) +#define ma_atomic_load_32(ptr) ma_atomic_load_explicit_32(ptr, ma_atomic_memory_order_seq_cst) +#define ma_atomic_load_64(ptr) ma_atomic_load_explicit_64(ptr, ma_atomic_memory_order_seq_cst) +#define ma_atomic_exchange_8( dst, src) ma_atomic_exchange_explicit_8( dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_exchange_16(dst, src) ma_atomic_exchange_explicit_16(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_exchange_32(dst, src) ma_atomic_exchange_explicit_32(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_exchange_64(dst, src) ma_atomic_exchange_explicit_64(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_compare_exchange_strong_8( dst, expected, 
desired) ma_atomic_compare_exchange_strong_explicit_8( dst, expected, desired, ma_atomic_memory_order_seq_cst, ma_atomic_memory_order_seq_cst) +#define ma_atomic_compare_exchange_strong_16(dst, expected, desired) ma_atomic_compare_exchange_strong_explicit_16(dst, expected, desired, ma_atomic_memory_order_seq_cst, ma_atomic_memory_order_seq_cst) +#define ma_atomic_compare_exchange_strong_32(dst, expected, desired) ma_atomic_compare_exchange_strong_explicit_32(dst, expected, desired, ma_atomic_memory_order_seq_cst, ma_atomic_memory_order_seq_cst) +#define ma_atomic_compare_exchange_strong_64(dst, expected, desired) ma_atomic_compare_exchange_strong_explicit_64(dst, expected, desired, ma_atomic_memory_order_seq_cst, ma_atomic_memory_order_seq_cst) +#define ma_atomic_compare_exchange_weak_8( dst, expected, desired) ma_atomic_compare_exchange_weak_explicit_8( dst, expected, desired, ma_atomic_memory_order_seq_cst, ma_atomic_memory_order_seq_cst) +#define ma_atomic_compare_exchange_weak_16( dst, expected, desired) ma_atomic_compare_exchange_weak_explicit_16(dst, expected, desired, ma_atomic_memory_order_seq_cst, ma_atomic_memory_order_seq_cst) +#define ma_atomic_compare_exchange_weak_32( dst, expected, desired) ma_atomic_compare_exchange_weak_explicit_32(dst, expected, desired, ma_atomic_memory_order_seq_cst, ma_atomic_memory_order_seq_cst) +#define ma_atomic_compare_exchange_weak_64( dst, expected, desired) ma_atomic_compare_exchange_weak_explicit_64(dst, expected, desired, ma_atomic_memory_order_seq_cst, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_add_8( dst, src) ma_atomic_fetch_add_explicit_8( dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_add_16(dst, src) ma_atomic_fetch_add_explicit_16(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_add_32(dst, src) ma_atomic_fetch_add_explicit_32(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_add_64(dst, src) ma_atomic_fetch_add_explicit_64(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_sub_8( dst, src) ma_atomic_fetch_sub_explicit_8( dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_sub_16(dst, src) ma_atomic_fetch_sub_explicit_16(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_sub_32(dst, src) ma_atomic_fetch_sub_explicit_32(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_sub_64(dst, src) ma_atomic_fetch_sub_explicit_64(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_or_8( dst, src) ma_atomic_fetch_or_explicit_8( dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_or_16(dst, src) ma_atomic_fetch_or_explicit_16(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_or_32(dst, src) ma_atomic_fetch_or_explicit_32(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_or_64(dst, src) ma_atomic_fetch_or_explicit_64(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_xor_8( dst, src) ma_atomic_fetch_xor_explicit_8( dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_xor_16(dst, src) ma_atomic_fetch_xor_explicit_16(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_xor_32(dst, src) ma_atomic_fetch_xor_explicit_32(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_xor_64(dst, src) ma_atomic_fetch_xor_explicit_64(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_and_8( dst, src) ma_atomic_fetch_and_explicit_8 (dst, src, ma_atomic_memory_order_seq_cst) +#define 
ma_atomic_fetch_and_16(dst, src) ma_atomic_fetch_and_explicit_16(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_and_32(dst, src) ma_atomic_fetch_and_explicit_32(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_and_64(dst, src) ma_atomic_fetch_and_explicit_64(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_test_and_set_explicit_i8( ptr, order) (ma_int8 )ma_atomic_test_and_set_explicit_8( (ma_uint8* )ptr, order) +#define ma_atomic_test_and_set_explicit_i16(ptr, order) (ma_int16)ma_atomic_test_and_set_explicit_16((ma_uint16*)ptr, order) +#define ma_atomic_test_and_set_explicit_i32(ptr, order) (ma_int32)ma_atomic_test_and_set_explicit_32((ma_uint32*)ptr, order) +#define ma_atomic_test_and_set_explicit_i64(ptr, order) (ma_int64)ma_atomic_test_and_set_explicit_64((ma_uint64*)ptr, order) +#define ma_atomic_clear_explicit_i8( ptr, order) ma_atomic_clear_explicit_8( (ma_uint8* )ptr, order) +#define ma_atomic_clear_explicit_i16(ptr, order) ma_atomic_clear_explicit_16((ma_uint16*)ptr, order) +#define ma_atomic_clear_explicit_i32(ptr, order) ma_atomic_clear_explicit_32((ma_uint32*)ptr, order) +#define ma_atomic_clear_explicit_i64(ptr, order) ma_atomic_clear_explicit_64((ma_uint64*)ptr, order) +#define ma_atomic_store_explicit_i8( dst, src, order) ma_atomic_store_explicit_8( (ma_uint8* )dst, (ma_uint8 )src, order) +#define ma_atomic_store_explicit_i16(dst, src, order) ma_atomic_store_explicit_16((ma_uint16*)dst, (ma_uint16)src, order) +#define ma_atomic_store_explicit_i32(dst, src, order) ma_atomic_store_explicit_32((ma_uint32*)dst, (ma_uint32)src, order) +#define ma_atomic_store_explicit_i64(dst, src, order) ma_atomic_store_explicit_64((ma_uint64*)dst, (ma_uint64)src, order) +#define ma_atomic_load_explicit_i8( ptr, order) (ma_int8 )ma_atomic_load_explicit_8( (ma_uint8* )ptr, order) +#define ma_atomic_load_explicit_i16(ptr, order) (ma_int16)ma_atomic_load_explicit_16((ma_uint16*)ptr, order) +#define ma_atomic_load_explicit_i32(ptr, order) (ma_int32)ma_atomic_load_explicit_32((ma_uint32*)ptr, order) +#define ma_atomic_load_explicit_i64(ptr, order) (ma_int64)ma_atomic_load_explicit_64((ma_uint64*)ptr, order) +#define ma_atomic_exchange_explicit_i8( dst, src, order) (ma_int8 )ma_atomic_exchange_explicit_8 ((ma_uint8* )dst, (ma_uint8 )src, order) +#define ma_atomic_exchange_explicit_i16(dst, src, order) (ma_int16)ma_atomic_exchange_explicit_16((ma_uint16*)dst, (ma_uint16)src, order) +#define ma_atomic_exchange_explicit_i32(dst, src, order) (ma_int32)ma_atomic_exchange_explicit_32((ma_uint32*)dst, (ma_uint32)src, order) +#define ma_atomic_exchange_explicit_i64(dst, src, order) (ma_int64)ma_atomic_exchange_explicit_64((ma_uint64*)dst, (ma_uint64)src, order) +#define ma_atomic_compare_exchange_strong_explicit_i8( dst, expected, desired, successOrder, failureOrder) ma_atomic_compare_exchange_strong_explicit_8( (ma_uint8* )dst, (ma_uint8* )expected, (ma_uint8 )desired, successOrder, failureOrder) +#define ma_atomic_compare_exchange_strong_explicit_i16(dst, expected, desired, successOrder, failureOrder) ma_atomic_compare_exchange_strong_explicit_16((ma_uint16*)dst, (ma_uint16*)expected, (ma_uint16)desired, successOrder, failureOrder) +#define ma_atomic_compare_exchange_strong_explicit_i32(dst, expected, desired, successOrder, failureOrder) ma_atomic_compare_exchange_strong_explicit_32((ma_uint32*)dst, (ma_uint32*)expected, (ma_uint32)desired, successOrder, failureOrder) +#define ma_atomic_compare_exchange_strong_explicit_i64(dst, expected, desired, 
successOrder, failureOrder) ma_atomic_compare_exchange_strong_explicit_64((ma_uint64*)dst, (ma_uint64*)expected, (ma_uint64)desired, successOrder, failureOrder) +#define ma_atomic_compare_exchange_weak_explicit_i8( dst, expected, desired, successOrder, failureOrder) ma_atomic_compare_exchange_weak_explicit_8( (ma_uint8* )dst, (ma_uint8* )expected, (ma_uint8 )desired, successOrder, failureOrder) +#define ma_atomic_compare_exchange_weak_explicit_i16(dst, expected, desired, successOrder, failureOrder) ma_atomic_compare_exchange_weak_explicit_16((ma_uint16*)dst, (ma_uint16*)expected, (ma_uint16)desired, successOrder, failureOrder) +#define ma_atomic_compare_exchange_weak_explicit_i32(dst, expected, desired, successOrder, failureOrder) ma_atomic_compare_exchange_weak_explicit_32((ma_uint32*)dst, (ma_uint32*)expected, (ma_uint32)desired, successOrder, failureOrder) +#define ma_atomic_compare_exchange_weak_explicit_i64(dst, expected, desired, successOrder, failureOrder) ma_atomic_compare_exchange_weak_explicit_64((ma_uint64*)dst, (ma_uint64*)expected, (ma_uint64)desired, successOrder, failureOrder) +#define ma_atomic_fetch_add_explicit_i8( dst, src, order) (ma_int8 )ma_atomic_fetch_add_explicit_8( (ma_uint8* )dst, (ma_uint8 )src, order) +#define ma_atomic_fetch_add_explicit_i16(dst, src, order) (ma_int16)ma_atomic_fetch_add_explicit_16((ma_uint16*)dst, (ma_uint16)src, order) +#define ma_atomic_fetch_add_explicit_i32(dst, src, order) (ma_int32)ma_atomic_fetch_add_explicit_32((ma_uint32*)dst, (ma_uint32)src, order) +#define ma_atomic_fetch_add_explicit_i64(dst, src, order) (ma_int64)ma_atomic_fetch_add_explicit_64((ma_uint64*)dst, (ma_uint64)src, order) +#define ma_atomic_fetch_sub_explicit_i8( dst, src, order) (ma_int8 )ma_atomic_fetch_sub_explicit_8( (ma_uint8* )dst, (ma_uint8 )src, order) +#define ma_atomic_fetch_sub_explicit_i16(dst, src, order) (ma_int16)ma_atomic_fetch_sub_explicit_16((ma_uint16*)dst, (ma_uint16)src, order) +#define ma_atomic_fetch_sub_explicit_i32(dst, src, order) (ma_int32)ma_atomic_fetch_sub_explicit_32((ma_uint32*)dst, (ma_uint32)src, order) +#define ma_atomic_fetch_sub_explicit_i64(dst, src, order) (ma_int64)ma_atomic_fetch_sub_explicit_64((ma_uint64*)dst, (ma_uint64)src, order) +#define ma_atomic_fetch_or_explicit_i8( dst, src, order) (ma_int8 )ma_atomic_fetch_or_explicit_8( (ma_uint8* )dst, (ma_uint8 )src, order) +#define ma_atomic_fetch_or_explicit_i16(dst, src, order) (ma_int16)ma_atomic_fetch_or_explicit_16((ma_uint16*)dst, (ma_uint16)src, order) +#define ma_atomic_fetch_or_explicit_i32(dst, src, order) (ma_int32)ma_atomic_fetch_or_explicit_32((ma_uint32*)dst, (ma_uint32)src, order) +#define ma_atomic_fetch_or_explicit_i64(dst, src, order) (ma_int64)ma_atomic_fetch_or_explicit_64((ma_uint64*)dst, (ma_uint64)src, order) +#define ma_atomic_fetch_xor_explicit_i8( dst, src, order) (ma_int8 )ma_atomic_fetch_xor_explicit_8( (ma_uint8* )dst, (ma_uint8 )src, order) +#define ma_atomic_fetch_xor_explicit_i16(dst, src, order) (ma_int16)ma_atomic_fetch_xor_explicit_16((ma_uint16*)dst, (ma_uint16)src, order) +#define ma_atomic_fetch_xor_explicit_i32(dst, src, order) (ma_int32)ma_atomic_fetch_xor_explicit_32((ma_uint32*)dst, (ma_uint32)src, order) +#define ma_atomic_fetch_xor_explicit_i64(dst, src, order) (ma_int64)ma_atomic_fetch_xor_explicit_64((ma_uint64*)dst, (ma_uint64)src, order) +#define ma_atomic_fetch_and_explicit_i8( dst, src, order) (ma_int8 )ma_atomic_fetch_and_explicit_8( (ma_uint8* )dst, (ma_uint8 )src, order) +#define ma_atomic_fetch_and_explicit_i16(dst, src, 
order) (ma_int16)ma_atomic_fetch_and_explicit_16((ma_uint16*)dst, (ma_uint16)src, order) +#define ma_atomic_fetch_and_explicit_i32(dst, src, order) (ma_int32)ma_atomic_fetch_and_explicit_32((ma_uint32*)dst, (ma_uint32)src, order) +#define ma_atomic_fetch_and_explicit_i64(dst, src, order) (ma_int64)ma_atomic_fetch_and_explicit_64((ma_uint64*)dst, (ma_uint64)src, order) +#define ma_atomic_test_and_set_i8( ptr) ma_atomic_test_and_set_explicit_i8( ptr, ma_atomic_memory_order_seq_cst) +#define ma_atomic_test_and_set_i16(ptr) ma_atomic_test_and_set_explicit_i16(ptr, ma_atomic_memory_order_seq_cst) +#define ma_atomic_test_and_set_i32(ptr) ma_atomic_test_and_set_explicit_i32(ptr, ma_atomic_memory_order_seq_cst) +#define ma_atomic_test_and_set_i64(ptr) ma_atomic_test_and_set_explicit_i64(ptr, ma_atomic_memory_order_seq_cst) +#define ma_atomic_clear_i8( ptr) ma_atomic_clear_explicit_i8( ptr, ma_atomic_memory_order_seq_cst) +#define ma_atomic_clear_i16(ptr) ma_atomic_clear_explicit_i16(ptr, ma_atomic_memory_order_seq_cst) +#define ma_atomic_clear_i32(ptr) ma_atomic_clear_explicit_i32(ptr, ma_atomic_memory_order_seq_cst) +#define ma_atomic_clear_i64(ptr) ma_atomic_clear_explicit_i64(ptr, ma_atomic_memory_order_seq_cst) +#define ma_atomic_store_i8( dst, src) ma_atomic_store_explicit_i8( dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_store_i16(dst, src) ma_atomic_store_explicit_i16(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_store_i32(dst, src) ma_atomic_store_explicit_i32(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_store_i64(dst, src) ma_atomic_store_explicit_i64(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_load_i8( ptr) ma_atomic_load_explicit_i8( ptr, ma_atomic_memory_order_seq_cst) +#define ma_atomic_load_i16(ptr) ma_atomic_load_explicit_i16(ptr, ma_atomic_memory_order_seq_cst) +#define ma_atomic_load_i32(ptr) ma_atomic_load_explicit_i32(ptr, ma_atomic_memory_order_seq_cst) +#define ma_atomic_load_i64(ptr) ma_atomic_load_explicit_i64(ptr, ma_atomic_memory_order_seq_cst) +#define ma_atomic_exchange_i8( dst, src) ma_atomic_exchange_explicit_i8( dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_exchange_i16(dst, src) ma_atomic_exchange_explicit_i16(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_exchange_i32(dst, src) ma_atomic_exchange_explicit_i32(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_exchange_i64(dst, src) ma_atomic_exchange_explicit_i64(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_compare_exchange_strong_i8( dst, expected, desired) ma_atomic_compare_exchange_strong_explicit_i8( dst, expected, desired, ma_atomic_memory_order_seq_cst, ma_atomic_memory_order_seq_cst) +#define ma_atomic_compare_exchange_strong_i16(dst, expected, desired) ma_atomic_compare_exchange_strong_explicit_i16(dst, expected, desired, ma_atomic_memory_order_seq_cst, ma_atomic_memory_order_seq_cst) +#define ma_atomic_compare_exchange_strong_i32(dst, expected, desired) ma_atomic_compare_exchange_strong_explicit_i32(dst, expected, desired, ma_atomic_memory_order_seq_cst, ma_atomic_memory_order_seq_cst) +#define ma_atomic_compare_exchange_strong_i64(dst, expected, desired) ma_atomic_compare_exchange_strong_explicit_i64(dst, expected, desired, ma_atomic_memory_order_seq_cst, ma_atomic_memory_order_seq_cst) +#define ma_atomic_compare_exchange_weak_i8( dst, expected, desired) ma_atomic_compare_exchange_weak_explicit_i8( dst, expected, desired, ma_atomic_memory_order_seq_cst, ma_atomic_memory_order_seq_cst) 
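+/*
+Usage sketch for the seq_cst convenience wrappers above (illustrative only; the
+my_counter type and its functions are hypothetical, not part of this header):
+
+    typedef struct { ma_int32 value; } my_counter;
+
+    static void my_counter_increment(my_counter* c)
+    {
+        ma_atomic_fetch_add_i32(&c->value, 1);
+    }
+
+    static ma_int32 my_counter_get(my_counter* c)
+    {
+        return ma_atomic_load_i32(&c->value);
+    }
+*/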
+#define ma_atomic_compare_exchange_weak_i16(dst, expected, desired) ma_atomic_compare_exchange_weak_explicit_i16(dst, expected, desired, ma_atomic_memory_order_seq_cst, ma_atomic_memory_order_seq_cst) +#define ma_atomic_compare_exchange_weak_i32(dst, expected, desired) ma_atomic_compare_exchange_weak_explicit_i32(dst, expected, desired, ma_atomic_memory_order_seq_cst, ma_atomic_memory_order_seq_cst) +#define ma_atomic_compare_exchange_weak_i64(dst, expected, desired) ma_atomic_compare_exchange_weak_explicit_i64(dst, expected, desired, ma_atomic_memory_order_seq_cst, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_add_i8( dst, src) ma_atomic_fetch_add_explicit_i8( dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_add_i16(dst, src) ma_atomic_fetch_add_explicit_i16(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_add_i32(dst, src) ma_atomic_fetch_add_explicit_i32(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_add_i64(dst, src) ma_atomic_fetch_add_explicit_i64(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_sub_i8( dst, src) ma_atomic_fetch_sub_explicit_i8( dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_sub_i16(dst, src) ma_atomic_fetch_sub_explicit_i16(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_sub_i32(dst, src) ma_atomic_fetch_sub_explicit_i32(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_sub_i64(dst, src) ma_atomic_fetch_sub_explicit_i64(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_or_i8( dst, src) ma_atomic_fetch_or_explicit_i8( dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_or_i16(dst, src) ma_atomic_fetch_or_explicit_i16(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_or_i32(dst, src) ma_atomic_fetch_or_explicit_i32(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_or_i64(dst, src) ma_atomic_fetch_or_explicit_i64(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_xor_i8( dst, src) ma_atomic_fetch_xor_explicit_i8( dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_xor_i16(dst, src) ma_atomic_fetch_xor_explicit_i16(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_xor_i32(dst, src) ma_atomic_fetch_xor_explicit_i32(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_xor_i64(dst, src) ma_atomic_fetch_xor_explicit_i64(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_and_i8( dst, src) ma_atomic_fetch_and_explicit_i8( dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_and_i16(dst, src) ma_atomic_fetch_and_explicit_i16(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_and_i32(dst, src) ma_atomic_fetch_and_explicit_i32(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_and_i64(dst, src) ma_atomic_fetch_and_explicit_i64(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_compare_and_swap_i8( dst, expected, desired) (ma_int8 )ma_atomic_compare_and_swap_8( (ma_uint8* )dst, (ma_uint8 )expected, (ma_uint8 )desired) +#define ma_atomic_compare_and_swap_i16(dst, expected, desired) (ma_int16)ma_atomic_compare_and_swap_16((ma_uint16*)dst, (ma_uint16)expected, (ma_uint16)desired) +#define ma_atomic_compare_and_swap_i32(dst, expected, desired) (ma_int32)ma_atomic_compare_and_swap_32((ma_uint32*)dst, (ma_uint32)expected, (ma_uint32)desired) +#define ma_atomic_compare_and_swap_i64(dst, expected, desired) 
(ma_int64)ma_atomic_compare_and_swap_64((ma_uint64*)dst, (ma_uint64)expected, (ma_uint64)dedsired) +typedef union +{ + ma_uint32 i; + float f; +} ma_atomic_if32; +typedef union +{ + ma_uint64 i; + double f; +} ma_atomic_if64; +#define ma_atomic_clear_explicit_f32(ptr, order) ma_atomic_clear_explicit_32((ma_uint32*)ptr, order) +#define ma_atomic_clear_explicit_f64(ptr, order) ma_atomic_clear_explicit_64((ma_uint64*)ptr, order) +static MA_INLINE void ma_atomic_store_explicit_f32(volatile float* dst, float src, ma_atomic_memory_order order) +{ + ma_atomic_if32 x; + x.f = src; + ma_atomic_store_explicit_32((volatile ma_uint32*)dst, x.i, order); +} +static MA_INLINE void ma_atomic_store_explicit_f64(volatile double* dst, double src, ma_atomic_memory_order order) +{ + ma_atomic_if64 x; + x.f = src; + ma_atomic_store_explicit_64((volatile ma_uint64*)dst, x.i, order); +} +static MA_INLINE float ma_atomic_load_explicit_f32(volatile const float* ptr, ma_atomic_memory_order order) +{ + ma_atomic_if32 r; + r.i = ma_atomic_load_explicit_32((volatile const ma_uint32*)ptr, order); + return r.f; +} +static MA_INLINE double ma_atomic_load_explicit_f64(volatile const double* ptr, ma_atomic_memory_order order) +{ + ma_atomic_if64 r; + r.i = ma_atomic_load_explicit_64((volatile const ma_uint64*)ptr, order); + return r.f; +} +static MA_INLINE float ma_atomic_exchange_explicit_f32(volatile float* dst, float src, ma_atomic_memory_order order) +{ + ma_atomic_if32 r; + ma_atomic_if32 x; + x.f = src; + r.i = ma_atomic_exchange_explicit_32((volatile ma_uint32*)dst, x.i, order); + return r.f; +} +static MA_INLINE double ma_atomic_exchange_explicit_f64(volatile double* dst, double src, ma_atomic_memory_order order) +{ + ma_atomic_if64 r; + ma_atomic_if64 x; + x.f = src; + r.i = ma_atomic_exchange_explicit_64((volatile ma_uint64*)dst, x.i, order); + return r.f; +} +static MA_INLINE ma_bool32 ma_atomic_compare_exchange_strong_explicit_f32(volatile float* dst, float* expected, float desired, ma_atomic_memory_order successOrder, ma_atomic_memory_order failureOrder) +{ + ma_atomic_if32 d; + d.f = desired; + return ma_atomic_compare_exchange_strong_explicit_32((volatile ma_uint32*)dst, (ma_uint32*)expected, d.i, successOrder, failureOrder); +} +static MA_INLINE ma_bool32 ma_atomic_compare_exchange_strong_explicit_f64(volatile double* dst, double* expected, double desired, ma_atomic_memory_order successOrder, ma_atomic_memory_order failureOrder) +{ + ma_atomic_if64 d; + d.f = desired; + return ma_atomic_compare_exchange_strong_explicit_64((volatile ma_uint64*)dst, (ma_uint64*)expected, d.i, successOrder, failureOrder); +} +static MA_INLINE ma_bool32 ma_atomic_compare_exchange_weak_explicit_f32(volatile float* dst, float* expected, float desired, ma_atomic_memory_order successOrder, ma_atomic_memory_order failureOrder) +{ + ma_atomic_if32 d; + d.f = desired; + return ma_atomic_compare_exchange_weak_explicit_32((volatile ma_uint32*)dst, (ma_uint32*)expected, d.i, successOrder, failureOrder); +} +static MA_INLINE ma_bool32 ma_atomic_compare_exchange_weak_explicit_f64(volatile double* dst, double* expected, double desired, ma_atomic_memory_order successOrder, ma_atomic_memory_order failureOrder) +{ + ma_atomic_if64 d; + d.f = desired; + return ma_atomic_compare_exchange_weak_explicit_64((volatile ma_uint64*)dst, (ma_uint64*)expected, d.i, successOrder, failureOrder); +} +static MA_INLINE float ma_atomic_fetch_add_explicit_f32(volatile float* dst, float src, ma_atomic_memory_order order) +{ + ma_atomic_if32 r; + ma_atomic_if32 x; 
+ x.f = src; + r.i = ma_atomic_fetch_add_explicit_32((volatile ma_uint32*)dst, x.i, order); + return r.f; +} +static MA_INLINE double ma_atomic_fetch_add_explicit_f64(volatile double* dst, double src, ma_atomic_memory_order order) +{ + ma_atomic_if64 r; + ma_atomic_if64 x; + x.f = src; + r.i = ma_atomic_fetch_add_explicit_64((volatile ma_uint64*)dst, x.i, order); + return r.f; +} +static MA_INLINE float ma_atomic_fetch_sub_explicit_f32(volatile float* dst, float src, ma_atomic_memory_order order) +{ + ma_atomic_if32 r; + ma_atomic_if32 x; + x.f = src; + r.i = ma_atomic_fetch_sub_explicit_32((volatile ma_uint32*)dst, x.i, order); + return r.f; +} +static MA_INLINE double ma_atomic_fetch_sub_explicit_f64(volatile double* dst, double src, ma_atomic_memory_order order) +{ + ma_atomic_if64 r; + ma_atomic_if64 x; + x.f = src; + r.i = ma_atomic_fetch_sub_explicit_64((volatile ma_uint64*)dst, x.i, order); + return r.f; +} +static MA_INLINE float ma_atomic_fetch_or_explicit_f32(volatile float* dst, float src, ma_atomic_memory_order order) +{ + ma_atomic_if32 r; + ma_atomic_if32 x; + x.f = src; + r.i = ma_atomic_fetch_or_explicit_32((volatile ma_uint32*)dst, x.i, order); + return r.f; +} +static MA_INLINE double ma_atomic_fetch_or_explicit_f64(volatile double* dst, double src, ma_atomic_memory_order order) +{ + ma_atomic_if64 r; + ma_atomic_if64 x; + x.f = src; + r.i = ma_atomic_fetch_or_explicit_64((volatile ma_uint64*)dst, x.i, order); + return r.f; +} +static MA_INLINE float ma_atomic_fetch_xor_explicit_f32(volatile float* dst, float src, ma_atomic_memory_order order) +{ + ma_atomic_if32 r; + ma_atomic_if32 x; + x.f = src; + r.i = ma_atomic_fetch_xor_explicit_32((volatile ma_uint32*)dst, x.i, order); + return r.f; +} +static MA_INLINE double ma_atomic_fetch_xor_explicit_f64(volatile double* dst, double src, ma_atomic_memory_order order) +{ + ma_atomic_if64 r; + ma_atomic_if64 x; + x.f = src; + r.i = ma_atomic_fetch_xor_explicit_64((volatile ma_uint64*)dst, x.i, order); + return r.f; +} +static MA_INLINE float ma_atomic_fetch_and_explicit_f32(volatile float* dst, float src, ma_atomic_memory_order order) +{ + ma_atomic_if32 r; + ma_atomic_if32 x; + x.f = src; + r.i = ma_atomic_fetch_and_explicit_32((volatile ma_uint32*)dst, x.i, order); + return r.f; +} +static MA_INLINE double ma_atomic_fetch_and_explicit_f64(volatile double* dst, double src, ma_atomic_memory_order order) +{ + ma_atomic_if64 r; + ma_atomic_if64 x; + x.f = src; + r.i = ma_atomic_fetch_and_explicit_64((volatile ma_uint64*)dst, x.i, order); + return r.f; +} +#define ma_atomic_clear_f32(ptr) (float )ma_atomic_clear_explicit_f32(ptr, ma_atomic_memory_order_seq_cst) +#define ma_atomic_clear_f64(ptr) (double)ma_atomic_clear_explicit_f64(ptr, ma_atomic_memory_order_seq_cst) +#define ma_atomic_store_f32(dst, src) ma_atomic_store_explicit_f32(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_store_f64(dst, src) ma_atomic_store_explicit_f64(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_load_f32(ptr) (float )ma_atomic_load_explicit_f32(ptr, ma_atomic_memory_order_seq_cst) +#define ma_atomic_load_f64(ptr) (double)ma_atomic_load_explicit_f64(ptr, ma_atomic_memory_order_seq_cst) +#define ma_atomic_exchange_f32(dst, src) (float )ma_atomic_exchange_explicit_f32(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_exchange_f64(dst, src) (double)ma_atomic_exchange_explicit_f64(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_compare_exchange_strong_f32(dst, expected, desired) 
ma_atomic_compare_exchange_strong_explicit_f32(dst, expected, desired, ma_atomic_memory_order_seq_cst, ma_atomic_memory_order_seq_cst) +#define ma_atomic_compare_exchange_strong_f64(dst, expected, desired) ma_atomic_compare_exchange_strong_explicit_f64(dst, expected, desired, ma_atomic_memory_order_seq_cst, ma_atomic_memory_order_seq_cst) +#define ma_atomic_compare_exchange_weak_f32(dst, expected, desired) ma_atomic_compare_exchange_weak_explicit_f32(dst, expected, desired, ma_atomic_memory_order_seq_cst, ma_atomic_memory_order_seq_cst) +#define ma_atomic_compare_exchange_weak_f64(dst, expected, desired) ma_atomic_compare_exchange_weak_explicit_f64(dst, expected, desired, ma_atomic_memory_order_seq_cst, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_add_f32(dst, src) ma_atomic_fetch_add_explicit_f32(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_add_f64(dst, src) ma_atomic_fetch_add_explicit_f64(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_sub_f32(dst, src) ma_atomic_fetch_sub_explicit_f32(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_sub_f64(dst, src) ma_atomic_fetch_sub_explicit_f64(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_or_f32(dst, src) ma_atomic_fetch_or_explicit_f32(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_or_f64(dst, src) ma_atomic_fetch_or_explicit_f64(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_xor_f32(dst, src) ma_atomic_fetch_xor_explicit_f32(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_xor_f64(dst, src) ma_atomic_fetch_xor_explicit_f64(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_and_f32(dst, src) ma_atomic_fetch_and_explicit_f32(dst, src, ma_atomic_memory_order_seq_cst) +#define ma_atomic_fetch_and_f64(dst, src) ma_atomic_fetch_and_explicit_f64(dst, src, ma_atomic_memory_order_seq_cst) +static MA_INLINE float ma_atomic_compare_and_swap_f32(volatile float* dst, float expected, float desired) +{ + ma_atomic_if32 r; + ma_atomic_if32 e, d; + e.f = expected; + d.f = desired; + r.i = ma_atomic_compare_and_swap_32((volatile ma_uint32*)dst, e.i, d.i); + return r.f; +} +static MA_INLINE double ma_atomic_compare_and_swap_f64(volatile double* dst, double expected, double desired) +{ + ma_atomic_if64 r; + ma_atomic_if64 e, d; + e.f = expected; + d.f = desired; + r.i = ma_atomic_compare_and_swap_64((volatile ma_uint64*)dst, e.i, d.i); + return r.f; +} +typedef ma_atomic_flag ma_atomic_spinlock; +static MA_INLINE void ma_atomic_spinlock_lock(volatile ma_atomic_spinlock* pSpinlock) +{ + for (;;) { + if (ma_atomic_flag_test_and_set_explicit(pSpinlock, ma_atomic_memory_order_acquire) == 0) { + break; + } + while (ma_atomic_flag_load_explicit(pSpinlock, ma_atomic_memory_order_relaxed) == 1) { + } + } +} +static MA_INLINE void ma_atomic_spinlock_unlock(volatile ma_atomic_spinlock* pSpinlock) +{ + ma_atomic_flag_clear_explicit(pSpinlock, ma_atomic_memory_order_release); +} +#if defined(__clang__) || (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6))) + #pragma GCC diagnostic pop +#endif +#if defined(__cplusplus) +} +#endif +#endif +/* c89atomic.h end */ + +#define MA_ATOMIC_SAFE_TYPE_IMPL(c89TypeExtension, type) \ + static MA_INLINE ma_##type ma_atomic_##type##_get(ma_atomic_##type* x) \ + { \ + return (ma_##type)ma_atomic_load_##c89TypeExtension(&x->value); \ + } \ + static MA_INLINE void ma_atomic_##type##_set(ma_atomic_##type* x, ma_##type value) \ + { \ + 
ma_atomic_store_##c89TypeExtension(&x->value, value); \ + } \ + static MA_INLINE ma_##type ma_atomic_##type##_exchange(ma_atomic_##type* x, ma_##type value) \ + { \ + return (ma_##type)ma_atomic_exchange_##c89TypeExtension(&x->value, value); \ + } \ + static MA_INLINE ma_bool32 ma_atomic_##type##_compare_exchange(ma_atomic_##type* x, ma_##type* expected, ma_##type desired) \ + { \ + return ma_atomic_compare_exchange_weak_##c89TypeExtension(&x->value, expected, desired); \ + } \ + static MA_INLINE ma_##type ma_atomic_##type##_fetch_add(ma_atomic_##type* x, ma_##type y) \ + { \ + return (ma_##type)ma_atomic_fetch_add_##c89TypeExtension(&x->value, y); \ + } \ + static MA_INLINE ma_##type ma_atomic_##type##_fetch_sub(ma_atomic_##type* x, ma_##type y) \ + { \ + return (ma_##type)ma_atomic_fetch_sub_##c89TypeExtension(&x->value, y); \ + } \ + static MA_INLINE ma_##type ma_atomic_##type##_fetch_or(ma_atomic_##type* x, ma_##type y) \ + { \ + return (ma_##type)ma_atomic_fetch_or_##c89TypeExtension(&x->value, y); \ + } \ + static MA_INLINE ma_##type ma_atomic_##type##_fetch_xor(ma_atomic_##type* x, ma_##type y) \ + { \ + return (ma_##type)ma_atomic_fetch_xor_##c89TypeExtension(&x->value, y); \ + } \ + static MA_INLINE ma_##type ma_atomic_##type##_fetch_and(ma_atomic_##type* x, ma_##type y) \ + { \ + return (ma_##type)ma_atomic_fetch_and_##c89TypeExtension(&x->value, y); \ + } \ + static MA_INLINE ma_##type ma_atomic_##type##_compare_and_swap(ma_atomic_##type* x, ma_##type expected, ma_##type desired) \ + { \ + return (ma_##type)ma_atomic_compare_and_swap_##c89TypeExtension(&x->value, expected, desired); \ + } \ + +#define MA_ATOMIC_SAFE_TYPE_IMPL_PTR(type) \ + static MA_INLINE ma_##type* ma_atomic_ptr_##type##_get(ma_atomic_ptr_##type* x) \ + { \ + return ma_atomic_load_ptr((void**)&x->value); \ + } \ + static MA_INLINE void ma_atomic_ptr_##type##_set(ma_atomic_ptr_##type* x, ma_##type* value) \ + { \ + ma_atomic_store_ptr((void**)&x->value, (void*)value); \ + } \ + static MA_INLINE ma_##type* ma_atomic_ptr_##type##_exchange(ma_atomic_ptr_##type* x, ma_##type* value) \ + { \ + return ma_atomic_exchange_ptr((void**)&x->value, (void*)value); \ + } \ + static MA_INLINE ma_bool32 ma_atomic_ptr_##type##_compare_exchange(ma_atomic_ptr_##type* x, ma_##type** expected, ma_##type* desired) \ + { \ + return ma_atomic_compare_exchange_weak_ptr((void**)&x->value, (void*)expected, (void*)desired); \ + } \ + static MA_INLINE ma_##type* ma_atomic_ptr_##type##_compare_and_swap(ma_atomic_ptr_##type* x, ma_##type* expected, ma_##type* desired) \ + { \ + return (ma_##type*)ma_atomic_compare_and_swap_ptr((void**)&x->value, (void*)expected, (void*)desired); \ + } \ + +MA_ATOMIC_SAFE_TYPE_IMPL(32, uint32) +MA_ATOMIC_SAFE_TYPE_IMPL(i32, int32) +MA_ATOMIC_SAFE_TYPE_IMPL(64, uint64) +MA_ATOMIC_SAFE_TYPE_IMPL(f32, float) +MA_ATOMIC_SAFE_TYPE_IMPL(32, bool32) + +#if !defined(MA_NO_DEVICE_IO) +MA_ATOMIC_SAFE_TYPE_IMPL(i32, device_state) +#endif + + +MA_API ma_uint64 ma_calculate_frame_count_after_resampling(ma_uint32 sampleRateOut, ma_uint32 sampleRateIn, ma_uint64 frameCountIn) +{ + /* This is based on the calculation in ma_linear_resampler_get_expected_output_frame_count(). 
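+
+    A quick sketch of what the arithmetic below does: the output count is the integer
+    (floored) quotient frameCountIn * sampleRateOut / sampleRateIn. The code then
+    reconstructs, from that output count, how many input frames it would consume; if that
+    preliminary input count does not exceed frameCountIn there is room for one more output
+    frame, so the count is bumped by one. For example, converting 1000 frames from 44100 Hz
+    to 48000 Hz: 1000 * 48000 / 44100 floors to 1088, the reconstructed input count is
+    999 <= 1000, and the function returns 1089.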
*/
+    ma_uint64 outputFrameCount;
+    ma_uint64 preliminaryInputFrameCountFromFrac;
+    ma_uint64 preliminaryInputFrameCount;
+
+    if (sampleRateIn == 0 || sampleRateOut == 0 || frameCountIn == 0) {
+        return 0;
+    }
+
+    if (sampleRateOut == sampleRateIn) {
+        return frameCountIn;
+    }
+
+    outputFrameCount = (frameCountIn * sampleRateOut) / sampleRateIn;
+
+    preliminaryInputFrameCountFromFrac = (outputFrameCount * (sampleRateIn % sampleRateOut)) / sampleRateOut;
+    preliminaryInputFrameCount = (outputFrameCount * (sampleRateIn / sampleRateOut)) + preliminaryInputFrameCountFromFrac;
+
+    if (preliminaryInputFrameCount <= frameCountIn) {
+        outputFrameCount += 1;
+    }
+
+    return outputFrameCount;
+}
+
+#ifndef MA_DATA_CONVERTER_STACK_BUFFER_SIZE
+#define MA_DATA_CONVERTER_STACK_BUFFER_SIZE 4096
+#endif
+
+
+
+#if defined(MA_WIN32)
+static ma_result ma_result_from_GetLastError(DWORD error)
+{
+    switch (error)
+    {
+        case ERROR_SUCCESS:             return MA_SUCCESS;
+        case ERROR_PATH_NOT_FOUND:      return MA_DOES_NOT_EXIST;
+        case ERROR_TOO_MANY_OPEN_FILES: return MA_TOO_MANY_OPEN_FILES;
+        case ERROR_NOT_ENOUGH_MEMORY:   return MA_OUT_OF_MEMORY;
+        case ERROR_DISK_FULL:           return MA_NO_SPACE;
+        case ERROR_HANDLE_EOF:          return MA_AT_END;
+        case ERROR_NEGATIVE_SEEK:       return MA_BAD_SEEK;
+        case ERROR_INVALID_PARAMETER:   return MA_INVALID_ARGS;
+        case ERROR_ACCESS_DENIED:       return MA_ACCESS_DENIED;
+        case ERROR_SEM_TIMEOUT:         return MA_TIMEOUT;
+        case ERROR_FILE_NOT_FOUND:      return MA_DOES_NOT_EXIST;
+        default: break;
+    }
+
+    return MA_ERROR;
+}
+#endif  /* MA_WIN32 */
+
+
+/*******************************************************************************
+
+Threading
+
+*******************************************************************************/
+static MA_INLINE ma_result ma_spinlock_lock_ex(volatile ma_spinlock* pSpinlock, ma_bool32 yield)
+{
+    if (pSpinlock == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    for (;;) {
+        if (ma_atomic_exchange_explicit_32(pSpinlock, 1, ma_atomic_memory_order_acquire) == 0) {
+            break;
+        }
+
+        while (ma_atomic_load_explicit_32(pSpinlock, ma_atomic_memory_order_relaxed) == 1) {
+            if (yield) {
+                ma_yield();
+            }
+        }
+    }
+
+    return MA_SUCCESS;
+}
+
+MA_API ma_result ma_spinlock_lock(volatile ma_spinlock* pSpinlock)
+{
+    return ma_spinlock_lock_ex(pSpinlock, MA_TRUE);
+}
+
+MA_API ma_result ma_spinlock_lock_noyield(volatile ma_spinlock* pSpinlock)
+{
+    return ma_spinlock_lock_ex(pSpinlock, MA_FALSE);
+}
+
+MA_API ma_result ma_spinlock_unlock(volatile ma_spinlock* pSpinlock)
+{
+    if (pSpinlock == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    ma_atomic_store_explicit_32(pSpinlock, 0, ma_atomic_memory_order_release);
+    return MA_SUCCESS;
+}
+
+
+#ifndef MA_NO_THREADING
+#if defined(MA_POSIX)
+    #define MA_THREADCALL
+    typedef void* ma_thread_result;
+#elif defined(MA_WIN32)
+    #define MA_THREADCALL WINAPI
+    typedef unsigned long ma_thread_result;
+#endif
+
+typedef ma_thread_result (MA_THREADCALL * ma_thread_entry_proc)(void* pData);
+
+#ifdef MA_POSIX
+static ma_result ma_thread_create__posix(ma_thread* pThread, ma_thread_priority priority, size_t stackSize, ma_thread_entry_proc entryProc, void* pData)
+{
+    int result;
+    pthread_attr_t* pAttr = NULL;
+
+#if !defined(__EMSCRIPTEN__) && !defined(__3DS__)
+    /* Try setting the thread priority. It's not critical if anything fails here. */
+    pthread_attr_t attr;
+    if (pthread_attr_init(&attr) == 0) {
+        int scheduler = -1;
+
+        /* We successfully initialized our attributes object so we can assign the pointer so it's passed into pthread_create().
*/ + pAttr = &attr; + + /* We need to set the scheduler policy. Only do this if the OS supports pthread_attr_setschedpolicy() */ + #if !defined(MA_BEOS) + { + if (priority == ma_thread_priority_idle) { + #ifdef SCHED_IDLE + if (pthread_attr_setschedpolicy(&attr, SCHED_IDLE) == 0) { + scheduler = SCHED_IDLE; + } + #endif + } else if (priority == ma_thread_priority_realtime) { + #ifdef SCHED_FIFO + if (pthread_attr_setschedpolicy(&attr, SCHED_FIFO) == 0) { + scheduler = SCHED_FIFO; + } + #endif + #ifdef MA_LINUX + } else { + scheduler = sched_getscheduler(0); + #endif + } + } + #endif + + if (stackSize > 0) { + pthread_attr_setstacksize(&attr, stackSize); + } + + if (scheduler != -1) { + int priorityMin = sched_get_priority_min(scheduler); + int priorityMax = sched_get_priority_max(scheduler); + int priorityStep = (priorityMax - priorityMin) / 7; /* 7 = number of priorities supported by miniaudio. */ + + struct sched_param sched; + if (pthread_attr_getschedparam(&attr, &sched) == 0) { + if (priority == ma_thread_priority_idle) { + sched.sched_priority = priorityMin; + } else if (priority == ma_thread_priority_realtime) { + #if defined(MA_PTHREAD_REALTIME_THREAD_PRIORITY) + { + sched.sched_priority = MA_PTHREAD_REALTIME_THREAD_PRIORITY; + } + #else + { + sched.sched_priority = priorityMax; + } + #endif + } else { + sched.sched_priority += ((int)priority + 5) * priorityStep; /* +5 because the lowest priority is -5. */ + } + + if (sched.sched_priority < priorityMin) { + sched.sched_priority = priorityMin; + } + if (sched.sched_priority > priorityMax) { + sched.sched_priority = priorityMax; + } + + /* I'm not treating a failure of setting the priority as a critical error so not aborting on failure here. */ + if (pthread_attr_setschedparam(&attr, &sched) == 0) { + #if !defined(MA_ANDROID) || (defined(__ANDROID_API__) && __ANDROID_API__ >= 28) + { + pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED); + } + #endif + } + } + } + } +#else + /* It's the emscripten build. We'll have a few unused parameters. */ + (void)priority; + (void)stackSize; +#endif + + result = pthread_create((pthread_t*)pThread, pAttr, entryProc, pData); + + /* The thread attributes object is no longer required. 
*/ + if (pAttr != NULL) { + pthread_attr_destroy(pAttr); + } + + if (result != 0) { + return ma_result_from_errno(result); + } + + return MA_SUCCESS; +} + +static void ma_thread_wait__posix(ma_thread* pThread) +{ + pthread_join((pthread_t)*pThread, NULL); +} + + +static ma_result ma_mutex_init__posix(ma_mutex* pMutex) +{ + int result; + + if (pMutex == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pMutex); + + result = pthread_mutex_init((pthread_mutex_t*)pMutex, NULL); + if (result != 0) { + return ma_result_from_errno(result); + } + + return MA_SUCCESS; +} + +static void ma_mutex_uninit__posix(ma_mutex* pMutex) +{ + pthread_mutex_destroy((pthread_mutex_t*)pMutex); +} + +static void ma_mutex_lock__posix(ma_mutex* pMutex) +{ + pthread_mutex_lock((pthread_mutex_t*)pMutex); +} + +static void ma_mutex_unlock__posix(ma_mutex* pMutex) +{ + pthread_mutex_unlock((pthread_mutex_t*)pMutex); +} + + +static ma_result ma_event_init__posix(ma_event* pEvent) +{ + int result; + + result = pthread_mutex_init((pthread_mutex_t*)&pEvent->lock, NULL); + if (result != 0) { + return ma_result_from_errno(result); + } + + result = pthread_cond_init((pthread_cond_t*)&pEvent->cond, NULL); + if (result != 0) { + pthread_mutex_destroy((pthread_mutex_t*)&pEvent->lock); + return ma_result_from_errno(result); + } + + pEvent->value = 0; + return MA_SUCCESS; +} + +static void ma_event_uninit__posix(ma_event* pEvent) +{ + pthread_cond_destroy((pthread_cond_t*)&pEvent->cond); + pthread_mutex_destroy((pthread_mutex_t*)&pEvent->lock); +} + +static ma_result ma_event_wait__posix(ma_event* pEvent) +{ + pthread_mutex_lock((pthread_mutex_t*)&pEvent->lock); + { + while (pEvent->value == 0) { + pthread_cond_wait((pthread_cond_t*)&pEvent->cond, (pthread_mutex_t*)&pEvent->lock); + } + pEvent->value = 0; /* Auto-reset. */ + } + pthread_mutex_unlock((pthread_mutex_t*)&pEvent->lock); + + return MA_SUCCESS; +} + +static ma_result ma_event_signal__posix(ma_event* pEvent) +{ + pthread_mutex_lock((pthread_mutex_t*)&pEvent->lock); + { + pEvent->value = 1; + pthread_cond_signal((pthread_cond_t*)&pEvent->cond); + } + pthread_mutex_unlock((pthread_mutex_t*)&pEvent->lock); + + return MA_SUCCESS; +} + + +static ma_result ma_semaphore_init__posix(int initialValue, ma_semaphore* pSemaphore) +{ + int result; + + if (pSemaphore == NULL) { + return MA_INVALID_ARGS; + } + + pSemaphore->value = initialValue; + + result = pthread_mutex_init((pthread_mutex_t*)&pSemaphore->lock, NULL); + if (result != 0) { + return ma_result_from_errno(result); /* Failed to create mutex. */ + } + + result = pthread_cond_init((pthread_cond_t*)&pSemaphore->cond, NULL); + if (result != 0) { + pthread_mutex_destroy((pthread_mutex_t*)&pSemaphore->lock); + return ma_result_from_errno(result); /* Failed to create condition variable. */ + } + + return MA_SUCCESS; +} + +static void ma_semaphore_uninit__posix(ma_semaphore* pSemaphore) +{ + if (pSemaphore == NULL) { + return; + } + + pthread_cond_destroy((pthread_cond_t*)&pSemaphore->cond); + pthread_mutex_destroy((pthread_mutex_t*)&pSemaphore->lock); +} + +static ma_result ma_semaphore_wait__posix(ma_semaphore* pSemaphore) +{ + if (pSemaphore == NULL) { + return MA_INVALID_ARGS; + } + + pthread_mutex_lock((pthread_mutex_t*)&pSemaphore->lock); + { + /* We need to wait on a condition variable before escaping. We can't return from this function until the semaphore has been signaled. 
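+
+    This is the textbook way of building a counting semaphore out of a mutex and a
+    condition variable: the mutex guards the counter, the while loop re-checks the
+    predicate after every wakeup (which also guards against spurious wakeups), and the
+    counter is only decremented once it's known to be non-zero.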
*/ + while (pSemaphore->value == 0) { + pthread_cond_wait((pthread_cond_t*)&pSemaphore->cond, (pthread_mutex_t*)&pSemaphore->lock); + } + + pSemaphore->value -= 1; + } + pthread_mutex_unlock((pthread_mutex_t*)&pSemaphore->lock); + + return MA_SUCCESS; +} + +static ma_result ma_semaphore_release__posix(ma_semaphore* pSemaphore) +{ + if (pSemaphore == NULL) { + return MA_INVALID_ARGS; + } + + pthread_mutex_lock((pthread_mutex_t*)&pSemaphore->lock); + { + pSemaphore->value += 1; + pthread_cond_signal((pthread_cond_t*)&pSemaphore->cond); + } + pthread_mutex_unlock((pthread_mutex_t*)&pSemaphore->lock); + + return MA_SUCCESS; +} +#elif defined(MA_WIN32) +static int ma_thread_priority_to_win32(ma_thread_priority priority) +{ + switch (priority) { + case ma_thread_priority_idle: return THREAD_PRIORITY_IDLE; + case ma_thread_priority_lowest: return THREAD_PRIORITY_LOWEST; + case ma_thread_priority_low: return THREAD_PRIORITY_BELOW_NORMAL; + case ma_thread_priority_normal: return THREAD_PRIORITY_NORMAL; + case ma_thread_priority_high: return THREAD_PRIORITY_ABOVE_NORMAL; + case ma_thread_priority_highest: return THREAD_PRIORITY_HIGHEST; + case ma_thread_priority_realtime: return THREAD_PRIORITY_TIME_CRITICAL; + default: return THREAD_PRIORITY_NORMAL; + } +} + +static ma_result ma_thread_create__win32(ma_thread* pThread, ma_thread_priority priority, size_t stackSize, ma_thread_entry_proc entryProc, void* pData) +{ + DWORD threadID; /* Not used. Only used for passing into CreateThread() so it doesn't fail on Windows 98. */ + + *pThread = CreateThread(NULL, stackSize, entryProc, pData, 0, &threadID); + if (*pThread == NULL) { + return ma_result_from_GetLastError(GetLastError()); + } + + SetThreadPriority((HANDLE)*pThread, ma_thread_priority_to_win32(priority)); + + return MA_SUCCESS; +} + +static void ma_thread_wait__win32(ma_thread* pThread) +{ + WaitForSingleObject((HANDLE)*pThread, INFINITE); + CloseHandle((HANDLE)*pThread); +} + + +static ma_result ma_mutex_init__win32(ma_mutex* pMutex) +{ + *pMutex = CreateEventA(NULL, FALSE, TRUE, NULL); + if (*pMutex == NULL) { + return ma_result_from_GetLastError(GetLastError()); + } + + return MA_SUCCESS; +} + +static void ma_mutex_uninit__win32(ma_mutex* pMutex) +{ + CloseHandle((HANDLE)*pMutex); +} + +static void ma_mutex_lock__win32(ma_mutex* pMutex) +{ + WaitForSingleObject((HANDLE)*pMutex, INFINITE); +} + +static void ma_mutex_unlock__win32(ma_mutex* pMutex) +{ + SetEvent((HANDLE)*pMutex); +} + + +static ma_result ma_event_init__win32(ma_event* pEvent) +{ + *pEvent = CreateEventA(NULL, FALSE, FALSE, NULL); + if (*pEvent == NULL) { + return ma_result_from_GetLastError(GetLastError()); + } + + return MA_SUCCESS; +} + +static void ma_event_uninit__win32(ma_event* pEvent) +{ + CloseHandle((HANDLE)*pEvent); +} + +static ma_result ma_event_wait__win32(ma_event* pEvent) +{ + DWORD result = WaitForSingleObject((HANDLE)*pEvent, INFINITE); + if (result == WAIT_OBJECT_0) { + return MA_SUCCESS; + } + + if (result == WAIT_TIMEOUT) { + return MA_TIMEOUT; + } + + return ma_result_from_GetLastError(GetLastError()); +} + +static ma_result ma_event_signal__win32(ma_event* pEvent) +{ + BOOL result = SetEvent((HANDLE)*pEvent); + if (result == 0) { + return ma_result_from_GetLastError(GetLastError()); + } + + return MA_SUCCESS; +} + + +static ma_result ma_semaphore_init__win32(int initialValue, ma_semaphore* pSemaphore) +{ + *pSemaphore = CreateSemaphoreW(NULL, (LONG)initialValue, LONG_MAX, NULL); + if (*pSemaphore == NULL) { + return 
ma_result_from_GetLastError(GetLastError()); + } + + return MA_SUCCESS; +} + +static void ma_semaphore_uninit__win32(ma_semaphore* pSemaphore) +{ + CloseHandle((HANDLE)*pSemaphore); +} + +static ma_result ma_semaphore_wait__win32(ma_semaphore* pSemaphore) +{ + DWORD result = WaitForSingleObject((HANDLE)*pSemaphore, INFINITE); + if (result == WAIT_OBJECT_0) { + return MA_SUCCESS; + } + + if (result == WAIT_TIMEOUT) { + return MA_TIMEOUT; + } + + return ma_result_from_GetLastError(GetLastError()); +} + +static ma_result ma_semaphore_release__win32(ma_semaphore* pSemaphore) +{ + BOOL result = ReleaseSemaphore((HANDLE)*pSemaphore, 1, NULL); + if (result == 0) { + return ma_result_from_GetLastError(GetLastError()); + } + + return MA_SUCCESS; +} +#endif + +typedef struct +{ + ma_thread_entry_proc entryProc; + void* pData; + ma_allocation_callbacks allocationCallbacks; +} ma_thread_proxy_data; + +static ma_thread_result MA_THREADCALL ma_thread_entry_proxy(void* pData) +{ + ma_thread_proxy_data* pProxyData = (ma_thread_proxy_data*)pData; + ma_thread_entry_proc entryProc; + void* pEntryProcData; + ma_thread_result result; + + #if defined(MA_ON_THREAD_ENTRY) + MA_ON_THREAD_ENTRY + #endif + + entryProc = pProxyData->entryProc; + pEntryProcData = pProxyData->pData; + + /* Free the proxy data before getting into the real thread entry proc. */ + ma_free(pProxyData, &pProxyData->allocationCallbacks); + + result = entryProc(pEntryProcData); + + #if defined(MA_ON_THREAD_EXIT) + MA_ON_THREAD_EXIT + #endif + + return result; +} + +static ma_result ma_thread_create(ma_thread* pThread, ma_thread_priority priority, size_t stackSize, ma_thread_entry_proc entryProc, void* pData, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_result result; + ma_thread_proxy_data* pProxyData; + + if (pThread == NULL || entryProc == NULL) { + return MA_INVALID_ARGS; + } + + pProxyData = (ma_thread_proxy_data*)ma_malloc(sizeof(*pProxyData), pAllocationCallbacks); /* Will be freed by the proxy entry proc. */ + if (pProxyData == NULL) { + return MA_OUT_OF_MEMORY; + } + +#if defined(MA_THREAD_DEFAULT_STACK_SIZE) + if (stackSize == 0) { + stackSize = MA_THREAD_DEFAULT_STACK_SIZE; + } +#endif + + pProxyData->entryProc = entryProc; + pProxyData->pData = pData; + ma_allocation_callbacks_init_copy(&pProxyData->allocationCallbacks, pAllocationCallbacks); + +#if defined(MA_POSIX) + result = ma_thread_create__posix(pThread, priority, stackSize, ma_thread_entry_proxy, pProxyData); +#elif defined(MA_WIN32) + result = ma_thread_create__win32(pThread, priority, stackSize, ma_thread_entry_proxy, pProxyData); +#endif + + if (result != MA_SUCCESS) { + ma_free(pProxyData, pAllocationCallbacks); + return result; + } + + return MA_SUCCESS; +} + +static void ma_thread_wait(ma_thread* pThread) +{ + if (pThread == NULL) { + return; + } + +#if defined(MA_POSIX) + ma_thread_wait__posix(pThread); +#elif defined(MA_WIN32) + ma_thread_wait__win32(pThread); +#endif +} + + +MA_API ma_result ma_mutex_init(ma_mutex* pMutex) +{ + if (pMutex == NULL) { + MA_ASSERT(MA_FALSE); /* Fire an assert so the caller is aware of this bug. 
*/
+        return MA_INVALID_ARGS;
+    }
+
+#if defined(MA_POSIX)
+    return ma_mutex_init__posix(pMutex);
+#elif defined(MA_WIN32)
+    return ma_mutex_init__win32(pMutex);
+#endif
+}
+
+MA_API void ma_mutex_uninit(ma_mutex* pMutex)
+{
+    if (pMutex == NULL) {
+        return;
+    }
+
+#if defined(MA_POSIX)
+    ma_mutex_uninit__posix(pMutex);
+#elif defined(MA_WIN32)
+    ma_mutex_uninit__win32(pMutex);
+#endif
+}
+
+MA_API void ma_mutex_lock(ma_mutex* pMutex)
+{
+    if (pMutex == NULL) {
+        MA_ASSERT(MA_FALSE);    /* Fire an assert so the caller is aware of this bug. */
+        return;
+    }
+
+#if defined(MA_POSIX)
+    ma_mutex_lock__posix(pMutex);
+#elif defined(MA_WIN32)
+    ma_mutex_lock__win32(pMutex);
+#endif
+}
+
+MA_API void ma_mutex_unlock(ma_mutex* pMutex)
+{
+    if (pMutex == NULL) {
+        MA_ASSERT(MA_FALSE);    /* Fire an assert so the caller is aware of this bug. */
+        return;
+    }
+
+#if defined(MA_POSIX)
+    ma_mutex_unlock__posix(pMutex);
+#elif defined(MA_WIN32)
+    ma_mutex_unlock__win32(pMutex);
+#endif
+}
+
+
+MA_API ma_result ma_event_init(ma_event* pEvent)
+{
+    if (pEvent == NULL) {
+        MA_ASSERT(MA_FALSE);    /* Fire an assert so the caller is aware of this bug. */
+        return MA_INVALID_ARGS;
+    }
+
+#if defined(MA_POSIX)
+    return ma_event_init__posix(pEvent);
+#elif defined(MA_WIN32)
+    return ma_event_init__win32(pEvent);
+#endif
+}
+
+#if 0
+static ma_result ma_event_alloc_and_init(ma_event** ppEvent, ma_allocation_callbacks* pAllocationCallbacks)
+{
+    ma_result result;
+    ma_event* pEvent;
+
+    if (ppEvent == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    *ppEvent = NULL;
+
+    pEvent = ma_malloc(sizeof(*pEvent), pAllocationCallbacks);
+    if (pEvent == NULL) {
+        return MA_OUT_OF_MEMORY;
+    }
+
+    result = ma_event_init(pEvent);
+    if (result != MA_SUCCESS) {
+        ma_free(pEvent, pAllocationCallbacks);
+        return result;
+    }
+
+    *ppEvent = pEvent;
+    return result;
+}
+#endif
+
+MA_API void ma_event_uninit(ma_event* pEvent)
+{
+    if (pEvent == NULL) {
+        return;
+    }
+
+#if defined(MA_POSIX)
+    ma_event_uninit__posix(pEvent);
+#elif defined(MA_WIN32)
+    ma_event_uninit__win32(pEvent);
+#endif
+}
+
+#if 0
+static void ma_event_uninit_and_free(ma_event* pEvent, ma_allocation_callbacks* pAllocationCallbacks)
+{
+    if (pEvent == NULL) {
+        return;
+    }
+
+    ma_event_uninit(pEvent);
+    ma_free(pEvent, pAllocationCallbacks);
+}
+#endif
+
+MA_API ma_result ma_event_wait(ma_event* pEvent)
+{
+    if (pEvent == NULL) {
+        MA_ASSERT(MA_FALSE);    /* Fire an assert so the caller is aware of this bug. */
+        return MA_INVALID_ARGS;
+    }
+
+#if defined(MA_POSIX)
+    return ma_event_wait__posix(pEvent);
+#elif defined(MA_WIN32)
+    return ma_event_wait__win32(pEvent);
+#endif
+}
+
+MA_API ma_result ma_event_signal(ma_event* pEvent)
+{
+    if (pEvent == NULL) {
+        MA_ASSERT(MA_FALSE);    /* Fire an assert so the caller is aware of this bug. */
+        return MA_INVALID_ARGS;
+    }
+
+#if defined(MA_POSIX)
+    return ma_event_signal__posix(pEvent);
+#elif defined(MA_WIN32)
+    return ma_event_signal__win32(pEvent);
+#endif
+}
+
+
+MA_API ma_result ma_semaphore_init(int initialValue, ma_semaphore* pSemaphore)
+{
+    if (pSemaphore == NULL) {
+        MA_ASSERT(MA_FALSE);    /* Fire an assert so the caller is aware of this bug. */
+        return MA_INVALID_ARGS;
+    }
+
+#if defined(MA_POSIX)
+    return ma_semaphore_init__posix(initialValue, pSemaphore);
+#elif defined(MA_WIN32)
+    return ma_semaphore_init__win32(initialValue, pSemaphore);
+#endif
+}
+
+MA_API void ma_semaphore_uninit(ma_semaphore* pSemaphore)
+{
+    if (pSemaphore == NULL) {
+        MA_ASSERT(MA_FALSE);    /* Fire an assert so the caller is aware of this bug.
*/ + return; + } + +#if defined(MA_POSIX) + ma_semaphore_uninit__posix(pSemaphore); +#elif defined(MA_WIN32) + ma_semaphore_uninit__win32(pSemaphore); +#endif +} + +MA_API ma_result ma_semaphore_wait(ma_semaphore* pSemaphore) +{ + if (pSemaphore == NULL) { + MA_ASSERT(MA_FALSE); /* Fire an assert so the caller is aware of this bug. */ + return MA_INVALID_ARGS; + } + +#if defined(MA_POSIX) + return ma_semaphore_wait__posix(pSemaphore); +#elif defined(MA_WIN32) + return ma_semaphore_wait__win32(pSemaphore); +#endif +} + +MA_API ma_result ma_semaphore_release(ma_semaphore* pSemaphore) +{ + if (pSemaphore == NULL) { + MA_ASSERT(MA_FALSE); /* Fire an assert so the caller is aware of this bug. */ + return MA_INVALID_ARGS; + } + +#if defined(MA_POSIX) + return ma_semaphore_release__posix(pSemaphore); +#elif defined(MA_WIN32) + return ma_semaphore_release__win32(pSemaphore); +#endif +} +#else +/* MA_NO_THREADING is set which means threading is disabled. Threading is required by some API families. If any of these are enabled we need to throw an error. */ +#ifndef MA_NO_DEVICE_IO +#error "MA_NO_THREADING cannot be used without MA_NO_DEVICE_IO"; +#endif +#endif /* MA_NO_THREADING */ + + + +#define MA_FENCE_COUNTER_MAX 0x7FFFFFFF + +MA_API ma_result ma_fence_init(ma_fence* pFence) +{ + if (pFence == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pFence); + pFence->counter = 0; + + #ifndef MA_NO_THREADING + { + ma_result result; + + result = ma_event_init(&pFence->e); + if (result != MA_SUCCESS) { + return result; + } + } + #endif + + return MA_SUCCESS; +} + +MA_API void ma_fence_uninit(ma_fence* pFence) +{ + if (pFence == NULL) { + return; + } + + #ifndef MA_NO_THREADING + { + ma_event_uninit(&pFence->e); + } + #endif + + MA_ZERO_OBJECT(pFence); +} + +MA_API ma_result ma_fence_acquire(ma_fence* pFence) +{ + if (pFence == NULL) { + return MA_INVALID_ARGS; + } + + for (;;) { + ma_uint32 oldCounter = ma_atomic_load_32(&pFence->counter); + ma_uint32 newCounter = oldCounter + 1; + + /* Make sure we're not about to exceed our maximum value. */ + if (newCounter > MA_FENCE_COUNTER_MAX) { + MA_ASSERT(MA_FALSE); + return MA_OUT_OF_RANGE; + } + + if (ma_atomic_compare_exchange_weak_32(&pFence->counter, &oldCounter, newCounter)) { + return MA_SUCCESS; + } else { + if (oldCounter == MA_FENCE_COUNTER_MAX) { + MA_ASSERT(MA_FALSE); + return MA_OUT_OF_RANGE; /* The other thread took the last available slot. Abort. */ + } + } + } + + /* Should never get here. */ + /*return MA_SUCCESS;*/ +} + +MA_API ma_result ma_fence_release(ma_fence* pFence) +{ + if (pFence == NULL) { + return MA_INVALID_ARGS; + } + + for (;;) { + ma_uint32 oldCounter = ma_atomic_load_32(&pFence->counter); + ma_uint32 newCounter = oldCounter - 1; + + if (oldCounter == 0) { + MA_ASSERT(MA_FALSE); + return MA_INVALID_OPERATION; /* Acquire/release mismatch. */ + } + + if (ma_atomic_compare_exchange_weak_32(&pFence->counter, &oldCounter, newCounter)) { + #ifndef MA_NO_THREADING + { + if (newCounter == 0) { + ma_event_signal(&pFence->e); /* <-- ma_fence_wait() will be waiting on this. */ + } + } + #endif + + return MA_SUCCESS; + } else { + if (oldCounter == 0) { + MA_ASSERT(MA_FALSE); + return MA_INVALID_OPERATION; /* Another thread has taken the 0 slot. Acquire/release mismatch. */ + } + } + } + + /* Should never get here. 
*/
+    /*return MA_SUCCESS;*/
+}
+
+MA_API ma_result ma_fence_wait(ma_fence* pFence)
+{
+    if (pFence == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    for (;;) {
+        ma_uint32 counter;
+
+        counter = ma_atomic_load_32(&pFence->counter);
+        if (counter == 0) {
+            /*
+            Counter has hit zero. By the time we get here some other thread may have acquired the
+            fence again, but that is where the caller needs to take care with how they use the fence.
+            */
+            return MA_SUCCESS;
+        }
+
+        /* Getting here means the counter is > 0. We'll need to wait for something to happen. */
+        #ifndef MA_NO_THREADING
+        {
+            ma_result result;
+
+            result = ma_event_wait(&pFence->e);
+            if (result != MA_SUCCESS) {
+                return result;
+            }
+        }
+        #endif
+    }
+
+    /* Should never get here. */
+    /*return MA_INVALID_OPERATION;*/
+}
+
+
+MA_API ma_result ma_async_notification_signal(ma_async_notification* pNotification)
+{
+    ma_async_notification_callbacks* pNotificationCallbacks = (ma_async_notification_callbacks*)pNotification;
+
+    if (pNotification == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    if (pNotificationCallbacks->onSignal == NULL) {
+        return MA_NOT_IMPLEMENTED;
+    }
+
+    pNotificationCallbacks->onSignal(pNotification);
+    return MA_SUCCESS;
+}
+
+
+static void ma_async_notification_poll__on_signal(ma_async_notification* pNotification)
+{
+    ((ma_async_notification_poll*)pNotification)->signalled = MA_TRUE;
+}
+
+MA_API ma_result ma_async_notification_poll_init(ma_async_notification_poll* pNotificationPoll)
+{
+    if (pNotificationPoll == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    pNotificationPoll->cb.onSignal = ma_async_notification_poll__on_signal;
+    pNotificationPoll->signalled = MA_FALSE;
+
+    return MA_SUCCESS;
+}
+
+MA_API ma_bool32 ma_async_notification_poll_is_signalled(const ma_async_notification_poll* pNotificationPoll)
+{
+    if (pNotificationPoll == NULL) {
+        return MA_FALSE;
+    }
+
+    return pNotificationPoll->signalled;
+}
+
+
+static void ma_async_notification_event__on_signal(ma_async_notification* pNotification)
+{
+    ma_async_notification_event_signal((ma_async_notification_event*)pNotification);
+}
+
+MA_API ma_result ma_async_notification_event_init(ma_async_notification_event* pNotificationEvent)
+{
+    if (pNotificationEvent == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    pNotificationEvent->cb.onSignal = ma_async_notification_event__on_signal;
+
+    #ifndef MA_NO_THREADING
+    {
+        ma_result result;
+
+        result = ma_event_init(&pNotificationEvent->e);
+        if (result != MA_SUCCESS) {
+            return result;
+        }
+
+        return MA_SUCCESS;
+    }
+    #else
+    {
+        return MA_NOT_IMPLEMENTED;  /* Threading is disabled. */
+    }
+    #endif
+}
+
+MA_API ma_result ma_async_notification_event_uninit(ma_async_notification_event* pNotificationEvent)
+{
+    if (pNotificationEvent == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    #ifndef MA_NO_THREADING
+    {
+        ma_event_uninit(&pNotificationEvent->e);
+        return MA_SUCCESS;
+    }
+    #else
+    {
+        return MA_NOT_IMPLEMENTED;  /* Threading is disabled. */
+    }
+    #endif
+}
+
+MA_API ma_result ma_async_notification_event_wait(ma_async_notification_event* pNotificationEvent)
+{
+    if (pNotificationEvent == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    #ifndef MA_NO_THREADING
+    {
+        return ma_event_wait(&pNotificationEvent->e);
+    }
+    #else
+    {
+        return MA_NOT_IMPLEMENTED;  /* Threading is disabled.
*/ + } + #endif +} + +MA_API ma_result ma_async_notification_event_signal(ma_async_notification_event* pNotificationEvent) +{ + if (pNotificationEvent == NULL) { + return MA_INVALID_ARGS; + } + + #ifndef MA_NO_THREADING + { + return ma_event_signal(&pNotificationEvent->e); + } + #else + { + return MA_NOT_IMPLEMENTED; /* Threading is disabled. */ + } + #endif +} + + + +/************************************************************************************************************************************************************ + +Job Queue + +************************************************************************************************************************************************************/ +MA_API ma_slot_allocator_config ma_slot_allocator_config_init(ma_uint32 capacity) +{ + ma_slot_allocator_config config; + + MA_ZERO_OBJECT(&config); + config.capacity = capacity; + + return config; +} + + +static MA_INLINE ma_uint32 ma_slot_allocator_calculate_group_capacity(ma_uint32 slotCapacity) +{ + ma_uint32 cap = slotCapacity / 32; + if ((slotCapacity % 32) != 0) { + cap += 1; + } + + return cap; +} + +static MA_INLINE ma_uint32 ma_slot_allocator_group_capacity(const ma_slot_allocator* pAllocator) +{ + return ma_slot_allocator_calculate_group_capacity(pAllocator->capacity); +} + + +typedef struct +{ + size_t sizeInBytes; + size_t groupsOffset; + size_t slotsOffset; +} ma_slot_allocator_heap_layout; + +static ma_result ma_slot_allocator_get_heap_layout(const ma_slot_allocator_config* pConfig, ma_slot_allocator_heap_layout* pHeapLayout) +{ + MA_ASSERT(pHeapLayout != NULL); + + MA_ZERO_OBJECT(pHeapLayout); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + if (pConfig->capacity == 0) { + return MA_INVALID_ARGS; + } + + pHeapLayout->sizeInBytes = 0; + + /* Groups. */ + pHeapLayout->groupsOffset = pHeapLayout->sizeInBytes; + pHeapLayout->sizeInBytes += ma_align_64(ma_slot_allocator_calculate_group_capacity(pConfig->capacity) * sizeof(ma_slot_allocator_group)); + + /* Slots. 
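+
+    One ma_uint32 per slot. Each entry is a per-slot counter that gets incremented on every
+    allocation of that slot and folded into the upper 32 bits of the returned slot ID, which
+    is what lets a stale ID from an earlier use of the same slot be told apart from the
+    current one.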
*/ + pHeapLayout->slotsOffset = pHeapLayout->sizeInBytes; + pHeapLayout->sizeInBytes += ma_align_64(pConfig->capacity * sizeof(ma_uint32)); + + return MA_SUCCESS; +} + +MA_API ma_result ma_slot_allocator_get_heap_size(const ma_slot_allocator_config* pConfig, size_t* pHeapSizeInBytes) +{ + ma_result result; + ma_slot_allocator_heap_layout layout; + + if (pHeapSizeInBytes == NULL) { + return MA_INVALID_ARGS; + } + + *pHeapSizeInBytes = 0; + + result = ma_slot_allocator_get_heap_layout(pConfig, &layout); + if (result != MA_SUCCESS) { + return result; + } + + *pHeapSizeInBytes = layout.sizeInBytes; + + return result; +} + +MA_API ma_result ma_slot_allocator_init_preallocated(const ma_slot_allocator_config* pConfig, void* pHeap, ma_slot_allocator* pAllocator) +{ + ma_result result; + ma_slot_allocator_heap_layout heapLayout; + + if (pAllocator == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pAllocator); + + if (pHeap == NULL) { + return MA_INVALID_ARGS; + } + + result = ma_slot_allocator_get_heap_layout(pConfig, &heapLayout); + if (result != MA_SUCCESS) { + return result; + } + + pAllocator->_pHeap = pHeap; + MA_ZERO_MEMORY(pHeap, heapLayout.sizeInBytes); + + pAllocator->pGroups = (ma_slot_allocator_group*)ma_offset_ptr(pHeap, heapLayout.groupsOffset); + pAllocator->pSlots = (ma_uint32*)ma_offset_ptr(pHeap, heapLayout.slotsOffset); + pAllocator->capacity = pConfig->capacity; + + return MA_SUCCESS; +} + +MA_API ma_result ma_slot_allocator_init(const ma_slot_allocator_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_slot_allocator* pAllocator) +{ + ma_result result; + size_t heapSizeInBytes; + void* pHeap; + + result = ma_slot_allocator_get_heap_size(pConfig, &heapSizeInBytes); + if (result != MA_SUCCESS) { + return result; /* Failed to retrieve the size of the heap allocation. */ + } + + if (heapSizeInBytes > 0) { + pHeap = ma_malloc(heapSizeInBytes, pAllocationCallbacks); + if (pHeap == NULL) { + return MA_OUT_OF_MEMORY; + } + } else { + pHeap = NULL; + } + + result = ma_slot_allocator_init_preallocated(pConfig, pHeap, pAllocator); + if (result != MA_SUCCESS) { + ma_free(pHeap, pAllocationCallbacks); + return result; + } + + pAllocator->_ownsHeap = MA_TRUE; + return MA_SUCCESS; +} + +MA_API void ma_slot_allocator_uninit(ma_slot_allocator* pAllocator, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pAllocator == NULL) { + return; + } + + if (pAllocator->_ownsHeap) { + ma_free(pAllocator->_pHeap, pAllocationCallbacks); + } +} + +MA_API ma_result ma_slot_allocator_alloc(ma_slot_allocator* pAllocator, ma_uint64* pSlot) +{ + ma_uint32 iAttempt; + const ma_uint32 maxAttempts = 2; /* The number of iterations to perform until returning MA_OUT_OF_MEMORY if no slots can be found. */ + + if (pAllocator == NULL || pSlot == NULL) { + return MA_INVALID_ARGS; + } + + for (iAttempt = 0; iAttempt < maxAttempts; iAttempt += 1) { + /* We need to acquire a suitable bitfield first. This is a bitfield that's got an available slot within it. */ + ma_uint32 iGroup; + for (iGroup = 0; iGroup < ma_slot_allocator_group_capacity(pAllocator); iGroup += 1) { + /* CAS */ + for (;;) { + ma_uint32 oldBitfield; + ma_uint32 newBitfield; + ma_uint32 bitOffset; + + oldBitfield = ma_atomic_load_32(&pAllocator->pGroups[iGroup].bitfield); /* <-- This copy must happen. The compiler must not optimize this away. */ + + /* Fast check to see if anything is available. */ + if (oldBitfield == 0xFFFFFFFF) { + break; /* No available bits in this bitfield. 
*/ + } + + bitOffset = ma_ffs_32(~oldBitfield); + MA_ASSERT(bitOffset < 32); + + newBitfield = oldBitfield | (1 << bitOffset); + + if (ma_atomic_compare_and_swap_32(&pAllocator->pGroups[iGroup].bitfield, oldBitfield, newBitfield) == oldBitfield) { + ma_uint32 slotIndex; + + /* Increment the counter as soon as possible to have other threads report out-of-memory sooner than later. */ + ma_atomic_fetch_add_32(&pAllocator->count, 1); + + /* The slot index is required for constructing the output value. */ + slotIndex = (iGroup << 5) + bitOffset; /* iGroup << 5 = iGroup * 32 */ + if (slotIndex >= pAllocator->capacity) { + return MA_OUT_OF_MEMORY; + } + + /* Increment the reference count before constructing the output value. */ + pAllocator->pSlots[slotIndex] += 1; + + /* Construct the output value. */ + *pSlot = (((ma_uint64)pAllocator->pSlots[slotIndex] << 32) | slotIndex); + + return MA_SUCCESS; + } + } + } + + /* We weren't able to find a slot. If it's because we've reached our capacity we need to return MA_OUT_OF_MEMORY. Otherwise we need to do another iteration and try again. */ + if (pAllocator->count < pAllocator->capacity) { + ma_yield(); + } else { + return MA_OUT_OF_MEMORY; + } + } + + /* We couldn't find a slot within the maximum number of attempts. */ + return MA_OUT_OF_MEMORY; +} + +MA_API ma_result ma_slot_allocator_free(ma_slot_allocator* pAllocator, ma_uint64 slot) +{ + ma_uint32 iGroup; + ma_uint32 iBit; + + if (pAllocator == NULL) { + return MA_INVALID_ARGS; + } + + iGroup = (ma_uint32)((slot & 0xFFFFFFFF) >> 5); /* slot / 32 */ + iBit = (ma_uint32)((slot & 0xFFFFFFFF) & 31); /* slot % 32 */ + + if (iGroup >= ma_slot_allocator_group_capacity(pAllocator)) { + return MA_INVALID_ARGS; + } + + MA_ASSERT(iBit < 32); /* This must be true due to the logic we used to actually calculate it. */ + + while (ma_atomic_load_32(&pAllocator->count) > 0) { + /* CAS */ + ma_uint32 oldBitfield; + ma_uint32 newBitfield; + + oldBitfield = ma_atomic_load_32(&pAllocator->pGroups[iGroup].bitfield); /* <-- This copy must happen. The compiler must not optimize this away. */ + newBitfield = oldBitfield & ~(1 << iBit); + + /* Debugging for checking for double-frees. */ + #if defined(MA_DEBUG_OUTPUT) + { + if ((oldBitfield & (1 << iBit)) == 0) { + MA_ASSERT(MA_FALSE); /* Double free detected.*/ + } + } + #endif + + if (ma_atomic_compare_and_swap_32(&pAllocator->pGroups[iGroup].bitfield, oldBitfield, newBitfield) == oldBitfield) { + ma_atomic_fetch_sub_32(&pAllocator->count, 1); + return MA_SUCCESS; + } + } + + /* Getting here means there are no allocations available for freeing. */ + return MA_INVALID_OPERATION; +} + + +#define MA_JOB_ID_NONE ~((ma_uint64)0) +#define MA_JOB_SLOT_NONE (ma_uint16)(~0) + +static MA_INLINE ma_uint32 ma_job_extract_refcount(ma_uint64 toc) +{ + return (ma_uint32)(toc >> 32); +} + +static MA_INLINE ma_uint16 ma_job_extract_slot(ma_uint64 toc) +{ + return (ma_uint16)(toc & 0x0000FFFF); +} + +static MA_INLINE ma_uint16 ma_job_extract_code(ma_uint64 toc) +{ + return (ma_uint16)((toc & 0xFFFF0000) >> 16); +} + +static MA_INLINE ma_uint64 ma_job_toc_to_allocation(ma_uint64 toc) +{ + return ((ma_uint64)ma_job_extract_refcount(toc) << 32) | (ma_uint64)ma_job_extract_slot(toc); +} + +static MA_INLINE ma_uint64 ma_job_set_refcount(ma_uint64 toc, ma_uint32 refcount) +{ + /* Clear the reference count first. 
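+
+    For reference, the 64-bit toc layout used by the helpers above: bits 0-15 hold the
+    slot, bits 16-31 hold the job code, and bits 32-63 hold a reference count which the
+    queue bumps on every successful CAS to avoid the ABA problem when slots are reused.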
*/ + toc = toc & ~((ma_uint64)0xFFFFFFFF << 32); + toc = toc | ((ma_uint64)refcount << 32); + + return toc; +} + + +MA_API ma_job ma_job_init(ma_uint16 code) +{ + ma_job job; + + MA_ZERO_OBJECT(&job); + job.toc.breakup.code = code; + job.toc.breakup.slot = MA_JOB_SLOT_NONE; /* Temp value. Will be allocated when posted to a queue. */ + job.next = MA_JOB_ID_NONE; + + return job; +} + + +static ma_result ma_job_process__noop(ma_job* pJob); +static ma_result ma_job_process__quit(ma_job* pJob); +static ma_result ma_job_process__custom(ma_job* pJob); +static ma_result ma_job_process__resource_manager__load_data_buffer_node(ma_job* pJob); +static ma_result ma_job_process__resource_manager__free_data_buffer_node(ma_job* pJob); +static ma_result ma_job_process__resource_manager__page_data_buffer_node(ma_job* pJob); +static ma_result ma_job_process__resource_manager__load_data_buffer(ma_job* pJob); +static ma_result ma_job_process__resource_manager__free_data_buffer(ma_job* pJob); +static ma_result ma_job_process__resource_manager__load_data_stream(ma_job* pJob); +static ma_result ma_job_process__resource_manager__free_data_stream(ma_job* pJob); +static ma_result ma_job_process__resource_manager__page_data_stream(ma_job* pJob); +static ma_result ma_job_process__resource_manager__seek_data_stream(ma_job* pJob); + +#if !defined(MA_NO_DEVICE_IO) +static ma_result ma_job_process__device__aaudio_reroute(ma_job* pJob); +#endif + +static ma_job_proc g_jobVTable[MA_JOB_TYPE_COUNT] = +{ + /* Miscellaneous. */ + ma_job_process__quit, /* MA_JOB_TYPE_QUIT */ + ma_job_process__custom, /* MA_JOB_TYPE_CUSTOM */ + + /* Resource Manager. */ + ma_job_process__resource_manager__load_data_buffer_node, /* MA_JOB_TYPE_RESOURCE_MANAGER_LOAD_DATA_BUFFER_NODE */ + ma_job_process__resource_manager__free_data_buffer_node, /* MA_JOB_TYPE_RESOURCE_MANAGER_FREE_DATA_BUFFER_NODE */ + ma_job_process__resource_manager__page_data_buffer_node, /* MA_JOB_TYPE_RESOURCE_MANAGER_PAGE_DATA_BUFFER_NODE */ + ma_job_process__resource_manager__load_data_buffer, /* MA_JOB_TYPE_RESOURCE_MANAGER_LOAD_DATA_BUFFER */ + ma_job_process__resource_manager__free_data_buffer, /* MA_JOB_TYPE_RESOURCE_MANAGER_FREE_DATA_BUFFER */ + ma_job_process__resource_manager__load_data_stream, /* MA_JOB_TYPE_RESOURCE_MANAGER_LOAD_DATA_STREAM */ + ma_job_process__resource_manager__free_data_stream, /* MA_JOB_TYPE_RESOURCE_MANAGER_FREE_DATA_STREAM */ + ma_job_process__resource_manager__page_data_stream, /* MA_JOB_TYPE_RESOURCE_MANAGER_PAGE_DATA_STREAM */ + ma_job_process__resource_manager__seek_data_stream, /* MA_JOB_TYPE_RESOURCE_MANAGER_SEEK_DATA_STREAM */ + + /* Device. */ +#if !defined(MA_NO_DEVICE_IO) + ma_job_process__device__aaudio_reroute /* MA_JOB_TYPE_DEVICE_AAUDIO_REROUTE */ +#endif +}; + +MA_API ma_result ma_job_process(ma_job* pJob) +{ + if (pJob == NULL) { + return MA_INVALID_ARGS; + } + + if (pJob->toc.breakup.code >= MA_JOB_TYPE_COUNT) { + return MA_INVALID_OPERATION; + } + + return g_jobVTable[pJob->toc.breakup.code](pJob); +} + +static ma_result ma_job_process__noop(ma_job* pJob) +{ + MA_ASSERT(pJob != NULL); + + /* No-op. */ + (void)pJob; + + return MA_SUCCESS; +} + +static ma_result ma_job_process__quit(ma_job* pJob) +{ + return ma_job_process__noop(pJob); +} + +static ma_result ma_job_process__custom(ma_job* pJob) +{ + MA_ASSERT(pJob != NULL); + + /* No-op if there's no callback. 
*/ + if (pJob->data.custom.proc == NULL) { + return MA_SUCCESS; + } + + return pJob->data.custom.proc(pJob); +} + + + +MA_API ma_job_queue_config ma_job_queue_config_init(ma_uint32 flags, ma_uint32 capacity) +{ + ma_job_queue_config config; + + config.flags = flags; + config.capacity = capacity; + + return config; +} + + +typedef struct +{ + size_t sizeInBytes; + size_t allocatorOffset; + size_t jobsOffset; +} ma_job_queue_heap_layout; + +static ma_result ma_job_queue_get_heap_layout(const ma_job_queue_config* pConfig, ma_job_queue_heap_layout* pHeapLayout) +{ + ma_result result; + + MA_ASSERT(pHeapLayout != NULL); + + MA_ZERO_OBJECT(pHeapLayout); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + if (pConfig->capacity == 0) { + return MA_INVALID_ARGS; + } + + pHeapLayout->sizeInBytes = 0; + + /* Allocator. */ + { + ma_slot_allocator_config allocatorConfig; + size_t allocatorHeapSizeInBytes; + + allocatorConfig = ma_slot_allocator_config_init(pConfig->capacity); + result = ma_slot_allocator_get_heap_size(&allocatorConfig, &allocatorHeapSizeInBytes); + if (result != MA_SUCCESS) { + return result; + } + + pHeapLayout->allocatorOffset = pHeapLayout->sizeInBytes; + pHeapLayout->sizeInBytes += allocatorHeapSizeInBytes; + } + + /* Jobs. */ + pHeapLayout->jobsOffset = pHeapLayout->sizeInBytes; + pHeapLayout->sizeInBytes += ma_align_64(pConfig->capacity * sizeof(ma_job)); + + return MA_SUCCESS; +} + +MA_API ma_result ma_job_queue_get_heap_size(const ma_job_queue_config* pConfig, size_t* pHeapSizeInBytes) +{ + ma_result result; + ma_job_queue_heap_layout layout; + + if (pHeapSizeInBytes == NULL) { + return MA_INVALID_ARGS; + } + + *pHeapSizeInBytes = 0; + + result = ma_job_queue_get_heap_layout(pConfig, &layout); + if (result != MA_SUCCESS) { + return result; + } + + *pHeapSizeInBytes = layout.sizeInBytes; + + return MA_SUCCESS; +} + +MA_API ma_result ma_job_queue_init_preallocated(const ma_job_queue_config* pConfig, void* pHeap, ma_job_queue* pQueue) +{ + ma_result result; + ma_job_queue_heap_layout heapLayout; + ma_slot_allocator_config allocatorConfig; + + if (pQueue == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pQueue); + + result = ma_job_queue_get_heap_layout(pConfig, &heapLayout); + if (result != MA_SUCCESS) { + return result; + } + + pQueue->_pHeap = pHeap; + MA_ZERO_MEMORY(pHeap, heapLayout.sizeInBytes); + + pQueue->flags = pConfig->flags; + pQueue->capacity = pConfig->capacity; + pQueue->pJobs = (ma_job*)ma_offset_ptr(pHeap, heapLayout.jobsOffset); + + allocatorConfig = ma_slot_allocator_config_init(pConfig->capacity); + result = ma_slot_allocator_init_preallocated(&allocatorConfig, ma_offset_ptr(pHeap, heapLayout.allocatorOffset), &pQueue->allocator); + if (result != MA_SUCCESS) { + return result; + } + + /* We need a semaphore if we're running in non-blocking mode. If threading is disabled we need to return an error. */ + if ((pQueue->flags & MA_JOB_QUEUE_FLAG_NON_BLOCKING) == 0) { + #ifndef MA_NO_THREADING + { + ma_semaphore_init(0, &pQueue->sem); + } + #else + { + /* Threading is disabled and we've requested non-blocking mode. */ + return MA_INVALID_OPERATION; + } + #endif + } + + /* + Our queue needs to be initialized with a free standing node. This should always be slot 0. Required for the lock free algorithm. The first job in the queue is + just a dummy item for giving us the first item in the list which is stored in the "next" member. + */ + ma_slot_allocator_alloc(&pQueue->allocator, &pQueue->head); /* Will never fail. 
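+
+    (It cannot fail here: the allocator was initialized just above with a capacity of at
+    least 1 and nothing has been allocated from it yet, so slot 0 is guaranteed to be free.)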
*/ + pQueue->pJobs[ma_job_extract_slot(pQueue->head)].next = MA_JOB_ID_NONE; + pQueue->tail = pQueue->head; + + return MA_SUCCESS; +} + +MA_API ma_result ma_job_queue_init(const ma_job_queue_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_job_queue* pQueue) +{ + ma_result result; + size_t heapSizeInBytes; + void* pHeap; + + result = ma_job_queue_get_heap_size(pConfig, &heapSizeInBytes); + if (result != MA_SUCCESS) { + return result; + } + + if (heapSizeInBytes > 0) { + pHeap = ma_malloc(heapSizeInBytes, pAllocationCallbacks); + if (pHeap == NULL) { + return MA_OUT_OF_MEMORY; + } + } else { + pHeap = NULL; + } + + result = ma_job_queue_init_preallocated(pConfig, pHeap, pQueue); + if (result != MA_SUCCESS) { + ma_free(pHeap, pAllocationCallbacks); + return result; + } + + pQueue->_ownsHeap = MA_TRUE; + return MA_SUCCESS; +} + +MA_API void ma_job_queue_uninit(ma_job_queue* pQueue, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pQueue == NULL) { + return; + } + + /* All we need to do is uninitialize the semaphore. */ + if ((pQueue->flags & MA_JOB_QUEUE_FLAG_NON_BLOCKING) == 0) { + #ifndef MA_NO_THREADING + { + ma_semaphore_uninit(&pQueue->sem); + } + #else + { + MA_ASSERT(MA_FALSE); /* Should never get here. Should have been checked at initialization time. */ + } + #endif + } + + ma_slot_allocator_uninit(&pQueue->allocator, pAllocationCallbacks); + + if (pQueue->_ownsHeap) { + ma_free(pQueue->_pHeap, pAllocationCallbacks); + } +} + +static ma_bool32 ma_job_queue_cas(volatile ma_uint64* dst, ma_uint64 expected, ma_uint64 desired) +{ + /* The new counter is taken from the expected value. */ + return ma_atomic_compare_and_swap_64(dst, expected, ma_job_set_refcount(desired, ma_job_extract_refcount(expected) + 1)) == expected; +} + +MA_API ma_result ma_job_queue_post(ma_job_queue* pQueue, const ma_job* pJob) +{ + /* + Lock free queue implementation based on the paper by Michael and Scott: Nonblocking Algorithms and Preemption-Safe Locking on Multiprogrammed Shared Memory Multiprocessors + */ + ma_result result; + ma_uint64 slot; + ma_uint64 tail; + ma_uint64 next; + + if (pQueue == NULL || pJob == NULL) { + return MA_INVALID_ARGS; + } + + /* We need a new slot. */ + result = ma_slot_allocator_alloc(&pQueue->allocator, &slot); + if (result != MA_SUCCESS) { + return result; /* Probably ran out of slots. If so, MA_OUT_OF_MEMORY will be returned. */ + } + + /* At this point we should have a slot to place the job. */ + MA_ASSERT(ma_job_extract_slot(slot) < pQueue->capacity); + + /* We need to put the job into memory before we do anything. */ + pQueue->pJobs[ma_job_extract_slot(slot)] = *pJob; + pQueue->pJobs[ma_job_extract_slot(slot)].toc.allocation = slot; /* This will overwrite the job code. */ + pQueue->pJobs[ma_job_extract_slot(slot)].toc.breakup.code = pJob->toc.breakup.code; /* The job code needs to be applied again because the line above overwrote it. */ + pQueue->pJobs[ma_job_extract_slot(slot)].next = MA_JOB_ID_NONE; /* Reset for safety. */ + + #ifndef MA_USE_EXPERIMENTAL_LOCK_FREE_JOB_QUEUE + ma_spinlock_lock(&pQueue->lock); + #endif + { + /* The job is stored in memory so now we need to add it to our linked list. We only ever add items to the end of the list. 
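+
+    This is the standard Michael & Scott enqueue: read the tail and its next pointer,
+    confirm the tail hasn't moved in the meantime, then either CAS the new slot onto
+    tail->next (when next is empty, i.e. its slot portion is 0xFFFF) or help advance the
+    tail and retry. The CAS after the loop swings the tail to the newly appended job.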
*/ + for (;;) { + tail = ma_atomic_load_64(&pQueue->tail); + next = ma_atomic_load_64(&pQueue->pJobs[ma_job_extract_slot(tail)].next); + + if (ma_job_toc_to_allocation(tail) == ma_job_toc_to_allocation(ma_atomic_load_64(&pQueue->tail))) { + if (ma_job_extract_slot(next) == 0xFFFF) { + if (ma_job_queue_cas(&pQueue->pJobs[ma_job_extract_slot(tail)].next, next, slot)) { + break; + } + } else { + ma_job_queue_cas(&pQueue->tail, tail, ma_job_extract_slot(next)); + } + } + } + ma_job_queue_cas(&pQueue->tail, tail, slot); + } + #ifndef MA_USE_EXPERIMENTAL_LOCK_FREE_JOB_QUEUE + ma_spinlock_unlock(&pQueue->lock); + #endif + + + /* Signal the semaphore as the last step if we're using synchronous mode. */ + if ((pQueue->flags & MA_JOB_QUEUE_FLAG_NON_BLOCKING) == 0) { + #ifndef MA_NO_THREADING + { + ma_semaphore_release(&pQueue->sem); + } + #else + { + MA_ASSERT(MA_FALSE); /* Should never get here. Should have been checked at initialization time. */ + } + #endif + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_job_queue_next(ma_job_queue* pQueue, ma_job* pJob) +{ + ma_uint64 head; + ma_uint64 tail; + ma_uint64 next; + + if (pQueue == NULL || pJob == NULL) { + return MA_INVALID_ARGS; + } + + /* If we're running in synchronous mode we'll need to wait on a semaphore. */ + if ((pQueue->flags & MA_JOB_QUEUE_FLAG_NON_BLOCKING) == 0) { + #ifndef MA_NO_THREADING + { + ma_semaphore_wait(&pQueue->sem); + } + #else + { + MA_ASSERT(MA_FALSE); /* Should never get here. Should have been checked at initialization time. */ + } + #endif + } + + #ifndef MA_USE_EXPERIMENTAL_LOCK_FREE_JOB_QUEUE + ma_spinlock_lock(&pQueue->lock); + #endif + { + /* + BUG: In lock-free mode, multiple threads can be in this section of code. The "head" variable in the loop below + is stored. One thread can fall through to the freeing of this item while another is still using "head" for the + retrieval of the "next" variable. + + The slot allocator might need to make use of some reference counting to ensure it's only truly freed when + there are no more references to the item. This must be fixed before removing these locks. + */ + + /* Now we need to remove the root item from the list. */ + for (;;) { + head = ma_atomic_load_64(&pQueue->head); + tail = ma_atomic_load_64(&pQueue->tail); + next = ma_atomic_load_64(&pQueue->pJobs[ma_job_extract_slot(head)].next); + + if (ma_job_toc_to_allocation(head) == ma_job_toc_to_allocation(ma_atomic_load_64(&pQueue->head))) { + if (ma_job_extract_slot(head) == ma_job_extract_slot(tail)) { + if (ma_job_extract_slot(next) == 0xFFFF) { + #ifndef MA_USE_EXPERIMENTAL_LOCK_FREE_JOB_QUEUE + ma_spinlock_unlock(&pQueue->lock); + #endif + return MA_NO_DATA_AVAILABLE; + } + ma_job_queue_cas(&pQueue->tail, tail, ma_job_extract_slot(next)); + } else { + *pJob = pQueue->pJobs[ma_job_extract_slot(next)]; + if (ma_job_queue_cas(&pQueue->head, head, ma_job_extract_slot(next))) { + break; + } + } + } + } + } + #ifndef MA_USE_EXPERIMENTAL_LOCK_FREE_JOB_QUEUE + ma_spinlock_unlock(&pQueue->lock); + #endif + + ma_slot_allocator_free(&pQueue->allocator, head); + + /* + If it's a quit job make sure it's put back on the queue to ensure other threads have an opportunity to detect it and terminate naturally. We + could instead just leave it on the queue, but that would involve fiddling with the lock-free code above and I want to keep that as simple as + possible. 
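+
+    The practical effect is that a single quit job cascades through the pool: every worker that
+    dequeues it re-posts it and gets MA_CANCELLED back, so each thread has a chance to observe the
+    quit and terminate. As an illustrative sketch only (not part of the vendored file; jobQueue is
+    a hypothetical ma_job_queue), a worker built on this API might look like:
+
+        for (;;) {
+            ma_job job;
+            if (ma_job_queue_next(&jobQueue, &job) != MA_SUCCESS) {
+                break;      <-- MA_CANCELLED for a quit job; an error otherwise.
+            }
+            ma_job_process(&job);
+        }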
+    */
+    if (pJob->toc.breakup.code == MA_JOB_TYPE_QUIT) {
+        ma_job_queue_post(pQueue, pJob);
+        return MA_CANCELLED;    /* Return a cancelled status just in case the thread is checking return codes and not properly checking for a quit job. */
+    }
+
+    return MA_SUCCESS;
+}
+
+
+
+/*******************************************************************************
+
+Dynamic Linking
+
+*******************************************************************************/
+#ifdef MA_POSIX
+    /* No need for dlfcn.h if we're not using runtime linking. */
+    #ifndef MA_NO_RUNTIME_LINKING
+        #include <dlfcn.h>
+    #endif
+#endif
+
+MA_API ma_handle ma_dlopen(ma_log* pLog, const char* filename)
+{
+#ifndef MA_NO_RUNTIME_LINKING
+    ma_handle handle;
+
+    ma_log_postf(pLog, MA_LOG_LEVEL_DEBUG, "Loading library: %s\n", filename);
+
+    #ifdef MA_WIN32
+        /* From MSDN: Desktop applications cannot use LoadPackagedLibrary; if a desktop application calls this function it fails with APPMODEL_ERROR_NO_PACKAGE.*/
+        #if !defined(MA_WIN32_UWP) || !(defined(WINAPI_FAMILY) && ((defined(WINAPI_FAMILY_PHONE_APP) && WINAPI_FAMILY == WINAPI_FAMILY_PHONE_APP)))
+            handle = (ma_handle)LoadLibraryA(filename);
+        #else
+            /* *sigh* It appears there is no ANSI version of LoadPackagedLibrary()... */
+            WCHAR filenameW[4096];
+            /* Note: MultiByteToWideChar() takes the destination capacity in WCHARs, not bytes. */
+            if (MultiByteToWideChar(CP_UTF8, 0, filename, -1, filenameW, ma_countof(filenameW)) == 0) {
+                handle = NULL;
+            } else {
+                handle = (ma_handle)LoadPackagedLibrary(filenameW, 0);
+            }
+        #endif
+    #else
+        handle = (ma_handle)dlopen(filename, RTLD_NOW);
+    #endif
+
+    /*
+    I'm not considering failure to load a library an error nor a warning because seamlessly falling through to a lower-priority
+    backend is a deliberate design choice. Instead I'm logging it as an informational message.
+    */
+    if (handle == NULL) {
+        ma_log_postf(pLog, MA_LOG_LEVEL_INFO, "Failed to load library: %s\n", filename);
+    }
+
+    return handle;
+#else
+    /* Runtime linking is disabled. */
+    (void)pLog;
+    (void)filename;
+    return NULL;
+#endif
+}
+
+MA_API void ma_dlclose(ma_log* pLog, ma_handle handle)
+{
+#ifndef MA_NO_RUNTIME_LINKING
+    #ifdef MA_WIN32
+        FreeLibrary((HMODULE)handle);
+    #else
+        /* Hack for Android bug (see https://github.com/android/ndk/issues/360). Calling dlclose() pre-API 28 may segfault. */
+        #if !defined(MA_ANDROID) || (defined(__ANDROID_API__) && __ANDROID_API__ >= 28)
+        {
+            dlclose((void*)handle);
+        }
+        #else
+        {
+            (void)handle;
+        }
+        #endif
+    #endif
+
+    (void)pLog;
+#else
+    /* Runtime linking is disabled. */
+    (void)pLog;
+    (void)handle;
+#endif
+}
+
+MA_API ma_proc ma_dlsym(ma_log* pLog, ma_handle handle, const char* symbol)
+{
+#ifndef MA_NO_RUNTIME_LINKING
+    ma_proc proc;
+
+    ma_log_postf(pLog, MA_LOG_LEVEL_DEBUG, "Loading symbol: %s\n", symbol);
+
+#ifdef _WIN32
+    proc = (ma_proc)GetProcAddress((HMODULE)handle, symbol);
+#else
+#if (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8))) || defined(__clang__)
+    #pragma GCC diagnostic push
+    #pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+    proc = (ma_proc)dlsym((void*)handle, symbol);
+#if (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8))) || defined(__clang__)
+    #pragma GCC diagnostic pop
+#endif
+#endif
+
+    if (proc == NULL) {
+        ma_log_postf(pLog, MA_LOG_LEVEL_WARNING, "Failed to load symbol: %s\n", symbol);
+    }
+
+    (void)pLog; /* It's possible for pLog to be unused. */
+    return proc;
+#else
+    /* Runtime linking is disabled.
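+    As with ma_dlopen(), the stub simply returns NULL so that a build with runtime linking
+    disabled behaves the same as a missing symbol from the caller's perspective.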
*/
+    (void)pLog;
+    (void)handle;
+    (void)symbol;
+    return NULL;
+#endif
+}
+
+
+
+/************************************************************************************************************************************************************
+*************************************************************************************************************************************************************
+
+DEVICE I/O
+==========
+
+*************************************************************************************************************************************************************
+************************************************************************************************************************************************************/
+
+/* Disable run-time linking on certain backends and platforms. */
+#ifndef MA_NO_RUNTIME_LINKING
+    #if defined(MA_EMSCRIPTEN) || defined(MA_ORBIS) || defined(MA_PROSPERO)
+        #define MA_NO_RUNTIME_LINKING
+    #endif
+#endif
+
+#ifdef MA_APPLE
+    #include <AvailabilityMacros.h> /* For MAC_OS_X_VERSION_MIN_REQUIRED, used below. */
+#endif
+
+#ifndef MA_NO_DEVICE_IO
+
+#if defined(MA_APPLE) && (MAC_OS_X_VERSION_MIN_REQUIRED < 101200)
+    #include <mach/mach_time.h> /* For mach_absolute_time() */
+#endif
+
+#ifdef MA_POSIX
+    #include <sys/types.h>
+    #include <unistd.h>
+
+    /* No need for dlfcn.h if we're not using runtime linking. */
+    #ifndef MA_NO_RUNTIME_LINKING
+        #include <dlfcn.h>
+    #endif
+#endif
+
+/* This must be set to at least 26. */
+#ifndef MA_AAUDIO_MIN_ANDROID_SDK_VERSION
+#define MA_AAUDIO_MIN_ANDROID_SDK_VERSION 27
+#endif
+
+
+MA_API void ma_device_info_add_native_data_format(ma_device_info* pDeviceInfo, ma_format format, ma_uint32 channels, ma_uint32 sampleRate, ma_uint32 flags)
+{
+    if (pDeviceInfo == NULL) {
+        return;
+    }
+
+    if (pDeviceInfo->nativeDataFormatCount < ma_countof(pDeviceInfo->nativeDataFormats)) {
+        pDeviceInfo->nativeDataFormats[pDeviceInfo->nativeDataFormatCount].format     = format;
+        pDeviceInfo->nativeDataFormats[pDeviceInfo->nativeDataFormatCount].channels   = channels;
+        pDeviceInfo->nativeDataFormats[pDeviceInfo->nativeDataFormatCount].sampleRate = sampleRate;
+        pDeviceInfo->nativeDataFormats[pDeviceInfo->nativeDataFormatCount].flags      = flags;
+        pDeviceInfo->nativeDataFormatCount += 1;
+    }
+}
+
+
+typedef struct
+{
+    ma_backend backend;
+    const char* pName;
+} ma_backend_info;
+
+static ma_backend_info gBackendInfo[] = /* Indexed by the backend enum. Must be in the order backends are declared in the ma_backend enum.
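+    ma_get_backend_name() indexes directly into this table and ma_get_backend_from_name() scans it
+    linearly, so a new backend must be inserted here at exactly the position it occupies in the
+    enum.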
*/ +{ + {ma_backend_wasapi, "WASAPI"}, + {ma_backend_dsound, "DirectSound"}, + {ma_backend_winmm, "WinMM"}, + {ma_backend_coreaudio, "Core Audio"}, + {ma_backend_sndio, "sndio"}, + {ma_backend_audio4, "audio(4)"}, + {ma_backend_oss, "OSS"}, + {ma_backend_pulseaudio, "PulseAudio"}, + {ma_backend_alsa, "ALSA"}, + {ma_backend_jack, "JACK"}, + {ma_backend_aaudio, "AAudio"}, + {ma_backend_opensl, "OpenSL|ES"}, + {ma_backend_webaudio, "Web Audio"}, + {ma_backend_custom, "Custom"}, + {ma_backend_null, "Null"} +}; + +MA_API const char* ma_get_backend_name(ma_backend backend) +{ + if (backend < 0 || backend >= (int)ma_countof(gBackendInfo)) { + return "Unknown"; + } + + return gBackendInfo[backend].pName; +} + +MA_API ma_result ma_get_backend_from_name(const char* pBackendName, ma_backend* pBackend) +{ + size_t iBackend; + + if (pBackendName == NULL) { + return MA_INVALID_ARGS; + } + + for (iBackend = 0; iBackend < ma_countof(gBackendInfo); iBackend += 1) { + if (ma_strcmp(pBackendName, gBackendInfo[iBackend].pName) == 0) { + if (pBackend != NULL) { + *pBackend = gBackendInfo[iBackend].backend; + } + + return MA_SUCCESS; + } + } + + /* Getting here means the backend name is unknown. */ + return MA_INVALID_ARGS; +} + +MA_API ma_bool32 ma_is_backend_enabled(ma_backend backend) +{ + /* + This looks a little bit gross, but we want all backends to be included in the switch to avoid warnings on some compilers + about some enums not being handled by the switch statement. + */ + switch (backend) + { + case ma_backend_wasapi: + #if defined(MA_HAS_WASAPI) + return MA_TRUE; + #else + return MA_FALSE; + #endif + case ma_backend_dsound: + #if defined(MA_HAS_DSOUND) + return MA_TRUE; + #else + return MA_FALSE; + #endif + case ma_backend_winmm: + #if defined(MA_HAS_WINMM) + return MA_TRUE; + #else + return MA_FALSE; + #endif + case ma_backend_coreaudio: + #if defined(MA_HAS_COREAUDIO) + return MA_TRUE; + #else + return MA_FALSE; + #endif + case ma_backend_sndio: + #if defined(MA_HAS_SNDIO) + return MA_TRUE; + #else + return MA_FALSE; + #endif + case ma_backend_audio4: + #if defined(MA_HAS_AUDIO4) + return MA_TRUE; + #else + return MA_FALSE; + #endif + case ma_backend_oss: + #if defined(MA_HAS_OSS) + return MA_TRUE; + #else + return MA_FALSE; + #endif + case ma_backend_pulseaudio: + #if defined(MA_HAS_PULSEAUDIO) + return MA_TRUE; + #else + return MA_FALSE; + #endif + case ma_backend_alsa: + #if defined(MA_HAS_ALSA) + return MA_TRUE; + #else + return MA_FALSE; + #endif + case ma_backend_jack: + #if defined(MA_HAS_JACK) + return MA_TRUE; + #else + return MA_FALSE; + #endif + case ma_backend_aaudio: + #if defined(MA_HAS_AAUDIO) + #if defined(MA_ANDROID) + { + return ma_android_sdk_version() >= MA_AAUDIO_MIN_ANDROID_SDK_VERSION; + } + #else + return MA_FALSE; + #endif + #else + return MA_FALSE; + #endif + case ma_backend_opensl: + #if defined(MA_HAS_OPENSL) + #if defined(MA_ANDROID) + { + return ma_android_sdk_version() >= 9; + } + #else + return MA_TRUE; + #endif + #else + return MA_FALSE; + #endif + case ma_backend_webaudio: + #if defined(MA_HAS_WEBAUDIO) + return MA_TRUE; + #else + return MA_FALSE; + #endif + case ma_backend_custom: + #if defined(MA_HAS_CUSTOM) + return MA_TRUE; + #else + return MA_FALSE; + #endif + case ma_backend_null: + #if defined(MA_HAS_NULL) + return MA_TRUE; + #else + return MA_FALSE; + #endif + + default: return MA_FALSE; + } +} + +MA_API ma_result ma_get_enabled_backends(ma_backend* pBackends, size_t backendCap, size_t* pBackendCount) +{ + size_t backendCount; + size_t iBackend; + 
ma_result result = MA_SUCCESS; + + if (pBackendCount == NULL) { + return MA_INVALID_ARGS; + } + + backendCount = 0; + + for (iBackend = 0; iBackend <= ma_backend_null; iBackend += 1) { + ma_backend backend = (ma_backend)iBackend; + + if (ma_is_backend_enabled(backend)) { + /* The backend is enabled. Try adding it to the list. If there's no room, MA_NO_SPACE needs to be returned. */ + if (backendCount == backendCap) { + result = MA_NO_SPACE; + break; + } else { + pBackends[backendCount] = backend; + backendCount += 1; + } + } + } + + if (pBackendCount != NULL) { + *pBackendCount = backendCount; + } + + return result; +} + +MA_API ma_bool32 ma_is_loopback_supported(ma_backend backend) +{ + switch (backend) + { + case ma_backend_wasapi: return MA_TRUE; + case ma_backend_dsound: return MA_FALSE; + case ma_backend_winmm: return MA_FALSE; + case ma_backend_coreaudio: return MA_FALSE; + case ma_backend_sndio: return MA_FALSE; + case ma_backend_audio4: return MA_FALSE; + case ma_backend_oss: return MA_FALSE; + case ma_backend_pulseaudio: return MA_FALSE; + case ma_backend_alsa: return MA_FALSE; + case ma_backend_jack: return MA_FALSE; + case ma_backend_aaudio: return MA_FALSE; + case ma_backend_opensl: return MA_FALSE; + case ma_backend_webaudio: return MA_FALSE; + case ma_backend_custom: return MA_FALSE; /* <-- Will depend on the implementation of the backend. */ + case ma_backend_null: return MA_FALSE; + default: return MA_FALSE; + } +} + + + +#if defined(MA_WIN32) +/* WASAPI error codes. */ +#define MA_AUDCLNT_E_NOT_INITIALIZED ((HRESULT)0x88890001) +#define MA_AUDCLNT_E_ALREADY_INITIALIZED ((HRESULT)0x88890002) +#define MA_AUDCLNT_E_WRONG_ENDPOINT_TYPE ((HRESULT)0x88890003) +#define MA_AUDCLNT_E_DEVICE_INVALIDATED ((HRESULT)0x88890004) +#define MA_AUDCLNT_E_NOT_STOPPED ((HRESULT)0x88890005) +#define MA_AUDCLNT_E_BUFFER_TOO_LARGE ((HRESULT)0x88890006) +#define MA_AUDCLNT_E_OUT_OF_ORDER ((HRESULT)0x88890007) +#define MA_AUDCLNT_E_UNSUPPORTED_FORMAT ((HRESULT)0x88890008) +#define MA_AUDCLNT_E_INVALID_SIZE ((HRESULT)0x88890009) +#define MA_AUDCLNT_E_DEVICE_IN_USE ((HRESULT)0x8889000A) +#define MA_AUDCLNT_E_BUFFER_OPERATION_PENDING ((HRESULT)0x8889000B) +#define MA_AUDCLNT_E_THREAD_NOT_REGISTERED ((HRESULT)0x8889000C) +#define MA_AUDCLNT_E_NO_SINGLE_PROCESS ((HRESULT)0x8889000D) +#define MA_AUDCLNT_E_EXCLUSIVE_MODE_NOT_ALLOWED ((HRESULT)0x8889000E) +#define MA_AUDCLNT_E_ENDPOINT_CREATE_FAILED ((HRESULT)0x8889000F) +#define MA_AUDCLNT_E_SERVICE_NOT_RUNNING ((HRESULT)0x88890010) +#define MA_AUDCLNT_E_EVENTHANDLE_NOT_EXPECTED ((HRESULT)0x88890011) +#define MA_AUDCLNT_E_EXCLUSIVE_MODE_ONLY ((HRESULT)0x88890012) +#define MA_AUDCLNT_E_BUFDURATION_PERIOD_NOT_EQUAL ((HRESULT)0x88890013) +#define MA_AUDCLNT_E_EVENTHANDLE_NOT_SET ((HRESULT)0x88890014) +#define MA_AUDCLNT_E_INCORRECT_BUFFER_SIZE ((HRESULT)0x88890015) +#define MA_AUDCLNT_E_BUFFER_SIZE_ERROR ((HRESULT)0x88890016) +#define MA_AUDCLNT_E_CPUUSAGE_EXCEEDED ((HRESULT)0x88890017) +#define MA_AUDCLNT_E_BUFFER_ERROR ((HRESULT)0x88890018) +#define MA_AUDCLNT_E_BUFFER_SIZE_NOT_ALIGNED ((HRESULT)0x88890019) +#define MA_AUDCLNT_E_INVALID_DEVICE_PERIOD ((HRESULT)0x88890020) +#define MA_AUDCLNT_E_INVALID_STREAM_FLAG ((HRESULT)0x88890021) +#define MA_AUDCLNT_E_ENDPOINT_OFFLOAD_NOT_CAPABLE ((HRESULT)0x88890022) +#define MA_AUDCLNT_E_OUT_OF_OFFLOAD_RESOURCES ((HRESULT)0x88890023) +#define MA_AUDCLNT_E_OFFLOAD_MODE_ONLY ((HRESULT)0x88890024) +#define MA_AUDCLNT_E_NONOFFLOAD_MODE_ONLY ((HRESULT)0x88890025) +#define MA_AUDCLNT_E_RESOURCES_INVALIDATED 
((HRESULT)0x88890026) +#define MA_AUDCLNT_E_RAW_MODE_UNSUPPORTED ((HRESULT)0x88890027) +#define MA_AUDCLNT_E_ENGINE_PERIODICITY_LOCKED ((HRESULT)0x88890028) +#define MA_AUDCLNT_E_ENGINE_FORMAT_LOCKED ((HRESULT)0x88890029) +#define MA_AUDCLNT_E_HEADTRACKING_ENABLED ((HRESULT)0x88890030) +#define MA_AUDCLNT_E_HEADTRACKING_UNSUPPORTED ((HRESULT)0x88890040) +#define MA_AUDCLNT_S_BUFFER_EMPTY ((HRESULT)0x08890001) +#define MA_AUDCLNT_S_THREAD_ALREADY_REGISTERED ((HRESULT)0x08890002) +#define MA_AUDCLNT_S_POSITION_STALLED ((HRESULT)0x08890003) + +#define MA_DS_OK ((HRESULT)0) +#define MA_DS_NO_VIRTUALIZATION ((HRESULT)0x0878000A) +#define MA_DSERR_ALLOCATED ((HRESULT)0x8878000A) +#define MA_DSERR_CONTROLUNAVAIL ((HRESULT)0x8878001E) +#define MA_DSERR_INVALIDPARAM ((HRESULT)0x80070057) /*E_INVALIDARG*/ +#define MA_DSERR_INVALIDCALL ((HRESULT)0x88780032) +#define MA_DSERR_GENERIC ((HRESULT)0x80004005) /*E_FAIL*/ +#define MA_DSERR_PRIOLEVELNEEDED ((HRESULT)0x88780046) +#define MA_DSERR_OUTOFMEMORY ((HRESULT)0x8007000E) /*E_OUTOFMEMORY*/ +#define MA_DSERR_BADFORMAT ((HRESULT)0x88780064) +#define MA_DSERR_UNSUPPORTED ((HRESULT)0x80004001) /*E_NOTIMPL*/ +#define MA_DSERR_NODRIVER ((HRESULT)0x88780078) +#define MA_DSERR_ALREADYINITIALIZED ((HRESULT)0x88780082) +#define MA_DSERR_NOAGGREGATION ((HRESULT)0x80040110) /*CLASS_E_NOAGGREGATION*/ +#define MA_DSERR_BUFFERLOST ((HRESULT)0x88780096) +#define MA_DSERR_OTHERAPPHASPRIO ((HRESULT)0x887800A0) +#define MA_DSERR_UNINITIALIZED ((HRESULT)0x887800AA) +#define MA_DSERR_NOINTERFACE ((HRESULT)0x80004002) /*E_NOINTERFACE*/ +#define MA_DSERR_ACCESSDENIED ((HRESULT)0x80070005) /*E_ACCESSDENIED*/ +#define MA_DSERR_BUFFERTOOSMALL ((HRESULT)0x887800B4) +#define MA_DSERR_DS8_REQUIRED ((HRESULT)0x887800BE) +#define MA_DSERR_SENDLOOP ((HRESULT)0x887800C8) +#define MA_DSERR_BADSENDBUFFERGUID ((HRESULT)0x887800D2) +#define MA_DSERR_OBJECTNOTFOUND ((HRESULT)0x88781161) +#define MA_DSERR_FXUNAVAILABLE ((HRESULT)0x887800DC) + +static ma_result ma_result_from_HRESULT(HRESULT hr) +{ + switch (hr) + { + case NOERROR: return MA_SUCCESS; + /*case S_OK: return MA_SUCCESS;*/ + + case E_POINTER: return MA_INVALID_ARGS; + case E_UNEXPECTED: return MA_ERROR; + case E_NOTIMPL: return MA_NOT_IMPLEMENTED; + case E_OUTOFMEMORY: return MA_OUT_OF_MEMORY; + case E_INVALIDARG: return MA_INVALID_ARGS; + case E_NOINTERFACE: return MA_API_NOT_FOUND; + case E_HANDLE: return MA_INVALID_ARGS; + case E_ABORT: return MA_ERROR; + case E_FAIL: return MA_ERROR; + case E_ACCESSDENIED: return MA_ACCESS_DENIED; + + /* WASAPI */ + case MA_AUDCLNT_E_NOT_INITIALIZED: return MA_DEVICE_NOT_INITIALIZED; + case MA_AUDCLNT_E_ALREADY_INITIALIZED: return MA_DEVICE_ALREADY_INITIALIZED; + case MA_AUDCLNT_E_WRONG_ENDPOINT_TYPE: return MA_INVALID_ARGS; + case MA_AUDCLNT_E_DEVICE_INVALIDATED: return MA_UNAVAILABLE; + case MA_AUDCLNT_E_NOT_STOPPED: return MA_DEVICE_NOT_STOPPED; + case MA_AUDCLNT_E_BUFFER_TOO_LARGE: return MA_TOO_BIG; + case MA_AUDCLNT_E_OUT_OF_ORDER: return MA_INVALID_OPERATION; + case MA_AUDCLNT_E_UNSUPPORTED_FORMAT: return MA_FORMAT_NOT_SUPPORTED; + case MA_AUDCLNT_E_INVALID_SIZE: return MA_INVALID_ARGS; + case MA_AUDCLNT_E_DEVICE_IN_USE: return MA_BUSY; + case MA_AUDCLNT_E_BUFFER_OPERATION_PENDING: return MA_INVALID_OPERATION; + case MA_AUDCLNT_E_THREAD_NOT_REGISTERED: return MA_DOES_NOT_EXIST; + case MA_AUDCLNT_E_NO_SINGLE_PROCESS: return MA_INVALID_OPERATION; + case MA_AUDCLNT_E_EXCLUSIVE_MODE_NOT_ALLOWED: return MA_SHARE_MODE_NOT_SUPPORTED; + case MA_AUDCLNT_E_ENDPOINT_CREATE_FAILED: return 
MA_FAILED_TO_OPEN_BACKEND_DEVICE; + case MA_AUDCLNT_E_SERVICE_NOT_RUNNING: return MA_NOT_CONNECTED; + case MA_AUDCLNT_E_EVENTHANDLE_NOT_EXPECTED: return MA_INVALID_ARGS; + case MA_AUDCLNT_E_EXCLUSIVE_MODE_ONLY: return MA_SHARE_MODE_NOT_SUPPORTED; + case MA_AUDCLNT_E_BUFDURATION_PERIOD_NOT_EQUAL: return MA_INVALID_ARGS; + case MA_AUDCLNT_E_EVENTHANDLE_NOT_SET: return MA_INVALID_ARGS; + case MA_AUDCLNT_E_INCORRECT_BUFFER_SIZE: return MA_INVALID_ARGS; + case MA_AUDCLNT_E_BUFFER_SIZE_ERROR: return MA_INVALID_ARGS; + case MA_AUDCLNT_E_CPUUSAGE_EXCEEDED: return MA_ERROR; + case MA_AUDCLNT_E_BUFFER_ERROR: return MA_ERROR; + case MA_AUDCLNT_E_BUFFER_SIZE_NOT_ALIGNED: return MA_INVALID_ARGS; + case MA_AUDCLNT_E_INVALID_DEVICE_PERIOD: return MA_INVALID_ARGS; + case MA_AUDCLNT_E_INVALID_STREAM_FLAG: return MA_INVALID_ARGS; + case MA_AUDCLNT_E_ENDPOINT_OFFLOAD_NOT_CAPABLE: return MA_INVALID_OPERATION; + case MA_AUDCLNT_E_OUT_OF_OFFLOAD_RESOURCES: return MA_OUT_OF_MEMORY; + case MA_AUDCLNT_E_OFFLOAD_MODE_ONLY: return MA_INVALID_OPERATION; + case MA_AUDCLNT_E_NONOFFLOAD_MODE_ONLY: return MA_INVALID_OPERATION; + case MA_AUDCLNT_E_RESOURCES_INVALIDATED: return MA_INVALID_DATA; + case MA_AUDCLNT_E_RAW_MODE_UNSUPPORTED: return MA_INVALID_OPERATION; + case MA_AUDCLNT_E_ENGINE_PERIODICITY_LOCKED: return MA_INVALID_OPERATION; + case MA_AUDCLNT_E_ENGINE_FORMAT_LOCKED: return MA_INVALID_OPERATION; + case MA_AUDCLNT_E_HEADTRACKING_ENABLED: return MA_INVALID_OPERATION; + case MA_AUDCLNT_E_HEADTRACKING_UNSUPPORTED: return MA_INVALID_OPERATION; + case MA_AUDCLNT_S_BUFFER_EMPTY: return MA_NO_SPACE; + case MA_AUDCLNT_S_THREAD_ALREADY_REGISTERED: return MA_ALREADY_EXISTS; + case MA_AUDCLNT_S_POSITION_STALLED: return MA_ERROR; + + /* DirectSound */ + /*case MA_DS_OK: return MA_SUCCESS;*/ /* S_OK */ + case MA_DS_NO_VIRTUALIZATION: return MA_SUCCESS; + case MA_DSERR_ALLOCATED: return MA_ALREADY_IN_USE; + case MA_DSERR_CONTROLUNAVAIL: return MA_INVALID_OPERATION; + /*case MA_DSERR_INVALIDPARAM: return MA_INVALID_ARGS;*/ /* E_INVALIDARG */ + case MA_DSERR_INVALIDCALL: return MA_INVALID_OPERATION; + /*case MA_DSERR_GENERIC: return MA_ERROR;*/ /* E_FAIL */ + case MA_DSERR_PRIOLEVELNEEDED: return MA_INVALID_OPERATION; + /*case MA_DSERR_OUTOFMEMORY: return MA_OUT_OF_MEMORY;*/ /* E_OUTOFMEMORY */ + case MA_DSERR_BADFORMAT: return MA_FORMAT_NOT_SUPPORTED; + /*case MA_DSERR_UNSUPPORTED: return MA_NOT_IMPLEMENTED;*/ /* E_NOTIMPL */ + case MA_DSERR_NODRIVER: return MA_FAILED_TO_INIT_BACKEND; + case MA_DSERR_ALREADYINITIALIZED: return MA_DEVICE_ALREADY_INITIALIZED; + case MA_DSERR_NOAGGREGATION: return MA_ERROR; + case MA_DSERR_BUFFERLOST: return MA_UNAVAILABLE; + case MA_DSERR_OTHERAPPHASPRIO: return MA_ACCESS_DENIED; + case MA_DSERR_UNINITIALIZED: return MA_DEVICE_NOT_INITIALIZED; + /*case MA_DSERR_NOINTERFACE: return MA_API_NOT_FOUND;*/ /* E_NOINTERFACE */ + /*case MA_DSERR_ACCESSDENIED: return MA_ACCESS_DENIED;*/ /* E_ACCESSDENIED */ + case MA_DSERR_BUFFERTOOSMALL: return MA_NO_SPACE; + case MA_DSERR_DS8_REQUIRED: return MA_INVALID_OPERATION; + case MA_DSERR_SENDLOOP: return MA_DEADLOCK; + case MA_DSERR_BADSENDBUFFERGUID: return MA_INVALID_ARGS; + case MA_DSERR_OBJECTNOTFOUND: return MA_NO_DEVICE; + case MA_DSERR_FXUNAVAILABLE: return MA_UNAVAILABLE; + + default: return MA_ERROR; + } +} + +/* PROPVARIANT */ +#define MA_VT_LPWSTR 31 +#define MA_VT_BLOB 65 + +#if defined(_MSC_VER) && !defined(__clang__) + #pragma warning(push) + #pragma warning(disable:4201) /* nonstandard extension used: nameless struct/union */ +#elif 
defined(__clang__) || (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8))) + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wpedantic" /* For ISO C99 doesn't support unnamed structs/unions [-Wpedantic] */ + #if defined(__clang__) + #pragma GCC diagnostic ignored "-Wc11-extensions" /* anonymous unions are a C11 extension */ + #endif +#endif +typedef struct +{ + WORD vt; + WORD wReserved1; + WORD wReserved2; + WORD wReserved3; + union + { + struct + { + ULONG cbSize; + BYTE* pBlobData; + } blob; + WCHAR* pwszVal; + char pad[16]; /* Just to ensure the size of the struct matches the official version. */ + }; +} MA_PROPVARIANT; +#if defined(_MSC_VER) && !defined(__clang__) + #pragma warning(pop) +#elif defined(__clang__) || (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8))) + #pragma GCC diagnostic pop +#endif + +typedef HRESULT (WINAPI * MA_PFN_CoInitialize)(void* pvReserved); +typedef HRESULT (WINAPI * MA_PFN_CoInitializeEx)(void* pvReserved, DWORD dwCoInit); +typedef void (WINAPI * MA_PFN_CoUninitialize)(void); +typedef HRESULT (WINAPI * MA_PFN_CoCreateInstance)(const IID* rclsid, void* pUnkOuter, DWORD dwClsContext, const IID* riid, void* ppv); +typedef void (WINAPI * MA_PFN_CoTaskMemFree)(void* pv); +typedef HRESULT (WINAPI * MA_PFN_PropVariantClear)(MA_PROPVARIANT *pvar); +typedef int (WINAPI * MA_PFN_StringFromGUID2)(const GUID* const rguid, WCHAR* lpsz, int cchMax); + +typedef HWND (WINAPI * MA_PFN_GetForegroundWindow)(void); +typedef HWND (WINAPI * MA_PFN_GetDesktopWindow)(void); + +#if defined(MA_WIN32_DESKTOP) +/* Microsoft documents these APIs as returning LSTATUS, but the Win32 API shipping with some compilers do not define it. It's just a LONG. */ +typedef LONG (WINAPI * MA_PFN_RegOpenKeyExA)(HKEY hKey, const char* lpSubKey, DWORD ulOptions, DWORD samDesired, HKEY* phkResult); +typedef LONG (WINAPI * MA_PFN_RegCloseKey)(HKEY hKey); +typedef LONG (WINAPI * MA_PFN_RegQueryValueExA)(HKEY hKey, const char* lpValueName, DWORD* lpReserved, DWORD* lpType, BYTE* lpData, DWORD* lpcbData); +#endif /* MA_WIN32_DESKTOP */ + +MA_API size_t ma_strlen_WCHAR(const WCHAR* str) +{ + size_t len = 0; + while (str[len] != '\0') { + len += 1; + } + + return len; +} + +MA_API int ma_strcmp_WCHAR(const WCHAR *s1, const WCHAR *s2) +{ + while (*s1 != '\0' && *s1 == *s2) { + s1 += 1; + s2 += 1; + } + + return *s1 - *s2; +} + +MA_API int ma_strcpy_s_WCHAR(WCHAR* dst, size_t dstCap, const WCHAR* src) +{ + size_t i; + + if (dst == 0) { + return 22; + } + if (dstCap == 0) { + return 34; + } + if (src == 0) { + dst[0] = '\0'; + return 22; + } + + for (i = 0; i < dstCap && src[i] != '\0'; ++i) { + dst[i] = src[i]; + } + + if (i < dstCap) { + dst[i] = '\0'; + return 0; + } + + dst[0] = '\0'; + return 34; +} +#endif /* MA_WIN32 */ + + +#define MA_DEFAULT_PLAYBACK_DEVICE_NAME "Default Playback Device" +#define MA_DEFAULT_CAPTURE_DEVICE_NAME "Default Capture Device" + + + + +/******************************************************************************* + +Timing + +*******************************************************************************/ +#if defined(MA_WIN32) && !defined(MA_POSIX) + static LARGE_INTEGER g_ma_TimerFrequency; /* <-- Initialized to zero since it's static. 
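+       It's populated lazily by ma_timer_init() on first use. QueryPerformanceFrequency() returns a
+       value that is fixed at system boot, so if two threads race to initialize it they just write
+       the same value twice, which is harmless.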
*/ + static void ma_timer_init(ma_timer* pTimer) + { + LARGE_INTEGER counter; + + if (g_ma_TimerFrequency.QuadPart == 0) { + QueryPerformanceFrequency(&g_ma_TimerFrequency); + } + + QueryPerformanceCounter(&counter); + pTimer->counter = counter.QuadPart; + } + + static double ma_timer_get_time_in_seconds(ma_timer* pTimer) + { + LARGE_INTEGER counter; + if (!QueryPerformanceCounter(&counter)) { + return 0; + } + + return (double)(counter.QuadPart - pTimer->counter) / g_ma_TimerFrequency.QuadPart; + } +#elif defined(MA_APPLE) && (MAC_OS_X_VERSION_MIN_REQUIRED < 101200) + static ma_uint64 g_ma_TimerFrequency = 0; + static void ma_timer_init(ma_timer* pTimer) + { + mach_timebase_info_data_t baseTime; + mach_timebase_info(&baseTime); + g_ma_TimerFrequency = (baseTime.denom * 1e9) / baseTime.numer; + + pTimer->counter = mach_absolute_time(); + } + + static double ma_timer_get_time_in_seconds(ma_timer* pTimer) + { + ma_uint64 newTimeCounter = mach_absolute_time(); + ma_uint64 oldTimeCounter = pTimer->counter; + + return (newTimeCounter - oldTimeCounter) / g_ma_TimerFrequency; + } +#elif defined(MA_EMSCRIPTEN) + static MA_INLINE void ma_timer_init(ma_timer* pTimer) + { + pTimer->counterD = emscripten_get_now(); + } + + static MA_INLINE double ma_timer_get_time_in_seconds(ma_timer* pTimer) + { + return (emscripten_get_now() - pTimer->counterD) / 1000; /* Emscripten is in milliseconds. */ + } +#else + #if defined(_POSIX_C_SOURCE) && _POSIX_C_SOURCE >= 199309L + #if defined(CLOCK_MONOTONIC) + #define MA_CLOCK_ID CLOCK_MONOTONIC + #else + #define MA_CLOCK_ID CLOCK_REALTIME + #endif + + static void ma_timer_init(ma_timer* pTimer) + { + struct timespec newTime; + clock_gettime(MA_CLOCK_ID, &newTime); + + pTimer->counter = (newTime.tv_sec * 1000000000) + newTime.tv_nsec; + } + + static double ma_timer_get_time_in_seconds(ma_timer* pTimer) + { + ma_uint64 newTimeCounter; + ma_uint64 oldTimeCounter; + + struct timespec newTime; + clock_gettime(MA_CLOCK_ID, &newTime); + + newTimeCounter = (newTime.tv_sec * 1000000000) + newTime.tv_nsec; + oldTimeCounter = pTimer->counter; + + return (newTimeCounter - oldTimeCounter) / 1000000000.0; + } + #else + static void ma_timer_init(ma_timer* pTimer) + { + struct timeval newTime; + gettimeofday(&newTime, NULL); + + pTimer->counter = (newTime.tv_sec * 1000000) + newTime.tv_usec; + } + + static double ma_timer_get_time_in_seconds(ma_timer* pTimer) + { + ma_uint64 newTimeCounter; + ma_uint64 oldTimeCounter; + + struct timeval newTime; + gettimeofday(&newTime, NULL); + + newTimeCounter = (newTime.tv_sec * 1000000) + newTime.tv_usec; + oldTimeCounter = pTimer->counter; + + return (newTimeCounter - oldTimeCounter) / 1000000.0; + } + #endif +#endif + + + +#if 0 +static ma_uint32 ma_get_closest_standard_sample_rate(ma_uint32 sampleRateIn) +{ + ma_uint32 closestRate = 0; + ma_uint32 closestDiff = 0xFFFFFFFF; + size_t iStandardRate; + + for (iStandardRate = 0; iStandardRate < ma_countof(g_maStandardSampleRatePriorities); ++iStandardRate) { + ma_uint32 standardRate = g_maStandardSampleRatePriorities[iStandardRate]; + ma_uint32 diff; + + if (sampleRateIn > standardRate) { + diff = sampleRateIn - standardRate; + } else { + diff = standardRate - sampleRateIn; + } + + if (diff == 0) { + return standardRate; /* The input sample rate is a standard rate. 
*/
+        }
+
+        if (closestDiff > diff) {
+            closestDiff = diff;
+            closestRate = standardRate;
+        }
+    }
+
+    return closestRate;
+}
+#endif
+
+
+static MA_INLINE unsigned int ma_device_disable_denormals(ma_device* pDevice)
+{
+    MA_ASSERT(pDevice != NULL);
+
+    if (!pDevice->noDisableDenormals) {
+        return ma_disable_denormals();
+    } else {
+        return 0;
+    }
+}
+
+static MA_INLINE void ma_device_restore_denormals(ma_device* pDevice, unsigned int prevState)
+{
+    MA_ASSERT(pDevice != NULL);
+
+    if (!pDevice->noDisableDenormals) {
+        ma_restore_denormals(prevState);
+    } else {
+        /* Do nothing. */
+        (void)prevState;
+    }
+}
+
+static ma_device_notification ma_device_notification_init(ma_device* pDevice, ma_device_notification_type type)
+{
+    ma_device_notification notification;
+
+    MA_ZERO_OBJECT(&notification);
+    notification.pDevice = pDevice;
+    notification.type    = type;
+
+    return notification;
+}
+
+static void ma_device__on_notification(ma_device_notification notification)
+{
+    MA_ASSERT(notification.pDevice != NULL);
+
+    if (notification.pDevice->onNotification != NULL) {
+        notification.pDevice->onNotification(&notification);
+    }
+
+    /* TEMP FOR COMPATIBILITY: If it's a stopped notification, fire the onStop callback as well. This is only for backwards compatibility and will be removed. */
+    if (notification.pDevice->onStop != NULL && notification.type == ma_device_notification_type_stopped) {
+        notification.pDevice->onStop(notification.pDevice);
+    }
+}
+
+static void ma_device__on_notification_started(ma_device* pDevice)
+{
+    ma_device__on_notification(ma_device_notification_init(pDevice, ma_device_notification_type_started));
+}
+
+static void ma_device__on_notification_stopped(ma_device* pDevice)
+{
+    ma_device__on_notification(ma_device_notification_init(pDevice, ma_device_notification_type_stopped));
+}
+
+/* Not all platforms support reroute notifications. */
+#if !defined(MA_EMSCRIPTEN)
+static void ma_device__on_notification_rerouted(ma_device* pDevice)
+{
+    ma_device__on_notification(ma_device_notification_init(pDevice, ma_device_notification_type_rerouted));
+}
+#endif
+
+#if defined(MA_EMSCRIPTEN)
+#ifdef __cplusplus
+extern "C" {
+#endif
+void EMSCRIPTEN_KEEPALIVE ma_device__on_notification_unlocked(ma_device* pDevice)
+{
+    ma_device__on_notification(ma_device_notification_init(pDevice, ma_device_notification_type_unlocked));
+}
+#ifdef __cplusplus
+}
+#endif
+#endif
+
+
+static void ma_device__on_data_inner(ma_device* pDevice, void* pFramesOut, const void* pFramesIn, ma_uint32 frameCount)
+{
+    MA_ASSERT(pDevice != NULL);
+    MA_ASSERT(pDevice->onData != NULL);
+
+    if (!pDevice->noPreSilencedOutputBuffer && pFramesOut != NULL) {
+        ma_silence_pcm_frames(pFramesOut, frameCount, pDevice->playback.format, pDevice->playback.channels);
+    }
+
+    pDevice->onData(pDevice, pFramesOut, pFramesIn, frameCount);
+}
+
+static void ma_device__on_data(ma_device* pDevice, void* pFramesOut, const void* pFramesIn, ma_uint32 frameCount)
+{
+    MA_ASSERT(pDevice != NULL);
+
+    /* Don't read more data from the client if we're in the process of stopping. */
+    if (ma_device_get_state(pDevice) == ma_device_state_stopping) {
+        return;
+    }
+
+    if (pDevice->noFixedSizedCallback) {
+        /* Fast path. Not using a fixed sized callback. Process directly from the specified buffers. */
+        ma_device__on_data_inner(pDevice, pFramesOut, pFramesIn, frameCount);
+    } else {
+        /* Slow path. Using a fixed sized callback. Need to use the intermediary buffer.
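+        As a concrete example: with a fixed callback size of 256 frames and a backend requesting
+        480 frames of playback, the loop below fires the data callback twice (filling the
+        intermediary buffer with 256 frames each time), copies out 256 + 224 frames, and leaves 32
+        frames cached in the intermediary buffer for the next backend request.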
*/ + ma_uint32 totalFramesProcessed = 0; + + while (totalFramesProcessed < frameCount) { + ma_uint32 totalFramesRemaining = frameCount - totalFramesProcessed; + ma_uint32 framesToProcessThisIteration = 0; + + if (pFramesIn != NULL) { + /* Capturing. Write to the intermediary buffer. If there's no room, fire the callback to empty it. */ + if (pDevice->capture.intermediaryBufferLen < pDevice->capture.intermediaryBufferCap) { + /* There's some room left in the intermediary buffer. Write to it without firing the callback. */ + framesToProcessThisIteration = totalFramesRemaining; + if (framesToProcessThisIteration > pDevice->capture.intermediaryBufferCap - pDevice->capture.intermediaryBufferLen) { + framesToProcessThisIteration = pDevice->capture.intermediaryBufferCap - pDevice->capture.intermediaryBufferLen; + } + + ma_copy_pcm_frames( + ma_offset_pcm_frames_ptr(pDevice->capture.pIntermediaryBuffer, pDevice->capture.intermediaryBufferLen, pDevice->capture.format, pDevice->capture.channels), + ma_offset_pcm_frames_const_ptr(pFramesIn, totalFramesProcessed, pDevice->capture.format, pDevice->capture.channels), + framesToProcessThisIteration, + pDevice->capture.format, pDevice->capture.channels); + + pDevice->capture.intermediaryBufferLen += framesToProcessThisIteration; + } + + if (pDevice->capture.intermediaryBufferLen == pDevice->capture.intermediaryBufferCap) { + /* No room left in the intermediary buffer. Fire the data callback. */ + if (pDevice->type == ma_device_type_duplex) { + /* We'll do the duplex data callback later after we've processed the playback data. */ + } else { + ma_device__on_data_inner(pDevice, NULL, pDevice->capture.pIntermediaryBuffer, pDevice->capture.intermediaryBufferCap); + + /* The intermediary buffer has just been drained. */ + pDevice->capture.intermediaryBufferLen = 0; + } + } + } + + if (pFramesOut != NULL) { + /* Playing back. Read from the intermediary buffer. If there's nothing in it, fire the callback to fill it. */ + if (pDevice->playback.intermediaryBufferLen > 0) { + /* There's some content in the intermediary buffer. Read from that without firing the callback. */ + if (pDevice->type == ma_device_type_duplex) { + /* The frames processed this iteration for a duplex device will always be based on the capture side. Leave it unmodified. */ + } else { + framesToProcessThisIteration = totalFramesRemaining; + if (framesToProcessThisIteration > pDevice->playback.intermediaryBufferLen) { + framesToProcessThisIteration = pDevice->playback.intermediaryBufferLen; + } + } + + ma_copy_pcm_frames( + ma_offset_pcm_frames_ptr(pFramesOut, totalFramesProcessed, pDevice->playback.format, pDevice->playback.channels), + ma_offset_pcm_frames_ptr(pDevice->playback.pIntermediaryBuffer, pDevice->playback.intermediaryBufferCap - pDevice->playback.intermediaryBufferLen, pDevice->playback.format, pDevice->playback.channels), + framesToProcessThisIteration, + pDevice->playback.format, pDevice->playback.channels); + + pDevice->playback.intermediaryBufferLen -= framesToProcessThisIteration; + } + + if (pDevice->playback.intermediaryBufferLen == 0) { + /* There's nothing in the intermediary buffer. Fire the data callback to fill it. */ + if (pDevice->type == ma_device_type_duplex) { + /* In duplex mode, the data callback will be fired later. Nothing to do here. */ + } else { + ma_device__on_data_inner(pDevice, pDevice->playback.pIntermediaryBuffer, NULL, pDevice->playback.intermediaryBufferCap); + + /* The intermediary buffer has just been filled. 
*/ + pDevice->playback.intermediaryBufferLen = pDevice->playback.intermediaryBufferCap; + } + } + } + + /* If we're in duplex mode we might need to do a refill of the data. */ + if (pDevice->type == ma_device_type_duplex) { + if (pDevice->capture.intermediaryBufferLen == pDevice->capture.intermediaryBufferCap) { + ma_device__on_data_inner(pDevice, pDevice->playback.pIntermediaryBuffer, pDevice->capture.pIntermediaryBuffer, pDevice->capture.intermediaryBufferCap); + + pDevice->playback.intermediaryBufferLen = pDevice->playback.intermediaryBufferCap; /* The playback buffer will have just been filled. */ + pDevice->capture.intermediaryBufferLen = 0; /* The intermediary buffer has just been drained. */ + } + } + + /* Make sure this is only incremented once in the duplex case. */ + totalFramesProcessed += framesToProcessThisIteration; + } + } +} + +static void ma_device__handle_data_callback(ma_device* pDevice, void* pFramesOut, const void* pFramesIn, ma_uint32 frameCount) +{ + float masterVolumeFactor; + + ma_device_get_master_volume(pDevice, &masterVolumeFactor); /* Use ma_device_get_master_volume() to ensure the volume is loaded atomically. */ + + if (pDevice->onData) { + unsigned int prevDenormalState = ma_device_disable_denormals(pDevice); + { + /* Volume control of input makes things a bit awkward because the input buffer is read-only. We'll need to use a temp buffer and loop in this case. */ + if (pFramesIn != NULL && masterVolumeFactor != 1) { + ma_uint8 tempFramesIn[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint32 bpfCapture = ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels); + ma_uint32 bpfPlayback = ma_get_bytes_per_frame(pDevice->playback.format, pDevice->playback.channels); + ma_uint32 totalFramesProcessed = 0; + while (totalFramesProcessed < frameCount) { + ma_uint32 framesToProcessThisIteration = frameCount - totalFramesProcessed; + if (framesToProcessThisIteration > sizeof(tempFramesIn)/bpfCapture) { + framesToProcessThisIteration = sizeof(tempFramesIn)/bpfCapture; + } + + ma_copy_and_apply_volume_factor_pcm_frames(tempFramesIn, ma_offset_ptr(pFramesIn, totalFramesProcessed*bpfCapture), framesToProcessThisIteration, pDevice->capture.format, pDevice->capture.channels, masterVolumeFactor); + + ma_device__on_data(pDevice, ma_offset_ptr(pFramesOut, totalFramesProcessed*bpfPlayback), tempFramesIn, framesToProcessThisIteration); + + totalFramesProcessed += framesToProcessThisIteration; + } + } else { + ma_device__on_data(pDevice, pFramesOut, pFramesIn, frameCount); + } + + /* Volume control and clipping for playback devices. */ + if (pFramesOut != NULL) { + if (masterVolumeFactor != 1) { + if (pFramesIn == NULL) { /* <-- In full-duplex situations, the volume will have been applied to the input samples before the data callback. Applying it again post-callback will incorrectly compound it. */ + ma_apply_volume_factor_pcm_frames(pFramesOut, frameCount, pDevice->playback.format, pDevice->playback.channels, masterVolumeFactor); + } + } + + if (!pDevice->noClip && pDevice->playback.format == ma_format_f32) { + ma_clip_samples_f32((float*)pFramesOut, (const float*)pFramesOut, frameCount * pDevice->playback.channels); /* Intentionally specifying the same pointer for both input and output for in-place processing. */ + } + } + } + ma_device_restore_denormals(pDevice, prevDenormalState); + } else { + /* No data callback. Just silence the output. 
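+        Leaving the buffer untouched would hand whatever happens to be in that memory to the
+        backend, which would play back as noise, so silence is the only safe default.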
*/ + if (pFramesOut != NULL) { + ma_silence_pcm_frames(pFramesOut, frameCount, pDevice->playback.format, pDevice->playback.channels); + } + } +} + + + +/* A helper function for reading sample data from the client. */ +static void ma_device__read_frames_from_client(ma_device* pDevice, ma_uint32 frameCount, void* pFramesOut) +{ + MA_ASSERT(pDevice != NULL); + MA_ASSERT(frameCount > 0); + MA_ASSERT(pFramesOut != NULL); + + if (pDevice->playback.converter.isPassthrough) { + ma_device__handle_data_callback(pDevice, pFramesOut, NULL, frameCount); + } else { + ma_result result; + ma_uint64 totalFramesReadOut; + void* pRunningFramesOut; + + totalFramesReadOut = 0; + pRunningFramesOut = pFramesOut; + + /* + We run slightly different logic depending on whether or not we're using a heap-allocated + buffer for caching input data. This will be the case if the data converter does not have + the ability to retrieve the required input frame count for a given output frame count. + */ + if (pDevice->playback.pInputCache != NULL) { + while (totalFramesReadOut < frameCount) { + ma_uint64 framesToReadThisIterationIn; + ma_uint64 framesToReadThisIterationOut; + + /* If there's any data available in the cache, that needs to get processed first. */ + if (pDevice->playback.inputCacheRemaining > 0) { + framesToReadThisIterationOut = (frameCount - totalFramesReadOut); + framesToReadThisIterationIn = framesToReadThisIterationOut; + if (framesToReadThisIterationIn > pDevice->playback.inputCacheRemaining) { + framesToReadThisIterationIn = pDevice->playback.inputCacheRemaining; + } + + result = ma_data_converter_process_pcm_frames(&pDevice->playback.converter, ma_offset_pcm_frames_ptr(pDevice->playback.pInputCache, pDevice->playback.inputCacheConsumed, pDevice->playback.format, pDevice->playback.channels), &framesToReadThisIterationIn, pRunningFramesOut, &framesToReadThisIterationOut); + if (result != MA_SUCCESS) { + break; + } + + pDevice->playback.inputCacheConsumed += framesToReadThisIterationIn; + pDevice->playback.inputCacheRemaining -= framesToReadThisIterationIn; + + totalFramesReadOut += framesToReadThisIterationOut; + pRunningFramesOut = ma_offset_ptr(pRunningFramesOut, framesToReadThisIterationOut * ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels)); + + if (framesToReadThisIterationIn == 0 && framesToReadThisIterationOut == 0) { + break; /* We're done. */ + } + } + + /* Getting here means there's no data in the cache and we need to fill it up with data from the client. */ + if (pDevice->playback.inputCacheRemaining == 0) { + ma_device__handle_data_callback(pDevice, pDevice->playback.pInputCache, NULL, (ma_uint32)pDevice->playback.inputCacheCap); + + pDevice->playback.inputCacheConsumed = 0; + pDevice->playback.inputCacheRemaining = pDevice->playback.inputCacheCap; + } + } + } else { + while (totalFramesReadOut < frameCount) { + ma_uint8 pIntermediaryBuffer[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; /* In client format. 
*/
+                ma_uint64 intermediaryBufferCap = sizeof(pIntermediaryBuffer) / ma_get_bytes_per_frame(pDevice->playback.format, pDevice->playback.channels);
+                ma_uint64 framesToReadThisIterationIn;
+                ma_uint64 framesReadThisIterationIn;
+                ma_uint64 framesToReadThisIterationOut;
+                ma_uint64 framesReadThisIterationOut;
+                ma_uint64 requiredInputFrameCount;
+
+                framesToReadThisIterationOut = (frameCount - totalFramesReadOut);
+                framesToReadThisIterationIn = framesToReadThisIterationOut;
+                if (framesToReadThisIterationIn > intermediaryBufferCap) {
+                    framesToReadThisIterationIn = intermediaryBufferCap;
+                }
+
+                ma_data_converter_get_required_input_frame_count(&pDevice->playback.converter, framesToReadThisIterationOut, &requiredInputFrameCount);
+                if (framesToReadThisIterationIn > requiredInputFrameCount) {
+                    framesToReadThisIterationIn = requiredInputFrameCount;
+                }
+
+                ma_device__handle_data_callback(pDevice, pIntermediaryBuffer, NULL, (ma_uint32)framesToReadThisIterationIn);
+
+                /*
+                At this point we have our decoded data in input format and now we need to convert to output format. Note that even if we didn't read any
+                input frames, we still want to try processing frames because there may be some output frames generated from cached input data.
+                */
+                framesReadThisIterationIn  = framesToReadThisIterationIn;
+                framesReadThisIterationOut = framesToReadThisIterationOut;
+                result = ma_data_converter_process_pcm_frames(&pDevice->playback.converter, pIntermediaryBuffer, &framesReadThisIterationIn, pRunningFramesOut, &framesReadThisIterationOut);
+                if (result != MA_SUCCESS) {
+                    break;
+                }
+
+                totalFramesReadOut += framesReadThisIterationOut;
+                pRunningFramesOut   = ma_offset_ptr(pRunningFramesOut, framesReadThisIterationOut * ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels));
+
+                if (framesReadThisIterationIn == 0 && framesReadThisIterationOut == 0) {
+                    break;  /* We're done. */
+                }
+            }
+        }
+    }
+}
+
+/* A helper for sending sample data to the client. */
+static void ma_device__send_frames_to_client(ma_device* pDevice, ma_uint32 frameCountInDeviceFormat, const void* pFramesInDeviceFormat)
+{
+    MA_ASSERT(pDevice != NULL);
+    MA_ASSERT(frameCountInDeviceFormat > 0);
+    MA_ASSERT(pFramesInDeviceFormat != NULL);
+
+    if (pDevice->capture.converter.isPassthrough) {
+        ma_device__handle_data_callback(pDevice, NULL, pFramesInDeviceFormat, frameCountInDeviceFormat);
+    } else {
+        ma_result result;
+        ma_uint8 pFramesInClientFormat[MA_DATA_CONVERTER_STACK_BUFFER_SIZE];
+        ma_uint64 framesInClientFormatCap = sizeof(pFramesInClientFormat) / ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels);
+        ma_uint64 totalDeviceFramesProcessed = 0;
+        ma_uint64 totalClientFramesProcessed = 0;
+        const void* pRunningFramesInDeviceFormat = pFramesInDeviceFormat;
+
+        /* We just keep going until we've exhausted all of our input frames and cannot generate any more output frames.
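+        The converter can hold frames internally (when resampling, for example), so a zero on one
+        side alone isn't conclusive; the loop below only terminates once a pass consumes no input
+        and produces no output.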
*/ + for (;;) { + ma_uint64 deviceFramesProcessedThisIteration; + ma_uint64 clientFramesProcessedThisIteration; + + deviceFramesProcessedThisIteration = (frameCountInDeviceFormat - totalDeviceFramesProcessed); + clientFramesProcessedThisIteration = framesInClientFormatCap; + + result = ma_data_converter_process_pcm_frames(&pDevice->capture.converter, pRunningFramesInDeviceFormat, &deviceFramesProcessedThisIteration, pFramesInClientFormat, &clientFramesProcessedThisIteration); + if (result != MA_SUCCESS) { + break; + } + + if (clientFramesProcessedThisIteration > 0) { + ma_device__handle_data_callback(pDevice, NULL, pFramesInClientFormat, (ma_uint32)clientFramesProcessedThisIteration); /* Safe cast. */ + } + + pRunningFramesInDeviceFormat = ma_offset_ptr(pRunningFramesInDeviceFormat, deviceFramesProcessedThisIteration * ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels)); + totalDeviceFramesProcessed += deviceFramesProcessedThisIteration; + totalClientFramesProcessed += clientFramesProcessedThisIteration; + + /* This is just to silence a warning. I might want to use this variable later so leaving in place for now. */ + (void)totalClientFramesProcessed; + + if (deviceFramesProcessedThisIteration == 0 && clientFramesProcessedThisIteration == 0) { + break; /* We're done. */ + } + } + } +} + +static ma_result ma_device__handle_duplex_callback_capture(ma_device* pDevice, ma_uint32 frameCountInDeviceFormat, const void* pFramesInDeviceFormat, ma_pcm_rb* pRB) +{ + ma_result result; + ma_uint32 totalDeviceFramesProcessed = 0; + const void* pRunningFramesInDeviceFormat = pFramesInDeviceFormat; + + MA_ASSERT(pDevice != NULL); + MA_ASSERT(frameCountInDeviceFormat > 0); + MA_ASSERT(pFramesInDeviceFormat != NULL); + MA_ASSERT(pRB != NULL); + + /* Write to the ring buffer. The ring buffer is in the client format which means we need to convert. */ + for (;;) { + ma_uint32 framesToProcessInDeviceFormat = (frameCountInDeviceFormat - totalDeviceFramesProcessed); + ma_uint32 framesToProcessInClientFormat = MA_DATA_CONVERTER_STACK_BUFFER_SIZE / ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels); + ma_uint64 framesProcessedInDeviceFormat; + ma_uint64 framesProcessedInClientFormat; + void* pFramesInClientFormat; + + result = ma_pcm_rb_acquire_write(pRB, &framesToProcessInClientFormat, &pFramesInClientFormat); + if (result != MA_SUCCESS) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "Failed to acquire capture PCM frames from ring buffer."); + break; + } + + if (framesToProcessInClientFormat == 0) { + if (ma_pcm_rb_pointer_distance(pRB) == (ma_int32)ma_pcm_rb_get_subbuffer_size(pRB)) { + break; /* Overrun. Not enough room in the ring buffer for input frame. Excess frames are dropped. */ + } + } + + /* Convert. */ + framesProcessedInDeviceFormat = framesToProcessInDeviceFormat; + framesProcessedInClientFormat = framesToProcessInClientFormat; + result = ma_data_converter_process_pcm_frames(&pDevice->capture.converter, pRunningFramesInDeviceFormat, &framesProcessedInDeviceFormat, pFramesInClientFormat, &framesProcessedInClientFormat); + if (result != MA_SUCCESS) { + break; + } + + result = ma_pcm_rb_commit_write(pRB, (ma_uint32)framesProcessedInClientFormat); /* Safe cast. 
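+        framesProcessedInClientFormat starts out as framesToProcessInClientFormat, which is a
+        ma_uint32, and the converter can only ever reduce it, so the value is guaranteed to fit.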
*/ + if (result != MA_SUCCESS) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "Failed to commit capture PCM frames to ring buffer."); + break; + } + + pRunningFramesInDeviceFormat = ma_offset_ptr(pRunningFramesInDeviceFormat, framesProcessedInDeviceFormat * ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels)); + totalDeviceFramesProcessed += (ma_uint32)framesProcessedInDeviceFormat; /* Safe cast. */ + + /* We're done when we're unable to process any client nor device frames. */ + if (framesProcessedInClientFormat == 0 && framesProcessedInDeviceFormat == 0) { + break; /* Done. */ + } + } + + return MA_SUCCESS; +} + +static ma_result ma_device__handle_duplex_callback_playback(ma_device* pDevice, ma_uint32 frameCount, void* pFramesInInternalFormat, ma_pcm_rb* pRB) +{ + ma_result result; + ma_uint8 silentInputFrames[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint32 totalFramesReadOut = 0; + + MA_ASSERT(pDevice != NULL); + MA_ASSERT(frameCount > 0); + MA_ASSERT(pFramesInInternalFormat != NULL); + MA_ASSERT(pRB != NULL); + MA_ASSERT(pDevice->playback.pInputCache != NULL); + + /* + Sitting in the ring buffer should be captured data from the capture callback in external format. If there's not enough data in there for + the whole frameCount frames we just use silence instead for the input data. + */ + MA_ZERO_MEMORY(silentInputFrames, sizeof(silentInputFrames)); + + while (totalFramesReadOut < frameCount && ma_device_is_started(pDevice)) { + /* + We should have a buffer allocated on the heap. Any playback frames still sitting in there + need to be sent to the internal device before we process any more data from the client. + */ + if (pDevice->playback.inputCacheRemaining > 0) { + ma_uint64 framesConvertedIn = pDevice->playback.inputCacheRemaining; + ma_uint64 framesConvertedOut = (frameCount - totalFramesReadOut); + ma_data_converter_process_pcm_frames(&pDevice->playback.converter, ma_offset_pcm_frames_ptr(pDevice->playback.pInputCache, pDevice->playback.inputCacheConsumed, pDevice->playback.format, pDevice->playback.channels), &framesConvertedIn, pFramesInInternalFormat, &framesConvertedOut); + + pDevice->playback.inputCacheConsumed += framesConvertedIn; + pDevice->playback.inputCacheRemaining -= framesConvertedIn; + + totalFramesReadOut += (ma_uint32)framesConvertedOut; /* Safe cast. */ + pFramesInInternalFormat = ma_offset_ptr(pFramesInInternalFormat, framesConvertedOut * ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels)); + } + + /* If there's no more data in the cache we'll need to fill it with some. */ + if (totalFramesReadOut < frameCount && pDevice->playback.inputCacheRemaining == 0) { + ma_uint32 inputFrameCount; + void* pInputFrames; + + inputFrameCount = (ma_uint32)pDevice->playback.inputCacheCap; + result = ma_pcm_rb_acquire_read(pRB, &inputFrameCount, &pInputFrames); + if (result == MA_SUCCESS) { + if (inputFrameCount > 0) { + ma_device__handle_data_callback(pDevice, pDevice->playback.pInputCache, pInputFrames, inputFrameCount); + } else { + if (ma_pcm_rb_pointer_distance(pRB) == 0) { + break; /* Underrun. */ + } + } + } else { + /* No capture data available. Feed in silence. 
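+                    silentInputFrames was zeroed at the top of this function and inputFrameCount is
+                    clamped below to the size of that buffer, so the callback never reads past it.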
*/ + inputFrameCount = (ma_uint32)ma_min(pDevice->playback.inputCacheCap, sizeof(silentInputFrames) / ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels)); + ma_device__handle_data_callback(pDevice, pDevice->playback.pInputCache, silentInputFrames, inputFrameCount); + } + + pDevice->playback.inputCacheConsumed = 0; + pDevice->playback.inputCacheRemaining = inputFrameCount; + + result = ma_pcm_rb_commit_read(pRB, inputFrameCount); + if (result != MA_SUCCESS) { + return result; /* Should never happen. */ + } + } + } + + return MA_SUCCESS; +} + +/* A helper for changing the state of the device. */ +static MA_INLINE void ma_device__set_state(ma_device* pDevice, ma_device_state newState) +{ + ma_atomic_device_state_set(&pDevice->state, newState); +} + + +#if defined(MA_WIN32) + static GUID MA_GUID_KSDATAFORMAT_SUBTYPE_PCM = {0x00000001, 0x0000, 0x0010, {0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}}; + static GUID MA_GUID_KSDATAFORMAT_SUBTYPE_IEEE_FLOAT = {0x00000003, 0x0000, 0x0010, {0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}}; + /*static GUID MA_GUID_KSDATAFORMAT_SUBTYPE_ALAW = {0x00000006, 0x0000, 0x0010, {0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}};*/ + /*static GUID MA_GUID_KSDATAFORMAT_SUBTYPE_MULAW = {0x00000007, 0x0000, 0x0010, {0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}};*/ +#endif + + + +MA_API ma_uint32 ma_get_format_priority_index(ma_format format) /* Lower = better. */ +{ + ma_uint32 i; + for (i = 0; i < ma_countof(g_maFormatPriorities); ++i) { + if (g_maFormatPriorities[i] == format) { + return i; + } + } + + /* Getting here means the format could not be found or is equal to ma_format_unknown. */ + return (ma_uint32)-1; +} + +static ma_result ma_device__post_init_setup(ma_device* pDevice, ma_device_type deviceType); + +static ma_bool32 ma_device_descriptor_is_valid(const ma_device_descriptor* pDeviceDescriptor) +{ + if (pDeviceDescriptor == NULL) { + return MA_FALSE; + } + + if (pDeviceDescriptor->format == ma_format_unknown) { + return MA_FALSE; + } + + if (pDeviceDescriptor->channels == 0 || pDeviceDescriptor->channels > MA_MAX_CHANNELS) { + return MA_FALSE; + } + + if (pDeviceDescriptor->sampleRate == 0) { + return MA_FALSE; + } + + return MA_TRUE; +} + + +static ma_result ma_device_audio_thread__default_read_write(ma_device* pDevice) +{ + ma_result result = MA_SUCCESS; + ma_bool32 exitLoop = MA_FALSE; + ma_uint8 capturedDeviceData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint8 playbackDeviceData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint32 capturedDeviceDataCapInFrames = 0; + ma_uint32 playbackDeviceDataCapInFrames = 0; + + MA_ASSERT(pDevice != NULL); + + /* Just some quick validation on the device type and the available callbacks. 
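+    Capture, duplex and loopback devices require an onDeviceRead callback, and playback and duplex
+    devices require an onDeviceWrite callback. The capacities computed here measure the stack
+    buffers in frames of each side's internal (device) format.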
*/ + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex || pDevice->type == ma_device_type_loopback) { + if (pDevice->pContext->callbacks.onDeviceRead == NULL) { + return MA_NOT_IMPLEMENTED; + } + + capturedDeviceDataCapInFrames = sizeof(capturedDeviceData) / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels); + } + + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + if (pDevice->pContext->callbacks.onDeviceWrite == NULL) { + return MA_NOT_IMPLEMENTED; + } + + playbackDeviceDataCapInFrames = sizeof(playbackDeviceData) / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels); + } + + /* NOTE: The device was started outside of this function, in the worker thread. */ + + while (ma_device_get_state(pDevice) == ma_device_state_started && !exitLoop) { + switch (pDevice->type) { + case ma_device_type_duplex: + { + /* The process is: onDeviceRead() -> convert -> callback -> convert -> onDeviceWrite() */ + ma_uint32 totalCapturedDeviceFramesProcessed = 0; + ma_uint32 capturedDevicePeriodSizeInFrames = ma_min(pDevice->capture.internalPeriodSizeInFrames, pDevice->playback.internalPeriodSizeInFrames); + + while (totalCapturedDeviceFramesProcessed < capturedDevicePeriodSizeInFrames) { + ma_uint32 capturedDeviceFramesRemaining; + ma_uint32 capturedDeviceFramesProcessed; + ma_uint32 capturedDeviceFramesToProcess; + ma_uint32 capturedDeviceFramesToTryProcessing = capturedDevicePeriodSizeInFrames - totalCapturedDeviceFramesProcessed; + if (capturedDeviceFramesToTryProcessing > capturedDeviceDataCapInFrames) { + capturedDeviceFramesToTryProcessing = capturedDeviceDataCapInFrames; + } + + result = pDevice->pContext->callbacks.onDeviceRead(pDevice, capturedDeviceData, capturedDeviceFramesToTryProcessing, &capturedDeviceFramesToProcess); + if (result != MA_SUCCESS) { + exitLoop = MA_TRUE; + break; + } + + capturedDeviceFramesRemaining = capturedDeviceFramesToProcess; + capturedDeviceFramesProcessed = 0; + + /* At this point we have our captured data in device format and we now need to convert it to client format. */ + for (;;) { + ma_uint8 capturedClientData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint8 playbackClientData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint32 capturedClientDataCapInFrames = sizeof(capturedClientData) / ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels); + ma_uint32 playbackClientDataCapInFrames = sizeof(playbackClientData) / ma_get_bytes_per_frame(pDevice->playback.format, pDevice->playback.channels); + ma_uint64 capturedClientFramesToProcessThisIteration = ma_min(capturedClientDataCapInFrames, playbackClientDataCapInFrames); + ma_uint64 capturedDeviceFramesToProcessThisIteration = capturedDeviceFramesRemaining; + ma_uint8* pRunningCapturedDeviceFrames = ma_offset_ptr(capturedDeviceData, capturedDeviceFramesProcessed * ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels)); + + /* Convert capture data from device format to client format. */ + result = ma_data_converter_process_pcm_frames(&pDevice->capture.converter, pRunningCapturedDeviceFrames, &capturedDeviceFramesToProcessThisIteration, capturedClientData, &capturedClientFramesToProcessThisIteration); + if (result != MA_SUCCESS) { + break; + } + + /* + If we weren't able to generate any output frames it must mean we've exhausted all of our input. 
The only time this would not be the case is if capturedClientData was too small + which should never be the case when it's of the size MA_DATA_CONVERTER_STACK_BUFFER_SIZE. + */ + if (capturedClientFramesToProcessThisIteration == 0) { + break; + } + + ma_device__handle_data_callback(pDevice, playbackClientData, capturedClientData, (ma_uint32)capturedClientFramesToProcessThisIteration); /* Safe cast. */ + + capturedDeviceFramesProcessed += (ma_uint32)capturedDeviceFramesToProcessThisIteration; /* Safe cast. */ + capturedDeviceFramesRemaining -= (ma_uint32)capturedDeviceFramesToProcessThisIteration; /* Safe cast. */ + + /* At this point the playbackClientData buffer should be holding data that needs to be written to the device. */ + for (;;) { + ma_uint64 convertedClientFrameCount = capturedClientFramesToProcessThisIteration; + ma_uint64 convertedDeviceFrameCount = playbackDeviceDataCapInFrames; + result = ma_data_converter_process_pcm_frames(&pDevice->playback.converter, playbackClientData, &convertedClientFrameCount, playbackDeviceData, &convertedDeviceFrameCount); + if (result != MA_SUCCESS) { + break; + } + + result = pDevice->pContext->callbacks.onDeviceWrite(pDevice, playbackDeviceData, (ma_uint32)convertedDeviceFrameCount, NULL); /* Safe cast. */ + if (result != MA_SUCCESS) { + exitLoop = MA_TRUE; + break; + } + + capturedClientFramesToProcessThisIteration -= (ma_uint32)convertedClientFrameCount; /* Safe cast. */ + if (capturedClientFramesToProcessThisIteration == 0) { + break; + } + } + + /* In case an error happened from ma_device_write__null()... */ + if (result != MA_SUCCESS) { + exitLoop = MA_TRUE; + break; + } + } + + /* Make sure we don't get stuck in the inner loop. */ + if (capturedDeviceFramesProcessed == 0) { + break; + } + + totalCapturedDeviceFramesProcessed += capturedDeviceFramesProcessed; + } + } break; + + case ma_device_type_capture: + case ma_device_type_loopback: + { + ma_uint32 periodSizeInFrames = pDevice->capture.internalPeriodSizeInFrames; + ma_uint32 framesReadThisPeriod = 0; + while (framesReadThisPeriod < periodSizeInFrames) { + ma_uint32 framesRemainingInPeriod = periodSizeInFrames - framesReadThisPeriod; + ma_uint32 framesProcessed; + ma_uint32 framesToReadThisIteration = framesRemainingInPeriod; + if (framesToReadThisIteration > capturedDeviceDataCapInFrames) { + framesToReadThisIteration = capturedDeviceDataCapInFrames; + } + + result = pDevice->pContext->callbacks.onDeviceRead(pDevice, capturedDeviceData, framesToReadThisIteration, &framesProcessed); + if (result != MA_SUCCESS) { + exitLoop = MA_TRUE; + break; + } + + /* Make sure we don't get stuck in the inner loop. */ + if (framesProcessed == 0) { + break; + } + + ma_device__send_frames_to_client(pDevice, framesProcessed, capturedDeviceData); + + framesReadThisPeriod += framesProcessed; + } + } break; + + case ma_device_type_playback: + { + /* We write in chunks of the period size, but use a stack-allocated buffer for the intermediary.
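The inner loops above follow a general drain pattern for ma_data_converter: feed input through a fixed-size staging buffer until the converter stops producing output. A condensed sketch of that pattern follows; the names drain_converter and consume are illustrative and not part of miniaudio:

static void drain_converter(ma_data_converter* pConverter,
                            const ma_uint8* pIn, ma_uint64 frameCountIn,
                            ma_uint32 bpfIn, ma_uint32 bpfOut,
                            void (*consume)(const void* pFrames, ma_uint64 frameCount))
{
    ma_uint8 staging[4096];  /* Plays the role of the MA_DATA_CONVERTER_STACK_BUFFER_SIZE buffers above. */

    for (;;) {
        ma_uint64 framesIn  = frameCountIn;             /* In: frames available. Out: frames consumed. */
        ma_uint64 framesOut = sizeof(staging) / bpfOut; /* In: frames of capacity. Out: frames produced. */

        if (ma_data_converter_process_pcm_frames(pConverter, pIn, &framesIn, staging, &framesOut) != MA_SUCCESS) {
            break;
        }
        if (framesOut == 0) {
            break;  /* No output means the input has been exhausted. */
        }

        consume(staging, framesOut);

        pIn          += framesIn * bpfIn;
        frameCountIn -= framesIn;
    }
}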
*/ + ma_uint32 periodSizeInFrames = pDevice->playback.internalPeriodSizeInFrames; + ma_uint32 framesWrittenThisPeriod = 0; + while (framesWrittenThisPeriod < periodSizeInFrames) { + ma_uint32 framesRemainingInPeriod = periodSizeInFrames - framesWrittenThisPeriod; + ma_uint32 framesProcessed; + ma_uint32 framesToWriteThisIteration = framesRemainingInPeriod; + if (framesToWriteThisIteration > playbackDeviceDataCapInFrames) { + framesToWriteThisIteration = playbackDeviceDataCapInFrames; + } + + ma_device__read_frames_from_client(pDevice, framesToWriteThisIteration, playbackDeviceData); + + result = pDevice->pContext->callbacks.onDeviceWrite(pDevice, playbackDeviceData, framesToWriteThisIteration, &framesProcessed); + if (result != MA_SUCCESS) { + exitLoop = MA_TRUE; + break; + } + + /* Make sure we don't get stuck in the inner loop. */ + if (framesProcessed == 0) { + break; + } + + framesWrittenThisPeriod += framesProcessed; + } + } break; + + /* Should never get here. */ + default: break; + } + } + + return result; +} + + + +/******************************************************************************* + +Null Backend + +*******************************************************************************/ +#ifdef MA_HAS_NULL + +#define MA_DEVICE_OP_NONE__NULL 0 +#define MA_DEVICE_OP_START__NULL 1 +#define MA_DEVICE_OP_SUSPEND__NULL 2 +#define MA_DEVICE_OP_KILL__NULL 3 + +static ma_thread_result MA_THREADCALL ma_device_thread__null(void* pData) +{ + ma_device* pDevice = (ma_device*)pData; + MA_ASSERT(pDevice != NULL); + + for (;;) { /* Keep the thread alive until the device is uninitialized. */ + ma_uint32 operation; + + /* Wait for an operation to be requested. */ + ma_event_wait(&pDevice->null_device.operationEvent); + + /* At this point an event should have been triggered. */ + operation = pDevice->null_device.operation; + + /* Starting the device just resets the timer. */ + if (operation == MA_DEVICE_OP_START__NULL) { + /* Reset the timer just in case. */ + ma_timer_init(&pDevice->null_device.timer); + + /* Acknowledge the start operation so the caller can unblock. */ + pDevice->null_device.operationResult = MA_SUCCESS; + ma_event_signal(&pDevice->null_device.operationCompletionEvent); + ma_semaphore_release(&pDevice->null_device.operationSemaphore); + continue; + } + + /* Suspending the device means we need to stop the timer and just continue the loop. */ + if (operation == MA_DEVICE_OP_SUSPEND__NULL) { + /* We need to add the current run time to the prior run time, then reset the timer. */ + pDevice->null_device.priorRunTime += ma_timer_get_time_in_seconds(&pDevice->null_device.timer); + ma_timer_init(&pDevice->null_device.timer); + + /* We're done. */ + pDevice->null_device.operationResult = MA_SUCCESS; + ma_event_signal(&pDevice->null_device.operationCompletionEvent); + ma_semaphore_release(&pDevice->null_device.operationSemaphore); + continue; + } + + /* Killing the device means we need to get out of this loop so that this thread can terminate. */ + if (operation == MA_DEVICE_OP_KILL__NULL) { + pDevice->null_device.operationResult = MA_SUCCESS; + ma_event_signal(&pDevice->null_device.operationCompletionEvent); + ma_semaphore_release(&pDevice->null_device.operationSemaphore); + break; + } + + /* Getting a signal on a "none" operation probably means an error. Return invalid operation.
*/ + if (operation == MA_DEVICE_OP_NONE__NULL) { + MA_ASSERT(MA_FALSE); /* <-- Trigger this in debug mode to ensure developers are aware they're doing something wrong (or there's a bug in miniaudio). */ + pDevice->null_device.operationResult = MA_INVALID_OPERATION; + ma_event_signal(&pDevice->null_device.operationCompletionEvent); + ma_semaphore_release(&pDevice->null_device.operationSemaphore); + continue; /* Continue the loop. Don't terminate. */ + } + } + + return (ma_thread_result)0; +} + +static ma_result ma_device_do_operation__null(ma_device* pDevice, ma_uint32 operation) +{ + ma_result result; + + /* + TODO: Need to review this and consider just using mutual exclusion. I think the original motivation + for this was to just post the event to a queue and return immediately, but that has since changed + and now this function is synchronous. I think this can be simplified to just use a mutex. + */ + + /* + The first thing to do is wait for an operation slot to become available. We only have a single slot for this, but we could extend this later + to support queuing of operations. + */ + result = ma_semaphore_wait(&pDevice->null_device.operationSemaphore); + if (result != MA_SUCCESS) { + return result; /* Failed to wait for the semaphore. */ + } + + /* + When we get here it means the background thread is not referencing the operation code and it can be changed. After changing this we need to + signal an event to the worker thread to let it know that it can start work. + */ + pDevice->null_device.operation = operation; + + /* Once the operation code has been set, the worker thread can start work. */ + if (ma_event_signal(&pDevice->null_device.operationEvent) != MA_SUCCESS) { + return MA_ERROR; + } + + /* We want everything to be synchronous so we're going to wait for the worker thread to complete its operation. */ + if (ma_event_wait(&pDevice->null_device.operationCompletionEvent) != MA_SUCCESS) { + return MA_ERROR; + } + + return pDevice->null_device.operationResult; +} + +static ma_uint64 ma_device_get_total_run_time_in_frames__null(ma_device* pDevice) +{ + ma_uint32 internalSampleRate; + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + internalSampleRate = pDevice->capture.internalSampleRate; + } else { + internalSampleRate = pDevice->playback.internalSampleRate; + } + + return (ma_uint64)((pDevice->null_device.priorRunTime + ma_timer_get_time_in_seconds(&pDevice->null_device.timer)) * internalSampleRate); +} + +static ma_result ma_context_enumerate_devices__null(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData) +{ + ma_bool32 cbResult = MA_TRUE; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(callback != NULL); + + /* Playback. */ + if (cbResult) { + ma_device_info deviceInfo; + MA_ZERO_OBJECT(&deviceInfo); + ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), "NULL Playback Device", (size_t)-1); + deviceInfo.isDefault = MA_TRUE; /* Only one playback and capture device for the null backend, so might as well mark as default. */ + cbResult = callback(pContext, ma_device_type_playback, &deviceInfo, pUserData); + } + + /* Capture. */ + if (cbResult) { + ma_device_info deviceInfo; + MA_ZERO_OBJECT(&deviceInfo); + ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), "NULL Capture Device", (size_t)-1); + deviceInfo.isDefault = MA_TRUE; /* Only one playback and capture device for the null backend, so might as well mark as default.
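The frame accounting in ma_device_get_total_run_time_in_frames__null() is simply elapsed seconds of simulated run time multiplied by the internal sample rate. A worked example, assuming a 48000 Hz device that was suspended after 0.15 s and has since been running for another 0.10 s:

double    priorRunTime = 0.15;  /* Accumulated across suspends.          */
double    timerNow     = 0.10;  /* Current reading of null_device.timer. */
ma_uint64 totalFrames  = (ma_uint64)((priorRunTime + timerNow) * 48000);  /* = 12000 frames. */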
*/ + cbResult = callback(pContext, ma_device_type_capture, &deviceInfo, pUserData); + } + + (void)cbResult; /* Silence a static analysis warning. */ + + return MA_SUCCESS; +} + +static ma_result ma_context_get_device_info__null(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_device_info* pDeviceInfo) +{ + MA_ASSERT(pContext != NULL); + + if (pDeviceID != NULL && pDeviceID->nullbackend != 0) { + return MA_NO_DEVICE; /* Don't know the device. */ + } + + /* Name / Description */ + if (deviceType == ma_device_type_playback) { + ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), "NULL Playback Device", (size_t)-1); + } else { + ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), "NULL Capture Device", (size_t)-1); + } + + pDeviceInfo->isDefault = MA_TRUE; /* Only one playback and capture device for the null backend, so might as well mark as default. */ + + /* Support everything on the null backend. */ + pDeviceInfo->nativeDataFormats[0].format = ma_format_unknown; + pDeviceInfo->nativeDataFormats[0].channels = 0; + pDeviceInfo->nativeDataFormats[0].sampleRate = 0; + pDeviceInfo->nativeDataFormats[0].flags = 0; + pDeviceInfo->nativeDataFormatCount = 1; + + (void)pContext; + return MA_SUCCESS; +} + + +static ma_result ma_device_uninit__null(ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + + /* Keep it clean and wait for the device thread to finish before returning. */ + ma_device_do_operation__null(pDevice, MA_DEVICE_OP_KILL__NULL); + + /* Wait for the thread to finish before continuing. */ + ma_thread_wait(&pDevice->null_device.deviceThread); + + /* At this point the loop in the device thread is as good as terminated so we can uninitialize our events. */ + ma_semaphore_uninit(&pDevice->null_device.operationSemaphore); + ma_event_uninit(&pDevice->null_device.operationCompletionEvent); + ma_event_uninit(&pDevice->null_device.operationEvent); + + return MA_SUCCESS; +} + +static ma_result ma_device_init__null(ma_device* pDevice, const ma_device_config* pConfig, ma_device_descriptor* pDescriptorPlayback, ma_device_descriptor* pDescriptorCapture) +{ + ma_result result; + + MA_ASSERT(pDevice != NULL); + + MA_ZERO_OBJECT(&pDevice->null_device); + + if (pConfig->deviceType == ma_device_type_loopback) { + return MA_DEVICE_TYPE_NOT_SUPPORTED; + } + + /* The null backend supports everything exactly as we specify it. */ + if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) { + pDescriptorCapture->format = (pDescriptorCapture->format != ma_format_unknown) ? pDescriptorCapture->format : MA_DEFAULT_FORMAT; + pDescriptorCapture->channels = (pDescriptorCapture->channels != 0) ? pDescriptorCapture->channels : MA_DEFAULT_CHANNELS; + pDescriptorCapture->sampleRate = (pDescriptorCapture->sampleRate != 0) ? pDescriptorCapture->sampleRate : MA_DEFAULT_SAMPLE_RATE; + + if (pDescriptorCapture->channelMap[0] == MA_CHANNEL_NONE) { + ma_channel_map_init_standard(ma_standard_channel_map_default, pDescriptorCapture->channelMap, ma_countof(pDescriptorCapture->channelMap), pDescriptorCapture->channels); + } + + pDescriptorCapture->periodSizeInFrames = ma_calculate_buffer_size_in_frames_from_descriptor(pDescriptorCapture, pDescriptorCapture->sampleRate, pConfig->performanceProfile); + } + + if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) { + pDescriptorPlayback->format = (pDescriptorPlayback->format != ma_format_unknown) ? 
pDescriptorPlayback->format : MA_DEFAULT_FORMAT; + pDescriptorPlayback->channels = (pDescriptorPlayback->channels != 0) ? pDescriptorPlayback->channels : MA_DEFAULT_CHANNELS; + pDescriptorPlayback->sampleRate = (pDescriptorPlayback->sampleRate != 0) ? pDescriptorPlayback->sampleRate : MA_DEFAULT_SAMPLE_RATE; + + if (pDescriptorPlayback->channelMap[0] == MA_CHANNEL_NONE) { + ma_channel_map_init_standard(ma_standard_channel_map_default, pDescriptorPlayback->channelMap, ma_countof(pDescriptorPlayback->channelMap), pDescriptorPlayback->channels); + } + + pDescriptorPlayback->periodSizeInFrames = ma_calculate_buffer_size_in_frames_from_descriptor(pDescriptorPlayback, pDescriptorPlayback->sampleRate, pConfig->performanceProfile); + } + + /* + In order to get timing right, we need to create a thread that does nothing but keep track of the timer. This timer is started when the + first period is "written" to it, and then stopped in ma_device_stop__null(). + */ + result = ma_event_init(&pDevice->null_device.operationEvent); + if (result != MA_SUCCESS) { + return result; + } + + result = ma_event_init(&pDevice->null_device.operationCompletionEvent); + if (result != MA_SUCCESS) { + return result; + } + + result = ma_semaphore_init(1, &pDevice->null_device.operationSemaphore); /* <-- It's important that the initial value is set to 1. */ + if (result != MA_SUCCESS) { + return result; + } + + result = ma_thread_create(&pDevice->null_device.deviceThread, pDevice->pContext->threadPriority, 0, ma_device_thread__null, pDevice, &pDevice->pContext->allocationCallbacks); + if (result != MA_SUCCESS) { + return result; + } + + return MA_SUCCESS; +} + +static ma_result ma_device_start__null(ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + + ma_device_do_operation__null(pDevice, MA_DEVICE_OP_START__NULL); + + ma_atomic_bool32_set(&pDevice->null_device.isStarted, MA_TRUE); + return MA_SUCCESS; +} + +static ma_result ma_device_stop__null(ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + + ma_device_do_operation__null(pDevice, MA_DEVICE_OP_SUSPEND__NULL); + + ma_atomic_bool32_set(&pDevice->null_device.isStarted, MA_FALSE); + return MA_SUCCESS; +} + +static ma_bool32 ma_device_is_started__null(ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + + return ma_atomic_bool32_get(&pDevice->null_device.isStarted); +} + +static ma_result ma_device_write__null(ma_device* pDevice, const void* pPCMFrames, ma_uint32 frameCount, ma_uint32* pFramesWritten) +{ + ma_result result = MA_SUCCESS; + ma_uint32 totalPCMFramesProcessed; + ma_bool32 wasStartedOnEntry; + + if (pFramesWritten != NULL) { + *pFramesWritten = 0; + } + + wasStartedOnEntry = ma_device_is_started__null(pDevice); + + /* Keep going until everything has been written. */ + totalPCMFramesProcessed = 0; + while (totalPCMFramesProcessed < frameCount) { + ma_uint64 targetFrame; + + /* If there are any frames remaining in the current period, consume those first. */ + if (pDevice->null_device.currentPeriodFramesRemainingPlayback > 0) { + ma_uint32 framesRemaining = (frameCount - totalPCMFramesProcessed); + ma_uint32 framesToProcess = pDevice->null_device.currentPeriodFramesRemainingPlayback; + if (framesToProcess > framesRemaining) { + framesToProcess = framesRemaining; + } + + /* We don't actually do anything with pPCMFrames, so just mark it as unused to prevent a warning.
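For context, here is a minimal sketch of exercising this backend through miniaudio's public API; selecting ma_backend_null explicitly is the usual way to drive the timer-paced device above, for example in tests on machines without audio hardware. It assumes some translation unit defines MINIAUDIO_IMPLEMENTATION and that miniaudio.h is on the include path:

#include <string.h>
#include "miniaudio.h"

static void data_cb(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount)
{
    /* The null backend discards output, so silence is as good as anything. */
    memset(pOutput, 0, frameCount * ma_get_bytes_per_frame(pDevice->playback.format, pDevice->playback.channels));
    (void)pInput;
}

int main(void)
{
    ma_backend backends[] = { ma_backend_null };
    ma_context context;
    ma_device device;
    ma_device_config config = ma_device_config_init(ma_device_type_playback);
    config.playback.format   = ma_format_f32;
    config.playback.channels = 2;
    config.sampleRate        = 48000;
    config.dataCallback      = data_cb;

    if (ma_context_init(backends, 1, NULL, &context) != MA_SUCCESS) {
        return 1;
    }
    if (ma_device_init(&context, &config, &device) != MA_SUCCESS) {
        ma_context_uninit(&context);
        return 1;
    }
    ma_device_start(&device);  /* Paced by the timer thread created in ma_device_init__null(). */
    /* ... run for a while ... */
    ma_device_uninit(&device);
    ma_context_uninit(&context);
    return 0;
}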
*/ + (void)pPCMFrames; + + pDevice->null_device.currentPeriodFramesRemainingPlayback -= framesToProcess; + totalPCMFramesProcessed += framesToProcess; + } + + /* If we've consumed the current period we'll need to mark it as such and ensure the device is started if it's not already. */ + if (pDevice->null_device.currentPeriodFramesRemainingPlayback == 0) { + pDevice->null_device.currentPeriodFramesRemainingPlayback = 0; + + if (!ma_device_is_started__null(pDevice) && !wasStartedOnEntry) { + result = ma_device_start__null(pDevice); + if (result != MA_SUCCESS) { + break; + } + } + } + + /* If we've consumed the whole buffer we can return now. */ + MA_ASSERT(totalPCMFramesProcessed <= frameCount); + if (totalPCMFramesProcessed == frameCount) { + break; + } + + /* Getting here means we've still got more frames to consume, but we need to wait for it to become available. */ + targetFrame = pDevice->null_device.lastProcessedFramePlayback; + for (;;) { + ma_uint64 currentFrame; + + /* Stop waiting if the device has been stopped. */ + if (!ma_device_is_started__null(pDevice)) { + break; + } + + currentFrame = ma_device_get_total_run_time_in_frames__null(pDevice); + if (currentFrame >= targetFrame) { + break; + } + + /* Getting here means we haven't yet reached the target sample, so continue waiting. */ + ma_sleep(10); + } + + pDevice->null_device.lastProcessedFramePlayback += pDevice->playback.internalPeriodSizeInFrames; + pDevice->null_device.currentPeriodFramesRemainingPlayback = pDevice->playback.internalPeriodSizeInFrames; + } + + if (pFramesWritten != NULL) { + *pFramesWritten = totalPCMFramesProcessed; + } + + return result; +} + +static ma_result ma_device_read__null(ma_device* pDevice, void* pPCMFrames, ma_uint32 frameCount, ma_uint32* pFramesRead) +{ + ma_result result = MA_SUCCESS; + ma_uint32 totalPCMFramesProcessed; + + if (pFramesRead != NULL) { + *pFramesRead = 0; + } + + /* Keep going until everything has been read. */ + totalPCMFramesProcessed = 0; + while (totalPCMFramesProcessed < frameCount) { + ma_uint64 targetFrame; + + /* If there are any frames remaining in the current period, consume those first. */ + if (pDevice->null_device.currentPeriodFramesRemainingCapture > 0) { + ma_uint32 bpf = ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels); + ma_uint32 framesRemaining = (frameCount - totalPCMFramesProcessed); + ma_uint32 framesToProcess = pDevice->null_device.currentPeriodFramesRemainingCapture; + if (framesToProcess > framesRemaining) { + framesToProcess = framesRemaining; + } + + /* We need to ensure the output buffer is zeroed. */ + MA_ZERO_MEMORY(ma_offset_ptr(pPCMFrames, totalPCMFramesProcessed*bpf), framesToProcess*bpf); + + pDevice->null_device.currentPeriodFramesRemainingCapture -= framesToProcess; + totalPCMFramesProcessed += framesToProcess; + } + + /* If we've consumed the current period we'll need to mark it as such and ensure the device is started if it's not already. */ + if (pDevice->null_device.currentPeriodFramesRemainingCapture == 0) { + pDevice->null_device.currentPeriodFramesRemainingCapture = 0; + } + + /* If we've consumed the whole buffer we can return now. */ + MA_ASSERT(totalPCMFramesProcessed <= frameCount); + if (totalPCMFramesProcessed == frameCount) { + break; + } + + /* Getting here means we've still got more frames to consume, but we need to wait for it to become available.
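The wait loops in ma_device_write__null() and ma_device_read__null() pace the caller against that simulated frame counter. A worked example, assuming a 48000 Hz device with a 480-frame (10 ms) internal period:

ma_uint64 targetFrame  = 480;                              /* One period ahead of the last processed frame. */
double    runTimeSec   = 0.0105;                           /* 10.5 ms of simulated run time so far.         */
ma_uint64 currentFrame = (ma_uint64)(runTimeSec * 48000);  /* = 504 >= 480, so the wait loop exits.         */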
*/ + targetFrame = pDevice->null_device.lastProcessedFrameCapture + pDevice->capture.internalPeriodSizeInFrames; + for (;;) { + ma_uint64 currentFrame; + + /* Stop waiting if the device has been stopped. */ + if (!ma_device_is_started__null(pDevice)) { + break; + } + + currentFrame = ma_device_get_total_run_time_in_frames__null(pDevice); + if (currentFrame >= targetFrame) { + break; + } + + /* Getting here means we haven't yet reached the target sample, so continue waiting. */ + ma_sleep(10); + } + + pDevice->null_device.lastProcessedFrameCapture += pDevice->capture.internalPeriodSizeInFrames; + pDevice->null_device.currentPeriodFramesRemainingCapture = pDevice->capture.internalPeriodSizeInFrames; + } + + if (pFramesRead != NULL) { + *pFramesRead = totalPCMFramesProcessed; + } + + return result; +} + +static ma_result ma_context_uninit__null(ma_context* pContext) +{ + MA_ASSERT(pContext != NULL); + MA_ASSERT(pContext->backend == ma_backend_null); + + (void)pContext; + return MA_SUCCESS; +} + +static ma_result ma_context_init__null(ma_context* pContext, const ma_context_config* pConfig, ma_backend_callbacks* pCallbacks) +{ + MA_ASSERT(pContext != NULL); + + (void)pConfig; + (void)pContext; + + pCallbacks->onContextInit = ma_context_init__null; + pCallbacks->onContextUninit = ma_context_uninit__null; + pCallbacks->onContextEnumerateDevices = ma_context_enumerate_devices__null; + pCallbacks->onContextGetDeviceInfo = ma_context_get_device_info__null; + pCallbacks->onDeviceInit = ma_device_init__null; + pCallbacks->onDeviceUninit = ma_device_uninit__null; + pCallbacks->onDeviceStart = ma_device_start__null; + pCallbacks->onDeviceStop = ma_device_stop__null; + pCallbacks->onDeviceRead = ma_device_read__null; + pCallbacks->onDeviceWrite = ma_device_write__null; + pCallbacks->onDeviceDataLoop = NULL; /* Our backend is asynchronous with a blocking read-write API which means we can get miniaudio to deal with the audio thread. */ + + /* The null backend always works. */ + return MA_SUCCESS; +} +#endif + + + +/******************************************************************************* + +WIN32 COMMON + +*******************************************************************************/ +#if defined(MA_WIN32) +#if defined(MA_WIN32_DESKTOP) || defined(MA_WIN32_GDK) + #define ma_CoInitializeEx(pContext, pvReserved, dwCoInit) ((pContext->win32.CoInitializeEx) ? 
((MA_PFN_CoInitializeEx)pContext->win32.CoInitializeEx)(pvReserved, dwCoInit) : ((MA_PFN_CoInitialize)pContext->win32.CoInitialize)(pvReserved)) + #define ma_CoUninitialize(pContext) ((MA_PFN_CoUninitialize)pContext->win32.CoUninitialize)() + #define ma_CoCreateInstance(pContext, rclsid, pUnkOuter, dwClsContext, riid, ppv) ((MA_PFN_CoCreateInstance)pContext->win32.CoCreateInstance)(rclsid, pUnkOuter, dwClsContext, riid, ppv) + #define ma_CoTaskMemFree(pContext, pv) ((MA_PFN_CoTaskMemFree)pContext->win32.CoTaskMemFree)(pv) + #define ma_PropVariantClear(pContext, pvar) ((MA_PFN_PropVariantClear)pContext->win32.PropVariantClear)(pvar) +#else + #define ma_CoInitializeEx(pContext, pvReserved, dwCoInit) CoInitializeEx(pvReserved, dwCoInit) + #define ma_CoUninitialize(pContext) CoUninitialize() + #define ma_CoCreateInstance(pContext, rclsid, pUnkOuter, dwClsContext, riid, ppv) CoCreateInstance(rclsid, pUnkOuter, dwClsContext, riid, ppv) + #define ma_CoTaskMemFree(pContext, pv) CoTaskMemFree(pv) + #define ma_PropVariantClear(pContext, pvar) PropVariantClear(pvar) +#endif + +#if !defined(MAXULONG_PTR) && !defined(__WATCOMC__) +typedef size_t DWORD_PTR; +#endif + +#if !defined(WAVE_FORMAT_1M08) +#define WAVE_FORMAT_1M08 0x00000001 +#define WAVE_FORMAT_1S08 0x00000002 +#define WAVE_FORMAT_1M16 0x00000004 +#define WAVE_FORMAT_1S16 0x00000008 +#define WAVE_FORMAT_2M08 0x00000010 +#define WAVE_FORMAT_2S08 0x00000020 +#define WAVE_FORMAT_2M16 0x00000040 +#define WAVE_FORMAT_2S16 0x00000080 +#define WAVE_FORMAT_4M08 0x00000100 +#define WAVE_FORMAT_4S08 0x00000200 +#define WAVE_FORMAT_4M16 0x00000400 +#define WAVE_FORMAT_4S16 0x00000800 +#endif + +#if !defined(WAVE_FORMAT_44M08) +#define WAVE_FORMAT_44M08 0x00000100 +#define WAVE_FORMAT_44S08 0x00000200 +#define WAVE_FORMAT_44M16 0x00000400 +#define WAVE_FORMAT_44S16 0x00000800 +#define WAVE_FORMAT_48M08 0x00001000 +#define WAVE_FORMAT_48S08 0x00002000 +#define WAVE_FORMAT_48M16 0x00004000 +#define WAVE_FORMAT_48S16 0x00008000 +#define WAVE_FORMAT_96M08 0x00010000 +#define WAVE_FORMAT_96S08 0x00020000 +#define WAVE_FORMAT_96M16 0x00040000 +#define WAVE_FORMAT_96S16 0x00080000 +#endif + +#ifndef SPEAKER_FRONT_LEFT +#define SPEAKER_FRONT_LEFT 0x1 +#define SPEAKER_FRONT_RIGHT 0x2 +#define SPEAKER_FRONT_CENTER 0x4 +#define SPEAKER_LOW_FREQUENCY 0x8 +#define SPEAKER_BACK_LEFT 0x10 +#define SPEAKER_BACK_RIGHT 0x20 +#define SPEAKER_FRONT_LEFT_OF_CENTER 0x40 +#define SPEAKER_FRONT_RIGHT_OF_CENTER 0x80 +#define SPEAKER_BACK_CENTER 0x100 +#define SPEAKER_SIDE_LEFT 0x200 +#define SPEAKER_SIDE_RIGHT 0x400 +#define SPEAKER_TOP_CENTER 0x800 +#define SPEAKER_TOP_FRONT_LEFT 0x1000 +#define SPEAKER_TOP_FRONT_CENTER 0x2000 +#define SPEAKER_TOP_FRONT_RIGHT 0x4000 +#define SPEAKER_TOP_BACK_LEFT 0x8000 +#define SPEAKER_TOP_BACK_CENTER 0x10000 +#define SPEAKER_TOP_BACK_RIGHT 0x20000 +#endif + +/* +Implement our own version of MA_WAVEFORMATEXTENSIBLE so we can avoid a header. Be careful with this +because MA_WAVEFORMATEX has an extra two bytes over standard WAVEFORMATEX due to padding. The +standard version uses tight packing, but for compiler compatibility we're not doing that with ours. 
+*/ +typedef struct +{ + WORD wFormatTag; + WORD nChannels; + DWORD nSamplesPerSec; + DWORD nAvgBytesPerSec; + WORD nBlockAlign; + WORD wBitsPerSample; + WORD cbSize; +} MA_WAVEFORMATEX; + +typedef struct +{ + WORD wFormatTag; + WORD nChannels; + DWORD nSamplesPerSec; + DWORD nAvgBytesPerSec; + WORD nBlockAlign; + WORD wBitsPerSample; + WORD cbSize; + union + { + WORD wValidBitsPerSample; + WORD wSamplesPerBlock; + WORD wReserved; + } Samples; + DWORD dwChannelMask; + GUID SubFormat; +} MA_WAVEFORMATEXTENSIBLE; + + + +#ifndef WAVE_FORMAT_EXTENSIBLE +#define WAVE_FORMAT_EXTENSIBLE 0xFFFE +#endif + +#ifndef WAVE_FORMAT_PCM +#define WAVE_FORMAT_PCM 1 +#endif + +#ifndef WAVE_FORMAT_IEEE_FLOAT +#define WAVE_FORMAT_IEEE_FLOAT 0x0003 +#endif + +/* Converts an individual Win32-style channel identifier (SPEAKER_FRONT_LEFT, etc.) to miniaudio. */ +static ma_uint8 ma_channel_id_to_ma__win32(DWORD id) +{ + switch (id) + { + case SPEAKER_FRONT_LEFT: return MA_CHANNEL_FRONT_LEFT; + case SPEAKER_FRONT_RIGHT: return MA_CHANNEL_FRONT_RIGHT; + case SPEAKER_FRONT_CENTER: return MA_CHANNEL_FRONT_CENTER; + case SPEAKER_LOW_FREQUENCY: return MA_CHANNEL_LFE; + case SPEAKER_BACK_LEFT: return MA_CHANNEL_BACK_LEFT; + case SPEAKER_BACK_RIGHT: return MA_CHANNEL_BACK_RIGHT; + case SPEAKER_FRONT_LEFT_OF_CENTER: return MA_CHANNEL_FRONT_LEFT_CENTER; + case SPEAKER_FRONT_RIGHT_OF_CENTER: return MA_CHANNEL_FRONT_RIGHT_CENTER; + case SPEAKER_BACK_CENTER: return MA_CHANNEL_BACK_CENTER; + case SPEAKER_SIDE_LEFT: return MA_CHANNEL_SIDE_LEFT; + case SPEAKER_SIDE_RIGHT: return MA_CHANNEL_SIDE_RIGHT; + case SPEAKER_TOP_CENTER: return MA_CHANNEL_TOP_CENTER; + case SPEAKER_TOP_FRONT_LEFT: return MA_CHANNEL_TOP_FRONT_LEFT; + case SPEAKER_TOP_FRONT_CENTER: return MA_CHANNEL_TOP_FRONT_CENTER; + case SPEAKER_TOP_FRONT_RIGHT: return MA_CHANNEL_TOP_FRONT_RIGHT; + case SPEAKER_TOP_BACK_LEFT: return MA_CHANNEL_TOP_BACK_LEFT; + case SPEAKER_TOP_BACK_CENTER: return MA_CHANNEL_TOP_BACK_CENTER; + case SPEAKER_TOP_BACK_RIGHT: return MA_CHANNEL_TOP_BACK_RIGHT; + default: return 0; + } +} + +/* Converts an individual miniaudio channel identifier (MA_CHANNEL_FRONT_LEFT, etc.) to Win32-style. */ +static DWORD ma_channel_id_to_win32(DWORD id) +{ + switch (id) + { + case MA_CHANNEL_MONO: return SPEAKER_FRONT_CENTER; + case MA_CHANNEL_FRONT_LEFT: return SPEAKER_FRONT_LEFT; + case MA_CHANNEL_FRONT_RIGHT: return SPEAKER_FRONT_RIGHT; + case MA_CHANNEL_FRONT_CENTER: return SPEAKER_FRONT_CENTER; + case MA_CHANNEL_LFE: return SPEAKER_LOW_FREQUENCY; + case MA_CHANNEL_BACK_LEFT: return SPEAKER_BACK_LEFT; + case MA_CHANNEL_BACK_RIGHT: return SPEAKER_BACK_RIGHT; + case MA_CHANNEL_FRONT_LEFT_CENTER: return SPEAKER_FRONT_LEFT_OF_CENTER; + case MA_CHANNEL_FRONT_RIGHT_CENTER: return SPEAKER_FRONT_RIGHT_OF_CENTER; + case MA_CHANNEL_BACK_CENTER: return SPEAKER_BACK_CENTER; + case MA_CHANNEL_SIDE_LEFT: return SPEAKER_SIDE_LEFT; + case MA_CHANNEL_SIDE_RIGHT: return SPEAKER_SIDE_RIGHT; + case MA_CHANNEL_TOP_CENTER: return SPEAKER_TOP_CENTER; + case MA_CHANNEL_TOP_FRONT_LEFT: return SPEAKER_TOP_FRONT_LEFT; + case MA_CHANNEL_TOP_FRONT_CENTER: return SPEAKER_TOP_FRONT_CENTER; + case MA_CHANNEL_TOP_FRONT_RIGHT: return SPEAKER_TOP_FRONT_RIGHT; + case MA_CHANNEL_TOP_BACK_LEFT: return SPEAKER_TOP_BACK_LEFT; + case MA_CHANNEL_TOP_BACK_CENTER: return SPEAKER_TOP_BACK_CENTER; + case MA_CHANNEL_TOP_BACK_RIGHT: return SPEAKER_TOP_BACK_RIGHT; + default: return 0; + } +} + +/* Converts a channel mapping to a Win32-style channel mask. 
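Relatedly, the padding caveat on MA_WAVEFORMATEX above can be checked at compile time. The negative-array-size assertion below is an illustrative sketch that assumes a 2-byte WORD, a 4-byte DWORD and natural alignment, under which the tightly packed Win32 WAVEFORMATEX is 18 bytes (2+2+4+4+2+2+2) and MA_WAVEFORMATEX pads to 20:

typedef char ma_wfx_size_check[(sizeof(MA_WAVEFORMATEX) == 20) ? 1 : -1];  /* Fails to compile if the assumption breaks. */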
*/ +static DWORD ma_channel_map_to_channel_mask__win32(const ma_channel* pChannelMap, ma_uint32 channels) +{ + DWORD dwChannelMask = 0; + ma_uint32 iChannel; + + for (iChannel = 0; iChannel < channels; ++iChannel) { + dwChannelMask |= ma_channel_id_to_win32(pChannelMap[iChannel]); + } + + return dwChannelMask; +} + +/* Converts a Win32-style channel mask to a miniaudio channel map. */ +static void ma_channel_mask_to_channel_map__win32(DWORD dwChannelMask, ma_uint32 channels, ma_channel* pChannelMap) +{ + /* If the channel mask is set to 0, just assume a default Win32 channel map. */ + if (dwChannelMask == 0) { + ma_channel_map_init_standard(ma_standard_channel_map_microsoft, pChannelMap, channels, channels); + } else { + if (channels == 1 && (dwChannelMask & SPEAKER_FRONT_CENTER) != 0) { + pChannelMap[0] = MA_CHANNEL_MONO; + } else { + /* Just iterate over each bit. */ + ma_uint32 iChannel = 0; + ma_uint32 iBit; + + for (iBit = 0; iBit < 32 && iChannel < channels; ++iBit) { + DWORD bitValue = (dwChannelMask & (1UL << iBit)); + if (bitValue != 0) { + /* The bit is set. */ + pChannelMap[iChannel] = ma_channel_id_to_ma__win32(bitValue); + iChannel += 1; + } + } + } + } +} + +#ifdef __cplusplus +static ma_bool32 ma_is_guid_equal(const void* a, const void* b) +{ + return IsEqualGUID(*(const GUID*)a, *(const GUID*)b); +} +#else +#define ma_is_guid_equal(a, b) IsEqualGUID((const GUID*)a, (const GUID*)b) +#endif + +static MA_INLINE ma_bool32 ma_is_guid_null(const void* guid) +{ + static GUID nullguid = {0x00000000, 0x0000, 0x0000, {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}; + return ma_is_guid_equal(guid, &nullguid); +} + +static ma_format ma_format_from_WAVEFORMATEX(const MA_WAVEFORMATEX* pWF) +{ + MA_ASSERT(pWF != NULL); + + if (pWF->wFormatTag == WAVE_FORMAT_EXTENSIBLE) { + const MA_WAVEFORMATEXTENSIBLE* pWFEX = (const MA_WAVEFORMATEXTENSIBLE*)pWF; + if (ma_is_guid_equal(&pWFEX->SubFormat, &MA_GUID_KSDATAFORMAT_SUBTYPE_PCM)) { + if (pWFEX->Samples.wValidBitsPerSample == 32) { + return ma_format_s32; + } + if (pWFEX->Samples.wValidBitsPerSample == 24) { + if (pWFEX->wBitsPerSample == 32) { + return ma_format_s32; + } + if (pWFEX->wBitsPerSample == 24) { + return ma_format_s24; + } + } + if (pWFEX->Samples.wValidBitsPerSample == 16) { + return ma_format_s16; + } + if (pWFEX->Samples.wValidBitsPerSample == 8) { + return ma_format_u8; + } + } + if (ma_is_guid_equal(&pWFEX->SubFormat, &MA_GUID_KSDATAFORMAT_SUBTYPE_IEEE_FLOAT)) { + if (pWFEX->Samples.wValidBitsPerSample == 32) { + return ma_format_f32; + } + /* + if (pWFEX->Samples.wValidBitsPerSample == 64) { + return ma_format_f64; + } + */ + } + } else { + if (pWF->wFormatTag == WAVE_FORMAT_PCM) { + if (pWF->wBitsPerSample == 32) { + return ma_format_s32; + } + if (pWF->wBitsPerSample == 24) { + return ma_format_s24; + } + if (pWF->wBitsPerSample == 16) { + return ma_format_s16; + } + if (pWF->wBitsPerSample == 8) { + return ma_format_u8; + } + } + if (pWF->wFormatTag == WAVE_FORMAT_IEEE_FLOAT) { + if (pWF->wBitsPerSample == 32) { + return ma_format_f32; + } + if (pWF->wBitsPerSample == 64) { + /*return ma_format_f64;*/ + } + } + } + + return ma_format_unknown; +} +#endif + + +/******************************************************************************* + +WASAPI Backend + +*******************************************************************************/ +#ifdef MA_HAS_WASAPI +#if 0 +#if defined(_MSC_VER) + #pragma warning(push) + #pragma warning(disable:4091) /* 'typedef ': ignored on left of '' when no variable is declared */ +#endif 
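For reference, an example of the mapping implemented by ma_format_from_WAVEFORMATEX() above: with WAVE_FORMAT_EXTENSIBLE the container size decides the miniaudio format, so 24 valid bits in a 32-bit container resolves to ma_format_s32, while 24 valid bits in a 24-bit container resolves to ma_format_s24:

MA_WAVEFORMATEXTENSIBLE wf;
MA_ZERO_OBJECT(&wf);
wf.wFormatTag                  = WAVE_FORMAT_EXTENSIBLE;
wf.nChannels                   = 2;
wf.wBitsPerSample              = 32;                               /* Container size. */
wf.Samples.wValidBitsPerSample = 24;                               /* Valid bits.     */
wf.SubFormat                   = MA_GUID_KSDATAFORMAT_SUBTYPE_PCM;
/* ma_format_from_WAVEFORMATEX((const MA_WAVEFORMATEX*)&wf) == ma_format_s32 */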
+#include +#include +#if defined(_MSC_VER) + #pragma warning(pop) +#endif +#endif /* 0 */ + +static ma_result ma_device_reroute__wasapi(ma_device* pDevice, ma_device_type deviceType); + +/* Some compilers don't define VerifyVersionInfoW. Need to write this ourselves. */ +#define MA_WIN32_WINNT_VISTA 0x0600 +#define MA_VER_MINORVERSION 0x01 +#define MA_VER_MAJORVERSION 0x02 +#define MA_VER_SERVICEPACKMAJOR 0x20 +#define MA_VER_GREATER_EQUAL 0x03 + +typedef struct { + DWORD dwOSVersionInfoSize; + DWORD dwMajorVersion; + DWORD dwMinorVersion; + DWORD dwBuildNumber; + DWORD dwPlatformId; + WCHAR szCSDVersion[128]; + WORD wServicePackMajor; + WORD wServicePackMinor; + WORD wSuiteMask; + BYTE wProductType; + BYTE wReserved; +} ma_OSVERSIONINFOEXW; + +typedef BOOL (WINAPI * ma_PFNVerifyVersionInfoW) (ma_OSVERSIONINFOEXW* lpVersionInfo, DWORD dwTypeMask, DWORDLONG dwlConditionMask); +typedef ULONGLONG (WINAPI * ma_PFNVerSetConditionMask)(ULONGLONG dwlConditionMask, DWORD dwTypeBitMask, BYTE dwConditionMask); + + +#ifndef PROPERTYKEY_DEFINED +#define PROPERTYKEY_DEFINED +#ifndef __WATCOMC__ +typedef struct +{ + GUID fmtid; + DWORD pid; +} PROPERTYKEY; +#endif +#endif + +/* Some compilers don't define PropVariantInit(). We just do this ourselves since it's just a memset(). */ +static MA_INLINE void ma_PropVariantInit(MA_PROPVARIANT* pProp) +{ + MA_ZERO_OBJECT(pProp); +} + + +static const PROPERTYKEY MA_PKEY_Device_FriendlyName = {{0xA45C254E, 0xDF1C, 0x4EFD, {0x80, 0x20, 0x67, 0xD1, 0x46, 0xA8, 0x50, 0xE0}}, 14}; +static const PROPERTYKEY MA_PKEY_AudioEngine_DeviceFormat = {{0xF19F064D, 0x82C, 0x4E27, {0xBC, 0x73, 0x68, 0x82, 0xA1, 0xBB, 0x8E, 0x4C}}, 0}; + +static const IID MA_IID_IUnknown = {0x00000000, 0x0000, 0x0000, {0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x46}}; /* 00000000-0000-0000-C000-000000000046 */ +#if !defined(MA_WIN32_DESKTOP) && !defined(MA_WIN32_GDK) +static const IID MA_IID_IAgileObject = {0x94EA2B94, 0xE9CC, 0x49E0, {0xC0, 0xFF, 0xEE, 0x64, 0xCA, 0x8F, 0x5B, 0x90}}; /* 94EA2B94-E9CC-49E0-C0FF-EE64CA8F5B90 */ +#endif + +static const IID MA_IID_IAudioClient = {0x1CB9AD4C, 0xDBFA, 0x4C32, {0xB1, 0x78, 0xC2, 0xF5, 0x68, 0xA7, 0x03, 0xB2}}; /* 1CB9AD4C-DBFA-4C32-B178-C2F568A703B2 = __uuidof(IAudioClient) */ +static const IID MA_IID_IAudioClient2 = {0x726778CD, 0xF60A, 0x4EDA, {0x82, 0xDE, 0xE4, 0x76, 0x10, 0xCD, 0x78, 0xAA}}; /* 726778CD-F60A-4EDA-82DE-E47610CD78AA = __uuidof(IAudioClient2) */ +static const IID MA_IID_IAudioClient3 = {0x7ED4EE07, 0x8E67, 0x4CD4, {0x8C, 0x1A, 0x2B, 0x7A, 0x59, 0x87, 0xAD, 0x42}}; /* 7ED4EE07-8E67-4CD4-8C1A-2B7A5987AD42 = __uuidof(IAudioClient3) */ +static const IID MA_IID_IAudioRenderClient = {0xF294ACFC, 0x3146, 0x4483, {0xA7, 0xBF, 0xAD, 0xDC, 0xA7, 0xC2, 0x60, 0xE2}}; /* F294ACFC-3146-4483-A7BF-ADDCA7C260E2 = __uuidof(IAudioRenderClient) */ +static const IID MA_IID_IAudioCaptureClient = {0xC8ADBD64, 0xE71E, 0x48A0, {0xA4, 0xDE, 0x18, 0x5C, 0x39, 0x5C, 0xD3, 0x17}}; /* C8ADBD64-E71E-48A0-A4DE-185C395CD317 = __uuidof(IAudioCaptureClient) */ +static const IID MA_IID_IMMNotificationClient = {0x7991EEC9, 0x7E89, 0x4D85, {0x83, 0x90, 0x6C, 0x70, 0x3C, 0xEC, 0x60, 0xC0}}; /* 7991EEC9-7E89-4D85-8390-6C703CEC60C0 = __uuidof(IMMNotificationClient) */ +#if !defined(MA_WIN32_DESKTOP) && !defined(MA_WIN32_GDK) +static const IID MA_IID_DEVINTERFACE_AUDIO_RENDER = {0xE6327CAD, 0xDCEC, 0x4949, {0xAE, 0x8A, 0x99, 0x1E, 0x97, 0x6A, 0x79, 0xD2}}; /* E6327CAD-DCEC-4949-AE8A-991E976A79D2 */ +static const IID MA_IID_DEVINTERFACE_AUDIO_CAPTURE = {0x2EEF81BE, 0x33FA, 
0x4800, {0x96, 0x70, 0x1C, 0xD4, 0x74, 0x97, 0x2C, 0x3F}}; /* 2EEF81BE-33FA-4800-9670-1CD474972C3F */ +static const IID MA_IID_IActivateAudioInterfaceCompletionHandler = {0x41D949AB, 0x9862, 0x444A, {0x80, 0xF6, 0xC2, 0x61, 0x33, 0x4D, 0xA5, 0xEB}}; /* 41D949AB-9862-444A-80F6-C261334DA5EB */ +#endif + +static const IID MA_CLSID_MMDeviceEnumerator = {0xBCDE0395, 0xE52F, 0x467C, {0x8E, 0x3D, 0xC4, 0x57, 0x92, 0x91, 0x69, 0x2E}}; /* BCDE0395-E52F-467C-8E3D-C4579291692E = __uuidof(MMDeviceEnumerator) */ +static const IID MA_IID_IMMDeviceEnumerator = {0xA95664D2, 0x9614, 0x4F35, {0xA7, 0x46, 0xDE, 0x8D, 0xB6, 0x36, 0x17, 0xE6}}; /* A95664D2-9614-4F35-A746-DE8DB63617E6 = __uuidof(IMMDeviceEnumerator) */ + +#if defined(MA_WIN32_DESKTOP) || defined(MA_WIN32_GDK) +#define MA_MM_DEVICE_STATE_ACTIVE 1 +#define MA_MM_DEVICE_STATE_DISABLED 2 +#define MA_MM_DEVICE_STATE_NOTPRESENT 4 +#define MA_MM_DEVICE_STATE_UNPLUGGED 8 + +typedef struct ma_IMMDeviceEnumerator ma_IMMDeviceEnumerator; +typedef struct ma_IMMDeviceCollection ma_IMMDeviceCollection; +typedef struct ma_IMMDevice ma_IMMDevice; +#else +typedef struct ma_IActivateAudioInterfaceCompletionHandler ma_IActivateAudioInterfaceCompletionHandler; +typedef struct ma_IActivateAudioInterfaceAsyncOperation ma_IActivateAudioInterfaceAsyncOperation; +#endif +typedef struct ma_IPropertyStore ma_IPropertyStore; +typedef struct ma_IAudioClient ma_IAudioClient; +typedef struct ma_IAudioClient2 ma_IAudioClient2; +typedef struct ma_IAudioClient3 ma_IAudioClient3; +typedef struct ma_IAudioRenderClient ma_IAudioRenderClient; +typedef struct ma_IAudioCaptureClient ma_IAudioCaptureClient; + +typedef ma_int64 MA_REFERENCE_TIME; + +#define MA_AUDCLNT_STREAMFLAGS_CROSSPROCESS 0x00010000 +#define MA_AUDCLNT_STREAMFLAGS_LOOPBACK 0x00020000 +#define MA_AUDCLNT_STREAMFLAGS_EVENTCALLBACK 0x00040000 +#define MA_AUDCLNT_STREAMFLAGS_NOPERSIST 0x00080000 +#define MA_AUDCLNT_STREAMFLAGS_RATEADJUST 0x00100000 +#define MA_AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY 0x08000000 +#define MA_AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM 0x80000000 +#define MA_AUDCLNT_SESSIONFLAGS_EXPIREWHENUNOWNED 0x10000000 +#define MA_AUDCLNT_SESSIONFLAGS_DISPLAY_HIDE 0x20000000 +#define MA_AUDCLNT_SESSIONFLAGS_DISPLAY_HIDEWHENEXPIRED 0x40000000 + +/* Buffer flags. */ +#define MA_AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY 1 +#define MA_AUDCLNT_BUFFERFLAGS_SILENT 2 +#define MA_AUDCLNT_BUFFERFLAGS_TIMESTAMP_ERROR 4 + +typedef enum +{ + ma_eRender = 0, + ma_eCapture = 1, + ma_eAll = 2 +} ma_EDataFlow; + +typedef enum +{ + ma_eConsole = 0, + ma_eMultimedia = 1, + ma_eCommunications = 2 +} ma_ERole; + +typedef enum +{ + MA_AUDCLNT_SHAREMODE_SHARED, + MA_AUDCLNT_SHAREMODE_EXCLUSIVE +} MA_AUDCLNT_SHAREMODE; + +typedef enum +{ + MA_AudioCategory_Other = 0 /* <-- miniaudio is only caring about Other. 
*/ +} MA_AUDIO_STREAM_CATEGORY; + +typedef struct +{ + ma_uint32 cbSize; + BOOL bIsOffload; + MA_AUDIO_STREAM_CATEGORY eCategory; +} ma_AudioClientProperties; + +/* IUnknown */ +typedef struct +{ + /* IUnknown */ + HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IUnknown* pThis, const IID* const riid, void** ppObject); + ULONG (STDMETHODCALLTYPE * AddRef) (ma_IUnknown* pThis); + ULONG (STDMETHODCALLTYPE * Release) (ma_IUnknown* pThis); +} ma_IUnknownVtbl; +struct ma_IUnknown +{ + ma_IUnknownVtbl* lpVtbl; +}; +static MA_INLINE HRESULT ma_IUnknown_QueryInterface(ma_IUnknown* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); } +static MA_INLINE ULONG ma_IUnknown_AddRef(ma_IUnknown* pThis) { return pThis->lpVtbl->AddRef(pThis); } +static MA_INLINE ULONG ma_IUnknown_Release(ma_IUnknown* pThis) { return pThis->lpVtbl->Release(pThis); } + +#if defined(MA_WIN32_DESKTOP) || defined(MA_WIN32_GDK) + /* IMMNotificationClient */ + typedef struct + { + /* IUnknown */ + HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IMMNotificationClient* pThis, const IID* const riid, void** ppObject); + ULONG (STDMETHODCALLTYPE * AddRef) (ma_IMMNotificationClient* pThis); + ULONG (STDMETHODCALLTYPE * Release) (ma_IMMNotificationClient* pThis); + + /* IMMNotificationClient */ + HRESULT (STDMETHODCALLTYPE * OnDeviceStateChanged) (ma_IMMNotificationClient* pThis, const WCHAR* pDeviceID, DWORD dwNewState); + HRESULT (STDMETHODCALLTYPE * OnDeviceAdded) (ma_IMMNotificationClient* pThis, const WCHAR* pDeviceID); + HRESULT (STDMETHODCALLTYPE * OnDeviceRemoved) (ma_IMMNotificationClient* pThis, const WCHAR* pDeviceID); + HRESULT (STDMETHODCALLTYPE * OnDefaultDeviceChanged)(ma_IMMNotificationClient* pThis, ma_EDataFlow dataFlow, ma_ERole role, const WCHAR* pDefaultDeviceID); + HRESULT (STDMETHODCALLTYPE * OnPropertyValueChanged)(ma_IMMNotificationClient* pThis, const WCHAR* pDeviceID, const PROPERTYKEY key); + } ma_IMMNotificationClientVtbl; + + /* IMMDeviceEnumerator */ + typedef struct + { + /* IUnknown */ + HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IMMDeviceEnumerator* pThis, const IID* const riid, void** ppObject); + ULONG (STDMETHODCALLTYPE * AddRef) (ma_IMMDeviceEnumerator* pThis); + ULONG (STDMETHODCALLTYPE * Release) (ma_IMMDeviceEnumerator* pThis); + + /* IMMDeviceEnumerator */ + HRESULT (STDMETHODCALLTYPE * EnumAudioEndpoints) (ma_IMMDeviceEnumerator* pThis, ma_EDataFlow dataFlow, DWORD dwStateMask, ma_IMMDeviceCollection** ppDevices); + HRESULT (STDMETHODCALLTYPE * GetDefaultAudioEndpoint) (ma_IMMDeviceEnumerator* pThis, ma_EDataFlow dataFlow, ma_ERole role, ma_IMMDevice** ppEndpoint); + HRESULT (STDMETHODCALLTYPE * GetDevice) (ma_IMMDeviceEnumerator* pThis, const WCHAR* pID, ma_IMMDevice** ppDevice); + HRESULT (STDMETHODCALLTYPE * RegisterEndpointNotificationCallback) (ma_IMMDeviceEnumerator* pThis, ma_IMMNotificationClient* pClient); + HRESULT (STDMETHODCALLTYPE * UnregisterEndpointNotificationCallback)(ma_IMMDeviceEnumerator* pThis, ma_IMMNotificationClient* pClient); + } ma_IMMDeviceEnumeratorVtbl; + struct ma_IMMDeviceEnumerator + { + ma_IMMDeviceEnumeratorVtbl* lpVtbl; + }; + static MA_INLINE HRESULT ma_IMMDeviceEnumerator_QueryInterface(ma_IMMDeviceEnumerator* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); } + static MA_INLINE ULONG ma_IMMDeviceEnumerator_AddRef(ma_IMMDeviceEnumerator* pThis) { return pThis->lpVtbl->AddRef(pThis); } + static MA_INLINE ULONG 
ma_IMMDeviceEnumerator_Release(ma_IMMDeviceEnumerator* pThis) { return pThis->lpVtbl->Release(pThis); } + static MA_INLINE HRESULT ma_IMMDeviceEnumerator_EnumAudioEndpoints(ma_IMMDeviceEnumerator* pThis, ma_EDataFlow dataFlow, DWORD dwStateMask, ma_IMMDeviceCollection** ppDevices) { return pThis->lpVtbl->EnumAudioEndpoints(pThis, dataFlow, dwStateMask, ppDevices); } + static MA_INLINE HRESULT ma_IMMDeviceEnumerator_GetDefaultAudioEndpoint(ma_IMMDeviceEnumerator* pThis, ma_EDataFlow dataFlow, ma_ERole role, ma_IMMDevice** ppEndpoint) { return pThis->lpVtbl->GetDefaultAudioEndpoint(pThis, dataFlow, role, ppEndpoint); } + static MA_INLINE HRESULT ma_IMMDeviceEnumerator_GetDevice(ma_IMMDeviceEnumerator* pThis, const WCHAR* pID, ma_IMMDevice** ppDevice) { return pThis->lpVtbl->GetDevice(pThis, pID, ppDevice); } + static MA_INLINE HRESULT ma_IMMDeviceEnumerator_RegisterEndpointNotificationCallback(ma_IMMDeviceEnumerator* pThis, ma_IMMNotificationClient* pClient) { return pThis->lpVtbl->RegisterEndpointNotificationCallback(pThis, pClient); } + static MA_INLINE HRESULT ma_IMMDeviceEnumerator_UnregisterEndpointNotificationCallback(ma_IMMDeviceEnumerator* pThis, ma_IMMNotificationClient* pClient) { return pThis->lpVtbl->UnregisterEndpointNotificationCallback(pThis, pClient); } + + + /* IMMDeviceCollection */ + typedef struct + { + /* IUnknown */ + HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IMMDeviceCollection* pThis, const IID* const riid, void** ppObject); + ULONG (STDMETHODCALLTYPE * AddRef) (ma_IMMDeviceCollection* pThis); + ULONG (STDMETHODCALLTYPE * Release) (ma_IMMDeviceCollection* pThis); + + /* IMMDeviceCollection */ + HRESULT (STDMETHODCALLTYPE * GetCount)(ma_IMMDeviceCollection* pThis, UINT* pDevices); + HRESULT (STDMETHODCALLTYPE * Item) (ma_IMMDeviceCollection* pThis, UINT nDevice, ma_IMMDevice** ppDevice); + } ma_IMMDeviceCollectionVtbl; + struct ma_IMMDeviceCollection + { + ma_IMMDeviceCollectionVtbl* lpVtbl; + }; + static MA_INLINE HRESULT ma_IMMDeviceCollection_QueryInterface(ma_IMMDeviceCollection* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); } + static MA_INLINE ULONG ma_IMMDeviceCollection_AddRef(ma_IMMDeviceCollection* pThis) { return pThis->lpVtbl->AddRef(pThis); } + static MA_INLINE ULONG ma_IMMDeviceCollection_Release(ma_IMMDeviceCollection* pThis) { return pThis->lpVtbl->Release(pThis); } + static MA_INLINE HRESULT ma_IMMDeviceCollection_GetCount(ma_IMMDeviceCollection* pThis, UINT* pDevices) { return pThis->lpVtbl->GetCount(pThis, pDevices); } + static MA_INLINE HRESULT ma_IMMDeviceCollection_Item(ma_IMMDeviceCollection* pThis, UINT nDevice, ma_IMMDevice** ppDevice) { return pThis->lpVtbl->Item(pThis, nDevice, ppDevice); } + + + /* IMMDevice */ + typedef struct + { + /* IUnknown */ + HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IMMDevice* pThis, const IID* const riid, void** ppObject); + ULONG (STDMETHODCALLTYPE * AddRef) (ma_IMMDevice* pThis); + ULONG (STDMETHODCALLTYPE * Release) (ma_IMMDevice* pThis); + + /* IMMDevice */ + HRESULT (STDMETHODCALLTYPE * Activate) (ma_IMMDevice* pThis, const IID* const iid, DWORD dwClsCtx, MA_PROPVARIANT* pActivationParams, void** ppInterface); + HRESULT (STDMETHODCALLTYPE * OpenPropertyStore)(ma_IMMDevice* pThis, DWORD stgmAccess, ma_IPropertyStore** ppProperties); + HRESULT (STDMETHODCALLTYPE * GetId) (ma_IMMDevice* pThis, WCHAR** pID); + HRESULT (STDMETHODCALLTYPE * GetState) (ma_IMMDevice* pThis, DWORD *pState); + } ma_IMMDeviceVtbl; + struct ma_IMMDevice + { + 
ma_IMMDeviceVtbl* lpVtbl; + }; + static MA_INLINE HRESULT ma_IMMDevice_QueryInterface(ma_IMMDevice* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); } + static MA_INLINE ULONG ma_IMMDevice_AddRef(ma_IMMDevice* pThis) { return pThis->lpVtbl->AddRef(pThis); } + static MA_INLINE ULONG ma_IMMDevice_Release(ma_IMMDevice* pThis) { return pThis->lpVtbl->Release(pThis); } + static MA_INLINE HRESULT ma_IMMDevice_Activate(ma_IMMDevice* pThis, const IID* const iid, DWORD dwClsCtx, MA_PROPVARIANT* pActivationParams, void** ppInterface) { return pThis->lpVtbl->Activate(pThis, iid, dwClsCtx, pActivationParams, ppInterface); } + static MA_INLINE HRESULT ma_IMMDevice_OpenPropertyStore(ma_IMMDevice* pThis, DWORD stgmAccess, ma_IPropertyStore** ppProperties) { return pThis->lpVtbl->OpenPropertyStore(pThis, stgmAccess, ppProperties); } + static MA_INLINE HRESULT ma_IMMDevice_GetId(ma_IMMDevice* pThis, WCHAR** pID) { return pThis->lpVtbl->GetId(pThis, pID); } + static MA_INLINE HRESULT ma_IMMDevice_GetState(ma_IMMDevice* pThis, DWORD *pState) { return pThis->lpVtbl->GetState(pThis, pState); } +#else + /* IActivateAudioInterfaceAsyncOperation */ + typedef struct + { + /* IUnknown */ + HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IActivateAudioInterfaceAsyncOperation* pThis, const IID* const riid, void** ppObject); + ULONG (STDMETHODCALLTYPE * AddRef) (ma_IActivateAudioInterfaceAsyncOperation* pThis); + ULONG (STDMETHODCALLTYPE * Release) (ma_IActivateAudioInterfaceAsyncOperation* pThis); + + /* IActivateAudioInterfaceAsyncOperation */ + HRESULT (STDMETHODCALLTYPE * GetActivateResult)(ma_IActivateAudioInterfaceAsyncOperation* pThis, HRESULT *pActivateResult, ma_IUnknown** ppActivatedInterface); + } ma_IActivateAudioInterfaceAsyncOperationVtbl; + struct ma_IActivateAudioInterfaceAsyncOperation + { + ma_IActivateAudioInterfaceAsyncOperationVtbl* lpVtbl; + }; + static MA_INLINE HRESULT ma_IActivateAudioInterfaceAsyncOperation_QueryInterface(ma_IActivateAudioInterfaceAsyncOperation* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); } + static MA_INLINE ULONG ma_IActivateAudioInterfaceAsyncOperation_AddRef(ma_IActivateAudioInterfaceAsyncOperation* pThis) { return pThis->lpVtbl->AddRef(pThis); } + static MA_INLINE ULONG ma_IActivateAudioInterfaceAsyncOperation_Release(ma_IActivateAudioInterfaceAsyncOperation* pThis) { return pThis->lpVtbl->Release(pThis); } + static MA_INLINE HRESULT ma_IActivateAudioInterfaceAsyncOperation_GetActivateResult(ma_IActivateAudioInterfaceAsyncOperation* pThis, HRESULT *pActivateResult, ma_IUnknown** ppActivatedInterface) { return pThis->lpVtbl->GetActivateResult(pThis, pActivateResult, ppActivatedInterface); } +#endif + +/* IPropertyStore */ +typedef struct +{ + /* IUnknown */ + HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IPropertyStore* pThis, const IID* const riid, void** ppObject); + ULONG (STDMETHODCALLTYPE * AddRef) (ma_IPropertyStore* pThis); + ULONG (STDMETHODCALLTYPE * Release) (ma_IPropertyStore* pThis); + + /* IPropertyStore */ + HRESULT (STDMETHODCALLTYPE * GetCount)(ma_IPropertyStore* pThis, DWORD* pPropCount); + HRESULT (STDMETHODCALLTYPE * GetAt) (ma_IPropertyStore* pThis, DWORD propIndex, PROPERTYKEY* pPropKey); + HRESULT (STDMETHODCALLTYPE * GetValue)(ma_IPropertyStore* pThis, const PROPERTYKEY* const pKey, MA_PROPVARIANT* pPropVar); + HRESULT (STDMETHODCALLTYPE * SetValue)(ma_IPropertyStore* pThis, const PROPERTYKEY* const pKey, const 
MA_PROPVARIANT* const pPropVar); + HRESULT (STDMETHODCALLTYPE * Commit) (ma_IPropertyStore* pThis); +} ma_IPropertyStoreVtbl; +struct ma_IPropertyStore +{ + ma_IPropertyStoreVtbl* lpVtbl; +}; +static MA_INLINE HRESULT ma_IPropertyStore_QueryInterface(ma_IPropertyStore* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); } +static MA_INLINE ULONG ma_IPropertyStore_AddRef(ma_IPropertyStore* pThis) { return pThis->lpVtbl->AddRef(pThis); } +static MA_INLINE ULONG ma_IPropertyStore_Release(ma_IPropertyStore* pThis) { return pThis->lpVtbl->Release(pThis); } +static MA_INLINE HRESULT ma_IPropertyStore_GetCount(ma_IPropertyStore* pThis, DWORD* pPropCount) { return pThis->lpVtbl->GetCount(pThis, pPropCount); } +static MA_INLINE HRESULT ma_IPropertyStore_GetAt(ma_IPropertyStore* pThis, DWORD propIndex, PROPERTYKEY* pPropKey) { return pThis->lpVtbl->GetAt(pThis, propIndex, pPropKey); } +static MA_INLINE HRESULT ma_IPropertyStore_GetValue(ma_IPropertyStore* pThis, const PROPERTYKEY* const pKey, MA_PROPVARIANT* pPropVar) { return pThis->lpVtbl->GetValue(pThis, pKey, pPropVar); } +static MA_INLINE HRESULT ma_IPropertyStore_SetValue(ma_IPropertyStore* pThis, const PROPERTYKEY* const pKey, const MA_PROPVARIANT* const pPropVar) { return pThis->lpVtbl->SetValue(pThis, pKey, pPropVar); } +static MA_INLINE HRESULT ma_IPropertyStore_Commit(ma_IPropertyStore* pThis) { return pThis->lpVtbl->Commit(pThis); } + + +/* IAudioClient */ +typedef struct +{ + /* IUnknown */ + HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IAudioClient* pThis, const IID* const riid, void** ppObject); + ULONG (STDMETHODCALLTYPE * AddRef) (ma_IAudioClient* pThis); + ULONG (STDMETHODCALLTYPE * Release) (ma_IAudioClient* pThis); + + /* IAudioClient */ + HRESULT (STDMETHODCALLTYPE * Initialize) (ma_IAudioClient* pThis, MA_AUDCLNT_SHAREMODE shareMode, DWORD streamFlags, MA_REFERENCE_TIME bufferDuration, MA_REFERENCE_TIME periodicity, const MA_WAVEFORMATEX* pFormat, const GUID* pAudioSessionGuid); + HRESULT (STDMETHODCALLTYPE * GetBufferSize) (ma_IAudioClient* pThis, ma_uint32* pNumBufferFrames); + HRESULT (STDMETHODCALLTYPE * GetStreamLatency) (ma_IAudioClient* pThis, MA_REFERENCE_TIME* pLatency); + HRESULT (STDMETHODCALLTYPE * GetCurrentPadding)(ma_IAudioClient* pThis, ma_uint32* pNumPaddingFrames); + HRESULT (STDMETHODCALLTYPE * IsFormatSupported)(ma_IAudioClient* pThis, MA_AUDCLNT_SHAREMODE shareMode, const MA_WAVEFORMATEX* pFormat, MA_WAVEFORMATEX** ppClosestMatch); + HRESULT (STDMETHODCALLTYPE * GetMixFormat) (ma_IAudioClient* pThis, MA_WAVEFORMATEX** ppDeviceFormat); + HRESULT (STDMETHODCALLTYPE * GetDevicePeriod) (ma_IAudioClient* pThis, MA_REFERENCE_TIME* pDefaultDevicePeriod, MA_REFERENCE_TIME* pMinimumDevicePeriod); + HRESULT (STDMETHODCALLTYPE * Start) (ma_IAudioClient* pThis); + HRESULT (STDMETHODCALLTYPE * Stop) (ma_IAudioClient* pThis); + HRESULT (STDMETHODCALLTYPE * Reset) (ma_IAudioClient* pThis); + HRESULT (STDMETHODCALLTYPE * SetEventHandle) (ma_IAudioClient* pThis, HANDLE eventHandle); + HRESULT (STDMETHODCALLTYPE * GetService) (ma_IAudioClient* pThis, const IID* const riid, void** pp); +} ma_IAudioClientVtbl; +struct ma_IAudioClient +{ + ma_IAudioClientVtbl* lpVtbl; +}; +static MA_INLINE HRESULT ma_IAudioClient_QueryInterface(ma_IAudioClient* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); } +static MA_INLINE ULONG ma_IAudioClient_AddRef(ma_IAudioClient* pThis) { return pThis->lpVtbl->AddRef(pThis); } 
+static MA_INLINE ULONG ma_IAudioClient_Release(ma_IAudioClient* pThis) { return pThis->lpVtbl->Release(pThis); } +static MA_INLINE HRESULT ma_IAudioClient_Initialize(ma_IAudioClient* pThis, MA_AUDCLNT_SHAREMODE shareMode, DWORD streamFlags, MA_REFERENCE_TIME bufferDuration, MA_REFERENCE_TIME periodicity, const MA_WAVEFORMATEX* pFormat, const GUID* pAudioSessionGuid) { return pThis->lpVtbl->Initialize(pThis, shareMode, streamFlags, bufferDuration, periodicity, pFormat, pAudioSessionGuid); } +static MA_INLINE HRESULT ma_IAudioClient_GetBufferSize(ma_IAudioClient* pThis, ma_uint32* pNumBufferFrames) { return pThis->lpVtbl->GetBufferSize(pThis, pNumBufferFrames); } +static MA_INLINE HRESULT ma_IAudioClient_GetStreamLatency(ma_IAudioClient* pThis, MA_REFERENCE_TIME* pLatency) { return pThis->lpVtbl->GetStreamLatency(pThis, pLatency); } +static MA_INLINE HRESULT ma_IAudioClient_GetCurrentPadding(ma_IAudioClient* pThis, ma_uint32* pNumPaddingFrames) { return pThis->lpVtbl->GetCurrentPadding(pThis, pNumPaddingFrames); } +static MA_INLINE HRESULT ma_IAudioClient_IsFormatSupported(ma_IAudioClient* pThis, MA_AUDCLNT_SHAREMODE shareMode, const MA_WAVEFORMATEX* pFormat, MA_WAVEFORMATEX** ppClosestMatch) { return pThis->lpVtbl->IsFormatSupported(pThis, shareMode, pFormat, ppClosestMatch); } +static MA_INLINE HRESULT ma_IAudioClient_GetMixFormat(ma_IAudioClient* pThis, MA_WAVEFORMATEX** ppDeviceFormat) { return pThis->lpVtbl->GetMixFormat(pThis, ppDeviceFormat); } +static MA_INLINE HRESULT ma_IAudioClient_GetDevicePeriod(ma_IAudioClient* pThis, MA_REFERENCE_TIME* pDefaultDevicePeriod, MA_REFERENCE_TIME* pMinimumDevicePeriod) { return pThis->lpVtbl->GetDevicePeriod(pThis, pDefaultDevicePeriod, pMinimumDevicePeriod); } +static MA_INLINE HRESULT ma_IAudioClient_Start(ma_IAudioClient* pThis) { return pThis->lpVtbl->Start(pThis); } +static MA_INLINE HRESULT ma_IAudioClient_Stop(ma_IAudioClient* pThis) { return pThis->lpVtbl->Stop(pThis); } +static MA_INLINE HRESULT ma_IAudioClient_Reset(ma_IAudioClient* pThis) { return pThis->lpVtbl->Reset(pThis); } +static MA_INLINE HRESULT ma_IAudioClient_SetEventHandle(ma_IAudioClient* pThis, HANDLE eventHandle) { return pThis->lpVtbl->SetEventHandle(pThis, eventHandle); } +static MA_INLINE HRESULT ma_IAudioClient_GetService(ma_IAudioClient* pThis, const IID* const riid, void** pp) { return pThis->lpVtbl->GetService(pThis, riid, pp); } + +/* IAudioClient2 */ +typedef struct +{ + /* IUnknown */ + HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IAudioClient2* pThis, const IID* const riid, void** ppObject); + ULONG (STDMETHODCALLTYPE * AddRef) (ma_IAudioClient2* pThis); + ULONG (STDMETHODCALLTYPE * Release) (ma_IAudioClient2* pThis); + + /* IAudioClient */ + HRESULT (STDMETHODCALLTYPE * Initialize) (ma_IAudioClient2* pThis, MA_AUDCLNT_SHAREMODE shareMode, DWORD streamFlags, MA_REFERENCE_TIME bufferDuration, MA_REFERENCE_TIME periodicity, const MA_WAVEFORMATEX* pFormat, const GUID* pAudioSessionGuid); + HRESULT (STDMETHODCALLTYPE * GetBufferSize) (ma_IAudioClient2* pThis, ma_uint32* pNumBufferFrames); + HRESULT (STDMETHODCALLTYPE * GetStreamLatency) (ma_IAudioClient2* pThis, MA_REFERENCE_TIME* pLatency); + HRESULT (STDMETHODCALLTYPE * GetCurrentPadding)(ma_IAudioClient2* pThis, ma_uint32* pNumPaddingFrames); + HRESULT (STDMETHODCALLTYPE * IsFormatSupported)(ma_IAudioClient2* pThis, MA_AUDCLNT_SHAREMODE shareMode, const MA_WAVEFORMATEX* pFormat, MA_WAVEFORMATEX** ppClosestMatch); + HRESULT (STDMETHODCALLTYPE * GetMixFormat) (ma_IAudioClient2* pThis, MA_WAVEFORMATEX** 
ppDeviceFormat); + HRESULT (STDMETHODCALLTYPE * GetDevicePeriod) (ma_IAudioClient2* pThis, MA_REFERENCE_TIME* pDefaultDevicePeriod, MA_REFERENCE_TIME* pMinimumDevicePeriod); + HRESULT (STDMETHODCALLTYPE * Start) (ma_IAudioClient2* pThis); + HRESULT (STDMETHODCALLTYPE * Stop) (ma_IAudioClient2* pThis); + HRESULT (STDMETHODCALLTYPE * Reset) (ma_IAudioClient2* pThis); + HRESULT (STDMETHODCALLTYPE * SetEventHandle) (ma_IAudioClient2* pThis, HANDLE eventHandle); + HRESULT (STDMETHODCALLTYPE * GetService) (ma_IAudioClient2* pThis, const IID* const riid, void** pp); + + /* IAudioClient2 */ + HRESULT (STDMETHODCALLTYPE * IsOffloadCapable) (ma_IAudioClient2* pThis, MA_AUDIO_STREAM_CATEGORY category, BOOL* pOffloadCapable); + HRESULT (STDMETHODCALLTYPE * SetClientProperties)(ma_IAudioClient2* pThis, const ma_AudioClientProperties* pProperties); + HRESULT (STDMETHODCALLTYPE * GetBufferSizeLimits)(ma_IAudioClient2* pThis, const MA_WAVEFORMATEX* pFormat, BOOL eventDriven, MA_REFERENCE_TIME* pMinBufferDuration, MA_REFERENCE_TIME* pMaxBufferDuration); +} ma_IAudioClient2Vtbl; +struct ma_IAudioClient2 +{ + ma_IAudioClient2Vtbl* lpVtbl; +}; +static MA_INLINE HRESULT ma_IAudioClient2_QueryInterface(ma_IAudioClient2* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); } +static MA_INLINE ULONG ma_IAudioClient2_AddRef(ma_IAudioClient2* pThis) { return pThis->lpVtbl->AddRef(pThis); } +static MA_INLINE ULONG ma_IAudioClient2_Release(ma_IAudioClient2* pThis) { return pThis->lpVtbl->Release(pThis); } +static MA_INLINE HRESULT ma_IAudioClient2_Initialize(ma_IAudioClient2* pThis, MA_AUDCLNT_SHAREMODE shareMode, DWORD streamFlags, MA_REFERENCE_TIME bufferDuration, MA_REFERENCE_TIME periodicity, const MA_WAVEFORMATEX* pFormat, const GUID* pAudioSessionGuid) { return pThis->lpVtbl->Initialize(pThis, shareMode, streamFlags, bufferDuration, periodicity, pFormat, pAudioSessionGuid); } +static MA_INLINE HRESULT ma_IAudioClient2_GetBufferSize(ma_IAudioClient2* pThis, ma_uint32* pNumBufferFrames) { return pThis->lpVtbl->GetBufferSize(pThis, pNumBufferFrames); } +static MA_INLINE HRESULT ma_IAudioClient2_GetStreamLatency(ma_IAudioClient2* pThis, MA_REFERENCE_TIME* pLatency) { return pThis->lpVtbl->GetStreamLatency(pThis, pLatency); } +static MA_INLINE HRESULT ma_IAudioClient2_GetCurrentPadding(ma_IAudioClient2* pThis, ma_uint32* pNumPaddingFrames) { return pThis->lpVtbl->GetCurrentPadding(pThis, pNumPaddingFrames); } +static MA_INLINE HRESULT ma_IAudioClient2_IsFormatSupported(ma_IAudioClient2* pThis, MA_AUDCLNT_SHAREMODE shareMode, const MA_WAVEFORMATEX* pFormat, MA_WAVEFORMATEX** ppClosestMatch) { return pThis->lpVtbl->IsFormatSupported(pThis, shareMode, pFormat, ppClosestMatch); } +static MA_INLINE HRESULT ma_IAudioClient2_GetMixFormat(ma_IAudioClient2* pThis, MA_WAVEFORMATEX** ppDeviceFormat) { return pThis->lpVtbl->GetMixFormat(pThis, ppDeviceFormat); } +static MA_INLINE HRESULT ma_IAudioClient2_GetDevicePeriod(ma_IAudioClient2* pThis, MA_REFERENCE_TIME* pDefaultDevicePeriod, MA_REFERENCE_TIME* pMinimumDevicePeriod) { return pThis->lpVtbl->GetDevicePeriod(pThis, pDefaultDevicePeriod, pMinimumDevicePeriod); } +static MA_INLINE HRESULT ma_IAudioClient2_Start(ma_IAudioClient2* pThis) { return pThis->lpVtbl->Start(pThis); } +static MA_INLINE HRESULT ma_IAudioClient2_Stop(ma_IAudioClient2* pThis) { return pThis->lpVtbl->Stop(pThis); } +static MA_INLINE HRESULT ma_IAudioClient2_Reset(ma_IAudioClient2* pThis) { return pThis->lpVtbl->Reset(pThis); } +static MA_INLINE 
HRESULT ma_IAudioClient2_SetEventHandle(ma_IAudioClient2* pThis, HANDLE eventHandle) { return pThis->lpVtbl->SetEventHandle(pThis, eventHandle); } +static MA_INLINE HRESULT ma_IAudioClient2_GetService(ma_IAudioClient2* pThis, const IID* const riid, void** pp) { return pThis->lpVtbl->GetService(pThis, riid, pp); } +static MA_INLINE HRESULT ma_IAudioClient2_IsOffloadCapable(ma_IAudioClient2* pThis, MA_AUDIO_STREAM_CATEGORY category, BOOL* pOffloadCapable) { return pThis->lpVtbl->IsOffloadCapable(pThis, category, pOffloadCapable); } +static MA_INLINE HRESULT ma_IAudioClient2_SetClientProperties(ma_IAudioClient2* pThis, const ma_AudioClientProperties* pProperties) { return pThis->lpVtbl->SetClientProperties(pThis, pProperties); } +static MA_INLINE HRESULT ma_IAudioClient2_GetBufferSizeLimits(ma_IAudioClient2* pThis, const MA_WAVEFORMATEX* pFormat, BOOL eventDriven, MA_REFERENCE_TIME* pMinBufferDuration, MA_REFERENCE_TIME* pMaxBufferDuration) { return pThis->lpVtbl->GetBufferSizeLimits(pThis, pFormat, eventDriven, pMinBufferDuration, pMaxBufferDuration); } + + +/* IAudioClient3 */ +typedef struct +{ + /* IUnknown */ + HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IAudioClient3* pThis, const IID* const riid, void** ppObject); + ULONG (STDMETHODCALLTYPE * AddRef) (ma_IAudioClient3* pThis); + ULONG (STDMETHODCALLTYPE * Release) (ma_IAudioClient3* pThis); + + /* IAudioClient */ + HRESULT (STDMETHODCALLTYPE * Initialize) (ma_IAudioClient3* pThis, MA_AUDCLNT_SHAREMODE shareMode, DWORD streamFlags, MA_REFERENCE_TIME bufferDuration, MA_REFERENCE_TIME periodicity, const MA_WAVEFORMATEX* pFormat, const GUID* pAudioSessionGuid); + HRESULT (STDMETHODCALLTYPE * GetBufferSize) (ma_IAudioClient3* pThis, ma_uint32* pNumBufferFrames); + HRESULT (STDMETHODCALLTYPE * GetStreamLatency) (ma_IAudioClient3* pThis, MA_REFERENCE_TIME* pLatency); + HRESULT (STDMETHODCALLTYPE * GetCurrentPadding)(ma_IAudioClient3* pThis, ma_uint32* pNumPaddingFrames); + HRESULT (STDMETHODCALLTYPE * IsFormatSupported)(ma_IAudioClient3* pThis, MA_AUDCLNT_SHAREMODE shareMode, const MA_WAVEFORMATEX* pFormat, MA_WAVEFORMATEX** ppClosestMatch); + HRESULT (STDMETHODCALLTYPE * GetMixFormat) (ma_IAudioClient3* pThis, MA_WAVEFORMATEX** ppDeviceFormat); + HRESULT (STDMETHODCALLTYPE * GetDevicePeriod) (ma_IAudioClient3* pThis, MA_REFERENCE_TIME* pDefaultDevicePeriod, MA_REFERENCE_TIME* pMinimumDevicePeriod); + HRESULT (STDMETHODCALLTYPE * Start) (ma_IAudioClient3* pThis); + HRESULT (STDMETHODCALLTYPE * Stop) (ma_IAudioClient3* pThis); + HRESULT (STDMETHODCALLTYPE * Reset) (ma_IAudioClient3* pThis); + HRESULT (STDMETHODCALLTYPE * SetEventHandle) (ma_IAudioClient3* pThis, HANDLE eventHandle); + HRESULT (STDMETHODCALLTYPE * GetService) (ma_IAudioClient3* pThis, const IID* const riid, void** pp); + + /* IAudioClient2 */ + HRESULT (STDMETHODCALLTYPE * IsOffloadCapable) (ma_IAudioClient3* pThis, MA_AUDIO_STREAM_CATEGORY category, BOOL* pOffloadCapable); + HRESULT (STDMETHODCALLTYPE * SetClientProperties)(ma_IAudioClient3* pThis, const ma_AudioClientProperties* pProperties); + HRESULT (STDMETHODCALLTYPE * GetBufferSizeLimits)(ma_IAudioClient3* pThis, const MA_WAVEFORMATEX* pFormat, BOOL eventDriven, MA_REFERENCE_TIME* pMinBufferDuration, MA_REFERENCE_TIME* pMaxBufferDuration); + + /* IAudioClient3 */ + HRESULT (STDMETHODCALLTYPE * GetSharedModeEnginePeriod) (ma_IAudioClient3* pThis, const MA_WAVEFORMATEX* pFormat, ma_uint32* pDefaultPeriodInFrames, ma_uint32* pFundamentalPeriodInFrames, ma_uint32* pMinPeriodInFrames, ma_uint32* pMaxPeriodInFrames); + 
HRESULT (STDMETHODCALLTYPE * GetCurrentSharedModeEnginePeriod)(ma_IAudioClient3* pThis, MA_WAVEFORMATEX** ppFormat, ma_uint32* pCurrentPeriodInFrames); + HRESULT (STDMETHODCALLTYPE * InitializeSharedAudioStream) (ma_IAudioClient3* pThis, DWORD streamFlags, ma_uint32 periodInFrames, const MA_WAVEFORMATEX* pFormat, const GUID* pAudioSessionGuid); +} ma_IAudioClient3Vtbl; +struct ma_IAudioClient3 +{ + ma_IAudioClient3Vtbl* lpVtbl; +}; +static MA_INLINE HRESULT ma_IAudioClient3_QueryInterface(ma_IAudioClient3* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); } +static MA_INLINE ULONG ma_IAudioClient3_AddRef(ma_IAudioClient3* pThis) { return pThis->lpVtbl->AddRef(pThis); } +static MA_INLINE ULONG ma_IAudioClient3_Release(ma_IAudioClient3* pThis) { return pThis->lpVtbl->Release(pThis); } +static MA_INLINE HRESULT ma_IAudioClient3_Initialize(ma_IAudioClient3* pThis, MA_AUDCLNT_SHAREMODE shareMode, DWORD streamFlags, MA_REFERENCE_TIME bufferDuration, MA_REFERENCE_TIME periodicity, const MA_WAVEFORMATEX* pFormat, const GUID* pAudioSessionGuid) { return pThis->lpVtbl->Initialize(pThis, shareMode, streamFlags, bufferDuration, periodicity, pFormat, pAudioSessionGuid); } +static MA_INLINE HRESULT ma_IAudioClient3_GetBufferSize(ma_IAudioClient3* pThis, ma_uint32* pNumBufferFrames) { return pThis->lpVtbl->GetBufferSize(pThis, pNumBufferFrames); } +static MA_INLINE HRESULT ma_IAudioClient3_GetStreamLatency(ma_IAudioClient3* pThis, MA_REFERENCE_TIME* pLatency) { return pThis->lpVtbl->GetStreamLatency(pThis, pLatency); } +static MA_INLINE HRESULT ma_IAudioClient3_GetCurrentPadding(ma_IAudioClient3* pThis, ma_uint32* pNumPaddingFrames) { return pThis->lpVtbl->GetCurrentPadding(pThis, pNumPaddingFrames); } +static MA_INLINE HRESULT ma_IAudioClient3_IsFormatSupported(ma_IAudioClient3* pThis, MA_AUDCLNT_SHAREMODE shareMode, const MA_WAVEFORMATEX* pFormat, MA_WAVEFORMATEX** ppClosestMatch) { return pThis->lpVtbl->IsFormatSupported(pThis, shareMode, pFormat, ppClosestMatch); } +static MA_INLINE HRESULT ma_IAudioClient3_GetMixFormat(ma_IAudioClient3* pThis, MA_WAVEFORMATEX** ppDeviceFormat) { return pThis->lpVtbl->GetMixFormat(pThis, ppDeviceFormat); } +static MA_INLINE HRESULT ma_IAudioClient3_GetDevicePeriod(ma_IAudioClient3* pThis, MA_REFERENCE_TIME* pDefaultDevicePeriod, MA_REFERENCE_TIME* pMinimumDevicePeriod) { return pThis->lpVtbl->GetDevicePeriod(pThis, pDefaultDevicePeriod, pMinimumDevicePeriod); } +static MA_INLINE HRESULT ma_IAudioClient3_Start(ma_IAudioClient3* pThis) { return pThis->lpVtbl->Start(pThis); } +static MA_INLINE HRESULT ma_IAudioClient3_Stop(ma_IAudioClient3* pThis) { return pThis->lpVtbl->Stop(pThis); } +static MA_INLINE HRESULT ma_IAudioClient3_Reset(ma_IAudioClient3* pThis) { return pThis->lpVtbl->Reset(pThis); } +static MA_INLINE HRESULT ma_IAudioClient3_SetEventHandle(ma_IAudioClient3* pThis, HANDLE eventHandle) { return pThis->lpVtbl->SetEventHandle(pThis, eventHandle); } +static MA_INLINE HRESULT ma_IAudioClient3_GetService(ma_IAudioClient3* pThis, const IID* const riid, void** pp) { return pThis->lpVtbl->GetService(pThis, riid, pp); } +static MA_INLINE HRESULT ma_IAudioClient3_IsOffloadCapable(ma_IAudioClient3* pThis, MA_AUDIO_STREAM_CATEGORY category, BOOL* pOffloadCapable) { return pThis->lpVtbl->IsOffloadCapable(pThis, category, pOffloadCapable); } +static MA_INLINE HRESULT ma_IAudioClient3_SetClientProperties(ma_IAudioClient3* pThis, const ma_AudioClientProperties* pProperties) { return 
pThis->lpVtbl->SetClientProperties(pThis, pProperties); }
+static MA_INLINE HRESULT ma_IAudioClient3_GetBufferSizeLimits(ma_IAudioClient3* pThis, const MA_WAVEFORMATEX* pFormat, BOOL eventDriven, MA_REFERENCE_TIME* pMinBufferDuration, MA_REFERENCE_TIME* pMaxBufferDuration) { return pThis->lpVtbl->GetBufferSizeLimits(pThis, pFormat, eventDriven, pMinBufferDuration, pMaxBufferDuration); }
+static MA_INLINE HRESULT ma_IAudioClient3_GetSharedModeEnginePeriod(ma_IAudioClient3* pThis, const MA_WAVEFORMATEX* pFormat, ma_uint32* pDefaultPeriodInFrames, ma_uint32* pFundamentalPeriodInFrames, ma_uint32* pMinPeriodInFrames, ma_uint32* pMaxPeriodInFrames) { return pThis->lpVtbl->GetSharedModeEnginePeriod(pThis, pFormat, pDefaultPeriodInFrames, pFundamentalPeriodInFrames, pMinPeriodInFrames, pMaxPeriodInFrames); }
+static MA_INLINE HRESULT ma_IAudioClient3_GetCurrentSharedModeEnginePeriod(ma_IAudioClient3* pThis, MA_WAVEFORMATEX** ppFormat, ma_uint32* pCurrentPeriodInFrames) { return pThis->lpVtbl->GetCurrentSharedModeEnginePeriod(pThis, ppFormat, pCurrentPeriodInFrames); }
+static MA_INLINE HRESULT ma_IAudioClient3_InitializeSharedAudioStream(ma_IAudioClient3* pThis, DWORD streamFlags, ma_uint32 periodInFrames, const MA_WAVEFORMATEX* pFormat, const GUID* pAudioSessionGUID) { return pThis->lpVtbl->InitializeSharedAudioStream(pThis, streamFlags, periodInFrames, pFormat, pAudioSessionGUID); }
+
+
+/* IAudioRenderClient */
+typedef struct
+{
+    /* IUnknown */
+    HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IAudioRenderClient* pThis, const IID* const riid, void** ppObject);
+    ULONG   (STDMETHODCALLTYPE * AddRef)        (ma_IAudioRenderClient* pThis);
+    ULONG   (STDMETHODCALLTYPE * Release)       (ma_IAudioRenderClient* pThis);
+
+    /* IAudioRenderClient */
+    HRESULT (STDMETHODCALLTYPE * GetBuffer)    (ma_IAudioRenderClient* pThis, ma_uint32 numFramesRequested, BYTE** ppData);
+    HRESULT (STDMETHODCALLTYPE * ReleaseBuffer)(ma_IAudioRenderClient* pThis, ma_uint32 numFramesWritten, DWORD dwFlags);
+} ma_IAudioRenderClientVtbl;
+struct ma_IAudioRenderClient
+{
+    ma_IAudioRenderClientVtbl* lpVtbl;
+};
+static MA_INLINE HRESULT ma_IAudioRenderClient_QueryInterface(ma_IAudioRenderClient* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); }
+static MA_INLINE ULONG   ma_IAudioRenderClient_AddRef(ma_IAudioRenderClient* pThis) { return pThis->lpVtbl->AddRef(pThis); }
+static MA_INLINE ULONG   ma_IAudioRenderClient_Release(ma_IAudioRenderClient* pThis) { return pThis->lpVtbl->Release(pThis); }
+static MA_INLINE HRESULT ma_IAudioRenderClient_GetBuffer(ma_IAudioRenderClient* pThis, ma_uint32 numFramesRequested, BYTE** ppData) { return pThis->lpVtbl->GetBuffer(pThis, numFramesRequested, ppData); }
+static MA_INLINE HRESULT ma_IAudioRenderClient_ReleaseBuffer(ma_IAudioRenderClient* pThis, ma_uint32 numFramesWritten, DWORD dwFlags) { return pThis->lpVtbl->ReleaseBuffer(pThis, numFramesWritten, dwFlags); }
+
+
+/* IAudioCaptureClient */
+typedef struct
+{
+    /* IUnknown */
+    HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IAudioCaptureClient* pThis, const IID* const riid, void** ppObject);
+    ULONG   (STDMETHODCALLTYPE * AddRef)        (ma_IAudioCaptureClient* pThis);
+    ULONG   (STDMETHODCALLTYPE * Release)       (ma_IAudioCaptureClient* pThis);
+
+    /* IAudioCaptureClient */
+    HRESULT (STDMETHODCALLTYPE * GetBuffer)        (ma_IAudioCaptureClient* pThis, BYTE** ppData, ma_uint32* pNumFramesToRead, DWORD* pFlags, ma_uint64* pDevicePosition, ma_uint64* pQPCPosition);
+    HRESULT (STDMETHODCALLTYPE * ReleaseBuffer)    (ma_IAudioCaptureClient* pThis, ma_uint32 numFramesRead);
+    HRESULT (STDMETHODCALLTYPE * GetNextPacketSize)(ma_IAudioCaptureClient* pThis, ma_uint32* pNumFramesInNextPacket);
+} ma_IAudioCaptureClientVtbl;
+struct ma_IAudioCaptureClient
+{
+    ma_IAudioCaptureClientVtbl* lpVtbl;
+};
+static MA_INLINE HRESULT ma_IAudioCaptureClient_QueryInterface(ma_IAudioCaptureClient* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); }
+static MA_INLINE ULONG   ma_IAudioCaptureClient_AddRef(ma_IAudioCaptureClient* pThis) { return pThis->lpVtbl->AddRef(pThis); }
+static MA_INLINE ULONG   ma_IAudioCaptureClient_Release(ma_IAudioCaptureClient* pThis) { return pThis->lpVtbl->Release(pThis); }
+static MA_INLINE HRESULT ma_IAudioCaptureClient_GetBuffer(ma_IAudioCaptureClient* pThis, BYTE** ppData, ma_uint32* pNumFramesToRead, DWORD* pFlags, ma_uint64* pDevicePosition, ma_uint64* pQPCPosition) { return pThis->lpVtbl->GetBuffer(pThis, ppData, pNumFramesToRead, pFlags, pDevicePosition, pQPCPosition); }
+static MA_INLINE HRESULT ma_IAudioCaptureClient_ReleaseBuffer(ma_IAudioCaptureClient* pThis, ma_uint32 numFramesRead) { return pThis->lpVtbl->ReleaseBuffer(pThis, numFramesRead); }
+static MA_INLINE HRESULT ma_IAudioCaptureClient_GetNextPacketSize(ma_IAudioCaptureClient* pThis, ma_uint32* pNumFramesInNextPacket) { return pThis->lpVtbl->GetNextPacketSize(pThis, pNumFramesInNextPacket); }
+
+#if defined(MA_WIN32_UWP)
+/* mmdevapi Functions */
+typedef HRESULT (WINAPI * MA_PFN_ActivateAudioInterfaceAsync)(const wchar_t* deviceInterfacePath, const IID* riid, MA_PROPVARIANT* activationParams, ma_IActivateAudioInterfaceCompletionHandler* completionHandler, ma_IActivateAudioInterfaceAsyncOperation** activationOperation);
+#endif
+
+/* Avrt Functions */
+typedef HANDLE (WINAPI * MA_PFN_AvSetMmThreadCharacteristicsA)(const char* TaskName, DWORD* TaskIndex);
+typedef BOOL   (WINAPI * MA_PFN_AvRevertMmThreadCharacteristics)(HANDLE AvrtHandle);
+
+#if !defined(MA_WIN32_DESKTOP) && !defined(MA_WIN32_GDK)
+typedef struct ma_completion_handler_uwp ma_completion_handler_uwp;
+
+typedef struct
+{
+    /* IUnknown */
+    HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_completion_handler_uwp* pThis, const IID* const riid, void** ppObject);
+    ULONG   (STDMETHODCALLTYPE * AddRef)        (ma_completion_handler_uwp* pThis);
+    ULONG   (STDMETHODCALLTYPE * Release)       (ma_completion_handler_uwp* pThis);
+
+    /* IActivateAudioInterfaceCompletionHandler */
+    HRESULT (STDMETHODCALLTYPE * ActivateCompleted)(ma_completion_handler_uwp* pThis, ma_IActivateAudioInterfaceAsyncOperation* pActivateOperation);
+} ma_completion_handler_uwp_vtbl;
+struct ma_completion_handler_uwp
+{
+    ma_completion_handler_uwp_vtbl* lpVtbl;
+    MA_ATOMIC(4, ma_uint32) counter;
+    HANDLE hEvent;
+};
+
+static HRESULT STDMETHODCALLTYPE ma_completion_handler_uwp_QueryInterface(ma_completion_handler_uwp* pThis, const IID* const riid, void** ppObject)
+{
+    /*
+    We need to "implement" IAgileObject which is just an indicator that's used internally by WASAPI for some multithreading management. To
+    "implement" this, we just make sure we return pThis when the IAgileObject is requested.
+    */
+    if (!ma_is_guid_equal(riid, &MA_IID_IUnknown) && !ma_is_guid_equal(riid, &MA_IID_IActivateAudioInterfaceCompletionHandler) && !ma_is_guid_equal(riid, &MA_IID_IAgileObject)) {
+        *ppObject = NULL;
+        return E_NOINTERFACE;
+    }
+
+    /* Getting here means the IID is IUnknown, IActivateAudioInterfaceCompletionHandler or IAgileObject.
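+
+    As an aside, these hand-rolled vtables are the standard way of speaking COM from plain C: the object is a
+    struct whose first member points at a manually declared function table laid out in exactly the order the
+    interface declares its methods. A minimal sketch of the pattern, with hypothetical names (the real tables
+    are the ones declared above):
+
+        typedef struct my_object my_object;
+        typedef struct
+        {
+            HRESULT (STDMETHODCALLTYPE * QueryInterface)(my_object* pThis, const IID* const riid, void** ppObject);
+            ULONG   (STDMETHODCALLTYPE * AddRef)        (my_object* pThis);
+            ULONG   (STDMETHODCALLTYPE * Release)       (my_object* pThis);
+        } my_object_vtbl;
+        struct my_object
+        {
+            my_object_vtbl* lpVtbl;
+        };
+
+    Because COM only ever dereferences lpVtbl, a pointer to such a struct can be handed to any API that expects
+    the corresponding interface, which is exactly what happens when this handler is passed to
+    ActivateAudioInterfaceAsync().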
*/ + *ppObject = (void*)pThis; + ((ma_completion_handler_uwp_vtbl*)pThis->lpVtbl)->AddRef(pThis); + return S_OK; +} + +static ULONG STDMETHODCALLTYPE ma_completion_handler_uwp_AddRef(ma_completion_handler_uwp* pThis) +{ + return (ULONG)ma_atomic_fetch_add_32(&pThis->counter, 1) + 1; +} + +static ULONG STDMETHODCALLTYPE ma_completion_handler_uwp_Release(ma_completion_handler_uwp* pThis) +{ + ma_uint32 newRefCount = ma_atomic_fetch_sub_32(&pThis->counter, 1) - 1; + if (newRefCount == 0) { + return 0; /* We don't free anything here because we never allocate the object on the heap. */ + } + + return (ULONG)newRefCount; +} + +static HRESULT STDMETHODCALLTYPE ma_completion_handler_uwp_ActivateCompleted(ma_completion_handler_uwp* pThis, ma_IActivateAudioInterfaceAsyncOperation* pActivateOperation) +{ + (void)pActivateOperation; + SetEvent(pThis->hEvent); + return S_OK; +} + + +static ma_completion_handler_uwp_vtbl g_maCompletionHandlerVtblInstance = { + ma_completion_handler_uwp_QueryInterface, + ma_completion_handler_uwp_AddRef, + ma_completion_handler_uwp_Release, + ma_completion_handler_uwp_ActivateCompleted +}; + +static ma_result ma_completion_handler_uwp_init(ma_completion_handler_uwp* pHandler) +{ + MA_ASSERT(pHandler != NULL); + MA_ZERO_OBJECT(pHandler); + + pHandler->lpVtbl = &g_maCompletionHandlerVtblInstance; + pHandler->counter = 1; + pHandler->hEvent = CreateEventA(NULL, FALSE, FALSE, NULL); + if (pHandler->hEvent == NULL) { + return ma_result_from_GetLastError(GetLastError()); + } + + return MA_SUCCESS; +} + +static void ma_completion_handler_uwp_uninit(ma_completion_handler_uwp* pHandler) +{ + if (pHandler->hEvent != NULL) { + CloseHandle(pHandler->hEvent); + } +} + +static void ma_completion_handler_uwp_wait(ma_completion_handler_uwp* pHandler) +{ + WaitForSingleObject((HANDLE)pHandler->hEvent, INFINITE); +} +#endif /* !MA_WIN32_DESKTOP */ + +/* We need a virtual table for our notification client object that's used for detecting changes to the default device. */ +#if defined(MA_WIN32_DESKTOP) || defined(MA_WIN32_GDK) +static HRESULT STDMETHODCALLTYPE ma_IMMNotificationClient_QueryInterface(ma_IMMNotificationClient* pThis, const IID* const riid, void** ppObject) +{ + /* + We care about two interfaces - IUnknown and IMMNotificationClient. If the requested IID is something else + we just return E_NOINTERFACE. Otherwise we need to increment the reference counter and return S_OK. + */ + if (!ma_is_guid_equal(riid, &MA_IID_IUnknown) && !ma_is_guid_equal(riid, &MA_IID_IMMNotificationClient)) { + *ppObject = NULL; + return E_NOINTERFACE; + } + + /* Getting here means the IID is IUnknown or IMMNotificationClient. */ + *ppObject = (void*)pThis; + ((ma_IMMNotificationClientVtbl*)pThis->lpVtbl)->AddRef(pThis); + return S_OK; +} + +static ULONG STDMETHODCALLTYPE ma_IMMNotificationClient_AddRef(ma_IMMNotificationClient* pThis) +{ + return (ULONG)ma_atomic_fetch_add_32(&pThis->counter, 1) + 1; +} + +static ULONG STDMETHODCALLTYPE ma_IMMNotificationClient_Release(ma_IMMNotificationClient* pThis) +{ + ma_uint32 newRefCount = ma_atomic_fetch_sub_32(&pThis->counter, 1) - 1; + if (newRefCount == 0) { + return 0; /* We don't free anything here because we never allocate the object on the heap. 
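+        The notification client is embedded directly in the ma_device structure (pDevice->wasapi.notificationClient,
+        unregistered again in ma_device_uninit__wasapi() below), so its lifetime is tied to the device and the
+        counter exists purely to satisfy COM's refcounting contract.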
*/ + } + + return (ULONG)newRefCount; +} + +static HRESULT STDMETHODCALLTYPE ma_IMMNotificationClient_OnDeviceStateChanged(ma_IMMNotificationClient* pThis, const WCHAR* pDeviceID, DWORD dwNewState) +{ + ma_bool32 isThisDevice = MA_FALSE; + ma_bool32 isCapture = MA_FALSE; + ma_bool32 isPlayback = MA_FALSE; + +#ifdef MA_DEBUG_OUTPUT + /*ma_log_postf(ma_device_get_log(pThis->pDevice), MA_LOG_LEVEL_DEBUG, "IMMNotificationClient_OnDeviceStateChanged(pDeviceID=%S, dwNewState=%u)\n", (pDeviceID != NULL) ? pDeviceID : L"(NULL)", (unsigned int)dwNewState);*/ +#endif + + /* + There have been reports of a hang when a playback device is disconnected. The idea with this code is to explicitly stop the device if we detect + that the device is disabled or has been unplugged. + */ + if (pThis->pDevice->wasapi.allowCaptureAutoStreamRouting && (pThis->pDevice->type == ma_device_type_capture || pThis->pDevice->type == ma_device_type_duplex || pThis->pDevice->type == ma_device_type_loopback)) { + isCapture = MA_TRUE; + if (ma_strcmp_WCHAR(pThis->pDevice->capture.id.wasapi, pDeviceID) == 0) { + isThisDevice = MA_TRUE; + } + } + + if (pThis->pDevice->wasapi.allowPlaybackAutoStreamRouting && (pThis->pDevice->type == ma_device_type_playback || pThis->pDevice->type == ma_device_type_duplex)) { + isPlayback = MA_TRUE; + if (ma_strcmp_WCHAR(pThis->pDevice->playback.id.wasapi, pDeviceID) == 0) { + isThisDevice = MA_TRUE; + } + } + + + /* + If the device ID matches our device we need to mark our device as detached and stop it. When a + device is added in OnDeviceAdded(), we'll restart it. We only mark it as detached if the device + was started at the time of being removed. + */ + if (isThisDevice) { + if ((dwNewState & MA_MM_DEVICE_STATE_ACTIVE) == 0) { + /* + Unplugged or otherwise unavailable. Mark as detached if we were in a playing state. We'll + use this to determine whether or not we need to automatically start the device when it's + plugged back in again. + */ + if (ma_device_get_state(pThis->pDevice) == ma_device_state_started) { + if (isPlayback) { + pThis->pDevice->wasapi.isDetachedPlayback = MA_TRUE; + } + if (isCapture) { + pThis->pDevice->wasapi.isDetachedCapture = MA_TRUE; + } + + ma_device_stop(pThis->pDevice); + } + } + + if ((dwNewState & MA_MM_DEVICE_STATE_ACTIVE) != 0) { + /* The device was activated. If we were detached, we need to start it again. */ + ma_bool8 tryRestartingDevice = MA_FALSE; + + if (isPlayback) { + if (pThis->pDevice->wasapi.isDetachedPlayback) { + pThis->pDevice->wasapi.isDetachedPlayback = MA_FALSE; + ma_device_reroute__wasapi(pThis->pDevice, ma_device_type_playback); + tryRestartingDevice = MA_TRUE; + } + } + + if (isCapture) { + if (pThis->pDevice->wasapi.isDetachedCapture) { + pThis->pDevice->wasapi.isDetachedCapture = MA_FALSE; + ma_device_reroute__wasapi(pThis->pDevice, (pThis->pDevice->type == ma_device_type_loopback) ? ma_device_type_loopback : ma_device_type_capture); + tryRestartingDevice = MA_TRUE; + } + } + + if (tryRestartingDevice) { + if (pThis->pDevice->wasapi.isDetachedPlayback == MA_FALSE && pThis->pDevice->wasapi.isDetachedCapture == MA_FALSE) { + ma_device_start(pThis->pDevice); + } + } + } + } + + return S_OK; +} + +static HRESULT STDMETHODCALLTYPE ma_IMMNotificationClient_OnDeviceAdded(ma_IMMNotificationClient* pThis, const WCHAR* pDeviceID) +{ +#ifdef MA_DEBUG_OUTPUT + /*ma_log_postf(ma_device_get_log(pThis->pDevice), MA_LOG_LEVEL_DEBUG, "IMMNotificationClient_OnDeviceAdded(pDeviceID=%S)\n", (pDeviceID != NULL) ? 
pDeviceID : L"(NULL)");*/
+#endif
+
+    /* We don't need to worry about this event for our purposes. */
+    (void)pThis;
+    (void)pDeviceID;
+    return S_OK;
+}
+
+static HRESULT STDMETHODCALLTYPE ma_IMMNotificationClient_OnDeviceRemoved(ma_IMMNotificationClient* pThis, const WCHAR* pDeviceID)
+{
+#ifdef MA_DEBUG_OUTPUT
+    /*ma_log_postf(ma_device_get_log(pThis->pDevice), MA_LOG_LEVEL_DEBUG, "IMMNotificationClient_OnDeviceRemoved(pDeviceID=%S)\n", (pDeviceID != NULL) ? pDeviceID : L"(NULL)");*/
+#endif
+
+    /* We don't need to worry about this event for our purposes. */
+    (void)pThis;
+    (void)pDeviceID;
+    return S_OK;
+}
+
+static HRESULT STDMETHODCALLTYPE ma_IMMNotificationClient_OnDefaultDeviceChanged(ma_IMMNotificationClient* pThis, ma_EDataFlow dataFlow, ma_ERole role, const WCHAR* pDefaultDeviceID)
+{
+#ifdef MA_DEBUG_OUTPUT
+    /*ma_log_postf(ma_device_get_log(pThis->pDevice), MA_LOG_LEVEL_DEBUG, "IMMNotificationClient_OnDefaultDeviceChanged(dataFlow=%d, role=%d, pDefaultDeviceID=%S)\n", dataFlow, role, (pDefaultDeviceID != NULL) ? pDefaultDeviceID : L"(NULL)");*/
+#endif
+
+    (void)role;
+
+    /* We only care about devices with the same data flow as the current device. */
+    if ((pThis->pDevice->type == ma_device_type_playback && dataFlow != ma_eRender) ||
+        (pThis->pDevice->type == ma_device_type_capture  && dataFlow != ma_eCapture) ||
+        (pThis->pDevice->type == ma_device_type_loopback && dataFlow != ma_eRender)) {
+        ma_log_postf(ma_device_get_log(pThis->pDevice), MA_LOG_LEVEL_DEBUG, "[WASAPI] Stream rerouting abandoned because dataFlow does not match device type.\n");
+        return S_OK;
+    }
+
+    /* We need to consider dataFlow as ma_eCapture if device is ma_device_type_loopback */
+    if (pThis->pDevice->type == ma_device_type_loopback) {
+        dataFlow = ma_eCapture;
+    }
+
+    /* Don't do automatic stream routing if we're not allowed. */
+    if ((dataFlow == ma_eRender  && pThis->pDevice->wasapi.allowPlaybackAutoStreamRouting == MA_FALSE) ||
+        (dataFlow == ma_eCapture && pThis->pDevice->wasapi.allowCaptureAutoStreamRouting  == MA_FALSE)) {
+        ma_log_postf(ma_device_get_log(pThis->pDevice), MA_LOG_LEVEL_DEBUG, "[WASAPI] Stream rerouting abandoned because automatic stream routing has been disabled by the device config.\n");
+        return S_OK;
+    }
+
+    /*
+    Not currently supporting automatic stream routing in exclusive mode. This is not working correctly on my machine due to
+    AUDCLNT_E_DEVICE_IN_USE errors when reinitializing the device. If this is a bug in miniaudio, we can try re-enabling this once
+    it's fixed.
+    */
+    if ((dataFlow == ma_eRender  && pThis->pDevice->playback.shareMode == ma_share_mode_exclusive) ||
+        (dataFlow == ma_eCapture && pThis->pDevice->capture.shareMode  == ma_share_mode_exclusive)) {
+        ma_log_postf(ma_device_get_log(pThis->pDevice), MA_LOG_LEVEL_DEBUG, "[WASAPI] Stream rerouting abandoned because the device shared mode is exclusive.\n");
+        return S_OK;
+    }
+
+
+
+    /*
+    Second attempt at device rerouting. We're going to retrieve the device's state at the time of
+    the route change. We're then going to stop the device, reinitialize the device, and then start
+    it again if the state before stopping was ma_device_state_started.
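+
+    From the application's side this is what makes a device that was opened against the system default follow
+    the default as it changes. If that behaviour is unwanted it can be disabled at initialization time. A
+    minimal sketch, assuming the usual miniaudio config field for this backend (it feeds the
+    allowPlayback/CaptureAutoStreamRouting flags checked above):
+
+        ma_device_config config = ma_device_config_init(ma_device_type_playback);
+        config.wasapi.noAutoStreamRouting = MA_TRUE;
+        result = ma_device_init(&context, &config, &device);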
+ */ + { + ma_uint32 previousState = ma_device_get_state(pThis->pDevice); + ma_bool8 restartDevice = MA_FALSE; + + if (previousState == ma_device_state_uninitialized || previousState == ma_device_state_starting) { + ma_log_postf(ma_device_get_log(pThis->pDevice), MA_LOG_LEVEL_DEBUG, "[WASAPI] Stream rerouting abandoned because the device is in the process of starting.\n"); + return S_OK; + } + + if (previousState == ma_device_state_started) { + ma_device_stop(pThis->pDevice); + restartDevice = MA_TRUE; + } + + if (pDefaultDeviceID != NULL) { /* <-- The input device ID will be null if there's no other device available. */ + ma_mutex_lock(&pThis->pDevice->wasapi.rerouteLock); + { + if (dataFlow == ma_eRender) { + ma_device_reroute__wasapi(pThis->pDevice, ma_device_type_playback); + + if (pThis->pDevice->wasapi.isDetachedPlayback) { + pThis->pDevice->wasapi.isDetachedPlayback = MA_FALSE; + + if (pThis->pDevice->type == ma_device_type_duplex && pThis->pDevice->wasapi.isDetachedCapture) { + restartDevice = MA_FALSE; /* It's a duplex device and the capture side is detached. We cannot be restarting the device just yet. */ + } + else { + restartDevice = MA_TRUE; /* It's not a duplex device, or the capture side is also attached so we can go ahead and restart the device. */ + } + } + } + else { + ma_device_reroute__wasapi(pThis->pDevice, (pThis->pDevice->type == ma_device_type_loopback) ? ma_device_type_loopback : ma_device_type_capture); + + if (pThis->pDevice->wasapi.isDetachedCapture) { + pThis->pDevice->wasapi.isDetachedCapture = MA_FALSE; + + if (pThis->pDevice->type == ma_device_type_duplex && pThis->pDevice->wasapi.isDetachedPlayback) { + restartDevice = MA_FALSE; /* It's a duplex device and the playback side is detached. We cannot be restarting the device just yet. */ + } + else { + restartDevice = MA_TRUE; /* It's not a duplex device, or the playback side is also attached so we can go ahead and restart the device. */ + } + } + } + } + ma_mutex_unlock(&pThis->pDevice->wasapi.rerouteLock); + + if (restartDevice) { + ma_device_start(pThis->pDevice); + } + } + } + + return S_OK; +} + +static HRESULT STDMETHODCALLTYPE ma_IMMNotificationClient_OnPropertyValueChanged(ma_IMMNotificationClient* pThis, const WCHAR* pDeviceID, const PROPERTYKEY key) +{ +#ifdef MA_DEBUG_OUTPUT + /*ma_log_postf(ma_device_get_log(pThis->pDevice), MA_LOG_LEVEL_DEBUG, "IMMNotificationClient_OnPropertyValueChanged(pDeviceID=%S)\n", (pDeviceID != NULL) ? 
pDeviceID : L"(NULL)");*/
+#endif
+
+    (void)pThis;
+    (void)pDeviceID;
+    (void)key;
+    return S_OK;
+}
+
+static ma_IMMNotificationClientVtbl g_maNotificationCientVtbl = {
+    ma_IMMNotificationClient_QueryInterface,
+    ma_IMMNotificationClient_AddRef,
+    ma_IMMNotificationClient_Release,
+    ma_IMMNotificationClient_OnDeviceStateChanged,
+    ma_IMMNotificationClient_OnDeviceAdded,
+    ma_IMMNotificationClient_OnDeviceRemoved,
+    ma_IMMNotificationClient_OnDefaultDeviceChanged,
+    ma_IMMNotificationClient_OnPropertyValueChanged
+};
+#endif  /* MA_WIN32_DESKTOP */
+
+static const char* ma_to_usage_string__wasapi(ma_wasapi_usage usage)
+{
+    switch (usage)
+    {
+        case ma_wasapi_usage_default:   return NULL;
+        case ma_wasapi_usage_games:     return "Games";
+        case ma_wasapi_usage_pro_audio: return "Pro Audio";
+        default: break;
+    }
+
+    return NULL;
+}
+
+#if defined(MA_WIN32_DESKTOP) || defined(MA_WIN32_GDK)
+typedef ma_IMMDevice ma_WASAPIDeviceInterface;
+#else
+typedef ma_IUnknown ma_WASAPIDeviceInterface;
+#endif
+
+
+#define MA_CONTEXT_COMMAND_QUIT__WASAPI                 1
+#define MA_CONTEXT_COMMAND_CREATE_IAUDIOCLIENT__WASAPI  2
+#define MA_CONTEXT_COMMAND_RELEASE_IAUDIOCLIENT__WASAPI 3
+
+static ma_context_command__wasapi ma_context_init_command__wasapi(int code)
+{
+    ma_context_command__wasapi cmd;
+
+    MA_ZERO_OBJECT(&cmd);
+    cmd.code = code;
+
+    return cmd;
+}
+
+static ma_result ma_context_post_command__wasapi(ma_context* pContext, const ma_context_command__wasapi* pCmd)
+{
+    /* For now we are doing everything synchronously, but I might relax this later if the need arises. */
+    ma_result result;
+    ma_bool32 isUsingLocalEvent = MA_FALSE;
+    ma_event localEvent;
+
+    MA_ASSERT(pContext != NULL);
+    MA_ASSERT(pCmd != NULL);
+
+    if (pCmd->pEvent == NULL) {
+        isUsingLocalEvent = MA_TRUE;
+
+        result = ma_event_init(&localEvent);
+        if (result != MA_SUCCESS) {
+            return result;  /* Failed to create the event for this command. */
+        }
+    }
+
+    /* Here is where we add the command to the list. If there's not enough room we'll spin until there is. */
+    ma_mutex_lock(&pContext->wasapi.commandLock);
+    {
+        ma_uint32 index;
+
+        /* Spin until we've got some space available. */
+        while (pContext->wasapi.commandCount == ma_countof(pContext->wasapi.commands)) {
+            ma_yield();
+        }
+
+        /* Space is now available. Can safely add to the list. */
+        index = (pContext->wasapi.commandIndex + pContext->wasapi.commandCount) % ma_countof(pContext->wasapi.commands);
+        pContext->wasapi.commands[index] = *pCmd;
+        if (isUsingLocalEvent) {
+            pContext->wasapi.commands[index].pEvent = &localEvent;  /* Only hook in the local event when the caller didn't supply one; otherwise keep the caller's event from *pCmd. */
+        }
+        pContext->wasapi.commandCount += 1;
+
+        /* Now that the command has been added, release the semaphore so ma_context_next_command__wasapi() can return.
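+
+        The synchronization scheme here is a bounded ring buffer: commandSem counts queued commands (the command
+        thread blocks on it when the queue is empty), commandLock protects the index/count pair, and the producer
+        spins above when every slot is full. Because each command carries an event, the poster can block until
+        the command thread has actually executed it, which is what gives this path its synchronous behaviour.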
*/ + ma_semaphore_release(&pContext->wasapi.commandSem); + } + ma_mutex_unlock(&pContext->wasapi.commandLock); + + if (isUsingLocalEvent) { + ma_event_wait(&localEvent); + ma_event_uninit(&localEvent); + } + + return MA_SUCCESS; +} + +static ma_result ma_context_next_command__wasapi(ma_context* pContext, ma_context_command__wasapi* pCmd) +{ + ma_result result = MA_SUCCESS; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(pCmd != NULL); + + result = ma_semaphore_wait(&pContext->wasapi.commandSem); + if (result == MA_SUCCESS) { + ma_mutex_lock(&pContext->wasapi.commandLock); + { + *pCmd = pContext->wasapi.commands[pContext->wasapi.commandIndex]; + pContext->wasapi.commandIndex = (pContext->wasapi.commandIndex + 1) % ma_countof(pContext->wasapi.commands); + pContext->wasapi.commandCount -= 1; + } + ma_mutex_unlock(&pContext->wasapi.commandLock); + } + + return result; +} + +static ma_thread_result MA_THREADCALL ma_context_command_thread__wasapi(void* pUserData) +{ + ma_result result; + ma_context* pContext = (ma_context*)pUserData; + MA_ASSERT(pContext != NULL); + + for (;;) { + ma_context_command__wasapi cmd; + result = ma_context_next_command__wasapi(pContext, &cmd); + if (result != MA_SUCCESS) { + break; + } + + switch (cmd.code) + { + case MA_CONTEXT_COMMAND_QUIT__WASAPI: + { + /* Do nothing. Handled after the switch. */ + } break; + + case MA_CONTEXT_COMMAND_CREATE_IAUDIOCLIENT__WASAPI: + { + if (cmd.data.createAudioClient.deviceType == ma_device_type_playback) { + *cmd.data.createAudioClient.pResult = ma_result_from_HRESULT(ma_IAudioClient_GetService((ma_IAudioClient*)cmd.data.createAudioClient.pAudioClient, &MA_IID_IAudioRenderClient, cmd.data.createAudioClient.ppAudioClientService)); + } else { + *cmd.data.createAudioClient.pResult = ma_result_from_HRESULT(ma_IAudioClient_GetService((ma_IAudioClient*)cmd.data.createAudioClient.pAudioClient, &MA_IID_IAudioCaptureClient, cmd.data.createAudioClient.ppAudioClientService)); + } + } break; + + case MA_CONTEXT_COMMAND_RELEASE_IAUDIOCLIENT__WASAPI: + { + if (cmd.data.releaseAudioClient.deviceType == ma_device_type_playback) { + if (cmd.data.releaseAudioClient.pDevice->wasapi.pAudioClientPlayback != NULL) { + ma_IAudioClient_Release((ma_IAudioClient*)cmd.data.releaseAudioClient.pDevice->wasapi.pAudioClientPlayback); + cmd.data.releaseAudioClient.pDevice->wasapi.pAudioClientPlayback = NULL; + } + } + + if (cmd.data.releaseAudioClient.deviceType == ma_device_type_capture) { + if (cmd.data.releaseAudioClient.pDevice->wasapi.pAudioClientCapture != NULL) { + ma_IAudioClient_Release((ma_IAudioClient*)cmd.data.releaseAudioClient.pDevice->wasapi.pAudioClientCapture); + cmd.data.releaseAudioClient.pDevice->wasapi.pAudioClientCapture = NULL; + } + } + } break; + + default: + { + /* Unknown command. Ignore it, but trigger an assert in debug mode so we're aware of it. */ + MA_ASSERT(MA_FALSE); + } break; + } + + if (cmd.pEvent != NULL) { + ma_event_signal(cmd.pEvent); + } + + if (cmd.code == MA_CONTEXT_COMMAND_QUIT__WASAPI) { + break; /* Received a quit message. Get out of here. 
*/ + } + } + + return (ma_thread_result)0; +} + +static ma_result ma_device_create_IAudioClient_service__wasapi(ma_context* pContext, ma_device_type deviceType, ma_IAudioClient* pAudioClient, void** ppAudioClientService) +{ + ma_result result; + ma_result cmdResult; + ma_context_command__wasapi cmd = ma_context_init_command__wasapi(MA_CONTEXT_COMMAND_CREATE_IAUDIOCLIENT__WASAPI); + cmd.data.createAudioClient.deviceType = deviceType; + cmd.data.createAudioClient.pAudioClient = (void*)pAudioClient; + cmd.data.createAudioClient.ppAudioClientService = ppAudioClientService; + cmd.data.createAudioClient.pResult = &cmdResult; /* Declared locally, but won't be dereferenced after this function returns since execution of the command will wait here. */ + + result = ma_context_post_command__wasapi(pContext, &cmd); /* This will not return until the command has actually been run. */ + if (result != MA_SUCCESS) { + return result; + } + + return *cmd.data.createAudioClient.pResult; +} + +#if 0 /* Not used at the moment, but leaving here for future use. */ +static ma_result ma_device_release_IAudioClient_service__wasapi(ma_device* pDevice, ma_device_type deviceType) +{ + ma_result result; + ma_context_command__wasapi cmd = ma_context_init_command__wasapi(MA_CONTEXT_COMMAND_RELEASE_IAUDIOCLIENT__WASAPI); + cmd.data.releaseAudioClient.pDevice = pDevice; + cmd.data.releaseAudioClient.deviceType = deviceType; + + result = ma_context_post_command__wasapi(pDevice->pContext, &cmd); /* This will not return until the command has actually been run. */ + if (result != MA_SUCCESS) { + return result; + } + + return MA_SUCCESS; +} +#endif + + +static void ma_add_native_data_format_to_device_info_from_WAVEFORMATEX(const MA_WAVEFORMATEX* pWF, ma_share_mode shareMode, ma_device_info* pInfo) +{ + MA_ASSERT(pWF != NULL); + MA_ASSERT(pInfo != NULL); + + if (pInfo->nativeDataFormatCount >= ma_countof(pInfo->nativeDataFormats)) { + return; /* Too many data formats. Need to ignore this one. Don't think this should ever happen with WASAPI. */ + } + + pInfo->nativeDataFormats[pInfo->nativeDataFormatCount].format = ma_format_from_WAVEFORMATEX(pWF); + pInfo->nativeDataFormats[pInfo->nativeDataFormatCount].channels = pWF->nChannels; + pInfo->nativeDataFormats[pInfo->nativeDataFormatCount].sampleRate = pWF->nSamplesPerSec; + pInfo->nativeDataFormats[pInfo->nativeDataFormatCount].flags = (shareMode == ma_share_mode_exclusive) ? MA_DATA_FORMAT_FLAG_EXCLUSIVE_MODE : 0; + pInfo->nativeDataFormatCount += 1; +} + +static ma_result ma_context_get_device_info_from_IAudioClient__wasapi(ma_context* pContext, /*ma_IMMDevice**/void* pMMDevice, ma_IAudioClient* pAudioClient, ma_device_info* pInfo) +{ + HRESULT hr; + MA_WAVEFORMATEX* pWF = NULL; + + MA_ASSERT(pAudioClient != NULL); + MA_ASSERT(pInfo != NULL); + + /* Shared Mode. We use GetMixFormat() here. */ + hr = ma_IAudioClient_GetMixFormat((ma_IAudioClient*)pAudioClient, (MA_WAVEFORMATEX**)&pWF); + if (SUCCEEDED(hr)) { + ma_add_native_data_format_to_device_info_from_WAVEFORMATEX(pWF, ma_share_mode_shared, pInfo); + } else { + ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to retrieve mix format for device info retrieval."); + return ma_result_from_HRESULT(hr); + } + + /* + Exclusive Mode. We repeatedly call IsFormatSupported() here. This is not currently supported on + UWP. Failure to retrieve the exclusive mode format is not considered an error, so from here on + out, MA_SUCCESS is guaranteed to be returned. 
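+
+    As a concrete instance of what the search below constructs: probing a stereo device with 16-bit PCM at
+    44100 Hz fills the WAVEFORMATEXTENSIBLE with nChannels = 2, wBitsPerSample = 16, nBlockAlign = 2*16/8 = 4
+    bytes per frame, nAvgBytesPerSec = 4 * 44100 = 176400 and SubFormat = KSDATAFORMAT_SUBTYPE_PCM, then hands
+    it to IsFormatSupported() in exclusive mode. The probe order is dictated by g_maFormatPriorities and
+    g_maStandardSampleRatePriorities, stopping at the first supported combination.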
+ */ + #if defined(MA_WIN32_DESKTOP) || defined(MA_WIN32_GDK) + { + ma_IPropertyStore *pProperties; + + /* + The first thing to do is get the format from PKEY_AudioEngine_DeviceFormat. This should give us a channel count we assume is + correct which will simplify our searching. + */ + hr = ma_IMMDevice_OpenPropertyStore((ma_IMMDevice*)pMMDevice, STGM_READ, &pProperties); + if (SUCCEEDED(hr)) { + MA_PROPVARIANT var; + ma_PropVariantInit(&var); + + hr = ma_IPropertyStore_GetValue(pProperties, &MA_PKEY_AudioEngine_DeviceFormat, &var); + if (SUCCEEDED(hr)) { + pWF = (MA_WAVEFORMATEX*)var.blob.pBlobData; + + /* + In my testing, the format returned by PKEY_AudioEngine_DeviceFormat is suitable for exclusive mode so we check this format + first. If this fails, fall back to a search. + */ + hr = ma_IAudioClient_IsFormatSupported((ma_IAudioClient*)pAudioClient, MA_AUDCLNT_SHAREMODE_EXCLUSIVE, pWF, NULL); + if (SUCCEEDED(hr)) { + /* The format returned by PKEY_AudioEngine_DeviceFormat is supported. */ + ma_add_native_data_format_to_device_info_from_WAVEFORMATEX(pWF, ma_share_mode_exclusive, pInfo); + } else { + /* + The format returned by PKEY_AudioEngine_DeviceFormat is not supported, so fall back to a search. We assume the channel + count returned by MA_PKEY_AudioEngine_DeviceFormat is valid and correct. For simplicity we're only returning one format. + */ + ma_uint32 channels = pWF->nChannels; + ma_channel defaultChannelMap[MA_MAX_CHANNELS]; + MA_WAVEFORMATEXTENSIBLE wf; + ma_bool32 found; + ma_uint32 iFormat; + + /* Make sure we don't overflow the channel map. */ + if (channels > MA_MAX_CHANNELS) { + channels = MA_MAX_CHANNELS; + } + + ma_channel_map_init_standard(ma_standard_channel_map_microsoft, defaultChannelMap, ma_countof(defaultChannelMap), channels); + + MA_ZERO_OBJECT(&wf); + wf.cbSize = sizeof(wf); + wf.wFormatTag = WAVE_FORMAT_EXTENSIBLE; + wf.nChannels = (WORD)channels; + wf.dwChannelMask = ma_channel_map_to_channel_mask__win32(defaultChannelMap, channels); + + found = MA_FALSE; + for (iFormat = 0; iFormat < ma_countof(g_maFormatPriorities); ++iFormat) { + ma_format format = g_maFormatPriorities[iFormat]; + ma_uint32 iSampleRate; + + wf.wBitsPerSample = (WORD)(ma_get_bytes_per_sample(format)*8); + wf.nBlockAlign = (WORD)(wf.nChannels * wf.wBitsPerSample / 8); + wf.nAvgBytesPerSec = wf.nBlockAlign * wf.nSamplesPerSec; + wf.Samples.wValidBitsPerSample = /*(format == ma_format_s24_32) ? 
24 :*/ wf.wBitsPerSample; + if (format == ma_format_f32) { + wf.SubFormat = MA_GUID_KSDATAFORMAT_SUBTYPE_IEEE_FLOAT; + } else { + wf.SubFormat = MA_GUID_KSDATAFORMAT_SUBTYPE_PCM; + } + + for (iSampleRate = 0; iSampleRate < ma_countof(g_maStandardSampleRatePriorities); ++iSampleRate) { + wf.nSamplesPerSec = g_maStandardSampleRatePriorities[iSampleRate]; + + hr = ma_IAudioClient_IsFormatSupported((ma_IAudioClient*)pAudioClient, MA_AUDCLNT_SHAREMODE_EXCLUSIVE, (MA_WAVEFORMATEX*)&wf, NULL); + if (SUCCEEDED(hr)) { + ma_add_native_data_format_to_device_info_from_WAVEFORMATEX((MA_WAVEFORMATEX*)&wf, ma_share_mode_exclusive, pInfo); + found = MA_TRUE; + break; + } + } + + if (found) { + break; + } + } + + ma_PropVariantClear(pContext, &var); + + if (!found) { + ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_WARNING, "[WASAPI] Failed to find suitable device format for device info retrieval."); + } + } + } else { + ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_WARNING, "[WASAPI] Failed to retrieve device format for device info retrieval."); + } + + ma_IPropertyStore_Release(pProperties); + } else { + ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_WARNING, "[WASAPI] Failed to open property store for device info retrieval."); + } + } + #else + { + (void)pMMDevice; /* Unused. */ + } + #endif + + return MA_SUCCESS; +} + +#if defined(MA_WIN32_DESKTOP) || defined(MA_WIN32_GDK) +static ma_EDataFlow ma_device_type_to_EDataFlow(ma_device_type deviceType) +{ + if (deviceType == ma_device_type_playback) { + return ma_eRender; + } else if (deviceType == ma_device_type_capture) { + return ma_eCapture; + } else { + MA_ASSERT(MA_FALSE); + return ma_eRender; /* Should never hit this. */ + } +} + +static ma_result ma_context_create_IMMDeviceEnumerator__wasapi(ma_context* pContext, ma_IMMDeviceEnumerator** ppDeviceEnumerator) +{ + HRESULT hr; + ma_IMMDeviceEnumerator* pDeviceEnumerator; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(ppDeviceEnumerator != NULL); + + *ppDeviceEnumerator = NULL; /* Safety. */ + + hr = ma_CoCreateInstance(pContext, &MA_CLSID_MMDeviceEnumerator, NULL, CLSCTX_ALL, &MA_IID_IMMDeviceEnumerator, (void**)&pDeviceEnumerator); + if (FAILED(hr)) { + ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to create device enumerator."); + return ma_result_from_HRESULT(hr); + } + + *ppDeviceEnumerator = pDeviceEnumerator; + + return MA_SUCCESS; +} + +static WCHAR* ma_context_get_default_device_id_from_IMMDeviceEnumerator__wasapi(ma_context* pContext, ma_IMMDeviceEnumerator* pDeviceEnumerator, ma_device_type deviceType) +{ + HRESULT hr; + ma_IMMDevice* pMMDefaultDevice = NULL; + WCHAR* pDefaultDeviceID = NULL; + ma_EDataFlow dataFlow; + ma_ERole role; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(pDeviceEnumerator != NULL); + + (void)pContext; + + /* Grab the EDataFlow type from the device type. */ + dataFlow = ma_device_type_to_EDataFlow(deviceType); + + /* The role is always eConsole, but we may make this configurable later. 
*/ + role = ma_eConsole; + + hr = ma_IMMDeviceEnumerator_GetDefaultAudioEndpoint(pDeviceEnumerator, dataFlow, role, &pMMDefaultDevice); + if (FAILED(hr)) { + return NULL; + } + + hr = ma_IMMDevice_GetId(pMMDefaultDevice, &pDefaultDeviceID); + + ma_IMMDevice_Release(pMMDefaultDevice); + pMMDefaultDevice = NULL; + + if (FAILED(hr)) { + return NULL; + } + + return pDefaultDeviceID; +} + +static WCHAR* ma_context_get_default_device_id__wasapi(ma_context* pContext, ma_device_type deviceType) /* Free the returned pointer with ma_CoTaskMemFree() */ +{ + ma_result result; + ma_IMMDeviceEnumerator* pDeviceEnumerator; + WCHAR* pDefaultDeviceID = NULL; + + MA_ASSERT(pContext != NULL); + + result = ma_context_create_IMMDeviceEnumerator__wasapi(pContext, &pDeviceEnumerator); + if (result != MA_SUCCESS) { + return NULL; + } + + pDefaultDeviceID = ma_context_get_default_device_id_from_IMMDeviceEnumerator__wasapi(pContext, pDeviceEnumerator, deviceType); + + ma_IMMDeviceEnumerator_Release(pDeviceEnumerator); + return pDefaultDeviceID; +} + +static ma_result ma_context_get_MMDevice__wasapi(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_IMMDevice** ppMMDevice) +{ + ma_IMMDeviceEnumerator* pDeviceEnumerator; + HRESULT hr; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(ppMMDevice != NULL); + + /* + This weird COM init/uninit here is a hack to work around a crash when changing devices. What is happening is + WASAPI fires a callback from another thread when the device is changed. It's from that thread where this + function is getting called. What I'm suspecting is that the other thread is not initializing COM which in turn + results in CoCreateInstance() failing. + + The community has reported that this seems to fix the crash. There are future plans to move all WASAPI operation + over to a single thread to make everything safer, but in the meantime while we wait for that to come online I'm + happy enough to use this hack instead. + */ + ma_CoInitializeEx(pContext, NULL, MA_COINIT_VALUE); + { + hr = ma_CoCreateInstance(pContext, &MA_CLSID_MMDeviceEnumerator, NULL, CLSCTX_ALL, &MA_IID_IMMDeviceEnumerator, (void**)&pDeviceEnumerator); + } + ma_CoUninitialize(pContext); + + if (FAILED(hr)) { /* <-- This is checking the call above to ma_CoCreateInstance(). */ + ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to create IMMDeviceEnumerator.\n"); + return ma_result_from_HRESULT(hr); + } + + if (pDeviceID == NULL) { + hr = ma_IMMDeviceEnumerator_GetDefaultAudioEndpoint(pDeviceEnumerator, (deviceType == ma_device_type_capture) ? ma_eCapture : ma_eRender, ma_eConsole, ppMMDevice); + } else { + hr = ma_IMMDeviceEnumerator_GetDevice(pDeviceEnumerator, pDeviceID->wasapi, ppMMDevice); + } + + ma_IMMDeviceEnumerator_Release(pDeviceEnumerator); + if (FAILED(hr)) { + ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to retrieve IMMDevice.\n"); + return ma_result_from_HRESULT(hr); + } + + return MA_SUCCESS; +} + +static ma_result ma_context_get_device_id_from_MMDevice__wasapi(ma_context* pContext, ma_IMMDevice* pMMDevice, ma_device_id* pDeviceID) +{ + WCHAR* pDeviceIDString; + HRESULT hr; + + MA_ASSERT(pDeviceID != NULL); + + hr = ma_IMMDevice_GetId(pMMDevice, &pDeviceIDString); + if (SUCCEEDED(hr)) { + size_t idlen = ma_strlen_WCHAR(pDeviceIDString); + if (idlen+1 > ma_countof(pDeviceID->wasapi)) { + ma_CoTaskMemFree(pContext, pDeviceIDString); + MA_ASSERT(MA_FALSE); /* NOTE: If this is triggered, please report it. 
It means the format of the ID must have changed and is too long to fit in our fixed sized buffer. */
+            return MA_ERROR;
+        }
+
+        MA_COPY_MEMORY(pDeviceID->wasapi, pDeviceIDString, idlen * sizeof(wchar_t));
+        pDeviceID->wasapi[idlen] = '\0';
+
+        ma_CoTaskMemFree(pContext, pDeviceIDString);
+
+        return MA_SUCCESS;
+    }
+
+    return MA_ERROR;
+}
+
+static ma_result ma_context_get_device_info_from_MMDevice__wasapi(ma_context* pContext, ma_IMMDevice* pMMDevice, WCHAR* pDefaultDeviceID, ma_bool32 onlySimpleInfo, ma_device_info* pInfo)
+{
+    ma_result result;
+    HRESULT hr;
+
+    MA_ASSERT(pContext != NULL);
+    MA_ASSERT(pMMDevice != NULL);
+    MA_ASSERT(pInfo != NULL);
+
+    /* ID. */
+    result = ma_context_get_device_id_from_MMDevice__wasapi(pContext, pMMDevice, &pInfo->id);
+    if (result == MA_SUCCESS) {
+        if (pDefaultDeviceID != NULL) {
+            if (ma_strcmp_WCHAR(pInfo->id.wasapi, pDefaultDeviceID) == 0) {
+                pInfo->isDefault = MA_TRUE;
+            }
+        }
+    }
+
+    /* Description / Friendly Name */
+    {
+        ma_IPropertyStore *pProperties;
+        hr = ma_IMMDevice_OpenPropertyStore(pMMDevice, STGM_READ, &pProperties);
+        if (SUCCEEDED(hr)) {
+            MA_PROPVARIANT var;
+
+            ma_PropVariantInit(&var);
+            hr = ma_IPropertyStore_GetValue(pProperties, &MA_PKEY_Device_FriendlyName, &var);
+            if (SUCCEEDED(hr)) {
+                WideCharToMultiByte(CP_UTF8, 0, var.pwszVal, -1, pInfo->name, sizeof(pInfo->name), 0, FALSE);
+                ma_PropVariantClear(pContext, &var);
+            }
+
+            ma_IPropertyStore_Release(pProperties);
+        }
+    }
+
+    /* Format */
+    if (!onlySimpleInfo) {
+        ma_IAudioClient* pAudioClient;
+        hr = ma_IMMDevice_Activate(pMMDevice, &MA_IID_IAudioClient, CLSCTX_ALL, NULL, (void**)&pAudioClient);
+        if (SUCCEEDED(hr)) {
+            result = ma_context_get_device_info_from_IAudioClient__wasapi(pContext, pMMDevice, pAudioClient, pInfo);
+
+            ma_IAudioClient_Release(pAudioClient);
+            return result;
+        } else {
+            ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to activate audio client for device info retrieval.");
+            return ma_result_from_HRESULT(hr);
+        }
+    }
+
+    return MA_SUCCESS;
+}
+
+static ma_result ma_context_enumerate_devices_by_type__wasapi(ma_context* pContext, ma_IMMDeviceEnumerator* pDeviceEnumerator, ma_device_type deviceType, ma_enum_devices_callback_proc callback, void* pUserData)
+{
+    ma_result result = MA_SUCCESS;
+    UINT deviceCount;
+    HRESULT hr;
+    ma_uint32 iDevice;
+    WCHAR* pDefaultDeviceID = NULL;
+    ma_IMMDeviceCollection* pDeviceCollection = NULL;
+
+    MA_ASSERT(pContext != NULL);
+    MA_ASSERT(callback != NULL);
+
+    /* Grab the default device. We use this to know whether or not to flag the returned device info as being the default. */
+    pDefaultDeviceID = ma_context_get_default_device_id_from_IMMDeviceEnumerator__wasapi(pContext, pDeviceEnumerator, deviceType);
+
+    /* We need to enumerate the devices which returns a device collection.
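+
+    This is the backend half of the public enumeration API; the callback invoked below is the one the
+    application handed to ma_context_enumerate_devices(). A minimal caller-side sketch (names are
+    illustrative):
+
+        static ma_bool32 on_device(ma_context* pContext, ma_device_type deviceType, const ma_device_info* pInfo, void* pUserData)
+        {
+            printf("%s: %s%s\n", (deviceType == ma_device_type_playback) ? "playback" : "capture", pInfo->name, pInfo->isDefault ? " (default)" : "");
+            return MA_TRUE;
+        }
+
+        ma_context_enumerate_devices(&context, on_device, NULL);
+
+    Returning MA_FALSE from the callback stops enumeration early, which is exactly the cbResult check below.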
*/ + hr = ma_IMMDeviceEnumerator_EnumAudioEndpoints(pDeviceEnumerator, ma_device_type_to_EDataFlow(deviceType), MA_MM_DEVICE_STATE_ACTIVE, &pDeviceCollection); + if (SUCCEEDED(hr)) { + hr = ma_IMMDeviceCollection_GetCount(pDeviceCollection, &deviceCount); + if (FAILED(hr)) { + ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to get device count.\n"); + result = ma_result_from_HRESULT(hr); + goto done; + } + + for (iDevice = 0; iDevice < deviceCount; ++iDevice) { + ma_device_info deviceInfo; + ma_IMMDevice* pMMDevice; + + MA_ZERO_OBJECT(&deviceInfo); + + hr = ma_IMMDeviceCollection_Item(pDeviceCollection, iDevice, &pMMDevice); + if (SUCCEEDED(hr)) { + result = ma_context_get_device_info_from_MMDevice__wasapi(pContext, pMMDevice, pDefaultDeviceID, MA_TRUE, &deviceInfo); /* MA_TRUE = onlySimpleInfo. */ + + ma_IMMDevice_Release(pMMDevice); + if (result == MA_SUCCESS) { + ma_bool32 cbResult = callback(pContext, deviceType, &deviceInfo, pUserData); + if (cbResult == MA_FALSE) { + break; + } + } + } + } + } + +done: + if (pDefaultDeviceID != NULL) { + ma_CoTaskMemFree(pContext, pDefaultDeviceID); + pDefaultDeviceID = NULL; + } + + if (pDeviceCollection != NULL) { + ma_IMMDeviceCollection_Release(pDeviceCollection); + pDeviceCollection = NULL; + } + + return result; +} + +static ma_result ma_context_get_IAudioClient_Desktop__wasapi(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, MA_PROPVARIANT* pActivationParams, ma_IAudioClient** ppAudioClient, ma_IMMDevice** ppMMDevice) +{ + ma_result result; + HRESULT hr; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(ppAudioClient != NULL); + MA_ASSERT(ppMMDevice != NULL); + + result = ma_context_get_MMDevice__wasapi(pContext, deviceType, pDeviceID, ppMMDevice); + if (result != MA_SUCCESS) { + return result; + } + + hr = ma_IMMDevice_Activate(*ppMMDevice, &MA_IID_IAudioClient, CLSCTX_ALL, pActivationParams, (void**)ppAudioClient); + if (FAILED(hr)) { + return ma_result_from_HRESULT(hr); + } + + return MA_SUCCESS; +} +#else +static ma_result ma_context_get_IAudioClient_UWP__wasapi(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, MA_PROPVARIANT* pActivationParams, ma_IAudioClient** ppAudioClient, ma_IUnknown** ppActivatedInterface) +{ + ma_IActivateAudioInterfaceAsyncOperation *pAsyncOp = NULL; + ma_completion_handler_uwp completionHandler; + IID iid; + WCHAR* iidStr; + HRESULT hr; + ma_result result; + HRESULT activateResult; + ma_IUnknown* pActivatedInterface; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(ppAudioClient != NULL); + + if (pDeviceID != NULL) { + iidStr = (WCHAR*)pDeviceID->wasapi; + } else { + if (deviceType == ma_device_type_capture) { + iid = MA_IID_DEVINTERFACE_AUDIO_CAPTURE; + } else { + iid = MA_IID_DEVINTERFACE_AUDIO_RENDER; + } + + #if defined(__cplusplus) + hr = StringFromIID(iid, &iidStr); + #else + hr = StringFromIID(&iid, &iidStr); + #endif + if (FAILED(hr)) { + ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to convert device IID to string for ActivateAudioInterfaceAsync(). 
Out of memory.\n");
+            return ma_result_from_HRESULT(hr);
+        }
+    }
+
+    result = ma_completion_handler_uwp_init(&completionHandler);
+    if (result != MA_SUCCESS) {
+        ma_CoTaskMemFree(pContext, iidStr);
+        ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to create event for waiting for ActivateAudioInterfaceAsync().\n");
+        return result;
+    }
+
+    hr = ((MA_PFN_ActivateAudioInterfaceAsync)pContext->wasapi.ActivateAudioInterfaceAsync)(iidStr, &MA_IID_IAudioClient, pActivationParams, (ma_IActivateAudioInterfaceCompletionHandler*)&completionHandler, (ma_IActivateAudioInterfaceAsyncOperation**)&pAsyncOp);
+    if (FAILED(hr)) {
+        ma_completion_handler_uwp_uninit(&completionHandler);
+        ma_CoTaskMemFree(pContext, iidStr);
+        ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[WASAPI] ActivateAudioInterfaceAsync() failed.\n");
+        return ma_result_from_HRESULT(hr);
+    }
+
+    if (pDeviceID == NULL) {
+        ma_CoTaskMemFree(pContext, iidStr);
+    }
+
+    /* Wait for the async operation to finish. */
+    ma_completion_handler_uwp_wait(&completionHandler);
+    ma_completion_handler_uwp_uninit(&completionHandler);
+
+    hr = ma_IActivateAudioInterfaceAsyncOperation_GetActivateResult(pAsyncOp, &activateResult, &pActivatedInterface);
+    ma_IActivateAudioInterfaceAsyncOperation_Release(pAsyncOp);
+
+    if (FAILED(hr) || FAILED(activateResult)) {
+        ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to activate device.\n");
+        return FAILED(hr) ? ma_result_from_HRESULT(hr) : ma_result_from_HRESULT(activateResult);
+    }
+
+    /* Here is where we grab the IAudioClient interface. */
+    hr = ma_IUnknown_QueryInterface(pActivatedInterface, &MA_IID_IAudioClient, (void**)ppAudioClient);
+    if (FAILED(hr)) {
+        ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to query IAudioClient interface.\n");
+        return ma_result_from_HRESULT(hr);
+    }
+
+    if (ppActivatedInterface) {
+        *ppActivatedInterface = pActivatedInterface;
+    } else {
+        ma_IUnknown_Release(pActivatedInterface);
+    }
+
+    return MA_SUCCESS;
+}
+#endif
+
+
+/* https://docs.microsoft.com/en-us/windows/win32/api/audioclientactivationparams/ne-audioclientactivationparams-audioclient_activation_type */
+typedef enum
+{
+    MA_AUDIOCLIENT_ACTIVATION_TYPE_DEFAULT,
+    MA_AUDIOCLIENT_ACTIVATION_TYPE_PROCESS_LOOPBACK
+} MA_AUDIOCLIENT_ACTIVATION_TYPE;
+
+/* https://docs.microsoft.com/en-us/windows/win32/api/audioclientactivationparams/ne-audioclientactivationparams-process_loopback_mode */
+typedef enum
+{
+    MA_PROCESS_LOOPBACK_MODE_INCLUDE_TARGET_PROCESS_TREE,
+    MA_PROCESS_LOOPBACK_MODE_EXCLUDE_TARGET_PROCESS_TREE
+} MA_PROCESS_LOOPBACK_MODE;
+
+/* https://docs.microsoft.com/en-us/windows/win32/api/audioclientactivationparams/ns-audioclientactivationparams-audioclient_process_loopback_params */
+typedef struct
+{
+    DWORD TargetProcessId;
+    MA_PROCESS_LOOPBACK_MODE ProcessLoopbackMode;
+} MA_AUDIOCLIENT_PROCESS_LOOPBACK_PARAMS;
+
+#if defined(_MSC_VER) && !defined(__clang__)
+    #pragma warning(push)
+    #pragma warning(disable:4201)   /* nonstandard extension used: nameless struct/union */
+#elif defined(__clang__) || (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)))
+    #pragma GCC diagnostic push
+    #pragma GCC diagnostic ignored "-Wpedantic" /* For ISO C99 doesn't support unnamed structs/unions [-Wpedantic] */
+    #if defined(__clang__)
+        #pragma GCC diagnostic ignored "-Wc11-extensions" /* anonymous unions are a C11 extension */
+    #endif
+#endif
+/*
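+    These definitions enable process-specific loopback capture: recording (or excluding) the audio of a single
+    process tree rather than the whole endpoint. As the log message further below notes, this needs Windows 10
+    build 20348 or later. A minimal sketch of how it surfaces through the public API, assuming the corresponding
+    ma_device_config.wasapi fields (targetProcessId is a hypothetical variable holding the PID):
+
+        ma_device_config config = ma_device_config_init(ma_device_type_loopback);
+        config.wasapi.loopbackProcessID      = targetProcessId;
+        config.wasapi.loopbackProcessExclude = MA_FALSE;
+        config.dataCallback                  = my_capture_callback;
+        result = ma_device_init(&context, &config, &device);
+
+    Setting loopbackProcessExclude to MA_TRUE inverts the filter, capturing everything except that process tree.
+    Reference: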
https://docs.microsoft.com/en-us/windows/win32/api/audioclientactivationparams/ns-audioclientactivationparams-audioclient_activation_params */ +typedef struct +{ + MA_AUDIOCLIENT_ACTIVATION_TYPE ActivationType; + union + { + MA_AUDIOCLIENT_PROCESS_LOOPBACK_PARAMS ProcessLoopbackParams; + }; +} MA_AUDIOCLIENT_ACTIVATION_PARAMS; +#if defined(_MSC_VER) && !defined(__clang__) + #pragma warning(pop) +#elif defined(__clang__) || (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8))) + #pragma GCC diagnostic pop +#endif + +#define MA_VIRTUAL_AUDIO_DEVICE_PROCESS_LOOPBACK L"VAD\\Process_Loopback" + +static ma_result ma_context_get_IAudioClient__wasapi(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_uint32 loopbackProcessID, ma_bool32 loopbackProcessExclude, ma_IAudioClient** ppAudioClient, ma_WASAPIDeviceInterface** ppDeviceInterface) +{ + ma_result result; + ma_bool32 usingProcessLoopback = MA_FALSE; + MA_AUDIOCLIENT_ACTIVATION_PARAMS audioclientActivationParams; + MA_PROPVARIANT activationParams; + MA_PROPVARIANT* pActivationParams = NULL; + ma_device_id virtualDeviceID; + + /* Activation parameters specific to loopback mode. Note that process-specific loopback will only work when a default device ID is specified. */ + if (deviceType == ma_device_type_loopback && loopbackProcessID != 0 && pDeviceID == NULL) { + usingProcessLoopback = MA_TRUE; + } + + if (usingProcessLoopback) { + MA_ZERO_OBJECT(&audioclientActivationParams); + audioclientActivationParams.ActivationType = MA_AUDIOCLIENT_ACTIVATION_TYPE_PROCESS_LOOPBACK; + audioclientActivationParams.ProcessLoopbackParams.ProcessLoopbackMode = (loopbackProcessExclude) ? MA_PROCESS_LOOPBACK_MODE_EXCLUDE_TARGET_PROCESS_TREE : MA_PROCESS_LOOPBACK_MODE_INCLUDE_TARGET_PROCESS_TREE; + audioclientActivationParams.ProcessLoopbackParams.TargetProcessId = (DWORD)loopbackProcessID; + + ma_PropVariantInit(&activationParams); + activationParams.vt = MA_VT_BLOB; + activationParams.blob.cbSize = sizeof(audioclientActivationParams); + activationParams.blob.pBlobData = (BYTE*)&audioclientActivationParams; + pActivationParams = &activationParams; + + /* When requesting a specific device ID we need to use a special device ID. */ + MA_COPY_MEMORY(virtualDeviceID.wasapi, MA_VIRTUAL_AUDIO_DEVICE_PROCESS_LOOPBACK, (wcslen(MA_VIRTUAL_AUDIO_DEVICE_PROCESS_LOOPBACK) + 1) * sizeof(wchar_t)); /* +1 for the null terminator. */ + pDeviceID = &virtualDeviceID; + } else { + pActivationParams = NULL; /* No activation parameters required. */ + } + +#if defined(MA_WIN32_DESKTOP) || defined(MA_WIN32_GDK) + result = ma_context_get_IAudioClient_Desktop__wasapi(pContext, deviceType, pDeviceID, pActivationParams, ppAudioClient, ppDeviceInterface); +#else + result = ma_context_get_IAudioClient_UWP__wasapi(pContext, deviceType, pDeviceID, pActivationParams, ppAudioClient, ppDeviceInterface); +#endif + + /* + If loopback mode was requested with a process ID and initialization failed, it could be because it's + trying to run on an older version of Windows where it's not supported. We need to let the caller + know about this with a log message. + */ + if (result != MA_SUCCESS) { + if (usingProcessLoopback) { + ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[WASAPI] Loopback mode requested to %s process ID %u, but initialization failed. Support for this feature begins with Windows 10 Build 20348. Confirm your version of Windows or consider not using process-specific loopback.\n", (loopbackProcessExclude) ? 
"exclude" : "include", loopbackProcessID); + } + } + + return result; +} + + +static ma_result ma_context_enumerate_devices__wasapi(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData) +{ + /* Different enumeration for desktop and UWP. */ +#if defined(MA_WIN32_DESKTOP) || defined(MA_WIN32_GDK) + /* Desktop */ + HRESULT hr; + ma_IMMDeviceEnumerator* pDeviceEnumerator; + + hr = ma_CoCreateInstance(pContext, &MA_CLSID_MMDeviceEnumerator, NULL, CLSCTX_ALL, &MA_IID_IMMDeviceEnumerator, (void**)&pDeviceEnumerator); + if (FAILED(hr)) { + ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to create device enumerator."); + return ma_result_from_HRESULT(hr); + } + + ma_context_enumerate_devices_by_type__wasapi(pContext, pDeviceEnumerator, ma_device_type_playback, callback, pUserData); + ma_context_enumerate_devices_by_type__wasapi(pContext, pDeviceEnumerator, ma_device_type_capture, callback, pUserData); + + ma_IMMDeviceEnumerator_Release(pDeviceEnumerator); +#else + /* + UWP + + The MMDevice API is only supported on desktop applications. For now, while I'm still figuring out how to properly enumerate + over devices without using MMDevice, I'm restricting devices to defaults. + + Hint: DeviceInformation::FindAllAsync() with DeviceClass.AudioCapture/AudioRender. https://blogs.windows.com/buildingapps/2014/05/15/real-time-audio-in-windows-store-and-windows-phone-apps/ + */ + if (callback) { + ma_bool32 cbResult = MA_TRUE; + + /* Playback. */ + if (cbResult) { + ma_device_info deviceInfo; + MA_ZERO_OBJECT(&deviceInfo); + ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1); + deviceInfo.isDefault = MA_TRUE; + cbResult = callback(pContext, ma_device_type_playback, &deviceInfo, pUserData); + } + + /* Capture. */ + if (cbResult) { + ma_device_info deviceInfo; + MA_ZERO_OBJECT(&deviceInfo); + ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1); + deviceInfo.isDefault = MA_TRUE; + cbResult = callback(pContext, ma_device_type_capture, &deviceInfo, pUserData); + } + } +#endif + + return MA_SUCCESS; +} + +static ma_result ma_context_get_device_info__wasapi(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_device_info* pDeviceInfo) +{ +#if defined(MA_WIN32_DESKTOP) || defined(MA_WIN32_GDK) + ma_result result; + ma_IMMDevice* pMMDevice = NULL; + WCHAR* pDefaultDeviceID = NULL; + + result = ma_context_get_MMDevice__wasapi(pContext, deviceType, pDeviceID, &pMMDevice); + if (result != MA_SUCCESS) { + return result; + } + + /* We need the default device ID so we can set the isDefault flag in the device info. */ + pDefaultDeviceID = ma_context_get_default_device_id__wasapi(pContext, deviceType); + + result = ma_context_get_device_info_from_MMDevice__wasapi(pContext, pMMDevice, pDefaultDeviceID, MA_FALSE, pDeviceInfo); /* MA_FALSE = !onlySimpleInfo. */ + + if (pDefaultDeviceID != NULL) { + ma_CoTaskMemFree(pContext, pDefaultDeviceID); + pDefaultDeviceID = NULL; + } + + ma_IMMDevice_Release(pMMDevice); + + return result; +#else + ma_IAudioClient* pAudioClient; + ma_result result; + + /* UWP currently only uses default devices. 
*/ + if (deviceType == ma_device_type_playback) { + ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1); + } else { + ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1); + } + + result = ma_context_get_IAudioClient_UWP__wasapi(pContext, deviceType, pDeviceID, NULL, &pAudioClient, NULL); + if (result != MA_SUCCESS) { + return result; + } + + result = ma_context_get_device_info_from_IAudioClient__wasapi(pContext, NULL, pAudioClient, pDeviceInfo); + + pDeviceInfo->isDefault = MA_TRUE; /* UWP only supports default devices. */ + + ma_IAudioClient_Release(pAudioClient); + return result; +#endif +} + +static ma_result ma_device_uninit__wasapi(ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + + #if defined(MA_WIN32_DESKTOP) || defined(MA_WIN32_GDK) + { + if (pDevice->wasapi.pDeviceEnumerator) { + ((ma_IMMDeviceEnumerator*)pDevice->wasapi.pDeviceEnumerator)->lpVtbl->UnregisterEndpointNotificationCallback((ma_IMMDeviceEnumerator*)pDevice->wasapi.pDeviceEnumerator, &pDevice->wasapi.notificationClient); + ma_IMMDeviceEnumerator_Release((ma_IMMDeviceEnumerator*)pDevice->wasapi.pDeviceEnumerator); + } + + ma_mutex_uninit(&pDevice->wasapi.rerouteLock); + } + #endif + + if (pDevice->wasapi.pRenderClient) { + if (pDevice->wasapi.pMappedBufferPlayback != NULL) { + ma_IAudioRenderClient_ReleaseBuffer((ma_IAudioRenderClient*)pDevice->wasapi.pRenderClient, pDevice->wasapi.mappedBufferPlaybackCap, 0); + pDevice->wasapi.pMappedBufferPlayback = NULL; + pDevice->wasapi.mappedBufferPlaybackCap = 0; + pDevice->wasapi.mappedBufferPlaybackLen = 0; + } + + ma_IAudioRenderClient_Release((ma_IAudioRenderClient*)pDevice->wasapi.pRenderClient); + } + if (pDevice->wasapi.pCaptureClient) { + if (pDevice->wasapi.pMappedBufferCapture != NULL) { + ma_IAudioCaptureClient_ReleaseBuffer((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient, pDevice->wasapi.mappedBufferCaptureCap); + pDevice->wasapi.pMappedBufferCapture = NULL; + pDevice->wasapi.mappedBufferCaptureCap = 0; + pDevice->wasapi.mappedBufferCaptureLen = 0; + } + + ma_IAudioCaptureClient_Release((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient); + } + + if (pDevice->wasapi.pAudioClientPlayback) { + ma_IAudioClient_Release((ma_IAudioClient*)pDevice->wasapi.pAudioClientPlayback); + } + if (pDevice->wasapi.pAudioClientCapture) { + ma_IAudioClient_Release((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture); + } + + if (pDevice->wasapi.hEventPlayback) { + CloseHandle((HANDLE)pDevice->wasapi.hEventPlayback); + } + if (pDevice->wasapi.hEventCapture) { + CloseHandle((HANDLE)pDevice->wasapi.hEventCapture); + } + + return MA_SUCCESS; +} + + +typedef struct +{ + /* Input. */ + ma_format formatIn; + ma_uint32 channelsIn; + ma_uint32 sampleRateIn; + ma_channel channelMapIn[MA_MAX_CHANNELS]; + ma_uint32 periodSizeInFramesIn; + ma_uint32 periodSizeInMillisecondsIn; + ma_uint32 periodsIn; + ma_share_mode shareMode; + ma_performance_profile performanceProfile; + ma_bool32 noAutoConvertSRC; + ma_bool32 noDefaultQualitySRC; + ma_bool32 noHardwareOffloading; + ma_uint32 loopbackProcessID; + ma_bool32 loopbackProcessExclude; + + /* Output. 
*/
+    ma_IAudioClient* pAudioClient;
+    ma_IAudioRenderClient* pRenderClient;
+    ma_IAudioCaptureClient* pCaptureClient;
+    ma_format formatOut;
+    ma_uint32 channelsOut;
+    ma_uint32 sampleRateOut;
+    ma_channel channelMapOut[MA_MAX_CHANNELS];
+    ma_uint32 periodSizeInFramesOut;
+    ma_uint32 periodsOut;
+    ma_bool32 usingAudioClient3;
+    char deviceName[256];
+    ma_device_id id;
+} ma_device_init_internal_data__wasapi;
+
+static ma_result ma_device_init_internal__wasapi(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_device_init_internal_data__wasapi* pData)
+{
+    HRESULT hr;
+    ma_result result = MA_SUCCESS;
+    const char* errorMsg = "";
+    MA_AUDCLNT_SHAREMODE shareMode = MA_AUDCLNT_SHAREMODE_SHARED;
+    DWORD streamFlags = 0;
+    MA_REFERENCE_TIME periodDurationInMicroseconds;
+    ma_bool32 wasInitializedUsingIAudioClient3 = MA_FALSE;
+    MA_WAVEFORMATEXTENSIBLE wf;
+    ma_WASAPIDeviceInterface* pDeviceInterface = NULL;
+    ma_IAudioClient2* pAudioClient2;
+    ma_uint32 nativeSampleRate;
+    ma_bool32 usingProcessLoopback = MA_FALSE;
+
+    MA_ASSERT(pContext != NULL);
+    MA_ASSERT(pData != NULL);
+
+    /* This function is only used to initialize one device type: either playback, capture or loopback. Never full-duplex. */
+    if (deviceType == ma_device_type_duplex) {
+        return MA_INVALID_ARGS;
+    }
+
+    usingProcessLoopback = deviceType == ma_device_type_loopback && pData->loopbackProcessID != 0 && pDeviceID == NULL;
+
+    pData->pAudioClient = NULL;
+    pData->pRenderClient = NULL;
+    pData->pCaptureClient = NULL;
+
+    streamFlags = MA_AUDCLNT_STREAMFLAGS_EVENTCALLBACK;
+    if (!pData->noAutoConvertSRC && pData->sampleRateIn != 0 && pData->shareMode != ma_share_mode_exclusive) {    /* <-- Exclusive streams must use the native sample rate. */
+        streamFlags |= MA_AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM;
+    }
+    if (!pData->noDefaultQualitySRC && pData->sampleRateIn != 0 && (streamFlags & MA_AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM) != 0) {
+        streamFlags |= MA_AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY;
+    }
+    if (deviceType == ma_device_type_loopback) {
+        streamFlags |= MA_AUDCLNT_STREAMFLAGS_LOOPBACK;
+    }
+
+    result = ma_context_get_IAudioClient__wasapi(pContext, deviceType, pDeviceID, pData->loopbackProcessID, pData->loopbackProcessExclude, &pData->pAudioClient, &pDeviceInterface);
+    if (result != MA_SUCCESS) {
+        goto done;
+    }
+
+    MA_ZERO_OBJECT(&wf);
+
+    /* Try enabling hardware offloading. */
+    if (!pData->noHardwareOffloading) {
+        hr = ma_IAudioClient_QueryInterface(pData->pAudioClient, &MA_IID_IAudioClient2, (void**)&pAudioClient2);
+        if (SUCCEEDED(hr)) {
+            BOOL isHardwareOffloadingSupported = 0;
+            hr = ma_IAudioClient2_IsOffloadCapable(pAudioClient2, MA_AudioCategory_Other, &isHardwareOffloadingSupported);
+            if (SUCCEEDED(hr) && isHardwareOffloadingSupported) {
+                ma_AudioClientProperties clientProperties;
+                MA_ZERO_OBJECT(&clientProperties);
+                clientProperties.cbSize = sizeof(clientProperties);
+                clientProperties.bIsOffload = 1;
+                clientProperties.eCategory = MA_AudioCategory_Other;
+                ma_IAudioClient2_SetClientProperties(pAudioClient2, &clientProperties);
+            }
+
+            pAudioClient2->lpVtbl->Release(pAudioClient2);
+        }
+    }
+
+    /*
+    Here is where we try to determine the best format to use with the device. If the client is wanting exclusive mode, first try finding the best format for that.
+    If this fails, fall back to shared mode.
+    */
+    result = MA_FORMAT_NOT_SUPPORTED;
+    if (pData->shareMode == ma_share_mode_exclusive) {
+    #if defined(MA_WIN32_DESKTOP) || defined(MA_WIN32_GDK)
+        /* In exclusive mode on desktop we always use the backend's native format. */
+        ma_IPropertyStore* pStore = NULL;
+        hr = ma_IMMDevice_OpenPropertyStore(pDeviceInterface, STGM_READ, &pStore);
+        if (SUCCEEDED(hr)) {
+            MA_PROPVARIANT prop;
+            ma_PropVariantInit(&prop);
+            hr = ma_IPropertyStore_GetValue(pStore, &MA_PKEY_AudioEngine_DeviceFormat, &prop);
+            if (SUCCEEDED(hr)) {
+                MA_WAVEFORMATEX* pActualFormat = (MA_WAVEFORMATEX*)prop.blob.pBlobData;
+                hr = ma_IAudioClient_IsFormatSupported((ma_IAudioClient*)pData->pAudioClient, MA_AUDCLNT_SHAREMODE_EXCLUSIVE, pActualFormat, NULL);
+                if (SUCCEEDED(hr)) {
+                    MA_COPY_MEMORY(&wf, pActualFormat, sizeof(MA_WAVEFORMATEXTENSIBLE));
+                }
+
+                ma_PropVariantClear(pContext, &prop);
+            }
+
+            ma_IPropertyStore_Release(pStore);
+        }
+    #else
+        /*
+        I do not know how to query the device's native format on UWP so for now I'm just disabling support for
+        exclusive mode. The alternative is to enumerate over different formats and check IsFormatSupported()
+        until you find one that works.
+
+        TODO: Add support for exclusive mode to UWP.
+        */
+        hr = S_FALSE;
+    #endif
+
+        if (hr == S_OK) {
+            shareMode = MA_AUDCLNT_SHAREMODE_EXCLUSIVE;
+            result = MA_SUCCESS;
+        } else {
+            result = MA_SHARE_MODE_NOT_SUPPORTED;
+        }
+    } else {
+        /* In shared mode we are always using the format reported by the operating system. */
+        MA_WAVEFORMATEXTENSIBLE* pNativeFormat = NULL;
+        hr = ma_IAudioClient_GetMixFormat((ma_IAudioClient*)pData->pAudioClient, (MA_WAVEFORMATEX**)&pNativeFormat);
+        if (hr != S_OK) {
+            /* When using process-specific loopback, GetMixFormat() seems to always fail. */
+            if (usingProcessLoopback) {
+                wf.wFormatTag = WAVE_FORMAT_IEEE_FLOAT;
+                wf.nChannels = 2;
+                wf.nSamplesPerSec = 44100;
+                wf.wBitsPerSample = 32;
+                wf.nBlockAlign = wf.nChannels * wf.wBitsPerSample / 8;
+                wf.nAvgBytesPerSec = wf.nSamplesPerSec * wf.nBlockAlign;
+                wf.cbSize = sizeof(MA_WAVEFORMATEX);
+
+                result = MA_SUCCESS;
+            } else {
+                result = MA_FORMAT_NOT_SUPPORTED;
+            }
+        } else {
+            /*
+            I've seen cases where cbSize will be set to sizeof(WAVEFORMATEX) even though the structure itself
+            is given the format tag of WAVE_FORMAT_EXTENSIBLE. If the format tag is WAVE_FORMAT_EXTENSIBLE we
+            want to make sure we copy the whole WAVEFORMATEXTENSIBLE structure. Otherwise we'll have to be
+            safe and only copy the WAVEFORMATEX part.
+            */
+            if (pNativeFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE) {
+                MA_COPY_MEMORY(&wf, pNativeFormat, sizeof(MA_WAVEFORMATEXTENSIBLE));
+            } else {
+                /* I've seen a case where cbSize was set to 0. Assume sizeof(WAVEFORMATEX) in this case. */
+                size_t cbSize = pNativeFormat->cbSize;
+                if (cbSize == 0) {
+                    cbSize = sizeof(MA_WAVEFORMATEX);
+                }
+
+                /* Make sure we don't copy more than the capacity of `wf`. */
+                if (cbSize > sizeof(wf)) {
+                    cbSize = sizeof(wf);
+                }
+
+                MA_COPY_MEMORY(&wf, pNativeFormat, cbSize);
+            }
+
+            result = MA_SUCCESS;
+        }
+
+        ma_CoTaskMemFree(pContext, pNativeFormat);
+
+        shareMode = MA_AUDCLNT_SHAREMODE_SHARED;
+    }
+
+    /* Return an error if we still haven't found a format. */
+    if (result != MA_SUCCESS) {
+        errorMsg = "[WASAPI] Failed to find best device mix format.";
+        goto done;
+    }
+
+    /*
+    Override the native sample rate with the one requested by the caller, but only if we're not using the default sample rate. We'll use
+    WASAPI to perform the sample rate conversion.
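[Editor's note -- illustrative sketch, not part of this patch]
Aside on the override performed just below: when AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM is active, only the sample rate is swapped out, and the derived byte-rate field has to be kept consistent with it. A minimal sketch, my_ name hypothetical:

// Keep the derived WAVEFORMATEX fields consistent after a rate change.
// nBlockAlign (bytes per frame) is unaffected by the sample rate.
static void my_override_sample_rate(MA_WAVEFORMATEXTENSIBLE* pWF, ma_uint32 newSampleRate)
{
    pWF->nSamplesPerSec  = newSampleRate;
    pWF->nAvgBytesPerSec = pWF->nSamplesPerSec * pWF->nBlockAlign;
}

[end editor's note]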
+ */ + nativeSampleRate = wf.nSamplesPerSec; + if (streamFlags & MA_AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM) { + wf.nSamplesPerSec = (pData->sampleRateIn != 0) ? pData->sampleRateIn : MA_DEFAULT_SAMPLE_RATE; + wf.nAvgBytesPerSec = wf.nSamplesPerSec * wf.nBlockAlign; + } + + pData->formatOut = ma_format_from_WAVEFORMATEX((MA_WAVEFORMATEX*)&wf); + if (pData->formatOut == ma_format_unknown) { + /* + The format isn't supported. This is almost certainly because the exclusive mode format isn't supported by miniaudio. We need to return MA_SHARE_MODE_NOT_SUPPORTED + in this case so that the caller can detect it and fall back to shared mode if desired. We should never get here if shared mode was requested, but just for + completeness we'll check for it and return MA_FORMAT_NOT_SUPPORTED. + */ + if (shareMode == MA_AUDCLNT_SHAREMODE_EXCLUSIVE) { + result = MA_SHARE_MODE_NOT_SUPPORTED; + } else { + result = MA_FORMAT_NOT_SUPPORTED; + } + + errorMsg = "[WASAPI] Native format not supported."; + goto done; + } + + pData->channelsOut = wf.nChannels; + pData->sampleRateOut = wf.nSamplesPerSec; + + /* + Get the internal channel map based on the channel mask. There is a possibility that GetMixFormat() returns + a WAVEFORMATEX instead of a WAVEFORMATEXTENSIBLE, in which case the channel mask will be undefined. In this + case we'll just use the default channel map. + */ + if (wf.wFormatTag == WAVE_FORMAT_EXTENSIBLE || wf.cbSize >= sizeof(MA_WAVEFORMATEXTENSIBLE)) { + ma_channel_mask_to_channel_map__win32(wf.dwChannelMask, pData->channelsOut, pData->channelMapOut); + } else { + ma_channel_map_init_standard(ma_standard_channel_map_microsoft, pData->channelMapOut, ma_countof(pData->channelMapOut), pData->channelsOut); + } + + /* Period size. */ + pData->periodsOut = (pData->periodsIn != 0) ? pData->periodsIn : MA_DEFAULT_PERIODS; + pData->periodSizeInFramesOut = pData->periodSizeInFramesIn; + if (pData->periodSizeInFramesOut == 0) { + if (pData->periodSizeInMillisecondsIn == 0) { + if (pData->performanceProfile == ma_performance_profile_low_latency) { + pData->periodSizeInFramesOut = ma_calculate_buffer_size_in_frames_from_milliseconds(MA_DEFAULT_PERIOD_SIZE_IN_MILLISECONDS_LOW_LATENCY, wf.nSamplesPerSec); + } else { + pData->periodSizeInFramesOut = ma_calculate_buffer_size_in_frames_from_milliseconds(MA_DEFAULT_PERIOD_SIZE_IN_MILLISECONDS_CONSERVATIVE, wf.nSamplesPerSec); + } + } else { + pData->periodSizeInFramesOut = ma_calculate_buffer_size_in_frames_from_milliseconds(pData->periodSizeInMillisecondsIn, wf.nSamplesPerSec); + } + } + + periodDurationInMicroseconds = ((ma_uint64)pData->periodSizeInFramesOut * 1000 * 1000) / wf.nSamplesPerSec; + + + /* Slightly different initialization for shared and exclusive modes. We try exclusive mode first, and if it fails, fall back to shared mode. */ + if (shareMode == MA_AUDCLNT_SHAREMODE_EXCLUSIVE) { + MA_REFERENCE_TIME bufferDuration = periodDurationInMicroseconds * pData->periodsOut * 10; + + /* + If the periodicity is too small, Initialize() will fail with AUDCLNT_E_INVALID_DEVICE_PERIOD. In this case we should just keep increasing + it and trying it again. + */ + hr = E_FAIL; + for (;;) { + hr = ma_IAudioClient_Initialize((ma_IAudioClient*)pData->pAudioClient, shareMode, streamFlags, bufferDuration, bufferDuration, (MA_WAVEFORMATEX*)&wf, NULL); + if (hr == MA_AUDCLNT_E_INVALID_DEVICE_PERIOD) { + if (bufferDuration > 500*10000) { + break; + } else { + if (bufferDuration == 0) { /* <-- Just a sanity check to prevent an infinite loop. 
Should never happen, but it makes me feel better. */ + break; + } + + bufferDuration = bufferDuration * 2; + continue; + } + } else { + break; + } + } + + if (hr == MA_AUDCLNT_E_BUFFER_SIZE_NOT_ALIGNED) { + ma_uint32 bufferSizeInFrames; + hr = ma_IAudioClient_GetBufferSize((ma_IAudioClient*)pData->pAudioClient, &bufferSizeInFrames); + if (SUCCEEDED(hr)) { + bufferDuration = (MA_REFERENCE_TIME)((10000.0 * 1000 / wf.nSamplesPerSec * bufferSizeInFrames) + 0.5); + + /* Unfortunately we need to release and re-acquire the audio client according to MSDN. Seems silly - why not just call IAudioClient_Initialize() again?! */ + ma_IAudioClient_Release((ma_IAudioClient*)pData->pAudioClient); + + #if defined(MA_WIN32_DESKTOP) || defined(MA_WIN32_GDK) + hr = ma_IMMDevice_Activate(pDeviceInterface, &MA_IID_IAudioClient, CLSCTX_ALL, NULL, (void**)&pData->pAudioClient); + #else + hr = ma_IUnknown_QueryInterface(pDeviceInterface, &MA_IID_IAudioClient, (void**)&pData->pAudioClient); + #endif + + if (SUCCEEDED(hr)) { + hr = ma_IAudioClient_Initialize((ma_IAudioClient*)pData->pAudioClient, shareMode, streamFlags, bufferDuration, bufferDuration, (MA_WAVEFORMATEX*)&wf, NULL); + } + } + } + + if (FAILED(hr)) { + /* Failed to initialize in exclusive mode. Don't fall back to shared mode - instead tell the client about it. They can reinitialize in shared mode if they want. */ + if (hr == E_ACCESSDENIED) { + errorMsg = "[WASAPI] Failed to initialize device in exclusive mode. Access denied.", result = MA_ACCESS_DENIED; + } else if (hr == MA_AUDCLNT_E_DEVICE_IN_USE) { + errorMsg = "[WASAPI] Failed to initialize device in exclusive mode. Device in use.", result = MA_BUSY; + } else { + errorMsg = "[WASAPI] Failed to initialize device in exclusive mode."; result = ma_result_from_HRESULT(hr); + } + goto done; + } + } + + if (shareMode == MA_AUDCLNT_SHAREMODE_SHARED) { + /* + Low latency shared mode via IAudioClient3. + + NOTE + ==== + Contrary to the documentation on MSDN (https://docs.microsoft.com/en-us/windows/win32/api/audioclient/nf-audioclient-iaudioclient3-initializesharedaudiostream), the + use of AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM and AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY with IAudioClient3_InitializeSharedAudioStream() absolutely does not work. Using + any of these flags will result in HRESULT code 0x88890021. The other problem is that calling IAudioClient3_GetSharedModeEnginePeriod() with a sample rate different to + that returned by IAudioClient_GetMixFormat() also results in an error. I'm therefore disabling low-latency shared mode with AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM. + */ + #ifndef MA_WASAPI_NO_LOW_LATENCY_SHARED_MODE + { + if ((streamFlags & MA_AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM) == 0 || nativeSampleRate == wf.nSamplesPerSec) { + ma_IAudioClient3* pAudioClient3 = NULL; + hr = ma_IAudioClient_QueryInterface(pData->pAudioClient, &MA_IID_IAudioClient3, (void**)&pAudioClient3); + if (SUCCEEDED(hr)) { + ma_uint32 defaultPeriodInFrames; + ma_uint32 fundamentalPeriodInFrames; + ma_uint32 minPeriodInFrames; + ma_uint32 maxPeriodInFrames; + hr = ma_IAudioClient3_GetSharedModeEnginePeriod(pAudioClient3, (MA_WAVEFORMATEX*)&wf, &defaultPeriodInFrames, &fundamentalPeriodInFrames, &minPeriodInFrames, &maxPeriodInFrames); + if (SUCCEEDED(hr)) { + ma_uint32 desiredPeriodInFrames = pData->periodSizeInFramesOut; + ma_uint32 actualPeriodInFrames = desiredPeriodInFrames; + + /* Make sure the period size is a multiple of fundamentalPeriodInFrames. 
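[Editor's note -- illustrative sketch, not part of this patch]
The rounding rule described above, isolated as a sketch (my_ name hypothetical): round the requested period down to a multiple of the fundamental period, then clamp it to the engine's supported range.

static ma_uint32 my_snap_period_in_frames(ma_uint32 desired, ma_uint32 fundamental, ma_uint32 minPeriod, ma_uint32 maxPeriod)
{
    ma_uint32 snapped = (desired / fundamental) * fundamental;  // Round down to a multiple.
    if (snapped < minPeriod) { snapped = minPeriod; }
    if (snapped > maxPeriod) { snapped = maxPeriod; }
    return snapped;
}
// Example: desired=500, fundamental=32, min=128, max=960 gives 480.

[end editor's note]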
*/
+                        actualPeriodInFrames = actualPeriodInFrames / fundamentalPeriodInFrames;
+                        actualPeriodInFrames = actualPeriodInFrames * fundamentalPeriodInFrames;
+
+                        /* The period needs to be clamped between minPeriodInFrames and maxPeriodInFrames. */
+                        actualPeriodInFrames = ma_clamp(actualPeriodInFrames, minPeriodInFrames, maxPeriodInFrames);
+
+                        ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_DEBUG, "[WASAPI] Trying IAudioClient3_InitializeSharedAudioStream(actualPeriodInFrames=%d)\n", actualPeriodInFrames);
+                        ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_DEBUG, "    defaultPeriodInFrames=%d\n", defaultPeriodInFrames);
+                        ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_DEBUG, "    fundamentalPeriodInFrames=%d\n", fundamentalPeriodInFrames);
+                        ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_DEBUG, "    minPeriodInFrames=%d\n", minPeriodInFrames);
+                        ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_DEBUG, "    maxPeriodInFrames=%d\n", maxPeriodInFrames);
+
+                        /* If the client requested a largish buffer then we don't actually want to use low latency shared mode because it forces small buffers. */
+                        if (actualPeriodInFrames >= desiredPeriodInFrames) {
+                            /*
+                            MA_AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM | MA_AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY must not be in the stream flags. If either of these are specified,
+                            IAudioClient3_InitializeSharedAudioStream() will fail.
+                            */
+                            hr = ma_IAudioClient3_InitializeSharedAudioStream(pAudioClient3, streamFlags & ~(MA_AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM | MA_AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY), actualPeriodInFrames, (MA_WAVEFORMATEX*)&wf, NULL);
+                            if (SUCCEEDED(hr)) {
+                                wasInitializedUsingIAudioClient3 = MA_TRUE;
+                                pData->periodSizeInFramesOut = actualPeriodInFrames;
+
+                                ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_DEBUG, "[WASAPI] Using IAudioClient3\n");
+                                ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_DEBUG, "    periodSizeInFramesOut=%d\n", pData->periodSizeInFramesOut);
+                            } else {
+                                ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_DEBUG, "[WASAPI] IAudioClient3_InitializeSharedAudioStream failed. Falling back to IAudioClient.\n");
+                            }
+                        } else {
+                            ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_DEBUG, "[WASAPI] Not using IAudioClient3 because the desired period size is larger than the maximum supported by IAudioClient3.\n");
+                        }
+                    } else {
+                        ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_DEBUG, "[WASAPI] IAudioClient3_GetSharedModeEnginePeriod failed. Falling back to IAudioClient.\n");
+                    }
+
+                    ma_IAudioClient3_Release(pAudioClient3);
+                    pAudioClient3 = NULL;
+                }
+            }
+        }
+        #else
+        {
+            ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_DEBUG, "[WASAPI] Not using IAudioClient3 because MA_WASAPI_NO_LOW_LATENCY_SHARED_MODE is enabled.\n");
+        }
+        #endif
+
+        /* If we don't have an IAudioClient3 then we need to use the normal initialization routine. */
+        if (!wasInitializedUsingIAudioClient3) {
+            MA_REFERENCE_TIME bufferDuration = periodDurationInMicroseconds * pData->periodsOut * 10;   /* <-- Multiply by 10 for microseconds to 100-nanoseconds. */
+            hr = ma_IAudioClient_Initialize((ma_IAudioClient*)pData->pAudioClient, shareMode, streamFlags, bufferDuration, 0, (const MA_WAVEFORMATEX*)&wf, NULL);
+            if (FAILED(hr)) {
+                if (hr == E_ACCESSDENIED) {
+                    errorMsg = "[WASAPI] Failed to initialize device. Access denied.", result = MA_ACCESS_DENIED;
+                } else if (hr == MA_AUDCLNT_E_DEVICE_IN_USE) {
+                    errorMsg = "[WASAPI] Failed to initialize device. 
Device in use.", result = MA_BUSY; + } else { + errorMsg = "[WASAPI] Failed to initialize device.", result = ma_result_from_HRESULT(hr); + } + + goto done; + } + } + } + + if (!wasInitializedUsingIAudioClient3) { + ma_uint32 bufferSizeInFrames = 0; + hr = ma_IAudioClient_GetBufferSize((ma_IAudioClient*)pData->pAudioClient, &bufferSizeInFrames); + if (FAILED(hr)) { + errorMsg = "[WASAPI] Failed to get audio client's actual buffer size.", result = ma_result_from_HRESULT(hr); + goto done; + } + + /* + When using process loopback mode, retrieval of the buffer size seems to result in totally + incorrect values. In this case we'll just assume it's the same size as what we requested + when we initialized the client. + */ + if (usingProcessLoopback) { + bufferSizeInFrames = (ma_uint32)((periodDurationInMicroseconds * pData->periodsOut) * pData->sampleRateOut / 1000000); + } + + pData->periodSizeInFramesOut = bufferSizeInFrames / pData->periodsOut; + } + + pData->usingAudioClient3 = wasInitializedUsingIAudioClient3; + + + if (deviceType == ma_device_type_playback) { + result = ma_device_create_IAudioClient_service__wasapi(pContext, deviceType, (ma_IAudioClient*)pData->pAudioClient, (void**)&pData->pRenderClient); + } else { + result = ma_device_create_IAudioClient_service__wasapi(pContext, deviceType, (ma_IAudioClient*)pData->pAudioClient, (void**)&pData->pCaptureClient); + } + + /*if (FAILED(hr)) {*/ + if (result != MA_SUCCESS) { + errorMsg = "[WASAPI] Failed to get audio client service."; + goto done; + } + + + /* Grab the name of the device. */ + #if defined(MA_WIN32_DESKTOP) || defined(MA_WIN32_GDK) + { + ma_IPropertyStore *pProperties; + hr = ma_IMMDevice_OpenPropertyStore(pDeviceInterface, STGM_READ, &pProperties); + if (SUCCEEDED(hr)) { + MA_PROPVARIANT varName; + ma_PropVariantInit(&varName); + hr = ma_IPropertyStore_GetValue(pProperties, &MA_PKEY_Device_FriendlyName, &varName); + if (SUCCEEDED(hr)) { + WideCharToMultiByte(CP_UTF8, 0, varName.pwszVal, -1, pData->deviceName, sizeof(pData->deviceName), 0, FALSE); + ma_PropVariantClear(pContext, &varName); + } + + ma_IPropertyStore_Release(pProperties); + } + } + #endif + + /* + For the WASAPI backend we need to know the actual IDs of the device in order to do automatic + stream routing so that IDs can be compared and we can determine which device has been detached + and whether or not it matches with our ma_device. + */ + #if defined(MA_WIN32_DESKTOP) || defined(MA_WIN32_GDK) + { + /* Desktop */ + ma_context_get_device_id_from_MMDevice__wasapi(pContext, pDeviceInterface, &pData->id); + } + #else + { + /* UWP */ + /* TODO: Implement me. Need to figure out how to get the ID of the default device. */ + } + #endif + +done: + /* Clean up. 
*/ +#if defined(MA_WIN32_DESKTOP) || defined(MA_WIN32_GDK) + if (pDeviceInterface != NULL) { + ma_IMMDevice_Release(pDeviceInterface); + } +#else + if (pDeviceInterface != NULL) { + ma_IUnknown_Release(pDeviceInterface); + } +#endif + + if (result != MA_SUCCESS) { + if (pData->pRenderClient) { + ma_IAudioRenderClient_Release((ma_IAudioRenderClient*)pData->pRenderClient); + pData->pRenderClient = NULL; + } + if (pData->pCaptureClient) { + ma_IAudioCaptureClient_Release((ma_IAudioCaptureClient*)pData->pCaptureClient); + pData->pCaptureClient = NULL; + } + if (pData->pAudioClient) { + ma_IAudioClient_Release((ma_IAudioClient*)pData->pAudioClient); + pData->pAudioClient = NULL; + } + + if (errorMsg != NULL && errorMsg[0] != '\0') { + ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "%s\n", errorMsg); + } + + return result; + } else { + return MA_SUCCESS; + } +} + +static ma_result ma_device_reinit__wasapi(ma_device* pDevice, ma_device_type deviceType) +{ + ma_device_init_internal_data__wasapi data; + ma_result result; + + MA_ASSERT(pDevice != NULL); + + /* We only re-initialize the playback or capture device. Never a full-duplex device. */ + if (deviceType == ma_device_type_duplex) { + return MA_INVALID_ARGS; + } + + + /* + Before reinitializing the device we need to free the previous audio clients. + + There's a known memory leak here. We will be calling this from the routing change callback that + is fired by WASAPI. If we attempt to release the IAudioClient we will deadlock. In my opinion + this is a bug. I'm not sure what I need to do to handle this cleanly, but I think we'll probably + need some system where we post an event, but delay the execution of it until the callback has + returned. I'm not sure how to do this reliably, however. I have set up some infrastructure for + a command thread which might be useful for this. 
+ */ + if (deviceType == ma_device_type_capture || deviceType == ma_device_type_loopback) { + if (pDevice->wasapi.pCaptureClient) { + ma_IAudioCaptureClient_Release((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient); + pDevice->wasapi.pCaptureClient = NULL; + } + + if (pDevice->wasapi.pAudioClientCapture) { + /*ma_device_release_IAudioClient_service__wasapi(pDevice, ma_device_type_capture);*/ + pDevice->wasapi.pAudioClientCapture = NULL; + } + } + + if (deviceType == ma_device_type_playback) { + if (pDevice->wasapi.pRenderClient) { + ma_IAudioRenderClient_Release((ma_IAudioRenderClient*)pDevice->wasapi.pRenderClient); + pDevice->wasapi.pRenderClient = NULL; + } + + if (pDevice->wasapi.pAudioClientPlayback) { + /*ma_device_release_IAudioClient_service__wasapi(pDevice, ma_device_type_playback);*/ + pDevice->wasapi.pAudioClientPlayback = NULL; + } + } + + + if (deviceType == ma_device_type_playback) { + data.formatIn = pDevice->playback.format; + data.channelsIn = pDevice->playback.channels; + MA_COPY_MEMORY(data.channelMapIn, pDevice->playback.channelMap, sizeof(pDevice->playback.channelMap)); + data.shareMode = pDevice->playback.shareMode; + } else { + data.formatIn = pDevice->capture.format; + data.channelsIn = pDevice->capture.channels; + MA_COPY_MEMORY(data.channelMapIn, pDevice->capture.channelMap, sizeof(pDevice->capture.channelMap)); + data.shareMode = pDevice->capture.shareMode; + } + + data.sampleRateIn = pDevice->sampleRate; + data.periodSizeInFramesIn = pDevice->wasapi.originalPeriodSizeInFrames; + data.periodSizeInMillisecondsIn = pDevice->wasapi.originalPeriodSizeInMilliseconds; + data.periodsIn = pDevice->wasapi.originalPeriods; + data.performanceProfile = pDevice->wasapi.originalPerformanceProfile; + data.noAutoConvertSRC = pDevice->wasapi.noAutoConvertSRC; + data.noDefaultQualitySRC = pDevice->wasapi.noDefaultQualitySRC; + data.noHardwareOffloading = pDevice->wasapi.noHardwareOffloading; + data.loopbackProcessID = pDevice->wasapi.loopbackProcessID; + data.loopbackProcessExclude = pDevice->wasapi.loopbackProcessExclude; + result = ma_device_init_internal__wasapi(pDevice->pContext, deviceType, NULL, &data); + if (result != MA_SUCCESS) { + return result; + } + + /* At this point we have some new objects ready to go. We need to uninitialize the previous ones and then set the new ones. */ + if (deviceType == ma_device_type_capture || deviceType == ma_device_type_loopback) { + pDevice->wasapi.pAudioClientCapture = data.pAudioClient; + pDevice->wasapi.pCaptureClient = data.pCaptureClient; + + pDevice->capture.internalFormat = data.formatOut; + pDevice->capture.internalChannels = data.channelsOut; + pDevice->capture.internalSampleRate = data.sampleRateOut; + MA_COPY_MEMORY(pDevice->capture.internalChannelMap, data.channelMapOut, sizeof(data.channelMapOut)); + pDevice->capture.internalPeriodSizeInFrames = data.periodSizeInFramesOut; + pDevice->capture.internalPeriods = data.periodsOut; + ma_strcpy_s(pDevice->capture.name, sizeof(pDevice->capture.name), data.deviceName); + + ma_IAudioClient_SetEventHandle((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture, (HANDLE)pDevice->wasapi.hEventCapture); + + pDevice->wasapi.periodSizeInFramesCapture = data.periodSizeInFramesOut; + ma_IAudioClient_GetBufferSize((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture, &pDevice->wasapi.actualBufferSizeInFramesCapture); + + /* We must always have a valid ID. 
*/ + ma_strcpy_s_WCHAR(pDevice->capture.id.wasapi, sizeof(pDevice->capture.id.wasapi), data.id.wasapi); + } + + if (deviceType == ma_device_type_playback) { + pDevice->wasapi.pAudioClientPlayback = data.pAudioClient; + pDevice->wasapi.pRenderClient = data.pRenderClient; + + pDevice->playback.internalFormat = data.formatOut; + pDevice->playback.internalChannels = data.channelsOut; + pDevice->playback.internalSampleRate = data.sampleRateOut; + MA_COPY_MEMORY(pDevice->playback.internalChannelMap, data.channelMapOut, sizeof(data.channelMapOut)); + pDevice->playback.internalPeriodSizeInFrames = data.periodSizeInFramesOut; + pDevice->playback.internalPeriods = data.periodsOut; + ma_strcpy_s(pDevice->playback.name, sizeof(pDevice->playback.name), data.deviceName); + + ma_IAudioClient_SetEventHandle((ma_IAudioClient*)pDevice->wasapi.pAudioClientPlayback, (HANDLE)pDevice->wasapi.hEventPlayback); + + pDevice->wasapi.periodSizeInFramesPlayback = data.periodSizeInFramesOut; + ma_IAudioClient_GetBufferSize((ma_IAudioClient*)pDevice->wasapi.pAudioClientPlayback, &pDevice->wasapi.actualBufferSizeInFramesPlayback); + + /* We must always have a valid ID because rerouting will look at it. */ + ma_strcpy_s_WCHAR(pDevice->playback.id.wasapi, sizeof(pDevice->playback.id.wasapi), data.id.wasapi); + } + + return MA_SUCCESS; +} + +static ma_result ma_device_init__wasapi(ma_device* pDevice, const ma_device_config* pConfig, ma_device_descriptor* pDescriptorPlayback, ma_device_descriptor* pDescriptorCapture) +{ + ma_result result = MA_SUCCESS; + +#if defined(MA_WIN32_DESKTOP) || defined(MA_WIN32_GDK) + HRESULT hr; + ma_IMMDeviceEnumerator* pDeviceEnumerator; +#endif + + MA_ASSERT(pDevice != NULL); + + MA_ZERO_OBJECT(&pDevice->wasapi); + pDevice->wasapi.usage = pConfig->wasapi.usage; + pDevice->wasapi.noAutoConvertSRC = pConfig->wasapi.noAutoConvertSRC; + pDevice->wasapi.noDefaultQualitySRC = pConfig->wasapi.noDefaultQualitySRC; + pDevice->wasapi.noHardwareOffloading = pConfig->wasapi.noHardwareOffloading; + pDevice->wasapi.loopbackProcessID = pConfig->wasapi.loopbackProcessID; + pDevice->wasapi.loopbackProcessExclude = pConfig->wasapi.loopbackProcessExclude; + + /* Exclusive mode is not allowed with loopback. 
*/ + if (pConfig->deviceType == ma_device_type_loopback && pConfig->playback.shareMode == ma_share_mode_exclusive) { + return MA_INVALID_DEVICE_CONFIG; + } + + if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex || pConfig->deviceType == ma_device_type_loopback) { + ma_device_init_internal_data__wasapi data; + data.formatIn = pDescriptorCapture->format; + data.channelsIn = pDescriptorCapture->channels; + data.sampleRateIn = pDescriptorCapture->sampleRate; + MA_COPY_MEMORY(data.channelMapIn, pDescriptorCapture->channelMap, sizeof(pDescriptorCapture->channelMap)); + data.periodSizeInFramesIn = pDescriptorCapture->periodSizeInFrames; + data.periodSizeInMillisecondsIn = pDescriptorCapture->periodSizeInMilliseconds; + data.periodsIn = pDescriptorCapture->periodCount; + data.shareMode = pDescriptorCapture->shareMode; + data.performanceProfile = pConfig->performanceProfile; + data.noAutoConvertSRC = pConfig->wasapi.noAutoConvertSRC; + data.noDefaultQualitySRC = pConfig->wasapi.noDefaultQualitySRC; + data.noHardwareOffloading = pConfig->wasapi.noHardwareOffloading; + data.loopbackProcessID = pConfig->wasapi.loopbackProcessID; + data.loopbackProcessExclude = pConfig->wasapi.loopbackProcessExclude; + + result = ma_device_init_internal__wasapi(pDevice->pContext, (pConfig->deviceType == ma_device_type_loopback) ? ma_device_type_loopback : ma_device_type_capture, pDescriptorCapture->pDeviceID, &data); + if (result != MA_SUCCESS) { + return result; + } + + pDevice->wasapi.pAudioClientCapture = data.pAudioClient; + pDevice->wasapi.pCaptureClient = data.pCaptureClient; + pDevice->wasapi.originalPeriodSizeInMilliseconds = pDescriptorCapture->periodSizeInMilliseconds; + pDevice->wasapi.originalPeriodSizeInFrames = pDescriptorCapture->periodSizeInFrames; + pDevice->wasapi.originalPeriods = pDescriptorCapture->periodCount; + pDevice->wasapi.originalPerformanceProfile = pConfig->performanceProfile; + + /* + The event for capture needs to be manual reset for the same reason as playback. We keep the initial state set to unsignaled, + however, because we want to block until we actually have something for the first call to ma_device_read(). + */ + pDevice->wasapi.hEventCapture = (ma_handle)CreateEventA(NULL, FALSE, FALSE, NULL); /* Auto reset, unsignaled by default. */ + if (pDevice->wasapi.hEventCapture == NULL) { + result = ma_result_from_GetLastError(GetLastError()); + + if (pDevice->wasapi.pCaptureClient != NULL) { + ma_IAudioCaptureClient_Release((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient); + pDevice->wasapi.pCaptureClient = NULL; + } + if (pDevice->wasapi.pAudioClientCapture != NULL) { + ma_IAudioClient_Release((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture); + pDevice->wasapi.pAudioClientCapture = NULL; + } + + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to create event for capture."); + return result; + } + ma_IAudioClient_SetEventHandle((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture, (HANDLE)pDevice->wasapi.hEventCapture); + + pDevice->wasapi.periodSizeInFramesCapture = data.periodSizeInFramesOut; + ma_IAudioClient_GetBufferSize((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture, &pDevice->wasapi.actualBufferSizeInFramesCapture); + + /* We must always have a valid ID. */ + ma_strcpy_s_WCHAR(pDevice->capture.id.wasapi, sizeof(pDevice->capture.id.wasapi), data.id.wasapi); + + /* The descriptor needs to be updated with actual values. 
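[Editor's note -- illustrative sketch, not part of this patch]
Aside on the two event configurations used by this backend (capture created above, playback created below). CreateEventA's second argument selects manual reset and its third the initial state, so the capture event starts unsignaled (the first read blocks until the device delivers data) while the playback event starts signaled (the first write can proceed immediately). Illustrative only; the my_ name is hypothetical.

#include <windows.h>

static void my_create_backend_events(HANDLE* phCapture, HANDLE* phPlayback)
{
    // CreateEventA(lpSecurity, bManualReset, bInitialState, lpName).
    *phCapture  = CreateEventA(NULL, FALSE, FALSE, NULL);  // Starts unsignaled: first read blocks.
    *phPlayback = CreateEventA(NULL, FALSE, TRUE,  NULL);  // Starts signaled: first write proceeds.
}

[end editor's note]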
*/
+        pDescriptorCapture->format = data.formatOut;
+        pDescriptorCapture->channels = data.channelsOut;
+        pDescriptorCapture->sampleRate = data.sampleRateOut;
+        MA_COPY_MEMORY(pDescriptorCapture->channelMap, data.channelMapOut, sizeof(data.channelMapOut));
+        pDescriptorCapture->periodSizeInFrames = data.periodSizeInFramesOut;
+        pDescriptorCapture->periodCount = data.periodsOut;
+    }
+
+    if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) {
+        ma_device_init_internal_data__wasapi data;
+        data.formatIn = pDescriptorPlayback->format;
+        data.channelsIn = pDescriptorPlayback->channels;
+        data.sampleRateIn = pDescriptorPlayback->sampleRate;
+        MA_COPY_MEMORY(data.channelMapIn, pDescriptorPlayback->channelMap, sizeof(pDescriptorPlayback->channelMap));
+        data.periodSizeInFramesIn = pDescriptorPlayback->periodSizeInFrames;
+        data.periodSizeInMillisecondsIn = pDescriptorPlayback->periodSizeInMilliseconds;
+        data.periodsIn = pDescriptorPlayback->periodCount;
+        data.shareMode = pDescriptorPlayback->shareMode;
+        data.performanceProfile = pConfig->performanceProfile;
+        data.noAutoConvertSRC = pConfig->wasapi.noAutoConvertSRC;
+        data.noDefaultQualitySRC = pConfig->wasapi.noDefaultQualitySRC;
+        data.noHardwareOffloading = pConfig->wasapi.noHardwareOffloading;
+        data.loopbackProcessID = pConfig->wasapi.loopbackProcessID;
+        data.loopbackProcessExclude = pConfig->wasapi.loopbackProcessExclude;
+
+        result = ma_device_init_internal__wasapi(pDevice->pContext, ma_device_type_playback, pDescriptorPlayback->pDeviceID, &data);
+        if (result != MA_SUCCESS) {
+            if (pConfig->deviceType == ma_device_type_duplex) {
+                if (pDevice->wasapi.pCaptureClient != NULL) {
+                    ma_IAudioCaptureClient_Release((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient);
+                    pDevice->wasapi.pCaptureClient = NULL;
+                }
+                if (pDevice->wasapi.pAudioClientCapture != NULL) {
+                    ma_IAudioClient_Release((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture);
+                    pDevice->wasapi.pAudioClientCapture = NULL;
+                }
+
+                CloseHandle((HANDLE)pDevice->wasapi.hEventCapture);
+                pDevice->wasapi.hEventCapture = NULL;
+            }
+            return result;
+        }
+
+        pDevice->wasapi.pAudioClientPlayback = data.pAudioClient;
+        pDevice->wasapi.pRenderClient = data.pRenderClient;
+        pDevice->wasapi.originalPeriodSizeInMilliseconds = pDescriptorPlayback->periodSizeInMilliseconds;
+        pDevice->wasapi.originalPeriodSizeInFrames = pDescriptorPlayback->periodSizeInFrames;
+        pDevice->wasapi.originalPeriods = pDescriptorPlayback->periodCount;
+        pDevice->wasapi.originalPerformanceProfile = pConfig->performanceProfile;
+
+        /*
+        The event for playback needs to be manual reset because we want to explicitly control the fact that it becomes signalled
+        only after the whole available space has been filled, never before.
+
+        The playback event also needs to be initially set to a signaled state so that the first call to ma_device_write() is able
+        to get past WaitForMultipleObjects().
+        */
+        pDevice->wasapi.hEventPlayback = (ma_handle)CreateEventA(NULL, FALSE, TRUE, NULL);   /* Auto reset, signaled by default. 
*/ + if (pDevice->wasapi.hEventPlayback == NULL) { + result = ma_result_from_GetLastError(GetLastError()); + + if (pConfig->deviceType == ma_device_type_duplex) { + if (pDevice->wasapi.pCaptureClient != NULL) { + ma_IAudioCaptureClient_Release((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient); + pDevice->wasapi.pCaptureClient = NULL; + } + if (pDevice->wasapi.pAudioClientCapture != NULL) { + ma_IAudioClient_Release((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture); + pDevice->wasapi.pAudioClientCapture = NULL; + } + + CloseHandle((HANDLE)pDevice->wasapi.hEventCapture); + pDevice->wasapi.hEventCapture = NULL; + } + + if (pDevice->wasapi.pRenderClient != NULL) { + ma_IAudioRenderClient_Release((ma_IAudioRenderClient*)pDevice->wasapi.pRenderClient); + pDevice->wasapi.pRenderClient = NULL; + } + if (pDevice->wasapi.pAudioClientPlayback != NULL) { + ma_IAudioClient_Release((ma_IAudioClient*)pDevice->wasapi.pAudioClientPlayback); + pDevice->wasapi.pAudioClientPlayback = NULL; + } + + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to create event for playback."); + return result; + } + ma_IAudioClient_SetEventHandle((ma_IAudioClient*)pDevice->wasapi.pAudioClientPlayback, (HANDLE)pDevice->wasapi.hEventPlayback); + + pDevice->wasapi.periodSizeInFramesPlayback = data.periodSizeInFramesOut; + ma_IAudioClient_GetBufferSize((ma_IAudioClient*)pDevice->wasapi.pAudioClientPlayback, &pDevice->wasapi.actualBufferSizeInFramesPlayback); + + /* We must always have a valid ID because rerouting will look at it. */ + ma_strcpy_s_WCHAR(pDevice->playback.id.wasapi, sizeof(pDevice->playback.id.wasapi), data.id.wasapi); + + /* The descriptor needs to be updated with actual values. */ + pDescriptorPlayback->format = data.formatOut; + pDescriptorPlayback->channels = data.channelsOut; + pDescriptorPlayback->sampleRate = data.sampleRateOut; + MA_COPY_MEMORY(pDescriptorPlayback->channelMap, data.channelMapOut, sizeof(data.channelMapOut)); + pDescriptorPlayback->periodSizeInFrames = data.periodSizeInFramesOut; + pDescriptorPlayback->periodCount = data.periodsOut; + } + + /* + We need to register a notification client to detect when the device has been disabled, unplugged or re-routed (when the default device changes). When + we are connecting to the default device we want to do automatic stream routing when the device is disabled or unplugged. Otherwise we want to just + stop the device outright and let the application handle it. 
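[Editor's note -- illustrative sketch, not part of this patch]
The eligibility rule described above, reduced to a sketch (my_ name hypothetical): automatic stream routing is only armed for endpoints opened against the system default, i.e. with a NULL device ID, and only when the config does not opt out.

static ma_bool32 my_allows_auto_stream_routing(ma_bool32 noAutoStreamRouting, const ma_device_id* pRequestedDeviceID)
{
    return (ma_bool32)(noAutoStreamRouting == MA_FALSE && pRequestedDeviceID == NULL);
}

[end editor's note]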
+ */ +#if defined(MA_WIN32_DESKTOP) || defined(MA_WIN32_GDK) + if (pConfig->wasapi.noAutoStreamRouting == MA_FALSE) { + if ((pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex || pConfig->deviceType == ma_device_type_loopback) && pConfig->capture.pDeviceID == NULL) { + pDevice->wasapi.allowCaptureAutoStreamRouting = MA_TRUE; + } + if ((pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) && pConfig->playback.pDeviceID == NULL) { + pDevice->wasapi.allowPlaybackAutoStreamRouting = MA_TRUE; + } + } + + ma_mutex_init(&pDevice->wasapi.rerouteLock); + + hr = ma_CoCreateInstance(pDevice->pContext, &MA_CLSID_MMDeviceEnumerator, NULL, CLSCTX_ALL, &MA_IID_IMMDeviceEnumerator, (void**)&pDeviceEnumerator); + if (FAILED(hr)) { + ma_device_uninit__wasapi(pDevice); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to create device enumerator."); + return ma_result_from_HRESULT(hr); + } + + pDevice->wasapi.notificationClient.lpVtbl = (void*)&g_maNotificationCientVtbl; + pDevice->wasapi.notificationClient.counter = 1; + pDevice->wasapi.notificationClient.pDevice = pDevice; + + hr = pDeviceEnumerator->lpVtbl->RegisterEndpointNotificationCallback(pDeviceEnumerator, &pDevice->wasapi.notificationClient); + if (SUCCEEDED(hr)) { + pDevice->wasapi.pDeviceEnumerator = (ma_ptr)pDeviceEnumerator; + } else { + /* Not the end of the world if we fail to register the notification callback. We just won't support automatic stream routing. */ + ma_IMMDeviceEnumerator_Release(pDeviceEnumerator); + } +#endif + + ma_atomic_bool32_set(&pDevice->wasapi.isStartedCapture, MA_FALSE); + ma_atomic_bool32_set(&pDevice->wasapi.isStartedPlayback, MA_FALSE); + + return MA_SUCCESS; +} + +static ma_result ma_device__get_available_frames__wasapi(ma_device* pDevice, ma_IAudioClient* pAudioClient, ma_uint32* pFrameCount) +{ + ma_uint32 paddingFramesCount; + HRESULT hr; + ma_share_mode shareMode; + + MA_ASSERT(pDevice != NULL); + MA_ASSERT(pFrameCount != NULL); + + *pFrameCount = 0; + + if ((ma_ptr)pAudioClient != pDevice->wasapi.pAudioClientPlayback && (ma_ptr)pAudioClient != pDevice->wasapi.pAudioClientCapture) { + return MA_INVALID_OPERATION; + } + + /* + I've had a report that GetCurrentPadding() is returning a frame count of 0 which is preventing + higher level function calls from doing anything because it thinks nothing is available. I have + taken a look at the documentation and it looks like this is unnecessary in exclusive mode. + + From Microsoft's documentation: + + For an exclusive-mode rendering or capture stream that was initialized with the + AUDCLNT_STREAMFLAGS_EVENTCALLBACK flag, the client typically has no use for the padding + value reported by GetCurrentPadding. Instead, the client accesses an entire buffer during + each processing pass. + + Considering this, I'm going to skip GetCurrentPadding() for exclusive mode and just report the + entire buffer. This depends on the caller making sure they wait on the event handler. + */ + shareMode = ((ma_ptr)pAudioClient == pDevice->wasapi.pAudioClientPlayback) ? pDevice->playback.shareMode : pDevice->capture.shareMode; + if (shareMode == ma_share_mode_shared) { + /* Shared mode. 
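[Editor's note -- illustrative sketch, not part of this patch]
A sketch of the shared-mode availability rule applied below (my_ name hypothetical): GetCurrentPadding() reports frames still queued, so for playback the writable space is the buffer size minus the padding, while for capture the padding itself is the readable frame count.

static ma_uint32 my_available_frames(ma_bool32 isPlayback, ma_uint32 bufferSizeInFrames, ma_uint32 paddingInFrames)
{
    return isPlayback ? (bufferSizeInFrames - paddingInFrames) : paddingInFrames;
}
// Example: a 960-frame playback buffer with 240 frames still queued leaves 720 writable.

[end editor's note]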
*/ + hr = ma_IAudioClient_GetCurrentPadding(pAudioClient, &paddingFramesCount); + if (FAILED(hr)) { + return ma_result_from_HRESULT(hr); + } + + if ((ma_ptr)pAudioClient == pDevice->wasapi.pAudioClientPlayback) { + *pFrameCount = pDevice->wasapi.actualBufferSizeInFramesPlayback - paddingFramesCount; + } else { + *pFrameCount = paddingFramesCount; + } + } else { + /* Exclusive mode. */ + if ((ma_ptr)pAudioClient == pDevice->wasapi.pAudioClientPlayback) { + *pFrameCount = pDevice->wasapi.actualBufferSizeInFramesPlayback; + } else { + *pFrameCount = pDevice->wasapi.actualBufferSizeInFramesCapture; + } + } + + return MA_SUCCESS; +} + + +static ma_result ma_device_reroute__wasapi(ma_device* pDevice, ma_device_type deviceType) +{ + ma_result result; + + if (deviceType == ma_device_type_duplex) { + return MA_INVALID_ARGS; + } + + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "=== CHANGING DEVICE ===\n"); + + result = ma_device_reinit__wasapi(pDevice, deviceType); + if (result != MA_SUCCESS) { + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_WARNING, "[WASAPI] Reinitializing device after route change failed.\n"); + return result; + } + + ma_device__post_init_setup(pDevice, deviceType); + ma_device__on_notification_rerouted(pDevice); + + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "=== DEVICE CHANGED ===\n"); + + return MA_SUCCESS; +} + +static ma_result ma_device_start__wasapi_nolock(ma_device* pDevice) +{ + HRESULT hr; + + if (pDevice->pContext->wasapi.hAvrt) { + const char* pTaskName = ma_to_usage_string__wasapi(pDevice->wasapi.usage); + if (pTaskName) { + DWORD idx = 0; + pDevice->wasapi.hAvrtHandle = (ma_handle)((MA_PFN_AvSetMmThreadCharacteristicsA)pDevice->pContext->wasapi.AvSetMmThreadCharacteristicsA)(pTaskName, &idx); + } + } + + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex || pDevice->type == ma_device_type_loopback) { + hr = ma_IAudioClient_Start((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture); + if (FAILED(hr)) { + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to start internal capture device. HRESULT = %d.", (int)hr); + return ma_result_from_HRESULT(hr); + } + + ma_atomic_bool32_set(&pDevice->wasapi.isStartedCapture, MA_TRUE); + } + + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + hr = ma_IAudioClient_Start((ma_IAudioClient*)pDevice->wasapi.pAudioClientPlayback); + if (FAILED(hr)) { + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to start internal playback device. HRESULT = %d.", (int)hr); + return ma_result_from_HRESULT(hr); + } + + ma_atomic_bool32_set(&pDevice->wasapi.isStartedPlayback, MA_TRUE); + } + + return MA_SUCCESS; +} + +static ma_result ma_device_start__wasapi(ma_device* pDevice) +{ + ma_result result; + + MA_ASSERT(pDevice != NULL); + + /* Wait for any rerouting to finish before attempting to start the device. 
*/ + ma_mutex_lock(&pDevice->wasapi.rerouteLock); + { + result = ma_device_start__wasapi_nolock(pDevice); + } + ma_mutex_unlock(&pDevice->wasapi.rerouteLock); + + return result; +} + +static ma_result ma_device_stop__wasapi_nolock(ma_device* pDevice) +{ + ma_result result; + HRESULT hr; + + MA_ASSERT(pDevice != NULL); + + if (pDevice->wasapi.hAvrtHandle) { + ((MA_PFN_AvRevertMmThreadCharacteristics)pDevice->pContext->wasapi.AvRevertMmThreadcharacteristics)((HANDLE)pDevice->wasapi.hAvrtHandle); + pDevice->wasapi.hAvrtHandle = NULL; + } + + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex || pDevice->type == ma_device_type_loopback) { + /* If we have a mapped buffer we need to release it. */ + if (pDevice->wasapi.pMappedBufferCapture != NULL) { + ma_IAudioCaptureClient_ReleaseBuffer((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient, pDevice->wasapi.mappedBufferCaptureCap); + pDevice->wasapi.pMappedBufferCapture = NULL; + pDevice->wasapi.mappedBufferCaptureCap = 0; + pDevice->wasapi.mappedBufferCaptureLen = 0; + } + + hr = ma_IAudioClient_Stop((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture); + if (FAILED(hr)) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to stop internal capture device."); + return ma_result_from_HRESULT(hr); + } + + /* The audio client needs to be reset otherwise restarting will fail. */ + hr = ma_IAudioClient_Reset((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture); + if (FAILED(hr)) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to reset internal capture device."); + return ma_result_from_HRESULT(hr); + } + + ma_atomic_bool32_set(&pDevice->wasapi.isStartedCapture, MA_FALSE); + } + + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + if (pDevice->wasapi.pMappedBufferPlayback != NULL) { + ma_silence_pcm_frames( + ma_offset_pcm_frames_ptr(pDevice->wasapi.pMappedBufferPlayback, pDevice->wasapi.mappedBufferPlaybackLen, pDevice->playback.internalFormat, pDevice->playback.internalChannels), + pDevice->wasapi.mappedBufferPlaybackCap - pDevice->wasapi.mappedBufferPlaybackLen, + pDevice->playback.internalFormat, pDevice->playback.internalChannels + ); + ma_IAudioRenderClient_ReleaseBuffer((ma_IAudioRenderClient*)pDevice->wasapi.pRenderClient, pDevice->wasapi.mappedBufferPlaybackCap, 0); + pDevice->wasapi.pMappedBufferPlayback = NULL; + pDevice->wasapi.mappedBufferPlaybackCap = 0; + pDevice->wasapi.mappedBufferPlaybackLen = 0; + } + + /* + The buffer needs to be drained before stopping the device. Not doing this will result in the last few frames not getting output to + the speakers. This is a problem for very short sounds because it'll result in a significant portion of it not getting played. + */ + if (ma_atomic_bool32_get(&pDevice->wasapi.isStartedPlayback)) { + /* We need to make sure we put a timeout here or else we'll risk getting stuck in a deadlock in some cases. 
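[Editor's note -- illustrative sketch, not part of this patch]
The timeout computed below is simply the time the full playback buffer takes to play out, which bounds how long a drain can possibly need. Sketch, my_ name hypothetical:

static DWORD my_drain_timeout_in_milliseconds(ma_uint32 bufferSizeInFrames, ma_uint32 sampleRate)
{
    return (DWORD)(((ma_uint64)bufferSizeInFrames * 1000) / sampleRate);
}
// Example: 4800 frames at 48000 Hz gives a 100 ms cap on the wait.

[end editor's note]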
*/ + DWORD waitTime = (pDevice->wasapi.actualBufferSizeInFramesPlayback * 1000) / pDevice->playback.internalSampleRate; + + if (pDevice->playback.shareMode == ma_share_mode_exclusive) { + WaitForSingleObject((HANDLE)pDevice->wasapi.hEventPlayback, waitTime); + } else { + ma_uint32 prevFramesAvailablePlayback = (ma_uint32)-1; + ma_uint32 framesAvailablePlayback; + for (;;) { + result = ma_device__get_available_frames__wasapi(pDevice, (ma_IAudioClient*)pDevice->wasapi.pAudioClientPlayback, &framesAvailablePlayback); + if (result != MA_SUCCESS) { + break; + } + + if (framesAvailablePlayback >= pDevice->wasapi.actualBufferSizeInFramesPlayback) { + break; + } + + /* + Just a safety check to avoid an infinite loop. If this iteration results in a situation where the number of available frames + has not changed, get out of the loop. I don't think this should ever happen, but I think it's nice to have just in case. + */ + if (framesAvailablePlayback == prevFramesAvailablePlayback) { + break; + } + prevFramesAvailablePlayback = framesAvailablePlayback; + + ResetEvent((HANDLE)pDevice->wasapi.hEventPlayback); /* Manual reset. */ + WaitForSingleObject((HANDLE)pDevice->wasapi.hEventPlayback, waitTime); + } + } + } + + hr = ma_IAudioClient_Stop((ma_IAudioClient*)pDevice->wasapi.pAudioClientPlayback); + if (FAILED(hr)) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to stop internal playback device."); + return ma_result_from_HRESULT(hr); + } + + /* The audio client needs to be reset otherwise restarting will fail. */ + { + ma_int32 retries = 5; + + while ((hr = ma_IAudioClient_Reset((ma_IAudioClient*)pDevice->wasapi.pAudioClientPlayback)) == MA_AUDCLNT_E_BUFFER_OPERATION_PENDING && retries > 0) { + ma_sleep(10); + retries -= 1; + } + } + + if (FAILED(hr)) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to reset internal playback device."); + return ma_result_from_HRESULT(hr); + } + + ma_atomic_bool32_set(&pDevice->wasapi.isStartedPlayback, MA_FALSE); + } + + return MA_SUCCESS; +} + +static ma_result ma_device_stop__wasapi(ma_device* pDevice) +{ + ma_result result; + + MA_ASSERT(pDevice != NULL); + + /* Wait for any rerouting to finish before attempting to stop the device. */ + ma_mutex_lock(&pDevice->wasapi.rerouteLock); + { + result = ma_device_stop__wasapi_nolock(pDevice); + } + ma_mutex_unlock(&pDevice->wasapi.rerouteLock); + + return result; +} + + +#ifndef MA_WASAPI_WAIT_TIMEOUT_MILLISECONDS +#define MA_WASAPI_WAIT_TIMEOUT_MILLISECONDS 5000 +#endif + +static ma_result ma_device_read__wasapi(ma_device* pDevice, void* pFrames, ma_uint32 frameCount, ma_uint32* pFramesRead) +{ + ma_result result = MA_SUCCESS; + ma_uint32 totalFramesProcessed = 0; + + /* + When reading, we need to get a buffer and process all of it before releasing it. Because the + frame count (frameCount) can be different to the size of the buffer, we'll need to cache the + pointer to the buffer. + */ + + /* Keep running until we've processed the requested number of frames. */ + while (ma_device_get_state(pDevice) == ma_device_state_started && totalFramesProcessed < frameCount) { + ma_uint32 framesRemaining = frameCount - totalFramesProcessed; + + /* If we have a mapped data buffer, consume that first. */ + if (pDevice->wasapi.pMappedBufferCapture != NULL) { + /* We have a cached data pointer so consume that before grabbing another one from WASAPI. 
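[Editor's note -- illustrative sketch, not part of this patch]
Aside on the bookkeeping used throughout this read path: mappedBufferCaptureCap is the total size of the buffer mapped by GetBuffer() and mappedBufferCaptureLen counts the frames not yet consumed, so the current read offset is always cap minus len. Sketch with hypothetical my_ names:

typedef struct
{
    const void* pMapped;  // From IAudioCaptureClient::GetBuffer().
    ma_uint32   cap;      // Total frames in the mapped buffer.
    ma_uint32   len;      // Frames still waiting to be consumed.
} my_mapped_capture_buffer;

static ma_uint32 my_read_offset_in_frames(const my_mapped_capture_buffer* pBuf)
{
    return pBuf->cap - pBuf->len;  // Frames consumed so far.
}

[end editor's note]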
*/ + ma_uint32 framesToProcessNow = framesRemaining; + if (framesToProcessNow > pDevice->wasapi.mappedBufferCaptureLen) { + framesToProcessNow = pDevice->wasapi.mappedBufferCaptureLen; + } + + /* Now just copy the data over to the output buffer. */ + ma_copy_pcm_frames( + ma_offset_pcm_frames_ptr(pFrames, totalFramesProcessed, pDevice->capture.internalFormat, pDevice->capture.internalChannels), + ma_offset_pcm_frames_const_ptr(pDevice->wasapi.pMappedBufferCapture, pDevice->wasapi.mappedBufferCaptureCap - pDevice->wasapi.mappedBufferCaptureLen, pDevice->capture.internalFormat, pDevice->capture.internalChannels), + framesToProcessNow, + pDevice->capture.internalFormat, pDevice->capture.internalChannels + ); + + totalFramesProcessed += framesToProcessNow; + pDevice->wasapi.mappedBufferCaptureLen -= framesToProcessNow; + + /* If the data buffer has been fully consumed we need to release it. */ + if (pDevice->wasapi.mappedBufferCaptureLen == 0) { + ma_IAudioCaptureClient_ReleaseBuffer((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient, pDevice->wasapi.mappedBufferCaptureCap); + pDevice->wasapi.pMappedBufferCapture = NULL; + pDevice->wasapi.mappedBufferCaptureCap = 0; + } + } else { + /* We don't have any cached data pointer, so grab another one. */ + HRESULT hr; + DWORD flags = 0; + + /* First just ask WASAPI for a data buffer. If it's not available, we'll wait for more. */ + hr = ma_IAudioCaptureClient_GetBuffer((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient, (BYTE**)&pDevice->wasapi.pMappedBufferCapture, &pDevice->wasapi.mappedBufferCaptureCap, &flags, NULL, NULL); + if (hr == S_OK) { + /* We got a data buffer. Continue to the next loop iteration which will then read from the mapped pointer. */ + pDevice->wasapi.mappedBufferCaptureLen = pDevice->wasapi.mappedBufferCaptureCap; + + /* + There have been reports that indicate that at times the AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY is reported for every + call to IAudioCaptureClient_GetBuffer() above which results in spamming of the debug messages below. To partially + work around this, I'm only outputting these messages when MA_DEBUG_OUTPUT is explicitly defined. The better solution + would be to figure out why the flag is always getting reported. + */ + #if defined(MA_DEBUG_OUTPUT) + { + if (flags != 0) { + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "[WASAPI] Capture Flags: %ld\n", flags); + + if ((flags & MA_AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY) != 0) { + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "[WASAPI] Data discontinuity (possible overrun). Attempting recovery. mappedBufferCaptureCap=%d\n", pDevice->wasapi.mappedBufferCaptureCap); + } + } + } + #endif + + /* Overrun detection. */ + if ((flags & MA_AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY) != 0) { + /* Glitched. Probably due to an overrun. */ + + /* + If we got an overrun it probably means we're straddling the end of the buffer. In normal capture + mode this is the fault of the client application because they're responsible for ensuring data is + processed fast enough. In duplex mode, however, the processing of audio is tied to the playback + device, so this can possibly be the result of a timing de-sync. + + In capture mode we're not going to do any kind of recovery because the real fix is for the client + application to process faster. In duplex mode, we'll treat this as a desync and reset the buffers + to prevent a never-ending sequence of glitches due to straddling the end of the buffer. 
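[Editor's note -- illustrative sketch, not part of this patch]
The recovery heuristic described above, as a sketch (my_ name hypothetical): rather than draining the whole capture buffer and risking an underrun, release roughly half of the periods it holds, rounding up.

static ma_uint32 my_recovery_iteration_count(ma_uint32 bufferSizeInFrames, ma_uint32 periodSizeInFrames)
{
    ma_uint32 periodCount = bufferSizeInFrames / periodSizeInFrames;
    return (periodCount / 2) + (periodCount % 2);  // ceil(periodCount / 2)
}
// Example: a 3-period buffer gives 2 release/get iterations.

[end editor's note]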
+                    */
+                    if (pDevice->type == ma_device_type_duplex) {
+                        /*
+                        Experiment:
+
+                        If we empty out the *entire* buffer we may end up putting ourselves into an underrun position
+                        which isn't really any better than the overrun we're probably in right now. Instead we'll just
+                        empty out about half.
+                        */
+                        ma_uint32 i;
+                        ma_uint32 periodCount = (pDevice->wasapi.actualBufferSizeInFramesCapture / pDevice->wasapi.periodSizeInFramesCapture);
+                        ma_uint32 iterationCount = periodCount / 2;
+                        if ((periodCount % 2) > 0) {
+                            iterationCount += 1;
+                        }
+
+                        for (i = 0; i < iterationCount; i += 1) {
+                            hr = ma_IAudioCaptureClient_ReleaseBuffer((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient, pDevice->wasapi.mappedBufferCaptureCap);
+                            if (FAILED(hr)) {
+                                ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "[WASAPI] Data discontinuity recovery: IAudioCaptureClient_ReleaseBuffer() failed with %ld.\n", hr);
+                                break;
+                            }
+
+                            flags = 0;
+                            hr = ma_IAudioCaptureClient_GetBuffer((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient, (BYTE**)&pDevice->wasapi.pMappedBufferCapture, &pDevice->wasapi.mappedBufferCaptureCap, &flags, NULL, NULL);
+                            if (hr == MA_AUDCLNT_S_BUFFER_EMPTY || FAILED(hr)) {
+                                /*
+                                The buffer has been completely emptied or an error occurred. In this case we'll need
+                                to reset the state of the mapped buffer which will trigger the next iteration to get
+                                a fresh buffer from WASAPI.
+                                */
+                                pDevice->wasapi.pMappedBufferCapture = NULL;
+                                pDevice->wasapi.mappedBufferCaptureCap = 0;
+                                pDevice->wasapi.mappedBufferCaptureLen = 0;
+
+                                if (hr == MA_AUDCLNT_S_BUFFER_EMPTY) {
+                                    if ((flags & MA_AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY) != 0) {
+                                        ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "[WASAPI] Data discontinuity recovery: Buffer emptied, and data discontinuity still reported.\n");
+                                    } else {
+                                        ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "[WASAPI] Data discontinuity recovery: Buffer emptied.\n");
+                                    }
+                                }
+
+                                if (FAILED(hr)) {
+                                    ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "[WASAPI] Data discontinuity recovery: IAudioCaptureClient_GetBuffer() failed with %ld.\n", hr);
+                                }
+
+                                break;
+                            }
+                        }
+
+                        /* If at this point we have a valid buffer mapped, make sure the buffer length is set appropriately. */
+                        if (pDevice->wasapi.pMappedBufferCapture != NULL) {
+                            pDevice->wasapi.mappedBufferCaptureLen = pDevice->wasapi.mappedBufferCaptureCap;
+                        }
+                    }
+                }
+
+                continue;
+            } else {
+                if (hr == MA_AUDCLNT_S_BUFFER_EMPTY || hr == MA_AUDCLNT_E_BUFFER_ERROR) {
+                    /*
+                    No data is available. We need to wait for more. There are two situations to consider
+                    here. The first is normal capture mode. If this times out it probably means the
+                    microphone isn't delivering data for whatever reason. In this case we'll just
+                    abort the read and return whatever we were able to get. The other situation is
+                    loopback mode, in which case a timeout probably just means that nothing is playing
+                    through the speakers.
+                    */
+
+                    /* Experiment: Use a shorter timeout for loopback mode. */
+                    DWORD timeoutInMilliseconds = MA_WASAPI_WAIT_TIMEOUT_MILLISECONDS;
+                    if (pDevice->type == ma_device_type_loopback) {
+                        timeoutInMilliseconds = 10;
+                    }
+
+                    if (WaitForSingleObject((HANDLE)pDevice->wasapi.hEventCapture, timeoutInMilliseconds) != WAIT_OBJECT_0) {
+                        if (pDevice->type == ma_device_type_loopback) {
+                            continue; /* Keep waiting in loopback mode. */
+                        } else {
+                            result = MA_ERROR;
+                            break; /* Wait failed.
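+                                      Probably timed out.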
*/
+                        }
+                    }
+
+                    /* At this point we should be able to loop back to the start of the loop and try retrieving a data buffer again. */
+                } else {
+                    /* An error occurred and we need to abort. */
+                    ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to retrieve internal buffer from capture device in preparation for reading from the device. HRESULT = %d. Stopping device.\n", (int)hr);
+                    result = ma_result_from_HRESULT(hr);
+                    break;
+                }
+            }
+        }
+    }
+
+    /*
+    If we were unable to process the entire requested frame count, but we still have a mapped buffer,
+    there's a good chance either an error occurred or the device was stopped mid-read. In this case
+    we'll need to make sure the buffer is released.
+    */
+    if (totalFramesProcessed < frameCount && pDevice->wasapi.pMappedBufferCapture != NULL) {
+        ma_IAudioCaptureClient_ReleaseBuffer((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient, pDevice->wasapi.mappedBufferCaptureCap);
+        pDevice->wasapi.pMappedBufferCapture = NULL;
+        pDevice->wasapi.mappedBufferCaptureCap = 0;
+        pDevice->wasapi.mappedBufferCaptureLen = 0;
+    }
+
+    if (pFramesRead != NULL) {
+        *pFramesRead = totalFramesProcessed;
+    }
+
+    return result;
+}
+
+static ma_result ma_device_write__wasapi(ma_device* pDevice, const void* pFrames, ma_uint32 frameCount, ma_uint32* pFramesWritten)
+{
+    ma_result result = MA_SUCCESS;
+    ma_uint32 totalFramesProcessed = 0;
+
+    /* Keep writing to the device until it's stopped or we've consumed all of our input. */
+    while (ma_device_get_state(pDevice) == ma_device_state_started && totalFramesProcessed < frameCount) {
+        ma_uint32 framesRemaining = frameCount - totalFramesProcessed;
+
+        /*
+        We're going to do this in a similar way to capture. We'll first check if the cached data pointer
+        is valid, and if so, read from that. Otherwise we will call IAudioRenderClient_GetBuffer() with
+        a requested buffer size equal to our actual period size. If it returns AUDCLNT_E_BUFFER_TOO_LARGE
+        it means we need to wait for some data to become available.
+        */
+        if (pDevice->wasapi.pMappedBufferPlayback != NULL) {
+            /* We still have some space available in the mapped data buffer. Write to it. */
+            ma_uint32 framesToProcessNow = framesRemaining;
+            if (framesToProcessNow > (pDevice->wasapi.mappedBufferPlaybackCap - pDevice->wasapi.mappedBufferPlaybackLen)) {
+                framesToProcessNow = (pDevice->wasapi.mappedBufferPlaybackCap - pDevice->wasapi.mappedBufferPlaybackLen);
+            }
+
+            /* Now just copy the data over to the output buffer. */
+            ma_copy_pcm_frames(
+                ma_offset_pcm_frames_ptr(pDevice->wasapi.pMappedBufferPlayback, pDevice->wasapi.mappedBufferPlaybackLen, pDevice->playback.internalFormat, pDevice->playback.internalChannels),
+                ma_offset_pcm_frames_const_ptr(pFrames, totalFramesProcessed, pDevice->playback.internalFormat, pDevice->playback.internalChannels),
+                framesToProcessNow,
+                pDevice->playback.internalFormat, pDevice->playback.internalChannels
+            );
+
+            totalFramesProcessed += framesToProcessNow;
+            pDevice->wasapi.mappedBufferPlaybackLen += framesToProcessNow;
+
+            /* If the data buffer has been fully consumed we need to release it.
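+               For example (illustrative numbers): with mappedBufferPlaybackCap = 441 and mappedBufferPlaybackLen = 300
+               there is room for 141 more frames, and the release below fires only once mappedBufferPlaybackLen reaches
+               mappedBufferPlaybackCap.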
*/ + if (pDevice->wasapi.mappedBufferPlaybackLen == pDevice->wasapi.mappedBufferPlaybackCap) { + ma_IAudioRenderClient_ReleaseBuffer((ma_IAudioRenderClient*)pDevice->wasapi.pRenderClient, pDevice->wasapi.mappedBufferPlaybackCap, 0); + pDevice->wasapi.pMappedBufferPlayback = NULL; + pDevice->wasapi.mappedBufferPlaybackCap = 0; + pDevice->wasapi.mappedBufferPlaybackLen = 0; + + /* + In exclusive mode we need to wait here. Exclusive mode is weird because GetBuffer() never + seems to return AUDCLNT_E_BUFFER_TOO_LARGE, which is what we normally use to determine + whether or not we need to wait for more data. + */ + if (pDevice->playback.shareMode == ma_share_mode_exclusive) { + if (WaitForSingleObject((HANDLE)pDevice->wasapi.hEventPlayback, MA_WASAPI_WAIT_TIMEOUT_MILLISECONDS) != WAIT_OBJECT_0) { + result = MA_ERROR; + break; /* Wait failed. Probably timed out. */ + } + } + } + } else { + /* We don't have a mapped data buffer so we'll need to get one. */ + HRESULT hr; + ma_uint32 bufferSizeInFrames; + + /* Special rules for exclusive mode. */ + if (pDevice->playback.shareMode == ma_share_mode_exclusive) { + bufferSizeInFrames = pDevice->wasapi.actualBufferSizeInFramesPlayback; + } else { + bufferSizeInFrames = pDevice->wasapi.periodSizeInFramesPlayback; + } + + hr = ma_IAudioRenderClient_GetBuffer((ma_IAudioRenderClient*)pDevice->wasapi.pRenderClient, bufferSizeInFrames, (BYTE**)&pDevice->wasapi.pMappedBufferPlayback); + if (hr == S_OK) { + /* We have data available. */ + pDevice->wasapi.mappedBufferPlaybackCap = bufferSizeInFrames; + pDevice->wasapi.mappedBufferPlaybackLen = 0; + } else { + if (hr == MA_AUDCLNT_E_BUFFER_TOO_LARGE || hr == MA_AUDCLNT_E_BUFFER_ERROR) { + /* Not enough data available. We need to wait for more. */ + if (WaitForSingleObject((HANDLE)pDevice->wasapi.hEventPlayback, MA_WASAPI_WAIT_TIMEOUT_MILLISECONDS) != WAIT_OBJECT_0) { + result = MA_ERROR; + break; /* Wait failed. Probably timed out. */ + } + } else { + /* Some error occurred. We'll need to abort. */ + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to retrieve internal buffer from playback device in preparation for writing to the device. HRESULT = %d. 
Stopping device.\n", (int)hr); + result = ma_result_from_HRESULT(hr); + break; + } + } + } + } + + if (pFramesWritten != NULL) { + *pFramesWritten = totalFramesProcessed; + } + + return result; +} + +static ma_result ma_device_data_loop_wakeup__wasapi(ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex || pDevice->type == ma_device_type_loopback) { + SetEvent((HANDLE)pDevice->wasapi.hEventCapture); + } + + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + SetEvent((HANDLE)pDevice->wasapi.hEventPlayback); + } + + return MA_SUCCESS; +} + + +static ma_result ma_context_uninit__wasapi(ma_context* pContext) +{ + ma_context_command__wasapi cmd = ma_context_init_command__wasapi(MA_CONTEXT_COMMAND_QUIT__WASAPI); + + MA_ASSERT(pContext != NULL); + MA_ASSERT(pContext->backend == ma_backend_wasapi); + + ma_context_post_command__wasapi(pContext, &cmd); + ma_thread_wait(&pContext->wasapi.commandThread); + + if (pContext->wasapi.hAvrt) { + ma_dlclose(ma_context_get_log(pContext), pContext->wasapi.hAvrt); + pContext->wasapi.hAvrt = NULL; + } + + #if defined(MA_WIN32_UWP) + { + if (pContext->wasapi.hMMDevapi) { + ma_dlclose(ma_context_get_log(pContext), pContext->wasapi.hMMDevapi); + pContext->wasapi.hMMDevapi = NULL; + } + } + #endif + + /* Only after the thread has been terminated can we uninitialize the sync objects for the command thread. */ + ma_semaphore_uninit(&pContext->wasapi.commandSem); + ma_mutex_uninit(&pContext->wasapi.commandLock); + + return MA_SUCCESS; +} + +static ma_result ma_context_init__wasapi(ma_context* pContext, const ma_context_config* pConfig, ma_backend_callbacks* pCallbacks) +{ + ma_result result = MA_SUCCESS; + + MA_ASSERT(pContext != NULL); + + (void)pConfig; + +#ifdef MA_WIN32_DESKTOP + /* + WASAPI is only supported in Vista SP1 and newer. The reason for SP1 and not the base version of Vista is that event-driven + exclusive mode does not work until SP1. + + Unfortunately older compilers don't define these functions so we need to dynamically load them in order to avoid a link error. 
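+    As a concrete reading of the check below: MA_WIN32_WINNT_VISTA is 0x0600 (mirroring the SDK's
+    _WIN32_WINNT_VISTA), so (0x0600 >> 8) & 0xFF = 6 and (0x0600 >> 0) & 0xFF = 0, i.e. dwMajorVersion = 6
+    and dwMinorVersion = 0, which together with wServicePackMajor = 1 asks VerifyVersionInfoW() for
+    "Windows 6.0 SP1 or newer".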
+ */ + { + ma_OSVERSIONINFOEXW osvi; + ma_handle kernel32DLL; + ma_PFNVerifyVersionInfoW _VerifyVersionInfoW; + ma_PFNVerSetConditionMask _VerSetConditionMask; + + kernel32DLL = ma_dlopen(ma_context_get_log(pContext), "kernel32.dll"); + if (kernel32DLL == NULL) { + return MA_NO_BACKEND; + } + + _VerifyVersionInfoW = (ma_PFNVerifyVersionInfoW )ma_dlsym(ma_context_get_log(pContext), kernel32DLL, "VerifyVersionInfoW"); + _VerSetConditionMask = (ma_PFNVerSetConditionMask)ma_dlsym(ma_context_get_log(pContext), kernel32DLL, "VerSetConditionMask"); + if (_VerifyVersionInfoW == NULL || _VerSetConditionMask == NULL) { + ma_dlclose(ma_context_get_log(pContext), kernel32DLL); + return MA_NO_BACKEND; + } + + MA_ZERO_OBJECT(&osvi); + osvi.dwOSVersionInfoSize = sizeof(osvi); + osvi.dwMajorVersion = ((MA_WIN32_WINNT_VISTA >> 8) & 0xFF); + osvi.dwMinorVersion = ((MA_WIN32_WINNT_VISTA >> 0) & 0xFF); + osvi.wServicePackMajor = 1; + if (_VerifyVersionInfoW(&osvi, MA_VER_MAJORVERSION | MA_VER_MINORVERSION | MA_VER_SERVICEPACKMAJOR, _VerSetConditionMask(_VerSetConditionMask(_VerSetConditionMask(0, MA_VER_MAJORVERSION, MA_VER_GREATER_EQUAL), MA_VER_MINORVERSION, MA_VER_GREATER_EQUAL), MA_VER_SERVICEPACKMAJOR, MA_VER_GREATER_EQUAL))) { + result = MA_SUCCESS; + } else { + result = MA_NO_BACKEND; + } + + ma_dlclose(ma_context_get_log(pContext), kernel32DLL); + } +#endif + + if (result != MA_SUCCESS) { + return result; + } + + MA_ZERO_OBJECT(&pContext->wasapi); + + + #if defined(MA_WIN32_UWP) + { + /* Link to mmdevapi so we can get access to ActivateAudioInterfaceAsync(). */ + pContext->wasapi.hMMDevapi = ma_dlopen(ma_context_get_log(pContext), "mmdevapi.dll"); + if (pContext->wasapi.hMMDevapi) { + pContext->wasapi.ActivateAudioInterfaceAsync = ma_dlsym(ma_context_get_log(pContext), pContext->wasapi.hMMDevapi, "ActivateAudioInterfaceAsync"); + if (pContext->wasapi.ActivateAudioInterfaceAsync == NULL) { + ma_dlclose(ma_context_get_log(pContext), pContext->wasapi.hMMDevapi); + return MA_NO_BACKEND; /* ActivateAudioInterfaceAsync() could not be loaded. */ + } + } else { + return MA_NO_BACKEND; /* Failed to load mmdevapi.dll which is required for ActivateAudioInterfaceAsync() */ + } + } + #endif + + /* Optionally use the Avrt API to specify the audio thread's latency sensitivity requirements */ + pContext->wasapi.hAvrt = ma_dlopen(ma_context_get_log(pContext), "avrt.dll"); + if (pContext->wasapi.hAvrt) { + pContext->wasapi.AvSetMmThreadCharacteristicsA = ma_dlsym(ma_context_get_log(pContext), pContext->wasapi.hAvrt, "AvSetMmThreadCharacteristicsA"); + pContext->wasapi.AvRevertMmThreadcharacteristics = ma_dlsym(ma_context_get_log(pContext), pContext->wasapi.hAvrt, "AvRevertMmThreadCharacteristics"); + + /* If either function could not be found, disable use of avrt entirely. */ + if (!pContext->wasapi.AvSetMmThreadCharacteristicsA || !pContext->wasapi.AvRevertMmThreadcharacteristics) { + pContext->wasapi.AvSetMmThreadCharacteristicsA = NULL; + pContext->wasapi.AvRevertMmThreadcharacteristics = NULL; + ma_dlclose(ma_context_get_log(pContext), pContext->wasapi.hAvrt); + pContext->wasapi.hAvrt = NULL; + } + } + + + /* + Annoyingly, WASAPI does not allow you to release an IAudioClient object from a different thread + than the one that retrieved it with GetService(). This can result in a deadlock in two + situations: + + 1) When calling ma_device_uninit() from a different thread to ma_device_init(); and + 2) When uninitializing and reinitializing the internal IAudioClient object in response to + automatic stream routing. 
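+    (As an illustration of case 1: if the device is created on the main thread and ma_device_uninit()
+    is later called from a worker thread, the worker would end up calling Release() on an object that
+    GetService() handed out on the main thread, which is exactly the scenario described below.)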
+ + We could define ma_device_uninit() such that it must be called on the same thread as + ma_device_init(). We could also just not release the IAudioClient when performing automatic + stream routing to avoid the deadlock. Neither of these are acceptable solutions in my view so + we're going to have to work around this with a worker thread. This is not ideal, but I can't + think of a better way to do this. + + More information about this can be found here: + + https://docs.microsoft.com/en-us/windows/win32/api/audioclient/nn-audioclient-iaudiorenderclient + + Note this section: + + When releasing an IAudioRenderClient interface instance, the client must call the interface's + Release method from the same thread as the call to IAudioClient::GetService that created the + object. + */ + { + result = ma_mutex_init(&pContext->wasapi.commandLock); + if (result != MA_SUCCESS) { + return result; + } + + result = ma_semaphore_init(0, &pContext->wasapi.commandSem); + if (result != MA_SUCCESS) { + ma_mutex_uninit(&pContext->wasapi.commandLock); + return result; + } + + result = ma_thread_create(&pContext->wasapi.commandThread, ma_thread_priority_normal, 0, ma_context_command_thread__wasapi, pContext, &pContext->allocationCallbacks); + if (result != MA_SUCCESS) { + ma_semaphore_uninit(&pContext->wasapi.commandSem); + ma_mutex_uninit(&pContext->wasapi.commandLock); + return result; + } + } + + + pCallbacks->onContextInit = ma_context_init__wasapi; + pCallbacks->onContextUninit = ma_context_uninit__wasapi; + pCallbacks->onContextEnumerateDevices = ma_context_enumerate_devices__wasapi; + pCallbacks->onContextGetDeviceInfo = ma_context_get_device_info__wasapi; + pCallbacks->onDeviceInit = ma_device_init__wasapi; + pCallbacks->onDeviceUninit = ma_device_uninit__wasapi; + pCallbacks->onDeviceStart = ma_device_start__wasapi; + pCallbacks->onDeviceStop = ma_device_stop__wasapi; + pCallbacks->onDeviceRead = ma_device_read__wasapi; + pCallbacks->onDeviceWrite = ma_device_write__wasapi; + pCallbacks->onDeviceDataLoop = NULL; + pCallbacks->onDeviceDataLoopWakeup = ma_device_data_loop_wakeup__wasapi; + + return MA_SUCCESS; +} +#endif + +/****************************************************************************** + +DirectSound Backend + +******************************************************************************/ +#ifdef MA_HAS_DSOUND +/*#include */ + +/*static const GUID MA_GUID_IID_DirectSoundNotify = {0xb0210783, 0x89cd, 0x11d0, {0xaf, 0x08, 0x00, 0xa0, 0xc9, 0x25, 0xcd, 0x16}};*/ + +/* miniaudio only uses priority or exclusive modes. 
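+Concretely, ma_context_create_IDirectSound__dsound() below passes MA_DSSCL_EXCLUSIVE when the share
+mode is ma_share_mode_exclusive and MA_DSSCL_PRIORITY otherwise; MA_DSSCL_NORMAL and
+MA_DSSCL_WRITEPRIMARY are listed only for completeness.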
*/ +#define MA_DSSCL_NORMAL 1 +#define MA_DSSCL_PRIORITY 2 +#define MA_DSSCL_EXCLUSIVE 3 +#define MA_DSSCL_WRITEPRIMARY 4 + +#define MA_DSCAPS_PRIMARYMONO 0x00000001 +#define MA_DSCAPS_PRIMARYSTEREO 0x00000002 +#define MA_DSCAPS_PRIMARY8BIT 0x00000004 +#define MA_DSCAPS_PRIMARY16BIT 0x00000008 +#define MA_DSCAPS_CONTINUOUSRATE 0x00000010 +#define MA_DSCAPS_EMULDRIVER 0x00000020 +#define MA_DSCAPS_CERTIFIED 0x00000040 +#define MA_DSCAPS_SECONDARYMONO 0x00000100 +#define MA_DSCAPS_SECONDARYSTEREO 0x00000200 +#define MA_DSCAPS_SECONDARY8BIT 0x00000400 +#define MA_DSCAPS_SECONDARY16BIT 0x00000800 + +#define MA_DSBCAPS_PRIMARYBUFFER 0x00000001 +#define MA_DSBCAPS_STATIC 0x00000002 +#define MA_DSBCAPS_LOCHARDWARE 0x00000004 +#define MA_DSBCAPS_LOCSOFTWARE 0x00000008 +#define MA_DSBCAPS_CTRL3D 0x00000010 +#define MA_DSBCAPS_CTRLFREQUENCY 0x00000020 +#define MA_DSBCAPS_CTRLPAN 0x00000040 +#define MA_DSBCAPS_CTRLVOLUME 0x00000080 +#define MA_DSBCAPS_CTRLPOSITIONNOTIFY 0x00000100 +#define MA_DSBCAPS_CTRLFX 0x00000200 +#define MA_DSBCAPS_STICKYFOCUS 0x00004000 +#define MA_DSBCAPS_GLOBALFOCUS 0x00008000 +#define MA_DSBCAPS_GETCURRENTPOSITION2 0x00010000 +#define MA_DSBCAPS_MUTE3DATMAXDISTANCE 0x00020000 +#define MA_DSBCAPS_LOCDEFER 0x00040000 +#define MA_DSBCAPS_TRUEPLAYPOSITION 0x00080000 + +#define MA_DSBPLAY_LOOPING 0x00000001 +#define MA_DSBPLAY_LOCHARDWARE 0x00000002 +#define MA_DSBPLAY_LOCSOFTWARE 0x00000004 +#define MA_DSBPLAY_TERMINATEBY_TIME 0x00000008 +#define MA_DSBPLAY_TERMINATEBY_DISTANCE 0x00000010 +#define MA_DSBPLAY_TERMINATEBY_PRIORITY 0x00000020 + +#define MA_DSBSTATUS_PLAYING 0x00000001 +#define MA_DSBSTATUS_BUFFERLOST 0x00000002 +#define MA_DSBSTATUS_LOOPING 0x00000004 +#define MA_DSBSTATUS_LOCHARDWARE 0x00000008 +#define MA_DSBSTATUS_LOCSOFTWARE 0x00000010 +#define MA_DSBSTATUS_TERMINATED 0x00000020 + +#define MA_DSCBSTART_LOOPING 0x00000001 + +typedef struct +{ + DWORD dwSize; + DWORD dwFlags; + DWORD dwBufferBytes; + DWORD dwReserved; + MA_WAVEFORMATEX* lpwfxFormat; + GUID guid3DAlgorithm; +} MA_DSBUFFERDESC; + +typedef struct +{ + DWORD dwSize; + DWORD dwFlags; + DWORD dwBufferBytes; + DWORD dwReserved; + MA_WAVEFORMATEX* lpwfxFormat; + DWORD dwFXCount; + void* lpDSCFXDesc; /* <-- miniaudio doesn't use this, so set to void*. 
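+                                 (In the Windows SDK this is a pointer to an array of capture effect descriptors; declaring it as void* avoids dragging those definitions in.)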
*/ +} MA_DSCBUFFERDESC; + +typedef struct +{ + DWORD dwSize; + DWORD dwFlags; + DWORD dwMinSecondarySampleRate; + DWORD dwMaxSecondarySampleRate; + DWORD dwPrimaryBuffers; + DWORD dwMaxHwMixingAllBuffers; + DWORD dwMaxHwMixingStaticBuffers; + DWORD dwMaxHwMixingStreamingBuffers; + DWORD dwFreeHwMixingAllBuffers; + DWORD dwFreeHwMixingStaticBuffers; + DWORD dwFreeHwMixingStreamingBuffers; + DWORD dwMaxHw3DAllBuffers; + DWORD dwMaxHw3DStaticBuffers; + DWORD dwMaxHw3DStreamingBuffers; + DWORD dwFreeHw3DAllBuffers; + DWORD dwFreeHw3DStaticBuffers; + DWORD dwFreeHw3DStreamingBuffers; + DWORD dwTotalHwMemBytes; + DWORD dwFreeHwMemBytes; + DWORD dwMaxContigFreeHwMemBytes; + DWORD dwUnlockTransferRateHwBuffers; + DWORD dwPlayCpuOverheadSwBuffers; + DWORD dwReserved1; + DWORD dwReserved2; +} MA_DSCAPS; + +typedef struct +{ + DWORD dwSize; + DWORD dwFlags; + DWORD dwBufferBytes; + DWORD dwUnlockTransferRate; + DWORD dwPlayCpuOverhead; +} MA_DSBCAPS; + +typedef struct +{ + DWORD dwSize; + DWORD dwFlags; + DWORD dwFormats; + DWORD dwChannels; +} MA_DSCCAPS; + +typedef struct +{ + DWORD dwSize; + DWORD dwFlags; + DWORD dwBufferBytes; + DWORD dwReserved; +} MA_DSCBCAPS; + +typedef struct +{ + DWORD dwOffset; + HANDLE hEventNotify; +} MA_DSBPOSITIONNOTIFY; + +typedef struct ma_IDirectSound ma_IDirectSound; +typedef struct ma_IDirectSoundBuffer ma_IDirectSoundBuffer; +typedef struct ma_IDirectSoundCapture ma_IDirectSoundCapture; +typedef struct ma_IDirectSoundCaptureBuffer ma_IDirectSoundCaptureBuffer; +typedef struct ma_IDirectSoundNotify ma_IDirectSoundNotify; + + +/* +COM objects. The way these work is that you have a vtable (a list of function pointers, kind of +like how C++ works internally), and then you have a structure with a single member, which is a +pointer to the vtable. The vtable is where the methods of the object are defined. Methods need +to be in a specific order, and parent classes need to have their methods declared first. 
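+
+For example, what C++ would express as pBuffer->Release() is written in this C pattern as:
+
+    pBuffer->lpVtbl->Release(pBuffer);
+
+which is exactly what wrappers like ma_IDirectSoundBuffer_Release() below expand to.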
+*/ + +/* IDirectSound */ +typedef struct +{ + /* IUnknown */ + HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IDirectSound* pThis, const IID* const riid, void** ppObject); + ULONG (STDMETHODCALLTYPE * AddRef) (ma_IDirectSound* pThis); + ULONG (STDMETHODCALLTYPE * Release) (ma_IDirectSound* pThis); + + /* IDirectSound */ + HRESULT (STDMETHODCALLTYPE * CreateSoundBuffer) (ma_IDirectSound* pThis, const MA_DSBUFFERDESC* pDSBufferDesc, ma_IDirectSoundBuffer** ppDSBuffer, void* pUnkOuter); + HRESULT (STDMETHODCALLTYPE * GetCaps) (ma_IDirectSound* pThis, MA_DSCAPS* pDSCaps); + HRESULT (STDMETHODCALLTYPE * DuplicateSoundBuffer)(ma_IDirectSound* pThis, ma_IDirectSoundBuffer* pDSBufferOriginal, ma_IDirectSoundBuffer** ppDSBufferDuplicate); + HRESULT (STDMETHODCALLTYPE * SetCooperativeLevel) (ma_IDirectSound* pThis, HWND hwnd, DWORD dwLevel); + HRESULT (STDMETHODCALLTYPE * Compact) (ma_IDirectSound* pThis); + HRESULT (STDMETHODCALLTYPE * GetSpeakerConfig) (ma_IDirectSound* pThis, DWORD* pSpeakerConfig); + HRESULT (STDMETHODCALLTYPE * SetSpeakerConfig) (ma_IDirectSound* pThis, DWORD dwSpeakerConfig); + HRESULT (STDMETHODCALLTYPE * Initialize) (ma_IDirectSound* pThis, const GUID* pGuidDevice); +} ma_IDirectSoundVtbl; +struct ma_IDirectSound +{ + ma_IDirectSoundVtbl* lpVtbl; +}; +static MA_INLINE HRESULT ma_IDirectSound_QueryInterface(ma_IDirectSound* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); } +static MA_INLINE ULONG ma_IDirectSound_AddRef(ma_IDirectSound* pThis) { return pThis->lpVtbl->AddRef(pThis); } +static MA_INLINE ULONG ma_IDirectSound_Release(ma_IDirectSound* pThis) { return pThis->lpVtbl->Release(pThis); } +static MA_INLINE HRESULT ma_IDirectSound_CreateSoundBuffer(ma_IDirectSound* pThis, const MA_DSBUFFERDESC* pDSBufferDesc, ma_IDirectSoundBuffer** ppDSBuffer, void* pUnkOuter) { return pThis->lpVtbl->CreateSoundBuffer(pThis, pDSBufferDesc, ppDSBuffer, pUnkOuter); } +static MA_INLINE HRESULT ma_IDirectSound_GetCaps(ma_IDirectSound* pThis, MA_DSCAPS* pDSCaps) { return pThis->lpVtbl->GetCaps(pThis, pDSCaps); } +static MA_INLINE HRESULT ma_IDirectSound_DuplicateSoundBuffer(ma_IDirectSound* pThis, ma_IDirectSoundBuffer* pDSBufferOriginal, ma_IDirectSoundBuffer** ppDSBufferDuplicate) { return pThis->lpVtbl->DuplicateSoundBuffer(pThis, pDSBufferOriginal, ppDSBufferDuplicate); } +static MA_INLINE HRESULT ma_IDirectSound_SetCooperativeLevel(ma_IDirectSound* pThis, HWND hwnd, DWORD dwLevel) { return pThis->lpVtbl->SetCooperativeLevel(pThis, hwnd, dwLevel); } +static MA_INLINE HRESULT ma_IDirectSound_Compact(ma_IDirectSound* pThis) { return pThis->lpVtbl->Compact(pThis); } +static MA_INLINE HRESULT ma_IDirectSound_GetSpeakerConfig(ma_IDirectSound* pThis, DWORD* pSpeakerConfig) { return pThis->lpVtbl->GetSpeakerConfig(pThis, pSpeakerConfig); } +static MA_INLINE HRESULT ma_IDirectSound_SetSpeakerConfig(ma_IDirectSound* pThis, DWORD dwSpeakerConfig) { return pThis->lpVtbl->SetSpeakerConfig(pThis, dwSpeakerConfig); } +static MA_INLINE HRESULT ma_IDirectSound_Initialize(ma_IDirectSound* pThis, const GUID* pGuidDevice) { return pThis->lpVtbl->Initialize(pThis, pGuidDevice); } + + +/* IDirectSoundBuffer */ +typedef struct +{ + /* IUnknown */ + HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IDirectSoundBuffer* pThis, const IID* const riid, void** ppObject); + ULONG (STDMETHODCALLTYPE * AddRef) (ma_IDirectSoundBuffer* pThis); + ULONG (STDMETHODCALLTYPE * Release) (ma_IDirectSoundBuffer* pThis); + + /* IDirectSoundBuffer */ + HRESULT 
(STDMETHODCALLTYPE * GetCaps) (ma_IDirectSoundBuffer* pThis, MA_DSBCAPS* pDSBufferCaps); + HRESULT (STDMETHODCALLTYPE * GetCurrentPosition)(ma_IDirectSoundBuffer* pThis, DWORD* pCurrentPlayCursor, DWORD* pCurrentWriteCursor); + HRESULT (STDMETHODCALLTYPE * GetFormat) (ma_IDirectSoundBuffer* pThis, MA_WAVEFORMATEX* pFormat, DWORD dwSizeAllocated, DWORD* pSizeWritten); + HRESULT (STDMETHODCALLTYPE * GetVolume) (ma_IDirectSoundBuffer* pThis, LONG* pVolume); + HRESULT (STDMETHODCALLTYPE * GetPan) (ma_IDirectSoundBuffer* pThis, LONG* pPan); + HRESULT (STDMETHODCALLTYPE * GetFrequency) (ma_IDirectSoundBuffer* pThis, DWORD* pFrequency); + HRESULT (STDMETHODCALLTYPE * GetStatus) (ma_IDirectSoundBuffer* pThis, DWORD* pStatus); + HRESULT (STDMETHODCALLTYPE * Initialize) (ma_IDirectSoundBuffer* pThis, ma_IDirectSound* pDirectSound, const MA_DSBUFFERDESC* pDSBufferDesc); + HRESULT (STDMETHODCALLTYPE * Lock) (ma_IDirectSoundBuffer* pThis, DWORD dwOffset, DWORD dwBytes, void** ppAudioPtr1, DWORD* pAudioBytes1, void** ppAudioPtr2, DWORD* pAudioBytes2, DWORD dwFlags); + HRESULT (STDMETHODCALLTYPE * Play) (ma_IDirectSoundBuffer* pThis, DWORD dwReserved1, DWORD dwPriority, DWORD dwFlags); + HRESULT (STDMETHODCALLTYPE * SetCurrentPosition)(ma_IDirectSoundBuffer* pThis, DWORD dwNewPosition); + HRESULT (STDMETHODCALLTYPE * SetFormat) (ma_IDirectSoundBuffer* pThis, const MA_WAVEFORMATEX* pFormat); + HRESULT (STDMETHODCALLTYPE * SetVolume) (ma_IDirectSoundBuffer* pThis, LONG volume); + HRESULT (STDMETHODCALLTYPE * SetPan) (ma_IDirectSoundBuffer* pThis, LONG pan); + HRESULT (STDMETHODCALLTYPE * SetFrequency) (ma_IDirectSoundBuffer* pThis, DWORD dwFrequency); + HRESULT (STDMETHODCALLTYPE * Stop) (ma_IDirectSoundBuffer* pThis); + HRESULT (STDMETHODCALLTYPE * Unlock) (ma_IDirectSoundBuffer* pThis, void* pAudioPtr1, DWORD dwAudioBytes1, void* pAudioPtr2, DWORD dwAudioBytes2); + HRESULT (STDMETHODCALLTYPE * Restore) (ma_IDirectSoundBuffer* pThis); +} ma_IDirectSoundBufferVtbl; +struct ma_IDirectSoundBuffer +{ + ma_IDirectSoundBufferVtbl* lpVtbl; +}; +static MA_INLINE HRESULT ma_IDirectSoundBuffer_QueryInterface(ma_IDirectSoundBuffer* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); } +static MA_INLINE ULONG ma_IDirectSoundBuffer_AddRef(ma_IDirectSoundBuffer* pThis) { return pThis->lpVtbl->AddRef(pThis); } +static MA_INLINE ULONG ma_IDirectSoundBuffer_Release(ma_IDirectSoundBuffer* pThis) { return pThis->lpVtbl->Release(pThis); } +static MA_INLINE HRESULT ma_IDirectSoundBuffer_GetCaps(ma_IDirectSoundBuffer* pThis, MA_DSBCAPS* pDSBufferCaps) { return pThis->lpVtbl->GetCaps(pThis, pDSBufferCaps); } +static MA_INLINE HRESULT ma_IDirectSoundBuffer_GetCurrentPosition(ma_IDirectSoundBuffer* pThis, DWORD* pCurrentPlayCursor, DWORD* pCurrentWriteCursor) { return pThis->lpVtbl->GetCurrentPosition(pThis, pCurrentPlayCursor, pCurrentWriteCursor); } +static MA_INLINE HRESULT ma_IDirectSoundBuffer_GetFormat(ma_IDirectSoundBuffer* pThis, MA_WAVEFORMATEX* pFormat, DWORD dwSizeAllocated, DWORD* pSizeWritten) { return pThis->lpVtbl->GetFormat(pThis, pFormat, dwSizeAllocated, pSizeWritten); } +static MA_INLINE HRESULT ma_IDirectSoundBuffer_GetVolume(ma_IDirectSoundBuffer* pThis, LONG* pVolume) { return pThis->lpVtbl->GetVolume(pThis, pVolume); } +static MA_INLINE HRESULT ma_IDirectSoundBuffer_GetPan(ma_IDirectSoundBuffer* pThis, LONG* pPan) { return pThis->lpVtbl->GetPan(pThis, pPan); } +static MA_INLINE HRESULT ma_IDirectSoundBuffer_GetFrequency(ma_IDirectSoundBuffer* pThis, 
DWORD* pFrequency) { return pThis->lpVtbl->GetFrequency(pThis, pFrequency); } +static MA_INLINE HRESULT ma_IDirectSoundBuffer_GetStatus(ma_IDirectSoundBuffer* pThis, DWORD* pStatus) { return pThis->lpVtbl->GetStatus(pThis, pStatus); } +static MA_INLINE HRESULT ma_IDirectSoundBuffer_Initialize(ma_IDirectSoundBuffer* pThis, ma_IDirectSound* pDirectSound, const MA_DSBUFFERDESC* pDSBufferDesc) { return pThis->lpVtbl->Initialize(pThis, pDirectSound, pDSBufferDesc); } +static MA_INLINE HRESULT ma_IDirectSoundBuffer_Lock(ma_IDirectSoundBuffer* pThis, DWORD dwOffset, DWORD dwBytes, void** ppAudioPtr1, DWORD* pAudioBytes1, void** ppAudioPtr2, DWORD* pAudioBytes2, DWORD dwFlags) { return pThis->lpVtbl->Lock(pThis, dwOffset, dwBytes, ppAudioPtr1, pAudioBytes1, ppAudioPtr2, pAudioBytes2, dwFlags); } +static MA_INLINE HRESULT ma_IDirectSoundBuffer_Play(ma_IDirectSoundBuffer* pThis, DWORD dwReserved1, DWORD dwPriority, DWORD dwFlags) { return pThis->lpVtbl->Play(pThis, dwReserved1, dwPriority, dwFlags); } +static MA_INLINE HRESULT ma_IDirectSoundBuffer_SetCurrentPosition(ma_IDirectSoundBuffer* pThis, DWORD dwNewPosition) { return pThis->lpVtbl->SetCurrentPosition(pThis, dwNewPosition); } +static MA_INLINE HRESULT ma_IDirectSoundBuffer_SetFormat(ma_IDirectSoundBuffer* pThis, const MA_WAVEFORMATEX* pFormat) { return pThis->lpVtbl->SetFormat(pThis, pFormat); } +static MA_INLINE HRESULT ma_IDirectSoundBuffer_SetVolume(ma_IDirectSoundBuffer* pThis, LONG volume) { return pThis->lpVtbl->SetVolume(pThis, volume); } +static MA_INLINE HRESULT ma_IDirectSoundBuffer_SetPan(ma_IDirectSoundBuffer* pThis, LONG pan) { return pThis->lpVtbl->SetPan(pThis, pan); } +static MA_INLINE HRESULT ma_IDirectSoundBuffer_SetFrequency(ma_IDirectSoundBuffer* pThis, DWORD dwFrequency) { return pThis->lpVtbl->SetFrequency(pThis, dwFrequency); } +static MA_INLINE HRESULT ma_IDirectSoundBuffer_Stop(ma_IDirectSoundBuffer* pThis) { return pThis->lpVtbl->Stop(pThis); } +static MA_INLINE HRESULT ma_IDirectSoundBuffer_Unlock(ma_IDirectSoundBuffer* pThis, void* pAudioPtr1, DWORD dwAudioBytes1, void* pAudioPtr2, DWORD dwAudioBytes2) { return pThis->lpVtbl->Unlock(pThis, pAudioPtr1, dwAudioBytes1, pAudioPtr2, dwAudioBytes2); } +static MA_INLINE HRESULT ma_IDirectSoundBuffer_Restore(ma_IDirectSoundBuffer* pThis) { return pThis->lpVtbl->Restore(pThis); } + + +/* IDirectSoundCapture */ +typedef struct +{ + /* IUnknown */ + HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IDirectSoundCapture* pThis, const IID* const riid, void** ppObject); + ULONG (STDMETHODCALLTYPE * AddRef) (ma_IDirectSoundCapture* pThis); + ULONG (STDMETHODCALLTYPE * Release) (ma_IDirectSoundCapture* pThis); + + /* IDirectSoundCapture */ + HRESULT (STDMETHODCALLTYPE * CreateCaptureBuffer)(ma_IDirectSoundCapture* pThis, const MA_DSCBUFFERDESC* pDSCBufferDesc, ma_IDirectSoundCaptureBuffer** ppDSCBuffer, void* pUnkOuter); + HRESULT (STDMETHODCALLTYPE * GetCaps) (ma_IDirectSoundCapture* pThis, MA_DSCCAPS* pDSCCaps); + HRESULT (STDMETHODCALLTYPE * Initialize) (ma_IDirectSoundCapture* pThis, const GUID* pGuidDevice); +} ma_IDirectSoundCaptureVtbl; +struct ma_IDirectSoundCapture +{ + ma_IDirectSoundCaptureVtbl* lpVtbl; +}; +static MA_INLINE HRESULT ma_IDirectSoundCapture_QueryInterface (ma_IDirectSoundCapture* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); } +static MA_INLINE ULONG ma_IDirectSoundCapture_AddRef (ma_IDirectSoundCapture* pThis) { return pThis->lpVtbl->AddRef(pThis); } +static MA_INLINE ULONG 
ma_IDirectSoundCapture_Release (ma_IDirectSoundCapture* pThis) { return pThis->lpVtbl->Release(pThis); } +static MA_INLINE HRESULT ma_IDirectSoundCapture_CreateCaptureBuffer(ma_IDirectSoundCapture* pThis, const MA_DSCBUFFERDESC* pDSCBufferDesc, ma_IDirectSoundCaptureBuffer** ppDSCBuffer, void* pUnkOuter) { return pThis->lpVtbl->CreateCaptureBuffer(pThis, pDSCBufferDesc, ppDSCBuffer, pUnkOuter); } +static MA_INLINE HRESULT ma_IDirectSoundCapture_GetCaps (ma_IDirectSoundCapture* pThis, MA_DSCCAPS* pDSCCaps) { return pThis->lpVtbl->GetCaps(pThis, pDSCCaps); } +static MA_INLINE HRESULT ma_IDirectSoundCapture_Initialize (ma_IDirectSoundCapture* pThis, const GUID* pGuidDevice) { return pThis->lpVtbl->Initialize(pThis, pGuidDevice); } + + +/* IDirectSoundCaptureBuffer */ +typedef struct +{ + /* IUnknown */ + HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IDirectSoundCaptureBuffer* pThis, const IID* const riid, void** ppObject); + ULONG (STDMETHODCALLTYPE * AddRef) (ma_IDirectSoundCaptureBuffer* pThis); + ULONG (STDMETHODCALLTYPE * Release) (ma_IDirectSoundCaptureBuffer* pThis); + + /* IDirectSoundCaptureBuffer */ + HRESULT (STDMETHODCALLTYPE * GetCaps) (ma_IDirectSoundCaptureBuffer* pThis, MA_DSCBCAPS* pDSCBCaps); + HRESULT (STDMETHODCALLTYPE * GetCurrentPosition)(ma_IDirectSoundCaptureBuffer* pThis, DWORD* pCapturePosition, DWORD* pReadPosition); + HRESULT (STDMETHODCALLTYPE * GetFormat) (ma_IDirectSoundCaptureBuffer* pThis, MA_WAVEFORMATEX* pFormat, DWORD dwSizeAllocated, DWORD* pSizeWritten); + HRESULT (STDMETHODCALLTYPE * GetStatus) (ma_IDirectSoundCaptureBuffer* pThis, DWORD* pStatus); + HRESULT (STDMETHODCALLTYPE * Initialize) (ma_IDirectSoundCaptureBuffer* pThis, ma_IDirectSoundCapture* pDirectSoundCapture, const MA_DSCBUFFERDESC* pDSCBufferDesc); + HRESULT (STDMETHODCALLTYPE * Lock) (ma_IDirectSoundCaptureBuffer* pThis, DWORD dwOffset, DWORD dwBytes, void** ppAudioPtr1, DWORD* pAudioBytes1, void** ppAudioPtr2, DWORD* pAudioBytes2, DWORD dwFlags); + HRESULT (STDMETHODCALLTYPE * Start) (ma_IDirectSoundCaptureBuffer* pThis, DWORD dwFlags); + HRESULT (STDMETHODCALLTYPE * Stop) (ma_IDirectSoundCaptureBuffer* pThis); + HRESULT (STDMETHODCALLTYPE * Unlock) (ma_IDirectSoundCaptureBuffer* pThis, void* pAudioPtr1, DWORD dwAudioBytes1, void* pAudioPtr2, DWORD dwAudioBytes2); +} ma_IDirectSoundCaptureBufferVtbl; +struct ma_IDirectSoundCaptureBuffer +{ + ma_IDirectSoundCaptureBufferVtbl* lpVtbl; +}; +static MA_INLINE HRESULT ma_IDirectSoundCaptureBuffer_QueryInterface(ma_IDirectSoundCaptureBuffer* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); } +static MA_INLINE ULONG ma_IDirectSoundCaptureBuffer_AddRef(ma_IDirectSoundCaptureBuffer* pThis) { return pThis->lpVtbl->AddRef(pThis); } +static MA_INLINE ULONG ma_IDirectSoundCaptureBuffer_Release(ma_IDirectSoundCaptureBuffer* pThis) { return pThis->lpVtbl->Release(pThis); } +static MA_INLINE HRESULT ma_IDirectSoundCaptureBuffer_GetCaps(ma_IDirectSoundCaptureBuffer* pThis, MA_DSCBCAPS* pDSCBCaps) { return pThis->lpVtbl->GetCaps(pThis, pDSCBCaps); } +static MA_INLINE HRESULT ma_IDirectSoundCaptureBuffer_GetCurrentPosition(ma_IDirectSoundCaptureBuffer* pThis, DWORD* pCapturePosition, DWORD* pReadPosition) { return pThis->lpVtbl->GetCurrentPosition(pThis, pCapturePosition, pReadPosition); } +static MA_INLINE HRESULT ma_IDirectSoundCaptureBuffer_GetFormat(ma_IDirectSoundCaptureBuffer* pThis, MA_WAVEFORMATEX* pFormat, DWORD dwSizeAllocated, DWORD* pSizeWritten) { return 
pThis->lpVtbl->GetFormat(pThis, pFormat, dwSizeAllocated, pSizeWritten); } +static MA_INLINE HRESULT ma_IDirectSoundCaptureBuffer_GetStatus(ma_IDirectSoundCaptureBuffer* pThis, DWORD* pStatus) { return pThis->lpVtbl->GetStatus(pThis, pStatus); } +static MA_INLINE HRESULT ma_IDirectSoundCaptureBuffer_Initialize(ma_IDirectSoundCaptureBuffer* pThis, ma_IDirectSoundCapture* pDirectSoundCapture, const MA_DSCBUFFERDESC* pDSCBufferDesc) { return pThis->lpVtbl->Initialize(pThis, pDirectSoundCapture, pDSCBufferDesc); } +static MA_INLINE HRESULT ma_IDirectSoundCaptureBuffer_Lock(ma_IDirectSoundCaptureBuffer* pThis, DWORD dwOffset, DWORD dwBytes, void** ppAudioPtr1, DWORD* pAudioBytes1, void** ppAudioPtr2, DWORD* pAudioBytes2, DWORD dwFlags) { return pThis->lpVtbl->Lock(pThis, dwOffset, dwBytes, ppAudioPtr1, pAudioBytes1, ppAudioPtr2, pAudioBytes2, dwFlags); } +static MA_INLINE HRESULT ma_IDirectSoundCaptureBuffer_Start(ma_IDirectSoundCaptureBuffer* pThis, DWORD dwFlags) { return pThis->lpVtbl->Start(pThis, dwFlags); } +static MA_INLINE HRESULT ma_IDirectSoundCaptureBuffer_Stop(ma_IDirectSoundCaptureBuffer* pThis) { return pThis->lpVtbl->Stop(pThis); } +static MA_INLINE HRESULT ma_IDirectSoundCaptureBuffer_Unlock(ma_IDirectSoundCaptureBuffer* pThis, void* pAudioPtr1, DWORD dwAudioBytes1, void* pAudioPtr2, DWORD dwAudioBytes2) { return pThis->lpVtbl->Unlock(pThis, pAudioPtr1, dwAudioBytes1, pAudioPtr2, dwAudioBytes2); } + + +/* IDirectSoundNotify */ +typedef struct +{ + /* IUnknown */ + HRESULT (STDMETHODCALLTYPE * QueryInterface)(ma_IDirectSoundNotify* pThis, const IID* const riid, void** ppObject); + ULONG (STDMETHODCALLTYPE * AddRef) (ma_IDirectSoundNotify* pThis); + ULONG (STDMETHODCALLTYPE * Release) (ma_IDirectSoundNotify* pThis); + + /* IDirectSoundNotify */ + HRESULT (STDMETHODCALLTYPE * SetNotificationPositions)(ma_IDirectSoundNotify* pThis, DWORD dwPositionNotifies, const MA_DSBPOSITIONNOTIFY* pPositionNotifies); +} ma_IDirectSoundNotifyVtbl; +struct ma_IDirectSoundNotify +{ + ma_IDirectSoundNotifyVtbl* lpVtbl; +}; +static MA_INLINE HRESULT ma_IDirectSoundNotify_QueryInterface(ma_IDirectSoundNotify* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); } +static MA_INLINE ULONG ma_IDirectSoundNotify_AddRef(ma_IDirectSoundNotify* pThis) { return pThis->lpVtbl->AddRef(pThis); } +static MA_INLINE ULONG ma_IDirectSoundNotify_Release(ma_IDirectSoundNotify* pThis) { return pThis->lpVtbl->Release(pThis); } +static MA_INLINE HRESULT ma_IDirectSoundNotify_SetNotificationPositions(ma_IDirectSoundNotify* pThis, DWORD dwPositionNotifies, const MA_DSBPOSITIONNOTIFY* pPositionNotifies) { return pThis->lpVtbl->SetNotificationPositions(pThis, dwPositionNotifies, pPositionNotifies); } + + +typedef BOOL (CALLBACK * ma_DSEnumCallbackAProc) (GUID* pDeviceGUID, const char* pDeviceDescription, const char* pModule, void* pContext); +typedef HRESULT (WINAPI * ma_DirectSoundCreateProc) (const GUID* pcGuidDevice, ma_IDirectSound** ppDS8, ma_IUnknown* pUnkOuter); +typedef HRESULT (WINAPI * ma_DirectSoundEnumerateAProc) (ma_DSEnumCallbackAProc pDSEnumCallback, void* pContext); +typedef HRESULT (WINAPI * ma_DirectSoundCaptureCreateProc) (const GUID* pcGuidDevice, ma_IDirectSoundCapture** ppDSC8, ma_IUnknown* pUnkOuter); +typedef HRESULT (WINAPI * ma_DirectSoundCaptureEnumerateAProc)(ma_DSEnumCallbackAProc pDSEnumCallback, void* pContext); + +static ma_uint32 ma_get_best_sample_rate_within_range(ma_uint32 sampleRateMin, ma_uint32 sampleRateMax) +{ + /* Normalize the 
range in case we were given something stupid. */ + if (sampleRateMin < (ma_uint32)ma_standard_sample_rate_min) { + sampleRateMin = (ma_uint32)ma_standard_sample_rate_min; + } + if (sampleRateMax > (ma_uint32)ma_standard_sample_rate_max) { + sampleRateMax = (ma_uint32)ma_standard_sample_rate_max; + } + if (sampleRateMin > sampleRateMax) { + sampleRateMin = sampleRateMax; + } + + if (sampleRateMin == sampleRateMax) { + return sampleRateMax; + } else { + size_t iStandardRate; + for (iStandardRate = 0; iStandardRate < ma_countof(g_maStandardSampleRatePriorities); ++iStandardRate) { + ma_uint32 standardRate = g_maStandardSampleRatePriorities[iStandardRate]; + if (standardRate >= sampleRateMin && standardRate <= sampleRateMax) { + return standardRate; + } + } + } + + /* Should never get here. */ + MA_ASSERT(MA_FALSE); + return 0; +} + +/* +Retrieves the channel count and channel map for the given speaker configuration. If the speaker configuration is unknown, +the channel count and channel map will be left unmodified. +*/ +static void ma_get_channels_from_speaker_config__dsound(DWORD speakerConfig, WORD* pChannelsOut, DWORD* pChannelMapOut) +{ + WORD channels; + DWORD channelMap; + + channels = 0; + if (pChannelsOut != NULL) { + channels = *pChannelsOut; + } + + channelMap = 0; + if (pChannelMapOut != NULL) { + channelMap = *pChannelMapOut; + } + + /* + The speaker configuration is a combination of speaker config and speaker geometry. The lower 8 bits is what we care about. The upper + 16 bits is for the geometry. + */ + switch ((BYTE)(speakerConfig)) { + case 1 /*DSSPEAKER_HEADPHONE*/: channels = 2; channelMap = SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT; break; + case 2 /*DSSPEAKER_MONO*/: channels = 1; channelMap = SPEAKER_FRONT_CENTER; break; + case 3 /*DSSPEAKER_QUAD*/: channels = 4; channelMap = SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT | SPEAKER_BACK_LEFT | SPEAKER_BACK_RIGHT; break; + case 4 /*DSSPEAKER_STEREO*/: channels = 2; channelMap = SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT; break; + case 5 /*DSSPEAKER_SURROUND*/: channels = 4; channelMap = SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT | SPEAKER_FRONT_CENTER | SPEAKER_BACK_CENTER; break; + case 6 /*DSSPEAKER_5POINT1_BACK*/ /*DSSPEAKER_5POINT1*/: channels = 6; channelMap = SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT | SPEAKER_FRONT_CENTER | SPEAKER_LOW_FREQUENCY | SPEAKER_BACK_LEFT | SPEAKER_BACK_RIGHT; break; + case 7 /*DSSPEAKER_7POINT1_WIDE*/ /*DSSPEAKER_7POINT1*/: channels = 8; channelMap = SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT | SPEAKER_FRONT_CENTER | SPEAKER_LOW_FREQUENCY | SPEAKER_BACK_LEFT | SPEAKER_BACK_RIGHT | SPEAKER_FRONT_LEFT_OF_CENTER | SPEAKER_FRONT_RIGHT_OF_CENTER; break; + case 8 /*DSSPEAKER_7POINT1_SURROUND*/: channels = 8; channelMap = SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT | SPEAKER_FRONT_CENTER | SPEAKER_LOW_FREQUENCY | SPEAKER_BACK_LEFT | SPEAKER_BACK_RIGHT | SPEAKER_SIDE_LEFT | SPEAKER_SIDE_RIGHT; break; + case 9 /*DSSPEAKER_5POINT1_SURROUND*/: channels = 6; channelMap = SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT | SPEAKER_FRONT_CENTER | SPEAKER_LOW_FREQUENCY | SPEAKER_SIDE_LEFT | SPEAKER_SIDE_RIGHT; break; + default: break; + } + + if (pChannelsOut != NULL) { + *pChannelsOut = channels; + } + + if (pChannelMapOut != NULL) { + *pChannelMapOut = channelMap; + } +} + + +static ma_result ma_context_create_IDirectSound__dsound(ma_context* pContext, ma_share_mode shareMode, const ma_device_id* pDeviceID, ma_IDirectSound** ppDirectSound) +{ + ma_IDirectSound* pDirectSound; + HWND hWnd; + HRESULT hr; + + MA_ASSERT(pContext 
!= NULL);
+    MA_ASSERT(ppDirectSound != NULL);
+
+    *ppDirectSound = NULL;
+    pDirectSound = NULL;
+
+    if (FAILED(((ma_DirectSoundCreateProc)pContext->dsound.DirectSoundCreate)((pDeviceID == NULL) ? NULL : (const GUID*)pDeviceID->dsound, &pDirectSound, NULL))) {
+        ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[DirectSound] DirectSoundCreate() failed for playback device.");
+        return MA_FAILED_TO_OPEN_BACKEND_DEVICE;
+    }
+
+    /* The cooperative level must be set before doing anything else. */
+    hWnd = (HWND)pContext->dsound.hWnd;
+    if (hWnd == 0) {
+        hWnd = ((MA_PFN_GetForegroundWindow)pContext->win32.GetForegroundWindow)();
+        if (hWnd == 0) {
+            hWnd = ((MA_PFN_GetDesktopWindow)pContext->win32.GetDesktopWindow)();
+        }
+    }
+
+    hr = ma_IDirectSound_SetCooperativeLevel(pDirectSound, hWnd, (shareMode == ma_share_mode_exclusive) ? MA_DSSCL_EXCLUSIVE : MA_DSSCL_PRIORITY);
+    if (FAILED(hr)) {
+        ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSound_SetCooperativeLevel() failed for playback device.");
+        return ma_result_from_HRESULT(hr);
+    }
+
+    *ppDirectSound = pDirectSound;
+    return MA_SUCCESS;
+}
+
+static ma_result ma_context_create_IDirectSoundCapture__dsound(ma_context* pContext, ma_share_mode shareMode, const ma_device_id* pDeviceID, ma_IDirectSoundCapture** ppDirectSoundCapture)
+{
+    ma_IDirectSoundCapture* pDirectSoundCapture;
+    HRESULT hr;
+
+    MA_ASSERT(pContext != NULL);
+    MA_ASSERT(ppDirectSoundCapture != NULL);
+
+    /* DirectSound does not support exclusive mode for capture. */
+    if (shareMode == ma_share_mode_exclusive) {
+        return MA_SHARE_MODE_NOT_SUPPORTED;
+    }
+
+    *ppDirectSoundCapture = NULL;
+    pDirectSoundCapture = NULL;
+
+    hr = ((ma_DirectSoundCaptureCreateProc)pContext->dsound.DirectSoundCaptureCreate)((pDeviceID == NULL) ? NULL : (const GUID*)pDeviceID->dsound, &pDirectSoundCapture, NULL);
+    if (FAILED(hr)) {
+        ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[DirectSound] DirectSoundCaptureCreate() failed for capture device.");
+        return ma_result_from_HRESULT(hr);
+    }
+
+    *ppDirectSoundCapture = pDirectSoundCapture;
+    return MA_SUCCESS;
+}
+
+static ma_result ma_context_get_format_info_for_IDirectSoundCapture__dsound(ma_context* pContext, ma_IDirectSoundCapture* pDirectSoundCapture, WORD* pChannels, WORD* pBitsPerSample, DWORD* pSampleRate)
+{
+    HRESULT hr;
+    MA_DSCCAPS caps;
+    WORD bitsPerSample;
+    DWORD sampleRate;
+
+    MA_ASSERT(pContext != NULL);
+    MA_ASSERT(pDirectSoundCapture != NULL);
+
+    if (pChannels) {
+        *pChannels = 0;
+    }
+    if (pBitsPerSample) {
+        *pBitsPerSample = 0;
+    }
+    if (pSampleRate) {
+        *pSampleRate = 0;
+    }
+
+    MA_ZERO_OBJECT(&caps);
+    caps.dwSize = sizeof(caps);
+    hr = ma_IDirectSoundCapture_GetCaps(pDirectSoundCapture, &caps);
+    if (FAILED(hr)) {
+        ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSoundCapture_GetCaps() failed for capture device.");
+        return ma_result_from_HRESULT(hr);
+    }
+
+    if (pChannels) {
+        *pChannels = (WORD)caps.dwChannels;
+    }
+
+    /* The device can support multiple formats. We just go through the different formats in order of priority and pick the first one. This is the same type of system as the WinMM backend.
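+       For example (illustrative caps): a stereo device reporting WAVE_FORMAT_44S16 | WAVE_FORMAT_96S16
+       resolves to 16-bit at 44100, because 48000 is tried first, then 44100, before the lower and higher rates.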
*/ + bitsPerSample = 16; + sampleRate = 48000; + + if (caps.dwChannels == 1) { + if ((caps.dwFormats & WAVE_FORMAT_48M16) != 0) { + sampleRate = 48000; + } else if ((caps.dwFormats & WAVE_FORMAT_44M16) != 0) { + sampleRate = 44100; + } else if ((caps.dwFormats & WAVE_FORMAT_2M16) != 0) { + sampleRate = 22050; + } else if ((caps.dwFormats & WAVE_FORMAT_1M16) != 0) { + sampleRate = 11025; + } else if ((caps.dwFormats & WAVE_FORMAT_96M16) != 0) { + sampleRate = 96000; + } else { + bitsPerSample = 8; + if ((caps.dwFormats & WAVE_FORMAT_48M08) != 0) { + sampleRate = 48000; + } else if ((caps.dwFormats & WAVE_FORMAT_44M08) != 0) { + sampleRate = 44100; + } else if ((caps.dwFormats & WAVE_FORMAT_2M08) != 0) { + sampleRate = 22050; + } else if ((caps.dwFormats & WAVE_FORMAT_1M08) != 0) { + sampleRate = 11025; + } else if ((caps.dwFormats & WAVE_FORMAT_96M08) != 0) { + sampleRate = 96000; + } else { + bitsPerSample = 16; /* Didn't find it. Just fall back to 16-bit. */ + } + } + } else if (caps.dwChannels == 2) { + if ((caps.dwFormats & WAVE_FORMAT_48S16) != 0) { + sampleRate = 48000; + } else if ((caps.dwFormats & WAVE_FORMAT_44S16) != 0) { + sampleRate = 44100; + } else if ((caps.dwFormats & WAVE_FORMAT_2S16) != 0) { + sampleRate = 22050; + } else if ((caps.dwFormats & WAVE_FORMAT_1S16) != 0) { + sampleRate = 11025; + } else if ((caps.dwFormats & WAVE_FORMAT_96S16) != 0) { + sampleRate = 96000; + } else { + bitsPerSample = 8; + if ((caps.dwFormats & WAVE_FORMAT_48S08) != 0) { + sampleRate = 48000; + } else if ((caps.dwFormats & WAVE_FORMAT_44S08) != 0) { + sampleRate = 44100; + } else if ((caps.dwFormats & WAVE_FORMAT_2S08) != 0) { + sampleRate = 22050; + } else if ((caps.dwFormats & WAVE_FORMAT_1S08) != 0) { + sampleRate = 11025; + } else if ((caps.dwFormats & WAVE_FORMAT_96S08) != 0) { + sampleRate = 96000; + } else { + bitsPerSample = 16; /* Didn't find it. Just fall back to 16-bit. */ + } + } + } + + if (pBitsPerSample) { + *pBitsPerSample = bitsPerSample; + } + if (pSampleRate) { + *pSampleRate = sampleRate; + } + + return MA_SUCCESS; +} + + +typedef struct +{ + ma_context* pContext; + ma_device_type deviceType; + ma_enum_devices_callback_proc callback; + void* pUserData; + ma_bool32 terminated; +} ma_context_enumerate_devices_callback_data__dsound; + +static BOOL CALLBACK ma_context_enumerate_devices_callback__dsound(GUID* lpGuid, const char* lpcstrDescription, const char* lpcstrModule, void* lpContext) +{ + ma_context_enumerate_devices_callback_data__dsound* pData = (ma_context_enumerate_devices_callback_data__dsound*)lpContext; + ma_device_info deviceInfo; + + (void)lpcstrModule; + + MA_ZERO_OBJECT(&deviceInfo); + + /* ID. */ + if (lpGuid != NULL) { + MA_COPY_MEMORY(deviceInfo.id.dsound, lpGuid, 16); + } else { + MA_ZERO_MEMORY(deviceInfo.id.dsound, 16); + deviceInfo.isDefault = MA_TRUE; + } + + /* Name / Description */ + ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), lpcstrDescription, (size_t)-1); + + + /* Call the callback function, but make sure we stop enumerating if the callee requested so. */ + MA_ASSERT(pData != NULL); + pData->terminated = (pData->callback(pData->pContext, pData->deviceType, &deviceInfo, pData->pUserData) == MA_FALSE); + if (pData->terminated) { + return FALSE; /* Stop enumeration. */ + } else { + return TRUE; /* Continue enumeration. 
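+                  The enumeration function keeps invoking this callback, one device at a time, for as long as we return TRUE.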
*/
+    }
+}
+
+static ma_result ma_context_enumerate_devices__dsound(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData)
+{
+    ma_context_enumerate_devices_callback_data__dsound data;
+
+    MA_ASSERT(pContext != NULL);
+    MA_ASSERT(callback != NULL);
+
+    data.pContext = pContext;
+    data.callback = callback;
+    data.pUserData = pUserData;
+    data.terminated = MA_FALSE;
+
+    /* Playback. */
+    if (!data.terminated) {
+        data.deviceType = ma_device_type_playback;
+        ((ma_DirectSoundEnumerateAProc)pContext->dsound.DirectSoundEnumerateA)(ma_context_enumerate_devices_callback__dsound, &data);
+    }
+
+    /* Capture. */
+    if (!data.terminated) {
+        data.deviceType = ma_device_type_capture;
+        ((ma_DirectSoundCaptureEnumerateAProc)pContext->dsound.DirectSoundCaptureEnumerateA)(ma_context_enumerate_devices_callback__dsound, &data);
+    }
+
+    return MA_SUCCESS;
+}
+
+
+typedef struct
+{
+    const ma_device_id* pDeviceID;
+    ma_device_info* pDeviceInfo;
+    ma_bool32 found;
+} ma_context_get_device_info_callback_data__dsound;
+
+static BOOL CALLBACK ma_context_get_device_info_callback__dsound(GUID* lpGuid, const char* lpcstrDescription, const char* lpcstrModule, void* lpContext)
+{
+    ma_context_get_device_info_callback_data__dsound* pData = (ma_context_get_device_info_callback_data__dsound*)lpContext;
+    MA_ASSERT(pData != NULL);
+
+    if ((pData->pDeviceID == NULL || ma_is_guid_null(pData->pDeviceID->dsound)) && (lpGuid == NULL || ma_is_guid_null(lpGuid))) {
+        /* Default device. */
+        ma_strncpy_s(pData->pDeviceInfo->name, sizeof(pData->pDeviceInfo->name), lpcstrDescription, (size_t)-1);
+        pData->pDeviceInfo->isDefault = MA_TRUE;
+        pData->found = MA_TRUE;
+        return FALSE; /* Stop enumeration. */
+    } else {
+        /* Not the default device. */
+        if (lpGuid != NULL && pData->pDeviceID != NULL) {
+            if (memcmp(pData->pDeviceID->dsound, lpGuid, sizeof(pData->pDeviceID->dsound)) == 0) {
+                ma_strncpy_s(pData->pDeviceInfo->name, sizeof(pData->pDeviceInfo->name), lpcstrDescription, (size_t)-1);
+                pData->found = MA_TRUE;
+                return FALSE; /* Stop enumeration. */
+            }
+        }
+    }
+
+    (void)lpcstrModule;
+    return TRUE;
+}
+
+static ma_result ma_context_get_device_info__dsound(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_device_info* pDeviceInfo)
+{
+    ma_result result;
+    HRESULT hr;
+
+    if (pDeviceID != NULL) {
+        ma_context_get_device_info_callback_data__dsound data;
+
+        /* ID. */
+        MA_COPY_MEMORY(pDeviceInfo->id.dsound, pDeviceID->dsound, 16);
+
+        /* Name / Description. This is retrieved by enumerating over each device until we find the one that matches the input ID. */
+        data.pDeviceID = pDeviceID;
+        data.pDeviceInfo = pDeviceInfo;
+        data.found = MA_FALSE;
+        if (deviceType == ma_device_type_playback) {
+            ((ma_DirectSoundEnumerateAProc)pContext->dsound.DirectSoundEnumerateA)(ma_context_get_device_info_callback__dsound, &data);
+        } else {
+            ((ma_DirectSoundCaptureEnumerateAProc)pContext->dsound.DirectSoundCaptureEnumerateA)(ma_context_get_device_info_callback__dsound, &data);
+        }
+
+        if (!data.found) {
+            return MA_NO_DEVICE;
+        }
+    } else {
+        /* I don't think there's a way to get the name of the default device with DirectSound. In this case we just need to use defaults.
*/ + + /* ID */ + MA_ZERO_MEMORY(pDeviceInfo->id.dsound, 16); + + /* Name / Description */ + if (deviceType == ma_device_type_playback) { + ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1); + } else { + ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1); + } + + pDeviceInfo->isDefault = MA_TRUE; + } + + /* Retrieving detailed information is slightly different depending on the device type. */ + if (deviceType == ma_device_type_playback) { + /* Playback. */ + ma_IDirectSound* pDirectSound; + MA_DSCAPS caps; + WORD channels; + + result = ma_context_create_IDirectSound__dsound(pContext, ma_share_mode_shared, pDeviceID, &pDirectSound); + if (result != MA_SUCCESS) { + return result; + } + + MA_ZERO_OBJECT(&caps); + caps.dwSize = sizeof(caps); + hr = ma_IDirectSound_GetCaps(pDirectSound, &caps); + if (FAILED(hr)) { + ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSound_GetCaps() failed for playback device."); + return ma_result_from_HRESULT(hr); + } + + + /* Channels. Only a single channel count is reported for DirectSound. */ + if ((caps.dwFlags & MA_DSCAPS_PRIMARYSTEREO) != 0) { + /* It supports at least stereo, but could support more. */ + DWORD speakerConfig; + + channels = 2; + + /* Look at the speaker configuration to get a better idea on the channel count. */ + hr = ma_IDirectSound_GetSpeakerConfig(pDirectSound, &speakerConfig); + if (SUCCEEDED(hr)) { + ma_get_channels_from_speaker_config__dsound(speakerConfig, &channels, NULL); + } + } else { + /* It does not support stereo, which means we are stuck with mono. */ + channels = 1; + } + + + /* + In DirectSound, our native formats are centered around sample rates. All formats are supported, and we're only reporting a single channel + count. However, DirectSound can report a range of supported sample rates. We're only going to include standard rates known by miniaudio + in order to keep the size of this within reason. + */ + if ((caps.dwFlags & MA_DSCAPS_CONTINUOUSRATE) != 0) { + /* Multiple sample rates are supported. We'll report in order of our preferred sample rates. */ + size_t iStandardSampleRate; + for (iStandardSampleRate = 0; iStandardSampleRate < ma_countof(g_maStandardSampleRatePriorities); iStandardSampleRate += 1) { + ma_uint32 sampleRate = g_maStandardSampleRatePriorities[iStandardSampleRate]; + if (sampleRate >= caps.dwMinSecondarySampleRate && sampleRate <= caps.dwMaxSecondarySampleRate) { + pDeviceInfo->nativeDataFormats[pDeviceInfo->nativeDataFormatCount].format = ma_format_unknown; + pDeviceInfo->nativeDataFormats[pDeviceInfo->nativeDataFormatCount].channels = channels; + pDeviceInfo->nativeDataFormats[pDeviceInfo->nativeDataFormatCount].sampleRate = sampleRate; + pDeviceInfo->nativeDataFormats[pDeviceInfo->nativeDataFormatCount].flags = 0; + pDeviceInfo->nativeDataFormatCount += 1; + } + } + } else { + /* Only a single sample rate is supported. */ + pDeviceInfo->nativeDataFormats[pDeviceInfo->nativeDataFormatCount].format = ma_format_unknown; + pDeviceInfo->nativeDataFormats[pDeviceInfo->nativeDataFormatCount].channels = channels; + pDeviceInfo->nativeDataFormats[pDeviceInfo->nativeDataFormatCount].sampleRate = caps.dwMaxSecondarySampleRate; + pDeviceInfo->nativeDataFormats[pDeviceInfo->nativeDataFormatCount].flags = 0; + pDeviceInfo->nativeDataFormatCount += 1; + } + + ma_IDirectSound_Release(pDirectSound); + } else { + /* + Capture. 
This is a little different to playback due to the way the supported formats are reported. Technically capture
+        devices can support a number of different formats, but for simplicity and consistency with ma_device_init() I'm just
+        reporting the best format.
+        */
+        ma_IDirectSoundCapture* pDirectSoundCapture;
+        WORD channels;
+        WORD bitsPerSample;
+        DWORD sampleRate;
+
+        result = ma_context_create_IDirectSoundCapture__dsound(pContext, ma_share_mode_shared, pDeviceID, &pDirectSoundCapture);
+        if (result != MA_SUCCESS) {
+            return result;
+        }
+
+        result = ma_context_get_format_info_for_IDirectSoundCapture__dsound(pContext, pDirectSoundCapture, &channels, &bitsPerSample, &sampleRate);
+        if (result != MA_SUCCESS) {
+            ma_IDirectSoundCapture_Release(pDirectSoundCapture);
+            return result;
+        }
+
+        ma_IDirectSoundCapture_Release(pDirectSoundCapture);
+
+        /* The format is always an integer format and is based on the bits per sample. */
+        if (bitsPerSample == 8) {
+            pDeviceInfo->nativeDataFormats[0].format = ma_format_u8;
+        } else if (bitsPerSample == 16) {
+            pDeviceInfo->nativeDataFormats[0].format = ma_format_s16;
+        } else if (bitsPerSample == 24) {
+            pDeviceInfo->nativeDataFormats[0].format = ma_format_s24;
+        } else if (bitsPerSample == 32) {
+            pDeviceInfo->nativeDataFormats[0].format = ma_format_s32;
+        } else {
+            return MA_FORMAT_NOT_SUPPORTED;
+        }
+
+        pDeviceInfo->nativeDataFormats[0].channels = channels;
+        pDeviceInfo->nativeDataFormats[0].sampleRate = sampleRate;
+        pDeviceInfo->nativeDataFormats[0].flags = 0;
+        pDeviceInfo->nativeDataFormatCount = 1;
+    }
+
+    return MA_SUCCESS;
+}
+
+
+
+static ma_result ma_device_uninit__dsound(ma_device* pDevice)
+{
+    MA_ASSERT(pDevice != NULL);
+
+    if (pDevice->dsound.pCaptureBuffer != NULL) {
+        ma_IDirectSoundCaptureBuffer_Release((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer);
+    }
+    if (pDevice->dsound.pCapture != NULL) {
+        ma_IDirectSoundCapture_Release((ma_IDirectSoundCapture*)pDevice->dsound.pCapture);
+    }
+
+    if (pDevice->dsound.pPlaybackBuffer != NULL) {
+        ma_IDirectSoundBuffer_Release((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer);
+    }
+    if (pDevice->dsound.pPlaybackPrimaryBuffer != NULL) {
+        ma_IDirectSoundBuffer_Release((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackPrimaryBuffer);
+    }
+    if (pDevice->dsound.pPlayback != NULL) {
+        ma_IDirectSound_Release((ma_IDirectSound*)pDevice->dsound.pPlayback);
+    }
+
+    return MA_SUCCESS;
+}
+
+static ma_result ma_config_to_WAVEFORMATEXTENSIBLE(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, const ma_channel* pChannelMap, MA_WAVEFORMATEXTENSIBLE* pWF)
+{
+    GUID subformat;
+
+    if (format == ma_format_unknown) {
+        format = MA_DEFAULT_FORMAT;
+    }
+
+    if (channels == 0) {
+        channels = MA_DEFAULT_CHANNELS;
+    }
+
+    if (sampleRate == 0) {
+        sampleRate = MA_DEFAULT_SAMPLE_RATE;
+    }
+
+    switch (format)
+    {
+        case ma_format_u8:
+        case ma_format_s16:
+        case ma_format_s24:
+        /*case ma_format_s24_32:*/
+        case ma_format_s32:
+        {
+            subformat = MA_GUID_KSDATAFORMAT_SUBTYPE_PCM;
+        } break;
+
+        case ma_format_f32:
+        {
+            subformat = MA_GUID_KSDATAFORMAT_SUBTYPE_IEEE_FLOAT;
+        } break;
+
+        default:
+            return MA_FORMAT_NOT_SUPPORTED;
+    }
+
+    MA_ZERO_OBJECT(pWF);
+    pWF->cbSize = sizeof(*pWF);
+    pWF->wFormatTag = WAVE_FORMAT_EXTENSIBLE;
+    pWF->nChannels = (WORD)channels;
+    pWF->nSamplesPerSec = (DWORD)sampleRate;
+    pWF->wBitsPerSample = (WORD)(ma_get_bytes_per_sample(format)*8);
+    pWF->nBlockAlign = (WORD)(pWF->nChannels * pWF->wBitsPerSample / 8);
+    pWF->nAvgBytesPerSec = pWF->nBlockAlign * pWF->nSamplesPerSec;
+    pWF->Samples.wValidBitsPerSample = pWF->wBitsPerSample;
+    pWF->dwChannelMask = ma_channel_map_to_channel_mask__win32(pChannelMap, channels);
+    pWF->SubFormat = subformat;
+
+    return MA_SUCCESS;
+}
+
+static ma_uint32 ma_calculate_period_size_in_frames_from_descriptor__dsound(const ma_device_descriptor* pDescriptor, ma_uint32 nativeSampleRate, ma_performance_profile performanceProfile)
+{
+    /*
+    DirectSound has a minimum period size of 20ms. In practice, this doesn't seem to be enough for
+    reliable glitch-free processing so we're going to use 30ms instead.
+    */
+    ma_uint32 minPeriodSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(30, nativeSampleRate);
+    ma_uint32 periodSizeInFrames;
+
+    periodSizeInFrames = ma_calculate_buffer_size_in_frames_from_descriptor(pDescriptor, nativeSampleRate, performanceProfile);
+    if (periodSizeInFrames < minPeriodSizeInFrames) {
+        periodSizeInFrames = minPeriodSizeInFrames;
+    }
+
+    return periodSizeInFrames;
+}
+
+static ma_result ma_device_init__dsound(ma_device* pDevice, const ma_device_config* pConfig, ma_device_descriptor* pDescriptorPlayback, ma_device_descriptor* pDescriptorCapture)
+{
+    ma_result result;
+    HRESULT hr;
+
+    MA_ASSERT(pDevice != NULL);
+
+    MA_ZERO_OBJECT(&pDevice->dsound);
+
+    if (pConfig->deviceType == ma_device_type_loopback) {
+        return MA_DEVICE_TYPE_NOT_SUPPORTED;
+    }
+
+    /*
+    Unfortunately DirectSound uses different APIs and data structures for playback and capture devices. We need to initialize
+    the capture device first because we'll want to match its buffer size and period count on the playback side if we're using
+    full-duplex mode.
+    */
+    if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) {
+        MA_WAVEFORMATEXTENSIBLE wf;
+        MA_DSCBUFFERDESC descDS;
+        ma_uint32 periodSizeInFrames;
+        ma_uint32 periodCount;
+        char rawdata[1024]; /* <-- Ugly hack to avoid a malloc() due to a crappy DirectSound API. */
+        MA_WAVEFORMATEXTENSIBLE* pActualFormat;
+
+        result = ma_config_to_WAVEFORMATEXTENSIBLE(pDescriptorCapture->format, pDescriptorCapture->channels, pDescriptorCapture->sampleRate, pDescriptorCapture->channelMap, &wf);
+        if (result != MA_SUCCESS) {
+            return result;
+        }
+
+        result = ma_context_create_IDirectSoundCapture__dsound(pDevice->pContext, pDescriptorCapture->shareMode, pDescriptorCapture->pDeviceID, (ma_IDirectSoundCapture**)&pDevice->dsound.pCapture);
+        if (result != MA_SUCCESS) {
+            ma_device_uninit__dsound(pDevice);
+            return result;
+        }
+
+        result = ma_context_get_format_info_for_IDirectSoundCapture__dsound(pDevice->pContext, (ma_IDirectSoundCapture*)pDevice->dsound.pCapture, &wf.nChannels, &wf.wBitsPerSample, &wf.nSamplesPerSec);
+        if (result != MA_SUCCESS) {
+            ma_device_uninit__dsound(pDevice);
+            return result;
+        }
+
+        wf.nBlockAlign = (WORD)(wf.nChannels * wf.wBitsPerSample / 8);
+        wf.nAvgBytesPerSec = wf.nBlockAlign * wf.nSamplesPerSec;
+        wf.Samples.wValidBitsPerSample = wf.wBitsPerSample;
+        wf.SubFormat = MA_GUID_KSDATAFORMAT_SUBTYPE_PCM;
+
+        /* The size of the buffer must be a clean multiple of the period count. */
+        periodSizeInFrames = ma_calculate_period_size_in_frames_from_descriptor__dsound(pDescriptorCapture, wf.nSamplesPerSec, pConfig->performanceProfile);
+        periodCount = (pDescriptorCapture->periodCount > 0) ?
pDescriptorCapture->periodCount : MA_DEFAULT_PERIODS; + + MA_ZERO_OBJECT(&descDS); + descDS.dwSize = sizeof(descDS); + descDS.dwFlags = 0; + descDS.dwBufferBytes = periodSizeInFrames * periodCount * wf.nBlockAlign; + descDS.lpwfxFormat = (MA_WAVEFORMATEX*)&wf; + hr = ma_IDirectSoundCapture_CreateCaptureBuffer((ma_IDirectSoundCapture*)pDevice->dsound.pCapture, &descDS, (ma_IDirectSoundCaptureBuffer**)&pDevice->dsound.pCaptureBuffer, NULL); + if (FAILED(hr)) { + ma_device_uninit__dsound(pDevice); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSoundCapture_CreateCaptureBuffer() failed for capture device."); + return ma_result_from_HRESULT(hr); + } + + /* Get the _actual_ properties of the buffer. */ + pActualFormat = (MA_WAVEFORMATEXTENSIBLE*)rawdata; + hr = ma_IDirectSoundCaptureBuffer_GetFormat((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer, (MA_WAVEFORMATEX*)pActualFormat, sizeof(rawdata), NULL); + if (FAILED(hr)) { + ma_device_uninit__dsound(pDevice); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[DirectSound] Failed to retrieve the actual format of the capture device's buffer."); + return ma_result_from_HRESULT(hr); + } + + /* We can now start setting the output data formats. */ + pDescriptorCapture->format = ma_format_from_WAVEFORMATEX((MA_WAVEFORMATEX*)pActualFormat); + pDescriptorCapture->channels = pActualFormat->nChannels; + pDescriptorCapture->sampleRate = pActualFormat->nSamplesPerSec; + + /* Get the native channel map based on the channel mask. */ + if (pActualFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE) { + ma_channel_mask_to_channel_map__win32(pActualFormat->dwChannelMask, pDescriptorCapture->channels, pDescriptorCapture->channelMap); + } else { + ma_channel_mask_to_channel_map__win32(wf.dwChannelMask, pDescriptorCapture->channels, pDescriptorCapture->channelMap); + } + + /* + After getting the actual format the size of the buffer in frames may have actually changed. However, we want this to be as close to what the + user has asked for as possible, so let's go ahead and release the old capture buffer and create a new one in this case. + */ + if (periodSizeInFrames != (descDS.dwBufferBytes / ma_get_bytes_per_frame(pDescriptorCapture->format, pDescriptorCapture->channels) / periodCount)) { + descDS.dwBufferBytes = periodSizeInFrames * ma_get_bytes_per_frame(pDescriptorCapture->format, pDescriptorCapture->channels) * periodCount; + ma_IDirectSoundCaptureBuffer_Release((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer); + + hr = ma_IDirectSoundCapture_CreateCaptureBuffer((ma_IDirectSoundCapture*)pDevice->dsound.pCapture, &descDS, (ma_IDirectSoundCaptureBuffer**)&pDevice->dsound.pCaptureBuffer, NULL); + if (FAILED(hr)) { + ma_device_uninit__dsound(pDevice); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[DirectSound] Second attempt at IDirectSoundCapture_CreateCaptureBuffer() failed for capture device."); + return ma_result_from_HRESULT(hr); + } + } + + /* DirectSound should give us a buffer exactly the size we asked for. */ + pDescriptorCapture->periodSizeInFrames = periodSizeInFrames; + pDescriptorCapture->periodCount = periodCount; + } + + if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) { + MA_WAVEFORMATEXTENSIBLE wf; + MA_DSBUFFERDESC descDSPrimary; + MA_DSCAPS caps; + char rawdata[1024]; /* <-- Ugly hack to avoid a malloc() due to a crappy DirectSound API. 
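+        (For scale: a WAVEFORMATEXTENSIBLE is only 40 bytes, so 1024 bytes leaves generous headroom
+        for any extra data a driver may append after cbSize. The malloc() based alternative would be
+        a two-call pattern roughly like the one below, with pBuffer and pWF as hypothetical locals:
+
+            DWORD requiredSize;
+            ma_IDirectSoundBuffer_GetFormat(pBuffer, NULL, 0, &requiredSize);   <-- ask for the required size
+            pWF = (MA_WAVEFORMATEX*)ma_malloc(requiredSize, NULL);
+            ma_IDirectSoundBuffer_GetFormat(pBuffer, pWF, requiredSize, NULL);  <-- then read the format into it
+
+        which this backend avoids in favour of the stack buffer.)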
*/ + MA_WAVEFORMATEXTENSIBLE* pActualFormat; + ma_uint32 periodSizeInFrames; + ma_uint32 periodCount; + MA_DSBUFFERDESC descDS; + WORD nativeChannelCount; + DWORD nativeChannelMask = 0; + + result = ma_config_to_WAVEFORMATEXTENSIBLE(pDescriptorPlayback->format, pDescriptorPlayback->channels, pDescriptorPlayback->sampleRate, pDescriptorPlayback->channelMap, &wf); + if (result != MA_SUCCESS) { + return result; + } + + result = ma_context_create_IDirectSound__dsound(pDevice->pContext, pDescriptorPlayback->shareMode, pDescriptorPlayback->pDeviceID, (ma_IDirectSound**)&pDevice->dsound.pPlayback); + if (result != MA_SUCCESS) { + ma_device_uninit__dsound(pDevice); + return result; + } + + MA_ZERO_OBJECT(&descDSPrimary); + descDSPrimary.dwSize = sizeof(MA_DSBUFFERDESC); + descDSPrimary.dwFlags = MA_DSBCAPS_PRIMARYBUFFER | MA_DSBCAPS_CTRLVOLUME; + hr = ma_IDirectSound_CreateSoundBuffer((ma_IDirectSound*)pDevice->dsound.pPlayback, &descDSPrimary, (ma_IDirectSoundBuffer**)&pDevice->dsound.pPlaybackPrimaryBuffer, NULL); + if (FAILED(hr)) { + ma_device_uninit__dsound(pDevice); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSound_CreateSoundBuffer() failed for playback device's primary buffer."); + return ma_result_from_HRESULT(hr); + } + + + /* We may want to make some adjustments to the format if we are using defaults. */ + MA_ZERO_OBJECT(&caps); + caps.dwSize = sizeof(caps); + hr = ma_IDirectSound_GetCaps((ma_IDirectSound*)pDevice->dsound.pPlayback, &caps); + if (FAILED(hr)) { + ma_device_uninit__dsound(pDevice); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSound_GetCaps() failed for playback device."); + return ma_result_from_HRESULT(hr); + } + + if ((caps.dwFlags & MA_DSCAPS_PRIMARYSTEREO) != 0) { + DWORD speakerConfig; + + /* It supports at least stereo, but could support more. */ + nativeChannelCount = 2; + + /* Look at the speaker configuration to get a better idea on the channel count. */ + if (SUCCEEDED(ma_IDirectSound_GetSpeakerConfig((ma_IDirectSound*)pDevice->dsound.pPlayback, &speakerConfig))) { + ma_get_channels_from_speaker_config__dsound(speakerConfig, &nativeChannelCount, &nativeChannelMask); + } + } else { + /* It does not support stereo, which means we are stuck with mono. */ + nativeChannelCount = 1; + nativeChannelMask = 0x00000001; + } + + if (pDescriptorPlayback->channels == 0) { + wf.nChannels = nativeChannelCount; + wf.dwChannelMask = nativeChannelMask; + } + + if (pDescriptorPlayback->sampleRate == 0) { + /* We base the sample rate on the values returned by GetCaps(). */ + if ((caps.dwFlags & MA_DSCAPS_CONTINUOUSRATE) != 0) { + wf.nSamplesPerSec = ma_get_best_sample_rate_within_range(caps.dwMinSecondarySampleRate, caps.dwMaxSecondarySampleRate); + } else { + wf.nSamplesPerSec = caps.dwMaxSecondarySampleRate; + } + } + + wf.nBlockAlign = (WORD)(wf.nChannels * wf.wBitsPerSample / 8); + wf.nAvgBytesPerSec = wf.nBlockAlign * wf.nSamplesPerSec; + + /* + From MSDN: + + The method succeeds even if the hardware does not support the requested format; DirectSound sets the buffer to the closest + supported format. To determine whether this has happened, an application can call the GetFormat method for the primary buffer + and compare the result with the format that was requested with the SetFormat method. 
+ */ + hr = ma_IDirectSoundBuffer_SetFormat((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackPrimaryBuffer, (MA_WAVEFORMATEX*)&wf); + if (FAILED(hr)) { + /* + If setting of the format failed we'll try again with some fallback settings. On Windows 98 I have + observed that IEEE_FLOAT does not work. We'll therefore enforce PCM. I also had issues where a + sample rate of 48000 did not work correctly. Not sure if it was a driver issue or not, but will + use 44100 for the sample rate. + */ + wf.cbSize = 18; /* NOTE: Don't use sizeof(MA_WAVEFORMATEX) here because it's got an extra 2 bytes due to padding. */ + wf.wFormatTag = WAVE_FORMAT_PCM; + wf.wBitsPerSample = 16; + wf.nChannels = nativeChannelCount; + wf.nSamplesPerSec = 44100; + wf.nBlockAlign = wf.nChannels * (wf.wBitsPerSample / 8); + wf.nAvgBytesPerSec = wf.nSamplesPerSec * wf.nBlockAlign; + + hr = ma_IDirectSoundBuffer_SetFormat((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackPrimaryBuffer, (MA_WAVEFORMATEX*)&wf); + if (FAILED(hr)) { + ma_device_uninit__dsound(pDevice); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[DirectSound] Failed to set format of playback device's primary buffer."); + return ma_result_from_HRESULT(hr); + } + } + + /* Get the _actual_ properties of the buffer. */ + pActualFormat = (MA_WAVEFORMATEXTENSIBLE*)rawdata; + hr = ma_IDirectSoundBuffer_GetFormat((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackPrimaryBuffer, (MA_WAVEFORMATEX*)pActualFormat, sizeof(rawdata), NULL); + if (FAILED(hr)) { + ma_device_uninit__dsound(pDevice); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[DirectSound] Failed to retrieve the actual format of the playback device's primary buffer."); + return ma_result_from_HRESULT(hr); + } + + /* We now have enough information to start setting some output properties. */ + pDescriptorPlayback->format = ma_format_from_WAVEFORMATEX((MA_WAVEFORMATEX*)pActualFormat); + pDescriptorPlayback->channels = pActualFormat->nChannels; + pDescriptorPlayback->sampleRate = pActualFormat->nSamplesPerSec; + + /* Get the internal channel map based on the channel mask. */ + if (pActualFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE) { + ma_channel_mask_to_channel_map__win32(pActualFormat->dwChannelMask, pDescriptorPlayback->channels, pDescriptorPlayback->channelMap); + } else { + ma_channel_mask_to_channel_map__win32(wf.dwChannelMask, pDescriptorPlayback->channels, pDescriptorPlayback->channelMap); + } + + /* The size of the buffer must be a clean multiple of the period count. */ + periodSizeInFrames = ma_calculate_period_size_in_frames_from_descriptor__dsound(pDescriptorPlayback, pDescriptorPlayback->sampleRate, pConfig->performanceProfile); + periodCount = (pDescriptorPlayback->periodCount > 0) ? pDescriptorPlayback->periodCount : MA_DEFAULT_PERIODS; + + /* + Meaning of dwFlags (from MSDN): + + DSBCAPS_CTRLPOSITIONNOTIFY + The buffer has position notification capability. + + DSBCAPS_GLOBALFOCUS + With this flag set, an application using DirectSound can continue to play its buffers if the user switches focus to + another application, even if the new application uses DirectSound. + + DSBCAPS_GETCURRENTPOSITION2 + In the first version of DirectSound, the play cursor was significantly ahead of the actual playing sound on emulated + sound cards; it was directly behind the write cursor. Now, if the DSBCAPS_GETCURRENTPOSITION2 flag is specified, the + application can get a more accurate play cursor. 
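+
+        To put rough numbers to the sizing below: for a hypothetical 48000 Hz stereo f32 device, the
+        30ms minimum period enforced by ma_calculate_period_size_in_frames_from_descriptor__dsound()
+        and a period count of 3 would give
+
+            1440 frames * 3 periods * 8 bytes per frame = 34560 bytes
+
+        for dwBufferBytes.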
+ */ + MA_ZERO_OBJECT(&descDS); + descDS.dwSize = sizeof(descDS); + descDS.dwFlags = MA_DSBCAPS_CTRLPOSITIONNOTIFY | MA_DSBCAPS_GLOBALFOCUS | MA_DSBCAPS_GETCURRENTPOSITION2; + descDS.dwBufferBytes = periodSizeInFrames * periodCount * ma_get_bytes_per_frame(pDescriptorPlayback->format, pDescriptorPlayback->channels); + descDS.lpwfxFormat = (MA_WAVEFORMATEX*)pActualFormat; + hr = ma_IDirectSound_CreateSoundBuffer((ma_IDirectSound*)pDevice->dsound.pPlayback, &descDS, (ma_IDirectSoundBuffer**)&pDevice->dsound.pPlaybackBuffer, NULL); + if (FAILED(hr)) { + ma_device_uninit__dsound(pDevice); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSound_CreateSoundBuffer() failed for playback device's secondary buffer."); + return ma_result_from_HRESULT(hr); + } + + /* DirectSound should give us a buffer exactly the size we asked for. */ + pDescriptorPlayback->periodSizeInFrames = periodSizeInFrames; + pDescriptorPlayback->periodCount = periodCount; + } + + return MA_SUCCESS; +} + + +static ma_result ma_device_data_loop__dsound(ma_device* pDevice) +{ + ma_result result = MA_SUCCESS; + ma_uint32 bpfDeviceCapture = ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels); + ma_uint32 bpfDevicePlayback = ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels); + HRESULT hr; + DWORD lockOffsetInBytesCapture; + DWORD lockSizeInBytesCapture; + DWORD mappedSizeInBytesCapture; + DWORD mappedDeviceFramesProcessedCapture; + void* pMappedDeviceBufferCapture; + DWORD lockOffsetInBytesPlayback; + DWORD lockSizeInBytesPlayback; + DWORD mappedSizeInBytesPlayback; + void* pMappedDeviceBufferPlayback; + DWORD prevReadCursorInBytesCapture = 0; + DWORD prevPlayCursorInBytesPlayback = 0; + ma_bool32 physicalPlayCursorLoopFlagPlayback = 0; + DWORD virtualWriteCursorInBytesPlayback = 0; + ma_bool32 virtualWriteCursorLoopFlagPlayback = 0; + ma_bool32 isPlaybackDeviceStarted = MA_FALSE; + ma_uint32 framesWrittenToPlaybackDevice = 0; /* For knowing whether or not the playback device needs to be started. */ + ma_uint32 waitTimeInMilliseconds = 1; + DWORD playbackBufferStatus = 0; + + MA_ASSERT(pDevice != NULL); + + /* The first thing to do is start the capture device. The playback device is only started after the first period is written. */ + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + hr = ma_IDirectSoundCaptureBuffer_Start((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer, MA_DSCBSTART_LOOPING); + if (FAILED(hr)) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSoundCaptureBuffer_Start() failed."); + return ma_result_from_HRESULT(hr); + } + } + + while (ma_device_get_state(pDevice) == ma_device_state_started) { + switch (pDevice->type) + { + case ma_device_type_duplex: + { + DWORD physicalCaptureCursorInBytes; + DWORD physicalReadCursorInBytes; + hr = ma_IDirectSoundCaptureBuffer_GetCurrentPosition((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer, &physicalCaptureCursorInBytes, &physicalReadCursorInBytes); + if (FAILED(hr)) { + return ma_result_from_HRESULT(hr); + } + + /* If nothing is available we just sleep for a bit and return from this iteration. */ + if (physicalReadCursorInBytes == prevReadCursorInBytesCapture) { + ma_sleep(waitTimeInMilliseconds); + continue; /* Nothing is available in the capture buffer. */ + } + + /* + The current position has moved. 
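+                    For illustration, the two cases handled below look like this, where C is
+                    prevReadCursorInBytesCapture, R is physicalReadCursorInBytes and # is captured data:
+
+                        no wrap:  [ . . C # # # R . . ]  -> lock C..R in one go
+                        wrapped:  [ # # R . . . C # # ]  -> lock C..end now, then 0..R on a later pass
+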
We need to map all of the captured samples and write them to the playback device, making sure + we don't return until every frame has been copied over. + */ + if (prevReadCursorInBytesCapture < physicalReadCursorInBytes) { + /* The capture position has not looped. This is the simple case. */ + lockOffsetInBytesCapture = prevReadCursorInBytesCapture; + lockSizeInBytesCapture = (physicalReadCursorInBytes - prevReadCursorInBytesCapture); + } else { + /* + The capture position has looped. This is the more complex case. Map to the end of the buffer. If this does not return anything, + do it again from the start. + */ + if (prevReadCursorInBytesCapture < pDevice->capture.internalPeriodSizeInFrames*pDevice->capture.internalPeriods*bpfDeviceCapture) { + /* Lock up to the end of the buffer. */ + lockOffsetInBytesCapture = prevReadCursorInBytesCapture; + lockSizeInBytesCapture = (pDevice->capture.internalPeriodSizeInFrames*pDevice->capture.internalPeriods*bpfDeviceCapture) - prevReadCursorInBytesCapture; + } else { + /* Lock starting from the start of the buffer. */ + lockOffsetInBytesCapture = 0; + lockSizeInBytesCapture = physicalReadCursorInBytes; + } + } + + if (lockSizeInBytesCapture == 0) { + ma_sleep(waitTimeInMilliseconds); + continue; /* Nothing is available in the capture buffer. */ + } + + hr = ma_IDirectSoundCaptureBuffer_Lock((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer, lockOffsetInBytesCapture, lockSizeInBytesCapture, &pMappedDeviceBufferCapture, &mappedSizeInBytesCapture, NULL, NULL, 0); + if (FAILED(hr)) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[DirectSound] Failed to map buffer from capture device in preparation for writing to the device."); + return ma_result_from_HRESULT(hr); + } + + + /* At this point we have some input data that we need to output. We do not return until every mapped frame of the input data is written to the playback device. */ + mappedDeviceFramesProcessedCapture = 0; + + for (;;) { /* Keep writing to the playback device. 
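+                    To sketch the structure: each pass of this outer loop converts up to one stack
+                    buffer's worth of captured frames to the client format and fires the data callback;
+                    the nested loop further below then drains the callback's output into the playback
+                    ring buffer, possibly over several Lock/Unlock passes.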
*/ + ma_uint8 inputFramesInClientFormat[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint32 inputFramesInClientFormatCap = sizeof(inputFramesInClientFormat) / ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels); + ma_uint8 outputFramesInClientFormat[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint32 outputFramesInClientFormatCap = sizeof(outputFramesInClientFormat) / ma_get_bytes_per_frame(pDevice->playback.format, pDevice->playback.channels); + ma_uint32 outputFramesInClientFormatCount; + ma_uint32 outputFramesInClientFormatConsumed = 0; + ma_uint64 clientCapturedFramesToProcess = ma_min(inputFramesInClientFormatCap, outputFramesInClientFormatCap); + ma_uint64 deviceCapturedFramesToProcess = (mappedSizeInBytesCapture / bpfDeviceCapture) - mappedDeviceFramesProcessedCapture; + void* pRunningMappedDeviceBufferCapture = ma_offset_ptr(pMappedDeviceBufferCapture, mappedDeviceFramesProcessedCapture * bpfDeviceCapture); + + result = ma_data_converter_process_pcm_frames(&pDevice->capture.converter, pRunningMappedDeviceBufferCapture, &deviceCapturedFramesToProcess, inputFramesInClientFormat, &clientCapturedFramesToProcess); + if (result != MA_SUCCESS) { + break; + } + + outputFramesInClientFormatCount = (ma_uint32)clientCapturedFramesToProcess; + mappedDeviceFramesProcessedCapture += (ma_uint32)deviceCapturedFramesToProcess; + + ma_device__handle_data_callback(pDevice, outputFramesInClientFormat, inputFramesInClientFormat, (ma_uint32)clientCapturedFramesToProcess); + + /* At this point we have input and output data in client format. All we need to do now is convert it to the output device format. This may take a few passes. */ + for (;;) { + ma_uint32 framesWrittenThisIteration; + DWORD physicalPlayCursorInBytes; + DWORD physicalWriteCursorInBytes; + DWORD availableBytesPlayback; + DWORD silentPaddingInBytes = 0; /* <-- Must be initialized to 0. */ + + /* We need the physical play and write cursors. */ + if (FAILED(ma_IDirectSoundBuffer_GetCurrentPosition((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, &physicalPlayCursorInBytes, &physicalWriteCursorInBytes))) { + break; + } + + if (physicalPlayCursorInBytes < prevPlayCursorInBytesPlayback) { + physicalPlayCursorLoopFlagPlayback = !physicalPlayCursorLoopFlagPlayback; + } + prevPlayCursorInBytesPlayback = physicalPlayCursorInBytes; + + /* If there's any bytes available for writing we can do that now. The space between the virtual cursor position and play cursor. */ + if (physicalPlayCursorLoopFlagPlayback == virtualWriteCursorLoopFlagPlayback) { + /* Same loop iteration. The available bytes wraps all the way around from the virtual write cursor to the physical play cursor. */ + if (physicalPlayCursorInBytes <= virtualWriteCursorInBytesPlayback) { + availableBytesPlayback = (pDevice->playback.internalPeriodSizeInFrames*pDevice->playback.internalPeriods*bpfDevicePlayback) - virtualWriteCursorInBytesPlayback; + availableBytesPlayback += physicalPlayCursorInBytes; /* Wrap around. */ + } else { + /* This is an error. */ + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_WARNING, "[DirectSound] (Duplex/Playback): Play cursor has moved in front of the write cursor (same loop iteration). physicalPlayCursorInBytes=%ld, virtualWriteCursorInBytes=%ld.\n", physicalPlayCursorInBytes, virtualWriteCursorInBytesPlayback); + availableBytesPlayback = 0; + } + } else { + /* Different loop iterations. The available bytes only goes from the virtual write cursor to the physical play cursor. 
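+                            For illustration (W = virtual write cursor, P = physical play cursor, # = queued audio):
+
+                                same lap (loop flags equal):    [ . . P # # # W . . ]  free: W..end, then wrapping to P
+                                W one lap ahead (flags differ): [ # # W . . . P # # ]  free: W..P only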
+                            */
+                            if (physicalPlayCursorInBytes >= virtualWriteCursorInBytesPlayback) {
+                                availableBytesPlayback = physicalPlayCursorInBytes - virtualWriteCursorInBytesPlayback;
+                            } else {
+                                /* This is an error. */
+                                ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_WARNING, "[DirectSound] (Duplex/Playback): Write cursor has moved behind the play cursor (different loop iterations). physicalPlayCursorInBytes=%ld, virtualWriteCursorInBytes=%ld.\n", physicalPlayCursorInBytes, virtualWriteCursorInBytesPlayback);
+                                availableBytesPlayback = 0;
+                            }
+                        }
+
+                        /* If there's no room available for writing we need to wait for more. */
+                        if (availableBytesPlayback == 0) {
+                            /* If we haven't started the device yet, this will never get beyond 0. In this case we need to get the device started. */
+                            if (!isPlaybackDeviceStarted) {
+                                hr = ma_IDirectSoundBuffer_Play((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, 0, 0, MA_DSBPLAY_LOOPING);
+                                if (FAILED(hr)) {
+                                    ma_IDirectSoundCaptureBuffer_Stop((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer);
+                                    ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSoundBuffer_Play() failed.");
+                                    return ma_result_from_HRESULT(hr);
+                                }
+                                isPlaybackDeviceStarted = MA_TRUE;
+                            } else {
+                                ma_sleep(waitTimeInMilliseconds);
+                                continue;
+                            }
+                        }
+
+
+                        /* Getting here means there is room available somewhere. We limit this to either the end of the buffer or the physical play cursor, whichever is closest. */
+                        lockOffsetInBytesPlayback = virtualWriteCursorInBytesPlayback;
+                        if (physicalPlayCursorLoopFlagPlayback == virtualWriteCursorLoopFlagPlayback) {
+                            /* Same loop iteration. Go up to the end of the buffer. */
+                            lockSizeInBytesPlayback = (pDevice->playback.internalPeriodSizeInFrames*pDevice->playback.internalPeriods*bpfDevicePlayback) - virtualWriteCursorInBytesPlayback;
+                        } else {
+                            /* Different loop iterations. Go up to the physical play cursor. */
+                            lockSizeInBytesPlayback = physicalPlayCursorInBytes - virtualWriteCursorInBytesPlayback;
+                        }
+
+                        hr = ma_IDirectSoundBuffer_Lock((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, lockOffsetInBytesPlayback, lockSizeInBytesPlayback, &pMappedDeviceBufferPlayback, &mappedSizeInBytesPlayback, NULL, NULL, 0);
+                        if (FAILED(hr)) {
+                            ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[DirectSound] Failed to map buffer from playback device in preparation for writing to the device.");
+                            result = ma_result_from_HRESULT(hr);
+                            break;
+                        }
+
+                        /*
+                        Experiment: If the playback buffer is being starved, pad it with some silence to get it back in sync. This will cause a glitch, but it may prevent
+                        endless glitching due to it constantly running out of data.
+                        */
+                        if (isPlaybackDeviceStarted) {
+                            DWORD bytesQueuedForPlayback = (pDevice->playback.internalPeriodSizeInFrames*pDevice->playback.internalPeriods*bpfDevicePlayback) - availableBytesPlayback;
+                            if (bytesQueuedForPlayback < (pDevice->playback.internalPeriodSizeInFrames*bpfDevicePlayback)) {
+                                silentPaddingInBytes = (pDevice->playback.internalPeriodSizeInFrames*2*bpfDevicePlayback) - bytesQueuedForPlayback;
+                                if (silentPaddingInBytes > lockSizeInBytesPlayback) {
+                                    silentPaddingInBytes = lockSizeInBytesPlayback;
+                                }
+
+                                ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_WARNING, "[DirectSound] (Duplex/Playback) Playback buffer starved. availableBytesPlayback=%ld, silentPaddingInBytes=%ld\n", availableBytesPlayback, silentPaddingInBytes);
+                            }
+                        }
+
+                        /* At this point we have a buffer for output. 
*/ + if (silentPaddingInBytes > 0) { + MA_ZERO_MEMORY(pMappedDeviceBufferPlayback, silentPaddingInBytes); + framesWrittenThisIteration = silentPaddingInBytes/bpfDevicePlayback; + } else { + ma_uint64 convertedFrameCountIn = (outputFramesInClientFormatCount - outputFramesInClientFormatConsumed); + ma_uint64 convertedFrameCountOut = mappedSizeInBytesPlayback/bpfDevicePlayback; + void* pConvertedFramesIn = ma_offset_ptr(outputFramesInClientFormat, outputFramesInClientFormatConsumed * bpfDevicePlayback); + void* pConvertedFramesOut = pMappedDeviceBufferPlayback; + + result = ma_data_converter_process_pcm_frames(&pDevice->playback.converter, pConvertedFramesIn, &convertedFrameCountIn, pConvertedFramesOut, &convertedFrameCountOut); + if (result != MA_SUCCESS) { + break; + } + + outputFramesInClientFormatConsumed += (ma_uint32)convertedFrameCountOut; + framesWrittenThisIteration = (ma_uint32)convertedFrameCountOut; + } + + + hr = ma_IDirectSoundBuffer_Unlock((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, pMappedDeviceBufferPlayback, framesWrittenThisIteration*bpfDevicePlayback, NULL, 0); + if (FAILED(hr)) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[DirectSound] Failed to unlock internal buffer from playback device after writing to the device."); + result = ma_result_from_HRESULT(hr); + break; + } + + virtualWriteCursorInBytesPlayback += framesWrittenThisIteration*bpfDevicePlayback; + if ((virtualWriteCursorInBytesPlayback/bpfDevicePlayback) == pDevice->playback.internalPeriodSizeInFrames*pDevice->playback.internalPeriods) { + virtualWriteCursorInBytesPlayback = 0; + virtualWriteCursorLoopFlagPlayback = !virtualWriteCursorLoopFlagPlayback; + } + + /* + We may need to start the device. We want two full periods to be written before starting the playback device. Having an extra period adds + a bit of a buffer to prevent the playback buffer from getting starved. + */ + framesWrittenToPlaybackDevice += framesWrittenThisIteration; + if (!isPlaybackDeviceStarted && framesWrittenToPlaybackDevice >= (pDevice->playback.internalPeriodSizeInFrames*2)) { + hr = ma_IDirectSoundBuffer_Play((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, 0, 0, MA_DSBPLAY_LOOPING); + if (FAILED(hr)) { + ma_IDirectSoundCaptureBuffer_Stop((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSoundBuffer_Play() failed."); + return ma_result_from_HRESULT(hr); + } + isPlaybackDeviceStarted = MA_TRUE; + } + + if (framesWrittenThisIteration < mappedSizeInBytesPlayback/bpfDevicePlayback) { + break; /* We're finished with the output data.*/ + } + } + + if (clientCapturedFramesToProcess == 0) { + break; /* We just consumed every input sample. */ + } + } + + + /* At this point we're done with the mapped portion of the capture buffer. 
*/ + hr = ma_IDirectSoundCaptureBuffer_Unlock((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer, pMappedDeviceBufferCapture, mappedSizeInBytesCapture, NULL, 0); + if (FAILED(hr)) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[DirectSound] Failed to unlock internal buffer from capture device after reading from the device."); + return ma_result_from_HRESULT(hr); + } + prevReadCursorInBytesCapture = (lockOffsetInBytesCapture + mappedSizeInBytesCapture); + } break; + + + + case ma_device_type_capture: + { + DWORD physicalCaptureCursorInBytes; + DWORD physicalReadCursorInBytes; + hr = ma_IDirectSoundCaptureBuffer_GetCurrentPosition((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer, &physicalCaptureCursorInBytes, &physicalReadCursorInBytes); + if (FAILED(hr)) { + return MA_ERROR; + } + + /* If the previous capture position is the same as the current position we need to wait a bit longer. */ + if (prevReadCursorInBytesCapture == physicalReadCursorInBytes) { + ma_sleep(waitTimeInMilliseconds); + continue; + } + + /* Getting here means we have capture data available. */ + if (prevReadCursorInBytesCapture < physicalReadCursorInBytes) { + /* The capture position has not looped. This is the simple case. */ + lockOffsetInBytesCapture = prevReadCursorInBytesCapture; + lockSizeInBytesCapture = (physicalReadCursorInBytes - prevReadCursorInBytesCapture); + } else { + /* + The capture position has looped. This is the more complex case. Map to the end of the buffer. If this does not return anything, + do it again from the start. + */ + if (prevReadCursorInBytesCapture < pDevice->capture.internalPeriodSizeInFrames*pDevice->capture.internalPeriods*bpfDeviceCapture) { + /* Lock up to the end of the buffer. */ + lockOffsetInBytesCapture = prevReadCursorInBytesCapture; + lockSizeInBytesCapture = (pDevice->capture.internalPeriodSizeInFrames*pDevice->capture.internalPeriods*bpfDeviceCapture) - prevReadCursorInBytesCapture; + } else { + /* Lock starting from the start of the buffer. */ + lockOffsetInBytesCapture = 0; + lockSizeInBytesCapture = physicalReadCursorInBytes; + } + } + + if (lockSizeInBytesCapture < pDevice->capture.internalPeriodSizeInFrames) { + ma_sleep(waitTimeInMilliseconds); + continue; /* Nothing is available in the capture buffer. 
+                    */
+                }
+
+                hr = ma_IDirectSoundCaptureBuffer_Lock((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer, lockOffsetInBytesCapture, lockSizeInBytesCapture, &pMappedDeviceBufferCapture, &mappedSizeInBytesCapture, NULL, NULL, 0);
+                if (FAILED(hr)) {
+                    ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[DirectSound] Failed to map buffer from capture device in preparation for writing to the device.");
+                    result = ma_result_from_HRESULT(hr);
+                    break;  /* <-- Don't process an unmapped buffer; the error is returned after the switch. */
+                }
+
+                if (lockSizeInBytesCapture != mappedSizeInBytesCapture) {
+                    ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "[DirectSound] (Capture) lockSizeInBytesCapture=%ld != mappedSizeInBytesCapture=%ld\n", lockSizeInBytesCapture, mappedSizeInBytesCapture);
+                }
+
+                ma_device__send_frames_to_client(pDevice, mappedSizeInBytesCapture/bpfDeviceCapture, pMappedDeviceBufferCapture);
+
+                hr = ma_IDirectSoundCaptureBuffer_Unlock((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer, pMappedDeviceBufferCapture, mappedSizeInBytesCapture, NULL, 0);
+                if (FAILED(hr)) {
+                    ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[DirectSound] Failed to unlock internal buffer from capture device after reading from the device.");
+                    return ma_result_from_HRESULT(hr);
+                }
+                prevReadCursorInBytesCapture = lockOffsetInBytesCapture + mappedSizeInBytesCapture;
+
+                if (prevReadCursorInBytesCapture == (pDevice->capture.internalPeriodSizeInFrames*pDevice->capture.internalPeriods*bpfDeviceCapture)) {
+                    prevReadCursorInBytesCapture = 0;
+                }
+            } break;
+
+
+
+            case ma_device_type_playback:
+            {
+                DWORD availableBytesPlayback;
+                DWORD physicalPlayCursorInBytes;
+                DWORD physicalWriteCursorInBytes;
+                hr = ma_IDirectSoundBuffer_GetCurrentPosition((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, &physicalPlayCursorInBytes, &physicalWriteCursorInBytes);
+                if (FAILED(hr)) {
+                    break;
+                }
+
+                hr = ma_IDirectSoundBuffer_GetStatus((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, &playbackBufferStatus);
+                if (SUCCEEDED(hr) && (playbackBufferStatus & MA_DSBSTATUS_PLAYING) == 0 && isPlaybackDeviceStarted) {
+                    ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, "[DirectSound] Attempting to resume audio due to state: %d.", (int)playbackBufferStatus);
+                    hr = ma_IDirectSoundBuffer_Play((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, 0, 0, MA_DSBPLAY_LOOPING);
+                    if (FAILED(hr)) {
+                        ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSoundBuffer_Play() failed after attempting to resume from state %d.", (int)playbackBufferStatus);
+                        return ma_result_from_HRESULT(hr);
+                    }
+
+                    isPlaybackDeviceStarted = MA_TRUE;
+                    ma_sleep(waitTimeInMilliseconds);
+                    continue;
+                }
+
+                if (physicalPlayCursorInBytes < prevPlayCursorInBytesPlayback) {
+                    physicalPlayCursorLoopFlagPlayback = !physicalPlayCursorLoopFlagPlayback;
+                }
+                prevPlayCursorInBytesPlayback = physicalPlayCursorInBytes;
+
+                /* If there are any bytes available for writing we can do that now. This is the space between the virtual write cursor and the play cursor. */
+                if (physicalPlayCursorLoopFlagPlayback == virtualWriteCursorLoopFlagPlayback) {
+                    /* Same loop iteration. The available bytes wrap all the way around from the virtual write cursor to the physical play cursor. */
+                    if (physicalPlayCursorInBytes <= virtualWriteCursorInBytesPlayback) {
+                        availableBytesPlayback  = (pDevice->playback.internalPeriodSizeInFrames*pDevice->playback.internalPeriods*bpfDevicePlayback) - virtualWriteCursorInBytesPlayback;
+                        availableBytesPlayback += physicalPlayCursorInBytes; /* Wrap around. */
+                    } else {
+                        /* This is an error. */
+                        ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_WARNING, "[DirectSound] (Playback): Play cursor has moved in front of the write cursor (same loop iterations). physicalPlayCursorInBytes=%ld, virtualWriteCursorInBytes=%ld.\n", physicalPlayCursorInBytes, virtualWriteCursorInBytesPlayback);
+                        availableBytesPlayback = 0;
+                    }
+                } else {
+                    /* Different loop iterations. The available bytes only go from the virtual write cursor to the physical play cursor. */
+                    if (physicalPlayCursorInBytes >= virtualWriteCursorInBytesPlayback) {
+                        availableBytesPlayback = physicalPlayCursorInBytes - virtualWriteCursorInBytesPlayback;
+                    } else {
+                        /* This is an error. */
+                        ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_WARNING, "[DirectSound] (Playback): Write cursor has moved behind the play cursor (different loop iterations). physicalPlayCursorInBytes=%ld, virtualWriteCursorInBytes=%ld.\n", physicalPlayCursorInBytes, virtualWriteCursorInBytesPlayback);
+                        availableBytesPlayback = 0;
+                    }
+                }
+
+                /* If there's no room available for writing we need to wait for more. */
+                if (availableBytesPlayback < pDevice->playback.internalPeriodSizeInFrames) {
+                    /* If we haven't started the device yet, this will never get beyond 0. In this case we need to get the device started. */
+                    if (availableBytesPlayback == 0 && !isPlaybackDeviceStarted) {
+                        hr = ma_IDirectSoundBuffer_Play((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, 0, 0, MA_DSBPLAY_LOOPING);
+                        if (FAILED(hr)) {
+                            ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSoundBuffer_Play() failed.");
+                            return ma_result_from_HRESULT(hr);
+                        }
+                        isPlaybackDeviceStarted = MA_TRUE;
+                    } else {
+                        ma_sleep(waitTimeInMilliseconds);
+                        continue;
+                    }
+                }
+
+                /* Getting here means there is room available somewhere. We limit this to either the end of the buffer or the physical play cursor, whichever is closest. */
+                lockOffsetInBytesPlayback = virtualWriteCursorInBytesPlayback;
+                if (physicalPlayCursorLoopFlagPlayback == virtualWriteCursorLoopFlagPlayback) {
+                    /* Same loop iteration. Go up to the end of the buffer. */
+                    lockSizeInBytesPlayback = (pDevice->playback.internalPeriodSizeInFrames*pDevice->playback.internalPeriods*bpfDevicePlayback) - virtualWriteCursorInBytesPlayback;
+                } else {
+                    /* Different loop iterations. Go up to the physical play cursor. */
+                    lockSizeInBytesPlayback = physicalPlayCursorInBytes - virtualWriteCursorInBytesPlayback;
+                }
+
+                hr = ma_IDirectSoundBuffer_Lock((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, lockOffsetInBytesPlayback, lockSizeInBytesPlayback, &pMappedDeviceBufferPlayback, &mappedSizeInBytesPlayback, NULL, NULL, 0);
+                if (FAILED(hr)) {
+                    ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[DirectSound] Failed to map buffer from playback device in preparation for writing to the device.");
+                    result = ma_result_from_HRESULT(hr);
+                    break;
+                }
+
+                /* At this point we have a buffer for output. 
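+                The division below converts bytes back to frames; for illustration, with a hypothetical
+                stereo f32 device (bpfDevicePlayback = 8) a 34560 byte mapping would yield
+                34560 / 8 = 4320 frames to request from the client.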
*/ + ma_device__read_frames_from_client(pDevice, (mappedSizeInBytesPlayback/bpfDevicePlayback), pMappedDeviceBufferPlayback); + + hr = ma_IDirectSoundBuffer_Unlock((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, pMappedDeviceBufferPlayback, mappedSizeInBytesPlayback, NULL, 0); + if (FAILED(hr)) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[DirectSound] Failed to unlock internal buffer from playback device after writing to the device."); + result = ma_result_from_HRESULT(hr); + break; + } + + virtualWriteCursorInBytesPlayback += mappedSizeInBytesPlayback; + if (virtualWriteCursorInBytesPlayback == pDevice->playback.internalPeriodSizeInFrames*pDevice->playback.internalPeriods*bpfDevicePlayback) { + virtualWriteCursorInBytesPlayback = 0; + virtualWriteCursorLoopFlagPlayback = !virtualWriteCursorLoopFlagPlayback; + } + + /* + We may need to start the device. We want two full periods to be written before starting the playback device. Having an extra period adds + a bit of a buffer to prevent the playback buffer from getting starved. + */ + framesWrittenToPlaybackDevice += mappedSizeInBytesPlayback/bpfDevicePlayback; + if (!isPlaybackDeviceStarted && framesWrittenToPlaybackDevice >= pDevice->playback.internalPeriodSizeInFrames) { + hr = ma_IDirectSoundBuffer_Play((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, 0, 0, MA_DSBPLAY_LOOPING); + if (FAILED(hr)) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSoundBuffer_Play() failed."); + return ma_result_from_HRESULT(hr); + } + isPlaybackDeviceStarted = MA_TRUE; + } + } break; + + + default: return MA_INVALID_ARGS; /* Invalid device type. */ + } + + if (result != MA_SUCCESS) { + return result; + } + } + + /* Getting here means the device is being stopped. */ + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + hr = ma_IDirectSoundCaptureBuffer_Stop((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer); + if (FAILED(hr)) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSoundCaptureBuffer_Stop() failed."); + return ma_result_from_HRESULT(hr); + } + } + + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + /* The playback device should be drained before stopping. All we do is wait until the available bytes is equal to the size of the buffer. */ + if (isPlaybackDeviceStarted) { + for (;;) { + DWORD availableBytesPlayback = 0; + DWORD physicalPlayCursorInBytes; + DWORD physicalWriteCursorInBytes; + hr = ma_IDirectSoundBuffer_GetCurrentPosition((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, &physicalPlayCursorInBytes, &physicalWriteCursorInBytes); + if (FAILED(hr)) { + break; + } + + if (physicalPlayCursorInBytes < prevPlayCursorInBytesPlayback) { + physicalPlayCursorLoopFlagPlayback = !physicalPlayCursorLoopFlagPlayback; + } + prevPlayCursorInBytesPlayback = physicalPlayCursorInBytes; + + if (physicalPlayCursorLoopFlagPlayback == virtualWriteCursorLoopFlagPlayback) { + /* Same loop iteration. The available bytes wraps all the way around from the virtual write cursor to the physical play cursor. */ + if (physicalPlayCursorInBytes <= virtualWriteCursorInBytesPlayback) { + availableBytesPlayback = (pDevice->playback.internalPeriodSizeInFrames*pDevice->playback.internalPeriods*bpfDevicePlayback) - virtualWriteCursorInBytesPlayback; + availableBytesPlayback += physicalPlayCursorInBytes; /* Wrap around. 
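+                        Once this sum spans the whole buffer every queued frame has been played;
+                        that is the exit condition of this drain loop, checked just below.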
*/ + } else { + break; + } + } else { + /* Different loop iterations. The available bytes only goes from the virtual write cursor to the physical play cursor. */ + if (physicalPlayCursorInBytes >= virtualWriteCursorInBytesPlayback) { + availableBytesPlayback = physicalPlayCursorInBytes - virtualWriteCursorInBytesPlayback; + } else { + break; + } + } + + if (availableBytesPlayback >= (pDevice->playback.internalPeriodSizeInFrames*pDevice->playback.internalPeriods*bpfDevicePlayback)) { + break; + } + + ma_sleep(waitTimeInMilliseconds); + } + } + + hr = ma_IDirectSoundBuffer_Stop((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer); + if (FAILED(hr)) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSoundBuffer_Stop() failed."); + return ma_result_from_HRESULT(hr); + } + + ma_IDirectSoundBuffer_SetCurrentPosition((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, 0); + } + + return MA_SUCCESS; +} + +static ma_result ma_context_uninit__dsound(ma_context* pContext) +{ + MA_ASSERT(pContext != NULL); + MA_ASSERT(pContext->backend == ma_backend_dsound); + + ma_dlclose(ma_context_get_log(pContext), pContext->dsound.hDSoundDLL); + + return MA_SUCCESS; +} + +static ma_result ma_context_init__dsound(ma_context* pContext, const ma_context_config* pConfig, ma_backend_callbacks* pCallbacks) +{ + MA_ASSERT(pContext != NULL); + + (void)pConfig; + + pContext->dsound.hDSoundDLL = ma_dlopen(ma_context_get_log(pContext), "dsound.dll"); + if (pContext->dsound.hDSoundDLL == NULL) { + return MA_API_NOT_FOUND; + } + + pContext->dsound.DirectSoundCreate = ma_dlsym(ma_context_get_log(pContext), pContext->dsound.hDSoundDLL, "DirectSoundCreate"); + pContext->dsound.DirectSoundEnumerateA = ma_dlsym(ma_context_get_log(pContext), pContext->dsound.hDSoundDLL, "DirectSoundEnumerateA"); + pContext->dsound.DirectSoundCaptureCreate = ma_dlsym(ma_context_get_log(pContext), pContext->dsound.hDSoundDLL, "DirectSoundCaptureCreate"); + pContext->dsound.DirectSoundCaptureEnumerateA = ma_dlsym(ma_context_get_log(pContext), pContext->dsound.hDSoundDLL, "DirectSoundCaptureEnumerateA"); + + /* + We need to support all functions or nothing. DirectSound with Windows 95 seems to not work too + well in my testing. For example, it's missing DirectSoundCaptureEnumerateA(). This is a convenient + place to just disable the DirectSound backend for Windows 95. + */ + if (pContext->dsound.DirectSoundCreate == NULL || + pContext->dsound.DirectSoundEnumerateA == NULL || + pContext->dsound.DirectSoundCaptureCreate == NULL || + pContext->dsound.DirectSoundCaptureEnumerateA == NULL) { + return MA_API_NOT_FOUND; + } + + pContext->dsound.hWnd = pConfig->dsound.hWnd; + + pCallbacks->onContextInit = ma_context_init__dsound; + pCallbacks->onContextUninit = ma_context_uninit__dsound; + pCallbacks->onContextEnumerateDevices = ma_context_enumerate_devices__dsound; + pCallbacks->onContextGetDeviceInfo = ma_context_get_device_info__dsound; + pCallbacks->onDeviceInit = ma_device_init__dsound; + pCallbacks->onDeviceUninit = ma_device_uninit__dsound; + pCallbacks->onDeviceStart = NULL; /* Not used. Started in onDeviceDataLoop. */ + pCallbacks->onDeviceStop = NULL; /* Not used. Stopped in onDeviceDataLoop. */ + pCallbacks->onDeviceRead = NULL; /* Not used. Data is read directly in onDeviceDataLoop. */ + pCallbacks->onDeviceWrite = NULL; /* Not used. Data is written directly in onDeviceDataLoop. 
*/ + pCallbacks->onDeviceDataLoop = ma_device_data_loop__dsound; + + return MA_SUCCESS; +} +#endif + + + +/****************************************************************************** + +WinMM Backend + +******************************************************************************/ +#ifdef MA_HAS_WINMM + +/* +Some build configurations will exclude the WinMM API. An example is when WIN32_LEAN_AND_MEAN +is defined. We need to define the types and functions we need manually. +*/ +#define MA_MMSYSERR_NOERROR 0 +#define MA_MMSYSERR_ERROR 1 +#define MA_MMSYSERR_BADDEVICEID 2 +#define MA_MMSYSERR_INVALHANDLE 5 +#define MA_MMSYSERR_NOMEM 7 +#define MA_MMSYSERR_INVALFLAG 10 +#define MA_MMSYSERR_INVALPARAM 11 +#define MA_MMSYSERR_HANDLEBUSY 12 + +#define MA_CALLBACK_EVENT 0x00050000 +#define MA_WAVE_ALLOWSYNC 0x0002 + +#define MA_WHDR_DONE 0x00000001 +#define MA_WHDR_PREPARED 0x00000002 +#define MA_WHDR_BEGINLOOP 0x00000004 +#define MA_WHDR_ENDLOOP 0x00000008 +#define MA_WHDR_INQUEUE 0x00000010 + +#define MA_MAXPNAMELEN 32 + +typedef void* MA_HWAVEIN; +typedef void* MA_HWAVEOUT; +typedef UINT MA_MMRESULT; +typedef UINT MA_MMVERSION; + +typedef struct +{ + WORD wMid; + WORD wPid; + MA_MMVERSION vDriverVersion; + CHAR szPname[MA_MAXPNAMELEN]; + DWORD dwFormats; + WORD wChannels; + WORD wReserved1; +} MA_WAVEINCAPSA; + +typedef struct +{ + WORD wMid; + WORD wPid; + MA_MMVERSION vDriverVersion; + CHAR szPname[MA_MAXPNAMELEN]; + DWORD dwFormats; + WORD wChannels; + WORD wReserved1; + DWORD dwSupport; +} MA_WAVEOUTCAPSA; + +typedef struct tagWAVEHDR +{ + char* lpData; + DWORD dwBufferLength; + DWORD dwBytesRecorded; + DWORD_PTR dwUser; + DWORD dwFlags; + DWORD dwLoops; + struct tagWAVEHDR* lpNext; + DWORD_PTR reserved; +} MA_WAVEHDR; + +typedef struct +{ + WORD wMid; + WORD wPid; + MA_MMVERSION vDriverVersion; + CHAR szPname[MA_MAXPNAMELEN]; + DWORD dwFormats; + WORD wChannels; + WORD wReserved1; + DWORD dwSupport; + GUID ManufacturerGuid; + GUID ProductGuid; + GUID NameGuid; +} MA_WAVEOUTCAPS2A; + +typedef struct +{ + WORD wMid; + WORD wPid; + MA_MMVERSION vDriverVersion; + CHAR szPname[MA_MAXPNAMELEN]; + DWORD dwFormats; + WORD wChannels; + WORD wReserved1; + GUID ManufacturerGuid; + GUID ProductGuid; + GUID NameGuid; +} MA_WAVEINCAPS2A; + +typedef UINT (WINAPI * MA_PFN_waveOutGetNumDevs)(void); +typedef MA_MMRESULT (WINAPI * MA_PFN_waveOutGetDevCapsA)(ma_uintptr uDeviceID, MA_WAVEOUTCAPSA* pwoc, UINT cbwoc); +typedef MA_MMRESULT (WINAPI * MA_PFN_waveOutOpen)(MA_HWAVEOUT* phwo, UINT uDeviceID, const MA_WAVEFORMATEX* pwfx, DWORD_PTR dwCallback, DWORD_PTR dwInstance, DWORD fdwOpen); +typedef MA_MMRESULT (WINAPI * MA_PFN_waveOutClose)(MA_HWAVEOUT hwo); +typedef MA_MMRESULT (WINAPI * MA_PFN_waveOutPrepareHeader)(MA_HWAVEOUT hwo, MA_WAVEHDR* pwh, UINT cbwh); +typedef MA_MMRESULT (WINAPI * MA_PFN_waveOutUnprepareHeader)(MA_HWAVEOUT hwo, MA_WAVEHDR* pwh, UINT cbwh); +typedef MA_MMRESULT (WINAPI * MA_PFN_waveOutWrite)(MA_HWAVEOUT hwo, MA_WAVEHDR* pwh, UINT cbwh); +typedef MA_MMRESULT (WINAPI * MA_PFN_waveOutReset)(MA_HWAVEOUT hwo); +typedef UINT (WINAPI * MA_PFN_waveInGetNumDevs)(void); +typedef MA_MMRESULT (WINAPI * MA_PFN_waveInGetDevCapsA)(ma_uintptr uDeviceID, MA_WAVEINCAPSA* pwic, UINT cbwic); +typedef MA_MMRESULT (WINAPI * MA_PFN_waveInOpen)(MA_HWAVEIN* phwi, UINT uDeviceID, const MA_WAVEFORMATEX* pwfx, DWORD_PTR dwCallback, DWORD_PTR dwInstance, DWORD fdwOpen); +typedef MA_MMRESULT (WINAPI * MA_PFN_waveInClose)(MA_HWAVEIN hwi); +typedef MA_MMRESULT (WINAPI * MA_PFN_waveInPrepareHeader)(MA_HWAVEIN hwi, 
MA_WAVEHDR* pwh, UINT cbwh); +typedef MA_MMRESULT (WINAPI * MA_PFN_waveInUnprepareHeader)(MA_HWAVEIN hwi, MA_WAVEHDR* pwh, UINT cbwh); +typedef MA_MMRESULT (WINAPI * MA_PFN_waveInAddBuffer)(MA_HWAVEIN hwi, MA_WAVEHDR* pwh, UINT cbwh); +typedef MA_MMRESULT (WINAPI * MA_PFN_waveInStart)(MA_HWAVEIN hwi); +typedef MA_MMRESULT (WINAPI * MA_PFN_waveInReset)(MA_HWAVEIN hwi); + +static ma_result ma_result_from_MMRESULT(MA_MMRESULT resultMM) +{ + switch (resultMM) + { + case MA_MMSYSERR_NOERROR: return MA_SUCCESS; + case MA_MMSYSERR_BADDEVICEID: return MA_INVALID_ARGS; + case MA_MMSYSERR_INVALHANDLE: return MA_INVALID_ARGS; + case MA_MMSYSERR_NOMEM: return MA_OUT_OF_MEMORY; + case MA_MMSYSERR_INVALFLAG: return MA_INVALID_ARGS; + case MA_MMSYSERR_INVALPARAM: return MA_INVALID_ARGS; + case MA_MMSYSERR_HANDLEBUSY: return MA_BUSY; + case MA_MMSYSERR_ERROR: return MA_ERROR; + default: return MA_ERROR; + } +} + +static char* ma_find_last_character(char* str, char ch) +{ + char* last; + + if (str == NULL) { + return NULL; + } + + last = NULL; + while (*str != '\0') { + if (*str == ch) { + last = str; + } + + str += 1; + } + + return last; +} + +static ma_uint32 ma_get_period_size_in_bytes(ma_uint32 periodSizeInFrames, ma_format format, ma_uint32 channels) +{ + return periodSizeInFrames * ma_get_bytes_per_frame(format, channels); +} + + +/* +Our own "WAVECAPS" structure that contains generic information shared between WAVEOUTCAPS2 and WAVEINCAPS2 so +we can do things generically and typesafely. Names are being kept the same for consistency. +*/ +typedef struct +{ + CHAR szPname[MA_MAXPNAMELEN]; + DWORD dwFormats; + WORD wChannels; + GUID NameGuid; +} MA_WAVECAPSA; + +static ma_result ma_get_best_info_from_formats_flags__winmm(DWORD dwFormats, WORD channels, WORD* pBitsPerSample, DWORD* pSampleRate) +{ + WORD bitsPerSample = 0; + DWORD sampleRate = 0; + + if (pBitsPerSample) { + *pBitsPerSample = 0; + } + if (pSampleRate) { + *pSampleRate = 0; + } + + if (channels == 1) { + bitsPerSample = 16; + if ((dwFormats & WAVE_FORMAT_48M16) != 0) { + sampleRate = 48000; + } else if ((dwFormats & WAVE_FORMAT_44M16) != 0) { + sampleRate = 44100; + } else if ((dwFormats & WAVE_FORMAT_2M16) != 0) { + sampleRate = 22050; + } else if ((dwFormats & WAVE_FORMAT_1M16) != 0) { + sampleRate = 11025; + } else if ((dwFormats & WAVE_FORMAT_96M16) != 0) { + sampleRate = 96000; + } else { + bitsPerSample = 8; + if ((dwFormats & WAVE_FORMAT_48M08) != 0) { + sampleRate = 48000; + } else if ((dwFormats & WAVE_FORMAT_44M08) != 0) { + sampleRate = 44100; + } else if ((dwFormats & WAVE_FORMAT_2M08) != 0) { + sampleRate = 22050; + } else if ((dwFormats & WAVE_FORMAT_1M08) != 0) { + sampleRate = 11025; + } else if ((dwFormats & WAVE_FORMAT_96M08) != 0) { + sampleRate = 96000; + } else { + return MA_FORMAT_NOT_SUPPORTED; + } + } + } else { + bitsPerSample = 16; + if ((dwFormats & WAVE_FORMAT_48S16) != 0) { + sampleRate = 48000; + } else if ((dwFormats & WAVE_FORMAT_44S16) != 0) { + sampleRate = 44100; + } else if ((dwFormats & WAVE_FORMAT_2S16) != 0) { + sampleRate = 22050; + } else if ((dwFormats & WAVE_FORMAT_1S16) != 0) { + sampleRate = 11025; + } else if ((dwFormats & WAVE_FORMAT_96S16) != 0) { + sampleRate = 96000; + } else { + bitsPerSample = 8; + if ((dwFormats & WAVE_FORMAT_48S08) != 0) { + sampleRate = 48000; + } else if ((dwFormats & WAVE_FORMAT_44S08) != 0) { + sampleRate = 44100; + } else if ((dwFormats & WAVE_FORMAT_2S08) != 0) { + sampleRate = 22050; + } else if ((dwFormats & WAVE_FORMAT_1S08) != 0) { + sampleRate = 11025; + } 
+            else if ((dwFormats & WAVE_FORMAT_96S08) != 0) {
+                sampleRate = 96000;
+            } else {
+                return MA_FORMAT_NOT_SUPPORTED;
+            }
+        }
+    }
+
+    if (pBitsPerSample) {
+        *pBitsPerSample = bitsPerSample;
+    }
+    if (pSampleRate) {
+        *pSampleRate = sampleRate;
+    }
+
+    return MA_SUCCESS;
+}
+
+static ma_result ma_formats_flags_to_WAVEFORMATEX__winmm(DWORD dwFormats, WORD channels, MA_WAVEFORMATEX* pWF)
+{
+    ma_result result;
+
+    MA_ASSERT(pWF != NULL);
+
+    MA_ZERO_OBJECT(pWF);
+    pWF->cbSize = sizeof(*pWF);
+    pWF->wFormatTag = WAVE_FORMAT_PCM;
+    pWF->nChannels = (WORD)channels;
+    if (pWF->nChannels > 2) {
+        pWF->nChannels = 2;
+    }
+
+    result = ma_get_best_info_from_formats_flags__winmm(dwFormats, channels, &pWF->wBitsPerSample, &pWF->nSamplesPerSec);
+    if (result != MA_SUCCESS) {
+        return result;
+    }
+
+    pWF->nBlockAlign = (WORD)(pWF->nChannels * pWF->wBitsPerSample / 8);
+    pWF->nAvgBytesPerSec = pWF->nBlockAlign * pWF->nSamplesPerSec;
+
+    return MA_SUCCESS;
+}
+
+static ma_result ma_context_get_device_info_from_WAVECAPS(ma_context* pContext, MA_WAVECAPSA* pCaps, ma_device_info* pDeviceInfo)
+{
+    WORD bitsPerSample;
+    DWORD sampleRate;
+    ma_result result;
+
+    MA_ASSERT(pContext != NULL);
+    MA_ASSERT(pCaps != NULL);
+    MA_ASSERT(pDeviceInfo != NULL);
+
+    /*
+    Name / Description
+
+    Unfortunately the name specified in WAVE(OUT/IN)CAPS2 is limited to 31 characters. This results in an unprofessional looking
+    situation where the names of the devices are truncated. To help work around this, we need to look at the name GUID and try
+    looking in the registry for the full name. If we can't find it there, we need to just fall back to the default name.
+    */
+
+    /* Set the default to begin with. */
+    ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), pCaps->szPname, (size_t)-1);
+
+    /*
+    Now try the registry. There are a few things to consider here:
+      - The name GUID can be null, in which case we just need to stick to the original 31 characters.
+      - If the name GUID is not present in the registry we'll also need to stick to the original 31 characters.
+      - I like consistency, so I want the returned device names to be consistent with those returned by WASAPI and DirectSound. The
+        problem, however, is that WASAPI and DirectSound use "<component> (<name>)" format (such as "Speakers (High Definition Audio)"),
+        but WinMM does not specify the component name. From my admittedly limited testing, I've noticed the component name seems to
+        usually fit within the 31 characters of the fixed-sized buffer, so what I'm going to do is parse that string for the component
+        name, and then concatenate the name from the registry. 
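+
+    For illustration, with a hypothetical name GUID the lookup below ends up reading a registry
+    value along the lines of
+
+        HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Control\MediaCategories\<name GUID>\Name
+
+    and splices the result into the "<component> (<name>)" form described above.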
+ */ + if (!ma_is_guid_null(&pCaps->NameGuid)) { + WCHAR guidStrW[256]; + if (((MA_PFN_StringFromGUID2)pContext->win32.StringFromGUID2)(&pCaps->NameGuid, guidStrW, ma_countof(guidStrW)) > 0) { + char guidStr[256]; + char keyStr[1024]; + HKEY hKey; + + WideCharToMultiByte(CP_UTF8, 0, guidStrW, -1, guidStr, sizeof(guidStr), 0, FALSE); + + ma_strcpy_s(keyStr, sizeof(keyStr), "SYSTEM\\CurrentControlSet\\Control\\MediaCategories\\"); + ma_strcat_s(keyStr, sizeof(keyStr), guidStr); + + if (((MA_PFN_RegOpenKeyExA)pContext->win32.RegOpenKeyExA)(HKEY_LOCAL_MACHINE, keyStr, 0, KEY_READ, &hKey) == ERROR_SUCCESS) { + BYTE nameFromReg[512]; + DWORD nameFromRegSize = sizeof(nameFromReg); + LONG resultWin32 = ((MA_PFN_RegQueryValueExA)pContext->win32.RegQueryValueExA)(hKey, "Name", 0, NULL, (BYTE*)nameFromReg, (DWORD*)&nameFromRegSize); + ((MA_PFN_RegCloseKey)pContext->win32.RegCloseKey)(hKey); + + if (resultWin32 == ERROR_SUCCESS) { + /* We have the value from the registry, so now we need to construct the name string. */ + char name[1024]; + if (ma_strcpy_s(name, sizeof(name), pDeviceInfo->name) == 0) { + char* nameBeg = ma_find_last_character(name, '('); + if (nameBeg != NULL) { + size_t leadingLen = (nameBeg - name); + ma_strncpy_s(nameBeg + 1, sizeof(name) - leadingLen, (const char*)nameFromReg, (size_t)-1); + + /* The closing ")", if it can fit. */ + if (leadingLen + nameFromRegSize < sizeof(name)-1) { + ma_strcat_s(name, sizeof(name), ")"); + } + + ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), name, (size_t)-1); + } + } + } + } + } + } + + + result = ma_get_best_info_from_formats_flags__winmm(pCaps->dwFormats, pCaps->wChannels, &bitsPerSample, &sampleRate); + if (result != MA_SUCCESS) { + return result; + } + + if (bitsPerSample == 8) { + pDeviceInfo->nativeDataFormats[0].format = ma_format_u8; + } else if (bitsPerSample == 16) { + pDeviceInfo->nativeDataFormats[0].format = ma_format_s16; + } else if (bitsPerSample == 24) { + pDeviceInfo->nativeDataFormats[0].format = ma_format_s24; + } else if (bitsPerSample == 32) { + pDeviceInfo->nativeDataFormats[0].format = ma_format_s32; + } else { + return MA_FORMAT_NOT_SUPPORTED; + } + pDeviceInfo->nativeDataFormats[0].channels = pCaps->wChannels; + pDeviceInfo->nativeDataFormats[0].sampleRate = sampleRate; + pDeviceInfo->nativeDataFormats[0].flags = 0; + pDeviceInfo->nativeDataFormatCount = 1; + + return MA_SUCCESS; +} + +static ma_result ma_context_get_device_info_from_WAVEOUTCAPS2(ma_context* pContext, MA_WAVEOUTCAPS2A* pCaps, ma_device_info* pDeviceInfo) +{ + MA_WAVECAPSA caps; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(pCaps != NULL); + MA_ASSERT(pDeviceInfo != NULL); + + MA_COPY_MEMORY(caps.szPname, pCaps->szPname, sizeof(caps.szPname)); + caps.dwFormats = pCaps->dwFormats; + caps.wChannels = pCaps->wChannels; + caps.NameGuid = pCaps->NameGuid; + return ma_context_get_device_info_from_WAVECAPS(pContext, &caps, pDeviceInfo); +} + +static ma_result ma_context_get_device_info_from_WAVEINCAPS2(ma_context* pContext, MA_WAVEINCAPS2A* pCaps, ma_device_info* pDeviceInfo) +{ + MA_WAVECAPSA caps; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(pCaps != NULL); + MA_ASSERT(pDeviceInfo != NULL); + + MA_COPY_MEMORY(caps.szPname, pCaps->szPname, sizeof(caps.szPname)); + caps.dwFormats = pCaps->dwFormats; + caps.wChannels = pCaps->wChannels; + caps.NameGuid = pCaps->NameGuid; + return ma_context_get_device_info_from_WAVECAPS(pContext, &caps, pDeviceInfo); +} + + +static ma_result ma_context_enumerate_devices__winmm(ma_context* pContext, 
ma_enum_devices_callback_proc callback, void* pUserData) +{ + UINT playbackDeviceCount; + UINT captureDeviceCount; + UINT iPlaybackDevice; + UINT iCaptureDevice; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(callback != NULL); + + /* Playback. */ + playbackDeviceCount = ((MA_PFN_waveOutGetNumDevs)pContext->winmm.waveOutGetNumDevs)(); + for (iPlaybackDevice = 0; iPlaybackDevice < playbackDeviceCount; ++iPlaybackDevice) { + MA_MMRESULT result; + MA_WAVEOUTCAPS2A caps; + + MA_ZERO_OBJECT(&caps); + + result = ((MA_PFN_waveOutGetDevCapsA)pContext->winmm.waveOutGetDevCapsA)(iPlaybackDevice, (MA_WAVEOUTCAPSA*)&caps, sizeof(caps)); + if (result == MA_MMSYSERR_NOERROR) { + ma_device_info deviceInfo; + + MA_ZERO_OBJECT(&deviceInfo); + deviceInfo.id.winmm = iPlaybackDevice; + + /* The first enumerated device is the default device. */ + if (iPlaybackDevice == 0) { + deviceInfo.isDefault = MA_TRUE; + } + + if (ma_context_get_device_info_from_WAVEOUTCAPS2(pContext, &caps, &deviceInfo) == MA_SUCCESS) { + ma_bool32 cbResult = callback(pContext, ma_device_type_playback, &deviceInfo, pUserData); + if (cbResult == MA_FALSE) { + return MA_SUCCESS; /* Enumeration was stopped. */ + } + } + } + } + + /* Capture. */ + captureDeviceCount = ((MA_PFN_waveInGetNumDevs)pContext->winmm.waveInGetNumDevs)(); + for (iCaptureDevice = 0; iCaptureDevice < captureDeviceCount; ++iCaptureDevice) { + MA_MMRESULT result; + MA_WAVEINCAPS2A caps; + + MA_ZERO_OBJECT(&caps); + + result = ((MA_PFN_waveInGetDevCapsA)pContext->winmm.waveInGetDevCapsA)(iCaptureDevice, (MA_WAVEINCAPSA*)&caps, sizeof(caps)); + if (result == MA_MMSYSERR_NOERROR) { + ma_device_info deviceInfo; + + MA_ZERO_OBJECT(&deviceInfo); + deviceInfo.id.winmm = iCaptureDevice; + + /* The first enumerated device is the default device. */ + if (iCaptureDevice == 0) { + deviceInfo.isDefault = MA_TRUE; + } + + if (ma_context_get_device_info_from_WAVEINCAPS2(pContext, &caps, &deviceInfo) == MA_SUCCESS) { + ma_bool32 cbResult = callback(pContext, ma_device_type_capture, &deviceInfo, pUserData); + if (cbResult == MA_FALSE) { + return MA_SUCCESS; /* Enumeration was stopped. */ + } + } + } + } + + return MA_SUCCESS; +} + +static ma_result ma_context_get_device_info__winmm(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_device_info* pDeviceInfo) +{ + UINT winMMDeviceID; + + MA_ASSERT(pContext != NULL); + + winMMDeviceID = 0; + if (pDeviceID != NULL) { + winMMDeviceID = (UINT)pDeviceID->winmm; + } + + pDeviceInfo->id.winmm = winMMDeviceID; + + /* The first ID is the default device. 
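
The device ID model here is worth spelling out: WinMM identifies devices by a plain UINT index, and both the enumeration loop above and the lookup below treat index 0 as the default. A standalone sketch against the raw WinMM API (not part of the patch; link with winmm.lib) shows the same convention:

    #include <windows.h>
    #include <mmsystem.h>
    #include <stdio.h>

    int main(void)
    {
        UINT deviceCount = waveOutGetNumDevs();
        UINT iDevice;

        for (iDevice = 0; iDevice < deviceCount; iDevice += 1) {
            WAVEOUTCAPSA caps;
            if (waveOutGetDevCapsA(iDevice, &caps, sizeof(caps)) == MMSYSERR_NOERROR) {
                /* Index 0 is what the patch reports as the default. szPname is capped at 31 characters. */
                printf("%u: %s (%u channels)\n", iDevice, caps.szPname, (unsigned int)caps.wChannels);
            }
        }

        return 0;
    }
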
*/ + if (winMMDeviceID == 0) { + pDeviceInfo->isDefault = MA_TRUE; + } + + if (deviceType == ma_device_type_playback) { + MA_MMRESULT result; + MA_WAVEOUTCAPS2A caps; + + MA_ZERO_OBJECT(&caps); + + result = ((MA_PFN_waveOutGetDevCapsA)pContext->winmm.waveOutGetDevCapsA)(winMMDeviceID, (MA_WAVEOUTCAPSA*)&caps, sizeof(caps)); + if (result == MA_MMSYSERR_NOERROR) { + return ma_context_get_device_info_from_WAVEOUTCAPS2(pContext, &caps, pDeviceInfo); + } + } else { + MA_MMRESULT result; + MA_WAVEINCAPS2A caps; + + MA_ZERO_OBJECT(&caps); + + result = ((MA_PFN_waveInGetDevCapsA)pContext->winmm.waveInGetDevCapsA)(winMMDeviceID, (MA_WAVEINCAPSA*)&caps, sizeof(caps)); + if (result == MA_MMSYSERR_NOERROR) { + return ma_context_get_device_info_from_WAVEINCAPS2(pContext, &caps, pDeviceInfo); + } + } + + return MA_NO_DEVICE; +} + + +static ma_result ma_device_uninit__winmm(ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + ((MA_PFN_waveInClose)pDevice->pContext->winmm.waveInClose)((MA_HWAVEIN)pDevice->winmm.hDeviceCapture); + CloseHandle((HANDLE)pDevice->winmm.hEventCapture); + } + + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + ((MA_PFN_waveOutReset)pDevice->pContext->winmm.waveOutReset)((MA_HWAVEOUT)pDevice->winmm.hDevicePlayback); + ((MA_PFN_waveOutClose)pDevice->pContext->winmm.waveOutClose)((MA_HWAVEOUT)pDevice->winmm.hDevicePlayback); + CloseHandle((HANDLE)pDevice->winmm.hEventPlayback); + } + + ma_free(pDevice->winmm._pHeapData, &pDevice->pContext->allocationCallbacks); + + MA_ZERO_OBJECT(&pDevice->winmm); /* Safety. */ + + return MA_SUCCESS; +} + +static ma_uint32 ma_calculate_period_size_in_frames_from_descriptor__winmm(const ma_device_descriptor* pDescriptor, ma_uint32 nativeSampleRate, ma_performance_profile performanceProfile) +{ + /* WinMM has a minimum period size of 40ms. */ + ma_uint32 minPeriodSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(40, nativeSampleRate); + ma_uint32 periodSizeInFrames; + + periodSizeInFrames = ma_calculate_buffer_size_in_frames_from_descriptor(pDescriptor, nativeSampleRate, performanceProfile); + if (periodSizeInFrames < minPeriodSizeInFrames) { + periodSizeInFrames = minPeriodSizeInFrames; + } + + return periodSizeInFrames; +} + +static ma_result ma_device_init__winmm(ma_device* pDevice, const ma_device_config* pConfig, ma_device_descriptor* pDescriptorPlayback, ma_device_descriptor* pDescriptorCapture) +{ + const char* errorMsg = ""; + ma_result errorCode = MA_ERROR; + ma_result result = MA_SUCCESS; + ma_uint32 heapSize; + UINT winMMDeviceIDPlayback = 0; + UINT winMMDeviceIDCapture = 0; + + MA_ASSERT(pDevice != NULL); + + MA_ZERO_OBJECT(&pDevice->winmm); + + if (pConfig->deviceType == ma_device_type_loopback) { + return MA_DEVICE_TYPE_NOT_SUPPORTED; + } + + /* No exclusive mode with WinMM. 
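
For a sense of scale on the 40ms floor enforced by the period-size helper above, here is a sketch of the clamp, assuming miniaudio's usual frames = milliseconds * sampleRate / 1000 conversion:

    /* 40ms at common rates: 44100 -> 1764 frames, 48000 -> 1920 frames.
       A caller asking for a 10ms (480 frame) period at 48kHz is bumped up to 1920. */
    static unsigned int winmm_clamped_period_frames(unsigned int requestedFrames, unsigned int sampleRate)
    {
        unsigned int minFrames = 40 * sampleRate / 1000;
        return (requestedFrames < minFrames) ? minFrames : requestedFrames;
    }
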
*/ + if (((pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) && pDescriptorPlayback->shareMode == ma_share_mode_exclusive) || + ((pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) && pDescriptorCapture->shareMode == ma_share_mode_exclusive)) { + return MA_SHARE_MODE_NOT_SUPPORTED; + } + + if (pDescriptorPlayback->pDeviceID != NULL) { + winMMDeviceIDPlayback = (UINT)pDescriptorPlayback->pDeviceID->winmm; + } + if (pDescriptorCapture->pDeviceID != NULL) { + winMMDeviceIDCapture = (UINT)pDescriptorCapture->pDeviceID->winmm; + } + + /* The capture device needs to be initialized first. */ + if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) { + MA_WAVEINCAPSA caps; + MA_WAVEFORMATEX wf; + MA_MMRESULT resultMM; + + /* We use an event to know when a new fragment needs to be enqueued. */ + pDevice->winmm.hEventCapture = (ma_handle)CreateEventA(NULL, TRUE, TRUE, NULL); + if (pDevice->winmm.hEventCapture == NULL) { + errorMsg = "[WinMM] Failed to create event for fragment enqueuing for the capture device.", errorCode = ma_result_from_GetLastError(GetLastError()); + goto on_error; + } + + /* The format should be based on the device's actual format. */ + if (((MA_PFN_waveInGetDevCapsA)pDevice->pContext->winmm.waveInGetDevCapsA)(winMMDeviceIDCapture, &caps, sizeof(caps)) != MA_MMSYSERR_NOERROR) { + errorMsg = "[WinMM] Failed to retrieve internal device caps.", errorCode = MA_FORMAT_NOT_SUPPORTED; + goto on_error; + } + + result = ma_formats_flags_to_WAVEFORMATEX__winmm(caps.dwFormats, caps.wChannels, &wf); + if (result != MA_SUCCESS) { + errorMsg = "[WinMM] Could not find appropriate format for internal device.", errorCode = result; + goto on_error; + } + + resultMM = ((MA_PFN_waveInOpen)pDevice->pContext->winmm.waveInOpen)((MA_HWAVEIN*)&pDevice->winmm.hDeviceCapture, winMMDeviceIDCapture, &wf, (DWORD_PTR)pDevice->winmm.hEventCapture, (DWORD_PTR)pDevice, MA_CALLBACK_EVENT | MA_WAVE_ALLOWSYNC); + if (resultMM != MA_MMSYSERR_NOERROR) { + errorMsg = "[WinMM] Failed to open capture device.", errorCode = MA_FAILED_TO_OPEN_BACKEND_DEVICE; + goto on_error; + } + + pDescriptorCapture->format = ma_format_from_WAVEFORMATEX(&wf); + pDescriptorCapture->channels = wf.nChannels; + pDescriptorCapture->sampleRate = wf.nSamplesPerSec; + ma_channel_map_init_standard(ma_standard_channel_map_microsoft, pDescriptorCapture->channelMap, ma_countof(pDescriptorCapture->channelMap), pDescriptorCapture->channels); + pDescriptorCapture->periodCount = pDescriptorCapture->periodCount; + pDescriptorCapture->periodSizeInFrames = ma_calculate_period_size_in_frames_from_descriptor__winmm(pDescriptorCapture, pDescriptorCapture->sampleRate, pConfig->performanceProfile); + } + + if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) { + MA_WAVEOUTCAPSA caps; + MA_WAVEFORMATEX wf; + MA_MMRESULT resultMM; + + /* We use an event to know when a new fragment needs to be enqueued. */ + pDevice->winmm.hEventPlayback = (ma_handle)CreateEventA(NULL, TRUE, TRUE, NULL); + if (pDevice->winmm.hEventPlayback == NULL) { + errorMsg = "[WinMM] Failed to create event for fragment enqueuing for the playback device.", errorCode = ma_result_from_GetLastError(GetLastError()); + goto on_error; + } + + /* The format should be based on the device's actual format. 
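
The same dwFormats-to-WAVEFORMATEX translation just performed for capture is about to be repeated for playback. As an illustration of what the helper produces (the values follow from the PCM arithmetic in ma_formats_flags_to_WAVEFORMATEX__winmm above; WAVE_FORMAT_4S16 is the standard WinMM capability bit for 44.1kHz/16-bit/stereo):

    MA_WAVEFORMATEX wf;
    if (ma_formats_flags_to_WAVEFORMATEX__winmm(WAVE_FORMAT_4S16, 2, &wf) == MA_SUCCESS) {
        /* wf.wFormatTag      == WAVE_FORMAT_PCM
           wf.nChannels       == 2
           wf.nSamplesPerSec  == 44100
           wf.wBitsPerSample  == 16
           wf.nBlockAlign     == 4        (2 channels * 16 bits / 8)
           wf.nAvgBytesPerSec == 176400   (4 * 44100) */
    }
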
*/ + if (((MA_PFN_waveOutGetDevCapsA)pDevice->pContext->winmm.waveOutGetDevCapsA)(winMMDeviceIDPlayback, &caps, sizeof(caps)) != MA_MMSYSERR_NOERROR) { + errorMsg = "[WinMM] Failed to retrieve internal device caps.", errorCode = MA_FORMAT_NOT_SUPPORTED; + goto on_error; + } + + result = ma_formats_flags_to_WAVEFORMATEX__winmm(caps.dwFormats, caps.wChannels, &wf); + if (result != MA_SUCCESS) { + errorMsg = "[WinMM] Could not find appropriate format for internal device.", errorCode = result; + goto on_error; + } + + resultMM = ((MA_PFN_waveOutOpen)pDevice->pContext->winmm.waveOutOpen)((MA_HWAVEOUT*)&pDevice->winmm.hDevicePlayback, winMMDeviceIDPlayback, &wf, (DWORD_PTR)pDevice->winmm.hEventPlayback, (DWORD_PTR)pDevice, MA_CALLBACK_EVENT | MA_WAVE_ALLOWSYNC); + if (resultMM != MA_MMSYSERR_NOERROR) { + errorMsg = "[WinMM] Failed to open playback device.", errorCode = MA_FAILED_TO_OPEN_BACKEND_DEVICE; + goto on_error; + } + + pDescriptorPlayback->format = ma_format_from_WAVEFORMATEX(&wf); + pDescriptorPlayback->channels = wf.nChannels; + pDescriptorPlayback->sampleRate = wf.nSamplesPerSec; + ma_channel_map_init_standard(ma_standard_channel_map_microsoft, pDescriptorPlayback->channelMap, ma_countof(pDescriptorPlayback->channelMap), pDescriptorPlayback->channels); + pDescriptorPlayback->periodCount = pDescriptorPlayback->periodCount; + pDescriptorPlayback->periodSizeInFrames = ma_calculate_period_size_in_frames_from_descriptor__winmm(pDescriptorPlayback, pDescriptorPlayback->sampleRate, pConfig->performanceProfile); + } + + /* + The heap allocated data is allocated like so: + + [Capture WAVEHDRs][Playback WAVEHDRs][Capture Intermediary Buffer][Playback Intermediary Buffer] + */ + heapSize = 0; + if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) { + heapSize += sizeof(MA_WAVEHDR)*pDescriptorCapture->periodCount + (pDescriptorCapture->periodSizeInFrames * pDescriptorCapture->periodCount * ma_get_bytes_per_frame(pDescriptorCapture->format, pDescriptorCapture->channels)); + } + if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) { + heapSize += sizeof(MA_WAVEHDR)*pDescriptorPlayback->periodCount + (pDescriptorPlayback->periodSizeInFrames * pDescriptorPlayback->periodCount * ma_get_bytes_per_frame(pDescriptorPlayback->format, pDescriptorPlayback->channels)); + } + + pDevice->winmm._pHeapData = (ma_uint8*)ma_calloc(heapSize, &pDevice->pContext->allocationCallbacks); + if (pDevice->winmm._pHeapData == NULL) { + errorMsg = "[WinMM] Failed to allocate memory for the intermediary buffer.", errorCode = MA_OUT_OF_MEMORY; + goto on_error; + } + + MA_ZERO_MEMORY(pDevice->winmm._pHeapData, heapSize); + + if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) { + ma_uint32 iPeriod; + + if (pConfig->deviceType == ma_device_type_capture) { + pDevice->winmm.pWAVEHDRCapture = pDevice->winmm._pHeapData; + pDevice->winmm.pIntermediaryBufferCapture = pDevice->winmm._pHeapData + (sizeof(MA_WAVEHDR)*(pDescriptorCapture->periodCount)); + } else { + pDevice->winmm.pWAVEHDRCapture = pDevice->winmm._pHeapData; + pDevice->winmm.pIntermediaryBufferCapture = pDevice->winmm._pHeapData + (sizeof(MA_WAVEHDR)*(pDescriptorCapture->periodCount + pDescriptorPlayback->periodCount)); + } + + /* Prepare headers. 
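
A worked example of the heap layout described above, for a duplex device with 3 periods per direction, 480-frame periods, and stereo s16 (4 bytes per frame), matching the pointer arithmetic in the capture branch here and the playback branch below:

    offset 0                              : 3 x MA_WAVEHDR     capture headers   (pWAVEHDRCapture)
    offset 3 * sizeof(MA_WAVEHDR)         : 3 x MA_WAVEHDR     playback headers  (pWAVEHDRPlayback)
    offset 6 * sizeof(MA_WAVEHDR)         : 480*3*4 = 5760 B   capture buffer    (pIntermediaryBufferCapture)
    offset 6 * sizeof(MA_WAVEHDR) + 5760  : 480*3*4 = 5760 B   playback buffer   (pIntermediaryBufferPlayback)

so heapSize comes out to 6 * sizeof(MA_WAVEHDR) + 11520 bytes.
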
*/
+        for (iPeriod = 0; iPeriod < pDescriptorCapture->periodCount; ++iPeriod) {
+            ma_uint32 periodSizeInBytes = ma_get_period_size_in_bytes(pDescriptorCapture->periodSizeInFrames, pDescriptorCapture->format, pDescriptorCapture->channels);
+
+            ((MA_WAVEHDR*)pDevice->winmm.pWAVEHDRCapture)[iPeriod].lpData = (char*)(pDevice->winmm.pIntermediaryBufferCapture + (periodSizeInBytes*iPeriod));
+            ((MA_WAVEHDR*)pDevice->winmm.pWAVEHDRCapture)[iPeriod].dwBufferLength = periodSizeInBytes;
+            ((MA_WAVEHDR*)pDevice->winmm.pWAVEHDRCapture)[iPeriod].dwFlags = 0L;
+            ((MA_WAVEHDR*)pDevice->winmm.pWAVEHDRCapture)[iPeriod].dwLoops = 0L;
+            ((MA_PFN_waveInPrepareHeader)pDevice->pContext->winmm.waveInPrepareHeader)((MA_HWAVEIN)pDevice->winmm.hDeviceCapture, &((MA_WAVEHDR*)pDevice->winmm.pWAVEHDRCapture)[iPeriod], sizeof(MA_WAVEHDR));
+
+            /*
+            The user data of the MA_WAVEHDR structure is a single flag that controls whether or not it is ready for writing. Consider it to be named "isLocked". A value of 0 means
+            it's unlocked and available for writing. A value of 1 means it's locked.
+            */
+            ((MA_WAVEHDR*)pDevice->winmm.pWAVEHDRCapture)[iPeriod].dwUser = 0;
+        }
+    }
+
+    if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) {
+        ma_uint32 iPeriod;
+
+        if (pConfig->deviceType == ma_device_type_playback) {
+            pDevice->winmm.pWAVEHDRPlayback = pDevice->winmm._pHeapData;
+            pDevice->winmm.pIntermediaryBufferPlayback = pDevice->winmm._pHeapData + (sizeof(MA_WAVEHDR)*pDescriptorPlayback->periodCount);
+        } else {
+            pDevice->winmm.pWAVEHDRPlayback = pDevice->winmm._pHeapData + (sizeof(MA_WAVEHDR)*(pDescriptorCapture->periodCount));
+            pDevice->winmm.pIntermediaryBufferPlayback = pDevice->winmm._pHeapData + (sizeof(MA_WAVEHDR)*(pDescriptorCapture->periodCount + pDescriptorPlayback->periodCount)) + (pDescriptorCapture->periodSizeInFrames*pDescriptorCapture->periodCount*ma_get_bytes_per_frame(pDescriptorCapture->format, pDescriptorCapture->channels));
+        }
+
+        /* Prepare headers. */
+        for (iPeriod = 0; iPeriod < pDescriptorPlayback->periodCount; ++iPeriod) {
+            ma_uint32 periodSizeInBytes = ma_get_period_size_in_bytes(pDescriptorPlayback->periodSizeInFrames, pDescriptorPlayback->format, pDescriptorPlayback->channels);
+
+            ((MA_WAVEHDR*)pDevice->winmm.pWAVEHDRPlayback)[iPeriod].lpData = (char*)(pDevice->winmm.pIntermediaryBufferPlayback + (periodSizeInBytes*iPeriod));
+            ((MA_WAVEHDR*)pDevice->winmm.pWAVEHDRPlayback)[iPeriod].dwBufferLength = periodSizeInBytes;
+            ((MA_WAVEHDR*)pDevice->winmm.pWAVEHDRPlayback)[iPeriod].dwFlags = 0L;
+            ((MA_WAVEHDR*)pDevice->winmm.pWAVEHDRPlayback)[iPeriod].dwLoops = 0L;
+            ((MA_PFN_waveOutPrepareHeader)pDevice->pContext->winmm.waveOutPrepareHeader)((MA_HWAVEOUT)pDevice->winmm.hDevicePlayback, &((MA_WAVEHDR*)pDevice->winmm.pWAVEHDRPlayback)[iPeriod], sizeof(MA_WAVEHDR));
+
+            /*
+            The user data of the MA_WAVEHDR structure is a single flag that controls whether or not it is ready for writing. Consider it to be named "isLocked". A value of 0 means
+            it's unlocked and available for writing. A value of 1 means it's locked.
+            */
+            ((MA_WAVEHDR*)pDevice->winmm.pWAVEHDRPlayback)[iPeriod].dwUser = 0;
+        }
+    }
+
+    return MA_SUCCESS;
+
+on_error:
+    if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
+        if (pDevice->winmm.pWAVEHDRCapture != NULL) {
+            ma_uint32 iPeriod;
+            for (iPeriod = 0; iPeriod < pDescriptorCapture->periodCount; ++iPeriod) {
+                ((MA_PFN_waveInUnprepareHeader)pDevice->pContext->winmm.waveInUnprepareHeader)((MA_HWAVEIN)pDevice->winmm.hDeviceCapture, &((MA_WAVEHDR*)pDevice->winmm.pWAVEHDRCapture)[iPeriod], sizeof(MA_WAVEHDR));
+            }
+        }
+
+        ((MA_PFN_waveInClose)pDevice->pContext->winmm.waveInClose)((MA_HWAVEIN)pDevice->winmm.hDeviceCapture);
+    }
+
+    if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
+        if (pDevice->winmm.pWAVEHDRPlayback != NULL) {
+            ma_uint32 iPeriod;
+            for (iPeriod = 0; iPeriod < pDescriptorPlayback->periodCount; ++iPeriod) {
+                ((MA_PFN_waveOutUnprepareHeader)pDevice->pContext->winmm.waveOutUnprepareHeader)((MA_HWAVEOUT)pDevice->winmm.hDevicePlayback, &((MA_WAVEHDR*)pDevice->winmm.pWAVEHDRPlayback)[iPeriod], sizeof(MA_WAVEHDR));
+            }
+        }
+
+        ((MA_PFN_waveOutClose)pDevice->pContext->winmm.waveOutClose)((MA_HWAVEOUT)pDevice->winmm.hDevicePlayback);
+    }
+
+    ma_free(pDevice->winmm._pHeapData, &pDevice->pContext->allocationCallbacks);
+
+    if (errorMsg != NULL && errorMsg[0] != '\0') {
+        ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "%s", errorMsg);
+    }
+
+    return errorCode;
+}
+
+static ma_result ma_device_start__winmm(ma_device* pDevice)
+{
+    MA_ASSERT(pDevice != NULL);
+
+    if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
+        MA_MMRESULT resultMM;
+        MA_WAVEHDR* pWAVEHDR;
+        ma_uint32 iPeriod;
+
+        pWAVEHDR = (MA_WAVEHDR*)pDevice->winmm.pWAVEHDRCapture;
+
+        /* Make sure the event is reset to a non-signaled state to ensure we don't prematurely return from WaitForSingleObject(). */
+        ResetEvent((HANDLE)pDevice->winmm.hEventCapture);
+
+        /* To start the device we attach all of the buffers and then start it. As the buffers are filled with data we will get notifications. */
+        for (iPeriod = 0; iPeriod < pDevice->capture.internalPeriods; ++iPeriod) {
+            resultMM = ((MA_PFN_waveInAddBuffer)pDevice->pContext->winmm.waveInAddBuffer)((MA_HWAVEIN)pDevice->winmm.hDeviceCapture, &((MA_WAVEHDR*)pDevice->winmm.pWAVEHDRCapture)[iPeriod], sizeof(MA_WAVEHDR));
+            if (resultMM != MA_MMSYSERR_NOERROR) {
+                ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[WinMM] Failed to attach input buffers to capture device in preparation for capture.");
+                return ma_result_from_MMRESULT(resultMM);
+            }
+
+            /* Make sure all of the buffers start out locked. We don't want to access them until the backend tells us we can. */
+            pWAVEHDR[iPeriod].dwUser = 1; /* 1 = locked. */
+        }
+
+        /* Capture devices need to be explicitly started, unlike playback devices. */
+        resultMM = ((MA_PFN_waveInStart)pDevice->pContext->winmm.waveInStart)((MA_HWAVEIN)pDevice->winmm.hDeviceCapture);
+        if (resultMM != MA_MMSYSERR_NOERROR) {
+            ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[WinMM] Failed to start backend device.");
+            return ma_result_from_MMRESULT(resultMM);
+        }
+    }
+
+    if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
+        /* Don't need to do anything for playback. It'll be started automatically by the first call to ma_device_write__winmm().
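
That deferred start is a WinMM idiom rather than something miniaudio invents: an opened waveOut device begins rendering as soon as the first buffer is submitted with waveOutWrite(). A minimal self-contained sketch of the same event-driven pattern used throughout this backend (standalone, not part of the patch; link with winmm.lib):

    #include <windows.h>
    #include <mmsystem.h>

    static int play_one_buffer_of_silence(void)
    {
        static short buffer[4800 * 2];  /* 100ms of stereo silence at 48kHz */
        HANDLE hEvent = CreateEventA(NULL, TRUE, TRUE, NULL);
        HWAVEOUT hWaveOut;
        WAVEFORMATEX wf;
        WAVEHDR hdr;

        ZeroMemory(&wf, sizeof(wf));
        wf.wFormatTag      = WAVE_FORMAT_PCM;
        wf.nChannels       = 2;
        wf.nSamplesPerSec  = 48000;
        wf.wBitsPerSample  = 16;
        wf.nBlockAlign     = (WORD)(wf.nChannels * wf.wBitsPerSample / 8);
        wf.nAvgBytesPerSec = wf.nBlockAlign * wf.nSamplesPerSec;

        if (waveOutOpen(&hWaveOut, WAVE_MAPPER, &wf, (DWORD_PTR)hEvent, 0, CALLBACK_EVENT) != MMSYSERR_NOERROR) {
            CloseHandle(hEvent);
            return -1;
        }

        ZeroMemory(&hdr, sizeof(hdr));
        hdr.lpData         = (LPSTR)buffer;
        hdr.dwBufferLength = sizeof(buffer);
        waveOutPrepareHeader(hWaveOut, &hdr, sizeof(hdr));

        ResetEvent(hEvent);
        waveOutWrite(hWaveOut, &hdr, sizeof(hdr));  /* <-- playback starts here */
        while ((hdr.dwFlags & WHDR_DONE) == 0) {
            WaitForSingleObject(hEvent, INFINITE);  /* signaled when the buffer completes */
        }

        waveOutUnprepareHeader(hWaveOut, &hdr, sizeof(hdr));
        waveOutClose(hWaveOut);
        CloseHandle(hEvent);
        return 0;
    }
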
*/ + } + + return MA_SUCCESS; +} + +static ma_result ma_device_stop__winmm(ma_device* pDevice) +{ + MA_MMRESULT resultMM; + + MA_ASSERT(pDevice != NULL); + + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + if (pDevice->winmm.hDeviceCapture == NULL) { + return MA_INVALID_ARGS; + } + + resultMM = ((MA_PFN_waveInReset)pDevice->pContext->winmm.waveInReset)((MA_HWAVEIN)pDevice->winmm.hDeviceCapture); + if (resultMM != MA_MMSYSERR_NOERROR) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_WARNING, "[WinMM] WARNING: Failed to reset capture device."); + } + } + + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + ma_uint32 iPeriod; + MA_WAVEHDR* pWAVEHDR; + + if (pDevice->winmm.hDevicePlayback == NULL) { + return MA_INVALID_ARGS; + } + + /* We need to drain the device. To do this we just loop over each header and if it's locked just wait for the event. */ + pWAVEHDR = (MA_WAVEHDR*)pDevice->winmm.pWAVEHDRPlayback; + for (iPeriod = 0; iPeriod < pDevice->playback.internalPeriods; iPeriod += 1) { + if (pWAVEHDR[iPeriod].dwUser == 1) { /* 1 = locked. */ + if (WaitForSingleObject((HANDLE)pDevice->winmm.hEventPlayback, INFINITE) != WAIT_OBJECT_0) { + break; /* An error occurred so just abandon ship and stop the device without draining. */ + } + + pWAVEHDR[iPeriod].dwUser = 0; + } + } + + resultMM = ((MA_PFN_waveOutReset)pDevice->pContext->winmm.waveOutReset)((MA_HWAVEOUT)pDevice->winmm.hDevicePlayback); + if (resultMM != MA_MMSYSERR_NOERROR) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_WARNING, "[WinMM] WARNING: Failed to reset playback device."); + } + } + + return MA_SUCCESS; +} + +static ma_result ma_device_write__winmm(ma_device* pDevice, const void* pPCMFrames, ma_uint32 frameCount, ma_uint32* pFramesWritten) +{ + ma_result result = MA_SUCCESS; + MA_MMRESULT resultMM; + ma_uint32 totalFramesWritten; + MA_WAVEHDR* pWAVEHDR; + + MA_ASSERT(pDevice != NULL); + MA_ASSERT(pPCMFrames != NULL); + + if (pFramesWritten != NULL) { + *pFramesWritten = 0; + } + + pWAVEHDR = (MA_WAVEHDR*)pDevice->winmm.pWAVEHDRPlayback; + + /* Keep processing as much data as possible. */ + totalFramesWritten = 0; + while (totalFramesWritten < frameCount) { + /* If the current header has some space available we need to write part of it. */ + if (pWAVEHDR[pDevice->winmm.iNextHeaderPlayback].dwUser == 0) { /* 0 = unlocked. */ + /* + This header has room in it. We copy as much of it as we can. If we end up fully consuming the buffer we need to + write it out and move on to the next iteration. + */ + ma_uint32 bpf = ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels); + ma_uint32 framesRemainingInHeader = (pWAVEHDR[pDevice->winmm.iNextHeaderPlayback].dwBufferLength/bpf) - pDevice->winmm.headerFramesConsumedPlayback; + + ma_uint32 framesToCopy = ma_min(framesRemainingInHeader, (frameCount - totalFramesWritten)); + const void* pSrc = ma_offset_ptr(pPCMFrames, totalFramesWritten*bpf); + void* pDst = ma_offset_ptr(pWAVEHDR[pDevice->winmm.iNextHeaderPlayback].lpData, pDevice->winmm.headerFramesConsumedPlayback*bpf); + MA_COPY_MEMORY(pDst, pSrc, framesToCopy*bpf); + + pDevice->winmm.headerFramesConsumedPlayback += framesToCopy; + totalFramesWritten += framesToCopy; + + /* If we've consumed the buffer entirely we need to write it out to the device. 
*/ + if (pDevice->winmm.headerFramesConsumedPlayback == (pWAVEHDR[pDevice->winmm.iNextHeaderPlayback].dwBufferLength/bpf)) { + pWAVEHDR[pDevice->winmm.iNextHeaderPlayback].dwUser = 1; /* 1 = locked. */ + pWAVEHDR[pDevice->winmm.iNextHeaderPlayback].dwFlags &= ~MA_WHDR_DONE; /* <-- Need to make sure the WHDR_DONE flag is unset. */ + + /* Make sure the event is reset to a non-signaled state to ensure we don't prematurely return from WaitForSingleObject(). */ + ResetEvent((HANDLE)pDevice->winmm.hEventPlayback); + + /* The device will be started here. */ + resultMM = ((MA_PFN_waveOutWrite)pDevice->pContext->winmm.waveOutWrite)((MA_HWAVEOUT)pDevice->winmm.hDevicePlayback, &pWAVEHDR[pDevice->winmm.iNextHeaderPlayback], sizeof(MA_WAVEHDR)); + if (resultMM != MA_MMSYSERR_NOERROR) { + result = ma_result_from_MMRESULT(resultMM); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[WinMM] waveOutWrite() failed."); + break; + } + + /* Make sure we move to the next header. */ + pDevice->winmm.iNextHeaderPlayback = (pDevice->winmm.iNextHeaderPlayback + 1) % pDevice->playback.internalPeriods; + pDevice->winmm.headerFramesConsumedPlayback = 0; + } + + /* If at this point we have consumed the entire input buffer we can return. */ + MA_ASSERT(totalFramesWritten <= frameCount); + if (totalFramesWritten == frameCount) { + break; + } + + /* Getting here means there's more to process. */ + continue; + } + + /* Getting here means there isn't enough room in the buffer and we need to wait for one to become available. */ + if (WaitForSingleObject((HANDLE)pDevice->winmm.hEventPlayback, INFINITE) != WAIT_OBJECT_0) { + result = MA_ERROR; + break; + } + + /* Something happened. If the next buffer has been marked as done we need to reset a bit of state. */ + if ((pWAVEHDR[pDevice->winmm.iNextHeaderPlayback].dwFlags & MA_WHDR_DONE) != 0) { + pWAVEHDR[pDevice->winmm.iNextHeaderPlayback].dwUser = 0; /* 0 = unlocked (make it available for writing). */ + pDevice->winmm.headerFramesConsumedPlayback = 0; + } + + /* If the device has been stopped we need to break. */ + if (ma_device_get_state(pDevice) != ma_device_state_started) { + break; + } + } + + if (pFramesWritten != NULL) { + *pFramesWritten = totalFramesWritten; + } + + return result; +} + +static ma_result ma_device_read__winmm(ma_device* pDevice, void* pPCMFrames, ma_uint32 frameCount, ma_uint32* pFramesRead) +{ + ma_result result = MA_SUCCESS; + MA_MMRESULT resultMM; + ma_uint32 totalFramesRead; + MA_WAVEHDR* pWAVEHDR; + + MA_ASSERT(pDevice != NULL); + MA_ASSERT(pPCMFrames != NULL); + + if (pFramesRead != NULL) { + *pFramesRead = 0; + } + + pWAVEHDR = (MA_WAVEHDR*)pDevice->winmm.pWAVEHDRCapture; + + /* Keep processing as much data as possible. */ + totalFramesRead = 0; + while (totalFramesRead < frameCount) { + /* If the current header has some space available we need to write part of it. */ + if (pWAVEHDR[pDevice->winmm.iNextHeaderCapture].dwUser == 0) { /* 0 = unlocked. */ + /* The buffer is available for reading. If we fully consume it we need to add it back to the buffer. 
*/
+                ma_uint32 bpf = ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels);
+                ma_uint32 framesRemainingInHeader = (pWAVEHDR[pDevice->winmm.iNextHeaderCapture].dwBufferLength/bpf) - pDevice->winmm.headerFramesConsumedCapture;
+
+                ma_uint32 framesToCopy = ma_min(framesRemainingInHeader, (frameCount - totalFramesRead));
+                const void* pSrc = ma_offset_ptr(pWAVEHDR[pDevice->winmm.iNextHeaderCapture].lpData, pDevice->winmm.headerFramesConsumedCapture*bpf);
+                void* pDst = ma_offset_ptr(pPCMFrames, totalFramesRead*bpf);
+                MA_COPY_MEMORY(pDst, pSrc, framesToCopy*bpf);
+
+                pDevice->winmm.headerFramesConsumedCapture += framesToCopy;
+                totalFramesRead += framesToCopy;
+
+                /* If we've consumed the buffer entirely we need to add it back to the device. */
+                if (pDevice->winmm.headerFramesConsumedCapture == (pWAVEHDR[pDevice->winmm.iNextHeaderCapture].dwBufferLength/bpf)) {
+                    pWAVEHDR[pDevice->winmm.iNextHeaderCapture].dwUser = 1; /* 1 = locked. */
+                    pWAVEHDR[pDevice->winmm.iNextHeaderCapture].dwFlags &= ~MA_WHDR_DONE; /* <-- Need to make sure the WHDR_DONE flag is unset. */
+
+                    /* Make sure the event is reset to a non-signaled state to ensure we don't prematurely return from WaitForSingleObject(). */
+                    ResetEvent((HANDLE)pDevice->winmm.hEventCapture);
+
+                    /* The buffer will be handed back to the device here. */
+                    resultMM = ((MA_PFN_waveInAddBuffer)pDevice->pContext->winmm.waveInAddBuffer)((MA_HWAVEIN)pDevice->winmm.hDeviceCapture, &((MA_WAVEHDR*)pDevice->winmm.pWAVEHDRCapture)[pDevice->winmm.iNextHeaderCapture], sizeof(MA_WAVEHDR));
+                    if (resultMM != MA_MMSYSERR_NOERROR) {
+                        result = ma_result_from_MMRESULT(resultMM);
+                        ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[WinMM] waveInAddBuffer() failed.");
+                        break;
+                    }
+
+                    /* Make sure we move to the next header. */
+                    pDevice->winmm.iNextHeaderCapture = (pDevice->winmm.iNextHeaderCapture + 1) % pDevice->capture.internalPeriods;
+                    pDevice->winmm.headerFramesConsumedCapture = 0;
+                }
+
+                /* If at this point we have filled the entire input buffer we can return. */
+                MA_ASSERT(totalFramesRead <= frameCount);
+                if (totalFramesRead == frameCount) {
+                    break;
+                }
+
+                /* Getting here means there's more to process. */
+                continue;
+            }
+
+            /* Getting here means there isn't any data left to send to the client, which means we need to wait for more. */
+            if (WaitForSingleObject((HANDLE)pDevice->winmm.hEventCapture, INFINITE) != WAIT_OBJECT_0) {
+                result = MA_ERROR;
+                break;
+            }
+
+            /* Something happened. If the next buffer has been marked as done we need to reset a bit of state. */
+            if ((pWAVEHDR[pDevice->winmm.iNextHeaderCapture].dwFlags & MA_WHDR_DONE) != 0) {
+                pWAVEHDR[pDevice->winmm.iNextHeaderCapture].dwUser = 0; /* 0 = unlocked (make it available for reading). */
+                pDevice->winmm.headerFramesConsumedCapture = 0;
+            }
+
+            /* If the device has been stopped we need to break.
*/ + if (ma_device_get_state(pDevice) != ma_device_state_started) { + break; + } + } + + if (pFramesRead != NULL) { + *pFramesRead = totalFramesRead; + } + + return result; +} + +static ma_result ma_context_uninit__winmm(ma_context* pContext) +{ + MA_ASSERT(pContext != NULL); + MA_ASSERT(pContext->backend == ma_backend_winmm); + + ma_dlclose(ma_context_get_log(pContext), pContext->winmm.hWinMM); + return MA_SUCCESS; +} + +static ma_result ma_context_init__winmm(ma_context* pContext, const ma_context_config* pConfig, ma_backend_callbacks* pCallbacks) +{ + MA_ASSERT(pContext != NULL); + + (void)pConfig; + + pContext->winmm.hWinMM = ma_dlopen(ma_context_get_log(pContext), "winmm.dll"); + if (pContext->winmm.hWinMM == NULL) { + return MA_NO_BACKEND; + } + + pContext->winmm.waveOutGetNumDevs = ma_dlsym(ma_context_get_log(pContext), pContext->winmm.hWinMM, "waveOutGetNumDevs"); + pContext->winmm.waveOutGetDevCapsA = ma_dlsym(ma_context_get_log(pContext), pContext->winmm.hWinMM, "waveOutGetDevCapsA"); + pContext->winmm.waveOutOpen = ma_dlsym(ma_context_get_log(pContext), pContext->winmm.hWinMM, "waveOutOpen"); + pContext->winmm.waveOutClose = ma_dlsym(ma_context_get_log(pContext), pContext->winmm.hWinMM, "waveOutClose"); + pContext->winmm.waveOutPrepareHeader = ma_dlsym(ma_context_get_log(pContext), pContext->winmm.hWinMM, "waveOutPrepareHeader"); + pContext->winmm.waveOutUnprepareHeader = ma_dlsym(ma_context_get_log(pContext), pContext->winmm.hWinMM, "waveOutUnprepareHeader"); + pContext->winmm.waveOutWrite = ma_dlsym(ma_context_get_log(pContext), pContext->winmm.hWinMM, "waveOutWrite"); + pContext->winmm.waveOutReset = ma_dlsym(ma_context_get_log(pContext), pContext->winmm.hWinMM, "waveOutReset"); + pContext->winmm.waveInGetNumDevs = ma_dlsym(ma_context_get_log(pContext), pContext->winmm.hWinMM, "waveInGetNumDevs"); + pContext->winmm.waveInGetDevCapsA = ma_dlsym(ma_context_get_log(pContext), pContext->winmm.hWinMM, "waveInGetDevCapsA"); + pContext->winmm.waveInOpen = ma_dlsym(ma_context_get_log(pContext), pContext->winmm.hWinMM, "waveInOpen"); + pContext->winmm.waveInClose = ma_dlsym(ma_context_get_log(pContext), pContext->winmm.hWinMM, "waveInClose"); + pContext->winmm.waveInPrepareHeader = ma_dlsym(ma_context_get_log(pContext), pContext->winmm.hWinMM, "waveInPrepareHeader"); + pContext->winmm.waveInUnprepareHeader = ma_dlsym(ma_context_get_log(pContext), pContext->winmm.hWinMM, "waveInUnprepareHeader"); + pContext->winmm.waveInAddBuffer = ma_dlsym(ma_context_get_log(pContext), pContext->winmm.hWinMM, "waveInAddBuffer"); + pContext->winmm.waveInStart = ma_dlsym(ma_context_get_log(pContext), pContext->winmm.hWinMM, "waveInStart"); + pContext->winmm.waveInReset = ma_dlsym(ma_context_get_log(pContext), pContext->winmm.hWinMM, "waveInReset"); + + pCallbacks->onContextInit = ma_context_init__winmm; + pCallbacks->onContextUninit = ma_context_uninit__winmm; + pCallbacks->onContextEnumerateDevices = ma_context_enumerate_devices__winmm; + pCallbacks->onContextGetDeviceInfo = ma_context_get_device_info__winmm; + pCallbacks->onDeviceInit = ma_device_init__winmm; + pCallbacks->onDeviceUninit = ma_device_uninit__winmm; + pCallbacks->onDeviceStart = ma_device_start__winmm; + pCallbacks->onDeviceStop = ma_device_stop__winmm; + pCallbacks->onDeviceRead = ma_device_read__winmm; + pCallbacks->onDeviceWrite = ma_device_write__winmm; + pCallbacks->onDeviceDataLoop = NULL; /* This is a blocking read-write API, so this can be NULL since miniaudio will manage the audio thread for us. 
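
Because onDeviceRead/onDeviceWrite block, miniaudio can own the audio thread itself. Conceptually, the loop it runs on the backend's behalf looks something like the sketch below. This is a simplified illustration of the idea only, not the actual dispatcher; `buffer` and `periodSizeInFrames` are assumed to have been set up by the caller, and the real loop also handles duplex devices, format conversion, and state changes.

    while (ma_device_get_state(pDevice) == ma_device_state_started) {
        ma_uint32 framesProcessed;
        if (pDevice->type == ma_device_type_capture) {
            /* Pull a period from the backend, then hand it to the user's data callback. */
            ma_device_read__winmm(pDevice, buffer, periodSizeInFrames, &framesProcessed);
        } else {
            /* Ask the user's data callback for a period, then push it to the backend. */
            ma_device_write__winmm(pDevice, buffer, periodSizeInFrames, &framesProcessed);
        }
    }
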
*/
+
+    return MA_SUCCESS;
+}
+#endif
+
+
+
+
+/******************************************************************************
+
+ALSA Backend
+
+******************************************************************************/
+#ifdef MA_HAS_ALSA
+
+#include <poll.h>         /* poll(), struct pollfd */
+#include <sys/eventfd.h>  /* eventfd() */
+
+#ifdef MA_NO_RUNTIME_LINKING
+
+/* asoundlib.h marks some functions with "inline" which isn't always supported. Need to emulate it. */
+#if !defined(__cplusplus)
+    #if defined(__STRICT_ANSI__)
+        #if !defined(inline)
+            #define inline __inline__ __attribute__((always_inline))
+            #define MA_INLINE_DEFINED
+        #endif
+    #endif
+#endif
+#include <alsa/asoundlib.h>
+#if defined(MA_INLINE_DEFINED)
+    #undef inline
+    #undef MA_INLINE_DEFINED
+#endif
+
+typedef snd_pcm_uframes_t ma_snd_pcm_uframes_t;
+typedef snd_pcm_sframes_t ma_snd_pcm_sframes_t;
+typedef snd_pcm_stream_t ma_snd_pcm_stream_t;
+typedef snd_pcm_format_t ma_snd_pcm_format_t;
+typedef snd_pcm_access_t ma_snd_pcm_access_t;
+typedef snd_pcm_t ma_snd_pcm_t;
+typedef snd_pcm_hw_params_t ma_snd_pcm_hw_params_t;
+typedef snd_pcm_sw_params_t ma_snd_pcm_sw_params_t;
+typedef snd_pcm_format_mask_t ma_snd_pcm_format_mask_t;
+typedef snd_pcm_info_t ma_snd_pcm_info_t;
+typedef snd_pcm_channel_area_t ma_snd_pcm_channel_area_t;
+typedef snd_pcm_chmap_t ma_snd_pcm_chmap_t;
+typedef snd_pcm_state_t ma_snd_pcm_state_t;
+
+/* snd_pcm_stream_t */
+#define MA_SND_PCM_STREAM_PLAYBACK SND_PCM_STREAM_PLAYBACK
+#define MA_SND_PCM_STREAM_CAPTURE SND_PCM_STREAM_CAPTURE
+
+/* snd_pcm_format_t */
+#define MA_SND_PCM_FORMAT_UNKNOWN SND_PCM_FORMAT_UNKNOWN
+#define MA_SND_PCM_FORMAT_U8 SND_PCM_FORMAT_U8
+#define MA_SND_PCM_FORMAT_S16_LE SND_PCM_FORMAT_S16_LE
+#define MA_SND_PCM_FORMAT_S16_BE SND_PCM_FORMAT_S16_BE
+#define MA_SND_PCM_FORMAT_S24_LE SND_PCM_FORMAT_S24_LE
+#define MA_SND_PCM_FORMAT_S24_BE SND_PCM_FORMAT_S24_BE
+#define MA_SND_PCM_FORMAT_S32_LE SND_PCM_FORMAT_S32_LE
+#define MA_SND_PCM_FORMAT_S32_BE SND_PCM_FORMAT_S32_BE
+#define MA_SND_PCM_FORMAT_FLOAT_LE SND_PCM_FORMAT_FLOAT_LE
+#define MA_SND_PCM_FORMAT_FLOAT_BE SND_PCM_FORMAT_FLOAT_BE
+#define MA_SND_PCM_FORMAT_FLOAT64_LE SND_PCM_FORMAT_FLOAT64_LE
+#define MA_SND_PCM_FORMAT_FLOAT64_BE SND_PCM_FORMAT_FLOAT64_BE
+#define MA_SND_PCM_FORMAT_MU_LAW SND_PCM_FORMAT_MU_LAW
+#define MA_SND_PCM_FORMAT_A_LAW SND_PCM_FORMAT_A_LAW
+#define MA_SND_PCM_FORMAT_S24_3LE SND_PCM_FORMAT_S24_3LE
+#define MA_SND_PCM_FORMAT_S24_3BE SND_PCM_FORMAT_S24_3BE
+
+/* ma_snd_pcm_access_t */
+#define MA_SND_PCM_ACCESS_MMAP_INTERLEAVED SND_PCM_ACCESS_MMAP_INTERLEAVED
+#define MA_SND_PCM_ACCESS_MMAP_NONINTERLEAVED SND_PCM_ACCESS_MMAP_NONINTERLEAVED
+#define MA_SND_PCM_ACCESS_MMAP_COMPLEX SND_PCM_ACCESS_MMAP_COMPLEX
+#define MA_SND_PCM_ACCESS_RW_INTERLEAVED SND_PCM_ACCESS_RW_INTERLEAVED
+#define MA_SND_PCM_ACCESS_RW_NONINTERLEAVED SND_PCM_ACCESS_RW_NONINTERLEAVED
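
All of these aliases exist so the same code compiles whether or not the ALSA headers are available: when MA_NO_RUNTIME_LINKING is not defined (the #else branch below), the types become opaque stand-ins and every libasound symbol is resolved at runtime. A minimal sketch of that runtime-linking idea in plain libdl terms (the patch itself routes through ma_dlopen/ma_dlsym so the same mechanism also works on Windows; the void** first parameter uses the same type-erasure trick as the opaque typedefs):

    #include <dlfcn.h>  /* may need -ldl on older glibc */

    typedef int (* pcm_open_proc)(void** ppPCM, const char* name, int stream, int mode);

    static void* load_alsa(pcm_open_proc* pOpen)
    {
        void* hALSA = dlopen("libasound.so.2", RTLD_NOW);
        if (hALSA != NULL) {
            *pOpen = (pcm_open_proc)dlsym(hALSA, "snd_pcm_open");  /* real signature takes snd_pcm_t** */
        }
        return hALSA;  /* dlclose() this handle when done */
    }
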
+
+/* Channel positions. */
+#define MA_SND_CHMAP_UNKNOWN SND_CHMAP_UNKNOWN
+#define MA_SND_CHMAP_NA SND_CHMAP_NA
+#define MA_SND_CHMAP_MONO SND_CHMAP_MONO
+#define MA_SND_CHMAP_FL SND_CHMAP_FL
+#define MA_SND_CHMAP_FR SND_CHMAP_FR
+#define MA_SND_CHMAP_RL SND_CHMAP_RL
+#define MA_SND_CHMAP_RR SND_CHMAP_RR
+#define MA_SND_CHMAP_FC SND_CHMAP_FC
+#define MA_SND_CHMAP_LFE SND_CHMAP_LFE
+#define MA_SND_CHMAP_SL SND_CHMAP_SL
+#define MA_SND_CHMAP_SR SND_CHMAP_SR
+#define MA_SND_CHMAP_RC SND_CHMAP_RC
+#define MA_SND_CHMAP_FLC SND_CHMAP_FLC
+#define MA_SND_CHMAP_FRC SND_CHMAP_FRC
+#define MA_SND_CHMAP_RLC SND_CHMAP_RLC
+#define MA_SND_CHMAP_RRC SND_CHMAP_RRC
+#define MA_SND_CHMAP_FLW SND_CHMAP_FLW
+#define MA_SND_CHMAP_FRW SND_CHMAP_FRW
+#define MA_SND_CHMAP_FLH SND_CHMAP_FLH
+#define MA_SND_CHMAP_FCH SND_CHMAP_FCH
+#define MA_SND_CHMAP_FRH SND_CHMAP_FRH
+#define MA_SND_CHMAP_TC SND_CHMAP_TC
+#define MA_SND_CHMAP_TFL SND_CHMAP_TFL
+#define MA_SND_CHMAP_TFR SND_CHMAP_TFR
+#define MA_SND_CHMAP_TFC SND_CHMAP_TFC
+#define MA_SND_CHMAP_TRL SND_CHMAP_TRL
+#define MA_SND_CHMAP_TRR SND_CHMAP_TRR
+#define MA_SND_CHMAP_TRC SND_CHMAP_TRC
+#define MA_SND_CHMAP_TFLC SND_CHMAP_TFLC
+#define MA_SND_CHMAP_TFRC SND_CHMAP_TFRC
+#define MA_SND_CHMAP_TSL SND_CHMAP_TSL
+#define MA_SND_CHMAP_TSR SND_CHMAP_TSR
+#define MA_SND_CHMAP_LLFE SND_CHMAP_LLFE
+#define MA_SND_CHMAP_RLFE SND_CHMAP_RLFE
+#define MA_SND_CHMAP_BC SND_CHMAP_BC
+#define MA_SND_CHMAP_BLC SND_CHMAP_BLC
+#define MA_SND_CHMAP_BRC SND_CHMAP_BRC
+
+/* Open mode flags. */
+#define MA_SND_PCM_NO_AUTO_RESAMPLE SND_PCM_NO_AUTO_RESAMPLE
+#define MA_SND_PCM_NO_AUTO_CHANNELS SND_PCM_NO_AUTO_CHANNELS
+#define MA_SND_PCM_NO_AUTO_FORMAT SND_PCM_NO_AUTO_FORMAT
+#else
+#include <errno.h>  /* For EPIPE, etc. */
+typedef unsigned long ma_snd_pcm_uframes_t;
+typedef long ma_snd_pcm_sframes_t;
+typedef int ma_snd_pcm_stream_t;
+typedef int ma_snd_pcm_format_t;
+typedef int ma_snd_pcm_access_t;
+typedef int ma_snd_pcm_state_t;
+typedef struct ma_snd_pcm_t ma_snd_pcm_t;
+typedef struct ma_snd_pcm_hw_params_t ma_snd_pcm_hw_params_t;
+typedef struct ma_snd_pcm_sw_params_t ma_snd_pcm_sw_params_t;
+typedef struct ma_snd_pcm_format_mask_t ma_snd_pcm_format_mask_t;
+typedef struct ma_snd_pcm_info_t ma_snd_pcm_info_t;
+typedef struct
+{
+    void* addr;
+    unsigned int first;
+    unsigned int step;
+} ma_snd_pcm_channel_area_t;
+typedef struct
+{
+    unsigned int channels;
+    unsigned int pos[1];
+} ma_snd_pcm_chmap_t;
+
+/* snd_pcm_state_t */
+#define MA_SND_PCM_STATE_OPEN 0
+#define MA_SND_PCM_STATE_SETUP 1
+#define MA_SND_PCM_STATE_PREPARED 2
+#define MA_SND_PCM_STATE_RUNNING 3
+#define MA_SND_PCM_STATE_XRUN 4
+#define MA_SND_PCM_STATE_DRAINING 5
+#define MA_SND_PCM_STATE_PAUSED 6
+#define MA_SND_PCM_STATE_SUSPENDED 7
+#define MA_SND_PCM_STATE_DISCONNECTED 8
+
+/* snd_pcm_stream_t */
+#define MA_SND_PCM_STREAM_PLAYBACK 0
+#define MA_SND_PCM_STREAM_CAPTURE 1
+
+/* snd_pcm_format_t */
+#define MA_SND_PCM_FORMAT_UNKNOWN -1
+#define MA_SND_PCM_FORMAT_U8 1
+#define MA_SND_PCM_FORMAT_S16_LE 2
+#define MA_SND_PCM_FORMAT_S16_BE 3
+#define MA_SND_PCM_FORMAT_S24_LE 6
+#define MA_SND_PCM_FORMAT_S24_BE 7
+#define MA_SND_PCM_FORMAT_S32_LE 10
+#define MA_SND_PCM_FORMAT_S32_BE 11
+#define MA_SND_PCM_FORMAT_FLOAT_LE 14
+#define MA_SND_PCM_FORMAT_FLOAT_BE 15
+#define MA_SND_PCM_FORMAT_FLOAT64_LE 16
+#define MA_SND_PCM_FORMAT_FLOAT64_BE 17
+#define MA_SND_PCM_FORMAT_MU_LAW 20
+#define MA_SND_PCM_FORMAT_A_LAW 21
+#define MA_SND_PCM_FORMAT_S24_3LE 32
+#define MA_SND_PCM_FORMAT_S24_3BE 33
+
+/* snd_pcm_access_t */
+#define
MA_SND_PCM_ACCESS_MMAP_INTERLEAVED 0 +#define MA_SND_PCM_ACCESS_MMAP_NONINTERLEAVED 1 +#define MA_SND_PCM_ACCESS_MMAP_COMPLEX 2 +#define MA_SND_PCM_ACCESS_RW_INTERLEAVED 3 +#define MA_SND_PCM_ACCESS_RW_NONINTERLEAVED 4 + +/* Channel positions. */ +#define MA_SND_CHMAP_UNKNOWN 0 +#define MA_SND_CHMAP_NA 1 +#define MA_SND_CHMAP_MONO 2 +#define MA_SND_CHMAP_FL 3 +#define MA_SND_CHMAP_FR 4 +#define MA_SND_CHMAP_RL 5 +#define MA_SND_CHMAP_RR 6 +#define MA_SND_CHMAP_FC 7 +#define MA_SND_CHMAP_LFE 8 +#define MA_SND_CHMAP_SL 9 +#define MA_SND_CHMAP_SR 10 +#define MA_SND_CHMAP_RC 11 +#define MA_SND_CHMAP_FLC 12 +#define MA_SND_CHMAP_FRC 13 +#define MA_SND_CHMAP_RLC 14 +#define MA_SND_CHMAP_RRC 15 +#define MA_SND_CHMAP_FLW 16 +#define MA_SND_CHMAP_FRW 17 +#define MA_SND_CHMAP_FLH 18 +#define MA_SND_CHMAP_FCH 19 +#define MA_SND_CHMAP_FRH 20 +#define MA_SND_CHMAP_TC 21 +#define MA_SND_CHMAP_TFL 22 +#define MA_SND_CHMAP_TFR 23 +#define MA_SND_CHMAP_TFC 24 +#define MA_SND_CHMAP_TRL 25 +#define MA_SND_CHMAP_TRR 26 +#define MA_SND_CHMAP_TRC 27 +#define MA_SND_CHMAP_TFLC 28 +#define MA_SND_CHMAP_TFRC 29 +#define MA_SND_CHMAP_TSL 30 +#define MA_SND_CHMAP_TSR 31 +#define MA_SND_CHMAP_LLFE 32 +#define MA_SND_CHMAP_RLFE 33 +#define MA_SND_CHMAP_BC 34 +#define MA_SND_CHMAP_BLC 35 +#define MA_SND_CHMAP_BRC 36 + +/* Open mode flags. */ +#define MA_SND_PCM_NO_AUTO_RESAMPLE 0x00010000 +#define MA_SND_PCM_NO_AUTO_CHANNELS 0x00020000 +#define MA_SND_PCM_NO_AUTO_FORMAT 0x00040000 +#endif + +typedef int (* ma_snd_pcm_open_proc) (ma_snd_pcm_t **pcm, const char *name, ma_snd_pcm_stream_t stream, int mode); +typedef int (* ma_snd_pcm_close_proc) (ma_snd_pcm_t *pcm); +typedef size_t (* ma_snd_pcm_hw_params_sizeof_proc) (void); +typedef int (* ma_snd_pcm_hw_params_any_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_hw_params_t *params); +typedef int (* ma_snd_pcm_hw_params_set_format_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_hw_params_t *params, ma_snd_pcm_format_t val); +typedef int (* ma_snd_pcm_hw_params_set_format_first_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_hw_params_t *params, ma_snd_pcm_format_t *format); +typedef void (* ma_snd_pcm_hw_params_get_format_mask_proc) (ma_snd_pcm_hw_params_t *params, ma_snd_pcm_format_mask_t *mask); +typedef int (* ma_snd_pcm_hw_params_set_channels_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_hw_params_t *params, unsigned int val); +typedef int (* ma_snd_pcm_hw_params_set_channels_near_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_hw_params_t *params, unsigned int *val); +typedef int (* ma_snd_pcm_hw_params_set_channels_minmax_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_hw_params_t *params, unsigned int *minimum, unsigned int *maximum); +typedef int (* ma_snd_pcm_hw_params_set_rate_resample_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_hw_params_t *params, unsigned int val); +typedef int (* ma_snd_pcm_hw_params_set_rate_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_hw_params_t *params, unsigned int val, int dir); +typedef int (* ma_snd_pcm_hw_params_set_rate_near_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_hw_params_t *params, unsigned int *val, int *dir); +typedef int (* ma_snd_pcm_hw_params_set_buffer_size_near_proc)(ma_snd_pcm_t *pcm, ma_snd_pcm_hw_params_t *params, ma_snd_pcm_uframes_t *val); +typedef int (* ma_snd_pcm_hw_params_set_periods_near_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_hw_params_t *params, unsigned int *val, int *dir); +typedef int (* ma_snd_pcm_hw_params_set_access_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_hw_params_t *params, ma_snd_pcm_access_t _access); +typedef int (* ma_snd_pcm_hw_params_get_format_proc) (const ma_snd_pcm_hw_params_t 
*params, ma_snd_pcm_format_t *format); +typedef int (* ma_snd_pcm_hw_params_get_channels_proc) (const ma_snd_pcm_hw_params_t *params, unsigned int *val); +typedef int (* ma_snd_pcm_hw_params_get_channels_min_proc) (const ma_snd_pcm_hw_params_t *params, unsigned int *val); +typedef int (* ma_snd_pcm_hw_params_get_channels_max_proc) (const ma_snd_pcm_hw_params_t *params, unsigned int *val); +typedef int (* ma_snd_pcm_hw_params_get_rate_proc) (const ma_snd_pcm_hw_params_t *params, unsigned int *rate, int *dir); +typedef int (* ma_snd_pcm_hw_params_get_rate_min_proc) (const ma_snd_pcm_hw_params_t *params, unsigned int *rate, int *dir); +typedef int (* ma_snd_pcm_hw_params_get_rate_max_proc) (const ma_snd_pcm_hw_params_t *params, unsigned int *rate, int *dir); +typedef int (* ma_snd_pcm_hw_params_get_buffer_size_proc) (const ma_snd_pcm_hw_params_t *params, ma_snd_pcm_uframes_t *val); +typedef int (* ma_snd_pcm_hw_params_get_periods_proc) (const ma_snd_pcm_hw_params_t *params, unsigned int *val, int *dir); +typedef int (* ma_snd_pcm_hw_params_get_access_proc) (const ma_snd_pcm_hw_params_t *params, ma_snd_pcm_access_t *_access); +typedef int (* ma_snd_pcm_hw_params_test_format_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_hw_params_t *params, ma_snd_pcm_format_t val); +typedef int (* ma_snd_pcm_hw_params_test_channels_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_hw_params_t *params, unsigned int val); +typedef int (* ma_snd_pcm_hw_params_test_rate_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_hw_params_t *params, unsigned int val, int dir); +typedef int (* ma_snd_pcm_hw_params_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_hw_params_t *params); +typedef size_t (* ma_snd_pcm_sw_params_sizeof_proc) (void); +typedef int (* ma_snd_pcm_sw_params_current_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_sw_params_t *params); +typedef int (* ma_snd_pcm_sw_params_get_boundary_proc) (const ma_snd_pcm_sw_params_t *params, ma_snd_pcm_uframes_t* val); +typedef int (* ma_snd_pcm_sw_params_set_avail_min_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_sw_params_t *params, ma_snd_pcm_uframes_t val); +typedef int (* ma_snd_pcm_sw_params_set_start_threshold_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_sw_params_t *params, ma_snd_pcm_uframes_t val); +typedef int (* ma_snd_pcm_sw_params_set_stop_threshold_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_sw_params_t *params, ma_snd_pcm_uframes_t val); +typedef int (* ma_snd_pcm_sw_params_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_sw_params_t *params); +typedef size_t (* ma_snd_pcm_format_mask_sizeof_proc) (void); +typedef int (* ma_snd_pcm_format_mask_test_proc) (const ma_snd_pcm_format_mask_t *mask, ma_snd_pcm_format_t val); +typedef ma_snd_pcm_chmap_t * (* ma_snd_pcm_get_chmap_proc) (ma_snd_pcm_t *pcm); +typedef ma_snd_pcm_state_t (* ma_snd_pcm_state_proc) (ma_snd_pcm_t *pcm); +typedef int (* ma_snd_pcm_prepare_proc) (ma_snd_pcm_t *pcm); +typedef int (* ma_snd_pcm_start_proc) (ma_snd_pcm_t *pcm); +typedef int (* ma_snd_pcm_drop_proc) (ma_snd_pcm_t *pcm); +typedef int (* ma_snd_pcm_drain_proc) (ma_snd_pcm_t *pcm); +typedef int (* ma_snd_pcm_reset_proc) (ma_snd_pcm_t *pcm); +typedef int (* ma_snd_device_name_hint_proc) (int card, const char *iface, void ***hints); +typedef char * (* ma_snd_device_name_get_hint_proc) (const void *hint, const char *id); +typedef int (* ma_snd_card_get_index_proc) (const char *name); +typedef int (* ma_snd_device_name_free_hint_proc) (void **hints); +typedef int (* ma_snd_pcm_mmap_begin_proc) (ma_snd_pcm_t *pcm, const ma_snd_pcm_channel_area_t **areas, ma_snd_pcm_uframes_t *offset, ma_snd_pcm_uframes_t *frames); +typedef 
ma_snd_pcm_sframes_t (* ma_snd_pcm_mmap_commit_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_uframes_t offset, ma_snd_pcm_uframes_t frames); +typedef int (* ma_snd_pcm_recover_proc) (ma_snd_pcm_t *pcm, int err, int silent); +typedef ma_snd_pcm_sframes_t (* ma_snd_pcm_readi_proc) (ma_snd_pcm_t *pcm, void *buffer, ma_snd_pcm_uframes_t size); +typedef ma_snd_pcm_sframes_t (* ma_snd_pcm_writei_proc) (ma_snd_pcm_t *pcm, const void *buffer, ma_snd_pcm_uframes_t size); +typedef ma_snd_pcm_sframes_t (* ma_snd_pcm_avail_proc) (ma_snd_pcm_t *pcm); +typedef ma_snd_pcm_sframes_t (* ma_snd_pcm_avail_update_proc) (ma_snd_pcm_t *pcm); +typedef int (* ma_snd_pcm_wait_proc) (ma_snd_pcm_t *pcm, int timeout); +typedef int (* ma_snd_pcm_nonblock_proc) (ma_snd_pcm_t *pcm, int nonblock); +typedef int (* ma_snd_pcm_info_proc) (ma_snd_pcm_t *pcm, ma_snd_pcm_info_t* info); +typedef size_t (* ma_snd_pcm_info_sizeof_proc) (void); +typedef const char* (* ma_snd_pcm_info_get_name_proc) (const ma_snd_pcm_info_t* info); +typedef int (* ma_snd_pcm_poll_descriptors_proc) (ma_snd_pcm_t *pcm, struct pollfd *pfds, unsigned int space); +typedef int (* ma_snd_pcm_poll_descriptors_count_proc) (ma_snd_pcm_t *pcm); +typedef int (* ma_snd_pcm_poll_descriptors_revents_proc) (ma_snd_pcm_t *pcm, struct pollfd *pfds, unsigned int nfds, unsigned short *revents); +typedef int (* ma_snd_config_update_free_global_proc) (void); + +/* This array specifies each of the common devices that can be used for both playback and capture. */ +static const char* g_maCommonDeviceNamesALSA[] = { + "default", + "null", + "pulse", + "jack" +}; + +/* This array allows us to blacklist specific playback devices. */ +static const char* g_maBlacklistedPlaybackDeviceNamesALSA[] = { + "" +}; + +/* This array allows us to blacklist specific capture devices. */ +static const char* g_maBlacklistedCaptureDeviceNamesALSA[] = { + "" +}; + + +static ma_snd_pcm_format_t ma_convert_ma_format_to_alsa_format(ma_format format) +{ + ma_snd_pcm_format_t ALSAFormats[] = { + MA_SND_PCM_FORMAT_UNKNOWN, /* ma_format_unknown */ + MA_SND_PCM_FORMAT_U8, /* ma_format_u8 */ + MA_SND_PCM_FORMAT_S16_LE, /* ma_format_s16 */ + MA_SND_PCM_FORMAT_S24_3LE, /* ma_format_s24 */ + MA_SND_PCM_FORMAT_S32_LE, /* ma_format_s32 */ + MA_SND_PCM_FORMAT_FLOAT_LE /* ma_format_f32 */ + }; + + if (ma_is_big_endian()) { + ALSAFormats[0] = MA_SND_PCM_FORMAT_UNKNOWN; + ALSAFormats[1] = MA_SND_PCM_FORMAT_U8; + ALSAFormats[2] = MA_SND_PCM_FORMAT_S16_BE; + ALSAFormats[3] = MA_SND_PCM_FORMAT_S24_3BE; + ALSAFormats[4] = MA_SND_PCM_FORMAT_S32_BE; + ALSAFormats[5] = MA_SND_PCM_FORMAT_FLOAT_BE; + } + + return ALSAFormats[format]; +} + +static ma_format ma_format_from_alsa(ma_snd_pcm_format_t formatALSA) +{ + if (ma_is_little_endian()) { + switch (formatALSA) { + case MA_SND_PCM_FORMAT_S16_LE: return ma_format_s16; + case MA_SND_PCM_FORMAT_S24_3LE: return ma_format_s24; + case MA_SND_PCM_FORMAT_S32_LE: return ma_format_s32; + case MA_SND_PCM_FORMAT_FLOAT_LE: return ma_format_f32; + default: break; + } + } else { + switch (formatALSA) { + case MA_SND_PCM_FORMAT_S16_BE: return ma_format_s16; + case MA_SND_PCM_FORMAT_S24_3BE: return ma_format_s24; + case MA_SND_PCM_FORMAT_S32_BE: return ma_format_s32; + case MA_SND_PCM_FORMAT_FLOAT_BE: return ma_format_f32; + default: break; + } + } + + /* Endian agnostic. 
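
As a quick illustration of the mapping pair above on a little-endian host (note that 24-bit deliberately maps to the packed 3-byte S24_3LE variant rather than the 4-byte S24_LE, matching miniaudio's tightly packed ma_format_s24):

    ma_snd_pcm_format_t f = ma_convert_ma_format_to_alsa_format(ma_format_s24); /* MA_SND_PCM_FORMAT_S24_3LE */
    ma_format g = ma_format_from_alsa(f);                                       /* ma_format_s24 */
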
*/ + switch (formatALSA) { + case MA_SND_PCM_FORMAT_U8: return ma_format_u8; + default: return ma_format_unknown; + } +} + +static ma_channel ma_convert_alsa_channel_position_to_ma_channel(unsigned int alsaChannelPos) +{ + switch (alsaChannelPos) + { + case MA_SND_CHMAP_MONO: return MA_CHANNEL_MONO; + case MA_SND_CHMAP_FL: return MA_CHANNEL_FRONT_LEFT; + case MA_SND_CHMAP_FR: return MA_CHANNEL_FRONT_RIGHT; + case MA_SND_CHMAP_RL: return MA_CHANNEL_BACK_LEFT; + case MA_SND_CHMAP_RR: return MA_CHANNEL_BACK_RIGHT; + case MA_SND_CHMAP_FC: return MA_CHANNEL_FRONT_CENTER; + case MA_SND_CHMAP_LFE: return MA_CHANNEL_LFE; + case MA_SND_CHMAP_SL: return MA_CHANNEL_SIDE_LEFT; + case MA_SND_CHMAP_SR: return MA_CHANNEL_SIDE_RIGHT; + case MA_SND_CHMAP_RC: return MA_CHANNEL_BACK_CENTER; + case MA_SND_CHMAP_FLC: return MA_CHANNEL_FRONT_LEFT_CENTER; + case MA_SND_CHMAP_FRC: return MA_CHANNEL_FRONT_RIGHT_CENTER; + case MA_SND_CHMAP_RLC: return 0; + case MA_SND_CHMAP_RRC: return 0; + case MA_SND_CHMAP_FLW: return 0; + case MA_SND_CHMAP_FRW: return 0; + case MA_SND_CHMAP_FLH: return 0; + case MA_SND_CHMAP_FCH: return 0; + case MA_SND_CHMAP_FRH: return 0; + case MA_SND_CHMAP_TC: return MA_CHANNEL_TOP_CENTER; + case MA_SND_CHMAP_TFL: return MA_CHANNEL_TOP_FRONT_LEFT; + case MA_SND_CHMAP_TFR: return MA_CHANNEL_TOP_FRONT_RIGHT; + case MA_SND_CHMAP_TFC: return MA_CHANNEL_TOP_FRONT_CENTER; + case MA_SND_CHMAP_TRL: return MA_CHANNEL_TOP_BACK_LEFT; + case MA_SND_CHMAP_TRR: return MA_CHANNEL_TOP_BACK_RIGHT; + case MA_SND_CHMAP_TRC: return MA_CHANNEL_TOP_BACK_CENTER; + default: break; + } + + return 0; +} + +static ma_bool32 ma_is_common_device_name__alsa(const char* name) +{ + size_t iName; + for (iName = 0; iName < ma_countof(g_maCommonDeviceNamesALSA); ++iName) { + if (ma_strcmp(name, g_maCommonDeviceNamesALSA[iName]) == 0) { + return MA_TRUE; + } + } + + return MA_FALSE; +} + + +static ma_bool32 ma_is_playback_device_blacklisted__alsa(const char* name) +{ + size_t iName; + for (iName = 0; iName < ma_countof(g_maBlacklistedPlaybackDeviceNamesALSA); ++iName) { + if (ma_strcmp(name, g_maBlacklistedPlaybackDeviceNamesALSA[iName]) == 0) { + return MA_TRUE; + } + } + + return MA_FALSE; +} + +static ma_bool32 ma_is_capture_device_blacklisted__alsa(const char* name) +{ + size_t iName; + for (iName = 0; iName < ma_countof(g_maBlacklistedCaptureDeviceNamesALSA); ++iName) { + if (ma_strcmp(name, g_maBlacklistedCaptureDeviceNamesALSA[iName]) == 0) { + return MA_TRUE; + } + } + + return MA_FALSE; +} + +static ma_bool32 ma_is_device_blacklisted__alsa(ma_device_type deviceType, const char* name) +{ + if (deviceType == ma_device_type_playback) { + return ma_is_playback_device_blacklisted__alsa(name); + } else { + return ma_is_capture_device_blacklisted__alsa(name); + } +} + + +static const char* ma_find_char(const char* str, char c, int* index) +{ + int i = 0; + for (;;) { + if (str[i] == '\0') { + if (index) *index = -1; + return NULL; + } + + if (str[i] == c) { + if (index) *index = i; + return str + i; + } + + i += 1; + } + + /* Should never get here, but treat it as though the character was not found to make me feel better inside. */ + if (index) *index = -1; + return NULL; +} + +static ma_bool32 ma_is_device_name_in_hw_format__alsa(const char* hwid) +{ + /* This function is just checking whether or not hwid is in "hw:%d,%d" format. 
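
Concretely, only a literal card/device index pair passes. Some examples of how the checks below classify typical ALSA names:

    ma_is_device_name_in_hw_format__alsa("hw:0,0");             /* MA_TRUE                              */
    ma_is_device_name_in_hw_format__alsa("hw:1,3");             /* MA_TRUE                              */
    ma_is_device_name_in_hw_format__alsa("hw:CARD=PCH,DEV=0");  /* MA_FALSE - card part is not numeric  */
    ma_is_device_name_in_hw_format__alsa("default");            /* MA_FALSE - no "hw:" prefix           */
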
*/ + + int commaPos; + const char* dev; + int i; + + if (hwid == NULL) { + return MA_FALSE; + } + + if (hwid[0] != 'h' || hwid[1] != 'w' || hwid[2] != ':') { + return MA_FALSE; + } + + hwid += 3; + + dev = ma_find_char(hwid, ',', &commaPos); + if (dev == NULL) { + return MA_FALSE; + } else { + dev += 1; /* Skip past the ",". */ + } + + /* Check if the part between the ":" and the "," contains only numbers. If not, return false. */ + for (i = 0; i < commaPos; ++i) { + if (hwid[i] < '0' || hwid[i] > '9') { + return MA_FALSE; + } + } + + /* Check if everything after the "," is numeric. If not, return false. */ + i = 0; + while (dev[i] != '\0') { + if (dev[i] < '0' || dev[i] > '9') { + return MA_FALSE; + } + i += 1; + } + + return MA_TRUE; +} + +static int ma_convert_device_name_to_hw_format__alsa(ma_context* pContext, char* dst, size_t dstSize, const char* src) /* Returns 0 on success, non-0 on error. */ +{ + /* src should look something like this: "hw:CARD=I82801AAICH,DEV=0" */ + + int colonPos; + int commaPos; + char card[256]; + const char* dev; + int cardIndex; + + if (dst == NULL) { + return -1; + } + if (dstSize < 7) { + return -1; /* Absolute minimum size of the output buffer is 7 bytes. */ + } + + *dst = '\0'; /* Safety. */ + if (src == NULL) { + return -1; + } + + /* If the input name is already in "hw:%d,%d" format, just return that verbatim. */ + if (ma_is_device_name_in_hw_format__alsa(src)) { + return ma_strcpy_s(dst, dstSize, src); + } + + src = ma_find_char(src, ':', &colonPos); + if (src == NULL) { + return -1; /* Couldn't find a colon */ + } + + dev = ma_find_char(src, ',', &commaPos); + if (dev == NULL) { + dev = "0"; + ma_strncpy_s(card, sizeof(card), src+6, (size_t)-1); /* +6 = ":CARD=" */ + } else { + dev = dev + 5; /* +5 = ",DEV=" */ + ma_strncpy_s(card, sizeof(card), src+6, commaPos-6); /* +6 = ":CARD=" */ + } + + cardIndex = ((ma_snd_card_get_index_proc)pContext->alsa.snd_card_get_index)(card); + if (cardIndex < 0) { + return -2; /* Failed to retrieve the card index. */ + } + + + /* Construction. */ + dst[0] = 'h'; dst[1] = 'w'; dst[2] = ':'; + if (ma_itoa_s(cardIndex, dst+3, dstSize-3, 10) != 0) { + return -3; + } + if (ma_strcat_s(dst, dstSize, ",") != 0) { + return -3; + } + if (ma_strcat_s(dst, dstSize, dev) != 0) { + return -3; + } + + return 0; +} + +static ma_bool32 ma_does_id_exist_in_list__alsa(ma_device_id* pUniqueIDs, ma_uint32 count, const char* pHWID) +{ + ma_uint32 i; + + MA_ASSERT(pHWID != NULL); + + for (i = 0; i < count; ++i) { + if (ma_strcmp(pUniqueIDs[i].alsa, pHWID) == 0) { + return MA_TRUE; + } + } + + return MA_FALSE; +} + + +static ma_result ma_context_open_pcm__alsa(ma_context* pContext, ma_share_mode shareMode, ma_device_type deviceType, const ma_device_id* pDeviceID, int openMode, ma_snd_pcm_t** ppPCM) +{ + ma_snd_pcm_t* pPCM; + ma_snd_pcm_stream_t stream; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(ppPCM != NULL); + + *ppPCM = NULL; + pPCM = NULL; + + stream = (deviceType == ma_device_type_playback) ? MA_SND_PCM_STREAM_PLAYBACK : MA_SND_PCM_STREAM_CAPTURE; + + if (pDeviceID == NULL) { + ma_bool32 isDeviceOpen; + size_t i; + + /* + We're opening the default device. I don't know if trying anything other than "default" is necessary, but it makes + me feel better to try as hard as we can get to get _something_ working. 
+ */ + const char* defaultDeviceNames[] = { + "default", + NULL, + NULL, + NULL, + NULL, + NULL, + NULL + }; + + if (shareMode == ma_share_mode_exclusive) { + defaultDeviceNames[1] = "hw"; + defaultDeviceNames[2] = "hw:0"; + defaultDeviceNames[3] = "hw:0,0"; + } else { + if (deviceType == ma_device_type_playback) { + defaultDeviceNames[1] = "dmix"; + defaultDeviceNames[2] = "dmix:0"; + defaultDeviceNames[3] = "dmix:0,0"; + } else { + defaultDeviceNames[1] = "dsnoop"; + defaultDeviceNames[2] = "dsnoop:0"; + defaultDeviceNames[3] = "dsnoop:0,0"; + } + defaultDeviceNames[4] = "hw"; + defaultDeviceNames[5] = "hw:0"; + defaultDeviceNames[6] = "hw:0,0"; + } + + isDeviceOpen = MA_FALSE; + for (i = 0; i < ma_countof(defaultDeviceNames); ++i) { + if (defaultDeviceNames[i] != NULL && defaultDeviceNames[i][0] != '\0') { + if (((ma_snd_pcm_open_proc)pContext->alsa.snd_pcm_open)(&pPCM, defaultDeviceNames[i], stream, openMode) == 0) { + isDeviceOpen = MA_TRUE; + break; + } + } + } + + if (!isDeviceOpen) { + ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[ALSA] snd_pcm_open() failed when trying to open an appropriate default device."); + return MA_FAILED_TO_OPEN_BACKEND_DEVICE; + } + } else { + /* + We're trying to open a specific device. There's a few things to consider here: + + miniaudio recognizes a special format of device id that excludes the "hw", "dmix", etc. prefix. It looks like this: ":0,0", ":0,1", etc. When + an ID of this format is specified, it indicates to miniaudio that it can try different combinations of plugins ("hw", "dmix", etc.) until it + finds an appropriate one that works. This comes in very handy when trying to open a device in shared mode ("dmix"), vs exclusive mode ("hw"). + */ + + /* May end up needing to make small adjustments to the ID, so make a copy. */ + ma_device_id deviceID = *pDeviceID; + int resultALSA = -ENODEV; + + if (deviceID.alsa[0] != ':') { + /* The ID is not in ":0,0" format. Use the ID exactly as-is. */ + resultALSA = ((ma_snd_pcm_open_proc)pContext->alsa.snd_pcm_open)(&pPCM, deviceID.alsa, stream, openMode); + } else { + char hwid[256]; + + /* The ID is in ":0,0" format. Try different plugins depending on the shared mode. */ + if (deviceID.alsa[1] == '\0') { + deviceID.alsa[0] = '\0'; /* An ID of ":" should be converted to "". */ + } + + if (shareMode == ma_share_mode_shared) { + if (deviceType == ma_device_type_playback) { + ma_strcpy_s(hwid, sizeof(hwid), "dmix"); + } else { + ma_strcpy_s(hwid, sizeof(hwid), "dsnoop"); + } + + if (ma_strcat_s(hwid, sizeof(hwid), deviceID.alsa) == 0) { + resultALSA = ((ma_snd_pcm_open_proc)pContext->alsa.snd_pcm_open)(&pPCM, hwid, stream, openMode); + } + } + + /* If at this point we still don't have an open device it means we're either preferencing exclusive mode or opening with "dmix"/"dsnoop" failed. 
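
Spelling out the fallback rules this block implements for the special ":%d,%d" IDs:

    ":0,0" + shared playback  ->  try "dmix:0,0",   then "hw:0,0" on failure
    ":0,0" + shared capture   ->  try "dsnoop:0,0", then "hw:0,0" on failure
    ":0,0" + exclusive mode   ->  try "hw:0,0" only
    ":"    (bare)             ->  treated as "", i.e. plain "dmix"/"dsnoop"/"hw"
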
*/ + if (resultALSA != 0) { + ma_strcpy_s(hwid, sizeof(hwid), "hw"); + if (ma_strcat_s(hwid, sizeof(hwid), deviceID.alsa) == 0) { + resultALSA = ((ma_snd_pcm_open_proc)pContext->alsa.snd_pcm_open)(&pPCM, hwid, stream, openMode); + } + } + } + + if (resultALSA < 0) { + ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[ALSA] snd_pcm_open() failed."); + return ma_result_from_errno(-resultALSA); + } + } + + *ppPCM = pPCM; + return MA_SUCCESS; +} + + +static ma_result ma_context_enumerate_devices__alsa(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData) +{ + int resultALSA; + ma_bool32 cbResult = MA_TRUE; + char** ppDeviceHints; + ma_device_id* pUniqueIDs = NULL; + ma_uint32 uniqueIDCount = 0; + char** ppNextDeviceHint; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(callback != NULL); + + ma_mutex_lock(&pContext->alsa.internalDeviceEnumLock); + + resultALSA = ((ma_snd_device_name_hint_proc)pContext->alsa.snd_device_name_hint)(-1, "pcm", (void***)&ppDeviceHints); + if (resultALSA < 0) { + ma_mutex_unlock(&pContext->alsa.internalDeviceEnumLock); + return ma_result_from_errno(-resultALSA); + } + + ppNextDeviceHint = ppDeviceHints; + while (*ppNextDeviceHint != NULL) { + char* NAME = ((ma_snd_device_name_get_hint_proc)pContext->alsa.snd_device_name_get_hint)(*ppNextDeviceHint, "NAME"); + char* DESC = ((ma_snd_device_name_get_hint_proc)pContext->alsa.snd_device_name_get_hint)(*ppNextDeviceHint, "DESC"); + char* IOID = ((ma_snd_device_name_get_hint_proc)pContext->alsa.snd_device_name_get_hint)(*ppNextDeviceHint, "IOID"); + ma_device_type deviceType = ma_device_type_playback; + ma_bool32 stopEnumeration = MA_FALSE; + char hwid[sizeof(pUniqueIDs->alsa)]; + ma_device_info deviceInfo; + + if ((IOID == NULL || ma_strcmp(IOID, "Output") == 0)) { + deviceType = ma_device_type_playback; + } + if ((IOID != NULL && ma_strcmp(IOID, "Input" ) == 0)) { + deviceType = ma_device_type_capture; + } + + if (NAME != NULL) { + if (pContext->alsa.useVerboseDeviceEnumeration) { + /* Verbose mode. Use the name exactly as-is. */ + ma_strncpy_s(hwid, sizeof(hwid), NAME, (size_t)-1); + } else { + /* Simplified mode. Use ":%d,%d" format. */ + if (ma_convert_device_name_to_hw_format__alsa(pContext, hwid, sizeof(hwid), NAME) == 0) { + /* + At this point, hwid looks like "hw:0,0". In simplified enumeration mode, we actually want to strip off the + plugin name so it looks like ":0,0". The reason for this is that this special format is detected at device + initialization time and is used as an indicator to try to use the most appropriate plugin depending on the + device type and sharing mode. + */ + char* dst = hwid; + char* src = hwid+2; + while ((*dst++ = *src++)); + } else { + /* Conversion to "hw:%d,%d" failed. Just use the name as-is. */ + ma_strncpy_s(hwid, sizeof(hwid), NAME, (size_t)-1); + } + + if (ma_does_id_exist_in_list__alsa(pUniqueIDs, uniqueIDCount, hwid)) { + goto next_device; /* The device has already been enumerated. Move on to the next one. */ + } else { + /* The device has not yet been enumerated. Make sure it's added to our list so that it's not enumerated again. */ + size_t newCapacity = sizeof(*pUniqueIDs) * (uniqueIDCount + 1); + ma_device_id* pNewUniqueIDs = (ma_device_id*)ma_realloc(pUniqueIDs, newCapacity, &pContext->allocationCallbacks); + if (pNewUniqueIDs == NULL) { + goto next_device; /* Failed to allocate memory. 
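+
+/*
+To make the special ":0,0" format above concrete, this is how an ID resolves depending on share
+mode and device type (a summary of the logic above, not additional behaviour):
+
+    ":0,0" + ma_share_mode_shared    + playback  ->  "dmix:0,0",   falling back to "hw:0,0"
+    ":0,0" + ma_share_mode_shared    + capture   ->  "dsnoop:0,0", falling back to "hw:0,0"
+    ":0,0" + ma_share_mode_exclusive + either    ->  "hw:0,0"
+*/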
+
+
+static ma_result ma_context_enumerate_devices__alsa(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData)
+{
+    int resultALSA;
+    ma_bool32 cbResult = MA_TRUE;
+    char** ppDeviceHints;
+    ma_device_id* pUniqueIDs = NULL;
+    ma_uint32 uniqueIDCount = 0;
+    char** ppNextDeviceHint;
+
+    MA_ASSERT(pContext != NULL);
+    MA_ASSERT(callback != NULL);
+
+    ma_mutex_lock(&pContext->alsa.internalDeviceEnumLock);
+
+    resultALSA = ((ma_snd_device_name_hint_proc)pContext->alsa.snd_device_name_hint)(-1, "pcm", (void***)&ppDeviceHints);
+    if (resultALSA < 0) {
+        ma_mutex_unlock(&pContext->alsa.internalDeviceEnumLock);
+        return ma_result_from_errno(-resultALSA);
+    }
+
+    ppNextDeviceHint = ppDeviceHints;
+    while (*ppNextDeviceHint != NULL) {
+        char* NAME = ((ma_snd_device_name_get_hint_proc)pContext->alsa.snd_device_name_get_hint)(*ppNextDeviceHint, "NAME");
+        char* DESC = ((ma_snd_device_name_get_hint_proc)pContext->alsa.snd_device_name_get_hint)(*ppNextDeviceHint, "DESC");
+        char* IOID = ((ma_snd_device_name_get_hint_proc)pContext->alsa.snd_device_name_get_hint)(*ppNextDeviceHint, "IOID");
+        ma_device_type deviceType = ma_device_type_playback;
+        ma_bool32 stopEnumeration = MA_FALSE;
+        char hwid[sizeof(pUniqueIDs->alsa)];
+        ma_device_info deviceInfo;
+
+        if ((IOID == NULL || ma_strcmp(IOID, "Output") == 0)) {
+            deviceType = ma_device_type_playback;
+        }
+        if ((IOID != NULL && ma_strcmp(IOID, "Input" ) == 0)) {
+            deviceType = ma_device_type_capture;
+        }
+
+        if (NAME != NULL) {
+            if (pContext->alsa.useVerboseDeviceEnumeration) {
+                /* Verbose mode. Use the name exactly as-is. */
+                ma_strncpy_s(hwid, sizeof(hwid), NAME, (size_t)-1);
+            } else {
+                /* Simplified mode. Use ":%d,%d" format. */
+                if (ma_convert_device_name_to_hw_format__alsa(pContext, hwid, sizeof(hwid), NAME) == 0) {
+                    /*
+                    At this point, hwid looks like "hw:0,0". In simplified enumeration mode, we actually want to strip off the
+                    plugin name so it looks like ":0,0". The reason for this is that this special format is detected at device
+                    initialization time and is used as an indicator to try to use the most appropriate plugin depending on the
+                    device type and sharing mode.
+                    */
+                    char* dst = hwid;
+                    char* src = hwid+2;
+                    while ((*dst++ = *src++));
+                } else {
+                    /* Conversion to "hw:%d,%d" failed. Just use the name as-is. */
+                    ma_strncpy_s(hwid, sizeof(hwid), NAME, (size_t)-1);
+                }
+
+                if (ma_does_id_exist_in_list__alsa(pUniqueIDs, uniqueIDCount, hwid)) {
+                    goto next_device;   /* The device has already been enumerated. Move on to the next one. */
+                } else {
+                    /* The device has not yet been enumerated. Make sure it's added to our list so that it's not enumerated again. */
+                    size_t newCapacity = sizeof(*pUniqueIDs) * (uniqueIDCount + 1);
+                    ma_device_id* pNewUniqueIDs = (ma_device_id*)ma_realloc(pUniqueIDs, newCapacity, &pContext->allocationCallbacks);
+                    if (pNewUniqueIDs == NULL) {
+                        goto next_device;   /* Failed to allocate memory. */
+                    }
+
+                    pUniqueIDs = pNewUniqueIDs;
+                    MA_COPY_MEMORY(pUniqueIDs[uniqueIDCount].alsa, hwid, sizeof(hwid));
+                    uniqueIDCount += 1;
+                }
+            }
+        } else {
+            MA_ZERO_MEMORY(hwid, sizeof(hwid));
+        }
+
+        MA_ZERO_OBJECT(&deviceInfo);
+        ma_strncpy_s(deviceInfo.id.alsa, sizeof(deviceInfo.id.alsa), hwid, (size_t)-1);
+
+        /*
+        There's no good way to determine whether or not a device is the default on Linux. We're just going to do something simple and
+        just use the name of "default" as the indicator.
+        */
+        if (ma_strcmp(deviceInfo.id.alsa, "default") == 0) {
+            deviceInfo.isDefault = MA_TRUE;
+        }
+
+
+        /*
+        DESC is the friendly name. We treat this slightly differently depending on whether or not we are using verbose
+        device enumeration. In verbose mode we want to take the entire description so that the end-user can distinguish
+        between the subdevices of each card/dev pair. In simplified mode, however, we only want the first part of the
+        description.
+
+        The value in DESC seems to be split into two lines, with the first line being the name of the device and the
+        second line being a description of the device. I don't like having the description be across two lines because
+        it makes formatting ugly and annoying. I'm therefore deciding to put it all on a single line with the second line
+        being put into parentheses. In simplified mode I'm just stripping the second line entirely.
+        */
+        if (DESC != NULL) {
+            int lfPos;
+            const char* line2 = ma_find_char(DESC, '\n', &lfPos);
+            if (line2 != NULL) {
+                line2 += 1; /* Skip past the new-line character. */
+
+                if (pContext->alsa.useVerboseDeviceEnumeration) {
+                    /* Verbose mode. Put the second line in parentheses. */
+                    ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), DESC, lfPos);
+                    ma_strcat_s (deviceInfo.name, sizeof(deviceInfo.name), " (");
+                    ma_strcat_s (deviceInfo.name, sizeof(deviceInfo.name), line2);
+                    ma_strcat_s (deviceInfo.name, sizeof(deviceInfo.name), ")");
+                } else {
+                    /* Simplified mode. Strip the second line entirely. */
+                    ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), DESC, lfPos);
+                }
+            } else {
+                /* There's no second line. Just copy the whole description. */
+                ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), DESC, (size_t)-1);
+            }
+        }
+
+        if (!ma_is_device_blacklisted__alsa(deviceType, NAME)) {
+            cbResult = callback(pContext, deviceType, &deviceInfo, pUserData);
+        }
+
+        /*
+        Some devices are both playback and capture, but they are only enumerated by ALSA once. We need to fire the callback
+        again for the other device type in this case. We do this for known devices and where the IOID hint is NULL, which
+        means both Input and Output.
+        */
+        if (cbResult) {
+            if (ma_is_common_device_name__alsa(NAME) || IOID == NULL) {
+                if (deviceType == ma_device_type_playback) {
+                    if (!ma_is_capture_device_blacklisted__alsa(NAME)) {
+                        cbResult = callback(pContext, ma_device_type_capture, &deviceInfo, pUserData);
+                    }
+                } else {
+                    if (!ma_is_playback_device_blacklisted__alsa(NAME)) {
+                        cbResult = callback(pContext, ma_device_type_playback, &deviceInfo, pUserData);
+                    }
+                }
+            }
+        }
+
+        if (cbResult == MA_FALSE) {
+            stopEnumeration = MA_TRUE;
+        }
+
+    next_device:
+        free(NAME);
+        free(DESC);
+        free(IOID);
+        ppNextDeviceHint += 1;
+
+        /* We need to stop enumeration if the callback returned false. */
+        if (stopEnumeration) {
+            break;
+        }
+    }
+
+    ma_free(pUniqueIDs, &pContext->allocationCallbacks);
+    ((ma_snd_device_name_free_hint_proc)pContext->alsa.snd_device_name_free_hint)((void**)ppDeviceHints);
+
+    ma_mutex_unlock(&pContext->alsa.internalDeviceEnumLock);
+
+    return MA_SUCCESS;
+}
+
+
+typedef struct
+{
+    ma_device_type deviceType;
+    const ma_device_id* pDeviceID;
+    ma_share_mode shareMode;
+    ma_device_info* pDeviceInfo;
+    ma_bool32 foundDevice;
+} ma_context_get_device_info_enum_callback_data__alsa;
+
+static ma_bool32 ma_context_get_device_info_enum_callback__alsa(ma_context* pContext, ma_device_type deviceType, const ma_device_info* pDeviceInfo, void* pUserData)
+{
+    ma_context_get_device_info_enum_callback_data__alsa* pData = (ma_context_get_device_info_enum_callback_data__alsa*)pUserData;
+    MA_ASSERT(pData != NULL);
+
+    (void)pContext;
+
+    if (pData->pDeviceID == NULL && ma_strcmp(pDeviceInfo->id.alsa, "default") == 0) {
+        ma_strncpy_s(pData->pDeviceInfo->name, sizeof(pData->pDeviceInfo->name), pDeviceInfo->name, (size_t)-1);
+        pData->foundDevice = MA_TRUE;
+    } else {
+        if (pData->deviceType == deviceType && (pData->pDeviceID != NULL && ma_strcmp(pData->pDeviceID->alsa, pDeviceInfo->id.alsa) == 0)) {
+            ma_strncpy_s(pData->pDeviceInfo->name, sizeof(pData->pDeviceInfo->name), pDeviceInfo->name, (size_t)-1);
+            pData->foundDevice = MA_TRUE;
+        }
+    }
+
+    /* Keep enumerating until we have found the device. */
+    return !pData->foundDevice;
+}
+
+static void ma_context_test_rate_and_add_native_data_format__alsa(ma_context* pContext, ma_snd_pcm_t* pPCM, ma_snd_pcm_hw_params_t* pHWParams, ma_format format, ma_uint32 channels, ma_uint32 sampleRate, ma_uint32 flags, ma_device_info* pDeviceInfo)
+{
+    MA_ASSERT(pPCM != NULL);
+    MA_ASSERT(pHWParams != NULL);
+    MA_ASSERT(pDeviceInfo != NULL);
+
+    if (pDeviceInfo->nativeDataFormatCount < ma_countof(pDeviceInfo->nativeDataFormats) && ((ma_snd_pcm_hw_params_test_rate_proc)pContext->alsa.snd_pcm_hw_params_test_rate)(pPCM, pHWParams, sampleRate, 0) == 0) {
+        pDeviceInfo->nativeDataFormats[pDeviceInfo->nativeDataFormatCount].format = format;
+        pDeviceInfo->nativeDataFormats[pDeviceInfo->nativeDataFormatCount].channels = channels;
+        pDeviceInfo->nativeDataFormats[pDeviceInfo->nativeDataFormatCount].sampleRate = sampleRate;
+        pDeviceInfo->nativeDataFormats[pDeviceInfo->nativeDataFormatCount].flags = flags;
+        pDeviceInfo->nativeDataFormatCount += 1;
+    }
+}
+
+static void ma_context_iterate_rates_and_add_native_data_format__alsa(ma_context* pContext, ma_snd_pcm_t* pPCM, ma_snd_pcm_hw_params_t* pHWParams, ma_format format, ma_uint32 channels, ma_uint32 flags, ma_device_info* pDeviceInfo)
+{
+    ma_uint32 iSampleRate;
+    unsigned int minSampleRate;
+    unsigned int maxSampleRate;
+    int sampleRateDir;  /* Not used. Just passed into snd_pcm_hw_params_get_rate_min/max(). */
+
+    /* There could be a range. */
+    ((ma_snd_pcm_hw_params_get_rate_min_proc)pContext->alsa.snd_pcm_hw_params_get_rate_min)(pHWParams, &minSampleRate, &sampleRateDir);
+    ((ma_snd_pcm_hw_params_get_rate_max_proc)pContext->alsa.snd_pcm_hw_params_get_rate_max)(pHWParams, &maxSampleRate, &sampleRateDir);
+
+    /* Make sure our sample rates are clamped to sane values. Stupid devices like "pulse" will report rates like "1" which is ridiculous. */
+    minSampleRate = ma_clamp(minSampleRate, (unsigned int)ma_standard_sample_rate_min, (unsigned int)ma_standard_sample_rate_max);
+    maxSampleRate = ma_clamp(maxSampleRate, (unsigned int)ma_standard_sample_rate_min, (unsigned int)ma_standard_sample_rate_max);
+
+    for (iSampleRate = 0; iSampleRate < ma_countof(g_maStandardSampleRatePriorities); iSampleRate += 1) {
+        ma_uint32 standardSampleRate = g_maStandardSampleRatePriorities[iSampleRate];
+
+        if (standardSampleRate >= minSampleRate && standardSampleRate <= maxSampleRate) {
+            ma_context_test_rate_and_add_native_data_format__alsa(pContext, pPCM, pHWParams, format, channels, standardSampleRate, flags, pDeviceInfo);
+        }
+    }
+
+    /* Now make sure our min and max rates are included just in case they aren't in the range of our standard rates. */
+    if (!ma_is_standard_sample_rate(minSampleRate)) {
+        ma_context_test_rate_and_add_native_data_format__alsa(pContext, pPCM, pHWParams, format, channels, minSampleRate, flags, pDeviceInfo);
+    }
+
+    if (!ma_is_standard_sample_rate(maxSampleRate) && maxSampleRate != minSampleRate) {
+        ma_context_test_rate_and_add_native_data_format__alsa(pContext, pPCM, pHWParams, format, channels, maxSampleRate, flags, pDeviceInfo);
+    }
+}
+
+static ma_result ma_context_get_device_info__alsa(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_device_info* pDeviceInfo)
+{
+    ma_context_get_device_info_enum_callback_data__alsa data;
+    ma_result result;
+    int resultALSA;
+    ma_snd_pcm_t* pPCM;
+    ma_snd_pcm_hw_params_t* pHWParams;
+    ma_uint32 iFormat;
+    ma_uint32 iChannel;
+
+    MA_ASSERT(pContext != NULL);
+
+    /* We just enumerate to find basic information about the device. */
+    data.deviceType = deviceType;
+    data.pDeviceID = pDeviceID;
+    data.pDeviceInfo = pDeviceInfo;
+    data.foundDevice = MA_FALSE;
+    result = ma_context_enumerate_devices__alsa(pContext, ma_context_get_device_info_enum_callback__alsa, &data);
+    if (result != MA_SUCCESS) {
+        return result;
+    }
+
+    if (!data.foundDevice) {
+        return MA_NO_DEVICE;
+    }
+
+    if (ma_strcmp(pDeviceInfo->id.alsa, "default") == 0) {
+        pDeviceInfo->isDefault = MA_TRUE;
+    }
+
+    /* For detailed info we need to open the device. */
+    result = ma_context_open_pcm__alsa(pContext, ma_share_mode_shared, deviceType, pDeviceID, 0, &pPCM);
+    if (result != MA_SUCCESS) {
+        return result;
+    }
+
+    /* We need to initialize a HW parameters object in order to know what formats are supported. */
+    pHWParams = (ma_snd_pcm_hw_params_t*)ma_calloc(((ma_snd_pcm_hw_params_sizeof_proc)pContext->alsa.snd_pcm_hw_params_sizeof)(), &pContext->allocationCallbacks);
+    if (pHWParams == NULL) {
+        ((ma_snd_pcm_close_proc)pContext->alsa.snd_pcm_close)(pPCM);
+        return MA_OUT_OF_MEMORY;
+    }
+
+    resultALSA = ((ma_snd_pcm_hw_params_any_proc)pContext->alsa.snd_pcm_hw_params_any)(pPCM, pHWParams);
+    if (resultALSA < 0) {
+        ma_free(pHWParams, &pContext->allocationCallbacks);
+        ((ma_snd_pcm_close_proc)pContext->alsa.snd_pcm_close)(pPCM);
+        ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[ALSA] Failed to initialize hardware parameters. snd_pcm_hw_params_any() failed.");
+        return ma_result_from_errno(-resultALSA);
+    }
+
+    /*
+    Some ALSA devices can support many permutations of formats, channels and rates. We only support
+    a fixed number of permutations which means we need to employ some strategies to ensure the best
+    combinations are returned.
An example is the "pulse" device which can do its own data conversion + in software and as a result can support any combination of format, channels and rate. + + We want to ensure that the first data formats are the best. We have a list of favored sample + formats and sample rates, so these will be the basis of our iteration. + */ + + /* Formats. We just iterate over our standard formats and test them, making sure we reset the configuration space each iteration. */ + for (iFormat = 0; iFormat < ma_countof(g_maFormatPriorities); iFormat += 1) { + ma_format format = g_maFormatPriorities[iFormat]; + + /* + For each format we need to make sure we reset the configuration space so we don't return + channel counts and rates that aren't compatible with a format. + */ + ((ma_snd_pcm_hw_params_any_proc)pContext->alsa.snd_pcm_hw_params_any)(pPCM, pHWParams); + + /* Test the format first. If this fails it means the format is not supported and we can skip it. */ + if (((ma_snd_pcm_hw_params_test_format_proc)pContext->alsa.snd_pcm_hw_params_test_format)(pPCM, pHWParams, ma_convert_ma_format_to_alsa_format(format)) == 0) { + /* The format is supported. */ + unsigned int minChannels; + unsigned int maxChannels; + + /* + The configuration space needs to be restricted to this format so we can get an accurate + picture of which sample rates and channel counts are support with this format. + */ + ((ma_snd_pcm_hw_params_set_format_proc)pContext->alsa.snd_pcm_hw_params_set_format)(pPCM, pHWParams, ma_convert_ma_format_to_alsa_format(format)); + + /* Now we need to check for supported channels. */ + ((ma_snd_pcm_hw_params_get_channels_min_proc)pContext->alsa.snd_pcm_hw_params_get_channels_min)(pHWParams, &minChannels); + ((ma_snd_pcm_hw_params_get_channels_max_proc)pContext->alsa.snd_pcm_hw_params_get_channels_max)(pHWParams, &maxChannels); + + if (minChannels > MA_MAX_CHANNELS) { + continue; /* Too many channels. */ + } + if (maxChannels < MA_MIN_CHANNELS) { + continue; /* Not enough channels. */ + } + + /* + Make sure the channel count is clamped. This is mainly intended for the max channels + because some devices can report an unbound maximum. + */ + minChannels = ma_clamp(minChannels, MA_MIN_CHANNELS, MA_MAX_CHANNELS); + maxChannels = ma_clamp(maxChannels, MA_MIN_CHANNELS, MA_MAX_CHANNELS); + + if (minChannels == MA_MIN_CHANNELS && maxChannels == MA_MAX_CHANNELS) { + /* The device supports all channels. Don't iterate over every single one. Instead just set the channels to 0 which means all channels are supported. */ + ma_context_iterate_rates_and_add_native_data_format__alsa(pContext, pPCM, pHWParams, format, 0, 0, pDeviceInfo); /* Intentionally setting the channel count to 0 as that means all channels are supported. */ + } else { + /* The device only supports a specific set of channels. We need to iterate over all of them. */ + for (iChannel = minChannels; iChannel <= maxChannels; iChannel += 1) { + /* Test the channel before applying it to the configuration space. */ + unsigned int channels = iChannel; + + /* Make sure our channel range is reset before testing again or else we'll always fail the test. */ + ((ma_snd_pcm_hw_params_any_proc)pContext->alsa.snd_pcm_hw_params_any)(pPCM, pHWParams); + ((ma_snd_pcm_hw_params_set_format_proc)pContext->alsa.snd_pcm_hw_params_set_format)(pPCM, pHWParams, ma_convert_ma_format_to_alsa_format(format)); + + if (((ma_snd_pcm_hw_params_test_channels_proc)pContext->alsa.snd_pcm_hw_params_test_channels)(pPCM, pHWParams, channels) == 0) { + /* The channel count is supported. 
*/ + + /* The configuration space now needs to be restricted to the channel count before extracting the sample rate. */ + ((ma_snd_pcm_hw_params_set_channels_proc)pContext->alsa.snd_pcm_hw_params_set_channels)(pPCM, pHWParams, channels); + + /* Only after the configuration space has been restricted to the specific channel count should we iterate over our sample rates. */ + ma_context_iterate_rates_and_add_native_data_format__alsa(pContext, pPCM, pHWParams, format, channels, 0, pDeviceInfo); + } else { + /* The channel count is not supported. Skip. */ + } + } + } + } else { + /* The format is not supported. Skip. */ + } + } + + ma_free(pHWParams, &pContext->allocationCallbacks); + + ((ma_snd_pcm_close_proc)pContext->alsa.snd_pcm_close)(pPCM); + return MA_SUCCESS; +} + +static ma_result ma_device_uninit__alsa(ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + + if ((ma_snd_pcm_t*)pDevice->alsa.pPCMCapture) { + ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)((ma_snd_pcm_t*)pDevice->alsa.pPCMCapture); + close(pDevice->alsa.wakeupfdCapture); + ma_free(pDevice->alsa.pPollDescriptorsCapture, &pDevice->pContext->allocationCallbacks); + } + + if ((ma_snd_pcm_t*)pDevice->alsa.pPCMPlayback) { + ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)((ma_snd_pcm_t*)pDevice->alsa.pPCMPlayback); + close(pDevice->alsa.wakeupfdPlayback); + ma_free(pDevice->alsa.pPollDescriptorsPlayback, &pDevice->pContext->allocationCallbacks); + } + + return MA_SUCCESS; +} + +static ma_result ma_device_init_by_type__alsa(ma_device* pDevice, const ma_device_config* pConfig, ma_device_descriptor* pDescriptor, ma_device_type deviceType) +{ + ma_result result; + int resultALSA; + ma_snd_pcm_t* pPCM; + ma_bool32 isUsingMMap; + ma_snd_pcm_format_t formatALSA; + ma_format internalFormat; + ma_uint32 internalChannels; + ma_uint32 internalSampleRate; + ma_channel internalChannelMap[MA_MAX_CHANNELS]; + ma_uint32 internalPeriodSizeInFrames; + ma_uint32 internalPeriods; + int openMode; + ma_snd_pcm_hw_params_t* pHWParams; + ma_snd_pcm_sw_params_t* pSWParams; + ma_snd_pcm_uframes_t bufferBoundary; + int pollDescriptorCount; + struct pollfd* pPollDescriptors; + int wakeupfd; + + MA_ASSERT(pConfig != NULL); + MA_ASSERT(deviceType != ma_device_type_duplex); /* This function should only be called for playback _or_ capture, never duplex. */ + MA_ASSERT(pDevice != NULL); + + formatALSA = ma_convert_ma_format_to_alsa_format(pDescriptor->format); + + openMode = 0; + if (pConfig->alsa.noAutoResample) { + openMode |= MA_SND_PCM_NO_AUTO_RESAMPLE; + } + if (pConfig->alsa.noAutoChannels) { + openMode |= MA_SND_PCM_NO_AUTO_CHANNELS; + } + if (pConfig->alsa.noAutoFormat) { + openMode |= MA_SND_PCM_NO_AUTO_FORMAT; + } + + result = ma_context_open_pcm__alsa(pDevice->pContext, pDescriptor->shareMode, deviceType, pDescriptor->pDeviceID, openMode, &pPCM); + if (result != MA_SUCCESS) { + return result; + } + + + /* Hardware parameters. 
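+
+/*
+A minimal sketch of how the native data formats gathered above might be inspected by an
+application, assuming the public ma_context_get_device_info() wrapper routes through here
+(includes and context setup omitted; stdio.h assumed for printf):
+
+    ma_device_info info;
+    if (ma_context_get_device_info(&context, ma_device_type_playback, NULL, &info) == MA_SUCCESS) {
+        ma_uint32 iFormat;
+        for (iFormat = 0; iFormat < info.nativeDataFormatCount; iFormat += 1) {
+            // A channel count of 0 means the device supports all channel counts.
+            printf("format=%d channels=%u rate=%u\n", (int)info.nativeDataFormats[iFormat].format, info.nativeDataFormats[iFormat].channels, info.nativeDataFormats[iFormat].sampleRate);
+        }
+    }
+*/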
+
+static ma_result ma_device_uninit__alsa(ma_device* pDevice)
+{
+    MA_ASSERT(pDevice != NULL);
+
+    if ((ma_snd_pcm_t*)pDevice->alsa.pPCMCapture) {
+        ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)((ma_snd_pcm_t*)pDevice->alsa.pPCMCapture);
+        close(pDevice->alsa.wakeupfdCapture);
+        ma_free(pDevice->alsa.pPollDescriptorsCapture, &pDevice->pContext->allocationCallbacks);
+    }
+
+    if ((ma_snd_pcm_t*)pDevice->alsa.pPCMPlayback) {
+        ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)((ma_snd_pcm_t*)pDevice->alsa.pPCMPlayback);
+        close(pDevice->alsa.wakeupfdPlayback);
+        ma_free(pDevice->alsa.pPollDescriptorsPlayback, &pDevice->pContext->allocationCallbacks);
+    }
+
+    return MA_SUCCESS;
+}
+
+static ma_result ma_device_init_by_type__alsa(ma_device* pDevice, const ma_device_config* pConfig, ma_device_descriptor* pDescriptor, ma_device_type deviceType)
+{
+    ma_result result;
+    int resultALSA;
+    ma_snd_pcm_t* pPCM;
+    ma_bool32 isUsingMMap;
+    ma_snd_pcm_format_t formatALSA;
+    ma_format internalFormat;
+    ma_uint32 internalChannels;
+    ma_uint32 internalSampleRate;
+    ma_channel internalChannelMap[MA_MAX_CHANNELS];
+    ma_uint32 internalPeriodSizeInFrames;
+    ma_uint32 internalPeriods;
+    int openMode;
+    ma_snd_pcm_hw_params_t* pHWParams;
+    ma_snd_pcm_sw_params_t* pSWParams;
+    ma_snd_pcm_uframes_t bufferBoundary;
+    int pollDescriptorCount;
+    struct pollfd* pPollDescriptors;
+    int wakeupfd;
+
+    MA_ASSERT(pConfig != NULL);
+    MA_ASSERT(deviceType != ma_device_type_duplex); /* This function should only be called for playback _or_ capture, never duplex. */
+    MA_ASSERT(pDevice != NULL);
+
+    formatALSA = ma_convert_ma_format_to_alsa_format(pDescriptor->format);
+
+    openMode = 0;
+    if (pConfig->alsa.noAutoResample) {
+        openMode |= MA_SND_PCM_NO_AUTO_RESAMPLE;
+    }
+    if (pConfig->alsa.noAutoChannels) {
+        openMode |= MA_SND_PCM_NO_AUTO_CHANNELS;
+    }
+    if (pConfig->alsa.noAutoFormat) {
+        openMode |= MA_SND_PCM_NO_AUTO_FORMAT;
+    }
+
+    result = ma_context_open_pcm__alsa(pDevice->pContext, pDescriptor->shareMode, deviceType, pDescriptor->pDeviceID, openMode, &pPCM);
+    if (result != MA_SUCCESS) {
+        return result;
+    }
+
+
+    /* Hardware parameters. */
+    pHWParams = (ma_snd_pcm_hw_params_t*)ma_calloc(((ma_snd_pcm_hw_params_sizeof_proc)pDevice->pContext->alsa.snd_pcm_hw_params_sizeof)(), &pDevice->pContext->allocationCallbacks);
+    if (pHWParams == NULL) {
+        ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
+        ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] Failed to allocate memory for hardware parameters.");
+        return MA_OUT_OF_MEMORY;
+    }
+
+    resultALSA = ((ma_snd_pcm_hw_params_any_proc)pDevice->pContext->alsa.snd_pcm_hw_params_any)(pPCM, pHWParams);
+    if (resultALSA < 0) {
+        ma_free(pHWParams, &pDevice->pContext->allocationCallbacks);
+        ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
+        ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] Failed to initialize hardware parameters. snd_pcm_hw_params_any() failed.");
+        return ma_result_from_errno(-resultALSA);
+    }
+
+    /* MMAP Mode. Try using interleaved MMAP access. If this fails, fall back to standard readi/writei. */
+    isUsingMMap = MA_FALSE;
+#if 0   /* NOTE: MMAP mode temporarily disabled. */
+    if (deviceType != ma_device_type_capture) {    /* <-- Disabling MMAP mode for capture devices because I apparently do not have a device that supports it which means I can't test it... Contributions welcome. */
+        if (!pConfig->alsa.noMMap) {
+            if (((ma_snd_pcm_hw_params_set_access_proc)pDevice->pContext->alsa.snd_pcm_hw_params_set_access)(pPCM, pHWParams, MA_SND_PCM_ACCESS_MMAP_INTERLEAVED) == 0) {
+                pDevice->alsa.isUsingMMap = MA_TRUE;
+            }
+        }
+    }
+#endif
+
+    if (!isUsingMMap) {
+        resultALSA = ((ma_snd_pcm_hw_params_set_access_proc)pDevice->pContext->alsa.snd_pcm_hw_params_set_access)(pPCM, pHWParams, MA_SND_PCM_ACCESS_RW_INTERLEAVED);
+        if (resultALSA < 0) {
+            ma_free(pHWParams, &pDevice->pContext->allocationCallbacks);
+            ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
+            ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] Failed to set access mode to either SND_PCM_ACCESS_MMAP_INTERLEAVED or SND_PCM_ACCESS_RW_INTERLEAVED. snd_pcm_hw_params_set_access() failed.");
+            return ma_result_from_errno(-resultALSA);
+        }
+    }
+
+    /*
+    Most important properties first. The documentation for OSS (yes, I know this is ALSA!) recommends format, channels, then sample rate. I can't
+    find any documentation for ALSA specifically, so I'm going to copy the recommendation for OSS.
+    */
+
+    /* Format. */
+    {
+        /*
+        At this point we should have a list of supported formats, so now we need to find the best one. We first check if the requested format is
+        supported, and if so, use that one. If it's not supported, we just run through a list of formats and try to find the best one.
+        */
+        if (formatALSA == MA_SND_PCM_FORMAT_UNKNOWN || ((ma_snd_pcm_hw_params_test_format_proc)pDevice->pContext->alsa.snd_pcm_hw_params_test_format)(pPCM, pHWParams, formatALSA) != 0) {
+            /* We're either requesting the native format or the specified format is not supported. */
+            size_t iFormat;
+
+            formatALSA = MA_SND_PCM_FORMAT_UNKNOWN;
+            for (iFormat = 0; iFormat < ma_countof(g_maFormatPriorities); ++iFormat) {
+                if (((ma_snd_pcm_hw_params_test_format_proc)pDevice->pContext->alsa.snd_pcm_hw_params_test_format)(pPCM, pHWParams, ma_convert_ma_format_to_alsa_format(g_maFormatPriorities[iFormat])) == 0) {
+                    formatALSA = ma_convert_ma_format_to_alsa_format(g_maFormatPriorities[iFormat]);
+                    break;
+                }
+            }
+
+            if (formatALSA == MA_SND_PCM_FORMAT_UNKNOWN) {
+                ma_free(pHWParams, &pDevice->pContext->allocationCallbacks);
+                ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
+                ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] Format not supported. The device does not support any miniaudio formats.");
+                return MA_FORMAT_NOT_SUPPORTED;
+            }
+        }
+
+        resultALSA = ((ma_snd_pcm_hw_params_set_format_proc)pDevice->pContext->alsa.snd_pcm_hw_params_set_format)(pPCM, pHWParams, formatALSA);
+        if (resultALSA < 0) {
+            ma_free(pHWParams, &pDevice->pContext->allocationCallbacks);
+            ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
+            ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] Format not supported. snd_pcm_hw_params_set_format() failed.");
+            return ma_result_from_errno(-resultALSA);
+        }
+
+        internalFormat = ma_format_from_alsa(formatALSA);
+        if (internalFormat == ma_format_unknown) {
+            ma_free(pHWParams, &pDevice->pContext->allocationCallbacks);
+            ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
+            ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] The chosen format is not supported by miniaudio.");
+            return MA_FORMAT_NOT_SUPPORTED;
+        }
+    }
+
+    /* Channels. */
+    {
+        unsigned int channels = pDescriptor->channels;
+        if (channels == 0) {
+            channels = MA_DEFAULT_CHANNELS;
+        }
+
+        resultALSA = ((ma_snd_pcm_hw_params_set_channels_near_proc)pDevice->pContext->alsa.snd_pcm_hw_params_set_channels_near)(pPCM, pHWParams, &channels);
+        if (resultALSA < 0) {
+            ma_free(pHWParams, &pDevice->pContext->allocationCallbacks);
+            ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
+            ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] Failed to set channel count. snd_pcm_hw_params_set_channels_near() failed.");
+            return ma_result_from_errno(-resultALSA);
+        }
+
+        internalChannels = (ma_uint32)channels;
+    }
+
+    /* Sample Rate */
+    {
+        unsigned int sampleRate;
+
+        /*
+        It appears there's either a bug in ALSA, a bug in some drivers, or I'm doing something silly; but having resampling enabled causes
+        problems with some device configurations when used in conjunction with MMAP access mode. To fix this problem we need to disable
+        resampling.
+
+        To reproduce this problem, open the "plug:dmix" device, and set the sample rate to 44100. Internally, it looks like dmix uses a
+        sample rate of 48000. The hardware parameters will get set correctly with no errors, but it looks like the 44100 -> 48000 resampling
+        doesn't work properly - but only with MMAP access mode. You will notice skipping/crackling in the audio, and it'll run at a slightly
+        faster rate.
+
+        miniaudio has built-in support for sample rate conversion (albeit low quality at the moment), so disabling resampling should be fine
+        for us. The only problem is that it won't be taking advantage of any kind of hardware-accelerated resampling and it won't be very
+        good quality until I get a chance to improve the quality of miniaudio's software sample rate conversion.
+
+        I don't currently know if the dmix plugin is the only one with this error. Indeed, this is the only one I've been able to reproduce
+        this error with. In the future, we may want to restrict the disabling of resampling to only known bad plugins.
+        */
+        ((ma_snd_pcm_hw_params_set_rate_resample_proc)pDevice->pContext->alsa.snd_pcm_hw_params_set_rate_resample)(pPCM, pHWParams, 0);
+
+        sampleRate = pDescriptor->sampleRate;
+        if (sampleRate == 0) {
+            sampleRate = MA_DEFAULT_SAMPLE_RATE;
+        }
+
+        resultALSA = ((ma_snd_pcm_hw_params_set_rate_near_proc)pDevice->pContext->alsa.snd_pcm_hw_params_set_rate_near)(pPCM, pHWParams, &sampleRate, 0);
+        if (resultALSA < 0) {
+            ma_free(pHWParams, &pDevice->pContext->allocationCallbacks);
+            ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
+            ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] Sample rate not supported. snd_pcm_hw_params_set_rate_near() failed.");
+            return ma_result_from_errno(-resultALSA);
+        }
+
+        internalSampleRate = (ma_uint32)sampleRate;
+    }
+
+    /* Periods. */
+    {
+        ma_uint32 periods = pDescriptor->periodCount;
+
+        resultALSA = ((ma_snd_pcm_hw_params_set_periods_near_proc)pDevice->pContext->alsa.snd_pcm_hw_params_set_periods_near)(pPCM, pHWParams, &periods, NULL);
+        if (resultALSA < 0) {
+            ma_free(pHWParams, &pDevice->pContext->allocationCallbacks);
+            ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
+            ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] Failed to set period count. snd_pcm_hw_params_set_periods_near() failed.");
+            return ma_result_from_errno(-resultALSA);
+        }
+
+        internalPeriods = periods;
+    }
+
+    /* Buffer Size */
+    {
+        ma_snd_pcm_uframes_t actualBufferSizeInFrames = ma_calculate_buffer_size_in_frames_from_descriptor(pDescriptor, internalSampleRate, pConfig->performanceProfile) * internalPeriods;
+
+        resultALSA = ((ma_snd_pcm_hw_params_set_buffer_size_near_proc)pDevice->pContext->alsa.snd_pcm_hw_params_set_buffer_size_near)(pPCM, pHWParams, &actualBufferSizeInFrames);
+        if (resultALSA < 0) {
+            ma_free(pHWParams, &pDevice->pContext->allocationCallbacks);
+            ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
+            ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] Failed to set buffer size for device. snd_pcm_hw_params_set_buffer_size_near() failed.");
+            return ma_result_from_errno(-resultALSA);
+        }
+
+        internalPeriodSizeInFrames = actualBufferSizeInFrames / internalPeriods;
+    }
+
+    /* Apply hardware parameters. */
+    resultALSA = ((ma_snd_pcm_hw_params_proc)pDevice->pContext->alsa.snd_pcm_hw_params)(pPCM, pHWParams);
+    if (resultALSA < 0) {
+        ma_free(pHWParams, &pDevice->pContext->allocationCallbacks);
+        ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
+        ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] Failed to set hardware parameters. snd_pcm_hw_params() failed.");
+        return ma_result_from_errno(-resultALSA);
+    }
+
+    ma_free(pHWParams, &pDevice->pContext->allocationCallbacks);
+    pHWParams = NULL;
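+
+/*
+For a concrete sense of the buffer math above: if the descriptor works out to a 480 frame period
+(10ms at 48000Hz) and 3 periods, the requested buffer size is 480*3 = 1440 frames. ALSA is free to
+adjust that via snd_pcm_hw_params_set_buffer_size_near(), and whatever it actually grants is then
+divided back by the period count to get the effective period size.
+*/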
+
+
+    /* Software parameters. */
+    pSWParams = (ma_snd_pcm_sw_params_t*)ma_calloc(((ma_snd_pcm_sw_params_sizeof_proc)pDevice->pContext->alsa.snd_pcm_sw_params_sizeof)(), &pDevice->pContext->allocationCallbacks);
+    if (pSWParams == NULL) {
+        ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
+        ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] Failed to allocate memory for software parameters.");
+        return MA_OUT_OF_MEMORY;
+    }
+
+    resultALSA = ((ma_snd_pcm_sw_params_current_proc)pDevice->pContext->alsa.snd_pcm_sw_params_current)(pPCM, pSWParams);
+    if (resultALSA < 0) {
+        ma_free(pSWParams, &pDevice->pContext->allocationCallbacks);
+        ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
+        ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] Failed to initialize software parameters. snd_pcm_sw_params_current() failed.");
+        return ma_result_from_errno(-resultALSA);
+    }
+
+    resultALSA = ((ma_snd_pcm_sw_params_set_avail_min_proc)pDevice->pContext->alsa.snd_pcm_sw_params_set_avail_min)(pPCM, pSWParams, ma_prev_power_of_2(internalPeriodSizeInFrames));
+    if (resultALSA < 0) {
+        ma_free(pSWParams, &pDevice->pContext->allocationCallbacks);
+        ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
+        ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] snd_pcm_sw_params_set_avail_min() failed.");
+        return ma_result_from_errno(-resultALSA);
+    }
+
+    resultALSA = ((ma_snd_pcm_sw_params_get_boundary_proc)pDevice->pContext->alsa.snd_pcm_sw_params_get_boundary)(pSWParams, &bufferBoundary);
+    if (resultALSA < 0) {
+        bufferBoundary = internalPeriodSizeInFrames * internalPeriods;
+    }
+
+    if (deviceType == ma_device_type_playback && !isUsingMMap) {   /* Only playback devices in writei/readi mode need a start threshold. */
+        /*
+        Subtle detail here with the start threshold. When in playback-only mode (no full-duplex) we can set the start threshold to
+        the size of a period. But for full-duplex we need to set it such that it is at least two periods.
+        */
+        resultALSA = ((ma_snd_pcm_sw_params_set_start_threshold_proc)pDevice->pContext->alsa.snd_pcm_sw_params_set_start_threshold)(pPCM, pSWParams, internalPeriodSizeInFrames*2);
+        if (resultALSA < 0) {
+            ma_free(pSWParams, &pDevice->pContext->allocationCallbacks);
+            ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
+            ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] Failed to set start threshold for playback device. snd_pcm_sw_params_set_start_threshold() failed.");
+            return ma_result_from_errno(-resultALSA);
+        }
+
+        resultALSA = ((ma_snd_pcm_sw_params_set_stop_threshold_proc)pDevice->pContext->alsa.snd_pcm_sw_params_set_stop_threshold)(pPCM, pSWParams, bufferBoundary);   /* Set to the boundary to loop instead of stop in the event of an xrun. */
+        if (resultALSA < 0) {
+            ma_free(pSWParams, &pDevice->pContext->allocationCallbacks);
+            ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
+            ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] Failed to set stop threshold for playback device. snd_pcm_sw_params_set_stop_threshold() failed.");
+            return ma_result_from_errno(-resultALSA);
+        }
+    }
+
+    resultALSA = ((ma_snd_pcm_sw_params_proc)pDevice->pContext->alsa.snd_pcm_sw_params)(pPCM, pSWParams);
+    if (resultALSA < 0) {
+        ma_free(pSWParams, &pDevice->pContext->allocationCallbacks);
+        ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
+        ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] Failed to set software parameters. snd_pcm_sw_params() failed.");
+        return ma_result_from_errno(-resultALSA);
+    }
+
+    ma_free(pSWParams, &pDevice->pContext->allocationCallbacks);
+    pSWParams = NULL;
+
+
+    /* Grab the internal channel map. For now we're not going to bother trying to change the channel map and instead just do it ourselves. */
+    {
+        ma_snd_pcm_chmap_t* pChmap = NULL;
+        if (pDevice->pContext->alsa.snd_pcm_get_chmap != NULL) {
+            pChmap = ((ma_snd_pcm_get_chmap_proc)pDevice->pContext->alsa.snd_pcm_get_chmap)(pPCM);
+        }
+
+        if (pChmap != NULL) {
+            ma_uint32 iChannel;
+
+            /* There are cases where the returned channel map can have a different channel count than was returned by snd_pcm_hw_params_set_channels_near(). */
+            if (pChmap->channels >= internalChannels) {
+                /* Drop excess channels. */
+                for (iChannel = 0; iChannel < internalChannels; ++iChannel) {
+                    internalChannelMap[iChannel] = ma_convert_alsa_channel_position_to_ma_channel(pChmap->pos[iChannel]);
+                }
+            } else {
+                ma_uint32 i;
+
+                /*
+                Excess channels use defaults. Do an initial fill with defaults, overwrite the first pChmap->channels, validate to ensure there are no duplicate
+                channels. If validation fails, fall back to defaults.
+                */
+                ma_bool32 isValid = MA_TRUE;
+
+                /* Fill with defaults. */
+                ma_channel_map_init_standard(ma_standard_channel_map_alsa, internalChannelMap, ma_countof(internalChannelMap), internalChannels);
+
+                /* Overwrite first pChmap->channels channels. */
+                for (iChannel = 0; iChannel < pChmap->channels; ++iChannel) {
+                    internalChannelMap[iChannel] = ma_convert_alsa_channel_position_to_ma_channel(pChmap->pos[iChannel]);
+                }
+
+                /* Validate. */
+                for (i = 0; i < internalChannels && isValid; ++i) {
+                    ma_uint32 j;
+                    for (j = i+1; j < internalChannels; ++j) {
+                        if (internalChannelMap[i] == internalChannelMap[j]) {
+                            isValid = MA_FALSE;
+                            break;
+                        }
+                    }
+                }
+
+                /* If our channel map is invalid, fall back to defaults. */
+                if (!isValid) {
+                    ma_channel_map_init_standard(ma_standard_channel_map_alsa, internalChannelMap, ma_countof(internalChannelMap), internalChannels);
+                }
+            }
+
+            free(pChmap);
+            pChmap = NULL;
+        } else {
+            /* Could not retrieve the channel map. Fall back to a hard-coded assumption. */
+            ma_channel_map_init_standard(ma_standard_channel_map_alsa, internalChannelMap, ma_countof(internalChannelMap), internalChannels);
+        }
+    }
+
+
+    /*
+    We need to retrieve the poll descriptors so we can use poll() to wait for data to become
+    available for reading or writing. There's no well defined maximum for this so we're just going
+    to allocate this on the heap.
+    */
+    pollDescriptorCount = ((ma_snd_pcm_poll_descriptors_count_proc)pDevice->pContext->alsa.snd_pcm_poll_descriptors_count)(pPCM);
+    if (pollDescriptorCount <= 0) {
+        ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
+        ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] Failed to retrieve poll descriptors count.");
+        return MA_ERROR;
+    }
+
+    pPollDescriptors = (struct pollfd*)ma_malloc(sizeof(*pPollDescriptors) * (pollDescriptorCount + 1), &pDevice->pContext->allocationCallbacks);   /* +1 because we want room for the wakeup descriptor. */
+    if (pPollDescriptors == NULL) {
+        ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
+        ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] Failed to allocate memory for poll descriptors.");
+        return MA_OUT_OF_MEMORY;
+    }
+
+    /*
+    We need an eventfd to wakeup from poll() and avoid a deadlock in situations where the driver
+    never returns from writei() and readi(). This has been observed with the "pulse" device.
+    */
+    wakeupfd = eventfd(0, 0);
+    if (wakeupfd < 0) {
+        ma_free(pPollDescriptors, &pDevice->pContext->allocationCallbacks);
+        ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
+        ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] Failed to create eventfd for poll wakeup.");
+        return ma_result_from_errno(errno);
+    }
+
+    /* We'll place the wakeup fd at the start of the buffer. */
+    pPollDescriptors[0].fd = wakeupfd;
+    pPollDescriptors[0].events = POLLIN;    /* We only care about waiting to read from the wakeup file descriptor. */
+    pPollDescriptors[0].revents = 0;
+
+    /* We can now extract the PCM poll descriptors which we place after the wakeup descriptor. */
+    pollDescriptorCount = ((ma_snd_pcm_poll_descriptors_proc)pDevice->pContext->alsa.snd_pcm_poll_descriptors)(pPCM, pPollDescriptors + 1, pollDescriptorCount);    /* +1 because we want to place these descriptors after the wakeup descriptor. */
+    if (pollDescriptorCount <= 0) {
+        close(wakeupfd);
+        ma_free(pPollDescriptors, &pDevice->pContext->allocationCallbacks);
+        ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
+        ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] Failed to retrieve poll descriptors.");
+        return MA_ERROR;
+    }
+
+    if (deviceType == ma_device_type_capture) {
+        pDevice->alsa.pollDescriptorCountCapture = pollDescriptorCount;
+        pDevice->alsa.pPollDescriptorsCapture = pPollDescriptors;
+        pDevice->alsa.wakeupfdCapture = wakeupfd;
+    } else {
+        pDevice->alsa.pollDescriptorCountPlayback = pollDescriptorCount;
+        pDevice->alsa.pPollDescriptorsPlayback = pPollDescriptors;
+        pDevice->alsa.wakeupfdPlayback = wakeupfd;
+    }
+
+
+    /* We're done. Prepare the device. */
+    resultALSA = ((ma_snd_pcm_prepare_proc)pDevice->pContext->alsa.snd_pcm_prepare)(pPCM);
+    if (resultALSA < 0) {
+        close(wakeupfd);
+        ma_free(pPollDescriptors, &pDevice->pContext->allocationCallbacks);
+        ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM);
+        ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] Failed to prepare device.");
+        return ma_result_from_errno(-resultALSA);
+    }
+
+
+    if (deviceType == ma_device_type_capture) {
+        pDevice->alsa.pPCMCapture = (ma_ptr)pPCM;
+        pDevice->alsa.isUsingMMapCapture = isUsingMMap;
+    } else {
+        pDevice->alsa.pPCMPlayback = (ma_ptr)pPCM;
+        pDevice->alsa.isUsingMMapPlayback = isUsingMMap;
+    }
+
+    pDescriptor->format = internalFormat;
+    pDescriptor->channels = internalChannels;
+    pDescriptor->sampleRate = internalSampleRate;
+    ma_channel_map_copy(pDescriptor->channelMap, internalChannelMap, ma_min(internalChannels, MA_MAX_CHANNELS));
+    pDescriptor->periodSizeInFrames = internalPeriodSizeInFrames;
+    pDescriptor->periodCount = internalPeriods;
+
+    return MA_SUCCESS;
+}
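+
+/*
+The wakeup mechanism above is plain eventfd + poll(). Stripped of the miniaudio plumbing it boils
+down to the following (error handling omitted; illustrative only):
+
+    #include <sys/eventfd.h>
+    #include <poll.h>
+    #include <unistd.h>
+    #include <stdint.h>
+
+    int wakeupfd = eventfd(0, 0);
+
+    // Waiting thread: the eventfd goes first, followed by the PCM descriptors
+    // from snd_pcm_poll_descriptors().
+    struct pollfd fds[2];
+    fds[0].fd = wakeupfd; fds[0].events = POLLIN; fds[0].revents = 0;
+    poll(fds, 2, -1);
+    if (fds[0].revents & POLLIN) {
+        uint64_t t;
+        read(wakeupfd, &t, sizeof(t));  // Must read to reset the counter.
+    }
+
+    // Waking thread:
+    uint64_t one = 1;
+    write(wakeupfd, &one, sizeof(one));
+*/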
+
+static ma_result ma_device_init__alsa(ma_device* pDevice, const ma_device_config* pConfig, ma_device_descriptor* pDescriptorPlayback, ma_device_descriptor* pDescriptorCapture)
+{
+    MA_ASSERT(pDevice != NULL);
+
+    MA_ZERO_OBJECT(&pDevice->alsa);
+
+    if (pConfig->deviceType == ma_device_type_loopback) {
+        return MA_DEVICE_TYPE_NOT_SUPPORTED;
+    }
+
+    if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) {
+        ma_result result = ma_device_init_by_type__alsa(pDevice, pConfig, pDescriptorCapture, ma_device_type_capture);
+        if (result != MA_SUCCESS) {
+            return result;
+        }
+    }
+
+    if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) {
+        ma_result result = ma_device_init_by_type__alsa(pDevice, pConfig, pDescriptorPlayback, ma_device_type_playback);
+        if (result != MA_SUCCESS) {
+            return result;
+        }
+    }
+
+    return MA_SUCCESS;
+}
+
+static ma_result ma_device_start__alsa(ma_device* pDevice)
+{
+    int resultALSA;
+
+    if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
+        resultALSA = ((ma_snd_pcm_start_proc)pDevice->pContext->alsa.snd_pcm_start)((ma_snd_pcm_t*)pDevice->alsa.pPCMCapture);
+        if (resultALSA < 0) {
+            ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] Failed to start capture device.");
+            return ma_result_from_errno(-resultALSA);
+        }
+    }
+
+    if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
+        /*
+        When data is written to the device we wait for the device to get ready to receive data with poll(). In my testing
+        I have observed that poll() can sometimes block forever unless the device is started explicitly with snd_pcm_start()
+        or some data is written with snd_pcm_writei().
+
+        To resolve this I've decided to do an explicit start with snd_pcm_start(). The problem with this is that the device
+        is started without any data in the internal buffer which will result in an immediate underrun. If instead we were
+        to call into snd_pcm_writei() in an attempt to prevent the underrun, we would run the risk of a weird deadlock
+        issue as documented inside ma_device_write__alsa().
+        */
+        resultALSA = ((ma_snd_pcm_start_proc)pDevice->pContext->alsa.snd_pcm_start)((ma_snd_pcm_t*)pDevice->alsa.pPCMPlayback);
+        if (resultALSA < 0) {
+            ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] Failed to start playback device.");
+            return ma_result_from_errno(-resultALSA);
+        }
+    }
+
+    return MA_SUCCESS;
+}
+
+static ma_result ma_device_stop__alsa(ma_device* pDevice)
+{
+    /*
+    The stop callback will get called on the worker thread after read/write__alsa() has returned. At this point there is
+    a small chance that our wakeupfd has not been cleared. We'll clear that out now if applicable.
+    */
+    int resultPoll;
+    int resultRead;
+
+    if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
+        ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "[ALSA] Dropping capture device...\n");
+        ((ma_snd_pcm_drop_proc)pDevice->pContext->alsa.snd_pcm_drop)((ma_snd_pcm_t*)pDevice->alsa.pPCMCapture);
+        ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "[ALSA] Dropping capture device successful.\n");
+
+        /* We need to prepare the device again, otherwise we won't be able to restart the device. */
+        ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "[ALSA] Preparing capture device...\n");
+        if (((ma_snd_pcm_prepare_proc)pDevice->pContext->alsa.snd_pcm_prepare)((ma_snd_pcm_t*)pDevice->alsa.pPCMCapture) < 0) {
+            ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "[ALSA] Preparing capture device failed.\n");
+        } else {
+            ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "[ALSA] Preparing capture device successful.\n");
+        }
+
+        /* Clear the wakeupfd. */
+        resultPoll = poll((struct pollfd*)pDevice->alsa.pPollDescriptorsCapture, 1, 0);
+        if (resultPoll > 0) {
+            ma_uint64 t;
+            resultRead = read(((struct pollfd*)pDevice->alsa.pPollDescriptorsCapture)[0].fd, &t, sizeof(t));
+            if (resultRead != sizeof(t)) {
+                ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "[ALSA] Failed to read from capture wakeupfd. read() = %d\n", resultRead);
+            }
+        }
+    }
+
+    if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
+        ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "[ALSA] Dropping playback device...\n");
+        ((ma_snd_pcm_drop_proc)pDevice->pContext->alsa.snd_pcm_drop)((ma_snd_pcm_t*)pDevice->alsa.pPCMPlayback);
+        ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "[ALSA] Dropping playback device successful.\n");
+
+        /* We need to prepare the device again, otherwise we won't be able to restart the device. */
+        ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "[ALSA] Preparing playback device...\n");
+        if (((ma_snd_pcm_prepare_proc)pDevice->pContext->alsa.snd_pcm_prepare)((ma_snd_pcm_t*)pDevice->alsa.pPCMPlayback) < 0) {
+            ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "[ALSA] Preparing playback device failed.\n");
+        } else {
+            ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "[ALSA] Preparing playback device successful.\n");
+        }
+
+        /* Clear the wakeupfd. */
+        resultPoll = poll((struct pollfd*)pDevice->alsa.pPollDescriptorsPlayback, 1, 0);
+        if (resultPoll > 0) {
+            ma_uint64 t;
+            resultRead = read(((struct pollfd*)pDevice->alsa.pPollDescriptorsPlayback)[0].fd, &t, sizeof(t));
+            if (resultRead != sizeof(t)) {
+                ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "[ALSA] Failed to read from playback wakeupfd. read() = %d\n", resultRead);
+            }
+        }
+    }
+
+    return MA_SUCCESS;
+}
+
+static ma_result ma_device_wait__alsa(ma_device* pDevice, ma_snd_pcm_t* pPCM, struct pollfd* pPollDescriptors, int pollDescriptorCount, short requiredEvent)
+{
+    for (;;) {
+        unsigned short revents;
+        int resultALSA;
+        int resultPoll = poll(pPollDescriptors, pollDescriptorCount, -1);
+        if (resultPoll < 0) {
+            ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_WARNING, "[ALSA] poll() failed.\n");
+
+            /*
+            There have been reports that poll() is returning an error randomly and that instead of
+            returning an error, simply trying again will work. I'm experimenting with adopting this
+            advice.
+            */
+            continue;
+            /*return ma_result_from_errno(errno);*/
+        }
+
+        /*
+        Before checking the ALSA poll descriptor flag we need to check if the wakeup descriptor
+        has had its POLLIN flag set. If so, we need to actually read the data and then exit the
+        function. The wakeup descriptor will be the first item in the descriptors buffer.
+        */
+        if ((pPollDescriptors[0].revents & POLLIN) != 0) {
+            ma_uint64 t;
+            int resultRead = read(pPollDescriptors[0].fd, &t, sizeof(t));   /* <-- Important that we read here so that the next write() does not block. */
+            if (resultRead < 0) {
+                ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] read() failed.\n");
+                return ma_result_from_errno(errno);
+            }
+
+            ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "[ALSA] POLLIN set for wakeupfd\n");
+            return MA_DEVICE_NOT_STARTED;
+        }
+
+        /*
+        Getting here means that some data should be able to be read. We need to use ALSA to
+        translate the revents flags for us.
+        */
+        resultALSA = ((ma_snd_pcm_poll_descriptors_revents_proc)pDevice->pContext->alsa.snd_pcm_poll_descriptors_revents)(pPCM, pPollDescriptors + 1, pollDescriptorCount - 1, &revents);   /* +1, -1 to ignore the wakeup descriptor. */
+        if (resultALSA < 0) {
+            ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] snd_pcm_poll_descriptors_revents() failed.\n");
+            return ma_result_from_errno(-resultALSA);
+        }
+
+        if ((revents & POLLERR) != 0) {
+            ma_snd_pcm_state_t state = ((ma_snd_pcm_state_proc)pDevice->pContext->alsa.snd_pcm_state)(pPCM);
+            if (state == MA_SND_PCM_STATE_XRUN) {
+                /* The PCM is in an xrun state. This will be recovered from at a higher level. We can disregard this. */
+            } else {
+                ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_WARNING, "[ALSA] POLLERR detected. status = %d\n", ((ma_snd_pcm_state_proc)pDevice->pContext->alsa.snd_pcm_state)(pPCM));
+            }
+        }
+
+        if ((revents & requiredEvent) == requiredEvent) {
+            break;  /* We're done. Data available for reading or writing. */
+        }
+    }
+
+    return MA_SUCCESS;
+}
+
+static ma_result ma_device_wait_read__alsa(ma_device* pDevice)
+{
+    return ma_device_wait__alsa(pDevice, (ma_snd_pcm_t*)pDevice->alsa.pPCMCapture, (struct pollfd*)pDevice->alsa.pPollDescriptorsCapture, pDevice->alsa.pollDescriptorCountCapture + 1, POLLIN);    /* +1 to account for the wakeup descriptor. */
+}
+
+static ma_result ma_device_wait_write__alsa(ma_device* pDevice)
+{
+    return ma_device_wait__alsa(pDevice, (ma_snd_pcm_t*)pDevice->alsa.pPCMPlayback, (struct pollfd*)pDevice->alsa.pPollDescriptorsPlayback, pDevice->alsa.pollDescriptorCountPlayback + 1, POLLOUT);    /* +1 to account for the wakeup descriptor. */
+}
+
+static ma_result ma_device_read__alsa(ma_device* pDevice, void* pFramesOut, ma_uint32 frameCount, ma_uint32* pFramesRead)
+{
+    ma_snd_pcm_sframes_t resultALSA = 0;
+
+    MA_ASSERT(pDevice != NULL);
+    MA_ASSERT(pFramesOut != NULL);
+
+    if (pFramesRead != NULL) {
+        *pFramesRead = 0;
+    }
+
+    while (ma_device_get_state(pDevice) == ma_device_state_started) {
+        ma_result result;
+
+        /* The first thing to do is wait for data to become available for reading. This will return an error code if the device has been stopped. */
+        result = ma_device_wait_read__alsa(pDevice);
+        if (result != MA_SUCCESS) {
+            return result;
+        }
+
+        /* Getting here means we should have data available. */
+        resultALSA = ((ma_snd_pcm_readi_proc)pDevice->pContext->alsa.snd_pcm_readi)((ma_snd_pcm_t*)pDevice->alsa.pPCMCapture, pFramesOut, frameCount);
+        if (resultALSA >= 0) {
+            break;  /* Success. */
+        } else {
+            if (resultALSA == -EAGAIN) {
+                /*ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "EAGAIN (read)\n");*/
+                continue;   /* Try again. */
+            } else if (resultALSA == -EPIPE) {
+                ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "EPIPE (read)\n");
+
+                /* Overrun. Recover and try again. If this fails we need to return an error. */
+                resultALSA = ((ma_snd_pcm_recover_proc)pDevice->pContext->alsa.snd_pcm_recover)((ma_snd_pcm_t*)pDevice->alsa.pPCMCapture, resultALSA, MA_TRUE);
+                if (resultALSA < 0) {
+                    ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] Failed to recover device after overrun.");
+                    return ma_result_from_errno((int)-resultALSA);
+                }
+
+                resultALSA = ((ma_snd_pcm_start_proc)pDevice->pContext->alsa.snd_pcm_start)((ma_snd_pcm_t*)pDevice->alsa.pPCMCapture);
+                if (resultALSA < 0) {
+                    ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] Failed to start device after overrun.");
+                    return ma_result_from_errno((int)-resultALSA);
+                }
+
+                continue;   /* Try reading again. */
+            }
+        }
+    }
+
+    if (pFramesRead != NULL) {
+        *pFramesRead = resultALSA;
+    }
+
+    return MA_SUCCESS;
+}
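+
+/*
+The EPIPE handling above is the standard ALSA xrun recovery dance. Against raw libasound, without
+miniaudio's dynamically loaded function pointers, the same pattern looks roughly like this:
+
+    snd_pcm_sframes_t n = snd_pcm_writei(pcm, frames, frameCount);
+    if (n == -EPIPE) {
+        snd_pcm_recover(pcm, (int)n, 1);  // 1 = silent.
+        snd_pcm_start(pcm);               // See the note below about writei() not always restarting.
+    }
+*/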
+
+static ma_result ma_device_write__alsa(ma_device* pDevice, const void* pFrames, ma_uint32 frameCount, ma_uint32* pFramesWritten)
+{
+    ma_snd_pcm_sframes_t resultALSA = 0;
+
+    MA_ASSERT(pDevice != NULL);
+    MA_ASSERT(pFrames != NULL);
+
+    if (pFramesWritten != NULL) {
+        *pFramesWritten = 0;
+    }
+
+    while (ma_device_get_state(pDevice) == ma_device_state_started) {
+        ma_result result;
+
+        /* The first thing to do is wait for space to become available for writing. This will return an error code if the device has been stopped. */
+        result = ma_device_wait_write__alsa(pDevice);
+        if (result != MA_SUCCESS) {
+            return result;
+        }
+
+        resultALSA = ((ma_snd_pcm_writei_proc)pDevice->pContext->alsa.snd_pcm_writei)((ma_snd_pcm_t*)pDevice->alsa.pPCMPlayback, pFrames, frameCount);
+        if (resultALSA >= 0) {
+            break;  /* Success. */
+        } else {
+            if (resultALSA == -EAGAIN) {
+                /*ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "EAGAIN (write)\n");*/
+                continue;   /* Try again. */
+            } else if (resultALSA == -EPIPE) {
+                ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "EPIPE (write)\n");
+
+                /* Underrun. Recover and try again. If this fails we need to return an error. */
+                resultALSA = ((ma_snd_pcm_recover_proc)pDevice->pContext->alsa.snd_pcm_recover)((ma_snd_pcm_t*)pDevice->alsa.pPCMPlayback, resultALSA, MA_TRUE);    /* MA_TRUE=silent (don't print anything on error). */
+                if (resultALSA < 0) {
+                    ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] Failed to recover device after underrun.");
+                    return ma_result_from_errno((int)-resultALSA);
+                }
+
+                /*
+                In my testing I have had a situation where writei() does not automatically restart the device even though I've set it
+                up as such in the software parameters. What will happen is writei() will block indefinitely even though the number of
+                frames is well beyond the auto-start threshold. To work around this I've needed to add an explicit start here. Not sure
+                if this is me just being stupid and not recovering the device properly, but this definitely feels like something isn't
+                quite right here.
+                */
+                resultALSA = ((ma_snd_pcm_start_proc)pDevice->pContext->alsa.snd_pcm_start)((ma_snd_pcm_t*)pDevice->alsa.pPCMPlayback);
+                if (resultALSA < 0) {
+                    ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] Failed to start device after underrun.");
+                    return ma_result_from_errno((int)-resultALSA);
+                }
+
+                continue;   /* Try writing again. */
+            }
+        }
+    }
+
+    if (pFramesWritten != NULL) {
+        *pFramesWritten = resultALSA;
+    }
+
+    return MA_SUCCESS;
+}
+
+static ma_result ma_device_data_loop_wakeup__alsa(ma_device* pDevice)
+{
+    ma_uint64 t = 1;
+    int resultWrite = 0;
+
+    MA_ASSERT(pDevice != NULL);
+
+    ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "[ALSA] Waking up...\n");
+
+    /* Write to an eventfd to trigger a wakeup from poll() and abort any reading or writing. */
+    if (pDevice->alsa.pPollDescriptorsCapture != NULL) {
+        resultWrite = write(pDevice->alsa.wakeupfdCapture, &t, sizeof(t));
+    }
+    if (pDevice->alsa.pPollDescriptorsPlayback != NULL) {
+        resultWrite = write(pDevice->alsa.wakeupfdPlayback, &t, sizeof(t));
+    }
+
+    if (resultWrite < 0) {
+        ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[ALSA] write() failed.\n");
+        return ma_result_from_errno(errno);
+    }
+
+    ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "[ALSA] Waking up completed successfully.\n");
+
+    return MA_SUCCESS;
+}
+
+static ma_result ma_context_uninit__alsa(ma_context* pContext)
+{
+    MA_ASSERT(pContext != NULL);
+    MA_ASSERT(pContext->backend == ma_backend_alsa);
+
+    /* Clean up memory for memory leak checkers. */
+    ((ma_snd_config_update_free_global_proc)pContext->alsa.snd_config_update_free_global)();
+
+#ifndef MA_NO_RUNTIME_LINKING
+    ma_dlclose(ma_context_get_log(pContext), pContext->alsa.asoundSO);
+#endif
+
+    ma_mutex_uninit(&pContext->alsa.internalDeviceEnumLock);
+
+    return MA_SUCCESS;
+}
+
+static ma_result ma_context_init__alsa(ma_context* pContext, const ma_context_config* pConfig, ma_backend_callbacks* pCallbacks)
+{
+    ma_result result;
+#ifndef MA_NO_RUNTIME_LINKING
+    const char* libasoundNames[] = {
+        "libasound.so.2",
+        "libasound.so"
+    };
+    size_t i;
+
+    for (i = 0; i < ma_countof(libasoundNames); ++i) {
+        pContext->alsa.asoundSO = ma_dlopen(ma_context_get_log(pContext), libasoundNames[i]);
+        if (pContext->alsa.asoundSO != NULL) {
+            break;
+        }
+    }
+
+    if (pContext->alsa.asoundSO == NULL) {
+        ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_DEBUG, "[ALSA] Failed to open shared object.\n");
+        return MA_NO_BACKEND;
+    }
+
+    pContext->alsa.snd_pcm_open = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_open");
+    pContext->alsa.snd_pcm_close = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_close");
+    pContext->alsa.snd_pcm_hw_params_sizeof = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_hw_params_sizeof");
+    pContext->alsa.snd_pcm_hw_params_any = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_hw_params_any");
+    pContext->alsa.snd_pcm_hw_params_set_format = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_hw_params_set_format");
+    pContext->alsa.snd_pcm_hw_params_set_format_first = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_hw_params_set_format_first");
+    pContext->alsa.snd_pcm_hw_params_get_format_mask = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_hw_params_get_format_mask");
+    pContext->alsa.snd_pcm_hw_params_set_channels = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_hw_params_set_channels");
+    pContext->alsa.snd_pcm_hw_params_set_channels_near = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_hw_params_set_channels_near");
+    pContext->alsa.snd_pcm_hw_params_set_channels_minmax = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_hw_params_set_channels_minmax");
+    pContext->alsa.snd_pcm_hw_params_set_rate_resample = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_hw_params_set_rate_resample");
+    pContext->alsa.snd_pcm_hw_params_set_rate = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_hw_params_set_rate");
+    pContext->alsa.snd_pcm_hw_params_set_rate_near = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_hw_params_set_rate_near");
+    pContext->alsa.snd_pcm_hw_params_set_buffer_size_near = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_hw_params_set_buffer_size_near");
+    pContext->alsa.snd_pcm_hw_params_set_periods_near = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_hw_params_set_periods_near");
+    pContext->alsa.snd_pcm_hw_params_set_access = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_hw_params_set_access");
+    pContext->alsa.snd_pcm_hw_params_get_format = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_hw_params_get_format");
+    pContext->alsa.snd_pcm_hw_params_get_channels = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_hw_params_get_channels");
+    pContext->alsa.snd_pcm_hw_params_get_channels_min = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_hw_params_get_channels_min");
+    pContext->alsa.snd_pcm_hw_params_get_channels_max = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_hw_params_get_channels_max");
+    pContext->alsa.snd_pcm_hw_params_get_rate = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_hw_params_get_rate");
+    pContext->alsa.snd_pcm_hw_params_get_rate_min = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_hw_params_get_rate_min");
+    pContext->alsa.snd_pcm_hw_params_get_rate_max = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_hw_params_get_rate_max");
+    pContext->alsa.snd_pcm_hw_params_get_buffer_size = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_hw_params_get_buffer_size");
+    pContext->alsa.snd_pcm_hw_params_get_periods = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_hw_params_get_periods");
+    pContext->alsa.snd_pcm_hw_params_get_access = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_hw_params_get_access");
+    pContext->alsa.snd_pcm_hw_params_test_format = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_hw_params_test_format");
+    pContext->alsa.snd_pcm_hw_params_test_channels = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_hw_params_test_channels");
+    pContext->alsa.snd_pcm_hw_params_test_rate = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_hw_params_test_rate");
+    pContext->alsa.snd_pcm_hw_params = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_hw_params");
+    pContext->alsa.snd_pcm_sw_params_sizeof = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_sw_params_sizeof");
+    pContext->alsa.snd_pcm_sw_params_current = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_sw_params_current");
+    pContext->alsa.snd_pcm_sw_params_get_boundary = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_sw_params_get_boundary");
+    pContext->alsa.snd_pcm_sw_params_set_avail_min = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_sw_params_set_avail_min");
+    pContext->alsa.snd_pcm_sw_params_set_start_threshold = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_sw_params_set_start_threshold");
+    pContext->alsa.snd_pcm_sw_params_set_stop_threshold = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_sw_params_set_stop_threshold");
+    pContext->alsa.snd_pcm_sw_params = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_sw_params");
+    pContext->alsa.snd_pcm_format_mask_sizeof = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_format_mask_sizeof");
+    pContext->alsa.snd_pcm_format_mask_test = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_format_mask_test");
+    pContext->alsa.snd_pcm_get_chmap = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, 
"snd_pcm_get_chmap"); + pContext->alsa.snd_pcm_state = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_state"); + pContext->alsa.snd_pcm_prepare = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_prepare"); + pContext->alsa.snd_pcm_start = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_start"); + pContext->alsa.snd_pcm_drop = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_drop"); + pContext->alsa.snd_pcm_drain = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_drain"); + pContext->alsa.snd_pcm_reset = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_reset"); + pContext->alsa.snd_device_name_hint = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_device_name_hint"); + pContext->alsa.snd_device_name_get_hint = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_device_name_get_hint"); + pContext->alsa.snd_card_get_index = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_card_get_index"); + pContext->alsa.snd_device_name_free_hint = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_device_name_free_hint"); + pContext->alsa.snd_pcm_mmap_begin = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_mmap_begin"); + pContext->alsa.snd_pcm_mmap_commit = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_mmap_commit"); + pContext->alsa.snd_pcm_recover = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_recover"); + pContext->alsa.snd_pcm_readi = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_readi"); + pContext->alsa.snd_pcm_writei = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_writei"); + pContext->alsa.snd_pcm_avail = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_avail"); + pContext->alsa.snd_pcm_avail_update = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_avail_update"); + pContext->alsa.snd_pcm_wait = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_wait"); + pContext->alsa.snd_pcm_nonblock = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_nonblock"); + pContext->alsa.snd_pcm_info = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_info"); + pContext->alsa.snd_pcm_info_sizeof = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_info_sizeof"); + pContext->alsa.snd_pcm_info_get_name = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_info_get_name"); + pContext->alsa.snd_pcm_poll_descriptors = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_poll_descriptors"); + pContext->alsa.snd_pcm_poll_descriptors_count = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_poll_descriptors_count"); + pContext->alsa.snd_pcm_poll_descriptors_revents = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_pcm_poll_descriptors_revents"); + pContext->alsa.snd_config_update_free_global = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->alsa.asoundSO, "snd_config_update_free_global"); +#else + /* The system below is just 
for type safety. */
+    ma_snd_pcm_open_proc _snd_pcm_open = snd_pcm_open;
+    ma_snd_pcm_close_proc _snd_pcm_close = snd_pcm_close;
+    ma_snd_pcm_hw_params_sizeof_proc _snd_pcm_hw_params_sizeof = snd_pcm_hw_params_sizeof;
+    ma_snd_pcm_hw_params_any_proc _snd_pcm_hw_params_any = snd_pcm_hw_params_any;
+    ma_snd_pcm_hw_params_set_format_proc _snd_pcm_hw_params_set_format = snd_pcm_hw_params_set_format;
+    ma_snd_pcm_hw_params_set_format_first_proc _snd_pcm_hw_params_set_format_first = snd_pcm_hw_params_set_format_first;
+    ma_snd_pcm_hw_params_get_format_mask_proc _snd_pcm_hw_params_get_format_mask = snd_pcm_hw_params_get_format_mask;
+    ma_snd_pcm_hw_params_set_channels_proc _snd_pcm_hw_params_set_channels = snd_pcm_hw_params_set_channels;
+    ma_snd_pcm_hw_params_set_channels_near_proc _snd_pcm_hw_params_set_channels_near = snd_pcm_hw_params_set_channels_near;
+    ma_snd_pcm_hw_params_set_channels_minmax_proc _snd_pcm_hw_params_set_channels_minmax = snd_pcm_hw_params_set_channels_minmax;
+    ma_snd_pcm_hw_params_set_rate_resample_proc _snd_pcm_hw_params_set_rate_resample = snd_pcm_hw_params_set_rate_resample;
+    ma_snd_pcm_hw_params_set_rate_proc _snd_pcm_hw_params_set_rate = snd_pcm_hw_params_set_rate;
+    ma_snd_pcm_hw_params_set_rate_near_proc _snd_pcm_hw_params_set_rate_near = snd_pcm_hw_params_set_rate_near;
+    ma_snd_pcm_hw_params_set_rate_minmax_proc _snd_pcm_hw_params_set_rate_minmax = snd_pcm_hw_params_set_rate_minmax;
+    ma_snd_pcm_hw_params_set_buffer_size_near_proc _snd_pcm_hw_params_set_buffer_size_near = snd_pcm_hw_params_set_buffer_size_near;
+    ma_snd_pcm_hw_params_set_periods_near_proc _snd_pcm_hw_params_set_periods_near = snd_pcm_hw_params_set_periods_near;
+    ma_snd_pcm_hw_params_set_access_proc _snd_pcm_hw_params_set_access = snd_pcm_hw_params_set_access;
+    ma_snd_pcm_hw_params_get_format_proc _snd_pcm_hw_params_get_format = snd_pcm_hw_params_get_format;
+    ma_snd_pcm_hw_params_get_channels_proc _snd_pcm_hw_params_get_channels = snd_pcm_hw_params_get_channels;
+    ma_snd_pcm_hw_params_get_channels_min_proc _snd_pcm_hw_params_get_channels_min = snd_pcm_hw_params_get_channels_min;
+    ma_snd_pcm_hw_params_get_channels_max_proc _snd_pcm_hw_params_get_channels_max = snd_pcm_hw_params_get_channels_max;
+    ma_snd_pcm_hw_params_get_rate_proc _snd_pcm_hw_params_get_rate = snd_pcm_hw_params_get_rate;
+    ma_snd_pcm_hw_params_get_rate_min_proc _snd_pcm_hw_params_get_rate_min = snd_pcm_hw_params_get_rate_min;
+    ma_snd_pcm_hw_params_get_rate_max_proc _snd_pcm_hw_params_get_rate_max = snd_pcm_hw_params_get_rate_max;
+    ma_snd_pcm_hw_params_get_buffer_size_proc _snd_pcm_hw_params_get_buffer_size = snd_pcm_hw_params_get_buffer_size;
+    ma_snd_pcm_hw_params_get_periods_proc _snd_pcm_hw_params_get_periods = snd_pcm_hw_params_get_periods;
+    ma_snd_pcm_hw_params_get_access_proc _snd_pcm_hw_params_get_access = snd_pcm_hw_params_get_access;
+    ma_snd_pcm_hw_params_test_format_proc _snd_pcm_hw_params_test_format = snd_pcm_hw_params_test_format;
+    ma_snd_pcm_hw_params_test_channels_proc _snd_pcm_hw_params_test_channels = snd_pcm_hw_params_test_channels;
+    ma_snd_pcm_hw_params_test_rate_proc _snd_pcm_hw_params_test_rate = snd_pcm_hw_params_test_rate;
+    ma_snd_pcm_hw_params_proc _snd_pcm_hw_params = snd_pcm_hw_params;
+    ma_snd_pcm_sw_params_sizeof_proc _snd_pcm_sw_params_sizeof = snd_pcm_sw_params_sizeof;
+    ma_snd_pcm_sw_params_current_proc _snd_pcm_sw_params_current = snd_pcm_sw_params_current;
+    ma_snd_pcm_sw_params_get_boundary_proc _snd_pcm_sw_params_get_boundary = snd_pcm_sw_params_get_boundary;
+    ma_snd_pcm_sw_params_set_avail_min_proc _snd_pcm_sw_params_set_avail_min = snd_pcm_sw_params_set_avail_min;
+ 
ma_snd_pcm_sw_params_set_start_threshold_proc _snd_pcm_sw_params_set_start_threshold = snd_pcm_sw_params_set_start_threshold; + ma_snd_pcm_sw_params_set_stop_threshold_proc _snd_pcm_sw_params_set_stop_threshold = snd_pcm_sw_params_set_stop_threshold; + ma_snd_pcm_sw_params_proc _snd_pcm_sw_params = snd_pcm_sw_params; + ma_snd_pcm_format_mask_sizeof_proc _snd_pcm_format_mask_sizeof = snd_pcm_format_mask_sizeof; + ma_snd_pcm_format_mask_test_proc _snd_pcm_format_mask_test = snd_pcm_format_mask_test; + ma_snd_pcm_get_chmap_proc _snd_pcm_get_chmap = snd_pcm_get_chmap; + ma_snd_pcm_state_proc _snd_pcm_state = snd_pcm_state; + ma_snd_pcm_prepare_proc _snd_pcm_prepare = snd_pcm_prepare; + ma_snd_pcm_start_proc _snd_pcm_start = snd_pcm_start; + ma_snd_pcm_drop_proc _snd_pcm_drop = snd_pcm_drop; + ma_snd_pcm_drain_proc _snd_pcm_drain = snd_pcm_drain; + ma_snd_pcm_reset_proc _snd_pcm_reset = snd_pcm_reset; + ma_snd_device_name_hint_proc _snd_device_name_hint = snd_device_name_hint; + ma_snd_device_name_get_hint_proc _snd_device_name_get_hint = snd_device_name_get_hint; + ma_snd_card_get_index_proc _snd_card_get_index = snd_card_get_index; + ma_snd_device_name_free_hint_proc _snd_device_name_free_hint = snd_device_name_free_hint; + ma_snd_pcm_mmap_begin_proc _snd_pcm_mmap_begin = snd_pcm_mmap_begin; + ma_snd_pcm_mmap_commit_proc _snd_pcm_mmap_commit = snd_pcm_mmap_commit; + ma_snd_pcm_recover_proc _snd_pcm_recover = snd_pcm_recover; + ma_snd_pcm_readi_proc _snd_pcm_readi = snd_pcm_readi; + ma_snd_pcm_writei_proc _snd_pcm_writei = snd_pcm_writei; + ma_snd_pcm_avail_proc _snd_pcm_avail = snd_pcm_avail; + ma_snd_pcm_avail_update_proc _snd_pcm_avail_update = snd_pcm_avail_update; + ma_snd_pcm_wait_proc _snd_pcm_wait = snd_pcm_wait; + ma_snd_pcm_nonblock_proc _snd_pcm_nonblock = snd_pcm_nonblock; + ma_snd_pcm_info_proc _snd_pcm_info = snd_pcm_info; + ma_snd_pcm_info_sizeof_proc _snd_pcm_info_sizeof = snd_pcm_info_sizeof; + ma_snd_pcm_info_get_name_proc _snd_pcm_info_get_name = snd_pcm_info_get_name; + ma_snd_pcm_poll_descriptors _snd_pcm_poll_descriptors = snd_pcm_poll_descriptors; + ma_snd_pcm_poll_descriptors_count _snd_pcm_poll_descriptors_count = snd_pcm_poll_descriptors_count; + ma_snd_pcm_poll_descriptors_revents _snd_pcm_poll_descriptors_revents = snd_pcm_poll_descriptors_revents; + ma_snd_config_update_free_global_proc _snd_config_update_free_global = snd_config_update_free_global; + + pContext->alsa.snd_pcm_open = (ma_proc)_snd_pcm_open; + pContext->alsa.snd_pcm_close = (ma_proc)_snd_pcm_close; + pContext->alsa.snd_pcm_hw_params_sizeof = (ma_proc)_snd_pcm_hw_params_sizeof; + pContext->alsa.snd_pcm_hw_params_any = (ma_proc)_snd_pcm_hw_params_any; + pContext->alsa.snd_pcm_hw_params_set_format = (ma_proc)_snd_pcm_hw_params_set_format; + pContext->alsa.snd_pcm_hw_params_set_format_first = (ma_proc)_snd_pcm_hw_params_set_format_first; + pContext->alsa.snd_pcm_hw_params_get_format_mask = (ma_proc)_snd_pcm_hw_params_get_format_mask; + pContext->alsa.snd_pcm_hw_params_set_channels = (ma_proc)_snd_pcm_hw_params_set_channels; + pContext->alsa.snd_pcm_hw_params_set_channels_near = (ma_proc)_snd_pcm_hw_params_set_channels_near; + pContext->alsa.snd_pcm_hw_params_set_channels_minmax = (ma_proc)_snd_pcm_hw_params_set_channels_minmax; + pContext->alsa.snd_pcm_hw_params_set_rate_resample = (ma_proc)_snd_pcm_hw_params_set_rate_resample; + pContext->alsa.snd_pcm_hw_params_set_rate = (ma_proc)_snd_pcm_hw_params_set_rate; + pContext->alsa.snd_pcm_hw_params_set_rate_near = (ma_proc)_snd_pcm_hw_params_set_rate_near; + 
pContext->alsa.snd_pcm_hw_params_set_buffer_size_near = (ma_proc)_snd_pcm_hw_params_set_buffer_size_near; + pContext->alsa.snd_pcm_hw_params_set_periods_near = (ma_proc)_snd_pcm_hw_params_set_periods_near; + pContext->alsa.snd_pcm_hw_params_set_access = (ma_proc)_snd_pcm_hw_params_set_access; + pContext->alsa.snd_pcm_hw_params_get_format = (ma_proc)_snd_pcm_hw_params_get_format; + pContext->alsa.snd_pcm_hw_params_get_channels = (ma_proc)_snd_pcm_hw_params_get_channels; + pContext->alsa.snd_pcm_hw_params_get_channels_min = (ma_proc)_snd_pcm_hw_params_get_channels_min; + pContext->alsa.snd_pcm_hw_params_get_channels_max = (ma_proc)_snd_pcm_hw_params_get_channels_max; + pContext->alsa.snd_pcm_hw_params_get_rate = (ma_proc)_snd_pcm_hw_params_get_rate; + pContext->alsa.snd_pcm_hw_params_get_rate_min = (ma_proc)_snd_pcm_hw_params_get_rate_min; + pContext->alsa.snd_pcm_hw_params_get_rate_max = (ma_proc)_snd_pcm_hw_params_get_rate_max; + pContext->alsa.snd_pcm_hw_params_get_buffer_size = (ma_proc)_snd_pcm_hw_params_get_buffer_size; + pContext->alsa.snd_pcm_hw_params_get_periods = (ma_proc)_snd_pcm_hw_params_get_periods; + pContext->alsa.snd_pcm_hw_params_get_access = (ma_proc)_snd_pcm_hw_params_get_access; + pContext->alsa.snd_pcm_hw_params_test_format = (ma_proc)_snd_pcm_hw_params_test_format; + pContext->alsa.snd_pcm_hw_params_test_channels = (ma_proc)_snd_pcm_hw_params_test_channels; + pContext->alsa.snd_pcm_hw_params_test_rate = (ma_proc)_snd_pcm_hw_params_test_rate; + pContext->alsa.snd_pcm_hw_params = (ma_proc)_snd_pcm_hw_params; + pContext->alsa.snd_pcm_sw_params_sizeof = (ma_proc)_snd_pcm_sw_params_sizeof; + pContext->alsa.snd_pcm_sw_params_current = (ma_proc)_snd_pcm_sw_params_current; + pContext->alsa.snd_pcm_sw_params_get_boundary = (ma_proc)_snd_pcm_sw_params_get_boundary; + pContext->alsa.snd_pcm_sw_params_set_avail_min = (ma_proc)_snd_pcm_sw_params_set_avail_min; + pContext->alsa.snd_pcm_sw_params_set_start_threshold = (ma_proc)_snd_pcm_sw_params_set_start_threshold; + pContext->alsa.snd_pcm_sw_params_set_stop_threshold = (ma_proc)_snd_pcm_sw_params_set_stop_threshold; + pContext->alsa.snd_pcm_sw_params = (ma_proc)_snd_pcm_sw_params; + pContext->alsa.snd_pcm_format_mask_sizeof = (ma_proc)_snd_pcm_format_mask_sizeof; + pContext->alsa.snd_pcm_format_mask_test = (ma_proc)_snd_pcm_format_mask_test; + pContext->alsa.snd_pcm_get_chmap = (ma_proc)_snd_pcm_get_chmap; + pContext->alsa.snd_pcm_state = (ma_proc)_snd_pcm_state; + pContext->alsa.snd_pcm_prepare = (ma_proc)_snd_pcm_prepare; + pContext->alsa.snd_pcm_start = (ma_proc)_snd_pcm_start; + pContext->alsa.snd_pcm_drop = (ma_proc)_snd_pcm_drop; + pContext->alsa.snd_pcm_drain = (ma_proc)_snd_pcm_drain; + pContext->alsa.snd_pcm_reset = (ma_proc)_snd_pcm_reset; + pContext->alsa.snd_device_name_hint = (ma_proc)_snd_device_name_hint; + pContext->alsa.snd_device_name_get_hint = (ma_proc)_snd_device_name_get_hint; + pContext->alsa.snd_card_get_index = (ma_proc)_snd_card_get_index; + pContext->alsa.snd_device_name_free_hint = (ma_proc)_snd_device_name_free_hint; + pContext->alsa.snd_pcm_mmap_begin = (ma_proc)_snd_pcm_mmap_begin; + pContext->alsa.snd_pcm_mmap_commit = (ma_proc)_snd_pcm_mmap_commit; + pContext->alsa.snd_pcm_recover = (ma_proc)_snd_pcm_recover; + pContext->alsa.snd_pcm_readi = (ma_proc)_snd_pcm_readi; + pContext->alsa.snd_pcm_writei = (ma_proc)_snd_pcm_writei; + pContext->alsa.snd_pcm_avail = (ma_proc)_snd_pcm_avail; + pContext->alsa.snd_pcm_avail_update = (ma_proc)_snd_pcm_avail_update; + pContext->alsa.snd_pcm_wait = 
(ma_proc)_snd_pcm_wait; + pContext->alsa.snd_pcm_nonblock = (ma_proc)_snd_pcm_nonblock; + pContext->alsa.snd_pcm_info = (ma_proc)_snd_pcm_info; + pContext->alsa.snd_pcm_info_sizeof = (ma_proc)_snd_pcm_info_sizeof; + pContext->alsa.snd_pcm_info_get_name = (ma_proc)_snd_pcm_info_get_name; + pContext->alsa.snd_pcm_poll_descriptors = (ma_proc)_snd_pcm_poll_descriptors; + pContext->alsa.snd_pcm_poll_descriptors_count = (ma_proc)_snd_pcm_poll_descriptors_count; + pContext->alsa.snd_pcm_poll_descriptors_revents = (ma_proc)_snd_pcm_poll_descriptors_revents; + pContext->alsa.snd_config_update_free_global = (ma_proc)_snd_config_update_free_global; +#endif + + pContext->alsa.useVerboseDeviceEnumeration = pConfig->alsa.useVerboseDeviceEnumeration; + + result = ma_mutex_init(&pContext->alsa.internalDeviceEnumLock); + if (result != MA_SUCCESS) { + ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[ALSA] WARNING: Failed to initialize mutex for internal device enumeration."); + return result; + } + + pCallbacks->onContextInit = ma_context_init__alsa; + pCallbacks->onContextUninit = ma_context_uninit__alsa; + pCallbacks->onContextEnumerateDevices = ma_context_enumerate_devices__alsa; + pCallbacks->onContextGetDeviceInfo = ma_context_get_device_info__alsa; + pCallbacks->onDeviceInit = ma_device_init__alsa; + pCallbacks->onDeviceUninit = ma_device_uninit__alsa; + pCallbacks->onDeviceStart = ma_device_start__alsa; + pCallbacks->onDeviceStop = ma_device_stop__alsa; + pCallbacks->onDeviceRead = ma_device_read__alsa; + pCallbacks->onDeviceWrite = ma_device_write__alsa; + pCallbacks->onDeviceDataLoop = NULL; + pCallbacks->onDeviceDataLoopWakeup = ma_device_data_loop_wakeup__alsa; + + return MA_SUCCESS; +} +#endif /* MA_HAS_ALSA */ + + + +/****************************************************************************** + +PulseAudio Backend + +******************************************************************************/ +#ifdef MA_HAS_PULSEAUDIO +/* +The PulseAudio API, along with Apple's Core Audio, is the worst of the mainstream audio APIs. This is a brief description of what's going on +in the PulseAudio backend. I apologize if this gets a bit ranty for your liking - you might want to skip this discussion. + +PulseAudio has something they call the "Simple API", which unfortunately isn't suitable for miniaudio. I've not seen anywhere where it +allows you to enumerate over devices, nor does it seem to support the ability to stop and start streams. Looking at the documentation, it +appears as though the stream is constantly running and you prevent sound from being emitted or captured by simply not calling the read or +write functions. This is not a professional solution as it would be much better to *actually* stop the underlying stream. Perhaps the +simple API has some smarts to do this automatically, but I'm not sure. Another limitation with the simple API is that it seems inefficient +when you want to have multiple streams to a single context. For these reasons, miniaudio is not using the simple API. + +Since we're not using the simple API, we're left with the asynchronous API as our only other option. And boy, is this where it starts to +get fun, and I don't mean that in a good way... + +The problems start with the very name of the API - "asynchronous". Yes, this is an asynchronous oriented API which means your commands +don't immediately take effect. You instead need to issue your commands, and then wait for them to complete. The waiting mechanism is +enabled through the use of a "main loop". 
In the asynchronous API you cannot get away from the main loop, and the main loop is where almost
+all of PulseAudio's problems stem from.
+
+When you first initialize PulseAudio you need an object referred to as "main loop". You can implement this yourself by defining your own
+vtable, but it's much easier to just use one of the built-in main loop implementations. There are two generic implementations called
+pa_mainloop and pa_threaded_mainloop, and another implementation specific to GLib called pa_glib_mainloop. We're using pa_threaded_mainloop
+because it simplifies management of the worker thread. The idea of the main loop object is pretty self-explanatory - you're supposed to use
+it to implement a worker thread which runs in a loop. The main loop is where operations are actually executed.
+
+To initialize the main loop, you just use `pa_threaded_mainloop_new()`. This is the first function you'll call. You can then get a pointer
+to the vtable with `pa_threaded_mainloop_get_api()` (the main loop vtable is called `pa_mainloop_api`). Again, you can bypass the threaded
+main loop object entirely and just implement `pa_mainloop_api` directly, but there's no need for it unless you're doing something extremely
+specialized, such as integrating it into your application's existing main loop infrastructure.
+
+(EDIT 2021-01-26: miniaudio is no longer using `pa_threaded_mainloop` due to this issue: https://github.com/mackron/miniaudio/issues/262.
+It is now using `pa_mainloop` which turns out to be a simpler solution anyway. The rest of this rant still applies, however.)
+
+Once you have your main loop vtable (the `pa_mainloop_api` object) you can create the PulseAudio context. This is very similar to
+miniaudio's context and they map to each other quite well. You have one context to many streams, which is basically the same as miniaudio's
+one `ma_context` to many `ma_device`s. Here's where it starts to get annoying, however. When you first create the PulseAudio context, which
+is done with `pa_context_new()`, it's not actually connected to anything. When you connect, you call `pa_context_connect()`. However, if
+you remember, PulseAudio is an asynchronous API. That means you cannot just assume the context is connected after `pa_context_connect()`
+has returned. You instead need to wait for it to connect. To do this, you need to either wait for a callback to get fired, which you can
+set with `pa_context_set_state_callback()`, or you can continuously poll the context's state. Either way, you need to run this in a loop.
+All objects from here out are created from the context, and, I believe, you can't create these objects until the context is connected.
+This waiting loop is therefore unavoidable. In order for the waiting to ever complete, however, the main loop needs to be running. Before
+attempting to connect the context, the main loop needs to be started with `pa_threaded_mainloop_start()`.
+
+The reason for this asynchronous design is to support cases where you're connecting to a remote server, say through a local network or an
+internet connection. However, the *VAST* majority of cases don't involve this at all - they just connect to a local "server" running on the
+host machine. The fact that this would be the default rather than making `pa_context_connect()` synchronous tends to boggle the mind.
+
+Once the context has been created and connected you can start creating a stream. A PulseAudio stream is analogous to miniaudio's device.
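+
+Putting the context part of that together, and using the plain `pa_mainloop` mentioned in the edit note above rather than the threaded
+one, the connection dance boils down to something like the following. This is an illustrative sketch only, not the exact code used by
+this backend - error handling and cleanup are omitted and "my-app" is just a placeholder name:
+
+    pa_mainloop* ml = pa_mainloop_new();
+    pa_context* ctx = pa_context_new(pa_mainloop_get_api(ml), "my-app");
+
+    pa_context_connect(ctx, NULL, PA_CONTEXT_NOFLAGS, NULL);   // returns immediately; the context is not actually connected yet
+
+    while (pa_context_get_state(ctx) != PA_CONTEXT_READY) {
+        if (!PA_CONTEXT_IS_GOOD(pa_context_get_state(ctx))) {
+            break;                          // PA_CONTEXT_FAILED or PA_CONTEXT_TERMINATED - give up
+        }
+        pa_mainloop_iterate(ml, 1, NULL);   // pump the main loop; without this the state can never change
+    }
+
+Note that `pa_mainloop_iterate()` is what actually runs the deferred work - if nothing pumps the main loop, the context state never
+advances and the wait never completes.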
+The initialization of a stream is fairly standard - you configure some attributes (analogous to miniaudio's device config) and then call
+`pa_stream_new()` to actually create it. Here is where we start to get into "operations". When configuring the stream, you can get
+information about the source (such as sample format, sample rate, etc.), but it's not synchronous. Instead, a `pa_operation` object
+is returned from `pa_context_get_source_info_by_name()` (capture) or `pa_context_get_sink_info_by_name()` (playback). Then, you need to
+run a loop (again!) to wait for the operation to complete, which you can determine via a callback or polling, just like we did with the
+context. Then, as an added bonus, you need to decrement the reference counter of the `pa_operation` object to ensure memory is cleaned up.
+All of that just to retrieve basic information about a device!
+
+Once the basic information about the device has been retrieved, miniaudio can now create the stream with `pa_stream_new()`. Like the
+context, this needs to be connected. But we need to be careful here, because we're now about to introduce one of the most horrific design
+choices in PulseAudio.
+
+PulseAudio allows you to specify a callback that is fired when data can be written to or read from a stream. The language is important here
+because PulseAudio takes it literally, specifically the "can be". You would think these callbacks would be appropriate as the place for
+writing and reading data to and from the stream, and that would be right, except when it's not. When you initialize the stream, you can
+set a flag that tells PulseAudio to not start the stream automatically. This is required because miniaudio does not auto-start devices
+straight after initialization - you need to call `ma_device_start()` manually. The problem is that even when this flag is specified,
+PulseAudio will immediately fire its write or read callback. This is *technically* correct (based on the wording in the documentation)
+because indeed, data *can* be written at this point. The problem is that it's not *practical*. It makes sense that the write/read callback
+would be where a program will want to write or read data to or from the stream, but when it's called before the application has even
+requested that the stream be started, it's just not practical because the program probably isn't ready for any kind of data delivery at
+that point (it may still need to load files or whatnot). Instead, this callback should only be fired when the application requests the
+stream be started, which is how it works with literally *every* other callback-based audio API. Since miniaudio forbids firing of the data
+callback until the device has been started (as it should be with *all* callback-based APIs), logic needs to be added to ensure miniaudio
+doesn't just blindly fire the application-defined data callback from within the PulseAudio callback before the stream has actually been
+started. The device state is used for this - if the state is anything other than `ma_device_state_starting` or `ma_device_state_started`,
+the main data callback is not fired.
+
+This, unfortunately, is not the end of the problems with the PulseAudio write callback. Any normal callback-based audio API will
+continuously fire the callback at regular intervals based on the size of the internal buffer. This will only ever be fired when the device
+is running, and will be fired regardless of whether or not the user actually wrote anything to the device/stream.
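+
+(A quick aside before continuing with the write callback: in code, the operation dance from a few paragraphs back comes out roughly like
+the following, again using the polling approach. This is an illustrative sketch rather than this backend's exact code, and `on_sink_info`
+and `my_data` are hypothetical names for a `pa_sink_info_cb_t` callback and its userdata:
+
+    pa_operation* op = pa_context_get_sink_info_by_name(ctx, "name-of-some-sink", on_sink_info, &my_data);
+
+    while (pa_operation_get_state(op) == PA_OPERATION_RUNNING) {
+        pa_mainloop_iterate(ml, 1, NULL);   // keep pumping the main loop until the operation completes
+    }
+
+    pa_operation_unref(op);                 // the "added bonus": decrement the reference counter or the operation leaks
+
+The device information itself is delivered to `on_sink_info`; nothing useful comes back from the waiting loop itself.)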
+Not so in PulseAudio. In PulseAudio, the data callback will *only* be called if you wrote something to it previously. That means, if you
+don't call `pa_stream_write()`, the callback will not get fired. On the surface you wouldn't think this would matter because you should
+always be writing data, and if you don't have anything to write, just write silence. That's fine until you want to drain the stream. You
+see, if you're continuously writing data to the stream, the stream will never get drained! That means in order to drain the stream, you
+need to *not* write data to it! But remember, when you don't write data to the stream, the callback won't get fired again! Why is draining
+important? Because that's how we've defined stopping to work in miniaudio. In miniaudio, stopping the device requires it to be drained
+before returning from ma_device_stop(). So we've stopped the device, which requires us to drain, but draining requires us to *not* write
+data to the stream (or else it won't ever complete draining), but not writing to the stream means the callback won't get fired again!
+
+This becomes a problem when stopping and then restarting the device. When the device is stopped, it's drained, which requires us to *not*
+write anything to the stream. But then, since we didn't write anything to it, the write callback will *never* get called again if we just
+resume the stream naively. This means that starting the stream requires us to write data to the stream from outside the callback. This
+disconnect is something PulseAudio has got seriously wrong - there should only ever be a single source of data delivery, that being the
+callback. (I have tried using `pa_stream_flush()` to trigger the write callback to fire, but this just doesn't work for some reason.)
+
+Once you've created the stream, you need to connect it, which involves the whole waiting procedure. This is the same process as the
+context, only this time you'll poll for the state with `pa_stream_get_state()`. The starting and stopping of a stream is referred to as
+"corking" in PulseAudio. The analogy is corking a barrel. To start the stream, you uncork it; to stop it, you cork it. Personally I think
+it's silly - why would you not just call it "starting" and "stopping" like any other normal audio API? Anyway, the act of corking is, you
+guessed it, asynchronous. This means you'll need the usual waiting loop. Again, why this asynchronous design is the default is absolutely
+beyond me. Would it really be that hard to just make it run synchronously?
+
+Teardown is pretty simple (what?!). It's just a matter of calling the relevant `_unref()` function on each object, in the reverse of the
+order they were initialized in.
+
+That's about it from the PulseAudio side. A bit ranty, I know, but they really need to fix that main loop and callback system. They're
+embarrassingly impractical. The main loop thing is an easy fix - have synchronous versions of all APIs. If an application wants these to
+run asynchronously, they can execute them in a separate thread themselves. The desire to run these asynchronously is such a niche
+requirement - it makes no sense to make it the default. The stream write callback needs to be changed, or an alternative provided that is
+fired constantly, regardless of whether or not `pa_stream_write()` has been called, and it needs to take a pointer to a buffer as a
+parameter which the program just writes to directly rather than having to call `pa_stream_writable_size()` and `pa_stream_write()`.
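+
+(For contrast, the write path the current API forces on the program looks roughly like the following, given a connected `pa_stream* stream`.
+Once more this is an illustrative sketch, not this backend's exact code, with `fill_with_audio()` being a hypothetical stand-in for
+whatever actually produces your samples:
+
+    size_t nbytes = pa_stream_writable_size(stream);
+    if (nbytes > 0 && nbytes != (size_t)-1) {
+        void* data;
+        pa_stream_begin_write(stream, &data, &nbytes);                       // ask PulseAudio for a buffer to fill
+        fill_with_audio(data, nbytes);
+        pa_stream_write(stream, data, nbytes, NULL, 0, PA_SEEK_RELATIVE);    // commit the buffer
+    }
+
+Every one of those steps is something the callback could simply have handed to the program directly.)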
+Changes along those lines would take PulseAudio from one of the worst audio APIs to one of the best.
+*/
+
+
+/*
+When linking at compile time, pulseaudio.h is assumed to be available and we use the declarations in the header to check for type safety.
+We cannot do this when linking at run time because the header might not be available.
+*/
+#ifdef MA_NO_RUNTIME_LINKING
+
+/* pulseaudio.h marks some functions with "inline" which isn't always supported. Need to emulate it. */
+#if !defined(__cplusplus)
+    #if defined(__STRICT_ANSI__)
+        #if !defined(inline)
+            #define inline __inline__ __attribute__((always_inline))
+            #define MA_INLINE_DEFINED
+        #endif
+    #endif
+#endif
+#include <pulse/pulseaudio.h>
+#if defined(MA_INLINE_DEFINED)
+    #undef inline
+    #undef MA_INLINE_DEFINED
+#endif
+
+#define MA_PA_OK PA_OK
+#define MA_PA_ERR_ACCESS PA_ERR_ACCESS
+#define MA_PA_ERR_INVALID PA_ERR_INVALID
+#define MA_PA_ERR_NOENTITY PA_ERR_NOENTITY
+#define MA_PA_ERR_NOTSUPPORTED PA_ERR_NOTSUPPORTED
+
+#define MA_PA_CHANNELS_MAX PA_CHANNELS_MAX
+#define MA_PA_RATE_MAX PA_RATE_MAX
+
+typedef pa_context_flags_t ma_pa_context_flags_t;
+#define MA_PA_CONTEXT_NOFLAGS PA_CONTEXT_NOFLAGS
+#define MA_PA_CONTEXT_NOAUTOSPAWN PA_CONTEXT_NOAUTOSPAWN
+#define MA_PA_CONTEXT_NOFAIL PA_CONTEXT_NOFAIL
+
+typedef pa_stream_flags_t ma_pa_stream_flags_t;
+#define MA_PA_STREAM_NOFLAGS PA_STREAM_NOFLAGS
+#define MA_PA_STREAM_START_CORKED PA_STREAM_START_CORKED
+#define MA_PA_STREAM_INTERPOLATE_TIMING PA_STREAM_INTERPOLATE_TIMING
+#define MA_PA_STREAM_NOT_MONOTONIC PA_STREAM_NOT_MONOTONIC
+#define MA_PA_STREAM_AUTO_TIMING_UPDATE PA_STREAM_AUTO_TIMING_UPDATE
+#define MA_PA_STREAM_NO_REMAP_CHANNELS PA_STREAM_NO_REMAP_CHANNELS
+#define MA_PA_STREAM_NO_REMIX_CHANNELS PA_STREAM_NO_REMIX_CHANNELS
+#define MA_PA_STREAM_FIX_FORMAT PA_STREAM_FIX_FORMAT
+#define MA_PA_STREAM_FIX_RATE PA_STREAM_FIX_RATE
+#define MA_PA_STREAM_FIX_CHANNELS PA_STREAM_FIX_CHANNELS
+#define MA_PA_STREAM_DONT_MOVE PA_STREAM_DONT_MOVE
+#define MA_PA_STREAM_VARIABLE_RATE PA_STREAM_VARIABLE_RATE
+#define MA_PA_STREAM_PEAK_DETECT PA_STREAM_PEAK_DETECT
+#define MA_PA_STREAM_START_MUTED PA_STREAM_START_MUTED
+#define MA_PA_STREAM_ADJUST_LATENCY PA_STREAM_ADJUST_LATENCY
+#define MA_PA_STREAM_EARLY_REQUESTS PA_STREAM_EARLY_REQUESTS
+#define MA_PA_STREAM_DONT_INHIBIT_AUTO_SUSPEND PA_STREAM_DONT_INHIBIT_AUTO_SUSPEND
+#define MA_PA_STREAM_START_UNMUTED PA_STREAM_START_UNMUTED
+#define MA_PA_STREAM_FAIL_ON_SUSPEND PA_STREAM_FAIL_ON_SUSPEND
+#define MA_PA_STREAM_RELATIVE_VOLUME PA_STREAM_RELATIVE_VOLUME
+#define MA_PA_STREAM_PASSTHROUGH PA_STREAM_PASSTHROUGH
+
+typedef pa_sink_flags_t ma_pa_sink_flags_t;
+#define MA_PA_SINK_NOFLAGS PA_SINK_NOFLAGS
+#define MA_PA_SINK_HW_VOLUME_CTRL PA_SINK_HW_VOLUME_CTRL
+#define MA_PA_SINK_LATENCY PA_SINK_LATENCY
+#define MA_PA_SINK_HARDWARE PA_SINK_HARDWARE
+#define MA_PA_SINK_NETWORK PA_SINK_NETWORK
+#define MA_PA_SINK_HW_MUTE_CTRL PA_SINK_HW_MUTE_CTRL
+#define MA_PA_SINK_DECIBEL_VOLUME PA_SINK_DECIBEL_VOLUME
+#define MA_PA_SINK_FLAT_VOLUME PA_SINK_FLAT_VOLUME
+#define MA_PA_SINK_DYNAMIC_LATENCY PA_SINK_DYNAMIC_LATENCY
+#define MA_PA_SINK_SET_FORMATS PA_SINK_SET_FORMATS
+
+typedef pa_source_flags_t ma_pa_source_flags_t;
+#define MA_PA_SOURCE_NOFLAGS PA_SOURCE_NOFLAGS
+#define MA_PA_SOURCE_HW_VOLUME_CTRL PA_SOURCE_HW_VOLUME_CTRL
+#define MA_PA_SOURCE_LATENCY PA_SOURCE_LATENCY
+#define MA_PA_SOURCE_HARDWARE PA_SOURCE_HARDWARE
+#define MA_PA_SOURCE_NETWORK PA_SOURCE_NETWORK
+#define MA_PA_SOURCE_HW_MUTE_CTRL PA_SOURCE_HW_MUTE_CTRL
+#define 
MA_PA_SOURCE_DECIBEL_VOLUME PA_SOURCE_DECIBEL_VOLUME +#define MA_PA_SOURCE_DYNAMIC_LATENCY PA_SOURCE_DYNAMIC_LATENCY +#define MA_PA_SOURCE_FLAT_VOLUME PA_SOURCE_FLAT_VOLUME + +typedef pa_context_state_t ma_pa_context_state_t; +#define MA_PA_CONTEXT_UNCONNECTED PA_CONTEXT_UNCONNECTED +#define MA_PA_CONTEXT_CONNECTING PA_CONTEXT_CONNECTING +#define MA_PA_CONTEXT_AUTHORIZING PA_CONTEXT_AUTHORIZING +#define MA_PA_CONTEXT_SETTING_NAME PA_CONTEXT_SETTING_NAME +#define MA_PA_CONTEXT_READY PA_CONTEXT_READY +#define MA_PA_CONTEXT_FAILED PA_CONTEXT_FAILED +#define MA_PA_CONTEXT_TERMINATED PA_CONTEXT_TERMINATED + +typedef pa_stream_state_t ma_pa_stream_state_t; +#define MA_PA_STREAM_UNCONNECTED PA_STREAM_UNCONNECTED +#define MA_PA_STREAM_CREATING PA_STREAM_CREATING +#define MA_PA_STREAM_READY PA_STREAM_READY +#define MA_PA_STREAM_FAILED PA_STREAM_FAILED +#define MA_PA_STREAM_TERMINATED PA_STREAM_TERMINATED + +typedef pa_operation_state_t ma_pa_operation_state_t; +#define MA_PA_OPERATION_RUNNING PA_OPERATION_RUNNING +#define MA_PA_OPERATION_DONE PA_OPERATION_DONE +#define MA_PA_OPERATION_CANCELLED PA_OPERATION_CANCELLED + +typedef pa_sink_state_t ma_pa_sink_state_t; +#define MA_PA_SINK_INVALID_STATE PA_SINK_INVALID_STATE +#define MA_PA_SINK_RUNNING PA_SINK_RUNNING +#define MA_PA_SINK_IDLE PA_SINK_IDLE +#define MA_PA_SINK_SUSPENDED PA_SINK_SUSPENDED + +typedef pa_source_state_t ma_pa_source_state_t; +#define MA_PA_SOURCE_INVALID_STATE PA_SOURCE_INVALID_STATE +#define MA_PA_SOURCE_RUNNING PA_SOURCE_RUNNING +#define MA_PA_SOURCE_IDLE PA_SOURCE_IDLE +#define MA_PA_SOURCE_SUSPENDED PA_SOURCE_SUSPENDED + +typedef pa_seek_mode_t ma_pa_seek_mode_t; +#define MA_PA_SEEK_RELATIVE PA_SEEK_RELATIVE +#define MA_PA_SEEK_ABSOLUTE PA_SEEK_ABSOLUTE +#define MA_PA_SEEK_RELATIVE_ON_READ PA_SEEK_RELATIVE_ON_READ +#define MA_PA_SEEK_RELATIVE_END PA_SEEK_RELATIVE_END + +typedef pa_channel_position_t ma_pa_channel_position_t; +#define MA_PA_CHANNEL_POSITION_INVALID PA_CHANNEL_POSITION_INVALID +#define MA_PA_CHANNEL_POSITION_MONO PA_CHANNEL_POSITION_MONO +#define MA_PA_CHANNEL_POSITION_FRONT_LEFT PA_CHANNEL_POSITION_FRONT_LEFT +#define MA_PA_CHANNEL_POSITION_FRONT_RIGHT PA_CHANNEL_POSITION_FRONT_RIGHT +#define MA_PA_CHANNEL_POSITION_FRONT_CENTER PA_CHANNEL_POSITION_FRONT_CENTER +#define MA_PA_CHANNEL_POSITION_REAR_CENTER PA_CHANNEL_POSITION_REAR_CENTER +#define MA_PA_CHANNEL_POSITION_REAR_LEFT PA_CHANNEL_POSITION_REAR_LEFT +#define MA_PA_CHANNEL_POSITION_REAR_RIGHT PA_CHANNEL_POSITION_REAR_RIGHT +#define MA_PA_CHANNEL_POSITION_LFE PA_CHANNEL_POSITION_LFE +#define MA_PA_CHANNEL_POSITION_FRONT_LEFT_OF_CENTER PA_CHANNEL_POSITION_FRONT_LEFT_OF_CENTER +#define MA_PA_CHANNEL_POSITION_FRONT_RIGHT_OF_CENTER PA_CHANNEL_POSITION_FRONT_RIGHT_OF_CENTER +#define MA_PA_CHANNEL_POSITION_SIDE_LEFT PA_CHANNEL_POSITION_SIDE_LEFT +#define MA_PA_CHANNEL_POSITION_SIDE_RIGHT PA_CHANNEL_POSITION_SIDE_RIGHT +#define MA_PA_CHANNEL_POSITION_AUX0 PA_CHANNEL_POSITION_AUX0 +#define MA_PA_CHANNEL_POSITION_AUX1 PA_CHANNEL_POSITION_AUX1 +#define MA_PA_CHANNEL_POSITION_AUX2 PA_CHANNEL_POSITION_AUX2 +#define MA_PA_CHANNEL_POSITION_AUX3 PA_CHANNEL_POSITION_AUX3 +#define MA_PA_CHANNEL_POSITION_AUX4 PA_CHANNEL_POSITION_AUX4 +#define MA_PA_CHANNEL_POSITION_AUX5 PA_CHANNEL_POSITION_AUX5 +#define MA_PA_CHANNEL_POSITION_AUX6 PA_CHANNEL_POSITION_AUX6 +#define MA_PA_CHANNEL_POSITION_AUX7 PA_CHANNEL_POSITION_AUX7 +#define MA_PA_CHANNEL_POSITION_AUX8 PA_CHANNEL_POSITION_AUX8 +#define MA_PA_CHANNEL_POSITION_AUX9 PA_CHANNEL_POSITION_AUX9 +#define 
MA_PA_CHANNEL_POSITION_AUX10 PA_CHANNEL_POSITION_AUX10 +#define MA_PA_CHANNEL_POSITION_AUX11 PA_CHANNEL_POSITION_AUX11 +#define MA_PA_CHANNEL_POSITION_AUX12 PA_CHANNEL_POSITION_AUX12 +#define MA_PA_CHANNEL_POSITION_AUX13 PA_CHANNEL_POSITION_AUX13 +#define MA_PA_CHANNEL_POSITION_AUX14 PA_CHANNEL_POSITION_AUX14 +#define MA_PA_CHANNEL_POSITION_AUX15 PA_CHANNEL_POSITION_AUX15 +#define MA_PA_CHANNEL_POSITION_AUX16 PA_CHANNEL_POSITION_AUX16 +#define MA_PA_CHANNEL_POSITION_AUX17 PA_CHANNEL_POSITION_AUX17 +#define MA_PA_CHANNEL_POSITION_AUX18 PA_CHANNEL_POSITION_AUX18 +#define MA_PA_CHANNEL_POSITION_AUX19 PA_CHANNEL_POSITION_AUX19 +#define MA_PA_CHANNEL_POSITION_AUX20 PA_CHANNEL_POSITION_AUX20 +#define MA_PA_CHANNEL_POSITION_AUX21 PA_CHANNEL_POSITION_AUX21 +#define MA_PA_CHANNEL_POSITION_AUX22 PA_CHANNEL_POSITION_AUX22 +#define MA_PA_CHANNEL_POSITION_AUX23 PA_CHANNEL_POSITION_AUX23 +#define MA_PA_CHANNEL_POSITION_AUX24 PA_CHANNEL_POSITION_AUX24 +#define MA_PA_CHANNEL_POSITION_AUX25 PA_CHANNEL_POSITION_AUX25 +#define MA_PA_CHANNEL_POSITION_AUX26 PA_CHANNEL_POSITION_AUX26 +#define MA_PA_CHANNEL_POSITION_AUX27 PA_CHANNEL_POSITION_AUX27 +#define MA_PA_CHANNEL_POSITION_AUX28 PA_CHANNEL_POSITION_AUX28 +#define MA_PA_CHANNEL_POSITION_AUX29 PA_CHANNEL_POSITION_AUX29 +#define MA_PA_CHANNEL_POSITION_AUX30 PA_CHANNEL_POSITION_AUX30 +#define MA_PA_CHANNEL_POSITION_AUX31 PA_CHANNEL_POSITION_AUX31 +#define MA_PA_CHANNEL_POSITION_TOP_CENTER PA_CHANNEL_POSITION_TOP_CENTER +#define MA_PA_CHANNEL_POSITION_TOP_FRONT_LEFT PA_CHANNEL_POSITION_TOP_FRONT_LEFT +#define MA_PA_CHANNEL_POSITION_TOP_FRONT_RIGHT PA_CHANNEL_POSITION_TOP_FRONT_RIGHT +#define MA_PA_CHANNEL_POSITION_TOP_FRONT_CENTER PA_CHANNEL_POSITION_TOP_FRONT_CENTER +#define MA_PA_CHANNEL_POSITION_TOP_REAR_LEFT PA_CHANNEL_POSITION_TOP_REAR_LEFT +#define MA_PA_CHANNEL_POSITION_TOP_REAR_RIGHT PA_CHANNEL_POSITION_TOP_REAR_RIGHT +#define MA_PA_CHANNEL_POSITION_TOP_REAR_CENTER PA_CHANNEL_POSITION_TOP_REAR_CENTER +#define MA_PA_CHANNEL_POSITION_LEFT PA_CHANNEL_POSITION_LEFT +#define MA_PA_CHANNEL_POSITION_RIGHT PA_CHANNEL_POSITION_RIGHT +#define MA_PA_CHANNEL_POSITION_CENTER PA_CHANNEL_POSITION_CENTER +#define MA_PA_CHANNEL_POSITION_SUBWOOFER PA_CHANNEL_POSITION_SUBWOOFER + +typedef pa_channel_map_def_t ma_pa_channel_map_def_t; +#define MA_PA_CHANNEL_MAP_AIFF PA_CHANNEL_MAP_AIFF +#define MA_PA_CHANNEL_MAP_ALSA PA_CHANNEL_MAP_ALSA +#define MA_PA_CHANNEL_MAP_AUX PA_CHANNEL_MAP_AUX +#define MA_PA_CHANNEL_MAP_WAVEEX PA_CHANNEL_MAP_WAVEEX +#define MA_PA_CHANNEL_MAP_OSS PA_CHANNEL_MAP_OSS +#define MA_PA_CHANNEL_MAP_DEFAULT PA_CHANNEL_MAP_DEFAULT + +typedef pa_sample_format_t ma_pa_sample_format_t; +#define MA_PA_SAMPLE_INVALID PA_SAMPLE_INVALID +#define MA_PA_SAMPLE_U8 PA_SAMPLE_U8 +#define MA_PA_SAMPLE_ALAW PA_SAMPLE_ALAW +#define MA_PA_SAMPLE_ULAW PA_SAMPLE_ULAW +#define MA_PA_SAMPLE_S16LE PA_SAMPLE_S16LE +#define MA_PA_SAMPLE_S16BE PA_SAMPLE_S16BE +#define MA_PA_SAMPLE_FLOAT32LE PA_SAMPLE_FLOAT32LE +#define MA_PA_SAMPLE_FLOAT32BE PA_SAMPLE_FLOAT32BE +#define MA_PA_SAMPLE_S32LE PA_SAMPLE_S32LE +#define MA_PA_SAMPLE_S32BE PA_SAMPLE_S32BE +#define MA_PA_SAMPLE_S24LE PA_SAMPLE_S24LE +#define MA_PA_SAMPLE_S24BE PA_SAMPLE_S24BE +#define MA_PA_SAMPLE_S24_32LE PA_SAMPLE_S24_32LE +#define MA_PA_SAMPLE_S24_32BE PA_SAMPLE_S24_32BE + +typedef pa_mainloop ma_pa_mainloop; +typedef pa_threaded_mainloop ma_pa_threaded_mainloop; +typedef pa_mainloop_api ma_pa_mainloop_api; +typedef pa_context ma_pa_context; +typedef pa_operation ma_pa_operation; +typedef pa_stream ma_pa_stream; 
+typedef pa_spawn_api ma_pa_spawn_api; +typedef pa_buffer_attr ma_pa_buffer_attr; +typedef pa_channel_map ma_pa_channel_map; +typedef pa_cvolume ma_pa_cvolume; +typedef pa_sample_spec ma_pa_sample_spec; +typedef pa_sink_info ma_pa_sink_info; +typedef pa_source_info ma_pa_source_info; + +typedef pa_context_notify_cb_t ma_pa_context_notify_cb_t; +typedef pa_sink_info_cb_t ma_pa_sink_info_cb_t; +typedef pa_source_info_cb_t ma_pa_source_info_cb_t; +typedef pa_stream_success_cb_t ma_pa_stream_success_cb_t; +typedef pa_stream_request_cb_t ma_pa_stream_request_cb_t; +typedef pa_stream_notify_cb_t ma_pa_stream_notify_cb_t; +typedef pa_free_cb_t ma_pa_free_cb_t; +#else +#define MA_PA_OK 0 +#define MA_PA_ERR_ACCESS 1 +#define MA_PA_ERR_INVALID 2 +#define MA_PA_ERR_NOENTITY 5 +#define MA_PA_ERR_NOTSUPPORTED 19 + +#define MA_PA_CHANNELS_MAX 32 +#define MA_PA_RATE_MAX 384000 + +typedef int ma_pa_context_flags_t; +#define MA_PA_CONTEXT_NOFLAGS 0x00000000 +#define MA_PA_CONTEXT_NOAUTOSPAWN 0x00000001 +#define MA_PA_CONTEXT_NOFAIL 0x00000002 + +typedef int ma_pa_stream_flags_t; +#define MA_PA_STREAM_NOFLAGS 0x00000000 +#define MA_PA_STREAM_START_CORKED 0x00000001 +#define MA_PA_STREAM_INTERPOLATE_TIMING 0x00000002 +#define MA_PA_STREAM_NOT_MONOTONIC 0x00000004 +#define MA_PA_STREAM_AUTO_TIMING_UPDATE 0x00000008 +#define MA_PA_STREAM_NO_REMAP_CHANNELS 0x00000010 +#define MA_PA_STREAM_NO_REMIX_CHANNELS 0x00000020 +#define MA_PA_STREAM_FIX_FORMAT 0x00000040 +#define MA_PA_STREAM_FIX_RATE 0x00000080 +#define MA_PA_STREAM_FIX_CHANNELS 0x00000100 +#define MA_PA_STREAM_DONT_MOVE 0x00000200 +#define MA_PA_STREAM_VARIABLE_RATE 0x00000400 +#define MA_PA_STREAM_PEAK_DETECT 0x00000800 +#define MA_PA_STREAM_START_MUTED 0x00001000 +#define MA_PA_STREAM_ADJUST_LATENCY 0x00002000 +#define MA_PA_STREAM_EARLY_REQUESTS 0x00004000 +#define MA_PA_STREAM_DONT_INHIBIT_AUTO_SUSPEND 0x00008000 +#define MA_PA_STREAM_START_UNMUTED 0x00010000 +#define MA_PA_STREAM_FAIL_ON_SUSPEND 0x00020000 +#define MA_PA_STREAM_RELATIVE_VOLUME 0x00040000 +#define MA_PA_STREAM_PASSTHROUGH 0x00080000 + +typedef int ma_pa_sink_flags_t; +#define MA_PA_SINK_NOFLAGS 0x00000000 +#define MA_PA_SINK_HW_VOLUME_CTRL 0x00000001 +#define MA_PA_SINK_LATENCY 0x00000002 +#define MA_PA_SINK_HARDWARE 0x00000004 +#define MA_PA_SINK_NETWORK 0x00000008 +#define MA_PA_SINK_HW_MUTE_CTRL 0x00000010 +#define MA_PA_SINK_DECIBEL_VOLUME 0x00000020 +#define MA_PA_SINK_FLAT_VOLUME 0x00000040 +#define MA_PA_SINK_DYNAMIC_LATENCY 0x00000080 +#define MA_PA_SINK_SET_FORMATS 0x00000100 + +typedef int ma_pa_source_flags_t; +#define MA_PA_SOURCE_NOFLAGS 0x00000000 +#define MA_PA_SOURCE_HW_VOLUME_CTRL 0x00000001 +#define MA_PA_SOURCE_LATENCY 0x00000002 +#define MA_PA_SOURCE_HARDWARE 0x00000004 +#define MA_PA_SOURCE_NETWORK 0x00000008 +#define MA_PA_SOURCE_HW_MUTE_CTRL 0x00000010 +#define MA_PA_SOURCE_DECIBEL_VOLUME 0x00000020 +#define MA_PA_SOURCE_DYNAMIC_LATENCY 0x00000040 +#define MA_PA_SOURCE_FLAT_VOLUME 0x00000080 + +typedef int ma_pa_context_state_t; +#define MA_PA_CONTEXT_UNCONNECTED 0 +#define MA_PA_CONTEXT_CONNECTING 1 +#define MA_PA_CONTEXT_AUTHORIZING 2 +#define MA_PA_CONTEXT_SETTING_NAME 3 +#define MA_PA_CONTEXT_READY 4 +#define MA_PA_CONTEXT_FAILED 5 +#define MA_PA_CONTEXT_TERMINATED 6 + +typedef int ma_pa_stream_state_t; +#define MA_PA_STREAM_UNCONNECTED 0 +#define MA_PA_STREAM_CREATING 1 +#define MA_PA_STREAM_READY 2 +#define MA_PA_STREAM_FAILED 3 +#define MA_PA_STREAM_TERMINATED 4 + +typedef int ma_pa_operation_state_t; +#define MA_PA_OPERATION_RUNNING 0 +#define 
MA_PA_OPERATION_DONE 1 +#define MA_PA_OPERATION_CANCELLED 2 + +typedef int ma_pa_sink_state_t; +#define MA_PA_SINK_INVALID_STATE -1 +#define MA_PA_SINK_RUNNING 0 +#define MA_PA_SINK_IDLE 1 +#define MA_PA_SINK_SUSPENDED 2 + +typedef int ma_pa_source_state_t; +#define MA_PA_SOURCE_INVALID_STATE -1 +#define MA_PA_SOURCE_RUNNING 0 +#define MA_PA_SOURCE_IDLE 1 +#define MA_PA_SOURCE_SUSPENDED 2 + +typedef int ma_pa_seek_mode_t; +#define MA_PA_SEEK_RELATIVE 0 +#define MA_PA_SEEK_ABSOLUTE 1 +#define MA_PA_SEEK_RELATIVE_ON_READ 2 +#define MA_PA_SEEK_RELATIVE_END 3 + +typedef int ma_pa_channel_position_t; +#define MA_PA_CHANNEL_POSITION_INVALID -1 +#define MA_PA_CHANNEL_POSITION_MONO 0 +#define MA_PA_CHANNEL_POSITION_FRONT_LEFT 1 +#define MA_PA_CHANNEL_POSITION_FRONT_RIGHT 2 +#define MA_PA_CHANNEL_POSITION_FRONT_CENTER 3 +#define MA_PA_CHANNEL_POSITION_REAR_CENTER 4 +#define MA_PA_CHANNEL_POSITION_REAR_LEFT 5 +#define MA_PA_CHANNEL_POSITION_REAR_RIGHT 6 +#define MA_PA_CHANNEL_POSITION_LFE 7 +#define MA_PA_CHANNEL_POSITION_FRONT_LEFT_OF_CENTER 8 +#define MA_PA_CHANNEL_POSITION_FRONT_RIGHT_OF_CENTER 9 +#define MA_PA_CHANNEL_POSITION_SIDE_LEFT 10 +#define MA_PA_CHANNEL_POSITION_SIDE_RIGHT 11 +#define MA_PA_CHANNEL_POSITION_AUX0 12 +#define MA_PA_CHANNEL_POSITION_AUX1 13 +#define MA_PA_CHANNEL_POSITION_AUX2 14 +#define MA_PA_CHANNEL_POSITION_AUX3 15 +#define MA_PA_CHANNEL_POSITION_AUX4 16 +#define MA_PA_CHANNEL_POSITION_AUX5 17 +#define MA_PA_CHANNEL_POSITION_AUX6 18 +#define MA_PA_CHANNEL_POSITION_AUX7 19 +#define MA_PA_CHANNEL_POSITION_AUX8 20 +#define MA_PA_CHANNEL_POSITION_AUX9 21 +#define MA_PA_CHANNEL_POSITION_AUX10 22 +#define MA_PA_CHANNEL_POSITION_AUX11 23 +#define MA_PA_CHANNEL_POSITION_AUX12 24 +#define MA_PA_CHANNEL_POSITION_AUX13 25 +#define MA_PA_CHANNEL_POSITION_AUX14 26 +#define MA_PA_CHANNEL_POSITION_AUX15 27 +#define MA_PA_CHANNEL_POSITION_AUX16 28 +#define MA_PA_CHANNEL_POSITION_AUX17 29 +#define MA_PA_CHANNEL_POSITION_AUX18 30 +#define MA_PA_CHANNEL_POSITION_AUX19 31 +#define MA_PA_CHANNEL_POSITION_AUX20 32 +#define MA_PA_CHANNEL_POSITION_AUX21 33 +#define MA_PA_CHANNEL_POSITION_AUX22 34 +#define MA_PA_CHANNEL_POSITION_AUX23 35 +#define MA_PA_CHANNEL_POSITION_AUX24 36 +#define MA_PA_CHANNEL_POSITION_AUX25 37 +#define MA_PA_CHANNEL_POSITION_AUX26 38 +#define MA_PA_CHANNEL_POSITION_AUX27 39 +#define MA_PA_CHANNEL_POSITION_AUX28 40 +#define MA_PA_CHANNEL_POSITION_AUX29 41 +#define MA_PA_CHANNEL_POSITION_AUX30 42 +#define MA_PA_CHANNEL_POSITION_AUX31 43 +#define MA_PA_CHANNEL_POSITION_TOP_CENTER 44 +#define MA_PA_CHANNEL_POSITION_TOP_FRONT_LEFT 45 +#define MA_PA_CHANNEL_POSITION_TOP_FRONT_RIGHT 46 +#define MA_PA_CHANNEL_POSITION_TOP_FRONT_CENTER 47 +#define MA_PA_CHANNEL_POSITION_TOP_REAR_LEFT 48 +#define MA_PA_CHANNEL_POSITION_TOP_REAR_RIGHT 49 +#define MA_PA_CHANNEL_POSITION_TOP_REAR_CENTER 50 +#define MA_PA_CHANNEL_POSITION_LEFT MA_PA_CHANNEL_POSITION_FRONT_LEFT +#define MA_PA_CHANNEL_POSITION_RIGHT MA_PA_CHANNEL_POSITION_FRONT_RIGHT +#define MA_PA_CHANNEL_POSITION_CENTER MA_PA_CHANNEL_POSITION_FRONT_CENTER +#define MA_PA_CHANNEL_POSITION_SUBWOOFER MA_PA_CHANNEL_POSITION_LFE + +typedef int ma_pa_channel_map_def_t; +#define MA_PA_CHANNEL_MAP_AIFF 0 +#define MA_PA_CHANNEL_MAP_ALSA 1 +#define MA_PA_CHANNEL_MAP_AUX 2 +#define MA_PA_CHANNEL_MAP_WAVEEX 3 +#define MA_PA_CHANNEL_MAP_OSS 4 +#define MA_PA_CHANNEL_MAP_DEFAULT MA_PA_CHANNEL_MAP_AIFF + +typedef int ma_pa_sample_format_t; +#define MA_PA_SAMPLE_INVALID -1 +#define MA_PA_SAMPLE_U8 0 +#define MA_PA_SAMPLE_ALAW 1 +#define 
MA_PA_SAMPLE_ULAW 2 +#define MA_PA_SAMPLE_S16LE 3 +#define MA_PA_SAMPLE_S16BE 4 +#define MA_PA_SAMPLE_FLOAT32LE 5 +#define MA_PA_SAMPLE_FLOAT32BE 6 +#define MA_PA_SAMPLE_S32LE 7 +#define MA_PA_SAMPLE_S32BE 8 +#define MA_PA_SAMPLE_S24LE 9 +#define MA_PA_SAMPLE_S24BE 10 +#define MA_PA_SAMPLE_S24_32LE 11 +#define MA_PA_SAMPLE_S24_32BE 12 + +typedef struct ma_pa_mainloop ma_pa_mainloop; +typedef struct ma_pa_threaded_mainloop ma_pa_threaded_mainloop; +typedef struct ma_pa_mainloop_api ma_pa_mainloop_api; +typedef struct ma_pa_context ma_pa_context; +typedef struct ma_pa_operation ma_pa_operation; +typedef struct ma_pa_stream ma_pa_stream; +typedef struct ma_pa_spawn_api ma_pa_spawn_api; + +typedef struct +{ + ma_uint32 maxlength; + ma_uint32 tlength; + ma_uint32 prebuf; + ma_uint32 minreq; + ma_uint32 fragsize; +} ma_pa_buffer_attr; + +typedef struct +{ + ma_uint8 channels; + ma_pa_channel_position_t map[MA_PA_CHANNELS_MAX]; +} ma_pa_channel_map; + +typedef struct +{ + ma_uint8 channels; + ma_uint32 values[MA_PA_CHANNELS_MAX]; +} ma_pa_cvolume; + +typedef struct +{ + ma_pa_sample_format_t format; + ma_uint32 rate; + ma_uint8 channels; +} ma_pa_sample_spec; + +typedef struct +{ + const char* name; + ma_uint32 index; + const char* description; + ma_pa_sample_spec sample_spec; + ma_pa_channel_map channel_map; + ma_uint32 owner_module; + ma_pa_cvolume volume; + int mute; + ma_uint32 monitor_source; + const char* monitor_source_name; + ma_uint64 latency; + const char* driver; + ma_pa_sink_flags_t flags; + void* proplist; + ma_uint64 configured_latency; + ma_uint32 base_volume; + ma_pa_sink_state_t state; + ma_uint32 n_volume_steps; + ma_uint32 card; + ma_uint32 n_ports; + void** ports; + void* active_port; + ma_uint8 n_formats; + void** formats; +} ma_pa_sink_info; + +typedef struct +{ + const char *name; + ma_uint32 index; + const char *description; + ma_pa_sample_spec sample_spec; + ma_pa_channel_map channel_map; + ma_uint32 owner_module; + ma_pa_cvolume volume; + int mute; + ma_uint32 monitor_of_sink; + const char *monitor_of_sink_name; + ma_uint64 latency; + const char *driver; + ma_pa_source_flags_t flags; + void* proplist; + ma_uint64 configured_latency; + ma_uint32 base_volume; + ma_pa_source_state_t state; + ma_uint32 n_volume_steps; + ma_uint32 card; + ma_uint32 n_ports; + void** ports; + void* active_port; + ma_uint8 n_formats; + void** formats; +} ma_pa_source_info; + +typedef void (* ma_pa_context_notify_cb_t)(ma_pa_context* c, void* userdata); +typedef void (* ma_pa_sink_info_cb_t) (ma_pa_context* c, const ma_pa_sink_info* i, int eol, void* userdata); +typedef void (* ma_pa_source_info_cb_t) (ma_pa_context* c, const ma_pa_source_info* i, int eol, void* userdata); +typedef void (* ma_pa_stream_success_cb_t)(ma_pa_stream* s, int success, void* userdata); +typedef void (* ma_pa_stream_request_cb_t)(ma_pa_stream* s, size_t nbytes, void* userdata); +typedef void (* ma_pa_stream_notify_cb_t) (ma_pa_stream* s, void* userdata); +typedef void (* ma_pa_free_cb_t) (void* p); +#endif + + +typedef ma_pa_mainloop* (* ma_pa_mainloop_new_proc) (void); +typedef void (* ma_pa_mainloop_free_proc) (ma_pa_mainloop* m); +typedef void (* ma_pa_mainloop_quit_proc) (ma_pa_mainloop* m, int retval); +typedef ma_pa_mainloop_api* (* ma_pa_mainloop_get_api_proc) (ma_pa_mainloop* m); +typedef int (* ma_pa_mainloop_iterate_proc) (ma_pa_mainloop* m, int block, int* retval); +typedef void (* ma_pa_mainloop_wakeup_proc) (ma_pa_mainloop* m); +typedef ma_pa_threaded_mainloop* (* ma_pa_threaded_mainloop_new_proc) (void); 
+typedef void (* ma_pa_threaded_mainloop_free_proc) (ma_pa_threaded_mainloop* m); +typedef int (* ma_pa_threaded_mainloop_start_proc) (ma_pa_threaded_mainloop* m); +typedef void (* ma_pa_threaded_mainloop_stop_proc) (ma_pa_threaded_mainloop* m); +typedef void (* ma_pa_threaded_mainloop_lock_proc) (ma_pa_threaded_mainloop* m); +typedef void (* ma_pa_threaded_mainloop_unlock_proc) (ma_pa_threaded_mainloop* m); +typedef void (* ma_pa_threaded_mainloop_wait_proc) (ma_pa_threaded_mainloop* m); +typedef void (* ma_pa_threaded_mainloop_signal_proc) (ma_pa_threaded_mainloop* m, int wait_for_accept); +typedef void (* ma_pa_threaded_mainloop_accept_proc) (ma_pa_threaded_mainloop* m); +typedef int (* ma_pa_threaded_mainloop_get_retval_proc) (ma_pa_threaded_mainloop* m); +typedef ma_pa_mainloop_api* (* ma_pa_threaded_mainloop_get_api_proc) (ma_pa_threaded_mainloop* m); +typedef int (* ma_pa_threaded_mainloop_in_thread_proc) (ma_pa_threaded_mainloop* m); +typedef void (* ma_pa_threaded_mainloop_set_name_proc) (ma_pa_threaded_mainloop* m, const char* name); +typedef ma_pa_context* (* ma_pa_context_new_proc) (ma_pa_mainloop_api* mainloop, const char* name); +typedef void (* ma_pa_context_unref_proc) (ma_pa_context* c); +typedef int (* ma_pa_context_connect_proc) (ma_pa_context* c, const char* server, ma_pa_context_flags_t flags, const ma_pa_spawn_api* api); +typedef void (* ma_pa_context_disconnect_proc) (ma_pa_context* c); +typedef void (* ma_pa_context_set_state_callback_proc) (ma_pa_context* c, ma_pa_context_notify_cb_t cb, void* userdata); +typedef ma_pa_context_state_t (* ma_pa_context_get_state_proc) (ma_pa_context* c); +typedef ma_pa_operation* (* ma_pa_context_get_sink_info_list_proc) (ma_pa_context* c, ma_pa_sink_info_cb_t cb, void* userdata); +typedef ma_pa_operation* (* ma_pa_context_get_source_info_list_proc) (ma_pa_context* c, ma_pa_source_info_cb_t cb, void* userdata); +typedef ma_pa_operation* (* ma_pa_context_get_sink_info_by_name_proc) (ma_pa_context* c, const char* name, ma_pa_sink_info_cb_t cb, void* userdata); +typedef ma_pa_operation* (* ma_pa_context_get_source_info_by_name_proc)(ma_pa_context* c, const char* name, ma_pa_source_info_cb_t cb, void* userdata); +typedef void (* ma_pa_operation_unref_proc) (ma_pa_operation* o); +typedef ma_pa_operation_state_t (* ma_pa_operation_get_state_proc) (ma_pa_operation* o); +typedef ma_pa_channel_map* (* ma_pa_channel_map_init_extend_proc) (ma_pa_channel_map* m, unsigned channels, ma_pa_channel_map_def_t def); +typedef int (* ma_pa_channel_map_valid_proc) (const ma_pa_channel_map* m); +typedef int (* ma_pa_channel_map_compatible_proc) (const ma_pa_channel_map* m, const ma_pa_sample_spec* ss); +typedef ma_pa_stream* (* ma_pa_stream_new_proc) (ma_pa_context* c, const char* name, const ma_pa_sample_spec* ss, const ma_pa_channel_map* map); +typedef void (* ma_pa_stream_unref_proc) (ma_pa_stream* s); +typedef int (* ma_pa_stream_connect_playback_proc) (ma_pa_stream* s, const char* dev, const ma_pa_buffer_attr* attr, ma_pa_stream_flags_t flags, const ma_pa_cvolume* volume, ma_pa_stream* sync_stream); +typedef int (* ma_pa_stream_connect_record_proc) (ma_pa_stream* s, const char* dev, const ma_pa_buffer_attr* attr, ma_pa_stream_flags_t flags); +typedef int (* ma_pa_stream_disconnect_proc) (ma_pa_stream* s); +typedef ma_pa_stream_state_t (* ma_pa_stream_get_state_proc) (ma_pa_stream* s); +typedef const ma_pa_sample_spec* (* ma_pa_stream_get_sample_spec_proc) (ma_pa_stream* s); +typedef const ma_pa_channel_map* (* ma_pa_stream_get_channel_map_proc) 
(ma_pa_stream* s); +typedef const ma_pa_buffer_attr* (* ma_pa_stream_get_buffer_attr_proc) (ma_pa_stream* s); +typedef ma_pa_operation* (* ma_pa_stream_set_buffer_attr_proc) (ma_pa_stream* s, const ma_pa_buffer_attr* attr, ma_pa_stream_success_cb_t cb, void* userdata); +typedef const char* (* ma_pa_stream_get_device_name_proc) (ma_pa_stream* s); +typedef void (* ma_pa_stream_set_write_callback_proc) (ma_pa_stream* s, ma_pa_stream_request_cb_t cb, void* userdata); +typedef void (* ma_pa_stream_set_read_callback_proc) (ma_pa_stream* s, ma_pa_stream_request_cb_t cb, void* userdata); +typedef void (* ma_pa_stream_set_suspended_callback_proc) (ma_pa_stream* s, ma_pa_stream_notify_cb_t cb, void* userdata); +typedef void (* ma_pa_stream_set_moved_callback_proc) (ma_pa_stream* s, ma_pa_stream_notify_cb_t cb, void* userdata); +typedef int (* ma_pa_stream_is_suspended_proc) (const ma_pa_stream* s); +typedef ma_pa_operation* (* ma_pa_stream_flush_proc) (ma_pa_stream* s, ma_pa_stream_success_cb_t cb, void* userdata); +typedef ma_pa_operation* (* ma_pa_stream_drain_proc) (ma_pa_stream* s, ma_pa_stream_success_cb_t cb, void* userdata); +typedef int (* ma_pa_stream_is_corked_proc) (ma_pa_stream* s); +typedef ma_pa_operation* (* ma_pa_stream_cork_proc) (ma_pa_stream* s, int b, ma_pa_stream_success_cb_t cb, void* userdata); +typedef ma_pa_operation* (* ma_pa_stream_trigger_proc) (ma_pa_stream* s, ma_pa_stream_success_cb_t cb, void* userdata); +typedef int (* ma_pa_stream_begin_write_proc) (ma_pa_stream* s, void** data, size_t* nbytes); +typedef int (* ma_pa_stream_write_proc) (ma_pa_stream* s, const void* data, size_t nbytes, ma_pa_free_cb_t free_cb, int64_t offset, ma_pa_seek_mode_t seek); +typedef int (* ma_pa_stream_peek_proc) (ma_pa_stream* s, const void** data, size_t* nbytes); +typedef int (* ma_pa_stream_drop_proc) (ma_pa_stream* s); +typedef size_t (* ma_pa_stream_writable_size_proc) (ma_pa_stream* s); +typedef size_t (* ma_pa_stream_readable_size_proc) (ma_pa_stream* s); + +typedef struct +{ + ma_uint32 count; + ma_uint32 capacity; + ma_device_info* pInfo; +} ma_pulse_device_enum_data; + +static ma_result ma_result_from_pulse(int result) +{ + if (result < 0) { + return MA_ERROR; + } + + switch (result) { + case MA_PA_OK: return MA_SUCCESS; + case MA_PA_ERR_ACCESS: return MA_ACCESS_DENIED; + case MA_PA_ERR_INVALID: return MA_INVALID_ARGS; + case MA_PA_ERR_NOENTITY: return MA_NO_DEVICE; + default: return MA_ERROR; + } +} + +#if 0 +static ma_pa_sample_format_t ma_format_to_pulse(ma_format format) +{ + if (ma_is_little_endian()) { + switch (format) { + case ma_format_s16: return MA_PA_SAMPLE_S16LE; + case ma_format_s24: return MA_PA_SAMPLE_S24LE; + case ma_format_s32: return MA_PA_SAMPLE_S32LE; + case ma_format_f32: return MA_PA_SAMPLE_FLOAT32LE; + default: break; + } + } else { + switch (format) { + case ma_format_s16: return MA_PA_SAMPLE_S16BE; + case ma_format_s24: return MA_PA_SAMPLE_S24BE; + case ma_format_s32: return MA_PA_SAMPLE_S32BE; + case ma_format_f32: return MA_PA_SAMPLE_FLOAT32BE; + default: break; + } + } + + /* Endian agnostic. 
*/ + switch (format) { + case ma_format_u8: return MA_PA_SAMPLE_U8; + default: return MA_PA_SAMPLE_INVALID; + } +} +#endif + +static ma_format ma_format_from_pulse(ma_pa_sample_format_t format) +{ + if (ma_is_little_endian()) { + switch (format) { + case MA_PA_SAMPLE_S16LE: return ma_format_s16; + case MA_PA_SAMPLE_S24LE: return ma_format_s24; + case MA_PA_SAMPLE_S32LE: return ma_format_s32; + case MA_PA_SAMPLE_FLOAT32LE: return ma_format_f32; + default: break; + } + } else { + switch (format) { + case MA_PA_SAMPLE_S16BE: return ma_format_s16; + case MA_PA_SAMPLE_S24BE: return ma_format_s24; + case MA_PA_SAMPLE_S32BE: return ma_format_s32; + case MA_PA_SAMPLE_FLOAT32BE: return ma_format_f32; + default: break; + } + } + + /* Endian agnostic. */ + switch (format) { + case MA_PA_SAMPLE_U8: return ma_format_u8; + default: return ma_format_unknown; + } +} + +static ma_channel ma_channel_position_from_pulse(ma_pa_channel_position_t position) +{ + switch (position) + { + case MA_PA_CHANNEL_POSITION_INVALID: return MA_CHANNEL_NONE; + case MA_PA_CHANNEL_POSITION_MONO: return MA_CHANNEL_MONO; + case MA_PA_CHANNEL_POSITION_FRONT_LEFT: return MA_CHANNEL_FRONT_LEFT; + case MA_PA_CHANNEL_POSITION_FRONT_RIGHT: return MA_CHANNEL_FRONT_RIGHT; + case MA_PA_CHANNEL_POSITION_FRONT_CENTER: return MA_CHANNEL_FRONT_CENTER; + case MA_PA_CHANNEL_POSITION_REAR_CENTER: return MA_CHANNEL_BACK_CENTER; + case MA_PA_CHANNEL_POSITION_REAR_LEFT: return MA_CHANNEL_BACK_LEFT; + case MA_PA_CHANNEL_POSITION_REAR_RIGHT: return MA_CHANNEL_BACK_RIGHT; + case MA_PA_CHANNEL_POSITION_LFE: return MA_CHANNEL_LFE; + case MA_PA_CHANNEL_POSITION_FRONT_LEFT_OF_CENTER: return MA_CHANNEL_FRONT_LEFT_CENTER; + case MA_PA_CHANNEL_POSITION_FRONT_RIGHT_OF_CENTER: return MA_CHANNEL_FRONT_RIGHT_CENTER; + case MA_PA_CHANNEL_POSITION_SIDE_LEFT: return MA_CHANNEL_SIDE_LEFT; + case MA_PA_CHANNEL_POSITION_SIDE_RIGHT: return MA_CHANNEL_SIDE_RIGHT; + case MA_PA_CHANNEL_POSITION_AUX0: return MA_CHANNEL_AUX_0; + case MA_PA_CHANNEL_POSITION_AUX1: return MA_CHANNEL_AUX_1; + case MA_PA_CHANNEL_POSITION_AUX2: return MA_CHANNEL_AUX_2; + case MA_PA_CHANNEL_POSITION_AUX3: return MA_CHANNEL_AUX_3; + case MA_PA_CHANNEL_POSITION_AUX4: return MA_CHANNEL_AUX_4; + case MA_PA_CHANNEL_POSITION_AUX5: return MA_CHANNEL_AUX_5; + case MA_PA_CHANNEL_POSITION_AUX6: return MA_CHANNEL_AUX_6; + case MA_PA_CHANNEL_POSITION_AUX7: return MA_CHANNEL_AUX_7; + case MA_PA_CHANNEL_POSITION_AUX8: return MA_CHANNEL_AUX_8; + case MA_PA_CHANNEL_POSITION_AUX9: return MA_CHANNEL_AUX_9; + case MA_PA_CHANNEL_POSITION_AUX10: return MA_CHANNEL_AUX_10; + case MA_PA_CHANNEL_POSITION_AUX11: return MA_CHANNEL_AUX_11; + case MA_PA_CHANNEL_POSITION_AUX12: return MA_CHANNEL_AUX_12; + case MA_PA_CHANNEL_POSITION_AUX13: return MA_CHANNEL_AUX_13; + case MA_PA_CHANNEL_POSITION_AUX14: return MA_CHANNEL_AUX_14; + case MA_PA_CHANNEL_POSITION_AUX15: return MA_CHANNEL_AUX_15; + case MA_PA_CHANNEL_POSITION_AUX16: return MA_CHANNEL_AUX_16; + case MA_PA_CHANNEL_POSITION_AUX17: return MA_CHANNEL_AUX_17; + case MA_PA_CHANNEL_POSITION_AUX18: return MA_CHANNEL_AUX_18; + case MA_PA_CHANNEL_POSITION_AUX19: return MA_CHANNEL_AUX_19; + case MA_PA_CHANNEL_POSITION_AUX20: return MA_CHANNEL_AUX_20; + case MA_PA_CHANNEL_POSITION_AUX21: return MA_CHANNEL_AUX_21; + case MA_PA_CHANNEL_POSITION_AUX22: return MA_CHANNEL_AUX_22; + case MA_PA_CHANNEL_POSITION_AUX23: return MA_CHANNEL_AUX_23; + case MA_PA_CHANNEL_POSITION_AUX24: return MA_CHANNEL_AUX_24; + case MA_PA_CHANNEL_POSITION_AUX25: return MA_CHANNEL_AUX_25; + case 
MA_PA_CHANNEL_POSITION_AUX26: return MA_CHANNEL_AUX_26; + case MA_PA_CHANNEL_POSITION_AUX27: return MA_CHANNEL_AUX_27; + case MA_PA_CHANNEL_POSITION_AUX28: return MA_CHANNEL_AUX_28; + case MA_PA_CHANNEL_POSITION_AUX29: return MA_CHANNEL_AUX_29; + case MA_PA_CHANNEL_POSITION_AUX30: return MA_CHANNEL_AUX_30; + case MA_PA_CHANNEL_POSITION_AUX31: return MA_CHANNEL_AUX_31; + case MA_PA_CHANNEL_POSITION_TOP_CENTER: return MA_CHANNEL_TOP_CENTER; + case MA_PA_CHANNEL_POSITION_TOP_FRONT_LEFT: return MA_CHANNEL_TOP_FRONT_LEFT; + case MA_PA_CHANNEL_POSITION_TOP_FRONT_RIGHT: return MA_CHANNEL_TOP_FRONT_RIGHT; + case MA_PA_CHANNEL_POSITION_TOP_FRONT_CENTER: return MA_CHANNEL_TOP_FRONT_CENTER; + case MA_PA_CHANNEL_POSITION_TOP_REAR_LEFT: return MA_CHANNEL_TOP_BACK_LEFT; + case MA_PA_CHANNEL_POSITION_TOP_REAR_RIGHT: return MA_CHANNEL_TOP_BACK_RIGHT; + case MA_PA_CHANNEL_POSITION_TOP_REAR_CENTER: return MA_CHANNEL_TOP_BACK_CENTER; + default: return MA_CHANNEL_NONE; + } +} + +#if 0 +static ma_pa_channel_position_t ma_channel_position_to_pulse(ma_channel position) +{ + switch (position) + { + case MA_CHANNEL_NONE: return MA_PA_CHANNEL_POSITION_INVALID; + case MA_CHANNEL_FRONT_LEFT: return MA_PA_CHANNEL_POSITION_FRONT_LEFT; + case MA_CHANNEL_FRONT_RIGHT: return MA_PA_CHANNEL_POSITION_FRONT_RIGHT; + case MA_CHANNEL_FRONT_CENTER: return MA_PA_CHANNEL_POSITION_FRONT_CENTER; + case MA_CHANNEL_LFE: return MA_PA_CHANNEL_POSITION_LFE; + case MA_CHANNEL_BACK_LEFT: return MA_PA_CHANNEL_POSITION_REAR_LEFT; + case MA_CHANNEL_BACK_RIGHT: return MA_PA_CHANNEL_POSITION_REAR_RIGHT; + case MA_CHANNEL_FRONT_LEFT_CENTER: return MA_PA_CHANNEL_POSITION_FRONT_LEFT_OF_CENTER; + case MA_CHANNEL_FRONT_RIGHT_CENTER: return MA_PA_CHANNEL_POSITION_FRONT_RIGHT_OF_CENTER; + case MA_CHANNEL_BACK_CENTER: return MA_PA_CHANNEL_POSITION_REAR_CENTER; + case MA_CHANNEL_SIDE_LEFT: return MA_PA_CHANNEL_POSITION_SIDE_LEFT; + case MA_CHANNEL_SIDE_RIGHT: return MA_PA_CHANNEL_POSITION_SIDE_RIGHT; + case MA_CHANNEL_TOP_CENTER: return MA_PA_CHANNEL_POSITION_TOP_CENTER; + case MA_CHANNEL_TOP_FRONT_LEFT: return MA_PA_CHANNEL_POSITION_TOP_FRONT_LEFT; + case MA_CHANNEL_TOP_FRONT_CENTER: return MA_PA_CHANNEL_POSITION_TOP_FRONT_CENTER; + case MA_CHANNEL_TOP_FRONT_RIGHT: return MA_PA_CHANNEL_POSITION_TOP_FRONT_RIGHT; + case MA_CHANNEL_TOP_BACK_LEFT: return MA_PA_CHANNEL_POSITION_TOP_REAR_LEFT; + case MA_CHANNEL_TOP_BACK_CENTER: return MA_PA_CHANNEL_POSITION_TOP_REAR_CENTER; + case MA_CHANNEL_TOP_BACK_RIGHT: return MA_PA_CHANNEL_POSITION_TOP_REAR_RIGHT; + case MA_CHANNEL_19: return MA_PA_CHANNEL_POSITION_AUX18; + case MA_CHANNEL_20: return MA_PA_CHANNEL_POSITION_AUX19; + case MA_CHANNEL_21: return MA_PA_CHANNEL_POSITION_AUX20; + case MA_CHANNEL_22: return MA_PA_CHANNEL_POSITION_AUX21; + case MA_CHANNEL_23: return MA_PA_CHANNEL_POSITION_AUX22; + case MA_CHANNEL_24: return MA_PA_CHANNEL_POSITION_AUX23; + case MA_CHANNEL_25: return MA_PA_CHANNEL_POSITION_AUX24; + case MA_CHANNEL_26: return MA_PA_CHANNEL_POSITION_AUX25; + case MA_CHANNEL_27: return MA_PA_CHANNEL_POSITION_AUX26; + case MA_CHANNEL_28: return MA_PA_CHANNEL_POSITION_AUX27; + case MA_CHANNEL_29: return MA_PA_CHANNEL_POSITION_AUX28; + case MA_CHANNEL_30: return MA_PA_CHANNEL_POSITION_AUX29; + case MA_CHANNEL_31: return MA_PA_CHANNEL_POSITION_AUX30; + case MA_CHANNEL_32: return MA_PA_CHANNEL_POSITION_AUX31; + default: return (ma_pa_channel_position_t)position; + } +} +#endif + +static ma_result ma_wait_for_operation__pulse(ma_context* pContext, ma_ptr pMainLoop, ma_pa_operation* pOP) +{ + int resultPA; 
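+    /*
+    Note on the helpers below: everything in the PulseAudio API is asynchronous.
+    To run an operation synchronously we pump the mainloop ourselves:
+    pa_mainloop_iterate() with block=1 dispatches one round of events per call,
+    and we keep iterating until the operation leaves the MA_PA_OPERATION_RUNNING
+    state (or iteration reports an error).
+    */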
+ ma_pa_operation_state_t state; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(pOP != NULL); + + for (;;) { + state = ((ma_pa_operation_get_state_proc)pContext->pulse.pa_operation_get_state)(pOP); + if (state != MA_PA_OPERATION_RUNNING) { + break; /* Done. */ + } + + resultPA = ((ma_pa_mainloop_iterate_proc)pContext->pulse.pa_mainloop_iterate)((ma_pa_mainloop*)pMainLoop, 1, NULL); + if (resultPA < 0) { + return ma_result_from_pulse(resultPA); + } + } + + return MA_SUCCESS; +} + +static ma_result ma_wait_for_operation_and_unref__pulse(ma_context* pContext, ma_ptr pMainLoop, ma_pa_operation* pOP) +{ + ma_result result; + + if (pOP == NULL) { + return MA_INVALID_ARGS; + } + + result = ma_wait_for_operation__pulse(pContext, pMainLoop, pOP); + ((ma_pa_operation_unref_proc)pContext->pulse.pa_operation_unref)(pOP); + + return result; +} + +static ma_result ma_wait_for_pa_context_to_connect__pulse(ma_context* pContext, ma_ptr pMainLoop, ma_ptr pPulseContext) +{ + int resultPA; + ma_pa_context_state_t state; + + for (;;) { + state = ((ma_pa_context_get_state_proc)pContext->pulse.pa_context_get_state)((ma_pa_context*)pPulseContext); + if (state == MA_PA_CONTEXT_READY) { + break; /* Done. */ + } + + if (state == MA_PA_CONTEXT_FAILED || state == MA_PA_CONTEXT_TERMINATED) { + ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[PulseAudio] An error occurred while connecting the PulseAudio context."); + return MA_ERROR; + } + + resultPA = ((ma_pa_mainloop_iterate_proc)pContext->pulse.pa_mainloop_iterate)((ma_pa_mainloop*)pMainLoop, 1, NULL); + if (resultPA < 0) { + return ma_result_from_pulse(resultPA); + } + } + + /* Should never get here. */ + return MA_SUCCESS; +} + +static ma_result ma_wait_for_pa_stream_to_connect__pulse(ma_context* pContext, ma_ptr pMainLoop, ma_ptr pStream) +{ + int resultPA; + ma_pa_stream_state_t state; + + for (;;) { + state = ((ma_pa_stream_get_state_proc)pContext->pulse.pa_stream_get_state)((ma_pa_stream*)pStream); + if (state == MA_PA_STREAM_READY) { + break; /* Done. */ + } + + if (state == MA_PA_STREAM_FAILED || state == MA_PA_STREAM_TERMINATED) { + ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[PulseAudio] An error occurred while connecting the PulseAudio stream."); + return MA_ERROR; + } + + resultPA = ((ma_pa_mainloop_iterate_proc)pContext->pulse.pa_mainloop_iterate)((ma_pa_mainloop*)pMainLoop, 1, NULL); + if (resultPA < 0) { + return ma_result_from_pulse(resultPA); + } + } + + return MA_SUCCESS; +} + + +static ma_result ma_init_pa_mainloop_and_pa_context__pulse(ma_context* pContext, const char* pApplicationName, const char* pServerName, ma_bool32 tryAutoSpawn, ma_ptr* ppMainLoop, ma_ptr* ppPulseContext) +{ + ma_result result; + ma_ptr pMainLoop; + ma_ptr pPulseContext; + + MA_ASSERT(ppMainLoop != NULL); + MA_ASSERT(ppPulseContext != NULL); + + /* The PulseAudio context maps well to miniaudio's notion of a context. The pa_context object will be initialized as part of the ma_context. 
*/ + pMainLoop = ((ma_pa_mainloop_new_proc)pContext->pulse.pa_mainloop_new)(); + if (pMainLoop == NULL) { + ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to create mainloop."); + return MA_FAILED_TO_INIT_BACKEND; + } + + pPulseContext = ((ma_pa_context_new_proc)pContext->pulse.pa_context_new)(((ma_pa_mainloop_get_api_proc)pContext->pulse.pa_mainloop_get_api)((ma_pa_mainloop*)pMainLoop), pApplicationName); + if (pPulseContext == NULL) { + ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to create PulseAudio context."); + ((ma_pa_mainloop_free_proc)pContext->pulse.pa_mainloop_free)((ma_pa_mainloop*)(pMainLoop)); + return MA_FAILED_TO_INIT_BACKEND; + } + + /* Now we need to connect to the context. Everything is asynchronous so we need to wait for it to connect before returning. */ + result = ma_result_from_pulse(((ma_pa_context_connect_proc)pContext->pulse.pa_context_connect)((ma_pa_context*)pPulseContext, pServerName, (tryAutoSpawn) ? 0 : MA_PA_CONTEXT_NOAUTOSPAWN, NULL)); + if (result != MA_SUCCESS) { + ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to connect PulseAudio context."); + ((ma_pa_mainloop_free_proc)pContext->pulse.pa_mainloop_free)((ma_pa_mainloop*)(pMainLoop)); + return result; + } + + /* Since ma_context_init() runs synchronously we need to wait for the PulseAudio context to connect before we return. */ + result = ma_wait_for_pa_context_to_connect__pulse(pContext, pMainLoop, pPulseContext); + if (result != MA_SUCCESS) { + ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[PulseAudio] Waiting for connection failed."); + ((ma_pa_mainloop_free_proc)pContext->pulse.pa_mainloop_free)((ma_pa_mainloop*)(pMainLoop)); + return result; + } + + *ppMainLoop = pMainLoop; + *ppPulseContext = pPulseContext; + + return MA_SUCCESS; +} + + +static void ma_device_sink_info_callback(ma_pa_context* pPulseContext, const ma_pa_sink_info* pInfo, int endOfList, void* pUserData) +{ + ma_pa_sink_info* pInfoOut; + + if (endOfList > 0) { + return; + } + + /* + There has been a report that indicates that pInfo can be null which results + in a null pointer dereference below. We'll check for this for safety. + */ + if (pInfo == NULL) { + return; + } + + pInfoOut = (ma_pa_sink_info*)pUserData; + MA_ASSERT(pInfoOut != NULL); + + *pInfoOut = *pInfo; + + (void)pPulseContext; /* Unused. */ +} + +static void ma_device_source_info_callback(ma_pa_context* pPulseContext, const ma_pa_source_info* pInfo, int endOfList, void* pUserData) +{ + ma_pa_source_info* pInfoOut; + + if (endOfList > 0) { + return; + } + + /* + There has been a report that indicates that pInfo can be null which results + in a null pointer dereference below. We'll check for this for safety. + */ + if (pInfo == NULL) { + return; + } + + pInfoOut = (ma_pa_source_info*)pUserData; + MA_ASSERT(pInfoOut != NULL); + + *pInfoOut = *pInfo; + + (void)pPulseContext; /* Unused. */ +} + +#if 0 +static void ma_device_sink_name_callback(ma_pa_context* pPulseContext, const ma_pa_sink_info* pInfo, int endOfList, void* pUserData) +{ + ma_device* pDevice; + + if (endOfList > 0) { + return; + } + + pDevice = (ma_device*)pUserData; + MA_ASSERT(pDevice != NULL); + + ma_strncpy_s(pDevice->playback.name, sizeof(pDevice->playback.name), pInfo->description, (size_t)-1); + + (void)pPulseContext; /* Unused. 
*/ +} + +static void ma_device_source_name_callback(ma_pa_context* pPulseContext, const ma_pa_source_info* pInfo, int endOfList, void* pUserData) +{ + ma_device* pDevice; + + if (endOfList > 0) { + return; + } + + pDevice = (ma_device*)pUserData; + MA_ASSERT(pDevice != NULL); + + ma_strncpy_s(pDevice->capture.name, sizeof(pDevice->capture.name), pInfo->description, (size_t)-1); + + (void)pPulseContext; /* Unused. */ +} +#endif + +static ma_result ma_context_get_sink_info__pulse(ma_context* pContext, const char* pDeviceName, ma_pa_sink_info* pSinkInfo) +{ + ma_pa_operation* pOP; + + pOP = ((ma_pa_context_get_sink_info_by_name_proc)pContext->pulse.pa_context_get_sink_info_by_name)((ma_pa_context*)pContext->pulse.pPulseContext, pDeviceName, ma_device_sink_info_callback, pSinkInfo); + if (pOP == NULL) { + return MA_ERROR; + } + + return ma_wait_for_operation_and_unref__pulse(pContext, pContext->pulse.pMainLoop, pOP); +} + +static ma_result ma_context_get_source_info__pulse(ma_context* pContext, const char* pDeviceName, ma_pa_source_info* pSourceInfo) +{ + ma_pa_operation* pOP; + + pOP = ((ma_pa_context_get_source_info_by_name_proc)pContext->pulse.pa_context_get_source_info_by_name)((ma_pa_context*)pContext->pulse.pPulseContext, pDeviceName, ma_device_source_info_callback, pSourceInfo); + if (pOP == NULL) { + return MA_ERROR; + } + + return ma_wait_for_operation_and_unref__pulse(pContext, pContext->pulse.pMainLoop, pOP); +} + +static ma_result ma_context_get_default_device_index__pulse(ma_context* pContext, ma_device_type deviceType, ma_uint32* pIndex) +{ + ma_result result; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(pIndex != NULL); + + if (pIndex != NULL) { + *pIndex = (ma_uint32)-1; + } + + if (deviceType == ma_device_type_playback) { + ma_pa_sink_info sinkInfo; + result = ma_context_get_sink_info__pulse(pContext, NULL, &sinkInfo); + if (result != MA_SUCCESS) { + return result; + } + + if (pIndex != NULL) { + *pIndex = sinkInfo.index; + } + } + + if (deviceType == ma_device_type_capture) { + ma_pa_source_info sourceInfo; + result = ma_context_get_source_info__pulse(pContext, NULL, &sourceInfo); + if (result != MA_SUCCESS) { + return result; + } + + if (pIndex != NULL) { + *pIndex = sourceInfo.index; + } + } + + return MA_SUCCESS; +} + + +typedef struct +{ + ma_context* pContext; + ma_enum_devices_callback_proc callback; + void* pUserData; + ma_bool32 isTerminated; + ma_uint32 defaultDeviceIndexPlayback; + ma_uint32 defaultDeviceIndexCapture; +} ma_context_enumerate_devices_callback_data__pulse; + +static void ma_context_enumerate_devices_sink_callback__pulse(ma_pa_context* pPulseContext, const ma_pa_sink_info* pSinkInfo, int endOfList, void* pUserData) +{ + ma_context_enumerate_devices_callback_data__pulse* pData = (ma_context_enumerate_devices_callback_data__pulse*)pUserData; + ma_device_info deviceInfo; + + MA_ASSERT(pData != NULL); + + if (endOfList || pData->isTerminated) { + return; + } + + MA_ZERO_OBJECT(&deviceInfo); + + /* The name from PulseAudio is the ID for miniaudio. */ + if (pSinkInfo->name != NULL) { + ma_strncpy_s(deviceInfo.id.pulse, sizeof(deviceInfo.id.pulse), pSinkInfo->name, (size_t)-1); + } + + /* The description from PulseAudio is the name for miniaudio. 
*/ + if (pSinkInfo->description != NULL) { + ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), pSinkInfo->description, (size_t)-1); + } + + if (pSinkInfo->index == pData->defaultDeviceIndexPlayback) { + deviceInfo.isDefault = MA_TRUE; + } + + pData->isTerminated = !pData->callback(pData->pContext, ma_device_type_playback, &deviceInfo, pData->pUserData); + + (void)pPulseContext; /* Unused. */ +} + +static void ma_context_enumerate_devices_source_callback__pulse(ma_pa_context* pPulseContext, const ma_pa_source_info* pSourceInfo, int endOfList, void* pUserData) +{ + ma_context_enumerate_devices_callback_data__pulse* pData = (ma_context_enumerate_devices_callback_data__pulse*)pUserData; + ma_device_info deviceInfo; + + MA_ASSERT(pData != NULL); + + if (endOfList || pData->isTerminated) { + return; + } + + MA_ZERO_OBJECT(&deviceInfo); + + /* The name from PulseAudio is the ID for miniaudio. */ + if (pSourceInfo->name != NULL) { + ma_strncpy_s(deviceInfo.id.pulse, sizeof(deviceInfo.id.pulse), pSourceInfo->name, (size_t)-1); + } + + /* The description from PulseAudio is the name for miniaudio. */ + if (pSourceInfo->description != NULL) { + ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), pSourceInfo->description, (size_t)-1); + } + + if (pSourceInfo->index == pData->defaultDeviceIndexCapture) { + deviceInfo.isDefault = MA_TRUE; + } + + pData->isTerminated = !pData->callback(pData->pContext, ma_device_type_capture, &deviceInfo, pData->pUserData); + + (void)pPulseContext; /* Unused. */ +} + +static ma_result ma_context_enumerate_devices__pulse(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData) +{ + ma_result result = MA_SUCCESS; + ma_context_enumerate_devices_callback_data__pulse callbackData; + ma_pa_operation* pOP = NULL; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(callback != NULL); + + callbackData.pContext = pContext; + callbackData.callback = callback; + callbackData.pUserData = pUserData; + callbackData.isTerminated = MA_FALSE; + callbackData.defaultDeviceIndexPlayback = (ma_uint32)-1; + callbackData.defaultDeviceIndexCapture = (ma_uint32)-1; + + /* We need to get the index of the default devices. */ + ma_context_get_default_device_index__pulse(pContext, ma_device_type_playback, &callbackData.defaultDeviceIndexPlayback); + ma_context_get_default_device_index__pulse(pContext, ma_device_type_capture, &callbackData.defaultDeviceIndexCapture); + + /* Playback. */ + if (!callbackData.isTerminated) { + pOP = ((ma_pa_context_get_sink_info_list_proc)pContext->pulse.pa_context_get_sink_info_list)((ma_pa_context*)(pContext->pulse.pPulseContext), ma_context_enumerate_devices_sink_callback__pulse, &callbackData); + if (pOP == NULL) { + result = MA_ERROR; + goto done; + } + + result = ma_wait_for_operation__pulse(pContext, pContext->pulse.pMainLoop, pOP); + ((ma_pa_operation_unref_proc)pContext->pulse.pa_operation_unref)(pOP); + + if (result != MA_SUCCESS) { + goto done; + } + } + + + /* Capture. 
*/ + if (!callbackData.isTerminated) { + pOP = ((ma_pa_context_get_source_info_list_proc)pContext->pulse.pa_context_get_source_info_list)((ma_pa_context*)(pContext->pulse.pPulseContext), ma_context_enumerate_devices_source_callback__pulse, &callbackData); + if (pOP == NULL) { + result = MA_ERROR; + goto done; + } + + result = ma_wait_for_operation__pulse(pContext, pContext->pulse.pMainLoop, pOP); + ((ma_pa_operation_unref_proc)pContext->pulse.pa_operation_unref)(pOP); + + if (result != MA_SUCCESS) { + goto done; + } + } + +done: + return result; +} + + +typedef struct +{ + ma_device_info* pDeviceInfo; + ma_uint32 defaultDeviceIndex; + ma_bool32 foundDevice; +} ma_context_get_device_info_callback_data__pulse; + +static void ma_context_get_device_info_sink_callback__pulse(ma_pa_context* pPulseContext, const ma_pa_sink_info* pInfo, int endOfList, void* pUserData) +{ + ma_context_get_device_info_callback_data__pulse* pData = (ma_context_get_device_info_callback_data__pulse*)pUserData; + + if (endOfList > 0) { + return; + } + + MA_ASSERT(pData != NULL); + pData->foundDevice = MA_TRUE; + + if (pInfo->name != NULL) { + ma_strncpy_s(pData->pDeviceInfo->id.pulse, sizeof(pData->pDeviceInfo->id.pulse), pInfo->name, (size_t)-1); + } + + if (pInfo->description != NULL) { + ma_strncpy_s(pData->pDeviceInfo->name, sizeof(pData->pDeviceInfo->name), pInfo->description, (size_t)-1); + } + + /* + We're just reporting a single data format here. I think technically PulseAudio might support + all formats, but I don't trust that PulseAudio will do *anything* right, so I'm just going to + report the "native" device format. + */ + pData->pDeviceInfo->nativeDataFormats[0].format = ma_format_from_pulse(pInfo->sample_spec.format); + pData->pDeviceInfo->nativeDataFormats[0].channels = pInfo->sample_spec.channels; + pData->pDeviceInfo->nativeDataFormats[0].sampleRate = pInfo->sample_spec.rate; + pData->pDeviceInfo->nativeDataFormats[0].flags = 0; + pData->pDeviceInfo->nativeDataFormatCount = 1; + + if (pData->defaultDeviceIndex == pInfo->index) { + pData->pDeviceInfo->isDefault = MA_TRUE; + } + + (void)pPulseContext; /* Unused. */ +} + +static void ma_context_get_device_info_source_callback__pulse(ma_pa_context* pPulseContext, const ma_pa_source_info* pInfo, int endOfList, void* pUserData) +{ + ma_context_get_device_info_callback_data__pulse* pData = (ma_context_get_device_info_callback_data__pulse*)pUserData; + + if (endOfList > 0) { + return; + } + + MA_ASSERT(pData != NULL); + pData->foundDevice = MA_TRUE; + + if (pInfo->name != NULL) { + ma_strncpy_s(pData->pDeviceInfo->id.pulse, sizeof(pData->pDeviceInfo->id.pulse), pInfo->name, (size_t)-1); + } + + if (pInfo->description != NULL) { + ma_strncpy_s(pData->pDeviceInfo->name, sizeof(pData->pDeviceInfo->name), pInfo->description, (size_t)-1); + } + + /* + We're just reporting a single data format here. I think technically PulseAudio might support + all formats, but I don't trust that PulseAudio will do *anything* right, so I'm just going to + report the "native" device format. 
+ */ + pData->pDeviceInfo->nativeDataFormats[0].format = ma_format_from_pulse(pInfo->sample_spec.format); + pData->pDeviceInfo->nativeDataFormats[0].channels = pInfo->sample_spec.channels; + pData->pDeviceInfo->nativeDataFormats[0].sampleRate = pInfo->sample_spec.rate; + pData->pDeviceInfo->nativeDataFormats[0].flags = 0; + pData->pDeviceInfo->nativeDataFormatCount = 1; + + if (pData->defaultDeviceIndex == pInfo->index) { + pData->pDeviceInfo->isDefault = MA_TRUE; + } + + (void)pPulseContext; /* Unused. */ +} + +static ma_result ma_context_get_device_info__pulse(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_device_info* pDeviceInfo) +{ + ma_result result = MA_SUCCESS; + ma_context_get_device_info_callback_data__pulse callbackData; + ma_pa_operation* pOP = NULL; + const char* pDeviceName = NULL; + + MA_ASSERT(pContext != NULL); + + callbackData.pDeviceInfo = pDeviceInfo; + callbackData.foundDevice = MA_FALSE; + + if (pDeviceID != NULL) { + pDeviceName = pDeviceID->pulse; + } else { + pDeviceName = NULL; + } + + result = ma_context_get_default_device_index__pulse(pContext, deviceType, &callbackData.defaultDeviceIndex); + + if (deviceType == ma_device_type_playback) { + pOP = ((ma_pa_context_get_sink_info_by_name_proc)pContext->pulse.pa_context_get_sink_info_by_name)((ma_pa_context*)(pContext->pulse.pPulseContext), pDeviceName, ma_context_get_device_info_sink_callback__pulse, &callbackData); + } else { + pOP = ((ma_pa_context_get_source_info_by_name_proc)pContext->pulse.pa_context_get_source_info_by_name)((ma_pa_context*)(pContext->pulse.pPulseContext), pDeviceName, ma_context_get_device_info_source_callback__pulse, &callbackData); + } + + if (pOP != NULL) { + ma_wait_for_operation_and_unref__pulse(pContext, pContext->pulse.pMainLoop, pOP); + } else { + result = MA_ERROR; + goto done; + } + + if (!callbackData.foundDevice) { + result = MA_NO_DEVICE; + goto done; + } + +done: + return result; +} + +static ma_result ma_device_uninit__pulse(ma_device* pDevice) +{ + ma_context* pContext; + + MA_ASSERT(pDevice != NULL); + + pContext = pDevice->pContext; + MA_ASSERT(pContext != NULL); + + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + ((ma_pa_stream_disconnect_proc)pContext->pulse.pa_stream_disconnect)((ma_pa_stream*)pDevice->pulse.pStreamCapture); + ((ma_pa_stream_unref_proc)pContext->pulse.pa_stream_unref)((ma_pa_stream*)pDevice->pulse.pStreamCapture); + } + + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + ((ma_pa_stream_disconnect_proc)pContext->pulse.pa_stream_disconnect)((ma_pa_stream*)pDevice->pulse.pStreamPlayback); + ((ma_pa_stream_unref_proc)pContext->pulse.pa_stream_unref)((ma_pa_stream*)pDevice->pulse.pStreamPlayback); + } + + if (pDevice->type == ma_device_type_duplex) { + ma_duplex_rb_uninit(&pDevice->duplexRB); + } + + ((ma_pa_context_disconnect_proc)pContext->pulse.pa_context_disconnect)((ma_pa_context*)pDevice->pulse.pPulseContext); + ((ma_pa_context_unref_proc)pContext->pulse.pa_context_unref)((ma_pa_context*)pDevice->pulse.pPulseContext); + ((ma_pa_mainloop_free_proc)pContext->pulse.pa_mainloop_free)((ma_pa_mainloop*)pDevice->pulse.pMainLoop); + + return MA_SUCCESS; +} + +static ma_pa_buffer_attr ma_device__pa_buffer_attr_new(ma_uint32 periodSizeInFrames, ma_uint32 periods, const ma_pa_sample_spec* ss) +{ + ma_pa_buffer_attr attr; + attr.maxlength = periodSizeInFrames * periods * ma_get_bytes_per_frame(ma_format_from_pulse(ss->format), ss->channels); + 
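+    /*
+    pa_buffer_attr semantics: maxlength (set above) is the total buffer size in
+    bytes. tlength is the target fill level for playback and fragsize the
+    fragment size for capture; both are set to one period here. prebuf and
+    minreq are set to (ma_uint32)-1 so the server chooses its own defaults.
+    */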
attr.tlength = attr.maxlength / periods; + attr.prebuf = (ma_uint32)-1; + attr.minreq = (ma_uint32)-1; + attr.fragsize = attr.maxlength / periods; + + return attr; +} + +static ma_pa_stream* ma_device__pa_stream_new__pulse(ma_device* pDevice, const char* pStreamName, const ma_pa_sample_spec* ss, const ma_pa_channel_map* cmap) +{ + static ma_atomic_uint32 g_StreamCounter = { 0 }; + char actualStreamName[256]; + + if (pStreamName != NULL) { + ma_strncpy_s(actualStreamName, sizeof(actualStreamName), pStreamName, (size_t)-1); + } else { + const char* pBaseName = "miniaudio:"; + size_t baseNameLen = strlen(pBaseName); + ma_strcpy_s(actualStreamName, sizeof(actualStreamName), pBaseName); + ma_itoa_s((int)ma_atomic_uint32_get(&g_StreamCounter), actualStreamName + baseNameLen, sizeof(actualStreamName)-baseNameLen, 10); + } + ma_atomic_uint32_fetch_add(&g_StreamCounter, 1); + + return ((ma_pa_stream_new_proc)pDevice->pContext->pulse.pa_stream_new)((ma_pa_context*)pDevice->pulse.pPulseContext, actualStreamName, ss, cmap); +} + + +static void ma_device_on_read__pulse(ma_pa_stream* pStream, size_t byteCount, void* pUserData) +{ + ma_device* pDevice = (ma_device*)pUserData; + ma_uint32 bpf; + ma_uint32 deviceState; + ma_uint64 frameCount; + ma_uint64 framesProcessed; + + MA_ASSERT(pDevice != NULL); + + /* + Don't do anything if the device isn't initialized yet. Yes, this can happen because PulseAudio + can fire this callback before the stream has even started. Ridiculous. + */ + deviceState = ma_device_get_state(pDevice); + if (deviceState != ma_device_state_starting && deviceState != ma_device_state_started) { + return; + } + + bpf = ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels); + MA_ASSERT(bpf > 0); + + frameCount = byteCount / bpf; + framesProcessed = 0; + + while (ma_device_get_state(pDevice) == ma_device_state_started && framesProcessed < frameCount) { + const void* pMappedPCMFrames; + size_t bytesMapped; + ma_uint64 framesMapped; + + int pulseResult = ((ma_pa_stream_peek_proc)pDevice->pContext->pulse.pa_stream_peek)(pStream, &pMappedPCMFrames, &bytesMapped); + if (pulseResult < 0) { + break; /* Failed to map. Abort. */ + } + + framesMapped = bytesMapped / bpf; + if (framesMapped > 0) { + if (pMappedPCMFrames != NULL) { + ma_device_handle_backend_data_callback(pDevice, NULL, pMappedPCMFrames, framesMapped); + } else { + /* It's a hole. */ + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "[PulseAudio] ma_device_on_read__pulse: Hole.\n"); + } + + pulseResult = ((ma_pa_stream_drop_proc)pDevice->pContext->pulse.pa_stream_drop)(pStream); + if (pulseResult < 0) { + break; /* Failed to drop the buffer. */ + } + + framesProcessed += framesMapped; + + } else { + /* Nothing was mapped. Just abort. 
*/ + break; + } + } +} + +static ma_result ma_device_write_to_stream__pulse(ma_device* pDevice, ma_pa_stream* pStream, ma_uint64* pFramesProcessed) +{ + ma_result result = MA_SUCCESS; + ma_uint64 framesProcessed = 0; + size_t bytesMapped; + ma_uint32 bpf; + ma_uint32 deviceState; + + MA_ASSERT(pDevice != NULL); + MA_ASSERT(pStream != NULL); + + bpf = ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels); + MA_ASSERT(bpf > 0); + + deviceState = ma_device_get_state(pDevice); + + bytesMapped = ((ma_pa_stream_writable_size_proc)pDevice->pContext->pulse.pa_stream_writable_size)(pStream); + if (bytesMapped != (size_t)-1) { + if (bytesMapped > 0) { + ma_uint64 framesMapped; + void* pMappedPCMFrames; + int pulseResult = ((ma_pa_stream_begin_write_proc)pDevice->pContext->pulse.pa_stream_begin_write)(pStream, &pMappedPCMFrames, &bytesMapped); + if (pulseResult < 0) { + result = ma_result_from_pulse(pulseResult); + goto done; + } + + framesMapped = bytesMapped / bpf; + + if (deviceState == ma_device_state_started || deviceState == ma_device_state_starting) { /* Check for starting state just in case this is being used to do the initial fill. */ + ma_device_handle_backend_data_callback(pDevice, pMappedPCMFrames, NULL, framesMapped); + } else { + /* Device is not started. Write silence. */ + ma_silence_pcm_frames(pMappedPCMFrames, framesMapped, pDevice->playback.format, pDevice->playback.channels); + } + + pulseResult = ((ma_pa_stream_write_proc)pDevice->pContext->pulse.pa_stream_write)(pStream, pMappedPCMFrames, bytesMapped, NULL, 0, MA_PA_SEEK_RELATIVE); + if (pulseResult < 0) { + result = ma_result_from_pulse(pulseResult); + goto done; /* Failed to write data to stream. */ + } + + framesProcessed += framesMapped; + } else { + result = MA_SUCCESS; /* No data available for writing. */ + goto done; + } + } else { + result = MA_ERROR; /* Failed to retrieve the writable size. Abort. */ + goto done; + } + +done: + if (pFramesProcessed != NULL) { + *pFramesProcessed = framesProcessed; + } + + return result; +} + +static void ma_device_on_write__pulse(ma_pa_stream* pStream, size_t byteCount, void* pUserData) +{ + ma_device* pDevice = (ma_device*)pUserData; + ma_uint32 bpf; + ma_uint64 frameCount; + ma_uint64 framesProcessed; + ma_uint32 deviceState; + ma_result result; + + MA_ASSERT(pDevice != NULL); + + /* + Don't do anything if the device isn't initialized yet. Yes, this can happen because PulseAudio + can fire this callback before the stream has even started. Ridiculous. + */ + deviceState = ma_device_get_state(pDevice); + if (deviceState != ma_device_state_starting && deviceState != ma_device_state_started) { + return; + } + + bpf = ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels); + MA_ASSERT(bpf > 0); + + frameCount = byteCount / bpf; + framesProcessed = 0; + + while (framesProcessed < frameCount) { + ma_uint64 framesProcessedThisIteration; + + /* Don't keep trying to process frames if the device isn't started. 
*/ + deviceState = ma_device_get_state(pDevice); + if (deviceState != ma_device_state_starting && deviceState != ma_device_state_started) { + break; + } + + result = ma_device_write_to_stream__pulse(pDevice, pStream, &framesProcessedThisIteration); + if (result != MA_SUCCESS) { + break; + } + + framesProcessed += framesProcessedThisIteration; + } +} + +static void ma_device_on_suspended__pulse(ma_pa_stream* pStream, void* pUserData) +{ + ma_device* pDevice = (ma_device*)pUserData; + int suspended; + + (void)pStream; + + suspended = ((ma_pa_stream_is_suspended_proc)pDevice->pContext->pulse.pa_stream_is_suspended)(pStream); + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "[Pulse] Device suspended state changed. pa_stream_is_suspended() returned %d.\n", suspended); + + if (suspended < 0) { + return; + } + + if (suspended == 1) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "[Pulse] Device suspended state changed. Suspended.\n"); + ma_device__on_notification_stopped(pDevice); + } else { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "[Pulse] Device suspended state changed. Resumed.\n"); + ma_device__on_notification_started(pDevice); + } +} + +static void ma_device_on_rerouted__pulse(ma_pa_stream* pStream, void* pUserData) +{ + ma_device* pDevice = (ma_device*)pUserData; + + (void)pStream; + (void)pUserData; + + ma_device__on_notification_rerouted(pDevice); +} + +static ma_uint32 ma_calculate_period_size_in_frames_from_descriptor__pulse(const ma_device_descriptor* pDescriptor, ma_uint32 nativeSampleRate, ma_performance_profile performanceProfile) +{ + /* + There have been reports from users where buffers of < ~20ms result glitches when running through + PipeWire. To work around this we're going to have to use a different default buffer size. + */ + const ma_uint32 defaultPeriodSizeInMilliseconds_LowLatency = 25; + const ma_uint32 defaultPeriodSizeInMilliseconds_Conservative = MA_DEFAULT_PERIOD_SIZE_IN_MILLISECONDS_CONSERVATIVE; + + MA_ASSERT(nativeSampleRate != 0); + + if (pDescriptor->periodSizeInFrames == 0) { + if (pDescriptor->periodSizeInMilliseconds == 0) { + if (performanceProfile == ma_performance_profile_low_latency) { + return ma_calculate_buffer_size_in_frames_from_milliseconds(defaultPeriodSizeInMilliseconds_LowLatency, nativeSampleRate); + } else { + return ma_calculate_buffer_size_in_frames_from_milliseconds(defaultPeriodSizeInMilliseconds_Conservative, nativeSampleRate); + } + } else { + return ma_calculate_buffer_size_in_frames_from_milliseconds(pDescriptor->periodSizeInMilliseconds, nativeSampleRate); + } + } else { + return pDescriptor->periodSizeInFrames; + } +} + +static ma_result ma_device_init__pulse(ma_device* pDevice, const ma_device_config* pConfig, ma_device_descriptor* pDescriptorPlayback, ma_device_descriptor* pDescriptorCapture) +{ + /* + Notes for PulseAudio: + + - When both the period size in frames and milliseconds are 0, we default to miniaudio's + default buffer sizes rather than leaving it up to PulseAudio because I don't trust + PulseAudio to give us any kind of reasonable latency by default. + + - Do not ever, *ever* forget to use MA_PA_STREAM_ADJUST_LATENCY. If you don't specify this + flag, capture mode will just not work properly until you open another PulseAudio app. 
+ */ + + ma_result result = MA_SUCCESS; + int error = 0; + const char* devPlayback = NULL; + const char* devCapture = NULL; + ma_format format = ma_format_unknown; + ma_uint32 channels = 0; + ma_uint32 sampleRate = 0; + ma_pa_sink_info sinkInfo; + ma_pa_source_info sourceInfo; + ma_pa_sample_spec ss; + ma_pa_channel_map cmap; + ma_pa_buffer_attr attr; + const ma_pa_sample_spec* pActualSS = NULL; + const ma_pa_buffer_attr* pActualAttr = NULL; + const ma_pa_channel_map* pActualChannelMap = NULL; + ma_uint32 iChannel; + ma_pa_stream_flags_t streamFlags; + + MA_ASSERT(pDevice != NULL); + MA_ZERO_OBJECT(&pDevice->pulse); + + if (pConfig->deviceType == ma_device_type_loopback) { + return MA_DEVICE_TYPE_NOT_SUPPORTED; + } + + /* No exclusive mode with the PulseAudio backend. */ + if (((pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) && pConfig->playback.shareMode == ma_share_mode_exclusive) || + ((pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) && pConfig->capture.shareMode == ma_share_mode_exclusive)) { + return MA_SHARE_MODE_NOT_SUPPORTED; + } + + if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) { + if (pDescriptorPlayback->pDeviceID != NULL) { + devPlayback = pDescriptorPlayback->pDeviceID->pulse; + } + + format = pDescriptorPlayback->format; + channels = pDescriptorPlayback->channels; + sampleRate = pDescriptorPlayback->sampleRate; + } + + if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) { + if (pDescriptorCapture->pDeviceID != NULL) { + devCapture = pDescriptorCapture->pDeviceID->pulse; + } + + format = pDescriptorCapture->format; + channels = pDescriptorCapture->channels; + sampleRate = pDescriptorCapture->sampleRate; + } + + + + result = ma_init_pa_mainloop_and_pa_context__pulse(pDevice->pContext, pDevice->pContext->pulse.pApplicationName, pDevice->pContext->pulse.pServerName, MA_FALSE, &pDevice->pulse.pMainLoop, &pDevice->pulse.pPulseContext); + if (result != MA_SUCCESS) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to initialize PA mainloop and context for device.\n"); + return result; + } + + if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) { + result = ma_context_get_source_info__pulse(pDevice->pContext, devCapture, &sourceInfo); + if (result != MA_SUCCESS) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to retrieve source info for capture device."); + goto on_error0; + } + + ss = sourceInfo.sample_spec; + cmap = sourceInfo.channel_map; + + /* Use the requested channel count if we have one. */ + if (pDescriptorCapture->channels != 0) { + ss.channels = pDescriptorCapture->channels; + } + + /* Use a default channel map. */ + ((ma_pa_channel_map_init_extend_proc)pDevice->pContext->pulse.pa_channel_map_init_extend)(&cmap, ss.channels, pConfig->pulse.channelMap); + + /* Use the requested sample rate if one was specified. 
*/ + if (pDescriptorCapture->sampleRate != 0) { + ss.rate = pDescriptorCapture->sampleRate; + } + streamFlags = MA_PA_STREAM_START_CORKED | MA_PA_STREAM_ADJUST_LATENCY; + + if (ma_format_from_pulse(ss.format) == ma_format_unknown) { + if (ma_is_little_endian()) { + ss.format = MA_PA_SAMPLE_FLOAT32LE; + } else { + ss.format = MA_PA_SAMPLE_FLOAT32BE; + } + streamFlags |= MA_PA_STREAM_FIX_FORMAT; + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, "[PulseAudio] sample_spec.format not supported by miniaudio. Defaulting to PA_SAMPLE_FLOAT32.\n"); + } + if (ss.rate == 0) { + ss.rate = MA_DEFAULT_SAMPLE_RATE; + streamFlags |= MA_PA_STREAM_FIX_RATE; + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, "[PulseAudio] sample_spec.rate = 0. Defaulting to %d.\n", ss.rate); + } + if (ss.channels == 0) { + ss.channels = MA_DEFAULT_CHANNELS; + streamFlags |= MA_PA_STREAM_FIX_CHANNELS; + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, "[PulseAudio] sample_spec.channels = 0. Defaulting to %d.\n", ss.channels); + } + + /* We now have enough information to calculate our actual period size in frames. */ + pDescriptorCapture->periodSizeInFrames = ma_calculate_period_size_in_frames_from_descriptor__pulse(pDescriptorCapture, ss.rate, pConfig->performanceProfile); + + attr = ma_device__pa_buffer_attr_new(pDescriptorCapture->periodSizeInFrames, pDescriptorCapture->periodCount, &ss); + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, "[PulseAudio] Capture attr: maxlength=%d, tlength=%d, prebuf=%d, minreq=%d, fragsize=%d; periodSizeInFrames=%d\n", attr.maxlength, attr.tlength, attr.prebuf, attr.minreq, attr.fragsize, pDescriptorCapture->periodSizeInFrames); + + pDevice->pulse.pStreamCapture = ma_device__pa_stream_new__pulse(pDevice, pConfig->pulse.pStreamNameCapture, &ss, &cmap); + if (pDevice->pulse.pStreamCapture == NULL) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to create PulseAudio capture stream.\n"); + result = MA_ERROR; + goto on_error0; + } + + + /* The callback needs to be set before connecting the stream. */ + ((ma_pa_stream_set_read_callback_proc)pDevice->pContext->pulse.pa_stream_set_read_callback)((ma_pa_stream*)pDevice->pulse.pStreamCapture, ma_device_on_read__pulse, pDevice); + + /* State callback for checking when the device has been corked. */ + ((ma_pa_stream_set_suspended_callback_proc)pDevice->pContext->pulse.pa_stream_set_suspended_callback)((ma_pa_stream*)pDevice->pulse.pStreamCapture, ma_device_on_suspended__pulse, pDevice); + + /* Rerouting notification. */ + ((ma_pa_stream_set_moved_callback_proc)pDevice->pContext->pulse.pa_stream_set_moved_callback)((ma_pa_stream*)pDevice->pulse.pStreamCapture, ma_device_on_rerouted__pulse, pDevice); + + + /* Connect after we've got all of our internal state set up. */ + if (devCapture != NULL) { + streamFlags |= MA_PA_STREAM_DONT_MOVE; + } + + error = ((ma_pa_stream_connect_record_proc)pDevice->pContext->pulse.pa_stream_connect_record)((ma_pa_stream*)pDevice->pulse.pStreamCapture, devCapture, &attr, streamFlags); + if (error != MA_PA_OK) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to connect PulseAudio capture stream."); + result = ma_result_from_pulse(error); + goto on_error1; + } + + result = ma_wait_for_pa_stream_to_connect__pulse(pDevice->pContext, pDevice->pulse.pMainLoop, (ma_pa_stream*)pDevice->pulse.pStreamCapture); + if (result != MA_SUCCESS) { + goto on_error2; + } + + + /* Internal format. 
*/ + pActualSS = ((ma_pa_stream_get_sample_spec_proc)pDevice->pContext->pulse.pa_stream_get_sample_spec)((ma_pa_stream*)pDevice->pulse.pStreamCapture); + if (pActualSS != NULL) { + ss = *pActualSS; + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, "[PulseAudio] Capture sample spec: format=%s, channels=%d, rate=%d\n", ma_get_format_name(ma_format_from_pulse(ss.format)), ss.channels, ss.rate); + } else { + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, "[PulseAudio] Failed to retrieve capture sample spec.\n"); + } + + pDescriptorCapture->format = ma_format_from_pulse(ss.format); + pDescriptorCapture->channels = ss.channels; + pDescriptorCapture->sampleRate = ss.rate; + + if (pDescriptorCapture->format == ma_format_unknown || pDescriptorCapture->channels == 0 || pDescriptorCapture->sampleRate == 0) { + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[PulseAudio] Capture sample spec is invalid. Device unusable by miniaudio. format=%s, channels=%d, sampleRate=%d.\n", ma_get_format_name(pDescriptorCapture->format), pDescriptorCapture->channels, pDescriptorCapture->sampleRate); + result = MA_ERROR; + goto on_error4; + } + + + /* Internal channel map. */ + pActualChannelMap = ((ma_pa_stream_get_channel_map_proc)pDevice->pContext->pulse.pa_stream_get_channel_map)((ma_pa_stream*)pDevice->pulse.pStreamCapture); + if (pActualChannelMap == NULL) { + pActualChannelMap = &cmap; /* Fallback just in case. */ + } + + /* + Bug in PipeWire. There have been reports that PipeWire is returning AUX channels when reporting + the channel map. To somewhat workaround this, I'm hacking in a hard coded channel map for mono + and stereo. In this case it should be safe to assume mono = MONO and stereo = LEFT/RIGHT. For + all other channel counts we need to just put up with whatever PipeWire reports and hope it gets + fixed sooner than later. I might remove this hack later. + */ + if (pDescriptorCapture->channels > 2) { + for (iChannel = 0; iChannel < pDescriptorCapture->channels; iChannel += 1) { + pDescriptorCapture->channelMap[iChannel] = ma_channel_position_from_pulse(pActualChannelMap->map[iChannel]); + } + } else { + /* Hack for mono and stereo. */ + if (pDescriptorCapture->channels == 1) { + pDescriptorCapture->channelMap[0] = MA_CHANNEL_MONO; + } else if (pDescriptorCapture->channels == 2) { + pDescriptorCapture->channelMap[0] = MA_CHANNEL_FRONT_LEFT; + pDescriptorCapture->channelMap[1] = MA_CHANNEL_FRONT_RIGHT; + } else { + MA_ASSERT(MA_FALSE); /* Should never hit this. */ + } + } + + + /* Buffer. 
*/ + pActualAttr = ((ma_pa_stream_get_buffer_attr_proc)pDevice->pContext->pulse.pa_stream_get_buffer_attr)((ma_pa_stream*)pDevice->pulse.pStreamCapture); + if (pActualAttr != NULL) { + attr = *pActualAttr; + } + + if (attr.fragsize > 0) { + pDescriptorCapture->periodCount = ma_max(attr.maxlength / attr.fragsize, 1); + } else { + pDescriptorCapture->periodCount = 1; + } + + pDescriptorCapture->periodSizeInFrames = attr.maxlength / ma_get_bytes_per_frame(pDescriptorCapture->format, pDescriptorCapture->channels) / pDescriptorCapture->periodCount; + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, "[PulseAudio] Capture actual attr: maxlength=%d, tlength=%d, prebuf=%d, minreq=%d, fragsize=%d; periodSizeInFrames=%d\n", attr.maxlength, attr.tlength, attr.prebuf, attr.minreq, attr.fragsize, pDescriptorCapture->periodSizeInFrames); + } + + if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) { + result = ma_context_get_sink_info__pulse(pDevice->pContext, devPlayback, &sinkInfo); + if (result != MA_SUCCESS) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to retrieve sink info for playback device.\n"); + goto on_error2; + } + + ss = sinkInfo.sample_spec; + cmap = sinkInfo.channel_map; + + /* Use the requested channel count if we have one. */ + if (pDescriptorPlayback->channels != 0) { + ss.channels = pDescriptorPlayback->channels; + } + + /* Use a default channel map. */ + ((ma_pa_channel_map_init_extend_proc)pDevice->pContext->pulse.pa_channel_map_init_extend)(&cmap, ss.channels, pConfig->pulse.channelMap); + + + /* Use the requested sample rate if one was specified. */ + if (pDescriptorPlayback->sampleRate != 0) { + ss.rate = pDescriptorPlayback->sampleRate; + } + + streamFlags = MA_PA_STREAM_START_CORKED | MA_PA_STREAM_ADJUST_LATENCY; + if (ma_format_from_pulse(ss.format) == ma_format_unknown) { + if (ma_is_little_endian()) { + ss.format = MA_PA_SAMPLE_FLOAT32LE; + } else { + ss.format = MA_PA_SAMPLE_FLOAT32BE; + } + streamFlags |= MA_PA_STREAM_FIX_FORMAT; + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, "[PulseAudio] sample_spec.format not supported by miniaudio. Defaulting to PA_SAMPLE_FLOAT32.\n"); + } + if (ss.rate == 0) { + ss.rate = MA_DEFAULT_SAMPLE_RATE; + streamFlags |= MA_PA_STREAM_FIX_RATE; + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, "[PulseAudio] sample_spec.rate = 0. Defaulting to %d.\n", ss.rate); + } + if (ss.channels == 0) { + ss.channels = MA_DEFAULT_CHANNELS; + streamFlags |= MA_PA_STREAM_FIX_CHANNELS; + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, "[PulseAudio] sample_spec.channels = 0. Defaulting to %d.\n", ss.channels); + } + + /* We now have enough information to calculate the actual buffer size in frames. 
*/ + pDescriptorPlayback->periodSizeInFrames = ma_calculate_period_size_in_frames_from_descriptor__pulse(pDescriptorPlayback, ss.rate, pConfig->performanceProfile); + + attr = ma_device__pa_buffer_attr_new(pDescriptorPlayback->periodSizeInFrames, pDescriptorPlayback->periodCount, &ss); + + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, "[PulseAudio] Playback attr: maxlength=%d, tlength=%d, prebuf=%d, minreq=%d, fragsize=%d; periodSizeInFrames=%d\n", attr.maxlength, attr.tlength, attr.prebuf, attr.minreq, attr.fragsize, pDescriptorPlayback->periodSizeInFrames); + + pDevice->pulse.pStreamPlayback = ma_device__pa_stream_new__pulse(pDevice, pConfig->pulse.pStreamNamePlayback, &ss, &cmap); + if (pDevice->pulse.pStreamPlayback == NULL) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to create PulseAudio playback stream.\n"); + result = MA_ERROR; + goto on_error2; + } + + + /* + Note that this callback will be fired as soon as the stream is connected, even though it's started as corked. The callback needs to handle a + device state of ma_device_state_uninitialized. + */ + ((ma_pa_stream_set_write_callback_proc)pDevice->pContext->pulse.pa_stream_set_write_callback)((ma_pa_stream*)pDevice->pulse.pStreamPlayback, ma_device_on_write__pulse, pDevice); + + /* State callback for checking when the device has been corked. */ + ((ma_pa_stream_set_suspended_callback_proc)pDevice->pContext->pulse.pa_stream_set_suspended_callback)((ma_pa_stream*)pDevice->pulse.pStreamPlayback, ma_device_on_suspended__pulse, pDevice); + + /* Rerouting notification. */ + ((ma_pa_stream_set_moved_callback_proc)pDevice->pContext->pulse.pa_stream_set_moved_callback)((ma_pa_stream*)pDevice->pulse.pStreamPlayback, ma_device_on_rerouted__pulse, pDevice); + + + /* Connect after we've got all of our internal state set up. */ + if (devPlayback != NULL) { + streamFlags |= MA_PA_STREAM_DONT_MOVE; + } + + error = ((ma_pa_stream_connect_playback_proc)pDevice->pContext->pulse.pa_stream_connect_playback)((ma_pa_stream*)pDevice->pulse.pStreamPlayback, devPlayback, &attr, streamFlags, NULL, NULL); + if (error != MA_PA_OK) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to connect PulseAudio playback stream."); + result = ma_result_from_pulse(error); + goto on_error3; + } + + result = ma_wait_for_pa_stream_to_connect__pulse(pDevice->pContext, pDevice->pulse.pMainLoop, (ma_pa_stream*)pDevice->pulse.pStreamPlayback); + if (result != MA_SUCCESS) { + goto on_error3; + } + + + /* Internal format. */ + pActualSS = ((ma_pa_stream_get_sample_spec_proc)pDevice->pContext->pulse.pa_stream_get_sample_spec)((ma_pa_stream*)pDevice->pulse.pStreamPlayback); + if (pActualSS != NULL) { + ss = *pActualSS; + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, "[PulseAudio] Playback sample spec: format=%s, channels=%d, rate=%d\n", ma_get_format_name(ma_format_from_pulse(ss.format)), ss.channels, ss.rate); + } else { + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, "[PulseAudio] Failed to retrieve playback sample spec.\n"); + } + + pDescriptorPlayback->format = ma_format_from_pulse(ss.format); + pDescriptorPlayback->channels = ss.channels; + pDescriptorPlayback->sampleRate = ss.rate; + + if (pDescriptorPlayback->format == ma_format_unknown || pDescriptorPlayback->channels == 0 || pDescriptorPlayback->sampleRate == 0) { + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[PulseAudio] Playback sample spec is invalid. Device unusable by miniaudio. 
format=%s, channels=%d, sampleRate=%d.\n", ma_get_format_name(pDescriptorPlayback->format), pDescriptorPlayback->channels, pDescriptorPlayback->sampleRate); + result = MA_ERROR; + goto on_error4; + } + + + /* Internal channel map. */ + pActualChannelMap = ((ma_pa_stream_get_channel_map_proc)pDevice->pContext->pulse.pa_stream_get_channel_map)((ma_pa_stream*)pDevice->pulse.pStreamPlayback); + if (pActualChannelMap == NULL) { + pActualChannelMap = &cmap; /* Fallback just in case. */ + } + + /* + Bug in PipeWire. There have been reports that PipeWire is returning AUX channels when reporting + the channel map. To somewhat workaround this, I'm hacking in a hard coded channel map for mono + and stereo. In this case it should be safe to assume mono = MONO and stereo = LEFT/RIGHT. For + all other channel counts we need to just put up with whatever PipeWire reports and hope it gets + fixed sooner than later. I might remove this hack later. + */ + if (pDescriptorPlayback->channels > 2) { + for (iChannel = 0; iChannel < pDescriptorPlayback->channels; iChannel += 1) { + pDescriptorPlayback->channelMap[iChannel] = ma_channel_position_from_pulse(pActualChannelMap->map[iChannel]); + } + } else { + /* Hack for mono and stereo. */ + if (pDescriptorPlayback->channels == 1) { + pDescriptorPlayback->channelMap[0] = MA_CHANNEL_MONO; + } else if (pDescriptorPlayback->channels == 2) { + pDescriptorPlayback->channelMap[0] = MA_CHANNEL_FRONT_LEFT; + pDescriptorPlayback->channelMap[1] = MA_CHANNEL_FRONT_RIGHT; + } else { + MA_ASSERT(MA_FALSE); /* Should never hit this. */ + } + } + + + /* Buffer. */ + pActualAttr = ((ma_pa_stream_get_buffer_attr_proc)pDevice->pContext->pulse.pa_stream_get_buffer_attr)((ma_pa_stream*)pDevice->pulse.pStreamPlayback); + if (pActualAttr != NULL) { + attr = *pActualAttr; + } + + if (attr.tlength > 0) { + pDescriptorPlayback->periodCount = ma_max(attr.maxlength / attr.tlength, 1); + } else { + pDescriptorPlayback->periodCount = 1; + } + + pDescriptorPlayback->periodSizeInFrames = attr.maxlength / ma_get_bytes_per_frame(pDescriptorPlayback->format, pDescriptorPlayback->channels) / pDescriptorPlayback->periodCount; + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, "[PulseAudio] Playback actual attr: maxlength=%d, tlength=%d, prebuf=%d, minreq=%d, fragsize=%d; internalPeriodSizeInFrames=%d\n", attr.maxlength, attr.tlength, attr.prebuf, attr.minreq, attr.fragsize, pDescriptorPlayback->periodSizeInFrames); + } + + + /* + We need a ring buffer for handling duplex mode. We can use the main duplex ring buffer in the main + part of the ma_device struct. We cannot, however, depend on ma_device_init() initializing this for + us later on because that will only do it if it's a fully asynchronous backend - i.e. the + onDeviceDataLoop callback is NULL, which is not the case for PulseAudio. + */ + if (pConfig->deviceType == ma_device_type_duplex) { + ma_format rbFormat = (format != ma_format_unknown) ? format : pDescriptorCapture->format; + ma_uint32 rbChannels = (channels > 0) ? channels : pDescriptorCapture->channels; + ma_uint32 rbSampleRate = (sampleRate > 0) ? sampleRate : pDescriptorCapture->sampleRate; + + result = ma_duplex_rb_init(rbFormat, rbChannels, rbSampleRate, pDescriptorCapture->sampleRate, pDescriptorCapture->periodSizeInFrames, &pDevice->pContext->allocationCallbacks, &pDevice->duplexRB); + if (result != MA_SUCCESS) { + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to initialize ring buffer. 
%s.\n", ma_result_description(result)); + goto on_error4; + } + } + + return MA_SUCCESS; + + +on_error4: + if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) { + ((ma_pa_stream_disconnect_proc)pDevice->pContext->pulse.pa_stream_disconnect)((ma_pa_stream*)pDevice->pulse.pStreamPlayback); + } +on_error3: + if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) { + ((ma_pa_stream_unref_proc)pDevice->pContext->pulse.pa_stream_unref)((ma_pa_stream*)pDevice->pulse.pStreamPlayback); + } +on_error2: + if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) { + ((ma_pa_stream_disconnect_proc)pDevice->pContext->pulse.pa_stream_disconnect)((ma_pa_stream*)pDevice->pulse.pStreamCapture); + } +on_error1: + if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) { + ((ma_pa_stream_unref_proc)pDevice->pContext->pulse.pa_stream_unref)((ma_pa_stream*)pDevice->pulse.pStreamCapture); + } +on_error0: + return result; +} + + +static void ma_pulse_operation_complete_callback(ma_pa_stream* pStream, int success, void* pUserData) +{ + ma_bool32* pIsSuccessful = (ma_bool32*)pUserData; + MA_ASSERT(pIsSuccessful != NULL); + + *pIsSuccessful = (ma_bool32)success; + + (void)pStream; /* Unused. */ +} + +static ma_result ma_device__cork_stream__pulse(ma_device* pDevice, ma_device_type deviceType, int cork) +{ + ma_context* pContext = pDevice->pContext; + ma_bool32 wasSuccessful; + ma_pa_stream* pStream; + ma_pa_operation* pOP; + ma_result result; + + /* This should not be called with a duplex device type. */ + if (deviceType == ma_device_type_duplex) { + return MA_INVALID_ARGS; + } + + wasSuccessful = MA_FALSE; + + pStream = (ma_pa_stream*)((deviceType == ma_device_type_capture) ? pDevice->pulse.pStreamCapture : pDevice->pulse.pStreamPlayback); + MA_ASSERT(pStream != NULL); + + pOP = ((ma_pa_stream_cork_proc)pContext->pulse.pa_stream_cork)(pStream, cork, ma_pulse_operation_complete_callback, &wasSuccessful); + if (pOP == NULL) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to cork PulseAudio stream."); + return MA_ERROR; + } + + result = ma_wait_for_operation_and_unref__pulse(pDevice->pContext, pDevice->pulse.pMainLoop, pOP); + if (result != MA_SUCCESS) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[PulseAudio] An error occurred while waiting for the PulseAudio stream to cork."); + return result; + } + + if (!wasSuccessful) { + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to %s PulseAudio stream.", (cork) ? "stop" : "start"); + return MA_ERROR; + } + + return MA_SUCCESS; +} + +static ma_result ma_device_start__pulse(ma_device* pDevice) +{ + ma_result result; + + MA_ASSERT(pDevice != NULL); + + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + result = ma_device__cork_stream__pulse(pDevice, ma_device_type_capture, 0); + if (result != MA_SUCCESS) { + return result; + } + } + + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + /* + We need to fill some data before uncorking. Not doing this will result in the write callback + never getting fired. We're not going to abort if writing fails because I still want the device + to get uncorked. 
+        */
+        ma_device_write_to_stream__pulse(pDevice, (ma_pa_stream*)(pDevice->pulse.pStreamPlayback), NULL); /* No need to check the result here. Always want to fall through to an uncork. */
+
+        result = ma_device__cork_stream__pulse(pDevice, ma_device_type_playback, 0);
+        if (result != MA_SUCCESS) {
+            return result;
+        }
+    }
+
+    return MA_SUCCESS;
+}
+
+static ma_result ma_device_stop__pulse(ma_device* pDevice)
+{
+    ma_result result;
+
+    MA_ASSERT(pDevice != NULL);
+
+    if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
+        result = ma_device__cork_stream__pulse(pDevice, ma_device_type_capture, 1);
+        if (result != MA_SUCCESS) {
+            return result;
+        }
+    }
+
+    if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
+        /*
+        Ideally we would drain the device here, but there have been cases where PulseAudio seems to be
+        broken on some systems to the point where no audio processing seems to happen. When this
+        happens, draining never completes and we get stuck here. For now I'm disabling draining of
+        the device so we don't just freeze the application.
+        */
+        #if 0
+        ma_pa_operation* pOP = ((ma_pa_stream_drain_proc)pDevice->pContext->pulse.pa_stream_drain)((ma_pa_stream*)pDevice->pulse.pStreamPlayback, ma_pulse_operation_complete_callback, &wasSuccessful);
+        ma_wait_for_operation_and_unref__pulse(pDevice->pContext, pDevice->pulse.pMainLoop, pOP);
+        #endif
+
+        result = ma_device__cork_stream__pulse(pDevice, ma_device_type_playback, 1);
+        if (result != MA_SUCCESS) {
+            return result;
+        }
+    }
+
+    return MA_SUCCESS;
+}
+
+static ma_result ma_device_data_loop__pulse(ma_device* pDevice)
+{
+    int resultPA;
+
+    MA_ASSERT(pDevice != NULL);
+
+    /* NOTE: Don't start the device here. It'll be done at a higher level. */
+
+    /*
+    All data is handled through callbacks. All we need to do is iterate over the main loop and let
+    the callbacks deal with it.
+    */
+    while (ma_device_get_state(pDevice) == ma_device_state_started) {
+        resultPA = ((ma_pa_mainloop_iterate_proc)pDevice->pContext->pulse.pa_mainloop_iterate)((ma_pa_mainloop*)pDevice->pulse.pMainLoop, 1, NULL);
+        if (resultPA < 0) {
+            break;
+        }
+    }
+
+    /* NOTE: Don't stop the device here. It'll be done at a higher level. 
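+    When a stop is requested, ma_device_data_loop_wakeup__pulse() below calls pa_mainloop_wakeup(),
+    which causes the blocking pa_mainloop_iterate() call above to return so that the state check
+    can run and the loop can exit.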
*/ + return MA_SUCCESS; +} + +static ma_result ma_device_data_loop_wakeup__pulse(ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + + ((ma_pa_mainloop_wakeup_proc)pDevice->pContext->pulse.pa_mainloop_wakeup)((ma_pa_mainloop*)pDevice->pulse.pMainLoop); + + return MA_SUCCESS; +} + +static ma_result ma_context_uninit__pulse(ma_context* pContext) +{ + MA_ASSERT(pContext != NULL); + MA_ASSERT(pContext->backend == ma_backend_pulseaudio); + + ((ma_pa_context_disconnect_proc)pContext->pulse.pa_context_disconnect)((ma_pa_context*)pContext->pulse.pPulseContext); + ((ma_pa_context_unref_proc)pContext->pulse.pa_context_unref)((ma_pa_context*)pContext->pulse.pPulseContext); + ((ma_pa_mainloop_free_proc)pContext->pulse.pa_mainloop_free)((ma_pa_mainloop*)pContext->pulse.pMainLoop); + + ma_free(pContext->pulse.pServerName, &pContext->allocationCallbacks); + ma_free(pContext->pulse.pApplicationName, &pContext->allocationCallbacks); + +#ifndef MA_NO_RUNTIME_LINKING + ma_dlclose(ma_context_get_log(pContext), pContext->pulse.pulseSO); +#endif + + return MA_SUCCESS; +} + +static ma_result ma_context_init__pulse(ma_context* pContext, const ma_context_config* pConfig, ma_backend_callbacks* pCallbacks) +{ + ma_result result; +#ifndef MA_NO_RUNTIME_LINKING + const char* libpulseNames[] = { + "libpulse.so", + "libpulse.so.0" + }; + size_t i; + + for (i = 0; i < ma_countof(libpulseNames); ++i) { + pContext->pulse.pulseSO = ma_dlopen(ma_context_get_log(pContext), libpulseNames[i]); + if (pContext->pulse.pulseSO != NULL) { + break; + } + } + + if (pContext->pulse.pulseSO == NULL) { + return MA_NO_BACKEND; + } + + pContext->pulse.pa_mainloop_new = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_mainloop_new"); + pContext->pulse.pa_mainloop_free = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_mainloop_free"); + pContext->pulse.pa_mainloop_quit = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_mainloop_quit"); + pContext->pulse.pa_mainloop_get_api = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_mainloop_get_api"); + pContext->pulse.pa_mainloop_iterate = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_mainloop_iterate"); + pContext->pulse.pa_mainloop_wakeup = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_mainloop_wakeup"); + pContext->pulse.pa_threaded_mainloop_new = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_threaded_mainloop_new"); + pContext->pulse.pa_threaded_mainloop_free = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_threaded_mainloop_free"); + pContext->pulse.pa_threaded_mainloop_start = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_threaded_mainloop_start"); + pContext->pulse.pa_threaded_mainloop_stop = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_threaded_mainloop_stop"); + pContext->pulse.pa_threaded_mainloop_lock = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_threaded_mainloop_lock"); + pContext->pulse.pa_threaded_mainloop_unlock = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_threaded_mainloop_unlock"); + pContext->pulse.pa_threaded_mainloop_wait = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_threaded_mainloop_wait"); + pContext->pulse.pa_threaded_mainloop_signal = 
(ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_threaded_mainloop_signal"); + pContext->pulse.pa_threaded_mainloop_accept = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_threaded_mainloop_accept"); + pContext->pulse.pa_threaded_mainloop_get_retval = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_threaded_mainloop_get_retval"); + pContext->pulse.pa_threaded_mainloop_get_api = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_threaded_mainloop_get_api"); + pContext->pulse.pa_threaded_mainloop_in_thread = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_threaded_mainloop_in_thread"); + pContext->pulse.pa_threaded_mainloop_set_name = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_threaded_mainloop_set_name"); + pContext->pulse.pa_context_new = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_context_new"); + pContext->pulse.pa_context_unref = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_context_unref"); + pContext->pulse.pa_context_connect = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_context_connect"); + pContext->pulse.pa_context_disconnect = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_context_disconnect"); + pContext->pulse.pa_context_set_state_callback = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_context_set_state_callback"); + pContext->pulse.pa_context_get_state = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_context_get_state"); + pContext->pulse.pa_context_get_sink_info_list = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_context_get_sink_info_list"); + pContext->pulse.pa_context_get_source_info_list = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_context_get_source_info_list"); + pContext->pulse.pa_context_get_sink_info_by_name = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_context_get_sink_info_by_name"); + pContext->pulse.pa_context_get_source_info_by_name = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_context_get_source_info_by_name"); + pContext->pulse.pa_operation_unref = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_operation_unref"); + pContext->pulse.pa_operation_get_state = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_operation_get_state"); + pContext->pulse.pa_channel_map_init_extend = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_channel_map_init_extend"); + pContext->pulse.pa_channel_map_valid = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_channel_map_valid"); + pContext->pulse.pa_channel_map_compatible = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_channel_map_compatible"); + pContext->pulse.pa_stream_new = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_stream_new"); + pContext->pulse.pa_stream_unref = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_stream_unref"); + pContext->pulse.pa_stream_connect_playback = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_stream_connect_playback"); + pContext->pulse.pa_stream_connect_record = 
(ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_stream_connect_record"); + pContext->pulse.pa_stream_disconnect = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_stream_disconnect"); + pContext->pulse.pa_stream_get_state = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_stream_get_state"); + pContext->pulse.pa_stream_get_sample_spec = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_stream_get_sample_spec"); + pContext->pulse.pa_stream_get_channel_map = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_stream_get_channel_map"); + pContext->pulse.pa_stream_get_buffer_attr = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_stream_get_buffer_attr"); + pContext->pulse.pa_stream_set_buffer_attr = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_stream_set_buffer_attr"); + pContext->pulse.pa_stream_get_device_name = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_stream_get_device_name"); + pContext->pulse.pa_stream_set_write_callback = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_stream_set_write_callback"); + pContext->pulse.pa_stream_set_read_callback = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_stream_set_read_callback"); + pContext->pulse.pa_stream_set_suspended_callback = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_stream_set_suspended_callback"); + pContext->pulse.pa_stream_set_moved_callback = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_stream_set_moved_callback"); + pContext->pulse.pa_stream_is_suspended = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_stream_is_suspended"); + pContext->pulse.pa_stream_flush = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_stream_flush"); + pContext->pulse.pa_stream_drain = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_stream_drain"); + pContext->pulse.pa_stream_is_corked = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_stream_is_corked"); + pContext->pulse.pa_stream_cork = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_stream_cork"); + pContext->pulse.pa_stream_trigger = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_stream_trigger"); + pContext->pulse.pa_stream_begin_write = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_stream_begin_write"); + pContext->pulse.pa_stream_write = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_stream_write"); + pContext->pulse.pa_stream_peek = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_stream_peek"); + pContext->pulse.pa_stream_drop = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_stream_drop"); + pContext->pulse.pa_stream_writable_size = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_stream_writable_size"); + pContext->pulse.pa_stream_readable_size = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->pulse.pulseSO, "pa_stream_readable_size"); +#else + /* This strange assignment system is just for type safety. 
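+    If one of the PulseAudio functions is ever changed so that its signature no longer matches
+    miniaudio's function pointer type, the corresponding assignment below should make the compiler
+    throw a warning. For example, the first assignment is effectively:
+
+        ma_pa_mainloop_new_proc _pa_mainloop_new = pa_mainloop_new;
+
+    which only compiles cleanly while pa_mainloop_new() still matches the ma_pa_mainloop_new_proc
+    declaration.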
*/ + ma_pa_mainloop_new_proc _pa_mainloop_new = pa_mainloop_new; + ma_pa_mainloop_free_proc _pa_mainloop_free = pa_mainloop_free; + ma_pa_mainloop_quit_proc _pa_mainloop_quit = pa_mainloop_quit; + ma_pa_mainloop_get_api_proc _pa_mainloop_get_api = pa_mainloop_get_api; + ma_pa_mainloop_iterate_proc _pa_mainloop_iterate = pa_mainloop_iterate; + ma_pa_mainloop_wakeup_proc _pa_mainloop_wakeup = pa_mainloop_wakeup; + ma_pa_threaded_mainloop_new_proc _pa_threaded_mainloop_new = pa_threaded_mainloop_new; + ma_pa_threaded_mainloop_free_proc _pa_threaded_mainloop_free = pa_threaded_mainloop_free; + ma_pa_threaded_mainloop_start_proc _pa_threaded_mainloop_start = pa_threaded_mainloop_start; + ma_pa_threaded_mainloop_stop_proc _pa_threaded_mainloop_stop = pa_threaded_mainloop_stop; + ma_pa_threaded_mainloop_lock_proc _pa_threaded_mainloop_lock = pa_threaded_mainloop_lock; + ma_pa_threaded_mainloop_unlock_proc _pa_threaded_mainloop_unlock = pa_threaded_mainloop_unlock; + ma_pa_threaded_mainloop_wait_proc _pa_threaded_mainloop_wait = pa_threaded_mainloop_wait; + ma_pa_threaded_mainloop_signal_proc _pa_threaded_mainloop_signal = pa_threaded_mainloop_signal; + ma_pa_threaded_mainloop_accept_proc _pa_threaded_mainloop_accept = pa_threaded_mainloop_accept; + ma_pa_threaded_mainloop_get_retval_proc _pa_threaded_mainloop_get_retval = pa_threaded_mainloop_get_retval; + ma_pa_threaded_mainloop_get_api_proc _pa_threaded_mainloop_get_api = pa_threaded_mainloop_get_api; + ma_pa_threaded_mainloop_in_thread_proc _pa_threaded_mainloop_in_thread = pa_threaded_mainloop_in_thread; + ma_pa_threaded_mainloop_set_name_proc _pa_threaded_mainloop_set_name = pa_threaded_mainloop_set_name; + ma_pa_context_new_proc _pa_context_new = pa_context_new; + ma_pa_context_unref_proc _pa_context_unref = pa_context_unref; + ma_pa_context_connect_proc _pa_context_connect = pa_context_connect; + ma_pa_context_disconnect_proc _pa_context_disconnect = pa_context_disconnect; + ma_pa_context_set_state_callback_proc _pa_context_set_state_callback = pa_context_set_state_callback; + ma_pa_context_get_state_proc _pa_context_get_state = pa_context_get_state; + ma_pa_context_get_sink_info_list_proc _pa_context_get_sink_info_list = pa_context_get_sink_info_list; + ma_pa_context_get_source_info_list_proc _pa_context_get_source_info_list = pa_context_get_source_info_list; + ma_pa_context_get_sink_info_by_name_proc _pa_context_get_sink_info_by_name = pa_context_get_sink_info_by_name; + ma_pa_context_get_source_info_by_name_proc _pa_context_get_source_info_by_name= pa_context_get_source_info_by_name; + ma_pa_operation_unref_proc _pa_operation_unref = pa_operation_unref; + ma_pa_operation_get_state_proc _pa_operation_get_state = pa_operation_get_state; + ma_pa_channel_map_init_extend_proc _pa_channel_map_init_extend = pa_channel_map_init_extend; + ma_pa_channel_map_valid_proc _pa_channel_map_valid = pa_channel_map_valid; + ma_pa_channel_map_compatible_proc _pa_channel_map_compatible = pa_channel_map_compatible; + ma_pa_stream_new_proc _pa_stream_new = pa_stream_new; + ma_pa_stream_unref_proc _pa_stream_unref = pa_stream_unref; + ma_pa_stream_connect_playback_proc _pa_stream_connect_playback = pa_stream_connect_playback; + ma_pa_stream_connect_record_proc _pa_stream_connect_record = pa_stream_connect_record; + ma_pa_stream_disconnect_proc _pa_stream_disconnect = pa_stream_disconnect; + ma_pa_stream_get_state_proc _pa_stream_get_state = pa_stream_get_state; + ma_pa_stream_get_sample_spec_proc _pa_stream_get_sample_spec = pa_stream_get_sample_spec; + 
ma_pa_stream_get_channel_map_proc _pa_stream_get_channel_map = pa_stream_get_channel_map; + ma_pa_stream_get_buffer_attr_proc _pa_stream_get_buffer_attr = pa_stream_get_buffer_attr; + ma_pa_stream_set_buffer_attr_proc _pa_stream_set_buffer_attr = pa_stream_set_buffer_attr; + ma_pa_stream_get_device_name_proc _pa_stream_get_device_name = pa_stream_get_device_name; + ma_pa_stream_set_write_callback_proc _pa_stream_set_write_callback = pa_stream_set_write_callback; + ma_pa_stream_set_read_callback_proc _pa_stream_set_read_callback = pa_stream_set_read_callback; + ma_pa_stream_set_suspended_callback_proc _pa_stream_set_suspended_callback = pa_stream_set_suspended_callback; + ma_pa_stream_set_moved_callback_proc _pa_stream_set_moved_callback = pa_stream_set_moved_callback; + ma_pa_stream_is_suspended_proc _pa_stream_is_suspended = pa_stream_is_suspended; + ma_pa_stream_flush_proc _pa_stream_flush = pa_stream_flush; + ma_pa_stream_drain_proc _pa_stream_drain = pa_stream_drain; + ma_pa_stream_is_corked_proc _pa_stream_is_corked = pa_stream_is_corked; + ma_pa_stream_cork_proc _pa_stream_cork = pa_stream_cork; + ma_pa_stream_trigger_proc _pa_stream_trigger = pa_stream_trigger; + ma_pa_stream_begin_write_proc _pa_stream_begin_write = pa_stream_begin_write; + ma_pa_stream_write_proc _pa_stream_write = pa_stream_write; + ma_pa_stream_peek_proc _pa_stream_peek = pa_stream_peek; + ma_pa_stream_drop_proc _pa_stream_drop = pa_stream_drop; + ma_pa_stream_writable_size_proc _pa_stream_writable_size = pa_stream_writable_size; + ma_pa_stream_readable_size_proc _pa_stream_readable_size = pa_stream_readable_size; + + pContext->pulse.pa_mainloop_new = (ma_proc)_pa_mainloop_new; + pContext->pulse.pa_mainloop_free = (ma_proc)_pa_mainloop_free; + pContext->pulse.pa_mainloop_quit = (ma_proc)_pa_mainloop_quit; + pContext->pulse.pa_mainloop_get_api = (ma_proc)_pa_mainloop_get_api; + pContext->pulse.pa_mainloop_iterate = (ma_proc)_pa_mainloop_iterate; + pContext->pulse.pa_mainloop_wakeup = (ma_proc)_pa_mainloop_wakeup; + pContext->pulse.pa_threaded_mainloop_new = (ma_proc)_pa_threaded_mainloop_new; + pContext->pulse.pa_threaded_mainloop_free = (ma_proc)_pa_threaded_mainloop_free; + pContext->pulse.pa_threaded_mainloop_start = (ma_proc)_pa_threaded_mainloop_start; + pContext->pulse.pa_threaded_mainloop_stop = (ma_proc)_pa_threaded_mainloop_stop; + pContext->pulse.pa_threaded_mainloop_lock = (ma_proc)_pa_threaded_mainloop_lock; + pContext->pulse.pa_threaded_mainloop_unlock = (ma_proc)_pa_threaded_mainloop_unlock; + pContext->pulse.pa_threaded_mainloop_wait = (ma_proc)_pa_threaded_mainloop_wait; + pContext->pulse.pa_threaded_mainloop_signal = (ma_proc)_pa_threaded_mainloop_signal; + pContext->pulse.pa_threaded_mainloop_accept = (ma_proc)_pa_threaded_mainloop_accept; + pContext->pulse.pa_threaded_mainloop_get_retval = (ma_proc)_pa_threaded_mainloop_get_retval; + pContext->pulse.pa_threaded_mainloop_get_api = (ma_proc)_pa_threaded_mainloop_get_api; + pContext->pulse.pa_threaded_mainloop_in_thread = (ma_proc)_pa_threaded_mainloop_in_thread; + pContext->pulse.pa_threaded_mainloop_set_name = (ma_proc)_pa_threaded_mainloop_set_name; + pContext->pulse.pa_context_new = (ma_proc)_pa_context_new; + pContext->pulse.pa_context_unref = (ma_proc)_pa_context_unref; + pContext->pulse.pa_context_connect = (ma_proc)_pa_context_connect; + pContext->pulse.pa_context_disconnect = (ma_proc)_pa_context_disconnect; + pContext->pulse.pa_context_set_state_callback = (ma_proc)_pa_context_set_state_callback; + pContext->pulse.pa_context_get_state = 
(ma_proc)_pa_context_get_state; + pContext->pulse.pa_context_get_sink_info_list = (ma_proc)_pa_context_get_sink_info_list; + pContext->pulse.pa_context_get_source_info_list = (ma_proc)_pa_context_get_source_info_list; + pContext->pulse.pa_context_get_sink_info_by_name = (ma_proc)_pa_context_get_sink_info_by_name; + pContext->pulse.pa_context_get_source_info_by_name = (ma_proc)_pa_context_get_source_info_by_name; + pContext->pulse.pa_operation_unref = (ma_proc)_pa_operation_unref; + pContext->pulse.pa_operation_get_state = (ma_proc)_pa_operation_get_state; + pContext->pulse.pa_channel_map_init_extend = (ma_proc)_pa_channel_map_init_extend; + pContext->pulse.pa_channel_map_valid = (ma_proc)_pa_channel_map_valid; + pContext->pulse.pa_channel_map_compatible = (ma_proc)_pa_channel_map_compatible; + pContext->pulse.pa_stream_new = (ma_proc)_pa_stream_new; + pContext->pulse.pa_stream_unref = (ma_proc)_pa_stream_unref; + pContext->pulse.pa_stream_connect_playback = (ma_proc)_pa_stream_connect_playback; + pContext->pulse.pa_stream_connect_record = (ma_proc)_pa_stream_connect_record; + pContext->pulse.pa_stream_disconnect = (ma_proc)_pa_stream_disconnect; + pContext->pulse.pa_stream_get_state = (ma_proc)_pa_stream_get_state; + pContext->pulse.pa_stream_get_sample_spec = (ma_proc)_pa_stream_get_sample_spec; + pContext->pulse.pa_stream_get_channel_map = (ma_proc)_pa_stream_get_channel_map; + pContext->pulse.pa_stream_get_buffer_attr = (ma_proc)_pa_stream_get_buffer_attr; + pContext->pulse.pa_stream_set_buffer_attr = (ma_proc)_pa_stream_set_buffer_attr; + pContext->pulse.pa_stream_get_device_name = (ma_proc)_pa_stream_get_device_name; + pContext->pulse.pa_stream_set_write_callback = (ma_proc)_pa_stream_set_write_callback; + pContext->pulse.pa_stream_set_read_callback = (ma_proc)_pa_stream_set_read_callback; + pContext->pulse.pa_stream_set_suspended_callback = (ma_proc)_pa_stream_set_suspended_callback; + pContext->pulse.pa_stream_set_moved_callback = (ma_proc)_pa_stream_set_moved_callback; + pContext->pulse.pa_stream_is_suspended = (ma_proc)_pa_stream_is_suspended; + pContext->pulse.pa_stream_flush = (ma_proc)_pa_stream_flush; + pContext->pulse.pa_stream_drain = (ma_proc)_pa_stream_drain; + pContext->pulse.pa_stream_is_corked = (ma_proc)_pa_stream_is_corked; + pContext->pulse.pa_stream_cork = (ma_proc)_pa_stream_cork; + pContext->pulse.pa_stream_trigger = (ma_proc)_pa_stream_trigger; + pContext->pulse.pa_stream_begin_write = (ma_proc)_pa_stream_begin_write; + pContext->pulse.pa_stream_write = (ma_proc)_pa_stream_write; + pContext->pulse.pa_stream_peek = (ma_proc)_pa_stream_peek; + pContext->pulse.pa_stream_drop = (ma_proc)_pa_stream_drop; + pContext->pulse.pa_stream_writable_size = (ma_proc)_pa_stream_writable_size; + pContext->pulse.pa_stream_readable_size = (ma_proc)_pa_stream_readable_size; +#endif + + /* We need to make a copy of the application and server names so we can pass them to the pa_context of each device. 
*/ + pContext->pulse.pApplicationName = ma_copy_string(pConfig->pulse.pApplicationName, &pContext->allocationCallbacks); + if (pContext->pulse.pApplicationName == NULL && pConfig->pulse.pApplicationName != NULL) { + return MA_OUT_OF_MEMORY; + } + + pContext->pulse.pServerName = ma_copy_string(pConfig->pulse.pServerName, &pContext->allocationCallbacks); + if (pContext->pulse.pServerName == NULL && pConfig->pulse.pServerName != NULL) { + ma_free(pContext->pulse.pApplicationName, &pContext->allocationCallbacks); + return MA_OUT_OF_MEMORY; + } + + result = ma_init_pa_mainloop_and_pa_context__pulse(pContext, pConfig->pulse.pApplicationName, pConfig->pulse.pServerName, pConfig->pulse.tryAutoSpawn, &pContext->pulse.pMainLoop, &pContext->pulse.pPulseContext); + if (result != MA_SUCCESS) { + ma_free(pContext->pulse.pServerName, &pContext->allocationCallbacks); + ma_free(pContext->pulse.pApplicationName, &pContext->allocationCallbacks); + #ifndef MA_NO_RUNTIME_LINKING + ma_dlclose(ma_context_get_log(pContext), pContext->pulse.pulseSO); + #endif + return result; + } + + /* With pa_mainloop we run a synchronous backend, but we implement our own main loop. */ + pCallbacks->onContextInit = ma_context_init__pulse; + pCallbacks->onContextUninit = ma_context_uninit__pulse; + pCallbacks->onContextEnumerateDevices = ma_context_enumerate_devices__pulse; + pCallbacks->onContextGetDeviceInfo = ma_context_get_device_info__pulse; + pCallbacks->onDeviceInit = ma_device_init__pulse; + pCallbacks->onDeviceUninit = ma_device_uninit__pulse; + pCallbacks->onDeviceStart = ma_device_start__pulse; + pCallbacks->onDeviceStop = ma_device_stop__pulse; + pCallbacks->onDeviceRead = NULL; /* Not used because we're implementing onDeviceDataLoop. */ + pCallbacks->onDeviceWrite = NULL; /* Not used because we're implementing onDeviceDataLoop. */ + pCallbacks->onDeviceDataLoop = ma_device_data_loop__pulse; + pCallbacks->onDeviceDataLoopWakeup = ma_device_data_loop_wakeup__pulse; + + return MA_SUCCESS; +} +#endif + + +/****************************************************************************** + +JACK Backend + +******************************************************************************/ +#ifdef MA_HAS_JACK + +/* It is assumed jack.h is available when compile-time linking is being used. 
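+With runtime linking (the default), jack.h is not required at all - the handful of types and
+constants needed are declared locally below instead.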
+*/
+#ifdef MA_NO_RUNTIME_LINKING
+#include <jack/jack.h>
+
+typedef jack_nframes_t ma_jack_nframes_t;
+typedef jack_options_t ma_jack_options_t;
+typedef jack_status_t ma_jack_status_t;
+typedef jack_client_t ma_jack_client_t;
+typedef jack_port_t ma_jack_port_t;
+typedef JackProcessCallback ma_JackProcessCallback;
+typedef JackBufferSizeCallback ma_JackBufferSizeCallback;
+typedef JackShutdownCallback ma_JackShutdownCallback;
+#define MA_JACK_DEFAULT_AUDIO_TYPE JACK_DEFAULT_AUDIO_TYPE
+#define ma_JackNoStartServer JackNoStartServer
+#define ma_JackPortIsInput JackPortIsInput
+#define ma_JackPortIsOutput JackPortIsOutput
+#define ma_JackPortIsPhysical JackPortIsPhysical
+#else
+typedef ma_uint32 ma_jack_nframes_t;
+typedef int ma_jack_options_t;
+typedef int ma_jack_status_t;
+typedef struct ma_jack_client_t ma_jack_client_t;
+typedef struct ma_jack_port_t ma_jack_port_t;
+typedef int (* ma_JackProcessCallback) (ma_jack_nframes_t nframes, void* arg);
+typedef int (* ma_JackBufferSizeCallback)(ma_jack_nframes_t nframes, void* arg);
+typedef void (* ma_JackShutdownCallback) (void* arg);
+#define MA_JACK_DEFAULT_AUDIO_TYPE "32 bit float mono audio"
+#define ma_JackNoStartServer 1
+#define ma_JackPortIsInput 1
+#define ma_JackPortIsOutput 2
+#define ma_JackPortIsPhysical 4
+#endif
+
+typedef ma_jack_client_t* (* ma_jack_client_open_proc) (const char* client_name, ma_jack_options_t options, ma_jack_status_t* status, ...);
+typedef int (* ma_jack_client_close_proc) (ma_jack_client_t* client);
+typedef int (* ma_jack_client_name_size_proc) (void);
+typedef int (* ma_jack_set_process_callback_proc) (ma_jack_client_t* client, ma_JackProcessCallback process_callback, void* arg);
+typedef int (* ma_jack_set_buffer_size_callback_proc)(ma_jack_client_t* client, ma_JackBufferSizeCallback bufsize_callback, void* arg);
+typedef void (* ma_jack_on_shutdown_proc) (ma_jack_client_t* client, ma_JackShutdownCallback function, void* arg);
+typedef ma_jack_nframes_t (* ma_jack_get_sample_rate_proc) (ma_jack_client_t* client);
+typedef ma_jack_nframes_t (* ma_jack_get_buffer_size_proc) (ma_jack_client_t* client);
+typedef const char** (* ma_jack_get_ports_proc) (ma_jack_client_t* client, const char* port_name_pattern, const char* type_name_pattern, unsigned long flags);
+typedef int (* ma_jack_activate_proc) (ma_jack_client_t* client);
+typedef int (* ma_jack_deactivate_proc) (ma_jack_client_t* client);
+typedef int (* ma_jack_connect_proc) (ma_jack_client_t* client, const char* source_port, const char* destination_port);
+typedef ma_jack_port_t* (* ma_jack_port_register_proc) (ma_jack_client_t* client, const char* port_name, const char* port_type, unsigned long flags, unsigned long buffer_size);
+typedef const char* (* ma_jack_port_name_proc) (const ma_jack_port_t* port);
+typedef void* (* ma_jack_port_get_buffer_proc) (ma_jack_port_t* port, ma_jack_nframes_t nframes);
+typedef void (* ma_jack_free_proc) (void* ptr);
+
+static ma_result ma_context_open_client__jack(ma_context* pContext, ma_jack_client_t** ppClient)
+{
+    size_t maxClientNameSize;
+    char clientName[256];
+    ma_jack_status_t status;
+    ma_jack_client_t* pClient;
+
+    MA_ASSERT(pContext != NULL);
+    MA_ASSERT(ppClient != NULL);
+
+    if (ppClient) {
+        *ppClient = NULL;
+    }
+
+    maxClientNameSize = ((ma_jack_client_name_size_proc)pContext->jack.jack_client_name_size)(); /* Includes null terminator. */
+    ma_strncpy_s(clientName, ma_min(sizeof(clientName), maxClientNameSize), (pContext->jack.pClientName != NULL) ? 
pContext->jack.pClientName : "miniaudio", (size_t)-1); + + pClient = ((ma_jack_client_open_proc)pContext->jack.jack_client_open)(clientName, (pContext->jack.tryStartServer) ? 0 : ma_JackNoStartServer, &status, NULL); + if (pClient == NULL) { + return MA_FAILED_TO_OPEN_BACKEND_DEVICE; + } + + if (ppClient) { + *ppClient = pClient; + } + + return MA_SUCCESS; +} + + +static ma_result ma_context_enumerate_devices__jack(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData) +{ + ma_bool32 cbResult = MA_TRUE; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(callback != NULL); + + /* Playback. */ + if (cbResult) { + ma_device_info deviceInfo; + MA_ZERO_OBJECT(&deviceInfo); + ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1); + deviceInfo.isDefault = MA_TRUE; /* JACK only uses default devices. */ + cbResult = callback(pContext, ma_device_type_playback, &deviceInfo, pUserData); + } + + /* Capture. */ + if (cbResult) { + ma_device_info deviceInfo; + MA_ZERO_OBJECT(&deviceInfo); + ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1); + deviceInfo.isDefault = MA_TRUE; /* JACK only uses default devices. */ + cbResult = callback(pContext, ma_device_type_capture, &deviceInfo, pUserData); + } + + (void)cbResult; /* For silencing a static analysis warning. */ + + return MA_SUCCESS; +} + +static ma_result ma_context_get_device_info__jack(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_device_info* pDeviceInfo) +{ + ma_jack_client_t* pClient; + ma_result result; + const char** ppPorts; + + MA_ASSERT(pContext != NULL); + + if (pDeviceID != NULL && pDeviceID->jack != 0) { + return MA_NO_DEVICE; /* Don't know the device. */ + } + + /* Name / Description */ + if (deviceType == ma_device_type_playback) { + ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1); + } else { + ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1); + } + + /* Jack only uses default devices. */ + pDeviceInfo->isDefault = MA_TRUE; + + /* Jack only supports f32 and has a specific channel count and sample rate. */ + pDeviceInfo->nativeDataFormats[0].format = ma_format_f32; + + /* The channel count and sample rate can only be determined by opening the device. */ + result = ma_context_open_client__jack(pContext, &pClient); + if (result != MA_SUCCESS) { + ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[JACK] Failed to open client."); + return result; + } + + pDeviceInfo->nativeDataFormats[0].sampleRate = ((ma_jack_get_sample_rate_proc)pContext->jack.jack_get_sample_rate)((ma_jack_client_t*)pClient); + pDeviceInfo->nativeDataFormats[0].channels = 0; + + ppPorts = ((ma_jack_get_ports_proc)pContext->jack.jack_get_ports)((ma_jack_client_t*)pClient, NULL, MA_JACK_DEFAULT_AUDIO_TYPE, ma_JackPortIsPhysical | ((deviceType == ma_device_type_playback) ? 
ma_JackPortIsInput : ma_JackPortIsOutput));
+    if (ppPorts == NULL) {
+        ((ma_jack_client_close_proc)pContext->jack.jack_client_close)((ma_jack_client_t*)pClient);
+        ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[JACK] Failed to query physical ports.");
+        return MA_FAILED_TO_OPEN_BACKEND_DEVICE;
+    }
+
+    while (ppPorts[pDeviceInfo->nativeDataFormats[0].channels] != NULL) {
+        pDeviceInfo->nativeDataFormats[0].channels += 1;
+    }
+
+    pDeviceInfo->nativeDataFormats[0].flags = 0;
+    pDeviceInfo->nativeDataFormatCount = 1;
+
+    ((ma_jack_free_proc)pContext->jack.jack_free)((void*)ppPorts);
+    ((ma_jack_client_close_proc)pContext->jack.jack_client_close)((ma_jack_client_t*)pClient);
+
+    (void)pContext;
+    return MA_SUCCESS;
+}
+
+
+static ma_result ma_device_uninit__jack(ma_device* pDevice)
+{
+    ma_context* pContext;
+
+    MA_ASSERT(pDevice != NULL);
+
+    pContext = pDevice->pContext;
+    MA_ASSERT(pContext != NULL);
+
+    if (pDevice->jack.pClient != NULL) {
+        ((ma_jack_client_close_proc)pContext->jack.jack_client_close)((ma_jack_client_t*)pDevice->jack.pClient);
+    }
+
+    if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
+        ma_free(pDevice->jack.pIntermediaryBufferCapture, &pDevice->pContext->allocationCallbacks);
+        ma_free(pDevice->jack.ppPortsCapture, &pDevice->pContext->allocationCallbacks);
+    }
+
+    if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
+        ma_free(pDevice->jack.pIntermediaryBufferPlayback, &pDevice->pContext->allocationCallbacks);
+        ma_free(pDevice->jack.ppPortsPlayback, &pDevice->pContext->allocationCallbacks);
+    }
+
+    return MA_SUCCESS;
+}
+
+static void ma_device__jack_shutdown_callback(void* pUserData)
+{
+    /* JACK died. Stop the device. */
+    ma_device* pDevice = (ma_device*)pUserData;
+    MA_ASSERT(pDevice != NULL);
+
+    ma_device_stop(pDevice);
+}
+
+static int ma_device__jack_buffer_size_callback(ma_jack_nframes_t frameCount, void* pUserData)
+{
+    ma_device* pDevice = (ma_device*)pUserData;
+    MA_ASSERT(pDevice != NULL);
+
+    if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
+        size_t newBufferSize = frameCount * (pDevice->capture.internalChannels * ma_get_bytes_per_sample(pDevice->capture.internalFormat));
+        float* pNewBuffer = (float*)ma_calloc(newBufferSize, &pDevice->pContext->allocationCallbacks);
+        if (pNewBuffer == NULL) {
+            return MA_OUT_OF_MEMORY;
+        }
+
+        ma_free(pDevice->jack.pIntermediaryBufferCapture, &pDevice->pContext->allocationCallbacks);
+
+        pDevice->jack.pIntermediaryBufferCapture = pNewBuffer;
+        pDevice->capture.internalPeriodSizeInFrames = frameCount;
+    }
+
+    if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
+        size_t newBufferSize = frameCount * (pDevice->playback.internalChannels * ma_get_bytes_per_sample(pDevice->playback.internalFormat));
+        float* pNewBuffer = (float*)ma_calloc(newBufferSize, &pDevice->pContext->allocationCallbacks);
+        if (pNewBuffer == NULL) {
+            return MA_OUT_OF_MEMORY;
+        }
+
+        ma_free(pDevice->jack.pIntermediaryBufferPlayback, &pDevice->pContext->allocationCallbacks);
+
+        pDevice->jack.pIntermediaryBufferPlayback = pNewBuffer;
+        pDevice->playback.internalPeriodSizeInFrames = frameCount;
+    }
+
+    return 0;
+}
+
+static int ma_device__jack_process_callback(ma_jack_nframes_t frameCount, void* pUserData)
+{
+    ma_device* pDevice;
+    ma_context* pContext;
+    ma_uint32 iChannel;
+
+    pDevice = (ma_device*)pUserData;
+    MA_ASSERT(pDevice != NULL);
+
+    pContext = pDevice->pContext;
+
MA_ASSERT(pContext != NULL); + + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + /* Channels need to be interleaved. */ + for (iChannel = 0; iChannel < pDevice->capture.internalChannels; ++iChannel) { + const float* pSrc = (const float*)((ma_jack_port_get_buffer_proc)pContext->jack.jack_port_get_buffer)((ma_jack_port_t*)pDevice->jack.ppPortsCapture[iChannel], frameCount); + if (pSrc != NULL) { + float* pDst = pDevice->jack.pIntermediaryBufferCapture + iChannel; + ma_jack_nframes_t iFrame; + for (iFrame = 0; iFrame < frameCount; ++iFrame) { + *pDst = *pSrc; + + pDst += pDevice->capture.internalChannels; + pSrc += 1; + } + } + } + + ma_device_handle_backend_data_callback(pDevice, NULL, pDevice->jack.pIntermediaryBufferCapture, frameCount); + } + + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + ma_device_handle_backend_data_callback(pDevice, pDevice->jack.pIntermediaryBufferPlayback, NULL, frameCount); + + /* Channels need to be deinterleaved. */ + for (iChannel = 0; iChannel < pDevice->playback.internalChannels; ++iChannel) { + float* pDst = (float*)((ma_jack_port_get_buffer_proc)pContext->jack.jack_port_get_buffer)((ma_jack_port_t*)pDevice->jack.ppPortsPlayback[iChannel], frameCount); + if (pDst != NULL) { + const float* pSrc = pDevice->jack.pIntermediaryBufferPlayback + iChannel; + ma_jack_nframes_t iFrame; + for (iFrame = 0; iFrame < frameCount; ++iFrame) { + *pDst = *pSrc; + + pDst += 1; + pSrc += pDevice->playback.internalChannels; + } + } + } + } + + return 0; +} + +static ma_result ma_device_init__jack(ma_device* pDevice, const ma_device_config* pConfig, ma_device_descriptor* pDescriptorPlayback, ma_device_descriptor* pDescriptorCapture) +{ + ma_result result; + ma_uint32 periodSizeInFrames; + + MA_ASSERT(pConfig != NULL); + MA_ASSERT(pDevice != NULL); + + if (pConfig->deviceType == ma_device_type_loopback) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[JACK] Loopback mode not supported."); + return MA_DEVICE_TYPE_NOT_SUPPORTED; + } + + /* Only supporting default devices with JACK. */ + if (((pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) && pDescriptorPlayback->pDeviceID != NULL && pDescriptorPlayback->pDeviceID->jack != 0) || + ((pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) && pDescriptorCapture->pDeviceID != NULL && pDescriptorCapture->pDeviceID->jack != 0)) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[JACK] Only default devices are supported."); + return MA_NO_DEVICE; + } + + /* No exclusive mode with the JACK backend. */ + if (((pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) && pDescriptorPlayback->shareMode == ma_share_mode_exclusive) || + ((pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) && pDescriptorCapture->shareMode == ma_share_mode_exclusive)) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[JACK] Exclusive mode not supported."); + return MA_SHARE_MODE_NOT_SUPPORTED; + } + + /* Open the client. */ + result = ma_context_open_client__jack(pDevice->pContext, (ma_jack_client_t**)&pDevice->jack.pClient); + if (result != MA_SUCCESS) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[JACK] Failed to open client."); + return result; + } + + /* Callbacks. 
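+    These must be registered before the client is activated; JACK does not allow changing them on
+    an active client.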
*/ + if (((ma_jack_set_process_callback_proc)pDevice->pContext->jack.jack_set_process_callback)((ma_jack_client_t*)pDevice->jack.pClient, ma_device__jack_process_callback, pDevice) != 0) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[JACK] Failed to set process callback."); + return MA_FAILED_TO_OPEN_BACKEND_DEVICE; + } + if (((ma_jack_set_buffer_size_callback_proc)pDevice->pContext->jack.jack_set_buffer_size_callback)((ma_jack_client_t*)pDevice->jack.pClient, ma_device__jack_buffer_size_callback, pDevice) != 0) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[JACK] Failed to set buffer size callback."); + return MA_FAILED_TO_OPEN_BACKEND_DEVICE; + } + + ((ma_jack_on_shutdown_proc)pDevice->pContext->jack.jack_on_shutdown)((ma_jack_client_t*)pDevice->jack.pClient, ma_device__jack_shutdown_callback, pDevice); + + + /* The buffer size in frames can change. */ + periodSizeInFrames = ((ma_jack_get_buffer_size_proc)pDevice->pContext->jack.jack_get_buffer_size)((ma_jack_client_t*)pDevice->jack.pClient); + + if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) { + ma_uint32 iPort; + const char** ppPorts; + + pDescriptorCapture->format = ma_format_f32; + pDescriptorCapture->channels = 0; + pDescriptorCapture->sampleRate = ((ma_jack_get_sample_rate_proc)pDevice->pContext->jack.jack_get_sample_rate)((ma_jack_client_t*)pDevice->jack.pClient); + ma_channel_map_init_standard(ma_standard_channel_map_alsa, pDescriptorCapture->channelMap, ma_countof(pDescriptorCapture->channelMap), pDescriptorCapture->channels); + + ppPorts = ((ma_jack_get_ports_proc)pDevice->pContext->jack.jack_get_ports)((ma_jack_client_t*)pDevice->jack.pClient, NULL, MA_JACK_DEFAULT_AUDIO_TYPE, ma_JackPortIsPhysical | ma_JackPortIsOutput); + if (ppPorts == NULL) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[JACK] Failed to query physical ports."); + return MA_FAILED_TO_OPEN_BACKEND_DEVICE; + } + + /* Need to count the number of ports first so we can allocate some memory. */ + while (ppPorts[pDescriptorCapture->channels] != NULL) { + pDescriptorCapture->channels += 1; + } + + pDevice->jack.ppPortsCapture = (ma_ptr*)ma_malloc(sizeof(*pDevice->jack.ppPortsCapture) * pDescriptorCapture->channels, &pDevice->pContext->allocationCallbacks); + if (pDevice->jack.ppPortsCapture == NULL) { + return MA_OUT_OF_MEMORY; + } + + for (iPort = 0; iPort < pDescriptorCapture->channels; iPort += 1) { + char name[64]; + ma_strcpy_s(name, sizeof(name), "capture"); + ma_itoa_s((int)iPort, name+7, sizeof(name)-7, 10); /* 7 = length of "capture" */ + + pDevice->jack.ppPortsCapture[iPort] = ((ma_jack_port_register_proc)pDevice->pContext->jack.jack_port_register)((ma_jack_client_t*)pDevice->jack.pClient, name, MA_JACK_DEFAULT_AUDIO_TYPE, ma_JackPortIsInput, 0); + if (pDevice->jack.ppPortsCapture[iPort] == NULL) { + ((ma_jack_free_proc)pDevice->pContext->jack.jack_free)((void*)ppPorts); + ma_device_uninit__jack(pDevice); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[JACK] Failed to register ports."); + return MA_FAILED_TO_OPEN_BACKEND_DEVICE; + } + } + + ((ma_jack_free_proc)pDevice->pContext->jack.jack_free)((void*)ppPorts); + + pDescriptorCapture->periodSizeInFrames = periodSizeInFrames; + pDescriptorCapture->periodCount = 1; /* There's no notion of a period in JACK. Just set to 1. 
*/ + + pDevice->jack.pIntermediaryBufferCapture = (float*)ma_calloc(pDescriptorCapture->periodSizeInFrames * ma_get_bytes_per_frame(pDescriptorCapture->format, pDescriptorCapture->channels), &pDevice->pContext->allocationCallbacks); + if (pDevice->jack.pIntermediaryBufferCapture == NULL) { + ma_device_uninit__jack(pDevice); + return MA_OUT_OF_MEMORY; + } + } + + if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) { + ma_uint32 iPort; + const char** ppPorts; + + pDescriptorPlayback->format = ma_format_f32; + pDescriptorPlayback->channels = 0; + pDescriptorPlayback->sampleRate = ((ma_jack_get_sample_rate_proc)pDevice->pContext->jack.jack_get_sample_rate)((ma_jack_client_t*)pDevice->jack.pClient); + ma_channel_map_init_standard(ma_standard_channel_map_alsa, pDescriptorPlayback->channelMap, ma_countof(pDescriptorPlayback->channelMap), pDescriptorPlayback->channels); + + ppPorts = ((ma_jack_get_ports_proc)pDevice->pContext->jack.jack_get_ports)((ma_jack_client_t*)pDevice->jack.pClient, NULL, MA_JACK_DEFAULT_AUDIO_TYPE, ma_JackPortIsPhysical | ma_JackPortIsInput); + if (ppPorts == NULL) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[JACK] Failed to query physical ports."); + return MA_FAILED_TO_OPEN_BACKEND_DEVICE; + } + + /* Need to count the number of ports first so we can allocate some memory. */ + while (ppPorts[pDescriptorPlayback->channels] != NULL) { + pDescriptorPlayback->channels += 1; + } + + pDevice->jack.ppPortsPlayback = (ma_ptr*)ma_malloc(sizeof(*pDevice->jack.ppPortsPlayback) * pDescriptorPlayback->channels, &pDevice->pContext->allocationCallbacks); + if (pDevice->jack.ppPortsPlayback == NULL) { + ma_free(pDevice->jack.ppPortsCapture, &pDevice->pContext->allocationCallbacks); + return MA_OUT_OF_MEMORY; + } + + for (iPort = 0; iPort < pDescriptorPlayback->channels; iPort += 1) { + char name[64]; + ma_strcpy_s(name, sizeof(name), "playback"); + ma_itoa_s((int)iPort, name+8, sizeof(name)-8, 10); /* 8 = length of "playback" */ + + pDevice->jack.ppPortsPlayback[iPort] = ((ma_jack_port_register_proc)pDevice->pContext->jack.jack_port_register)((ma_jack_client_t*)pDevice->jack.pClient, name, MA_JACK_DEFAULT_AUDIO_TYPE, ma_JackPortIsOutput, 0); + if (pDevice->jack.ppPortsPlayback[iPort] == NULL) { + ((ma_jack_free_proc)pDevice->pContext->jack.jack_free)((void*)ppPorts); + ma_device_uninit__jack(pDevice); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[JACK] Failed to register ports."); + return MA_FAILED_TO_OPEN_BACKEND_DEVICE; + } + } + + ((ma_jack_free_proc)pDevice->pContext->jack.jack_free)((void*)ppPorts); + + pDescriptorPlayback->periodSizeInFrames = periodSizeInFrames; + pDescriptorPlayback->periodCount = 1; /* There's no notion of a period in JACK. Just set to 1. 
*/ + + pDevice->jack.pIntermediaryBufferPlayback = (float*)ma_calloc(pDescriptorPlayback->periodSizeInFrames * ma_get_bytes_per_frame(pDescriptorPlayback->format, pDescriptorPlayback->channels), &pDevice->pContext->allocationCallbacks); + if (pDevice->jack.pIntermediaryBufferPlayback == NULL) { + ma_device_uninit__jack(pDevice); + return MA_OUT_OF_MEMORY; + } + } + + return MA_SUCCESS; +} + + +static ma_result ma_device_start__jack(ma_device* pDevice) +{ + ma_context* pContext = pDevice->pContext; + int resultJACK; + size_t i; + + resultJACK = ((ma_jack_activate_proc)pContext->jack.jack_activate)((ma_jack_client_t*)pDevice->jack.pClient); + if (resultJACK != 0) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[JACK] Failed to activate the JACK client."); + return MA_FAILED_TO_START_BACKEND_DEVICE; + } + + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + const char** ppServerPorts = ((ma_jack_get_ports_proc)pContext->jack.jack_get_ports)((ma_jack_client_t*)pDevice->jack.pClient, NULL, MA_JACK_DEFAULT_AUDIO_TYPE, ma_JackPortIsPhysical | ma_JackPortIsOutput); + if (ppServerPorts == NULL) { + ((ma_jack_deactivate_proc)pContext->jack.jack_deactivate)((ma_jack_client_t*)pDevice->jack.pClient); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[JACK] Failed to retrieve physical ports."); + return MA_ERROR; + } + + for (i = 0; ppServerPorts[i] != NULL; ++i) { + const char* pServerPort = ppServerPorts[i]; + const char* pClientPort = ((ma_jack_port_name_proc)pContext->jack.jack_port_name)((ma_jack_port_t*)pDevice->jack.ppPortsCapture[i]); + + resultJACK = ((ma_jack_connect_proc)pContext->jack.jack_connect)((ma_jack_client_t*)pDevice->jack.pClient, pServerPort, pClientPort); + if (resultJACK != 0) { + ((ma_jack_free_proc)pContext->jack.jack_free)((void*)ppServerPorts); + ((ma_jack_deactivate_proc)pContext->jack.jack_deactivate)((ma_jack_client_t*)pDevice->jack.pClient); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[JACK] Failed to connect ports."); + return MA_ERROR; + } + } + + ((ma_jack_free_proc)pContext->jack.jack_free)((void*)ppServerPorts); + } + + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + const char** ppServerPorts = ((ma_jack_get_ports_proc)pContext->jack.jack_get_ports)((ma_jack_client_t*)pDevice->jack.pClient, NULL, MA_JACK_DEFAULT_AUDIO_TYPE, ma_JackPortIsPhysical | ma_JackPortIsInput); + if (ppServerPorts == NULL) { + ((ma_jack_deactivate_proc)pContext->jack.jack_deactivate)((ma_jack_client_t*)pDevice->jack.pClient); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[JACK] Failed to retrieve physical ports."); + return MA_ERROR; + } + + for (i = 0; ppServerPorts[i] != NULL; ++i) { + const char* pServerPort = ppServerPorts[i]; + const char* pClientPort = ((ma_jack_port_name_proc)pContext->jack.jack_port_name)((ma_jack_port_t*)pDevice->jack.ppPortsPlayback[i]); + + resultJACK = ((ma_jack_connect_proc)pContext->jack.jack_connect)((ma_jack_client_t*)pDevice->jack.pClient, pClientPort, pServerPort); + if (resultJACK != 0) { + ((ma_jack_free_proc)pContext->jack.jack_free)((void*)ppServerPorts); + ((ma_jack_deactivate_proc)pContext->jack.jack_deactivate)((ma_jack_client_t*)pDevice->jack.pClient); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[JACK] Failed to connect ports."); + return MA_ERROR; + } + } + + ((ma_jack_free_proc)pContext->jack.jack_free)((void*)ppServerPorts); + } + + return MA_SUCCESS; +} + +static ma_result 
ma_device_stop__jack(ma_device* pDevice) +{ + ma_context* pContext = pDevice->pContext; + + if (((ma_jack_deactivate_proc)pContext->jack.jack_deactivate)((ma_jack_client_t*)pDevice->jack.pClient) != 0) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[JACK] An error occurred when deactivating the JACK client."); + return MA_ERROR; + } + + ma_device__on_notification_stopped(pDevice); + + return MA_SUCCESS; +} + + +static ma_result ma_context_uninit__jack(ma_context* pContext) +{ + MA_ASSERT(pContext != NULL); + MA_ASSERT(pContext->backend == ma_backend_jack); + + ma_free(pContext->jack.pClientName, &pContext->allocationCallbacks); + pContext->jack.pClientName = NULL; + +#ifndef MA_NO_RUNTIME_LINKING + ma_dlclose(ma_context_get_log(pContext), pContext->jack.jackSO); +#endif + + return MA_SUCCESS; +} + +static ma_result ma_context_init__jack(ma_context* pContext, const ma_context_config* pConfig, ma_backend_callbacks* pCallbacks) +{ +#ifndef MA_NO_RUNTIME_LINKING + const char* libjackNames[] = { +#if defined(MA_WIN32) + "libjack.dll", + "libjack64.dll" +#endif +#if defined(MA_UNIX) + "libjack.so", + "libjack.so.0" +#endif + }; + size_t i; + + for (i = 0; i < ma_countof(libjackNames); ++i) { + pContext->jack.jackSO = ma_dlopen(ma_context_get_log(pContext), libjackNames[i]); + if (pContext->jack.jackSO != NULL) { + break; + } + } + + if (pContext->jack.jackSO == NULL) { + return MA_NO_BACKEND; + } + + pContext->jack.jack_client_open = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->jack.jackSO, "jack_client_open"); + pContext->jack.jack_client_close = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->jack.jackSO, "jack_client_close"); + pContext->jack.jack_client_name_size = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->jack.jackSO, "jack_client_name_size"); + pContext->jack.jack_set_process_callback = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->jack.jackSO, "jack_set_process_callback"); + pContext->jack.jack_set_buffer_size_callback = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->jack.jackSO, "jack_set_buffer_size_callback"); + pContext->jack.jack_on_shutdown = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->jack.jackSO, "jack_on_shutdown"); + pContext->jack.jack_get_sample_rate = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->jack.jackSO, "jack_get_sample_rate"); + pContext->jack.jack_get_buffer_size = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->jack.jackSO, "jack_get_buffer_size"); + pContext->jack.jack_get_ports = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->jack.jackSO, "jack_get_ports"); + pContext->jack.jack_activate = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->jack.jackSO, "jack_activate"); + pContext->jack.jack_deactivate = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->jack.jackSO, "jack_deactivate"); + pContext->jack.jack_connect = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->jack.jackSO, "jack_connect"); + pContext->jack.jack_port_register = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->jack.jackSO, "jack_port_register"); + pContext->jack.jack_port_name = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->jack.jackSO, "jack_port_name"); + pContext->jack.jack_port_get_buffer = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->jack.jackSO, "jack_port_get_buffer"); + pContext->jack.jack_free = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->jack.jackSO, "jack_free"); +#else + /* + This strange 
assignment system is here just to ensure type safety of miniaudio's function pointer + types. If anything differs slightly the compiler should throw a warning. + */ + ma_jack_client_open_proc _jack_client_open = jack_client_open; + ma_jack_client_close_proc _jack_client_close = jack_client_close; + ma_jack_client_name_size_proc _jack_client_name_size = jack_client_name_size; + ma_jack_set_process_callback_proc _jack_set_process_callback = jack_set_process_callback; + ma_jack_set_buffer_size_callback_proc _jack_set_buffer_size_callback = jack_set_buffer_size_callback; + ma_jack_on_shutdown_proc _jack_on_shutdown = jack_on_shutdown; + ma_jack_get_sample_rate_proc _jack_get_sample_rate = jack_get_sample_rate; + ma_jack_get_buffer_size_proc _jack_get_buffer_size = jack_get_buffer_size; + ma_jack_get_ports_proc _jack_get_ports = jack_get_ports; + ma_jack_activate_proc _jack_activate = jack_activate; + ma_jack_deactivate_proc _jack_deactivate = jack_deactivate; + ma_jack_connect_proc _jack_connect = jack_connect; + ma_jack_port_register_proc _jack_port_register = jack_port_register; + ma_jack_port_name_proc _jack_port_name = jack_port_name; + ma_jack_port_get_buffer_proc _jack_port_get_buffer = jack_port_get_buffer; + ma_jack_free_proc _jack_free = jack_free; + + pContext->jack.jack_client_open = (ma_proc)_jack_client_open; + pContext->jack.jack_client_close = (ma_proc)_jack_client_close; + pContext->jack.jack_client_name_size = (ma_proc)_jack_client_name_size; + pContext->jack.jack_set_process_callback = (ma_proc)_jack_set_process_callback; + pContext->jack.jack_set_buffer_size_callback = (ma_proc)_jack_set_buffer_size_callback; + pContext->jack.jack_on_shutdown = (ma_proc)_jack_on_shutdown; + pContext->jack.jack_get_sample_rate = (ma_proc)_jack_get_sample_rate; + pContext->jack.jack_get_buffer_size = (ma_proc)_jack_get_buffer_size; + pContext->jack.jack_get_ports = (ma_proc)_jack_get_ports; + pContext->jack.jack_activate = (ma_proc)_jack_activate; + pContext->jack.jack_deactivate = (ma_proc)_jack_deactivate; + pContext->jack.jack_connect = (ma_proc)_jack_connect; + pContext->jack.jack_port_register = (ma_proc)_jack_port_register; + pContext->jack.jack_port_name = (ma_proc)_jack_port_name; + pContext->jack.jack_port_get_buffer = (ma_proc)_jack_port_get_buffer; + pContext->jack.jack_free = (ma_proc)_jack_free; +#endif + + if (pConfig->jack.pClientName != NULL) { + pContext->jack.pClientName = ma_copy_string(pConfig->jack.pClientName, &pContext->allocationCallbacks); + } + pContext->jack.tryStartServer = pConfig->jack.tryStartServer; + + /* + Getting here means the JACK library is installed, but it doesn't necessarily mean it's usable. We need to quickly test this by connecting + a temporary client. 
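+    If the test fails we report MA_NO_BACKEND rather than a hard error so that context
+    initialization can fall through and try the next backend in the priority list.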
+    */
+    {
+        ma_jack_client_t* pDummyClient;
+        ma_result result = ma_context_open_client__jack(pContext, &pDummyClient);
+        if (result != MA_SUCCESS) {
+            ma_free(pContext->jack.pClientName, &pContext->allocationCallbacks);
+        #ifndef MA_NO_RUNTIME_LINKING
+            ma_dlclose(ma_context_get_log(pContext), pContext->jack.jackSO);
+        #endif
+            return MA_NO_BACKEND;
+        }
+
+        ((ma_jack_client_close_proc)pContext->jack.jack_client_close)((ma_jack_client_t*)pDummyClient);
+    }
+
+
+    pCallbacks->onContextInit = ma_context_init__jack;
+    pCallbacks->onContextUninit = ma_context_uninit__jack;
+    pCallbacks->onContextEnumerateDevices = ma_context_enumerate_devices__jack;
+    pCallbacks->onContextGetDeviceInfo = ma_context_get_device_info__jack;
+    pCallbacks->onDeviceInit = ma_device_init__jack;
+    pCallbacks->onDeviceUninit = ma_device_uninit__jack;
+    pCallbacks->onDeviceStart = ma_device_start__jack;
+    pCallbacks->onDeviceStop = ma_device_stop__jack;
+    pCallbacks->onDeviceRead = NULL; /* Not used because JACK is asynchronous. */
+    pCallbacks->onDeviceWrite = NULL; /* Not used because JACK is asynchronous. */
+    pCallbacks->onDeviceDataLoop = NULL; /* Not used because JACK is asynchronous. */
+
+    return MA_SUCCESS;
+}
+#endif /* MA_HAS_JACK */
+
+
+
+/******************************************************************************
+
+Core Audio Backend
+
+References
+==========
+- Technical Note TN2091: Device input using the HAL Output Audio Unit
+  https://developer.apple.com/library/archive/technotes/tn2091/_index.html
+
+******************************************************************************/
+#ifdef MA_HAS_COREAUDIO
+#include <TargetConditionals.h>
+
+#if defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE == 1
+    #define MA_APPLE_MOBILE
+    #if defined(TARGET_OS_TV) && TARGET_OS_TV == 1
+        #define MA_APPLE_TV
+    #endif
+    #if defined(TARGET_OS_WATCH) && TARGET_OS_WATCH == 1
+        #define MA_APPLE_WATCH
+    #endif
+    #if __has_feature(objc_arc)
+        #define MA_BRIDGE_TRANSFER __bridge_transfer
+        #define MA_BRIDGE_RETAINED __bridge_retained
+    #else
+        #define MA_BRIDGE_TRANSFER
+        #define MA_BRIDGE_RETAINED
+    #endif
+#else
+    #define MA_APPLE_DESKTOP
+#endif
+
+#if defined(MA_APPLE_DESKTOP)
+#include <CoreAudio/CoreAudio.h>
+#else
+#include <AVFoundation/AVFoundation.h>
+#endif
+
+#include <AudioToolbox/AudioToolbox.h>
+
+/* CoreFoundation */
+typedef Boolean (* ma_CFStringGetCString_proc)(CFStringRef theString, char* buffer, CFIndex bufferSize, CFStringEncoding encoding);
+typedef void (* ma_CFRelease_proc)(CFTypeRef cf);
+
+/* CoreAudio */
+#if defined(MA_APPLE_DESKTOP)
+typedef OSStatus (* ma_AudioObjectGetPropertyData_proc)(AudioObjectID inObjectID, const AudioObjectPropertyAddress* inAddress, UInt32 inQualifierDataSize, const void* inQualifierData, UInt32* ioDataSize, void* outData);
+typedef OSStatus (* ma_AudioObjectGetPropertyDataSize_proc)(AudioObjectID inObjectID, const AudioObjectPropertyAddress* inAddress, UInt32 inQualifierDataSize, const void* inQualifierData, UInt32* outDataSize);
+typedef OSStatus (* ma_AudioObjectSetPropertyData_proc)(AudioObjectID inObjectID, const AudioObjectPropertyAddress* inAddress, UInt32 inQualifierDataSize, const void* inQualifierData, UInt32 inDataSize, const void* inData);
+typedef OSStatus (* ma_AudioObjectAddPropertyListener_proc)(AudioObjectID inObjectID, const AudioObjectPropertyAddress* inAddress, AudioObjectPropertyListenerProc inListener, void* inClientData);
+typedef OSStatus (* ma_AudioObjectRemovePropertyListener_proc)(AudioObjectID inObjectID, const AudioObjectPropertyAddress* inAddress, AudioObjectPropertyListenerProc inListener, void* inClientData);
+#endif
+
+/* AudioToolbox */
+/* AudioToolbox */
+typedef AudioComponent (* ma_AudioComponentFindNext_proc)(AudioComponent inComponent, const AudioComponentDescription* inDesc);
+typedef OSStatus (* ma_AudioComponentInstanceDispose_proc)(AudioComponentInstance inInstance);
+typedef OSStatus (* ma_AudioComponentInstanceNew_proc)(AudioComponent inComponent, AudioComponentInstance* outInstance);
+typedef OSStatus (* ma_AudioOutputUnitStart_proc)(AudioUnit inUnit);
+typedef OSStatus (* ma_AudioOutputUnitStop_proc)(AudioUnit inUnit);
+typedef OSStatus (* ma_AudioUnitAddPropertyListener_proc)(AudioUnit inUnit, AudioUnitPropertyID inID, AudioUnitPropertyListenerProc inProc, void* inProcUserData);
+typedef OSStatus (* ma_AudioUnitGetPropertyInfo_proc)(AudioUnit inUnit, AudioUnitPropertyID inID, AudioUnitScope inScope, AudioUnitElement inElement, UInt32* outDataSize, Boolean* outWriteable);
+typedef OSStatus (* ma_AudioUnitGetProperty_proc)(AudioUnit inUnit, AudioUnitPropertyID inID, AudioUnitScope inScope, AudioUnitElement inElement, void* outData, UInt32* ioDataSize);
+typedef OSStatus (* ma_AudioUnitSetProperty_proc)(AudioUnit inUnit, AudioUnitPropertyID inID, AudioUnitScope inScope, AudioUnitElement inElement, const void* inData, UInt32 inDataSize);
+typedef OSStatus (* ma_AudioUnitInitialize_proc)(AudioUnit inUnit);
+typedef OSStatus (* ma_AudioUnitRender_proc)(AudioUnit inUnit, AudioUnitRenderActionFlags* ioActionFlags, const AudioTimeStamp* inTimeStamp, UInt32 inOutputBusNumber, UInt32 inNumberFrames, AudioBufferList* ioData);
+
+
+#define MA_COREAUDIO_OUTPUT_BUS 0
+#define MA_COREAUDIO_INPUT_BUS 1
+
+#if defined(MA_APPLE_DESKTOP)
+static ma_result ma_device_reinit_internal__coreaudio(ma_device* pDevice, ma_device_type deviceType, ma_bool32 disposePreviousAudioUnit);
+#endif
+
+/*
+Core Audio
+
+So far, Core Audio has been the worst backend to work with due to being both unintuitive and having almost no documentation
+apart from comments in the headers (which admittedly are quite good). For my own purposes, and for anybody out there who needs
+to figure out how this darn thing works, I'm going to outline a few things here.
+
+Since miniaudio is a fairly low-level API, one of the things it needs is control over specific devices, and it needs to be
+able to identify whether or not a device can be used for playback and/or capture. The AudioObject API is the only one I've seen
+that supports this level of detail. There was some public domain sample code I stumbled across that used the AudioComponent
+and AudioUnit APIs, but I couldn't see anything that gave low-level control over device selection and capabilities (the
+distinction between playback and capture in particular). Therefore, miniaudio is using the AudioObject API.
+
+Most (all?) functions in the AudioObject API take an AudioObjectID as their input. This is the device identifier. When
+retrieving global information, such as the device list, you use kAudioObjectSystemObject. When retrieving device-specific
+data, you pass in the ID for that device. In order to retrieve device-specific IDs you need to enumerate over each of the
+devices. This is done using the AudioObjectGetPropertyDataSize() and AudioObjectGetPropertyData() APIs which seem to be
+the central APIs for retrieving information about the system and specific devices.
+
+To use the AudioObjectGetPropertyData() API you need to use the notion of a property address. A property address is a
+structure with three variables and is used to identify which property you are getting or setting.
The first is the "selector" +which is basically the specific property that you're wanting to retrieve or set. The second is the "scope", which is +typically set to kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyScopeInput for input-specific properties and +kAudioObjectPropertyScopeOutput for output-specific properties. The last is the "element" which is always set to +kAudioObjectPropertyElementMain in miniaudio's case. I don't know of any cases where this would be set to anything different. + +Back to the earlier issue of device retrieval, you first use the AudioObjectGetPropertyDataSize() API to retrieve the size +of the raw data which is just a list of AudioDeviceID's. You use the kAudioObjectSystemObject AudioObjectID, and a property +address with the kAudioHardwarePropertyDevices selector and the kAudioObjectPropertyScopeGlobal scope. Once you have the +size, allocate a block of memory of that size and then call AudioObjectGetPropertyData(). The data is just a list of +AudioDeviceID's so just do "dataSize/sizeof(AudioDeviceID)" to know the device count. +*/ + +#if defined(MA_APPLE_MOBILE) +static void ma_device__on_notification_interruption_began(ma_device* pDevice) +{ + ma_device__on_notification(ma_device_notification_init(pDevice, ma_device_notification_type_interruption_began)); +} + +static void ma_device__on_notification_interruption_ended(ma_device* pDevice) +{ + ma_device__on_notification(ma_device_notification_init(pDevice, ma_device_notification_type_interruption_ended)); +} +#endif + +static ma_result ma_result_from_OSStatus(OSStatus status) +{ + switch (status) + { + case noErr: return MA_SUCCESS; + #if defined(MA_APPLE_DESKTOP) + case kAudioHardwareNotRunningError: return MA_DEVICE_NOT_STARTED; + case kAudioHardwareUnspecifiedError: return MA_ERROR; + case kAudioHardwareUnknownPropertyError: return MA_INVALID_ARGS; + case kAudioHardwareBadPropertySizeError: return MA_INVALID_OPERATION; + case kAudioHardwareIllegalOperationError: return MA_INVALID_OPERATION; + case kAudioHardwareBadObjectError: return MA_INVALID_ARGS; + case kAudioHardwareBadDeviceError: return MA_INVALID_ARGS; + case kAudioHardwareBadStreamError: return MA_INVALID_ARGS; + case kAudioHardwareUnsupportedOperationError: return MA_INVALID_OPERATION; + case kAudioDeviceUnsupportedFormatError: return MA_FORMAT_NOT_SUPPORTED; + case kAudioDevicePermissionsError: return MA_ACCESS_DENIED; + #endif + default: return MA_ERROR; + } +} + +#if 0 +static ma_channel ma_channel_from_AudioChannelBitmap(AudioChannelBitmap bit) +{ + switch (bit) + { + case kAudioChannelBit_Left: return MA_CHANNEL_LEFT; + case kAudioChannelBit_Right: return MA_CHANNEL_RIGHT; + case kAudioChannelBit_Center: return MA_CHANNEL_FRONT_CENTER; + case kAudioChannelBit_LFEScreen: return MA_CHANNEL_LFE; + case kAudioChannelBit_LeftSurround: return MA_CHANNEL_BACK_LEFT; + case kAudioChannelBit_RightSurround: return MA_CHANNEL_BACK_RIGHT; + case kAudioChannelBit_LeftCenter: return MA_CHANNEL_FRONT_LEFT_CENTER; + case kAudioChannelBit_RightCenter: return MA_CHANNEL_FRONT_RIGHT_CENTER; + case kAudioChannelBit_CenterSurround: return MA_CHANNEL_BACK_CENTER; + case kAudioChannelBit_LeftSurroundDirect: return MA_CHANNEL_SIDE_LEFT; + case kAudioChannelBit_RightSurroundDirect: return MA_CHANNEL_SIDE_RIGHT; + case kAudioChannelBit_TopCenterSurround: return MA_CHANNEL_TOP_CENTER; + case kAudioChannelBit_VerticalHeightLeft: return MA_CHANNEL_TOP_FRONT_LEFT; + case kAudioChannelBit_VerticalHeightCenter: return MA_CHANNEL_TOP_FRONT_CENTER; + case 
kAudioChannelBit_VerticalHeightRight: return MA_CHANNEL_TOP_FRONT_RIGHT;
+        case kAudioChannelBit_TopBackLeft: return MA_CHANNEL_TOP_BACK_LEFT;
+        case kAudioChannelBit_TopBackCenter: return MA_CHANNEL_TOP_BACK_CENTER;
+        case kAudioChannelBit_TopBackRight: return MA_CHANNEL_TOP_BACK_RIGHT;
+        default: return MA_CHANNEL_NONE;
+    }
+}
+#endif
+
+static ma_result ma_format_from_AudioStreamBasicDescription(const AudioStreamBasicDescription* pDescription, ma_format* pFormatOut)
+{
+    MA_ASSERT(pDescription != NULL);
+    MA_ASSERT(pFormatOut != NULL);
+
+    *pFormatOut = ma_format_unknown;    /* Safety. */
+
+    /* There are a few things miniaudio doesn't support. */
+    if (pDescription->mFormatID != kAudioFormatLinearPCM) {
+        return MA_FORMAT_NOT_SUPPORTED;
+    }
+
+    /* We don't support any non-packed formats that are aligned high. */
+    if ((pDescription->mFormatFlags & kLinearPCMFormatFlagIsAlignedHigh) != 0) {
+        return MA_FORMAT_NOT_SUPPORTED;
+    }
+
+    /* Only supporting native-endian. */
+    if ((ma_is_little_endian() && (pDescription->mFormatFlags & kAudioFormatFlagIsBigEndian) != 0) || (ma_is_big_endian() && (pDescription->mFormatFlags & kAudioFormatFlagIsBigEndian) == 0)) {
+        return MA_FORMAT_NOT_SUPPORTED;
+    }
+
+    /* We are not currently supporting non-interleaved formats (this will be added in a future version of miniaudio). */
+    /*if ((pDescription->mFormatFlags & kAudioFormatFlagIsNonInterleaved) != 0) {
+        return MA_FORMAT_NOT_SUPPORTED;
+    }*/
+
+    if ((pDescription->mFormatFlags & kLinearPCMFormatFlagIsFloat) != 0) {
+        if (pDescription->mBitsPerChannel == 32) {
+            *pFormatOut = ma_format_f32;
+            return MA_SUCCESS;
+        }
+    } else {
+        if ((pDescription->mFormatFlags & kLinearPCMFormatFlagIsSignedInteger) != 0) {
+            if (pDescription->mBitsPerChannel == 16) {
+                *pFormatOut = ma_format_s16;
+                return MA_SUCCESS;
+            } else if (pDescription->mBitsPerChannel == 24) {
+                if (pDescription->mBytesPerFrame == (pDescription->mBitsPerChannel/8 * pDescription->mChannelsPerFrame)) {
+                    *pFormatOut = ma_format_s24;
+                    return MA_SUCCESS;
+                } else {
+                    if (pDescription->mBytesPerFrame/pDescription->mChannelsPerFrame == sizeof(ma_int32)) {
+                        /* TODO: Implement ma_format_s24_32. */
+                        /**pFormatOut = ma_format_s24_32;*/
+                        /*return MA_SUCCESS;*/
+                        return MA_FORMAT_NOT_SUPPORTED;
+                    }
+                }
+            } else if (pDescription->mBitsPerChannel == 32) {
+                *pFormatOut = ma_format_s32;
+                return MA_SUCCESS;
+            }
+        } else {
+            if (pDescription->mBitsPerChannel == 8) {
+                *pFormatOut = ma_format_u8;
+                return MA_SUCCESS;
+            }
+        }
+    }
+
+    /* Getting here means the format is not supported.
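+    For reference, a description that does pass the checks above: mFormatID = kAudioFormatLinearPCM with
+    kLinearPCMFormatFlagIsFloat set and mBitsPerChannel = 32 maps to ma_format_f32, while the
+    signed-integer flag with mBitsPerChannel = 16 maps to ma_format_s16.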
*/ + return MA_FORMAT_NOT_SUPPORTED; +} + +#if defined(MA_APPLE_DESKTOP) +static ma_channel ma_channel_from_AudioChannelLabel(AudioChannelLabel label) +{ + switch (label) + { + case kAudioChannelLabel_Unknown: return MA_CHANNEL_NONE; + case kAudioChannelLabel_Unused: return MA_CHANNEL_NONE; + case kAudioChannelLabel_UseCoordinates: return MA_CHANNEL_NONE; + case kAudioChannelLabel_Left: return MA_CHANNEL_LEFT; + case kAudioChannelLabel_Right: return MA_CHANNEL_RIGHT; + case kAudioChannelLabel_Center: return MA_CHANNEL_FRONT_CENTER; + case kAudioChannelLabel_LFEScreen: return MA_CHANNEL_LFE; + case kAudioChannelLabel_LeftSurround: return MA_CHANNEL_BACK_LEFT; + case kAudioChannelLabel_RightSurround: return MA_CHANNEL_BACK_RIGHT; + case kAudioChannelLabel_LeftCenter: return MA_CHANNEL_FRONT_LEFT_CENTER; + case kAudioChannelLabel_RightCenter: return MA_CHANNEL_FRONT_RIGHT_CENTER; + case kAudioChannelLabel_CenterSurround: return MA_CHANNEL_BACK_CENTER; + case kAudioChannelLabel_LeftSurroundDirect: return MA_CHANNEL_SIDE_LEFT; + case kAudioChannelLabel_RightSurroundDirect: return MA_CHANNEL_SIDE_RIGHT; + case kAudioChannelLabel_TopCenterSurround: return MA_CHANNEL_TOP_CENTER; + case kAudioChannelLabel_VerticalHeightLeft: return MA_CHANNEL_TOP_FRONT_LEFT; + case kAudioChannelLabel_VerticalHeightCenter: return MA_CHANNEL_TOP_FRONT_CENTER; + case kAudioChannelLabel_VerticalHeightRight: return MA_CHANNEL_TOP_FRONT_RIGHT; + case kAudioChannelLabel_TopBackLeft: return MA_CHANNEL_TOP_BACK_LEFT; + case kAudioChannelLabel_TopBackCenter: return MA_CHANNEL_TOP_BACK_CENTER; + case kAudioChannelLabel_TopBackRight: return MA_CHANNEL_TOP_BACK_RIGHT; + case kAudioChannelLabel_RearSurroundLeft: return MA_CHANNEL_BACK_LEFT; + case kAudioChannelLabel_RearSurroundRight: return MA_CHANNEL_BACK_RIGHT; + case kAudioChannelLabel_LeftWide: return MA_CHANNEL_SIDE_LEFT; + case kAudioChannelLabel_RightWide: return MA_CHANNEL_SIDE_RIGHT; + case kAudioChannelLabel_LFE2: return MA_CHANNEL_LFE; + case kAudioChannelLabel_LeftTotal: return MA_CHANNEL_LEFT; + case kAudioChannelLabel_RightTotal: return MA_CHANNEL_RIGHT; + case kAudioChannelLabel_HearingImpaired: return MA_CHANNEL_NONE; + case kAudioChannelLabel_Narration: return MA_CHANNEL_MONO; + case kAudioChannelLabel_Mono: return MA_CHANNEL_MONO; + case kAudioChannelLabel_DialogCentricMix: return MA_CHANNEL_MONO; + case kAudioChannelLabel_CenterSurroundDirect: return MA_CHANNEL_BACK_CENTER; + case kAudioChannelLabel_Haptic: return MA_CHANNEL_NONE; + case kAudioChannelLabel_Ambisonic_W: return MA_CHANNEL_NONE; + case kAudioChannelLabel_Ambisonic_X: return MA_CHANNEL_NONE; + case kAudioChannelLabel_Ambisonic_Y: return MA_CHANNEL_NONE; + case kAudioChannelLabel_Ambisonic_Z: return MA_CHANNEL_NONE; + case kAudioChannelLabel_MS_Mid: return MA_CHANNEL_LEFT; + case kAudioChannelLabel_MS_Side: return MA_CHANNEL_RIGHT; + case kAudioChannelLabel_XY_X: return MA_CHANNEL_LEFT; + case kAudioChannelLabel_XY_Y: return MA_CHANNEL_RIGHT; + case kAudioChannelLabel_HeadphonesLeft: return MA_CHANNEL_LEFT; + case kAudioChannelLabel_HeadphonesRight: return MA_CHANNEL_RIGHT; + case kAudioChannelLabel_ClickTrack: return MA_CHANNEL_NONE; + case kAudioChannelLabel_ForeignLanguage: return MA_CHANNEL_NONE; + case kAudioChannelLabel_Discrete: return MA_CHANNEL_NONE; + case kAudioChannelLabel_Discrete_0: return MA_CHANNEL_AUX_0; + case kAudioChannelLabel_Discrete_1: return MA_CHANNEL_AUX_1; + case kAudioChannelLabel_Discrete_2: return MA_CHANNEL_AUX_2; + case kAudioChannelLabel_Discrete_3: return 
MA_CHANNEL_AUX_3; + case kAudioChannelLabel_Discrete_4: return MA_CHANNEL_AUX_4; + case kAudioChannelLabel_Discrete_5: return MA_CHANNEL_AUX_5; + case kAudioChannelLabel_Discrete_6: return MA_CHANNEL_AUX_6; + case kAudioChannelLabel_Discrete_7: return MA_CHANNEL_AUX_7; + case kAudioChannelLabel_Discrete_8: return MA_CHANNEL_AUX_8; + case kAudioChannelLabel_Discrete_9: return MA_CHANNEL_AUX_9; + case kAudioChannelLabel_Discrete_10: return MA_CHANNEL_AUX_10; + case kAudioChannelLabel_Discrete_11: return MA_CHANNEL_AUX_11; + case kAudioChannelLabel_Discrete_12: return MA_CHANNEL_AUX_12; + case kAudioChannelLabel_Discrete_13: return MA_CHANNEL_AUX_13; + case kAudioChannelLabel_Discrete_14: return MA_CHANNEL_AUX_14; + case kAudioChannelLabel_Discrete_15: return MA_CHANNEL_AUX_15; + case kAudioChannelLabel_Discrete_65535: return MA_CHANNEL_NONE; + + #if 0 /* Introduced in a later version of macOS. */ + case kAudioChannelLabel_HOA_ACN: return MA_CHANNEL_NONE; + case kAudioChannelLabel_HOA_ACN_0: return MA_CHANNEL_AUX_0; + case kAudioChannelLabel_HOA_ACN_1: return MA_CHANNEL_AUX_1; + case kAudioChannelLabel_HOA_ACN_2: return MA_CHANNEL_AUX_2; + case kAudioChannelLabel_HOA_ACN_3: return MA_CHANNEL_AUX_3; + case kAudioChannelLabel_HOA_ACN_4: return MA_CHANNEL_AUX_4; + case kAudioChannelLabel_HOA_ACN_5: return MA_CHANNEL_AUX_5; + case kAudioChannelLabel_HOA_ACN_6: return MA_CHANNEL_AUX_6; + case kAudioChannelLabel_HOA_ACN_7: return MA_CHANNEL_AUX_7; + case kAudioChannelLabel_HOA_ACN_8: return MA_CHANNEL_AUX_8; + case kAudioChannelLabel_HOA_ACN_9: return MA_CHANNEL_AUX_9; + case kAudioChannelLabel_HOA_ACN_10: return MA_CHANNEL_AUX_10; + case kAudioChannelLabel_HOA_ACN_11: return MA_CHANNEL_AUX_11; + case kAudioChannelLabel_HOA_ACN_12: return MA_CHANNEL_AUX_12; + case kAudioChannelLabel_HOA_ACN_13: return MA_CHANNEL_AUX_13; + case kAudioChannelLabel_HOA_ACN_14: return MA_CHANNEL_AUX_14; + case kAudioChannelLabel_HOA_ACN_15: return MA_CHANNEL_AUX_15; + case kAudioChannelLabel_HOA_ACN_65024: return MA_CHANNEL_NONE; + #endif + + default: return MA_CHANNEL_NONE; + } +} + +static ma_result ma_get_channel_map_from_AudioChannelLayout(AudioChannelLayout* pChannelLayout, ma_channel* pChannelMap, size_t channelMapCap) +{ + MA_ASSERT(pChannelLayout != NULL); + + if (pChannelLayout->mChannelLayoutTag == kAudioChannelLayoutTag_UseChannelDescriptions) { + UInt32 iChannel; + for (iChannel = 0; iChannel < pChannelLayout->mNumberChannelDescriptions && iChannel < channelMapCap; ++iChannel) { + pChannelMap[iChannel] = ma_channel_from_AudioChannelLabel(pChannelLayout->mChannelDescriptions[iChannel].mChannelLabel); + } + } else +#if 0 + if (pChannelLayout->mChannelLayoutTag == kAudioChannelLayoutTag_UseChannelBitmap) { + /* This is the same kind of system that's used by Windows audio APIs. */ + UInt32 iChannel = 0; + UInt32 iBit; + AudioChannelBitmap bitmap = pChannelLayout->mChannelBitmap; + for (iBit = 0; iBit < 32 && iChannel < channelMapCap; ++iBit) { + AudioChannelBitmap bit = bitmap & (1 << iBit); + if (bit != 0) { + pChannelMap[iChannel++] = ma_channel_from_AudioChannelBit(bit); + } + } + } else +#endif + { + /* + Need to use the tag to determine the channel map. For now I'm just assuming a default channel map, but later on this should + be updated to determine the mapping based on the tag. + */ + UInt32 channelCount; + + /* Our channel map retrieval APIs below take 32-bit integers, so we'll want to clamp the channel map capacity. 
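+    (channelMapCap arrives as a size_t, which on 64-bit targets is wider than the UInt32 that
+    AudioChannelLayoutTag_GetNumberOfChannels() and the ma_min() comparison below operate on, so a
+    capacity above 0xFFFFFFFF would otherwise wrap around in the (UInt32) cast.)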
*/ + if (channelMapCap > 0xFFFFFFFF) { + channelMapCap = 0xFFFFFFFF; + } + + channelCount = ma_min(AudioChannelLayoutTag_GetNumberOfChannels(pChannelLayout->mChannelLayoutTag), (UInt32)channelMapCap); + + switch (pChannelLayout->mChannelLayoutTag) + { + case kAudioChannelLayoutTag_Mono: + case kAudioChannelLayoutTag_Stereo: + case kAudioChannelLayoutTag_StereoHeadphones: + case kAudioChannelLayoutTag_MatrixStereo: + case kAudioChannelLayoutTag_MidSide: + case kAudioChannelLayoutTag_XY: + case kAudioChannelLayoutTag_Binaural: + case kAudioChannelLayoutTag_Ambisonic_B_Format: + { + ma_channel_map_init_standard(ma_standard_channel_map_default, pChannelMap, channelMapCap, channelCount); + } break; + + case kAudioChannelLayoutTag_Octagonal: + { + pChannelMap[7] = MA_CHANNEL_SIDE_RIGHT; + pChannelMap[6] = MA_CHANNEL_SIDE_LEFT; + } MA_FALLTHROUGH; /* Intentional fallthrough. */ + case kAudioChannelLayoutTag_Hexagonal: + { + pChannelMap[5] = MA_CHANNEL_BACK_CENTER; + } MA_FALLTHROUGH; /* Intentional fallthrough. */ + case kAudioChannelLayoutTag_Pentagonal: + { + pChannelMap[4] = MA_CHANNEL_FRONT_CENTER; + } MA_FALLTHROUGH; /* Intentional fallthrough. */ + case kAudioChannelLayoutTag_Quadraphonic: + { + pChannelMap[3] = MA_CHANNEL_BACK_RIGHT; + pChannelMap[2] = MA_CHANNEL_BACK_LEFT; + pChannelMap[1] = MA_CHANNEL_RIGHT; + pChannelMap[0] = MA_CHANNEL_LEFT; + } break; + + /* TODO: Add support for more tags here. */ + + default: + { + ma_channel_map_init_standard(ma_standard_channel_map_default, pChannelMap, channelMapCap, channelCount); + } break; + } + } + + return MA_SUCCESS; +} + +#if (defined(MAC_OS_VERSION_12_0) && MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_VERSION_12_0) || \ + (defined(__IPHONE_15_0) && __IPHONE_OS_VERSION_MAX_ALLOWED >= __IPHONE_15_0) +#define AUDIO_OBJECT_PROPERTY_ELEMENT kAudioObjectPropertyElementMain +#else +/* kAudioObjectPropertyElementMaster is deprecated. */ +#define AUDIO_OBJECT_PROPERTY_ELEMENT kAudioObjectPropertyElementMaster +#endif + +/* kAudioDevicePropertyScope* were renamed to kAudioObjectPropertyScope* in 10.8. */ +#if !defined(MAC_OS_X_VERSION_10_8) || (MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_8) +#define kAudioObjectPropertyScopeInput kAudioDevicePropertyScopeInput +#define kAudioObjectPropertyScopeOutput kAudioDevicePropertyScopeOutput +#endif + +static ma_result ma_get_device_object_ids__coreaudio(ma_context* pContext, UInt32* pDeviceCount, AudioObjectID** ppDeviceObjectIDs) /* NOTE: Free the returned buffer with ma_free(). */ +{ + AudioObjectPropertyAddress propAddressDevices; + UInt32 deviceObjectsDataSize; + OSStatus status; + AudioObjectID* pDeviceObjectIDs; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(pDeviceCount != NULL); + MA_ASSERT(ppDeviceObjectIDs != NULL); + + /* Safety. 
*/
+    *pDeviceCount = 0;
+    *ppDeviceObjectIDs = NULL;
+
+    propAddressDevices.mSelector = kAudioHardwarePropertyDevices;
+    propAddressDevices.mScope = kAudioObjectPropertyScopeGlobal;
+    propAddressDevices.mElement = AUDIO_OBJECT_PROPERTY_ELEMENT;
+
+    status = ((ma_AudioObjectGetPropertyDataSize_proc)pContext->coreaudio.AudioObjectGetPropertyDataSize)(kAudioObjectSystemObject, &propAddressDevices, 0, NULL, &deviceObjectsDataSize);
+    if (status != noErr) {
+        return ma_result_from_OSStatus(status);
+    }
+
+    pDeviceObjectIDs = (AudioObjectID*)ma_malloc(deviceObjectsDataSize, &pContext->allocationCallbacks);
+    if (pDeviceObjectIDs == NULL) {
+        return MA_OUT_OF_MEMORY;
+    }
+
+    status = ((ma_AudioObjectGetPropertyData_proc)pContext->coreaudio.AudioObjectGetPropertyData)(kAudioObjectSystemObject, &propAddressDevices, 0, NULL, &deviceObjectsDataSize, pDeviceObjectIDs);
+    if (status != noErr) {
+        ma_free(pDeviceObjectIDs, &pContext->allocationCallbacks);
+        return ma_result_from_OSStatus(status);
+    }
+
+    *pDeviceCount = deviceObjectsDataSize / sizeof(AudioObjectID);
+    *ppDeviceObjectIDs = pDeviceObjectIDs;
+
+    return MA_SUCCESS;
+}
+
+static ma_result ma_get_AudioObject_uid_as_CFStringRef(ma_context* pContext, AudioObjectID objectID, CFStringRef* pUID)
+{
+    AudioObjectPropertyAddress propAddress;
+    UInt32 dataSize;
+    OSStatus status;
+
+    MA_ASSERT(pContext != NULL);
+
+    propAddress.mSelector = kAudioDevicePropertyDeviceUID;
+    propAddress.mScope = kAudioObjectPropertyScopeGlobal;
+    propAddress.mElement = AUDIO_OBJECT_PROPERTY_ELEMENT;
+
+    dataSize = sizeof(*pUID);
+    status = ((ma_AudioObjectGetPropertyData_proc)pContext->coreaudio.AudioObjectGetPropertyData)(objectID, &propAddress, 0, NULL, &dataSize, pUID);
+    if (status != noErr) {
+        return ma_result_from_OSStatus(status);
+    }
+
+    return MA_SUCCESS;
+}
+
+static ma_result ma_get_AudioObject_uid(ma_context* pContext, AudioObjectID objectID, size_t bufferSize, char* bufferOut)
+{
+    CFStringRef uid;
+    ma_result result;
+
+    MA_ASSERT(pContext != NULL);
+
+    result = ma_get_AudioObject_uid_as_CFStringRef(pContext, objectID, &uid);
+    if (result != MA_SUCCESS) {
+        return result;
+    }
+
+    if (!((ma_CFStringGetCString_proc)pContext->coreaudio.CFStringGetCString)(uid, bufferOut, bufferSize, kCFStringEncodingUTF8)) {
+        ((ma_CFRelease_proc)pContext->coreaudio.CFRelease)(uid);    /* We own the string, so make sure it's not leaked when the conversion fails. */
+        return MA_ERROR;
+    }
+
+    ((ma_CFRelease_proc)pContext->coreaudio.CFRelease)(uid);
+    return MA_SUCCESS;
+}
+
+static ma_result ma_get_AudioObject_name(ma_context* pContext, AudioObjectID objectID, size_t bufferSize, char* bufferOut)
+{
+    AudioObjectPropertyAddress propAddress;
+    CFStringRef deviceName = NULL;
+    UInt32 dataSize;
+    OSStatus status;
+
+    MA_ASSERT(pContext != NULL);
+
+    propAddress.mSelector = kAudioDevicePropertyDeviceNameCFString;
+    propAddress.mScope = kAudioObjectPropertyScopeGlobal;
+    propAddress.mElement = AUDIO_OBJECT_PROPERTY_ELEMENT;
+
+    dataSize = sizeof(deviceName);
+    status = ((ma_AudioObjectGetPropertyData_proc)pContext->coreaudio.AudioObjectGetPropertyData)(objectID, &propAddress, 0, NULL, &dataSize, &deviceName);
+    if (status != noErr) {
+        return ma_result_from_OSStatus(status);
+    }
+
+    if (!((ma_CFStringGetCString_proc)pContext->coreaudio.CFStringGetCString)(deviceName, bufferOut, bufferSize, kCFStringEncodingUTF8)) {
+        ((ma_CFRelease_proc)pContext->coreaudio.CFRelease)(deviceName); /* As above, don't leak the string when the conversion fails. */
+        return MA_ERROR;
+    }
+
+    ((ma_CFRelease_proc)pContext->coreaudio.CFRelease)(deviceName);
+    return MA_SUCCESS;
+}
+
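+/*
+Illustration only (not part of the patch; ma_example_print_devices is a hypothetical name and
+assumes <stdio.h> is available): putting the helpers above together to list every device's name
+and UID.
+*/
+#if 0
+static void ma_example_print_devices(ma_context* pContext)
+{
+    UInt32 deviceCount;
+    AudioObjectID* pDeviceObjectIDs;
+    UInt32 iDevice;
+
+    if (ma_get_device_object_ids__coreaudio(pContext, &deviceCount, &pDeviceObjectIDs) != MA_SUCCESS) {
+        return;
+    }
+
+    for (iDevice = 0; iDevice < deviceCount; ++iDevice) {
+        char name[256];
+        char uid[256];
+        if (ma_get_AudioObject_name(pContext, pDeviceObjectIDs[iDevice], sizeof(name), name) == MA_SUCCESS &&
+            ma_get_AudioObject_uid(pContext, pDeviceObjectIDs[iDevice], sizeof(uid), uid) == MA_SUCCESS) {
+            printf("%s (%s)\n", name, uid);
+        }
+    }
+
+    /* The ID list is allocated by the helper; it's the caller's job to free it. */
+    ma_free(pDeviceObjectIDs, &pContext->allocationCallbacks);
+}
+#endif
+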
+static ma_bool32 ma_does_AudioObject_support_scope(ma_context* pContext, AudioObjectID deviceObjectID, AudioObjectPropertyScope scope)
+{
+    AudioObjectPropertyAddress propAddress;
+    UInt32 dataSize;
+    OSStatus status;
+    AudioBufferList* pBufferList;
+    ma_bool32 isSupported;
+
+    MA_ASSERT(pContext != NULL);
+
+    /* To know whether or not a device is an input device we need to look at its stream configuration. If it has an output channel it's a playback device. */
+    propAddress.mSelector = kAudioDevicePropertyStreamConfiguration;
+    propAddress.mScope = scope;
+    propAddress.mElement = AUDIO_OBJECT_PROPERTY_ELEMENT;
+
+    status = ((ma_AudioObjectGetPropertyDataSize_proc)pContext->coreaudio.AudioObjectGetPropertyDataSize)(deviceObjectID, &propAddress, 0, NULL, &dataSize);
+    if (status != noErr) {
+        return MA_FALSE;
+    }
+
+    pBufferList = (AudioBufferList*)ma_malloc(dataSize, &pContext->allocationCallbacks);
+    if (pBufferList == NULL) {
+        return MA_FALSE;    /* Out of memory. */
+    }
+
+    status = ((ma_AudioObjectGetPropertyData_proc)pContext->coreaudio.AudioObjectGetPropertyData)(deviceObjectID, &propAddress, 0, NULL, &dataSize, pBufferList);
+    if (status != noErr) {
+        ma_free(pBufferList, &pContext->allocationCallbacks);
+        return MA_FALSE;
+    }
+
+    isSupported = MA_FALSE;
+    if (pBufferList->mNumberBuffers > 0) {
+        isSupported = MA_TRUE;
+    }
+
+    ma_free(pBufferList, &pContext->allocationCallbacks);
+    return isSupported;
+}
+
+static ma_bool32 ma_does_AudioObject_support_playback(ma_context* pContext, AudioObjectID deviceObjectID)
+{
+    return ma_does_AudioObject_support_scope(pContext, deviceObjectID, kAudioObjectPropertyScopeOutput);
+}
+
+static ma_bool32 ma_does_AudioObject_support_capture(ma_context* pContext, AudioObjectID deviceObjectID)
+{
+    return ma_does_AudioObject_support_scope(pContext, deviceObjectID, kAudioObjectPropertyScopeInput);
+}
+
+
+static ma_result ma_get_AudioObject_stream_descriptions(ma_context* pContext, AudioObjectID deviceObjectID, ma_device_type deviceType, UInt32* pDescriptionCount, AudioStreamRangedDescription** ppDescriptions)    /* NOTE: Free the returned pointer with ma_free(). */
+{
+    AudioObjectPropertyAddress propAddress;
+    UInt32 dataSize;
+    OSStatus status;
+    AudioStreamRangedDescription* pDescriptions;
+
+    MA_ASSERT(pContext != NULL);
+    MA_ASSERT(pDescriptionCount != NULL);
+    MA_ASSERT(ppDescriptions != NULL);
+
+    /*
+    TODO: Experiment with kAudioStreamPropertyAvailablePhysicalFormats instead of (or in addition to) kAudioStreamPropertyAvailableVirtualFormats. My
+    MacBook Pro uses s24/32 format, however, which miniaudio does not currently support.
+    */
+    propAddress.mSelector = kAudioStreamPropertyAvailableVirtualFormats; /*kAudioStreamPropertyAvailablePhysicalFormats;*/
+    propAddress.mScope = (deviceType == ma_device_type_playback) ?
kAudioObjectPropertyScopeOutput : kAudioObjectPropertyScopeInput; + propAddress.mElement = AUDIO_OBJECT_PROPERTY_ELEMENT; + + status = ((ma_AudioObjectGetPropertyDataSize_proc)pContext->coreaudio.AudioObjectGetPropertyDataSize)(deviceObjectID, &propAddress, 0, NULL, &dataSize); + if (status != noErr) { + return ma_result_from_OSStatus(status); + } + + pDescriptions = (AudioStreamRangedDescription*)ma_malloc(dataSize, &pContext->allocationCallbacks); + if (pDescriptions == NULL) { + return MA_OUT_OF_MEMORY; + } + + status = ((ma_AudioObjectGetPropertyData_proc)pContext->coreaudio.AudioObjectGetPropertyData)(deviceObjectID, &propAddress, 0, NULL, &dataSize, pDescriptions); + if (status != noErr) { + ma_free(pDescriptions, &pContext->allocationCallbacks); + return ma_result_from_OSStatus(status); + } + + *pDescriptionCount = dataSize / sizeof(*pDescriptions); + *ppDescriptions = pDescriptions; + return MA_SUCCESS; +} + + +static ma_result ma_get_AudioObject_channel_layout(ma_context* pContext, AudioObjectID deviceObjectID, ma_device_type deviceType, AudioChannelLayout** ppChannelLayout) /* NOTE: Free the returned pointer with ma_free(). */ +{ + AudioObjectPropertyAddress propAddress; + UInt32 dataSize; + OSStatus status; + AudioChannelLayout* pChannelLayout; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(ppChannelLayout != NULL); + + *ppChannelLayout = NULL; /* Safety. */ + + propAddress.mSelector = kAudioDevicePropertyPreferredChannelLayout; + propAddress.mScope = (deviceType == ma_device_type_playback) ? kAudioObjectPropertyScopeOutput : kAudioObjectPropertyScopeInput; + propAddress.mElement = AUDIO_OBJECT_PROPERTY_ELEMENT; + + status = ((ma_AudioObjectGetPropertyDataSize_proc)pContext->coreaudio.AudioObjectGetPropertyDataSize)(deviceObjectID, &propAddress, 0, NULL, &dataSize); + if (status != noErr) { + return ma_result_from_OSStatus(status); + } + + pChannelLayout = (AudioChannelLayout*)ma_malloc(dataSize, &pContext->allocationCallbacks); + if (pChannelLayout == NULL) { + return MA_OUT_OF_MEMORY; + } + + status = ((ma_AudioObjectGetPropertyData_proc)pContext->coreaudio.AudioObjectGetPropertyData)(deviceObjectID, &propAddress, 0, NULL, &dataSize, pChannelLayout); + if (status != noErr) { + ma_free(pChannelLayout, &pContext->allocationCallbacks); + return ma_result_from_OSStatus(status); + } + + *ppChannelLayout = pChannelLayout; + return MA_SUCCESS; +} + +static ma_result ma_get_AudioObject_channel_count(ma_context* pContext, AudioObjectID deviceObjectID, ma_device_type deviceType, ma_uint32* pChannelCount) +{ + AudioChannelLayout* pChannelLayout; + ma_result result; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(pChannelCount != NULL); + + *pChannelCount = 0; /* Safety. 
*/ + + result = ma_get_AudioObject_channel_layout(pContext, deviceObjectID, deviceType, &pChannelLayout); + if (result != MA_SUCCESS) { + return result; + } + + if (pChannelLayout->mChannelLayoutTag == kAudioChannelLayoutTag_UseChannelDescriptions) { + *pChannelCount = pChannelLayout->mNumberChannelDescriptions; + } else if (pChannelLayout->mChannelLayoutTag == kAudioChannelLayoutTag_UseChannelBitmap) { + *pChannelCount = ma_count_set_bits(pChannelLayout->mChannelBitmap); + } else { + *pChannelCount = AudioChannelLayoutTag_GetNumberOfChannels(pChannelLayout->mChannelLayoutTag); + } + + ma_free(pChannelLayout, &pContext->allocationCallbacks); + return MA_SUCCESS; +} + +#if 0 +static ma_result ma_get_AudioObject_channel_map(ma_context* pContext, AudioObjectID deviceObjectID, ma_device_type deviceType, ma_channel* pChannelMap, size_t channelMapCap) +{ + AudioChannelLayout* pChannelLayout; + ma_result result; + + MA_ASSERT(pContext != NULL); + + result = ma_get_AudioObject_channel_layout(pContext, deviceObjectID, deviceType, &pChannelLayout); + if (result != MA_SUCCESS) { + return result; /* Rather than always failing here, would it be more robust to simply assume a default? */ + } + + result = ma_get_channel_map_from_AudioChannelLayout(pChannelLayout, pChannelMap, channelMapCap); + if (result != MA_SUCCESS) { + ma_free(pChannelLayout, &pContext->allocationCallbacks); + return result; + } + + ma_free(pChannelLayout, &pContext->allocationCallbacks); + return result; +} +#endif + +static ma_result ma_get_AudioObject_sample_rates(ma_context* pContext, AudioObjectID deviceObjectID, ma_device_type deviceType, UInt32* pSampleRateRangesCount, AudioValueRange** ppSampleRateRanges) /* NOTE: Free the returned pointer with ma_free(). */ +{ + AudioObjectPropertyAddress propAddress; + UInt32 dataSize; + OSStatus status; + AudioValueRange* pSampleRateRanges; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(pSampleRateRangesCount != NULL); + MA_ASSERT(ppSampleRateRanges != NULL); + + /* Safety. */ + *pSampleRateRangesCount = 0; + *ppSampleRateRanges = NULL; + + propAddress.mSelector = kAudioDevicePropertyAvailableNominalSampleRates; + propAddress.mScope = (deviceType == ma_device_type_playback) ? 
kAudioObjectPropertyScopeOutput : kAudioObjectPropertyScopeInput; + propAddress.mElement = AUDIO_OBJECT_PROPERTY_ELEMENT; + + status = ((ma_AudioObjectGetPropertyDataSize_proc)pContext->coreaudio.AudioObjectGetPropertyDataSize)(deviceObjectID, &propAddress, 0, NULL, &dataSize); + if (status != noErr) { + return ma_result_from_OSStatus(status); + } + + pSampleRateRanges = (AudioValueRange*)ma_malloc(dataSize, &pContext->allocationCallbacks); + if (pSampleRateRanges == NULL) { + return MA_OUT_OF_MEMORY; + } + + status = ((ma_AudioObjectGetPropertyData_proc)pContext->coreaudio.AudioObjectGetPropertyData)(deviceObjectID, &propAddress, 0, NULL, &dataSize, pSampleRateRanges); + if (status != noErr) { + ma_free(pSampleRateRanges, &pContext->allocationCallbacks); + return ma_result_from_OSStatus(status); + } + + *pSampleRateRangesCount = dataSize / sizeof(*pSampleRateRanges); + *ppSampleRateRanges = pSampleRateRanges; + return MA_SUCCESS; +} + +#if 0 +static ma_result ma_get_AudioObject_get_closest_sample_rate(ma_context* pContext, AudioObjectID deviceObjectID, ma_device_type deviceType, ma_uint32 sampleRateIn, ma_uint32* pSampleRateOut) +{ + UInt32 sampleRateRangeCount; + AudioValueRange* pSampleRateRanges; + ma_result result; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(pSampleRateOut != NULL); + + *pSampleRateOut = 0; /* Safety. */ + + result = ma_get_AudioObject_sample_rates(pContext, deviceObjectID, deviceType, &sampleRateRangeCount, &pSampleRateRanges); + if (result != MA_SUCCESS) { + return result; + } + + if (sampleRateRangeCount == 0) { + ma_free(pSampleRateRanges, &pContext->allocationCallbacks); + return MA_ERROR; /* Should never hit this case should we? */ + } + + if (sampleRateIn == 0) { + /* Search in order of miniaudio's preferred priority. */ + UInt32 iMALSampleRate; + for (iMALSampleRate = 0; iMALSampleRate < ma_countof(g_maStandardSampleRatePriorities); ++iMALSampleRate) { + ma_uint32 malSampleRate = g_maStandardSampleRatePriorities[iMALSampleRate]; + UInt32 iCASampleRate; + for (iCASampleRate = 0; iCASampleRate < sampleRateRangeCount; ++iCASampleRate) { + AudioValueRange caSampleRate = pSampleRateRanges[iCASampleRate]; + if (caSampleRate.mMinimum <= malSampleRate && caSampleRate.mMaximum >= malSampleRate) { + *pSampleRateOut = malSampleRate; + ma_free(pSampleRateRanges, &pContext->allocationCallbacks); + return MA_SUCCESS; + } + } + } + + /* + If we get here it means none of miniaudio's standard sample rates matched any of the supported sample rates from the device. In this + case we just fall back to the first one reported by Core Audio. + */ + MA_ASSERT(sampleRateRangeCount > 0); + + *pSampleRateOut = pSampleRateRanges[0].mMinimum; + ma_free(pSampleRateRanges, &pContext->allocationCallbacks); + return MA_SUCCESS; + } else { + /* Find the closest match to this sample rate. 
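+    The distance to a range is measured as (mMinimum - sampleRateIn) when the requested rate sits below
+    the range, or (sampleRateIn - mMaximum) when it sits above it; a rate that falls inside any range is
+    returned as-is, otherwise the range with the smallest distance wins and its mMinimum is used.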
*/ + UInt32 currentAbsoluteDifference = INT32_MAX; + UInt32 iCurrentClosestRange = (UInt32)-1; + UInt32 iRange; + for (iRange = 0; iRange < sampleRateRangeCount; ++iRange) { + if (pSampleRateRanges[iRange].mMinimum <= sampleRateIn && pSampleRateRanges[iRange].mMaximum >= sampleRateIn) { + *pSampleRateOut = sampleRateIn; + ma_free(pSampleRateRanges, &pContext->allocationCallbacks); + return MA_SUCCESS; + } else { + UInt32 absoluteDifference; + if (pSampleRateRanges[iRange].mMinimum > sampleRateIn) { + absoluteDifference = pSampleRateRanges[iRange].mMinimum - sampleRateIn; + } else { + absoluteDifference = sampleRateIn - pSampleRateRanges[iRange].mMaximum; + } + + if (currentAbsoluteDifference > absoluteDifference) { + currentAbsoluteDifference = absoluteDifference; + iCurrentClosestRange = iRange; + } + } + } + + MA_ASSERT(iCurrentClosestRange != (UInt32)-1); + + *pSampleRateOut = pSampleRateRanges[iCurrentClosestRange].mMinimum; + ma_free(pSampleRateRanges, &pContext->allocationCallbacks); + return MA_SUCCESS; + } + + /* Should never get here, but it would mean we weren't able to find any suitable sample rates. */ + /*ma_free(pSampleRateRanges, &pContext->allocationCallbacks);*/ + /*return MA_ERROR;*/ +} +#endif + +static ma_result ma_get_AudioObject_closest_buffer_size_in_frames(ma_context* pContext, AudioObjectID deviceObjectID, ma_device_type deviceType, ma_uint32 bufferSizeInFramesIn, ma_uint32* pBufferSizeInFramesOut) +{ + AudioObjectPropertyAddress propAddress; + AudioValueRange bufferSizeRange; + UInt32 dataSize; + OSStatus status; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(pBufferSizeInFramesOut != NULL); + + *pBufferSizeInFramesOut = 0; /* Safety. */ + + propAddress.mSelector = kAudioDevicePropertyBufferFrameSizeRange; + propAddress.mScope = (deviceType == ma_device_type_playback) ? kAudioObjectPropertyScopeOutput : kAudioObjectPropertyScopeInput; + propAddress.mElement = AUDIO_OBJECT_PROPERTY_ELEMENT; + + dataSize = sizeof(bufferSizeRange); + status = ((ma_AudioObjectGetPropertyData_proc)pContext->coreaudio.AudioObjectGetPropertyData)(deviceObjectID, &propAddress, 0, NULL, &dataSize, &bufferSizeRange); + if (status != noErr) { + return ma_result_from_OSStatus(status); + } + + /* This is just a clamp. */ + if (bufferSizeInFramesIn < bufferSizeRange.mMinimum) { + *pBufferSizeInFramesOut = (ma_uint32)bufferSizeRange.mMinimum; + } else if (bufferSizeInFramesIn > bufferSizeRange.mMaximum) { + *pBufferSizeInFramesOut = (ma_uint32)bufferSizeRange.mMaximum; + } else { + *pBufferSizeInFramesOut = bufferSizeInFramesIn; + } + + return MA_SUCCESS; +} + +static ma_result ma_set_AudioObject_buffer_size_in_frames(ma_context* pContext, AudioObjectID deviceObjectID, ma_device_type deviceType, ma_uint32* pPeriodSizeInOut) +{ + ma_result result; + ma_uint32 chosenBufferSizeInFrames; + AudioObjectPropertyAddress propAddress; + UInt32 dataSize; + OSStatus status; + + MA_ASSERT(pContext != NULL); + + result = ma_get_AudioObject_closest_buffer_size_in_frames(pContext, deviceObjectID, deviceType, *pPeriodSizeInOut, &chosenBufferSizeInFrames); + if (result != MA_SUCCESS) { + return result; + } + + /* Try setting the size of the buffer... If this fails we just use whatever is currently set. */ + propAddress.mSelector = kAudioDevicePropertyBufferFrameSize; + propAddress.mScope = (deviceType == ma_device_type_playback) ? 
kAudioObjectPropertyScopeOutput : kAudioObjectPropertyScopeInput; + propAddress.mElement = AUDIO_OBJECT_PROPERTY_ELEMENT; + + ((ma_AudioObjectSetPropertyData_proc)pContext->coreaudio.AudioObjectSetPropertyData)(deviceObjectID, &propAddress, 0, NULL, sizeof(chosenBufferSizeInFrames), &chosenBufferSizeInFrames); + + /* Get the actual size of the buffer. */ + dataSize = sizeof(*pPeriodSizeInOut); + status = ((ma_AudioObjectGetPropertyData_proc)pContext->coreaudio.AudioObjectGetPropertyData)(deviceObjectID, &propAddress, 0, NULL, &dataSize, &chosenBufferSizeInFrames); + if (status != noErr) { + return ma_result_from_OSStatus(status); + } + + *pPeriodSizeInOut = chosenBufferSizeInFrames; + return MA_SUCCESS; +} + +static ma_result ma_find_default_AudioObjectID(ma_context* pContext, ma_device_type deviceType, AudioObjectID* pDeviceObjectID) +{ + AudioObjectPropertyAddress propAddressDefaultDevice; + UInt32 defaultDeviceObjectIDSize = sizeof(AudioObjectID); + AudioObjectID defaultDeviceObjectID; + OSStatus status; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(pDeviceObjectID != NULL); + + /* Safety. */ + *pDeviceObjectID = 0; + + propAddressDefaultDevice.mScope = kAudioObjectPropertyScopeGlobal; + propAddressDefaultDevice.mElement = AUDIO_OBJECT_PROPERTY_ELEMENT; + if (deviceType == ma_device_type_playback) { + propAddressDefaultDevice.mSelector = kAudioHardwarePropertyDefaultOutputDevice; + } else { + propAddressDefaultDevice.mSelector = kAudioHardwarePropertyDefaultInputDevice; + } + + defaultDeviceObjectIDSize = sizeof(AudioObjectID); + status = ((ma_AudioObjectGetPropertyData_proc)pContext->coreaudio.AudioObjectGetPropertyData)(kAudioObjectSystemObject, &propAddressDefaultDevice, 0, NULL, &defaultDeviceObjectIDSize, &defaultDeviceObjectID); + if (status == noErr) { + *pDeviceObjectID = defaultDeviceObjectID; + return MA_SUCCESS; + } + + /* If we get here it means we couldn't find the device. */ + return MA_NO_DEVICE; +} + +static ma_result ma_find_AudioObjectID(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, AudioObjectID* pDeviceObjectID) +{ + MA_ASSERT(pContext != NULL); + MA_ASSERT(pDeviceObjectID != NULL); + + /* Safety. */ + *pDeviceObjectID = 0; + + if (pDeviceID == NULL) { + /* Default device. */ + return ma_find_default_AudioObjectID(pContext, deviceType, pDeviceObjectID); + } else { + /* Explicit device. 
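+    To find it we enumerate the full device list, read each device's UID and compare it against
+    pDeviceID->coreaudio, considering only devices that support the requested device type.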
*/
+        UInt32 deviceCount;
+        AudioObjectID* pDeviceObjectIDs;
+        ma_result result;
+        UInt32 iDevice;
+
+        result = ma_get_device_object_ids__coreaudio(pContext, &deviceCount, &pDeviceObjectIDs);
+        if (result != MA_SUCCESS) {
+            return result;
+        }
+
+        for (iDevice = 0; iDevice < deviceCount; ++iDevice) {
+            AudioObjectID deviceObjectID = pDeviceObjectIDs[iDevice];
+
+            char uid[256];
+            if (ma_get_AudioObject_uid(pContext, deviceObjectID, sizeof(uid), uid) != MA_SUCCESS) {
+                continue;
+            }
+
+            if (deviceType == ma_device_type_playback) {
+                if (ma_does_AudioObject_support_playback(pContext, deviceObjectID)) {
+                    if (strcmp(uid, pDeviceID->coreaudio) == 0) {
+                        *pDeviceObjectID = deviceObjectID;
+                        ma_free(pDeviceObjectIDs, &pContext->allocationCallbacks);
+                        return MA_SUCCESS;
+                    }
+                }
+            } else {
+                if (ma_does_AudioObject_support_capture(pContext, deviceObjectID)) {
+                    if (strcmp(uid, pDeviceID->coreaudio) == 0) {
+                        *pDeviceObjectID = deviceObjectID;
+                        ma_free(pDeviceObjectIDs, &pContext->allocationCallbacks);
+                        return MA_SUCCESS;
+                    }
+                }
+            }
+        }
+
+        ma_free(pDeviceObjectIDs, &pContext->allocationCallbacks);
+    }
+
+    /* If we get here it means we couldn't find the device. */
+    return MA_NO_DEVICE;
+}
+
+
+static ma_result ma_find_best_format__coreaudio(ma_context* pContext, AudioObjectID deviceObjectID, ma_device_type deviceType, ma_format format, ma_uint32 channels, ma_uint32 sampleRate, const AudioStreamBasicDescription* pOrigFormat, AudioStreamBasicDescription* pFormat)
+{
+    UInt32 deviceFormatDescriptionCount;
+    AudioStreamRangedDescription* pDeviceFormatDescriptions;
+    ma_result result;
+    ma_uint32 desiredSampleRate;
+    ma_uint32 desiredChannelCount;
+    ma_format desiredFormat;
+    AudioStreamBasicDescription bestDeviceFormatSoFar;
+    ma_bool32 hasSupportedFormat;
+    UInt32 iFormat;
+
+    result = ma_get_AudioObject_stream_descriptions(pContext, deviceObjectID, deviceType, &deviceFormatDescriptionCount, &pDeviceFormatDescriptions);
+    if (result != MA_SUCCESS) {
+        return result;
+    }
+
+    desiredSampleRate = sampleRate;
+    if (desiredSampleRate == 0) {
+        desiredSampleRate = (ma_uint32)pOrigFormat->mSampleRate;
+    }
+
+    desiredChannelCount = channels;
+    if (desiredChannelCount == 0) {
+        desiredChannelCount = pOrigFormat->mChannelsPerFrame;
+    }
+
+    desiredFormat = format;
+    if (desiredFormat == ma_format_unknown) {
+        result = ma_format_from_AudioStreamBasicDescription(pOrigFormat, &desiredFormat);
+        if (result != MA_SUCCESS || desiredFormat == ma_format_unknown) {
+            desiredFormat = g_maFormatPriorities[0];
+        }
+    }
+
+    /*
+    If we get here it means we don't have an exact match to what the client is asking for. We'll need to find the closest one. The next
+    loop will check for formats that have the same sample rate as what we're asking for. If there is one, we prefer it in all cases.
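+    In other words candidates are ranked by, in order of importance: a matching sample rate first, then a
+    matching channel count, then miniaudio's own format priority (ma_get_format_priority_index(), where a
+    lower index means a more preferable format).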
+ */ + MA_ZERO_OBJECT(&bestDeviceFormatSoFar); + + hasSupportedFormat = MA_FALSE; + for (iFormat = 0; iFormat < deviceFormatDescriptionCount; ++iFormat) { + ma_format formatFromDescription; + ma_result formatResult = ma_format_from_AudioStreamBasicDescription(&pDeviceFormatDescriptions[iFormat].mFormat, &formatFromDescription); + if (formatResult == MA_SUCCESS && formatFromDescription != ma_format_unknown) { + hasSupportedFormat = MA_TRUE; + bestDeviceFormatSoFar = pDeviceFormatDescriptions[iFormat].mFormat; + break; + } + } + + if (!hasSupportedFormat) { + ma_free(pDeviceFormatDescriptions, &pContext->allocationCallbacks); + return MA_FORMAT_NOT_SUPPORTED; + } + + + for (iFormat = 0; iFormat < deviceFormatDescriptionCount; ++iFormat) { + AudioStreamBasicDescription thisDeviceFormat = pDeviceFormatDescriptions[iFormat].mFormat; + ma_format thisSampleFormat; + ma_result formatResult; + ma_format bestSampleFormatSoFar; + + /* If the format is not supported by miniaudio we need to skip this one entirely. */ + formatResult = ma_format_from_AudioStreamBasicDescription(&pDeviceFormatDescriptions[iFormat].mFormat, &thisSampleFormat); + if (formatResult != MA_SUCCESS || thisSampleFormat == ma_format_unknown) { + continue; /* The format is not supported by miniaudio. Skip. */ + } + + ma_format_from_AudioStreamBasicDescription(&bestDeviceFormatSoFar, &bestSampleFormatSoFar); + + /* Getting here means the format is supported by miniaudio which makes this format a candidate. */ + if (thisDeviceFormat.mSampleRate != desiredSampleRate) { + /* + The sample rate does not match, but this format could still be usable, although it's a very low priority. If the best format + so far has an equal sample rate we can just ignore this one. + */ + if (bestDeviceFormatSoFar.mSampleRate == desiredSampleRate) { + continue; /* The best sample rate so far has the same sample rate as what we requested which means it's still the best so far. Skip this format. */ + } else { + /* In this case, neither the best format so far nor this one have the same sample rate. Check the channel count next. */ + if (thisDeviceFormat.mChannelsPerFrame != desiredChannelCount) { + /* This format has a different sample rate _and_ a different channel count. */ + if (bestDeviceFormatSoFar.mChannelsPerFrame == desiredChannelCount) { + continue; /* No change to the best format. */ + } else { + /* + Both this format and the best so far have different sample rates and different channel counts. Whichever has the + best format is the new best. + */ + if (ma_get_format_priority_index(thisSampleFormat) < ma_get_format_priority_index(bestSampleFormatSoFar)) { + bestDeviceFormatSoFar = thisDeviceFormat; + continue; + } else { + continue; /* No change to the best format. */ + } + } + } else { + /* This format has a different sample rate but the desired channel count. */ + if (bestDeviceFormatSoFar.mChannelsPerFrame == desiredChannelCount) { + /* Both this format and the best so far have the desired channel count. Whichever has the best format is the new best. */ + if (ma_get_format_priority_index(thisSampleFormat) < ma_get_format_priority_index(bestSampleFormatSoFar)) { + bestDeviceFormatSoFar = thisDeviceFormat; + continue; + } else { + continue; /* No change to the best format for now. */ + } + } else { + /* This format has the desired channel count, but the best so far does not. We have a new best. 
*/ + bestDeviceFormatSoFar = thisDeviceFormat; + continue; + } + } + } + } else { + /* + The sample rates match which makes this format a very high priority contender. If the best format so far has a different + sample rate it needs to be replaced with this one. + */ + if (bestDeviceFormatSoFar.mSampleRate != desiredSampleRate) { + bestDeviceFormatSoFar = thisDeviceFormat; + continue; + } else { + /* In this case both this format and the best format so far have the same sample rate. Check the channel count next. */ + if (thisDeviceFormat.mChannelsPerFrame == desiredChannelCount) { + /* + In this case this format has the same channel count as what the client is requesting. If the best format so far has + a different count, this one becomes the new best. + */ + if (bestDeviceFormatSoFar.mChannelsPerFrame != desiredChannelCount) { + bestDeviceFormatSoFar = thisDeviceFormat; + continue; + } else { + /* In this case both this format and the best so far have the ideal sample rate and channel count. Check the format. */ + if (thisSampleFormat == desiredFormat) { + bestDeviceFormatSoFar = thisDeviceFormat; + break; /* Found the exact match. */ + } else { + /* The formats are different. The new best format is the one with the highest priority format according to miniaudio. */ + if (ma_get_format_priority_index(thisSampleFormat) < ma_get_format_priority_index(bestSampleFormatSoFar)) { + bestDeviceFormatSoFar = thisDeviceFormat; + continue; + } else { + continue; /* No change to the best format for now. */ + } + } + } + } else { + /* + In this case the channel count is different to what the client has requested. If the best so far has the same channel + count as the requested count then it remains the best. + */ + if (bestDeviceFormatSoFar.mChannelsPerFrame == desiredChannelCount) { + continue; + } else { + /* + This is the case where both have the same sample rate (good) but different channel counts. Right now both have about + the same priority, but we need to compare the format now. + */ + if (thisSampleFormat == bestSampleFormatSoFar) { + if (ma_get_format_priority_index(thisSampleFormat) < ma_get_format_priority_index(bestSampleFormatSoFar)) { + bestDeviceFormatSoFar = thisDeviceFormat; + continue; + } else { + continue; /* No change to the best format for now. 
*/ + } + } + } + } + } + } + } + + *pFormat = bestDeviceFormatSoFar; + + ma_free(pDeviceFormatDescriptions, &pContext->allocationCallbacks); + return MA_SUCCESS; +} + +static ma_result ma_get_AudioUnit_channel_map(ma_context* pContext, AudioUnit audioUnit, ma_device_type deviceType, ma_channel* pChannelMap, size_t channelMapCap) +{ + AudioUnitScope deviceScope; + AudioUnitElement deviceBus; + UInt32 channelLayoutSize; + OSStatus status; + AudioChannelLayout* pChannelLayout; + ma_result result; + + MA_ASSERT(pContext != NULL); + + if (deviceType == ma_device_type_playback) { + deviceScope = kAudioUnitScope_Input; + deviceBus = MA_COREAUDIO_OUTPUT_BUS; + } else { + deviceScope = kAudioUnitScope_Output; + deviceBus = MA_COREAUDIO_INPUT_BUS; + } + + status = ((ma_AudioUnitGetPropertyInfo_proc)pContext->coreaudio.AudioUnitGetPropertyInfo)(audioUnit, kAudioUnitProperty_AudioChannelLayout, deviceScope, deviceBus, &channelLayoutSize, NULL); + if (status != noErr) { + return ma_result_from_OSStatus(status); + } + + pChannelLayout = (AudioChannelLayout*)ma_malloc(channelLayoutSize, &pContext->allocationCallbacks); + if (pChannelLayout == NULL) { + return MA_OUT_OF_MEMORY; + } + + status = ((ma_AudioUnitGetProperty_proc)pContext->coreaudio.AudioUnitGetProperty)(audioUnit, kAudioUnitProperty_AudioChannelLayout, deviceScope, deviceBus, pChannelLayout, &channelLayoutSize); + if (status != noErr) { + ma_free(pChannelLayout, &pContext->allocationCallbacks); + return ma_result_from_OSStatus(status); + } + + result = ma_get_channel_map_from_AudioChannelLayout(pChannelLayout, pChannelMap, channelMapCap); + if (result != MA_SUCCESS) { + ma_free(pChannelLayout, &pContext->allocationCallbacks); + return result; + } + + ma_free(pChannelLayout, &pContext->allocationCallbacks); + return MA_SUCCESS; +} +#endif /* MA_APPLE_DESKTOP */ + + +#if !defined(MA_APPLE_DESKTOP) +static void ma_AVAudioSessionPortDescription_to_device_info(AVAudioSessionPortDescription* pPortDesc, ma_device_info* pInfo) +{ + MA_ZERO_OBJECT(pInfo); + ma_strncpy_s(pInfo->name, sizeof(pInfo->name), [pPortDesc.portName UTF8String], (size_t)-1); + ma_strncpy_s(pInfo->id.coreaudio, sizeof(pInfo->id.coreaudio), [pPortDesc.UID UTF8String], (size_t)-1); +} +#endif + +static ma_result ma_context_enumerate_devices__coreaudio(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData) +{ +#if defined(MA_APPLE_DESKTOP) + UInt32 deviceCount; + AudioObjectID* pDeviceObjectIDs; + AudioObjectID defaultDeviceObjectIDPlayback; + AudioObjectID defaultDeviceObjectIDCapture; + ma_result result; + UInt32 iDevice; + + ma_find_default_AudioObjectID(pContext, ma_device_type_playback, &defaultDeviceObjectIDPlayback); /* OK if this fails. */ + ma_find_default_AudioObjectID(pContext, ma_device_type_capture, &defaultDeviceObjectIDCapture); /* OK if this fails. 
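+    If it does fail, the helper's safety initialisation leaves the ID at 0 (kAudioObjectUnknown), which
+    never matches a real device, so the only consequence is that nothing gets flagged as the default.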
*/ + + result = ma_get_device_object_ids__coreaudio(pContext, &deviceCount, &pDeviceObjectIDs); + if (result != MA_SUCCESS) { + return result; + } + + for (iDevice = 0; iDevice < deviceCount; ++iDevice) { + AudioObjectID deviceObjectID = pDeviceObjectIDs[iDevice]; + ma_device_info info; + + MA_ZERO_OBJECT(&info); + if (ma_get_AudioObject_uid(pContext, deviceObjectID, sizeof(info.id.coreaudio), info.id.coreaudio) != MA_SUCCESS) { + continue; + } + if (ma_get_AudioObject_name(pContext, deviceObjectID, sizeof(info.name), info.name) != MA_SUCCESS) { + continue; + } + + if (ma_does_AudioObject_support_playback(pContext, deviceObjectID)) { + if (deviceObjectID == defaultDeviceObjectIDPlayback) { + info.isDefault = MA_TRUE; + } + + if (!callback(pContext, ma_device_type_playback, &info, pUserData)) { + break; + } + } + if (ma_does_AudioObject_support_capture(pContext, deviceObjectID)) { + if (deviceObjectID == defaultDeviceObjectIDCapture) { + info.isDefault = MA_TRUE; + } + + if (!callback(pContext, ma_device_type_capture, &info, pUserData)) { + break; + } + } + } + + ma_free(pDeviceObjectIDs, &pContext->allocationCallbacks); +#else + ma_device_info info; + NSArray *pInputs = [[[AVAudioSession sharedInstance] currentRoute] inputs]; + NSArray *pOutputs = [[[AVAudioSession sharedInstance] currentRoute] outputs]; + + for (AVAudioSessionPortDescription* pPortDesc in pOutputs) { + ma_AVAudioSessionPortDescription_to_device_info(pPortDesc, &info); + if (!callback(pContext, ma_device_type_playback, &info, pUserData)) { + return MA_SUCCESS; + } + } + + for (AVAudioSessionPortDescription* pPortDesc in pInputs) { + ma_AVAudioSessionPortDescription_to_device_info(pPortDesc, &info); + if (!callback(pContext, ma_device_type_capture, &info, pUserData)) { + return MA_SUCCESS; + } + } +#endif + + return MA_SUCCESS; +} + +static ma_result ma_context_get_device_info__coreaudio(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_device_info* pDeviceInfo) +{ + ma_result result; + + MA_ASSERT(pContext != NULL); + +#if defined(MA_APPLE_DESKTOP) + /* Desktop */ + { + AudioObjectID deviceObjectID; + AudioObjectID defaultDeviceObjectID; + UInt32 streamDescriptionCount; + AudioStreamRangedDescription* pStreamDescriptions; + UInt32 iStreamDescription; + UInt32 sampleRateRangeCount; + AudioValueRange* pSampleRateRanges; + + ma_find_default_AudioObjectID(pContext, deviceType, &defaultDeviceObjectID); /* OK if this fails. */ + + result = ma_find_AudioObjectID(pContext, deviceType, pDeviceID, &deviceObjectID); + if (result != MA_SUCCESS) { + return result; + } + + result = ma_get_AudioObject_uid(pContext, deviceObjectID, sizeof(pDeviceInfo->id.coreaudio), pDeviceInfo->id.coreaudio); + if (result != MA_SUCCESS) { + return result; + } + + result = ma_get_AudioObject_name(pContext, deviceObjectID, sizeof(pDeviceInfo->name), pDeviceInfo->name); + if (result != MA_SUCCESS) { + return result; + } + + if (deviceObjectID == defaultDeviceObjectID) { + pDeviceInfo->isDefault = MA_TRUE; + } + + /* + There could be a large number of permutations here. Fortunately there is only a single channel count + being reported which reduces this quite a bit. For sample rates we're only reporting those that are + one of miniaudio's recognized "standard" rates. If there are still more formats than can fit into + our fixed sized array we'll just need to truncate them. 
This is unlikely and will probably only happen
+        if some driver performs software data conversion and therefore reports every possible format and
+        sample rate.
+        */
+        pDeviceInfo->nativeDataFormatCount = 0;
+
+        /* Formats. */
+        {
+            ma_format uniqueFormats[ma_format_count];
+            ma_uint32 uniqueFormatCount = 0;
+            ma_uint32 channels;
+
+            /* Channels. */
+            result = ma_get_AudioObject_channel_count(pContext, deviceObjectID, deviceType, &channels);
+            if (result != MA_SUCCESS) {
+                return result;
+            }
+
+            /* Formats. */
+            result = ma_get_AudioObject_stream_descriptions(pContext, deviceObjectID, deviceType, &streamDescriptionCount, &pStreamDescriptions);
+            if (result != MA_SUCCESS) {
+                return result;
+            }
+
+            for (iStreamDescription = 0; iStreamDescription < streamDescriptionCount; ++iStreamDescription) {
+                ma_format format;
+                ma_bool32 hasFormatBeenHandled = MA_FALSE;
+                ma_uint32 iOutputFormat;
+                ma_uint32 iSampleRate;
+
+                result = ma_format_from_AudioStreamBasicDescription(&pStreamDescriptions[iStreamDescription].mFormat, &format);
+                if (result != MA_SUCCESS) {
+                    continue;
+                }
+
+                MA_ASSERT(format != ma_format_unknown);
+
+                /* Make sure the format isn't already in the output list. */
+                for (iOutputFormat = 0; iOutputFormat < uniqueFormatCount; ++iOutputFormat) {
+                    if (uniqueFormats[iOutputFormat] == format) {
+                        hasFormatBeenHandled = MA_TRUE;
+                        break;
+                    }
+                }
+
+                /* If we've already handled this format just skip it. */
+                if (hasFormatBeenHandled) {
+                    continue;
+                }
+
+                uniqueFormats[uniqueFormatCount] = format;
+                uniqueFormatCount += 1;
+
+                /* Sample Rates */
+                result = ma_get_AudioObject_sample_rates(pContext, deviceObjectID, deviceType, &sampleRateRangeCount, &pSampleRateRanges);
+                if (result != MA_SUCCESS) {
+                    ma_free(pStreamDescriptions, &pContext->allocationCallbacks);   /* Don't leak the stream descriptions on this early-out path. */
+                    return result;
+                }
+
+                /*
+                Annoyingly Core Audio reports a sample rate range. We just get all the standard rates that
+                fall within this range.
+                */
+                for (iSampleRate = 0; iSampleRate < sampleRateRangeCount; ++iSampleRate) {
+                    ma_uint32 iStandardSampleRate;
+                    for (iStandardSampleRate = 0; iStandardSampleRate < ma_countof(g_maStandardSampleRatePriorities); iStandardSampleRate += 1) {
+                        ma_uint32 standardSampleRate = g_maStandardSampleRatePriorities[iStandardSampleRate];
+                        if (standardSampleRate >= pSampleRateRanges[iSampleRate].mMinimum && standardSampleRate <= pSampleRateRanges[iSampleRate].mMaximum) {
+                            /* We have a new data format. Add it to the list. */
+                            pDeviceInfo->nativeDataFormats[pDeviceInfo->nativeDataFormatCount].format = format;
+                            pDeviceInfo->nativeDataFormats[pDeviceInfo->nativeDataFormatCount].channels = channels;
+                            pDeviceInfo->nativeDataFormats[pDeviceInfo->nativeDataFormatCount].sampleRate = standardSampleRate;
+                            pDeviceInfo->nativeDataFormats[pDeviceInfo->nativeDataFormatCount].flags = 0;
+                            pDeviceInfo->nativeDataFormatCount += 1;
+
+                            if (pDeviceInfo->nativeDataFormatCount >= ma_countof(pDeviceInfo->nativeDataFormats)) {
+                                break;  /* No more room for any more formats. */
+                            }
+                        }
+                    }
+                }
+
+                ma_free(pSampleRateRanges, &pContext->allocationCallbacks);
+
+                if (pDeviceInfo->nativeDataFormatCount >= ma_countof(pDeviceInfo->nativeDataFormats)) {
+                    break;  /* No more room for any more formats. */
*/ + } + } + + ma_free(pStreamDescriptions, &pContext->allocationCallbacks); + } + } +#else + /* Mobile */ + { + AudioComponentDescription desc; + AudioComponent component; + AudioUnit audioUnit; + OSStatus status; + AudioUnitScope formatScope; + AudioUnitElement formatElement; + AudioStreamBasicDescription bestFormat; + UInt32 propSize; + + /* We want to ensure we use a consistent device name to device enumeration. */ + if (pDeviceID != NULL && pDeviceID->coreaudio[0] != '\0') { + ma_bool32 found = MA_FALSE; + if (deviceType == ma_device_type_playback) { + NSArray *pOutputs = [[[AVAudioSession sharedInstance] currentRoute] outputs]; + for (AVAudioSessionPortDescription* pPortDesc in pOutputs) { + if (strcmp(pDeviceID->coreaudio, [pPortDesc.UID UTF8String]) == 0) { + ma_AVAudioSessionPortDescription_to_device_info(pPortDesc, pDeviceInfo); + found = MA_TRUE; + break; + } + } + } else { + NSArray *pInputs = [[[AVAudioSession sharedInstance] currentRoute] inputs]; + for (AVAudioSessionPortDescription* pPortDesc in pInputs) { + if (strcmp(pDeviceID->coreaudio, [pPortDesc.UID UTF8String]) == 0) { + ma_AVAudioSessionPortDescription_to_device_info(pPortDesc, pDeviceInfo); + found = MA_TRUE; + break; + } + } + } + + if (!found) { + return MA_DOES_NOT_EXIST; + } + } else { + if (deviceType == ma_device_type_playback) { + ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1); + } else { + ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1); + } + } + + + /* + Retrieving device information is more annoying on mobile than desktop. For simplicity I'm locking this down to whatever format is + reported on a temporary I/O unit. The problem, however, is that this doesn't return a value for the sample rate which we need to + retrieve from the AVAudioSession shared instance. + */ + desc.componentType = kAudioUnitType_Output; + desc.componentSubType = kAudioUnitSubType_RemoteIO; + desc.componentManufacturer = kAudioUnitManufacturer_Apple; + desc.componentFlags = 0; + desc.componentFlagsMask = 0; + + component = ((ma_AudioComponentFindNext_proc)pContext->coreaudio.AudioComponentFindNext)(NULL, &desc); + if (component == NULL) { + return MA_FAILED_TO_INIT_BACKEND; + } + + status = ((ma_AudioComponentInstanceNew_proc)pContext->coreaudio.AudioComponentInstanceNew)(component, &audioUnit); + if (status != noErr) { + return ma_result_from_OSStatus(status); + } + + formatScope = (deviceType == ma_device_type_playback) ? kAudioUnitScope_Input : kAudioUnitScope_Output; + formatElement = (deviceType == ma_device_type_playback) ? MA_COREAUDIO_OUTPUT_BUS : MA_COREAUDIO_INPUT_BUS; + + propSize = sizeof(bestFormat); + status = ((ma_AudioUnitGetProperty_proc)pContext->coreaudio.AudioUnitGetProperty)(audioUnit, kAudioUnitProperty_StreamFormat, formatScope, formatElement, &bestFormat, &propSize); + if (status != noErr) { + ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(audioUnit); + return ma_result_from_OSStatus(status); + } + + ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(audioUnit); + audioUnit = NULL; + + /* Only a single format is being reported for iOS. 
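+        Callers should therefore not expect the full format list that desktop enumeration provides. As a rough
+        consumer-side sketch (assuming a context initialized with the Core Audio backend; use_format() is just a
+        placeholder for whatever the caller does with the result):
+
+            ma_device_info info;
+            if (ma_context_get_device_info(&context, ma_device_type_playback, NULL, &info) == MA_SUCCESS) {
+                use_format(info.nativeDataFormats[0].format, info.nativeDataFormats[0].channels, info.nativeDataFormats[0].sampleRate);
+            }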
*/ + pDeviceInfo->nativeDataFormatCount = 1; + + result = ma_format_from_AudioStreamBasicDescription(&bestFormat, &pDeviceInfo->nativeDataFormats[0].format); + if (result != MA_SUCCESS) { + return result; + } + + pDeviceInfo->nativeDataFormats[0].channels = bestFormat.mChannelsPerFrame; + + /* + It looks like Apple are wanting to push the whole AVAudioSession thing. Thus, we need to use that to determine device settings. To do + this we just get the shared instance and inspect. + */ + @autoreleasepool { + AVAudioSession* pAudioSession = [AVAudioSession sharedInstance]; + MA_ASSERT(pAudioSession != NULL); + + pDeviceInfo->nativeDataFormats[0].sampleRate = (ma_uint32)pAudioSession.sampleRate; + } + } +#endif + + (void)pDeviceInfo; /* Unused. */ + return MA_SUCCESS; +} + +static AudioBufferList* ma_allocate_AudioBufferList__coreaudio(ma_uint32 sizeInFrames, ma_format format, ma_uint32 channels, ma_stream_layout layout, const ma_allocation_callbacks* pAllocationCallbacks) +{ + AudioBufferList* pBufferList; + UInt32 audioBufferSizeInBytes; + size_t allocationSize; + + MA_ASSERT(sizeInFrames > 0); + MA_ASSERT(format != ma_format_unknown); + MA_ASSERT(channels > 0); + + allocationSize = sizeof(AudioBufferList) - sizeof(AudioBuffer); /* Subtract sizeof(AudioBuffer) because that part is dynamically sized. */ + if (layout == ma_stream_layout_interleaved) { + /* Interleaved case. This is the simple case because we just have one buffer. */ + allocationSize += sizeof(AudioBuffer) * 1; + } else { + /* Non-interleaved case. This is the more complex case because there's more than one buffer. */ + allocationSize += sizeof(AudioBuffer) * channels; + } + + allocationSize += sizeInFrames * ma_get_bytes_per_frame(format, channels); + + pBufferList = (AudioBufferList*)ma_malloc(allocationSize, pAllocationCallbacks); + if (pBufferList == NULL) { + return NULL; + } + + audioBufferSizeInBytes = (UInt32)(sizeInFrames * ma_get_bytes_per_sample(format)); + + if (layout == ma_stream_layout_interleaved) { + pBufferList->mNumberBuffers = 1; + pBufferList->mBuffers[0].mNumberChannels = channels; + pBufferList->mBuffers[0].mDataByteSize = audioBufferSizeInBytes * channels; + pBufferList->mBuffers[0].mData = (ma_uint8*)pBufferList + sizeof(AudioBufferList); + } else { + ma_uint32 iBuffer; + pBufferList->mNumberBuffers = channels; + for (iBuffer = 0; iBuffer < pBufferList->mNumberBuffers; ++iBuffer) { + pBufferList->mBuffers[iBuffer].mNumberChannels = 1; + pBufferList->mBuffers[iBuffer].mDataByteSize = audioBufferSizeInBytes; + pBufferList->mBuffers[iBuffer].mData = (ma_uint8*)pBufferList + ((sizeof(AudioBufferList) - sizeof(AudioBuffer)) + (sizeof(AudioBuffer) * channels)) + (audioBufferSizeInBytes * iBuffer); + } + } + + return pBufferList; +} + +static ma_result ma_device_realloc_AudioBufferList__coreaudio(ma_device* pDevice, ma_uint32 sizeInFrames, ma_format format, ma_uint32 channels, ma_stream_layout layout) +{ + MA_ASSERT(pDevice != NULL); + MA_ASSERT(format != ma_format_unknown); + MA_ASSERT(channels > 0); + + /* Only resize the buffer if necessary. */ + if (pDevice->coreaudio.audioBufferCapInFrames < sizeInFrames) { + AudioBufferList* pNewAudioBufferList; + + pNewAudioBufferList = ma_allocate_AudioBufferList__coreaudio(sizeInFrames, format, channels, layout, &pDevice->pContext->allocationCallbacks); + if (pNewAudioBufferList == NULL) { + return MA_OUT_OF_MEMORY; + } + + /* At this point we'll have a new AudioBufferList and we can free the old one. 
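+        To illustrate what ma_allocate_AudioBufferList__coreaudio() above actually lays out, consider 512 frames
+        of f32 stereo: interleaved, it's a single AudioBuffer with mDataByteSize = (512*4)*2 = 4096 bytes of
+        sample data placed immediately after the header; deinterleaved, it's two AudioBuffers of 2048 bytes each,
+        packed back to back after the (larger) header. Either way the header and all sample data live in one
+        allocation, which is why a single ma_free() below is enough.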
*/ + ma_free(pDevice->coreaudio.pAudioBufferList, &pDevice->pContext->allocationCallbacks); + pDevice->coreaudio.pAudioBufferList = pNewAudioBufferList; + pDevice->coreaudio.audioBufferCapInFrames = sizeInFrames; + } + + /* Getting here means the capacity of the audio is fine. */ + return MA_SUCCESS; +} + + +static OSStatus ma_on_output__coreaudio(void* pUserData, AudioUnitRenderActionFlags* pActionFlags, const AudioTimeStamp* pTimeStamp, UInt32 busNumber, UInt32 frameCount, AudioBufferList* pBufferList) +{ + ma_device* pDevice = (ma_device*)pUserData; + ma_stream_layout layout; + + MA_ASSERT(pDevice != NULL); + + /*ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "INFO: Output Callback: busNumber=%d, frameCount=%d, mNumberBuffers=%d\n", (int)busNumber, (int)frameCount, (int)pBufferList->mNumberBuffers);*/ + + /* We need to check whether or not we are outputting interleaved or non-interleaved samples. The way we do this is slightly different for each type. */ + layout = ma_stream_layout_interleaved; + if (pBufferList->mBuffers[0].mNumberChannels != pDevice->playback.internalChannels) { + layout = ma_stream_layout_deinterleaved; + } + + if (layout == ma_stream_layout_interleaved) { + /* For now we can assume everything is interleaved. */ + UInt32 iBuffer; + for (iBuffer = 0; iBuffer < pBufferList->mNumberBuffers; ++iBuffer) { + if (pBufferList->mBuffers[iBuffer].mNumberChannels == pDevice->playback.internalChannels) { + ma_uint32 frameCountForThisBuffer = pBufferList->mBuffers[iBuffer].mDataByteSize / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels); + if (frameCountForThisBuffer > 0) { + ma_device_handle_backend_data_callback(pDevice, pBufferList->mBuffers[iBuffer].mData, NULL, frameCountForThisBuffer); + } + + /*a_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, " frameCount=%d, mNumberChannels=%d, mDataByteSize=%d\n", (int)frameCount, (int)pBufferList->mBuffers[iBuffer].mNumberChannels, (int)pBufferList->mBuffers[iBuffer].mDataByteSize);*/ + } else { + /* + This case is where the number of channels in the output buffer do not match our internal channels. It could mean that it's + not interleaved, in which case we can't handle right now since miniaudio does not yet support non-interleaved streams. We just + output silence here. + */ + MA_ZERO_MEMORY(pBufferList->mBuffers[iBuffer].mData, pBufferList->mBuffers[iBuffer].mDataByteSize); + /*ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, " WARNING: Outputting silence. frameCount=%d, mNumberChannels=%d, mDataByteSize=%d\n", (int)frameCount, (int)pBufferList->mBuffers[iBuffer].mNumberChannels, (int)pBufferList->mBuffers[iBuffer].mDataByteSize);*/ + } + } + } else { + /* This is the deinterleaved case. We need to update each buffer in groups of internalChannels. This assumes each buffer is the same size. */ + MA_ASSERT(pDevice->playback.internalChannels <= MA_MAX_CHANNELS); /* This should have been validated at initialization time. */ + + /* + For safety we'll check that the internal channels is a multiple of the buffer count. If it's not it means something + very strange has happened and we're not going to support it. 
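+        (Strictly, the test below checks the reverse direction: that mNumberBuffers is a multiple of the internal
+        channel count. For example, 6 mono buffers with 2 internal channels are handled as three stereo groups,
+        whereas a non-divisible combination is skipped and the buffers are left as-is. Also note the chunking
+        math: with the 4096 byte temp buffer and stereo f32 output, each inner pass renders 4096/8 = 512 frames.)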
+ */ + if ((pBufferList->mNumberBuffers % pDevice->playback.internalChannels) == 0) { + ma_uint8 tempBuffer[4096]; + UInt32 iBuffer; + + for (iBuffer = 0; iBuffer < pBufferList->mNumberBuffers; iBuffer += pDevice->playback.internalChannels) { + ma_uint32 frameCountPerBuffer = pBufferList->mBuffers[iBuffer].mDataByteSize / ma_get_bytes_per_sample(pDevice->playback.internalFormat); + ma_uint32 framesRemaining = frameCountPerBuffer; + + while (framesRemaining > 0) { + void* ppDeinterleavedBuffers[MA_MAX_CHANNELS]; + ma_uint32 iChannel; + ma_uint32 framesToRead = sizeof(tempBuffer) / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels); + if (framesToRead > framesRemaining) { + framesToRead = framesRemaining; + } + + ma_device_handle_backend_data_callback(pDevice, tempBuffer, NULL, framesToRead); + + for (iChannel = 0; iChannel < pDevice->playback.internalChannels; ++iChannel) { + ppDeinterleavedBuffers[iChannel] = (void*)ma_offset_ptr(pBufferList->mBuffers[iBuffer+iChannel].mData, (frameCountPerBuffer - framesRemaining) * ma_get_bytes_per_sample(pDevice->playback.internalFormat)); + } + + ma_deinterleave_pcm_frames(pDevice->playback.internalFormat, pDevice->playback.internalChannels, framesToRead, tempBuffer, ppDeinterleavedBuffers); + + framesRemaining -= framesToRead; + } + } + } + } + + (void)pActionFlags; + (void)pTimeStamp; + (void)busNumber; + (void)frameCount; + + return noErr; +} + +static OSStatus ma_on_input__coreaudio(void* pUserData, AudioUnitRenderActionFlags* pActionFlags, const AudioTimeStamp* pTimeStamp, UInt32 busNumber, UInt32 frameCount, AudioBufferList* pUnusedBufferList) +{ + ma_device* pDevice = (ma_device*)pUserData; + AudioBufferList* pRenderedBufferList; + ma_result result; + ma_stream_layout layout; + ma_uint32 iBuffer; + OSStatus status; + + MA_ASSERT(pDevice != NULL); + + pRenderedBufferList = (AudioBufferList*)pDevice->coreaudio.pAudioBufferList; + MA_ASSERT(pRenderedBufferList); + + /* We need to check whether or not we are outputting interleaved or non-interleaved samples. The way we do this is slightly different for each type. */ + layout = ma_stream_layout_interleaved; + if (pRenderedBufferList->mBuffers[0].mNumberChannels != pDevice->capture.internalChannels) { + layout = ma_stream_layout_deinterleaved; + } + + /*ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "INFO: Input Callback: busNumber=%d, frameCount=%d, mNumberBuffers=%d\n", (int)busNumber, (int)frameCount, (int)pRenderedBufferList->mNumberBuffers);*/ + + /* + There has been a situation reported where frame count passed into this function is greater than the capacity of + our capture buffer. There doesn't seem to be a reliable way to determine what the maximum frame count will be, + so we need to instead resort to dynamically reallocating our buffer to ensure it's large enough to capture the + number of frames requested by this callback. + */ + result = ma_device_realloc_AudioBufferList__coreaudio(pDevice, frameCount, pDevice->capture.internalFormat, pDevice->capture.internalChannels, layout); + if (result != MA_SUCCESS) { + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "Failed to allocate AudioBufferList for capture.\n"); + return noErr; + } + + pRenderedBufferList = (AudioBufferList*)pDevice->coreaudio.pAudioBufferList; + MA_ASSERT(pRenderedBufferList); + + /* + When you call AudioUnitRender(), Core Audio tries to be helpful by setting the mDataByteSize to the number of bytes + that were actually rendered. 
The problem with this is that the next call can fail with -50 due to the size no longer + being set to the capacity of the buffer, but instead the size in bytes of the previous render. This will cause a + problem when a future call to this callback specifies a larger number of frames. + + To work around this we need to explicitly set the size of each buffer to their respective size in bytes. + */ + for (iBuffer = 0; iBuffer < pRenderedBufferList->mNumberBuffers; ++iBuffer) { + pRenderedBufferList->mBuffers[iBuffer].mDataByteSize = pDevice->coreaudio.audioBufferCapInFrames * ma_get_bytes_per_sample(pDevice->capture.internalFormat) * pRenderedBufferList->mBuffers[iBuffer].mNumberChannels; + /*printf("DEBUG: nDataByteSize = %d\n", (int)pRenderedBufferList->mBuffers[iBuffer].mDataByteSize);*/ + } + + status = ((ma_AudioUnitRender_proc)pDevice->pContext->coreaudio.AudioUnitRender)((AudioUnit)pDevice->coreaudio.audioUnitCapture, pActionFlags, pTimeStamp, busNumber, frameCount, pRenderedBufferList); + if (status != noErr) { + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "ERROR: AudioUnitRender() failed with %d.\n", (int)status); + return status; + } + + if (layout == ma_stream_layout_interleaved) { + for (iBuffer = 0; iBuffer < pRenderedBufferList->mNumberBuffers; ++iBuffer) { + if (pRenderedBufferList->mBuffers[iBuffer].mNumberChannels == pDevice->capture.internalChannels) { + ma_device_handle_backend_data_callback(pDevice, NULL, pRenderedBufferList->mBuffers[iBuffer].mData, frameCount); + /*ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, " mDataByteSize=%d.\n", (int)pRenderedBufferList->mBuffers[iBuffer].mDataByteSize);*/ + } else { + /* + This case is where the number of channels in the output buffer do not match our internal channels. It could mean that it's + not interleaved, in which case we can't handle right now since miniaudio does not yet support non-interleaved streams. + */ + ma_uint8 silentBuffer[4096]; + ma_uint32 framesRemaining; + + MA_ZERO_MEMORY(silentBuffer, sizeof(silentBuffer)); + + framesRemaining = frameCount; + while (framesRemaining > 0) { + ma_uint32 framesToSend = sizeof(silentBuffer) / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels); + if (framesToSend > framesRemaining) { + framesToSend = framesRemaining; + } + + ma_device_handle_backend_data_callback(pDevice, NULL, silentBuffer, framesToSend); + + framesRemaining -= framesToSend; + } + + /*ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, " WARNING: Outputting silence. frameCount=%d, mNumberChannels=%d, mDataByteSize=%d\n", (int)frameCount, (int)pRenderedBufferList->mBuffers[iBuffer].mNumberChannels, (int)pRenderedBufferList->mBuffers[iBuffer].mDataByteSize);*/ + } + } + } else { + /* This is the deinterleaved case. We need to interleave the audio data before sending it to the client. This assumes each buffer is the same size. */ + MA_ASSERT(pDevice->capture.internalChannels <= MA_MAX_CHANNELS); /* This should have been validated at initialization time. */ + + /* + For safety we'll check that the internal channels is a multiple of the buffer count. If it's not it means something + very strange has happened and we're not going to support it. 
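+        (As in the playback path above, what's actually verified below is that mNumberBuffers is divisible by the
+        internal channel count; if it isn't, this callback simply delivers nothing. Each inner pass interleaves at
+        most sizeof(tempBuffer) / ma_get_bytes_per_frame(format, channels) frames before handing them to the
+        client.)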
+        */
+        if ((pRenderedBufferList->mNumberBuffers % pDevice->capture.internalChannels) == 0) {
+            ma_uint8 tempBuffer[4096];
+            for (iBuffer = 0; iBuffer < pRenderedBufferList->mNumberBuffers; iBuffer += pDevice->capture.internalChannels) {
+                ma_uint32 framesRemaining = frameCount;
+                while (framesRemaining > 0) {
+                    void* ppDeinterleavedBuffers[MA_MAX_CHANNELS];
+                    ma_uint32 iChannel;
+                    ma_uint32 framesToSend = sizeof(tempBuffer) / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels);
+                    if (framesToSend > framesRemaining) {
+                        framesToSend = framesRemaining;
+                    }
+
+                    for (iChannel = 0; iChannel < pDevice->capture.internalChannels; ++iChannel) {
+                        ppDeinterleavedBuffers[iChannel] = (void*)ma_offset_ptr(pRenderedBufferList->mBuffers[iBuffer+iChannel].mData, (frameCount - framesRemaining) * ma_get_bytes_per_sample(pDevice->capture.internalFormat));
+                    }
+
+                    ma_interleave_pcm_frames(pDevice->capture.internalFormat, pDevice->capture.internalChannels, framesToSend, (const void**)ppDeinterleavedBuffers, tempBuffer);
+                    ma_device_handle_backend_data_callback(pDevice, NULL, tempBuffer, framesToSend);
+
+                    framesRemaining -= framesToSend;
+                }
+            }
+        }
+    }
+
+    (void)pActionFlags;
+    (void)pTimeStamp;
+    (void)busNumber;
+    (void)frameCount;
+    (void)pUnusedBufferList;
+
+    return noErr;
+}
+
+static void on_start_stop__coreaudio(void* pUserData, AudioUnit audioUnit, AudioUnitPropertyID propertyID, AudioUnitScope scope, AudioUnitElement element)
+{
+    ma_device* pDevice = (ma_device*)pUserData;
+    MA_ASSERT(pDevice != NULL);
+
+    /* Don't do anything if it looks like we're just reinitializing due to a device switch. */
+    if (((audioUnit == pDevice->coreaudio.audioUnitPlayback) && pDevice->coreaudio.isSwitchingPlaybackDevice) ||
+        ((audioUnit == pDevice->coreaudio.audioUnitCapture) && pDevice->coreaudio.isSwitchingCaptureDevice)) {
+        return;
+    }
+
+    /*
+    There's been a report of a deadlock here when triggered by ma_device_uninit(). It looks like
+    AudioUnitGetProperty (called below) and AudioComponentInstanceDispose (called in ma_device_uninit)
+    can try waiting on the same lock. I'm going to try working around this by not calling any Core
+    Audio APIs in the callback when the device has been stopped or uninitialized.
+    */
+    if (ma_device_get_state(pDevice) == ma_device_state_uninitialized || ma_device_get_state(pDevice) == ma_device_state_stopping || ma_device_get_state(pDevice) == ma_device_state_stopped) {
+        ma_device__on_notification_stopped(pDevice);
+    } else {
+        UInt32 isRunning;
+        UInt32 isRunningSize = sizeof(isRunning);
+        OSStatus status = ((ma_AudioUnitGetProperty_proc)pDevice->pContext->coreaudio.AudioUnitGetProperty)(audioUnit, kAudioOutputUnitProperty_IsRunning, scope, element, &isRunning, &isRunningSize);
+        if (status != noErr) {
+            goto done; /* Don't really know what to do in this case... just ignore it, I suppose... */
+        }
+
+        if (!isRunning) {
+            /*
+            The stop event is a bit annoying in Core Audio because it will be called when we automatically switch the default device. Some scenarios to consider:
+
+            1) When the device is unplugged, this will be called _before_ the default device change notification.
+            2) When the device is changed via the default device change notification, this will be called _after_ the switch.
+
+            For case #1, we just check if there's a new default device available. If so, we just ignore the stop event. For case #2 we check a flag.
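+            Roughly, the two sequences look like this (isDefault and isSwitching refer to the corresponding
+            pDevice->coreaudio flags):
+
+                Case #1: device unplugged -> IsRunning goes 0 here -> isDefault is set so we ignore it -> the
+                         default device change listener later reinitializes onto the new default device.
+                Case #2: default changed  -> the listener sets isSwitching and reinitializes -> IsRunning goes 0
+                         during the switch -> the isSwitching checks cause the event to be ignored.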
+            */
+            if (((audioUnit == pDevice->coreaudio.audioUnitPlayback) && pDevice->coreaudio.isDefaultPlaybackDevice) ||
+                ((audioUnit == pDevice->coreaudio.audioUnitCapture) && pDevice->coreaudio.isDefaultCaptureDevice)) {
+                /*
+                It looks like the device is switching through an external event, such as the user unplugging the device or changing the default device
+                via the operating system's sound settings. If we're re-initializing the device, we just terminate because we want the stopping of the
+                device to be seamless to the client (we don't want them receiving the stopped event and thinking that the device has stopped when it
+                hasn't!).
+                */
+                if (((audioUnit == pDevice->coreaudio.audioUnitPlayback) && pDevice->coreaudio.isSwitchingPlaybackDevice) ||
+                    ((audioUnit == pDevice->coreaudio.audioUnitCapture) && pDevice->coreaudio.isSwitchingCaptureDevice)) {
+                    goto done;
+                }
+
+                /*
+                Getting here means the device is not reinitializing which means it may have been unplugged. From what I can see, it looks like Core Audio
+                will try switching to the new default device seamlessly. We need to somehow find a way to determine whether or not Core Audio will most
+                likely be successful in switching to the new device.
+
+                TODO: Try to predict if Core Audio will switch devices. If not, the stopped callback needs to be posted.
+                */
+                goto done;
+            }
+
+            /* Getting here means we need to stop the device. */
+            ma_device__on_notification_stopped(pDevice);
+        }
+    }
+
+    (void)propertyID; /* Unused. */
+
+done:
+    /* Always signal the stop event. It's possible for the "else" case to get hit which can happen during an interruption. */
+    ma_event_signal(&pDevice->coreaudio.stopEvent);
+}
+
+#if defined(MA_APPLE_DESKTOP)
+static ma_spinlock g_DeviceTrackingInitLock_CoreAudio = 0; /* A spinlock for mutual exclusion of the init/uninit of the global tracking data. Initialization to 0 is what we need. */
+static ma_uint32 g_DeviceTrackingInitCounter_CoreAudio = 0;
+static ma_mutex g_DeviceTrackingMutex_CoreAudio;
+static ma_device** g_ppTrackedDevices_CoreAudio = NULL;
+static ma_uint32 g_TrackedDeviceCap_CoreAudio = 0;
+static ma_uint32 g_TrackedDeviceCount_CoreAudio = 0;
+
+static OSStatus ma_default_device_changed__coreaudio(AudioObjectID objectID, UInt32 addressCount, const AudioObjectPropertyAddress* pAddresses, void* pUserData)
+{
+    ma_device_type deviceType;
+
+    /* Not sure if I really need to check this, but it makes me feel better. */
+    if (addressCount == 0) {
+        return noErr;
+    }
+
+    if (pAddresses[0].mSelector == kAudioHardwarePropertyDefaultOutputDevice) {
+        deviceType = ma_device_type_playback;
+    } else if (pAddresses[0].mSelector == kAudioHardwarePropertyDefaultInputDevice) {
+        deviceType = ma_device_type_capture;
+    } else {
+        return noErr; /* Should never hit this.
*/ + } + + ma_mutex_lock(&g_DeviceTrackingMutex_CoreAudio); + { + ma_uint32 iDevice; + for (iDevice = 0; iDevice < g_TrackedDeviceCount_CoreAudio; iDevice += 1) { + ma_result reinitResult; + ma_device* pDevice; + + pDevice = g_ppTrackedDevices_CoreAudio[iDevice]; + if (pDevice->type == deviceType || pDevice->type == ma_device_type_duplex) { + if (deviceType == ma_device_type_playback) { + pDevice->coreaudio.isSwitchingPlaybackDevice = MA_TRUE; + reinitResult = ma_device_reinit_internal__coreaudio(pDevice, deviceType, MA_TRUE); + pDevice->coreaudio.isSwitchingPlaybackDevice = MA_FALSE; + } else { + pDevice->coreaudio.isSwitchingCaptureDevice = MA_TRUE; + reinitResult = ma_device_reinit_internal__coreaudio(pDevice, deviceType, MA_TRUE); + pDevice->coreaudio.isSwitchingCaptureDevice = MA_FALSE; + } + + if (reinitResult == MA_SUCCESS) { + ma_device__post_init_setup(pDevice, deviceType); + + /* Restart the device if required. If this fails we need to stop the device entirely. */ + if (ma_device_get_state(pDevice) == ma_device_state_started) { + OSStatus status; + if (deviceType == ma_device_type_playback) { + status = ((ma_AudioOutputUnitStart_proc)pDevice->pContext->coreaudio.AudioOutputUnitStart)((AudioUnit)pDevice->coreaudio.audioUnitPlayback); + if (status != noErr) { + if (pDevice->type == ma_device_type_duplex) { + ((ma_AudioOutputUnitStop_proc)pDevice->pContext->coreaudio.AudioOutputUnitStop)((AudioUnit)pDevice->coreaudio.audioUnitCapture); + } + ma_device__set_state(pDevice, ma_device_state_stopped); + } + } else if (deviceType == ma_device_type_capture) { + status = ((ma_AudioOutputUnitStart_proc)pDevice->pContext->coreaudio.AudioOutputUnitStart)((AudioUnit)pDevice->coreaudio.audioUnitCapture); + if (status != noErr) { + if (pDevice->type == ma_device_type_duplex) { + ((ma_AudioOutputUnitStop_proc)pDevice->pContext->coreaudio.AudioOutputUnitStop)((AudioUnit)pDevice->coreaudio.audioUnitPlayback); + } + ma_device__set_state(pDevice, ma_device_state_stopped); + } + } + } + + ma_device__on_notification_rerouted(pDevice); + } + } + } + } + ma_mutex_unlock(&g_DeviceTrackingMutex_CoreAudio); + + /* Unused parameters. */ + (void)objectID; + (void)pUserData; + + return noErr; +} + +static ma_result ma_context__init_device_tracking__coreaudio(ma_context* pContext) +{ + MA_ASSERT(pContext != NULL); + + ma_spinlock_lock(&g_DeviceTrackingInitLock_CoreAudio); + { + /* Don't do anything if we've already initialized device tracking. 
*/ + if (g_DeviceTrackingInitCounter_CoreAudio == 0) { + AudioObjectPropertyAddress propAddress; + propAddress.mScope = kAudioObjectPropertyScopeGlobal; + propAddress.mElement = AUDIO_OBJECT_PROPERTY_ELEMENT; + + ma_mutex_init(&g_DeviceTrackingMutex_CoreAudio); + + propAddress.mSelector = kAudioHardwarePropertyDefaultInputDevice; + ((ma_AudioObjectAddPropertyListener_proc)pContext->coreaudio.AudioObjectAddPropertyListener)(kAudioObjectSystemObject, &propAddress, &ma_default_device_changed__coreaudio, NULL); + + propAddress.mSelector = kAudioHardwarePropertyDefaultOutputDevice; + ((ma_AudioObjectAddPropertyListener_proc)pContext->coreaudio.AudioObjectAddPropertyListener)(kAudioObjectSystemObject, &propAddress, &ma_default_device_changed__coreaudio, NULL); + + } + g_DeviceTrackingInitCounter_CoreAudio += 1; + } + ma_spinlock_unlock(&g_DeviceTrackingInitLock_CoreAudio); + + return MA_SUCCESS; +} + +static ma_result ma_context__uninit_device_tracking__coreaudio(ma_context* pContext) +{ + MA_ASSERT(pContext != NULL); + + ma_spinlock_lock(&g_DeviceTrackingInitLock_CoreAudio); + { + if (g_DeviceTrackingInitCounter_CoreAudio > 0) + g_DeviceTrackingInitCounter_CoreAudio -= 1; + + if (g_DeviceTrackingInitCounter_CoreAudio == 0) { + AudioObjectPropertyAddress propAddress; + propAddress.mScope = kAudioObjectPropertyScopeGlobal; + propAddress.mElement = AUDIO_OBJECT_PROPERTY_ELEMENT; + + propAddress.mSelector = kAudioHardwarePropertyDefaultInputDevice; + ((ma_AudioObjectRemovePropertyListener_proc)pContext->coreaudio.AudioObjectRemovePropertyListener)(kAudioObjectSystemObject, &propAddress, &ma_default_device_changed__coreaudio, NULL); + + propAddress.mSelector = kAudioHardwarePropertyDefaultOutputDevice; + ((ma_AudioObjectRemovePropertyListener_proc)pContext->coreaudio.AudioObjectRemovePropertyListener)(kAudioObjectSystemObject, &propAddress, &ma_default_device_changed__coreaudio, NULL); + + /* At this point there should be no tracked devices. If not there's an error somewhere. */ + if (g_ppTrackedDevices_CoreAudio != NULL) { + ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_WARNING, "You have uninitialized all contexts while an associated device is still active."); + ma_spinlock_unlock(&g_DeviceTrackingInitLock_CoreAudio); + return MA_INVALID_OPERATION; + } + + ma_mutex_uninit(&g_DeviceTrackingMutex_CoreAudio); + } + } + ma_spinlock_unlock(&g_DeviceTrackingInitLock_CoreAudio); + + return MA_SUCCESS; +} + +static ma_result ma_device__track__coreaudio(ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + + ma_mutex_lock(&g_DeviceTrackingMutex_CoreAudio); + { + /* Allocate memory if required. 
*/ + if (g_TrackedDeviceCap_CoreAudio <= g_TrackedDeviceCount_CoreAudio) { + ma_uint32 newCap; + ma_device** ppNewDevices; + + newCap = g_TrackedDeviceCap_CoreAudio * 2; + if (newCap == 0) { + newCap = 1; + } + + ppNewDevices = (ma_device**)ma_realloc(g_ppTrackedDevices_CoreAudio, sizeof(*g_ppTrackedDevices_CoreAudio)*newCap, &pDevice->pContext->allocationCallbacks); + if (ppNewDevices == NULL) { + ma_mutex_unlock(&g_DeviceTrackingMutex_CoreAudio); + return MA_OUT_OF_MEMORY; + } + + g_ppTrackedDevices_CoreAudio = ppNewDevices; + g_TrackedDeviceCap_CoreAudio = newCap; + } + + g_ppTrackedDevices_CoreAudio[g_TrackedDeviceCount_CoreAudio] = pDevice; + g_TrackedDeviceCount_CoreAudio += 1; + } + ma_mutex_unlock(&g_DeviceTrackingMutex_CoreAudio); + + return MA_SUCCESS; +} + +static ma_result ma_device__untrack__coreaudio(ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + + ma_mutex_lock(&g_DeviceTrackingMutex_CoreAudio); + { + ma_uint32 iDevice; + for (iDevice = 0; iDevice < g_TrackedDeviceCount_CoreAudio; iDevice += 1) { + if (g_ppTrackedDevices_CoreAudio[iDevice] == pDevice) { + /* We've found the device. We now need to remove it from the list. */ + ma_uint32 jDevice; + for (jDevice = iDevice; jDevice < g_TrackedDeviceCount_CoreAudio-1; jDevice += 1) { + g_ppTrackedDevices_CoreAudio[jDevice] = g_ppTrackedDevices_CoreAudio[jDevice+1]; + } + + g_TrackedDeviceCount_CoreAudio -= 1; + + /* If there's nothing else in the list we need to free memory. */ + if (g_TrackedDeviceCount_CoreAudio == 0) { + ma_free(g_ppTrackedDevices_CoreAudio, &pDevice->pContext->allocationCallbacks); + g_ppTrackedDevices_CoreAudio = NULL; + g_TrackedDeviceCap_CoreAudio = 0; + } + + break; + } + } + } + ma_mutex_unlock(&g_DeviceTrackingMutex_CoreAudio); + + return MA_SUCCESS; +} +#endif + +#if defined(MA_APPLE_MOBILE) +@interface ma_ios_notification_handler:NSObject { + ma_device* m_pDevice; +} +@end + +@implementation ma_ios_notification_handler +-(id)init:(ma_device*)pDevice +{ + self = [super init]; + m_pDevice = pDevice; + + /* For route changes. */ + [[NSNotificationCenter defaultCenter] addObserver:self selector:@selector(handle_route_change:) name:AVAudioSessionRouteChangeNotification object:[AVAudioSession sharedInstance]]; + + /* For interruptions. */ + [[NSNotificationCenter defaultCenter] addObserver:self selector:@selector(handle_interruption:) name:AVAudioSessionInterruptionNotification object:[AVAudioSession sharedInstance]]; + + return self; +} + +-(void)dealloc +{ + [self remove_handler]; + + #if defined(__has_feature) + #if !__has_feature(objc_arc) + [super dealloc]; + #endif + #endif +} + +-(void)remove_handler +{ + [[NSNotificationCenter defaultCenter] removeObserver:self name:AVAudioSessionRouteChangeNotification object:nil]; + [[NSNotificationCenter defaultCenter] removeObserver:self name:AVAudioSessionInterruptionNotification object:nil]; +} + +-(void)handle_interruption:(NSNotification*)pNotification +{ + NSInteger type = [[[pNotification userInfo] objectForKey:AVAudioSessionInterruptionTypeKey] integerValue]; + switch (type) + { + case AVAudioSessionInterruptionTypeBegan: + { + ma_log_postf(ma_device_get_log(m_pDevice), MA_LOG_LEVEL_INFO, "[Core Audio] Interruption: AVAudioSessionInterruptionTypeBegan\n"); + + /* + Core Audio will have stopped the internal device automatically, but we need explicitly + stop it at a higher level to ensure miniaudio-specific state is updated for consistency. 
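+            On the application side these events surface through the regular notification callback. A sketch of
+            a typical handler (assuming the app registered notificationCallback on its device config; pause_ui()
+            is a placeholder):
+
+                void on_notification(const ma_device_notification* pNotification)
+                {
+                    if (pNotification->type == ma_device_notification_type_interruption_began) {
+                        pause_ui();
+                    } else if (pNotification->type == ma_device_notification_type_interruption_ended) {
+                        ma_device_start(pNotification->pDevice);
+                    }
+                }
+
+            Restarting with ma_device_start() once the interruption ends is the usual pattern since the device is
+            left in a stopped state by the code below.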
+ */ + ma_device_stop(m_pDevice); + + /* + Fire the notification after the device has been stopped to ensure it's in the correct + state when the notification handler is invoked. + */ + ma_device__on_notification_interruption_began(m_pDevice); + } break; + + case AVAudioSessionInterruptionTypeEnded: + { + ma_log_postf(ma_device_get_log(m_pDevice), MA_LOG_LEVEL_INFO, "[Core Audio] Interruption: AVAudioSessionInterruptionTypeEnded\n"); + ma_device__on_notification_interruption_ended(m_pDevice); + } break; + } +} + +-(void)handle_route_change:(NSNotification*)pNotification +{ + AVAudioSession* pSession = [AVAudioSession sharedInstance]; + + NSInteger reason = [[[pNotification userInfo] objectForKey:AVAudioSessionRouteChangeReasonKey] integerValue]; + switch (reason) + { + case AVAudioSessionRouteChangeReasonOldDeviceUnavailable: + { + ma_log_postf(ma_device_get_log(m_pDevice), MA_LOG_LEVEL_INFO, "[Core Audio] Route Changed: AVAudioSessionRouteChangeReasonOldDeviceUnavailable\n"); + } break; + + case AVAudioSessionRouteChangeReasonNewDeviceAvailable: + { + ma_log_postf(ma_device_get_log(m_pDevice), MA_LOG_LEVEL_INFO, "[Core Audio] Route Changed: AVAudioSessionRouteChangeReasonNewDeviceAvailable\n"); + } break; + + case AVAudioSessionRouteChangeReasonNoSuitableRouteForCategory: + { + ma_log_postf(ma_device_get_log(m_pDevice), MA_LOG_LEVEL_INFO, "[Core Audio] Route Changed: AVAudioSessionRouteChangeReasonNoSuitableRouteForCategory\n"); + } break; + + case AVAudioSessionRouteChangeReasonWakeFromSleep: + { + ma_log_postf(ma_device_get_log(m_pDevice), MA_LOG_LEVEL_INFO, "[Core Audio] Route Changed: AVAudioSessionRouteChangeReasonWakeFromSleep\n"); + } break; + + case AVAudioSessionRouteChangeReasonOverride: + { + ma_log_postf(ma_device_get_log(m_pDevice), MA_LOG_LEVEL_INFO, "[Core Audio] Route Changed: AVAudioSessionRouteChangeReasonOverride\n"); + } break; + + case AVAudioSessionRouteChangeReasonCategoryChange: + { + ma_log_postf(ma_device_get_log(m_pDevice), MA_LOG_LEVEL_INFO, "[Core Audio] Route Changed: AVAudioSessionRouteChangeReasonCategoryChange\n"); + } break; + + case AVAudioSessionRouteChangeReasonUnknown: + default: + { + ma_log_postf(ma_device_get_log(m_pDevice), MA_LOG_LEVEL_INFO, "[Core Audio] Route Changed: AVAudioSessionRouteChangeReasonUnknown\n"); + } break; + } + + ma_log_postf(ma_device_get_log(m_pDevice), MA_LOG_LEVEL_DEBUG, "[Core Audio] Changing Route. inputNumberChannels=%d; outputNumberOfChannels=%d\n", (int)pSession.inputNumberOfChannels, (int)pSession.outputNumberOfChannels); + + /* Let the application know about the route change. */ + ma_device__on_notification_rerouted(m_pDevice); +} +@end +#endif + +static ma_result ma_device_uninit__coreaudio(ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + MA_ASSERT(ma_device_get_state(pDevice) == ma_device_state_uninitialized); + +#if defined(MA_APPLE_DESKTOP) + /* + Make sure we're no longer tracking the device. It doesn't matter if we call this for a non-default device because it'll + just gracefully ignore it. 
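+    For reference, the matching application-side teardown is just:
+
+        ma_device_uninit(&device);
+        ma_context_uninit(&context);
+
+    with the device always uninitialized before the context that owns it, which guarantees this function runs
+    while the context's Core Audio function pointers are still valid.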
+ */ + ma_device__untrack__coreaudio(pDevice); +#endif +#if defined(MA_APPLE_MOBILE) + if (pDevice->coreaudio.pNotificationHandler != NULL) { + ma_ios_notification_handler* pNotificationHandler = (MA_BRIDGE_TRANSFER ma_ios_notification_handler*)pDevice->coreaudio.pNotificationHandler; + [pNotificationHandler remove_handler]; + } +#endif + + if (pDevice->coreaudio.audioUnitCapture != NULL) { + ((ma_AudioComponentInstanceDispose_proc)pDevice->pContext->coreaudio.AudioComponentInstanceDispose)((AudioUnit)pDevice->coreaudio.audioUnitCapture); + } + if (pDevice->coreaudio.audioUnitPlayback != NULL) { + ((ma_AudioComponentInstanceDispose_proc)pDevice->pContext->coreaudio.AudioComponentInstanceDispose)((AudioUnit)pDevice->coreaudio.audioUnitPlayback); + } + + if (pDevice->coreaudio.pAudioBufferList) { + ma_free(pDevice->coreaudio.pAudioBufferList, &pDevice->pContext->allocationCallbacks); + } + + return MA_SUCCESS; +} + +typedef struct +{ + ma_bool32 allowNominalSampleRateChange; + + /* Input. */ + ma_format formatIn; + ma_uint32 channelsIn; + ma_uint32 sampleRateIn; + ma_channel channelMapIn[MA_MAX_CHANNELS]; + ma_uint32 periodSizeInFramesIn; + ma_uint32 periodSizeInMillisecondsIn; + ma_uint32 periodsIn; + ma_share_mode shareMode; + ma_performance_profile performanceProfile; + ma_bool32 registerStopEvent; + + /* Output. */ +#if defined(MA_APPLE_DESKTOP) + AudioObjectID deviceObjectID; +#endif + AudioComponent component; + AudioUnit audioUnit; + AudioBufferList* pAudioBufferList; /* Only used for input devices. */ + ma_format formatOut; + ma_uint32 channelsOut; + ma_uint32 sampleRateOut; + ma_channel channelMapOut[MA_MAX_CHANNELS]; + ma_uint32 periodSizeInFramesOut; + ma_uint32 periodsOut; + char deviceName[256]; +} ma_device_init_internal_data__coreaudio; + +static ma_result ma_device_init_internal__coreaudio(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_device_init_internal_data__coreaudio* pData, void* pDevice_DoNotReference) /* <-- pDevice is typed as void* intentionally so as to avoid accidentally referencing it. */ +{ + ma_result result = MA_SUCCESS; + OSStatus status; + UInt32 enableIOFlag; + AudioStreamBasicDescription bestFormat; + ma_uint32 actualPeriodSizeInFrames; + AURenderCallbackStruct callbackInfo; +#if defined(MA_APPLE_DESKTOP) + AudioObjectID deviceObjectID; +#endif + + /* This API should only be used for a single device type: playback or capture. No full-duplex mode. */ + if (deviceType == ma_device_type_duplex) { + return MA_INVALID_ARGS; + } + + MA_ASSERT(pContext != NULL); + MA_ASSERT(deviceType == ma_device_type_playback || deviceType == ma_device_type_capture); + +#if defined(MA_APPLE_DESKTOP) + pData->deviceObjectID = 0; +#endif + pData->component = NULL; + pData->audioUnit = NULL; + pData->pAudioBufferList = NULL; + +#if defined(MA_APPLE_DESKTOP) + result = ma_find_AudioObjectID(pContext, deviceType, pDeviceID, &deviceObjectID); + if (result != MA_SUCCESS) { + return result; + } + + pData->deviceObjectID = deviceObjectID; +#endif + + /* Core audio doesn't really use the notion of a period so we can leave this unmodified, but not too over the top. */ + pData->periodsOut = pData->periodsIn; + if (pData->periodsOut == 0) { + pData->periodsOut = MA_DEFAULT_PERIODS; + } + if (pData->periodsOut > 16) { + pData->periodsOut = 16; + } + + + /* Audio unit. 
+    */
+    status = ((ma_AudioComponentInstanceNew_proc)pContext->coreaudio.AudioComponentInstanceNew)((AudioComponent)pContext->coreaudio.component, (AudioUnit*)&pData->audioUnit);
+    if (status != noErr) {
+        return ma_result_from_OSStatus(status);
+    }
+
+
+    /* The input/output buses need to be explicitly enabled and disabled. We set the flag based on the output unit first, then we just swap it for input. */
+    enableIOFlag = 1;
+    if (deviceType == ma_device_type_capture) {
+        enableIOFlag = 0;
+    }
+
+    status = ((ma_AudioUnitSetProperty_proc)pContext->coreaudio.AudioUnitSetProperty)(pData->audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output, MA_COREAUDIO_OUTPUT_BUS, &enableIOFlag, sizeof(enableIOFlag));
+    if (status != noErr) {
+        ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit);
+        return ma_result_from_OSStatus(status);
+    }
+
+    enableIOFlag = (enableIOFlag == 0) ? 1 : 0;
+    status = ((ma_AudioUnitSetProperty_proc)pContext->coreaudio.AudioUnitSetProperty)(pData->audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, MA_COREAUDIO_INPUT_BUS, &enableIOFlag, sizeof(enableIOFlag));
+    if (status != noErr) {
+        ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit);
+        return ma_result_from_OSStatus(status);
+    }
+
+
+    /* Set the device to use with this audio unit. This is only used on desktop since we are using defaults on mobile. */
+#if defined(MA_APPLE_DESKTOP)
+    status = ((ma_AudioUnitSetProperty_proc)pContext->coreaudio.AudioUnitSetProperty)(pData->audioUnit, kAudioOutputUnitProperty_CurrentDevice, kAudioUnitScope_Global, 0, &deviceObjectID, sizeof(deviceObjectID));
+    if (status != noErr) {
+        ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit);
+        return ma_result_from_OSStatus(status);
+    }
+#else
+    /*
+    For some reason it looks like Apple is only allowing selection of the input device. There does not appear to be any way to change
+    the default output route. I have no idea why this is like this, but for now we'll only be able to configure capture devices.
+    */
+    if (pDeviceID != NULL) {
+        if (deviceType == ma_device_type_capture) {
+            ma_bool32 found = MA_FALSE;
+            NSArray *pInputs = [[[AVAudioSession sharedInstance] currentRoute] inputs];
+            for (AVAudioSessionPortDescription* pPortDesc in pInputs) {
+                if (strcmp(pDeviceID->coreaudio, [pPortDesc.UID UTF8String]) == 0) {
+                    [[AVAudioSession sharedInstance] setPreferredInput:pPortDesc error:nil];
+                    found = MA_TRUE;
+                    break;
+                }
+            }
+
+            if (found == MA_FALSE) {
+                ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit);
+                return MA_DOES_NOT_EXIST;
+            }
+        }
+    }
+#endif
+
+    /*
+    Format. This is the hardest part of initialization because there are a few variables to take into account.
+      1) The format must be supported by the device.
+      2) The format must be supported by miniaudio.
+      3) There's a priority that miniaudio prefers.
+
+    Ideally we would like to use a format that's as close to the hardware as possible so we can get as close to a passthrough as possible. The
+    most important property is the sample rate. miniaudio can do format conversion for any sample rate and channel count, but cannot do the same
+    for the sample data format. If the sample data format is not supported by miniaudio it must be ignored completely.
+
+    On mobile platforms this is a bit different. We just force the use of whatever the audio unit's current format is set to.
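+    From the caller's side this negotiation is transparent: request a format, then read back what was actually
+    negotiated. A sketch using the public config/device members (do_something_with() is a placeholder):
+
+        ma_device_config config = ma_device_config_init(ma_device_type_playback);
+        config.playback.format   = ma_format_f32;
+        config.playback.channels = 2;
+        config.sampleRate        = 48000;
+        ma_device_init(&context, &config, &device);
+        do_something_with(device.playback.internalFormat, device.playback.internalSampleRate);
+
+    If the hardware ends up at, say, 44100, internalSampleRate reports 44100 and miniaudio converts to the
+    requested 48000 on the way through.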
+ */ + { + AudioStreamBasicDescription origFormat; + UInt32 origFormatSize = sizeof(origFormat); + AudioUnitScope formatScope = (deviceType == ma_device_type_playback) ? kAudioUnitScope_Input : kAudioUnitScope_Output; + AudioUnitElement formatElement = (deviceType == ma_device_type_playback) ? MA_COREAUDIO_OUTPUT_BUS : MA_COREAUDIO_INPUT_BUS; + + if (deviceType == ma_device_type_playback) { + status = ((ma_AudioUnitGetProperty_proc)pContext->coreaudio.AudioUnitGetProperty)(pData->audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, MA_COREAUDIO_OUTPUT_BUS, &origFormat, &origFormatSize); + } else { + status = ((ma_AudioUnitGetProperty_proc)pContext->coreaudio.AudioUnitGetProperty)(pData->audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, MA_COREAUDIO_INPUT_BUS, &origFormat, &origFormatSize); + } + if (status != noErr) { + ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit); + return ma_result_from_OSStatus(status); + } + + #if defined(MA_APPLE_DESKTOP) + result = ma_find_best_format__coreaudio(pContext, deviceObjectID, deviceType, pData->formatIn, pData->channelsIn, pData->sampleRateIn, &origFormat, &bestFormat); + if (result != MA_SUCCESS) { + ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit); + return result; + } + + /* + Technical Note TN2091: Device input using the HAL Output Audio Unit + https://developer.apple.com/library/archive/technotes/tn2091/_index.html + + This documentation says the following: + + The internal AudioConverter can handle any *simple* conversion. Typically, this means that a client can specify ANY + variant of the PCM formats. Consequently, the device's sample rate should match the desired sample rate. If sample rate + conversion is needed, it can be accomplished by buffering the input and converting the data on a separate thread with + another AudioConverter. + + The important part here is the mention that it can handle *simple* conversions, which does *not* include sample rate. We + therefore want to ensure the sample rate stays consistent. This document is specifically for input, but I'm going to play it + safe and apply the same rule to output as well. + + I have tried going against the documentation by setting the sample rate anyway, but this just results in AudioUnitRender() + returning a result code of -10863. I have also tried changing the format directly on the input scope on the input bus, but + this just results in `ca_require: IsStreamFormatWritable(inScope, inElement) NotWritable` when trying to set the format. + + Something that does seem to work, however, has been setting the nominal sample rate on the device object. The problem with + this, however, is that it actually changes the sample rate at the operating system level and not just the application. This + could be intrusive to the user, however, so I don't think it's wise to make this the default. Instead I'm making this a + configuration option. When the `coreaudio.allowNominalSampleRateChange` config option is set to true, changing the sample + rate will be allowed. Otherwise it'll be fixed to the current sample rate. To check the system-defined sample rate, run + the Audio MIDI Setup program that comes installed on macOS and observe how the sample rate changes as the sample rate is + changed by miniaudio. 
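+        Opting in is just a config flag (this is the option referred to above):
+
+            ma_device_config config = ma_device_config_init(ma_device_type_playback);
+            config.sampleRate = 48000;
+            config.coreaudio.allowNominalSampleRateChange = MA_TRUE;
+
+        With that set, the kAudioDevicePropertyNominalSampleRate change below is attempted; left at the default
+        of MA_FALSE, the device stays at its current rate and miniaudio resamples instead.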
+        */
+        if (pData->allowNominalSampleRateChange) {
+            AudioValueRange sampleRateRange;
+            AudioObjectPropertyAddress propAddress;
+
+            sampleRateRange.mMinimum = bestFormat.mSampleRate;
+            sampleRateRange.mMaximum = bestFormat.mSampleRate;
+
+            propAddress.mSelector = kAudioDevicePropertyNominalSampleRate;
+            propAddress.mScope    = (deviceType == ma_device_type_playback) ? kAudioObjectPropertyScopeOutput : kAudioObjectPropertyScopeInput;
+            propAddress.mElement  = AUDIO_OBJECT_PROPERTY_ELEMENT;
+
+            status = ((ma_AudioObjectSetPropertyData_proc)pContext->coreaudio.AudioObjectSetPropertyData)(deviceObjectID, &propAddress, 0, NULL, sizeof(sampleRateRange), &sampleRateRange);
+            if (status != noErr) {
+                bestFormat.mSampleRate = origFormat.mSampleRate;
+            }
+        } else {
+            bestFormat.mSampleRate = origFormat.mSampleRate;
+        }
+
+        status = ((ma_AudioUnitSetProperty_proc)pContext->coreaudio.AudioUnitSetProperty)(pData->audioUnit, kAudioUnitProperty_StreamFormat, formatScope, formatElement, &bestFormat, sizeof(bestFormat));
+        if (status != noErr) {
+            /* We failed to set the format, so fall back to the current format of the audio unit. */
+            bestFormat = origFormat;
+        }
+    #else
+        bestFormat = origFormat;
+
+        /*
+        Sample rate is a little different here because for some reason kAudioUnitProperty_StreamFormat returns 0... Oh well. We need to instead try
+        setting the sample rate to what the user has requested and then just see the results of it. Need to use some Objective-C here for this since
+        it depends on Apple's AVAudioSession API. To do this we just get the shared AVAudioSession instance and then set it. Note that from what I
+        can tell, it looks like the sample rate is shared between playback and capture for everything.
+        */
+        @autoreleasepool {
+            AVAudioSession* pAudioSession = [AVAudioSession sharedInstance];
+            MA_ASSERT(pAudioSession != NULL);
+
+            [pAudioSession setPreferredSampleRate:(double)pData->sampleRateIn error:nil];
+            bestFormat.mSampleRate = pAudioSession.sampleRate;
+
+            /*
+            I've had a report that the channel count returned by AudioUnitGetProperty above is inconsistent with
+            AVAudioSession outputNumberOfChannels. I'm going to try using the AVAudioSession values instead.
+
+            UPDATE 20/02/2025:
+            When testing on the simulator with an iPhone 15 and iOS 17 I get an error when initializing the audio
+            unit if I set the input channels to pAudioSession.inputNumberOfChannels. What is happening is the channel
+            count returned from AudioUnitGetProperty() above is set to 2, but pAudioSession is reporting a channel
+            count of 1. When this happens, the call to AudioUnitSetProperty() just down below will succeed, but
+            AudioUnitInitialize() further down will fail. The only solution I have come up with is to not set the
+            channel count to pAudioSession.inputNumberOfChannels.
+ */ + if (deviceType == ma_device_type_playback) { + bestFormat.mChannelsPerFrame = (UInt32)pAudioSession.outputNumberOfChannels; + } + + #if 0 + if (deviceType == ma_device_type_capture) { + /*printf("DEBUG: bestFormat.mChannelsPerFrame = %d; pAudioSession.inputNumberOfChannels = %d\n", (int)bestFormat.mChannelsPerFrame, (int)pAudioSession.inputNumberOfChannels);*/ + bestFormat.mChannelsPerFrame = (UInt32)pAudioSession.inputNumberOfChannels; + } + #endif + } + + + status = ((ma_AudioUnitSetProperty_proc)pContext->coreaudio.AudioUnitSetProperty)(pData->audioUnit, kAudioUnitProperty_StreamFormat, formatScope, formatElement, &bestFormat, sizeof(bestFormat)); + if (status != noErr) { + ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit); + return ma_result_from_OSStatus(status); + } + #endif + + result = ma_format_from_AudioStreamBasicDescription(&bestFormat, &pData->formatOut); + if (result != MA_SUCCESS) { + ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit); + return result; + } + + if (pData->formatOut == ma_format_unknown) { + ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit); + return MA_FORMAT_NOT_SUPPORTED; + } + + pData->channelsOut = bestFormat.mChannelsPerFrame; + pData->sampleRateOut = (ma_uint32)bestFormat.mSampleRate; + } + + /* Clamp the channel count for safety. */ + if (pData->channelsOut > MA_MAX_CHANNELS) { + pData->channelsOut = MA_MAX_CHANNELS; + } + + /* + Internal channel map. This is weird in my testing. If I use the AudioObject to get the + channel map, the channel descriptions are set to "Unknown" for some reason. To work around + this it looks like retrieving it from the AudioUnit will work. However, and this is where + it gets weird, it doesn't seem to work with capture devices, nor at all on iOS... Therefore + I'm going to fall back to a default assumption in these cases. + */ +#if defined(MA_APPLE_DESKTOP) + result = ma_get_AudioUnit_channel_map(pContext, pData->audioUnit, deviceType, pData->channelMapOut, pData->channelsOut); + if (result != MA_SUCCESS) { + #if 0 + /* Try falling back to the channel map from the AudioObject. */ + result = ma_get_AudioObject_channel_map(pContext, deviceObjectID, deviceType, pData->channelMapOut, pData->channelsOut); + if (result != MA_SUCCESS) { + return result; + } + #else + /* Fall back to default assumptions. */ + ma_channel_map_init_standard(ma_standard_channel_map_default, pData->channelMapOut, ma_countof(pData->channelMapOut), pData->channelsOut); + #endif + } +#else + /* TODO: Figure out how to get the channel map using AVAudioSession. */ + ma_channel_map_init_standard(ma_standard_channel_map_default, pData->channelMapOut, ma_countof(pData->channelMapOut), pData->channelsOut); +#endif + + + /* Buffer size. Not allowing this to be configurable on iOS. 
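+    As a worked example of the defaulting below (assuming the stock 10ms low-latency and 100ms conservative
+    profile constants): with a 48000 Hz device and no explicit period size, the low latency profile resolves to
+    480 frames and the conservative profile to 4800 frames. Specifying periodSizeInFrames directly bypasses the
+    millisecond calculation entirely.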
*/ + if (pData->periodSizeInFramesIn == 0) { + if (pData->periodSizeInMillisecondsIn == 0) { + if (pData->performanceProfile == ma_performance_profile_low_latency) { + actualPeriodSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(MA_DEFAULT_PERIOD_SIZE_IN_MILLISECONDS_LOW_LATENCY, pData->sampleRateOut); + } else { + actualPeriodSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(MA_DEFAULT_PERIOD_SIZE_IN_MILLISECONDS_CONSERVATIVE, pData->sampleRateOut); + } + } else { + actualPeriodSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(pData->periodSizeInMillisecondsIn, pData->sampleRateOut); + } + } else { + actualPeriodSizeInFrames = pData->periodSizeInFramesIn; + } + +#if defined(MA_APPLE_DESKTOP) + result = ma_set_AudioObject_buffer_size_in_frames(pContext, deviceObjectID, deviceType, &actualPeriodSizeInFrames); + if (result != MA_SUCCESS) { + return result; + } +#else + /* + On iOS, the size of the IO buffer needs to be specified in seconds and is a floating point + number. I don't trust any potential truncation errors due to converting from float to integer + so I'm going to explicitly set the actual period size to the next power of 2. + */ + @autoreleasepool { + AVAudioSession* pAudioSession = [AVAudioSession sharedInstance]; + MA_ASSERT(pAudioSession != NULL); + + [pAudioSession setPreferredIOBufferDuration:((float)actualPeriodSizeInFrames / pAudioSession.sampleRate) error:nil]; + actualPeriodSizeInFrames = ma_next_power_of_2((ma_uint32)(pAudioSession.IOBufferDuration * pAudioSession.sampleRate)); + } +#endif + + + /* + During testing I discovered that the buffer size can be too big. You'll get an error like this: + + kAudioUnitErr_TooManyFramesToProcess : inFramesToProcess=4096, mMaxFramesPerSlice=512 + + Note how inFramesToProcess is smaller than mMaxFramesPerSlice. To fix, we need to set kAudioUnitProperty_MaximumFramesPerSlice to that + of the size of our buffer, or do it the other way around and set our buffer size to the kAudioUnitProperty_MaximumFramesPerSlice. + */ + status = ((ma_AudioUnitSetProperty_proc)pContext->coreaudio.AudioUnitSetProperty)(pData->audioUnit, kAudioUnitProperty_MaximumFramesPerSlice, kAudioUnitScope_Global, 0, &actualPeriodSizeInFrames, sizeof(actualPeriodSizeInFrames)); + if (status != noErr) { + ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit); + return ma_result_from_OSStatus(status); + } + + pData->periodSizeInFramesOut = (ma_uint32)actualPeriodSizeInFrames; + + /* We need a buffer list if this is an input device. We render into this in the input callback. */ + if (deviceType == ma_device_type_capture) { + ma_bool32 isInterleaved = (bestFormat.mFormatFlags & kAudioFormatFlagIsNonInterleaved) == 0; + AudioBufferList* pBufferList; + + pBufferList = ma_allocate_AudioBufferList__coreaudio(pData->periodSizeInFramesOut, pData->formatOut, pData->channelsOut, (isInterleaved) ? ma_stream_layout_interleaved : ma_stream_layout_deinterleaved, &pContext->allocationCallbacks); + if (pBufferList == NULL) { + ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit); + return MA_OUT_OF_MEMORY; + } + + pData->pAudioBufferList = pBufferList; + } + + /* Callbacks. 
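+    This wires the audio unit's render callbacks up to ma_on_output__coreaudio and ma_on_input__coreaudio above,
+    which in turn feed the application's data callback through ma_device_handle_backend_data_callback(). On the
+    application side that chain terminates in the usual miniaudio callback shape:
+
+        void data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount)
+        {
+            ...fill pOutput for playback, consume pInput for capture...
+        }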
*/ + callbackInfo.inputProcRefCon = pDevice_DoNotReference; + if (deviceType == ma_device_type_playback) { + callbackInfo.inputProc = ma_on_output__coreaudio; + status = ((ma_AudioUnitSetProperty_proc)pContext->coreaudio.AudioUnitSetProperty)(pData->audioUnit, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Global, 0, &callbackInfo, sizeof(callbackInfo)); + if (status != noErr) { + ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit); + return ma_result_from_OSStatus(status); + } + } else { + callbackInfo.inputProc = ma_on_input__coreaudio; + status = ((ma_AudioUnitSetProperty_proc)pContext->coreaudio.AudioUnitSetProperty)(pData->audioUnit, kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Global, 0, &callbackInfo, sizeof(callbackInfo)); + if (status != noErr) { + ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit); + return ma_result_from_OSStatus(status); + } + } + + /* We need to listen for stop events. */ + if (pData->registerStopEvent) { + status = ((ma_AudioUnitAddPropertyListener_proc)pContext->coreaudio.AudioUnitAddPropertyListener)(pData->audioUnit, kAudioOutputUnitProperty_IsRunning, on_start_stop__coreaudio, pDevice_DoNotReference); + if (status != noErr) { + ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit); + return ma_result_from_OSStatus(status); + } + } + + /* Initialize the audio unit. */ + status = ((ma_AudioUnitInitialize_proc)pContext->coreaudio.AudioUnitInitialize)(pData->audioUnit); + if (status != noErr) { + ma_free(pData->pAudioBufferList, &pContext->allocationCallbacks); + pData->pAudioBufferList = NULL; + ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit); + return ma_result_from_OSStatus(status); + } + + /* Grab the name. */ +#if defined(MA_APPLE_DESKTOP) + ma_get_AudioObject_name(pContext, deviceObjectID, sizeof(pData->deviceName), pData->deviceName); +#else + if (deviceType == ma_device_type_playback) { + ma_strcpy_s(pData->deviceName, sizeof(pData->deviceName), MA_DEFAULT_PLAYBACK_DEVICE_NAME); + } else { + ma_strcpy_s(pData->deviceName, sizeof(pData->deviceName), MA_DEFAULT_CAPTURE_DEVICE_NAME); + } +#endif + + return result; +} + +#if defined(MA_APPLE_DESKTOP) +static ma_result ma_device_reinit_internal__coreaudio(ma_device* pDevice, ma_device_type deviceType, ma_bool32 disposePreviousAudioUnit) +{ + ma_device_init_internal_data__coreaudio data; + ma_result result; + + /* This should only be called for playback or capture, not duplex. */ + if (deviceType == ma_device_type_duplex) { + return MA_INVALID_ARGS; + } + + data.allowNominalSampleRateChange = MA_FALSE; /* Don't change the nominal sample rate when switching devices. 
*/ + + if (deviceType == ma_device_type_capture) { + data.formatIn = pDevice->capture.format; + data.channelsIn = pDevice->capture.channels; + data.sampleRateIn = pDevice->sampleRate; + MA_COPY_MEMORY(data.channelMapIn, pDevice->capture.channelMap, sizeof(pDevice->capture.channelMap)); + data.shareMode = pDevice->capture.shareMode; + data.performanceProfile = pDevice->coreaudio.originalPerformanceProfile; + data.registerStopEvent = MA_TRUE; + + if (disposePreviousAudioUnit) { + ((ma_AudioOutputUnitStop_proc)pDevice->pContext->coreaudio.AudioOutputUnitStop)((AudioUnit)pDevice->coreaudio.audioUnitCapture); + ((ma_AudioComponentInstanceDispose_proc)pDevice->pContext->coreaudio.AudioComponentInstanceDispose)((AudioUnit)pDevice->coreaudio.audioUnitCapture); + } + if (pDevice->coreaudio.pAudioBufferList) { + ma_free(pDevice->coreaudio.pAudioBufferList, &pDevice->pContext->allocationCallbacks); + } + } else if (deviceType == ma_device_type_playback) { + data.formatIn = pDevice->playback.format; + data.channelsIn = pDevice->playback.channels; + data.sampleRateIn = pDevice->sampleRate; + MA_COPY_MEMORY(data.channelMapIn, pDevice->playback.channelMap, sizeof(pDevice->playback.channelMap)); + data.shareMode = pDevice->playback.shareMode; + data.performanceProfile = pDevice->coreaudio.originalPerformanceProfile; + data.registerStopEvent = (pDevice->type != ma_device_type_duplex); + + if (disposePreviousAudioUnit) { + ((ma_AudioOutputUnitStop_proc)pDevice->pContext->coreaudio.AudioOutputUnitStop)((AudioUnit)pDevice->coreaudio.audioUnitPlayback); + ((ma_AudioComponentInstanceDispose_proc)pDevice->pContext->coreaudio.AudioComponentInstanceDispose)((AudioUnit)pDevice->coreaudio.audioUnitPlayback); + } + } + data.periodSizeInFramesIn = pDevice->coreaudio.originalPeriodSizeInFrames; + data.periodSizeInMillisecondsIn = pDevice->coreaudio.originalPeriodSizeInMilliseconds; + data.periodsIn = pDevice->coreaudio.originalPeriods; + + /* Need at least 3 periods for duplex. 
*/ + if (data.periodsIn < 3 && pDevice->type == ma_device_type_duplex) { + data.periodsIn = 3; + } + + result = ma_device_init_internal__coreaudio(pDevice->pContext, deviceType, NULL, &data, (void*)pDevice); + if (result != MA_SUCCESS) { + return result; + } + + if (deviceType == ma_device_type_capture) { + #if defined(MA_APPLE_DESKTOP) + pDevice->coreaudio.deviceObjectIDCapture = (ma_uint32)data.deviceObjectID; + ma_get_AudioObject_uid(pDevice->pContext, pDevice->coreaudio.deviceObjectIDCapture, sizeof(pDevice->capture.id.coreaudio), pDevice->capture.id.coreaudio); + #endif + pDevice->coreaudio.audioUnitCapture = (ma_ptr)data.audioUnit; + pDevice->coreaudio.pAudioBufferList = (ma_ptr)data.pAudioBufferList; + pDevice->coreaudio.audioBufferCapInFrames = data.periodSizeInFramesOut; + + pDevice->capture.internalFormat = data.formatOut; + pDevice->capture.internalChannels = data.channelsOut; + pDevice->capture.internalSampleRate = data.sampleRateOut; + MA_COPY_MEMORY(pDevice->capture.internalChannelMap, data.channelMapOut, sizeof(data.channelMapOut)); + pDevice->capture.internalPeriodSizeInFrames = data.periodSizeInFramesOut; + pDevice->capture.internalPeriods = data.periodsOut; + } else if (deviceType == ma_device_type_playback) { + #if defined(MA_APPLE_DESKTOP) + pDevice->coreaudio.deviceObjectIDPlayback = (ma_uint32)data.deviceObjectID; + ma_get_AudioObject_uid(pDevice->pContext, pDevice->coreaudio.deviceObjectIDPlayback, sizeof(pDevice->playback.id.coreaudio), pDevice->playback.id.coreaudio); + #endif + pDevice->coreaudio.audioUnitPlayback = (ma_ptr)data.audioUnit; + + pDevice->playback.internalFormat = data.formatOut; + pDevice->playback.internalChannels = data.channelsOut; + pDevice->playback.internalSampleRate = data.sampleRateOut; + MA_COPY_MEMORY(pDevice->playback.internalChannelMap, data.channelMapOut, sizeof(data.channelMapOut)); + pDevice->playback.internalPeriodSizeInFrames = data.periodSizeInFramesOut; + pDevice->playback.internalPeriods = data.periodsOut; + } + + return MA_SUCCESS; +} +#endif /* MA_APPLE_DESKTOP */ + +static ma_result ma_device_init__coreaudio(ma_device* pDevice, const ma_device_config* pConfig, ma_device_descriptor* pDescriptorPlayback, ma_device_descriptor* pDescriptorCapture) +{ + ma_result result; + + MA_ASSERT(pDevice != NULL); + MA_ASSERT(pConfig != NULL); + + if (pConfig->deviceType == ma_device_type_loopback) { + return MA_DEVICE_TYPE_NOT_SUPPORTED; + } + + /* No exclusive mode with the Core Audio backend for now. */ + if (((pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) && pDescriptorCapture->shareMode == ma_share_mode_exclusive) || + ((pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) && pDescriptorPlayback->shareMode == ma_share_mode_exclusive)) { + return MA_SHARE_MODE_NOT_SUPPORTED; + } + + /* Capture needs to be initialized first. 
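+
+       This ordering matters because, in duplex mode, the playback side reuses the period size
+       and period count that capture negotiates (see the duplex branch further down), so capture
+       must have completed its negotiation first.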
*/ + if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) { + ma_device_init_internal_data__coreaudio data; + data.allowNominalSampleRateChange = pConfig->coreaudio.allowNominalSampleRateChange; + data.formatIn = pDescriptorCapture->format; + data.channelsIn = pDescriptorCapture->channels; + data.sampleRateIn = pDescriptorCapture->sampleRate; + MA_COPY_MEMORY(data.channelMapIn, pDescriptorCapture->channelMap, sizeof(pDescriptorCapture->channelMap)); + data.periodSizeInFramesIn = pDescriptorCapture->periodSizeInFrames; + data.periodSizeInMillisecondsIn = pDescriptorCapture->periodSizeInMilliseconds; + data.periodsIn = pDescriptorCapture->periodCount; + data.shareMode = pDescriptorCapture->shareMode; + data.performanceProfile = pConfig->performanceProfile; + data.registerStopEvent = MA_TRUE; + + /* Need at least 3 periods for duplex. */ + if (data.periodsIn < 3 && pConfig->deviceType == ma_device_type_duplex) { + data.periodsIn = 3; + } + + result = ma_device_init_internal__coreaudio(pDevice->pContext, ma_device_type_capture, pDescriptorCapture->pDeviceID, &data, (void*)pDevice); + if (result != MA_SUCCESS) { + return result; + } + + pDevice->coreaudio.isDefaultCaptureDevice = (pConfig->capture.pDeviceID == NULL); + #if defined(MA_APPLE_DESKTOP) + pDevice->coreaudio.deviceObjectIDCapture = (ma_uint32)data.deviceObjectID; + #endif + pDevice->coreaudio.audioUnitCapture = (ma_ptr)data.audioUnit; + pDevice->coreaudio.pAudioBufferList = (ma_ptr)data.pAudioBufferList; + pDevice->coreaudio.audioBufferCapInFrames = data.periodSizeInFramesOut; + pDevice->coreaudio.originalPeriodSizeInFrames = pDescriptorCapture->periodSizeInFrames; + pDevice->coreaudio.originalPeriodSizeInMilliseconds = pDescriptorCapture->periodSizeInMilliseconds; + pDevice->coreaudio.originalPeriods = pDescriptorCapture->periodCount; + pDevice->coreaudio.originalPerformanceProfile = pConfig->performanceProfile; + + pDescriptorCapture->format = data.formatOut; + pDescriptorCapture->channels = data.channelsOut; + pDescriptorCapture->sampleRate = data.sampleRateOut; + MA_COPY_MEMORY(pDescriptorCapture->channelMap, data.channelMapOut, sizeof(data.channelMapOut)); + pDescriptorCapture->periodSizeInFrames = data.periodSizeInFramesOut; + pDescriptorCapture->periodCount = data.periodsOut; + + #if defined(MA_APPLE_DESKTOP) + ma_get_AudioObject_uid(pDevice->pContext, pDevice->coreaudio.deviceObjectIDCapture, sizeof(pDevice->capture.id.coreaudio), pDevice->capture.id.coreaudio); + + /* + If we are using the default device we'll need to listen for changes to the system's default device so we can seamlessly + switch the device in the background. + */ + if (pConfig->capture.pDeviceID == NULL) { + ma_device__track__coreaudio(pDevice); + } + #endif + } + + /* Playback. */ + if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) { + ma_device_init_internal_data__coreaudio data; + data.allowNominalSampleRateChange = pConfig->coreaudio.allowNominalSampleRateChange; + data.formatIn = pDescriptorPlayback->format; + data.channelsIn = pDescriptorPlayback->channels; + data.sampleRateIn = pDescriptorPlayback->sampleRate; + MA_COPY_MEMORY(data.channelMapIn, pDescriptorPlayback->channelMap, sizeof(pDescriptorPlayback->channelMap)); + data.shareMode = pDescriptorPlayback->shareMode; + data.performanceProfile = pConfig->performanceProfile; + + /* In full-duplex mode we want the playback buffer to be the same size as the capture buffer. 
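+
+       As a rough illustration: if capture negotiated 480-frame periods, playback is requested
+       with 480-frame periods as well so the two sides advance in lock step; mismatched period
+       sizes would make one side periodically starve or overflow the intermediate duplex buffer.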
*/ + if (pConfig->deviceType == ma_device_type_duplex) { + data.periodSizeInFramesIn = pDescriptorCapture->periodSizeInFrames; + data.periodsIn = pDescriptorCapture->periodCount; + data.registerStopEvent = MA_FALSE; + } else { + data.periodSizeInFramesIn = pDescriptorPlayback->periodSizeInFrames; + data.periodSizeInMillisecondsIn = pDescriptorPlayback->periodSizeInMilliseconds; + data.periodsIn = pDescriptorPlayback->periodCount; + data.registerStopEvent = MA_TRUE; + } + + result = ma_device_init_internal__coreaudio(pDevice->pContext, ma_device_type_playback, pDescriptorPlayback->pDeviceID, &data, (void*)pDevice); + if (result != MA_SUCCESS) { + if (pConfig->deviceType == ma_device_type_duplex) { + ((ma_AudioComponentInstanceDispose_proc)pDevice->pContext->coreaudio.AudioComponentInstanceDispose)((AudioUnit)pDevice->coreaudio.audioUnitCapture); + if (pDevice->coreaudio.pAudioBufferList) { + ma_free(pDevice->coreaudio.pAudioBufferList, &pDevice->pContext->allocationCallbacks); + } + } + return result; + } + + pDevice->coreaudio.isDefaultPlaybackDevice = (pConfig->playback.pDeviceID == NULL); + #if defined(MA_APPLE_DESKTOP) + pDevice->coreaudio.deviceObjectIDPlayback = (ma_uint32)data.deviceObjectID; + #endif + pDevice->coreaudio.audioUnitPlayback = (ma_ptr)data.audioUnit; + pDevice->coreaudio.originalPeriodSizeInFrames = pDescriptorPlayback->periodSizeInFrames; + pDevice->coreaudio.originalPeriodSizeInMilliseconds = pDescriptorPlayback->periodSizeInMilliseconds; + pDevice->coreaudio.originalPeriods = pDescriptorPlayback->periodCount; + pDevice->coreaudio.originalPerformanceProfile = pConfig->performanceProfile; + + pDescriptorPlayback->format = data.formatOut; + pDescriptorPlayback->channels = data.channelsOut; + pDescriptorPlayback->sampleRate = data.sampleRateOut; + MA_COPY_MEMORY(pDescriptorPlayback->channelMap, data.channelMapOut, sizeof(data.channelMapOut)); + pDescriptorPlayback->periodSizeInFrames = data.periodSizeInFramesOut; + pDescriptorPlayback->periodCount = data.periodsOut; + + #if defined(MA_APPLE_DESKTOP) + ma_get_AudioObject_uid(pDevice->pContext, pDevice->coreaudio.deviceObjectIDPlayback, sizeof(pDevice->playback.id.coreaudio), pDevice->playback.id.coreaudio); + + /* + If we are using the default device we'll need to listen for changes to the system's default device so we can seamlessly + switch the device in the background. + */ + if (pDescriptorPlayback->pDeviceID == NULL && (pConfig->deviceType != ma_device_type_duplex || pDescriptorCapture->pDeviceID != NULL)) { + ma_device__track__coreaudio(pDevice); + } + #endif + } + + + + /* + When stopping the device, a callback is called on another thread. We need to wait for this callback + before returning from ma_device_stop(). This event is used for this. + */ + ma_event_init(&pDevice->coreaudio.stopEvent); + + /* + We need to detect when a route has changed so we can update the data conversion pipeline accordingly. This is done + differently on non-Desktop Apple platforms. 
+    */
+#if defined(MA_APPLE_MOBILE)
+    pDevice->coreaudio.pNotificationHandler = (MA_BRIDGE_RETAINED void*)[[ma_ios_notification_handler alloc] init:pDevice];
+#endif
+
+    return MA_SUCCESS;
+}
+
+
+static ma_result ma_device_start__coreaudio(ma_device* pDevice)
+{
+    MA_ASSERT(pDevice != NULL);
+
+    if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
+        OSStatus status = ((ma_AudioOutputUnitStart_proc)pDevice->pContext->coreaudio.AudioOutputUnitStart)((AudioUnit)pDevice->coreaudio.audioUnitCapture);
+        if (status != noErr) {
+            return ma_result_from_OSStatus(status);
+        }
+    }
+
+    if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
+        OSStatus status = ((ma_AudioOutputUnitStart_proc)pDevice->pContext->coreaudio.AudioOutputUnitStart)((AudioUnit)pDevice->coreaudio.audioUnitPlayback);
+        if (status != noErr) {
+            if (pDevice->type == ma_device_type_duplex) {
+                ((ma_AudioOutputUnitStop_proc)pDevice->pContext->coreaudio.AudioOutputUnitStop)((AudioUnit)pDevice->coreaudio.audioUnitCapture);
+            }
+            return ma_result_from_OSStatus(status);
+        }
+    }
+
+    return MA_SUCCESS;
+}
+
+static ma_result ma_device_stop__coreaudio(ma_device* pDevice)
+{
+    MA_ASSERT(pDevice != NULL);
+
+    /* It's not clear from the documentation whether or not AudioOutputUnitStop() actually drains the device. */
+
+    if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
+        OSStatus status = ((ma_AudioOutputUnitStop_proc)pDevice->pContext->coreaudio.AudioOutputUnitStop)((AudioUnit)pDevice->coreaudio.audioUnitCapture);
+        if (status != noErr) {
+            return ma_result_from_OSStatus(status);
+        }
+    }
+
+    if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
+        OSStatus status = ((ma_AudioOutputUnitStop_proc)pDevice->pContext->coreaudio.AudioOutputUnitStop)((AudioUnit)pDevice->coreaudio.audioUnitPlayback);
+        if (status != noErr) {
+            return ma_result_from_OSStatus(status);
+        }
+    }
+
+    /* We need to wait for the callback to finish before returning. */
+    ma_event_wait(&pDevice->coreaudio.stopEvent);
+    return MA_SUCCESS;
+}
+
+
+static ma_result ma_context_uninit__coreaudio(ma_context* pContext)
+{
+    MA_ASSERT(pContext != NULL);
+    MA_ASSERT(pContext->backend == ma_backend_coreaudio);
+
+#if defined(MA_APPLE_MOBILE)
+    if (!pContext->coreaudio.noAudioSessionDeactivate) {
+        if (![[AVAudioSession sharedInstance] setActive:false error:nil]) {
+            ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "Failed to deactivate audio session.");
+            return MA_FAILED_TO_INIT_BACKEND;
+        }
+    }
+#endif
+
+#if !defined(MA_NO_RUNTIME_LINKING) && !defined(MA_APPLE_MOBILE)
+    ma_dlclose(ma_context_get_log(pContext), pContext->coreaudio.hAudioUnit);
+    ma_dlclose(ma_context_get_log(pContext), pContext->coreaudio.hCoreAudio);
+    ma_dlclose(ma_context_get_log(pContext), pContext->coreaudio.hCoreFoundation);
+#endif
+
+#if !defined(MA_APPLE_MOBILE)
+    ma_context__uninit_device_tracking__coreaudio(pContext);
+#endif
+
+    (void)pContext;
+    return MA_SUCCESS;
+}
+
+#if defined(MA_APPLE_MOBILE) && defined(__IPHONE_12_0)
+static AVAudioSessionCategory ma_to_AVAudioSessionCategory(ma_ios_session_category category)
+{
+    /* The "default" and "none" categories are treated differently and should not be used as an input into this function.
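+
+       A hypothetical caller is expected to branch before calling, along these lines:
+
+           if (category != ma_ios_session_category_default && category != ma_ios_session_category_none) {
+               [pAudioSession setCategory: ma_to_AVAudioSessionCategory(category) withOptions:options error:nil];
+           }
+
+       which is the shape of the call site in ma_context_init__coreaudio() further down.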
*/ + MA_ASSERT(category != ma_ios_session_category_default); + MA_ASSERT(category != ma_ios_session_category_none); + + switch (category) { + case ma_ios_session_category_ambient: return AVAudioSessionCategoryAmbient; + case ma_ios_session_category_solo_ambient: return AVAudioSessionCategorySoloAmbient; + case ma_ios_session_category_playback: return AVAudioSessionCategoryPlayback; + case ma_ios_session_category_record: return AVAudioSessionCategoryRecord; + case ma_ios_session_category_play_and_record: return AVAudioSessionCategoryPlayAndRecord; + case ma_ios_session_category_multi_route: return AVAudioSessionCategoryMultiRoute; + case ma_ios_session_category_none: return AVAudioSessionCategoryAmbient; + case ma_ios_session_category_default: return AVAudioSessionCategoryAmbient; + default: return AVAudioSessionCategoryAmbient; + } +} +#endif + +static ma_result ma_context_init__coreaudio(ma_context* pContext, const ma_context_config* pConfig, ma_backend_callbacks* pCallbacks) +{ +#if !defined(MA_APPLE_MOBILE) + ma_result result; +#endif + + MA_ASSERT(pConfig != NULL); + MA_ASSERT(pContext != NULL); + +#if defined(MA_APPLE_MOBILE) + @autoreleasepool { + AVAudioSession* pAudioSession = [AVAudioSession sharedInstance]; + AVAudioSessionCategoryOptions options = pConfig->coreaudio.sessionCategoryOptions; + + MA_ASSERT(pAudioSession != NULL); + + if (pConfig->coreaudio.sessionCategory == ma_ios_session_category_default) { + /* + I'm going to use trial and error to determine our default session category. First we'll try PlayAndRecord. If that fails + we'll try Playback and if that fails we'll try record. If all of these fail we'll just not set the category. + */ + #if !defined(MA_APPLE_TV) && !defined(MA_APPLE_WATCH) + options |= AVAudioSessionCategoryOptionDefaultToSpeaker; + #endif + + if ([pAudioSession setCategory: AVAudioSessionCategoryPlayAndRecord withOptions:options error:nil]) { + /* Using PlayAndRecord */ + } else if ([pAudioSession setCategory: AVAudioSessionCategoryPlayback withOptions:options error:nil]) { + /* Using Playback */ + } else if ([pAudioSession setCategory: AVAudioSessionCategoryRecord withOptions:options error:nil]) { + /* Using Record */ + } else { + /* Leave as default? */ + } + } else { + if (pConfig->coreaudio.sessionCategory != ma_ios_session_category_none) { + #if defined(__IPHONE_12_0) + if (![pAudioSession setCategory: ma_to_AVAudioSessionCategory(pConfig->coreaudio.sessionCategory) withOptions:options error:nil]) { + return MA_INVALID_OPERATION; /* Failed to set session category. */ + } + #else + /* Ignore the session category on version 11 and older, but post a warning. 
*/ + ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_WARNING, "Session category only supported in iOS 12 and newer."); + #endif + } + } + + if (!pConfig->coreaudio.noAudioSessionActivate) { + if (![pAudioSession setActive:true error:nil]) { + ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "Failed to activate audio session."); + return MA_FAILED_TO_INIT_BACKEND; + } + } + } +#endif + +#if !defined(MA_NO_RUNTIME_LINKING) && !defined(MA_APPLE_MOBILE) + pContext->coreaudio.hCoreFoundation = ma_dlopen(ma_context_get_log(pContext), "/System/Library/Frameworks/CoreFoundation.framework/CoreFoundation"); + if (pContext->coreaudio.hCoreFoundation == NULL) { + return MA_API_NOT_FOUND; + } + + pContext->coreaudio.CFStringGetCString = ma_dlsym(ma_context_get_log(pContext), pContext->coreaudio.hCoreFoundation, "CFStringGetCString"); + pContext->coreaudio.CFRelease = ma_dlsym(ma_context_get_log(pContext), pContext->coreaudio.hCoreFoundation, "CFRelease"); + + + pContext->coreaudio.hCoreAudio = ma_dlopen(ma_context_get_log(pContext), "/System/Library/Frameworks/CoreAudio.framework/CoreAudio"); + if (pContext->coreaudio.hCoreAudio == NULL) { + ma_dlclose(ma_context_get_log(pContext), pContext->coreaudio.hCoreFoundation); + return MA_API_NOT_FOUND; + } + + pContext->coreaudio.AudioObjectGetPropertyData = ma_dlsym(ma_context_get_log(pContext), pContext->coreaudio.hCoreAudio, "AudioObjectGetPropertyData"); + pContext->coreaudio.AudioObjectGetPropertyDataSize = ma_dlsym(ma_context_get_log(pContext), pContext->coreaudio.hCoreAudio, "AudioObjectGetPropertyDataSize"); + pContext->coreaudio.AudioObjectSetPropertyData = ma_dlsym(ma_context_get_log(pContext), pContext->coreaudio.hCoreAudio, "AudioObjectSetPropertyData"); + pContext->coreaudio.AudioObjectAddPropertyListener = ma_dlsym(ma_context_get_log(pContext), pContext->coreaudio.hCoreAudio, "AudioObjectAddPropertyListener"); + pContext->coreaudio.AudioObjectRemovePropertyListener = ma_dlsym(ma_context_get_log(pContext), pContext->coreaudio.hCoreAudio, "AudioObjectRemovePropertyListener"); + + /* + It looks like Apple has moved some APIs from AudioUnit into AudioToolbox on more recent versions of macOS. They are still + defined in AudioUnit, but just in case they decide to remove them from there entirely I'm going to implement a fallback. + The way it'll work is that it'll first try AudioUnit, and if the required symbols are not present there we'll fall back to + AudioToolbox. + */ + pContext->coreaudio.hAudioUnit = ma_dlopen(ma_context_get_log(pContext), "/System/Library/Frameworks/AudioUnit.framework/AudioUnit"); + if (pContext->coreaudio.hAudioUnit == NULL) { + ma_dlclose(ma_context_get_log(pContext), pContext->coreaudio.hCoreAudio); + ma_dlclose(ma_context_get_log(pContext), pContext->coreaudio.hCoreFoundation); + return MA_API_NOT_FOUND; + } + + if (ma_dlsym(ma_context_get_log(pContext), pContext->coreaudio.hAudioUnit, "AudioComponentFindNext") == NULL) { + /* Couldn't find the required symbols in AudioUnit, so fall back to AudioToolbox. 
*/ + ma_dlclose(ma_context_get_log(pContext), pContext->coreaudio.hAudioUnit); + pContext->coreaudio.hAudioUnit = ma_dlopen(ma_context_get_log(pContext), "/System/Library/Frameworks/AudioToolbox.framework/AudioToolbox"); + if (pContext->coreaudio.hAudioUnit == NULL) { + ma_dlclose(ma_context_get_log(pContext), pContext->coreaudio.hCoreAudio); + ma_dlclose(ma_context_get_log(pContext), pContext->coreaudio.hCoreFoundation); + return MA_API_NOT_FOUND; + } + } + + pContext->coreaudio.AudioComponentFindNext = ma_dlsym(ma_context_get_log(pContext), pContext->coreaudio.hAudioUnit, "AudioComponentFindNext"); + pContext->coreaudio.AudioComponentInstanceDispose = ma_dlsym(ma_context_get_log(pContext), pContext->coreaudio.hAudioUnit, "AudioComponentInstanceDispose"); + pContext->coreaudio.AudioComponentInstanceNew = ma_dlsym(ma_context_get_log(pContext), pContext->coreaudio.hAudioUnit, "AudioComponentInstanceNew"); + pContext->coreaudio.AudioOutputUnitStart = ma_dlsym(ma_context_get_log(pContext), pContext->coreaudio.hAudioUnit, "AudioOutputUnitStart"); + pContext->coreaudio.AudioOutputUnitStop = ma_dlsym(ma_context_get_log(pContext), pContext->coreaudio.hAudioUnit, "AudioOutputUnitStop"); + pContext->coreaudio.AudioUnitAddPropertyListener = ma_dlsym(ma_context_get_log(pContext), pContext->coreaudio.hAudioUnit, "AudioUnitAddPropertyListener"); + pContext->coreaudio.AudioUnitGetPropertyInfo = ma_dlsym(ma_context_get_log(pContext), pContext->coreaudio.hAudioUnit, "AudioUnitGetPropertyInfo"); + pContext->coreaudio.AudioUnitGetProperty = ma_dlsym(ma_context_get_log(pContext), pContext->coreaudio.hAudioUnit, "AudioUnitGetProperty"); + pContext->coreaudio.AudioUnitSetProperty = ma_dlsym(ma_context_get_log(pContext), pContext->coreaudio.hAudioUnit, "AudioUnitSetProperty"); + pContext->coreaudio.AudioUnitInitialize = ma_dlsym(ma_context_get_log(pContext), pContext->coreaudio.hAudioUnit, "AudioUnitInitialize"); + pContext->coreaudio.AudioUnitRender = ma_dlsym(ma_context_get_log(pContext), pContext->coreaudio.hAudioUnit, "AudioUnitRender"); +#else + pContext->coreaudio.CFStringGetCString = (ma_proc)CFStringGetCString; + pContext->coreaudio.CFRelease = (ma_proc)CFRelease; + + #if defined(MA_APPLE_DESKTOP) + pContext->coreaudio.AudioObjectGetPropertyData = (ma_proc)AudioObjectGetPropertyData; + pContext->coreaudio.AudioObjectGetPropertyDataSize = (ma_proc)AudioObjectGetPropertyDataSize; + pContext->coreaudio.AudioObjectSetPropertyData = (ma_proc)AudioObjectSetPropertyData; + pContext->coreaudio.AudioObjectAddPropertyListener = (ma_proc)AudioObjectAddPropertyListener; + pContext->coreaudio.AudioObjectRemovePropertyListener = (ma_proc)AudioObjectRemovePropertyListener; + #endif + + pContext->coreaudio.AudioComponentFindNext = (ma_proc)AudioComponentFindNext; + pContext->coreaudio.AudioComponentInstanceDispose = (ma_proc)AudioComponentInstanceDispose; + pContext->coreaudio.AudioComponentInstanceNew = (ma_proc)AudioComponentInstanceNew; + pContext->coreaudio.AudioOutputUnitStart = (ma_proc)AudioOutputUnitStart; + pContext->coreaudio.AudioOutputUnitStop = (ma_proc)AudioOutputUnitStop; + pContext->coreaudio.AudioUnitAddPropertyListener = (ma_proc)AudioUnitAddPropertyListener; + pContext->coreaudio.AudioUnitGetPropertyInfo = (ma_proc)AudioUnitGetPropertyInfo; + pContext->coreaudio.AudioUnitGetProperty = (ma_proc)AudioUnitGetProperty; + pContext->coreaudio.AudioUnitSetProperty = (ma_proc)AudioUnitSetProperty; + pContext->coreaudio.AudioUnitInitialize = (ma_proc)AudioUnitInitialize; + pContext->coreaudio.AudioUnitRender 
= (ma_proc)AudioUnitRender;
+#endif
+
+    /* Audio component. */
+    {
+        AudioComponentDescription desc;
+        desc.componentType         = kAudioUnitType_Output;
+    #if defined(MA_APPLE_DESKTOP)
+        desc.componentSubType      = kAudioUnitSubType_HALOutput;
+    #else
+        desc.componentSubType      = kAudioUnitSubType_RemoteIO;
+    #endif
+        desc.componentManufacturer = kAudioUnitManufacturer_Apple;
+        desc.componentFlags        = 0;
+        desc.componentFlagsMask    = 0;
+
+        pContext->coreaudio.component = ((ma_AudioComponentFindNext_proc)pContext->coreaudio.AudioComponentFindNext)(NULL, &desc);
+        if (pContext->coreaudio.component == NULL) {
+        #if !defined(MA_NO_RUNTIME_LINKING) && !defined(MA_APPLE_MOBILE)
+            ma_dlclose(ma_context_get_log(pContext), pContext->coreaudio.hAudioUnit);
+            ma_dlclose(ma_context_get_log(pContext), pContext->coreaudio.hCoreAudio);
+            ma_dlclose(ma_context_get_log(pContext), pContext->coreaudio.hCoreFoundation);
+        #endif
+            return MA_FAILED_TO_INIT_BACKEND;
+        }
+    }
+
+#if !defined(MA_APPLE_MOBILE)
+    result = ma_context__init_device_tracking__coreaudio(pContext);
+    if (result != MA_SUCCESS) {
+    #if !defined(MA_NO_RUNTIME_LINKING) && !defined(MA_APPLE_MOBILE)
+        ma_dlclose(ma_context_get_log(pContext), pContext->coreaudio.hAudioUnit);
+        ma_dlclose(ma_context_get_log(pContext), pContext->coreaudio.hCoreAudio);
+        ma_dlclose(ma_context_get_log(pContext), pContext->coreaudio.hCoreFoundation);
+    #endif
+        return result;
+    }
+#endif
+
+    pContext->coreaudio.noAudioSessionDeactivate = pConfig->coreaudio.noAudioSessionDeactivate;
+
+    pCallbacks->onContextInit             = ma_context_init__coreaudio;
+    pCallbacks->onContextUninit           = ma_context_uninit__coreaudio;
+    pCallbacks->onContextEnumerateDevices = ma_context_enumerate_devices__coreaudio;
+    pCallbacks->onContextGetDeviceInfo    = ma_context_get_device_info__coreaudio;
+    pCallbacks->onDeviceInit              = ma_device_init__coreaudio;
+    pCallbacks->onDeviceUninit            = ma_device_uninit__coreaudio;
+    pCallbacks->onDeviceStart             = ma_device_start__coreaudio;
+    pCallbacks->onDeviceStop              = ma_device_stop__coreaudio;
+    pCallbacks->onDeviceRead              = NULL;
+    pCallbacks->onDeviceWrite             = NULL;
+    pCallbacks->onDeviceDataLoop          = NULL;
+
+    return MA_SUCCESS;
+}
+#endif /* MA_HAS_COREAUDIO */
+
+
+
+/******************************************************************************
+
+sndio Backend
+
+******************************************************************************/
+#ifdef MA_HAS_SNDIO
+#include <fcntl.h>
+
+/*
+Only supporting OpenBSD. This did not work very well at all on FreeBSD when I tried it. Not sure if this is due
+to miniaudio's implementation or if it's some kind of system configuration issue, but basically the default device
+just doesn't emit any sound, or at times you'll hear tiny pieces. I will consider enabling this when there's
+demand for it or if I can get it tested and debugged more thoroughly.
+*/
+#if 0
+#if defined(__NetBSD__) || defined(__OpenBSD__)
+#include <sys/audioio.h>
+#endif
+#if defined(__FreeBSD__) || defined(__DragonFly__)
+#include <sys/soundcard.h>
+#endif
+#endif
+
+#define MA_SIO_DEVANY "default"
+#define MA_SIO_PLAY   1
+#define MA_SIO_REC    2
+#define MA_SIO_NENC   8
+#define MA_SIO_NCHAN  8
+#define MA_SIO_NRATE  16
+#define MA_SIO_NCONF  4
+
+struct ma_sio_hdl; /* <-- Opaque */
+
+struct ma_sio_par
+{
+    unsigned int bits;
+    unsigned int bps;
+    unsigned int sig;
+    unsigned int le;
+    unsigned int msb;
+    unsigned int rchan;
+    unsigned int pchan;
+    unsigned int rate;
+    unsigned int bufsz;
+    unsigned int xrun;
+    unsigned int round;
+    unsigned int appbufsz;
+    int __pad[3];
+    unsigned int __magic;
+};
+
+struct ma_sio_enc
+{
+    unsigned int bits;
+    unsigned int bps;
+    unsigned int sig;
+    unsigned int le;
+    unsigned int msb;
+};
+
+struct ma_sio_conf
+{
+    unsigned int enc;
+    unsigned int rchan;
+    unsigned int pchan;
+    unsigned int rate;
+};
+
+struct ma_sio_cap
+{
+    struct ma_sio_enc enc[MA_SIO_NENC];
+    unsigned int rchan[MA_SIO_NCHAN];
+    unsigned int pchan[MA_SIO_NCHAN];
+    unsigned int rate[MA_SIO_NRATE];
+    int __pad[7];
+    unsigned int nconf;
+    struct ma_sio_conf confs[MA_SIO_NCONF];
+};
+
+typedef struct ma_sio_hdl* (* ma_sio_open_proc)   (const char*, unsigned int, int);
+typedef void               (* ma_sio_close_proc)  (struct ma_sio_hdl*);
+typedef int                (* ma_sio_setpar_proc) (struct ma_sio_hdl*, struct ma_sio_par*);
+typedef int                (* ma_sio_getpar_proc) (struct ma_sio_hdl*, struct ma_sio_par*);
+typedef int                (* ma_sio_getcap_proc) (struct ma_sio_hdl*, struct ma_sio_cap*);
+typedef size_t             (* ma_sio_write_proc)  (struct ma_sio_hdl*, const void*, size_t);
+typedef size_t             (* ma_sio_read_proc)   (struct ma_sio_hdl*, void*, size_t);
+typedef int                (* ma_sio_start_proc)  (struct ma_sio_hdl*);
+typedef int                (* ma_sio_stop_proc)   (struct ma_sio_hdl*);
+typedef int                (* ma_sio_initpar_proc)(struct ma_sio_par*);
+
+static ma_uint32 ma_get_standard_sample_rate_priority_index__sndio(ma_uint32 sampleRate) /* Lower = higher priority */
+{
+    ma_uint32 i;
+    for (i = 0; i < ma_countof(g_maStandardSampleRatePriorities); ++i) {
+        if (g_maStandardSampleRatePriorities[i] == sampleRate) {
+            return i;
+        }
+    }
+
+    return (ma_uint32)-1;
+}
+
+static ma_format ma_format_from_sio_enc__sndio(unsigned int bits, unsigned int bps, unsigned int sig, unsigned int le, unsigned int msb)
+{
+    /* We only support native-endian right now.
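+
+       For example: bits=8, bps=1, sig=0 maps to ma_format_u8 below; bits=16, bps=2, sig=1 maps
+       to ma_format_s16; and bits=24 with bps=4 (24-bit samples in 4-byte words) is recognised
+       but intentionally left unsupported.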
*/ + if ((ma_is_little_endian() && le == 0) || (ma_is_big_endian() && le == 1)) { + return ma_format_unknown; + } + + if (bits == 8 && bps == 1 && sig == 0) { + return ma_format_u8; + } + if (bits == 16 && bps == 2 && sig == 1) { + return ma_format_s16; + } + if (bits == 24 && bps == 3 && sig == 1) { + return ma_format_s24; + } + if (bits == 24 && bps == 4 && sig == 1 && msb == 0) { + /*return ma_format_s24_32;*/ + } + if (bits == 32 && bps == 4 && sig == 1) { + return ma_format_s32; + } + + return ma_format_unknown; +} + +static ma_format ma_find_best_format_from_sio_cap__sndio(struct ma_sio_cap* caps) +{ + ma_format bestFormat; + unsigned int iConfig; + + MA_ASSERT(caps != NULL); + + bestFormat = ma_format_unknown; + for (iConfig = 0; iConfig < caps->nconf; iConfig += 1) { + unsigned int iEncoding; + for (iEncoding = 0; iEncoding < MA_SIO_NENC; iEncoding += 1) { + unsigned int bits; + unsigned int bps; + unsigned int sig; + unsigned int le; + unsigned int msb; + ma_format format; + + if ((caps->confs[iConfig].enc & (1UL << iEncoding)) == 0) { + continue; + } + + bits = caps->enc[iEncoding].bits; + bps = caps->enc[iEncoding].bps; + sig = caps->enc[iEncoding].sig; + le = caps->enc[iEncoding].le; + msb = caps->enc[iEncoding].msb; + format = ma_format_from_sio_enc__sndio(bits, bps, sig, le, msb); + if (format == ma_format_unknown) { + continue; /* Format not supported. */ + } + + if (bestFormat == ma_format_unknown) { + bestFormat = format; + } else { + if (ma_get_format_priority_index(bestFormat) > ma_get_format_priority_index(format)) { /* <-- Lower = better. */ + bestFormat = format; + } + } + } + } + + return bestFormat; +} + +static ma_uint32 ma_find_best_channels_from_sio_cap__sndio(struct ma_sio_cap* caps, ma_device_type deviceType, ma_format requiredFormat) +{ + ma_uint32 maxChannels; + unsigned int iConfig; + + MA_ASSERT(caps != NULL); + MA_ASSERT(requiredFormat != ma_format_unknown); + + /* Just pick whatever configuration has the most channels. */ + maxChannels = 0; + for (iConfig = 0; iConfig < caps->nconf; iConfig += 1) { + /* The encoding should be of requiredFormat. */ + unsigned int iEncoding; + for (iEncoding = 0; iEncoding < MA_SIO_NENC; iEncoding += 1) { + unsigned int iChannel; + unsigned int bits; + unsigned int bps; + unsigned int sig; + unsigned int le; + unsigned int msb; + ma_format format; + + if ((caps->confs[iConfig].enc & (1UL << iEncoding)) == 0) { + continue; + } + + bits = caps->enc[iEncoding].bits; + bps = caps->enc[iEncoding].bps; + sig = caps->enc[iEncoding].sig; + le = caps->enc[iEncoding].le; + msb = caps->enc[iEncoding].msb; + format = ma_format_from_sio_enc__sndio(bits, bps, sig, le, msb); + if (format != requiredFormat) { + continue; + } + + /* Getting here means the format is supported. Iterate over each channel count and grab the biggest one. 
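+
+              A hypothetical example: if the config's pchan bitmask has bits 0 and 2 set, with
+              caps->pchan[0] = 2 and caps->pchan[2] = 8, maxChannels ends up as 8.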
*/ + for (iChannel = 0; iChannel < MA_SIO_NCHAN; iChannel += 1) { + unsigned int chan = 0; + unsigned int channels; + + if (deviceType == ma_device_type_playback) { + chan = caps->confs[iConfig].pchan; + } else { + chan = caps->confs[iConfig].rchan; + } + + if ((chan & (1UL << iChannel)) == 0) { + continue; + } + + if (deviceType == ma_device_type_playback) { + channels = caps->pchan[iChannel]; + } else { + channels = caps->rchan[iChannel]; + } + + if (maxChannels < channels) { + maxChannels = channels; + } + } + } + } + + return maxChannels; +} + +static ma_uint32 ma_find_best_sample_rate_from_sio_cap__sndio(struct ma_sio_cap* caps, ma_device_type deviceType, ma_format requiredFormat, ma_uint32 requiredChannels) +{ + ma_uint32 firstSampleRate; + ma_uint32 bestSampleRate; + unsigned int iConfig; + + MA_ASSERT(caps != NULL); + MA_ASSERT(requiredFormat != ma_format_unknown); + MA_ASSERT(requiredChannels > 0); + MA_ASSERT(requiredChannels <= MA_MAX_CHANNELS); + + firstSampleRate = 0; /* <-- If the device does not support a standard rate we'll fall back to the first one that's found. */ + bestSampleRate = 0; + + for (iConfig = 0; iConfig < caps->nconf; iConfig += 1) { + /* The encoding should be of requiredFormat. */ + unsigned int iEncoding; + for (iEncoding = 0; iEncoding < MA_SIO_NENC; iEncoding += 1) { + unsigned int iChannel; + unsigned int bits; + unsigned int bps; + unsigned int sig; + unsigned int le; + unsigned int msb; + ma_format format; + + if ((caps->confs[iConfig].enc & (1UL << iEncoding)) == 0) { + continue; + } + + bits = caps->enc[iEncoding].bits; + bps = caps->enc[iEncoding].bps; + sig = caps->enc[iEncoding].sig; + le = caps->enc[iEncoding].le; + msb = caps->enc[iEncoding].msb; + format = ma_format_from_sio_enc__sndio(bits, bps, sig, le, msb); + if (format != requiredFormat) { + continue; + } + + /* Getting here means the format is supported. Iterate over each channel count and grab the biggest one. */ + for (iChannel = 0; iChannel < MA_SIO_NCHAN; iChannel += 1) { + unsigned int chan = 0; + unsigned int channels; + unsigned int iRate; + + if (deviceType == ma_device_type_playback) { + chan = caps->confs[iConfig].pchan; + } else { + chan = caps->confs[iConfig].rchan; + } + + if ((chan & (1UL << iChannel)) == 0) { + continue; + } + + if (deviceType == ma_device_type_playback) { + channels = caps->pchan[iChannel]; + } else { + channels = caps->rchan[iChannel]; + } + + if (channels != requiredChannels) { + continue; + } + + /* Getting here means we have found a compatible encoding/channel pair. */ + for (iRate = 0; iRate < MA_SIO_NRATE; iRate += 1) { + ma_uint32 rate = (ma_uint32)caps->rate[iRate]; + ma_uint32 ratePriority; + + if (firstSampleRate == 0) { + firstSampleRate = rate; + } + + /* Disregard this rate if it's not a standard one. */ + ratePriority = ma_get_standard_sample_rate_priority_index__sndio(rate); + if (ratePriority == (ma_uint32)-1) { + continue; + } + + if (ma_get_standard_sample_rate_priority_index__sndio(bestSampleRate) > ratePriority) { /* Lower = better. */ + bestSampleRate = rate; + } + } + } + } + } + + /* If a standard sample rate was not found just fall back to the first one that was iterated. 
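+
+       For example, a device reporting rates {22050, 48000, 44100} selects 48000, assuming the
+       standard-rate priority table ranks it highest; a device reporting only a non-standard rate
+       such as 11029 falls back to firstSampleRate = 11029.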
*/ + if (bestSampleRate == 0) { + bestSampleRate = firstSampleRate; + } + + return bestSampleRate; +} + + +static ma_result ma_context_enumerate_devices__sndio(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData) +{ + ma_bool32 isTerminating = MA_FALSE; + struct ma_sio_hdl* handle; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(callback != NULL); + + /* sndio doesn't seem to have a good device enumeration API, so I'm therefore only enumerating over default devices for now. */ + + /* Playback. */ + if (!isTerminating) { + handle = ((ma_sio_open_proc)pContext->sndio.sio_open)(MA_SIO_DEVANY, MA_SIO_PLAY, 0); + if (handle != NULL) { + /* Supports playback. */ + ma_device_info deviceInfo; + MA_ZERO_OBJECT(&deviceInfo); + ma_strcpy_s(deviceInfo.id.sndio, sizeof(deviceInfo.id.sndio), MA_SIO_DEVANY); + ma_strcpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_PLAYBACK_DEVICE_NAME); + + isTerminating = !callback(pContext, ma_device_type_playback, &deviceInfo, pUserData); + + ((ma_sio_close_proc)pContext->sndio.sio_close)(handle); + } + } + + /* Capture. */ + if (!isTerminating) { + handle = ((ma_sio_open_proc)pContext->sndio.sio_open)(MA_SIO_DEVANY, MA_SIO_REC, 0); + if (handle != NULL) { + /* Supports capture. */ + ma_device_info deviceInfo; + MA_ZERO_OBJECT(&deviceInfo); + ma_strcpy_s(deviceInfo.id.sndio, sizeof(deviceInfo.id.sndio), "default"); + ma_strcpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_CAPTURE_DEVICE_NAME); + + isTerminating = !callback(pContext, ma_device_type_capture, &deviceInfo, pUserData); + + ((ma_sio_close_proc)pContext->sndio.sio_close)(handle); + } + } + + return MA_SUCCESS; +} + +static ma_result ma_context_get_device_info__sndio(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_device_info* pDeviceInfo) +{ + char devid[256]; + struct ma_sio_hdl* handle; + struct ma_sio_cap caps; + unsigned int iConfig; + + MA_ASSERT(pContext != NULL); + + /* We need to open the device before we can get information about it. */ + if (pDeviceID == NULL) { + ma_strcpy_s(devid, sizeof(devid), MA_SIO_DEVANY); + ma_strcpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), (deviceType == ma_device_type_playback) ? MA_DEFAULT_PLAYBACK_DEVICE_NAME : MA_DEFAULT_CAPTURE_DEVICE_NAME); + } else { + ma_strcpy_s(devid, sizeof(devid), pDeviceID->sndio); + ma_strcpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), devid); + } + + handle = ((ma_sio_open_proc)pContext->sndio.sio_open)(devid, (deviceType == ma_device_type_playback) ? MA_SIO_PLAY : MA_SIO_REC, 0); + if (handle == NULL) { + return MA_NO_DEVICE; + } + + if (((ma_sio_getcap_proc)pContext->sndio.sio_getcap)(handle, &caps) == 0) { + return MA_ERROR; + } + + pDeviceInfo->nativeDataFormatCount = 0; + + for (iConfig = 0; iConfig < caps.nconf; iConfig += 1) { + /* + The main thing we care about is that the encoding is supported by miniaudio. If it is, we want to give + preference to some formats over others. 
+ */ + unsigned int iEncoding; + unsigned int iChannel; + unsigned int iRate; + + for (iEncoding = 0; iEncoding < MA_SIO_NENC; iEncoding += 1) { + unsigned int bits; + unsigned int bps; + unsigned int sig; + unsigned int le; + unsigned int msb; + ma_format format; + + if ((caps.confs[iConfig].enc & (1UL << iEncoding)) == 0) { + continue; + } + + bits = caps.enc[iEncoding].bits; + bps = caps.enc[iEncoding].bps; + sig = caps.enc[iEncoding].sig; + le = caps.enc[iEncoding].le; + msb = caps.enc[iEncoding].msb; + format = ma_format_from_sio_enc__sndio(bits, bps, sig, le, msb); + if (format == ma_format_unknown) { + continue; /* Format not supported. */ + } + + + /* Channels. */ + for (iChannel = 0; iChannel < MA_SIO_NCHAN; iChannel += 1) { + unsigned int chan = 0; + unsigned int channels; + + if (deviceType == ma_device_type_playback) { + chan = caps.confs[iConfig].pchan; + } else { + chan = caps.confs[iConfig].rchan; + } + + if ((chan & (1UL << iChannel)) == 0) { + continue; + } + + if (deviceType == ma_device_type_playback) { + channels = caps.pchan[iChannel]; + } else { + channels = caps.rchan[iChannel]; + } + + + /* Sample Rates. */ + for (iRate = 0; iRate < MA_SIO_NRATE; iRate += 1) { + if ((caps.confs[iConfig].rate & (1UL << iRate)) != 0) { + ma_device_info_add_native_data_format(pDeviceInfo, format, channels, caps.rate[iRate], 0); + } + } + } + } + } + + ((ma_sio_close_proc)pContext->sndio.sio_close)(handle); + return MA_SUCCESS; +} + +static ma_result ma_device_uninit__sndio(ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + ((ma_sio_close_proc)pDevice->pContext->sndio.sio_close)((struct ma_sio_hdl*)pDevice->sndio.handleCapture); + } + + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + ((ma_sio_close_proc)pDevice->pContext->sndio.sio_close)((struct ma_sio_hdl*)pDevice->sndio.handlePlayback); + } + + return MA_SUCCESS; +} + +static ma_result ma_device_init_handle__sndio(ma_device* pDevice, const ma_device_config* pConfig, ma_device_descriptor* pDescriptor, ma_device_type deviceType) +{ + const char* pDeviceName; + ma_ptr handle; + int openFlags = 0; + struct ma_sio_cap caps; + struct ma_sio_par par; + const ma_device_id* pDeviceID; + ma_format format; + ma_uint32 channels; + ma_uint32 sampleRate; + ma_format internalFormat; + ma_uint32 internalChannels; + ma_uint32 internalSampleRate; + ma_uint32 internalPeriodSizeInFrames; + ma_uint32 internalPeriods; + + MA_ASSERT(pConfig != NULL); + MA_ASSERT(deviceType != ma_device_type_duplex); + MA_ASSERT(pDevice != NULL); + + if (deviceType == ma_device_type_capture) { + openFlags = MA_SIO_REC; + } else { + openFlags = MA_SIO_PLAY; + } + + pDeviceID = pDescriptor->pDeviceID; + format = pDescriptor->format; + channels = pDescriptor->channels; + sampleRate = pDescriptor->sampleRate; + + pDeviceName = MA_SIO_DEVANY; + if (pDeviceID != NULL) { + pDeviceName = pDeviceID->sndio; + } + + handle = (ma_ptr)((ma_sio_open_proc)pDevice->pContext->sndio.sio_open)(pDeviceName, openFlags, 0); + if (handle == NULL) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[sndio] Failed to open device."); + return MA_FAILED_TO_OPEN_BACKEND_DEVICE; + } + + /* We need to retrieve the device caps to determine the most appropriate format to use. 
*/ + if (((ma_sio_getcap_proc)pDevice->pContext->sndio.sio_getcap)((struct ma_sio_hdl*)handle, &caps) == 0) { + ((ma_sio_close_proc)pDevice->pContext->sndio.sio_close)((struct ma_sio_hdl*)handle); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[sndio] Failed to retrieve device caps."); + return MA_ERROR; + } + + /* + Note: sndio reports a huge range of available channels. This is inconvenient for us because there's no real + way, as far as I can tell, to get the _actual_ channel count of the device. I'm therefore restricting this + to the requested channels, regardless of whether or not the default channel count is requested. + + For hardware devices, I'm suspecting only a single channel count will be reported and we can safely use the + value returned by ma_find_best_channels_from_sio_cap__sndio(). + */ + if (deviceType == ma_device_type_capture) { + if (format == ma_format_unknown) { + format = ma_find_best_format_from_sio_cap__sndio(&caps); + } + + if (channels == 0) { + if (strlen(pDeviceName) > strlen("rsnd/") && strncmp(pDeviceName, "rsnd/", strlen("rsnd/")) == 0) { + channels = ma_find_best_channels_from_sio_cap__sndio(&caps, deviceType, format); + } else { + channels = MA_DEFAULT_CHANNELS; + } + } + } else { + if (format == ma_format_unknown) { + format = ma_find_best_format_from_sio_cap__sndio(&caps); + } + + if (channels == 0) { + if (strlen(pDeviceName) > strlen("rsnd/") && strncmp(pDeviceName, "rsnd/", strlen("rsnd/")) == 0) { + channels = ma_find_best_channels_from_sio_cap__sndio(&caps, deviceType, format); + } else { + channels = MA_DEFAULT_CHANNELS; + } + } + } + + if (sampleRate == 0) { + sampleRate = ma_find_best_sample_rate_from_sio_cap__sndio(&caps, pConfig->deviceType, format, channels); + } + + + ((ma_sio_initpar_proc)pDevice->pContext->sndio.sio_initpar)(&par); + par.msb = 0; + par.le = ma_is_little_endian(); + + switch (format) { + case ma_format_u8: + { + par.bits = 8; + par.bps = 1; + par.sig = 0; + } break; + + case ma_format_s24: + { + par.bits = 24; + par.bps = 3; + par.sig = 1; + } break; + + case ma_format_s32: + { + par.bits = 32; + par.bps = 4; + par.sig = 1; + } break; + + case ma_format_s16: + case ma_format_f32: + case ma_format_unknown: + default: + { + par.bits = 16; + par.bps = 2; + par.sig = 1; + } break; + } + + if (deviceType == ma_device_type_capture) { + par.rchan = channels; + } else { + par.pchan = channels; + } + + par.rate = sampleRate; + + internalPeriodSizeInFrames = ma_calculate_buffer_size_in_frames_from_descriptor(pDescriptor, par.rate, pConfig->performanceProfile); + + par.round = internalPeriodSizeInFrames; + par.appbufsz = par.round * pDescriptor->periodCount; + + if (((ma_sio_setpar_proc)pDevice->pContext->sndio.sio_setpar)((struct ma_sio_hdl*)handle, &par) == 0) { + ((ma_sio_close_proc)pDevice->pContext->sndio.sio_close)((struct ma_sio_hdl*)handle); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[sndio] Failed to set buffer size."); + return MA_ERROR; + } + + if (((ma_sio_getpar_proc)pDevice->pContext->sndio.sio_getpar)((struct ma_sio_hdl*)handle, &par) == 0) { + ((ma_sio_close_proc)pDevice->pContext->sndio.sio_close)((struct ma_sio_hdl*)handle); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[sndio] Failed to retrieve buffer size."); + return MA_ERROR; + } + + internalFormat = ma_format_from_sio_enc__sndio(par.bits, par.bps, par.sig, par.le, par.msb); + internalChannels = (deviceType == ma_device_type_capture) ? 
par.rchan : par.pchan; + internalSampleRate = par.rate; + internalPeriods = par.appbufsz / par.round; + internalPeriodSizeInFrames = par.round; + + if (deviceType == ma_device_type_capture) { + pDevice->sndio.handleCapture = handle; + } else { + pDevice->sndio.handlePlayback = handle; + } + + pDescriptor->format = internalFormat; + pDescriptor->channels = internalChannels; + pDescriptor->sampleRate = internalSampleRate; + ma_channel_map_init_standard(ma_standard_channel_map_sndio, pDescriptor->channelMap, ma_countof(pDescriptor->channelMap), internalChannels); + pDescriptor->periodSizeInFrames = internalPeriodSizeInFrames; + pDescriptor->periodCount = internalPeriods; + + return MA_SUCCESS; +} + +static ma_result ma_device_init__sndio(ma_device* pDevice, const ma_device_config* pConfig, ma_device_descriptor* pDescriptorPlayback, ma_device_descriptor* pDescriptorCapture) +{ + MA_ASSERT(pDevice != NULL); + + MA_ZERO_OBJECT(&pDevice->sndio); + + if (pConfig->deviceType == ma_device_type_loopback) { + return MA_DEVICE_TYPE_NOT_SUPPORTED; + } + + if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) { + ma_result result = ma_device_init_handle__sndio(pDevice, pConfig, pDescriptorCapture, ma_device_type_capture); + if (result != MA_SUCCESS) { + return result; + } + } + + if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) { + ma_result result = ma_device_init_handle__sndio(pDevice, pConfig, pDescriptorPlayback, ma_device_type_playback); + if (result != MA_SUCCESS) { + return result; + } + } + + return MA_SUCCESS; +} + +static ma_result ma_device_start__sndio(ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + ((ma_sio_start_proc)pDevice->pContext->sndio.sio_start)((struct ma_sio_hdl*)pDevice->sndio.handleCapture); + } + + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + ((ma_sio_start_proc)pDevice->pContext->sndio.sio_start)((struct ma_sio_hdl*)pDevice->sndio.handlePlayback); /* <-- Doesn't actually playback until data is written. */ + } + + return MA_SUCCESS; +} + +static ma_result ma_device_stop__sndio(ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + + /* + From the documentation: + + The sio_stop() function puts the audio subsystem in the same state as before sio_start() is called. It stops recording, drains the play buffer and then + stops playback. If samples to play are queued but playback hasn't started yet then playback is forced immediately; playback will actually stop once the + buffer is drained. In no case are samples in the play buffer discarded. + + Therefore, sio_stop() performs all of the necessary draining for us. 
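+
+    In practice a stop/start cycle through this backend should therefore not lose queued audio.
+    A hypothetical usage sketch (not part of this backend):
+
+        ma_device_stop(&device);    <-- returns only after sndio has drained the play buffer
+        ma_device_start(&device);   <-- resumes from a clean state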
+    */
+
+    if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
+        ((ma_sio_stop_proc)pDevice->pContext->sndio.sio_stop)((struct ma_sio_hdl*)pDevice->sndio.handleCapture);
+    }
+
+    if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
+        ((ma_sio_stop_proc)pDevice->pContext->sndio.sio_stop)((struct ma_sio_hdl*)pDevice->sndio.handlePlayback);
+    }
+
+    return MA_SUCCESS;
+}
+
+static ma_result ma_device_write__sndio(ma_device* pDevice, const void* pPCMFrames, ma_uint32 frameCount, ma_uint32* pFramesWritten)
+{
+    int result;
+
+    if (pFramesWritten != NULL) {
+        *pFramesWritten = 0;
+    }
+
+    result = ((ma_sio_write_proc)pDevice->pContext->sndio.sio_write)((struct ma_sio_hdl*)pDevice->sndio.handlePlayback, pPCMFrames, frameCount * ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels));
+    if (result == 0) {
+        ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[sndio] Failed to send data from the client to the device.");
+        return MA_IO_ERROR;
+    }
+
+    if (pFramesWritten != NULL) {
+        *pFramesWritten = frameCount;
+    }
+
+    return MA_SUCCESS;
+}
+
+static ma_result ma_device_read__sndio(ma_device* pDevice, void* pPCMFrames, ma_uint32 frameCount, ma_uint32* pFramesRead)
+{
+    int result;
+
+    if (pFramesRead != NULL) {
+        *pFramesRead = 0;
+    }
+
+    result = ((ma_sio_read_proc)pDevice->pContext->sndio.sio_read)((struct ma_sio_hdl*)pDevice->sndio.handleCapture, pPCMFrames, frameCount * ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels));
+    if (result == 0) {
+        ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[sndio] Failed to read data from the device to be sent to the client.");
+        return MA_IO_ERROR;
+    }
+
+    if (pFramesRead != NULL) {
+        *pFramesRead = frameCount;
+    }
+
+    return MA_SUCCESS;
+}
+
+static ma_result ma_context_uninit__sndio(ma_context* pContext)
+{
+    MA_ASSERT(pContext != NULL);
+    MA_ASSERT(pContext->backend == ma_backend_sndio);
+
+    (void)pContext;
+    return MA_SUCCESS;
+}
+
+static ma_result ma_context_init__sndio(ma_context* pContext, const ma_context_config* pConfig, ma_backend_callbacks* pCallbacks)
+{
+#ifndef MA_NO_RUNTIME_LINKING
+    const char* libsndioNames[] = {
+        "libsndio.so"
+    };
+    size_t i;
+
+    for (i = 0; i < ma_countof(libsndioNames); ++i) {
+        pContext->sndio.sndioSO = ma_dlopen(ma_context_get_log(pContext), libsndioNames[i]);
+        if (pContext->sndio.sndioSO != NULL) {
+            break;
+        }
+    }
+
+    if (pContext->sndio.sndioSO == NULL) {
+        return MA_NO_BACKEND;
+    }
+
+    pContext->sndio.sio_open    = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->sndio.sndioSO, "sio_open");
+    pContext->sndio.sio_close   = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->sndio.sndioSO, "sio_close");
+    pContext->sndio.sio_setpar  = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->sndio.sndioSO, "sio_setpar");
+    pContext->sndio.sio_getpar  = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->sndio.sndioSO, "sio_getpar");
+    pContext->sndio.sio_getcap  = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->sndio.sndioSO, "sio_getcap");
+    pContext->sndio.sio_write   = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->sndio.sndioSO, "sio_write");
+    pContext->sndio.sio_read    = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->sndio.sndioSO, "sio_read");
+    pContext->sndio.sio_start   = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->sndio.sndioSO, "sio_start");
+    pContext->sndio.sio_stop    = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->sndio.sndioSO, "sio_stop");
+    pContext->sndio.sio_initpar = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->sndio.sndioSO, "sio_initpar");
+#else
+    pContext->sndio.sio_open    = sio_open;
+    pContext->sndio.sio_close   = sio_close;
+    pContext->sndio.sio_setpar  = sio_setpar;
+    pContext->sndio.sio_getpar  = sio_getpar;
+    pContext->sndio.sio_getcap  = sio_getcap;
+    pContext->sndio.sio_write   = sio_write;
+    pContext->sndio.sio_read    = sio_read;
+    pContext->sndio.sio_start   = sio_start;
+    pContext->sndio.sio_stop    = sio_stop;
+    pContext->sndio.sio_initpar = sio_initpar;
+#endif
+
+    pCallbacks->onContextInit             = ma_context_init__sndio;
+    pCallbacks->onContextUninit           = ma_context_uninit__sndio;
+    pCallbacks->onContextEnumerateDevices = ma_context_enumerate_devices__sndio;
+    pCallbacks->onContextGetDeviceInfo    = ma_context_get_device_info__sndio;
+    pCallbacks->onDeviceInit              = ma_device_init__sndio;
+    pCallbacks->onDeviceUninit            = ma_device_uninit__sndio;
+    pCallbacks->onDeviceStart             = ma_device_start__sndio;
+    pCallbacks->onDeviceStop              = ma_device_stop__sndio;
+    pCallbacks->onDeviceRead              = ma_device_read__sndio;
+    pCallbacks->onDeviceWrite             = ma_device_write__sndio;
+    pCallbacks->onDeviceDataLoop          = NULL;
+
+    (void)pConfig;
+    return MA_SUCCESS;
+}
+#endif /* MA_HAS_SNDIO */
+
+
+
+/******************************************************************************
+
+audio(4) Backend
+
+******************************************************************************/
+#ifdef MA_HAS_AUDIO4
+#include <fcntl.h>
+#include <poll.h>
+#include <errno.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/ioctl.h>
+#include <sys/audioio.h>
+
+#ifdef __NetBSD__
+#include <sys/ioctl_compat.h>
+#endif
+
+#if defined(__OpenBSD__)
+    #include <sys/param.h>
+    #if defined(OpenBSD) && OpenBSD >= 201709
+        #define MA_AUDIO4_USE_NEW_API
+    #endif
+#endif
+
+static void ma_construct_device_id__audio4(char* id, size_t idSize, const char* base, int deviceIndex)
+{
+    size_t baseLen;
+
+    MA_ASSERT(id != NULL);
+    MA_ASSERT(idSize > 0);
+    MA_ASSERT(deviceIndex >= 0);
+
+    baseLen = strlen(base);
+    MA_ASSERT(idSize > baseLen);
+
+    ma_strcpy_s(id, idSize, base);
+    ma_itoa_s(deviceIndex, id+baseLen, idSize-baseLen, 10);
+}
+
+static ma_result ma_extract_device_index_from_id__audio4(const char* id, const char* base, int* pIndexOut)
+{
+    size_t idLen;
+    size_t baseLen;
+    const char* deviceIndexStr;
+
+    MA_ASSERT(id != NULL);
+    MA_ASSERT(base != NULL);
+    MA_ASSERT(pIndexOut != NULL);
+
+    idLen = strlen(id);
+    baseLen = strlen(base);
+    if (idLen <= baseLen) {
+        return MA_ERROR; /* Doesn't look like the id starts with the base. */
+    }
+
+    if (strncmp(id, base, baseLen) != 0) {
+        return MA_ERROR; /* ID does not begin with base. */
+    }
+
+    deviceIndexStr = id + baseLen;
+    if (deviceIndexStr[0] == '\0') {
+        return MA_ERROR; /* No index specified in the ID. */
+    }
+
+    if (pIndexOut) {
+        *pIndexOut = atoi(deviceIndexStr);
+    }
+
+    return MA_SUCCESS;
+}
+
+
+#if !defined(MA_AUDIO4_USE_NEW_API) /* Old API */
+static ma_format ma_format_from_encoding__audio4(unsigned int encoding, unsigned int precision)
+{
+    if (precision == 8 && (encoding == AUDIO_ENCODING_ULINEAR || encoding == AUDIO_ENCODING_ULINEAR_LE || encoding == AUDIO_ENCODING_ULINEAR_BE)) {
+        return ma_format_u8;
+    } else {
+        if (ma_is_little_endian() && encoding == AUDIO_ENCODING_SLINEAR_LE) {
+            if (precision == 16) {
+                return ma_format_s16;
+            } else if (precision == 24) {
+                return ma_format_s24;
+            } else if (precision == 32) {
+                return ma_format_s32;
+            }
+        } else if (ma_is_big_endian() && encoding == AUDIO_ENCODING_SLINEAR_BE) {
+            if (precision == 16) {
+                return ma_format_s16;
+            } else if (precision == 24) {
+                return ma_format_s24;
+            } else if (precision == 32) {
+                return ma_format_s32;
+            }
+        }
+    }
+
+    return ma_format_unknown; /* Encoding not supported. */
+}
+
+static void ma_encoding_from_format__audio4(ma_format format, unsigned int* pEncoding, unsigned int* pPrecision)
+{
+    MA_ASSERT(pEncoding != NULL);
+    MA_ASSERT(pPrecision != NULL);
+
+    switch (format)
+    {
+        case ma_format_u8:
+        {
+            *pEncoding = AUDIO_ENCODING_ULINEAR;
+            *pPrecision = 8;
+        } break;
+
+        case ma_format_s24:
+        {
+            *pEncoding = (ma_is_little_endian()) ? AUDIO_ENCODING_SLINEAR_LE : AUDIO_ENCODING_SLINEAR_BE;
+            *pPrecision = 24;
+        } break;
+
+        case ma_format_s32:
+        {
+            *pEncoding = (ma_is_little_endian()) ? AUDIO_ENCODING_SLINEAR_LE : AUDIO_ENCODING_SLINEAR_BE;
+            *pPrecision = 32;
+        } break;
+
+        case ma_format_s16:
+        case ma_format_f32:
+        case ma_format_unknown:
+        default:
+        {
+            *pEncoding = (ma_is_little_endian()) ? AUDIO_ENCODING_SLINEAR_LE : AUDIO_ENCODING_SLINEAR_BE;
+            *pPrecision = 16;
+        } break;
+    }
+}
+
+static ma_format ma_format_from_prinfo__audio4(struct audio_prinfo* prinfo)
+{
+    return ma_format_from_encoding__audio4(prinfo->encoding, prinfo->precision);
+}
+
+static ma_format ma_best_format_from_fd__audio4(int fd, ma_format preferredFormat)
+{
+    audio_encoding_t encoding;
+    ma_uint32 iFormat;
+    int counter = 0;
+
+    /* First check to see if the preferred format is supported. */
+    if (preferredFormat != ma_format_unknown) {
+        counter = 0;
+        for (;;) {
+            MA_ZERO_OBJECT(&encoding);
+            encoding.index = counter;
+            if (ioctl(fd, AUDIO_GETENC, &encoding) < 0) {
+                break;
+            }
+
+            if (preferredFormat == ma_format_from_encoding__audio4(encoding.encoding, encoding.precision)) {
+                return preferredFormat; /* Found the preferred format. */
+            }
+
+            /* Getting here means this encoding does not match our preferred format so we need to move on to the next encoding. */
+            counter += 1;
+        }
+    }
+
+    /* Getting here means our preferred format is not supported, so fall back to our standard priorities. */
+    for (iFormat = 0; iFormat < ma_countof(g_maFormatPriorities); iFormat += 1) {
+        ma_format format = g_maFormatPriorities[iFormat];
+
+        counter = 0;
+        for (;;) {
+            MA_ZERO_OBJECT(&encoding);
+            encoding.index = counter;
+            if (ioctl(fd, AUDIO_GETENC, &encoding) < 0) {
+                break;
+            }
+
+            if (format == ma_format_from_encoding__audio4(encoding.encoding, encoding.precision)) {
+                return format; /* Found a workable format. */
+            }
+
+            /* Getting here means this encoding does not match this format so we need to move on to the next encoding. */
+            counter += 1;
+        }
+    }
+
+    /* Getting here means no appropriate format was found.
*/ + return ma_format_unknown; +} +#else +static ma_format ma_format_from_swpar__audio4(struct audio_swpar* par) +{ + if (par->bits == 8 && par->bps == 1 && par->sig == 0) { + return ma_format_u8; + } + if (par->bits == 16 && par->bps == 2 && par->sig == 1 && par->le == ma_is_little_endian()) { + return ma_format_s16; + } + if (par->bits == 24 && par->bps == 3 && par->sig == 1 && par->le == ma_is_little_endian()) { + return ma_format_s24; + } + if (par->bits == 32 && par->bps == 4 && par->sig == 1 && par->le == ma_is_little_endian()) { + return ma_format_f32; + } + + /* Format not supported. */ + return ma_format_unknown; +} +#endif + +static ma_result ma_context_get_device_info_from_fd__audio4(ma_context* pContext, ma_device_type deviceType, int fd, ma_device_info* pDeviceInfo) +{ + audio_device_t fdDevice; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(fd >= 0); + MA_ASSERT(pDeviceInfo != NULL); + + (void)pContext; + (void)deviceType; + + if (ioctl(fd, AUDIO_GETDEV, &fdDevice) < 0) { + return MA_ERROR; /* Failed to retrieve device info. */ + } + + /* Name. */ + ma_strcpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), fdDevice.name); + + #if !defined(MA_AUDIO4_USE_NEW_API) + { + audio_info_t fdInfo; + int counter = 0; + ma_uint32 channels; + ma_uint32 sampleRate; + +#if defined(__NetBSD__) && (__NetBSD_Version__ >= 900000000) + if (ioctl(fd, AUDIO_GETFORMAT, &fdInfo) < 0) { + return MA_ERROR; + } +#else + if (ioctl(fd, AUDIO_GETINFO, &fdInfo) < 0) { + return MA_ERROR; + } +#endif + + if (deviceType == ma_device_type_playback) { + channels = fdInfo.play.channels; + sampleRate = fdInfo.play.sample_rate; + } else { + channels = fdInfo.record.channels; + sampleRate = fdInfo.record.sample_rate; + } + + /* Supported formats. We get this by looking at the encodings. */ + pDeviceInfo->nativeDataFormatCount = 0; + for (;;) { + audio_encoding_t encoding; + ma_format format; + + MA_ZERO_OBJECT(&encoding); + encoding.index = counter; + if (ioctl(fd, AUDIO_GETENC, &encoding) < 0) { + break; + } + + format = ma_format_from_encoding__audio4(encoding.encoding, encoding.precision); + if (format != ma_format_unknown) { + ma_device_info_add_native_data_format(pDeviceInfo, format, channels, sampleRate, 0); + } + + counter += 1; + } + } + #else + { + struct audio_swpar fdPar; + ma_format format; + ma_uint32 channels; + ma_uint32 sampleRate; + + if (ioctl(fd, AUDIO_GETPAR, &fdPar) < 0) { + return MA_ERROR; + } + + format = ma_format_from_swpar__audio4(&fdPar); + if (format == ma_format_unknown) { + return MA_FORMAT_NOT_SUPPORTED; + } + + if (deviceType == ma_device_type_playback) { + channels = fdPar.pchan; + } else { + channels = fdPar.rchan; + } + + sampleRate = fdPar.rate; + + pDeviceInfo->nativeDataFormatCount = 0; + ma_device_info_add_native_data_format(pDeviceInfo, format, channels, sampleRate, 0); + } + #endif + + return MA_SUCCESS; +} + +static ma_result ma_context_enumerate_devices__audio4(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData) +{ + const int maxDevices = 64; + char devpath[256]; + int iDevice; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(callback != NULL); + + /* + Every device will be named "/dev/audioN", with a "/dev/audioctlN" equivalent. We use the "/dev/audioctlN" + version here since we can open it even when another process has control of the "/dev/audioN" device. 
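+
+    Concretely, with the helpers above (a worked example):
+
+        char id[16];
+        ma_construct_device_id__audio4(id, sizeof(id), "/dev/audio",    3);  <-- id = "/dev/audio3"
+        ma_construct_device_id__audio4(id, sizeof(id), "/dev/audioctl", 3);  <-- id = "/dev/audioctl3"
+
+    and ma_extract_device_index_from_id__audio4("/dev/audio3", "/dev/audio", &index) recovers
+    index = 3.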
+ */ + for (iDevice = 0; iDevice < maxDevices; ++iDevice) { + struct stat st; + int fd; + ma_bool32 isTerminating = MA_FALSE; + + ma_strcpy_s(devpath, sizeof(devpath), "/dev/audioctl"); + ma_itoa_s(iDevice, devpath+strlen(devpath), sizeof(devpath)-strlen(devpath), 10); + + if (stat(devpath, &st) < 0) { + break; + } + + /* The device exists, but we need to check if it's usable as playback and/or capture. */ + + /* Playback. */ + if (!isTerminating) { + fd = open(devpath, O_RDONLY, 0); + if (fd >= 0) { + /* Supports playback. */ + ma_device_info deviceInfo; + MA_ZERO_OBJECT(&deviceInfo); + ma_construct_device_id__audio4(deviceInfo.id.audio4, sizeof(deviceInfo.id.audio4), "/dev/audio", iDevice); + if (ma_context_get_device_info_from_fd__audio4(pContext, ma_device_type_playback, fd, &deviceInfo) == MA_SUCCESS) { + isTerminating = !callback(pContext, ma_device_type_playback, &deviceInfo, pUserData); + } + + close(fd); + } + } + + /* Capture. */ + if (!isTerminating) { + fd = open(devpath, O_WRONLY, 0); + if (fd >= 0) { + /* Supports capture. */ + ma_device_info deviceInfo; + MA_ZERO_OBJECT(&deviceInfo); + ma_construct_device_id__audio4(deviceInfo.id.audio4, sizeof(deviceInfo.id.audio4), "/dev/audio", iDevice); + if (ma_context_get_device_info_from_fd__audio4(pContext, ma_device_type_capture, fd, &deviceInfo) == MA_SUCCESS) { + isTerminating = !callback(pContext, ma_device_type_capture, &deviceInfo, pUserData); + } + + close(fd); + } + } + + if (isTerminating) { + break; + } + } + + return MA_SUCCESS; +} + +static ma_result ma_context_get_device_info__audio4(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_device_info* pDeviceInfo) +{ + int fd = -1; + int deviceIndex = -1; + char ctlid[256]; + ma_result result; + + MA_ASSERT(pContext != NULL); + + /* + We need to open the "/dev/audioctlN" device to get the info. To do this we need to extract the number + from the device ID which will be in "/dev/audioN" format. + */ + if (pDeviceID == NULL) { + /* Default device. */ + ma_strcpy_s(ctlid, sizeof(ctlid), "/dev/audioctl"); + } else { + /* Specific device. We need to convert from "/dev/audioN" to "/dev/audioctlN". */ + result = ma_extract_device_index_from_id__audio4(pDeviceID->audio4, "/dev/audio", &deviceIndex); + if (result != MA_SUCCESS) { + return result; + } + + ma_construct_device_id__audio4(ctlid, sizeof(ctlid), "/dev/audioctl", deviceIndex); + } + + fd = open(ctlid, (deviceType == ma_device_type_playback) ? 
O_WRONLY : O_RDONLY, 0); + if (fd == -1) { + return MA_NO_DEVICE; + } + + if (deviceIndex == -1) { + ma_strcpy_s(pDeviceInfo->id.audio4, sizeof(pDeviceInfo->id.audio4), "/dev/audio"); + } else { + ma_construct_device_id__audio4(pDeviceInfo->id.audio4, sizeof(pDeviceInfo->id.audio4), "/dev/audio", deviceIndex); + } + + result = ma_context_get_device_info_from_fd__audio4(pContext, deviceType, fd, pDeviceInfo); + + close(fd); + return result; +} + +static ma_result ma_device_uninit__audio4(ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + close(pDevice->audio4.fdCapture); + } + + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + close(pDevice->audio4.fdPlayback); + } + + return MA_SUCCESS; +} + +static ma_result ma_device_init_fd__audio4(ma_device* pDevice, const ma_device_config* pConfig, ma_device_descriptor* pDescriptor, ma_device_type deviceType) +{ + const char* pDefaultDeviceNames[] = { + "/dev/audio", + "/dev/audio0" + }; + const char* pDefaultDeviceCtlNames[] = { + "/dev/audioctl", + "/dev/audioctl0" + }; + int fd; + int fdFlags = 0; + size_t iDefaultDevice = (size_t)-1; + ma_format internalFormat; + ma_uint32 internalChannels; + ma_uint32 internalSampleRate; + ma_uint32 internalPeriodSizeInFrames; + ma_uint32 internalPeriods; + + MA_ASSERT(pConfig != NULL); + MA_ASSERT(deviceType != ma_device_type_duplex); + MA_ASSERT(pDevice != NULL); + + /* The first thing to do is open the file. */ + if (deviceType == ma_device_type_capture) { + fdFlags = O_RDONLY; + } else { + fdFlags = O_WRONLY; + } + /*fdFlags |= O_NONBLOCK;*/ + + /* Find the index of the default device as a start. We'll use this index later. Set it to (size_t)-1 otherwise. */ + if (pDescriptor->pDeviceID == NULL) { + /* Default device. */ + for (iDefaultDevice = 0; iDefaultDevice < ma_countof(pDefaultDeviceNames); ++iDefaultDevice) { + fd = open(pDefaultDeviceNames[iDefaultDevice], fdFlags, 0); + if (fd != -1) { + break; + } + } + } else { + /* Specific device. */ + fd = open(pDescriptor->pDeviceID->audio4, fdFlags, 0); + + for (iDefaultDevice = 0; iDefaultDevice < ma_countof(pDefaultDeviceNames); iDefaultDevice += 1) { + if (ma_strcmp(pDefaultDeviceNames[iDefaultDevice], pDescriptor->pDeviceID->audio4) == 0) { + break; + } + } + + if (iDefaultDevice == ma_countof(pDefaultDeviceNames)) { + iDefaultDevice = (size_t)-1; + } + } + + if (fd == -1) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[audio4] Failed to open device."); + return ma_result_from_errno(errno); + } + + #if !defined(MA_AUDIO4_USE_NEW_API) /* Old API */ + { + audio_info_t fdInfo; + int fdInfoResult = -1; + + /* + The documentation is a little bit unclear to me as to how it handles formats. It says the + following: + + Regardless of formats supported by underlying driver, the audio driver accepts the + following formats. + + By then the next sentence says this: + + `encoding` and `precision` are one of the values obtained by AUDIO_GETENC. + + It sounds like a direct contradiction to me. I'm going to play this safe any only use the + best sample format returned by AUDIO_GETENC. If the requested format is supported we'll + use that, but otherwise we'll just use our standard format priorities to pick an + appropriate one. + */ + AUDIO_INITINFO(&fdInfo); + + /* + Get the default format from the audioctl file if we're asking for a default device. If we + retrieve it from /dev/audio it'll default to mono 8000Hz. 
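(As an example of that default: a freshly opened /dev/audio typically reports play.channels = 1 and play.sample_rate = 8000, so querying it directly would misreport the hardware's preferred configuration.)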
+ */ + if (iDefaultDevice != (size_t)-1) { + /* We're using a default device. Get the info from the /dev/audioctl file instead of /dev/audio. */ + int fdctl = open(pDefaultDeviceCtlNames[iDefaultDevice], fdFlags, 0); + if (fdctl != -1) { +#if defined(__NetBSD__) && (__NetBSD_Version__ >= 900000000) + fdInfoResult = ioctl(fdctl, AUDIO_GETFORMAT, &fdInfo); +#else + fdInfoResult = ioctl(fdctl, AUDIO_GETINFO, &fdInfo); +#endif + close(fdctl); + } + } + + if (fdInfoResult == -1) { + /* We still don't have the default device info so just retrieve it from the main audio device. */ + if (ioctl(fd, AUDIO_GETINFO, &fdInfo) < 0) { + close(fd); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[audio4] AUDIO_GETINFO failed."); + return ma_result_from_errno(errno); + } + } + + /* We get the driver to do as much of the data conversion as possible. */ + if (deviceType == ma_device_type_capture) { + fdInfo.mode = AUMODE_RECORD; + ma_encoding_from_format__audio4(ma_best_format_from_fd__audio4(fd, pDescriptor->format), &fdInfo.record.encoding, &fdInfo.record.precision); + + if (pDescriptor->channels != 0) { + fdInfo.record.channels = ma_clamp(pDescriptor->channels, 1, 12); /* From the documentation: `channels` ranges from 1 to 12. */ + } + + if (pDescriptor->sampleRate != 0) { + fdInfo.record.sample_rate = ma_clamp(pDescriptor->sampleRate, 1000, 192000); /* From the documentation: `frequency` ranges from 1000Hz to 192000Hz. (They mean `sample_rate` instead of `frequency`.) */ + } + } else { + fdInfo.mode = AUMODE_PLAY; + ma_encoding_from_format__audio4(ma_best_format_from_fd__audio4(fd, pDescriptor->format), &fdInfo.play.encoding, &fdInfo.play.precision); + + if (pDescriptor->channels != 0) { + fdInfo.play.channels = ma_clamp(pDescriptor->channels, 1, 12); /* From the documentation: `channels` ranges from 1 to 12. */ + } + + if (pDescriptor->sampleRate != 0) { + fdInfo.play.sample_rate = ma_clamp(pDescriptor->sampleRate, 1000, 192000); /* From the documentation: `frequency` ranges from 1000Hz to 192000Hz. (They mean `sample_rate` instead of `frequency`.) */ + } + } + + if (ioctl(fd, AUDIO_SETINFO, &fdInfo) < 0) { + close(fd); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[audio4] Failed to set device format. AUDIO_SETINFO failed."); + return ma_result_from_errno(errno); + } + + if (ioctl(fd, AUDIO_GETINFO, &fdInfo) < 0) { + close(fd); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[audio4] AUDIO_GETINFO failed."); + return ma_result_from_errno(errno); + } + + if (deviceType == ma_device_type_capture) { + internalFormat = ma_format_from_prinfo__audio4(&fdInfo.record); + internalChannels = fdInfo.record.channels; + internalSampleRate = fdInfo.record.sample_rate; + } else { + internalFormat = ma_format_from_prinfo__audio4(&fdInfo.play); + internalChannels = fdInfo.play.channels; + internalSampleRate = fdInfo.play.sample_rate; + } + + if (internalFormat == ma_format_unknown) { + close(fd); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[audio4] The device's internal device format is not supported by miniaudio. The device is unusable."); + return MA_FORMAT_NOT_SUPPORTED; + } + + /* Buffer. 
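To illustrate with hypothetical numbers: a 2048-byte block and 3 periods become blocksize = 2048, hiwat = 3 (the most blocks the driver will queue) and lowat = 2, matching the fdInfo assignments below.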
*/ + { + ma_uint32 internalPeriodSizeInBytes; + + internalPeriodSizeInFrames = ma_calculate_buffer_size_in_frames_from_descriptor(pDescriptor, internalSampleRate, pConfig->performanceProfile); + + internalPeriodSizeInBytes = internalPeriodSizeInFrames * ma_get_bytes_per_frame(internalFormat, internalChannels); + if (internalPeriodSizeInBytes < 16) { + internalPeriodSizeInBytes = 16; + } + + internalPeriods = pDescriptor->periodCount; + if (internalPeriods < 2) { + internalPeriods = 2; + } + + /* What miniaudio calls a period, audio4 calls a block. */ + AUDIO_INITINFO(&fdInfo); + fdInfo.hiwat = internalPeriods; + fdInfo.lowat = internalPeriods-1; + fdInfo.blocksize = internalPeriodSizeInBytes; + if (ioctl(fd, AUDIO_SETINFO, &fdInfo) < 0) { + close(fd); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[audio4] Failed to set internal buffer size. AUDIO_SETINFO failed."); + return ma_result_from_errno(errno); + } + + internalPeriods = fdInfo.hiwat; + internalPeriodSizeInFrames = fdInfo.blocksize / ma_get_bytes_per_frame(internalFormat, internalChannels); + } + } + #else + { + struct audio_swpar fdPar; + + /* We need to retrieve the format of the device so we can know the channel count and sample rate. Then we can calculate the buffer size. */ + if (ioctl(fd, AUDIO_GETPAR, &fdPar) < 0) { + close(fd); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[audio4] Failed to retrieve initial device parameters."); + return ma_result_from_errno(errno); + } + + internalFormat = ma_format_from_swpar__audio4(&fdPar); + internalChannels = (deviceType == ma_device_type_capture) ? fdPar.rchan : fdPar.pchan; + internalSampleRate = fdPar.rate; + + if (internalFormat == ma_format_unknown) { + close(fd); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[audio4] The device's internal device format is not supported by miniaudio. The device is unusable."); + return MA_FORMAT_NOT_SUPPORTED; + } + + /* Buffer. */ + { + ma_uint32 internalPeriodSizeInBytes; + + internalPeriodSizeInFrames = ma_calculate_buffer_size_in_frames_from_descriptor(pDescriptor, internalSampleRate, pConfig->performanceProfile); + + /* What miniaudio calls a period, audio4 calls a block. */ + internalPeriodSizeInBytes = internalPeriodSizeInFrames * ma_get_bytes_per_frame(internalFormat, internalChannels); + if (internalPeriodSizeInBytes < 16) { + internalPeriodSizeInBytes = 16; + } + + fdPar.nblks = pDescriptor->periodCount; + fdPar.round = internalPeriodSizeInBytes; + + if (ioctl(fd, AUDIO_SETPAR, &fdPar) < 0) { + close(fd); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[audio4] Failed to set device parameters."); + return ma_result_from_errno(errno); + } + + if (ioctl(fd, AUDIO_GETPAR, &fdPar) < 0) { + close(fd); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[audio4] Failed to retrieve actual device parameters."); + return ma_result_from_errno(errno); + } + } + + internalFormat = ma_format_from_swpar__audio4(&fdPar); + internalChannels = (deviceType == ma_device_type_capture) ? fdPar.rchan : fdPar.pchan; + internalSampleRate = fdPar.rate; + internalPeriods = fdPar.nblks; + internalPeriodSizeInFrames = fdPar.round / ma_get_bytes_per_frame(internalFormat, internalChannels); + } + #endif + + if (internalFormat == ma_format_unknown) { + close(fd); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[audio4] The device's internal device format is not supported by miniaudio. 
The device is unusable."); + return MA_FORMAT_NOT_SUPPORTED; + } + + if (deviceType == ma_device_type_capture) { + pDevice->audio4.fdCapture = fd; + } else { + pDevice->audio4.fdPlayback = fd; + } + + pDescriptor->format = internalFormat; + pDescriptor->channels = internalChannels; + pDescriptor->sampleRate = internalSampleRate; + ma_channel_map_init_standard(ma_standard_channel_map_sound4, pDescriptor->channelMap, ma_countof(pDescriptor->channelMap), internalChannels); + pDescriptor->periodSizeInFrames = internalPeriodSizeInFrames; + pDescriptor->periodCount = internalPeriods; + + return MA_SUCCESS; +} + +static ma_result ma_device_init__audio4(ma_device* pDevice, const ma_device_config* pConfig, ma_device_descriptor* pDescriptorPlayback, ma_device_descriptor* pDescriptorCapture) +{ + MA_ASSERT(pDevice != NULL); + + MA_ZERO_OBJECT(&pDevice->audio4); + + if (pConfig->deviceType == ma_device_type_loopback) { + return MA_DEVICE_TYPE_NOT_SUPPORTED; + } + + pDevice->audio4.fdCapture = -1; + pDevice->audio4.fdPlayback = -1; + + /* + The version of the operating system dictates whether or not the device is exclusive or shared. NetBSD + introduced in-kernel mixing which means it's shared. All other BSD flavours are exclusive as far as + I'm aware. + */ +#if defined(__NetBSD_Version__) && __NetBSD_Version__ >= 800000000 + /* NetBSD 8.0+ */ + if (((pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) && pDescriptorPlayback->shareMode == ma_share_mode_exclusive) || + ((pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) && pDescriptorCapture->shareMode == ma_share_mode_exclusive)) { + return MA_SHARE_MODE_NOT_SUPPORTED; + } +#else + /* All other flavors. */ +#endif + + if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) { + ma_result result = ma_device_init_fd__audio4(pDevice, pConfig, pDescriptorCapture, ma_device_type_capture); + if (result != MA_SUCCESS) { + return result; + } + } + + if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) { + ma_result result = ma_device_init_fd__audio4(pDevice, pConfig, pDescriptorPlayback, ma_device_type_playback); + if (result != MA_SUCCESS) { + if (pConfig->deviceType == ma_device_type_duplex) { + close(pDevice->audio4.fdCapture); + } + return result; + } + } + + return MA_SUCCESS; +} + +static ma_result ma_device_start__audio4(ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + if (pDevice->audio4.fdCapture == -1) { + return MA_INVALID_ARGS; + } + } + + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + if (pDevice->audio4.fdPlayback == -1) { + return MA_INVALID_ARGS; + } + } + + return MA_SUCCESS; +} + +static ma_result ma_device_stop_fd__audio4(ma_device* pDevice, int fd) +{ + if (fd == -1) { + return MA_INVALID_ARGS; + } + +#if !defined(MA_AUDIO4_USE_NEW_API) + if (ioctl(fd, AUDIO_FLUSH, 0) < 0) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[audio4] Failed to stop device. AUDIO_FLUSH failed."); + return ma_result_from_errno(errno); + } +#else + if (ioctl(fd, AUDIO_STOP, 0) < 0) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[audio4] Failed to stop device. 
AUDIO_STOP failed."); + return ma_result_from_errno(errno); + } +#endif + + return MA_SUCCESS; +} + +static ma_result ma_device_stop__audio4(ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + ma_result result; + + result = ma_device_stop_fd__audio4(pDevice, pDevice->audio4.fdCapture); + if (result != MA_SUCCESS) { + return result; + } + } + + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + ma_result result; + + /* Drain the device first. If this fails we'll just need to flush without draining. Unfortunately draining isn't available on newer version of OpenBSD. */ + #if !defined(MA_AUDIO4_USE_NEW_API) + ioctl(pDevice->audio4.fdPlayback, AUDIO_DRAIN, 0); + #endif + + /* Here is where the device is stopped immediately. */ + result = ma_device_stop_fd__audio4(pDevice, pDevice->audio4.fdPlayback); + if (result != MA_SUCCESS) { + return result; + } + } + + return MA_SUCCESS; +} + +static ma_result ma_device_write__audio4(ma_device* pDevice, const void* pPCMFrames, ma_uint32 frameCount, ma_uint32* pFramesWritten) +{ + int result; + + if (pFramesWritten != NULL) { + *pFramesWritten = 0; + } + + result = write(pDevice->audio4.fdPlayback, pPCMFrames, frameCount * ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels)); + if (result < 0) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[audio4] Failed to write data to the device."); + return ma_result_from_errno(errno); + } + + if (pFramesWritten != NULL) { + *pFramesWritten = (ma_uint32)result / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels); + } + + return MA_SUCCESS; +} + +static ma_result ma_device_read__audio4(ma_device* pDevice, void* pPCMFrames, ma_uint32 frameCount, ma_uint32* pFramesRead) +{ + int result; + + if (pFramesRead != NULL) { + *pFramesRead = 0; + } + + result = read(pDevice->audio4.fdCapture, pPCMFrames, frameCount * ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels)); + if (result < 0) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[audio4] Failed to read data from the device."); + return ma_result_from_errno(errno); + } + + if (pFramesRead != NULL) { + *pFramesRead = (ma_uint32)result / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels); + } + + return MA_SUCCESS; +} + +static ma_result ma_context_uninit__audio4(ma_context* pContext) +{ + MA_ASSERT(pContext != NULL); + MA_ASSERT(pContext->backend == ma_backend_audio4); + + (void)pContext; + return MA_SUCCESS; +} + +static ma_result ma_context_init__audio4(ma_context* pContext, const ma_context_config* pConfig, ma_backend_callbacks* pCallbacks) +{ + MA_ASSERT(pContext != NULL); + + (void)pConfig; + + pCallbacks->onContextInit = ma_context_init__audio4; + pCallbacks->onContextUninit = ma_context_uninit__audio4; + pCallbacks->onContextEnumerateDevices = ma_context_enumerate_devices__audio4; + pCallbacks->onContextGetDeviceInfo = ma_context_get_device_info__audio4; + pCallbacks->onDeviceInit = ma_device_init__audio4; + pCallbacks->onDeviceUninit = ma_device_uninit__audio4; + pCallbacks->onDeviceStart = ma_device_start__audio4; + pCallbacks->onDeviceStop = ma_device_stop__audio4; + pCallbacks->onDeviceRead = ma_device_read__audio4; + pCallbacks->onDeviceWrite = ma_device_write__audio4; + pCallbacks->onDeviceDataLoop = NULL; + + return MA_SUCCESS; +} 
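/*
Illustrative sketch only, not part of the vendored miniaudio source: blocking backends such
as audio4 expose their I/O through the onDeviceRead/onDeviceWrite callbacks registered
above, and miniaudio drives them from its internal device thread. A simplified assumption
of that driving loop, shown purely for orientation:

    while (ma_device_get_state(pDevice) == ma_device_state_started) {
        ma_uint32 framesProcessed;
        if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
            pCallbacks->onDeviceRead(pDevice, pIntermediaryBuffer, periodSizeInFrames, &framesProcessed);
        }
        (the client's data callback runs here)
        if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
            pCallbacks->onDeviceWrite(pDevice, pIntermediaryBuffer, periodSizeInFrames, &framesProcessed);
        }
    }

pIntermediaryBuffer and periodSizeInFrames are hypothetical names standing in for the
device's internal staging buffer and period size.
*/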
+#endif  /* MA_HAS_AUDIO4 */
+
+
+/******************************************************************************
+
+OSS Backend
+
+******************************************************************************/
+#ifdef MA_HAS_OSS
+#include <sys/ioctl.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/soundcard.h>
+
+#ifndef SNDCTL_DSP_HALT
+#define SNDCTL_DSP_HALT SNDCTL_DSP_RESET
+#endif
+
+#define MA_OSS_DEFAULT_DEVICE_NAME "/dev/dsp"
+
+static int ma_open_temp_device__oss()
+{
+    /* The OSS sample code uses "/dev/mixer" as the device for getting system properties so I'm going to do the same. */
+    int fd = open("/dev/mixer", O_RDONLY, 0);
+    if (fd >= 0) {
+        return fd;
+    }
+
+    return -1;
+}
+
+static ma_result ma_context_open_device__oss(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, int* pfd)
+{
+    const char* deviceName;
+    int flags;
+
+    MA_ASSERT(pContext != NULL);
+    MA_ASSERT(pfd != NULL);
+    (void)pContext;
+
+    *pfd = -1;
+
+    /* This function should only be called for playback or capture, not duplex. */
+    if (deviceType == ma_device_type_duplex) {
+        return MA_INVALID_ARGS;
+    }
+
+    deviceName = MA_OSS_DEFAULT_DEVICE_NAME;
+    if (pDeviceID != NULL) {
+        deviceName = pDeviceID->oss;
+    }
+
+    flags = (deviceType == ma_device_type_playback) ? O_WRONLY : O_RDONLY;
+    if (shareMode == ma_share_mode_exclusive) {
+        flags |= O_EXCL;
+    }
+
+    *pfd = open(deviceName, flags, 0);
+    if (*pfd == -1) {
+        return ma_result_from_errno(errno);
+    }
+
+    return MA_SUCCESS;
+}
+
+static ma_result ma_context_enumerate_devices__oss(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData)
+{
+    int fd;
+    oss_sysinfo si;
+    int result;
+
+    MA_ASSERT(pContext != NULL);
+    MA_ASSERT(callback != NULL);
+
+    fd = ma_open_temp_device__oss();
+    if (fd == -1) {
+        ma_log_post(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[OSS] Failed to open a temporary device for retrieving system information used for device enumeration.");
+        return MA_NO_BACKEND;
+    }
+
+    result = ioctl(fd, SNDCTL_SYSINFO, &si);
+    if (result != -1) {
+        int iAudioDevice;
+        for (iAudioDevice = 0; iAudioDevice < si.numaudios; ++iAudioDevice) {
+            oss_audioinfo ai;
+            ai.dev = iAudioDevice;
+            result = ioctl(fd, SNDCTL_AUDIOINFO, &ai);
+            if (result != -1) {
+                if (ai.devnode[0] != '\0') {    /* <-- Can be blank, according to documentation. */
+                    ma_device_info deviceInfo;
+                    ma_bool32 isTerminating = MA_FALSE;
+
+                    MA_ZERO_OBJECT(&deviceInfo);
+
+                    /* ID */
+                    ma_strncpy_s(deviceInfo.id.oss, sizeof(deviceInfo.id.oss), ai.devnode, (size_t)-1);
+
+                    /*
+                    The human readable device name should be in the "ai.handle" variable, but it can
+                    sometimes be empty in which case we just fall back to "ai.name" which is less user
+                    friendly, but usually has a value.
+                    */
+                    if (ai.handle[0] != '\0') {
+                        ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), ai.handle, (size_t)-1);
+                    } else {
+                        ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), ai.name, (size_t)-1);
+                    }
+
+                    /* The device can be both playback and capture.
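A duplex-capable node such as "/dev/dsp0" will have both PCM_CAP_OUTPUT and PCM_CAP_INPUT set in ai.caps and is therefore reported to the callback twice, once per device type.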
*/ + if (!isTerminating && (ai.caps & PCM_CAP_OUTPUT) != 0) { + isTerminating = !callback(pContext, ma_device_type_playback, &deviceInfo, pUserData); + } + if (!isTerminating && (ai.caps & PCM_CAP_INPUT) != 0) { + isTerminating = !callback(pContext, ma_device_type_capture, &deviceInfo, pUserData); + } + + if (isTerminating) { + break; + } + } + } + } + } else { + close(fd); + ma_log_post(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[OSS] Failed to retrieve system information for device enumeration."); + return MA_NO_BACKEND; + } + + close(fd); + return MA_SUCCESS; +} + +static void ma_context_add_native_data_format__oss(ma_context* pContext, oss_audioinfo* pAudioInfo, ma_format format, ma_device_info* pDeviceInfo) +{ + unsigned int minChannels; + unsigned int maxChannels; + unsigned int iRate; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(pAudioInfo != NULL); + MA_ASSERT(pDeviceInfo != NULL); + + /* If we support all channels we just report 0. */ + minChannels = ma_clamp(pAudioInfo->min_channels, MA_MIN_CHANNELS, MA_MAX_CHANNELS); + maxChannels = ma_clamp(pAudioInfo->max_channels, MA_MIN_CHANNELS, MA_MAX_CHANNELS); + + /* + OSS has this annoying thing where sample rates can be reported in two ways. We prefer explicitness, + which OSS has in the form of nrates/rates, however there are times where nrates can be 0, in which + case we'll need to use min_rate and max_rate and report only standard rates. + */ + if (pAudioInfo->nrates > 0) { + for (iRate = 0; iRate < pAudioInfo->nrates; iRate += 1) { + unsigned int rate = pAudioInfo->rates[iRate]; + + if (minChannels == MA_MIN_CHANNELS && maxChannels == MA_MAX_CHANNELS) { + ma_device_info_add_native_data_format(pDeviceInfo, format, 0, rate, 0); /* Set the channel count to 0 to indicate that all channel counts are supported. */ + } else { + unsigned int iChannel; + for (iChannel = minChannels; iChannel <= maxChannels; iChannel += 1) { + ma_device_info_add_native_data_format(pDeviceInfo, format, iChannel, rate, 0); + } + } + } + } else { + for (iRate = 0; iRate < ma_countof(g_maStandardSampleRatePriorities); iRate += 1) { + ma_uint32 standardRate = g_maStandardSampleRatePriorities[iRate]; + + if (standardRate >= (ma_uint32)pAudioInfo->min_rate && standardRate <= (ma_uint32)pAudioInfo->max_rate) { + if (minChannels == MA_MIN_CHANNELS && maxChannels == MA_MAX_CHANNELS) { + ma_device_info_add_native_data_format(pDeviceInfo, format, 0, standardRate, 0); /* Set the channel count to 0 to indicate that all channel counts are supported. */ + } else { + unsigned int iChannel; + for (iChannel = minChannels; iChannel <= maxChannels; iChannel += 1) { + ma_device_info_add_native_data_format(pDeviceInfo, format, iChannel, standardRate, 0); + } + } + } + } + } +} + +static ma_result ma_context_get_device_info__oss(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_device_info* pDeviceInfo) +{ + ma_bool32 foundDevice; + int fdTemp; + oss_sysinfo si; + int result; + + MA_ASSERT(pContext != NULL); + + /* Handle the default device a little differently. */ + if (pDeviceID == NULL) { + if (deviceType == ma_device_type_playback) { + ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1); + } else { + ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1); + } + + return MA_SUCCESS; + } + + + /* If we get here it means we are _not_ using the default device. 
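The ID to match is the devnode path recorded during enumeration, e.g. "/dev/dsp1", so we walk the system's device list looking for a matching entry.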
*/
+    foundDevice = MA_FALSE;
+
+    fdTemp = ma_open_temp_device__oss();
+    if (fdTemp == -1) {
+        ma_log_post(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[OSS] Failed to open a temporary device for retrieving system information used for device enumeration.");
+        return MA_NO_BACKEND;
+    }
+
+    result = ioctl(fdTemp, SNDCTL_SYSINFO, &si);
+    if (result != -1) {
+        int iAudioDevice;
+        for (iAudioDevice = 0; iAudioDevice < si.numaudios; ++iAudioDevice) {
+            oss_audioinfo ai;
+            ai.dev = iAudioDevice;
+            result = ioctl(fdTemp, SNDCTL_AUDIOINFO, &ai);
+            if (result != -1) {
+                if (ma_strcmp(ai.devnode, pDeviceID->oss) == 0) {
+                    /* It has the same name, so now just confirm the type. */
+                    if ((deviceType == ma_device_type_playback && ((ai.caps & PCM_CAP_OUTPUT) != 0)) ||
+                        (deviceType == ma_device_type_capture  && ((ai.caps & PCM_CAP_INPUT)  != 0))) {
+                        unsigned int formatMask;
+
+                        /* ID */
+                        ma_strncpy_s(pDeviceInfo->id.oss, sizeof(pDeviceInfo->id.oss), ai.devnode, (size_t)-1);
+
+                        /*
+                        The human readable device name should be in the "ai.handle" variable, but it can
+                        sometimes be empty in which case we just fall back to "ai.name" which is less user
+                        friendly, but usually has a value.
+                        */
+                        if (ai.handle[0] != '\0') {
+                            ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), ai.handle, (size_t)-1);
+                        } else {
+                            ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), ai.name, (size_t)-1);
+                        }
+
+
+                        pDeviceInfo->nativeDataFormatCount = 0;
+
+                        if (deviceType == ma_device_type_playback) {
+                            formatMask = ai.oformats;
+                        } else {
+                            formatMask = ai.iformats;
+                        }
+
+                        if (((formatMask & AFMT_S16_LE) != 0 && ma_is_little_endian()) || ((formatMask & AFMT_S16_BE) != 0 && ma_is_big_endian())) {
+                            ma_context_add_native_data_format__oss(pContext, &ai, ma_format_s16, pDeviceInfo);
+                        }
+                        if (((formatMask & AFMT_S32_LE) != 0 && ma_is_little_endian()) || ((formatMask & AFMT_S32_BE) != 0 && ma_is_big_endian())) {
+                            ma_context_add_native_data_format__oss(pContext, &ai, ma_format_s32, pDeviceInfo);
+                        }
+                        if ((formatMask & AFMT_U8) != 0) {
+                            ma_context_add_native_data_format__oss(pContext, &ai, ma_format_u8, pDeviceInfo);
+                        }
+
+                        foundDevice = MA_TRUE;
+                        break;
+                    }
+                }
+            }
+        }
+    } else {
+        close(fdTemp);
+        ma_log_post(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[OSS] Failed to retrieve system information for device enumeration.");
+        return MA_NO_BACKEND;
+    }
+
+
+    close(fdTemp);
+
+    if (!foundDevice) {
+        return MA_NO_DEVICE;
+    }
+
+    return MA_SUCCESS;
+}
+
+static ma_result ma_device_uninit__oss(ma_device* pDevice)
+{
+    MA_ASSERT(pDevice != NULL);
+
+    if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
+        close(pDevice->oss.fdCapture);
+    }
+
+    if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
+        close(pDevice->oss.fdPlayback);
+    }
+
+    return MA_SUCCESS;
+}
+
+static int ma_format_to_oss(ma_format format)
+{
+    int ossFormat = AFMT_U8;
+    switch (format) {
+        case ma_format_s16: ossFormat = (ma_is_little_endian()) ? AFMT_S16_LE : AFMT_S16_BE; break;
+        case ma_format_s24: ossFormat = (ma_is_little_endian()) ? AFMT_S32_LE : AFMT_S32_BE; break;
+        case ma_format_s32: ossFormat = (ma_is_little_endian()) ? AFMT_S32_LE : AFMT_S32_BE; break;
+        case ma_format_f32: ossFormat = (ma_is_little_endian()) ?
AFMT_S16_LE : AFMT_S16_BE; break; + case ma_format_u8: + default: ossFormat = AFMT_U8; break; + } + + return ossFormat; +} + +static ma_format ma_format_from_oss(int ossFormat) +{ + if (ossFormat == AFMT_U8) { + return ma_format_u8; + } else { + if (ma_is_little_endian()) { + switch (ossFormat) { + case AFMT_S16_LE: return ma_format_s16; + case AFMT_S32_LE: return ma_format_s32; + default: return ma_format_unknown; + } + } else { + switch (ossFormat) { + case AFMT_S16_BE: return ma_format_s16; + case AFMT_S32_BE: return ma_format_s32; + default: return ma_format_unknown; + } + } + } + + return ma_format_unknown; +} + +static ma_result ma_device_init_fd__oss(ma_device* pDevice, const ma_device_config* pConfig, ma_device_descriptor* pDescriptor, ma_device_type deviceType) +{ + ma_result result; + int ossResult; + int fd; + const ma_device_id* pDeviceID = NULL; + ma_share_mode shareMode; + int ossFormat; + int ossChannels; + int ossSampleRate; + int ossFragment; + + MA_ASSERT(pDevice != NULL); + MA_ASSERT(pConfig != NULL); + MA_ASSERT(deviceType != ma_device_type_duplex); + + pDeviceID = pDescriptor->pDeviceID; + shareMode = pDescriptor->shareMode; + ossFormat = ma_format_to_oss((pDescriptor->format != ma_format_unknown) ? pDescriptor->format : ma_format_s16); /* Use s16 by default because OSS doesn't like floating point. */ + ossChannels = (int)(pDescriptor->channels > 0) ? pDescriptor->channels : MA_DEFAULT_CHANNELS; + ossSampleRate = (int)(pDescriptor->sampleRate > 0) ? pDescriptor->sampleRate : MA_DEFAULT_SAMPLE_RATE; + + result = ma_context_open_device__oss(pDevice->pContext, deviceType, pDeviceID, shareMode, &fd); + if (result != MA_SUCCESS) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[OSS] Failed to open device."); + return result; + } + + /* + The OSS documentation is very clear about the order we should be initializing the device's properties: + 1) Format + 2) Channels + 3) Sample rate. + */ + + /* Format. */ + ossResult = ioctl(fd, SNDCTL_DSP_SETFMT, &ossFormat); + if (ossResult == -1) { + close(fd); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[OSS] Failed to set format."); + return ma_result_from_errno(errno); + } + + /* Channels. */ + ossResult = ioctl(fd, SNDCTL_DSP_CHANNELS, &ossChannels); + if (ossResult == -1) { + close(fd); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[OSS] Failed to set channel count."); + return ma_result_from_errno(errno); + } + + /* Sample Rate. */ + ossResult = ioctl(fd, SNDCTL_DSP_SPEED, &ossSampleRate); + if (ossResult == -1) { + close(fd); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[OSS] Failed to set sample rate."); + return ma_result_from_errno(errno); + } + + /* + Buffer. + + The documentation says that the fragment settings should be set as soon as possible, but I'm not sure if + it should be done before or after format/channels/rate. + + OSS wants the fragment size in bytes and a power of 2. When setting, we specify the power, not the actual + value. 
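As a worked example (numbers hypothetical): a 4096-byte fragment is 2^12, so the power is 12, and with 4 periods the value passed to SNDCTL_DSP_SETFRAGMENT below is (4 << 16) | 12 = 0x0004000C.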
+ */ + { + ma_uint32 periodSizeInFrames; + ma_uint32 periodSizeInBytes; + ma_uint32 ossFragmentSizePower; + + periodSizeInFrames = ma_calculate_buffer_size_in_frames_from_descriptor(pDescriptor, (ma_uint32)ossSampleRate, pConfig->performanceProfile); + + periodSizeInBytes = ma_round_to_power_of_2(periodSizeInFrames * ma_get_bytes_per_frame(ma_format_from_oss(ossFormat), ossChannels)); + if (periodSizeInBytes < 16) { + periodSizeInBytes = 16; + } + + ossFragmentSizePower = 4; + periodSizeInBytes >>= 4; + while (periodSizeInBytes >>= 1) { + ossFragmentSizePower += 1; + } + + ossFragment = (int)((pConfig->periods << 16) | ossFragmentSizePower); + ossResult = ioctl(fd, SNDCTL_DSP_SETFRAGMENT, &ossFragment); + if (ossResult == -1) { + close(fd); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[OSS] Failed to set fragment size and period count."); + return ma_result_from_errno(errno); + } + } + + /* Internal settings. */ + if (deviceType == ma_device_type_capture) { + pDevice->oss.fdCapture = fd; + } else { + pDevice->oss.fdPlayback = fd; + } + + pDescriptor->format = ma_format_from_oss(ossFormat); + pDescriptor->channels = ossChannels; + pDescriptor->sampleRate = ossSampleRate; + ma_channel_map_init_standard(ma_standard_channel_map_sound4, pDescriptor->channelMap, ma_countof(pDescriptor->channelMap), pDescriptor->channels); + pDescriptor->periodCount = (ma_uint32)(ossFragment >> 16); + pDescriptor->periodSizeInFrames = (ma_uint32)(1 << (ossFragment & 0xFFFF)) / ma_get_bytes_per_frame(pDescriptor->format, pDescriptor->channels); + + if (pDescriptor->format == ma_format_unknown) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[OSS] The device's internal format is not supported by miniaudio."); + return MA_FORMAT_NOT_SUPPORTED; + } + + return MA_SUCCESS; +} + +static ma_result ma_device_init__oss(ma_device* pDevice, const ma_device_config* pConfig, ma_device_descriptor* pDescriptorPlayback, ma_device_descriptor* pDescriptorCapture) +{ + MA_ASSERT(pDevice != NULL); + MA_ASSERT(pConfig != NULL); + + MA_ZERO_OBJECT(&pDevice->oss); + + if (pConfig->deviceType == ma_device_type_loopback) { + return MA_DEVICE_TYPE_NOT_SUPPORTED; + } + + if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) { + ma_result result = ma_device_init_fd__oss(pDevice, pConfig, pDescriptorCapture, ma_device_type_capture); + if (result != MA_SUCCESS) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[OSS] Failed to open device."); + return result; + } + } + + if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) { + ma_result result = ma_device_init_fd__oss(pDevice, pConfig, pDescriptorPlayback, ma_device_type_playback); + if (result != MA_SUCCESS) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[OSS] Failed to open device."); + return result; + } + } + + return MA_SUCCESS; +} + +/* +Note on Starting and Stopping +============================= +In the past I was using SNDCTL_DSP_HALT to stop the device, however this results in issues when +trying to resume the device again. If we use SNDCTL_DSP_HALT, the next write() or read() will +fail. Instead what we need to do is just not write or read to and from the device when the +device is not running. + +As a result, both the start and stop functions for OSS are just empty stubs. The starting and +stopping logic is handled by ma_device_write__oss() and ma_device_read__oss(). 
These will check +the device state, and if the device is stopped they will simply not do any kind of processing. + +The downside to this technique is that I've noticed a fairly lengthy delay in stopping the +device, up to a second. This is on a virtual machine, and as such might just be due to the +virtual drivers, but I'm not fully sure. I am not sure how to work around this problem so for +the moment that's just how it's going to have to be. + +When starting the device, OSS will automatically start it when write() or read() is called. +*/ +static ma_result ma_device_start__oss(ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + + /* The device is automatically started with reading and writing. */ + (void)pDevice; + + return MA_SUCCESS; +} + +static ma_result ma_device_stop__oss(ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + + /* See note above on why this is empty. */ + (void)pDevice; + + return MA_SUCCESS; +} + +static ma_result ma_device_write__oss(ma_device* pDevice, const void* pPCMFrames, ma_uint32 frameCount, ma_uint32* pFramesWritten) +{ + int resultOSS; + ma_uint32 deviceState; + + if (pFramesWritten != NULL) { + *pFramesWritten = 0; + } + + /* Don't do any processing if the device is stopped. */ + deviceState = ma_device_get_state(pDevice); + if (deviceState != ma_device_state_started && deviceState != ma_device_state_starting) { + return MA_SUCCESS; + } + + resultOSS = write(pDevice->oss.fdPlayback, pPCMFrames, frameCount * ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels)); + if (resultOSS < 0) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[OSS] Failed to send data from the client to the device."); + return ma_result_from_errno(errno); + } + + if (pFramesWritten != NULL) { + *pFramesWritten = (ma_uint32)resultOSS / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels); + } + + return MA_SUCCESS; +} + +static ma_result ma_device_read__oss(ma_device* pDevice, void* pPCMFrames, ma_uint32 frameCount, ma_uint32* pFramesRead) +{ + int resultOSS; + ma_uint32 deviceState; + + if (pFramesRead != NULL) { + *pFramesRead = 0; + } + + /* Don't do any processing if the device is stopped. */ + deviceState = ma_device_get_state(pDevice); + if (deviceState != ma_device_state_started && deviceState != ma_device_state_starting) { + return MA_SUCCESS; + } + + resultOSS = read(pDevice->oss.fdCapture, pPCMFrames, frameCount * ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels)); + if (resultOSS < 0) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[OSS] Failed to read data from the device to be sent to the client."); + return ma_result_from_errno(errno); + } + + if (pFramesRead != NULL) { + *pFramesRead = (ma_uint32)resultOSS / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels); + } + + return MA_SUCCESS; +} + +static ma_result ma_context_uninit__oss(ma_context* pContext) +{ + MA_ASSERT(pContext != NULL); + MA_ASSERT(pContext->backend == ma_backend_oss); + + (void)pContext; + return MA_SUCCESS; +} + +static ma_result ma_context_init__oss(ma_context* pContext, const ma_context_config* pConfig, ma_backend_callbacks* pCallbacks) +{ + int fd; + int ossVersion; + int result; + + MA_ASSERT(pContext != NULL); + + (void)pConfig; + + /* Try opening a temporary device first so we can get version information. This is closed at the end. 
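The version comes back packed one byte per component; 0x040003, for illustration, decodes via the shifts below to versionMajor = 4 and versionMinor = 0.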
*/
+    fd = ma_open_temp_device__oss();
+    if (fd == -1) {
+        ma_log_post(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[OSS] Failed to open temporary device for retrieving system properties.");    /* Looks like OSS isn't installed, or there are no available devices. */
+        return MA_NO_BACKEND;
+    }
+
+    /* Grab the OSS version. */
+    ossVersion = 0;
+    result = ioctl(fd, OSS_GETVERSION, &ossVersion);
+    if (result == -1) {
+        close(fd);
+        ma_log_post(ma_context_get_log(pContext), MA_LOG_LEVEL_ERROR, "[OSS] Failed to retrieve OSS version.");
+        return MA_NO_BACKEND;
+    }
+
+    /* The file handle to the temp device is no longer needed. Close ASAP. */
+    close(fd);
+
+    pContext->oss.versionMajor = ((ossVersion & 0xFF0000) >> 16);
+    pContext->oss.versionMinor = ((ossVersion & 0x00FF00) >> 8);
+
+    pCallbacks->onContextInit             = ma_context_init__oss;
+    pCallbacks->onContextUninit           = ma_context_uninit__oss;
+    pCallbacks->onContextEnumerateDevices = ma_context_enumerate_devices__oss;
+    pCallbacks->onContextGetDeviceInfo    = ma_context_get_device_info__oss;
+    pCallbacks->onDeviceInit              = ma_device_init__oss;
+    pCallbacks->onDeviceUninit            = ma_device_uninit__oss;
+    pCallbacks->onDeviceStart             = ma_device_start__oss;
+    pCallbacks->onDeviceStop              = ma_device_stop__oss;
+    pCallbacks->onDeviceRead              = ma_device_read__oss;
+    pCallbacks->onDeviceWrite             = ma_device_write__oss;
+    pCallbacks->onDeviceDataLoop          = NULL;
+
+    return MA_SUCCESS;
+}
+#endif  /* MA_HAS_OSS */
+
+
+
+
+
+/******************************************************************************
+
+AAudio Backend
+
+******************************************************************************/
+#ifdef MA_HAS_AAUDIO
+
+#ifdef MA_NO_RUNTIME_LINKING
+    #include <aaudio/AAudio.h>
+#endif
+
+typedef int32_t ma_aaudio_result_t;
+typedef int32_t ma_aaudio_direction_t;
+typedef int32_t ma_aaudio_sharing_mode_t;
+typedef int32_t ma_aaudio_format_t;
+typedef int32_t ma_aaudio_stream_state_t;
+typedef int32_t ma_aaudio_performance_mode_t;
+typedef int32_t ma_aaudio_usage_t;
+typedef int32_t ma_aaudio_content_type_t;
+typedef int32_t ma_aaudio_input_preset_t;
+typedef int32_t ma_aaudio_allowed_capture_policy_t;
+typedef int32_t ma_aaudio_data_callback_result_t;
+typedef struct ma_AAudioStreamBuilder_t* ma_AAudioStreamBuilder;
+typedef struct ma_AAudioStream_t*        ma_AAudioStream;
+
+#define MA_AAUDIO_UNSPECIFIED 0
+
+/* Result codes. miniaudio only cares about the success code. */
+#define MA_AAUDIO_OK 0
+
+/* Directions. */
+#define MA_AAUDIO_DIRECTION_OUTPUT 0
+#define MA_AAUDIO_DIRECTION_INPUT 1
+
+/* Sharing modes. */
+#define MA_AAUDIO_SHARING_MODE_EXCLUSIVE 0
+#define MA_AAUDIO_SHARING_MODE_SHARED 1
+
+/* Formats. */
+#define MA_AAUDIO_FORMAT_PCM_I16 1
+#define MA_AAUDIO_FORMAT_PCM_FLOAT 2
+
+/* Stream states. */
+#define MA_AAUDIO_STREAM_STATE_UNINITIALIZED 0
+#define MA_AAUDIO_STREAM_STATE_UNKNOWN 1
+#define MA_AAUDIO_STREAM_STATE_OPEN 2
+#define MA_AAUDIO_STREAM_STATE_STARTING 3
+#define MA_AAUDIO_STREAM_STATE_STARTED 4
+#define MA_AAUDIO_STREAM_STATE_PAUSING 5
+#define MA_AAUDIO_STREAM_STATE_PAUSED 6
+#define MA_AAUDIO_STREAM_STATE_FLUSHING 7
+#define MA_AAUDIO_STREAM_STATE_FLUSHED 8
+#define MA_AAUDIO_STREAM_STATE_STOPPING 9
+#define MA_AAUDIO_STREAM_STATE_STOPPED 10
+#define MA_AAUDIO_STREAM_STATE_CLOSING 11
+#define MA_AAUDIO_STREAM_STATE_CLOSED 12
+#define MA_AAUDIO_STREAM_STATE_DISCONNECTED 13
+
+/* Performance modes. */
+#define MA_AAUDIO_PERFORMANCE_MODE_NONE 10
+#define MA_AAUDIO_PERFORMANCE_MODE_POWER_SAVING 11
+#define MA_AAUDIO_PERFORMANCE_MODE_LOW_LATENCY 12
+
+/* Usage types.
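These values are redeclared here rather than taken from the NDK headers so the backend can build with runtime linking; they are assumed to match Android's AAUDIO_USAGE_* constants exactly.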
*/ +#define MA_AAUDIO_USAGE_MEDIA 1 +#define MA_AAUDIO_USAGE_VOICE_COMMUNICATION 2 +#define MA_AAUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING 3 +#define MA_AAUDIO_USAGE_ALARM 4 +#define MA_AAUDIO_USAGE_NOTIFICATION 5 +#define MA_AAUDIO_USAGE_NOTIFICATION_RINGTONE 6 +#define MA_AAUDIO_USAGE_NOTIFICATION_EVENT 10 +#define MA_AAUDIO_USAGE_ASSISTANCE_ACCESSIBILITY 11 +#define MA_AAUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE 12 +#define MA_AAUDIO_USAGE_ASSISTANCE_SONIFICATION 13 +#define MA_AAUDIO_USAGE_GAME 14 +#define MA_AAUDIO_USAGE_ASSISTANT 16 +#define MA_AAUDIO_SYSTEM_USAGE_EMERGENCY 1000 +#define MA_AAUDIO_SYSTEM_USAGE_SAFETY 1001 +#define MA_AAUDIO_SYSTEM_USAGE_VEHICLE_STATUS 1002 +#define MA_AAUDIO_SYSTEM_USAGE_ANNOUNCEMENT 1003 + +/* Content types. */ +#define MA_AAUDIO_CONTENT_TYPE_SPEECH 1 +#define MA_AAUDIO_CONTENT_TYPE_MUSIC 2 +#define MA_AAUDIO_CONTENT_TYPE_MOVIE 3 +#define MA_AAUDIO_CONTENT_TYPE_SONIFICATION 4 + +/* Input presets. */ +#define MA_AAUDIO_INPUT_PRESET_GENERIC 1 +#define MA_AAUDIO_INPUT_PRESET_CAMCORDER 5 +#define MA_AAUDIO_INPUT_PRESET_VOICE_RECOGNITION 6 +#define MA_AAUDIO_INPUT_PRESET_VOICE_COMMUNICATION 7 +#define MA_AAUDIO_INPUT_PRESET_UNPROCESSED 9 +#define MA_AAUDIO_INPUT_PRESET_VOICE_PERFORMANCE 10 + +/* Allowed Capture Policies */ +#define MA_AAUDIO_ALLOW_CAPTURE_BY_ALL 1 +#define MA_AAUDIO_ALLOW_CAPTURE_BY_SYSTEM 2 +#define MA_AAUDIO_ALLOW_CAPTURE_BY_NONE 3 + +/* Callback results. */ +#define MA_AAUDIO_CALLBACK_RESULT_CONTINUE 0 +#define MA_AAUDIO_CALLBACK_RESULT_STOP 1 + + +typedef ma_aaudio_data_callback_result_t (* ma_AAudioStream_dataCallback) (ma_AAudioStream* pStream, void* pUserData, void* pAudioData, int32_t numFrames); +typedef void (* ma_AAudioStream_errorCallback)(ma_AAudioStream *pStream, void *pUserData, ma_aaudio_result_t error); + +typedef ma_aaudio_result_t (* MA_PFN_AAudio_createStreamBuilder) (ma_AAudioStreamBuilder** ppBuilder); +typedef ma_aaudio_result_t (* MA_PFN_AAudioStreamBuilder_delete) (ma_AAudioStreamBuilder* pBuilder); +typedef void (* MA_PFN_AAudioStreamBuilder_setDeviceId) (ma_AAudioStreamBuilder* pBuilder, int32_t deviceId); +typedef void (* MA_PFN_AAudioStreamBuilder_setDirection) (ma_AAudioStreamBuilder* pBuilder, ma_aaudio_direction_t direction); +typedef void (* MA_PFN_AAudioStreamBuilder_setSharingMode) (ma_AAudioStreamBuilder* pBuilder, ma_aaudio_sharing_mode_t sharingMode); +typedef void (* MA_PFN_AAudioStreamBuilder_setFormat) (ma_AAudioStreamBuilder* pBuilder, ma_aaudio_format_t format); +typedef void (* MA_PFN_AAudioStreamBuilder_setChannelCount) (ma_AAudioStreamBuilder* pBuilder, int32_t channelCount); +typedef void (* MA_PFN_AAudioStreamBuilder_setSampleRate) (ma_AAudioStreamBuilder* pBuilder, int32_t sampleRate); +typedef void (* MA_PFN_AAudioStreamBuilder_setBufferCapacityInFrames)(ma_AAudioStreamBuilder* pBuilder, int32_t numFrames); +typedef void (* MA_PFN_AAudioStreamBuilder_setFramesPerDataCallback) (ma_AAudioStreamBuilder* pBuilder, int32_t numFrames); +typedef void (* MA_PFN_AAudioStreamBuilder_setDataCallback) (ma_AAudioStreamBuilder* pBuilder, ma_AAudioStream_dataCallback callback, void* pUserData); +typedef void (* MA_PFN_AAudioStreamBuilder_setErrorCallback) (ma_AAudioStreamBuilder* pBuilder, ma_AAudioStream_errorCallback callback, void* pUserData); +typedef void (* MA_PFN_AAudioStreamBuilder_setPerformanceMode) (ma_AAudioStreamBuilder* pBuilder, ma_aaudio_performance_mode_t mode); +typedef void (* MA_PFN_AAudioStreamBuilder_setUsage) (ma_AAudioStreamBuilder* pBuilder, ma_aaudio_usage_t contentType); 
+typedef void (* MA_PFN_AAudioStreamBuilder_setContentType) (ma_AAudioStreamBuilder* pBuilder, ma_aaudio_content_type_t contentType); +typedef void (* MA_PFN_AAudioStreamBuilder_setInputPreset) (ma_AAudioStreamBuilder* pBuilder, ma_aaudio_input_preset_t inputPreset); +typedef void (* MA_PFN_AAudioStreamBuilder_setAllowedCapturePolicy) (ma_AAudioStreamBuilder* pBuilder, ma_aaudio_allowed_capture_policy_t policy); +typedef ma_aaudio_result_t (* MA_PFN_AAudioStreamBuilder_openStream) (ma_AAudioStreamBuilder* pBuilder, ma_AAudioStream** ppStream); +typedef ma_aaudio_result_t (* MA_PFN_AAudioStream_close) (ma_AAudioStream* pStream); +typedef ma_aaudio_stream_state_t (* MA_PFN_AAudioStream_getState) (ma_AAudioStream* pStream); +typedef ma_aaudio_result_t (* MA_PFN_AAudioStream_waitForStateChange) (ma_AAudioStream* pStream, ma_aaudio_stream_state_t inputState, ma_aaudio_stream_state_t* pNextState, int64_t timeoutInNanoseconds); +typedef ma_aaudio_format_t (* MA_PFN_AAudioStream_getFormat) (ma_AAudioStream* pStream); +typedef int32_t (* MA_PFN_AAudioStream_getChannelCount) (ma_AAudioStream* pStream); +typedef int32_t (* MA_PFN_AAudioStream_getSampleRate) (ma_AAudioStream* pStream); +typedef int32_t (* MA_PFN_AAudioStream_getBufferCapacityInFrames) (ma_AAudioStream* pStream); +typedef int32_t (* MA_PFN_AAudioStream_getFramesPerDataCallback) (ma_AAudioStream* pStream); +typedef int32_t (* MA_PFN_AAudioStream_getFramesPerBurst) (ma_AAudioStream* pStream); +typedef ma_aaudio_result_t (* MA_PFN_AAudioStream_requestStart) (ma_AAudioStream* pStream); +typedef ma_aaudio_result_t (* MA_PFN_AAudioStream_requestStop) (ma_AAudioStream* pStream); + +static ma_result ma_result_from_aaudio(ma_aaudio_result_t resultAA) +{ + switch (resultAA) + { + case MA_AAUDIO_OK: return MA_SUCCESS; + default: break; + } + + return MA_ERROR; +} + +static ma_aaudio_usage_t ma_to_usage__aaudio(ma_aaudio_usage usage) +{ + switch (usage) { + case ma_aaudio_usage_media: return MA_AAUDIO_USAGE_MEDIA; + case ma_aaudio_usage_voice_communication: return MA_AAUDIO_USAGE_VOICE_COMMUNICATION; + case ma_aaudio_usage_voice_communication_signalling: return MA_AAUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING; + case ma_aaudio_usage_alarm: return MA_AAUDIO_USAGE_ALARM; + case ma_aaudio_usage_notification: return MA_AAUDIO_USAGE_NOTIFICATION; + case ma_aaudio_usage_notification_ringtone: return MA_AAUDIO_USAGE_NOTIFICATION_RINGTONE; + case ma_aaudio_usage_notification_event: return MA_AAUDIO_USAGE_NOTIFICATION_EVENT; + case ma_aaudio_usage_assistance_accessibility: return MA_AAUDIO_USAGE_ASSISTANCE_ACCESSIBILITY; + case ma_aaudio_usage_assistance_navigation_guidance: return MA_AAUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE; + case ma_aaudio_usage_assistance_sonification: return MA_AAUDIO_USAGE_ASSISTANCE_SONIFICATION; + case ma_aaudio_usage_game: return MA_AAUDIO_USAGE_GAME; + case ma_aaudio_usage_assitant: return MA_AAUDIO_USAGE_ASSISTANT; + case ma_aaudio_usage_emergency: return MA_AAUDIO_SYSTEM_USAGE_EMERGENCY; + case ma_aaudio_usage_safety: return MA_AAUDIO_SYSTEM_USAGE_SAFETY; + case ma_aaudio_usage_vehicle_status: return MA_AAUDIO_SYSTEM_USAGE_VEHICLE_STATUS; + case ma_aaudio_usage_announcement: return MA_AAUDIO_SYSTEM_USAGE_ANNOUNCEMENT; + default: break; + } + + return MA_AAUDIO_USAGE_MEDIA; +} + +static ma_aaudio_content_type_t ma_to_content_type__aaudio(ma_aaudio_content_type contentType) +{ + switch (contentType) { + case ma_aaudio_content_type_speech: return MA_AAUDIO_CONTENT_TYPE_SPEECH; + case ma_aaudio_content_type_music: return 
MA_AAUDIO_CONTENT_TYPE_MUSIC; + case ma_aaudio_content_type_movie: return MA_AAUDIO_CONTENT_TYPE_MOVIE; + case ma_aaudio_content_type_sonification: return MA_AAUDIO_CONTENT_TYPE_SONIFICATION; + default: break; + } + + return MA_AAUDIO_CONTENT_TYPE_SPEECH; +} + +static ma_aaudio_input_preset_t ma_to_input_preset__aaudio(ma_aaudio_input_preset inputPreset) +{ + switch (inputPreset) { + case ma_aaudio_input_preset_generic: return MA_AAUDIO_INPUT_PRESET_GENERIC; + case ma_aaudio_input_preset_camcorder: return MA_AAUDIO_INPUT_PRESET_CAMCORDER; + case ma_aaudio_input_preset_voice_recognition: return MA_AAUDIO_INPUT_PRESET_VOICE_RECOGNITION; + case ma_aaudio_input_preset_voice_communication: return MA_AAUDIO_INPUT_PRESET_VOICE_COMMUNICATION; + case ma_aaudio_input_preset_unprocessed: return MA_AAUDIO_INPUT_PRESET_UNPROCESSED; + case ma_aaudio_input_preset_voice_performance: return MA_AAUDIO_INPUT_PRESET_VOICE_PERFORMANCE; + default: break; + } + + return MA_AAUDIO_INPUT_PRESET_GENERIC; +} + +static ma_aaudio_allowed_capture_policy_t ma_to_allowed_capture_policy__aaudio(ma_aaudio_allowed_capture_policy allowedCapturePolicy) +{ + switch (allowedCapturePolicy) { + case ma_aaudio_allow_capture_by_all: return MA_AAUDIO_ALLOW_CAPTURE_BY_ALL; + case ma_aaudio_allow_capture_by_system: return MA_AAUDIO_ALLOW_CAPTURE_BY_SYSTEM; + case ma_aaudio_allow_capture_by_none: return MA_AAUDIO_ALLOW_CAPTURE_BY_NONE; + default: break; + } + + return MA_AAUDIO_ALLOW_CAPTURE_BY_ALL; +} + +static void ma_stream_error_callback__aaudio(ma_AAudioStream* pStream, void* pUserData, ma_aaudio_result_t error) +{ + ma_result result; + ma_job job; + ma_device* pDevice = (ma_device*)pUserData; + MA_ASSERT(pDevice != NULL); + + (void)error; + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, "[AAudio] ERROR CALLBACK: error=%d, AAudioStream_getState()=%d\n", error, ((MA_PFN_AAudioStream_getState)pDevice->pContext->aaudio.AAudioStream_getState)(pStream)); + /* + When we get an error, we'll assume that the stream is in an erroneous state and needs to be restarted. From the documentation, + we cannot do this from the error callback. Therefore we are going to use an event thread for the AAudio backend to do this + cleanly and safely. + */ + job = ma_job_init(MA_JOB_TYPE_DEVICE_AAUDIO_REROUTE); + job.data.device.aaudio.reroute.pDevice = pDevice; + + if (pStream == pDevice->aaudio.pStreamCapture) { + job.data.device.aaudio.reroute.deviceType = ma_device_type_capture; + } + else { + job.data.device.aaudio.reroute.deviceType = ma_device_type_playback; + } + + result = ma_device_job_thread_post(&pDevice->pContext->aaudio.jobThread, &job); + if (result != MA_SUCCESS) { + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, "[AAudio] Device Disconnected. 
Failed to post job for rerouting.\n"); + return; + } +} + +static ma_aaudio_data_callback_result_t ma_stream_data_callback_capture__aaudio(ma_AAudioStream* pStream, void* pUserData, void* pAudioData, int32_t frameCount) +{ + ma_device* pDevice = (ma_device*)pUserData; + MA_ASSERT(pDevice != NULL); + + if (frameCount > 0) { + ma_device_handle_backend_data_callback(pDevice, NULL, pAudioData, (ma_uint32)frameCount); + } + + (void)pStream; + return MA_AAUDIO_CALLBACK_RESULT_CONTINUE; +} + +static ma_aaudio_data_callback_result_t ma_stream_data_callback_playback__aaudio(ma_AAudioStream* pStream, void* pUserData, void* pAudioData, int32_t frameCount) +{ + ma_device* pDevice = (ma_device*)pUserData; + MA_ASSERT(pDevice != NULL); + + /* + I've had a report that AAudio can sometimes post a frame count of 0. We need to check for that here + so we don't get any errors at a deeper level. I'm doing the same with the capture side for safety, + though I've not yet had any reports about that one. + */ + if (frameCount > 0) { + ma_device_handle_backend_data_callback(pDevice, pAudioData, NULL, (ma_uint32)frameCount); + } + + (void)pStream; + return MA_AAUDIO_CALLBACK_RESULT_CONTINUE; +} + +static ma_result ma_create_and_configure_AAudioStreamBuilder__aaudio(ma_context* pContext, const ma_device_id* pDeviceID, ma_device_type deviceType, ma_share_mode shareMode, const ma_device_descriptor* pDescriptor, const ma_device_config* pConfig, ma_device* pDevice, ma_AAudioStreamBuilder** ppBuilder) +{ + ma_AAudioStreamBuilder* pBuilder; + ma_aaudio_result_t resultAA; + + /* Safety. */ + *ppBuilder = NULL; + + resultAA = ((MA_PFN_AAudio_createStreamBuilder)pContext->aaudio.AAudio_createStreamBuilder)(&pBuilder); + if (resultAA != MA_AAUDIO_OK) { + return ma_result_from_aaudio(resultAA); + } + + if (pDeviceID != NULL) { + ((MA_PFN_AAudioStreamBuilder_setDeviceId)pContext->aaudio.AAudioStreamBuilder_setDeviceId)(pBuilder, pDeviceID->aaudio); + } + + ((MA_PFN_AAudioStreamBuilder_setDirection)pContext->aaudio.AAudioStreamBuilder_setDirection)(pBuilder, (deviceType == ma_device_type_playback) ? MA_AAUDIO_DIRECTION_OUTPUT : MA_AAUDIO_DIRECTION_INPUT); + ((MA_PFN_AAudioStreamBuilder_setSharingMode)pContext->aaudio.AAudioStreamBuilder_setSharingMode)(pBuilder, (shareMode == ma_share_mode_shared) ? MA_AAUDIO_SHARING_MODE_SHARED : MA_AAUDIO_SHARING_MODE_EXCLUSIVE); + + + /* If we have a device descriptor make sure we configure the stream builder to take our requested parameters. */ + if (pDescriptor != NULL) { + MA_ASSERT(pConfig != NULL); /* We must have a device config if we also have a descriptor. The config is required for AAudio specific configuration options. */ + + if (pDescriptor->sampleRate != 0) { + ((MA_PFN_AAudioStreamBuilder_setSampleRate)pContext->aaudio.AAudioStreamBuilder_setSampleRate)(pBuilder, pDescriptor->sampleRate); + } + + if (pDescriptor->channels != 0) { + ((MA_PFN_AAudioStreamBuilder_setChannelCount)pContext->aaudio.AAudioStreamBuilder_setChannelCount)(pBuilder, pDescriptor->channels); + } + + if (pDescriptor->format != ma_format_unknown) { + ((MA_PFN_AAudioStreamBuilder_setFormat)pContext->aaudio.AAudioStreamBuilder_setFormat)(pBuilder, (pDescriptor->format == ma_format_s16) ? MA_AAUDIO_FORMAT_PCM_I16 : MA_AAUDIO_FORMAT_PCM_FLOAT); + } + + + /* + There have been reports where setting the frames per data callback results in an error. + In particular, re-routing may inadvertently switch from low-latency mode, resulting in a less stable + stream from the legacy path (AudioStreamLegacy). 
To address this, we simply don't set the value. It + can still be set if it's explicitly requested via the aaudio.allowSetBufferCapacity variable in the + device config. + */ + if ((!pConfig->aaudio.enableCompatibilityWorkarounds || ma_android_sdk_version() > 30) && pConfig->aaudio.allowSetBufferCapacity) { + /* + AAudio is annoying when it comes to its buffer calculation stuff because it doesn't let you + retrieve the actual sample rate until after you've opened the stream. But you need to configure + the buffer capacity before you open the stream... :/ + + To solve, we're just going to assume MA_DEFAULT_SAMPLE_RATE (48000) and move on. + */ + ma_uint32 bufferCapacityInFrames = ma_calculate_buffer_size_in_frames_from_descriptor(pDescriptor, pDescriptor->sampleRate, pConfig->performanceProfile) * pDescriptor->periodCount; + + ((MA_PFN_AAudioStreamBuilder_setBufferCapacityInFrames)pContext->aaudio.AAudioStreamBuilder_setBufferCapacityInFrames)(pBuilder, bufferCapacityInFrames); + ((MA_PFN_AAudioStreamBuilder_setFramesPerDataCallback)pContext->aaudio.AAudioStreamBuilder_setFramesPerDataCallback)(pBuilder, bufferCapacityInFrames / pDescriptor->periodCount); + } + + if (deviceType == ma_device_type_capture) { + if (pConfig->aaudio.inputPreset != ma_aaudio_input_preset_default && pContext->aaudio.AAudioStreamBuilder_setInputPreset != NULL) { + ((MA_PFN_AAudioStreamBuilder_setInputPreset)pContext->aaudio.AAudioStreamBuilder_setInputPreset)(pBuilder, ma_to_input_preset__aaudio(pConfig->aaudio.inputPreset)); + } + + ((MA_PFN_AAudioStreamBuilder_setDataCallback)pContext->aaudio.AAudioStreamBuilder_setDataCallback)(pBuilder, ma_stream_data_callback_capture__aaudio, (void*)pDevice); + } else { + if (pConfig->aaudio.usage != ma_aaudio_usage_default && pContext->aaudio.AAudioStreamBuilder_setUsage != NULL) { + ((MA_PFN_AAudioStreamBuilder_setUsage)pContext->aaudio.AAudioStreamBuilder_setUsage)(pBuilder, ma_to_usage__aaudio(pConfig->aaudio.usage)); + } + + if (pConfig->aaudio.contentType != ma_aaudio_content_type_default && pContext->aaudio.AAudioStreamBuilder_setContentType != NULL) { + ((MA_PFN_AAudioStreamBuilder_setContentType)pContext->aaudio.AAudioStreamBuilder_setContentType)(pBuilder, ma_to_content_type__aaudio(pConfig->aaudio.contentType)); + } + + if (pConfig->aaudio.allowedCapturePolicy != ma_aaudio_allow_capture_default && pContext->aaudio.AAudioStreamBuilder_setAllowedCapturePolicy != NULL) { + ((MA_PFN_AAudioStreamBuilder_setAllowedCapturePolicy)pContext->aaudio.AAudioStreamBuilder_setAllowedCapturePolicy)(pBuilder, ma_to_allowed_capture_policy__aaudio(pConfig->aaudio.allowedCapturePolicy)); + } + + ((MA_PFN_AAudioStreamBuilder_setDataCallback)pContext->aaudio.AAudioStreamBuilder_setDataCallback)(pBuilder, ma_stream_data_callback_playback__aaudio, (void*)pDevice); + } + + /* + If we set AAUDIO_PERFORMANCE_MODE_LOW_LATENCY, we allow for MMAP (non-legacy path). + Since there's a mapping between miniaudio's performance profiles and AAudio's performance modes, let's use it. + Beware though, with a conservative performance profile, AAudio will indeed take the legacy path. + */ + ((MA_PFN_AAudioStreamBuilder_setPerformanceMode)pContext->aaudio.AAudioStreamBuilder_setPerformanceMode)(pBuilder, (pConfig->performanceProfile == ma_performance_profile_low_latency) ? MA_AAUDIO_PERFORMANCE_MODE_LOW_LATENCY : MA_AAUDIO_PERFORMANCE_MODE_NONE); + + /* We need to set an error callback to detect device changes. 
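As a concrete scenario (for illustration), unplugging a USB headset disconnects the stream; ma_stream_error_callback__aaudio above then posts a reroute job to the job thread rather than restarting the stream from inside the callback, which AAudio does not allow.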
*/ + if (pDevice != NULL) { /* <-- pDevice should never be null if pDescriptor is not null, which is always the case if we hit this branch. Check anyway for safety. */ + ((MA_PFN_AAudioStreamBuilder_setErrorCallback)pContext->aaudio.AAudioStreamBuilder_setErrorCallback)(pBuilder, ma_stream_error_callback__aaudio, (void*)pDevice); + } + } + + *ppBuilder = pBuilder; + + return MA_SUCCESS; +} + +static ma_result ma_open_stream_and_close_builder__aaudio(ma_context* pContext, ma_AAudioStreamBuilder* pBuilder, ma_AAudioStream** ppStream) +{ + ma_result result; + + result = ma_result_from_aaudio(((MA_PFN_AAudioStreamBuilder_openStream)pContext->aaudio.AAudioStreamBuilder_openStream)(pBuilder, ppStream)); + ((MA_PFN_AAudioStreamBuilder_delete)pContext->aaudio.AAudioStreamBuilder_delete)(pBuilder); + + return result; +} + +static ma_result ma_open_stream_basic__aaudio(ma_context* pContext, const ma_device_id* pDeviceID, ma_device_type deviceType, ma_share_mode shareMode, ma_AAudioStream** ppStream) +{ + ma_result result; + ma_AAudioStreamBuilder* pBuilder; + + *ppStream = NULL; + + result = ma_create_and_configure_AAudioStreamBuilder__aaudio(pContext, pDeviceID, deviceType, shareMode, NULL, NULL, NULL, &pBuilder); + if (result != MA_SUCCESS) { + return result; + } + + /* Let's give AAudio a hint to avoid the legacy path (AudioStreamLegacy). */ + ((MA_PFN_AAudioStreamBuilder_setPerformanceMode)pContext->aaudio.AAudioStreamBuilder_setPerformanceMode)(pBuilder, MA_AAUDIO_PERFORMANCE_MODE_LOW_LATENCY); + + return ma_open_stream_and_close_builder__aaudio(pContext, pBuilder, ppStream); +} + +static ma_result ma_open_stream__aaudio(ma_device* pDevice, const ma_device_config* pConfig, ma_device_type deviceType, const ma_device_descriptor* pDescriptor, ma_AAudioStream** ppStream) +{ + ma_result result; + ma_AAudioStreamBuilder* pBuilder; + + MA_ASSERT(pDevice != NULL); + MA_ASSERT(pDescriptor != NULL); + MA_ASSERT(deviceType != ma_device_type_duplex); /* This function should not be called for a full-duplex device type. */ + + *ppStream = NULL; + + result = ma_create_and_configure_AAudioStreamBuilder__aaudio(pDevice->pContext, pDescriptor->pDeviceID, deviceType, pDescriptor->shareMode, pDescriptor, pConfig, pDevice, &pBuilder); + if (result != MA_SUCCESS) { + return result; + } + + return ma_open_stream_and_close_builder__aaudio(pDevice->pContext, pBuilder, ppStream); +} + +static ma_result ma_close_stream__aaudio(ma_context* pContext, ma_AAudioStream* pStream) +{ + if (pStream == NULL) { + return MA_INVALID_ARGS; + } + + return ma_result_from_aaudio(((MA_PFN_AAudioStream_close)pContext->aaudio.AAudioStream_close)(pStream)); +} + +static ma_bool32 ma_has_default_device__aaudio(ma_context* pContext, ma_device_type deviceType) +{ + /* The only way to know this is to try creating a stream. */ + ma_AAudioStream* pStream; + ma_result result = ma_open_stream_basic__aaudio(pContext, NULL, deviceType, ma_share_mode_shared, &pStream); + if (result != MA_SUCCESS) { + return MA_FALSE; + } + + ma_close_stream__aaudio(pContext, pStream); + return MA_TRUE; +} + +static ma_result ma_wait_for_simple_state_transition__aaudio(ma_context* pContext, ma_AAudioStream* pStream, ma_aaudio_stream_state_t oldState, ma_aaudio_stream_state_t newState) +{ + ma_aaudio_stream_state_t actualNewState; + ma_aaudio_result_t resultAA = ((MA_PFN_AAudioStream_waitForStateChange)pContext->aaudio.AAudioStream_waitForStateChange)(pStream, oldState, &actualNewState, 5000000000); /* 5 second timeout. 
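Note that the timeout is specified in nanoseconds, which is what AAudioStream_waitForStateChange() expects.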
*/ + if (resultAA != MA_AAUDIO_OK) { + return ma_result_from_aaudio(resultAA); + } + + if (newState != actualNewState) { + return MA_ERROR; /* Failed to transition into the expected state. */ + } + + return MA_SUCCESS; +} + + +static ma_result ma_context_enumerate_devices__aaudio(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData) +{ + ma_bool32 cbResult = MA_TRUE; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(callback != NULL); + + /* Unfortunately AAudio does not have an enumeration API. Therefore I'm only going to report default devices, but only if it can instantiate a stream. */ + + /* Playback. */ + if (cbResult) { + ma_device_info deviceInfo; + MA_ZERO_OBJECT(&deviceInfo); + deviceInfo.id.aaudio = MA_AAUDIO_UNSPECIFIED; + ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1); + + if (ma_has_default_device__aaudio(pContext, ma_device_type_playback)) { + cbResult = callback(pContext, ma_device_type_playback, &deviceInfo, pUserData); + } + } + + /* Capture. */ + if (cbResult) { + ma_device_info deviceInfo; + MA_ZERO_OBJECT(&deviceInfo); + deviceInfo.id.aaudio = MA_AAUDIO_UNSPECIFIED; + ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1); + + if (ma_has_default_device__aaudio(pContext, ma_device_type_capture)) { + cbResult = callback(pContext, ma_device_type_capture, &deviceInfo, pUserData); + } + } + + return MA_SUCCESS; +} + +static void ma_context_add_native_data_format_from_AAudioStream_ex__aaudio(ma_context* pContext, ma_AAudioStream* pStream, ma_format format, ma_uint32 flags, ma_device_info* pDeviceInfo) +{ + MA_ASSERT(pContext != NULL); + MA_ASSERT(pStream != NULL); + MA_ASSERT(pDeviceInfo != NULL); + + pDeviceInfo->nativeDataFormats[pDeviceInfo->nativeDataFormatCount].format = format; + pDeviceInfo->nativeDataFormats[pDeviceInfo->nativeDataFormatCount].channels = ((MA_PFN_AAudioStream_getChannelCount)pContext->aaudio.AAudioStream_getChannelCount)(pStream); + pDeviceInfo->nativeDataFormats[pDeviceInfo->nativeDataFormatCount].sampleRate = ((MA_PFN_AAudioStream_getSampleRate)pContext->aaudio.AAudioStream_getSampleRate)(pStream); + pDeviceInfo->nativeDataFormats[pDeviceInfo->nativeDataFormatCount].flags = flags; + pDeviceInfo->nativeDataFormatCount += 1; +} + +static void ma_context_add_native_data_format_from_AAudioStream__aaudio(ma_context* pContext, ma_AAudioStream* pStream, ma_uint32 flags, ma_device_info* pDeviceInfo) +{ + /* AAudio supports s16 and f32. 
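Both entries use the channel count and sample rate reported by the opened stream; only the sample format differs.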
*/ + ma_context_add_native_data_format_from_AAudioStream_ex__aaudio(pContext, pStream, ma_format_f32, flags, pDeviceInfo); + ma_context_add_native_data_format_from_AAudioStream_ex__aaudio(pContext, pStream, ma_format_s16, flags, pDeviceInfo); +} + +static ma_result ma_context_get_device_info__aaudio(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_device_info* pDeviceInfo) +{ + ma_AAudioStream* pStream; + ma_result result; + + MA_ASSERT(pContext != NULL); + + /* ID */ + if (pDeviceID != NULL) { + pDeviceInfo->id.aaudio = pDeviceID->aaudio; + } else { + pDeviceInfo->id.aaudio = MA_AAUDIO_UNSPECIFIED; + } + + /* Name */ + if (deviceType == ma_device_type_playback) { + ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1); + } else { + ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1); + } + + + pDeviceInfo->nativeDataFormatCount = 0; + + /* We'll need to open the device to get accurate sample rate and channel count information. */ + result = ma_open_stream_basic__aaudio(pContext, pDeviceID, deviceType, ma_share_mode_shared, &pStream); + if (result != MA_SUCCESS) { + return result; + } + + ma_context_add_native_data_format_from_AAudioStream__aaudio(pContext, pStream, 0, pDeviceInfo); + + ma_close_stream__aaudio(pContext, pStream); + pStream = NULL; + + return MA_SUCCESS; +} + +static ma_result ma_close_streams__aaudio(ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + + /* When re-routing, streams may have been closed and never re-opened. Hence the extra checks below. */ + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + ma_close_stream__aaudio(pDevice->pContext, (ma_AAudioStream*)pDevice->aaudio.pStreamCapture); + pDevice->aaudio.pStreamCapture = NULL; + } + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + ma_close_stream__aaudio(pDevice->pContext, (ma_AAudioStream*)pDevice->aaudio.pStreamPlayback); + pDevice->aaudio.pStreamPlayback = NULL; + } + + return MA_SUCCESS; +} + +static ma_result ma_device_uninit__aaudio(ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + + /* Wait for any rerouting to finish before attempting to close the streams. */ + ma_mutex_lock(&pDevice->aaudio.rerouteLock); + { + ma_close_streams__aaudio(pDevice); + } + ma_mutex_unlock(&pDevice->aaudio.rerouteLock); + + /* Destroy re-routing lock. */ + ma_mutex_uninit(&pDevice->aaudio.rerouteLock); + + return MA_SUCCESS; +} + +static ma_result ma_device_init_by_type__aaudio(ma_device* pDevice, const ma_device_config* pConfig, ma_device_type deviceType, ma_device_descriptor* pDescriptor, ma_AAudioStream** ppStream) +{ + ma_result result; + int32_t bufferCapacityInFrames; + int32_t framesPerDataCallback; + ma_AAudioStream* pStream; + + MA_ASSERT(pDevice != NULL); + MA_ASSERT(pConfig != NULL); + MA_ASSERT(pDescriptor != NULL); + + *ppStream = NULL; /* Safety. */ + + /* First step is to open the stream. From there we'll be able to extract the internal configuration. */ + result = ma_open_stream__aaudio(pDevice, pConfig, deviceType, pDescriptor, &pStream); + if (result != MA_SUCCESS) { + return result; /* Failed to open the AAudio stream. */ + } + + /* Now extract the internal configuration. */ + pDescriptor->format = (((MA_PFN_AAudioStream_getFormat)pDevice->pContext->aaudio.AAudioStream_getFormat)(pStream) == MA_AAUDIO_FORMAT_PCM_I16) ? 
ma_format_s16 : ma_format_f32; + pDescriptor->channels = ((MA_PFN_AAudioStream_getChannelCount)pDevice->pContext->aaudio.AAudioStream_getChannelCount)(pStream); + pDescriptor->sampleRate = ((MA_PFN_AAudioStream_getSampleRate)pDevice->pContext->aaudio.AAudioStream_getSampleRate)(pStream); + + /* For the channel map we need to be sure we don't overflow any buffers. */ + if (pDescriptor->channels <= MA_MAX_CHANNELS) { + ma_channel_map_init_standard(ma_standard_channel_map_default, pDescriptor->channelMap, ma_countof(pDescriptor->channelMap), pDescriptor->channels); /* <-- Cannot find info on channel order, so assuming a default. */ + } else { + ma_channel_map_init_blank(pDescriptor->channelMap, MA_MAX_CHANNELS); /* Too many channels. Use a blank channel map. */ + } + + bufferCapacityInFrames = ((MA_PFN_AAudioStream_getBufferCapacityInFrames)pDevice->pContext->aaudio.AAudioStream_getBufferCapacityInFrames)(pStream); + framesPerDataCallback = ((MA_PFN_AAudioStream_getFramesPerDataCallback)pDevice->pContext->aaudio.AAudioStream_getFramesPerDataCallback)(pStream); + + if (framesPerDataCallback > 0) { + pDescriptor->periodSizeInFrames = framesPerDataCallback; + pDescriptor->periodCount = bufferCapacityInFrames / framesPerDataCallback; + } else { + pDescriptor->periodSizeInFrames = bufferCapacityInFrames; + pDescriptor->periodCount = 1; + } + + *ppStream = pStream; + + return MA_SUCCESS; +} + +static ma_result ma_device_init_streams__aaudio(ma_device* pDevice, const ma_device_config* pConfig, ma_device_descriptor* pDescriptorPlayback, ma_device_descriptor* pDescriptorCapture) +{ + ma_result result; + + MA_ASSERT(pDevice != NULL); + + if (pConfig->deviceType == ma_device_type_loopback) { + return MA_DEVICE_TYPE_NOT_SUPPORTED; + } + + pDevice->aaudio.usage = pConfig->aaudio.usage; + pDevice->aaudio.contentType = pConfig->aaudio.contentType; + pDevice->aaudio.inputPreset = pConfig->aaudio.inputPreset; + pDevice->aaudio.allowedCapturePolicy = pConfig->aaudio.allowedCapturePolicy; + pDevice->aaudio.noAutoStartAfterReroute = pConfig->aaudio.noAutoStartAfterReroute; + + if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) { + result = ma_device_init_by_type__aaudio(pDevice, pConfig, ma_device_type_capture, pDescriptorCapture, (ma_AAudioStream**)&pDevice->aaudio.pStreamCapture); + if (result != MA_SUCCESS) { + return result; + } + } + + if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) { + result = ma_device_init_by_type__aaudio(pDevice, pConfig, ma_device_type_playback, pDescriptorPlayback, (ma_AAudioStream**)&pDevice->aaudio.pStreamPlayback); + if (result != MA_SUCCESS) { + return result; + } + } + + return MA_SUCCESS; +} + +static ma_result ma_device_init__aaudio(ma_device* pDevice, const ma_device_config* pConfig, ma_device_descriptor* pDescriptorPlayback, ma_device_descriptor* pDescriptorCapture) +{ + ma_result result; + + MA_ASSERT(pDevice != NULL); + + result = ma_device_init_streams__aaudio(pDevice, pConfig, pDescriptorPlayback, pDescriptorCapture); + if (result != MA_SUCCESS) { + return result; + } + + result = ma_mutex_init(&pDevice->aaudio.rerouteLock); + if (result != MA_SUCCESS) { + return result; + } + + return MA_SUCCESS; +} + +static ma_result ma_device_start_stream__aaudio(ma_device* pDevice, ma_AAudioStream* pStream) +{ + ma_aaudio_result_t resultAA; + ma_aaudio_stream_state_t currentState; + + MA_ASSERT(pDevice != NULL); + + if (pStream == NULL) { + return MA_INVALID_ARGS; + } + + resultAA = 
((MA_PFN_AAudioStream_requestStart)pDevice->pContext->aaudio.AAudioStream_requestStart)(pStream);
+    if (resultAA != MA_AAUDIO_OK) {
+        return ma_result_from_aaudio(resultAA);
+    }
+
+    /* Do we actually need to wait for the device to transition into its started state? */
+
+    /* The device should be in either a starting or started state. If it's not set to started, we need to wait for it to transition. It should go from starting to started. */
+    currentState = ((MA_PFN_AAudioStream_getState)pDevice->pContext->aaudio.AAudioStream_getState)(pStream);
+    if (currentState != MA_AAUDIO_STREAM_STATE_STARTED) {
+        ma_result result;
+
+        if (currentState != MA_AAUDIO_STREAM_STATE_STARTING) {
+            return MA_ERROR;   /* Expecting the stream to be in a starting or started state. */
+        }
+
+        result = ma_wait_for_simple_state_transition__aaudio(pDevice->pContext, pStream, currentState, MA_AAUDIO_STREAM_STATE_STARTED);
+        if (result != MA_SUCCESS) {
+            return result;
+        }
+    }
+
+    return MA_SUCCESS;
+}
+
+static ma_result ma_device_stop_stream__aaudio(ma_device* pDevice, ma_AAudioStream* pStream)
+{
+    ma_aaudio_result_t resultAA;
+    ma_aaudio_stream_state_t currentState;
+
+    MA_ASSERT(pDevice != NULL);
+
+    if (pStream == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    /*
+    From the AAudio documentation:
+
+        The stream will stop after all of the data currently buffered has been played.
+
+    This matches miniaudio's requirement that devices be drained, which means we don't need to implement any draining logic.
+    */
+    currentState = ((MA_PFN_AAudioStream_getState)pDevice->pContext->aaudio.AAudioStream_getState)(pStream);
+    if (currentState == MA_AAUDIO_STREAM_STATE_DISCONNECTED) {
+        return MA_SUCCESS;  /* The device is disconnected. Don't try stopping it. */
+    }
+
+    resultAA = ((MA_PFN_AAudioStream_requestStop)pDevice->pContext->aaudio.AAudioStream_requestStop)(pStream);
+    if (resultAA != MA_AAUDIO_OK) {
+        return ma_result_from_aaudio(resultAA);
+    }
+
+    /* The device should be in either a stopping or stopped state. If it's not set to stopped, we need to wait for it to transition. It should go from stopping to stopped. */
+    currentState = ((MA_PFN_AAudioStream_getState)pDevice->pContext->aaudio.AAudioStream_getState)(pStream);
+    if (currentState != MA_AAUDIO_STREAM_STATE_STOPPED) {
+        ma_result result;
+
+        if (currentState != MA_AAUDIO_STREAM_STATE_STOPPING) {
+            return MA_ERROR;   /* Expecting the stream to be in a stopping or stopped state. 
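Anything else at this point most likely means the stream was disconnected while the stop request was in flight.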
*/
+        }
+
+        result = ma_wait_for_simple_state_transition__aaudio(pDevice->pContext, pStream, currentState, MA_AAUDIO_STREAM_STATE_STOPPED);
+        if (result != MA_SUCCESS) {
+            return result;
+        }
+    }
+
+    return MA_SUCCESS;
+}
+
+static ma_result ma_device_start__aaudio(ma_device* pDevice)
+{
+    MA_ASSERT(pDevice != NULL);
+
+    if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
+        ma_result result = ma_device_start_stream__aaudio(pDevice, (ma_AAudioStream*)pDevice->aaudio.pStreamCapture);
+        if (result != MA_SUCCESS) {
+            return result;
+        }
+    }
+
+    if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
+        ma_result result = ma_device_start_stream__aaudio(pDevice, (ma_AAudioStream*)pDevice->aaudio.pStreamPlayback);
+        if (result != MA_SUCCESS) {
+            if (pDevice->type == ma_device_type_duplex) {
+                ma_device_stop_stream__aaudio(pDevice, (ma_AAudioStream*)pDevice->aaudio.pStreamCapture);
+            }
+            return result;
+        }
+    }
+
+    return MA_SUCCESS;
+}
+
+static ma_result ma_device_stop__aaudio(ma_device* pDevice)
+{
+    MA_ASSERT(pDevice != NULL);
+
+    if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
+        ma_result result = ma_device_stop_stream__aaudio(pDevice, (ma_AAudioStream*)pDevice->aaudio.pStreamCapture);
+        if (result != MA_SUCCESS) {
+            return result;
+        }
+    }
+
+    if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
+        ma_result result = ma_device_stop_stream__aaudio(pDevice, (ma_AAudioStream*)pDevice->aaudio.pStreamPlayback);
+        if (result != MA_SUCCESS) {
+            return result;
+        }
+    }
+
+    ma_device__on_notification_stopped(pDevice);
+
+    return MA_SUCCESS;
+}
+
+static ma_result ma_device_reinit__aaudio(ma_device* pDevice, ma_device_type deviceType)
+{
+    ma_result result;
+    int32_t retries = 0;
+
+    MA_ASSERT(pDevice != NULL);
+
+    /*
+    TODO: Stop retrying if main thread is about to uninit device.
+    */
+    ma_mutex_lock(&pDevice->aaudio.rerouteLock);
+    {
+error_disconnected:
+        /* The first thing to do is close the streams. */
+        ma_close_streams__aaudio(pDevice);
+
+        /* Now we need to reinitialize each stream. The hardest part with this is just filling out the config and descriptors. */
+        ma_device_config deviceConfig;
+        ma_device_descriptor descriptorPlayback;
+        ma_device_descriptor descriptorCapture;
+
+        deviceConfig = ma_device_config_init(deviceType);
+        deviceConfig.playback.pDeviceID = NULL; /* Only doing rerouting with default devices. */
+        deviceConfig.playback.shareMode = pDevice->playback.shareMode;
+        deviceConfig.playback.format = pDevice->playback.format;
+        deviceConfig.playback.channels = pDevice->playback.channels;
+        deviceConfig.capture.pDeviceID = NULL; /* Only doing rerouting with default devices. */
+        deviceConfig.capture.shareMode = pDevice->capture.shareMode;
+        deviceConfig.capture.format = pDevice->capture.format;
+        deviceConfig.capture.channels = pDevice->capture.channels;
+        deviceConfig.sampleRate = pDevice->sampleRate;
+        deviceConfig.aaudio.usage = pDevice->aaudio.usage;
+        deviceConfig.aaudio.contentType = pDevice->aaudio.contentType;
+        deviceConfig.aaudio.inputPreset = pDevice->aaudio.inputPreset;
+        deviceConfig.aaudio.allowedCapturePolicy = pDevice->aaudio.allowedCapturePolicy;
+        deviceConfig.aaudio.noAutoStartAfterReroute = pDevice->aaudio.noAutoStartAfterReroute;
+        deviceConfig.periods = 1;
+
+        /* Try to get an accurate period size. 
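We reuse the period size negotiated for the previous route so the rebuilt stream keeps a similar latency profile.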
*/ + if (deviceType == ma_device_type_playback || deviceType == ma_device_type_duplex) { + deviceConfig.periodSizeInFrames = pDevice->playback.internalPeriodSizeInFrames; + } else { + deviceConfig.periodSizeInFrames = pDevice->capture.internalPeriodSizeInFrames; + } + + if (deviceType == ma_device_type_capture || deviceType == ma_device_type_duplex || deviceType == ma_device_type_loopback) { + descriptorCapture.pDeviceID = deviceConfig.capture.pDeviceID; + descriptorCapture.shareMode = deviceConfig.capture.shareMode; + descriptorCapture.format = deviceConfig.capture.format; + descriptorCapture.channels = deviceConfig.capture.channels; + descriptorCapture.sampleRate = deviceConfig.sampleRate; + descriptorCapture.periodSizeInFrames = deviceConfig.periodSizeInFrames; + descriptorCapture.periodCount = deviceConfig.periods; + } + + if (deviceType == ma_device_type_playback || deviceType == ma_device_type_duplex) { + descriptorPlayback.pDeviceID = deviceConfig.playback.pDeviceID; + descriptorPlayback.shareMode = deviceConfig.playback.shareMode; + descriptorPlayback.format = deviceConfig.playback.format; + descriptorPlayback.channels = deviceConfig.playback.channels; + descriptorPlayback.sampleRate = deviceConfig.sampleRate; + descriptorPlayback.periodSizeInFrames = deviceConfig.periodSizeInFrames; + descriptorPlayback.periodCount = deviceConfig.periods; + } + + result = ma_device_init_streams__aaudio(pDevice, &deviceConfig, &descriptorPlayback, &descriptorCapture); + if (result != MA_SUCCESS) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_WARNING, "[AAudio] Failed to create stream after route change."); + goto done; + } + + result = ma_device_post_init(pDevice, deviceType, &descriptorPlayback, &descriptorCapture); + if (result != MA_SUCCESS) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_WARNING, "[AAudio] Failed to initialize device after route change."); + ma_close_streams__aaudio(pDevice); + goto done; + } + + /* We'll only ever do this in response to a reroute. */ + ma_device__on_notification_rerouted(pDevice); + + /* If the device is started, start the streams. Maybe make this configurable? */ + if (ma_device_get_state(pDevice) == ma_device_state_started) { + if (pDevice->aaudio.noAutoStartAfterReroute == MA_FALSE) { + result = ma_device_start__aaudio(pDevice); + if (result != MA_SUCCESS) { + /* We got disconnected! Retry a few times, until we find a connected device! */ + retries += 1; + if (retries <= 3) { + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, "[AAudio] Failed to start stream after route change, retrying(%d)", retries); + goto error_disconnected; + } + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, "[AAudio] Failed to start stream after route change."); + goto done; + } + } else { + ma_device_stop(pDevice); /* Do a full device stop so we set internal state correctly. 
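With noAutoStartAfterReroute enabled the application has asked to control restarting itself, so we leave the device stopped.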
*/ + } + } + + result = MA_SUCCESS; + } +done: + /* Re-routing done */ + ma_mutex_unlock(&pDevice->aaudio.rerouteLock); + + return result; +} + +static ma_result ma_device_get_info__aaudio(ma_device* pDevice, ma_device_type type, ma_device_info* pDeviceInfo) +{ + ma_AAudioStream* pStream = NULL; + + MA_ASSERT(pDevice != NULL); + MA_ASSERT(type != ma_device_type_duplex); + MA_ASSERT(pDeviceInfo != NULL); + + if (type == ma_device_type_capture) { + pStream = (ma_AAudioStream*)pDevice->aaudio.pStreamCapture; + pDeviceInfo->id.aaudio = pDevice->capture.id.aaudio; + ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1); /* Only supporting default devices. */ + } + if (type == ma_device_type_playback) { + pStream = (ma_AAudioStream*)pDevice->aaudio.pStreamPlayback; + pDeviceInfo->id.aaudio = pDevice->playback.id.aaudio; + ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1); /* Only supporting default devices. */ + } + + /* Safety. Should never happen. */ + if (pStream == NULL) { + return MA_INVALID_OPERATION; + } + + pDeviceInfo->nativeDataFormatCount = 0; + ma_context_add_native_data_format_from_AAudioStream__aaudio(pDevice->pContext, pStream, 0, pDeviceInfo); + + return MA_SUCCESS; +} + + +static ma_result ma_context_uninit__aaudio(ma_context* pContext) +{ + MA_ASSERT(pContext != NULL); + MA_ASSERT(pContext->backend == ma_backend_aaudio); + + ma_device_job_thread_uninit(&pContext->aaudio.jobThread, &pContext->allocationCallbacks); + + ma_dlclose(ma_context_get_log(pContext), pContext->aaudio.hAAudio); + pContext->aaudio.hAAudio = NULL; + + return MA_SUCCESS; +} + +static ma_result ma_context_init__aaudio(ma_context* pContext, const ma_context_config* pConfig, ma_backend_callbacks* pCallbacks) +{ +#if !defined(MA_NO_RUNTIME_LINKING) + size_t i; + const char* libNames[] = { + "libaaudio.so" + }; + + for (i = 0; i < ma_countof(libNames); ++i) { + pContext->aaudio.hAAudio = ma_dlopen(ma_context_get_log(pContext), libNames[i]); + if (pContext->aaudio.hAAudio != NULL) { + break; + } + } + + if (pContext->aaudio.hAAudio == NULL) { + return MA_FAILED_TO_INIT_BACKEND; + } + + pContext->aaudio.AAudio_createStreamBuilder = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->aaudio.hAAudio, "AAudio_createStreamBuilder"); + pContext->aaudio.AAudioStreamBuilder_delete = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->aaudio.hAAudio, "AAudioStreamBuilder_delete"); + pContext->aaudio.AAudioStreamBuilder_setDeviceId = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->aaudio.hAAudio, "AAudioStreamBuilder_setDeviceId"); + pContext->aaudio.AAudioStreamBuilder_setDirection = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->aaudio.hAAudio, "AAudioStreamBuilder_setDirection"); + pContext->aaudio.AAudioStreamBuilder_setSharingMode = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->aaudio.hAAudio, "AAudioStreamBuilder_setSharingMode"); + pContext->aaudio.AAudioStreamBuilder_setFormat = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->aaudio.hAAudio, "AAudioStreamBuilder_setFormat"); + pContext->aaudio.AAudioStreamBuilder_setChannelCount = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->aaudio.hAAudio, "AAudioStreamBuilder_setChannelCount"); + pContext->aaudio.AAudioStreamBuilder_setSampleRate = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->aaudio.hAAudio, "AAudioStreamBuilder_setSampleRate"); + 
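/* Note: setUsage, setContentType and setInputPreset were added in API 28 and setAllowedCapturePolicy in API 29, so ma_dlsym() can legitimately return NULL for these on older devices; the call sites above NULL-check them before use. */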
pContext->aaudio.AAudioStreamBuilder_setBufferCapacityInFrames = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->aaudio.hAAudio, "AAudioStreamBuilder_setBufferCapacityInFrames"); + pContext->aaudio.AAudioStreamBuilder_setFramesPerDataCallback = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->aaudio.hAAudio, "AAudioStreamBuilder_setFramesPerDataCallback"); + pContext->aaudio.AAudioStreamBuilder_setDataCallback = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->aaudio.hAAudio, "AAudioStreamBuilder_setDataCallback"); + pContext->aaudio.AAudioStreamBuilder_setErrorCallback = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->aaudio.hAAudio, "AAudioStreamBuilder_setErrorCallback"); + pContext->aaudio.AAudioStreamBuilder_setPerformanceMode = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->aaudio.hAAudio, "AAudioStreamBuilder_setPerformanceMode"); + pContext->aaudio.AAudioStreamBuilder_setUsage = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->aaudio.hAAudio, "AAudioStreamBuilder_setUsage"); + pContext->aaudio.AAudioStreamBuilder_setContentType = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->aaudio.hAAudio, "AAudioStreamBuilder_setContentType"); + pContext->aaudio.AAudioStreamBuilder_setInputPreset = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->aaudio.hAAudio, "AAudioStreamBuilder_setInputPreset"); + pContext->aaudio.AAudioStreamBuilder_setAllowedCapturePolicy = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->aaudio.hAAudio, "AAudioStreamBuilder_setAllowedCapturePolicy"); + pContext->aaudio.AAudioStreamBuilder_openStream = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->aaudio.hAAudio, "AAudioStreamBuilder_openStream"); + pContext->aaudio.AAudioStream_close = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->aaudio.hAAudio, "AAudioStream_close"); + pContext->aaudio.AAudioStream_getState = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->aaudio.hAAudio, "AAudioStream_getState"); + pContext->aaudio.AAudioStream_waitForStateChange = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->aaudio.hAAudio, "AAudioStream_waitForStateChange"); + pContext->aaudio.AAudioStream_getFormat = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->aaudio.hAAudio, "AAudioStream_getFormat"); + pContext->aaudio.AAudioStream_getChannelCount = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->aaudio.hAAudio, "AAudioStream_getChannelCount"); + pContext->aaudio.AAudioStream_getSampleRate = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->aaudio.hAAudio, "AAudioStream_getSampleRate"); + pContext->aaudio.AAudioStream_getBufferCapacityInFrames = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->aaudio.hAAudio, "AAudioStream_getBufferCapacityInFrames"); + pContext->aaudio.AAudioStream_getFramesPerDataCallback = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->aaudio.hAAudio, "AAudioStream_getFramesPerDataCallback"); + pContext->aaudio.AAudioStream_getFramesPerBurst = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->aaudio.hAAudio, "AAudioStream_getFramesPerBurst"); + pContext->aaudio.AAudioStream_requestStart = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->aaudio.hAAudio, "AAudioStream_requestStart"); + pContext->aaudio.AAudioStream_requestStop = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->aaudio.hAAudio, "AAudioStream_requestStop"); +#else + pContext->aaudio.AAudio_createStreamBuilder = (ma_proc)AAudio_createStreamBuilder; + 
pContext->aaudio.AAudioStreamBuilder_delete = (ma_proc)AAudioStreamBuilder_delete; + pContext->aaudio.AAudioStreamBuilder_setDeviceId = (ma_proc)AAudioStreamBuilder_setDeviceId; + pContext->aaudio.AAudioStreamBuilder_setDirection = (ma_proc)AAudioStreamBuilder_setDirection; + pContext->aaudio.AAudioStreamBuilder_setSharingMode = (ma_proc)AAudioStreamBuilder_setSharingMode; + pContext->aaudio.AAudioStreamBuilder_setFormat = (ma_proc)AAudioStreamBuilder_setFormat; + pContext->aaudio.AAudioStreamBuilder_setChannelCount = (ma_proc)AAudioStreamBuilder_setChannelCount; + pContext->aaudio.AAudioStreamBuilder_setSampleRate = (ma_proc)AAudioStreamBuilder_setSampleRate; + pContext->aaudio.AAudioStreamBuilder_setBufferCapacityInFrames = (ma_proc)AAudioStreamBuilder_setBufferCapacityInFrames; + pContext->aaudio.AAudioStreamBuilder_setFramesPerDataCallback = (ma_proc)AAudioStreamBuilder_setFramesPerDataCallback; + pContext->aaudio.AAudioStreamBuilder_setDataCallback = (ma_proc)AAudioStreamBuilder_setDataCallback; + pContext->aaudio.AAudioStreamBuilder_setErrorCallback = (ma_proc)AAudioStreamBuilder_setErrorCallback; + pContext->aaudio.AAudioStreamBuilder_setPerformanceMode = (ma_proc)AAudioStreamBuilder_setPerformanceMode; + pContext->aaudio.AAudioStreamBuilder_setUsage = (ma_proc)AAudioStreamBuilder_setUsage; + pContext->aaudio.AAudioStreamBuilder_setContentType = (ma_proc)AAudioStreamBuilder_setContentType; + pContext->aaudio.AAudioStreamBuilder_setInputPreset = (ma_proc)AAudioStreamBuilder_setInputPreset; + #if defined(__ANDROID_API__) && __ANDROID_API__ >= 29 + pContext->aaudio.AAudioStreamBuilder_setAllowedCapturePolicy = (ma_proc)AAudioStreamBuilder_setAllowedCapturePolicy; + #endif + pContext->aaudio.AAudioStreamBuilder_openStream = (ma_proc)AAudioStreamBuilder_openStream; + pContext->aaudio.AAudioStream_close = (ma_proc)AAudioStream_close; + pContext->aaudio.AAudioStream_getState = (ma_proc)AAudioStream_getState; + pContext->aaudio.AAudioStream_waitForStateChange = (ma_proc)AAudioStream_waitForStateChange; + pContext->aaudio.AAudioStream_getFormat = (ma_proc)AAudioStream_getFormat; + pContext->aaudio.AAudioStream_getChannelCount = (ma_proc)AAudioStream_getChannelCount; + pContext->aaudio.AAudioStream_getSampleRate = (ma_proc)AAudioStream_getSampleRate; + pContext->aaudio.AAudioStream_getBufferCapacityInFrames = (ma_proc)AAudioStream_getBufferCapacityInFrames; + pContext->aaudio.AAudioStream_getFramesPerDataCallback = (ma_proc)AAudioStream_getFramesPerDataCallback; + pContext->aaudio.AAudioStream_getFramesPerBurst = (ma_proc)AAudioStream_getFramesPerBurst; + pContext->aaudio.AAudioStream_requestStart = (ma_proc)AAudioStream_requestStart; + pContext->aaudio.AAudioStream_requestStop = (ma_proc)AAudioStream_requestStop; +#endif + + pCallbacks->onContextInit = ma_context_init__aaudio; + pCallbacks->onContextUninit = ma_context_uninit__aaudio; + pCallbacks->onContextEnumerateDevices = ma_context_enumerate_devices__aaudio; + pCallbacks->onContextGetDeviceInfo = ma_context_get_device_info__aaudio; + pCallbacks->onDeviceInit = ma_device_init__aaudio; + pCallbacks->onDeviceUninit = ma_device_uninit__aaudio; + pCallbacks->onDeviceStart = ma_device_start__aaudio; + pCallbacks->onDeviceStop = ma_device_stop__aaudio; + pCallbacks->onDeviceRead = NULL; /* Not used because AAudio is asynchronous. */ + pCallbacks->onDeviceWrite = NULL; /* Not used because AAudio is asynchronous. */ + pCallbacks->onDeviceDataLoop = NULL; /* Not used because AAudio is asynchronous. 
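AAudio calls us back from its own thread, so there is no blocking read/write loop for miniaudio to drive with this backend.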
*/
+    pCallbacks->onDeviceGetInfo          = ma_device_get_info__aaudio;
+
+
+    /* We need a job thread so we can deal with rerouting. */
+    {
+        ma_result result;
+        ma_device_job_thread_config jobThreadConfig;
+
+        jobThreadConfig = ma_device_job_thread_config_init();
+
+        result = ma_device_job_thread_init(&jobThreadConfig, &pContext->allocationCallbacks, &pContext->aaudio.jobThread);
+        if (result != MA_SUCCESS) {
+            ma_dlclose(ma_context_get_log(pContext), pContext->aaudio.hAAudio);
+            pContext->aaudio.hAAudio = NULL;
+            return result;
+        }
+    }
+
+
+    (void)pConfig;
+    return MA_SUCCESS;
+}
+
+static ma_result ma_job_process__device__aaudio_reroute(ma_job* pJob)
+{
+    ma_result result;
+    ma_device* pDevice;
+
+    MA_ASSERT(pJob != NULL);
+
+    pDevice = (ma_device*)pJob->data.device.aaudio.reroute.pDevice;
+    MA_ASSERT(pDevice != NULL);
+
+    /* Here is where we need to reroute the device. To do this we need to uninitialize the stream and reinitialize it. */
+    result = ma_device_reinit__aaudio(pDevice, (ma_device_type)pJob->data.device.aaudio.reroute.deviceType);
+    if (result != MA_SUCCESS) {
+        /*
+        Getting here means we failed to reroute the device. The best thing I can think of here is to
+        just stop the device.
+        */
+        ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[AAudio] Stopping device due to reroute failure.");
+        ma_device_stop(pDevice);
+        return result;
+    }
+
+    return MA_SUCCESS;
+}
+#else
+/* Getting here means there is no AAudio backend so we need a no-op job implementation. */
+static ma_result ma_job_process__device__aaudio_reroute(ma_job* pJob)
+{
+    return ma_job_process__noop(pJob);
+}
+#endif  /* AAudio */
+
+
+/******************************************************************************
+
+OpenSL|ES Backend
+
+******************************************************************************/
+#ifdef MA_HAS_OPENSL
+#include <SLES/OpenSLES.h>
+#ifdef MA_ANDROID
+#include <SLES/OpenSLES_Android.h>
+#endif
+
+typedef SLresult (SLAPIENTRY * ma_slCreateEngine_proc)(SLObjectItf* pEngine, SLuint32 numOptions, SLEngineOption* pEngineOptions, SLuint32 numInterfaces, SLInterfaceID* pInterfaceIds, SLboolean* pInterfaceRequired);
+
+/* OpenSL|ES has one-per-application objects :( */
+static SLObjectItf g_maEngineObjectSL = NULL;
+static SLEngineItf g_maEngineSL = NULL;
+static ma_uint32 g_maOpenSLInitCounter = 0;
+static ma_spinlock g_maOpenSLSpinlock = 0;  /* For init/uninit. 
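This protects g_maOpenSLInitCounter so that contexts can safely reference count the shared engine objects.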
*/ + +#define MA_OPENSL_OBJ(p) (*((SLObjectItf)(p))) +#define MA_OPENSL_OUTPUTMIX(p) (*((SLOutputMixItf)(p))) +#define MA_OPENSL_PLAY(p) (*((SLPlayItf)(p))) +#define MA_OPENSL_RECORD(p) (*((SLRecordItf)(p))) + +#ifdef MA_ANDROID +#define MA_OPENSL_BUFFERQUEUE(p) (*((SLAndroidSimpleBufferQueueItf)(p))) +#else +#define MA_OPENSL_BUFFERQUEUE(p) (*((SLBufferQueueItf)(p))) +#endif + +static ma_result ma_result_from_OpenSL(SLuint32 result) +{ + switch (result) + { + case SL_RESULT_SUCCESS: return MA_SUCCESS; + case SL_RESULT_PRECONDITIONS_VIOLATED: return MA_ERROR; + case SL_RESULT_PARAMETER_INVALID: return MA_INVALID_ARGS; + case SL_RESULT_MEMORY_FAILURE: return MA_OUT_OF_MEMORY; + case SL_RESULT_RESOURCE_ERROR: return MA_INVALID_DATA; + case SL_RESULT_RESOURCE_LOST: return MA_ERROR; + case SL_RESULT_IO_ERROR: return MA_IO_ERROR; + case SL_RESULT_BUFFER_INSUFFICIENT: return MA_NO_SPACE; + case SL_RESULT_CONTENT_CORRUPTED: return MA_INVALID_DATA; + case SL_RESULT_CONTENT_UNSUPPORTED: return MA_FORMAT_NOT_SUPPORTED; + case SL_RESULT_CONTENT_NOT_FOUND: return MA_ERROR; + case SL_RESULT_PERMISSION_DENIED: return MA_ACCESS_DENIED; + case SL_RESULT_FEATURE_UNSUPPORTED: return MA_NOT_IMPLEMENTED; + case SL_RESULT_INTERNAL_ERROR: return MA_ERROR; + case SL_RESULT_UNKNOWN_ERROR: return MA_ERROR; + case SL_RESULT_OPERATION_ABORTED: return MA_ERROR; + case SL_RESULT_CONTROL_LOST: return MA_ERROR; + default: return MA_ERROR; + } +} + +/* Converts an individual OpenSL-style channel identifier (SL_SPEAKER_FRONT_LEFT, etc.) to miniaudio. */ +static ma_uint8 ma_channel_id_to_ma__opensl(SLuint32 id) +{ + switch (id) + { + case SL_SPEAKER_FRONT_LEFT: return MA_CHANNEL_FRONT_LEFT; + case SL_SPEAKER_FRONT_RIGHT: return MA_CHANNEL_FRONT_RIGHT; + case SL_SPEAKER_FRONT_CENTER: return MA_CHANNEL_FRONT_CENTER; + case SL_SPEAKER_LOW_FREQUENCY: return MA_CHANNEL_LFE; + case SL_SPEAKER_BACK_LEFT: return MA_CHANNEL_BACK_LEFT; + case SL_SPEAKER_BACK_RIGHT: return MA_CHANNEL_BACK_RIGHT; + case SL_SPEAKER_FRONT_LEFT_OF_CENTER: return MA_CHANNEL_FRONT_LEFT_CENTER; + case SL_SPEAKER_FRONT_RIGHT_OF_CENTER: return MA_CHANNEL_FRONT_RIGHT_CENTER; + case SL_SPEAKER_BACK_CENTER: return MA_CHANNEL_BACK_CENTER; + case SL_SPEAKER_SIDE_LEFT: return MA_CHANNEL_SIDE_LEFT; + case SL_SPEAKER_SIDE_RIGHT: return MA_CHANNEL_SIDE_RIGHT; + case SL_SPEAKER_TOP_CENTER: return MA_CHANNEL_TOP_CENTER; + case SL_SPEAKER_TOP_FRONT_LEFT: return MA_CHANNEL_TOP_FRONT_LEFT; + case SL_SPEAKER_TOP_FRONT_CENTER: return MA_CHANNEL_TOP_FRONT_CENTER; + case SL_SPEAKER_TOP_FRONT_RIGHT: return MA_CHANNEL_TOP_FRONT_RIGHT; + case SL_SPEAKER_TOP_BACK_LEFT: return MA_CHANNEL_TOP_BACK_LEFT; + case SL_SPEAKER_TOP_BACK_CENTER: return MA_CHANNEL_TOP_BACK_CENTER; + case SL_SPEAKER_TOP_BACK_RIGHT: return MA_CHANNEL_TOP_BACK_RIGHT; + default: return 0; + } +} + +/* Converts an individual miniaudio channel identifier (MA_CHANNEL_FRONT_LEFT, etc.) to OpenSL-style. 
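This is the inverse of ma_channel_id_to_ma__opensl() above, with the addition that MA_CHANNEL_MONO maps onto the front-center speaker bit.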
*/ +static SLuint32 ma_channel_id_to_opensl(ma_uint8 id) +{ + switch (id) + { + case MA_CHANNEL_MONO: return SL_SPEAKER_FRONT_CENTER; + case MA_CHANNEL_FRONT_LEFT: return SL_SPEAKER_FRONT_LEFT; + case MA_CHANNEL_FRONT_RIGHT: return SL_SPEAKER_FRONT_RIGHT; + case MA_CHANNEL_FRONT_CENTER: return SL_SPEAKER_FRONT_CENTER; + case MA_CHANNEL_LFE: return SL_SPEAKER_LOW_FREQUENCY; + case MA_CHANNEL_BACK_LEFT: return SL_SPEAKER_BACK_LEFT; + case MA_CHANNEL_BACK_RIGHT: return SL_SPEAKER_BACK_RIGHT; + case MA_CHANNEL_FRONT_LEFT_CENTER: return SL_SPEAKER_FRONT_LEFT_OF_CENTER; + case MA_CHANNEL_FRONT_RIGHT_CENTER: return SL_SPEAKER_FRONT_RIGHT_OF_CENTER; + case MA_CHANNEL_BACK_CENTER: return SL_SPEAKER_BACK_CENTER; + case MA_CHANNEL_SIDE_LEFT: return SL_SPEAKER_SIDE_LEFT; + case MA_CHANNEL_SIDE_RIGHT: return SL_SPEAKER_SIDE_RIGHT; + case MA_CHANNEL_TOP_CENTER: return SL_SPEAKER_TOP_CENTER; + case MA_CHANNEL_TOP_FRONT_LEFT: return SL_SPEAKER_TOP_FRONT_LEFT; + case MA_CHANNEL_TOP_FRONT_CENTER: return SL_SPEAKER_TOP_FRONT_CENTER; + case MA_CHANNEL_TOP_FRONT_RIGHT: return SL_SPEAKER_TOP_FRONT_RIGHT; + case MA_CHANNEL_TOP_BACK_LEFT: return SL_SPEAKER_TOP_BACK_LEFT; + case MA_CHANNEL_TOP_BACK_CENTER: return SL_SPEAKER_TOP_BACK_CENTER; + case MA_CHANNEL_TOP_BACK_RIGHT: return SL_SPEAKER_TOP_BACK_RIGHT; + default: return 0; + } +} + +/* Converts a channel mapping to an OpenSL-style channel mask. */ +static SLuint32 ma_channel_map_to_channel_mask__opensl(const ma_channel* pChannelMap, ma_uint32 channels) +{ + SLuint32 channelMask = 0; + ma_uint32 iChannel; + for (iChannel = 0; iChannel < channels; ++iChannel) { + channelMask |= ma_channel_id_to_opensl(pChannelMap[iChannel]); + } + + return channelMask; +} + +/* Converts an OpenSL-style channel mask to a miniaudio channel map. */ +static void ma_channel_mask_to_channel_map__opensl(SLuint32 channelMask, ma_uint32 channels, ma_channel* pChannelMap) +{ + if (channels == 1 && channelMask == 0) { + pChannelMap[0] = MA_CHANNEL_MONO; + } else if (channels == 2 && channelMask == 0) { + pChannelMap[0] = MA_CHANNEL_FRONT_LEFT; + pChannelMap[1] = MA_CHANNEL_FRONT_RIGHT; + } else { + if (channels == 1 && (channelMask & SL_SPEAKER_FRONT_CENTER) != 0) { + pChannelMap[0] = MA_CHANNEL_MONO; + } else { + /* Just iterate over each bit. */ + ma_uint32 iChannel = 0; + ma_uint32 iBit; + for (iBit = 0; iBit < 32 && iChannel < channels; ++iBit) { + SLuint32 bitValue = (channelMask & (1UL << iBit)); + if (bitValue != 0) { + /* The bit is set. */ + pChannelMap[iChannel] = ma_channel_id_to_ma__opensl(bitValue); + iChannel += 1; + } + } + } + } +} + +static SLuint32 ma_round_to_standard_sample_rate__opensl(SLuint32 samplesPerSec) +{ + if (samplesPerSec <= SL_SAMPLINGRATE_8) { + return SL_SAMPLINGRATE_8; + } + if (samplesPerSec <= SL_SAMPLINGRATE_11_025) { + return SL_SAMPLINGRATE_11_025; + } + if (samplesPerSec <= SL_SAMPLINGRATE_12) { + return SL_SAMPLINGRATE_12; + } + if (samplesPerSec <= SL_SAMPLINGRATE_16) { + return SL_SAMPLINGRATE_16; + } + if (samplesPerSec <= SL_SAMPLINGRATE_22_05) { + return SL_SAMPLINGRATE_22_05; + } + if (samplesPerSec <= SL_SAMPLINGRATE_24) { + return SL_SAMPLINGRATE_24; + } + if (samplesPerSec <= SL_SAMPLINGRATE_32) { + return SL_SAMPLINGRATE_32; + } + if (samplesPerSec <= SL_SAMPLINGRATE_44_1) { + return SL_SAMPLINGRATE_44_1; + } + if (samplesPerSec <= SL_SAMPLINGRATE_48) { + return SL_SAMPLINGRATE_48; + } + + /* Android doesn't support more than 48000. 
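The 64k through 192k tiers below are therefore compiled out on Android builds.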
*/ +#ifndef MA_ANDROID + if (samplesPerSec <= SL_SAMPLINGRATE_64) { + return SL_SAMPLINGRATE_64; + } + if (samplesPerSec <= SL_SAMPLINGRATE_88_2) { + return SL_SAMPLINGRATE_88_2; + } + if (samplesPerSec <= SL_SAMPLINGRATE_96) { + return SL_SAMPLINGRATE_96; + } + if (samplesPerSec <= SL_SAMPLINGRATE_192) { + return SL_SAMPLINGRATE_192; + } +#endif + + return SL_SAMPLINGRATE_16; +} + + +static SLint32 ma_to_stream_type__opensl(ma_opensl_stream_type streamType) +{ + switch (streamType) { + case ma_opensl_stream_type_voice: return SL_ANDROID_STREAM_VOICE; + case ma_opensl_stream_type_system: return SL_ANDROID_STREAM_SYSTEM; + case ma_opensl_stream_type_ring: return SL_ANDROID_STREAM_RING; + case ma_opensl_stream_type_media: return SL_ANDROID_STREAM_MEDIA; + case ma_opensl_stream_type_alarm: return SL_ANDROID_STREAM_ALARM; + case ma_opensl_stream_type_notification: return SL_ANDROID_STREAM_NOTIFICATION; + default: break; + } + + return SL_ANDROID_STREAM_VOICE; +} + +static SLint32 ma_to_recording_preset__opensl(ma_opensl_recording_preset recordingPreset) +{ + switch (recordingPreset) { + case ma_opensl_recording_preset_generic: return SL_ANDROID_RECORDING_PRESET_GENERIC; + case ma_opensl_recording_preset_camcorder: return SL_ANDROID_RECORDING_PRESET_CAMCORDER; + case ma_opensl_recording_preset_voice_recognition: return SL_ANDROID_RECORDING_PRESET_VOICE_RECOGNITION; + case ma_opensl_recording_preset_voice_communication: return SL_ANDROID_RECORDING_PRESET_VOICE_COMMUNICATION; + case ma_opensl_recording_preset_voice_unprocessed: return SL_ANDROID_RECORDING_PRESET_UNPROCESSED; + default: break; + } + + return SL_ANDROID_RECORDING_PRESET_NONE; +} + + +static ma_result ma_context_enumerate_devices__opensl(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData) +{ + ma_bool32 cbResult; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(callback != NULL); + + MA_ASSERT(g_maOpenSLInitCounter > 0); /* <-- If you trigger this it means you've either not initialized the context, or you've uninitialized it and then attempted to enumerate devices. */ + if (g_maOpenSLInitCounter == 0) { + return MA_INVALID_OPERATION; + } + + /* + TODO: Test Me. + + This is currently untested, so for now we are just returning default devices. + */ +#if 0 && !defined(MA_ANDROID) + ma_bool32 isTerminated = MA_FALSE; + + SLuint32 pDeviceIDs[128]; + SLint32 deviceCount = sizeof(pDeviceIDs) / sizeof(pDeviceIDs[0]); + + SLAudioIODeviceCapabilitiesItf deviceCaps; + SLresult resultSL = (*g_maEngineObjectSL)->GetInterface(g_maEngineObjectSL, (SLInterfaceID)pContext->opensl.SL_IID_AUDIOIODEVICECAPABILITIES, &deviceCaps); + if (resultSL != SL_RESULT_SUCCESS) { + /* The interface may not be supported so just report a default device. 
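In particular, Android's OpenSL|ES implementation does not support SL_IID_AUDIOIODEVICECAPABILITIES, which is part of why this block is disabled.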
*/ + goto return_default_device; + } + + /* Playback */ + if (!isTerminated) { + resultSL = (*deviceCaps)->GetAvailableAudioOutputs(deviceCaps, &deviceCount, pDeviceIDs); + if (resultSL != SL_RESULT_SUCCESS) { + return ma_result_from_OpenSL(resultSL); + } + + for (SLint32 iDevice = 0; iDevice < deviceCount; ++iDevice) { + ma_device_info deviceInfo; + MA_ZERO_OBJECT(&deviceInfo); + deviceInfo.id.opensl = pDeviceIDs[iDevice]; + + SLAudioOutputDescriptor desc; + resultSL = (*deviceCaps)->QueryAudioOutputCapabilities(deviceCaps, deviceInfo.id.opensl, &desc); + if (resultSL == SL_RESULT_SUCCESS) { + ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), (const char*)desc.pDeviceName, (size_t)-1); + + ma_bool32 cbResult = callback(pContext, ma_device_type_playback, &deviceInfo, pUserData); + if (cbResult == MA_FALSE) { + isTerminated = MA_TRUE; + break; + } + } + } + } + + /* Capture */ + if (!isTerminated) { + resultSL = (*deviceCaps)->GetAvailableAudioInputs(deviceCaps, &deviceCount, pDeviceIDs); + if (resultSL != SL_RESULT_SUCCESS) { + return ma_result_from_OpenSL(resultSL); + } + + for (SLint32 iDevice = 0; iDevice < deviceCount; ++iDevice) { + ma_device_info deviceInfo; + MA_ZERO_OBJECT(&deviceInfo); + deviceInfo.id.opensl = pDeviceIDs[iDevice]; + + SLAudioInputDescriptor desc; + resultSL = (*deviceCaps)->QueryAudioInputCapabilities(deviceCaps, deviceInfo.id.opensl, &desc); + if (resultSL == SL_RESULT_SUCCESS) { + ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), (const char*)desc.deviceName, (size_t)-1); + + ma_bool32 cbResult = callback(pContext, ma_device_type_capture, &deviceInfo, pUserData); + if (cbResult == MA_FALSE) { + isTerminated = MA_TRUE; + break; + } + } + } + } + + return MA_SUCCESS; +#else + goto return_default_device; +#endif + +return_default_device:; + cbResult = MA_TRUE; + + /* Playback. */ + if (cbResult) { + ma_device_info deviceInfo; + MA_ZERO_OBJECT(&deviceInfo); + deviceInfo.id.opensl = SL_DEFAULTDEVICEID_AUDIOOUTPUT; + ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1); + cbResult = callback(pContext, ma_device_type_playback, &deviceInfo, pUserData); + } + + /* Capture. 
*/ + if (cbResult) { + ma_device_info deviceInfo; + MA_ZERO_OBJECT(&deviceInfo); + deviceInfo.id.opensl = SL_DEFAULTDEVICEID_AUDIOINPUT; + ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1); + cbResult = callback(pContext, ma_device_type_capture, &deviceInfo, pUserData); + } + + return MA_SUCCESS; +} + +static void ma_context_add_data_format_ex__opensl(ma_context* pContext, ma_format format, ma_uint32 channels, ma_uint32 sampleRate, ma_device_info* pDeviceInfo) +{ + MA_ASSERT(pContext != NULL); + MA_ASSERT(pDeviceInfo != NULL); + + pDeviceInfo->nativeDataFormats[pDeviceInfo->nativeDataFormatCount].format = format; + pDeviceInfo->nativeDataFormats[pDeviceInfo->nativeDataFormatCount].channels = channels; + pDeviceInfo->nativeDataFormats[pDeviceInfo->nativeDataFormatCount].sampleRate = sampleRate; + pDeviceInfo->nativeDataFormats[pDeviceInfo->nativeDataFormatCount].flags = 0; + pDeviceInfo->nativeDataFormatCount += 1; +} + +static void ma_context_add_data_format__opensl(ma_context* pContext, ma_format format, ma_device_info* pDeviceInfo) +{ + ma_uint32 minChannels = 1; + ma_uint32 maxChannels = 2; + ma_uint32 minSampleRate = (ma_uint32)ma_standard_sample_rate_8000; + ma_uint32 maxSampleRate = (ma_uint32)ma_standard_sample_rate_48000; + ma_uint32 iChannel; + ma_uint32 iSampleRate; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(pDeviceInfo != NULL); + + /* + Each sample format can support mono and stereo, and we'll support a small subset of standard + rates (up to 48000). A better solution would be to somehow find a native sample rate. + */ + for (iChannel = minChannels; iChannel < maxChannels; iChannel += 1) { + for (iSampleRate = 0; iSampleRate < ma_countof(g_maStandardSampleRatePriorities); iSampleRate += 1) { + ma_uint32 standardSampleRate = g_maStandardSampleRatePriorities[iSampleRate]; + if (standardSampleRate >= minSampleRate && standardSampleRate <= maxSampleRate) { + ma_context_add_data_format_ex__opensl(pContext, format, iChannel, standardSampleRate, pDeviceInfo); + } + } + } +} + +static ma_result ma_context_get_device_info__opensl(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_device_info* pDeviceInfo) +{ + MA_ASSERT(pContext != NULL); + + MA_ASSERT(g_maOpenSLInitCounter > 0); /* <-- If you trigger this it means you've either not initialized the context, or you've uninitialized it and then attempted to get device info. */ + if (g_maOpenSLInitCounter == 0) { + return MA_INVALID_OPERATION; + } + + /* + TODO: Test Me. + + This is currently untested, so for now we are just returning default devices. + */ +#if 0 && !defined(MA_ANDROID) + SLAudioIODeviceCapabilitiesItf deviceCaps; + SLresult resultSL = (*g_maEngineObjectSL)->GetInterface(g_maEngineObjectSL, (SLInterfaceID)pContext->opensl.SL_IID_AUDIOIODEVICECAPABILITIES, &deviceCaps); + if (resultSL != SL_RESULT_SUCCESS) { + /* The interface may not be supported so just report a default device. 
*/ + goto return_default_device; + } + + if (deviceType == ma_device_type_playback) { + SLAudioOutputDescriptor desc; + resultSL = (*deviceCaps)->QueryAudioOutputCapabilities(deviceCaps, pDeviceID->opensl, &desc); + if (resultSL != SL_RESULT_SUCCESS) { + return ma_result_from_OpenSL(resultSL); + } + + ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), (const char*)desc.pDeviceName, (size_t)-1); + } else { + SLAudioInputDescriptor desc; + resultSL = (*deviceCaps)->QueryAudioInputCapabilities(deviceCaps, pDeviceID->opensl, &desc); + if (resultSL != SL_RESULT_SUCCESS) { + return ma_result_from_OpenSL(resultSL); + } + + ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), (const char*)desc.deviceName, (size_t)-1); + } + + goto return_detailed_info; +#else + goto return_default_device; +#endif + +return_default_device: + if (pDeviceID != NULL) { + if ((deviceType == ma_device_type_playback && pDeviceID->opensl != SL_DEFAULTDEVICEID_AUDIOOUTPUT) || + (deviceType == ma_device_type_capture && pDeviceID->opensl != SL_DEFAULTDEVICEID_AUDIOINPUT)) { + return MA_NO_DEVICE; /* Don't know the device. */ + } + } + + /* ID and Name / Description */ + if (deviceType == ma_device_type_playback) { + pDeviceInfo->id.opensl = SL_DEFAULTDEVICEID_AUDIOOUTPUT; + ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1); + } else { + pDeviceInfo->id.opensl = SL_DEFAULTDEVICEID_AUDIOINPUT; + ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1); + } + + pDeviceInfo->isDefault = MA_TRUE; + + goto return_detailed_info; + + +return_detailed_info: + + /* + For now we're just outputting a set of values that are supported by the API but not necessarily supported + by the device natively. Later on we should work on this so that it more closely reflects the device's + actual native format. + */ + pDeviceInfo->nativeDataFormatCount = 0; +#if defined(MA_ANDROID) && __ANDROID_API__ >= 21 + ma_context_add_data_format__opensl(pContext, ma_format_f32, pDeviceInfo); +#endif + ma_context_add_data_format__opensl(pContext, ma_format_s16, pDeviceInfo); + ma_context_add_data_format__opensl(pContext, ma_format_u8, pDeviceInfo); + + return MA_SUCCESS; +} + + +#ifdef MA_ANDROID +/*void ma_buffer_queue_callback_capture__opensl_android(SLAndroidSimpleBufferQueueItf pBufferQueue, SLuint32 eventFlags, const void* pBuffer, SLuint32 bufferSize, SLuint32 dataUsed, void* pContext)*/ +static void ma_buffer_queue_callback_capture__opensl_android(SLAndroidSimpleBufferQueueItf pBufferQueue, void* pUserData) +{ + ma_device* pDevice = (ma_device*)pUserData; + size_t periodSizeInBytes; + ma_uint8* pBuffer; + SLresult resultSL; + + MA_ASSERT(pDevice != NULL); + + (void)pBufferQueue; + + /* + For now, don't do anything unless the buffer was fully processed. From what I can tell, it looks like + OpenSL|ES 1.1 improves on buffer queues to the point that we could much more intelligently handle this, + but unfortunately it looks like Android is only supporting OpenSL|ES 1.0.1 for now :( + */ + + /* Don't do anything if the device is not started. */ + if (ma_device_get_state(pDevice) != ma_device_state_started) { + return; + } + + /* Don't do anything if the device is being drained. 
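The draining flag is set while the device is stopping so that we stop re-enqueueing buffers and let OpenSL play out whatever is already queued.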
*/ + if (pDevice->opensl.isDrainingCapture) { + return; + } + + periodSizeInBytes = pDevice->capture.internalPeriodSizeInFrames * ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels); + pBuffer = pDevice->opensl.pBufferCapture + (pDevice->opensl.currentBufferIndexCapture * periodSizeInBytes); + + ma_device_handle_backend_data_callback(pDevice, NULL, pBuffer, pDevice->capture.internalPeriodSizeInFrames); + + resultSL = MA_OPENSL_BUFFERQUEUE(pDevice->opensl.pBufferQueueCapture)->Enqueue((SLAndroidSimpleBufferQueueItf)pDevice->opensl.pBufferQueueCapture, pBuffer, periodSizeInBytes); + if (resultSL != SL_RESULT_SUCCESS) { + return; + } + + pDevice->opensl.currentBufferIndexCapture = (pDevice->opensl.currentBufferIndexCapture + 1) % pDevice->capture.internalPeriods; +} + +static void ma_buffer_queue_callback_playback__opensl_android(SLAndroidSimpleBufferQueueItf pBufferQueue, void* pUserData) +{ + ma_device* pDevice = (ma_device*)pUserData; + size_t periodSizeInBytes; + ma_uint8* pBuffer; + SLresult resultSL; + + MA_ASSERT(pDevice != NULL); + + (void)pBufferQueue; + + /* Don't do anything if the device is not started. */ + if (ma_device_get_state(pDevice) != ma_device_state_started) { + return; + } + + /* Don't do anything if the device is being drained. */ + if (pDevice->opensl.isDrainingPlayback) { + return; + } + + periodSizeInBytes = pDevice->playback.internalPeriodSizeInFrames * ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels); + pBuffer = pDevice->opensl.pBufferPlayback + (pDevice->opensl.currentBufferIndexPlayback * periodSizeInBytes); + + ma_device_handle_backend_data_callback(pDevice, pBuffer, NULL, pDevice->playback.internalPeriodSizeInFrames); + + resultSL = MA_OPENSL_BUFFERQUEUE(pDevice->opensl.pBufferQueuePlayback)->Enqueue((SLAndroidSimpleBufferQueueItf)pDevice->opensl.pBufferQueuePlayback, pBuffer, periodSizeInBytes); + if (resultSL != SL_RESULT_SUCCESS) { + return; + } + + pDevice->opensl.currentBufferIndexPlayback = (pDevice->opensl.currentBufferIndexPlayback + 1) % pDevice->playback.internalPeriods; +} +#endif + +static ma_result ma_device_uninit__opensl(ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + + MA_ASSERT(g_maOpenSLInitCounter > 0); /* <-- If you trigger this it means you've either not initialized the context, or you've uninitialized it before uninitializing the device. 
*/ + if (g_maOpenSLInitCounter == 0) { + return MA_INVALID_OPERATION; + } + + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + if (pDevice->opensl.pAudioRecorderObj) { + MA_OPENSL_OBJ(pDevice->opensl.pAudioRecorderObj)->Destroy((SLObjectItf)pDevice->opensl.pAudioRecorderObj); + } + + ma_free(pDevice->opensl.pBufferCapture, &pDevice->pContext->allocationCallbacks); + } + + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + if (pDevice->opensl.pAudioPlayerObj) { + MA_OPENSL_OBJ(pDevice->opensl.pAudioPlayerObj)->Destroy((SLObjectItf)pDevice->opensl.pAudioPlayerObj); + } + if (pDevice->opensl.pOutputMixObj) { + MA_OPENSL_OBJ(pDevice->opensl.pOutputMixObj)->Destroy((SLObjectItf)pDevice->opensl.pOutputMixObj); + } + + ma_free(pDevice->opensl.pBufferPlayback, &pDevice->pContext->allocationCallbacks); + } + + return MA_SUCCESS; +} + +#if defined(MA_ANDROID) && __ANDROID_API__ >= 21 +typedef SLAndroidDataFormat_PCM_EX ma_SLDataFormat_PCM; +#else +typedef SLDataFormat_PCM ma_SLDataFormat_PCM; +#endif + +static ma_result ma_SLDataFormat_PCM_init__opensl(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, const ma_channel* channelMap, ma_SLDataFormat_PCM* pDataFormat) +{ + /* We need to convert our format/channels/rate so that they aren't set to default. */ + if (format == ma_format_unknown) { + format = MA_DEFAULT_FORMAT; + } + if (channels == 0) { + channels = MA_DEFAULT_CHANNELS; + } + if (sampleRate == 0) { + sampleRate = MA_DEFAULT_SAMPLE_RATE; + } + +#if defined(MA_ANDROID) && __ANDROID_API__ >= 21 + if (format == ma_format_f32) { + pDataFormat->formatType = SL_ANDROID_DATAFORMAT_PCM_EX; + pDataFormat->representation = SL_ANDROID_PCM_REPRESENTATION_FLOAT; + } else { + pDataFormat->formatType = SL_DATAFORMAT_PCM; + } +#else + pDataFormat->formatType = SL_DATAFORMAT_PCM; +#endif + + pDataFormat->numChannels = channels; + ((SLDataFormat_PCM*)pDataFormat)->samplesPerSec = ma_round_to_standard_sample_rate__opensl(sampleRate * 1000); /* In millihertz. Annoyingly, the sample rate variable is named differently between SLAndroidDataFormat_PCM_EX and SLDataFormat_PCM */ + pDataFormat->bitsPerSample = ma_get_bytes_per_sample(format) * 8; + pDataFormat->channelMask = ma_channel_map_to_channel_mask__opensl(channelMap, channels); + pDataFormat->endianness = (ma_is_little_endian()) ? SL_BYTEORDER_LITTLEENDIAN : SL_BYTEORDER_BIGENDIAN; + + /* + Android has a few restrictions on the format as documented here: https://developer.android.com/ndk/guides/audio/opensl-for-android.html + - Only mono and stereo is supported. + - Only u8 and s16 formats are supported. + - Maximum sample rate of 48000. + */ +#ifdef MA_ANDROID + if (pDataFormat->numChannels > 2) { + pDataFormat->numChannels = 2; + } +#if __ANDROID_API__ >= 21 + if (pDataFormat->formatType == SL_ANDROID_DATAFORMAT_PCM_EX) { + /* It's floating point. */ + MA_ASSERT(pDataFormat->representation == SL_ANDROID_PCM_REPRESENTATION_FLOAT); + if (pDataFormat->bitsPerSample > 32) { + pDataFormat->bitsPerSample = 32; + } + } else { + if (pDataFormat->bitsPerSample > 16) { + pDataFormat->bitsPerSample = 16; + } + } +#else + if (pDataFormat->bitsPerSample > 16) { + pDataFormat->bitsPerSample = 16; + } +#endif + if (((SLDataFormat_PCM*)pDataFormat)->samplesPerSec > SL_SAMPLINGRATE_48) { + ((SLDataFormat_PCM*)pDataFormat)->samplesPerSec = SL_SAMPLINGRATE_48; + } +#endif + + pDataFormat->containerSize = pDataFormat->bitsPerSample; /* Always tightly packed for now. 
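OpenSL allows the container to be wider than the sample for padded formats, but we always keep them equal.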
*/ + + return MA_SUCCESS; +} + +static ma_result ma_deconstruct_SLDataFormat_PCM__opensl(ma_SLDataFormat_PCM* pDataFormat, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate, ma_channel* pChannelMap, size_t channelMapCap) +{ + ma_bool32 isFloatingPoint = MA_FALSE; +#if defined(MA_ANDROID) && __ANDROID_API__ >= 21 + if (pDataFormat->formatType == SL_ANDROID_DATAFORMAT_PCM_EX) { + MA_ASSERT(pDataFormat->representation == SL_ANDROID_PCM_REPRESENTATION_FLOAT); + isFloatingPoint = MA_TRUE; + } +#endif + if (isFloatingPoint) { + if (pDataFormat->bitsPerSample == 32) { + *pFormat = ma_format_f32; + } + } else { + if (pDataFormat->bitsPerSample == 8) { + *pFormat = ma_format_u8; + } else if (pDataFormat->bitsPerSample == 16) { + *pFormat = ma_format_s16; + } else if (pDataFormat->bitsPerSample == 24) { + *pFormat = ma_format_s24; + } else if (pDataFormat->bitsPerSample == 32) { + *pFormat = ma_format_s32; + } + } + + *pChannels = pDataFormat->numChannels; + *pSampleRate = ((SLDataFormat_PCM*)pDataFormat)->samplesPerSec / 1000; + ma_channel_mask_to_channel_map__opensl(pDataFormat->channelMask, ma_min(pDataFormat->numChannels, channelMapCap), pChannelMap); + + return MA_SUCCESS; +} + +static ma_result ma_device_init__opensl(ma_device* pDevice, const ma_device_config* pConfig, ma_device_descriptor* pDescriptorPlayback, ma_device_descriptor* pDescriptorCapture) +{ +#ifdef MA_ANDROID + SLDataLocator_AndroidSimpleBufferQueue queue; + SLresult resultSL; + size_t bufferSizeInBytes; + SLInterfaceID itfIDs[2]; + const SLboolean itfIDsRequired[] = { + SL_BOOLEAN_TRUE, /* SL_IID_ANDROIDSIMPLEBUFFERQUEUE */ + SL_BOOLEAN_FALSE /* SL_IID_ANDROIDCONFIGURATION */ + }; +#endif + + MA_ASSERT(g_maOpenSLInitCounter > 0); /* <-- If you trigger this it means you've either not initialized the context, or you've uninitialized it and then attempted to initialize a new device. */ + if (g_maOpenSLInitCounter == 0) { + return MA_INVALID_OPERATION; + } + + if (pConfig->deviceType == ma_device_type_loopback) { + return MA_DEVICE_TYPE_NOT_SUPPORTED; + } + + /* + For now, only supporting Android implementations of OpenSL|ES since that's the only one I've + been able to test with and I currently depend on Android-specific extensions (simple buffer + queues). + */ +#ifdef MA_ANDROID + itfIDs[0] = (SLInterfaceID)pDevice->pContext->opensl.SL_IID_ANDROIDSIMPLEBUFFERQUEUE; + itfIDs[1] = (SLInterfaceID)pDevice->pContext->opensl.SL_IID_ANDROIDCONFIGURATION; + + /* No exclusive mode with OpenSL|ES. */ + if (((pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) && pDescriptorPlayback->shareMode == ma_share_mode_exclusive) || + ((pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) && pDescriptorCapture->shareMode == ma_share_mode_exclusive)) { + return MA_SHARE_MODE_NOT_SUPPORTED; + } + + /* Now we can start initializing the device properly. 
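+    The OpenSL|ES object lifecycle used below is the same for both the recorder and the
+    player: Create*() the object, optionally SetConfiguration() through the Android
+    configuration interface, Realize() it, then GetInterface() for each interface we need
+    (record/play state and the buffer queue), and finally RegisterCallback() on the buffer
+    queue. Destroy() in ma_device_uninit__opensl() reverses all of this.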
 */
+    MA_ASSERT(pDevice != NULL);
+    MA_ZERO_OBJECT(&pDevice->opensl);
+
+    queue.locatorType = SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE;
+
+    if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) {
+        ma_SLDataFormat_PCM pcm;
+        SLDataLocator_IODevice locatorDevice;
+        SLDataSource source;
+        SLDataSink sink;
+        SLAndroidConfigurationItf pRecorderConfig;
+
+        ma_SLDataFormat_PCM_init__opensl(pDescriptorCapture->format, pDescriptorCapture->channels, pDescriptorCapture->sampleRate, pDescriptorCapture->channelMap, &pcm);
+
+        locatorDevice.locatorType = SL_DATALOCATOR_IODEVICE;
+        locatorDevice.deviceType = SL_IODEVICE_AUDIOINPUT;
+        locatorDevice.deviceID = SL_DEFAULTDEVICEID_AUDIOINPUT;   /* Must always use the default device with Android. */
+        locatorDevice.device = NULL;
+
+        source.pLocator = &locatorDevice;
+        source.pFormat = NULL;
+
+        queue.numBuffers = pDescriptorCapture->periodCount;
+
+        sink.pLocator = &queue;
+        sink.pFormat = (SLDataFormat_PCM*)&pcm;
+
+        resultSL = (*g_maEngineSL)->CreateAudioRecorder(g_maEngineSL, (SLObjectItf*)&pDevice->opensl.pAudioRecorderObj, &source, &sink, ma_countof(itfIDs), itfIDs, itfIDsRequired);
+        if (resultSL == SL_RESULT_CONTENT_UNSUPPORTED || resultSL == SL_RESULT_PARAMETER_INVALID) {
+            /* Unsupported format. Fall back to something safer and try again. If this fails, just abort. */
+            pcm.formatType = SL_DATAFORMAT_PCM;
+            pcm.numChannels = 1;
+            ((SLDataFormat_PCM*)&pcm)->samplesPerSec = SL_SAMPLINGRATE_16;  /* The name of the sample rate variable is different between SLAndroidDataFormat_PCM_EX and SLDataFormat_PCM. */
+            pcm.bitsPerSample = 16;
+            pcm.containerSize = pcm.bitsPerSample;  /* Always tightly packed for now. */
+            pcm.channelMask = 0;
+            resultSL = (*g_maEngineSL)->CreateAudioRecorder(g_maEngineSL, (SLObjectItf*)&pDevice->opensl.pAudioRecorderObj, &source, &sink, ma_countof(itfIDs), itfIDs, itfIDsRequired);
+        }
+
+        if (resultSL != SL_RESULT_SUCCESS) {
+            ma_device_uninit__opensl(pDevice);
+            ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to create audio recorder.");
+            return ma_result_from_OpenSL(resultSL);
+        }
+
+
+        /* Set the recording preset before realizing the recorder. */
+        if (pConfig->opensl.recordingPreset != ma_opensl_recording_preset_default) {
+            resultSL = MA_OPENSL_OBJ(pDevice->opensl.pAudioRecorderObj)->GetInterface((SLObjectItf)pDevice->opensl.pAudioRecorderObj, (SLInterfaceID)pDevice->pContext->opensl.SL_IID_ANDROIDCONFIGURATION, &pRecorderConfig);
+            if (resultSL == SL_RESULT_SUCCESS) {
+                SLint32 recordingPreset = ma_to_recording_preset__opensl(pConfig->opensl.recordingPreset);
+                resultSL = (*pRecorderConfig)->SetConfiguration(pRecorderConfig, SL_ANDROID_KEY_RECORDING_PRESET, &recordingPreset, sizeof(SLint32));
+                if (resultSL != SL_RESULT_SUCCESS) {
+                    /* Failed to set the configuration. Just keep going.
*/ + } + } + } + + resultSL = MA_OPENSL_OBJ(pDevice->opensl.pAudioRecorderObj)->Realize((SLObjectItf)pDevice->opensl.pAudioRecorderObj, SL_BOOLEAN_FALSE); + if (resultSL != SL_RESULT_SUCCESS) { + ma_device_uninit__opensl(pDevice); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to realize audio recorder."); + return ma_result_from_OpenSL(resultSL); + } + + resultSL = MA_OPENSL_OBJ(pDevice->opensl.pAudioRecorderObj)->GetInterface((SLObjectItf)pDevice->opensl.pAudioRecorderObj, (SLInterfaceID)pDevice->pContext->opensl.SL_IID_RECORD, &pDevice->opensl.pAudioRecorder); + if (resultSL != SL_RESULT_SUCCESS) { + ma_device_uninit__opensl(pDevice); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to retrieve SL_IID_RECORD interface."); + return ma_result_from_OpenSL(resultSL); + } + + resultSL = MA_OPENSL_OBJ(pDevice->opensl.pAudioRecorderObj)->GetInterface((SLObjectItf)pDevice->opensl.pAudioRecorderObj, (SLInterfaceID)pDevice->pContext->opensl.SL_IID_ANDROIDSIMPLEBUFFERQUEUE, &pDevice->opensl.pBufferQueueCapture); + if (resultSL != SL_RESULT_SUCCESS) { + ma_device_uninit__opensl(pDevice); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to retrieve SL_IID_ANDROIDSIMPLEBUFFERQUEUE interface."); + return ma_result_from_OpenSL(resultSL); + } + + resultSL = MA_OPENSL_BUFFERQUEUE(pDevice->opensl.pBufferQueueCapture)->RegisterCallback((SLAndroidSimpleBufferQueueItf)pDevice->opensl.pBufferQueueCapture, ma_buffer_queue_callback_capture__opensl_android, pDevice); + if (resultSL != SL_RESULT_SUCCESS) { + ma_device_uninit__opensl(pDevice); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to register buffer queue callback."); + return ma_result_from_OpenSL(resultSL); + } + + /* The internal format is determined by the "pcm" object. */ + ma_deconstruct_SLDataFormat_PCM__opensl(&pcm, &pDescriptorCapture->format, &pDescriptorCapture->channels, &pDescriptorCapture->sampleRate, pDescriptorCapture->channelMap, ma_countof(pDescriptorCapture->channelMap)); + + /* Buffer. 
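+        One contiguous allocation holds every period back-to-back, and the capture callback
+        walks it using currentBufferIndexCapture. As a worked example (hypothetical values,
+        not taken from the code): s16 stereo is 4 bytes per frame, so a 480 frame period with
+        3 periods allocates 480 * 4 * 3 = 5760 bytes.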
*/ + pDescriptorCapture->periodSizeInFrames = ma_calculate_buffer_size_in_frames_from_descriptor(pDescriptorCapture, pDescriptorCapture->sampleRate, pConfig->performanceProfile); + pDevice->opensl.currentBufferIndexCapture = 0; + + bufferSizeInBytes = pDescriptorCapture->periodSizeInFrames * ma_get_bytes_per_frame(pDescriptorCapture->format, pDescriptorCapture->channels) * pDescriptorCapture->periodCount; + pDevice->opensl.pBufferCapture = (ma_uint8*)ma_calloc(bufferSizeInBytes, &pDevice->pContext->allocationCallbacks); + if (pDevice->opensl.pBufferCapture == NULL) { + ma_device_uninit__opensl(pDevice); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to allocate memory for data buffer."); + return MA_OUT_OF_MEMORY; + } + MA_ZERO_MEMORY(pDevice->opensl.pBufferCapture, bufferSizeInBytes); + } + + if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) { + ma_SLDataFormat_PCM pcm; + SLDataSource source; + SLDataLocator_OutputMix outmixLocator; + SLDataSink sink; + SLAndroidConfigurationItf pPlayerConfig; + + ma_SLDataFormat_PCM_init__opensl(pDescriptorPlayback->format, pDescriptorPlayback->channels, pDescriptorPlayback->sampleRate, pDescriptorPlayback->channelMap, &pcm); + + resultSL = (*g_maEngineSL)->CreateOutputMix(g_maEngineSL, (SLObjectItf*)&pDevice->opensl.pOutputMixObj, 0, NULL, NULL); + if (resultSL != SL_RESULT_SUCCESS) { + ma_device_uninit__opensl(pDevice); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to create output mix."); + return ma_result_from_OpenSL(resultSL); + } + + resultSL = MA_OPENSL_OBJ(pDevice->opensl.pOutputMixObj)->Realize((SLObjectItf)pDevice->opensl.pOutputMixObj, SL_BOOLEAN_FALSE); + if (resultSL != SL_RESULT_SUCCESS) { + ma_device_uninit__opensl(pDevice); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to realize output mix object."); + return ma_result_from_OpenSL(resultSL); + } + + resultSL = MA_OPENSL_OBJ(pDevice->opensl.pOutputMixObj)->GetInterface((SLObjectItf)pDevice->opensl.pOutputMixObj, (SLInterfaceID)pDevice->pContext->opensl.SL_IID_OUTPUTMIX, &pDevice->opensl.pOutputMix); + if (resultSL != SL_RESULT_SUCCESS) { + ma_device_uninit__opensl(pDevice); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to retrieve SL_IID_OUTPUTMIX interface."); + return ma_result_from_OpenSL(resultSL); + } + + /* Set the output device. */ + if (pDescriptorPlayback->pDeviceID != NULL) { + SLuint32 deviceID_OpenSL = pDescriptorPlayback->pDeviceID->opensl; + MA_OPENSL_OUTPUTMIX(pDevice->opensl.pOutputMix)->ReRoute((SLOutputMixItf)pDevice->opensl.pOutputMix, 1, &deviceID_OpenSL); + } + + queue.numBuffers = pDescriptorPlayback->periodCount; + + source.pLocator = &queue; + source.pFormat = (SLDataFormat_PCM*)&pcm; + + outmixLocator.locatorType = SL_DATALOCATOR_OUTPUTMIX; + outmixLocator.outputMix = (SLObjectItf)pDevice->opensl.pOutputMixObj; + + sink.pLocator = &outmixLocator; + sink.pFormat = NULL; + + resultSL = (*g_maEngineSL)->CreateAudioPlayer(g_maEngineSL, (SLObjectItf*)&pDevice->opensl.pAudioPlayerObj, &source, &sink, ma_countof(itfIDs), itfIDs, itfIDsRequired); + if (resultSL == SL_RESULT_CONTENT_UNSUPPORTED || resultSL == SL_RESULT_PARAMETER_INVALID) { + /* Unsupported format. Fall back to something safer and try again. If this fails, just abort. 
*/ + pcm.formatType = SL_DATAFORMAT_PCM; + pcm.numChannels = 2; + ((SLDataFormat_PCM*)&pcm)->samplesPerSec = SL_SAMPLINGRATE_16; + pcm.bitsPerSample = 16; + pcm.containerSize = pcm.bitsPerSample; /* Always tightly packed for now. */ + pcm.channelMask = SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT; + resultSL = (*g_maEngineSL)->CreateAudioPlayer(g_maEngineSL, (SLObjectItf*)&pDevice->opensl.pAudioPlayerObj, &source, &sink, ma_countof(itfIDs), itfIDs, itfIDsRequired); + } + + if (resultSL != SL_RESULT_SUCCESS) { + ma_device_uninit__opensl(pDevice); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to create audio player."); + return ma_result_from_OpenSL(resultSL); + } + + + /* Set the stream type before realizing the player. */ + if (pConfig->opensl.streamType != ma_opensl_stream_type_default) { + resultSL = MA_OPENSL_OBJ(pDevice->opensl.pAudioPlayerObj)->GetInterface((SLObjectItf)pDevice->opensl.pAudioPlayerObj, (SLInterfaceID)pDevice->pContext->opensl.SL_IID_ANDROIDCONFIGURATION, &pPlayerConfig); + if (resultSL == SL_RESULT_SUCCESS) { + SLint32 streamType = ma_to_stream_type__opensl(pConfig->opensl.streamType); + resultSL = (*pPlayerConfig)->SetConfiguration(pPlayerConfig, SL_ANDROID_KEY_STREAM_TYPE, &streamType, sizeof(SLint32)); + if (resultSL != SL_RESULT_SUCCESS) { + /* Failed to set the configuration. Just keep going. */ + } + } + } + + resultSL = MA_OPENSL_OBJ(pDevice->opensl.pAudioPlayerObj)->Realize((SLObjectItf)pDevice->opensl.pAudioPlayerObj, SL_BOOLEAN_FALSE); + if (resultSL != SL_RESULT_SUCCESS) { + ma_device_uninit__opensl(pDevice); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to realize audio player."); + return ma_result_from_OpenSL(resultSL); + } + + resultSL = MA_OPENSL_OBJ(pDevice->opensl.pAudioPlayerObj)->GetInterface((SLObjectItf)pDevice->opensl.pAudioPlayerObj, (SLInterfaceID)pDevice->pContext->opensl.SL_IID_PLAY, &pDevice->opensl.pAudioPlayer); + if (resultSL != SL_RESULT_SUCCESS) { + ma_device_uninit__opensl(pDevice); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to retrieve SL_IID_PLAY interface."); + return ma_result_from_OpenSL(resultSL); + } + + resultSL = MA_OPENSL_OBJ(pDevice->opensl.pAudioPlayerObj)->GetInterface((SLObjectItf)pDevice->opensl.pAudioPlayerObj, (SLInterfaceID)pDevice->pContext->opensl.SL_IID_ANDROIDSIMPLEBUFFERQUEUE, &pDevice->opensl.pBufferQueuePlayback); + if (resultSL != SL_RESULT_SUCCESS) { + ma_device_uninit__opensl(pDevice); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to retrieve SL_IID_ANDROIDSIMPLEBUFFERQUEUE interface."); + return ma_result_from_OpenSL(resultSL); + } + + resultSL = MA_OPENSL_BUFFERQUEUE(pDevice->opensl.pBufferQueuePlayback)->RegisterCallback((SLAndroidSimpleBufferQueueItf)pDevice->opensl.pBufferQueuePlayback, ma_buffer_queue_callback_playback__opensl_android, pDevice); + if (resultSL != SL_RESULT_SUCCESS) { + ma_device_uninit__opensl(pDevice); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to register buffer queue callback."); + return ma_result_from_OpenSL(resultSL); + } + + /* The internal format is determined by the "pcm" object. */ + ma_deconstruct_SLDataFormat_PCM__opensl(&pcm, &pDescriptorPlayback->format, &pDescriptorPlayback->channels, &pDescriptorPlayback->sampleRate, pDescriptorPlayback->channelMap, ma_countof(pDescriptorPlayback->channelMap)); + + /* Buffer. 
*/ + pDescriptorPlayback->periodSizeInFrames = ma_calculate_buffer_size_in_frames_from_descriptor(pDescriptorPlayback, pDescriptorPlayback->sampleRate, pConfig->performanceProfile); + pDevice->opensl.currentBufferIndexPlayback = 0; + + bufferSizeInBytes = pDescriptorPlayback->periodSizeInFrames * ma_get_bytes_per_frame(pDescriptorPlayback->format, pDescriptorPlayback->channels) * pDescriptorPlayback->periodCount; + pDevice->opensl.pBufferPlayback = (ma_uint8*)ma_calloc(bufferSizeInBytes, &pDevice->pContext->allocationCallbacks); + if (pDevice->opensl.pBufferPlayback == NULL) { + ma_device_uninit__opensl(pDevice); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to allocate memory for data buffer."); + return MA_OUT_OF_MEMORY; + } + MA_ZERO_MEMORY(pDevice->opensl.pBufferPlayback, bufferSizeInBytes); + } + + return MA_SUCCESS; +#else + return MA_NO_BACKEND; /* Non-Android implementations are not supported. */ +#endif +} + +static ma_result ma_device_start__opensl(ma_device* pDevice) +{ + SLresult resultSL; + size_t periodSizeInBytes; + ma_uint32 iPeriod; + + MA_ASSERT(pDevice != NULL); + + MA_ASSERT(g_maOpenSLInitCounter > 0); /* <-- If you trigger this it means you've either not initialized the context, or you've uninitialized it and then attempted to start the device. */ + if (g_maOpenSLInitCounter == 0) { + return MA_INVALID_OPERATION; + } + + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + resultSL = MA_OPENSL_RECORD(pDevice->opensl.pAudioRecorder)->SetRecordState((SLRecordItf)pDevice->opensl.pAudioRecorder, SL_RECORDSTATE_RECORDING); + if (resultSL != SL_RESULT_SUCCESS) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to start internal capture device."); + return ma_result_from_OpenSL(resultSL); + } + + periodSizeInBytes = pDevice->capture.internalPeriodSizeInFrames * ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels); + for (iPeriod = 0; iPeriod < pDevice->capture.internalPeriods; ++iPeriod) { + resultSL = MA_OPENSL_BUFFERQUEUE(pDevice->opensl.pBufferQueueCapture)->Enqueue((SLAndroidSimpleBufferQueueItf)pDevice->opensl.pBufferQueueCapture, pDevice->opensl.pBufferCapture + (periodSizeInBytes * iPeriod), periodSizeInBytes); + if (resultSL != SL_RESULT_SUCCESS) { + MA_OPENSL_RECORD(pDevice->opensl.pAudioRecorder)->SetRecordState((SLRecordItf)pDevice->opensl.pAudioRecorder, SL_RECORDSTATE_STOPPED); + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to enqueue buffer for capture device."); + return ma_result_from_OpenSL(resultSL); + } + } + } + + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + resultSL = MA_OPENSL_PLAY(pDevice->opensl.pAudioPlayer)->SetPlayState((SLPlayItf)pDevice->opensl.pAudioPlayer, SL_PLAYSTATE_PLAYING); + if (resultSL != SL_RESULT_SUCCESS) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to start internal playback device."); + return ma_result_from_OpenSL(resultSL); + } + + /* In playback mode (no duplex) we need to load some initial buffers. In duplex mode we need to enqueue silent buffers. 
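+        The reason for priming the queue: the buffer queue callback only fires when a
+        previously enqueued buffer completes, so every period must be enqueued up front or
+        playback would never start. In the duplex case the capture side hasn't produced any
+        data yet, which is why silence is enqueued rather than reading from the client.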
 */
+        if (pDevice->type == ma_device_type_duplex) {
+            MA_ZERO_MEMORY(pDevice->opensl.pBufferPlayback, pDevice->playback.internalPeriodSizeInFrames * pDevice->playback.internalPeriods * ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels));
+        } else {
+            ma_device__read_frames_from_client(pDevice, pDevice->playback.internalPeriodSizeInFrames * pDevice->playback.internalPeriods, pDevice->opensl.pBufferPlayback);
+        }
+
+        periodSizeInBytes = pDevice->playback.internalPeriodSizeInFrames * ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels);
+        for (iPeriod = 0; iPeriod < pDevice->playback.internalPeriods; ++iPeriod) {
+            resultSL = MA_OPENSL_BUFFERQUEUE(pDevice->opensl.pBufferQueuePlayback)->Enqueue((SLAndroidSimpleBufferQueueItf)pDevice->opensl.pBufferQueuePlayback, pDevice->opensl.pBufferPlayback + (periodSizeInBytes * iPeriod), periodSizeInBytes);
+            if (resultSL != SL_RESULT_SUCCESS) {
+                MA_OPENSL_PLAY(pDevice->opensl.pAudioPlayer)->SetPlayState((SLPlayItf)pDevice->opensl.pAudioPlayer, SL_PLAYSTATE_STOPPED);
+                ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to enqueue buffer for playback device.");
+                return ma_result_from_OpenSL(resultSL);
+            }
+        }
+    }
+
+    return MA_SUCCESS;
+}
+
+static ma_result ma_device_drain__opensl(ma_device* pDevice, ma_device_type deviceType)
+{
+    SLAndroidSimpleBufferQueueItf pBufferQueue;
+
+    MA_ASSERT(deviceType == ma_device_type_capture || deviceType == ma_device_type_playback);
+
+    if (deviceType == ma_device_type_capture) {
+        pBufferQueue = (SLAndroidSimpleBufferQueueItf)pDevice->opensl.pBufferQueueCapture;
+        pDevice->opensl.isDrainingCapture = MA_TRUE;
+    } else {
+        pBufferQueue = (SLAndroidSimpleBufferQueueItf)pDevice->opensl.pBufferQueuePlayback;
+        pDevice->opensl.isDrainingPlayback = MA_TRUE;
+    }
+
+    for (;;) {
+        SLAndroidSimpleBufferQueueState state;
+
+        MA_OPENSL_BUFFERQUEUE(pBufferQueue)->GetState(pBufferQueue, &state);
+        if (state.count == 0) {
+            break;
+        }
+
+        ma_sleep(10);
+    }
+
+    if (deviceType == ma_device_type_capture) {
+        pDevice->opensl.isDrainingCapture = MA_FALSE;
+    } else {
+        pDevice->opensl.isDrainingPlayback = MA_FALSE;
+    }
+
+    return MA_SUCCESS;
+}
+
+static ma_result ma_device_stop__opensl(ma_device* pDevice)
+{
+    SLresult resultSL;
+
+    MA_ASSERT(pDevice != NULL);
+
+    MA_ASSERT(g_maOpenSLInitCounter > 0); /* <-- If you trigger this it means you've either not initialized the context, or you've uninitialized it before stopping/uninitializing the device.
*/ + if (g_maOpenSLInitCounter == 0) { + return MA_INVALID_OPERATION; + } + + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + ma_device_drain__opensl(pDevice, ma_device_type_capture); + + resultSL = MA_OPENSL_RECORD(pDevice->opensl.pAudioRecorder)->SetRecordState((SLRecordItf)pDevice->opensl.pAudioRecorder, SL_RECORDSTATE_STOPPED); + if (resultSL != SL_RESULT_SUCCESS) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to stop internal capture device."); + return ma_result_from_OpenSL(resultSL); + } + + MA_OPENSL_BUFFERQUEUE(pDevice->opensl.pBufferQueueCapture)->Clear((SLAndroidSimpleBufferQueueItf)pDevice->opensl.pBufferQueueCapture); + } + + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + ma_device_drain__opensl(pDevice, ma_device_type_playback); + + resultSL = MA_OPENSL_PLAY(pDevice->opensl.pAudioPlayer)->SetPlayState((SLPlayItf)pDevice->opensl.pAudioPlayer, SL_PLAYSTATE_STOPPED); + if (resultSL != SL_RESULT_SUCCESS) { + ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to stop internal playback device."); + return ma_result_from_OpenSL(resultSL); + } + + MA_OPENSL_BUFFERQUEUE(pDevice->opensl.pBufferQueuePlayback)->Clear((SLAndroidSimpleBufferQueueItf)pDevice->opensl.pBufferQueuePlayback); + } + + /* Make sure the client is aware that the device has stopped. There may be an OpenSL|ES callback for this, but I haven't found it. */ + ma_device__on_notification_stopped(pDevice); + + return MA_SUCCESS; +} + + +static ma_result ma_context_uninit__opensl(ma_context* pContext) +{ + MA_ASSERT(pContext != NULL); + MA_ASSERT(pContext->backend == ma_backend_opensl); + (void)pContext; + + /* Uninit global data. */ + ma_spinlock_lock(&g_maOpenSLSpinlock); + { + MA_ASSERT(g_maOpenSLInitCounter > 0); /* If you've triggered this, it means you have ma_context_init/uninit mismatch. Each successful call to ma_context_init() must be matched up with a call to ma_context_uninit(). */ + + g_maOpenSLInitCounter -= 1; + if (g_maOpenSLInitCounter == 0) { + (*g_maEngineObjectSL)->Destroy(g_maEngineObjectSL); + } + } + ma_spinlock_unlock(&g_maOpenSLSpinlock); + + return MA_SUCCESS; +} + +static ma_result ma_dlsym_SLInterfaceID__opensl(ma_context* pContext, const char* pName, ma_handle* pHandle) +{ + /* We need to return an error if the symbol cannot be found. This is important because there have been reports that some symbols do not exist. 
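+    Note that the SL_IID_* names are data symbols (variables of type SLInterfaceID), not
+    functions, so ma_dlsym() returns the address of the variable and the value has to be
+    read through that pointer - hence the dereference below. Roughly (illustrative only,
+    not part of the code):
+
+        SLInterfaceID* p = (SLInterfaceID*)dlsym(handle, "SL_IID_ENGINE");
+        SLInterfaceID id = *p;   <-- the interface ID is the pointed-to value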
*/ + ma_handle* p = (ma_handle*)ma_dlsym(ma_context_get_log(pContext), pContext->opensl.libOpenSLES, pName); + if (p == NULL) { + ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_INFO, "[OpenSL] Cannot find symbol %s", pName); + return MA_NO_BACKEND; + } + + *pHandle = *p; + return MA_SUCCESS; +} + +static ma_result ma_context_init_engine_nolock__opensl(ma_context* pContext) +{ + g_maOpenSLInitCounter += 1; + if (g_maOpenSLInitCounter == 1) { + SLresult resultSL; + + resultSL = ((ma_slCreateEngine_proc)pContext->opensl.slCreateEngine)(&g_maEngineObjectSL, 0, NULL, 0, NULL, NULL); + if (resultSL != SL_RESULT_SUCCESS) { + g_maOpenSLInitCounter -= 1; + return ma_result_from_OpenSL(resultSL); + } + + (*g_maEngineObjectSL)->Realize(g_maEngineObjectSL, SL_BOOLEAN_FALSE); + + resultSL = (*g_maEngineObjectSL)->GetInterface(g_maEngineObjectSL, (SLInterfaceID)pContext->opensl.SL_IID_ENGINE, &g_maEngineSL); + if (resultSL != SL_RESULT_SUCCESS) { + (*g_maEngineObjectSL)->Destroy(g_maEngineObjectSL); + g_maOpenSLInitCounter -= 1; + return ma_result_from_OpenSL(resultSL); + } + } + + return MA_SUCCESS; +} + +static ma_result ma_context_init__opensl(ma_context* pContext, const ma_context_config* pConfig, ma_backend_callbacks* pCallbacks) +{ + ma_result result; + +#if !defined(MA_NO_RUNTIME_LINKING) + size_t i; + const char* libOpenSLESNames[] = { + "libOpenSLES.so" + }; +#endif + + MA_ASSERT(pContext != NULL); + + (void)pConfig; + +#if !defined(MA_NO_RUNTIME_LINKING) + /* + Dynamically link against libOpenSLES.so. I have now had multiple reports that SL_IID_ANDROIDSIMPLEBUFFERQUEUE cannot be found. One + report was happening at compile time and another at runtime. To try working around this, I'm going to link to libOpenSLES at runtime + and extract the symbols rather than reference them directly. This should, hopefully, fix these issues as the compiler won't see any + references to the symbols and will hopefully skip the checks. 
+ */ + for (i = 0; i < ma_countof(libOpenSLESNames); i += 1) { + pContext->opensl.libOpenSLES = ma_dlopen(ma_context_get_log(pContext), libOpenSLESNames[i]); + if (pContext->opensl.libOpenSLES != NULL) { + break; + } + } + + if (pContext->opensl.libOpenSLES == NULL) { + ma_log_post(ma_context_get_log(pContext), MA_LOG_LEVEL_INFO, "[OpenSL] Could not find libOpenSLES.so"); + return MA_NO_BACKEND; + } + + result = ma_dlsym_SLInterfaceID__opensl(pContext, "SL_IID_ENGINE", &pContext->opensl.SL_IID_ENGINE); + if (result != MA_SUCCESS) { + ma_dlclose(ma_context_get_log(pContext), pContext->opensl.libOpenSLES); + return result; + } + + result = ma_dlsym_SLInterfaceID__opensl(pContext, "SL_IID_AUDIOIODEVICECAPABILITIES", &pContext->opensl.SL_IID_AUDIOIODEVICECAPABILITIES); + if (result != MA_SUCCESS) { + ma_dlclose(ma_context_get_log(pContext), pContext->opensl.libOpenSLES); + return result; + } + + result = ma_dlsym_SLInterfaceID__opensl(pContext, "SL_IID_ANDROIDSIMPLEBUFFERQUEUE", &pContext->opensl.SL_IID_ANDROIDSIMPLEBUFFERQUEUE); + if (result != MA_SUCCESS) { + ma_dlclose(ma_context_get_log(pContext), pContext->opensl.libOpenSLES); + return result; + } + + result = ma_dlsym_SLInterfaceID__opensl(pContext, "SL_IID_RECORD", &pContext->opensl.SL_IID_RECORD); + if (result != MA_SUCCESS) { + ma_dlclose(ma_context_get_log(pContext), pContext->opensl.libOpenSLES); + return result; + } + + result = ma_dlsym_SLInterfaceID__opensl(pContext, "SL_IID_PLAY", &pContext->opensl.SL_IID_PLAY); + if (result != MA_SUCCESS) { + ma_dlclose(ma_context_get_log(pContext), pContext->opensl.libOpenSLES); + return result; + } + + result = ma_dlsym_SLInterfaceID__opensl(pContext, "SL_IID_OUTPUTMIX", &pContext->opensl.SL_IID_OUTPUTMIX); + if (result != MA_SUCCESS) { + ma_dlclose(ma_context_get_log(pContext), pContext->opensl.libOpenSLES); + return result; + } + + result = ma_dlsym_SLInterfaceID__opensl(pContext, "SL_IID_ANDROIDCONFIGURATION", &pContext->opensl.SL_IID_ANDROIDCONFIGURATION); + if (result != MA_SUCCESS) { + ma_dlclose(ma_context_get_log(pContext), pContext->opensl.libOpenSLES); + return result; + } + + pContext->opensl.slCreateEngine = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->opensl.libOpenSLES, "slCreateEngine"); + if (pContext->opensl.slCreateEngine == NULL) { + ma_dlclose(ma_context_get_log(pContext), pContext->opensl.libOpenSLES); + ma_log_post(ma_context_get_log(pContext), MA_LOG_LEVEL_INFO, "[OpenSL] Cannot find symbol slCreateEngine."); + return MA_NO_BACKEND; + } +#else + pContext->opensl.SL_IID_ENGINE = (ma_handle)SL_IID_ENGINE; + pContext->opensl.SL_IID_AUDIOIODEVICECAPABILITIES = (ma_handle)SL_IID_AUDIOIODEVICECAPABILITIES; + pContext->opensl.SL_IID_ANDROIDSIMPLEBUFFERQUEUE = (ma_handle)SL_IID_ANDROIDSIMPLEBUFFERQUEUE; + pContext->opensl.SL_IID_RECORD = (ma_handle)SL_IID_RECORD; + pContext->opensl.SL_IID_PLAY = (ma_handle)SL_IID_PLAY; + pContext->opensl.SL_IID_OUTPUTMIX = (ma_handle)SL_IID_OUTPUTMIX; + pContext->opensl.SL_IID_ANDROIDCONFIGURATION = (ma_handle)SL_IID_ANDROIDCONFIGURATION; + pContext->opensl.slCreateEngine = (ma_proc)slCreateEngine; +#endif + + + /* Initialize global data first if applicable. 
 */
+    ma_spinlock_lock(&g_maOpenSLSpinlock);
+    {
+        result = ma_context_init_engine_nolock__opensl(pContext);
+    }
+    ma_spinlock_unlock(&g_maOpenSLSpinlock);
+
+    if (result != MA_SUCCESS) {
+        ma_dlclose(ma_context_get_log(pContext), pContext->opensl.libOpenSLES);
+        ma_log_post(ma_context_get_log(pContext), MA_LOG_LEVEL_INFO, "[OpenSL] Failed to initialize OpenSL engine.");
+        return result;
+    }
+
+    pCallbacks->onContextInit = ma_context_init__opensl;
+    pCallbacks->onContextUninit = ma_context_uninit__opensl;
+    pCallbacks->onContextEnumerateDevices = ma_context_enumerate_devices__opensl;
+    pCallbacks->onContextGetDeviceInfo = ma_context_get_device_info__opensl;
+    pCallbacks->onDeviceInit = ma_device_init__opensl;
+    pCallbacks->onDeviceUninit = ma_device_uninit__opensl;
+    pCallbacks->onDeviceStart = ma_device_start__opensl;
+    pCallbacks->onDeviceStop = ma_device_stop__opensl;
+    pCallbacks->onDeviceRead = NULL;     /* Not needed because OpenSL|ES is asynchronous. */
+    pCallbacks->onDeviceWrite = NULL;    /* Not needed because OpenSL|ES is asynchronous. */
+    pCallbacks->onDeviceDataLoop = NULL; /* Not needed because OpenSL|ES is asynchronous. */
+
+    return MA_SUCCESS;
+}
+#endif  /* OpenSL|ES */
+
+
+/******************************************************************************
+
+Web Audio Backend
+
+******************************************************************************/
+#ifdef MA_HAS_WEBAUDIO
+#include <emscripten/emscripten.h>
+
+#if (__EMSCRIPTEN_major__ > 3) || (__EMSCRIPTEN_major__ == 3 && (__EMSCRIPTEN_minor__ > 1 || (__EMSCRIPTEN_minor__ == 1 && __EMSCRIPTEN_tiny__ >= 32)))
+    #include <emscripten/webaudio.h>
+    #define MA_SUPPORT_AUDIO_WORKLETS
+
+    #if (__EMSCRIPTEN_major__ > 3) || (__EMSCRIPTEN_major__ == 3 && (__EMSCRIPTEN_minor__ > 1 || (__EMSCRIPTEN_minor__ == 1 && __EMSCRIPTEN_tiny__ >= 70)))
+        #define MA_SUPPORT_AUDIO_WORKLETS_VARIABLE_BUFFER_SIZE
+    #endif
+#endif
+
+/*
+TODO: Version 0.12: Swap this logic around so that AudioWorklets are used by default. Add MA_NO_AUDIO_WORKLETS.
+*/
+#if defined(MA_ENABLE_AUDIO_WORKLETS) && defined(MA_SUPPORT_AUDIO_WORKLETS)
+    #define MA_USE_AUDIO_WORKLETS
+#endif
+
+/* The thread stack size must be a multiple of 16. */
+#ifndef MA_AUDIO_WORKLETS_THREAD_STACK_SIZE
+#define MA_AUDIO_WORKLETS_THREAD_STACK_SIZE 131072
+#endif
+
+#if defined(MA_USE_AUDIO_WORKLETS)
+#define MA_WEBAUDIO_LATENCY_HINT_BALANCED    "balanced"
+#define MA_WEBAUDIO_LATENCY_HINT_INTERACTIVE "interactive"
+#define MA_WEBAUDIO_LATENCY_HINT_PLAYBACK    "playback"
+#endif
+
+static ma_bool32 ma_is_capture_supported__webaudio()
+{
+    return EM_ASM_INT({
+        return (navigator.mediaDevices !== undefined && navigator.mediaDevices.getUserMedia !== undefined);
+    }, 0) != 0; /* Must pass in a dummy argument for C99 compatibility.
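+    EM_ASM_INT compiles the JavaScript block into the module and runs it synchronously on
+    the calling thread, returning an int; the $0, $1, ... tokens bind to the C arguments
+    that follow the block. A minimal sketch of the pattern (made-up values):
+
+        int x = EM_ASM_INT({ return $0 + 1; }, 41);   <-- x == 42
+
+    The trailing ", 0" above exists only because the variadic macro needs at least one
+    argument after the block in C99.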
*/ +} + +#ifdef __cplusplus +extern "C" { +#endif +void* EMSCRIPTEN_KEEPALIVE ma_malloc_emscripten(size_t sz, const ma_allocation_callbacks* pAllocationCallbacks) +{ + return ma_malloc(sz, pAllocationCallbacks); +} + +void EMSCRIPTEN_KEEPALIVE ma_free_emscripten(void* p, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_free(p, pAllocationCallbacks); +} + +void EMSCRIPTEN_KEEPALIVE ma_device_process_pcm_frames_capture__webaudio(ma_device* pDevice, int frameCount, float* pFrames) +{ + ma_device_handle_backend_data_callback(pDevice, NULL, pFrames, (ma_uint32)frameCount); +} + +void EMSCRIPTEN_KEEPALIVE ma_device_process_pcm_frames_playback__webaudio(ma_device* pDevice, int frameCount, float* pFrames) +{ + ma_device_handle_backend_data_callback(pDevice, pFrames, NULL, (ma_uint32)frameCount); +} +#ifdef __cplusplus +} +#endif + +static ma_result ma_context_enumerate_devices__webaudio(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData) +{ + ma_bool32 cbResult = MA_TRUE; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(callback != NULL); + + /* Only supporting default devices for now. */ + + /* Playback. */ + if (cbResult) { + ma_device_info deviceInfo; + MA_ZERO_OBJECT(&deviceInfo); + ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1); + deviceInfo.isDefault = MA_TRUE; /* Only supporting default devices. */ + cbResult = callback(pContext, ma_device_type_playback, &deviceInfo, pUserData); + } + + /* Capture. */ + if (cbResult) { + if (ma_is_capture_supported__webaudio()) { + ma_device_info deviceInfo; + MA_ZERO_OBJECT(&deviceInfo); + ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1); + deviceInfo.isDefault = MA_TRUE; /* Only supporting default devices. */ + cbResult = callback(pContext, ma_device_type_capture, &deviceInfo, pUserData); + } + } + + return MA_SUCCESS; +} + +static ma_result ma_context_get_device_info__webaudio(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_device_info* pDeviceInfo) +{ + MA_ASSERT(pContext != NULL); + + if (deviceType == ma_device_type_capture && !ma_is_capture_supported__webaudio()) { + return MA_NO_DEVICE; + } + + MA_ZERO_MEMORY(pDeviceInfo->id.webaudio, sizeof(pDeviceInfo->id.webaudio)); + + /* Only supporting default devices for now. */ + (void)pDeviceID; + if (deviceType == ma_device_type_playback) { + ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1); + } else { + ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1); + } + + /* Only supporting default devices. */ + pDeviceInfo->isDefault = MA_TRUE; + + /* Web Audio can support any number of channels and sample rates. It only supports f32 formats, however. */ + pDeviceInfo->nativeDataFormats[0].flags = 0; + pDeviceInfo->nativeDataFormats[0].format = ma_format_unknown; + pDeviceInfo->nativeDataFormats[0].channels = 0; /* All channels are supported. */ + pDeviceInfo->nativeDataFormats[0].sampleRate = EM_ASM_INT({ + try { + var temp = new (window.AudioContext || window.webkitAudioContext)(); + var sampleRate = temp.sampleRate; + temp.close(); + return sampleRate; + } catch(e) { + return 0; + } + }, 0); /* Must pass in a dummy argument for C99 compatibility. 
*/ + + if (pDeviceInfo->nativeDataFormats[0].sampleRate == 0) { + return MA_NO_DEVICE; + } + + pDeviceInfo->nativeDataFormatCount = 1; + + return MA_SUCCESS; +} + +static ma_result ma_device_uninit__webaudio(ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); + + #if defined(MA_USE_AUDIO_WORKLETS) + { + EM_ASM({ + var device = window.miniaudio.get_device_by_index($0); + + if (device.streamNode !== undefined) { + device.streamNode.disconnect(); + device.streamNode = undefined; + } + + device.pDevice = undefined; + }, pDevice->webaudio.deviceIndex); + + emscripten_destroy_web_audio_node(pDevice->webaudio.audioWorklet); + emscripten_destroy_audio_context(pDevice->webaudio.audioContext); + ma_free(pDevice->webaudio.pStackBuffer, &pDevice->pContext->allocationCallbacks); + } + #else + { + EM_ASM({ + var device = window.miniaudio.get_device_by_index($0); + + /* Make sure all nodes are disconnected and marked for collection. */ + if (device.scriptNode !== undefined) { + device.scriptNode.onaudioprocess = function(e) {}; /* We want to reset the callback to ensure it doesn't get called after AudioContext.close() has returned. Shouldn't happen since we're disconnecting, but just to be safe... */ + device.scriptNode.disconnect(); + device.scriptNode = undefined; + } + + if (device.streamNode !== undefined) { + device.streamNode.disconnect(); + device.streamNode = undefined; + } + + /* + Stop the device. I think there is a chance the callback could get fired after calling this, hence why we want + to clear the callback before closing. + */ + device.webaudio.close(); + device.webaudio = undefined; + device.pDevice = undefined; + }, pDevice->webaudio.deviceIndex); + } + #endif + + /* Clean up the device on the JS side. */ + EM_ASM({ + window.miniaudio.untrack_device_by_index($0); + }, pDevice->webaudio.deviceIndex); + + ma_free(pDevice->webaudio.pIntermediaryBuffer, &pDevice->pContext->allocationCallbacks); + + return MA_SUCCESS; +} + +#if !defined(MA_USE_AUDIO_WORKLETS) +static ma_uint32 ma_calculate_period_size_in_frames_from_descriptor__webaudio(const ma_device_descriptor* pDescriptor, ma_uint32 nativeSampleRate, ma_performance_profile performanceProfile) +{ + /* + There have been reports of the default buffer size being too small on some browsers. If we're using + the default buffer size, we'll make sure the period size is bigger than our standard defaults. + */ + ma_uint32 periodSizeInFrames; + + if (nativeSampleRate == 0) { + nativeSampleRate = MA_DEFAULT_SAMPLE_RATE; + } + + if (pDescriptor->periodSizeInFrames == 0) { + if (pDescriptor->periodSizeInMilliseconds == 0) { + if (performanceProfile == ma_performance_profile_low_latency) { + periodSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(33, nativeSampleRate); /* 1 frame @ 30 FPS */ + } else { + periodSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(333, nativeSampleRate); + } + } else { + periodSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(pDescriptor->periodSizeInMilliseconds, nativeSampleRate); + } + } else { + periodSizeInFrames = pDescriptor->periodSizeInFrames; + } + + /* The size of the buffer must be a power of 2 and between 256 and 16384. 
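+    For example (values worked by hand): the low-latency default of 33 ms at 48000 Hz is
+    48000 * 33 / 1000 = 1584 frames, which ma_next_power_of_2() rounds up to 2048; a request
+    of 100 frames would instead be clamped straight to the 256 minimum.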
*/ + if (periodSizeInFrames < 256) { + periodSizeInFrames = 256; + } else if (periodSizeInFrames > 16384) { + periodSizeInFrames = 16384; + } else { + periodSizeInFrames = ma_next_power_of_2(periodSizeInFrames); + } + + return periodSizeInFrames; +} +#endif + + +#if defined(MA_USE_AUDIO_WORKLETS) +typedef struct +{ + ma_device* pDevice; + const ma_device_config* pConfig; + ma_device_descriptor* pDescriptorPlayback; + ma_device_descriptor* pDescriptorCapture; +} ma_audio_worklet_thread_initialized_data; + +static EM_BOOL ma_audio_worklet_process_callback__webaudio(int inputCount, const AudioSampleFrame* pInputs, int outputCount, AudioSampleFrame* pOutputs, int paramCount, const AudioParamFrame* pParams, void* pUserData) +{ + ma_device* pDevice = (ma_device*)pUserData; + ma_uint32 frameCount; + + (void)paramCount; + (void)pParams; + + /* + The Emscripten documentation says that it'll always be 128 frames being passed in. Hard coding it like that feels + like a very bad idea to me. Even if it's hard coded in the backend, the API and documentation should always refer + to variables instead of a hard coded number. In any case, will follow along for the time being. + + Unfortunately the audio data is not interleaved so we'll need to convert it before we give the data to miniaudio + for further processing. + */ + if (pDevice->type == ma_device_type_playback) { + frameCount = pDevice->playback.internalPeriodSizeInFrames; + } else { + frameCount = pDevice->capture.internalPeriodSizeInFrames; + } + + if (ma_device_get_state(pDevice) != ma_device_state_started) { + /* Fill the output buffer with zero to avoid a noise sound */ + for (int i = 0; i < outputCount; i += 1) { + MA_ZERO_MEMORY(pOutputs[i].data, pOutputs[i].numberOfChannels * frameCount * sizeof(float)); + } + + return EM_TRUE; + } + + if (inputCount > 0) { + /* Input data needs to be interleaved before we hand it to the client. */ + for (ma_uint32 iChannel = 0; iChannel < pDevice->capture.internalChannels; iChannel += 1) { + for (ma_uint32 iFrame = 0; iFrame < frameCount; iFrame += 1) { + pDevice->webaudio.pIntermediaryBuffer[iFrame*pDevice->capture.internalChannels + iChannel] = pInputs[0].data[frameCount*iChannel + iFrame]; + } + } + + ma_device_process_pcm_frames_capture__webaudio(pDevice, frameCount, pDevice->webaudio.pIntermediaryBuffer); + } + + if (outputCount > 0) { + /* If it's a capture-only device, we'll need to output silence. */ + if (pDevice->type == ma_device_type_capture) { + MA_ZERO_MEMORY(pOutputs[0].data, frameCount * pDevice->playback.internalChannels * sizeof(float)); + } else { + ma_device_process_pcm_frames_playback__webaudio(pDevice, frameCount, pDevice->webaudio.pIntermediaryBuffer); + + /* We've read the data from the client. Now we need to deinterleave the buffer and output to the output buffer. 
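+            The index math is the mirror image of the capture path above: miniaudio's
+            intermediary buffer is interleaved (L0 R0 L1 R1 ...) while Web Audio wants
+            planar data, one whole channel after the other. With 2 channels and a
+            frameCount of 128 (illustrative numbers), frame 5 of the right channel lives
+            at interleaved index 5*2 + 1 = 11 and at planar index 128*1 + 5 = 133.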
*/ + for (ma_uint32 iChannel = 0; iChannel < pDevice->playback.internalChannels; iChannel += 1) { + for (ma_uint32 iFrame = 0; iFrame < frameCount; iFrame += 1) { + pOutputs[0].data[frameCount*iChannel + iFrame] = pDevice->webaudio.pIntermediaryBuffer[iFrame*pDevice->playback.internalChannels + iChannel]; + } + } + } + } + + return EM_TRUE; +} + + +static void ma_audio_worklet_processor_created__webaudio(EMSCRIPTEN_WEBAUDIO_T audioContext, EM_BOOL success, void* pUserData) +{ + ma_audio_worklet_thread_initialized_data* pParameters = (ma_audio_worklet_thread_initialized_data*)pUserData; + EmscriptenAudioWorkletNodeCreateOptions audioWorkletOptions; + int channels = 0; + size_t intermediaryBufferSizeInFrames; + int sampleRate; + + if (success == EM_FALSE) { + pParameters->pDevice->webaudio.initResult = MA_ERROR; + ma_free(pParameters, &pParameters->pDevice->pContext->allocationCallbacks); + return; + } + + /* The next step is to initialize the audio worklet node. */ + MA_ZERO_OBJECT(&audioWorkletOptions); + + /* + The way channel counts work with Web Audio is confusing. As far as I can tell, there's no way to know the channel + count from MediaStreamAudioSourceNode (what we use for capture)? The only way to have control is to configure an + output channel count on the capture side. This is slightly confusing for capture mode because intuitively you + wouldn't actually connect an output to an input-only node, but this is what we'll have to do in order to have + proper control over the channel count. In the capture case, we'll have to output silence to its output node. + */ + if (pParameters->pConfig->deviceType == ma_device_type_capture) { + channels = (int)((pParameters->pDescriptorCapture->channels > 0) ? pParameters->pDescriptorCapture->channels : MA_DEFAULT_CHANNELS); + audioWorkletOptions.numberOfInputs = 1; + } else { + channels = (int)((pParameters->pDescriptorPlayback->channels > 0) ? pParameters->pDescriptorPlayback->channels : MA_DEFAULT_CHANNELS); + + if (pParameters->pConfig->deviceType == ma_device_type_duplex) { + audioWorkletOptions.numberOfInputs = 1; + } else { + audioWorkletOptions.numberOfInputs = 0; + } + } + + audioWorkletOptions.numberOfOutputs = 1; + audioWorkletOptions.outputChannelCounts = &channels; + + + /* + Now that we know the channel count to use we can allocate the intermediary buffer. The + intermediary buffer is used for interleaving and deinterleaving. + */ + #if defined(MA_SUPPORT_AUDIO_WORKLETS_VARIABLE_BUFFER_SIZE) + { + intermediaryBufferSizeInFrames = (size_t)emscripten_audio_context_quantum_size(audioContext); + } + #else + { + intermediaryBufferSizeInFrames = 128; + } + #endif + + pParameters->pDevice->webaudio.pIntermediaryBuffer = (float*)ma_malloc(intermediaryBufferSizeInFrames * (ma_uint32)channels * sizeof(float), &pParameters->pDevice->pContext->allocationCallbacks); + if (pParameters->pDevice->webaudio.pIntermediaryBuffer == NULL) { + pParameters->pDevice->webaudio.initResult = MA_OUT_OF_MEMORY; + ma_free(pParameters, &pParameters->pDevice->pContext->allocationCallbacks); + return; + } + + pParameters->pDevice->webaudio.audioWorklet = emscripten_create_wasm_audio_worklet_node(audioContext, "miniaudio", &audioWorkletOptions, &ma_audio_worklet_process_callback__webaudio, pParameters->pDevice); + + /* With the audio worklet initialized we can now attach it to the graph. 
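+    A caveat with the capture attachment below: getUserMedia() is promise-based, so the
+    EM_ASM_INT block returns getUserMediaResult before the .then()/.catch() handlers have
+    had a chance to run. A rejected permission prompt is therefore logged to the console
+    but not propagated back through attachmentResult.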
*/ + if (pParameters->pConfig->deviceType == ma_device_type_capture || pParameters->pConfig->deviceType == ma_device_type_duplex) { + ma_result attachmentResult = (ma_result)EM_ASM_INT({ + var getUserMediaResult = 0; + var audioWorklet = emscriptenGetAudioObject($0); + var audioContext = emscriptenGetAudioObject($1); + + navigator.mediaDevices.getUserMedia({audio:true, video:false}) + .then(function(stream) { + audioContext.streamNode = audioContext.createMediaStreamSource(stream); + audioContext.streamNode.connect(audioWorklet); + audioWorklet.connect(audioContext.destination); + getUserMediaResult = 0; /* 0 = MA_SUCCESS */ + }) + .catch(function(error) { + console.log("navigator.mediaDevices.getUserMedia Failed: " + error); + getUserMediaResult = -1; /* -1 = MA_ERROR */ + }); + + return getUserMediaResult; + }, pParameters->pDevice->webaudio.audioWorklet, audioContext); + + if (attachmentResult != MA_SUCCESS) { + ma_log_postf(ma_device_get_log(pParameters->pDevice), MA_LOG_LEVEL_ERROR, "Web Audio: Failed to connect capture node."); + emscripten_destroy_web_audio_node(pParameters->pDevice->webaudio.audioWorklet); + pParameters->pDevice->webaudio.initResult = attachmentResult; + ma_free(pParameters, &pParameters->pDevice->pContext->allocationCallbacks); + return; + } + } + + /* If it's playback only we can now attach the worklet node to the graph. This has already been done for the duplex case. */ + if (pParameters->pConfig->deviceType == ma_device_type_playback) { + ma_result attachmentResult = (ma_result)EM_ASM_INT({ + var audioWorklet = emscriptenGetAudioObject($0); + var audioContext = emscriptenGetAudioObject($1); + audioWorklet.connect(audioContext.destination); + return 0; /* 0 = MA_SUCCESS */ + }, pParameters->pDevice->webaudio.audioWorklet, audioContext); + + if (attachmentResult != MA_SUCCESS) { + ma_log_postf(ma_device_get_log(pParameters->pDevice), MA_LOG_LEVEL_ERROR, "Web Audio: Failed to connect playback node."); + pParameters->pDevice->webaudio.initResult = attachmentResult; + ma_free(pParameters, &pParameters->pDevice->pContext->allocationCallbacks); + return; + } + } + + /* We need to update the descriptors so that they reflect the internal data format. Both capture and playback should be the same. 
*/ + sampleRate = EM_ASM_INT({ return emscriptenGetAudioObject($0).sampleRate; }, audioContext); + + if (pParameters->pDescriptorCapture != NULL) { + pParameters->pDescriptorCapture->format = ma_format_f32; + pParameters->pDescriptorCapture->channels = (ma_uint32)channels; + pParameters->pDescriptorCapture->sampleRate = (ma_uint32)sampleRate; + ma_channel_map_init_standard(ma_standard_channel_map_webaudio, pParameters->pDescriptorCapture->channelMap, ma_countof(pParameters->pDescriptorCapture->channelMap), pParameters->pDescriptorCapture->channels); + pParameters->pDescriptorCapture->periodSizeInFrames = intermediaryBufferSizeInFrames; + pParameters->pDescriptorCapture->periodCount = 1; + } + + if (pParameters->pDescriptorPlayback != NULL) { + pParameters->pDescriptorPlayback->format = ma_format_f32; + pParameters->pDescriptorPlayback->channels = (ma_uint32)channels; + pParameters->pDescriptorPlayback->sampleRate = (ma_uint32)sampleRate; + ma_channel_map_init_standard(ma_standard_channel_map_webaudio, pParameters->pDescriptorPlayback->channelMap, ma_countof(pParameters->pDescriptorPlayback->channelMap), pParameters->pDescriptorPlayback->channels); + pParameters->pDescriptorPlayback->periodSizeInFrames = intermediaryBufferSizeInFrames; + pParameters->pDescriptorPlayback->periodCount = 1; + } + + /* At this point we're done and we can return. */ + ma_log_postf(ma_device_get_log(pParameters->pDevice), MA_LOG_LEVEL_DEBUG, "AudioWorklets: Created worklet node: %d\n", pParameters->pDevice->webaudio.audioWorklet); + pParameters->pDevice->webaudio.initResult = MA_SUCCESS; + ma_free(pParameters, &pParameters->pDevice->pContext->allocationCallbacks); +} + +static void ma_audio_worklet_thread_initialized__webaudio(EMSCRIPTEN_WEBAUDIO_T audioContext, EM_BOOL success, void* pUserData) +{ + ma_audio_worklet_thread_initialized_data* pParameters = (ma_audio_worklet_thread_initialized_data*)pUserData; + WebAudioWorkletProcessorCreateOptions workletProcessorOptions; + + MA_ASSERT(pParameters != NULL); + + if (success == EM_FALSE) { + pParameters->pDevice->webaudio.initResult = MA_ERROR; + return; + } + + MA_ZERO_OBJECT(&workletProcessorOptions); + workletProcessorOptions.name = "miniaudio"; /* I'm not entirely sure what to call this. Does this need to be globally unique, or does it need only be unique for a given AudioContext? */ + + emscripten_create_wasm_audio_worklet_processor_async(audioContext, &workletProcessorOptions, ma_audio_worklet_processor_created__webaudio, pParameters); +} +#endif + +static ma_result ma_device_init__webaudio(ma_device* pDevice, const ma_device_config* pConfig, ma_device_descriptor* pDescriptorPlayback, ma_device_descriptor* pDescriptorCapture) +{ + if (pConfig->deviceType == ma_device_type_loopback) { + return MA_DEVICE_TYPE_NOT_SUPPORTED; + } + + /* No exclusive mode with Web Audio. */ + if (((pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) && pDescriptorPlayback->shareMode == ma_share_mode_exclusive) || + ((pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) && pDescriptorCapture->shareMode == ma_share_mode_exclusive)) { + return MA_SHARE_MODE_NOT_SUPPORTED; + } + + /* + With AudioWorklets we'll have just a single AudioContext. I'm not sure why I'm not doing this for ScriptProcessorNode so + it might be worthwhile to look into that as well. 
+ */ + #if defined(MA_USE_AUDIO_WORKLETS) + { + EmscriptenWebAudioCreateAttributes audioContextAttributes; + ma_audio_worklet_thread_initialized_data* pInitParameters; + void* pStackBuffer; + + if (pConfig->performanceProfile == ma_performance_profile_conservative) { + audioContextAttributes.latencyHint = MA_WEBAUDIO_LATENCY_HINT_PLAYBACK; + } else { + audioContextAttributes.latencyHint = MA_WEBAUDIO_LATENCY_HINT_INTERACTIVE; + } + + /* + In my testing, Firefox does not seem to capture audio data properly if the sample rate is set + to anything other than 48K. This does not seem to be the case for other browsers. For this reason, + if the device type is anything other than playback, we'll leave the sample rate as-is and let the + browser pick the appropriate rate for us. + */ + if (pConfig->deviceType == ma_device_type_playback) { + audioContextAttributes.sampleRate = pDescriptorPlayback->sampleRate; + } else { + audioContextAttributes.sampleRate = 0; + } + + /* It's not clear if this can return an error. None of the tests in the Emscripten repository check for this, so neither am I for now. */ + pDevice->webaudio.audioContext = emscripten_create_audio_context(&audioContextAttributes); + + /* + With the context created we can now create the worklet. We can only have a single worklet per audio + context which means we'll need to craft this appropriately to handle duplex devices correctly. + */ + + /* + We now need to create a worker thread. This is a bit weird because we need to allocate our + own buffer for the thread's stack. The stack needs to be aligned to 16 bytes. I'm going to + allocate this on the heap to keep it simple. + */ + pStackBuffer = ma_aligned_malloc(MA_AUDIO_WORKLETS_THREAD_STACK_SIZE, 16, &pDevice->pContext->allocationCallbacks); + if (pStackBuffer == NULL) { + emscripten_destroy_audio_context(pDevice->webaudio.audioContext); + return MA_OUT_OF_MEMORY; + } + + /* Our thread initialization parameters need to be allocated on the heap so they don't go out of scope. */ + pInitParameters = (ma_audio_worklet_thread_initialized_data*)ma_malloc(sizeof(*pInitParameters), &pDevice->pContext->allocationCallbacks); + if (pInitParameters == NULL) { + ma_free(pStackBuffer, &pDevice->pContext->allocationCallbacks); + emscripten_destroy_audio_context(pDevice->webaudio.audioContext); + return MA_OUT_OF_MEMORY; + } + + pInitParameters->pDevice = pDevice; + pInitParameters->pConfig = pConfig; + pInitParameters->pDescriptorPlayback = pDescriptorPlayback; + pInitParameters->pDescriptorCapture = pDescriptorCapture; + + /* + We need to flag the device as not yet initialized so we can wait on it later. Unfortunately all of + the Emscripten WebAudio stuff is asynchronous. + */ + pDevice->webaudio.initResult = MA_BUSY; + { + emscripten_start_wasm_audio_worklet_thread_async(pDevice->webaudio.audioContext, pStackBuffer, MA_AUDIO_WORKLETS_THREAD_STACK_SIZE, ma_audio_worklet_thread_initialized__webaudio, pInitParameters); + } + while (pDevice->webaudio.initResult == MA_BUSY) { emscripten_sleep(1); } /* We must wait for initialization to complete. We're just spinning here. The emscripten_sleep() call is why we need to build with `-sASYNCIFY`. */ + + /* Initialization is now complete. Descriptors were updated when the worklet was initialized. 
 */
+        if (pDevice->webaudio.initResult != MA_SUCCESS) {
+            ma_free(pStackBuffer, &pDevice->pContext->allocationCallbacks);
+            emscripten_destroy_audio_context(pDevice->webaudio.audioContext);
+            return pDevice->webaudio.initResult;
+        }
+
+        /* We need to add an entry to the miniaudio.devices list on the JS side so we can do some JS/C interop. */
+        pDevice->webaudio.deviceIndex = EM_ASM_INT({
+            return window.miniaudio.track_device({
+                webaudio: emscriptenGetAudioObject($0),
+                state:    1, /* 1 = ma_device_state_stopped */
+                pDevice:  $1
+            });
+        }, pDevice->webaudio.audioContext, pDevice);
+
+        return MA_SUCCESS;
+    }
+    #else
+    {
+        /* ScriptProcessorNode. This path requires us to do almost everything in JS, but we'll do as much as we can in C. */
+        int deviceIndex;    /* Signed: the EM_ASM_INT block below returns -1 on error. */
+        ma_uint32 channels;
+        ma_uint32 sampleRate;
+        ma_uint32 periodSizeInFrames;
+
+        /* The channel count will depend on the device type. If it's a capture device, use its channel count; otherwise use the playback side's. */
+        if (pConfig->deviceType == ma_device_type_capture) {
+            channels = (pDescriptorCapture->channels > 0) ? pDescriptorCapture->channels : MA_DEFAULT_CHANNELS;
+        } else {
+            channels = (pDescriptorPlayback->channels > 0) ? pDescriptorPlayback->channels : MA_DEFAULT_CHANNELS;
+        }
+
+        /*
+        When testing in Firefox, I've seen it where capture mode fails if the sample rate is changed to anything other than its
+        native rate. For this reason we're leaving the sample rate untouched for capture devices.
+        */
+        if (pConfig->deviceType == ma_device_type_playback) {
+            sampleRate = pDescriptorPlayback->sampleRate;
+        } else {
+            sampleRate = 0; /* Let the browser decide when capturing. */
+        }
+
+        /* The period size needs to be a power of 2. */
+        if (pConfig->deviceType == ma_device_type_capture) {
+            periodSizeInFrames = ma_calculate_period_size_in_frames_from_descriptor__webaudio(pDescriptorCapture, sampleRate, pConfig->performanceProfile);
+        } else {
+            periodSizeInFrames = ma_calculate_period_size_in_frames_from_descriptor__webaudio(pDescriptorPlayback, sampleRate, pConfig->performanceProfile);
+        }
+
+        /* We need an intermediary buffer for doing interleaving and deinterleaving. */
+        pDevice->webaudio.pIntermediaryBuffer = (float*)ma_malloc(periodSizeInFrames * channels * sizeof(float), &pDevice->pContext->allocationCallbacks);
+        if (pDevice->webaudio.pIntermediaryBuffer == NULL) {
+            return MA_OUT_OF_MEMORY;
+        }
+
+        deviceIndex = EM_ASM_INT({
+            var deviceType = $0;
+            var channels   = $1;
+            var sampleRate = $2;
+            var bufferSize = $3;
+            var pIntermediaryBuffer = $4;
+            var pDevice    = $5;
+
+            if (typeof(window.miniaudio) === 'undefined') {
+                return -1;  /* Context not initialized. */
+            }
+
+            var device = {};
+
+            /* First thing we need is an AudioContext. */
+            var audioContextOptions = {};
+            if (deviceType == window.miniaudio.device_type.playback && sampleRate != 0) {
+                audioContextOptions.sampleRate = sampleRate;
+            }
+
+            device.webaudio = new (window.AudioContext || window.webkitAudioContext)(audioContextOptions);
+            device.webaudio.suspend();  /* The AudioContext must be created in a suspended state. */
+            device.state = window.miniaudio.device_state.stopped;
+
+            /*
+            We need to create a ScriptProcessorNode. The channel situation is the same as the AudioWorklet path in that we
+            need to specify an output and configure the channel count there.
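+            For example (illustrative values): a playback-only stereo device ends up calling
+            createScriptProcessor(bufferSize, 0, 2), whereas a duplex stereo device calls
+            createScriptProcessor(bufferSize, 2, 2).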
+            */
+            var channelCountIn  = 0;
+            var channelCountOut = channels;
+            if (deviceType != window.miniaudio.device_type.playback) {
+                channelCountIn = channels;
+            }
+
+            device.scriptNode = device.webaudio.createScriptProcessor(bufferSize, channelCountIn, channelCountOut);
+
+            /* The node processing callback. */
+            device.scriptNode.onaudioprocess = function(e) {
+                if (device.intermediaryBufferView == null || device.intermediaryBufferView.length == 0) {
+                    device.intermediaryBufferView = new Float32Array(HEAPF32.buffer, pIntermediaryBuffer, bufferSize * channels);
+                }
+
+                /* Do the capture side first. */
+                if (deviceType == window.miniaudio.device_type.capture || deviceType == window.miniaudio.device_type.duplex) {
+                    /* The data must be interleaved before being processed by miniaudio. */
+                    for (var iChannel = 0; iChannel < channels; iChannel += 1) {
+                        var inputBuffer = e.inputBuffer.getChannelData(iChannel);
+                        var intermediaryBuffer = device.intermediaryBufferView;
+
+                        for (var iFrame = 0; iFrame < bufferSize; iFrame += 1) {
+                            intermediaryBuffer[iFrame*channels + iChannel] = inputBuffer[iFrame];
+                        }
+                    }
+
+                    _ma_device_process_pcm_frames_capture__webaudio(pDevice, bufferSize, pIntermediaryBuffer);
+                }
+
+                if (deviceType == window.miniaudio.device_type.playback || deviceType == window.miniaudio.device_type.duplex) {
+                    _ma_device_process_pcm_frames_playback__webaudio(pDevice, bufferSize, pIntermediaryBuffer);
+
+                    for (var iChannel = 0; iChannel < e.outputBuffer.numberOfChannels; ++iChannel) {
+                        var outputBuffer = e.outputBuffer.getChannelData(iChannel);
+                        var intermediaryBuffer = device.intermediaryBufferView;
+
+                        for (var iFrame = 0; iFrame < bufferSize; iFrame += 1) {
+                            outputBuffer[iFrame] = intermediaryBuffer[iFrame*channels + iChannel];
+                        }
+                    }
+                } else {
+                    /* It's a capture-only device. Make sure the output is silenced. */
+                    for (var iChannel = 0; iChannel < e.outputBuffer.numberOfChannels; ++iChannel) {
+                        e.outputBuffer.getChannelData(iChannel).fill(0.0);
+                    }
+                }
+            };
+
+            /* Now we need to connect our node to the graph. */
+            if (deviceType == window.miniaudio.device_type.capture || deviceType == window.miniaudio.device_type.duplex) {
+                navigator.mediaDevices.getUserMedia({audio:true, video:false})
+                    .then(function(stream) {
+                        device.streamNode = device.webaudio.createMediaStreamSource(stream);
+                        device.streamNode.connect(device.scriptNode);
+                        device.scriptNode.connect(device.webaudio.destination);
+                    })
+                    .catch(function(error) {
+                        console.log("Failed to get user media: " + error);
+                    });
+            }
+
+            if (deviceType == window.miniaudio.device_type.playback) {
+                device.scriptNode.connect(device.webaudio.destination);
+            }
+
+            device.pDevice = pDevice;
+
+            return window.miniaudio.track_device(device);
+        }, pConfig->deviceType, channels, sampleRate, periodSizeInFrames, pDevice->webaudio.pIntermediaryBuffer, pDevice);
+
+        if (deviceIndex < 0) {
+            return MA_FAILED_TO_OPEN_BACKEND_DEVICE;
+        }
+
+        pDevice->webaudio.deviceIndex = (ma_uint32)deviceIndex;
+
+        /* Grab the sample rate from the audio context directly.
+        sampleRate = (ma_uint32)EM_ASM_INT({ return window.miniaudio.get_device_by_index($0).webaudio.sampleRate; }, deviceIndex);
+
+        if (pDescriptorCapture != NULL) {
+            pDescriptorCapture->format = ma_format_f32;
+            pDescriptorCapture->channels = channels;
+            pDescriptorCapture->sampleRate = sampleRate;
+            ma_channel_map_init_standard(ma_standard_channel_map_webaudio, pDescriptorCapture->channelMap, ma_countof(pDescriptorCapture->channelMap), pDescriptorCapture->channels);
+            pDescriptorCapture->periodSizeInFrames = periodSizeInFrames;
+            pDescriptorCapture->periodCount = 1;
+        }
+
+        if (pDescriptorPlayback != NULL) {
+            pDescriptorPlayback->format = ma_format_f32;
+            pDescriptorPlayback->channels = channels;
+            pDescriptorPlayback->sampleRate = sampleRate;
+            ma_channel_map_init_standard(ma_standard_channel_map_webaudio, pDescriptorPlayback->channelMap, ma_countof(pDescriptorPlayback->channelMap), pDescriptorPlayback->channels);
+            pDescriptorPlayback->periodSizeInFrames = periodSizeInFrames;
+            pDescriptorPlayback->periodCount = 1;
+        }
+
+        return MA_SUCCESS;
+    }
+    #endif
+}
+
+static ma_result ma_device_start__webaudio(ma_device* pDevice)
+{
+    MA_ASSERT(pDevice != NULL);
+
+    EM_ASM({
+        var device = window.miniaudio.get_device_by_index($0);
+        device.webaudio.resume();
+        device.state = window.miniaudio.device_state.started;
+    }, pDevice->webaudio.deviceIndex);
+
+    return MA_SUCCESS;
+}
+
+static ma_result ma_device_stop__webaudio(ma_device* pDevice)
+{
+    MA_ASSERT(pDevice != NULL);
+
+    /*
+    From the WebAudio API documentation for AudioContext.suspend():
+
+        Suspends the progression of AudioContext's currentTime, allows any current context processing blocks that are already processed to be played to the
+        destination, and then allows the system to release its claim on audio hardware.
+
+    I read this to mean that "any current context processing blocks" are processed by suspend() - i.e. they are drained. We therefore shouldn't need to
+    do any kind of explicit draining.
+    */
+    EM_ASM({
+        var device = window.miniaudio.get_device_by_index($0);
+        device.webaudio.suspend();
+        device.state = window.miniaudio.device_state.stopped;
+    }, pDevice->webaudio.deviceIndex);
+
+    ma_device__on_notification_stopped(pDevice);
+
+    return MA_SUCCESS;
+}
+
+static ma_result ma_context_uninit__webaudio(ma_context* pContext)
+{
+    MA_ASSERT(pContext != NULL);
+    MA_ASSERT(pContext->backend == ma_backend_webaudio);
+
+    (void)pContext; /* Unused. */
+
+    /* Remove the global miniaudio object from window if there are no more references to it. */
+    EM_ASM({
+        if (typeof(window.miniaudio) !== 'undefined') {
+            miniaudio.unlock_event_types.map(function(event_type) {
+                document.removeEventListener(event_type, miniaudio.unlock, true);
+            });
+
+            window.miniaudio.referenceCount -= 1;
+            if (window.miniaudio.referenceCount === 0) {
+                delete window.miniaudio;
+            }
+        }
+    });
+
+    return MA_SUCCESS;
+}
+
+static ma_result ma_context_init__webaudio(ma_context* pContext, const ma_context_config* pConfig, ma_backend_callbacks* pCallbacks)
+{
+    int resultFromJS;
+
+    MA_ASSERT(pContext != NULL);
+
+    (void)pConfig; /* Unused. */
+
+    /* Here is where our global JavaScript object is initialized. */
+    resultFromJS = EM_ASM_INT({
+        if (typeof window === 'undefined' || (window.AudioContext || window.webkitAudioContext) === undefined) {
+            return 0; /* Web Audio not supported. */
+        }
+
+        if (typeof(window.miniaudio) === 'undefined') {
+            window.miniaudio = {
+                referenceCount: 0
+            };
+
+            /* Device types. */
+            window.miniaudio.device_type = {};
+            window.miniaudio.device_type.playback = $0;
+            window.miniaudio.device_type.capture = $1;
+            window.miniaudio.device_type.duplex = $2;
+
+            /* Device states. */
+            window.miniaudio.device_state = {};
+            window.miniaudio.device_state.stopped = $3;
+            window.miniaudio.device_state.started = $4;
+
+            /* Device cache for mapping devices to indexes for JavaScript/C interop. */
+            let miniaudio = window.miniaudio;
+            miniaudio.devices = [];
+
+            miniaudio.track_device = function(device) {
+                /* Try inserting into a free slot first. */
+                for (var iDevice = 0; iDevice < miniaudio.devices.length; ++iDevice) {
+                    if (miniaudio.devices[iDevice] == null) {
+                        miniaudio.devices[iDevice] = device;
+                        return iDevice;
+                    }
+                }
+
+                /* Getting here means there are no empty slots in the array so we just push to the end. */
+                miniaudio.devices.push(device);
+                return miniaudio.devices.length - 1;
+            };
+
+            miniaudio.untrack_device_by_index = function(deviceIndex) {
+                /* We just set the device's slot to null. The slot will get reused in the next call to track_device(). */
+                miniaudio.devices[deviceIndex] = null;
+
+                /* Trim the array if possible. */
+                while (miniaudio.devices.length > 0) {
+                    if (miniaudio.devices[miniaudio.devices.length-1] == null) {
+                        miniaudio.devices.pop();
+                    } else {
+                        break;
+                    }
+                }
+            };
+
+            miniaudio.untrack_device = function(device) {
+                for (var iDevice = 0; iDevice < miniaudio.devices.length; ++iDevice) {
+                    if (miniaudio.devices[iDevice] == device) {
+                        return miniaudio.untrack_device_by_index(iDevice);
+                    }
+                }
+            };
+
+            miniaudio.get_device_by_index = function(deviceIndex) {
+                return miniaudio.devices[deviceIndex];
+            };
+
+            miniaudio.unlock_event_types = (function(){
+                return ['touchend', 'click'];
+            })();
+
+            miniaudio.unlock = function() {
+                for (var i = 0; i < miniaudio.devices.length; ++i) {
+                    var device = miniaudio.devices[i];
+                    if (device != null &&
+                        device.webaudio != null &&
+                        device.state === miniaudio.device_state.started) {
+
+                        device.webaudio.resume().then(() => {
+                            _ma_device__on_notification_unlocked(device.pDevice);
+                        }, (error) => {
+                            console.error("Failed to resume audiocontext", error);
+                        });
+                    }
+                }
+                miniaudio.unlock_event_types.map(function(event_type) {
+                    document.removeEventListener(event_type, miniaudio.unlock, true);
+                });
+            };
+
+            miniaudio.unlock_event_types.map(function(event_type) {
+                document.addEventListener(event_type, miniaudio.unlock, true);
+            });
+        }
+
+        window.miniaudio.referenceCount += 1;
+
+        return 1;
+    }, ma_device_type_playback, ma_device_type_capture, ma_device_type_duplex, ma_device_state_stopped, ma_device_state_started);
+
+    if (resultFromJS != 1) {
+        return MA_FAILED_TO_INIT_BACKEND;
+    }
+
+    pCallbacks->onContextInit = ma_context_init__webaudio;
+    pCallbacks->onContextUninit = ma_context_uninit__webaudio;
+    pCallbacks->onContextEnumerateDevices = ma_context_enumerate_devices__webaudio;
+    pCallbacks->onContextGetDeviceInfo = ma_context_get_device_info__webaudio;
+    pCallbacks->onDeviceInit = ma_device_init__webaudio;
+    pCallbacks->onDeviceUninit = ma_device_uninit__webaudio;
+    pCallbacks->onDeviceStart = ma_device_start__webaudio;
+    pCallbacks->onDeviceStop = ma_device_stop__webaudio;
+    pCallbacks->onDeviceRead = NULL;     /* Not needed because WebAudio is asynchronous. */
+    pCallbacks->onDeviceWrite = NULL;    /* Not needed because WebAudio is asynchronous. */
+    pCallbacks->onDeviceDataLoop = NULL; /* Not needed because WebAudio is asynchronous. */
+
+    return MA_SUCCESS;
+}
+#endif  /* MA_HAS_WEBAUDIO */
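
The `track_device`/`get_device_by_index` table above is the whole JS/C interop surface for this backend: C code only ever holds an integer index, and every EM_ASM block resolves that index back to the JS-side device object. As a minimal illustrative sketch of the pattern (the helper name is hypothetical; `EM_ASM_INT` is the standard Emscripten macro used throughout this backend):

    #include <emscripten.h>

    /* Hypothetical helper: query the JS-side state of a tracked device from C. */
    static int my_webaudio_device_state(int deviceIndex)
    {
        return EM_ASM_INT({
            var device = window.miniaudio.get_device_by_index($0);
            return (device != null) ? device.state : -1;  /* -1 if untracked. */
        }, deviceIndex);
    }
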
+
+
+
+static ma_bool32 ma__is_channel_map_valid(const ma_channel* pChannelMap, ma_uint32 channels)
+{
+    /* A blank channel map should be allowed, in which case it should use an appropriate default which will depend on context. */
+    if (pChannelMap != NULL && pChannelMap[0] != MA_CHANNEL_NONE) {
+        ma_uint32 iChannel;
+
+        if (channels == 0 || channels > MA_MAX_CHANNELS) {
+            return MA_FALSE; /* Channel count out of range. */
+        }
+
+        /* A channel cannot be present in the channel map more than once. */
+        for (iChannel = 0; iChannel < channels; ++iChannel) {
+            ma_uint32 jChannel;
+            for (jChannel = iChannel + 1; jChannel < channels; ++jChannel) {
+                if (pChannelMap[iChannel] == pChannelMap[jChannel]) {
+                    return MA_FALSE;
+                }
+            }
+        }
+    }
+
+    return MA_TRUE;
+}
+
+
+static ma_bool32 ma_context_is_backend_asynchronous(ma_context* pContext)
+{
+    MA_ASSERT(pContext != NULL);
+
+    if (pContext->callbacks.onDeviceRead == NULL && pContext->callbacks.onDeviceWrite == NULL) {
+        if (pContext->callbacks.onDeviceDataLoop == NULL) {
+            return MA_TRUE;
+        } else {
+            return MA_FALSE;
+        }
+    } else {
+        return MA_FALSE;
+    }
+}
+
+
+static ma_result ma_device__post_init_setup(ma_device* pDevice, ma_device_type deviceType)
+{
+    ma_result result;
+
+    MA_ASSERT(pDevice != NULL);
+
+    if (deviceType == ma_device_type_capture || deviceType == ma_device_type_duplex || deviceType == ma_device_type_loopback) {
+        if (pDevice->capture.format == ma_format_unknown) {
+            pDevice->capture.format = pDevice->capture.internalFormat;
+        }
+        if (pDevice->capture.channels == 0) {
+            pDevice->capture.channels = pDevice->capture.internalChannels;
+        }
+        if (pDevice->capture.channelMap[0] == MA_CHANNEL_NONE) {
+            MA_ASSERT(pDevice->capture.channels <= MA_MAX_CHANNELS);
+            if (pDevice->capture.internalChannels == pDevice->capture.channels) {
+                ma_channel_map_copy(pDevice->capture.channelMap, pDevice->capture.internalChannelMap, pDevice->capture.channels);
+            } else {
+                if (pDevice->capture.channelMixMode == ma_channel_mix_mode_simple) {
+                    ma_channel_map_init_blank(pDevice->capture.channelMap, pDevice->capture.channels);
+                } else {
+                    ma_channel_map_init_standard(ma_standard_channel_map_default, pDevice->capture.channelMap, ma_countof(pDevice->capture.channelMap), pDevice->capture.channels);
+                }
+            }
+        }
+    }
+
+    if (deviceType == ma_device_type_playback || deviceType == ma_device_type_duplex) {
+        if (pDevice->playback.format == ma_format_unknown) {
+            pDevice->playback.format = pDevice->playback.internalFormat;
+        }
+        if (pDevice->playback.channels == 0) {
+            pDevice->playback.channels = pDevice->playback.internalChannels;
+        }
+        if (pDevice->playback.channelMap[0] == MA_CHANNEL_NONE) {
+            MA_ASSERT(pDevice->playback.channels <= MA_MAX_CHANNELS);
+            if (pDevice->playback.internalChannels == pDevice->playback.channels) {
+                ma_channel_map_copy(pDevice->playback.channelMap, pDevice->playback.internalChannelMap, pDevice->playback.channels);
+            } else {
+                if (pDevice->playback.channelMixMode == ma_channel_mix_mode_simple) {
+                    ma_channel_map_init_blank(pDevice->playback.channelMap, pDevice->playback.channels);
+                } else {
+                    ma_channel_map_init_standard(ma_standard_channel_map_default, pDevice->playback.channelMap, ma_countof(pDevice->playback.channelMap), pDevice->playback.channels);
+                }
+            }
+        }
+    }
+
+    if (pDevice->sampleRate == 0) {
+        if (deviceType == ma_device_type_capture || deviceType == ma_device_type_duplex || deviceType == ma_device_type_loopback) {
+            pDevice->sampleRate = pDevice->capture.internalSampleRate;
+        } else {
+            pDevice->sampleRate = pDevice->playback.internalSampleRate;
+        }
+    }
+
+    /* Data converters. */
+    if (deviceType == ma_device_type_capture || deviceType == ma_device_type_duplex || deviceType == ma_device_type_loopback) {
+        /* Converting from internal device format to client format. */
+        ma_data_converter_config converterConfig = ma_data_converter_config_init_default();
+        converterConfig.formatIn = pDevice->capture.internalFormat;
+        converterConfig.channelsIn = pDevice->capture.internalChannels;
+        converterConfig.sampleRateIn = pDevice->capture.internalSampleRate;
+        converterConfig.pChannelMapIn = pDevice->capture.internalChannelMap;
+        converterConfig.formatOut = pDevice->capture.format;
+        converterConfig.channelsOut = pDevice->capture.channels;
+        converterConfig.sampleRateOut = pDevice->sampleRate;
+        converterConfig.pChannelMapOut = pDevice->capture.channelMap;
+        converterConfig.channelMixMode = pDevice->capture.channelMixMode;
+        converterConfig.calculateLFEFromSpatialChannels = pDevice->capture.calculateLFEFromSpatialChannels;
+        converterConfig.allowDynamicSampleRate = MA_FALSE;
+        converterConfig.resampling.algorithm = pDevice->resampling.algorithm;
+        converterConfig.resampling.linear.lpfOrder = pDevice->resampling.linear.lpfOrder;
+        converterConfig.resampling.pBackendVTable = pDevice->resampling.pBackendVTable;
+        converterConfig.resampling.pBackendUserData = pDevice->resampling.pBackendUserData;
+
+        /* Make sure the old converter is uninitialized first. */
+        if (ma_device_get_state(pDevice) != ma_device_state_uninitialized) {
+            ma_data_converter_uninit(&pDevice->capture.converter, &pDevice->pContext->allocationCallbacks);
+        }
+
+        result = ma_data_converter_init(&converterConfig, &pDevice->pContext->allocationCallbacks, &pDevice->capture.converter);
+        if (result != MA_SUCCESS) {
+            return result;
+        }
+    }
+
+    if (deviceType == ma_device_type_playback || deviceType == ma_device_type_duplex) {
+        /* Converting from client format to device format. */
+        ma_data_converter_config converterConfig = ma_data_converter_config_init_default();
+        converterConfig.formatIn = pDevice->playback.format;
+        converterConfig.channelsIn = pDevice->playback.channels;
+        converterConfig.sampleRateIn = pDevice->sampleRate;
+        converterConfig.pChannelMapIn = pDevice->playback.channelMap;
+        converterConfig.formatOut = pDevice->playback.internalFormat;
+        converterConfig.channelsOut = pDevice->playback.internalChannels;
+        converterConfig.sampleRateOut = pDevice->playback.internalSampleRate;
+        converterConfig.pChannelMapOut = pDevice->playback.internalChannelMap;
+        converterConfig.channelMixMode = pDevice->playback.channelMixMode;
+        converterConfig.calculateLFEFromSpatialChannels = pDevice->playback.calculateLFEFromSpatialChannels;
+        converterConfig.allowDynamicSampleRate = MA_FALSE;
+        converterConfig.resampling.algorithm = pDevice->resampling.algorithm;
+        converterConfig.resampling.linear.lpfOrder = pDevice->resampling.linear.lpfOrder;
+        converterConfig.resampling.pBackendVTable = pDevice->resampling.pBackendVTable;
+        converterConfig.resampling.pBackendUserData = pDevice->resampling.pBackendUserData;
+
+        /* Make sure the old converter is uninitialized first. */
+        if (ma_device_get_state(pDevice) != ma_device_state_uninitialized) {
+            ma_data_converter_uninit(&pDevice->playback.converter, &pDevice->pContext->allocationCallbacks);
+        }
+
+        result = ma_data_converter_init(&converterConfig, &pDevice->pContext->allocationCallbacks, &pDevice->playback.converter);
+        if (result != MA_SUCCESS) {
+            return result;
+        }
+    }
+
+
+    /*
+    If the device is doing playback (ma_device_type_playback or ma_device_type_duplex), there are
+    a couple of situations where we'll need a heap allocated cache.
+
+    The first is a duplex device for backends that use a callback for data delivery. The reason
+    this is needed is that the input stage needs to have a buffer to place the input data while it
+    waits for the playback stage, after which the miniaudio data callback will get fired. This is
+    not needed for backends that use a blocking API because miniaudio manages temporary buffers on
+    the stack to achieve this.
+
+    The other situation is when the data converter does not have the ability to query the number
+    of input frames that are required in order to process a given number of output frames. When
+    performing data conversion, it's useful if miniaudio knows exactly how many frames it needs
+    from the client in order to generate a given number of output frames. This way, only exactly
+    the required number of frames needs to be read from the client, which means no cache is
+    necessary. On the other hand, if miniaudio doesn't know how many frames to read, it is forced
+    to read in fixed sized chunks and then cache any residual unused input frames, which will be
+    processed at a later stage.
+    */
+    if (deviceType == ma_device_type_playback || deviceType == ma_device_type_duplex) {
+        ma_uint64 unused;
+
+        pDevice->playback.inputCacheConsumed = 0;
+        pDevice->playback.inputCacheRemaining = 0;
+
+        if (pDevice->type == ma_device_type_duplex || /* Duplex. The backend may decide to use ma_device_handle_backend_data_callback() which will require this cache. */
+            ma_data_converter_get_required_input_frame_count(&pDevice->playback.converter, 1, &unused) != MA_SUCCESS) /* Data conversion required input frame calculation not supported. */
+        {
+            /* We need a heap allocated cache. We want to size this based on the period size. */
+            void* pNewInputCache;
+            ma_uint64 newInputCacheCap;
+            ma_uint64 newInputCacheSizeInBytes;
+
+            newInputCacheCap = ma_calculate_frame_count_after_resampling(pDevice->playback.internalSampleRate, pDevice->sampleRate, pDevice->playback.internalPeriodSizeInFrames);
+
+            newInputCacheSizeInBytes = newInputCacheCap * ma_get_bytes_per_frame(pDevice->playback.format, pDevice->playback.channels);
+            if (newInputCacheSizeInBytes > MA_SIZE_MAX) {
+                ma_free(pDevice->playback.pInputCache, &pDevice->pContext->allocationCallbacks);
+                pDevice->playback.pInputCache = NULL;
+                pDevice->playback.inputCacheCap = 0;
+                return MA_OUT_OF_MEMORY; /* Allocation too big. Should never hit this, but makes the cast below safer for 32-bit builds. */
+            }
+
+            pNewInputCache = ma_realloc(pDevice->playback.pInputCache, (size_t)newInputCacheSizeInBytes, &pDevice->pContext->allocationCallbacks);
+            if (pNewInputCache == NULL) {
+                ma_free(pDevice->playback.pInputCache, &pDevice->pContext->allocationCallbacks);
+                pDevice->playback.pInputCache = NULL;
+                pDevice->playback.inputCacheCap = 0;
+                return MA_OUT_OF_MEMORY;
+            }
+
+            pDevice->playback.pInputCache = pNewInputCache;
+            pDevice->playback.inputCacheCap = newInputCacheCap;
+        } else {
+            /* Heap allocation not required. Make sure we clear out the old cache just in case this function was called in response to a route change. */
+            ma_free(pDevice->playback.pInputCache, &pDevice->pContext->allocationCallbacks);
+            pDevice->playback.pInputCache = NULL;
+            pDevice->playback.inputCacheCap = 0;
+        }
+    }
+
+    return MA_SUCCESS;
+}
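
The query that decides between the two branches above can be illustrated in isolation. A minimal sketch (the frame count is hypothetical; the converter comes from the setup earlier in this function):

    /* Ask the converter how many client-side input frames are needed to produce
       1024 output frames. If this query is supported, no input cache is required. */
    ma_uint64 requiredInputFrames;
    if (ma_data_converter_get_required_input_frame_count(&pDevice->playback.converter, 1024, &requiredInputFrames) == MA_SUCCESS) {
        /* Read exactly requiredInputFrames from the client - nothing needs to be cached. */
    }
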
+
+MA_API ma_result ma_device_post_init(ma_device* pDevice, ma_device_type deviceType, const ma_device_descriptor* pDescriptorPlayback, const ma_device_descriptor* pDescriptorCapture)
+{
+    ma_result result;
+
+    if (pDevice == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    /* Capture. */
+    if (deviceType == ma_device_type_capture || deviceType == ma_device_type_duplex || deviceType == ma_device_type_loopback) {
+        if (ma_device_descriptor_is_valid(pDescriptorCapture) == MA_FALSE) {
+            return MA_INVALID_ARGS;
+        }
+
+        pDevice->capture.internalFormat = pDescriptorCapture->format;
+        pDevice->capture.internalChannels = pDescriptorCapture->channels;
+        pDevice->capture.internalSampleRate = pDescriptorCapture->sampleRate;
+        MA_COPY_MEMORY(pDevice->capture.internalChannelMap, pDescriptorCapture->channelMap, sizeof(pDescriptorCapture->channelMap));
+        pDevice->capture.internalPeriodSizeInFrames = pDescriptorCapture->periodSizeInFrames;
+        pDevice->capture.internalPeriods = pDescriptorCapture->periodCount;
+
+        if (pDevice->capture.internalPeriodSizeInFrames == 0) {
+            pDevice->capture.internalPeriodSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(pDescriptorCapture->periodSizeInMilliseconds, pDescriptorCapture->sampleRate);
+        }
+    }
+
+    /* Playback. */
+    if (deviceType == ma_device_type_playback || deviceType == ma_device_type_duplex) {
+        if (ma_device_descriptor_is_valid(pDescriptorPlayback) == MA_FALSE) {
+            return MA_INVALID_ARGS;
+        }
+
+        pDevice->playback.internalFormat = pDescriptorPlayback->format;
+        pDevice->playback.internalChannels = pDescriptorPlayback->channels;
+        pDevice->playback.internalSampleRate = pDescriptorPlayback->sampleRate;
+        MA_COPY_MEMORY(pDevice->playback.internalChannelMap, pDescriptorPlayback->channelMap, sizeof(pDescriptorPlayback->channelMap));
+        pDevice->playback.internalPeriodSizeInFrames = pDescriptorPlayback->periodSizeInFrames;
+        pDevice->playback.internalPeriods = pDescriptorPlayback->periodCount;
+
+        if (pDevice->playback.internalPeriodSizeInFrames == 0) {
+            pDevice->playback.internalPeriodSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(pDescriptorPlayback->periodSizeInMilliseconds, pDescriptorPlayback->sampleRate);
+        }
+    }
+
+    /*
+    The name of the device can be retrieved from device info. This may be temporary and replaced with a `ma_device_get_info(pDevice, deviceType)` instead.
+    For loopback devices, we need to retrieve the name of the playback device.
+    */
+    {
+        ma_device_info deviceInfo;
+
+        if (deviceType == ma_device_type_capture || deviceType == ma_device_type_duplex || deviceType == ma_device_type_loopback) {
+            result = ma_device_get_info(pDevice, ma_device_type_capture, &deviceInfo);
+            if (result == MA_SUCCESS) {
+                ma_strncpy_s(pDevice->capture.name, sizeof(pDevice->capture.name), deviceInfo.name, (size_t)-1);
+            } else {
+                /* We failed to retrieve the device info. Fall back to a default name. */
+                if (pDescriptorCapture->pDeviceID == NULL) {
+                    ma_strncpy_s(pDevice->capture.name, sizeof(pDevice->capture.name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1);
+                } else {
+                    ma_strncpy_s(pDevice->capture.name, sizeof(pDevice->capture.name), "Capture Device", (size_t)-1);
+                }
+            }
+        }
+
+        if (deviceType == ma_device_type_playback || deviceType == ma_device_type_duplex) {
+            result = ma_device_get_info(pDevice, ma_device_type_playback, &deviceInfo);
+            if (result == MA_SUCCESS) {
+                ma_strncpy_s(pDevice->playback.name, sizeof(pDevice->playback.name), deviceInfo.name, (size_t)-1);
+            } else {
+                /* We failed to retrieve the device info. Fall back to a default name. */
+                if (pDescriptorPlayback->pDeviceID == NULL) {
+                    ma_strncpy_s(pDevice->playback.name, sizeof(pDevice->playback.name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1);
+                } else {
+                    ma_strncpy_s(pDevice->playback.name, sizeof(pDevice->playback.name), "Playback Device", (size_t)-1);
+                }
+            }
+        }
+    }
+
+    /* Update data conversion. */
+    return ma_device__post_init_setup(pDevice, deviceType); /* TODO: Should probably rename ma_device__post_init_setup() to something better. */
+}
+
+
+static ma_thread_result MA_THREADCALL ma_worker_thread(void* pData)
+{
+    ma_device* pDevice = (ma_device*)pData;
+#ifdef MA_WIN32
+    HRESULT CoInitializeResult;
+#endif
+
+    MA_ASSERT(pDevice != NULL);
+
+#ifdef MA_WIN32
+    CoInitializeResult = ma_CoInitializeEx(pDevice->pContext, NULL, MA_COINIT_VALUE);
+#endif
+
+    /*
+    When the device is being initialized its initial state is set to ma_device_state_uninitialized. Before returning from
+    ma_device_init(), the state needs to be set to something valid. In miniaudio the device's default state immediately
+    after initialization is stopped, so we need to mark the device as such. miniaudio will wait on the worker
+    thread to signal an event to know when the worker thread is ready for action.
+    */
+    ma_device__set_state(pDevice, ma_device_state_stopped);
+    ma_event_signal(&pDevice->stopEvent);
+
+    for (;;) {  /* <-- This loop just keeps the thread alive. The main audio loop is inside. */
+        ma_result startResult;
+        ma_result stopResult;   /* <-- This will store the result from onDeviceStop(). If it returns an error, we don't fire the stopped notification callback. */
+
+        /* We wait on an event to know when something has requested that the device be started and the main loop entered. */
+        ma_event_wait(&pDevice->wakeupEvent);
+
+        /* Default result code. */
+        pDevice->workResult = MA_SUCCESS;
+
+        /* If the reason for the wake up is that we are terminating, just break from the loop. */
+        if (ma_device_get_state(pDevice) == ma_device_state_uninitialized) {
+            break;
+        }
+
+        /*
+        Getting to this point means the device is wanting to get started. The function that has requested that the device
+        be started will be waiting on an event (pDevice->startEvent) which means we need to make sure we signal the event
+        in both the success and error case. It's important that the state of the device is set _before_ signaling the event.
+        */
+        MA_ASSERT(ma_device_get_state(pDevice) == ma_device_state_starting);
+
+        /* If the device has a start callback, start it now. */
+        if (pDevice->pContext->callbacks.onDeviceStart != NULL) {
+            startResult = pDevice->pContext->callbacks.onDeviceStart(pDevice);
+        } else {
+            startResult = MA_SUCCESS;
+        }
+
+        /*
+        If starting was not successful we'll need to loop back to the start and wait for something
+        to happen (pDevice->wakeupEvent).
+        */
+        if (startResult != MA_SUCCESS) {
+            pDevice->workResult = startResult;
+            ma_event_signal(&pDevice->startEvent); /* <-- Always signal the start event so ma_device_start() can return as it'll be waiting on it. */
+            continue;
+        }
+
+        /* Make sure the state is set appropriately. */
+        ma_device__set_state(pDevice, ma_device_state_started); /* <-- Set this before signaling the event so that the state is always guaranteed to be good after ma_device_start() has returned. */
+        ma_event_signal(&pDevice->startEvent);
+
+        ma_device__on_notification_started(pDevice);
+
+        if (pDevice->pContext->callbacks.onDeviceDataLoop != NULL) {
+            pDevice->pContext->callbacks.onDeviceDataLoop(pDevice);
+        } else {
+            /* The backend is not using a custom main loop implementation, so now fall back to the blocking read-write implementation. */
+            ma_device_audio_thread__default_read_write(pDevice);
+        }
+
+        /* Getting here means we have broken from the main loop, which happens when the application has requested that the device be stopped. */
+        if (pDevice->pContext->callbacks.onDeviceStop != NULL) {
+            stopResult = pDevice->pContext->callbacks.onDeviceStop(pDevice);
+        } else {
+            stopResult = MA_SUCCESS; /* No stop callback with the backend. Just assume successful. */
+        }
+
+        /*
+        After the device has stopped, make sure an event is posted. Don't post a stopped event if
+        stopping failed. This can happen on some backends when the underlying stream has been
+        stopped due to the device being physically unplugged or disabled via an OS setting.
+        */
+        if (stopResult == MA_SUCCESS) {
+            ma_device__on_notification_stopped(pDevice);
+        }
+
+        /* If we stopped because the device has been uninitialized, abort now. */
+        if (ma_device_get_state(pDevice) == ma_device_state_uninitialized) {
+            break;
+        }
+
+        /* A function somewhere is waiting for the device to have stopped for real so we need to signal an event to allow it to continue. */
+        ma_device__set_state(pDevice, ma_device_state_stopped);
+        ma_event_signal(&pDevice->stopEvent);
+    }
+
+#ifdef MA_WIN32
+    if (CoInitializeResult == S_OK) {
+        ma_CoUninitialize(pDevice->pContext);
+    }
+#endif
+
+    return (ma_thread_result)0;
+}
+
+
+/* Helper for determining whether or not the given device is initialized. */
+static ma_bool32 ma_device__is_initialized(ma_device* pDevice)
+{
+    if (pDevice == NULL) {
+        return MA_FALSE;
+    }
+
+    return ma_device_get_state(pDevice) != ma_device_state_uninitialized;
+}
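
For orientation, the other half of the start handshake described in the worker thread above looks roughly like this from the calling thread's side. This is a sketch assembled from the comments above, not the actual ma_device_start() body:

    /* Hypothetical caller-side sketch of the start handshake. */
    static ma_result my_start_device(ma_device* pDevice)
    {
        ma_device__set_state(pDevice, ma_device_state_starting);
        ma_event_signal(&pDevice->wakeupEvent); /* Wake the worker thread. */
        ma_event_wait(&pDevice->startEvent);    /* Wait for it to attempt the start. */
        return pDevice->workResult;             /* MA_SUCCESS, or the error from onDeviceStart(). */
    }
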
+
+
+#ifdef MA_WIN32
+static ma_result ma_context_uninit_backend_apis__win32(ma_context* pContext)
+{
+    /* For some reason UWP complains when CoUninitialize() is called. I'm just not going to call it on UWP. */
+#if defined(MA_WIN32_DESKTOP) || defined(MA_WIN32_GDK)
+    if (pContext->win32.CoInitializeResult == S_OK) {
+        ma_CoUninitialize(pContext);
+    }
+
+    #if defined(MA_WIN32_DESKTOP)
+        ma_dlclose(ma_context_get_log(pContext), pContext->win32.hUser32DLL);
+        ma_dlclose(ma_context_get_log(pContext), pContext->win32.hAdvapi32DLL);
+    #endif
+
+    ma_dlclose(ma_context_get_log(pContext), pContext->win32.hOle32DLL);
+#else
+    (void)pContext;
+#endif
+
+    return MA_SUCCESS;
+}
+
+static ma_result ma_context_init_backend_apis__win32(ma_context* pContext)
+{
+#if defined(MA_WIN32_DESKTOP) || defined(MA_WIN32_GDK)
+    #if defined(MA_WIN32_DESKTOP)
+    /* User32.dll */
+    pContext->win32.hUser32DLL = ma_dlopen(ma_context_get_log(pContext), "user32.dll");
+    if (pContext->win32.hUser32DLL == NULL) {
+        return MA_FAILED_TO_INIT_BACKEND;
+    }
+
+    pContext->win32.GetForegroundWindow = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->win32.hUser32DLL, "GetForegroundWindow");
+    pContext->win32.GetDesktopWindow = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->win32.hUser32DLL, "GetDesktopWindow");
+
+
+    /* Advapi32.dll */
+    pContext->win32.hAdvapi32DLL = ma_dlopen(ma_context_get_log(pContext), "advapi32.dll");
+    if (pContext->win32.hAdvapi32DLL == NULL) {
+        return MA_FAILED_TO_INIT_BACKEND;
+    }
+
+    pContext->win32.RegOpenKeyExA = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->win32.hAdvapi32DLL, "RegOpenKeyExA");
+    pContext->win32.RegCloseKey = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->win32.hAdvapi32DLL, "RegCloseKey");
+    pContext->win32.RegQueryValueExA = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->win32.hAdvapi32DLL, "RegQueryValueExA");
+    #endif
+
+    /* Ole32.dll */
+    pContext->win32.hOle32DLL = ma_dlopen(ma_context_get_log(pContext), "ole32.dll");
+    if (pContext->win32.hOle32DLL == NULL) {
+        return MA_FAILED_TO_INIT_BACKEND;
+    }
+
+    pContext->win32.CoInitialize = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->win32.hOle32DLL, "CoInitialize");
+    pContext->win32.CoInitializeEx = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->win32.hOle32DLL, "CoInitializeEx");
+    pContext->win32.CoUninitialize = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->win32.hOle32DLL, "CoUninitialize");
+    pContext->win32.CoCreateInstance = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->win32.hOle32DLL, "CoCreateInstance");
+    pContext->win32.CoTaskMemFree = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->win32.hOle32DLL, "CoTaskMemFree");
+    pContext->win32.PropVariantClear = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->win32.hOle32DLL, "PropVariantClear");
+    pContext->win32.StringFromGUID2 = (ma_proc)ma_dlsym(ma_context_get_log(pContext), pContext->win32.hOle32DLL, "StringFromGUID2");
+#else
+    (void)pContext; /* Unused. */
+#endif
+
+    pContext->win32.CoInitializeResult = ma_CoInitializeEx(pContext, NULL, MA_COINIT_VALUE);
+    return MA_SUCCESS;
+}
+#else
+static ma_result ma_context_uninit_backend_apis__nix(ma_context* pContext)
+{
+    (void)pContext;
+
+    return MA_SUCCESS;
+}
+
+static ma_result ma_context_init_backend_apis__nix(ma_context* pContext)
+{
+    (void)pContext;
+
+    return MA_SUCCESS;
+}
+#endif
+
+static ma_result ma_context_init_backend_apis(ma_context* pContext)
+{
+    ma_result result;
+#ifdef MA_WIN32
+    result = ma_context_init_backend_apis__win32(pContext);
+#else
+    result = ma_context_init_backend_apis__nix(pContext);
+#endif
+
+    return result;
+}
+
+static ma_result ma_context_uninit_backend_apis(ma_context* pContext)
+{
+    ma_result result;
+#ifdef MA_WIN32
+    result = ma_context_uninit_backend_apis__win32(pContext);
+#else
+    result = ma_context_uninit_backend_apis__nix(pContext);
+#endif
+
+    return result;
+}
+
+
+/* The default capacity doesn't need to be too big. */
+#ifndef MA_DEFAULT_DEVICE_JOB_QUEUE_CAPACITY
+#define MA_DEFAULT_DEVICE_JOB_QUEUE_CAPACITY 32
+#endif
+
+MA_API ma_device_job_thread_config ma_device_job_thread_config_init(void)
+{
+    ma_device_job_thread_config config;
+
+    MA_ZERO_OBJECT(&config);
+    config.noThread = MA_FALSE;
+    config.jobQueueCapacity = MA_DEFAULT_DEVICE_JOB_QUEUE_CAPACITY;
+    config.jobQueueFlags = 0;
+
+    return config;
+}
+
+
+static ma_thread_result MA_THREADCALL ma_device_job_thread_entry(void* pUserData)
+{
+    ma_device_job_thread* pJobThread = (ma_device_job_thread*)pUserData;
+    MA_ASSERT(pJobThread != NULL);
+
+    for (;;) {
+        ma_result result;
+        ma_job job;
+
+        result = ma_device_job_thread_next(pJobThread, &job);
+        if (result != MA_SUCCESS) {
+            break;
+        }
+
+        if (job.toc.breakup.code == MA_JOB_TYPE_QUIT) {
+            break;
+        }
+
+        ma_job_process(&job);
+    }
+
+    return (ma_thread_result)0;
+}
+
+MA_API ma_result ma_device_job_thread_init(const ma_device_job_thread_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_device_job_thread* pJobThread)
+{
+    ma_result result;
+    ma_job_queue_config jobQueueConfig;
+
+    if (pJobThread == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    MA_ZERO_OBJECT(pJobThread);
+
+    if (pConfig == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+
+    /* Initialize the job queue before the thread to ensure it's in a valid state. */
+    jobQueueConfig = ma_job_queue_config_init(pConfig->jobQueueFlags, pConfig->jobQueueCapacity);
+
+    result = ma_job_queue_init(&jobQueueConfig, pAllocationCallbacks, &pJobThread->jobQueue);
+    if (result != MA_SUCCESS) {
+        return result; /* Failed to initialize job queue. */
+    }
+
+
+    /* The thread needs to be initialized after the job queue to ensure the thread doesn't try to access it prematurely. */
+    if (pConfig->noThread == MA_FALSE) {
+        result = ma_thread_create(&pJobThread->thread, ma_thread_priority_normal, 0, ma_device_job_thread_entry, pJobThread, pAllocationCallbacks);
+        if (result != MA_SUCCESS) {
+            ma_job_queue_uninit(&pJobThread->jobQueue, pAllocationCallbacks);
+            return result; /* Failed to create the job thread. */
+        }
+
+        pJobThread->_hasThread = MA_TRUE;
+    } else {
+        pJobThread->_hasThread = MA_FALSE;
+    }
+
+
+    return MA_SUCCESS;
+}
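
A minimal usage sketch of the job thread API defined here (hypothetical caller; NULL selects the default allocation callbacks, error handling elided):

    ma_device_job_thread jobThread;
    ma_device_job_thread_config config = ma_device_job_thread_config_init();

    if (ma_device_job_thread_init(&config, NULL, &jobThread) == MA_SUCCESS) {
        /* Producers call ma_device_job_thread_post(); the thread drains the queue via
           ma_device_job_thread_next() until the MA_JOB_TYPE_QUIT job arrives. */
        ma_device_job_thread_uninit(&jobThread, NULL); /* Posts the quit job and joins. */
    }
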
+
+MA_API void ma_device_job_thread_uninit(ma_device_job_thread* pJobThread, const ma_allocation_callbacks* pAllocationCallbacks)
+{
+    if (pJobThread == NULL) {
+        return;
+    }
+
+    /* The first thing to do is post a quit message to the job queue. If we're using a thread we'll need to wait for it. */
+    {
+        ma_job job = ma_job_init(MA_JOB_TYPE_QUIT);
+        ma_device_job_thread_post(pJobThread, &job);
+    }
+
+    /* Wait for the thread to terminate naturally. */
+    if (pJobThread->_hasThread) {
+        ma_thread_wait(&pJobThread->thread);
+    }
+
+    /* At this point the thread should be terminated so we can safely uninitialize the job queue. */
+    ma_job_queue_uninit(&pJobThread->jobQueue, pAllocationCallbacks);
+}
+
+MA_API ma_result ma_device_job_thread_post(ma_device_job_thread* pJobThread, const ma_job* pJob)
+{
+    if (pJobThread == NULL || pJob == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    return ma_job_queue_post(&pJobThread->jobQueue, pJob);
+}
+
+MA_API ma_result ma_device_job_thread_next(ma_device_job_thread* pJobThread, ma_job* pJob)
+{
+    if (pJob == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    MA_ZERO_OBJECT(pJob);
+
+    if (pJobThread == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    return ma_job_queue_next(&pJobThread->jobQueue, pJob);
+}
+
+
+MA_API ma_bool32 ma_device_id_equal(const ma_device_id* pA, const ma_device_id* pB)
+{
+    size_t i;
+
+    if (pA == NULL || pB == NULL) {
+        return MA_FALSE;
+    }
+
+    for (i = 0; i < sizeof(ma_device_id); i += 1) {
+        if (((const char*)pA)[i] != ((const char*)pB)[i]) {
+            return MA_FALSE;
+        }
+    }
+
+    return MA_TRUE;
+}
+
+
+
+MA_API ma_context_config ma_context_config_init(void)
+{
+    ma_context_config config;
+    MA_ZERO_OBJECT(&config);
+
+    return config;
+}
+
+MA_API ma_result ma_context_init(const ma_backend backends[], ma_uint32 backendCount, const ma_context_config* pConfig, ma_context* pContext)
+{
+    ma_result result;
+    ma_context_config defaultConfig;
+    ma_backend defaultBackends[ma_backend_null+1];
+    ma_uint32 iBackend;
+    ma_backend* pBackendsToIterate;
+    ma_uint32 backendsToIterateCount;
+
+    if (pContext == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    MA_ZERO_OBJECT(pContext);
+
+    /* Always make sure the config is set first to ensure properties are available as soon as possible. */
+    if (pConfig == NULL) {
+        defaultConfig = ma_context_config_init();
+        pConfig = &defaultConfig;
+    }
+
+    /* Allocation callbacks need to come first because they'll be passed around to other areas. */
+    result = ma_allocation_callbacks_init_copy(&pContext->allocationCallbacks, &pConfig->allocationCallbacks);
+    if (result != MA_SUCCESS) {
+        return result;
+    }
+
+    /* Get the log set up first so we can start logging ASAP. */
+    if (pConfig->pLog != NULL) {
+        pContext->pLog = pConfig->pLog;
+    } else {
+        result = ma_log_init(&pContext->allocationCallbacks, &pContext->log);
+        if (result == MA_SUCCESS) {
+            pContext->pLog = &pContext->log;
+        } else {
+            pContext->pLog = NULL; /* Logging is not available. */
+        }
+    }
+
+    pContext->threadPriority = pConfig->threadPriority;
+    pContext->threadStackSize = pConfig->threadStackSize;
+    pContext->pUserData = pConfig->pUserData;
+
+    /* Backend APIs need to be initialized first. This is where external libraries will be loaded and linked. */
+    result = ma_context_init_backend_apis(pContext);
+    if (result != MA_SUCCESS) {
+        return result;
+    }
+
+    for (iBackend = 0; iBackend <= ma_backend_null; ++iBackend) {
+        defaultBackends[iBackend] = (ma_backend)iBackend;
+    }
+
+    pBackendsToIterate = (ma_backend*)backends;
+    backendsToIterateCount = backendCount;
+    if (pBackendsToIterate == NULL) {
+        pBackendsToIterate = (ma_backend*)defaultBackends;
+        backendsToIterateCount = ma_countof(defaultBackends);
+    }
+
+    MA_ASSERT(pBackendsToIterate != NULL);
+
+    for (iBackend = 0; iBackend < backendsToIterateCount; iBackend += 1) {
+        ma_backend backend = pBackendsToIterate[iBackend];
+
+        /* Make sure all callbacks are reset so we don't accidentally drag in any from previously failed initialization attempts. */
+        MA_ZERO_OBJECT(&pContext->callbacks);
+
+        /* These backends are using the new callback system. */
+        switch (backend) {
+        #ifdef MA_HAS_WASAPI
+            case ma_backend_wasapi:
+            {
+                pContext->callbacks.onContextInit = ma_context_init__wasapi;
+            } break;
+        #endif
+        #ifdef MA_HAS_DSOUND
+            case ma_backend_dsound:
+            {
+                pContext->callbacks.onContextInit = ma_context_init__dsound;
+            } break;
+        #endif
+        #ifdef MA_HAS_WINMM
+            case ma_backend_winmm:
+            {
+                pContext->callbacks.onContextInit = ma_context_init__winmm;
+            } break;
+        #endif
+        #ifdef MA_HAS_COREAUDIO
+            case ma_backend_coreaudio:
+            {
+                pContext->callbacks.onContextInit = ma_context_init__coreaudio;
+            } break;
+        #endif
+        #ifdef MA_HAS_SNDIO
+            case ma_backend_sndio:
+            {
+                pContext->callbacks.onContextInit = ma_context_init__sndio;
+            } break;
+        #endif
+        #ifdef MA_HAS_AUDIO4
+            case ma_backend_audio4:
+            {
+                pContext->callbacks.onContextInit = ma_context_init__audio4;
+            } break;
+        #endif
+        #ifdef MA_HAS_OSS
+            case ma_backend_oss:
+            {
+                pContext->callbacks.onContextInit = ma_context_init__oss;
+            } break;
+        #endif
+        #ifdef MA_HAS_PULSEAUDIO
+            case ma_backend_pulseaudio:
+            {
+                pContext->callbacks.onContextInit = ma_context_init__pulse;
+            } break;
+        #endif
+        #ifdef MA_HAS_ALSA
+            case ma_backend_alsa:
+            {
+                pContext->callbacks.onContextInit = ma_context_init__alsa;
+            } break;
+        #endif
+        #ifdef MA_HAS_JACK
+            case ma_backend_jack:
+            {
+                pContext->callbacks.onContextInit = ma_context_init__jack;
+            } break;
+        #endif
+        #ifdef MA_HAS_AAUDIO
+            case ma_backend_aaudio:
+            {
+                if (ma_is_backend_enabled(backend)) {
+                    pContext->callbacks.onContextInit = ma_context_init__aaudio;
+                }
+            } break;
+        #endif
+        #ifdef MA_HAS_OPENSL
+            case ma_backend_opensl:
+            {
+                if (ma_is_backend_enabled(backend)) {
+                    pContext->callbacks.onContextInit = ma_context_init__opensl;
+                }
+            } break;
+        #endif
+        #ifdef MA_HAS_WEBAUDIO
+            case ma_backend_webaudio:
+            {
+                pContext->callbacks.onContextInit = ma_context_init__webaudio;
+            } break;
+        #endif
+        #ifdef MA_HAS_CUSTOM
+            case ma_backend_custom:
+            {
+                /* Slightly different logic for custom backends. Custom backends can optionally set all of their callbacks in the config. */
+                pContext->callbacks = pConfig->custom;
+            } break;
+        #endif
+        #ifdef MA_HAS_NULL
+            case ma_backend_null:
+            {
+                pContext->callbacks.onContextInit = ma_context_init__null;
+            } break;
+        #endif
+
+            default: break;
+        }
+
+        if (pContext->callbacks.onContextInit != NULL) {
+            ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_DEBUG, "Attempting to initialize %s backend...\n", ma_get_backend_name(backend));
+            result = pContext->callbacks.onContextInit(pContext, pConfig, &pContext->callbacks);
+        } else {
+            /* Getting here means the onContextInit callback is not set, which means the backend is not enabled. Special case for the custom backend. */
+            if (backend != ma_backend_custom) {
+                result = MA_BACKEND_NOT_ENABLED;
+            } else {
+            #if !defined(MA_HAS_CUSTOM)
+                result = MA_BACKEND_NOT_ENABLED;
+            #else
+                result = MA_NO_BACKEND;
+            #endif
+            }
+        }
+
+        /* If this iteration was successful, return. */
+        if (result == MA_SUCCESS) {
+            result = ma_mutex_init(&pContext->deviceEnumLock);
+            if (result != MA_SUCCESS) {
+                ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_WARNING, "Failed to initialize mutex for device enumeration. ma_context_get_devices() is not thread safe.\n");
+            }
+
+            result = ma_mutex_init(&pContext->deviceInfoLock);
+            if (result != MA_SUCCESS) {
+                ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_WARNING, "Failed to initialize mutex for device info retrieval. ma_context_get_device_info() is not thread safe.\n");
+            }
+
+            ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_DEBUG, "System Architecture:\n");
+            ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_DEBUG, "  Endian: %s\n", ma_is_little_endian() ? "LE" : "BE");
+            ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_DEBUG, "  SSE2:   %s\n", ma_has_sse2() ? "YES" : "NO");
+            ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_DEBUG, "  AVX2:   %s\n", ma_has_avx2() ? "YES" : "NO");
+            ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_DEBUG, "  NEON:   %s\n", ma_has_neon() ? "YES" : "NO");
+
+            pContext->backend = backend;
+            return result;
+        } else {
+            if (result == MA_BACKEND_NOT_ENABLED) {
+                ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_DEBUG, "%s backend is disabled.\n", ma_get_backend_name(backend));
+            } else {
+                ma_log_postf(ma_context_get_log(pContext), MA_LOG_LEVEL_DEBUG, "Failed to initialize %s backend.\n", ma_get_backend_name(backend));
+            }
+        }
+    }
+
+    /* If we get here it means an error occurred. */
+    MA_ZERO_OBJECT(pContext); /* Safety. */
+    return MA_NO_BACKEND;
+}
+
+MA_API ma_result ma_context_uninit(ma_context* pContext)
+{
+    if (pContext == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    if (pContext->callbacks.onContextUninit != NULL) {
+        pContext->callbacks.onContextUninit(pContext);
+    }
+
+    ma_mutex_uninit(&pContext->deviceEnumLock);
+    ma_mutex_uninit(&pContext->deviceInfoLock);
+    ma_free(pContext->pDeviceInfos, &pContext->allocationCallbacks);
+    ma_context_uninit_backend_apis(pContext);
+
+    if (pContext->pLog == &pContext->log) {
+        ma_log_uninit(&pContext->log);
+    }
+
+    return MA_SUCCESS;
+}
+
+MA_API size_t ma_context_sizeof(void)
+{
+    return sizeof(ma_context);
+}
+
+
+MA_API ma_log* ma_context_get_log(ma_context* pContext)
+{
+    if (pContext == NULL) {
+        return NULL;
+    }
+
+    return pContext->pLog;
+}
+
+
+MA_API ma_result ma_context_enumerate_devices(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData)
+{
+    ma_result result;
+
+    if (pContext == NULL || callback == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    if (pContext->callbacks.onContextEnumerateDevices == NULL) {
+        return MA_INVALID_OPERATION;
+    }
+
+    ma_mutex_lock(&pContext->deviceEnumLock);
+    {
+        result = pContext->callbacks.onContextEnumerateDevices(pContext, callback, pUserData);
+    }
+    ma_mutex_unlock(&pContext->deviceEnumLock);
+
+    return result;
+}
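
For reference, a minimal caller-side sketch of the context API above (the callback name is hypothetical; its signature matches ma_enum_devices_callback_proc as used here):

    #include <stdio.h>

    /* Hypothetical enumeration callback. Return MA_TRUE to keep enumerating. */
    static ma_bool32 my_enum_callback(ma_context* pContext, ma_device_type deviceType, const ma_device_info* pInfo, void* pUserData)
    {
        printf("%s: %s\n", (deviceType == ma_device_type_playback) ? "playback" : "capture", pInfo->name);
        (void)pContext;
        (void)pUserData;
        return MA_TRUE;
    }

    ma_context context;
    if (ma_context_init(NULL, 0, NULL, &context) == MA_SUCCESS) { /* NULL backend list = default priority order. */
        ma_context_enumerate_devices(&context, my_enum_callback, NULL);
        ma_context_uninit(&context);
    }
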
+
+
+static ma_bool32 ma_context_get_devices__enum_callback(ma_context* pContext, ma_device_type deviceType, const ma_device_info* pInfo, void* pUserData)
+{
+    /*
+    We need to insert the device info into our main internal buffer. Where it goes depends on the device type. If it's a capture device
+    it's just appended to the end. If it's a playback device it's inserted just before the first capture device.
+    */
+
+    /*
+    First make sure we have room. Since the number of devices we add to the list is usually relatively small I've decided to use a
+    simple fixed size increment for buffer expansion.
+    */
+    const ma_uint32 bufferExpansionCount = 2;
+    const ma_uint32 totalDeviceInfoCount = pContext->playbackDeviceInfoCount + pContext->captureDeviceInfoCount;
+
+    if (totalDeviceInfoCount >= pContext->deviceInfoCapacity) {
+        ma_uint32 newCapacity = pContext->deviceInfoCapacity + bufferExpansionCount;
+        ma_device_info* pNewInfos = (ma_device_info*)ma_realloc(pContext->pDeviceInfos, sizeof(*pContext->pDeviceInfos)*newCapacity, &pContext->allocationCallbacks);
+        if (pNewInfos == NULL) {
+            return MA_FALSE; /* Out of memory. */
+        }
+
+        pContext->pDeviceInfos = pNewInfos;
+        pContext->deviceInfoCapacity = newCapacity;
+    }
+
+    if (deviceType == ma_device_type_playback) {
+        /* Playback. Insert just before the first capture device. */
+
+        /* The first thing to do is move all of the capture devices down a slot. */
+        ma_uint32 iFirstCaptureDevice = pContext->playbackDeviceInfoCount;
+        size_t iCaptureDevice;
+        for (iCaptureDevice = totalDeviceInfoCount; iCaptureDevice > iFirstCaptureDevice; --iCaptureDevice) {
+            pContext->pDeviceInfos[iCaptureDevice] = pContext->pDeviceInfos[iCaptureDevice-1];
+        }
+
+        /* Now just insert where the first capture device was before moving it down a slot. */
+        pContext->pDeviceInfos[iFirstCaptureDevice] = *pInfo;
+        pContext->playbackDeviceInfoCount += 1;
+    } else {
+        /* Capture. Insert at the end. */
+        pContext->pDeviceInfos[totalDeviceInfoCount] = *pInfo;
+        pContext->captureDeviceInfoCount += 1;
+    }
+
+    (void)pUserData;
+    return MA_TRUE;
+}
+
+MA_API ma_result ma_context_get_devices(ma_context* pContext, ma_device_info** ppPlaybackDeviceInfos, ma_uint32* pPlaybackDeviceCount, ma_device_info** ppCaptureDeviceInfos, ma_uint32* pCaptureDeviceCount)
+{
+    ma_result result;
+
+    /* Safety. */
+    if (ppPlaybackDeviceInfos != NULL) *ppPlaybackDeviceInfos = NULL;
+    if (pPlaybackDeviceCount != NULL) *pPlaybackDeviceCount = 0;
+    if (ppCaptureDeviceInfos != NULL) *ppCaptureDeviceInfos = NULL;
+    if (pCaptureDeviceCount != NULL) *pCaptureDeviceCount = 0;
+
+    if (pContext == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    if (pContext->callbacks.onContextEnumerateDevices == NULL) {
+        return MA_INVALID_OPERATION;
+    }
+
+    /* Note that we don't use ma_context_enumerate_devices() here because we want to do locking at a higher level. */
+    ma_mutex_lock(&pContext->deviceEnumLock);
+    {
+        /* Reset everything first. */
+        pContext->playbackDeviceInfoCount = 0;
+        pContext->captureDeviceInfoCount = 0;
+
+        /* Now enumerate over available devices. */
+        result = pContext->callbacks.onContextEnumerateDevices(pContext, ma_context_get_devices__enum_callback, NULL);
+        if (result == MA_SUCCESS) {
+            /* Playback devices. */
+            if (ppPlaybackDeviceInfos != NULL) {
+                *ppPlaybackDeviceInfos = pContext->pDeviceInfos;
+            }
+            if (pPlaybackDeviceCount != NULL) {
+                *pPlaybackDeviceCount = pContext->playbackDeviceInfoCount;
+            }
+
+            /* Capture devices. */
+            if (ppCaptureDeviceInfos != NULL) {
+                *ppCaptureDeviceInfos = pContext->pDeviceInfos;
+                /* Capture devices come after playback devices. */
+                if (pContext->playbackDeviceInfoCount > 0) {
+                    /* Conditional, because NULL+0 is undefined behavior. */
+                    *ppCaptureDeviceInfos += pContext->playbackDeviceInfoCount;
+                }
+            }
+            if (pCaptureDeviceCount != NULL) {
+                *pCaptureDeviceCount = pContext->captureDeviceInfoCount;
+            }
+        }
+    }
+    ma_mutex_unlock(&pContext->deviceEnumLock);
+
+    return result;
+}
+
+MA_API ma_result ma_context_get_device_info(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_device_info* pDeviceInfo)
+{
+    ma_result result;
+    ma_device_info deviceInfo;
+
+    /* NOTE: Do not clear pDeviceInfo on entry. The reason is the pDeviceID may actually point to pDeviceInfo->id which will break things. */
+    if (pContext == NULL || pDeviceInfo == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    MA_ZERO_OBJECT(&deviceInfo);
+
+    /* Help the backend out by copying over the device ID if we have one. */
+    if (pDeviceID != NULL) {
+        MA_COPY_MEMORY(&deviceInfo.id, pDeviceID, sizeof(*pDeviceID));
+    }
+
+    if (pContext->callbacks.onContextGetDeviceInfo == NULL) {
+        return MA_INVALID_OPERATION;
+    }
+
+    ma_mutex_lock(&pContext->deviceInfoLock);
+    {
+        result = pContext->callbacks.onContextGetDeviceInfo(pContext, deviceType, pDeviceID, &deviceInfo);
+    }
+    ma_mutex_unlock(&pContext->deviceInfoLock);
+
+    *pDeviceInfo = deviceInfo;
+    return result;
+}
+
+MA_API ma_bool32 ma_context_is_loopback_supported(ma_context* pContext)
+{
+    if (pContext == NULL) {
+        return MA_FALSE;
+    }
+
+    return ma_is_loopback_supported(pContext->backend);
+}
+
+
+MA_API ma_device_config ma_device_config_init(ma_device_type deviceType)
+{
+    ma_device_config config;
+    MA_ZERO_OBJECT(&config);
+    config.deviceType = deviceType;
+    config.resampling = ma_resampler_config_init(ma_format_unknown, 0, 0, 0, ma_resample_algorithm_linear); /* Format/channels/rate don't matter here. */
+
+    return config;
+}
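
Putting ma_device_config_init() and ma_device_init() (below) together, typical client code looks roughly like this. The data callback is a hypothetical user function; a NULL context makes miniaudio self-manage one, per the branch at the top of ma_device_init():

    /* Hypothetical user callback: fill pOutput with frameCount frames of f32 audio. */
    void my_data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount);

    ma_device_config config = ma_device_config_init(ma_device_type_playback);
    config.playback.format = ma_format_f32;
    config.playback.channels = 2;
    config.sampleRate = 48000;
    config.dataCallback = my_data_callback;

    ma_device device;
    if (ma_device_init(NULL, &config, &device) == MA_SUCCESS) {
        ma_device_start(&device);
        /* ... */
        ma_device_uninit(&device);
    }
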
+
+MA_API ma_result ma_device_init(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice)
+{
+    ma_result result;
+    ma_device_descriptor descriptorPlayback;
+    ma_device_descriptor descriptorCapture;
+
+    /* The context can be null, in which case we self-manage it. */
+    if (pContext == NULL) {
+        return ma_device_init_ex(NULL, 0, NULL, pConfig, pDevice);
+    }
+
+    if (pDevice == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    MA_ZERO_OBJECT(pDevice);
+
+    if (pConfig == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    /* Check that we have our callbacks defined. */
+    if (pContext->callbacks.onDeviceInit == NULL) {
+        return MA_INVALID_OPERATION;
+    }
+
+    /* Basic config validation. */
+    if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) {
+        if (pConfig->capture.channels > MA_MAX_CHANNELS) {
+            return MA_INVALID_ARGS;
+        }
+
+        if (!ma__is_channel_map_valid(pConfig->capture.pChannelMap, pConfig->capture.channels)) {
+            return MA_INVALID_ARGS;
+        }
+    }
+
+    if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex || pConfig->deviceType == ma_device_type_loopback) {
+        if (pConfig->playback.channels > MA_MAX_CHANNELS) {
+            return MA_INVALID_ARGS;
+        }
+
+        if (!ma__is_channel_map_valid(pConfig->playback.pChannelMap, pConfig->playback.channels)) {
+            return MA_INVALID_ARGS;
+        }
+    }
+
+    pDevice->pContext = pContext;
+
+    /* Set the user data and log callback ASAP to ensure it is available for the entire initialization process. */
+    pDevice->pUserData = pConfig->pUserData;
+    pDevice->onData = pConfig->dataCallback;
+    pDevice->onNotification = pConfig->notificationCallback;
+    pDevice->onStop = pConfig->stopCallback;
+
+    if (pConfig->playback.pDeviceID != NULL) {
+        MA_COPY_MEMORY(&pDevice->playback.id, pConfig->playback.pDeviceID, sizeof(pDevice->playback.id));
+        pDevice->playback.pID = &pDevice->playback.id;
+    } else {
+        pDevice->playback.pID = NULL;
+    }
+
+    if (pConfig->capture.pDeviceID != NULL) {
+        MA_COPY_MEMORY(&pDevice->capture.id, pConfig->capture.pDeviceID, sizeof(pDevice->capture.id));
+        pDevice->capture.pID = &pDevice->capture.id;
+    } else {
+        pDevice->capture.pID = NULL;
+    }
+
+    pDevice->noPreSilencedOutputBuffer = pConfig->noPreSilencedOutputBuffer;
+    pDevice->noClip = pConfig->noClip;
+    pDevice->noDisableDenormals = pConfig->noDisableDenormals;
+    pDevice->noFixedSizedCallback = pConfig->noFixedSizedCallback;
+    ma_atomic_float_set(&pDevice->masterVolumeFactor, 1);
+
+    pDevice->type = pConfig->deviceType;
+    pDevice->sampleRate = pConfig->sampleRate;
+    pDevice->resampling.algorithm = pConfig->resampling.algorithm;
+    pDevice->resampling.linear.lpfOrder = pConfig->resampling.linear.lpfOrder;
+    pDevice->resampling.pBackendVTable = pConfig->resampling.pBackendVTable;
+    pDevice->resampling.pBackendUserData = pConfig->resampling.pBackendUserData;
+
+    pDevice->capture.shareMode = pConfig->capture.shareMode;
+    pDevice->capture.format = pConfig->capture.format;
+    pDevice->capture.channels = pConfig->capture.channels;
+    ma_channel_map_copy_or_default(pDevice->capture.channelMap, ma_countof(pDevice->capture.channelMap), pConfig->capture.pChannelMap, pConfig->capture.channels);
+    pDevice->capture.channelMixMode = pConfig->capture.channelMixMode;
+    pDevice->capture.calculateLFEFromSpatialChannels = pConfig->capture.calculateLFEFromSpatialChannels;
+
+    pDevice->playback.shareMode = pConfig->playback.shareMode;
+    pDevice->playback.format = pConfig->playback.format;
+    pDevice->playback.channels = pConfig->playback.channels;
+    ma_channel_map_copy_or_default(pDevice->playback.channelMap, ma_countof(pDevice->playback.channelMap), pConfig->playback.pChannelMap, pConfig->playback.channels);
+    pDevice->playback.channelMixMode = pConfig->playback.channelMixMode;
+    pDevice->playback.calculateLFEFromSpatialChannels = pConfig->playback.calculateLFEFromSpatialChannels;
+
+    result = ma_mutex_init(&pDevice->startStopLock);
+    if (result != MA_SUCCESS) {
+        return result;
+    }
+
+    /*
+    When the device is started, the worker thread is the one that does the actual startup of the backend device. We
+    use a semaphore to wait for the background thread to finish the work. The same applies for stopping the device.
+
+    Each of these semaphores is released internally by the worker thread when the work is completed. The start
+    semaphore is also used to wake up the worker thread.
+    */
+    result = ma_event_init(&pDevice->wakeupEvent);
+    if (result != MA_SUCCESS) {
+        ma_mutex_uninit(&pDevice->startStopLock);
+        return result;
+    }
+
+    result = ma_event_init(&pDevice->startEvent);
+    if (result != MA_SUCCESS) {
+        ma_event_uninit(&pDevice->wakeupEvent);
+        ma_mutex_uninit(&pDevice->startStopLock);
+        return result;
+    }
+
+    result = ma_event_init(&pDevice->stopEvent);
+    if (result != MA_SUCCESS) {
+        ma_event_uninit(&pDevice->startEvent);
+        ma_event_uninit(&pDevice->wakeupEvent);
+        ma_mutex_uninit(&pDevice->startStopLock);
+        return result;
+    }
+
+
+    MA_ZERO_OBJECT(&descriptorPlayback);
+    descriptorPlayback.pDeviceID = pConfig->playback.pDeviceID;
+    descriptorPlayback.shareMode = pConfig->playback.shareMode;
+    descriptorPlayback.format = pConfig->playback.format;
+    descriptorPlayback.channels = pConfig->playback.channels;
+    descriptorPlayback.sampleRate = pConfig->sampleRate;
+    ma_channel_map_copy_or_default(descriptorPlayback.channelMap, ma_countof(descriptorPlayback.channelMap), pConfig->playback.pChannelMap, pConfig->playback.channels);
+    descriptorPlayback.periodSizeInFrames = pConfig->periodSizeInFrames;
+    descriptorPlayback.periodSizeInMilliseconds = pConfig->periodSizeInMilliseconds;
+    descriptorPlayback.periodCount = pConfig->periods;
+
+    if (descriptorPlayback.periodCount == 0) {
+        descriptorPlayback.periodCount = MA_DEFAULT_PERIODS;
+    }
+
+
+    MA_ZERO_OBJECT(&descriptorCapture);
+    descriptorCapture.pDeviceID = pConfig->capture.pDeviceID;
+    descriptorCapture.shareMode = pConfig->capture.shareMode;
+    descriptorCapture.format = pConfig->capture.format;
+    descriptorCapture.channels = pConfig->capture.channels;
+    descriptorCapture.sampleRate = pConfig->sampleRate;
+    ma_channel_map_copy_or_default(descriptorCapture.channelMap, ma_countof(descriptorCapture.channelMap), pConfig->capture.pChannelMap, pConfig->capture.channels);
+    descriptorCapture.periodSizeInFrames = pConfig->periodSizeInFrames;
+    descriptorCapture.periodSizeInMilliseconds = pConfig->periodSizeInMilliseconds;
+    descriptorCapture.periodCount = pConfig->periods;
+
+    if (descriptorCapture.periodCount == 0) {
+        descriptorCapture.periodCount = MA_DEFAULT_PERIODS;
+    }
+
+
+    result = pContext->callbacks.onDeviceInit(pDevice, pConfig, &descriptorPlayback, &descriptorCapture);
+    if (result != MA_SUCCESS) {
+        ma_event_uninit(&pDevice->startEvent);
+        ma_event_uninit(&pDevice->wakeupEvent);
+        ma_mutex_uninit(&pDevice->startStopLock);
+        return result;
+    }
+
+#if 0
+    /*
+    On output the descriptors will contain the *actual* data format of the device. We need this to know how to convert the data between
+    the requested format and the internal format.
+    */
+    if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex || pConfig->deviceType == ma_device_type_loopback) {
+        if (!ma_device_descriptor_is_valid(&descriptorCapture)) {
+            ma_device_uninit(pDevice);
+            return MA_INVALID_ARGS;
+        }
+
+        pDevice->capture.internalFormat = descriptorCapture.format;
+        pDevice->capture.internalChannels = descriptorCapture.channels;
+        pDevice->capture.internalSampleRate = descriptorCapture.sampleRate;
+        ma_channel_map_copy(pDevice->capture.internalChannelMap, descriptorCapture.channelMap, descriptorCapture.channels);
+        pDevice->capture.internalPeriodSizeInFrames = descriptorCapture.periodSizeInFrames;
+        pDevice->capture.internalPeriods = descriptorCapture.periodCount;
+
+        if (pDevice->capture.internalPeriodSizeInFrames == 0) {
+            pDevice->capture.internalPeriodSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(descriptorCapture.periodSizeInMilliseconds, descriptorCapture.sampleRate);
+        }
+    }
+
+    if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) {
+        if (!ma_device_descriptor_is_valid(&descriptorPlayback)) {
+            ma_device_uninit(pDevice);
+            return MA_INVALID_ARGS;
+        }
+
+        pDevice->playback.internalFormat = descriptorPlayback.format;
+        pDevice->playback.internalChannels = descriptorPlayback.channels;
+        pDevice->playback.internalSampleRate = descriptorPlayback.sampleRate;
+        ma_channel_map_copy(pDevice->playback.internalChannelMap, descriptorPlayback.channelMap, descriptorPlayback.channels);
+        pDevice->playback.internalPeriodSizeInFrames = descriptorPlayback.periodSizeInFrames;
+        pDevice->playback.internalPeriods = descriptorPlayback.periodCount;
+
+        if (pDevice->playback.internalPeriodSizeInFrames == 0) {
+            pDevice->playback.internalPeriodSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(descriptorPlayback.periodSizeInMilliseconds, descriptorPlayback.sampleRate);
+        }
+    }
+
+
+    /*
+    The name of the device can be retrieved from device info. This may be temporary and replaced with a `ma_device_get_info(pDevice, deviceType)` instead.
+    For loopback devices, we need to retrieve the name of the playback device.
+    */
+    {
+        ma_device_info deviceInfo;
+
+        if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex || pConfig->deviceType == ma_device_type_loopback) {
+            result = ma_device_get_info(pDevice, (pConfig->deviceType == ma_device_type_loopback) ? ma_device_type_playback : ma_device_type_capture, &deviceInfo);
+            if (result == MA_SUCCESS) {
+                ma_strncpy_s(pDevice->capture.name, sizeof(pDevice->capture.name), deviceInfo.name, (size_t)-1);
+            } else {
+                /* We failed to retrieve the device info. Fall back to a default name. */
+                if (descriptorCapture.pDeviceID == NULL) {
+                    ma_strncpy_s(pDevice->capture.name, sizeof(pDevice->capture.name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1);
+                } else {
+                    ma_strncpy_s(pDevice->capture.name, sizeof(pDevice->capture.name), "Capture Device", (size_t)-1);
+                }
+            }
+        }
+
+        if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) {
+            result = ma_device_get_info(pDevice, ma_device_type_playback, &deviceInfo);
+            if (result == MA_SUCCESS) {
+                ma_strncpy_s(pDevice->playback.name, sizeof(pDevice->playback.name), deviceInfo.name, (size_t)-1);
+            } else {
+                /* We failed to retrieve the device info. Fall back to a default name. */
+                if (descriptorPlayback.pDeviceID == NULL) {
+                    ma_strncpy_s(pDevice->playback.name, sizeof(pDevice->playback.name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1);
+                } else {
+                    ma_strncpy_s(pDevice->playback.name, sizeof(pDevice->playback.name), "Playback Device", (size_t)-1);
+                }
+            }
+        }
+    }
+
+
+    ma_device__post_init_setup(pDevice, pConfig->deviceType);
+#endif
+
+    result = ma_device_post_init(pDevice, pConfig->deviceType, &descriptorPlayback, &descriptorCapture);
+    if (result != MA_SUCCESS) {
+        ma_device_uninit(pDevice);
+        return result;
+    }
+
+
+    /*
+    If we're using fixed sized callbacks we'll need to make use of an intermediary buffer. Needs to
+    be done after post_init_setup() because we'll need access to the sample rate.
+    */
+    if (pConfig->noFixedSizedCallback == MA_FALSE) {
+        /* We're using a fixed sized data callback so we'll need an intermediary buffer. */
+        ma_uint32 intermediaryBufferCap = pConfig->periodSizeInFrames;
+        if (intermediaryBufferCap == 0) {
+            intermediaryBufferCap = ma_calculate_buffer_size_in_frames_from_milliseconds(pConfig->periodSizeInMilliseconds, pDevice->sampleRate);
+        }
+
+        if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex || pConfig->deviceType == ma_device_type_loopback) {
+            ma_uint32 intermediaryBufferSizeInBytes;
+
+            pDevice->capture.intermediaryBufferLen = 0;
+            pDevice->capture.intermediaryBufferCap = intermediaryBufferCap;
+            if (pDevice->capture.intermediaryBufferCap == 0) {
+                pDevice->capture.intermediaryBufferCap = pDevice->capture.internalPeriodSizeInFrames;
+            }
+
+            intermediaryBufferSizeInBytes = pDevice->capture.intermediaryBufferCap * ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels);
+
+            pDevice->capture.pIntermediaryBuffer = ma_malloc((size_t)intermediaryBufferSizeInBytes, &pContext->allocationCallbacks);
+            if (pDevice->capture.pIntermediaryBuffer == NULL) {
+                ma_device_uninit(pDevice);
+                return MA_OUT_OF_MEMORY;
+            }
+
+            /* Silence the buffer for safety. */
+            ma_silence_pcm_frames(pDevice->capture.pIntermediaryBuffer, pDevice->capture.intermediaryBufferCap, pDevice->capture.format, pDevice->capture.channels);
+            pDevice->capture.intermediaryBufferLen = pDevice->capture.intermediaryBufferCap;
+        }
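+
+        /*
+        Illustrative sizing example (hypothetical numbers, not values the library prescribes): with a
+        10ms period at 48000 Hz, ma_calculate_buffer_size_in_frames_from_milliseconds(10, 48000) above
+        gives 10*48000/1000 = 480 frames, and for stereo f32 the allocations here come out at
+        480 * ma_get_bytes_per_frame(ma_format_f32, 2) = 480*8 = 3840 bytes.
+        */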
+        if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) {
+            ma_uint64 intermediaryBufferSizeInBytes;
+
+            pDevice->playback.intermediaryBufferLen = 0;
+            if (pConfig->deviceType == ma_device_type_duplex) {
+                pDevice->playback.intermediaryBufferCap = pDevice->capture.intermediaryBufferCap;   /* In duplex mode, make sure the intermediary buffer is always the same size as the capture side. */
+            } else {
+                pDevice->playback.intermediaryBufferCap = intermediaryBufferCap;
+                if (pDevice->playback.intermediaryBufferCap == 0) {
+                    pDevice->playback.intermediaryBufferCap = pDevice->playback.internalPeriodSizeInFrames;
+                }
+            }
+
+            intermediaryBufferSizeInBytes = pDevice->playback.intermediaryBufferCap * ma_get_bytes_per_frame(pDevice->playback.format, pDevice->playback.channels);
+
+            pDevice->playback.pIntermediaryBuffer = ma_malloc((size_t)intermediaryBufferSizeInBytes, &pContext->allocationCallbacks);
+            if (pDevice->playback.pIntermediaryBuffer == NULL) {
+                ma_device_uninit(pDevice);
+                return MA_OUT_OF_MEMORY;
+            }
+
+            /* Silence the buffer for safety. */
+            ma_silence_pcm_frames(pDevice->playback.pIntermediaryBuffer, pDevice->playback.intermediaryBufferCap, pDevice->playback.format, pDevice->playback.channels);
+            pDevice->playback.intermediaryBufferLen = 0;
+        }
+    } else {
+        /* Not using a fixed sized data callback so no need for an intermediary buffer. */
+    }
+
+
+    /* Some backends don't require the worker thread. */
+    if (!ma_context_is_backend_asynchronous(pContext)) {
+        /* The worker thread. */
+        result = ma_thread_create(&pDevice->thread, pContext->threadPriority, pContext->threadStackSize, ma_worker_thread, pDevice, &pContext->allocationCallbacks);
+        if (result != MA_SUCCESS) {
+            ma_device_uninit(pDevice);
+            return result;
+        }
+
+        /* Wait for the worker thread to put the device into its stopped state for real. */
+        ma_event_wait(&pDevice->stopEvent);
+        MA_ASSERT(ma_device_get_state(pDevice) == ma_device_state_stopped);
+    } else {
+        /*
+        If the backend is asynchronous and the device is duplex, we'll need an intermediary ring buffer. Note that this needs to be done
+        after ma_device__post_init_setup().
+        */
+        if (ma_context_is_backend_asynchronous(pContext)) {
+            if (pConfig->deviceType == ma_device_type_duplex) {
+                result = ma_duplex_rb_init(pDevice->capture.format, pDevice->capture.channels, pDevice->sampleRate, pDevice->capture.internalSampleRate, pDevice->capture.internalPeriodSizeInFrames, &pDevice->pContext->allocationCallbacks, &pDevice->duplexRB);
+                if (result != MA_SUCCESS) {
+                    ma_device_uninit(pDevice);
+                    return result;
+                }
+            }
+        }
+
+        ma_device__set_state(pDevice, ma_device_state_stopped);
+    }
+
+    /* Log device information. */
+    {
+        ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, "[%s]\n", ma_get_backend_name(pDevice->pContext->backend));
+        if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex || pDevice->type == ma_device_type_loopback) {
+            char name[MA_MAX_DEVICE_NAME_LENGTH + 1];
+            ma_device_get_name(pDevice, ma_device_type_capture, name, sizeof(name), NULL);
+
+            ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, "  %s (%s)\n", name, "Capture");
+            ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, "    Format:      %s -> %s\n", ma_get_format_name(pDevice->capture.internalFormat), ma_get_format_name(pDevice->capture.format));
+            ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, "    Channels:    %d -> %d\n", pDevice->capture.internalChannels, pDevice->capture.channels);
+            ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, "    Sample Rate: %d -> %d\n", pDevice->capture.internalSampleRate, pDevice->sampleRate);
+            ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, "    Buffer Size: %d*%d (%d)\n", pDevice->capture.internalPeriodSizeInFrames, pDevice->capture.internalPeriods, (pDevice->capture.internalPeriodSizeInFrames * pDevice->capture.internalPeriods));
+            ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, "    Conversion:\n");
+            ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, "      Pre Format Conversion:  %s\n", pDevice->capture.converter.hasPreFormatConversion ? "YES" : "NO");
+            ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, "      Post Format Conversion: %s\n", pDevice->capture.converter.hasPostFormatConversion ? "YES" : "NO");
+            ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, "      Channel Routing:        %s\n", pDevice->capture.converter.hasChannelConverter ? "YES" : "NO");
+            ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, "      Resampling:             %s\n", pDevice->capture.converter.hasResampler ? "YES" : "NO");
"YES" : "NO"); + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, " Passthrough: %s\n", pDevice->capture.converter.isPassthrough ? "YES" : "NO"); + { + char channelMapStr[1024]; + ma_channel_map_to_string(pDevice->capture.internalChannelMap, pDevice->capture.internalChannels, channelMapStr, sizeof(channelMapStr)); + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, " Channel Map In: {%s}\n", channelMapStr); + + ma_channel_map_to_string(pDevice->capture.channelMap, pDevice->capture.channels, channelMapStr, sizeof(channelMapStr)); + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, " Channel Map Out: {%s}\n", channelMapStr); + } + } + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + char name[MA_MAX_DEVICE_NAME_LENGTH + 1]; + ma_device_get_name(pDevice, ma_device_type_playback, name, sizeof(name), NULL); + + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, " %s (%s)\n", name, "Playback"); + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, " Format: %s -> %s\n", ma_get_format_name(pDevice->playback.format), ma_get_format_name(pDevice->playback.internalFormat)); + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, " Channels: %d -> %d\n", pDevice->playback.channels, pDevice->playback.internalChannels); + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, " Sample Rate: %d -> %d\n", pDevice->sampleRate, pDevice->playback.internalSampleRate); + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, " Buffer Size: %d*%d (%d)\n", pDevice->playback.internalPeriodSizeInFrames, pDevice->playback.internalPeriods, (pDevice->playback.internalPeriodSizeInFrames * pDevice->playback.internalPeriods)); + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, " Conversion:\n"); + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, " Pre Format Conversion: %s\n", pDevice->playback.converter.hasPreFormatConversion ? "YES" : "NO"); + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, " Post Format Conversion: %s\n", pDevice->playback.converter.hasPostFormatConversion ? "YES" : "NO"); + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, " Channel Routing: %s\n", pDevice->playback.converter.hasChannelConverter ? "YES" : "NO"); + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, " Resampling: %s\n", pDevice->playback.converter.hasResampler ? "YES" : "NO"); + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, " Passthrough: %s\n", pDevice->playback.converter.isPassthrough ? 
"YES" : "NO"); + { + char channelMapStr[1024]; + ma_channel_map_to_string(pDevice->playback.channelMap, pDevice->playback.channels, channelMapStr, sizeof(channelMapStr)); + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, " Channel Map In: {%s}\n", channelMapStr); + + ma_channel_map_to_string(pDevice->playback.internalChannelMap, pDevice->playback.internalChannels, channelMapStr, sizeof(channelMapStr)); + ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, " Channel Map Out: {%s}\n", channelMapStr); + } + } + } + + MA_ASSERT(ma_device_get_state(pDevice) == ma_device_state_stopped); + return MA_SUCCESS; +} + +MA_API ma_result ma_device_init_ex(const ma_backend backends[], ma_uint32 backendCount, const ma_context_config* pContextConfig, const ma_device_config* pConfig, ma_device* pDevice) +{ + ma_result result; + ma_context* pContext; + ma_backend defaultBackends[ma_backend_null+1]; + ma_uint32 iBackend; + ma_backend* pBackendsToIterate; + ma_uint32 backendsToIterateCount; + ma_allocation_callbacks allocationCallbacks; + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + if (pContextConfig != NULL) { + result = ma_allocation_callbacks_init_copy(&allocationCallbacks, &pContextConfig->allocationCallbacks); + if (result != MA_SUCCESS) { + return result; + } + } else { + allocationCallbacks = ma_allocation_callbacks_init_default(); + } + + pContext = (ma_context*)ma_malloc(sizeof(*pContext), &allocationCallbacks); + if (pContext == NULL) { + return MA_OUT_OF_MEMORY; + } + + for (iBackend = 0; iBackend <= ma_backend_null; ++iBackend) { + defaultBackends[iBackend] = (ma_backend)iBackend; + } + + pBackendsToIterate = (ma_backend*)backends; + backendsToIterateCount = backendCount; + if (pBackendsToIterate == NULL) { + pBackendsToIterate = (ma_backend*)defaultBackends; + backendsToIterateCount = ma_countof(defaultBackends); + } + + result = MA_NO_BACKEND; + + for (iBackend = 0; iBackend < backendsToIterateCount; ++iBackend) { + /* + This is a hack for iOS. If the context config is null, there's a good chance the + `ma_device_init(NULL, &deviceConfig, pDevice);` pattern is being used. In this + case, set the session category based on the device type. + */ + #if defined(MA_APPLE_MOBILE) + ma_context_config contextConfig; + + if (pContextConfig == NULL) { + contextConfig = ma_context_config_init(); + switch (pConfig->deviceType) { + case ma_device_type_duplex: { + contextConfig.coreaudio.sessionCategory = ma_ios_session_category_play_and_record; + } break; + case ma_device_type_capture: { + contextConfig.coreaudio.sessionCategory = ma_ios_session_category_record; + } break; + case ma_device_type_playback: + default: { + contextConfig.coreaudio.sessionCategory = ma_ios_session_category_playback; + } break; + } + + pContextConfig = &contextConfig; + } + #endif + + result = ma_context_init(&pBackendsToIterate[iBackend], 1, pContextConfig, pContext); + if (result == MA_SUCCESS) { + result = ma_device_init(pContext, pConfig, pDevice); + if (result == MA_SUCCESS) { + break; /* Success. */ + } else { + ma_context_uninit(pContext); /* Failure. 
+            }
+        }
+    }
+
+    if (result != MA_SUCCESS) {
+        ma_free(pContext, &allocationCallbacks);
+        return result;
+    }
+
+    pDevice->isOwnerOfContext = MA_TRUE;
+    return result;
+}
+
+MA_API void ma_device_uninit(ma_device* pDevice)
+{
+    if (!ma_device__is_initialized(pDevice)) {
+        return;
+    }
+
+    /*
+    It's possible for the miniaudio side of the device and the backend to not be in sync due to
+    system-level situations such as the computer being put into sleep mode and the backend not
+    notifying miniaudio of the fact the device has stopped. It's possible for this to result in a
+    deadlock due to miniaudio thinking the device is in a running state, when in fact it's not
+    running at all. For this reason I am no longer explicitly stopping the device. I don't think
+    this should affect anyone in practice since uninitializing the backend will naturally stop the
+    device anyway.
+    */
+    #if 0
+    {
+        /* Make sure the device is stopped first. The backends will probably handle this naturally, but I like to do it explicitly for my own sanity. */
+        if (ma_device_is_started(pDevice)) {
+            ma_device_stop(pDevice);
+        }
+    }
+    #endif
+
+    /* Putting the device into an uninitialized state will make the worker thread return. */
+    ma_device__set_state(pDevice, ma_device_state_uninitialized);
+
+    /* Wake up the worker thread and wait for it to properly terminate. */
+    if (!ma_context_is_backend_asynchronous(pDevice->pContext)) {
+        ma_event_signal(&pDevice->wakeupEvent);
+        ma_thread_wait(&pDevice->thread);
+    }
+
+    if (pDevice->pContext->callbacks.onDeviceUninit != NULL) {
+        pDevice->pContext->callbacks.onDeviceUninit(pDevice);
+    }
+
+
+    ma_event_uninit(&pDevice->stopEvent);
+    ma_event_uninit(&pDevice->startEvent);
+    ma_event_uninit(&pDevice->wakeupEvent);
+    ma_mutex_uninit(&pDevice->startStopLock);
+
+    if (ma_context_is_backend_asynchronous(pDevice->pContext)) {
+        if (pDevice->type == ma_device_type_duplex) {
+            ma_duplex_rb_uninit(&pDevice->duplexRB);
+        }
+    }
+
+    if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex || pDevice->type == ma_device_type_loopback) {
+        ma_data_converter_uninit(&pDevice->capture.converter, &pDevice->pContext->allocationCallbacks);
+    }
+    if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
+        ma_data_converter_uninit(&pDevice->playback.converter, &pDevice->pContext->allocationCallbacks);
+    }
+
+    if (pDevice->playback.pInputCache != NULL) {
+        ma_free(pDevice->playback.pInputCache, &pDevice->pContext->allocationCallbacks);
+    }
+
+    if (pDevice->capture.pIntermediaryBuffer != NULL) {
+        ma_free(pDevice->capture.pIntermediaryBuffer, &pDevice->pContext->allocationCallbacks);
+    }
+    if (pDevice->playback.pIntermediaryBuffer != NULL) {
+        ma_free(pDevice->playback.pIntermediaryBuffer, &pDevice->pContext->allocationCallbacks);
+    }
+
+    if (pDevice->isOwnerOfContext) {
+        ma_allocation_callbacks allocationCallbacks = pDevice->pContext->allocationCallbacks;
+
+        ma_context_uninit(pDevice->pContext);
+        ma_free(pDevice->pContext, &allocationCallbacks);
+    }
+
+    MA_ZERO_OBJECT(pDevice);
+}
+
+MA_API ma_context* ma_device_get_context(ma_device* pDevice)
+{
+    if (pDevice == NULL) {
+        return NULL;
+    }
+
+    return pDevice->pContext;
+}
+
+MA_API ma_log* ma_device_get_log(ma_device* pDevice)
+{
+    return ma_context_get_log(ma_device_get_context(pDevice));
+}
+
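+/*
+Illustrative usage sketch (hypothetical caller code, not part of the library): the functions above
+combine into the typical device lifecycle. Note that, per the comment in ma_device_uninit() above,
+an explicit stop is optional because uninitializing the backend naturally stops the device:
+
+    ma_device device;
+    if (ma_device_init(NULL, &config, &device) == MA_SUCCESS) {    // `config` assumed set up elsewhere.
+        ma_device_start(&device);   // Begins firing the data callback.
+        // ... run ...
+        ma_device_stop(&device);    // Optional.
+        ma_device_uninit(&device);
+    }
+*/
+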
+MA_API ma_result ma_device_get_info(ma_device* pDevice, ma_device_type type, ma_device_info* pDeviceInfo)
+{
+    if (pDeviceInfo == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    MA_ZERO_OBJECT(pDeviceInfo);
+
+    if (pDevice == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    /* If the onDeviceGetInfo() callback is set, use that. Otherwise we'll fall back to ma_context_get_device_info(). */
+    if (pDevice->pContext->callbacks.onDeviceGetInfo != NULL) {
+        return pDevice->pContext->callbacks.onDeviceGetInfo(pDevice, type, pDeviceInfo);
+    }
+
+    /* Getting here means onDeviceGetInfo is not implemented so we need to fall back to an alternative. */
+    if (type == ma_device_type_playback) {
+        return ma_context_get_device_info(pDevice->pContext, type, pDevice->playback.pID, pDeviceInfo);
+    } else {
+        /*
+        Here we're getting the capture side, which is the branch we'll be entering for a loopback
+        device, since loopback is capturing. However, if the device is using the default device ID,
+        it won't get the correct information because it'll think we're asking for the default
+        capture device, where in fact for loopback we want the default *playback* device. We'll do
+        a bit of a hack here to make sure we get the correct info.
+        */
+        if (pDevice->type == ma_device_type_loopback && pDevice->capture.pID == NULL) {
+            type = ma_device_type_playback;
+        }
+
+        return ma_context_get_device_info(pDevice->pContext, type, pDevice->capture.pID, pDeviceInfo);
+    }
+}
+
+MA_API ma_result ma_device_get_name(ma_device* pDevice, ma_device_type type, char* pName, size_t nameCap, size_t* pLengthNotIncludingNullTerminator)
+{
+    ma_result result;
+    ma_device_info deviceInfo;
+
+    if (pLengthNotIncludingNullTerminator != NULL) {
+        *pLengthNotIncludingNullTerminator = 0;
+    }
+
+    if (pName != NULL && nameCap > 0) {
+        pName[0] = '\0';
+    }
+
+    result = ma_device_get_info(pDevice, type, &deviceInfo);
+    if (result != MA_SUCCESS) {
+        return result;
+    }
+
+    if (pName != NULL) {
+        ma_strncpy_s(pName, nameCap, deviceInfo.name, (size_t)-1);
+
+        /*
+        For safety, make sure the length is based on the truncated output string rather than the
+        source. Otherwise the caller might assume the output buffer contains more content than it
+        actually does.
+        */
+        if (pLengthNotIncludingNullTerminator != NULL) {
+            *pLengthNotIncludingNullTerminator = strlen(pName);
+        }
+    } else {
+        /* Name not specified. Just report the length of the source string. */
+        if (pLengthNotIncludingNullTerminator != NULL) {
+            *pLengthNotIncludingNullTerminator = strlen(deviceInfo.name);
+        }
+    }
+
+    return MA_SUCCESS;
+}
+
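+/*
+Illustrative use of the length-query path above (hypothetical caller code): passing NULL for pName
+returns only the length of the source name, after which the name can be fetched into a buffer of
+the right size:
+
+    size_t len;
+    if (ma_device_get_name(pDevice, ma_device_type_playback, NULL, 0, &len) == MA_SUCCESS) {
+        char* pName = (char*)ma_malloc(len + 1, NULL);
+        if (pName != NULL) {
+            ma_device_get_name(pDevice, ma_device_type_playback, pName, len + 1, NULL);
+        }
+    }
+*/
+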
+MA_API ma_result ma_device_start(ma_device* pDevice)
+{
+    ma_result result;
+
+    if (pDevice == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    if (ma_device_get_state(pDevice) == ma_device_state_uninitialized) {
+        return MA_INVALID_OPERATION;    /* Not initialized. */
+    }
+
+    if (ma_device_get_state(pDevice) == ma_device_state_started) {
+        return MA_SUCCESS;  /* Already started. */
+    }
+
+    ma_mutex_lock(&pDevice->startStopLock);
+    {
+        /*
+        We need to check again if the device is in a started state because it's possible for one thread to have started the device
+        while another was waiting on the mutex.
+        */
+        if (ma_device_get_state(pDevice) == ma_device_state_started) {
+            ma_mutex_unlock(&pDevice->startStopLock);
+            return MA_SUCCESS;  /* Already started. */
+        }
+
+        /* Starting and stopping are wrapped in a mutex which means we can assert that the device is in a stopped or paused state. */
+        MA_ASSERT(ma_device_get_state(pDevice) == ma_device_state_stopped);
+
+        ma_device__set_state(pDevice, ma_device_state_starting);
+
+        /* Asynchronous backends need to be handled differently. */
+        if (ma_context_is_backend_asynchronous(pDevice->pContext)) {
+            if (pDevice->pContext->callbacks.onDeviceStart != NULL) {
+                result = pDevice->pContext->callbacks.onDeviceStart(pDevice);
+            } else {
+                result = MA_INVALID_OPERATION;
+            }
+
+            if (result == MA_SUCCESS) {
+                ma_device__set_state(pDevice, ma_device_state_started);
+                ma_device__on_notification_started(pDevice);
+            }
+        } else {
+            /*
+            Synchronous backends are started by signaling an event that's being waited on in the worker thread. We first wake up the
+            thread and then wait for the start event.
+            */
+            ma_event_signal(&pDevice->wakeupEvent);
+
+            /*
+            Wait for the worker thread to finish starting the device. Note that the worker thread will be the one who puts the device
+            into the started state. Don't call ma_device__set_state() here.
+            */
+            ma_event_wait(&pDevice->startEvent);
+            result = pDevice->workResult;
+        }
+
+        /* We changed the state from stopped to started, so if we failed, make sure we put the state back to stopped. */
+        if (result != MA_SUCCESS) {
+            ma_device__set_state(pDevice, ma_device_state_stopped);
+        }
+    }
+    ma_mutex_unlock(&pDevice->startStopLock);
+
+    return result;
+}
+
+MA_API ma_result ma_device_stop(ma_device* pDevice)
+{
+    ma_result result;
+
+    if (pDevice == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    if (ma_device_get_state(pDevice) == ma_device_state_uninitialized) {
+        return MA_INVALID_OPERATION;    /* Not initialized. */
+    }
+
+    if (ma_device_get_state(pDevice) == ma_device_state_stopped) {
+        return MA_SUCCESS;  /* Already stopped. */
+    }
+
+    ma_mutex_lock(&pDevice->startStopLock);
+    {
+        /*
+        We need to check again if the device is in a stopped state because it's possible for one thread to have stopped the device
+        while another was waiting on the mutex.
+        */
+        if (ma_device_get_state(pDevice) == ma_device_state_stopped) {
+            ma_mutex_unlock(&pDevice->startStopLock);
+            return MA_SUCCESS;  /* Already stopped. */
+        }
+
+        /* Starting and stopping are wrapped in a mutex which means we can assert that the device is in a started or paused state. */
+        MA_ASSERT(ma_device_get_state(pDevice) == ma_device_state_started);
+
+        ma_device__set_state(pDevice, ma_device_state_stopping);
+
+        /* Asynchronous backends need to be handled differently. */
+        if (ma_context_is_backend_asynchronous(pDevice->pContext)) {
+            /* Asynchronous backends must have a stop operation. */
+            if (pDevice->pContext->callbacks.onDeviceStop != NULL) {
+                result = pDevice->pContext->callbacks.onDeviceStop(pDevice);
+            } else {
+                result = MA_INVALID_OPERATION;
+            }
+
+            ma_device__set_state(pDevice, ma_device_state_stopped);
+        } else {
+            /*
+            Synchronous backends. The stop callback is always called from the worker thread. Do not call the stop callback here. If
+            the backend is implementing its own audio thread loop we'll need to wake it up if required. Note that we need to make
+            sure the state of the device is *not* playing right now, which it shouldn't be since we set it above. This is super
+            important though, so I'm asserting it here as well for extra safety in case we accidentally change something later.
+            */
+            MA_ASSERT(ma_device_get_state(pDevice) != ma_device_state_started);
+
+            if (pDevice->pContext->callbacks.onDeviceDataLoopWakeup != NULL) {
+                pDevice->pContext->callbacks.onDeviceDataLoopWakeup(pDevice);
+            }
+
+            /*
+            We need to wait for the worker thread to become available for work before returning. Note that the worker thread will be
+            the one who puts the device into the stopped state. Don't call ma_device__set_state() here.
+            */
+            ma_event_wait(&pDevice->stopEvent);
+            result = MA_SUCCESS;
+        }
+
+        /*
+        This is a safety measure to ensure the internal buffer has been cleared so any leftover
+        does not get played the next time the device starts. Ideally this should be drained by
+        the backend first.
+        */
+        pDevice->playback.intermediaryBufferLen = 0;
+        pDevice->playback.inputCacheConsumed = 0;
+        pDevice->playback.inputCacheRemaining = 0;
+    }
+    ma_mutex_unlock(&pDevice->startStopLock);
+
+    return result;
+}
+
+MA_API ma_bool32 ma_device_is_started(const ma_device* pDevice)
+{
+    return ma_device_get_state(pDevice) == ma_device_state_started;
+}
+
+MA_API ma_device_state ma_device_get_state(const ma_device* pDevice)
+{
+    if (pDevice == NULL) {
+        return ma_device_state_uninitialized;
+    }
+
+    return ma_atomic_device_state_get((ma_atomic_device_state*)&pDevice->state);    /* Naughty cast to get rid of a const warning. */
+}
+
+MA_API ma_result ma_device_set_master_volume(ma_device* pDevice, float volume)
+{
+    if (pDevice == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    if (volume < 0.0f) {
+        return MA_INVALID_ARGS;
+    }
+
+    ma_atomic_float_set(&pDevice->masterVolumeFactor, volume);
+
+    return MA_SUCCESS;
+}
+
+MA_API ma_result ma_device_get_master_volume(ma_device* pDevice, float* pVolume)
+{
+    if (pVolume == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    if (pDevice == NULL) {
+        *pVolume = 0;
+        return MA_INVALID_ARGS;
+    }
+
+    *pVolume = ma_atomic_float_get(&pDevice->masterVolumeFactor);
+
+    return MA_SUCCESS;
+}
+
+MA_API ma_result ma_device_set_master_volume_db(ma_device* pDevice, float gainDB)
+{
+    if (gainDB > 0) {
+        return MA_INVALID_ARGS;
+    }
+
+    return ma_device_set_master_volume(pDevice, ma_volume_db_to_linear(gainDB));
+}
+
+MA_API ma_result ma_device_get_master_volume_db(ma_device* pDevice, float* pGainDB)
+{
+    float factor;
+    ma_result result;
+
+    if (pGainDB == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    result = ma_device_get_master_volume(pDevice, &factor);
+    if (result != MA_SUCCESS) {
+        *pGainDB = 0;
+        return result;
+    }
+
+    *pGainDB = ma_volume_linear_to_db(factor);
+
+    return MA_SUCCESS;
+}
+
+
+MA_API ma_result ma_device_handle_backend_data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount)
+{
+    if (pDevice == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    if (pOutput == NULL && pInput == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    /*
+    There is an assert deeper in the code that checks that frameCount > 0. Since this is a public facing
+    API we'll need to check for that here. I've had reports that AAudio can sometimes post a frame count
+    of 0.
+    */
+    if (frameCount == 0) {
+        return MA_INVALID_ARGS;
+    }
+
+    if (pDevice->type == ma_device_type_duplex) {
+        if (pInput != NULL) {
+            ma_device__handle_duplex_callback_capture(pDevice, frameCount, pInput, &pDevice->duplexRB.rb);
+        }
+
+        if (pOutput != NULL) {
+            ma_device__handle_duplex_callback_playback(pDevice, frameCount, pOutput, &pDevice->duplexRB.rb);
+        }
+    } else {
+        if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_loopback) {
+            if (pInput == NULL) {
+                return MA_INVALID_ARGS;
+            }
+
+            ma_device__send_frames_to_client(pDevice, frameCount, pInput);
+        }
+
+        if (pDevice->type == ma_device_type_playback) {
+            if (pOutput == NULL) {
+                return MA_INVALID_ARGS;
+            }
+
+            ma_device__read_frames_from_client(pDevice, frameCount, pOutput);
+        }
+    }
+
+    return MA_SUCCESS;
+}
+
+MA_API ma_uint32 ma_calculate_buffer_size_in_frames_from_descriptor(const ma_device_descriptor* pDescriptor, ma_uint32 nativeSampleRate, ma_performance_profile performanceProfile)
+{
+    if (pDescriptor == NULL) {
+        return 0;
+    }
+
+    /*
+    We must have a non-0 native sample rate, but some backends don't allow retrieval of this at the
+    time when the size of the buffer needs to be determined. In this case we need to just take a best
+    guess and move on. We'll try using the sample rate in pDescriptor first. If that's not set we'll
+    just fall back to MA_DEFAULT_SAMPLE_RATE.
+    */
+    if (nativeSampleRate == 0) {
+        nativeSampleRate = pDescriptor->sampleRate;
+    }
+    if (nativeSampleRate == 0) {
+        nativeSampleRate = MA_DEFAULT_SAMPLE_RATE;
+    }
+
+    MA_ASSERT(nativeSampleRate != 0);
+
+    if (pDescriptor->periodSizeInFrames == 0) {
+        if (pDescriptor->periodSizeInMilliseconds == 0) {
+            if (performanceProfile == ma_performance_profile_low_latency) {
+                return ma_calculate_buffer_size_in_frames_from_milliseconds(MA_DEFAULT_PERIOD_SIZE_IN_MILLISECONDS_LOW_LATENCY, nativeSampleRate);
+            } else {
+                return ma_calculate_buffer_size_in_frames_from_milliseconds(MA_DEFAULT_PERIOD_SIZE_IN_MILLISECONDS_CONSERVATIVE, nativeSampleRate);
+            }
+        } else {
+            return ma_calculate_buffer_size_in_frames_from_milliseconds(pDescriptor->periodSizeInMilliseconds, nativeSampleRate);
+        }
+    } else {
+        return pDescriptor->periodSizeInFrames;
+    }
+}
+#endif  /* MA_NO_DEVICE_IO */
+
+
+MA_API ma_uint32 ma_calculate_buffer_size_in_milliseconds_from_frames(ma_uint32 bufferSizeInFrames, ma_uint32 sampleRate)
+{
+    /* Prevent a division by zero. */
+    if (sampleRate == 0) {
+        return 0;
+    }
+
+    return (bufferSizeInFrames*1000 + (sampleRate - 1)) / sampleRate;
+}
+
+MA_API ma_uint32 ma_calculate_buffer_size_in_frames_from_milliseconds(ma_uint32 bufferSizeInMilliseconds, ma_uint32 sampleRate)
+{
+    /* Prevent a division by zero. */
+    if (sampleRate == 0) {
+        return 0;
+    }
+
+    return bufferSizeInMilliseconds*sampleRate / 1000;
+}
+
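+/*
+Worked example for the two conversions above (hypothetical values): at 48000 Hz, 10ms maps to
+10*48000/1000 = 480 frames. Going the other way rounds up rather than truncating:
+(480*1000 + 47999) / 48000 = 10ms exactly, while an uneven count like 500 frames gives
+(500*1000 + 47999) / 48000 = 11ms rather than 10, erring on the side of a larger buffer.
+*/
+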
+MA_API void ma_copy_pcm_frames(void* dst, const void* src, ma_uint64 frameCount, ma_format format, ma_uint32 channels)
+{
+    if (dst == src) {
+        return; /* No-op. */
+    }
+
+    ma_copy_memory_64(dst, src, frameCount * ma_get_bytes_per_frame(format, channels));
+}
+
+MA_API void ma_silence_pcm_frames(void* p, ma_uint64 frameCount, ma_format format, ma_uint32 channels)
+{
+    if (format == ma_format_u8) {
+        ma_uint64 sampleCount = frameCount * channels;
+        ma_uint64 iSample;
+        for (iSample = 0; iSample < sampleCount; iSample += 1) {
+            ((ma_uint8*)p)[iSample] = 128;
+        }
+    } else {
+        ma_zero_memory_64(p, frameCount * ma_get_bytes_per_frame(format, channels));
+    }
+}
+
+MA_API void* ma_offset_pcm_frames_ptr(void* p, ma_uint64 offsetInFrames, ma_format format, ma_uint32 channels)
+{
+    return ma_offset_ptr(p, offsetInFrames * ma_get_bytes_per_frame(format, channels));
+}
+
+MA_API const void* ma_offset_pcm_frames_const_ptr(const void* p, ma_uint64 offsetInFrames, ma_format format, ma_uint32 channels)
+{
+    return ma_offset_ptr(p, offsetInFrames * ma_get_bytes_per_frame(format, channels));
+}
+
+
+MA_API void ma_clip_samples_u8(ma_uint8* pDst, const ma_int16* pSrc, ma_uint64 count)
+{
+    ma_uint64 iSample;
+
+    MA_ASSERT(pDst != NULL);
+    MA_ASSERT(pSrc != NULL);
+
+    for (iSample = 0; iSample < count; iSample += 1) {
+        pDst[iSample] = ma_clip_u8(pSrc[iSample]);
+    }
+}
+
+MA_API void ma_clip_samples_s16(ma_int16* pDst, const ma_int32* pSrc, ma_uint64 count)
+{
+    ma_uint64 iSample;
+
+    MA_ASSERT(pDst != NULL);
+    MA_ASSERT(pSrc != NULL);
+
+    for (iSample = 0; iSample < count; iSample += 1) {
+        pDst[iSample] = ma_clip_s16(pSrc[iSample]);
+    }
+}
+
+MA_API void ma_clip_samples_s24(ma_uint8* pDst, const ma_int64* pSrc, ma_uint64 count)
+{
+    ma_uint64 iSample;
+
+    MA_ASSERT(pDst != NULL);
+    MA_ASSERT(pSrc != NULL);
+
+    for (iSample = 0; iSample < count; iSample += 1) {
+        ma_int64 s = ma_clip_s24(pSrc[iSample]);
+        pDst[iSample*3 + 0] = (ma_uint8)((s & 0x000000FF) >> 0);
+        pDst[iSample*3 + 1] = (ma_uint8)((s & 0x0000FF00) >> 8);
+        pDst[iSample*3 + 2] = (ma_uint8)((s & 0x00FF0000) >> 16);
+    }
+}
+
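+/*
+Illustrative byte layout for the s24 path above (hypothetical sample value): a clipped sample of
+0x123456 is stored little-endian as pDst[0] = 0x56, pDst[1] = 0x34, pDst[2] = 0x12, i.e. three
+bytes per sample with the least significant byte first.
+*/
+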
+MA_API void ma_clip_samples_s32(ma_int32* pDst, const ma_int64* pSrc, ma_uint64 count)
+{
+    ma_uint64 iSample;
+
+    MA_ASSERT(pDst != NULL);
+    MA_ASSERT(pSrc != NULL);
+
+    for (iSample = 0; iSample < count; iSample += 1) {
+        pDst[iSample] = ma_clip_s32(pSrc[iSample]);
+    }
+}
+
+MA_API void ma_clip_samples_f32(float* pDst, const float* pSrc, ma_uint64 count)
+{
+    ma_uint64 iSample;
+
+    MA_ASSERT(pDst != NULL);
+    MA_ASSERT(pSrc != NULL);
+
+    for (iSample = 0; iSample < count; iSample += 1) {
+        pDst[iSample] = ma_clip_f32(pSrc[iSample]);
+    }
+}
+
+MA_API void ma_clip_pcm_frames(void* pDst, const void* pSrc, ma_uint64 frameCount, ma_format format, ma_uint32 channels)
+{
+    ma_uint64 sampleCount;
+
+    MA_ASSERT(pDst != NULL);
+    MA_ASSERT(pSrc != NULL);
+
+    sampleCount = frameCount * channels;
+
+    switch (format) {
+        case ma_format_u8:  ma_clip_samples_u8( (ma_uint8*)pDst, (const ma_int16*)pSrc, sampleCount); break;
+        case ma_format_s16: ma_clip_samples_s16((ma_int16*)pDst, (const ma_int32*)pSrc, sampleCount); break;
+        case ma_format_s24: ma_clip_samples_s24((ma_uint8*)pDst, (const ma_int64*)pSrc, sampleCount); break;
+        case ma_format_s32: ma_clip_samples_s32((ma_int32*)pDst, (const ma_int64*)pSrc, sampleCount); break;
+        case ma_format_f32: ma_clip_samples_f32((   float*)pDst, (const   float*)pSrc, sampleCount); break;
+
+        /* Do nothing if we don't know the format. We're including these here to silence a compiler warning about enums not being handled by the switch. */
+        case ma_format_unknown:
+        case ma_format_count:
+            break;
+    }
+}
+
+
+MA_API void ma_copy_and_apply_volume_factor_u8(ma_uint8* pSamplesOut, const ma_uint8* pSamplesIn, ma_uint64 sampleCount, float factor)
+{
+    ma_uint64 iSample;
+
+    if (pSamplesOut == NULL || pSamplesIn == NULL) {
+        return;
+    }
+
+    for (iSample = 0; iSample < sampleCount; iSample += 1) {
+        pSamplesOut[iSample] = (ma_uint8)(pSamplesIn[iSample] * factor);
+    }
+}
+
+MA_API void ma_copy_and_apply_volume_factor_s16(ma_int16* pSamplesOut, const ma_int16* pSamplesIn, ma_uint64 sampleCount, float factor)
+{
+    ma_uint64 iSample;
+
+    if (pSamplesOut == NULL || pSamplesIn == NULL) {
+        return;
+    }
+
+    for (iSample = 0; iSample < sampleCount; iSample += 1) {
+        pSamplesOut[iSample] = (ma_int16)(pSamplesIn[iSample] * factor);
+    }
+}
+
+MA_API void ma_copy_and_apply_volume_factor_s24(void* pSamplesOut, const void* pSamplesIn, ma_uint64 sampleCount, float factor)
+{
+    ma_uint64 iSample;
+    ma_uint8* pSamplesOut8;
+    ma_uint8* pSamplesIn8;
+
+    if (pSamplesOut == NULL || pSamplesIn == NULL) {
+        return;
+    }
+
+    pSamplesOut8 = (ma_uint8*)pSamplesOut;
+    pSamplesIn8  = (ma_uint8*)pSamplesIn;
+
+    for (iSample = 0; iSample < sampleCount; iSample += 1) {
+        ma_int32 sampleS32;
+
+        sampleS32 = (ma_int32)(((ma_uint32)(pSamplesIn8[iSample*3+0]) << 8) | ((ma_uint32)(pSamplesIn8[iSample*3+1]) << 16) | ((ma_uint32)(pSamplesIn8[iSample*3+2])) << 24);
+        sampleS32 = (ma_int32)(sampleS32 * factor);
+
+        pSamplesOut8[iSample*3+0] = (ma_uint8)(((ma_uint32)sampleS32 & 0x0000FF00) >>  8);
+        pSamplesOut8[iSample*3+1] = (ma_uint8)(((ma_uint32)sampleS32 & 0x00FF0000) >> 16);
+        pSamplesOut8[iSample*3+2] = (ma_uint8)(((ma_uint32)sampleS32 & 0xFF000000) >> 24);
+    }
+}
+
+MA_API void ma_copy_and_apply_volume_factor_s32(ma_int32* pSamplesOut, const ma_int32* pSamplesIn, ma_uint64 sampleCount, float factor)
+{
+    ma_uint64 iSample;
+
+    if (pSamplesOut == NULL || pSamplesIn == NULL) {
+        return;
+    }
+
+    for (iSample = 0; iSample < sampleCount; iSample += 1) {
+        pSamplesOut[iSample] = (ma_int32)(pSamplesIn[iSample] * factor);
+    }
+}
+
+MA_API void ma_copy_and_apply_volume_factor_f32(float* pSamplesOut, const float* pSamplesIn, ma_uint64 sampleCount, float factor)
+{
+    ma_uint64 iSample;
+
+    if (pSamplesOut == NULL || pSamplesIn == NULL) {
+        return;
+    }
+
+    if (factor == 1) {
+        if (pSamplesOut == pSamplesIn) {
+            /* In place. No-op. */
+        } else {
+            /* Just a copy. */
+            for (iSample = 0; iSample < sampleCount; iSample += 1) {
+                pSamplesOut[iSample] = pSamplesIn[iSample];
+            }
+        }
+    } else {
+        for (iSample = 0; iSample < sampleCount; iSample += 1) {
+            pSamplesOut[iSample] = pSamplesIn[iSample] * factor;
+        }
+    }
+}
+
+MA_API void ma_apply_volume_factor_u8(ma_uint8* pSamples, ma_uint64 sampleCount, float factor)
+{
+    ma_copy_and_apply_volume_factor_u8(pSamples, pSamples, sampleCount, factor);
+}
+
+MA_API void ma_apply_volume_factor_s16(ma_int16* pSamples, ma_uint64 sampleCount, float factor)
+{
+    ma_copy_and_apply_volume_factor_s16(pSamples, pSamples, sampleCount, factor);
+}
+
+MA_API void ma_apply_volume_factor_s24(void* pSamples, ma_uint64 sampleCount, float factor)
+{
+    ma_copy_and_apply_volume_factor_s24(pSamples, pSamples, sampleCount, factor);
+}
+
+MA_API void ma_apply_volume_factor_s32(ma_int32* pSamples, ma_uint64 sampleCount, float factor)
+{
+    ma_copy_and_apply_volume_factor_s32(pSamples, pSamples, sampleCount, factor);
+}
+
+MA_API void ma_apply_volume_factor_f32(float* pSamples, ma_uint64 sampleCount, float factor)
+{
+    ma_copy_and_apply_volume_factor_f32(pSamples, pSamples, sampleCount, factor);
+}
+
+MA_API void ma_copy_and_apply_volume_factor_pcm_frames_u8(ma_uint8* pFramesOut, const ma_uint8* pFramesIn, ma_uint64 frameCount, ma_uint32 channels, float factor)
+{
+    ma_copy_and_apply_volume_factor_u8(pFramesOut, pFramesIn, frameCount*channels, factor);
+}
+
+MA_API void ma_copy_and_apply_volume_factor_pcm_frames_s16(ma_int16* pFramesOut, const ma_int16* pFramesIn, ma_uint64 frameCount, ma_uint32 channels, float factor)
+{
+    ma_copy_and_apply_volume_factor_s16(pFramesOut, pFramesIn, frameCount*channels, factor);
+}
+
+MA_API void ma_copy_and_apply_volume_factor_pcm_frames_s24(void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount, ma_uint32 channels, float factor)
+{
+    ma_copy_and_apply_volume_factor_s24(pFramesOut, pFramesIn, frameCount*channels, factor);
+}
+
+MA_API void ma_copy_and_apply_volume_factor_pcm_frames_s32(ma_int32* pFramesOut, const ma_int32* pFramesIn, ma_uint64 frameCount, ma_uint32 channels, float factor)
+{
+    ma_copy_and_apply_volume_factor_s32(pFramesOut, pFramesIn, frameCount*channels, factor);
+}
+
+MA_API void ma_copy_and_apply_volume_factor_pcm_frames_f32(float* pFramesOut, const float* pFramesIn, ma_uint64 frameCount, ma_uint32 channels, float factor)
+{
+    ma_copy_and_apply_volume_factor_f32(pFramesOut, pFramesIn, frameCount*channels, factor);
+}
+
+MA_API void ma_copy_and_apply_volume_factor_pcm_frames(void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount, ma_format format, ma_uint32 channels, float factor)
+{
+    switch (format)
+    {
+        case ma_format_u8:  ma_copy_and_apply_volume_factor_pcm_frames_u8 ((ma_uint8*)pFramesOut, (const ma_uint8*)pFramesIn, frameCount, channels, factor); return;
+        case ma_format_s16: ma_copy_and_apply_volume_factor_pcm_frames_s16((ma_int16*)pFramesOut, (const ma_int16*)pFramesIn, frameCount, channels, factor); return;
+        case ma_format_s24: ma_copy_and_apply_volume_factor_pcm_frames_s24(           pFramesOut,                  pFramesIn, frameCount, channels, factor); return;
+        case ma_format_s32: ma_copy_and_apply_volume_factor_pcm_frames_s32((ma_int32*)pFramesOut, (const ma_int32*)pFramesIn, frameCount, channels, factor); return;
+        case ma_format_f32: ma_copy_and_apply_volume_factor_pcm_frames_f32((   float*)pFramesOut, (const   float*)pFramesIn, frameCount, channels, factor); return;
+        default: return;    /* Do nothing. */
+    }
+}
+
+MA_API void ma_apply_volume_factor_pcm_frames_u8(ma_uint8* pFrames, ma_uint64 frameCount, ma_uint32 channels, float factor)
+{
+    ma_copy_and_apply_volume_factor_pcm_frames_u8(pFrames, pFrames, frameCount, channels, factor);
+}
+
+MA_API void ma_apply_volume_factor_pcm_frames_s16(ma_int16* pFrames, ma_uint64 frameCount, ma_uint32 channels, float factor)
+{
+    ma_copy_and_apply_volume_factor_pcm_frames_s16(pFrames, pFrames, frameCount, channels, factor);
+}
+
+MA_API void ma_apply_volume_factor_pcm_frames_s24(void* pFrames, ma_uint64 frameCount, ma_uint32 channels, float factor)
+{
+    ma_copy_and_apply_volume_factor_pcm_frames_s24(pFrames, pFrames, frameCount, channels, factor);
+}
+
+MA_API void ma_apply_volume_factor_pcm_frames_s32(ma_int32* pFrames, ma_uint64 frameCount, ma_uint32 channels, float factor)
+{
+    ma_copy_and_apply_volume_factor_pcm_frames_s32(pFrames, pFrames, frameCount, channels, factor);
+}
+
+MA_API void ma_apply_volume_factor_pcm_frames_f32(float* pFrames, ma_uint64 frameCount, ma_uint32 channels, float factor)
+{
+    ma_copy_and_apply_volume_factor_pcm_frames_f32(pFrames, pFrames, frameCount, channels, factor);
+}
+
+MA_API void ma_apply_volume_factor_pcm_frames(void* pFramesOut, ma_uint64 frameCount, ma_format format, ma_uint32 channels, float factor)
+{
+    ma_copy_and_apply_volume_factor_pcm_frames(pFramesOut, pFramesOut, frameCount, format, channels, factor);
+}
+
+
+MA_API void ma_copy_and_apply_volume_factor_per_channel_f32(float* pFramesOut, const float* pFramesIn, ma_uint64 frameCount, ma_uint32 channels, float* pChannelGains)
+{
+    ma_uint64 iFrame;
+
+    if (channels == 2) {
+        /* TODO: Do an optimized implementation for stereo and mono. Can do a SIMD optimized implementation as well. */
+    }
+
+    for (iFrame = 0; iFrame < frameCount; iFrame += 1) {
+        ma_uint32 iChannel;
+        for (iChannel = 0; iChannel < channels; iChannel += 1) {
+            pFramesOut[iFrame * channels + iChannel] = pFramesIn[iFrame * channels + iChannel] * pChannelGains[iChannel];
+        }
+    }
+}
+
+
+
+static MA_INLINE ma_int16 ma_apply_volume_unclipped_u8(ma_int16 x, ma_int16 volume)
+{
+    return (ma_int16)(((ma_int32)x * (ma_int32)volume) >> 8);
+}
+
+static MA_INLINE ma_int32 ma_apply_volume_unclipped_s16(ma_int32 x, ma_int16 volume)
+{
+    return (ma_int32)((x * volume) >> 8);
+}
+
+static MA_INLINE ma_int64 ma_apply_volume_unclipped_s24(ma_int64 x, ma_int16 volume)
+{
+    return (ma_int64)((x * volume) >> 8);
+}
+
+static MA_INLINE ma_int64 ma_apply_volume_unclipped_s32(ma_int64 x, ma_int16 volume)
+{
+    return (ma_int64)((x * volume) >> 8);
+}
+
+static MA_INLINE float ma_apply_volume_unclipped_f32(float x, float volume)
+{
+    return x * volume;
+}
+
+
+MA_API void ma_copy_and_apply_volume_and_clip_samples_u8(ma_uint8* pDst, const ma_int16* pSrc, ma_uint64 count, float volume)
+{
+    ma_uint64 iSample;
+    ma_int16 volumeFixed;
+
+    MA_ASSERT(pDst != NULL);
+    MA_ASSERT(pSrc != NULL);
+
+    volumeFixed = ma_float_to_fixed_16(volume);
+
+    for (iSample = 0; iSample < count; iSample += 1) {
+        pDst[iSample] = ma_clip_u8(ma_apply_volume_unclipped_u8(pSrc[iSample], volumeFixed));
+    }
+}
+
+MA_API void ma_copy_and_apply_volume_and_clip_samples_s16(ma_int16* pDst, const ma_int32* pSrc, ma_uint64 count, float volume)
+{
+    ma_uint64 iSample;
+    ma_int16 volumeFixed;
+
+    MA_ASSERT(pDst != NULL);
+    MA_ASSERT(pSrc != NULL);
+
+    volumeFixed = ma_float_to_fixed_16(volume);
+
+    for (iSample = 0; iSample < count; iSample += 1) {
+        pDst[iSample] = ma_clip_s16(ma_apply_volume_unclipped_s16(pSrc[iSample], volumeFixed));
+    }
+}
+
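+/*
+Worked example for the fixed point volume helpers above (hypothetical values, and assuming
+ma_float_to_fixed_16() produces 8 fractional bits, which is what the >> 8 in the helpers implies):
+volume = 0.5f becomes volumeFixed = 128, so a sample x = 1000 maps to (1000 * 128) >> 8 = 500
+before clipping, i.e. the sample at half amplitude.
+*/
+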
+MA_API void ma_copy_and_apply_volume_and_clip_samples_s24(ma_uint8* pDst, const ma_int64* pSrc, ma_uint64 count, float volume)
+{
+    ma_uint64 iSample;
+    ma_int16 volumeFixed;
+
+    MA_ASSERT(pDst != NULL);
+    MA_ASSERT(pSrc != NULL);
+
+    volumeFixed = ma_float_to_fixed_16(volume);
+
+    for (iSample = 0; iSample < count; iSample += 1) {
+        ma_int64 s = ma_clip_s24(ma_apply_volume_unclipped_s24(pSrc[iSample], volumeFixed));
+        pDst[iSample*3 + 0] = (ma_uint8)((s & 0x000000FF) >> 0);
+        pDst[iSample*3 + 1] = (ma_uint8)((s & 0x0000FF00) >> 8);
+        pDst[iSample*3 + 2] = (ma_uint8)((s & 0x00FF0000) >> 16);
+    }
+}
+
+MA_API void ma_copy_and_apply_volume_and_clip_samples_s32(ma_int32* pDst, const ma_int64* pSrc, ma_uint64 count, float volume)
+{
+    ma_uint64 iSample;
+    ma_int16 volumeFixed;
+
+    MA_ASSERT(pDst != NULL);
+    MA_ASSERT(pSrc != NULL);
+
+    volumeFixed = ma_float_to_fixed_16(volume);
+
+    for (iSample = 0; iSample < count; iSample += 1) {
+        pDst[iSample] = ma_clip_s32(ma_apply_volume_unclipped_s32(pSrc[iSample], volumeFixed));
+    }
+}
+
+MA_API void ma_copy_and_apply_volume_and_clip_samples_f32(float* pDst, const float* pSrc, ma_uint64 count, float volume)
+{
+    ma_uint64 iSample;
+
+    MA_ASSERT(pDst != NULL);
+    MA_ASSERT(pSrc != NULL);
+
+    /* For the f32 case we need to make sure this supports in-place processing where the input and output buffers are the same. */
+
+    for (iSample = 0; iSample < count; iSample += 1) {
+        pDst[iSample] = ma_clip_f32(ma_apply_volume_unclipped_f32(pSrc[iSample], volume));
+    }
+}
+
+MA_API void ma_copy_and_apply_volume_and_clip_pcm_frames(void* pDst, const void* pSrc, ma_uint64 frameCount, ma_format format, ma_uint32 channels, float volume)
+{
+    MA_ASSERT(pDst != NULL);
+    MA_ASSERT(pSrc != NULL);
+
+    if (volume == 1) {
+        ma_clip_pcm_frames(pDst, pSrc, frameCount, format, channels);   /* Optimized case for volume = 1. */
+    } else if (volume == 0) {
+        ma_silence_pcm_frames(pDst, frameCount, format, channels);      /* Optimized case for volume = 0. */
+    } else {
+        ma_uint64 sampleCount = frameCount * channels;
+
+        switch (format) {
+            case ma_format_u8:  ma_copy_and_apply_volume_and_clip_samples_u8( (ma_uint8*)pDst, (const ma_int16*)pSrc, sampleCount, volume); break;
+            case ma_format_s16: ma_copy_and_apply_volume_and_clip_samples_s16((ma_int16*)pDst, (const ma_int32*)pSrc, sampleCount, volume); break;
+            case ma_format_s24: ma_copy_and_apply_volume_and_clip_samples_s24((ma_uint8*)pDst, (const ma_int64*)pSrc, sampleCount, volume); break;
+            case ma_format_s32: ma_copy_and_apply_volume_and_clip_samples_s32((ma_int32*)pDst, (const ma_int64*)pSrc, sampleCount, volume); break;
+            case ma_format_f32: ma_copy_and_apply_volume_and_clip_samples_f32((   float*)pDst, (const   float*)pSrc, sampleCount, volume); break;
+
+            /* Do nothing if we don't know the format. We're including these here to silence a compiler warning about enums not being handled by the switch. */
+            case ma_format_unknown:
+            case ma_format_count:
+                break;
+        }
+    }
+}
+
+
+
+MA_API float ma_volume_linear_to_db(float factor)
+{
+    return 20*ma_log10f(factor);
+}
+
+MA_API float ma_volume_db_to_linear(float gain)
+{
+    return ma_powf(10, gain/20.0f);
+}
+
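+/*
+Worked example for the two conversions above (hypothetical values): a linear factor of 0.5 maps to
+20*log10(0.5) = -6.02dB, and going back, ma_volume_db_to_linear(-6.02f) = 10^(-6.02/20) = 0.5. A
+factor of 1 maps to 0dB, which is why a gain greater than 0dB is rejected by
+ma_device_set_master_volume_db() further up.
+*/
+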
+MA_API ma_result ma_mix_pcm_frames_f32(float* pDst, const float* pSrc, ma_uint64 frameCount, ma_uint32 channels, float volume)
+{
+    ma_uint64 iSample;
+    ma_uint64 sampleCount;
+
+    if (pDst == NULL || pSrc == NULL || channels == 0) {
+        return MA_INVALID_ARGS;
+    }
+
+    if (volume == 0) {
+        return MA_SUCCESS;  /* No changes if the volume is 0. */
+    }
+
+    sampleCount = frameCount * channels;
+
+    if (volume == 1) {
+        for (iSample = 0; iSample < sampleCount; iSample += 1) {
+            pDst[iSample] += pSrc[iSample];
+        }
+    } else {
+        for (iSample = 0; iSample < sampleCount; iSample += 1) {
+            pDst[iSample] += ma_apply_volume_unclipped_f32(pSrc[iSample], volume);
+        }
+    }
+
+    return MA_SUCCESS;
+}
+
+
+
+/**************************************************************************************************************************************************************
+
+Format Conversion
+
+**************************************************************************************************************************************************************/
+
+static MA_INLINE ma_int16 ma_pcm_sample_f32_to_s16(float x)
+{
+    return (ma_int16)(x * 32767.0f);
+}
+
+static MA_INLINE ma_int16 ma_pcm_sample_u8_to_s16_no_scale(ma_uint8 x)
+{
+    return (ma_int16)((ma_int16)x - 128);
+}
+
+static MA_INLINE ma_int64 ma_pcm_sample_s24_to_s32_no_scale(const ma_uint8* x)
+{
+    return (ma_int64)(((ma_uint64)x[0] << 40) | ((ma_uint64)x[1] << 48) | ((ma_uint64)x[2] << 56)) >> 40;  /* Make sure the sign bits are maintained. */
+}
+
+static MA_INLINE void ma_pcm_sample_s32_to_s24_no_scale(ma_int64 x, ma_uint8* s24)
+{
+    s24[0] = (ma_uint8)((x & 0x000000FF) >> 0);
+    s24[1] = (ma_uint8)((x & 0x0000FF00) >> 8);
+    s24[2] = (ma_uint8)((x & 0x00FF0000) >> 16);
+}
+
+
+/* u8 */
+MA_API void ma_pcm_u8_to_u8(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+{
+    (void)ditherMode;
+    ma_copy_memory_64(dst, src, count * sizeof(ma_uint8));
+}
+
+
+static MA_INLINE void ma_pcm_u8_to_s16__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+{
+    ma_int16* dst_s16 = (ma_int16*)dst;
+    const ma_uint8* src_u8 = (const ma_uint8*)src;
+
+    ma_uint64 i;
+    for (i = 0; i < count; i += 1) {
+        ma_int16 x = src_u8[i];
+        x = (ma_int16)(x - 128);
+        x = (ma_int16)(x << 8);
+        dst_s16[i] = x;
+    }
+
+    (void)ditherMode;
+}
+
+static MA_INLINE void ma_pcm_u8_to_s16__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+{
+    ma_pcm_u8_to_s16__reference(dst, src, count, ditherMode);
+}
+
+#if defined(MA_SUPPORT_SSE2)
+static MA_INLINE void ma_pcm_u8_to_s16__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+{
+    ma_pcm_u8_to_s16__optimized(dst, src, count, ditherMode);
+}
+#endif
+#if defined(MA_SUPPORT_NEON)
+static MA_INLINE void ma_pcm_u8_to_s16__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+{
+    ma_pcm_u8_to_s16__optimized(dst, src, count, ditherMode);
+}
+#endif
+
+MA_API void ma_pcm_u8_to_s16(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+{
+#ifdef MA_USE_REFERENCE_CONVERSION_APIS
+    ma_pcm_u8_to_s16__reference(dst, src, count, ditherMode);
+#else
+    #if defined(MA_SUPPORT_SSE2)
+        if (ma_has_sse2()) {
+            ma_pcm_u8_to_s16__sse2(dst, src, count, ditherMode);
+        } else
+    #elif defined(MA_SUPPORT_NEON)
+        if (ma_has_neon()) {
+            ma_pcm_u8_to_s16__neon(dst, src, count, ditherMode);
+        } else
+    #endif
+    {
+        ma_pcm_u8_to_s16__optimized(dst, src, count, ditherMode);
+    }
+#endif
+}
+
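+/*
+Worked example for the u8 -> s16 reference path above (hypothetical sample value): an input of 200
+is first re-centred to 200 - 128 = 72 and then scaled with 72 << 8 = 18432. The full u8 range of
+0..255 therefore maps onto -32768..32512 in steps of 256.
+*/
+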
+static MA_INLINE void ma_pcm_u8_to_s24__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+{
+    ma_uint8* dst_s24 = (ma_uint8*)dst;
+    const ma_uint8* src_u8 = (const ma_uint8*)src;
+
+    ma_uint64 i;
+    for (i = 0; i < count; i += 1) {
+        ma_int16 x = src_u8[i];
+        x = (ma_int16)(x - 128);
+
+        dst_s24[i*3+0] = 0;
+        dst_s24[i*3+1] = 0;
+        dst_s24[i*3+2] = (ma_uint8)((ma_int8)x);
+    }
+
+    (void)ditherMode;
+}
+
+static MA_INLINE void ma_pcm_u8_to_s24__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+{
+    ma_pcm_u8_to_s24__reference(dst, src, count, ditherMode);
+}
+
+#if defined(MA_SUPPORT_SSE2)
+static MA_INLINE void ma_pcm_u8_to_s24__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+{
+    ma_pcm_u8_to_s24__optimized(dst, src, count, ditherMode);
+}
+#endif
+#if defined(MA_SUPPORT_NEON)
+static MA_INLINE void ma_pcm_u8_to_s24__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+{
+    ma_pcm_u8_to_s24__optimized(dst, src, count, ditherMode);
+}
+#endif
+
+MA_API void ma_pcm_u8_to_s24(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+{
+#ifdef MA_USE_REFERENCE_CONVERSION_APIS
+    ma_pcm_u8_to_s24__reference(dst, src, count, ditherMode);
+#else
+    #if defined(MA_SUPPORT_SSE2)
+        if (ma_has_sse2()) {
+            ma_pcm_u8_to_s24__sse2(dst, src, count, ditherMode);
+        } else
+    #elif defined(MA_SUPPORT_NEON)
+        if (ma_has_neon()) {
+            ma_pcm_u8_to_s24__neon(dst, src, count, ditherMode);
+        } else
+    #endif
+    {
+        ma_pcm_u8_to_s24__optimized(dst, src, count, ditherMode);
+    }
+#endif
+}
+
+
+static MA_INLINE void ma_pcm_u8_to_s32__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+{
+    ma_int32* dst_s32 = (ma_int32*)dst;
+    const ma_uint8* src_u8 = (const ma_uint8*)src;
+
+    ma_uint64 i;
+    for (i = 0; i < count; i += 1) {
+        ma_int32 x = src_u8[i];
+        x = x - 128;
+        x = x << 24;
+        dst_s32[i] = x;
+    }
+
+    (void)ditherMode;
+}
+
+static MA_INLINE void ma_pcm_u8_to_s32__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+{
+    ma_pcm_u8_to_s32__reference(dst, src, count, ditherMode);
+}
+
+#if defined(MA_SUPPORT_SSE2)
+static MA_INLINE void ma_pcm_u8_to_s32__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+{
+    ma_pcm_u8_to_s32__optimized(dst, src, count, ditherMode);
+}
+#endif
+#if defined(MA_SUPPORT_NEON)
+static MA_INLINE void ma_pcm_u8_to_s32__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+{
+    ma_pcm_u8_to_s32__optimized(dst, src, count, ditherMode);
+}
+#endif
+
+MA_API void ma_pcm_u8_to_s32(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+{
+#ifdef MA_USE_REFERENCE_CONVERSION_APIS
+    ma_pcm_u8_to_s32__reference(dst, src, count, ditherMode);
+#else
+    #if defined(MA_SUPPORT_SSE2)
+        if (ma_has_sse2()) {
+            ma_pcm_u8_to_s32__sse2(dst, src, count, ditherMode);
+        } else
+    #elif defined(MA_SUPPORT_NEON)
+        if (ma_has_neon()) {
+            ma_pcm_u8_to_s32__neon(dst, src, count, ditherMode);
+        } else
+    #endif
+    {
+        ma_pcm_u8_to_s32__optimized(dst, src, count, ditherMode);
+    }
+#endif
+}
+
+
+static MA_INLINE void ma_pcm_u8_to_f32__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+{
+    float* dst_f32 = (float*)dst;
+    const ma_uint8* src_u8 = (const ma_uint8*)src;
+
+    ma_uint64 i;
+    for (i = 0; i < count; i += 1) {
+        float x = (float)src_u8[i];
+        x = x * 0.00784313725490196078f;    /* 0..255 to 0..2 */
+        x = x - 1;                          /* 0..2 to -1..1 */
+
+        dst_f32[i] = x;
+    }
+
+    (void)ditherMode;
+}
+
+static MA_INLINE void ma_pcm_u8_to_f32__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+{
+    ma_pcm_u8_to_f32__reference(dst, src, count, ditherMode);
+}
+
+#if defined(MA_SUPPORT_SSE2)
+static MA_INLINE void ma_pcm_u8_to_f32__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+{
+    ma_pcm_u8_to_f32__optimized(dst, src, count, ditherMode);
+}
+#endif
+#if defined(MA_SUPPORT_NEON)
+static MA_INLINE void ma_pcm_u8_to_f32__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+{
+    ma_pcm_u8_to_f32__optimized(dst, src, count, ditherMode);
+}
+#endif
+
+MA_API void ma_pcm_u8_to_f32(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+{
+#ifdef MA_USE_REFERENCE_CONVERSION_APIS
+    ma_pcm_u8_to_f32__reference(dst, src, count, ditherMode);
+#else
+    #if defined(MA_SUPPORT_SSE2)
+        if (ma_has_sse2()) {
+            ma_pcm_u8_to_f32__sse2(dst, src, count, ditherMode);
+        } else
+    #elif defined(MA_SUPPORT_NEON)
+        if (ma_has_neon()) {
+            ma_pcm_u8_to_f32__neon(dst, src, count, ditherMode);
+        } else
+    #endif
+    {
+        ma_pcm_u8_to_f32__optimized(dst, src, count, ditherMode);
+    }
+#endif
+}
+
+
+#ifdef MA_USE_REFERENCE_CONVERSION_APIS
+static MA_INLINE void ma_pcm_interleave_u8__reference(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels)
+{
+    ma_uint8* dst_u8 = (ma_uint8*)dst;
+    const ma_uint8** src_u8 = (const ma_uint8**)src;
+
+    ma_uint64 iFrame;
+    for (iFrame = 0; iFrame < frameCount; iFrame += 1) {
+        ma_uint32 iChannel;
+        for (iChannel = 0; iChannel < channels; iChannel += 1) {
+            dst_u8[iFrame*channels + iChannel] = src_u8[iChannel][iFrame];
+        }
+    }
+}
+#else
+static MA_INLINE void ma_pcm_interleave_u8__optimized(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels)
+{
+    ma_uint8* dst_u8 = (ma_uint8*)dst;
+    const ma_uint8** src_u8 = (const ma_uint8**)src;
+
+    if (channels == 1) {
+        ma_copy_memory_64(dst, src[0], frameCount * sizeof(ma_uint8));
+    } else if (channels == 2) {
+        ma_uint64 iFrame;
+        for (iFrame = 0; iFrame < frameCount; iFrame += 1) {
+            dst_u8[iFrame*2 + 0] = src_u8[0][iFrame];
+            dst_u8[iFrame*2 + 1] = src_u8[1][iFrame];
+        }
+    } else {
+        ma_uint64 iFrame;
+        for (iFrame = 0; iFrame < frameCount; iFrame += 1) {
+            ma_uint32 iChannel;
+            for (iChannel = 0; iChannel < channels; iChannel += 1) {
+                dst_u8[iFrame*channels + iChannel] = src_u8[iChannel][iFrame];
+            }
+        }
+    }
+}
+#endif
+
+MA_API void ma_pcm_interleave_u8(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels)
+{
+#ifdef MA_USE_REFERENCE_CONVERSION_APIS
+    ma_pcm_interleave_u8__reference(dst, src, frameCount, channels);
+#else
+    ma_pcm_interleave_u8__optimized(dst, src, frameCount, channels);
+#endif
+}
+
+
+static MA_INLINE void ma_pcm_deinterleave_u8__reference(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels)
+{
+    ma_uint8** dst_u8 = (ma_uint8**)dst;
+    const ma_uint8* src_u8 = (const ma_uint8*)src;
+
+    ma_uint64 iFrame;
+    for (iFrame = 0; iFrame < frameCount; iFrame += 1) {
+        ma_uint32 iChannel;
+        for (iChannel = 0; iChannel < channels; iChannel += 1) {
+            dst_u8[iChannel][iFrame] = src_u8[iFrame*channels + iChannel];
+        }
+    }
+}
+
+static MA_INLINE void ma_pcm_deinterleave_u8__optimized(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels)
+{
+    ma_pcm_deinterleave_u8__reference(dst, src, frameCount, channels);
+}
+
+MA_API void ma_pcm_deinterleave_u8(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels)
+{
+#ifdef MA_USE_REFERENCE_CONVERSION_APIS
+    ma_pcm_deinterleave_u8__reference(dst, src, frameCount, channels);
+#else
+    ma_pcm_deinterleave_u8__optimized(dst, src, frameCount, channels);
+#endif
+}
+
+/* s16 */
+static MA_INLINE void ma_pcm_s16_to_u8__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+{
+    ma_uint8* dst_u8 = (ma_uint8*)dst;
+    const ma_int16* src_s16 = (const ma_int16*)src;
+
+    if (ditherMode == ma_dither_mode_none) {
+        ma_uint64 i;
+        for (i = 0; i < count; i += 1) {
+            ma_int16 x = src_s16[i];
+            x = (ma_int16)(x >> 8);
+            x = (ma_int16)(x + 128);
+            dst_u8[i] = (ma_uint8)x;
+        }
+    } else {
+        ma_uint64 i;
+        for (i = 0; i < count; i += 1) {
+            ma_int16 x = src_s16[i];
+
+            /* Dither. Don't overflow. */
+            ma_int32 dither = ma_dither_s32(ditherMode, -0x80, 0x7F);
+            if ((x + dither) <= 0x7FFF) {
+                x = (ma_int16)(x + dither);
+            } else {
+                x = 0x7FFF;
+            }
+
+            x = (ma_int16)(x >> 8);
+            x = (ma_int16)(x + 128);
+            dst_u8[i] = (ma_uint8)x;
+        }
+    }
+}
+
+static MA_INLINE void ma_pcm_s16_to_u8__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+{
+    ma_pcm_s16_to_u8__reference(dst, src, count, ditherMode);
+}
+
+#if defined(MA_SUPPORT_SSE2)
+static MA_INLINE void ma_pcm_s16_to_u8__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+{
+    ma_pcm_s16_to_u8__optimized(dst, src, count, ditherMode);
+}
+#endif
+#if defined(MA_SUPPORT_NEON)
+static MA_INLINE void ma_pcm_s16_to_u8__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+{
+    ma_pcm_s16_to_u8__optimized(dst, src, count, ditherMode);
+}
+#endif
+
+MA_API void ma_pcm_s16_to_u8(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+{
+#ifdef MA_USE_REFERENCE_CONVERSION_APIS
+    ma_pcm_s16_to_u8__reference(dst, src, count, ditherMode);
+#else
+    #if defined(MA_SUPPORT_SSE2)
+        if (ma_has_sse2()) {
+            ma_pcm_s16_to_u8__sse2(dst, src, count, ditherMode);
+        } else
+    #elif defined(MA_SUPPORT_NEON)
+        if (ma_has_neon()) {
+            ma_pcm_s16_to_u8__neon(dst, src, count, ditherMode);
+        } else
+    #endif
+    {
+        ma_pcm_s16_to_u8__optimized(dst, src, count, ditherMode);
+    }
+#endif
+}
+
+
+MA_API void ma_pcm_s16_to_s16(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode)
+{
+    (void)ditherMode;
+    ma_copy_memory_64(dst, src, count * sizeof(ma_int16));
+}
+
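+/*
+Worked example for the dithered s16 -> u8 path above (hypothetical values): without dither, x = 32700
+maps to (32700 >> 8) + 128 = 255. With dither, a random offset in [-0x80, 0x7F] is added first and
+the sum is clamped at 0x7FFF so the subsequent >> 8 cannot overflow: x = 32700 with dither = 100
+would exceed 0x7FFF, so it's clamped to 32767 and still maps to (32767 >> 8) + 128 = 255.
+*/
+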
ma_pcm_s16_to_s24__optimized(dst, src, count, ditherMode); + } +#endif +} + + +static MA_INLINE void ma_pcm_s16_to_s32__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_int32* dst_s32 = (ma_int32*)dst; + const ma_int16* src_s16 = (const ma_int16*)src; + + ma_uint64 i; + for (i = 0; i < count; i += 1) { + dst_s32[i] = src_s16[i] << 16; + } + + (void)ditherMode; +} + +static MA_INLINE void ma_pcm_s16_to_s32__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s16_to_s32__reference(dst, src, count, ditherMode); +} + +#if defined(MA_SUPPORT_SSE2) +static MA_INLINE void ma_pcm_s16_to_s32__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s16_to_s32__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_NEON) +static MA_INLINE void ma_pcm_s16_to_s32__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s16_to_s32__optimized(dst, src, count, ditherMode); +} +#endif + +MA_API void ma_pcm_s16_to_s32(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_s16_to_s32__reference(dst, src, count, ditherMode); +#else + # if defined(MA_SUPPORT_SSE2) + if (ma_has_sse2()) { + ma_pcm_s16_to_s32__sse2(dst, src, count, ditherMode); + } else + #elif defined(MA_SUPPORT_NEON) + if (ma_has_neon()) { + ma_pcm_s16_to_s32__neon(dst, src, count, ditherMode); + } else + #endif + { + ma_pcm_s16_to_s32__optimized(dst, src, count, ditherMode); + } +#endif +} + + +static MA_INLINE void ma_pcm_s16_to_f32__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + float* dst_f32 = (float*)dst; + const ma_int16* src_s16 = (const ma_int16*)src; + + ma_uint64 i; + for (i = 0; i < count; i += 1) { + float x = (float)src_s16[i]; + +#if 0 + /* The accurate way. */ + x = x + 32768.0f; /* -32768..32767 to 0..65535 */ + x = x * 0.00003051804379339284f; /* 0..65535 to 0..2 */ + x = x - 1; /* 0..2 to -1..1 */ +#else + /* The fast way. 
*/ + x = x * 0.000030517578125f; /* -32768..32767 to -1..0.999969482421875 */ +#endif + + dst_f32[i] = x; + } + + (void)ditherMode; +} + +static MA_INLINE void ma_pcm_s16_to_f32__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s16_to_f32__reference(dst, src, count, ditherMode); +} + +#if defined(MA_SUPPORT_SSE2) +static MA_INLINE void ma_pcm_s16_to_f32__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s16_to_f32__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_NEON) +static MA_INLINE void ma_pcm_s16_to_f32__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s16_to_f32__optimized(dst, src, count, ditherMode); +} +#endif + +MA_API void ma_pcm_s16_to_f32(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_s16_to_f32__reference(dst, src, count, ditherMode); +#else + # if defined(MA_SUPPORT_SSE2) + if (ma_has_sse2()) { + ma_pcm_s16_to_f32__sse2(dst, src, count, ditherMode); + } else + #elif defined(MA_SUPPORT_NEON) + if (ma_has_neon()) { + ma_pcm_s16_to_f32__neon(dst, src, count, ditherMode); + } else + #endif + { + ma_pcm_s16_to_f32__optimized(dst, src, count, ditherMode); + } +#endif +} + + +static MA_INLINE void ma_pcm_interleave_s16__reference(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels) +{ + ma_int16* dst_s16 = (ma_int16*)dst; + const ma_int16** src_s16 = (const ma_int16**)src; + + ma_uint64 iFrame; + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + ma_uint32 iChannel; + for (iChannel = 0; iChannel < channels; iChannel += 1) { + dst_s16[iFrame*channels + iChannel] = src_s16[iChannel][iFrame]; + } + } +} + +static MA_INLINE void ma_pcm_interleave_s16__optimized(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels) +{ + ma_pcm_interleave_s16__reference(dst, src, frameCount, channels); +} + +MA_API void ma_pcm_interleave_s16(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_interleave_s16__reference(dst, src, frameCount, channels); +#else + ma_pcm_interleave_s16__optimized(dst, src, frameCount, channels); +#endif +} + + +static MA_INLINE void ma_pcm_deinterleave_s16__reference(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels) +{ + ma_int16** dst_s16 = (ma_int16**)dst; + const ma_int16* src_s16 = (const ma_int16*)src; + + ma_uint64 iFrame; + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + ma_uint32 iChannel; + for (iChannel = 0; iChannel < channels; iChannel += 1) { + dst_s16[iChannel][iFrame] = src_s16[iFrame*channels + iChannel]; + } + } +} + +static MA_INLINE void ma_pcm_deinterleave_s16__optimized(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels) +{ + ma_pcm_deinterleave_s16__reference(dst, src, frameCount, channels); +} + +MA_API void ma_pcm_deinterleave_s16(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_deinterleave_s16__reference(dst, src, frameCount, channels); +#else + ma_pcm_deinterleave_s16__optimized(dst, src, frameCount, channels); +#endif +} + + +/* s24 */ +static MA_INLINE void ma_pcm_s24_to_u8__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_uint8* dst_u8 = (ma_uint8*)dst; + const ma_uint8* src_s24 = (const ma_uint8*)src; + + if (ditherMode == ma_dither_mode_none) { + 
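+        /* Packed little-endian s24: byte [2] is the high (sign) byte, so without dithering the
+           conversion to u8 is just that byte reinterpreted as signed and biased by 128. */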
ma_uint64 i; + for (i = 0; i < count; i += 1) { + dst_u8[i] = (ma_uint8)((ma_int8)src_s24[i*3 + 2] + 128); + } + } else { + ma_uint64 i; + for (i = 0; i < count; i += 1) { + ma_int32 x = (ma_int32)(((ma_uint32)(src_s24[i*3+0]) << 8) | ((ma_uint32)(src_s24[i*3+1]) << 16) | ((ma_uint32)(src_s24[i*3+2])) << 24); + + /* Dither. Don't overflow. */ + ma_int32 dither = ma_dither_s32(ditherMode, -0x800000, 0x7FFFFF); + if ((ma_int64)x + dither <= 0x7FFFFFFF) { + x = x + dither; + } else { + x = 0x7FFFFFFF; + } + + x = x >> 24; + x = x + 128; + dst_u8[i] = (ma_uint8)x; + } + } +} + +static MA_INLINE void ma_pcm_s24_to_u8__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s24_to_u8__reference(dst, src, count, ditherMode); +} + +#if defined(MA_SUPPORT_SSE2) +static MA_INLINE void ma_pcm_s24_to_u8__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s24_to_u8__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_NEON) +static MA_INLINE void ma_pcm_s24_to_u8__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s24_to_u8__optimized(dst, src, count, ditherMode); +} +#endif + +MA_API void ma_pcm_s24_to_u8(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_s24_to_u8__reference(dst, src, count, ditherMode); +#else + # if defined(MA_SUPPORT_SSE2) + if (ma_has_sse2()) { + ma_pcm_s24_to_u8__sse2(dst, src, count, ditherMode); + } else + #elif defined(MA_SUPPORT_NEON) + if (ma_has_neon()) { + ma_pcm_s24_to_u8__neon(dst, src, count, ditherMode); + } else + #endif + { + ma_pcm_s24_to_u8__optimized(dst, src, count, ditherMode); + } +#endif +} + + +static MA_INLINE void ma_pcm_s24_to_s16__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_int16* dst_s16 = (ma_int16*)dst; + const ma_uint8* src_s24 = (const ma_uint8*)src; + + if (ditherMode == ma_dither_mode_none) { + ma_uint64 i; + for (i = 0; i < count; i += 1) { + ma_uint16 dst_lo = ((ma_uint16)src_s24[i*3 + 1]); + ma_uint16 dst_hi = (ma_uint16)((ma_uint16)src_s24[i*3 + 2] << 8); + dst_s16[i] = (ma_int16)(dst_lo | dst_hi); + } + } else { + ma_uint64 i; + for (i = 0; i < count; i += 1) { + ma_int32 x = (ma_int32)(((ma_uint32)(src_s24[i*3+0]) << 8) | ((ma_uint32)(src_s24[i*3+1]) << 16) | ((ma_uint32)(src_s24[i*3+2])) << 24); + + /* Dither. Don't overflow. 
*/ + ma_int32 dither = ma_dither_s32(ditherMode, -0x8000, 0x7FFF); + if ((ma_int64)x + dither <= 0x7FFFFFFF) { + x = x + dither; + } else { + x = 0x7FFFFFFF; + } + + x = x >> 16; + dst_s16[i] = (ma_int16)x; + } + } +} + +static MA_INLINE void ma_pcm_s24_to_s16__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s24_to_s16__reference(dst, src, count, ditherMode); +} + +#if defined(MA_SUPPORT_SSE2) +static MA_INLINE void ma_pcm_s24_to_s16__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s24_to_s16__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_NEON) +static MA_INLINE void ma_pcm_s24_to_s16__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s24_to_s16__optimized(dst, src, count, ditherMode); +} +#endif + +MA_API void ma_pcm_s24_to_s16(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_s24_to_s16__reference(dst, src, count, ditherMode); +#else + # if defined(MA_SUPPORT_SSE2) + if (ma_has_sse2()) { + ma_pcm_s24_to_s16__sse2(dst, src, count, ditherMode); + } else + #elif defined(MA_SUPPORT_NEON) + if (ma_has_neon()) { + ma_pcm_s24_to_s16__neon(dst, src, count, ditherMode); + } else + #endif + { + ma_pcm_s24_to_s16__optimized(dst, src, count, ditherMode); + } +#endif +} + + +MA_API void ma_pcm_s24_to_s24(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + (void)ditherMode; + + ma_copy_memory_64(dst, src, count * 3); +} + + +static MA_INLINE void ma_pcm_s24_to_s32__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_int32* dst_s32 = (ma_int32*)dst; + const ma_uint8* src_s24 = (const ma_uint8*)src; + + ma_uint64 i; + for (i = 0; i < count; i += 1) { + dst_s32[i] = (ma_int32)(((ma_uint32)(src_s24[i*3+0]) << 8) | ((ma_uint32)(src_s24[i*3+1]) << 16) | ((ma_uint32)(src_s24[i*3+2])) << 24); + } + + (void)ditherMode; +} + +static MA_INLINE void ma_pcm_s24_to_s32__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s24_to_s32__reference(dst, src, count, ditherMode); +} + +#if defined(MA_SUPPORT_SSE2) +static MA_INLINE void ma_pcm_s24_to_s32__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s24_to_s32__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_NEON) +static MA_INLINE void ma_pcm_s24_to_s32__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s24_to_s32__optimized(dst, src, count, ditherMode); +} +#endif + +MA_API void ma_pcm_s24_to_s32(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_s24_to_s32__reference(dst, src, count, ditherMode); +#else + # if defined(MA_SUPPORT_SSE2) + if (ma_has_sse2()) { + ma_pcm_s24_to_s32__sse2(dst, src, count, ditherMode); + } else + #elif defined(MA_SUPPORT_NEON) + if (ma_has_neon()) { + ma_pcm_s24_to_s32__neon(dst, src, count, ditherMode); + } else + #endif + { + ma_pcm_s24_to_s32__optimized(dst, src, count, ditherMode); + } +#endif +} + + +static MA_INLINE void ma_pcm_s24_to_f32__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + float* dst_f32 = (float*)dst; + const ma_uint8* src_s24 = (const ma_uint8*)src; + + ma_uint64 i; + for (i = 0; i < count; i += 1) { + float x = (float)(((ma_int32)(((ma_uint32)(src_s24[i*3+0]) << 8) | 
((ma_uint32)(src_s24[i*3+1]) << 16) | ((ma_uint32)(src_s24[i*3+2])) << 24)) >> 8); + +#if 0 + /* The accurate way. */ + x = x + 8388608.0f; /* -8388608..8388607 to 0..16777215 */ + x = x * 0.00000011920929665621f; /* 0..16777215 to 0..2 */ + x = x - 1; /* 0..2 to -1..1 */ +#else + /* The fast way. */ + x = x * 0.00000011920928955078125f; /* -8388608..8388607 to -1..0.999969482421875 */ +#endif + + dst_f32[i] = x; + } + + (void)ditherMode; +} + +static MA_INLINE void ma_pcm_s24_to_f32__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s24_to_f32__reference(dst, src, count, ditherMode); +} + +#if defined(MA_SUPPORT_SSE2) +static MA_INLINE void ma_pcm_s24_to_f32__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s24_to_f32__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_NEON) +static MA_INLINE void ma_pcm_s24_to_f32__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s24_to_f32__optimized(dst, src, count, ditherMode); +} +#endif + +MA_API void ma_pcm_s24_to_f32(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_s24_to_f32__reference(dst, src, count, ditherMode); +#else + # if defined(MA_SUPPORT_SSE2) + if (ma_has_sse2()) { + ma_pcm_s24_to_f32__sse2(dst, src, count, ditherMode); + } else + #elif defined(MA_SUPPORT_NEON) + if (ma_has_neon()) { + ma_pcm_s24_to_f32__neon(dst, src, count, ditherMode); + } else + #endif + { + ma_pcm_s24_to_f32__optimized(dst, src, count, ditherMode); + } +#endif +} + + +static MA_INLINE void ma_pcm_interleave_s24__reference(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels) +{ + ma_uint8* dst8 = (ma_uint8*)dst; + const ma_uint8** src8 = (const ma_uint8**)src; + + ma_uint64 iFrame; + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + ma_uint32 iChannel; + for (iChannel = 0; iChannel < channels; iChannel += 1) { + dst8[iFrame*3*channels + iChannel*3 + 0] = src8[iChannel][iFrame*3 + 0]; + dst8[iFrame*3*channels + iChannel*3 + 1] = src8[iChannel][iFrame*3 + 1]; + dst8[iFrame*3*channels + iChannel*3 + 2] = src8[iChannel][iFrame*3 + 2]; + } + } +} + +static MA_INLINE void ma_pcm_interleave_s24__optimized(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels) +{ + ma_pcm_interleave_s24__reference(dst, src, frameCount, channels); +} + +MA_API void ma_pcm_interleave_s24(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_interleave_s24__reference(dst, src, frameCount, channels); +#else + ma_pcm_interleave_s24__optimized(dst, src, frameCount, channels); +#endif +} + + +static MA_INLINE void ma_pcm_deinterleave_s24__reference(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels) +{ + ma_uint8** dst8 = (ma_uint8**)dst; + const ma_uint8* src8 = (const ma_uint8*)src; + + ma_uint32 iFrame; + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + ma_uint32 iChannel; + for (iChannel = 0; iChannel < channels; iChannel += 1) { + dst8[iChannel][iFrame*3 + 0] = src8[iFrame*3*channels + iChannel*3 + 0]; + dst8[iChannel][iFrame*3 + 1] = src8[iFrame*3*channels + iChannel*3 + 1]; + dst8[iChannel][iFrame*3 + 2] = src8[iFrame*3*channels + iChannel*3 + 2]; + } + } +} + +static MA_INLINE void ma_pcm_deinterleave_s24__optimized(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels) +{ + ma_pcm_deinterleave_s24__reference(dst, src, 
frameCount, channels); +} + +MA_API void ma_pcm_deinterleave_s24(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_deinterleave_s24__reference(dst, src, frameCount, channels); +#else + ma_pcm_deinterleave_s24__optimized(dst, src, frameCount, channels); +#endif +} + + + +/* s32 */ +static MA_INLINE void ma_pcm_s32_to_u8__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_uint8* dst_u8 = (ma_uint8*)dst; + const ma_int32* src_s32 = (const ma_int32*)src; + + if (ditherMode == ma_dither_mode_none) { + ma_uint64 i; + for (i = 0; i < count; i += 1) { + ma_int32 x = src_s32[i]; + x = x >> 24; + x = x + 128; + dst_u8[i] = (ma_uint8)x; + } + } else { + ma_uint64 i; + for (i = 0; i < count; i += 1) { + ma_int32 x = src_s32[i]; + + /* Dither. Don't overflow. */ + ma_int32 dither = ma_dither_s32(ditherMode, -0x800000, 0x7FFFFF); + if ((ma_int64)x + dither <= 0x7FFFFFFF) { + x = x + dither; + } else { + x = 0x7FFFFFFF; + } + + x = x >> 24; + x = x + 128; + dst_u8[i] = (ma_uint8)x; + } + } +} + +static MA_INLINE void ma_pcm_s32_to_u8__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s32_to_u8__reference(dst, src, count, ditherMode); +} + +#if defined(MA_SUPPORT_SSE2) +static MA_INLINE void ma_pcm_s32_to_u8__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s32_to_u8__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_NEON) +static MA_INLINE void ma_pcm_s32_to_u8__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s32_to_u8__optimized(dst, src, count, ditherMode); +} +#endif + +MA_API void ma_pcm_s32_to_u8(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_s32_to_u8__reference(dst, src, count, ditherMode); +#else + # if defined(MA_SUPPORT_SSE2) + if (ma_has_sse2()) { + ma_pcm_s32_to_u8__sse2(dst, src, count, ditherMode); + } else + #elif defined(MA_SUPPORT_NEON) + if (ma_has_neon()) { + ma_pcm_s32_to_u8__neon(dst, src, count, ditherMode); + } else + #endif + { + ma_pcm_s32_to_u8__optimized(dst, src, count, ditherMode); + } +#endif +} + + +static MA_INLINE void ma_pcm_s32_to_s16__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_int16* dst_s16 = (ma_int16*)dst; + const ma_int32* src_s32 = (const ma_int32*)src; + + if (ditherMode == ma_dither_mode_none) { + ma_uint64 i; + for (i = 0; i < count; i += 1) { + ma_int32 x = src_s32[i]; + x = x >> 16; + dst_s16[i] = (ma_int16)x; + } + } else { + ma_uint64 i; + for (i = 0; i < count; i += 1) { + ma_int32 x = src_s32[i]; + + /* Dither. Don't overflow. 
*/ + ma_int32 dither = ma_dither_s32(ditherMode, -0x8000, 0x7FFF); + if ((ma_int64)x + dither <= 0x7FFFFFFF) { + x = x + dither; + } else { + x = 0x7FFFFFFF; + } + + x = x >> 16; + dst_s16[i] = (ma_int16)x; + } + } +} + +static MA_INLINE void ma_pcm_s32_to_s16__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s32_to_s16__reference(dst, src, count, ditherMode); +} + +#if defined(MA_SUPPORT_SSE2) +static MA_INLINE void ma_pcm_s32_to_s16__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s32_to_s16__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_NEON) +static MA_INLINE void ma_pcm_s32_to_s16__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s32_to_s16__optimized(dst, src, count, ditherMode); +} +#endif + +MA_API void ma_pcm_s32_to_s16(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_s32_to_s16__reference(dst, src, count, ditherMode); +#else + # if defined(MA_SUPPORT_SSE2) + if (ma_has_sse2()) { + ma_pcm_s32_to_s16__sse2(dst, src, count, ditherMode); + } else + #elif defined(MA_SUPPORT_NEON) + if (ma_has_neon()) { + ma_pcm_s32_to_s16__neon(dst, src, count, ditherMode); + } else + #endif + { + ma_pcm_s32_to_s16__optimized(dst, src, count, ditherMode); + } +#endif +} + + +static MA_INLINE void ma_pcm_s32_to_s24__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_uint8* dst_s24 = (ma_uint8*)dst; + const ma_int32* src_s32 = (const ma_int32*)src; + + ma_uint64 i; + for (i = 0; i < count; i += 1) { + ma_uint32 x = (ma_uint32)src_s32[i]; + dst_s24[i*3+0] = (ma_uint8)((x & 0x0000FF00) >> 8); + dst_s24[i*3+1] = (ma_uint8)((x & 0x00FF0000) >> 16); + dst_s24[i*3+2] = (ma_uint8)((x & 0xFF000000) >> 24); + } + + (void)ditherMode; /* No dithering for s32 -> s24. 
*/ +} + +static MA_INLINE void ma_pcm_s32_to_s24__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s32_to_s24__reference(dst, src, count, ditherMode); +} + +#if defined(MA_SUPPORT_SSE2) +static MA_INLINE void ma_pcm_s32_to_s24__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s32_to_s24__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_NEON) +static MA_INLINE void ma_pcm_s32_to_s24__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s32_to_s24__optimized(dst, src, count, ditherMode); +} +#endif + +MA_API void ma_pcm_s32_to_s24(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_s32_to_s24__reference(dst, src, count, ditherMode); +#else + # if defined(MA_SUPPORT_SSE2) + if (ma_has_sse2()) { + ma_pcm_s32_to_s24__sse2(dst, src, count, ditherMode); + } else + #elif defined(MA_SUPPORT_NEON) + if (ma_has_neon()) { + ma_pcm_s32_to_s24__neon(dst, src, count, ditherMode); + } else + #endif + { + ma_pcm_s32_to_s24__optimized(dst, src, count, ditherMode); + } +#endif +} + + +MA_API void ma_pcm_s32_to_s32(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + (void)ditherMode; + + ma_copy_memory_64(dst, src, count * sizeof(ma_int32)); +} + + +static MA_INLINE void ma_pcm_s32_to_f32__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + float* dst_f32 = (float*)dst; + const ma_int32* src_s32 = (const ma_int32*)src; + + ma_uint64 i; + for (i = 0; i < count; i += 1) { + double x = src_s32[i]; + +#if 0 + x = x + 2147483648.0; + x = x * 0.0000000004656612873077392578125; + x = x - 1; +#else + x = x / 2147483648.0; +#endif + + dst_f32[i] = (float)x; + } + + (void)ditherMode; /* No dithering for s32 -> f32. 
*/ +} + +static MA_INLINE void ma_pcm_s32_to_f32__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s32_to_f32__reference(dst, src, count, ditherMode); +} + +#if defined(MA_SUPPORT_SSE2) +static MA_INLINE void ma_pcm_s32_to_f32__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s32_to_f32__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_NEON) +static MA_INLINE void ma_pcm_s32_to_f32__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s32_to_f32__optimized(dst, src, count, ditherMode); +} +#endif + +MA_API void ma_pcm_s32_to_f32(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_s32_to_f32__reference(dst, src, count, ditherMode); +#else + # if defined(MA_SUPPORT_SSE2) + if (ma_has_sse2()) { + ma_pcm_s32_to_f32__sse2(dst, src, count, ditherMode); + } else + #elif defined(MA_SUPPORT_NEON) + if (ma_has_neon()) { + ma_pcm_s32_to_f32__neon(dst, src, count, ditherMode); + } else + #endif + { + ma_pcm_s32_to_f32__optimized(dst, src, count, ditherMode); + } +#endif +} + + +static MA_INLINE void ma_pcm_interleave_s32__reference(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels) +{ + ma_int32* dst_s32 = (ma_int32*)dst; + const ma_int32** src_s32 = (const ma_int32**)src; + + ma_uint64 iFrame; + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + ma_uint32 iChannel; + for (iChannel = 0; iChannel < channels; iChannel += 1) { + dst_s32[iFrame*channels + iChannel] = src_s32[iChannel][iFrame]; + } + } +} + +static MA_INLINE void ma_pcm_interleave_s32__optimized(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels) +{ + ma_pcm_interleave_s32__reference(dst, src, frameCount, channels); +} + +MA_API void ma_pcm_interleave_s32(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_interleave_s32__reference(dst, src, frameCount, channels); +#else + ma_pcm_interleave_s32__optimized(dst, src, frameCount, channels); +#endif +} + + +static MA_INLINE void ma_pcm_deinterleave_s32__reference(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels) +{ + ma_int32** dst_s32 = (ma_int32**)dst; + const ma_int32* src_s32 = (const ma_int32*)src; + + ma_uint64 iFrame; + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + ma_uint32 iChannel; + for (iChannel = 0; iChannel < channels; iChannel += 1) { + dst_s32[iChannel][iFrame] = src_s32[iFrame*channels + iChannel]; + } + } +} + +static MA_INLINE void ma_pcm_deinterleave_s32__optimized(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels) +{ + ma_pcm_deinterleave_s32__reference(dst, src, frameCount, channels); +} + +MA_API void ma_pcm_deinterleave_s32(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_deinterleave_s32__reference(dst, src, frameCount, channels); +#else + ma_pcm_deinterleave_s32__optimized(dst, src, frameCount, channels); +#endif +} + + +/* f32 */ +static MA_INLINE void ma_pcm_f32_to_u8__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_uint64 i; + + ma_uint8* dst_u8 = (ma_uint8*)dst; + const float* src_f32 = (const float*)src; + + float ditherMin = 0; + float ditherMax = 0; + if (ditherMode != ma_dither_mode_none) { + ditherMin = 1.0f / -128; + ditherMax = 1.0f / 127; + } + + for (i = 0; i < 
count; i += 1) { + float x = src_f32[i]; + x = x + ma_dither_f32(ditherMode, ditherMin, ditherMax); + x = ((x < -1) ? -1 : ((x > 1) ? 1 : x)); /* clip */ + x = x + 1; /* -1..1 to 0..2 */ + x = x * 127.5f; /* 0..2 to 0..255 */ + + dst_u8[i] = (ma_uint8)x; + } +} + +static MA_INLINE void ma_pcm_f32_to_u8__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_f32_to_u8__reference(dst, src, count, ditherMode); +} + +#if defined(MA_SUPPORT_SSE2) +static MA_INLINE void ma_pcm_f32_to_u8__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_f32_to_u8__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_NEON) +static MA_INLINE void ma_pcm_f32_to_u8__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_f32_to_u8__optimized(dst, src, count, ditherMode); +} +#endif + +MA_API void ma_pcm_f32_to_u8(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_f32_to_u8__reference(dst, src, count, ditherMode); +#else + # if defined(MA_SUPPORT_SSE2) + if (ma_has_sse2()) { + ma_pcm_f32_to_u8__sse2(dst, src, count, ditherMode); + } else + #elif defined(MA_SUPPORT_NEON) + if (ma_has_neon()) { + ma_pcm_f32_to_u8__neon(dst, src, count, ditherMode); + } else + #endif + { + ma_pcm_f32_to_u8__optimized(dst, src, count, ditherMode); + } +#endif +} + +#ifdef MA_USE_REFERENCE_CONVERSION_APIS +static MA_INLINE void ma_pcm_f32_to_s16__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_uint64 i; + + ma_int16* dst_s16 = (ma_int16*)dst; + const float* src_f32 = (const float*)src; + + float ditherMin = 0; + float ditherMax = 0; + if (ditherMode != ma_dither_mode_none) { + ditherMin = 1.0f / -32768; + ditherMax = 1.0f / 32767; + } + + for (i = 0; i < count; i += 1) { + float x = src_f32[i]; + x = x + ma_dither_f32(ditherMode, ditherMin, ditherMax); + x = ((x < -1) ? -1 : ((x > 1) ? 1 : x)); /* clip */ + +#if 0 + /* The accurate way. */ + x = x + 1; /* -1..1 to 0..2 */ + x = x * 32767.5f; /* 0..2 to 0..65535 */ + x = x - 32768.0f; /* 0...65535 to -32768..32767 */ +#else + /* The fast way. */ + x = x * 32767.0f; /* -1..1 to -32767..32767 */ +#endif + + dst_s16[i] = (ma_int16)x; + } +} +#else +static MA_INLINE void ma_pcm_f32_to_s16__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_uint64 i; + ma_uint64 i4; + ma_uint64 count4; + + ma_int16* dst_s16 = (ma_int16*)dst; + const float* src_f32 = (const float*)src; + + float ditherMin = 0; + float ditherMax = 0; + if (ditherMode != ma_dither_mode_none) { + ditherMin = 1.0f / -32768; + ditherMax = 1.0f / 32767; + } + + /* Unrolled. */ + i = 0; + count4 = count >> 2; + for (i4 = 0; i4 < count4; i4 += 1) { + float d0 = ma_dither_f32(ditherMode, ditherMin, ditherMax); + float d1 = ma_dither_f32(ditherMode, ditherMin, ditherMax); + float d2 = ma_dither_f32(ditherMode, ditherMin, ditherMax); + float d3 = ma_dither_f32(ditherMode, ditherMin, ditherMax); + + float x0 = src_f32[i+0]; + float x1 = src_f32[i+1]; + float x2 = src_f32[i+2]; + float x3 = src_f32[i+3]; + + x0 = x0 + d0; + x1 = x1 + d1; + x2 = x2 + d2; + x3 = x3 + d3; + + x0 = ((x0 < -1) ? -1 : ((x0 > 1) ? 1 : x0)); + x1 = ((x1 < -1) ? -1 : ((x1 > 1) ? 1 : x1)); + x2 = ((x2 < -1) ? -1 : ((x2 > 1) ? 1 : x2)); + x3 = ((x3 < -1) ? -1 : ((x3 > 1) ? 
1 : x3)); + + x0 = x0 * 32767.0f; + x1 = x1 * 32767.0f; + x2 = x2 * 32767.0f; + x3 = x3 * 32767.0f; + + dst_s16[i+0] = (ma_int16)x0; + dst_s16[i+1] = (ma_int16)x1; + dst_s16[i+2] = (ma_int16)x2; + dst_s16[i+3] = (ma_int16)x3; + + i += 4; + } + + /* Leftover. */ + for (; i < count; i += 1) { + float x = src_f32[i]; + x = x + ma_dither_f32(ditherMode, ditherMin, ditherMax); + x = ((x < -1) ? -1 : ((x > 1) ? 1 : x)); /* clip */ + x = x * 32767.0f; /* -1..1 to -32767..32767 */ + + dst_s16[i] = (ma_int16)x; + } +} + +#if defined(MA_SUPPORT_SSE2) +static MA_INLINE void ma_pcm_f32_to_s16__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_uint64 i; + ma_uint64 i8; + ma_uint64 count8; + ma_int16* dst_s16; + const float* src_f32; + float ditherMin; + float ditherMax; + + /* Both the input and output buffers need to be aligned to 16 bytes. */ + if ((((ma_uintptr)dst & 15) != 0) || (((ma_uintptr)src & 15) != 0)) { + ma_pcm_f32_to_s16__optimized(dst, src, count, ditherMode); + return; + } + + dst_s16 = (ma_int16*)dst; + src_f32 = (const float*)src; + + ditherMin = 0; + ditherMax = 0; + if (ditherMode != ma_dither_mode_none) { + ditherMin = 1.0f / -32768; + ditherMax = 1.0f / 32767; + } + + i = 0; + + /* SSE2. SSE allows us to output 8 s16's at a time which means our loop is unrolled 8 times. */ + count8 = count >> 3; + for (i8 = 0; i8 < count8; i8 += 1) { + __m128 d0; + __m128 d1; + __m128 x0; + __m128 x1; + + if (ditherMode == ma_dither_mode_none) { + d0 = _mm_set1_ps(0); + d1 = _mm_set1_ps(0); + } else if (ditherMode == ma_dither_mode_rectangle) { + d0 = _mm_set_ps( + ma_dither_f32_rectangle(ditherMin, ditherMax), + ma_dither_f32_rectangle(ditherMin, ditherMax), + ma_dither_f32_rectangle(ditherMin, ditherMax), + ma_dither_f32_rectangle(ditherMin, ditherMax) + ); + d1 = _mm_set_ps( + ma_dither_f32_rectangle(ditherMin, ditherMax), + ma_dither_f32_rectangle(ditherMin, ditherMax), + ma_dither_f32_rectangle(ditherMin, ditherMax), + ma_dither_f32_rectangle(ditherMin, ditherMax) + ); + } else { + d0 = _mm_set_ps( + ma_dither_f32_triangle(ditherMin, ditherMax), + ma_dither_f32_triangle(ditherMin, ditherMax), + ma_dither_f32_triangle(ditherMin, ditherMax), + ma_dither_f32_triangle(ditherMin, ditherMax) + ); + d1 = _mm_set_ps( + ma_dither_f32_triangle(ditherMin, ditherMax), + ma_dither_f32_triangle(ditherMin, ditherMax), + ma_dither_f32_triangle(ditherMin, ditherMax), + ma_dither_f32_triangle(ditherMin, ditherMax) + ); + } + + x0 = *((__m128*)(src_f32 + i) + 0); + x1 = *((__m128*)(src_f32 + i) + 1); + + x0 = _mm_add_ps(x0, d0); + x1 = _mm_add_ps(x1, d1); + + x0 = _mm_mul_ps(x0, _mm_set1_ps(32767.0f)); + x1 = _mm_mul_ps(x1, _mm_set1_ps(32767.0f)); + + _mm_stream_si128(((__m128i*)(dst_s16 + i)), _mm_packs_epi32(_mm_cvttps_epi32(x0), _mm_cvttps_epi32(x1))); + + i += 8; + } + + + /* Leftover. */ + for (; i < count; i += 1) { + float x = src_f32[i]; + x = x + ma_dither_f32(ditherMode, ditherMin, ditherMax); + x = ((x < -1) ? -1 : ((x > 1) ? 
1 : x)); /* clip */ + x = x * 32767.0f; /* -1..1 to -32767..32767 */ + + dst_s16[i] = (ma_int16)x; + } +} +#endif /* SSE2 */ + +#if defined(MA_SUPPORT_NEON) +static MA_INLINE void ma_pcm_f32_to_s16__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_uint64 i; + ma_uint64 i8; + ma_uint64 count8; + ma_int16* dst_s16; + const float* src_f32; + float ditherMin; + float ditherMax; + + if (!ma_has_neon()) { + ma_pcm_f32_to_s16__optimized(dst, src, count, ditherMode); + return; + } + + /* Both the input and output buffers need to be aligned to 16 bytes. */ + if ((((ma_uintptr)dst & 15) != 0) || (((ma_uintptr)src & 15) != 0)) { + ma_pcm_f32_to_s16__optimized(dst, src, count, ditherMode); + return; + } + + dst_s16 = (ma_int16*)dst; + src_f32 = (const float*)src; + + ditherMin = 0; + ditherMax = 0; + if (ditherMode != ma_dither_mode_none) { + ditherMin = 1.0f / -32768; + ditherMax = 1.0f / 32767; + } + + i = 0; + + /* NEON. NEON allows us to output 8 s16's at a time which means our loop is unrolled 8 times. */ + count8 = count >> 3; + for (i8 = 0; i8 < count8; i8 += 1) { + float32x4_t d0; + float32x4_t d1; + float32x4_t x0; + float32x4_t x1; + int32x4_t i0; + int32x4_t i1; + + if (ditherMode == ma_dither_mode_none) { + d0 = vmovq_n_f32(0); + d1 = vmovq_n_f32(0); + } else if (ditherMode == ma_dither_mode_rectangle) { + float d0v[4]; + float d1v[4]; + + d0v[0] = ma_dither_f32_rectangle(ditherMin, ditherMax); + d0v[1] = ma_dither_f32_rectangle(ditherMin, ditherMax); + d0v[2] = ma_dither_f32_rectangle(ditherMin, ditherMax); + d0v[3] = ma_dither_f32_rectangle(ditherMin, ditherMax); + d0 = vld1q_f32(d0v); + + d1v[0] = ma_dither_f32_rectangle(ditherMin, ditherMax); + d1v[1] = ma_dither_f32_rectangle(ditherMin, ditherMax); + d1v[2] = ma_dither_f32_rectangle(ditherMin, ditherMax); + d1v[3] = ma_dither_f32_rectangle(ditherMin, ditherMax); + d1 = vld1q_f32(d1v); + } else { + float d0v[4]; + float d1v[4]; + + d0v[0] = ma_dither_f32_triangle(ditherMin, ditherMax); + d0v[1] = ma_dither_f32_triangle(ditherMin, ditherMax); + d0v[2] = ma_dither_f32_triangle(ditherMin, ditherMax); + d0v[3] = ma_dither_f32_triangle(ditherMin, ditherMax); + d0 = vld1q_f32(d0v); + + d1v[0] = ma_dither_f32_triangle(ditherMin, ditherMax); + d1v[1] = ma_dither_f32_triangle(ditherMin, ditherMax); + d1v[2] = ma_dither_f32_triangle(ditherMin, ditherMax); + d1v[3] = ma_dither_f32_triangle(ditherMin, ditherMax); + d1 = vld1q_f32(d1v); + } + + x0 = *((float32x4_t*)(src_f32 + i) + 0); + x1 = *((float32x4_t*)(src_f32 + i) + 1); + + x0 = vaddq_f32(x0, d0); + x1 = vaddq_f32(x1, d1); + + x0 = vmulq_n_f32(x0, 32767.0f); + x1 = vmulq_n_f32(x1, 32767.0f); + + i0 = vcvtq_s32_f32(x0); + i1 = vcvtq_s32_f32(x1); + *((int16x8_t*)(dst_s16 + i)) = vcombine_s16(vqmovn_s32(i0), vqmovn_s32(i1)); + + i += 8; + } + + + /* Leftover. */ + for (; i < count; i += 1) { + float x = src_f32[i]; + x = x + ma_dither_f32(ditherMode, ditherMin, ditherMax); + x = ((x < -1) ? -1 : ((x > 1) ? 
1 : x)); /* clip */ + x = x * 32767.0f; /* -1..1 to -32767..32767 */ + + dst_s16[i] = (ma_int16)x; + } +} +#endif /* Neon */ +#endif /* MA_USE_REFERENCE_CONVERSION_APIS */ + +MA_API void ma_pcm_f32_to_s16(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_f32_to_s16__reference(dst, src, count, ditherMode); +#else + # if defined(MA_SUPPORT_SSE2) + if (ma_has_sse2()) { + ma_pcm_f32_to_s16__sse2(dst, src, count, ditherMode); + } else + #elif defined(MA_SUPPORT_NEON) + if (ma_has_neon()) { + ma_pcm_f32_to_s16__neon(dst, src, count, ditherMode); + } else + #endif + { + ma_pcm_f32_to_s16__optimized(dst, src, count, ditherMode); + } +#endif +} + + +static MA_INLINE void ma_pcm_f32_to_s24__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_uint8* dst_s24 = (ma_uint8*)dst; + const float* src_f32 = (const float*)src; + + ma_uint64 i; + for (i = 0; i < count; i += 1) { + ma_int32 r; + float x = src_f32[i]; + x = ((x < -1) ? -1 : ((x > 1) ? 1 : x)); /* clip */ + +#if 0 + /* The accurate way. */ + x = x + 1; /* -1..1 to 0..2 */ + x = x * 8388607.5f; /* 0..2 to 0..16777215 */ + x = x - 8388608.0f; /* 0..16777215 to -8388608..8388607 */ +#else + /* The fast way. */ + x = x * 8388607.0f; /* -1..1 to -8388607..8388607 */ +#endif + + r = (ma_int32)x; + dst_s24[(i*3)+0] = (ma_uint8)((r & 0x0000FF) >> 0); + dst_s24[(i*3)+1] = (ma_uint8)((r & 0x00FF00) >> 8); + dst_s24[(i*3)+2] = (ma_uint8)((r & 0xFF0000) >> 16); + } + + (void)ditherMode; /* No dithering for f32 -> s24. */ +} + +static MA_INLINE void ma_pcm_f32_to_s24__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_f32_to_s24__reference(dst, src, count, ditherMode); +} + +#if defined(MA_SUPPORT_SSE2) +static MA_INLINE void ma_pcm_f32_to_s24__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_f32_to_s24__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_NEON) +static MA_INLINE void ma_pcm_f32_to_s24__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_f32_to_s24__optimized(dst, src, count, ditherMode); +} +#endif + +MA_API void ma_pcm_f32_to_s24(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_f32_to_s24__reference(dst, src, count, ditherMode); +#else + # if defined(MA_SUPPORT_SSE2) + if (ma_has_sse2()) { + ma_pcm_f32_to_s24__sse2(dst, src, count, ditherMode); + } else + #elif defined(MA_SUPPORT_NEON) + if (ma_has_neon()) { + ma_pcm_f32_to_s24__neon(dst, src, count, ditherMode); + } else + #endif + { + ma_pcm_f32_to_s24__optimized(dst, src, count, ditherMode); + } +#endif +} + + +static MA_INLINE void ma_pcm_f32_to_s32__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_int32* dst_s32 = (ma_int32*)dst; + const float* src_f32 = (const float*)src; + + ma_uint32 i; + for (i = 0; i < count; i += 1) { + double x = src_f32[i]; + x = ((x < -1) ? -1 : ((x > 1) ? 1 : x)); /* clip */ + +#if 0 + /* The accurate way. */ + x = x + 1; /* -1..1 to 0..2 */ + x = x * 2147483647.5; /* 0..2 to 0..4294967295 */ + x = x - 2147483648.0; /* 0...4294967295 to -2147483648..2147483647 */ +#else + /* The fast way. */ + x = x * 2147483647.0; /* -1..1 to -2147483647..2147483647 */ +#endif + + dst_s32[i] = (ma_int32)x; + } + + (void)ditherMode; /* No dithering for f32 -> s32. 
*/ +} + +static MA_INLINE void ma_pcm_f32_to_s32__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_f32_to_s32__reference(dst, src, count, ditherMode); +} + +#if defined(MA_SUPPORT_SSE2) +static MA_INLINE void ma_pcm_f32_to_s32__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_f32_to_s32__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_NEON) +static MA_INLINE void ma_pcm_f32_to_s32__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_f32_to_s32__optimized(dst, src, count, ditherMode); +} +#endif + +MA_API void ma_pcm_f32_to_s32(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_f32_to_s32__reference(dst, src, count, ditherMode); +#else + # if defined(MA_SUPPORT_SSE2) + if (ma_has_sse2()) { + ma_pcm_f32_to_s32__sse2(dst, src, count, ditherMode); + } else + #elif defined(MA_SUPPORT_NEON) + if (ma_has_neon()) { + ma_pcm_f32_to_s32__neon(dst, src, count, ditherMode); + } else + #endif + { + ma_pcm_f32_to_s32__optimized(dst, src, count, ditherMode); + } +#endif +} + + +MA_API void ma_pcm_f32_to_f32(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + (void)ditherMode; + + ma_copy_memory_64(dst, src, count * sizeof(float)); +} + + +static void ma_pcm_interleave_f32__reference(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels) +{ + float* dst_f32 = (float*)dst; + const float** src_f32 = (const float**)src; + + ma_uint64 iFrame; + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + ma_uint32 iChannel; + for (iChannel = 0; iChannel < channels; iChannel += 1) { + dst_f32[iFrame*channels + iChannel] = src_f32[iChannel][iFrame]; + } + } +} + +static void ma_pcm_interleave_f32__optimized(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels) +{ + ma_pcm_interleave_f32__reference(dst, src, frameCount, channels); +} + +MA_API void ma_pcm_interleave_f32(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_interleave_f32__reference(dst, src, frameCount, channels); +#else + ma_pcm_interleave_f32__optimized(dst, src, frameCount, channels); +#endif +} + + +static void ma_pcm_deinterleave_f32__reference(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels) +{ + float** dst_f32 = (float**)dst; + const float* src_f32 = (const float*)src; + + ma_uint64 iFrame; + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + ma_uint32 iChannel; + for (iChannel = 0; iChannel < channels; iChannel += 1) { + dst_f32[iChannel][iFrame] = src_f32[iFrame*channels + iChannel]; + } + } +} + +static void ma_pcm_deinterleave_f32__optimized(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels) +{ + ma_pcm_deinterleave_f32__reference(dst, src, frameCount, channels); +} + +MA_API void ma_pcm_deinterleave_f32(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_deinterleave_f32__reference(dst, src, frameCount, channels); +#else + ma_pcm_deinterleave_f32__optimized(dst, src, frameCount, channels); +#endif +} + + +MA_API void ma_pcm_convert(void* pOut, ma_format formatOut, const void* pIn, ma_format formatIn, ma_uint64 sampleCount, ma_dither_mode ditherMode) +{ + if (formatOut == formatIn) { + ma_copy_memory_64(pOut, pIn, sampleCount * ma_get_bytes_per_sample(formatOut)); + return; + } + 
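+    /* Identical formats were handled above with a straight copy; every other pairing dispatches to the
+       dedicated converter, e.g. ma_pcm_convert(pOut, ma_format_f32, pIn, ma_format_s16, frameCount*channels, ma_dither_mode_none)
+       for interleaved s16 -> f32. */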
+ switch (formatIn) + { + case ma_format_u8: + { + switch (formatOut) + { + case ma_format_s16: ma_pcm_u8_to_s16(pOut, pIn, sampleCount, ditherMode); return; + case ma_format_s24: ma_pcm_u8_to_s24(pOut, pIn, sampleCount, ditherMode); return; + case ma_format_s32: ma_pcm_u8_to_s32(pOut, pIn, sampleCount, ditherMode); return; + case ma_format_f32: ma_pcm_u8_to_f32(pOut, pIn, sampleCount, ditherMode); return; + default: break; + } + } break; + + case ma_format_s16: + { + switch (formatOut) + { + case ma_format_u8: ma_pcm_s16_to_u8( pOut, pIn, sampleCount, ditherMode); return; + case ma_format_s24: ma_pcm_s16_to_s24(pOut, pIn, sampleCount, ditherMode); return; + case ma_format_s32: ma_pcm_s16_to_s32(pOut, pIn, sampleCount, ditherMode); return; + case ma_format_f32: ma_pcm_s16_to_f32(pOut, pIn, sampleCount, ditherMode); return; + default: break; + } + } break; + + case ma_format_s24: + { + switch (formatOut) + { + case ma_format_u8: ma_pcm_s24_to_u8( pOut, pIn, sampleCount, ditherMode); return; + case ma_format_s16: ma_pcm_s24_to_s16(pOut, pIn, sampleCount, ditherMode); return; + case ma_format_s32: ma_pcm_s24_to_s32(pOut, pIn, sampleCount, ditherMode); return; + case ma_format_f32: ma_pcm_s24_to_f32(pOut, pIn, sampleCount, ditherMode); return; + default: break; + } + } break; + + case ma_format_s32: + { + switch (formatOut) + { + case ma_format_u8: ma_pcm_s32_to_u8( pOut, pIn, sampleCount, ditherMode); return; + case ma_format_s16: ma_pcm_s32_to_s16(pOut, pIn, sampleCount, ditherMode); return; + case ma_format_s24: ma_pcm_s32_to_s24(pOut, pIn, sampleCount, ditherMode); return; + case ma_format_f32: ma_pcm_s32_to_f32(pOut, pIn, sampleCount, ditherMode); return; + default: break; + } + } break; + + case ma_format_f32: + { + switch (formatOut) + { + case ma_format_u8: ma_pcm_f32_to_u8( pOut, pIn, sampleCount, ditherMode); return; + case ma_format_s16: ma_pcm_f32_to_s16(pOut, pIn, sampleCount, ditherMode); return; + case ma_format_s24: ma_pcm_f32_to_s24(pOut, pIn, sampleCount, ditherMode); return; + case ma_format_s32: ma_pcm_f32_to_s32(pOut, pIn, sampleCount, ditherMode); return; + default: break; + } + } break; + + default: break; + } +} + +MA_API void ma_convert_pcm_frames_format(void* pOut, ma_format formatOut, const void* pIn, ma_format formatIn, ma_uint64 frameCount, ma_uint32 channels, ma_dither_mode ditherMode) +{ + ma_pcm_convert(pOut, formatOut, pIn, formatIn, frameCount * channels, ditherMode); +} + +MA_API void ma_deinterleave_pcm_frames(ma_format format, ma_uint32 channels, ma_uint64 frameCount, const void* pInterleavedPCMFrames, void** ppDeinterleavedPCMFrames) +{ + if (pInterleavedPCMFrames == NULL || ppDeinterleavedPCMFrames == NULL) { + return; /* Invalid args. */ + } + + /* For efficiency we do this per format. 
*/ + switch (format) { + case ma_format_s16: + { + const ma_int16* pSrcS16 = (const ma_int16*)pInterleavedPCMFrames; + ma_uint64 iPCMFrame; + for (iPCMFrame = 0; iPCMFrame < frameCount; ++iPCMFrame) { + ma_uint32 iChannel; + for (iChannel = 0; iChannel < channels; ++iChannel) { + ma_int16* pDstS16 = (ma_int16*)ppDeinterleavedPCMFrames[iChannel]; + pDstS16[iPCMFrame] = pSrcS16[iPCMFrame*channels+iChannel]; + } + } + } break; + + case ma_format_f32: + { + const float* pSrcF32 = (const float*)pInterleavedPCMFrames; + ma_uint64 iPCMFrame; + for (iPCMFrame = 0; iPCMFrame < frameCount; ++iPCMFrame) { + ma_uint32 iChannel; + for (iChannel = 0; iChannel < channels; ++iChannel) { + float* pDstF32 = (float*)ppDeinterleavedPCMFrames[iChannel]; + pDstF32[iPCMFrame] = pSrcF32[iPCMFrame*channels+iChannel]; + } + } + } break; + + default: + { + ma_uint32 sampleSizeInBytes = ma_get_bytes_per_sample(format); + ma_uint64 iPCMFrame; + for (iPCMFrame = 0; iPCMFrame < frameCount; ++iPCMFrame) { + ma_uint32 iChannel; + for (iChannel = 0; iChannel < channels; ++iChannel) { + void* pDst = ma_offset_ptr(ppDeinterleavedPCMFrames[iChannel], iPCMFrame*sampleSizeInBytes); + const void* pSrc = ma_offset_ptr(pInterleavedPCMFrames, (iPCMFrame*channels+iChannel)*sampleSizeInBytes); + memcpy(pDst, pSrc, sampleSizeInBytes); + } + } + } break; + } +} + +MA_API void ma_interleave_pcm_frames(ma_format format, ma_uint32 channels, ma_uint64 frameCount, const void** ppDeinterleavedPCMFrames, void* pInterleavedPCMFrames) +{ + switch (format) + { + case ma_format_s16: + { + ma_int16* pDstS16 = (ma_int16*)pInterleavedPCMFrames; + ma_uint64 iPCMFrame; + for (iPCMFrame = 0; iPCMFrame < frameCount; ++iPCMFrame) { + ma_uint32 iChannel; + for (iChannel = 0; iChannel < channels; ++iChannel) { + const ma_int16* pSrcS16 = (const ma_int16*)ppDeinterleavedPCMFrames[iChannel]; + pDstS16[iPCMFrame*channels+iChannel] = pSrcS16[iPCMFrame]; + } + } + } break; + + case ma_format_f32: + { + float* pDstF32 = (float*)pInterleavedPCMFrames; + ma_uint64 iPCMFrame; + for (iPCMFrame = 0; iPCMFrame < frameCount; ++iPCMFrame) { + ma_uint32 iChannel; + for (iChannel = 0; iChannel < channels; ++iChannel) { + const float* pSrcF32 = (const float*)ppDeinterleavedPCMFrames[iChannel]; + pDstF32[iPCMFrame*channels+iChannel] = pSrcF32[iPCMFrame]; + } + } + } break; + + default: + { + ma_uint32 sampleSizeInBytes = ma_get_bytes_per_sample(format); + ma_uint64 iPCMFrame; + for (iPCMFrame = 0; iPCMFrame < frameCount; ++iPCMFrame) { + ma_uint32 iChannel; + for (iChannel = 0; iChannel < channels; ++iChannel) { + void* pDst = ma_offset_ptr(pInterleavedPCMFrames, (iPCMFrame*channels+iChannel)*sampleSizeInBytes); + const void* pSrc = ma_offset_ptr(ppDeinterleavedPCMFrames[iChannel], iPCMFrame*sampleSizeInBytes); + memcpy(pDst, pSrc, sampleSizeInBytes); + } + } + } break; + } +} + + +/************************************************************************************************************************************************************** + +Biquad Filter + +**************************************************************************************************************************************************************/ +#ifndef MA_BIQUAD_FIXED_POINT_SHIFT +#define MA_BIQUAD_FIXED_POINT_SHIFT 14 +#endif + +static ma_int32 ma_biquad_float_to_fp(double x) +{ + return (ma_int32)(x * (1 << MA_BIQUAD_FIXED_POINT_SHIFT)); +} + +MA_API ma_biquad_config ma_biquad_config_init(ma_format format, ma_uint32 channels, double b0, double b1, double b2, double a0, double a1, double a2) +{ + 
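+    /* Coefficients are stored exactly as given here; ma_biquad_reinit() normalizes them by a0 later. */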
ma_biquad_config config; + + MA_ZERO_OBJECT(&config); + config.format = format; + config.channels = channels; + config.b0 = b0; + config.b1 = b1; + config.b2 = b2; + config.a0 = a0; + config.a1 = a1; + config.a2 = a2; + + return config; +} + + +typedef struct +{ + size_t sizeInBytes; + size_t r1Offset; + size_t r2Offset; +} ma_biquad_heap_layout; + +static ma_result ma_biquad_get_heap_layout(const ma_biquad_config* pConfig, ma_biquad_heap_layout* pHeapLayout) +{ + MA_ASSERT(pHeapLayout != NULL); + + MA_ZERO_OBJECT(pHeapLayout); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + if (pConfig->channels == 0) { + return MA_INVALID_ARGS; + } + + pHeapLayout->sizeInBytes = 0; + + /* R0 */ + pHeapLayout->r1Offset = pHeapLayout->sizeInBytes; + pHeapLayout->sizeInBytes += sizeof(ma_biquad_coefficient) * pConfig->channels; + + /* R1 */ + pHeapLayout->r2Offset = pHeapLayout->sizeInBytes; + pHeapLayout->sizeInBytes += sizeof(ma_biquad_coefficient) * pConfig->channels; + + /* Make sure allocation size is aligned. */ + pHeapLayout->sizeInBytes = ma_align_64(pHeapLayout->sizeInBytes); + + return MA_SUCCESS; +} + +MA_API ma_result ma_biquad_get_heap_size(const ma_biquad_config* pConfig, size_t* pHeapSizeInBytes) +{ + ma_result result; + ma_biquad_heap_layout heapLayout; + + if (pHeapSizeInBytes == NULL) { + return MA_INVALID_ARGS; + } + + *pHeapSizeInBytes = 0; + + result = ma_biquad_get_heap_layout(pConfig, &heapLayout); + if (result != MA_SUCCESS) { + return result; + } + + *pHeapSizeInBytes = heapLayout.sizeInBytes; + + return MA_SUCCESS; +} + +MA_API ma_result ma_biquad_init_preallocated(const ma_biquad_config* pConfig, void* pHeap, ma_biquad* pBQ) +{ + ma_result result; + ma_biquad_heap_layout heapLayout; + + if (pBQ == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pBQ); + + result = ma_biquad_get_heap_layout(pConfig, &heapLayout); + if (result != MA_SUCCESS) { + return result; + } + + pBQ->_pHeap = pHeap; + MA_ZERO_MEMORY(pHeap, heapLayout.sizeInBytes); + + pBQ->pR1 = (ma_biquad_coefficient*)ma_offset_ptr(pHeap, heapLayout.r1Offset); + pBQ->pR2 = (ma_biquad_coefficient*)ma_offset_ptr(pHeap, heapLayout.r2Offset); + + return ma_biquad_reinit(pConfig, pBQ); +} + +MA_API ma_result ma_biquad_init(const ma_biquad_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_biquad* pBQ) +{ + ma_result result; + size_t heapSizeInBytes; + void* pHeap; + + result = ma_biquad_get_heap_size(pConfig, &heapSizeInBytes); + if (result != MA_SUCCESS) { + return result; + } + + if (heapSizeInBytes > 0) { + pHeap = ma_malloc(heapSizeInBytes, pAllocationCallbacks); + if (pHeap == NULL) { + return MA_OUT_OF_MEMORY; + } + } else { + pHeap = NULL; + } + + result = ma_biquad_init_preallocated(pConfig, pHeap, pBQ); + if (result != MA_SUCCESS) { + ma_free(pHeap, pAllocationCallbacks); + return result; + } + + pBQ->_ownsHeap = MA_TRUE; + return MA_SUCCESS; +} + +MA_API void ma_biquad_uninit(ma_biquad* pBQ, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pBQ == NULL) { + return; + } + + if (pBQ->_ownsHeap) { + ma_free(pBQ->_pHeap, pAllocationCallbacks); + } +} + +MA_API ma_result ma_biquad_reinit(const ma_biquad_config* pConfig, ma_biquad* pBQ) +{ + if (pBQ == NULL || pConfig == NULL) { + return MA_INVALID_ARGS; + } + + if (pConfig->a0 == 0) { + return MA_INVALID_ARGS; /* Division by zero. */ + } + + /* Only supporting f32 and s16. 
*/ + if (pConfig->format != ma_format_f32 && pConfig->format != ma_format_s16) { + return MA_INVALID_ARGS; + } + + /* The format cannot be changed after initialization. */ + if (pBQ->format != ma_format_unknown && pBQ->format != pConfig->format) { + return MA_INVALID_OPERATION; + } + + /* The channel count cannot be changed after initialization. */ + if (pBQ->channels != 0 && pBQ->channels != pConfig->channels) { + return MA_INVALID_OPERATION; + } + + + pBQ->format = pConfig->format; + pBQ->channels = pConfig->channels; + + /* Normalize. */ + if (pConfig->format == ma_format_f32) { + pBQ->b0.f32 = (float)(pConfig->b0 / pConfig->a0); + pBQ->b1.f32 = (float)(pConfig->b1 / pConfig->a0); + pBQ->b2.f32 = (float)(pConfig->b2 / pConfig->a0); + pBQ->a1.f32 = (float)(pConfig->a1 / pConfig->a0); + pBQ->a2.f32 = (float)(pConfig->a2 / pConfig->a0); + } else { + pBQ->b0.s32 = ma_biquad_float_to_fp(pConfig->b0 / pConfig->a0); + pBQ->b1.s32 = ma_biquad_float_to_fp(pConfig->b1 / pConfig->a0); + pBQ->b2.s32 = ma_biquad_float_to_fp(pConfig->b2 / pConfig->a0); + pBQ->a1.s32 = ma_biquad_float_to_fp(pConfig->a1 / pConfig->a0); + pBQ->a2.s32 = ma_biquad_float_to_fp(pConfig->a2 / pConfig->a0); + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_biquad_clear_cache(ma_biquad* pBQ) +{ + if (pBQ == NULL) { + return MA_INVALID_ARGS; + } + + if (pBQ->format == ma_format_f32) { + pBQ->pR1->f32 = 0; + pBQ->pR2->f32 = 0; + } else { + pBQ->pR1->s32 = 0; + pBQ->pR2->s32 = 0; + } + + return MA_SUCCESS; +} + +static MA_INLINE void ma_biquad_process_pcm_frame_f32__direct_form_2_transposed(ma_biquad* pBQ, float* pY, const float* pX) +{ + ma_uint32 c; + const ma_uint32 channels = pBQ->channels; + const float b0 = pBQ->b0.f32; + const float b1 = pBQ->b1.f32; + const float b2 = pBQ->b2.f32; + const float a1 = pBQ->a1.f32; + const float a2 = pBQ->a2.f32; + + MA_ASSUME(channels > 0); + for (c = 0; c < channels; c += 1) { + float r1 = pBQ->pR1[c].f32; + float r2 = pBQ->pR2[c].f32; + float x = pX[c]; + float y; + + y = b0*x + r1; + r1 = b1*x - a1*y + r2; + r2 = b2*x - a2*y; + + pY[c] = y; + pBQ->pR1[c].f32 = r1; + pBQ->pR2[c].f32 = r2; + } +} + +static MA_INLINE void ma_biquad_process_pcm_frame_f32(ma_biquad* pBQ, float* pY, const float* pX) +{ + ma_biquad_process_pcm_frame_f32__direct_form_2_transposed(pBQ, pY, pX); +} + +static MA_INLINE void ma_biquad_process_pcm_frame_s16__direct_form_2_transposed(ma_biquad* pBQ, ma_int16* pY, const ma_int16* pX) +{ + ma_uint32 c; + const ma_uint32 channels = pBQ->channels; + const ma_int32 b0 = pBQ->b0.s32; + const ma_int32 b1 = pBQ->b1.s32; + const ma_int32 b2 = pBQ->b2.s32; + const ma_int32 a1 = pBQ->a1.s32; + const ma_int32 a2 = pBQ->a2.s32; + + MA_ASSUME(channels > 0); + for (c = 0; c < channels; c += 1) { + ma_int32 r1 = pBQ->pR1[c].s32; + ma_int32 r2 = pBQ->pR2[c].s32; + ma_int32 x = pX[c]; + ma_int32 y; + + y = (b0*x + r1) >> MA_BIQUAD_FIXED_POINT_SHIFT; + r1 = (b1*x - a1*y + r2); + r2 = (b2*x - a2*y); + + pY[c] = (ma_int16)ma_clamp(y, -32768, 32767); + pBQ->pR1[c].s32 = r1; + pBQ->pR2[c].s32 = r2; + } +} + +static MA_INLINE void ma_biquad_process_pcm_frame_s16(ma_biquad* pBQ, ma_int16* pY, const ma_int16* pX) +{ + ma_biquad_process_pcm_frame_s16__direct_form_2_transposed(pBQ, pY, pX); +} + +MA_API ma_result ma_biquad_process_pcm_frames(ma_biquad* pBQ, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount) +{ + ma_uint32 n; + + if (pBQ == NULL || pFramesOut == NULL || pFramesIn == NULL) { + return MA_INVALID_ARGS; + } + + /* Note that the logic below needs to support in-place 
filtering. That is, it must support the case where pFramesOut and pFramesIn are the same. */ + + if (pBQ->format == ma_format_f32) { + /* */ float* pY = ( float*)pFramesOut; + const float* pX = (const float*)pFramesIn; + + for (n = 0; n < frameCount; n += 1) { + ma_biquad_process_pcm_frame_f32__direct_form_2_transposed(pBQ, pY, pX); + pY += pBQ->channels; + pX += pBQ->channels; + } + } else if (pBQ->format == ma_format_s16) { + /* */ ma_int16* pY = ( ma_int16*)pFramesOut; + const ma_int16* pX = (const ma_int16*)pFramesIn; + + for (n = 0; n < frameCount; n += 1) { + ma_biquad_process_pcm_frame_s16__direct_form_2_transposed(pBQ, pY, pX); + pY += pBQ->channels; + pX += pBQ->channels; + } + } else { + MA_ASSERT(MA_FALSE); + return MA_INVALID_ARGS; /* Format not supported. Should never hit this because it's checked in ma_biquad_init() and ma_biquad_reinit(). */ + } + + return MA_SUCCESS; +} + +MA_API ma_uint32 ma_biquad_get_latency(const ma_biquad* pBQ) +{ + if (pBQ == NULL) { + return 0; + } + + return 2; +} + + +/************************************************************************************************************************************************************** + +Low-Pass Filter + +**************************************************************************************************************************************************************/ +MA_API ma_lpf1_config ma_lpf1_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double cutoffFrequency) +{ + ma_lpf1_config config; + + MA_ZERO_OBJECT(&config); + config.format = format; + config.channels = channels; + config.sampleRate = sampleRate; + config.cutoffFrequency = cutoffFrequency; + config.q = 0.5; + + return config; +} + +MA_API ma_lpf2_config ma_lpf2_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double cutoffFrequency, double q) +{ + ma_lpf2_config config; + + MA_ZERO_OBJECT(&config); + config.format = format; + config.channels = channels; + config.sampleRate = sampleRate; + config.cutoffFrequency = cutoffFrequency; + config.q = q; + + /* Q cannot be 0 or else it'll result in a division by 0. In this case just default to 0.707107. */ + if (config.q == 0) { + config.q = 0.707107; + } + + return config; +} + + +typedef struct +{ + size_t sizeInBytes; + size_t r1Offset; +} ma_lpf1_heap_layout; + +static ma_result ma_lpf1_get_heap_layout(const ma_lpf1_config* pConfig, ma_lpf1_heap_layout* pHeapLayout) +{ + MA_ASSERT(pHeapLayout != NULL); + + MA_ZERO_OBJECT(pHeapLayout); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + if (pConfig->channels == 0) { + return MA_INVALID_ARGS; + } + + pHeapLayout->sizeInBytes = 0; + + /* R1 */ + pHeapLayout->r1Offset = pHeapLayout->sizeInBytes; + pHeapLayout->sizeInBytes += sizeof(ma_biquad_coefficient) * pConfig->channels; + + /* Make sure allocation size is aligned. 
*/ + pHeapLayout->sizeInBytes = ma_align_64(pHeapLayout->sizeInBytes); + + return MA_SUCCESS; +} + +MA_API ma_result ma_lpf1_get_heap_size(const ma_lpf1_config* pConfig, size_t* pHeapSizeInBytes) +{ + ma_result result; + ma_lpf1_heap_layout heapLayout; + + if (pHeapSizeInBytes == NULL) { + return MA_INVALID_ARGS; + } + + result = ma_lpf1_get_heap_layout(pConfig, &heapLayout); + if (result != MA_SUCCESS) { + return result; + } + + *pHeapSizeInBytes = heapLayout.sizeInBytes; + + return MA_SUCCESS; +} + +MA_API ma_result ma_lpf1_init_preallocated(const ma_lpf1_config* pConfig, void* pHeap, ma_lpf1* pLPF) +{ + ma_result result; + ma_lpf1_heap_layout heapLayout; + + if (pLPF == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pLPF); + + result = ma_lpf1_get_heap_layout(pConfig, &heapLayout); + if (result != MA_SUCCESS) { + return result; + } + + pLPF->_pHeap = pHeap; + MA_ZERO_MEMORY(pHeap, heapLayout.sizeInBytes); + + pLPF->pR1 = (ma_biquad_coefficient*)ma_offset_ptr(pHeap, heapLayout.r1Offset); + + return ma_lpf1_reinit(pConfig, pLPF); +} + +MA_API ma_result ma_lpf1_init(const ma_lpf1_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_lpf1* pLPF) +{ + ma_result result; + size_t heapSizeInBytes; + void* pHeap; + + result = ma_lpf1_get_heap_size(pConfig, &heapSizeInBytes); + if (result != MA_SUCCESS) { + return result; + } + + if (heapSizeInBytes > 0) { + pHeap = ma_malloc(heapSizeInBytes, pAllocationCallbacks); + if (pHeap == NULL) { + return MA_OUT_OF_MEMORY; + } + } else { + pHeap = NULL; + } + + result = ma_lpf1_init_preallocated(pConfig, pHeap, pLPF); + if (result != MA_SUCCESS) { + ma_free(pHeap, pAllocationCallbacks); + return result; + } + + pLPF->_ownsHeap = MA_TRUE; + return MA_SUCCESS; +} + +MA_API void ma_lpf1_uninit(ma_lpf1* pLPF, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pLPF == NULL) { + return; + } + + if (pLPF->_ownsHeap) { + ma_free(pLPF->_pHeap, pAllocationCallbacks); + } +} + +MA_API ma_result ma_lpf1_reinit(const ma_lpf1_config* pConfig, ma_lpf1* pLPF) +{ + double a; + + if (pLPF == NULL || pConfig == NULL) { + return MA_INVALID_ARGS; + } + + /* Only supporting f32 and s16. */ + if (pConfig->format != ma_format_f32 && pConfig->format != ma_format_s16) { + return MA_INVALID_ARGS; + } + + /* The format cannot be changed after initialization. */ + if (pLPF->format != ma_format_unknown && pLPF->format != pConfig->format) { + return MA_INVALID_OPERATION; + } + + /* The channel count cannot be changed after initialization. 
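+*/
+
+/*
+For reference: ma_lpf1 is a one pole low pass filter. The reinit below computes
+
+    a = exp(-2*pi*cutoffFrequency / sampleRate)
+
+and the process functions then run y[n] = (1 - a)*x[n] + a*y[n-1] per channel. As a
+worked example (numbers not from the source): sampleRate = 48000 and
+cutoffFrequency = 1000 give a = exp(-0.1309) ~= 0.877.
+*/
+
+/* The channel count cannot be changed after initialization.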
*/ + if (pLPF->channels != 0 && pLPF->channels != pConfig->channels) { + return MA_INVALID_OPERATION; + } + + pLPF->format = pConfig->format; + pLPF->channels = pConfig->channels; + + a = ma_expd(-2 * MA_PI_D * pConfig->cutoffFrequency / pConfig->sampleRate); + if (pConfig->format == ma_format_f32) { + pLPF->a.f32 = (float)a; + } else { + pLPF->a.s32 = ma_biquad_float_to_fp(a); + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_lpf1_clear_cache(ma_lpf1* pLPF) +{ + if (pLPF == NULL) { + return MA_INVALID_ARGS; + } + + if (pLPF->format == ma_format_f32) { + pLPF->a.f32 = 0; + } else { + pLPF->a.s32 = 0; + } + + return MA_SUCCESS; +} + +static MA_INLINE void ma_lpf1_process_pcm_frame_f32(ma_lpf1* pLPF, float* pY, const float* pX) +{ + ma_uint32 c; + const ma_uint32 channels = pLPF->channels; + const float a = pLPF->a.f32; + const float b = 1 - a; + + MA_ASSUME(channels > 0); + for (c = 0; c < channels; c += 1) { + float r1 = pLPF->pR1[c].f32; + float x = pX[c]; + float y; + + y = b*x + a*r1; + + pY[c] = y; + pLPF->pR1[c].f32 = y; + } +} + +static MA_INLINE void ma_lpf1_process_pcm_frame_s16(ma_lpf1* pLPF, ma_int16* pY, const ma_int16* pX) +{ + ma_uint32 c; + const ma_uint32 channels = pLPF->channels; + const ma_int32 a = pLPF->a.s32; + const ma_int32 b = ((1 << MA_BIQUAD_FIXED_POINT_SHIFT) - a); + + MA_ASSUME(channels > 0); + for (c = 0; c < channels; c += 1) { + ma_int32 r1 = pLPF->pR1[c].s32; + ma_int32 x = pX[c]; + ma_int32 y; + + y = (b*x + a*r1) >> MA_BIQUAD_FIXED_POINT_SHIFT; + + pY[c] = (ma_int16)y; + pLPF->pR1[c].s32 = (ma_int32)y; + } +} + +MA_API ma_result ma_lpf1_process_pcm_frames(ma_lpf1* pLPF, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount) +{ + ma_uint32 n; + + if (pLPF == NULL || pFramesOut == NULL || pFramesIn == NULL) { + return MA_INVALID_ARGS; + } + + /* Note that the logic below needs to support in-place filtering. That is, it must support the case where pFramesOut and pFramesIn are the same. */ + + if (pLPF->format == ma_format_f32) { + /* */ float* pY = ( float*)pFramesOut; + const float* pX = (const float*)pFramesIn; + + for (n = 0; n < frameCount; n += 1) { + ma_lpf1_process_pcm_frame_f32(pLPF, pY, pX); + pY += pLPF->channels; + pX += pLPF->channels; + } + } else if (pLPF->format == ma_format_s16) { + /* */ ma_int16* pY = ( ma_int16*)pFramesOut; + const ma_int16* pX = (const ma_int16*)pFramesIn; + + for (n = 0; n < frameCount; n += 1) { + ma_lpf1_process_pcm_frame_s16(pLPF, pY, pX); + pY += pLPF->channels; + pX += pLPF->channels; + } + } else { + MA_ASSERT(MA_FALSE); + return MA_INVALID_ARGS; /* Format not supported. Should never hit this because it's checked in ma_biquad_init() and ma_biquad_reinit(). 
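+*/
+
+/*
+A note on ma_lpf1_clear_cache() above: it zeroes the coefficient union 'a' rather than
+the per channel state in pR1, which leaves the filter as a pass-through until the next
+reinit recomputes a. Assuming the intent matches ma_biquad_clear_cache() (reset the
+running state, keep the coefficients), a corrected sketch would be:
+
+    ma_uint32 c;
+    for (c = 0; c < pLPF->channels; c += 1) {
+        if (pLPF->format == ma_format_f32) {
+            pLPF->pR1[c].f32 = 0;
+        } else {
+            pLPF->pR1[c].s32 = 0;
+        }
+    }
+
+(Similarly, ma_biquad_clear_cache() only touches pR1[0] and pR2[0], i.e. the first
+channel.)
+*/
+
+/* End of format dispatch.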
*/ + } + + return MA_SUCCESS; +} + +MA_API ma_uint32 ma_lpf1_get_latency(const ma_lpf1* pLPF) +{ + if (pLPF == NULL) { + return 0; + } + + return 1; +} + + +static MA_INLINE ma_biquad_config ma_lpf2__get_biquad_config(const ma_lpf2_config* pConfig) +{ + ma_biquad_config bqConfig; + double q; + double w; + double s; + double c; + double a; + + MA_ASSERT(pConfig != NULL); + + q = pConfig->q; + w = 2 * MA_PI_D * pConfig->cutoffFrequency / pConfig->sampleRate; + s = ma_sind(w); + c = ma_cosd(w); + a = s / (2*q); + + bqConfig.b0 = (1 - c) / 2; + bqConfig.b1 = 1 - c; + bqConfig.b2 = (1 - c) / 2; + bqConfig.a0 = 1 + a; + bqConfig.a1 = -2 * c; + bqConfig.a2 = 1 - a; + + bqConfig.format = pConfig->format; + bqConfig.channels = pConfig->channels; + + return bqConfig; +} + +MA_API ma_result ma_lpf2_get_heap_size(const ma_lpf2_config* pConfig, size_t* pHeapSizeInBytes) +{ + ma_biquad_config bqConfig; + bqConfig = ma_lpf2__get_biquad_config(pConfig); + + return ma_biquad_get_heap_size(&bqConfig, pHeapSizeInBytes); +} + +MA_API ma_result ma_lpf2_init_preallocated(const ma_lpf2_config* pConfig, void* pHeap, ma_lpf2* pLPF) +{ + ma_result result; + ma_biquad_config bqConfig; + + if (pLPF == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pLPF); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + bqConfig = ma_lpf2__get_biquad_config(pConfig); + result = ma_biquad_init_preallocated(&bqConfig, pHeap, &pLPF->bq); + if (result != MA_SUCCESS) { + return result; + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_lpf2_init(const ma_lpf2_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_lpf2* pLPF) +{ + ma_result result; + size_t heapSizeInBytes; + void* pHeap; + + result = ma_lpf2_get_heap_size(pConfig, &heapSizeInBytes); + if (result != MA_SUCCESS) { + return result; + } + + if (heapSizeInBytes > 0) { + pHeap = ma_malloc(heapSizeInBytes, pAllocationCallbacks); + if (pHeap == NULL) { + return MA_OUT_OF_MEMORY; + } + } else { + pHeap = NULL; + } + + result = ma_lpf2_init_preallocated(pConfig, pHeap, pLPF); + if (result != MA_SUCCESS) { + ma_free(pHeap, pAllocationCallbacks); + return result; + } + + pLPF->bq._ownsHeap = MA_TRUE; /* <-- This will cause the biquad to take ownership of the heap and free it when it's uninitialized. */ + return MA_SUCCESS; +} + +MA_API void ma_lpf2_uninit(ma_lpf2* pLPF, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pLPF == NULL) { + return; + } + + ma_biquad_uninit(&pLPF->bq, pAllocationCallbacks); /* <-- This will free the heap allocation. 
*/ +} + +MA_API ma_result ma_lpf2_reinit(const ma_lpf2_config* pConfig, ma_lpf2* pLPF) +{ + ma_result result; + ma_biquad_config bqConfig; + + if (pLPF == NULL || pConfig == NULL) { + return MA_INVALID_ARGS; + } + + bqConfig = ma_lpf2__get_biquad_config(pConfig); + result = ma_biquad_reinit(&bqConfig, &pLPF->bq); + if (result != MA_SUCCESS) { + return result; + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_lpf2_clear_cache(ma_lpf2* pLPF) +{ + if (pLPF == NULL) { + return MA_INVALID_ARGS; + } + + ma_biquad_clear_cache(&pLPF->bq); + + return MA_SUCCESS; +} + +static MA_INLINE void ma_lpf2_process_pcm_frame_s16(ma_lpf2* pLPF, ma_int16* pFrameOut, const ma_int16* pFrameIn) +{ + ma_biquad_process_pcm_frame_s16(&pLPF->bq, pFrameOut, pFrameIn); +} + +static MA_INLINE void ma_lpf2_process_pcm_frame_f32(ma_lpf2* pLPF, float* pFrameOut, const float* pFrameIn) +{ + ma_biquad_process_pcm_frame_f32(&pLPF->bq, pFrameOut, pFrameIn); +} + +MA_API ma_result ma_lpf2_process_pcm_frames(ma_lpf2* pLPF, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount) +{ + if (pLPF == NULL) { + return MA_INVALID_ARGS; + } + + return ma_biquad_process_pcm_frames(&pLPF->bq, pFramesOut, pFramesIn, frameCount); +} + +MA_API ma_uint32 ma_lpf2_get_latency(const ma_lpf2* pLPF) +{ + if (pLPF == NULL) { + return 0; + } + + return ma_biquad_get_latency(&pLPF->bq); +} + + +MA_API ma_lpf_config ma_lpf_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double cutoffFrequency, ma_uint32 order) +{ + ma_lpf_config config; + + MA_ZERO_OBJECT(&config); + config.format = format; + config.channels = channels; + config.sampleRate = sampleRate; + config.cutoffFrequency = cutoffFrequency; + config.order = ma_min(order, MA_MAX_FILTER_ORDER); + + return config; +} + + +typedef struct +{ + size_t sizeInBytes; + size_t lpf1Offset; + size_t lpf2Offset; /* Offset of the first second order filter. Subsequent filters will come straight after, and will each have the same heap size. 
*/ +} ma_lpf_heap_layout; + +static void ma_lpf_calculate_sub_lpf_counts(ma_uint32 order, ma_uint32* pLPF1Count, ma_uint32* pLPF2Count) +{ + MA_ASSERT(pLPF1Count != NULL); + MA_ASSERT(pLPF2Count != NULL); + + *pLPF1Count = order % 2; + *pLPF2Count = order / 2; +} + +static ma_result ma_lpf_get_heap_layout(const ma_lpf_config* pConfig, ma_lpf_heap_layout* pHeapLayout) +{ + ma_result result; + ma_uint32 lpf1Count; + ma_uint32 lpf2Count; + ma_uint32 ilpf1; + ma_uint32 ilpf2; + + MA_ASSERT(pHeapLayout != NULL); + + MA_ZERO_OBJECT(pHeapLayout); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + if (pConfig->channels == 0) { + return MA_INVALID_ARGS; + } + + if (pConfig->order > MA_MAX_FILTER_ORDER) { + return MA_INVALID_ARGS; + } + + ma_lpf_calculate_sub_lpf_counts(pConfig->order, &lpf1Count, &lpf2Count); + + pHeapLayout->sizeInBytes = 0; + + /* LPF 1 */ + pHeapLayout->lpf1Offset = pHeapLayout->sizeInBytes; + for (ilpf1 = 0; ilpf1 < lpf1Count; ilpf1 += 1) { + size_t lpf1HeapSizeInBytes; + ma_lpf1_config lpf1Config = ma_lpf1_config_init(pConfig->format, pConfig->channels, pConfig->sampleRate, pConfig->cutoffFrequency); + + result = ma_lpf1_get_heap_size(&lpf1Config, &lpf1HeapSizeInBytes); + if (result != MA_SUCCESS) { + return result; + } + + pHeapLayout->sizeInBytes += sizeof(ma_lpf1) + lpf1HeapSizeInBytes; + } + + /* LPF 2*/ + pHeapLayout->lpf2Offset = pHeapLayout->sizeInBytes; + for (ilpf2 = 0; ilpf2 < lpf2Count; ilpf2 += 1) { + size_t lpf2HeapSizeInBytes; + ma_lpf2_config lpf2Config = ma_lpf2_config_init(pConfig->format, pConfig->channels, pConfig->sampleRate, pConfig->cutoffFrequency, 0.707107); /* <-- The "q" parameter does not matter for the purpose of calculating the heap size. */ + + result = ma_lpf2_get_heap_size(&lpf2Config, &lpf2HeapSizeInBytes); + if (result != MA_SUCCESS) { + return result; + } + + pHeapLayout->sizeInBytes += sizeof(ma_lpf2) + lpf2HeapSizeInBytes; + } + + /* Make sure allocation size is aligned. */ + pHeapLayout->sizeInBytes = ma_align_64(pHeapLayout->sizeInBytes); + + return MA_SUCCESS; +} + +static ma_result ma_lpf_reinit__internal(const ma_lpf_config* pConfig, void* pHeap, ma_lpf* pLPF, ma_bool32 isNew) +{ + ma_result result; + ma_uint32 lpf1Count; + ma_uint32 lpf2Count; + ma_uint32 ilpf1; + ma_uint32 ilpf2; + ma_lpf_heap_layout heapLayout; /* Only used if isNew is true. */ + + if (pLPF == NULL || pConfig == NULL) { + return MA_INVALID_ARGS; + } + + /* Only supporting f32 and s16. */ + if (pConfig->format != ma_format_f32 && pConfig->format != ma_format_s16) { + return MA_INVALID_ARGS; + } + + /* The format cannot be changed after initialization. */ + if (pLPF->format != ma_format_unknown && pLPF->format != pConfig->format) { + return MA_INVALID_OPERATION; + } + + /* The channel count cannot be changed after initialization. */ + if (pLPF->channels != 0 && pLPF->channels != pConfig->channels) { + return MA_INVALID_OPERATION; + } + + if (pConfig->order > MA_MAX_FILTER_ORDER) { + return MA_INVALID_ARGS; + } + + ma_lpf_calculate_sub_lpf_counts(pConfig->order, &lpf1Count, &lpf2Count); + + /* The filter order can't change between reinits. 
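+*/
+
+/*
+For reference: the q assigned to each second order stage further below follows the
+standard Butterworth factorization. An order N filter splits into N % 2 first order
+sections plus N / 2 biquads with
+
+    Q_k = 1 / (2*cos(theta_k)),   theta_k = (2k + 1)*pi / (2N)   (even N)
+    Q_k = 1 / (2*cos(theta_k)),   theta_k = (k + 1)*pi / N       (odd N)
+
+e.g. order 4 gives Q ~= 0.5412 and Q ~= 1.3066 (worked numbers, not from the source),
+which is why a fixed 0.707107 is only correct for order 2.
+*/
+
+/* The filter order can't change between reinits.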
*/ + if (!isNew) { + if (pLPF->lpf1Count != lpf1Count || pLPF->lpf2Count != lpf2Count) { + return MA_INVALID_OPERATION; + } + } + + if (isNew) { + result = ma_lpf_get_heap_layout(pConfig, &heapLayout); + if (result != MA_SUCCESS) { + return result; + } + + pLPF->_pHeap = pHeap; + MA_ZERO_MEMORY(pHeap, heapLayout.sizeInBytes); + + pLPF->pLPF1 = (ma_lpf1*)ma_offset_ptr(pHeap, heapLayout.lpf1Offset); + pLPF->pLPF2 = (ma_lpf2*)ma_offset_ptr(pHeap, heapLayout.lpf2Offset); + } else { + MA_ZERO_OBJECT(&heapLayout); /* To silence a compiler warning. */ + } + + for (ilpf1 = 0; ilpf1 < lpf1Count; ilpf1 += 1) { + ma_lpf1_config lpf1Config = ma_lpf1_config_init(pConfig->format, pConfig->channels, pConfig->sampleRate, pConfig->cutoffFrequency); + + if (isNew) { + size_t lpf1HeapSizeInBytes; + + result = ma_lpf1_get_heap_size(&lpf1Config, &lpf1HeapSizeInBytes); + if (result == MA_SUCCESS) { + result = ma_lpf1_init_preallocated(&lpf1Config, ma_offset_ptr(pHeap, heapLayout.lpf1Offset + (sizeof(ma_lpf1) * lpf1Count) + (ilpf1 * lpf1HeapSizeInBytes)), &pLPF->pLPF1[ilpf1]); + } + } else { + result = ma_lpf1_reinit(&lpf1Config, &pLPF->pLPF1[ilpf1]); + } + + if (result != MA_SUCCESS) { + ma_uint32 jlpf1; + + for (jlpf1 = 0; jlpf1 < ilpf1; jlpf1 += 1) { + ma_lpf1_uninit(&pLPF->pLPF1[jlpf1], NULL); /* No need for allocation callbacks here since we used a preallocated heap allocation. */ + } + + return result; + } + } + + for (ilpf2 = 0; ilpf2 < lpf2Count; ilpf2 += 1) { + ma_lpf2_config lpf2Config; + double q; + double a; + + /* Tempting to use 0.707107, but won't result in a Butterworth filter if the order is > 2. */ + if (lpf1Count == 1) { + a = (1 + ilpf2*1) * (MA_PI_D/(pConfig->order*1)); /* Odd order. */ + } else { + a = (1 + ilpf2*2) * (MA_PI_D/(pConfig->order*2)); /* Even order. */ + } + q = 1 / (2*ma_cosd(a)); + + lpf2Config = ma_lpf2_config_init(pConfig->format, pConfig->channels, pConfig->sampleRate, pConfig->cutoffFrequency, q); + + if (isNew) { + size_t lpf2HeapSizeInBytes; + + result = ma_lpf2_get_heap_size(&lpf2Config, &lpf2HeapSizeInBytes); + if (result == MA_SUCCESS) { + result = ma_lpf2_init_preallocated(&lpf2Config, ma_offset_ptr(pHeap, heapLayout.lpf2Offset + (sizeof(ma_lpf2) * lpf2Count) + (ilpf2 * lpf2HeapSizeInBytes)), &pLPF->pLPF2[ilpf2]); + } + } else { + result = ma_lpf2_reinit(&lpf2Config, &pLPF->pLPF2[ilpf2]); + } + + if (result != MA_SUCCESS) { + ma_uint32 jlpf1; + ma_uint32 jlpf2; + + for (jlpf1 = 0; jlpf1 < lpf1Count; jlpf1 += 1) { + ma_lpf1_uninit(&pLPF->pLPF1[jlpf1], NULL); /* No need for allocation callbacks here since we used a preallocated heap allocation. */ + } + + for (jlpf2 = 0; jlpf2 < ilpf2; jlpf2 += 1) { + ma_lpf2_uninit(&pLPF->pLPF2[jlpf2], NULL); /* No need for allocation callbacks here since we used a preallocated heap allocation. 
*/ + } + + return result; + } + } + + pLPF->lpf1Count = lpf1Count; + pLPF->lpf2Count = lpf2Count; + pLPF->format = pConfig->format; + pLPF->channels = pConfig->channels; + pLPF->sampleRate = pConfig->sampleRate; + + return MA_SUCCESS; +} + +MA_API ma_result ma_lpf_get_heap_size(const ma_lpf_config* pConfig, size_t* pHeapSizeInBytes) +{ + ma_result result; + ma_lpf_heap_layout heapLayout; + + if (pHeapSizeInBytes == NULL) { + return MA_INVALID_ARGS; + } + + *pHeapSizeInBytes = 0; + + result = ma_lpf_get_heap_layout(pConfig, &heapLayout); + if (result != MA_SUCCESS) { + return result; + } + + *pHeapSizeInBytes = heapLayout.sizeInBytes; + + return result; +} + +MA_API ma_result ma_lpf_init_preallocated(const ma_lpf_config* pConfig, void* pHeap, ma_lpf* pLPF) +{ + if (pLPF == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pLPF); + + return ma_lpf_reinit__internal(pConfig, pHeap, pLPF, /*isNew*/MA_TRUE); +} + +MA_API ma_result ma_lpf_init(const ma_lpf_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_lpf* pLPF) +{ + ma_result result; + size_t heapSizeInBytes; + void* pHeap; + + result = ma_lpf_get_heap_size(pConfig, &heapSizeInBytes); + if (result != MA_SUCCESS) { + return result; + } + + if (heapSizeInBytes > 0) { + pHeap = ma_malloc(heapSizeInBytes, pAllocationCallbacks); + if (pHeap == NULL) { + return MA_OUT_OF_MEMORY; + } + } else { + pHeap = NULL; + } + + result = ma_lpf_init_preallocated(pConfig, pHeap, pLPF); + if (result != MA_SUCCESS) { + ma_free(pHeap, pAllocationCallbacks); + return result; + } + + pLPF->_ownsHeap = MA_TRUE; + return MA_SUCCESS; +} + +MA_API void ma_lpf_uninit(ma_lpf* pLPF, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_uint32 ilpf1; + ma_uint32 ilpf2; + + if (pLPF == NULL) { + return; + } + + for (ilpf1 = 0; ilpf1 < pLPF->lpf1Count; ilpf1 += 1) { + ma_lpf1_uninit(&pLPF->pLPF1[ilpf1], pAllocationCallbacks); + } + + for (ilpf2 = 0; ilpf2 < pLPF->lpf2Count; ilpf2 += 1) { + ma_lpf2_uninit(&pLPF->pLPF2[ilpf2], pAllocationCallbacks); + } + + if (pLPF->_ownsHeap) { + ma_free(pLPF->_pHeap, pAllocationCallbacks); + } +} + +MA_API ma_result ma_lpf_reinit(const ma_lpf_config* pConfig, ma_lpf* pLPF) +{ + return ma_lpf_reinit__internal(pConfig, NULL, pLPF, /*isNew*/MA_FALSE); +} + +MA_API ma_result ma_lpf_clear_cache(ma_lpf* pLPF) +{ + ma_uint32 ilpf1; + ma_uint32 ilpf2; + + if (pLPF == NULL) { + return MA_INVALID_ARGS; + } + + for (ilpf1 = 0; ilpf1 < pLPF->lpf1Count; ilpf1 += 1) { + ma_lpf1_clear_cache(&pLPF->pLPF1[ilpf1]); + } + + for (ilpf2 = 0; ilpf2 < pLPF->lpf2Count; ilpf2 += 1) { + ma_lpf2_clear_cache(&pLPF->pLPF2[ilpf2]); + } + + return MA_SUCCESS; +} + +static MA_INLINE void ma_lpf_process_pcm_frame_f32(ma_lpf* pLPF, float* pY, const void* pX) +{ + ma_uint32 ilpf1; + ma_uint32 ilpf2; + + MA_ASSERT(pLPF->format == ma_format_f32); + + MA_MOVE_MEMORY(pY, pX, ma_get_bytes_per_frame(pLPF->format, pLPF->channels)); + + for (ilpf1 = 0; ilpf1 < pLPF->lpf1Count; ilpf1 += 1) { + ma_lpf1_process_pcm_frame_f32(&pLPF->pLPF1[ilpf1], pY, pY); + } + + for (ilpf2 = 0; ilpf2 < pLPF->lpf2Count; ilpf2 += 1) { + ma_lpf2_process_pcm_frame_f32(&pLPF->pLPF2[ilpf2], pY, pY); + } +} + +static MA_INLINE void ma_lpf_process_pcm_frame_s16(ma_lpf* pLPF, ma_int16* pY, const ma_int16* pX) +{ + ma_uint32 ilpf1; + ma_uint32 ilpf2; + + MA_ASSERT(pLPF->format == ma_format_s16); + + MA_MOVE_MEMORY(pY, pX, ma_get_bytes_per_frame(pLPF->format, pLPF->channels)); + + for (ilpf1 = 0; ilpf1 < pLPF->lpf1Count; ilpf1 += 1) { + 
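+            /* Cascade: each stage filters the frame in place, and its output feeds the next stage. */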
ma_lpf1_process_pcm_frame_s16(&pLPF->pLPF1[ilpf1], pY, pY); + } + + for (ilpf2 = 0; ilpf2 < pLPF->lpf2Count; ilpf2 += 1) { + ma_lpf2_process_pcm_frame_s16(&pLPF->pLPF2[ilpf2], pY, pY); + } +} + +MA_API ma_result ma_lpf_process_pcm_frames(ma_lpf* pLPF, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount) +{ + ma_result result; + ma_uint32 ilpf1; + ma_uint32 ilpf2; + + if (pLPF == NULL) { + return MA_INVALID_ARGS; + } + + /* Faster path for in-place. */ + if (pFramesOut == pFramesIn) { + for (ilpf1 = 0; ilpf1 < pLPF->lpf1Count; ilpf1 += 1) { + result = ma_lpf1_process_pcm_frames(&pLPF->pLPF1[ilpf1], pFramesOut, pFramesOut, frameCount); + if (result != MA_SUCCESS) { + return result; + } + } + + for (ilpf2 = 0; ilpf2 < pLPF->lpf2Count; ilpf2 += 1) { + result = ma_lpf2_process_pcm_frames(&pLPF->pLPF2[ilpf2], pFramesOut, pFramesOut, frameCount); + if (result != MA_SUCCESS) { + return result; + } + } + } + + /* Slightly slower path for copying. */ + if (pFramesOut != pFramesIn) { + ma_uint32 iFrame; + + /* */ if (pLPF->format == ma_format_f32) { + /* */ float* pFramesOutF32 = ( float*)pFramesOut; + const float* pFramesInF32 = (const float*)pFramesIn; + + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + ma_lpf_process_pcm_frame_f32(pLPF, pFramesOutF32, pFramesInF32); + pFramesOutF32 += pLPF->channels; + pFramesInF32 += pLPF->channels; + } + } else if (pLPF->format == ma_format_s16) { + /* */ ma_int16* pFramesOutS16 = ( ma_int16*)pFramesOut; + const ma_int16* pFramesInS16 = (const ma_int16*)pFramesIn; + + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + ma_lpf_process_pcm_frame_s16(pLPF, pFramesOutS16, pFramesInS16); + pFramesOutS16 += pLPF->channels; + pFramesInS16 += pLPF->channels; + } + } else { + MA_ASSERT(MA_FALSE); + return MA_INVALID_OPERATION; /* Should never hit this. */ + } + } + + return MA_SUCCESS; +} + +MA_API ma_uint32 ma_lpf_get_latency(const ma_lpf* pLPF) +{ + if (pLPF == NULL) { + return 0; + } + + return pLPF->lpf2Count*2 + pLPF->lpf1Count; +} + + +/************************************************************************************************************************************************************** + +High-Pass Filtering + +**************************************************************************************************************************************************************/ +MA_API ma_hpf1_config ma_hpf1_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double cutoffFrequency) +{ + ma_hpf1_config config; + + MA_ZERO_OBJECT(&config); + config.format = format; + config.channels = channels; + config.sampleRate = sampleRate; + config.cutoffFrequency = cutoffFrequency; + + return config; +} + +MA_API ma_hpf2_config ma_hpf2_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double cutoffFrequency, double q) +{ + ma_hpf2_config config; + + MA_ZERO_OBJECT(&config); + config.format = format; + config.channels = channels; + config.sampleRate = sampleRate; + config.cutoffFrequency = cutoffFrequency; + config.q = q; + + /* Q cannot be 0 or else it'll result in a division by 0. In this case just default to 0.707107. 
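+*/
+
+/*
+0.707107 approximates 1/sqrt(2), the Butterworth Q that gives a maximally flat passband
+for a single second order section; q feeds the biquad design below through
+a = sin(w0) / (2*q).
+*/
+
+/* Q cannot be 0 or else it'll result in a division by 0. In this case just default to 0.707107.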
*/ + if (config.q == 0) { + config.q = 0.707107; + } + + return config; +} + + +typedef struct +{ + size_t sizeInBytes; + size_t r1Offset; +} ma_hpf1_heap_layout; + +static ma_result ma_hpf1_get_heap_layout(const ma_hpf1_config* pConfig, ma_hpf1_heap_layout* pHeapLayout) +{ + MA_ASSERT(pHeapLayout != NULL); + + MA_ZERO_OBJECT(pHeapLayout); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + if (pConfig->channels == 0) { + return MA_INVALID_ARGS; + } + + pHeapLayout->sizeInBytes = 0; + + /* R1 */ + pHeapLayout->r1Offset = pHeapLayout->sizeInBytes; + pHeapLayout->sizeInBytes += sizeof(ma_biquad_coefficient) * pConfig->channels; + + /* Make sure allocation size is aligned. */ + pHeapLayout->sizeInBytes = ma_align_64(pHeapLayout->sizeInBytes); + + return MA_SUCCESS; +} + +MA_API ma_result ma_hpf1_get_heap_size(const ma_hpf1_config* pConfig, size_t* pHeapSizeInBytes) +{ + ma_result result; + ma_hpf1_heap_layout heapLayout; + + if (pHeapSizeInBytes == NULL) { + return MA_INVALID_ARGS; + } + + result = ma_hpf1_get_heap_layout(pConfig, &heapLayout); + if (result != MA_SUCCESS) { + return result; + } + + *pHeapSizeInBytes = heapLayout.sizeInBytes; + + return MA_SUCCESS; +} + +MA_API ma_result ma_hpf1_init_preallocated(const ma_hpf1_config* pConfig, void* pHeap, ma_hpf1* pLPF) +{ + ma_result result; + ma_hpf1_heap_layout heapLayout; + + if (pLPF == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pLPF); + + result = ma_hpf1_get_heap_layout(pConfig, &heapLayout); + if (result != MA_SUCCESS) { + return result; + } + + pLPF->_pHeap = pHeap; + MA_ZERO_MEMORY(pHeap, heapLayout.sizeInBytes); + + pLPF->pR1 = (ma_biquad_coefficient*)ma_offset_ptr(pHeap, heapLayout.r1Offset); + + return ma_hpf1_reinit(pConfig, pLPF); +} + +MA_API ma_result ma_hpf1_init(const ma_hpf1_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_hpf1* pLPF) +{ + ma_result result; + size_t heapSizeInBytes; + void* pHeap; + + result = ma_hpf1_get_heap_size(pConfig, &heapSizeInBytes); + if (result != MA_SUCCESS) { + return result; + } + + if (heapSizeInBytes > 0) { + pHeap = ma_malloc(heapSizeInBytes, pAllocationCallbacks); + if (pHeap == NULL) { + return MA_OUT_OF_MEMORY; + } + } else { + pHeap = NULL; + } + + result = ma_hpf1_init_preallocated(pConfig, pHeap, pLPF); + if (result != MA_SUCCESS) { + ma_free(pHeap, pAllocationCallbacks); + return result; + } + + pLPF->_ownsHeap = MA_TRUE; + return MA_SUCCESS; +} + +MA_API void ma_hpf1_uninit(ma_hpf1* pHPF, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pHPF == NULL) { + return; + } + + if (pHPF->_ownsHeap) { + ma_free(pHPF->_pHeap, pAllocationCallbacks); + } +} + +MA_API ma_result ma_hpf1_reinit(const ma_hpf1_config* pConfig, ma_hpf1* pHPF) +{ + double a; + + if (pHPF == NULL || pConfig == NULL) { + return MA_INVALID_ARGS; + } + + /* Only supporting f32 and s16. */ + if (pConfig->format != ma_format_f32 && pConfig->format != ma_format_s16) { + return MA_INVALID_ARGS; + } + + /* The format cannot be changed after initialization. */ + if (pHPF->format != ma_format_unknown && pHPF->format != pConfig->format) { + return MA_INVALID_OPERATION; + } + + /* The channel count cannot be changed after initialization. 
*/ + if (pHPF->channels != 0 && pHPF->channels != pConfig->channels) { + return MA_INVALID_OPERATION; + } + + pHPF->format = pConfig->format; + pHPF->channels = pConfig->channels; + + a = ma_expd(-2 * MA_PI_D * pConfig->cutoffFrequency / pConfig->sampleRate); + if (pConfig->format == ma_format_f32) { + pHPF->a.f32 = (float)a; + } else { + pHPF->a.s32 = ma_biquad_float_to_fp(a); + } + + return MA_SUCCESS; +} + +static MA_INLINE void ma_hpf1_process_pcm_frame_f32(ma_hpf1* pHPF, float* pY, const float* pX) +{ + ma_uint32 c; + const ma_uint32 channels = pHPF->channels; + const float a = 1 - pHPF->a.f32; + const float b = 1 - a; + + MA_ASSUME(channels > 0); + for (c = 0; c < channels; c += 1) { + float r1 = pHPF->pR1[c].f32; + float x = pX[c]; + float y; + + y = b*x - a*r1; + + pY[c] = y; + pHPF->pR1[c].f32 = y; + } +} + +static MA_INLINE void ma_hpf1_process_pcm_frame_s16(ma_hpf1* pHPF, ma_int16* pY, const ma_int16* pX) +{ + ma_uint32 c; + const ma_uint32 channels = pHPF->channels; + const ma_int32 a = ((1 << MA_BIQUAD_FIXED_POINT_SHIFT) - pHPF->a.s32); + const ma_int32 b = ((1 << MA_BIQUAD_FIXED_POINT_SHIFT) - a); + + MA_ASSUME(channels > 0); + for (c = 0; c < channels; c += 1) { + ma_int32 r1 = pHPF->pR1[c].s32; + ma_int32 x = pX[c]; + ma_int32 y; + + y = (b*x - a*r1) >> MA_BIQUAD_FIXED_POINT_SHIFT; + + pY[c] = (ma_int16)y; + pHPF->pR1[c].s32 = (ma_int32)y; + } +} + +MA_API ma_result ma_hpf1_process_pcm_frames(ma_hpf1* pHPF, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount) +{ + ma_uint32 n; + + if (pHPF == NULL || pFramesOut == NULL || pFramesIn == NULL) { + return MA_INVALID_ARGS; + } + + /* Note that the logic below needs to support in-place filtering. That is, it must support the case where pFramesOut and pFramesIn are the same. */ + + if (pHPF->format == ma_format_f32) { + /* */ float* pY = ( float*)pFramesOut; + const float* pX = (const float*)pFramesIn; + + for (n = 0; n < frameCount; n += 1) { + ma_hpf1_process_pcm_frame_f32(pHPF, pY, pX); + pY += pHPF->channels; + pX += pHPF->channels; + } + } else if (pHPF->format == ma_format_s16) { + /* */ ma_int16* pY = ( ma_int16*)pFramesOut; + const ma_int16* pX = (const ma_int16*)pFramesIn; + + for (n = 0; n < frameCount; n += 1) { + ma_hpf1_process_pcm_frame_s16(pHPF, pY, pX); + pY += pHPF->channels; + pX += pHPF->channels; + } + } else { + MA_ASSERT(MA_FALSE); + return MA_INVALID_ARGS; /* Format not supported. Should never hit this because it's checked in ma_biquad_init() and ma_biquad_reinit(). 
*/ + } + + return MA_SUCCESS; +} + +MA_API ma_uint32 ma_hpf1_get_latency(const ma_hpf1* pHPF) +{ + if (pHPF == NULL) { + return 0; + } + + return 1; +} + + +static MA_INLINE ma_biquad_config ma_hpf2__get_biquad_config(const ma_hpf2_config* pConfig) +{ + ma_biquad_config bqConfig; + double q; + double w; + double s; + double c; + double a; + + MA_ASSERT(pConfig != NULL); + + q = pConfig->q; + w = 2 * MA_PI_D * pConfig->cutoffFrequency / pConfig->sampleRate; + s = ma_sind(w); + c = ma_cosd(w); + a = s / (2*q); + + bqConfig.b0 = (1 + c) / 2; + bqConfig.b1 = -(1 + c); + bqConfig.b2 = (1 + c) / 2; + bqConfig.a0 = 1 + a; + bqConfig.a1 = -2 * c; + bqConfig.a2 = 1 - a; + + bqConfig.format = pConfig->format; + bqConfig.channels = pConfig->channels; + + return bqConfig; +} + +MA_API ma_result ma_hpf2_get_heap_size(const ma_hpf2_config* pConfig, size_t* pHeapSizeInBytes) +{ + ma_biquad_config bqConfig; + bqConfig = ma_hpf2__get_biquad_config(pConfig); + + return ma_biquad_get_heap_size(&bqConfig, pHeapSizeInBytes); +} + +MA_API ma_result ma_hpf2_init_preallocated(const ma_hpf2_config* pConfig, void* pHeap, ma_hpf2* pHPF) +{ + ma_result result; + ma_biquad_config bqConfig; + + if (pHPF == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pHPF); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + bqConfig = ma_hpf2__get_biquad_config(pConfig); + result = ma_biquad_init_preallocated(&bqConfig, pHeap, &pHPF->bq); + if (result != MA_SUCCESS) { + return result; + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_hpf2_init(const ma_hpf2_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_hpf2* pHPF) +{ + ma_result result; + size_t heapSizeInBytes; + void* pHeap; + + result = ma_hpf2_get_heap_size(pConfig, &heapSizeInBytes); + if (result != MA_SUCCESS) { + return result; + } + + if (heapSizeInBytes > 0) { + pHeap = ma_malloc(heapSizeInBytes, pAllocationCallbacks); + if (pHeap == NULL) { + return MA_OUT_OF_MEMORY; + } + } else { + pHeap = NULL; + } + + result = ma_hpf2_init_preallocated(pConfig, pHeap, pHPF); + if (result != MA_SUCCESS) { + ma_free(pHeap, pAllocationCallbacks); + return result; + } + + pHPF->bq._ownsHeap = MA_TRUE; /* <-- This will cause the biquad to take ownership of the heap and free it when it's uninitialized. */ + return MA_SUCCESS; +} + +MA_API void ma_hpf2_uninit(ma_hpf2* pHPF, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pHPF == NULL) { + return; + } + + ma_biquad_uninit(&pHPF->bq, pAllocationCallbacks); /* <-- This will free the heap allocation. 
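+*/
+
+/*
+The second order coefficient recipes in this file (ma_lpf2, ma_hpf2 above, and the band
+pass, notch, peaking and shelf filters later on) appear to follow the well known Audio EQ
+Cookbook (Robert Bristow-Johnson), with w0 = 2*pi*f/fs and a = sin(w0)/(2*q); e.g. the
+high pass numerator above is (1 + cos(w0))/2, -(1 + cos(w0)), (1 + cos(w0))/2.
+*/
+
+/* (The heap is owned and freed by the embedded biquad.)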
*/ +} + +MA_API ma_result ma_hpf2_reinit(const ma_hpf2_config* pConfig, ma_hpf2* pHPF) +{ + ma_result result; + ma_biquad_config bqConfig; + + if (pHPF == NULL || pConfig == NULL) { + return MA_INVALID_ARGS; + } + + bqConfig = ma_hpf2__get_biquad_config(pConfig); + result = ma_biquad_reinit(&bqConfig, &pHPF->bq); + if (result != MA_SUCCESS) { + return result; + } + + return MA_SUCCESS; +} + +static MA_INLINE void ma_hpf2_process_pcm_frame_s16(ma_hpf2* pHPF, ma_int16* pFrameOut, const ma_int16* pFrameIn) +{ + ma_biquad_process_pcm_frame_s16(&pHPF->bq, pFrameOut, pFrameIn); +} + +static MA_INLINE void ma_hpf2_process_pcm_frame_f32(ma_hpf2* pHPF, float* pFrameOut, const float* pFrameIn) +{ + ma_biquad_process_pcm_frame_f32(&pHPF->bq, pFrameOut, pFrameIn); +} + +MA_API ma_result ma_hpf2_process_pcm_frames(ma_hpf2* pHPF, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount) +{ + if (pHPF == NULL) { + return MA_INVALID_ARGS; + } + + return ma_biquad_process_pcm_frames(&pHPF->bq, pFramesOut, pFramesIn, frameCount); +} + +MA_API ma_uint32 ma_hpf2_get_latency(const ma_hpf2* pHPF) +{ + if (pHPF == NULL) { + return 0; + } + + return ma_biquad_get_latency(&pHPF->bq); +} + + +MA_API ma_hpf_config ma_hpf_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double cutoffFrequency, ma_uint32 order) +{ + ma_hpf_config config; + + MA_ZERO_OBJECT(&config); + config.format = format; + config.channels = channels; + config.sampleRate = sampleRate; + config.cutoffFrequency = cutoffFrequency; + config.order = ma_min(order, MA_MAX_FILTER_ORDER); + + return config; +} + + +typedef struct +{ + size_t sizeInBytes; + size_t hpf1Offset; + size_t hpf2Offset; /* Offset of the first second order filter. Subsequent filters will come straight after, and will each have the same heap size. */ +} ma_hpf_heap_layout; + +static void ma_hpf_calculate_sub_hpf_counts(ma_uint32 order, ma_uint32* pHPF1Count, ma_uint32* pHPF2Count) +{ + MA_ASSERT(pHPF1Count != NULL); + MA_ASSERT(pHPF2Count != NULL); + + *pHPF1Count = order % 2; + *pHPF2Count = order / 2; +} + +static ma_result ma_hpf_get_heap_layout(const ma_hpf_config* pConfig, ma_hpf_heap_layout* pHeapLayout) +{ + ma_result result; + ma_uint32 hpf1Count; + ma_uint32 hpf2Count; + ma_uint32 ihpf1; + ma_uint32 ihpf2; + + MA_ASSERT(pHeapLayout != NULL); + + MA_ZERO_OBJECT(pHeapLayout); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + if (pConfig->channels == 0) { + return MA_INVALID_ARGS; + } + + if (pConfig->order > MA_MAX_FILTER_ORDER) { + return MA_INVALID_ARGS; + } + + ma_hpf_calculate_sub_hpf_counts(pConfig->order, &hpf1Count, &hpf2Count); + + pHeapLayout->sizeInBytes = 0; + + /* HPF 1 */ + pHeapLayout->hpf1Offset = pHeapLayout->sizeInBytes; + for (ihpf1 = 0; ihpf1 < hpf1Count; ihpf1 += 1) { + size_t hpf1HeapSizeInBytes; + ma_hpf1_config hpf1Config = ma_hpf1_config_init(pConfig->format, pConfig->channels, pConfig->sampleRate, pConfig->cutoffFrequency); + + result = ma_hpf1_get_heap_size(&hpf1Config, &hpf1HeapSizeInBytes); + if (result != MA_SUCCESS) { + return result; + } + + pHeapLayout->sizeInBytes += sizeof(ma_hpf1) + hpf1HeapSizeInBytes; + } + + /* HPF 2*/ + pHeapLayout->hpf2Offset = pHeapLayout->sizeInBytes; + for (ihpf2 = 0; ihpf2 < hpf2Count; ihpf2 += 1) { + size_t hpf2HeapSizeInBytes; + ma_hpf2_config hpf2Config = ma_hpf2_config_init(pConfig->format, pConfig->channels, pConfig->sampleRate, pConfig->cutoffFrequency, 0.707107); /* <-- The "q" parameter does not matter for the purpose of calculating the heap size. 
*/ + + result = ma_hpf2_get_heap_size(&hpf2Config, &hpf2HeapSizeInBytes); + if (result != MA_SUCCESS) { + return result; + } + + pHeapLayout->sizeInBytes += sizeof(ma_hpf2) + hpf2HeapSizeInBytes; + } + + /* Make sure allocation size is aligned. */ + pHeapLayout->sizeInBytes = ma_align_64(pHeapLayout->sizeInBytes); + + return MA_SUCCESS; +} + +static ma_result ma_hpf_reinit__internal(const ma_hpf_config* pConfig, void* pHeap, ma_hpf* pHPF, ma_bool32 isNew) +{ + ma_result result; + ma_uint32 hpf1Count; + ma_uint32 hpf2Count; + ma_uint32 ihpf1; + ma_uint32 ihpf2; + ma_hpf_heap_layout heapLayout; /* Only used if isNew is true. */ + + if (pHPF == NULL || pConfig == NULL) { + return MA_INVALID_ARGS; + } + + /* Only supporting f32 and s16. */ + if (pConfig->format != ma_format_f32 && pConfig->format != ma_format_s16) { + return MA_INVALID_ARGS; + } + + /* The format cannot be changed after initialization. */ + if (pHPF->format != ma_format_unknown && pHPF->format != pConfig->format) { + return MA_INVALID_OPERATION; + } + + /* The channel count cannot be changed after initialization. */ + if (pHPF->channels != 0 && pHPF->channels != pConfig->channels) { + return MA_INVALID_OPERATION; + } + + if (pConfig->order > MA_MAX_FILTER_ORDER) { + return MA_INVALID_ARGS; + } + + ma_hpf_calculate_sub_hpf_counts(pConfig->order, &hpf1Count, &hpf2Count); + + /* The filter order can't change between reinits. */ + if (!isNew) { + if (pHPF->hpf1Count != hpf1Count || pHPF->hpf2Count != hpf2Count) { + return MA_INVALID_OPERATION; + } + } + + if (isNew) { + result = ma_hpf_get_heap_layout(pConfig, &heapLayout); + if (result != MA_SUCCESS) { + return result; + } + + pHPF->_pHeap = pHeap; + MA_ZERO_MEMORY(pHeap, heapLayout.sizeInBytes); + + pHPF->pHPF1 = (ma_hpf1*)ma_offset_ptr(pHeap, heapLayout.hpf1Offset); + pHPF->pHPF2 = (ma_hpf2*)ma_offset_ptr(pHeap, heapLayout.hpf2Offset); + } else { + MA_ZERO_OBJECT(&heapLayout); /* To silence a compiler warning. */ + } + + for (ihpf1 = 0; ihpf1 < hpf1Count; ihpf1 += 1) { + ma_hpf1_config hpf1Config = ma_hpf1_config_init(pConfig->format, pConfig->channels, pConfig->sampleRate, pConfig->cutoffFrequency); + + if (isNew) { + size_t hpf1HeapSizeInBytes; + + result = ma_hpf1_get_heap_size(&hpf1Config, &hpf1HeapSizeInBytes); + if (result == MA_SUCCESS) { + result = ma_hpf1_init_preallocated(&hpf1Config, ma_offset_ptr(pHeap, heapLayout.hpf1Offset + (sizeof(ma_hpf1) * hpf1Count) + (ihpf1 * hpf1HeapSizeInBytes)), &pHPF->pHPF1[ihpf1]); + } + } else { + result = ma_hpf1_reinit(&hpf1Config, &pHPF->pHPF1[ihpf1]); + } + + if (result != MA_SUCCESS) { + ma_uint32 jhpf1; + + for (jhpf1 = 0; jhpf1 < ihpf1; jhpf1 += 1) { + ma_hpf1_uninit(&pHPF->pHPF1[jhpf1], NULL); /* No need for allocation callbacks here since we used a preallocated heap allocation. */ + } + + return result; + } + } + + for (ihpf2 = 0; ihpf2 < hpf2Count; ihpf2 += 1) { + ma_hpf2_config hpf2Config; + double q; + double a; + + /* Tempting to use 0.707107, but won't result in a Butterworth filter if the order is > 2. */ + if (hpf1Count == 1) { + a = (1 + ihpf2*1) * (MA_PI_D/(pConfig->order*1)); /* Odd order. */ + } else { + a = (1 + ihpf2*2) * (MA_PI_D/(pConfig->order*2)); /* Even order. 
*/ + } + q = 1 / (2*ma_cosd(a)); + + hpf2Config = ma_hpf2_config_init(pConfig->format, pConfig->channels, pConfig->sampleRate, pConfig->cutoffFrequency, q); + + if (isNew) { + size_t hpf2HeapSizeInBytes; + + result = ma_hpf2_get_heap_size(&hpf2Config, &hpf2HeapSizeInBytes); + if (result == MA_SUCCESS) { + result = ma_hpf2_init_preallocated(&hpf2Config, ma_offset_ptr(pHeap, heapLayout.hpf2Offset + (sizeof(ma_hpf2) * hpf2Count) + (ihpf2 * hpf2HeapSizeInBytes)), &pHPF->pHPF2[ihpf2]); + } + } else { + result = ma_hpf2_reinit(&hpf2Config, &pHPF->pHPF2[ihpf2]); + } + + if (result != MA_SUCCESS) { + ma_uint32 jhpf1; + ma_uint32 jhpf2; + + for (jhpf1 = 0; jhpf1 < hpf1Count; jhpf1 += 1) { + ma_hpf1_uninit(&pHPF->pHPF1[jhpf1], NULL); /* No need for allocation callbacks here since we used a preallocated heap allocation. */ + } + + for (jhpf2 = 0; jhpf2 < ihpf2; jhpf2 += 1) { + ma_hpf2_uninit(&pHPF->pHPF2[jhpf2], NULL); /* No need for allocation callbacks here since we used a preallocated heap allocation. */ + } + + return result; + } + } + + pHPF->hpf1Count = hpf1Count; + pHPF->hpf2Count = hpf2Count; + pHPF->format = pConfig->format; + pHPF->channels = pConfig->channels; + pHPF->sampleRate = pConfig->sampleRate; + + return MA_SUCCESS; +} + +MA_API ma_result ma_hpf_get_heap_size(const ma_hpf_config* pConfig, size_t* pHeapSizeInBytes) +{ + ma_result result; + ma_hpf_heap_layout heapLayout; + + if (pHeapSizeInBytes == NULL) { + return MA_INVALID_ARGS; + } + + *pHeapSizeInBytes = 0; + + result = ma_hpf_get_heap_layout(pConfig, &heapLayout); + if (result != MA_SUCCESS) { + return result; + } + + *pHeapSizeInBytes = heapLayout.sizeInBytes; + + return result; +} + +MA_API ma_result ma_hpf_init_preallocated(const ma_hpf_config* pConfig, void* pHeap, ma_hpf* pLPF) +{ + if (pLPF == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pLPF); + + return ma_hpf_reinit__internal(pConfig, pHeap, pLPF, /*isNew*/MA_TRUE); +} + +MA_API ma_result ma_hpf_init(const ma_hpf_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_hpf* pHPF) +{ + ma_result result; + size_t heapSizeInBytes; + void* pHeap; + + result = ma_hpf_get_heap_size(pConfig, &heapSizeInBytes); + if (result != MA_SUCCESS) { + return result; + } + + if (heapSizeInBytes > 0) { + pHeap = ma_malloc(heapSizeInBytes, pAllocationCallbacks); + if (pHeap == NULL) { + return MA_OUT_OF_MEMORY; + } + } else { + pHeap = NULL; + } + + result = ma_hpf_init_preallocated(pConfig, pHeap, pHPF); + if (result != MA_SUCCESS) { + ma_free(pHeap, pAllocationCallbacks); + return result; + } + + pHPF->_ownsHeap = MA_TRUE; + return MA_SUCCESS; +} + +MA_API void ma_hpf_uninit(ma_hpf* pHPF, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_uint32 ihpf1; + ma_uint32 ihpf2; + + if (pHPF == NULL) { + return; + } + + for (ihpf1 = 0; ihpf1 < pHPF->hpf1Count; ihpf1 += 1) { + ma_hpf1_uninit(&pHPF->pHPF1[ihpf1], pAllocationCallbacks); + } + + for (ihpf2 = 0; ihpf2 < pHPF->hpf2Count; ihpf2 += 1) { + ma_hpf2_uninit(&pHPF->pHPF2[ihpf2], pAllocationCallbacks); + } + + if (pHPF->_ownsHeap) { + ma_free(pHPF->_pHeap, pAllocationCallbacks); + } +} + +MA_API ma_result ma_hpf_reinit(const ma_hpf_config* pConfig, ma_hpf* pHPF) +{ + return ma_hpf_reinit__internal(pConfig, NULL, pHPF, /*isNew*/MA_FALSE); +} + +MA_API ma_result ma_hpf_process_pcm_frames(ma_hpf* pHPF, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount) +{ + ma_result result; + ma_uint32 ihpf1; + ma_uint32 ihpf2; + + if (pHPF == NULL) { + return MA_INVALID_ARGS; + } + + /* Faster path for 
in-place. */ + if (pFramesOut == pFramesIn) { + for (ihpf1 = 0; ihpf1 < pHPF->hpf1Count; ihpf1 += 1) { + result = ma_hpf1_process_pcm_frames(&pHPF->pHPF1[ihpf1], pFramesOut, pFramesOut, frameCount); + if (result != MA_SUCCESS) { + return result; + } + } + + for (ihpf2 = 0; ihpf2 < pHPF->hpf2Count; ihpf2 += 1) { + result = ma_hpf2_process_pcm_frames(&pHPF->pHPF2[ihpf2], pFramesOut, pFramesOut, frameCount); + if (result != MA_SUCCESS) { + return result; + } + } + } + + /* Slightly slower path for copying. */ + if (pFramesOut != pFramesIn) { + ma_uint32 iFrame; + + /* */ if (pHPF->format == ma_format_f32) { + /* */ float* pFramesOutF32 = ( float*)pFramesOut; + const float* pFramesInF32 = (const float*)pFramesIn; + + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + MA_COPY_MEMORY(pFramesOutF32, pFramesInF32, ma_get_bytes_per_frame(pHPF->format, pHPF->channels)); + + for (ihpf1 = 0; ihpf1 < pHPF->hpf1Count; ihpf1 += 1) { + ma_hpf1_process_pcm_frame_f32(&pHPF->pHPF1[ihpf1], pFramesOutF32, pFramesOutF32); + } + + for (ihpf2 = 0; ihpf2 < pHPF->hpf2Count; ihpf2 += 1) { + ma_hpf2_process_pcm_frame_f32(&pHPF->pHPF2[ihpf2], pFramesOutF32, pFramesOutF32); + } + + pFramesOutF32 += pHPF->channels; + pFramesInF32 += pHPF->channels; + } + } else if (pHPF->format == ma_format_s16) { + /* */ ma_int16* pFramesOutS16 = ( ma_int16*)pFramesOut; + const ma_int16* pFramesInS16 = (const ma_int16*)pFramesIn; + + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + MA_COPY_MEMORY(pFramesOutS16, pFramesInS16, ma_get_bytes_per_frame(pHPF->format, pHPF->channels)); + + for (ihpf1 = 0; ihpf1 < pHPF->hpf1Count; ihpf1 += 1) { + ma_hpf1_process_pcm_frame_s16(&pHPF->pHPF1[ihpf1], pFramesOutS16, pFramesOutS16); + } + + for (ihpf2 = 0; ihpf2 < pHPF->hpf2Count; ihpf2 += 1) { + ma_hpf2_process_pcm_frame_s16(&pHPF->pHPF2[ihpf2], pFramesOutS16, pFramesOutS16); + } + + pFramesOutS16 += pHPF->channels; + pFramesInS16 += pHPF->channels; + } + } else { + MA_ASSERT(MA_FALSE); + return MA_INVALID_OPERATION; /* Should never hit this. */ + } + } + + return MA_SUCCESS; +} + +MA_API ma_uint32 ma_hpf_get_latency(const ma_hpf* pHPF) +{ + if (pHPF == NULL) { + return 0; + } + + return pHPF->hpf2Count*2 + pHPF->hpf1Count; +} + + +/************************************************************************************************************************************************************** + +Band-Pass Filtering + +**************************************************************************************************************************************************************/ +MA_API ma_bpf2_config ma_bpf2_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double cutoffFrequency, double q) +{ + ma_bpf2_config config; + + MA_ZERO_OBJECT(&config); + config.format = format; + config.channels = channels; + config.sampleRate = sampleRate; + config.cutoffFrequency = cutoffFrequency; + config.q = q; + + /* Q cannot be 0 or else it'll result in a division by 0. In this case just default to 0.707107. 
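+*/
+
+/*
+ma_bpf2 is the cookbook "constant skirt gain" band pass (peak gain = Q): the config
+below sets b0 = q*a, b1 = 0, b2 = -q*a. Note that cutoffFrequency acts as the center
+frequency here. The order N cascade further down reuses a fixed q = 0.707107 for every
+stage (see the TODO), so it is not a true Butterworth band pass.
+*/
+
+/* Q cannot be 0 or else it'll result in a division by 0. In this case just default to 0.707107.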
*/ + if (config.q == 0) { + config.q = 0.707107; + } + + return config; +} + + +static MA_INLINE ma_biquad_config ma_bpf2__get_biquad_config(const ma_bpf2_config* pConfig) +{ + ma_biquad_config bqConfig; + double q; + double w; + double s; + double c; + double a; + + MA_ASSERT(pConfig != NULL); + + q = pConfig->q; + w = 2 * MA_PI_D * pConfig->cutoffFrequency / pConfig->sampleRate; + s = ma_sind(w); + c = ma_cosd(w); + a = s / (2*q); + + bqConfig.b0 = q * a; + bqConfig.b1 = 0; + bqConfig.b2 = -q * a; + bqConfig.a0 = 1 + a; + bqConfig.a1 = -2 * c; + bqConfig.a2 = 1 - a; + + bqConfig.format = pConfig->format; + bqConfig.channels = pConfig->channels; + + return bqConfig; +} + +MA_API ma_result ma_bpf2_get_heap_size(const ma_bpf2_config* pConfig, size_t* pHeapSizeInBytes) +{ + ma_biquad_config bqConfig; + bqConfig = ma_bpf2__get_biquad_config(pConfig); + + return ma_biquad_get_heap_size(&bqConfig, pHeapSizeInBytes); +} + +MA_API ma_result ma_bpf2_init_preallocated(const ma_bpf2_config* pConfig, void* pHeap, ma_bpf2* pBPF) +{ + ma_result result; + ma_biquad_config bqConfig; + + if (pBPF == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pBPF); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + bqConfig = ma_bpf2__get_biquad_config(pConfig); + result = ma_biquad_init_preallocated(&bqConfig, pHeap, &pBPF->bq); + if (result != MA_SUCCESS) { + return result; + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_bpf2_init(const ma_bpf2_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_bpf2* pBPF) +{ + ma_result result; + size_t heapSizeInBytes; + void* pHeap; + + result = ma_bpf2_get_heap_size(pConfig, &heapSizeInBytes); + if (result != MA_SUCCESS) { + return result; + } + + if (heapSizeInBytes > 0) { + pHeap = ma_malloc(heapSizeInBytes, pAllocationCallbacks); + if (pHeap == NULL) { + return MA_OUT_OF_MEMORY; + } + } else { + pHeap = NULL; + } + + result = ma_bpf2_init_preallocated(pConfig, pHeap, pBPF); + if (result != MA_SUCCESS) { + ma_free(pHeap, pAllocationCallbacks); + return result; + } + + pBPF->bq._ownsHeap = MA_TRUE; /* <-- This will cause the biquad to take ownership of the heap and free it when it's uninitialized. */ + return MA_SUCCESS; +} + +MA_API void ma_bpf2_uninit(ma_bpf2* pBPF, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pBPF == NULL) { + return; + } + + ma_biquad_uninit(&pBPF->bq, pAllocationCallbacks); /* <-- This will free the heap allocation. 
*/ +} + +MA_API ma_result ma_bpf2_reinit(const ma_bpf2_config* pConfig, ma_bpf2* pBPF) +{ + ma_result result; + ma_biquad_config bqConfig; + + if (pBPF == NULL || pConfig == NULL) { + return MA_INVALID_ARGS; + } + + bqConfig = ma_bpf2__get_biquad_config(pConfig); + result = ma_biquad_reinit(&bqConfig, &pBPF->bq); + if (result != MA_SUCCESS) { + return result; + } + + return MA_SUCCESS; +} + +static MA_INLINE void ma_bpf2_process_pcm_frame_s16(ma_bpf2* pBPF, ma_int16* pFrameOut, const ma_int16* pFrameIn) +{ + ma_biquad_process_pcm_frame_s16(&pBPF->bq, pFrameOut, pFrameIn); +} + +static MA_INLINE void ma_bpf2_process_pcm_frame_f32(ma_bpf2* pBPF, float* pFrameOut, const float* pFrameIn) +{ + ma_biquad_process_pcm_frame_f32(&pBPF->bq, pFrameOut, pFrameIn); +} + +MA_API ma_result ma_bpf2_process_pcm_frames(ma_bpf2* pBPF, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount) +{ + if (pBPF == NULL) { + return MA_INVALID_ARGS; + } + + return ma_biquad_process_pcm_frames(&pBPF->bq, pFramesOut, pFramesIn, frameCount); +} + +MA_API ma_uint32 ma_bpf2_get_latency(const ma_bpf2* pBPF) +{ + if (pBPF == NULL) { + return 0; + } + + return ma_biquad_get_latency(&pBPF->bq); +} + + +MA_API ma_bpf_config ma_bpf_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double cutoffFrequency, ma_uint32 order) +{ + ma_bpf_config config; + + MA_ZERO_OBJECT(&config); + config.format = format; + config.channels = channels; + config.sampleRate = sampleRate; + config.cutoffFrequency = cutoffFrequency; + config.order = ma_min(order, MA_MAX_FILTER_ORDER); + + return config; +} + + +typedef struct +{ + size_t sizeInBytes; + size_t bpf2Offset; +} ma_bpf_heap_layout; + +static ma_result ma_bpf_get_heap_layout(const ma_bpf_config* pConfig, ma_bpf_heap_layout* pHeapLayout) +{ + ma_result result; + ma_uint32 bpf2Count; + ma_uint32 ibpf2; + + MA_ASSERT(pHeapLayout != NULL); + + MA_ZERO_OBJECT(pHeapLayout); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + if (pConfig->order > MA_MAX_FILTER_ORDER) { + return MA_INVALID_ARGS; + } + + /* We must have an even number of order. */ + if ((pConfig->order & 0x1) != 0) { + return MA_INVALID_ARGS; + } + + bpf2Count = pConfig->order / 2; + + pHeapLayout->sizeInBytes = 0; + + /* BPF 2 */ + pHeapLayout->bpf2Offset = pHeapLayout->sizeInBytes; + for (ibpf2 = 0; ibpf2 < bpf2Count; ibpf2 += 1) { + size_t bpf2HeapSizeInBytes; + ma_bpf2_config bpf2Config = ma_bpf2_config_init(pConfig->format, pConfig->channels, pConfig->sampleRate, pConfig->cutoffFrequency, 0.707107); /* <-- The "q" parameter does not matter for the purpose of calculating the heap size. */ + + result = ma_bpf2_get_heap_size(&bpf2Config, &bpf2HeapSizeInBytes); + if (result != MA_SUCCESS) { + return result; + } + + pHeapLayout->sizeInBytes += sizeof(ma_bpf2) + bpf2HeapSizeInBytes; + } + + /* Make sure allocation size is aligned. */ + pHeapLayout->sizeInBytes = ma_align_64(pHeapLayout->sizeInBytes); + + return MA_SUCCESS; +} + +static ma_result ma_bpf_reinit__internal(const ma_bpf_config* pConfig, void* pHeap, ma_bpf* pBPF, ma_bool32 isNew) +{ + ma_result result; + ma_uint32 bpf2Count; + ma_uint32 ibpf2; + ma_bpf_heap_layout heapLayout; /* Only used if isNew is true. */ + + if (pBPF == NULL || pConfig == NULL) { + return MA_INVALID_ARGS; + } + + /* Only supporting f32 and s16. */ + if (pConfig->format != ma_format_f32 && pConfig->format != ma_format_s16) { + return MA_INVALID_ARGS; + } + + /* The format cannot be changed after initialization. 
*/ + if (pBPF->format != ma_format_unknown && pBPF->format != pConfig->format) { + return MA_INVALID_OPERATION; + } + + /* The channel count cannot be changed after initialization. */ + if (pBPF->channels != 0 && pBPF->channels != pConfig->channels) { + return MA_INVALID_OPERATION; + } + + if (pConfig->order > MA_MAX_FILTER_ORDER) { + return MA_INVALID_ARGS; + } + + /* We must have an even number of order. */ + if ((pConfig->order & 0x1) != 0) { + return MA_INVALID_ARGS; + } + + bpf2Count = pConfig->order / 2; + + /* The filter order can't change between reinits. */ + if (!isNew) { + if (pBPF->bpf2Count != bpf2Count) { + return MA_INVALID_OPERATION; + } + } + + if (isNew) { + result = ma_bpf_get_heap_layout(pConfig, &heapLayout); + if (result != MA_SUCCESS) { + return result; + } + + pBPF->_pHeap = pHeap; + MA_ZERO_MEMORY(pHeap, heapLayout.sizeInBytes); + + pBPF->pBPF2 = (ma_bpf2*)ma_offset_ptr(pHeap, heapLayout.bpf2Offset); + } else { + MA_ZERO_OBJECT(&heapLayout); + } + + for (ibpf2 = 0; ibpf2 < bpf2Count; ibpf2 += 1) { + ma_bpf2_config bpf2Config; + double q; + + /* TODO: Calculate Q to make this a proper Butterworth filter. */ + q = 0.707107; + + bpf2Config = ma_bpf2_config_init(pConfig->format, pConfig->channels, pConfig->sampleRate, pConfig->cutoffFrequency, q); + + if (isNew) { + size_t bpf2HeapSizeInBytes; + + result = ma_bpf2_get_heap_size(&bpf2Config, &bpf2HeapSizeInBytes); + if (result == MA_SUCCESS) { + result = ma_bpf2_init_preallocated(&bpf2Config, ma_offset_ptr(pHeap, heapLayout.bpf2Offset + (sizeof(ma_bpf2) * bpf2Count) + (ibpf2 * bpf2HeapSizeInBytes)), &pBPF->pBPF2[ibpf2]); + } + } else { + result = ma_bpf2_reinit(&bpf2Config, &pBPF->pBPF2[ibpf2]); + } + + if (result != MA_SUCCESS) { + return result; + } + } + + pBPF->bpf2Count = bpf2Count; + pBPF->format = pConfig->format; + pBPF->channels = pConfig->channels; + + return MA_SUCCESS; +} + + +MA_API ma_result ma_bpf_get_heap_size(const ma_bpf_config* pConfig, size_t* pHeapSizeInBytes) +{ + ma_result result; + ma_bpf_heap_layout heapLayout; + + if (pHeapSizeInBytes == NULL) { + return MA_INVALID_ARGS; + } + + *pHeapSizeInBytes = 0; + + result = ma_bpf_get_heap_layout(pConfig, &heapLayout); + if (result != MA_SUCCESS) { + return result; + } + + *pHeapSizeInBytes = heapLayout.sizeInBytes; + + return MA_SUCCESS; +} + +MA_API ma_result ma_bpf_init_preallocated(const ma_bpf_config* pConfig, void* pHeap, ma_bpf* pBPF) +{ + if (pBPF == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pBPF); + + return ma_bpf_reinit__internal(pConfig, pHeap, pBPF, /*isNew*/MA_TRUE); +} + +MA_API ma_result ma_bpf_init(const ma_bpf_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_bpf* pBPF) +{ + ma_result result; + size_t heapSizeInBytes; + void* pHeap; + + result = ma_bpf_get_heap_size(pConfig, &heapSizeInBytes); + if (result != MA_SUCCESS) { + return result; + } + + if (heapSizeInBytes > 0) { + pHeap = ma_malloc(heapSizeInBytes, pAllocationCallbacks); + if (pHeap == NULL) { + return MA_OUT_OF_MEMORY; + } + } else { + pHeap = NULL; + } + + result = ma_bpf_init_preallocated(pConfig, pHeap, pBPF); + if (result != MA_SUCCESS) { + ma_free(pHeap, pAllocationCallbacks); + return result; + } + + pBPF->_ownsHeap = MA_TRUE; + return MA_SUCCESS; +} + +MA_API void ma_bpf_uninit(ma_bpf* pBPF, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_uint32 ibpf2; + + if (pBPF == NULL) { + return; + } + + for (ibpf2 = 0; ibpf2 < pBPF->bpf2Count; ibpf2 += 1) { + ma_bpf2_uninit(&pBPF->pBPF2[ibpf2], pAllocationCallbacks); + } + + 
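+    /* The stage array and its registers live in the shared heap block, so it is freed only after every stage has been uninitialized. */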
if (pBPF->_ownsHeap) { + ma_free(pBPF->_pHeap, pAllocationCallbacks); + } +} + +MA_API ma_result ma_bpf_reinit(const ma_bpf_config* pConfig, ma_bpf* pBPF) +{ + return ma_bpf_reinit__internal(pConfig, NULL, pBPF, /*isNew*/MA_FALSE); +} + +MA_API ma_result ma_bpf_process_pcm_frames(ma_bpf* pBPF, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount) +{ + ma_result result; + ma_uint32 ibpf2; + + if (pBPF == NULL) { + return MA_INVALID_ARGS; + } + + /* Faster path for in-place. */ + if (pFramesOut == pFramesIn) { + for (ibpf2 = 0; ibpf2 < pBPF->bpf2Count; ibpf2 += 1) { + result = ma_bpf2_process_pcm_frames(&pBPF->pBPF2[ibpf2], pFramesOut, pFramesOut, frameCount); + if (result != MA_SUCCESS) { + return result; + } + } + } + + /* Slightly slower path for copying. */ + if (pFramesOut != pFramesIn) { + ma_uint32 iFrame; + + /* */ if (pBPF->format == ma_format_f32) { + /* */ float* pFramesOutF32 = ( float*)pFramesOut; + const float* pFramesInF32 = (const float*)pFramesIn; + + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + MA_COPY_MEMORY(pFramesOutF32, pFramesInF32, ma_get_bytes_per_frame(pBPF->format, pBPF->channels)); + + for (ibpf2 = 0; ibpf2 < pBPF->bpf2Count; ibpf2 += 1) { + ma_bpf2_process_pcm_frame_f32(&pBPF->pBPF2[ibpf2], pFramesOutF32, pFramesOutF32); + } + + pFramesOutF32 += pBPF->channels; + pFramesInF32 += pBPF->channels; + } + } else if (pBPF->format == ma_format_s16) { + /* */ ma_int16* pFramesOutS16 = ( ma_int16*)pFramesOut; + const ma_int16* pFramesInS16 = (const ma_int16*)pFramesIn; + + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + MA_COPY_MEMORY(pFramesOutS16, pFramesInS16, ma_get_bytes_per_frame(pBPF->format, pBPF->channels)); + + for (ibpf2 = 0; ibpf2 < pBPF->bpf2Count; ibpf2 += 1) { + ma_bpf2_process_pcm_frame_s16(&pBPF->pBPF2[ibpf2], pFramesOutS16, pFramesOutS16); + } + + pFramesOutS16 += pBPF->channels; + pFramesInS16 += pBPF->channels; + } + } else { + MA_ASSERT(MA_FALSE); + return MA_INVALID_OPERATION; /* Should never hit this. 
*/ + } + } + + return MA_SUCCESS; +} + +MA_API ma_uint32 ma_bpf_get_latency(const ma_bpf* pBPF) +{ + if (pBPF == NULL) { + return 0; + } + + return pBPF->bpf2Count*2; +} + + +/************************************************************************************************************************************************************** + +Notching Filter + +**************************************************************************************************************************************************************/ +MA_API ma_notch2_config ma_notch2_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double q, double frequency) +{ + ma_notch2_config config; + + MA_ZERO_OBJECT(&config); + config.format = format; + config.channels = channels; + config.sampleRate = sampleRate; + config.q = q; + config.frequency = frequency; + + if (config.q == 0) { + config.q = 0.707107; + } + + return config; +} + + +static MA_INLINE ma_biquad_config ma_notch2__get_biquad_config(const ma_notch2_config* pConfig) +{ + ma_biquad_config bqConfig; + double q; + double w; + double s; + double c; + double a; + + MA_ASSERT(pConfig != NULL); + + q = pConfig->q; + w = 2 * MA_PI_D * pConfig->frequency / pConfig->sampleRate; + s = ma_sind(w); + c = ma_cosd(w); + a = s / (2*q); + + bqConfig.b0 = 1; + bqConfig.b1 = -2 * c; + bqConfig.b2 = 1; + bqConfig.a0 = 1 + a; + bqConfig.a1 = -2 * c; + bqConfig.a2 = 1 - a; + + bqConfig.format = pConfig->format; + bqConfig.channels = pConfig->channels; + + return bqConfig; +} + +MA_API ma_result ma_notch2_get_heap_size(const ma_notch2_config* pConfig, size_t* pHeapSizeInBytes) +{ + ma_biquad_config bqConfig; + bqConfig = ma_notch2__get_biquad_config(pConfig); + + return ma_biquad_get_heap_size(&bqConfig, pHeapSizeInBytes); +} + +MA_API ma_result ma_notch2_init_preallocated(const ma_notch2_config* pConfig, void* pHeap, ma_notch2* pFilter) +{ + ma_result result; + ma_biquad_config bqConfig; + + if (pFilter == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pFilter); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + bqConfig = ma_notch2__get_biquad_config(pConfig); + result = ma_biquad_init_preallocated(&bqConfig, pHeap, &pFilter->bq); + if (result != MA_SUCCESS) { + return result; + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_notch2_init(const ma_notch2_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_notch2* pFilter) +{ + ma_result result; + size_t heapSizeInBytes; + void* pHeap; + + result = ma_notch2_get_heap_size(pConfig, &heapSizeInBytes); + if (result != MA_SUCCESS) { + return result; + } + + if (heapSizeInBytes > 0) { + pHeap = ma_malloc(heapSizeInBytes, pAllocationCallbacks); + if (pHeap == NULL) { + return MA_OUT_OF_MEMORY; + } + } else { + pHeap = NULL; + } + + result = ma_notch2_init_preallocated(pConfig, pHeap, pFilter); + if (result != MA_SUCCESS) { + ma_free(pHeap, pAllocationCallbacks); + return result; + } + + pFilter->bq._ownsHeap = MA_TRUE; /* <-- This will cause the biquad to take ownership of the heap and free it when it's uninitialized. */ + return MA_SUCCESS; +} + +MA_API void ma_notch2_uninit(ma_notch2* pFilter, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pFilter == NULL) { + return; + } + + ma_biquad_uninit(&pFilter->bq, pAllocationCallbacks); /* <-- This will free the heap allocation. 
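+*/
+
+/*
+For reference: the notch above uses b0 = b2 = 1 with b1 = -2*cos(w0), so the gain is 1
+away from the notch frequency and 0 at it. The peaking EQ that follows converts decibels
+with A = 10^(gainDB/40), the square root of the linear gain, so that a boost of +g dB and
+a cut of -g dB are exact inverses; e.g. gainDB = 6 gives A ~= 1.413 (worked number, not
+from the source).
+*/
+
+/* (Frees the embedded biquad's heap allocation.)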
*/ +} + +MA_API ma_result ma_notch2_reinit(const ma_notch2_config* pConfig, ma_notch2* pFilter) +{ + ma_result result; + ma_biquad_config bqConfig; + + if (pFilter == NULL || pConfig == NULL) { + return MA_INVALID_ARGS; + } + + bqConfig = ma_notch2__get_biquad_config(pConfig); + result = ma_biquad_reinit(&bqConfig, &pFilter->bq); + if (result != MA_SUCCESS) { + return result; + } + + return MA_SUCCESS; +} + +static MA_INLINE void ma_notch2_process_pcm_frame_s16(ma_notch2* pFilter, ma_int16* pFrameOut, const ma_int16* pFrameIn) +{ + ma_biquad_process_pcm_frame_s16(&pFilter->bq, pFrameOut, pFrameIn); +} + +static MA_INLINE void ma_notch2_process_pcm_frame_f32(ma_notch2* pFilter, float* pFrameOut, const float* pFrameIn) +{ + ma_biquad_process_pcm_frame_f32(&pFilter->bq, pFrameOut, pFrameIn); +} + +MA_API ma_result ma_notch2_process_pcm_frames(ma_notch2* pFilter, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount) +{ + if (pFilter == NULL) { + return MA_INVALID_ARGS; + } + + return ma_biquad_process_pcm_frames(&pFilter->bq, pFramesOut, pFramesIn, frameCount); +} + +MA_API ma_uint32 ma_notch2_get_latency(const ma_notch2* pFilter) +{ + if (pFilter == NULL) { + return 0; + } + + return ma_biquad_get_latency(&pFilter->bq); +} + + + +/************************************************************************************************************************************************************** + +Peaking EQ Filter + +**************************************************************************************************************************************************************/ +MA_API ma_peak2_config ma_peak2_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double gainDB, double q, double frequency) +{ + ma_peak2_config config; + + MA_ZERO_OBJECT(&config); + config.format = format; + config.channels = channels; + config.sampleRate = sampleRate; + config.gainDB = gainDB; + config.q = q; + config.frequency = frequency; + + if (config.q == 0) { + config.q = 0.707107; + } + + return config; +} + + +static MA_INLINE ma_biquad_config ma_peak2__get_biquad_config(const ma_peak2_config* pConfig) +{ + ma_biquad_config bqConfig; + double q; + double w; + double s; + double c; + double a; + double A; + + MA_ASSERT(pConfig != NULL); + + q = pConfig->q; + w = 2 * MA_PI_D * pConfig->frequency / pConfig->sampleRate; + s = ma_sind(w); + c = ma_cosd(w); + a = s / (2*q); + A = ma_powd(10, (pConfig->gainDB / 40)); + + bqConfig.b0 = 1 + (a * A); + bqConfig.b1 = -2 * c; + bqConfig.b2 = 1 - (a * A); + bqConfig.a0 = 1 + (a / A); + bqConfig.a1 = -2 * c; + bqConfig.a2 = 1 - (a / A); + + bqConfig.format = pConfig->format; + bqConfig.channels = pConfig->channels; + + return bqConfig; +} + +MA_API ma_result ma_peak2_get_heap_size(const ma_peak2_config* pConfig, size_t* pHeapSizeInBytes) +{ + ma_biquad_config bqConfig; + bqConfig = ma_peak2__get_biquad_config(pConfig); + + return ma_biquad_get_heap_size(&bqConfig, pHeapSizeInBytes); +} + +MA_API ma_result ma_peak2_init_preallocated(const ma_peak2_config* pConfig, void* pHeap, ma_peak2* pFilter) +{ + ma_result result; + ma_biquad_config bqConfig; + + if (pFilter == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pFilter); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + bqConfig = ma_peak2__get_biquad_config(pConfig); + result = ma_biquad_init_preallocated(&bqConfig, pHeap, &pFilter->bq); + if (result != MA_SUCCESS) { + return result; + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_peak2_init(const ma_peak2_config* pConfig, 
const ma_allocation_callbacks* pAllocationCallbacks, ma_peak2* pFilter) +{ + ma_result result; + size_t heapSizeInBytes; + void* pHeap; + + result = ma_peak2_get_heap_size(pConfig, &heapSizeInBytes); + if (result != MA_SUCCESS) { + return result; + } + + if (heapSizeInBytes > 0) { + pHeap = ma_malloc(heapSizeInBytes, pAllocationCallbacks); + if (pHeap == NULL) { + return MA_OUT_OF_MEMORY; + } + } else { + pHeap = NULL; + } + + result = ma_peak2_init_preallocated(pConfig, pHeap, pFilter); + if (result != MA_SUCCESS) { + ma_free(pHeap, pAllocationCallbacks); + return result; + } + + pFilter->bq._ownsHeap = MA_TRUE; /* <-- This will cause the biquad to take ownership of the heap and free it when it's uninitialized. */ + return MA_SUCCESS; +} + +MA_API void ma_peak2_uninit(ma_peak2* pFilter, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pFilter == NULL) { + return; + } + + ma_biquad_uninit(&pFilter->bq, pAllocationCallbacks); /* <-- This will free the heap allocation. */ +} + +MA_API ma_result ma_peak2_reinit(const ma_peak2_config* pConfig, ma_peak2* pFilter) +{ + ma_result result; + ma_biquad_config bqConfig; + + if (pFilter == NULL || pConfig == NULL) { + return MA_INVALID_ARGS; + } + + bqConfig = ma_peak2__get_biquad_config(pConfig); + result = ma_biquad_reinit(&bqConfig, &pFilter->bq); + if (result != MA_SUCCESS) { + return result; + } + + return MA_SUCCESS; +} + +static MA_INLINE void ma_peak2_process_pcm_frame_s16(ma_peak2* pFilter, ma_int16* pFrameOut, const ma_int16* pFrameIn) +{ + ma_biquad_process_pcm_frame_s16(&pFilter->bq, pFrameOut, pFrameIn); +} + +static MA_INLINE void ma_peak2_process_pcm_frame_f32(ma_peak2* pFilter, float* pFrameOut, const float* pFrameIn) +{ + ma_biquad_process_pcm_frame_f32(&pFilter->bq, pFrameOut, pFrameIn); +} + +MA_API ma_result ma_peak2_process_pcm_frames(ma_peak2* pFilter, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount) +{ + if (pFilter == NULL) { + return MA_INVALID_ARGS; + } + + return ma_biquad_process_pcm_frames(&pFilter->bq, pFramesOut, pFramesIn, frameCount); +} + +MA_API ma_uint32 ma_peak2_get_latency(const ma_peak2* pFilter) +{ + if (pFilter == NULL) { + return 0; + } + + return ma_biquad_get_latency(&pFilter->bq); +} + + +/************************************************************************************************************************************************************** + +Low Shelf Filter + +**************************************************************************************************************************************************************/ +MA_API ma_loshelf2_config ma_loshelf2_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double gainDB, double shelfSlope, double frequency) +{ + ma_loshelf2_config config; + + MA_ZERO_OBJECT(&config); + config.format = format; + config.channels = channels; + config.sampleRate = sampleRate; + config.gainDB = gainDB; + config.shelfSlope = shelfSlope; + config.frequency = frequency; + + return config; +} + + +static MA_INLINE ma_biquad_config ma_loshelf2__get_biquad_config(const ma_loshelf2_config* pConfig) +{ + ma_biquad_config bqConfig; + double w; + double s; + double c; + double A; + double S; + double a; + double sqrtA; + + MA_ASSERT(pConfig != NULL); + + w = 2 * MA_PI_D * pConfig->frequency / pConfig->sampleRate; + s = ma_sind(w); + c = ma_cosd(w); + A = ma_powd(10, (pConfig->gainDB / 40)); + S = pConfig->shelfSlope; + a = s/2 * ma_sqrtd((A + 1/A) * (1/S - 1) + 2); + sqrtA = 2*ma_sqrtd(A)*a; + + bqConfig.b0 = A * ((A + 1) - (A - 1)*c 
+ sqrtA); + bqConfig.b1 = 2 * A * ((A - 1) - (A + 1)*c); + bqConfig.b2 = A * ((A + 1) - (A - 1)*c - sqrtA); + bqConfig.a0 = (A + 1) + (A - 1)*c + sqrtA; + bqConfig.a1 = -2 * ((A - 1) + (A + 1)*c); + bqConfig.a2 = (A + 1) + (A - 1)*c - sqrtA; + + bqConfig.format = pConfig->format; + bqConfig.channels = pConfig->channels; + + return bqConfig; +} + +MA_API ma_result ma_loshelf2_get_heap_size(const ma_loshelf2_config* pConfig, size_t* pHeapSizeInBytes) +{ + ma_biquad_config bqConfig; + bqConfig = ma_loshelf2__get_biquad_config(pConfig); + + return ma_biquad_get_heap_size(&bqConfig, pHeapSizeInBytes); +} + +MA_API ma_result ma_loshelf2_init_preallocated(const ma_loshelf2_config* pConfig, void* pHeap, ma_loshelf2* pFilter) +{ + ma_result result; + ma_biquad_config bqConfig; + + if (pFilter == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pFilter); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + bqConfig = ma_loshelf2__get_biquad_config(pConfig); + result = ma_biquad_init_preallocated(&bqConfig, pHeap, &pFilter->bq); + if (result != MA_SUCCESS) { + return result; + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_loshelf2_init(const ma_loshelf2_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_loshelf2* pFilter) +{ + ma_result result; + size_t heapSizeInBytes; + void* pHeap; + + result = ma_loshelf2_get_heap_size(pConfig, &heapSizeInBytes); + if (result != MA_SUCCESS) { + return result; + } + + if (heapSizeInBytes > 0) { + pHeap = ma_malloc(heapSizeInBytes, pAllocationCallbacks); + if (pHeap == NULL) { + return MA_OUT_OF_MEMORY; + } + } else { + pHeap = NULL; + } + + result = ma_loshelf2_init_preallocated(pConfig, pHeap, pFilter); + if (result != MA_SUCCESS) { + ma_free(pHeap, pAllocationCallbacks); + return result; + } + + pFilter->bq._ownsHeap = MA_TRUE; /* <-- This will cause the biquad to take ownership of the heap and free it when it's uninitialized. */ + return MA_SUCCESS; +} + +MA_API void ma_loshelf2_uninit(ma_loshelf2* pFilter, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pFilter == NULL) { + return; + } + + ma_biquad_uninit(&pFilter->bq, pAllocationCallbacks); /* <-- This will free the heap allocation. 
*/ +} + +MA_API ma_result ma_loshelf2_reinit(const ma_loshelf2_config* pConfig, ma_loshelf2* pFilter) +{ + ma_result result; + ma_biquad_config bqConfig; + + if (pFilter == NULL || pConfig == NULL) { + return MA_INVALID_ARGS; + } + + bqConfig = ma_loshelf2__get_biquad_config(pConfig); + result = ma_biquad_reinit(&bqConfig, &pFilter->bq); + if (result != MA_SUCCESS) { + return result; + } + + return MA_SUCCESS; +} + +static MA_INLINE void ma_loshelf2_process_pcm_frame_s16(ma_loshelf2* pFilter, ma_int16* pFrameOut, const ma_int16* pFrameIn) +{ + ma_biquad_process_pcm_frame_s16(&pFilter->bq, pFrameOut, pFrameIn); +} + +static MA_INLINE void ma_loshelf2_process_pcm_frame_f32(ma_loshelf2* pFilter, float* pFrameOut, const float* pFrameIn) +{ + ma_biquad_process_pcm_frame_f32(&pFilter->bq, pFrameOut, pFrameIn); +} + +MA_API ma_result ma_loshelf2_process_pcm_frames(ma_loshelf2* pFilter, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount) +{ + if (pFilter == NULL) { + return MA_INVALID_ARGS; + } + + return ma_biquad_process_pcm_frames(&pFilter->bq, pFramesOut, pFramesIn, frameCount); +} + +MA_API ma_uint32 ma_loshelf2_get_latency(const ma_loshelf2* pFilter) +{ + if (pFilter == NULL) { + return 0; + } + + return ma_biquad_get_latency(&pFilter->bq); +} + + +/************************************************************************************************************************************************************** + +High Shelf Filter + +**************************************************************************************************************************************************************/ +MA_API ma_hishelf2_config ma_hishelf2_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double gainDB, double shelfSlope, double frequency) +{ + ma_hishelf2_config config; + + MA_ZERO_OBJECT(&config); + config.format = format; + config.channels = channels; + config.sampleRate = sampleRate; + config.gainDB = gainDB; + config.shelfSlope = shelfSlope; + config.frequency = frequency; + + return config; +} + + +static MA_INLINE ma_biquad_config ma_hishelf2__get_biquad_config(const ma_hishelf2_config* pConfig) +{ + ma_biquad_config bqConfig; + double w; + double s; + double c; + double A; + double S; + double a; + double sqrtA; + + MA_ASSERT(pConfig != NULL); + + w = 2 * MA_PI_D * pConfig->frequency / pConfig->sampleRate; + s = ma_sind(w); + c = ma_cosd(w); + A = ma_powd(10, (pConfig->gainDB / 40)); + S = pConfig->shelfSlope; + a = s/2 * ma_sqrtd((A + 1/A) * (1/S - 1) + 2); + sqrtA = 2*ma_sqrtd(A)*a; + + bqConfig.b0 = A * ((A + 1) + (A - 1)*c + sqrtA); + bqConfig.b1 = -2 * A * ((A - 1) + (A + 1)*c); + bqConfig.b2 = A * ((A + 1) + (A - 1)*c - sqrtA); + bqConfig.a0 = (A + 1) - (A - 1)*c + sqrtA; + bqConfig.a1 = 2 * ((A - 1) - (A + 1)*c); + bqConfig.a2 = (A + 1) - (A - 1)*c - sqrtA; + + bqConfig.format = pConfig->format; + bqConfig.channels = pConfig->channels; + + return bqConfig; +} + +MA_API ma_result ma_hishelf2_get_heap_size(const ma_hishelf2_config* pConfig, size_t* pHeapSizeInBytes) +{ + ma_biquad_config bqConfig; + bqConfig = ma_hishelf2__get_biquad_config(pConfig); + + return ma_biquad_get_heap_size(&bqConfig, pHeapSizeInBytes); +} + +MA_API ma_result ma_hishelf2_init_preallocated(const ma_hishelf2_config* pConfig, void* pHeap, ma_hishelf2* pFilter) +{ + ma_result result; + ma_biquad_config bqConfig; + + if (pFilter == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pFilter); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + bqConfig = 
ma_hishelf2__get_biquad_config(pConfig); + result = ma_biquad_init_preallocated(&bqConfig, pHeap, &pFilter->bq); + if (result != MA_SUCCESS) { + return result; + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_hishelf2_init(const ma_hishelf2_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_hishelf2* pFilter) +{ + ma_result result; + size_t heapSizeInBytes; + void* pHeap; + + result = ma_hishelf2_get_heap_size(pConfig, &heapSizeInBytes); + if (result != MA_SUCCESS) { + return result; + } + + if (heapSizeInBytes > 0) { + pHeap = ma_malloc(heapSizeInBytes, pAllocationCallbacks); + if (pHeap == NULL) { + return MA_OUT_OF_MEMORY; + } + } else { + pHeap = NULL; + } + + result = ma_hishelf2_init_preallocated(pConfig, pHeap, pFilter); + if (result != MA_SUCCESS) { + ma_free(pHeap, pAllocationCallbacks); + return result; + } + + pFilter->bq._ownsHeap = MA_TRUE; /* <-- This will cause the biquad to take ownership of the heap and free it when it's uninitialized. */ + return MA_SUCCESS; +} + +MA_API void ma_hishelf2_uninit(ma_hishelf2* pFilter, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pFilter == NULL) { + return; + } + + ma_biquad_uninit(&pFilter->bq, pAllocationCallbacks); /* <-- This will free the heap allocation. */ +} + +MA_API ma_result ma_hishelf2_reinit(const ma_hishelf2_config* pConfig, ma_hishelf2* pFilter) +{ + ma_result result; + ma_biquad_config bqConfig; + + if (pFilter == NULL || pConfig == NULL) { + return MA_INVALID_ARGS; + } + + bqConfig = ma_hishelf2__get_biquad_config(pConfig); + result = ma_biquad_reinit(&bqConfig, &pFilter->bq); + if (result != MA_SUCCESS) { + return result; + } + + return MA_SUCCESS; +} + +static MA_INLINE void ma_hishelf2_process_pcm_frame_s16(ma_hishelf2* pFilter, ma_int16* pFrameOut, const ma_int16* pFrameIn) +{ + ma_biquad_process_pcm_frame_s16(&pFilter->bq, pFrameOut, pFrameIn); +} + +static MA_INLINE void ma_hishelf2_process_pcm_frame_f32(ma_hishelf2* pFilter, float* pFrameOut, const float* pFrameIn) +{ + ma_biquad_process_pcm_frame_f32(&pFilter->bq, pFrameOut, pFrameIn); +} + +MA_API ma_result ma_hishelf2_process_pcm_frames(ma_hishelf2* pFilter, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount) +{ + if (pFilter == NULL) { + return MA_INVALID_ARGS; + } + + return ma_biquad_process_pcm_frames(&pFilter->bq, pFramesOut, pFramesIn, frameCount); +} + +MA_API ma_uint32 ma_hishelf2_get_latency(const ma_hishelf2* pFilter) +{ + if (pFilter == NULL) { + return 0; + } + + return ma_biquad_get_latency(&pFilter->bq); +} + + + +/* +Delay +*/ +MA_API ma_delay_config ma_delay_config_init(ma_uint32 channels, ma_uint32 sampleRate, ma_uint32 delayInFrames, float decay) +{ + ma_delay_config config; + + MA_ZERO_OBJECT(&config); + config.channels = channels; + config.sampleRate = sampleRate; + config.delayInFrames = delayInFrames; + config.delayStart = (decay == 0) ? MA_TRUE : MA_FALSE; /* Delay the start if it looks like we're not configuring an echo. 
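With decay == 0 the read happens before the write, so the output is silent for the first delayInFrames frames (a pure delay line); with a non-zero decay the input is written first and the dry signal comes through immediately.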
*/ + config.wet = 1; + config.dry = 1; + config.decay = decay; + + return config; +} + + +MA_API ma_result ma_delay_init(const ma_delay_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_delay* pDelay) +{ + if (pDelay == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pDelay); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + if (pConfig->decay < 0 || pConfig->decay > 1) { + return MA_INVALID_ARGS; + } + + pDelay->config = *pConfig; + pDelay->bufferSizeInFrames = pConfig->delayInFrames; + pDelay->cursor = 0; + + pDelay->pBuffer = (float*)ma_malloc((size_t)(pDelay->bufferSizeInFrames * ma_get_bytes_per_frame(ma_format_f32, pConfig->channels)), pAllocationCallbacks); + if (pDelay->pBuffer == NULL) { + return MA_OUT_OF_MEMORY; + } + + ma_silence_pcm_frames(pDelay->pBuffer, pDelay->bufferSizeInFrames, ma_format_f32, pConfig->channels); + + return MA_SUCCESS; +} + +MA_API void ma_delay_uninit(ma_delay* pDelay, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pDelay == NULL) { + return; + } + + ma_free(pDelay->pBuffer, pAllocationCallbacks); +} + +MA_API ma_result ma_delay_process_pcm_frames(ma_delay* pDelay, void* pFramesOut, const void* pFramesIn, ma_uint32 frameCount) +{ + ma_uint32 iFrame; + ma_uint32 iChannel; + float* pFramesOutF32 = (float*)pFramesOut; + const float* pFramesInF32 = (const float*)pFramesIn; + + if (pDelay == NULL || pFramesOut == NULL || pFramesIn == NULL) { + return MA_INVALID_ARGS; + } + + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + for (iChannel = 0; iChannel < pDelay->config.channels; iChannel += 1) { + ma_uint32 iBuffer = (pDelay->cursor * pDelay->config.channels) + iChannel; + + if (pDelay->config.delayStart) { + /* Delayed start. */ + + /* Read */ + pFramesOutF32[iChannel] = pDelay->pBuffer[iBuffer] * pDelay->config.wet; + + /* Feedback */ + pDelay->pBuffer[iBuffer] = (pDelay->pBuffer[iBuffer] * pDelay->config.decay) + (pFramesInF32[iChannel] * pDelay->config.dry); + } else { + /* Immediate start */ + + /* Feedback */ + pDelay->pBuffer[iBuffer] = (pDelay->pBuffer[iBuffer] * pDelay->config.decay) + (pFramesInF32[iChannel] * pDelay->config.dry); + + /* Read */ + pFramesOutF32[iChannel] = pDelay->pBuffer[iBuffer] * pDelay->config.wet; + } + } + + pDelay->cursor = (pDelay->cursor + 1) % pDelay->bufferSizeInFrames; + + pFramesOutF32 += pDelay->config.channels; + pFramesInF32 += pDelay->config.channels; + } + + return MA_SUCCESS; +} + +MA_API void ma_delay_set_wet(ma_delay* pDelay, float value) +{ + if (pDelay == NULL) { + return; + } + + pDelay->config.wet = value; +} + +MA_API float ma_delay_get_wet(const ma_delay* pDelay) +{ + if (pDelay == NULL) { + return 0; + } + + return pDelay->config.wet; +} + +MA_API void ma_delay_set_dry(ma_delay* pDelay, float value) +{ + if (pDelay == NULL) { + return; + } + + pDelay->config.dry = value; +} + +MA_API float ma_delay_get_dry(const ma_delay* pDelay) +{ + if (pDelay == NULL) { + return 0; + } + + return pDelay->config.dry; +} + +MA_API void ma_delay_set_decay(ma_delay* pDelay, float value) +{ + if (pDelay == NULL) { + return; + } + + pDelay->config.decay = value; +} + +MA_API float ma_delay_get_decay(const ma_delay* pDelay) +{ + if (pDelay == NULL) { + return 0; + } + + return pDelay->config.decay; +} + + +MA_API ma_gainer_config ma_gainer_config_init(ma_uint32 channels, ma_uint32 smoothTimeInFrames) +{ + ma_gainer_config config; + + MA_ZERO_OBJECT(&config); + config.channels = channels; + config.smoothTimeInFrames = smoothTimeInFrames; + + return config; +} + + 
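+/*
+Quick usage sketch (added for illustration; not from the upstream file): smooth a stereo
+volume change over 10ms at 48kHz using the gainer implemented below. pInput, pOutput and
+frameCount stand in for the caller's f32 buffers.
+
+    ma_gainer gainer;
+    ma_gainer_config gainerConfig = ma_gainer_config_init(2, 480);
+    if (ma_gainer_init(&gainerConfig, NULL, &gainer) == MA_SUCCESS) {
+        ma_gainer_set_gain(&gainer, 0.5f);
+        ma_gainer_process_pcm_frames(&gainer, pOutput, pInput, frameCount);
+        ma_gainer_uninit(&gainer, NULL);
+    }
+*/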
+typedef struct +{ + size_t sizeInBytes; + size_t oldGainsOffset; + size_t newGainsOffset; +} ma_gainer_heap_layout; + +static ma_result ma_gainer_get_heap_layout(const ma_gainer_config* pConfig, ma_gainer_heap_layout* pHeapLayout) +{ + MA_ASSERT(pHeapLayout != NULL); + + MA_ZERO_OBJECT(pHeapLayout); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + if (pConfig->channels == 0) { + return MA_INVALID_ARGS; + } + + pHeapLayout->sizeInBytes = 0; + + /* Old gains. */ + pHeapLayout->oldGainsOffset = pHeapLayout->sizeInBytes; + pHeapLayout->sizeInBytes += sizeof(float) * pConfig->channels; + + /* New gains. */ + pHeapLayout->newGainsOffset = pHeapLayout->sizeInBytes; + pHeapLayout->sizeInBytes += sizeof(float) * pConfig->channels; + + /* Alignment. */ + pHeapLayout->sizeInBytes = ma_align_64(pHeapLayout->sizeInBytes); + + return MA_SUCCESS; +} + + +MA_API ma_result ma_gainer_get_heap_size(const ma_gainer_config* pConfig, size_t* pHeapSizeInBytes) +{ + ma_result result; + ma_gainer_heap_layout heapLayout; + + if (pHeapSizeInBytes == NULL) { + return MA_INVALID_ARGS; + } + + *pHeapSizeInBytes = 0; + + result = ma_gainer_get_heap_layout(pConfig, &heapLayout); + if (result != MA_SUCCESS) { + return MA_INVALID_ARGS; + } + + *pHeapSizeInBytes = heapLayout.sizeInBytes; + + return MA_SUCCESS; +} + + +MA_API ma_result ma_gainer_init_preallocated(const ma_gainer_config* pConfig, void* pHeap, ma_gainer* pGainer) +{ + ma_result result; + ma_gainer_heap_layout heapLayout; + ma_uint32 iChannel; + + if (pGainer == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pGainer); + + if (pConfig == NULL || pHeap == NULL) { + return MA_INVALID_ARGS; + } + + result = ma_gainer_get_heap_layout(pConfig, &heapLayout); + if (result != MA_SUCCESS) { + return result; + } + + pGainer->_pHeap = pHeap; + MA_ZERO_MEMORY(pHeap, heapLayout.sizeInBytes); + + pGainer->pOldGains = (float*)ma_offset_ptr(pHeap, heapLayout.oldGainsOffset); + pGainer->pNewGains = (float*)ma_offset_ptr(pHeap, heapLayout.newGainsOffset); + pGainer->masterVolume = 1; + + pGainer->config = *pConfig; + pGainer->t = (ma_uint32)-1; /* No interpolation by default. */ + + for (iChannel = 0; iChannel < pConfig->channels; iChannel += 1) { + pGainer->pOldGains[iChannel] = 1; + pGainer->pNewGains[iChannel] = 1; + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_gainer_init(const ma_gainer_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_gainer* pGainer) +{ + ma_result result; + size_t heapSizeInBytes; + void* pHeap; + + result = ma_gainer_get_heap_size(pConfig, &heapSizeInBytes); + if (result != MA_SUCCESS) { + return result; /* Failed to retrieve the size of the heap allocation. 
*/ + } + + if (heapSizeInBytes > 0) { + pHeap = ma_malloc(heapSizeInBytes, pAllocationCallbacks); + if (pHeap == NULL) { + return MA_OUT_OF_MEMORY; + } + } else { + pHeap = NULL; + } + + result = ma_gainer_init_preallocated(pConfig, pHeap, pGainer); + if (result != MA_SUCCESS) { + ma_free(pHeap, pAllocationCallbacks); + return result; + } + + pGainer->_ownsHeap = MA_TRUE; + return MA_SUCCESS; +} + +MA_API void ma_gainer_uninit(ma_gainer* pGainer, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pGainer == NULL) { + return; + } + + if (pGainer->_ownsHeap) { + ma_free(pGainer->_pHeap, pAllocationCallbacks); + } +} + +static float ma_gainer_calculate_current_gain(const ma_gainer* pGainer, ma_uint32 channel) +{ + float a = (float)pGainer->t / pGainer->config.smoothTimeInFrames; + return ma_mix_f32_fast(pGainer->pOldGains[channel], pGainer->pNewGains[channel], a); +} + +static /*__attribute__((noinline))*/ ma_result ma_gainer_process_pcm_frames_internal(ma_gainer * pGainer, void* MA_RESTRICT pFramesOut, const void* MA_RESTRICT pFramesIn, ma_uint64 frameCount) +{ + ma_uint64 iFrame; + ma_uint32 iChannel; + ma_uint64 interpolatedFrameCount; + + MA_ASSERT(pGainer != NULL); + + /* + We don't necessarily need to apply a linear interpolation for the entire frameCount frames. When + linear interpolation is not needed we can do a simple volume adjustment which will be more + efficient than a lerp with an alpha value of 1. + + To do this, all we need to do is determine how many frames need to have a lerp applied. Then we + just process that number of frames with linear interpolation. After that we run on an optimized + path which just applies the new gains without a lerp. + */ + if (pGainer->t >= pGainer->config.smoothTimeInFrames) { + interpolatedFrameCount = 0; + } else { + interpolatedFrameCount = pGainer->config.smoothTimeInFrames - pGainer->t; /* Frames still inside the smoothing window. */ + if (interpolatedFrameCount > frameCount) { + interpolatedFrameCount = frameCount; + } + } + + /* + Start off with our interpolated frames. When we do this, we'll adjust frameCount and our pointers + so that the fast path can work naturally without consideration of the interpolated path. + */ + if (interpolatedFrameCount > 0) { + /* We can allow the input and output buffers to be null in which case we'll just update the internal timer. */ + if (pFramesOut != NULL && pFramesIn != NULL) { + /* + All we're really doing here is moving the old gains towards the new gains. We don't want to + be modifying the gains inside the ma_gainer object because that will break things. Instead + we can make a copy here on the stack. For extreme channel counts we can fall back to a slower + implementation which just uses a standard lerp. + */ + float* pFramesOutF32 = (float*)pFramesOut; + const float* pFramesInF32 = (const float*)pFramesIn; + float a = (float)pGainer->t / pGainer->config.smoothTimeInFrames; + float d = 1.0f / pGainer->config.smoothTimeInFrames; + + if (pGainer->config.channels <= 32) { + float pRunningGain[32]; + float pRunningGainDelta[32]; /* Could this be heap-allocated as part of the ma_gainer object? */ + + /* Initialize the running gain. */ + for (iChannel = 0; iChannel < pGainer->config.channels; iChannel += 1) { + float t = (pGainer->pNewGains[iChannel] - pGainer->pOldGains[iChannel]) * pGainer->masterVolume; + pRunningGainDelta[iChannel] = t * d; + pRunningGain[iChannel] = (pGainer->pOldGains[iChannel] * pGainer->masterVolume) + (t * a); + } + + iFrame = 0; + + /* Optimized paths for common channel counts. 
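Stereo, 5.1 and 7.1 (2, 6 and 8 channels) are special-cased below; anything else falls through to the generic per-channel loop at the bottom.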
This is mostly just experimenting with some SIMD ideas. It's not necessarily final. */ + if (pGainer->config.channels == 2) { + #if defined(MA_SUPPORT_SSE2) + if (ma_has_sse2()) { + ma_uint64 unrolledLoopCount = interpolatedFrameCount >> 1; + + /* Expand some arrays so we can have a clean SIMD loop below. */ + __m128 runningGainDelta0 = _mm_set_ps(pRunningGainDelta[1], pRunningGainDelta[0], pRunningGainDelta[1], pRunningGainDelta[0]); + __m128 runningGain0 = _mm_set_ps(pRunningGain[1] + pRunningGainDelta[1], pRunningGain[0] + pRunningGainDelta[0], pRunningGain[1], pRunningGain[0]); + + for (; iFrame < unrolledLoopCount; iFrame += 1) { + _mm_storeu_ps(&pFramesOutF32[iFrame*4 + 0], _mm_mul_ps(_mm_loadu_ps(&pFramesInF32[iFrame*4 + 0]), runningGain0)); + runningGain0 = _mm_add_ps(runningGain0, runningGainDelta0); + } + + iFrame = unrolledLoopCount << 1; + } else + #endif + { + /* + Two different scalar implementations here. Clang (and I assume GCC) will vectorize + both of these, but the bottom version results in a nicer vectorization with less + instructions emitted. The problem, however, is that the bottom version runs slower + when compiled with MSVC. The top version will be partially vectorized by MSVC. + */ + #if defined(_MSC_VER) && !defined(__clang__) + ma_uint64 unrolledLoopCount = interpolatedFrameCount >> 1; + + /* Expand some arrays so we can have a clean 4x SIMD operation in the loop. */ + pRunningGainDelta[2] = pRunningGainDelta[0]; + pRunningGainDelta[3] = pRunningGainDelta[1]; + pRunningGain[2] = pRunningGain[0] + pRunningGainDelta[0]; + pRunningGain[3] = pRunningGain[1] + pRunningGainDelta[1]; + + for (; iFrame < unrolledLoopCount; iFrame += 1) { + pFramesOutF32[iFrame*4 + 0] = pFramesInF32[iFrame*4 + 0] * pRunningGain[0]; + pFramesOutF32[iFrame*4 + 1] = pFramesInF32[iFrame*4 + 1] * pRunningGain[1]; + pFramesOutF32[iFrame*4 + 2] = pFramesInF32[iFrame*4 + 2] * pRunningGain[2]; + pFramesOutF32[iFrame*4 + 3] = pFramesInF32[iFrame*4 + 3] * pRunningGain[3]; + + /* Move the running gain forward towards the new gain. */ + pRunningGain[0] += pRunningGainDelta[0]; + pRunningGain[1] += pRunningGainDelta[1]; + pRunningGain[2] += pRunningGainDelta[2]; + pRunningGain[3] += pRunningGainDelta[3]; + } + + iFrame = unrolledLoopCount << 1; + #else + for (; iFrame < interpolatedFrameCount; iFrame += 1) { + for (iChannel = 0; iChannel < 2; iChannel += 1) { + pFramesOutF32[iFrame*2 + iChannel] = pFramesInF32[iFrame*2 + iChannel] * pRunningGain[iChannel]; + } + + for (iChannel = 0; iChannel < 2; iChannel += 1) { + pRunningGain[iChannel] += pRunningGainDelta[iChannel]; + } + } + #endif + } + } else if (pGainer->config.channels == 6) { + #if defined(MA_SUPPORT_SSE2) + if (ma_has_sse2()) { + /* + For 6 channels things are a bit more complicated because 6 isn't cleanly divisible by 4. We need to do 2 frames + at a time, meaning we'll be doing 12 samples in a group. Like the stereo case we'll need to expand some arrays + so we can do clean 4x SIMD operations. + */ + ma_uint64 unrolledLoopCount = interpolatedFrameCount >> 1; + + /* Expand some arrays so we can have a clean SIMD loop below. 
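Each iteration here consumes two frames (12 samples) as three 4-wide operations, which is why the gain and delta vectors below are staggered across frame boundaries.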
*/ + __m128 runningGainDelta0 = _mm_set_ps(pRunningGainDelta[3], pRunningGainDelta[2], pRunningGainDelta[1], pRunningGainDelta[0]); + __m128 runningGainDelta1 = _mm_set_ps(pRunningGainDelta[1], pRunningGainDelta[0], pRunningGainDelta[5], pRunningGainDelta[4]); + __m128 runningGainDelta2 = _mm_set_ps(pRunningGainDelta[5], pRunningGainDelta[4], pRunningGainDelta[3], pRunningGainDelta[2]); + + __m128 runningGain0 = _mm_set_ps(pRunningGain[3], pRunningGain[2], pRunningGain[1], pRunningGain[0]); + __m128 runningGain1 = _mm_set_ps(pRunningGain[1] + pRunningGainDelta[1], pRunningGain[0] + pRunningGainDelta[0], pRunningGain[5], pRunningGain[4]); + __m128 runningGain2 = _mm_set_ps(pRunningGain[5] + pRunningGainDelta[5], pRunningGain[4] + pRunningGainDelta[4], pRunningGain[3] + pRunningGainDelta[3], pRunningGain[2] + pRunningGainDelta[2]); + + for (; iFrame < unrolledLoopCount; iFrame += 1) { + _mm_storeu_ps(&pFramesOutF32[iFrame*12 + 0], _mm_mul_ps(_mm_loadu_ps(&pFramesInF32[iFrame*12 + 0]), runningGain0)); + _mm_storeu_ps(&pFramesOutF32[iFrame*12 + 4], _mm_mul_ps(_mm_loadu_ps(&pFramesInF32[iFrame*12 + 4]), runningGain1)); + _mm_storeu_ps(&pFramesOutF32[iFrame*12 + 8], _mm_mul_ps(_mm_loadu_ps(&pFramesInF32[iFrame*12 + 8]), runningGain2)); + + runningGain0 = _mm_add_ps(runningGain0, runningGainDelta0); + runningGain1 = _mm_add_ps(runningGain1, runningGainDelta1); + runningGain2 = _mm_add_ps(runningGain2, runningGainDelta2); + } + + iFrame = unrolledLoopCount << 1; + } else + #endif + { + for (; iFrame < interpolatedFrameCount; iFrame += 1) { + for (iChannel = 0; iChannel < 6; iChannel += 1) { + pFramesOutF32[iFrame*6 + iChannel] = pFramesInF32[iFrame*6 + iChannel] * pRunningGain[iChannel]; + } + + /* Move the running gain forward towards the new gain. */ + for (iChannel = 0; iChannel < 6; iChannel += 1) { + pRunningGain[iChannel] += pRunningGainDelta[iChannel]; + } + } + } + } else if (pGainer->config.channels == 8) { + /* For 8 channels we can just go over frame by frame and do all eight channels as 2 separate 4x SIMD operations. */ + #if defined(MA_SUPPORT_SSE2) + if (ma_has_sse2()) { + __m128 runningGainDelta0 = _mm_loadu_ps(&pRunningGainDelta[0]); + __m128 runningGainDelta1 = _mm_loadu_ps(&pRunningGainDelta[4]); + __m128 runningGain0 = _mm_loadu_ps(&pRunningGain[0]); + __m128 runningGain1 = _mm_loadu_ps(&pRunningGain[4]); + + for (; iFrame < interpolatedFrameCount; iFrame += 1) { + _mm_storeu_ps(&pFramesOutF32[iFrame*8 + 0], _mm_mul_ps(_mm_loadu_ps(&pFramesInF32[iFrame*8 + 0]), runningGain0)); + _mm_storeu_ps(&pFramesOutF32[iFrame*8 + 4], _mm_mul_ps(_mm_loadu_ps(&pFramesInF32[iFrame*8 + 4]), runningGain1)); + + runningGain0 = _mm_add_ps(runningGain0, runningGainDelta0); + runningGain1 = _mm_add_ps(runningGain1, runningGainDelta1); + } + } else + #endif + { + /* This is crafted so that it auto-vectorizes when compiled with Clang. */ + for (; iFrame < interpolatedFrameCount; iFrame += 1) { + for (iChannel = 0; iChannel < 8; iChannel += 1) { + pFramesOutF32[iFrame*8 + iChannel] = pFramesInF32[iFrame*8 + iChannel] * pRunningGain[iChannel]; + } + + /* Move the running gain forward towards the new gain. 
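One delta step per frame, keeping this scalar fallback in step with the SSE2 path above.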
*/ + for (iChannel = 0; iChannel < 8; iChannel += 1) { + pRunningGain[iChannel] += pRunningGainDelta[iChannel]; + } + } + } + } + + for (; iFrame < interpolatedFrameCount; iFrame += 1) { + for (iChannel = 0; iChannel < pGainer->config.channels; iChannel += 1) { + pFramesOutF32[iFrame*pGainer->config.channels + iChannel] = pFramesInF32[iFrame*pGainer->config.channels + iChannel] * pRunningGain[iChannel]; + pRunningGain[iChannel] += pRunningGainDelta[iChannel]; + } + } + } else { + /* Slower path for extreme channel counts where we can't fit enough on the stack. We could also move this to the heap as part of the ma_gainer object which might even be better since it'll only be updated when the gains actually change. */ + for (iFrame = 0; iFrame < interpolatedFrameCount; iFrame += 1) { + for (iChannel = 0; iChannel < pGainer->config.channels; iChannel += 1) { + pFramesOutF32[iFrame*pGainer->config.channels + iChannel] = pFramesInF32[iFrame*pGainer->config.channels + iChannel] * ma_mix_f32_fast(pGainer->pOldGains[iChannel], pGainer->pNewGains[iChannel], a) * pGainer->masterVolume; + } + + a += d; + } + } + } + + /* Make sure the timer is updated. */ + pGainer->t = (ma_uint32)ma_min(pGainer->t + interpolatedFrameCount, pGainer->config.smoothTimeInFrames); + + /* Adjust our arguments so the next part can work normally. */ + frameCount -= interpolatedFrameCount; + pFramesOut = ma_offset_ptr(pFramesOut, interpolatedFrameCount * sizeof(float)); + pFramesIn = ma_offset_ptr(pFramesIn, interpolatedFrameCount * sizeof(float)); + } + + /* All we need to do here is apply the new gains using an optimized path. */ + if (pFramesOut != NULL && pFramesIn != NULL) { + if (pGainer->config.channels <= 32) { + float gains[32]; + for (iChannel = 0; iChannel < pGainer->config.channels; iChannel += 1) { + gains[iChannel] = pGainer->pNewGains[iChannel] * pGainer->masterVolume; + } + + ma_copy_and_apply_volume_factor_per_channel_f32((float*)pFramesOut, (const float*)pFramesIn, frameCount, pGainer->config.channels, gains); + } else { + /* Slow path. Too many channels to fit on the stack. Need to apply a master volume as a separate path. */ + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + for (iChannel = 0; iChannel < pGainer->config.channels; iChannel += 1) { + ((float*)pFramesOut)[iFrame*pGainer->config.channels + iChannel] = ((const float*)pFramesIn)[iFrame*pGainer->config.channels + iChannel] * pGainer->pNewGains[iChannel] * pGainer->masterVolume; + } + } + } + } + + /* Now that some frames have been processed we need to make sure future changes to the gain are interpolated. */ + if (pGainer->t == (ma_uint32)-1) { + pGainer->t = (ma_uint32)ma_min(pGainer->config.smoothTimeInFrames, frameCount); + } + +#if 0 + if (pGainer->t >= pGainer->config.smoothTimeInFrames) { + /* Fast path. No gain calculation required. */ + ma_copy_and_apply_volume_factor_per_channel_f32(pFramesOutF32, pFramesInF32, frameCount, pGainer->config.channels, pGainer->pNewGains); + ma_apply_volume_factor_f32(pFramesOutF32, frameCount * pGainer->config.channels, pGainer->masterVolume); + + /* Now that some frames have been processed we need to make sure future changes to the gain are interpolated. */ + if (pGainer->t == (ma_uint32)-1) { + pGainer->t = pGainer->config.smoothTimeInFrames; + } + } else { + /* Slow path. Need to interpolate the gain for each channel individually. */ + + /* We can allow the input and output buffers to be null in which case we'll just update the internal timer. 
*/ + if (pFramesOut != NULL && pFramesIn != NULL) { + float a = (float)pGainer->t / pGainer->config.smoothTimeInFrames; + float d = 1.0f / pGainer->config.smoothTimeInFrames; + ma_uint32 channelCount = pGainer->config.channels; + + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + for (iChannel = 0; iChannel < channelCount; iChannel += 1) { + pFramesOutF32[iChannel] = pFramesInF32[iChannel] * ma_mix_f32_fast(pGainer->pOldGains[iChannel], pGainer->pNewGains[iChannel], a) * pGainer->masterVolume; + } + + pFramesOutF32 += channelCount; + pFramesInF32 += channelCount; + + a += d; + if (a > 1) { + a = 1; + } + } + } + + pGainer->t = (ma_uint32)ma_min(pGainer->t + frameCount, pGainer->config.smoothTimeInFrames); + + #if 0 /* Reference implementation. */ + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + /* We can allow the input and output buffers to be null in which case we'll just update the internal timer. */ + if (pFramesOut != NULL && pFramesIn != NULL) { + for (iChannel = 0; iChannel < pGainer->config.channels; iChannel += 1) { + pFramesOutF32[iFrame * pGainer->config.channels + iChannel] = pFramesInF32[iFrame * pGainer->config.channels + iChannel] * ma_gainer_calculate_current_gain(pGainer, iChannel) * pGainer->masterVolume; + } + } + + /* Move interpolation time forward, but don't go beyond our smoothing time. */ + pGainer->t = ma_min(pGainer->t + 1, pGainer->config.smoothTimeInFrames); + } + #endif + } +#endif + + return MA_SUCCESS; +} + +MA_API ma_result ma_gainer_process_pcm_frames(ma_gainer* pGainer, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount) +{ + if (pGainer == NULL) { + return MA_INVALID_ARGS; + } + + /* + ma_gainer_process_pcm_frames_internal() marks pFramesOut and pFramesIn with MA_RESTRICT which + helps with auto-vectorization. + */ + return ma_gainer_process_pcm_frames_internal(pGainer, pFramesOut, pFramesIn, frameCount); +} + +static void ma_gainer_set_gain_by_index(ma_gainer* pGainer, float newGain, ma_uint32 iChannel) +{ + pGainer->pOldGains[iChannel] = ma_gainer_calculate_current_gain(pGainer, iChannel); + pGainer->pNewGains[iChannel] = newGain; +} + +static void ma_gainer_reset_smoothing_time(ma_gainer* pGainer) +{ + if (pGainer->t == (ma_uint32)-1) { + pGainer->t = pGainer->config.smoothTimeInFrames; /* No smoothing required for initial gains setting. */ + } else { + pGainer->t = 0; + } +} + +MA_API ma_result ma_gainer_set_gain(ma_gainer* pGainer, float newGain) +{ + ma_uint32 iChannel; + + if (pGainer == NULL) { + return MA_INVALID_ARGS; + } + + for (iChannel = 0; iChannel < pGainer->config.channels; iChannel += 1) { + ma_gainer_set_gain_by_index(pGainer, newGain, iChannel); + } + + /* The smoothing time needs to be reset to ensure we always interpolate by the configured smoothing time, but only if it's not the first setting. */ + ma_gainer_reset_smoothing_time(pGainer); + + return MA_SUCCESS; +} + +MA_API ma_result ma_gainer_set_gains(ma_gainer* pGainer, float* pNewGains) +{ + ma_uint32 iChannel; + + if (pGainer == NULL || pNewGains == NULL) { + return MA_INVALID_ARGS; + } + + for (iChannel = 0; iChannel < pGainer->config.channels; iChannel += 1) { + ma_gainer_set_gain_by_index(pGainer, pNewGains[iChannel], iChannel); + } + + /* The smoothing time needs to be reset to ensure we always interpolate by the configured smoothing time, but only if it's not the first setting. 
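ma_gainer_reset_smoothing_time() uses t == (ma_uint32)-1 to detect that initial state and skips the interpolation for it.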
*/ + ma_gainer_reset_smoothing_time(pGainer); + + return MA_SUCCESS; +} + +MA_API ma_result ma_gainer_set_master_volume(ma_gainer* pGainer, float volume) +{ + if (pGainer == NULL) { + return MA_INVALID_ARGS; + } + + pGainer->masterVolume = volume; + + return MA_SUCCESS; +} + +MA_API ma_result ma_gainer_get_master_volume(const ma_gainer* pGainer, float* pVolume) +{ + if (pGainer == NULL || pVolume == NULL) { + return MA_INVALID_ARGS; + } + + *pVolume = pGainer->masterVolume; + + return MA_SUCCESS; +} + + +MA_API ma_panner_config ma_panner_config_init(ma_format format, ma_uint32 channels) +{ + ma_panner_config config; + + MA_ZERO_OBJECT(&config); + config.format = format; + config.channels = channels; + config.mode = ma_pan_mode_balance; /* Set to balancing mode by default because it's consistent with other audio engines and most likely what the caller is expecting. */ + config.pan = 0; + + return config; +} + + +MA_API ma_result ma_panner_init(const ma_panner_config* pConfig, ma_panner* pPanner) +{ + if (pPanner == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pPanner); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + pPanner->format = pConfig->format; + pPanner->channels = pConfig->channels; + pPanner->mode = pConfig->mode; + pPanner->pan = pConfig->pan; + + return MA_SUCCESS; +} + +static void ma_stereo_balance_pcm_frames_f32(float* pFramesOut, const float* pFramesIn, ma_uint64 frameCount, float pan) +{ + ma_uint64 iFrame; + + if (pan > 0) { + float factor = 1.0f - pan; + if (pFramesOut == pFramesIn) { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + pFramesOut[iFrame*2 + 0] = pFramesIn[iFrame*2 + 0] * factor; + } + } else { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + pFramesOut[iFrame*2 + 0] = pFramesIn[iFrame*2 + 0] * factor; + pFramesOut[iFrame*2 + 1] = pFramesIn[iFrame*2 + 1]; + } + } + } else { + float factor = 1.0f + pan; + if (pFramesOut == pFramesIn) { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + pFramesOut[iFrame*2 + 1] = pFramesIn[iFrame*2 + 1] * factor; + } + } else { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + pFramesOut[iFrame*2 + 0] = pFramesIn[iFrame*2 + 0]; + pFramesOut[iFrame*2 + 1] = pFramesIn[iFrame*2 + 1] * factor; + } + } + } +} + +static void ma_stereo_balance_pcm_frames(void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount, ma_format format, float pan) +{ + if (pan == 0) { + /* Fast path. No panning required. */ + if (pFramesOut == pFramesIn) { + /* No-op */ + } else { + ma_copy_pcm_frames(pFramesOut, pFramesIn, frameCount, format, 2); + } + + return; + } + + switch (format) { + case ma_format_f32: ma_stereo_balance_pcm_frames_f32((float*)pFramesOut, (float*)pFramesIn, frameCount, pan); break; + + /* Unknown format. Just copy. 
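Only f32 has a specialized stereo balance routine for now.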
*/ + default: + { + ma_copy_pcm_frames(pFramesOut, pFramesIn, frameCount, format, 2); + } break; + } +} + + +static void ma_stereo_pan_pcm_frames_f32(float* pFramesOut, const float* pFramesIn, ma_uint64 frameCount, float pan) +{ + ma_uint64 iFrame; + + if (pan > 0) { + float factorL0 = 1.0f - pan; + float factorL1 = 0.0f + pan; + + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + float sample0 = (pFramesIn[iFrame*2 + 0] * factorL0); + float sample1 = (pFramesIn[iFrame*2 + 0] * factorL1) + pFramesIn[iFrame*2 + 1]; + + pFramesOut[iFrame*2 + 0] = sample0; + pFramesOut[iFrame*2 + 1] = sample1; + } + } else { + float factorR0 = 0.0f - pan; + float factorR1 = 1.0f + pan; + + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + float sample0 = pFramesIn[iFrame*2 + 0] + (pFramesIn[iFrame*2 + 1] * factorR0); + float sample1 = (pFramesIn[iFrame*2 + 1] * factorR1); + + pFramesOut[iFrame*2 + 0] = sample0; + pFramesOut[iFrame*2 + 1] = sample1; + } + } +} + +static void ma_stereo_pan_pcm_frames(void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount, ma_format format, float pan) +{ + if (pan == 0) { + /* Fast path. No panning required. */ + if (pFramesOut == pFramesIn) { + /* No-op */ + } else { + ma_copy_pcm_frames(pFramesOut, pFramesIn, frameCount, format, 2); + } + + return; + } + + switch (format) { + case ma_format_f32: ma_stereo_pan_pcm_frames_f32((float*)pFramesOut, (float*)pFramesIn, frameCount, pan); break; + + /* Unknown format. Just copy. */ + default: + { + ma_copy_pcm_frames(pFramesOut, pFramesIn, frameCount, format, 2); + } break; + } +} + +MA_API ma_result ma_panner_process_pcm_frames(ma_panner* pPanner, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount) +{ + if (pPanner == NULL || pFramesOut == NULL || pFramesIn == NULL) { + return MA_INVALID_ARGS; + } + + if (pPanner->channels == 2) { + /* Stereo case. For now assume channel 0 is left and channel 1 is right, but should probably add support for a channel map. */ + if (pPanner->mode == ma_pan_mode_balance) { + ma_stereo_balance_pcm_frames(pFramesOut, pFramesIn, frameCount, pPanner->format, pPanner->pan); + } else { + ma_stereo_pan_pcm_frames(pFramesOut, pFramesIn, frameCount, pPanner->format, pPanner->pan); + } + } else { + if (pPanner->channels == 1) { + /* Panning has no effect on mono streams. */ + ma_copy_pcm_frames(pFramesOut, pFramesIn, frameCount, pPanner->format, pPanner->channels); + } else { + /* For now we're not going to support non-stereo setups. Not sure how I want to handle this case just yet. 
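Just pass the frames through untouched so the caller still gets valid output.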
*/ + ma_copy_pcm_frames(pFramesOut, pFramesIn, frameCount, pPanner->format, pPanner->channels); + } + } + + return MA_SUCCESS; +} + +MA_API void ma_panner_set_mode(ma_panner* pPanner, ma_pan_mode mode) +{ + if (pPanner == NULL) { + return; + } + + pPanner->mode = mode; +} + +MA_API ma_pan_mode ma_panner_get_mode(const ma_panner* pPanner) +{ + if (pPanner == NULL) { + return ma_pan_mode_balance; + } + + return pPanner->mode; +} + +MA_API void ma_panner_set_pan(ma_panner* pPanner, float pan) +{ + if (pPanner == NULL) { + return; + } + + pPanner->pan = ma_clamp(pan, -1.0f, 1.0f); +} + +MA_API float ma_panner_get_pan(const ma_panner* pPanner) +{ + if (pPanner == NULL) { + return 0; + } + + return pPanner->pan; +} + + + + +MA_API ma_fader_config ma_fader_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate) +{ + ma_fader_config config; + + MA_ZERO_OBJECT(&config); + config.format = format; + config.channels = channels; + config.sampleRate = sampleRate; + + return config; +} + + +MA_API ma_result ma_fader_init(const ma_fader_config* pConfig, ma_fader* pFader) +{ + if (pFader == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pFader); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + /* Only f32 is supported for now. */ + if (pConfig->format != ma_format_f32) { + return MA_INVALID_ARGS; + } + + pFader->config = *pConfig; + pFader->volumeBeg = 1; + pFader->volumeEnd = 1; + pFader->lengthInFrames = 0; + pFader->cursorInFrames = 0; + + return MA_SUCCESS; +} + +MA_API ma_result ma_fader_process_pcm_frames(ma_fader* pFader, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount) +{ + if (pFader == NULL) { + return MA_INVALID_ARGS; + } + + /* If the cursor is still negative we need to just copy the absolute number of those frames, but no more than frameCount. */ + if (pFader->cursorInFrames < 0) { + ma_uint64 absCursorInFrames = (ma_uint64)0 - pFader->cursorInFrames; + if (absCursorInFrames > frameCount) { + absCursorInFrames = frameCount; + } + + ma_copy_pcm_frames(pFramesOut, pFramesIn, absCursorInFrames, pFader->config.format, pFader->config.channels); + + pFader->cursorInFrames += absCursorInFrames; + frameCount -= absCursorInFrames; + pFramesOut = ma_offset_ptr(pFramesOut, ma_get_bytes_per_frame(pFader->config.format, pFader->config.channels)*absCursorInFrames); + pFramesIn = ma_offset_ptr(pFramesIn, ma_get_bytes_per_frame(pFader->config.format, pFader->config.channels)*absCursorInFrames); + } + + if (pFader->cursorInFrames >= 0) { + /* + For now we need to clamp frameCount so that the cursor never overflows 32-bits. This is required for + the conversion to a float which we use for the linear interpolation. This might be changed later. + */ + if (frameCount + pFader->cursorInFrames > UINT_MAX) { + frameCount = UINT_MAX - pFader->cursorInFrames; + } + + /* Optimized path if volumeBeg and volumeEnd are equal. */ + if (pFader->volumeBeg == pFader->volumeEnd) { + if (pFader->volumeBeg == 1) { + /* Straight copy. */ + ma_copy_pcm_frames(pFramesOut, pFramesIn, frameCount, pFader->config.format, pFader->config.channels); + } else { + /* Copy with volume. */ + ma_copy_and_apply_volume_and_clip_pcm_frames(pFramesOut, pFramesIn, frameCount, pFader->config.format, pFader->config.channels, pFader->volumeBeg); + } + } else { + /* Slower path. Volumes are different, so may need to do an interpolation. */ + if ((ma_uint64)pFader->cursorInFrames >= pFader->lengthInFrames) { + /* Fast path. 
We've gone past the end of the fade period so just apply the end volume to all samples. */ + ma_copy_and_apply_volume_and_clip_pcm_frames(pFramesOut, pFramesIn, frameCount, pFader->config.format, pFader->config.channels, pFader->volumeEnd); + } else { + /* Slow path. This is where we do the actual fading. */ + ma_uint64 iFrame; + ma_uint32 iChannel; + + /* For now we only support f32. Support for other formats might be added later. */ + if (pFader->config.format == ma_format_f32) { + const float* pFramesInF32 = (const float*)pFramesIn; + /* */ float* pFramesOutF32 = ( float*)pFramesOut; + + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + float a = (ma_uint32)ma_min(pFader->cursorInFrames + iFrame, pFader->lengthInFrames) / (float)((ma_uint32)pFader->lengthInFrames); /* Safe cast due to the frameCount clamp at the top of this function. */ + float volume = ma_mix_f32_fast(pFader->volumeBeg, pFader->volumeEnd, a); + + for (iChannel = 0; iChannel < pFader->config.channels; iChannel += 1) { + pFramesOutF32[iFrame*pFader->config.channels + iChannel] = pFramesInF32[iFrame*pFader->config.channels + iChannel] * volume; + } + } + } else { + return MA_NOT_IMPLEMENTED; + } + } + } + } + + pFader->cursorInFrames += frameCount; + + return MA_SUCCESS; +} + +MA_API void ma_fader_get_data_format(const ma_fader* pFader, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate) +{ + if (pFader == NULL) { + return; + } + + if (pFormat != NULL) { + *pFormat = pFader->config.format; + } + + if (pChannels != NULL) { + *pChannels = pFader->config.channels; + } + + if (pSampleRate != NULL) { + *pSampleRate = pFader->config.sampleRate; + } +} + +MA_API void ma_fader_set_fade(ma_fader* pFader, float volumeBeg, float volumeEnd, ma_uint64 lengthInFrames) +{ + ma_fader_set_fade_ex(pFader, volumeBeg, volumeEnd, lengthInFrames, 0); +} + +MA_API void ma_fader_set_fade_ex(ma_fader* pFader, float volumeBeg, float volumeEnd, ma_uint64 lengthInFrames, ma_int64 startOffsetInFrames) +{ + if (pFader == NULL) { + return; + } + + /* If the volume is negative, use current volume. */ + if (volumeBeg < 0) { + volumeBeg = ma_fader_get_current_volume(pFader); + } + + /* + The length needs to be clamped to 32-bits due to how we convert it to a float for linear + interpolation reasons. I might change this requirement later, but for now it's not important. + */ + if (lengthInFrames > UINT_MAX) { + lengthInFrames = UINT_MAX; + } + + /* The start offset needs to be clamped to ensure it doesn't overflow a signed number. */ + if (startOffsetInFrames > INT_MAX) { + startOffsetInFrames = INT_MAX; + } + + pFader->volumeBeg = volumeBeg; + pFader->volumeEnd = volumeEnd; + pFader->lengthInFrames = lengthInFrames; + pFader->cursorInFrames = -startOffsetInFrames; +} + +MA_API float ma_fader_get_current_volume(const ma_fader* pFader) +{ + if (pFader == NULL) { + return 0.0f; + } + + /* Any frames prior to the start of the fade period will be at unfaded volume. */ + if (pFader->cursorInFrames < 0) { + return 1.0f; + } + + /* The current volume depends on the position of the cursor. */ + if (pFader->cursorInFrames == 0) { + return pFader->volumeBeg; + } else if ((ma_uint64)pFader->cursorInFrames >= pFader->lengthInFrames) { /* Safe cast because the < 0 case was checked above. */ + return pFader->volumeEnd; + } else { + /* The cursor is somewhere inside the fading period. We can figure this out with a simple linear interpolation between volumeBeg and volumeEnd based on our cursor position. 
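For example, a quarter of the way through a fade from 0 to 1 this evaluates to 0.25.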
*/ + return ma_mix_f32_fast(pFader->volumeBeg, pFader->volumeEnd, (ma_uint32)pFader->cursorInFrames / (float)((ma_uint32)pFader->lengthInFrames)); /* Safe cast to uint32 because we clamp it in ma_fader_process_pcm_frames(). */ + } +} + + + + + +MA_API ma_vec3f ma_vec3f_init_3f(float x, float y, float z) +{ + ma_vec3f v; + + v.x = x; + v.y = y; + v.z = z; + + return v; +} + +MA_API ma_vec3f ma_vec3f_sub(ma_vec3f a, ma_vec3f b) +{ + return ma_vec3f_init_3f( + a.x - b.x, + a.y - b.y, + a.z - b.z + ); +} + +MA_API ma_vec3f ma_vec3f_neg(ma_vec3f a) +{ + return ma_vec3f_init_3f( + -a.x, + -a.y, + -a.z + ); +} + +MA_API float ma_vec3f_dot(ma_vec3f a, ma_vec3f b) +{ + return a.x*b.x + a.y*b.y + a.z*b.z; +} + +MA_API float ma_vec3f_len2(ma_vec3f v) +{ + return ma_vec3f_dot(v, v); +} + +MA_API float ma_vec3f_len(ma_vec3f v) +{ + return (float)ma_sqrtd(ma_vec3f_len2(v)); +} + + + +MA_API float ma_vec3f_dist(ma_vec3f a, ma_vec3f b) +{ + return ma_vec3f_len(ma_vec3f_sub(a, b)); +} + +MA_API ma_vec3f ma_vec3f_normalize(ma_vec3f v) +{ + float invLen; + float len2 = ma_vec3f_len2(v); + if (len2 == 0) { + return ma_vec3f_init_3f(0, 0, 0); + } + + invLen = ma_rsqrtf(len2); + v.x *= invLen; + v.y *= invLen; + v.z *= invLen; + + return v; +} + +MA_API ma_vec3f ma_vec3f_cross(ma_vec3f a, ma_vec3f b) +{ + return ma_vec3f_init_3f( + a.y*b.z - a.z*b.y, + a.z*b.x - a.x*b.z, + a.x*b.y - a.y*b.x + ); +} + + +MA_API void ma_atomic_vec3f_init(ma_atomic_vec3f* v, ma_vec3f value) +{ + v->v = value; + v->lock = 0; /* Important this is initialized to 0. */ +} + +MA_API void ma_atomic_vec3f_set(ma_atomic_vec3f* v, ma_vec3f value) +{ + ma_spinlock_lock(&v->lock); + { + v->v = value; + } + ma_spinlock_unlock(&v->lock); +} + +MA_API ma_vec3f ma_atomic_vec3f_get(ma_atomic_vec3f* v) +{ + ma_vec3f r; + + ma_spinlock_lock(&v->lock); + { + r = v->v; + } + ma_spinlock_unlock(&v->lock); + + return r; +} + + + +static void ma_channel_map_apply_f32(float* pFramesOut, const ma_channel* pChannelMapOut, ma_uint32 channelsOut, const float* pFramesIn, const ma_channel* pChannelMapIn, ma_uint32 channelsIn, ma_uint64 frameCount, ma_channel_mix_mode mode, ma_mono_expansion_mode monoExpansionMode); +static ma_bool32 ma_is_spatial_channel_position(ma_channel channelPosition); + + +#ifndef MA_DEFAULT_SPEED_OF_SOUND +#define MA_DEFAULT_SPEED_OF_SOUND 343.3f +#endif + +/* +These vectors represent the direction that speakers are facing from the center point. They're used +for panning in the spatializer. Must be normalized. 
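+The 0.7071, 0.5774 and 0.3162/0.9487 entries below are 1/sqrt(2), 1/sqrt(3) and 1/sqrt(10), 3/sqrt(10) respectively, so every row is a unit vector.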
+*/ +static ma_vec3f g_maChannelDirections[MA_CHANNEL_POSITION_COUNT] = { + { 0.0f, 0.0f, -1.0f }, /* MA_CHANNEL_NONE */ + { 0.0f, 0.0f, -1.0f }, /* MA_CHANNEL_MONO */ + {-0.7071f, 0.0f, -0.7071f }, /* MA_CHANNEL_FRONT_LEFT */ + {+0.7071f, 0.0f, -0.7071f }, /* MA_CHANNEL_FRONT_RIGHT */ + { 0.0f, 0.0f, -1.0f }, /* MA_CHANNEL_FRONT_CENTER */ + { 0.0f, 0.0f, -1.0f }, /* MA_CHANNEL_LFE */ + {-0.7071f, 0.0f, +0.7071f }, /* MA_CHANNEL_BACK_LEFT */ + {+0.7071f, 0.0f, +0.7071f }, /* MA_CHANNEL_BACK_RIGHT */ + {-0.3162f, 0.0f, -0.9487f }, /* MA_CHANNEL_FRONT_LEFT_CENTER */ + {+0.3162f, 0.0f, -0.9487f }, /* MA_CHANNEL_FRONT_RIGHT_CENTER */ + { 0.0f, 0.0f, +1.0f }, /* MA_CHANNEL_BACK_CENTER */ + {-1.0f, 0.0f, 0.0f }, /* MA_CHANNEL_SIDE_LEFT */ + {+1.0f, 0.0f, 0.0f }, /* MA_CHANNEL_SIDE_RIGHT */ + { 0.0f, +1.0f, 0.0f }, /* MA_CHANNEL_TOP_CENTER */ + {-0.5774f, +0.5774f, -0.5774f }, /* MA_CHANNEL_TOP_FRONT_LEFT */ + { 0.0f, +0.7071f, -0.7071f }, /* MA_CHANNEL_TOP_FRONT_CENTER */ + {+0.5774f, +0.5774f, -0.5774f }, /* MA_CHANNEL_TOP_FRONT_RIGHT */ + {-0.5774f, +0.5774f, +0.5774f }, /* MA_CHANNEL_TOP_BACK_LEFT */ + { 0.0f, +0.7071f, +0.7071f }, /* MA_CHANNEL_TOP_BACK_CENTER */ + {+0.5774f, +0.5774f, +0.5774f }, /* MA_CHANNEL_TOP_BACK_RIGHT */ + { 0.0f, 0.0f, -1.0f }, /* MA_CHANNEL_AUX_0 */ + { 0.0f, 0.0f, -1.0f }, /* MA_CHANNEL_AUX_1 */ + { 0.0f, 0.0f, -1.0f }, /* MA_CHANNEL_AUX_2 */ + { 0.0f, 0.0f, -1.0f }, /* MA_CHANNEL_AUX_3 */ + { 0.0f, 0.0f, -1.0f }, /* MA_CHANNEL_AUX_4 */ + { 0.0f, 0.0f, -1.0f }, /* MA_CHANNEL_AUX_5 */ + { 0.0f, 0.0f, -1.0f }, /* MA_CHANNEL_AUX_6 */ + { 0.0f, 0.0f, -1.0f }, /* MA_CHANNEL_AUX_7 */ + { 0.0f, 0.0f, -1.0f }, /* MA_CHANNEL_AUX_8 */ + { 0.0f, 0.0f, -1.0f }, /* MA_CHANNEL_AUX_9 */ + { 0.0f, 0.0f, -1.0f }, /* MA_CHANNEL_AUX_10 */ + { 0.0f, 0.0f, -1.0f }, /* MA_CHANNEL_AUX_11 */ + { 0.0f, 0.0f, -1.0f }, /* MA_CHANNEL_AUX_12 */ + { 0.0f, 0.0f, -1.0f }, /* MA_CHANNEL_AUX_13 */ + { 0.0f, 0.0f, -1.0f }, /* MA_CHANNEL_AUX_14 */ + { 0.0f, 0.0f, -1.0f }, /* MA_CHANNEL_AUX_15 */ + { 0.0f, 0.0f, -1.0f }, /* MA_CHANNEL_AUX_16 */ + { 0.0f, 0.0f, -1.0f }, /* MA_CHANNEL_AUX_17 */ + { 0.0f, 0.0f, -1.0f }, /* MA_CHANNEL_AUX_18 */ + { 0.0f, 0.0f, -1.0f }, /* MA_CHANNEL_AUX_19 */ + { 0.0f, 0.0f, -1.0f }, /* MA_CHANNEL_AUX_20 */ + { 0.0f, 0.0f, -1.0f }, /* MA_CHANNEL_AUX_21 */ + { 0.0f, 0.0f, -1.0f }, /* MA_CHANNEL_AUX_22 */ + { 0.0f, 0.0f, -1.0f }, /* MA_CHANNEL_AUX_23 */ + { 0.0f, 0.0f, -1.0f }, /* MA_CHANNEL_AUX_24 */ + { 0.0f, 0.0f, -1.0f }, /* MA_CHANNEL_AUX_25 */ + { 0.0f, 0.0f, -1.0f }, /* MA_CHANNEL_AUX_26 */ + { 0.0f, 0.0f, -1.0f }, /* MA_CHANNEL_AUX_27 */ + { 0.0f, 0.0f, -1.0f }, /* MA_CHANNEL_AUX_28 */ + { 0.0f, 0.0f, -1.0f }, /* MA_CHANNEL_AUX_29 */ + { 0.0f, 0.0f, -1.0f }, /* MA_CHANNEL_AUX_30 */ + { 0.0f, 0.0f, -1.0f } /* MA_CHANNEL_AUX_31 */ +}; + +static ma_vec3f ma_get_channel_direction(ma_channel channel) +{ + if (channel >= MA_CHANNEL_POSITION_COUNT) { + return ma_vec3f_init_3f(0, 0, -1); + } else { + return g_maChannelDirections[channel]; + } +} + + + +static float ma_attenuation_inverse(float distance, float minDistance, float maxDistance, float rolloff) +{ + if (minDistance >= maxDistance) { + return 1; /* To avoid division by zero. Do not attenuate. */ + } + + return minDistance / (minDistance + rolloff * (ma_clamp(distance, minDistance, maxDistance) - minDistance)); +} + +static float ma_attenuation_linear(float distance, float minDistance, float maxDistance, float rolloff) +{ + if (minDistance >= maxDistance) { + return 1; /* To avoid division by zero. 
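(The denominator below is maxDistance - minDistance.)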
Do not attenuate. */ + } + + return 1 - rolloff * (ma_clamp(distance, minDistance, maxDistance) - minDistance) / (maxDistance - minDistance); +} + +static float ma_attenuation_exponential(float distance, float minDistance, float maxDistance, float rolloff) +{ + if (minDistance >= maxDistance) { + return 1; /* To avoid division by zero. Do not attenuate. */ + } + + return (float)ma_powd(ma_clamp(distance, minDistance, maxDistance) / minDistance, -rolloff); +} + + +/* +Doppler Effect calculation taken from the OpenAL spec, with two main differences: + + 1) The source to listener vector will have already been calculated at an earlier step so we can + just use that directly. We need only the position of the source relative to the origin. + + 2) We don't scale by a frequency because we actually just want the ratio which we'll plug straight + into the resampler directly. +*/ +static float ma_doppler_pitch(ma_vec3f relativePosition, ma_vec3f sourceVelocity, ma_vec3f listenVelocity, float speedOfSound, float dopplerFactor) +{ + float len; + float vls; + float vss; + + len = ma_vec3f_len(relativePosition); + + /* + There's a case where the position of the source will be right on top of the listener in which + case the length will be 0 and we'll end up with a division by zero. We can just return a ratio + of 1.0 in this case. This is not considered in the OpenAL spec, but is necessary. + */ + if (len == 0) { + return 1.0; + } + + vls = ma_vec3f_dot(relativePosition, listenVelocity) / len; + vss = ma_vec3f_dot(relativePosition, sourceVelocity) / len; + + vls = ma_min(vls, speedOfSound / dopplerFactor); + vss = ma_min(vss, speedOfSound / dopplerFactor); + + return (speedOfSound - dopplerFactor*vls) / (speedOfSound - dopplerFactor*vss); +} + + +static void ma_get_default_channel_map_for_spatializer(ma_channel* pChannelMap, size_t channelMapCap, ma_uint32 channelCount) +{ + /* + Special case for stereo. Want to default the left and right speakers to side left and side + right so that they're facing directly down the X axis rather than slightly forward. Not + doing this will result in sounds being quieter when behind the listener. This might + actually be good for some scenarios, but I don't think it's an appropriate default because + it can be a bit unexpected. + */ + if (channelCount == 2) { + pChannelMap[0] = MA_CHANNEL_SIDE_LEFT; + pChannelMap[1] = MA_CHANNEL_SIDE_RIGHT; + } else { + ma_channel_map_init_standard(ma_standard_channel_map_default, pChannelMap, channelMapCap, channelCount); + } +} + + +MA_API ma_spatializer_listener_config ma_spatializer_listener_config_init(ma_uint32 channelsOut) +{ + ma_spatializer_listener_config config; + + MA_ZERO_OBJECT(&config); + config.channelsOut = channelsOut; + config.pChannelMapOut = NULL; + config.handedness = ma_handedness_right; + config.worldUp = ma_vec3f_init_3f(0, 1, 0); + config.coneInnerAngleInRadians = 6.283185f; /* 360 degrees. */ + config.coneOuterAngleInRadians = 6.283185f; /* 360 degrees. */ + config.coneOuterGain = 0; + config.speedOfSound = 343.3f; /* Same as OpenAL. Used for doppler effect. 
*/ + + return config; +} + + +typedef struct +{ + size_t sizeInBytes; + size_t channelMapOutOffset; +} ma_spatializer_listener_heap_layout; + +static ma_result ma_spatializer_listener_get_heap_layout(const ma_spatializer_listener_config* pConfig, ma_spatializer_listener_heap_layout* pHeapLayout) +{ + MA_ASSERT(pHeapLayout != NULL); + + MA_ZERO_OBJECT(pHeapLayout); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + if (pConfig->channelsOut == 0) { + return MA_INVALID_ARGS; + } + + pHeapLayout->sizeInBytes = 0; + + /* Channel map. We always need this, even for passthroughs. */ + pHeapLayout->channelMapOutOffset = pHeapLayout->sizeInBytes; + pHeapLayout->sizeInBytes += ma_align_64(sizeof(*pConfig->pChannelMapOut) * pConfig->channelsOut); + + return MA_SUCCESS; +} + + +MA_API ma_result ma_spatializer_listener_get_heap_size(const ma_spatializer_listener_config* pConfig, size_t* pHeapSizeInBytes) +{ + ma_result result; + ma_spatializer_listener_heap_layout heapLayout; + + if (pHeapSizeInBytes == NULL) { + return MA_INVALID_ARGS; + } + + *pHeapSizeInBytes = 0; + + result = ma_spatializer_listener_get_heap_layout(pConfig, &heapLayout); + if (result != MA_SUCCESS) { + return result; + } + + *pHeapSizeInBytes = heapLayout.sizeInBytes; + + return MA_SUCCESS; +} + +MA_API ma_result ma_spatializer_listener_init_preallocated(const ma_spatializer_listener_config* pConfig, void* pHeap, ma_spatializer_listener* pListener) +{ + ma_result result; + ma_spatializer_listener_heap_layout heapLayout; + + if (pListener == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pListener); + + result = ma_spatializer_listener_get_heap_layout(pConfig, &heapLayout); + if (result != MA_SUCCESS) { + return result; + } + + pListener->_pHeap = pHeap; + MA_ZERO_MEMORY(pHeap, heapLayout.sizeInBytes); + + pListener->config = *pConfig; + ma_atomic_vec3f_init(&pListener->position, ma_vec3f_init_3f(0, 0, 0)); + ma_atomic_vec3f_init(&pListener->direction, ma_vec3f_init_3f(0, 0, -1)); + ma_atomic_vec3f_init(&pListener->velocity, ma_vec3f_init_3f(0, 0, 0)); + pListener->isEnabled = MA_TRUE; + + /* Swap the forward direction if we're left handed (it was initialized based on right handed). */ + if (pListener->config.handedness == ma_handedness_left) { + ma_vec3f negDir = ma_vec3f_neg(ma_spatializer_listener_get_direction(pListener)); + ma_spatializer_listener_set_direction(pListener, negDir.x, negDir.y, negDir.z); + } + + + /* We must always have a valid channel map. */ + pListener->config.pChannelMapOut = (ma_channel*)ma_offset_ptr(pHeap, heapLayout.channelMapOutOffset); + + /* Use a slightly different default channel map for stereo. 
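+    (That is, MA_CHANNEL_SIDE_LEFT/MA_CHANNEL_SIDE_RIGHT rather than front left/right, as set up by
+    ma_get_default_channel_map_for_spatializer above, so the two speakers face straight down the X
+    axis and sounds behind the listener aren't made quieter than sounds in front.)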
*/ + if (pConfig->pChannelMapOut == NULL) { + ma_get_default_channel_map_for_spatializer(pListener->config.pChannelMapOut, pConfig->channelsOut, pConfig->channelsOut); + } else { + ma_channel_map_copy_or_default(pListener->config.pChannelMapOut, pConfig->channelsOut, pConfig->pChannelMapOut, pConfig->channelsOut); + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_spatializer_listener_init(const ma_spatializer_listener_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_spatializer_listener* pListener) +{ + ma_result result; + size_t heapSizeInBytes; + void* pHeap; + + result = ma_spatializer_listener_get_heap_size(pConfig, &heapSizeInBytes); + if (result != MA_SUCCESS) { + return result; + } + + if (heapSizeInBytes > 0) { + pHeap = ma_malloc(heapSizeInBytes, pAllocationCallbacks); + if (pHeap == NULL) { + return MA_OUT_OF_MEMORY; + } + } else { + pHeap = NULL; + } + + result = ma_spatializer_listener_init_preallocated(pConfig, pHeap, pListener); + if (result != MA_SUCCESS) { + ma_free(pHeap, pAllocationCallbacks); + return result; + } + + pListener->_ownsHeap = MA_TRUE; + return MA_SUCCESS; +} + +MA_API void ma_spatializer_listener_uninit(ma_spatializer_listener* pListener, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pListener == NULL) { + return; + } + + if (pListener->_ownsHeap) { + ma_free(pListener->_pHeap, pAllocationCallbacks); + } +} + +MA_API ma_channel* ma_spatializer_listener_get_channel_map(ma_spatializer_listener* pListener) +{ + if (pListener == NULL) { + return NULL; + } + + return pListener->config.pChannelMapOut; +} + +MA_API void ma_spatializer_listener_set_cone(ma_spatializer_listener* pListener, float innerAngleInRadians, float outerAngleInRadians, float outerGain) +{ + if (pListener == NULL) { + return; + } + + pListener->config.coneInnerAngleInRadians = innerAngleInRadians; + pListener->config.coneOuterAngleInRadians = outerAngleInRadians; + pListener->config.coneOuterGain = outerGain; +} + +MA_API void ma_spatializer_listener_get_cone(const ma_spatializer_listener* pListener, float* pInnerAngleInRadians, float* pOuterAngleInRadians, float* pOuterGain) +{ + if (pListener == NULL) { + return; + } + + if (pInnerAngleInRadians != NULL) { + *pInnerAngleInRadians = pListener->config.coneInnerAngleInRadians; + } + + if (pOuterAngleInRadians != NULL) { + *pOuterAngleInRadians = pListener->config.coneOuterAngleInRadians; + } + + if (pOuterGain != NULL) { + *pOuterGain = pListener->config.coneOuterGain; + } +} + +MA_API void ma_spatializer_listener_set_position(ma_spatializer_listener* pListener, float x, float y, float z) +{ + if (pListener == NULL) { + return; + } + + ma_atomic_vec3f_set(&pListener->position, ma_vec3f_init_3f(x, y, z)); +} + +MA_API ma_vec3f ma_spatializer_listener_get_position(const ma_spatializer_listener* pListener) +{ + if (pListener == NULL) { + return ma_vec3f_init_3f(0, 0, 0); + } + + return ma_atomic_vec3f_get((ma_atomic_vec3f*)&pListener->position); /* Naughty const-cast. It's just for atomically loading the vec3 which should be safe. 
*/ +} + +MA_API void ma_spatializer_listener_set_direction(ma_spatializer_listener* pListener, float x, float y, float z) +{ + if (pListener == NULL) { + return; + } + + ma_atomic_vec3f_set(&pListener->direction, ma_vec3f_init_3f(x, y, z)); +} + +MA_API ma_vec3f ma_spatializer_listener_get_direction(const ma_spatializer_listener* pListener) +{ + if (pListener == NULL) { + return ma_vec3f_init_3f(0, 0, -1); + } + + return ma_atomic_vec3f_get((ma_atomic_vec3f*)&pListener->direction); /* Naughty const-cast. It's just for atomically loading the vec3 which should be safe. */ +} + +MA_API void ma_spatializer_listener_set_velocity(ma_spatializer_listener* pListener, float x, float y, float z) +{ + if (pListener == NULL) { + return; + } + + ma_atomic_vec3f_set(&pListener->velocity, ma_vec3f_init_3f(x, y, z)); +} + +MA_API ma_vec3f ma_spatializer_listener_get_velocity(const ma_spatializer_listener* pListener) +{ + if (pListener == NULL) { + return ma_vec3f_init_3f(0, 0, 0); + } + + return ma_atomic_vec3f_get((ma_atomic_vec3f*)&pListener->velocity); /* Naughty const-cast. It's just for atomically loading the vec3 which should be safe. */ +} + +MA_API void ma_spatializer_listener_set_speed_of_sound(ma_spatializer_listener* pListener, float speedOfSound) +{ + if (pListener == NULL) { + return; + } + + pListener->config.speedOfSound = speedOfSound; +} + +MA_API float ma_spatializer_listener_get_speed_of_sound(const ma_spatializer_listener* pListener) +{ + if (pListener == NULL) { + return 0; + } + + return pListener->config.speedOfSound; +} + +MA_API void ma_spatializer_listener_set_world_up(ma_spatializer_listener* pListener, float x, float y, float z) +{ + if (pListener == NULL) { + return; + } + + pListener->config.worldUp = ma_vec3f_init_3f(x, y, z); +} + +MA_API ma_vec3f ma_spatializer_listener_get_world_up(const ma_spatializer_listener* pListener) +{ + if (pListener == NULL) { + return ma_vec3f_init_3f(0, 1, 0); + } + + return pListener->config.worldUp; +} + +MA_API void ma_spatializer_listener_set_enabled(ma_spatializer_listener* pListener, ma_bool32 isEnabled) +{ + if (pListener == NULL) { + return; + } + + pListener->isEnabled = isEnabled; +} + +MA_API ma_bool32 ma_spatializer_listener_is_enabled(const ma_spatializer_listener* pListener) +{ + if (pListener == NULL) { + return MA_FALSE; + } + + return pListener->isEnabled; +} + + + + +MA_API ma_spatializer_config ma_spatializer_config_init(ma_uint32 channelsIn, ma_uint32 channelsOut) +{ + ma_spatializer_config config; + + MA_ZERO_OBJECT(&config); + config.channelsIn = channelsIn; + config.channelsOut = channelsOut; + config.pChannelMapIn = NULL; + config.attenuationModel = ma_attenuation_model_inverse; + config.positioning = ma_positioning_absolute; + config.handedness = ma_handedness_right; + config.minGain = 0; + config.maxGain = 1; + config.minDistance = 1; + config.maxDistance = MA_FLT_MAX; + config.rolloff = 1; + config.coneInnerAngleInRadians = 6.283185f; /* 360 degrees. */ + config.coneOuterAngleInRadians = 6.283185f; /* 360 degrees. */ + config.coneOuterGain = 0.0f; + config.dopplerFactor = 1; + config.directionalAttenuationFactor = 1; + config.minSpatializationChannelGain = 0.2f; + config.gainSmoothTimeInFrames = 360; /* 7.5ms @ 48K. 
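+                                          That is, 360 frames / 48000 frames per second = 0.0075
+                                          seconds. Note this is a frame count, so the effective
+                                          smoothing time scales with the sample rate in use.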
*/ + + return config; +} + + +static ma_gainer_config ma_spatializer_gainer_config_init(const ma_spatializer_config* pConfig) +{ + MA_ASSERT(pConfig != NULL); + return ma_gainer_config_init(pConfig->channelsOut, pConfig->gainSmoothTimeInFrames); +} + +static ma_result ma_spatializer_validate_config(const ma_spatializer_config* pConfig) +{ + MA_ASSERT(pConfig != NULL); + + if (pConfig->channelsIn == 0 || pConfig->channelsOut == 0) { + return MA_INVALID_ARGS; + } + + return MA_SUCCESS; +} + +typedef struct +{ + size_t sizeInBytes; + size_t channelMapInOffset; + size_t newChannelGainsOffset; + size_t gainerOffset; +} ma_spatializer_heap_layout; + +static ma_result ma_spatializer_get_heap_layout(const ma_spatializer_config* pConfig, ma_spatializer_heap_layout* pHeapLayout) +{ + ma_result result; + + MA_ASSERT(pHeapLayout != NULL); + + MA_ZERO_OBJECT(pHeapLayout); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + result = ma_spatializer_validate_config(pConfig); + if (result != MA_SUCCESS) { + return result; + } + + pHeapLayout->sizeInBytes = 0; + + /* Channel map. */ + pHeapLayout->channelMapInOffset = MA_SIZE_MAX; /* <-- MA_SIZE_MAX indicates no allocation necessary. */ + if (pConfig->pChannelMapIn != NULL) { + pHeapLayout->channelMapInOffset = pHeapLayout->sizeInBytes; + pHeapLayout->sizeInBytes += ma_align_64(sizeof(*pConfig->pChannelMapIn) * pConfig->channelsIn); + } + + /* New channel gains for output. */ + pHeapLayout->newChannelGainsOffset = pHeapLayout->sizeInBytes; + pHeapLayout->sizeInBytes += ma_align_64(sizeof(float) * pConfig->channelsOut); + + /* Gainer. */ + { + size_t gainerHeapSizeInBytes; + ma_gainer_config gainerConfig; + + gainerConfig = ma_spatializer_gainer_config_init(pConfig); + + result = ma_gainer_get_heap_size(&gainerConfig, &gainerHeapSizeInBytes); + if (result != MA_SUCCESS) { + return result; + } + + pHeapLayout->gainerOffset = pHeapLayout->sizeInBytes; + pHeapLayout->sizeInBytes += ma_align_64(gainerHeapSizeInBytes); + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_spatializer_get_heap_size(const ma_spatializer_config* pConfig, size_t* pHeapSizeInBytes) +{ + ma_result result; + ma_spatializer_heap_layout heapLayout; + + if (pHeapSizeInBytes == NULL) { + return MA_INVALID_ARGS; + } + + *pHeapSizeInBytes = 0; /* Safety. 
*/ + + result = ma_spatializer_get_heap_layout(pConfig, &heapLayout); + if (result != MA_SUCCESS) { + return result; + } + + *pHeapSizeInBytes = heapLayout.sizeInBytes; + + return MA_SUCCESS; +} + + +MA_API ma_result ma_spatializer_init_preallocated(const ma_spatializer_config* pConfig, void* pHeap, ma_spatializer* pSpatializer) +{ + ma_result result; + ma_spatializer_heap_layout heapLayout; + ma_gainer_config gainerConfig; + + if (pSpatializer == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pSpatializer); + + if (pConfig == NULL || pHeap == NULL) { + return MA_INVALID_ARGS; + } + + result = ma_spatializer_get_heap_layout(pConfig, &heapLayout); + if (result != MA_SUCCESS) { + return result; + } + + pSpatializer->_pHeap = pHeap; + MA_ZERO_MEMORY(pHeap, heapLayout.sizeInBytes); + + pSpatializer->channelsIn = pConfig->channelsIn; + pSpatializer->channelsOut = pConfig->channelsOut; + pSpatializer->attenuationModel = pConfig->attenuationModel; + pSpatializer->positioning = pConfig->positioning; + pSpatializer->handedness = pConfig->handedness; + pSpatializer->minGain = pConfig->minGain; + pSpatializer->maxGain = pConfig->maxGain; + pSpatializer->minDistance = pConfig->minDistance; + pSpatializer->maxDistance = pConfig->maxDistance; + pSpatializer->rolloff = pConfig->rolloff; + pSpatializer->coneInnerAngleInRadians = pConfig->coneInnerAngleInRadians; + pSpatializer->coneOuterAngleInRadians = pConfig->coneOuterAngleInRadians; + pSpatializer->coneOuterGain = pConfig->coneOuterGain; + pSpatializer->dopplerFactor = pConfig->dopplerFactor; + pSpatializer->minSpatializationChannelGain = pConfig->minSpatializationChannelGain; + pSpatializer->directionalAttenuationFactor = pConfig->directionalAttenuationFactor; + pSpatializer->gainSmoothTimeInFrames = pConfig->gainSmoothTimeInFrames; + ma_atomic_vec3f_init(&pSpatializer->position, ma_vec3f_init_3f(0, 0, 0)); + ma_atomic_vec3f_init(&pSpatializer->direction, ma_vec3f_init_3f(0, 0, -1)); + ma_atomic_vec3f_init(&pSpatializer->velocity, ma_vec3f_init_3f(0, 0, 0)); + pSpatializer->dopplerPitch = 1; + + /* Swap the forward direction if we're left handed (it was initialized based on right handed). */ + if (pSpatializer->handedness == ma_handedness_left) { + ma_vec3f negDir = ma_vec3f_neg(ma_spatializer_get_direction(pSpatializer)); + ma_spatializer_set_direction(pSpatializer, negDir.x, negDir.y, negDir.z); + } + + /* Channel map. This will be on the heap. */ + if (pConfig->pChannelMapIn != NULL) { + pSpatializer->pChannelMapIn = (ma_channel*)ma_offset_ptr(pHeap, heapLayout.channelMapInOffset); + ma_channel_map_copy_or_default(pSpatializer->pChannelMapIn, pSpatializer->channelsIn, pConfig->pChannelMapIn, pSpatializer->channelsIn); + } + + /* New channel gains for output channels. */ + pSpatializer->pNewChannelGainsOut = (float*)ma_offset_ptr(pHeap, heapLayout.newChannelGainsOffset); + + /* Gainer. */ + gainerConfig = ma_spatializer_gainer_config_init(pConfig); + + result = ma_gainer_init_preallocated(&gainerConfig, ma_offset_ptr(pHeap, heapLayout.gainerOffset), &pSpatializer->gainer); + if (result != MA_SUCCESS) { + return result; /* Failed to initialize the gainer. */ + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_spatializer_init(const ma_spatializer_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_spatializer* pSpatializer) +{ + ma_result result; + size_t heapSizeInBytes; + void* pHeap; + + /* We'll need a heap allocation to retrieve the size. 
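+    That is, ma_spatializer_get_heap_size() below computes how many bytes are required, and the
+    resulting allocation is handed to ma_spatializer_init_preallocated(). Callers that manage their
+    own memory can follow the same two-step pattern. A rough sketch (error checks omitted; the
+    choice of allocator is up to the caller):
+
+        ma_spatializer_config config = ma_spatializer_config_init(1, 2);  /* mono in, stereo out */
+        ma_spatializer spatializer;
+        size_t heapSize;
+        void* pHeap;
+
+        ma_spatializer_get_heap_size(&config, &heapSize);
+        pHeap = malloc(heapSize);
+        ma_spatializer_init_preallocated(&config, pHeap, &spatializer);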
*/
+    result = ma_spatializer_get_heap_size(pConfig, &heapSizeInBytes);
+    if (result != MA_SUCCESS) {
+        return result;
+    }
+
+    if (heapSizeInBytes > 0) {
+        pHeap = ma_malloc(heapSizeInBytes, pAllocationCallbacks);
+        if (pHeap == NULL) {
+            return MA_OUT_OF_MEMORY;
+        }
+    } else {
+        pHeap = NULL;
+    }
+
+    result = ma_spatializer_init_preallocated(pConfig, pHeap, pSpatializer);
+    if (result != MA_SUCCESS) {
+        ma_free(pHeap, pAllocationCallbacks);
+        return result;
+    }
+
+    pSpatializer->_ownsHeap = MA_TRUE;
+    return MA_SUCCESS;
+}
+
+MA_API void ma_spatializer_uninit(ma_spatializer* pSpatializer, const ma_allocation_callbacks* pAllocationCallbacks)
+{
+    if (pSpatializer == NULL) {
+        return;
+    }
+
+    ma_gainer_uninit(&pSpatializer->gainer, pAllocationCallbacks);
+
+    if (pSpatializer->_ownsHeap) {
+        ma_free(pSpatializer->_pHeap, pAllocationCallbacks);
+    }
+}
+
+static float ma_calculate_angular_gain(ma_vec3f dirA, ma_vec3f dirB, float coneInnerAngleInRadians, float coneOuterAngleInRadians, float coneOuterGain)
+{
+    /*
+    Angular attenuation.
+
+    Unlike distance gain, the math for this is not specified by the OpenAL spec so we'll just go ahead and figure
+    this out for ourselves at the expense of possibly being inconsistent with other implementations.
+
+    To do cone attenuation, I'm just using the same math that we'd use to implement a basic spotlight in OpenGL. We
+    just need to get the direction from the source to the listener and then do a dot product against that and the
+    direction of the spotlight. Then we just compare that dot product against the cosine of the inner and outer
+    angles. If the dot product is greater than the cosine of the inner angle, we just use a gain of 1. If it's
+    less than the cosine of the outer angle, we just use coneOuterGain. Otherwise we linearly interpolate between
+    1 and coneOuterGain.
+    */
+    if (coneInnerAngleInRadians < 6.283185f) {
+        float angularGain = 1;
+        float cutoffInner = (float)ma_cosd(coneInnerAngleInRadians*0.5f);
+        float cutoffOuter = (float)ma_cosd(coneOuterAngleInRadians*0.5f);
+        float d;
+
+        d = ma_vec3f_dot(dirA, dirB);
+
+        if (d > cutoffInner) {
+            /* It's inside the inner angle. */
+            angularGain = 1;
+        } else {
+            /* It's outside the inner angle. */
+            if (d > cutoffOuter) {
+                /* It's between the inner and outer angle. We need to linearly interpolate between 1 and coneOuterGain. */
+                angularGain = ma_mix_f32(coneOuterGain, 1, (d - cutoffOuter) / (cutoffInner - cutoffOuter));
+            } else {
+                /* It's outside the outer angle. */
+                angularGain = coneOuterGain;
+            }
+        }
+
+        /*printf("d = %f; cutoffInner = %f; cutoffOuter = %f; angularGain = %f\n", d, cutoffInner, cutoffOuter, angularGain);*/
+        return angularGain;
+    } else {
+        /* Inner angle is 360 degrees so no need to do any attenuation. */
+        return 1;
+    }
+}
+
+MA_API ma_result ma_spatializer_process_pcm_frames(ma_spatializer* pSpatializer, ma_spatializer_listener* pListener, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount)
+{
+    ma_channel* pChannelMapIn;
+    ma_channel* pChannelMapOut;
+
+    if (pSpatializer == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    /* Only grab the channel maps after validating the pointers. pListener is allowed to be NULL here. */
+    pChannelMapIn  = pSpatializer->pChannelMapIn;
+    pChannelMapOut = (pListener != NULL) ? pListener->config.pChannelMapOut : NULL;
+
+    /* If we're not spatializing we need to run an optimized path. */
+    if (ma_atomic_load_i32(&pSpatializer->attenuationModel) == ma_attenuation_model_none) {
+        if (ma_spatializer_listener_is_enabled(pListener)) {
+            /* No attenuation is required, but we'll need to do some channel conversion. */
+            if (pSpatializer->channelsIn == pSpatializer->channelsOut) {
+                ma_copy_pcm_frames(pFramesOut, pFramesIn, frameCount, ma_format_f32, pSpatializer->channelsIn);
+            } else {
+                ma_channel_map_apply_f32((float*)pFramesOut, pChannelMapOut, pSpatializer->channelsOut, (const float*)pFramesIn, pChannelMapIn, pSpatializer->channelsIn, frameCount, ma_channel_mix_mode_rectangular, ma_mono_expansion_mode_default);    /* Safe casts to float* because f32 is the only supported format. */
+            }
+        } else {
+            /* The listener is disabled. Output silence. */
+            ma_silence_pcm_frames(pFramesOut, frameCount, ma_format_f32, pSpatializer->channelsOut);
+        }
+
+        /*
+        We're not doing attenuation so don't bother with doppler for now. I'm not sure if this is
+        the correct thinking so might need to review this later.
+        */
+        pSpatializer->dopplerPitch = 1;
+    } else {
+        /*
+        Let's first determine which listener the sound is closest to. Need to keep in mind that we
+        might not have a world or any listeners, in which case we just spatialize based on the
+        listener being positioned at the origin (0, 0, 0).
+        */
+        ma_vec3f relativePosNormalized;
+        ma_vec3f relativePos;   /* The position relative to the listener. */
+        ma_vec3f relativeDir;   /* The direction of the sound, relative to the listener. */
+        ma_vec3f listenerVel;   /* The velocity of the listener. For doppler pitch calculation. */
+        float speedOfSound;
+        float distance = 0;
+        float gain = 1;
+        ma_uint32 iChannel;
+        const ma_uint32 channelsOut = pSpatializer->channelsOut;
+        const ma_uint32 channelsIn  = pSpatializer->channelsIn;
+        float minDistance = ma_spatializer_get_min_distance(pSpatializer);
+        float maxDistance = ma_spatializer_get_max_distance(pSpatializer);
+        float rolloff = ma_spatializer_get_rolloff(pSpatializer);
+        float dopplerFactor = ma_spatializer_get_doppler_factor(pSpatializer);
+
+        /*
+        We'll need the listener velocity for doppler pitch calculations. The speed of sound is
+        defined by the listener, so we'll grab that here too.
+        */
+        if (pListener != NULL) {
+            listenerVel  = ma_spatializer_listener_get_velocity(pListener);
+            speedOfSound = pListener->config.speedOfSound;
+        } else {
+            listenerVel  = ma_vec3f_init_3f(0, 0, 0);
+            speedOfSound = MA_DEFAULT_SPEED_OF_SOUND;
+        }
+
+        if (pListener == NULL || ma_spatializer_get_positioning(pSpatializer) == ma_positioning_relative) {
+            /* There's no listener or we're using relative positioning. */
+            relativePos = ma_spatializer_get_position(pSpatializer);
+            relativeDir = ma_spatializer_get_direction(pSpatializer);
+        } else {
+            /*
+            We've found a listener and we're using absolute positioning. We need to transform the
+            sound's position and direction so that it's relative to the listener. Later on we'll use
+            this for determining the factors to apply to each channel for the panning effect.
+            */
+            ma_spatializer_get_relative_position_and_direction(pSpatializer, pListener, &relativePos, &relativeDir);
+        }
+
+        distance = ma_vec3f_len(relativePos);
+
+        /* We've gathered the data, so now we can apply some spatialization. */
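+
+        /*
+        With d = clamp(distance, minDistance, maxDistance), the attenuation models implemented
+        earlier work out to:
+
+            inverse:     gain = minDistance / (minDistance + rolloff*(d - minDistance))
+            linear:      gain = 1 - rolloff*(d - minDistance) / (maxDistance - minDistance)
+            exponential: gain = (d / minDistance)^-rolloff
+        */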
+        switch (ma_spatializer_get_attenuation_model(pSpatializer)) {
+            case ma_attenuation_model_inverse:
+            {
+                gain = ma_attenuation_inverse(distance, minDistance, maxDistance, rolloff);
+            } break;
+            case ma_attenuation_model_linear:
+            {
+                gain = ma_attenuation_linear(distance, minDistance, maxDistance, rolloff);
+            } break;
+            case ma_attenuation_model_exponential:
+            {
+                gain = ma_attenuation_exponential(distance, minDistance, maxDistance, rolloff);
+            } break;
+            case ma_attenuation_model_none:
+            default:
+            {
+                gain = 1;
+            } break;
+        }
+
+        /* Normalize the position. */
+        if (distance > 0.001f) {
+            float distanceInv = 1/distance;
+            relativePosNormalized    = relativePos;
+            relativePosNormalized.x *= distanceInv;
+            relativePosNormalized.y *= distanceInv;
+            relativePosNormalized.z *= distanceInv;
+        } else {
+            distance = 0;
+            relativePosNormalized = ma_vec3f_init_3f(0, 0, 0);
+        }
+
+        /*
+        Angular attenuation.
+
+        Unlike distance gain, the math for this is not specified by the OpenAL spec so we'll just go ahead and figure
+        this out for ourselves at the expense of possibly being inconsistent with other implementations.
+
+        To do cone attenuation, I'm just using the same math that we'd use to implement a basic spotlight in OpenGL. We
+        just need to get the direction from the source to the listener and then do a dot product against that and the
+        direction of the spotlight. Then we just compare that dot product against the cosine of the inner and outer
+        angles. If the dot product is greater than the cosine of the inner angle, we just use a gain of 1. If it's
+        less than the cosine of the outer angle, we just use coneOuterGain. Otherwise we linearly interpolate between
+        1 and coneOuterGain.
+        */
+        if (distance > 0) {
+            /* Source angular gain. */
+            float spatializerConeInnerAngle;
+            float spatializerConeOuterAngle;
+            float spatializerConeOuterGain;
+            ma_spatializer_get_cone(pSpatializer, &spatializerConeInnerAngle, &spatializerConeOuterAngle, &spatializerConeOuterGain);
+
+            gain *= ma_calculate_angular_gain(relativeDir, ma_vec3f_neg(relativePosNormalized), spatializerConeInnerAngle, spatializerConeOuterAngle, spatializerConeOuterGain);
+
+            /*
+            We're supporting angular gain on the listener as well for those who want to reduce the volume of sounds that
+            are positioned behind the listener. On default settings, this will have no effect.
+            */
+            if (pListener != NULL && pListener->config.coneInnerAngleInRadians < 6.283185f) {
+                ma_vec3f listenerDirection;
+                float listenerInnerAngle;
+                float listenerOuterAngle;
+                float listenerOuterGain;
+
+                if (pListener->config.handedness == ma_handedness_right) {
+                    listenerDirection = ma_vec3f_init_3f(0, 0, -1);
+                } else {
+                    listenerDirection = ma_vec3f_init_3f(0, 0, +1);
+                }
+
+                listenerInnerAngle = pListener->config.coneInnerAngleInRadians;
+                listenerOuterAngle = pListener->config.coneOuterAngleInRadians;
+                listenerOuterGain  = pListener->config.coneOuterGain;
+
+                gain *= ma_calculate_angular_gain(listenerDirection, relativePosNormalized, listenerInnerAngle, listenerOuterAngle, listenerOuterGain);
+            }
+        } else {
+            /* The sound is right on top of the listener. Don't do any angular attenuation. */
+        }
+
+
+        /* Clamp the gain. */
+        gain = ma_clamp(gain, ma_spatializer_get_min_gain(pSpatializer), ma_spatializer_get_max_gain(pSpatializer));
+
+        /*
+        The gain needs to be applied per-channel here. The spatialization code below will be changing the per-channel
+        gains which will then eventually be passed into the gainer which will deal with smoothing the gain transitions
+        to avoid harsh changes in gain.
+        */
+        for (iChannel = 0; iChannel < channelsOut; iChannel += 1) {
+            pSpatializer->pNewChannelGainsOut[iChannel] = gain;
+        }
+
+        /*
+        Convert to our output channel count. If the listener is disabled we just output silence here. We cannot ignore
+        the whole section of code here because we need to update some internal spatialization state.
+        */
+        if (ma_spatializer_listener_is_enabled(pListener)) {
+            ma_channel_map_apply_f32((float*)pFramesOut, pChannelMapOut, channelsOut, (const float*)pFramesIn, pChannelMapIn, channelsIn, frameCount, ma_channel_mix_mode_rectangular, ma_mono_expansion_mode_default);
+        } else {
+            ma_silence_pcm_frames(pFramesOut, frameCount, ma_format_f32, pSpatializer->channelsOut);
+        }
+
+
+        /*
+        Panning. This is where we'll apply the gain and convert to the output channel count. We have an optimized path for
+        when we're converting to a mono stream. In that case we don't really need to do any panning - we just apply the
+        gain to the final output.
+        */
+        /*printf("distance=%f; gain=%f\n", distance, gain);*/
+
+        /* We must have a valid channel map here to ensure we spatialize properly. */
+        MA_ASSERT(pChannelMapOut != NULL);
+
+        /*
+        We're not converting to mono so we'll want to apply some panning. This is where the feeling of something being
+        to the left, right, in front or behind the listener is calculated. I'm just using a basic model here. Note that
+        the code below is not based on any specific algorithm. I'm just implementing this off the top of my head and
+        seeing how it goes. There might be better ways to do this.
+
+        To determine the direction of the sound relative to a speaker I'm using dot products. Each speaker is given a
+        direction. For example, the left channel in a stereo system will be -1 on the X axis and the right channel will
+        be +1 on the X axis. A dot product is performed against the direction vector of the channel and the normalized
+        position of the sound.
+        */
+
+        /*
+        Calculate our per-channel gains. We do this based on the normalized relative position of the sound and its
+        relation to the direction of the channel.
+        */
+        if (distance > 0) {
+            ma_vec3f unitPos = relativePos;
+            float distanceInv = 1/distance;
+            unitPos.x *= distanceInv;
+            unitPos.y *= distanceInv;
+            unitPos.z *= distanceInv;
+
+            for (iChannel = 0; iChannel < channelsOut; iChannel += 1) {
+                ma_channel channelOut;
+                float d;
+                float dMin;
+
+                channelOut = ma_channel_map_get_channel(pChannelMapOut, channelsOut, iChannel);
+                if (ma_is_spatial_channel_position(channelOut)) {
+                    d = ma_mix_f32_fast(1, ma_vec3f_dot(unitPos, ma_get_channel_direction(channelOut)), ma_spatializer_get_directional_attenuation_factor(pSpatializer));
+                } else {
+                    d = 1;  /* It's not a spatial channel so there's no real notion of direction. */
+                }
+
+                /*
+                In my testing, if the panning effect is too aggressive it makes spatialization feel uncomfortable.
+                The "dMin" variable below is used to control the aggressiveness of the panning effect. When set to
+                0, panning will be most extreme and any sounds that are positioned on the opposite side of the
+                speaker will be completely silent from that speaker. Not only does this feel uncomfortable, it
+                doesn't even remotely represent the real world at all because sounds that come from your right side
+                are still clearly audible from your left side. Setting "dMin" to 1 will result in no panning at
+                all, which is also not ideal. By setting it to something greater than 0, the spatialization effect
+                becomes much less dramatic and a lot more bearable.
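+
+                For example, with the default minSpatializationChannelGain of 0.2f, a sound sitting
+                directly opposite a speaker is still mixed into that speaker at a gain of 0.2 rather
+                than being silenced entirely.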
+
+                Summary: 0 = more extreme panning; 1 = no panning.
+                */
+                dMin = pSpatializer->minSpatializationChannelGain;
+
+                /*
+                At this point, "d" will be positive if the sound is on the same side as the channel and negative if
+                it's on the opposite side. It will be in the range of -1..1. There are two ways I can think of to
+                calculate a panning value. The first is to simply convert it to 0..1, however this has a problem
+                which I'm not entirely happy with. Considering a stereo system, when a sound is positioned right
+                in front of the listener it'll result in each speaker getting a gain of 0.5. I don't know if I like
+                the idea of having a scaling factor of 0.5 being applied to a sound when it's sitting right in front
+                of the listener. I would intuitively expect that to be played at full volume, or close to it.
+
+                The second idea I think of is to only apply a reduction in gain when the sound is on the opposite
+                side of the speaker. That is, reduce the gain only when the dot product is negative. The problem
+                with this is that there will not be any attenuation as the sound sweeps around the 180 degrees
+                where the dot product is positive. The idea with this option is that you leave the gain at 1 when
+                the sound is being played on the same side as the speaker and then you just reduce the volume when
+                the sound is on the other side.
+
+                To summarize, I think the first option should give a better sense of spatialization, but the second
+                option is better for preserving the sound's power.
+
+                UPDATE: In my testing, I find the first option to sound better. You can feel the sense of space a
+                bit better, but you can also hear the reduction in volume when it's right in front.
+                */
+                #if 1
+                {
+                    /*
+                    Scale the dot product from -1..1 to 0..1. Will result in a sound directly in front losing power
+                    by being played at 0.5 gain.
+                    */
+                    d = (d + 1) * 0.5f;  /* -1..1 to 0..1 */
+                    d = ma_max(d, dMin);
+                    pSpatializer->pNewChannelGainsOut[iChannel] *= d;
+                }
+                #else
+                {
+                    /*
+                    Only reduce the volume of the sound if it's on the opposite side. This path keeps the volume more
+                    consistent, but comes at the expense of a worse sense of space and positioning.
+                    */
+                    if (d < 0) {
+                        d += 1; /* Move into the positive range. */
+                        d = ma_max(d, dMin);
+                        pSpatializer->pNewChannelGainsOut[iChannel] *= d;   /* <-- Same gain array as the #if branch above. */
+                    }
+                }
+                #endif
+            }
+        } else {
+            /* Assume the sound is right on top of us. Don't do any panning. */
+        }
+
+        /* Now we need to apply the volume to each channel. This needs to run through the gainer to ensure we get a smooth volume transition. */
+        ma_gainer_set_gains(&pSpatializer->gainer, pSpatializer->pNewChannelGainsOut);
+        ma_gainer_process_pcm_frames(&pSpatializer->gainer, pFramesOut, pFramesOut, frameCount);
+
+        /*
+        Before leaving we'll want to update our doppler pitch so that the caller can apply some
+        pitch shifting if they desire. Note that we need to negate the relative position here
+        because the doppler calculation needs to be source-to-listener, but ours is listener-to-
+        source.
+ */ + if (dopplerFactor > 0) { + pSpatializer->dopplerPitch = ma_doppler_pitch(ma_vec3f_sub(ma_spatializer_listener_get_position(pListener), ma_spatializer_get_position(pSpatializer)), ma_spatializer_get_velocity(pSpatializer), listenerVel, speedOfSound, dopplerFactor); + } else { + pSpatializer->dopplerPitch = 1; + } + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_spatializer_set_master_volume(ma_spatializer* pSpatializer, float volume) +{ + if (pSpatializer == NULL) { + return MA_INVALID_ARGS; + } + + return ma_gainer_set_master_volume(&pSpatializer->gainer, volume); +} + +MA_API ma_result ma_spatializer_get_master_volume(const ma_spatializer* pSpatializer, float* pVolume) +{ + if (pSpatializer == NULL) { + return MA_INVALID_ARGS; + } + + return ma_gainer_get_master_volume(&pSpatializer->gainer, pVolume); +} + +MA_API ma_uint32 ma_spatializer_get_input_channels(const ma_spatializer* pSpatializer) +{ + if (pSpatializer == NULL) { + return 0; + } + + return pSpatializer->channelsIn; +} + +MA_API ma_uint32 ma_spatializer_get_output_channels(const ma_spatializer* pSpatializer) +{ + if (pSpatializer == NULL) { + return 0; + } + + return pSpatializer->channelsOut; +} + +MA_API void ma_spatializer_set_attenuation_model(ma_spatializer* pSpatializer, ma_attenuation_model attenuationModel) +{ + if (pSpatializer == NULL) { + return; + } + + ma_atomic_exchange_i32(&pSpatializer->attenuationModel, attenuationModel); +} + +MA_API ma_attenuation_model ma_spatializer_get_attenuation_model(const ma_spatializer* pSpatializer) +{ + if (pSpatializer == NULL) { + return ma_attenuation_model_none; + } + + return (ma_attenuation_model)ma_atomic_load_i32(&pSpatializer->attenuationModel); +} + +MA_API void ma_spatializer_set_positioning(ma_spatializer* pSpatializer, ma_positioning positioning) +{ + if (pSpatializer == NULL) { + return; + } + + ma_atomic_exchange_i32(&pSpatializer->positioning, positioning); +} + +MA_API ma_positioning ma_spatializer_get_positioning(const ma_spatializer* pSpatializer) +{ + if (pSpatializer == NULL) { + return ma_positioning_absolute; + } + + return (ma_positioning)ma_atomic_load_i32(&pSpatializer->positioning); +} + +MA_API void ma_spatializer_set_rolloff(ma_spatializer* pSpatializer, float rolloff) +{ + if (pSpatializer == NULL) { + return; + } + + ma_atomic_exchange_f32(&pSpatializer->rolloff, rolloff); +} + +MA_API float ma_spatializer_get_rolloff(const ma_spatializer* pSpatializer) +{ + if (pSpatializer == NULL) { + return 0; + } + + return ma_atomic_load_f32(&pSpatializer->rolloff); +} + +MA_API void ma_spatializer_set_min_gain(ma_spatializer* pSpatializer, float minGain) +{ + if (pSpatializer == NULL) { + return; + } + + ma_atomic_exchange_f32(&pSpatializer->minGain, minGain); +} + +MA_API float ma_spatializer_get_min_gain(const ma_spatializer* pSpatializer) +{ + if (pSpatializer == NULL) { + return 0; + } + + return ma_atomic_load_f32(&pSpatializer->minGain); +} + +MA_API void ma_spatializer_set_max_gain(ma_spatializer* pSpatializer, float maxGain) +{ + if (pSpatializer == NULL) { + return; + } + + ma_atomic_exchange_f32(&pSpatializer->maxGain, maxGain); +} + +MA_API float ma_spatializer_get_max_gain(const ma_spatializer* pSpatializer) +{ + if (pSpatializer == NULL) { + return 0; + } + + return ma_atomic_load_f32(&pSpatializer->maxGain); +} + +MA_API void ma_spatializer_set_min_distance(ma_spatializer* pSpatializer, float minDistance) +{ + if (pSpatializer == NULL) { + return; + } + + ma_atomic_exchange_f32(&pSpatializer->minDistance, minDistance); +} + +MA_API float 
ma_spatializer_get_min_distance(const ma_spatializer* pSpatializer) +{ + if (pSpatializer == NULL) { + return 0; + } + + return ma_atomic_load_f32(&pSpatializer->minDistance); +} + +MA_API void ma_spatializer_set_max_distance(ma_spatializer* pSpatializer, float maxDistance) +{ + if (pSpatializer == NULL) { + return; + } + + ma_atomic_exchange_f32(&pSpatializer->maxDistance, maxDistance); +} + +MA_API float ma_spatializer_get_max_distance(const ma_spatializer* pSpatializer) +{ + if (pSpatializer == NULL) { + return 0; + } + + return ma_atomic_load_f32(&pSpatializer->maxDistance); +} + +MA_API void ma_spatializer_set_cone(ma_spatializer* pSpatializer, float innerAngleInRadians, float outerAngleInRadians, float outerGain) +{ + if (pSpatializer == NULL) { + return; + } + + ma_atomic_exchange_f32(&pSpatializer->coneInnerAngleInRadians, innerAngleInRadians); + ma_atomic_exchange_f32(&pSpatializer->coneOuterAngleInRadians, outerAngleInRadians); + ma_atomic_exchange_f32(&pSpatializer->coneOuterGain, outerGain); +} + +MA_API void ma_spatializer_get_cone(const ma_spatializer* pSpatializer, float* pInnerAngleInRadians, float* pOuterAngleInRadians, float* pOuterGain) +{ + if (pSpatializer == NULL) { + return; + } + + if (pInnerAngleInRadians != NULL) { + *pInnerAngleInRadians = ma_atomic_load_f32(&pSpatializer->coneInnerAngleInRadians); + } + + if (pOuterAngleInRadians != NULL) { + *pOuterAngleInRadians = ma_atomic_load_f32(&pSpatializer->coneOuterAngleInRadians); + } + + if (pOuterGain != NULL) { + *pOuterGain = ma_atomic_load_f32(&pSpatializer->coneOuterGain); + } +} + +MA_API void ma_spatializer_set_doppler_factor(ma_spatializer* pSpatializer, float dopplerFactor) +{ + if (pSpatializer == NULL) { + return; + } + + ma_atomic_exchange_f32(&pSpatializer->dopplerFactor, dopplerFactor); +} + +MA_API float ma_spatializer_get_doppler_factor(const ma_spatializer* pSpatializer) +{ + if (pSpatializer == NULL) { + return 1; + } + + return ma_atomic_load_f32(&pSpatializer->dopplerFactor); +} + +MA_API void ma_spatializer_set_directional_attenuation_factor(ma_spatializer* pSpatializer, float directionalAttenuationFactor) +{ + if (pSpatializer == NULL) { + return; + } + + ma_atomic_exchange_f32(&pSpatializer->directionalAttenuationFactor, directionalAttenuationFactor); +} + +MA_API float ma_spatializer_get_directional_attenuation_factor(const ma_spatializer* pSpatializer) +{ + if (pSpatializer == NULL) { + return 1; + } + + return ma_atomic_load_f32(&pSpatializer->directionalAttenuationFactor); +} + +MA_API void ma_spatializer_set_position(ma_spatializer* pSpatializer, float x, float y, float z) +{ + if (pSpatializer == NULL) { + return; + } + + ma_atomic_vec3f_set(&pSpatializer->position, ma_vec3f_init_3f(x, y, z)); +} + +MA_API ma_vec3f ma_spatializer_get_position(const ma_spatializer* pSpatializer) +{ + if (pSpatializer == NULL) { + return ma_vec3f_init_3f(0, 0, 0); + } + + return ma_atomic_vec3f_get((ma_atomic_vec3f*)&pSpatializer->position); /* Naughty const-cast. It's just for atomically loading the vec3 which should be safe. 
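+       (The cast only exists so ma_atomic_vec3f_get can take the vec3's internal spinlock; the value
+       is copied out under the lock and nothing is logically modified.)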
*/ +} + +MA_API void ma_spatializer_set_direction(ma_spatializer* pSpatializer, float x, float y, float z) +{ + if (pSpatializer == NULL) { + return; + } + + ma_atomic_vec3f_set(&pSpatializer->direction, ma_vec3f_init_3f(x, y, z)); +} + +MA_API ma_vec3f ma_spatializer_get_direction(const ma_spatializer* pSpatializer) +{ + if (pSpatializer == NULL) { + return ma_vec3f_init_3f(0, 0, -1); + } + + return ma_atomic_vec3f_get((ma_atomic_vec3f*)&pSpatializer->direction); /* Naughty const-cast. It's just for atomically loading the vec3 which should be safe. */ +} + +MA_API void ma_spatializer_set_velocity(ma_spatializer* pSpatializer, float x, float y, float z) +{ + if (pSpatializer == NULL) { + return; + } + + ma_atomic_vec3f_set(&pSpatializer->velocity, ma_vec3f_init_3f(x, y, z)); +} + +MA_API ma_vec3f ma_spatializer_get_velocity(const ma_spatializer* pSpatializer) +{ + if (pSpatializer == NULL) { + return ma_vec3f_init_3f(0, 0, 0); + } + + return ma_atomic_vec3f_get((ma_atomic_vec3f*)&pSpatializer->velocity); /* Naughty const-cast. It's just for atomically loading the vec3 which should be safe. */ +} + +MA_API void ma_spatializer_get_relative_position_and_direction(const ma_spatializer* pSpatializer, const ma_spatializer_listener* pListener, ma_vec3f* pRelativePos, ma_vec3f* pRelativeDir) +{ + if (pRelativePos != NULL) { + pRelativePos->x = 0; + pRelativePos->y = 0; + pRelativePos->z = 0; + } + + if (pRelativeDir != NULL) { + pRelativeDir->x = 0; + pRelativeDir->y = 0; + pRelativeDir->z = -1; + } + + if (pSpatializer == NULL) { + return; + } + + if (pListener == NULL || ma_spatializer_get_positioning(pSpatializer) == ma_positioning_relative) { + /* There's no listener or we're using relative positioning. */ + if (pRelativePos != NULL) { + *pRelativePos = ma_spatializer_get_position(pSpatializer); + } + if (pRelativeDir != NULL) { + *pRelativeDir = ma_spatializer_get_direction(pSpatializer); + } + } else { + ma_vec3f spatializerPosition; + ma_vec3f spatializerDirection; + ma_vec3f listenerPosition; + ma_vec3f listenerDirection; + ma_vec3f v; + ma_vec3f axisX; + ma_vec3f axisY; + ma_vec3f axisZ; + float m[4][4]; + + spatializerPosition = ma_spatializer_get_position(pSpatializer); + spatializerDirection = ma_spatializer_get_direction(pSpatializer); + listenerPosition = ma_spatializer_listener_get_position(pListener); + listenerDirection = ma_spatializer_listener_get_direction(pListener); + + /* + We need to calculate the right vector from our forward and up vectors. This is done with + a cross product. + */ + axisZ = ma_vec3f_normalize(listenerDirection); /* Normalization required here because we can't trust the caller. */ + axisX = ma_vec3f_normalize(ma_vec3f_cross(axisZ, pListener->config.worldUp)); /* Normalization required here because the world up vector may not be perpendicular with the forward vector. */ + + /* + The calculation of axisX above can result in a zero-length vector if the listener is + looking straight up on the Y axis. We'll need to fall back to a +X in this case so that + the calculations below don't fall apart. This is where a quaternion based listener and + sound orientation would come in handy. + */ + if (ma_vec3f_len2(axisX) == 0) { + axisX = ma_vec3f_init_3f(1, 0, 0); + } + + axisY = ma_vec3f_cross(axisX, axisZ); /* No normalization is required here because axisX and axisZ are unit length and perpendicular. 
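+        (|a x b| = |a||b|sin(theta); with unit-length, perpendicular inputs that is 1*1*sin(90deg) = 1,
+        so axisY is already unit length.)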
*/
+
+        /*
+        We need to swap the X axis if we're left handed because otherwise the cross product above
+        will have resulted in it pointing in the wrong direction (right handed was assumed in the
+        cross products above).
+        */
+        if (pListener->config.handedness == ma_handedness_left) {
+            axisX = ma_vec3f_neg(axisX);
+        }
+
+        /* Lookat. */
+        m[0][0] =  axisX.x; m[1][0] =  axisX.y; m[2][0] =  axisX.z; m[3][0] = -ma_vec3f_dot(axisX, listenerPosition);
+        m[0][1] =  axisY.x; m[1][1] =  axisY.y; m[2][1] =  axisY.z; m[3][1] = -ma_vec3f_dot(axisY, listenerPosition);
+        m[0][2] = -axisZ.x; m[1][2] = -axisZ.y; m[2][2] = -axisZ.z; m[3][2] = -ma_vec3f_dot(ma_vec3f_neg(axisZ), listenerPosition);
+        m[0][3] =  0;       m[1][3] =  0;       m[2][3] =  0;       m[3][3] =  1;
+
+        /*
+        Multiply the lookat matrix by the spatializer position to transform it to listener
+        space. This allows calculations to work based on the sound being relative to the
+        origin which makes things simpler.
+        */
+        if (pRelativePos != NULL) {
+            v = spatializerPosition;
+            pRelativePos->x = m[0][0] * v.x + m[1][0] * v.y + m[2][0] * v.z + m[3][0] * 1;
+            pRelativePos->y = m[0][1] * v.x + m[1][1] * v.y + m[2][1] * v.z + m[3][1] * 1;
+            pRelativePos->z = m[0][2] * v.x + m[1][2] * v.y + m[2][2] * v.z + m[3][2] * 1;
+        }
+
+        /*
+        The direction of the sound needs to also be transformed so that it's relative to the
+        rotation of the listener.
+        */
+        if (pRelativeDir != NULL) {
+            v = spatializerDirection;
+            pRelativeDir->x = m[0][0] * v.x + m[1][0] * v.y + m[2][0] * v.z;
+            pRelativeDir->y = m[0][1] * v.x + m[1][1] * v.y + m[2][1] * v.z;
+            pRelativeDir->z = m[0][2] * v.x + m[1][2] * v.y + m[2][2] * v.z;
+        }
+    }
+}
+
+
+
+
+/**************************************************************************************************************************************************************
+
+Resampling
+
+**************************************************************************************************************************************************************/
+MA_API ma_linear_resampler_config ma_linear_resampler_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRateIn, ma_uint32 sampleRateOut)
+{
+    ma_linear_resampler_config config;
+    MA_ZERO_OBJECT(&config);
+    config.format = format;
+    config.channels = channels;
+    config.sampleRateIn = sampleRateIn;
+    config.sampleRateOut = sampleRateOut;
+    config.lpfOrder = ma_min(MA_DEFAULT_RESAMPLER_LPF_ORDER, MA_MAX_FILTER_ORDER);
+    config.lpfNyquistFactor = 1;
+
+    return config;
+}
+
+
+typedef struct
+{
+    size_t sizeInBytes;
+    size_t x0Offset;
+    size_t x1Offset;
+    size_t lpfOffset;
+} ma_linear_resampler_heap_layout;
+
+
+static void ma_linear_resampler_adjust_timer_for_new_rate(ma_linear_resampler* pResampler, ma_uint32 oldSampleRateOut, ma_uint32 newSampleRateOut)
+{
+    /*
+    So what's happening here? Basically we need to adjust the fractional component of the time advance based on the new rate. The old time advance will
+    be based on the old sample rate, but we need to adjust it so that it's based on the new sample rate.
+    */
+    ma_uint32 oldRateTimeWhole = pResampler->inTimeFrac / oldSampleRateOut;  /* <-- This should almost never be anything other than 0, but leaving it here to make this more general and robust just in case. */
+    ma_uint32 oldRateTimeFract = pResampler->inTimeFrac % oldSampleRateOut;
+
+    pResampler->inTimeFrac =
+        (oldRateTimeWhole * newSampleRateOut) +
+        ((oldRateTimeFract * newSampleRateOut) / oldSampleRateOut);
+
+    /* Make sure the fractional part is less than the output sample rate.
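+    As an illustrative example: if inTimeFrac was 24000 with oldSampleRateOut = 48000 (exactly half
+    way between two input frames), switching to newSampleRateOut = 44100 gives
+    (24000 * 44100) / 48000 = 22050, which is still exactly half way.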
*/ + pResampler->inTimeInt += pResampler->inTimeFrac / pResampler->config.sampleRateOut; + pResampler->inTimeFrac = pResampler->inTimeFrac % pResampler->config.sampleRateOut; +} + +static ma_result ma_linear_resampler_set_rate_internal(ma_linear_resampler* pResampler, void* pHeap, ma_linear_resampler_heap_layout* pHeapLayout, ma_uint32 sampleRateIn, ma_uint32 sampleRateOut, ma_bool32 isResamplerAlreadyInitialized) +{ + ma_result result; + ma_uint32 gcf; + ma_uint32 lpfSampleRate; + double lpfCutoffFrequency; + ma_lpf_config lpfConfig; + ma_uint32 oldSampleRateOut; /* Required for adjusting time advance down the bottom. */ + + if (pResampler == NULL) { + return MA_INVALID_ARGS; + } + + if (sampleRateIn == 0 || sampleRateOut == 0) { + return MA_INVALID_ARGS; + } + + oldSampleRateOut = pResampler->config.sampleRateOut; + + pResampler->config.sampleRateIn = sampleRateIn; + pResampler->config.sampleRateOut = sampleRateOut; + + /* Simplify the sample rate. */ + gcf = ma_gcf_u32(pResampler->config.sampleRateIn, pResampler->config.sampleRateOut); + pResampler->config.sampleRateIn /= gcf; + pResampler->config.sampleRateOut /= gcf; + + /* Always initialize the low-pass filter, even when the order is 0. */ + if (pResampler->config.lpfOrder > MA_MAX_FILTER_ORDER) { + return MA_INVALID_ARGS; + } + + lpfSampleRate = (ma_uint32)(ma_max(pResampler->config.sampleRateIn, pResampler->config.sampleRateOut)); + lpfCutoffFrequency = ( double)(ma_min(pResampler->config.sampleRateIn, pResampler->config.sampleRateOut) * 0.5 * pResampler->config.lpfNyquistFactor); + + lpfConfig = ma_lpf_config_init(pResampler->config.format, pResampler->config.channels, lpfSampleRate, lpfCutoffFrequency, pResampler->config.lpfOrder); + + /* + If the resampler is already initialized we don't want to do a fresh initialization of the low-pass filter because it will result in the cached frames + getting cleared. Instead we re-initialize the filter which will maintain any cached frames. + */ + if (isResamplerAlreadyInitialized) { + result = ma_lpf_reinit(&lpfConfig, &pResampler->lpf); + } else { + result = ma_lpf_init_preallocated(&lpfConfig, ma_offset_ptr(pHeap, pHeapLayout->lpfOffset), &pResampler->lpf); + } + + if (result != MA_SUCCESS) { + return result; + } + + + pResampler->inAdvanceInt = pResampler->config.sampleRateIn / pResampler->config.sampleRateOut; + pResampler->inAdvanceFrac = pResampler->config.sampleRateIn % pResampler->config.sampleRateOut; + + /* Our timer was based on the old rate. We need to adjust it so that it's based on the new rate. 
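+    With illustrative numbers: for a 44100 -> 48000 conversion the gcf is 300, so the simplified
+    ratio is 147/160, giving inAdvanceInt = 147/160 = 0 and inAdvanceFrac = 147. In other words, the
+    input cursor advances 147/160ths of an input frame per output frame.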
*/ + ma_linear_resampler_adjust_timer_for_new_rate(pResampler, oldSampleRateOut, pResampler->config.sampleRateOut); + + return MA_SUCCESS; +} + +static ma_result ma_linear_resampler_get_heap_layout(const ma_linear_resampler_config* pConfig, ma_linear_resampler_heap_layout* pHeapLayout) +{ + MA_ASSERT(pHeapLayout != NULL); + + MA_ZERO_OBJECT(pHeapLayout); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + if (pConfig->format != ma_format_f32 && pConfig->format != ma_format_s16) { + return MA_INVALID_ARGS; + } + + if (pConfig->channels == 0) { + return MA_INVALID_ARGS; + } + + pHeapLayout->sizeInBytes = 0; + + /* x0 */ + pHeapLayout->x0Offset = pHeapLayout->sizeInBytes; + if (pConfig->format == ma_format_f32) { + pHeapLayout->sizeInBytes += sizeof(float) * pConfig->channels; + } else { + pHeapLayout->sizeInBytes += sizeof(ma_int16) * pConfig->channels; + } + + /* x1 */ + pHeapLayout->x1Offset = pHeapLayout->sizeInBytes; + if (pConfig->format == ma_format_f32) { + pHeapLayout->sizeInBytes += sizeof(float) * pConfig->channels; + } else { + pHeapLayout->sizeInBytes += sizeof(ma_int16) * pConfig->channels; + } + + /* LPF */ + pHeapLayout->lpfOffset = ma_align_64(pHeapLayout->sizeInBytes); + { + ma_result result; + size_t lpfHeapSizeInBytes; + ma_lpf_config lpfConfig = ma_lpf_config_init(pConfig->format, pConfig->channels, 1, 1, pConfig->lpfOrder); /* Sample rate and cutoff frequency do not matter. */ + + result = ma_lpf_get_heap_size(&lpfConfig, &lpfHeapSizeInBytes); + if (result != MA_SUCCESS) { + return result; + } + + pHeapLayout->sizeInBytes += lpfHeapSizeInBytes; + } + + /* Make sure allocation size is aligned. */ + pHeapLayout->sizeInBytes = ma_align_64(pHeapLayout->sizeInBytes); + + return MA_SUCCESS; +} + +MA_API ma_result ma_linear_resampler_get_heap_size(const ma_linear_resampler_config* pConfig, size_t* pHeapSizeInBytes) +{ + ma_result result; + ma_linear_resampler_heap_layout heapLayout; + + if (pHeapSizeInBytes == NULL) { + return MA_INVALID_ARGS; + } + + *pHeapSizeInBytes = 0; + + result = ma_linear_resampler_get_heap_layout(pConfig, &heapLayout); + if (result != MA_SUCCESS) { + return result; + } + + *pHeapSizeInBytes = heapLayout.sizeInBytes; + + return MA_SUCCESS; +} + +MA_API ma_result ma_linear_resampler_init_preallocated(const ma_linear_resampler_config* pConfig, void* pHeap, ma_linear_resampler* pResampler) +{ + ma_result result; + ma_linear_resampler_heap_layout heapLayout; + + if (pResampler == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pResampler); + + result = ma_linear_resampler_get_heap_layout(pConfig, &heapLayout); + if (result != MA_SUCCESS) { + return result; + } + + pResampler->config = *pConfig; + + pResampler->_pHeap = pHeap; + MA_ZERO_MEMORY(pHeap, heapLayout.sizeInBytes); + + if (pConfig->format == ma_format_f32) { + pResampler->x0.f32 = (float*)ma_offset_ptr(pHeap, heapLayout.x0Offset); + pResampler->x1.f32 = (float*)ma_offset_ptr(pHeap, heapLayout.x1Offset); + } else { + pResampler->x0.s16 = (ma_int16*)ma_offset_ptr(pHeap, heapLayout.x0Offset); + pResampler->x1.s16 = (ma_int16*)ma_offset_ptr(pHeap, heapLayout.x1Offset); + } + + /* Setting the rate will set up the filter and time advances for us. 
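+    Because isResamplerAlreadyInitialized is MA_FALSE here, ma_linear_resampler_set_rate_internal
+    will do a fresh ma_lpf_init_preallocated rather than a ma_lpf_reinit, which is what we want on
+    first initialization since there are no cached frames to preserve.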
*/
+    result = ma_linear_resampler_set_rate_internal(pResampler, pHeap, &heapLayout, pConfig->sampleRateIn, pConfig->sampleRateOut, /* isResamplerAlreadyInitialized = */ MA_FALSE);
+    if (result != MA_SUCCESS) {
+        return result;
+    }
+
+    pResampler->inTimeInt  = 1;  /* Set this to one to force an input sample to always be loaded for the first output frame. */
+    pResampler->inTimeFrac = 0;
+
+    return MA_SUCCESS;
+}
+
+MA_API ma_result ma_linear_resampler_init(const ma_linear_resampler_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_linear_resampler* pResampler)
+{
+    ma_result result;
+    size_t heapSizeInBytes;
+    void* pHeap;
+
+    result = ma_linear_resampler_get_heap_size(pConfig, &heapSizeInBytes);
+    if (result != MA_SUCCESS) {
+        return result;
+    }
+
+    if (heapSizeInBytes > 0) {
+        pHeap = ma_malloc(heapSizeInBytes, pAllocationCallbacks);
+        if (pHeap == NULL) {
+            return MA_OUT_OF_MEMORY;
+        }
+    } else {
+        pHeap = NULL;
+    }
+
+    result = ma_linear_resampler_init_preallocated(pConfig, pHeap, pResampler);
+    if (result != MA_SUCCESS) {
+        ma_free(pHeap, pAllocationCallbacks);
+        return result;
+    }
+
+    pResampler->_ownsHeap = MA_TRUE;
+    return MA_SUCCESS;
+}
+
+MA_API void ma_linear_resampler_uninit(ma_linear_resampler* pResampler, const ma_allocation_callbacks* pAllocationCallbacks)
+{
+    if (pResampler == NULL) {
+        return;
+    }
+
+    ma_lpf_uninit(&pResampler->lpf, pAllocationCallbacks);
+
+    if (pResampler->_ownsHeap) {
+        ma_free(pResampler->_pHeap, pAllocationCallbacks);
+    }
+}
+
+static MA_INLINE ma_int16 ma_linear_resampler_mix_s16(ma_int16 x, ma_int16 y, ma_int32 a, const ma_int32 shift)
+{
+    ma_int32 b;
+    ma_int32 c;
+    ma_int32 r;
+
+    MA_ASSERT(a <= (1<<shift));
+
+    b = x * ((1<<shift) - a);
+    c = y * a;
+
+    r = b + c;
+
+    return (ma_int16)(r >> shift);
+}
+
+static void ma_linear_resampler_interpolate_frame_s16(ma_linear_resampler* pResampler, ma_int16* MA_RESTRICT pFrameOut)
+{
+    ma_uint32 c;
+    ma_uint32 a;
+    const ma_uint32 channels = pResampler->config.channels;
+    const ma_uint32 shift = 12;
+
+    MA_ASSERT(pResampler != NULL);
+    MA_ASSERT(pFrameOut  != NULL);
+
+    a = (pResampler->inTimeFrac << shift) / pResampler->config.sampleRateOut;
+
+    MA_ASSUME(channels > 0);
+    for (c = 0; c < channels; c += 1) {
+        ma_int16 s = ma_linear_resampler_mix_s16(pResampler->x0.s16[c], pResampler->x1.s16[c], a, shift);
+        pFrameOut[c] = s;
+    }
+}
+
+
+static void ma_linear_resampler_interpolate_frame_f32(ma_linear_resampler* pResampler, float* MA_RESTRICT pFrameOut)
+{
+    ma_uint32 c;
+    float a;
+    const ma_uint32 channels = pResampler->config.channels;
+
+    MA_ASSERT(pResampler != NULL);
+    MA_ASSERT(pFrameOut  != NULL);
+
+    a = (float)pResampler->inTimeFrac / pResampler->config.sampleRateOut;
+
+    MA_ASSUME(channels > 0);
+    for (c = 0; c < channels; c += 1) {
+        float s = ma_mix_f32_fast(pResampler->x0.f32[c], pResampler->x1.f32[c], a);
+        pFrameOut[c] = s;
+    }
+}
+
+static ma_result ma_linear_resampler_process_pcm_frames_s16_downsample(ma_linear_resampler* pResampler, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut)
+{
+    const ma_int16* pFramesInS16;
+    /* */ ma_int16* pFramesOutS16;
+    ma_uint64 frameCountIn;
+    ma_uint64 frameCountOut;
+    ma_uint64 framesProcessedIn;
+    ma_uint64 framesProcessedOut;
+
+    MA_ASSERT(pResampler     != NULL);
+    MA_ASSERT(pFrameCountIn  != NULL);
+    MA_ASSERT(pFrameCountOut != NULL);
+
+    pFramesInS16       = (const ma_int16*)pFramesIn;
+    pFramesOutS16      = (      ma_int16*)pFramesOut;
+    frameCountIn       = *pFrameCountIn;
+    frameCountOut      = *pFrameCountOut;
+    framesProcessedIn  = 0;
+    framesProcessedOut = 0;
+
+    while (framesProcessedOut <
frameCountOut) { + /* Before interpolating we need to load the buffers. When doing this we need to ensure we run every input sample through the filter. */ + while (pResampler->inTimeInt > 0 && frameCountIn > framesProcessedIn) { + ma_uint32 iChannel; + + if (pFramesInS16 != NULL) { + for (iChannel = 0; iChannel < pResampler->config.channels; iChannel += 1) { + pResampler->x0.s16[iChannel] = pResampler->x1.s16[iChannel]; + pResampler->x1.s16[iChannel] = pFramesInS16[iChannel]; + } + pFramesInS16 += pResampler->config.channels; + } else { + for (iChannel = 0; iChannel < pResampler->config.channels; iChannel += 1) { + pResampler->x0.s16[iChannel] = pResampler->x1.s16[iChannel]; + pResampler->x1.s16[iChannel] = 0; + } + } + + /* Filter. Do not apply filtering if sample rates are the same or else you'll get dangerous glitching. */ + if (pResampler->config.sampleRateIn != pResampler->config.sampleRateOut) { + ma_lpf_process_pcm_frame_s16(&pResampler->lpf, pResampler->x1.s16, pResampler->x1.s16); + } + + framesProcessedIn += 1; + pResampler->inTimeInt -= 1; + } + + if (pResampler->inTimeInt > 0) { + break; /* Ran out of input data. */ + } + + /* Getting here means the frames have been loaded and filtered and we can generate the next output frame. */ + if (pFramesOutS16 != NULL) { + MA_ASSERT(pResampler->inTimeInt == 0); + ma_linear_resampler_interpolate_frame_s16(pResampler, pFramesOutS16); + + pFramesOutS16 += pResampler->config.channels; + } + + framesProcessedOut += 1; + + /* Advance time forward. */ + pResampler->inTimeInt += pResampler->inAdvanceInt; + pResampler->inTimeFrac += pResampler->inAdvanceFrac; + if (pResampler->inTimeFrac >= pResampler->config.sampleRateOut) { + pResampler->inTimeFrac -= pResampler->config.sampleRateOut; + pResampler->inTimeInt += 1; + } + } + + *pFrameCountIn = framesProcessedIn; + *pFrameCountOut = framesProcessedOut; + + return MA_SUCCESS; +} + +static ma_result ma_linear_resampler_process_pcm_frames_s16_upsample(ma_linear_resampler* pResampler, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut) +{ + const ma_int16* pFramesInS16; + /* */ ma_int16* pFramesOutS16; + ma_uint64 frameCountIn; + ma_uint64 frameCountOut; + ma_uint64 framesProcessedIn; + ma_uint64 framesProcessedOut; + + MA_ASSERT(pResampler != NULL); + MA_ASSERT(pFrameCountIn != NULL); + MA_ASSERT(pFrameCountOut != NULL); + + pFramesInS16 = (const ma_int16*)pFramesIn; + pFramesOutS16 = ( ma_int16*)pFramesOut; + frameCountIn = *pFrameCountIn; + frameCountOut = *pFrameCountOut; + framesProcessedIn = 0; + framesProcessedOut = 0; + + while (framesProcessedOut < frameCountOut) { + /* Before interpolating we need to load the buffers. */ + while (pResampler->inTimeInt > 0 && frameCountIn > framesProcessedIn) { + ma_uint32 iChannel; + + if (pFramesInS16 != NULL) { + for (iChannel = 0; iChannel < pResampler->config.channels; iChannel += 1) { + pResampler->x0.s16[iChannel] = pResampler->x1.s16[iChannel]; + pResampler->x1.s16[iChannel] = pFramesInS16[iChannel]; + } + pFramesInS16 += pResampler->config.channels; + } else { + for (iChannel = 0; iChannel < pResampler->config.channels; iChannel += 1) { + pResampler->x0.s16[iChannel] = pResampler->x1.s16[iChannel]; + pResampler->x1.s16[iChannel] = 0; + } + } + + framesProcessedIn += 1; + pResampler->inTimeInt -= 1; + } + + if (pResampler->inTimeInt > 0) { + break; /* Ran out of input data. */ + } + + /* Getting here means the frames have been loaded and we can generate the next output frame. 
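+        Interpolation is 12-bit fixed point (see ma_linear_resampler_interpolate_frame_s16() above):
+        a = (inTimeFrac << 12) / sampleRateOut, so an inTimeFrac exactly halfway between two input frames
+        yields a = 2048 and therefore an equal mix of x0 and x1.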
*/ + if (pFramesOutS16 != NULL) { + MA_ASSERT(pResampler->inTimeInt == 0); + ma_linear_resampler_interpolate_frame_s16(pResampler, pFramesOutS16); + + /* Filter. Do not apply filtering if sample rates are the same or else you'll get dangerous glitching. */ + if (pResampler->config.sampleRateIn != pResampler->config.sampleRateOut) { + ma_lpf_process_pcm_frame_s16(&pResampler->lpf, pFramesOutS16, pFramesOutS16); + } + + pFramesOutS16 += pResampler->config.channels; + } + + framesProcessedOut += 1; + + /* Advance time forward. */ + pResampler->inTimeInt += pResampler->inAdvanceInt; + pResampler->inTimeFrac += pResampler->inAdvanceFrac; + if (pResampler->inTimeFrac >= pResampler->config.sampleRateOut) { + pResampler->inTimeFrac -= pResampler->config.sampleRateOut; + pResampler->inTimeInt += 1; + } + } + + *pFrameCountIn = framesProcessedIn; + *pFrameCountOut = framesProcessedOut; + + return MA_SUCCESS; +} + +static ma_result ma_linear_resampler_process_pcm_frames_s16(ma_linear_resampler* pResampler, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut) +{ + MA_ASSERT(pResampler != NULL); + + if (pResampler->config.sampleRateIn > pResampler->config.sampleRateOut) { + return ma_linear_resampler_process_pcm_frames_s16_downsample(pResampler, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut); + } else { + return ma_linear_resampler_process_pcm_frames_s16_upsample(pResampler, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut); + } +} + + +static ma_result ma_linear_resampler_process_pcm_frames_f32_downsample(ma_linear_resampler* pResampler, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut) +{ + const float* pFramesInF32; + /* */ float* pFramesOutF32; + ma_uint64 frameCountIn; + ma_uint64 frameCountOut; + ma_uint64 framesProcessedIn; + ma_uint64 framesProcessedOut; + + MA_ASSERT(pResampler != NULL); + MA_ASSERT(pFrameCountIn != NULL); + MA_ASSERT(pFrameCountOut != NULL); + + pFramesInF32 = (const float*)pFramesIn; + pFramesOutF32 = ( float*)pFramesOut; + frameCountIn = *pFrameCountIn; + frameCountOut = *pFrameCountOut; + framesProcessedIn = 0; + framesProcessedOut = 0; + + while (framesProcessedOut < frameCountOut) { + /* Before interpolating we need to load the buffers. When doing this we need to ensure we run every input sample through the filter. */ + while (pResampler->inTimeInt > 0 && frameCountIn > framesProcessedIn) { + ma_uint32 iChannel; + + if (pFramesInF32 != NULL) { + for (iChannel = 0; iChannel < pResampler->config.channels; iChannel += 1) { + pResampler->x0.f32[iChannel] = pResampler->x1.f32[iChannel]; + pResampler->x1.f32[iChannel] = pFramesInF32[iChannel]; + } + pFramesInF32 += pResampler->config.channels; + } else { + for (iChannel = 0; iChannel < pResampler->config.channels; iChannel += 1) { + pResampler->x0.f32[iChannel] = pResampler->x1.f32[iChannel]; + pResampler->x1.f32[iChannel] = 0; + } + } + + /* Filter. Do not apply filtering if sample rates are the same or else you'll get dangerous glitching. */ + if (pResampler->config.sampleRateIn != pResampler->config.sampleRateOut) { + ma_lpf_process_pcm_frame_f32(&pResampler->lpf, pResampler->x1.f32, pResampler->x1.f32); + } + + framesProcessedIn += 1; + pResampler->inTimeInt -= 1; + } + + if (pResampler->inTimeInt > 0) { + break; /* Ran out of input data. */ + } + + /* Getting here means the frames have been loaded and filtered and we can generate the next output frame. 
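+        Note the filter placement: when downsampling, the LPF runs over the *input* samples (above) so that
+        aliasing is removed before decimation, whereas the upsampling paths run it over the *output* samples
+        after interpolation to remove imaging artifacts.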
*/ + if (pFramesOutF32 != NULL) { + MA_ASSERT(pResampler->inTimeInt == 0); + ma_linear_resampler_interpolate_frame_f32(pResampler, pFramesOutF32); + + pFramesOutF32 += pResampler->config.channels; + } + + framesProcessedOut += 1; + + /* Advance time forward. */ + pResampler->inTimeInt += pResampler->inAdvanceInt; + pResampler->inTimeFrac += pResampler->inAdvanceFrac; + if (pResampler->inTimeFrac >= pResampler->config.sampleRateOut) { + pResampler->inTimeFrac -= pResampler->config.sampleRateOut; + pResampler->inTimeInt += 1; + } + } + + *pFrameCountIn = framesProcessedIn; + *pFrameCountOut = framesProcessedOut; + + return MA_SUCCESS; +} + +static ma_result ma_linear_resampler_process_pcm_frames_f32_upsample(ma_linear_resampler* pResampler, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut) +{ + const float* pFramesInF32; + /* */ float* pFramesOutF32; + ma_uint64 frameCountIn; + ma_uint64 frameCountOut; + ma_uint64 framesProcessedIn; + ma_uint64 framesProcessedOut; + + MA_ASSERT(pResampler != NULL); + MA_ASSERT(pFrameCountIn != NULL); + MA_ASSERT(pFrameCountOut != NULL); + + pFramesInF32 = (const float*)pFramesIn; + pFramesOutF32 = ( float*)pFramesOut; + frameCountIn = *pFrameCountIn; + frameCountOut = *pFrameCountOut; + framesProcessedIn = 0; + framesProcessedOut = 0; + + while (framesProcessedOut < frameCountOut) { + /* Before interpolating we need to load the buffers. */ + while (pResampler->inTimeInt > 0 && frameCountIn > framesProcessedIn) { + ma_uint32 iChannel; + + if (pFramesInF32 != NULL) { + for (iChannel = 0; iChannel < pResampler->config.channels; iChannel += 1) { + pResampler->x0.f32[iChannel] = pResampler->x1.f32[iChannel]; + pResampler->x1.f32[iChannel] = pFramesInF32[iChannel]; + } + pFramesInF32 += pResampler->config.channels; + } else { + for (iChannel = 0; iChannel < pResampler->config.channels; iChannel += 1) { + pResampler->x0.f32[iChannel] = pResampler->x1.f32[iChannel]; + pResampler->x1.f32[iChannel] = 0; + } + } + + framesProcessedIn += 1; + pResampler->inTimeInt -= 1; + } + + if (pResampler->inTimeInt > 0) { + break; /* Ran out of input data. */ + } + + /* Getting here means the frames have been loaded and we can generate the next output frame. */ + if (pFramesOutF32 != NULL) { + MA_ASSERT(pResampler->inTimeInt == 0); + ma_linear_resampler_interpolate_frame_f32(pResampler, pFramesOutF32); + + /* Filter. Do not apply filtering if sample rates are the same or else you'll get dangerous glitching. */ + if (pResampler->config.sampleRateIn != pResampler->config.sampleRateOut) { + ma_lpf_process_pcm_frame_f32(&pResampler->lpf, pFramesOutF32, pFramesOutF32); + } + + pFramesOutF32 += pResampler->config.channels; + } + + framesProcessedOut += 1; + + /* Advance time forward. 
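+        The advance is a mixed number: inAdvanceInt whole input frames plus inAdvanceFrac/sampleRateOut of a
+        frame per output frame, with inTimeFrac kept in the range [0, sampleRateOut) by the carry below.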
*/ + pResampler->inTimeInt += pResampler->inAdvanceInt; + pResampler->inTimeFrac += pResampler->inAdvanceFrac; + if (pResampler->inTimeFrac >= pResampler->config.sampleRateOut) { + pResampler->inTimeFrac -= pResampler->config.sampleRateOut; + pResampler->inTimeInt += 1; + } + } + + *pFrameCountIn = framesProcessedIn; + *pFrameCountOut = framesProcessedOut; + + return MA_SUCCESS; +} + +static ma_result ma_linear_resampler_process_pcm_frames_f32(ma_linear_resampler* pResampler, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut) +{ + MA_ASSERT(pResampler != NULL); + + if (pResampler->config.sampleRateIn > pResampler->config.sampleRateOut) { + return ma_linear_resampler_process_pcm_frames_f32_downsample(pResampler, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut); + } else { + return ma_linear_resampler_process_pcm_frames_f32_upsample(pResampler, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut); + } +} + + +MA_API ma_result ma_linear_resampler_process_pcm_frames(ma_linear_resampler* pResampler, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut) +{ + if (pResampler == NULL) { + return MA_INVALID_ARGS; + } + + /* */ if (pResampler->config.format == ma_format_s16) { + return ma_linear_resampler_process_pcm_frames_s16(pResampler, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut); + } else if (pResampler->config.format == ma_format_f32) { + return ma_linear_resampler_process_pcm_frames_f32(pResampler, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut); + } else { + /* Should never get here. Getting here means the format is not supported and you didn't check the return value of ma_linear_resampler_init(). */ + MA_ASSERT(MA_FALSE); + return MA_INVALID_ARGS; + } +} + + +MA_API ma_result ma_linear_resampler_set_rate(ma_linear_resampler* pResampler, ma_uint32 sampleRateIn, ma_uint32 sampleRateOut) +{ + return ma_linear_resampler_set_rate_internal(pResampler, NULL, NULL, sampleRateIn, sampleRateOut, /* isResamplerAlreadyInitialized = */ MA_TRUE); +} + +MA_API ma_result ma_linear_resampler_set_rate_ratio(ma_linear_resampler* pResampler, float ratioInOut) +{ + ma_uint32 n; + ma_uint32 d; + + if (pResampler == NULL) { + return MA_INVALID_ARGS; + } + + if (ratioInOut <= 0) { + return MA_INVALID_ARGS; + } + + d = 1000000; + n = (ma_uint32)(ratioInOut * d); + + if (n == 0) { + return MA_INVALID_ARGS; /* Ratio too small. */ + } + + MA_ASSERT(n != 0); + + return ma_linear_resampler_set_rate(pResampler, n, d); +} + +MA_API ma_uint64 ma_linear_resampler_get_input_latency(const ma_linear_resampler* pResampler) +{ + if (pResampler == NULL) { + return 0; + } + + return 1 + ma_lpf_get_latency(&pResampler->lpf); +} + +MA_API ma_uint64 ma_linear_resampler_get_output_latency(const ma_linear_resampler* pResampler) +{ + if (pResampler == NULL) { + return 0; + } + + return ma_linear_resampler_get_input_latency(pResampler) * pResampler->config.sampleRateOut / pResampler->config.sampleRateIn; +} + +MA_API ma_result ma_linear_resampler_get_required_input_frame_count(const ma_linear_resampler* pResampler, ma_uint64 outputFrameCount, ma_uint64* pInputFrameCount) +{ + ma_uint64 inputFrameCount; + + if (pInputFrameCount == NULL) { + return MA_INVALID_ARGS; + } + + *pInputFrameCount = 0; + + if (pResampler == NULL) { + return MA_INVALID_ARGS; + } + + if (outputFrameCount == 0) { + return MA_SUCCESS; + } + + /* Any whole input frames are consumed before the first output frame is generated. 
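+    Worked example, assuming a fresh 44100 -> 48000 stream where inTimeInt = 1, inTimeFrac = 0 and the
+    advance per output frame is 0 + 44100/48000 (i.e. inAdvanceInt = 0, inAdvanceFrac = 44100): for
+    outputFrameCount = 480 this gives 1 + 479*0 + (0 + 479*44100)/48000 = 1 + 440 = 441 input frames,
+    which matches the expected 44100/48000 ratio.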
*/ + inputFrameCount = pResampler->inTimeInt; + outputFrameCount -= 1; + + /* The rest of the output frames can be calculated in constant time. */ + inputFrameCount += outputFrameCount * pResampler->inAdvanceInt; + inputFrameCount += (pResampler->inTimeFrac + (outputFrameCount * pResampler->inAdvanceFrac)) / pResampler->config.sampleRateOut; + + *pInputFrameCount = inputFrameCount; + + return MA_SUCCESS; +} + +MA_API ma_result ma_linear_resampler_get_expected_output_frame_count(const ma_linear_resampler* pResampler, ma_uint64 inputFrameCount, ma_uint64* pOutputFrameCount) +{ + ma_uint64 outputFrameCount; + ma_uint64 preliminaryInputFrameCountFromFrac; + ma_uint64 preliminaryInputFrameCount; + + if (pOutputFrameCount == NULL) { + return MA_INVALID_ARGS; + } + + *pOutputFrameCount = 0; + + if (pResampler == NULL) { + return MA_INVALID_ARGS; + } + + /* + The first step is to get a preliminary output frame count. This will either be exactly equal to what we need, or less by 1. We need to + determine how many input frames will be consumed by this value. If it's greater than our original input frame count it means we won't + be able to generate an extra frame because we will have run out of input data. Otherwise we will have enough input for the generation + of an extra output frame. This add-by-one logic is necessary due to how the data loading logic works when processing frames. + */ + outputFrameCount = (inputFrameCount * pResampler->config.sampleRateOut) / pResampler->config.sampleRateIn; + + /* + We need to determine how many *whole* input frames will have been processed to generate our preliminary output frame count. This is + used in the logic below to determine whether or not we need to add an extra output frame. + */ + preliminaryInputFrameCountFromFrac = (pResampler->inTimeFrac + outputFrameCount*pResampler->inAdvanceFrac) / pResampler->config.sampleRateOut; + preliminaryInputFrameCount = (pResampler->inTimeInt + outputFrameCount*pResampler->inAdvanceInt ) + preliminaryInputFrameCountFromFrac; + + /* + If the total number of *whole* input frames that would be required to generate our preliminary output frame count is greater than + the amount of whole input frames we have available as input we need to *not* add an extra output frame as there won't be enough data + to actually process. Otherwise we need to add the extra output frame. + */ + if (preliminaryInputFrameCount <= inputFrameCount) { + outputFrameCount += 1; + } + + *pOutputFrameCount = outputFrameCount; + + return MA_SUCCESS; +} + +MA_API ma_result ma_linear_resampler_reset(ma_linear_resampler* pResampler) +{ + ma_uint32 iChannel; + + if (pResampler == NULL) { + return MA_INVALID_ARGS; + } + + /* Timers need to be cleared back to zero. */ + pResampler->inTimeInt = 1; /* Set this to one to force an input sample to always be loaded for the first output frame. */ + pResampler->inTimeFrac = 0; + + /* Cached samples need to be cleared. */ + if (pResampler->config.format == ma_format_f32) { + for (iChannel = 0; iChannel < pResampler->config.channels; iChannel += 1) { + pResampler->x0.f32[iChannel] = 0; + pResampler->x1.f32[iChannel] = 0; + } + } else { + for (iChannel = 0; iChannel < pResampler->config.channels; iChannel += 1) { + pResampler->x0.s16[iChannel] = 0; + pResampler->x1.s16[iChannel] = 0; + } + } + + /* The low pass filter needs to have its cache reset. */ + ma_lpf_clear_cache(&pResampler->lpf); + + return MA_SUCCESS; +} + + + +/* Linear resampler backend vtable. 
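+The adapters below wrap ma_linear_resampler in the generic ma_resampling_backend interface. ma_resampler
+stores the linear backend in-place (state.linear), so no extra allocation happens for the default algorithm.
+A custom backend is wired up through the same config fields, e.g. (a minimal sketch; myVTable and myUserData
+are hypothetical):
+
+    ma_resampler_config config = ma_resampler_config_init(ma_format_f32, 2, 44100, 48000, ma_resample_algorithm_custom);
+    config.pBackendVTable   = &myVTable;
+    config.pBackendUserData = &myUserData;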
*/ +static ma_linear_resampler_config ma_resampling_backend_get_config__linear(const ma_resampler_config* pConfig) +{ + ma_linear_resampler_config linearConfig; + + linearConfig = ma_linear_resampler_config_init(pConfig->format, pConfig->channels, pConfig->sampleRateIn, pConfig->sampleRateOut); + linearConfig.lpfOrder = pConfig->linear.lpfOrder; + + return linearConfig; +} + +static ma_result ma_resampling_backend_get_heap_size__linear(void* pUserData, const ma_resampler_config* pConfig, size_t* pHeapSizeInBytes) +{ + ma_linear_resampler_config linearConfig; + + (void)pUserData; + + linearConfig = ma_resampling_backend_get_config__linear(pConfig); + + return ma_linear_resampler_get_heap_size(&linearConfig, pHeapSizeInBytes); +} + +static ma_result ma_resampling_backend_init__linear(void* pUserData, const ma_resampler_config* pConfig, void* pHeap, ma_resampling_backend** ppBackend) +{ + ma_resampler* pResampler = (ma_resampler*)pUserData; + ma_result result; + ma_linear_resampler_config linearConfig; + + (void)pUserData; + + linearConfig = ma_resampling_backend_get_config__linear(pConfig); + + result = ma_linear_resampler_init_preallocated(&linearConfig, pHeap, &pResampler->state.linear); + if (result != MA_SUCCESS) { + return result; + } + + *ppBackend = &pResampler->state.linear; + + return MA_SUCCESS; +} + +static void ma_resampling_backend_uninit__linear(void* pUserData, ma_resampling_backend* pBackend, const ma_allocation_callbacks* pAllocationCallbacks) +{ + (void)pUserData; + + ma_linear_resampler_uninit((ma_linear_resampler*)pBackend, pAllocationCallbacks); +} + +static ma_result ma_resampling_backend_process__linear(void* pUserData, ma_resampling_backend* pBackend, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut) +{ + (void)pUserData; + + return ma_linear_resampler_process_pcm_frames((ma_linear_resampler*)pBackend, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut); +} + +static ma_result ma_resampling_backend_set_rate__linear(void* pUserData, ma_resampling_backend* pBackend, ma_uint32 sampleRateIn, ma_uint32 sampleRateOut) +{ + (void)pUserData; + + return ma_linear_resampler_set_rate((ma_linear_resampler*)pBackend, sampleRateIn, sampleRateOut); +} + +static ma_uint64 ma_resampling_backend_get_input_latency__linear(void* pUserData, const ma_resampling_backend* pBackend) +{ + (void)pUserData; + + return ma_linear_resampler_get_input_latency((const ma_linear_resampler*)pBackend); +} + +static ma_uint64 ma_resampling_backend_get_output_latency__linear(void* pUserData, const ma_resampling_backend* pBackend) +{ + (void)pUserData; + + return ma_linear_resampler_get_output_latency((const ma_linear_resampler*)pBackend); +} + +static ma_result ma_resampling_backend_get_required_input_frame_count__linear(void* pUserData, const ma_resampling_backend* pBackend, ma_uint64 outputFrameCount, ma_uint64* pInputFrameCount) +{ + (void)pUserData; + + return ma_linear_resampler_get_required_input_frame_count((const ma_linear_resampler*)pBackend, outputFrameCount, pInputFrameCount); +} + +static ma_result ma_resampling_backend_get_expected_output_frame_count__linear(void* pUserData, const ma_resampling_backend* pBackend, ma_uint64 inputFrameCount, ma_uint64* pOutputFrameCount) +{ + (void)pUserData; + + return ma_linear_resampler_get_expected_output_frame_count((const ma_linear_resampler*)pBackend, inputFrameCount, pOutputFrameCount); +} + +static ma_result ma_resampling_backend_reset__linear(void* pUserData, ma_resampling_backend* pBackend) +{ + 
(void)pUserData; + + return ma_linear_resampler_reset((ma_linear_resampler*)pBackend); +} + +static ma_resampling_backend_vtable g_ma_linear_resampler_vtable = +{ + ma_resampling_backend_get_heap_size__linear, + ma_resampling_backend_init__linear, + ma_resampling_backend_uninit__linear, + ma_resampling_backend_process__linear, + ma_resampling_backend_set_rate__linear, + ma_resampling_backend_get_input_latency__linear, + ma_resampling_backend_get_output_latency__linear, + ma_resampling_backend_get_required_input_frame_count__linear, + ma_resampling_backend_get_expected_output_frame_count__linear, + ma_resampling_backend_reset__linear +}; + + + +MA_API ma_resampler_config ma_resampler_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRateIn, ma_uint32 sampleRateOut, ma_resample_algorithm algorithm) +{ + ma_resampler_config config; + + MA_ZERO_OBJECT(&config); + config.format = format; + config.channels = channels; + config.sampleRateIn = sampleRateIn; + config.sampleRateOut = sampleRateOut; + config.algorithm = algorithm; + + /* Linear. */ + config.linear.lpfOrder = ma_min(MA_DEFAULT_RESAMPLER_LPF_ORDER, MA_MAX_FILTER_ORDER); + + return config; +} + +static ma_result ma_resampler_get_vtable(const ma_resampler_config* pConfig, ma_resampler* pResampler, ma_resampling_backend_vtable** ppVTable, void** ppUserData) +{ + MA_ASSERT(pConfig != NULL); + MA_ASSERT(ppVTable != NULL); + MA_ASSERT(ppUserData != NULL); + + /* Safety. */ + *ppVTable = NULL; + *ppUserData = NULL; + + switch (pConfig->algorithm) + { + case ma_resample_algorithm_linear: + { + *ppVTable = &g_ma_linear_resampler_vtable; + *ppUserData = pResampler; + } break; + + case ma_resample_algorithm_custom: + { + *ppVTable = pConfig->pBackendVTable; + *ppUserData = pConfig->pBackendUserData; + } break; + + default: return MA_INVALID_ARGS; + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_resampler_get_heap_size(const ma_resampler_config* pConfig, size_t* pHeapSizeInBytes) +{ + ma_result result; + ma_resampling_backend_vtable* pVTable; + void* pVTableUserData; + + if (pHeapSizeInBytes == NULL) { + return MA_INVALID_ARGS; + } + + *pHeapSizeInBytes = 0; + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + result = ma_resampler_get_vtable(pConfig, NULL, &pVTable, &pVTableUserData); + if (result != MA_SUCCESS) { + return result; + } + + if (pVTable == NULL || pVTable->onGetHeapSize == NULL) { + return MA_NOT_IMPLEMENTED; + } + + result = pVTable->onGetHeapSize(pVTableUserData, pConfig, pHeapSizeInBytes); + if (result != MA_SUCCESS) { + return result; + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_resampler_init_preallocated(const ma_resampler_config* pConfig, void* pHeap, ma_resampler* pResampler) +{ + ma_result result; + + if (pResampler == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pResampler); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + pResampler->_pHeap = pHeap; + pResampler->format = pConfig->format; + pResampler->channels = pConfig->channels; + pResampler->sampleRateIn = pConfig->sampleRateIn; + pResampler->sampleRateOut = pConfig->sampleRateOut; + + result = ma_resampler_get_vtable(pConfig, pResampler, &pResampler->pBackendVTable, &pResampler->pBackendUserData); + if (result != MA_SUCCESS) { + return result; + } + + if (pResampler->pBackendVTable == NULL || pResampler->pBackendVTable->onInit == NULL) { + return MA_NOT_IMPLEMENTED; /* onInit not implemented. 
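+                                       onInit is the only mandatory callback; every other vtable entry is
+                                       NULL-checked at its call site below and degrades to MA_NOT_IMPLEMENTED
+                                       (or a zero latency) when absent.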
*/ + } + + result = pResampler->pBackendVTable->onInit(pResampler->pBackendUserData, pConfig, pHeap, &pResampler->pBackend); + if (result != MA_SUCCESS) { + return result; + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_resampler_init(const ma_resampler_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_resampler* pResampler) +{ + ma_result result; + size_t heapSizeInBytes; + void* pHeap; + + result = ma_resampler_get_heap_size(pConfig, &heapSizeInBytes); + if (result != MA_SUCCESS) { + return result; + } + + if (heapSizeInBytes > 0) { + pHeap = ma_malloc(heapSizeInBytes, pAllocationCallbacks); + if (pHeap == NULL) { + return MA_OUT_OF_MEMORY; + } + } else { + pHeap = NULL; + } + + result = ma_resampler_init_preallocated(pConfig, pHeap, pResampler); + if (result != MA_SUCCESS) { + ma_free(pHeap, pAllocationCallbacks); + return result; + } + + pResampler->_ownsHeap = MA_TRUE; + return MA_SUCCESS; +} + +MA_API void ma_resampler_uninit(ma_resampler* pResampler, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pResampler == NULL) { + return; + } + + if (pResampler->pBackendVTable == NULL || pResampler->pBackendVTable->onUninit == NULL) { + return; + } + + pResampler->pBackendVTable->onUninit(pResampler->pBackendUserData, pResampler->pBackend, pAllocationCallbacks); + + if (pResampler->_ownsHeap) { + ma_free(pResampler->_pHeap, pAllocationCallbacks); + } +} + +MA_API ma_result ma_resampler_process_pcm_frames(ma_resampler* pResampler, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut) +{ + if (pResampler == NULL) { + return MA_INVALID_ARGS; + } + + if (pFrameCountOut == NULL && pFrameCountIn == NULL) { + return MA_INVALID_ARGS; + } + + if (pResampler->pBackendVTable == NULL || pResampler->pBackendVTable->onProcess == NULL) { + return MA_NOT_IMPLEMENTED; + } + + return pResampler->pBackendVTable->onProcess(pResampler->pBackendUserData, pResampler->pBackend, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut); +} + +MA_API ma_result ma_resampler_set_rate(ma_resampler* pResampler, ma_uint32 sampleRateIn, ma_uint32 sampleRateOut) +{ + ma_result result; + + if (pResampler == NULL) { + return MA_INVALID_ARGS; + } + + if (sampleRateIn == 0 || sampleRateOut == 0) { + return MA_INVALID_ARGS; + } + + if (pResampler->pBackendVTable == NULL || pResampler->pBackendVTable->onSetRate == NULL) { + return MA_NOT_IMPLEMENTED; + } + + result = pResampler->pBackendVTable->onSetRate(pResampler->pBackendUserData, pResampler->pBackend, sampleRateIn, sampleRateOut); + if (result != MA_SUCCESS) { + return result; + } + + pResampler->sampleRateIn = sampleRateIn; + pResampler->sampleRateOut = sampleRateOut; + + return MA_SUCCESS; +} + +MA_API ma_result ma_resampler_set_rate_ratio(ma_resampler* pResampler, float ratio) +{ + ma_uint32 n; + ma_uint32 d; + + if (pResampler == NULL) { + return MA_INVALID_ARGS; + } + + if (ratio <= 0) { + return MA_INVALID_ARGS; + } + + d = 1000; + n = (ma_uint32)(ratio * d); + + if (n == 0) { + return MA_INVALID_ARGS; /* Ratio too small. 
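+                                With d = 1000 the ratio is quantized to three decimal places (e.g. a ratio of
+                                0.91875 becomes 918/1000); note that ma_linear_resampler_set_rate_ratio()
+                                above uses d = 1000000 for finer granularity.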
*/ + } + + MA_ASSERT(n != 0); + + return ma_resampler_set_rate(pResampler, n, d); +} + +MA_API ma_uint64 ma_resampler_get_input_latency(const ma_resampler* pResampler) +{ + if (pResampler == NULL) { + return 0; + } + + if (pResampler->pBackendVTable == NULL || pResampler->pBackendVTable->onGetInputLatency == NULL) { + return 0; + } + + return pResampler->pBackendVTable->onGetInputLatency(pResampler->pBackendUserData, pResampler->pBackend); +} + +MA_API ma_uint64 ma_resampler_get_output_latency(const ma_resampler* pResampler) +{ + if (pResampler == NULL) { + return 0; + } + + if (pResampler->pBackendVTable == NULL || pResampler->pBackendVTable->onGetOutputLatency == NULL) { + return 0; + } + + return pResampler->pBackendVTable->onGetOutputLatency(pResampler->pBackendUserData, pResampler->pBackend); +} + +MA_API ma_result ma_resampler_get_required_input_frame_count(const ma_resampler* pResampler, ma_uint64 outputFrameCount, ma_uint64* pInputFrameCount) +{ + if (pInputFrameCount == NULL) { + return MA_INVALID_ARGS; + } + + *pInputFrameCount = 0; + + if (pResampler == NULL) { + return MA_INVALID_ARGS; + } + + if (pResampler->pBackendVTable == NULL || pResampler->pBackendVTable->onGetRequiredInputFrameCount == NULL) { + return MA_NOT_IMPLEMENTED; + } + + return pResampler->pBackendVTable->onGetRequiredInputFrameCount(pResampler->pBackendUserData, pResampler->pBackend, outputFrameCount, pInputFrameCount); +} + +MA_API ma_result ma_resampler_get_expected_output_frame_count(const ma_resampler* pResampler, ma_uint64 inputFrameCount, ma_uint64* pOutputFrameCount) +{ + if (pOutputFrameCount == NULL) { + return MA_INVALID_ARGS; + } + + *pOutputFrameCount = 0; + + if (pResampler == NULL) { + return MA_INVALID_ARGS; + } + + if (pResampler->pBackendVTable == NULL || pResampler->pBackendVTable->onGetExpectedOutputFrameCount == NULL) { + return MA_NOT_IMPLEMENTED; + } + + return pResampler->pBackendVTable->onGetExpectedOutputFrameCount(pResampler->pBackendUserData, pResampler->pBackend, inputFrameCount, pOutputFrameCount); +} + +MA_API ma_result ma_resampler_reset(ma_resampler* pResampler) +{ + if (pResampler == NULL) { + return MA_INVALID_ARGS; + } + + if (pResampler->pBackendVTable == NULL || pResampler->pBackendVTable->onReset == NULL) { + return MA_NOT_IMPLEMENTED; + } + + return pResampler->pBackendVTable->onReset(pResampler->pBackendUserData, pResampler->pBackend); +} + +/************************************************************************************************************************************************************** + +Channel Conversion + +**************************************************************************************************************************************************************/ +#ifndef MA_CHANNEL_CONVERTER_FIXED_POINT_SHIFT +#define MA_CHANNEL_CONVERTER_FIXED_POINT_SHIFT 12 +#endif + +#define MA_PLANE_LEFT 0 +#define MA_PLANE_RIGHT 1 +#define MA_PLANE_FRONT 2 +#define MA_PLANE_BACK 3 +#define MA_PLANE_BOTTOM 4 +#define MA_PLANE_TOP 5 + +static float g_maChannelPlaneRatios[MA_CHANNEL_POSITION_COUNT][6] = { + { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_NONE */ + { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_MONO */ + { 0.5f, 0.0f, 0.5f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_FRONT_LEFT */ + { 0.0f, 0.5f, 0.5f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_FRONT_RIGHT */ + { 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_FRONT_CENTER */ + { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_LFE */ + { 0.5f, 0.0f, 0.0f, 0.5f, 0.0f, 0.0f}, /* MA_CHANNEL_BACK_LEFT */ + { 
0.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.0f}, /* MA_CHANNEL_BACK_RIGHT */
+    { 0.25f, 0.0f, 0.75f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_FRONT_LEFT_CENTER */
+    { 0.0f, 0.25f, 0.75f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_FRONT_RIGHT_CENTER */
+    { 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f}, /* MA_CHANNEL_BACK_CENTER */
+    { 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_SIDE_LEFT */
+    { 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_SIDE_RIGHT */
+    { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f}, /* MA_CHANNEL_TOP_CENTER */
+    { 0.33f, 0.0f, 0.33f, 0.0f, 0.0f, 0.34f}, /* MA_CHANNEL_TOP_FRONT_LEFT */
+    { 0.0f, 0.0f, 0.5f, 0.0f, 0.0f, 0.5f}, /* MA_CHANNEL_TOP_FRONT_CENTER */
+    { 0.0f, 0.33f, 0.33f, 0.0f, 0.0f, 0.34f}, /* MA_CHANNEL_TOP_FRONT_RIGHT */
+    { 0.33f, 0.0f, 0.0f, 0.33f, 0.0f, 0.34f}, /* MA_CHANNEL_TOP_BACK_LEFT */
+    { 0.0f, 0.0f, 0.0f, 0.5f, 0.0f, 0.5f}, /* MA_CHANNEL_TOP_BACK_CENTER */
+    { 0.0f, 0.33f, 0.0f, 0.33f, 0.0f, 0.34f}, /* MA_CHANNEL_TOP_BACK_RIGHT */
+    { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_0 */
+    { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_1 */
+    { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_2 */
+    { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_3 */
+    { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_4 */
+    { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_5 */
+    { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_6 */
+    { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_7 */
+    { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_8 */
+    { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_9 */
+    { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_10 */
+    { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_11 */
+    { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_12 */
+    { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_13 */
+    { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_14 */
+    { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_15 */
+    { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_16 */
+    { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_17 */
+    { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_18 */
+    { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_19 */
+    { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_20 */
+    { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_21 */
+    { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_22 */
+    { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_23 */
+    { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_24 */
+    { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_25 */
+    { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_26 */
+    { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_27 */
+    { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_28 */
+    { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_29 */
+    { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_30 */
+    { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_31 */
+};
+
+static float ma_calculate_channel_position_rectangular_weight(ma_channel channelPositionA, ma_channel channelPositionB)
+{
+    /*
+    Imagine the following simplified example: You have a single input speaker which is the front/left speaker which you want to convert to
+    the following output configuration:
+
+      - front/left
+      - side/left
+      - back/left
+
+    The front/left output is easy - it is the same speaker position so it receives the full contribution of the front/left input. The amount
+    of contribution to apply to the side/left and back/left speakers, however, is a bit more complicated.
+
+    Imagine the front/left speaker as emitting audio from two planes - the front plane and the left plane. You can think of the front/left
+    speaker emitting half of its total volume from the front, and the other half from the left. Since part of its volume is being emitted
+    from the left side, and the side/left and back/left channels also emit audio from the left plane, one would expect that they would
+    receive some amount of contribution from the front/left speaker. The amount of contribution depends on how many planes are shared between
+    the two speakers. Note that in the examples below I've added a top/front/left speaker as an example just to show how the math works
+    across 3 spatial dimensions.
+
+    The first thing to do is figure out how each speaker's volume is spread over each plane:
+      - front/left:     2 planes (front and left)      = 1/2 = half its total volume on each plane
+      - side/left:      1 plane (left only)            = 1/1 = entire volume from left plane
+      - back/left:      2 planes (back and left)       = 1/2 = half its total volume on each plane
+      - top/front/left: 3 planes (top, front and left) = 1/3 = one third its total volume on each plane
+
+    The amount of volume each channel contributes to each of its planes is what controls how much it is willing to give to, and take from,
+    other channels on the same plane. The volume one channel is willing to give is multiplied by the volume the other is willing to take to
+    produce the final contribution.
+    */
+
+    /* Contribution = Sum(Volume to Give * Volume to Take) */
+    float contribution =
+        g_maChannelPlaneRatios[channelPositionA][0] * g_maChannelPlaneRatios[channelPositionB][0] +
+        g_maChannelPlaneRatios[channelPositionA][1] * g_maChannelPlaneRatios[channelPositionB][1] +
+        g_maChannelPlaneRatios[channelPositionA][2] * g_maChannelPlaneRatios[channelPositionB][2] +
+        g_maChannelPlaneRatios[channelPositionA][3] * g_maChannelPlaneRatios[channelPositionB][3] +
+        g_maChannelPlaneRatios[channelPositionA][4] * g_maChannelPlaneRatios[channelPositionB][4] +
+        g_maChannelPlaneRatios[channelPositionA][5] * g_maChannelPlaneRatios[channelPositionB][5];
+
+    return contribution;
+}
+
+MA_API ma_channel_converter_config ma_channel_converter_config_init(ma_format format, ma_uint32 channelsIn, const ma_channel* pChannelMapIn, ma_uint32 channelsOut, const ma_channel* pChannelMapOut, ma_channel_mix_mode mixingMode)
+{
+    ma_channel_converter_config config;
+
+    MA_ZERO_OBJECT(&config);
+    config.format         = format;
+    config.channelsIn     = channelsIn;
+    config.channelsOut    = channelsOut;
+    config.pChannelMapIn  = pChannelMapIn;
+    config.pChannelMapOut = pChannelMapOut;
+    config.mixingMode     = mixingMode;
+
+    return config;
+}
+
+static ma_int32 ma_channel_converter_float_to_fixed(float x)
+{
+    return (ma_int32)(x * (1<<MA_CHANNEL_CONVERTER_FIXED_POINT_SHIFT));
+}
+
+static ma_bool32 ma_is_spatial_channel_position(ma_channel channelPosition);
+
+static ma_uint32 ma_channel_map_get_spatial_channel_count(const ma_channel* pChannelMap, ma_uint32 channels)
+{
+    ma_uint32 spatialChannelCount = 0;
+    ma_uint32 iChannel;
+
+    MA_ASSERT(pChannelMap != NULL);
+    MA_ASSERT(channels > 0);
+
+    for (iChannel = 0; iChannel < channels; ++iChannel) {
+        if (ma_is_spatial_channel_position(ma_channel_map_get_channel(pChannelMap, channels, iChannel))) {
+            spatialChannelCount++;
+        }
+    }
+
+    return spatialChannelCount;
+}
+
+static ma_bool32 ma_is_spatial_channel_position(ma_channel channelPosition)
+{
+    int i;
+
+    if (channelPosition == MA_CHANNEL_NONE || channelPosition == MA_CHANNEL_MONO || channelPosition == MA_CHANNEL_LFE) {
+        return MA_FALSE;
+    }
+
+    if (channelPosition >= MA_CHANNEL_AUX_0 && channelPosition <= MA_CHANNEL_AUX_31) {
+        return MA_FALSE;
+    }
+
+    for (i = 0; i < 6; ++i) {   /* Each side of a cube.
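+                                  The six indices correspond to the MA_PLANE_LEFT..MA_PLANE_TOP defines above.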
*/ + if (g_maChannelPlaneRatios[channelPosition][i] != 0) { + return MA_TRUE; + } + } + + return MA_FALSE; +} + + +static ma_bool32 ma_channel_map_is_passthrough(const ma_channel* pChannelMapIn, ma_uint32 channelsIn, const ma_channel* pChannelMapOut, ma_uint32 channelsOut) +{ + if (channelsOut == channelsIn) { + return ma_channel_map_is_equal(pChannelMapOut, pChannelMapIn, channelsOut); + } else { + return MA_FALSE; /* Channel counts differ, so cannot be a passthrough. */ + } +} + +static ma_channel_conversion_path ma_channel_map_get_conversion_path(const ma_channel* pChannelMapIn, ma_uint32 channelsIn, const ma_channel* pChannelMapOut, ma_uint32 channelsOut, ma_channel_mix_mode mode) +{ + if (ma_channel_map_is_passthrough(pChannelMapIn, channelsIn, pChannelMapOut, channelsOut)) { + return ma_channel_conversion_path_passthrough; + } + + if (channelsOut == 1 && (pChannelMapOut == NULL || pChannelMapOut[0] == MA_CHANNEL_MONO)) { + return ma_channel_conversion_path_mono_out; + } + + if (channelsIn == 1 && (pChannelMapIn == NULL || pChannelMapIn[0] == MA_CHANNEL_MONO)) { + return ma_channel_conversion_path_mono_in; + } + + if (mode == ma_channel_mix_mode_custom_weights) { + return ma_channel_conversion_path_weights; + } + + /* + We can use a simple shuffle if both channel maps have the same channel count and all channel + positions are present in both. + */ + if (channelsIn == channelsOut) { + ma_uint32 iChannelIn; + ma_bool32 areAllChannelPositionsPresent = MA_TRUE; + for (iChannelIn = 0; iChannelIn < channelsIn; ++iChannelIn) { + ma_bool32 isInputChannelPositionInOutput = ma_channel_map_contains_channel_position(channelsOut, pChannelMapOut, ma_channel_map_get_channel(pChannelMapIn, channelsIn, iChannelIn)); + if (!isInputChannelPositionInOutput) { + areAllChannelPositionsPresent = MA_FALSE; + break; + } + } + + if (areAllChannelPositionsPresent) { + return ma_channel_conversion_path_shuffle; + } + } + + /* Getting here means we'll need to use weights. */ + return ma_channel_conversion_path_weights; +} + + +static ma_result ma_channel_map_build_shuffle_table(const ma_channel* pChannelMapIn, ma_uint32 channelCountIn, const ma_channel* pChannelMapOut, ma_uint32 channelCountOut, ma_uint8* pShuffleTable) +{ + ma_uint32 iChannelIn; + ma_uint32 iChannelOut; + + if (pShuffleTable == NULL || channelCountIn == 0 || channelCountOut == 0) { + return MA_INVALID_ARGS; + } + + /* + When building the shuffle table we just do a 1:1 mapping based on the first occurrence of a channel. If the + input channel has more than one occurrence of a channel position, the second one will be ignored. + */ + for (iChannelOut = 0; iChannelOut < channelCountOut; iChannelOut += 1) { + ma_channel channelOut; + + /* Default to MA_CHANNEL_INDEX_NULL so that if a mapping is not found it'll be set appropriately. */ + pShuffleTable[iChannelOut] = MA_CHANNEL_INDEX_NULL; + + channelOut = ma_channel_map_get_channel(pChannelMapOut, channelCountOut, iChannelOut); + for (iChannelIn = 0; iChannelIn < channelCountIn; iChannelIn += 1) { + ma_channel channelIn; + + channelIn = ma_channel_map_get_channel(pChannelMapIn, channelCountIn, iChannelIn); + if (channelOut == channelIn) { + pShuffleTable[iChannelOut] = (ma_uint8)iChannelIn; + break; + } + + /* + Getting here means the channels don't exactly match, but we are going to support some + relaxed matching for practicality. If, for example, there are two stereo channel maps, + but one uses front left/right and the other uses side left/right, it makes logical + sense to just map these. 
The way we'll do it is we'll check if there is a logical + corresponding mapping, and if so, apply it, but we will *not* break from the loop, + thereby giving the loop a chance to find an exact match later which will take priority. + */ + switch (channelOut) + { + /* Left channels. */ + case MA_CHANNEL_FRONT_LEFT: + case MA_CHANNEL_SIDE_LEFT: + { + switch (channelIn) { + case MA_CHANNEL_FRONT_LEFT: + case MA_CHANNEL_SIDE_LEFT: + { + pShuffleTable[iChannelOut] = (ma_uint8)iChannelIn; + } break; + } + } break; + + /* Right channels. */ + case MA_CHANNEL_FRONT_RIGHT: + case MA_CHANNEL_SIDE_RIGHT: + { + switch (channelIn) { + case MA_CHANNEL_FRONT_RIGHT: + case MA_CHANNEL_SIDE_RIGHT: + { + pShuffleTable[iChannelOut] = (ma_uint8)iChannelIn; + } break; + } + } break; + + default: break; + } + } + } + + return MA_SUCCESS; +} + + +static void ma_channel_map_apply_shuffle_table_u8(ma_uint8* pFramesOut, ma_uint32 channelsOut, const ma_uint8* pFramesIn, ma_uint32 channelsIn, ma_uint64 frameCount, const ma_uint8* pShuffleTable) +{ + ma_uint64 iFrame; + ma_uint32 iChannelOut; + + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + for (iChannelOut = 0; iChannelOut < channelsOut; iChannelOut += 1) { + ma_uint8 iChannelIn = pShuffleTable[iChannelOut]; + if (iChannelIn < channelsIn) { /* For safety, and to deal with MA_CHANNEL_INDEX_NULL. */ + pFramesOut[iChannelOut] = pFramesIn[iChannelIn]; + } else { + pFramesOut[iChannelOut] = 0; + } + } + + pFramesOut += channelsOut; + pFramesIn += channelsIn; + } +} + +static void ma_channel_map_apply_shuffle_table_s16(ma_int16* pFramesOut, ma_uint32 channelsOut, const ma_int16* pFramesIn, ma_uint32 channelsIn, ma_uint64 frameCount, const ma_uint8* pShuffleTable) +{ + ma_uint64 iFrame; + ma_uint32 iChannelOut; + + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + for (iChannelOut = 0; iChannelOut < channelsOut; iChannelOut += 1) { + ma_uint8 iChannelIn = pShuffleTable[iChannelOut]; + if (iChannelIn < channelsIn) { /* For safety, and to deal with MA_CHANNEL_INDEX_NULL. */ + pFramesOut[iChannelOut] = pFramesIn[iChannelIn]; + } else { + pFramesOut[iChannelOut] = 0; + } + } + + pFramesOut += channelsOut; + pFramesIn += channelsIn; + } +} + +static void ma_channel_map_apply_shuffle_table_s24(ma_uint8* pFramesOut, ma_uint32 channelsOut, const ma_uint8* pFramesIn, ma_uint32 channelsIn, ma_uint64 frameCount, const ma_uint8* pShuffleTable) +{ + ma_uint64 iFrame; + ma_uint32 iChannelOut; + + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + for (iChannelOut = 0; iChannelOut < channelsOut; iChannelOut += 1) { + ma_uint8 iChannelIn = pShuffleTable[iChannelOut]; + if (iChannelIn < channelsIn) { /* For safety, and to deal with MA_CHANNEL_INDEX_NULL. 
*/
+                pFramesOut[iChannelOut*3 + 0] = pFramesIn[iChannelIn*3 + 0];
+                pFramesOut[iChannelOut*3 + 1] = pFramesIn[iChannelIn*3 + 1];
+                pFramesOut[iChannelOut*3 + 2] = pFramesIn[iChannelIn*3 + 2];
+            } else {
+                pFramesOut[iChannelOut*3 + 0] = 0;
+                pFramesOut[iChannelOut*3 + 1] = 0;
+                pFramesOut[iChannelOut*3 + 2] = 0;
+            }
+        }
+
+        pFramesOut += channelsOut*3;
+        pFramesIn  += channelsIn*3;
+    }
+}
+
+static void ma_channel_map_apply_shuffle_table_s32(ma_int32* pFramesOut, ma_uint32 channelsOut, const ma_int32* pFramesIn, ma_uint32 channelsIn, ma_uint64 frameCount, const ma_uint8* pShuffleTable)
+{
+    ma_uint64 iFrame;
+    ma_uint32 iChannelOut;
+
+    for (iFrame = 0; iFrame < frameCount; iFrame += 1) {
+        for (iChannelOut = 0; iChannelOut < channelsOut; iChannelOut += 1) {
+            ma_uint8 iChannelIn = pShuffleTable[iChannelOut];
+            if (iChannelIn < channelsIn) {  /* For safety, and to deal with MA_CHANNEL_INDEX_NULL. */
+                pFramesOut[iChannelOut] = pFramesIn[iChannelIn];
+            } else {
+                pFramesOut[iChannelOut] = 0;
+            }
+        }
+
+        pFramesOut += channelsOut;
+        pFramesIn  += channelsIn;
+    }
+}
+
+static void ma_channel_map_apply_shuffle_table_f32(float* pFramesOut, ma_uint32 channelsOut, const float* pFramesIn, ma_uint32 channelsIn, ma_uint64 frameCount, const ma_uint8* pShuffleTable)
+{
+    ma_uint64 iFrame;
+    ma_uint32 iChannelOut;
+
+    for (iFrame = 0; iFrame < frameCount; iFrame += 1) {
+        for (iChannelOut = 0; iChannelOut < channelsOut; iChannelOut += 1) {
+            ma_uint8 iChannelIn = pShuffleTable[iChannelOut];
+            if (iChannelIn < channelsIn) {  /* For safety, and to deal with MA_CHANNEL_INDEX_NULL. */
+                pFramesOut[iChannelOut] = pFramesIn[iChannelIn];
+            } else {
+                pFramesOut[iChannelOut] = 0;
+            }
+        }
+
+        pFramesOut += channelsOut;
+        pFramesIn  += channelsIn;
+    }
+}
+
+static ma_result ma_channel_map_apply_shuffle_table(void* pFramesOut, ma_uint32 channelsOut, const void* pFramesIn, ma_uint32 channelsIn, ma_uint64 frameCount, const ma_uint8* pShuffleTable, ma_format format)
+{
+    if (pFramesOut == NULL || pFramesIn == NULL || channelsOut == 0 || pShuffleTable == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    switch (format)
+    {
+        case ma_format_u8:
+        {
+            ma_channel_map_apply_shuffle_table_u8((ma_uint8*)pFramesOut, channelsOut, (const ma_uint8*)pFramesIn, channelsIn, frameCount, pShuffleTable);
+        } break;
+
+        case ma_format_s16:
+        {
+            ma_channel_map_apply_shuffle_table_s16((ma_int16*)pFramesOut, channelsOut, (const ma_int16*)pFramesIn, channelsIn, frameCount, pShuffleTable);
+        } break;
+
+        case ma_format_s24:
+        {
+            ma_channel_map_apply_shuffle_table_s24((ma_uint8*)pFramesOut, channelsOut, (const ma_uint8*)pFramesIn, channelsIn, frameCount, pShuffleTable);
+        } break;
+
+        case ma_format_s32:
+        {
+            ma_channel_map_apply_shuffle_table_s32((ma_int32*)pFramesOut, channelsOut, (const ma_int32*)pFramesIn, channelsIn, frameCount, pShuffleTable);
+        } break;
+
+        case ma_format_f32:
+        {
+            ma_channel_map_apply_shuffle_table_f32((float*)pFramesOut, channelsOut, (const float*)pFramesIn, channelsIn, frameCount, pShuffleTable);
+        } break;
+
+        default: return MA_INVALID_ARGS;    /* Unknown format.
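+                                            Only the five interleaved PCM formats above can be shuffled;
+                                            anything else is rejected rather than guessed at.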
*/ + } + + return MA_SUCCESS; +} + +static ma_result ma_channel_map_apply_mono_out_f32(float* pFramesOut, const float* pFramesIn, const ma_channel* pChannelMapIn, ma_uint32 channelsIn, ma_uint64 frameCount) +{ + ma_uint64 iFrame; + ma_uint32 iChannelIn; + ma_uint32 accumulationCount; + + if (pFramesOut == NULL || pFramesIn == NULL || channelsIn == 0) { + return MA_INVALID_ARGS; + } + + /* In this case the output stream needs to be the average of all channels, ignoring NONE. */ + + /* A quick pre-processing step to get the accumulation counter since we're ignoring NONE channels. */ + accumulationCount = 0; + for (iChannelIn = 0; iChannelIn < channelsIn; iChannelIn += 1) { + if (ma_channel_map_get_channel(pChannelMapIn, channelsIn, iChannelIn) != MA_CHANNEL_NONE) { + accumulationCount += 1; + } + } + + if (accumulationCount > 0) { /* <-- Prevent a division by zero. */ + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + float accumulation = 0; + + for (iChannelIn = 0; iChannelIn < channelsIn; iChannelIn += 1) { + ma_channel channelIn = ma_channel_map_get_channel(pChannelMapIn, channelsIn, iChannelIn); + if (channelIn != MA_CHANNEL_NONE) { + accumulation += pFramesIn[iChannelIn]; + } + } + + pFramesOut[0] = accumulation / accumulationCount; + pFramesOut += 1; + pFramesIn += channelsIn; + } + } else { + ma_silence_pcm_frames(pFramesOut, frameCount, ma_format_f32, 1); + } + + return MA_SUCCESS; +} + +static ma_result ma_channel_map_apply_mono_in_f32(float* MA_RESTRICT pFramesOut, const ma_channel* pChannelMapOut, ma_uint32 channelsOut, const float* MA_RESTRICT pFramesIn, ma_uint64 frameCount, ma_mono_expansion_mode monoExpansionMode) +{ + ma_uint64 iFrame; + ma_uint32 iChannelOut; + + if (pFramesOut == NULL || channelsOut == 0 || pFramesIn == NULL) { + return MA_INVALID_ARGS; + } + + /* Note that the MA_CHANNEL_NONE channel must be ignored in all cases. */ + switch (monoExpansionMode) + { + case ma_mono_expansion_mode_average: + { + float weight; + ma_uint32 validChannelCount = 0; + + for (iChannelOut = 0; iChannelOut < channelsOut; iChannelOut += 1) { + ma_channel channelOut = ma_channel_map_get_channel(pChannelMapOut, channelsOut, iChannelOut); + if (channelOut != MA_CHANNEL_NONE) { + validChannelCount += 1; + } + } + + weight = 1.0f / validChannelCount; + + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + for (iChannelOut = 0; iChannelOut < channelsOut; iChannelOut += 1) { + ma_channel channelOut = ma_channel_map_get_channel(pChannelMapOut, channelsOut, iChannelOut); + if (channelOut != MA_CHANNEL_NONE) { + pFramesOut[iChannelOut] = pFramesIn[0] * weight; + } + } + + pFramesOut += channelsOut; + pFramesIn += 1; + } + } break; + + case ma_mono_expansion_mode_stereo_only: + { + if (channelsOut >= 2) { + ma_uint32 iChannelLeft = (ma_uint32)-1; + ma_uint32 iChannelRight = (ma_uint32)-1; + + /* + We first need to find our stereo channels. We prefer front-left and front-right, but + if they're not available, we'll also try side-left and side-right. If neither are + available we'll fall through to the default case below. 
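+                Note that the side pair is scanned first and the front pair second, so when a channel map
+                contains both, the front left/right channels win.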
+                */
+                for (iChannelOut = 0; iChannelOut < channelsOut; iChannelOut += 1) {
+                    ma_channel channelOut = ma_channel_map_get_channel(pChannelMapOut, channelsOut, iChannelOut);
+                    if (channelOut == MA_CHANNEL_SIDE_LEFT) {
+                        iChannelLeft = iChannelOut;
+                    }
+                    if (channelOut == MA_CHANNEL_SIDE_RIGHT) {
+                        iChannelRight = iChannelOut;
+                    }
+                }
+
+                for (iChannelOut = 0; iChannelOut < channelsOut; iChannelOut += 1) {
+                    ma_channel channelOut = ma_channel_map_get_channel(pChannelMapOut, channelsOut, iChannelOut);
+                    if (channelOut == MA_CHANNEL_FRONT_LEFT) {
+                        iChannelLeft = iChannelOut;
+                    }
+                    if (channelOut == MA_CHANNEL_FRONT_RIGHT) {
+                        iChannelRight = iChannelOut;
+                    }
+                }
+
+
+                if (iChannelLeft != (ma_uint32)-1 && iChannelRight != (ma_uint32)-1) {
+                    /* We found our stereo channels so we can duplicate the signal across those channels. */
+                    for (iFrame = 0; iFrame < frameCount; iFrame += 1) {
+                        for (iChannelOut = 0; iChannelOut < channelsOut; iChannelOut += 1) {
+                            ma_channel channelOut = ma_channel_map_get_channel(pChannelMapOut, channelsOut, iChannelOut);
+                            if (channelOut != MA_CHANNEL_NONE) {
+                                if (iChannelOut == iChannelLeft || iChannelOut == iChannelRight) {
+                                    pFramesOut[iChannelOut] = pFramesIn[0];
+                                } else {
+                                    pFramesOut[iChannelOut] = 0.0f;
+                                }
+                            }
+                        }
+
+                        pFramesOut += channelsOut;
+                        pFramesIn  += 1;
+                    }
+
+                    break;  /* Get out of the switch. */
+                } else {
+                    /* Fallthrough. Does not have left and right channels. */
+                    goto default_handler;
+                }
+            } else {
+                /* Fallthrough. Does not have stereo channels. */
+                goto default_handler;
+            }
+        };  /* Fallthrough. See comments above. */
+
+        case ma_mono_expansion_mode_duplicate:
+        default:
+        {
+            default_handler:
+            {
+                if (channelsOut <= MA_MAX_CHANNELS) {
+                    ma_bool32 hasEmptyChannel = MA_FALSE;
+                    ma_channel channelPositions[MA_MAX_CHANNELS];
+                    for (iChannelOut = 0; iChannelOut < channelsOut; iChannelOut += 1) {
+                        channelPositions[iChannelOut] = ma_channel_map_get_channel(pChannelMapOut, channelsOut, iChannelOut);
+                        if (channelPositions[iChannelOut] == MA_CHANNEL_NONE) {
+                            hasEmptyChannel = MA_TRUE;
+                        }
+                    }
+
+                    if (hasEmptyChannel == MA_FALSE) {
+                        /*
+                        Faster path when there's no MA_CHANNEL_NONE channel positions. This should hopefully
+                        help the compiler with auto-vectorization.
+                        */
+                        if (channelsOut == 2) {
+                            #if defined(MA_SUPPORT_SSE2)
+                            if (ma_has_sse2()) {
+                                /* We want to do two frames in each iteration. */
+                                ma_uint64 unrolledFrameCount = frameCount >> 1;
+
+                                for (iFrame = 0; iFrame < unrolledFrameCount; iFrame += 1) {
+                                    __m128 in0 = _mm_set1_ps(pFramesIn[iFrame*2 + 0]);
+                                    __m128 in1 = _mm_set1_ps(pFramesIn[iFrame*2 + 1]);
+                                    _mm_storeu_ps(&pFramesOut[iFrame*4 + 0], _mm_shuffle_ps(in0, in1, _MM_SHUFFLE(0, 0, 0, 0)));
+                                }
+
+                                /* Tail. */
+                                iFrame = unrolledFrameCount << 1;
+                                goto generic_on_fastpath;
+                            } else
+                            #endif
+                            {
+                                for (iFrame = 0; iFrame < frameCount; iFrame += 1) {
+                                    for (iChannelOut = 0; iChannelOut < 2; iChannelOut += 1) {
+                                        pFramesOut[iFrame*2 + iChannelOut] = pFramesIn[iFrame];
+                                    }
+                                }
+                            }
+                        } else if (channelsOut == 6) {
+                            #if defined(MA_SUPPORT_SSE2)
+                            if (ma_has_sse2()) {
+                                /* We want to do two frames in each iteration so we can have a multiple of 4 samples.
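+                                   Two frames of 6 channels give 12 samples, i.e. exactly three 4-wide SSE
+                                   stores per iteration.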
*/ + ma_uint64 unrolledFrameCount = frameCount >> 1; + + for (iFrame = 0; iFrame < unrolledFrameCount; iFrame += 1) { + __m128 in0 = _mm_set1_ps(pFramesIn[iFrame*2 + 0]); + __m128 in1 = _mm_set1_ps(pFramesIn[iFrame*2 + 1]); + + _mm_storeu_ps(&pFramesOut[iFrame*12 + 0], in0); + _mm_storeu_ps(&pFramesOut[iFrame*12 + 4], _mm_shuffle_ps(in0, in1, _MM_SHUFFLE(0, 0, 0, 0))); + _mm_storeu_ps(&pFramesOut[iFrame*12 + 8], in1); + } + + /* Tail. */ + iFrame = unrolledFrameCount << 1; + goto generic_on_fastpath; + } else + #endif + { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + for (iChannelOut = 0; iChannelOut < 6; iChannelOut += 1) { + pFramesOut[iFrame*6 + iChannelOut] = pFramesIn[iFrame]; + } + } + } + } else if (channelsOut == 8) { + #if defined(MA_SUPPORT_SSE2) + if (ma_has_sse2()) { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + __m128 in = _mm_set1_ps(pFramesIn[iFrame]); + _mm_storeu_ps(&pFramesOut[iFrame*8 + 0], in); + _mm_storeu_ps(&pFramesOut[iFrame*8 + 4], in); + } + } else + #endif + { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + for (iChannelOut = 0; iChannelOut < 8; iChannelOut += 1) { + pFramesOut[iFrame*8 + iChannelOut] = pFramesIn[iFrame]; + } + } + } + } else { + iFrame = 0; + + #if defined(MA_SUPPORT_SSE2) /* For silencing a warning with non-x86 builds. */ + generic_on_fastpath: + #endif + { + for (; iFrame < frameCount; iFrame += 1) { + for (iChannelOut = 0; iChannelOut < channelsOut; iChannelOut += 1) { + pFramesOut[iFrame*channelsOut + iChannelOut] = pFramesIn[iFrame]; + } + } + } + } + } else { + /* Slow path. Need to handle MA_CHANNEL_NONE. */ + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + for (iChannelOut = 0; iChannelOut < channelsOut; iChannelOut += 1) { + if (channelPositions[iChannelOut] != MA_CHANNEL_NONE) { + pFramesOut[iFrame*channelsOut + iChannelOut] = pFramesIn[iFrame]; + } + } + } + } + } else { + /* Slow path. Too many channels to store on the stack. */ + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + for (iChannelOut = 0; iChannelOut < channelsOut; iChannelOut += 1) { + ma_channel channelOut = ma_channel_map_get_channel(pChannelMapOut, channelsOut, iChannelOut); + if (channelOut != MA_CHANNEL_NONE) { + pFramesOut[iFrame*channelsOut + iChannelOut] = pFramesIn[iFrame]; + } + } + } + } + } + } break; + } + + return MA_SUCCESS; +} + +static void ma_channel_map_apply_f32(float* pFramesOut, const ma_channel* pChannelMapOut, ma_uint32 channelsOut, const float* pFramesIn, const ma_channel* pChannelMapIn, ma_uint32 channelsIn, ma_uint64 frameCount, ma_channel_mix_mode mode, ma_mono_expansion_mode monoExpansionMode) +{ + ma_channel_conversion_path conversionPath = ma_channel_map_get_conversion_path(pChannelMapIn, channelsIn, pChannelMapOut, channelsOut, mode); + + /* Optimized Path: Passthrough */ + if (conversionPath == ma_channel_conversion_path_passthrough) { + ma_copy_pcm_frames(pFramesOut, pFramesIn, frameCount, ma_format_f32, channelsOut); + return; + } + + /* Special Path: Mono Output. */ + if (conversionPath == ma_channel_conversion_path_mono_out) { + ma_channel_map_apply_mono_out_f32(pFramesOut, pFramesIn, pChannelMapIn, channelsIn, frameCount); + return; + } + + /* Special Path: Mono Input. */ + if (conversionPath == ma_channel_conversion_path_mono_in) { + ma_channel_map_apply_mono_in_f32(pFramesOut, pChannelMapOut, channelsOut, pFramesIn, frameCount, monoExpansionMode); + return; + } + + /* Getting here means we aren't running on an optimized conversion path. 
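+    From here the conversion is either a shuffle (simple mode) or a weighted mix. Both need scratch space
+    proportional to the channel count, hence the MA_MAX_CHANNELS guard below.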
*/
+    if (channelsOut <= MA_MAX_CHANNELS) {
+        ma_result result;
+
+        if (mode == ma_channel_mix_mode_simple) {
+            ma_channel shuffleTable[MA_MAX_CHANNELS];
+
+            result = ma_channel_map_build_shuffle_table(pChannelMapIn, channelsIn, pChannelMapOut, channelsOut, shuffleTable);
+            if (result != MA_SUCCESS) {
+                return;
+            }
+
+            result = ma_channel_map_apply_shuffle_table(pFramesOut, channelsOut, pFramesIn, channelsIn, frameCount, shuffleTable, ma_format_f32);
+            if (result != MA_SUCCESS) {
+                return;
+            }
+        } else {
+            ma_uint32 iFrame;
+            ma_uint32 iChannelOut;
+            ma_uint32 iChannelIn;
+            float weights[32][32];  /* Do not use MA_MAX_CHANNELS here! */
+
+            /*
+            If we have a small enough number of channels, pre-compute the weights. Otherwise we'll just need to
+            fall back to a slower path because otherwise we'll run out of stack space.
+            */
+            if (channelsIn <= ma_countof(weights) && channelsOut <= ma_countof(weights)) {
+                /* Pre-compute weights. */
+                for (iChannelOut = 0; iChannelOut < channelsOut; iChannelOut += 1) {
+                    ma_channel channelOut = ma_channel_map_get_channel(pChannelMapOut, channelsOut, iChannelOut);
+                    for (iChannelIn = 0; iChannelIn < channelsIn; iChannelIn += 1) {
+                        ma_channel channelIn = ma_channel_map_get_channel(pChannelMapIn, channelsIn, iChannelIn);
+                        weights[iChannelOut][iChannelIn] = ma_calculate_channel_position_rectangular_weight(channelOut, channelIn);
+                    }
+                }
+
+                iFrame = 0;
+
+                /* Experiment: Try an optimized unroll for some specific cases to see how it improves performance. RESULT: Good gains. */
+                if (channelsOut == 8) {
+                    /* Experiment 2: Expand the inner loop to see what kind of difference it makes. RESULT: Small, but worthwhile gain. */
+                    if (channelsIn == 2) {
+                        for (; iFrame < frameCount; iFrame += 1) {
+                            float accumulation[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
+
+                            accumulation[0] += pFramesIn[iFrame*2 + 0] * weights[0][0];
+                            accumulation[1] += pFramesIn[iFrame*2 + 0] * weights[1][0];
+                            accumulation[2] += pFramesIn[iFrame*2 + 0] * weights[2][0];
+                            accumulation[3] += pFramesIn[iFrame*2 + 0] * weights[3][0];
+                            accumulation[4] += pFramesIn[iFrame*2 + 0] * weights[4][0];
+                            accumulation[5] += pFramesIn[iFrame*2 + 0] * weights[5][0];
+                            accumulation[6] += pFramesIn[iFrame*2 + 0] * weights[6][0];
+                            accumulation[7] += pFramesIn[iFrame*2 + 0] * weights[7][0];
+
+                            accumulation[0] += pFramesIn[iFrame*2 + 1] * weights[0][1];
+                            accumulation[1] += pFramesIn[iFrame*2 + 1] * weights[1][1];
+                            accumulation[2] += pFramesIn[iFrame*2 + 1] * weights[2][1];
+                            accumulation[3] += pFramesIn[iFrame*2 + 1] * weights[3][1];
+                            accumulation[4] += pFramesIn[iFrame*2 + 1] * weights[4][1];
+                            accumulation[5] += pFramesIn[iFrame*2 + 1] * weights[5][1];
+                            accumulation[6] += pFramesIn[iFrame*2 + 1] * weights[6][1];
+                            accumulation[7] += pFramesIn[iFrame*2 + 1] * weights[7][1];
+
+                            pFramesOut[iFrame*8 + 0] = accumulation[0];
+                            pFramesOut[iFrame*8 + 1] = accumulation[1];
+                            pFramesOut[iFrame*8 + 2] = accumulation[2];
+                            pFramesOut[iFrame*8 + 3] = accumulation[3];
+                            pFramesOut[iFrame*8 + 4] = accumulation[4];
+                            pFramesOut[iFrame*8 + 5] = accumulation[5];
+                            pFramesOut[iFrame*8 + 6] = accumulation[6];
+                            pFramesOut[iFrame*8 + 7] = accumulation[7];
+                        }
+                    } else {
+                        /* When outputting to 8 channels, we can do everything in groups of two 4x SIMD operations.
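+                           The eight accumulators map onto two 4-wide vectors, which a vectorizing compiler
+                           can keep in registers across the inner loop.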
*/ + for (; iFrame < frameCount; iFrame += 1) { + float accumulation[8] = { 0, 0, 0, 0, 0, 0, 0, 0 }; + + for (iChannelIn = 0; iChannelIn < channelsIn; iChannelIn += 1) { + accumulation[0] += pFramesIn[iFrame*channelsIn + iChannelIn] * weights[0][iChannelIn]; + accumulation[1] += pFramesIn[iFrame*channelsIn + iChannelIn] * weights[1][iChannelIn]; + accumulation[2] += pFramesIn[iFrame*channelsIn + iChannelIn] * weights[2][iChannelIn]; + accumulation[3] += pFramesIn[iFrame*channelsIn + iChannelIn] * weights[3][iChannelIn]; + accumulation[4] += pFramesIn[iFrame*channelsIn + iChannelIn] * weights[4][iChannelIn]; + accumulation[5] += pFramesIn[iFrame*channelsIn + iChannelIn] * weights[5][iChannelIn]; + accumulation[6] += pFramesIn[iFrame*channelsIn + iChannelIn] * weights[6][iChannelIn]; + accumulation[7] += pFramesIn[iFrame*channelsIn + iChannelIn] * weights[7][iChannelIn]; + } + + pFramesOut[iFrame*8 + 0] = accumulation[0]; + pFramesOut[iFrame*8 + 1] = accumulation[1]; + pFramesOut[iFrame*8 + 2] = accumulation[2]; + pFramesOut[iFrame*8 + 3] = accumulation[3]; + pFramesOut[iFrame*8 + 4] = accumulation[4]; + pFramesOut[iFrame*8 + 5] = accumulation[5]; + pFramesOut[iFrame*8 + 6] = accumulation[6]; + pFramesOut[iFrame*8 + 7] = accumulation[7]; + } + } + } else if (channelsOut == 6) { + /* + When outputting to 6 channels we unfortunately don't have a nice multiple of 4 to do 4x SIMD operations. Instead we'll + expand our weights and do two frames at a time. + */ + for (; iFrame < frameCount; iFrame += 1) { + float accumulation[12] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; + + for (iChannelIn = 0; iChannelIn < channelsIn; iChannelIn += 1) { + accumulation[0] += pFramesIn[iFrame*channelsIn + iChannelIn] * weights[0][iChannelIn]; + accumulation[1] += pFramesIn[iFrame*channelsIn + iChannelIn] * weights[1][iChannelIn]; + accumulation[2] += pFramesIn[iFrame*channelsIn + iChannelIn] * weights[2][iChannelIn]; + accumulation[3] += pFramesIn[iFrame*channelsIn + iChannelIn] * weights[3][iChannelIn]; + accumulation[4] += pFramesIn[iFrame*channelsIn + iChannelIn] * weights[4][iChannelIn]; + accumulation[5] += pFramesIn[iFrame*channelsIn + iChannelIn] * weights[5][iChannelIn]; + } + + pFramesOut[iFrame*6 + 0] = accumulation[0]; + pFramesOut[iFrame*6 + 1] = accumulation[1]; + pFramesOut[iFrame*6 + 2] = accumulation[2]; + pFramesOut[iFrame*6 + 3] = accumulation[3]; + pFramesOut[iFrame*6 + 4] = accumulation[4]; + pFramesOut[iFrame*6 + 5] = accumulation[5]; + } + } + + /* Leftover frames. */ + for (; iFrame < frameCount; iFrame += 1) { + for (iChannelOut = 0; iChannelOut < channelsOut; iChannelOut += 1) { + float accumulation = 0; + + for (iChannelIn = 0; iChannelIn < channelsIn; iChannelIn += 1) { + accumulation += pFramesIn[iFrame*channelsIn + iChannelIn] * weights[iChannelOut][iChannelIn]; + } + + pFramesOut[iFrame*channelsOut + iChannelOut] = accumulation; + } + } + } else { + /* Cannot pre-compute weights because not enough room in stack-allocated buffer. 
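The loop below therefore computes the same weighted sum as the cached-weights paths above, out[iFrame][iChannelOut] = sum over iChannelIn of in[iFrame][iChannelIn] * weight(channelOut, channelIn), but re-derives each weight on the fly with ma_calculate_channel_position_rectangular_weight, trading extra arithmetic for stack space.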
*/ + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + for (iChannelOut = 0; iChannelOut < channelsOut; iChannelOut += 1) { + float accumulation = 0; + ma_channel channelOut = ma_channel_map_get_channel(pChannelMapOut, channelsOut, iChannelOut); + + for (iChannelIn = 0; iChannelIn < channelsIn; iChannelIn += 1) { + ma_channel channelIn = ma_channel_map_get_channel(pChannelMapIn, channelsIn, iChannelIn); + accumulation += pFramesIn[iFrame*channelsIn + iChannelIn] * ma_calculate_channel_position_rectangular_weight(channelOut, channelIn); + } + + pFramesOut[iFrame*channelsOut + iChannelOut] = accumulation; + } + } + } + } + } else { + /* Fall back to silence. If you hit this, what are you doing with so many channels?! */ + ma_silence_pcm_frames(pFramesOut, frameCount, ma_format_f32, channelsOut); + } +} + + +typedef struct +{ + size_t sizeInBytes; + size_t channelMapInOffset; + size_t channelMapOutOffset; + size_t shuffleTableOffset; + size_t weightsOffset; +} ma_channel_converter_heap_layout; + +static ma_channel_conversion_path ma_channel_converter_config_get_conversion_path(const ma_channel_converter_config* pConfig) +{ + return ma_channel_map_get_conversion_path(pConfig->pChannelMapIn, pConfig->channelsIn, pConfig->pChannelMapOut, pConfig->channelsOut, pConfig->mixingMode); +} + +static ma_result ma_channel_converter_get_heap_layout(const ma_channel_converter_config* pConfig, ma_channel_converter_heap_layout* pHeapLayout) +{ + ma_channel_conversion_path conversionPath; + + MA_ASSERT(pHeapLayout != NULL); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + if (pConfig->channelsIn == 0 || pConfig->channelsOut == 0) { + return MA_INVALID_ARGS; + } + + if (!ma_channel_map_is_valid(pConfig->pChannelMapIn, pConfig->channelsIn)) { + return MA_INVALID_ARGS; + } + + if (!ma_channel_map_is_valid(pConfig->pChannelMapOut, pConfig->channelsOut)) { + return MA_INVALID_ARGS; + } + + pHeapLayout->sizeInBytes = 0; + + /* Input channel map. Only need to allocate this if we have an input channel map (otherwise default channel map is assumed). */ + pHeapLayout->channelMapInOffset = pHeapLayout->sizeInBytes; + if (pConfig->pChannelMapIn != NULL) { + pHeapLayout->sizeInBytes += sizeof(ma_channel) * pConfig->channelsIn; + } + + /* Output channel map. Only need to allocate this if we have an output channel map (otherwise default channel map is assumed). */ + pHeapLayout->channelMapOutOffset = pHeapLayout->sizeInBytes; + if (pConfig->pChannelMapOut != NULL) { + pHeapLayout->sizeInBytes += sizeof(ma_channel) * pConfig->channelsOut; + } + + /* Alignment for the next section. */ + pHeapLayout->sizeInBytes = ma_align_64(pHeapLayout->sizeInBytes); + + /* Whether we use weights or a shuffle table depends on the channel maps themselves and the algorithm we've chosen. */ + conversionPath = ma_channel_converter_config_get_conversion_path(pConfig); + + /* Shuffle table */ + pHeapLayout->shuffleTableOffset = pHeapLayout->sizeInBytes; + if (conversionPath == ma_channel_conversion_path_shuffle) { + pHeapLayout->sizeInBytes += sizeof(ma_uint8) * pConfig->channelsOut; + } + + /* Weights */ + pHeapLayout->weightsOffset = pHeapLayout->sizeInBytes; + if (conversionPath == ma_channel_conversion_path_weights) { + pHeapLayout->sizeInBytes += sizeof(float*) * pConfig->channelsIn; + pHeapLayout->sizeInBytes += sizeof(float ) * pConfig->channelsIn * pConfig->channelsOut; + } + + /* Make sure allocation size is aligned.
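As a worked example of this layout (assuming sizeof(ma_channel) == 1, 8-byte pointers, explicit maps supplied, and a 2-in/6-out rectangular conversion, which resolves to the weights path): the two channel maps take 2 + 6 = 8 bytes, ma_align_64 rounds that up to 64 for weightsOffset, the weights block adds 2 * sizeof(float*) + 2 * 6 * sizeof(float) = 16 + 48 = 64 bytes, and the final alignment leaves the total at 128 bytes.

The functions that follow expose this as the usual two-phase pattern; a minimal usage sketch (error handling and includes elided, allocation callbacks left NULL):

    ma_channel_converter conv;
    ma_channel_converter_config cfg = ma_channel_converter_config_init(
        ma_format_f32, 2, NULL, 6, NULL, ma_channel_mix_mode_rectangular);
    size_t heapSizeInBytes;
    void* pHeap;

    ma_channel_converter_get_heap_size(&cfg, &heapSizeInBytes);
    pHeap = ma_malloc(heapSizeInBytes, NULL);                    // caller owns this memory
    ma_channel_converter_init_preallocated(&cfg, pHeap, &conv);  // conv does not take ownership
    // ... ma_channel_converter_process_pcm_frames(&conv, pOut, pIn, frameCount); ...
    ma_channel_converter_uninit(&conv, NULL);                    // frees nothing in this case
    ma_free(pHeap, NULL);                                        // caller releases the heap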
*/ + pHeapLayout->sizeInBytes = ma_align_64(pHeapLayout->sizeInBytes); + + return MA_SUCCESS; +} + +MA_API ma_result ma_channel_converter_get_heap_size(const ma_channel_converter_config* pConfig, size_t* pHeapSizeInBytes) +{ + ma_result result; + ma_channel_converter_heap_layout heapLayout; + + if (pHeapSizeInBytes == NULL) { + return MA_INVALID_ARGS; + } + + *pHeapSizeInBytes = 0; + + result = ma_channel_converter_get_heap_layout(pConfig, &heapLayout); + if (result != MA_SUCCESS) { + return result; + } + + *pHeapSizeInBytes = heapLayout.sizeInBytes; + + return MA_SUCCESS; +} + +MA_API ma_result ma_channel_converter_init_preallocated(const ma_channel_converter_config* pConfig, void* pHeap, ma_channel_converter* pConverter) +{ + ma_result result; + ma_channel_converter_heap_layout heapLayout; + + if (pConverter == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pConverter); + + result = ma_channel_converter_get_heap_layout(pConfig, &heapLayout); + if (result != MA_SUCCESS) { + return result; + } + + pConverter->_pHeap = pHeap; + MA_ZERO_MEMORY(pConverter->_pHeap, heapLayout.sizeInBytes); + + pConverter->format = pConfig->format; + pConverter->channelsIn = pConfig->channelsIn; + pConverter->channelsOut = pConfig->channelsOut; + pConverter->mixingMode = pConfig->mixingMode; + + if (pConfig->pChannelMapIn != NULL) { + pConverter->pChannelMapIn = (ma_channel*)ma_offset_ptr(pHeap, heapLayout.channelMapInOffset); + ma_channel_map_copy_or_default(pConverter->pChannelMapIn, pConfig->channelsIn, pConfig->pChannelMapIn, pConfig->channelsIn); + } else { + pConverter->pChannelMapIn = NULL; /* Use default channel map. */ + } + + if (pConfig->pChannelMapOut != NULL) { + pConverter->pChannelMapOut = (ma_channel*)ma_offset_ptr(pHeap, heapLayout.channelMapOutOffset); + ma_channel_map_copy_or_default(pConverter->pChannelMapOut, pConfig->channelsOut, pConfig->pChannelMapOut, pConfig->channelsOut); + } else { + pConverter->pChannelMapOut = NULL; /* Use default channel map. */ + } + + pConverter->conversionPath = ma_channel_converter_config_get_conversion_path(pConfig); + + if (pConverter->conversionPath == ma_channel_conversion_path_shuffle) { + pConverter->pShuffleTable = (ma_uint8*)ma_offset_ptr(pHeap, heapLayout.shuffleTableOffset); + ma_channel_map_build_shuffle_table(pConverter->pChannelMapIn, pConverter->channelsIn, pConverter->pChannelMapOut, pConverter->channelsOut, pConverter->pShuffleTable); + } + + if (pConverter->conversionPath == ma_channel_conversion_path_weights) { + ma_uint32 iChannelIn; + ma_uint32 iChannelOut; + + if (pConverter->format == ma_format_f32) { + pConverter->weights.f32 = (float** )ma_offset_ptr(pHeap, heapLayout.weightsOffset); + for (iChannelIn = 0; iChannelIn < pConverter->channelsIn; iChannelIn += 1) { + pConverter->weights.f32[iChannelIn] = (float*)ma_offset_ptr(pHeap, heapLayout.weightsOffset + ((sizeof(float*) * pConverter->channelsIn) + (sizeof(float) * pConverter->channelsOut * iChannelIn))); + } + } else { + pConverter->weights.s16 = (ma_int32**)ma_offset_ptr(pHeap, heapLayout.weightsOffset); + for (iChannelIn = 0; iChannelIn < pConverter->channelsIn; iChannelIn += 1) { + pConverter->weights.s16[iChannelIn] = (ma_int32*)ma_offset_ptr(pHeap, heapLayout.weightsOffset + ((sizeof(ma_int32*) * pConverter->channelsIn) + (sizeof(ma_int32) * pConverter->channelsOut * iChannelIn))); + } + } + + /* Silence our weights by default. 
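Zeroing everything first matters because the mode-specific code below only writes the entries it owns, and the later "== 0" checks rely on an untouched entry meaning that no contribution has been assigned to that input/output pair yet.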
*/ + for (iChannelIn = 0; iChannelIn < pConverter->channelsIn; iChannelIn += 1) { + for (iChannelOut = 0; iChannelOut < pConverter->channelsOut; iChannelOut += 1) { + if (pConverter->format == ma_format_f32) { + pConverter->weights.f32[iChannelIn][iChannelOut] = 0.0f; + } else { + pConverter->weights.s16[iChannelIn][iChannelOut] = 0; + } + } + } + + /* + We now need to fill out our weights table. This is determined by the mixing mode. + */ + + /* In all cases we need to make sure all channels that are present in both channel maps have a 1:1 mapping. */ + for (iChannelIn = 0; iChannelIn < pConverter->channelsIn; ++iChannelIn) { + ma_channel channelPosIn = ma_channel_map_get_channel(pConverter->pChannelMapIn, pConverter->channelsIn, iChannelIn); + + for (iChannelOut = 0; iChannelOut < pConverter->channelsOut; ++iChannelOut) { + ma_channel channelPosOut = ma_channel_map_get_channel(pConverter->pChannelMapOut, pConverter->channelsOut, iChannelOut); + + if (channelPosIn == channelPosOut) { + float weight = 1; + + if (pConverter->format == ma_format_f32) { + pConverter->weights.f32[iChannelIn][iChannelOut] = weight; + } else { + pConverter->weights.s16[iChannelIn][iChannelOut] = ma_channel_converter_float_to_fixed(weight); + } + } + } + } + + switch (pConverter->mixingMode) + { + case ma_channel_mix_mode_custom_weights: + { + if (pConfig->ppWeights == NULL) { + return MA_INVALID_ARGS; /* Config specified a custom weights mixing mode, but no custom weights have been specified. */ + } + + for (iChannelIn = 0; iChannelIn < pConverter->channelsIn; iChannelIn += 1) { + for (iChannelOut = 0; iChannelOut < pConverter->channelsOut; iChannelOut += 1) { + float weight = pConfig->ppWeights[iChannelIn][iChannelOut]; + + if (pConverter->format == ma_format_f32) { + pConverter->weights.f32[iChannelIn][iChannelOut] = weight; + } else { + pConverter->weights.s16[iChannelIn][iChannelOut] = ma_channel_converter_float_to_fixed(weight); + } + } + } + } break; + + case ma_channel_mix_mode_simple: + { + /* + In simple mode, only set weights for channels that have exactly matching types, leave the rest at + zero. The 1:1 mappings have already been covered before this switch statement. + */ + } break; + + case ma_channel_mix_mode_rectangular: + default: + { + /* Unmapped input channels. */ + for (iChannelIn = 0; iChannelIn < pConverter->channelsIn; ++iChannelIn) { + ma_channel channelPosIn = ma_channel_map_get_channel(pConverter->pChannelMapIn, pConverter->channelsIn, iChannelIn); + + if (ma_is_spatial_channel_position(channelPosIn)) { + if (!ma_channel_map_contains_channel_position(pConverter->channelsOut, pConverter->pChannelMapOut, channelPosIn)) { + for (iChannelOut = 0; iChannelOut < pConverter->channelsOut; ++iChannelOut) { + ma_channel channelPosOut = ma_channel_map_get_channel(pConverter->pChannelMapOut, pConverter->channelsOut, iChannelOut); + + if (ma_is_spatial_channel_position(channelPosOut)) { + float weight = 0; + if (pConverter->mixingMode == ma_channel_mix_mode_rectangular) { + weight = ma_calculate_channel_position_rectangular_weight(channelPosIn, channelPosOut); + } + + /* Only apply the weight if we haven't already got some contribution from the respective channels. 
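In particular, this keeps the 1:1 mappings established before the switch (weight 1) from being overwritten by the spreading weights computed here. When exact control over the matrix is wanted instead, ma_channel_mix_mode_custom_weights (handled above) takes a caller-provided table indexed [iChannelIn][iChannelOut]; a minimal sketch for an averaged stereo-to-mono downmix (variable names hypothetical; the table is copied during init, so it only needs to outlive the init call):

    float rowL[1] = { 0.5f };          // contribution of input L to the mono output
    float rowR[1] = { 0.5f };          // contribution of input R to the mono output
    float* ppW[2] = { rowL, rowR };    // ppW[iChannelIn][iChannelOut]

    cfg.mixingMode = ma_channel_mix_mode_custom_weights;  // cfg: a ma_channel_converter_config
    cfg.ppWeights  = ppW;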
*/ + if (pConverter->format == ma_format_f32) { + if (pConverter->weights.f32[iChannelIn][iChannelOut] == 0) { + pConverter->weights.f32[iChannelIn][iChannelOut] = weight; + } + } else { + if (pConverter->weights.s16[iChannelIn][iChannelOut] == 0) { + pConverter->weights.s16[iChannelIn][iChannelOut] = ma_channel_converter_float_to_fixed(weight); + } + } + } + } + } + } + } + + /* Unmapped output channels. */ + for (iChannelOut = 0; iChannelOut < pConverter->channelsOut; ++iChannelOut) { + ma_channel channelPosOut = ma_channel_map_get_channel(pConverter->pChannelMapOut, pConverter->channelsOut, iChannelOut); + + if (ma_is_spatial_channel_position(channelPosOut)) { + if (!ma_channel_map_contains_channel_position(pConverter->channelsIn, pConverter->pChannelMapIn, channelPosOut)) { + for (iChannelIn = 0; iChannelIn < pConverter->channelsIn; ++iChannelIn) { + ma_channel channelPosIn = ma_channel_map_get_channel(pConverter->pChannelMapIn, pConverter->channelsIn, iChannelIn); + + if (ma_is_spatial_channel_position(channelPosIn)) { + float weight = 0; + if (pConverter->mixingMode == ma_channel_mix_mode_rectangular) { + weight = ma_calculate_channel_position_rectangular_weight(channelPosIn, channelPosOut); + } + + /* Only apply the weight if we haven't already got some contribution from the respective channels. */ + if (pConverter->format == ma_format_f32) { + if (pConverter->weights.f32[iChannelIn][iChannelOut] == 0) { + pConverter->weights.f32[iChannelIn][iChannelOut] = weight; + } + } else { + if (pConverter->weights.s16[iChannelIn][iChannelOut] == 0) { + pConverter->weights.s16[iChannelIn][iChannelOut] = ma_channel_converter_float_to_fixed(weight); + } + } + } + } + } + } + } + + /* If LFE is in the output channel map but was not present in the input channel map, configure its weight now */ + if (pConfig->calculateLFEFromSpatialChannels) { + if (!ma_channel_map_contains_channel_position(pConverter->channelsIn, pConverter->pChannelMapIn, MA_CHANNEL_LFE)) { + ma_uint32 spatialChannelCount = ma_channel_map_get_spatial_channel_count(pConverter->pChannelMapIn, pConverter->channelsIn); + ma_uint32 iChannelOutLFE; + + if (spatialChannelCount > 0 && ma_channel_map_find_channel_position(pConverter->channelsOut, pConverter->pChannelMapOut, MA_CHANNEL_LFE, &iChannelOutLFE)) { + const float weightForLFE = 1.0f / spatialChannelCount; + for (iChannelIn = 0; iChannelIn < pConverter->channelsIn; ++iChannelIn) { + const ma_channel channelPosIn = ma_channel_map_get_channel(pConverter->pChannelMapIn, pConverter->channelsIn, iChannelIn); + if (ma_is_spatial_channel_position(channelPosIn)) { + if (pConverter->format == ma_format_f32) { + if (pConverter->weights.f32[iChannelIn][iChannelOutLFE] == 0) { + pConverter->weights.f32[iChannelIn][iChannelOutLFE] = weightForLFE; + } + } else { + if (pConverter->weights.s16[iChannelIn][iChannelOutLFE] == 0) { + pConverter->weights.s16[iChannelIn][iChannelOutLFE] = ma_channel_converter_float_to_fixed(weightForLFE); + } + } + } + } + } + } + } + } break; + } + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_channel_converter_init(const ma_channel_converter_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_channel_converter* pConverter) +{ + ma_result result; + size_t heapSizeInBytes; + void* pHeap; + + result = ma_channel_converter_get_heap_size(pConfig, &heapSizeInBytes); + if (result != MA_SUCCESS) { + return result; + } + + if (heapSizeInBytes > 0) { + pHeap = ma_malloc(heapSizeInBytes, pAllocationCallbacks); + if (pHeap == NULL) { + return 
MA_OUT_OF_MEMORY; + } + } else { + pHeap = NULL; + } + + result = ma_channel_converter_init_preallocated(pConfig, pHeap, pConverter); + if (result != MA_SUCCESS) { + ma_free(pHeap, pAllocationCallbacks); + return result; + } + + pConverter->_ownsHeap = MA_TRUE; + return MA_SUCCESS; +} + +MA_API void ma_channel_converter_uninit(ma_channel_converter* pConverter, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pConverter == NULL) { + return; + } + + if (pConverter->_ownsHeap) { + ma_free(pConverter->_pHeap, pAllocationCallbacks); + } +} + +static ma_result ma_channel_converter_process_pcm_frames__passthrough(ma_channel_converter* pConverter, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount) +{ + MA_ASSERT(pConverter != NULL); + MA_ASSERT(pFramesOut != NULL); + MA_ASSERT(pFramesIn != NULL); + + ma_copy_memory_64(pFramesOut, pFramesIn, frameCount * ma_get_bytes_per_frame(pConverter->format, pConverter->channelsOut)); + return MA_SUCCESS; +} + +static ma_result ma_channel_converter_process_pcm_frames__shuffle(ma_channel_converter* pConverter, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount) +{ + MA_ASSERT(pConverter != NULL); + MA_ASSERT(pFramesOut != NULL); + MA_ASSERT(pFramesIn != NULL); + MA_ASSERT(pConverter->channelsIn == pConverter->channelsOut); + + return ma_channel_map_apply_shuffle_table(pFramesOut, pConverter->channelsOut, pFramesIn, pConverter->channelsIn, frameCount, pConverter->pShuffleTable, pConverter->format); +} + +static ma_result ma_channel_converter_process_pcm_frames__mono_in(ma_channel_converter* pConverter, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount) +{ + ma_uint64 iFrame; + + MA_ASSERT(pConverter != NULL); + MA_ASSERT(pFramesOut != NULL); + MA_ASSERT(pFramesIn != NULL); + MA_ASSERT(pConverter->channelsIn == 1); + + switch (pConverter->format) + { + case ma_format_u8: + { + /* */ ma_uint8* pFramesOutU8 = ( ma_uint8*)pFramesOut; + const ma_uint8* pFramesInU8 = (const ma_uint8*)pFramesIn; + + for (iFrame = 0; iFrame < frameCount; ++iFrame) { + ma_uint32 iChannel; + for (iChannel = 0; iChannel < pConverter->channelsOut; iChannel += 1) { + pFramesOutU8[iFrame*pConverter->channelsOut + iChannel] = pFramesInU8[iFrame]; + } + } + } break; + + case ma_format_s16: + { + /* */ ma_int16* pFramesOutS16 = ( ma_int16*)pFramesOut; + const ma_int16* pFramesInS16 = (const ma_int16*)pFramesIn; + + if (pConverter->channelsOut == 2) { + for (iFrame = 0; iFrame < frameCount; ++iFrame) { + pFramesOutS16[iFrame*2 + 0] = pFramesInS16[iFrame]; + pFramesOutS16[iFrame*2 + 1] = pFramesInS16[iFrame]; + } + } else { + for (iFrame = 0; iFrame < frameCount; ++iFrame) { + ma_uint32 iChannel; + for (iChannel = 0; iChannel < pConverter->channelsOut; iChannel += 1) { + pFramesOutS16[iFrame*pConverter->channelsOut + iChannel] = pFramesInS16[iFrame]; + } + } + } + } break; + + case ma_format_s24: + { + /* */ ma_uint8* pFramesOutS24 = ( ma_uint8*)pFramesOut; + const ma_uint8* pFramesInS24 = (const ma_uint8*)pFramesIn; + + for (iFrame = 0; iFrame < frameCount; ++iFrame) { + ma_uint32 iChannel; + for (iChannel = 0; iChannel < pConverter->channelsOut; iChannel += 1) { + ma_uint64 iSampleOut = iFrame*pConverter->channelsOut + iChannel; + ma_uint64 iSampleIn = iFrame; + pFramesOutS24[iSampleOut*3 + 0] = pFramesInS24[iSampleIn*3 + 0]; + pFramesOutS24[iSampleOut*3 + 1] = pFramesInS24[iSampleIn*3 + 1]; + pFramesOutS24[iSampleOut*3 + 2] = pFramesInS24[iSampleIn*3 + 2]; + } + } + } break; + + case ma_format_s32: + { + /* */ ma_int32* pFramesOutS32 = ( 
ma_int32*)pFramesOut; + const ma_int32* pFramesInS32 = (const ma_int32*)pFramesIn; + + for (iFrame = 0; iFrame < frameCount; ++iFrame) { + ma_uint32 iChannel; + for (iChannel = 0; iChannel < pConverter->channelsOut; iChannel += 1) { + pFramesOutS32[iFrame*pConverter->channelsOut + iChannel] = pFramesInS32[iFrame]; + } + } + } break; + + case ma_format_f32: + { + /* */ float* pFramesOutF32 = ( float*)pFramesOut; + const float* pFramesInF32 = (const float*)pFramesIn; + + if (pConverter->channelsOut == 2) { + for (iFrame = 0; iFrame < frameCount; ++iFrame) { + pFramesOutF32[iFrame*2 + 0] = pFramesInF32[iFrame]; + pFramesOutF32[iFrame*2 + 1] = pFramesInF32[iFrame]; + } + } else { + for (iFrame = 0; iFrame < frameCount; ++iFrame) { + ma_uint32 iChannel; + for (iChannel = 0; iChannel < pConverter->channelsOut; iChannel += 1) { + pFramesOutF32[iFrame*pConverter->channelsOut + iChannel] = pFramesInF32[iFrame]; + } + } + } + } break; + + default: return MA_INVALID_OPERATION; /* Unknown format. */ + } + + return MA_SUCCESS; +} + +static ma_result ma_channel_converter_process_pcm_frames__mono_out(ma_channel_converter* pConverter, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount) +{ + ma_uint64 iFrame; + ma_uint32 iChannel; + + MA_ASSERT(pConverter != NULL); + MA_ASSERT(pFramesOut != NULL); + MA_ASSERT(pFramesIn != NULL); + MA_ASSERT(pConverter->channelsOut == 1); + + switch (pConverter->format) + { + case ma_format_u8: + { + /* */ ma_uint8* pFramesOutU8 = ( ma_uint8*)pFramesOut; + const ma_uint8* pFramesInU8 = (const ma_uint8*)pFramesIn; + + for (iFrame = 0; iFrame < frameCount; ++iFrame) { + ma_int32 t = 0; + for (iChannel = 0; iChannel < pConverter->channelsIn; iChannel += 1) { + t += ma_pcm_sample_u8_to_s16_no_scale(pFramesInU8[iFrame*pConverter->channelsIn + iChannel]); + } + + pFramesOutU8[iFrame] = ma_clip_u8(t / pConverter->channelsIn); + } + } break; + + case ma_format_s16: + { + /* */ ma_int16* pFramesOutS16 = ( ma_int16*)pFramesOut; + const ma_int16* pFramesInS16 = (const ma_int16*)pFramesIn; + + for (iFrame = 0; iFrame < frameCount; ++iFrame) { + ma_int32 t = 0; + for (iChannel = 0; iChannel < pConverter->channelsIn; iChannel += 1) { + t += pFramesInS16[iFrame*pConverter->channelsIn + iChannel]; + } + + pFramesOutS16[iFrame] = (ma_int16)(t / pConverter->channelsIn); + } + } break; + + case ma_format_s24: + { + /* */ ma_uint8* pFramesOutS24 = ( ma_uint8*)pFramesOut; + const ma_uint8* pFramesInS24 = (const ma_uint8*)pFramesIn; + + for (iFrame = 0; iFrame < frameCount; ++iFrame) { + ma_int64 t = 0; + for (iChannel = 0; iChannel < pConverter->channelsIn; iChannel += 1) { + t += ma_pcm_sample_s24_to_s32_no_scale(&pFramesInS24[(iFrame*pConverter->channelsIn + iChannel)*3]); + } + + ma_pcm_sample_s32_to_s24_no_scale(t / pConverter->channelsIn, &pFramesOutS24[iFrame*3]); + } + } break; + + case ma_format_s32: + { + /* */ ma_int32* pFramesOutS32 = ( ma_int32*)pFramesOut; + const ma_int32* pFramesInS32 = (const ma_int32*)pFramesIn; + + for (iFrame = 0; iFrame < frameCount; ++iFrame) { + ma_int64 t = 0; + for (iChannel = 0; iChannel < pConverter->channelsIn; iChannel += 1) { + t += pFramesInS32[iFrame*pConverter->channelsIn + iChannel]; + } + + pFramesOutS32[iFrame] = (ma_int32)(t / pConverter->channelsIn); + } + } break; + + case ma_format_f32: + { + /* */ float* pFramesOutF32 = ( float*)pFramesOut; + const float* pFramesInF32 = (const float*)pFramesIn; + + for (iFrame = 0; iFrame < frameCount; ++iFrame) { + float t = 0; + for (iChannel = 0; iChannel < pConverter->channelsIn; iChannel
+= 1) { + t += pFramesInF32[iFrame*pConverter->channelsIn + iChannel]; + } + + pFramesOutF32[iFrame] = t / pConverter->channelsIn; + } + } break; + + default: return MA_INVALID_OPERATION; /* Unknown format. */ + } + + return MA_SUCCESS; +} + +static ma_result ma_channel_converter_process_pcm_frames__weights(ma_channel_converter* pConverter, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount) +{ + ma_uint32 iFrame; + ma_uint32 iChannelIn; + ma_uint32 iChannelOut; + + MA_ASSERT(pConverter != NULL); + MA_ASSERT(pFramesOut != NULL); + MA_ASSERT(pFramesIn != NULL); + + /* This is the more complicated case. Each of the output channels is accumulated with 0 or more input channels. */ + + /* Clear. */ + ma_zero_memory_64(pFramesOut, frameCount * ma_get_bytes_per_frame(pConverter->format, pConverter->channelsOut)); + + /* Accumulate. */ + switch (pConverter->format) + { + case ma_format_u8: + { + /* */ ma_uint8* pFramesOutU8 = ( ma_uint8*)pFramesOut; + const ma_uint8* pFramesInU8 = (const ma_uint8*)pFramesIn; + + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + for (iChannelIn = 0; iChannelIn < pConverter->channelsIn; ++iChannelIn) { + for (iChannelOut = 0; iChannelOut < pConverter->channelsOut; ++iChannelOut) { + ma_int16 u8_O = ma_pcm_sample_u8_to_s16_no_scale(pFramesOutU8[iFrame*pConverter->channelsOut + iChannelOut]); + ma_int16 u8_I = ma_pcm_sample_u8_to_s16_no_scale(pFramesInU8 [iFrame*pConverter->channelsIn + iChannelIn ]); + ma_int32 s = (ma_int32)ma_clamp(u8_O + ((u8_I * pConverter->weights.s16[iChannelIn][iChannelOut]) >> MA_CHANNEL_CONVERTER_FIXED_POINT_SHIFT), -128, 127); + pFramesOutU8[iFrame*pConverter->channelsOut + iChannelOut] = ma_clip_u8((ma_int16)s); + } + } + } + } break; + + case ma_format_s16: + { + /* */ ma_int16* pFramesOutS16 = ( ma_int16*)pFramesOut; + const ma_int16* pFramesInS16 = (const ma_int16*)pFramesIn; + + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + for (iChannelIn = 0; iChannelIn < pConverter->channelsIn; ++iChannelIn) { + for (iChannelOut = 0; iChannelOut < pConverter->channelsOut; ++iChannelOut) { + ma_int32 s = pFramesOutS16[iFrame*pConverter->channelsOut + iChannelOut]; + s += (pFramesInS16[iFrame*pConverter->channelsIn + iChannelIn] * pConverter->weights.s16[iChannelIn][iChannelOut]) >> MA_CHANNEL_CONVERTER_FIXED_POINT_SHIFT; + + pFramesOutS16[iFrame*pConverter->channelsOut + iChannelOut] = (ma_int16)ma_clamp(s, -32768, 32767); + } + } + } + } break; + + case ma_format_s24: + { + /* */ ma_uint8* pFramesOutS24 = ( ma_uint8*)pFramesOut; + const ma_uint8* pFramesInS24 = (const ma_uint8*)pFramesIn; + + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + for (iChannelIn = 0; iChannelIn < pConverter->channelsIn; ++iChannelIn) { + for (iChannelOut = 0; iChannelOut < pConverter->channelsOut; ++iChannelOut) { + ma_int64 s24_O = ma_pcm_sample_s24_to_s32_no_scale(&pFramesOutS24[(iFrame*pConverter->channelsOut + iChannelOut)*3]); + ma_int64 s24_I = ma_pcm_sample_s24_to_s32_no_scale(&pFramesInS24 [(iFrame*pConverter->channelsIn + iChannelIn )*3]); + ma_int64 s24 = (ma_int32)ma_clamp(s24_O + ((s24_I * pConverter->weights.s16[iChannelIn][iChannelOut]) >> MA_CHANNEL_CONVERTER_FIXED_POINT_SHIFT), -8388608, 8388607); + ma_pcm_sample_s32_to_s24_no_scale(s24, &pFramesOutS24[(iFrame*pConverter->channelsOut + iChannelOut)*3]); + } + } + } + } break; + + case ma_format_s32: + { + /* */ ma_int32* pFramesOutS32 = ( ma_int32*)pFramesOut; + const ma_int32* pFramesInS32 = (const ma_int32*)pFramesIn; + + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { 
+ for (iChannelIn = 0; iChannelIn < pConverter->channelsIn; ++iChannelIn) { + for (iChannelOut = 0; iChannelOut < pConverter->channelsOut; ++iChannelOut) { + ma_int64 s = pFramesOutS32[iFrame*pConverter->channelsOut + iChannelOut]; + s += ((ma_int64)pFramesInS32[iFrame*pConverter->channelsIn + iChannelIn] * pConverter->weights.s16[iChannelIn][iChannelOut]) >> MA_CHANNEL_CONVERTER_FIXED_POINT_SHIFT; + + pFramesOutS32[iFrame*pConverter->channelsOut + iChannelOut] = ma_clip_s32(s); + } + } + } + } break; + + case ma_format_f32: + { + /* */ float* pFramesOutF32 = ( float*)pFramesOut; + const float* pFramesInF32 = (const float*)pFramesIn; + + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + for (iChannelIn = 0; iChannelIn < pConverter->channelsIn; ++iChannelIn) { + for (iChannelOut = 0; iChannelOut < pConverter->channelsOut; ++iChannelOut) { + pFramesOutF32[iFrame*pConverter->channelsOut + iChannelOut] += pFramesInF32[iFrame*pConverter->channelsIn + iChannelIn] * pConverter->weights.f32[iChannelIn][iChannelOut]; + } + } + } + } break; + + default: return MA_INVALID_OPERATION; /* Unknown format. */ + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_channel_converter_process_pcm_frames(ma_channel_converter* pConverter, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount) +{ + if (pConverter == NULL) { + return MA_INVALID_ARGS; + } + + if (pFramesOut == NULL) { + return MA_INVALID_ARGS; + } + + if (pFramesIn == NULL) { + ma_zero_memory_64(pFramesOut, frameCount * ma_get_bytes_per_frame(pConverter->format, pConverter->channelsOut)); + return MA_SUCCESS; + } + + switch (pConverter->conversionPath) + { + case ma_channel_conversion_path_passthrough: return ma_channel_converter_process_pcm_frames__passthrough(pConverter, pFramesOut, pFramesIn, frameCount); + case ma_channel_conversion_path_mono_out: return ma_channel_converter_process_pcm_frames__mono_out(pConverter, pFramesOut, pFramesIn, frameCount); + case ma_channel_conversion_path_mono_in: return ma_channel_converter_process_pcm_frames__mono_in(pConverter, pFramesOut, pFramesIn, frameCount); + case ma_channel_conversion_path_shuffle: return ma_channel_converter_process_pcm_frames__shuffle(pConverter, pFramesOut, pFramesIn, frameCount); + case ma_channel_conversion_path_weights: + default: + { + return ma_channel_converter_process_pcm_frames__weights(pConverter, pFramesOut, pFramesIn, frameCount); + } + } +} + +MA_API ma_result ma_channel_converter_get_input_channel_map(const ma_channel_converter* pConverter, ma_channel* pChannelMap, size_t channelMapCap) +{ + if (pConverter == NULL || pChannelMap == NULL) { + return MA_INVALID_ARGS; + } + + ma_channel_map_copy_or_default(pChannelMap, channelMapCap, pConverter->pChannelMapIn, pConverter->channelsIn); + + return MA_SUCCESS; +} + +MA_API ma_result ma_channel_converter_get_output_channel_map(const ma_channel_converter* pConverter, ma_channel* pChannelMap, size_t channelMapCap) +{ + if (pConverter == NULL || pChannelMap == NULL) { + return MA_INVALID_ARGS; + } + + ma_channel_map_copy_or_default(pChannelMap, channelMapCap, pConverter->pChannelMapOut, pConverter->channelsOut); + + return MA_SUCCESS; +} + + +/************************************************************************************************************************************************************** + +Data Conversion + +**************************************************************************************************************************************************************/ +MA_API ma_data_converter_config 
ma_data_converter_config_init_default(void) +{ + ma_data_converter_config config; + MA_ZERO_OBJECT(&config); + + config.ditherMode = ma_dither_mode_none; + config.resampling.algorithm = ma_resample_algorithm_linear; + config.allowDynamicSampleRate = MA_FALSE; /* Disable dynamic sample rates by default because dynamic rate adjustments should be quite rare and it allows an optimization for cases when the in and out sample rates are the same. */ + + /* Linear resampling defaults. */ + config.resampling.linear.lpfOrder = 1; + + return config; +} + +MA_API ma_data_converter_config ma_data_converter_config_init(ma_format formatIn, ma_format formatOut, ma_uint32 channelsIn, ma_uint32 channelsOut, ma_uint32 sampleRateIn, ma_uint32 sampleRateOut) +{ + ma_data_converter_config config = ma_data_converter_config_init_default(); + config.formatIn = formatIn; + config.formatOut = formatOut; + config.channelsIn = channelsIn; + config.channelsOut = channelsOut; + config.sampleRateIn = sampleRateIn; + config.sampleRateOut = sampleRateOut; + + return config; +} + + +typedef struct +{ + size_t sizeInBytes; + size_t channelConverterOffset; + size_t resamplerOffset; +} ma_data_converter_heap_layout; + +static ma_bool32 ma_data_converter_config_is_resampler_required(const ma_data_converter_config* pConfig) +{ + MA_ASSERT(pConfig != NULL); + + return pConfig->allowDynamicSampleRate || pConfig->sampleRateIn != pConfig->sampleRateOut; +} + +static ma_format ma_data_converter_config_get_mid_format(const ma_data_converter_config* pConfig) +{ + MA_ASSERT(pConfig != NULL); + + /* + We want to avoid as much data conversion as possible. The channel converter and linear + resampler both support s16 and f32 natively. We need to decide on the format to use for this + stage. We call this the mid format because it's used in the middle stage of the conversion + pipeline. If the output format is either s16 or f32 we use that one. If that is not the case it + will do the same thing for the input format. If it's neither we just use f32. If we are using a + custom resampling backend, we can only guarantee that f32 will be supported so we'll be forced + to use that if resampling is required. + */ + if (ma_data_converter_config_is_resampler_required(pConfig) && pConfig->resampling.algorithm != ma_resample_algorithm_linear) { + return ma_format_f32; /* <-- Force f32 since that is the only one we can guarantee will be supported by the resampler. 
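Worked through the branches below for the linear resampler (or when no resampling is needed): s24 in / f32 out picks f32 because the output format wins when it is s16 or f32; f32 in / s24 out also picks f32, this time because the input format wins; s16 in / u8 out picks s16; and u8 in / s24 out falls through to f32.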
*/ + } else { + /* */ if (pConfig->formatOut == ma_format_s16 || pConfig->formatOut == ma_format_f32) { + return pConfig->formatOut; + } else if (pConfig->formatIn == ma_format_s16 || pConfig->formatIn == ma_format_f32) { + return pConfig->formatIn; + } else { + return ma_format_f32; + } + } +} + +static ma_channel_converter_config ma_channel_converter_config_init_from_data_converter_config(const ma_data_converter_config* pConfig) +{ + ma_channel_converter_config channelConverterConfig; + + MA_ASSERT(pConfig != NULL); + + channelConverterConfig = ma_channel_converter_config_init(ma_data_converter_config_get_mid_format(pConfig), pConfig->channelsIn, pConfig->pChannelMapIn, pConfig->channelsOut, pConfig->pChannelMapOut, pConfig->channelMixMode); + channelConverterConfig.ppWeights = pConfig->ppChannelWeights; + channelConverterConfig.calculateLFEFromSpatialChannels = pConfig->calculateLFEFromSpatialChannels; + + return channelConverterConfig; +} + +static ma_resampler_config ma_resampler_config_init_from_data_converter_config(const ma_data_converter_config* pConfig) +{ + ma_resampler_config resamplerConfig; + ma_uint32 resamplerChannels; + + MA_ASSERT(pConfig != NULL); + + /* The resampler is the most expensive part of the conversion process, so we need to do it at the stage where the channel count is at its lowest. */ + if (pConfig->channelsIn < pConfig->channelsOut) { + resamplerChannels = pConfig->channelsIn; + } else { + resamplerChannels = pConfig->channelsOut; + } + + resamplerConfig = ma_resampler_config_init(ma_data_converter_config_get_mid_format(pConfig), resamplerChannels, pConfig->sampleRateIn, pConfig->sampleRateOut, pConfig->resampling.algorithm); + resamplerConfig.linear = pConfig->resampling.linear; + resamplerConfig.pBackendVTable = pConfig->resampling.pBackendVTable; + resamplerConfig.pBackendUserData = pConfig->resampling.pBackendUserData; + + return resamplerConfig; +} + +static ma_result ma_data_converter_get_heap_layout(const ma_data_converter_config* pConfig, ma_data_converter_heap_layout* pHeapLayout) +{ + ma_result result; + + MA_ASSERT(pHeapLayout != NULL); + + MA_ZERO_OBJECT(pHeapLayout); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + if (pConfig->channelsIn == 0 || pConfig->channelsOut == 0) { + return MA_INVALID_ARGS; + } + + pHeapLayout->sizeInBytes = 0; + + /* Channel converter. */ + pHeapLayout->channelConverterOffset = pHeapLayout->sizeInBytes; + { + size_t heapSizeInBytes; + ma_channel_converter_config channelConverterConfig = ma_channel_converter_config_init_from_data_converter_config(pConfig); + + result = ma_channel_converter_get_heap_size(&channelConverterConfig, &heapSizeInBytes); + if (result != MA_SUCCESS) { + return result; + } + + pHeapLayout->sizeInBytes += heapSizeInBytes; + } + + /* Resampler. */ + pHeapLayout->resamplerOffset = pHeapLayout->sizeInBytes; + if (ma_data_converter_config_is_resampler_required(pConfig)) { + size_t heapSizeInBytes; + ma_resampler_config resamplerConfig = ma_resampler_config_init_from_data_converter_config(pConfig); + + result = ma_resampler_get_heap_size(&resamplerConfig, &heapSizeInBytes); + if (result != MA_SUCCESS) { + return result; + } + + pHeapLayout->sizeInBytes += heapSizeInBytes; + } + + /* Make sure allocation size is aligned.
*/ + pHeapLayout->sizeInBytes = ma_align_64(pHeapLayout->sizeInBytes); + + return MA_SUCCESS; +} + +MA_API ma_result ma_data_converter_get_heap_size(const ma_data_converter_config* pConfig, size_t* pHeapSizeInBytes) +{ + ma_result result; + ma_data_converter_heap_layout heapLayout; + + if (pHeapSizeInBytes == NULL) { + return MA_INVALID_ARGS; + } + + *pHeapSizeInBytes = 0; + + result = ma_data_converter_get_heap_layout(pConfig, &heapLayout); + if (result != MA_SUCCESS) { + return result; + } + + *pHeapSizeInBytes = heapLayout.sizeInBytes; + + return MA_SUCCESS; +} + +MA_API ma_result ma_data_converter_init_preallocated(const ma_data_converter_config* pConfig, void* pHeap, ma_data_converter* pConverter) +{ + ma_result result; + ma_data_converter_heap_layout heapLayout; + ma_format midFormat; + ma_bool32 isResamplingRequired; + + if (pConverter == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pConverter); + + result = ma_data_converter_get_heap_layout(pConfig, &heapLayout); + if (result != MA_SUCCESS) { + return result; + } + + pConverter->_pHeap = pHeap; + MA_ZERO_MEMORY(pHeap, heapLayout.sizeInBytes); + + pConverter->formatIn = pConfig->formatIn; + pConverter->formatOut = pConfig->formatOut; + pConverter->channelsIn = pConfig->channelsIn; + pConverter->channelsOut = pConfig->channelsOut; + pConverter->sampleRateIn = pConfig->sampleRateIn; + pConverter->sampleRateOut = pConfig->sampleRateOut; + pConverter->ditherMode = pConfig->ditherMode; + + /* + Determine if resampling is required. We need to do this so we can determine an appropriate + mid format to use. If resampling is required, the mid format must be ma_format_f32 since + that is the only one that is guaranteed to be supported by custom resampling backends. + */ + isResamplingRequired = ma_data_converter_config_is_resampler_required(pConfig); + midFormat = ma_data_converter_config_get_mid_format(pConfig); + + + /* Channel converter. We always initialize this, but we check if it configures itself as a passthrough to determine whether or not it's needed. */ + { + ma_channel_converter_config channelConverterConfig = ma_channel_converter_config_init_from_data_converter_config(pConfig); + + result = ma_channel_converter_init_preallocated(&channelConverterConfig, ma_offset_ptr(pHeap, heapLayout.channelConverterOffset), &pConverter->channelConverter); + if (result != MA_SUCCESS) { + return result; + } + + /* If the channel converter is not a passthrough we need to enable it. Otherwise we can skip it. */ + if (pConverter->channelConverter.conversionPath != ma_channel_conversion_path_passthrough) { + pConverter->hasChannelConverter = MA_TRUE; + } + } + + + /* Resampler. */ + if (isResamplingRequired) { + ma_resampler_config resamplerConfig = ma_resampler_config_init_from_data_converter_config(pConfig); + + result = ma_resampler_init_preallocated(&resamplerConfig, ma_offset_ptr(pHeap, heapLayout.resamplerOffset), &pConverter->resampler); + if (result != MA_SUCCESS) { + return result; + } + + pConverter->hasResampler = MA_TRUE; + } + + + /* We can simplify pre- and post-format conversion if we have neither channel conversion nor resampling. */ + if (pConverter->hasChannelConverter == MA_FALSE && pConverter->hasResampler == MA_FALSE) { + /* We have neither channel conversion nor resampling so we'll only need one of pre- or post-format conversion, or none if the input and output formats are the same. */ + if (pConverter->formatIn == pConverter->formatOut) { + /* The formats are the same so we can just pass through.
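Conversely, when only the formats differ, a single format-conversion stage is all that gets enabled below. A minimal sketch of that case through the public API (allocation callbacks left NULL, error handling elided):

    ma_data_converter cv;
    ma_data_converter_config cfg = ma_data_converter_config_init(
        ma_format_s16, ma_format_f32,   // formatIn, formatOut
        2, 2,                           // channelsIn, channelsOut
        48000, 48000);                  // sampleRateIn, sampleRateOut

    if (ma_data_converter_init(&cfg, NULL, &cv) == MA_SUCCESS) {
        // equal rates and channel counts: no resampler, passthrough channel
        // converter, and only hasPostFormatConversion ends up set
        ma_data_converter_uninit(&cv, NULL);
    }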
*/ + pConverter->hasPreFormatConversion = MA_FALSE; + pConverter->hasPostFormatConversion = MA_FALSE; + } else { + /* The formats are different so we need to do either pre- or post-format conversion. It doesn't matter which. */ + pConverter->hasPreFormatConversion = MA_FALSE; + pConverter->hasPostFormatConversion = MA_TRUE; + } + } else { + /* We have a channel converter and/or resampler so we'll need channel conversion based on the mid format. */ + if (pConverter->formatIn != midFormat) { + pConverter->hasPreFormatConversion = MA_TRUE; + } + if (pConverter->formatOut != midFormat) { + pConverter->hasPostFormatConversion = MA_TRUE; + } + } + + /* We can enable passthrough optimizations if applicable. Note that we'll only be able to do this if the sample rate is static. */ + if (pConverter->hasPreFormatConversion == MA_FALSE && + pConverter->hasPostFormatConversion == MA_FALSE && + pConverter->hasChannelConverter == MA_FALSE && + pConverter->hasResampler == MA_FALSE) { + pConverter->isPassthrough = MA_TRUE; + } + + + /* We now need to determine our execution path. */ + if (pConverter->isPassthrough) { + pConverter->executionPath = ma_data_converter_execution_path_passthrough; + } else { + if (pConverter->channelsIn < pConverter->channelsOut) { + /* Do resampling first, if necessary. */ + MA_ASSERT(pConverter->hasChannelConverter == MA_TRUE); + + if (pConverter->hasResampler) { + pConverter->executionPath = ma_data_converter_execution_path_resample_first; + } else { + pConverter->executionPath = ma_data_converter_execution_path_channels_only; + } + } else { + /* Do channel conversion first, if necessary. */ + if (pConverter->hasChannelConverter) { + if (pConverter->hasResampler) { + pConverter->executionPath = ma_data_converter_execution_path_channels_first; + } else { + pConverter->executionPath = ma_data_converter_execution_path_channels_only; + } + } else { + /* Channel routing not required. 
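So only the resampler and/or format stages remain: a mono f32 stream going 44100 -> 48000 takes resample_only, while s16 -> f32 at equal rates and channel counts takes format_only. The two branches above follow the same cost rule as ma_resampler_config_init_from_data_converter_config: 2ch 44100 -> 6ch 48000 resamples first (cheaper at 2 channels), whereas 8ch -> 2ch with a rate change converts channels first.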
*/ + if (pConverter->hasResampler) { + pConverter->executionPath = ma_data_converter_execution_path_resample_only; + } else { + pConverter->executionPath = ma_data_converter_execution_path_format_only; + } + } + } + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_data_converter_init(const ma_data_converter_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_data_converter* pConverter) +{ + ma_result result; + size_t heapSizeInBytes; + void* pHeap; + + result = ma_data_converter_get_heap_size(pConfig, &heapSizeInBytes); + if (result != MA_SUCCESS) { + return result; + } + + if (heapSizeInBytes > 0) { + pHeap = ma_malloc(heapSizeInBytes, pAllocationCallbacks); + if (pHeap == NULL) { + return MA_OUT_OF_MEMORY; + } + } else { + pHeap = NULL; + } + + result = ma_data_converter_init_preallocated(pConfig, pHeap, pConverter); + if (result != MA_SUCCESS) { + ma_free(pHeap, pAllocationCallbacks); + return result; + } + + pConverter->_ownsHeap = MA_TRUE; + return MA_SUCCESS; +} + +MA_API void ma_data_converter_uninit(ma_data_converter* pConverter, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pConverter == NULL) { + return; + } + + if (pConverter->hasResampler) { + ma_resampler_uninit(&pConverter->resampler, pAllocationCallbacks); + } + + ma_channel_converter_uninit(&pConverter->channelConverter, pAllocationCallbacks); + + if (pConverter->_ownsHeap) { + ma_free(pConverter->_pHeap, pAllocationCallbacks); + } +} + +static ma_result ma_data_converter_process_pcm_frames__passthrough(ma_data_converter* pConverter, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut) +{ + ma_uint64 frameCountIn; + ma_uint64 frameCountOut; + ma_uint64 frameCount; + + MA_ASSERT(pConverter != NULL); + + frameCountIn = 0; + if (pFrameCountIn != NULL) { + frameCountIn = *pFrameCountIn; + } + + frameCountOut = 0; + if (pFrameCountOut != NULL) { + frameCountOut = *pFrameCountOut; + } + + frameCount = ma_min(frameCountIn, frameCountOut); + + if (pFramesOut != NULL) { + if (pFramesIn != NULL) { + ma_copy_memory_64(pFramesOut, pFramesIn, frameCount * ma_get_bytes_per_frame(pConverter->formatOut, pConverter->channelsOut)); + } else { + ma_zero_memory_64(pFramesOut, frameCount * ma_get_bytes_per_frame(pConverter->formatOut, pConverter->channelsOut)); + } + } + + if (pFrameCountIn != NULL) { + *pFrameCountIn = frameCount; + } + if (pFrameCountOut != NULL) { + *pFrameCountOut = frameCount; + } + + return MA_SUCCESS; +} + +static ma_result ma_data_converter_process_pcm_frames__format_only(ma_data_converter* pConverter, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut) +{ + ma_uint64 frameCountIn; + ma_uint64 frameCountOut; + ma_uint64 frameCount; + + MA_ASSERT(pConverter != NULL); + + frameCountIn = 0; + if (pFrameCountIn != NULL) { + frameCountIn = *pFrameCountIn; + } + + frameCountOut = 0; + if (pFrameCountOut != NULL) { + frameCountOut = *pFrameCountOut; + } + + frameCount = ma_min(frameCountIn, frameCountOut); + + if (pFramesOut != NULL) { + if (pFramesIn != NULL) { + ma_convert_pcm_frames_format(pFramesOut, pConverter->formatOut, pFramesIn, pConverter->formatIn, frameCount, pConverter->channelsIn, pConverter->ditherMode); + } else { + ma_zero_memory_64(pFramesOut, frameCount * ma_get_bytes_per_frame(pConverter->formatOut, pConverter->channelsOut)); + } + } + + if (pFrameCountIn != NULL) { + *pFrameCountIn = frameCount; + } + if (pFrameCountOut != NULL) { + *pFrameCountOut = frameCount; + } + + return 
MA_SUCCESS; +} + + +static ma_result ma_data_converter_process_pcm_frames__resample_with_format_conversion(ma_data_converter* pConverter, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut) +{ + ma_result result = MA_SUCCESS; + ma_uint64 frameCountIn; + ma_uint64 frameCountOut; + ma_uint64 framesProcessedIn; + ma_uint64 framesProcessedOut; + + MA_ASSERT(pConverter != NULL); + + frameCountIn = 0; + if (pFrameCountIn != NULL) { + frameCountIn = *pFrameCountIn; + } + + frameCountOut = 0; + if (pFrameCountOut != NULL) { + frameCountOut = *pFrameCountOut; + } + + framesProcessedIn = 0; + framesProcessedOut = 0; + + while (framesProcessedOut < frameCountOut) { + ma_uint8 pTempBufferOut[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + const ma_uint32 tempBufferOutCap = sizeof(pTempBufferOut) / ma_get_bytes_per_frame(pConverter->resampler.format, pConverter->resampler.channels); + const void* pFramesInThisIteration; + /* */ void* pFramesOutThisIteration; + ma_uint64 frameCountInThisIteration; + ma_uint64 frameCountOutThisIteration; + + if (pFramesIn != NULL) { + pFramesInThisIteration = ma_offset_ptr(pFramesIn, framesProcessedIn * ma_get_bytes_per_frame(pConverter->formatIn, pConverter->channelsIn)); + } else { + pFramesInThisIteration = NULL; + } + + if (pFramesOut != NULL) { + pFramesOutThisIteration = ma_offset_ptr(pFramesOut, framesProcessedOut * ma_get_bytes_per_frame(pConverter->formatOut, pConverter->channelsOut)); + } else { + pFramesOutThisIteration = NULL; + } + + /* Do a pre format conversion if necessary. */ + if (pConverter->hasPreFormatConversion) { + ma_uint8 pTempBufferIn[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + const ma_uint32 tempBufferInCap = sizeof(pTempBufferIn) / ma_get_bytes_per_frame(pConverter->resampler.format, pConverter->resampler.channels); + + frameCountInThisIteration = (frameCountIn - framesProcessedIn); + if (frameCountInThisIteration > tempBufferInCap) { + frameCountInThisIteration = tempBufferInCap; + } + + if (pConverter->hasPostFormatConversion) { + if (frameCountInThisIteration > tempBufferOutCap) { + frameCountInThisIteration = tempBufferOutCap; + } + } + + if (pFramesInThisIteration != NULL) { + ma_convert_pcm_frames_format(pTempBufferIn, pConverter->resampler.format, pFramesInThisIteration, pConverter->formatIn, frameCountInThisIteration, pConverter->channelsIn, pConverter->ditherMode); + } else { + MA_ZERO_MEMORY(pTempBufferIn, sizeof(pTempBufferIn)); + } + + frameCountOutThisIteration = (frameCountOut - framesProcessedOut); + + if (pConverter->hasPostFormatConversion) { + /* Both input and output conversion required. Output to the temp buffer. */ + if (frameCountOutThisIteration > tempBufferOutCap) { + frameCountOutThisIteration = tempBufferOutCap; + } + + result = ma_resampler_process_pcm_frames(&pConverter->resampler, pTempBufferIn, &frameCountInThisIteration, pTempBufferOut, &frameCountOutThisIteration); + } else { + /* Only pre-format required. Output straight to the output buffer. */ + result = ma_resampler_process_pcm_frames(&pConverter->resampler, pTempBufferIn, &frameCountInThisIteration, pFramesOutThisIteration, &frameCountOutThisIteration); + } + + if (result != MA_SUCCESS) { + break; + } + } else { + /* No pre-format required. Just read straight from the input buffer. 
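Either way the conversion is chunked through the stack buffers. For scale, assuming MA_DATA_CONVERTER_STACK_BUFFER_SIZE keeps its usual 4096-byte definition from earlier in this file, an f32 stereo resampler format gives tempBufferOutCap = 4096 / (4 bytes * 2 channels) = 512 frames per pass of this loop.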
*/ + MA_ASSERT(pConverter->hasPostFormatConversion == MA_TRUE); + + frameCountInThisIteration = (frameCountIn - framesProcessedIn); + frameCountOutThisIteration = (frameCountOut - framesProcessedOut); + if (frameCountOutThisIteration > tempBufferOutCap) { + frameCountOutThisIteration = tempBufferOutCap; + } + + result = ma_resampler_process_pcm_frames(&pConverter->resampler, pFramesInThisIteration, &frameCountInThisIteration, pTempBufferOut, &frameCountOutThisIteration); + if (result != MA_SUCCESS) { + break; + } + } + + /* If we are doing a post format conversion we need to do that now. */ + if (pConverter->hasPostFormatConversion) { + if (pFramesOutThisIteration != NULL) { + ma_convert_pcm_frames_format(pFramesOutThisIteration, pConverter->formatOut, pTempBufferOut, pConverter->resampler.format, frameCountOutThisIteration, pConverter->resampler.channels, pConverter->ditherMode); + } + } + + framesProcessedIn += frameCountInThisIteration; + framesProcessedOut += frameCountOutThisIteration; + + MA_ASSERT(framesProcessedIn <= frameCountIn); + MA_ASSERT(framesProcessedOut <= frameCountOut); + + if (frameCountOutThisIteration == 0) { + break; /* Consumed all of our input data. */ + } + } + + if (pFrameCountIn != NULL) { + *pFrameCountIn = framesProcessedIn; + } + if (pFrameCountOut != NULL) { + *pFrameCountOut = framesProcessedOut; + } + + return result; +} + +static ma_result ma_data_converter_process_pcm_frames__resample_only(ma_data_converter* pConverter, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut) +{ + MA_ASSERT(pConverter != NULL); + + if (pConverter->hasPreFormatConversion == MA_FALSE && pConverter->hasPostFormatConversion == MA_FALSE) { + /* Neither pre- nor post-format required. This is simple case where only resampling is required. */ + return ma_resampler_process_pcm_frames(&pConverter->resampler, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut); + } else { + /* Format conversion required. */ + return ma_data_converter_process_pcm_frames__resample_with_format_conversion(pConverter, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut); + } +} + +static ma_result ma_data_converter_process_pcm_frames__channels_only(ma_data_converter* pConverter, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut) +{ + ma_result result; + ma_uint64 frameCountIn; + ma_uint64 frameCountOut; + ma_uint64 frameCount; + + MA_ASSERT(pConverter != NULL); + + frameCountIn = 0; + if (pFrameCountIn != NULL) { + frameCountIn = *pFrameCountIn; + } + + frameCountOut = 0; + if (pFrameCountOut != NULL) { + frameCountOut = *pFrameCountOut; + } + + frameCount = ma_min(frameCountIn, frameCountOut); + + if (pConverter->hasPreFormatConversion == MA_FALSE && pConverter->hasPostFormatConversion == MA_FALSE) { + /* No format conversion required. */ + result = ma_channel_converter_process_pcm_frames(&pConverter->channelConverter, pFramesOut, pFramesIn, frameCount); + if (result != MA_SUCCESS) { + return result; + } + } else { + /* Format conversion required. 
*/ + ma_uint64 framesProcessed = 0; + + while (framesProcessed < frameCount) { + ma_uint8 pTempBufferOut[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + const ma_uint32 tempBufferOutCap = sizeof(pTempBufferOut) / ma_get_bytes_per_frame(pConverter->channelConverter.format, pConverter->channelConverter.channelsOut); + const void* pFramesInThisIteration; + /* */ void* pFramesOutThisIteration; + ma_uint64 frameCountThisIteration; + + if (pFramesIn != NULL) { + pFramesInThisIteration = ma_offset_ptr(pFramesIn, framesProcessed * ma_get_bytes_per_frame(pConverter->formatIn, pConverter->channelsIn)); + } else { + pFramesInThisIteration = NULL; + } + + if (pFramesOut != NULL) { + pFramesOutThisIteration = ma_offset_ptr(pFramesOut, framesProcessed * ma_get_bytes_per_frame(pConverter->formatOut, pConverter->channelsOut)); + } else { + pFramesOutThisIteration = NULL; + } + + /* Do a pre format conversion if necessary. */ + if (pConverter->hasPreFormatConversion) { + ma_uint8 pTempBufferIn[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + const ma_uint32 tempBufferInCap = sizeof(pTempBufferIn) / ma_get_bytes_per_frame(pConverter->channelConverter.format, pConverter->channelConverter.channelsIn); + + frameCountThisIteration = (frameCount - framesProcessed); + if (frameCountThisIteration > tempBufferInCap) { + frameCountThisIteration = tempBufferInCap; + } + + if (pConverter->hasPostFormatConversion) { + if (frameCountThisIteration > tempBufferOutCap) { + frameCountThisIteration = tempBufferOutCap; + } + } + + if (pFramesInThisIteration != NULL) { + ma_convert_pcm_frames_format(pTempBufferIn, pConverter->channelConverter.format, pFramesInThisIteration, pConverter->formatIn, frameCountThisIteration, pConverter->channelsIn, pConverter->ditherMode); + } else { + MA_ZERO_MEMORY(pTempBufferIn, sizeof(pTempBufferIn)); + } + + if (pConverter->hasPostFormatConversion) { + /* Both input and output conversion required. Output to the temp buffer. */ + result = ma_channel_converter_process_pcm_frames(&pConverter->channelConverter, pTempBufferOut, pTempBufferIn, frameCountThisIteration); + } else { + /* Only pre-format required. Output straight to the output buffer. */ + result = ma_channel_converter_process_pcm_frames(&pConverter->channelConverter, pFramesOutThisIteration, pTempBufferIn, frameCountThisIteration); + } + + if (result != MA_SUCCESS) { + break; + } + } else { + /* No pre-format required. Just read straight from the input buffer. */ + MA_ASSERT(pConverter->hasPostFormatConversion == MA_TRUE); + + frameCountThisIteration = (frameCount - framesProcessed); + if (frameCountThisIteration > tempBufferOutCap) { + frameCountThisIteration = tempBufferOutCap; + } + + result = ma_channel_converter_process_pcm_frames(&pConverter->channelConverter, pTempBufferOut, pFramesInThisIteration, frameCountThisIteration); + if (result != MA_SUCCESS) { + break; + } + } + + /* If we are doing a post format conversion we need to do that now. 
*/ + if (pConverter->hasPostFormatConversion) { + if (pFramesOutThisIteration != NULL) { + ma_convert_pcm_frames_format(pFramesOutThisIteration, pConverter->formatOut, pTempBufferOut, pConverter->channelConverter.format, frameCountThisIteration, pConverter->channelConverter.channelsOut, pConverter->ditherMode); + } + } + + framesProcessed += frameCountThisIteration; + } + } + + if (pFrameCountIn != NULL) { + *pFrameCountIn = frameCount; + } + if (pFrameCountOut != NULL) { + *pFrameCountOut = frameCount; + } + + return MA_SUCCESS; +} + +static ma_result ma_data_converter_process_pcm_frames__resample_first(ma_data_converter* pConverter, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut) +{ + ma_result result; + ma_uint64 frameCountIn; + ma_uint64 frameCountOut; + ma_uint64 framesProcessedIn; + ma_uint64 framesProcessedOut; + ma_uint8 pTempBufferIn[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; /* In resampler format. */ + ma_uint64 tempBufferInCap; + ma_uint8 pTempBufferMid[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; /* In resampler format, channel converter input format. */ + ma_uint64 tempBufferMidCap; + ma_uint8 pTempBufferOut[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; /* In channel converter output format. */ + ma_uint64 tempBufferOutCap; + + MA_ASSERT(pConverter != NULL); + MA_ASSERT(pConverter->resampler.format == pConverter->channelConverter.format); + MA_ASSERT(pConverter->resampler.channels == pConverter->channelConverter.channelsIn); + MA_ASSERT(pConverter->resampler.channels < pConverter->channelConverter.channelsOut); + + frameCountIn = 0; + if (pFrameCountIn != NULL) { + frameCountIn = *pFrameCountIn; + } + + frameCountOut = 0; + if (pFrameCountOut != NULL) { + frameCountOut = *pFrameCountOut; + } + + framesProcessedIn = 0; + framesProcessedOut = 0; + + tempBufferInCap = sizeof(pTempBufferIn) / ma_get_bytes_per_frame(pConverter->resampler.format, pConverter->resampler.channels); + tempBufferMidCap = sizeof(pTempBufferIn) / ma_get_bytes_per_frame(pConverter->resampler.format, pConverter->resampler.channels); + tempBufferOutCap = sizeof(pTempBufferOut) / ma_get_bytes_per_frame(pConverter->channelConverter.format, pConverter->channelConverter.channelsOut); + + while (framesProcessedOut < frameCountOut) { + ma_uint64 frameCountInThisIteration; + ma_uint64 frameCountOutThisIteration; + const void* pRunningFramesIn = NULL; + void* pRunningFramesOut = NULL; + const void* pResampleBufferIn; + void* pChannelsBufferOut; + + if (pFramesIn != NULL) { + pRunningFramesIn = ma_offset_ptr(pFramesIn, framesProcessedIn * ma_get_bytes_per_frame(pConverter->formatIn, pConverter->channelsIn)); + } + if (pFramesOut != NULL) { + pRunningFramesOut = ma_offset_ptr(pFramesOut, framesProcessedOut * ma_get_bytes_per_frame(pConverter->formatOut, pConverter->channelsOut)); + } + + /* Run input data through the resampler and output it to the temporary buffer. */ + frameCountInThisIteration = (frameCountIn - framesProcessedIn); + + if (pConverter->hasPreFormatConversion) { + if (frameCountInThisIteration > tempBufferInCap) { + frameCountInThisIteration = tempBufferInCap; + } + } + + frameCountOutThisIteration = (frameCountOut - framesProcessedOut); + if (frameCountOutThisIteration > tempBufferMidCap) { + frameCountOutThisIteration = tempBufferMidCap; + } + + /* We can't read more frames than can fit in the output buffer. 
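The fallback estimate used just below is the plain rate ratio: producing 480 output frames with sampleRateIn = 44100 and sampleRateOut = 48000 needs roughly 480 * 44100 / 48000 = 441 input frames. ma_resampler_get_required_input_frame_count is preferred when it succeeds because it can also account for the resampler's internal state.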
 */
+        if (pConverter->hasPostFormatConversion) {
+            if (frameCountOutThisIteration > tempBufferOutCap) {
+                frameCountOutThisIteration = tempBufferOutCap;
+            }
+        }
+
+        /* We need to ensure we don't try to process so many input frames that we run out of room in the output buffer. If that happens we'll end up glitching. */
+
+        /*
+        We need to try to predict how many input frames will be required for the resampler. If the
+        resampler can tell us, we'll use that. Otherwise we'll need to make a best guess. The further
+        off we are from this, the more wasted format conversions we'll end up doing.
+        */
+        #if 1
+        {
+            ma_uint64 requiredInputFrameCount;
+
+            result = ma_resampler_get_required_input_frame_count(&pConverter->resampler, frameCountOutThisIteration, &requiredInputFrameCount);
+            if (result != MA_SUCCESS) {
+                /* Fall back to a best guess. */
+                requiredInputFrameCount = (frameCountOutThisIteration * pConverter->resampler.sampleRateIn) / pConverter->resampler.sampleRateOut;
+            }
+
+            if (frameCountInThisIteration > requiredInputFrameCount) {
+                frameCountInThisIteration = requiredInputFrameCount;
+            }
+        }
+        #endif
+
+        if (pConverter->hasPreFormatConversion) {
+            if (pFramesIn != NULL) {
+                ma_convert_pcm_frames_format(pTempBufferIn, pConverter->resampler.format, pRunningFramesIn, pConverter->formatIn, frameCountInThisIteration, pConverter->channelsIn, pConverter->ditherMode);
+                pResampleBufferIn = pTempBufferIn;
+            } else {
+                pResampleBufferIn = NULL;
+            }
+        } else {
+            pResampleBufferIn = pRunningFramesIn;
+        }
+
+        result = ma_resampler_process_pcm_frames(&pConverter->resampler, pResampleBufferIn, &frameCountInThisIteration, pTempBufferMid, &frameCountOutThisIteration);
+        if (result != MA_SUCCESS) {
+            return result;
+        }
+
+
+        /*
+        The input data has been resampled so now we need to run it through the channel converter. The input data is always contained in pTempBufferMid. We only need to do
+        this part if we have an output buffer.
+        */
+        if (pFramesOut != NULL) {
+            if (pConverter->hasPostFormatConversion) {
+                pChannelsBufferOut = pTempBufferOut;
+            } else {
+                pChannelsBufferOut = pRunningFramesOut;
+            }
+
+            result = ma_channel_converter_process_pcm_frames(&pConverter->channelConverter, pChannelsBufferOut, pTempBufferMid, frameCountOutThisIteration);
+            if (result != MA_SUCCESS) {
+                return result;
+            }
+
+            /* Finally we do post format conversion. */
+            if (pConverter->hasPostFormatConversion) {
+                ma_convert_pcm_frames_format(pRunningFramesOut, pConverter->formatOut, pChannelsBufferOut, pConverter->channelConverter.format, frameCountOutThisIteration, pConverter->channelConverter.channelsOut, pConverter->ditherMode);
+            }
+        }
+
+
+        framesProcessedIn  += frameCountInThisIteration;
+        framesProcessedOut += frameCountOutThisIteration;
+
+        MA_ASSERT(framesProcessedIn <= frameCountIn);
+        MA_ASSERT(framesProcessedOut <= frameCountOut);
+
+        if (frameCountOutThisIteration == 0) {
+            break;  /* Consumed all of our input data. */
+        }
+    }
+
+    if (pFrameCountIn != NULL) {
+        *pFrameCountIn = framesProcessedIn;
+    }
+    if (pFrameCountOut != NULL) {
+        *pFrameCountOut = framesProcessedOut;
+    }
+
+    return MA_SUCCESS;
+}
+
+static ma_result ma_data_converter_process_pcm_frames__channels_first(ma_data_converter* pConverter, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut)
+{
+    ma_result result;
+    ma_uint64 frameCountIn;
+    ma_uint64 frameCountOut;
+    ma_uint64 framesProcessedIn;
+    ma_uint64 framesProcessedOut;
+    ma_uint8  pTempBufferIn[MA_DATA_CONVERTER_STACK_BUFFER_SIZE];    /* In resampler format.
*/ + ma_uint64 tempBufferInCap; + ma_uint8 pTempBufferMid[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; /* In resampler format, channel converter input format. */ + ma_uint64 tempBufferMidCap; + ma_uint8 pTempBufferOut[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; /* In channel converter output format. */ + ma_uint64 tempBufferOutCap; + + MA_ASSERT(pConverter != NULL); + MA_ASSERT(pConverter->resampler.format == pConverter->channelConverter.format); + MA_ASSERT(pConverter->resampler.channels == pConverter->channelConverter.channelsOut); + MA_ASSERT(pConverter->resampler.channels <= pConverter->channelConverter.channelsIn); + + frameCountIn = 0; + if (pFrameCountIn != NULL) { + frameCountIn = *pFrameCountIn; + } + + frameCountOut = 0; + if (pFrameCountOut != NULL) { + frameCountOut = *pFrameCountOut; + } + + framesProcessedIn = 0; + framesProcessedOut = 0; + + tempBufferInCap = sizeof(pTempBufferIn) / ma_get_bytes_per_frame(pConverter->channelConverter.format, pConverter->channelConverter.channelsIn); + tempBufferMidCap = sizeof(pTempBufferIn) / ma_get_bytes_per_frame(pConverter->channelConverter.format, pConverter->channelConverter.channelsOut); + tempBufferOutCap = sizeof(pTempBufferOut) / ma_get_bytes_per_frame(pConverter->resampler.format, pConverter->resampler.channels); + + while (framesProcessedOut < frameCountOut) { + ma_uint64 frameCountInThisIteration; + ma_uint64 frameCountOutThisIteration; + const void* pRunningFramesIn = NULL; + void* pRunningFramesOut = NULL; + const void* pChannelsBufferIn; + void* pResampleBufferOut; + + if (pFramesIn != NULL) { + pRunningFramesIn = ma_offset_ptr(pFramesIn, framesProcessedIn * ma_get_bytes_per_frame(pConverter->formatIn, pConverter->channelsIn)); + } + if (pFramesOut != NULL) { + pRunningFramesOut = ma_offset_ptr(pFramesOut, framesProcessedOut * ma_get_bytes_per_frame(pConverter->formatOut, pConverter->channelsOut)); + } + + /* + Before doing any processing we need to determine how many frames we should try processing + this iteration, for both input and output. The resampler requires us to perform format and + channel conversion before passing any data into it. If we get our input count wrong, we'll + end up performing redundant pre-processing. This isn't the end of the world, but it does + result in some inefficiencies proportionate to how far our estimates are off. + + If the resampler has a means to calculate exactly how much we'll need, we'll use that. + Otherwise we'll make a best guess. In order to do this, we'll need to calculate the output + frame count first. + */ + frameCountOutThisIteration = (frameCountOut - framesProcessedOut); + if (frameCountOutThisIteration > tempBufferMidCap) { + frameCountOutThisIteration = tempBufferMidCap; + } + + if (pConverter->hasPostFormatConversion) { + if (frameCountOutThisIteration > tempBufferOutCap) { + frameCountOutThisIteration = tempBufferOutCap; + } + } + + /* Now that we have the output frame count we can determine the input frame count. */ + frameCountInThisIteration = (frameCountIn - framesProcessedIn); + if (pConverter->hasPreFormatConversion) { + if (frameCountInThisIteration > tempBufferInCap) { + frameCountInThisIteration = tempBufferInCap; + } + } + + if (frameCountInThisIteration > tempBufferMidCap) { + frameCountInThisIteration = tempBufferMidCap; + } + + #if 1 + { + ma_uint64 requiredInputFrameCount; + + result = ma_resampler_get_required_input_frame_count(&pConverter->resampler, frameCountOutThisIteration, &requiredInputFrameCount); + if (result != MA_SUCCESS) { + /* Fall back to a best guess. 
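+                The guess is plain ratio arithmetic: converting 44100 -> 48000 and wanting 480
+                output frames estimates 480 * 44100 / 48000 = 441 input frames. Filter latency
+                means the true requirement can differ by a few frames, which is why the exact
+                query above is preferred whenever the resampler implements it.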
*/ + requiredInputFrameCount = (frameCountOutThisIteration * pConverter->resampler.sampleRateIn) / pConverter->resampler.sampleRateOut; + } + + if (frameCountInThisIteration > requiredInputFrameCount) { + frameCountInThisIteration = requiredInputFrameCount; + } + } + #endif + + + /* Pre format conversion. */ + if (pConverter->hasPreFormatConversion) { + if (pRunningFramesIn != NULL) { + ma_convert_pcm_frames_format(pTempBufferIn, pConverter->channelConverter.format, pRunningFramesIn, pConverter->formatIn, frameCountInThisIteration, pConverter->channelsIn, pConverter->ditherMode); + pChannelsBufferIn = pTempBufferIn; + } else { + pChannelsBufferIn = NULL; + } + } else { + pChannelsBufferIn = pRunningFramesIn; + } + + + /* Channel conversion. */ + result = ma_channel_converter_process_pcm_frames(&pConverter->channelConverter, pTempBufferMid, pChannelsBufferIn, frameCountInThisIteration); + if (result != MA_SUCCESS) { + return result; + } + + + /* Resampling. */ + if (pConverter->hasPostFormatConversion) { + pResampleBufferOut = pTempBufferOut; + } else { + pResampleBufferOut = pRunningFramesOut; + } + + result = ma_resampler_process_pcm_frames(&pConverter->resampler, pTempBufferMid, &frameCountInThisIteration, pResampleBufferOut, &frameCountOutThisIteration); + if (result != MA_SUCCESS) { + return result; + } + + + /* Post format conversion. */ + if (pConverter->hasPostFormatConversion) { + if (pRunningFramesOut != NULL) { + ma_convert_pcm_frames_format(pRunningFramesOut, pConverter->formatOut, pResampleBufferOut, pConverter->resampler.format, frameCountOutThisIteration, pConverter->channelsOut, pConverter->ditherMode); + } + } + + + framesProcessedIn += frameCountInThisIteration; + framesProcessedOut += frameCountOutThisIteration; + + MA_ASSERT(framesProcessedIn <= frameCountIn); + MA_ASSERT(framesProcessedOut <= frameCountOut); + + if (frameCountOutThisIteration == 0) { + break; /* Consumed all of our input data. 
*/ + } + } + + if (pFrameCountIn != NULL) { + *pFrameCountIn = framesProcessedIn; + } + if (pFrameCountOut != NULL) { + *pFrameCountOut = framesProcessedOut; + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_data_converter_process_pcm_frames(ma_data_converter* pConverter, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut) +{ + if (pConverter == NULL) { + return MA_INVALID_ARGS; + } + + switch (pConverter->executionPath) + { + case ma_data_converter_execution_path_passthrough: return ma_data_converter_process_pcm_frames__passthrough(pConverter, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut); + case ma_data_converter_execution_path_format_only: return ma_data_converter_process_pcm_frames__format_only(pConverter, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut); + case ma_data_converter_execution_path_channels_only: return ma_data_converter_process_pcm_frames__channels_only(pConverter, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut); + case ma_data_converter_execution_path_resample_only: return ma_data_converter_process_pcm_frames__resample_only(pConverter, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut); + case ma_data_converter_execution_path_resample_first: return ma_data_converter_process_pcm_frames__resample_first(pConverter, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut); + case ma_data_converter_execution_path_channels_first: return ma_data_converter_process_pcm_frames__channels_first(pConverter, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut); + default: return MA_INVALID_OPERATION; /* Should never hit this. */ + } +} + +MA_API ma_result ma_data_converter_set_rate(ma_data_converter* pConverter, ma_uint32 sampleRateIn, ma_uint32 sampleRateOut) +{ + if (pConverter == NULL) { + return MA_INVALID_ARGS; + } + + if (pConverter->hasResampler == MA_FALSE) { + return MA_INVALID_OPERATION; /* Dynamic resampling not enabled. */ + } + + return ma_resampler_set_rate(&pConverter->resampler, sampleRateIn, sampleRateOut); +} + +MA_API ma_result ma_data_converter_set_rate_ratio(ma_data_converter* pConverter, float ratioInOut) +{ + if (pConverter == NULL) { + return MA_INVALID_ARGS; + } + + if (pConverter->hasResampler == MA_FALSE) { + return MA_INVALID_OPERATION; /* Dynamic resampling not enabled. */ + } + + return ma_resampler_set_rate_ratio(&pConverter->resampler, ratioInOut); +} + +MA_API ma_uint64 ma_data_converter_get_input_latency(const ma_data_converter* pConverter) +{ + if (pConverter == NULL) { + return 0; + } + + if (pConverter->hasResampler) { + return ma_resampler_get_input_latency(&pConverter->resampler); + } + + return 0; /* No latency without a resampler. */ +} + +MA_API ma_uint64 ma_data_converter_get_output_latency(const ma_data_converter* pConverter) +{ + if (pConverter == NULL) { + return 0; + } + + if (pConverter->hasResampler) { + return ma_resampler_get_output_latency(&pConverter->resampler); + } + + return 0; /* No latency without a resampler. 
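+       When a resampler is present this reports the resampler's filter delay in output-rate
+       frames. A caller aligning timestamps across the converter can subtract it, e.g.
+       (illustrative names):
+
+           ma_uint64 latency = ma_data_converter_get_output_latency(&converter);
+           outputTimestampInFrames = framesWrittenSoFar - latency;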
 */
+}
+
+MA_API ma_result ma_data_converter_get_required_input_frame_count(const ma_data_converter* pConverter, ma_uint64 outputFrameCount, ma_uint64* pInputFrameCount)
+{
+    if (pInputFrameCount == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    *pInputFrameCount = 0;
+
+    if (pConverter == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    if (pConverter->hasResampler) {
+        return ma_resampler_get_required_input_frame_count(&pConverter->resampler, outputFrameCount, pInputFrameCount);
+    } else {
+        *pInputFrameCount = outputFrameCount;    /* 1:1 */
+        return MA_SUCCESS;
+    }
+}
+
+MA_API ma_result ma_data_converter_get_expected_output_frame_count(const ma_data_converter* pConverter, ma_uint64 inputFrameCount, ma_uint64* pOutputFrameCount)
+{
+    if (pOutputFrameCount == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    *pOutputFrameCount = 0;
+
+    if (pConverter == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    if (pConverter->hasResampler) {
+        return ma_resampler_get_expected_output_frame_count(&pConverter->resampler, inputFrameCount, pOutputFrameCount);
+    } else {
+        *pOutputFrameCount = inputFrameCount;    /* 1:1 */
+        return MA_SUCCESS;
+    }
+}
+
+MA_API ma_result ma_data_converter_get_input_channel_map(const ma_data_converter* pConverter, ma_channel* pChannelMap, size_t channelMapCap)
+{
+    if (pConverter == NULL || pChannelMap == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    /* The converter's input side corresponds to the channel converter's input side. */
+    if (pConverter->hasChannelConverter) {
+        ma_channel_converter_get_input_channel_map(&pConverter->channelConverter, pChannelMap, channelMapCap);
+    } else {
+        ma_channel_map_init_standard(ma_standard_channel_map_default, pChannelMap, channelMapCap, pConverter->channelsIn);
+    }
+
+    return MA_SUCCESS;
+}
+
+MA_API ma_result ma_data_converter_get_output_channel_map(const ma_data_converter* pConverter, ma_channel* pChannelMap, size_t channelMapCap)
+{
+    if (pConverter == NULL || pChannelMap == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    /* Likewise, the converter's output side is the channel converter's output side. */
+    if (pConverter->hasChannelConverter) {
+        ma_channel_converter_get_output_channel_map(&pConverter->channelConverter, pChannelMap, channelMapCap);
+    } else {
+        ma_channel_map_init_standard(ma_standard_channel_map_default, pChannelMap, channelMapCap, pConverter->channelsOut);
+    }
+
+    return MA_SUCCESS;
+}
+
+MA_API ma_result ma_data_converter_reset(ma_data_converter* pConverter)
+{
+    if (pConverter == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    /* There's nothing to do if we're not resampling.
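+    When a resampler is present, resetting clears its internal filter history so the tail of
+    one stream doesn't bleed into the next. A typical pattern when reusing a single converter
+    across unrelated clips (illustrative names):
+
+        ma_data_converter_process_pcm_frames(&converter, pClipA, &clipAFrames, pOut, &outFrames);
+        ma_data_converter_reset(&converter);    <-- drop clip A's filter tail
+        ma_data_converter_process_pcm_frames(&converter, pClipB, &clipBFrames, pOut, &outFrames);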
 */
+    if (pConverter->hasResampler == MA_FALSE) {
+        return MA_SUCCESS;
+    }
+
+    return ma_resampler_reset(&pConverter->resampler);
+}
+
+
+
+/**************************************************************************************************************************************************************
+
+Channel Maps
+
+**************************************************************************************************************************************************************/
+static ma_channel ma_channel_map_init_standard_channel(ma_standard_channel_map standardChannelMap, ma_uint32 channelCount, ma_uint32 channelIndex);
+
+MA_API ma_channel ma_channel_map_get_channel(const ma_channel* pChannelMap, ma_uint32 channelCount, ma_uint32 channelIndex)
+{
+    if (pChannelMap == NULL) {
+        return ma_channel_map_init_standard_channel(ma_standard_channel_map_default, channelCount, channelIndex);
+    } else {
+        if (channelIndex >= channelCount) {
+            return MA_CHANNEL_NONE;
+        }
+
+        return pChannelMap[channelIndex];
+    }
+}
+
+MA_API void ma_channel_map_init_blank(ma_channel* pChannelMap, ma_uint32 channels)
+{
+    if (pChannelMap == NULL) {
+        return;
+    }
+
+    MA_ZERO_MEMORY(pChannelMap, sizeof(*pChannelMap) * channels);
+}
+
+
+static ma_channel ma_channel_map_init_standard_channel_microsoft(ma_uint32 channelCount, ma_uint32 channelIndex)
+{
+    if (channelCount == 0 || channelIndex >= channelCount) {
+        return MA_CHANNEL_NONE;
+    }
+
+    /* This is the Microsoft channel map. Based off the speaker configurations mentioned here: https://docs.microsoft.com/en-us/windows-hardware/drivers/ddi/content/ksmedia/ns-ksmedia-ksaudio_channel_config */
+    switch (channelCount)
+    {
+        case 0: return MA_CHANNEL_NONE;
+
+        case 1:
+        {
+            return MA_CHANNEL_MONO;
+        } break;
+
+        case 2:
+        {
+            switch (channelIndex) {
+                case 0: return MA_CHANNEL_FRONT_LEFT;
+                case 1: return MA_CHANNEL_FRONT_RIGHT;
+            }
+        } break;
+
+        case 3: /* Not defined, but best guess. */
+        {
+            switch (channelIndex) {
+                case 0: return MA_CHANNEL_FRONT_LEFT;
+                case 1: return MA_CHANNEL_FRONT_RIGHT;
+                case 2: return MA_CHANNEL_FRONT_CENTER;
+            }
+        } break;
+
+        case 4:
+        {
+            switch (channelIndex) {
+                #ifndef MA_USE_QUAD_MICROSOFT_CHANNEL_MAP
+                /* Surround. Using the Surround profile has the advantage of the 3rd channel (MA_CHANNEL_FRONT_CENTER) mapping nicely with higher channel counts. */
+                case 0: return MA_CHANNEL_FRONT_LEFT;
+                case 1: return MA_CHANNEL_FRONT_RIGHT;
+                case 2: return MA_CHANNEL_FRONT_CENTER;
+                case 3: return MA_CHANNEL_BACK_CENTER;
+                #else
+                /* Quad. */
+                case 0: return MA_CHANNEL_FRONT_LEFT;
+                case 1: return MA_CHANNEL_FRONT_RIGHT;
+                case 2: return MA_CHANNEL_BACK_LEFT;
+                case 3: return MA_CHANNEL_BACK_RIGHT;
+                #endif
+            }
+        } break;
+
+        case 5: /* Not defined, but best guess. */
+        {
+            switch (channelIndex) {
+                case 0: return MA_CHANNEL_FRONT_LEFT;
+                case 1: return MA_CHANNEL_FRONT_RIGHT;
+                case 2: return MA_CHANNEL_FRONT_CENTER;
+                case 3: return MA_CHANNEL_BACK_LEFT;
+                case 4: return MA_CHANNEL_BACK_RIGHT;
+            }
+        } break;
+
+        case 6:
+        {
+            switch (channelIndex) {
+                case 0: return MA_CHANNEL_FRONT_LEFT;
+                case 1: return MA_CHANNEL_FRONT_RIGHT;
+                case 2: return MA_CHANNEL_FRONT_CENTER;
+                case 3: return MA_CHANNEL_LFE;
+                case 4: return MA_CHANNEL_SIDE_LEFT;
+                case 5: return MA_CHANNEL_SIDE_RIGHT;
+            }
+        } break;
+
+        case 7: /* Not defined, but best guess.
*/ + { + switch (channelIndex) { + case 0: return MA_CHANNEL_FRONT_LEFT; + case 1: return MA_CHANNEL_FRONT_RIGHT; + case 2: return MA_CHANNEL_FRONT_CENTER; + case 3: return MA_CHANNEL_LFE; + case 4: return MA_CHANNEL_BACK_CENTER; + case 5: return MA_CHANNEL_SIDE_LEFT; + case 6: return MA_CHANNEL_SIDE_RIGHT; + } + } break; + + case 8: + default: + { + switch (channelIndex) { + case 0: return MA_CHANNEL_FRONT_LEFT; + case 1: return MA_CHANNEL_FRONT_RIGHT; + case 2: return MA_CHANNEL_FRONT_CENTER; + case 3: return MA_CHANNEL_LFE; + case 4: return MA_CHANNEL_BACK_LEFT; + case 5: return MA_CHANNEL_BACK_RIGHT; + case 6: return MA_CHANNEL_SIDE_LEFT; + case 7: return MA_CHANNEL_SIDE_RIGHT; + } + } break; + } + + if (channelCount > 8) { + if (channelIndex < 32) { /* We have 32 AUX channels. */ + return (ma_channel)(MA_CHANNEL_AUX_0 + (channelIndex - 8)); + } + } + + /* Getting here means we don't know how to map the channel position so just return MA_CHANNEL_NONE. */ + return MA_CHANNEL_NONE; +} + +static ma_channel ma_channel_map_init_standard_channel_alsa(ma_uint32 channelCount, ma_uint32 channelIndex) +{ + switch (channelCount) + { + case 0: return MA_CHANNEL_NONE; + + case 1: + { + return MA_CHANNEL_MONO; + } break; + + case 2: + { + switch (channelIndex) { + case 0: return MA_CHANNEL_FRONT_LEFT; + case 1: return MA_CHANNEL_FRONT_RIGHT; + } + } break; + + case 3: + { + switch (channelIndex) { + case 0: return MA_CHANNEL_FRONT_LEFT; + case 1: return MA_CHANNEL_FRONT_RIGHT; + case 2: return MA_CHANNEL_FRONT_CENTER; + } + } break; + + case 4: + { + switch (channelIndex) { + case 0: return MA_CHANNEL_FRONT_LEFT; + case 1: return MA_CHANNEL_FRONT_RIGHT; + case 2: return MA_CHANNEL_BACK_LEFT; + case 3: return MA_CHANNEL_BACK_RIGHT; + } + } break; + + case 5: + { + switch (channelIndex) { + case 0: return MA_CHANNEL_FRONT_LEFT; + case 1: return MA_CHANNEL_FRONT_RIGHT; + case 2: return MA_CHANNEL_BACK_LEFT; + case 3: return MA_CHANNEL_BACK_RIGHT; + case 4: return MA_CHANNEL_FRONT_CENTER; + } + } break; + + case 6: + { + switch (channelIndex) { + case 0: return MA_CHANNEL_FRONT_LEFT; + case 1: return MA_CHANNEL_FRONT_RIGHT; + case 2: return MA_CHANNEL_BACK_LEFT; + case 3: return MA_CHANNEL_BACK_RIGHT; + case 4: return MA_CHANNEL_FRONT_CENTER; + case 5: return MA_CHANNEL_LFE; + } + } break; + + case 7: + { + switch (channelIndex) { + case 0: return MA_CHANNEL_FRONT_LEFT; + case 1: return MA_CHANNEL_FRONT_RIGHT; + case 2: return MA_CHANNEL_BACK_LEFT; + case 3: return MA_CHANNEL_BACK_RIGHT; + case 4: return MA_CHANNEL_FRONT_CENTER; + case 5: return MA_CHANNEL_LFE; + case 6: return MA_CHANNEL_BACK_CENTER; + } + } break; + + case 8: + default: + { + switch (channelIndex) { + case 0: return MA_CHANNEL_FRONT_LEFT; + case 1: return MA_CHANNEL_FRONT_RIGHT; + case 2: return MA_CHANNEL_BACK_LEFT; + case 3: return MA_CHANNEL_BACK_RIGHT; + case 4: return MA_CHANNEL_FRONT_CENTER; + case 5: return MA_CHANNEL_LFE; + case 6: return MA_CHANNEL_SIDE_LEFT; + case 7: return MA_CHANNEL_SIDE_RIGHT; + } + } break; + } + + if (channelCount > 8) { + if (channelIndex < 32) { /* We have 32 AUX channels. */ + return (ma_channel)(MA_CHANNEL_AUX_0 + (channelIndex - 8)); + } + } + + /* Getting here means we don't know how to map the channel position so just return MA_CHANNEL_NONE. 
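+    Note the arithmetic of the AUX fallback above: for a 10 channel device, channelIndex 8
+    resolves to MA_CHANNEL_AUX_0 and channelIndex 9 to MA_CHANNEL_AUX_1, while indices of 32
+    and above fall through to MA_CHANNEL_NONE here.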
*/ + return MA_CHANNEL_NONE; +} + +static ma_channel ma_channel_map_init_standard_channel_rfc3551(ma_uint32 channelCount, ma_uint32 channelIndex) +{ + switch (channelCount) + { + case 0: return MA_CHANNEL_NONE; + + case 1: + { + return MA_CHANNEL_MONO; + } break; + + case 2: + { + switch (channelIndex) { + case 0: return MA_CHANNEL_FRONT_LEFT; + case 1: return MA_CHANNEL_FRONT_RIGHT; + } + } break; + + case 3: + { + switch (channelIndex) { + case 0: return MA_CHANNEL_FRONT_LEFT; + case 1: return MA_CHANNEL_FRONT_RIGHT; + case 2: return MA_CHANNEL_FRONT_CENTER; + } + } break; + + case 4: + { + switch (channelIndex) { + case 0: return MA_CHANNEL_FRONT_LEFT; + case 2: return MA_CHANNEL_FRONT_CENTER; + case 1: return MA_CHANNEL_FRONT_RIGHT; + case 3: return MA_CHANNEL_BACK_CENTER; + } + } break; + + case 5: + { + switch (channelIndex) { + case 0: return MA_CHANNEL_FRONT_LEFT; + case 1: return MA_CHANNEL_FRONT_RIGHT; + case 2: return MA_CHANNEL_FRONT_CENTER; + case 3: return MA_CHANNEL_BACK_LEFT; + case 4: return MA_CHANNEL_BACK_RIGHT; + } + } break; + + case 6: + default: + { + switch (channelIndex) { + case 0: return MA_CHANNEL_FRONT_LEFT; + case 1: return MA_CHANNEL_SIDE_LEFT; + case 2: return MA_CHANNEL_FRONT_CENTER; + case 3: return MA_CHANNEL_FRONT_RIGHT; + case 4: return MA_CHANNEL_SIDE_RIGHT; + case 5: return MA_CHANNEL_BACK_CENTER; + } + } break; + } + + if (channelCount > 6) { + if (channelIndex < 32) { /* We have 32 AUX channels. */ + return (ma_channel)(MA_CHANNEL_AUX_0 + (channelIndex - 6)); + } + } + + /* Getting here means we don't know how to map the channel position so just return MA_CHANNEL_NONE. */ + return MA_CHANNEL_NONE; +} + +static ma_channel ma_channel_map_init_standard_channel_flac(ma_uint32 channelCount, ma_uint32 channelIndex) +{ + switch (channelCount) + { + case 0: return MA_CHANNEL_NONE; + + case 1: + { + return MA_CHANNEL_MONO; + } break; + + case 2: + { + switch (channelIndex) { + case 0: return MA_CHANNEL_FRONT_LEFT; + case 1: return MA_CHANNEL_FRONT_RIGHT; + } + } break; + + case 3: + { + switch (channelIndex) { + case 0: return MA_CHANNEL_FRONT_LEFT; + case 1: return MA_CHANNEL_FRONT_RIGHT; + case 2: return MA_CHANNEL_FRONT_CENTER; + } + } break; + + case 4: + { + switch (channelIndex) { + case 0: return MA_CHANNEL_FRONT_LEFT; + case 1: return MA_CHANNEL_FRONT_RIGHT; + case 2: return MA_CHANNEL_BACK_LEFT; + case 3: return MA_CHANNEL_BACK_RIGHT; + } + } break; + + case 5: + { + switch (channelIndex) { + case 0: return MA_CHANNEL_FRONT_LEFT; + case 1: return MA_CHANNEL_FRONT_RIGHT; + case 2: return MA_CHANNEL_FRONT_CENTER; + case 3: return MA_CHANNEL_BACK_LEFT; + case 4: return MA_CHANNEL_BACK_RIGHT; + } + } break; + + case 6: + { + switch (channelIndex) { + case 0: return MA_CHANNEL_FRONT_LEFT; + case 1: return MA_CHANNEL_FRONT_RIGHT; + case 2: return MA_CHANNEL_FRONT_CENTER; + case 3: return MA_CHANNEL_LFE; + case 4: return MA_CHANNEL_BACK_LEFT; + case 5: return MA_CHANNEL_BACK_RIGHT; + } + } break; + + case 7: + { + switch (channelIndex) { + case 0: return MA_CHANNEL_FRONT_LEFT; + case 1: return MA_CHANNEL_FRONT_RIGHT; + case 2: return MA_CHANNEL_FRONT_CENTER; + case 3: return MA_CHANNEL_LFE; + case 4: return MA_CHANNEL_BACK_CENTER; + case 5: return MA_CHANNEL_SIDE_LEFT; + case 6: return MA_CHANNEL_SIDE_RIGHT; + } + } break; + + case 8: + default: + { + switch (channelIndex) { + case 0: return MA_CHANNEL_FRONT_LEFT; + case 1: return MA_CHANNEL_FRONT_RIGHT; + case 2: return MA_CHANNEL_FRONT_CENTER; + case 3: return MA_CHANNEL_LFE; + case 4: return 
MA_CHANNEL_BACK_LEFT; + case 5: return MA_CHANNEL_BACK_RIGHT; + case 6: return MA_CHANNEL_SIDE_LEFT; + case 7: return MA_CHANNEL_SIDE_RIGHT; + } + } break; + } + + if (channelCount > 8) { + if (channelIndex < 32) { /* We have 32 AUX channels. */ + return (ma_channel)(MA_CHANNEL_AUX_0 + (channelIndex - 8)); + } + } + + /* Getting here means we don't know how to map the channel position so just return MA_CHANNEL_NONE. */ + return MA_CHANNEL_NONE; +} + +static ma_channel ma_channel_map_init_standard_channel_vorbis(ma_uint32 channelCount, ma_uint32 channelIndex) +{ + switch (channelCount) + { + case 0: return MA_CHANNEL_NONE; + + case 1: + { + return MA_CHANNEL_MONO; + } break; + + case 2: + { + switch (channelIndex) { + case 0: return MA_CHANNEL_FRONT_LEFT; + case 1: return MA_CHANNEL_FRONT_RIGHT; + } + } break; + + case 3: + { + switch (channelIndex) { + case 0: return MA_CHANNEL_FRONT_LEFT; + case 1: return MA_CHANNEL_FRONT_CENTER; + case 2: return MA_CHANNEL_FRONT_RIGHT; + } + } break; + + case 4: + { + switch (channelIndex) { + case 0: return MA_CHANNEL_FRONT_LEFT; + case 1: return MA_CHANNEL_FRONT_RIGHT; + case 2: return MA_CHANNEL_BACK_LEFT; + case 3: return MA_CHANNEL_BACK_RIGHT; + } + } break; + + case 5: + { + switch (channelIndex) { + case 0: return MA_CHANNEL_FRONT_LEFT; + case 1: return MA_CHANNEL_FRONT_CENTER; + case 2: return MA_CHANNEL_FRONT_RIGHT; + case 3: return MA_CHANNEL_BACK_LEFT; + case 4: return MA_CHANNEL_BACK_RIGHT; + } + } break; + + case 6: + { + switch (channelIndex) { + case 0: return MA_CHANNEL_FRONT_LEFT; + case 1: return MA_CHANNEL_FRONT_CENTER; + case 2: return MA_CHANNEL_FRONT_RIGHT; + case 3: return MA_CHANNEL_BACK_LEFT; + case 4: return MA_CHANNEL_BACK_RIGHT; + case 5: return MA_CHANNEL_LFE; + } + } break; + + case 7: + { + switch (channelIndex) { + case 0: return MA_CHANNEL_FRONT_LEFT; + case 1: return MA_CHANNEL_FRONT_CENTER; + case 2: return MA_CHANNEL_FRONT_RIGHT; + case 3: return MA_CHANNEL_SIDE_LEFT; + case 4: return MA_CHANNEL_SIDE_RIGHT; + case 5: return MA_CHANNEL_BACK_CENTER; + case 6: return MA_CHANNEL_LFE; + } + } break; + + case 8: + default: + { + switch (channelIndex) { + case 0: return MA_CHANNEL_FRONT_LEFT; + case 1: return MA_CHANNEL_FRONT_CENTER; + case 2: return MA_CHANNEL_FRONT_RIGHT; + case 3: return MA_CHANNEL_SIDE_LEFT; + case 4: return MA_CHANNEL_SIDE_RIGHT; + case 5: return MA_CHANNEL_BACK_LEFT; + case 6: return MA_CHANNEL_BACK_RIGHT; + case 7: return MA_CHANNEL_LFE; + } + } break; + } + + if (channelCount > 8) { + if (channelIndex < 32) { /* We have 32 AUX channels. */ + return (ma_channel)(MA_CHANNEL_AUX_0 + (channelIndex - 8)); + } + } + + /* Getting here means we don't know how to map the channel position so just return MA_CHANNEL_NONE. 
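+    Unlike the Microsoft map, Vorbis places the center between left and right, so for a
+    3 channel stream index 1 is MA_CHANNEL_FRONT_CENTER here but MA_CHANNEL_FRONT_RIGHT in
+    ma_channel_map_init_standard_channel_microsoft(). Converting between the two layouts is
+    therefore a genuine channel map conversion, not a straight copy.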
 */
+    return MA_CHANNEL_NONE;
+}
+
+static ma_channel ma_channel_map_init_standard_channel_sound4(ma_uint32 channelCount, ma_uint32 channelIndex)
+{
+    switch (channelCount)
+    {
+        case 0: return MA_CHANNEL_NONE;
+
+        case 1:
+        {
+            return MA_CHANNEL_MONO;
+        } break;
+
+        case 2:
+        {
+            switch (channelIndex) {
+                case 0: return MA_CHANNEL_FRONT_LEFT;
+                case 1: return MA_CHANNEL_FRONT_RIGHT;
+            }
+        } break;
+
+        case 3:
+        {
+            switch (channelIndex) {
+                case 0: return MA_CHANNEL_FRONT_LEFT;
+                case 1: return MA_CHANNEL_FRONT_RIGHT;
+                case 2: return MA_CHANNEL_FRONT_CENTER;
+            }
+        } break;
+
+        case 4:
+        {
+            switch (channelIndex) {
+                case 0: return MA_CHANNEL_FRONT_LEFT;
+                case 1: return MA_CHANNEL_FRONT_RIGHT;
+                case 2: return MA_CHANNEL_BACK_LEFT;
+                case 3: return MA_CHANNEL_BACK_RIGHT;
+            }
+        } break;
+
+        case 5:
+        {
+            switch (channelIndex) {
+                case 0: return MA_CHANNEL_FRONT_LEFT;
+                case 1: return MA_CHANNEL_FRONT_RIGHT;
+                case 2: return MA_CHANNEL_FRONT_CENTER;
+                case 3: return MA_CHANNEL_BACK_LEFT;
+                case 4: return MA_CHANNEL_BACK_RIGHT;
+            }
+        } break;
+
+        case 6:
+        {
+            switch (channelIndex) {
+                case 0: return MA_CHANNEL_FRONT_LEFT;
+                case 1: return MA_CHANNEL_FRONT_CENTER;
+                case 2: return MA_CHANNEL_FRONT_RIGHT;
+                case 3: return MA_CHANNEL_BACK_LEFT;
+                case 4: return MA_CHANNEL_BACK_RIGHT;
+                case 5: return MA_CHANNEL_LFE;
+            }
+        } break;
+
+        case 7:
+        {
+            switch (channelIndex) {
+                case 0: return MA_CHANNEL_FRONT_LEFT;
+                case 1: return MA_CHANNEL_FRONT_CENTER;
+                case 2: return MA_CHANNEL_FRONT_RIGHT;
+                case 3: return MA_CHANNEL_SIDE_LEFT;
+                case 4: return MA_CHANNEL_SIDE_RIGHT;
+                case 5: return MA_CHANNEL_BACK_CENTER;
+                case 6: return MA_CHANNEL_LFE;
+            }
+        } break;
+
+        case 8:
+        default:
+        {
+            switch (channelIndex) {
+                case 0: return MA_CHANNEL_FRONT_LEFT;
+                case 1: return MA_CHANNEL_FRONT_CENTER;
+                case 2: return MA_CHANNEL_FRONT_RIGHT;
+                case 3: return MA_CHANNEL_SIDE_LEFT;
+                case 4: return MA_CHANNEL_SIDE_RIGHT;
+                case 5: return MA_CHANNEL_BACK_LEFT;
+                case 6: return MA_CHANNEL_BACK_RIGHT;
+                case 7: return MA_CHANNEL_LFE;
+            }
+        } break;
+    }
+
+    if (channelCount > 8) {
+        if (channelIndex < 32) {    /* We have 32 AUX channels. */
+            return (ma_channel)(MA_CHANNEL_AUX_0 + (channelIndex - 8));
+        }
+    }
+
+    /* Getting here means we don't know how to map the channel position so just return MA_CHANNEL_NONE. */
+    return MA_CHANNEL_NONE;
+}
+
+static ma_channel ma_channel_map_init_standard_channel_sndio(ma_uint32 channelCount, ma_uint32 channelIndex)
+{
+    switch (channelCount)
+    {
+        case 0: return MA_CHANNEL_NONE;
+
+        case 1:
+        {
+            return MA_CHANNEL_MONO;
+        } break;
+
+        case 2:
+        {
+            switch (channelIndex) {
+                case 0: return MA_CHANNEL_FRONT_LEFT;
+                case 1: return MA_CHANNEL_FRONT_RIGHT;
+            }
+        } break;
+
+        case 3: /* Not defined, but best guess. */
+        {
+            switch (channelIndex) {
+                case 0: return MA_CHANNEL_FRONT_LEFT;
+                case 1: return MA_CHANNEL_FRONT_RIGHT;
+                case 2: return MA_CHANNEL_FRONT_CENTER;
+            }
+        } break;
+
+        case 4:
+        {
+            switch (channelIndex) {
+                case 0: return MA_CHANNEL_FRONT_LEFT;
+                case 1: return MA_CHANNEL_FRONT_RIGHT;
+                case 2: return MA_CHANNEL_BACK_LEFT;
+                case 3: return MA_CHANNEL_BACK_RIGHT;
+            }
+        } break;
+
+        case 5: /* Not defined, but best guess.
*/ + { + switch (channelIndex) { + case 0: return MA_CHANNEL_FRONT_LEFT; + case 1: return MA_CHANNEL_FRONT_RIGHT; + case 2: return MA_CHANNEL_BACK_LEFT; + case 3: return MA_CHANNEL_BACK_RIGHT; + case 4: return MA_CHANNEL_FRONT_CENTER; + } + } break; + + case 6: + default: + { + switch (channelIndex) { + case 0: return MA_CHANNEL_FRONT_LEFT; + case 1: return MA_CHANNEL_FRONT_RIGHT; + case 2: return MA_CHANNEL_BACK_LEFT; + case 3: return MA_CHANNEL_BACK_RIGHT; + case 4: return MA_CHANNEL_FRONT_CENTER; + case 5: return MA_CHANNEL_LFE; + } + } break; + } + + if (channelCount > 6) { + if (channelIndex < 32) { /* We have 32 AUX channels. */ + return (ma_channel)(MA_CHANNEL_AUX_0 + (channelIndex - 6)); + } + } + + /* Getting here means we don't know how to map the channel position so just return MA_CHANNEL_NONE. */ + return MA_CHANNEL_NONE; +} + + +static ma_channel ma_channel_map_init_standard_channel(ma_standard_channel_map standardChannelMap, ma_uint32 channelCount, ma_uint32 channelIndex) +{ + if (channelCount == 0 || channelIndex >= channelCount) { + return MA_CHANNEL_NONE; + } + + switch (standardChannelMap) + { + case ma_standard_channel_map_alsa: + { + return ma_channel_map_init_standard_channel_alsa(channelCount, channelIndex); + } break; + + case ma_standard_channel_map_rfc3551: + { + return ma_channel_map_init_standard_channel_rfc3551(channelCount, channelIndex); + } break; + + case ma_standard_channel_map_flac: + { + return ma_channel_map_init_standard_channel_flac(channelCount, channelIndex); + } break; + + case ma_standard_channel_map_vorbis: + { + return ma_channel_map_init_standard_channel_vorbis(channelCount, channelIndex); + } break; + + case ma_standard_channel_map_sound4: + { + return ma_channel_map_init_standard_channel_sound4(channelCount, channelIndex); + } break; + + case ma_standard_channel_map_sndio: + { + return ma_channel_map_init_standard_channel_sndio(channelCount, channelIndex); + } break; + + case ma_standard_channel_map_microsoft: /* Also default. */ + /*case ma_standard_channel_map_default;*/ + default: + { + return ma_channel_map_init_standard_channel_microsoft(channelCount, channelIndex); + } break; + } +} + +MA_API void ma_channel_map_init_standard(ma_standard_channel_map standardChannelMap, ma_channel* pChannelMap, size_t channelMapCap, ma_uint32 channels) +{ + ma_uint32 iChannel; + + if (pChannelMap == NULL || channelMapCap == 0 || channels == 0) { + return; + } + + for (iChannel = 0; iChannel < channels; iChannel += 1) { + if (channelMapCap == 0) { + break; /* Ran out of room. */ + } + + pChannelMap[0] = ma_channel_map_init_standard_channel(standardChannelMap, channels, iChannel); + pChannelMap += 1; + channelMapCap -= 1; + } +} + +MA_API void ma_channel_map_copy(ma_channel* pOut, const ma_channel* pIn, ma_uint32 channels) +{ + if (pOut != NULL && pIn != NULL && channels > 0) { + MA_COPY_MEMORY(pOut, pIn, sizeof(*pOut) * channels); + } +} + +MA_API void ma_channel_map_copy_or_default(ma_channel* pOut, size_t channelMapCapOut, const ma_channel* pIn, ma_uint32 channels) +{ + if (pOut == NULL || channels == 0) { + return; + } + + if (pIn != NULL) { + ma_channel_map_copy(pOut, pIn, channels); + } else { + ma_channel_map_init_standard(ma_standard_channel_map_default, pOut, channelMapCapOut, channels); + } +} + +MA_API ma_bool32 ma_channel_map_is_valid(const ma_channel* pChannelMap, ma_uint32 channels) +{ + /* A channel count of 0 is invalid. 
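+    Beyond that, the one structural rule enforced below is that MA_CHANNEL_MONO cannot appear
+    in a map with more than one channel. For example (illustrative):
+
+        ma_channel map[2] = { MA_CHANNEL_MONO, MA_CHANNEL_FRONT_RIGHT };
+        ma_channel_map_is_valid(map, 2);    <-- MA_FALSE: mono only makes sense on its own.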
*/ + if (channels == 0) { + return MA_FALSE; + } + + /* It does not make sense to have a mono channel when there is more than 1 channel. */ + if (channels > 1) { + ma_uint32 iChannel; + for (iChannel = 0; iChannel < channels; ++iChannel) { + if (ma_channel_map_get_channel(pChannelMap, channels, iChannel) == MA_CHANNEL_MONO) { + return MA_FALSE; + } + } + } + + return MA_TRUE; +} + +MA_API ma_bool32 ma_channel_map_is_equal(const ma_channel* pChannelMapA, const ma_channel* pChannelMapB, ma_uint32 channels) +{ + ma_uint32 iChannel; + + if (pChannelMapA == pChannelMapB) { + return MA_TRUE; + } + + for (iChannel = 0; iChannel < channels; ++iChannel) { + if (ma_channel_map_get_channel(pChannelMapA, channels, iChannel) != ma_channel_map_get_channel(pChannelMapB, channels, iChannel)) { + return MA_FALSE; + } + } + + return MA_TRUE; +} + +MA_API ma_bool32 ma_channel_map_is_blank(const ma_channel* pChannelMap, ma_uint32 channels) +{ + ma_uint32 iChannel; + + /* A null channel map is equivalent to the default channel map. */ + if (pChannelMap == NULL) { + return MA_FALSE; + } + + for (iChannel = 0; iChannel < channels; ++iChannel) { + if (pChannelMap[iChannel] != MA_CHANNEL_NONE) { + return MA_FALSE; + } + } + + return MA_TRUE; +} + +MA_API ma_bool32 ma_channel_map_contains_channel_position(ma_uint32 channels, const ma_channel* pChannelMap, ma_channel channelPosition) +{ + return ma_channel_map_find_channel_position(channels, pChannelMap, channelPosition, NULL); +} + +MA_API ma_bool32 ma_channel_map_find_channel_position(ma_uint32 channels, const ma_channel* pChannelMap, ma_channel channelPosition, ma_uint32* pChannelIndex) +{ + ma_uint32 iChannel; + + if (pChannelIndex != NULL) { + *pChannelIndex = (ma_uint32)-1; + } + + for (iChannel = 0; iChannel < channels; ++iChannel) { + if (ma_channel_map_get_channel(pChannelMap, channels, iChannel) == channelPosition) { + if (pChannelIndex != NULL) { + *pChannelIndex = iChannel; + } + + return MA_TRUE; + } + } + + /* Getting here means the channel position was not found. */ + return MA_FALSE; +} + +MA_API size_t ma_channel_map_to_string(const ma_channel* pChannelMap, ma_uint32 channels, char* pBufferOut, size_t bufferCap) +{ + size_t len; + ma_uint32 iChannel; + + len = 0; + + for (iChannel = 0; iChannel < channels; iChannel += 1) { + const char* pChannelStr = ma_channel_position_to_string(ma_channel_map_get_channel(pChannelMap, channels, iChannel)); + size_t channelStrLen = strlen(pChannelStr); + + /* Append the string if necessary. */ + if (pBufferOut != NULL && bufferCap > len + channelStrLen) { + MA_COPY_MEMORY(pBufferOut + len, pChannelStr, channelStrLen); + } + len += channelStrLen; + + /* Append a space if it's not the last item. */ + if (iChannel+1 < channels) { + if (pBufferOut != NULL && bufferCap > len + 1) { + pBufferOut[len] = ' '; + } + len += 1; + } + } + + /* Null terminate. Don't increment the length here. 
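+    The full length is returned even when the buffer is too small (writes are simply skipped),
+    so a map can be measured with a NULL buffer first and filled on a second call
+    (illustrative):
+
+        size_t len = ma_channel_map_to_string(pChannelMap, channels, NULL, 0);
+        ...allocate at least len + 2 bytes, then call again with the real buffer...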
*/ + if (pBufferOut != NULL && bufferCap > len + 1) { + pBufferOut[len] = '\0'; + } + + return len; +} + +MA_API const char* ma_channel_position_to_string(ma_channel channel) +{ + switch (channel) + { + case MA_CHANNEL_NONE : return "CHANNEL_NONE"; + case MA_CHANNEL_MONO : return "CHANNEL_MONO"; + case MA_CHANNEL_FRONT_LEFT : return "CHANNEL_FRONT_LEFT"; + case MA_CHANNEL_FRONT_RIGHT : return "CHANNEL_FRONT_RIGHT"; + case MA_CHANNEL_FRONT_CENTER : return "CHANNEL_FRONT_CENTER"; + case MA_CHANNEL_LFE : return "CHANNEL_LFE"; + case MA_CHANNEL_BACK_LEFT : return "CHANNEL_BACK_LEFT"; + case MA_CHANNEL_BACK_RIGHT : return "CHANNEL_BACK_RIGHT"; + case MA_CHANNEL_FRONT_LEFT_CENTER : return "CHANNEL_FRONT_LEFT_CENTER"; + case MA_CHANNEL_FRONT_RIGHT_CENTER: return "CHANNEL_FRONT_RIGHT_CENTER"; + case MA_CHANNEL_BACK_CENTER : return "CHANNEL_BACK_CENTER"; + case MA_CHANNEL_SIDE_LEFT : return "CHANNEL_SIDE_LEFT"; + case MA_CHANNEL_SIDE_RIGHT : return "CHANNEL_SIDE_RIGHT"; + case MA_CHANNEL_TOP_CENTER : return "CHANNEL_TOP_CENTER"; + case MA_CHANNEL_TOP_FRONT_LEFT : return "CHANNEL_TOP_FRONT_LEFT"; + case MA_CHANNEL_TOP_FRONT_CENTER : return "CHANNEL_TOP_FRONT_CENTER"; + case MA_CHANNEL_TOP_FRONT_RIGHT : return "CHANNEL_TOP_FRONT_RIGHT"; + case MA_CHANNEL_TOP_BACK_LEFT : return "CHANNEL_TOP_BACK_LEFT"; + case MA_CHANNEL_TOP_BACK_CENTER : return "CHANNEL_TOP_BACK_CENTER"; + case MA_CHANNEL_TOP_BACK_RIGHT : return "CHANNEL_TOP_BACK_RIGHT"; + case MA_CHANNEL_AUX_0 : return "CHANNEL_AUX_0"; + case MA_CHANNEL_AUX_1 : return "CHANNEL_AUX_1"; + case MA_CHANNEL_AUX_2 : return "CHANNEL_AUX_2"; + case MA_CHANNEL_AUX_3 : return "CHANNEL_AUX_3"; + case MA_CHANNEL_AUX_4 : return "CHANNEL_AUX_4"; + case MA_CHANNEL_AUX_5 : return "CHANNEL_AUX_5"; + case MA_CHANNEL_AUX_6 : return "CHANNEL_AUX_6"; + case MA_CHANNEL_AUX_7 : return "CHANNEL_AUX_7"; + case MA_CHANNEL_AUX_8 : return "CHANNEL_AUX_8"; + case MA_CHANNEL_AUX_9 : return "CHANNEL_AUX_9"; + case MA_CHANNEL_AUX_10 : return "CHANNEL_AUX_10"; + case MA_CHANNEL_AUX_11 : return "CHANNEL_AUX_11"; + case MA_CHANNEL_AUX_12 : return "CHANNEL_AUX_12"; + case MA_CHANNEL_AUX_13 : return "CHANNEL_AUX_13"; + case MA_CHANNEL_AUX_14 : return "CHANNEL_AUX_14"; + case MA_CHANNEL_AUX_15 : return "CHANNEL_AUX_15"; + case MA_CHANNEL_AUX_16 : return "CHANNEL_AUX_16"; + case MA_CHANNEL_AUX_17 : return "CHANNEL_AUX_17"; + case MA_CHANNEL_AUX_18 : return "CHANNEL_AUX_18"; + case MA_CHANNEL_AUX_19 : return "CHANNEL_AUX_19"; + case MA_CHANNEL_AUX_20 : return "CHANNEL_AUX_20"; + case MA_CHANNEL_AUX_21 : return "CHANNEL_AUX_21"; + case MA_CHANNEL_AUX_22 : return "CHANNEL_AUX_22"; + case MA_CHANNEL_AUX_23 : return "CHANNEL_AUX_23"; + case MA_CHANNEL_AUX_24 : return "CHANNEL_AUX_24"; + case MA_CHANNEL_AUX_25 : return "CHANNEL_AUX_25"; + case MA_CHANNEL_AUX_26 : return "CHANNEL_AUX_26"; + case MA_CHANNEL_AUX_27 : return "CHANNEL_AUX_27"; + case MA_CHANNEL_AUX_28 : return "CHANNEL_AUX_28"; + case MA_CHANNEL_AUX_29 : return "CHANNEL_AUX_29"; + case MA_CHANNEL_AUX_30 : return "CHANNEL_AUX_30"; + case MA_CHANNEL_AUX_31 : return "CHANNEL_AUX_31"; + default: break; + } + + return "UNKNOWN"; +} + + + +/************************************************************************************************************************************************************** + +Conversion Helpers + +**************************************************************************************************************************************************************/ +MA_API ma_uint64 ma_convert_frames(void* pOut, ma_uint64 
frameCountOut, ma_format formatOut, ma_uint32 channelsOut, ma_uint32 sampleRateOut, const void* pIn, ma_uint64 frameCountIn, ma_format formatIn, ma_uint32 channelsIn, ma_uint32 sampleRateIn)
+{
+    ma_data_converter_config config;
+
+    config = ma_data_converter_config_init(formatIn, formatOut, channelsIn, channelsOut, sampleRateIn, sampleRateOut);
+    config.resampling.linear.lpfOrder = ma_min(MA_DEFAULT_RESAMPLER_LPF_ORDER, MA_MAX_FILTER_ORDER);
+
+    return ma_convert_frames_ex(pOut, frameCountOut, pIn, frameCountIn, &config);
+}
+
+MA_API ma_uint64 ma_convert_frames_ex(void* pOut, ma_uint64 frameCountOut, const void* pIn, ma_uint64 frameCountIn, const ma_data_converter_config* pConfig)
+{
+    ma_result result;
+    ma_data_converter converter;
+
+    if (frameCountIn == 0 || pConfig == NULL) {
+        return 0;
+    }
+
+    result = ma_data_converter_init(pConfig, NULL, &converter);
+    if (result != MA_SUCCESS) {
+        return 0;   /* Failed to initialize the data converter. */
+    }
+
+    if (pOut == NULL) {
+        result = ma_data_converter_get_expected_output_frame_count(&converter, frameCountIn, &frameCountOut);
+        if (result != MA_SUCCESS) {
+            if (result == MA_NOT_IMPLEMENTED) {
+                /* No way to calculate the number of frames, so we'll need to brute force it and loop. */
+                frameCountOut = 0;
+
+                while (frameCountIn > 0) {
+                    ma_uint64 framesProcessedIn  = frameCountIn;
+                    ma_uint64 framesProcessedOut = 0xFFFFFFFF;
+
+                    result = ma_data_converter_process_pcm_frames(&converter, pIn, &framesProcessedIn, NULL, &framesProcessedOut);
+                    if (result != MA_SUCCESS) {
+                        break;
+                    }
+
+                    frameCountIn  -= framesProcessedIn;
+                    frameCountOut += framesProcessedOut;    /* Accumulate the output count. Without this the loop would always report 0 frames. */
+                }
+            }
+        }
+    } else {
+        result = ma_data_converter_process_pcm_frames(&converter, pIn, &frameCountIn, pOut, &frameCountOut);
+        if (result != MA_SUCCESS) {
+            frameCountOut = 0;
+        }
+    }
+
+    ma_data_converter_uninit(&converter, NULL);
+    return frameCountOut;
+}
+
+
+/**************************************************************************************************************************************************************
+
+Ring Buffer
+
+**************************************************************************************************************************************************************/
+static MA_INLINE ma_uint32 ma_rb__extract_offset_in_bytes(ma_uint32 encodedOffset)
+{
+    return encodedOffset & 0x7FFFFFFF;
+}
+
+static MA_INLINE ma_uint32 ma_rb__extract_offset_loop_flag(ma_uint32 encodedOffset)
+{
+    return encodedOffset & 0x80000000;
+}
+
+static MA_INLINE void* ma_rb__get_read_ptr(ma_rb* pRB)
+{
+    MA_ASSERT(pRB != NULL);
+    return ma_offset_ptr(pRB->pBuffer, ma_rb__extract_offset_in_bytes(ma_atomic_load_32(&pRB->encodedReadOffset)));
+}
+
+static MA_INLINE void* ma_rb__get_write_ptr(ma_rb* pRB)
+{
+    MA_ASSERT(pRB != NULL);
+    return ma_offset_ptr(pRB->pBuffer, ma_rb__extract_offset_in_bytes(ma_atomic_load_32(&pRB->encodedWriteOffset)));
+}
+
+static MA_INLINE ma_uint32 ma_rb__construct_offset(ma_uint32 offsetInBytes, ma_uint32 offsetLoopFlag)
+{
+    return offsetLoopFlag | offsetInBytes;
+}
+
+static MA_INLINE void ma_rb__deconstruct_offset(ma_uint32 encodedOffset, ma_uint32* pOffsetInBytes, ma_uint32* pOffsetLoopFlag)
+{
+    MA_ASSERT(pOffsetInBytes != NULL);
+    MA_ASSERT(pOffsetLoopFlag != NULL);
+
+    *pOffsetInBytes  = ma_rb__extract_offset_in_bytes(encodedOffset);
+    *pOffsetLoopFlag = ma_rb__extract_offset_loop_flag(encodedOffset);
+}
+
+
+MA_API ma_result ma_rb_init_ex(size_t subbufferSizeInBytes, size_t subbufferCount, size_t subbufferStrideInBytes, void* pOptionalPreallocatedBuffer, const ma_allocation_callbacks*
pAllocationCallbacks, ma_rb* pRB)
+{
+    ma_result result;
+    const ma_uint32 maxSubBufferSize = 0x7FFFFFFF - (MA_SIMD_ALIGNMENT-1);
+
+    if (pRB == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    if (subbufferSizeInBytes == 0 || subbufferCount == 0) {
+        return MA_INVALID_ARGS;
+    }
+
+    if (subbufferSizeInBytes > maxSubBufferSize) {
+        return MA_INVALID_ARGS;    /* Maximum buffer size is ~2GB. The most significant bit is a flag for use internally. */
+    }
+
+
+    MA_ZERO_OBJECT(pRB);
+
+    result = ma_allocation_callbacks_init_copy(&pRB->allocationCallbacks, pAllocationCallbacks);
+    if (result != MA_SUCCESS) {
+        return result;
+    }
+
+    pRB->subbufferSizeInBytes = (ma_uint32)subbufferSizeInBytes;
+    pRB->subbufferCount = (ma_uint32)subbufferCount;
+
+    if (pOptionalPreallocatedBuffer != NULL) {
+        pRB->subbufferStrideInBytes = (ma_uint32)subbufferStrideInBytes;
+        pRB->pBuffer = pOptionalPreallocatedBuffer;
+    } else {
+        size_t bufferSizeInBytes;
+
+        /*
+        Here is where we allocate our own buffer. We always want to align this to MA_SIMD_ALIGNMENT for future SIMD optimization opportunity. To do this
+        we need to make sure the stride is a multiple of MA_SIMD_ALIGNMENT.
+        */
+        pRB->subbufferStrideInBytes = (pRB->subbufferSizeInBytes + (MA_SIMD_ALIGNMENT-1)) & ~(MA_SIMD_ALIGNMENT-1);    /* Round up to a multiple of MA_SIMD_ALIGNMENT. Masking with ~MA_SIMD_ALIGNMENT would only clear one bit and wouldn't align. */
+
+        bufferSizeInBytes = (size_t)pRB->subbufferCount*pRB->subbufferStrideInBytes;
+        pRB->pBuffer = ma_aligned_malloc(bufferSizeInBytes, MA_SIMD_ALIGNMENT, &pRB->allocationCallbacks);
+        if (pRB->pBuffer == NULL) {
+            return MA_OUT_OF_MEMORY;
+        }
+
+        MA_ZERO_MEMORY(pRB->pBuffer, bufferSizeInBytes);
+        pRB->ownsBuffer = MA_TRUE;
+    }
+
+    return MA_SUCCESS;
+}
+
+MA_API ma_result ma_rb_init(size_t bufferSizeInBytes, void* pOptionalPreallocatedBuffer, const ma_allocation_callbacks* pAllocationCallbacks, ma_rb* pRB)
+{
+    return ma_rb_init_ex(bufferSizeInBytes, 1, 0, pOptionalPreallocatedBuffer, pAllocationCallbacks, pRB);
+}
+
+MA_API void ma_rb_uninit(ma_rb* pRB)
+{
+    if (pRB == NULL) {
+        return;
+    }
+
+    if (pRB->ownsBuffer) {
+        ma_aligned_free(pRB->pBuffer, &pRB->allocationCallbacks);
+    }
+}
+
+MA_API void ma_rb_reset(ma_rb* pRB)
+{
+    if (pRB == NULL) {
+        return;
+    }
+
+    ma_atomic_exchange_32(&pRB->encodedReadOffset, 0);
+    ma_atomic_exchange_32(&pRB->encodedWriteOffset, 0);
+}
+
+MA_API ma_result ma_rb_acquire_read(ma_rb* pRB, size_t* pSizeInBytes, void** ppBufferOut)
+{
+    ma_uint32 writeOffset;
+    ma_uint32 writeOffsetInBytes;
+    ma_uint32 writeOffsetLoopFlag;
+    ma_uint32 readOffset;
+    ma_uint32 readOffsetInBytes;
+    ma_uint32 readOffsetLoopFlag;
+    size_t bytesAvailable;
+    size_t bytesRequested;
+
+    if (pRB == NULL || pSizeInBytes == NULL || ppBufferOut == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    /* The returned buffer should never move ahead of the write pointer. */
+    writeOffset = ma_atomic_load_32(&pRB->encodedWriteOffset);
+    ma_rb__deconstruct_offset(writeOffset, &writeOffsetInBytes, &writeOffsetLoopFlag);
+
+    readOffset = ma_atomic_load_32(&pRB->encodedReadOffset);
+    ma_rb__deconstruct_offset(readOffset, &readOffsetInBytes, &readOffsetLoopFlag);
+
+    /*
+    The number of bytes available depends on whether or not the read and write pointers are on the same loop iteration. If so, we
+    can only read up to the write pointer. If not, we can only read up to the end of the buffer.
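+    The loop flag lives in the most significant bit of the encoded offset (see
+    ma_rb__extract_offset_loop_flag()) and is toggled every time a pointer wraps. Worked
+    example with an 8 byte subbuffer: if the writer has wrapped once (encoded write offset
+    0x80000002) while the reader has not (encoded read offset 2), the flags differ and the
+    reader may consume bytes 2..7, i.e. up to the end of the buffer, rather than stopping at
+    the writer's in-bytes offset of 2.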
+ */ + if (readOffsetLoopFlag == writeOffsetLoopFlag) { + bytesAvailable = writeOffsetInBytes - readOffsetInBytes; + } else { + bytesAvailable = pRB->subbufferSizeInBytes - readOffsetInBytes; + } + + bytesRequested = *pSizeInBytes; + if (bytesRequested > bytesAvailable) { + bytesRequested = bytesAvailable; + } + + *pSizeInBytes = bytesRequested; + (*ppBufferOut) = ma_rb__get_read_ptr(pRB); + + return MA_SUCCESS; +} + +MA_API ma_result ma_rb_commit_read(ma_rb* pRB, size_t sizeInBytes) +{ + ma_uint32 readOffset; + ma_uint32 readOffsetInBytes; + ma_uint32 readOffsetLoopFlag; + ma_uint32 newReadOffsetInBytes; + ma_uint32 newReadOffsetLoopFlag; + + if (pRB == NULL) { + return MA_INVALID_ARGS; + } + + readOffset = ma_atomic_load_32(&pRB->encodedReadOffset); + ma_rb__deconstruct_offset(readOffset, &readOffsetInBytes, &readOffsetLoopFlag); + + /* Check that sizeInBytes is correct. It should never go beyond the end of the buffer. */ + newReadOffsetInBytes = (ma_uint32)(readOffsetInBytes + sizeInBytes); + if (newReadOffsetInBytes > pRB->subbufferSizeInBytes) { + return MA_INVALID_ARGS; /* <-- sizeInBytes will cause the read offset to overflow. */ + } + + /* Move the read pointer back to the start if necessary. */ + newReadOffsetLoopFlag = readOffsetLoopFlag; + if (newReadOffsetInBytes == pRB->subbufferSizeInBytes) { + newReadOffsetInBytes = 0; + newReadOffsetLoopFlag ^= 0x80000000; + } + + ma_atomic_exchange_32(&pRB->encodedReadOffset, ma_rb__construct_offset(newReadOffsetInBytes, newReadOffsetLoopFlag)); + + return MA_SUCCESS; +} + +MA_API ma_result ma_rb_acquire_write(ma_rb* pRB, size_t* pSizeInBytes, void** ppBufferOut) +{ + ma_uint32 readOffset; + ma_uint32 readOffsetInBytes; + ma_uint32 readOffsetLoopFlag; + ma_uint32 writeOffset; + ma_uint32 writeOffsetInBytes; + ma_uint32 writeOffsetLoopFlag; + size_t bytesAvailable; + size_t bytesRequested; + + if (pRB == NULL || pSizeInBytes == NULL || ppBufferOut == NULL) { + return MA_INVALID_ARGS; + } + + /* The returned buffer should never overtake the read buffer. */ + readOffset = ma_atomic_load_32(&pRB->encodedReadOffset); + ma_rb__deconstruct_offset(readOffset, &readOffsetInBytes, &readOffsetLoopFlag); + + writeOffset = ma_atomic_load_32(&pRB->encodedWriteOffset); + ma_rb__deconstruct_offset(writeOffset, &writeOffsetInBytes, &writeOffsetLoopFlag); + + /* + In the case of writing, if the write pointer and the read pointer are on the same loop iteration we can only + write up to the end of the buffer. Otherwise we can only write up to the read pointer. The write pointer should + never overtake the read pointer. + */ + if (writeOffsetLoopFlag == readOffsetLoopFlag) { + bytesAvailable = pRB->subbufferSizeInBytes - writeOffsetInBytes; + } else { + bytesAvailable = readOffsetInBytes - writeOffsetInBytes; + } + + bytesRequested = *pSizeInBytes; + if (bytesRequested > bytesAvailable) { + bytesRequested = bytesAvailable; + } + + *pSizeInBytes = bytesRequested; + *ppBufferOut = ma_rb__get_write_ptr(pRB); + + /* Clear the buffer if desired. 
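+    On the producer side the full acquire/fill/commit cycle looks like this (illustrative,
+    with error handling omitted; note that sizeInBytes may come back smaller than requested):
+
+        size_t sizeInBytes = bytesToWrite;
+        void* pDst;
+        ma_rb_acquire_write(&rb, &sizeInBytes, &pDst);
+        MA_COPY_MEMORY(pDst, pSrc, sizeInBytes);
+        ma_rb_commit_write(&rb, sizeInBytes);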
 */
+    if (pRB->clearOnWriteAcquire) {
+        MA_ZERO_MEMORY(*ppBufferOut, *pSizeInBytes);
+    }
+
+    return MA_SUCCESS;
+}
+
+MA_API ma_result ma_rb_commit_write(ma_rb* pRB, size_t sizeInBytes)
+{
+    ma_uint32 writeOffset;
+    ma_uint32 writeOffsetInBytes;
+    ma_uint32 writeOffsetLoopFlag;
+    ma_uint32 newWriteOffsetInBytes;
+    ma_uint32 newWriteOffsetLoopFlag;
+
+    if (pRB == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    writeOffset = ma_atomic_load_32(&pRB->encodedWriteOffset);
+    ma_rb__deconstruct_offset(writeOffset, &writeOffsetInBytes, &writeOffsetLoopFlag);
+
+    /* Check that sizeInBytes is correct. It should never go beyond the end of the buffer. */
+    newWriteOffsetInBytes = (ma_uint32)(writeOffsetInBytes + sizeInBytes);
+    if (newWriteOffsetInBytes > pRB->subbufferSizeInBytes) {
+        return MA_INVALID_ARGS;    /* <-- sizeInBytes will cause the write offset to overflow. */
+    }
+
+    /* Move the write pointer back to the start if necessary. */
+    newWriteOffsetLoopFlag = writeOffsetLoopFlag;
+    if (newWriteOffsetInBytes == pRB->subbufferSizeInBytes) {
+        newWriteOffsetInBytes = 0;
+        newWriteOffsetLoopFlag ^= 0x80000000;
+    }
+
+    ma_atomic_exchange_32(&pRB->encodedWriteOffset, ma_rb__construct_offset(newWriteOffsetInBytes, newWriteOffsetLoopFlag));
+
+    return MA_SUCCESS;
+}
+
+MA_API ma_result ma_rb_seek_read(ma_rb* pRB, size_t offsetInBytes)
+{
+    ma_uint32 readOffset;
+    ma_uint32 readOffsetInBytes;
+    ma_uint32 readOffsetLoopFlag;
+    ma_uint32 writeOffset;
+    ma_uint32 writeOffsetInBytes;
+    ma_uint32 writeOffsetLoopFlag;
+    ma_uint32 newReadOffsetInBytes;
+    ma_uint32 newReadOffsetLoopFlag;
+
+    if (pRB == NULL || offsetInBytes > pRB->subbufferSizeInBytes) {
+        return MA_INVALID_ARGS;
+    }
+
+    readOffset = ma_atomic_load_32(&pRB->encodedReadOffset);
+    ma_rb__deconstruct_offset(readOffset, &readOffsetInBytes, &readOffsetLoopFlag);
+
+    writeOffset = ma_atomic_load_32(&pRB->encodedWriteOffset);
+    ma_rb__deconstruct_offset(writeOffset, &writeOffsetInBytes, &writeOffsetLoopFlag);
+
+    newReadOffsetLoopFlag = readOffsetLoopFlag;
+
+    /* We cannot seek past the write pointer. */
+    if (readOffsetLoopFlag == writeOffsetLoopFlag) {
+        if ((readOffsetInBytes + offsetInBytes) > writeOffsetInBytes) {
+            newReadOffsetInBytes = writeOffsetInBytes;
+        } else {
+            newReadOffsetInBytes = (ma_uint32)(readOffsetInBytes + offsetInBytes);
+        }
+    } else {
+        /* May end up looping. */
+        if ((readOffsetInBytes + offsetInBytes) >= pRB->subbufferSizeInBytes) {
+            newReadOffsetInBytes = (ma_uint32)(readOffsetInBytes + offsetInBytes) - pRB->subbufferSizeInBytes;
+            newReadOffsetLoopFlag ^= 0x80000000;    /* <-- Looped.
 */
+        } else {
+            newReadOffsetInBytes = (ma_uint32)(readOffsetInBytes + offsetInBytes);
+        }
+    }
+
+    ma_atomic_exchange_32(&pRB->encodedReadOffset, ma_rb__construct_offset(newReadOffsetInBytes, newReadOffsetLoopFlag));
+    return MA_SUCCESS;
+}
+
+MA_API ma_result ma_rb_seek_write(ma_rb* pRB, size_t offsetInBytes)
+{
+    ma_uint32 readOffset;
+    ma_uint32 readOffsetInBytes;
+    ma_uint32 readOffsetLoopFlag;
+    ma_uint32 writeOffset;
+    ma_uint32 writeOffsetInBytes;
+    ma_uint32 writeOffsetLoopFlag;
+    ma_uint32 newWriteOffsetInBytes;
+    ma_uint32 newWriteOffsetLoopFlag;
+
+    if (pRB == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    readOffset = ma_atomic_load_32(&pRB->encodedReadOffset);
+    ma_rb__deconstruct_offset(readOffset, &readOffsetInBytes, &readOffsetLoopFlag);
+
+    writeOffset = ma_atomic_load_32(&pRB->encodedWriteOffset);
+    ma_rb__deconstruct_offset(writeOffset, &writeOffsetInBytes, &writeOffsetLoopFlag);
+
+    newWriteOffsetLoopFlag = writeOffsetLoopFlag;
+
+    /* We cannot overtake the read pointer. */
+    if (readOffsetLoopFlag == writeOffsetLoopFlag) {
+        /* May end up looping. */
+        if ((writeOffsetInBytes + offsetInBytes) >= pRB->subbufferSizeInBytes) {
+            newWriteOffsetInBytes = (ma_uint32)(writeOffsetInBytes + offsetInBytes) - pRB->subbufferSizeInBytes;
+            newWriteOffsetLoopFlag ^= 0x80000000;    /* <-- Looped. */
+        } else {
+            newWriteOffsetInBytes = (ma_uint32)(writeOffsetInBytes + offsetInBytes);
+        }
+    } else {
+        if ((writeOffsetInBytes + offsetInBytes) > readOffsetInBytes) {
+            newWriteOffsetInBytes = readOffsetInBytes;
+        } else {
+            newWriteOffsetInBytes = (ma_uint32)(writeOffsetInBytes + offsetInBytes);
+        }
+    }
+
+    ma_atomic_exchange_32(&pRB->encodedWriteOffset, ma_rb__construct_offset(newWriteOffsetInBytes, newWriteOffsetLoopFlag));
+    return MA_SUCCESS;
+}
+
+MA_API ma_int32 ma_rb_pointer_distance(ma_rb* pRB)
+{
+    ma_uint32 readOffset;
+    ma_uint32 readOffsetInBytes;
+    ma_uint32 readOffsetLoopFlag;
+    ma_uint32 writeOffset;
+    ma_uint32 writeOffsetInBytes;
+    ma_uint32 writeOffsetLoopFlag;
+
+    if (pRB == NULL) {
+        return 0;
+    }
+
+    readOffset = ma_atomic_load_32(&pRB->encodedReadOffset);
+    ma_rb__deconstruct_offset(readOffset, &readOffsetInBytes, &readOffsetLoopFlag);
+
+    writeOffset = ma_atomic_load_32(&pRB->encodedWriteOffset);
+    ma_rb__deconstruct_offset(writeOffset, &writeOffsetInBytes, &writeOffsetLoopFlag);
+
+    if (readOffsetLoopFlag == writeOffsetLoopFlag) {
+        return writeOffsetInBytes - readOffsetInBytes;
+    } else {
+        return writeOffsetInBytes + (pRB->subbufferSizeInBytes - readOffsetInBytes);
+    }
+}
+
+MA_API ma_uint32 ma_rb_available_read(ma_rb* pRB)
+{
+    ma_int32 dist;
+
+    if (pRB == NULL) {
+        return 0;
+    }
+
+    dist = ma_rb_pointer_distance(pRB);
+    if (dist < 0) {
+        return 0;
+    }
+
+    return dist;
+}
+
+MA_API ma_uint32 ma_rb_available_write(ma_rb* pRB)
+{
+    if (pRB == NULL) {
+        return 0;
+    }
+
+    return (ma_uint32)(ma_rb_get_subbuffer_size(pRB) - ma_rb_pointer_distance(pRB));
+}
+
+MA_API size_t ma_rb_get_subbuffer_size(ma_rb* pRB)
+{
+    if (pRB == NULL) {
+        return 0;
+    }
+
+    return pRB->subbufferSizeInBytes;
+}
+
+MA_API size_t ma_rb_get_subbuffer_stride(ma_rb* pRB)
+{
+    if (pRB == NULL) {
+        return 0;
+    }
+
+    if (pRB->subbufferStrideInBytes == 0) {
+        return (size_t)pRB->subbufferSizeInBytes;
+    }
+
+    return (size_t)pRB->subbufferStrideInBytes;
+}
+
+MA_API size_t ma_rb_get_subbuffer_offset(ma_rb* pRB, size_t subbufferIndex)
+{
+    if (pRB == NULL) {
+        return 0;
+    }
+
+    return subbufferIndex * ma_rb_get_subbuffer_stride(pRB);
+}
+
+MA_API void* ma_rb_get_subbuffer_ptr(ma_rb* pRB, size_t
subbufferIndex, void* pBuffer) +{ + if (pRB == NULL) { + return NULL; + } + + return ma_offset_ptr(pBuffer, ma_rb_get_subbuffer_offset(pRB, subbufferIndex)); +} + + + +static ma_result ma_pcm_rb_data_source__on_read(ma_data_source* pDataSource, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead) +{ + /* Since there's no notion of an end, we don't ever want to return MA_AT_END here. But it is possible to return 0. */ + ma_pcm_rb* pRB = (ma_pcm_rb*)pDataSource; + ma_result result; + ma_uint64 totalFramesRead; + + MA_ASSERT(pRB != NULL); + + /* We need to run this in a loop since the ring buffer itself may loop. */ + totalFramesRead = 0; + while (totalFramesRead < frameCount) { + void* pMappedBuffer; + ma_uint32 mappedFrameCount; + ma_uint64 framesToRead = frameCount - totalFramesRead; + if (framesToRead > 0xFFFFFFFF) { + framesToRead = 0xFFFFFFFF; + } + + mappedFrameCount = (ma_uint32)framesToRead; + result = ma_pcm_rb_acquire_read(pRB, &mappedFrameCount, &pMappedBuffer); + if (result != MA_SUCCESS) { + break; + } + + if (mappedFrameCount == 0) { + break; /* <-- End of ring buffer. */ + } + + ma_copy_pcm_frames(ma_offset_pcm_frames_ptr(pFramesOut, totalFramesRead, pRB->format, pRB->channels), pMappedBuffer, mappedFrameCount, pRB->format, pRB->channels); + + result = ma_pcm_rb_commit_read(pRB, mappedFrameCount); + if (result != MA_SUCCESS) { + break; + } + + totalFramesRead += mappedFrameCount; + } + + /* + There is no notion of an "end" in a ring buffer. If we didn't have enough data to fill the requested frame + count we'll need to pad with silence. If we don't do this, totalFramesRead might equal 0 which will result + in the data source layer at a higher level translating this to MA_AT_END which is incorrect for a ring buffer. + */ + if (totalFramesRead < frameCount) { + ma_silence_pcm_frames(ma_offset_pcm_frames_ptr(pFramesOut, totalFramesRead, pRB->format, pRB->channels), (frameCount - totalFramesRead), pRB->format, pRB->channels); + totalFramesRead = frameCount; + } + + *pFramesRead = totalFramesRead; + return MA_SUCCESS; +} + +static ma_result ma_pcm_rb_data_source__on_get_data_format(ma_data_source* pDataSource, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate, ma_channel* pChannelMap, size_t channelMapCap) +{ + ma_pcm_rb* pRB = (ma_pcm_rb*)pDataSource; + MA_ASSERT(pRB != NULL); + + if (pFormat != NULL) { + *pFormat = pRB->format; + } + + if (pChannels != NULL) { + *pChannels = pRB->channels; + } + + if (pSampleRate != NULL) { + *pSampleRate = pRB->sampleRate; + } + + /* Just assume the default channel map. 
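+    A raw ring buffer carries no channel map of its own. Because ma_pcm_rb exposes the
+    ma_data_source interface (see ma_gRBDataSourceVTable below), it can be read generically,
+    with gaps returned as silence rather than MA_AT_END (illustrative):
+
+        ma_uint64 framesRead;
+        ma_data_source_read_pcm_frames(&rb, pFramesOut, frameCount, &framesRead);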
*/ + if (pChannelMap != NULL) { + ma_channel_map_init_standard(ma_standard_channel_map_default, pChannelMap, channelMapCap, pRB->channels); + } + + return MA_SUCCESS; +} + +static ma_data_source_vtable ma_gRBDataSourceVTable = +{ + ma_pcm_rb_data_source__on_read, + NULL, /* onSeek */ + ma_pcm_rb_data_source__on_get_data_format, + NULL, /* onGetCursor */ + NULL, /* onGetLength */ + NULL, /* onSetLooping */ + 0 +}; + +static MA_INLINE ma_uint32 ma_pcm_rb_get_bpf(ma_pcm_rb* pRB) +{ + MA_ASSERT(pRB != NULL); + + return ma_get_bytes_per_frame(pRB->format, pRB->channels); +} + +MA_API ma_result ma_pcm_rb_init_ex(ma_format format, ma_uint32 channels, ma_uint32 subbufferSizeInFrames, ma_uint32 subbufferCount, ma_uint32 subbufferStrideInFrames, void* pOptionalPreallocatedBuffer, const ma_allocation_callbacks* pAllocationCallbacks, ma_pcm_rb* pRB) +{ + ma_uint32 bpf; + ma_result result; + + if (pRB == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pRB); + + bpf = ma_get_bytes_per_frame(format, channels); + if (bpf == 0) { + return MA_INVALID_ARGS; + } + + result = ma_rb_init_ex(subbufferSizeInFrames*bpf, subbufferCount, subbufferStrideInFrames*bpf, pOptionalPreallocatedBuffer, pAllocationCallbacks, &pRB->rb); + if (result != MA_SUCCESS) { + return result; + } + + pRB->format = format; + pRB->channels = channels; + pRB->sampleRate = 0; /* The sample rate is not passed in as a parameter. */ + + /* The PCM ring buffer is a data source. We need to get that set up as well. */ + { + ma_data_source_config dataSourceConfig = ma_data_source_config_init(); + dataSourceConfig.vtable = &ma_gRBDataSourceVTable; + + result = ma_data_source_init(&dataSourceConfig, &pRB->ds); + if (result != MA_SUCCESS) { + ma_rb_uninit(&pRB->rb); + return result; + } + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_pcm_rb_init(ma_format format, ma_uint32 channels, ma_uint32 bufferSizeInFrames, void* pOptionalPreallocatedBuffer, const ma_allocation_callbacks* pAllocationCallbacks, ma_pcm_rb* pRB) +{ + return ma_pcm_rb_init_ex(format, channels, bufferSizeInFrames, 1, 0, pOptionalPreallocatedBuffer, pAllocationCallbacks, pRB); +} + +MA_API void ma_pcm_rb_uninit(ma_pcm_rb* pRB) +{ + if (pRB == NULL) { + return; + } + + ma_data_source_uninit(&pRB->ds); + ma_rb_uninit(&pRB->rb); +} + +MA_API void ma_pcm_rb_reset(ma_pcm_rb* pRB) +{ + if (pRB == NULL) { + return; + } + + ma_rb_reset(&pRB->rb); +} + +MA_API ma_result ma_pcm_rb_acquire_read(ma_pcm_rb* pRB, ma_uint32* pSizeInFrames, void** ppBufferOut) +{ + size_t sizeInBytes; + ma_result result; + + if (pRB == NULL || pSizeInFrames == NULL) { + return MA_INVALID_ARGS; + } + + sizeInBytes = *pSizeInFrames * ma_pcm_rb_get_bpf(pRB); + + result = ma_rb_acquire_read(&pRB->rb, &sizeInBytes, ppBufferOut); + if (result != MA_SUCCESS) { + return result; + } + + *pSizeInFrames = (ma_uint32)(sizeInBytes / (size_t)ma_pcm_rb_get_bpf(pRB)); + return MA_SUCCESS; +} + +MA_API ma_result ma_pcm_rb_commit_read(ma_pcm_rb* pRB, ma_uint32 sizeInFrames) +{ + if (pRB == NULL) { + return MA_INVALID_ARGS; + } + + return ma_rb_commit_read(&pRB->rb, sizeInFrames * ma_pcm_rb_get_bpf(pRB)); +} + +MA_API ma_result ma_pcm_rb_acquire_write(ma_pcm_rb* pRB, ma_uint32* pSizeInFrames, void** ppBufferOut) +{ + size_t sizeInBytes; + ma_result result; + + if (pRB == NULL) { + return MA_INVALID_ARGS; + } + + sizeInBytes = *pSizeInFrames * ma_pcm_rb_get_bpf(pRB); + + result = ma_rb_acquire_write(&pRB->rb, &sizeInBytes, ppBufferOut); + if (result != MA_SUCCESS) { + return result; + } + + *pSizeInFrames = 
(ma_uint32)(sizeInBytes / ma_pcm_rb_get_bpf(pRB)); + return MA_SUCCESS; +} + +MA_API ma_result ma_pcm_rb_commit_write(ma_pcm_rb* pRB, ma_uint32 sizeInFrames) +{ + if (pRB == NULL) { + return MA_INVALID_ARGS; + } + + return ma_rb_commit_write(&pRB->rb, sizeInFrames * ma_pcm_rb_get_bpf(pRB)); +} + +MA_API ma_result ma_pcm_rb_seek_read(ma_pcm_rb* pRB, ma_uint32 offsetInFrames) +{ + if (pRB == NULL) { + return MA_INVALID_ARGS; + } + + return ma_rb_seek_read(&pRB->rb, offsetInFrames * ma_pcm_rb_get_bpf(pRB)); +} + +MA_API ma_result ma_pcm_rb_seek_write(ma_pcm_rb* pRB, ma_uint32 offsetInFrames) +{ + if (pRB == NULL) { + return MA_INVALID_ARGS; + } + + return ma_rb_seek_write(&pRB->rb, offsetInFrames * ma_pcm_rb_get_bpf(pRB)); +} + +MA_API ma_int32 ma_pcm_rb_pointer_distance(ma_pcm_rb* pRB) +{ + if (pRB == NULL) { + return 0; + } + + return ma_rb_pointer_distance(&pRB->rb) / ma_pcm_rb_get_bpf(pRB); +} + +MA_API ma_uint32 ma_pcm_rb_available_read(ma_pcm_rb* pRB) +{ + if (pRB == NULL) { + return 0; + } + + return ma_rb_available_read(&pRB->rb) / ma_pcm_rb_get_bpf(pRB); +} + +MA_API ma_uint32 ma_pcm_rb_available_write(ma_pcm_rb* pRB) +{ + if (pRB == NULL) { + return 0; + } + + return ma_rb_available_write(&pRB->rb) / ma_pcm_rb_get_bpf(pRB); +} + +MA_API ma_uint32 ma_pcm_rb_get_subbuffer_size(ma_pcm_rb* pRB) +{ + if (pRB == NULL) { + return 0; + } + + return (ma_uint32)(ma_rb_get_subbuffer_size(&pRB->rb) / ma_pcm_rb_get_bpf(pRB)); +} + +MA_API ma_uint32 ma_pcm_rb_get_subbuffer_stride(ma_pcm_rb* pRB) +{ + if (pRB == NULL) { + return 0; + } + + return (ma_uint32)(ma_rb_get_subbuffer_stride(&pRB->rb) / ma_pcm_rb_get_bpf(pRB)); +} + +MA_API ma_uint32 ma_pcm_rb_get_subbuffer_offset(ma_pcm_rb* pRB, ma_uint32 subbufferIndex) +{ + if (pRB == NULL) { + return 0; + } + + return (ma_uint32)(ma_rb_get_subbuffer_offset(&pRB->rb, subbufferIndex) / ma_pcm_rb_get_bpf(pRB)); +} + +MA_API void* ma_pcm_rb_get_subbuffer_ptr(ma_pcm_rb* pRB, ma_uint32 subbufferIndex, void* pBuffer) +{ + if (pRB == NULL) { + return NULL; + } + + return ma_rb_get_subbuffer_ptr(&pRB->rb, subbufferIndex, pBuffer); +} + +MA_API ma_format ma_pcm_rb_get_format(const ma_pcm_rb* pRB) +{ + if (pRB == NULL) { + return ma_format_unknown; + } + + return pRB->format; +} + +MA_API ma_uint32 ma_pcm_rb_get_channels(const ma_pcm_rb* pRB) +{ + if (pRB == NULL) { + return 0; + } + + return pRB->channels; +} + +MA_API ma_uint32 ma_pcm_rb_get_sample_rate(const ma_pcm_rb* pRB) +{ + if (pRB == NULL) { + return 0; + } + + return pRB->sampleRate; +} + +MA_API void ma_pcm_rb_set_sample_rate(ma_pcm_rb* pRB, ma_uint32 sampleRate) +{ + if (pRB == NULL) { + return; + } + + pRB->sampleRate = sampleRate; +} + + + +MA_API ma_result ma_duplex_rb_init(ma_format captureFormat, ma_uint32 captureChannels, ma_uint32 sampleRate, ma_uint32 captureInternalSampleRate, ma_uint32 captureInternalPeriodSizeInFrames, const ma_allocation_callbacks* pAllocationCallbacks, ma_duplex_rb* pRB) +{ + ma_result result; + ma_uint32 sizeInFrames; + + sizeInFrames = (ma_uint32)ma_calculate_frame_count_after_resampling(sampleRate, captureInternalSampleRate, captureInternalPeriodSizeInFrames * 5); + if (sizeInFrames == 0) { + return MA_INVALID_ARGS; + } + + result = ma_pcm_rb_init(captureFormat, captureChannels, sizeInFrames, NULL, pAllocationCallbacks, &pRB->rb); + if (result != MA_SUCCESS) { + return result; + } + + /* Seek forward a bit so we have a bit of a buffer in case of desyncs. 
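
    [Editor's note, not part of the upstream patch.] The capture side fills the ring buffer
    with the acquire/commit pair above, in whole PCM frames. A minimal sketch with
    illustrative names:

        ma_pcm_rb rb;
        void* p;
        ma_uint32 frames = 480;  // e.g. one 10ms period at 48kHz
        if (ma_pcm_rb_init(ma_format_s16, 1, 2048, NULL, NULL, &rb) == MA_SUCCESS) {
            if (ma_pcm_rb_acquire_write(&rb, &frames, &p) == MA_SUCCESS) {
                // ... copy up to `frames` captured frames into p ...
                ma_pcm_rb_commit_write(&rb, frames);
            }
            ma_pcm_rb_uninit(&rb);
        }
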
*/ + ma_pcm_rb_seek_write((ma_pcm_rb*)pRB, captureInternalPeriodSizeInFrames * 2); + + return MA_SUCCESS; +} + +MA_API ma_result ma_duplex_rb_uninit(ma_duplex_rb* pRB) +{ + ma_pcm_rb_uninit((ma_pcm_rb*)pRB); + return MA_SUCCESS; +} + + + +/************************************************************************************************************************************************************** + +Miscellaneous Helpers + +**************************************************************************************************************************************************************/ +MA_API const char* ma_result_description(ma_result result) +{ + switch (result) + { + case MA_SUCCESS: return "No error"; + case MA_ERROR: return "Unknown error"; + case MA_INVALID_ARGS: return "Invalid argument"; + case MA_INVALID_OPERATION: return "Invalid operation"; + case MA_OUT_OF_MEMORY: return "Out of memory"; + case MA_OUT_OF_RANGE: return "Out of range"; + case MA_ACCESS_DENIED: return "Permission denied"; + case MA_DOES_NOT_EXIST: return "Resource does not exist"; + case MA_ALREADY_EXISTS: return "Resource already exists"; + case MA_TOO_MANY_OPEN_FILES: return "Too many open files"; + case MA_INVALID_FILE: return "Invalid file"; + case MA_TOO_BIG: return "Too large"; + case MA_PATH_TOO_LONG: return "Path too long"; + case MA_NAME_TOO_LONG: return "Name too long"; + case MA_NOT_DIRECTORY: return "Not a directory"; + case MA_IS_DIRECTORY: return "Is a directory"; + case MA_DIRECTORY_NOT_EMPTY: return "Directory not empty"; + case MA_AT_END: return "At end"; + case MA_NO_SPACE: return "No space available"; + case MA_BUSY: return "Device or resource busy"; + case MA_IO_ERROR: return "Input/output error"; + case MA_INTERRUPT: return "Interrupted"; + case MA_UNAVAILABLE: return "Resource unavailable"; + case MA_ALREADY_IN_USE: return "Resource already in use"; + case MA_BAD_ADDRESS: return "Bad address"; + case MA_BAD_SEEK: return "Illegal seek"; + case MA_BAD_PIPE: return "Broken pipe"; + case MA_DEADLOCK: return "Deadlock"; + case MA_TOO_MANY_LINKS: return "Too many links"; + case MA_NOT_IMPLEMENTED: return "Not implemented"; + case MA_NO_MESSAGE: return "No message of desired type"; + case MA_BAD_MESSAGE: return "Invalid message"; + case MA_NO_DATA_AVAILABLE: return "No data available"; + case MA_INVALID_DATA: return "Invalid data"; + case MA_TIMEOUT: return "Timeout"; + case MA_NO_NETWORK: return "Network unavailable"; + case MA_NOT_UNIQUE: return "Not unique"; + case MA_NOT_SOCKET: return "Socket operation on non-socket"; + case MA_NO_ADDRESS: return "Destination address required"; + case MA_BAD_PROTOCOL: return "Protocol wrong type for socket"; + case MA_PROTOCOL_UNAVAILABLE: return "Protocol not available"; + case MA_PROTOCOL_NOT_SUPPORTED: return "Protocol not supported"; + case MA_PROTOCOL_FAMILY_NOT_SUPPORTED: return "Protocol family not supported"; + case MA_ADDRESS_FAMILY_NOT_SUPPORTED: return "Address family not supported"; + case MA_SOCKET_NOT_SUPPORTED: return "Socket type not supported"; + case MA_CONNECTION_RESET: return "Connection reset"; + case MA_ALREADY_CONNECTED: return "Already connected"; + case MA_NOT_CONNECTED: return "Not connected"; + case MA_CONNECTION_REFUSED: return "Connection refused"; + case MA_NO_HOST: return "No host"; + case MA_IN_PROGRESS: return "Operation in progress"; + case MA_CANCELLED: return "Operation cancelled"; + case MA_MEMORY_ALREADY_MAPPED: return "Memory already mapped"; + + case MA_FORMAT_NOT_SUPPORTED: return "Format not supported"; + case 
MA_DEVICE_TYPE_NOT_SUPPORTED: return "Device type not supported"; + case MA_SHARE_MODE_NOT_SUPPORTED: return "Share mode not supported"; + case MA_NO_BACKEND: return "No backend"; + case MA_NO_DEVICE: return "No device"; + case MA_API_NOT_FOUND: return "API not found"; + case MA_INVALID_DEVICE_CONFIG: return "Invalid device config"; + + case MA_DEVICE_NOT_INITIALIZED: return "Device not initialized"; + case MA_DEVICE_NOT_STARTED: return "Device not started"; + + case MA_FAILED_TO_INIT_BACKEND: return "Failed to initialize backend"; + case MA_FAILED_TO_OPEN_BACKEND_DEVICE: return "Failed to open backend device"; + case MA_FAILED_TO_START_BACKEND_DEVICE: return "Failed to start backend device"; + case MA_FAILED_TO_STOP_BACKEND_DEVICE: return "Failed to stop backend device"; + + default: return "Unknown error"; + } +} + +MA_API void* ma_malloc(size_t sz, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pAllocationCallbacks != NULL) { + if (pAllocationCallbacks->onMalloc != NULL) { + return pAllocationCallbacks->onMalloc(sz, pAllocationCallbacks->pUserData); + } else { + return NULL; /* Do not fall back to the default implementation. */ + } + } else { + return ma__malloc_default(sz, NULL); + } +} + +MA_API void* ma_calloc(size_t sz, const ma_allocation_callbacks* pAllocationCallbacks) +{ + void* p = ma_malloc(sz, pAllocationCallbacks); + if (p != NULL) { + MA_ZERO_MEMORY(p, sz); + } + + return p; +} + +MA_API void* ma_realloc(void* p, size_t sz, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pAllocationCallbacks != NULL) { + if (pAllocationCallbacks->onRealloc != NULL) { + return pAllocationCallbacks->onRealloc(p, sz, pAllocationCallbacks->pUserData); + } else { + return NULL; /* Do not fall back to the default implementation. */ + } + } else { + return ma__realloc_default(p, sz, NULL); + } +} + +MA_API void ma_free(void* p, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (p == NULL) { + return; + } + + if (pAllocationCallbacks != NULL) { + if (pAllocationCallbacks->onFree != NULL) { + pAllocationCallbacks->onFree(p, pAllocationCallbacks->pUserData); + } else { + return; /* Do not fall back to the default implementation. 
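
    [Editor's note, not part of the upstream patch.] A minimal sketch of routing these
    helpers through custom allocators; assumes <stdlib.h> and the illustrative my_* names:

        static void* my_malloc(size_t sz, void* pUserData)           { (void)pUserData; return malloc(sz); }
        static void* my_realloc(void* p, size_t sz, void* pUserData) { (void)pUserData; return realloc(p, sz); }
        static void  my_free(void* p, void* pUserData)               { (void)pUserData; free(p); }

        ma_allocation_callbacks cb;
        cb.pUserData = NULL;
        cb.onMalloc  = my_malloc;
        cb.onRealloc = my_realloc;
        cb.onFree    = my_free;
        // Leaving any member NULL deliberately fails the matching ma_* call
        // rather than silently mixing allocators with the defaults.
        void* p = ma_malloc(64, &cb);
        ma_free(p, &cb);
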
*/ + } else { + ma__free_default(p, NULL); + } +} + +MA_API void* ma_aligned_malloc(size_t sz, size_t alignment, const ma_allocation_callbacks* pAllocationCallbacks) +{ + size_t extraBytes; + void* pUnaligned; + void* pAligned; + + if (alignment == 0) { + return 0; + } + + extraBytes = alignment-1 + sizeof(void*); + + pUnaligned = ma_malloc(sz + extraBytes, pAllocationCallbacks); + if (pUnaligned == NULL) { + return NULL; + } + + pAligned = (void*)(((ma_uintptr)pUnaligned + extraBytes) & ~((ma_uintptr)(alignment-1))); + ((void**)pAligned)[-1] = pUnaligned; + + return pAligned; +} + +MA_API void ma_aligned_free(void* p, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_free(((void**)p)[-1], pAllocationCallbacks); +} + +MA_API const char* ma_get_format_name(ma_format format) +{ + switch (format) + { + case ma_format_unknown: return "Unknown"; + case ma_format_u8: return "8-bit Unsigned Integer"; + case ma_format_s16: return "16-bit Signed Integer"; + case ma_format_s24: return "24-bit Signed Integer (Tightly Packed)"; + case ma_format_s32: return "32-bit Signed Integer"; + case ma_format_f32: return "32-bit IEEE Floating Point"; + default: return "Invalid"; + } +} + +MA_API void ma_blend_f32(float* pOut, float* pInA, float* pInB, float factor, ma_uint32 channels) +{ + ma_uint32 i; + for (i = 0; i < channels; ++i) { + pOut[i] = ma_mix_f32(pInA[i], pInB[i], factor); + } +} + + +MA_API ma_uint32 ma_get_bytes_per_sample(ma_format format) +{ + ma_uint32 sizes[] = { + 0, /* unknown */ + 1, /* u8 */ + 2, /* s16 */ + 3, /* s24 */ + 4, /* s32 */ + 4, /* f32 */ + }; + return sizes[format]; +} + + + +#define MA_DATA_SOURCE_DEFAULT_RANGE_BEG 0 +#define MA_DATA_SOURCE_DEFAULT_RANGE_END ~((ma_uint64)0) +#define MA_DATA_SOURCE_DEFAULT_LOOP_POINT_BEG 0 +#define MA_DATA_SOURCE_DEFAULT_LOOP_POINT_END ~((ma_uint64)0) + +MA_API ma_data_source_config ma_data_source_config_init(void) +{ + ma_data_source_config config; + + MA_ZERO_OBJECT(&config); + + return config; +} + + +MA_API ma_result ma_data_source_init(const ma_data_source_config* pConfig, ma_data_source* pDataSource) +{ + ma_data_source_base* pDataSourceBase = (ma_data_source_base*)pDataSource; + + if (pDataSource == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pDataSourceBase); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + if (pConfig->vtable == NULL) { + return MA_INVALID_ARGS; + } + + pDataSourceBase->vtable = pConfig->vtable; + pDataSourceBase->rangeBegInFrames = MA_DATA_SOURCE_DEFAULT_RANGE_BEG; + pDataSourceBase->rangeEndInFrames = MA_DATA_SOURCE_DEFAULT_RANGE_END; + pDataSourceBase->loopBegInFrames = MA_DATA_SOURCE_DEFAULT_LOOP_POINT_BEG; + pDataSourceBase->loopEndInFrames = MA_DATA_SOURCE_DEFAULT_LOOP_POINT_END; + pDataSourceBase->pCurrent = pDataSource; /* Always read from ourself by default. */ + pDataSourceBase->pNext = NULL; + pDataSourceBase->onGetNext = NULL; + + return MA_SUCCESS; +} + +MA_API void ma_data_source_uninit(ma_data_source* pDataSource) +{ + if (pDataSource == NULL) { + return; + } + + /* + This is a placeholder in case we need this later. Data sources need to call this in their + uninitialization routine to ensure things work later on if something is added here. 
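
    [Editor's note, not part of the upstream patch.] A minimal custom data source built on
    this init/uninit pair; it produces silent stereo f32 and uses illustrative my_* names:

        typedef struct { ma_data_source_base ds; } my_source;

        static ma_result my_source_on_read(ma_data_source* pDS, void* pOut, ma_uint64 frameCount, ma_uint64* pFramesRead)
        {
            (void)pDS;
            ma_silence_pcm_frames(pOut, frameCount, ma_format_f32, 2);
            *pFramesRead = frameCount;
            return MA_SUCCESS;
        }

        static ma_result my_source_on_get_format(ma_data_source* pDS, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate, ma_channel* pChannelMap, size_t channelMapCap)
        {
            (void)pDS;
            *pFormat = ma_format_f32; *pChannels = 2; *pSampleRate = 48000;
            if (pChannelMap != NULL) {
                ma_channel_map_init_standard(ma_standard_channel_map_default, pChannelMap, channelMapCap, 2);
            }
            return MA_SUCCESS;
        }

        static ma_data_source_vtable my_source_vtable = {
            my_source_on_read, NULL, my_source_on_get_format, NULL, NULL, NULL, 0
        };

        my_source src;
        ma_data_source_config cfg = ma_data_source_config_init();
        cfg.vtable = &my_source_vtable;
        ma_data_source_init(&cfg, &src);  // valid because ds is the first member
        // ... ma_data_source_read_pcm_frames(&src, ...) ...
        ma_data_source_uninit(&src);
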
+ */ +} + +static ma_result ma_data_source_resolve_current(ma_data_source* pDataSource, ma_data_source** ppCurrentDataSource) +{ + ma_data_source_base* pCurrentDataSource = (ma_data_source_base*)pDataSource; + + MA_ASSERT(pDataSource != NULL); + MA_ASSERT(ppCurrentDataSource != NULL); + + if (pCurrentDataSource->pCurrent == NULL) { + /* + The current data source is NULL. If we're using this in the context of a chain we need to return NULL + here so that we don't end up looping. Otherwise we just return the data source itself. + */ + if (pCurrentDataSource->pNext != NULL || pCurrentDataSource->onGetNext != NULL) { + pCurrentDataSource = NULL; + } else { + pCurrentDataSource = (ma_data_source_base*)pDataSource; /* Not being used in a chain. Make sure we just always read from the data source itself at all times. */ + } + } else { + pCurrentDataSource = (ma_data_source_base*)pCurrentDataSource->pCurrent; + } + + *ppCurrentDataSource = pCurrentDataSource; + + return MA_SUCCESS; +} + +static ma_result ma_data_source_read_pcm_frames_from_backend(ma_data_source* pDataSource, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead) +{ + ma_data_source_base* pDataSourceBase = (ma_data_source_base*)pDataSource; + + MA_ASSERT(pDataSourceBase != NULL); + MA_ASSERT(pDataSourceBase->vtable != NULL); + MA_ASSERT(pDataSourceBase->vtable->onRead != NULL); + MA_ASSERT(pFramesRead != NULL); + + if (pFramesOut != NULL) { + return pDataSourceBase->vtable->onRead(pDataSourceBase, pFramesOut, frameCount, pFramesRead); + } else { + /* + No output buffer. Probably seeking forward. Read and discard. Can probably optimize this in terms of + onSeek and onGetCursor, but need to keep in mind that the data source may not implement these functions. + */ + ma_result result; + ma_uint64 framesRead; + ma_format format; + ma_uint32 channels; + ma_uint64 discardBufferCapInFrames; + ma_uint8 pDiscardBuffer[4096]; + + result = ma_data_source_get_data_format(pDataSource, &format, &channels, NULL, NULL, 0); + if (result != MA_SUCCESS) { + return result; + } + + discardBufferCapInFrames = sizeof(pDiscardBuffer) / ma_get_bytes_per_frame(format, channels); + + framesRead = 0; + while (framesRead < frameCount) { + ma_uint64 framesReadThisIteration = 0; + ma_uint64 framesToRead = frameCount - framesRead; + if (framesToRead > discardBufferCapInFrames) { + framesToRead = discardBufferCapInFrames; + } + + result = pDataSourceBase->vtable->onRead(pDataSourceBase, pDiscardBuffer, framesToRead, &framesReadThisIteration); + if (result != MA_SUCCESS) { + return result; + } + + framesRead += framesReadThisIteration; + } + + *pFramesRead = framesRead; + + return MA_SUCCESS; + } +} + +static ma_result ma_data_source_read_pcm_frames_within_range(ma_data_source* pDataSource, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead) +{ + ma_data_source_base* pDataSourceBase = (ma_data_source_base*)pDataSource; + ma_result result; + ma_uint64 framesRead = 0; + ma_bool32 loop = ma_data_source_is_looping(pDataSource); + + if (pDataSourceBase == NULL) { + return MA_AT_END; + } + + if (frameCount == 0) { + return MA_INVALID_ARGS; + } + + MA_ASSERT(pDataSourceBase->vtable != NULL); + + if ((pDataSourceBase->vtable->flags & MA_DATA_SOURCE_SELF_MANAGED_RANGE_AND_LOOP_POINT) != 0 || (pDataSourceBase->rangeEndInFrames == ~((ma_uint64)0) && (pDataSourceBase->loopEndInFrames == ~((ma_uint64)0) || loop == MA_FALSE))) { + /* Either the data source is self-managing the range, or no range is set - just read like normal. 
The data source itself will tell us when the end is reached. */ + result = ma_data_source_read_pcm_frames_from_backend(pDataSource, pFramesOut, frameCount, &framesRead); + } else { + /* Need to clamp to within the range. */ + ma_uint64 relativeCursor; + ma_uint64 absoluteCursor; + + result = ma_data_source_get_cursor_in_pcm_frames(pDataSourceBase, &relativeCursor); + if (result != MA_SUCCESS) { + /* Failed to retrieve the cursor. Cannot read within a range or loop points. Just read like normal - this may happen for things like noise data sources where it doesn't really matter. */ + result = ma_data_source_read_pcm_frames_from_backend(pDataSource, pFramesOut, frameCount, &framesRead); + } else { + ma_uint64 rangeBeg; + ma_uint64 rangeEnd; + + /* We have the cursor. We need to make sure we don't read beyond our range. */ + rangeBeg = pDataSourceBase->rangeBegInFrames; + rangeEnd = pDataSourceBase->rangeEndInFrames; + + absoluteCursor = rangeBeg + relativeCursor; + + /* If looping, make sure we're within range. */ + if (loop) { + if (pDataSourceBase->loopEndInFrames != ~((ma_uint64)0)) { + rangeEnd = ma_min(rangeEnd, pDataSourceBase->rangeBegInFrames + pDataSourceBase->loopEndInFrames); + } + } + + if (frameCount > (rangeEnd - absoluteCursor) && rangeEnd != ~((ma_uint64)0)) { + frameCount = (rangeEnd - absoluteCursor); + } + + /* + If the cursor is sitting on the end of the range the frame count will be set to 0 which can + result in MA_INVALID_ARGS. In this case, we don't want to try reading, but instead return + MA_AT_END so the higher level function can know about it. + */ + if (frameCount > 0) { + result = ma_data_source_read_pcm_frames_from_backend(pDataSource, pFramesOut, frameCount, &framesRead); + } else { + result = MA_AT_END; /* The cursor is sitting on the end of the range which means we're at the end. */ + } + } + } + + if (pFramesRead != NULL) { + *pFramesRead = framesRead; + } + + /* We need to make sure MA_AT_END is returned if we hit the end of the range. */ + if (result == MA_SUCCESS && framesRead == 0) { + result = MA_AT_END; + } + + return result; +} + +MA_API ma_result ma_data_source_read_pcm_frames(ma_data_source* pDataSource, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead) +{ + ma_result result = MA_SUCCESS; + ma_data_source_base* pDataSourceBase = (ma_data_source_base*)pDataSource; + ma_data_source_base* pCurrentDataSource; + void* pRunningFramesOut = pFramesOut; + ma_uint64 totalFramesProcessed = 0; + ma_format format; + ma_uint32 channels; + ma_uint32 emptyLoopCounter = 0; /* Keeps track of how many times 0 frames have been read. For infinite loop detection of sounds with no audio data. */ + ma_bool32 loop; + + if (pFramesRead != NULL) { + *pFramesRead = 0; + } + + if (frameCount == 0) { + return MA_INVALID_ARGS; + } + + if (pDataSourceBase == NULL) { + return MA_INVALID_ARGS; + } + + loop = ma_data_source_is_looping(pDataSource); + + /* + We need to know the data format so we can advance the output buffer as we read frames. If this + fails, chaining will not work and we'll just read as much as we can from the current source. + */ + if (ma_data_source_get_data_format(pDataSource, &format, &channels, NULL, NULL, 0) != MA_SUCCESS) { + result = ma_data_source_resolve_current(pDataSource, (ma_data_source**)&pCurrentDataSource); + if (result != MA_SUCCESS) { + return result; + } + + return ma_data_source_read_pcm_frames_within_range(pCurrentDataSource, pFramesOut, frameCount, pFramesRead); + } + + /* + Looping is a bit of a special case. 
When the `loop` argument is true, chaining will not work and + only the current data source will be read from. + */ + + /* Keep reading until we've read as many frames as possible. */ + while (totalFramesProcessed < frameCount) { + ma_uint64 framesProcessed; + ma_uint64 framesRemaining = frameCount - totalFramesProcessed; + + /* We need to resolve the data source that we'll actually be reading from. */ + result = ma_data_source_resolve_current(pDataSource, (ma_data_source**)&pCurrentDataSource); + if (result != MA_SUCCESS) { + break; + } + + if (pCurrentDataSource == NULL) { + break; + } + + result = ma_data_source_read_pcm_frames_within_range(pCurrentDataSource, pRunningFramesOut, framesRemaining, &framesProcessed); + totalFramesProcessed += framesProcessed; + + /* + If we encountered an error from the read callback, make sure it's propagated to the caller. The caller may need to know whether or not MA_BUSY is returned which is + not necessarily considered an error. + */ + if (result != MA_SUCCESS && result != MA_AT_END) { + break; + } + + /* + We can determine if we've reached the end by checking if ma_data_source_read_pcm_frames_within_range() returned + MA_AT_END. To loop back to the start, all we need to do is seek back to the first frame. + */ + if (result == MA_AT_END) { + /* + The result needs to be reset back to MA_SUCCESS (from MA_AT_END) so that we don't + accidentally return MA_AT_END when data has been read in prior loop iterations. At the + end of this function, the result will be checked for MA_SUCCESS, and if the total + number of frames processed is 0, it will be explicitly set to MA_AT_END. + */ + result = MA_SUCCESS; + + /* + We reached the end. If we're looping, we just loop back to the start of the current + data source. If we're not looping we need to check if we have another in the chain, and + if so, switch to it. + */ + if (loop) { + if (framesProcessed == 0) { + emptyLoopCounter += 1; + if (emptyLoopCounter > 1) { + break; /* Infinite loop detected. Get out. */ + } + } else { + emptyLoopCounter = 0; + } + + result = ma_data_source_seek_to_pcm_frame(pCurrentDataSource, pCurrentDataSource->loopBegInFrames); + if (result != MA_SUCCESS) { + break; /* Failed to loop. Abort. */ + } + + /* Don't return MA_AT_END for looping sounds. */ + result = MA_SUCCESS; + } else { + if (pCurrentDataSource->pNext != NULL) { + pDataSourceBase->pCurrent = pCurrentDataSource->pNext; + } else if (pCurrentDataSource->onGetNext != NULL) { + pDataSourceBase->pCurrent = pCurrentDataSource->onGetNext(pCurrentDataSource); + if (pDataSourceBase->pCurrent == NULL) { + break; /* Our callback did not return a next data source. We're done. */ + } + } else { + /* Reached the end of the chain. We're done. */ + break; + } + + /* The next data source needs to be rewound to ensure data is read in looping scenarios. */ + result = ma_data_source_seek_to_pcm_frame(pDataSourceBase->pCurrent, 0); + if (result != MA_SUCCESS) { + break; + } + } + } + + if (pRunningFramesOut != NULL) { + pRunningFramesOut = ma_offset_ptr(pRunningFramesOut, framesProcessed * ma_get_bytes_per_frame(format, channels)); + } + } + + if (pFramesRead != NULL) { + *pFramesRead = totalFramesProcessed; + } + + MA_ASSERT(!(result == MA_AT_END && totalFramesProcessed > 0)); /* We should never be returning MA_AT_END if we read some data. 
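
    [Editor's note, not part of the upstream patch.] Chaining as implemented above, with
    illustrative, already-initialized sources:

        ma_data_source_set_next(&intro, &outro);  // play intro, then outro
        // Reading from `intro` switches to `outro` when intro hits its end;
        // with looping enabled on the source being read, the chain is not advanced.
        ma_data_source_read_pcm_frames(&intro, pOut, frameCount, &framesRead);
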
*/ + + if (result == MA_SUCCESS && totalFramesProcessed == 0) { + result = MA_AT_END; + } + + return result; +} + +MA_API ma_result ma_data_source_seek_pcm_frames(ma_data_source* pDataSource, ma_uint64 frameCount, ma_uint64* pFramesSeeked) +{ + return ma_data_source_read_pcm_frames(pDataSource, NULL, frameCount, pFramesSeeked); +} + +MA_API ma_result ma_data_source_seek_to_pcm_frame(ma_data_source* pDataSource, ma_uint64 frameIndex) +{ + ma_data_source_base* pDataSourceBase = (ma_data_source_base*)pDataSource; + + if (pDataSourceBase == NULL) { + return MA_INVALID_ARGS; + } + + if (pDataSourceBase->vtable->onSeek == NULL) { + return MA_NOT_IMPLEMENTED; + } + + if (frameIndex > pDataSourceBase->rangeEndInFrames) { + return MA_INVALID_OPERATION; /* Trying to seek too far forward. */ + } + + MA_ASSERT(pDataSourceBase->vtable != NULL); + + return pDataSourceBase->vtable->onSeek(pDataSource, pDataSourceBase->rangeBegInFrames + frameIndex); +} + +MA_API ma_result ma_data_source_seek_seconds(ma_data_source* pDataSource, float secondCount, float* pSecondsSeeked) +{ + ma_uint64 frameCount; + ma_uint64 framesSeeked = 0; + ma_uint32 sampleRate; + ma_result result; + + if (pDataSource == NULL) { + return MA_INVALID_ARGS; + } + + result = ma_data_source_get_data_format(pDataSource, NULL, NULL, &sampleRate, NULL, 0); + if (result != MA_SUCCESS) { + return result; + } + + /* We need PCM frames instead of seconds */ + frameCount = (ma_uint64)(secondCount * sampleRate); + + result = ma_data_source_seek_pcm_frames(pDataSource, frameCount, &framesSeeked); + + /* VC6 doesn't support division between unsigned 64-bit integer and floating point number. Signed integer needed. This shouldn't affect anything in practice */ + *pSecondsSeeked = (ma_int64)framesSeeked / (float)sampleRate; + return result; +} + +MA_API ma_result ma_data_source_seek_to_second(ma_data_source* pDataSource, float seekPointInSeconds) +{ + ma_uint64 frameIndex; + ma_uint32 sampleRate; + ma_result result; + + if (pDataSource == NULL) { + return MA_INVALID_ARGS; + } + + result = ma_data_source_get_data_format(pDataSource, NULL, NULL, &sampleRate, NULL, 0); + if (result != MA_SUCCESS) { + return result; + } + + /* We need PCM frames instead of seconds */ + frameIndex = (ma_uint64)(seekPointInSeconds * sampleRate); + + return ma_data_source_seek_to_pcm_frame(pDataSource, frameIndex); +} + +MA_API ma_result ma_data_source_get_data_format(ma_data_source* pDataSource, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate, ma_channel* pChannelMap, size_t channelMapCap) +{ + ma_data_source_base* pDataSourceBase = (ma_data_source_base*)pDataSource; + ma_result result; + ma_format format; + ma_uint32 channels; + ma_uint32 sampleRate; + + /* Initialize to defaults for safety just in case the data source does not implement this callback. 
*/ + if (pFormat != NULL) { + *pFormat = ma_format_unknown; + } + if (pChannels != NULL) { + *pChannels = 0; + } + if (pSampleRate != NULL) { + *pSampleRate = 0; + } + if (pChannelMap != NULL) { + MA_ZERO_MEMORY(pChannelMap, sizeof(*pChannelMap) * channelMapCap); + } + + if (pDataSourceBase == NULL) { + return MA_INVALID_ARGS; + } + + MA_ASSERT(pDataSourceBase->vtable != NULL); + + if (pDataSourceBase->vtable->onGetDataFormat == NULL) { + return MA_NOT_IMPLEMENTED; + } + + result = pDataSourceBase->vtable->onGetDataFormat(pDataSource, &format, &channels, &sampleRate, pChannelMap, channelMapCap); + if (result != MA_SUCCESS) { + return result; + } + + if (pFormat != NULL) { + *pFormat = format; + } + if (pChannels != NULL) { + *pChannels = channels; + } + if (pSampleRate != NULL) { + *pSampleRate = sampleRate; + } + + /* Channel map was passed in directly to the callback. This is safe due to the channelMapCap parameter. */ + + return MA_SUCCESS; +} + +MA_API ma_result ma_data_source_get_cursor_in_pcm_frames(ma_data_source* pDataSource, ma_uint64* pCursor) +{ + ma_data_source_base* pDataSourceBase = (ma_data_source_base*)pDataSource; + ma_result result; + ma_uint64 cursor; + + if (pCursor == NULL) { + return MA_INVALID_ARGS; + } + + *pCursor = 0; + + if (pDataSourceBase == NULL) { + return MA_SUCCESS; + } + + MA_ASSERT(pDataSourceBase->vtable != NULL); + + if (pDataSourceBase->vtable->onGetCursor == NULL) { + return MA_NOT_IMPLEMENTED; + } + + result = pDataSourceBase->vtable->onGetCursor(pDataSourceBase, &cursor); + if (result != MA_SUCCESS) { + return result; + } + + /* The cursor needs to be made relative to the start of the range. */ + if (cursor < pDataSourceBase->rangeBegInFrames) { /* Safety check so we don't return some huge number. */ + *pCursor = 0; + } else { + *pCursor = cursor - pDataSourceBase->rangeBegInFrames; + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_data_source_get_length_in_pcm_frames(ma_data_source* pDataSource, ma_uint64* pLength) +{ + ma_data_source_base* pDataSourceBase = (ma_data_source_base*)pDataSource; + + if (pLength == NULL) { + return MA_INVALID_ARGS; + } + + *pLength = 0; + + if (pDataSourceBase == NULL) { + return MA_INVALID_ARGS; + } + + MA_ASSERT(pDataSourceBase->vtable != NULL); + + /* + If we have a range defined we'll use that to determine the length. This is one of the rare times + where we'll actually trust the caller. If they've set the range, I think it's mostly safe to + assume they've set it based on some higher level knowledge of the structure of the sound bank. + */ + if (pDataSourceBase->rangeEndInFrames != ~((ma_uint64)0)) { + *pLength = pDataSourceBase->rangeEndInFrames - pDataSourceBase->rangeBegInFrames; + return MA_SUCCESS; + } + + /* + Getting here means a range is not defined so we'll need to get the data source itself to tell + us the length. 
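
    [Editor's note, not part of the upstream patch.] Typical queries against these
    accessors; pDS is an illustrative, initialized data source:

        ma_uint64 cursorInFrames, lengthInFrames;
        float lengthInSeconds;
        ma_data_source_get_cursor_in_pcm_frames(pDS, &cursorInFrames);  // relative to the range start
        ma_data_source_get_length_in_pcm_frames(pDS, &lengthInFrames);  // range length when a range is set
        ma_data_source_get_length_in_seconds(pDS, &lengthInSeconds);    // requires a known sample rate
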
+ */ + if (pDataSourceBase->vtable->onGetLength == NULL) { + return MA_NOT_IMPLEMENTED; + } + + return pDataSourceBase->vtable->onGetLength(pDataSource, pLength); +} + +MA_API ma_result ma_data_source_get_cursor_in_seconds(ma_data_source* pDataSource, float* pCursor) +{ + ma_result result; + ma_uint64 cursorInPCMFrames; + ma_uint32 sampleRate; + + if (pCursor == NULL) { + return MA_INVALID_ARGS; + } + + *pCursor = 0; + + result = ma_data_source_get_cursor_in_pcm_frames(pDataSource, &cursorInPCMFrames); + if (result != MA_SUCCESS) { + return result; + } + + result = ma_data_source_get_data_format(pDataSource, NULL, NULL, &sampleRate, NULL, 0); + if (result != MA_SUCCESS) { + return result; + } + + /* VC6 does not support division of unsigned 64-bit integers with floating point numbers. Need to use a signed number. This shouldn't affect anything in practice. */ + *pCursor = (ma_int64)cursorInPCMFrames / (float)sampleRate; + + return MA_SUCCESS; +} + +MA_API ma_result ma_data_source_get_length_in_seconds(ma_data_source* pDataSource, float* pLength) +{ + ma_result result; + ma_uint64 lengthInPCMFrames; + ma_uint32 sampleRate; + + if (pLength == NULL) { + return MA_INVALID_ARGS; + } + + *pLength = 0; + + result = ma_data_source_get_length_in_pcm_frames(pDataSource, &lengthInPCMFrames); + if (result != MA_SUCCESS) { + return result; + } + + result = ma_data_source_get_data_format(pDataSource, NULL, NULL, &sampleRate, NULL, 0); + if (result != MA_SUCCESS) { + return result; + } + + /* VC6 does not support division of unsigned 64-bit integers with floating point numbers. Need to use a signed number. This shouldn't affect anything in practice. */ + *pLength = (ma_int64)lengthInPCMFrames / (float)sampleRate; + + return MA_SUCCESS; +} + +MA_API ma_result ma_data_source_set_looping(ma_data_source* pDataSource, ma_bool32 isLooping) +{ + ma_data_source_base* pDataSourceBase = (ma_data_source_base*)pDataSource; + + if (pDataSource == NULL) { + return MA_INVALID_ARGS; + } + + ma_atomic_exchange_32(&pDataSourceBase->isLooping, isLooping); + + MA_ASSERT(pDataSourceBase->vtable != NULL); + + /* If there's no callback for this just treat it as a successful no-op. */ + if (pDataSourceBase->vtable->onSetLooping == NULL) { + return MA_SUCCESS; + } + + return pDataSourceBase->vtable->onSetLooping(pDataSource, isLooping); +} + +MA_API ma_bool32 ma_data_source_is_looping(const ma_data_source* pDataSource) +{ + const ma_data_source_base* pDataSourceBase = (const ma_data_source_base*)pDataSource; + + if (pDataSource == NULL) { + return MA_FALSE; + } + + return ma_atomic_load_32(&pDataSourceBase->isLooping); +} + +MA_API ma_result ma_data_source_set_range_in_pcm_frames(ma_data_source* pDataSource, ma_uint64 rangeBegInFrames, ma_uint64 rangeEndInFrames) +{ + ma_data_source_base* pDataSourceBase = (ma_data_source_base*)pDataSource; + ma_result result; + ma_uint64 relativeCursor; + ma_uint64 absoluteCursor; + ma_bool32 doSeekAdjustment = MA_FALSE; + + if (pDataSource == NULL) { + return MA_INVALID_ARGS; + } + + if (rangeEndInFrames < rangeBegInFrames) { + return MA_INVALID_ARGS; /* The end of the range must come after the beginning. */ + } + + /* + We may need to adjust the position of the cursor to ensure it's clamped to the range. Grab it now + so we can calculate its absolute position before we change the range. 
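
    [Editor's note, not part of the upstream patch.] Typical range usage; pDS and the
    frame values are illustrative:

        ma_uint64 rangeBeg, rangeEnd;
        // Confine playback to frames [48000, 480000) of the underlying data.
        // The cursor is re-clamped and any loop points are reset.
        ma_data_source_set_range_in_pcm_frames(pDS, 48000, 480000);
        ma_data_source_get_range_in_pcm_frames(pDS, &rangeBeg, &rangeEnd);
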
+ */ + result = ma_data_source_get_cursor_in_pcm_frames(pDataSource, &relativeCursor); + if (result == MA_SUCCESS) { + doSeekAdjustment = MA_TRUE; + absoluteCursor = relativeCursor + pDataSourceBase->rangeBegInFrames; + } else { + /* + We couldn't get the position of the cursor. It probably means the data source has no notion + of a cursor. We'll just leave it at position 0. Don't treat this as an error. + */ + doSeekAdjustment = MA_FALSE; + relativeCursor = 0; + absoluteCursor = 0; + } + + pDataSourceBase->rangeBegInFrames = rangeBegInFrames; + pDataSourceBase->rangeEndInFrames = rangeEndInFrames; + + /* + The commented out logic below was intended to maintain loop points in response to a change in the + range. However, this is not useful because it results in the sound breaking when you move the range + outside of the old loop points. I'm simplifying this by simply resetting the loop points. The + caller is expected to update their loop points if they change the range. + + In practice this should be mostly a non-issue because the majority of the time the range will be + set once right after initialization. + */ + pDataSourceBase->loopBegInFrames = 0; + pDataSourceBase->loopEndInFrames = ~((ma_uint64)0); + + + /* + Seek to within range. Note that our seek positions here are relative to the new range. We don't want + to do this if we failed to retrieve the cursor earlier on because it probably means the data source + has no notion of a cursor. In practice the seek would probably fail (which we silently ignore), but + I'm just not even going to attempt it. + */ + if (doSeekAdjustment) { + if (absoluteCursor < rangeBegInFrames) { + ma_data_source_seek_to_pcm_frame(pDataSource, 0); + } else if (absoluteCursor > rangeEndInFrames) { + ma_data_source_seek_to_pcm_frame(pDataSource, rangeEndInFrames - rangeBegInFrames); + } + } + + return MA_SUCCESS; +} + +MA_API void ma_data_source_get_range_in_pcm_frames(const ma_data_source* pDataSource, ma_uint64* pRangeBegInFrames, ma_uint64* pRangeEndInFrames) +{ + const ma_data_source_base* pDataSourceBase = (const ma_data_source_base*)pDataSource; + + if (pRangeBegInFrames != NULL) { + *pRangeBegInFrames = 0; + } + if (pRangeEndInFrames != NULL) { + *pRangeEndInFrames = 0; + } + + if (pDataSource == NULL) { + return; + } + + if (pRangeBegInFrames != NULL) { + *pRangeBegInFrames = pDataSourceBase->rangeBegInFrames; + } + + if (pRangeEndInFrames != NULL) { + *pRangeEndInFrames = pDataSourceBase->rangeEndInFrames; + } +} + +MA_API ma_result ma_data_source_set_loop_point_in_pcm_frames(ma_data_source* pDataSource, ma_uint64 loopBegInFrames, ma_uint64 loopEndInFrames) +{ + ma_data_source_base* pDataSourceBase = (ma_data_source_base*)pDataSource; + + if (pDataSource == NULL) { + return MA_INVALID_ARGS; + } + + if (loopEndInFrames < loopBegInFrames) { + return MA_INVALID_ARGS; /* The end of the loop point must come after the beginning. */ + } + + if (loopEndInFrames > pDataSourceBase->rangeEndInFrames && loopEndInFrames != ~((ma_uint64)0)) { + return MA_INVALID_ARGS; /* The end of the loop point must not go beyond the range. */ + } + + pDataSourceBase->loopBegInFrames = loopBegInFrames; + pDataSourceBase->loopEndInFrames = loopEndInFrames; + + /* The end cannot exceed the range. 
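
    [Editor's note, not part of the upstream patch.] Loop points are given relative to the
    current range; an illustrative sketch:

        ma_data_source_set_range_in_pcm_frames(pDS, 48000, 480000);
        ma_data_source_set_loop_point_in_pcm_frames(pDS, 0, 96000);  // loop the first 96k frames of the range
        ma_data_source_set_looping(pDS, MA_TRUE);
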
*/ + if (pDataSourceBase->loopEndInFrames > (pDataSourceBase->rangeEndInFrames - pDataSourceBase->rangeBegInFrames) && pDataSourceBase->loopEndInFrames != ~((ma_uint64)0)) { + pDataSourceBase->loopEndInFrames = (pDataSourceBase->rangeEndInFrames - pDataSourceBase->rangeBegInFrames); + } + + return MA_SUCCESS; +} + +MA_API void ma_data_source_get_loop_point_in_pcm_frames(const ma_data_source* pDataSource, ma_uint64* pLoopBegInFrames, ma_uint64* pLoopEndInFrames) +{ + const ma_data_source_base* pDataSourceBase = (const ma_data_source_base*)pDataSource; + + if (pLoopBegInFrames != NULL) { + *pLoopBegInFrames = 0; + } + if (pLoopEndInFrames != NULL) { + *pLoopEndInFrames = 0; + } + + if (pDataSource == NULL) { + return; + } + + if (pLoopBegInFrames != NULL) { + *pLoopBegInFrames = pDataSourceBase->loopBegInFrames; + } + + if (pLoopEndInFrames != NULL) { + *pLoopEndInFrames = pDataSourceBase->loopEndInFrames; + } +} + +MA_API ma_result ma_data_source_set_current(ma_data_source* pDataSource, ma_data_source* pCurrentDataSource) +{ + ma_data_source_base* pDataSourceBase = (ma_data_source_base*)pDataSource; + + if (pDataSource == NULL) { + return MA_INVALID_ARGS; + } + + pDataSourceBase->pCurrent = pCurrentDataSource; + + return MA_SUCCESS; +} + +MA_API ma_data_source* ma_data_source_get_current(const ma_data_source* pDataSource) +{ + const ma_data_source_base* pDataSourceBase = (const ma_data_source_base*)pDataSource; + + if (pDataSource == NULL) { + return NULL; + } + + return pDataSourceBase->pCurrent; +} + +MA_API ma_result ma_data_source_set_next(ma_data_source* pDataSource, ma_data_source* pNextDataSource) +{ + ma_data_source_base* pDataSourceBase = (ma_data_source_base*)pDataSource; + + if (pDataSource == NULL) { + return MA_INVALID_ARGS; + } + + pDataSourceBase->pNext = pNextDataSource; + + return MA_SUCCESS; +} + +MA_API ma_data_source* ma_data_source_get_next(const ma_data_source* pDataSource) +{ + const ma_data_source_base* pDataSourceBase = (const ma_data_source_base*)pDataSource; + + if (pDataSource == NULL) { + return NULL; + } + + return pDataSourceBase->pNext; +} + +MA_API ma_result ma_data_source_set_next_callback(ma_data_source* pDataSource, ma_data_source_get_next_proc onGetNext) +{ + ma_data_source_base* pDataSourceBase = (ma_data_source_base*)pDataSource; + + if (pDataSource == NULL) { + return MA_INVALID_ARGS; + } + + pDataSourceBase->onGetNext = onGetNext; + + return MA_SUCCESS; +} + +MA_API ma_data_source_get_next_proc ma_data_source_get_next_callback(const ma_data_source* pDataSource) +{ + const ma_data_source_base* pDataSourceBase = (const ma_data_source_base*)pDataSource; + + if (pDataSource == NULL) { + return NULL; + } + + return pDataSourceBase->onGetNext; +} + + +static ma_result ma_audio_buffer_ref__data_source_on_read(ma_data_source* pDataSource, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead) +{ + ma_audio_buffer_ref* pAudioBufferRef = (ma_audio_buffer_ref*)pDataSource; + ma_uint64 framesRead = ma_audio_buffer_ref_read_pcm_frames(pAudioBufferRef, pFramesOut, frameCount, MA_FALSE); + + if (pFramesRead != NULL) { + *pFramesRead = framesRead; + } + + if (framesRead < frameCount || framesRead == 0) { + return MA_AT_END; + } + + return MA_SUCCESS; +} + +static ma_result ma_audio_buffer_ref__data_source_on_seek(ma_data_source* pDataSource, ma_uint64 frameIndex) +{ + return ma_audio_buffer_ref_seek_to_pcm_frame((ma_audio_buffer_ref*)pDataSource, frameIndex); +} + +static ma_result ma_audio_buffer_ref__data_source_on_get_data_format(ma_data_source* 
pDataSource, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate, ma_channel* pChannelMap, size_t channelMapCap) +{ + ma_audio_buffer_ref* pAudioBufferRef = (ma_audio_buffer_ref*)pDataSource; + + *pFormat = pAudioBufferRef->format; + *pChannels = pAudioBufferRef->channels; + *pSampleRate = pAudioBufferRef->sampleRate; + ma_channel_map_init_standard(ma_standard_channel_map_default, pChannelMap, channelMapCap, pAudioBufferRef->channels); + + return MA_SUCCESS; +} + +static ma_result ma_audio_buffer_ref__data_source_on_get_cursor(ma_data_source* pDataSource, ma_uint64* pCursor) +{ + ma_audio_buffer_ref* pAudioBufferRef = (ma_audio_buffer_ref*)pDataSource; + + *pCursor = pAudioBufferRef->cursor; + + return MA_SUCCESS; +} + +static ma_result ma_audio_buffer_ref__data_source_on_get_length(ma_data_source* pDataSource, ma_uint64* pLength) +{ + ma_audio_buffer_ref* pAudioBufferRef = (ma_audio_buffer_ref*)pDataSource; + + *pLength = pAudioBufferRef->sizeInFrames; + + return MA_SUCCESS; +} + +static ma_data_source_vtable g_ma_audio_buffer_ref_data_source_vtable = +{ + ma_audio_buffer_ref__data_source_on_read, + ma_audio_buffer_ref__data_source_on_seek, + ma_audio_buffer_ref__data_source_on_get_data_format, + ma_audio_buffer_ref__data_source_on_get_cursor, + ma_audio_buffer_ref__data_source_on_get_length, + NULL, /* onSetLooping */ + 0 +}; + +MA_API ma_result ma_audio_buffer_ref_init(ma_format format, ma_uint32 channels, const void* pData, ma_uint64 sizeInFrames, ma_audio_buffer_ref* pAudioBufferRef) +{ + ma_result result; + ma_data_source_config dataSourceConfig; + + if (pAudioBufferRef == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pAudioBufferRef); + + dataSourceConfig = ma_data_source_config_init(); + dataSourceConfig.vtable = &g_ma_audio_buffer_ref_data_source_vtable; + + result = ma_data_source_init(&dataSourceConfig, &pAudioBufferRef->ds); + if (result != MA_SUCCESS) { + return result; + } + + pAudioBufferRef->format = format; + pAudioBufferRef->channels = channels; + pAudioBufferRef->sampleRate = 0; /* TODO: Version 0.12. Set this to sampleRate. 
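
    [Editor's note, not part of the upstream patch.] An audio buffer ref wraps caller-owned
    memory without copying; a minimal sketch:

        float existing[1024 * 2];  // caller-owned stereo f32 PCM
        float out[256 * 2];
        ma_audio_buffer_ref ref;
        if (ma_audio_buffer_ref_init(ma_format_f32, 2, existing, 1024, &ref) == MA_SUCCESS) {
            ma_uint64 n = ma_audio_buffer_ref_read_pcm_frames(&ref, out, 256, MA_FALSE);  // n = frames actually read
            ma_audio_buffer_ref_uninit(&ref);
        }
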
*/ + pAudioBufferRef->cursor = 0; + pAudioBufferRef->sizeInFrames = sizeInFrames; + pAudioBufferRef->pData = pData; + + return MA_SUCCESS; +} + +MA_API void ma_audio_buffer_ref_uninit(ma_audio_buffer_ref* pAudioBufferRef) +{ + if (pAudioBufferRef == NULL) { + return; + } + + ma_data_source_uninit(&pAudioBufferRef->ds); +} + +MA_API ma_result ma_audio_buffer_ref_set_data(ma_audio_buffer_ref* pAudioBufferRef, const void* pData, ma_uint64 sizeInFrames) +{ + if (pAudioBufferRef == NULL) { + return MA_INVALID_ARGS; + } + + pAudioBufferRef->cursor = 0; + pAudioBufferRef->sizeInFrames = sizeInFrames; + pAudioBufferRef->pData = pData; + + return MA_SUCCESS; +} + +MA_API ma_uint64 ma_audio_buffer_ref_read_pcm_frames(ma_audio_buffer_ref* pAudioBufferRef, void* pFramesOut, ma_uint64 frameCount, ma_bool32 loop) +{ + ma_uint64 totalFramesRead = 0; + + if (pAudioBufferRef == NULL) { + return 0; + } + + if (frameCount == 0) { + return 0; + } + + while (totalFramesRead < frameCount) { + ma_uint64 framesAvailable = pAudioBufferRef->sizeInFrames - pAudioBufferRef->cursor; + ma_uint64 framesRemaining = frameCount - totalFramesRead; + ma_uint64 framesToRead; + + framesToRead = framesRemaining; + if (framesToRead > framesAvailable) { + framesToRead = framesAvailable; + } + + if (pFramesOut != NULL) { + ma_copy_pcm_frames(ma_offset_ptr(pFramesOut, totalFramesRead * ma_get_bytes_per_frame(pAudioBufferRef->format, pAudioBufferRef->channels)), ma_offset_ptr(pAudioBufferRef->pData, pAudioBufferRef->cursor * ma_get_bytes_per_frame(pAudioBufferRef->format, pAudioBufferRef->channels)), framesToRead, pAudioBufferRef->format, pAudioBufferRef->channels); + } + + totalFramesRead += framesToRead; + + pAudioBufferRef->cursor += framesToRead; + if (pAudioBufferRef->cursor == pAudioBufferRef->sizeInFrames) { + if (loop) { + pAudioBufferRef->cursor = 0; + } else { + break; /* We've reached the end and we're not looping. Done. */ + } + } + + MA_ASSERT(pAudioBufferRef->cursor < pAudioBufferRef->sizeInFrames); + } + + return totalFramesRead; +} + +MA_API ma_result ma_audio_buffer_ref_seek_to_pcm_frame(ma_audio_buffer_ref* pAudioBufferRef, ma_uint64 frameIndex) +{ + if (pAudioBufferRef == NULL) { + return MA_INVALID_ARGS; + } + + if (frameIndex > pAudioBufferRef->sizeInFrames) { + return MA_INVALID_ARGS; + } + + pAudioBufferRef->cursor = (size_t)frameIndex; + + return MA_SUCCESS; +} + +MA_API ma_result ma_audio_buffer_ref_map(ma_audio_buffer_ref* pAudioBufferRef, void** ppFramesOut, ma_uint64* pFrameCount) +{ + ma_uint64 framesAvailable; + ma_uint64 frameCount = 0; + + if (ppFramesOut != NULL) { + *ppFramesOut = NULL; /* Safety. */ + } + + if (pFrameCount != NULL) { + frameCount = *pFrameCount; + *pFrameCount = 0; /* Safety. 
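
    [Editor's note, not part of the upstream patch.] The map/unmap pair gives zero-copy
    access to the remaining frames; `ref` is an initialized ma_audio_buffer_ref:

        void* pMapped;
        ma_uint64 frameCount = 512;  // in: requested, out: actually mapped
        if (ma_audio_buffer_ref_map(&ref, &pMapped, &frameCount) == MA_SUCCESS) {
            // ... process frameCount frames in place via pMapped ...
            ma_audio_buffer_ref_unmap(&ref, frameCount);  // returns MA_AT_END once the end is reached
        }
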
*/ + } + + if (pAudioBufferRef == NULL || ppFramesOut == NULL || pFrameCount == NULL) { + return MA_INVALID_ARGS; + } + + framesAvailable = pAudioBufferRef->sizeInFrames - pAudioBufferRef->cursor; + if (frameCount > framesAvailable) { + frameCount = framesAvailable; + } + + *ppFramesOut = ma_offset_ptr(pAudioBufferRef->pData, pAudioBufferRef->cursor * ma_get_bytes_per_frame(pAudioBufferRef->format, pAudioBufferRef->channels)); + *pFrameCount = frameCount; + + return MA_SUCCESS; +} + +MA_API ma_result ma_audio_buffer_ref_unmap(ma_audio_buffer_ref* pAudioBufferRef, ma_uint64 frameCount) +{ + ma_uint64 framesAvailable; + + if (pAudioBufferRef == NULL) { + return MA_INVALID_ARGS; + } + + framesAvailable = pAudioBufferRef->sizeInFrames - pAudioBufferRef->cursor; + if (frameCount > framesAvailable) { + return MA_INVALID_ARGS; /* The frame count was too big. This should never happen in an unmapping. Need to make sure the caller is aware of this. */ + } + + pAudioBufferRef->cursor += frameCount; + + if (pAudioBufferRef->cursor == pAudioBufferRef->sizeInFrames) { + return MA_AT_END; /* Successful. Need to tell the caller that the end has been reached so that it can loop if desired. */ + } else { + return MA_SUCCESS; + } +} + +MA_API ma_bool32 ma_audio_buffer_ref_at_end(const ma_audio_buffer_ref* pAudioBufferRef) +{ + if (pAudioBufferRef == NULL) { + return MA_FALSE; + } + + return pAudioBufferRef->cursor == pAudioBufferRef->sizeInFrames; +} + +MA_API ma_result ma_audio_buffer_ref_get_cursor_in_pcm_frames(const ma_audio_buffer_ref* pAudioBufferRef, ma_uint64* pCursor) +{ + if (pCursor == NULL) { + return MA_INVALID_ARGS; + } + + *pCursor = 0; + + if (pAudioBufferRef == NULL) { + return MA_INVALID_ARGS; + } + + *pCursor = pAudioBufferRef->cursor; + + return MA_SUCCESS; +} + +MA_API ma_result ma_audio_buffer_ref_get_length_in_pcm_frames(const ma_audio_buffer_ref* pAudioBufferRef, ma_uint64* pLength) +{ + if (pLength == NULL) { + return MA_INVALID_ARGS; + } + + *pLength = 0; + + if (pAudioBufferRef == NULL) { + return MA_INVALID_ARGS; + } + + *pLength = pAudioBufferRef->sizeInFrames; + + return MA_SUCCESS; +} + +MA_API ma_result ma_audio_buffer_ref_get_available_frames(const ma_audio_buffer_ref* pAudioBufferRef, ma_uint64* pAvailableFrames) +{ + if (pAvailableFrames == NULL) { + return MA_INVALID_ARGS; + } + + *pAvailableFrames = 0; + + if (pAudioBufferRef == NULL) { + return MA_INVALID_ARGS; + } + + if (pAudioBufferRef->sizeInFrames <= pAudioBufferRef->cursor) { + *pAvailableFrames = 0; + } else { + *pAvailableFrames = pAudioBufferRef->sizeInFrames - pAudioBufferRef->cursor; + } + + return MA_SUCCESS; +} + + + + +MA_API ma_audio_buffer_config ma_audio_buffer_config_init(ma_format format, ma_uint32 channels, ma_uint64 sizeInFrames, const void* pData, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_audio_buffer_config config; + + MA_ZERO_OBJECT(&config); + config.format = format; + config.channels = channels; + config.sampleRate = 0; /* TODO: Version 0.12. Set this to sampleRate. */ + config.sizeInFrames = sizeInFrames; + config.pData = pData; + ma_allocation_callbacks_init_copy(&config.allocationCallbacks, pAllocationCallbacks); + + return config; +} + +static ma_result ma_audio_buffer_init_ex(const ma_audio_buffer_config* pConfig, ma_bool32 doCopy, ma_audio_buffer* pAudioBuffer) +{ + ma_result result; + + if (pAudioBuffer == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_MEMORY(pAudioBuffer, sizeof(*pAudioBuffer) - sizeof(pAudioBuffer->_pExtraData)); /* Safety. 
Don't overwrite the extra data. */ + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + if (pConfig->sizeInFrames == 0) { + return MA_INVALID_ARGS; /* Not allowing buffer sizes of 0 frames. */ + } + + result = ma_audio_buffer_ref_init(pConfig->format, pConfig->channels, NULL, 0, &pAudioBuffer->ref); + if (result != MA_SUCCESS) { + return result; + } + + /* TODO: Version 0.12. Set this in ma_audio_buffer_ref_init() instead of here. */ + pAudioBuffer->ref.sampleRate = pConfig->sampleRate; + + ma_allocation_callbacks_init_copy(&pAudioBuffer->allocationCallbacks, &pConfig->allocationCallbacks); + + if (doCopy) { + ma_uint64 allocationSizeInBytes; + void* pData; + + allocationSizeInBytes = pConfig->sizeInFrames * ma_get_bytes_per_frame(pConfig->format, pConfig->channels); + if (allocationSizeInBytes > MA_SIZE_MAX) { + return MA_OUT_OF_MEMORY; /* Too big. */ + } + + pData = ma_malloc((size_t)allocationSizeInBytes, &pAudioBuffer->allocationCallbacks); /* Safe cast to size_t. */ + if (pData == NULL) { + return MA_OUT_OF_MEMORY; + } + + if (pConfig->pData != NULL) { + ma_copy_pcm_frames(pData, pConfig->pData, pConfig->sizeInFrames, pConfig->format, pConfig->channels); + } else { + ma_silence_pcm_frames(pData, pConfig->sizeInFrames, pConfig->format, pConfig->channels); + } + + ma_audio_buffer_ref_set_data(&pAudioBuffer->ref, pData, pConfig->sizeInFrames); + pAudioBuffer->ownsData = MA_TRUE; + } else { + ma_audio_buffer_ref_set_data(&pAudioBuffer->ref, pConfig->pData, pConfig->sizeInFrames); + pAudioBuffer->ownsData = MA_FALSE; + } + + return MA_SUCCESS; +} + +static void ma_audio_buffer_uninit_ex(ma_audio_buffer* pAudioBuffer, ma_bool32 doFree) +{ + if (pAudioBuffer == NULL) { + return; + } + + if (pAudioBuffer->ownsData && pAudioBuffer->ref.pData != &pAudioBuffer->_pExtraData[0]) { + ma_free((void*)pAudioBuffer->ref.pData, &pAudioBuffer->allocationCallbacks); /* Naughty const cast, but OK in this case since we've guarded it with the ownsData check. */ + } + + if (doFree) { + ma_free(pAudioBuffer, &pAudioBuffer->allocationCallbacks); + } + + ma_audio_buffer_ref_uninit(&pAudioBuffer->ref); +} + +MA_API ma_result ma_audio_buffer_init(const ma_audio_buffer_config* pConfig, ma_audio_buffer* pAudioBuffer) +{ + return ma_audio_buffer_init_ex(pConfig, MA_FALSE, pAudioBuffer); +} + +MA_API ma_result ma_audio_buffer_init_copy(const ma_audio_buffer_config* pConfig, ma_audio_buffer* pAudioBuffer) +{ + return ma_audio_buffer_init_ex(pConfig, MA_TRUE, pAudioBuffer); +} + +MA_API ma_result ma_audio_buffer_alloc_and_init(const ma_audio_buffer_config* pConfig, ma_audio_buffer** ppAudioBuffer) +{ + ma_result result; + ma_audio_buffer* pAudioBuffer; + ma_audio_buffer_config innerConfig; /* We'll be making some changes to the config, so need to make a copy. */ + ma_uint64 allocationSizeInBytes; + + if (ppAudioBuffer == NULL) { + return MA_INVALID_ARGS; + } + + *ppAudioBuffer = NULL; /* Safety. */ + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + innerConfig = *pConfig; + ma_allocation_callbacks_init_copy(&innerConfig.allocationCallbacks, &pConfig->allocationCallbacks); + + allocationSizeInBytes = sizeof(*pAudioBuffer) - sizeof(pAudioBuffer->_pExtraData) + (pConfig->sizeInFrames * ma_get_bytes_per_frame(pConfig->format, pConfig->channels)); + if (allocationSizeInBytes > MA_SIZE_MAX) { + return MA_OUT_OF_MEMORY; /* Too big. */ + } + + pAudioBuffer = (ma_audio_buffer*)ma_malloc((size_t)allocationSizeInBytes, &innerConfig.allocationCallbacks); /* Safe cast to size_t. 
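
    [Editor's note, not part of the upstream patch.] The copying initializer gives the
    buffer its own allocation, unlike ma_audio_buffer_init(); a sketch where
    pExistingFrames is illustrative:

        ma_audio_buffer buf;
        ma_audio_buffer_config cfg = ma_audio_buffer_config_init(ma_format_f32, 2, 1024, pExistingFrames, NULL);
        if (ma_audio_buffer_init_copy(&cfg, &buf) == MA_SUCCESS) {  // buf owns a copy of the data
            // ... ma_audio_buffer_read_pcm_frames(&buf, ...) ...
            ma_audio_buffer_uninit(&buf);
        }
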
*/ + if (pAudioBuffer == NULL) { + return MA_OUT_OF_MEMORY; + } + + if (pConfig->pData != NULL) { + ma_copy_pcm_frames(&pAudioBuffer->_pExtraData[0], pConfig->pData, pConfig->sizeInFrames, pConfig->format, pConfig->channels); + } else { + ma_silence_pcm_frames(&pAudioBuffer->_pExtraData[0], pConfig->sizeInFrames, pConfig->format, pConfig->channels); + } + + innerConfig.pData = &pAudioBuffer->_pExtraData[0]; + + result = ma_audio_buffer_init_ex(&innerConfig, MA_FALSE, pAudioBuffer); + if (result != MA_SUCCESS) { + ma_free(pAudioBuffer, &innerConfig.allocationCallbacks); + return result; + } + + *ppAudioBuffer = pAudioBuffer; + + return MA_SUCCESS; +} + +MA_API void ma_audio_buffer_uninit(ma_audio_buffer* pAudioBuffer) +{ + ma_audio_buffer_uninit_ex(pAudioBuffer, MA_FALSE); +} + +MA_API void ma_audio_buffer_uninit_and_free(ma_audio_buffer* pAudioBuffer) +{ + ma_audio_buffer_uninit_ex(pAudioBuffer, MA_TRUE); +} + +MA_API ma_uint64 ma_audio_buffer_read_pcm_frames(ma_audio_buffer* pAudioBuffer, void* pFramesOut, ma_uint64 frameCount, ma_bool32 loop) +{ + if (pAudioBuffer == NULL) { + return 0; + } + + return ma_audio_buffer_ref_read_pcm_frames(&pAudioBuffer->ref, pFramesOut, frameCount, loop); +} + +MA_API ma_result ma_audio_buffer_seek_to_pcm_frame(ma_audio_buffer* pAudioBuffer, ma_uint64 frameIndex) +{ + if (pAudioBuffer == NULL) { + return MA_INVALID_ARGS; + } + + return ma_audio_buffer_ref_seek_to_pcm_frame(&pAudioBuffer->ref, frameIndex); +} + +MA_API ma_result ma_audio_buffer_map(ma_audio_buffer* pAudioBuffer, void** ppFramesOut, ma_uint64* pFrameCount) +{ + if (ppFramesOut != NULL) { + *ppFramesOut = NULL; /* Safety. */ + } + + if (pAudioBuffer == NULL) { + if (pFrameCount != NULL) { + *pFrameCount = 0; + } + + return MA_INVALID_ARGS; + } + + return ma_audio_buffer_ref_map(&pAudioBuffer->ref, ppFramesOut, pFrameCount); +} + +MA_API ma_result ma_audio_buffer_unmap(ma_audio_buffer* pAudioBuffer, ma_uint64 frameCount) +{ + if (pAudioBuffer == NULL) { + return MA_INVALID_ARGS; + } + + return ma_audio_buffer_ref_unmap(&pAudioBuffer->ref, frameCount); +} + +MA_API ma_bool32 ma_audio_buffer_at_end(const ma_audio_buffer* pAudioBuffer) +{ + if (pAudioBuffer == NULL) { + return MA_FALSE; + } + + return ma_audio_buffer_ref_at_end(&pAudioBuffer->ref); +} + +MA_API ma_result ma_audio_buffer_get_cursor_in_pcm_frames(const ma_audio_buffer* pAudioBuffer, ma_uint64* pCursor) +{ + if (pAudioBuffer == NULL) { + return MA_INVALID_ARGS; + } + + return ma_audio_buffer_ref_get_cursor_in_pcm_frames(&pAudioBuffer->ref, pCursor); +} + +MA_API ma_result ma_audio_buffer_get_length_in_pcm_frames(const ma_audio_buffer* pAudioBuffer, ma_uint64* pLength) +{ + if (pAudioBuffer == NULL) { + return MA_INVALID_ARGS; + } + + return ma_audio_buffer_ref_get_length_in_pcm_frames(&pAudioBuffer->ref, pLength); +} + +MA_API ma_result ma_audio_buffer_get_available_frames(const ma_audio_buffer* pAudioBuffer, ma_uint64* pAvailableFrames) +{ + if (pAvailableFrames == NULL) { + return MA_INVALID_ARGS; + } + + *pAvailableFrames = 0; + + if (pAudioBuffer == NULL) { + return MA_INVALID_ARGS; + } + + return ma_audio_buffer_ref_get_available_frames(&pAudioBuffer->ref, pAvailableFrames); +} + + + + + +MA_API ma_result ma_paged_audio_buffer_data_init(ma_format format, ma_uint32 channels, ma_paged_audio_buffer_data* pData) +{ + if (pData == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pData); + + pData->format = format; + pData->channels = channels; + pData->pTail = &pData->head; + + return MA_SUCCESS; +} + +MA_API void 
ma_paged_audio_buffer_data_uninit(ma_paged_audio_buffer_data* pData, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_paged_audio_buffer_page* pPage; + + if (pData == NULL) { + return; + } + + /* All pages need to be freed. */ + pPage = (ma_paged_audio_buffer_page*)ma_atomic_load_ptr(&pData->head.pNext); + while (pPage != NULL) { + ma_paged_audio_buffer_page* pNext = (ma_paged_audio_buffer_page*)ma_atomic_load_ptr(&pPage->pNext); + + ma_free(pPage, pAllocationCallbacks); + pPage = pNext; + } +} + +MA_API ma_paged_audio_buffer_page* ma_paged_audio_buffer_data_get_head(ma_paged_audio_buffer_data* pData) +{ + if (pData == NULL) { + return NULL; + } + + return &pData->head; +} + +MA_API ma_paged_audio_buffer_page* ma_paged_audio_buffer_data_get_tail(ma_paged_audio_buffer_data* pData) +{ + if (pData == NULL) { + return NULL; + } + + return pData->pTail; +} + +MA_API ma_result ma_paged_audio_buffer_data_get_length_in_pcm_frames(ma_paged_audio_buffer_data* pData, ma_uint64* pLength) +{ + ma_paged_audio_buffer_page* pPage; + + if (pLength == NULL) { + return MA_INVALID_ARGS; + } + + *pLength = 0; + + if (pData == NULL) { + return MA_INVALID_ARGS; + } + + /* Calculate the length from the linked list. */ + for (pPage = (ma_paged_audio_buffer_page*)ma_atomic_load_ptr(&pData->head.pNext); pPage != NULL; pPage = (ma_paged_audio_buffer_page*)ma_atomic_load_ptr(&pPage->pNext)) { + *pLength += pPage->sizeInFrames; + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_paged_audio_buffer_data_allocate_page(ma_paged_audio_buffer_data* pData, ma_uint64 pageSizeInFrames, const void* pInitialData, const ma_allocation_callbacks* pAllocationCallbacks, ma_paged_audio_buffer_page** ppPage) +{ + ma_paged_audio_buffer_page* pPage; + ma_uint64 allocationSize; + + if (ppPage == NULL) { + return MA_INVALID_ARGS; + } + + *ppPage = NULL; + + if (pData == NULL) { + return MA_INVALID_ARGS; + } + + allocationSize = sizeof(*pPage) + (pageSizeInFrames * ma_get_bytes_per_frame(pData->format, pData->channels)); + if (allocationSize > MA_SIZE_MAX) { + return MA_OUT_OF_MEMORY; /* Too big. */ + } + + pPage = (ma_paged_audio_buffer_page*)ma_malloc((size_t)allocationSize, pAllocationCallbacks); /* Safe cast to size_t. */ + if (pPage == NULL) { + return MA_OUT_OF_MEMORY; + } + + pPage->pNext = NULL; + pPage->sizeInFrames = pageSizeInFrames; + + if (pInitialData != NULL) { + ma_copy_pcm_frames(pPage->pAudioData, pInitialData, pageSizeInFrames, pData->format, pData->channels); + } + + *ppPage = pPage; + + return MA_SUCCESS; +} + +MA_API ma_result ma_paged_audio_buffer_data_free_page(ma_paged_audio_buffer_data* pData, ma_paged_audio_buffer_page* pPage, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pData == NULL || pPage == NULL) { + return MA_INVALID_ARGS; + } + + /* It's assumed the page is not attached to the list. */ + ma_free(pPage, pAllocationCallbacks); + + return MA_SUCCESS; +} + +MA_API ma_result ma_paged_audio_buffer_data_append_page(ma_paged_audio_buffer_data* pData, ma_paged_audio_buffer_page* pPage) +{ + if (pData == NULL || pPage == NULL) { + return MA_INVALID_ARGS; + } + + /* This function assumes the page has been filled with audio data by this point. As soon as we append, the page will be available for reading. */ + + /* First thing to do is update the tail. 
*/ + for (;;) { + ma_paged_audio_buffer_page* pOldTail = (ma_paged_audio_buffer_page*)ma_atomic_load_ptr(&pData->pTail); + ma_paged_audio_buffer_page* pNewTail = pPage; + + if (ma_atomic_compare_exchange_weak_ptr((volatile void**)&pData->pTail, (void**)&pOldTail, pNewTail)) { + /* Here is where we append the page to the list. After this, the page is attached to the list and ready to be read from. */ + ma_atomic_exchange_ptr(&pOldTail->pNext, pPage); + break; /* Done. */ + } + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_paged_audio_buffer_data_allocate_and_append_page(ma_paged_audio_buffer_data* pData, ma_uint32 pageSizeInFrames, const void* pInitialData, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_result result; + ma_paged_audio_buffer_page* pPage; + + result = ma_paged_audio_buffer_data_allocate_page(pData, pageSizeInFrames, pInitialData, pAllocationCallbacks, &pPage); + if (result != MA_SUCCESS) { + return result; + } + + return ma_paged_audio_buffer_data_append_page(pData, pPage); /* <-- Should never fail. */ +} + + +MA_API ma_paged_audio_buffer_config ma_paged_audio_buffer_config_init(ma_paged_audio_buffer_data* pData) +{ + ma_paged_audio_buffer_config config; + + MA_ZERO_OBJECT(&config); + config.pData = pData; + + return config; +} + + +static ma_result ma_paged_audio_buffer__data_source_on_read(ma_data_source* pDataSource, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead) +{ + return ma_paged_audio_buffer_read_pcm_frames((ma_paged_audio_buffer*)pDataSource, pFramesOut, frameCount, pFramesRead); +} + +static ma_result ma_paged_audio_buffer__data_source_on_seek(ma_data_source* pDataSource, ma_uint64 frameIndex) +{ + return ma_paged_audio_buffer_seek_to_pcm_frame((ma_paged_audio_buffer*)pDataSource, frameIndex); +} + +static ma_result ma_paged_audio_buffer__data_source_on_get_data_format(ma_data_source* pDataSource, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate, ma_channel* pChannelMap, size_t channelMapCap) +{ + ma_paged_audio_buffer* pPagedAudioBuffer = (ma_paged_audio_buffer*)pDataSource; + + *pFormat = pPagedAudioBuffer->pData->format; + *pChannels = pPagedAudioBuffer->pData->channels; + *pSampleRate = 0; /* There is no notion of a sample rate with audio buffers. 
*/ + ma_channel_map_init_standard(ma_standard_channel_map_default, pChannelMap, channelMapCap, pPagedAudioBuffer->pData->channels); + + return MA_SUCCESS; +} + +static ma_result ma_paged_audio_buffer__data_source_on_get_cursor(ma_data_source* pDataSource, ma_uint64* pCursor) +{ + return ma_paged_audio_buffer_get_cursor_in_pcm_frames((ma_paged_audio_buffer*)pDataSource, pCursor); +} + +static ma_result ma_paged_audio_buffer__data_source_on_get_length(ma_data_source* pDataSource, ma_uint64* pLength) +{ + return ma_paged_audio_buffer_get_length_in_pcm_frames((ma_paged_audio_buffer*)pDataSource, pLength); +} + +static ma_data_source_vtable g_ma_paged_audio_buffer_data_source_vtable = +{ + ma_paged_audio_buffer__data_source_on_read, + ma_paged_audio_buffer__data_source_on_seek, + ma_paged_audio_buffer__data_source_on_get_data_format, + ma_paged_audio_buffer__data_source_on_get_cursor, + ma_paged_audio_buffer__data_source_on_get_length, + NULL, /* onSetLooping */ + 0 +}; + +MA_API ma_result ma_paged_audio_buffer_init(const ma_paged_audio_buffer_config* pConfig, ma_paged_audio_buffer* pPagedAudioBuffer) +{ + ma_result result; + ma_data_source_config dataSourceConfig; + + if (pPagedAudioBuffer == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pPagedAudioBuffer); + + /* A config is required for the format and channel count. */ + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + if (pConfig->pData == NULL) { + return MA_INVALID_ARGS; /* No underlying data specified. */ + } + + dataSourceConfig = ma_data_source_config_init(); + dataSourceConfig.vtable = &g_ma_paged_audio_buffer_data_source_vtable; + + result = ma_data_source_init(&dataSourceConfig, &pPagedAudioBuffer->ds); + if (result != MA_SUCCESS) { + return result; + } + + pPagedAudioBuffer->pData = pConfig->pData; + pPagedAudioBuffer->pCurrent = ma_paged_audio_buffer_data_get_head(pConfig->pData); + pPagedAudioBuffer->relativeCursor = 0; + pPagedAudioBuffer->absoluteCursor = 0; + + return MA_SUCCESS; +} + +MA_API void ma_paged_audio_buffer_uninit(ma_paged_audio_buffer* pPagedAudioBuffer) +{ + if (pPagedAudioBuffer == NULL) { + return; + } + + /* Nothing to do. The data needs to be deleted separately. */ +} + +MA_API ma_result ma_paged_audio_buffer_read_pcm_frames(ma_paged_audio_buffer* pPagedAudioBuffer, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead) +{ + ma_result result = MA_SUCCESS; + ma_uint64 totalFramesRead = 0; + ma_format format; + ma_uint32 channels; + + if (pPagedAudioBuffer == NULL) { + return MA_INVALID_ARGS; + } + + format = pPagedAudioBuffer->pData->format; + channels = pPagedAudioBuffer->pData->channels; + + while (totalFramesRead < frameCount) { + /* Read from the current page. The buffer should never be in a state where this is NULL. 
*/ + ma_uint64 framesRemainingInCurrentPage; + ma_uint64 framesRemainingToRead = frameCount - totalFramesRead; + ma_uint64 framesToReadThisIteration; + + MA_ASSERT(pPagedAudioBuffer->pCurrent != NULL); + + framesRemainingInCurrentPage = pPagedAudioBuffer->pCurrent->sizeInFrames - pPagedAudioBuffer->relativeCursor; + + framesToReadThisIteration = ma_min(framesRemainingInCurrentPage, framesRemainingToRead); + ma_copy_pcm_frames(ma_offset_pcm_frames_ptr(pFramesOut, totalFramesRead, format, channels), ma_offset_pcm_frames_ptr(pPagedAudioBuffer->pCurrent->pAudioData, pPagedAudioBuffer->relativeCursor, format, channels), framesToReadThisIteration, format, channels); + totalFramesRead += framesToReadThisIteration; + + pPagedAudioBuffer->absoluteCursor += framesToReadThisIteration; + pPagedAudioBuffer->relativeCursor += framesToReadThisIteration; + + /* Move to the next page if necessary. If there's no more pages, we need to return MA_AT_END. */ + MA_ASSERT(pPagedAudioBuffer->relativeCursor <= pPagedAudioBuffer->pCurrent->sizeInFrames); + + if (pPagedAudioBuffer->relativeCursor == pPagedAudioBuffer->pCurrent->sizeInFrames) { + /* We reached the end of the page. Need to move to the next. If there's no more pages, we're done. */ + ma_paged_audio_buffer_page* pNext = (ma_paged_audio_buffer_page*)ma_atomic_load_ptr(&pPagedAudioBuffer->pCurrent->pNext); + if (pNext == NULL) { + result = MA_AT_END; + break; /* We've reached the end. */ + } else { + pPagedAudioBuffer->pCurrent = pNext; + pPagedAudioBuffer->relativeCursor = 0; + } + } + } + + if (pFramesRead != NULL) { + *pFramesRead = totalFramesRead; + } + + return result; +} + +MA_API ma_result ma_paged_audio_buffer_seek_to_pcm_frame(ma_paged_audio_buffer* pPagedAudioBuffer, ma_uint64 frameIndex) +{ + if (pPagedAudioBuffer == NULL) { + return MA_INVALID_ARGS; + } + + if (frameIndex == pPagedAudioBuffer->absoluteCursor) { + return MA_SUCCESS; /* Nothing to do. */ + } + + if (frameIndex < pPagedAudioBuffer->absoluteCursor) { + /* Moving backwards. Need to move the cursor back to the start, and then move forward. */ + pPagedAudioBuffer->pCurrent = ma_paged_audio_buffer_data_get_head(pPagedAudioBuffer->pData); + pPagedAudioBuffer->absoluteCursor = 0; + pPagedAudioBuffer->relativeCursor = 0; + + /* Fall through to the forward seeking section below. */ + } + + if (frameIndex > pPagedAudioBuffer->absoluteCursor) { + /* Moving forward. */ + ma_paged_audio_buffer_page* pPage; + ma_uint64 runningCursor = 0; + + for (pPage = (ma_paged_audio_buffer_page*)ma_atomic_load_ptr(&ma_paged_audio_buffer_data_get_head(pPagedAudioBuffer->pData)->pNext); pPage != NULL; pPage = (ma_paged_audio_buffer_page*)ma_atomic_load_ptr(&pPage->pNext)) { + ma_uint64 pageRangeBeg = runningCursor; + ma_uint64 pageRangeEnd = pageRangeBeg + pPage->sizeInFrames; + + if (frameIndex >= pageRangeBeg) { + if (frameIndex < pageRangeEnd || (frameIndex == pageRangeEnd && pPage == (ma_paged_audio_buffer_page*)ma_atomic_load_ptr(ma_paged_audio_buffer_data_get_tail(pPagedAudioBuffer->pData)))) { /* A small edge case - allow seeking to the very end of the buffer. */ + /* We found the page. */ + pPagedAudioBuffer->pCurrent = pPage; + pPagedAudioBuffer->absoluteCursor = frameIndex; + pPagedAudioBuffer->relativeCursor = frameIndex - pageRangeBeg; + return MA_SUCCESS; + } + } + + runningCursor = pageRangeEnd; + } + + /* Getting here means we tried seeking too far forward. Don't change any state. 
*/ + return MA_BAD_SEEK; + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_paged_audio_buffer_get_cursor_in_pcm_frames(ma_paged_audio_buffer* pPagedAudioBuffer, ma_uint64* pCursor) +{ + if (pCursor == NULL) { + return MA_INVALID_ARGS; + } + + *pCursor = 0; /* Safety. */ + + if (pPagedAudioBuffer == NULL) { + return MA_INVALID_ARGS; + } + + *pCursor = pPagedAudioBuffer->absoluteCursor; + + return MA_SUCCESS; +} + +MA_API ma_result ma_paged_audio_buffer_get_length_in_pcm_frames(ma_paged_audio_buffer* pPagedAudioBuffer, ma_uint64* pLength) +{ + if (pPagedAudioBuffer == NULL) { + return MA_INVALID_ARGS; + } + + return ma_paged_audio_buffer_data_get_length_in_pcm_frames(pPagedAudioBuffer->pData, pLength); +} + + + +/************************************************************************************************************************************************************** + +VFS + +**************************************************************************************************************************************************************/ +MA_API ma_result ma_vfs_open(ma_vfs* pVFS, const char* pFilePath, ma_uint32 openMode, ma_vfs_file* pFile) +{ + ma_vfs_callbacks* pCallbacks = (ma_vfs_callbacks*)pVFS; + + if (pFile == NULL) { + return MA_INVALID_ARGS; + } + + *pFile = NULL; + + if (pVFS == NULL || pFilePath == NULL || openMode == 0) { + return MA_INVALID_ARGS; + } + + if (pCallbacks->onOpen == NULL) { + return MA_NOT_IMPLEMENTED; + } + + return pCallbacks->onOpen(pVFS, pFilePath, openMode, pFile); +} + +MA_API ma_result ma_vfs_open_w(ma_vfs* pVFS, const wchar_t* pFilePath, ma_uint32 openMode, ma_vfs_file* pFile) +{ + ma_vfs_callbacks* pCallbacks = (ma_vfs_callbacks*)pVFS; + + if (pFile == NULL) { + return MA_INVALID_ARGS; + } + + *pFile = NULL; + + if (pVFS == NULL || pFilePath == NULL || openMode == 0) { + return MA_INVALID_ARGS; + } + + if (pCallbacks->onOpenW == NULL) { + return MA_NOT_IMPLEMENTED; + } + + return pCallbacks->onOpenW(pVFS, pFilePath, openMode, pFile); +} + +MA_API ma_result ma_vfs_close(ma_vfs* pVFS, ma_vfs_file file) +{ + ma_vfs_callbacks* pCallbacks = (ma_vfs_callbacks*)pVFS; + + if (pVFS == NULL || file == NULL) { + return MA_INVALID_ARGS; + } + + if (pCallbacks->onClose == NULL) { + return MA_NOT_IMPLEMENTED; + } + + return pCallbacks->onClose(pVFS, file); +} + +MA_API ma_result ma_vfs_read(ma_vfs* pVFS, ma_vfs_file file, void* pDst, size_t sizeInBytes, size_t* pBytesRead) +{ + ma_vfs_callbacks* pCallbacks = (ma_vfs_callbacks*)pVFS; + ma_result result; + size_t bytesRead = 0; + + if (pBytesRead != NULL) { + *pBytesRead = 0; + } + + if (pVFS == NULL || file == NULL || pDst == NULL) { + return MA_INVALID_ARGS; + } + + if (pCallbacks->onRead == NULL) { + return MA_NOT_IMPLEMENTED; + } + + result = pCallbacks->onRead(pVFS, file, pDst, sizeInBytes, &bytesRead); + + if (pBytesRead != NULL) { + *pBytesRead = bytesRead; + } + + if (result == MA_SUCCESS && bytesRead == 0 && sizeInBytes > 0) { + result = MA_AT_END; + } + + return result; +} + +MA_API ma_result ma_vfs_write(ma_vfs* pVFS, ma_vfs_file file, const void* pSrc, size_t sizeInBytes, size_t* pBytesWritten) +{ + ma_vfs_callbacks* pCallbacks = (ma_vfs_callbacks*)pVFS; + + if (pBytesWritten != NULL) { + *pBytesWritten = 0; + } + + if (pVFS == NULL || file == NULL || pSrc == NULL) { + return MA_INVALID_ARGS; + } + + if (pCallbacks->onWrite == NULL) { + return MA_NOT_IMPLEMENTED; + } + + return pCallbacks->onWrite(pVFS, file, pSrc, sizeInBytes, pBytesWritten); +} + +MA_API ma_result ma_vfs_seek(ma_vfs* pVFS, ma_vfs_file file, ma_int64 offset, ma_seek_origin origin) +{ + ma_vfs_callbacks* 
pCallbacks = (ma_vfs_callbacks*)pVFS; + + if (pVFS == NULL || file == NULL) { + return MA_INVALID_ARGS; + } + + if (pCallbacks->onSeek == NULL) { + return MA_NOT_IMPLEMENTED; + } + + return pCallbacks->onSeek(pVFS, file, offset, origin); +} + +MA_API ma_result ma_vfs_tell(ma_vfs* pVFS, ma_vfs_file file, ma_int64* pCursor) +{ + ma_vfs_callbacks* pCallbacks = (ma_vfs_callbacks*)pVFS; + + if (pCursor == NULL) { + return MA_INVALID_ARGS; + } + + *pCursor = 0; + + if (pVFS == NULL || file == NULL) { + return MA_INVALID_ARGS; + } + + if (pCallbacks->onTell == NULL) { + return MA_NOT_IMPLEMENTED; + } + + return pCallbacks->onTell(pVFS, file, pCursor); +} + +MA_API ma_result ma_vfs_info(ma_vfs* pVFS, ma_vfs_file file, ma_file_info* pInfo) +{ + ma_vfs_callbacks* pCallbacks = (ma_vfs_callbacks*)pVFS; + + if (pInfo == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pInfo); + + if (pVFS == NULL || file == NULL) { + return MA_INVALID_ARGS; + } + + if (pCallbacks->onInfo == NULL) { + return MA_NOT_IMPLEMENTED; + } + + return pCallbacks->onInfo(pVFS, file, pInfo); +} + + +#if !defined(MA_USE_WIN32_FILEIO) && (defined(MA_WIN32) && defined(MA_WIN32_DESKTOP) && !defined(MA_NO_WIN32_FILEIO) && !defined(MA_POSIX)) + #define MA_USE_WIN32_FILEIO +#endif + +#if defined(MA_USE_WIN32_FILEIO) +/* +We need to dynamically load SetFilePointer or SetFilePointerEx because older versions of Windows do +not have the Ex version. We therefore need to do some dynamic branching depending on what's available. + +We load these when we load our first file from the default VFS. It's left open for the life of the +program and is left to the OS to uninitialize when the program terminates. +*/ +typedef DWORD (__stdcall * ma_SetFilePointer_proc)(HANDLE hFile, LONG lDistanceToMove, LONG* lpDistanceToMoveHigh, DWORD dwMoveMethod); +typedef BOOL (__stdcall * ma_SetFilePointerEx_proc)(HANDLE hFile, LARGE_INTEGER liDistanceToMove, LARGE_INTEGER* lpNewFilePointer, DWORD dwMoveMethod); + +static ma_handle hKernel32DLL = NULL; +static ma_SetFilePointer_proc ma_SetFilePointer = NULL; +static ma_SetFilePointerEx_proc ma_SetFilePointerEx = NULL; + +static void ma_win32_fileio_init(void) +{ + if (hKernel32DLL == NULL) { + hKernel32DLL = ma_dlopen(NULL, "kernel32.dll"); + if (hKernel32DLL != NULL) { + ma_SetFilePointer = (ma_SetFilePointer_proc) ma_dlsym(NULL, hKernel32DLL, "SetFilePointer"); + ma_SetFilePointerEx = (ma_SetFilePointerEx_proc)ma_dlsym(NULL, hKernel32DLL, "SetFilePointerEx"); + } + } +} + +static void ma_default_vfs__get_open_settings_win32(ma_uint32 openMode, DWORD* pDesiredAccess, DWORD* pShareMode, DWORD* pCreationDisposition) +{ + *pDesiredAccess = 0; + if ((openMode & MA_OPEN_MODE_READ) != 0) { + *pDesiredAccess |= GENERIC_READ; + } + if ((openMode & MA_OPEN_MODE_WRITE) != 0) { + *pDesiredAccess |= GENERIC_WRITE; + } + + *pShareMode = 0; + if ((openMode & MA_OPEN_MODE_READ) != 0) { + *pShareMode |= FILE_SHARE_READ; + } + + if ((openMode & MA_OPEN_MODE_WRITE) != 0) { + *pCreationDisposition = CREATE_ALWAYS; /* Opening in write mode. Truncate. */ + } else { + *pCreationDisposition = OPEN_EXISTING; /* Opening in read mode. File must exist. */ + } +} + +static ma_result ma_default_vfs_open__win32(ma_vfs* pVFS, const char* pFilePath, ma_uint32 openMode, ma_vfs_file* pFile) +{ + HANDLE hFile; + DWORD dwDesiredAccess; + DWORD dwShareMode; + DWORD dwCreationDisposition; + + (void)pVFS; + + /* Load some Win32 symbols dynamically so we can dynamically check for the existence of SetFilePointerEx. 
*/ + ma_win32_fileio_init(); + + ma_default_vfs__get_open_settings_win32(openMode, &dwDesiredAccess, &dwShareMode, &dwCreationDisposition); + + hFile = CreateFileA(pFilePath, dwDesiredAccess, dwShareMode, NULL, dwCreationDisposition, FILE_ATTRIBUTE_NORMAL, NULL); + if (hFile == INVALID_HANDLE_VALUE) { + return ma_result_from_GetLastError(GetLastError()); + } + + *pFile = hFile; + return MA_SUCCESS; +} + +static ma_result ma_default_vfs_open_w__win32(ma_vfs* pVFS, const wchar_t* pFilePath, ma_uint32 openMode, ma_vfs_file* pFile) +{ + HANDLE hFile; + DWORD dwDesiredAccess; + DWORD dwShareMode; + DWORD dwCreationDisposition; + + (void)pVFS; + + /* Load some Win32 symbols dynamically so we can dynamically check for the existence of SetFilePointerEx. */ + ma_win32_fileio_init(); + + ma_default_vfs__get_open_settings_win32(openMode, &dwDesiredAccess, &dwShareMode, &dwCreationDisposition); + + hFile = CreateFileW(pFilePath, dwDesiredAccess, dwShareMode, NULL, dwCreationDisposition, FILE_ATTRIBUTE_NORMAL, NULL); + if (hFile == INVALID_HANDLE_VALUE) { + return ma_result_from_GetLastError(GetLastError()); + } + + *pFile = hFile; + return MA_SUCCESS; +} + +static ma_result ma_default_vfs_close__win32(ma_vfs* pVFS, ma_vfs_file file) +{ + (void)pVFS; + + if (CloseHandle((HANDLE)file) == 0) { + return ma_result_from_GetLastError(GetLastError()); + } + + return MA_SUCCESS; +} + + +static ma_result ma_default_vfs_read__win32(ma_vfs* pVFS, ma_vfs_file file, void* pDst, size_t sizeInBytes, size_t* pBytesRead) +{ + ma_result result = MA_SUCCESS; + size_t totalBytesRead; + + (void)pVFS; + + totalBytesRead = 0; + while (totalBytesRead < sizeInBytes) { + size_t bytesRemaining; + DWORD bytesToRead; + DWORD bytesRead; + BOOL readResult; + + bytesRemaining = sizeInBytes - totalBytesRead; + if (bytesRemaining >= 0xFFFFFFFF) { + bytesToRead = 0xFFFFFFFF; + } else { + bytesToRead = (DWORD)bytesRemaining; + } + + readResult = ReadFile((HANDLE)file, ma_offset_ptr(pDst, totalBytesRead), bytesToRead, &bytesRead, NULL); + if (readResult == 1 && bytesRead == 0) { + result = MA_AT_END; + break; /* EOF */ + } + + totalBytesRead += bytesRead; + + if (bytesRead < bytesToRead) { + break; /* EOF */ + } + + if (readResult == 0) { + result = ma_result_from_GetLastError(GetLastError()); + break; + } + } + + if (pBytesRead != NULL) { + *pBytesRead = totalBytesRead; + } + + return result; +} + +static ma_result ma_default_vfs_write__win32(ma_vfs* pVFS, ma_vfs_file file, const void* pSrc, size_t sizeInBytes, size_t* pBytesWritten) +{ + ma_result result = MA_SUCCESS; + size_t totalBytesWritten; + + (void)pVFS; + + totalBytesWritten = 0; + while (totalBytesWritten < sizeInBytes) { + size_t bytesRemaining; + DWORD bytesToWrite; + DWORD bytesWritten; + BOOL writeResult; + + bytesRemaining = sizeInBytes - totalBytesWritten; + if (bytesRemaining >= 0xFFFFFFFF) { + bytesToWrite = 0xFFFFFFFF; + } else { + bytesToWrite = (DWORD)bytesRemaining; + } + + writeResult = WriteFile((HANDLE)file, ma_offset_ptr(pSrc, totalBytesWritten), bytesToWrite, &bytesWritten, NULL); + totalBytesWritten += bytesWritten; + + if (writeResult == 0) { + result = ma_result_from_GetLastError(GetLastError()); + break; + } + } + + if (pBytesWritten != NULL) { + *pBytesWritten = totalBytesWritten; + } + + return result; +} + + +static ma_result ma_default_vfs_seek__win32(ma_vfs* pVFS, ma_vfs_file file, ma_int64 offset, ma_seek_origin origin) +{ + LARGE_INTEGER liDistanceToMove; + DWORD dwMoveMethod; + BOOL result; + + (void)pVFS; + + liDistanceToMove.QuadPart = offset; + + 
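/* Map the seek origin to a Win32 move method, then seek with SetFilePointerEx() where it was loaded, falling back to the 31-bit SetFilePointer() on older versions of Windows. */ +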
/* */ if (origin == ma_seek_origin_current) { + dwMoveMethod = FILE_CURRENT; + } else if (origin == ma_seek_origin_end) { + dwMoveMethod = FILE_END; + } else { + dwMoveMethod = FILE_BEGIN; + } + + if (ma_SetFilePointerEx != NULL) { + result = ma_SetFilePointerEx((HANDLE)file, liDistanceToMove, NULL, dwMoveMethod); + } else if (ma_SetFilePointer != NULL) { + /* No SetFilePointerEx() so restrict to 31 bits. */ + if (offset > 0x7FFFFFFF) { + return MA_OUT_OF_RANGE; + } + + result = ma_SetFilePointer((HANDLE)file, (LONG)liDistanceToMove.QuadPart, NULL, dwMoveMethod); + } else { + return MA_NOT_IMPLEMENTED; + } + + if (result == 0) { + return ma_result_from_GetLastError(GetLastError()); + } + + return MA_SUCCESS; +} + +static ma_result ma_default_vfs_tell__win32(ma_vfs* pVFS, ma_vfs_file file, ma_int64* pCursor) +{ + LARGE_INTEGER liZero; + LARGE_INTEGER liTell; + BOOL result; + + (void)pVFS; + + liZero.QuadPart = 0; + + if (ma_SetFilePointerEx != NULL) { + result = ma_SetFilePointerEx((HANDLE)file, liZero, &liTell, FILE_CURRENT); + } else if (ma_SetFilePointer != NULL) { + LONG tell; + + result = ma_SetFilePointer((HANDLE)file, (LONG)liZero.QuadPart, &tell, FILE_CURRENT); + liTell.QuadPart = tell; + } else { + return MA_NOT_IMPLEMENTED; + } + + if (result == 0) { + return ma_result_from_GetLastError(GetLastError()); + } + + if (pCursor != NULL) { + *pCursor = liTell.QuadPart; + } + + return MA_SUCCESS; +} + +static ma_result ma_default_vfs_info__win32(ma_vfs* pVFS, ma_vfs_file file, ma_file_info* pInfo) +{ + BY_HANDLE_FILE_INFORMATION fi; + BOOL result; + + (void)pVFS; + + result = GetFileInformationByHandle((HANDLE)file, &fi); + if (result == 0) { + return ma_result_from_GetLastError(GetLastError()); + } + + pInfo->sizeInBytes = ((ma_uint64)fi.nFileSizeHigh << 32) | ((ma_uint64)fi.nFileSizeLow); + + return MA_SUCCESS; +} +#else +static ma_result ma_default_vfs_open__stdio(ma_vfs* pVFS, const char* pFilePath, ma_uint32 openMode, ma_vfs_file* pFile) +{ + ma_result result; + FILE* pFileStd; + const char* pOpenModeStr; + + MA_ASSERT(pFilePath != NULL); + MA_ASSERT(openMode != 0); + MA_ASSERT(pFile != NULL); + + (void)pVFS; + + if ((openMode & MA_OPEN_MODE_READ) != 0) { + if ((openMode & MA_OPEN_MODE_WRITE) != 0) { + pOpenModeStr = "r+"; + } else { + pOpenModeStr = "rb"; + } + } else { + pOpenModeStr = "wb"; + } + + result = ma_fopen(&pFileStd, pFilePath, pOpenModeStr); + if (result != MA_SUCCESS) { + return result; + } + + *pFile = pFileStd; + + return MA_SUCCESS; +} + +static ma_result ma_default_vfs_open_w__stdio(ma_vfs* pVFS, const wchar_t* pFilePath, ma_uint32 openMode, ma_vfs_file* pFile) +{ + ma_result result; + FILE* pFileStd; + const wchar_t* pOpenModeStr; + + MA_ASSERT(pFilePath != NULL); + MA_ASSERT(openMode != 0); + MA_ASSERT(pFile != NULL); + + (void)pVFS; + + if ((openMode & MA_OPEN_MODE_READ) != 0) { + if ((openMode & MA_OPEN_MODE_WRITE) != 0) { + pOpenModeStr = L"r+"; + } else { + pOpenModeStr = L"rb"; + } + } else { + pOpenModeStr = L"wb"; + } + + result = ma_wfopen(&pFileStd, pFilePath, pOpenModeStr, (pVFS != NULL) ? 
&((ma_default_vfs*)pVFS)->allocationCallbacks : NULL); + if (result != MA_SUCCESS) { + return result; + } + + *pFile = pFileStd; + + return MA_SUCCESS; +} + +static ma_result ma_default_vfs_close__stdio(ma_vfs* pVFS, ma_vfs_file file) +{ + MA_ASSERT(file != NULL); + + (void)pVFS; + + fclose((FILE*)file); + + return MA_SUCCESS; +} + +static ma_result ma_default_vfs_read__stdio(ma_vfs* pVFS, ma_vfs_file file, void* pDst, size_t sizeInBytes, size_t* pBytesRead) +{ + size_t result; + + MA_ASSERT(file != NULL); + MA_ASSERT(pDst != NULL); + + (void)pVFS; + + result = fread(pDst, 1, sizeInBytes, (FILE*)file); + + if (pBytesRead != NULL) { + *pBytesRead = result; + } + + if (result != sizeInBytes) { + if (result == 0 && feof((FILE*)file)) { + return MA_AT_END; + } else { + return ma_result_from_errno(ferror((FILE*)file)); + } + } + + return MA_SUCCESS; +} + +static ma_result ma_default_vfs_write__stdio(ma_vfs* pVFS, ma_vfs_file file, const void* pSrc, size_t sizeInBytes, size_t* pBytesWritten) +{ + size_t result; + + MA_ASSERT(file != NULL); + MA_ASSERT(pSrc != NULL); + + (void)pVFS; + + result = fwrite(pSrc, 1, sizeInBytes, (FILE*)file); + + if (pBytesWritten != NULL) { + *pBytesWritten = result; + } + + if (result != sizeInBytes) { + return ma_result_from_errno(ferror((FILE*)file)); + } + + return MA_SUCCESS; +} + +static ma_result ma_default_vfs_seek__stdio(ma_vfs* pVFS, ma_vfs_file file, ma_int64 offset, ma_seek_origin origin) +{ + int result; + int whence; + + MA_ASSERT(file != NULL); + + (void)pVFS; + + if (origin == ma_seek_origin_start) { + whence = SEEK_SET; + } else if (origin == ma_seek_origin_end) { + whence = SEEK_END; + } else { + whence = SEEK_CUR; + } + +#if defined(_WIN32) + #if defined(_MSC_VER) && _MSC_VER > 1200 + result = _fseeki64((FILE*)file, offset, whence); + #else + /* No _fseeki64() so restrict to 31 bits. 
*/ + if (offset > 0x7FFFFFFF) { + return MA_OUT_OF_RANGE; + } + + result = fseek((FILE*)file, (int)offset, whence); + #endif +#else + result = fseek((FILE*)file, (long int)offset, whence); +#endif + if (result != 0) { + return MA_ERROR; + } + + return MA_SUCCESS; +} + +static ma_result ma_default_vfs_tell__stdio(ma_vfs* pVFS, ma_vfs_file file, ma_int64* pCursor) +{ + ma_int64 result; + + MA_ASSERT(file != NULL); + MA_ASSERT(pCursor != NULL); + + (void)pVFS; + +#if defined(_WIN32) + #if defined(_MSC_VER) && _MSC_VER > 1200 + result = _ftelli64((FILE*)file); + #else + result = ftell((FILE*)file); + #endif +#else + result = ftell((FILE*)file); +#endif + + *pCursor = result; + + return MA_SUCCESS; +} + +#if !defined(_MSC_VER) && !((defined(_POSIX_C_SOURCE) && _POSIX_C_SOURCE >= 1) || defined(_XOPEN_SOURCE) || defined(_POSIX_SOURCE)) && !defined(MA_BSD) +int fileno(FILE *stream); +#endif + +static ma_result ma_default_vfs_info__stdio(ma_vfs* pVFS, ma_vfs_file file, ma_file_info* pInfo) +{ + int fd; + struct stat info; + + MA_ASSERT(file != NULL); + MA_ASSERT(pInfo != NULL); + + (void)pVFS; + +#if defined(_MSC_VER) + fd = _fileno((FILE*)file); +#else + fd = fileno((FILE*)file); +#endif + + if (fstat(fd, &info) != 0) { + return ma_result_from_errno(errno); + } + + pInfo->sizeInBytes = info.st_size; + + return MA_SUCCESS; +} +#endif + + +static ma_result ma_default_vfs_open(ma_vfs* pVFS, const char* pFilePath, ma_uint32 openMode, ma_vfs_file* pFile) +{ + if (pFile == NULL) { + return MA_INVALID_ARGS; + } + + *pFile = NULL; + + if (pFilePath == NULL || openMode == 0) { + return MA_INVALID_ARGS; + } + +#if defined(MA_USE_WIN32_FILEIO) + return ma_default_vfs_open__win32(pVFS, pFilePath, openMode, pFile); +#else + return ma_default_vfs_open__stdio(pVFS, pFilePath, openMode, pFile); +#endif +} + +static ma_result ma_default_vfs_open_w(ma_vfs* pVFS, const wchar_t* pFilePath, ma_uint32 openMode, ma_vfs_file* pFile) +{ + if (pFile == NULL) { + return MA_INVALID_ARGS; + } + + *pFile = NULL; + + if (pFilePath == NULL || openMode == 0) { + return MA_INVALID_ARGS; + } + +#if defined(MA_USE_WIN32_FILEIO) + return ma_default_vfs_open_w__win32(pVFS, pFilePath, openMode, pFile); +#else + return ma_default_vfs_open_w__stdio(pVFS, pFilePath, openMode, pFile); +#endif +} + +static ma_result ma_default_vfs_close(ma_vfs* pVFS, ma_vfs_file file) +{ + if (file == NULL) { + return MA_INVALID_ARGS; + } + +#if defined(MA_USE_WIN32_FILEIO) + return ma_default_vfs_close__win32(pVFS, file); +#else + return ma_default_vfs_close__stdio(pVFS, file); +#endif +} + +static ma_result ma_default_vfs_read(ma_vfs* pVFS, ma_vfs_file file, void* pDst, size_t sizeInBytes, size_t* pBytesRead) +{ + if (pBytesRead != NULL) { + *pBytesRead = 0; + } + + if (file == NULL || pDst == NULL) { + return MA_INVALID_ARGS; + } + +#if defined(MA_USE_WIN32_FILEIO) + return ma_default_vfs_read__win32(pVFS, file, pDst, sizeInBytes, pBytesRead); +#else + return ma_default_vfs_read__stdio(pVFS, file, pDst, sizeInBytes, pBytesRead); +#endif +} + +static ma_result ma_default_vfs_write(ma_vfs* pVFS, ma_vfs_file file, const void* pSrc, size_t sizeInBytes, size_t* pBytesWritten) +{ + if (pBytesWritten != NULL) { + *pBytesWritten = 0; + } + + if (file == NULL || pSrc == NULL) { + return MA_INVALID_ARGS; + } + +#if defined(MA_USE_WIN32_FILEIO) + return ma_default_vfs_write__win32(pVFS, file, pSrc, sizeInBytes, pBytesWritten); +#else + return ma_default_vfs_write__stdio(pVFS, file, pSrc, sizeInBytes, pBytesWritten); +#endif +} + +static ma_result 
ma_default_vfs_seek(ma_vfs* pVFS, ma_vfs_file file, ma_int64 offset, ma_seek_origin origin) +{ + if (file == NULL) { + return MA_INVALID_ARGS; + } + +#if defined(MA_USE_WIN32_FILEIO) + return ma_default_vfs_seek__win32(pVFS, file, offset, origin); +#else + return ma_default_vfs_seek__stdio(pVFS, file, offset, origin); +#endif +} + +static ma_result ma_default_vfs_tell(ma_vfs* pVFS, ma_vfs_file file, ma_int64* pCursor) +{ + if (pCursor == NULL) { + return MA_INVALID_ARGS; + } + + *pCursor = 0; + + if (file == NULL) { + return MA_INVALID_ARGS; + } + +#if defined(MA_USE_WIN32_FILEIO) + return ma_default_vfs_tell__win32(pVFS, file, pCursor); +#else + return ma_default_vfs_tell__stdio(pVFS, file, pCursor); +#endif +} + +static ma_result ma_default_vfs_info(ma_vfs* pVFS, ma_vfs_file file, ma_file_info* pInfo) +{ + if (pInfo == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pInfo); + + if (file == NULL) { + return MA_INVALID_ARGS; + } + +#if defined(MA_USE_WIN32_FILEIO) + return ma_default_vfs_info__win32(pVFS, file, pInfo); +#else + return ma_default_vfs_info__stdio(pVFS, file, pInfo); +#endif +} + + +MA_API ma_result ma_default_vfs_init(ma_default_vfs* pVFS, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pVFS == NULL) { + return MA_INVALID_ARGS; + } + + pVFS->cb.onOpen = ma_default_vfs_open; + pVFS->cb.onOpenW = ma_default_vfs_open_w; + pVFS->cb.onClose = ma_default_vfs_close; + pVFS->cb.onRead = ma_default_vfs_read; + pVFS->cb.onWrite = ma_default_vfs_write; + pVFS->cb.onSeek = ma_default_vfs_seek; + pVFS->cb.onTell = ma_default_vfs_tell; + pVFS->cb.onInfo = ma_default_vfs_info; + ma_allocation_callbacks_init_copy(&pVFS->allocationCallbacks, pAllocationCallbacks); + + return MA_SUCCESS; +} + + +MA_API ma_result ma_vfs_or_default_open(ma_vfs* pVFS, const char* pFilePath, ma_uint32 openMode, ma_vfs_file* pFile) +{ + if (pVFS != NULL) { + return ma_vfs_open(pVFS, pFilePath, openMode, pFile); + } else { + return ma_default_vfs_open(pVFS, pFilePath, openMode, pFile); + } +} + +MA_API ma_result ma_vfs_or_default_open_w(ma_vfs* pVFS, const wchar_t* pFilePath, ma_uint32 openMode, ma_vfs_file* pFile) +{ + if (pVFS != NULL) { + return ma_vfs_open_w(pVFS, pFilePath, openMode, pFile); + } else { + return ma_default_vfs_open_w(pVFS, pFilePath, openMode, pFile); + } +} + +MA_API ma_result ma_vfs_or_default_close(ma_vfs* pVFS, ma_vfs_file file) +{ + if (pVFS != NULL) { + return ma_vfs_close(pVFS, file); + } else { + return ma_default_vfs_close(pVFS, file); + } +} + +MA_API ma_result ma_vfs_or_default_read(ma_vfs* pVFS, ma_vfs_file file, void* pDst, size_t sizeInBytes, size_t* pBytesRead) +{ + if (pVFS != NULL) { + return ma_vfs_read(pVFS, file, pDst, sizeInBytes, pBytesRead); + } else { + return ma_default_vfs_read(pVFS, file, pDst, sizeInBytes, pBytesRead); + } +} + +MA_API ma_result ma_vfs_or_default_write(ma_vfs* pVFS, ma_vfs_file file, const void* pSrc, size_t sizeInBytes, size_t* pBytesWritten) +{ + if (pVFS != NULL) { + return ma_vfs_write(pVFS, file, pSrc, sizeInBytes, pBytesWritten); + } else { + return ma_default_vfs_write(pVFS, file, pSrc, sizeInBytes, pBytesWritten); + } +} + +MA_API ma_result ma_vfs_or_default_seek(ma_vfs* pVFS, ma_vfs_file file, ma_int64 offset, ma_seek_origin origin) +{ + if (pVFS != NULL) { + return ma_vfs_seek(pVFS, file, offset, origin); + } else { + return ma_default_vfs_seek(pVFS, file, offset, origin); + } +} + +MA_API ma_result ma_vfs_or_default_tell(ma_vfs* pVFS, ma_vfs_file file, ma_int64* pCursor) +{ + if (pVFS != NULL) { + return 
ma_vfs_tell(pVFS, file, pCursor); + } else { + return ma_default_vfs_tell(pVFS, file, pCursor); + } +} + +MA_API ma_result ma_vfs_or_default_info(ma_vfs* pVFS, ma_vfs_file file, ma_file_info* pInfo) +{ + if (pVFS != NULL) { + return ma_vfs_info(pVFS, file, pInfo); + } else { + return ma_default_vfs_info(pVFS, file, pInfo); + } +} + + + +static ma_result ma_vfs_open_and_read_file_ex(ma_vfs* pVFS, const char* pFilePath, const wchar_t* pFilePathW, void** ppData, size_t* pSize, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_result result; + ma_vfs_file file; + ma_file_info info; + void* pData; + size_t bytesRead; + + if (ppData != NULL) { + *ppData = NULL; + } + if (pSize != NULL) { + *pSize = 0; + } + + if (ppData == NULL) { + return MA_INVALID_ARGS; + } + + if (pFilePath != NULL) { + result = ma_vfs_or_default_open(pVFS, pFilePath, MA_OPEN_MODE_READ, &file); + } else { + result = ma_vfs_or_default_open_w(pVFS, pFilePathW, MA_OPEN_MODE_READ, &file); + } + if (result != MA_SUCCESS) { + return result; + } + + result = ma_vfs_or_default_info(pVFS, file, &info); + if (result != MA_SUCCESS) { + ma_vfs_or_default_close(pVFS, file); + return result; + } + + if (info.sizeInBytes > MA_SIZE_MAX) { + ma_vfs_or_default_close(pVFS, file); + return MA_TOO_BIG; + } + + pData = ma_malloc((size_t)info.sizeInBytes, pAllocationCallbacks); /* Safe cast. */ + if (pData == NULL) { + ma_vfs_or_default_close(pVFS, file); + return MA_OUT_OF_MEMORY; + } + + result = ma_vfs_or_default_read(pVFS, file, pData, (size_t)info.sizeInBytes, &bytesRead); /* Safe cast. */ + ma_vfs_or_default_close(pVFS, file); + + if (result != MA_SUCCESS) { + ma_free(pData, pAllocationCallbacks); + return result; + } + + if (pSize != NULL) { + *pSize = bytesRead; + } + + MA_ASSERT(ppData != NULL); + *ppData = pData; + + return MA_SUCCESS; +} + +MA_API ma_result ma_vfs_open_and_read_file(ma_vfs* pVFS, const char* pFilePath, void** ppData, size_t* pSize, const ma_allocation_callbacks* pAllocationCallbacks) +{ + return ma_vfs_open_and_read_file_ex(pVFS, pFilePath, NULL, ppData, pSize, pAllocationCallbacks); +} + +MA_API ma_result ma_vfs_open_and_read_file_w(ma_vfs* pVFS, const wchar_t* pFilePath, void** ppData, size_t* pSize, const ma_allocation_callbacks* pAllocationCallbacks) +{ + return ma_vfs_open_and_read_file_ex(pVFS, NULL, pFilePath, ppData, pSize, pAllocationCallbacks); +} + + + +/************************************************************************************************************************************************************** + +Decoding and Encoding Headers. These are auto-generated by a tool. + +**************************************************************************************************************************************************************/ +#if !defined(MA_NO_WAV) && (!defined(MA_NO_DECODING) || !defined(MA_NO_ENCODING)) +/* dr_wav_h begin */ +#ifndef ma_dr_wav_h +#define ma_dr_wav_h +#ifdef __cplusplus +extern "C" { +#endif +#define MA_DR_WAV_STRINGIFY(x) #x +#define MA_DR_WAV_XSTRINGIFY(x) MA_DR_WAV_STRINGIFY(x) +#define MA_DR_WAV_VERSION_MAJOR 0 +#define MA_DR_WAV_VERSION_MINOR 13 +#define MA_DR_WAV_VERSION_REVISION 18 +#define MA_DR_WAV_VERSION_STRING MA_DR_WAV_XSTRINGIFY(MA_DR_WAV_VERSION_MAJOR) "." MA_DR_WAV_XSTRINGIFY(MA_DR_WAV_VERSION_MINOR) "." 
MA_DR_WAV_XSTRINGIFY(MA_DR_WAV_VERSION_REVISION) +#include <stddef.h> +#define MA_DR_WAVE_FORMAT_PCM 0x1 +#define MA_DR_WAVE_FORMAT_ADPCM 0x2 +#define MA_DR_WAVE_FORMAT_IEEE_FLOAT 0x3 +#define MA_DR_WAVE_FORMAT_ALAW 0x6 +#define MA_DR_WAVE_FORMAT_MULAW 0x7 +#define MA_DR_WAVE_FORMAT_DVI_ADPCM 0x11 +#define MA_DR_WAVE_FORMAT_EXTENSIBLE 0xFFFE +#define MA_DR_WAV_SEQUENTIAL 0x00000001 +#define MA_DR_WAV_WITH_METADATA 0x00000002 +MA_API void ma_dr_wav_version(ma_uint32* pMajor, ma_uint32* pMinor, ma_uint32* pRevision); +MA_API const char* ma_dr_wav_version_string(void); +typedef enum +{ + ma_dr_wav_seek_origin_start, + ma_dr_wav_seek_origin_current +} ma_dr_wav_seek_origin; +typedef enum +{ + ma_dr_wav_container_riff, + ma_dr_wav_container_rifx, + ma_dr_wav_container_w64, + ma_dr_wav_container_rf64, + ma_dr_wav_container_aiff +} ma_dr_wav_container; +typedef struct +{ + union + { + ma_uint8 fourcc[4]; + ma_uint8 guid[16]; + } id; + ma_uint64 sizeInBytes; + unsigned int paddingSize; +} ma_dr_wav_chunk_header; +typedef struct +{ + ma_uint16 formatTag; + ma_uint16 channels; + ma_uint32 sampleRate; + ma_uint32 avgBytesPerSec; + ma_uint16 blockAlign; + ma_uint16 bitsPerSample; + ma_uint16 extendedSize; + ma_uint16 validBitsPerSample; + ma_uint32 channelMask; + ma_uint8 subFormat[16]; +} ma_dr_wav_fmt; +MA_API ma_uint16 ma_dr_wav_fmt_get_format(const ma_dr_wav_fmt* pFMT); +typedef size_t (* ma_dr_wav_read_proc)(void* pUserData, void* pBufferOut, size_t bytesToRead); +typedef size_t (* ma_dr_wav_write_proc)(void* pUserData, const void* pData, size_t bytesToWrite); +typedef ma_bool32 (* ma_dr_wav_seek_proc)(void* pUserData, int offset, ma_dr_wav_seek_origin origin); +typedef ma_uint64 (* ma_dr_wav_chunk_proc)(void* pChunkUserData, ma_dr_wav_read_proc onRead, ma_dr_wav_seek_proc onSeek, void* pReadSeekUserData, const ma_dr_wav_chunk_header* pChunkHeader, ma_dr_wav_container container, const ma_dr_wav_fmt* pFMT); +typedef struct +{ + const ma_uint8* data; + size_t dataSize; + size_t currentReadPos; +} ma_dr_wav__memory_stream; +typedef struct +{ + void** ppData; + size_t* pDataSize; + size_t dataSize; + size_t dataCapacity; + size_t currentWritePos; +} ma_dr_wav__memory_stream_write; +typedef struct +{ + ma_dr_wav_container container; + ma_uint32 format; + ma_uint32 channels; + ma_uint32 sampleRate; + ma_uint32 bitsPerSample; +} ma_dr_wav_data_format; +typedef enum +{ + ma_dr_wav_metadata_type_none = 0, + ma_dr_wav_metadata_type_unknown = 1 << 0, + ma_dr_wav_metadata_type_smpl = 1 << 1, + ma_dr_wav_metadata_type_inst = 1 << 2, + ma_dr_wav_metadata_type_cue = 1 << 3, + ma_dr_wav_metadata_type_acid = 1 << 4, + ma_dr_wav_metadata_type_bext = 1 << 5, + ma_dr_wav_metadata_type_list_label = 1 << 6, + ma_dr_wav_metadata_type_list_note = 1 << 7, + ma_dr_wav_metadata_type_list_labelled_cue_region = 1 << 8, + ma_dr_wav_metadata_type_list_info_software = 1 << 9, + ma_dr_wav_metadata_type_list_info_copyright = 1 << 10, + ma_dr_wav_metadata_type_list_info_title = 1 << 11, + ma_dr_wav_metadata_type_list_info_artist = 1 << 12, + ma_dr_wav_metadata_type_list_info_comment = 1 << 13, + ma_dr_wav_metadata_type_list_info_date = 1 << 14, + ma_dr_wav_metadata_type_list_info_genre = 1 << 15, + ma_dr_wav_metadata_type_list_info_album = 1 << 16, + ma_dr_wav_metadata_type_list_info_tracknumber = 1 << 17, + ma_dr_wav_metadata_type_list_all_info_strings = ma_dr_wav_metadata_type_list_info_software + | ma_dr_wav_metadata_type_list_info_copyright + | ma_dr_wav_metadata_type_list_info_title + | ma_dr_wav_metadata_type_list_info_artist + | 
ma_dr_wav_metadata_type_list_info_comment + | ma_dr_wav_metadata_type_list_info_date + | ma_dr_wav_metadata_type_list_info_genre + | ma_dr_wav_metadata_type_list_info_album + | ma_dr_wav_metadata_type_list_info_tracknumber, + ma_dr_wav_metadata_type_list_all_adtl = ma_dr_wav_metadata_type_list_label + | ma_dr_wav_metadata_type_list_note + | ma_dr_wav_metadata_type_list_labelled_cue_region, + ma_dr_wav_metadata_type_all = -2, + ma_dr_wav_metadata_type_all_including_unknown = -1 +} ma_dr_wav_metadata_type; +typedef enum +{ + ma_dr_wav_smpl_loop_type_forward = 0, + ma_dr_wav_smpl_loop_type_pingpong = 1, + ma_dr_wav_smpl_loop_type_backward = 2 +} ma_dr_wav_smpl_loop_type; +typedef struct +{ + ma_uint32 cuePointId; + ma_uint32 type; + ma_uint32 firstSampleByteOffset; + ma_uint32 lastSampleByteOffset; + ma_uint32 sampleFraction; + ma_uint32 playCount; +} ma_dr_wav_smpl_loop; +typedef struct +{ + ma_uint32 manufacturerId; + ma_uint32 productId; + ma_uint32 samplePeriodNanoseconds; + ma_uint32 midiUnityNote; + ma_uint32 midiPitchFraction; + ma_uint32 smpteFormat; + ma_uint32 smpteOffset; + ma_uint32 sampleLoopCount; + ma_uint32 samplerSpecificDataSizeInBytes; + ma_dr_wav_smpl_loop* pLoops; + ma_uint8* pSamplerSpecificData; +} ma_dr_wav_smpl; +typedef struct +{ + ma_int8 midiUnityNote; + ma_int8 fineTuneCents; + ma_int8 gainDecibels; + ma_int8 lowNote; + ma_int8 highNote; + ma_int8 lowVelocity; + ma_int8 highVelocity; +} ma_dr_wav_inst; +typedef struct +{ + ma_uint32 id; + ma_uint32 playOrderPosition; + ma_uint8 dataChunkId[4]; + ma_uint32 chunkStart; + ma_uint32 blockStart; + ma_uint32 sampleByteOffset; +} ma_dr_wav_cue_point; +typedef struct +{ + ma_uint32 cuePointCount; + ma_dr_wav_cue_point *pCuePoints; +} ma_dr_wav_cue; +typedef enum +{ + ma_dr_wav_acid_flag_one_shot = 1, + ma_dr_wav_acid_flag_root_note_set = 2, + ma_dr_wav_acid_flag_stretch = 4, + ma_dr_wav_acid_flag_disk_based = 8, + ma_dr_wav_acid_flag_acidizer = 16 +} ma_dr_wav_acid_flag; +typedef struct +{ + ma_uint32 flags; + ma_uint16 midiUnityNote; + ma_uint16 reserved1; + float reserved2; + ma_uint32 numBeats; + ma_uint16 meterDenominator; + ma_uint16 meterNumerator; + float tempo; +} ma_dr_wav_acid; +typedef struct +{ + ma_uint32 cuePointId; + ma_uint32 stringLength; + char* pString; +} ma_dr_wav_list_label_or_note; +typedef struct +{ + char* pDescription; + char* pOriginatorName; + char* pOriginatorReference; + char pOriginationDate[10]; + char pOriginationTime[8]; + ma_uint64 timeReference; + ma_uint16 version; + char* pCodingHistory; + ma_uint32 codingHistorySize; + ma_uint8* pUMID; + ma_uint16 loudnessValue; + ma_uint16 loudnessRange; + ma_uint16 maxTruePeakLevel; + ma_uint16 maxMomentaryLoudness; + ma_uint16 maxShortTermLoudness; +} ma_dr_wav_bext; +typedef struct +{ + ma_uint32 stringLength; + char* pString; +} ma_dr_wav_list_info_text; +typedef struct +{ + ma_uint32 cuePointId; + ma_uint32 sampleLength; + ma_uint8 purposeId[4]; + ma_uint16 country; + ma_uint16 language; + ma_uint16 dialect; + ma_uint16 codePage; + ma_uint32 stringLength; + char* pString; +} ma_dr_wav_list_labelled_cue_region; +typedef enum +{ + ma_dr_wav_metadata_location_invalid, + ma_dr_wav_metadata_location_top_level, + ma_dr_wav_metadata_location_inside_info_list, + ma_dr_wav_metadata_location_inside_adtl_list +} ma_dr_wav_metadata_location; +typedef struct +{ + ma_uint8 id[4]; + ma_dr_wav_metadata_location chunkLocation; + ma_uint32 dataSizeInBytes; + ma_uint8* pData; +} ma_dr_wav_unknown_metadata; +typedef struct +{ + ma_dr_wav_metadata_type type; + 
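/* The type above determines which member of the union below contains the valid data. */ +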
union + { + ma_dr_wav_cue cue; + ma_dr_wav_smpl smpl; + ma_dr_wav_acid acid; + ma_dr_wav_inst inst; + ma_dr_wav_bext bext; + ma_dr_wav_list_label_or_note labelOrNote; + ma_dr_wav_list_labelled_cue_region labelledCueRegion; + ma_dr_wav_list_info_text infoText; + ma_dr_wav_unknown_metadata unknown; + } data; +} ma_dr_wav_metadata; +typedef struct +{ + ma_dr_wav_read_proc onRead; + ma_dr_wav_write_proc onWrite; + ma_dr_wav_seek_proc onSeek; + void* pUserData; + ma_allocation_callbacks allocationCallbacks; + ma_dr_wav_container container; + ma_dr_wav_fmt fmt; + ma_uint32 sampleRate; + ma_uint16 channels; + ma_uint16 bitsPerSample; + ma_uint16 translatedFormatTag; + ma_uint64 totalPCMFrameCount; + ma_uint64 dataChunkDataSize; + ma_uint64 dataChunkDataPos; + ma_uint64 bytesRemaining; + ma_uint64 readCursorInPCMFrames; + ma_uint64 dataChunkDataSizeTargetWrite; + ma_bool32 isSequentialWrite; + ma_dr_wav_metadata* pMetadata; + ma_uint32 metadataCount; + ma_dr_wav__memory_stream memoryStream; + ma_dr_wav__memory_stream_write memoryStreamWrite; + struct + { + ma_uint32 bytesRemainingInBlock; + ma_uint16 predictor[2]; + ma_int32 delta[2]; + ma_int32 cachedFrames[4]; + ma_uint32 cachedFrameCount; + ma_int32 prevFrames[2][2]; + } msadpcm; + struct + { + ma_uint32 bytesRemainingInBlock; + ma_int32 predictor[2]; + ma_int32 stepIndex[2]; + ma_int32 cachedFrames[16]; + ma_uint32 cachedFrameCount; + } ima; + struct + { + ma_bool8 isLE; + ma_bool8 isUnsigned; + } aiff; +} ma_dr_wav; +MA_API ma_bool32 ma_dr_wav_init(ma_dr_wav* pWav, ma_dr_wav_read_proc onRead, ma_dr_wav_seek_proc onSeek, void* pUserData, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API ma_bool32 ma_dr_wav_init_ex(ma_dr_wav* pWav, ma_dr_wav_read_proc onRead, ma_dr_wav_seek_proc onSeek, ma_dr_wav_chunk_proc onChunk, void* pReadSeekUserData, void* pChunkUserData, ma_uint32 flags, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API ma_bool32 ma_dr_wav_init_with_metadata(ma_dr_wav* pWav, ma_dr_wav_read_proc onRead, ma_dr_wav_seek_proc onSeek, void* pUserData, ma_uint32 flags, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API ma_bool32 ma_dr_wav_init_write(ma_dr_wav* pWav, const ma_dr_wav_data_format* pFormat, ma_dr_wav_write_proc onWrite, ma_dr_wav_seek_proc onSeek, void* pUserData, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API ma_bool32 ma_dr_wav_init_write_sequential(ma_dr_wav* pWav, const ma_dr_wav_data_format* pFormat, ma_uint64 totalSampleCount, ma_dr_wav_write_proc onWrite, void* pUserData, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API ma_bool32 ma_dr_wav_init_write_sequential_pcm_frames(ma_dr_wav* pWav, const ma_dr_wav_data_format* pFormat, ma_uint64 totalPCMFrameCount, ma_dr_wav_write_proc onWrite, void* pUserData, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API ma_bool32 ma_dr_wav_init_write_with_metadata(ma_dr_wav* pWav, const ma_dr_wav_data_format* pFormat, ma_dr_wav_write_proc onWrite, ma_dr_wav_seek_proc onSeek, void* pUserData, const ma_allocation_callbacks* pAllocationCallbacks, ma_dr_wav_metadata* pMetadata, ma_uint32 metadataCount); +MA_API ma_uint64 ma_dr_wav_target_write_size_bytes(const ma_dr_wav_data_format* pFormat, ma_uint64 totalFrameCount, ma_dr_wav_metadata* pMetadata, ma_uint32 metadataCount); +MA_API ma_dr_wav_metadata* ma_dr_wav_take_ownership_of_metadata(ma_dr_wav* pWav); +MA_API ma_result ma_dr_wav_uninit(ma_dr_wav* pWav); +MA_API size_t ma_dr_wav_read_raw(ma_dr_wav* pWav, size_t bytesToRead, void* pBufferOut); +MA_API ma_uint64 
ma_dr_wav_read_pcm_frames(ma_dr_wav* pWav, ma_uint64 framesToRead, void* pBufferOut); +MA_API ma_uint64 ma_dr_wav_read_pcm_frames_le(ma_dr_wav* pWav, ma_uint64 framesToRead, void* pBufferOut); +MA_API ma_uint64 ma_dr_wav_read_pcm_frames_be(ma_dr_wav* pWav, ma_uint64 framesToRead, void* pBufferOut); +MA_API ma_bool32 ma_dr_wav_seek_to_pcm_frame(ma_dr_wav* pWav, ma_uint64 targetFrameIndex); +MA_API ma_result ma_dr_wav_get_cursor_in_pcm_frames(ma_dr_wav* pWav, ma_uint64* pCursor); +MA_API ma_result ma_dr_wav_get_length_in_pcm_frames(ma_dr_wav* pWav, ma_uint64* pLength); +MA_API size_t ma_dr_wav_write_raw(ma_dr_wav* pWav, size_t bytesToWrite, const void* pData); +MA_API ma_uint64 ma_dr_wav_write_pcm_frames(ma_dr_wav* pWav, ma_uint64 framesToWrite, const void* pData); +MA_API ma_uint64 ma_dr_wav_write_pcm_frames_le(ma_dr_wav* pWav, ma_uint64 framesToWrite, const void* pData); +MA_API ma_uint64 ma_dr_wav_write_pcm_frames_be(ma_dr_wav* pWav, ma_uint64 framesToWrite, const void* pData); +#ifndef MA_DR_WAV_NO_CONVERSION_API +MA_API ma_uint64 ma_dr_wav_read_pcm_frames_s16(ma_dr_wav* pWav, ma_uint64 framesToRead, ma_int16* pBufferOut); +MA_API ma_uint64 ma_dr_wav_read_pcm_frames_s16le(ma_dr_wav* pWav, ma_uint64 framesToRead, ma_int16* pBufferOut); +MA_API ma_uint64 ma_dr_wav_read_pcm_frames_s16be(ma_dr_wav* pWav, ma_uint64 framesToRead, ma_int16* pBufferOut); +MA_API void ma_dr_wav_u8_to_s16(ma_int16* pOut, const ma_uint8* pIn, size_t sampleCount); +MA_API void ma_dr_wav_s24_to_s16(ma_int16* pOut, const ma_uint8* pIn, size_t sampleCount); +MA_API void ma_dr_wav_s32_to_s16(ma_int16* pOut, const ma_int32* pIn, size_t sampleCount); +MA_API void ma_dr_wav_f32_to_s16(ma_int16* pOut, const float* pIn, size_t sampleCount); +MA_API void ma_dr_wav_f64_to_s16(ma_int16* pOut, const double* pIn, size_t sampleCount); +MA_API void ma_dr_wav_alaw_to_s16(ma_int16* pOut, const ma_uint8* pIn, size_t sampleCount); +MA_API void ma_dr_wav_mulaw_to_s16(ma_int16* pOut, const ma_uint8* pIn, size_t sampleCount); +MA_API ma_uint64 ma_dr_wav_read_pcm_frames_f32(ma_dr_wav* pWav, ma_uint64 framesToRead, float* pBufferOut); +MA_API ma_uint64 ma_dr_wav_read_pcm_frames_f32le(ma_dr_wav* pWav, ma_uint64 framesToRead, float* pBufferOut); +MA_API ma_uint64 ma_dr_wav_read_pcm_frames_f32be(ma_dr_wav* pWav, ma_uint64 framesToRead, float* pBufferOut); +MA_API void ma_dr_wav_u8_to_f32(float* pOut, const ma_uint8* pIn, size_t sampleCount); +MA_API void ma_dr_wav_s16_to_f32(float* pOut, const ma_int16* pIn, size_t sampleCount); +MA_API void ma_dr_wav_s24_to_f32(float* pOut, const ma_uint8* pIn, size_t sampleCount); +MA_API void ma_dr_wav_s32_to_f32(float* pOut, const ma_int32* pIn, size_t sampleCount); +MA_API void ma_dr_wav_f64_to_f32(float* pOut, const double* pIn, size_t sampleCount); +MA_API void ma_dr_wav_alaw_to_f32(float* pOut, const ma_uint8* pIn, size_t sampleCount); +MA_API void ma_dr_wav_mulaw_to_f32(float* pOut, const ma_uint8* pIn, size_t sampleCount); +MA_API ma_uint64 ma_dr_wav_read_pcm_frames_s32(ma_dr_wav* pWav, ma_uint64 framesToRead, ma_int32* pBufferOut); +MA_API ma_uint64 ma_dr_wav_read_pcm_frames_s32le(ma_dr_wav* pWav, ma_uint64 framesToRead, ma_int32* pBufferOut); +MA_API ma_uint64 ma_dr_wav_read_pcm_frames_s32be(ma_dr_wav* pWav, ma_uint64 framesToRead, ma_int32* pBufferOut); +MA_API void ma_dr_wav_u8_to_s32(ma_int32* pOut, const ma_uint8* pIn, size_t sampleCount); +MA_API void ma_dr_wav_s16_to_s32(ma_int32* pOut, const ma_int16* pIn, size_t sampleCount); +MA_API void ma_dr_wav_s24_to_s32(ma_int32* pOut, const ma_uint8* 
pIn, size_t sampleCount); +MA_API void ma_dr_wav_f32_to_s32(ma_int32* pOut, const float* pIn, size_t sampleCount); +MA_API void ma_dr_wav_f64_to_s32(ma_int32* pOut, const double* pIn, size_t sampleCount); +MA_API void ma_dr_wav_alaw_to_s32(ma_int32* pOut, const ma_uint8* pIn, size_t sampleCount); +MA_API void ma_dr_wav_mulaw_to_s32(ma_int32* pOut, const ma_uint8* pIn, size_t sampleCount); +#endif +#ifndef MA_DR_WAV_NO_STDIO +MA_API ma_bool32 ma_dr_wav_init_file(ma_dr_wav* pWav, const char* filename, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API ma_bool32 ma_dr_wav_init_file_ex(ma_dr_wav* pWav, const char* filename, ma_dr_wav_chunk_proc onChunk, void* pChunkUserData, ma_uint32 flags, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API ma_bool32 ma_dr_wav_init_file_w(ma_dr_wav* pWav, const wchar_t* filename, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API ma_bool32 ma_dr_wav_init_file_ex_w(ma_dr_wav* pWav, const wchar_t* filename, ma_dr_wav_chunk_proc onChunk, void* pChunkUserData, ma_uint32 flags, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API ma_bool32 ma_dr_wav_init_file_with_metadata(ma_dr_wav* pWav, const char* filename, ma_uint32 flags, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API ma_bool32 ma_dr_wav_init_file_with_metadata_w(ma_dr_wav* pWav, const wchar_t* filename, ma_uint32 flags, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API ma_bool32 ma_dr_wav_init_file_write(ma_dr_wav* pWav, const char* filename, const ma_dr_wav_data_format* pFormat, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API ma_bool32 ma_dr_wav_init_file_write_sequential(ma_dr_wav* pWav, const char* filename, const ma_dr_wav_data_format* pFormat, ma_uint64 totalSampleCount, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API ma_bool32 ma_dr_wav_init_file_write_sequential_pcm_frames(ma_dr_wav* pWav, const char* filename, const ma_dr_wav_data_format* pFormat, ma_uint64 totalPCMFrameCount, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API ma_bool32 ma_dr_wav_init_file_write_w(ma_dr_wav* pWav, const wchar_t* filename, const ma_dr_wav_data_format* pFormat, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API ma_bool32 ma_dr_wav_init_file_write_sequential_w(ma_dr_wav* pWav, const wchar_t* filename, const ma_dr_wav_data_format* pFormat, ma_uint64 totalSampleCount, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API ma_bool32 ma_dr_wav_init_file_write_sequential_pcm_frames_w(ma_dr_wav* pWav, const wchar_t* filename, const ma_dr_wav_data_format* pFormat, ma_uint64 totalPCMFrameCount, const ma_allocation_callbacks* pAllocationCallbacks); +#endif +MA_API ma_bool32 ma_dr_wav_init_memory(ma_dr_wav* pWav, const void* data, size_t dataSize, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API ma_bool32 ma_dr_wav_init_memory_ex(ma_dr_wav* pWav, const void* data, size_t dataSize, ma_dr_wav_chunk_proc onChunk, void* pChunkUserData, ma_uint32 flags, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API ma_bool32 ma_dr_wav_init_memory_with_metadata(ma_dr_wav* pWav, const void* data, size_t dataSize, ma_uint32 flags, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API ma_bool32 ma_dr_wav_init_memory_write(ma_dr_wav* pWav, void** ppData, size_t* pDataSize, const ma_dr_wav_data_format* pFormat, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API ma_bool32 ma_dr_wav_init_memory_write_sequential(ma_dr_wav* pWav, void** ppData, size_t* pDataSize, const 
ma_dr_wav_data_format* pFormat, ma_uint64 totalSampleCount, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API ma_bool32 ma_dr_wav_init_memory_write_sequential_pcm_frames(ma_dr_wav* pWav, void** ppData, size_t* pDataSize, const ma_dr_wav_data_format* pFormat, ma_uint64 totalPCMFrameCount, const ma_allocation_callbacks* pAllocationCallbacks); +#ifndef MA_DR_WAV_NO_CONVERSION_API +MA_API ma_int16* ma_dr_wav_open_and_read_pcm_frames_s16(ma_dr_wav_read_proc onRead, ma_dr_wav_seek_proc onSeek, void* pUserData, unsigned int* channelsOut, unsigned int* sampleRateOut, ma_uint64* totalFrameCountOut, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API float* ma_dr_wav_open_and_read_pcm_frames_f32(ma_dr_wav_read_proc onRead, ma_dr_wav_seek_proc onSeek, void* pUserData, unsigned int* channelsOut, unsigned int* sampleRateOut, ma_uint64* totalFrameCountOut, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API ma_int32* ma_dr_wav_open_and_read_pcm_frames_s32(ma_dr_wav_read_proc onRead, ma_dr_wav_seek_proc onSeek, void* pUserData, unsigned int* channelsOut, unsigned int* sampleRateOut, ma_uint64* totalFrameCountOut, const ma_allocation_callbacks* pAllocationCallbacks); +#ifndef MA_DR_WAV_NO_STDIO +MA_API ma_int16* ma_dr_wav_open_file_and_read_pcm_frames_s16(const char* filename, unsigned int* channelsOut, unsigned int* sampleRateOut, ma_uint64* totalFrameCountOut, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API float* ma_dr_wav_open_file_and_read_pcm_frames_f32(const char* filename, unsigned int* channelsOut, unsigned int* sampleRateOut, ma_uint64* totalFrameCountOut, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API ma_int32* ma_dr_wav_open_file_and_read_pcm_frames_s32(const char* filename, unsigned int* channelsOut, unsigned int* sampleRateOut, ma_uint64* totalFrameCountOut, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API ma_int16* ma_dr_wav_open_file_and_read_pcm_frames_s16_w(const wchar_t* filename, unsigned int* channelsOut, unsigned int* sampleRateOut, ma_uint64* totalFrameCountOut, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API float* ma_dr_wav_open_file_and_read_pcm_frames_f32_w(const wchar_t* filename, unsigned int* channelsOut, unsigned int* sampleRateOut, ma_uint64* totalFrameCountOut, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API ma_int32* ma_dr_wav_open_file_and_read_pcm_frames_s32_w(const wchar_t* filename, unsigned int* channelsOut, unsigned int* sampleRateOut, ma_uint64* totalFrameCountOut, const ma_allocation_callbacks* pAllocationCallbacks); +#endif +MA_API ma_int16* ma_dr_wav_open_memory_and_read_pcm_frames_s16(const void* data, size_t dataSize, unsigned int* channelsOut, unsigned int* sampleRateOut, ma_uint64* totalFrameCountOut, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API float* ma_dr_wav_open_memory_and_read_pcm_frames_f32(const void* data, size_t dataSize, unsigned int* channelsOut, unsigned int* sampleRateOut, ma_uint64* totalFrameCountOut, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API ma_int32* ma_dr_wav_open_memory_and_read_pcm_frames_s32(const void* data, size_t dataSize, unsigned int* channelsOut, unsigned int* sampleRateOut, ma_uint64* totalFrameCountOut, const ma_allocation_callbacks* pAllocationCallbacks); +#endif +MA_API void ma_dr_wav_free(void* p, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API ma_uint16 ma_dr_wav_bytes_to_u16(const ma_uint8* data); +MA_API ma_int16 ma_dr_wav_bytes_to_s16(const ma_uint8* data); +MA_API 
ma_uint32 ma_dr_wav_bytes_to_u32(const ma_uint8* data); +MA_API ma_int32 ma_dr_wav_bytes_to_s32(const ma_uint8* data); +MA_API ma_uint64 ma_dr_wav_bytes_to_u64(const ma_uint8* data); +MA_API ma_int64 ma_dr_wav_bytes_to_s64(const ma_uint8* data); +MA_API float ma_dr_wav_bytes_to_f32(const ma_uint8* data); +MA_API ma_bool32 ma_dr_wav_guid_equal(const ma_uint8 a[16], const ma_uint8 b[16]); +MA_API ma_bool32 ma_dr_wav_fourcc_equal(const ma_uint8* a, const char* b); +#ifdef __cplusplus +} +#endif +#endif +/* dr_wav_h end */ +#endif /* MA_NO_WAV */ + +#if !defined(MA_NO_FLAC) && !defined(MA_NO_DECODING) +/* dr_flac_h begin */ +#ifndef ma_dr_flac_h +#define ma_dr_flac_h +#ifdef __cplusplus +extern "C" { +#endif +#define MA_DR_FLAC_STRINGIFY(x) #x +#define MA_DR_FLAC_XSTRINGIFY(x) MA_DR_FLAC_STRINGIFY(x) +#define MA_DR_FLAC_VERSION_MAJOR 0 +#define MA_DR_FLAC_VERSION_MINOR 12 +#define MA_DR_FLAC_VERSION_REVISION 43 +#define MA_DR_FLAC_VERSION_STRING MA_DR_FLAC_XSTRINGIFY(MA_DR_FLAC_VERSION_MAJOR) "." MA_DR_FLAC_XSTRINGIFY(MA_DR_FLAC_VERSION_MINOR) "." MA_DR_FLAC_XSTRINGIFY(MA_DR_FLAC_VERSION_REVISION) +#include <stddef.h> +#if defined(_MSC_VER) && _MSC_VER >= 1700 + #define MA_DR_FLAC_DEPRECATED __declspec(deprecated) +#elif (defined(__GNUC__) && __GNUC__ >= 4) + #define MA_DR_FLAC_DEPRECATED __attribute__((deprecated)) +#elif defined(__has_feature) + #if __has_feature(attribute_deprecated) + #define MA_DR_FLAC_DEPRECATED __attribute__((deprecated)) + #else + #define MA_DR_FLAC_DEPRECATED + #endif +#else + #define MA_DR_FLAC_DEPRECATED +#endif +MA_API void ma_dr_flac_version(ma_uint32* pMajor, ma_uint32* pMinor, ma_uint32* pRevision); +MA_API const char* ma_dr_flac_version_string(void); +#ifndef MA_DR_FLAC_BUFFER_SIZE +#define MA_DR_FLAC_BUFFER_SIZE 4096 +#endif +#ifdef MA_64BIT +typedef ma_uint64 ma_dr_flac_cache_t; +#else +typedef ma_uint32 ma_dr_flac_cache_t; +#endif +#define MA_DR_FLAC_METADATA_BLOCK_TYPE_STREAMINFO 0 +#define MA_DR_FLAC_METADATA_BLOCK_TYPE_PADDING 1 +#define MA_DR_FLAC_METADATA_BLOCK_TYPE_APPLICATION 2 +#define MA_DR_FLAC_METADATA_BLOCK_TYPE_SEEKTABLE 3 +#define MA_DR_FLAC_METADATA_BLOCK_TYPE_VORBIS_COMMENT 4 +#define MA_DR_FLAC_METADATA_BLOCK_TYPE_CUESHEET 5 +#define MA_DR_FLAC_METADATA_BLOCK_TYPE_PICTURE 6 +#define MA_DR_FLAC_METADATA_BLOCK_TYPE_INVALID 127 +#define MA_DR_FLAC_PICTURE_TYPE_OTHER 0 +#define MA_DR_FLAC_PICTURE_TYPE_FILE_ICON 1 +#define MA_DR_FLAC_PICTURE_TYPE_OTHER_FILE_ICON 2 +#define MA_DR_FLAC_PICTURE_TYPE_COVER_FRONT 3 +#define MA_DR_FLAC_PICTURE_TYPE_COVER_BACK 4 +#define MA_DR_FLAC_PICTURE_TYPE_LEAFLET_PAGE 5 +#define MA_DR_FLAC_PICTURE_TYPE_MEDIA 6 +#define MA_DR_FLAC_PICTURE_TYPE_LEAD_ARTIST 7 +#define MA_DR_FLAC_PICTURE_TYPE_ARTIST 8 +#define MA_DR_FLAC_PICTURE_TYPE_CONDUCTOR 9 +#define MA_DR_FLAC_PICTURE_TYPE_BAND 10 +#define MA_DR_FLAC_PICTURE_TYPE_COMPOSER 11 +#define MA_DR_FLAC_PICTURE_TYPE_LYRICIST 12 +#define MA_DR_FLAC_PICTURE_TYPE_RECORDING_LOCATION 13 +#define MA_DR_FLAC_PICTURE_TYPE_DURING_RECORDING 14 +#define MA_DR_FLAC_PICTURE_TYPE_DURING_PERFORMANCE 15 +#define MA_DR_FLAC_PICTURE_TYPE_SCREEN_CAPTURE 16 +#define MA_DR_FLAC_PICTURE_TYPE_BRIGHT_COLORED_FISH 17 +#define MA_DR_FLAC_PICTURE_TYPE_ILLUSTRATION 18 +#define MA_DR_FLAC_PICTURE_TYPE_BAND_LOGOTYPE 19 +#define MA_DR_FLAC_PICTURE_TYPE_PUBLISHER_LOGOTYPE 20 +typedef enum +{ + ma_dr_flac_container_native, + ma_dr_flac_container_ogg, + ma_dr_flac_container_unknown +} ma_dr_flac_container; +typedef enum +{ + ma_dr_flac_seek_origin_start, + ma_dr_flac_seek_origin_current +} ma_dr_flac_seek_origin;
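For review context: the ma_dr_flac API vendored here is pull-style, so a caller opens a decoder (from a file, memory, or read/seek callbacks), pulls interleaved PCM frames, and frees whatever buffer it was handed. A minimal sketch against the declarations in this header (the one-shot open_file_and_read helpers and ma_dr_flac_free appear just below); the file path is illustrative and error handling is kept to the bare minimum:

/* Sketch: decode an entire FLAC file to interleaved f32 PCM in one call.
   Assumes the surrounding vendored header has been included. */
#include <stdio.h>

static int decode_flac_example(const char* pPath)
{
    unsigned int channels;
    unsigned int sampleRate;
    ma_uint64 totalPCMFrameCount;
    float* pFrames;

    /* NULL allocation callbacks select the library defaults. */
    pFrames = ma_dr_flac_open_file_and_read_pcm_frames_f32(pPath, &channels, &sampleRate, &totalPCMFrameCount, NULL);
    if (pFrames == NULL) {
        return -1; /* Not a FLAC stream, or an I/O failure. */
    }

    printf("%u channels, %u Hz, %llu PCM frames\n", channels, sampleRate, (unsigned long long)totalPCMFrameCount);

    ma_dr_flac_free(pFrames, NULL); /* The returned buffer must be released with ma_dr_flac_free. */
    return 0;
}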
+typedef struct +{ + ma_uint64 firstPCMFrame; + ma_uint64 flacFrameOffset; + ma_uint16 pcmFrameCount; +} ma_dr_flac_seekpoint; +typedef struct +{ + ma_uint16 minBlockSizeInPCMFrames; + ma_uint16 maxBlockSizeInPCMFrames; + ma_uint32 minFrameSizeInPCMFrames; + ma_uint32 maxFrameSizeInPCMFrames; + ma_uint32 sampleRate; + ma_uint8 channels; + ma_uint8 bitsPerSample; + ma_uint64 totalPCMFrameCount; + ma_uint8 md5[16]; +} ma_dr_flac_streaminfo; +typedef struct +{ + ma_uint32 type; + const void* pRawData; + ma_uint32 rawDataSize; + union + { + ma_dr_flac_streaminfo streaminfo; + struct + { + int unused; + } padding; + struct + { + ma_uint32 id; + const void* pData; + ma_uint32 dataSize; + } application; + struct + { + ma_uint32 seekpointCount; + const ma_dr_flac_seekpoint* pSeekpoints; + } seektable; + struct + { + ma_uint32 vendorLength; + const char* vendor; + ma_uint32 commentCount; + const void* pComments; + } vorbis_comment; + struct + { + char catalog[128]; + ma_uint64 leadInSampleCount; + ma_bool32 isCD; + ma_uint8 trackCount; + const void* pTrackData; + } cuesheet; + struct + { + ma_uint32 type; + ma_uint32 mimeLength; + const char* mime; + ma_uint32 descriptionLength; + const char* description; + ma_uint32 width; + ma_uint32 height; + ma_uint32 colorDepth; + ma_uint32 indexColorCount; + ma_uint32 pictureDataSize; + const ma_uint8* pPictureData; + } picture; + } data; +} ma_dr_flac_metadata; +typedef size_t (* ma_dr_flac_read_proc)(void* pUserData, void* pBufferOut, size_t bytesToRead); +typedef ma_bool32 (* ma_dr_flac_seek_proc)(void* pUserData, int offset, ma_dr_flac_seek_origin origin); +typedef void (* ma_dr_flac_meta_proc)(void* pUserData, ma_dr_flac_metadata* pMetadata); +typedef struct +{ + const ma_uint8* data; + size_t dataSize; + size_t currentReadPos; +} ma_dr_flac__memory_stream; +typedef struct +{ + ma_dr_flac_read_proc onRead; + ma_dr_flac_seek_proc onSeek; + void* pUserData; + size_t unalignedByteCount; + ma_dr_flac_cache_t unalignedCache; + ma_uint32 nextL2Line; + ma_uint32 consumedBits; + ma_dr_flac_cache_t cacheL2[MA_DR_FLAC_BUFFER_SIZE/sizeof(ma_dr_flac_cache_t)]; + ma_dr_flac_cache_t cache; + ma_uint16 crc16; + ma_dr_flac_cache_t crc16Cache; + ma_uint32 crc16CacheIgnoredBytes; +} ma_dr_flac_bs; +typedef struct +{ + ma_uint8 subframeType; + ma_uint8 wastedBitsPerSample; + ma_uint8 lpcOrder; + ma_int32* pSamplesS32; +} ma_dr_flac_subframe; +typedef struct +{ + ma_uint64 pcmFrameNumber; + ma_uint32 flacFrameNumber; + ma_uint32 sampleRate; + ma_uint16 blockSizeInPCMFrames; + ma_uint8 channelAssignment; + ma_uint8 bitsPerSample; + ma_uint8 crc8; +} ma_dr_flac_frame_header; +typedef struct +{ + ma_dr_flac_frame_header header; + ma_uint32 pcmFramesRemaining; + ma_dr_flac_subframe subframes[8]; +} ma_dr_flac_frame; +typedef struct +{ + ma_dr_flac_meta_proc onMeta; + void* pUserDataMD; + ma_allocation_callbacks allocationCallbacks; + ma_uint32 sampleRate; + ma_uint8 channels; + ma_uint8 bitsPerSample; + ma_uint16 maxBlockSizeInPCMFrames; + ma_uint64 totalPCMFrameCount; + ma_dr_flac_container container; + ma_uint32 seekpointCount; + ma_dr_flac_frame currentFLACFrame; + ma_uint64 currentPCMFrame; + ma_uint64 firstFLACFramePosInBytes; + ma_dr_flac__memory_stream memoryStream; + ma_int32* pDecodedSamples; + ma_dr_flac_seekpoint* pSeekpoints; + void* _oggbs; + ma_bool32 _noSeekTableSeek : 1; + ma_bool32 _noBinarySearchSeek : 1; + ma_bool32 _noBruteForceSeek : 1; + ma_dr_flac_bs bs; + ma_uint8 pExtraData[1]; +} ma_dr_flac; +MA_API ma_dr_flac* ma_dr_flac_open(ma_dr_flac_read_proc 
onRead, ma_dr_flac_seek_proc onSeek, void* pUserData, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API ma_dr_flac* ma_dr_flac_open_relaxed(ma_dr_flac_read_proc onRead, ma_dr_flac_seek_proc onSeek, ma_dr_flac_container container, void* pUserData, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API ma_dr_flac* ma_dr_flac_open_with_metadata(ma_dr_flac_read_proc onRead, ma_dr_flac_seek_proc onSeek, ma_dr_flac_meta_proc onMeta, void* pUserData, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API ma_dr_flac* ma_dr_flac_open_with_metadata_relaxed(ma_dr_flac_read_proc onRead, ma_dr_flac_seek_proc onSeek, ma_dr_flac_meta_proc onMeta, ma_dr_flac_container container, void* pUserData, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API void ma_dr_flac_close(ma_dr_flac* pFlac); +MA_API ma_uint64 ma_dr_flac_read_pcm_frames_s32(ma_dr_flac* pFlac, ma_uint64 framesToRead, ma_int32* pBufferOut); +MA_API ma_uint64 ma_dr_flac_read_pcm_frames_s16(ma_dr_flac* pFlac, ma_uint64 framesToRead, ma_int16* pBufferOut); +MA_API ma_uint64 ma_dr_flac_read_pcm_frames_f32(ma_dr_flac* pFlac, ma_uint64 framesToRead, float* pBufferOut); +MA_API ma_bool32 ma_dr_flac_seek_to_pcm_frame(ma_dr_flac* pFlac, ma_uint64 pcmFrameIndex); +#ifndef MA_DR_FLAC_NO_STDIO +MA_API ma_dr_flac* ma_dr_flac_open_file(const char* pFileName, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API ma_dr_flac* ma_dr_flac_open_file_w(const wchar_t* pFileName, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API ma_dr_flac* ma_dr_flac_open_file_with_metadata(const char* pFileName, ma_dr_flac_meta_proc onMeta, void* pUserData, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API ma_dr_flac* ma_dr_flac_open_file_with_metadata_w(const wchar_t* pFileName, ma_dr_flac_meta_proc onMeta, void* pUserData, const ma_allocation_callbacks* pAllocationCallbacks); +#endif +MA_API ma_dr_flac* ma_dr_flac_open_memory(const void* pData, size_t dataSize, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API ma_dr_flac* ma_dr_flac_open_memory_with_metadata(const void* pData, size_t dataSize, ma_dr_flac_meta_proc onMeta, void* pUserData, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API ma_int32* ma_dr_flac_open_and_read_pcm_frames_s32(ma_dr_flac_read_proc onRead, ma_dr_flac_seek_proc onSeek, void* pUserData, unsigned int* channels, unsigned int* sampleRate, ma_uint64* totalPCMFrameCount, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API ma_int16* ma_dr_flac_open_and_read_pcm_frames_s16(ma_dr_flac_read_proc onRead, ma_dr_flac_seek_proc onSeek, void* pUserData, unsigned int* channels, unsigned int* sampleRate, ma_uint64* totalPCMFrameCount, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API float* ma_dr_flac_open_and_read_pcm_frames_f32(ma_dr_flac_read_proc onRead, ma_dr_flac_seek_proc onSeek, void* pUserData, unsigned int* channels, unsigned int* sampleRate, ma_uint64* totalPCMFrameCount, const ma_allocation_callbacks* pAllocationCallbacks); +#ifndef MA_DR_FLAC_NO_STDIO +MA_API ma_int32* ma_dr_flac_open_file_and_read_pcm_frames_s32(const char* filename, unsigned int* channels, unsigned int* sampleRate, ma_uint64* totalPCMFrameCount, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API ma_int16* ma_dr_flac_open_file_and_read_pcm_frames_s16(const char* filename, unsigned int* channels, unsigned int* sampleRate, ma_uint64* totalPCMFrameCount, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API float* ma_dr_flac_open_file_and_read_pcm_frames_f32(const 
char* filename, unsigned int* channels, unsigned int* sampleRate, ma_uint64* totalPCMFrameCount, const ma_allocation_callbacks* pAllocationCallbacks); +#endif +MA_API ma_int32* ma_dr_flac_open_memory_and_read_pcm_frames_s32(const void* data, size_t dataSize, unsigned int* channels, unsigned int* sampleRate, ma_uint64* totalPCMFrameCount, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API ma_int16* ma_dr_flac_open_memory_and_read_pcm_frames_s16(const void* data, size_t dataSize, unsigned int* channels, unsigned int* sampleRate, ma_uint64* totalPCMFrameCount, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API float* ma_dr_flac_open_memory_and_read_pcm_frames_f32(const void* data, size_t dataSize, unsigned int* channels, unsigned int* sampleRate, ma_uint64* totalPCMFrameCount, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API void ma_dr_flac_free(void* p, const ma_allocation_callbacks* pAllocationCallbacks); +typedef struct +{ + ma_uint32 countRemaining; + const char* pRunningData; +} ma_dr_flac_vorbis_comment_iterator; +MA_API void ma_dr_flac_init_vorbis_comment_iterator(ma_dr_flac_vorbis_comment_iterator* pIter, ma_uint32 commentCount, const void* pComments); +MA_API const char* ma_dr_flac_next_vorbis_comment(ma_dr_flac_vorbis_comment_iterator* pIter, ma_uint32* pCommentLengthOut); +typedef struct +{ + ma_uint32 countRemaining; + const char* pRunningData; +} ma_dr_flac_cuesheet_track_iterator; +typedef struct +{ + ma_uint64 offset; + ma_uint8 index; + ma_uint8 reserved[3]; +} ma_dr_flac_cuesheet_track_index; +typedef struct +{ + ma_uint64 offset; + ma_uint8 trackNumber; + char ISRC[12]; + ma_bool8 isAudio; + ma_bool8 preEmphasis; + ma_uint8 indexCount; + const ma_dr_flac_cuesheet_track_index* pIndexPoints; +} ma_dr_flac_cuesheet_track; +MA_API void ma_dr_flac_init_cuesheet_track_iterator(ma_dr_flac_cuesheet_track_iterator* pIter, ma_uint32 trackCount, const void* pTrackData); +MA_API ma_bool32 ma_dr_flac_next_cuesheet_track(ma_dr_flac_cuesheet_track_iterator* pIter, ma_dr_flac_cuesheet_track* pCuesheetTrack); +#ifdef __cplusplus +} +#endif +#endif +/* dr_flac_h end */ +#endif /* MA_NO_FLAC */ + +#if !defined(MA_NO_MP3) && !defined(MA_NO_DECODING) +/* dr_mp3_h begin */ +#ifndef ma_dr_mp3_h +#define ma_dr_mp3_h +#ifdef __cplusplus +extern "C" { +#endif +#define MA_DR_MP3_STRINGIFY(x) #x +#define MA_DR_MP3_XSTRINGIFY(x) MA_DR_MP3_STRINGIFY(x) +#define MA_DR_MP3_VERSION_MAJOR 0 +#define MA_DR_MP3_VERSION_MINOR 6 +#define MA_DR_MP3_VERSION_REVISION 40 +#define MA_DR_MP3_VERSION_STRING MA_DR_MP3_XSTRINGIFY(MA_DR_MP3_VERSION_MAJOR) "." MA_DR_MP3_XSTRINGIFY(MA_DR_MP3_VERSION_MINOR) "." 
MA_DR_MP3_XSTRINGIFY(MA_DR_MP3_VERSION_REVISION) +#include <stddef.h> +#define MA_DR_MP3_MAX_PCM_FRAMES_PER_MP3_FRAME 1152 +#define MA_DR_MP3_MAX_SAMPLES_PER_FRAME (MA_DR_MP3_MAX_PCM_FRAMES_PER_MP3_FRAME*2) +MA_API void ma_dr_mp3_version(ma_uint32* pMajor, ma_uint32* pMinor, ma_uint32* pRevision); +MA_API const char* ma_dr_mp3_version_string(void); +typedef struct +{ + int frame_bytes, channels, hz, layer, bitrate_kbps; +} ma_dr_mp3dec_frame_info; +typedef struct +{ + float mdct_overlap[2][9*32], qmf_state[15*2*32]; + int reserv, free_format_bytes; + ma_uint8 header[4], reserv_buf[511]; +} ma_dr_mp3dec; +MA_API void ma_dr_mp3dec_init(ma_dr_mp3dec *dec); +MA_API int ma_dr_mp3dec_decode_frame(ma_dr_mp3dec *dec, const ma_uint8 *mp3, int mp3_bytes, void *pcm, ma_dr_mp3dec_frame_info *info); +MA_API void ma_dr_mp3dec_f32_to_s16(const float *in, ma_int16 *out, size_t num_samples); +typedef enum +{ + ma_dr_mp3_seek_origin_start, + ma_dr_mp3_seek_origin_current +} ma_dr_mp3_seek_origin; +typedef struct +{ + ma_uint64 seekPosInBytes; + ma_uint64 pcmFrameIndex; + ma_uint16 mp3FramesToDiscard; + ma_uint16 pcmFramesToDiscard; +} ma_dr_mp3_seek_point; +typedef size_t (* ma_dr_mp3_read_proc)(void* pUserData, void* pBufferOut, size_t bytesToRead); +typedef ma_bool32 (* ma_dr_mp3_seek_proc)(void* pUserData, int offset, ma_dr_mp3_seek_origin origin); +typedef struct +{ + ma_uint32 channels; + ma_uint32 sampleRate; +} ma_dr_mp3_config; +typedef struct +{ + ma_dr_mp3dec decoder; + ma_uint32 channels; + ma_uint32 sampleRate; + ma_dr_mp3_read_proc onRead; + ma_dr_mp3_seek_proc onSeek; + void* pUserData; + ma_allocation_callbacks allocationCallbacks; + ma_uint32 mp3FrameChannels; + ma_uint32 mp3FrameSampleRate; + ma_uint32 pcmFramesConsumedInMP3Frame; + ma_uint32 pcmFramesRemainingInMP3Frame; + ma_uint8 pcmFrames[sizeof(float)*MA_DR_MP3_MAX_SAMPLES_PER_FRAME]; + ma_uint64 currentPCMFrame; + ma_uint64 streamCursor; + ma_dr_mp3_seek_point* pSeekPoints; + ma_uint32 seekPointCount; + size_t dataSize; + size_t dataCapacity; + size_t dataConsumed; + ma_uint8* pData; + ma_bool32 atEnd : 1; + struct + { + const ma_uint8* pData; + size_t dataSize; + size_t currentReadPos; + } memory; +} ma_dr_mp3; +MA_API ma_bool32 ma_dr_mp3_init(ma_dr_mp3* pMP3, ma_dr_mp3_read_proc onRead, ma_dr_mp3_seek_proc onSeek, void* pUserData, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API ma_bool32 ma_dr_mp3_init_memory(ma_dr_mp3* pMP3, const void* pData, size_t dataSize, const ma_allocation_callbacks* pAllocationCallbacks); +#ifndef MA_DR_MP3_NO_STDIO +MA_API ma_bool32 ma_dr_mp3_init_file(ma_dr_mp3* pMP3, const char* pFilePath, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API ma_bool32 ma_dr_mp3_init_file_w(ma_dr_mp3* pMP3, const wchar_t* pFilePath, const ma_allocation_callbacks* pAllocationCallbacks); +#endif +MA_API void ma_dr_mp3_uninit(ma_dr_mp3* pMP3); +MA_API ma_uint64 ma_dr_mp3_read_pcm_frames_f32(ma_dr_mp3* pMP3, ma_uint64 framesToRead, float* pBufferOut); +MA_API ma_uint64 ma_dr_mp3_read_pcm_frames_s16(ma_dr_mp3* pMP3, ma_uint64 framesToRead, ma_int16* pBufferOut); +MA_API ma_bool32 ma_dr_mp3_seek_to_pcm_frame(ma_dr_mp3* pMP3, ma_uint64 frameIndex); +MA_API ma_uint64 ma_dr_mp3_get_pcm_frame_count(ma_dr_mp3* pMP3); +MA_API ma_uint64 ma_dr_mp3_get_mp3_frame_count(ma_dr_mp3* pMP3); +MA_API ma_bool32 ma_dr_mp3_get_mp3_and_pcm_frame_count(ma_dr_mp3* pMP3, ma_uint64* pMP3FrameCount, ma_uint64* pPCMFrameCount); +MA_API ma_bool32 ma_dr_mp3_calculate_seek_points(ma_dr_mp3* pMP3, ma_uint32* pSeekPointCount,
ma_dr_mp3_seek_point* pSeekPoints); +MA_API ma_bool32 ma_dr_mp3_bind_seek_table(ma_dr_mp3* pMP3, ma_uint32 seekPointCount, ma_dr_mp3_seek_point* pSeekPoints); +MA_API float* ma_dr_mp3_open_and_read_pcm_frames_f32(ma_dr_mp3_read_proc onRead, ma_dr_mp3_seek_proc onSeek, void* pUserData, ma_dr_mp3_config* pConfig, ma_uint64* pTotalFrameCount, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API ma_int16* ma_dr_mp3_open_and_read_pcm_frames_s16(ma_dr_mp3_read_proc onRead, ma_dr_mp3_seek_proc onSeek, void* pUserData, ma_dr_mp3_config* pConfig, ma_uint64* pTotalFrameCount, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API float* ma_dr_mp3_open_memory_and_read_pcm_frames_f32(const void* pData, size_t dataSize, ma_dr_mp3_config* pConfig, ma_uint64* pTotalFrameCount, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API ma_int16* ma_dr_mp3_open_memory_and_read_pcm_frames_s16(const void* pData, size_t dataSize, ma_dr_mp3_config* pConfig, ma_uint64* pTotalFrameCount, const ma_allocation_callbacks* pAllocationCallbacks); +#ifndef MA_DR_MP3_NO_STDIO +MA_API float* ma_dr_mp3_open_file_and_read_pcm_frames_f32(const char* filePath, ma_dr_mp3_config* pConfig, ma_uint64* pTotalFrameCount, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API ma_int16* ma_dr_mp3_open_file_and_read_pcm_frames_s16(const char* filePath, ma_dr_mp3_config* pConfig, ma_uint64* pTotalFrameCount, const ma_allocation_callbacks* pAllocationCallbacks); +#endif +MA_API void* ma_dr_mp3_malloc(size_t sz, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API void ma_dr_mp3_free(void* p, const ma_allocation_callbacks* pAllocationCallbacks); +#ifdef __cplusplus +} +#endif +#endif +/* dr_mp3_h end */ +#endif /* MA_NO_MP3 */ + + +/************************************************************************************************************************************************************** + +Decoding + +**************************************************************************************************************************************************************/ +#ifndef MA_NO_DECODING + +static ma_result ma_decoder_read_bytes(ma_decoder* pDecoder, void* pBufferOut, size_t bytesToRead, size_t* pBytesRead) +{ + MA_ASSERT(pDecoder != NULL); + + return pDecoder->onRead(pDecoder, pBufferOut, bytesToRead, pBytesRead); +} + +static ma_result ma_decoder_seek_bytes(ma_decoder* pDecoder, ma_int64 byteOffset, ma_seek_origin origin) +{ + MA_ASSERT(pDecoder != NULL); + + return pDecoder->onSeek(pDecoder, byteOffset, origin); +} + +static ma_result ma_decoder_tell_bytes(ma_decoder* pDecoder, ma_int64* pCursor) +{ + MA_ASSERT(pDecoder != NULL); + + if (pDecoder->onTell == NULL) { + return MA_NOT_IMPLEMENTED; + } + + return pDecoder->onTell(pDecoder, pCursor); +} + + +MA_API ma_decoding_backend_config ma_decoding_backend_config_init(ma_format preferredFormat, ma_uint32 seekPointCount) +{ + ma_decoding_backend_config config; + + MA_ZERO_OBJECT(&config); + config.preferredFormat = preferredFormat; + config.seekPointCount = seekPointCount; + + return config; +} + + +MA_API ma_decoder_config ma_decoder_config_init(ma_format outputFormat, ma_uint32 outputChannels, ma_uint32 outputSampleRate) +{ + ma_decoder_config config; + MA_ZERO_OBJECT(&config); + config.format = outputFormat; + config.channels = outputChannels; + config.sampleRate = outputSampleRate; + config.resampling = ma_resampler_config_init(ma_format_unknown, 0, 0, 0, ma_resample_algorithm_linear); /* Format/channels/rate doesn't matter here. 
*/ + config.encodingFormat = ma_encoding_format_unknown; + + /* Note that we are intentionally leaving the channel map empty here which will cause the default channel map to be used. */ + + return config; +} + +MA_API ma_decoder_config ma_decoder_config_init_default(void) +{ + return ma_decoder_config_init(ma_format_unknown, 0, 0); +} + +MA_API ma_decoder_config ma_decoder_config_init_copy(const ma_decoder_config* pConfig) +{ + ma_decoder_config config; + if (pConfig != NULL) { + config = *pConfig; + } else { + MA_ZERO_OBJECT(&config); + } + + return config; +} + +static ma_result ma_decoder__init_data_converter(ma_decoder* pDecoder, const ma_decoder_config* pConfig) +{ + ma_result result; + ma_data_converter_config converterConfig; + ma_format internalFormat; + ma_uint32 internalChannels; + ma_uint32 internalSampleRate; + ma_channel internalChannelMap[MA_MAX_CHANNELS]; + + MA_ASSERT(pDecoder != NULL); + MA_ASSERT(pConfig != NULL); + + result = ma_data_source_get_data_format(pDecoder->pBackend, &internalFormat, &internalChannels, &internalSampleRate, internalChannelMap, ma_countof(internalChannelMap)); + if (result != MA_SUCCESS) { + return result; /* Failed to retrieve the internal data format. */ + } + + + /* Make sure we're not asking for too many channels. */ + if (pConfig->channels > MA_MAX_CHANNELS) { + return MA_INVALID_ARGS; + } + + /* The internal channels should have already been validated at a higher level, but we'll do it again explicitly here for safety. */ + if (internalChannels > MA_MAX_CHANNELS) { + return MA_INVALID_ARGS; + } + + + /* Output format. */ + if (pConfig->format == ma_format_unknown) { + pDecoder->outputFormat = internalFormat; + } else { + pDecoder->outputFormat = pConfig->format; + } + + if (pConfig->channels == 0) { + pDecoder->outputChannels = internalChannels; + } else { + pDecoder->outputChannels = pConfig->channels; + } + + if (pConfig->sampleRate == 0) { + pDecoder->outputSampleRate = internalSampleRate; + } else { + pDecoder->outputSampleRate = pConfig->sampleRate; + } + + converterConfig = ma_data_converter_config_init( + internalFormat, pDecoder->outputFormat, + internalChannels, pDecoder->outputChannels, + internalSampleRate, pDecoder->outputSampleRate + ); + converterConfig.pChannelMapIn = internalChannelMap; + converterConfig.pChannelMapOut = pConfig->pChannelMap; + converterConfig.channelMixMode = pConfig->channelMixMode; + converterConfig.ditherMode = pConfig->ditherMode; + converterConfig.allowDynamicSampleRate = MA_FALSE; /* Never allow dynamic sample rate conversion. Setting this to true will disable passthrough optimizations. */ + converterConfig.resampling = pConfig->resampling; + + result = ma_data_converter_init(&converterConfig, &pDecoder->allocationCallbacks, &pDecoder->converter); + if (result != MA_SUCCESS) { + return result; + } + + /* + Now that we have the decoder we need to determine whether or not we need a heap-allocated cache. We'll + need this if the data converter does not support calculation of the required input frame count. To + determine support for this we'll just run a test. + */ + { + ma_uint64 unused; + + result = ma_data_converter_get_required_input_frame_count(&pDecoder->converter, 1, &unused); + if (result != MA_SUCCESS) { + /* + We were unable to calculate the required input frame count which means we'll need to use + a heap-allocated cache. 
+ */ + ma_uint64 inputCacheCapSizeInBytes; + + pDecoder->inputCacheCap = MA_DATA_CONVERTER_STACK_BUFFER_SIZE / ma_get_bytes_per_frame(internalFormat, internalChannels); + + /* Not strictly necessary, but keeping here for safety in case we change the default value of pDecoder->inputCacheCap. */ + inputCacheCapSizeInBytes = pDecoder->inputCacheCap * ma_get_bytes_per_frame(internalFormat, internalChannels); + if (inputCacheCapSizeInBytes > MA_SIZE_MAX) { + ma_data_converter_uninit(&pDecoder->converter, &pDecoder->allocationCallbacks); + return MA_OUT_OF_MEMORY; + } + + pDecoder->pInputCache = ma_malloc((size_t)inputCacheCapSizeInBytes, &pDecoder->allocationCallbacks); /* Safe cast to size_t. */ + if (pDecoder->pInputCache == NULL) { + ma_data_converter_uninit(&pDecoder->converter, &pDecoder->allocationCallbacks); + return MA_OUT_OF_MEMORY; + } + } + } + + return MA_SUCCESS; +} + + + +static ma_result ma_decoder_internal_on_read__custom(void* pUserData, void* pBufferOut, size_t bytesToRead, size_t* pBytesRead) +{ + ma_decoder* pDecoder = (ma_decoder*)pUserData; + MA_ASSERT(pDecoder != NULL); + + return ma_decoder_read_bytes(pDecoder, pBufferOut, bytesToRead, pBytesRead); +} + +static ma_result ma_decoder_internal_on_seek__custom(void* pUserData, ma_int64 offset, ma_seek_origin origin) +{ + ma_decoder* pDecoder = (ma_decoder*)pUserData; + MA_ASSERT(pDecoder != NULL); + + return ma_decoder_seek_bytes(pDecoder, offset, origin); +} + +static ma_result ma_decoder_internal_on_tell__custom(void* pUserData, ma_int64* pCursor) +{ + ma_decoder* pDecoder = (ma_decoder*)pUserData; + MA_ASSERT(pDecoder != NULL); + + return ma_decoder_tell_bytes(pDecoder, pCursor); +} + + +static ma_result ma_decoder_init_from_vtable__internal(const ma_decoding_backend_vtable* pVTable, void* pVTableUserData, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + ma_result result; + ma_decoding_backend_config backendConfig; + ma_data_source* pBackend; + + MA_ASSERT(pVTable != NULL); + MA_ASSERT(pConfig != NULL); + MA_ASSERT(pDecoder != NULL); + + if (pVTable->onInit == NULL) { + return MA_NOT_IMPLEMENTED; + } + + backendConfig = ma_decoding_backend_config_init(pConfig->format, pConfig->seekPointCount); + + result = pVTable->onInit(pVTableUserData, ma_decoder_internal_on_read__custom, ma_decoder_internal_on_seek__custom, ma_decoder_internal_on_tell__custom, pDecoder, &backendConfig, &pDecoder->allocationCallbacks, &pBackend); + if (result != MA_SUCCESS) { + return result; /* Failed to initialize the backend from this vtable. */ + } + + /* Getting here means we were able to initialize the backend so we can now initialize the decoder. 
*/ + pDecoder->pBackend = pBackend; + pDecoder->pBackendVTable = pVTable; + pDecoder->pBackendUserData = pConfig->pCustomBackendUserData; + + return MA_SUCCESS; +} + +static ma_result ma_decoder_init_from_file__internal(const ma_decoding_backend_vtable* pVTable, void* pVTableUserData, const char* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + ma_result result; + ma_decoding_backend_config backendConfig; + ma_data_source* pBackend; + + MA_ASSERT(pVTable != NULL); + MA_ASSERT(pConfig != NULL); + MA_ASSERT(pDecoder != NULL); + + if (pVTable->onInitFile == NULL) { + return MA_NOT_IMPLEMENTED; + } + + backendConfig = ma_decoding_backend_config_init(pConfig->format, pConfig->seekPointCount); + + result = pVTable->onInitFile(pVTableUserData, pFilePath, &backendConfig, &pDecoder->allocationCallbacks, &pBackend); + if (result != MA_SUCCESS) { + return result; /* Failed to initialize the backend from this vtable. */ + } + + /* Getting here means we were able to initialize the backend so we can now initialize the decoder. */ + pDecoder->pBackend = pBackend; + pDecoder->pBackendVTable = pVTable; + pDecoder->pBackendUserData = pConfig->pCustomBackendUserData; + + return MA_SUCCESS; +} + +static ma_result ma_decoder_init_from_file_w__internal(const ma_decoding_backend_vtable* pVTable, void* pVTableUserData, const wchar_t* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + ma_result result; + ma_decoding_backend_config backendConfig; + ma_data_source* pBackend; + + MA_ASSERT(pVTable != NULL); + MA_ASSERT(pConfig != NULL); + MA_ASSERT(pDecoder != NULL); + + if (pVTable->onInitFileW == NULL) { + return MA_NOT_IMPLEMENTED; + } + + backendConfig = ma_decoding_backend_config_init(pConfig->format, pConfig->seekPointCount); + + result = pVTable->onInitFileW(pVTableUserData, pFilePath, &backendConfig, &pDecoder->allocationCallbacks, &pBackend); + if (result != MA_SUCCESS) { + return result; /* Failed to initialize the backend from this vtable. */ + } + + /* Getting here means we were able to initialize the backend so we can now initialize the decoder. */ + pDecoder->pBackend = pBackend; + pDecoder->pBackendVTable = pVTable; + pDecoder->pBackendUserData = pConfig->pCustomBackendUserData; + + return MA_SUCCESS; +} + +static ma_result ma_decoder_init_from_memory__internal(const ma_decoding_backend_vtable* pVTable, void* pVTableUserData, const void* pData, size_t dataSize, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + ma_result result; + ma_decoding_backend_config backendConfig; + ma_data_source* pBackend; + + MA_ASSERT(pVTable != NULL); + MA_ASSERT(pConfig != NULL); + MA_ASSERT(pDecoder != NULL); + + if (pVTable->onInitMemory == NULL) { + return MA_NOT_IMPLEMENTED; + } + + backendConfig = ma_decoding_backend_config_init(pConfig->format, pConfig->seekPointCount); + + result = pVTable->onInitMemory(pVTableUserData, pData, dataSize, &backendConfig, &pDecoder->allocationCallbacks, &pBackend); + if (result != MA_SUCCESS) { + return result; /* Failed to initialize the backend from this vtable. */ + } + + /* Getting here means we were able to initialize the backend so we can now initialize the decoder. 
*/ + pDecoder->pBackend = pBackend; + pDecoder->pBackendVTable = pVTable; + pDecoder->pBackendUserData = pConfig->pCustomBackendUserData; + + return MA_SUCCESS; +} + + + +static ma_result ma_decoder_init_custom__internal(const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + ma_result result = MA_NO_BACKEND; + size_t ivtable; + + MA_ASSERT(pConfig != NULL); + MA_ASSERT(pDecoder != NULL); + + if (pConfig->ppCustomBackendVTables == NULL) { + return MA_NO_BACKEND; + } + + /* The order each backend is listed is what defines the priority. */ + for (ivtable = 0; ivtable < pConfig->customBackendCount; ivtable += 1) { + const ma_decoding_backend_vtable* pVTable = pConfig->ppCustomBackendVTables[ivtable]; + if (pVTable != NULL) { + result = ma_decoder_init_from_vtable__internal(pVTable, pConfig->pCustomBackendUserData, pConfig, pDecoder); + if (result == MA_SUCCESS) { + return MA_SUCCESS; + } else { + /* Initialization failed. Move on to the next one, but seek back to the start first so the next vtable starts from the first byte of the file. */ + result = ma_decoder_seek_bytes(pDecoder, 0, ma_seek_origin_start); + if (result != MA_SUCCESS) { + return result; /* Failed to seek back to the start. */ + } + } + } else { + /* No vtable. */ + } + } + + /* Getting here means we couldn't find a backend. */ + return MA_NO_BACKEND; +} + +static ma_result ma_decoder_init_custom_from_file__internal(const char* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + ma_result result = MA_NO_BACKEND; + size_t ivtable; + + MA_ASSERT(pConfig != NULL); + MA_ASSERT(pDecoder != NULL); + + if (pConfig->ppCustomBackendVTables == NULL) { + return MA_NO_BACKEND; + } + + /* The order each backend is listed is what defines the priority. */ + for (ivtable = 0; ivtable < pConfig->customBackendCount; ivtable += 1) { + const ma_decoding_backend_vtable* pVTable = pConfig->ppCustomBackendVTables[ivtable]; + if (pVTable != NULL) { + result = ma_decoder_init_from_file__internal(pVTable, pConfig->pCustomBackendUserData, pFilePath, pConfig, pDecoder); + if (result == MA_SUCCESS) { + return MA_SUCCESS; + } + } else { + /* No vtable. */ + } + } + + /* Getting here means we couldn't find a backend. */ + return MA_NO_BACKEND; +} + +static ma_result ma_decoder_init_custom_from_file_w__internal(const wchar_t* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + ma_result result = MA_NO_BACKEND; + size_t ivtable; + + MA_ASSERT(pConfig != NULL); + MA_ASSERT(pDecoder != NULL); + + if (pConfig->ppCustomBackendVTables == NULL) { + return MA_NO_BACKEND; + } + + /* The order each backend is listed is what defines the priority. */ + for (ivtable = 0; ivtable < pConfig->customBackendCount; ivtable += 1) { + const ma_decoding_backend_vtable* pVTable = pConfig->ppCustomBackendVTables[ivtable]; + if (pVTable != NULL) { + result = ma_decoder_init_from_file_w__internal(pVTable, pConfig->pCustomBackendUserData, pFilePath, pConfig, pDecoder); + if (result == MA_SUCCESS) { + return MA_SUCCESS; + } + } else { + /* No vtable. */ + } + } + + /* Getting here means we couldn't find a backend. 
*/ + return MA_NO_BACKEND; +} + +static ma_result ma_decoder_init_custom_from_memory__internal(const void* pData, size_t dataSize, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + ma_result result = MA_NO_BACKEND; + size_t ivtable; + + MA_ASSERT(pConfig != NULL); + MA_ASSERT(pDecoder != NULL); + + if (pConfig->ppCustomBackendVTables == NULL) { + return MA_NO_BACKEND; + } + + /* The order each backend is listed is what defines the priority. */ + for (ivtable = 0; ivtable < pConfig->customBackendCount; ivtable += 1) { + const ma_decoding_backend_vtable* pVTable = pConfig->ppCustomBackendVTables[ivtable]; + if (pVTable != NULL) { + result = ma_decoder_init_from_memory__internal(pVTable, pConfig->pCustomBackendUserData, pData, dataSize, pConfig, pDecoder); + if (result == MA_SUCCESS) { + return MA_SUCCESS; + } + } else { + /* No vtable. */ + } + } + + /* Getting here means we couldn't find a backend. */ + return MA_NO_BACKEND; +} + + +/* WAV */ +#ifdef ma_dr_wav_h +#define MA_HAS_WAV + +typedef struct +{ + ma_data_source_base ds; + ma_read_proc onRead; + ma_seek_proc onSeek; + ma_tell_proc onTell; + void* pReadSeekTellUserData; + ma_format format; /* Can be f32, s16 or s32. */ +#if !defined(MA_NO_WAV) + ma_dr_wav dr; +#endif +} ma_wav; + +MA_API ma_result ma_wav_init(ma_read_proc onRead, ma_seek_proc onSeek, ma_tell_proc onTell, void* pReadSeekTellUserData, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_wav* pWav); +MA_API ma_result ma_wav_init_file(const char* pFilePath, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_wav* pWav); +MA_API ma_result ma_wav_init_file_w(const wchar_t* pFilePath, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_wav* pWav); +MA_API ma_result ma_wav_init_memory(const void* pData, size_t dataSize, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_wav* pWav); +MA_API void ma_wav_uninit(ma_wav* pWav, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API ma_result ma_wav_read_pcm_frames(ma_wav* pWav, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead); +MA_API ma_result ma_wav_seek_to_pcm_frame(ma_wav* pWav, ma_uint64 frameIndex); +MA_API ma_result ma_wav_get_data_format(ma_wav* pWav, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate, ma_channel* pChannelMap, size_t channelMapCap); +MA_API ma_result ma_wav_get_cursor_in_pcm_frames(ma_wav* pWav, ma_uint64* pCursor); +MA_API ma_result ma_wav_get_length_in_pcm_frames(ma_wav* pWav, ma_uint64* pLength); + + +static ma_result ma_wav_ds_read(ma_data_source* pDataSource, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead) +{ + return ma_wav_read_pcm_frames((ma_wav*)pDataSource, pFramesOut, frameCount, pFramesRead); +} + +static ma_result ma_wav_ds_seek(ma_data_source* pDataSource, ma_uint64 frameIndex) +{ + return ma_wav_seek_to_pcm_frame((ma_wav*)pDataSource, frameIndex); +} + +static ma_result ma_wav_ds_get_data_format(ma_data_source* pDataSource, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate, ma_channel* pChannelMap, size_t channelMapCap) +{ + return ma_wav_get_data_format((ma_wav*)pDataSource, pFormat, pChannels, pSampleRate, pChannelMap, channelMapCap); +} + +static ma_result ma_wav_ds_get_cursor(ma_data_source* pDataSource, ma_uint64* pCursor) +{ + return ma_wav_get_cursor_in_pcm_frames((ma_wav*)pDataSource, pCursor); +} + +static ma_result 
ma_wav_ds_get_length(ma_data_source* pDataSource, ma_uint64* pLength) +{ + return ma_wav_get_length_in_pcm_frames((ma_wav*)pDataSource, pLength); +} + +static ma_data_source_vtable g_ma_wav_ds_vtable = +{ + ma_wav_ds_read, + ma_wav_ds_seek, + ma_wav_ds_get_data_format, + ma_wav_ds_get_cursor, + ma_wav_ds_get_length, + NULL, /* onSetLooping */ + 0 +}; + + +#if !defined(MA_NO_WAV) +static size_t ma_wav_dr_callback__read(void* pUserData, void* pBufferOut, size_t bytesToRead) +{ + ma_wav* pWav = (ma_wav*)pUserData; + ma_result result; + size_t bytesRead; + + MA_ASSERT(pWav != NULL); + + result = pWav->onRead(pWav->pReadSeekTellUserData, pBufferOut, bytesToRead, &bytesRead); + (void)result; + + return bytesRead; +} + +static ma_bool32 ma_wav_dr_callback__seek(void* pUserData, int offset, ma_dr_wav_seek_origin origin) +{ + ma_wav* pWav = (ma_wav*)pUserData; + ma_result result; + ma_seek_origin maSeekOrigin; + + MA_ASSERT(pWav != NULL); + + maSeekOrigin = ma_seek_origin_start; + if (origin == ma_dr_wav_seek_origin_current) { + maSeekOrigin = ma_seek_origin_current; + } + + result = pWav->onSeek(pWav->pReadSeekTellUserData, offset, maSeekOrigin); + if (result != MA_SUCCESS) { + return MA_FALSE; + } + + return MA_TRUE; +} +#endif + +static ma_result ma_wav_init_internal(const ma_decoding_backend_config* pConfig, ma_wav* pWav) +{ + ma_result result; + ma_data_source_config dataSourceConfig; + + if (pWav == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pWav); + pWav->format = ma_format_unknown; /* Use closest match to source file by default. */ + + if (pConfig != NULL && (pConfig->preferredFormat == ma_format_f32 || pConfig->preferredFormat == ma_format_s16 || pConfig->preferredFormat == ma_format_s32)) { + pWav->format = pConfig->preferredFormat; + } else { + /* Getting here means something other than f32 and s16 was specified. Just leave this unset to use the default format. */ + } + + dataSourceConfig = ma_data_source_config_init(); + dataSourceConfig.vtable = &g_ma_wav_ds_vtable; + + result = ma_data_source_init(&dataSourceConfig, &pWav->ds); + if (result != MA_SUCCESS) { + return result; /* Failed to initialize the base data source. */ + } + + return MA_SUCCESS; +} + +static ma_result ma_wav_post_init(ma_wav* pWav) +{ + /* + If an explicit format was not specified, try picking the closest match based on the internal + format. The format needs to be supported by miniaudio. + */ + if (pWav->format == ma_format_unknown) { + switch (pWav->dr.translatedFormatTag) + { + case MA_DR_WAVE_FORMAT_PCM: + { + if (pWav->dr.bitsPerSample == 8) { + pWav->format = ma_format_u8; + } else if (pWav->dr.bitsPerSample == 16) { + pWav->format = ma_format_s16; + } else if (pWav->dr.bitsPerSample == 24) { + pWav->format = ma_format_s24; + } else if (pWav->dr.bitsPerSample == 32) { + pWav->format = ma_format_s32; + } + } break; + + case MA_DR_WAVE_FORMAT_IEEE_FLOAT: + { + if (pWav->dr.bitsPerSample == 32) { + pWav->format = ma_format_f32; + } + } break; + + default: break; + } + + /* Fall back to f32 if we couldn't find anything. 
*/ + if (pWav->format == ma_format_unknown) { + pWav->format = ma_format_f32; + } + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_wav_init(ma_read_proc onRead, ma_seek_proc onSeek, ma_tell_proc onTell, void* pReadSeekTellUserData, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_wav* pWav) +{ + ma_result result; + + result = ma_wav_init_internal(pConfig, pWav); + if (result != MA_SUCCESS) { + return result; + } + + if (onRead == NULL || onSeek == NULL) { + return MA_INVALID_ARGS; /* onRead and onSeek are mandatory. */ + } + + pWav->onRead = onRead; + pWav->onSeek = onSeek; + pWav->onTell = onTell; + pWav->pReadSeekTellUserData = pReadSeekTellUserData; + + #if !defined(MA_NO_WAV) + { + ma_bool32 wavResult; + + wavResult = ma_dr_wav_init(&pWav->dr, ma_wav_dr_callback__read, ma_wav_dr_callback__seek, pWav, pAllocationCallbacks); + if (wavResult != MA_TRUE) { + return MA_INVALID_FILE; + } + + ma_wav_post_init(pWav); + + return MA_SUCCESS; + } + #else + { + /* wav is disabled. */ + (void)pAllocationCallbacks; + return MA_NOT_IMPLEMENTED; + } + #endif +} + +MA_API ma_result ma_wav_init_file(const char* pFilePath, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_wav* pWav) +{ + ma_result result; + + result = ma_wav_init_internal(pConfig, pWav); + if (result != MA_SUCCESS) { + return result; + } + + #if !defined(MA_NO_WAV) + { + ma_bool32 wavResult; + + wavResult = ma_dr_wav_init_file(&pWav->dr, pFilePath, pAllocationCallbacks); + if (wavResult != MA_TRUE) { + return MA_INVALID_FILE; + } + + ma_wav_post_init(pWav); + + return MA_SUCCESS; + } + #else + { + /* wav is disabled. */ + (void)pFilePath; + (void)pAllocationCallbacks; + return MA_NOT_IMPLEMENTED; + } + #endif +} + +MA_API ma_result ma_wav_init_file_w(const wchar_t* pFilePath, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_wav* pWav) +{ + ma_result result; + + result = ma_wav_init_internal(pConfig, pWav); + if (result != MA_SUCCESS) { + return result; + } + + #if !defined(MA_NO_WAV) + { + ma_bool32 wavResult; + + wavResult = ma_dr_wav_init_file_w(&pWav->dr, pFilePath, pAllocationCallbacks); + if (wavResult != MA_TRUE) { + return MA_INVALID_FILE; + } + + ma_wav_post_init(pWav); + + return MA_SUCCESS; + } + #else + { + /* wav is disabled. */ + (void)pFilePath; + (void)pAllocationCallbacks; + return MA_NOT_IMPLEMENTED; + } + #endif +} + +MA_API ma_result ma_wav_init_memory(const void* pData, size_t dataSize, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_wav* pWav) +{ + ma_result result; + + result = ma_wav_init_internal(pConfig, pWav); + if (result != MA_SUCCESS) { + return result; + } + + #if !defined(MA_NO_WAV) + { + ma_bool32 wavResult; + + wavResult = ma_dr_wav_init_memory(&pWav->dr, pData, dataSize, pAllocationCallbacks); + if (wavResult != MA_TRUE) { + return MA_INVALID_FILE; + } + + ma_wav_post_init(pWav); + + return MA_SUCCESS; + } + #else + { + /* wav is disabled. */ + (void)pData; + (void)dataSize; + (void)pAllocationCallbacks; + return MA_NOT_IMPLEMENTED; + } + #endif +} + +MA_API void ma_wav_uninit(ma_wav* pWav, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pWav == NULL) { + return; + } + + (void)pAllocationCallbacks; + + #if !defined(MA_NO_WAV) + { + ma_dr_wav_uninit(&pWav->dr); + } + #else + { + /* wav is disabled. Should never hit this since initialization would have failed. 
*/ + MA_ASSERT(MA_FALSE); + } + #endif + + ma_data_source_uninit(&pWav->ds); +} + +MA_API ma_result ma_wav_read_pcm_frames(ma_wav* pWav, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead) +{ + if (pFramesRead != NULL) { + *pFramesRead = 0; + } + + if (frameCount == 0) { + return MA_INVALID_ARGS; + } + + if (pWav == NULL) { + return MA_INVALID_ARGS; + } + + #if !defined(MA_NO_WAV) + { + /* We always use floating point format. */ + ma_result result = MA_SUCCESS; /* Must be initialized to MA_SUCCESS. */ + ma_uint64 totalFramesRead = 0; + ma_format format; + + ma_wav_get_data_format(pWav, &format, NULL, NULL, NULL, 0); + + switch (format) + { + case ma_format_f32: + { + totalFramesRead = ma_dr_wav_read_pcm_frames_f32(&pWav->dr, frameCount, (float*)pFramesOut); + } break; + + case ma_format_s16: + { + totalFramesRead = ma_dr_wav_read_pcm_frames_s16(&pWav->dr, frameCount, (ma_int16*)pFramesOut); + } break; + + case ma_format_s32: + { + totalFramesRead = ma_dr_wav_read_pcm_frames_s32(&pWav->dr, frameCount, (ma_int32*)pFramesOut); + } break; + + /* Fallback to a raw read. */ + case ma_format_unknown: return MA_INVALID_OPERATION; /* <-- this should never be hit because initialization would just fall back to a supported format. */ + default: + { + totalFramesRead = ma_dr_wav_read_pcm_frames(&pWav->dr, frameCount, pFramesOut); + } break; + } + + /* In the future we'll update ma_dr_wav to return MA_AT_END for us. */ + if (totalFramesRead == 0) { + result = MA_AT_END; + } + + if (pFramesRead != NULL) { + *pFramesRead = totalFramesRead; + } + + if (result == MA_SUCCESS && totalFramesRead == 0) { + result = MA_AT_END; + } + + return result; + } + #else + { + /* wav is disabled. Should never hit this since initialization would have failed. */ + MA_ASSERT(MA_FALSE); + + (void)pFramesOut; + (void)frameCount; + (void)pFramesRead; + + return MA_NOT_IMPLEMENTED; + } + #endif +} + +MA_API ma_result ma_wav_seek_to_pcm_frame(ma_wav* pWav, ma_uint64 frameIndex) +{ + if (pWav == NULL) { + return MA_INVALID_ARGS; + } + + #if !defined(MA_NO_WAV) + { + ma_bool32 wavResult; + + wavResult = ma_dr_wav_seek_to_pcm_frame(&pWav->dr, frameIndex); + if (wavResult != MA_TRUE) { + return MA_ERROR; + } + + return MA_SUCCESS; + } + #else + { + /* wav is disabled. Should never hit this since initialization would have failed. */ + MA_ASSERT(MA_FALSE); + + (void)frameIndex; + + return MA_NOT_IMPLEMENTED; + } + #endif +} + +MA_API ma_result ma_wav_get_data_format(ma_wav* pWav, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate, ma_channel* pChannelMap, size_t channelMapCap) +{ + /* Defaults for safety. */ + if (pFormat != NULL) { + *pFormat = ma_format_unknown; + } + if (pChannels != NULL) { + *pChannels = 0; + } + if (pSampleRate != NULL) { + *pSampleRate = 0; + } + if (pChannelMap != NULL) { + MA_ZERO_MEMORY(pChannelMap, sizeof(*pChannelMap) * channelMapCap); + } + + if (pWav == NULL) { + return MA_INVALID_OPERATION; + } + + if (pFormat != NULL) { + *pFormat = pWav->format; + } + + #if !defined(MA_NO_WAV) + { + if (pChannels != NULL) { + *pChannels = pWav->dr.channels; + } + + if (pSampleRate != NULL) { + *pSampleRate = pWav->dr.sampleRate; + } + + if (pChannelMap != NULL) { + ma_channel_map_init_standard(ma_standard_channel_map_microsoft, pChannelMap, channelMapCap, pWav->dr.channels); + } + + return MA_SUCCESS; + } + #else + { + /* wav is disabled. Should never hit this since initialization would have failed. 
*/ + MA_ASSERT(MA_FALSE); + return MA_NOT_IMPLEMENTED; + } + #endif +} + +MA_API ma_result ma_wav_get_cursor_in_pcm_frames(ma_wav* pWav, ma_uint64* pCursor) +{ + if (pCursor == NULL) { + return MA_INVALID_ARGS; + } + + *pCursor = 0; /* Safety. */ + + if (pWav == NULL) { + return MA_INVALID_ARGS; + } + + #if !defined(MA_NO_WAV) + { + ma_result wavResult = ma_dr_wav_get_cursor_in_pcm_frames(&pWav->dr, pCursor); + if (wavResult != MA_SUCCESS) { + return (ma_result)wavResult; /* ma_dr_wav result codes map to miniaudio's. */ + } + + return MA_SUCCESS; + } + #else + { + /* wav is disabled. Should never hit this since initialization would have failed. */ + MA_ASSERT(MA_FALSE); + return MA_NOT_IMPLEMENTED; + } + #endif +} + +MA_API ma_result ma_wav_get_length_in_pcm_frames(ma_wav* pWav, ma_uint64* pLength) +{ + if (pLength == NULL) { + return MA_INVALID_ARGS; + } + + *pLength = 0; /* Safety. */ + + if (pWav == NULL) { + return MA_INVALID_ARGS; + } + + #if !defined(MA_NO_WAV) + { + ma_result wavResult = ma_dr_wav_get_length_in_pcm_frames(&pWav->dr, pLength); + if (wavResult != MA_SUCCESS) { + return (ma_result)wavResult; /* ma_dr_wav result codes map to miniaudio's. */ + } + + return MA_SUCCESS; + } + #else + { + /* wav is disabled. Should never hit this since initialization would have failed. */ + MA_ASSERT(MA_FALSE); + return MA_NOT_IMPLEMENTED; + } + #endif +} + + +static ma_result ma_decoding_backend_init__wav(void* pUserData, ma_read_proc onRead, ma_seek_proc onSeek, ma_tell_proc onTell, void* pReadSeekTellUserData, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_data_source** ppBackend) +{ + ma_result result; + ma_wav* pWav; + + (void)pUserData; /* For now not using pUserData, but once we start storing the vorbis decoder state within the ma_decoder structure this will be set to the decoder so we can avoid a malloc. */ + + /* For now we're just allocating the decoder backend on the heap. */ + pWav = (ma_wav*)ma_malloc(sizeof(*pWav), pAllocationCallbacks); + if (pWav == NULL) { + return MA_OUT_OF_MEMORY; + } + + result = ma_wav_init(onRead, onSeek, onTell, pReadSeekTellUserData, pConfig, pAllocationCallbacks, pWav); + if (result != MA_SUCCESS) { + ma_free(pWav, pAllocationCallbacks); + return result; + } + + *ppBackend = pWav; + + return MA_SUCCESS; +} + +static ma_result ma_decoding_backend_init_file__wav(void* pUserData, const char* pFilePath, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_data_source** ppBackend) +{ + ma_result result; + ma_wav* pWav; + + (void)pUserData; /* For now not using pUserData, but once we start storing the vorbis decoder state within the ma_decoder structure this will be set to the decoder so we can avoid a malloc. */ + + /* For now we're just allocating the decoder backend on the heap. 
*/ + pWav = (ma_wav*)ma_malloc(sizeof(*pWav), pAllocationCallbacks); + if (pWav == NULL) { + return MA_OUT_OF_MEMORY; + } + + result = ma_wav_init_file(pFilePath, pConfig, pAllocationCallbacks, pWav); + if (result != MA_SUCCESS) { + ma_free(pWav, pAllocationCallbacks); + return result; + } + + *ppBackend = pWav; + + return MA_SUCCESS; +} + +static ma_result ma_decoding_backend_init_file_w__wav(void* pUserData, const wchar_t* pFilePath, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_data_source** ppBackend) +{ + ma_result result; + ma_wav* pWav; + + (void)pUserData; /* For now not using pUserData, but once we start storing the vorbis decoder state within the ma_decoder structure this will be set to the decoder so we can avoid a malloc. */ + + /* For now we're just allocating the decoder backend on the heap. */ + pWav = (ma_wav*)ma_malloc(sizeof(*pWav), pAllocationCallbacks); + if (pWav == NULL) { + return MA_OUT_OF_MEMORY; + } + + result = ma_wav_init_file_w(pFilePath, pConfig, pAllocationCallbacks, pWav); + if (result != MA_SUCCESS) { + ma_free(pWav, pAllocationCallbacks); + return result; + } + + *ppBackend = pWav; + + return MA_SUCCESS; +} + +static ma_result ma_decoding_backend_init_memory__wav(void* pUserData, const void* pData, size_t dataSize, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_data_source** ppBackend) +{ + ma_result result; + ma_wav* pWav; + + (void)pUserData; /* For now not using pUserData, but once we start storing the vorbis decoder state within the ma_decoder structure this will be set to the decoder so we can avoid a malloc. */ + + /* For now we're just allocating the decoder backend on the heap. */ + pWav = (ma_wav*)ma_malloc(sizeof(*pWav), pAllocationCallbacks); + if (pWav == NULL) { + return MA_OUT_OF_MEMORY; + } + + result = ma_wav_init_memory(pData, dataSize, pConfig, pAllocationCallbacks, pWav); + if (result != MA_SUCCESS) { + ma_free(pWav, pAllocationCallbacks); + return result; + } + + *ppBackend = pWav; + + return MA_SUCCESS; +} + +static void ma_decoding_backend_uninit__wav(void* pUserData, ma_data_source* pBackend, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_wav* pWav = (ma_wav*)pBackend; + + (void)pUserData; + + ma_wav_uninit(pWav, pAllocationCallbacks); + ma_free(pWav, pAllocationCallbacks); +} + +static ma_decoding_backend_vtable g_ma_decoding_backend_vtable_wav = +{ + ma_decoding_backend_init__wav, + ma_decoding_backend_init_file__wav, + ma_decoding_backend_init_file_w__wav, + ma_decoding_backend_init_memory__wav, + ma_decoding_backend_uninit__wav +}; + +static ma_result ma_decoder_init_wav__internal(const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + return ma_decoder_init_from_vtable__internal(&g_ma_decoding_backend_vtable_wav, NULL, pConfig, pDecoder); +} + +static ma_result ma_decoder_init_wav_from_file__internal(const char* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + return ma_decoder_init_from_file__internal(&g_ma_decoding_backend_vtable_wav, NULL, pFilePath, pConfig, pDecoder); +} + +static ma_result ma_decoder_init_wav_from_file_w__internal(const wchar_t* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + return ma_decoder_init_from_file_w__internal(&g_ma_decoding_backend_vtable_wav, NULL, pFilePath, pConfig, pDecoder); +} + +static ma_result ma_decoder_init_wav_from_memory__internal(const void* pData, size_t dataSize, const ma_decoder_config* pConfig, ma_decoder* 
pDecoder) +{ + return ma_decoder_init_from_memory__internal(&g_ma_decoding_backend_vtable_wav, NULL, pData, dataSize, pConfig, pDecoder); +} +#endif /* ma_dr_wav_h */ + +/* FLAC */ +#ifdef ma_dr_flac_h +#define MA_HAS_FLAC + +typedef struct +{ + ma_data_source_base ds; + ma_read_proc onRead; + ma_seek_proc onSeek; + ma_tell_proc onTell; + void* pReadSeekTellUserData; + ma_format format; /* Can be f32, s16 or s32. */ +#if !defined(MA_NO_FLAC) + ma_dr_flac* dr; +#endif +} ma_flac; + +MA_API ma_result ma_flac_init(ma_read_proc onRead, ma_seek_proc onSeek, ma_tell_proc onTell, void* pReadSeekTellUserData, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_flac* pFlac); +MA_API ma_result ma_flac_init_file(const char* pFilePath, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_flac* pFlac); +MA_API ma_result ma_flac_init_file_w(const wchar_t* pFilePath, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_flac* pFlac); +MA_API ma_result ma_flac_init_memory(const void* pData, size_t dataSize, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_flac* pFlac); +MA_API void ma_flac_uninit(ma_flac* pFlac, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API ma_result ma_flac_read_pcm_frames(ma_flac* pFlac, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead); +MA_API ma_result ma_flac_seek_to_pcm_frame(ma_flac* pFlac, ma_uint64 frameIndex); +MA_API ma_result ma_flac_get_data_format(ma_flac* pFlac, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate, ma_channel* pChannelMap, size_t channelMapCap); +MA_API ma_result ma_flac_get_cursor_in_pcm_frames(ma_flac* pFlac, ma_uint64* pCursor); +MA_API ma_result ma_flac_get_length_in_pcm_frames(ma_flac* pFlac, ma_uint64* pLength); + + +static ma_result ma_flac_ds_read(ma_data_source* pDataSource, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead) +{ + return ma_flac_read_pcm_frames((ma_flac*)pDataSource, pFramesOut, frameCount, pFramesRead); +} + +static ma_result ma_flac_ds_seek(ma_data_source* pDataSource, ma_uint64 frameIndex) +{ + return ma_flac_seek_to_pcm_frame((ma_flac*)pDataSource, frameIndex); +} + +static ma_result ma_flac_ds_get_data_format(ma_data_source* pDataSource, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate, ma_channel* pChannelMap, size_t channelMapCap) +{ + return ma_flac_get_data_format((ma_flac*)pDataSource, pFormat, pChannels, pSampleRate, pChannelMap, channelMapCap); +} + +static ma_result ma_flac_ds_get_cursor(ma_data_source* pDataSource, ma_uint64* pCursor) +{ + return ma_flac_get_cursor_in_pcm_frames((ma_flac*)pDataSource, pCursor); +} + +static ma_result ma_flac_ds_get_length(ma_data_source* pDataSource, ma_uint64* pLength) +{ + return ma_flac_get_length_in_pcm_frames((ma_flac*)pDataSource, pLength); +} + +static ma_data_source_vtable g_ma_flac_ds_vtable = +{ + ma_flac_ds_read, + ma_flac_ds_seek, + ma_flac_ds_get_data_format, + ma_flac_ds_get_cursor, + ma_flac_ds_get_length, + NULL, /* onSetLooping */ + 0 +}; + + +#if !defined(MA_NO_FLAC) +static size_t ma_flac_dr_callback__read(void* pUserData, void* pBufferOut, size_t bytesToRead) +{ + ma_flac* pFlac = (ma_flac*)pUserData; + ma_result result; + size_t bytesRead; + + MA_ASSERT(pFlac != NULL); + + result = pFlac->onRead(pFlac->pReadSeekTellUserData, pBufferOut, bytesToRead, &bytesRead); + (void)result; + + return bytesRead; +} + 
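The pattern established above for the WAV backend, and repeated below for FLAC, is the custom decoding-backend contract: a small state struct fronted by a ma_data_source vtable, read/seek adapters for the underlying dr decoder, and a ma_decoding_backend_vtable packaging the onInit* entry points. A caller can push its own table through the same ppCustomBackendVTables path that ma_decoder_init_custom__internal walks. A minimal sketch, where pMyVTable and "sound.wav" are placeholders, and ma_decoder_init_file, ma_decoder_read_pcm_frames, and ma_decoder_uninit are the standard public miniaudio decoder API (declared elsewhere in this header):

/* Sketch: route decoding through an explicit backend list. Backends are
   tried in the order given; the first one whose onInit* succeeds wins. */
static int decode_with_custom_backend(ma_decoding_backend_vtable* pMyVTable)
{
    ma_decoding_backend_vtable* pVTables[1];
    ma_decoder_config config;
    ma_decoder decoder;

    pVTables[0] = pMyVTable;

    config = ma_decoder_config_init(ma_format_f32, 2, 48000);
    config.ppCustomBackendVTables = pVTables;
    config.customBackendCount     = 1;
    config.pCustomBackendUserData = NULL; /* Handed to each backend's onInit* callback. */

    if (ma_decoder_init_file("sound.wav", &config, &decoder) != MA_SUCCESS) {
        return -1;
    }

    for (;;) {
        float frames[4096]; /* Room for 2048 frames of stereo f32. */
        ma_uint64 framesRead;
        if (ma_decoder_read_pcm_frames(&decoder, frames, 2048, &framesRead) != MA_SUCCESS) {
            break; /* MA_AT_END or an error. */
        }
        /* ...consume framesRead frames of interleaved output here... */
    }

    ma_decoder_uninit(&decoder);
    return 0;
}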
+static ma_bool32 ma_flac_dr_callback__seek(void* pUserData, int offset, ma_dr_flac_seek_origin origin) +{ + ma_flac* pFlac = (ma_flac*)pUserData; + ma_result result; + ma_seek_origin maSeekOrigin; + + MA_ASSERT(pFlac != NULL); + + maSeekOrigin = ma_seek_origin_start; + if (origin == ma_dr_flac_seek_origin_current) { + maSeekOrigin = ma_seek_origin_current; + } + + result = pFlac->onSeek(pFlac->pReadSeekTellUserData, offset, maSeekOrigin); + if (result != MA_SUCCESS) { + return MA_FALSE; + } + + return MA_TRUE; +} +#endif + +static ma_result ma_flac_init_internal(const ma_decoding_backend_config* pConfig, ma_flac* pFlac) +{ + ma_result result; + ma_data_source_config dataSourceConfig; + + if (pFlac == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pFlac); + pFlac->format = ma_format_f32; /* f32 by default. */ + + if (pConfig != NULL && (pConfig->preferredFormat == ma_format_f32 || pConfig->preferredFormat == ma_format_s16 || pConfig->preferredFormat == ma_format_s32)) { + pFlac->format = pConfig->preferredFormat; + } else { + /* Getting here means something other than f32 and s16 was specified. Just leave this unset to use the default format. */ + } + + dataSourceConfig = ma_data_source_config_init(); + dataSourceConfig.vtable = &g_ma_flac_ds_vtable; + + result = ma_data_source_init(&dataSourceConfig, &pFlac->ds); + if (result != MA_SUCCESS) { + return result; /* Failed to initialize the base data source. */ + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_flac_init(ma_read_proc onRead, ma_seek_proc onSeek, ma_tell_proc onTell, void* pReadSeekTellUserData, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_flac* pFlac) +{ + ma_result result; + + result = ma_flac_init_internal(pConfig, pFlac); + if (result != MA_SUCCESS) { + return result; + } + + if (onRead == NULL || onSeek == NULL) { + return MA_INVALID_ARGS; /* onRead and onSeek are mandatory. */ + } + + pFlac->onRead = onRead; + pFlac->onSeek = onSeek; + pFlac->onTell = onTell; + pFlac->pReadSeekTellUserData = pReadSeekTellUserData; + + #if !defined(MA_NO_FLAC) + { + pFlac->dr = ma_dr_flac_open(ma_flac_dr_callback__read, ma_flac_dr_callback__seek, pFlac, pAllocationCallbacks); + if (pFlac->dr == NULL) { + return MA_INVALID_FILE; + } + + return MA_SUCCESS; + } + #else + { + /* flac is disabled. */ + (void)pAllocationCallbacks; + return MA_NOT_IMPLEMENTED; + } + #endif +} + +MA_API ma_result ma_flac_init_file(const char* pFilePath, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_flac* pFlac) +{ + ma_result result; + + result = ma_flac_init_internal(pConfig, pFlac); + if (result != MA_SUCCESS) { + return result; + } + + #if !defined(MA_NO_FLAC) + { + pFlac->dr = ma_dr_flac_open_file(pFilePath, pAllocationCallbacks); + if (pFlac->dr == NULL) { + return MA_INVALID_FILE; + } + + return MA_SUCCESS; + } + #else + { + /* flac is disabled. 
*/ + (void)pFilePath; + (void)pAllocationCallbacks; + return MA_NOT_IMPLEMENTED; + } + #endif +} + +MA_API ma_result ma_flac_init_file_w(const wchar_t* pFilePath, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_flac* pFlac) +{ + ma_result result; + + result = ma_flac_init_internal(pConfig, pFlac); + if (result != MA_SUCCESS) { + return result; + } + + #if !defined(MA_NO_FLAC) + { + pFlac->dr = ma_dr_flac_open_file_w(pFilePath, pAllocationCallbacks); + if (pFlac->dr == NULL) { + return MA_INVALID_FILE; + } + + return MA_SUCCESS; + } + #else + { + /* flac is disabled. */ + (void)pFilePath; + (void)pAllocationCallbacks; + return MA_NOT_IMPLEMENTED; + } + #endif +} + +MA_API ma_result ma_flac_init_memory(const void* pData, size_t dataSize, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_flac* pFlac) +{ + ma_result result; + + result = ma_flac_init_internal(pConfig, pFlac); + if (result != MA_SUCCESS) { + return result; + } + + #if !defined(MA_NO_FLAC) + { + pFlac->dr = ma_dr_flac_open_memory(pData, dataSize, pAllocationCallbacks); + if (pFlac->dr == NULL) { + return MA_INVALID_FILE; + } + + return MA_SUCCESS; + } + #else + { + /* flac is disabled. */ + (void)pData; + (void)dataSize; + (void)pAllocationCallbacks; + return MA_NOT_IMPLEMENTED; + } + #endif +} + +MA_API void ma_flac_uninit(ma_flac* pFlac, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pFlac == NULL) { + return; + } + + (void)pAllocationCallbacks; + + #if !defined(MA_NO_FLAC) + { + ma_dr_flac_close(pFlac->dr); + } + #else + { + /* flac is disabled. Should never hit this since initialization would have failed. */ + MA_ASSERT(MA_FALSE); + } + #endif + + ma_data_source_uninit(&pFlac->ds); +} + +MA_API ma_result ma_flac_read_pcm_frames(ma_flac* pFlac, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead) +{ + if (pFramesRead != NULL) { + *pFramesRead = 0; + } + + if (frameCount == 0) { + return MA_INVALID_ARGS; + } + + if (pFlac == NULL) { + return MA_INVALID_ARGS; + } + + #if !defined(MA_NO_FLAC) + { + /* We always use floating point format. */ + ma_result result = MA_SUCCESS; /* Must be initialized to MA_SUCCESS. */ + ma_uint64 totalFramesRead = 0; + ma_format format; + + ma_flac_get_data_format(pFlac, &format, NULL, NULL, NULL, 0); + + switch (format) + { + case ma_format_f32: + { + totalFramesRead = ma_dr_flac_read_pcm_frames_f32(pFlac->dr, frameCount, (float*)pFramesOut); + } break; + + case ma_format_s16: + { + totalFramesRead = ma_dr_flac_read_pcm_frames_s16(pFlac->dr, frameCount, (ma_int16*)pFramesOut); + } break; + + case ma_format_s32: + { + totalFramesRead = ma_dr_flac_read_pcm_frames_s32(pFlac->dr, frameCount, (ma_int32*)pFramesOut); + } break; + + case ma_format_u8: + case ma_format_s24: + case ma_format_unknown: + default: + { + return MA_INVALID_OPERATION; + }; + } + + /* In the future we'll update ma_dr_flac to return MA_AT_END for us. */ + if (totalFramesRead == 0) { + result = MA_AT_END; + } + + if (pFramesRead != NULL) { + *pFramesRead = totalFramesRead; + } + + if (result == MA_SUCCESS && totalFramesRead == 0) { + result = MA_AT_END; + } + + return result; + } + #else + { + /* flac is disabled. Should never hit this since initialization would have failed. 
*/ + MA_ASSERT(MA_FALSE); + + (void)pFramesOut; + (void)frameCount; + (void)pFramesRead; + + return MA_NOT_IMPLEMENTED; + } + #endif +} + +MA_API ma_result ma_flac_seek_to_pcm_frame(ma_flac* pFlac, ma_uint64 frameIndex) +{ + if (pFlac == NULL) { + return MA_INVALID_ARGS; + } + + #if !defined(MA_NO_FLAC) + { + ma_bool32 flacResult; + + flacResult = ma_dr_flac_seek_to_pcm_frame(pFlac->dr, frameIndex); + if (flacResult != MA_TRUE) { + return MA_ERROR; + } + + return MA_SUCCESS; + } + #else + { + /* flac is disabled. Should never hit this since initialization would have failed. */ + MA_ASSERT(MA_FALSE); + + (void)frameIndex; + + return MA_NOT_IMPLEMENTED; + } + #endif +} + +MA_API ma_result ma_flac_get_data_format(ma_flac* pFlac, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate, ma_channel* pChannelMap, size_t channelMapCap) +{ + /* Defaults for safety. */ + if (pFormat != NULL) { + *pFormat = ma_format_unknown; + } + if (pChannels != NULL) { + *pChannels = 0; + } + if (pSampleRate != NULL) { + *pSampleRate = 0; + } + if (pChannelMap != NULL) { + MA_ZERO_MEMORY(pChannelMap, sizeof(*pChannelMap) * channelMapCap); + } + + if (pFlac == NULL) { + return MA_INVALID_OPERATION; + } + + if (pFormat != NULL) { + *pFormat = pFlac->format; + } + + #if !defined(MA_NO_FLAC) + { + if (pChannels != NULL) { + *pChannels = pFlac->dr->channels; + } + + if (pSampleRate != NULL) { + *pSampleRate = pFlac->dr->sampleRate; + } + + if (pChannelMap != NULL) { + ma_channel_map_init_standard(ma_standard_channel_map_microsoft, pChannelMap, channelMapCap, pFlac->dr->channels); + } + + return MA_SUCCESS; + } + #else + { + /* flac is disabled. Should never hit this since initialization would have failed. */ + MA_ASSERT(MA_FALSE); + return MA_NOT_IMPLEMENTED; + } + #endif +} + +MA_API ma_result ma_flac_get_cursor_in_pcm_frames(ma_flac* pFlac, ma_uint64* pCursor) +{ + if (pCursor == NULL) { + return MA_INVALID_ARGS; + } + + *pCursor = 0; /* Safety. */ + + if (pFlac == NULL) { + return MA_INVALID_ARGS; + } + + #if !defined(MA_NO_FLAC) + { + *pCursor = pFlac->dr->currentPCMFrame; + + return MA_SUCCESS; + } + #else + { + /* flac is disabled. Should never hit this since initialization would have failed. */ + MA_ASSERT(MA_FALSE); + return MA_NOT_IMPLEMENTED; + } + #endif +} + +MA_API ma_result ma_flac_get_length_in_pcm_frames(ma_flac* pFlac, ma_uint64* pLength) +{ + if (pLength == NULL) { + return MA_INVALID_ARGS; + } + + *pLength = 0; /* Safety. */ + + if (pFlac == NULL) { + return MA_INVALID_ARGS; + } + + #if !defined(MA_NO_FLAC) + { + *pLength = pFlac->dr->totalPCMFrameCount; + + return MA_SUCCESS; + } + #else + { + /* flac is disabled. Should never hit this since initialization would have failed. */ + MA_ASSERT(MA_FALSE); + return MA_NOT_IMPLEMENTED; + } + #endif +} + + +static ma_result ma_decoding_backend_init__flac(void* pUserData, ma_read_proc onRead, ma_seek_proc onSeek, ma_tell_proc onTell, void* pReadSeekTellUserData, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_data_source** ppBackend) +{ + ma_result result; + ma_flac* pFlac; + + (void)pUserData; /* For now not using pUserData, but once we start storing the vorbis decoder state within the ma_decoder structure this will be set to the decoder so we can avoid a malloc. */ + + /* For now we're just allocating the decoder backend on the heap. 
*/ + pFlac = (ma_flac*)ma_malloc(sizeof(*pFlac), pAllocationCallbacks); + if (pFlac == NULL) { + return MA_OUT_OF_MEMORY; + } + + result = ma_flac_init(onRead, onSeek, onTell, pReadSeekTellUserData, pConfig, pAllocationCallbacks, pFlac); + if (result != MA_SUCCESS) { + ma_free(pFlac, pAllocationCallbacks); + return result; + } + + *ppBackend = pFlac; + + return MA_SUCCESS; +} + +static ma_result ma_decoding_backend_init_file__flac(void* pUserData, const char* pFilePath, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_data_source** ppBackend) +{ + ma_result result; + ma_flac* pFlac; + + (void)pUserData; /* For now not using pUserData, but once we start storing the vorbis decoder state within the ma_decoder structure this will be set to the decoder so we can avoid a malloc. */ + + /* For now we're just allocating the decoder backend on the heap. */ + pFlac = (ma_flac*)ma_malloc(sizeof(*pFlac), pAllocationCallbacks); + if (pFlac == NULL) { + return MA_OUT_OF_MEMORY; + } + + result = ma_flac_init_file(pFilePath, pConfig, pAllocationCallbacks, pFlac); + if (result != MA_SUCCESS) { + ma_free(pFlac, pAllocationCallbacks); + return result; + } + + *ppBackend = pFlac; + + return MA_SUCCESS; +} + +static ma_result ma_decoding_backend_init_file_w__flac(void* pUserData, const wchar_t* pFilePath, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_data_source** ppBackend) +{ + ma_result result; + ma_flac* pFlac; + + (void)pUserData; /* For now not using pUserData, but once we start storing the vorbis decoder state within the ma_decoder structure this will be set to the decoder so we can avoid a malloc. */ + + /* For now we're just allocating the decoder backend on the heap. */ + pFlac = (ma_flac*)ma_malloc(sizeof(*pFlac), pAllocationCallbacks); + if (pFlac == NULL) { + return MA_OUT_OF_MEMORY; + } + + result = ma_flac_init_file_w(pFilePath, pConfig, pAllocationCallbacks, pFlac); + if (result != MA_SUCCESS) { + ma_free(pFlac, pAllocationCallbacks); + return result; + } + + *ppBackend = pFlac; + + return MA_SUCCESS; +} + +static ma_result ma_decoding_backend_init_memory__flac(void* pUserData, const void* pData, size_t dataSize, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_data_source** ppBackend) +{ + ma_result result; + ma_flac* pFlac; + + (void)pUserData; /* For now not using pUserData, but once we start storing the vorbis decoder state within the ma_decoder structure this will be set to the decoder so we can avoid a malloc. */ + + /* For now we're just allocating the decoder backend on the heap. 
*/ + pFlac = (ma_flac*)ma_malloc(sizeof(*pFlac), pAllocationCallbacks); + if (pFlac == NULL) { + return MA_OUT_OF_MEMORY; + } + + result = ma_flac_init_memory(pData, dataSize, pConfig, pAllocationCallbacks, pFlac); + if (result != MA_SUCCESS) { + ma_free(pFlac, pAllocationCallbacks); + return result; + } + + *ppBackend = pFlac; + + return MA_SUCCESS; +} + +static void ma_decoding_backend_uninit__flac(void* pUserData, ma_data_source* pBackend, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_flac* pFlac = (ma_flac*)pBackend; + + (void)pUserData; + + ma_flac_uninit(pFlac, pAllocationCallbacks); + ma_free(pFlac, pAllocationCallbacks); +} + +static ma_decoding_backend_vtable g_ma_decoding_backend_vtable_flac = +{ + ma_decoding_backend_init__flac, + ma_decoding_backend_init_file__flac, + ma_decoding_backend_init_file_w__flac, + ma_decoding_backend_init_memory__flac, + ma_decoding_backend_uninit__flac +}; + +static ma_result ma_decoder_init_flac__internal(const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + return ma_decoder_init_from_vtable__internal(&g_ma_decoding_backend_vtable_flac, NULL, pConfig, pDecoder); +} + +static ma_result ma_decoder_init_flac_from_file__internal(const char* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + return ma_decoder_init_from_file__internal(&g_ma_decoding_backend_vtable_flac, NULL, pFilePath, pConfig, pDecoder); +} + +static ma_result ma_decoder_init_flac_from_file_w__internal(const wchar_t* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + return ma_decoder_init_from_file_w__internal(&g_ma_decoding_backend_vtable_flac, NULL, pFilePath, pConfig, pDecoder); +} + +static ma_result ma_decoder_init_flac_from_memory__internal(const void* pData, size_t dataSize, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + return ma_decoder_init_from_memory__internal(&g_ma_decoding_backend_vtable_flac, NULL, pData, dataSize, pConfig, pDecoder); +} +#endif /* ma_dr_flac_h */ + +/* MP3 */ +#ifdef ma_dr_mp3_h +#define MA_HAS_MP3 + +typedef struct +{ + ma_data_source_base ds; + ma_read_proc onRead; + ma_seek_proc onSeek; + ma_tell_proc onTell; + void* pReadSeekTellUserData; + ma_format format; /* Can be f32 or s16. */ +#if !defined(MA_NO_MP3) + ma_dr_mp3 dr; + ma_uint32 seekPointCount; + ma_dr_mp3_seek_point* pSeekPoints; /* Only used if seek table generation is used. 
*/ +#endif +} ma_mp3; + +MA_API ma_result ma_mp3_init(ma_read_proc onRead, ma_seek_proc onSeek, ma_tell_proc onTell, void* pReadSeekTellUserData, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_mp3* pMP3); +MA_API ma_result ma_mp3_init_file(const char* pFilePath, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_mp3* pMP3); +MA_API ma_result ma_mp3_init_file_w(const wchar_t* pFilePath, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_mp3* pMP3); +MA_API ma_result ma_mp3_init_memory(const void* pData, size_t dataSize, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_mp3* pMP3); +MA_API void ma_mp3_uninit(ma_mp3* pMP3, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API ma_result ma_mp3_read_pcm_frames(ma_mp3* pMP3, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead); +MA_API ma_result ma_mp3_seek_to_pcm_frame(ma_mp3* pMP3, ma_uint64 frameIndex); +MA_API ma_result ma_mp3_get_data_format(ma_mp3* pMP3, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate, ma_channel* pChannelMap, size_t channelMapCap); +MA_API ma_result ma_mp3_get_cursor_in_pcm_frames(ma_mp3* pMP3, ma_uint64* pCursor); +MA_API ma_result ma_mp3_get_length_in_pcm_frames(ma_mp3* pMP3, ma_uint64* pLength); + + +static ma_result ma_mp3_ds_read(ma_data_source* pDataSource, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead) +{ + return ma_mp3_read_pcm_frames((ma_mp3*)pDataSource, pFramesOut, frameCount, pFramesRead); +} + +static ma_result ma_mp3_ds_seek(ma_data_source* pDataSource, ma_uint64 frameIndex) +{ + return ma_mp3_seek_to_pcm_frame((ma_mp3*)pDataSource, frameIndex); +} + +static ma_result ma_mp3_ds_get_data_format(ma_data_source* pDataSource, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate, ma_channel* pChannelMap, size_t channelMapCap) +{ + return ma_mp3_get_data_format((ma_mp3*)pDataSource, pFormat, pChannels, pSampleRate, pChannelMap, channelMapCap); +} + +static ma_result ma_mp3_ds_get_cursor(ma_data_source* pDataSource, ma_uint64* pCursor) +{ + return ma_mp3_get_cursor_in_pcm_frames((ma_mp3*)pDataSource, pCursor); +} + +static ma_result ma_mp3_ds_get_length(ma_data_source* pDataSource, ma_uint64* pLength) +{ + return ma_mp3_get_length_in_pcm_frames((ma_mp3*)pDataSource, pLength); +} + +static ma_data_source_vtable g_ma_mp3_ds_vtable = +{ + ma_mp3_ds_read, + ma_mp3_ds_seek, + ma_mp3_ds_get_data_format, + ma_mp3_ds_get_cursor, + ma_mp3_ds_get_length, + NULL, /* onSetLooping */ + 0 +}; + + +#if !defined(MA_NO_MP3) +static size_t ma_mp3_dr_callback__read(void* pUserData, void* pBufferOut, size_t bytesToRead) +{ + ma_mp3* pMP3 = (ma_mp3*)pUserData; + ma_result result; + size_t bytesRead; + + MA_ASSERT(pMP3 != NULL); + + result = pMP3->onRead(pMP3->pReadSeekTellUserData, pBufferOut, bytesToRead, &bytesRead); + (void)result; + + return bytesRead; +} + +static ma_bool32 ma_mp3_dr_callback__seek(void* pUserData, int offset, ma_dr_mp3_seek_origin origin) +{ + ma_mp3* pMP3 = (ma_mp3*)pUserData; + ma_result result; + ma_seek_origin maSeekOrigin; + + MA_ASSERT(pMP3 != NULL); + + maSeekOrigin = ma_seek_origin_start; + if (origin == ma_dr_mp3_seek_origin_current) { + maSeekOrigin = ma_seek_origin_current; + } + + result = pMP3->onSeek(pMP3->pReadSeekTellUserData, offset, maSeekOrigin); + if (result != MA_SUCCESS) { + return MA_FALSE; + } + + return MA_TRUE; +} +#endif + 
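The MP3 backend mirrors the FLAC one, adding an optional seek table (the seekPointCount/pSeekPoints fields above) that ma_mp3_generate_seek_table() below populates from ma_decoding_backend_config.seekPointCount. A hedged sketch of what that looks like from the caller's side; it is illustrative only and not part of the vendored patch. decode_mp3_example() is a hypothetical helper, and ma_decoding_backend_config_init() is assumed to be the config initializer defined earlier in this amalgamated file.

    static ma_result decode_mp3_example(const void* pMp3Data, size_t mp3DataSize)
    {
        ma_mp3 mp3;
        ma_decoding_backend_config config;
        ma_uint32 channels;
        ma_int16 frames[4096];
        ma_uint64 framesRead;
        ma_result result;

        /* Ask for s16 output and 16 seek points; the seek table makes
           ma_mp3_seek_to_pcm_frame() cheaper than a linear scan.
           (ma_decoding_backend_config_init() is the assumed helper from
           earlier in the file.) */
        config = ma_decoding_backend_config_init(ma_format_s16, 16);

        result = ma_mp3_init_memory(pMp3Data, mp3DataSize, &config, NULL, &mp3);
        if (result != MA_SUCCESS) {
            return result;
        }

        ma_mp3_get_data_format(&mp3, NULL, &channels, NULL, NULL, 0);

        result = ma_mp3_seek_to_pcm_frame(&mp3, 44100);  /* Jump one second in at 44.1 kHz. */
        if (result == MA_SUCCESS) {
            result = ma_mp3_read_pcm_frames(&mp3, frames, 4096 / channels, &framesRead);
        }

        ma_mp3_uninit(&mp3, NULL);
        return result;
    }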
+static ma_result ma_mp3_init_internal(const ma_decoding_backend_config* pConfig, ma_mp3* pMP3) +{ + ma_result result; + ma_data_source_config dataSourceConfig; + + if (pMP3 == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pMP3); + pMP3->format = ma_format_f32; /* f32 by default. */ + + if (pConfig != NULL && (pConfig->preferredFormat == ma_format_f32 || pConfig->preferredFormat == ma_format_s16)) { + pMP3->format = pConfig->preferredFormat; + } else { + /* Getting here means something other than f32 and s16 was specified. Just leave this unset to use the default format. */ + } + + dataSourceConfig = ma_data_source_config_init(); + dataSourceConfig.vtable = &g_ma_mp3_ds_vtable; + + result = ma_data_source_init(&dataSourceConfig, &pMP3->ds); + if (result != MA_SUCCESS) { + return result; /* Failed to initialize the base data source. */ + } + + return MA_SUCCESS; +} + +static ma_result ma_mp3_generate_seek_table(ma_mp3* pMP3, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_bool32 mp3Result; + ma_uint32 seekPointCount = 0; + ma_dr_mp3_seek_point* pSeekPoints = NULL; + + MA_ASSERT(pMP3 != NULL); + MA_ASSERT(pConfig != NULL); + + seekPointCount = pConfig->seekPointCount; + if (seekPointCount > 0) { + pSeekPoints = (ma_dr_mp3_seek_point*)ma_malloc(sizeof(*pMP3->pSeekPoints) * seekPointCount, pAllocationCallbacks); + if (pSeekPoints == NULL) { + return MA_OUT_OF_MEMORY; + } + } + + mp3Result = ma_dr_mp3_calculate_seek_points(&pMP3->dr, &seekPointCount, pSeekPoints); + if (mp3Result != MA_TRUE) { + ma_free(pSeekPoints, pAllocationCallbacks); + return MA_ERROR; + } + + mp3Result = ma_dr_mp3_bind_seek_table(&pMP3->dr, seekPointCount, pSeekPoints); + if (mp3Result != MA_TRUE) { + ma_free(pSeekPoints, pAllocationCallbacks); + return MA_ERROR; + } + + pMP3->seekPointCount = seekPointCount; + pMP3->pSeekPoints = pSeekPoints; + + return MA_SUCCESS; +} + +static ma_result ma_mp3_post_init(ma_mp3* pMP3, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_result result; + + result = ma_mp3_generate_seek_table(pMP3, pConfig, pAllocationCallbacks); + if (result != MA_SUCCESS) { + return result; + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_mp3_init(ma_read_proc onRead, ma_seek_proc onSeek, ma_tell_proc onTell, void* pReadSeekTellUserData, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_mp3* pMP3) +{ + ma_result result; + + result = ma_mp3_init_internal(pConfig, pMP3); + if (result != MA_SUCCESS) { + return result; + } + + if (onRead == NULL || onSeek == NULL) { + return MA_INVALID_ARGS; /* onRead and onSeek are mandatory. */ + } + + pMP3->onRead = onRead; + pMP3->onSeek = onSeek; + pMP3->onTell = onTell; + pMP3->pReadSeekTellUserData = pReadSeekTellUserData; + + #if !defined(MA_NO_MP3) + { + ma_bool32 mp3Result; + + mp3Result = ma_dr_mp3_init(&pMP3->dr, ma_mp3_dr_callback__read, ma_mp3_dr_callback__seek, pMP3, pAllocationCallbacks); + if (mp3Result != MA_TRUE) { + return MA_INVALID_FILE; + } + + ma_mp3_post_init(pMP3, pConfig, pAllocationCallbacks); + + return MA_SUCCESS; + } + #else + { + /* mp3 is disabled. 
*/ + (void)pAllocationCallbacks; + return MA_NOT_IMPLEMENTED; + } + #endif +} + +MA_API ma_result ma_mp3_init_file(const char* pFilePath, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_mp3* pMP3) +{ + ma_result result; + + result = ma_mp3_init_internal(pConfig, pMP3); + if (result != MA_SUCCESS) { + return result; + } + + #if !defined(MA_NO_MP3) + { + ma_bool32 mp3Result; + + mp3Result = ma_dr_mp3_init_file(&pMP3->dr, pFilePath, pAllocationCallbacks); + if (mp3Result != MA_TRUE) { + return MA_INVALID_FILE; + } + + ma_mp3_post_init(pMP3, pConfig, pAllocationCallbacks); + + return MA_SUCCESS; + } + #else + { + /* mp3 is disabled. */ + (void)pFilePath; + (void)pAllocationCallbacks; + return MA_NOT_IMPLEMENTED; + } + #endif +} + +MA_API ma_result ma_mp3_init_file_w(const wchar_t* pFilePath, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_mp3* pMP3) +{ + ma_result result; + + result = ma_mp3_init_internal(pConfig, pMP3); + if (result != MA_SUCCESS) { + return result; + } + + #if !defined(MA_NO_MP3) + { + ma_bool32 mp3Result; + + mp3Result = ma_dr_mp3_init_file_w(&pMP3->dr, pFilePath, pAllocationCallbacks); + if (mp3Result != MA_TRUE) { + return MA_INVALID_FILE; + } + + ma_mp3_post_init(pMP3, pConfig, pAllocationCallbacks); + + return MA_SUCCESS; + } + #else + { + /* mp3 is disabled. */ + (void)pFilePath; + (void)pAllocationCallbacks; + return MA_NOT_IMPLEMENTED; + } + #endif +} + +MA_API ma_result ma_mp3_init_memory(const void* pData, size_t dataSize, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_mp3* pMP3) +{ + ma_result result; + + result = ma_mp3_init_internal(pConfig, pMP3); + if (result != MA_SUCCESS) { + return result; + } + + #if !defined(MA_NO_MP3) + { + ma_bool32 mp3Result; + + mp3Result = ma_dr_mp3_init_memory(&pMP3->dr, pData, dataSize, pAllocationCallbacks); + if (mp3Result != MA_TRUE) { + return MA_INVALID_FILE; + } + + ma_mp3_post_init(pMP3, pConfig, pAllocationCallbacks); + + return MA_SUCCESS; + } + #else + { + /* mp3 is disabled. */ + (void)pData; + (void)dataSize; + (void)pAllocationCallbacks; + return MA_NOT_IMPLEMENTED; + } + #endif +} + +MA_API void ma_mp3_uninit(ma_mp3* pMP3, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pMP3 == NULL) { + return; + } + + #if !defined(MA_NO_MP3) + { + ma_dr_mp3_uninit(&pMP3->dr); + } + #else + { + /* mp3 is disabled. Should never hit this since initialization would have failed. */ + MA_ASSERT(MA_FALSE); + } + #endif + + /* Seek points need to be freed after the MP3 decoder has been uninitialized to ensure they're no longer being referenced. */ + ma_free(pMP3->pSeekPoints, pAllocationCallbacks); + + ma_data_source_uninit(&pMP3->ds); +} + +MA_API ma_result ma_mp3_read_pcm_frames(ma_mp3* pMP3, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead) +{ + if (pFramesRead != NULL) { + *pFramesRead = 0; + } + + if (frameCount == 0) { + return MA_INVALID_ARGS; + } + + if (pMP3 == NULL) { + return MA_INVALID_ARGS; + } + + #if !defined(MA_NO_MP3) + { + /* We always use floating point format. */ + ma_result result = MA_SUCCESS; /* Must be initialized to MA_SUCCESS. 
*/ + ma_uint64 totalFramesRead = 0; + ma_format format; + + ma_mp3_get_data_format(pMP3, &format, NULL, NULL, NULL, 0); + + switch (format) + { + case ma_format_f32: + { + totalFramesRead = ma_dr_mp3_read_pcm_frames_f32(&pMP3->dr, frameCount, (float*)pFramesOut); + } break; + + case ma_format_s16: + { + totalFramesRead = ma_dr_mp3_read_pcm_frames_s16(&pMP3->dr, frameCount, (ma_int16*)pFramesOut); + } break; + + case ma_format_u8: + case ma_format_s24: + case ma_format_s32: + case ma_format_unknown: + default: + { + return MA_INVALID_OPERATION; + }; + } + + /* In the future we'll update ma_dr_mp3 to return MA_AT_END for us. */ + if (totalFramesRead == 0) { + result = MA_AT_END; + } + + if (pFramesRead != NULL) { + *pFramesRead = totalFramesRead; + } + + return result; + } + #else + { + /* mp3 is disabled. Should never hit this since initialization would have failed. */ + MA_ASSERT(MA_FALSE); + + (void)pFramesOut; + (void)frameCount; + (void)pFramesRead; + + return MA_NOT_IMPLEMENTED; + } + #endif +} + +MA_API ma_result ma_mp3_seek_to_pcm_frame(ma_mp3* pMP3, ma_uint64 frameIndex) +{ + if (pMP3 == NULL) { + return MA_INVALID_ARGS; + } + + #if !defined(MA_NO_MP3) + { + ma_bool32 mp3Result; + + mp3Result = ma_dr_mp3_seek_to_pcm_frame(&pMP3->dr, frameIndex); + if (mp3Result != MA_TRUE) { + return MA_ERROR; + } + + return MA_SUCCESS; + } + #else + { + /* mp3 is disabled. Should never hit this since initialization would have failed. */ + MA_ASSERT(MA_FALSE); + + (void)frameIndex; + + return MA_NOT_IMPLEMENTED; + } + #endif +} + +MA_API ma_result ma_mp3_get_data_format(ma_mp3* pMP3, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate, ma_channel* pChannelMap, size_t channelMapCap) +{ + /* Defaults for safety. */ + if (pFormat != NULL) { + *pFormat = ma_format_unknown; + } + if (pChannels != NULL) { + *pChannels = 0; + } + if (pSampleRate != NULL) { + *pSampleRate = 0; + } + if (pChannelMap != NULL) { + MA_ZERO_MEMORY(pChannelMap, sizeof(*pChannelMap) * channelMapCap); + } + + if (pMP3 == NULL) { + return MA_INVALID_OPERATION; + } + + if (pFormat != NULL) { + *pFormat = pMP3->format; + } + + #if !defined(MA_NO_MP3) + { + if (pChannels != NULL) { + *pChannels = pMP3->dr.channels; + } + + if (pSampleRate != NULL) { + *pSampleRate = pMP3->dr.sampleRate; + } + + if (pChannelMap != NULL) { + ma_channel_map_init_standard(ma_standard_channel_map_default, pChannelMap, channelMapCap, pMP3->dr.channels); + } + + return MA_SUCCESS; + } + #else + { + /* mp3 is disabled. Should never hit this since initialization would have failed. */ + MA_ASSERT(MA_FALSE); + return MA_NOT_IMPLEMENTED; + } + #endif +} + +MA_API ma_result ma_mp3_get_cursor_in_pcm_frames(ma_mp3* pMP3, ma_uint64* pCursor) +{ + if (pCursor == NULL) { + return MA_INVALID_ARGS; + } + + *pCursor = 0; /* Safety. */ + + if (pMP3 == NULL) { + return MA_INVALID_ARGS; + } + + #if !defined(MA_NO_MP3) + { + *pCursor = pMP3->dr.currentPCMFrame; + + return MA_SUCCESS; + } + #else + { + /* mp3 is disabled. Should never hit this since initialization would have failed. */ + MA_ASSERT(MA_FALSE); + return MA_NOT_IMPLEMENTED; + } + #endif +} + +MA_API ma_result ma_mp3_get_length_in_pcm_frames(ma_mp3* pMP3, ma_uint64* pLength) +{ + if (pLength == NULL) { + return MA_INVALID_ARGS; + } + + *pLength = 0; /* Safety. */ + + if (pMP3 == NULL) { + return MA_INVALID_ARGS; + } + + #if !defined(MA_NO_MP3) + { + *pLength = ma_dr_mp3_get_pcm_frame_count(&pMP3->dr); + + return MA_SUCCESS; + } + #else + { + /* mp3 is disabled. 
Should never hit this since initialization would have failed. */ + MA_ASSERT(MA_FALSE); + return MA_NOT_IMPLEMENTED; + } + #endif +} + + +static ma_result ma_decoding_backend_init__mp3(void* pUserData, ma_read_proc onRead, ma_seek_proc onSeek, ma_tell_proc onTell, void* pReadSeekTellUserData, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_data_source** ppBackend) +{ + ma_result result; + ma_mp3* pMP3; + + (void)pUserData; /* For now not using pUserData, but once we start storing the vorbis decoder state within the ma_decoder structure this will be set to the decoder so we can avoid a malloc. */ + + /* For now we're just allocating the decoder backend on the heap. */ + pMP3 = (ma_mp3*)ma_malloc(sizeof(*pMP3), pAllocationCallbacks); + if (pMP3 == NULL) { + return MA_OUT_OF_MEMORY; + } + + result = ma_mp3_init(onRead, onSeek, onTell, pReadSeekTellUserData, pConfig, pAllocationCallbacks, pMP3); + if (result != MA_SUCCESS) { + ma_free(pMP3, pAllocationCallbacks); + return result; + } + + *ppBackend = pMP3; + + return MA_SUCCESS; +} + +static ma_result ma_decoding_backend_init_file__mp3(void* pUserData, const char* pFilePath, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_data_source** ppBackend) +{ + ma_result result; + ma_mp3* pMP3; + + (void)pUserData; /* For now not using pUserData, but once we start storing the vorbis decoder state within the ma_decoder structure this will be set to the decoder so we can avoid a malloc. */ + + /* For now we're just allocating the decoder backend on the heap. */ + pMP3 = (ma_mp3*)ma_malloc(sizeof(*pMP3), pAllocationCallbacks); + if (pMP3 == NULL) { + return MA_OUT_OF_MEMORY; + } + + result = ma_mp3_init_file(pFilePath, pConfig, pAllocationCallbacks, pMP3); + if (result != MA_SUCCESS) { + ma_free(pMP3, pAllocationCallbacks); + return result; + } + + *ppBackend = pMP3; + + return MA_SUCCESS; +} + +static ma_result ma_decoding_backend_init_file_w__mp3(void* pUserData, const wchar_t* pFilePath, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_data_source** ppBackend) +{ + ma_result result; + ma_mp3* pMP3; + + (void)pUserData; /* For now not using pUserData, but once we start storing the vorbis decoder state within the ma_decoder structure this will be set to the decoder so we can avoid a malloc. */ + + /* For now we're just allocating the decoder backend on the heap. */ + pMP3 = (ma_mp3*)ma_malloc(sizeof(*pMP3), pAllocationCallbacks); + if (pMP3 == NULL) { + return MA_OUT_OF_MEMORY; + } + + result = ma_mp3_init_file_w(pFilePath, pConfig, pAllocationCallbacks, pMP3); + if (result != MA_SUCCESS) { + ma_free(pMP3, pAllocationCallbacks); + return result; + } + + *ppBackend = pMP3; + + return MA_SUCCESS; +} + +static ma_result ma_decoding_backend_init_memory__mp3(void* pUserData, const void* pData, size_t dataSize, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_data_source** ppBackend) +{ + ma_result result; + ma_mp3* pMP3; + + (void)pUserData; /* For now not using pUserData, but once we start storing the vorbis decoder state within the ma_decoder structure this will be set to the decoder so we can avoid a malloc. */ + + /* For now we're just allocating the decoder backend on the heap. 
*/ + pMP3 = (ma_mp3*)ma_malloc(sizeof(*pMP3), pAllocationCallbacks); + if (pMP3 == NULL) { + return MA_OUT_OF_MEMORY; + } + + result = ma_mp3_init_memory(pData, dataSize, pConfig, pAllocationCallbacks, pMP3); + if (result != MA_SUCCESS) { + ma_free(pMP3, pAllocationCallbacks); + return result; + } + + *ppBackend = pMP3; + + return MA_SUCCESS; +} + +static void ma_decoding_backend_uninit__mp3(void* pUserData, ma_data_source* pBackend, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_mp3* pMP3 = (ma_mp3*)pBackend; + + (void)pUserData; + + ma_mp3_uninit(pMP3, pAllocationCallbacks); + ma_free(pMP3, pAllocationCallbacks); +} + +static ma_decoding_backend_vtable g_ma_decoding_backend_vtable_mp3 = +{ + ma_decoding_backend_init__mp3, + ma_decoding_backend_init_file__mp3, + ma_decoding_backend_init_file_w__mp3, + ma_decoding_backend_init_memory__mp3, + ma_decoding_backend_uninit__mp3 +}; + +static ma_result ma_decoder_init_mp3__internal(const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + return ma_decoder_init_from_vtable__internal(&g_ma_decoding_backend_vtable_mp3, NULL, pConfig, pDecoder); +} + +static ma_result ma_decoder_init_mp3_from_file__internal(const char* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + return ma_decoder_init_from_file__internal(&g_ma_decoding_backend_vtable_mp3, NULL, pFilePath, pConfig, pDecoder); +} + +static ma_result ma_decoder_init_mp3_from_file_w__internal(const wchar_t* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + return ma_decoder_init_from_file_w__internal(&g_ma_decoding_backend_vtable_mp3, NULL, pFilePath, pConfig, pDecoder); +} + +static ma_result ma_decoder_init_mp3_from_memory__internal(const void* pData, size_t dataSize, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + return ma_decoder_init_from_memory__internal(&g_ma_decoding_backend_vtable_mp3, NULL, pData, dataSize, pConfig, pDecoder); +} +#endif /* ma_dr_mp3_h */ + +/* Vorbis */ +#ifdef STB_VORBIS_INCLUDE_STB_VORBIS_H +#define MA_HAS_VORBIS + +/* The size in bytes of each chunk of data to read from the Vorbis stream. */ +#define MA_VORBIS_DATA_CHUNK_SIZE 4096 + +typedef struct +{ + ma_data_source_base ds; + ma_read_proc onRead; + ma_seek_proc onSeek; + ma_tell_proc onTell; + void* pReadSeekTellUserData; + ma_allocation_callbacks allocationCallbacks; /* Store the allocation callbacks within the structure because we may need to dynamically expand a buffer in ma_stbvorbis_read_pcm_frames() when using push mode. */ + ma_format format; /* Only f32 is allowed with stb_vorbis. */ + ma_uint32 channels; + ma_uint32 sampleRate; + ma_uint64 cursor; +#if !defined(MA_NO_VORBIS) + stb_vorbis* stb; + ma_bool32 usingPushMode; + struct + { + ma_uint8* pData; + size_t dataSize; + size_t dataCapacity; + size_t audioStartOffsetInBytes; + ma_uint32 framesConsumed; /* The number of frames consumed in ppPacketData. */ + ma_uint32 framesRemaining; /* The number of frames remaining in ppPacketData. 
*/ + float** ppPacketData; + } push; +#endif +} ma_stbvorbis; + +MA_API ma_result ma_stbvorbis_init(ma_read_proc onRead, ma_seek_proc onSeek, ma_tell_proc onTell, void* pReadSeekTellUserData, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_stbvorbis* pVorbis); +MA_API ma_result ma_stbvorbis_init_file(const char* pFilePath, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_stbvorbis* pVorbis); +MA_API ma_result ma_stbvorbis_init_memory(const void* pData, size_t dataSize, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_stbvorbis* pVorbis); +MA_API void ma_stbvorbis_uninit(ma_stbvorbis* pVorbis, const ma_allocation_callbacks* pAllocationCallbacks); +MA_API ma_result ma_stbvorbis_read_pcm_frames(ma_stbvorbis* pVorbis, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead); +MA_API ma_result ma_stbvorbis_seek_to_pcm_frame(ma_stbvorbis* pVorbis, ma_uint64 frameIndex); +MA_API ma_result ma_stbvorbis_get_data_format(ma_stbvorbis* pVorbis, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate, ma_channel* pChannelMap, size_t channelMapCap); +MA_API ma_result ma_stbvorbis_get_cursor_in_pcm_frames(ma_stbvorbis* pVorbis, ma_uint64* pCursor); +MA_API ma_result ma_stbvorbis_get_length_in_pcm_frames(ma_stbvorbis* pVorbis, ma_uint64* pLength); + + +static ma_result ma_stbvorbis_ds_read(ma_data_source* pDataSource, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead) +{ + return ma_stbvorbis_read_pcm_frames((ma_stbvorbis*)pDataSource, pFramesOut, frameCount, pFramesRead); +} + +static ma_result ma_stbvorbis_ds_seek(ma_data_source* pDataSource, ma_uint64 frameIndex) +{ + return ma_stbvorbis_seek_to_pcm_frame((ma_stbvorbis*)pDataSource, frameIndex); +} + +static ma_result ma_stbvorbis_ds_get_data_format(ma_data_source* pDataSource, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate, ma_channel* pChannelMap, size_t channelMapCap) +{ + return ma_stbvorbis_get_data_format((ma_stbvorbis*)pDataSource, pFormat, pChannels, pSampleRate, pChannelMap, channelMapCap); +} + +static ma_result ma_stbvorbis_ds_get_cursor(ma_data_source* pDataSource, ma_uint64* pCursor) +{ + return ma_stbvorbis_get_cursor_in_pcm_frames((ma_stbvorbis*)pDataSource, pCursor); +} + +static ma_result ma_stbvorbis_ds_get_length(ma_data_source* pDataSource, ma_uint64* pLength) +{ + return ma_stbvorbis_get_length_in_pcm_frames((ma_stbvorbis*)pDataSource, pLength); +} + +static ma_data_source_vtable g_ma_stbvorbis_ds_vtable = +{ + ma_stbvorbis_ds_read, + ma_stbvorbis_ds_seek, + ma_stbvorbis_ds_get_data_format, + ma_stbvorbis_ds_get_cursor, + ma_stbvorbis_ds_get_length, + NULL, /* onSetLooping */ + 0 +}; + + +static ma_result ma_stbvorbis_init_internal(const ma_decoding_backend_config* pConfig, ma_stbvorbis* pVorbis) +{ + ma_result result; + ma_data_source_config dataSourceConfig; + + (void)pConfig; + + if (pVorbis == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pVorbis); + pVorbis->format = ma_format_f32; /* Only supporting f32. */ + + dataSourceConfig = ma_data_source_config_init(); + dataSourceConfig.vtable = &g_ma_stbvorbis_ds_vtable; + + result = ma_data_source_init(&dataSourceConfig, &pVorbis->ds); + if (result != MA_SUCCESS) { + return result; /* Failed to initialize the base data source. 
*/
+    }
+
+    return MA_SUCCESS;
+}
+
+#if !defined(MA_NO_VORBIS)
+static ma_result ma_stbvorbis_post_init(ma_stbvorbis* pVorbis)
+{
+    stb_vorbis_info info;
+
+    MA_ASSERT(pVorbis != NULL);
+
+    info = stb_vorbis_get_info(pVorbis->stb);
+
+    pVorbis->channels   = info.channels;
+    pVorbis->sampleRate = info.sample_rate;
+
+    return MA_SUCCESS;
+}
+
+static ma_result ma_stbvorbis_init_internal_decoder_push(ma_stbvorbis* pVorbis)
+{
+    ma_result result;
+    stb_vorbis* stb;
+    size_t dataSize = 0;
+    size_t dataCapacity = 0;
+    ma_uint8* pData = NULL; /* <-- Must be initialized to NULL. */
+
+    for (;;) {
+        int vorbisError;
+        int consumedDataSize; /* <-- Filled by stb_vorbis_open_pushdata(). */
+        size_t bytesRead;
+        ma_uint8* pNewData;
+
+        /* Allocate memory for the new chunk. */
+        dataCapacity += MA_VORBIS_DATA_CHUNK_SIZE;
+        pNewData = (ma_uint8*)ma_realloc(pData, dataCapacity, &pVorbis->allocationCallbacks);
+        if (pNewData == NULL) {
+            ma_free(pData, &pVorbis->allocationCallbacks);
+            return MA_OUT_OF_MEMORY;
+        }
+
+        pData = pNewData;
+
+        /* Read in the next chunk. */
+        result = pVorbis->onRead(pVorbis->pReadSeekTellUserData, ma_offset_ptr(pData, dataSize), (dataCapacity - dataSize), &bytesRead);
+        dataSize += bytesRead;
+
+        if (result != MA_SUCCESS) {
+            ma_free(pData, &pVorbis->allocationCallbacks);
+            return result;
+        }
+
+        /* We have a maximum of 31 bits with stb_vorbis. */
+        if (dataSize > INT_MAX) {
+            ma_free(pData, &pVorbis->allocationCallbacks);
+            return MA_TOO_BIG;
+        }
+
+        stb = stb_vorbis_open_pushdata(pData, (int)dataSize, &consumedDataSize, &vorbisError, NULL);
+        if (stb != NULL) {
+            /*
+            Successfully opened the Vorbis decoder. We might have some leftover unprocessed
+            data so we'll need to move that down to the front.
+            */
+            dataSize -= (size_t)consumedDataSize;   /* Consume the data. */
+            MA_MOVE_MEMORY(pData, ma_offset_ptr(pData, consumedDataSize), dataSize);
+
+            /*
+            We need to track the start point so we can seek back to the start of the audio
+            data when seeking.
+            */
+            pVorbis->push.audioStartOffsetInBytes = consumedDataSize;
+
+            break;
+        } else {
+            /* Failed to open the decoder. */
+            if (vorbisError == VORBIS_need_more_data) {
+                continue;
+            } else {
+                ma_free(pData, &pVorbis->allocationCallbacks);
+                return MA_ERROR;    /* Failed to open the stb_vorbis decoder. */
+            }
+        }
+    }
+
+    MA_ASSERT(stb != NULL);
+    pVorbis->stb = stb;
+    pVorbis->push.pData = pData;
+    pVorbis->push.dataSize = dataSize;
+    pVorbis->push.dataCapacity = dataCapacity;
+
+    return MA_SUCCESS;
+}
+#endif
+
+MA_API ma_result ma_stbvorbis_init(ma_read_proc onRead, ma_seek_proc onSeek, ma_tell_proc onTell, void* pReadSeekTellUserData, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_stbvorbis* pVorbis)
+{
+    ma_result result;
+
+    result = ma_stbvorbis_init_internal(pConfig, pVorbis);
+    if (result != MA_SUCCESS) {
+        return result;
+    }
+
+    if (onRead == NULL || onSeek == NULL) {
+        return MA_INVALID_ARGS; /* onRead and onSeek are mandatory. */
+    }
+
+    pVorbis->onRead = onRead;
+    pVorbis->onSeek = onSeek;
+    pVorbis->onTell = onTell;
+    pVorbis->pReadSeekTellUserData = pReadSeekTellUserData;
+    ma_allocation_callbacks_init_copy(&pVorbis->allocationCallbacks, pAllocationCallbacks);
+
+    #if !defined(MA_NO_VORBIS)
+    {
+        /*
+        stb_vorbis lacks a callback based API for its pulling API which means we're stuck with the
+        pushing API. In order for us to be able to successfully initialize the decoder we need to
+        supply it with enough data. We need to keep loading data until we have enough.
+ */ + result = ma_stbvorbis_init_internal_decoder_push(pVorbis); + if (result != MA_SUCCESS) { + return result; + } + + pVorbis->usingPushMode = MA_TRUE; + + result = ma_stbvorbis_post_init(pVorbis); + if (result != MA_SUCCESS) { + stb_vorbis_close(pVorbis->stb); + ma_free(pVorbis->push.pData, pAllocationCallbacks); + return result; + } + + return MA_SUCCESS; + } + #else + { + /* vorbis is disabled. */ + (void)pAllocationCallbacks; + return MA_NOT_IMPLEMENTED; + } + #endif +} + +MA_API ma_result ma_stbvorbis_init_file(const char* pFilePath, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_stbvorbis* pVorbis) +{ + ma_result result; + + result = ma_stbvorbis_init_internal(pConfig, pVorbis); + if (result != MA_SUCCESS) { + return result; + } + + #if !defined(MA_NO_VORBIS) + { + (void)pAllocationCallbacks; /* Don't know how to make use of this with stb_vorbis. */ + + /* We can use stb_vorbis' pull mode for file based streams. */ + pVorbis->stb = stb_vorbis_open_filename(pFilePath, NULL, NULL); + if (pVorbis->stb == NULL) { + return MA_INVALID_FILE; + } + + pVorbis->usingPushMode = MA_FALSE; + + result = ma_stbvorbis_post_init(pVorbis); + if (result != MA_SUCCESS) { + stb_vorbis_close(pVorbis->stb); + return result; + } + + return MA_SUCCESS; + } + #else + { + /* vorbis is disabled. */ + (void)pFilePath; + (void)pAllocationCallbacks; + return MA_NOT_IMPLEMENTED; + } + #endif +} + +MA_API ma_result ma_stbvorbis_init_memory(const void* pData, size_t dataSize, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_stbvorbis* pVorbis) +{ + ma_result result; + + result = ma_stbvorbis_init_internal(pConfig, pVorbis); + if (result != MA_SUCCESS) { + return result; + } + + #if !defined(MA_NO_VORBIS) + { + (void)pAllocationCallbacks; + + /* stb_vorbis uses an int as its size specifier, restricting it to 32-bit even on 64-bit systems. *sigh*. */ + if (dataSize > INT_MAX) { + return MA_TOO_BIG; + } + + pVorbis->stb = stb_vorbis_open_memory((const unsigned char*)pData, (int)dataSize, NULL, NULL); + if (pVorbis->stb == NULL) { + return MA_INVALID_FILE; + } + + pVorbis->usingPushMode = MA_FALSE; + + result = ma_stbvorbis_post_init(pVorbis); + if (result != MA_SUCCESS) { + stb_vorbis_close(pVorbis->stb); + return result; + } + + return MA_SUCCESS; + } + #else + { + /* vorbis is disabled. */ + (void)pData; + (void)dataSize; + (void)pAllocationCallbacks; + return MA_NOT_IMPLEMENTED; + } + #endif +} + +MA_API void ma_stbvorbis_uninit(ma_stbvorbis* pVorbis, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pVorbis == NULL) { + return; + } + + #if !defined(MA_NO_VORBIS) + { + stb_vorbis_close(pVorbis->stb); + + /* We'll have to clear some memory if we're using push mode. */ + if (pVorbis->usingPushMode) { + ma_free(pVorbis->push.pData, pAllocationCallbacks); + } + } + #else + { + /* vorbis is disabled. Should never hit this since initialization would have failed. */ + MA_ASSERT(MA_FALSE); + } + #endif + + ma_data_source_uninit(&pVorbis->ds); +} + +MA_API ma_result ma_stbvorbis_read_pcm_frames(ma_stbvorbis* pVorbis, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead) +{ + if (pFramesRead != NULL) { + *pFramesRead = 0; + } + + if (frameCount == 0) { + return MA_INVALID_ARGS; + } + + if (pVorbis == NULL) { + return MA_INVALID_ARGS; + } + + #if !defined(MA_NO_VORBIS) + { + /* We always use floating point format. */ + ma_result result = MA_SUCCESS; /* Must be initialized to MA_SUCCESS. 
*/
+        ma_uint64 totalFramesRead = 0;
+        ma_format format;
+        ma_uint32 channels;
+
+        ma_stbvorbis_get_data_format(pVorbis, &format, &channels, NULL, NULL, 0);
+
+        if (format == ma_format_f32) {
+            /* We read differently depending on whether or not we're using push mode. */
+            if (pVorbis->usingPushMode) {
+                /* Push mode. This is the complex case. */
+                float* pFramesOutF32 = (float*)pFramesOut;
+
+                while (totalFramesRead < frameCount) {
+                    /* The first thing to do is read from any already-cached frames. */
+                    ma_uint32 framesToReadFromCache = (ma_uint32)ma_min(pVorbis->push.framesRemaining, (frameCount - totalFramesRead));    /* Safe cast because pVorbis->framesRemaining is 32-bit. */
+
+                    /* The output pointer can be null in which case we just treat it as a seek. */
+                    if (pFramesOut != NULL) {
+                        ma_uint64 iFrame;
+                        for (iFrame = 0; iFrame < framesToReadFromCache; iFrame += 1) {
+                            ma_uint32 iChannel;
+                            for (iChannel = 0; iChannel < pVorbis->channels; iChannel += 1) {
+                                pFramesOutF32[iChannel] = pVorbis->push.ppPacketData[iChannel][pVorbis->push.framesConsumed + iFrame];
+                            }
+
+                            pFramesOutF32 += pVorbis->channels;
+                        }
+                    }
+
+                    /* Update pointers and counters. */
+                    pVorbis->push.framesConsumed  += framesToReadFromCache;
+                    pVorbis->push.framesRemaining -= framesToReadFromCache;
+                    totalFramesRead += framesToReadFromCache;
+
+                    /* Don't bother reading any more frames right now if we've just finished loading. */
+                    if (totalFramesRead == frameCount) {
+                        break;
+                    }
+
+                    MA_ASSERT(pVorbis->push.framesRemaining == 0);
+
+                    /* Getting here means we've run out of cached frames. We'll need to load some more. */
+                    for (;;) {
+                        int samplesRead = 0;
+                        int consumedDataSize;
+
+                        /* We need to cast dataSize to an int, so make sure we can do it safely. */
+                        if (pVorbis->push.dataSize > INT_MAX) {
+                            break;  /* Too big. */
+                        }
+
+                        consumedDataSize = stb_vorbis_decode_frame_pushdata(pVorbis->stb, pVorbis->push.pData, (int)pVorbis->push.dataSize, NULL, &pVorbis->push.ppPacketData, &samplesRead);
+                        if (consumedDataSize != 0) {
+                            /* Successfully decoded a Vorbis frame. Consume the data. */
+                            pVorbis->push.dataSize -= (size_t)consumedDataSize;
+                            MA_MOVE_MEMORY(pVorbis->push.pData, ma_offset_ptr(pVorbis->push.pData, consumedDataSize), pVorbis->push.dataSize);
+
+                            pVorbis->push.framesConsumed  = 0;
+                            pVorbis->push.framesRemaining = samplesRead;
+
+                            break;
+                        } else {
+                            /* Not enough data. Read more. */
+                            size_t bytesRead;
+
+                            /* Expand the data buffer if necessary. */
+                            if (pVorbis->push.dataCapacity == pVorbis->push.dataSize) {
+                                size_t newCap = pVorbis->push.dataCapacity + MA_VORBIS_DATA_CHUNK_SIZE;
+                                ma_uint8* pNewData;
+
+                                pNewData = (ma_uint8*)ma_realloc(pVorbis->push.pData, newCap, &pVorbis->allocationCallbacks);
+                                if (pNewData == NULL) {
+                                    result = MA_OUT_OF_MEMORY;
+                                    break;
+                                }
+
+                                pVorbis->push.pData = pNewData;
+                                pVorbis->push.dataCapacity = newCap;
+                            }
+
+                            /* We should have enough room to load some data. */
+                            result = pVorbis->onRead(pVorbis->pReadSeekTellUserData, ma_offset_ptr(pVorbis->push.pData, pVorbis->push.dataSize), (pVorbis->push.dataCapacity - pVorbis->push.dataSize), &bytesRead);
+                            pVorbis->push.dataSize += bytesRead;
+
+                            if (result != MA_SUCCESS) {
+                                break;  /* Failed to read any data. Get out. */
+                            }
+                        }
+                    }
+
+                    /* If we don't have a success code at this point it means we've encountered an error or the end of the file has been reached (probably the latter). */
+                    if (result != MA_SUCCESS) {
+                        break;
+                    }
+                }
+            } else {
+                /* Pull mode.
                This is the simple case, but we still need to run in a loop because stb_vorbis loves using 32-bit instead of 64-bit. */
+                while (totalFramesRead < frameCount) {
+                    ma_uint64 framesRemaining = (frameCount - totalFramesRead);
+                    int framesRead;
+
+                    if (framesRemaining > INT_MAX) {
+                        framesRemaining = INT_MAX;
+                    }
+
+                    framesRead = stb_vorbis_get_samples_float_interleaved(pVorbis->stb, channels, (float*)ma_offset_pcm_frames_ptr(pFramesOut, totalFramesRead, format, channels), (int)framesRemaining * channels);   /* Safe cast. */
+                    totalFramesRead += framesRead;
+
+                    if (framesRead < (int)framesRemaining) {
+                        break;  /* Nothing left to read. Get out. */
+                    }
+                }
+            }
+        } else {
+            result = MA_INVALID_ARGS;
+        }
+
+        pVorbis->cursor += totalFramesRead;
+
+        if (totalFramesRead == 0) {
+            result = MA_AT_END;
+        }
+
+        if (pFramesRead != NULL) {
+            *pFramesRead = totalFramesRead;
+        }
+
+        if (result == MA_SUCCESS && totalFramesRead == 0) {
+            result = MA_AT_END;
+        }
+
+        return result;
+    }
+    #else
+    {
+        /* vorbis is disabled. Should never hit this since initialization would have failed. */
+        MA_ASSERT(MA_FALSE);
+
+        (void)pFramesOut;
+        (void)frameCount;
+        (void)pFramesRead;
+
+        return MA_NOT_IMPLEMENTED;
+    }
+    #endif
+}
+
+MA_API ma_result ma_stbvorbis_seek_to_pcm_frame(ma_stbvorbis* pVorbis, ma_uint64 frameIndex)
+{
+    if (pVorbis == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    #if !defined(MA_NO_VORBIS)
+    {
+        /* Different seeking methods depending on whether or not we're using push mode. */
+        if (pVorbis->usingPushMode) {
+            /* Push mode. This is the complex case. */
+            ma_result result;
+            float buffer[4096];
+
+            /* If we're seeking backwards, we need to seek back to the start and then brute-force forward. */
+            if (frameIndex < pVorbis->cursor) {
+                if (frameIndex > 0x7FFFFFFF) {
+                    return MA_INVALID_ARGS; /* Trying to seek beyond the 32-bit maximum of stb_vorbis. */
+                }
+
+                /*
+                This is wildly inefficient due to me having trouble getting sample exact seeking working
+                robustly with stb_vorbis_flush_pushdata(). The only way I can think to make this work
+                perfectly is to reinitialize the decoder. Note that we only enter this path when seeking
+                backwards. This will hopefully be removed once we get our own Vorbis decoder implemented.
+                */
+                stb_vorbis_close(pVorbis->stb);
+                ma_free(pVorbis->push.pData, &pVorbis->allocationCallbacks);
+
+                MA_ZERO_OBJECT(&pVorbis->push);
+
+                /* Seek to the start of the file. */
+                result = pVorbis->onSeek(pVorbis->pReadSeekTellUserData, 0, ma_seek_origin_start);
+                if (result != MA_SUCCESS) {
+                    return result;
+                }
+
+                result = ma_stbvorbis_init_internal_decoder_push(pVorbis);
+                if (result != MA_SUCCESS) {
+                    return result;
+                }
+
+                /* At this point we should be sitting on the first frame. */
+                pVorbis->cursor = 0;
+            }
+
+            /* We're just brute-forcing this for now. */
+            while (pVorbis->cursor < frameIndex) {
+                ma_uint64 framesRead;
+                ma_uint64 framesToRead = ma_countof(buffer)/pVorbis->channels;
+                if (framesToRead > (frameIndex - pVorbis->cursor)) {
+                    framesToRead = (frameIndex - pVorbis->cursor);
+                }
+
+                result = ma_stbvorbis_read_pcm_frames(pVorbis, buffer, framesToRead, &framesRead);
+                if (result != MA_SUCCESS) {
+                    return result;
+                }
+            }
+        } else {
+            /* Pull mode. This is the simple case. */
+            int vorbisResult;
+
+            if (frameIndex > UINT_MAX) {
+                return MA_INVALID_ARGS; /* Trying to seek beyond the 32-bit maximum of stb_vorbis. */
+            }
+
+            vorbisResult = stb_vorbis_seek(pVorbis->stb, (unsigned int)frameIndex);    /* Safe cast. */
+            if (vorbisResult == 0) {
+                return MA_ERROR;    /* Seek failed.
*/ + } + + pVorbis->cursor = frameIndex; + } + + return MA_SUCCESS; + } + #else + { + /* vorbis is disabled. Should never hit this since initialization would have failed. */ + MA_ASSERT(MA_FALSE); + + (void)frameIndex; + + return MA_NOT_IMPLEMENTED; + } + #endif +} + +MA_API ma_result ma_stbvorbis_get_data_format(ma_stbvorbis* pVorbis, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate, ma_channel* pChannelMap, size_t channelMapCap) +{ + /* Defaults for safety. */ + if (pFormat != NULL) { + *pFormat = ma_format_unknown; + } + if (pChannels != NULL) { + *pChannels = 0; + } + if (pSampleRate != NULL) { + *pSampleRate = 0; + } + if (pChannelMap != NULL) { + MA_ZERO_MEMORY(pChannelMap, sizeof(*pChannelMap) * channelMapCap); + } + + if (pVorbis == NULL) { + return MA_INVALID_OPERATION; + } + + if (pFormat != NULL) { + *pFormat = pVorbis->format; + } + + #if !defined(MA_NO_VORBIS) + { + if (pChannels != NULL) { + *pChannels = pVorbis->channels; + } + + if (pSampleRate != NULL) { + *pSampleRate = pVorbis->sampleRate; + } + + if (pChannelMap != NULL) { + ma_channel_map_init_standard(ma_standard_channel_map_vorbis, pChannelMap, channelMapCap, pVorbis->channels); + } + + return MA_SUCCESS; + } + #else + { + /* vorbis is disabled. Should never hit this since initialization would have failed. */ + MA_ASSERT(MA_FALSE); + return MA_NOT_IMPLEMENTED; + } + #endif +} + +MA_API ma_result ma_stbvorbis_get_cursor_in_pcm_frames(ma_stbvorbis* pVorbis, ma_uint64* pCursor) +{ + if (pCursor == NULL) { + return MA_INVALID_ARGS; + } + + *pCursor = 0; /* Safety. */ + + if (pVorbis == NULL) { + return MA_INVALID_ARGS; + } + + #if !defined(MA_NO_VORBIS) + { + *pCursor = pVorbis->cursor; + + return MA_SUCCESS; + } + #else + { + /* vorbis is disabled. Should never hit this since initialization would have failed. */ + MA_ASSERT(MA_FALSE); + return MA_NOT_IMPLEMENTED; + } + #endif +} + +MA_API ma_result ma_stbvorbis_get_length_in_pcm_frames(ma_stbvorbis* pVorbis, ma_uint64* pLength) +{ + if (pLength == NULL) { + return MA_INVALID_ARGS; + } + + *pLength = 0; /* Safety. */ + + if (pVorbis == NULL) { + return MA_INVALID_ARGS; + } + + #if !defined(MA_NO_VORBIS) + { + if (pVorbis->usingPushMode) { + *pLength = 0; /* I don't know of a good way to determine this reliably with stb_vorbis and push mode. */ + } else { + *pLength = stb_vorbis_stream_length_in_samples(pVorbis->stb); + } + + return MA_SUCCESS; + } + #else + { + /* vorbis is disabled. Should never hit this since initialization would have failed. */ + MA_ASSERT(MA_FALSE); + return MA_NOT_IMPLEMENTED; + } + #endif +} + + +static ma_result ma_decoding_backend_init__stbvorbis(void* pUserData, ma_read_proc onRead, ma_seek_proc onSeek, ma_tell_proc onTell, void* pReadSeekTellUserData, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_data_source** ppBackend) +{ + ma_result result; + ma_stbvorbis* pVorbis; + + (void)pUserData; /* For now not using pUserData, but once we start storing the vorbis decoder state within the ma_decoder structure this will be set to the decoder so we can avoid a malloc. */ + + /* For now we're just allocating the decoder backend on the heap. 
*/ + pVorbis = (ma_stbvorbis*)ma_malloc(sizeof(*pVorbis), pAllocationCallbacks); + if (pVorbis == NULL) { + return MA_OUT_OF_MEMORY; + } + + result = ma_stbvorbis_init(onRead, onSeek, onTell, pReadSeekTellUserData, pConfig, pAllocationCallbacks, pVorbis); + if (result != MA_SUCCESS) { + ma_free(pVorbis, pAllocationCallbacks); + return result; + } + + *ppBackend = pVorbis; + + return MA_SUCCESS; +} + +static ma_result ma_decoding_backend_init_file__stbvorbis(void* pUserData, const char* pFilePath, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_data_source** ppBackend) +{ + ma_result result; + ma_stbvorbis* pVorbis; + + (void)pUserData; /* For now not using pUserData, but once we start storing the vorbis decoder state within the ma_decoder structure this will be set to the decoder so we can avoid a malloc. */ + + /* For now we're just allocating the decoder backend on the heap. */ + pVorbis = (ma_stbvorbis*)ma_malloc(sizeof(*pVorbis), pAllocationCallbacks); + if (pVorbis == NULL) { + return MA_OUT_OF_MEMORY; + } + + result = ma_stbvorbis_init_file(pFilePath, pConfig, pAllocationCallbacks, pVorbis); + if (result != MA_SUCCESS) { + ma_free(pVorbis, pAllocationCallbacks); + return result; + } + + *ppBackend = pVorbis; + + return MA_SUCCESS; +} + +static ma_result ma_decoding_backend_init_memory__stbvorbis(void* pUserData, const void* pData, size_t dataSize, const ma_decoding_backend_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_data_source** ppBackend) +{ + ma_result result; + ma_stbvorbis* pVorbis; + + (void)pUserData; /* For now not using pUserData, but once we start storing the vorbis decoder state within the ma_decoder structure this will be set to the decoder so we can avoid a malloc. */ + + /* For now we're just allocating the decoder backend on the heap. 
*/ + pVorbis = (ma_stbvorbis*)ma_malloc(sizeof(*pVorbis), pAllocationCallbacks); + if (pVorbis == NULL) { + return MA_OUT_OF_MEMORY; + } + + result = ma_stbvorbis_init_memory(pData, dataSize, pConfig, pAllocationCallbacks, pVorbis); + if (result != MA_SUCCESS) { + ma_free(pVorbis, pAllocationCallbacks); + return result; + } + + *ppBackend = pVorbis; + + return MA_SUCCESS; +} + +static void ma_decoding_backend_uninit__stbvorbis(void* pUserData, ma_data_source* pBackend, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_stbvorbis* pVorbis = (ma_stbvorbis*)pBackend; + + (void)pUserData; + + ma_stbvorbis_uninit(pVorbis, pAllocationCallbacks); + ma_free(pVorbis, pAllocationCallbacks); +} + +static ma_decoding_backend_vtable g_ma_decoding_backend_vtable_stbvorbis = +{ + ma_decoding_backend_init__stbvorbis, + ma_decoding_backend_init_file__stbvorbis, + NULL, /* onInitFileW() */ + ma_decoding_backend_init_memory__stbvorbis, + ma_decoding_backend_uninit__stbvorbis +}; + +static ma_result ma_decoder_init_vorbis__internal(const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + return ma_decoder_init_from_vtable__internal(&g_ma_decoding_backend_vtable_stbvorbis, NULL, pConfig, pDecoder); +} + +static ma_result ma_decoder_init_vorbis_from_file__internal(const char* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + return ma_decoder_init_from_file__internal(&g_ma_decoding_backend_vtable_stbvorbis, NULL, pFilePath, pConfig, pDecoder); +} + +static ma_result ma_decoder_init_vorbis_from_file_w__internal(const wchar_t* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + return ma_decoder_init_from_file_w__internal(&g_ma_decoding_backend_vtable_stbvorbis, NULL, pFilePath, pConfig, pDecoder); +} + +static ma_result ma_decoder_init_vorbis_from_memory__internal(const void* pData, size_t dataSize, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + return ma_decoder_init_from_memory__internal(&g_ma_decoding_backend_vtable_stbvorbis, NULL, pData, dataSize, pConfig, pDecoder); +} +#endif /* STB_VORBIS_INCLUDE_STB_VORBIS_H */ + + + +static ma_result ma_decoder__init_allocation_callbacks(const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + MA_ASSERT(pDecoder != NULL); + + if (pConfig != NULL) { + return ma_allocation_callbacks_init_copy(&pDecoder->allocationCallbacks, &pConfig->allocationCallbacks); + } else { + pDecoder->allocationCallbacks = ma_allocation_callbacks_init_default(); + return MA_SUCCESS; + } +} + +static ma_result ma_decoder__data_source_on_read(ma_data_source* pDataSource, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead) +{ + return ma_decoder_read_pcm_frames((ma_decoder*)pDataSource, pFramesOut, frameCount, pFramesRead); +} + +static ma_result ma_decoder__data_source_on_seek(ma_data_source* pDataSource, ma_uint64 frameIndex) +{ + return ma_decoder_seek_to_pcm_frame((ma_decoder*)pDataSource, frameIndex); +} + +static ma_result ma_decoder__data_source_on_get_data_format(ma_data_source* pDataSource, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate, ma_channel* pChannelMap, size_t channelMapCap) +{ + return ma_decoder_get_data_format((ma_decoder*)pDataSource, pFormat, pChannels, pSampleRate, pChannelMap, channelMapCap); +} + +static ma_result ma_decoder__data_source_on_get_cursor(ma_data_source* pDataSource, ma_uint64* pCursor) +{ + return ma_decoder_get_cursor_in_pcm_frames((ma_decoder*)pDataSource, pCursor); +} + +static ma_result ma_decoder__data_source_on_get_length(ma_data_source* pDataSource, 
ma_uint64* pLength) +{ + return ma_decoder_get_length_in_pcm_frames((ma_decoder*)pDataSource, pLength); +} + +static ma_data_source_vtable g_ma_decoder_data_source_vtable = +{ + ma_decoder__data_source_on_read, + ma_decoder__data_source_on_seek, + ma_decoder__data_source_on_get_data_format, + ma_decoder__data_source_on_get_cursor, + ma_decoder__data_source_on_get_length, + NULL, /* onSetLooping */ + 0 +}; + +static ma_result ma_decoder__preinit(ma_decoder_read_proc onRead, ma_decoder_seek_proc onSeek, ma_decoder_tell_proc onTell, void* pUserData, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + ma_result result; + ma_data_source_config dataSourceConfig; + + MA_ASSERT(pConfig != NULL); + + if (pDecoder == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pDecoder); + + dataSourceConfig = ma_data_source_config_init(); + dataSourceConfig.vtable = &g_ma_decoder_data_source_vtable; + + result = ma_data_source_init(&dataSourceConfig, &pDecoder->ds); + if (result != MA_SUCCESS) { + return result; + } + + pDecoder->onRead = onRead; + pDecoder->onSeek = onSeek; + pDecoder->onTell = onTell; + pDecoder->pUserData = pUserData; + + result = ma_decoder__init_allocation_callbacks(pConfig, pDecoder); + if (result != MA_SUCCESS) { + ma_data_source_uninit(&pDecoder->ds); + return result; + } + + return MA_SUCCESS; +} + +static ma_result ma_decoder__postinit(const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + ma_result result; + + result = ma_decoder__init_data_converter(pDecoder, pConfig); + + /* If we failed post initialization we need to uninitialize the decoder before returning to prevent a memory leak. */ + if (result != MA_SUCCESS) { + ma_decoder_uninit(pDecoder); + return result; + } + + return result; +} + + +static ma_result ma_decoder_init__internal(ma_decoder_read_proc onRead, ma_decoder_seek_proc onSeek, void* pUserData, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + ma_result result = MA_NO_BACKEND; + + MA_ASSERT(pConfig != NULL); + MA_ASSERT(pDecoder != NULL); + + /* Silence some warnings in the case that we don't have any decoder backends enabled. */ + (void)onRead; + (void)onSeek; + (void)pUserData; + + + /* If we've specified a specific encoding type, try that first. */ + if (pConfig->encodingFormat != ma_encoding_format_unknown) { + #ifdef MA_HAS_WAV + if (pConfig->encodingFormat == ma_encoding_format_wav) { + result = ma_decoder_init_wav__internal(pConfig, pDecoder); + } + #endif + #ifdef MA_HAS_FLAC + if (pConfig->encodingFormat == ma_encoding_format_flac) { + result = ma_decoder_init_flac__internal(pConfig, pDecoder); + } + #endif + #ifdef MA_HAS_MP3 + if (pConfig->encodingFormat == ma_encoding_format_mp3) { + result = ma_decoder_init_mp3__internal(pConfig, pDecoder); + } + #endif + #ifdef MA_HAS_VORBIS + if (pConfig->encodingFormat == ma_encoding_format_vorbis) { + result = ma_decoder_init_vorbis__internal(pConfig, pDecoder); + } + #endif + + /* If we weren't able to initialize the decoder, seek back to the start to give the next attempts a clean start. */ + if (result != MA_SUCCESS) { + onSeek(pDecoder, 0, ma_seek_origin_start); + } + } + + if (result != MA_SUCCESS) { + /* Getting here means we couldn't load a specific decoding backend based on the encoding format. */ + + /* + We use trial and error to open a decoder. We prioritize custom decoders so that if they + implement the same encoding format they take priority over the built-in decoders. 
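+
+        As an illustrative sketch (not part of the upstream sources; the vtable array and
+        count are hypothetical, and the ppCustomBackendVTables/customBackendCount fields
+        are assumed from the ma_decoder_config of recent miniaudio revisions), custom
+        backends are registered through the config before initialization:
+
+            ma_decoder_config config = ma_decoder_config_init_default();
+            config.ppCustomBackendVTables = pMyBackendVTables; /* Hypothetical array of ma_decoding_backend_vtable pointers. */
+            config.customBackendCount     = myBackendCount;    /* Hypothetical count. */
+
+        Backends registered this way are attempted by ma_decoder_init_custom__internal()
+        below before any of the stock WAV/FLAC/MP3/Vorbis decoders.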
+        */
+        if (result != MA_SUCCESS) {
+            result = ma_decoder_init_custom__internal(pConfig, pDecoder);
+            if (result != MA_SUCCESS) {
+                onSeek(pDecoder, 0, ma_seek_origin_start);
+            }
+        }
+
+        /*
+        If we get to this point and we still haven't found a decoder, and the caller has requested a
+        specific encoding format, there's no hope for it. Abort. Note the result check: a custom
+        decoder that succeeded for the requested format must not be discarded here.
+        */
+        if (result != MA_SUCCESS && pConfig->encodingFormat != ma_encoding_format_unknown) {
+            return MA_NO_BACKEND;
+        }
+
+        #ifdef MA_HAS_WAV
+        if (result != MA_SUCCESS) {
+            result = ma_decoder_init_wav__internal(pConfig, pDecoder);
+            if (result != MA_SUCCESS) {
+                onSeek(pDecoder, 0, ma_seek_origin_start);
+            }
+        }
+        #endif
+        #ifdef MA_HAS_FLAC
+        if (result != MA_SUCCESS) {
+            result = ma_decoder_init_flac__internal(pConfig, pDecoder);
+            if (result != MA_SUCCESS) {
+                onSeek(pDecoder, 0, ma_seek_origin_start);
+            }
+        }
+        #endif
+        #ifdef MA_HAS_MP3
+        if (result != MA_SUCCESS) {
+            result = ma_decoder_init_mp3__internal(pConfig, pDecoder);
+            if (result != MA_SUCCESS) {
+                onSeek(pDecoder, 0, ma_seek_origin_start);
+            }
+        }
+        #endif
+        #ifdef MA_HAS_VORBIS
+        if (result != MA_SUCCESS) {
+            result = ma_decoder_init_vorbis__internal(pConfig, pDecoder);
+            if (result != MA_SUCCESS) {
+                onSeek(pDecoder, 0, ma_seek_origin_start);
+            }
+        }
+        #endif
+    }
+
+    if (result != MA_SUCCESS) {
+        return result;
+    }
+
+    return ma_decoder__postinit(pConfig, pDecoder);
+}
+
+MA_API ma_result ma_decoder_init(ma_decoder_read_proc onRead, ma_decoder_seek_proc onSeek, void* pUserData, const ma_decoder_config* pConfig, ma_decoder* pDecoder)
+{
+    ma_decoder_config config;
+    ma_result result;
+
+    config = ma_decoder_config_init_copy(pConfig);
+
+    result = ma_decoder__preinit(onRead, onSeek, NULL, pUserData, &config, pDecoder);
+    if (result != MA_SUCCESS) {
+        return result;
+    }
+
+    return ma_decoder_init__internal(onRead, onSeek, pUserData, &config, pDecoder);
+}
+
+
+static ma_result ma_decoder__on_read_memory(ma_decoder* pDecoder, void* pBufferOut, size_t bytesToRead, size_t* pBytesRead)
+{
+    size_t bytesRemaining;
+
+    MA_ASSERT(pDecoder->data.memory.dataSize >= pDecoder->data.memory.currentReadPos);
+
+    if (pBytesRead != NULL) {
+        *pBytesRead = 0;
+    }
+
+    bytesRemaining = pDecoder->data.memory.dataSize - pDecoder->data.memory.currentReadPos;
+    if (bytesToRead > bytesRemaining) {
+        bytesToRead = bytesRemaining;
+    }
+
+    if (bytesRemaining == 0) {
+        return MA_AT_END;
+    }
+
+    if (bytesToRead > 0) {
+        MA_COPY_MEMORY(pBufferOut, pDecoder->data.memory.pData + pDecoder->data.memory.currentReadPos, bytesToRead);
+        pDecoder->data.memory.currentReadPos += bytesToRead;
+    }
+
+    if (pBytesRead != NULL) {
+        *pBytesRead = bytesToRead;
+    }
+
+    return MA_SUCCESS;
+}
+
+static ma_result ma_decoder__on_seek_memory(ma_decoder* pDecoder, ma_int64 byteOffset, ma_seek_origin origin)
+{
+    if (byteOffset > 0 && (ma_uint64)byteOffset > MA_SIZE_MAX) {
+        return MA_BAD_SEEK;
+    }
+
+    if (origin == ma_seek_origin_current) {
+        if (byteOffset > 0) {
+            if (pDecoder->data.memory.currentReadPos + byteOffset > pDecoder->data.memory.dataSize) {
+                byteOffset = (ma_int64)(pDecoder->data.memory.dataSize - pDecoder->data.memory.currentReadPos); /* Trying to seek too far forward. */
+            }
+
+            pDecoder->data.memory.currentReadPos += (size_t)byteOffset;
+        } else {
+            if (pDecoder->data.memory.currentReadPos < (size_t)-byteOffset) {
+                byteOffset = -(ma_int64)pDecoder->data.memory.currentReadPos; /* Trying to seek too far backwards.
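+                   For example, with currentReadPos = 10 and byteOffset = -25 the offset is
+                   clamped to -10, so the cursor lands on 0 instead of underflowing the
+                   unsigned size_t arithmetic below.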
*/ + } + + pDecoder->data.memory.currentReadPos -= (size_t)-byteOffset; + } + } else { + if (origin == ma_seek_origin_end) { + if (byteOffset < 0) { + byteOffset = -byteOffset; + } + + if (byteOffset > (ma_int64)pDecoder->data.memory.dataSize) { + pDecoder->data.memory.currentReadPos = 0; /* Trying to seek too far back. */ + } else { + pDecoder->data.memory.currentReadPos = pDecoder->data.memory.dataSize - (size_t)byteOffset; + } + } else { + if ((size_t)byteOffset <= pDecoder->data.memory.dataSize) { + pDecoder->data.memory.currentReadPos = (size_t)byteOffset; + } else { + pDecoder->data.memory.currentReadPos = pDecoder->data.memory.dataSize; /* Trying to seek too far forward. */ + } + } + } + + return MA_SUCCESS; +} + +static ma_result ma_decoder__on_tell_memory(ma_decoder* pDecoder, ma_int64* pCursor) +{ + MA_ASSERT(pDecoder != NULL); + MA_ASSERT(pCursor != NULL); + + *pCursor = (ma_int64)pDecoder->data.memory.currentReadPos; + + return MA_SUCCESS; +} + +static ma_result ma_decoder__preinit_memory_wrapper(const void* pData, size_t dataSize, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + ma_result result = ma_decoder__preinit(ma_decoder__on_read_memory, ma_decoder__on_seek_memory, ma_decoder__on_tell_memory, NULL, pConfig, pDecoder); + if (result != MA_SUCCESS) { + return result; + } + + if (pData == NULL || dataSize == 0) { + return MA_INVALID_ARGS; + } + + pDecoder->data.memory.pData = (const ma_uint8*)pData; + pDecoder->data.memory.dataSize = dataSize; + pDecoder->data.memory.currentReadPos = 0; + + (void)pConfig; + return MA_SUCCESS; +} + +MA_API ma_result ma_decoder_init_memory(const void* pData, size_t dataSize, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + ma_result result; + ma_decoder_config config; + + config = ma_decoder_config_init_copy(pConfig); + + result = ma_decoder__preinit(NULL, NULL, NULL, NULL, &config, pDecoder); + if (result != MA_SUCCESS) { + return result; + } + + if (pData == NULL || dataSize == 0) { + return MA_INVALID_ARGS; + } + + /* If the backend has support for loading from a file path we'll want to use that. If that all fails we'll fall back to the VFS path. */ + result = MA_NO_BACKEND; + + if (config.encodingFormat != ma_encoding_format_unknown) { + #ifdef MA_HAS_WAV + if (config.encodingFormat == ma_encoding_format_wav) { + result = ma_decoder_init_wav_from_memory__internal(pData, dataSize, &config, pDecoder); + } + #endif + #ifdef MA_HAS_FLAC + if (config.encodingFormat == ma_encoding_format_flac) { + result = ma_decoder_init_flac_from_memory__internal(pData, dataSize, &config, pDecoder); + } + #endif + #ifdef MA_HAS_MP3 + if (config.encodingFormat == ma_encoding_format_mp3) { + result = ma_decoder_init_mp3_from_memory__internal(pData, dataSize, &config, pDecoder); + } + #endif + #ifdef MA_HAS_VORBIS + if (config.encodingFormat == ma_encoding_format_vorbis) { + result = ma_decoder_init_vorbis_from_memory__internal(pData, dataSize, &config, pDecoder); + } + #endif + } + + if (result != MA_SUCCESS) { + /* Getting here means we weren't able to initialize a decoder of a specific encoding format. */ + + /* + We use trial and error to open a decoder. We prioritize custom decoders so that if they + implement the same encoding format they take priority over the built-in decoders. + */ + result = ma_decoder_init_custom_from_memory__internal(pData, dataSize, &config, pDecoder); + + /* + If we get to this point and we still haven't found a decoder, and the caller has requested a + specific encoding format, there's no hope for it. 
Abort.
+        */
+        if (result != MA_SUCCESS && config.encodingFormat != ma_encoding_format_unknown) {
+            return MA_NO_BACKEND;
+        }
+
+        /* Use trial and error for stock decoders. */
+        if (result != MA_SUCCESS) {
+            #ifdef MA_HAS_WAV
+            if (result != MA_SUCCESS) {
+                result = ma_decoder_init_wav_from_memory__internal(pData, dataSize, &config, pDecoder);
+            }
+            #endif
+            #ifdef MA_HAS_FLAC
+            if (result != MA_SUCCESS) {
+                result = ma_decoder_init_flac_from_memory__internal(pData, dataSize, &config, pDecoder);
+            }
+            #endif
+            #ifdef MA_HAS_MP3
+            if (result != MA_SUCCESS) {
+                result = ma_decoder_init_mp3_from_memory__internal(pData, dataSize, &config, pDecoder);
+            }
+            #endif
+            #ifdef MA_HAS_VORBIS
+            if (result != MA_SUCCESS) {
+                result = ma_decoder_init_vorbis_from_memory__internal(pData, dataSize, &config, pDecoder);
+            }
+            #endif
+        }
+    }
+
+    /*
+    If at this point we still haven't successfully initialized the decoder it most likely means
+    the backend doesn't have an implementation for loading from a block of memory. We'll fall
+    back to miniaudio's own memory wrapper below.
+    */
+    if (result == MA_SUCCESS) {
+        /* Initialization was successful. Finish up. */
+        result = ma_decoder__postinit(&config, pDecoder);
+        if (result != MA_SUCCESS) {
+            /*
+            The backend was initialized successfully, but for some reason post-initialization failed. This is most likely
+            due to an out of memory error. We're going to abort with an error here and not try to recover.
+            */
+            if (pDecoder->pBackendVTable != NULL && pDecoder->pBackendVTable->onUninit != NULL) {
+                pDecoder->pBackendVTable->onUninit(pDecoder->pBackendUserData, &pDecoder->pBackend, &pDecoder->allocationCallbacks);
+            }
+
+            return result;
+        }
+    } else {
+        /* Probably no implementation for loading from a block of memory. Use miniaudio's abstraction instead. */
+        result = ma_decoder__preinit_memory_wrapper(pData, dataSize, &config, pDecoder);
+        if (result != MA_SUCCESS) {
+            return result;
+        }
+
+        result = ma_decoder_init__internal(ma_decoder__on_read_memory, ma_decoder__on_seek_memory, NULL, &config, pDecoder);
+        if (result != MA_SUCCESS) {
+            return result;
+        }
+    }
+
+    return MA_SUCCESS;
+}
+
+
+#if defined(MA_HAS_WAV)    || \
+    defined(MA_HAS_MP3)    || \
+    defined(MA_HAS_FLAC)   || \
+    defined(MA_HAS_VORBIS)
+#define MA_HAS_PATH_API
+#endif
+
+#if defined(MA_HAS_PATH_API)
+static const char* ma_path_file_name(const char* path)
+{
+    const char* fileName;
+
+    if (path == NULL) {
+        return NULL;
+    }
+
+    fileName = path;
+
+    /* We just loop through the path until we find the last slash. */
+    while (path[0] != '\0') {
+        if (path[0] == '/' || path[0] == '\\') {
+            fileName = path;
+        }
+
+        path += 1;
+    }
+
+    /* At this point the file name is sitting on a slash, so just move forward. */
+    while (fileName[0] != '\0' && (fileName[0] == '/' || fileName[0] == '\\')) {
+        fileName += 1;
+    }
+
+    return fileName;
+}
+
+static const wchar_t* ma_path_file_name_w(const wchar_t* path)
+{
+    const wchar_t* fileName;
+
+    if (path == NULL) {
+        return NULL;
+    }
+
+    fileName = path;
+
+    /* We just loop through the path until we find the last slash. */
+    while (path[0] != '\0') {
+        if (path[0] == '/' || path[0] == '\\') {
+            fileName = path;
+        }
+
+        path += 1;
+    }
+
+    /* At this point the file name is sitting on a slash, so just move forward.
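+       For example (hypothetical input), ma_path_file_name_w(L"assets/sfx/alarm.ogg") walks
+       past the final separator and returns a pointer to the L"alarm.ogg" suffix.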
*/ + while (fileName[0] != '\0' && (fileName[0] == '/' || fileName[0] == '\\')) { + fileName += 1; + } + + return fileName; +} + + +static const char* ma_path_extension(const char* path) +{ + const char* extension; + const char* lastOccurance; + + if (path == NULL) { + path = ""; + } + + extension = ma_path_file_name(path); + lastOccurance = NULL; + + /* Just find the last '.' and return. */ + while (extension[0] != '\0') { + if (extension[0] == '.') { + extension += 1; + lastOccurance = extension; + } + + extension += 1; + } + + return (lastOccurance != NULL) ? lastOccurance : extension; +} + +static const wchar_t* ma_path_extension_w(const wchar_t* path) +{ + const wchar_t* extension; + const wchar_t* lastOccurance; + + if (path == NULL) { + path = L""; + } + + extension = ma_path_file_name_w(path); + lastOccurance = NULL; + + /* Just find the last '.' and return. */ + while (extension[0] != '\0') { + if (extension[0] == '.') { + extension += 1; + lastOccurance = extension; + } + + extension += 1; + } + + return (lastOccurance != NULL) ? lastOccurance : extension; +} + + +static ma_bool32 ma_path_extension_equal(const char* path, const char* extension) +{ + const char* ext1; + const char* ext2; + + if (path == NULL || extension == NULL) { + return MA_FALSE; + } + + ext1 = extension; + ext2 = ma_path_extension(path); + +#if defined(_MSC_VER) || defined(__DMC__) + return _stricmp(ext1, ext2) == 0; +#else + return strcasecmp(ext1, ext2) == 0; +#endif +} + +static ma_bool32 ma_path_extension_equal_w(const wchar_t* path, const wchar_t* extension) +{ + const wchar_t* ext1; + const wchar_t* ext2; + + if (path == NULL || extension == NULL) { + return MA_FALSE; + } + + ext1 = extension; + ext2 = ma_path_extension_w(path); + +#if defined(_MSC_VER) || defined(__WATCOMC__) || defined(__DMC__) + return _wcsicmp(ext1, ext2) == 0; +#else + /* + I'm not aware of a wide character version of strcasecmp(). I'm therefore converting the extensions to multibyte strings and comparing those. This + isn't the most efficient way to do it, but it should work OK. 
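+
+    For example (hypothetical input), ma_path_extension_equal_w(L"TRACK01.FLAC", L"flac")
+    converts both extensions with wcsrtombs() and compares them case-insensitively, giving
+    MA_TRUE. Note that the conversion uses the current C locale and assumes each extension
+    fits in the 4096 byte temporary buffers below.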
+ */ + { + char ext1MB[4096]; + char ext2MB[4096]; + const wchar_t* pext1 = ext1; + const wchar_t* pext2 = ext2; + mbstate_t mbs1; + mbstate_t mbs2; + + MA_ZERO_OBJECT(&mbs1); + MA_ZERO_OBJECT(&mbs2); + + if (wcsrtombs(ext1MB, &pext1, sizeof(ext1MB), &mbs1) == (size_t)-1) { + return MA_FALSE; + } + if (wcsrtombs(ext2MB, &pext2, sizeof(ext2MB), &mbs2) == (size_t)-1) { + return MA_FALSE; + } + + return strcasecmp(ext1MB, ext2MB) == 0; + } +#endif +} +#endif /* MA_HAS_PATH_API */ + + + +static ma_result ma_decoder__on_read_vfs(ma_decoder* pDecoder, void* pBufferOut, size_t bytesToRead, size_t* pBytesRead) +{ + MA_ASSERT(pDecoder != NULL); + MA_ASSERT(pBufferOut != NULL); + + return ma_vfs_or_default_read(pDecoder->data.vfs.pVFS, pDecoder->data.vfs.file, pBufferOut, bytesToRead, pBytesRead); +} + +static ma_result ma_decoder__on_seek_vfs(ma_decoder* pDecoder, ma_int64 offset, ma_seek_origin origin) +{ + MA_ASSERT(pDecoder != NULL); + + return ma_vfs_or_default_seek(pDecoder->data.vfs.pVFS, pDecoder->data.vfs.file, offset, origin); +} + +static ma_result ma_decoder__on_tell_vfs(ma_decoder* pDecoder, ma_int64* pCursor) +{ + MA_ASSERT(pDecoder != NULL); + + return ma_vfs_or_default_tell(pDecoder->data.vfs.pVFS, pDecoder->data.vfs.file, pCursor); +} + +static ma_result ma_decoder__preinit_vfs(ma_vfs* pVFS, const char* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + ma_result result; + ma_vfs_file file; + + result = ma_decoder__preinit(ma_decoder__on_read_vfs, ma_decoder__on_seek_vfs, ma_decoder__on_tell_vfs, NULL, pConfig, pDecoder); + if (result != MA_SUCCESS) { + return result; + } + + if (pFilePath == NULL || pFilePath[0] == '\0') { + return MA_INVALID_ARGS; + } + + result = ma_vfs_or_default_open(pVFS, pFilePath, MA_OPEN_MODE_READ, &file); + if (result != MA_SUCCESS) { + return result; + } + + pDecoder->data.vfs.pVFS = pVFS; + pDecoder->data.vfs.file = file; + + return MA_SUCCESS; +} + +MA_API ma_result ma_decoder_init_vfs(ma_vfs* pVFS, const char* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + ma_result result; + ma_decoder_config config; + + config = ma_decoder_config_init_copy(pConfig); + result = ma_decoder__preinit_vfs(pVFS, pFilePath, &config, pDecoder); + if (result != MA_SUCCESS) { + return result; + } + + result = MA_NO_BACKEND; + + if (config.encodingFormat != ma_encoding_format_unknown) { + #ifdef MA_HAS_WAV + if (config.encodingFormat == ma_encoding_format_wav) { + result = ma_decoder_init_wav__internal(&config, pDecoder); + } + #endif + #ifdef MA_HAS_FLAC + if (config.encodingFormat == ma_encoding_format_flac) { + result = ma_decoder_init_flac__internal(&config, pDecoder); + } + #endif + #ifdef MA_HAS_MP3 + if (config.encodingFormat == ma_encoding_format_mp3) { + result = ma_decoder_init_mp3__internal(&config, pDecoder); + } + #endif + #ifdef MA_HAS_VORBIS + if (config.encodingFormat == ma_encoding_format_vorbis) { + result = ma_decoder_init_vorbis__internal(&config, pDecoder); + } + #endif + + /* Make sure we seek back to the start if we didn't initialize a decoder successfully so the next attempts have a fresh start. */ + if (result != MA_SUCCESS) { + ma_decoder__on_seek_vfs(pDecoder, 0, ma_seek_origin_start); + } + } + + if (result != MA_SUCCESS) { + /* Getting here means we weren't able to initialize a decoder of a specific encoding format. */ + + /* + We use trial and error to open a decoder. We prioritize custom decoders so that if they + implement the same encoding format they take priority over the built-in decoders. 
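+
+        As an illustrative sketch (not part of the upstream sources; the file name is
+        hypothetical), a NULL VFS selects the default file system backend:
+
+            ma_decoder decoder;
+            if (ma_decoder_init_vfs(NULL, "alarm.mp3", NULL, &decoder) == MA_SUCCESS) {
+                /* ... read frames here ... */
+                ma_decoder_uninit(&decoder);
+            }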
+        */
+        if (result != MA_SUCCESS) {
+            result = ma_decoder_init_custom__internal(&config, pDecoder);
+            if (result != MA_SUCCESS) {
+                ma_decoder__on_seek_vfs(pDecoder, 0, ma_seek_origin_start);
+            }
+        }
+
+        /*
+        If we get to this point and we still haven't found a decoder, and the caller has requested a
+        specific encoding format, there's no hope for it. Abort. Note the result check: a custom
+        decoder that succeeded for the requested format must not be discarded here.
+        */
+        if (result != MA_SUCCESS && config.encodingFormat != ma_encoding_format_unknown) {
+            ma_vfs_or_default_close(pVFS, pDecoder->data.vfs.file); /* The file was opened by ma_decoder__preinit_vfs(), so close it before aborting. */
+            return MA_NO_BACKEND;
+        }
+
+        #ifdef MA_HAS_WAV
+        if (result != MA_SUCCESS && ma_path_extension_equal(pFilePath, "wav")) {
+            result = ma_decoder_init_wav__internal(&config, pDecoder);
+            if (result != MA_SUCCESS) {
+                ma_decoder__on_seek_vfs(pDecoder, 0, ma_seek_origin_start);
+            }
+        }
+        #endif
+        #ifdef MA_HAS_FLAC
+        if (result != MA_SUCCESS && ma_path_extension_equal(pFilePath, "flac")) {
+            result = ma_decoder_init_flac__internal(&config, pDecoder);
+            if (result != MA_SUCCESS) {
+                ma_decoder__on_seek_vfs(pDecoder, 0, ma_seek_origin_start);
+            }
+        }
+        #endif
+        #ifdef MA_HAS_MP3
+        if (result != MA_SUCCESS && ma_path_extension_equal(pFilePath, "mp3")) {
+            result = ma_decoder_init_mp3__internal(&config, pDecoder);
+            if (result != MA_SUCCESS) {
+                ma_decoder__on_seek_vfs(pDecoder, 0, ma_seek_origin_start);
+            }
+        }
+        #endif
+    }
+
+    /* If we still haven't got a result just use trial and error. Otherwise we can finish up. */
+    if (result != MA_SUCCESS) {
+        result = ma_decoder_init__internal(ma_decoder__on_read_vfs, ma_decoder__on_seek_vfs, NULL, &config, pDecoder);
+    } else {
+        result = ma_decoder__postinit(&config, pDecoder);
+    }
+
+    if (result != MA_SUCCESS) {
+        if (pDecoder->data.vfs.file != NULL) { /* <-- Will be reset to NULL if ma_decoder_uninit() is called in one of the steps above which allows us to avoid a double close of the file.
*/ + ma_vfs_or_default_close(pVFS, pDecoder->data.vfs.file); + } + + return result; + } + + return MA_SUCCESS; +} + + +static ma_result ma_decoder__preinit_vfs_w(ma_vfs* pVFS, const wchar_t* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + ma_result result; + ma_vfs_file file; + + result = ma_decoder__preinit(ma_decoder__on_read_vfs, ma_decoder__on_seek_vfs, ma_decoder__on_tell_vfs, NULL, pConfig, pDecoder); + if (result != MA_SUCCESS) { + return result; + } + + if (pFilePath == NULL || pFilePath[0] == '\0') { + return MA_INVALID_ARGS; + } + + result = ma_vfs_or_default_open_w(pVFS, pFilePath, MA_OPEN_MODE_READ, &file); + if (result != MA_SUCCESS) { + return result; + } + + pDecoder->data.vfs.pVFS = pVFS; + pDecoder->data.vfs.file = file; + + return MA_SUCCESS; +} + +MA_API ma_result ma_decoder_init_vfs_w(ma_vfs* pVFS, const wchar_t* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + ma_result result; + ma_decoder_config config; + + config = ma_decoder_config_init_copy(pConfig); + result = ma_decoder__preinit_vfs_w(pVFS, pFilePath, &config, pDecoder); + if (result != MA_SUCCESS) { + return result; + } + + result = MA_NO_BACKEND; + + if (config.encodingFormat != ma_encoding_format_unknown) { + #ifdef MA_HAS_WAV + if (config.encodingFormat == ma_encoding_format_wav) { + result = ma_decoder_init_wav__internal(&config, pDecoder); + } + #endif + #ifdef MA_HAS_FLAC + if (config.encodingFormat == ma_encoding_format_flac) { + result = ma_decoder_init_flac__internal(&config, pDecoder); + } + #endif + #ifdef MA_HAS_MP3 + if (config.encodingFormat == ma_encoding_format_mp3) { + result = ma_decoder_init_mp3__internal(&config, pDecoder); + } + #endif + #ifdef MA_HAS_VORBIS + if (config.encodingFormat == ma_encoding_format_vorbis) { + result = ma_decoder_init_vorbis__internal(&config, pDecoder); + } + #endif + + /* Make sure we seek back to the start if we didn't initialize a decoder successfully so the next attempts have a fresh start. */ + if (result != MA_SUCCESS) { + ma_decoder__on_seek_vfs(pDecoder, 0, ma_seek_origin_start); + } + } + + if (result != MA_SUCCESS) { + /* Getting here means we weren't able to initialize a decoder of a specific encoding format. */ + + /* + We use trial and error to open a decoder. We prioritize custom decoders so that if they + implement the same encoding format they take priority over the built-in decoders. + */ + if (result != MA_SUCCESS) { + result = ma_decoder_init_custom__internal(&config, pDecoder); + if (result != MA_SUCCESS) { + ma_decoder__on_seek_vfs(pDecoder, 0, ma_seek_origin_start); + } + } + + /* + If we get to this point and we still haven't found a decoder, and the caller has requested a + specific encoding format, there's no hope for it. Abort. 
+        */
+        if (result != MA_SUCCESS && config.encodingFormat != ma_encoding_format_unknown) { /* The result check ensures a successful custom decoder is not discarded. */
+            ma_vfs_or_default_close(pVFS, pDecoder->data.vfs.file); /* Close the file opened by ma_decoder__preinit_vfs_w() before aborting. */
+            return MA_NO_BACKEND;
+        }
+
+        #ifdef MA_HAS_WAV
+        if (result != MA_SUCCESS && ma_path_extension_equal_w(pFilePath, L"wav")) {
+            result = ma_decoder_init_wav__internal(&config, pDecoder);
+            if (result != MA_SUCCESS) {
+                ma_decoder__on_seek_vfs(pDecoder, 0, ma_seek_origin_start);
+            }
+        }
+        #endif
+        #ifdef MA_HAS_FLAC
+        if (result != MA_SUCCESS && ma_path_extension_equal_w(pFilePath, L"flac")) {
+            result = ma_decoder_init_flac__internal(&config, pDecoder);
+            if (result != MA_SUCCESS) {
+                ma_decoder__on_seek_vfs(pDecoder, 0, ma_seek_origin_start);
+            }
+        }
+        #endif
+        #ifdef MA_HAS_MP3
+        if (result != MA_SUCCESS && ma_path_extension_equal_w(pFilePath, L"mp3")) {
+            result = ma_decoder_init_mp3__internal(&config, pDecoder);
+            if (result != MA_SUCCESS) {
+                ma_decoder__on_seek_vfs(pDecoder, 0, ma_seek_origin_start);
+            }
+        }
+        #endif
+    }
+
+    /* If we still haven't got a result just use trial and error. Otherwise we can finish up. */
+    if (result != MA_SUCCESS) {
+        result = ma_decoder_init__internal(ma_decoder__on_read_vfs, ma_decoder__on_seek_vfs, NULL, &config, pDecoder);
+    } else {
+        result = ma_decoder__postinit(&config, pDecoder);
+    }
+
+    if (result != MA_SUCCESS) {
+        ma_vfs_or_default_close(pVFS, pDecoder->data.vfs.file);
+        return result;
+    }
+
+    return MA_SUCCESS;
+}
+
+
+static ma_result ma_decoder__preinit_file(const char* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder)
+{
+    ma_result result;
+
+    result = ma_decoder__preinit(NULL, NULL, NULL, NULL, pConfig, pDecoder);
+    if (result != MA_SUCCESS) {
+        return result;
+    }
+
+    if (pFilePath == NULL || pFilePath[0] == '\0') {
+        return MA_INVALID_ARGS;
+    }
+
+    return MA_SUCCESS;
+}
+
+MA_API ma_result ma_decoder_init_file(const char* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder)
+{
+    ma_result result;
+    ma_decoder_config config;
+
+    config = ma_decoder_config_init_copy(pConfig);
+    result = ma_decoder__preinit_file(pFilePath, &config, pDecoder);
+    if (result != MA_SUCCESS) {
+        return result;
+    }
+
+    /* If the backend has support for loading from a file path we'll want to use that. If that all fails we'll fall back to the VFS path. */
+    result = MA_NO_BACKEND;
+
+    if (config.encodingFormat != ma_encoding_format_unknown) {
+        #ifdef MA_HAS_WAV
+        if (config.encodingFormat == ma_encoding_format_wav) {
+            result = ma_decoder_init_wav_from_file__internal(pFilePath, &config, pDecoder);
+        }
+        #endif
+        #ifdef MA_HAS_FLAC
+        if (config.encodingFormat == ma_encoding_format_flac) {
+            result = ma_decoder_init_flac_from_file__internal(pFilePath, &config, pDecoder);
+        }
+        #endif
+        #ifdef MA_HAS_MP3
+        if (config.encodingFormat == ma_encoding_format_mp3) {
+            result = ma_decoder_init_mp3_from_file__internal(pFilePath, &config, pDecoder);
+        }
+        #endif
+        #ifdef MA_HAS_VORBIS
+        if (config.encodingFormat == ma_encoding_format_vorbis) {
+            result = ma_decoder_init_vorbis_from_file__internal(pFilePath, &config, pDecoder);
+        }
+        #endif
+    }
+
+    if (result != MA_SUCCESS) {
+        /* Getting here means we weren't able to initialize a decoder of a specific encoding format. */
+
+        /*
+        We use trial and error to open a decoder. We prioritize custom decoders so that if they
+        implement the same encoding format they take priority over the built-in decoders.
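+
+        As an illustrative sketch (not part of the upstream sources; the file name is
+        hypothetical), a config can force the decoder's output format up front:
+
+            ma_decoder_config config = ma_decoder_config_init(ma_format_f32, 2, 48000);
+            ma_decoder decoder;
+            ma_result result = ma_decoder_init_file("music.flac", &config, &decoder);
+
+        Whatever the backend natively produces is then converted to f32, stereo, 48kHz by
+        the data converter set up in ma_decoder__postinit().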
+ */ + result = ma_decoder_init_custom_from_file__internal(pFilePath, &config, pDecoder); + + /* + If we get to this point and we still haven't found a decoder, and the caller has requested a + specific encoding format, there's no hope for it. Abort. + */ + if (result != MA_SUCCESS && config.encodingFormat != ma_encoding_format_unknown) { + return MA_NO_BACKEND; + } + + /* First try loading based on the file extension so we don't waste time opening and closing files. */ + #ifdef MA_HAS_WAV + if (result != MA_SUCCESS && ma_path_extension_equal(pFilePath, "wav")) { + result = ma_decoder_init_wav_from_file__internal(pFilePath, &config, pDecoder); + } + #endif + #ifdef MA_HAS_FLAC + if (result != MA_SUCCESS && ma_path_extension_equal(pFilePath, "flac")) { + result = ma_decoder_init_flac_from_file__internal(pFilePath, &config, pDecoder); + } + #endif + #ifdef MA_HAS_MP3 + if (result != MA_SUCCESS && ma_path_extension_equal(pFilePath, "mp3")) { + result = ma_decoder_init_mp3_from_file__internal(pFilePath, &config, pDecoder); + } + #endif + #ifdef MA_HAS_VORBIS + if (result != MA_SUCCESS && ma_path_extension_equal(pFilePath, "ogg")) { + result = ma_decoder_init_vorbis_from_file__internal(pFilePath, &config, pDecoder); + } + #endif + + /* + If we still haven't got a result just use trial and error. Custom decoders have already been attempted, so here we + need only iterate over our stock decoders. + */ + if (result != MA_SUCCESS) { + #ifdef MA_HAS_WAV + if (result != MA_SUCCESS) { + result = ma_decoder_init_wav_from_file__internal(pFilePath, &config, pDecoder); + } + #endif + #ifdef MA_HAS_FLAC + if (result != MA_SUCCESS) { + result = ma_decoder_init_flac_from_file__internal(pFilePath, &config, pDecoder); + } + #endif + #ifdef MA_HAS_MP3 + if (result != MA_SUCCESS) { + result = ma_decoder_init_mp3_from_file__internal(pFilePath, &config, pDecoder); + } + #endif + #ifdef MA_HAS_VORBIS + if (result != MA_SUCCESS) { + result = ma_decoder_init_vorbis_from_file__internal(pFilePath, &config, pDecoder); + } + #endif + } + } + + /* + If at this point we still haven't successfully initialized the decoder it most likely means + the backend doesn't have an implementation for loading from a file path. We'll try using + miniaudio's built-in file IO for loading file. + */ + if (result == MA_SUCCESS) { + /* Initialization was successful. Finish up. */ + result = ma_decoder__postinit(&config, pDecoder); + if (result != MA_SUCCESS) { + /* + The backend was initialized successfully, but for some reason post-initialization failed. This is most likely + due to an out of memory error. We're going to abort with an error here and not try to recover. + */ + if (pDecoder->pBackendVTable != NULL && pDecoder->pBackendVTable->onUninit != NULL) { + pDecoder->pBackendVTable->onUninit(pDecoder->pBackendUserData, &pDecoder->pBackend, &pDecoder->allocationCallbacks); + } + + return result; + } + } else { + /* Probably no implementation for loading from a file path. Use miniaudio's file IO instead. 
*/ + result = ma_decoder_init_vfs(NULL, pFilePath, pConfig, pDecoder); + if (result != MA_SUCCESS) { + return result; + } + } + + return MA_SUCCESS; +} + +static ma_result ma_decoder__preinit_file_w(const wchar_t* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + ma_result result; + + result = ma_decoder__preinit(NULL, NULL, NULL, NULL, pConfig, pDecoder); + if (result != MA_SUCCESS) { + return result; + } + + if (pFilePath == NULL || pFilePath[0] == '\0') { + return MA_INVALID_ARGS; + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_decoder_init_file_w(const wchar_t* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + ma_result result; + ma_decoder_config config; + + config = ma_decoder_config_init_copy(pConfig); + result = ma_decoder__preinit_file_w(pFilePath, &config, pDecoder); + if (result != MA_SUCCESS) { + return result; + } + + /* If the backend has support for loading from a file path we'll want to use that. If that all fails we'll fall back to the VFS path. */ + result = MA_NO_BACKEND; + + if (config.encodingFormat != ma_encoding_format_unknown) { + #ifdef MA_HAS_WAV + if (config.encodingFormat == ma_encoding_format_wav) { + result = ma_decoder_init_wav_from_file_w__internal(pFilePath, &config, pDecoder); + } + #endif + #ifdef MA_HAS_FLAC + if (config.encodingFormat == ma_encoding_format_flac) { + result = ma_decoder_init_flac_from_file_w__internal(pFilePath, &config, pDecoder); + } + #endif + #ifdef MA_HAS_MP3 + if (config.encodingFormat == ma_encoding_format_mp3) { + result = ma_decoder_init_mp3_from_file_w__internal(pFilePath, &config, pDecoder); + } + #endif + #ifdef MA_HAS_VORBIS + if (config.encodingFormat == ma_encoding_format_vorbis) { + result = ma_decoder_init_vorbis_from_file_w__internal(pFilePath, &config, pDecoder); + } + #endif + } + + if (result != MA_SUCCESS) { + /* Getting here means we weren't able to initialize a decoder of a specific encoding format. */ + + /* + We use trial and error to open a decoder. We prioritize custom decoders so that if they + implement the same encoding format they take priority over the built-in decoders. + */ + result = ma_decoder_init_custom_from_file_w__internal(pFilePath, &config, pDecoder); + + /* + If we get to this point and we still haven't found a decoder, and the caller has requested a + specific encoding format, there's no hope for it. Abort. + */ + if (result != MA_SUCCESS && config.encodingFormat != ma_encoding_format_unknown) { + return MA_NO_BACKEND; + } + + /* First try loading based on the file extension so we don't waste time opening and closing files. */ + #ifdef MA_HAS_WAV + if (result != MA_SUCCESS && ma_path_extension_equal_w(pFilePath, L"wav")) { + result = ma_decoder_init_wav_from_file_w__internal(pFilePath, &config, pDecoder); + } + #endif + #ifdef MA_HAS_FLAC + if (result != MA_SUCCESS && ma_path_extension_equal_w(pFilePath, L"flac")) { + result = ma_decoder_init_flac_from_file_w__internal(pFilePath, &config, pDecoder); + } + #endif + #ifdef MA_HAS_MP3 + if (result != MA_SUCCESS && ma_path_extension_equal_w(pFilePath, L"mp3")) { + result = ma_decoder_init_mp3_from_file_w__internal(pFilePath, &config, pDecoder); + } + #endif + #ifdef MA_HAS_VORBIS + if (result != MA_SUCCESS && ma_path_extension_equal_w(pFilePath, L"ogg")) { + result = ma_decoder_init_vorbis_from_file_w__internal(pFilePath, &config, pDecoder); + } + #endif + + /* + If we still haven't got a result just use trial and error. 
Custom decoders have already been attempted, so here we + need only iterate over our stock decoders. + */ + if (result != MA_SUCCESS) { + #ifdef MA_HAS_WAV + if (result != MA_SUCCESS) { + result = ma_decoder_init_wav_from_file_w__internal(pFilePath, &config, pDecoder); + } + #endif + #ifdef MA_HAS_FLAC + if (result != MA_SUCCESS) { + result = ma_decoder_init_flac_from_file_w__internal(pFilePath, &config, pDecoder); + } + #endif + #ifdef MA_HAS_MP3 + if (result != MA_SUCCESS) { + result = ma_decoder_init_mp3_from_file_w__internal(pFilePath, &config, pDecoder); + } + #endif + #ifdef MA_HAS_VORBIS + if (result != MA_SUCCESS) { + result = ma_decoder_init_vorbis_from_file_w__internal(pFilePath, &config, pDecoder); + } + #endif + } + } + + /* + If at this point we still haven't successfully initialized the decoder it most likely means + the backend doesn't have an implementation for loading from a file path. We'll try using + miniaudio's built-in file IO for loading file. + */ + if (result == MA_SUCCESS) { + /* Initialization was successful. Finish up. */ + result = ma_decoder__postinit(&config, pDecoder); + if (result != MA_SUCCESS) { + /* + The backend was initialized successfully, but for some reason post-initialization failed. This is most likely + due to an out of memory error. We're going to abort with an error here and not try to recover. + */ + if (pDecoder->pBackendVTable != NULL && pDecoder->pBackendVTable->onUninit != NULL) { + pDecoder->pBackendVTable->onUninit(pDecoder->pBackendUserData, &pDecoder->pBackend, &pDecoder->allocationCallbacks); + } + + return result; + } + } else { + /* Probably no implementation for loading from a file path. Use miniaudio's file IO instead. */ + result = ma_decoder_init_vfs_w(NULL, pFilePath, pConfig, pDecoder); + if (result != MA_SUCCESS) { + return result; + } + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_decoder_uninit(ma_decoder* pDecoder) +{ + if (pDecoder == NULL) { + return MA_INVALID_ARGS; + } + + if (pDecoder->pBackend != NULL) { + if (pDecoder->pBackendVTable != NULL && pDecoder->pBackendVTable->onUninit != NULL) { + pDecoder->pBackendVTable->onUninit(pDecoder->pBackendUserData, pDecoder->pBackend, &pDecoder->allocationCallbacks); + } + } + + if (pDecoder->onRead == ma_decoder__on_read_vfs) { + ma_vfs_or_default_close(pDecoder->data.vfs.pVFS, pDecoder->data.vfs.file); + pDecoder->data.vfs.file = NULL; + } + + ma_data_converter_uninit(&pDecoder->converter, &pDecoder->allocationCallbacks); + ma_data_source_uninit(&pDecoder->ds); + + if (pDecoder->pInputCache != NULL) { + ma_free(pDecoder->pInputCache, &pDecoder->allocationCallbacks); + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_decoder_read_pcm_frames(ma_decoder* pDecoder, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead) +{ + ma_result result = MA_SUCCESS; + ma_uint64 totalFramesReadOut; + void* pRunningFramesOut; + + if (pFramesRead != NULL) { + *pFramesRead = 0; /* Safety. */ + } + + if (frameCount == 0) { + return MA_INVALID_ARGS; + } + + if (pDecoder == NULL) { + return MA_INVALID_ARGS; + } + + if (pDecoder->pBackend == NULL) { + return MA_INVALID_OPERATION; + } + + /* Fast path. */ + if (pDecoder->converter.isPassthrough) { + result = ma_data_source_read_pcm_frames(pDecoder->pBackend, pFramesOut, frameCount, &totalFramesReadOut); + } else { + /* + Getting here means we need to do data conversion. If we're seeking forward and are _not_ doing resampling we can run this in a fast path. 
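+        (A NULL pFramesOut means the caller is discarding the frames, which is how forward
+        seeking is implemented, so in that case the converter can be skipped entirely as
+        long as no resampler is involved.)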
If we're doing resampling we + need to run through each sample because we need to ensure its internal cache is updated. + */ + if (pFramesOut == NULL && pDecoder->converter.hasResampler == MA_FALSE) { + result = ma_data_source_read_pcm_frames(pDecoder->pBackend, NULL, frameCount, &totalFramesReadOut); + } else { + /* Slow path. Need to run everything through the data converter. */ + ma_format internalFormat; + ma_uint32 internalChannels; + + totalFramesReadOut = 0; + pRunningFramesOut = pFramesOut; + + result = ma_data_source_get_data_format(pDecoder->pBackend, &internalFormat, &internalChannels, NULL, NULL, 0); + if (result != MA_SUCCESS) { + return result; /* Failed to retrieve the internal format and channel count. */ + } + + /* + We run a different path depending on whether or not we are using a heap-allocated + intermediary buffer or not. If the data converter does not support the calculation of + the required number of input frames, we'll use the heap-allocated path. Otherwise we'll + use the stack-allocated path. + */ + if (pDecoder->pInputCache != NULL) { + /* We don't have a way of determining the required number of input frames, so need to persistently store input data in a cache. */ + while (totalFramesReadOut < frameCount) { + ma_uint64 framesToReadThisIterationIn; + ma_uint64 framesToReadThisIterationOut; + + /* If there's any data available in the cache, that needs to get processed first. */ + if (pDecoder->inputCacheRemaining > 0) { + framesToReadThisIterationOut = (frameCount - totalFramesReadOut); + framesToReadThisIterationIn = framesToReadThisIterationOut; + if (framesToReadThisIterationIn > pDecoder->inputCacheRemaining) { + framesToReadThisIterationIn = pDecoder->inputCacheRemaining; + } + + result = ma_data_converter_process_pcm_frames(&pDecoder->converter, ma_offset_pcm_frames_ptr(pDecoder->pInputCache, pDecoder->inputCacheConsumed, internalFormat, internalChannels), &framesToReadThisIterationIn, pRunningFramesOut, &framesToReadThisIterationOut); + if (result != MA_SUCCESS) { + break; + } + + pDecoder->inputCacheConsumed += framesToReadThisIterationIn; + pDecoder->inputCacheRemaining -= framesToReadThisIterationIn; + + totalFramesReadOut += framesToReadThisIterationOut; + + if (pRunningFramesOut != NULL) { + pRunningFramesOut = ma_offset_ptr(pRunningFramesOut, framesToReadThisIterationOut * ma_get_bytes_per_frame(pDecoder->outputFormat, pDecoder->outputChannels)); + } + + if (framesToReadThisIterationIn == 0 && framesToReadThisIterationOut == 0) { + break; /* We're done. */ + } + } + + /* Getting here means there's no data in the cache and we need to fill it up from the data source. */ + if (pDecoder->inputCacheRemaining == 0) { + pDecoder->inputCacheConsumed = 0; + + result = ma_data_source_read_pcm_frames(pDecoder->pBackend, pDecoder->pInputCache, pDecoder->inputCacheCap, &pDecoder->inputCacheRemaining); + if (result != MA_SUCCESS) { + break; + } + } + } + } else { + /* We have a way of determining the required number of input frames so just use the stack. */ + while (totalFramesReadOut < frameCount) { + ma_uint8 pIntermediaryBuffer[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; /* In internal format. 
*/ + ma_uint64 intermediaryBufferCap = sizeof(pIntermediaryBuffer) / ma_get_bytes_per_frame(internalFormat, internalChannels); + ma_uint64 framesToReadThisIterationIn; + ma_uint64 framesReadThisIterationIn; + ma_uint64 framesToReadThisIterationOut; + ma_uint64 framesReadThisIterationOut; + ma_uint64 requiredInputFrameCount; + + framesToReadThisIterationOut = (frameCount - totalFramesReadOut); + framesToReadThisIterationIn = framesToReadThisIterationOut; + if (framesToReadThisIterationIn > intermediaryBufferCap) { + framesToReadThisIterationIn = intermediaryBufferCap; + } + + ma_data_converter_get_required_input_frame_count(&pDecoder->converter, framesToReadThisIterationOut, &requiredInputFrameCount); + if (framesToReadThisIterationIn > requiredInputFrameCount) { + framesToReadThisIterationIn = requiredInputFrameCount; + } + + if (requiredInputFrameCount > 0) { + result = ma_data_source_read_pcm_frames(pDecoder->pBackend, pIntermediaryBuffer, framesToReadThisIterationIn, &framesReadThisIterationIn); + + /* + Note here that even if we've reached the end, we don't want to abort because there might be more output frames needing to be + generated from cached input data, which might happen if resampling is being performed. + */ + if (result != MA_SUCCESS && result != MA_AT_END) { + break; + } + } else { + framesReadThisIterationIn = 0; + pIntermediaryBuffer[0] = 0; /* <-- This is just to silence a static analysis warning. */ + } + + /* + At this point we have our decoded data in input format and now we need to convert to output format. Note that even if we didn't read any + input frames, we still want to try processing frames because there may some output frames generated from cached input data. + */ + framesReadThisIterationOut = framesToReadThisIterationOut; + result = ma_data_converter_process_pcm_frames(&pDecoder->converter, pIntermediaryBuffer, &framesReadThisIterationIn, pRunningFramesOut, &framesReadThisIterationOut); + if (result != MA_SUCCESS) { + break; + } + + totalFramesReadOut += framesReadThisIterationOut; + + if (pRunningFramesOut != NULL) { + pRunningFramesOut = ma_offset_ptr(pRunningFramesOut, framesReadThisIterationOut * ma_get_bytes_per_frame(pDecoder->outputFormat, pDecoder->outputChannels)); + } + + if (framesReadThisIterationIn == 0 && framesReadThisIterationOut == 0) { + break; /* We're done. */ + } + } + } + } + } + + pDecoder->readPointerInPCMFrames += totalFramesReadOut; + + if (pFramesRead != NULL) { + *pFramesRead = totalFramesReadOut; + } + + if (result == MA_SUCCESS && totalFramesReadOut == 0) { + result = MA_AT_END; + } + + return result; +} + +MA_API ma_result ma_decoder_seek_to_pcm_frame(ma_decoder* pDecoder, ma_uint64 frameIndex) +{ + if (pDecoder == NULL) { + return MA_INVALID_ARGS; + } + + if (pDecoder->pBackend != NULL) { + ma_result result; + ma_uint64 internalFrameIndex; + ma_uint32 internalSampleRate; + ma_uint64 currentFrameIndex; + + result = ma_data_source_get_data_format(pDecoder->pBackend, NULL, NULL, &internalSampleRate, NULL, 0); + if (result != MA_SUCCESS) { + return result; /* Failed to retrieve the internal sample rate. */ + } + + if (internalSampleRate == pDecoder->outputSampleRate) { + internalFrameIndex = frameIndex; + } else { + internalFrameIndex = ma_calculate_frame_count_after_resampling(internalSampleRate, pDecoder->outputSampleRate, frameIndex); + } + + /* Only seek if we're requesting a different frame to what we're currently sitting on. 
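+
+        As an illustrative sketch at the public API level (not part of the upstream sources;
+        decoder is assumed to be an initialized ma_decoder with f32 output):
+
+            float buffer[4096];
+            ma_uint64 framesRead;
+
+            ma_decoder_seek_to_pcm_frame(&decoder, 0); /* Rewind to the start. */
+            ma_decoder_read_pcm_frames(&decoder, buffer, 4096 / decoder.outputChannels, &framesRead); /* Returns MA_AT_END once the source is exhausted. */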
        */
+        ma_data_source_get_cursor_in_pcm_frames(pDecoder->pBackend, &currentFrameIndex);
+        if (currentFrameIndex != internalFrameIndex) {
+            result = ma_data_source_seek_to_pcm_frame(pDecoder->pBackend, internalFrameIndex);
+            if (result == MA_SUCCESS) {
+                pDecoder->readPointerInPCMFrames = frameIndex;
+            }
+
+            /* Reset the data converter so that any cached data in the resampler is cleared. */
+            ma_data_converter_reset(&pDecoder->converter);
+        }
+
+        return result;
+    }
+
+    /* Should never get here, but if we do it means onSeekToPCMFrame was not set by the backend. */
+    return MA_INVALID_ARGS;
+}
+
+MA_API ma_result ma_decoder_get_data_format(ma_decoder* pDecoder, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate, ma_channel* pChannelMap, size_t channelMapCap)
+{
+    if (pDecoder == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    if (pFormat != NULL) {
+        *pFormat = pDecoder->outputFormat;
+    }
+
+    if (pChannels != NULL) {
+        *pChannels = pDecoder->outputChannels;
+    }
+
+    if (pSampleRate != NULL) {
+        *pSampleRate = pDecoder->outputSampleRate;
+    }
+
+    if (pChannelMap != NULL) {
+        ma_data_converter_get_output_channel_map(&pDecoder->converter, pChannelMap, channelMapCap);
+    }
+
+    return MA_SUCCESS;
+}
+
+MA_API ma_result ma_decoder_get_cursor_in_pcm_frames(ma_decoder* pDecoder, ma_uint64* pCursor)
+{
+    if (pCursor == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    *pCursor = 0;
+
+    if (pDecoder == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    *pCursor = pDecoder->readPointerInPCMFrames;
+
+    return MA_SUCCESS;
+}
+
+MA_API ma_result ma_decoder_get_length_in_pcm_frames(ma_decoder* pDecoder, ma_uint64* pLength)
+{
+    if (pLength == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    *pLength = 0;
+
+    if (pDecoder == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    if (pDecoder->pBackend != NULL) {
+        ma_result result;
+        ma_uint64 internalLengthInPCMFrames;
+        ma_uint32 internalSampleRate;
+
+        result = ma_data_source_get_length_in_pcm_frames(pDecoder->pBackend, &internalLengthInPCMFrames);
+        if (result != MA_SUCCESS) {
+            return result; /* Failed to retrieve the internal length. */
+        }
+
+        result = ma_data_source_get_data_format(pDecoder->pBackend, NULL, NULL, &internalSampleRate, NULL, 0);
+        if (result != MA_SUCCESS) {
+            return result; /* Failed to retrieve the internal sample rate.
*/ + } + + if (internalSampleRate == pDecoder->outputSampleRate) { + *pLength = internalLengthInPCMFrames; + } else { + *pLength = ma_calculate_frame_count_after_resampling(pDecoder->outputSampleRate, internalSampleRate, internalLengthInPCMFrames); + } + + return MA_SUCCESS; + } else { + return MA_NO_BACKEND; + } +} + +MA_API ma_result ma_decoder_get_available_frames(ma_decoder* pDecoder, ma_uint64* pAvailableFrames) +{ + ma_result result; + ma_uint64 totalFrameCount; + + if (pAvailableFrames == NULL) { + return MA_INVALID_ARGS; + } + + *pAvailableFrames = 0; + + if (pDecoder == NULL) { + return MA_INVALID_ARGS; + } + + result = ma_decoder_get_length_in_pcm_frames(pDecoder, &totalFrameCount); + if (result != MA_SUCCESS) { + return result; + } + + if (totalFrameCount <= pDecoder->readPointerInPCMFrames) { + *pAvailableFrames = 0; + } else { + *pAvailableFrames = totalFrameCount - pDecoder->readPointerInPCMFrames; + } + + return MA_SUCCESS; +} + + +static ma_result ma_decoder__full_decode_and_uninit(ma_decoder* pDecoder, ma_decoder_config* pConfigOut, ma_uint64* pFrameCountOut, void** ppPCMFramesOut) +{ + ma_result result; + ma_uint64 totalFrameCount; + ma_uint64 bpf; + ma_uint64 dataCapInFrames; + void* pPCMFramesOut; + + MA_ASSERT(pDecoder != NULL); + + totalFrameCount = 0; + bpf = ma_get_bytes_per_frame(pDecoder->outputFormat, pDecoder->outputChannels); + + /* The frame count is unknown until we try reading. Thus, we just run in a loop. */ + dataCapInFrames = 0; + pPCMFramesOut = NULL; + for (;;) { + ma_uint64 frameCountToTryReading; + ma_uint64 framesJustRead; + + /* Make room if there's not enough. */ + if (totalFrameCount == dataCapInFrames) { + void* pNewPCMFramesOut; + ma_uint64 newDataCapInFrames = dataCapInFrames*2; + if (newDataCapInFrames == 0) { + newDataCapInFrames = 4096; + } + + if ((newDataCapInFrames * bpf) > MA_SIZE_MAX) { + ma_free(pPCMFramesOut, &pDecoder->allocationCallbacks); + return MA_TOO_BIG; + } + + pNewPCMFramesOut = (void*)ma_realloc(pPCMFramesOut, (size_t)(newDataCapInFrames * bpf), &pDecoder->allocationCallbacks); + if (pNewPCMFramesOut == NULL) { + ma_free(pPCMFramesOut, &pDecoder->allocationCallbacks); + return MA_OUT_OF_MEMORY; + } + + dataCapInFrames = newDataCapInFrames; + pPCMFramesOut = pNewPCMFramesOut; + } + + frameCountToTryReading = dataCapInFrames - totalFrameCount; + MA_ASSERT(frameCountToTryReading > 0); + + result = ma_decoder_read_pcm_frames(pDecoder, (ma_uint8*)pPCMFramesOut + (totalFrameCount * bpf), frameCountToTryReading, &framesJustRead); + totalFrameCount += framesJustRead; + + if (result != MA_SUCCESS) { + break; + } + + if (framesJustRead < frameCountToTryReading) { + break; + } + } + + + if (pConfigOut != NULL) { + pConfigOut->format = pDecoder->outputFormat; + pConfigOut->channels = pDecoder->outputChannels; + pConfigOut->sampleRate = pDecoder->outputSampleRate; + } + + if (ppPCMFramesOut != NULL) { + *ppPCMFramesOut = pPCMFramesOut; + } else { + ma_free(pPCMFramesOut, &pDecoder->allocationCallbacks); + } + + if (pFrameCountOut != NULL) { + *pFrameCountOut = totalFrameCount; + } + + ma_decoder_uninit(pDecoder); + return MA_SUCCESS; +} + +MA_API ma_result ma_decode_from_vfs(ma_vfs* pVFS, const char* pFilePath, ma_decoder_config* pConfig, ma_uint64* pFrameCountOut, void** ppPCMFramesOut) +{ + ma_result result; + ma_decoder_config config; + ma_decoder decoder; + + if (pFrameCountOut != NULL) { + *pFrameCountOut = 0; + } + if (ppPCMFramesOut != NULL) { + *ppPCMFramesOut = NULL; + } + + config = ma_decoder_config_init_copy(pConfig); + + 
result = ma_decoder_init_vfs(pVFS, pFilePath, &config, &decoder); + if (result != MA_SUCCESS) { + return result; + } + + result = ma_decoder__full_decode_and_uninit(&decoder, pConfig, pFrameCountOut, ppPCMFramesOut); + + return result; +} + +MA_API ma_result ma_decode_file(const char* pFilePath, ma_decoder_config* pConfig, ma_uint64* pFrameCountOut, void** ppPCMFramesOut) +{ + return ma_decode_from_vfs(NULL, pFilePath, pConfig, pFrameCountOut, ppPCMFramesOut); +} + +MA_API ma_result ma_decode_memory(const void* pData, size_t dataSize, ma_decoder_config* pConfig, ma_uint64* pFrameCountOut, void** ppPCMFramesOut) +{ + ma_decoder_config config; + ma_decoder decoder; + ma_result result; + + if (pFrameCountOut != NULL) { + *pFrameCountOut = 0; + } + if (ppPCMFramesOut != NULL) { + *ppPCMFramesOut = NULL; + } + + if (pData == NULL || dataSize == 0) { + return MA_INVALID_ARGS; + } + + config = ma_decoder_config_init_copy(pConfig); + + result = ma_decoder_init_memory(pData, dataSize, &config, &decoder); + if (result != MA_SUCCESS) { + return result; + } + + return ma_decoder__full_decode_and_uninit(&decoder, pConfig, pFrameCountOut, ppPCMFramesOut); +} +#endif /* MA_NO_DECODING */ + + +#ifndef MA_NO_ENCODING + +#if defined(MA_HAS_WAV) +static size_t ma_encoder__internal_on_write_wav(void* pUserData, const void* pData, size_t bytesToWrite) +{ + ma_encoder* pEncoder = (ma_encoder*)pUserData; + size_t bytesWritten = 0; + + MA_ASSERT(pEncoder != NULL); + + pEncoder->onWrite(pEncoder, pData, bytesToWrite, &bytesWritten); + return bytesWritten; +} + +static ma_bool32 ma_encoder__internal_on_seek_wav(void* pUserData, int offset, ma_dr_wav_seek_origin origin) +{ + ma_encoder* pEncoder = (ma_encoder*)pUserData; + ma_result result; + + MA_ASSERT(pEncoder != NULL); + + result = pEncoder->onSeek(pEncoder, offset, (origin == ma_dr_wav_seek_origin_start) ? 
ma_seek_origin_start : ma_seek_origin_current);
+    if (result != MA_SUCCESS) {
+        return MA_FALSE;
+    } else {
+        return MA_TRUE;
+    }
+}
+
+static ma_result ma_encoder__on_init_wav(ma_encoder* pEncoder)
+{
+    ma_dr_wav_data_format wavFormat;
+    ma_allocation_callbacks allocationCallbacks;
+    ma_dr_wav* pWav;
+
+    MA_ASSERT(pEncoder != NULL);
+
+    pWav = (ma_dr_wav*)ma_malloc(sizeof(*pWav), &pEncoder->config.allocationCallbacks);
+    if (pWav == NULL) {
+        return MA_OUT_OF_MEMORY;
+    }
+
+    wavFormat.container     = ma_dr_wav_container_riff;
+    wavFormat.channels      = pEncoder->config.channels;
+    wavFormat.sampleRate    = pEncoder->config.sampleRate;
+    wavFormat.bitsPerSample = ma_get_bytes_per_sample(pEncoder->config.format) * 8;
+    if (pEncoder->config.format == ma_format_f32) {
+        wavFormat.format = MA_DR_WAVE_FORMAT_IEEE_FLOAT;
+    } else {
+        wavFormat.format = MA_DR_WAVE_FORMAT_PCM;
+    }
+
+    allocationCallbacks.pUserData = pEncoder->config.allocationCallbacks.pUserData;
+    allocationCallbacks.onMalloc  = pEncoder->config.allocationCallbacks.onMalloc;
+    allocationCallbacks.onRealloc = pEncoder->config.allocationCallbacks.onRealloc;
+    allocationCallbacks.onFree    = pEncoder->config.allocationCallbacks.onFree;
+
+    if (!ma_dr_wav_init_write(pWav, &wavFormat, ma_encoder__internal_on_write_wav, ma_encoder__internal_on_seek_wav, pEncoder, &allocationCallbacks)) {
+        ma_free(pWav, &pEncoder->config.allocationCallbacks); /* Don't leak the allocation if the writer fails to initialize. */
+        return MA_ERROR;
+    }
+
+    pEncoder->pInternalEncoder = pWav;
+
+    return MA_SUCCESS;
+}
+
+static void ma_encoder__on_uninit_wav(ma_encoder* pEncoder)
+{
+    ma_dr_wav* pWav;
+
+    MA_ASSERT(pEncoder != NULL);
+
+    pWav = (ma_dr_wav*)pEncoder->pInternalEncoder;
+    MA_ASSERT(pWav != NULL);
+
+    ma_dr_wav_uninit(pWav);
+    ma_free(pWav, &pEncoder->config.allocationCallbacks);
+}
+
+static ma_result ma_encoder__on_write_pcm_frames_wav(ma_encoder* pEncoder, const void* pFramesIn, ma_uint64 frameCount, ma_uint64* pFramesWritten)
+{
+    ma_dr_wav* pWav;
+    ma_uint64 framesWritten;
+
+    MA_ASSERT(pEncoder != NULL);
+
+    pWav = (ma_dr_wav*)pEncoder->pInternalEncoder;
+    MA_ASSERT(pWav != NULL);
+
+    framesWritten = ma_dr_wav_write_pcm_frames(pWav, frameCount, pFramesIn);
+
+    if (pFramesWritten != NULL) {
+        *pFramesWritten = framesWritten;
+    }
+
+    return MA_SUCCESS;
+}
+#endif
+
+MA_API ma_encoder_config ma_encoder_config_init(ma_encoding_format encodingFormat, ma_format format, ma_uint32 channels, ma_uint32 sampleRate)
+{
+    ma_encoder_config config;
+
+    MA_ZERO_OBJECT(&config);
+    config.encodingFormat = encodingFormat;
+    config.format = format;
+    config.channels = channels;
+    config.sampleRate = sampleRate;
+
+    return config;
+}
+
+MA_API ma_result ma_encoder_preinit(const ma_encoder_config* pConfig, ma_encoder* pEncoder)
+{
+    ma_result result;
+
+    if (pEncoder == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    MA_ZERO_OBJECT(pEncoder);
+
+    if (pConfig == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    if (pConfig->format == ma_format_unknown || pConfig->channels == 0 || pConfig->sampleRate == 0) {
+        return MA_INVALID_ARGS;
+    }
+
+    pEncoder->config = *pConfig;
+
+    result = ma_allocation_callbacks_init_copy(&pEncoder->config.allocationCallbacks, &pConfig->allocationCallbacks);
+    if (result != MA_SUCCESS) {
+        return result;
+    }
+
+    return MA_SUCCESS;
+}
+
+MA_API ma_result ma_encoder_init__internal(ma_encoder_write_proc onWrite, ma_encoder_seek_proc onSeek, void* pUserData, ma_encoder* pEncoder)
+{
+    ma_result result = MA_SUCCESS;
+
+    /* This assumes ma_encoder_preinit() has been called first.
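+
+    As an illustrative sketch at the public API level (not part of the upstream sources;
+    the file name, pFrames and frameCount are hypothetical):
+
+        ma_encoder encoder;
+        ma_encoder_config config = ma_encoder_config_init(ma_encoding_format_wav, ma_format_s16, 2, 44100);
+
+        if (ma_encoder_init_file("capture.wav", &config, &encoder) == MA_SUCCESS) {
+            ma_uint64 framesWritten;
+            ma_encoder_write_pcm_frames(&encoder, pFrames, frameCount, &framesWritten);
+            ma_encoder_uninit(&encoder);
+        }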
*/ + MA_ASSERT(pEncoder != NULL); + + if (onWrite == NULL || onSeek == NULL) { + return MA_INVALID_ARGS; + } + + pEncoder->onWrite = onWrite; + pEncoder->onSeek = onSeek; + pEncoder->pUserData = pUserData; + + switch (pEncoder->config.encodingFormat) + { + case ma_encoding_format_wav: + { + #if defined(MA_HAS_WAV) + pEncoder->onInit = ma_encoder__on_init_wav; + pEncoder->onUninit = ma_encoder__on_uninit_wav; + pEncoder->onWritePCMFrames = ma_encoder__on_write_pcm_frames_wav; + #else + result = MA_NO_BACKEND; + #endif + } break; + + default: + { + result = MA_INVALID_ARGS; + } break; + } + + /* Getting here means we should have our backend callbacks set up. */ + if (result == MA_SUCCESS) { + result = pEncoder->onInit(pEncoder); + } + + return result; +} + +static ma_result ma_encoder__on_write_vfs(ma_encoder* pEncoder, const void* pBufferIn, size_t bytesToWrite, size_t* pBytesWritten) +{ + return ma_vfs_or_default_write(pEncoder->data.vfs.pVFS, pEncoder->data.vfs.file, pBufferIn, bytesToWrite, pBytesWritten); +} + +static ma_result ma_encoder__on_seek_vfs(ma_encoder* pEncoder, ma_int64 offset, ma_seek_origin origin) +{ + return ma_vfs_or_default_seek(pEncoder->data.vfs.pVFS, pEncoder->data.vfs.file, offset, origin); +} + +MA_API ma_result ma_encoder_init_vfs(ma_vfs* pVFS, const char* pFilePath, const ma_encoder_config* pConfig, ma_encoder* pEncoder) +{ + ma_result result; + ma_vfs_file file; + + result = ma_encoder_preinit(pConfig, pEncoder); + if (result != MA_SUCCESS) { + return result; + } + + /* Now open the file. If this fails we don't need to uninitialize the encoder. */ + result = ma_vfs_or_default_open(pVFS, pFilePath, MA_OPEN_MODE_WRITE, &file); + if (result != MA_SUCCESS) { + return result; + } + + pEncoder->data.vfs.pVFS = pVFS; + pEncoder->data.vfs.file = file; + + result = ma_encoder_init__internal(ma_encoder__on_write_vfs, ma_encoder__on_seek_vfs, NULL, pEncoder); + if (result != MA_SUCCESS) { + ma_vfs_or_default_close(pVFS, file); + return result; + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_encoder_init_vfs_w(ma_vfs* pVFS, const wchar_t* pFilePath, const ma_encoder_config* pConfig, ma_encoder* pEncoder) +{ + ma_result result; + ma_vfs_file file; + + result = ma_encoder_preinit(pConfig, pEncoder); + if (result != MA_SUCCESS) { + return result; + } + + /* Now open the file. If this fails we don't need to uninitialize the encoder. 
*/ + result = ma_vfs_or_default_open_w(pVFS, pFilePath, MA_OPEN_MODE_WRITE, &file); + if (result != MA_SUCCESS) { + return result; + } + + pEncoder->data.vfs.pVFS = pVFS; + pEncoder->data.vfs.file = file; + + result = ma_encoder_init__internal(ma_encoder__on_write_vfs, ma_encoder__on_seek_vfs, NULL, pEncoder); + if (result != MA_SUCCESS) { + ma_vfs_or_default_close(pVFS, file); + return result; + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_encoder_init_file(const char* pFilePath, const ma_encoder_config* pConfig, ma_encoder* pEncoder) +{ + return ma_encoder_init_vfs(NULL, pFilePath, pConfig, pEncoder); +} + +MA_API ma_result ma_encoder_init_file_w(const wchar_t* pFilePath, const ma_encoder_config* pConfig, ma_encoder* pEncoder) +{ + return ma_encoder_init_vfs_w(NULL, pFilePath, pConfig, pEncoder); +} + +MA_API ma_result ma_encoder_init(ma_encoder_write_proc onWrite, ma_encoder_seek_proc onSeek, void* pUserData, const ma_encoder_config* pConfig, ma_encoder* pEncoder) +{ + ma_result result; + + result = ma_encoder_preinit(pConfig, pEncoder); + if (result != MA_SUCCESS) { + return result; + } + + return ma_encoder_init__internal(onWrite, onSeek, pUserData, pEncoder); +} + + +MA_API void ma_encoder_uninit(ma_encoder* pEncoder) +{ + if (pEncoder == NULL) { + return; + } + + if (pEncoder->onUninit) { + pEncoder->onUninit(pEncoder); + } + + /* If we have a file handle, close it. */ + if (pEncoder->onWrite == ma_encoder__on_write_vfs) { + ma_vfs_or_default_close(pEncoder->data.vfs.pVFS, pEncoder->data.vfs.file); + pEncoder->data.vfs.file = NULL; + } +} + + +MA_API ma_result ma_encoder_write_pcm_frames(ma_encoder* pEncoder, const void* pFramesIn, ma_uint64 frameCount, ma_uint64* pFramesWritten) +{ + if (pFramesWritten != NULL) { + *pFramesWritten = 0; + } + + if (pEncoder == NULL || pFramesIn == NULL) { + return MA_INVALID_ARGS; + } + + return pEncoder->onWritePCMFrames(pEncoder, pFramesIn, frameCount, pFramesWritten); +} +#endif /* MA_NO_ENCODING */ + + + +/************************************************************************************************************************************************************** + +Generation + +**************************************************************************************************************************************************************/ +#ifndef MA_NO_GENERATION +MA_API ma_waveform_config ma_waveform_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, ma_waveform_type type, double amplitude, double frequency) +{ + ma_waveform_config config; + + MA_ZERO_OBJECT(&config); + config.format = format; + config.channels = channels; + config.sampleRate = sampleRate; + config.type = type; + config.amplitude = amplitude; + config.frequency = frequency; + + return config; +} + +static ma_result ma_waveform__data_source_on_read(ma_data_source* pDataSource, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead) +{ + return ma_waveform_read_pcm_frames((ma_waveform*)pDataSource, pFramesOut, frameCount, pFramesRead); +} + +static ma_result ma_waveform__data_source_on_seek(ma_data_source* pDataSource, ma_uint64 frameIndex) +{ + return ma_waveform_seek_to_pcm_frame((ma_waveform*)pDataSource, frameIndex); +} + +static ma_result ma_waveform__data_source_on_get_data_format(ma_data_source* pDataSource, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate, ma_channel* pChannelMap, size_t channelMapCap) +{ + ma_waveform* pWaveform = (ma_waveform*)pDataSource; + + *pFormat = pWaveform->config.format; + *pChannels = 
pWaveform->config.channels; + *pSampleRate = pWaveform->config.sampleRate; + ma_channel_map_init_standard(ma_standard_channel_map_default, pChannelMap, channelMapCap, pWaveform->config.channels); + + return MA_SUCCESS; +} + +static ma_result ma_waveform__data_source_on_get_cursor(ma_data_source* pDataSource, ma_uint64* pCursor) +{ + ma_waveform* pWaveform = (ma_waveform*)pDataSource; + + *pCursor = (ma_uint64)(pWaveform->time / pWaveform->advance); + + return MA_SUCCESS; +} + +static double ma_waveform__calculate_advance(ma_uint32 sampleRate, double frequency) +{ + return (1.0 / (sampleRate / frequency)); +} + +static void ma_waveform__update_advance(ma_waveform* pWaveform) +{ + pWaveform->advance = ma_waveform__calculate_advance(pWaveform->config.sampleRate, pWaveform->config.frequency); +} + +static ma_data_source_vtable g_ma_waveform_data_source_vtable = +{ + ma_waveform__data_source_on_read, + ma_waveform__data_source_on_seek, + ma_waveform__data_source_on_get_data_format, + ma_waveform__data_source_on_get_cursor, + NULL, /* onGetLength. There's no notion of a length in waveforms. */ + NULL, /* onSetLooping */ + 0 +}; + +MA_API ma_result ma_waveform_init(const ma_waveform_config* pConfig, ma_waveform* pWaveform) +{ + ma_result result; + ma_data_source_config dataSourceConfig; + + if (pWaveform == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pWaveform); + + dataSourceConfig = ma_data_source_config_init(); + dataSourceConfig.vtable = &g_ma_waveform_data_source_vtable; + + result = ma_data_source_init(&dataSourceConfig, &pWaveform->ds); + if (result != MA_SUCCESS) { + return result; + } + + pWaveform->config = *pConfig; + pWaveform->advance = ma_waveform__calculate_advance(pWaveform->config.sampleRate, pWaveform->config.frequency); + pWaveform->time = 0; + + return MA_SUCCESS; +} + +MA_API void ma_waveform_uninit(ma_waveform* pWaveform) +{ + if (pWaveform == NULL) { + return; + } + + ma_data_source_uninit(&pWaveform->ds); +} + +MA_API ma_result ma_waveform_set_amplitude(ma_waveform* pWaveform, double amplitude) +{ + if (pWaveform == NULL) { + return MA_INVALID_ARGS; + } + + pWaveform->config.amplitude = amplitude; + return MA_SUCCESS; +} + +MA_API ma_result ma_waveform_set_frequency(ma_waveform* pWaveform, double frequency) +{ + if (pWaveform == NULL) { + return MA_INVALID_ARGS; + } + + pWaveform->config.frequency = frequency; + ma_waveform__update_advance(pWaveform); + + return MA_SUCCESS; +} + +MA_API ma_result ma_waveform_set_type(ma_waveform* pWaveform, ma_waveform_type type) +{ + if (pWaveform == NULL) { + return MA_INVALID_ARGS; + } + + pWaveform->config.type = type; + return MA_SUCCESS; +} + +MA_API ma_result ma_waveform_set_sample_rate(ma_waveform* pWaveform, ma_uint32 sampleRate) +{ + if (pWaveform == NULL) { + return MA_INVALID_ARGS; + } + + pWaveform->config.sampleRate = sampleRate; + ma_waveform__update_advance(pWaveform); + + return MA_SUCCESS; +} + +static float ma_waveform_sine_f32(double time, double amplitude) +{ + return (float)(ma_sind(MA_TAU_D * time) * amplitude); +} + +static ma_int16 ma_waveform_sine_s16(double time, double amplitude) +{ + return ma_pcm_sample_f32_to_s16(ma_waveform_sine_f32(time, amplitude)); +} + +static float ma_waveform_square_f32(double time, double dutyCycle, double amplitude) +{ + double f = time - (ma_int64)time; + double r; + + if (f < dutyCycle) { + r = amplitude; + } else { + r = -amplitude; + } + + return (float)r; +} + +static ma_int16 ma_waveform_square_s16(double time, double dutyCycle, double amplitude) +{ + return 
ma_pcm_sample_f32_to_s16(ma_waveform_square_f32(time, dutyCycle, amplitude)); +} + +static float ma_waveform_triangle_f32(double time, double amplitude) +{ + double f = time - (ma_int64)time; + double r; + + r = 2 * ma_abs(2 * (f - 0.5)) - 1; + + return (float)(r * amplitude); +} + +static ma_int16 ma_waveform_triangle_s16(double time, double amplitude) +{ + return ma_pcm_sample_f32_to_s16(ma_waveform_triangle_f32(time, amplitude)); +} + +static float ma_waveform_sawtooth_f32(double time, double amplitude) +{ + double f = time - (ma_int64)time; + double r; + + r = 2 * (f - 0.5); + + return (float)(r * amplitude); +} + +static ma_int16 ma_waveform_sawtooth_s16(double time, double amplitude) +{ + return ma_pcm_sample_f32_to_s16(ma_waveform_sawtooth_f32(time, amplitude)); +} + +static void ma_waveform_read_pcm_frames__sine(ma_waveform* pWaveform, void* pFramesOut, ma_uint64 frameCount) +{ + ma_uint64 iFrame; + ma_uint64 iChannel; + ma_uint32 bps = ma_get_bytes_per_sample(pWaveform->config.format); + ma_uint32 bpf = bps * pWaveform->config.channels; + + MA_ASSERT(pWaveform != NULL); + MA_ASSERT(pFramesOut != NULL); + + if (pWaveform->config.format == ma_format_f32) { + float* pFramesOutF32 = (float*)pFramesOut; + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + float s = ma_waveform_sine_f32(pWaveform->time, pWaveform->config.amplitude); + pWaveform->time += pWaveform->advance; + + for (iChannel = 0; iChannel < pWaveform->config.channels; iChannel += 1) { + pFramesOutF32[iFrame*pWaveform->config.channels + iChannel] = s; + } + } + } else if (pWaveform->config.format == ma_format_s16) { + ma_int16* pFramesOutS16 = (ma_int16*)pFramesOut; + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + ma_int16 s = ma_waveform_sine_s16(pWaveform->time, pWaveform->config.amplitude); + pWaveform->time += pWaveform->advance; + + for (iChannel = 0; iChannel < pWaveform->config.channels; iChannel += 1) { + pFramesOutS16[iFrame*pWaveform->config.channels + iChannel] = s; + } + } + } else { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + float s = ma_waveform_sine_f32(pWaveform->time, pWaveform->config.amplitude); + pWaveform->time += pWaveform->advance; + + for (iChannel = 0; iChannel < pWaveform->config.channels; iChannel += 1) { + ma_pcm_convert(ma_offset_ptr(pFramesOut, iFrame*bpf + iChannel*bps), pWaveform->config.format, &s, ma_format_f32, 1, ma_dither_mode_none); + } + } + } +} + +static void ma_waveform_read_pcm_frames__square(ma_waveform* pWaveform, double dutyCycle, void* pFramesOut, ma_uint64 frameCount) +{ + ma_uint64 iFrame; + ma_uint64 iChannel; + ma_uint32 bps = ma_get_bytes_per_sample(pWaveform->config.format); + ma_uint32 bpf = bps * pWaveform->config.channels; + + MA_ASSERT(pWaveform != NULL); + MA_ASSERT(pFramesOut != NULL); + + if (pWaveform->config.format == ma_format_f32) { + float* pFramesOutF32 = (float*)pFramesOut; + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + float s = ma_waveform_square_f32(pWaveform->time, dutyCycle, pWaveform->config.amplitude); + pWaveform->time += pWaveform->advance; + + for (iChannel = 0; iChannel < pWaveform->config.channels; iChannel += 1) { + pFramesOutF32[iFrame*pWaveform->config.channels + iChannel] = s; + } + } + } else if (pWaveform->config.format == ma_format_s16) { + ma_int16* pFramesOutS16 = (ma_int16*)pFramesOut; + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + ma_int16 s = ma_waveform_square_s16(pWaveform->time, dutyCycle, pWaveform->config.amplitude); + pWaveform->time += pWaveform->advance; + + for (iChannel = 0; 
iChannel < pWaveform->config.channels; iChannel += 1) { + pFramesOutS16[iFrame*pWaveform->config.channels + iChannel] = s; + } + } + } else { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + float s = ma_waveform_square_f32(pWaveform->time, dutyCycle, pWaveform->config.amplitude); + pWaveform->time += pWaveform->advance; + + for (iChannel = 0; iChannel < pWaveform->config.channels; iChannel += 1) { + ma_pcm_convert(ma_offset_ptr(pFramesOut, iFrame*bpf + iChannel*bps), pWaveform->config.format, &s, ma_format_f32, 1, ma_dither_mode_none); + } + } + } +} + +static void ma_waveform_read_pcm_frames__triangle(ma_waveform* pWaveform, void* pFramesOut, ma_uint64 frameCount) +{ + ma_uint64 iFrame; + ma_uint64 iChannel; + ma_uint32 bps = ma_get_bytes_per_sample(pWaveform->config.format); + ma_uint32 bpf = bps * pWaveform->config.channels; + + MA_ASSERT(pWaveform != NULL); + MA_ASSERT(pFramesOut != NULL); + + if (pWaveform->config.format == ma_format_f32) { + float* pFramesOutF32 = (float*)pFramesOut; + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + float s = ma_waveform_triangle_f32(pWaveform->time, pWaveform->config.amplitude); + pWaveform->time += pWaveform->advance; + + for (iChannel = 0; iChannel < pWaveform->config.channels; iChannel += 1) { + pFramesOutF32[iFrame*pWaveform->config.channels + iChannel] = s; + } + } + } else if (pWaveform->config.format == ma_format_s16) { + ma_int16* pFramesOutS16 = (ma_int16*)pFramesOut; + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + ma_int16 s = ma_waveform_triangle_s16(pWaveform->time, pWaveform->config.amplitude); + pWaveform->time += pWaveform->advance; + + for (iChannel = 0; iChannel < pWaveform->config.channels; iChannel += 1) { + pFramesOutS16[iFrame*pWaveform->config.channels + iChannel] = s; + } + } + } else { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + float s = ma_waveform_triangle_f32(pWaveform->time, pWaveform->config.amplitude); + pWaveform->time += pWaveform->advance; + + for (iChannel = 0; iChannel < pWaveform->config.channels; iChannel += 1) { + ma_pcm_convert(ma_offset_ptr(pFramesOut, iFrame*bpf + iChannel*bps), pWaveform->config.format, &s, ma_format_f32, 1, ma_dither_mode_none); + } + } + } +} + +static void ma_waveform_read_pcm_frames__sawtooth(ma_waveform* pWaveform, void* pFramesOut, ma_uint64 frameCount) +{ + ma_uint64 iFrame; + ma_uint64 iChannel; + ma_uint32 bps = ma_get_bytes_per_sample(pWaveform->config.format); + ma_uint32 bpf = bps * pWaveform->config.channels; + + MA_ASSERT(pWaveform != NULL); + MA_ASSERT(pFramesOut != NULL); + + if (pWaveform->config.format == ma_format_f32) { + float* pFramesOutF32 = (float*)pFramesOut; + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + float s = ma_waveform_sawtooth_f32(pWaveform->time, pWaveform->config.amplitude); + pWaveform->time += pWaveform->advance; + + for (iChannel = 0; iChannel < pWaveform->config.channels; iChannel += 1) { + pFramesOutF32[iFrame*pWaveform->config.channels + iChannel] = s; + } + } + } else if (pWaveform->config.format == ma_format_s16) { + ma_int16* pFramesOutS16 = (ma_int16*)pFramesOut; + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + ma_int16 s = ma_waveform_sawtooth_s16(pWaveform->time, pWaveform->config.amplitude); + pWaveform->time += pWaveform->advance; + + for (iChannel = 0; iChannel < pWaveform->config.channels; iChannel += 1) { + pFramesOutS16[iFrame*pWaveform->config.channels + iChannel] = s; + } + } + } else { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + float s = 
ma_waveform_sawtooth_f32(pWaveform->time, pWaveform->config.amplitude); + pWaveform->time += pWaveform->advance; + + for (iChannel = 0; iChannel < pWaveform->config.channels; iChannel += 1) { + ma_pcm_convert(ma_offset_ptr(pFramesOut, iFrame*bpf + iChannel*bps), pWaveform->config.format, &s, ma_format_f32, 1, ma_dither_mode_none); + } + } + } +} + +MA_API ma_result ma_waveform_read_pcm_frames(ma_waveform* pWaveform, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead) +{ + if (pFramesRead != NULL) { + *pFramesRead = 0; + } + + if (frameCount == 0) { + return MA_INVALID_ARGS; + } + + if (pWaveform == NULL) { + return MA_INVALID_ARGS; + } + + if (pFramesOut != NULL) { + switch (pWaveform->config.type) + { + case ma_waveform_type_sine: + { + ma_waveform_read_pcm_frames__sine(pWaveform, pFramesOut, frameCount); + } break; + + case ma_waveform_type_square: + { + ma_waveform_read_pcm_frames__square(pWaveform, 0.5, pFramesOut, frameCount); + } break; + + case ma_waveform_type_triangle: + { + ma_waveform_read_pcm_frames__triangle(pWaveform, pFramesOut, frameCount); + } break; + + case ma_waveform_type_sawtooth: + { + ma_waveform_read_pcm_frames__sawtooth(pWaveform, pFramesOut, frameCount); + } break; + + default: return MA_INVALID_OPERATION; /* Unknown waveform type. */ + } + } else { + pWaveform->time += pWaveform->advance * (ma_int64)frameCount; /* Cast to int64 required for VC6. Won't affect anything in practice. */ + } + + if (pFramesRead != NULL) { + *pFramesRead = frameCount; + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_waveform_seek_to_pcm_frame(ma_waveform* pWaveform, ma_uint64 frameIndex) +{ + if (pWaveform == NULL) { + return MA_INVALID_ARGS; + } + + pWaveform->time = pWaveform->advance * (ma_int64)frameIndex; /* Casting for VC6. Won't be an issue in practice. 
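A worked example of the time/advance bookkeeping: with sampleRate = 48000 and frequency = 440, advance = 1 / (48000 / 440) = 440 / 48000, roughly 0.0091667 cycles per frame. Seeking to frame 24000 therefore sets time = 24000 * (440 / 48000) = 220 cycles, and the cursor callback above recovers the frame index as time / advance = 24000.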
*/ + + return MA_SUCCESS; +} + +MA_API ma_pulsewave_config ma_pulsewave_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double dutyCycle, double amplitude, double frequency) +{ + ma_pulsewave_config config; + + MA_ZERO_OBJECT(&config); + config.format = format; + config.channels = channels; + config.sampleRate = sampleRate; + config.dutyCycle = dutyCycle; + config.amplitude = amplitude; + config.frequency = frequency; + + return config; +} + +MA_API ma_result ma_pulsewave_init(const ma_pulsewave_config* pConfig, ma_pulsewave* pWaveform) +{ + ma_result result; + ma_waveform_config config; + + if (pWaveform == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pWaveform); + + config = ma_waveform_config_init( + pConfig->format, + pConfig->channels, + pConfig->sampleRate, + ma_waveform_type_square, + pConfig->amplitude, + pConfig->frequency + ); + + result = ma_waveform_init(&config, &pWaveform->waveform); + ma_pulsewave_set_duty_cycle(pWaveform, pConfig->dutyCycle); + + return result; +} + +MA_API void ma_pulsewave_uninit(ma_pulsewave* pWaveform) +{ + if (pWaveform == NULL) { + return; + } + + ma_waveform_uninit(&pWaveform->waveform); +} + +MA_API ma_result ma_pulsewave_read_pcm_frames(ma_pulsewave* pWaveform, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead) +{ + if (pFramesRead != NULL) { + *pFramesRead = 0; + } + + if (frameCount == 0) { + return MA_INVALID_ARGS; + } + + if (pWaveform == NULL) { + return MA_INVALID_ARGS; + } + + if (pFramesOut != NULL) { + ma_waveform_read_pcm_frames__square(&pWaveform->waveform, pWaveform->config.dutyCycle, pFramesOut, frameCount); + } else { + pWaveform->waveform.time += pWaveform->waveform.advance * (ma_int64)frameCount; /* Cast to int64 required for VC6. Won't affect anything in practice. 
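Since a pulsewave is just this wrapper plus a configurable duty cycle, a 25% pulse at 220 Hz looks like this (illustrative sketch; error handling elided, pFramesOut and frameCount supplied by the caller):

    ma_pulsewave_config cfg = ma_pulsewave_config_init(ma_format_f32, 2, 48000, 0.25, 0.5, 220);
    ma_pulsewave pulse;
    ma_pulsewave_init(&cfg, &pulse);
    ma_pulsewave_read_pcm_frames(&pulse, pFramesOut, frameCount, NULL);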
*/ + } + + if (pFramesRead != NULL) { + *pFramesRead = frameCount; + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_pulsewave_seek_to_pcm_frame(ma_pulsewave* pWaveform, ma_uint64 frameIndex) +{ + if (pWaveform == NULL) { + return MA_INVALID_ARGS; + } + + ma_waveform_seek_to_pcm_frame(&pWaveform->waveform, frameIndex); + + return MA_SUCCESS; +} + +MA_API ma_result ma_pulsewave_set_amplitude(ma_pulsewave* pWaveform, double amplitude) +{ + if (pWaveform == NULL) { + return MA_INVALID_ARGS; + } + + pWaveform->config.amplitude = amplitude; + ma_waveform_set_amplitude(&pWaveform->waveform, amplitude); + + return MA_SUCCESS; +} + +MA_API ma_result ma_pulsewave_set_frequency(ma_pulsewave* pWaveform, double frequency) +{ + if (pWaveform == NULL) { + return MA_INVALID_ARGS; + } + + pWaveform->config.frequency = frequency; + ma_waveform_set_frequency(&pWaveform->waveform, frequency); + + return MA_SUCCESS; +} + +MA_API ma_result ma_pulsewave_set_sample_rate(ma_pulsewave* pWaveform, ma_uint32 sampleRate) +{ + if (pWaveform == NULL) { + return MA_INVALID_ARGS; + } + + pWaveform->config.sampleRate = sampleRate; + ma_waveform_set_sample_rate(&pWaveform->waveform, sampleRate); + + return MA_SUCCESS; +} + +MA_API ma_result ma_pulsewave_set_duty_cycle(ma_pulsewave* pWaveform, double dutyCycle) +{ + if (pWaveform == NULL) { + return MA_INVALID_ARGS; + } + + pWaveform->config.dutyCycle = dutyCycle; + + return MA_SUCCESS; +} + + + +MA_API ma_noise_config ma_noise_config_init(ma_format format, ma_uint32 channels, ma_noise_type type, ma_int32 seed, double amplitude) +{ + ma_noise_config config; + MA_ZERO_OBJECT(&config); + + config.format = format; + config.channels = channels; + config.type = type; + config.seed = seed; + config.amplitude = amplitude; + + if (config.seed == 0) { + config.seed = MA_DEFAULT_LCG_SEED; + } + + return config; +} + + +static ma_result ma_noise__data_source_on_read(ma_data_source* pDataSource, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead) +{ + return ma_noise_read_pcm_frames((ma_noise*)pDataSource, pFramesOut, frameCount, pFramesRead); +} + +static ma_result ma_noise__data_source_on_seek(ma_data_source* pDataSource, ma_uint64 frameIndex) +{ + /* No-op. Just pretend to be successful. */ + (void)pDataSource; + (void)frameIndex; + return MA_SUCCESS; +} + +static ma_result ma_noise__data_source_on_get_data_format(ma_data_source* pDataSource, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate, ma_channel* pChannelMap, size_t channelMapCap) +{ + ma_noise* pNoise = (ma_noise*)pDataSource; + + *pFormat = pNoise->config.format; + *pChannels = pNoise->config.channels; + *pSampleRate = 0; /* There is no notion of sample rate with noise generation. */ + ma_channel_map_init_standard(ma_standard_channel_map_default, pChannelMap, channelMapCap, pNoise->config.channels); + + return MA_SUCCESS; +} + +static ma_data_source_vtable g_ma_noise_data_source_vtable = +{ + ma_noise__data_source_on_read, + ma_noise__data_source_on_seek, /* No-op for noise. */ + ma_noise__data_source_on_get_data_format, + NULL, /* onGetCursor. No notion of a cursor for noise. */ + NULL, /* onGetLength. No notion of a length for noise. 
*/ + NULL, /* onSetLooping */ + 0 +}; + + +#ifndef MA_PINK_NOISE_BIN_SIZE +#define MA_PINK_NOISE_BIN_SIZE 16 +#endif + +typedef struct +{ + size_t sizeInBytes; + struct + { + size_t binOffset; + size_t accumulationOffset; + size_t counterOffset; + } pink; + struct + { + size_t accumulationOffset; + } brownian; +} ma_noise_heap_layout; + +static ma_result ma_noise_get_heap_layout(const ma_noise_config* pConfig, ma_noise_heap_layout* pHeapLayout) +{ + MA_ASSERT(pHeapLayout != NULL); + + MA_ZERO_OBJECT(pHeapLayout); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + if (pConfig->channels == 0) { + return MA_INVALID_ARGS; + } + + pHeapLayout->sizeInBytes = 0; + + /* Pink. */ + if (pConfig->type == ma_noise_type_pink) { + /* bin */ + pHeapLayout->pink.binOffset = pHeapLayout->sizeInBytes; + pHeapLayout->sizeInBytes += sizeof(double*) * pConfig->channels; + pHeapLayout->sizeInBytes += sizeof(double ) * pConfig->channels * MA_PINK_NOISE_BIN_SIZE; + + /* accumulation */ + pHeapLayout->pink.accumulationOffset = pHeapLayout->sizeInBytes; + pHeapLayout->sizeInBytes += sizeof(double) * pConfig->channels; + + /* counter */ + pHeapLayout->pink.counterOffset = pHeapLayout->sizeInBytes; + pHeapLayout->sizeInBytes += sizeof(ma_uint32) * pConfig->channels; + } + + /* Brownian. */ + if (pConfig->type == ma_noise_type_brownian) { + /* accumulation */ + pHeapLayout->brownian.accumulationOffset = pHeapLayout->sizeInBytes; + pHeapLayout->sizeInBytes += sizeof(double) * pConfig->channels; + } + + /* Make sure allocation size is aligned. */ + pHeapLayout->sizeInBytes = ma_align_64(pHeapLayout->sizeInBytes); + + return MA_SUCCESS; +} + +MA_API ma_result ma_noise_get_heap_size(const ma_noise_config* pConfig, size_t* pHeapSizeInBytes) +{ + ma_result result; + ma_noise_heap_layout heapLayout; + + if (pHeapSizeInBytes == NULL) { + return MA_INVALID_ARGS; + } + + *pHeapSizeInBytes = 0; + + result = ma_noise_get_heap_layout(pConfig, &heapLayout); + if (result != MA_SUCCESS) { + return result; + } + + *pHeapSizeInBytes = heapLayout.sizeInBytes; + + return MA_SUCCESS; +} + +MA_API ma_result ma_noise_init_preallocated(const ma_noise_config* pConfig, void* pHeap, ma_noise* pNoise) +{ + ma_result result; + ma_noise_heap_layout heapLayout; + ma_data_source_config dataSourceConfig; + ma_uint32 iChannel; + + if (pNoise == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pNoise); + + result = ma_noise_get_heap_layout(pConfig, &heapLayout); + if (result != MA_SUCCESS) { + return result; + } + + pNoise->_pHeap = pHeap; + MA_ZERO_MEMORY(pNoise->_pHeap, heapLayout.sizeInBytes); + + dataSourceConfig = ma_data_source_config_init(); + dataSourceConfig.vtable = &g_ma_noise_data_source_vtable; + + result = ma_data_source_init(&dataSourceConfig, &pNoise->ds); + if (result != MA_SUCCESS) { + return result; + } + + pNoise->config = *pConfig; + ma_lcg_seed(&pNoise->lcg, pConfig->seed); + + if (pNoise->config.type == ma_noise_type_pink) { + pNoise->state.pink.bin = (double** )ma_offset_ptr(pHeap, heapLayout.pink.binOffset); + pNoise->state.pink.accumulation = (double* )ma_offset_ptr(pHeap, heapLayout.pink.accumulationOffset); + pNoise->state.pink.counter = (ma_uint32*)ma_offset_ptr(pHeap, heapLayout.pink.counterOffset); + + for (iChannel = 0; iChannel < pConfig->channels; iChannel += 1) { + pNoise->state.pink.bin[iChannel] = (double*)ma_offset_ptr(pHeap, heapLayout.pink.binOffset + (sizeof(double*) * pConfig->channels) + (sizeof(double) * MA_PINK_NOISE_BIN_SIZE * iChannel)); + pNoise->state.pink.accumulation[iChannel] = 0; + 
pNoise->state.pink.counter[iChannel] = 1; + } + } + + if (pNoise->config.type == ma_noise_type_brownian) { + pNoise->state.brownian.accumulation = (double*)ma_offset_ptr(pHeap, heapLayout.brownian.accumulationOffset); + + for (iChannel = 0; iChannel < pConfig->channels; iChannel += 1) { + pNoise->state.brownian.accumulation[iChannel] = 0; + } + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_noise_init(const ma_noise_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_noise* pNoise) +{ + ma_result result; + size_t heapSizeInBytes; + void* pHeap; + + result = ma_noise_get_heap_size(pConfig, &heapSizeInBytes); + if (result != MA_SUCCESS) { + return result; + } + + if (heapSizeInBytes > 0) { + pHeap = ma_malloc(heapSizeInBytes, pAllocationCallbacks); + if (pHeap == NULL) { + return MA_OUT_OF_MEMORY; + } + } else { + pHeap = NULL; + } + + result = ma_noise_init_preallocated(pConfig, pHeap, pNoise); + if (result != MA_SUCCESS) { + ma_free(pHeap, pAllocationCallbacks); + return result; + } + + pNoise->_ownsHeap = MA_TRUE; + return MA_SUCCESS; +} + +MA_API void ma_noise_uninit(ma_noise* pNoise, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pNoise == NULL) { + return; + } + + ma_data_source_uninit(&pNoise->ds); + + if (pNoise->_ownsHeap) { + ma_free(pNoise->_pHeap, pAllocationCallbacks); + } +} + +MA_API ma_result ma_noise_set_amplitude(ma_noise* pNoise, double amplitude) +{ + if (pNoise == NULL) { + return MA_INVALID_ARGS; + } + + pNoise->config.amplitude = amplitude; + return MA_SUCCESS; +} + +MA_API ma_result ma_noise_set_seed(ma_noise* pNoise, ma_int32 seed) +{ + if (pNoise == NULL) { + return MA_INVALID_ARGS; + } + + pNoise->lcg.state = seed; + return MA_SUCCESS; +} + + +MA_API ma_result ma_noise_set_type(ma_noise* pNoise, ma_noise_type type) +{ + if (pNoise == NULL) { + return MA_INVALID_ARGS; + } + + /* + This function should never have been implemented in the first place. Changing the type dynamically is not + supported. Instead you need to uninitialize and reinitialize a fresh `ma_noise` object. This function + will be removed in version 0.12. 
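For example, switching an existing generator from white to pink noise should be done like this instead of calling this function (noise and config being the caller's existing objects):

    ma_noise_uninit(&noise, NULL);
    config.type = ma_noise_type_pink;
    ma_noise_init(&config, NULL, &noise);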
+ */ + MA_ASSERT(MA_FALSE); + (void)type; + + return MA_INVALID_OPERATION; +} + +static MA_INLINE float ma_noise_f32_white(ma_noise* pNoise) +{ + return (float)(ma_lcg_rand_f64(&pNoise->lcg) * pNoise->config.amplitude); +} + +static MA_INLINE ma_int16 ma_noise_s16_white(ma_noise* pNoise) +{ + return ma_pcm_sample_f32_to_s16(ma_noise_f32_white(pNoise)); +} + +static MA_INLINE ma_uint64 ma_noise_read_pcm_frames__white(ma_noise* pNoise, void* pFramesOut, ma_uint64 frameCount) +{ + ma_uint64 iFrame; + ma_uint32 iChannel; + const ma_uint32 channels = pNoise->config.channels; + MA_ASSUME(channels > 0); + + if (pNoise->config.format == ma_format_f32) { + float* pFramesOutF32 = (float*)pFramesOut; + if (pNoise->config.duplicateChannels) { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + float s = ma_noise_f32_white(pNoise); + for (iChannel = 0; iChannel < channels; iChannel += 1) { + pFramesOutF32[iFrame*channels + iChannel] = s; + } + } + } else { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + for (iChannel = 0; iChannel < channels; iChannel += 1) { + pFramesOutF32[iFrame*channels + iChannel] = ma_noise_f32_white(pNoise); + } + } + } + } else if (pNoise->config.format == ma_format_s16) { + ma_int16* pFramesOutS16 = (ma_int16*)pFramesOut; + if (pNoise->config.duplicateChannels) { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + ma_int16 s = ma_noise_s16_white(pNoise); + for (iChannel = 0; iChannel < channels; iChannel += 1) { + pFramesOutS16[iFrame*channels + iChannel] = s; + } + } + } else { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + for (iChannel = 0; iChannel < channels; iChannel += 1) { + pFramesOutS16[iFrame*channels + iChannel] = ma_noise_s16_white(pNoise); + } + } + } + } else { + const ma_uint32 bps = ma_get_bytes_per_sample(pNoise->config.format); + const ma_uint32 bpf = bps * channels; + + if (pNoise->config.duplicateChannels) { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + float s = ma_noise_f32_white(pNoise); + for (iChannel = 0; iChannel < channels; iChannel += 1) { + ma_pcm_convert(ma_offset_ptr(pFramesOut, iFrame*bpf + iChannel*bps), pNoise->config.format, &s, ma_format_f32, 1, ma_dither_mode_none); + } + } + } else { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + for (iChannel = 0; iChannel < channels; iChannel += 1) { + float s = ma_noise_f32_white(pNoise); + ma_pcm_convert(ma_offset_ptr(pFramesOut, iFrame*bpf + iChannel*bps), pNoise->config.format, &s, ma_format_f32, 1, ma_dither_mode_none); + } + } + } + } + + return frameCount; +} + + +static MA_INLINE unsigned int ma_tzcnt32(unsigned int x) +{ + unsigned int n; + + /* Special case for odd numbers since they should happen about half the time. */ + if (x & 0x1) { + return 0; + } + + if (x == 0) { + return sizeof(x) << 3; + } + + n = 1; + if ((x & 0x0000FFFF) == 0) { x >>= 16; n += 16; } + if ((x & 0x000000FF) == 0) { x >>= 8; n += 8; } + if ((x & 0x0000000F) == 0) { x >>= 4; n += 4; } + if ((x & 0x00000003) == 0) { x >>= 2; n += 2; } + n -= x & 0x00000001; + + return n; +} + +/* +Pink noise generation based on Tonic (public domain) with modifications. 
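This is essentially a Voss-McCartney style generator: ma_tzcnt32() of an incrementing per-channel counter selects which of the MA_PINK_NOISE_BIN_SIZE bins to refresh on a given frame, so bin k is refreshed every 2^k frames (counter values 1, 2, 3, 4 map to bins 0, 1, 0, 2), and the running accumulation of bin deltas plus one fresh white sample per frame approximates the 1/f spectrum. References: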
https://github.com/TonicAudio/Tonic/blob/master/src/Tonic/Noise.h + +This is basically _the_ reference for pink noise from what I've found: http://www.firstpr.com.au/dsp/pink-noise/ +*/ +static MA_INLINE float ma_noise_f32_pink(ma_noise* pNoise, ma_uint32 iChannel) +{ + double result; + double binPrev; + double binNext; + unsigned int ibin; + + ibin = ma_tzcnt32(pNoise->state.pink.counter[iChannel]) & (MA_PINK_NOISE_BIN_SIZE - 1); + + binPrev = pNoise->state.pink.bin[iChannel][ibin]; + binNext = ma_lcg_rand_f64(&pNoise->lcg); + pNoise->state.pink.bin[iChannel][ibin] = binNext; + + pNoise->state.pink.accumulation[iChannel] += (binNext - binPrev); + pNoise->state.pink.counter[iChannel] += 1; + + result = (ma_lcg_rand_f64(&pNoise->lcg) + pNoise->state.pink.accumulation[iChannel]); + result /= 10; + + return (float)(result * pNoise->config.amplitude); +} + +static MA_INLINE ma_int16 ma_noise_s16_pink(ma_noise* pNoise, ma_uint32 iChannel) +{ + return ma_pcm_sample_f32_to_s16(ma_noise_f32_pink(pNoise, iChannel)); +} + +static MA_INLINE ma_uint64 ma_noise_read_pcm_frames__pink(ma_noise* pNoise, void* pFramesOut, ma_uint64 frameCount) +{ + ma_uint64 iFrame; + ma_uint32 iChannel; + const ma_uint32 channels = pNoise->config.channels; + MA_ASSUME(channels > 0); + + if (pNoise->config.format == ma_format_f32) { + float* pFramesOutF32 = (float*)pFramesOut; + if (pNoise->config.duplicateChannels) { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + float s = ma_noise_f32_pink(pNoise, 0); + for (iChannel = 0; iChannel < channels; iChannel += 1) { + pFramesOutF32[iFrame*channels + iChannel] = s; + } + } + } else { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + for (iChannel = 0; iChannel < channels; iChannel += 1) { + pFramesOutF32[iFrame*channels + iChannel] = ma_noise_f32_pink(pNoise, iChannel); + } + } + } + } else if (pNoise->config.format == ma_format_s16) { + ma_int16* pFramesOutS16 = (ma_int16*)pFramesOut; + if (pNoise->config.duplicateChannels) { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + ma_int16 s = ma_noise_s16_pink(pNoise, 0); + for (iChannel = 0; iChannel < channels; iChannel += 1) { + pFramesOutS16[iFrame*channels + iChannel] = s; + } + } + } else { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + for (iChannel = 0; iChannel < channels; iChannel += 1) { + pFramesOutS16[iFrame*channels + iChannel] = ma_noise_s16_pink(pNoise, iChannel); + } + } + } + } else { + const ma_uint32 bps = ma_get_bytes_per_sample(pNoise->config.format); + const ma_uint32 bpf = bps * channels; + + if (pNoise->config.duplicateChannels) { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + float s = ma_noise_f32_pink(pNoise, 0); + for (iChannel = 0; iChannel < channels; iChannel += 1) { + ma_pcm_convert(ma_offset_ptr(pFramesOut, iFrame*bpf + iChannel*bps), pNoise->config.format, &s, ma_format_f32, 1, ma_dither_mode_none); + } + } + } else { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + for (iChannel = 0; iChannel < channels; iChannel += 1) { + float s = ma_noise_f32_pink(pNoise, iChannel); + ma_pcm_convert(ma_offset_ptr(pFramesOut, iFrame*bpf + iChannel*bps), pNoise->config.format, &s, ma_format_f32, 1, ma_dither_mode_none); + } + } + } + } + + return frameCount; +} + + +static MA_INLINE float ma_noise_f32_brownian(ma_noise* pNoise, ma_uint32 iChannel) +{ + double result; + + result = (ma_lcg_rand_f64(&pNoise->lcg) + pNoise->state.brownian.accumulation[iChannel]); + result /= 1.005; /* Don't escape the -1..1 range on average. 
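In other words the per-channel accumulator follows acc[n] = (white[n] + acc[n-1]) / 1.005, a leaky integrator: the 1 / 1.005 decay keeps the random walk from drifting without bound, and the division by 20 just below rescales the result into a sensible output range before the configured amplitude is applied.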
*/ + + pNoise->state.brownian.accumulation[iChannel] = result; + result /= 20; + + return (float)(result * pNoise->config.amplitude); +} + +static MA_INLINE ma_int16 ma_noise_s16_brownian(ma_noise* pNoise, ma_uint32 iChannel) +{ + return ma_pcm_sample_f32_to_s16(ma_noise_f32_brownian(pNoise, iChannel)); +} + +static MA_INLINE ma_uint64 ma_noise_read_pcm_frames__brownian(ma_noise* pNoise, void* pFramesOut, ma_uint64 frameCount) +{ + ma_uint64 iFrame; + ma_uint32 iChannel; + const ma_uint32 channels = pNoise->config.channels; + MA_ASSUME(channels > 0); + + if (pNoise->config.format == ma_format_f32) { + float* pFramesOutF32 = (float*)pFramesOut; + if (pNoise->config.duplicateChannels) { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + float s = ma_noise_f32_brownian(pNoise, 0); + for (iChannel = 0; iChannel < channels; iChannel += 1) { + pFramesOutF32[iFrame*channels + iChannel] = s; + } + } + } else { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + for (iChannel = 0; iChannel < channels; iChannel += 1) { + pFramesOutF32[iFrame*channels + iChannel] = ma_noise_f32_brownian(pNoise, iChannel); + } + } + } + } else if (pNoise->config.format == ma_format_s16) { + ma_int16* pFramesOutS16 = (ma_int16*)pFramesOut; + if (pNoise->config.duplicateChannels) { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + ma_int16 s = ma_noise_s16_brownian(pNoise, 0); + for (iChannel = 0; iChannel < channels; iChannel += 1) { + pFramesOutS16[iFrame*channels + iChannel] = s; + } + } + } else { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + for (iChannel = 0; iChannel < channels; iChannel += 1) { + pFramesOutS16[iFrame*channels + iChannel] = ma_noise_s16_brownian(pNoise, iChannel); + } + } + } + } else { + const ma_uint32 bps = ma_get_bytes_per_sample(pNoise->config.format); + const ma_uint32 bpf = bps * channels; + + if (pNoise->config.duplicateChannels) { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + float s = ma_noise_f32_brownian(pNoise, 0); + for (iChannel = 0; iChannel < channels; iChannel += 1) { + ma_pcm_convert(ma_offset_ptr(pFramesOut, iFrame*bpf + iChannel*bps), pNoise->config.format, &s, ma_format_f32, 1, ma_dither_mode_none); + } + } + } else { + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + for (iChannel = 0; iChannel < channels; iChannel += 1) { + float s = ma_noise_f32_brownian(pNoise, iChannel); + ma_pcm_convert(ma_offset_ptr(pFramesOut, iFrame*bpf + iChannel*bps), pNoise->config.format, &s, ma_format_f32, 1, ma_dither_mode_none); + } + } + } + } + + return frameCount; +} + +MA_API ma_result ma_noise_read_pcm_frames(ma_noise* pNoise, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead) +{ + ma_uint64 framesRead = 0; + + if (pFramesRead != NULL) { + *pFramesRead = 0; + } + + if (frameCount == 0) { + return MA_INVALID_ARGS; + } + + if (pNoise == NULL) { + return MA_INVALID_ARGS; + } + + /* The output buffer is allowed to be NULL. Since we aren't tracking cursors or anything we can just do nothing and pretend to be successful. */ + if (pFramesOut == NULL) { + framesRead = frameCount; + } else { + switch (pNoise->config.type) { + case ma_noise_type_white: framesRead = ma_noise_read_pcm_frames__white (pNoise, pFramesOut, frameCount); break; + case ma_noise_type_pink: framesRead = ma_noise_read_pcm_frames__pink (pNoise, pFramesOut, frameCount); break; + case ma_noise_type_brownian: framesRead = ma_noise_read_pcm_frames__brownian(pNoise, pFramesOut, frameCount); break; + default: return MA_INVALID_OPERATION; /* Unknown noise type. 
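Typical caller-side use of this dispatcher, as an illustrative sketch (stereo white noise, default seed, error handling elided, pFramesOut and frameCount supplied by the caller):

    ma_noise_config cfg = ma_noise_config_init(ma_format_f32, 2, ma_noise_type_white, 0, 0.5);
    ma_noise noise;
    ma_noise_init(&cfg, NULL, &noise);
    ma_noise_read_pcm_frames(&noise, pFramesOut, frameCount, NULL);
    ma_noise_uninit(&noise, NULL);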
*/ + } + } + + if (pFramesRead != NULL) { + *pFramesRead = framesRead; + } + + return MA_SUCCESS; +} +#endif /* MA_NO_GENERATION */ + + + +#ifndef MA_NO_RESOURCE_MANAGER +#ifndef MA_RESOURCE_MANAGER_PAGE_SIZE_IN_MILLISECONDS +#define MA_RESOURCE_MANAGER_PAGE_SIZE_IN_MILLISECONDS 1000 +#endif + +#ifndef MA_JOB_TYPE_RESOURCE_MANAGER_QUEUE_CAPACITY +#define MA_JOB_TYPE_RESOURCE_MANAGER_QUEUE_CAPACITY 1024 +#endif + +MA_API ma_resource_manager_pipeline_notifications ma_resource_manager_pipeline_notifications_init(void) +{ + ma_resource_manager_pipeline_notifications notifications; + + MA_ZERO_OBJECT(¬ifications); + + return notifications; +} + +static void ma_resource_manager_pipeline_notifications_signal_all_notifications(const ma_resource_manager_pipeline_notifications* pPipelineNotifications) +{ + if (pPipelineNotifications == NULL) { + return; + } + + if (pPipelineNotifications->init.pNotification) { ma_async_notification_signal(pPipelineNotifications->init.pNotification); } + if (pPipelineNotifications->done.pNotification) { ma_async_notification_signal(pPipelineNotifications->done.pNotification); } +} + +static void ma_resource_manager_pipeline_notifications_acquire_all_fences(const ma_resource_manager_pipeline_notifications* pPipelineNotifications) +{ + if (pPipelineNotifications == NULL) { + return; + } + + if (pPipelineNotifications->init.pFence != NULL) { ma_fence_acquire(pPipelineNotifications->init.pFence); } + if (pPipelineNotifications->done.pFence != NULL) { ma_fence_acquire(pPipelineNotifications->done.pFence); } +} + +static void ma_resource_manager_pipeline_notifications_release_all_fences(const ma_resource_manager_pipeline_notifications* pPipelineNotifications) +{ + if (pPipelineNotifications == NULL) { + return; + } + + if (pPipelineNotifications->init.pFence != NULL) { ma_fence_release(pPipelineNotifications->init.pFence); } + if (pPipelineNotifications->done.pFence != NULL) { ma_fence_release(pPipelineNotifications->done.pFence); } +} + + + +#ifndef MA_DEFAULT_HASH_SEED +#define MA_DEFAULT_HASH_SEED 42 +#endif + +/* MurmurHash3. Based on code from https://github.com/PeterScott/murmur3/blob/master/murmur3.c (public domain). */ +#if defined(__clang__) || (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6))) + #pragma GCC diagnostic push + #if __GNUC__ >= 7 + #pragma GCC diagnostic ignored "-Wimplicit-fallthrough" + #endif +#endif + +static MA_INLINE ma_uint32 ma_rotl32(ma_uint32 x, ma_int8 r) +{ + return (x << r) | (x >> (32 - r)); +} + +static MA_INLINE ma_uint32 ma_hash_getblock(const ma_uint32* blocks, int i) +{ + ma_uint32 block; + + /* Try silencing a sanitization warning about unaligned access by doing a memcpy() instead of assignment. 
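The key is typically a file path string with no alignment guarantee, so dereferencing the blocks pointer as a ma_uint32 directly would be undefined behaviour on strict-alignment targets; the memcpy() is well-defined and compiles down to the same single load on typical x86 and ARM toolchains.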
*/ + MA_COPY_MEMORY(&block, ma_offset_ptr(blocks, i * sizeof(block)), sizeof(block)); + + if (ma_is_little_endian()) { + return block; + } else { + return ma_swap_endian_uint32(block); + } +} + +static MA_INLINE ma_uint32 ma_hash_fmix32(ma_uint32 h) +{ + h ^= h >> 16; + h *= 0x85ebca6b; + h ^= h >> 13; + h *= 0xc2b2ae35; + h ^= h >> 16; + + return h; +} + +static ma_uint32 ma_hash_32(const void* key, int len, ma_uint32 seed) +{ + const ma_uint8* data = (const ma_uint8*)key; + const ma_uint32* blocks; + const ma_uint8* tail; + const int nblocks = len / 4; + ma_uint32 h1 = seed; + ma_uint32 c1 = 0xcc9e2d51; + ma_uint32 c2 = 0x1b873593; + ma_uint32 k1; + int i; + + blocks = (const ma_uint32 *)(data + nblocks*4); + + for(i = -nblocks; i; i++) { + k1 = ma_hash_getblock(blocks,i); + + k1 *= c1; + k1 = ma_rotl32(k1, 15); + k1 *= c2; + + h1 ^= k1; + h1 = ma_rotl32(h1, 13); + h1 = h1*5 + 0xe6546b64; + } + + + tail = (const ma_uint8*)(data + nblocks*4); + + k1 = 0; + switch(len & 3) { + case 3: k1 ^= tail[2] << 16; + case 2: k1 ^= tail[1] << 8; + case 1: k1 ^= tail[0]; + k1 *= c1; k1 = ma_rotl32(k1, 15); k1 *= c2; h1 ^= k1; + }; + + + h1 ^= len; + h1 = ma_hash_fmix32(h1); + + return h1; +} + +#if defined(__clang__) || (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6))) + #pragma GCC diagnostic pop +#endif +/* End MurmurHash3 */ + +static ma_uint32 ma_hash_string_32(const char* str) +{ + return ma_hash_32(str, (int)strlen(str), MA_DEFAULT_HASH_SEED); +} + +static ma_uint32 ma_hash_string_w_32(const wchar_t* str) +{ + return ma_hash_32(str, (int)wcslen(str) * sizeof(*str), MA_DEFAULT_HASH_SEED); +} + + + + +/* +Basic BST Functions +*/ +static ma_result ma_resource_manager_data_buffer_node_search(ma_resource_manager* pResourceManager, ma_uint32 hashedName32, ma_resource_manager_data_buffer_node** ppDataBufferNode) +{ + ma_resource_manager_data_buffer_node* pCurrentNode; + + MA_ASSERT(pResourceManager != NULL); + MA_ASSERT(ppDataBufferNode != NULL); + + pCurrentNode = pResourceManager->pRootDataBufferNode; + while (pCurrentNode != NULL) { + if (hashedName32 == pCurrentNode->hashedName32) { + break; /* Found. */ + } else if (hashedName32 < pCurrentNode->hashedName32) { + pCurrentNode = pCurrentNode->pChildLo; + } else { + pCurrentNode = pCurrentNode->pChildHi; + } + } + + *ppDataBufferNode = pCurrentNode; + + if (pCurrentNode == NULL) { + return MA_DOES_NOT_EXIST; + } else { + return MA_SUCCESS; + } +} + +static ma_result ma_resource_manager_data_buffer_node_insert_point(ma_resource_manager* pResourceManager, ma_uint32 hashedName32, ma_resource_manager_data_buffer_node** ppInsertPoint) +{ + ma_result result = MA_SUCCESS; + ma_resource_manager_data_buffer_node* pCurrentNode; + + MA_ASSERT(pResourceManager != NULL); + MA_ASSERT(ppInsertPoint != NULL); + + *ppInsertPoint = NULL; + + if (pResourceManager->pRootDataBufferNode == NULL) { + return MA_SUCCESS; /* No items. */ + } + + /* We need to find the node that will become the parent of the new node. If a node is found that already has the same hashed name we need to return MA_ALREADY_EXISTS. 
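Callers pair this with ma_resource_manager_data_buffer_node_insert_at() below, roughly as follows (pNewNode standing in for a freshly allocated node):

    result = ma_resource_manager_data_buffer_node_insert_point(pResourceManager, hashedName32, &pInsertPoint);
    if (result == MA_ALREADY_EXISTS) reuse the node returned in pInsertPoint;
    else ma_resource_manager_data_buffer_node_insert_at(pResourceManager, pNewNode, pInsertPoint);

A NULL insert point together with MA_SUCCESS means the tree was empty and the new node becomes the root.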
*/ + pCurrentNode = pResourceManager->pRootDataBufferNode; + while (pCurrentNode != NULL) { + if (hashedName32 == pCurrentNode->hashedName32) { + result = MA_ALREADY_EXISTS; + break; + } else { + if (hashedName32 < pCurrentNode->hashedName32) { + if (pCurrentNode->pChildLo == NULL) { + result = MA_SUCCESS; + break; + } else { + pCurrentNode = pCurrentNode->pChildLo; + } + } else { + if (pCurrentNode->pChildHi == NULL) { + result = MA_SUCCESS; + break; + } else { + pCurrentNode = pCurrentNode->pChildHi; + } + } + } + } + + *ppInsertPoint = pCurrentNode; + return result; +} + +static ma_result ma_resource_manager_data_buffer_node_insert_at(ma_resource_manager* pResourceManager, ma_resource_manager_data_buffer_node* pDataBufferNode, ma_resource_manager_data_buffer_node* pInsertPoint) +{ + MA_ASSERT(pResourceManager != NULL); + MA_ASSERT(pDataBufferNode != NULL); + + /* The key must have been set before calling this function. */ + MA_ASSERT(pDataBufferNode->hashedName32 != 0); + + if (pInsertPoint == NULL) { + /* It's the first node. */ + pResourceManager->pRootDataBufferNode = pDataBufferNode; + } else { + /* It's not the first node. It needs to be inserted. */ + if (pDataBufferNode->hashedName32 < pInsertPoint->hashedName32) { + MA_ASSERT(pInsertPoint->pChildLo == NULL); + pInsertPoint->pChildLo = pDataBufferNode; + } else { + MA_ASSERT(pInsertPoint->pChildHi == NULL); + pInsertPoint->pChildHi = pDataBufferNode; + } + } + + pDataBufferNode->pParent = pInsertPoint; + + return MA_SUCCESS; +} + +#if 0 /* Unused for now. */ +static ma_result ma_resource_manager_data_buffer_node_insert(ma_resource_manager* pResourceManager, ma_resource_manager_data_buffer_node* pDataBufferNode) +{ + ma_result result; + ma_resource_manager_data_buffer_node* pInsertPoint; + + MA_ASSERT(pResourceManager != NULL); + MA_ASSERT(pDataBufferNode != NULL); + + result = ma_resource_manager_data_buffer_node_insert_point(pResourceManager, pDataBufferNode->hashedName32, &pInsertPoint); + if (result != MA_SUCCESS) { + return MA_INVALID_ARGS; + } + + return ma_resource_manager_data_buffer_node_insert_at(pResourceManager, pDataBufferNode, pInsertPoint); +} +#endif + +static MA_INLINE ma_resource_manager_data_buffer_node* ma_resource_manager_data_buffer_node_find_min(ma_resource_manager_data_buffer_node* pDataBufferNode) +{ + ma_resource_manager_data_buffer_node* pCurrentNode; + + MA_ASSERT(pDataBufferNode != NULL); + + pCurrentNode = pDataBufferNode; + while (pCurrentNode->pChildLo != NULL) { + pCurrentNode = pCurrentNode->pChildLo; + } + + return pCurrentNode; +} + +static MA_INLINE ma_resource_manager_data_buffer_node* ma_resource_manager_data_buffer_node_find_max(ma_resource_manager_data_buffer_node* pDataBufferNode) +{ + ma_resource_manager_data_buffer_node* pCurrentNode; + + MA_ASSERT(pDataBufferNode != NULL); + + pCurrentNode = pDataBufferNode; + while (pCurrentNode->pChildHi != NULL) { + pCurrentNode = pCurrentNode->pChildHi; + } + + return pCurrentNode; +} + +static MA_INLINE ma_resource_manager_data_buffer_node* ma_resource_manager_data_buffer_node_find_inorder_successor(ma_resource_manager_data_buffer_node* pDataBufferNode) +{ + MA_ASSERT(pDataBufferNode != NULL); + MA_ASSERT(pDataBufferNode->pChildHi != NULL); + + return ma_resource_manager_data_buffer_node_find_min(pDataBufferNode->pChildHi); +} + +static MA_INLINE ma_resource_manager_data_buffer_node* ma_resource_manager_data_buffer_node_find_inorder_predecessor(ma_resource_manager_data_buffer_node* pDataBufferNode) +{ + MA_ASSERT(pDataBufferNode != NULL); + 
MA_ASSERT(pDataBufferNode->pChildLo != NULL); + + return ma_resource_manager_data_buffer_node_find_max(pDataBufferNode->pChildLo); +} + +static ma_result ma_resource_manager_data_buffer_node_remove(ma_resource_manager* pResourceManager, ma_resource_manager_data_buffer_node* pDataBufferNode) +{ + MA_ASSERT(pResourceManager != NULL); + MA_ASSERT(pDataBufferNode != NULL); + + if (pDataBufferNode->pChildLo == NULL) { + if (pDataBufferNode->pChildHi == NULL) { + /* Simple case - deleting a buffer with no children. */ + if (pDataBufferNode->pParent == NULL) { + MA_ASSERT(pResourceManager->pRootDataBufferNode == pDataBufferNode); /* There is only a single buffer in the tree which should be equal to the root node. */ + pResourceManager->pRootDataBufferNode = NULL; + } else { + if (pDataBufferNode->pParent->pChildLo == pDataBufferNode) { + pDataBufferNode->pParent->pChildLo = NULL; + } else { + pDataBufferNode->pParent->pChildHi = NULL; + } + } + } else { + /* Node has one child - pChildHi != NULL. */ + pDataBufferNode->pChildHi->pParent = pDataBufferNode->pParent; + + if (pDataBufferNode->pParent == NULL) { + MA_ASSERT(pResourceManager->pRootDataBufferNode == pDataBufferNode); + pResourceManager->pRootDataBufferNode = pDataBufferNode->pChildHi; + } else { + if (pDataBufferNode->pParent->pChildLo == pDataBufferNode) { + pDataBufferNode->pParent->pChildLo = pDataBufferNode->pChildHi; + } else { + pDataBufferNode->pParent->pChildHi = pDataBufferNode->pChildHi; + } + } + } + } else { + if (pDataBufferNode->pChildHi == NULL) { + /* Node has one child - pChildLo != NULL. */ + pDataBufferNode->pChildLo->pParent = pDataBufferNode->pParent; + + if (pDataBufferNode->pParent == NULL) { + MA_ASSERT(pResourceManager->pRootDataBufferNode == pDataBufferNode); + pResourceManager->pRootDataBufferNode = pDataBufferNode->pChildLo; + } else { + if (pDataBufferNode->pParent->pChildLo == pDataBufferNode) { + pDataBufferNode->pParent->pChildLo = pDataBufferNode->pChildLo; + } else { + pDataBufferNode->pParent->pChildHi = pDataBufferNode->pChildLo; + } + } + } else { + /* Complex case - deleting a node with two children. */ + ma_resource_manager_data_buffer_node* pReplacementDataBufferNode; + + /* For now we are just going to use the in-order successor as the replacement, but we may want to try to keep this balanced by switching between the two. */ + pReplacementDataBufferNode = ma_resource_manager_data_buffer_node_find_inorder_successor(pDataBufferNode); + MA_ASSERT(pReplacementDataBufferNode != NULL); + + /* + Now that we have our replacement node we can make the change. The simple way to do this would be to just exchange the values, and then remove the replacement + node, however we track specific nodes via pointers which means we can't just swap out the values. We need to instead just change the pointers around. The + replacement node should have at most 1 child. Therefore, we can detach it in terms of our simpler cases above. What we're essentially doing is detaching the + replacement node and reinserting it into the same position as the deleted node. + */ + MA_ASSERT(pReplacementDataBufferNode->pParent != NULL); /* The replacement node should never be the root which means it should always have a parent. */ + MA_ASSERT(pReplacementDataBufferNode->pChildLo == NULL); /* Because we used in-order successor. This would be pChildHi == NULL if we used in-order predecessor. 
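A worked example: deleting node 5 from

        5
       / \
      3   9
         /
        7

The in-order successor of 5 is 7, the minimum of the right subtree, and by construction it has no left child, so it can be detached from under 9 using the simple one-child logic and then re-linked into 5's position, leaving 7 as the subtree root with children 3 and 9.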
*/ + + if (pReplacementDataBufferNode->pChildHi == NULL) { + if (pReplacementDataBufferNode->pParent->pChildLo == pReplacementDataBufferNode) { + pReplacementDataBufferNode->pParent->pChildLo = NULL; + } else { + pReplacementDataBufferNode->pParent->pChildHi = NULL; + } + } else { + pReplacementDataBufferNode->pChildHi->pParent = pReplacementDataBufferNode->pParent; + if (pReplacementDataBufferNode->pParent->pChildLo == pReplacementDataBufferNode) { + pReplacementDataBufferNode->pParent->pChildLo = pReplacementDataBufferNode->pChildHi; + } else { + pReplacementDataBufferNode->pParent->pChildHi = pReplacementDataBufferNode->pChildHi; + } + } + + + /* The replacement node has essentially been detached from the binary tree, so now we need to replace the old data buffer with it. The first thing to update is the parent */ + if (pDataBufferNode->pParent != NULL) { + if (pDataBufferNode->pParent->pChildLo == pDataBufferNode) { + pDataBufferNode->pParent->pChildLo = pReplacementDataBufferNode; + } else { + pDataBufferNode->pParent->pChildHi = pReplacementDataBufferNode; + } + } + + /* Now need to update the replacement node's pointers. */ + pReplacementDataBufferNode->pParent = pDataBufferNode->pParent; + pReplacementDataBufferNode->pChildLo = pDataBufferNode->pChildLo; + pReplacementDataBufferNode->pChildHi = pDataBufferNode->pChildHi; + + /* Now the children of the replacement node need to have their parent pointers updated. */ + if (pReplacementDataBufferNode->pChildLo != NULL) { + pReplacementDataBufferNode->pChildLo->pParent = pReplacementDataBufferNode; + } + if (pReplacementDataBufferNode->pChildHi != NULL) { + pReplacementDataBufferNode->pChildHi->pParent = pReplacementDataBufferNode; + } + + /* Now the root node needs to be updated. */ + if (pResourceManager->pRootDataBufferNode == pDataBufferNode) { + pResourceManager->pRootDataBufferNode = pReplacementDataBufferNode; + } + } + } + + return MA_SUCCESS; +} + +#if 0 /* Unused for now. */ +static ma_result ma_resource_manager_data_buffer_node_remove_by_key(ma_resource_manager* pResourceManager, ma_uint32 hashedName32) +{ + ma_result result; + ma_resource_manager_data_buffer_node* pDataBufferNode; + + result = ma_resource_manager_data_buffer_search(pResourceManager, hashedName32, &pDataBufferNode); + if (result != MA_SUCCESS) { + return result; /* Could not find the data buffer. 
*/ + } + + return ma_resource_manager_data_buffer_remove(pResourceManager, pDataBufferNode); +} +#endif + +static ma_resource_manager_data_supply_type ma_resource_manager_data_buffer_node_get_data_supply_type(ma_resource_manager_data_buffer_node* pDataBufferNode) +{ + return (ma_resource_manager_data_supply_type)ma_atomic_load_i32(&pDataBufferNode->data.type); +} + +static void ma_resource_manager_data_buffer_node_set_data_supply_type(ma_resource_manager_data_buffer_node* pDataBufferNode, ma_resource_manager_data_supply_type supplyType) +{ + ma_atomic_exchange_i32(&pDataBufferNode->data.type, supplyType); +} + +static ma_result ma_resource_manager_data_buffer_node_increment_ref(ma_resource_manager* pResourceManager, ma_resource_manager_data_buffer_node* pDataBufferNode, ma_uint32* pNewRefCount) +{ + ma_uint32 refCount; + + MA_ASSERT(pResourceManager != NULL); + MA_ASSERT(pDataBufferNode != NULL); + + (void)pResourceManager; + + refCount = ma_atomic_fetch_add_32(&pDataBufferNode->refCount, 1) + 1; + + if (pNewRefCount != NULL) { + *pNewRefCount = refCount; + } + + return MA_SUCCESS; +} + +static ma_result ma_resource_manager_data_buffer_node_decrement_ref(ma_resource_manager* pResourceManager, ma_resource_manager_data_buffer_node* pDataBufferNode, ma_uint32* pNewRefCount) +{ + ma_uint32 refCount; + + MA_ASSERT(pResourceManager != NULL); + MA_ASSERT(pDataBufferNode != NULL); + + (void)pResourceManager; + + refCount = ma_atomic_fetch_sub_32(&pDataBufferNode->refCount, 1) - 1; + + if (pNewRefCount != NULL) { + *pNewRefCount = refCount; + } + + return MA_SUCCESS; +} + +static void ma_resource_manager_data_buffer_node_free(ma_resource_manager* pResourceManager, ma_resource_manager_data_buffer_node* pDataBufferNode) +{ + MA_ASSERT(pResourceManager != NULL); + MA_ASSERT(pDataBufferNode != NULL); + + if (pDataBufferNode->isDataOwnedByResourceManager) { + if (ma_resource_manager_data_buffer_node_get_data_supply_type(pDataBufferNode) == ma_resource_manager_data_supply_type_encoded) { + ma_free((void*)pDataBufferNode->data.backend.encoded.pData, &pResourceManager->config.allocationCallbacks); + pDataBufferNode->data.backend.encoded.pData = NULL; + pDataBufferNode->data.backend.encoded.sizeInBytes = 0; + } else if (ma_resource_manager_data_buffer_node_get_data_supply_type(pDataBufferNode) == ma_resource_manager_data_supply_type_decoded) { + ma_free((void*)pDataBufferNode->data.backend.decoded.pData, &pResourceManager->config.allocationCallbacks); + pDataBufferNode->data.backend.decoded.pData = NULL; + pDataBufferNode->data.backend.decoded.totalFrameCount = 0; + } else if (ma_resource_manager_data_buffer_node_get_data_supply_type(pDataBufferNode) == ma_resource_manager_data_supply_type_decoded_paged) { + ma_paged_audio_buffer_data_uninit(&pDataBufferNode->data.backend.decodedPaged.data, &pResourceManager->config.allocationCallbacks); + } else { + /* Should never hit this if the node was successfully initialized. */ + MA_ASSERT(pDataBufferNode->result != MA_SUCCESS); + } + } + + /* The data buffer itself needs to be freed. */ + ma_free(pDataBufferNode, &pResourceManager->config.allocationCallbacks); +} + +static ma_result ma_resource_manager_data_buffer_node_result(const ma_resource_manager_data_buffer_node* pDataBufferNode) +{ + MA_ASSERT(pDataBufferNode != NULL); + + return (ma_result)ma_atomic_load_i32((ma_result*)&pDataBufferNode->result); /* Need a naughty const-cast here. 
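The result field doubles as the node's lifecycle state: it holds MA_BUSY while an asynchronous job is still loading or decoding the data, MA_SUCCESS once the data supply is usable, and an error code if loading failed, so callers can poll readiness with something like

    while (ma_resource_manager_data_buffer_node_result(pNode) == MA_BUSY) { pump jobs or wait }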
*/ +} + + +static ma_bool32 ma_resource_manager_is_threading_enabled(const ma_resource_manager* pResourceManager) +{ + MA_ASSERT(pResourceManager != NULL); + + return (pResourceManager->config.flags & MA_RESOURCE_MANAGER_FLAG_NO_THREADING) == 0; +} + + +typedef struct +{ + union + { + ma_async_notification_event e; + ma_async_notification_poll p; + } backend; /* Must be the first member. */ + ma_resource_manager* pResourceManager; +} ma_resource_manager_inline_notification; + +static ma_result ma_resource_manager_inline_notification_init(ma_resource_manager* pResourceManager, ma_resource_manager_inline_notification* pNotification) +{ + MA_ASSERT(pResourceManager != NULL); + MA_ASSERT(pNotification != NULL); + + pNotification->pResourceManager = pResourceManager; + + if (ma_resource_manager_is_threading_enabled(pResourceManager)) { + return ma_async_notification_event_init(&pNotification->backend.e); + } else { + return ma_async_notification_poll_init(&pNotification->backend.p); + } +} + +static void ma_resource_manager_inline_notification_uninit(ma_resource_manager_inline_notification* pNotification) +{ + MA_ASSERT(pNotification != NULL); + + if (ma_resource_manager_is_threading_enabled(pNotification->pResourceManager)) { + ma_async_notification_event_uninit(&pNotification->backend.e); + } else { + /* No need to uninitialize a polling notification. */ + } +} + +static void ma_resource_manager_inline_notification_wait(ma_resource_manager_inline_notification* pNotification) +{ + MA_ASSERT(pNotification != NULL); + + if (ma_resource_manager_is_threading_enabled(pNotification->pResourceManager)) { + ma_async_notification_event_wait(&pNotification->backend.e); + } else { + while (ma_async_notification_poll_is_signalled(&pNotification->backend.p) == MA_FALSE) { + ma_result result = ma_resource_manager_process_next_job(pNotification->pResourceManager); + if (result == MA_NO_DATA_AVAILABLE || result == MA_CANCELLED) { + break; + } + } + } +} + +static void ma_resource_manager_inline_notification_wait_and_uninit(ma_resource_manager_inline_notification* pNotification) +{ + ma_resource_manager_inline_notification_wait(pNotification); + ma_resource_manager_inline_notification_uninit(pNotification); +} + + +static void ma_resource_manager_data_buffer_bst_lock(ma_resource_manager* pResourceManager) +{ + MA_ASSERT(pResourceManager != NULL); + + if (ma_resource_manager_is_threading_enabled(pResourceManager)) { + #ifndef MA_NO_THREADING + { + ma_mutex_lock(&pResourceManager->dataBufferBSTLock); + } + #else + { + MA_ASSERT(MA_FALSE); /* Should never hit this. */ + } + #endif + } else { + /* Threading not enabled. Do nothing. */ + } +} + +static void ma_resource_manager_data_buffer_bst_unlock(ma_resource_manager* pResourceManager) +{ + MA_ASSERT(pResourceManager != NULL); + + if (ma_resource_manager_is_threading_enabled(pResourceManager)) { + #ifndef MA_NO_THREADING + { + ma_mutex_unlock(&pResourceManager->dataBufferBSTLock); + } + #else + { + MA_ASSERT(MA_FALSE); /* Should never hit this. */ + } + #endif + } else { + /* Threading not enabled. Do nothing. */ + } +} + +#ifndef MA_NO_THREADING +static ma_thread_result MA_THREADCALL ma_resource_manager_job_thread(void* pUserData) +{ + ma_resource_manager* pResourceManager = (ma_resource_manager*)pUserData; + MA_ASSERT(pResourceManager != NULL); + + for (;;) { + ma_result result; + ma_job job; + + result = ma_resource_manager_next_job(pResourceManager, &job); + if (result != MA_SUCCESS) { + break; + } + + /* Terminate if we got a quit message. 
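The quit message originates from shutdown: ma_resource_manager_uninit() further down posts a job with code MA_JOB_TYPE_QUIT (via ma_resource_manager_post_job_quit()) and then joins the job threads, which is what guarantees this loop terminates.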
*/ + if (job.toc.breakup.code == MA_JOB_TYPE_QUIT) { + break; + } + + ma_job_process(&job); + } + + return (ma_thread_result)0; +} +#endif + +MA_API ma_resource_manager_config ma_resource_manager_config_init(void) +{ + ma_resource_manager_config config; + + MA_ZERO_OBJECT(&config); + config.decodedFormat = ma_format_unknown; + config.decodedChannels = 0; + config.decodedSampleRate = 0; + config.jobThreadCount = 1; /* A single miniaudio-managed job thread by default. */ + config.jobQueueCapacity = MA_JOB_TYPE_RESOURCE_MANAGER_QUEUE_CAPACITY; + + /* Flags. */ + config.flags = 0; + #ifdef MA_NO_THREADING + { + /* Threading is disabled at compile time so disable threading at runtime as well by default. */ + config.flags |= MA_RESOURCE_MANAGER_FLAG_NO_THREADING; + config.jobThreadCount = 0; + } + #endif + + return config; +} + + +MA_API ma_result ma_resource_manager_init(const ma_resource_manager_config* pConfig, ma_resource_manager* pResourceManager) +{ + ma_result result; + ma_job_queue_config jobQueueConfig; + + if (pResourceManager == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pResourceManager); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + #ifndef MA_NO_THREADING + { + if (pConfig->jobThreadCount > ma_countof(pResourceManager->jobThreads)) { + return MA_INVALID_ARGS; /* Requesting too many job threads. */ + } + } + #endif + + pResourceManager->config = *pConfig; + ma_allocation_callbacks_init_copy(&pResourceManager->config.allocationCallbacks, &pConfig->allocationCallbacks); + + /* Get the log set up early so we can start using it as soon as possible. */ + if (pResourceManager->config.pLog == NULL) { + result = ma_log_init(&pResourceManager->config.allocationCallbacks, &pResourceManager->log); + if (result == MA_SUCCESS) { + pResourceManager->config.pLog = &pResourceManager->log; + } else { + pResourceManager->config.pLog = NULL; /* Logging is unavailable. */ + } + } + + if (pResourceManager->config.pVFS == NULL) { + result = ma_default_vfs_init(&pResourceManager->defaultVFS, &pResourceManager->config.allocationCallbacks); + if (result != MA_SUCCESS) { + return result; /* Failed to initialize the default file system. */ + } + + pResourceManager->config.pVFS = &pResourceManager->defaultVFS; + } + + /* If threading has been disabled at compile time, enforce it at run time as well. */ + #ifdef MA_NO_THREADING + { + pResourceManager->config.flags |= MA_RESOURCE_MANAGER_FLAG_NO_THREADING; + } + #endif + + /* We need to force MA_RESOURCE_MANAGER_FLAG_NON_BLOCKING if MA_RESOURCE_MANAGER_FLAG_NO_THREADING is set. */ + if ((pResourceManager->config.flags & MA_RESOURCE_MANAGER_FLAG_NO_THREADING) != 0) { + pResourceManager->config.flags |= MA_RESOURCE_MANAGER_FLAG_NON_BLOCKING; + + /* We cannot allow job threads when MA_RESOURCE_MANAGER_FLAG_NO_THREADING has been set. This is an invalid use case. */ + if (pResourceManager->config.jobThreadCount > 0) { + return MA_INVALID_ARGS; + } + } + + /* Job queue. */ + jobQueueConfig.capacity = pResourceManager->config.jobQueueCapacity; + jobQueueConfig.flags = 0; + if ((pResourceManager->config.flags & MA_RESOURCE_MANAGER_FLAG_NON_BLOCKING) != 0) { + if (pResourceManager->config.jobThreadCount > 0) { + return MA_INVALID_ARGS; /* Non-blocking mode is only valid for self-managed job threads. 
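+        A sketch of the self-managed setup from the caller's side (error handling omitted; the
+        resourceManager variable is illustrative):
+
+            config = ma_resource_manager_config_init();
+            config.flags |= MA_RESOURCE_MANAGER_FLAG_NON_BLOCKING;
+            config.jobThreadCount = 0;
+            ma_resource_manager_init(&config, &resourceManager);
+
+        Jobs are then pumped from the caller's own thread. In non-blocking mode
+        ma_resource_manager_process_next_job() returns MA_NO_DATA_AVAILABLE immediately when the
+        queue is empty, and MA_CANCELLED once a quit job has been processed:
+
+            for (;;) {
+                result = ma_resource_manager_process_next_job(&resourceManager);
+                if (result == MA_CANCELLED) {
+                    break;
+                }
+            }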
*/
+        }
+
+        jobQueueConfig.flags |= MA_JOB_QUEUE_FLAG_NON_BLOCKING;
+    }
+
+    result = ma_job_queue_init(&jobQueueConfig, &pResourceManager->config.allocationCallbacks, &pResourceManager->jobQueue);
+    if (result != MA_SUCCESS) {
+        return result;
+    }
+
+
+    /* Custom decoding backends. */
+    if (pConfig->ppCustomDecodingBackendVTables != NULL && pConfig->customDecodingBackendCount > 0) {
+        size_t sizeInBytes = sizeof(*pResourceManager->config.ppCustomDecodingBackendVTables) * pConfig->customDecodingBackendCount;
+        ma_decoding_backend_vtable** ppCustomDecodingBackendVTables;
+
+        ppCustomDecodingBackendVTables = (ma_decoding_backend_vtable**)ma_malloc(sizeInBytes, &pResourceManager->config.allocationCallbacks);
+        if (ppCustomDecodingBackendVTables == NULL) {   /* <-- Check the pointer we just allocated, not the config copy, or else an allocation failure would go undetected. */
+            ma_job_queue_uninit(&pResourceManager->jobQueue, &pResourceManager->config.allocationCallbacks);
+            return MA_OUT_OF_MEMORY;
+        }
+
+        MA_COPY_MEMORY(ppCustomDecodingBackendVTables, pConfig->ppCustomDecodingBackendVTables, sizeInBytes);
+
+        pResourceManager->config.ppCustomDecodingBackendVTables = ppCustomDecodingBackendVTables;
+        pResourceManager->config.customDecodingBackendCount = pConfig->customDecodingBackendCount;
+        pResourceManager->config.pCustomDecodingBackendUserData = pConfig->pCustomDecodingBackendUserData;
+    }
+
+
+
+    /* Here is where we initialize our threading stuff. We don't do this if we don't support threading. */
+    if (ma_resource_manager_is_threading_enabled(pResourceManager)) {
+        #ifndef MA_NO_THREADING
+        {
+            ma_uint32 iJobThread;
+
+            /* Data buffer lock. */
+            result = ma_mutex_init(&pResourceManager->dataBufferBSTLock);
+            if (result != MA_SUCCESS) {
+                ma_job_queue_uninit(&pResourceManager->jobQueue, &pResourceManager->config.allocationCallbacks);
+                return result;
+            }
+
+            /* Create the job threads last to ensure the threads have access to valid data. */
+            for (iJobThread = 0; iJobThread < pResourceManager->config.jobThreadCount; iJobThread += 1) {
+                result = ma_thread_create(&pResourceManager->jobThreads[iJobThread], ma_thread_priority_normal, pResourceManager->config.jobThreadStackSize, ma_resource_manager_job_thread, pResourceManager, &pResourceManager->config.allocationCallbacks);
+                if (result != MA_SUCCESS) {
+                    ma_mutex_uninit(&pResourceManager->dataBufferBSTLock);
+                    ma_job_queue_uninit(&pResourceManager->jobQueue, &pResourceManager->config.allocationCallbacks);
+                    return result;
+                }
+            }
+        }
+        #else
+        {
+            /* Threading is disabled at compile time. We should never get here because validation checks should have already been performed. */
+            MA_ASSERT(MA_FALSE);
+        }
+        #endif
+    }
+
+    return MA_SUCCESS;
+}
+
+
+static void ma_resource_manager_delete_all_data_buffer_nodes(ma_resource_manager* pResourceManager)
+{
+    MA_ASSERT(pResourceManager);
+
+    /* If everything was done properly, there shouldn't be any active data buffers. */
+    while (pResourceManager->pRootDataBufferNode != NULL) {
+        ma_resource_manager_data_buffer_node* pDataBufferNode = pResourceManager->pRootDataBufferNode;
+        ma_resource_manager_data_buffer_node_remove(pResourceManager, pDataBufferNode);
+
+        /* The data buffer has been removed from the BST, so now we need to free its data. */
+        ma_resource_manager_data_buffer_node_free(pResourceManager, pDataBufferNode);
+    }
+}
+
+MA_API void ma_resource_manager_uninit(ma_resource_manager* pResourceManager)
+{
+    if (pResourceManager == NULL) {
+        return;
+    }
+
+    /*
+    Job threads need to be killed first. To do this we need to post a quit message to the message queue and then wait for the thread.
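+    When the caller is pumping the job queue itself (jobThreadCount is 0, or the
+    MA_RESOURCE_MANAGER_FLAG_NO_THREADING flag is set) there are no threads to wait on here;
+    the quit job instead causes the caller's ma_resource_manager_process_next_job() loop to
+    return MA_CANCELLED.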
The quit message is never removed from the
+    queue, which means it will continue to be returned every time the queue is polled after that point, so all job threads will eventually receive it and terminate.
+    */
+    ma_resource_manager_post_job_quit(pResourceManager);
+
+    /* Wait for every job to finish before continuing to ensure nothing is still trying to access any of our objects below. */
+    if (ma_resource_manager_is_threading_enabled(pResourceManager)) {
+        #ifndef MA_NO_THREADING
+        {
+            ma_uint32 iJobThread;
+
+            for (iJobThread = 0; iJobThread < pResourceManager->config.jobThreadCount; iJobThread += 1) {
+                ma_thread_wait(&pResourceManager->jobThreads[iJobThread]);
+            }
+        }
+        #else
+        {
+            MA_ASSERT(MA_FALSE);  /* Should never hit this. */
+        }
+        #endif
+    }
+
+    /* At this point the thread should have returned and no other thread should be accessing our data. We can now delete all data buffers. */
+    ma_resource_manager_delete_all_data_buffer_nodes(pResourceManager);
+
+    /* The job queue is no longer needed. */
+    ma_job_queue_uninit(&pResourceManager->jobQueue, &pResourceManager->config.allocationCallbacks);
+
+    /* We're no longer doing anything with data buffers so the lock can now be uninitialized. */
+    if (ma_resource_manager_is_threading_enabled(pResourceManager)) {
+        #ifndef MA_NO_THREADING
+        {
+            ma_mutex_uninit(&pResourceManager->dataBufferBSTLock);
+        }
+        #else
+        {
+            MA_ASSERT(MA_FALSE);  /* Should never hit this. */
+        }
+        #endif
+    }
+
+    ma_free((ma_decoding_backend_vtable**)pResourceManager->config.ppCustomDecodingBackendVTables, &pResourceManager->config.allocationCallbacks);  /* <-- Naughty const-cast, but this is safe. */
+
+    if (pResourceManager->config.pLog == &pResourceManager->log) {
+        ma_log_uninit(&pResourceManager->log);
+    }
+}
+
+MA_API ma_log* ma_resource_manager_get_log(ma_resource_manager* pResourceManager)
+{
+    if (pResourceManager == NULL) {
+        return NULL;
+    }
+
+    return pResourceManager->config.pLog;
+}
+
+
+
+MA_API ma_resource_manager_data_source_config ma_resource_manager_data_source_config_init(void)
+{
+    ma_resource_manager_data_source_config config;
+
+    MA_ZERO_OBJECT(&config);
+    config.rangeBegInPCMFrames = MA_DATA_SOURCE_DEFAULT_RANGE_BEG;
+    config.rangeEndInPCMFrames = MA_DATA_SOURCE_DEFAULT_RANGE_END;
+    config.loopPointBegInPCMFrames = MA_DATA_SOURCE_DEFAULT_LOOP_POINT_BEG;
+    config.loopPointEndInPCMFrames = MA_DATA_SOURCE_DEFAULT_LOOP_POINT_END;
+    config.isLooping = MA_FALSE;
+
+    return config;
+}
+
+
+static ma_decoder_config ma_resource_manager__init_decoder_config(ma_resource_manager* pResourceManager)
+{
+    ma_decoder_config config;
+
+    config = ma_decoder_config_init(pResourceManager->config.decodedFormat, pResourceManager->config.decodedChannels, pResourceManager->config.decodedSampleRate);
+    config.allocationCallbacks = pResourceManager->config.allocationCallbacks;
+    config.ppCustomBackendVTables = pResourceManager->config.ppCustomDecodingBackendVTables;
+    config.customBackendCount = pResourceManager->config.customDecodingBackendCount;
+    config.pCustomBackendUserData = pResourceManager->config.pCustomDecodingBackendUserData;
+
+    return config;
+}
+
+static ma_result ma_resource_manager__init_decoder(ma_resource_manager* pResourceManager, const char* pFilePath, const wchar_t* pFilePathW, ma_decoder* pDecoder)
+{
+    ma_result result;
+    ma_decoder_config config;
+
+    MA_ASSERT(pResourceManager != NULL);
+    MA_ASSERT(pFilePath != NULL || pFilePathW != NULL);
+    MA_ASSERT(pDecoder != NULL);
+
+    config = ma_resource_manager__init_decoder_config(pResourceManager);
+
+    if (pFilePath
!= NULL) { + result = ma_decoder_init_vfs(pResourceManager->config.pVFS, pFilePath, &config, pDecoder); + if (result != MA_SUCCESS) { + ma_log_postf(ma_resource_manager_get_log(pResourceManager), MA_LOG_LEVEL_WARNING, "Failed to load file \"%s\". %s.\n", pFilePath, ma_result_description(result)); + return result; + } + } else { + result = ma_decoder_init_vfs_w(pResourceManager->config.pVFS, pFilePathW, &config, pDecoder); + if (result != MA_SUCCESS) { + #if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L) || defined(_MSC_VER) + ma_log_postf(ma_resource_manager_get_log(pResourceManager), MA_LOG_LEVEL_WARNING, "Failed to load file \"%ls\". %s.\n", pFilePathW, ma_result_description(result)); + #endif + return result; + } + } + + return MA_SUCCESS; +} + +static ma_bool32 ma_resource_manager_data_buffer_has_connector(ma_resource_manager_data_buffer* pDataBuffer) +{ + return ma_atomic_bool32_get(&pDataBuffer->isConnectorInitialized); +} + +static ma_data_source* ma_resource_manager_data_buffer_get_connector(ma_resource_manager_data_buffer* pDataBuffer) +{ + if (ma_resource_manager_data_buffer_has_connector(pDataBuffer) == MA_FALSE) { + return NULL; /* Connector not yet initialized. */ + } + + switch (pDataBuffer->pNode->data.type) + { + case ma_resource_manager_data_supply_type_encoded: return &pDataBuffer->connector.decoder; + case ma_resource_manager_data_supply_type_decoded: return &pDataBuffer->connector.buffer; + case ma_resource_manager_data_supply_type_decoded_paged: return &pDataBuffer->connector.pagedBuffer; + + case ma_resource_manager_data_supply_type_unknown: + default: + { + ma_log_postf(ma_resource_manager_get_log(pDataBuffer->pResourceManager), MA_LOG_LEVEL_ERROR, "Failed to retrieve data buffer connector. Unknown data supply type.\n"); + return NULL; + }; + }; +} + +static ma_result ma_resource_manager_data_buffer_init_connector(ma_resource_manager_data_buffer* pDataBuffer, const ma_resource_manager_data_source_config* pConfig, ma_async_notification* pInitNotification, ma_fence* pInitFence) +{ + ma_result result; + + MA_ASSERT(pDataBuffer != NULL); + MA_ASSERT(pConfig != NULL); + MA_ASSERT(ma_resource_manager_data_buffer_has_connector(pDataBuffer) == MA_FALSE); + + /* The underlying data buffer must be initialized before we'll be able to know how to initialize the backend. */ + result = ma_resource_manager_data_buffer_node_result(pDataBuffer->pNode); + if (result != MA_SUCCESS && result != MA_BUSY) { + return result; /* The data buffer is in an erroneous state. */ + } + + /* + We need to initialize either a ma_decoder or an ma_audio_buffer depending on whether or not the backing data is encoded or decoded. These act as the + "instance" to the data and are used to form the connection between underlying data buffer and the data source. If the data buffer is decoded, we can use + an ma_audio_buffer. This enables us to use memory mapping when mixing which saves us a bit of data movement overhead. + */ + switch (ma_resource_manager_data_buffer_node_get_data_supply_type(pDataBuffer->pNode)) + { + case ma_resource_manager_data_supply_type_encoded: /* Connector is a decoder. */ + { + ma_decoder_config config; + config = ma_resource_manager__init_decoder_config(pDataBuffer->pResourceManager); + result = ma_decoder_init_memory(pDataBuffer->pNode->data.backend.encoded.pData, pDataBuffer->pNode->data.backend.encoded.sizeInBytes, &config, &pDataBuffer->connector.decoder); + } break; + + case ma_resource_manager_data_supply_type_decoded: /* Connector is an audio buffer. 
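+        Note that ma_audio_buffer_init() references the node's decoded memory in place rather
+        than taking a copy, which is what enables the memory-mapped read path mentioned above.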
*/
+        {
+            ma_audio_buffer_config config;
+            config = ma_audio_buffer_config_init(pDataBuffer->pNode->data.backend.decoded.format, pDataBuffer->pNode->data.backend.decoded.channels, pDataBuffer->pNode->data.backend.decoded.totalFrameCount, pDataBuffer->pNode->data.backend.decoded.pData, NULL);
+            result = ma_audio_buffer_init(&config, &pDataBuffer->connector.buffer);
+        } break;
+
+        case ma_resource_manager_data_supply_type_decoded_paged:  /* Connector is a paged audio buffer. */
+        {
+            ma_paged_audio_buffer_config config;
+            config = ma_paged_audio_buffer_config_init(&pDataBuffer->pNode->data.backend.decodedPaged.data);
+            result = ma_paged_audio_buffer_init(&config, &pDataBuffer->connector.pagedBuffer);
+        } break;
+
+        case ma_resource_manager_data_supply_type_unknown:
+        default:
+        {
+            /* Unknown data supply type. Should never happen. Need to post an error here. */
+            return MA_INVALID_ARGS;
+        };
+    }
+
+    /*
+    Once the connector has been initialized we can fire the init notification. This gives the
+    application access to the format/channels/rate of the data source.
+    */
+    if (result == MA_SUCCESS) {
+        /*
+        The resource manager supports the ability to set the range and loop settings via a config at
+        initialization time. This results in a case where the ranges could be set explicitly via
+        ma_data_source_set_*() before we get to this point here. If this happens, we'll end up
+        hitting a case where we just override those settings, which results in what feels like a bug.
+
+        To address this we only change the relevant properties if they're not equal to defaults. If
+        they're equal to defaults there's no need to change them anyway. If they're *not* set to the
+        default values, we can assume the user has set the range and loop settings via the config. If
+        they're doing their own calls to ma_data_source_set_*() in addition to setting them via the
+        config, that's entirely on the caller and any synchronization issue becomes their problem.
+        */
+        if (pConfig->rangeBegInPCMFrames != MA_DATA_SOURCE_DEFAULT_RANGE_BEG || pConfig->rangeEndInPCMFrames != MA_DATA_SOURCE_DEFAULT_RANGE_END) {
+            ma_data_source_set_range_in_pcm_frames(pDataBuffer, pConfig->rangeBegInPCMFrames, pConfig->rangeEndInPCMFrames);
+        }
+
+        if (pConfig->loopPointBegInPCMFrames != MA_DATA_SOURCE_DEFAULT_LOOP_POINT_BEG || pConfig->loopPointEndInPCMFrames != MA_DATA_SOURCE_DEFAULT_LOOP_POINT_END) {
+            ma_data_source_set_loop_point_in_pcm_frames(pDataBuffer, pConfig->loopPointBegInPCMFrames, pConfig->loopPointEndInPCMFrames);
+        }
+
+        if (pConfig->isLooping != MA_FALSE) {
+            ma_data_source_set_looping(pDataBuffer, pConfig->isLooping);
+        }
+
+        ma_atomic_bool32_set(&pDataBuffer->isConnectorInitialized, MA_TRUE);
+
+        if (pInitNotification != NULL) {
+            ma_async_notification_signal(pInitNotification);
+        }
+
+        if (pInitFence != NULL) {
+            ma_fence_release(pInitFence);
+        }
+    }
+
+    /* At this point the backend should be initialized. We do *not* want to set pDataSource->result here - that needs to be done at a higher level to ensure it's done as the last step. */
+    return result;
+}
+
+static ma_result ma_resource_manager_data_buffer_uninit_connector(ma_resource_manager* pResourceManager, ma_resource_manager_data_buffer* pDataBuffer)
+{
+    MA_ASSERT(pResourceManager != NULL);
+    MA_ASSERT(pDataBuffer != NULL);
+
+    (void)pResourceManager;
+
+    switch (ma_resource_manager_data_buffer_node_get_data_supply_type(pDataBuffer->pNode))
+    {
+        case ma_resource_manager_data_supply_type_encoded:  /* Connector is a decoder.
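+        The decoder connector only wraps the node's encoded memory (it was initialized with
+        ma_decoder_init_memory() and no copy was taken), so uninitializing the decoder here does
+        not release the sound data itself; that is owned by the node and freed in
+        ma_resource_manager_data_buffer_node_free().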
*/
+        {
+            ma_decoder_uninit(&pDataBuffer->connector.decoder);
+        } break;
+
+        case ma_resource_manager_data_supply_type_decoded:  /* Connector is an audio buffer. */
+        {
+            ma_audio_buffer_uninit(&pDataBuffer->connector.buffer);
+        } break;
+
+        case ma_resource_manager_data_supply_type_decoded_paged:  /* Connector is a paged audio buffer. */
+        {
+            ma_paged_audio_buffer_uninit(&pDataBuffer->connector.pagedBuffer);
+        } break;
+
+        case ma_resource_manager_data_supply_type_unknown:
+        default:
+        {
+            /* Unknown data supply type. Should never happen. Need to post an error here. */
+            return MA_INVALID_ARGS;
+        };
+    }
+
+    return MA_SUCCESS;
+}
+
+static ma_uint32 ma_resource_manager_data_buffer_node_next_execution_order(ma_resource_manager_data_buffer_node* pDataBufferNode)
+{
+    MA_ASSERT(pDataBufferNode != NULL);
+    return ma_atomic_fetch_add_32(&pDataBufferNode->executionCounter, 1);
+}
+
+static ma_result ma_resource_manager_data_buffer_node_init_supply_encoded(ma_resource_manager* pResourceManager, ma_resource_manager_data_buffer_node* pDataBufferNode, const char* pFilePath, const wchar_t* pFilePathW)
+{
+    ma_result result;
+    size_t dataSizeInBytes;
+    void* pData;
+
+    MA_ASSERT(pResourceManager != NULL);
+    MA_ASSERT(pDataBufferNode != NULL);
+    MA_ASSERT(pFilePath != NULL || pFilePathW != NULL);
+
+    result = ma_vfs_open_and_read_file_ex(pResourceManager->config.pVFS, pFilePath, pFilePathW, &pData, &dataSizeInBytes, &pResourceManager->config.allocationCallbacks);
+    if (result != MA_SUCCESS) {
+        if (pFilePath != NULL) {
+            ma_log_postf(ma_resource_manager_get_log(pResourceManager), MA_LOG_LEVEL_WARNING, "Failed to load file \"%s\". %s.\n", pFilePath, ma_result_description(result));
+        } else {
+            #if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L) || defined(_MSC_VER)
+            ma_log_postf(ma_resource_manager_get_log(pResourceManager), MA_LOG_LEVEL_WARNING, "Failed to load file \"%ls\". %s.\n", pFilePathW, ma_result_description(result));
+            #endif
+        }
+
+        return result;
+    }
+
+    pDataBufferNode->data.backend.encoded.pData = pData;
+    pDataBufferNode->data.backend.encoded.sizeInBytes = dataSizeInBytes;
+    ma_resource_manager_data_buffer_node_set_data_supply_type(pDataBufferNode, ma_resource_manager_data_supply_type_encoded);  /* <-- Must be set last. */
+
+    return MA_SUCCESS;
+}
+
+static ma_result ma_resource_manager_data_buffer_node_init_supply_decoded(ma_resource_manager* pResourceManager, ma_resource_manager_data_buffer_node* pDataBufferNode, const char* pFilePath, const wchar_t* pFilePathW, ma_uint32 flags, ma_decoder** ppDecoder)
+{
+    ma_result result = MA_SUCCESS;
+    ma_decoder* pDecoder;
+    ma_uint64 totalFrameCount;
+
+    MA_ASSERT(pResourceManager != NULL);
+    MA_ASSERT(pDataBufferNode != NULL);
+    MA_ASSERT(ppDecoder != NULL);
+    MA_ASSERT(pFilePath != NULL || pFilePathW != NULL);
+
+    *ppDecoder = NULL;  /* For safety. */
+
+    pDecoder = (ma_decoder*)ma_malloc(sizeof(*pDecoder), &pResourceManager->config.allocationCallbacks);
+    if (pDecoder == NULL) {
+        return MA_OUT_OF_MEMORY;
+    }
+
+    result = ma_resource_manager__init_decoder(pResourceManager, pFilePath, pFilePathW, pDecoder);
+    if (result != MA_SUCCESS) {
+        ma_free(pDecoder, &pResourceManager->config.allocationCallbacks);
+        return result;
+    }
+
+    /*
+    At this point we have the decoder and we now need to initialize the data supply. This will
+    be either a decoded buffer, or a decoded paged buffer. A regular buffer is just one big heap
+    allocated buffer, whereas a paged buffer is a linked list of page-sized buffers.
The latter
+    is used when the length of a sound is unknown until a full decode has been performed.
+    */
+    if ((flags & MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_UNKNOWN_LENGTH) == 0) {
+        result = ma_decoder_get_length_in_pcm_frames(pDecoder, &totalFrameCount);
+        if (result != MA_SUCCESS) {
+            ma_decoder_uninit(pDecoder);    /* <-- The decoder would otherwise leak on this error path. */
+            ma_free(pDecoder, &pResourceManager->config.allocationCallbacks);
+            return result;
+        }
+    } else {
+        totalFrameCount = 0;
+    }
+
+    if (totalFrameCount > 0) {
+        /* It's a known length. The data supply is a regular decoded buffer. */
+        ma_uint64 dataSizeInBytes;
+        void* pData;
+
+        dataSizeInBytes = totalFrameCount * ma_get_bytes_per_frame(pDecoder->outputFormat, pDecoder->outputChannels);
+        if (dataSizeInBytes > MA_SIZE_MAX) {
+            ma_decoder_uninit(pDecoder);
+            ma_free(pDecoder, &pResourceManager->config.allocationCallbacks);
+            return MA_TOO_BIG;
+        }
+
+        pData = ma_malloc((size_t)dataSizeInBytes, &pResourceManager->config.allocationCallbacks);
+        if (pData == NULL) {
+            ma_decoder_uninit(pDecoder);
+            ma_free(pDecoder, &pResourceManager->config.allocationCallbacks);
+            return MA_OUT_OF_MEMORY;
+        }
+
+        /* The buffer needs to be initialized to silence in case the caller reads from it. */
+        ma_silence_pcm_frames(pData, totalFrameCount, pDecoder->outputFormat, pDecoder->outputChannels);
+
+        /* Data has been allocated and the data supply can now be initialized. */
+        pDataBufferNode->data.backend.decoded.pData = pData;
+        pDataBufferNode->data.backend.decoded.totalFrameCount = totalFrameCount;
+        pDataBufferNode->data.backend.decoded.format = pDecoder->outputFormat;
+        pDataBufferNode->data.backend.decoded.channels = pDecoder->outputChannels;
+        pDataBufferNode->data.backend.decoded.sampleRate = pDecoder->outputSampleRate;
+        pDataBufferNode->data.backend.decoded.decodedFrameCount = 0;
+        ma_resource_manager_data_buffer_node_set_data_supply_type(pDataBufferNode, ma_resource_manager_data_supply_type_decoded);  /* <-- Must be set last. */
+    } else {
+        /*
+        It's an unknown length. The data supply is a paged decoded buffer. Setting this up is
+        actually easier than the non-paged decoded buffer because we just need to initialize
+        a ma_paged_audio_buffer object.
+        */
+        result = ma_paged_audio_buffer_data_init(pDecoder->outputFormat, pDecoder->outputChannels, &pDataBufferNode->data.backend.decodedPaged.data);
+        if (result != MA_SUCCESS) {
+            ma_decoder_uninit(pDecoder);
+            ma_free(pDecoder, &pResourceManager->config.allocationCallbacks);
+            return result;
+        }
+
+        pDataBufferNode->data.backend.decodedPaged.sampleRate = pDecoder->outputSampleRate;
+        pDataBufferNode->data.backend.decodedPaged.decodedFrameCount = 0;
+        ma_resource_manager_data_buffer_node_set_data_supply_type(pDataBufferNode, ma_resource_manager_data_supply_type_decoded_paged);  /* <-- Must be set last. */
+    }
+
+    *ppDecoder = pDecoder;
+
+    return MA_SUCCESS;
+}
+
+static ma_result ma_resource_manager_data_buffer_node_decode_next_page(ma_resource_manager* pResourceManager, ma_resource_manager_data_buffer_node* pDataBufferNode, ma_decoder* pDecoder)
+{
+    ma_result result = MA_SUCCESS;
+    ma_uint64 pageSizeInFrames;
+    ma_uint64 framesToTryReading;
+    ma_uint64 framesRead;
+
+    MA_ASSERT(pResourceManager != NULL);
+    MA_ASSERT(pDataBufferNode != NULL);
+    MA_ASSERT(pDecoder != NULL);
+
+    /* We need to know the size of a page in frames to know how many frames to decode. */
+    pageSizeInFrames = MA_RESOURCE_MANAGER_PAGE_SIZE_IN_MILLISECONDS * (pDecoder->outputSampleRate/1000);
+    framesToTryReading = pageSizeInFrames;
+
+    /*
+    Here is where we do the decoding of the next page.
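+    As a concrete illustration of the page size arithmetic above:
+    MA_RESOURCE_MANAGER_PAGE_SIZE_IN_MILLISECONDS defaults to 1000, so a 48000 Hz stream is
+    decoded in pages of 48000 frames at a time.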
We'll run a slightly different path depending + on whether or not we're using a flat or paged buffer because the allocation of the page differs + between the two. For a flat buffer it's an offset to an already-allocated buffer. For a paged + buffer, we need to allocate a new page and attach it to the linked list. + */ + switch (ma_resource_manager_data_buffer_node_get_data_supply_type(pDataBufferNode)) + { + case ma_resource_manager_data_supply_type_decoded: + { + /* The destination buffer is an offset to the existing buffer. Don't read more than we originally retrieved when we first initialized the decoder. */ + void* pDst; + ma_uint64 framesRemaining = pDataBufferNode->data.backend.decoded.totalFrameCount - pDataBufferNode->data.backend.decoded.decodedFrameCount; + if (framesToTryReading > framesRemaining) { + framesToTryReading = framesRemaining; + } + + if (framesToTryReading > 0) { + pDst = ma_offset_ptr( + pDataBufferNode->data.backend.decoded.pData, + pDataBufferNode->data.backend.decoded.decodedFrameCount * ma_get_bytes_per_frame(pDataBufferNode->data.backend.decoded.format, pDataBufferNode->data.backend.decoded.channels) + ); + MA_ASSERT(pDst != NULL); + + result = ma_decoder_read_pcm_frames(pDecoder, pDst, framesToTryReading, &framesRead); + if (framesRead > 0) { + pDataBufferNode->data.backend.decoded.decodedFrameCount += framesRead; + } + } else { + framesRead = 0; + } + } break; + + case ma_resource_manager_data_supply_type_decoded_paged: + { + /* The destination buffer is a freshly allocated page. */ + ma_paged_audio_buffer_page* pPage; + + result = ma_paged_audio_buffer_data_allocate_page(&pDataBufferNode->data.backend.decodedPaged.data, framesToTryReading, NULL, &pResourceManager->config.allocationCallbacks, &pPage); + if (result != MA_SUCCESS) { + return result; + } + + result = ma_decoder_read_pcm_frames(pDecoder, pPage->pAudioData, framesToTryReading, &framesRead); + if (result == MA_SUCCESS && framesRead > 0) { + pPage->sizeInFrames = framesRead; + + result = ma_paged_audio_buffer_data_append_page(&pDataBufferNode->data.backend.decodedPaged.data, pPage); + if (result == MA_SUCCESS) { + pDataBufferNode->data.backend.decodedPaged.decodedFrameCount += framesRead; + } else { + /* Failed to append the page. Just abort and set the status to MA_AT_END. */ + ma_paged_audio_buffer_data_free_page(&pDataBufferNode->data.backend.decodedPaged.data, pPage, &pResourceManager->config.allocationCallbacks); + result = MA_AT_END; + } + } else { + /* No frames were read. Free the page and just set the status to MA_AT_END. */ + ma_paged_audio_buffer_data_free_page(&pDataBufferNode->data.backend.decodedPaged.data, pPage, &pResourceManager->config.allocationCallbacks); + result = MA_AT_END; + } + } break; + + case ma_resource_manager_data_supply_type_encoded: + case ma_resource_manager_data_supply_type_unknown: + default: + { + /* Unexpected data supply type. 
*/ + ma_log_postf(ma_resource_manager_get_log(pResourceManager), MA_LOG_LEVEL_ERROR, "Unexpected data supply type (%d) when decoding page.", ma_resource_manager_data_buffer_node_get_data_supply_type(pDataBufferNode)); + return MA_ERROR; + }; + } + + if (result == MA_SUCCESS && framesRead == 0) { + result = MA_AT_END; + } + + return result; +} + +static ma_result ma_resource_manager_data_buffer_node_acquire_critical_section(ma_resource_manager* pResourceManager, const char* pFilePath, const wchar_t* pFilePathW, ma_uint32 hashedName32, ma_uint32 flags, const ma_resource_manager_data_supply* pExistingData, ma_fence* pInitFence, ma_fence* pDoneFence, ma_resource_manager_inline_notification* pInitNotification, ma_resource_manager_data_buffer_node** ppDataBufferNode) +{ + ma_result result = MA_SUCCESS; + ma_resource_manager_data_buffer_node* pDataBufferNode = NULL; + ma_resource_manager_data_buffer_node* pInsertPoint; + + if (ppDataBufferNode != NULL) { + *ppDataBufferNode = NULL; + } + + result = ma_resource_manager_data_buffer_node_insert_point(pResourceManager, hashedName32, &pInsertPoint); + if (result == MA_ALREADY_EXISTS) { + /* The node already exists. We just need to increment the reference count. */ + pDataBufferNode = pInsertPoint; + + result = ma_resource_manager_data_buffer_node_increment_ref(pResourceManager, pDataBufferNode, NULL); + if (result != MA_SUCCESS) { + return result; /* Should never happen. Failed to increment the reference count. */ + } + + result = MA_ALREADY_EXISTS; + goto done; + } else { + /* + The node does not already exist. We need to post a LOAD_DATA_BUFFER_NODE job here. This + needs to be done inside the critical section to ensure an uninitialization of the node + does not occur before initialization on another thread. + */ + pDataBufferNode = (ma_resource_manager_data_buffer_node*)ma_malloc(sizeof(*pDataBufferNode), &pResourceManager->config.allocationCallbacks); + if (pDataBufferNode == NULL) { + return MA_OUT_OF_MEMORY; + } + + MA_ZERO_OBJECT(pDataBufferNode); + pDataBufferNode->hashedName32 = hashedName32; + pDataBufferNode->refCount = 1; /* Always set to 1 by default (this is our first reference). */ + + if (pExistingData == NULL) { + pDataBufferNode->data.type = ma_resource_manager_data_supply_type_unknown; /* <-- We won't know this until we start decoding. */ + pDataBufferNode->result = MA_BUSY; /* Must be set to MA_BUSY before we leave the critical section, so might as well do it now. */ + pDataBufferNode->isDataOwnedByResourceManager = MA_TRUE; + } else { + pDataBufferNode->data = *pExistingData; + pDataBufferNode->result = MA_SUCCESS; /* Not loading asynchronously, so just set the status */ + pDataBufferNode->isDataOwnedByResourceManager = MA_FALSE; + } + + result = ma_resource_manager_data_buffer_node_insert_at(pResourceManager, pDataBufferNode, pInsertPoint); + if (result != MA_SUCCESS) { + ma_free(pDataBufferNode, &pResourceManager->config.allocationCallbacks); + return result; /* Should never happen. Failed to insert the data buffer into the BST. */ + } + + /* + Here is where we'll post the job, but only if we're loading asynchronously. If we're + loading synchronously we'll defer loading to a later stage, outside of the critical + section. + */ + if (pDataBufferNode->isDataOwnedByResourceManager && (flags & MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_ASYNC) != 0) { + /* Loading asynchronously. Post the job. */ + ma_job job; + char* pFilePathCopy = NULL; + wchar_t* pFilePathWCopy = NULL; + + /* We need a copy of the file path. 
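+            The copy is needed because the job may run long after this call has returned, at
+            which point the caller's string may no longer be valid.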
We should probably make this more efficient, but for now we'll do a transient memory allocation. */
+            if (pFilePath != NULL) {
+                pFilePathCopy = ma_copy_string(pFilePath, &pResourceManager->config.allocationCallbacks);
+            } else {
+                pFilePathWCopy = ma_copy_string_w(pFilePathW, &pResourceManager->config.allocationCallbacks);
+            }
+
+            if (pFilePathCopy == NULL && pFilePathWCopy == NULL) {
+                ma_resource_manager_data_buffer_node_remove(pResourceManager, pDataBufferNode);
+                ma_free(pDataBufferNode, &pResourceManager->config.allocationCallbacks);
+                return MA_OUT_OF_MEMORY;
+            }
+
+            if ((flags & MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_WAIT_INIT) != 0) {
+                ma_resource_manager_inline_notification_init(pResourceManager, pInitNotification);
+            }
+
+            /* Acquire init and done fences before posting the job. These will be released by the job thread. */
+            if (pInitFence != NULL) { ma_fence_acquire(pInitFence); }
+            if (pDoneFence != NULL) { ma_fence_acquire(pDoneFence); }
+
+            /* We now have everything we need to post the job to the job thread. */
+            job = ma_job_init(MA_JOB_TYPE_RESOURCE_MANAGER_LOAD_DATA_BUFFER_NODE);
+            job.order = ma_resource_manager_data_buffer_node_next_execution_order(pDataBufferNode);
+            job.data.resourceManager.loadDataBufferNode.pResourceManager = pResourceManager;
+            job.data.resourceManager.loadDataBufferNode.pDataBufferNode = pDataBufferNode;
+            job.data.resourceManager.loadDataBufferNode.pFilePath = pFilePathCopy;
+            job.data.resourceManager.loadDataBufferNode.pFilePathW = pFilePathWCopy;
+            job.data.resourceManager.loadDataBufferNode.flags = flags;
+            job.data.resourceManager.loadDataBufferNode.pInitNotification = ((flags & MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_WAIT_INIT) != 0) ? pInitNotification : NULL;
+            job.data.resourceManager.loadDataBufferNode.pDoneNotification = NULL;
+            job.data.resourceManager.loadDataBufferNode.pInitFence = pInitFence;
+            job.data.resourceManager.loadDataBufferNode.pDoneFence = pDoneFence;
+
+            if ((flags & MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_WAIT_INIT) != 0) {
+                result = ma_job_process(&job);
+            } else {
+                result = ma_resource_manager_post_job(pResourceManager, &job);
+            }
+
+            if (result != MA_SUCCESS) {
+                /* Failed to post job. Probably ran out of memory. */
+                ma_log_postf(ma_resource_manager_get_log(pResourceManager), MA_LOG_LEVEL_ERROR, "Failed to post MA_JOB_TYPE_RESOURCE_MANAGER_LOAD_DATA_BUFFER_NODE job. %s.\n", ma_result_description(result));
+
+                /*
+                Fences were acquired before posting the job, but since the job was not able to
+                be posted, we need to make sure we release them so nothing gets stuck waiting.
+                */
+                if (pInitFence != NULL) { ma_fence_release(pInitFence); }
+                if (pDoneFence != NULL) { ma_fence_release(pDoneFence); }
+
+                if ((flags & MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_WAIT_INIT) != 0) {
+                    ma_resource_manager_inline_notification_uninit(pInitNotification);
+                } else {
+                    /*
+                    The job was never able to run, so the file path copies need to be freed here.
+                    With WAIT_INIT this is unnecessary because the job will have been handled
+                    in-place above and will already have freed these copies itself.
*/ + ma_free(pFilePathCopy, &pResourceManager->config.allocationCallbacks); + ma_free(pFilePathWCopy, &pResourceManager->config.allocationCallbacks); + } + + ma_resource_manager_data_buffer_node_remove(pResourceManager, pDataBufferNode); + ma_free(pDataBufferNode, &pResourceManager->config.allocationCallbacks); + + return result; + } + } + } + +done: + if (ppDataBufferNode != NULL) { + *ppDataBufferNode = pDataBufferNode; + } + + return result; +} + +static ma_result ma_resource_manager_data_buffer_node_acquire(ma_resource_manager* pResourceManager, const char* pFilePath, const wchar_t* pFilePathW, ma_uint32 hashedName32, ma_uint32 flags, const ma_resource_manager_data_supply* pExistingData, ma_fence* pInitFence, ma_fence* pDoneFence, ma_resource_manager_data_buffer_node** ppDataBufferNode) +{ + ma_result result = MA_SUCCESS; + ma_bool32 nodeAlreadyExists = MA_FALSE; + ma_resource_manager_data_buffer_node* pDataBufferNode = NULL; + ma_resource_manager_inline_notification initNotification; /* Used when the WAIT_INIT flag is set. */ + + if (ppDataBufferNode != NULL) { + *ppDataBufferNode = NULL; /* Safety. */ + } + + if (pResourceManager == NULL || (pFilePath == NULL && pFilePathW == NULL && hashedName32 == 0)) { + return MA_INVALID_ARGS; + } + + /* If we're specifying existing data, it must be valid. */ + if (pExistingData != NULL && pExistingData->type == ma_resource_manager_data_supply_type_unknown) { + return MA_INVALID_ARGS; + } + + /* If we don't support threading, remove the ASYNC flag to make the rest of this a bit simpler. */ + if (ma_resource_manager_is_threading_enabled(pResourceManager) == MA_FALSE) { + flags &= ~MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_ASYNC; + } + + if (hashedName32 == 0) { + if (pFilePath != NULL) { + hashedName32 = ma_hash_string_32(pFilePath); + } else { + hashedName32 = ma_hash_string_w_32(pFilePathW); + } + } + + /* + Here is where we either increment the node's reference count or allocate a new one and add it + to the BST. When allocating a new node, we need to make sure the LOAD_DATA_BUFFER_NODE job is + posted inside the critical section just in case the caller immediately uninitializes the node + as this will ensure the FREE_DATA_BUFFER_NODE job is given an execution order such that the + node is not uninitialized before initialization. + */ + ma_resource_manager_data_buffer_bst_lock(pResourceManager); + { + result = ma_resource_manager_data_buffer_node_acquire_critical_section(pResourceManager, pFilePath, pFilePathW, hashedName32, flags, pExistingData, pInitFence, pDoneFence, &initNotification, &pDataBufferNode); + } + ma_resource_manager_data_buffer_bst_unlock(pResourceManager); + + if (result == MA_ALREADY_EXISTS) { + nodeAlreadyExists = MA_TRUE; + result = MA_SUCCESS; + } else { + if (result != MA_SUCCESS) { + return result; + } + } + + /* + If we're loading synchronously, we'll need to load everything now. When loading asynchronously, + a job will have been posted inside the BST critical section so that an uninitialization can be + allocated an appropriate execution order thereby preventing it from being uninitialized before + the node is initialized by the decoding thread(s). + */ + if (nodeAlreadyExists == MA_FALSE) { /* Don't need to try loading anything if the node already exists. */ + if (pFilePath == NULL && pFilePathW == NULL) { + /* + If this path is hit, it means a buffer is being copied (i.e. initialized from only the + hashed name), but that node has been freed in the meantime, probably from some other + thread. 
This is an invalid operation.
+            */
+            ma_log_postf(ma_resource_manager_get_log(pResourceManager), MA_LOG_LEVEL_WARNING, "Cloning data buffer node failed because the source node was released. The source node must remain valid until the cloning has completed.\n");
+            result = MA_INVALID_OPERATION;
+            goto done;
+        }
+
+        if (pDataBufferNode->isDataOwnedByResourceManager) {
+            if ((flags & MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_ASYNC) == 0) {
+                /* Loading synchronously. Load the sound in its entirety here. */
+                if ((flags & MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_DECODE) == 0) {
+                    /* No decoding. This is the simple case - just store the file contents in memory. */
+                    result = ma_resource_manager_data_buffer_node_init_supply_encoded(pResourceManager, pDataBufferNode, pFilePath, pFilePathW);
+                    if (result != MA_SUCCESS) {
+                        goto done;
+                    }
+                } else {
+                    /* Decoding. We do this the same way as we do when loading asynchronously. */
+                    ma_decoder* pDecoder;
+                    result = ma_resource_manager_data_buffer_node_init_supply_decoded(pResourceManager, pDataBufferNode, pFilePath, pFilePathW, flags, &pDecoder);
+                    if (result != MA_SUCCESS) {
+                        goto done;
+                    }
+
+                    /* We have the decoder, now decode page by page just like we do when loading asynchronously. */
+                    for (;;) {
+                        /* Decode next page. */
+                        result = ma_resource_manager_data_buffer_node_decode_next_page(pResourceManager, pDataBufferNode, pDecoder);
+                        if (result != MA_SUCCESS) {
+                            break;  /* Will return MA_AT_END when the last page has been decoded. */
+                        }
+                    }
+
+                    /* Reaching the end needs to be considered successful. */
+                    if (result == MA_AT_END) {
+                        result = MA_SUCCESS;
+                    }
+
+                    /*
+                    At this point the data buffer is either fully decoded or some error occurred. Either
+                    way, the decoder is no longer necessary.
+                    */
+                    ma_decoder_uninit(pDecoder);
+                    ma_free(pDecoder, &pResourceManager->config.allocationCallbacks);
+                }
+
+                /* Getting here means we were successful. Make sure the status of the node is updated accordingly. */
+                ma_atomic_exchange_i32(&pDataBufferNode->result, result);
+            } else {
+                /* Loading asynchronously. We may need to wait for initialization. */
+                if ((flags & MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_WAIT_INIT) != 0) {
+                    ma_resource_manager_inline_notification_wait(&initNotification);
+                }
+            }
+        } else {
+            /* The data is not managed by the resource manager so there's nothing else to do. */
+            MA_ASSERT(pExistingData != NULL);
+        }
+    }
+
+done:
+    /*
+    The init notification needs to be uninitialized. It will only have been initialized if the
+    node did not already exist and the ASYNC and WAIT_INIT flags were both specified. This is
+    done before any freeing of the node below because the check reads from the node itself.
+    */
+    if (nodeAlreadyExists == MA_FALSE && pDataBufferNode->isDataOwnedByResourceManager && (flags & MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_ASYNC) != 0) {
+        if ((flags & MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_WAIT_INIT) != 0) {
+            ma_resource_manager_inline_notification_uninit(&initNotification);
+        }
+    }
+
+    /* If we failed to initialize the data buffer we need to free it. */
+    if (result != MA_SUCCESS) {
+        if (nodeAlreadyExists == MA_FALSE) {
+            ma_resource_manager_data_buffer_node_remove(pResourceManager, pDataBufferNode);
+            ma_free(pDataBufferNode, &pResourceManager->config.allocationCallbacks);
+            pDataBufferNode = NULL;  /* Don't hand a pointer to the freed node back to the caller. */
+        }
+    }
+
+    if (ppDataBufferNode != NULL) {
+        *ppDataBufferNode = pDataBufferNode;
+    }
+
+    return result;
+}
+
+static ma_result ma_resource_manager_data_buffer_node_unacquire(ma_resource_manager* pResourceManager, ma_resource_manager_data_buffer_node* pDataBufferNode, const char* pName, const wchar_t* pNameW)
+{
+    ma_result result = MA_SUCCESS;
+    ma_uint32 refCount = 0xFFFFFFFF;  /* The new reference count of the node after decrementing. Initialize to non-0 so that we don't fall into the freeing path by accident. */
+    ma_uint32 hashedName32 = 0;
+
+    if (pResourceManager == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    if (pDataBufferNode == NULL) {
+        if (pName == NULL && pNameW == NULL) {
+            return MA_INVALID_ARGS;
+        }
+
+        if (pName != NULL) {
+            hashedName32 = ma_hash_string_32(pName);
+        } else {
+            hashedName32 = ma_hash_string_w_32(pNameW);
+        }
+    }
+
+    /*
+    The first thing to do is decrement the reference counter of the node. Then, if the reference
+    count is zero, we need to free the node. If the node is still in the process of loading, we'll
+    need to post a job to the job queue to free the node. Otherwise we'll just do it here.
+    */
+    ma_resource_manager_data_buffer_bst_lock(pResourceManager);
+    {
+        /* Might need to find the node. Must be done inside the critical section. */
+        if (pDataBufferNode == NULL) {
+            result = ma_resource_manager_data_buffer_node_search(pResourceManager, hashedName32, &pDataBufferNode);
+            if (result != MA_SUCCESS) {
+                goto stage2;  /* Couldn't find the node. */
+            }
+        }
+
+        result = ma_resource_manager_data_buffer_node_decrement_ref(pResourceManager, pDataBufferNode, &refCount);
+        if (result != MA_SUCCESS) {
+            goto stage2;  /* Should never happen. */
+        }
+
+        if (refCount == 0) {
+            result = ma_resource_manager_data_buffer_node_remove(pResourceManager, pDataBufferNode);
+            if (result != MA_SUCCESS) {
+                goto stage2;  /* An error occurred when trying to remove the data buffer. This should never happen. */
+            }
+        }
+    }
+stage2:
+    ma_resource_manager_data_buffer_bst_unlock(pResourceManager);  /* <-- The stage2 label sits above this line so the lock is always released, even on the goto paths above. */
+
+    if (result != MA_SUCCESS) {
+        return result;
+    }
+
+    /*
+    Here is where we need to free the node. We don't want to do this inside the critical section
+    above because we want to keep that as small as possible for multi-threaded efficiency.
+    */
+    if (refCount == 0) {
+        if (ma_resource_manager_data_buffer_node_result(pDataBufferNode) == MA_BUSY) {
+            /* The sound is still loading. We need to delay the freeing of the node to a safe time. */
+            ma_job job;
+
+            /* We need to mark the node as unavailable for the sake of the resource manager worker threads. */
+            ma_atomic_exchange_i32(&pDataBufferNode->result, MA_UNAVAILABLE);
+
+            job = ma_job_init(MA_JOB_TYPE_RESOURCE_MANAGER_FREE_DATA_BUFFER_NODE);
+            job.order = ma_resource_manager_data_buffer_node_next_execution_order(pDataBufferNode);
+            job.data.resourceManager.freeDataBufferNode.pResourceManager = pResourceManager;
+            job.data.resourceManager.freeDataBufferNode.pDataBufferNode = pDataBufferNode;
+
+            result = ma_resource_manager_post_job(pResourceManager, &job);
+            if (result != MA_SUCCESS) {
+                ma_log_postf(ma_resource_manager_get_log(pResourceManager), MA_LOG_LEVEL_ERROR, "Failed to post MA_JOB_TYPE_RESOURCE_MANAGER_FREE_DATA_BUFFER_NODE job.
%s.\n", ma_result_description(result)); + return result; + } + + /* If we don't support threading, process the job queue here. */ + if (ma_resource_manager_is_threading_enabled(pResourceManager) == MA_FALSE) { + while (ma_resource_manager_data_buffer_node_result(pDataBufferNode) == MA_BUSY) { + result = ma_resource_manager_process_next_job(pResourceManager); + if (result == MA_NO_DATA_AVAILABLE || result == MA_CANCELLED) { + result = MA_SUCCESS; + break; + } + } + } else { + /* Threading is enabled. The job queue will deal with the rest of the cleanup from here. */ + } + } else { + /* The sound isn't loading so we can just free the node here. */ + ma_resource_manager_data_buffer_node_free(pResourceManager, pDataBufferNode); + } + } + + return result; +} + + + +static ma_uint32 ma_resource_manager_data_buffer_next_execution_order(ma_resource_manager_data_buffer* pDataBuffer) +{ + MA_ASSERT(pDataBuffer != NULL); + return ma_atomic_fetch_add_32(&pDataBuffer->executionCounter, 1); +} + +static ma_result ma_resource_manager_data_buffer_cb__read_pcm_frames(ma_data_source* pDataSource, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead) +{ + return ma_resource_manager_data_buffer_read_pcm_frames((ma_resource_manager_data_buffer*)pDataSource, pFramesOut, frameCount, pFramesRead); +} + +static ma_result ma_resource_manager_data_buffer_cb__seek_to_pcm_frame(ma_data_source* pDataSource, ma_uint64 frameIndex) +{ + return ma_resource_manager_data_buffer_seek_to_pcm_frame((ma_resource_manager_data_buffer*)pDataSource, frameIndex); +} + +static ma_result ma_resource_manager_data_buffer_cb__get_data_format(ma_data_source* pDataSource, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate, ma_channel* pChannelMap, size_t channelMapCap) +{ + return ma_resource_manager_data_buffer_get_data_format((ma_resource_manager_data_buffer*)pDataSource, pFormat, pChannels, pSampleRate, pChannelMap, channelMapCap); +} + +static ma_result ma_resource_manager_data_buffer_cb__get_cursor_in_pcm_frames(ma_data_source* pDataSource, ma_uint64* pCursor) +{ + return ma_resource_manager_data_buffer_get_cursor_in_pcm_frames((ma_resource_manager_data_buffer*)pDataSource, pCursor); +} + +static ma_result ma_resource_manager_data_buffer_cb__get_length_in_pcm_frames(ma_data_source* pDataSource, ma_uint64* pLength) +{ + return ma_resource_manager_data_buffer_get_length_in_pcm_frames((ma_resource_manager_data_buffer*)pDataSource, pLength); +} + +static ma_result ma_resource_manager_data_buffer_cb__set_looping(ma_data_source* pDataSource, ma_bool32 isLooping) +{ + ma_resource_manager_data_buffer* pDataBuffer = (ma_resource_manager_data_buffer*)pDataSource; + MA_ASSERT(pDataBuffer != NULL); + + ma_atomic_exchange_32(&pDataBuffer->isLooping, isLooping); + + /* The looping state needs to be set on the connector as well or else looping won't work when we read audio data. 
*/
+    ma_data_source_set_looping(ma_resource_manager_data_buffer_get_connector(pDataBuffer), isLooping);
+
+    return MA_SUCCESS;
+}
+
+static ma_data_source_vtable g_ma_resource_manager_data_buffer_vtable =
+{
+    ma_resource_manager_data_buffer_cb__read_pcm_frames,
+    ma_resource_manager_data_buffer_cb__seek_to_pcm_frame,
+    ma_resource_manager_data_buffer_cb__get_data_format,
+    ma_resource_manager_data_buffer_cb__get_cursor_in_pcm_frames,
+    ma_resource_manager_data_buffer_cb__get_length_in_pcm_frames,
+    ma_resource_manager_data_buffer_cb__set_looping,
+    0
+};
+
+static ma_result ma_resource_manager_data_buffer_init_ex_internal(ma_resource_manager* pResourceManager, const ma_resource_manager_data_source_config* pConfig, ma_uint32 hashedName32, ma_resource_manager_data_buffer* pDataBuffer)
+{
+    ma_result result = MA_SUCCESS;
+    ma_resource_manager_data_buffer_node* pDataBufferNode;
+    ma_data_source_config dataSourceConfig;
+    ma_bool32 async;
+    ma_uint32 flags;
+    ma_resource_manager_pipeline_notifications notifications;
+
+    if (pDataBuffer == NULL) {
+        if (pConfig != NULL && pConfig->pNotifications != NULL) {
+            ma_resource_manager_pipeline_notifications_signal_all_notifications(pConfig->pNotifications);
+        }
+
+        return MA_INVALID_ARGS;
+    }
+
+    MA_ZERO_OBJECT(pDataBuffer);
+
+    if (pConfig == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    if (pConfig->pNotifications != NULL) {
+        notifications = *pConfig->pNotifications;  /* From here on out we should be referencing `notifications` instead of `pNotifications`. Set this to NULL to catch errors at testing time. */
+    } else {
+        MA_ZERO_OBJECT(&notifications);
+    }
+
+    /* For safety, always remove the ASYNC flag if threading is disabled on the resource manager. */
+    flags = pConfig->flags;
+    if (ma_resource_manager_is_threading_enabled(pResourceManager) == MA_FALSE) {
+        flags &= ~MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_ASYNC;
+    }
+
+    if (pConfig->isLooping) {
+        flags |= MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_LOOPING;
+    }
+
+    async = (flags & MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_ASYNC) != 0;
+
+    /*
+    Fences need to be acquired before doing anything. These must be acquired and released outside of
+    the node to ensure there's no holes where ma_fence_wait() could prematurely return before the
+    data buffer has completed initialization.
+
+    When loading asynchronously, the node acquisition routine below will acquire the fences on this
+    thread and then release them on the async thread when the operation is complete.
+
+    These fences are always released at the "done" tag at the end of this function. They'll be
+    acquired a second time if loading asynchronously. This double acquisition system is just done to
+    simplify code maintenance.
+    */
+    ma_resource_manager_pipeline_notifications_acquire_all_fences(&notifications);
+    {
+        /* We first need to acquire a node. If ASYNC is not set, this will not return until the entire sound has been loaded.
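+        As an illustration of how this behaves from the caller's side (a sketch only; the
+        resourceManager and dataBuffer variables are illustrative):
+
+            result = ma_resource_manager_data_buffer_init(&resourceManager, "sound.wav",
+                MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_DECODE | MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_ASYNC,
+                NULL, &dataBuffer);
+
+        returns almost immediately and decodes in the background, whereas dropping the ASYNC flag
+        makes the same call block until the whole sound is in memory.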
*/ + result = ma_resource_manager_data_buffer_node_acquire(pResourceManager, pConfig->pFilePath, pConfig->pFilePathW, hashedName32, flags, NULL, notifications.init.pFence, notifications.done.pFence, &pDataBufferNode); + if (result != MA_SUCCESS) { + ma_resource_manager_pipeline_notifications_signal_all_notifications(¬ifications); + goto done; + } + + dataSourceConfig = ma_data_source_config_init(); + dataSourceConfig.vtable = &g_ma_resource_manager_data_buffer_vtable; + + result = ma_data_source_init(&dataSourceConfig, &pDataBuffer->ds); + if (result != MA_SUCCESS) { + ma_resource_manager_data_buffer_node_unacquire(pResourceManager, pDataBufferNode, NULL, NULL); + ma_resource_manager_pipeline_notifications_signal_all_notifications(¬ifications); + goto done; + } + + pDataBuffer->pResourceManager = pResourceManager; + pDataBuffer->pNode = pDataBufferNode; + pDataBuffer->flags = flags; + pDataBuffer->result = MA_BUSY; /* Always default to MA_BUSY for safety. It'll be overwritten when loading completes or an error occurs. */ + + /* If we're loading asynchronously we need to post a job to the job queue to initialize the connector. */ + if (async == MA_FALSE || ma_resource_manager_data_buffer_node_result(pDataBufferNode) == MA_SUCCESS) { + /* Loading synchronously or the data has already been fully loaded. We can just initialize the connector from here without a job. */ + result = ma_resource_manager_data_buffer_init_connector(pDataBuffer, pConfig, NULL, NULL); + ma_atomic_exchange_i32(&pDataBuffer->result, result); + + ma_resource_manager_pipeline_notifications_signal_all_notifications(¬ifications); + goto done; + } else { + /* The node's data supply isn't initialized yet. The caller has requested that we load asynchronously so we need to post a job to do this. */ + ma_job job; + ma_resource_manager_inline_notification initNotification; /* Used when the WAIT_INIT flag is set. */ + + if ((flags & MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_WAIT_INIT) != 0) { + ma_resource_manager_inline_notification_init(pResourceManager, &initNotification); + } + + /* + The status of the data buffer needs to be set to MA_BUSY before posting the job so that the + worker thread is aware of its busy state. If the LOAD_DATA_BUFFER job sees a status other + than MA_BUSY, it'll assume an error and fall through to an early exit. + */ + ma_atomic_exchange_i32(&pDataBuffer->result, MA_BUSY); + + /* Acquire fences a second time. These will be released by the async thread. */ + ma_resource_manager_pipeline_notifications_acquire_all_fences(¬ifications); + + job = ma_job_init(MA_JOB_TYPE_RESOURCE_MANAGER_LOAD_DATA_BUFFER); + job.order = ma_resource_manager_data_buffer_next_execution_order(pDataBuffer); + job.data.resourceManager.loadDataBuffer.pDataBuffer = pDataBuffer; + job.data.resourceManager.loadDataBuffer.pInitNotification = ((flags & MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_WAIT_INIT) != 0) ? 
&initNotification : notifications.init.pNotification; + job.data.resourceManager.loadDataBuffer.pDoneNotification = notifications.done.pNotification; + job.data.resourceManager.loadDataBuffer.pInitFence = notifications.init.pFence; + job.data.resourceManager.loadDataBuffer.pDoneFence = notifications.done.pFence; + job.data.resourceManager.loadDataBuffer.rangeBegInPCMFrames = pConfig->rangeBegInPCMFrames; + job.data.resourceManager.loadDataBuffer.rangeEndInPCMFrames = pConfig->rangeEndInPCMFrames; + job.data.resourceManager.loadDataBuffer.loopPointBegInPCMFrames = pConfig->loopPointBegInPCMFrames; + job.data.resourceManager.loadDataBuffer.loopPointEndInPCMFrames = pConfig->loopPointEndInPCMFrames; + job.data.resourceManager.loadDataBuffer.isLooping = (flags & MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_LOOPING) != 0; + + /* If we need to wait for initialization to complete we can just process the job in place. */ + if ((flags & MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_WAIT_INIT) != 0) { + result = ma_job_process(&job); + } else { + result = ma_resource_manager_post_job(pResourceManager, &job); + } + + if (result != MA_SUCCESS) { + /* We failed to post the job. Most likely there isn't enough room in the queue's buffer. */ + ma_log_postf(ma_resource_manager_get_log(pResourceManager), MA_LOG_LEVEL_ERROR, "Failed to post MA_JOB_TYPE_RESOURCE_MANAGER_LOAD_DATA_BUFFER job. %s.\n", ma_result_description(result)); + ma_atomic_exchange_i32(&pDataBuffer->result, result); + + /* Release the fences after the result has been set on the data buffer. */ + ma_resource_manager_pipeline_notifications_release_all_fences(¬ifications); + } else { + if ((flags & MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_WAIT_INIT) != 0) { + ma_resource_manager_inline_notification_wait(&initNotification); + + if (notifications.init.pNotification != NULL) { + ma_async_notification_signal(notifications.init.pNotification); + } + + /* NOTE: Do not release the init fence here. It will have been done by the job. */ + + /* Make sure we return an error if initialization failed on the async thread. 
*/ + result = ma_resource_manager_data_buffer_result(pDataBuffer); + if (result == MA_BUSY) { + result = MA_SUCCESS; + } + } + } + + if ((flags & MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_WAIT_INIT) != 0) { + ma_resource_manager_inline_notification_uninit(&initNotification); + } + } + + if (result != MA_SUCCESS) { + ma_resource_manager_data_buffer_node_unacquire(pResourceManager, pDataBufferNode, NULL, NULL); + goto done; + } + } +done: + if (result == MA_SUCCESS) { + if (pConfig->initialSeekPointInPCMFrames > 0) { + ma_resource_manager_data_buffer_seek_to_pcm_frame(pDataBuffer, pConfig->initialSeekPointInPCMFrames); + } + } + + ma_resource_manager_pipeline_notifications_release_all_fences(¬ifications); + + return result; +} + +MA_API ma_result ma_resource_manager_data_buffer_init_ex(ma_resource_manager* pResourceManager, const ma_resource_manager_data_source_config* pConfig, ma_resource_manager_data_buffer* pDataBuffer) +{ + return ma_resource_manager_data_buffer_init_ex_internal(pResourceManager, pConfig, 0, pDataBuffer); +} + +MA_API ma_result ma_resource_manager_data_buffer_init(ma_resource_manager* pResourceManager, const char* pFilePath, ma_uint32 flags, const ma_resource_manager_pipeline_notifications* pNotifications, ma_resource_manager_data_buffer* pDataBuffer) +{ + ma_resource_manager_data_source_config config; + + config = ma_resource_manager_data_source_config_init(); + config.pFilePath = pFilePath; + config.flags = flags; + config.pNotifications = pNotifications; + + return ma_resource_manager_data_buffer_init_ex(pResourceManager, &config, pDataBuffer); +} + +MA_API ma_result ma_resource_manager_data_buffer_init_w(ma_resource_manager* pResourceManager, const wchar_t* pFilePath, ma_uint32 flags, const ma_resource_manager_pipeline_notifications* pNotifications, ma_resource_manager_data_buffer* pDataBuffer) +{ + ma_resource_manager_data_source_config config; + + config = ma_resource_manager_data_source_config_init(); + config.pFilePathW = pFilePath; + config.flags = flags; + config.pNotifications = pNotifications; + + return ma_resource_manager_data_buffer_init_ex(pResourceManager, &config, pDataBuffer); +} + +MA_API ma_result ma_resource_manager_data_buffer_init_copy(ma_resource_manager* pResourceManager, const ma_resource_manager_data_buffer* pExistingDataBuffer, ma_resource_manager_data_buffer* pDataBuffer) +{ + ma_resource_manager_data_source_config config; + + if (pExistingDataBuffer == NULL) { + return MA_INVALID_ARGS; + } + + MA_ASSERT(pExistingDataBuffer->pNode != NULL); /* <-- If you've triggered this, you've passed in an invalid existing data buffer. */ + + config = ma_resource_manager_data_source_config_init(); + config.flags = pExistingDataBuffer->flags; + + return ma_resource_manager_data_buffer_init_ex_internal(pResourceManager, &config, pExistingDataBuffer->pNode->hashedName32, pDataBuffer); +} + +static ma_result ma_resource_manager_data_buffer_uninit_internal(ma_resource_manager_data_buffer* pDataBuffer) +{ + MA_ASSERT(pDataBuffer != NULL); + + /* The connector should be uninitialized first. */ + ma_resource_manager_data_buffer_uninit_connector(pDataBuffer->pResourceManager, pDataBuffer); + + /* With the connector uninitialized we can unacquire the node. */ + ma_resource_manager_data_buffer_node_unacquire(pDataBuffer->pResourceManager, pDataBuffer->pNode, NULL, NULL); + + /* The base data source needs to be uninitialized as well. 
 */
+    ma_data_source_uninit(&pDataBuffer->ds);
+
+    return MA_SUCCESS;
+}
+
+MA_API ma_result ma_resource_manager_data_buffer_uninit(ma_resource_manager_data_buffer* pDataBuffer)
+{
+    ma_result result;
+
+    if (pDataBuffer == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    if (ma_resource_manager_data_buffer_result(pDataBuffer) == MA_SUCCESS) {
+        /* The data buffer can be deleted synchronously. */
+        return ma_resource_manager_data_buffer_uninit_internal(pDataBuffer);
+    } else {
+        /*
+        The data buffer needs to be deleted asynchronously because it's still loading. With the status set to MA_UNAVAILABLE, no more pages will
+        be loaded and the uninitialization should happen fairly quickly. Since the caller owns the data buffer, we need to wait for this event
+        to get processed before returning.
+        */
+        ma_resource_manager_inline_notification notification;
+        ma_job job;
+
+        /*
+        We need to mark the node as unavailable so we don't try reading from it anymore, but also to
+        let the loading thread know that it needs to abort its loading procedure.
+        */
+        ma_atomic_exchange_i32(&pDataBuffer->result, MA_UNAVAILABLE);
+
+        result = ma_resource_manager_inline_notification_init(pDataBuffer->pResourceManager, &notification);
+        if (result != MA_SUCCESS) {
+            return result;  /* Failed to create the notification. This should rarely, if ever, happen. */
+        }
+
+        job = ma_job_init(MA_JOB_TYPE_RESOURCE_MANAGER_FREE_DATA_BUFFER);
+        job.order = ma_resource_manager_data_buffer_next_execution_order(pDataBuffer);
+        job.data.resourceManager.freeDataBuffer.pDataBuffer = pDataBuffer;
+        job.data.resourceManager.freeDataBuffer.pDoneNotification = &notification;
+        job.data.resourceManager.freeDataBuffer.pDoneFence = NULL;
+
+        result = ma_resource_manager_post_job(pDataBuffer->pResourceManager, &job);
+        if (result != MA_SUCCESS) {
+            ma_resource_manager_inline_notification_uninit(&notification);
+            return result;
+        }
+
+        ma_resource_manager_inline_notification_wait_and_uninit(&notification);
+    }
+
+    return result;
+}
+
+MA_API ma_result ma_resource_manager_data_buffer_read_pcm_frames(ma_resource_manager_data_buffer* pDataBuffer, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead)
+{
+    ma_result result = MA_SUCCESS;
+    ma_uint64 framesRead = 0;
+    ma_bool32 isDecodedBufferBusy = MA_FALSE;
+
+    /* Safety. */
+    if (pFramesRead != NULL) {
+        *pFramesRead = 0;
+    }
+
+    if (frameCount == 0) {
+        return MA_INVALID_ARGS;
+    }
+
+    /*
+    We cannot be using the data buffer after it's been uninitialized. If you trigger this assert it means you're trying to read from the data buffer after
+    it's been uninitialized or is in the process of uninitializing.
+    */
+    MA_ASSERT(ma_resource_manager_data_buffer_node_result(pDataBuffer->pNode) != MA_UNAVAILABLE);
+
+    /* If the node is not initialized we need to abort with a busy code. */
+    if (ma_resource_manager_data_buffer_has_connector(pDataBuffer) == MA_FALSE) {
+        return MA_BUSY; /* Still loading. */
+    }
+
+    /*
+    If we've got a seek scheduled we'll want to do that before reading. However, for paged buffers, there's
+    a chance that the sound hasn't yet been decoded up to the seek point, which will result in the seek failing. If
+    this happens, we need to keep the seek scheduled and return MA_BUSY.
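+    (The seek will then be re-attempted on the next read, by which time more pages may have been
+    decoded, so callers can simply retry on MA_BUSY.)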
+    */
+    if (pDataBuffer->seekToCursorOnNextRead) {
+        pDataBuffer->seekToCursorOnNextRead = MA_FALSE;
+
+        result = ma_data_source_seek_to_pcm_frame(ma_resource_manager_data_buffer_get_connector(pDataBuffer), pDataBuffer->seekTargetInPCMFrames);
+        if (result != MA_SUCCESS) {
+            if (result == MA_BAD_SEEK && ma_resource_manager_data_buffer_node_get_data_supply_type(pDataBuffer->pNode) == ma_resource_manager_data_supply_type_decoded_paged) {
+                pDataBuffer->seekToCursorOnNextRead = MA_TRUE; /* Keep the seek scheduled. We just haven't loaded enough data yet to do the seek properly. */
+                return MA_BUSY;
+            }
+
+            return result;
+        }
+    }
+
+    /*
+    For decoded buffers (not paged) we need to check beforehand how many frames we have available. We cannot
+    exceed this amount. We'll read as much as we can, and then return MA_BUSY.
+    */
+    if (ma_resource_manager_data_buffer_node_get_data_supply_type(pDataBuffer->pNode) == ma_resource_manager_data_supply_type_decoded) {
+        ma_uint64 availableFrames;
+
+        isDecodedBufferBusy = (ma_resource_manager_data_buffer_node_result(pDataBuffer->pNode) == MA_BUSY);
+
+        if (ma_resource_manager_data_buffer_get_available_frames(pDataBuffer, &availableFrames) == MA_SUCCESS) {
+            /* Don't try reading more than the available frame count if the data buffer node is still loading. */
+            if (isDecodedBufferBusy) {
+                if (frameCount > availableFrames) {
+                    frameCount = availableFrames;
+
+                    /*
+                    If there are no frames available we want to set the status to MA_AT_END. The logic below
+                    will check if the node is busy, and if so, change it to MA_BUSY. The reason we do this
+                    is because we don't want to call `ma_data_source_read_pcm_frames()` if the frame count
+                    is 0 because that'll result in a situation where it's possible MA_AT_END won't get
+                    returned.
+                    */
+                    if (frameCount == 0) {
+                        result = MA_AT_END;
+                    }
+                } else {
+                    isDecodedBufferBusy = MA_FALSE; /* We have enough frames available in the buffer to avoid a MA_BUSY status. */
+                }
+            } else {
+                /*
+                Getting here means the buffer has been fully loaded. We can just pass the frame count straight
+                into ma_data_source_read_pcm_frames() below and let ma_data_source handle it.
+                */
+            }
+        }
+    }
+
+    /* Don't attempt to read anything if we've got no frames available. */
+    if (frameCount > 0) {
+        result = ma_data_source_read_pcm_frames(ma_resource_manager_data_buffer_get_connector(pDataBuffer), pFramesOut, frameCount, &framesRead);
+    }
+
+    /*
+    If we returned MA_AT_END, but the node is still loading, we don't want to return that code or else the caller will interpret the sound
+    as at the end and terminate decoding.
+    */
+    if (result == MA_AT_END) {
+        if (ma_resource_manager_data_buffer_node_result(pDataBuffer->pNode) == MA_BUSY) {
+            result = MA_BUSY;
+        }
+    }
+
+    if (isDecodedBufferBusy) {
+        result = MA_BUSY;
+    }
+
+    if (pFramesRead != NULL) {
+        *pFramesRead = framesRead;
+    }
+
+    if (result == MA_SUCCESS && framesRead == 0) {
+        result = MA_AT_END;
+    }
+
+    return result;
+}
+
+MA_API ma_result ma_resource_manager_data_buffer_seek_to_pcm_frame(ma_resource_manager_data_buffer* pDataBuffer, ma_uint64 frameIndex)
+{
+    ma_result result;
+
+    /* We cannot be using the data source after it's been uninitialized. */
+    MA_ASSERT(ma_resource_manager_data_buffer_node_result(pDataBuffer->pNode) != MA_UNAVAILABLE);
+
+    /* If we haven't yet got a connector we need to abort.
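+    The requested position is remembered in seekTargetInPCMFrames and the seek is re-applied on the
+    next read once the connector exists.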
 */
+    if (ma_resource_manager_data_buffer_has_connector(pDataBuffer) == MA_FALSE) {
+        pDataBuffer->seekTargetInPCMFrames = frameIndex;
+        pDataBuffer->seekToCursorOnNextRead = MA_TRUE;
+        return MA_BUSY; /* Still loading. */
+    }
+
+    result = ma_data_source_seek_to_pcm_frame(ma_resource_manager_data_buffer_get_connector(pDataBuffer), frameIndex);
+    if (result != MA_SUCCESS) {
+        return result;
+    }
+
+    pDataBuffer->seekTargetInPCMFrames = ~(ma_uint64)0; /* <-- For identification purposes. */
+    pDataBuffer->seekToCursorOnNextRead = MA_FALSE;
+
+    return MA_SUCCESS;
+}
+
+MA_API ma_result ma_resource_manager_data_buffer_get_data_format(ma_resource_manager_data_buffer* pDataBuffer, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate, ma_channel* pChannelMap, size_t channelMapCap)
+{
+    /* We cannot be using the data source after it's been uninitialized. */
+    MA_ASSERT(ma_resource_manager_data_buffer_node_result(pDataBuffer->pNode) != MA_UNAVAILABLE);
+
+    switch (ma_resource_manager_data_buffer_node_get_data_supply_type(pDataBuffer->pNode))
+    {
+        case ma_resource_manager_data_supply_type_encoded:
+        {
+            return ma_data_source_get_data_format(&pDataBuffer->connector.decoder, pFormat, pChannels, pSampleRate, pChannelMap, channelMapCap);
+        };
+
+        case ma_resource_manager_data_supply_type_decoded:
+        {
+            *pFormat = pDataBuffer->pNode->data.backend.decoded.format;
+            *pChannels = pDataBuffer->pNode->data.backend.decoded.channels;
+            *pSampleRate = pDataBuffer->pNode->data.backend.decoded.sampleRate;
+            ma_channel_map_init_standard(ma_standard_channel_map_default, pChannelMap, channelMapCap, pDataBuffer->pNode->data.backend.decoded.channels);
+            return MA_SUCCESS;
+        };
+
+        case ma_resource_manager_data_supply_type_decoded_paged:
+        {
+            *pFormat = pDataBuffer->pNode->data.backend.decodedPaged.data.format;
+            *pChannels = pDataBuffer->pNode->data.backend.decodedPaged.data.channels;
+            *pSampleRate = pDataBuffer->pNode->data.backend.decodedPaged.sampleRate;
+            ma_channel_map_init_standard(ma_standard_channel_map_default, pChannelMap, channelMapCap, pDataBuffer->pNode->data.backend.decodedPaged.data.channels);
+            return MA_SUCCESS;
+        };
+
+        case ma_resource_manager_data_supply_type_unknown:
+        {
+            return MA_BUSY; /* Still loading. */
+        };
+
+        default:
+        {
+            /* Unknown supply type. Should never hit this. */
+            return MA_INVALID_ARGS;
+        }
+    }
+}
+
+MA_API ma_result ma_resource_manager_data_buffer_get_cursor_in_pcm_frames(ma_resource_manager_data_buffer* pDataBuffer, ma_uint64* pCursor)
+{
+    /* We cannot be using the data source after it's been uninitialized.
*/ + MA_ASSERT(ma_resource_manager_data_buffer_node_result(pDataBuffer->pNode) != MA_UNAVAILABLE); + + if (pDataBuffer == NULL || pCursor == NULL) { + return MA_INVALID_ARGS; + } + + *pCursor = 0; + + switch (ma_resource_manager_data_buffer_node_get_data_supply_type(pDataBuffer->pNode)) + { + case ma_resource_manager_data_supply_type_encoded: + { + return ma_decoder_get_cursor_in_pcm_frames(&pDataBuffer->connector.decoder, pCursor); + }; + + case ma_resource_manager_data_supply_type_decoded: + { + return ma_audio_buffer_get_cursor_in_pcm_frames(&pDataBuffer->connector.buffer, pCursor); + }; + + case ma_resource_manager_data_supply_type_decoded_paged: + { + return ma_paged_audio_buffer_get_cursor_in_pcm_frames(&pDataBuffer->connector.pagedBuffer, pCursor); + }; + + case ma_resource_manager_data_supply_type_unknown: + { + return MA_BUSY; + }; + + default: + { + return MA_INVALID_ARGS; + } + } +} + +MA_API ma_result ma_resource_manager_data_buffer_get_length_in_pcm_frames(ma_resource_manager_data_buffer* pDataBuffer, ma_uint64* pLength) +{ + /* We cannot be using the data source after it's been uninitialized. */ + MA_ASSERT(ma_resource_manager_data_buffer_node_result(pDataBuffer->pNode) != MA_UNAVAILABLE); + + if (pDataBuffer == NULL || pLength == NULL) { + return MA_INVALID_ARGS; + } + + if (ma_resource_manager_data_buffer_node_get_data_supply_type(pDataBuffer->pNode) == ma_resource_manager_data_supply_type_unknown) { + return MA_BUSY; /* Still loading. */ + } + + return ma_data_source_get_length_in_pcm_frames(ma_resource_manager_data_buffer_get_connector(pDataBuffer), pLength); +} + +MA_API ma_result ma_resource_manager_data_buffer_result(const ma_resource_manager_data_buffer* pDataBuffer) +{ + if (pDataBuffer == NULL) { + return MA_INVALID_ARGS; + } + + return (ma_result)ma_atomic_load_i32((ma_result*)&pDataBuffer->result); /* Need a naughty const-cast here. */ +} + +MA_API ma_result ma_resource_manager_data_buffer_set_looping(ma_resource_manager_data_buffer* pDataBuffer, ma_bool32 isLooping) +{ + return ma_data_source_set_looping(pDataBuffer, isLooping); +} + +MA_API ma_bool32 ma_resource_manager_data_buffer_is_looping(const ma_resource_manager_data_buffer* pDataBuffer) +{ + return ma_data_source_is_looping(pDataBuffer); +} + +MA_API ma_result ma_resource_manager_data_buffer_get_available_frames(ma_resource_manager_data_buffer* pDataBuffer, ma_uint64* pAvailableFrames) +{ + if (pAvailableFrames == NULL) { + return MA_INVALID_ARGS; + } + + *pAvailableFrames = 0; + + if (pDataBuffer == NULL) { + return MA_INVALID_ARGS; + } + + if (ma_resource_manager_data_buffer_node_get_data_supply_type(pDataBuffer->pNode) == ma_resource_manager_data_supply_type_unknown) { + if (ma_resource_manager_data_buffer_node_result(pDataBuffer->pNode) == MA_BUSY) { + return MA_BUSY; + } else { + return MA_INVALID_OPERATION; /* No connector. 
*/ + } + } + + switch (ma_resource_manager_data_buffer_node_get_data_supply_type(pDataBuffer->pNode)) + { + case ma_resource_manager_data_supply_type_encoded: + { + return ma_decoder_get_available_frames(&pDataBuffer->connector.decoder, pAvailableFrames); + }; + + case ma_resource_manager_data_supply_type_decoded: + { + return ma_audio_buffer_get_available_frames(&pDataBuffer->connector.buffer, pAvailableFrames); + }; + + case ma_resource_manager_data_supply_type_decoded_paged: + { + ma_uint64 cursor; + ma_paged_audio_buffer_get_cursor_in_pcm_frames(&pDataBuffer->connector.pagedBuffer, &cursor); + + if (pDataBuffer->pNode->data.backend.decodedPaged.decodedFrameCount > cursor) { + *pAvailableFrames = pDataBuffer->pNode->data.backend.decodedPaged.decodedFrameCount - cursor; + } else { + *pAvailableFrames = 0; + } + + return MA_SUCCESS; + }; + + case ma_resource_manager_data_supply_type_unknown: + default: + { + /* Unknown supply type. Should never hit this. */ + return MA_INVALID_ARGS; + } + } +} + +MA_API ma_result ma_resource_manager_register_file(ma_resource_manager* pResourceManager, const char* pFilePath, ma_uint32 flags) +{ + return ma_resource_manager_data_buffer_node_acquire(pResourceManager, pFilePath, NULL, 0, flags, NULL, NULL, NULL, NULL); +} + +MA_API ma_result ma_resource_manager_register_file_w(ma_resource_manager* pResourceManager, const wchar_t* pFilePath, ma_uint32 flags) +{ + return ma_resource_manager_data_buffer_node_acquire(pResourceManager, NULL, pFilePath, 0, flags, NULL, NULL, NULL, NULL); +} + + +static ma_result ma_resource_manager_register_data(ma_resource_manager* pResourceManager, const char* pName, const wchar_t* pNameW, ma_resource_manager_data_supply* pExistingData) +{ + return ma_resource_manager_data_buffer_node_acquire(pResourceManager, pName, pNameW, 0, 0, pExistingData, NULL, NULL, NULL); +} + +static ma_result ma_resource_manager_register_decoded_data_internal(ma_resource_manager* pResourceManager, const char* pName, const wchar_t* pNameW, const void* pData, ma_uint64 frameCount, ma_format format, ma_uint32 channels, ma_uint32 sampleRate) +{ + ma_resource_manager_data_supply data; + data.type = ma_resource_manager_data_supply_type_decoded; + data.backend.decoded.pData = pData; + data.backend.decoded.totalFrameCount = frameCount; + data.backend.decoded.format = format; + data.backend.decoded.channels = channels; + data.backend.decoded.sampleRate = sampleRate; + + return ma_resource_manager_register_data(pResourceManager, pName, pNameW, &data); +} + +MA_API ma_result ma_resource_manager_register_decoded_data(ma_resource_manager* pResourceManager, const char* pName, const void* pData, ma_uint64 frameCount, ma_format format, ma_uint32 channels, ma_uint32 sampleRate) +{ + return ma_resource_manager_register_decoded_data_internal(pResourceManager, pName, NULL, pData, frameCount, format, channels, sampleRate); +} + +MA_API ma_result ma_resource_manager_register_decoded_data_w(ma_resource_manager* pResourceManager, const wchar_t* pName, const void* pData, ma_uint64 frameCount, ma_format format, ma_uint32 channels, ma_uint32 sampleRate) +{ + return ma_resource_manager_register_decoded_data_internal(pResourceManager, NULL, pName, pData, frameCount, format, channels, sampleRate); +} + + +static ma_result ma_resource_manager_register_encoded_data_internal(ma_resource_manager* pResourceManager, const char* pName, const wchar_t* pNameW, const void* pData, size_t sizeInBytes) +{ + ma_resource_manager_data_supply data; + data.type = 
ma_resource_manager_data_supply_type_encoded; + data.backend.encoded.pData = pData; + data.backend.encoded.sizeInBytes = sizeInBytes; + + return ma_resource_manager_register_data(pResourceManager, pName, pNameW, &data); +} + +MA_API ma_result ma_resource_manager_register_encoded_data(ma_resource_manager* pResourceManager, const char* pName, const void* pData, size_t sizeInBytes) +{ + return ma_resource_manager_register_encoded_data_internal(pResourceManager, pName, NULL, pData, sizeInBytes); +} + +MA_API ma_result ma_resource_manager_register_encoded_data_w(ma_resource_manager* pResourceManager, const wchar_t* pName, const void* pData, size_t sizeInBytes) +{ + return ma_resource_manager_register_encoded_data_internal(pResourceManager, NULL, pName, pData, sizeInBytes); +} + + +MA_API ma_result ma_resource_manager_unregister_file(ma_resource_manager* pResourceManager, const char* pFilePath) +{ + return ma_resource_manager_unregister_data(pResourceManager, pFilePath); +} + +MA_API ma_result ma_resource_manager_unregister_file_w(ma_resource_manager* pResourceManager, const wchar_t* pFilePath) +{ + return ma_resource_manager_unregister_data_w(pResourceManager, pFilePath); +} + +MA_API ma_result ma_resource_manager_unregister_data(ma_resource_manager* pResourceManager, const char* pName) +{ + return ma_resource_manager_data_buffer_node_unacquire(pResourceManager, NULL, pName, NULL); +} + +MA_API ma_result ma_resource_manager_unregister_data_w(ma_resource_manager* pResourceManager, const wchar_t* pName) +{ + return ma_resource_manager_data_buffer_node_unacquire(pResourceManager, NULL, NULL, pName); +} + + +static ma_uint32 ma_resource_manager_data_stream_next_execution_order(ma_resource_manager_data_stream* pDataStream) +{ + MA_ASSERT(pDataStream != NULL); + return ma_atomic_fetch_add_32(&pDataStream->executionCounter, 1); +} + +static ma_bool32 ma_resource_manager_data_stream_is_decoder_at_end(const ma_resource_manager_data_stream* pDataStream) +{ + MA_ASSERT(pDataStream != NULL); + return ma_atomic_load_32((ma_bool32*)&pDataStream->isDecoderAtEnd); +} + +static ma_uint32 ma_resource_manager_data_stream_seek_counter(const ma_resource_manager_data_stream* pDataStream) +{ + MA_ASSERT(pDataStream != NULL); + return ma_atomic_load_32((ma_uint32*)&pDataStream->seekCounter); +} + + +static ma_result ma_resource_manager_data_stream_cb__read_pcm_frames(ma_data_source* pDataSource, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead) +{ + return ma_resource_manager_data_stream_read_pcm_frames((ma_resource_manager_data_stream*)pDataSource, pFramesOut, frameCount, pFramesRead); +} + +static ma_result ma_resource_manager_data_stream_cb__seek_to_pcm_frame(ma_data_source* pDataSource, ma_uint64 frameIndex) +{ + return ma_resource_manager_data_stream_seek_to_pcm_frame((ma_resource_manager_data_stream*)pDataSource, frameIndex); +} + +static ma_result ma_resource_manager_data_stream_cb__get_data_format(ma_data_source* pDataSource, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate, ma_channel* pChannelMap, size_t channelMapCap) +{ + return ma_resource_manager_data_stream_get_data_format((ma_resource_manager_data_stream*)pDataSource, pFormat, pChannels, pSampleRate, pChannelMap, channelMapCap); +} + +static ma_result ma_resource_manager_data_stream_cb__get_cursor_in_pcm_frames(ma_data_source* pDataSource, ma_uint64* pCursor) +{ + return ma_resource_manager_data_stream_get_cursor_in_pcm_frames((ma_resource_manager_data_stream*)pDataSource, pCursor); +} + +static ma_result 
ma_resource_manager_data_stream_cb__get_length_in_pcm_frames(ma_data_source* pDataSource, ma_uint64* pLength) +{ + return ma_resource_manager_data_stream_get_length_in_pcm_frames((ma_resource_manager_data_stream*)pDataSource, pLength); +} + +static ma_result ma_resource_manager_data_stream_cb__set_looping(ma_data_source* pDataSource, ma_bool32 isLooping) +{ + ma_resource_manager_data_stream* pDataStream = (ma_resource_manager_data_stream*)pDataSource; + MA_ASSERT(pDataStream != NULL); + + ma_atomic_exchange_32(&pDataStream->isLooping, isLooping); + + return MA_SUCCESS; +} + +static ma_data_source_vtable g_ma_resource_manager_data_stream_vtable = +{ + ma_resource_manager_data_stream_cb__read_pcm_frames, + ma_resource_manager_data_stream_cb__seek_to_pcm_frame, + ma_resource_manager_data_stream_cb__get_data_format, + ma_resource_manager_data_stream_cb__get_cursor_in_pcm_frames, + ma_resource_manager_data_stream_cb__get_length_in_pcm_frames, + ma_resource_manager_data_stream_cb__set_looping, + 0 /*MA_DATA_SOURCE_SELF_MANAGED_RANGE_AND_LOOP_POINT*/ +}; + +static void ma_resource_manager_data_stream_set_absolute_cursor(ma_resource_manager_data_stream* pDataStream, ma_uint64 absoluteCursor) +{ + /* Loop if possible. */ + if (absoluteCursor > pDataStream->totalLengthInPCMFrames && pDataStream->totalLengthInPCMFrames > 0) { + absoluteCursor = absoluteCursor % pDataStream->totalLengthInPCMFrames; + } + + ma_atomic_exchange_64(&pDataStream->absoluteCursor, absoluteCursor); +} + +MA_API ma_result ma_resource_manager_data_stream_init_ex(ma_resource_manager* pResourceManager, const ma_resource_manager_data_source_config* pConfig, ma_resource_manager_data_stream* pDataStream) +{ + ma_result result; + ma_data_source_config dataSourceConfig; + char* pFilePathCopy = NULL; + wchar_t* pFilePathWCopy = NULL; + ma_job job; + ma_bool32 waitBeforeReturning = MA_FALSE; + ma_resource_manager_inline_notification waitNotification; + ma_resource_manager_pipeline_notifications notifications; + ma_uint32 flags; + + if (pDataStream == NULL) { + if (pConfig != NULL && pConfig->pNotifications != NULL) { + ma_resource_manager_pipeline_notifications_signal_all_notifications(pConfig->pNotifications); + } + + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pDataStream); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + if (pConfig->pNotifications != NULL) { + notifications = *pConfig->pNotifications; /* From here on out, `notifications` should be used instead of `pNotifications`. Setting this to NULL to catch any errors at testing time. 
 */
+    } else {
+        MA_ZERO_OBJECT(&notifications);
+    }
+
+    dataSourceConfig = ma_data_source_config_init();
+    dataSourceConfig.vtable = &g_ma_resource_manager_data_stream_vtable;
+
+    result = ma_data_source_init(&dataSourceConfig, &pDataStream->ds);
+    if (result != MA_SUCCESS) {
+        ma_resource_manager_pipeline_notifications_signal_all_notifications(&notifications);
+        return result;
+    }
+
+    flags = pConfig->flags;
+    if (pConfig->isLooping) {
+        flags |= MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_LOOPING;
+    }
+
+    pDataStream->pResourceManager = pResourceManager;
+    pDataStream->flags = flags;
+    pDataStream->result = MA_BUSY;
+
+    ma_data_source_set_range_in_pcm_frames(pDataStream, pConfig->rangeBegInPCMFrames, pConfig->rangeEndInPCMFrames);
+    ma_data_source_set_loop_point_in_pcm_frames(pDataStream, pConfig->loopPointBegInPCMFrames, pConfig->loopPointEndInPCMFrames);
+    ma_data_source_set_looping(pDataStream, (flags & MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_LOOPING) != 0);
+
+    if (pResourceManager == NULL || (pConfig->pFilePath == NULL && pConfig->pFilePathW == NULL)) {
+        ma_resource_manager_pipeline_notifications_signal_all_notifications(&notifications);
+        return MA_INVALID_ARGS;
+    }
+
+    /* We want all access to the VFS and the internal decoder to happen on the job thread just to keep things easier to manage for the VFS. */
+
+    /* We need a copy of the file path. We should probably make this more efficient, but for now we'll do a transient memory allocation. */
+    if (pConfig->pFilePath != NULL) {
+        pFilePathCopy = ma_copy_string(pConfig->pFilePath, &pResourceManager->config.allocationCallbacks);
+    } else {
+        pFilePathWCopy = ma_copy_string_w(pConfig->pFilePathW, &pResourceManager->config.allocationCallbacks);
+    }
+
+    if (pFilePathCopy == NULL && pFilePathWCopy == NULL) {
+        ma_resource_manager_pipeline_notifications_signal_all_notifications(&notifications);
+        return MA_OUT_OF_MEMORY;
+    }
+
+    /*
+    We need to check for the presence of MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_ASYNC. If it's not set, we need to wait before returning. Otherwise we
+    can return immediately. Likewise, we'll also check for MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_WAIT_INIT and do the same.
+    */
+    if ((pConfig->flags & MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_ASYNC) == 0 || (pConfig->flags & MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_WAIT_INIT) != 0) {
+        waitBeforeReturning = MA_TRUE;
+        ma_resource_manager_inline_notification_init(pResourceManager, &waitNotification);
+    }
+
+    ma_resource_manager_pipeline_notifications_acquire_all_fences(&notifications);
+
+    /* Set the absolute cursor to our initial seek position so retrieval of the cursor returns a good value. */
+    ma_resource_manager_data_stream_set_absolute_cursor(pDataStream, pConfig->initialSeekPointInPCMFrames);
+
+    /* We now have everything we need to post the job. This is the last thing we need to do from here. The rest will be done by the job thread. */
+    job = ma_job_init(MA_JOB_TYPE_RESOURCE_MANAGER_LOAD_DATA_STREAM);
+    job.order = ma_resource_manager_data_stream_next_execution_order(pDataStream);
+    job.data.resourceManager.loadDataStream.pDataStream = pDataStream;
+    job.data.resourceManager.loadDataStream.pFilePath = pFilePathCopy;
+    job.data.resourceManager.loadDataStream.pFilePathW = pFilePathWCopy;
+    job.data.resourceManager.loadDataStream.initialSeekPoint = pConfig->initialSeekPointInPCMFrames;
+    job.data.resourceManager.loadDataStream.pInitNotification = (waitBeforeReturning == MA_TRUE) ?
&waitNotification : notifications.init.pNotification;
+    job.data.resourceManager.loadDataStream.pInitFence = notifications.init.pFence;
+    result = ma_resource_manager_post_job(pResourceManager, &job);
+    if (result != MA_SUCCESS) {
+        ma_resource_manager_pipeline_notifications_signal_all_notifications(&notifications);
+        ma_resource_manager_pipeline_notifications_release_all_fences(&notifications);
+
+        if (waitBeforeReturning) {
+            ma_resource_manager_inline_notification_uninit(&waitNotification);
+        }
+
+        ma_free(pFilePathCopy, &pResourceManager->config.allocationCallbacks);
+        ma_free(pFilePathWCopy, &pResourceManager->config.allocationCallbacks);
+        return result;
+    }
+
+    /* Wait if needed. */
+    if (waitBeforeReturning) {
+        ma_resource_manager_inline_notification_wait_and_uninit(&waitNotification);
+
+        if (notifications.init.pNotification != NULL) {
+            ma_async_notification_signal(notifications.init.pNotification);
+        }
+
+        /*
+        If there was an error during initialization make sure we return that result here. We don't want to do this
+        if we're not waiting because it will most likely be in a busy state.
+        */
+        if (pDataStream->result != MA_SUCCESS) {
+            return pDataStream->result;
+        }
+
+        /* NOTE: Do not release pInitFence here. That will be done by the job. */
+    }
+
+    return MA_SUCCESS;
+}
+
+MA_API ma_result ma_resource_manager_data_stream_init(ma_resource_manager* pResourceManager, const char* pFilePath, ma_uint32 flags, const ma_resource_manager_pipeline_notifications* pNotifications, ma_resource_manager_data_stream* pDataStream)
+{
+    ma_resource_manager_data_source_config config;
+
+    config = ma_resource_manager_data_source_config_init();
+    config.pFilePath = pFilePath;
+    config.flags = flags;
+    config.pNotifications = pNotifications;
+
+    return ma_resource_manager_data_stream_init_ex(pResourceManager, &config, pDataStream);
+}
+
+MA_API ma_result ma_resource_manager_data_stream_init_w(ma_resource_manager* pResourceManager, const wchar_t* pFilePath, ma_uint32 flags, const ma_resource_manager_pipeline_notifications* pNotifications, ma_resource_manager_data_stream* pDataStream)
+{
+    ma_resource_manager_data_source_config config;
+
+    config = ma_resource_manager_data_source_config_init();
+    config.pFilePathW = pFilePath;
+    config.flags = flags;
+    config.pNotifications = pNotifications;
+
+    return ma_resource_manager_data_stream_init_ex(pResourceManager, &config, pDataStream);
+}
+
+MA_API ma_result ma_resource_manager_data_stream_uninit(ma_resource_manager_data_stream* pDataStream)
+{
+    ma_resource_manager_inline_notification freeEvent;
+    ma_job job;
+
+    if (pDataStream == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    /* The first thing to do is set the result to unavailable. This will prevent future page decoding. */
+    ma_atomic_exchange_i32(&pDataStream->result, MA_UNAVAILABLE);
+
+    /*
+    We need to post a job to ensure we're not in the middle of decoding or anything. Because the object is owned by the caller, we'll need
+    to wait for it to complete before returning, which means we need an event.
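+    The inline notification below is that event - it gets signaled by the job thread once the
+    MA_JOB_TYPE_RESOURCE_MANAGER_FREE_DATA_STREAM job has completed.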
+ */ + ma_resource_manager_inline_notification_init(pDataStream->pResourceManager, &freeEvent); + + job = ma_job_init(MA_JOB_TYPE_RESOURCE_MANAGER_FREE_DATA_STREAM); + job.order = ma_resource_manager_data_stream_next_execution_order(pDataStream); + job.data.resourceManager.freeDataStream.pDataStream = pDataStream; + job.data.resourceManager.freeDataStream.pDoneNotification = &freeEvent; + job.data.resourceManager.freeDataStream.pDoneFence = NULL; + ma_resource_manager_post_job(pDataStream->pResourceManager, &job); + + /* We need to wait for the job to finish processing before we return. */ + ma_resource_manager_inline_notification_wait_and_uninit(&freeEvent); + + return MA_SUCCESS; +} + + +static ma_uint32 ma_resource_manager_data_stream_get_page_size_in_frames(ma_resource_manager_data_stream* pDataStream) +{ + MA_ASSERT(pDataStream != NULL); + MA_ASSERT(pDataStream->isDecoderInitialized == MA_TRUE); + + return MA_RESOURCE_MANAGER_PAGE_SIZE_IN_MILLISECONDS * (pDataStream->decoder.outputSampleRate/1000); +} + +static void* ma_resource_manager_data_stream_get_page_data_pointer(ma_resource_manager_data_stream* pDataStream, ma_uint32 pageIndex, ma_uint32 relativeCursor) +{ + MA_ASSERT(pDataStream != NULL); + MA_ASSERT(pDataStream->isDecoderInitialized == MA_TRUE); + MA_ASSERT(pageIndex == 0 || pageIndex == 1); + + return ma_offset_ptr(pDataStream->pPageData, ((ma_resource_manager_data_stream_get_page_size_in_frames(pDataStream) * pageIndex) + relativeCursor) * ma_get_bytes_per_frame(pDataStream->decoder.outputFormat, pDataStream->decoder.outputChannels)); +} + +static void ma_resource_manager_data_stream_fill_page(ma_resource_manager_data_stream* pDataStream, ma_uint32 pageIndex) +{ + ma_result result = MA_SUCCESS; + ma_uint64 pageSizeInFrames; + ma_uint64 totalFramesReadForThisPage = 0; + void* pPageData = ma_resource_manager_data_stream_get_page_data_pointer(pDataStream, pageIndex, 0); + + pageSizeInFrames = ma_resource_manager_data_stream_get_page_size_in_frames(pDataStream); + + /* The decoder needs to inherit the stream's looping and range state. */ + { + ma_uint64 rangeBeg; + ma_uint64 rangeEnd; + ma_uint64 loopPointBeg; + ma_uint64 loopPointEnd; + + ma_data_source_set_looping(&pDataStream->decoder, ma_resource_manager_data_stream_is_looping(pDataStream)); + + ma_data_source_get_range_in_pcm_frames(pDataStream, &rangeBeg, &rangeEnd); + ma_data_source_set_range_in_pcm_frames(&pDataStream->decoder, rangeBeg, rangeEnd); + + ma_data_source_get_loop_point_in_pcm_frames(pDataStream, &loopPointBeg, &loopPointEnd); + ma_data_source_set_loop_point_in_pcm_frames(&pDataStream->decoder, loopPointBeg, loopPointEnd); + } + + /* Just read straight from the decoder. It will deal with ranges and looping for us. 
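+    (This is why the stream's looping and range state was copied across to the decoder above - it's
+    the decoder's own read path that applies them.)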
 */
+    result = ma_data_source_read_pcm_frames(&pDataStream->decoder, pPageData, pageSizeInFrames, &totalFramesReadForThisPage);
+    if (result == MA_AT_END || totalFramesReadForThisPage < pageSizeInFrames) {
+        ma_atomic_exchange_32(&pDataStream->isDecoderAtEnd, MA_TRUE);
+    }
+
+    ma_atomic_exchange_32(&pDataStream->pageFrameCount[pageIndex], (ma_uint32)totalFramesReadForThisPage);
+    ma_atomic_exchange_32(&pDataStream->isPageValid[pageIndex], MA_TRUE);
+}
+
+static void ma_resource_manager_data_stream_fill_pages(ma_resource_manager_data_stream* pDataStream)
+{
+    ma_uint32 iPage;
+
+    MA_ASSERT(pDataStream != NULL);
+
+    for (iPage = 0; iPage < 2; iPage += 1) {
+        ma_resource_manager_data_stream_fill_page(pDataStream, iPage);
+    }
+}
+
+
+static ma_result ma_resource_manager_data_stream_map(ma_resource_manager_data_stream* pDataStream, void** ppFramesOut, ma_uint64* pFrameCount)
+{
+    ma_uint64 framesAvailable;
+    ma_uint64 frameCount = 0;
+
+    /* We cannot be using the data source after it's been uninitialized. */
+    MA_ASSERT(ma_resource_manager_data_stream_result(pDataStream) != MA_UNAVAILABLE);
+
+    if (pFrameCount != NULL) {
+        frameCount = *pFrameCount;
+        *pFrameCount = 0;
+    }
+    if (ppFramesOut != NULL) {
+        *ppFramesOut = NULL;
+    }
+
+    if (pDataStream == NULL || ppFramesOut == NULL || pFrameCount == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    if (ma_resource_manager_data_stream_result(pDataStream) != MA_SUCCESS) {
+        return MA_INVALID_OPERATION;
+    }
+
+    /* Don't attempt to read while we're in the middle of seeking. Tell the caller that we're busy. */
+    if (ma_resource_manager_data_stream_seek_counter(pDataStream) > 0) {
+        return MA_BUSY;
+    }
+
+    /* If the page we're on is invalid it means we've caught up to the job thread. */
+    if (ma_atomic_load_32(&pDataStream->isPageValid[pDataStream->currentPageIndex]) == MA_FALSE) {
+        framesAvailable = 0;
+    } else {
+        /*
+        The page we're on is valid so we must have some frames available. We need to make sure that we don't overflow into the next page, even if it's valid. The reason is
+        that the unmap process will only post an update for one page at a time. Keeping mapping tied to page boundaries makes this simpler.
+        */
+        ma_uint32 currentPageFrameCount = ma_atomic_load_32(&pDataStream->pageFrameCount[pDataStream->currentPageIndex]);
+        MA_ASSERT(currentPageFrameCount >= pDataStream->relativeCursor);
+
+        framesAvailable = currentPageFrameCount - pDataStream->relativeCursor;
+    }
+
+    /* If there are no frames available and the result is set to MA_AT_END we need to return MA_AT_END. */
+    if (framesAvailable == 0) {
+        if (ma_resource_manager_data_stream_is_decoder_at_end(pDataStream)) {
+            return MA_AT_END;
+        } else {
+            return MA_BUSY; /* There are no frames available, but we're not marked as EOF so we might have caught up to the job thread. Need to return MA_BUSY and wait for more data. */
+        }
+    }
+
+    MA_ASSERT(framesAvailable > 0);
+
+    if (frameCount > framesAvailable) {
+        frameCount = framesAvailable;
+    }
+
+    *ppFramesOut = ma_resource_manager_data_stream_get_page_data_pointer(pDataStream, pDataStream->currentPageIndex, pDataStream->relativeCursor);
+    *pFrameCount = frameCount;
+
+    return MA_SUCCESS;
+}
+
+static ma_result ma_resource_manager_data_stream_unmap(ma_resource_manager_data_stream* pDataStream, ma_uint64 frameCount)
+{
+    ma_uint32 newRelativeCursor;
+    ma_uint32 pageSizeInFrames;
+    ma_job job;
+
+    /* We cannot be using the data source after it's been uninitialized.
 */
+    MA_ASSERT(ma_resource_manager_data_stream_result(pDataStream) != MA_UNAVAILABLE);
+
+    if (pDataStream == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    if (ma_resource_manager_data_stream_result(pDataStream) != MA_SUCCESS) {
+        return MA_INVALID_OPERATION;
+    }
+
+    /* The frame count should always fit inside a 32-bit integer. */
+    if (frameCount > 0xFFFFFFFF) {
+        return MA_INVALID_ARGS;
+    }
+
+    pageSizeInFrames = ma_resource_manager_data_stream_get_page_size_in_frames(pDataStream);
+
+    /* The absolute cursor needs to be updated for ma_resource_manager_data_stream_get_cursor_in_pcm_frames(). */
+    ma_resource_manager_data_stream_set_absolute_cursor(pDataStream, ma_atomic_load_64(&pDataStream->absoluteCursor) + frameCount);
+
+    /* Here is where we need to check if we need to load a new page, and if so, post a job to load it. */
+    newRelativeCursor = pDataStream->relativeCursor + (ma_uint32)frameCount;
+
+    /* If the new cursor has flowed over to the next page we need to mark the old one as invalid and post an event for it. */
+    if (newRelativeCursor >= pageSizeInFrames) {
+        newRelativeCursor -= pageSizeInFrames;
+
+        /* Here is where we post the job to start decoding. */
+        job = ma_job_init(MA_JOB_TYPE_RESOURCE_MANAGER_PAGE_DATA_STREAM);
+        job.order = ma_resource_manager_data_stream_next_execution_order(pDataStream);
+        job.data.resourceManager.pageDataStream.pDataStream = pDataStream;
+        job.data.resourceManager.pageDataStream.pageIndex = pDataStream->currentPageIndex;
+
+        /* The page needs to be marked as invalid so that the public API doesn't try reading from it. */
+        ma_atomic_exchange_32(&pDataStream->isPageValid[pDataStream->currentPageIndex], MA_FALSE);
+
+        /* Before posting the job we need to make sure we set some state. */
+        pDataStream->relativeCursor = newRelativeCursor;
+        pDataStream->currentPageIndex = (pDataStream->currentPageIndex + 1) & 0x01;
+        return ma_resource_manager_post_job(pDataStream->pResourceManager, &job);
+    } else {
+        /* We haven't moved into a new page so we can just move the cursor forward. */
+        pDataStream->relativeCursor = newRelativeCursor;
+        return MA_SUCCESS;
+    }
+}
+
+
+MA_API ma_result ma_resource_manager_data_stream_read_pcm_frames(ma_resource_manager_data_stream* pDataStream, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead)
+{
+    ma_result result = MA_SUCCESS;
+    ma_uint64 totalFramesProcessed;
+    ma_format format;
+    ma_uint32 channels;
+
+    /* Safety. */
+    if (pFramesRead != NULL) {
+        *pFramesRead = 0;
+    }
+
+    if (frameCount == 0) {
+        return MA_INVALID_ARGS;
+    }
+
+    /* We cannot be using the data source after it's been uninitialized. */
+    MA_ASSERT(ma_resource_manager_data_stream_result(pDataStream) != MA_UNAVAILABLE);
+
+    if (pDataStream == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    if (ma_resource_manager_data_stream_result(pDataStream) != MA_SUCCESS) {
+        return MA_INVALID_OPERATION;
+    }
+
+    /* Don't attempt to read while we're in the middle of seeking. Tell the caller that we're busy. */
+    if (ma_resource_manager_data_stream_seek_counter(pDataStream) > 0) {
+        return MA_BUSY;
+    }
+
+    ma_resource_manager_data_stream_get_data_format(pDataStream, &format, &channels, NULL, NULL, 0);
+
+    /* Reading is implemented in terms of map/unmap. We need to run this in a loop because mapping is clamped against page boundaries.
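+    As a purely illustrative example, with 48000-frame pages a request for 60000 frames would first map
+    the frames remaining in the current page, unmap them (which flips to the other page and posts a
+    paging job), then map up to the remaining 12000 frames from the next page if it has been decoded.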
*/ + totalFramesProcessed = 0; + while (totalFramesProcessed < frameCount) { + void* pMappedFrames; + ma_uint64 mappedFrameCount; + + mappedFrameCount = frameCount - totalFramesProcessed; + result = ma_resource_manager_data_stream_map(pDataStream, &pMappedFrames, &mappedFrameCount); + if (result != MA_SUCCESS) { + break; + } + + /* Copy the mapped data to the output buffer if we have one. It's allowed for pFramesOut to be NULL in which case a relative forward seek is performed. */ + if (pFramesOut != NULL) { + ma_copy_pcm_frames(ma_offset_pcm_frames_ptr(pFramesOut, totalFramesProcessed, format, channels), pMappedFrames, mappedFrameCount, format, channels); + } + + totalFramesProcessed += mappedFrameCount; + + result = ma_resource_manager_data_stream_unmap(pDataStream, mappedFrameCount); + if (result != MA_SUCCESS) { + break; /* This is really bad - will only get an error here if we failed to post a job to the queue for loading the next page. */ + } + } + + if (pFramesRead != NULL) { + *pFramesRead = totalFramesProcessed; + } + + if (result == MA_SUCCESS && totalFramesProcessed == 0) { + result = MA_AT_END; + } + + return result; +} + +MA_API ma_result ma_resource_manager_data_stream_seek_to_pcm_frame(ma_resource_manager_data_stream* pDataStream, ma_uint64 frameIndex) +{ + ma_job job; + ma_result streamResult; + + streamResult = ma_resource_manager_data_stream_result(pDataStream); + + /* We cannot be using the data source after it's been uninitialized. */ + MA_ASSERT(streamResult != MA_UNAVAILABLE); + + if (pDataStream == NULL) { + return MA_INVALID_ARGS; + } + + if (streamResult != MA_SUCCESS && streamResult != MA_BUSY) { + return MA_INVALID_OPERATION; + } + + /* If we're not already seeking and we're sitting on the same frame, just make this a no-op. */ + if (ma_atomic_load_32(&pDataStream->seekCounter) == 0) { + if (ma_atomic_load_64(&pDataStream->absoluteCursor) == frameIndex) { + return MA_SUCCESS; + } + } + + + /* Increment the seek counter first to indicate to read_paged_pcm_frames() and map_paged_pcm_frames() that we are in the middle of a seek and MA_BUSY should be returned. */ + ma_atomic_fetch_add_32(&pDataStream->seekCounter, 1); + + /* Update the absolute cursor so that ma_resource_manager_data_stream_get_cursor_in_pcm_frames() returns the new position. */ + ma_resource_manager_data_stream_set_absolute_cursor(pDataStream, frameIndex); + + /* + We need to clear our currently loaded pages so that the stream starts playback from the new seek point as soon as possible. These are for the purpose of the public + API and will be ignored by the seek job. The seek job will operate on the assumption that both pages have been marked as invalid and the cursor is at the start of + the first page. + */ + pDataStream->relativeCursor = 0; + pDataStream->currentPageIndex = 0; + ma_atomic_exchange_32(&pDataStream->isPageValid[0], MA_FALSE); + ma_atomic_exchange_32(&pDataStream->isPageValid[1], MA_FALSE); + + /* Make sure the data stream is not marked as at the end or else if we seek in response to hitting the end, we won't be able to read any more data. */ + ma_atomic_exchange_32(&pDataStream->isDecoderAtEnd, MA_FALSE); + + /* + The public API is not allowed to touch the internal decoder so we need to use a job to perform the seek. When seeking, the job thread will assume both pages + are invalid and any content contained within them will be discarded and replaced with newly decoded data. 
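+    (Once the job thread has re-seeked the decoder and refilled the pages it decrements seekCounter,
+    at which point reads stop returning MA_BUSY.)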
+ */ + job = ma_job_init(MA_JOB_TYPE_RESOURCE_MANAGER_SEEK_DATA_STREAM); + job.order = ma_resource_manager_data_stream_next_execution_order(pDataStream); + job.data.resourceManager.seekDataStream.pDataStream = pDataStream; + job.data.resourceManager.seekDataStream.frameIndex = frameIndex; + return ma_resource_manager_post_job(pDataStream->pResourceManager, &job); +} + +MA_API ma_result ma_resource_manager_data_stream_get_data_format(ma_resource_manager_data_stream* pDataStream, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate, ma_channel* pChannelMap, size_t channelMapCap) +{ + /* We cannot be using the data source after it's been uninitialized. */ + MA_ASSERT(ma_resource_manager_data_stream_result(pDataStream) != MA_UNAVAILABLE); + + if (pFormat != NULL) { + *pFormat = ma_format_unknown; + } + + if (pChannels != NULL) { + *pChannels = 0; + } + + if (pSampleRate != NULL) { + *pSampleRate = 0; + } + + if (pChannelMap != NULL) { + MA_ZERO_MEMORY(pChannelMap, sizeof(*pChannelMap) * channelMapCap); + } + + if (pDataStream == NULL) { + return MA_INVALID_ARGS; + } + + if (ma_resource_manager_data_stream_result(pDataStream) != MA_SUCCESS) { + return MA_INVALID_OPERATION; + } + + /* + We're being a little bit naughty here and accessing the internal decoder from the public API. The output data format is constant, and we've defined this function + such that the application is responsible for ensuring it's not called while uninitializing so it should be safe. + */ + return ma_data_source_get_data_format(&pDataStream->decoder, pFormat, pChannels, pSampleRate, pChannelMap, channelMapCap); +} + +MA_API ma_result ma_resource_manager_data_stream_get_cursor_in_pcm_frames(ma_resource_manager_data_stream* pDataStream, ma_uint64* pCursor) +{ + ma_result result; + + if (pCursor == NULL) { + return MA_INVALID_ARGS; + } + + *pCursor = 0; + + /* We cannot be using the data source after it's been uninitialized. */ + MA_ASSERT(ma_resource_manager_data_stream_result(pDataStream) != MA_UNAVAILABLE); + + if (pDataStream == NULL) { + return MA_INVALID_ARGS; + } + + /* + If the stream is in an erroneous state we need to return an invalid operation. We can allow + this to be called when the data stream is in a busy state because the caller may have asked + for an initial seek position and it's convenient to return that as the cursor position. + */ + result = ma_resource_manager_data_stream_result(pDataStream); + if (result != MA_SUCCESS && result != MA_BUSY) { + return MA_INVALID_OPERATION; + } + + *pCursor = ma_atomic_load_64(&pDataStream->absoluteCursor); + + return MA_SUCCESS; +} + +MA_API ma_result ma_resource_manager_data_stream_get_length_in_pcm_frames(ma_resource_manager_data_stream* pDataStream, ma_uint64* pLength) +{ + ma_result streamResult; + + if (pLength == NULL) { + return MA_INVALID_ARGS; + } + + *pLength = 0; + + streamResult = ma_resource_manager_data_stream_result(pDataStream); + + /* We cannot be using the data source after it's been uninitialized. */ + MA_ASSERT(streamResult != MA_UNAVAILABLE); + + if (pDataStream == NULL) { + return MA_INVALID_ARGS; + } + + if (streamResult != MA_SUCCESS) { + return streamResult; + } + + /* + We most definitely do not want to be calling ma_decoder_get_length_in_pcm_frames() directly. Instead we want to use a cached value that we + calculated when we initialized it on the job thread. + */ + *pLength = pDataStream->totalLengthInPCMFrames; + if (*pLength == 0) { + return MA_NOT_IMPLEMENTED; /* Some decoders may not have a known length. 
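+    (Some chained or streamed encodings, for example, cannot report a total length up front.)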
*/ + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_resource_manager_data_stream_result(const ma_resource_manager_data_stream* pDataStream) +{ + if (pDataStream == NULL) { + return MA_INVALID_ARGS; + } + + return (ma_result)ma_atomic_load_i32(&pDataStream->result); +} + +MA_API ma_result ma_resource_manager_data_stream_set_looping(ma_resource_manager_data_stream* pDataStream, ma_bool32 isLooping) +{ + return ma_data_source_set_looping(pDataStream, isLooping); +} + +MA_API ma_bool32 ma_resource_manager_data_stream_is_looping(const ma_resource_manager_data_stream* pDataStream) +{ + if (pDataStream == NULL) { + return MA_FALSE; + } + + return ma_atomic_load_32((ma_bool32*)&pDataStream->isLooping); /* Naughty const-cast. Value won't change from here in practice (maybe from another thread). */ +} + +MA_API ma_result ma_resource_manager_data_stream_get_available_frames(ma_resource_manager_data_stream* pDataStream, ma_uint64* pAvailableFrames) +{ + ma_uint32 pageIndex0; + ma_uint32 pageIndex1; + ma_uint32 relativeCursor; + ma_uint64 availableFrames; + + if (pAvailableFrames == NULL) { + return MA_INVALID_ARGS; + } + + *pAvailableFrames = 0; + + if (pDataStream == NULL) { + return MA_INVALID_ARGS; + } + + pageIndex0 = pDataStream->currentPageIndex; + pageIndex1 = (pDataStream->currentPageIndex + 1) & 0x01; + relativeCursor = pDataStream->relativeCursor; + + availableFrames = 0; + if (ma_atomic_load_32(&pDataStream->isPageValid[pageIndex0])) { + availableFrames += ma_atomic_load_32(&pDataStream->pageFrameCount[pageIndex0]) - relativeCursor; + if (ma_atomic_load_32(&pDataStream->isPageValid[pageIndex1])) { + availableFrames += ma_atomic_load_32(&pDataStream->pageFrameCount[pageIndex1]); + } + } + + *pAvailableFrames = availableFrames; + return MA_SUCCESS; +} + + +static ma_result ma_resource_manager_data_source_preinit(ma_resource_manager* pResourceManager, const ma_resource_manager_data_source_config* pConfig, ma_resource_manager_data_source* pDataSource) +{ + if (pDataSource == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pDataSource); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + if (pResourceManager == NULL) { + return MA_INVALID_ARGS; + } + + pDataSource->flags = pConfig->flags; + if (pConfig->isLooping) { + pDataSource->flags |= MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_LOOPING; + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_resource_manager_data_source_init_ex(ma_resource_manager* pResourceManager, const ma_resource_manager_data_source_config* pConfig, ma_resource_manager_data_source* pDataSource) +{ + ma_result result; + + result = ma_resource_manager_data_source_preinit(pResourceManager, pConfig, pDataSource); + if (result != MA_SUCCESS) { + return result; + } + + /* The data source itself is just a data stream or a data buffer. 
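+    The MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_STREAM flag is what selects between the two backends below.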
 */
+    if ((pConfig->flags & MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_STREAM) != 0) {
+        return ma_resource_manager_data_stream_init_ex(pResourceManager, pConfig, &pDataSource->backend.stream);
+    } else {
+        return ma_resource_manager_data_buffer_init_ex(pResourceManager, pConfig, &pDataSource->backend.buffer);
+    }
+}
+
+MA_API ma_result ma_resource_manager_data_source_init(ma_resource_manager* pResourceManager, const char* pName, ma_uint32 flags, const ma_resource_manager_pipeline_notifications* pNotifications, ma_resource_manager_data_source* pDataSource)
+{
+    ma_resource_manager_data_source_config config;
+
+    config = ma_resource_manager_data_source_config_init();
+    config.pFilePath = pName;
+    config.flags = flags;
+    config.pNotifications = pNotifications;
+
+    return ma_resource_manager_data_source_init_ex(pResourceManager, &config, pDataSource);
+}
+
+MA_API ma_result ma_resource_manager_data_source_init_w(ma_resource_manager* pResourceManager, const wchar_t* pName, ma_uint32 flags, const ma_resource_manager_pipeline_notifications* pNotifications, ma_resource_manager_data_source* pDataSource)
+{
+    ma_resource_manager_data_source_config config;
+
+    config = ma_resource_manager_data_source_config_init();
+    config.pFilePathW = pName;
+    config.flags = flags;
+    config.pNotifications = pNotifications;
+
+    return ma_resource_manager_data_source_init_ex(pResourceManager, &config, pDataSource);
+}
+
+MA_API ma_result ma_resource_manager_data_source_init_copy(ma_resource_manager* pResourceManager, const ma_resource_manager_data_source* pExistingDataSource, ma_resource_manager_data_source* pDataSource)
+{
+    ma_result result;
+    ma_resource_manager_data_source_config config;
+
+    if (pExistingDataSource == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    config = ma_resource_manager_data_source_config_init();
+    config.flags = pExistingDataSource->flags;
+
+    result = ma_resource_manager_data_source_preinit(pResourceManager, &config, pDataSource);
+    if (result != MA_SUCCESS) {
+        return result;
+    }
+
+    /* Copying can only be done from data buffers. Streams cannot be copied. */
+    if ((pExistingDataSource->flags & MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_STREAM) != 0) {
+        return MA_INVALID_OPERATION;
+    }
+
+    return ma_resource_manager_data_buffer_init_copy(pResourceManager, &pExistingDataSource->backend.buffer, &pDataSource->backend.buffer);
+}
+
+MA_API ma_result ma_resource_manager_data_source_uninit(ma_resource_manager_data_source* pDataSource)
+{
+    if (pDataSource == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    /* All we need to do is uninitialize the underlying data buffer or data stream. */
+    if ((pDataSource->flags & MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_STREAM) != 0) {
+        return ma_resource_manager_data_stream_uninit(&pDataSource->backend.stream);
+    } else {
+        return ma_resource_manager_data_buffer_uninit(&pDataSource->backend.buffer);
+    }
+}
+
+MA_API ma_result ma_resource_manager_data_source_read_pcm_frames(ma_resource_manager_data_source* pDataSource, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead)
+{
+    /* Safety.
*/ + if (pFramesRead != NULL) { + *pFramesRead = 0; + } + + if (pDataSource == NULL) { + return MA_INVALID_ARGS; + } + + if ((pDataSource->flags & MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_STREAM) != 0) { + return ma_resource_manager_data_stream_read_pcm_frames(&pDataSource->backend.stream, pFramesOut, frameCount, pFramesRead); + } else { + return ma_resource_manager_data_buffer_read_pcm_frames(&pDataSource->backend.buffer, pFramesOut, frameCount, pFramesRead); + } +} + +MA_API ma_result ma_resource_manager_data_source_seek_to_pcm_frame(ma_resource_manager_data_source* pDataSource, ma_uint64 frameIndex) +{ + if (pDataSource == NULL) { + return MA_INVALID_ARGS; + } + + if ((pDataSource->flags & MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_STREAM) != 0) { + return ma_resource_manager_data_stream_seek_to_pcm_frame(&pDataSource->backend.stream, frameIndex); + } else { + return ma_resource_manager_data_buffer_seek_to_pcm_frame(&pDataSource->backend.buffer, frameIndex); + } +} + +MA_API ma_result ma_resource_manager_data_source_map(ma_resource_manager_data_source* pDataSource, void** ppFramesOut, ma_uint64* pFrameCount) +{ + if (pDataSource == NULL) { + return MA_INVALID_ARGS; + } + + if ((pDataSource->flags & MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_STREAM) != 0) { + return ma_resource_manager_data_stream_map(&pDataSource->backend.stream, ppFramesOut, pFrameCount); + } else { + return MA_NOT_IMPLEMENTED; /* Mapping not supported with data buffers. */ + } +} + +MA_API ma_result ma_resource_manager_data_source_unmap(ma_resource_manager_data_source* pDataSource, ma_uint64 frameCount) +{ + if (pDataSource == NULL) { + return MA_INVALID_ARGS; + } + + if ((pDataSource->flags & MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_STREAM) != 0) { + return ma_resource_manager_data_stream_unmap(&pDataSource->backend.stream, frameCount); + } else { + return MA_NOT_IMPLEMENTED; /* Mapping not supported with data buffers. 
*/ + } +} + +MA_API ma_result ma_resource_manager_data_source_get_data_format(ma_resource_manager_data_source* pDataSource, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate, ma_channel* pChannelMap, size_t channelMapCap) +{ + if (pDataSource == NULL) { + return MA_INVALID_ARGS; + } + + if ((pDataSource->flags & MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_STREAM) != 0) { + return ma_resource_manager_data_stream_get_data_format(&pDataSource->backend.stream, pFormat, pChannels, pSampleRate, pChannelMap, channelMapCap); + } else { + return ma_resource_manager_data_buffer_get_data_format(&pDataSource->backend.buffer, pFormat, pChannels, pSampleRate, pChannelMap, channelMapCap); + } +} + +MA_API ma_result ma_resource_manager_data_source_get_cursor_in_pcm_frames(ma_resource_manager_data_source* pDataSource, ma_uint64* pCursor) +{ + if (pDataSource == NULL) { + return MA_INVALID_ARGS; + } + + if ((pDataSource->flags & MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_STREAM) != 0) { + return ma_resource_manager_data_stream_get_cursor_in_pcm_frames(&pDataSource->backend.stream, pCursor); + } else { + return ma_resource_manager_data_buffer_get_cursor_in_pcm_frames(&pDataSource->backend.buffer, pCursor); + } +} + +MA_API ma_result ma_resource_manager_data_source_get_length_in_pcm_frames(ma_resource_manager_data_source* pDataSource, ma_uint64* pLength) +{ + if (pDataSource == NULL) { + return MA_INVALID_ARGS; + } + + if ((pDataSource->flags & MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_STREAM) != 0) { + return ma_resource_manager_data_stream_get_length_in_pcm_frames(&pDataSource->backend.stream, pLength); + } else { + return ma_resource_manager_data_buffer_get_length_in_pcm_frames(&pDataSource->backend.buffer, pLength); + } +} + +MA_API ma_result ma_resource_manager_data_source_result(const ma_resource_manager_data_source* pDataSource) +{ + if (pDataSource == NULL) { + return MA_INVALID_ARGS; + } + + if ((pDataSource->flags & MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_STREAM) != 0) { + return ma_resource_manager_data_stream_result(&pDataSource->backend.stream); + } else { + return ma_resource_manager_data_buffer_result(&pDataSource->backend.buffer); + } +} + +MA_API ma_result ma_resource_manager_data_source_set_looping(ma_resource_manager_data_source* pDataSource, ma_bool32 isLooping) +{ + if (pDataSource == NULL) { + return MA_INVALID_ARGS; + } + + if ((pDataSource->flags & MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_STREAM) != 0) { + return ma_resource_manager_data_stream_set_looping(&pDataSource->backend.stream, isLooping); + } else { + return ma_resource_manager_data_buffer_set_looping(&pDataSource->backend.buffer, isLooping); + } +} + +MA_API ma_bool32 ma_resource_manager_data_source_is_looping(const ma_resource_manager_data_source* pDataSource) +{ + if (pDataSource == NULL) { + return MA_FALSE; + } + + if ((pDataSource->flags & MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_STREAM) != 0) { + return ma_resource_manager_data_stream_is_looping(&pDataSource->backend.stream); + } else { + return ma_resource_manager_data_buffer_is_looping(&pDataSource->backend.buffer); + } +} + +MA_API ma_result ma_resource_manager_data_source_get_available_frames(ma_resource_manager_data_source* pDataSource, ma_uint64* pAvailableFrames) +{ + if (pAvailableFrames == NULL) { + return MA_INVALID_ARGS; + } + + *pAvailableFrames = 0; + + if (pDataSource == NULL) { + return MA_INVALID_ARGS; + } + + if ((pDataSource->flags & MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_STREAM) != 0) { + return 
ma_resource_manager_data_stream_get_available_frames(&pDataSource->backend.stream, pAvailableFrames);
+    } else {
+        return ma_resource_manager_data_buffer_get_available_frames(&pDataSource->backend.buffer, pAvailableFrames);
+    }
+}
+
+
+MA_API ma_result ma_resource_manager_post_job(ma_resource_manager* pResourceManager, const ma_job* pJob)
+{
+    if (pResourceManager == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    return ma_job_queue_post(&pResourceManager->jobQueue, pJob);
+}
+
+MA_API ma_result ma_resource_manager_post_job_quit(ma_resource_manager* pResourceManager)
+{
+    ma_job job = ma_job_init(MA_JOB_TYPE_QUIT);
+    return ma_resource_manager_post_job(pResourceManager, &job);
+}
+
+MA_API ma_result ma_resource_manager_next_job(ma_resource_manager* pResourceManager, ma_job* pJob)
+{
+    if (pResourceManager == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    return ma_job_queue_next(&pResourceManager->jobQueue, pJob);
+}
+
+
+static ma_result ma_job_process__resource_manager__load_data_buffer_node(ma_job* pJob)
+{
+    ma_result result = MA_SUCCESS;
+    ma_resource_manager* pResourceManager;
+    ma_resource_manager_data_buffer_node* pDataBufferNode;
+
+    MA_ASSERT(pJob != NULL);
+
+    pResourceManager = (ma_resource_manager*)pJob->data.resourceManager.loadDataBufferNode.pResourceManager;
+    MA_ASSERT(pResourceManager != NULL);
+
+    pDataBufferNode = (ma_resource_manager_data_buffer_node*)pJob->data.resourceManager.loadDataBufferNode.pDataBufferNode;
+    MA_ASSERT(pDataBufferNode != NULL);
+    MA_ASSERT(pDataBufferNode->isDataOwnedByResourceManager == MA_TRUE);  /* The data should always be owned by the resource manager. */
+
+    /* The data buffer is not getting deleted, but we may be getting executed out of order. If so, we need to push the job back onto the queue and return. */
+    if (pJob->order != ma_atomic_load_32(&pDataBufferNode->executionPointer)) {
+        return ma_resource_manager_post_job(pResourceManager, pJob); /* Attempting to execute out of order. Probably interleaved with a MA_JOB_TYPE_RESOURCE_MANAGER_FREE_DATA_BUFFER job. */
+    }
+
+    /* First thing we need to do is check whether or not the data buffer is getting deleted. If so we just abort. */
+    if (ma_resource_manager_data_buffer_node_result(pDataBufferNode) != MA_BUSY) {
+        result = ma_resource_manager_data_buffer_node_result(pDataBufferNode);    /* The data buffer may be getting deleted before it's even been loaded. */
+        goto done;
+    }
+
+    /*
+    We're ready to start loading. Essentially what we're doing here is initializing the data supply
+    of the node. Once this is complete, data buffers can have their connectors initialized which
+    will allow them to have audio data read from them.
+
+    Note that when the data supply type has been moved away from "unknown", that is when other threads
+    will determine that the node is available for data delivery and the data buffer connectors can be
+    initialized. Therefore, it's important that it is set after the data supply has been initialized.
+    */
+    if ((pJob->data.resourceManager.loadDataBufferNode.flags & MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_DECODE) != 0) {
+        /*
+        Decoding. This is the complex case because we're not going to be doing the entire decoding
+        process here. Instead it's going to be split across multiple jobs and loaded in pages. The
+        reason for this is to evenly distribute decoding time across multiple sounds, rather than
+        having one huge sound hog all the available processing resources.
+
+        The first thing we do is initialize a decoder. This is allocated on the heap and is passed
+        around to the paging jobs.
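+        (Each paging job decodes a single page and then re-posts itself until the decoder reports
+        MA_AT_END, so the work is spread across many small jobs.)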
When the last paging job has completed its processing, it'll + free the decoder for us. + + This job does not do any actual decoding. It instead just posts a PAGE_DATA_BUFFER_NODE job + which is where the actual decoding work will be done. However, once this job is complete, + the node will be in a state where data buffer connectors can be initialized. + */ + ma_decoder* pDecoder; /* <-- Freed on the last page decode. */ + ma_job pageDataBufferNodeJob; + + /* Allocate the decoder by initializing a decoded data supply. */ + result = ma_resource_manager_data_buffer_node_init_supply_decoded(pResourceManager, pDataBufferNode, pJob->data.resourceManager.loadDataBufferNode.pFilePath, pJob->data.resourceManager.loadDataBufferNode.pFilePathW, pJob->data.resourceManager.loadDataBufferNode.flags, &pDecoder); + + /* + Don't ever propagate an MA_BUSY result code or else the resource manager will think the + node is just busy decoding rather than in an error state. This should never happen, but + including this logic for safety just in case. + */ + if (result == MA_BUSY) { + result = MA_ERROR; + } + + if (result != MA_SUCCESS) { + if (pJob->data.resourceManager.loadDataBufferNode.pFilePath != NULL) { + ma_log_postf(ma_resource_manager_get_log(pResourceManager), MA_LOG_LEVEL_WARNING, "Failed to initialize data supply for \"%s\". %s.\n", pJob->data.resourceManager.loadDataBufferNode.pFilePath, ma_result_description(result)); + } else { + #if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L) || defined(_MSC_VER) + ma_log_postf(ma_resource_manager_get_log(pResourceManager), MA_LOG_LEVEL_WARNING, "Failed to initialize data supply for \"%ls\". %s.\n", pJob->data.resourceManager.loadDataBufferNode.pFilePathW, ma_result_description(result)); + #endif + } + + goto done; + } + + /* + At this point the node's data supply is initialized and other threads can start initializing + their data buffer connectors. However, no data will actually be available until we start to + actually decode it. To do this, we need to post a paging job which is where the decoding + work is done. + + Note that if an error occurred at an earlier point, this section will have been skipped. + */ + pageDataBufferNodeJob = ma_job_init(MA_JOB_TYPE_RESOURCE_MANAGER_PAGE_DATA_BUFFER_NODE); + pageDataBufferNodeJob.order = ma_resource_manager_data_buffer_node_next_execution_order(pDataBufferNode); + pageDataBufferNodeJob.data.resourceManager.pageDataBufferNode.pResourceManager = pResourceManager; + pageDataBufferNodeJob.data.resourceManager.pageDataBufferNode.pDataBufferNode = pDataBufferNode; + pageDataBufferNodeJob.data.resourceManager.pageDataBufferNode.pDecoder = pDecoder; + pageDataBufferNodeJob.data.resourceManager.pageDataBufferNode.pDoneNotification = pJob->data.resourceManager.loadDataBufferNode.pDoneNotification; + pageDataBufferNodeJob.data.resourceManager.pageDataBufferNode.pDoneFence = pJob->data.resourceManager.loadDataBufferNode.pDoneFence; + + /* The job has been set up so it can now be posted. */ + result = ma_resource_manager_post_job(pResourceManager, &pageDataBufferNodeJob); + + /* + When we get here, we want to make sure the result code is set to MA_BUSY. The reason for + this is that the result will be copied over to the node's internal result variable. In + this case, since the decoding is still in-progress, we need to make sure the result code + is set to MA_BUSY.
+ */ + if (result != MA_SUCCESS) { + ma_log_postf(ma_resource_manager_get_log(pResourceManager), MA_LOG_LEVEL_ERROR, "Failed to post MA_JOB_TYPE_RESOURCE_MANAGER_PAGE_DATA_BUFFER_NODE job. %s\n", ma_result_description(result)); + ma_decoder_uninit(pDecoder); + ma_free(pDecoder, &pResourceManager->config.allocationCallbacks); + } else { + result = MA_BUSY; + } + } else { + /* No decoding. This is the simple case. We need only read the file content into memory and we're done. */ + result = ma_resource_manager_data_buffer_node_init_supply_encoded(pResourceManager, pDataBufferNode, pJob->data.resourceManager.loadDataBufferNode.pFilePath, pJob->data.resourceManager.loadDataBufferNode.pFilePathW); + } + + +done: + /* File paths are no longer needed. */ + ma_free(pJob->data.resourceManager.loadDataBufferNode.pFilePath, &pResourceManager->config.allocationCallbacks); + ma_free(pJob->data.resourceManager.loadDataBufferNode.pFilePathW, &pResourceManager->config.allocationCallbacks); + + /* + We need to set the result at the very end to ensure no other threads try reading the data before we've fully initialized the object. Other threads + are going to be inspecting this variable to determine whether or not they're ready to read data. We can only change the result if it's set to MA_BUSY + because otherwise we may be changing away from an error code which would be bad. An example is if the application creates a data buffer, but then + immediately deletes it before we've got to this point. In this case, pDataBuffer->result will be MA_UNAVAILABLE, and setting it to MA_SUCCESS or any + other error code would cause the buffer to look like it's in a state that it's not. + */ + ma_atomic_compare_and_swap_i32(&pDataBufferNode->result, MA_BUSY, result); + + /* At this point initialization is complete and we can signal the notification if any. */ + if (pJob->data.resourceManager.loadDataBufferNode.pInitNotification != NULL) { + ma_async_notification_signal(pJob->data.resourceManager.loadDataBufferNode.pInitNotification); + } + if (pJob->data.resourceManager.loadDataBufferNode.pInitFence != NULL) { + ma_fence_release(pJob->data.resourceManager.loadDataBufferNode.pInitFence); + } + + /* If we have a success result it means we've fully loaded the buffer. This will happen in the non-decoding case. */ + if (result != MA_BUSY) { + if (pJob->data.resourceManager.loadDataBufferNode.pDoneNotification != NULL) { + ma_async_notification_signal(pJob->data.resourceManager.loadDataBufferNode.pDoneNotification); + } + if (pJob->data.resourceManager.loadDataBufferNode.pDoneFence != NULL) { + ma_fence_release(pJob->data.resourceManager.loadDataBufferNode.pDoneFence); + } + } + + /* Increment the node's execution pointer so that the next jobs can be processed. This is how we keep decoding of pages in-order. */ + ma_atomic_fetch_add_32(&pDataBufferNode->executionPointer, 1); + + /* A busy result should be considered successful from the point of view of the job system.
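+ The job itself ran fine; MA_BUSY only means the node still has more pages to decode, which + will be handled by the paging job posted above.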
*/ + if (result == MA_BUSY) { + result = MA_SUCCESS; + } + + return result; +} + +static ma_result ma_job_process__resource_manager__free_data_buffer_node(ma_job* pJob) +{ + ma_resource_manager* pResourceManager; + ma_resource_manager_data_buffer_node* pDataBufferNode; + + MA_ASSERT(pJob != NULL); + + pResourceManager = (ma_resource_manager*)pJob->data.resourceManager.freeDataBufferNode.pResourceManager; + MA_ASSERT(pResourceManager != NULL); + + pDataBufferNode = (ma_resource_manager_data_buffer_node*)pJob->data.resourceManager.freeDataBufferNode.pDataBufferNode; + MA_ASSERT(pDataBufferNode != NULL); + + if (pJob->order != ma_atomic_load_32(&pDataBufferNode->executionPointer)) { + return ma_resource_manager_post_job(pResourceManager, pJob); /* Out of order. */ + } + + ma_resource_manager_data_buffer_node_free(pResourceManager, pDataBufferNode); + + /* The event needs to be signalled last. */ + if (pJob->data.resourceManager.freeDataBufferNode.pDoneNotification != NULL) { + ma_async_notification_signal(pJob->data.resourceManager.freeDataBufferNode.pDoneNotification); + } + + if (pJob->data.resourceManager.freeDataBufferNode.pDoneFence != NULL) { + ma_fence_release(pJob->data.resourceManager.freeDataBufferNode.pDoneFence); + } + + ma_atomic_fetch_add_32(&pDataBufferNode->executionPointer, 1); + return MA_SUCCESS; +} + +static ma_result ma_job_process__resource_manager__page_data_buffer_node(ma_job* pJob) +{ + ma_result result = MA_SUCCESS; + ma_resource_manager* pResourceManager; + ma_resource_manager_data_buffer_node* pDataBufferNode; + + MA_ASSERT(pJob != NULL); + + pResourceManager = (ma_resource_manager*)pJob->data.resourceManager.pageDataBufferNode.pResourceManager; + MA_ASSERT(pResourceManager != NULL); + + pDataBufferNode = (ma_resource_manager_data_buffer_node*)pJob->data.resourceManager.pageDataBufferNode.pDataBufferNode; + MA_ASSERT(pDataBufferNode != NULL); + + if (pJob->order != ma_atomic_load_32(&pDataBufferNode->executionPointer)) { + return ma_resource_manager_post_job(pResourceManager, pJob); /* Out of order. */ + } + + /* Don't do any more decoding if the data buffer has started the uninitialization process. */ + result = ma_resource_manager_data_buffer_node_result(pDataBufferNode); + if (result != MA_BUSY) { + goto done; + } + + /* We're ready to decode the next page. */ + result = ma_resource_manager_data_buffer_node_decode_next_page(pResourceManager, pDataBufferNode, (ma_decoder*)pJob->data.resourceManager.pageDataBufferNode.pDecoder); + + /* + If we have a success code by this point, we want to post another job. We're going to set the + result back to MA_BUSY to make it clear that there's still more to load. + */ + if (result == MA_SUCCESS) { + ma_job newJob; + newJob = *pJob; /* Everything is the same as the input job, except the execution order. */ + newJob.order = ma_resource_manager_data_buffer_node_next_execution_order(pDataBufferNode); /* We need a fresh execution order. */ + + result = ma_resource_manager_post_job(pResourceManager, &newJob); + + /* Since the sound isn't yet fully decoded we want the status to be set to busy. */ + if (result == MA_SUCCESS) { + result = MA_BUSY; + } + } + +done: + /* If there's still more to decode the result will be set to MA_BUSY. Otherwise we can free the decoder. 
*/ + if (result != MA_BUSY) { + ma_decoder_uninit((ma_decoder*)pJob->data.resourceManager.pageDataBufferNode.pDecoder); + ma_free(pJob->data.resourceManager.pageDataBufferNode.pDecoder, &pResourceManager->config.allocationCallbacks); + } + + /* If we reached the end we need to treat it as successful. */ + if (result == MA_AT_END) { + result = MA_SUCCESS; + } + + /* Make sure we set the result of node in case some error occurred. */ + ma_atomic_compare_and_swap_i32(&pDataBufferNode->result, MA_BUSY, result); + + /* Signal the notification after setting the result in case the notification callback wants to inspect the result code. */ + if (result != MA_BUSY) { + if (pJob->data.resourceManager.pageDataBufferNode.pDoneNotification != NULL) { + ma_async_notification_signal(pJob->data.resourceManager.pageDataBufferNode.pDoneNotification); + } + + if (pJob->data.resourceManager.pageDataBufferNode.pDoneFence != NULL) { + ma_fence_release(pJob->data.resourceManager.pageDataBufferNode.pDoneFence); + } + } + + ma_atomic_fetch_add_32(&pDataBufferNode->executionPointer, 1); + return result; +} + + +static ma_result ma_job_process__resource_manager__load_data_buffer(ma_job* pJob) +{ + ma_result result = MA_SUCCESS; + ma_resource_manager* pResourceManager; + ma_resource_manager_data_buffer* pDataBuffer; + ma_resource_manager_data_supply_type dataSupplyType = ma_resource_manager_data_supply_type_unknown; + ma_bool32 isConnectorInitialized = MA_FALSE; + + /* + All we're doing here is checking if the node has finished loading. If not, we just re-post the job + and keep waiting. Otherwise we increment the execution counter and set the buffer's result code. + */ + MA_ASSERT(pJob != NULL); + + pDataBuffer = (ma_resource_manager_data_buffer*)pJob->data.resourceManager.loadDataBuffer.pDataBuffer; + MA_ASSERT(pDataBuffer != NULL); + + pResourceManager = pDataBuffer->pResourceManager; + + if (pJob->order != ma_atomic_load_32(&pDataBuffer->executionPointer)) { + return ma_resource_manager_post_job(pResourceManager, pJob); /* Attempting to execute out of order. Probably interleaved with a MA_JOB_TYPE_RESOURCE_MANAGER_FREE_DATA_BUFFER job. */ + } + + /* + First thing we need to do is check whether or not the data buffer is getting deleted. If so we + just abort, but making sure we increment the execution pointer. + */ + result = ma_resource_manager_data_buffer_result(pDataBuffer); + if (result != MA_BUSY) { + goto done; /* <-- This will ensure the execution pointer is incremented. */ + } else { + result = MA_SUCCESS; /* <-- Make sure this is reset. */ + (void)result; /* <-- This is to suppress a static analysis diagnostic about "result" not being used. But for safety when I do future maintenance I don't want to delete that assignment. */ + } + + /* Try initializing the connector if we haven't already. */ + isConnectorInitialized = ma_resource_manager_data_buffer_has_connector(pDataBuffer); + if (isConnectorInitialized == MA_FALSE) { + dataSupplyType = ma_resource_manager_data_buffer_node_get_data_supply_type(pDataBuffer->pNode); + + if (dataSupplyType != ma_resource_manager_data_supply_type_unknown) { + /* We can now initialize the connector. If this fails, we need to abort. It's very rare for this to fail. */ + ma_resource_manager_data_source_config dataSourceConfig; /* For setting initial looping state and range. 
*/ + dataSourceConfig = ma_resource_manager_data_source_config_init(); + dataSourceConfig.rangeBegInPCMFrames = pJob->data.resourceManager.loadDataBuffer.rangeBegInPCMFrames; + dataSourceConfig.rangeEndInPCMFrames = pJob->data.resourceManager.loadDataBuffer.rangeEndInPCMFrames; + dataSourceConfig.loopPointBegInPCMFrames = pJob->data.resourceManager.loadDataBuffer.loopPointBegInPCMFrames; + dataSourceConfig.loopPointEndInPCMFrames = pJob->data.resourceManager.loadDataBuffer.loopPointEndInPCMFrames; + dataSourceConfig.isLooping = pJob->data.resourceManager.loadDataBuffer.isLooping; + + result = ma_resource_manager_data_buffer_init_connector(pDataBuffer, &dataSourceConfig, pJob->data.resourceManager.loadDataBuffer.pInitNotification, pJob->data.resourceManager.loadDataBuffer.pInitFence); + if (result != MA_SUCCESS) { + ma_log_postf(ma_resource_manager_get_log(pResourceManager), MA_LOG_LEVEL_ERROR, "Failed to initialize connector for data buffer. %s.\n", ma_result_description(result)); + goto done; + } + } else { + /* Don't have a known data supply type. Most likely the data buffer node is still loading, but it could be that an error occurred. */ + } + } else { + /* The connector is already initialized. Nothing to do here. */ + } + + /* + If the data node is still loading, we need to repost the job and *not* increment the execution + pointer (i.e. we need to not fall through to the "done" label). + + There is a hole between here and where the data connector is initialized in which the data + buffer node may have finished initializing. We need to check for this by checking the result of + the data buffer node and whether or not we had an unknown data supply type at the time of + trying to initialize the data connector. + */ + result = ma_resource_manager_data_buffer_node_result(pDataBuffer->pNode); + if (result == MA_BUSY || (result == MA_SUCCESS && isConnectorInitialized == MA_FALSE && dataSupplyType == ma_resource_manager_data_supply_type_unknown)) { + return ma_resource_manager_post_job(pResourceManager, pJob); + } + +done: + /* Only move away from a busy code so that we don't trash any existing error codes. */ + ma_atomic_compare_and_swap_i32(&pDataBuffer->result, MA_BUSY, result); + + /* Only signal the other threads after the result has been set just for cleanliness sake. */ + if (pJob->data.resourceManager.loadDataBuffer.pDoneNotification != NULL) { + ma_async_notification_signal(pJob->data.resourceManager.loadDataBuffer.pDoneNotification); + } + if (pJob->data.resourceManager.loadDataBuffer.pDoneFence != NULL) { + ma_fence_release(pJob->data.resourceManager.loadDataBuffer.pDoneFence); + } + + /* + If at this point the data buffer has not had its connector initialized, it means the + notification event was never signalled which means we need to signal it here.
+ */ + if (ma_resource_manager_data_buffer_has_connector(pDataBuffer) == MA_FALSE && result != MA_SUCCESS) { + if (pJob->data.resourceManager.loadDataBuffer.pInitNotification != NULL) { + ma_async_notification_signal(pJob->data.resourceManager.loadDataBuffer.pInitNotification); + } + if (pJob->data.resourceManager.loadDataBuffer.pInitFence != NULL) { + ma_fence_release(pJob->data.resourceManager.loadDataBuffer.pInitFence); + } + } + + ma_atomic_fetch_add_32(&pDataBuffer->executionPointer, 1); + return result; +} + +static ma_result ma_job_process__resource_manager__free_data_buffer(ma_job* pJob) +{ + ma_resource_manager* pResourceManager; + ma_resource_manager_data_buffer* pDataBuffer; + + MA_ASSERT(pJob != NULL); + + pDataBuffer = (ma_resource_manager_data_buffer*)pJob->data.resourceManager.freeDataBuffer.pDataBuffer; + MA_ASSERT(pDataBuffer != NULL); + + pResourceManager = pDataBuffer->pResourceManager; + + if (pJob->order != ma_atomic_load_32(&pDataBuffer->executionPointer)) { + return ma_resource_manager_post_job(pResourceManager, pJob); /* Out of order. */ + } + + ma_resource_manager_data_buffer_uninit_internal(pDataBuffer); + + /* The event needs to be signalled last. */ + if (pJob->data.resourceManager.freeDataBuffer.pDoneNotification != NULL) { + ma_async_notification_signal(pJob->data.resourceManager.freeDataBuffer.pDoneNotification); + } + + if (pJob->data.resourceManager.freeDataBuffer.pDoneFence != NULL) { + ma_fence_release(pJob->data.resourceManager.freeDataBuffer.pDoneFence); + } + + ma_atomic_fetch_add_32(&pDataBuffer->executionPointer, 1); + return MA_SUCCESS; +} + +static ma_result ma_job_process__resource_manager__load_data_stream(ma_job* pJob) +{ + ma_result result = MA_SUCCESS; + ma_decoder_config decoderConfig; + ma_uint32 pageBufferSizeInBytes; + ma_resource_manager* pResourceManager; + ma_resource_manager_data_stream* pDataStream; + + MA_ASSERT(pJob != NULL); + + pDataStream = (ma_resource_manager_data_stream*)pJob->data.resourceManager.loadDataStream.pDataStream; + MA_ASSERT(pDataStream != NULL); + + pResourceManager = pDataStream->pResourceManager; + + if (pJob->order != ma_atomic_load_32(&pDataStream->executionPointer)) { + return ma_resource_manager_post_job(pResourceManager, pJob); /* Out of order. */ + } + + if (ma_resource_manager_data_stream_result(pDataStream) != MA_BUSY) { + result = MA_INVALID_OPERATION; /* Most likely the data stream is being uninitialized. */ + goto done; + } + + /* We need to initialize the decoder first so we can determine the size of the pages. */ + decoderConfig = ma_resource_manager__init_decoder_config(pResourceManager); + + if (pJob->data.resourceManager.loadDataStream.pFilePath != NULL) { + result = ma_decoder_init_vfs(pResourceManager->config.pVFS, pJob->data.resourceManager.loadDataStream.pFilePath, &decoderConfig, &pDataStream->decoder); + } else { + result = ma_decoder_init_vfs_w(pResourceManager->config.pVFS, pJob->data.resourceManager.loadDataStream.pFilePathW, &decoderConfig, &pDataStream->decoder); + } + if (result != MA_SUCCESS) { + goto done; + } + + /* Retrieve the total length of the file before marking the decoder as loaded. */ + if ((pDataStream->flags & MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_UNKNOWN_LENGTH) == 0) { + result = ma_decoder_get_length_in_pcm_frames(&pDataStream->decoder, &pDataStream->totalLengthInPCMFrames); + if (result != MA_SUCCESS) { + goto done; /* Failed to retrieve the length. 
*/ + } + } else { + pDataStream->totalLengthInPCMFrames = 0; + } + + /* + Only mark the decoder as initialized when the length of the decoder has been retrieved because that can possibly require a scan over the whole file + and we don't want to have another thread trying to access the decoder while it's scanning. + */ + pDataStream->isDecoderInitialized = MA_TRUE; + + /* We have the decoder so we can now initialize our page buffer. */ + pageBufferSizeInBytes = ma_resource_manager_data_stream_get_page_size_in_frames(pDataStream) * 2 * ma_get_bytes_per_frame(pDataStream->decoder.outputFormat, pDataStream->decoder.outputChannels); + + pDataStream->pPageData = ma_malloc(pageBufferSizeInBytes, &pResourceManager->config.allocationCallbacks); + if (pDataStream->pPageData == NULL) { + ma_decoder_uninit(&pDataStream->decoder); + result = MA_OUT_OF_MEMORY; + goto done; + } + + /* Seek to our initial seek point before filling the initial pages. */ + ma_decoder_seek_to_pcm_frame(&pDataStream->decoder, pJob->data.resourceManager.loadDataStream.initialSeekPoint); + + /* We have our decoder and our page buffer, so now we need to fill our pages. */ + ma_resource_manager_data_stream_fill_pages(pDataStream); + + /* And now we're done. We want to make sure the result is MA_SUCCESS. */ + result = MA_SUCCESS; + +done: + ma_free(pJob->data.resourceManager.loadDataStream.pFilePath, &pResourceManager->config.allocationCallbacks); + ma_free(pJob->data.resourceManager.loadDataStream.pFilePathW, &pResourceManager->config.allocationCallbacks); + + /* We can only change the status away from MA_BUSY. If it's set to anything else it means an error has occurred somewhere or the uninitialization process has started (most likely). */ + ma_atomic_compare_and_swap_i32(&pDataStream->result, MA_BUSY, result); + + /* Only signal the other threads after the result has been set just for cleanliness sake. */ + if (pJob->data.resourceManager.loadDataStream.pInitNotification != NULL) { + ma_async_notification_signal(pJob->data.resourceManager.loadDataStream.pInitNotification); + } + if (pJob->data.resourceManager.loadDataStream.pInitFence != NULL) { + ma_fence_release(pJob->data.resourceManager.loadDataStream.pInitFence); + } + + ma_atomic_fetch_add_32(&pDataStream->executionPointer, 1); + return result; +} + +static ma_result ma_job_process__resource_manager__free_data_stream(ma_job* pJob) +{ + ma_resource_manager* pResourceManager; + ma_resource_manager_data_stream* pDataStream; + + MA_ASSERT(pJob != NULL); + + pDataStream = (ma_resource_manager_data_stream*)pJob->data.resourceManager.freeDataStream.pDataStream; + MA_ASSERT(pDataStream != NULL); + + pResourceManager = pDataStream->pResourceManager; + + if (pJob->order != ma_atomic_load_32(&pDataStream->executionPointer)) { + return ma_resource_manager_post_job(pResourceManager, pJob); /* Out of order. */ + } + + /* If our status is not MA_UNAVAILABLE we have a bug somewhere. */ + MA_ASSERT(ma_resource_manager_data_stream_result(pDataStream) == MA_UNAVAILABLE); + + if (pDataStream->isDecoderInitialized) { + ma_decoder_uninit(&pDataStream->decoder); + } + + if (pDataStream->pPageData != NULL) { + ma_free(pDataStream->pPageData, &pResourceManager->config.allocationCallbacks); + pDataStream->pPageData = NULL; /* Just in case... */ + } + + ma_data_source_uninit(&pDataStream->ds); + + /* The event needs to be signalled last. 
*/ + if (pJob->data.resourceManager.freeDataStream.pDoneNotification != NULL) { + ma_async_notification_signal(pJob->data.resourceManager.freeDataStream.pDoneNotification); + } + if (pJob->data.resourceManager.freeDataStream.pDoneFence != NULL) { + ma_fence_release(pJob->data.resourceManager.freeDataStream.pDoneFence); + } + + /*ma_atomic_fetch_add_32(&pDataStream->executionPointer, 1);*/ + return MA_SUCCESS; +} + +static ma_result ma_job_process__resource_manager__page_data_stream(ma_job* pJob) +{ + ma_result result = MA_SUCCESS; + ma_resource_manager* pResourceManager; + ma_resource_manager_data_stream* pDataStream; + + MA_ASSERT(pJob != NULL); + + pDataStream = (ma_resource_manager_data_stream*)pJob->data.resourceManager.pageDataStream.pDataStream; + MA_ASSERT(pDataStream != NULL); + + pResourceManager = pDataStream->pResourceManager; + + if (pJob->order != ma_atomic_load_32(&pDataStream->executionPointer)) { + return ma_resource_manager_post_job(pResourceManager, pJob); /* Out of order. */ + } + + /* For streams, the status should be MA_SUCCESS. */ + if (ma_resource_manager_data_stream_result(pDataStream) != MA_SUCCESS) { + result = MA_INVALID_OPERATION; + goto done; + } + + ma_resource_manager_data_stream_fill_page(pDataStream, pJob->data.resourceManager.pageDataStream.pageIndex); + +done: + ma_atomic_fetch_add_32(&pDataStream->executionPointer, 1); + return result; +} + +static ma_result ma_job_process__resource_manager__seek_data_stream(ma_job* pJob) +{ + ma_result result = MA_SUCCESS; + ma_resource_manager* pResourceManager; + ma_resource_manager_data_stream* pDataStream; + + MA_ASSERT(pJob != NULL); + + pDataStream = (ma_resource_manager_data_stream*)pJob->data.resourceManager.seekDataStream.pDataStream; + MA_ASSERT(pDataStream != NULL); + + pResourceManager = pDataStream->pResourceManager; + + if (pJob->order != ma_atomic_load_32(&pDataStream->executionPointer)) { + return ma_resource_manager_post_job(pResourceManager, pJob); /* Out of order. */ + } + + /* For streams the status should be MA_SUCCESS for this to do anything. */ + if (ma_resource_manager_data_stream_result(pDataStream) != MA_SUCCESS || pDataStream->isDecoderInitialized == MA_FALSE) { + result = MA_INVALID_OPERATION; + goto done; + } + + /* + With seeking we just assume both pages are invalid and the relative frame cursor at position 0. This is basically exactly the same as loading, except + instead of initializing the decoder, we seek to a frame. + */ + ma_decoder_seek_to_pcm_frame(&pDataStream->decoder, pJob->data.resourceManager.seekDataStream.frameIndex); + + /* After seeking we'll need to reload the pages. */ + ma_resource_manager_data_stream_fill_pages(pDataStream); + + /* We need to let the public API know that we're done seeking. */ + ma_atomic_fetch_sub_32(&pDataStream->seekCounter, 1); + +done: + ma_atomic_fetch_add_32(&pDataStream->executionPointer, 1); + return result; +} + +MA_API ma_result ma_resource_manager_process_job(ma_resource_manager* pResourceManager, ma_job* pJob) +{ + if (pResourceManager == NULL || pJob == NULL) { + return MA_INVALID_ARGS; + } + + return ma_job_process(pJob); +} + +MA_API ma_result ma_resource_manager_process_next_job(ma_resource_manager* pResourceManager) +{ + ma_result result; + ma_job job; + + if (pResourceManager == NULL) { + return MA_INVALID_ARGS; + } + + /* This will return MA_CANCELLED if the next job is a quit job. 
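+ + For reference, a custom job thread driving this function could be built along these lines + (a hypothetical sketch; a real loop might also want to handle other error codes): + + for (;;) { + if (ma_resource_manager_process_next_job(pResourceManager) == MA_CANCELLED) { + break; <-- A quit job was posted with ma_resource_manager_post_job_quit(). + } + }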
*/ + result = ma_resource_manager_next_job(pResourceManager, &job); + if (result != MA_SUCCESS) { + return result; + } + + return ma_job_process(&job); +} +#else +/* We'll get here if the resource manager is being excluded from the build. We need to define the job processing callbacks as no-ops. */ +static ma_result ma_job_process__resource_manager__load_data_buffer_node(ma_job* pJob) { return ma_job_process__noop(pJob); } +static ma_result ma_job_process__resource_manager__free_data_buffer_node(ma_job* pJob) { return ma_job_process__noop(pJob); } +static ma_result ma_job_process__resource_manager__page_data_buffer_node(ma_job* pJob) { return ma_job_process__noop(pJob); } +static ma_result ma_job_process__resource_manager__load_data_buffer(ma_job* pJob) { return ma_job_process__noop(pJob); } +static ma_result ma_job_process__resource_manager__free_data_buffer(ma_job* pJob) { return ma_job_process__noop(pJob); } +static ma_result ma_job_process__resource_manager__load_data_stream(ma_job* pJob) { return ma_job_process__noop(pJob); } +static ma_result ma_job_process__resource_manager__free_data_stream(ma_job* pJob) { return ma_job_process__noop(pJob); } +static ma_result ma_job_process__resource_manager__page_data_stream(ma_job* pJob) { return ma_job_process__noop(pJob); } +static ma_result ma_job_process__resource_manager__seek_data_stream(ma_job* pJob) { return ma_job_process__noop(pJob); } +#endif /* MA_NO_RESOURCE_MANAGER */ + + +#ifndef MA_NO_NODE_GRAPH + +static ma_stack* ma_stack_init(size_t sizeInBytes, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_stack* pStack; + + if (sizeInBytes == 0) { + return NULL; + } + + pStack = (ma_stack*)ma_malloc(sizeof(*pStack) - sizeof(pStack->_data) + sizeInBytes, pAllocationCallbacks); + if (pStack == NULL) { + return NULL; + } + + pStack->offset = 0; + pStack->sizeInBytes = sizeInBytes; + + return pStack; +} + +static void ma_stack_uninit(ma_stack* pStack, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pStack == NULL) { + return; + } + + ma_free(pStack, pAllocationCallbacks); +} + +static void* ma_stack_alloc(ma_stack* pStack, size_t sz) +{ + /* The size of the allocation is stored in the memory directly before the pointer. This needs to include padding to keep it aligned to ma_uintptr */ + void* p = (void*)((char*)pStack->_data + pStack->offset); + size_t* pSize = (size_t*)p; + + sz = (sz + (sizeof(ma_uintptr) - 1)) & ~(sizeof(ma_uintptr) - 1); /* Padding. */ + if (pStack->offset + sz + sizeof(size_t) > pStack->sizeInBytes) { + return NULL; /* Out of memory. */ + } + + pStack->offset += sz + sizeof(size_t); + + *pSize = sz; + return (void*)((char*)p + sizeof(size_t)); +} + +static void ma_stack_free(ma_stack* pStack, void* p) +{ + size_t* pSize; + + if (p == NULL) { + return; + } + + pSize = (size_t*)p - 1; + pStack->offset -= *pSize + sizeof(size_t); +} + + + +/* 10ms @ 48K = 480. Must never exceed 65535. 
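+ (48000 frames per second * 0.010 seconds = 480 frames. The 65535 ceiling exists because the + cache size is ultimately stored in a ma_uint16 - see ma_node_config_get_cache_size_in_frames().)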
*/ +#ifndef MA_DEFAULT_NODE_CACHE_CAP_IN_FRAMES_PER_BUS +#define MA_DEFAULT_NODE_CACHE_CAP_IN_FRAMES_PER_BUS 480 +#endif + +#ifndef MA_DEFAULT_PREMIX_STACK_SIZE_PER_CHANNEL +#define MA_DEFAULT_PREMIX_STACK_SIZE_PER_CHANNEL 524288 +#endif + +static ma_result ma_node_read_pcm_frames(ma_node* pNode, ma_uint32 outputBusIndex, float* pFramesOut, ma_uint32 frameCount, ma_uint32* pFramesRead, ma_uint64 globalTime); + +MA_API void ma_debug_fill_pcm_frames_with_sine_wave(float* pFramesOut, ma_uint32 frameCount, ma_format format, ma_uint32 channels, ma_uint32 sampleRate) +{ + #ifndef MA_NO_GENERATION + { + ma_waveform_config waveformConfig; + ma_waveform waveform; + + waveformConfig = ma_waveform_config_init(format, channels, sampleRate, ma_waveform_type_sine, 1.0, 400); + ma_waveform_init(&waveformConfig, &waveform); + ma_waveform_read_pcm_frames(&waveform, pFramesOut, frameCount, NULL); + } + #else + { + (void)pFramesOut; + (void)frameCount; + (void)format; + (void)channels; + (void)sampleRate; + #if defined(MA_DEBUG_OUTPUT) + { + #if _MSC_VER + #pragma message ("ma_debug_fill_pcm_frames_with_sine_wave() will do nothing because MA_NO_GENERATION is enabled.") + #endif + } + #endif + } + #endif +} + + + +MA_API ma_node_graph_config ma_node_graph_config_init(ma_uint32 channels) +{ + ma_node_graph_config config; + + MA_ZERO_OBJECT(&config); + config.channels = channels; + config.processingSizeInFrames = 0; + + return config; +} + + +static void ma_node_graph_set_is_reading(ma_node_graph* pNodeGraph, ma_bool32 isReading) +{ + MA_ASSERT(pNodeGraph != NULL); + ma_atomic_exchange_32(&pNodeGraph->isReading, isReading); +} + +#if 0 +static ma_bool32 ma_node_graph_is_reading(ma_node_graph* pNodeGraph) +{ + MA_ASSERT(pNodeGraph != NULL); + return ma_atomic_load_32(&pNodeGraph->isReading); +} +#endif + + +static void ma_node_graph_node_process_pcm_frames(ma_node* pNode, const float** ppFramesIn, ma_uint32* pFrameCountIn, float** ppFramesOut, ma_uint32* pFrameCountOut) +{ + ma_node_graph* pNodeGraph = (ma_node_graph*)pNode; + ma_uint64 framesRead; + + ma_node_graph_read_pcm_frames(pNodeGraph, ppFramesOut[0], *pFrameCountOut, &framesRead); + + *pFrameCountOut = (ma_uint32)framesRead; /* Safe cast. */ + + (void)ppFramesIn; + (void)pFrameCountIn; +} + +static ma_node_vtable g_node_graph_node_vtable = +{ + ma_node_graph_node_process_pcm_frames, + NULL, /* onGetRequiredInputFrameCount */ + 0, /* 0 input buses. */ + 1, /* 1 output bus. */ + 0 /* Flags. */ +}; + +static void ma_node_graph_endpoint_process_pcm_frames(ma_node* pNode, const float** ppFramesIn, ma_uint32* pFrameCountIn, float** ppFramesOut, ma_uint32* pFrameCountOut) +{ + MA_ASSERT(pNode != NULL); + MA_ASSERT(ma_node_get_input_bus_count(pNode) == 1); + MA_ASSERT(ma_node_get_output_bus_count(pNode) == 1); + + /* Input channel count needs to be the same as the output channel count. */ + MA_ASSERT(ma_node_get_input_channels(pNode, 0) == ma_node_get_output_channels(pNode, 0)); + + /* We don't need to do anything here because it's a passthrough. */ + (void)pNode; + (void)ppFramesIn; + (void)pFrameCountIn; + (void)ppFramesOut; + (void)pFrameCountOut; + +#if 0 + /* The data has already been mixed. We just need to move it to the output buffer. 
*/ + if (ppFramesIn != NULL) { + ma_copy_pcm_frames(ppFramesOut[0], ppFramesIn[0], *pFrameCountOut, ma_format_f32, ma_node_get_output_channels(pNode, 0)); + } +#endif +} + +static ma_node_vtable g_node_graph_endpoint_vtable = +{ + ma_node_graph_endpoint_process_pcm_frames, + NULL, /* onGetRequiredInputFrameCount */ + 1, /* 1 input bus. */ + 1, /* 1 output bus. */ + MA_NODE_FLAG_PASSTHROUGH /* Flags. The endpoint is a passthrough. */ +}; + +MA_API ma_result ma_node_graph_init(const ma_node_graph_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_node_graph* pNodeGraph) +{ + ma_result result; + ma_node_config baseConfig; + ma_node_config endpointConfig; + + if (pNodeGraph == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pNodeGraph); + pNodeGraph->processingSizeInFrames = pConfig->processingSizeInFrames; + + /* Base node so we can use the node graph as a node into another graph. */ + baseConfig = ma_node_config_init(); + baseConfig.vtable = &g_node_graph_node_vtable; + baseConfig.pOutputChannels = &pConfig->channels; + + result = ma_node_init(pNodeGraph, &baseConfig, pAllocationCallbacks, &pNodeGraph->base); + if (result != MA_SUCCESS) { + return result; + } + + + /* Endpoint. */ + endpointConfig = ma_node_config_init(); + endpointConfig.vtable = &g_node_graph_endpoint_vtable; + endpointConfig.pInputChannels = &pConfig->channels; + endpointConfig.pOutputChannels = &pConfig->channels; + + result = ma_node_init(pNodeGraph, &endpointConfig, pAllocationCallbacks, &pNodeGraph->endpoint); + if (result != MA_SUCCESS) { + ma_node_uninit(&pNodeGraph->base, pAllocationCallbacks); + return result; + } + + + /* Processing cache. */ + if (pConfig->processingSizeInFrames > 0) { + pNodeGraph->pProcessingCache = (float*)ma_malloc(pConfig->processingSizeInFrames * pConfig->channels * sizeof(float), pAllocationCallbacks); + if (pNodeGraph->pProcessingCache == NULL) { + ma_node_uninit(&pNodeGraph->endpoint, pAllocationCallbacks); + ma_node_uninit(&pNodeGraph->base, pAllocationCallbacks); + return MA_OUT_OF_MEMORY; + } + } + + + /* + We need a pre-mix stack. The size of this stack is configurable via the config. The default value depends on the channel count. 
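+ For example, with the default MA_DEFAULT_PREMIX_STACK_SIZE_PER_CHANNEL of 524288 bytes (512KB), + a stereo graph will allocate a 1MB pre-mix stack.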
+ */ + { + size_t preMixStackSizeInBytes = pConfig->preMixStackSizeInBytes; + if (preMixStackSizeInBytes == 0) { + preMixStackSizeInBytes = pConfig->channels * MA_DEFAULT_PREMIX_STACK_SIZE_PER_CHANNEL; + } + + pNodeGraph->pPreMixStack = ma_stack_init(preMixStackSizeInBytes, pAllocationCallbacks); + if (pNodeGraph->pPreMixStack == NULL) { + ma_node_uninit(&pNodeGraph->endpoint, pAllocationCallbacks); + ma_node_uninit(&pNodeGraph->base, pAllocationCallbacks); + if (pNodeGraph->pProcessingCache != NULL) { + ma_free(pNodeGraph->pProcessingCache, pAllocationCallbacks); + } + + return MA_OUT_OF_MEMORY; + } + } + + + return MA_SUCCESS; +} + +MA_API void ma_node_graph_uninit(ma_node_graph* pNodeGraph, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pNodeGraph == NULL) { + return; + } + + ma_node_uninit(&pNodeGraph->endpoint, pAllocationCallbacks); + ma_node_uninit(&pNodeGraph->base, pAllocationCallbacks); + + if (pNodeGraph->pProcessingCache != NULL) { + ma_free(pNodeGraph->pProcessingCache, pAllocationCallbacks); + pNodeGraph->pProcessingCache = NULL; + } + + if (pNodeGraph->pPreMixStack != NULL) { + ma_stack_uninit(pNodeGraph->pPreMixStack, pAllocationCallbacks); + pNodeGraph->pPreMixStack = NULL; + } +} + +MA_API ma_node* ma_node_graph_get_endpoint(ma_node_graph* pNodeGraph) +{ + if (pNodeGraph == NULL) { + return NULL; + } + + return &pNodeGraph->endpoint; +} + +MA_API ma_result ma_node_graph_read_pcm_frames(ma_node_graph* pNodeGraph, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead) +{ + ma_result result = MA_SUCCESS; + ma_uint64 totalFramesRead; + ma_uint32 channels; + + if (pFramesRead != NULL) { + *pFramesRead = 0; /* Safety. */ + } + + if (pNodeGraph == NULL) { + return MA_INVALID_ARGS; + } + + channels = ma_node_get_output_channels(&pNodeGraph->endpoint, 0); + + + /* We'll be nice and try to do a full read of all frameCount frames. */ + totalFramesRead = 0; + while (totalFramesRead < frameCount) { + ma_uint32 framesJustRead; + ma_uint64 framesToRead; + float* pRunningFramesOut; + + framesToRead = frameCount - totalFramesRead; + if (framesToRead > 0xFFFFFFFF) { + framesToRead = 0xFFFFFFFF; + } + + pRunningFramesOut = (float*)ma_offset_pcm_frames_ptr(pFramesOut, totalFramesRead, ma_format_f32, channels); + + /* If there's anything in the cache, consume that first. */ + if (pNodeGraph->processingCacheFramesRemaining > 0) { + ma_uint32 framesToReadFromCache; + + framesToReadFromCache = (ma_uint32)framesToRead; + if (framesToReadFromCache > pNodeGraph->processingCacheFramesRemaining) { + framesToReadFromCache = pNodeGraph->processingCacheFramesRemaining; + } + + MA_COPY_MEMORY(pRunningFramesOut, pNodeGraph->pProcessingCache, framesToReadFromCache * channels * sizeof(float)); + MA_MOVE_MEMORY(pNodeGraph->pProcessingCache, pNodeGraph->pProcessingCache + (framesToReadFromCache * channels), (pNodeGraph->processingCacheFramesRemaining - framesToReadFromCache) * channels * sizeof(float)); + pNodeGraph->processingCacheFramesRemaining -= framesToReadFromCache; + + totalFramesRead += framesToReadFromCache; + continue; + } else { + /* + If processingSizeInFrames is non-zero, we need to make sure we always read in chunks of that size. If the frame count is less than + that, we need to read into the cache and then continue on. 
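+ For example, with processingSizeInFrames set to 480 and a request for 100 frames, a full + 480-frame chunk is read into the cache, the first 100 frames are copied out, and the remaining + 380 frames are served from the cache on subsequent calls.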
+ */ + float* pReadDst = pRunningFramesOut; + + if (pNodeGraph->processingSizeInFrames > 0) { + if (framesToRead < pNodeGraph->processingSizeInFrames) { + pReadDst = pNodeGraph->pProcessingCache; /* We need to read into the cache because otherwise we'll overflow the output buffer. */ + } + + framesToRead = pNodeGraph->processingSizeInFrames; + } + + ma_node_graph_set_is_reading(pNodeGraph, MA_TRUE); + { + result = ma_node_read_pcm_frames(&pNodeGraph->endpoint, 0, pReadDst, (ma_uint32)framesToRead, &framesJustRead, ma_node_get_time(&pNodeGraph->endpoint)); + } + ma_node_graph_set_is_reading(pNodeGraph, MA_FALSE); + + /* + Do not increment the total frames read counter if we read into the cache. We use this to determine how many frames have + been written to the final output buffer. + */ + if (pReadDst == pNodeGraph->pProcessingCache) { + /* We read into the cache. */ + pNodeGraph->processingCacheFramesRemaining = framesJustRead; + } else { + /* We read straight into the output buffer. */ + totalFramesRead += framesJustRead; + } + + if (result != MA_SUCCESS) { + break; + } + + /* Abort if we weren't able to read any frames or else we risk getting stuck in a loop. */ + if (framesJustRead == 0) { + break; + } + } + } + + /* Let's go ahead and silence any leftover frames just for some added safety to ensure the caller doesn't try emitting garbage out of the speakers. */ + if (totalFramesRead < frameCount) { + ma_silence_pcm_frames(ma_offset_pcm_frames_ptr(pFramesOut, totalFramesRead, ma_format_f32, channels), (frameCount - totalFramesRead), ma_format_f32, channels); + } + + if (pFramesRead != NULL) { + *pFramesRead = totalFramesRead; + } + + return result; +} + +MA_API ma_uint32 ma_node_graph_get_channels(const ma_node_graph* pNodeGraph) +{ + if (pNodeGraph == NULL) { + return 0; + } + + return ma_node_get_output_channels(&pNodeGraph->endpoint, 0); +} + +MA_API ma_uint64 ma_node_graph_get_time(const ma_node_graph* pNodeGraph) +{ + if (pNodeGraph == NULL) { + return 0; + } + + return ma_node_get_time(&pNodeGraph->endpoint); /* Global time is just the local time of the endpoint. */ +} + +MA_API ma_result ma_node_graph_set_time(ma_node_graph* pNodeGraph, ma_uint64 globalTime) +{ + if (pNodeGraph == NULL) { + return MA_INVALID_ARGS; + } + + return ma_node_set_time(&pNodeGraph->endpoint, globalTime); /* Global time is just the local time of the endpoint. */ +} + + +#define MA_NODE_OUTPUT_BUS_FLAG_HAS_READ 0x01 /* Whether or not this bus is ready to read more data. Only used on nodes with multiple output buses. */ + +static ma_result ma_node_output_bus_init(ma_node* pNode, ma_uint32 outputBusIndex, ma_uint32 channels, ma_node_output_bus* pOutputBus) +{ + MA_ASSERT(pOutputBus != NULL); + MA_ASSERT(outputBusIndex < MA_MAX_NODE_BUS_COUNT); + MA_ASSERT(outputBusIndex < ma_node_get_output_bus_count(pNode)); + MA_ASSERT(channels < 256); + + MA_ZERO_OBJECT(pOutputBus); + + if (channels == 0) { + return MA_INVALID_ARGS; + } + + pOutputBus->pNode = pNode; + pOutputBus->outputBusIndex = (ma_uint8)outputBusIndex; + pOutputBus->channels = (ma_uint8)channels; + pOutputBus->flags = MA_NODE_OUTPUT_BUS_FLAG_HAS_READ; /* <-- Important that this flag is set by default.
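+ With the flag set from the start, a freshly initialized bus is treated as having already read + its cached data, so the first real read will trigger fresh processing.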
*/ + pOutputBus->volume = 1; + + return MA_SUCCESS; +} + +static void ma_node_output_bus_lock(ma_node_output_bus* pOutputBus) +{ + ma_spinlock_lock(&pOutputBus->lock); +} + +static void ma_node_output_bus_unlock(ma_node_output_bus* pOutputBus) +{ + ma_spinlock_unlock(&pOutputBus->lock); +} + + +static ma_uint32 ma_node_output_bus_get_channels(const ma_node_output_bus* pOutputBus) +{ + return pOutputBus->channels; +} + + +static void ma_node_output_bus_set_has_read(ma_node_output_bus* pOutputBus, ma_bool32 hasRead) +{ + if (hasRead) { + ma_atomic_fetch_or_32(&pOutputBus->flags, MA_NODE_OUTPUT_BUS_FLAG_HAS_READ); + } else { + ma_atomic_fetch_and_32(&pOutputBus->flags, (ma_uint32)~MA_NODE_OUTPUT_BUS_FLAG_HAS_READ); + } +} + +static ma_bool32 ma_node_output_bus_has_read(ma_node_output_bus* pOutputBus) +{ + return (ma_atomic_load_32(&pOutputBus->flags) & MA_NODE_OUTPUT_BUS_FLAG_HAS_READ) != 0; +} + + +static void ma_node_output_bus_set_is_attached(ma_node_output_bus* pOutputBus, ma_bool32 isAttached) +{ + ma_atomic_exchange_32(&pOutputBus->isAttached, isAttached); +} + +static ma_bool32 ma_node_output_bus_is_attached(ma_node_output_bus* pOutputBus) +{ + return ma_atomic_load_32(&pOutputBus->isAttached); +} + + +static ma_result ma_node_output_bus_set_volume(ma_node_output_bus* pOutputBus, float volume) +{ + MA_ASSERT(pOutputBus != NULL); + + if (volume < 0.0f) { + volume = 0.0f; + } + + ma_atomic_exchange_f32(&pOutputBus->volume, volume); + + return MA_SUCCESS; +} + +static float ma_node_output_bus_get_volume(const ma_node_output_bus* pOutputBus) +{ + return ma_atomic_load_f32((float*)&pOutputBus->volume); +} + + +static ma_result ma_node_input_bus_init(ma_uint32 channels, ma_node_input_bus* pInputBus) +{ + MA_ASSERT(pInputBus != NULL); + MA_ASSERT(channels < 256); + + MA_ZERO_OBJECT(pInputBus); + + if (channels == 0) { + return MA_INVALID_ARGS; + } + + pInputBus->channels = (ma_uint8)channels; + + return MA_SUCCESS; +} + +static void ma_node_input_bus_lock(ma_node_input_bus* pInputBus) +{ + MA_ASSERT(pInputBus != NULL); + + ma_spinlock_lock(&pInputBus->lock); +} + +static void ma_node_input_bus_unlock(ma_node_input_bus* pInputBus) +{ + MA_ASSERT(pInputBus != NULL); + + ma_spinlock_unlock(&pInputBus->lock); +} + + +static void ma_node_input_bus_next_begin(ma_node_input_bus* pInputBus) +{ + ma_atomic_fetch_add_32(&pInputBus->nextCounter, 1); +} + +static void ma_node_input_bus_next_end(ma_node_input_bus* pInputBus) +{ + ma_atomic_fetch_sub_32(&pInputBus->nextCounter, 1); +} + +static ma_uint32 ma_node_input_bus_get_next_counter(ma_node_input_bus* pInputBus) +{ + return ma_atomic_load_32(&pInputBus->nextCounter); +} + + +static ma_uint32 ma_node_input_bus_get_channels(const ma_node_input_bus* pInputBus) +{ + return pInputBus->channels; +} + + +static void ma_node_input_bus_detach__no_output_bus_lock(ma_node_input_bus* pInputBus, ma_node_output_bus* pOutputBus) +{ + MA_ASSERT(pInputBus != NULL); + MA_ASSERT(pOutputBus != NULL); + + /* + Mark the output bus as detached first. This will prevent future iterations on the audio thread + from iterating this output bus. + */ + ma_node_output_bus_set_is_attached(pOutputBus, MA_FALSE); + + /* + We cannot use the output bus lock here since it'll be getting used at a higher level, but we do + still need to use the input bus lock since we'll be updating pointers on two different output + buses. The same rules apply here as the attaching case. Although we're using a lock here, we're + *not* using a lock when iterating over the list in the audio thread. 
We therefore need to craft + this in a way such that the iteration on the audio thread doesn't break. + + The first thing to do is swap out the "next" pointer of the previous output bus with the + new "next" output bus. This is the operation that matters for iteration on the audio thread. + After that, the previous pointer on the new "next" pointer needs to be updated, after which + point the linked list will be in a good state. + */ + ma_node_input_bus_lock(pInputBus); + { + ma_node_output_bus* pOldPrev = (ma_node_output_bus*)ma_atomic_load_ptr(&pOutputBus->pPrev); + ma_node_output_bus* pOldNext = (ma_node_output_bus*)ma_atomic_load_ptr(&pOutputBus->pNext); + + if (pOldPrev != NULL) { + ma_atomic_exchange_ptr(&pOldPrev->pNext, pOldNext); /* <-- This is where the output bus is detached from the list. */ + } + if (pOldNext != NULL) { + ma_atomic_exchange_ptr(&pOldNext->pPrev, pOldPrev); /* <-- This is required for detachment. */ + } + } + ma_node_input_bus_unlock(pInputBus); + + /* At this point the output bus is detached and the linked list is completely unaware of it. Reset some data for safety. */ + ma_atomic_exchange_ptr(&pOutputBus->pNext, NULL); /* Using atomic exchanges here, mainly for the benefit of analysis tools which don't always recognize spinlocks. */ + ma_atomic_exchange_ptr(&pOutputBus->pPrev, NULL); /* As above. */ + pOutputBus->pInputNode = NULL; + pOutputBus->inputNodeInputBusIndex = 0; + + + /* + For thread-safety reasons, we don't want to be returning from this straight away. We need to + wait for the audio thread to finish with the output bus. There's two things we need to wait + for. The first is the part that selects the next output bus in the list, and the other is the + part that reads from the output bus. Basically all we're doing is waiting for the input bus + to stop referencing the output bus. + + We're doing this part last because we want the section above to run while the audio thread + is finishing up with the output bus, just for efficiency reasons. We marked the output bus as + detached right at the top of this function which is going to prevent the audio thread from + iterating the output bus again. + */ + + /* Part 1: Wait for the current iteration to complete. */ + while (ma_node_input_bus_get_next_counter(pInputBus) > 0) { + ma_yield(); + } + + /* Part 2: Wait for any reads to complete. */ + while (ma_atomic_load_32(&pOutputBus->refCount) > 0) { + ma_yield(); + } + + /* + At this point we're done detaching and we can be guaranteed that the audio thread is not going + to attempt to reference this output bus again (until attached again). + */ +} + +#if 0 /* Not used at the moment, but leaving here in case I need it later. */ +static void ma_node_input_bus_detach(ma_node_input_bus* pInputBus, ma_node_output_bus* pOutputBus) +{ + MA_ASSERT(pInputBus != NULL); + MA_ASSERT(pOutputBus != NULL); + + ma_node_output_bus_lock(pOutputBus); + { + ma_node_input_bus_detach__no_output_bus_lock(pInputBus, pOutputBus); + } + ma_node_output_bus_unlock(pOutputBus); +} +#endif + +static void ma_node_input_bus_attach(ma_node_input_bus* pInputBus, ma_node_output_bus* pOutputBus, ma_node* pNewInputNode, ma_uint32 inputNodeInputBusIndex) +{ + MA_ASSERT(pInputBus != NULL); + MA_ASSERT(pOutputBus != NULL); + + ma_node_output_bus_lock(pOutputBus); + { + ma_node_output_bus* pOldInputNode = (ma_node_output_bus*)ma_atomic_load_ptr(&pOutputBus->pInputNode); + + /* Detach from any existing attachment first if necessary. 
*/ + if (pOldInputNode != NULL) { + ma_node_input_bus_detach__no_output_bus_lock(pInputBus, pOutputBus); + } + + /* + At this point we can be sure the output bus is not attached to anything. The linked list in the + old input bus has been updated so that pOutputBus will not get iterated again. + */ + pOutputBus->pInputNode = pNewInputNode; /* No need for an atomic assignment here because modification of this variable always happens within a lock. */ + pOutputBus->inputNodeInputBusIndex = (ma_uint8)inputNodeInputBusIndex; + + /* + Now we need to attach the output bus to the linked list. This involves updating two pointers on + two different output buses so I'm going to go ahead and keep this simple and just use a lock. + There are ways to do this without a lock, but it's just too hard to maintain for its value. + + Although we're locking here, it's important to remember that we're *not* locking when iterating + and reading audio data since that'll be running on the audio thread. As a result we need to be + careful how we craft this so that we don't break iteration. What we're going to do is always + attach the new item so that it becomes the first item in the list. That way, as we're iterating + we won't break any links in the list and iteration will continue safely. The detaching case will + also be crafted in a way that doesn't break list iteration. It's important to remember to use + atomic exchanges here since no locking is happening on the audio thread during iteration. + */ + ma_node_input_bus_lock(pInputBus); + { + ma_node_output_bus* pNewPrev = &pInputBus->head; + ma_node_output_bus* pNewNext = (ma_node_output_bus*)ma_atomic_load_ptr(&pInputBus->head.pNext); + + /* Update the local output bus. */ + ma_atomic_exchange_ptr(&pOutputBus->pPrev, pNewPrev); + ma_atomic_exchange_ptr(&pOutputBus->pNext, pNewNext); + + /* Update the other output buses to point back to the local output bus. */ + ma_atomic_exchange_ptr(&pInputBus->head.pNext, pOutputBus); /* <-- This is where the output bus is actually attached to the input bus. */ + + /* Do the previous pointer last. This is only used for detachment. */ + if (pNewNext != NULL) { + ma_atomic_exchange_ptr(&pNewNext->pPrev, pOutputBus); + } + } + ma_node_input_bus_unlock(pInputBus); + + /* + Mark the node as attached last. This is used to control whether or not the output bus will be + iterated on the audio thread. Mainly required for detachment purposes. + */ + ma_node_output_bus_set_is_attached(pOutputBus, MA_TRUE); + } + ma_node_output_bus_unlock(pOutputBus); +} + +static ma_node_output_bus* ma_node_input_bus_next(ma_node_input_bus* pInputBus, ma_node_output_bus* pOutputBus) +{ + ma_node_output_bus* pNext; + + MA_ASSERT(pInputBus != NULL); + + if (pOutputBus == NULL) { + return NULL; + } + + ma_node_input_bus_next_begin(pInputBus); + { + pNext = pOutputBus; + for (;;) { + pNext = (ma_node_output_bus*)ma_atomic_load_ptr(&pNext->pNext); + if (pNext == NULL) { + break; /* Reached the end. */ + } + + if (ma_node_output_bus_is_attached(pNext) == MA_FALSE) { + continue; /* The node is not attached. Keep checking. */ + } + + /* The next node has been selected. */ + break; + } + + /* We need to increment the reference count of the selected node. */ + if (pNext != NULL) { + ma_atomic_fetch_add_32(&pNext->refCount, 1); + } + + /* The previous node is no longer being referenced.
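+ This decrement pairs with the increment above, which was performed when this output bus was + returned by the previous call to ma_node_input_bus_first() or ma_node_input_bus_next().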
*/ + ma_atomic_fetch_sub_32(&pOutputBus->refCount, 1); + } + ma_node_input_bus_next_end(pInputBus); + + return pNext; +} + +static ma_node_output_bus* ma_node_input_bus_first(ma_node_input_bus* pInputBus) +{ + return ma_node_input_bus_next(pInputBus, &pInputBus->head); +} + + + +static ma_result ma_node_input_bus_read_pcm_frames(ma_node* pInputNode, ma_node_input_bus* pInputBus, float* pFramesOut, ma_uint32 frameCount, ma_uint32* pFramesRead, ma_uint64 globalTime) +{ + ma_result result = MA_SUCCESS; + ma_node_output_bus* pOutputBus; + ma_node_output_bus* pFirst; + ma_uint32 inputChannels; + ma_bool32 doesOutputBufferHaveContent = MA_FALSE; + + /* + This will be called from the audio thread which means we can't be doing any locking. Basically, + this function will not perform any locking, whereas attaching and detaching will, but crafted in + such a way that we don't need to perform any locking here. The important thing to remember is + to always iterate in a forward direction. + + In order to process any data we need to first read from all input buses. That's where this + function comes in. This iterates over each of the attachments and accumulates/mixes them. We + also convert the channels to the node's output channel count before mixing. We want to do this + channel conversion so that the caller of this function can invoke the processing callback + without having to do it themselves. + + When we iterate over each of the attachments on the input bus, we need to read as much data as + we can from each of them so that we don't end up with holes between each of the attachments. To + do this, we need to read from each attachment in a loop and read as many frames as we can, up + to `frameCount`. + */ + MA_ASSERT(pInputNode != NULL); + MA_ASSERT(pFramesRead != NULL); /* pFramesRead is critical and must always be specified. On input it's undefined and on output it'll be set to the number of frames actually read. */ + + *pFramesRead = 0; /* Safety. */ + + inputChannels = ma_node_input_bus_get_channels(pInputBus); + + /* + We need to be careful with how we call ma_node_input_bus_first() and ma_node_input_bus_next(). They + are both critical to our lock-free thread-safety system. We can only call ma_node_input_bus_first() + once per iteration, however we have an optimization that checks whether or not it's the first item in + the list. We therefore need to store a pointer to the first item rather than repeatedly calling + ma_node_input_bus_first(). It's safe to keep hold of this pointer, so long as we don't dereference it + after calling ma_node_input_bus_next(), which we won't be. + */ + pFirst = ma_node_input_bus_first(pInputBus); + if (pFirst == NULL) { + return MA_SUCCESS; /* No attachments. Read nothing. */ + } + + for (pOutputBus = pFirst; pOutputBus != NULL; pOutputBus = ma_node_input_bus_next(pInputBus, pOutputBus)) { + ma_uint32 framesProcessed = 0; + ma_bool32 isSilentOutput = MA_FALSE; + + MA_ASSERT(pOutputBus->pNode != NULL); + MA_ASSERT(((ma_node_base*)pOutputBus->pNode)->vtable != NULL); + + isSilentOutput = (((ma_node_base*)pOutputBus->pNode)->vtable->flags & MA_NODE_FLAG_SILENT_OUTPUT) != 0; + + if (pFramesOut != NULL) { + /* Read. */ + while (framesProcessed < frameCount) { + float* pRunningFramesOut; + ma_uint32 framesToRead; + ma_uint32 framesJustRead = 0; + + framesToRead = frameCount - framesProcessed; + pRunningFramesOut = ma_offset_pcm_frames_ptr_f32(pFramesOut, framesProcessed, inputChannels); + + if (doesOutputBufferHaveContent == MA_FALSE) { + /* Fast path. First attachment.
We just read straight into the output buffer (no mixing required). */ + result = ma_node_read_pcm_frames(pOutputBus->pNode, pOutputBus->outputBusIndex, pRunningFramesOut, framesToRead, &framesJustRead, globalTime + framesProcessed); + } else { + /* Slow path. Not the first attachment. Mixing required. */ + ma_uint32 preMixBufferCapInFrames = ((ma_node_base*)pInputNode)->cachedDataCapInFramesPerBus; + float* pPreMixBuffer = (float*)ma_stack_alloc(((ma_node_base*)pInputNode)->pNodeGraph->pPreMixStack, preMixBufferCapInFrames * inputChannels * sizeof(float)); + + if (pPreMixBuffer == NULL) { + /* + If you're hitting this assert it means you've got an unusually deep chain of nodes, you've got an excessively large processing + size, or you have a combination of both, and as a result have run out of stack space. You can increase this using the + preMixStackSizeInBytes variable in ma_node_graph_config. If you're using ma_engine, you can do it via the preMixStackSizeInBytes + variable in ma_engine_config. It defaults to 512KB per output channel. + */ + MA_ASSERT(MA_FALSE); + } else { + if (framesToRead > preMixBufferCapInFrames) { + framesToRead = preMixBufferCapInFrames; + } + + result = ma_node_read_pcm_frames(pOutputBus->pNode, pOutputBus->outputBusIndex, pPreMixBuffer, framesToRead, &framesJustRead, globalTime + framesProcessed); + if (result == MA_SUCCESS || result == MA_AT_END) { + if (isSilentOutput == MA_FALSE) { /* Don't mix if the node outputs silence. */ + ma_mix_pcm_frames_f32(pRunningFramesOut, pPreMixBuffer, framesJustRead, inputChannels, /*volume*/1); + } + } + + /* The pre-mix buffer is no longer required. */ + ma_stack_free(((ma_node_base*)pInputNode)->pNodeGraph->pPreMixStack, pPreMixBuffer); + pPreMixBuffer = NULL; + } + } + + framesProcessed += framesJustRead; + + /* If we reached the end or otherwise failed to read any data we need to finish up with this output node. */ + if (result != MA_SUCCESS) { + break; + } + + /* If we didn't read anything, abort so we don't get stuck in a loop. */ + if (framesJustRead == 0) { + break; + } + } + + /* If it's the first attachment we didn't do any mixing. Any leftover samples need to be silenced. */ + if (pOutputBus == pFirst && framesProcessed < frameCount) { + ma_silence_pcm_frames(ma_offset_pcm_frames_ptr(pFramesOut, framesProcessed, ma_format_f32, inputChannels), (frameCount - framesProcessed), ma_format_f32, inputChannels); + } + + if (isSilentOutput == MA_FALSE) { + doesOutputBufferHaveContent = MA_TRUE; + } + } else { + /* Seek. */ + ma_node_read_pcm_frames(pOutputBus->pNode, pOutputBus->outputBusIndex, NULL, frameCount, &framesProcessed, globalTime); + } + } + + /* If we didn't output anything, output silence. */ + if (doesOutputBufferHaveContent == MA_FALSE && pFramesOut != NULL) { + ma_silence_pcm_frames(pFramesOut, frameCount, ma_format_f32, inputChannels); + } + + /* In this path we always "process" the entire amount. */ + *pFramesRead = frameCount; + + return result; +} + + +MA_API ma_node_config ma_node_config_init(void) +{ + ma_node_config config; + + MA_ZERO_OBJECT(&config); + config.initialState = ma_node_state_started; /* Nodes are started by default. 
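+ Set initialState to ma_node_state_stopped, or call ma_node_set_state() after initialization, + if the node should not produce audio straight away.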
*/
+    config.inputBusCount  = MA_NODE_BUS_COUNT_UNKNOWN;
+    config.outputBusCount = MA_NODE_BUS_COUNT_UNKNOWN;
+
+    return config;
+}
+
+static ma_uint16 ma_node_config_get_cache_size_in_frames(const ma_node_config* pConfig, const ma_node_graph* pNodeGraph)
+{
+    ma_uint32 cacheSizeInFrames;
+
+    (void)pConfig;
+
+    if (pNodeGraph->processingSizeInFrames > 0) {
+        cacheSizeInFrames = pNodeGraph->processingSizeInFrames;
+    } else {
+        cacheSizeInFrames = MA_DEFAULT_NODE_CACHE_CAP_IN_FRAMES_PER_BUS;
+    }
+
+    if (cacheSizeInFrames > 0xFFFF) {
+        cacheSizeInFrames = 0xFFFF;
+    }
+
+    return (ma_uint16)cacheSizeInFrames;
+}
+
+
+
+static ma_result ma_node_detach_full(ma_node* pNode);
+
+static float* ma_node_get_cached_input_ptr(ma_node* pNode, ma_uint32 inputBusIndex)
+{
+    ma_node_base* pNodeBase = (ma_node_base*)pNode;
+    ma_uint32 iInputBus;
+    float* pBasePtr;
+
+    MA_ASSERT(pNodeBase != NULL);
+
+    /* Input data is stored at the front of the buffer. */
+    pBasePtr = pNodeBase->pCachedData;
+    for (iInputBus = 0; iInputBus < inputBusIndex; iInputBus += 1) {
+        pBasePtr += pNodeBase->cachedDataCapInFramesPerBus * ma_node_input_bus_get_channels(&pNodeBase->pInputBuses[iInputBus]);
+    }
+
+    return pBasePtr;
+}
+
+static float* ma_node_get_cached_output_ptr(ma_node* pNode, ma_uint32 outputBusIndex)
+{
+    ma_node_base* pNodeBase = (ma_node_base*)pNode;
+    ma_uint32 iInputBus;
+    ma_uint32 iOutputBus;
+    float* pBasePtr;
+
+    MA_ASSERT(pNodeBase != NULL);
+
+    /* Cached output data starts after the input data. */
+    pBasePtr = pNodeBase->pCachedData;
+    for (iInputBus = 0; iInputBus < ma_node_get_input_bus_count(pNodeBase); iInputBus += 1) {
+        pBasePtr += pNodeBase->cachedDataCapInFramesPerBus * ma_node_input_bus_get_channels(&pNodeBase->pInputBuses[iInputBus]);
+    }
+
+    for (iOutputBus = 0; iOutputBus < outputBusIndex; iOutputBus += 1) {
+        pBasePtr += pNodeBase->cachedDataCapInFramesPerBus * ma_node_output_bus_get_channels(&pNodeBase->pOutputBuses[iOutputBus]);
+    }
+
+    return pBasePtr;
+}
+
+
+typedef struct
+{
+    size_t sizeInBytes;
+    size_t inputBusOffset;
+    size_t outputBusOffset;
+    size_t cachedDataOffset;
+    ma_uint32 inputBusCount;    /* So it doesn't have to be calculated twice. */
+    ma_uint32 outputBusCount;   /* So it doesn't have to be calculated twice. */
+} ma_node_heap_layout;
+
+static ma_result ma_node_translate_bus_counts(const ma_node_config* pConfig, ma_uint32* pInputBusCount, ma_uint32* pOutputBusCount)
+{
+    ma_uint32 inputBusCount;
+    ma_uint32 outputBusCount;
+
+    MA_ASSERT(pConfig         != NULL);
+    MA_ASSERT(pInputBusCount  != NULL);
+    MA_ASSERT(pOutputBusCount != NULL);
+
+    /* Bus counts are determined by the vtable, unless they're set to `MA_NODE_BUS_COUNT_UNKNOWN`, in which case they're taken from the config. */
+    if (pConfig->vtable->inputBusCount == MA_NODE_BUS_COUNT_UNKNOWN) {
+        inputBusCount = pConfig->inputBusCount;
+    } else {
+        inputBusCount = pConfig->vtable->inputBusCount;
+
+        if (pConfig->inputBusCount != MA_NODE_BUS_COUNT_UNKNOWN && pConfig->inputBusCount != pConfig->vtable->inputBusCount) {
+            return MA_INVALID_ARGS; /* Invalid configuration. You must not specify a conflicting bus count between the node's config and the vtable. */
+        }
+    }
+
+    if (pConfig->vtable->outputBusCount == MA_NODE_BUS_COUNT_UNKNOWN) {
+        outputBusCount = pConfig->outputBusCount;
+    } else {
+        outputBusCount = pConfig->vtable->outputBusCount;
+
+        if (pConfig->outputBusCount != MA_NODE_BUS_COUNT_UNKNOWN && pConfig->outputBusCount != pConfig->vtable->outputBusCount) {
+            return MA_INVALID_ARGS; /* Invalid configuration. 
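+            For example (hypothetical values), a vtable that hard-codes outputBusCount at 1
+            combined with a config that sets outputBusCount to 2 is rejected by this check.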
You must not specify a conflicting bus count between the node's config and the vtable. */ + } + } + + /* Bus counts must be within limits. */ + if (inputBusCount > MA_MAX_NODE_BUS_COUNT || outputBusCount > MA_MAX_NODE_BUS_COUNT) { + return MA_INVALID_ARGS; + } + + + /* We must have channel counts for each bus. */ + if ((inputBusCount > 0 && pConfig->pInputChannels == NULL) || (outputBusCount > 0 && pConfig->pOutputChannels == NULL)) { + return MA_INVALID_ARGS; /* You must specify channel counts for each input and output bus. */ + } + + + /* Some special rules for passthrough nodes. */ + if ((pConfig->vtable->flags & MA_NODE_FLAG_PASSTHROUGH) != 0) { + if ((pConfig->vtable->inputBusCount != 0 && pConfig->vtable->inputBusCount != 1) || pConfig->vtable->outputBusCount != 1) { + return MA_INVALID_ARGS; /* Passthrough nodes must have exactly 1 output bus and either 0 or 1 input bus. */ + } + + if (pConfig->pInputChannels[0] != pConfig->pOutputChannels[0]) { + return MA_INVALID_ARGS; /* Passthrough nodes must have the same number of channels between input and output nodes. */ + } + } + + + *pInputBusCount = inputBusCount; + *pOutputBusCount = outputBusCount; + + return MA_SUCCESS; +} + +static ma_result ma_node_get_heap_layout(ma_node_graph* pNodeGraph, const ma_node_config* pConfig, ma_node_heap_layout* pHeapLayout) +{ + ma_result result; + ma_uint32 inputBusCount; + ma_uint32 outputBusCount; + + MA_ASSERT(pHeapLayout != NULL); + + MA_ZERO_OBJECT(pHeapLayout); + + if (pConfig == NULL || pConfig->vtable == NULL || pConfig->vtable->onProcess == NULL) { + return MA_INVALID_ARGS; + } + + result = ma_node_translate_bus_counts(pConfig, &inputBusCount, &outputBusCount); + if (result != MA_SUCCESS) { + return result; + } + + pHeapLayout->sizeInBytes = 0; + + /* Input buses. */ + if (inputBusCount > MA_MAX_NODE_LOCAL_BUS_COUNT) { + pHeapLayout->inputBusOffset = pHeapLayout->sizeInBytes; + pHeapLayout->sizeInBytes += ma_align_64(sizeof(ma_node_input_bus) * inputBusCount); + } else { + pHeapLayout->inputBusOffset = MA_SIZE_MAX; /* MA_SIZE_MAX indicates that no heap allocation is required for the input bus. */ + } + + /* Output buses. */ + if (outputBusCount > MA_MAX_NODE_LOCAL_BUS_COUNT) { + pHeapLayout->outputBusOffset = pHeapLayout->sizeInBytes; + pHeapLayout->sizeInBytes += ma_align_64(sizeof(ma_node_output_bus) * outputBusCount); + } else { + pHeapLayout->outputBusOffset = MA_SIZE_MAX; + } + + /* + Cached audio data. + + We need to allocate memory for caching both input and output data. We have an optimization + where no caching is necessary for specific conditions: + + - The node has 0 inputs and 1 output. + + When a node meets the above conditions, no cache is allocated. + + The size choice for this buffer is a little bit finicky. We don't want to be too wasteful by + allocating too much, but at the same time we want it be large enough so that enough frames can + be processed for each call to ma_node_read_pcm_frames() so that it keeps things efficient. For + now I'm going with 10ms @ 48K which is 480 frames per bus. This is configurable at compile + time. It might also be worth investigating whether or not this can be configured at run time. + */ + if (inputBusCount == 0 && outputBusCount == 1) { + /* Fast path. No cache needed. */ + pHeapLayout->cachedDataOffset = MA_SIZE_MAX; + } else { + /* Slow path. Cache needed. */ + size_t cachedDataSizeInBytes = 0; + ma_uint32 cacheCapInFrames; + ma_uint32 iBus; + + /* The capacity of the cache is based on our callback processing size. 
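+        As a worked example, assuming the default of 480 frames per bus: a node with one stereo
+        input bus and one stereo output bus reserves 480 * 2 * sizeof(float) = 3840 bytes per
+        bus, or 7680 bytes in total, before the final 64-byte alignment.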
*/ + cacheCapInFrames = ma_node_config_get_cache_size_in_frames(pConfig, pNodeGraph); + + for (iBus = 0; iBus < inputBusCount; iBus += 1) { + cachedDataSizeInBytes += cacheCapInFrames * ma_get_bytes_per_frame(ma_format_f32, pConfig->pInputChannels[iBus]); + } + + for (iBus = 0; iBus < outputBusCount; iBus += 1) { + cachedDataSizeInBytes += cacheCapInFrames * ma_get_bytes_per_frame(ma_format_f32, pConfig->pOutputChannels[iBus]); + } + + pHeapLayout->cachedDataOffset = pHeapLayout->sizeInBytes; + pHeapLayout->sizeInBytes += ma_align_64(cachedDataSizeInBytes); + } + + + /* + Not technically part of the heap, but we can output the input and output bus counts so we can + avoid a redundant call to ma_node_translate_bus_counts(). + */ + pHeapLayout->inputBusCount = inputBusCount; + pHeapLayout->outputBusCount = outputBusCount; + + /* Make sure allocation size is aligned. */ + pHeapLayout->sizeInBytes = ma_align_64(pHeapLayout->sizeInBytes); + + return MA_SUCCESS; +} + +MA_API ma_result ma_node_get_heap_size(ma_node_graph* pNodeGraph, const ma_node_config* pConfig, size_t* pHeapSizeInBytes) +{ + ma_result result; + ma_node_heap_layout heapLayout; + + if (pHeapSizeInBytes == NULL) { + return MA_INVALID_ARGS; + } + + *pHeapSizeInBytes = 0; + + result = ma_node_get_heap_layout(pNodeGraph, pConfig, &heapLayout); + if (result != MA_SUCCESS) { + return result; + } + + *pHeapSizeInBytes = heapLayout.sizeInBytes; + + return MA_SUCCESS; +} + +MA_API ma_result ma_node_init_preallocated(ma_node_graph* pNodeGraph, const ma_node_config* pConfig, void* pHeap, ma_node* pNode) +{ + ma_node_base* pNodeBase = (ma_node_base*)pNode; + ma_result result; + ma_node_heap_layout heapLayout; + ma_uint32 iInputBus; + ma_uint32 iOutputBus; + + if (pNodeBase == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pNodeBase); + + result = ma_node_get_heap_layout(pNodeGraph, pConfig, &heapLayout); + if (result != MA_SUCCESS) { + return result; + } + + pNodeBase->_pHeap = pHeap; + MA_ZERO_MEMORY(pHeap, heapLayout.sizeInBytes); + + pNodeBase->pNodeGraph = pNodeGraph; + pNodeBase->vtable = pConfig->vtable; + pNodeBase->state = pConfig->initialState; + pNodeBase->stateTimes[ma_node_state_started] = 0; + pNodeBase->stateTimes[ma_node_state_stopped] = (ma_uint64)(ma_int64)-1; /* Weird casting for VC6 compatibility. */ + pNodeBase->inputBusCount = heapLayout.inputBusCount; + pNodeBase->outputBusCount = heapLayout.outputBusCount; + + if (heapLayout.inputBusOffset != MA_SIZE_MAX) { + pNodeBase->pInputBuses = (ma_node_input_bus*)ma_offset_ptr(pHeap, heapLayout.inputBusOffset); + } else { + pNodeBase->pInputBuses = pNodeBase->_inputBuses; + } + + if (heapLayout.outputBusOffset != MA_SIZE_MAX) { + pNodeBase->pOutputBuses = (ma_node_output_bus*)ma_offset_ptr(pHeap, heapLayout.outputBusOffset); + } else { + pNodeBase->pOutputBuses = pNodeBase->_outputBuses; + } + + if (heapLayout.cachedDataOffset != MA_SIZE_MAX) { + pNodeBase->pCachedData = (float*)ma_offset_ptr(pHeap, heapLayout.cachedDataOffset); + pNodeBase->cachedDataCapInFramesPerBus = ma_node_config_get_cache_size_in_frames(pConfig, pNodeGraph); + } else { + pNodeBase->pCachedData = NULL; + } + + + /* We need to run an initialization step for each input and output bus. 
*/
+    for (iInputBus = 0; iInputBus < ma_node_get_input_bus_count(pNodeBase); iInputBus += 1) {
+        result = ma_node_input_bus_init(pConfig->pInputChannels[iInputBus], &pNodeBase->pInputBuses[iInputBus]);
+        if (result != MA_SUCCESS) {
+            return result;
+        }
+    }
+
+    for (iOutputBus = 0; iOutputBus < ma_node_get_output_bus_count(pNodeBase); iOutputBus += 1) {
+        result = ma_node_output_bus_init(pNodeBase, iOutputBus, pConfig->pOutputChannels[iOutputBus], &pNodeBase->pOutputBuses[iOutputBus]);
+        if (result != MA_SUCCESS) {
+            return result;
+        }
+    }
+
+
+    /* The cached data needs to be initialized to silence (or a sine wave tone if we're debugging). */
+    if (pNodeBase->pCachedData != NULL) {
+        ma_uint32 iBus;
+
+    #if 1   /* Toggle this between 0 and 1 to turn debugging on or off. 1 = fill with silence; 0 = fill with a sine wave for debugging. */
+        /* For safety we'll go ahead and default the buffer to silence. */
+        for (iBus = 0; iBus < ma_node_get_input_bus_count(pNodeBase); iBus += 1) {
+            ma_silence_pcm_frames(ma_node_get_cached_input_ptr(pNode, iBus), pNodeBase->cachedDataCapInFramesPerBus, ma_format_f32, ma_node_input_bus_get_channels(&pNodeBase->pInputBuses[iBus]));
+        }
+        for (iBus = 0; iBus < ma_node_get_output_bus_count(pNodeBase); iBus += 1) {
+            ma_silence_pcm_frames(ma_node_get_cached_output_ptr(pNode, iBus), pNodeBase->cachedDataCapInFramesPerBus, ma_format_f32, ma_node_output_bus_get_channels(&pNodeBase->pOutputBuses[iBus]));
+        }
+    #else
+        /* For debugging. Default to a sine wave. */
+        for (iBus = 0; iBus < ma_node_get_input_bus_count(pNodeBase); iBus += 1) {
+            ma_debug_fill_pcm_frames_with_sine_wave(ma_node_get_cached_input_ptr(pNode, iBus), pNodeBase->cachedDataCapInFramesPerBus, ma_format_f32, ma_node_input_bus_get_channels(&pNodeBase->pInputBuses[iBus]), 48000);
+        }
+        for (iBus = 0; iBus < ma_node_get_output_bus_count(pNodeBase); iBus += 1) {
+            ma_debug_fill_pcm_frames_with_sine_wave(ma_node_get_cached_output_ptr(pNode, iBus), pNodeBase->cachedDataCapInFramesPerBus, ma_format_f32, ma_node_output_bus_get_channels(&pNodeBase->pOutputBuses[iBus]), 48000);
+        }
+    #endif
+    }
+
+    return MA_SUCCESS;
+}
+
+MA_API ma_result ma_node_init(ma_node_graph* pNodeGraph, const ma_node_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_node* pNode)
+{
+    ma_result result;
+    size_t heapSizeInBytes;
+    void* pHeap;
+
+    result = ma_node_get_heap_size(pNodeGraph, pConfig, &heapSizeInBytes);
+    if (result != MA_SUCCESS) {
+        return result;
+    }
+
+    if (heapSizeInBytes > 0) {
+        pHeap = ma_malloc(heapSizeInBytes, pAllocationCallbacks);
+        if (pHeap == NULL) {
+            return MA_OUT_OF_MEMORY;
+        }
+    } else {
+        pHeap = NULL;
+    }
+
+    result = ma_node_init_preallocated(pNodeGraph, pConfig, pHeap, pNode);
+    if (result != MA_SUCCESS) {
+        ma_free(pHeap, pAllocationCallbacks);
+        return result;
+    }
+
+    ((ma_node_base*)pNode)->_ownsHeap = MA_TRUE;
+    return MA_SUCCESS;
+}
+
+MA_API void ma_node_uninit(ma_node* pNode, const ma_allocation_callbacks* pAllocationCallbacks)
+{
+    ma_node_base* pNodeBase = (ma_node_base*)pNode;
+
+    if (pNodeBase == NULL) {
+        return;
+    }
+
+    /*
+    The first thing we need to do is fully detach the node. This will detach all inputs and
+    outputs. We need to do this first because it will sever the connection with the node graph and
+    allow us to complete uninitialization without needing to worry about thread-safety with the
+    audio thread. The detachment process will wait for any local processing of the node to finish. 
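+    In practice this means it is safe to call ma_node_uninit() while the graph is still being
+    read from the audio thread; the wait happens here rather than on the caller's side.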
+ */ + ma_node_detach_full(pNode); + + /* + At this point the node should be completely unreferenced by the node graph and we can finish up + the uninitialization process without needing to worry about thread-safety. + */ + if (pNodeBase->_ownsHeap) { + ma_free(pNodeBase->_pHeap, pAllocationCallbacks); + } +} + +MA_API ma_node_graph* ma_node_get_node_graph(const ma_node* pNode) +{ + if (pNode == NULL) { + return NULL; + } + + return ((const ma_node_base*)pNode)->pNodeGraph; +} + +MA_API ma_uint32 ma_node_get_input_bus_count(const ma_node* pNode) +{ + if (pNode == NULL) { + return 0; + } + + return ((ma_node_base*)pNode)->inputBusCount; +} + +MA_API ma_uint32 ma_node_get_output_bus_count(const ma_node* pNode) +{ + if (pNode == NULL) { + return 0; + } + + return ((ma_node_base*)pNode)->outputBusCount; +} + + +MA_API ma_uint32 ma_node_get_input_channels(const ma_node* pNode, ma_uint32 inputBusIndex) +{ + const ma_node_base* pNodeBase = (const ma_node_base*)pNode; + + if (pNode == NULL) { + return 0; + } + + if (inputBusIndex >= ma_node_get_input_bus_count(pNode)) { + return 0; /* Invalid bus index. */ + } + + return ma_node_input_bus_get_channels(&pNodeBase->pInputBuses[inputBusIndex]); +} + +MA_API ma_uint32 ma_node_get_output_channels(const ma_node* pNode, ma_uint32 outputBusIndex) +{ + const ma_node_base* pNodeBase = (const ma_node_base*)pNode; + + if (pNode == NULL) { + return 0; + } + + if (outputBusIndex >= ma_node_get_output_bus_count(pNode)) { + return 0; /* Invalid bus index. */ + } + + return ma_node_output_bus_get_channels(&pNodeBase->pOutputBuses[outputBusIndex]); +} + + +static ma_result ma_node_detach_full(ma_node* pNode) +{ + ma_node_base* pNodeBase = (ma_node_base*)pNode; + ma_uint32 iInputBus; + + if (pNodeBase == NULL) { + return MA_INVALID_ARGS; + } + + /* + Make sure the node is completely detached first. This will not return until the output bus is + guaranteed to no longer be referenced by the audio thread. + */ + ma_node_detach_all_output_buses(pNode); + + /* + At this point all output buses will have been detached from the graph and we can be guaranteed + that none of its input nodes will be getting processed by the graph. We can detach these + without needing to worry about the audio thread touching them. + */ + for (iInputBus = 0; iInputBus < ma_node_get_input_bus_count(pNode); iInputBus += 1) { + ma_node_input_bus* pInputBus; + ma_node_output_bus* pOutputBus; + + pInputBus = &pNodeBase->pInputBuses[iInputBus]; + + /* + This is important. We cannot be using ma_node_input_bus_first() or ma_node_input_bus_next(). Those + functions are specifically for the audio thread. We'll instead just manually iterate using standard + linked list logic. We don't need to worry about the audio thread referencing these because the step + above severed the connection to the graph. + */ + for (pOutputBus = (ma_node_output_bus*)ma_atomic_load_ptr(&pInputBus->head.pNext); pOutputBus != NULL; pOutputBus = (ma_node_output_bus*)ma_atomic_load_ptr(&pInputBus->head.pNext)) { + ma_node_detach_output_bus(pOutputBus->pNode, pOutputBus->outputBusIndex); /* This won't do any waiting in practice and should be efficient. 
*/ + } + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_node_detach_output_bus(ma_node* pNode, ma_uint32 outputBusIndex) +{ + ma_result result = MA_SUCCESS; + ma_node_base* pNodeBase = (ma_node_base*)pNode; + ma_node_base* pInputNodeBase; + + if (pNode == NULL) { + return MA_INVALID_ARGS; + } + + if (outputBusIndex >= ma_node_get_output_bus_count(pNode)) { + return MA_INVALID_ARGS; /* Invalid output bus index. */ + } + + /* We need to lock the output bus because we need to inspect the input node and grab its input bus. */ + ma_node_output_bus_lock(&pNodeBase->pOutputBuses[outputBusIndex]); + { + pInputNodeBase = (ma_node_base*)pNodeBase->pOutputBuses[outputBusIndex].pInputNode; + if (pInputNodeBase != NULL) { + ma_node_input_bus_detach__no_output_bus_lock(&pInputNodeBase->pInputBuses[pNodeBase->pOutputBuses[outputBusIndex].inputNodeInputBusIndex], &pNodeBase->pOutputBuses[outputBusIndex]); + } + } + ma_node_output_bus_unlock(&pNodeBase->pOutputBuses[outputBusIndex]); + + return result; +} + +MA_API ma_result ma_node_detach_all_output_buses(ma_node* pNode) +{ + ma_uint32 iOutputBus; + + if (pNode == NULL) { + return MA_INVALID_ARGS; + } + + for (iOutputBus = 0; iOutputBus < ma_node_get_output_bus_count(pNode); iOutputBus += 1) { + ma_node_detach_output_bus(pNode, iOutputBus); + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_node_attach_output_bus(ma_node* pNode, ma_uint32 outputBusIndex, ma_node* pOtherNode, ma_uint32 otherNodeInputBusIndex) +{ + ma_node_base* pNodeBase = (ma_node_base*)pNode; + ma_node_base* pOtherNodeBase = (ma_node_base*)pOtherNode; + + if (pNodeBase == NULL || pOtherNodeBase == NULL) { + return MA_INVALID_ARGS; + } + + if (pNodeBase == pOtherNodeBase) { + return MA_INVALID_OPERATION; /* Cannot attach a node to itself. */ + } + + if (outputBusIndex >= ma_node_get_output_bus_count(pNode) || otherNodeInputBusIndex >= ma_node_get_input_bus_count(pOtherNode)) { + return MA_INVALID_OPERATION; /* Invalid bus index. */ + } + + /* The output channel count of the output node must be the same as the input channel count of the input node. */ + if (ma_node_get_output_channels(pNode, outputBusIndex) != ma_node_get_input_channels(pOtherNode, otherNodeInputBusIndex)) { + return MA_INVALID_OPERATION; /* Channel count is incompatible. */ + } + + /* This will deal with detaching if the output bus is already attached to something. */ + ma_node_input_bus_attach(&pOtherNodeBase->pInputBuses[otherNodeInputBusIndex], &pNodeBase->pOutputBuses[outputBusIndex], pOtherNode, otherNodeInputBusIndex); + + return MA_SUCCESS; +} + +MA_API ma_result ma_node_set_output_bus_volume(ma_node* pNode, ma_uint32 outputBusIndex, float volume) +{ + ma_node_base* pNodeBase = (ma_node_base*)pNode; + + if (pNodeBase == NULL) { + return MA_INVALID_ARGS; + } + + if (outputBusIndex >= ma_node_get_output_bus_count(pNode)) { + return MA_INVALID_ARGS; /* Invalid bus index. */ + } + + return ma_node_output_bus_set_volume(&pNodeBase->pOutputBuses[outputBusIndex], volume); +} + +MA_API float ma_node_get_output_bus_volume(const ma_node* pNode, ma_uint32 outputBusIndex) +{ + const ma_node_base* pNodeBase = (const ma_node_base*)pNode; + + if (pNodeBase == NULL) { + return 0; + } + + if (outputBusIndex >= ma_node_get_output_bus_count(pNode)) { + return 0; /* Invalid bus index. 
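+        Note that 0 is also a legitimate volume, so a 0 return is ambiguous here;
+        validate the bus index on the caller's side if the distinction matters.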
*/ + } + + return ma_node_output_bus_get_volume(&pNodeBase->pOutputBuses[outputBusIndex]); +} + +MA_API ma_result ma_node_set_state(ma_node* pNode, ma_node_state state) +{ + ma_node_base* pNodeBase = (ma_node_base*)pNode; + + if (pNodeBase == NULL) { + return MA_INVALID_ARGS; + } + + ma_atomic_exchange_i32(&pNodeBase->state, state); + + return MA_SUCCESS; +} + +MA_API ma_node_state ma_node_get_state(const ma_node* pNode) +{ + const ma_node_base* pNodeBase = (const ma_node_base*)pNode; + + if (pNodeBase == NULL) { + return ma_node_state_stopped; + } + + return (ma_node_state)ma_atomic_load_i32(&pNodeBase->state); +} + +MA_API ma_result ma_node_set_state_time(ma_node* pNode, ma_node_state state, ma_uint64 globalTime) +{ + if (pNode == NULL) { + return MA_INVALID_ARGS; + } + + /* Validation check for safety since we'll be using this as an index into stateTimes[]. */ + if (state != ma_node_state_started && state != ma_node_state_stopped) { + return MA_INVALID_ARGS; + } + + ma_atomic_exchange_64(&((ma_node_base*)pNode)->stateTimes[state], globalTime); + + return MA_SUCCESS; +} + +MA_API ma_uint64 ma_node_get_state_time(const ma_node* pNode, ma_node_state state) +{ + if (pNode == NULL) { + return 0; + } + + /* Validation check for safety since we'll be using this as an index into stateTimes[]. */ + if (state != ma_node_state_started && state != ma_node_state_stopped) { + return 0; + } + + return ma_atomic_load_64(&((ma_node_base*)pNode)->stateTimes[state]); +} + +MA_API ma_node_state ma_node_get_state_by_time(const ma_node* pNode, ma_uint64 globalTime) +{ + if (pNode == NULL) { + return ma_node_state_stopped; + } + + return ma_node_get_state_by_time_range(pNode, globalTime, globalTime); +} + +MA_API ma_node_state ma_node_get_state_by_time_range(const ma_node* pNode, ma_uint64 globalTimeBeg, ma_uint64 globalTimeEnd) +{ + ma_node_state state; + + if (pNode == NULL) { + return ma_node_state_stopped; + } + + state = ma_node_get_state(pNode); + + /* An explicitly stopped node is always stopped. */ + if (state == ma_node_state_stopped) { + return ma_node_state_stopped; + } + + /* + Getting here means the node is marked as started, but it may still not be truly started due to + its start time not having been reached yet. Also, the stop time may have also been reached in + which case it'll be considered stopped. + */ + if (ma_node_get_state_time(pNode, ma_node_state_started) > globalTimeBeg) { + return ma_node_state_stopped; /* Start time has not yet been reached. */ + } + + if (ma_node_get_state_time(pNode, ma_node_state_stopped) <= globalTimeEnd) { + return ma_node_state_stopped; /* Stop time has been reached. */ + } + + /* Getting here means the node is marked as started and is within its start/stop times. 
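+    As a worked example: with a start time of 1000 and a stop time of 2000, querying the range
+    [1500, 1600] returns started, whereas [900, 950] falls before the start time and [2000, 2100]
+    reaches the stop time, so both of those return stopped.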
*/
+    return ma_node_state_started;
+}
+
+MA_API ma_uint64 ma_node_get_time(const ma_node* pNode)
+{
+    if (pNode == NULL) {
+        return 0;
+    }
+
+    return ma_atomic_load_64(&((ma_node_base*)pNode)->localTime);
+}
+
+MA_API ma_result ma_node_set_time(ma_node* pNode, ma_uint64 localTime)
+{
+    if (pNode == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    ma_atomic_exchange_64(&((ma_node_base*)pNode)->localTime, localTime);
+
+    return MA_SUCCESS;
+}
+
+
+
+static void ma_node_process_pcm_frames_internal(ma_node* pNode, const float** ppFramesIn, ma_uint32* pFrameCountIn, float** ppFramesOut, ma_uint32* pFrameCountOut)
+{
+    ma_node_base* pNodeBase = (ma_node_base*)pNode;
+
+    MA_ASSERT(pNode != NULL);
+
+    if (pNodeBase->vtable->onProcess) {
+        pNodeBase->vtable->onProcess(pNode, ppFramesIn, pFrameCountIn, ppFramesOut, pFrameCountOut);
+    }
+}
+
+static ma_result ma_node_read_pcm_frames(ma_node* pNode, ma_uint32 outputBusIndex, float* pFramesOut, ma_uint32 frameCount, ma_uint32* pFramesRead, ma_uint64 globalTime)
+{
+    ma_node_base* pNodeBase = (ma_node_base*)pNode;
+    ma_result result = MA_SUCCESS;
+    ma_uint32 iInputBus;
+    ma_uint32 iOutputBus;
+    ma_uint32 inputBusCount;
+    ma_uint32 outputBusCount;
+    ma_uint32 totalFramesRead = 0;
+    float* ppFramesIn[MA_MAX_NODE_BUS_COUNT];
+    float* ppFramesOut[MA_MAX_NODE_BUS_COUNT];
+    ma_uint64 globalTimeBeg;
+    ma_uint64 globalTimeEnd;
+    ma_uint64 startTime;
+    ma_uint64 stopTime;
+    ma_uint32 timeOffsetBeg;
+    ma_uint32 timeOffsetEnd;
+    ma_uint32 frameCountIn;
+    ma_uint32 frameCountOut;
+
+    /*
+    pFramesRead is mandatory. It must be used to determine how many frames were read. It's normal and
+    expected that the number of frames read may be different to that requested. Therefore, the caller
+    must look at this value to correctly determine how many frames were read.
+    */
+    MA_ASSERT(pFramesRead != NULL); /* <-- If you've triggered this assert, you're using this function wrong. You *must* use this variable and inspect it after the call returns. */
+    if (pFramesRead == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    *pFramesRead = 0;   /* Safety. */
+
+    if (pNodeBase == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    if (outputBusIndex >= ma_node_get_output_bus_count(pNodeBase)) {
+        return MA_INVALID_ARGS; /* Invalid output bus index. */
+    }
+
+    /* Don't do anything if we're in a stopped state. */
+    if (ma_node_get_state_by_time_range(pNode, globalTime, globalTime + frameCount) != ma_node_state_started) {
+        return MA_SUCCESS;  /* We're in a stopped state. This is not an error - we just need to not read anything. */
+    }
+
+
+    globalTimeBeg = globalTime;
+    globalTimeEnd = globalTime + frameCount;
+    startTime = ma_node_get_state_time(pNode, ma_node_state_started);
+    stopTime  = ma_node_get_state_time(pNode, ma_node_state_stopped);
+
+    /*
+    At this point we know that we are inside our start/stop times. However, we may need to adjust
+    our frame count and output pointer to accommodate since we could be straddling the time period
+    that this function is getting called for.
+
+    It's possible (and likely) that the start time does not line up with the output buffer. We
+    therefore need to offset it by a number of frames to accommodate. The same thing applies for
+    the stop time.
+    */
+    timeOffsetBeg = (globalTimeBeg < startTime) ? (ma_uint32)(startTime - globalTimeBeg) : 0;   /* <-- Number of frames at the head of the buffer that fall before the start time. */
+    timeOffsetEnd = (globalTimeEnd > stopTime)  ? (ma_uint32)(globalTimeEnd - stopTime)  : 0;
+
+    /* Trim based on the start offset. We need to silence the start of the buffer. 
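+    For example, when 1024 frames are requested at global time 0 from a node whose start time
+    is 256, the first 256 output frames are silenced and only the remaining 768 are read.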
*/ + if (timeOffsetBeg > 0) { + ma_silence_pcm_frames(pFramesOut, timeOffsetBeg, ma_format_f32, ma_node_get_output_channels(pNode, outputBusIndex)); + pFramesOut += timeOffsetBeg * ma_node_get_output_channels(pNode, outputBusIndex); + frameCount -= timeOffsetBeg; + } + + /* Trim based on the end offset. We don't need to silence the tail section because we'll just have a reduced value written to pFramesRead. */ + if (timeOffsetEnd > 0) { + frameCount -= timeOffsetEnd; + } + + + /* We run on different paths depending on the bus counts. */ + inputBusCount = ma_node_get_input_bus_count(pNode); + outputBusCount = ma_node_get_output_bus_count(pNode); + + /* + Run a simplified path when there are no inputs and one output. In this case there's nothing to + actually read and we can go straight to output. This is a very common scenario because the vast + majority of data source nodes will use this setup so this optimization I think is worthwhile. + */ + if (inputBusCount == 0 && outputBusCount == 1) { + /* Fast path. No need to read from input and no need for any caching. */ + frameCountIn = 0; + frameCountOut = frameCount; /* Just read as much as we can. The callback will return what was actually read. */ + + ppFramesOut[0] = pFramesOut; + + /* + If it's a passthrough we won't be expecting the callback to output anything, so we'll + need to pre-silence the output buffer. + */ + if ((pNodeBase->vtable->flags & MA_NODE_FLAG_PASSTHROUGH) != 0) { + ma_silence_pcm_frames(pFramesOut, frameCount, ma_format_f32, ma_node_get_output_channels(pNode, outputBusIndex)); + } + + ma_node_process_pcm_frames_internal(pNode, NULL, &frameCountIn, ppFramesOut, &frameCountOut); + totalFramesRead = frameCountOut; + } else { + /* Slow path. Need to read input data. */ + if ((pNodeBase->vtable->flags & MA_NODE_FLAG_PASSTHROUGH) != 0) { + /* + Fast path. We're running a passthrough. We need to read directly into the output buffer, but + still fire the callback so that event handling and trigger nodes can do their thing. Since + it's a passthrough there's no need for any kind of caching logic. + */ + MA_ASSERT(outputBusCount == inputBusCount); + MA_ASSERT(outputBusCount == 1); + MA_ASSERT(outputBusIndex == 0); + + /* We just read directly from input bus to output buffer, and then afterwards fire the callback. */ + ppFramesOut[0] = pFramesOut; + ppFramesIn[0] = ppFramesOut[0]; + + result = ma_node_input_bus_read_pcm_frames(pNodeBase, &pNodeBase->pInputBuses[0], ppFramesIn[0], frameCount, &totalFramesRead, globalTime); + if (result == MA_SUCCESS) { + /* Even though it's a passthrough, we still need to fire the callback. */ + frameCountIn = totalFramesRead; + frameCountOut = totalFramesRead; + + if (totalFramesRead > 0) { + ma_node_process_pcm_frames_internal(pNode, (const float**)ppFramesIn, &frameCountIn, ppFramesOut, &frameCountOut); /* From GCC: expected 'const float **' but argument is of type 'float **'. Shouldn't this be implicit? Explicit cast to silence the warning. */ + } + + /* + A passthrough should never have modified the input and output frame counts. If you're + triggering these asserts you need to fix your processing callback. + */ + MA_ASSERT(frameCountIn == totalFramesRead); + MA_ASSERT(frameCountOut == totalFramesRead); + } + } else { + /* Slow path. Need to do caching. 
*/
+                ma_uint32 framesToProcessIn;
+                ma_uint32 framesToProcessOut;
+                ma_bool32 consumeNullInput = MA_FALSE;
+
+                /*
+                We use frameCount as a basis for the number of frames to read since that's what's being
+                requested, however we still need to clamp it to whatever can fit in the cache.
+
+                This will also be used as the basis for determining how many input frames to read. This is
+                not ideal because it can result in too many input frames being read which introduces latency.
+                To solve this, nodes can implement an optional callback called onGetRequiredInputFrameCount
+                which is used as a hint to miniaudio as to how many input frames it needs to read at a time. This
+                callback is completely optional, and if it's not set, miniaudio will assume `frameCount`.
+
+                This function will be called multiple times for each period of time, once for each output node.
+                We cannot read from each input node each time this function is called. Instead we need to check
+                whether or not this is the first output bus to be read from for this time period, and if so, read
+                from our input data.
+
+                To determine whether or not we're ready to read data, we check a flag. There will be one flag
+                for each output. When the flag is set, it means data has been read previously and that we're
+                ready to advance time forward for our input nodes by reading fresh data.
+                */
+                framesToProcessOut = frameCount;
+                if (framesToProcessOut > pNodeBase->cachedDataCapInFramesPerBus) {
+                    framesToProcessOut = pNodeBase->cachedDataCapInFramesPerBus;
+                }
+
+                framesToProcessIn = frameCount;
+                if (pNodeBase->vtable->onGetRequiredInputFrameCount) {
+                    pNodeBase->vtable->onGetRequiredInputFrameCount(pNode, framesToProcessOut, &framesToProcessIn); /* <-- It does not matter if this fails. */
+                }
+                if (framesToProcessIn > pNodeBase->cachedDataCapInFramesPerBus) {
+                    framesToProcessIn = pNodeBase->cachedDataCapInFramesPerBus;
+                }
+
+
+                MA_ASSERT(framesToProcessIn  <= 0xFFFF);
+                MA_ASSERT(framesToProcessOut <= 0xFFFF);
+
+                if (ma_node_output_bus_has_read(&pNodeBase->pOutputBuses[outputBusIndex])) {
+                    /* Getting here means we need to do another round of processing. */
+                    pNodeBase->cachedFrameCountOut = 0;
+
+                    for (;;) {
+                        frameCountOut = 0;
+
+                        /*
+                        We need to prepare our output frame pointers for processing. In the same iteration we need
+                        to mark every output bus as unread so that future calls to this function for different buses
+                        for the current time period don't pull in data when they should instead be reading from cache.
+                        */
+                        for (iOutputBus = 0; iOutputBus < outputBusCount; iOutputBus += 1) {
+                            ma_node_output_bus_set_has_read(&pNodeBase->pOutputBuses[iOutputBus], MA_FALSE); /* <-- This is what tells the next calls to this function for other output buses for this time period to read from cache instead of pulling in more data. */
+                            ppFramesOut[iOutputBus] = ma_node_get_cached_output_ptr(pNode, iOutputBus);
+                        }
+
+                        /* We only need to read from input buses if there isn't already some data in the cache. */
+                        if (pNodeBase->cachedFrameCountIn == 0) {
+                            ma_uint32 maxFramesReadIn = 0;
+
+                            /* Here is where we pull in data from the input buses. This is what will trigger an advance in time. */
+                            for (iInputBus = 0; iInputBus < inputBusCount; iInputBus += 1) {
+                                ma_uint32 framesRead;
+
+                                /* The first thing to do is get the offset within our bulk allocation to store this input data. */
+                                ppFramesIn[iInputBus] = ma_node_get_cached_input_ptr(pNode, iInputBus);
+
+                                /* Once we've determined our destination pointer we can read. 
Note that we must inspect the number of frames read and fill any leftovers with silence for safety. */
+                                result = ma_node_input_bus_read_pcm_frames(pNodeBase, &pNodeBase->pInputBuses[iInputBus], ppFramesIn[iInputBus], framesToProcessIn, &framesRead, globalTime);
+                                if (result != MA_SUCCESS) {
+                                    /* It doesn't really matter if we fail because we'll just fill with silence. */
+                                    framesRead = 0; /* Just for safety, but I don't think it's really needed. */
+                                }
+
+                                /* TODO: Minor optimization opportunity here. If no frames were read and the buffer is already filled with silence, no need to re-silence it. */
+                                /* Any leftover frames need to be silenced for safety. */
+                                if (framesRead < framesToProcessIn) {
+                                    ma_silence_pcm_frames(ppFramesIn[iInputBus] + (framesRead * ma_node_get_input_channels(pNodeBase, iInputBus)), (framesToProcessIn - framesRead), ma_format_f32, ma_node_get_input_channels(pNodeBase, iInputBus));
+                                }
+
+                                maxFramesReadIn = ma_max(maxFramesReadIn, framesRead);
+                            }
+
+                            /* This was a fresh load of input data so reset our consumption counter. */
+                            pNodeBase->consumedFrameCountIn = 0;
+
+                            /*
+                            We don't want to keep processing if there's nothing to process, so set the number of cached
+                            input frames to the maximum number we read from each attachment (the lesser will be padded
+                            with silence). If we didn't read anything, this will be set to 0 and the entire buffer will
+                            have been assigned to silence. This being equal to 0 is an important property for us because
+                            it allows us to detect when NULL can be passed into the processing callback for the input
+                            buffer for the purpose of continuous processing.
+                            */
+                            pNodeBase->cachedFrameCountIn = (ma_uint16)maxFramesReadIn;
+                        } else {
+                            /* We don't need to read anything, but we do need to prepare our input frame pointers. */
+                            for (iInputBus = 0; iInputBus < inputBusCount; iInputBus += 1) {
+                                ppFramesIn[iInputBus] = ma_node_get_cached_input_ptr(pNode, iInputBus) + (pNodeBase->consumedFrameCountIn * ma_node_get_input_channels(pNodeBase, iInputBus));
+                            }
+                        }
+
+                        /*
+                        At this point we have our input data so now we need to do some processing. Sneaky little
+                        optimization here - we can set the pointer to the output buffer for this output bus so
+                        that the final copy into the output buffer is done directly by onProcess().
+                        */
+                        if (pFramesOut != NULL) {
+                            ppFramesOut[outputBusIndex] = ma_offset_pcm_frames_ptr_f32(pFramesOut, pNodeBase->cachedFrameCountOut, ma_node_get_output_channels(pNode, outputBusIndex));
+                        }
+
+
+                        /* Give the processing function the entire capacity of the output buffer. */
+                        frameCountOut = (framesToProcessOut - pNodeBase->cachedFrameCountOut);
+
+                        /*
+                        We need to treat nodes with continuous processing a little differently. For these ones,
+                        we always want to fire the callback with the requested number of frames, regardless of
+                        pNodeBase->cachedFrameCountIn, which could be 0. Also, we want to check if we can pass
+                        in NULL for the input buffer to the callback.
+                        */
+                        if ((pNodeBase->vtable->flags & MA_NODE_FLAG_CONTINUOUS_PROCESSING) != 0) {
+                            /* We're using continuous processing. Make sure we specify the whole frame count at all times. */
+                            frameCountIn = framesToProcessIn;   /* Give the processing function as much input data as we've got in the buffer, including any silenced padding from short reads. 
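+                            Callbacks on nodes using MA_NODE_FLAG_CONTINUOUS_PROCESSING must
+                            therefore tolerate input that is partially or entirely silence.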
*/ + + if ((pNodeBase->vtable->flags & MA_NODE_FLAG_ALLOW_NULL_INPUT) != 0 && pNodeBase->consumedFrameCountIn == 0 && pNodeBase->cachedFrameCountIn == 0) { + consumeNullInput = MA_TRUE; + } else { + consumeNullInput = MA_FALSE; + } + + /* + Since we're using continuous processing we're always passing in a full frame count + regardless of how much input data was read. If this is greater than what we read as + input, we'll end up with an underflow. We instead need to make sure our cached frame + count is set to the number of frames we'll be passing to the data callback. Not + doing this will result in an underflow when we "consume" the cached data later on. + + Note that this check needs to be done after the "consumeNullInput" check above because + we use the property of cachedFrameCountIn being 0 to determine whether or not we + should be passing in a null pointer to the processing callback for when the node is + configured with MA_NODE_FLAG_ALLOW_NULL_INPUT. + */ + if (pNodeBase->cachedFrameCountIn < (ma_uint16)frameCountIn) { + pNodeBase->cachedFrameCountIn = (ma_uint16)frameCountIn; + } + } else { + frameCountIn = pNodeBase->cachedFrameCountIn; /* Give the processing function as much valid input data as we've got. */ + consumeNullInput = MA_FALSE; + } + + /* + Process data slightly differently depending on whether or not we're consuming NULL + input (checked just above). + */ + if (consumeNullInput) { + ma_node_process_pcm_frames_internal(pNode, NULL, &frameCountIn, ppFramesOut, &frameCountOut); + } else { + /* + We want to skip processing if there's no input data, but we can only do that safely if + we know that there is no chance of any output frames being produced. If continuous + processing is being used, this won't be a problem because the input frame count will + always be non-0. However, if continuous processing is *not* enabled and input and output + data is processed at different rates, we still need to process that last input frame + because there could be a few excess output frames needing to be produced from cached + data. The `MA_NODE_FLAG_DIFFERENT_PROCESSING_RATES` flag is used as the indicator for + determining whether or not we need to process the node even when there are no input + frames available right now. + */ + if (frameCountIn > 0 || (pNodeBase->vtable->flags & MA_NODE_FLAG_DIFFERENT_PROCESSING_RATES) != 0) { + ma_node_process_pcm_frames_internal(pNode, (const float**)ppFramesIn, &frameCountIn, ppFramesOut, &frameCountOut); /* From GCC: expected 'const float **' but argument is of type 'float **'. Shouldn't this be implicit? Explicit cast to silence the warning. */ + } else { + frameCountOut = 0; /* No data was processed. */ + } + } + + /* + Thanks to our sneaky optimization above we don't need to do any data copying directly into + the output buffer - the onProcess() callback just did that for us. We do, however, need to + apply the number of input and output frames that were processed. Note that due to continuous + processing above, we need to do explicit checks here. If we just consumed a NULL input + buffer it means that no actual input data was processed from the internal buffers and we + don't want to be modifying any counters. + */ + if (consumeNullInput == MA_FALSE) { + pNodeBase->consumedFrameCountIn += (ma_uint16)frameCountIn; + pNodeBase->cachedFrameCountIn -= (ma_uint16)frameCountIn; + } + + /* The cached output frame count is always equal to what we just read. 
*/ + pNodeBase->cachedFrameCountOut += (ma_uint16)frameCountOut; + + /* If we couldn't process any data, we're done. The loop needs to be terminated here or else we'll get stuck in a loop. */ + if (pNodeBase->cachedFrameCountOut == framesToProcessOut || (frameCountOut == 0 && frameCountIn == 0)) { + break; + } + } + } else { + /* + We're not needing to read anything from the input buffer so just read directly from our + already-processed data. + */ + if (pFramesOut != NULL) { + ma_copy_pcm_frames(pFramesOut, ma_node_get_cached_output_ptr(pNodeBase, outputBusIndex), pNodeBase->cachedFrameCountOut, ma_format_f32, ma_node_get_output_channels(pNodeBase, outputBusIndex)); + } + } + + /* The number of frames read is always equal to the number of cached output frames. */ + totalFramesRead = pNodeBase->cachedFrameCountOut; + + /* Now that we've read the data, make sure our read flag is set. */ + ma_node_output_bus_set_has_read(&pNodeBase->pOutputBuses[outputBusIndex], MA_TRUE); + } + } + + /* Apply volume, if necessary. */ + ma_apply_volume_factor_f32(pFramesOut, totalFramesRead * ma_node_get_output_channels(pNodeBase, outputBusIndex), ma_node_output_bus_get_volume(&pNodeBase->pOutputBuses[outputBusIndex])); + + /* Advance our local time forward. */ + ma_atomic_fetch_add_64(&pNodeBase->localTime, (ma_uint64)totalFramesRead); + + *pFramesRead = totalFramesRead + timeOffsetBeg; /* Must include the silenced section at the start of the buffer. */ + return result; +} + + + + +/* Data source node. */ +MA_API ma_data_source_node_config ma_data_source_node_config_init(ma_data_source* pDataSource) +{ + ma_data_source_node_config config; + + MA_ZERO_OBJECT(&config); + config.nodeConfig = ma_node_config_init(); + config.pDataSource = pDataSource; + + return config; +} + + +static void ma_data_source_node_process_pcm_frames(ma_node* pNode, const float** ppFramesIn, ma_uint32* pFrameCountIn, float** ppFramesOut, ma_uint32* pFrameCountOut) +{ + ma_data_source_node* pDataSourceNode = (ma_data_source_node*)pNode; + ma_format format; + ma_uint32 channels; + ma_uint32 frameCount; + ma_uint64 framesRead = 0; + + MA_ASSERT(pDataSourceNode != NULL); + MA_ASSERT(pDataSourceNode->pDataSource != NULL); + MA_ASSERT(ma_node_get_input_bus_count(pDataSourceNode) == 0); + MA_ASSERT(ma_node_get_output_bus_count(pDataSourceNode) == 1); + + /* We don't want to read from ppFramesIn at all. Instead we read from the data source. */ + (void)ppFramesIn; + (void)pFrameCountIn; + + frameCount = *pFrameCountOut; + + /* miniaudio should never be calling this with a frame count of zero. */ + MA_ASSERT(frameCount > 0); + + if (ma_data_source_get_data_format(pDataSourceNode->pDataSource, &format, &channels, NULL, NULL, 0) == MA_SUCCESS) { /* <-- Don't care about sample rate here. */ + /* The node graph system requires samples be in floating point format. This is checked in ma_data_source_node_init(). */ + MA_ASSERT(format == ma_format_f32); + (void)format; /* Just to silence some static analysis tools. */ + + ma_data_source_read_pcm_frames(pDataSourceNode->pDataSource, ppFramesOut[0], frameCount, &framesRead); + } + + *pFrameCountOut = (ma_uint32)framesRead; +} + +static ma_node_vtable g_ma_data_source_node_vtable = +{ + ma_data_source_node_process_pcm_frames, + NULL, /* onGetRequiredInputFrameCount */ + 0, /* 0 input buses. */ + 1, /* 1 output bus. 
*/
+    0
+};
+
+MA_API ma_result ma_data_source_node_init(ma_node_graph* pNodeGraph, const ma_data_source_node_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_data_source_node* pDataSourceNode)
+{
+    ma_result result;
+    ma_format format;   /* For validating the format, which must be ma_format_f32. */
+    ma_uint32 channels; /* For specifying the channel count of the output bus. */
+    ma_node_config baseConfig;
+
+    if (pDataSourceNode == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    MA_ZERO_OBJECT(pDataSourceNode);
+
+    if (pConfig == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    result = ma_data_source_get_data_format(pConfig->pDataSource, &format, &channels, NULL, NULL, 0);    /* Don't care about sample rate. This will check pDataSource for NULL. */
+    if (result != MA_SUCCESS) {
+        return result;
+    }
+
+    MA_ASSERT(format == ma_format_f32); /* <-- If you've triggered this it means your data source is not outputting floating-point samples. You must configure your data source to use ma_format_f32. */
+    if (format != ma_format_f32) {
+        return MA_INVALID_ARGS; /* Invalid format. */
+    }
+
+    /* The channel count is defined by the data source. It is invalid for the caller to set it manually; this is validated below. */
+    baseConfig = pConfig->nodeConfig;
+    baseConfig.vtable = &g_ma_data_source_node_vtable;  /* Explicitly set the vtable here to prevent callers from setting it incorrectly. */
+
+    /*
+    The channel count is defined by the data source. It is invalid for the caller to manually set
+    the channel counts in the config. `ma_data_source_node_config_init()` will have defaulted the
+    channel count pointer to NULL which is how it must remain. If this check fails it means you're
+    explicitly setting the channel count. Instead, configure the output channel count of your data
+    source to be the necessary channel count.
+    */
+    if (baseConfig.pOutputChannels != NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    baseConfig.pOutputChannels = &channels;
+
+    result = ma_node_init(pNodeGraph, &baseConfig, pAllocationCallbacks, &pDataSourceNode->base);
+    if (result != MA_SUCCESS) {
+        return result;
+    }
+
+    pDataSourceNode->pDataSource = pConfig->pDataSource;
+
+    return MA_SUCCESS;
+}
+
+MA_API void ma_data_source_node_uninit(ma_data_source_node* pDataSourceNode, const ma_allocation_callbacks* pAllocationCallbacks)
+{
+    ma_node_uninit(&pDataSourceNode->base, pAllocationCallbacks);
+}
+
+MA_API ma_result ma_data_source_node_set_looping(ma_data_source_node* pDataSourceNode, ma_bool32 isLooping)
+{
+    if (pDataSourceNode == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    return ma_data_source_set_looping(pDataSourceNode->pDataSource, isLooping);
+}
+
+MA_API ma_bool32 ma_data_source_node_is_looping(ma_data_source_node* pDataSourceNode)
+{
+    if (pDataSourceNode == NULL) {
+        return MA_FALSE;
+    }
+
+    return ma_data_source_is_looping(pDataSourceNode->pDataSource);
+}
+
+
+
+/* Splitter Node. 
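+
+A splitter has one input bus and, by default, two output buses, and simply copies its input to
+every output. A hypothetical usage sketch (nodeGraph, dryNode and wetNode are illustrative
+placeholders, and error handling is omitted):
+
+    ma_splitter_node_config splitterConfig = ma_splitter_node_config_init(2);  <-- 2 = channel count.
+
+    ma_splitter_node splitterNode;
+    ma_splitter_node_init(&nodeGraph, &splitterConfig, NULL, &splitterNode);
+
+    ma_node_attach_output_bus(&splitterNode, 0, &dryNode, 0);   <-- Output bus 0 to the dry chain.
+    ma_node_attach_output_bus(&splitterNode, 1, &wetNode, 0);   <-- Output bus 1 to the wet chain.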
*/ +MA_API ma_splitter_node_config ma_splitter_node_config_init(ma_uint32 channels) +{ + ma_splitter_node_config config; + + MA_ZERO_OBJECT(&config); + config.nodeConfig = ma_node_config_init(); + config.channels = channels; + config.outputBusCount = 2; + + return config; +} + + +static void ma_splitter_node_process_pcm_frames(ma_node* pNode, const float** ppFramesIn, ma_uint32* pFrameCountIn, float** ppFramesOut, ma_uint32* pFrameCountOut) +{ + ma_node_base* pNodeBase = (ma_node_base*)pNode; + ma_uint32 iOutputBus; + ma_uint32 channels; + + MA_ASSERT(pNodeBase != NULL); + MA_ASSERT(ma_node_get_input_bus_count(pNodeBase) == 1); + + /* We don't need to consider the input frame count - it'll be the same as the output frame count and we process everything. */ + (void)pFrameCountIn; + + /* NOTE: This assumes the same number of channels for all inputs and outputs. This was checked in ma_splitter_node_init(). */ + channels = ma_node_get_input_channels(pNodeBase, 0); + + /* Splitting is just copying the first input bus and copying it over to each output bus. */ + for (iOutputBus = 0; iOutputBus < ma_node_get_output_bus_count(pNodeBase); iOutputBus += 1) { + ma_copy_pcm_frames(ppFramesOut[iOutputBus], ppFramesIn[0], *pFrameCountOut, ma_format_f32, channels); + } +} + +static ma_node_vtable g_ma_splitter_node_vtable = +{ + ma_splitter_node_process_pcm_frames, + NULL, /* onGetRequiredInputFrameCount */ + 1, /* 1 input bus. */ + MA_NODE_BUS_COUNT_UNKNOWN, /* The output bus count is specified on a per-node basis. */ + 0 +}; + +MA_API ma_result ma_splitter_node_init(ma_node_graph* pNodeGraph, const ma_splitter_node_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_splitter_node* pSplitterNode) +{ + ma_result result; + ma_node_config baseConfig; + ma_uint32 pInputChannels[1]; + ma_uint32 pOutputChannels[MA_MAX_NODE_BUS_COUNT]; + ma_uint32 iOutputBus; + + if (pSplitterNode == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pSplitterNode); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + if (pConfig->outputBusCount > MA_MAX_NODE_BUS_COUNT) { + return MA_INVALID_ARGS; /* Too many output buses. */ + } + + /* Splitters require the same number of channels between inputs and outputs. */ + pInputChannels[0] = pConfig->channels; + for (iOutputBus = 0; iOutputBus < pConfig->outputBusCount; iOutputBus += 1) { + pOutputChannels[iOutputBus] = pConfig->channels; + } + + baseConfig = pConfig->nodeConfig; + baseConfig.vtable = &g_ma_splitter_node_vtable; + baseConfig.pInputChannels = pInputChannels; + baseConfig.pOutputChannels = pOutputChannels; + baseConfig.outputBusCount = pConfig->outputBusCount; + + result = ma_node_init(pNodeGraph, &baseConfig, pAllocationCallbacks, &pSplitterNode->base); + if (result != MA_SUCCESS) { + return result; /* Failed to initialize the base node. 
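+                       No additional cleanup is needed on this path because the splitter
+                       allocates nothing of its own before ma_node_init().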
*/ + } + + return MA_SUCCESS; +} + +MA_API void ma_splitter_node_uninit(ma_splitter_node* pSplitterNode, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_node_uninit(pSplitterNode, pAllocationCallbacks); +} + + +/* +Biquad Node +*/ +MA_API ma_biquad_node_config ma_biquad_node_config_init(ma_uint32 channels, float b0, float b1, float b2, float a0, float a1, float a2) +{ + ma_biquad_node_config config; + + config.nodeConfig = ma_node_config_init(); + config.biquad = ma_biquad_config_init(ma_format_f32, channels, b0, b1, b2, a0, a1, a2); + + return config; +} + +static void ma_biquad_node_process_pcm_frames(ma_node* pNode, const float** ppFramesIn, ma_uint32* pFrameCountIn, float** ppFramesOut, ma_uint32* pFrameCountOut) +{ + ma_biquad_node* pLPFNode = (ma_biquad_node*)pNode; + + MA_ASSERT(pNode != NULL); + (void)pFrameCountIn; + + ma_biquad_process_pcm_frames(&pLPFNode->biquad, ppFramesOut[0], ppFramesIn[0], *pFrameCountOut); +} + +static ma_node_vtable g_ma_biquad_node_vtable = +{ + ma_biquad_node_process_pcm_frames, + NULL, /* onGetRequiredInputFrameCount */ + 1, /* One input. */ + 1, /* One output. */ + 0 /* Default flags. */ +}; + +MA_API ma_result ma_biquad_node_init(ma_node_graph* pNodeGraph, const ma_biquad_node_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_biquad_node* pNode) +{ + ma_result result; + ma_node_config baseNodeConfig; + + if (pNode == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pNode); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + if (pConfig->biquad.format != ma_format_f32) { + return MA_INVALID_ARGS; /* The format must be f32. */ + } + + result = ma_biquad_init(&pConfig->biquad, pAllocationCallbacks, &pNode->biquad); + if (result != MA_SUCCESS) { + return result; + } + + baseNodeConfig = ma_node_config_init(); + baseNodeConfig.vtable = &g_ma_biquad_node_vtable; + baseNodeConfig.pInputChannels = &pConfig->biquad.channels; + baseNodeConfig.pOutputChannels = &pConfig->biquad.channels; + + result = ma_node_init(pNodeGraph, &baseNodeConfig, pAllocationCallbacks, pNode); + if (result != MA_SUCCESS) { + return result; + } + + return result; +} + +MA_API ma_result ma_biquad_node_reinit(const ma_biquad_config* pConfig, ma_biquad_node* pNode) +{ + ma_biquad_node* pLPFNode = (ma_biquad_node*)pNode; + + MA_ASSERT(pNode != NULL); + + return ma_biquad_reinit(pConfig, &pLPFNode->biquad); +} + +MA_API void ma_biquad_node_uninit(ma_biquad_node* pNode, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_biquad_node* pLPFNode = (ma_biquad_node*)pNode; + + if (pNode == NULL) { + return; + } + + ma_node_uninit(pNode, pAllocationCallbacks); + ma_biquad_uninit(&pLPFNode->biquad, pAllocationCallbacks); +} + + + +/* +Low Pass Filter Node +*/ +MA_API ma_lpf_node_config ma_lpf_node_config_init(ma_uint32 channels, ma_uint32 sampleRate, double cutoffFrequency, ma_uint32 order) +{ + ma_lpf_node_config config; + + config.nodeConfig = ma_node_config_init(); + config.lpf = ma_lpf_config_init(ma_format_f32, channels, sampleRate, cutoffFrequency, order); + + return config; +} + +static void ma_lpf_node_process_pcm_frames(ma_node* pNode, const float** ppFramesIn, ma_uint32* pFrameCountIn, float** ppFramesOut, ma_uint32* pFrameCountOut) +{ + ma_lpf_node* pLPFNode = (ma_lpf_node*)pNode; + + MA_ASSERT(pNode != NULL); + (void)pFrameCountIn; + + ma_lpf_process_pcm_frames(&pLPFNode->lpf, ppFramesOut[0], ppFramesIn[0], *pFrameCountOut); +} + +static ma_node_vtable g_ma_lpf_node_vtable = +{ + ma_lpf_node_process_pcm_frames, + NULL, /* 
onGetRequiredInputFrameCount */ + 1, /* One input. */ + 1, /* One output. */ + 0 /* Default flags. */ +}; + +MA_API ma_result ma_lpf_node_init(ma_node_graph* pNodeGraph, const ma_lpf_node_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_lpf_node* pNode) +{ + ma_result result; + ma_node_config baseNodeConfig; + + if (pNode == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pNode); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + if (pConfig->lpf.format != ma_format_f32) { + return MA_INVALID_ARGS; /* The format must be f32. */ + } + + result = ma_lpf_init(&pConfig->lpf, pAllocationCallbacks, &pNode->lpf); + if (result != MA_SUCCESS) { + return result; + } + + baseNodeConfig = ma_node_config_init(); + baseNodeConfig.vtable = &g_ma_lpf_node_vtable; + baseNodeConfig.pInputChannels = &pConfig->lpf.channels; + baseNodeConfig.pOutputChannels = &pConfig->lpf.channels; + + result = ma_node_init(pNodeGraph, &baseNodeConfig, pAllocationCallbacks, pNode); + if (result != MA_SUCCESS) { + return result; + } + + return result; +} + +MA_API ma_result ma_lpf_node_reinit(const ma_lpf_config* pConfig, ma_lpf_node* pNode) +{ + ma_lpf_node* pLPFNode = (ma_lpf_node*)pNode; + + if (pNode == NULL) { + return MA_INVALID_ARGS; + } + + return ma_lpf_reinit(pConfig, &pLPFNode->lpf); +} + +MA_API void ma_lpf_node_uninit(ma_lpf_node* pNode, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_lpf_node* pLPFNode = (ma_lpf_node*)pNode; + + if (pNode == NULL) { + return; + } + + ma_node_uninit(pNode, pAllocationCallbacks); + ma_lpf_uninit(&pLPFNode->lpf, pAllocationCallbacks); +} + + + +/* +High Pass Filter Node +*/ +MA_API ma_hpf_node_config ma_hpf_node_config_init(ma_uint32 channels, ma_uint32 sampleRate, double cutoffFrequency, ma_uint32 order) +{ + ma_hpf_node_config config; + + config.nodeConfig = ma_node_config_init(); + config.hpf = ma_hpf_config_init(ma_format_f32, channels, sampleRate, cutoffFrequency, order); + + return config; +} + +static void ma_hpf_node_process_pcm_frames(ma_node* pNode, const float** ppFramesIn, ma_uint32* pFrameCountIn, float** ppFramesOut, ma_uint32* pFrameCountOut) +{ + ma_hpf_node* pHPFNode = (ma_hpf_node*)pNode; + + MA_ASSERT(pNode != NULL); + (void)pFrameCountIn; + + ma_hpf_process_pcm_frames(&pHPFNode->hpf, ppFramesOut[0], ppFramesIn[0], *pFrameCountOut); +} + +static ma_node_vtable g_ma_hpf_node_vtable = +{ + ma_hpf_node_process_pcm_frames, + NULL, /* onGetRequiredInputFrameCount */ + 1, /* One input. */ + 1, /* One output. */ + 0 /* Default flags. */ +}; + +MA_API ma_result ma_hpf_node_init(ma_node_graph* pNodeGraph, const ma_hpf_node_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_hpf_node* pNode) +{ + ma_result result; + ma_node_config baseNodeConfig; + + if (pNode == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pNode); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + if (pConfig->hpf.format != ma_format_f32) { + return MA_INVALID_ARGS; /* The format must be f32. 
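+                                The node graph mixes exclusively in ma_format_f32, so every filter
+                                node in this section shares this requirement; convert other formats
+                                before they enter the graph, e.g. with ma_data_converter.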
*/ + } + + result = ma_hpf_init(&pConfig->hpf, pAllocationCallbacks, &pNode->hpf); + if (result != MA_SUCCESS) { + return result; + } + + baseNodeConfig = ma_node_config_init(); + baseNodeConfig.vtable = &g_ma_hpf_node_vtable; + baseNodeConfig.pInputChannels = &pConfig->hpf.channels; + baseNodeConfig.pOutputChannels = &pConfig->hpf.channels; + + result = ma_node_init(pNodeGraph, &baseNodeConfig, pAllocationCallbacks, pNode); + if (result != MA_SUCCESS) { + return result; + } + + return result; +} + +MA_API ma_result ma_hpf_node_reinit(const ma_hpf_config* pConfig, ma_hpf_node* pNode) +{ + ma_hpf_node* pHPFNode = (ma_hpf_node*)pNode; + + if (pNode == NULL) { + return MA_INVALID_ARGS; + } + + return ma_hpf_reinit(pConfig, &pHPFNode->hpf); +} + +MA_API void ma_hpf_node_uninit(ma_hpf_node* pNode, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_hpf_node* pHPFNode = (ma_hpf_node*)pNode; + + if (pNode == NULL) { + return; + } + + ma_node_uninit(pNode, pAllocationCallbacks); + ma_hpf_uninit(&pHPFNode->hpf, pAllocationCallbacks); +} + + + + +/* +Band Pass Filter Node +*/ +MA_API ma_bpf_node_config ma_bpf_node_config_init(ma_uint32 channels, ma_uint32 sampleRate, double cutoffFrequency, ma_uint32 order) +{ + ma_bpf_node_config config; + + config.nodeConfig = ma_node_config_init(); + config.bpf = ma_bpf_config_init(ma_format_f32, channels, sampleRate, cutoffFrequency, order); + + return config; +} + +static void ma_bpf_node_process_pcm_frames(ma_node* pNode, const float** ppFramesIn, ma_uint32* pFrameCountIn, float** ppFramesOut, ma_uint32* pFrameCountOut) +{ + ma_bpf_node* pBPFNode = (ma_bpf_node*)pNode; + + MA_ASSERT(pNode != NULL); + (void)pFrameCountIn; + + ma_bpf_process_pcm_frames(&pBPFNode->bpf, ppFramesOut[0], ppFramesIn[0], *pFrameCountOut); +} + +static ma_node_vtable g_ma_bpf_node_vtable = +{ + ma_bpf_node_process_pcm_frames, + NULL, /* onGetRequiredInputFrameCount */ + 1, /* One input. */ + 1, /* One output. */ + 0 /* Default flags. */ +}; + +MA_API ma_result ma_bpf_node_init(ma_node_graph* pNodeGraph, const ma_bpf_node_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_bpf_node* pNode) +{ + ma_result result; + ma_node_config baseNodeConfig; + + if (pNode == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pNode); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + if (pConfig->bpf.format != ma_format_f32) { + return MA_INVALID_ARGS; /* The format must be f32. 
+
+
+
+/*
+Notching Filter Node
+*/
+MA_API ma_notch_node_config ma_notch_node_config_init(ma_uint32 channels, ma_uint32 sampleRate, double q, double frequency)
+{
+    ma_notch_node_config config;
+
+    config.nodeConfig = ma_node_config_init();
+    config.notch = ma_notch2_config_init(ma_format_f32, channels, sampleRate, q, frequency);
+
+    return config;
+}
+
+static void ma_notch_node_process_pcm_frames(ma_node* pNode, const float** ppFramesIn, ma_uint32* pFrameCountIn, float** ppFramesOut, ma_uint32* pFrameCountOut)
+{
+    ma_notch_node* pNotchNode = (ma_notch_node*)pNode;
+
+    MA_ASSERT(pNode != NULL);
+    (void)pFrameCountIn;
+
+    ma_notch2_process_pcm_frames(&pNotchNode->notch, ppFramesOut[0], ppFramesIn[0], *pFrameCountOut);
+}
+
+static ma_node_vtable g_ma_notch_node_vtable =
+{
+    ma_notch_node_process_pcm_frames,
+    NULL, /* onGetRequiredInputFrameCount */
+    1, /* One input. */
+    1, /* One output. */
+    0  /* Default flags. */
+};
+
+MA_API ma_result ma_notch_node_init(ma_node_graph* pNodeGraph, const ma_notch_node_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_notch_node* pNode)
+{
+    ma_result result;
+    ma_node_config baseNodeConfig;
+
+    if (pNode == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    MA_ZERO_OBJECT(pNode);
+
+    if (pConfig == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    if (pConfig->notch.format != ma_format_f32) {
+        return MA_INVALID_ARGS; /* The format must be f32. */
+    }
+
+    result = ma_notch2_init(&pConfig->notch, pAllocationCallbacks, &pNode->notch);
+    if (result != MA_SUCCESS) {
+        return result;
+    }
+
+    baseNodeConfig = ma_node_config_init();
+    baseNodeConfig.vtable = &g_ma_notch_node_vtable;
+    baseNodeConfig.pInputChannels = &pConfig->notch.channels;
+    baseNodeConfig.pOutputChannels = &pConfig->notch.channels;
+
+    result = ma_node_init(pNodeGraph, &baseNodeConfig, pAllocationCallbacks, pNode);
+    if (result != MA_SUCCESS) {
+        return result;
+    }
+
+    return result;
+}
+
+MA_API ma_result ma_notch_node_reinit(const ma_notch_config* pConfig, ma_notch_node* pNode)
+{
+    ma_notch_node* pNotchNode = (ma_notch_node*)pNode;
+
+    if (pNode == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    return ma_notch2_reinit(pConfig, &pNotchNode->notch);
+}
+
+MA_API void ma_notch_node_uninit(ma_notch_node* pNode, const ma_allocation_callbacks* pAllocationCallbacks)
+{
+    ma_notch_node* pNotchNode = (ma_notch_node*)pNode;
+
+    if (pNode == NULL) {
+        return;
+    }
+
+    ma_node_uninit(pNode, pAllocationCallbacks);
+    ma_notch2_uninit(&pNotchNode->notch, pAllocationCallbacks);
+}
+
+
+
+/*
+Peaking Filter Node
+*/
+MA_API ma_peak_node_config ma_peak_node_config_init(ma_uint32 channels, ma_uint32 sampleRate, double gainDB, double q, double frequency)
+{
+    ma_peak_node_config config;
+
+    config.nodeConfig = ma_node_config_init();
+    config.peak = ma_peak2_config_init(ma_format_f32, channels, sampleRate, gainDB, q, frequency);
+
+    return config;
+}
+
+static void ma_peak_node_process_pcm_frames(ma_node* pNode, const float** ppFramesIn, ma_uint32* pFrameCountIn, float** ppFramesOut, ma_uint32* pFrameCountOut)
+{
+    ma_peak_node* pPeakNode = (ma_peak_node*)pNode;
+
+    MA_ASSERT(pNode != NULL);
+    (void)pFrameCountIn;
+
+    ma_peak2_process_pcm_frames(&pPeakNode->peak, ppFramesOut[0], ppFramesIn[0], *pFrameCountOut);
+}
+
+static ma_node_vtable g_ma_peak_node_vtable =
+{
+    ma_peak_node_process_pcm_frames,
+    NULL, /* onGetRequiredInputFrameCount */
+    1, /* One input. */
+    1, /* One output. */
+    0  /* Default flags. */
+};
+
+MA_API ma_result ma_peak_node_init(ma_node_graph* pNodeGraph, const ma_peak_node_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_peak_node* pNode)
+{
+    ma_result result;
+    ma_node_config baseNodeConfig;
+
+    if (pNode == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    MA_ZERO_OBJECT(pNode);
+
+    if (pConfig == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    if (pConfig->peak.format != ma_format_f32) {
+        return MA_INVALID_ARGS; /* The format must be f32. */
+    }
+
+    result = ma_peak2_init(&pConfig->peak, pAllocationCallbacks, &pNode->peak);
+    if (result != MA_SUCCESS) {
+        return result; /* Note: the base node has not been initialized yet at this point, so there is nothing to uninitialize on failure. */
+    }
+
+    baseNodeConfig = ma_node_config_init();
+    baseNodeConfig.vtable = &g_ma_peak_node_vtable;
+    baseNodeConfig.pInputChannels = &pConfig->peak.channels;
+    baseNodeConfig.pOutputChannels = &pConfig->peak.channels;
+
+    result = ma_node_init(pNodeGraph, &baseNodeConfig, pAllocationCallbacks, pNode);
+    if (result != MA_SUCCESS) {
+        return result;
+    }
+
+    return result;
+}
+
+MA_API ma_result ma_peak_node_reinit(const ma_peak_config* pConfig, ma_peak_node* pNode)
+{
+    ma_peak_node* pPeakNode = (ma_peak_node*)pNode;
+
+    if (pNode == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    return ma_peak2_reinit(pConfig, &pPeakNode->peak);
+}
+
+MA_API void ma_peak_node_uninit(ma_peak_node* pNode, const ma_allocation_callbacks* pAllocationCallbacks)
+{
+    ma_peak_node* pPeakNode = (ma_peak_node*)pNode;
+
+    if (pNode == NULL) {
+        return;
+    }
+
+    ma_node_uninit(pNode, pAllocationCallbacks);
+    ma_peak2_uninit(&pPeakNode->peak, pAllocationCallbacks);
+}
+
+
+
+/*
+Low Shelf Filter Node
+*/
+MA_API ma_loshelf_node_config ma_loshelf_node_config_init(ma_uint32 channels, ma_uint32 sampleRate, double gainDB, double q, double frequency)
+{
+    ma_loshelf_node_config config;
+
+    config.nodeConfig = ma_node_config_init();
+    config.loshelf = ma_loshelf2_config_init(ma_format_f32, channels, sampleRate, gainDB, q, frequency);
+
+    return config;
+}
+
+static void ma_loshelf_node_process_pcm_frames(ma_node* pNode, const float** ppFramesIn, ma_uint32* pFrameCountIn, float** ppFramesOut, ma_uint32* pFrameCountOut)
+{
+    ma_loshelf_node* pLoshelfNode = (ma_loshelf_node*)pNode;
+
+    MA_ASSERT(pNode != NULL);
+    (void)pFrameCountIn;
+
+    ma_loshelf2_process_pcm_frames(&pLoshelfNode->loshelf, ppFramesOut[0], ppFramesIn[0], *pFrameCountOut);
+}
+
+static ma_node_vtable g_ma_loshelf_node_vtable =
+{
+    ma_loshelf_node_process_pcm_frames,
+    NULL, /* onGetRequiredInputFrameCount */
+    1, /* One input. */
+    1, /* One output. */
+    0  /* Default flags. */
+};
+
+MA_API ma_result ma_loshelf_node_init(ma_node_graph* pNodeGraph, const ma_loshelf_node_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_loshelf_node* pNode)
+{
+    ma_result result;
+    ma_node_config baseNodeConfig;
+
+    if (pNode == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    MA_ZERO_OBJECT(pNode);
+
+    if (pConfig == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    if (pConfig->loshelf.format != ma_format_f32) {
+        return MA_INVALID_ARGS; /* The format must be f32. */
+    }
+
+    result = ma_loshelf2_init(&pConfig->loshelf, pAllocationCallbacks, &pNode->loshelf);
+    if (result != MA_SUCCESS) {
+        return result;
+    }
+
+    baseNodeConfig = ma_node_config_init();
+    baseNodeConfig.vtable = &g_ma_loshelf_node_vtable;
+    baseNodeConfig.pInputChannels = &pConfig->loshelf.channels;
+    baseNodeConfig.pOutputChannels = &pConfig->loshelf.channels;
+
+    result = ma_node_init(pNodeGraph, &baseNodeConfig, pAllocationCallbacks, pNode);
+    if (result != MA_SUCCESS) {
+        return result;
+    }
+
+    return result;
+}
+
+MA_API ma_result ma_loshelf_node_reinit(const ma_loshelf_config* pConfig, ma_loshelf_node* pNode)
+{
+    ma_loshelf_node* pLoshelfNode = (ma_loshelf_node*)pNode;
+
+    if (pNode == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    return ma_loshelf2_reinit(pConfig, &pLoshelfNode->loshelf);
+}
+
+MA_API void ma_loshelf_node_uninit(ma_loshelf_node* pNode, const ma_allocation_callbacks* pAllocationCallbacks)
+{
+    ma_loshelf_node* pLoshelfNode = (ma_loshelf_node*)pNode;
+
+    if (pNode == NULL) {
+        return;
+    }
+
+    ma_node_uninit(pNode, pAllocationCallbacks);
+    ma_loshelf2_uninit(&pLoshelfNode->loshelf, pAllocationCallbacks);
+}
+
+
+
+/*
+High Shelf Filter Node
+*/
+MA_API ma_hishelf_node_config ma_hishelf_node_config_init(ma_uint32 channels, ma_uint32 sampleRate, double gainDB, double q, double frequency)
+{
+    ma_hishelf_node_config config;
+
+    config.nodeConfig = ma_node_config_init();
+    config.hishelf = ma_hishelf2_config_init(ma_format_f32, channels, sampleRate, gainDB, q, frequency);
+
+    return config;
+}
+
+static void ma_hishelf_node_process_pcm_frames(ma_node* pNode, const float** ppFramesIn, ma_uint32* pFrameCountIn, float** ppFramesOut, ma_uint32* pFrameCountOut)
+{
+    ma_hishelf_node* pHishelfNode = (ma_hishelf_node*)pNode;
+
+    MA_ASSERT(pNode != NULL);
+    (void)pFrameCountIn;
+
+    ma_hishelf2_process_pcm_frames(&pHishelfNode->hishelf, ppFramesOut[0], ppFramesIn[0], *pFrameCountOut);
+}
+
+static ma_node_vtable g_ma_hishelf_node_vtable =
+{
+    ma_hishelf_node_process_pcm_frames,
+    NULL, /* onGetRequiredInputFrameCount */
+    1, /* One input. */
+    1, /* One output. */
+    0  /* Default flags. */
+};
+
+MA_API ma_result ma_hishelf_node_init(ma_node_graph* pNodeGraph, const ma_hishelf_node_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_hishelf_node* pNode)
+{
+    ma_result result;
+    ma_node_config baseNodeConfig;
+
+    if (pNode == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    MA_ZERO_OBJECT(pNode);
+
+    if (pConfig == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    if (pConfig->hishelf.format != ma_format_f32) {
+        return MA_INVALID_ARGS; /* The format must be f32. */
+    }
+
+    result = ma_hishelf2_init(&pConfig->hishelf, pAllocationCallbacks, &pNode->hishelf);
+    if (result != MA_SUCCESS) {
+        return result;
+    }
+
+    baseNodeConfig = ma_node_config_init();
+    baseNodeConfig.vtable = &g_ma_hishelf_node_vtable;
+    baseNodeConfig.pInputChannels = &pConfig->hishelf.channels;
+    baseNodeConfig.pOutputChannels = &pConfig->hishelf.channels;
+
+    result = ma_node_init(pNodeGraph, &baseNodeConfig, pAllocationCallbacks, pNode);
+    if (result != MA_SUCCESS) {
+        return result;
+    }
+
+    return result;
+}
+
+MA_API ma_result ma_hishelf_node_reinit(const ma_hishelf_config* pConfig, ma_hishelf_node* pNode)
+{
+    ma_hishelf_node* pHishelfNode = (ma_hishelf_node*)pNode;
+
+    if (pNode == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    return ma_hishelf2_reinit(pConfig, &pHishelfNode->hishelf);
+}
+
+MA_API void ma_hishelf_node_uninit(ma_hishelf_node* pNode, const ma_allocation_callbacks* pAllocationCallbacks)
+{
+    ma_hishelf_node* pHishelfNode = (ma_hishelf_node*)pNode;
+
+    if (pNode == NULL) {
+        return;
+    }
+
+    ma_node_uninit(pNode, pAllocationCallbacks);
+    ma_hishelf2_uninit(&pHishelfNode->hishelf, pAllocationCallbacks);
+}
+
+
+
+
+MA_API ma_delay_node_config ma_delay_node_config_init(ma_uint32 channels, ma_uint32 sampleRate, ma_uint32 delayInFrames, float decay)
+{
+    ma_delay_node_config config;
+
+    config.nodeConfig = ma_node_config_init();
+    config.delay = ma_delay_config_init(channels, sampleRate, delayInFrames, decay);
+
+    return config;
+}
+
+
+static void ma_delay_node_process_pcm_frames(ma_node* pNode, const float** ppFramesIn, ma_uint32* pFrameCountIn, float** ppFramesOut, ma_uint32* pFrameCountOut)
+{
+    ma_delay_node* pDelayNode = (ma_delay_node*)pNode;
+
+    (void)pFrameCountIn;
+
+    ma_delay_process_pcm_frames(&pDelayNode->delay, ppFramesOut[0], ppFramesIn[0], *pFrameCountOut);
+}
+
+static ma_node_vtable g_ma_delay_node_vtable =
+{
+    ma_delay_node_process_pcm_frames,
+    NULL,
+    1, /* 1 input channel. */
+    1, /* 1 output channel. */
+    MA_NODE_FLAG_CONTINUOUS_PROCESSING /* Delay requires continuous processing to ensure the tail gets processed. */
+};
+
+MA_API ma_result ma_delay_node_init(ma_node_graph* pNodeGraph, const ma_delay_node_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_delay_node* pDelayNode)
+{
+    ma_result result;
+    ma_node_config baseConfig;
+
+    if (pDelayNode == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    MA_ZERO_OBJECT(pDelayNode);
+
+    result = ma_delay_init(&pConfig->delay, pAllocationCallbacks, &pDelayNode->delay);
+    if (result != MA_SUCCESS) {
+        return result;
+    }
+
+    baseConfig = pConfig->nodeConfig;
+    baseConfig.vtable = &g_ma_delay_node_vtable;
+    baseConfig.pInputChannels = &pConfig->delay.channels;
+    baseConfig.pOutputChannels = &pConfig->delay.channels;
+
+    result = ma_node_init(pNodeGraph, &baseConfig, pAllocationCallbacks, &pDelayNode->baseNode);
+    if (result != MA_SUCCESS) {
+        ma_delay_uninit(&pDelayNode->delay, pAllocationCallbacks);
+        return result;
+    }
+
+    return result;
+}
+
+MA_API void ma_delay_node_uninit(ma_delay_node* pDelayNode, const ma_allocation_callbacks* pAllocationCallbacks)
+{
+    if (pDelayNode == NULL) {
+        return;
+    }
+
+    /* The base node is always uninitialized first. */
+    ma_node_uninit(pDelayNode, pAllocationCallbacks);
+    ma_delay_uninit(&pDelayNode->delay, pAllocationCallbacks);
+}
+
+MA_API void ma_delay_node_set_wet(ma_delay_node* pDelayNode, float value)
+{
+    if (pDelayNode == NULL) {
+        return;
+    }
+
+    ma_delay_set_wet(&pDelayNode->delay, value);
+}
+
+MA_API float ma_delay_node_get_wet(const ma_delay_node* pDelayNode)
+{
+    if (pDelayNode == NULL) {
+        return 0;
+    }
+
+    return ma_delay_get_wet(&pDelayNode->delay);
+}
+
+MA_API void ma_delay_node_set_dry(ma_delay_node* pDelayNode, float value)
+{
+    if (pDelayNode == NULL) {
+        return;
+    }
+
+    ma_delay_set_dry(&pDelayNode->delay, value);
+}
+
+MA_API float ma_delay_node_get_dry(const ma_delay_node* pDelayNode)
+{
+    if (pDelayNode == NULL) {
+        return 0;
+    }
+
+    return ma_delay_get_dry(&pDelayNode->delay);
+}
+
+MA_API void ma_delay_node_set_decay(ma_delay_node* pDelayNode, float value)
+{
+    if (pDelayNode == NULL) {
+        return;
+    }
+
+    ma_delay_set_decay(&pDelayNode->delay, value);
+}
+
+MA_API float ma_delay_node_get_decay(const ma_delay_node* pDelayNode)
+{
+    if (pDelayNode == NULL) {
+        return 0;
+    }
+
+    return ma_delay_get_decay(&pDelayNode->delay);
+}
+#endif /* MA_NO_NODE_GRAPH */
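[Editor's note] The delay node is the one effect node above that uses MA_NODE_FLAG_CONTINUOUS_PROCESSING, which keeps the node processing after its inputs stop so the echo tail rings out. A hedged usage sketch, with a hypothetical helper name and illustrative parameter values:

    /* Sketch: a feedback echo in a node graph. 250ms delay at 48kHz with 25% decay. */
    static ma_result attach_echo(ma_node_graph* pGraph, ma_delay_node* pNode)
    {
        ma_delay_node_config config = ma_delay_node_config_init(2, 48000, 48000 / 4, 0.25f);
        ma_result result = ma_delay_node_init(pGraph, &config, NULL, pNode);
        if (result != MA_SUCCESS) {
            return result;
        }

        ma_delay_node_set_wet(pNode, 0.5f); /* Level of the delayed signal. */
        ma_delay_node_set_dry(pNode, 1.0f); /* Keep the original signal at full volume. */

        return ma_node_attach_output_bus(pNode, 0, ma_node_graph_get_endpoint(pGraph), 0);
    }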
+
+
+/* SECTION: miniaudio_engine.c */
+#if !defined(MA_NO_ENGINE) && !defined(MA_NO_NODE_GRAPH)
+/**************************************************************************************************************************************************************
+
+Engine
+
+**************************************************************************************************************************************************************/
+#define MA_SEEK_TARGET_NONE (~(ma_uint64)0)
+
+
+static void ma_sound_set_at_end(ma_sound* pSound, ma_bool32 atEnd)
+{
+    MA_ASSERT(pSound != NULL);
+    ma_atomic_exchange_32(&pSound->atEnd, atEnd);
+
+    /* Fire any callbacks or events. */
+    if (atEnd) {
+        if (pSound->endCallback != NULL) {
+            pSound->endCallback(pSound->pEndCallbackUserData, pSound);
+        }
+    }
+}
+
+static ma_bool32 ma_sound_get_at_end(const ma_sound* pSound)
+{
+    MA_ASSERT(pSound != NULL);
+    return ma_atomic_load_32(&pSound->atEnd);
+}
+
+
+MA_API ma_engine_node_config ma_engine_node_config_init(ma_engine* pEngine, ma_engine_node_type type, ma_uint32 flags)
+{
+    ma_engine_node_config config;
+
+    MA_ZERO_OBJECT(&config);
+    config.pEngine = pEngine;
+    config.type = type;
+    config.isPitchDisabled = (flags & MA_SOUND_FLAG_NO_PITCH) != 0;
+    config.isSpatializationDisabled = (flags & MA_SOUND_FLAG_NO_SPATIALIZATION) != 0;
+    config.monoExpansionMode = pEngine->monoExpansionMode;
+
+    return config;
+}
+
+
+static void ma_engine_node_update_pitch_if_required(ma_engine_node* pEngineNode)
+{
+    ma_bool32 isUpdateRequired = MA_FALSE;
+    float newPitch;
+
+    MA_ASSERT(pEngineNode != NULL);
+
+    newPitch = ma_atomic_load_explicit_f32(&pEngineNode->pitch, ma_atomic_memory_order_acquire);
+
+    if (pEngineNode->oldPitch != newPitch) {
+        pEngineNode->oldPitch = newPitch;
+        isUpdateRequired = MA_TRUE;
+    }
+
+    if (pEngineNode->oldDopplerPitch != pEngineNode->spatializer.dopplerPitch) {
+        pEngineNode->oldDopplerPitch = pEngineNode->spatializer.dopplerPitch;
+        isUpdateRequired = MA_TRUE;
+    }
+
+    if (isUpdateRequired) {
+        float basePitch = (float)pEngineNode->sampleRate / ma_engine_get_sample_rate(pEngineNode->pEngine);
+        ma_linear_resampler_set_rate_ratio(&pEngineNode->resampler, basePitch * pEngineNode->oldPitch * pEngineNode->oldDopplerPitch);
+    }
+}
+
+static ma_bool32 ma_engine_node_is_pitching_enabled(const ma_engine_node* pEngineNode)
+{
+    MA_ASSERT(pEngineNode != NULL);
+
+    /* Don't try to be clever by skipping resampling in the pitch=1 case or else you'll glitch when moving away from 1. */
+    return !ma_atomic_load_explicit_32(&pEngineNode->isPitchDisabled, ma_atomic_memory_order_acquire);
+}
+
+static ma_bool32 ma_engine_node_is_spatialization_enabled(const ma_engine_node* pEngineNode)
+{
+    MA_ASSERT(pEngineNode != NULL);
+
+    return !ma_atomic_load_explicit_32(&pEngineNode->isSpatializationDisabled, ma_atomic_memory_order_acquire);
+}
+
+static ma_uint64 ma_engine_node_get_required_input_frame_count(const ma_engine_node* pEngineNode, ma_uint64 outputFrameCount)
+{
+    ma_uint64 inputFrameCount = 0;
+
+    if (ma_engine_node_is_pitching_enabled(pEngineNode)) {
+        ma_result result = ma_linear_resampler_get_required_input_frame_count(&pEngineNode->resampler, outputFrameCount, &inputFrameCount);
+        if (result != MA_SUCCESS) {
+            inputFrameCount = 0;
+        }
+    } else {
+        inputFrameCount = outputFrameCount; /* No resampling, so 1:1. */
+    }
+
+    return inputFrameCount;
+}
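[Editor's note] The resampler ratio that ma_engine_node_update_pitch_if_required() installs is the product of the source-to-engine rate ratio, the user pitch, and the doppler pitch, so the required input frame count scales by roughly that factor. A back-of-the-envelope sketch (the exact answer, which accounts for the resampler's internal state, always comes from ma_linear_resampler_get_required_input_frame_count()):

    /* Hypothetical helper mirroring the ratio math above. E.g. a 48000Hz source on a
       44100Hz engine at pitch 1.5: 48000/44100 * 1.5 ~= 1.633, so producing 1024
       output frames consumes roughly 1672 input frames. */
    static ma_uint64 approx_required_input_frames(ma_uint32 sourceRate, ma_uint32 engineRate, float pitch, float dopplerPitch, ma_uint64 outputFrames)
    {
        double ratio = ((double)sourceRate / engineRate) * pitch * dopplerPitch;
        return (ma_uint64)(outputFrames * ratio);
    }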
+static ma_result ma_engine_node_set_volume(ma_engine_node* pEngineNode, float volume)
+{
+    if (pEngineNode == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    ma_atomic_float_set(&pEngineNode->volume, volume);
+
+    /* If we're not smoothing we should bypass the volume gainer entirely. */
+    if (pEngineNode->volumeSmoothTimeInPCMFrames == 0) {
+        /* We should always have an active spatializer because it can be enabled and disabled dynamically. We can just use that for holding our volume. */
+        ma_spatializer_set_master_volume(&pEngineNode->spatializer, volume);
+    } else {
+        /* We're using volume smoothing, so apply the master volume to the gainer. */
+        ma_gainer_set_gain(&pEngineNode->volumeGainer, volume);
+    }
+
+    return MA_SUCCESS;
+}
+
+static ma_result ma_engine_node_get_volume(const ma_engine_node* pEngineNode, float* pVolume)
+{
+    if (pVolume == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    *pVolume = 0.0f;
+
+    if (pEngineNode == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    *pVolume = ma_atomic_float_get((ma_atomic_float*)&pEngineNode->volume);
+
+    return MA_SUCCESS;
+}
+
+
+static void ma_engine_node_process_pcm_frames__general(ma_engine_node* pEngineNode, const float** ppFramesIn, ma_uint32* pFrameCountIn, float** ppFramesOut, ma_uint32* pFrameCountOut)
+{
+    ma_uint32 frameCountIn;
+    ma_uint32 frameCountOut;
+    ma_uint32 totalFramesProcessedIn;
+    ma_uint32 totalFramesProcessedOut;
+    ma_uint32 channelsIn;
+    ma_uint32 channelsOut;
+    ma_bool32 isPitchingEnabled;
+    ma_bool32 isFadingEnabled;
+    ma_bool32 isSpatializationEnabled;
+    ma_bool32 isPanningEnabled;
+    ma_bool32 isVolumeSmoothingEnabled;
+
+    frameCountIn = *pFrameCountIn;
+    frameCountOut = *pFrameCountOut;
+
+    channelsIn = ma_spatializer_get_input_channels(&pEngineNode->spatializer);
+    channelsOut = ma_spatializer_get_output_channels(&pEngineNode->spatializer);
+
+    totalFramesProcessedIn = 0;
+    totalFramesProcessedOut = 0;
+
+    /* Update the fader if applicable. */
+    {
+        ma_uint64 fadeLengthInFrames = ma_atomic_uint64_get(&pEngineNode->fadeSettings.fadeLengthInFrames);
+        if (fadeLengthInFrames != ~(ma_uint64)0) {
+            float fadeVolumeBeg = ma_atomic_float_get(&pEngineNode->fadeSettings.volumeBeg);
+            float fadeVolumeEnd = ma_atomic_float_get(&pEngineNode->fadeSettings.volumeEnd);
+            ma_int64 fadeStartOffsetInFrames = (ma_int64)ma_atomic_uint64_get(&pEngineNode->fadeSettings.absoluteGlobalTimeInFrames);
+            if (fadeStartOffsetInFrames == (ma_int64)(~(ma_uint64)0)) {
+                fadeStartOffsetInFrames = 0;
+            } else {
+                fadeStartOffsetInFrames -= ma_engine_get_time_in_pcm_frames(pEngineNode->pEngine);
+            }
+
+            ma_fader_set_fade_ex(&pEngineNode->fader, fadeVolumeBeg, fadeVolumeEnd, fadeLengthInFrames, fadeStartOffsetInFrames);
+
+            /* Reset the fade length so we don't erroneously apply it again. */
+            ma_atomic_uint64_set(&pEngineNode->fadeSettings.fadeLengthInFrames, ~(ma_uint64)0);
+        }
+    }
+
+    isPitchingEnabled = ma_engine_node_is_pitching_enabled(pEngineNode);
+    isFadingEnabled = pEngineNode->fader.volumeBeg != 1 || pEngineNode->fader.volumeEnd != 1;
+    isSpatializationEnabled = ma_engine_node_is_spatialization_enabled(pEngineNode);
+    isPanningEnabled = pEngineNode->panner.pan != 0 && channelsOut != 1;
+    isVolumeSmoothingEnabled = pEngineNode->volumeSmoothTimeInPCMFrames > 0;
+
+    /* Keep going while we've still got data available for processing. */
+    while (totalFramesProcessedOut < frameCountOut) {
+        /*
+        We need to process in a specific order. We always do resampling first because it's likely
+        we're going to be increasing the channel count after spatialization. Also, I want to do
+        fading based on the output sample rate.
+
+        We'll first read into a buffer from the resampler. Then we'll do all processing that
+        operates on the input channel count. We'll then get the spatializer to output to the
+        output buffer and then do all effects from that point directly in the output buffer
+        in-place.
+
+        Note that we're always running the resampler if pitching is enabled, even when the pitch
+        is 1. If we try to be clever and skip resampling when the pitch is 1, we'll get a glitch
+        when we move away from 1, back to 1, and then away from 1 again. We'll want to implement
+        any pitch=1 optimizations in the resampler itself.
+
+        There's a small optimization here that we'll utilize since it might be a fairly common
+        case. When the input and output channel counts are the same, we'll read straight into the
+        output buffer from the resampler and do everything in-place.
+        */
+        const float* pRunningFramesIn;
+        float* pRunningFramesOut;
+        float* pWorkingBuffer;   /* This is the buffer that we'll be processing frames in. It's in the input channel count. */
+        float temp[MA_DATA_CONVERTER_STACK_BUFFER_SIZE / sizeof(float)];
+        ma_uint32 tempCapInFrames = ma_countof(temp) / channelsIn;
+        ma_uint32 framesAvailableIn;
+        ma_uint32 framesAvailableOut;
+        ma_uint32 framesJustProcessedIn;
+        ma_uint32 framesJustProcessedOut;
+        ma_bool32 isWorkingBufferValid = MA_FALSE;
+
+        framesAvailableIn = frameCountIn - totalFramesProcessedIn;
+        framesAvailableOut = frameCountOut - totalFramesProcessedOut;
+
+        pRunningFramesIn = ma_offset_pcm_frames_const_ptr_f32(ppFramesIn[0], totalFramesProcessedIn, channelsIn);
+        pRunningFramesOut = ma_offset_pcm_frames_ptr_f32(ppFramesOut[0], totalFramesProcessedOut, channelsOut);
+
+        if (channelsIn == channelsOut) {
+            /* Fast path. Channel counts are the same. No need for an intermediary input buffer. */
+            pWorkingBuffer = pRunningFramesOut;
+        } else {
+            /* Slow path. Channel counts are different. Need to use an intermediary input buffer. */
+            pWorkingBuffer = temp;
+            if (framesAvailableOut > tempCapInFrames) {
+                framesAvailableOut = tempCapInFrames;
+            }
+        }
+
+        /* First is resampler. */
+        if (isPitchingEnabled) {
+            ma_uint64 resampleFrameCountIn = framesAvailableIn;
+            ma_uint64 resampleFrameCountOut = framesAvailableOut;
+
+            ma_linear_resampler_process_pcm_frames(&pEngineNode->resampler, pRunningFramesIn, &resampleFrameCountIn, pWorkingBuffer, &resampleFrameCountOut);
+            isWorkingBufferValid = MA_TRUE;
+
+            framesJustProcessedIn = (ma_uint32)resampleFrameCountIn;
+            framesJustProcessedOut = (ma_uint32)resampleFrameCountOut;
+        } else {
+            framesJustProcessedIn = ma_min(framesAvailableIn, framesAvailableOut);
+            framesJustProcessedOut = framesJustProcessedIn; /* When no resampling is being performed, the number of output frames is the same as input frames. */
+        }
+
+        /* Fading. */
+        if (isFadingEnabled) {
+            if (isWorkingBufferValid) {
+                ma_fader_process_pcm_frames(&pEngineNode->fader, pWorkingBuffer, pWorkingBuffer, framesJustProcessedOut); /* In-place processing. */
+            } else {
+                ma_fader_process_pcm_frames(&pEngineNode->fader, pWorkingBuffer, pRunningFramesIn, framesJustProcessedOut);
+                isWorkingBufferValid = MA_TRUE;
+            }
+        }
+
+        /*
+        If we're using smoothing, we won't be applying volume via the spatializer, but instead from a ma_gainer. In this case
+        we'll want to apply our volume now.
+        */
+        if (isVolumeSmoothingEnabled) {
+            if (isWorkingBufferValid) {
+                ma_gainer_process_pcm_frames(&pEngineNode->volumeGainer, pWorkingBuffer, pWorkingBuffer, framesJustProcessedOut);
+            } else {
+                ma_gainer_process_pcm_frames(&pEngineNode->volumeGainer, pWorkingBuffer, pRunningFramesIn, framesJustProcessedOut);
+                isWorkingBufferValid = MA_TRUE;
+            }
+        }
+
+        /*
+        If at this point we still haven't actually done anything with the working buffer we need
+        to just read straight from the input buffer.
+        */
+        if (isWorkingBufferValid == MA_FALSE) {
+            pWorkingBuffer = (float*)pRunningFramesIn; /* Naughty const cast, but it's safe at this point because we won't ever be writing to it from this point out. */
+        }
+
+        /* Spatialization. */
+        if (isSpatializationEnabled) {
+            ma_uint32 iListener;
+
+            /*
+            When determining the listener to use, we first check to see if the sound is pinned to a
+            specific listener. If so, we use that. Otherwise we just use the closest listener.
+            */
+            if (pEngineNode->pinnedListenerIndex != MA_LISTENER_INDEX_CLOSEST && pEngineNode->pinnedListenerIndex < ma_engine_get_listener_count(pEngineNode->pEngine)) {
+                iListener = pEngineNode->pinnedListenerIndex;
+            } else {
+                ma_vec3f spatializerPosition = ma_spatializer_get_position(&pEngineNode->spatializer);
+                iListener = ma_engine_find_closest_listener(pEngineNode->pEngine, spatializerPosition.x, spatializerPosition.y, spatializerPosition.z);
+            }
+
+            ma_spatializer_process_pcm_frames(&pEngineNode->spatializer, &pEngineNode->pEngine->listeners[iListener], pRunningFramesOut, pWorkingBuffer, framesJustProcessedOut);
+        } else {
+            /* No spatialization, but we still need to do channel conversion and master volume. */
+            float volume;
+            ma_engine_node_get_volume(pEngineNode, &volume); /* Should never fail. */
+
+            if (channelsIn == channelsOut) {
+                /* No channel conversion required. Just copy straight to the output buffer. */
+                if (isVolumeSmoothingEnabled) {
+                    /* Volume has already been applied. Just copy straight to the output buffer. */
+                    ma_copy_pcm_frames(pRunningFramesOut, pWorkingBuffer, framesJustProcessedOut * channelsOut, ma_format_f32, channelsOut);
+                } else {
+                    /* Volume has not been applied yet. Copy and apply volume in the same pass. */
+                    ma_copy_and_apply_volume_factor_f32(pRunningFramesOut, pWorkingBuffer, framesJustProcessedOut * channelsOut, volume);
+                }
+            } else {
+                /* Channel conversion required. TODO: Add support for channel maps here. */
+                ma_channel_map_apply_f32(pRunningFramesOut, NULL, channelsOut, pWorkingBuffer, NULL, channelsIn, framesJustProcessedOut, ma_channel_mix_mode_simple, pEngineNode->monoExpansionMode);
+
+                /* If we're using smoothing, the volume will have already been applied. */
+                if (!isVolumeSmoothingEnabled) {
+                    ma_apply_volume_factor_f32(pRunningFramesOut, framesJustProcessedOut * channelsOut, volume);
+                }
+            }
+        }
+
+        /* At this point we can guarantee that the output buffer contains valid data. We can process everything in place now. */
+
+        /* Panning. */
+        if (isPanningEnabled) {
+            ma_panner_process_pcm_frames(&pEngineNode->panner, pRunningFramesOut, pRunningFramesOut, framesJustProcessedOut); /* In-place processing. */
+        }
+
+        /* We're done for this chunk. */
+        totalFramesProcessedIn += framesJustProcessedIn;
+        totalFramesProcessedOut += framesJustProcessedOut;
+
+        /* If we didn't process any output frames this iteration it means we've either run out of input data, or run out of room in the output buffer. */
+        if (framesJustProcessedOut == 0) {
+            break;
+        }
+    }
+
+    /* At this point we're done processing. */
+    *pFrameCountIn = totalFramesProcessedIn;
+    *pFrameCountOut = totalFramesProcessedOut;
+}
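[Editor's note] The fade settings consumed at the top of ma_engine_node_process_pcm_frames__general() are normally produced from another thread via the ma_sound fade API, which stores them atomically for the audio thread to pick up. A hedged sketch, with a hypothetical helper name:

    /* Sketch: fade a sound in from silence over one second's worth of frames. */
    static void start_with_fade(ma_sound* pSound, ma_uint32 sampleRate)
    {
        ma_sound_set_fade_in_pcm_frames(pSound, 0.0f, 1.0f, sampleRate);
        ma_sound_start(pSound);
    }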
+
+static void ma_engine_node_process_pcm_frames__sound(ma_node* pNode, const float** ppFramesIn, ma_uint32* pFrameCountIn, float** ppFramesOut, ma_uint32* pFrameCountOut)
+{
+    /* For sounds, we need to first read from the data source. Then we need to apply the engine effects (pan, pitch, fades, etc.). */
+    ma_result result = MA_SUCCESS;
+    ma_sound* pSound = (ma_sound*)pNode;
+    ma_uint32 frameCount = *pFrameCountOut;
+    ma_uint32 totalFramesRead = 0;
+    ma_format dataSourceFormat;
+    ma_uint32 dataSourceChannels;
+    ma_uint8 temp[MA_DATA_CONVERTER_STACK_BUFFER_SIZE];
+    ma_uint32 tempCapInFrames;
+    ma_uint64 seekTarget;
+
+    /* This is a data source node which means no input buses. */
+    (void)ppFramesIn;
+    (void)pFrameCountIn;
+
+    /* If we're marked at the end we need to stop the sound and do nothing. */
+    if (ma_sound_at_end(pSound)) {
+        ma_sound_stop(pSound);
+        *pFrameCountOut = 0;
+        return;
+    }
+
+    /* If we're seeking, do so now before reading. */
+    seekTarget = ma_atomic_load_64(&pSound->seekTarget);
+    if (seekTarget != MA_SEEK_TARGET_NONE) {
+        ma_data_source_seek_to_pcm_frame(pSound->pDataSource, seekTarget);
+
+        /* Any time-dependent effects need to have their times updated. */
+        ma_node_set_time(pSound, seekTarget);
+
+        ma_atomic_exchange_64(&pSound->seekTarget, MA_SEEK_TARGET_NONE);
+    }
+
+    /*
+    We want to update the pitch once. For sounds, this can be either at the start or at the end. If
+    we don't force this to only ever be updating once, we could end up in a situation where
+    retrieving the required input frame count ends up being different to what we actually retrieve.
+    What could happen is that the required input frame count is calculated, the pitch is updated,
+    and then this processing function is called resulting in a different number of input frames
+    being processed. Do not call this in ma_engine_node_process_pcm_frames__general() or else
+    you'll hit the aforementioned bug.
+    */
+    ma_engine_node_update_pitch_if_required(&pSound->engineNode);
+
+    /*
+    For the convenience of the caller, we're going to allow data sources to use non-floating-point formats and channel counts that differ
+    from the main engine.
+    */
+    result = ma_data_source_get_data_format(pSound->pDataSource, &dataSourceFormat, &dataSourceChannels, NULL, NULL, 0);
+    if (result == MA_SUCCESS) {
+        tempCapInFrames = sizeof(temp) / ma_get_bytes_per_frame(dataSourceFormat, dataSourceChannels);
+
+        /* Keep reading until we've read as much as was requested or we reach the end of the data source. */
+        while (totalFramesRead < frameCount) {
+            ma_uint32 framesRemaining = frameCount - totalFramesRead;
+            ma_uint32 framesToRead;
+            ma_uint64 framesJustRead;
+            ma_uint32 frameCountIn;
+            ma_uint32 frameCountOut;
+            const float* pRunningFramesIn;
+            float* pRunningFramesOut;
+
+            /*
+            The first thing we need to do is read into the temporary buffer. We can calculate exactly
+            how many input frames we'll need after resampling.
+            */
+            framesToRead = (ma_uint32)ma_engine_node_get_required_input_frame_count(&pSound->engineNode, framesRemaining);
+            if (framesToRead > tempCapInFrames) {
+                framesToRead = tempCapInFrames;
+            }
+
+            result = ma_data_source_read_pcm_frames(pSound->pDataSource, temp, framesToRead, &framesJustRead);
+
+            /* If we reached the end of the sound we'll want to mark it as at the end and stop it. This should never be returned for looping sounds. */
+            if (result == MA_AT_END) {
+                ma_sound_set_at_end(pSound, MA_TRUE); /* This will be set to false in ma_sound_start(). */
+            }
+
+            pRunningFramesOut = ma_offset_pcm_frames_ptr_f32(ppFramesOut[0], totalFramesRead, ma_node_get_output_channels(pNode, 0));
+
+            frameCountIn = (ma_uint32)framesJustRead;
+            frameCountOut = framesRemaining;
+
+            /* Convert if necessary. */
+            if (dataSourceFormat == ma_format_f32) {
+                /* Fast path. No data conversion necessary. */
+                pRunningFramesIn = (float*)temp;
+                ma_engine_node_process_pcm_frames__general(&pSound->engineNode, &pRunningFramesIn, &frameCountIn, &pRunningFramesOut, &frameCountOut);
+            } else {
+                /* Slow path. Need to do sample format conversion to f32. If we give the f32 buffer the same count as the first temp buffer, we're guaranteed it'll be large enough. */
+                float tempf32[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; /* Do not do `MA_DATA_CONVERTER_STACK_BUFFER_SIZE/sizeof(float)` here like we've done in other places. */
+                ma_convert_pcm_frames_format(tempf32, ma_format_f32, temp, dataSourceFormat, framesJustRead, dataSourceChannels, ma_dither_mode_none);
+
+                /* Now that we have our samples in f32 format we can process like normal. */
+                pRunningFramesIn = tempf32;
+                ma_engine_node_process_pcm_frames__general(&pSound->engineNode, &pRunningFramesIn, &frameCountIn, &pRunningFramesOut, &frameCountOut);
+            }
+
+            /* We should have processed all of our input frames since we calculated the required number of input frames at the top. */
+            MA_ASSERT(frameCountIn == framesJustRead);
+            totalFramesRead += (ma_uint32)frameCountOut; /* Safe cast. */
+
+            if (result != MA_SUCCESS || ma_sound_at_end(pSound)) {
+                break; /* Might have reached the end. */
+            }
+        }
+    }
+
+    *pFrameCountOut = totalFramesRead;
+}
+
+static void ma_engine_node_process_pcm_frames__group(ma_node* pNode, const float** ppFramesIn, ma_uint32* pFrameCountIn, float** ppFramesOut, ma_uint32* pFrameCountOut)
+{
+    /*
+    Make sure the pitch is updated before trying to read anything. It's important that this is done
+    only once and not in ma_engine_node_process_pcm_frames__general(). The reason for this is that
+    ma_engine_node_process_pcm_frames__general() will call ma_engine_node_get_required_input_frame_count(),
+    and if another thread modifies the pitch just after that call it can result in a glitch due to
+    the input rate changing.
+    */
+    ma_engine_node_update_pitch_if_required((ma_engine_node*)pNode);
+
+    /* For groups, the input data has already been read and we just need to apply the effect. */
+    ma_engine_node_process_pcm_frames__general((ma_engine_node*)pNode, ppFramesIn, pFrameCountIn, ppFramesOut, pFrameCountOut);
+}
+
+static ma_result ma_engine_node_get_required_input_frame_count__group(ma_node* pNode, ma_uint32 outputFrameCount, ma_uint32* pInputFrameCount)
+{
+    ma_uint64 inputFrameCount;
+
+    MA_ASSERT(pInputFrameCount != NULL);
+
+    /* Our pitch will affect this calculation. We need to update it. */
+    ma_engine_node_update_pitch_if_required((ma_engine_node*)pNode);
+
+    inputFrameCount = ma_engine_node_get_required_input_frame_count((ma_engine_node*)pNode, outputFrameCount);
+    if (inputFrameCount > 0xFFFFFFFF) {
+        inputFrameCount = 0xFFFFFFFF; /* Will never happen because miniaudio will only ever process in relatively small chunks. */
+    }
+
+    *pInputFrameCount = (ma_uint32)inputFrameCount;
+
+    return MA_SUCCESS;
+}
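[Editor's note] Sounds and groups share the same engine-node machinery but differ in their vtables below: a sound is a data source node with zero input buses, while a group has one input bus that receives the mix of everything attached to it. A hedged sketch of routing a sound through a group; "clip.wav" and the helper name are illustrative:

    /* Sketch: play a sound through a group so group-level volume/effects apply to it. */
    static ma_result play_through_group(ma_engine* pEngine, ma_sound_group* pGroup, ma_sound* pSound)
    {
        ma_result result = ma_sound_group_init(pEngine, 0, NULL, pGroup);
        if (result != MA_SUCCESS) {
            return result;
        }

        result = ma_sound_init_from_file(pEngine, "clip.wav", 0, pGroup, NULL, pSound);
        if (result != MA_SUCCESS) {
            return result;
        }

        return ma_sound_start(pSound);
    }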
+
+
+static ma_node_vtable g_ma_engine_node_vtable__sound =
+{
+    ma_engine_node_process_pcm_frames__sound,
+    NULL, /* onGetRequiredInputFrameCount */
+    0, /* Sounds are data source nodes which means they have zero inputs (their input is drawn from the data source itself). */
+    1, /* Sounds have one output bus. */
+    0  /* Default flags. */
+};
+
+static ma_node_vtable g_ma_engine_node_vtable__group =
+{
+    ma_engine_node_process_pcm_frames__group,
+    ma_engine_node_get_required_input_frame_count__group,
+    1, /* Groups have one input bus. */
+    1, /* Groups have one output bus. */
+    MA_NODE_FLAG_DIFFERENT_PROCESSING_RATES /* The engine node does resampling so should let miniaudio know about it. */
+};
+
+
+
+static ma_node_config ma_engine_node_base_node_config_init(const ma_engine_node_config* pConfig)
+{
+    ma_node_config baseNodeConfig;
+
+    if (pConfig->type == ma_engine_node_type_sound) {
+        /* Sound. */
+        baseNodeConfig = ma_node_config_init();
+        baseNodeConfig.vtable = &g_ma_engine_node_vtable__sound;
+        baseNodeConfig.initialState = ma_node_state_stopped; /* Sounds are stopped by default. */
+    } else {
+        /* Group. */
+        baseNodeConfig = ma_node_config_init();
+        baseNodeConfig.vtable = &g_ma_engine_node_vtable__group;
+        baseNodeConfig.initialState = ma_node_state_started; /* Groups are started by default. */
+    }
+
+    return baseNodeConfig;
+}
+
+static ma_spatializer_config ma_engine_node_spatializer_config_init(const ma_node_config* pBaseNodeConfig)
+{
+    return ma_spatializer_config_init(pBaseNodeConfig->pInputChannels[0], pBaseNodeConfig->pOutputChannels[0]);
+}
+
+typedef struct
+{
+    size_t sizeInBytes;
+    size_t baseNodeOffset;
+    size_t resamplerOffset;
+    size_t spatializerOffset;
+    size_t gainerOffset;
+} ma_engine_node_heap_layout;
+
+static ma_result ma_engine_node_get_heap_layout(const ma_engine_node_config* pConfig, ma_engine_node_heap_layout* pHeapLayout)
+{
+    ma_result result;
+    size_t tempHeapSize;
+    ma_node_config baseNodeConfig;
+    ma_linear_resampler_config resamplerConfig;
+    ma_spatializer_config spatializerConfig;
+    ma_gainer_config gainerConfig;
+    ma_uint32 channelsIn;
+    ma_uint32 channelsOut;
+    ma_channel defaultStereoChannelMap[2] = {MA_CHANNEL_SIDE_LEFT, MA_CHANNEL_SIDE_RIGHT}; /* <-- Consistent with the default channel map of a stereo listener. Means channel conversion can run on a fast path. */
+
+    MA_ASSERT(pHeapLayout);
+
+    MA_ZERO_OBJECT(pHeapLayout);
+
+    if (pConfig == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    if (pConfig->pEngine == NULL) {
+        return MA_INVALID_ARGS; /* An engine must be specified. */
+    }
+
+    pHeapLayout->sizeInBytes = 0;
+
+    channelsIn = (pConfig->channelsIn != 0) ? pConfig->channelsIn : ma_engine_get_channels(pConfig->pEngine);
+    channelsOut = (pConfig->channelsOut != 0) ? pConfig->channelsOut : ma_engine_get_channels(pConfig->pEngine);
+
+
+    /* Base node. */
+    baseNodeConfig = ma_engine_node_base_node_config_init(pConfig);
+    baseNodeConfig.pInputChannels = &channelsIn;
+    baseNodeConfig.pOutputChannels = &channelsOut;
+
+    result = ma_node_get_heap_size(ma_engine_get_node_graph(pConfig->pEngine), &baseNodeConfig, &tempHeapSize);
+    if (result != MA_SUCCESS) {
+        return result; /* Failed to retrieve the size of the heap for the base node. */
+    }
+
+    pHeapLayout->baseNodeOffset = pHeapLayout->sizeInBytes;
+    pHeapLayout->sizeInBytes += ma_align_64(tempHeapSize);
+
+
+    /* Resampler. */
+    resamplerConfig = ma_linear_resampler_config_init(ma_format_f32, channelsIn, 1, 1); /* Input and output sample rates don't affect the calculation of the heap size. */
+    resamplerConfig.lpfOrder = 0;
+
+    result = ma_linear_resampler_get_heap_size(&resamplerConfig, &tempHeapSize);
+    if (result != MA_SUCCESS) {
+        return result; /* Failed to retrieve the size of the heap for the resampler. */
+    }
+
+    pHeapLayout->resamplerOffset = pHeapLayout->sizeInBytes;
+    pHeapLayout->sizeInBytes += ma_align_64(tempHeapSize);
+
+
+    /* Spatializer. */
+    spatializerConfig = ma_engine_node_spatializer_config_init(&baseNodeConfig);
+
+    if (spatializerConfig.channelsIn == 2) {
+        spatializerConfig.pChannelMapIn = defaultStereoChannelMap;
+    }
+
+    result = ma_spatializer_get_heap_size(&spatializerConfig, &tempHeapSize);
+    if (result != MA_SUCCESS) {
+        return result; /* Failed to retrieve the size of the heap for the spatializer. */
+    }
+
+    pHeapLayout->spatializerOffset = pHeapLayout->sizeInBytes;
+    pHeapLayout->sizeInBytes += ma_align_64(tempHeapSize);
+
+
+    /* Gainer. Will not be used if we are not using smoothing. */
+    if (pConfig->volumeSmoothTimeInPCMFrames > 0) {
+        gainerConfig = ma_gainer_config_init(channelsIn, pConfig->volumeSmoothTimeInPCMFrames);
+
+        result = ma_gainer_get_heap_size(&gainerConfig, &tempHeapSize);
+        if (result != MA_SUCCESS) {
+            return result;
+        }
+
+        pHeapLayout->gainerOffset = pHeapLayout->sizeInBytes;
+        pHeapLayout->sizeInBytes += ma_align_64(tempHeapSize);
+    }
+
+
+    return MA_SUCCESS;
+}
+
+MA_API ma_result ma_engine_node_get_heap_size(const ma_engine_node_config* pConfig, size_t* pHeapSizeInBytes)
+{
+    ma_result result;
+    ma_engine_node_heap_layout heapLayout;
+
+    if (pHeapSizeInBytes == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    *pHeapSizeInBytes = 0;
+
+    result = ma_engine_node_get_heap_layout(pConfig, &heapLayout);
+    if (result != MA_SUCCESS) {
+        return result;
+    }
+
+    *pHeapSizeInBytes = heapLayout.sizeInBytes;
+
+    return MA_SUCCESS;
+}
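[Editor's note] The engine node uses miniaudio's two-phase init pattern: compute the heap size for all sub-objects, allocate one block, then initialize in place at the precomputed offsets. A hedged sketch of driving it with a caller-managed allocation (the helper name is hypothetical; ma_engine_node_init() below does the same thing with ma_malloc):

    #include <stdlib.h> /* malloc/free for the sketch; any allocator works. */

    static ma_result engine_node_init_custom_heap(const ma_engine_node_config* pConfig, ma_engine_node* pNode, void** ppHeap)
    {
        size_t sizeInBytes;
        ma_result result = ma_engine_node_get_heap_size(pConfig, &sizeInBytes);
        if (result != MA_SUCCESS) {
            return result;
        }

        *ppHeap = malloc(sizeInBytes);
        if (*ppHeap == NULL) {
            return MA_OUT_OF_MEMORY;
        }

        result = ma_engine_node_init_preallocated(pConfig, *ppHeap, pNode);
        if (result != MA_SUCCESS) {
            free(*ppHeap); /* Initialization failed, so the caller's heap is released here. */
            *ppHeap = NULL;
        }

        return result;
    }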
+
+MA_API ma_result ma_engine_node_init_preallocated(const ma_engine_node_config* pConfig, void* pHeap, ma_engine_node* pEngineNode)
+{
+    ma_result result;
+    ma_engine_node_heap_layout heapLayout;
+    ma_node_config baseNodeConfig;
+    ma_linear_resampler_config resamplerConfig;
+    ma_fader_config faderConfig;
+    ma_spatializer_config spatializerConfig;
+    ma_panner_config pannerConfig;
+    ma_gainer_config gainerConfig;
+    ma_uint32 channelsIn;
+    ma_uint32 channelsOut;
+    ma_channel defaultStereoChannelMap[2] = {MA_CHANNEL_SIDE_LEFT, MA_CHANNEL_SIDE_RIGHT}; /* <-- Consistent with the default channel map of a stereo listener. Means channel conversion can run on a fast path. */
+
+    if (pEngineNode == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    MA_ZERO_OBJECT(pEngineNode);
+
+    result = ma_engine_node_get_heap_layout(pConfig, &heapLayout);
+    if (result != MA_SUCCESS) {
+        return result;
+    }
+
+    if (pConfig->pinnedListenerIndex != MA_LISTENER_INDEX_CLOSEST && pConfig->pinnedListenerIndex >= ma_engine_get_listener_count(pConfig->pEngine)) {
+        return MA_INVALID_ARGS; /* Invalid listener. */
+    }
+
+    pEngineNode->_pHeap = pHeap;
+    MA_ZERO_MEMORY(pHeap, heapLayout.sizeInBytes);
+
+    pEngineNode->pEngine = pConfig->pEngine;
+    pEngineNode->sampleRate = (pConfig->sampleRate > 0) ? pConfig->sampleRate : ma_engine_get_sample_rate(pEngineNode->pEngine);
+    pEngineNode->volumeSmoothTimeInPCMFrames = pConfig->volumeSmoothTimeInPCMFrames;
+    pEngineNode->monoExpansionMode = pConfig->monoExpansionMode;
+    ma_atomic_float_set(&pEngineNode->volume, 1);
+    pEngineNode->pitch = 1;
+    pEngineNode->oldPitch = 1;
+    pEngineNode->oldDopplerPitch = 1;
+    pEngineNode->isPitchDisabled = pConfig->isPitchDisabled;
+    pEngineNode->isSpatializationDisabled = pConfig->isSpatializationDisabled;
+    pEngineNode->pinnedListenerIndex = pConfig->pinnedListenerIndex;
+    ma_atomic_float_set(&pEngineNode->fadeSettings.volumeBeg, 1);
+    ma_atomic_float_set(&pEngineNode->fadeSettings.volumeEnd, 1);
+    ma_atomic_uint64_set(&pEngineNode->fadeSettings.fadeLengthInFrames, (~(ma_uint64)0));
+    ma_atomic_uint64_set(&pEngineNode->fadeSettings.absoluteGlobalTimeInFrames, (~(ma_uint64)0)); /* <-- Indicates that the fade should start immediately. */
+
+    channelsIn = (pConfig->channelsIn != 0) ? pConfig->channelsIn : ma_engine_get_channels(pConfig->pEngine);
+    channelsOut = (pConfig->channelsOut != 0) ? pConfig->channelsOut : ma_engine_get_channels(pConfig->pEngine);
+
+    /*
+    If the sample rate of the sound is different to the engine, make sure pitching is enabled so that the resampler
+    is activated. Not doing this will result in the sound not being resampled if MA_SOUND_FLAG_NO_PITCH is used.
+    */
+    if (pEngineNode->sampleRate != ma_engine_get_sample_rate(pEngineNode->pEngine)) {
+        pEngineNode->isPitchDisabled = MA_FALSE;
+    }
+
+
+    /* Base node. */
+    baseNodeConfig = ma_engine_node_base_node_config_init(pConfig);
+    baseNodeConfig.pInputChannels = &channelsIn;
+    baseNodeConfig.pOutputChannels = &channelsOut;
+
+    result = ma_node_init_preallocated(&pConfig->pEngine->nodeGraph, &baseNodeConfig, ma_offset_ptr(pHeap, heapLayout.baseNodeOffset), &pEngineNode->baseNode);
+    if (result != MA_SUCCESS) {
+        goto error0;
+    }
+
+
+    /*
+    We can now initialize the effects we need in order to implement the engine node. There's a
+    defined order of operations here, mainly centered around when we convert our channels from the
+    data source's native channel count to the engine's channel count. As a rule, we want to do as
+    much computation as possible before spatialization because there's a chance that will increase
+    the channel count, thereby increasing the amount of work needing to be done to process.
+    */
+
+    /* We'll always do resampling first. */
+    resamplerConfig = ma_linear_resampler_config_init(ma_format_f32, baseNodeConfig.pInputChannels[0], pEngineNode->sampleRate, ma_engine_get_sample_rate(pEngineNode->pEngine));
+    resamplerConfig.lpfOrder = 0; /* <-- Need to disable low-pass filtering for pitch shifting for now because there are cases where the biquads are becoming unstable. Need to figure out a better fix for this. */
+
+    result = ma_linear_resampler_init_preallocated(&resamplerConfig, ma_offset_ptr(pHeap, heapLayout.resamplerOffset), &pEngineNode->resampler);
+    if (result != MA_SUCCESS) {
+        goto error1;
+    }
+
+
+    /* After resampling will come the fader. */
+    faderConfig = ma_fader_config_init(ma_format_f32, baseNodeConfig.pInputChannels[0], ma_engine_get_sample_rate(pEngineNode->pEngine));
+
+    result = ma_fader_init(&faderConfig, &pEngineNode->fader);
+    if (result != MA_SUCCESS) {
+        goto error2;
+    }
+
+
+    /*
+    Spatialization comes next. We spatialize based on the node's output channel count. It's up to the caller to
+    ensure channel counts link up correctly in the node graph.
+    */
+    spatializerConfig = ma_engine_node_spatializer_config_init(&baseNodeConfig);
+    spatializerConfig.gainSmoothTimeInFrames = pEngineNode->pEngine->gainSmoothTimeInFrames;
+
+    if (spatializerConfig.channelsIn == 2) {
+        spatializerConfig.pChannelMapIn = defaultStereoChannelMap;
+    }
+
+    result = ma_spatializer_init_preallocated(&spatializerConfig, ma_offset_ptr(pHeap, heapLayout.spatializerOffset), &pEngineNode->spatializer);
+    if (result != MA_SUCCESS) {
+        goto error2;
+    }
+
+
+    /*
+    After spatialization comes panning. We need to do this after spatialization because otherwise we wouldn't
+    be able to pan mono sounds.
+    */
+    pannerConfig = ma_panner_config_init(ma_format_f32, baseNodeConfig.pOutputChannels[0]);
+
+    result = ma_panner_init(&pannerConfig, &pEngineNode->panner);
+    if (result != MA_SUCCESS) {
+        goto error3;
+    }
+
+
+    /* We'll need a gainer for smoothing out volume changes if we have a non-zero smooth time. We apply this before converting to the output channel count. */
+    if (pConfig->volumeSmoothTimeInPCMFrames > 0) {
+        gainerConfig = ma_gainer_config_init(channelsIn, pConfig->volumeSmoothTimeInPCMFrames);
+
+        result = ma_gainer_init_preallocated(&gainerConfig, ma_offset_ptr(pHeap, heapLayout.gainerOffset), &pEngineNode->volumeGainer);
+        if (result != MA_SUCCESS) {
+            goto error3;
+        }
+    }
+
+
+    return MA_SUCCESS;
+
+    /* No need for allocation callbacks here because we use a preallocated heap. */
+error3: ma_spatializer_uninit(&pEngineNode->spatializer, NULL);
+error2: ma_linear_resampler_uninit(&pEngineNode->resampler, NULL);
+error1: ma_node_uninit(&pEngineNode->baseNode, NULL);
+error0: return result;
+}
+
+MA_API ma_result ma_engine_node_init(const ma_engine_node_config* pConfig, const ma_allocation_callbacks* pAllocationCallbacks, ma_engine_node* pEngineNode)
+{
+    ma_result result;
+    size_t heapSizeInBytes;
+    void* pHeap;
+
+    result = ma_engine_node_get_heap_size(pConfig, &heapSizeInBytes);
+    if (result != MA_SUCCESS) {
+        return result;
+    }
+
+    if (heapSizeInBytes > 0) {
+        pHeap = ma_malloc(heapSizeInBytes, pAllocationCallbacks);
+        if (pHeap == NULL) {
+            return MA_OUT_OF_MEMORY;
+        }
+    } else {
+        pHeap = NULL;
+    }
+
+    result = ma_engine_node_init_preallocated(pConfig, pHeap, pEngineNode);
+    if (result != MA_SUCCESS) {
+        ma_free(pHeap, pAllocationCallbacks);
+        return result;
+    }
+
+    pEngineNode->_ownsHeap = MA_TRUE;
+    return MA_SUCCESS;
+}
+
+MA_API void ma_engine_node_uninit(ma_engine_node* pEngineNode, const ma_allocation_callbacks* pAllocationCallbacks)
+{
+    /*
+    The base node always needs to be uninitialized first to ensure it's detached from the graph completely before we
+    destroy anything that might be in the middle of being used by the processing function.
+    */
+    ma_node_uninit(&pEngineNode->baseNode, pAllocationCallbacks);
+
+    /* Now that the node has been uninitialized we can safely uninitialize the rest. */
+    if (pEngineNode->volumeSmoothTimeInPCMFrames > 0) {
+        ma_gainer_uninit(&pEngineNode->volumeGainer, pAllocationCallbacks);
+    }
+
+    ma_spatializer_uninit(&pEngineNode->spatializer, pAllocationCallbacks);
+    ma_linear_resampler_uninit(&pEngineNode->resampler, pAllocationCallbacks);
+
+    /* Free the heap last. */
+    if (pEngineNode->_ownsHeap) {
+        ma_free(pEngineNode->_pHeap, pAllocationCallbacks);
+    }
+}
+
+
+MA_API ma_sound_config ma_sound_config_init(void)
+{
+    return ma_sound_config_init_2(NULL);
+}
+
+MA_API ma_sound_config ma_sound_config_init_2(ma_engine* pEngine)
+{
+    ma_sound_config config;
+
+    MA_ZERO_OBJECT(&config);
+
+    if (pEngine != NULL) {
+        config.monoExpansionMode = pEngine->monoExpansionMode;
+    } else {
+        config.monoExpansionMode = ma_mono_expansion_mode_default;
+    }
+
+    config.rangeEndInPCMFrames = ~((ma_uint64)0);
+    config.loopPointEndInPCMFrames = ~((ma_uint64)0);
+
+    return config;
+}
+
+MA_API ma_sound_group_config ma_sound_group_config_init(void)
+{
+    return ma_sound_group_config_init_2(NULL);
+}
+
+MA_API ma_sound_group_config ma_sound_group_config_init_2(ma_engine* pEngine)
+{
+    ma_sound_group_config config;
+
+    MA_ZERO_OBJECT(&config);
+
+    if (pEngine != NULL) {
+        config.monoExpansionMode = pEngine->monoExpansionMode;
+    } else {
+        config.monoExpansionMode = ma_mono_expansion_mode_default;
+    }
+
+    return config;
+}
+
+
+MA_API ma_engine_config ma_engine_config_init(void)
+{
+    ma_engine_config config;
+
+    MA_ZERO_OBJECT(&config);
+    config.listenerCount = 1; /* Always want at least one listener. */
+    config.monoExpansionMode = ma_mono_expansion_mode_default;
+
+    return config;
+}
+
+
+#if !defined(MA_NO_DEVICE_IO)
+static void ma_engine_data_callback_internal(ma_device* pDevice, void* pFramesOut, const void* pFramesIn, ma_uint32 frameCount)
+{
+    ma_engine* pEngine = (ma_engine*)pDevice->pUserData;
+
+    (void)pFramesIn;
+
+    /*
+    Experiment: Try processing a resource manager job if we're on the Emscripten build.
+
+    This serves two purposes:
+
+        1) It ensures jobs are actually processed at some point since we cannot guarantee that the
+           caller is doing the right thing and calling ma_resource_manager_process_next_job(); and
+
+        2) It's an attempt at working around an issue where processing jobs on the Emscripten main
+           loop doesn't work as well as it should. When trying to load sounds without the `DECODE`
+           flag or with the `ASYNC` flag, the sound data is just not able to be loaded in time
+           before the callback is processed. I think it's got something to do with the single-
+           threaded nature of Web, but I'm not entirely sure.
+    */
+    #if !defined(MA_NO_RESOURCE_MANAGER) && defined(MA_EMSCRIPTEN)
+    {
+        if (pEngine->pResourceManager != NULL) {
+            if ((pEngine->pResourceManager->config.flags & MA_RESOURCE_MANAGER_FLAG_NO_THREADING) != 0) {
+                ma_resource_manager_process_next_job(pEngine->pResourceManager);
+            }
+        }
+    }
+    #endif
+
+    ma_engine_read_pcm_frames(pEngine, pFramesOut, frameCount, NULL);
+}
+
+static ma_uint32 ma_device__get_processing_size_in_frames(ma_device* pDevice)
+{
+    /*
+    The processing size is the period size. The device can have a fixed processing size, or it can
+    be decided by the backend, in which case it can be variable.
+    */
+    if (pDevice->playback.intermediaryBufferCap > 0) {
+        /* Using a fixed sized processing callback. */
+        return pDevice->playback.intermediaryBufferCap;
+    } else {
+        /* Not using a fixed sized processing callback. Need to estimate the processing size based on the backend. */
+        return pDevice->playback.internalPeriodSizeInFrames;
+    }
+}
+#endif
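[Editor's note] ma_engine_init() below pulls all of this together: it creates (or adopts) a device, a node graph, listeners, and a resource manager. A minimal hedged usage sketch, assuming the single-file miniaudio header and an illustrative file name:

    #include <stdio.h>
    #include "miniaudio.h"

    int main(void)
    {
        ma_engine engine;
        ma_engine_config config = ma_engine_config_init(); /* Defaults: one listener, default playback device. */

        if (ma_engine_init(&config, &engine) != MA_SUCCESS) {
            return -1;
        }

        ma_engine_play_sound(&engine, "sound.wav", NULL); /* Fire-and-forget "inlined" sound. */
        getchar();                                        /* Keep the process alive while audio plays. */

        ma_engine_uninit(&engine);
        return 0;
    }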
+
+MA_API ma_result ma_engine_init(const ma_engine_config* pConfig, ma_engine* pEngine)
+{
+    ma_result result;
+    ma_node_graph_config nodeGraphConfig;
+    ma_engine_config engineConfig;
+    ma_spatializer_listener_config listenerConfig;
+    ma_uint32 iListener;
+
+    if (pEngine == NULL) {
+        return MA_INVALID_ARGS;
+    }
+
+    MA_ZERO_OBJECT(pEngine);
+
+    /* The config is allowed to be NULL in which case we use defaults for everything. */
+    if (pConfig != NULL) {
+        engineConfig = *pConfig;
+    } else {
+        engineConfig = ma_engine_config_init();
+    }
+
+    pEngine->monoExpansionMode = engineConfig.monoExpansionMode;
+    pEngine->defaultVolumeSmoothTimeInPCMFrames = engineConfig.defaultVolumeSmoothTimeInPCMFrames;
+    pEngine->onProcess = engineConfig.onProcess;
+    pEngine->pProcessUserData = engineConfig.pProcessUserData;
+    ma_allocation_callbacks_init_copy(&pEngine->allocationCallbacks, &engineConfig.allocationCallbacks);
+
+    #if !defined(MA_NO_RESOURCE_MANAGER)
+    {
+        pEngine->pResourceManager = engineConfig.pResourceManager;
+    }
+    #endif
+
+    #if !defined(MA_NO_DEVICE_IO)
+    {
+        pEngine->pDevice = engineConfig.pDevice;
+
+        /* If we don't have a device, we need one. */
+        if (pEngine->pDevice == NULL && engineConfig.noDevice == MA_FALSE) {
+            ma_device_config deviceConfig;
+
+            pEngine->pDevice = (ma_device*)ma_malloc(sizeof(*pEngine->pDevice), &pEngine->allocationCallbacks);
+            if (pEngine->pDevice == NULL) {
+                return MA_OUT_OF_MEMORY;
+            }
+
+            deviceConfig = ma_device_config_init(ma_device_type_playback);
+            deviceConfig.playback.pDeviceID = engineConfig.pPlaybackDeviceID;
+            deviceConfig.playback.format = ma_format_f32;
+            deviceConfig.playback.channels = engineConfig.channels;
+            deviceConfig.sampleRate = engineConfig.sampleRate;
+            deviceConfig.dataCallback = (engineConfig.dataCallback != NULL) ? engineConfig.dataCallback : ma_engine_data_callback_internal;
+            deviceConfig.pUserData = pEngine;
+            deviceConfig.notificationCallback = engineConfig.notificationCallback;
+            deviceConfig.periodSizeInFrames = engineConfig.periodSizeInFrames;
+            deviceConfig.periodSizeInMilliseconds = engineConfig.periodSizeInMilliseconds;
+            deviceConfig.noPreSilencedOutputBuffer = MA_TRUE; /* We'll always be outputting to every frame in the callback so there's no need for a pre-silenced buffer. */
+            deviceConfig.noClip = MA_TRUE; /* The engine will do clipping itself. */
+
+            if (engineConfig.pContext == NULL) {
+                ma_context_config contextConfig = ma_context_config_init();
+                contextConfig.allocationCallbacks = pEngine->allocationCallbacks;
+                contextConfig.pLog = engineConfig.pLog;
+
+                /* If the engine config does not specify a log, use the resource manager's if we have one. */
+                #ifndef MA_NO_RESOURCE_MANAGER
+                {
+                    if (contextConfig.pLog == NULL && engineConfig.pResourceManager != NULL) {
+                        contextConfig.pLog = ma_resource_manager_get_log(engineConfig.pResourceManager);
+                    }
+                }
+                #endif
+
+                result = ma_device_init_ex(NULL, 0, &contextConfig, &deviceConfig, pEngine->pDevice);
+            } else {
+                result = ma_device_init(engineConfig.pContext, &deviceConfig, pEngine->pDevice);
+            }
+
+            if (result != MA_SUCCESS) {
+                ma_free(pEngine->pDevice, &pEngine->allocationCallbacks);
+                pEngine->pDevice = NULL;
+                return result;
+            }
+
+            pEngine->ownsDevice = MA_TRUE;
+        }
+
+        /* Update the channel count and sample rate of the engine config so we can reference it below. */
+        if (pEngine->pDevice != NULL) {
+            engineConfig.channels = pEngine->pDevice->playback.channels;
+            engineConfig.sampleRate = pEngine->pDevice->sampleRate;
+
+            /*
+            The processing size used by the engine is determined by engineConfig.periodSizeInFrames. We want
+            to make this equal to what the device is using for its period size. If we don't do that, it's
+            possible that the node graph will split its processing into multiple passes which can introduce
+            glitching.
+            */
+            engineConfig.periodSizeInFrames = ma_device__get_processing_size_in_frames(pEngine->pDevice);
+        }
+    }
+    #endif
+
+    if (engineConfig.channels == 0 || engineConfig.sampleRate == 0) {
+        return MA_INVALID_ARGS;
+    }
+
+    pEngine->sampleRate = engineConfig.sampleRate;
+
+    /* The engine always uses either the log that was passed into the config, or the context's log if one is available. */
+    if (engineConfig.pLog != NULL) {
+        pEngine->pLog = engineConfig.pLog;
+    } else {
+        #if !defined(MA_NO_DEVICE_IO)
+        {
+            pEngine->pLog = ma_device_get_log(pEngine->pDevice);
+        }
+        #else
+        {
+            pEngine->pLog = NULL;
+        }
+        #endif
+    }
+
+
+    /* The engine is a node graph. This needs to be initialized after we have the device so we can determine the channel count. */
+    nodeGraphConfig = ma_node_graph_config_init(engineConfig.channels);
+    nodeGraphConfig.processingSizeInFrames = engineConfig.periodSizeInFrames;
+    nodeGraphConfig.preMixStackSizeInBytes = engineConfig.preMixStackSizeInBytes;
+
+    result = ma_node_graph_init(&nodeGraphConfig, &pEngine->allocationCallbacks, &pEngine->nodeGraph);
+    if (result != MA_SUCCESS) {
+        goto on_error_1;
+    }
+
+
+    /* We need at least one listener. */
+    if (engineConfig.listenerCount == 0) {
+        engineConfig.listenerCount = 1;
+    }
+
+    if (engineConfig.listenerCount > MA_ENGINE_MAX_LISTENERS) {
+        result = MA_INVALID_ARGS; /* Too many listeners. */
+        goto on_error_1;
+    }
+
+    for (iListener = 0; iListener < engineConfig.listenerCount; iListener += 1) {
+        listenerConfig = ma_spatializer_listener_config_init(ma_node_graph_get_channels(&pEngine->nodeGraph));
+
+        /*
+        If we're using a device, use the device's channel map for the listener. Otherwise just use
+        miniaudio's default channel map.
+        */
+        #if !defined(MA_NO_DEVICE_IO)
+        {
+            if (pEngine->pDevice != NULL) {
+                /*
+                Temporarily disabled. There is a subtle bug here where front-left and front-right
+                will be used by the device's channel map, but this is not what we want to use for
+                spatialization. Instead we want to use side-left and side-right. I need to figure
+                out a better solution for this. For now, disabling the use of device channel maps.
+                */
+                /*listenerConfig.pChannelMapOut = pEngine->pDevice->playback.channelMap;*/
+            }
+        }
+        #endif
+
+        result = ma_spatializer_listener_init(&listenerConfig, &pEngine->allocationCallbacks, &pEngine->listeners[iListener]); /* TODO: Change this to a pre-allocated heap. */
+        if (result != MA_SUCCESS) {
+            goto on_error_2;
+        }
+
+        pEngine->listenerCount += 1;
+    }
+
+
+    /* Gain smoothing for spatialized sounds. */
+    pEngine->gainSmoothTimeInFrames = engineConfig.gainSmoothTimeInFrames;
+    if (pEngine->gainSmoothTimeInFrames == 0) {
+        ma_uint32 gainSmoothTimeInMilliseconds = engineConfig.gainSmoothTimeInMilliseconds;
+        if (gainSmoothTimeInMilliseconds == 0) {
+            gainSmoothTimeInMilliseconds = 8;
+        }
+
+        pEngine->gainSmoothTimeInFrames = (gainSmoothTimeInMilliseconds * ma_engine_get_sample_rate(pEngine)) / 1000; /* 8ms by default. */
+    }
*/ + #ifndef MA_NO_RESOURCE_MANAGER + { + if (pEngine->pResourceManager == NULL) { + ma_resource_manager_config resourceManagerConfig; + + pEngine->pResourceManager = (ma_resource_manager*)ma_malloc(sizeof(*pEngine->pResourceManager), &pEngine->allocationCallbacks); + if (pEngine->pResourceManager == NULL) { + result = MA_OUT_OF_MEMORY; + goto on_error_2; + } + + resourceManagerConfig = ma_resource_manager_config_init(); + resourceManagerConfig.pLog = pEngine->pLog; /* Always use the engine's log for internally-managed resource managers. */ + resourceManagerConfig.decodedFormat = ma_format_f32; + resourceManagerConfig.decodedChannels = 0; /* Leave the decoded channel count as 0 so we can get good spatialization. */ + resourceManagerConfig.decodedSampleRate = ma_engine_get_sample_rate(pEngine); + ma_allocation_callbacks_init_copy(&resourceManagerConfig.allocationCallbacks, &pEngine->allocationCallbacks); + resourceManagerConfig.pVFS = engineConfig.pResourceManagerVFS; + + /* The Emscripten build cannot use threads unless it's targeting pthreads. */ + #if defined(MA_EMSCRIPTEN) && !defined(__EMSCRIPTEN_PTHREADS__) + { + resourceManagerConfig.jobThreadCount = 0; + resourceManagerConfig.flags |= MA_RESOURCE_MANAGER_FLAG_NO_THREADING; + } + #endif + + result = ma_resource_manager_init(&resourceManagerConfig, pEngine->pResourceManager); + if (result != MA_SUCCESS) { + goto on_error_3; + } + + pEngine->ownsResourceManager = MA_TRUE; + } + } + #endif + + /* Setup some stuff for inlined sounds. That is sounds played with ma_engine_play_sound(). */ + pEngine->inlinedSoundLock = 0; + pEngine->pInlinedSoundHead = NULL; + + /* Start the engine if required. This should always be the last step. */ + #if !defined(MA_NO_DEVICE_IO) + { + if (engineConfig.noAutoStart == MA_FALSE && pEngine->pDevice != NULL) { + result = ma_engine_start(pEngine); + if (result != MA_SUCCESS) { + goto on_error_4; /* Failed to start the engine. */ + } + } + } + #endif + + return MA_SUCCESS; + +#if !defined(MA_NO_DEVICE_IO) +on_error_4: +#endif +#if !defined(MA_NO_RESOURCE_MANAGER) +on_error_3: + if (pEngine->ownsResourceManager) { + ma_free(pEngine->pResourceManager, &pEngine->allocationCallbacks); + } +#endif /* MA_NO_RESOURCE_MANAGER */ +on_error_2: + for (iListener = 0; iListener < pEngine->listenerCount; iListener += 1) { + ma_spatializer_listener_uninit(&pEngine->listeners[iListener], &pEngine->allocationCallbacks); + } + + ma_node_graph_uninit(&pEngine->nodeGraph, &pEngine->allocationCallbacks); +on_error_1: + #if !defined(MA_NO_DEVICE_IO) + { + if (pEngine->ownsDevice) { + ma_device_uninit(pEngine->pDevice); + ma_free(pEngine->pDevice, &pEngine->allocationCallbacks); + } + } + #endif + + return result; +} + +MA_API void ma_engine_uninit(ma_engine* pEngine) +{ + ma_uint32 iListener; + + if (pEngine == NULL) { + return; + } + + /* The device must be uninitialized before the node graph to ensure the audio thread doesn't try accessing it. */ + #if !defined(MA_NO_DEVICE_IO) + { + if (pEngine->ownsDevice) { + ma_device_uninit(pEngine->pDevice); + ma_free(pEngine->pDevice, &pEngine->allocationCallbacks); + } else { + if (pEngine->pDevice != NULL) { + ma_device_stop(pEngine->pDevice); + } + } + } + #endif + + /* + All inlined sounds need to be deleted. I'm going to use a lock here just to future proof in case + I want to do some kind of garbage collection later on. 
+ */ + ma_spinlock_lock(&pEngine->inlinedSoundLock); + { + for (;;) { + ma_sound_inlined* pSoundToDelete = pEngine->pInlinedSoundHead; + if (pSoundToDelete == NULL) { + break; /* Done. */ + } + + pEngine->pInlinedSoundHead = pSoundToDelete->pNext; + + ma_sound_uninit(&pSoundToDelete->sound); + ma_free(pSoundToDelete, &pEngine->allocationCallbacks); + } + } + ma_spinlock_unlock(&pEngine->inlinedSoundLock); + + for (iListener = 0; iListener < pEngine->listenerCount; iListener += 1) { + ma_spatializer_listener_uninit(&pEngine->listeners[iListener], &pEngine->allocationCallbacks); + } + + /* Make sure the node graph is uninitialized after the audio thread has been shut down, to prevent the node graph from being accessed after it has been uninitialized. */ + ma_node_graph_uninit(&pEngine->nodeGraph, &pEngine->allocationCallbacks); + + /* Uninitialize the resource manager last to ensure we don't have a thread still trying to access it. */ +#ifndef MA_NO_RESOURCE_MANAGER + if (pEngine->ownsResourceManager) { + ma_resource_manager_uninit(pEngine->pResourceManager); + ma_free(pEngine->pResourceManager, &pEngine->allocationCallbacks); + } +#endif +} + +MA_API ma_result ma_engine_read_pcm_frames(ma_engine* pEngine, void* pFramesOut, ma_uint64 frameCount, ma_uint64* pFramesRead) +{ + ma_result result; + ma_uint64 framesRead = 0; + + if (pFramesRead != NULL) { + *pFramesRead = 0; + } + + result = ma_node_graph_read_pcm_frames(&pEngine->nodeGraph, pFramesOut, frameCount, &framesRead); + if (result != MA_SUCCESS) { + return result; + } + + if (pFramesRead != NULL) { + *pFramesRead = framesRead; + } + + if (pEngine->onProcess) { + pEngine->onProcess(pEngine->pProcessUserData, (float*)pFramesOut, framesRead); /* Safe cast to float* because the engine always works on floating point samples. 
*/ + } + + return MA_SUCCESS; +} + +MA_API ma_node_graph* ma_engine_get_node_graph(ma_engine* pEngine) +{ + if (pEngine == NULL) { + return NULL; + } + + return &pEngine->nodeGraph; +} + +#if !defined(MA_NO_RESOURCE_MANAGER) +MA_API ma_resource_manager* ma_engine_get_resource_manager(ma_engine* pEngine) +{ + if (pEngine == NULL) { + return NULL; + } + + #if !defined(MA_NO_RESOURCE_MANAGER) + { + return pEngine->pResourceManager; + } + #else + { + return NULL; + } + #endif +} +#endif + +MA_API ma_device* ma_engine_get_device(ma_engine* pEngine) +{ + if (pEngine == NULL) { + return NULL; + } + + #if !defined(MA_NO_DEVICE_IO) + { + return pEngine->pDevice; + } + #else + { + return NULL; + } + #endif +} + +MA_API ma_log* ma_engine_get_log(ma_engine* pEngine) +{ + if (pEngine == NULL) { + return NULL; + } + + if (pEngine->pLog != NULL) { + return pEngine->pLog; + } else { + #if !defined(MA_NO_DEVICE_IO) + { + return ma_device_get_log(ma_engine_get_device(pEngine)); + } + #else + { + return NULL; + } + #endif + } +} + +MA_API ma_node* ma_engine_get_endpoint(ma_engine* pEngine) +{ + return ma_node_graph_get_endpoint(&pEngine->nodeGraph); +} + +MA_API ma_uint64 ma_engine_get_time_in_pcm_frames(const ma_engine* pEngine) +{ + return ma_node_graph_get_time(&pEngine->nodeGraph); +} + +MA_API ma_uint64 ma_engine_get_time_in_milliseconds(const ma_engine* pEngine) +{ + return ma_engine_get_time_in_pcm_frames(pEngine) * 1000 / ma_engine_get_sample_rate(pEngine); +} + +MA_API ma_result ma_engine_set_time_in_pcm_frames(ma_engine* pEngine, ma_uint64 globalTime) +{ + return ma_node_graph_set_time(&pEngine->nodeGraph, globalTime); +} + +MA_API ma_result ma_engine_set_time_in_milliseconds(ma_engine* pEngine, ma_uint64 globalTime) +{ + return ma_engine_set_time_in_pcm_frames(pEngine, globalTime * ma_engine_get_sample_rate(pEngine) / 1000); +} + +MA_API ma_uint64 ma_engine_get_time(const ma_engine* pEngine) +{ + return ma_engine_get_time_in_pcm_frames(pEngine); +} + +MA_API ma_result ma_engine_set_time(ma_engine* pEngine, ma_uint64 globalTime) +{ + return ma_engine_set_time_in_pcm_frames(pEngine, globalTime); +} + +MA_API ma_uint32 ma_engine_get_channels(const ma_engine* pEngine) +{ + return ma_node_graph_get_channels(&pEngine->nodeGraph); +} + +MA_API ma_uint32 ma_engine_get_sample_rate(const ma_engine* pEngine) +{ + if (pEngine == NULL) { + return 0; + } + + return pEngine->sampleRate; +} + + +MA_API ma_result ma_engine_start(ma_engine* pEngine) +{ + ma_result result; + + if (pEngine == NULL) { + return MA_INVALID_ARGS; + } + + #if !defined(MA_NO_DEVICE_IO) + { + if (pEngine->pDevice != NULL) { + result = ma_device_start(pEngine->pDevice); + } else { + result = MA_INVALID_OPERATION; /* The engine is running without a device which means there's no real notion of "starting" the engine. */ + } + } + #else + { + result = MA_INVALID_OPERATION; /* Device IO is disabled, so there's no real notion of "starting" the engine. */ + } + #endif + + if (result != MA_SUCCESS) { + return result; + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_engine_stop(ma_engine* pEngine) +{ + ma_result result; + + if (pEngine == NULL) { + return MA_INVALID_ARGS; + } + + #if !defined(MA_NO_DEVICE_IO) + { + if (pEngine->pDevice != NULL) { + result = ma_device_stop(pEngine->pDevice); + } else { + result = MA_INVALID_OPERATION; /* The engine is running without a device which means there's no real notion of "stopping" the engine. 
*/ + } + } + #else + { + result = MA_INVALID_OPERATION; /* Device IO is disabled, so there's no real notion of "stopping" the engine. */ + } + #endif + + if (result != MA_SUCCESS) { + return result; + } + + return MA_SUCCESS; +} + +MA_API ma_result ma_engine_set_volume(ma_engine* pEngine, float volume) +{ + if (pEngine == NULL) { + return MA_INVALID_ARGS; + } + + return ma_node_set_output_bus_volume(ma_node_graph_get_endpoint(&pEngine->nodeGraph), 0, volume); +} + +MA_API float ma_engine_get_volume(ma_engine* pEngine) +{ + if (pEngine == NULL) { + return 0; + } + + return ma_node_get_output_bus_volume(ma_node_graph_get_endpoint(&pEngine->nodeGraph), 0); +} + +MA_API ma_result ma_engine_set_gain_db(ma_engine* pEngine, float gainDB) +{ + return ma_engine_set_volume(pEngine, ma_volume_db_to_linear(gainDB)); +} + +MA_API float ma_engine_get_gain_db(ma_engine* pEngine) +{ + return ma_volume_linear_to_db(ma_engine_get_volume(pEngine)); +} + + +MA_API ma_uint32 ma_engine_get_listener_count(const ma_engine* pEngine) +{ + if (pEngine == NULL) { + return 0; + } + + return pEngine->listenerCount; +} + +MA_API ma_uint32 ma_engine_find_closest_listener(const ma_engine* pEngine, float absolutePosX, float absolutePosY, float absolutePosZ) +{ + ma_uint32 iListener; + ma_uint32 iListenerClosest; + float closestLen2 = MA_FLT_MAX; + + if (pEngine == NULL || pEngine->listenerCount == 1) { + return 0; + } + + iListenerClosest = 0; + for (iListener = 0; iListener < pEngine->listenerCount; iListener += 1) { + if (ma_engine_listener_is_enabled(pEngine, iListener)) { + float len2 = ma_vec3f_len2(ma_vec3f_sub(ma_spatializer_listener_get_position(&pEngine->listeners[iListener]), ma_vec3f_init_3f(absolutePosX, absolutePosY, absolutePosZ))); + if (closestLen2 > len2) { + closestLen2 = len2; + iListenerClosest = iListener; + } + } + } + + MA_ASSERT(iListenerClosest < 255); + return iListenerClosest; +} + +MA_API void ma_engine_listener_set_position(ma_engine* pEngine, ma_uint32 listenerIndex, float x, float y, float z) +{ + if (pEngine == NULL || listenerIndex >= pEngine->listenerCount) { + return; + } + + ma_spatializer_listener_set_position(&pEngine->listeners[listenerIndex], x, y, z); +} + +MA_API ma_vec3f ma_engine_listener_get_position(const ma_engine* pEngine, ma_uint32 listenerIndex) +{ + if (pEngine == NULL || listenerIndex >= pEngine->listenerCount) { + return ma_vec3f_init_3f(0, 0, 0); + } + + return ma_spatializer_listener_get_position(&pEngine->listeners[listenerIndex]); +} + +MA_API void ma_engine_listener_set_direction(ma_engine* pEngine, ma_uint32 listenerIndex, float x, float y, float z) +{ + if (pEngine == NULL || listenerIndex >= pEngine->listenerCount) { + return; + } + + ma_spatializer_listener_set_direction(&pEngine->listeners[listenerIndex], x, y, z); +} + +MA_API ma_vec3f ma_engine_listener_get_direction(const ma_engine* pEngine, ma_uint32 listenerIndex) +{ + if (pEngine == NULL || listenerIndex >= pEngine->listenerCount) { + return ma_vec3f_init_3f(0, 0, -1); + } + + return ma_spatializer_listener_get_direction(&pEngine->listeners[listenerIndex]); +} + +MA_API void ma_engine_listener_set_velocity(ma_engine* pEngine, ma_uint32 listenerIndex, float x, float y, float z) +{ + if (pEngine == NULL || listenerIndex >= pEngine->listenerCount) { + return; + } + + ma_spatializer_listener_set_velocity(&pEngine->listeners[listenerIndex], x, y, z); +} + +MA_API ma_vec3f ma_engine_listener_get_velocity(const ma_engine* pEngine, ma_uint32 listenerIndex) +{ + if (pEngine == NULL || listenerIndex >= 
pEngine->listenerCount) { + return ma_vec3f_init_3f(0, 0, 0); + } + + return ma_spatializer_listener_get_velocity(&pEngine->listeners[listenerIndex]); +} + +MA_API void ma_engine_listener_set_cone(ma_engine* pEngine, ma_uint32 listenerIndex, float innerAngleInRadians, float outerAngleInRadians, float outerGain) +{ + if (pEngine == NULL || listenerIndex >= pEngine->listenerCount) { + return; + } + + ma_spatializer_listener_set_cone(&pEngine->listeners[listenerIndex], innerAngleInRadians, outerAngleInRadians, outerGain); +} + +MA_API void ma_engine_listener_get_cone(const ma_engine* pEngine, ma_uint32 listenerIndex, float* pInnerAngleInRadians, float* pOuterAngleInRadians, float* pOuterGain) +{ + if (pInnerAngleInRadians != NULL) { + *pInnerAngleInRadians = 0; + } + + if (pOuterAngleInRadians != NULL) { + *pOuterAngleInRadians = 0; + } + + if (pOuterGain != NULL) { + *pOuterGain = 0; + } + + if (pEngine == NULL || listenerIndex >= pEngine->listenerCount) { + return; + } + + ma_spatializer_listener_get_cone(&pEngine->listeners[listenerIndex], pInnerAngleInRadians, pOuterAngleInRadians, pOuterGain); +} + +MA_API void ma_engine_listener_set_world_up(ma_engine* pEngine, ma_uint32 listenerIndex, float x, float y, float z) +{ + if (pEngine == NULL || listenerIndex >= pEngine->listenerCount) { + return; + } + + ma_spatializer_listener_set_world_up(&pEngine->listeners[listenerIndex], x, y, z); +} + +MA_API ma_vec3f ma_engine_listener_get_world_up(const ma_engine* pEngine, ma_uint32 listenerIndex) +{ + if (pEngine == NULL || listenerIndex >= pEngine->listenerCount) { + return ma_vec3f_init_3f(0, 1, 0); + } + + return ma_spatializer_listener_get_world_up(&pEngine->listeners[listenerIndex]); +} + +MA_API void ma_engine_listener_set_enabled(ma_engine* pEngine, ma_uint32 listenerIndex, ma_bool32 isEnabled) +{ + if (pEngine == NULL || listenerIndex >= pEngine->listenerCount) { + return; + } + + ma_spatializer_listener_set_enabled(&pEngine->listeners[listenerIndex], isEnabled); +} + +MA_API ma_bool32 ma_engine_listener_is_enabled(const ma_engine* pEngine, ma_uint32 listenerIndex) +{ + if (pEngine == NULL || listenerIndex >= pEngine->listenerCount) { + return MA_FALSE; + } + + return ma_spatializer_listener_is_enabled(&pEngine->listeners[listenerIndex]); +} + + +#ifndef MA_NO_RESOURCE_MANAGER +MA_API ma_result ma_engine_play_sound_ex(ma_engine* pEngine, const char* pFilePath, ma_node* pNode, ma_uint32 nodeInputBusIndex) +{ + ma_result result = MA_SUCCESS; + ma_sound_inlined* pSound = NULL; + ma_sound_inlined* pNextSound = NULL; + + if (pEngine == NULL || pFilePath == NULL) { + return MA_INVALID_ARGS; + } + + /* Attach to the endpoint node if nothing is specified. */ + if (pNode == NULL) { + pNode = ma_node_graph_get_endpoint(&pEngine->nodeGraph); + nodeInputBusIndex = 0; + } + + /* + We want to check if we can recycle an already-allocated inlined sound. Since this is just a + helper I'm not *too* concerned about performance here and I'm happy to use a lock to keep + the implementation simple. Maybe this can be optimized later if there's enough demand, but + if this function is being used it probably means the caller doesn't really care too much. + + What we do is check the atEnd flag. When this is true, we can recycle the sound. Otherwise + we just keep iterating. If we reach the end without finding a sound to recycle we just + allocate a new one. This doesn't scale well for a massive number of sounds being played + simultaneously as we don't ever actually free the sound objects. 
Some kind of garbage + collection routine might be valuable for this, which I'll think about. + */ + ma_spinlock_lock(&pEngine->inlinedSoundLock); + { + ma_uint32 soundFlags = 0; + + for (pNextSound = pEngine->pInlinedSoundHead; pNextSound != NULL; pNextSound = pNextSound->pNext) { + if (ma_sound_at_end(&pNextSound->sound)) { + /* + The sound is at the end which means it's available for recycling. All we need to do + is uninitialize it and reinitialize it. All we're doing is recycling memory. + */ + pSound = pNextSound; + ma_atomic_fetch_sub_32(&pEngine->inlinedSoundCount, 1); + break; + } + } + + if (pSound != NULL) { + /* + We actually want to detach the sound from the list here. The reason is that we want the sound + to be in a consistent state, matching the non-recycled case, to simplify the logic below. + */ + if (pEngine->pInlinedSoundHead == pSound) { + pEngine->pInlinedSoundHead = pSound->pNext; + } + + if (pSound->pPrev != NULL) { + pSound->pPrev->pNext = pSound->pNext; + } + if (pSound->pNext != NULL) { + pSound->pNext->pPrev = pSound->pPrev; + } + + /* Now the previous sound needs to be uninitialized. */ + ma_sound_uninit(&pNextSound->sound); + } else { + /* No sound available for recycling. Allocate one now. */ + pSound = (ma_sound_inlined*)ma_malloc(sizeof(*pSound), &pEngine->allocationCallbacks); + } + + if (pSound != NULL) { /* Safety check for the allocation above. */ + /* + At this point we should have memory allocated for the inlined sound. We just need + to initialize it like a normal sound now. + */ + soundFlags |= MA_SOUND_FLAG_ASYNC; /* For inlined sounds we don't want to be sitting around waiting for stuff to load so force an async load. */ + soundFlags |= MA_SOUND_FLAG_NO_DEFAULT_ATTACHMENT; /* We want specific control over where the sound is attached in the graph. We'll attach it manually just before playing the sound. */ + soundFlags |= MA_SOUND_FLAG_NO_PITCH; /* Pitching isn't usable with inlined sounds, so disable it to save on speed. */ + soundFlags |= MA_SOUND_FLAG_NO_SPATIALIZATION; /* Not currently doing spatialization with inlined sounds, but this might actually change later. For now disable spatialization. Will be removed if we ever add support for spatialization here. */ + + result = ma_sound_init_from_file(pEngine, pFilePath, soundFlags, NULL, NULL, &pSound->sound); + if (result == MA_SUCCESS) { + /* Now attach the sound to the graph. */ + result = ma_node_attach_output_bus(pSound, 0, pNode, nodeInputBusIndex); + if (result == MA_SUCCESS) { + /* At this point the sound should be loaded and we can go ahead and add it to the list. The new item becomes the new head. */ + pSound->pNext = pEngine->pInlinedSoundHead; + pSound->pPrev = NULL; + + pEngine->pInlinedSoundHead = pSound; /* <-- This is what attaches the sound to the list. */ + if (pSound->pNext != NULL) { + pSound->pNext->pPrev = pSound; + } + } else { + ma_free(pSound, &pEngine->allocationCallbacks); + } + } else { + ma_free(pSound, &pEngine->allocationCallbacks); + } + } else { + result = MA_OUT_OF_MEMORY; + } + } + ma_spinlock_unlock(&pEngine->inlinedSoundLock); + + if (result != MA_SUCCESS) { + return result; + } + + /* Finally we can start playing the sound. */ + result = ma_sound_start(&pSound->sound); + if (result != MA_SUCCESS) { + /* Failed to start the sound. We need to mark it for recycling and return an error. 
*/ + ma_atomic_exchange_32(&pSound->sound.atEnd, MA_TRUE); + return result; + } + + ma_atomic_fetch_add_32(&pEngine->inlinedSoundCount, 1); + return result; +} + +MA_API ma_result ma_engine_play_sound(ma_engine* pEngine, const char* pFilePath, ma_sound_group* pGroup) +{ + return ma_engine_play_sound_ex(pEngine, pFilePath, pGroup, 0); +} +#endif + + +static ma_result ma_sound_preinit(ma_engine* pEngine, ma_sound* pSound) +{ + if (pSound == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pSound); + pSound->seekTarget = MA_SEEK_TARGET_NONE; + + if (pEngine == NULL) { + return MA_INVALID_ARGS; + } + + return MA_SUCCESS; +} + +static ma_result ma_sound_init_from_data_source_internal(ma_engine* pEngine, const ma_sound_config* pConfig, ma_sound* pSound) +{ + ma_result result; + ma_engine_node_config engineNodeConfig; + ma_engine_node_type type; /* Will be set to ma_engine_node_type_group if no data source is specified. */ + + /* Do not clear pSound to zero here - that's done at a higher level with ma_sound_preinit(). */ + MA_ASSERT(pEngine != NULL); + MA_ASSERT(pSound != NULL); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + pSound->pDataSource = pConfig->pDataSource; + + if (pConfig->pDataSource != NULL) { + type = ma_engine_node_type_sound; + } else { + type = ma_engine_node_type_group; + } + + /* + Sounds are engine nodes. Before we can initialize this we need to determine the channel count. + If we can't do this we need to abort. It's up to the caller to ensure they're using a data + source that provides this information upfront. + */ + engineNodeConfig = ma_engine_node_config_init(pEngine, type, pConfig->flags); + engineNodeConfig.channelsIn = pConfig->channelsIn; + engineNodeConfig.channelsOut = pConfig->channelsOut; + engineNodeConfig.volumeSmoothTimeInPCMFrames = pConfig->volumeSmoothTimeInPCMFrames; + engineNodeConfig.monoExpansionMode = pConfig->monoExpansionMode; + + if (engineNodeConfig.volumeSmoothTimeInPCMFrames == 0) { + engineNodeConfig.volumeSmoothTimeInPCMFrames = pEngine->defaultVolumeSmoothTimeInPCMFrames; + } + + /* If we're loading from a data source the input channel count needs to be the data source's native channel count. */ + if (pConfig->pDataSource != NULL) { + result = ma_data_source_get_data_format(pConfig->pDataSource, NULL, &engineNodeConfig.channelsIn, &engineNodeConfig.sampleRate, NULL, 0); + if (result != MA_SUCCESS) { + return result; /* Failed to retrieve the channel count. */ + } + + if (engineNodeConfig.channelsIn == 0) { + return MA_INVALID_OPERATION; /* Invalid channel count. */ + } + + if (engineNodeConfig.channelsOut == MA_SOUND_SOURCE_CHANNEL_COUNT) { + engineNodeConfig.channelsOut = engineNodeConfig.channelsIn; + } + } + + + /* Getting here means we should have a valid channel count and we can initialize the engine node. */ + result = ma_engine_node_init(&engineNodeConfig, &pEngine->allocationCallbacks, &pSound->engineNode); + if (result != MA_SUCCESS) { + return result; + } + + /* If no attachment is specified, attach the sound straight to the endpoint. */ + if (pConfig->pInitialAttachment == NULL) { + /* No group. Attach straight to the endpoint by default, unless the caller has requested that it not. */ + if ((pConfig->flags & MA_SOUND_FLAG_NO_DEFAULT_ATTACHMENT) == 0) { + result = ma_node_attach_output_bus(pSound, 0, ma_node_graph_get_endpoint(&pEngine->nodeGraph), 0); + } + } else { + /* An attachment is specified. Attach to it by default. 
The sound has only a single output bus, and the config will specify which input bus to attach to. */ + result = ma_node_attach_output_bus(pSound, 0, pConfig->pInitialAttachment, pConfig->initialAttachmentInputBusIndex); + } + + if (result != MA_SUCCESS) { + ma_engine_node_uninit(&pSound->engineNode, &pEngine->allocationCallbacks); + return result; + } + + + /* Apply initial range and looping state to the data source if applicable. */ + if (pConfig->rangeBegInPCMFrames != 0 || pConfig->rangeEndInPCMFrames != ~((ma_uint64)0)) { + ma_data_source_set_range_in_pcm_frames(ma_sound_get_data_source(pSound), pConfig->rangeBegInPCMFrames, pConfig->rangeEndInPCMFrames); + } + + if (pConfig->loopPointBegInPCMFrames != 0 || pConfig->loopPointEndInPCMFrames != ~((ma_uint64)0)) { + ma_data_source_set_loop_point_in_pcm_frames(ma_sound_get_data_source(pSound), pConfig->loopPointBegInPCMFrames, pConfig->loopPointEndInPCMFrames); + } + + ma_sound_set_looping(pSound, pConfig->isLooping || ((pConfig->flags & MA_SOUND_FLAG_LOOPING) != 0)); + + return MA_SUCCESS; +} + +#ifndef MA_NO_RESOURCE_MANAGER +MA_API ma_result ma_sound_init_from_file_internal(ma_engine* pEngine, const ma_sound_config* pConfig, ma_sound* pSound) +{ + ma_result result = MA_SUCCESS; + ma_uint32 flags; + ma_sound_config config; + ma_resource_manager_pipeline_notifications notifications; + + /* + The engine requires knowledge of the channel count of the underlying data source before it can + initialize the sound. Therefore, we need to make the resource manager wait until the underlying + data source has been initialized so we can get access to the channel count. To + do this, the MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_WAIT_INIT flag is forced. + + Because we're initializing the data source before the sound, there's a chance the notification + will get triggered before this function returns. This is OK, so long as the caller is aware of + it and can avoid accessing the sound from within the notification. + */ + flags = pConfig->flags | MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_WAIT_INIT; + if (pConfig->isLooping) { + flags |= MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_LOOPING; + } + + pSound->pResourceManagerDataSource = (ma_resource_manager_data_source*)ma_malloc(sizeof(*pSound->pResourceManagerDataSource), &pEngine->allocationCallbacks); + if (pSound->pResourceManagerDataSource == NULL) { + return MA_OUT_OF_MEMORY; + } + + /* pDoneFence was removed in 0.12. For backwards compatibility, map it onto the done fence of the notifications. */ + notifications = pConfig->initNotifications; + if (pConfig->pDoneFence != NULL && notifications.done.pFence == NULL) { + notifications.done.pFence = pConfig->pDoneFence; + } + + /* + We must wrap everything around the fence if one was specified. This ensures ma_fence_wait() does + not return prematurely before the sound has finished initializing. 
+ */ + if (notifications.done.pFence) { ma_fence_acquire(notifications.done.pFence); } + { + ma_resource_manager_data_source_config resourceManagerDataSourceConfig = ma_resource_manager_data_source_config_init(); + resourceManagerDataSourceConfig.pFilePath = pConfig->pFilePath; + resourceManagerDataSourceConfig.pFilePathW = pConfig->pFilePathW; + resourceManagerDataSourceConfig.flags = flags; + resourceManagerDataSourceConfig.pNotifications = &notifications; + resourceManagerDataSourceConfig.initialSeekPointInPCMFrames = pConfig->initialSeekPointInPCMFrames; + resourceManagerDataSourceConfig.rangeBegInPCMFrames = pConfig->rangeBegInPCMFrames; + resourceManagerDataSourceConfig.rangeEndInPCMFrames = pConfig->rangeEndInPCMFrames; + resourceManagerDataSourceConfig.loopPointBegInPCMFrames = pConfig->loopPointBegInPCMFrames; + resourceManagerDataSourceConfig.loopPointEndInPCMFrames = pConfig->loopPointEndInPCMFrames; + resourceManagerDataSourceConfig.isLooping = (flags & MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_LOOPING) != 0; + + result = ma_resource_manager_data_source_init_ex(pEngine->pResourceManager, &resourceManagerDataSourceConfig, pSound->pResourceManagerDataSource); + if (result != MA_SUCCESS) { + goto done; + } + + pSound->ownsDataSource = MA_TRUE; /* <-- Important. Not setting this will result in the resource manager data source never getting uninitialized. */ + + /* We need to use a slightly customized version of the config so we'll need to make a copy. */ + config = *pConfig; + config.pFilePath = NULL; + config.pFilePathW = NULL; + config.pDataSource = pSound->pResourceManagerDataSource; + + result = ma_sound_init_from_data_source_internal(pEngine, &config, pSound); + if (result != MA_SUCCESS) { + ma_resource_manager_data_source_uninit(pSound->pResourceManagerDataSource); + ma_free(pSound->pResourceManagerDataSource, &pEngine->allocationCallbacks); + MA_ZERO_OBJECT(pSound); + goto done; + } + } +done: + if (notifications.done.pFence) { ma_fence_release(notifications.done.pFence); } + return result; +} + +MA_API ma_result ma_sound_init_from_file(ma_engine* pEngine, const char* pFilePath, ma_uint32 flags, ma_sound_group* pGroup, ma_fence* pDoneFence, ma_sound* pSound) +{ + ma_sound_config config; + + if (pFilePath == NULL) { + return MA_INVALID_ARGS; + } + + config = ma_sound_config_init_2(pEngine); + config.pFilePath = pFilePath; + config.flags = flags; + config.pInitialAttachment = pGroup; + config.pDoneFence = pDoneFence; + + return ma_sound_init_ex(pEngine, &config, pSound); +} + +MA_API ma_result ma_sound_init_from_file_w(ma_engine* pEngine, const wchar_t* pFilePath, ma_uint32 flags, ma_sound_group* pGroup, ma_fence* pDoneFence, ma_sound* pSound) +{ + ma_sound_config config; + + if (pFilePath == NULL) { + return MA_INVALID_ARGS; + } + + config = ma_sound_config_init_2(pEngine); + config.pFilePathW = pFilePath; + config.flags = flags; + config.pInitialAttachment = pGroup; + config.pDoneFence = pDoneFence; + + return ma_sound_init_ex(pEngine, &config, pSound); +} + +MA_API ma_result ma_sound_init_copy(ma_engine* pEngine, const ma_sound* pExistingSound, ma_uint32 flags, ma_sound_group* pGroup, ma_sound* pSound) +{ + ma_result result; + ma_sound_config config; + + result = ma_sound_preinit(pEngine, pSound); + if (result != MA_SUCCESS) { + return result; + } + + if (pExistingSound == NULL) { + return MA_INVALID_ARGS; + } + + /* Cloning only works for data buffers (not streams) that are loaded from the resource manager. 
*/ + if (pExistingSound->pResourceManagerDataSource == NULL) { + return MA_INVALID_OPERATION; + } + + /* + We need to make a clone of the data source. If the data source is not a data buffer (i.e. a stream) + this will fail. + */ + pSound->pResourceManagerDataSource = (ma_resource_manager_data_source*)ma_malloc(sizeof(*pSound->pResourceManagerDataSource), &pEngine->allocationCallbacks); + if (pSound->pResourceManagerDataSource == NULL) { + return MA_OUT_OF_MEMORY; + } + + result = ma_resource_manager_data_source_init_copy(pEngine->pResourceManager, pExistingSound->pResourceManagerDataSource, pSound->pResourceManagerDataSource); + if (result != MA_SUCCESS) { + ma_free(pSound->pResourceManagerDataSource, &pEngine->allocationCallbacks); + return result; + } + + config = ma_sound_config_init_2(pEngine); + config.pDataSource = pSound->pResourceManagerDataSource; + config.flags = flags; + config.pInitialAttachment = pGroup; + config.monoExpansionMode = pExistingSound->engineNode.monoExpansionMode; + config.volumeSmoothTimeInPCMFrames = pExistingSound->engineNode.volumeSmoothTimeInPCMFrames; + + result = ma_sound_init_from_data_source_internal(pEngine, &config, pSound); + if (result != MA_SUCCESS) { + ma_resource_manager_data_source_uninit(pSound->pResourceManagerDataSource); + ma_free(pSound->pResourceManagerDataSource, &pEngine->allocationCallbacks); + MA_ZERO_OBJECT(pSound); + return result; + } + + /* Make sure the sound is marked as the owner of the data source or else it will never get uninitialized. */ + pSound->ownsDataSource = MA_TRUE; + + return MA_SUCCESS; +} +#endif + +MA_API ma_result ma_sound_init_from_data_source(ma_engine* pEngine, ma_data_source* pDataSource, ma_uint32 flags, ma_sound_group* pGroup, ma_sound* pSound) +{ + ma_sound_config config = ma_sound_config_init_2(pEngine); + config.pDataSource = pDataSource; + config.flags = flags; + config.pInitialAttachment = pGroup; + return ma_sound_init_ex(pEngine, &config, pSound); +} + +MA_API ma_result ma_sound_init_ex(ma_engine* pEngine, const ma_sound_config* pConfig, ma_sound* pSound) +{ + ma_result result; + + result = ma_sound_preinit(pEngine, pSound); + if (result != MA_SUCCESS) { + return result; + } + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + pSound->endCallback = pConfig->endCallback; + pSound->pEndCallbackUserData = pConfig->pEndCallbackUserData; + + /* We need to load the sound differently depending on whether or not we're loading from a file. */ +#ifndef MA_NO_RESOURCE_MANAGER + if (pConfig->pFilePath != NULL || pConfig->pFilePathW != NULL) { + return ma_sound_init_from_file_internal(pEngine, pConfig, pSound); + } else +#endif + { + /* + Getting here means we're not loading from a file. We may be loading from an already-initialized + data source, or none at all. If we aren't specifying any data source, we'll be initializing + the equivalent to a group. ma_sound_init_from_data_source_internal() will deal with this + for us, so no special treatment required here. + */ + return ma_sound_init_from_data_source_internal(pEngine, pConfig, pSound); + } +} + +MA_API void ma_sound_uninit(ma_sound* pSound) +{ + if (pSound == NULL) { + return; + } + + /* + Always uninitialize the node first. This ensures it's detached from the graph and does not return until it has done + so, which makes thread safety beyond this point trivial. 
+ */ + ma_engine_node_uninit(&pSound->engineNode, &pSound->engineNode.pEngine->allocationCallbacks); + + /* Once the sound is detached from the group we can guarantee that it won't be referenced by the mixer thread which means it's safe for us to destroy the data source. */ +#ifndef MA_NO_RESOURCE_MANAGER + if (pSound->ownsDataSource) { + ma_resource_manager_data_source_uninit(pSound->pResourceManagerDataSource); + ma_free(pSound->pResourceManagerDataSource, &pSound->engineNode.pEngine->allocationCallbacks); + pSound->pDataSource = NULL; + } +#else + MA_ASSERT(pSound->ownsDataSource == MA_FALSE); +#endif +} + +MA_API ma_engine* ma_sound_get_engine(const ma_sound* pSound) +{ + if (pSound == NULL) { + return NULL; + } + + return pSound->engineNode.pEngine; +} + +MA_API ma_data_source* ma_sound_get_data_source(const ma_sound* pSound) +{ + if (pSound == NULL) { + return NULL; + } + + return pSound->pDataSource; +} + +MA_API ma_result ma_sound_start(ma_sound* pSound) +{ + if (pSound == NULL) { + return MA_INVALID_ARGS; + } + + /* If the sound is already playing, do nothing. */ + if (ma_sound_is_playing(pSound)) { + return MA_SUCCESS; + } + + /* If the sound is at the end it means we want to start from the start again. */ + if (ma_sound_at_end(pSound)) { + ma_result result = ma_data_source_seek_to_pcm_frame(pSound->pDataSource, 0); + if (result != MA_SUCCESS && result != MA_NOT_IMPLEMENTED) { + return result; /* Failed to seek back to the start. */ + } + + /* Make sure we clear the end indicator. */ + ma_atomic_exchange_32(&pSound->atEnd, MA_FALSE); + } + + /* Make sure the sound is started. If there's a start delay, the sound won't actually start until the start time is reached. */ + ma_node_set_state(pSound, ma_node_state_started); + + return MA_SUCCESS; +} + +MA_API ma_result ma_sound_stop(ma_sound* pSound) +{ + if (pSound == NULL) { + return MA_INVALID_ARGS; + } + + /* This will stop the sound immediately. Use ma_sound_set_stop_time() to stop the sound at a specific time. */ + ma_node_set_state(pSound, ma_node_state_stopped); + + return MA_SUCCESS; +} + +MA_API ma_result ma_sound_stop_with_fade_in_pcm_frames(ma_sound* pSound, ma_uint64 fadeLengthInFrames) +{ + if (pSound == NULL) { + return MA_INVALID_ARGS; + } + + /* Stopping with a fade out requires us to schedule the stop into the future by the fade length. 
*/ + ma_sound_set_stop_time_with_fade_in_pcm_frames(pSound, ma_engine_get_time_in_pcm_frames(ma_sound_get_engine(pSound)) + fadeLengthInFrames, fadeLengthInFrames); + + return MA_SUCCESS; +} + +MA_API ma_result ma_sound_stop_with_fade_in_milliseconds(ma_sound* pSound, ma_uint64 fadeLengthInMilliseconds) +{ + ma_uint64 sampleRate; + + if (pSound == NULL) { + return MA_INVALID_ARGS; + } + + sampleRate = ma_engine_get_sample_rate(ma_sound_get_engine(pSound)); + + return ma_sound_stop_with_fade_in_pcm_frames(pSound, (fadeLengthInMilliseconds * sampleRate) / 1000); +} + +MA_API void ma_sound_set_volume(ma_sound* pSound, float volume) +{ + if (pSound == NULL) { + return; + } + + ma_engine_node_set_volume(&pSound->engineNode, volume); +} + +MA_API float ma_sound_get_volume(const ma_sound* pSound) +{ + float volume = 0; + + if (pSound == NULL) { + return 0; + } + + ma_engine_node_get_volume(&pSound->engineNode, &volume); + + return volume; +} + +MA_API void ma_sound_set_pan(ma_sound* pSound, float pan) +{ + if (pSound == NULL) { + return; + } + + ma_panner_set_pan(&pSound->engineNode.panner, pan); +} + +MA_API float ma_sound_get_pan(const ma_sound* pSound) +{ + if (pSound == NULL) { + return 0; + } + + return ma_panner_get_pan(&pSound->engineNode.panner); +} + +MA_API void ma_sound_set_pan_mode(ma_sound* pSound, ma_pan_mode panMode) +{ + if (pSound == NULL) { + return; + } + + ma_panner_set_mode(&pSound->engineNode.panner, panMode); +} + +MA_API ma_pan_mode ma_sound_get_pan_mode(const ma_sound* pSound) +{ + if (pSound == NULL) { + return ma_pan_mode_balance; + } + + return ma_panner_get_mode(&pSound->engineNode.panner); +} + +MA_API void ma_sound_set_pitch(ma_sound* pSound, float pitch) +{ + if (pSound == NULL) { + return; + } + + if (pitch <= 0) { + return; + } + + ma_atomic_exchange_explicit_f32(&pSound->engineNode.pitch, pitch, ma_atomic_memory_order_release); +} + +MA_API float ma_sound_get_pitch(const ma_sound* pSound) +{ + if (pSound == NULL) { + return 0; + } + + return ma_atomic_load_f32(&pSound->engineNode.pitch); /* Naughty const-cast for this. 
*/ +} + +MA_API void ma_sound_set_spatialization_enabled(ma_sound* pSound, ma_bool32 enabled) +{ + if (pSound == NULL) { + return; + } + + ma_atomic_exchange_explicit_32(&pSound->engineNode.isSpatializationDisabled, !enabled, ma_atomic_memory_order_release); +} + +MA_API ma_bool32 ma_sound_is_spatialization_enabled(const ma_sound* pSound) +{ + if (pSound == NULL) { + return MA_FALSE; + } + + return ma_engine_node_is_spatialization_enabled(&pSound->engineNode); +} + +MA_API void ma_sound_set_pinned_listener_index(ma_sound* pSound, ma_uint32 listenerIndex) +{ + if (pSound == NULL || listenerIndex >= ma_engine_get_listener_count(ma_sound_get_engine(pSound))) { + return; + } + + ma_atomic_exchange_explicit_32(&pSound->engineNode.pinnedListenerIndex, listenerIndex, ma_atomic_memory_order_release); +} + +MA_API ma_uint32 ma_sound_get_pinned_listener_index(const ma_sound* pSound) +{ + if (pSound == NULL) { + return MA_LISTENER_INDEX_CLOSEST; + } + + return ma_atomic_load_explicit_32(&pSound->engineNode.pinnedListenerIndex, ma_atomic_memory_order_acquire); +} + +MA_API ma_uint32 ma_sound_get_listener_index(const ma_sound* pSound) +{ + ma_uint32 listenerIndex; + + if (pSound == NULL) { + return 0; + } + + listenerIndex = ma_sound_get_pinned_listener_index(pSound); + if (listenerIndex == MA_LISTENER_INDEX_CLOSEST) { + ma_vec3f position = ma_sound_get_position(pSound); + return ma_engine_find_closest_listener(ma_sound_get_engine(pSound), position.x, position.y, position.z); + } + + return listenerIndex; +} + +MA_API ma_vec3f ma_sound_get_direction_to_listener(const ma_sound* pSound) +{ + ma_vec3f relativePos; + ma_engine* pEngine; + + if (pSound == NULL) { + return ma_vec3f_init_3f(0, 0, -1); + } + + pEngine = ma_sound_get_engine(pSound); + if (pEngine == NULL) { + return ma_vec3f_init_3f(0, 0, -1); + } + + ma_spatializer_get_relative_position_and_direction(&pSound->engineNode.spatializer, &pEngine->listeners[ma_sound_get_listener_index(pSound)], &relativePos, NULL); + + return ma_vec3f_normalize(ma_vec3f_neg(relativePos)); +} + +MA_API void ma_sound_set_position(ma_sound* pSound, float x, float y, float z) +{ + if (pSound == NULL) { + return; + } + + ma_spatializer_set_position(&pSound->engineNode.spatializer, x, y, z); +} + +MA_API ma_vec3f ma_sound_get_position(const ma_sound* pSound) +{ + if (pSound == NULL) { + return ma_vec3f_init_3f(0, 0, 0); + } + + return ma_spatializer_get_position(&pSound->engineNode.spatializer); +} + +MA_API void ma_sound_set_direction(ma_sound* pSound, float x, float y, float z) +{ + if (pSound == NULL) { + return; + } + + ma_spatializer_set_direction(&pSound->engineNode.spatializer, x, y, z); +} + +MA_API ma_vec3f ma_sound_get_direction(const ma_sound* pSound) +{ + if (pSound == NULL) { + return ma_vec3f_init_3f(0, 0, 0); + } + + return ma_spatializer_get_direction(&pSound->engineNode.spatializer); +} + +MA_API void ma_sound_set_velocity(ma_sound* pSound, float x, float y, float z) +{ + if (pSound == NULL) { + return; + } + + ma_spatializer_set_velocity(&pSound->engineNode.spatializer, x, y, z); +} + +MA_API ma_vec3f ma_sound_get_velocity(const ma_sound* pSound) +{ + if (pSound == NULL) { + return ma_vec3f_init_3f(0, 0, 0); + } + + return ma_spatializer_get_velocity(&pSound->engineNode.spatializer); +} + +MA_API void ma_sound_set_attenuation_model(ma_sound* pSound, ma_attenuation_model attenuationModel) +{ + if (pSound == NULL) { + return; + } + + ma_spatializer_set_attenuation_model(&pSound->engineNode.spatializer, attenuationModel); +} + +MA_API ma_attenuation_model 
ma_sound_get_attenuation_model(const ma_sound* pSound) +{ + if (pSound == NULL) { + return ma_attenuation_model_none; + } + + return ma_spatializer_get_attenuation_model(&pSound->engineNode.spatializer); +} + +MA_API void ma_sound_set_positioning(ma_sound* pSound, ma_positioning positioning) +{ + if (pSound == NULL) { + return; + } + + ma_spatializer_set_positioning(&pSound->engineNode.spatializer, positioning); +} + +MA_API ma_positioning ma_sound_get_positioning(const ma_sound* pSound) +{ + if (pSound == NULL) { + return ma_positioning_absolute; + } + + return ma_spatializer_get_positioning(&pSound->engineNode.spatializer); +} + +MA_API void ma_sound_set_rolloff(ma_sound* pSound, float rolloff) +{ + if (pSound == NULL) { + return; + } + + ma_spatializer_set_rolloff(&pSound->engineNode.spatializer, rolloff); +} + +MA_API float ma_sound_get_rolloff(const ma_sound* pSound) +{ + if (pSound == NULL) { + return 0; + } + + return ma_spatializer_get_rolloff(&pSound->engineNode.spatializer); +} + +MA_API void ma_sound_set_min_gain(ma_sound* pSound, float minGain) +{ + if (pSound == NULL) { + return; + } + + ma_spatializer_set_min_gain(&pSound->engineNode.spatializer, minGain); +} + +MA_API float ma_sound_get_min_gain(const ma_sound* pSound) +{ + if (pSound == NULL) { + return 0; + } + + return ma_spatializer_get_min_gain(&pSound->engineNode.spatializer); +} + +MA_API void ma_sound_set_max_gain(ma_sound* pSound, float maxGain) +{ + if (pSound == NULL) { + return; + } + + ma_spatializer_set_max_gain(&pSound->engineNode.spatializer, maxGain); +} + +MA_API float ma_sound_get_max_gain(const ma_sound* pSound) +{ + if (pSound == NULL) { + return 0; + } + + return ma_spatializer_get_max_gain(&pSound->engineNode.spatializer); +} + +MA_API void ma_sound_set_min_distance(ma_sound* pSound, float minDistance) +{ + if (pSound == NULL) { + return; + } + + ma_spatializer_set_min_distance(&pSound->engineNode.spatializer, minDistance); +} + +MA_API float ma_sound_get_min_distance(const ma_sound* pSound) +{ + if (pSound == NULL) { + return 0; + } + + return ma_spatializer_get_min_distance(&pSound->engineNode.spatializer); +} + +MA_API void ma_sound_set_max_distance(ma_sound* pSound, float maxDistance) +{ + if (pSound == NULL) { + return; + } + + ma_spatializer_set_max_distance(&pSound->engineNode.spatializer, maxDistance); +} + +MA_API float ma_sound_get_max_distance(const ma_sound* pSound) +{ + if (pSound == NULL) { + return 0; + } + + return ma_spatializer_get_max_distance(&pSound->engineNode.spatializer); +} + +MA_API void ma_sound_set_cone(ma_sound* pSound, float innerAngleInRadians, float outerAngleInRadians, float outerGain) +{ + if (pSound == NULL) { + return; + } + + ma_spatializer_set_cone(&pSound->engineNode.spatializer, innerAngleInRadians, outerAngleInRadians, outerGain); +} + +MA_API void ma_sound_get_cone(const ma_sound* pSound, float* pInnerAngleInRadians, float* pOuterAngleInRadians, float* pOuterGain) +{ + if (pInnerAngleInRadians != NULL) { + *pInnerAngleInRadians = 0; + } + + if (pOuterAngleInRadians != NULL) { + *pOuterAngleInRadians = 0; + } + + if (pOuterGain != NULL) { + *pOuterGain = 0; + } + + if (pSound == NULL) { + return; + } + + ma_spatializer_get_cone(&pSound->engineNode.spatializer, pInnerAngleInRadians, pOuterAngleInRadians, pOuterGain); +} + +MA_API void ma_sound_set_doppler_factor(ma_sound* pSound, float dopplerFactor) +{ + if (pSound == NULL) { + return; + } + + ma_spatializer_set_doppler_factor(&pSound->engineNode.spatializer, dopplerFactor); +} + +MA_API float 
ma_sound_get_doppler_factor(const ma_sound* pSound) +{ + if (pSound == NULL) { + return 0; + } + + return ma_spatializer_get_doppler_factor(&pSound->engineNode.spatializer); +} + +MA_API void ma_sound_set_directional_attenuation_factor(ma_sound* pSound, float directionalAttenuationFactor) +{ + if (pSound == NULL) { + return; + } + + ma_spatializer_set_directional_attenuation_factor(&pSound->engineNode.spatializer, directionalAttenuationFactor); +} + +MA_API float ma_sound_get_directional_attenuation_factor(const ma_sound* pSound) +{ + if (pSound == NULL) { + return 1; + } + + return ma_spatializer_get_directional_attenuation_factor(&pSound->engineNode.spatializer); +} + + +MA_API void ma_sound_set_fade_in_pcm_frames(ma_sound* pSound, float volumeBeg, float volumeEnd, ma_uint64 fadeLengthInFrames) +{ + if (pSound == NULL) { + return; + } + + ma_sound_set_fade_start_in_pcm_frames(pSound, volumeBeg, volumeEnd, fadeLengthInFrames, (~(ma_uint64)0)); +} + +MA_API void ma_sound_set_fade_in_milliseconds(ma_sound* pSound, float volumeBeg, float volumeEnd, ma_uint64 fadeLengthInMilliseconds) +{ + if (pSound == NULL) { + return; + } + + ma_sound_set_fade_in_pcm_frames(pSound, volumeBeg, volumeEnd, (fadeLengthInMilliseconds * pSound->engineNode.fader.config.sampleRate) / 1000); +} + +MA_API void ma_sound_set_fade_start_in_pcm_frames(ma_sound* pSound, float volumeBeg, float volumeEnd, ma_uint64 fadeLengthInFrames, ma_uint64 absoluteGlobalTimeInFrames) +{ + if (pSound == NULL) { + return; + } + + /* + We don't want to update the fader at this point because we need to use the engine's current time + to derive the fader's start offset. The timer is being updated on the audio thread so in order to + do this as accurately as possible we'll need to defer this to the audio thread. 
+ */ + ma_atomic_float_set(&pSound->engineNode.fadeSettings.volumeBeg, volumeBeg); + ma_atomic_float_set(&pSound->engineNode.fadeSettings.volumeEnd, volumeEnd); + ma_atomic_uint64_set(&pSound->engineNode.fadeSettings.fadeLengthInFrames, fadeLengthInFrames); + ma_atomic_uint64_set(&pSound->engineNode.fadeSettings.absoluteGlobalTimeInFrames, absoluteGlobalTimeInFrames); +} + +MA_API void ma_sound_set_fade_start_in_milliseconds(ma_sound* pSound, float volumeBeg, float volumeEnd, ma_uint64 fadeLengthInMilliseconds, ma_uint64 absoluteGlobalTimeInMilliseconds) +{ + ma_uint32 sampleRate; + + if (pSound == NULL) { + return; + } + + sampleRate = ma_engine_get_sample_rate(ma_sound_get_engine(pSound)); + + ma_sound_set_fade_start_in_pcm_frames(pSound, volumeBeg, volumeEnd, (fadeLengthInMilliseconds * sampleRate) / 1000, (absoluteGlobalTimeInMilliseconds * sampleRate) / 1000); +} + +MA_API float ma_sound_get_current_fade_volume(const ma_sound* pSound) +{ + if (pSound == NULL) { + return 0; /* This is a float getter so we cannot return an error code here. */ + } + + return ma_fader_get_current_volume(&pSound->engineNode.fader); +} + +MA_API void ma_sound_set_start_time_in_pcm_frames(ma_sound* pSound, ma_uint64 absoluteGlobalTimeInFrames) +{ + if (pSound == NULL) { + return; + } + + ma_node_set_state_time(pSound, ma_node_state_started, absoluteGlobalTimeInFrames); +} + +MA_API void ma_sound_set_start_time_in_milliseconds(ma_sound* pSound, ma_uint64 absoluteGlobalTimeInMilliseconds) +{ + if (pSound == NULL) { + return; + } + + ma_sound_set_start_time_in_pcm_frames(pSound, absoluteGlobalTimeInMilliseconds * ma_engine_get_sample_rate(ma_sound_get_engine(pSound)) / 1000); +} + +MA_API void ma_sound_set_stop_time_in_pcm_frames(ma_sound* pSound, ma_uint64 absoluteGlobalTimeInFrames) +{ + if (pSound == NULL) { + return; + } + + ma_sound_set_stop_time_with_fade_in_pcm_frames(pSound, absoluteGlobalTimeInFrames, 0); +} + +MA_API void ma_sound_set_stop_time_in_milliseconds(ma_sound* pSound, ma_uint64 absoluteGlobalTimeInMilliseconds) +{ + if (pSound == NULL) { + return; + } + + ma_sound_set_stop_time_in_pcm_frames(pSound, absoluteGlobalTimeInMilliseconds * ma_engine_get_sample_rate(ma_sound_get_engine(pSound)) / 1000); +} + +MA_API void ma_sound_set_stop_time_with_fade_in_pcm_frames(ma_sound* pSound, ma_uint64 stopAbsoluteGlobalTimeInFrames, ma_uint64 fadeLengthInFrames) +{ + if (pSound == NULL) { + return; + } + + if (fadeLengthInFrames > 0) { + if (fadeLengthInFrames > stopAbsoluteGlobalTimeInFrames) { + fadeLengthInFrames = stopAbsoluteGlobalTimeInFrames; + } + + ma_sound_set_fade_start_in_pcm_frames(pSound, -1, 0, fadeLengthInFrames, stopAbsoluteGlobalTimeInFrames - fadeLengthInFrames); + } + + ma_node_set_state_time(pSound, ma_node_state_stopped, stopAbsoluteGlobalTimeInFrames); +} + +MA_API void ma_sound_set_stop_time_with_fade_in_milliseconds(ma_sound* pSound, ma_uint64 stopAbsoluteGlobalTimeInMilliseconds, ma_uint64 fadeLengthInMilliseconds) +{ + ma_uint32 sampleRate; + + if (pSound == NULL) { + return; + } + + sampleRate = ma_engine_get_sample_rate(ma_sound_get_engine(pSound)); + + ma_sound_set_stop_time_with_fade_in_pcm_frames(pSound, (stopAbsoluteGlobalTimeInMilliseconds * sampleRate) / 1000, (fadeLengthInMilliseconds * sampleRate) / 1000); +} + +MA_API ma_bool32 ma_sound_is_playing(const ma_sound* pSound) +{ + if (pSound == NULL) { + return MA_FALSE; + } + + return ma_node_get_state_by_time(pSound, ma_engine_get_time_in_pcm_frames(ma_sound_get_engine(pSound))) == ma_node_state_started; +} + +MA_API ma_uint64 ma_sound_get_time_in_pcm_frames(const ma_sound* 
pSound) +{ + if (pSound == NULL) { + return 0; + } + + return ma_node_get_time(pSound); +} + +MA_API ma_uint64 ma_sound_get_time_in_milliseconds(const ma_sound* pSound) +{ + return ma_sound_get_time_in_pcm_frames(pSound) * 1000 / ma_engine_get_sample_rate(ma_sound_get_engine(pSound)); +} + +MA_API void ma_sound_set_looping(ma_sound* pSound, ma_bool32 isLooping) +{ + if (pSound == NULL) { + return; + } + + /* Looping is only a valid concept if the sound is backed by a data source. */ + if (pSound->pDataSource == NULL) { + return; + } + + /* The looping state needs to be applied to the data source in order for any looping to actually happen. */ + ma_data_source_set_looping(pSound->pDataSource, isLooping); +} + +MA_API ma_bool32 ma_sound_is_looping(const ma_sound* pSound) +{ + if (pSound == NULL) { + return MA_FALSE; + } + + /* There is no notion of looping for sounds that are not backed by a data source. */ + if (pSound->pDataSource == NULL) { + return MA_FALSE; + } + + return ma_data_source_is_looping(pSound->pDataSource); +} + +MA_API ma_bool32 ma_sound_at_end(const ma_sound* pSound) +{ + if (pSound == NULL) { + return MA_FALSE; + } + + /* There is no notion of an end of a sound if it's not backed by a data source. */ + if (pSound->pDataSource == NULL) { + return MA_FALSE; + } + + return ma_sound_get_at_end(pSound); +} + +MA_API ma_result ma_sound_seek_to_pcm_frame(ma_sound* pSound, ma_uint64 frameIndex) +{ + if (pSound == NULL) { + return MA_INVALID_ARGS; + } + + /* Seeking is only valid for sounds that are backed by a data source. */ + if (pSound->pDataSource == NULL) { + return MA_INVALID_OPERATION; + } + + /* We can't be seeking while reading at the same time. We just set the seek target and get the mixing thread to do the actual seek. */ + ma_atomic_exchange_64(&pSound->seekTarget, frameIndex); + + return MA_SUCCESS; +} + +MA_API ma_result ma_sound_seek_to_second(ma_sound* pSound, float seekPointInSeconds) +{ + ma_uint64 frameIndex; + ma_uint32 sampleRate; + ma_result result; + + if (pSound == NULL) { + return MA_INVALID_ARGS; + } + + result = ma_sound_get_data_format(pSound, NULL, NULL, &sampleRate, NULL, 0); + if (result != MA_SUCCESS) { + return result; + } + + /* We need PCM frames. We need to convert first */ + frameIndex = (ma_uint64)(seekPointInSeconds * sampleRate); + + return ma_sound_seek_to_pcm_frame(pSound, frameIndex); +} + +MA_API ma_result ma_sound_get_data_format(ma_sound* pSound, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate, ma_channel* pChannelMap, size_t channelMapCap) +{ + if (pSound == NULL) { + return MA_INVALID_ARGS; + } + + /* The data format is retrieved directly from the data source if the sound is backed by one. Otherwise we pull it from the node. 
*/ + if (pSound->pDataSource == NULL) { + ma_uint32 channels; + + if (pFormat != NULL) { + *pFormat = ma_format_f32; + } + + channels = ma_node_get_input_channels(&pSound->engineNode, 0); + if (pChannels != NULL) { + *pChannels = channels; + } + + if (pSampleRate != NULL) { + *pSampleRate = pSound->engineNode.resampler.config.sampleRateIn; + } + + if (pChannelMap != NULL) { + ma_channel_map_init_standard(ma_standard_channel_map_default, pChannelMap, channelMapCap, channels); + } + + return MA_SUCCESS; + } else { + return ma_data_source_get_data_format(pSound->pDataSource, pFormat, pChannels, pSampleRate, pChannelMap, channelMapCap); + } +} + +MA_API ma_result ma_sound_get_cursor_in_pcm_frames(ma_sound* pSound, ma_uint64* pCursor) +{ + ma_uint64 seekTarget; + + if (pSound == NULL) { + return MA_INVALID_ARGS; + } + + /* The notion of a cursor is only valid for sounds that are backed by a data source. */ + if (pSound->pDataSource == NULL) { + return MA_INVALID_OPERATION; + } + + seekTarget = ma_atomic_load_64(&pSound->seekTarget); + if (seekTarget != MA_SEEK_TARGET_NONE) { + *pCursor = seekTarget; + return MA_SUCCESS; + } else { + return ma_data_source_get_cursor_in_pcm_frames(pSound->pDataSource, pCursor); + } +} + +MA_API ma_result ma_sound_get_length_in_pcm_frames(ma_sound* pSound, ma_uint64* pLength) +{ + if (pSound == NULL) { + return MA_INVALID_ARGS; + } + + /* The notion of a sound length is only valid for sounds that are backed by a data source. */ + if (pSound->pDataSource == NULL) { + return MA_INVALID_OPERATION; + } + + return ma_data_source_get_length_in_pcm_frames(pSound->pDataSource, pLength); +} + +MA_API ma_result ma_sound_get_cursor_in_seconds(ma_sound* pSound, float* pCursor) +{ + ma_result result; + ma_uint64 cursorInPCMFrames; + ma_uint32 sampleRate; + + if (pCursor != NULL) { + *pCursor = 0; + } + + result = ma_sound_get_cursor_in_pcm_frames(pSound, &cursorInPCMFrames); + if (result != MA_SUCCESS) { + return result; + } + + result = ma_sound_get_data_format(pSound, NULL, NULL, &sampleRate, NULL, 0); + if (result != MA_SUCCESS) { + return result; + } + + /* VC6 does not support division of unsigned 64-bit integers with floating point numbers. Need to use a signed number. This shouldn't affect anything in practice. */ + *pCursor = (ma_int64)cursorInPCMFrames / (float)sampleRate; + + return MA_SUCCESS; +} + +MA_API ma_result ma_sound_get_length_in_seconds(ma_sound* pSound, float* pLength) +{ + if (pSound == NULL) { + return MA_INVALID_ARGS; + } + + /* The notion of a sound length is only valid for sounds that are backed by a data source. */ + if (pSound->pDataSource == NULL) { + return MA_INVALID_OPERATION; + } + + return ma_data_source_get_length_in_seconds(pSound->pDataSource, pLength); +} + +MA_API ma_result ma_sound_set_end_callback(ma_sound* pSound, ma_sound_end_proc callback, void* pUserData) +{ + if (pSound == NULL) { + return MA_INVALID_ARGS; + } + + /* The notion of an end is only valid for sounds that are backed by a data source. 
*/ + if (pSound->pDataSource == NULL) { + return MA_INVALID_OPERATION; + } + + pSound->endCallback = callback; + pSound->pEndCallbackUserData = pUserData; + + return MA_SUCCESS; +} + + +MA_API ma_result ma_sound_group_init(ma_engine* pEngine, ma_uint32 flags, ma_sound_group* pParentGroup, ma_sound_group* pGroup) +{ + ma_sound_group_config config = ma_sound_group_config_init_2(pEngine); + config.flags = flags; + config.pInitialAttachment = pParentGroup; + return ma_sound_group_init_ex(pEngine, &config, pGroup); +} + +MA_API ma_result ma_sound_group_init_ex(ma_engine* pEngine, const ma_sound_group_config* pConfig, ma_sound_group* pGroup) +{ + ma_sound_config soundConfig; + + if (pGroup == NULL) { + return MA_INVALID_ARGS; + } + + MA_ZERO_OBJECT(pGroup); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; + } + + /* A sound group is just a sound without a data source. */ + soundConfig = *pConfig; + soundConfig.pFilePath = NULL; + soundConfig.pFilePathW = NULL; + soundConfig.pDataSource = NULL; + + /* + Groups need to have spatialization disabled by default because I think it'll be pretty rare + that programs will want to spatialize groups (but not unheard of). Disabling this by default + certainly feels like the right option. Spatialization can be enabled with a + call to ma_sound_group_set_spatialization_enabled(). + */ + soundConfig.flags |= MA_SOUND_FLAG_NO_SPATIALIZATION; + + return ma_sound_init_ex(pEngine, &soundConfig, pGroup); +} + +MA_API void ma_sound_group_uninit(ma_sound_group* pGroup) +{ + ma_sound_uninit(pGroup); +} + +MA_API ma_engine* ma_sound_group_get_engine(const ma_sound_group* pGroup) +{ + return ma_sound_get_engine(pGroup); +} + +MA_API ma_result ma_sound_group_start(ma_sound_group* pGroup) +{ + return ma_sound_start(pGroup); +} + +MA_API ma_result ma_sound_group_stop(ma_sound_group* pGroup) +{ + return ma_sound_stop(pGroup); +} + +MA_API void ma_sound_group_set_volume(ma_sound_group* pGroup, float volume) +{ + ma_sound_set_volume(pGroup, volume); +} + +MA_API float ma_sound_group_get_volume(const ma_sound_group* pGroup) +{ + return ma_sound_get_volume(pGroup); +} + +MA_API void ma_sound_group_set_pan(ma_sound_group* pGroup, float pan) +{ + ma_sound_set_pan(pGroup, pan); +} + +MA_API float ma_sound_group_get_pan(const ma_sound_group* pGroup) +{ + return ma_sound_get_pan(pGroup); +} + +MA_API void ma_sound_group_set_pan_mode(ma_sound_group* pGroup, ma_pan_mode panMode) +{ + ma_sound_set_pan_mode(pGroup, panMode); +} + +MA_API ma_pan_mode ma_sound_group_get_pan_mode(const ma_sound_group* pGroup) +{ + return ma_sound_get_pan_mode(pGroup); +} + +MA_API void ma_sound_group_set_pitch(ma_sound_group* pGroup, float pitch) +{ + ma_sound_set_pitch(pGroup, pitch); +} + +MA_API float ma_sound_group_get_pitch(const ma_sound_group* pGroup) +{ + return ma_sound_get_pitch(pGroup); +} + +MA_API void ma_sound_group_set_spatialization_enabled(ma_sound_group* pGroup, ma_bool32 enabled) +{ + ma_sound_set_spatialization_enabled(pGroup, enabled); +} + +MA_API ma_bool32 ma_sound_group_is_spatialization_enabled(const ma_sound_group* pGroup) +{ + return ma_sound_is_spatialization_enabled(pGroup); +} + +MA_API void ma_sound_group_set_pinned_listener_index(ma_sound_group* pGroup, ma_uint32 listenerIndex) +{ + ma_sound_set_pinned_listener_index(pGroup, listenerIndex); +} + +MA_API ma_uint32 ma_sound_group_get_pinned_listener_index(const ma_sound_group* pGroup) +{ + return ma_sound_get_pinned_listener_index(pGroup); +} + +MA_API ma_uint32 ma_sound_group_get_listener_index(const 
ma_sound_group* pGroup) +{ + return ma_sound_get_listener_index(pGroup); +} + +MA_API ma_vec3f ma_sound_group_get_direction_to_listener(const ma_sound_group* pGroup) +{ + return ma_sound_get_direction_to_listener(pGroup); +} + +MA_API void ma_sound_group_set_position(ma_sound_group* pGroup, float x, float y, float z) +{ + ma_sound_set_position(pGroup, x, y, z); +} + +MA_API ma_vec3f ma_sound_group_get_position(const ma_sound_group* pGroup) +{ + return ma_sound_get_position(pGroup); +} + +MA_API void ma_sound_group_set_direction(ma_sound_group* pGroup, float x, float y, float z) +{ + ma_sound_set_direction(pGroup, x, y, z); +} + +MA_API ma_vec3f ma_sound_group_get_direction(const ma_sound_group* pGroup) +{ + return ma_sound_get_direction(pGroup); +} + +MA_API void ma_sound_group_set_velocity(ma_sound_group* pGroup, float x, float y, float z) +{ + ma_sound_set_velocity(pGroup, x, y, z); +} + +MA_API ma_vec3f ma_sound_group_get_velocity(const ma_sound_group* pGroup) +{ + return ma_sound_get_velocity(pGroup); +} + +MA_API void ma_sound_group_set_attenuation_model(ma_sound_group* pGroup, ma_attenuation_model attenuationModel) +{ + ma_sound_set_attenuation_model(pGroup, attenuationModel); +} + +MA_API ma_attenuation_model ma_sound_group_get_attenuation_model(const ma_sound_group* pGroup) +{ + return ma_sound_get_attenuation_model(pGroup); +} + +MA_API void ma_sound_group_set_positioning(ma_sound_group* pGroup, ma_positioning positioning) +{ + ma_sound_set_positioning(pGroup, positioning); +} + +MA_API ma_positioning ma_sound_group_get_positioning(const ma_sound_group* pGroup) +{ + return ma_sound_get_positioning(pGroup); +} + +MA_API void ma_sound_group_set_rolloff(ma_sound_group* pGroup, float rolloff) +{ + ma_sound_set_rolloff(pGroup, rolloff); +} + +MA_API float ma_sound_group_get_rolloff(const ma_sound_group* pGroup) +{ + return ma_sound_get_rolloff(pGroup); +} + +MA_API void ma_sound_group_set_min_gain(ma_sound_group* pGroup, float minGain) +{ + ma_sound_set_min_gain(pGroup, minGain); +} + +MA_API float ma_sound_group_get_min_gain(const ma_sound_group* pGroup) +{ + return ma_sound_get_min_gain(pGroup); +} + +MA_API void ma_sound_group_set_max_gain(ma_sound_group* pGroup, float maxGain) +{ + ma_sound_set_max_gain(pGroup, maxGain); +} + +MA_API float ma_sound_group_get_max_gain(const ma_sound_group* pGroup) +{ + return ma_sound_get_max_gain(pGroup); +} + +MA_API void ma_sound_group_set_min_distance(ma_sound_group* pGroup, float minDistance) +{ + ma_sound_set_min_distance(pGroup, minDistance); +} + +MA_API float ma_sound_group_get_min_distance(const ma_sound_group* pGroup) +{ + return ma_sound_get_min_distance(pGroup); +} + +MA_API void ma_sound_group_set_max_distance(ma_sound_group* pGroup, float maxDistance) +{ + ma_sound_set_max_distance(pGroup, maxDistance); +} + +MA_API float ma_sound_group_get_max_distance(const ma_sound_group* pGroup) +{ + return ma_sound_get_max_distance(pGroup); +} + +MA_API void ma_sound_group_set_cone(ma_sound_group* pGroup, float innerAngleInRadians, float outerAngleInRadians, float outerGain) +{ + ma_sound_set_cone(pGroup, innerAngleInRadians, outerAngleInRadians, outerGain); +} + +MA_API void ma_sound_group_get_cone(const ma_sound_group* pGroup, float* pInnerAngleInRadians, float* pOuterAngleInRadians, float* pOuterGain) +{ + ma_sound_get_cone(pGroup, pInnerAngleInRadians, pOuterAngleInRadians, pOuterGain); +} + +MA_API void ma_sound_group_set_doppler_factor(ma_sound_group* pGroup, float dopplerFactor) +{ + ma_sound_set_doppler_factor(pGroup, dopplerFactor); +} + 
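
For reference, the group API in this section is the sound API re-exposed: a group is initialized on its own, sounds are routed into it by passing it as the pGroup argument at init time, and per-category controls such as volume are then applied to the group as a whole. A minimal usage sketch follows, separate from the vendored diff; the file name "step.wav" is hypothetical and error handling is trimmed to early returns.

#include "miniaudio.h"

int main(void)
{
    ma_engine engine;
    ma_sound_group sfxGroup;
    ma_sound sound;

    if (ma_engine_init(NULL, &engine) != MA_SUCCESS) {
        return -1;
    }

    /* A group with a NULL parent attaches to the engine's endpoint. */
    if (ma_sound_group_init(&engine, 0, NULL, &sfxGroup) != MA_SUCCESS) {
        ma_engine_uninit(&engine);
        return -1;
    }

    /* Route the sound through the group by passing it as the pGroup argument. */
    if (ma_sound_init_from_file(&engine, "step.wav", 0, &sfxGroup, NULL, &sound) == MA_SUCCESS) {
        ma_sound_group_set_volume(&sfxGroup, 0.5f); /* Scales every sound attached to the group. */
        ma_sound_start(&sound);
    }

    /* ... keep the engine alive while audio plays ... */

    ma_sound_uninit(&sound);
    ma_sound_group_uninit(&sfxGroup);
    ma_engine_uninit(&engine);
    return 0;
}
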
+MA_API float ma_sound_group_get_doppler_factor(const ma_sound_group* pGroup) +{ + return ma_sound_get_doppler_factor(pGroup); +} + +MA_API void ma_sound_group_set_directional_attenuation_factor(ma_sound_group* pGroup, float directionalAttenuationFactor) +{ + ma_sound_set_directional_attenuation_factor(pGroup, directionalAttenuationFactor); +} + +MA_API float ma_sound_group_get_directional_attenuation_factor(const ma_sound_group* pGroup) +{ + return ma_sound_get_directional_attenuation_factor(pGroup); +} + +MA_API void ma_sound_group_set_fade_in_pcm_frames(ma_sound_group* pGroup, float volumeBeg, float volumeEnd, ma_uint64 fadeLengthInFrames) +{ + ma_sound_set_fade_in_pcm_frames(pGroup, volumeBeg, volumeEnd, fadeLengthInFrames); +} + +MA_API void ma_sound_group_set_fade_in_milliseconds(ma_sound_group* pGroup, float volumeBeg, float volumeEnd, ma_uint64 fadeLengthInMilliseconds) +{ + ma_sound_set_fade_in_milliseconds(pGroup, volumeBeg, volumeEnd, fadeLengthInMilliseconds); +} + +MA_API float ma_sound_group_get_current_fade_volume(ma_sound_group* pGroup) +{ + return ma_sound_get_current_fade_volume(pGroup); +} + +MA_API void ma_sound_group_set_start_time_in_pcm_frames(ma_sound_group* pGroup, ma_uint64 absoluteGlobalTimeInFrames) +{ + ma_sound_set_start_time_in_pcm_frames(pGroup, absoluteGlobalTimeInFrames); +} + +MA_API void ma_sound_group_set_start_time_in_milliseconds(ma_sound_group* pGroup, ma_uint64 absoluteGlobalTimeInMilliseconds) +{ + ma_sound_set_start_time_in_milliseconds(pGroup, absoluteGlobalTimeInMilliseconds); +} + +MA_API void ma_sound_group_set_stop_time_in_pcm_frames(ma_sound_group* pGroup, ma_uint64 absoluteGlobalTimeInFrames) +{ + ma_sound_set_stop_time_in_pcm_frames(pGroup, absoluteGlobalTimeInFrames); +} + +MA_API void ma_sound_group_set_stop_time_in_milliseconds(ma_sound_group* pGroup, ma_uint64 absoluteGlobalTimeInMilliseconds) +{ + ma_sound_set_stop_time_in_milliseconds(pGroup, absoluteGlobalTimeInMilliseconds); +} + +MA_API ma_bool32 ma_sound_group_is_playing(const ma_sound_group* pGroup) +{ + return ma_sound_is_playing(pGroup); +} + +MA_API ma_uint64 ma_sound_group_get_time_in_pcm_frames(const ma_sound_group* pGroup) +{ + return ma_sound_get_time_in_pcm_frames(pGroup); +} +#endif /* MA_NO_ENGINE */ +/* END SECTION: miniaudio_engine.c */ + + + +/************************************************************************************************************************************************************** +*************************************************************************************************************************************************************** + +Auto Generated +============== +All code below is auto-generated from a tool. This mostly consists of decoding backend implementations such as ma_dr_wav, ma_dr_flac, etc. If you find a bug in the +code below please report the bug to the respective repository for the relevant project (probably dr_libs). 
+ +*************************************************************************************************************************************************************** +**************************************************************************************************************************************************************/ +#if !defined(MA_NO_WAV) && (!defined(MA_NO_DECODING) || !defined(MA_NO_ENCODING)) +#if !defined(MA_DR_WAV_IMPLEMENTATION) +/* dr_wav_c begin */ +#ifndef ma_dr_wav_c +#define ma_dr_wav_c +#ifdef __MRC__ +#pragma options opt off +#endif +#include <stdlib.h> +#include <string.h> +#include <limits.h> +#ifndef MA_DR_WAV_NO_STDIO +#include <stdio.h> +#ifndef MA_DR_WAV_NO_WCHAR +#include <wchar.h> +#endif +#endif +#ifndef MA_DR_WAV_ASSERT +#include <assert.h> +#define MA_DR_WAV_ASSERT(expression) assert(expression) +#endif +#ifndef MA_DR_WAV_MALLOC +#define MA_DR_WAV_MALLOC(sz) malloc((sz)) +#endif +#ifndef MA_DR_WAV_REALLOC +#define MA_DR_WAV_REALLOC(p, sz) realloc((p), (sz)) +#endif +#ifndef MA_DR_WAV_FREE +#define MA_DR_WAV_FREE(p) free((p)) +#endif +#ifndef MA_DR_WAV_COPY_MEMORY +#define MA_DR_WAV_COPY_MEMORY(dst, src, sz) memcpy((dst), (src), (sz)) +#endif +#ifndef MA_DR_WAV_ZERO_MEMORY +#define MA_DR_WAV_ZERO_MEMORY(p, sz) memset((p), 0, (sz)) +#endif +#ifndef MA_DR_WAV_ZERO_OBJECT +#define MA_DR_WAV_ZERO_OBJECT(p) MA_DR_WAV_ZERO_MEMORY((p), sizeof(*p)) +#endif +#define ma_dr_wav_countof(x) (sizeof(x) / sizeof(x[0])) +#define ma_dr_wav_align(x, a) ((((x) + (a) - 1) / (a)) * (a)) +#define ma_dr_wav_min(a, b) (((a) < (b)) ? (a) : (b)) +#define ma_dr_wav_max(a, b) (((a) > (b)) ? (a) : (b)) +#define ma_dr_wav_clamp(x, lo, hi) (ma_dr_wav_max((lo), ma_dr_wav_min((hi), (x)))) +#define ma_dr_wav_offset_ptr(p, offset) (((ma_uint8*)(p)) + (offset)) +#define MA_DR_WAV_MAX_SIMD_VECTOR_SIZE 32 +#define MA_DR_WAV_INT64_MIN ((ma_int64) ((ma_uint64)0x80000000 << 32)) +#define MA_DR_WAV_INT64_MAX ((ma_int64)(((ma_uint64)0x7FFFFFFF << 32) | 0xFFFFFFFF)) +#if defined(_MSC_VER) && _MSC_VER >= 1400 + #define MA_DR_WAV_HAS_BYTESWAP16_INTRINSIC + #define MA_DR_WAV_HAS_BYTESWAP32_INTRINSIC + #define MA_DR_WAV_HAS_BYTESWAP64_INTRINSIC +#elif defined(__clang__) + #if defined(__has_builtin) + #if __has_builtin(__builtin_bswap16) + #define MA_DR_WAV_HAS_BYTESWAP16_INTRINSIC + #endif + #if __has_builtin(__builtin_bswap32) + #define MA_DR_WAV_HAS_BYTESWAP32_INTRINSIC + #endif + #if __has_builtin(__builtin_bswap64) + #define MA_DR_WAV_HAS_BYTESWAP64_INTRINSIC + #endif + #endif +#elif defined(__GNUC__) + #if ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)) + #define MA_DR_WAV_HAS_BYTESWAP32_INTRINSIC + #define MA_DR_WAV_HAS_BYTESWAP64_INTRINSIC + #endif + #if ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)) + #define MA_DR_WAV_HAS_BYTESWAP16_INTRINSIC + #endif +#endif +MA_API void ma_dr_wav_version(ma_uint32* pMajor, ma_uint32* pMinor, ma_uint32* pRevision) +{ + if (pMajor) { + *pMajor = MA_DR_WAV_VERSION_MAJOR; + } + if (pMinor) { + *pMinor = MA_DR_WAV_VERSION_MINOR; + } + if (pRevision) { + *pRevision = MA_DR_WAV_VERSION_REVISION; + } +} +MA_API const char* ma_dr_wav_version_string(void) +{ + return MA_DR_WAV_VERSION_STRING; +} +#ifndef MA_DR_WAV_MAX_SAMPLE_RATE +#define MA_DR_WAV_MAX_SAMPLE_RATE 384000 +#endif +#ifndef MA_DR_WAV_MAX_CHANNELS +#define MA_DR_WAV_MAX_CHANNELS 256 +#endif +#ifndef MA_DR_WAV_MAX_BITS_PER_SAMPLE +#define MA_DR_WAV_MAX_BITS_PER_SAMPLE 64 +#endif +static const ma_uint8 ma_dr_wavGUID_W64_RIFF[16] = {0x72,0x69,0x66,0x66, 0x2E,0x91, 0xCF,0x11, 0xA5,0xD6, 0x28,0xDB,0x04,0xC1,0x00,0x00}; +static const ma_uint8 
ma_dr_wavGUID_W64_WAVE[16] = {0x77,0x61,0x76,0x65, 0xF3,0xAC, 0xD3,0x11, 0x8C,0xD1, 0x00,0xC0,0x4F,0x8E,0xDB,0x8A}; +static const ma_uint8 ma_dr_wavGUID_W64_FMT [16] = {0x66,0x6D,0x74,0x20, 0xF3,0xAC, 0xD3,0x11, 0x8C,0xD1, 0x00,0xC0,0x4F,0x8E,0xDB,0x8A}; +static const ma_uint8 ma_dr_wavGUID_W64_FACT[16] = {0x66,0x61,0x63,0x74, 0xF3,0xAC, 0xD3,0x11, 0x8C,0xD1, 0x00,0xC0,0x4F,0x8E,0xDB,0x8A}; +static const ma_uint8 ma_dr_wavGUID_W64_DATA[16] = {0x64,0x61,0x74,0x61, 0xF3,0xAC, 0xD3,0x11, 0x8C,0xD1, 0x00,0xC0,0x4F,0x8E,0xDB,0x8A}; +static MA_INLINE int ma_dr_wav__is_little_endian(void) +{ +#if defined(MA_X86) || defined(MA_X64) + return MA_TRUE; +#elif defined(__BYTE_ORDER) && defined(__LITTLE_ENDIAN) && __BYTE_ORDER == __LITTLE_ENDIAN + return MA_TRUE; +#else + int n = 1; + return (*(char*)&n) == 1; +#endif +} +static MA_INLINE void ma_dr_wav_bytes_to_guid(const ma_uint8* data, ma_uint8* guid) +{ + int i; + for (i = 0; i < 16; ++i) { + guid[i] = data[i]; + } +} +static MA_INLINE ma_uint16 ma_dr_wav__bswap16(ma_uint16 n) +{ +#ifdef MA_DR_WAV_HAS_BYTESWAP16_INTRINSIC + #if defined(_MSC_VER) + return _byteswap_ushort(n); + #elif defined(__GNUC__) || defined(__clang__) + return __builtin_bswap16(n); + #else + #error "This compiler does not support the byte swap intrinsic." + #endif +#else + return ((n & 0xFF00) >> 8) | + ((n & 0x00FF) << 8); +#endif +} +static MA_INLINE ma_uint32 ma_dr_wav__bswap32(ma_uint32 n) +{ +#ifdef MA_DR_WAV_HAS_BYTESWAP32_INTRINSIC + #if defined(_MSC_VER) + return _byteswap_ulong(n); + #elif defined(__GNUC__) || defined(__clang__) + #if defined(MA_ARM) && (defined(__ARM_ARCH) && __ARM_ARCH >= 6) && !defined(MA_64BIT) + ma_uint32 r; + __asm__ __volatile__ ( + #if defined(MA_64BIT) + "rev %w[out], %w[in]" : [out]"=r"(r) : [in]"r"(n) + #else + "rev %[out], %[in]" : [out]"=r"(r) : [in]"r"(n) + #endif + ); + return r; + #else + return __builtin_bswap32(n); + #endif + #else + #error "This compiler does not support the byte swap intrinsic." + #endif +#else + return ((n & 0xFF000000) >> 24) | + ((n & 0x00FF0000) >> 8) | + ((n & 0x0000FF00) << 8) | + ((n & 0x000000FF) << 24); +#endif +} +static MA_INLINE ma_uint64 ma_dr_wav__bswap64(ma_uint64 n) +{ +#ifdef MA_DR_WAV_HAS_BYTESWAP64_INTRINSIC + #if defined(_MSC_VER) + return _byteswap_uint64(n); + #elif defined(__GNUC__) || defined(__clang__) + return __builtin_bswap64(n); + #else + #error "This compiler does not support the byte swap intrinsic." 
+ #endif +#else + return ((n & ((ma_uint64)0xFF000000 << 32)) >> 56) | + ((n & ((ma_uint64)0x00FF0000 << 32)) >> 40) | + ((n & ((ma_uint64)0x0000FF00 << 32)) >> 24) | + ((n & ((ma_uint64)0x000000FF << 32)) >> 8) | + ((n & ((ma_uint64)0xFF000000 )) << 8) | + ((n & ((ma_uint64)0x00FF0000 )) << 24) | + ((n & ((ma_uint64)0x0000FF00 )) << 40) | + ((n & ((ma_uint64)0x000000FF )) << 56); +#endif +} +static MA_INLINE ma_int16 ma_dr_wav__bswap_s16(ma_int16 n) +{ + return (ma_int16)ma_dr_wav__bswap16((ma_uint16)n); +} +static MA_INLINE void ma_dr_wav__bswap_samples_s16(ma_int16* pSamples, ma_uint64 sampleCount) +{ + ma_uint64 iSample; + for (iSample = 0; iSample < sampleCount; iSample += 1) { + pSamples[iSample] = ma_dr_wav__bswap_s16(pSamples[iSample]); + } +} +static MA_INLINE void ma_dr_wav__bswap_s24(ma_uint8* p) +{ + ma_uint8 t; + t = p[0]; + p[0] = p[2]; + p[2] = t; +} +static MA_INLINE void ma_dr_wav__bswap_samples_s24(ma_uint8* pSamples, ma_uint64 sampleCount) +{ + ma_uint64 iSample; + for (iSample = 0; iSample < sampleCount; iSample += 1) { + ma_uint8* pSample = pSamples + (iSample*3); + ma_dr_wav__bswap_s24(pSample); + } +} +static MA_INLINE ma_int32 ma_dr_wav__bswap_s32(ma_int32 n) +{ + return (ma_int32)ma_dr_wav__bswap32((ma_uint32)n); +} +static MA_INLINE void ma_dr_wav__bswap_samples_s32(ma_int32* pSamples, ma_uint64 sampleCount) +{ + ma_uint64 iSample; + for (iSample = 0; iSample < sampleCount; iSample += 1) { + pSamples[iSample] = ma_dr_wav__bswap_s32(pSamples[iSample]); + } +} +static MA_INLINE ma_int64 ma_dr_wav__bswap_s64(ma_int64 n) +{ + return (ma_int64)ma_dr_wav__bswap64((ma_uint64)n); +} +static MA_INLINE void ma_dr_wav__bswap_samples_s64(ma_int64* pSamples, ma_uint64 sampleCount) +{ + ma_uint64 iSample; + for (iSample = 0; iSample < sampleCount; iSample += 1) { + pSamples[iSample] = ma_dr_wav__bswap_s64(pSamples[iSample]); + } +} +static MA_INLINE float ma_dr_wav__bswap_f32(float n) +{ + union { + ma_uint32 i; + float f; + } x; + x.f = n; + x.i = ma_dr_wav__bswap32(x.i); + return x.f; +} +static MA_INLINE void ma_dr_wav__bswap_samples_f32(float* pSamples, ma_uint64 sampleCount) +{ + ma_uint64 iSample; + for (iSample = 0; iSample < sampleCount; iSample += 1) { + pSamples[iSample] = ma_dr_wav__bswap_f32(pSamples[iSample]); + } +} +static MA_INLINE void ma_dr_wav__bswap_samples(void* pSamples, ma_uint64 sampleCount, ma_uint32 bytesPerSample) +{ + switch (bytesPerSample) + { + case 1: + { + } break; + case 2: + { + ma_dr_wav__bswap_samples_s16((ma_int16*)pSamples, sampleCount); + } break; + case 3: + { + ma_dr_wav__bswap_samples_s24((ma_uint8*)pSamples, sampleCount); + } break; + case 4: + { + ma_dr_wav__bswap_samples_s32((ma_int32*)pSamples, sampleCount); + } break; + case 8: + { + ma_dr_wav__bswap_samples_s64((ma_int64*)pSamples, sampleCount); + } break; + default: + { + MA_DR_WAV_ASSERT(MA_FALSE); + } break; + } +} +MA_PRIVATE MA_INLINE ma_bool32 ma_dr_wav_is_container_be(ma_dr_wav_container container) +{ + if (container == ma_dr_wav_container_rifx || container == ma_dr_wav_container_aiff) { + return MA_TRUE; + } else { + return MA_FALSE; + } +} +MA_PRIVATE MA_INLINE ma_uint16 ma_dr_wav_bytes_to_u16_le(const ma_uint8* data) +{ + return ((ma_uint16)data[0] << 0) | ((ma_uint16)data[1] << 8); +} +MA_PRIVATE MA_INLINE ma_uint16 ma_dr_wav_bytes_to_u16_be(const ma_uint8* data) +{ + return ((ma_uint16)data[1] << 0) | ((ma_uint16)data[0] << 8); +} +MA_PRIVATE MA_INLINE ma_uint16 ma_dr_wav_bytes_to_u16_ex(const ma_uint8* data, ma_dr_wav_container container) +{ + if 
(ma_dr_wav_is_container_be(container)) { + return ma_dr_wav_bytes_to_u16_be(data); + } else { + return ma_dr_wav_bytes_to_u16_le(data); + } +} +MA_PRIVATE MA_INLINE ma_uint32 ma_dr_wav_bytes_to_u32_le(const ma_uint8* data) +{ + return ((ma_uint32)data[0] << 0) | ((ma_uint32)data[1] << 8) | ((ma_uint32)data[2] << 16) | ((ma_uint32)data[3] << 24); +} +MA_PRIVATE MA_INLINE ma_uint32 ma_dr_wav_bytes_to_u32_be(const ma_uint8* data) +{ + return ((ma_uint32)data[3] << 0) | ((ma_uint32)data[2] << 8) | ((ma_uint32)data[1] << 16) | ((ma_uint32)data[0] << 24); +} +MA_PRIVATE MA_INLINE ma_uint32 ma_dr_wav_bytes_to_u32_ex(const ma_uint8* data, ma_dr_wav_container container) +{ + if (ma_dr_wav_is_container_be(container)) { + return ma_dr_wav_bytes_to_u32_be(data); + } else { + return ma_dr_wav_bytes_to_u32_le(data); + } +} +MA_PRIVATE ma_int64 ma_dr_wav_aiff_extented_to_s64(const ma_uint8* data) +{ + ma_uint32 exponent = ((ma_uint32)data[0] << 8) | data[1]; + ma_uint64 hi = ((ma_uint64)data[2] << 24) | ((ma_uint64)data[3] << 16) | ((ma_uint64)data[4] << 8) | ((ma_uint64)data[5] << 0); + ma_uint64 lo = ((ma_uint64)data[6] << 24) | ((ma_uint64)data[7] << 16) | ((ma_uint64)data[8] << 8) | ((ma_uint64)data[9] << 0); + ma_uint64 significand = (hi << 32) | lo; + int sign = exponent >> 15; + exponent &= 0x7FFF; + if (exponent == 0 && significand == 0) { + return 0; + } else if (exponent == 0x7FFF) { + return sign ? MA_DR_WAV_INT64_MIN : MA_DR_WAV_INT64_MAX; + } + exponent -= 16383; + if (exponent > 63) { + return sign ? MA_DR_WAV_INT64_MIN : MA_DR_WAV_INT64_MAX; + } else if (exponent < 1) { + return 0; + } + significand >>= (63 - exponent); + if (sign) { + return -(ma_int64)significand; + } else { + return (ma_int64)significand; + } +} +MA_PRIVATE void* ma_dr_wav__malloc_default(size_t sz, void* pUserData) +{ + (void)pUserData; + return MA_DR_WAV_MALLOC(sz); +} +MA_PRIVATE void* ma_dr_wav__realloc_default(void* p, size_t sz, void* pUserData) +{ + (void)pUserData; + return MA_DR_WAV_REALLOC(p, sz); +} +MA_PRIVATE void ma_dr_wav__free_default(void* p, void* pUserData) +{ + (void)pUserData; + MA_DR_WAV_FREE(p); +} +MA_PRIVATE void* ma_dr_wav__malloc_from_callbacks(size_t sz, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pAllocationCallbacks == NULL) { + return NULL; + } + if (pAllocationCallbacks->onMalloc != NULL) { + return pAllocationCallbacks->onMalloc(sz, pAllocationCallbacks->pUserData); + } + if (pAllocationCallbacks->onRealloc != NULL) { + return pAllocationCallbacks->onRealloc(NULL, sz, pAllocationCallbacks->pUserData); + } + return NULL; +} +MA_PRIVATE void* ma_dr_wav__realloc_from_callbacks(void* p, size_t szNew, size_t szOld, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pAllocationCallbacks == NULL) { + return NULL; + } + if (pAllocationCallbacks->onRealloc != NULL) { + return pAllocationCallbacks->onRealloc(p, szNew, pAllocationCallbacks->pUserData); + } + if (pAllocationCallbacks->onMalloc != NULL && pAllocationCallbacks->onFree != NULL) { + void* p2; + p2 = pAllocationCallbacks->onMalloc(szNew, pAllocationCallbacks->pUserData); + if (p2 == NULL) { + return NULL; + } + if (p != NULL) { + MA_DR_WAV_COPY_MEMORY(p2, p, szOld); + pAllocationCallbacks->onFree(p, pAllocationCallbacks->pUserData); + } + return p2; + } + return NULL; +} +MA_PRIVATE void ma_dr_wav__free_from_callbacks(void* p, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (p == NULL || pAllocationCallbacks == NULL) { + return; + } + if (pAllocationCallbacks->onFree != NULL) { + 
pAllocationCallbacks->onFree(p, pAllocationCallbacks->pUserData); + } +} +MA_PRIVATE ma_allocation_callbacks ma_dr_wav_copy_allocation_callbacks_or_defaults(const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pAllocationCallbacks != NULL) { + return *pAllocationCallbacks; + } else { + ma_allocation_callbacks allocationCallbacks; + allocationCallbacks.pUserData = NULL; + allocationCallbacks.onMalloc = ma_dr_wav__malloc_default; + allocationCallbacks.onRealloc = ma_dr_wav__realloc_default; + allocationCallbacks.onFree = ma_dr_wav__free_default; + return allocationCallbacks; + } +} +static MA_INLINE ma_bool32 ma_dr_wav__is_compressed_format_tag(ma_uint16 formatTag) +{ + return + formatTag == MA_DR_WAVE_FORMAT_ADPCM || + formatTag == MA_DR_WAVE_FORMAT_DVI_ADPCM; +} +MA_PRIVATE unsigned int ma_dr_wav__chunk_padding_size_riff(ma_uint64 chunkSize) +{ + return (unsigned int)(chunkSize % 2); +} +MA_PRIVATE unsigned int ma_dr_wav__chunk_padding_size_w64(ma_uint64 chunkSize) +{ + return (unsigned int)(chunkSize % 8); +} +MA_PRIVATE ma_uint64 ma_dr_wav_read_pcm_frames_s16__msadpcm(ma_dr_wav* pWav, ma_uint64 samplesToRead, ma_int16* pBufferOut); +MA_PRIVATE ma_uint64 ma_dr_wav_read_pcm_frames_s16__ima(ma_dr_wav* pWav, ma_uint64 samplesToRead, ma_int16* pBufferOut); +MA_PRIVATE ma_bool32 ma_dr_wav_init_write__internal(ma_dr_wav* pWav, const ma_dr_wav_data_format* pFormat, ma_uint64 totalSampleCount); +MA_PRIVATE ma_result ma_dr_wav__read_chunk_header(ma_dr_wav_read_proc onRead, void* pUserData, ma_dr_wav_container container, ma_uint64* pRunningBytesReadOut, ma_dr_wav_chunk_header* pHeaderOut) +{ + if (container == ma_dr_wav_container_riff || container == ma_dr_wav_container_rifx || container == ma_dr_wav_container_rf64 || container == ma_dr_wav_container_aiff) { + ma_uint8 sizeInBytes[4]; + if (onRead(pUserData, pHeaderOut->id.fourcc, 4) != 4) { + return MA_AT_END; + } + if (onRead(pUserData, sizeInBytes, 4) != 4) { + return MA_INVALID_FILE; + } + pHeaderOut->sizeInBytes = ma_dr_wav_bytes_to_u32_ex(sizeInBytes, container); + pHeaderOut->paddingSize = ma_dr_wav__chunk_padding_size_riff(pHeaderOut->sizeInBytes); + *pRunningBytesReadOut += 8; + } else if (container == ma_dr_wav_container_w64) { + ma_uint8 sizeInBytes[8]; + if (onRead(pUserData, pHeaderOut->id.guid, 16) != 16) { + return MA_AT_END; + } + if (onRead(pUserData, sizeInBytes, 8) != 8) { + return MA_INVALID_FILE; + } + pHeaderOut->sizeInBytes = ma_dr_wav_bytes_to_u64(sizeInBytes) - 24; + pHeaderOut->paddingSize = ma_dr_wav__chunk_padding_size_w64(pHeaderOut->sizeInBytes); + *pRunningBytesReadOut += 24; + } else { + return MA_INVALID_FILE; + } + return MA_SUCCESS; +} +MA_PRIVATE ma_bool32 ma_dr_wav__seek_forward(ma_dr_wav_seek_proc onSeek, ma_uint64 offset, void* pUserData) +{ + ma_uint64 bytesRemainingToSeek = offset; + while (bytesRemainingToSeek > 0) { + if (bytesRemainingToSeek > 0x7FFFFFFF) { + if (!onSeek(pUserData, 0x7FFFFFFF, ma_dr_wav_seek_origin_current)) { + return MA_FALSE; + } + bytesRemainingToSeek -= 0x7FFFFFFF; + } else { + if (!onSeek(pUserData, (int)bytesRemainingToSeek, ma_dr_wav_seek_origin_current)) { + return MA_FALSE; + } + bytesRemainingToSeek = 0; + } + } + return MA_TRUE; +} +MA_PRIVATE ma_bool32 ma_dr_wav__seek_from_start(ma_dr_wav_seek_proc onSeek, ma_uint64 offset, void* pUserData) +{ + if (offset <= 0x7FFFFFFF) { + return onSeek(pUserData, (int)offset, ma_dr_wav_seek_origin_start); + } + if (!onSeek(pUserData, 0x7FFFFFFF, ma_dr_wav_seek_origin_start)) { + return MA_FALSE; + } + offset -= 0x7FFFFFFF; + for 
(;;) { + if (offset <= 0x7FFFFFFF) { + return onSeek(pUserData, (int)offset, ma_dr_wav_seek_origin_current); + } + if (!onSeek(pUserData, 0x7FFFFFFF, ma_dr_wav_seek_origin_current)) { + return MA_FALSE; + } + offset -= 0x7FFFFFFF; + } +} +MA_PRIVATE size_t ma_dr_wav__on_read(ma_dr_wav_read_proc onRead, void* pUserData, void* pBufferOut, size_t bytesToRead, ma_uint64* pCursor) +{ + size_t bytesRead; + MA_DR_WAV_ASSERT(onRead != NULL); + MA_DR_WAV_ASSERT(pCursor != NULL); + bytesRead = onRead(pUserData, pBufferOut, bytesToRead); + *pCursor += bytesRead; + return bytesRead; +} +#if 0 +MA_PRIVATE ma_bool32 ma_dr_wav__on_seek(ma_dr_wav_seek_proc onSeek, void* pUserData, int offset, ma_dr_wav_seek_origin origin, ma_uint64* pCursor) +{ + MA_DR_WAV_ASSERT(onSeek != NULL); + MA_DR_WAV_ASSERT(pCursor != NULL); + if (!onSeek(pUserData, offset, origin)) { + return MA_FALSE; + } + if (origin == ma_dr_wav_seek_origin_start) { + *pCursor = offset; + } else { + *pCursor += offset; + } + return MA_TRUE; +} +#endif +#define MA_DR_WAV_SMPL_BYTES 36 +#define MA_DR_WAV_SMPL_LOOP_BYTES 24 +#define MA_DR_WAV_INST_BYTES 7 +#define MA_DR_WAV_ACID_BYTES 24 +#define MA_DR_WAV_CUE_BYTES 4 +#define MA_DR_WAV_BEXT_BYTES 602 +#define MA_DR_WAV_BEXT_DESCRIPTION_BYTES 256 +#define MA_DR_WAV_BEXT_ORIGINATOR_NAME_BYTES 32 +#define MA_DR_WAV_BEXT_ORIGINATOR_REF_BYTES 32 +#define MA_DR_WAV_BEXT_RESERVED_BYTES 180 +#define MA_DR_WAV_BEXT_UMID_BYTES 64 +#define MA_DR_WAV_CUE_POINT_BYTES 24 +#define MA_DR_WAV_LIST_LABEL_OR_NOTE_BYTES 4 +#define MA_DR_WAV_LIST_LABELLED_TEXT_BYTES 20 +#define MA_DR_WAV_METADATA_ALIGNMENT 8 +typedef enum +{ + ma_dr_wav__metadata_parser_stage_count, + ma_dr_wav__metadata_parser_stage_read +} ma_dr_wav__metadata_parser_stage; +typedef struct +{ + ma_dr_wav_read_proc onRead; + ma_dr_wav_seek_proc onSeek; + void *pReadSeekUserData; + ma_dr_wav__metadata_parser_stage stage; + ma_dr_wav_metadata *pMetadata; + ma_uint32 metadataCount; + ma_uint8 *pData; + ma_uint8 *pDataCursor; + ma_uint64 metadataCursor; + ma_uint64 extraCapacity; +} ma_dr_wav__metadata_parser; +MA_PRIVATE size_t ma_dr_wav__metadata_memory_capacity(ma_dr_wav__metadata_parser* pParser) +{ + ma_uint64 cap = sizeof(ma_dr_wav_metadata) * (ma_uint64)pParser->metadataCount + pParser->extraCapacity; + if (cap > MA_SIZE_MAX) { + return 0; + } + return (size_t)cap; +} +MA_PRIVATE ma_uint8* ma_dr_wav__metadata_get_memory(ma_dr_wav__metadata_parser* pParser, size_t size, size_t align) +{ + ma_uint8* pResult; + if (align) { + ma_uintptr modulo = (ma_uintptr)pParser->pDataCursor % align; + if (modulo != 0) { + pParser->pDataCursor += align - modulo; + } + } + pResult = pParser->pDataCursor; + MA_DR_WAV_ASSERT((pResult + size) <= (pParser->pData + ma_dr_wav__metadata_memory_capacity(pParser))); + pParser->pDataCursor += size; + return pResult; +} +MA_PRIVATE void ma_dr_wav__metadata_request_extra_memory_for_stage_2(ma_dr_wav__metadata_parser* pParser, size_t bytes, size_t align) +{ + size_t extra = bytes + (align ? 
(align - 1) : 0); + pParser->extraCapacity += extra; +} +MA_PRIVATE ma_result ma_dr_wav__metadata_alloc(ma_dr_wav__metadata_parser* pParser, ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pParser->extraCapacity != 0 || pParser->metadataCount != 0) { + pAllocationCallbacks->onFree(pParser->pData, pAllocationCallbacks->pUserData); + pParser->pData = (ma_uint8*)pAllocationCallbacks->onMalloc(ma_dr_wav__metadata_memory_capacity(pParser), pAllocationCallbacks->pUserData); + pParser->pDataCursor = pParser->pData; + if (pParser->pData == NULL) { + return MA_OUT_OF_MEMORY; + } + pParser->pMetadata = (ma_dr_wav_metadata*)ma_dr_wav__metadata_get_memory(pParser, sizeof(ma_dr_wav_metadata) * pParser->metadataCount, 1); + pParser->metadataCursor = 0; + } + return MA_SUCCESS; +} +MA_PRIVATE size_t ma_dr_wav__metadata_parser_read(ma_dr_wav__metadata_parser* pParser, void* pBufferOut, size_t bytesToRead, ma_uint64* pCursor) +{ + if (pCursor != NULL) { + return ma_dr_wav__on_read(pParser->onRead, pParser->pReadSeekUserData, pBufferOut, bytesToRead, pCursor); + } else { + return pParser->onRead(pParser->pReadSeekUserData, pBufferOut, bytesToRead); + } +} +MA_PRIVATE ma_uint64 ma_dr_wav__read_smpl_to_metadata_obj(ma_dr_wav__metadata_parser* pParser, const ma_dr_wav_chunk_header* pChunkHeader, ma_dr_wav_metadata* pMetadata) +{ + ma_uint8 smplHeaderData[MA_DR_WAV_SMPL_BYTES]; + ma_uint64 totalBytesRead = 0; + size_t bytesJustRead; + if (pMetadata == NULL) { + return 0; + } + bytesJustRead = ma_dr_wav__metadata_parser_read(pParser, smplHeaderData, sizeof(smplHeaderData), &totalBytesRead); + MA_DR_WAV_ASSERT(pParser->stage == ma_dr_wav__metadata_parser_stage_read); + MA_DR_WAV_ASSERT(pChunkHeader != NULL); + if (pMetadata != NULL && bytesJustRead == sizeof(smplHeaderData)) { + ma_uint32 iSampleLoop; + pMetadata->type = ma_dr_wav_metadata_type_smpl; + pMetadata->data.smpl.manufacturerId = ma_dr_wav_bytes_to_u32(smplHeaderData + 0); + pMetadata->data.smpl.productId = ma_dr_wav_bytes_to_u32(smplHeaderData + 4); + pMetadata->data.smpl.samplePeriodNanoseconds = ma_dr_wav_bytes_to_u32(smplHeaderData + 8); + pMetadata->data.smpl.midiUnityNote = ma_dr_wav_bytes_to_u32(smplHeaderData + 12); + pMetadata->data.smpl.midiPitchFraction = ma_dr_wav_bytes_to_u32(smplHeaderData + 16); + pMetadata->data.smpl.smpteFormat = ma_dr_wav_bytes_to_u32(smplHeaderData + 20); + pMetadata->data.smpl.smpteOffset = ma_dr_wav_bytes_to_u32(smplHeaderData + 24); + pMetadata->data.smpl.sampleLoopCount = ma_dr_wav_bytes_to_u32(smplHeaderData + 28); + pMetadata->data.smpl.samplerSpecificDataSizeInBytes = ma_dr_wav_bytes_to_u32(smplHeaderData + 32); + if (pMetadata->data.smpl.sampleLoopCount == (pChunkHeader->sizeInBytes - MA_DR_WAV_SMPL_BYTES) / MA_DR_WAV_SMPL_LOOP_BYTES) { + pMetadata->data.smpl.pLoops = (ma_dr_wav_smpl_loop*)ma_dr_wav__metadata_get_memory(pParser, sizeof(ma_dr_wav_smpl_loop) * pMetadata->data.smpl.sampleLoopCount, MA_DR_WAV_METADATA_ALIGNMENT); + for (iSampleLoop = 0; iSampleLoop < pMetadata->data.smpl.sampleLoopCount; ++iSampleLoop) { + ma_uint8 smplLoopData[MA_DR_WAV_SMPL_LOOP_BYTES]; + bytesJustRead = ma_dr_wav__metadata_parser_read(pParser, smplLoopData, sizeof(smplLoopData), &totalBytesRead); + if (bytesJustRead == sizeof(smplLoopData)) { + pMetadata->data.smpl.pLoops[iSampleLoop].cuePointId = ma_dr_wav_bytes_to_u32(smplLoopData + 0); + pMetadata->data.smpl.pLoops[iSampleLoop].type = ma_dr_wav_bytes_to_u32(smplLoopData + 4); + pMetadata->data.smpl.pLoops[iSampleLoop].firstSampleByteOffset = 
ma_dr_wav_bytes_to_u32(smplLoopData + 8); + pMetadata->data.smpl.pLoops[iSampleLoop].lastSampleByteOffset = ma_dr_wav_bytes_to_u32(smplLoopData + 12); + pMetadata->data.smpl.pLoops[iSampleLoop].sampleFraction = ma_dr_wav_bytes_to_u32(smplLoopData + 16); + pMetadata->data.smpl.pLoops[iSampleLoop].playCount = ma_dr_wav_bytes_to_u32(smplLoopData + 20); + } else { + break; + } + } + if (pMetadata->data.smpl.samplerSpecificDataSizeInBytes > 0) { + pMetadata->data.smpl.pSamplerSpecificData = ma_dr_wav__metadata_get_memory(pParser, pMetadata->data.smpl.samplerSpecificDataSizeInBytes, 1); + MA_DR_WAV_ASSERT(pMetadata->data.smpl.pSamplerSpecificData != NULL); + ma_dr_wav__metadata_parser_read(pParser, pMetadata->data.smpl.pSamplerSpecificData, pMetadata->data.smpl.samplerSpecificDataSizeInBytes, &totalBytesRead); + } + } + } + return totalBytesRead; +} +MA_PRIVATE ma_uint64 ma_dr_wav__read_cue_to_metadata_obj(ma_dr_wav__metadata_parser* pParser, const ma_dr_wav_chunk_header* pChunkHeader, ma_dr_wav_metadata* pMetadata) +{ + ma_uint8 cueHeaderSectionData[MA_DR_WAV_CUE_BYTES]; + ma_uint64 totalBytesRead = 0; + size_t bytesJustRead; + if (pMetadata == NULL) { + return 0; + } + bytesJustRead = ma_dr_wav__metadata_parser_read(pParser, cueHeaderSectionData, sizeof(cueHeaderSectionData), &totalBytesRead); + MA_DR_WAV_ASSERT(pParser->stage == ma_dr_wav__metadata_parser_stage_read); + if (bytesJustRead == sizeof(cueHeaderSectionData)) { + pMetadata->type = ma_dr_wav_metadata_type_cue; + pMetadata->data.cue.cuePointCount = ma_dr_wav_bytes_to_u32(cueHeaderSectionData); + if (pMetadata->data.cue.cuePointCount == (pChunkHeader->sizeInBytes - MA_DR_WAV_CUE_BYTES) / MA_DR_WAV_CUE_POINT_BYTES) { + pMetadata->data.cue.pCuePoints = (ma_dr_wav_cue_point*)ma_dr_wav__metadata_get_memory(pParser, sizeof(ma_dr_wav_cue_point) * pMetadata->data.cue.cuePointCount, MA_DR_WAV_METADATA_ALIGNMENT); + MA_DR_WAV_ASSERT(pMetadata->data.cue.pCuePoints != NULL); + if (pMetadata->data.cue.cuePointCount > 0) { + ma_uint32 iCuePoint; + for (iCuePoint = 0; iCuePoint < pMetadata->data.cue.cuePointCount; ++iCuePoint) { + ma_uint8 cuePointData[MA_DR_WAV_CUE_POINT_BYTES]; + bytesJustRead = ma_dr_wav__metadata_parser_read(pParser, cuePointData, sizeof(cuePointData), &totalBytesRead); + if (bytesJustRead == sizeof(cuePointData)) { + pMetadata->data.cue.pCuePoints[iCuePoint].id = ma_dr_wav_bytes_to_u32(cuePointData + 0); + pMetadata->data.cue.pCuePoints[iCuePoint].playOrderPosition = ma_dr_wav_bytes_to_u32(cuePointData + 4); + pMetadata->data.cue.pCuePoints[iCuePoint].dataChunkId[0] = cuePointData[8]; + pMetadata->data.cue.pCuePoints[iCuePoint].dataChunkId[1] = cuePointData[9]; + pMetadata->data.cue.pCuePoints[iCuePoint].dataChunkId[2] = cuePointData[10]; + pMetadata->data.cue.pCuePoints[iCuePoint].dataChunkId[3] = cuePointData[11]; + pMetadata->data.cue.pCuePoints[iCuePoint].chunkStart = ma_dr_wav_bytes_to_u32(cuePointData + 12); + pMetadata->data.cue.pCuePoints[iCuePoint].blockStart = ma_dr_wav_bytes_to_u32(cuePointData + 16); + pMetadata->data.cue.pCuePoints[iCuePoint].sampleByteOffset = ma_dr_wav_bytes_to_u32(cuePointData + 20); + } else { + break; + } + } + } + } + } + return totalBytesRead; +} +MA_PRIVATE ma_uint64 ma_dr_wav__read_inst_to_metadata_obj(ma_dr_wav__metadata_parser* pParser, ma_dr_wav_metadata* pMetadata) +{ + ma_uint8 instData[MA_DR_WAV_INST_BYTES]; + ma_uint64 bytesRead; + if (pMetadata == NULL) { + return 0; + } + bytesRead = ma_dr_wav__metadata_parser_read(pParser, instData, sizeof(instData), NULL); + 
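/* The 7-byte "inst" payload is a fixed run of signed bytes (MIDI unity note, fine tune in cents, gain in decibels, then the low/high note and low/high velocity ranges), unpacked one field at a time below. */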
MA_DR_WAV_ASSERT(pParser->stage == ma_dr_wav__metadata_parser_stage_read); + if (bytesRead == sizeof(instData)) { + pMetadata->type = ma_dr_wav_metadata_type_inst; + pMetadata->data.inst.midiUnityNote = (ma_int8)instData[0]; + pMetadata->data.inst.fineTuneCents = (ma_int8)instData[1]; + pMetadata->data.inst.gainDecibels = (ma_int8)instData[2]; + pMetadata->data.inst.lowNote = (ma_int8)instData[3]; + pMetadata->data.inst.highNote = (ma_int8)instData[4]; + pMetadata->data.inst.lowVelocity = (ma_int8)instData[5]; + pMetadata->data.inst.highVelocity = (ma_int8)instData[6]; + } + return bytesRead; +} +MA_PRIVATE ma_uint64 ma_dr_wav__read_acid_to_metadata_obj(ma_dr_wav__metadata_parser* pParser, ma_dr_wav_metadata* pMetadata) +{ + ma_uint8 acidData[MA_DR_WAV_ACID_BYTES]; + ma_uint64 bytesRead; + if (pMetadata == NULL) { + return 0; + } + bytesRead = ma_dr_wav__metadata_parser_read(pParser, acidData, sizeof(acidData), NULL); + MA_DR_WAV_ASSERT(pParser->stage == ma_dr_wav__metadata_parser_stage_read); + if (bytesRead == sizeof(acidData)) { + pMetadata->type = ma_dr_wav_metadata_type_acid; + pMetadata->data.acid.flags = ma_dr_wav_bytes_to_u32(acidData + 0); + pMetadata->data.acid.midiUnityNote = ma_dr_wav_bytes_to_u16(acidData + 4); + pMetadata->data.acid.reserved1 = ma_dr_wav_bytes_to_u16(acidData + 6); + pMetadata->data.acid.reserved2 = ma_dr_wav_bytes_to_f32(acidData + 8); + pMetadata->data.acid.numBeats = ma_dr_wav_bytes_to_u32(acidData + 12); + pMetadata->data.acid.meterDenominator = ma_dr_wav_bytes_to_u16(acidData + 16); + pMetadata->data.acid.meterNumerator = ma_dr_wav_bytes_to_u16(acidData + 18); + pMetadata->data.acid.tempo = ma_dr_wav_bytes_to_f32(acidData + 20); + } + return bytesRead; +} +MA_PRIVATE size_t ma_dr_wav__strlen(const char* str) +{ + size_t result = 0; + while (*str++) { + result += 1; + } + return result; +} +MA_PRIVATE size_t ma_dr_wav__strlen_clamped(const char* str, size_t maxToRead) +{ + size_t result = 0; + while (*str++ && result < maxToRead) { + result += 1; + } + return result; +} +MA_PRIVATE char* ma_dr_wav__metadata_copy_string(ma_dr_wav__metadata_parser* pParser, const char* str, size_t maxToRead) +{ + size_t len = ma_dr_wav__strlen_clamped(str, maxToRead); + if (len) { + char* result = (char*)ma_dr_wav__metadata_get_memory(pParser, len + 1, 1); + MA_DR_WAV_ASSERT(result != NULL); + MA_DR_WAV_COPY_MEMORY(result, str, len); + result[len] = '\0'; + return result; + } else { + return NULL; + } +} +typedef struct +{ + const void* pBuffer; + size_t sizeInBytes; + size_t cursor; +} ma_dr_wav_buffer_reader; +MA_PRIVATE ma_result ma_dr_wav_buffer_reader_init(const void* pBuffer, size_t sizeInBytes, ma_dr_wav_buffer_reader* pReader) +{ + MA_DR_WAV_ASSERT(pBuffer != NULL); + MA_DR_WAV_ASSERT(pReader != NULL); + MA_DR_WAV_ZERO_OBJECT(pReader); + pReader->pBuffer = pBuffer; + pReader->sizeInBytes = sizeInBytes; + pReader->cursor = 0; + return MA_SUCCESS; +} +MA_PRIVATE const void* ma_dr_wav_buffer_reader_ptr(const ma_dr_wav_buffer_reader* pReader) +{ + MA_DR_WAV_ASSERT(pReader != NULL); + return ma_dr_wav_offset_ptr(pReader->pBuffer, pReader->cursor); +} +MA_PRIVATE ma_result ma_dr_wav_buffer_reader_seek(ma_dr_wav_buffer_reader* pReader, size_t bytesToSeek) +{ + MA_DR_WAV_ASSERT(pReader != NULL); + if (pReader->cursor + bytesToSeek > pReader->sizeInBytes) { + return MA_BAD_SEEK; + } + pReader->cursor += bytesToSeek; + return MA_SUCCESS; +} +MA_PRIVATE ma_result ma_dr_wav_buffer_reader_read(ma_dr_wav_buffer_reader* pReader, void* pDst, size_t bytesToRead, size_t* pBytesRead) 
+{ + ma_result result = MA_SUCCESS; + size_t bytesRemaining; + MA_DR_WAV_ASSERT(pReader != NULL); + if (pBytesRead != NULL) { + *pBytesRead = 0; + } + bytesRemaining = (pReader->sizeInBytes - pReader->cursor); + if (bytesToRead > bytesRemaining) { + bytesToRead = bytesRemaining; + } + if (pDst == NULL) { + result = ma_dr_wav_buffer_reader_seek(pReader, bytesToRead); + } else { + MA_DR_WAV_COPY_MEMORY(pDst, ma_dr_wav_buffer_reader_ptr(pReader), bytesToRead); + pReader->cursor += bytesToRead; + } + MA_DR_WAV_ASSERT(pReader->cursor <= pReader->sizeInBytes); + if (result == MA_SUCCESS) { + if (pBytesRead != NULL) { + *pBytesRead = bytesToRead; + } + } + return MA_SUCCESS; +} +MA_PRIVATE ma_result ma_dr_wav_buffer_reader_read_u16(ma_dr_wav_buffer_reader* pReader, ma_uint16* pDst) +{ + ma_result result; + size_t bytesRead; + ma_uint8 data[2]; + MA_DR_WAV_ASSERT(pReader != NULL); + MA_DR_WAV_ASSERT(pDst != NULL); + *pDst = 0; + result = ma_dr_wav_buffer_reader_read(pReader, data, sizeof(*pDst), &bytesRead); + if (result != MA_SUCCESS || bytesRead != sizeof(*pDst)) { + return result; + } + *pDst = ma_dr_wav_bytes_to_u16(data); + return MA_SUCCESS; +} +MA_PRIVATE ma_result ma_dr_wav_buffer_reader_read_u32(ma_dr_wav_buffer_reader* pReader, ma_uint32* pDst) +{ + ma_result result; + size_t bytesRead; + ma_uint8 data[4]; + MA_DR_WAV_ASSERT(pReader != NULL); + MA_DR_WAV_ASSERT(pDst != NULL); + *pDst = 0; + result = ma_dr_wav_buffer_reader_read(pReader, data, sizeof(*pDst), &bytesRead); + if (result != MA_SUCCESS || bytesRead != sizeof(*pDst)) { + return result; + } + *pDst = ma_dr_wav_bytes_to_u32(data); + return MA_SUCCESS; +} +MA_PRIVATE ma_uint64 ma_dr_wav__read_bext_to_metadata_obj(ma_dr_wav__metadata_parser* pParser, ma_dr_wav_metadata* pMetadata, ma_uint64 chunkSize) +{ + ma_uint8 bextData[MA_DR_WAV_BEXT_BYTES]; + size_t bytesRead = ma_dr_wav__metadata_parser_read(pParser, bextData, sizeof(bextData), NULL); + MA_DR_WAV_ASSERT(pParser->stage == ma_dr_wav__metadata_parser_stage_read); + if (bytesRead == sizeof(bextData)) { + ma_dr_wav_buffer_reader reader; + ma_uint32 timeReferenceLow; + ma_uint32 timeReferenceHigh; + size_t extraBytes; + pMetadata->type = ma_dr_wav_metadata_type_bext; + if (ma_dr_wav_buffer_reader_init(bextData, bytesRead, &reader) == MA_SUCCESS) { + pMetadata->data.bext.pDescription = ma_dr_wav__metadata_copy_string(pParser, (const char*)ma_dr_wav_buffer_reader_ptr(&reader), MA_DR_WAV_BEXT_DESCRIPTION_BYTES); + ma_dr_wav_buffer_reader_seek(&reader, MA_DR_WAV_BEXT_DESCRIPTION_BYTES); + pMetadata->data.bext.pOriginatorName = ma_dr_wav__metadata_copy_string(pParser, (const char*)ma_dr_wav_buffer_reader_ptr(&reader), MA_DR_WAV_BEXT_ORIGINATOR_NAME_BYTES); + ma_dr_wav_buffer_reader_seek(&reader, MA_DR_WAV_BEXT_ORIGINATOR_NAME_BYTES); + pMetadata->data.bext.pOriginatorReference = ma_dr_wav__metadata_copy_string(pParser, (const char*)ma_dr_wav_buffer_reader_ptr(&reader), MA_DR_WAV_BEXT_ORIGINATOR_REF_BYTES); + ma_dr_wav_buffer_reader_seek(&reader, MA_DR_WAV_BEXT_ORIGINATOR_REF_BYTES); + ma_dr_wav_buffer_reader_read(&reader, pMetadata->data.bext.pOriginationDate, sizeof(pMetadata->data.bext.pOriginationDate), NULL); + ma_dr_wav_buffer_reader_read(&reader, pMetadata->data.bext.pOriginationTime, sizeof(pMetadata->data.bext.pOriginationTime), NULL); + ma_dr_wav_buffer_reader_read_u32(&reader, &timeReferenceLow); + ma_dr_wav_buffer_reader_read_u32(&reader, &timeReferenceHigh); + pMetadata->data.bext.timeReference = ((ma_uint64)timeReferenceHigh << 32) + timeReferenceLow; + 
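/* The BWF time reference is a 64-bit sample count stored as two little-endian 32-bit words, low word first, which is why it is reassembled above as (high << 32) + low. */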
ma_dr_wav_buffer_reader_read_u16(&reader, &pMetadata->data.bext.version); + pMetadata->data.bext.pUMID = ma_dr_wav__metadata_get_memory(pParser, MA_DR_WAV_BEXT_UMID_BYTES, 1); + ma_dr_wav_buffer_reader_read(&reader, pMetadata->data.bext.pUMID, MA_DR_WAV_BEXT_UMID_BYTES, NULL); + ma_dr_wav_buffer_reader_read_u16(&reader, &pMetadata->data.bext.loudnessValue); + ma_dr_wav_buffer_reader_read_u16(&reader, &pMetadata->data.bext.loudnessRange); + ma_dr_wav_buffer_reader_read_u16(&reader, &pMetadata->data.bext.maxTruePeakLevel); + ma_dr_wav_buffer_reader_read_u16(&reader, &pMetadata->data.bext.maxMomentaryLoudness); + ma_dr_wav_buffer_reader_read_u16(&reader, &pMetadata->data.bext.maxShortTermLoudness); + MA_DR_WAV_ASSERT((ma_dr_wav_offset_ptr(ma_dr_wav_buffer_reader_ptr(&reader), MA_DR_WAV_BEXT_RESERVED_BYTES)) == (bextData + MA_DR_WAV_BEXT_BYTES)); + extraBytes = (size_t)(chunkSize - MA_DR_WAV_BEXT_BYTES); + if (extraBytes > 0) { + pMetadata->data.bext.pCodingHistory = (char*)ma_dr_wav__metadata_get_memory(pParser, extraBytes + 1, 1); + MA_DR_WAV_ASSERT(pMetadata->data.bext.pCodingHistory != NULL); + bytesRead += ma_dr_wav__metadata_parser_read(pParser, pMetadata->data.bext.pCodingHistory, extraBytes, NULL); + pMetadata->data.bext.codingHistorySize = (ma_uint32)ma_dr_wav__strlen(pMetadata->data.bext.pCodingHistory); + } else { + pMetadata->data.bext.pCodingHistory = NULL; + pMetadata->data.bext.codingHistorySize = 0; + } + } + } + return bytesRead; +} +MA_PRIVATE ma_uint64 ma_dr_wav__read_list_label_or_note_to_metadata_obj(ma_dr_wav__metadata_parser* pParser, ma_dr_wav_metadata* pMetadata, ma_uint64 chunkSize, ma_dr_wav_metadata_type type) +{ + ma_uint8 cueIDBuffer[MA_DR_WAV_LIST_LABEL_OR_NOTE_BYTES]; + ma_uint64 totalBytesRead = 0; + size_t bytesJustRead = ma_dr_wav__metadata_parser_read(pParser, cueIDBuffer, sizeof(cueIDBuffer), &totalBytesRead); + MA_DR_WAV_ASSERT(pParser->stage == ma_dr_wav__metadata_parser_stage_read); + if (bytesJustRead == sizeof(cueIDBuffer)) { + ma_uint32 sizeIncludingNullTerminator; + pMetadata->type = type; + pMetadata->data.labelOrNote.cuePointId = ma_dr_wav_bytes_to_u32(cueIDBuffer); + sizeIncludingNullTerminator = (ma_uint32)chunkSize - MA_DR_WAV_LIST_LABEL_OR_NOTE_BYTES; + if (sizeIncludingNullTerminator > 0) { + pMetadata->data.labelOrNote.stringLength = sizeIncludingNullTerminator - 1; + pMetadata->data.labelOrNote.pString = (char*)ma_dr_wav__metadata_get_memory(pParser, sizeIncludingNullTerminator, 1); + MA_DR_WAV_ASSERT(pMetadata->data.labelOrNote.pString != NULL); + ma_dr_wav__metadata_parser_read(pParser, pMetadata->data.labelOrNote.pString, sizeIncludingNullTerminator, &totalBytesRead); + } else { + pMetadata->data.labelOrNote.stringLength = 0; + pMetadata->data.labelOrNote.pString = NULL; + } + } + return totalBytesRead; +} +MA_PRIVATE ma_uint64 ma_dr_wav__read_list_labelled_cue_region_to_metadata_obj(ma_dr_wav__metadata_parser* pParser, ma_dr_wav_metadata* pMetadata, ma_uint64 chunkSize) +{ + ma_uint8 buffer[MA_DR_WAV_LIST_LABELLED_TEXT_BYTES]; + ma_uint64 totalBytesRead = 0; + size_t bytesJustRead = ma_dr_wav__metadata_parser_read(pParser, buffer, sizeof(buffer), &totalBytesRead); + MA_DR_WAV_ASSERT(pParser->stage == ma_dr_wav__metadata_parser_stage_read); + if (bytesJustRead == sizeof(buffer)) { + ma_uint32 sizeIncludingNullTerminator; + pMetadata->type = ma_dr_wav_metadata_type_list_labelled_cue_region; + pMetadata->data.labelledCueRegion.cuePointId = ma_dr_wav_bytes_to_u32(buffer + 0); + pMetadata->data.labelledCueRegion.sampleLength = 
ma_dr_wav_bytes_to_u32(buffer + 4); + pMetadata->data.labelledCueRegion.purposeId[0] = buffer[8]; + pMetadata->data.labelledCueRegion.purposeId[1] = buffer[9]; + pMetadata->data.labelledCueRegion.purposeId[2] = buffer[10]; + pMetadata->data.labelledCueRegion.purposeId[3] = buffer[11]; + pMetadata->data.labelledCueRegion.country = ma_dr_wav_bytes_to_u16(buffer + 12); + pMetadata->data.labelledCueRegion.language = ma_dr_wav_bytes_to_u16(buffer + 14); + pMetadata->data.labelledCueRegion.dialect = ma_dr_wav_bytes_to_u16(buffer + 16); + pMetadata->data.labelledCueRegion.codePage = ma_dr_wav_bytes_to_u16(buffer + 18); + sizeIncludingNullTerminator = (ma_uint32)chunkSize - MA_DR_WAV_LIST_LABELLED_TEXT_BYTES; + if (sizeIncludingNullTerminator > 0) { + pMetadata->data.labelledCueRegion.stringLength = sizeIncludingNullTerminator - 1; + pMetadata->data.labelledCueRegion.pString = (char*)ma_dr_wav__metadata_get_memory(pParser, sizeIncludingNullTerminator, 1); + MA_DR_WAV_ASSERT(pMetadata->data.labelledCueRegion.pString != NULL); + ma_dr_wav__metadata_parser_read(pParser, pMetadata->data.labelledCueRegion.pString, sizeIncludingNullTerminator, &totalBytesRead); + } else { + pMetadata->data.labelledCueRegion.stringLength = 0; + pMetadata->data.labelledCueRegion.pString = NULL; + } + } + return totalBytesRead; +} +MA_PRIVATE ma_uint64 ma_dr_wav__metadata_process_info_text_chunk(ma_dr_wav__metadata_parser* pParser, ma_uint64 chunkSize, ma_dr_wav_metadata_type type) +{ + ma_uint64 bytesRead = 0; + ma_uint32 stringSizeWithNullTerminator = (ma_uint32)chunkSize; + if (pParser->stage == ma_dr_wav__metadata_parser_stage_count) { + pParser->metadataCount += 1; + ma_dr_wav__metadata_request_extra_memory_for_stage_2(pParser, stringSizeWithNullTerminator, 1); + } else { + ma_dr_wav_metadata* pMetadata = &pParser->pMetadata[pParser->metadataCursor]; + pMetadata->type = type; + if (stringSizeWithNullTerminator > 0) { + pMetadata->data.infoText.stringLength = stringSizeWithNullTerminator - 1; + pMetadata->data.infoText.pString = (char*)ma_dr_wav__metadata_get_memory(pParser, stringSizeWithNullTerminator, 1); + MA_DR_WAV_ASSERT(pMetadata->data.infoText.pString != NULL); + bytesRead = ma_dr_wav__metadata_parser_read(pParser, pMetadata->data.infoText.pString, (size_t)stringSizeWithNullTerminator, NULL); + if (bytesRead == chunkSize) { + pParser->metadataCursor += 1; + } else { + } + } else { + pMetadata->data.infoText.stringLength = 0; + pMetadata->data.infoText.pString = NULL; + pParser->metadataCursor += 1; + } + } + return bytesRead; +} +MA_PRIVATE ma_uint64 ma_dr_wav__metadata_process_unknown_chunk(ma_dr_wav__metadata_parser* pParser, const ma_uint8* pChunkId, ma_uint64 chunkSize, ma_dr_wav_metadata_location location) +{ + ma_uint64 bytesRead = 0; + if (location == ma_dr_wav_metadata_location_invalid) { + return 0; + } + if (ma_dr_wav_fourcc_equal(pChunkId, "data") || ma_dr_wav_fourcc_equal(pChunkId, "fmt ") || ma_dr_wav_fourcc_equal(pChunkId, "fact")) { + return 0; + } + if (pParser->stage == ma_dr_wav__metadata_parser_stage_count) { + pParser->metadataCount += 1; + ma_dr_wav__metadata_request_extra_memory_for_stage_2(pParser, (size_t)chunkSize, 1); + } else { + ma_dr_wav_metadata* pMetadata = &pParser->pMetadata[pParser->metadataCursor]; + pMetadata->type = ma_dr_wav_metadata_type_unknown; + pMetadata->data.unknown.chunkLocation = location; + pMetadata->data.unknown.id[0] = pChunkId[0]; + pMetadata->data.unknown.id[1] = pChunkId[1]; + pMetadata->data.unknown.id[2] = pChunkId[2]; + pMetadata->data.unknown.id[3] = 
pChunkId[3]; + pMetadata->data.unknown.dataSizeInBytes = (ma_uint32)chunkSize; + pMetadata->data.unknown.pData = (ma_uint8 *)ma_dr_wav__metadata_get_memory(pParser, (size_t)chunkSize, 1); + MA_DR_WAV_ASSERT(pMetadata->data.unknown.pData != NULL); + bytesRead = ma_dr_wav__metadata_parser_read(pParser, pMetadata->data.unknown.pData, pMetadata->data.unknown.dataSizeInBytes, NULL); + if (bytesRead == pMetadata->data.unknown.dataSizeInBytes) { + pParser->metadataCursor += 1; + } else { + } + } + return bytesRead; +} +MA_PRIVATE ma_bool32 ma_dr_wav__chunk_matches(ma_dr_wav_metadata_type allowedMetadataTypes, const ma_uint8* pChunkID, ma_dr_wav_metadata_type type, const char* pID) +{ + return (allowedMetadataTypes & type) && ma_dr_wav_fourcc_equal(pChunkID, pID); +} +MA_PRIVATE ma_uint64 ma_dr_wav__metadata_process_chunk(ma_dr_wav__metadata_parser* pParser, const ma_dr_wav_chunk_header* pChunkHeader, ma_dr_wav_metadata_type allowedMetadataTypes) +{ + const ma_uint8 *pChunkID = pChunkHeader->id.fourcc; + ma_uint64 bytesRead = 0; + if (ma_dr_wav__chunk_matches(allowedMetadataTypes, pChunkID, ma_dr_wav_metadata_type_smpl, "smpl")) { + if (pChunkHeader->sizeInBytes >= MA_DR_WAV_SMPL_BYTES) { + if (pParser->stage == ma_dr_wav__metadata_parser_stage_count) { + ma_uint8 buffer[4]; + size_t bytesJustRead; + if (!pParser->onSeek(pParser->pReadSeekUserData, 28, ma_dr_wav_seek_origin_current)) { + return bytesRead; + } + bytesRead += 28; + bytesJustRead = ma_dr_wav__metadata_parser_read(pParser, buffer, sizeof(buffer), &bytesRead); + if (bytesJustRead == sizeof(buffer)) { + ma_uint32 loopCount = ma_dr_wav_bytes_to_u32(buffer); + ma_uint64 calculatedLoopCount; + calculatedLoopCount = (pChunkHeader->sizeInBytes - MA_DR_WAV_SMPL_BYTES) / MA_DR_WAV_SMPL_LOOP_BYTES; + if (calculatedLoopCount == loopCount) { + bytesJustRead = ma_dr_wav__metadata_parser_read(pParser, buffer, sizeof(buffer), &bytesRead); + if (bytesJustRead == sizeof(buffer)) { + ma_uint32 samplerSpecificDataSizeInBytes = ma_dr_wav_bytes_to_u32(buffer); + pParser->metadataCount += 1; + ma_dr_wav__metadata_request_extra_memory_for_stage_2(pParser, sizeof(ma_dr_wav_smpl_loop) * loopCount, MA_DR_WAV_METADATA_ALIGNMENT); + ma_dr_wav__metadata_request_extra_memory_for_stage_2(pParser, samplerSpecificDataSizeInBytes, 1); + } + } else { + } + } + } else { + bytesRead = ma_dr_wav__read_smpl_to_metadata_obj(pParser, pChunkHeader, &pParser->pMetadata[pParser->metadataCursor]); + if (bytesRead == pChunkHeader->sizeInBytes) { + pParser->metadataCursor += 1; + } else { + } + } + } else { + } + } else if (ma_dr_wav__chunk_matches(allowedMetadataTypes, pChunkID, ma_dr_wav_metadata_type_inst, "inst")) { + if (pChunkHeader->sizeInBytes == MA_DR_WAV_INST_BYTES) { + if (pParser->stage == ma_dr_wav__metadata_parser_stage_count) { + pParser->metadataCount += 1; + } else { + bytesRead = ma_dr_wav__read_inst_to_metadata_obj(pParser, &pParser->pMetadata[pParser->metadataCursor]); + if (bytesRead == pChunkHeader->sizeInBytes) { + pParser->metadataCursor += 1; + } else { + } + } + } else { + } + } else if (ma_dr_wav__chunk_matches(allowedMetadataTypes, pChunkID, ma_dr_wav_metadata_type_acid, "acid")) { + if (pChunkHeader->sizeInBytes == MA_DR_WAV_ACID_BYTES) { + if (pParser->stage == ma_dr_wav__metadata_parser_stage_count) { + pParser->metadataCount += 1; + } else { + bytesRead = ma_dr_wav__read_acid_to_metadata_obj(pParser, &pParser->pMetadata[pParser->metadataCursor]); + if (bytesRead == pChunkHeader->sizeInBytes) { + pParser->metadataCursor += 1; + } else { + } + } + } 
else { + } + } else if (ma_dr_wav__chunk_matches(allowedMetadataTypes, pChunkID, ma_dr_wav_metadata_type_cue, "cue ")) { + if (pChunkHeader->sizeInBytes >= MA_DR_WAV_CUE_BYTES) { + if (pParser->stage == ma_dr_wav__metadata_parser_stage_count) { + size_t cueCount; + pParser->metadataCount += 1; + cueCount = (size_t)(pChunkHeader->sizeInBytes - MA_DR_WAV_CUE_BYTES) / MA_DR_WAV_CUE_POINT_BYTES; + ma_dr_wav__metadata_request_extra_memory_for_stage_2(pParser, sizeof(ma_dr_wav_cue_point) * cueCount, MA_DR_WAV_METADATA_ALIGNMENT); + } else { + bytesRead = ma_dr_wav__read_cue_to_metadata_obj(pParser, pChunkHeader, &pParser->pMetadata[pParser->metadataCursor]); + if (bytesRead == pChunkHeader->sizeInBytes) { + pParser->metadataCursor += 1; + } else { + } + } + } else { + } + } else if (ma_dr_wav__chunk_matches(allowedMetadataTypes, pChunkID, ma_dr_wav_metadata_type_bext, "bext")) { + if (pChunkHeader->sizeInBytes >= MA_DR_WAV_BEXT_BYTES) { + if (pParser->stage == ma_dr_wav__metadata_parser_stage_count) { + char buffer[MA_DR_WAV_BEXT_DESCRIPTION_BYTES + 1]; + size_t allocSizeNeeded = MA_DR_WAV_BEXT_UMID_BYTES; + size_t bytesJustRead; + buffer[MA_DR_WAV_BEXT_DESCRIPTION_BYTES] = '\0'; + bytesJustRead = ma_dr_wav__metadata_parser_read(pParser, buffer, MA_DR_WAV_BEXT_DESCRIPTION_BYTES, &bytesRead); + if (bytesJustRead != MA_DR_WAV_BEXT_DESCRIPTION_BYTES) { + return bytesRead; + } + allocSizeNeeded += ma_dr_wav__strlen(buffer) + 1; + buffer[MA_DR_WAV_BEXT_ORIGINATOR_NAME_BYTES] = '\0'; + bytesJustRead = ma_dr_wav__metadata_parser_read(pParser, buffer, MA_DR_WAV_BEXT_ORIGINATOR_NAME_BYTES, &bytesRead); + if (bytesJustRead != MA_DR_WAV_BEXT_ORIGINATOR_NAME_BYTES) { + return bytesRead; + } + allocSizeNeeded += ma_dr_wav__strlen(buffer) + 1; + buffer[MA_DR_WAV_BEXT_ORIGINATOR_REF_BYTES] = '\0'; + bytesJustRead = ma_dr_wav__metadata_parser_read(pParser, buffer, MA_DR_WAV_BEXT_ORIGINATOR_REF_BYTES, &bytesRead); + if (bytesJustRead != MA_DR_WAV_BEXT_ORIGINATOR_REF_BYTES) { + return bytesRead; + } + allocSizeNeeded += ma_dr_wav__strlen(buffer) + 1; + allocSizeNeeded += (size_t)pChunkHeader->sizeInBytes - MA_DR_WAV_BEXT_BYTES; + ma_dr_wav__metadata_request_extra_memory_for_stage_2(pParser, allocSizeNeeded, 1); + pParser->metadataCount += 1; + } else { + bytesRead = ma_dr_wav__read_bext_to_metadata_obj(pParser, &pParser->pMetadata[pParser->metadataCursor], pChunkHeader->sizeInBytes); + if (bytesRead == pChunkHeader->sizeInBytes) { + pParser->metadataCursor += 1; + } else { + } + } + } else { + } + } else if (ma_dr_wav_fourcc_equal(pChunkID, "LIST") || ma_dr_wav_fourcc_equal(pChunkID, "list")) { + ma_dr_wav_metadata_location listType = ma_dr_wav_metadata_location_invalid; + while (bytesRead < pChunkHeader->sizeInBytes) { + ma_uint8 subchunkId[4]; + ma_uint8 subchunkSizeBuffer[4]; + ma_uint64 subchunkDataSize; + ma_uint64 subchunkBytesRead = 0; + ma_uint64 bytesJustRead = ma_dr_wav__metadata_parser_read(pParser, subchunkId, sizeof(subchunkId), &bytesRead); + if (bytesJustRead != sizeof(subchunkId)) { + break; + } + if (ma_dr_wav_fourcc_equal(subchunkId, "adtl")) { + listType = ma_dr_wav_metadata_location_inside_adtl_list; + continue; + } else if (ma_dr_wav_fourcc_equal(subchunkId, "INFO")) { + listType = ma_dr_wav_metadata_location_inside_info_list; + continue; + } + bytesJustRead = ma_dr_wav__metadata_parser_read(pParser, subchunkSizeBuffer, sizeof(subchunkSizeBuffer), &bytesRead); + if (bytesJustRead != sizeof(subchunkSizeBuffer)) { + break; + } + subchunkDataSize = ma_dr_wav_bytes_to_u32(subchunkSizeBuffer); + 
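/* A LIST chunk opens with a 4-byte form type ("adtl" for cue-point text, "INFO" for tagging metadata) followed by subchunks, each carrying a 4-byte ID, a 32-bit little-endian size and its payload padded to an even byte boundary; the chain below dispatches on the subchunk ID and falls back to the unknown-chunk handler. */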
if (ma_dr_wav__chunk_matches(allowedMetadataTypes, subchunkId, ma_dr_wav_metadata_type_list_label, "labl") || ma_dr_wav__chunk_matches(allowedMetadataTypes, subchunkId, ma_dr_wav_metadata_type_list_note, "note")) { + if (subchunkDataSize >= MA_DR_WAV_LIST_LABEL_OR_NOTE_BYTES) { + ma_uint64 stringSizeWithNullTerm = subchunkDataSize - MA_DR_WAV_LIST_LABEL_OR_NOTE_BYTES; + if (pParser->stage == ma_dr_wav__metadata_parser_stage_count) { + pParser->metadataCount += 1; + ma_dr_wav__metadata_request_extra_memory_for_stage_2(pParser, (size_t)stringSizeWithNullTerm, 1); + } else { + subchunkBytesRead = ma_dr_wav__read_list_label_or_note_to_metadata_obj(pParser, &pParser->pMetadata[pParser->metadataCursor], subchunkDataSize, ma_dr_wav_fourcc_equal(subchunkId, "labl") ? ma_dr_wav_metadata_type_list_label : ma_dr_wav_metadata_type_list_note); + if (subchunkBytesRead == subchunkDataSize) { + pParser->metadataCursor += 1; + } else { + } + } + } else { + } + } else if (ma_dr_wav__chunk_matches(allowedMetadataTypes, subchunkId, ma_dr_wav_metadata_type_list_labelled_cue_region, "ltxt")) { + if (subchunkDataSize >= MA_DR_WAV_LIST_LABELLED_TEXT_BYTES) { + ma_uint64 stringSizeWithNullTerminator = subchunkDataSize - MA_DR_WAV_LIST_LABELLED_TEXT_BYTES; + if (pParser->stage == ma_dr_wav__metadata_parser_stage_count) { + pParser->metadataCount += 1; + ma_dr_wav__metadata_request_extra_memory_for_stage_2(pParser, (size_t)stringSizeWithNullTerminator, 1); + } else { + subchunkBytesRead = ma_dr_wav__read_list_labelled_cue_region_to_metadata_obj(pParser, &pParser->pMetadata[pParser->metadataCursor], subchunkDataSize); + if (subchunkBytesRead == subchunkDataSize) { + pParser->metadataCursor += 1; + } else { + } + } + } else { + } + } else if (ma_dr_wav__chunk_matches(allowedMetadataTypes, subchunkId, ma_dr_wav_metadata_type_list_info_software, "ISFT")) { + subchunkBytesRead = ma_dr_wav__metadata_process_info_text_chunk(pParser, subchunkDataSize, ma_dr_wav_metadata_type_list_info_software); + } else if (ma_dr_wav__chunk_matches(allowedMetadataTypes, subchunkId, ma_dr_wav_metadata_type_list_info_copyright, "ICOP")) { + subchunkBytesRead = ma_dr_wav__metadata_process_info_text_chunk(pParser, subchunkDataSize, ma_dr_wav_metadata_type_list_info_copyright); + } else if (ma_dr_wav__chunk_matches(allowedMetadataTypes, subchunkId, ma_dr_wav_metadata_type_list_info_title, "INAM")) { + subchunkBytesRead = ma_dr_wav__metadata_process_info_text_chunk(pParser, subchunkDataSize, ma_dr_wav_metadata_type_list_info_title); + } else if (ma_dr_wav__chunk_matches(allowedMetadataTypes, subchunkId, ma_dr_wav_metadata_type_list_info_artist, "IART")) { + subchunkBytesRead = ma_dr_wav__metadata_process_info_text_chunk(pParser, subchunkDataSize, ma_dr_wav_metadata_type_list_info_artist); + } else if (ma_dr_wav__chunk_matches(allowedMetadataTypes, subchunkId, ma_dr_wav_metadata_type_list_info_comment, "ICMT")) { + subchunkBytesRead = ma_dr_wav__metadata_process_info_text_chunk(pParser, subchunkDataSize, ma_dr_wav_metadata_type_list_info_comment); + } else if (ma_dr_wav__chunk_matches(allowedMetadataTypes, subchunkId, ma_dr_wav_metadata_type_list_info_date, "ICRD")) { + subchunkBytesRead = ma_dr_wav__metadata_process_info_text_chunk(pParser, subchunkDataSize, ma_dr_wav_metadata_type_list_info_date); + } else if (ma_dr_wav__chunk_matches(allowedMetadataTypes, subchunkId, ma_dr_wav_metadata_type_list_info_genre, "IGNR")) { + subchunkBytesRead = ma_dr_wav__metadata_process_info_text_chunk(pParser, subchunkDataSize, 
ma_dr_wav_metadata_type_list_info_genre); + } else if (ma_dr_wav__chunk_matches(allowedMetadataTypes, subchunkId, ma_dr_wav_metadata_type_list_info_album, "IPRD")) { + subchunkBytesRead = ma_dr_wav__metadata_process_info_text_chunk(pParser, subchunkDataSize, ma_dr_wav_metadata_type_list_info_album); + } else if (ma_dr_wav__chunk_matches(allowedMetadataTypes, subchunkId, ma_dr_wav_metadata_type_list_info_tracknumber, "ITRK")) { + subchunkBytesRead = ma_dr_wav__metadata_process_info_text_chunk(pParser, subchunkDataSize, ma_dr_wav_metadata_type_list_info_tracknumber); + } else if ((allowedMetadataTypes & ma_dr_wav_metadata_type_unknown) != 0) { + subchunkBytesRead = ma_dr_wav__metadata_process_unknown_chunk(pParser, subchunkId, subchunkDataSize, listType); + } + bytesRead += subchunkBytesRead; + MA_DR_WAV_ASSERT(subchunkBytesRead <= subchunkDataSize); + if (subchunkBytesRead < subchunkDataSize) { + ma_uint64 bytesToSeek = subchunkDataSize - subchunkBytesRead; + if (!pParser->onSeek(pParser->pReadSeekUserData, (int)bytesToSeek, ma_dr_wav_seek_origin_current)) { + break; + } + bytesRead += bytesToSeek; + } + if ((subchunkDataSize % 2) == 1) { + if (!pParser->onSeek(pParser->pReadSeekUserData, 1, ma_dr_wav_seek_origin_current)) { + break; + } + bytesRead += 1; + } + } + } else if ((allowedMetadataTypes & ma_dr_wav_metadata_type_unknown) != 0) { + bytesRead = ma_dr_wav__metadata_process_unknown_chunk(pParser, pChunkID, pChunkHeader->sizeInBytes, ma_dr_wav_metadata_location_top_level); + } + return bytesRead; +} +MA_PRIVATE ma_uint32 ma_dr_wav_get_bytes_per_pcm_frame(ma_dr_wav* pWav) +{ + ma_uint32 bytesPerFrame; + if ((pWav->bitsPerSample & 0x7) == 0) { + bytesPerFrame = (pWav->bitsPerSample * pWav->fmt.channels) >> 3; + } else { + bytesPerFrame = pWav->fmt.blockAlign; + } + if (pWav->translatedFormatTag == MA_DR_WAVE_FORMAT_ALAW || pWav->translatedFormatTag == MA_DR_WAVE_FORMAT_MULAW) { + if (bytesPerFrame != pWav->fmt.channels) { + return 0; + } + } + return bytesPerFrame; +} +MA_API ma_uint16 ma_dr_wav_fmt_get_format(const ma_dr_wav_fmt* pFMT) +{ + if (pFMT == NULL) { + return 0; + } + if (pFMT->formatTag != MA_DR_WAVE_FORMAT_EXTENSIBLE) { + return pFMT->formatTag; + } else { + return ma_dr_wav_bytes_to_u16(pFMT->subFormat); + } +} +MA_PRIVATE ma_bool32 ma_dr_wav_preinit(ma_dr_wav* pWav, ma_dr_wav_read_proc onRead, ma_dr_wav_seek_proc onSeek, void* pReadSeekUserData, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pWav == NULL || onRead == NULL || onSeek == NULL) { + return MA_FALSE; + } + MA_DR_WAV_ZERO_MEMORY(pWav, sizeof(*pWav)); + pWav->onRead = onRead; + pWav->onSeek = onSeek; + pWav->pUserData = pReadSeekUserData; + pWav->allocationCallbacks = ma_dr_wav_copy_allocation_callbacks_or_defaults(pAllocationCallbacks); + if (pWav->allocationCallbacks.onFree == NULL || (pWav->allocationCallbacks.onMalloc == NULL && pWav->allocationCallbacks.onRealloc == NULL)) { + return MA_FALSE; + } + return MA_TRUE; +} +MA_PRIVATE ma_bool32 ma_dr_wav_init__internal(ma_dr_wav* pWav, ma_dr_wav_chunk_proc onChunk, void* pChunkUserData, ma_uint32 flags) +{ + ma_result result; + ma_uint64 cursor; + ma_bool32 sequential; + ma_uint8 riff[4]; + ma_dr_wav_fmt fmt; + unsigned short translatedFormatTag; + ma_uint64 dataChunkSize = 0; + ma_uint64 sampleCountFromFactChunk = 0; + ma_uint64 metadataStartPos; + ma_dr_wav__metadata_parser metadataParser; + ma_bool8 isProcessingMetadata = MA_FALSE; + ma_bool8 foundChunk_fmt = MA_FALSE; + ma_bool8 foundChunk_data = MA_FALSE; + ma_bool8 isAIFCFormType = MA_FALSE; 
+ ma_uint64 aiffFrameCount = 0; + cursor = 0; + sequential = (flags & MA_DR_WAV_SEQUENTIAL) != 0; + MA_DR_WAV_ZERO_OBJECT(&fmt); + if (ma_dr_wav__on_read(pWav->onRead, pWav->pUserData, riff, sizeof(riff), &cursor) != sizeof(riff)) { + return MA_FALSE; + } + if (ma_dr_wav_fourcc_equal(riff, "RIFF")) { + pWav->container = ma_dr_wav_container_riff; + } else if (ma_dr_wav_fourcc_equal(riff, "RIFX")) { + pWav->container = ma_dr_wav_container_rifx; + } else if (ma_dr_wav_fourcc_equal(riff, "riff")) { + int i; + ma_uint8 riff2[12]; + pWav->container = ma_dr_wav_container_w64; + if (ma_dr_wav__on_read(pWav->onRead, pWav->pUserData, riff2, sizeof(riff2), &cursor) != sizeof(riff2)) { + return MA_FALSE; + } + for (i = 0; i < 12; ++i) { + if (riff2[i] != ma_dr_wavGUID_W64_RIFF[i+4]) { + return MA_FALSE; + } + } + } else if (ma_dr_wav_fourcc_equal(riff, "RF64")) { + pWav->container = ma_dr_wav_container_rf64; + } else if (ma_dr_wav_fourcc_equal(riff, "FORM")) { + pWav->container = ma_dr_wav_container_aiff; + } else { + return MA_FALSE; + } + if (pWav->container == ma_dr_wav_container_riff || pWav->container == ma_dr_wav_container_rifx || pWav->container == ma_dr_wav_container_rf64) { + ma_uint8 chunkSizeBytes[4]; + ma_uint8 wave[4]; + if (ma_dr_wav__on_read(pWav->onRead, pWav->pUserData, chunkSizeBytes, sizeof(chunkSizeBytes), &cursor) != sizeof(chunkSizeBytes)) { + return MA_FALSE; + } + if (pWav->container == ma_dr_wav_container_riff || pWav->container == ma_dr_wav_container_rifx) { + if (ma_dr_wav_bytes_to_u32_ex(chunkSizeBytes, pWav->container) < 36) { + } + } else if (pWav->container == ma_dr_wav_container_rf64) { + if (ma_dr_wav_bytes_to_u32_le(chunkSizeBytes) != 0xFFFFFFFF) { + return MA_FALSE; + } + } else { + return MA_FALSE; + } + if (ma_dr_wav__on_read(pWav->onRead, pWav->pUserData, wave, sizeof(wave), &cursor) != sizeof(wave)) { + return MA_FALSE; + } + if (!ma_dr_wav_fourcc_equal(wave, "WAVE")) { + return MA_FALSE; + } + } else if (pWav->container == ma_dr_wav_container_w64) { + ma_uint8 chunkSizeBytes[8]; + ma_uint8 wave[16]; + if (ma_dr_wav__on_read(pWav->onRead, pWav->pUserData, chunkSizeBytes, sizeof(chunkSizeBytes), &cursor) != sizeof(chunkSizeBytes)) { + return MA_FALSE; + } + if (ma_dr_wav_bytes_to_u64(chunkSizeBytes) < 80) { + return MA_FALSE; + } + if (ma_dr_wav__on_read(pWav->onRead, pWav->pUserData, wave, sizeof(wave), &cursor) != sizeof(wave)) { + return MA_FALSE; + } + if (!ma_dr_wav_guid_equal(wave, ma_dr_wavGUID_W64_WAVE)) { + return MA_FALSE; + } + } else if (pWav->container == ma_dr_wav_container_aiff) { + ma_uint8 chunkSizeBytes[4]; + ma_uint8 aiff[4]; + if (ma_dr_wav__on_read(pWav->onRead, pWav->pUserData, chunkSizeBytes, sizeof(chunkSizeBytes), &cursor) != sizeof(chunkSizeBytes)) { + return MA_FALSE; + } + if (ma_dr_wav_bytes_to_u32_be(chunkSizeBytes) < 18) { + return MA_FALSE; + } + if (ma_dr_wav__on_read(pWav->onRead, pWav->pUserData, aiff, sizeof(aiff), &cursor) != sizeof(aiff)) { + return MA_FALSE; + } + if (ma_dr_wav_fourcc_equal(aiff, "AIFF")) { + isAIFCFormType = MA_FALSE; + } else if (ma_dr_wav_fourcc_equal(aiff, "AIFC")) { + isAIFCFormType = MA_TRUE; + } else { + return MA_FALSE; + } + } else { + return MA_FALSE; + } + if (pWav->container == ma_dr_wav_container_rf64) { + ma_uint8 sizeBytes[8]; + ma_uint64 bytesRemainingInChunk; + ma_dr_wav_chunk_header header; + result = ma_dr_wav__read_chunk_header(pWav->onRead, pWav->pUserData, pWav->container, &cursor, &header); + if (result != MA_SUCCESS) { + return MA_FALSE; + } + if 
(!ma_dr_wav_fourcc_equal(header.id.fourcc, "ds64")) { + return MA_FALSE; + } + bytesRemainingInChunk = header.sizeInBytes + header.paddingSize; + if (!ma_dr_wav__seek_forward(pWav->onSeek, 8, pWav->pUserData)) { + return MA_FALSE; + } + bytesRemainingInChunk -= 8; + cursor += 8; + if (ma_dr_wav__on_read(pWav->onRead, pWav->pUserData, sizeBytes, sizeof(sizeBytes), &cursor) != sizeof(sizeBytes)) { + return MA_FALSE; + } + bytesRemainingInChunk -= 8; + dataChunkSize = ma_dr_wav_bytes_to_u64(sizeBytes); + if (ma_dr_wav__on_read(pWav->onRead, pWav->pUserData, sizeBytes, sizeof(sizeBytes), &cursor) != sizeof(sizeBytes)) { + return MA_FALSE; + } + bytesRemainingInChunk -= 8; + sampleCountFromFactChunk = ma_dr_wav_bytes_to_u64(sizeBytes); + if (!ma_dr_wav__seek_forward(pWav->onSeek, bytesRemainingInChunk, pWav->pUserData)) { + return MA_FALSE; + } + cursor += bytesRemainingInChunk; + } + metadataStartPos = cursor; + isProcessingMetadata = !sequential && ((flags & MA_DR_WAV_WITH_METADATA) != 0); + if (pWav->container != ma_dr_wav_container_riff && pWav->container != ma_dr_wav_container_rf64) { + isProcessingMetadata = MA_FALSE; + } + MA_DR_WAV_ZERO_MEMORY(&metadataParser, sizeof(metadataParser)); + if (isProcessingMetadata) { + metadataParser.onRead = pWav->onRead; + metadataParser.onSeek = pWav->onSeek; + metadataParser.pReadSeekUserData = pWav->pUserData; + metadataParser.stage = ma_dr_wav__metadata_parser_stage_count; + } + for (;;) { + ma_dr_wav_chunk_header header; + ma_uint64 chunkSize; + result = ma_dr_wav__read_chunk_header(pWav->onRead, pWav->pUserData, pWav->container, &cursor, &header); + if (result != MA_SUCCESS) { + break; + } + chunkSize = header.sizeInBytes; + if (!sequential && onChunk != NULL) { + ma_uint64 callbackBytesRead = onChunk(pChunkUserData, pWav->onRead, pWav->onSeek, pWav->pUserData, &header, pWav->container, &fmt); + if (callbackBytesRead > 0) { + if (ma_dr_wav__seek_from_start(pWav->onSeek, cursor, pWav->pUserData) == MA_FALSE) { + return MA_FALSE; + } + } + } + if (((pWav->container == ma_dr_wav_container_riff || pWav->container == ma_dr_wav_container_rifx || pWav->container == ma_dr_wav_container_rf64) && ma_dr_wav_fourcc_equal(header.id.fourcc, "fmt ")) || + ((pWav->container == ma_dr_wav_container_w64) && ma_dr_wav_guid_equal(header.id.guid, ma_dr_wavGUID_W64_FMT))) { + ma_uint8 fmtData[16]; + foundChunk_fmt = MA_TRUE; + if (pWav->onRead(pWav->pUserData, fmtData, sizeof(fmtData)) != sizeof(fmtData)) { + return MA_FALSE; + } + cursor += sizeof(fmtData); + fmt.formatTag = ma_dr_wav_bytes_to_u16_ex(fmtData + 0, pWav->container); + fmt.channels = ma_dr_wav_bytes_to_u16_ex(fmtData + 2, pWav->container); + fmt.sampleRate = ma_dr_wav_bytes_to_u32_ex(fmtData + 4, pWav->container); + fmt.avgBytesPerSec = ma_dr_wav_bytes_to_u32_ex(fmtData + 8, pWav->container); + fmt.blockAlign = ma_dr_wav_bytes_to_u16_ex(fmtData + 12, pWav->container); + fmt.bitsPerSample = ma_dr_wav_bytes_to_u16_ex(fmtData + 14, pWav->container); + fmt.extendedSize = 0; + fmt.validBitsPerSample = 0; + fmt.channelMask = 0; + MA_DR_WAV_ZERO_MEMORY(fmt.subFormat, sizeof(fmt.subFormat)); + if (header.sizeInBytes > 16) { + ma_uint8 fmt_cbSize[2]; + int bytesReadSoFar = 0; + if (pWav->onRead(pWav->pUserData, fmt_cbSize, sizeof(fmt_cbSize)) != sizeof(fmt_cbSize)) { + return MA_FALSE; + } + cursor += sizeof(fmt_cbSize); + bytesReadSoFar = 18; + fmt.extendedSize = ma_dr_wav_bytes_to_u16_ex(fmt_cbSize, pWav->container); + if (fmt.extendedSize > 0) { + if (fmt.formatTag == MA_DR_WAVE_FORMAT_EXTENSIBLE) { + if 
(fmt.extendedSize != 22) { + return MA_FALSE; + } + } + if (fmt.formatTag == MA_DR_WAVE_FORMAT_EXTENSIBLE) { + ma_uint8 fmtext[22]; + if (pWav->onRead(pWav->pUserData, fmtext, fmt.extendedSize) != fmt.extendedSize) { + return MA_FALSE; + } + fmt.validBitsPerSample = ma_dr_wav_bytes_to_u16_ex(fmtext + 0, pWav->container); + fmt.channelMask = ma_dr_wav_bytes_to_u32_ex(fmtext + 2, pWav->container); + ma_dr_wav_bytes_to_guid(fmtext + 6, fmt.subFormat); + } else { + if (pWav->onSeek(pWav->pUserData, fmt.extendedSize, ma_dr_wav_seek_origin_current) == MA_FALSE) { + return MA_FALSE; + } + } + cursor += fmt.extendedSize; + bytesReadSoFar += fmt.extendedSize; + } + if (pWav->onSeek(pWav->pUserData, (int)(header.sizeInBytes - bytesReadSoFar), ma_dr_wav_seek_origin_current) == MA_FALSE) { + return MA_FALSE; + } + cursor += (header.sizeInBytes - bytesReadSoFar); + } + if (header.paddingSize > 0) { + if (ma_dr_wav__seek_forward(pWav->onSeek, header.paddingSize, pWav->pUserData) == MA_FALSE) { + break; + } + cursor += header.paddingSize; + } + continue; + } + if (((pWav->container == ma_dr_wav_container_riff || pWav->container == ma_dr_wav_container_rifx || pWav->container == ma_dr_wav_container_rf64) && ma_dr_wav_fourcc_equal(header.id.fourcc, "data")) || + ((pWav->container == ma_dr_wav_container_w64) && ma_dr_wav_guid_equal(header.id.guid, ma_dr_wavGUID_W64_DATA))) { + foundChunk_data = MA_TRUE; + pWav->dataChunkDataPos = cursor; + if (pWav->container != ma_dr_wav_container_rf64) { + dataChunkSize = chunkSize; + } + if (sequential || !isProcessingMetadata) { + break; + } else { + chunkSize += header.paddingSize; + if (ma_dr_wav__seek_forward(pWav->onSeek, chunkSize, pWav->pUserData) == MA_FALSE) { + break; + } + cursor += chunkSize; + continue; + } + } + if (((pWav->container == ma_dr_wav_container_riff || pWav->container == ma_dr_wav_container_rifx || pWav->container == ma_dr_wav_container_rf64) && ma_dr_wav_fourcc_equal(header.id.fourcc, "fact")) || + ((pWav->container == ma_dr_wav_container_w64) && ma_dr_wav_guid_equal(header.id.guid, ma_dr_wavGUID_W64_FACT))) { + if (pWav->container == ma_dr_wav_container_riff || pWav->container == ma_dr_wav_container_rifx) { + ma_uint8 sampleCount[4]; + if (ma_dr_wav__on_read(pWav->onRead, pWav->pUserData, &sampleCount, 4, &cursor) != 4) { + return MA_FALSE; + } + chunkSize -= 4; + if (pWav->translatedFormatTag == MA_DR_WAVE_FORMAT_ADPCM) { + sampleCountFromFactChunk = ma_dr_wav_bytes_to_u32_ex(sampleCount, pWav->container); + } else { + sampleCountFromFactChunk = 0; + } + } else if (pWav->container == ma_dr_wav_container_w64) { + if (ma_dr_wav__on_read(pWav->onRead, pWav->pUserData, &sampleCountFromFactChunk, 8, &cursor) != 8) { + return MA_FALSE; + } + chunkSize -= 8; + } else if (pWav->container == ma_dr_wav_container_rf64) { + } + chunkSize += header.paddingSize; + if (ma_dr_wav__seek_forward(pWav->onSeek, chunkSize, pWav->pUserData) == MA_FALSE) { + break; + } + cursor += chunkSize; + continue; + } + if (pWav->container == ma_dr_wav_container_aiff && ma_dr_wav_fourcc_equal(header.id.fourcc, "COMM")) { + ma_uint8 commData[24]; + ma_uint32 commDataBytesToRead; + ma_uint16 channels; + ma_uint32 frameCount; + ma_uint16 sampleSizeInBits; + ma_int64 sampleRate; + ma_uint16 compressionFormat; + foundChunk_fmt = MA_TRUE; + if (isAIFCFormType) { + commDataBytesToRead = 24; + if (header.sizeInBytes < commDataBytesToRead) { + return MA_FALSE; + } + } else { + commDataBytesToRead = 18; + if (header.sizeInBytes != commDataBytesToRead) { + return MA_FALSE; + } + } + if 
(ma_dr_wav__on_read(pWav->onRead, pWav->pUserData, commData, commDataBytesToRead, &cursor) != commDataBytesToRead) { + return MA_FALSE; + } + channels = ma_dr_wav_bytes_to_u16_ex (commData + 0, pWav->container); + frameCount = ma_dr_wav_bytes_to_u32_ex (commData + 2, pWav->container); + sampleSizeInBits = ma_dr_wav_bytes_to_u16_ex (commData + 6, pWav->container); + sampleRate = ma_dr_wav_aiff_extented_to_s64(commData + 8); + if (sampleRate < 0 || sampleRate > 0xFFFFFFFF) { + return MA_FALSE; + } + if (isAIFCFormType) { + const ma_uint8* type = commData + 18; + if (ma_dr_wav_fourcc_equal(type, "NONE")) { + compressionFormat = MA_DR_WAVE_FORMAT_PCM; + } else if (ma_dr_wav_fourcc_equal(type, "raw ")) { + compressionFormat = MA_DR_WAVE_FORMAT_PCM; + if (sampleSizeInBits == 8) { + pWav->aiff.isUnsigned = MA_TRUE; + } + } else if (ma_dr_wav_fourcc_equal(type, "sowt")) { + compressionFormat = MA_DR_WAVE_FORMAT_PCM; + pWav->aiff.isLE = MA_TRUE; + } else if (ma_dr_wav_fourcc_equal(type, "fl32") || ma_dr_wav_fourcc_equal(type, "fl64") || ma_dr_wav_fourcc_equal(type, "FL32") || ma_dr_wav_fourcc_equal(type, "FL64")) { + compressionFormat = MA_DR_WAVE_FORMAT_IEEE_FLOAT; + } else if (ma_dr_wav_fourcc_equal(type, "alaw") || ma_dr_wav_fourcc_equal(type, "ALAW")) { + compressionFormat = MA_DR_WAVE_FORMAT_ALAW; + } else if (ma_dr_wav_fourcc_equal(type, "ulaw") || ma_dr_wav_fourcc_equal(type, "ULAW")) { + compressionFormat = MA_DR_WAVE_FORMAT_MULAW; + } else if (ma_dr_wav_fourcc_equal(type, "ima4")) { + compressionFormat = MA_DR_WAVE_FORMAT_DVI_ADPCM; + sampleSizeInBits = 4; + (void)compressionFormat; + (void)sampleSizeInBits; + return MA_FALSE; + } else { + return MA_FALSE; + } + } else { + compressionFormat = MA_DR_WAVE_FORMAT_PCM; + } + aiffFrameCount = frameCount; + fmt.formatTag = compressionFormat; + fmt.channels = channels; + fmt.sampleRate = (ma_uint32)sampleRate; + fmt.bitsPerSample = sampleSizeInBits; + fmt.blockAlign = (ma_uint16)(fmt.channels * fmt.bitsPerSample / 8); + fmt.avgBytesPerSec = fmt.blockAlign * fmt.sampleRate; + if (fmt.blockAlign == 0 && compressionFormat == MA_DR_WAVE_FORMAT_DVI_ADPCM) { + fmt.blockAlign = 34 * fmt.channels; + } + if (compressionFormat == MA_DR_WAVE_FORMAT_ALAW || compressionFormat == MA_DR_WAVE_FORMAT_MULAW) { + if (fmt.bitsPerSample > 8) { + fmt.bitsPerSample = 8; + fmt.blockAlign = fmt.channels; + } + } + fmt.bitsPerSample += (fmt.bitsPerSample & 7); + if (isAIFCFormType) { + if (ma_dr_wav__seek_forward(pWav->onSeek, (chunkSize - commDataBytesToRead), pWav->pUserData) == MA_FALSE) { + return MA_FALSE; + } + cursor += (chunkSize - commDataBytesToRead); + } + continue; + } + if (pWav->container == ma_dr_wav_container_aiff && ma_dr_wav_fourcc_equal(header.id.fourcc, "SSND")) { + ma_uint8 offsetAndBlockSizeData[8]; + ma_uint32 offset; + foundChunk_data = MA_TRUE; + if (ma_dr_wav__on_read(pWav->onRead, pWav->pUserData, offsetAndBlockSizeData, sizeof(offsetAndBlockSizeData), &cursor) != sizeof(offsetAndBlockSizeData)) { + return MA_FALSE; + } + offset = ma_dr_wav_bytes_to_u32_ex(offsetAndBlockSizeData + 0, pWav->container); + if (ma_dr_wav__seek_forward(pWav->onSeek, offset, pWav->pUserData) == MA_FALSE) { + return MA_FALSE; + } + cursor += offset; + pWav->dataChunkDataPos = cursor; + dataChunkSize = chunkSize; + if (sequential || !isProcessingMetadata) { + break; + } else { + if (ma_dr_wav__seek_forward(pWav->onSeek, chunkSize, pWav->pUserData) == MA_FALSE) { + break; + } + cursor += chunkSize; + continue; + } + } + if (isProcessingMetadata) { + 
ma_dr_wav__metadata_process_chunk(&metadataParser, &header, ma_dr_wav_metadata_type_all_including_unknown); + if (ma_dr_wav__seek_from_start(pWav->onSeek, cursor, pWav->pUserData) == MA_FALSE) { + break; + } + } + chunkSize += header.paddingSize; + if (ma_dr_wav__seek_forward(pWav->onSeek, chunkSize, pWav->pUserData) == MA_FALSE) { + break; + } + cursor += chunkSize; + } + if (!foundChunk_fmt || !foundChunk_data) { + return MA_FALSE; + } + if ((fmt.sampleRate == 0 || fmt.sampleRate > MA_DR_WAV_MAX_SAMPLE_RATE ) || + (fmt.channels == 0 || fmt.channels > MA_DR_WAV_MAX_CHANNELS ) || + (fmt.bitsPerSample == 0 || fmt.bitsPerSample > MA_DR_WAV_MAX_BITS_PER_SAMPLE) || + fmt.blockAlign == 0) { + return MA_FALSE; + } + translatedFormatTag = fmt.formatTag; + if (translatedFormatTag == MA_DR_WAVE_FORMAT_EXTENSIBLE) { + translatedFormatTag = ma_dr_wav_bytes_to_u16_ex(fmt.subFormat + 0, pWav->container); + } + if (!sequential) { + if (!ma_dr_wav__seek_from_start(pWav->onSeek, pWav->dataChunkDataPos, pWav->pUserData)) { + return MA_FALSE; + } + cursor = pWav->dataChunkDataPos; + } + if (isProcessingMetadata && metadataParser.metadataCount > 0) { + if (ma_dr_wav__seek_from_start(pWav->onSeek, metadataStartPos, pWav->pUserData) == MA_FALSE) { + return MA_FALSE; + } + result = ma_dr_wav__metadata_alloc(&metadataParser, &pWav->allocationCallbacks); + if (result != MA_SUCCESS) { + return MA_FALSE; + } + metadataParser.stage = ma_dr_wav__metadata_parser_stage_read; + for (;;) { + ma_dr_wav_chunk_header header; + ma_uint64 metadataBytesRead; + result = ma_dr_wav__read_chunk_header(pWav->onRead, pWav->pUserData, pWav->container, &cursor, &header); + if (result != MA_SUCCESS) { + break; + } + metadataBytesRead = ma_dr_wav__metadata_process_chunk(&metadataParser, &header, ma_dr_wav_metadata_type_all_including_unknown); + if (ma_dr_wav__seek_forward(pWav->onSeek, (header.sizeInBytes + header.paddingSize) - metadataBytesRead, pWav->pUserData) == MA_FALSE) { + ma_dr_wav_free(metadataParser.pMetadata, &pWav->allocationCallbacks); + return MA_FALSE; + } + } + pWav->pMetadata = metadataParser.pMetadata; + pWav->metadataCount = metadataParser.metadataCount; + } + if (dataChunkSize == 0xFFFFFFFF && (pWav->container == ma_dr_wav_container_riff || pWav->container == ma_dr_wav_container_rifx) && pWav->isSequentialWrite == MA_FALSE) { + dataChunkSize = 0; + for (;;) { + ma_uint8 temp[4096]; + size_t bytesRead = pWav->onRead(pWav->pUserData, temp, sizeof(temp)); + dataChunkSize += bytesRead; + if (bytesRead < sizeof(temp)) { + break; + } + } + } + if (ma_dr_wav__seek_from_start(pWav->onSeek, pWav->dataChunkDataPos, pWav->pUserData) == MA_FALSE) { + ma_dr_wav_free(pWav->pMetadata, &pWav->allocationCallbacks); + return MA_FALSE; + } + pWav->fmt = fmt; + pWav->sampleRate = fmt.sampleRate; + pWav->channels = fmt.channels; + pWav->bitsPerSample = fmt.bitsPerSample; + pWav->bytesRemaining = dataChunkSize; + pWav->translatedFormatTag = translatedFormatTag; + pWav->dataChunkDataSize = dataChunkSize; + if (sampleCountFromFactChunk != 0) { + pWav->totalPCMFrameCount = sampleCountFromFactChunk; + } else if (aiffFrameCount != 0) { + pWav->totalPCMFrameCount = aiffFrameCount; + } else { + ma_uint32 bytesPerFrame = ma_dr_wav_get_bytes_per_pcm_frame(pWav); + if (bytesPerFrame == 0) { + ma_dr_wav_free(pWav->pMetadata, &pWav->allocationCallbacks); + return MA_FALSE; + } + pWav->totalPCMFrameCount = dataChunkSize / bytesPerFrame; + if (pWav->translatedFormatTag == MA_DR_WAVE_FORMAT_ADPCM) { + ma_uint64 totalBlockHeaderSizeInBytes; + ma_uint64 
blockCount = dataChunkSize / fmt.blockAlign; + if ((blockCount * fmt.blockAlign) < dataChunkSize) { + blockCount += 1; + } + totalBlockHeaderSizeInBytes = blockCount * (6*fmt.channels); + pWav->totalPCMFrameCount = ((dataChunkSize - totalBlockHeaderSizeInBytes) * 2) / fmt.channels; + } + if (pWav->translatedFormatTag == MA_DR_WAVE_FORMAT_DVI_ADPCM) { + ma_uint64 totalBlockHeaderSizeInBytes; + ma_uint64 blockCount = dataChunkSize / fmt.blockAlign; + if ((blockCount * fmt.blockAlign) < dataChunkSize) { + blockCount += 1; + } + totalBlockHeaderSizeInBytes = blockCount * (4*fmt.channels); + pWav->totalPCMFrameCount = ((dataChunkSize - totalBlockHeaderSizeInBytes) * 2) / fmt.channels; + pWav->totalPCMFrameCount += blockCount; + } + } + if (pWav->translatedFormatTag == MA_DR_WAVE_FORMAT_ADPCM || pWav->translatedFormatTag == MA_DR_WAVE_FORMAT_DVI_ADPCM) { + if (pWav->channels > 2) { + ma_dr_wav_free(pWav->pMetadata, &pWav->allocationCallbacks); + return MA_FALSE; + } + } + if (ma_dr_wav_get_bytes_per_pcm_frame(pWav) == 0) { + ma_dr_wav_free(pWav->pMetadata, &pWav->allocationCallbacks); + return MA_FALSE; + } +#ifdef MA_DR_WAV_LIBSNDFILE_COMPAT + if (pWav->translatedFormatTag == MA_DR_WAVE_FORMAT_ADPCM) { + ma_uint64 blockCount = dataChunkSize / fmt.blockAlign; + pWav->totalPCMFrameCount = (((blockCount * (fmt.blockAlign - (6*pWav->channels))) * 2)) / fmt.channels; + } + if (pWav->translatedFormatTag == MA_DR_WAVE_FORMAT_DVI_ADPCM) { + ma_uint64 blockCount = dataChunkSize / fmt.blockAlign; + pWav->totalPCMFrameCount = (((blockCount * (fmt.blockAlign - (4*pWav->channels))) * 2) + (blockCount * pWav->channels)) / fmt.channels; + } +#endif + return MA_TRUE; +} +MA_API ma_bool32 ma_dr_wav_init(ma_dr_wav* pWav, ma_dr_wav_read_proc onRead, ma_dr_wav_seek_proc onSeek, void* pUserData, const ma_allocation_callbacks* pAllocationCallbacks) +{ + return ma_dr_wav_init_ex(pWav, onRead, onSeek, NULL, pUserData, NULL, 0, pAllocationCallbacks); +} +MA_API ma_bool32 ma_dr_wav_init_ex(ma_dr_wav* pWav, ma_dr_wav_read_proc onRead, ma_dr_wav_seek_proc onSeek, ma_dr_wav_chunk_proc onChunk, void* pReadSeekUserData, void* pChunkUserData, ma_uint32 flags, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (!ma_dr_wav_preinit(pWav, onRead, onSeek, pReadSeekUserData, pAllocationCallbacks)) { + return MA_FALSE; + } + return ma_dr_wav_init__internal(pWav, onChunk, pChunkUserData, flags); +} +MA_API ma_bool32 ma_dr_wav_init_with_metadata(ma_dr_wav* pWav, ma_dr_wav_read_proc onRead, ma_dr_wav_seek_proc onSeek, void* pUserData, ma_uint32 flags, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (!ma_dr_wav_preinit(pWav, onRead, onSeek, pUserData, pAllocationCallbacks)) { + return MA_FALSE; + } + return ma_dr_wav_init__internal(pWav, NULL, NULL, flags | MA_DR_WAV_WITH_METADATA); +} +MA_API ma_dr_wav_metadata* ma_dr_wav_take_ownership_of_metadata(ma_dr_wav* pWav) +{ + ma_dr_wav_metadata *result = pWav->pMetadata; + pWav->pMetadata = NULL; + pWav->metadataCount = 0; + return result; +} +MA_PRIVATE size_t ma_dr_wav__write(ma_dr_wav* pWav, const void* pData, size_t dataSize) +{ + MA_DR_WAV_ASSERT(pWav != NULL); + MA_DR_WAV_ASSERT(pWav->onWrite != NULL); + return pWav->onWrite(pWav->pUserData, pData, dataSize); +} +MA_PRIVATE size_t ma_dr_wav__write_byte(ma_dr_wav* pWav, ma_uint8 byte) +{ + MA_DR_WAV_ASSERT(pWav != NULL); + MA_DR_WAV_ASSERT(pWav->onWrite != NULL); + return pWav->onWrite(pWav->pUserData, &byte, 1); +} +MA_PRIVATE size_t ma_dr_wav__write_u16ne_to_le(ma_dr_wav* pWav, ma_uint16 value) +{ + 
MA_DR_WAV_ASSERT(pWav != NULL); + MA_DR_WAV_ASSERT(pWav->onWrite != NULL); + if (!ma_dr_wav__is_little_endian()) { + value = ma_dr_wav__bswap16(value); + } + return ma_dr_wav__write(pWav, &value, 2); +} +MA_PRIVATE size_t ma_dr_wav__write_u32ne_to_le(ma_dr_wav* pWav, ma_uint32 value) +{ + MA_DR_WAV_ASSERT(pWav != NULL); + MA_DR_WAV_ASSERT(pWav->onWrite != NULL); + if (!ma_dr_wav__is_little_endian()) { + value = ma_dr_wav__bswap32(value); + } + return ma_dr_wav__write(pWav, &value, 4); +} +MA_PRIVATE size_t ma_dr_wav__write_u64ne_to_le(ma_dr_wav* pWav, ma_uint64 value) +{ + MA_DR_WAV_ASSERT(pWav != NULL); + MA_DR_WAV_ASSERT(pWav->onWrite != NULL); + if (!ma_dr_wav__is_little_endian()) { + value = ma_dr_wav__bswap64(value); + } + return ma_dr_wav__write(pWav, &value, 8); +} +MA_PRIVATE size_t ma_dr_wav__write_f32ne_to_le(ma_dr_wav* pWav, float value) +{ + union { + ma_uint32 u32; + float f32; + } u; + MA_DR_WAV_ASSERT(pWav != NULL); + MA_DR_WAV_ASSERT(pWav->onWrite != NULL); + u.f32 = value; + if (!ma_dr_wav__is_little_endian()) { + u.u32 = ma_dr_wav__bswap32(u.u32); + } + return ma_dr_wav__write(pWav, &u.u32, 4); +} +MA_PRIVATE size_t ma_dr_wav__write_or_count(ma_dr_wav* pWav, const void* pData, size_t dataSize) +{ + if (pWav == NULL) { + return dataSize; + } + return ma_dr_wav__write(pWav, pData, dataSize); +} +MA_PRIVATE size_t ma_dr_wav__write_or_count_byte(ma_dr_wav* pWav, ma_uint8 byte) +{ + if (pWav == NULL) { + return 1; + } + return ma_dr_wav__write_byte(pWav, byte); +} +MA_PRIVATE size_t ma_dr_wav__write_or_count_u16ne_to_le(ma_dr_wav* pWav, ma_uint16 value) +{ + if (pWav == NULL) { + return 2; + } + return ma_dr_wav__write_u16ne_to_le(pWav, value); +} +MA_PRIVATE size_t ma_dr_wav__write_or_count_u32ne_to_le(ma_dr_wav* pWav, ma_uint32 value) +{ + if (pWav == NULL) { + return 4; + } + return ma_dr_wav__write_u32ne_to_le(pWav, value); +} +#if 0 +MA_PRIVATE size_t ma_dr_wav__write_or_count_u64ne_to_le(ma_dr_wav* pWav, ma_uint64 value) +{ + if (pWav == NULL) { + return 8; + } + return ma_dr_wav__write_u64ne_to_le(pWav, value); +} +#endif +MA_PRIVATE size_t ma_dr_wav__write_or_count_f32ne_to_le(ma_dr_wav* pWav, float value) +{ + if (pWav == NULL) { + return 4; + } + return ma_dr_wav__write_f32ne_to_le(pWav, value); +} +MA_PRIVATE size_t ma_dr_wav__write_or_count_string_to_fixed_size_buf(ma_dr_wav* pWav, char* str, size_t bufFixedSize) +{ + size_t len; + if (pWav == NULL) { + return bufFixedSize; + } + len = ma_dr_wav__strlen_clamped(str, bufFixedSize); + ma_dr_wav__write_or_count(pWav, str, len); + if (len < bufFixedSize) { + size_t i; + for (i = 0; i < bufFixedSize - len; ++i) { + ma_dr_wav__write_byte(pWav, 0); + } + } + return bufFixedSize; +} +MA_PRIVATE size_t ma_dr_wav__write_or_count_metadata(ma_dr_wav* pWav, ma_dr_wav_metadata* pMetadatas, ma_uint32 metadataCount) +{ + size_t bytesWritten = 0; + ma_bool32 hasListAdtl = MA_FALSE; + ma_bool32 hasListInfo = MA_FALSE; + ma_uint32 iMetadata; + if (pMetadatas == NULL || metadataCount == 0) { + return 0; + } + for (iMetadata = 0; iMetadata < metadataCount; ++iMetadata) { + ma_dr_wav_metadata* pMetadata = &pMetadatas[iMetadata]; + ma_uint32 chunkSize = 0; + if ((pMetadata->type & ma_dr_wav_metadata_type_list_all_info_strings) || (pMetadata->type == ma_dr_wav_metadata_type_unknown && pMetadata->data.unknown.chunkLocation == ma_dr_wav_metadata_location_inside_info_list)) { + hasListInfo = MA_TRUE; + } + if ((pMetadata->type & ma_dr_wav_metadata_type_list_all_adtl) || (pMetadata->type == ma_dr_wav_metadata_type_unknown && 
pMetadata->data.unknown.chunkLocation == ma_dr_wav_metadata_location_inside_adtl_list)) { + hasListAdtl = MA_TRUE; + } + switch (pMetadata->type) { + case ma_dr_wav_metadata_type_smpl: + { + ma_uint32 iLoop; + chunkSize = MA_DR_WAV_SMPL_BYTES + MA_DR_WAV_SMPL_LOOP_BYTES * pMetadata->data.smpl.sampleLoopCount + pMetadata->data.smpl.samplerSpecificDataSizeInBytes; + bytesWritten += ma_dr_wav__write_or_count(pWav, "smpl", 4); + bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, chunkSize); + bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, pMetadata->data.smpl.manufacturerId); + bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, pMetadata->data.smpl.productId); + bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, pMetadata->data.smpl.samplePeriodNanoseconds); + bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, pMetadata->data.smpl.midiUnityNote); + bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, pMetadata->data.smpl.midiPitchFraction); + bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, pMetadata->data.smpl.smpteFormat); + bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, pMetadata->data.smpl.smpteOffset); + bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, pMetadata->data.smpl.sampleLoopCount); + bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, pMetadata->data.smpl.samplerSpecificDataSizeInBytes); + for (iLoop = 0; iLoop < pMetadata->data.smpl.sampleLoopCount; ++iLoop) { + bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, pMetadata->data.smpl.pLoops[iLoop].cuePointId); + bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, pMetadata->data.smpl.pLoops[iLoop].type); + bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, pMetadata->data.smpl.pLoops[iLoop].firstSampleByteOffset); + bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, pMetadata->data.smpl.pLoops[iLoop].lastSampleByteOffset); + bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, pMetadata->data.smpl.pLoops[iLoop].sampleFraction); + bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, pMetadata->data.smpl.pLoops[iLoop].playCount); + } + if (pMetadata->data.smpl.samplerSpecificDataSizeInBytes > 0) { + bytesWritten += ma_dr_wav__write_or_count(pWav, pMetadata->data.smpl.pSamplerSpecificData, pMetadata->data.smpl.samplerSpecificDataSizeInBytes); + } + } break; + case ma_dr_wav_metadata_type_inst: + { + chunkSize = MA_DR_WAV_INST_BYTES; + bytesWritten += ma_dr_wav__write_or_count(pWav, "inst", 4); + bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, chunkSize); + bytesWritten += ma_dr_wav__write_or_count(pWav, &pMetadata->data.inst.midiUnityNote, 1); + bytesWritten += ma_dr_wav__write_or_count(pWav, &pMetadata->data.inst.fineTuneCents, 1); + bytesWritten += ma_dr_wav__write_or_count(pWav, &pMetadata->data.inst.gainDecibels, 1); + bytesWritten += ma_dr_wav__write_or_count(pWav, &pMetadata->data.inst.lowNote, 1); + bytesWritten += ma_dr_wav__write_or_count(pWav, &pMetadata->data.inst.highNote, 1); + bytesWritten += ma_dr_wav__write_or_count(pWav, &pMetadata->data.inst.lowVelocity, 1); + bytesWritten += ma_dr_wav__write_or_count(pWav, &pMetadata->data.inst.highVelocity, 1); + } break; + case ma_dr_wav_metadata_type_cue: + { + ma_uint32 iCuePoint; + chunkSize = MA_DR_WAV_CUE_BYTES + MA_DR_WAV_CUE_POINT_BYTES * pMetadata->data.cue.cuePointCount; + bytesWritten += ma_dr_wav__write_or_count(pWav, "cue ", 4); + bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, chunkSize); + 
bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, pMetadata->data.cue.cuePointCount); + for (iCuePoint = 0; iCuePoint < pMetadata->data.cue.cuePointCount; ++iCuePoint) { + bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, pMetadata->data.cue.pCuePoints[iCuePoint].id); + bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, pMetadata->data.cue.pCuePoints[iCuePoint].playOrderPosition); + bytesWritten += ma_dr_wav__write_or_count(pWav, pMetadata->data.cue.pCuePoints[iCuePoint].dataChunkId, 4); + bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, pMetadata->data.cue.pCuePoints[iCuePoint].chunkStart); + bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, pMetadata->data.cue.pCuePoints[iCuePoint].blockStart); + bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, pMetadata->data.cue.pCuePoints[iCuePoint].sampleByteOffset); + } + } break; + case ma_dr_wav_metadata_type_acid: + { + chunkSize = MA_DR_WAV_ACID_BYTES; + bytesWritten += ma_dr_wav__write_or_count(pWav, "acid", 4); + bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, chunkSize); + bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, pMetadata->data.acid.flags); + bytesWritten += ma_dr_wav__write_or_count_u16ne_to_le(pWav, pMetadata->data.acid.midiUnityNote); + bytesWritten += ma_dr_wav__write_or_count_u16ne_to_le(pWav, pMetadata->data.acid.reserved1); + bytesWritten += ma_dr_wav__write_or_count_f32ne_to_le(pWav, pMetadata->data.acid.reserved2); + bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, pMetadata->data.acid.numBeats); + bytesWritten += ma_dr_wav__write_or_count_u16ne_to_le(pWav, pMetadata->data.acid.meterDenominator); + bytesWritten += ma_dr_wav__write_or_count_u16ne_to_le(pWav, pMetadata->data.acid.meterNumerator); + bytesWritten += ma_dr_wav__write_or_count_f32ne_to_le(pWav, pMetadata->data.acid.tempo); + } break; + case ma_dr_wav_metadata_type_bext: + { + char reservedBuf[MA_DR_WAV_BEXT_RESERVED_BYTES]; + ma_uint32 timeReferenceLow; + ma_uint32 timeReferenceHigh; + chunkSize = MA_DR_WAV_BEXT_BYTES + pMetadata->data.bext.codingHistorySize; + bytesWritten += ma_dr_wav__write_or_count(pWav, "bext", 4); + bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, chunkSize); + bytesWritten += ma_dr_wav__write_or_count_string_to_fixed_size_buf(pWav, pMetadata->data.bext.pDescription, MA_DR_WAV_BEXT_DESCRIPTION_BYTES); + bytesWritten += ma_dr_wav__write_or_count_string_to_fixed_size_buf(pWav, pMetadata->data.bext.pOriginatorName, MA_DR_WAV_BEXT_ORIGINATOR_NAME_BYTES); + bytesWritten += ma_dr_wav__write_or_count_string_to_fixed_size_buf(pWav, pMetadata->data.bext.pOriginatorReference, MA_DR_WAV_BEXT_ORIGINATOR_REF_BYTES); + bytesWritten += ma_dr_wav__write_or_count(pWav, pMetadata->data.bext.pOriginationDate, sizeof(pMetadata->data.bext.pOriginationDate)); + bytesWritten += ma_dr_wav__write_or_count(pWav, pMetadata->data.bext.pOriginationTime, sizeof(pMetadata->data.bext.pOriginationTime)); + timeReferenceLow = (ma_uint32)(pMetadata->data.bext.timeReference & 0xFFFFFFFF); + timeReferenceHigh = (ma_uint32)(pMetadata->data.bext.timeReference >> 32); + bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, timeReferenceLow); + bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, timeReferenceHigh); + bytesWritten += ma_dr_wav__write_or_count_u16ne_to_le(pWav, pMetadata->data.bext.version); + bytesWritten += ma_dr_wav__write_or_count(pWav, pMetadata->data.bext.pUMID, MA_DR_WAV_BEXT_UMID_BYTES); + bytesWritten += 
ma_dr_wav__write_or_count_u16ne_to_le(pWav, pMetadata->data.bext.loudnessValue); + bytesWritten += ma_dr_wav__write_or_count_u16ne_to_le(pWav, pMetadata->data.bext.loudnessRange); + bytesWritten += ma_dr_wav__write_or_count_u16ne_to_le(pWav, pMetadata->data.bext.maxTruePeakLevel); + bytesWritten += ma_dr_wav__write_or_count_u16ne_to_le(pWav, pMetadata->data.bext.maxMomentaryLoudness); + bytesWritten += ma_dr_wav__write_or_count_u16ne_to_le(pWav, pMetadata->data.bext.maxShortTermLoudness); + MA_DR_WAV_ZERO_MEMORY(reservedBuf, sizeof(reservedBuf)); + bytesWritten += ma_dr_wav__write_or_count(pWav, reservedBuf, sizeof(reservedBuf)); + if (pMetadata->data.bext.codingHistorySize > 0) { + bytesWritten += ma_dr_wav__write_or_count(pWav, pMetadata->data.bext.pCodingHistory, pMetadata->data.bext.codingHistorySize); + } + } break; + case ma_dr_wav_metadata_type_unknown: + { + if (pMetadata->data.unknown.chunkLocation == ma_dr_wav_metadata_location_top_level) { + chunkSize = pMetadata->data.unknown.dataSizeInBytes; + bytesWritten += ma_dr_wav__write_or_count(pWav, pMetadata->data.unknown.id, 4); + bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, chunkSize); + bytesWritten += ma_dr_wav__write_or_count(pWav, pMetadata->data.unknown.pData, pMetadata->data.unknown.dataSizeInBytes); + } + } break; + default: break; + } + if ((chunkSize % 2) != 0) { + bytesWritten += ma_dr_wav__write_or_count_byte(pWav, 0); + } + } + if (hasListInfo) { + ma_uint32 chunkSize = 4; + for (iMetadata = 0; iMetadata < metadataCount; ++iMetadata) { + ma_dr_wav_metadata* pMetadata = &pMetadatas[iMetadata]; + if ((pMetadata->type & ma_dr_wav_metadata_type_list_all_info_strings)) { + chunkSize += 8; + chunkSize += pMetadata->data.infoText.stringLength + 1; + } else if (pMetadata->type == ma_dr_wav_metadata_type_unknown && pMetadata->data.unknown.chunkLocation == ma_dr_wav_metadata_location_inside_info_list) { + chunkSize += 8; + chunkSize += pMetadata->data.unknown.dataSizeInBytes; + } + if ((chunkSize % 2) != 0) { + chunkSize += 1; + } + } + bytesWritten += ma_dr_wav__write_or_count(pWav, "LIST", 4); + bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, chunkSize); + bytesWritten += ma_dr_wav__write_or_count(pWav, "INFO", 4); + for (iMetadata = 0; iMetadata < metadataCount; ++iMetadata) { + ma_dr_wav_metadata* pMetadata = &pMetadatas[iMetadata]; + ma_uint32 subchunkSize = 0; + if (pMetadata->type & ma_dr_wav_metadata_type_list_all_info_strings) { + const char* pID = NULL; + switch (pMetadata->type) { + case ma_dr_wav_metadata_type_list_info_software: pID = "ISFT"; break; + case ma_dr_wav_metadata_type_list_info_copyright: pID = "ICOP"; break; + case ma_dr_wav_metadata_type_list_info_title: pID = "INAM"; break; + case ma_dr_wav_metadata_type_list_info_artist: pID = "IART"; break; + case ma_dr_wav_metadata_type_list_info_comment: pID = "ICMT"; break; + case ma_dr_wav_metadata_type_list_info_date: pID = "ICRD"; break; + case ma_dr_wav_metadata_type_list_info_genre: pID = "IGNR"; break; + case ma_dr_wav_metadata_type_list_info_album: pID = "IPRD"; break; + case ma_dr_wav_metadata_type_list_info_tracknumber: pID = "ITRK"; break; + default: break; + } + MA_DR_WAV_ASSERT(pID != NULL); + if (pMetadata->data.infoText.stringLength) { + subchunkSize = pMetadata->data.infoText.stringLength + 1; + bytesWritten += ma_dr_wav__write_or_count(pWav, pID, 4); + bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, subchunkSize); + bytesWritten += ma_dr_wav__write_or_count(pWav, pMetadata->data.infoText.pString, 
pMetadata->data.infoText.stringLength); + bytesWritten += ma_dr_wav__write_or_count_byte(pWav, '\0'); + } + } else if (pMetadata->type == ma_dr_wav_metadata_type_unknown && pMetadata->data.unknown.chunkLocation == ma_dr_wav_metadata_location_inside_info_list) { + if (pMetadata->data.unknown.dataSizeInBytes) { + subchunkSize = pMetadata->data.unknown.dataSizeInBytes; + bytesWritten += ma_dr_wav__write_or_count(pWav, pMetadata->data.unknown.id, 4); + bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, pMetadata->data.unknown.dataSizeInBytes); + bytesWritten += ma_dr_wav__write_or_count(pWav, pMetadata->data.unknown.pData, subchunkSize); + } + } + if ((subchunkSize % 2) != 0) { + bytesWritten += ma_dr_wav__write_or_count_byte(pWav, 0); + } + } + } + if (hasListAdtl) { + ma_uint32 chunkSize = 4; + for (iMetadata = 0; iMetadata < metadataCount; ++iMetadata) { + ma_dr_wav_metadata* pMetadata = &pMetadatas[iMetadata]; + switch (pMetadata->type) + { + case ma_dr_wav_metadata_type_list_label: + case ma_dr_wav_metadata_type_list_note: + { + chunkSize += 8; + chunkSize += MA_DR_WAV_LIST_LABEL_OR_NOTE_BYTES; + if (pMetadata->data.labelOrNote.stringLength > 0) { + chunkSize += pMetadata->data.labelOrNote.stringLength + 1; + } + } break; + case ma_dr_wav_metadata_type_list_labelled_cue_region: + { + chunkSize += 8; + chunkSize += MA_DR_WAV_LIST_LABELLED_TEXT_BYTES; + if (pMetadata->data.labelledCueRegion.stringLength > 0) { + chunkSize += pMetadata->data.labelledCueRegion.stringLength + 1; + } + } break; + case ma_dr_wav_metadata_type_unknown: + { + if (pMetadata->data.unknown.chunkLocation == ma_dr_wav_metadata_location_inside_adtl_list) { + chunkSize += 8; + chunkSize += pMetadata->data.unknown.dataSizeInBytes; + } + } break; + default: break; + } + if ((chunkSize % 2) != 0) { + chunkSize += 1; + } + } + bytesWritten += ma_dr_wav__write_or_count(pWav, "LIST", 4); + bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, chunkSize); + bytesWritten += ma_dr_wav__write_or_count(pWav, "adtl", 4); + for (iMetadata = 0; iMetadata < metadataCount; ++iMetadata) { + ma_dr_wav_metadata* pMetadata = &pMetadatas[iMetadata]; + ma_uint32 subchunkSize = 0; + switch (pMetadata->type) + { + case ma_dr_wav_metadata_type_list_label: + case ma_dr_wav_metadata_type_list_note: + { + if (pMetadata->data.labelOrNote.stringLength > 0) { + const char *pID = NULL; + if (pMetadata->type == ma_dr_wav_metadata_type_list_label) { + pID = "labl"; + } + else if (pMetadata->type == ma_dr_wav_metadata_type_list_note) { + pID = "note"; + } + MA_DR_WAV_ASSERT(pID != NULL); + MA_DR_WAV_ASSERT(pMetadata->data.labelOrNote.pString != NULL); + subchunkSize = MA_DR_WAV_LIST_LABEL_OR_NOTE_BYTES; + bytesWritten += ma_dr_wav__write_or_count(pWav, pID, 4); + subchunkSize += pMetadata->data.labelOrNote.stringLength + 1; + bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, subchunkSize); + bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, pMetadata->data.labelOrNote.cuePointId); + bytesWritten += ma_dr_wav__write_or_count(pWav, pMetadata->data.labelOrNote.pString, pMetadata->data.labelOrNote.stringLength); + bytesWritten += ma_dr_wav__write_or_count_byte(pWav, '\0'); + } + } break; + case ma_dr_wav_metadata_type_list_labelled_cue_region: + { + subchunkSize = MA_DR_WAV_LIST_LABELLED_TEXT_BYTES; + bytesWritten += ma_dr_wav__write_or_count(pWav, "ltxt", 4); + if (pMetadata->data.labelledCueRegion.stringLength > 0) { + subchunkSize += pMetadata->data.labelledCueRegion.stringLength + 1; + } + bytesWritten += 
ma_dr_wav__write_or_count_u32ne_to_le(pWav, subchunkSize); + bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, pMetadata->data.labelledCueRegion.cuePointId); + bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, pMetadata->data.labelledCueRegion.sampleLength); + bytesWritten += ma_dr_wav__write_or_count(pWav, pMetadata->data.labelledCueRegion.purposeId, 4); + bytesWritten += ma_dr_wav__write_or_count_u16ne_to_le(pWav, pMetadata->data.labelledCueRegion.country); + bytesWritten += ma_dr_wav__write_or_count_u16ne_to_le(pWav, pMetadata->data.labelledCueRegion.language); + bytesWritten += ma_dr_wav__write_or_count_u16ne_to_le(pWav, pMetadata->data.labelledCueRegion.dialect); + bytesWritten += ma_dr_wav__write_or_count_u16ne_to_le(pWav, pMetadata->data.labelledCueRegion.codePage); + if (pMetadata->data.labelledCueRegion.stringLength > 0) { + MA_DR_WAV_ASSERT(pMetadata->data.labelledCueRegion.pString != NULL); + bytesWritten += ma_dr_wav__write_or_count(pWav, pMetadata->data.labelledCueRegion.pString, pMetadata->data.labelledCueRegion.stringLength); + bytesWritten += ma_dr_wav__write_or_count_byte(pWav, '\0'); + } + } break; + case ma_dr_wav_metadata_type_unknown: + { + if (pMetadata->data.unknown.chunkLocation == ma_dr_wav_metadata_location_inside_adtl_list) { + subchunkSize = pMetadata->data.unknown.dataSizeInBytes; + MA_DR_WAV_ASSERT(pMetadata->data.unknown.pData != NULL); + bytesWritten += ma_dr_wav__write_or_count(pWav, pMetadata->data.unknown.id, 4); + bytesWritten += ma_dr_wav__write_or_count_u32ne_to_le(pWav, subchunkSize); + bytesWritten += ma_dr_wav__write_or_count(pWav, pMetadata->data.unknown.pData, subchunkSize); + } + } break; + default: break; + } + if ((subchunkSize % 2) != 0) { + bytesWritten += ma_dr_wav__write_or_count_byte(pWav, 0); + } + } + } + MA_DR_WAV_ASSERT((bytesWritten % 2) == 0); + return bytesWritten; +} +MA_PRIVATE ma_uint32 ma_dr_wav__riff_chunk_size_riff(ma_uint64 dataChunkSize, ma_dr_wav_metadata* pMetadata, ma_uint32 metadataCount) +{ + ma_uint64 chunkSize = 4 + 24 + (ma_uint64)ma_dr_wav__write_or_count_metadata(NULL, pMetadata, metadataCount) + 8 + dataChunkSize + ma_dr_wav__chunk_padding_size_riff(dataChunkSize); + if (chunkSize > 0xFFFFFFFFUL) { + chunkSize = 0xFFFFFFFFUL; + } + return (ma_uint32)chunkSize; +} +MA_PRIVATE ma_uint32 ma_dr_wav__data_chunk_size_riff(ma_uint64 dataChunkSize) +{ + if (dataChunkSize <= 0xFFFFFFFFUL) { + return (ma_uint32)dataChunkSize; + } else { + return 0xFFFFFFFFUL; + } +} +MA_PRIVATE ma_uint64 ma_dr_wav__riff_chunk_size_w64(ma_uint64 dataChunkSize) +{ + ma_uint64 dataSubchunkPaddingSize = ma_dr_wav__chunk_padding_size_w64(dataChunkSize); + return 80 + 24 + dataChunkSize + dataSubchunkPaddingSize; +} +MA_PRIVATE ma_uint64 ma_dr_wav__data_chunk_size_w64(ma_uint64 dataChunkSize) +{ + return 24 + dataChunkSize; +} +MA_PRIVATE ma_uint64 ma_dr_wav__riff_chunk_size_rf64(ma_uint64 dataChunkSize, ma_dr_wav_metadata *metadata, ma_uint32 numMetadata) +{ + ma_uint64 chunkSize = 4 + 36 + 24 + (ma_uint64)ma_dr_wav__write_or_count_metadata(NULL, metadata, numMetadata) + 8 + dataChunkSize + ma_dr_wav__chunk_padding_size_riff(dataChunkSize); + if (chunkSize > 0xFFFFFFFFUL) { + chunkSize = 0xFFFFFFFFUL; + } + return chunkSize; +} +MA_PRIVATE ma_uint64 ma_dr_wav__data_chunk_size_rf64(ma_uint64 dataChunkSize) +{ + return dataChunkSize; +} +MA_PRIVATE ma_bool32 ma_dr_wav_preinit_write(ma_dr_wav* pWav, const ma_dr_wav_data_format* pFormat, ma_bool32 isSequential, ma_dr_wav_write_proc onWrite, ma_dr_wav_seek_proc onSeek, void* 
pUserData, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pWav == NULL || onWrite == NULL) { + return MA_FALSE; + } + if (!isSequential && onSeek == NULL) { + return MA_FALSE; + } + if (pFormat->format == MA_DR_WAVE_FORMAT_EXTENSIBLE) { + return MA_FALSE; + } + if (pFormat->format == MA_DR_WAVE_FORMAT_ADPCM || pFormat->format == MA_DR_WAVE_FORMAT_DVI_ADPCM) { + return MA_FALSE; + } + MA_DR_WAV_ZERO_MEMORY(pWav, sizeof(*pWav)); + pWav->onWrite = onWrite; + pWav->onSeek = onSeek; + pWav->pUserData = pUserData; + pWav->allocationCallbacks = ma_dr_wav_copy_allocation_callbacks_or_defaults(pAllocationCallbacks); + if (pWav->allocationCallbacks.onFree == NULL || (pWav->allocationCallbacks.onMalloc == NULL && pWav->allocationCallbacks.onRealloc == NULL)) { + return MA_FALSE; + } + pWav->fmt.formatTag = (ma_uint16)pFormat->format; + pWav->fmt.channels = (ma_uint16)pFormat->channels; + pWav->fmt.sampleRate = pFormat->sampleRate; + pWav->fmt.avgBytesPerSec = (ma_uint32)((pFormat->bitsPerSample * pFormat->sampleRate * pFormat->channels) / 8); + pWav->fmt.blockAlign = (ma_uint16)((pFormat->channels * pFormat->bitsPerSample) / 8); + pWav->fmt.bitsPerSample = (ma_uint16)pFormat->bitsPerSample; + pWav->fmt.extendedSize = 0; + pWav->isSequentialWrite = isSequential; + return MA_TRUE; +} +MA_PRIVATE ma_bool32 ma_dr_wav_init_write__internal(ma_dr_wav* pWav, const ma_dr_wav_data_format* pFormat, ma_uint64 totalSampleCount) +{ + size_t runningPos = 0; + ma_uint64 initialDataChunkSize = 0; + ma_uint64 chunkSizeFMT; + if (pWav->isSequentialWrite) { + initialDataChunkSize = (totalSampleCount * pWav->fmt.bitsPerSample) / 8; + if (pFormat->container == ma_dr_wav_container_riff) { + if (initialDataChunkSize > (0xFFFFFFFFUL - 36)) { + return MA_FALSE; + } + } + } + pWav->dataChunkDataSizeTargetWrite = initialDataChunkSize; + if (pFormat->container == ma_dr_wav_container_riff) { + ma_uint32 chunkSizeRIFF = 28 + (ma_uint32)initialDataChunkSize; + runningPos += ma_dr_wav__write(pWav, "RIFF", 4); + runningPos += ma_dr_wav__write_u32ne_to_le(pWav, chunkSizeRIFF); + runningPos += ma_dr_wav__write(pWav, "WAVE", 4); + } else if (pFormat->container == ma_dr_wav_container_w64) { + ma_uint64 chunkSizeRIFF = 80 + 24 + initialDataChunkSize; + runningPos += ma_dr_wav__write(pWav, ma_dr_wavGUID_W64_RIFF, 16); + runningPos += ma_dr_wav__write_u64ne_to_le(pWav, chunkSizeRIFF); + runningPos += ma_dr_wav__write(pWav, ma_dr_wavGUID_W64_WAVE, 16); + } else if (pFormat->container == ma_dr_wav_container_rf64) { + runningPos += ma_dr_wav__write(pWav, "RF64", 4); + runningPos += ma_dr_wav__write_u32ne_to_le(pWav, 0xFFFFFFFF); + runningPos += ma_dr_wav__write(pWav, "WAVE", 4); + } else { + return MA_FALSE; + } + if (pFormat->container == ma_dr_wav_container_rf64) { + ma_uint32 initialds64ChunkSize = 28; + ma_uint64 initialRiffChunkSize = 8 + initialds64ChunkSize + initialDataChunkSize; + runningPos += ma_dr_wav__write(pWav, "ds64", 4); + runningPos += ma_dr_wav__write_u32ne_to_le(pWav, initialds64ChunkSize); + runningPos += ma_dr_wav__write_u64ne_to_le(pWav, initialRiffChunkSize); + runningPos += ma_dr_wav__write_u64ne_to_le(pWav, initialDataChunkSize); + runningPos += ma_dr_wav__write_u64ne_to_le(pWav, totalSampleCount); + runningPos += ma_dr_wav__write_u32ne_to_le(pWav, 0); + } + if (pFormat->container == ma_dr_wav_container_riff || pFormat->container == ma_dr_wav_container_rf64) { + chunkSizeFMT = 16; + runningPos += ma_dr_wav__write(pWav, "fmt ", 4); + runningPos += ma_dr_wav__write_u32ne_to_le(pWav, 
(ma_uint32)chunkSizeFMT); + } else if (pFormat->container == ma_dr_wav_container_w64) { + chunkSizeFMT = 40; + runningPos += ma_dr_wav__write(pWav, ma_dr_wavGUID_W64_FMT, 16); + runningPos += ma_dr_wav__write_u64ne_to_le(pWav, chunkSizeFMT); + } + runningPos += ma_dr_wav__write_u16ne_to_le(pWav, pWav->fmt.formatTag); + runningPos += ma_dr_wav__write_u16ne_to_le(pWav, pWav->fmt.channels); + runningPos += ma_dr_wav__write_u32ne_to_le(pWav, pWav->fmt.sampleRate); + runningPos += ma_dr_wav__write_u32ne_to_le(pWav, pWav->fmt.avgBytesPerSec); + runningPos += ma_dr_wav__write_u16ne_to_le(pWav, pWav->fmt.blockAlign); + runningPos += ma_dr_wav__write_u16ne_to_le(pWav, pWav->fmt.bitsPerSample); + if (!pWav->isSequentialWrite && pWav->pMetadata != NULL && pWav->metadataCount > 0 && (pFormat->container == ma_dr_wav_container_riff || pFormat->container == ma_dr_wav_container_rf64)) { + runningPos += ma_dr_wav__write_or_count_metadata(pWav, pWav->pMetadata, pWav->metadataCount); + } + pWav->dataChunkDataPos = runningPos; + if (pFormat->container == ma_dr_wav_container_riff) { + ma_uint32 chunkSizeDATA = (ma_uint32)initialDataChunkSize; + runningPos += ma_dr_wav__write(pWav, "data", 4); + runningPos += ma_dr_wav__write_u32ne_to_le(pWav, chunkSizeDATA); + } else if (pFormat->container == ma_dr_wav_container_w64) { + ma_uint64 chunkSizeDATA = 24 + initialDataChunkSize; + runningPos += ma_dr_wav__write(pWav, ma_dr_wavGUID_W64_DATA, 16); + runningPos += ma_dr_wav__write_u64ne_to_le(pWav, chunkSizeDATA); + } else if (pFormat->container == ma_dr_wav_container_rf64) { + runningPos += ma_dr_wav__write(pWav, "data", 4); + runningPos += ma_dr_wav__write_u32ne_to_le(pWav, 0xFFFFFFFF); + } + pWav->container = pFormat->container; + pWav->channels = (ma_uint16)pFormat->channels; + pWav->sampleRate = pFormat->sampleRate; + pWav->bitsPerSample = (ma_uint16)pFormat->bitsPerSample; + pWav->translatedFormatTag = (ma_uint16)pFormat->format; + pWav->dataChunkDataPos = runningPos; + return MA_TRUE; +} +MA_API ma_bool32 ma_dr_wav_init_write(ma_dr_wav* pWav, const ma_dr_wav_data_format* pFormat, ma_dr_wav_write_proc onWrite, ma_dr_wav_seek_proc onSeek, void* pUserData, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (!ma_dr_wav_preinit_write(pWav, pFormat, MA_FALSE, onWrite, onSeek, pUserData, pAllocationCallbacks)) { + return MA_FALSE; + } + return ma_dr_wav_init_write__internal(pWav, pFormat, 0); +} +MA_API ma_bool32 ma_dr_wav_init_write_sequential(ma_dr_wav* pWav, const ma_dr_wav_data_format* pFormat, ma_uint64 totalSampleCount, ma_dr_wav_write_proc onWrite, void* pUserData, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (!ma_dr_wav_preinit_write(pWav, pFormat, MA_TRUE, onWrite, NULL, pUserData, pAllocationCallbacks)) { + return MA_FALSE; + } + return ma_dr_wav_init_write__internal(pWav, pFormat, totalSampleCount); +} +MA_API ma_bool32 ma_dr_wav_init_write_sequential_pcm_frames(ma_dr_wav* pWav, const ma_dr_wav_data_format* pFormat, ma_uint64 totalPCMFrameCount, ma_dr_wav_write_proc onWrite, void* pUserData, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pFormat == NULL) { + return MA_FALSE; + } + return ma_dr_wav_init_write_sequential(pWav, pFormat, totalPCMFrameCount*pFormat->channels, onWrite, pUserData, pAllocationCallbacks); +} +MA_API ma_bool32 ma_dr_wav_init_write_with_metadata(ma_dr_wav* pWav, const ma_dr_wav_data_format* pFormat, ma_dr_wav_write_proc onWrite, ma_dr_wav_seek_proc onSeek, void* pUserData, const ma_allocation_callbacks* pAllocationCallbacks, ma_dr_wav_metadata* 
pMetadata, ma_uint32 metadataCount) +{ + if (!ma_dr_wav_preinit_write(pWav, pFormat, MA_FALSE, onWrite, onSeek, pUserData, pAllocationCallbacks)) { + return MA_FALSE; + } + pWav->pMetadata = pMetadata; + pWav->metadataCount = metadataCount; + return ma_dr_wav_init_write__internal(pWav, pFormat, 0); +} +MA_API ma_uint64 ma_dr_wav_target_write_size_bytes(const ma_dr_wav_data_format* pFormat, ma_uint64 totalFrameCount, ma_dr_wav_metadata* pMetadata, ma_uint32 metadataCount) +{ + ma_uint64 targetDataSizeBytes = (ma_uint64)((ma_int64)totalFrameCount * pFormat->channels * pFormat->bitsPerSample/8.0); + ma_uint64 riffChunkSizeBytes; + ma_uint64 fileSizeBytes = 0; + if (pFormat->container == ma_dr_wav_container_riff) { + riffChunkSizeBytes = ma_dr_wav__riff_chunk_size_riff(targetDataSizeBytes, pMetadata, metadataCount); + fileSizeBytes = (8 + riffChunkSizeBytes); + } else if (pFormat->container == ma_dr_wav_container_w64) { + riffChunkSizeBytes = ma_dr_wav__riff_chunk_size_w64(targetDataSizeBytes); + fileSizeBytes = riffChunkSizeBytes; + } else if (pFormat->container == ma_dr_wav_container_rf64) { + riffChunkSizeBytes = ma_dr_wav__riff_chunk_size_rf64(targetDataSizeBytes, pMetadata, metadataCount); + fileSizeBytes = (8 + riffChunkSizeBytes); + } + return fileSizeBytes; +} +#ifndef MA_DR_WAV_NO_STDIO +MA_PRIVATE size_t ma_dr_wav__on_read_stdio(void* pUserData, void* pBufferOut, size_t bytesToRead) +{ + return fread(pBufferOut, 1, bytesToRead, (FILE*)pUserData); +} +MA_PRIVATE size_t ma_dr_wav__on_write_stdio(void* pUserData, const void* pData, size_t bytesToWrite) +{ + return fwrite(pData, 1, bytesToWrite, (FILE*)pUserData); +} +MA_PRIVATE ma_bool32 ma_dr_wav__on_seek_stdio(void* pUserData, int offset, ma_dr_wav_seek_origin origin) +{ + return fseek((FILE*)pUserData, offset, (origin == ma_dr_wav_seek_origin_current) ? 
SEEK_CUR : SEEK_SET) == 0; +} +MA_API ma_bool32 ma_dr_wav_init_file(ma_dr_wav* pWav, const char* filename, const ma_allocation_callbacks* pAllocationCallbacks) +{ + return ma_dr_wav_init_file_ex(pWav, filename, NULL, NULL, 0, pAllocationCallbacks); +} +MA_PRIVATE ma_bool32 ma_dr_wav_init_file__internal_FILE(ma_dr_wav* pWav, FILE* pFile, ma_dr_wav_chunk_proc onChunk, void* pChunkUserData, ma_uint32 flags, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_bool32 result; + result = ma_dr_wav_preinit(pWav, ma_dr_wav__on_read_stdio, ma_dr_wav__on_seek_stdio, (void*)pFile, pAllocationCallbacks); + if (result != MA_TRUE) { + fclose(pFile); + return result; + } + result = ma_dr_wav_init__internal(pWav, onChunk, pChunkUserData, flags); + if (result != MA_TRUE) { + fclose(pFile); + return result; + } + return MA_TRUE; +} +MA_API ma_bool32 ma_dr_wav_init_file_ex(ma_dr_wav* pWav, const char* filename, ma_dr_wav_chunk_proc onChunk, void* pChunkUserData, ma_uint32 flags, const ma_allocation_callbacks* pAllocationCallbacks) +{ + FILE* pFile; + if (ma_fopen(&pFile, filename, "rb") != MA_SUCCESS) { + return MA_FALSE; + } + return ma_dr_wav_init_file__internal_FILE(pWav, pFile, onChunk, pChunkUserData, flags, pAllocationCallbacks); +} +#ifndef MA_DR_WAV_NO_WCHAR +MA_API ma_bool32 ma_dr_wav_init_file_w(ma_dr_wav* pWav, const wchar_t* filename, const ma_allocation_callbacks* pAllocationCallbacks) +{ + return ma_dr_wav_init_file_ex_w(pWav, filename, NULL, NULL, 0, pAllocationCallbacks); +} +MA_API ma_bool32 ma_dr_wav_init_file_ex_w(ma_dr_wav* pWav, const wchar_t* filename, ma_dr_wav_chunk_proc onChunk, void* pChunkUserData, ma_uint32 flags, const ma_allocation_callbacks* pAllocationCallbacks) +{ + FILE* pFile; + if (ma_wfopen(&pFile, filename, L"rb", pAllocationCallbacks) != MA_SUCCESS) { + return MA_FALSE; + } + return ma_dr_wav_init_file__internal_FILE(pWav, pFile, onChunk, pChunkUserData, flags, pAllocationCallbacks); +} +#endif +MA_API ma_bool32 ma_dr_wav_init_file_with_metadata(ma_dr_wav* pWav, const char* filename, ma_uint32 flags, const ma_allocation_callbacks* pAllocationCallbacks) +{ + FILE* pFile; + if (ma_fopen(&pFile, filename, "rb") != MA_SUCCESS) { + return MA_FALSE; + } + return ma_dr_wav_init_file__internal_FILE(pWav, pFile, NULL, NULL, flags | MA_DR_WAV_WITH_METADATA, pAllocationCallbacks); +} +#ifndef MA_DR_WAV_NO_WCHAR +MA_API ma_bool32 ma_dr_wav_init_file_with_metadata_w(ma_dr_wav* pWav, const wchar_t* filename, ma_uint32 flags, const ma_allocation_callbacks* pAllocationCallbacks) +{ + FILE* pFile; + if (ma_wfopen(&pFile, filename, L"rb", pAllocationCallbacks) != MA_SUCCESS) { + return MA_FALSE; + } + return ma_dr_wav_init_file__internal_FILE(pWav, pFile, NULL, NULL, flags | MA_DR_WAV_WITH_METADATA, pAllocationCallbacks); +} +#endif +MA_PRIVATE ma_bool32 ma_dr_wav_init_file_write__internal_FILE(ma_dr_wav* pWav, FILE* pFile, const ma_dr_wav_data_format* pFormat, ma_uint64 totalSampleCount, ma_bool32 isSequential, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_bool32 result; + result = ma_dr_wav_preinit_write(pWav, pFormat, isSequential, ma_dr_wav__on_write_stdio, ma_dr_wav__on_seek_stdio, (void*)pFile, pAllocationCallbacks); + if (result != MA_TRUE) { + fclose(pFile); + return result; + } + result = ma_dr_wav_init_write__internal(pWav, pFormat, totalSampleCount); + if (result != MA_TRUE) { + fclose(pFile); + return result; + } + return MA_TRUE; +} +MA_PRIVATE ma_bool32 ma_dr_wav_init_file_write__internal(ma_dr_wav* pWav, const char* filename, const 
+MA_PRIVATE ma_bool32 ma_dr_wav_init_file_write__internal(ma_dr_wav* pWav, const char* filename, const ma_dr_wav_data_format* pFormat, ma_uint64 totalSampleCount, ma_bool32 isSequential, const ma_allocation_callbacks* pAllocationCallbacks)
+{
+    FILE* pFile;
+    if (ma_fopen(&pFile, filename, "wb") != MA_SUCCESS) {
+        return MA_FALSE;
+    }
+    return ma_dr_wav_init_file_write__internal_FILE(pWav, pFile, pFormat, totalSampleCount, isSequential, pAllocationCallbacks);
+}
+#ifndef MA_DR_WAV_NO_WCHAR
+MA_PRIVATE ma_bool32 ma_dr_wav_init_file_write_w__internal(ma_dr_wav* pWav, const wchar_t* filename, const ma_dr_wav_data_format* pFormat, ma_uint64 totalSampleCount, ma_bool32 isSequential, const ma_allocation_callbacks* pAllocationCallbacks)
+{
+    FILE* pFile;
+    if (ma_wfopen(&pFile, filename, L"wb", pAllocationCallbacks) != MA_SUCCESS) {
+        return MA_FALSE;
+    }
+    return ma_dr_wav_init_file_write__internal_FILE(pWav, pFile, pFormat, totalSampleCount, isSequential, pAllocationCallbacks);
+}
+#endif
+MA_API ma_bool32 ma_dr_wav_init_file_write(ma_dr_wav* pWav, const char* filename, const ma_dr_wav_data_format* pFormat, const ma_allocation_callbacks* pAllocationCallbacks)
+{
+    return ma_dr_wav_init_file_write__internal(pWav, filename, pFormat, 0, MA_FALSE, pAllocationCallbacks);
+}
+MA_API ma_bool32 ma_dr_wav_init_file_write_sequential(ma_dr_wav* pWav, const char* filename, const ma_dr_wav_data_format* pFormat, ma_uint64 totalSampleCount, const ma_allocation_callbacks* pAllocationCallbacks)
+{
+    return ma_dr_wav_init_file_write__internal(pWav, filename, pFormat, totalSampleCount, MA_TRUE, pAllocationCallbacks);
+}
+MA_API ma_bool32 ma_dr_wav_init_file_write_sequential_pcm_frames(ma_dr_wav* pWav, const char* filename, const ma_dr_wav_data_format* pFormat, ma_uint64 totalPCMFrameCount, const ma_allocation_callbacks* pAllocationCallbacks)
+{
+    if (pFormat == NULL) {
+        return MA_FALSE;
+    }
+    return ma_dr_wav_init_file_write_sequential(pWav, filename, pFormat, totalPCMFrameCount*pFormat->channels, pAllocationCallbacks);
+}
+#ifndef MA_DR_WAV_NO_WCHAR
+MA_API ma_bool32 ma_dr_wav_init_file_write_w(ma_dr_wav* pWav, const wchar_t* filename, const ma_dr_wav_data_format* pFormat, const ma_allocation_callbacks* pAllocationCallbacks)
+{
+    return ma_dr_wav_init_file_write_w__internal(pWav, filename, pFormat, 0, MA_FALSE, pAllocationCallbacks);
+}
+MA_API ma_bool32 ma_dr_wav_init_file_write_sequential_w(ma_dr_wav* pWav, const wchar_t* filename, const ma_dr_wav_data_format* pFormat, ma_uint64 totalSampleCount, const ma_allocation_callbacks* pAllocationCallbacks)
+{
+    return ma_dr_wav_init_file_write_w__internal(pWav, filename, pFormat, totalSampleCount, MA_TRUE, pAllocationCallbacks);
+}
+MA_API ma_bool32 ma_dr_wav_init_file_write_sequential_pcm_frames_w(ma_dr_wav* pWav, const wchar_t* filename, const ma_dr_wav_data_format* pFormat, ma_uint64 totalPCMFrameCount, const ma_allocation_callbacks* pAllocationCallbacks)
+{
+    if (pFormat == NULL) {
+        return MA_FALSE;
+    }
+    return ma_dr_wav_init_file_write_sequential_w(pWav, filename, pFormat, totalPCMFrameCount*pFormat->channels, pAllocationCallbacks);
+}
+#endif
+#endif
+MA_PRIVATE size_t ma_dr_wav__on_read_memory(void* pUserData, void* pBufferOut, size_t bytesToRead)
+{
+    ma_dr_wav* pWav = (ma_dr_wav*)pUserData;
+    size_t bytesRemaining;
+    MA_DR_WAV_ASSERT(pWav != NULL);
+    MA_DR_WAV_ASSERT(pWav->memoryStream.dataSize >= pWav->memoryStream.currentReadPos);
+    bytesRemaining = pWav->memoryStream.dataSize - pWav->memoryStream.currentReadPos;
+    if (bytesToRead > bytesRemaining) {
+        bytesToRead = bytesRemaining;
+    }
+    if (bytesToRead > 0) {
+        MA_DR_WAV_COPY_MEMORY(pBufferOut, pWav->memoryStream.data + pWav->memoryStream.currentReadPos, bytesToRead);
+        pWav->memoryStream.currentReadPos += bytesToRead;
+    }
+    return bytesToRead;
+}
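+/* Memory-stream callbacks: reads are clamped to the bytes remaining, seeks are bounds-checked against the
+   buffer, and the write stream grows its backing allocation (doubling, with a 256-byte minimum) as data is
+   appended. */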
+MA_PRIVATE ma_bool32 ma_dr_wav__on_seek_memory(void* pUserData, int offset, ma_dr_wav_seek_origin origin)
+{
+    ma_dr_wav* pWav = (ma_dr_wav*)pUserData;
+    MA_DR_WAV_ASSERT(pWav != NULL);
+    if (origin == ma_dr_wav_seek_origin_current) {
+        if (offset > 0) {
+            if (pWav->memoryStream.currentReadPos + offset > pWav->memoryStream.dataSize) {
+                return MA_FALSE;
+            }
+        } else {
+            if (pWav->memoryStream.currentReadPos < (size_t)-offset) {
+                return MA_FALSE;
+            }
+        }
+        pWav->memoryStream.currentReadPos += offset;
+    } else {
+        if ((ma_uint32)offset <= pWav->memoryStream.dataSize) {
+            pWav->memoryStream.currentReadPos = offset;
+        } else {
+            return MA_FALSE;
+        }
+    }
+    return MA_TRUE;
+}
+MA_PRIVATE size_t ma_dr_wav__on_write_memory(void* pUserData, const void* pDataIn, size_t bytesToWrite)
+{
+    ma_dr_wav* pWav = (ma_dr_wav*)pUserData;
+    size_t bytesRemaining;
+    MA_DR_WAV_ASSERT(pWav != NULL);
+    MA_DR_WAV_ASSERT(pWav->memoryStreamWrite.dataCapacity >= pWav->memoryStreamWrite.currentWritePos);
+    bytesRemaining = pWav->memoryStreamWrite.dataCapacity - pWav->memoryStreamWrite.currentWritePos;
+    if (bytesRemaining < bytesToWrite) {
+        void* pNewData;
+        size_t newDataCapacity = (pWav->memoryStreamWrite.dataCapacity == 0) ? 256 : pWav->memoryStreamWrite.dataCapacity * 2;
+        if ((newDataCapacity - pWav->memoryStreamWrite.currentWritePos) < bytesToWrite) {
+            newDataCapacity = pWav->memoryStreamWrite.currentWritePos + bytesToWrite;
+        }
+        pNewData = ma_dr_wav__realloc_from_callbacks(*pWav->memoryStreamWrite.ppData, newDataCapacity, pWav->memoryStreamWrite.dataCapacity, &pWav->allocationCallbacks);
+        if (pNewData == NULL) {
+            return 0;
+        }
+        *pWav->memoryStreamWrite.ppData = pNewData;
+        pWav->memoryStreamWrite.dataCapacity = newDataCapacity;
+    }
+    MA_DR_WAV_COPY_MEMORY(((ma_uint8*)(*pWav->memoryStreamWrite.ppData)) + pWav->memoryStreamWrite.currentWritePos, pDataIn, bytesToWrite);
+    pWav->memoryStreamWrite.currentWritePos += bytesToWrite;
+    if (pWav->memoryStreamWrite.dataSize < pWav->memoryStreamWrite.currentWritePos) {
+        pWav->memoryStreamWrite.dataSize = pWav->memoryStreamWrite.currentWritePos;
+    }
+    *pWav->memoryStreamWrite.pDataSize = pWav->memoryStreamWrite.dataSize;
+    return bytesToWrite;
+}
+MA_PRIVATE ma_bool32 ma_dr_wav__on_seek_memory_write(void* pUserData, int offset, ma_dr_wav_seek_origin origin)
+{
+    ma_dr_wav* pWav = (ma_dr_wav*)pUserData;
+    MA_DR_WAV_ASSERT(pWav != NULL);
+    if (origin == ma_dr_wav_seek_origin_current) {
+        if (offset > 0) {
+            if (pWav->memoryStreamWrite.currentWritePos + offset > pWav->memoryStreamWrite.dataSize) {
+                offset = (int)(pWav->memoryStreamWrite.dataSize - pWav->memoryStreamWrite.currentWritePos);
+            }
+        } else {
+            if (pWav->memoryStreamWrite.currentWritePos < (size_t)-offset) {
+                offset = -(int)pWav->memoryStreamWrite.currentWritePos;
+            }
+        }
+        pWav->memoryStreamWrite.currentWritePos += offset;
+    } else {
+        if ((ma_uint32)offset <= pWav->memoryStreamWrite.dataSize) {
+            pWav->memoryStreamWrite.currentWritePos = offset;
+        } else {
+            pWav->memoryStreamWrite.currentWritePos = pWav->memoryStreamWrite.dataSize;
+        }
+    }
+    return MA_TRUE;
+}
+MA_API ma_bool32 ma_dr_wav_init_memory(ma_dr_wav* pWav, const void* data, size_t dataSize, const ma_allocation_callbacks* pAllocationCallbacks)
+{
+    return ma_dr_wav_init_memory_ex(pWav, data, dataSize, NULL, NULL, 0, pAllocationCallbacks);
+}
+MA_API ma_bool32 ma_dr_wav_init_memory_ex(ma_dr_wav* pWav, const void* data, size_t dataSize, ma_dr_wav_chunk_proc onChunk, void* pChunkUserData, ma_uint32 flags, const ma_allocation_callbacks* pAllocationCallbacks)
+{
+    if (data == NULL || dataSize == 0) {
+        return MA_FALSE;
+    }
+    if (!ma_dr_wav_preinit(pWav, ma_dr_wav__on_read_memory, ma_dr_wav__on_seek_memory, pWav, pAllocationCallbacks)) {
+        return MA_FALSE;
+    }
+    pWav->memoryStream.data = (const ma_uint8*)data;
+    pWav->memoryStream.dataSize = dataSize;
+    pWav->memoryStream.currentReadPos = 0;
+    return ma_dr_wav_init__internal(pWav, onChunk, pChunkUserData, flags);
+}
+MA_API ma_bool32 ma_dr_wav_init_memory_with_metadata(ma_dr_wav* pWav, const void* data, size_t dataSize, ma_uint32 flags, const ma_allocation_callbacks* pAllocationCallbacks)
+{
+    if (data == NULL || dataSize == 0) {
+        return MA_FALSE;
+    }
+    if (!ma_dr_wav_preinit(pWav, ma_dr_wav__on_read_memory, ma_dr_wav__on_seek_memory, pWav, pAllocationCallbacks)) {
+        return MA_FALSE;
+    }
+    pWav->memoryStream.data = (const ma_uint8*)data;
+    pWav->memoryStream.dataSize = dataSize;
+    pWav->memoryStream.currentReadPos = 0;
+    return ma_dr_wav_init__internal(pWav, NULL, NULL, flags | MA_DR_WAV_WITH_METADATA);
+}
+MA_PRIVATE ma_bool32 ma_dr_wav_init_memory_write__internal(ma_dr_wav* pWav, void** ppData, size_t* pDataSize, const ma_dr_wav_data_format* pFormat, ma_uint64 totalSampleCount, ma_bool32 isSequential, const ma_allocation_callbacks* pAllocationCallbacks)
+{
+    if (ppData == NULL || pDataSize == NULL) {
+        return MA_FALSE;
+    }
+    *ppData = NULL;
+    *pDataSize = 0;
+    if (!ma_dr_wav_preinit_write(pWav, pFormat, isSequential, ma_dr_wav__on_write_memory, ma_dr_wav__on_seek_memory_write, pWav, pAllocationCallbacks)) {
+        return MA_FALSE;
+    }
+    pWav->memoryStreamWrite.ppData = ppData;
+    pWav->memoryStreamWrite.pDataSize = pDataSize;
+    pWav->memoryStreamWrite.dataSize = 0;
+    pWav->memoryStreamWrite.dataCapacity = 0;
+    pWav->memoryStreamWrite.currentWritePos = 0;
+    return ma_dr_wav_init_write__internal(pWav, pFormat, totalSampleCount);
+}
+MA_API ma_bool32 ma_dr_wav_init_memory_write(ma_dr_wav* pWav, void** ppData, size_t* pDataSize, const ma_dr_wav_data_format* pFormat, const ma_allocation_callbacks* pAllocationCallbacks)
+{
+    return ma_dr_wav_init_memory_write__internal(pWav, ppData, pDataSize, pFormat, 0, MA_FALSE, pAllocationCallbacks);
+}
+MA_API ma_bool32 ma_dr_wav_init_memory_write_sequential(ma_dr_wav* pWav, void** ppData, size_t* pDataSize, const ma_dr_wav_data_format* pFormat, ma_uint64 totalSampleCount, const ma_allocation_callbacks* pAllocationCallbacks)
+{
+    return ma_dr_wav_init_memory_write__internal(pWav, ppData, pDataSize, pFormat, totalSampleCount, MA_TRUE, pAllocationCallbacks);
+}
+MA_API ma_bool32 ma_dr_wav_init_memory_write_sequential_pcm_frames(ma_dr_wav* pWav, void** ppData, size_t* pDataSize, const ma_dr_wav_data_format* pFormat, ma_uint64 totalPCMFrameCount, const ma_allocation_callbacks* pAllocationCallbacks)
+{
+    if (pFormat == NULL) {
+        return MA_FALSE;
+    }
+    return ma_dr_wav_init_memory_write_sequential(pWav, ppData, pDataSize, pFormat, totalPCMFrameCount*pFormat->channels, pAllocationCallbacks);
+}
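+/* Finalization: for writers, ma_dr_wav_uninit pads the data chunk to the container's alignment and, when
+   the stream is seekable and not sequential, back-patches the RIFF/W64/RF64 and data chunk sizes that were
+   unknown while writing. Sequential writers are instead validated against the size promised at init time. */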
+MA_API ma_result ma_dr_wav_uninit(ma_dr_wav* pWav)
+{
+    ma_result result = MA_SUCCESS;
+    if (pWav == NULL) {
+        return MA_INVALID_ARGS;
+    }
+    if (pWav->onWrite != NULL) {
+        ma_uint32 paddingSize = 0;
+        if (pWav->container == ma_dr_wav_container_riff || pWav->container == ma_dr_wav_container_rf64) {
+            paddingSize = ma_dr_wav__chunk_padding_size_riff(pWav->dataChunkDataSize);
+        } else {
+            paddingSize = ma_dr_wav__chunk_padding_size_w64(pWav->dataChunkDataSize);
+        }
+        if (paddingSize > 0) {
+            ma_uint64 paddingData = 0;
+            ma_dr_wav__write(pWav, &paddingData, paddingSize);
+        }
+        if (pWav->onSeek && !pWav->isSequentialWrite) {
+            if (pWav->container == ma_dr_wav_container_riff) {
+                if (pWav->onSeek(pWav->pUserData, 4, ma_dr_wav_seek_origin_start)) {
+                    ma_uint32 riffChunkSize = ma_dr_wav__riff_chunk_size_riff(pWav->dataChunkDataSize, pWav->pMetadata, pWav->metadataCount);
+                    ma_dr_wav__write_u32ne_to_le(pWav, riffChunkSize);
+                }
+                if (pWav->onSeek(pWav->pUserData, (int)pWav->dataChunkDataPos - 4, ma_dr_wav_seek_origin_start)) {
+                    ma_uint32 dataChunkSize = ma_dr_wav__data_chunk_size_riff(pWav->dataChunkDataSize);
+                    ma_dr_wav__write_u32ne_to_le(pWav, dataChunkSize);
+                }
+            } else if (pWav->container == ma_dr_wav_container_w64) {
+                if (pWav->onSeek(pWav->pUserData, 16, ma_dr_wav_seek_origin_start)) {
+                    ma_uint64 riffChunkSize = ma_dr_wav__riff_chunk_size_w64(pWav->dataChunkDataSize);
+                    ma_dr_wav__write_u64ne_to_le(pWav, riffChunkSize);
+                }
+                if (pWav->onSeek(pWav->pUserData, (int)pWav->dataChunkDataPos - 8, ma_dr_wav_seek_origin_start)) {
+                    ma_uint64 dataChunkSize = ma_dr_wav__data_chunk_size_w64(pWav->dataChunkDataSize);
+                    ma_dr_wav__write_u64ne_to_le(pWav, dataChunkSize);
+                }
+            } else if (pWav->container == ma_dr_wav_container_rf64) {
+                int ds64BodyPos = 12 + 8;
+                if (pWav->onSeek(pWav->pUserData, ds64BodyPos + 0, ma_dr_wav_seek_origin_start)) {
+                    ma_uint64 riffChunkSize = ma_dr_wav__riff_chunk_size_rf64(pWav->dataChunkDataSize, pWav->pMetadata, pWav->metadataCount);
+                    ma_dr_wav__write_u64ne_to_le(pWav, riffChunkSize);
+                }
+                if (pWav->onSeek(pWav->pUserData, ds64BodyPos + 8, ma_dr_wav_seek_origin_start)) {
+                    ma_uint64 dataChunkSize = ma_dr_wav__data_chunk_size_rf64(pWav->dataChunkDataSize);
+                    ma_dr_wav__write_u64ne_to_le(pWav, dataChunkSize);
+                }
+            }
+        }
+        if (pWav->isSequentialWrite) {
+            if (pWav->dataChunkDataSize != pWav->dataChunkDataSizeTargetWrite) {
+                result = MA_INVALID_FILE;
+            }
+        }
+    } else {
+        ma_dr_wav_free(pWav->pMetadata, &pWav->allocationCallbacks);
+    }
+#ifndef MA_DR_WAV_NO_STDIO
+    if (pWav->onRead == ma_dr_wav__on_read_stdio || pWav->onWrite == ma_dr_wav__on_write_stdio) {
+        fclose((FILE*)pWav->pUserData);
+    }
+#endif
+    return result;
+}
+MA_API size_t ma_dr_wav_read_raw(ma_dr_wav* pWav, size_t bytesToRead, void* pBufferOut)
+{
+    size_t bytesRead;
+    ma_uint32 bytesPerFrame;
+    if (pWav == NULL || bytesToRead == 0) {
+        return 0;
+    }
+    if (bytesToRead > pWav->bytesRemaining) {
+        bytesToRead = (size_t)pWav->bytesRemaining;
+    }
+    if (bytesToRead == 0) {
+        return 0;
+    }
+    bytesPerFrame = ma_dr_wav_get_bytes_per_pcm_frame(pWav);
+    if (bytesPerFrame == 0) {
+        return 0;
+    }
+    if (pBufferOut != NULL) {
+        bytesRead = pWav->onRead(pWav->pUserData, pBufferOut, bytesToRead);
+    } else {
+        bytesRead = 0;
+        while (bytesRead < bytesToRead) {
+            size_t bytesToSeek = (bytesToRead - bytesRead);
+            if (bytesToSeek > 0x7FFFFFFF) {
+                bytesToSeek = 0x7FFFFFFF;
+            }
+            if (pWav->onSeek(pWav->pUserData, (int)bytesToSeek, ma_dr_wav_seek_origin_current) == MA_FALSE) {
+                break;
+            }
+            bytesRead += bytesToSeek;
+        }
+        while (bytesRead < bytesToRead) {
+            ma_uint8 buffer[4096];
+            size_t bytesSeeked;
+            size_t bytesToSeek = (bytesToRead - bytesRead);
+            if (bytesToSeek > sizeof(buffer)) {
+                bytesToSeek = sizeof(buffer);
+            }
+            bytesSeeked = pWav->onRead(pWav->pUserData, buffer, bytesToSeek);
+            bytesRead += bytesSeeked;
+            if (bytesSeeked < bytesToSeek) {
+                break;
+            }
+        }
+    }
+    pWav->readCursorInPCMFrames += bytesRead / bytesPerFrame;
+    pWav->bytesRemaining -= bytesRead;
+    return bytesRead;
+}
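+/* Frame readers: the _le variant reads raw little-endian frames, the _be variant reads and then byte-swaps
+   each sample, and the generic reader chooses between them from the container endianness (AIFF data is
+   big-endian) and host endianness, also converting signed 8-bit AIFF samples to unsigned. */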
+MA_API ma_uint64 ma_dr_wav_read_pcm_frames_le(ma_dr_wav* pWav, ma_uint64 framesToRead, void* pBufferOut)
+{
+    ma_uint32 bytesPerFrame;
+    ma_uint64 bytesToRead;
+    ma_uint64 framesRemainingInFile;
+    if (pWav == NULL || framesToRead == 0) {
+        return 0;
+    }
+    if (ma_dr_wav__is_compressed_format_tag(pWav->translatedFormatTag)) {
+        return 0;
+    }
+    framesRemainingInFile = pWav->totalPCMFrameCount - pWav->readCursorInPCMFrames;
+    if (framesToRead > framesRemainingInFile) {
+        framesToRead = framesRemainingInFile;
+    }
+    bytesPerFrame = ma_dr_wav_get_bytes_per_pcm_frame(pWav);
+    if (bytesPerFrame == 0) {
+        return 0;
+    }
+    bytesToRead = framesToRead * bytesPerFrame;
+    if (bytesToRead > MA_SIZE_MAX) {
+        bytesToRead = (MA_SIZE_MAX / bytesPerFrame) * bytesPerFrame;
+    }
+    if (bytesToRead == 0) {
+        return 0;
+    }
+    return ma_dr_wav_read_raw(pWav, (size_t)bytesToRead, pBufferOut) / bytesPerFrame;
+}
+MA_API ma_uint64 ma_dr_wav_read_pcm_frames_be(ma_dr_wav* pWav, ma_uint64 framesToRead, void* pBufferOut)
+{
+    ma_uint64 framesRead = ma_dr_wav_read_pcm_frames_le(pWav, framesToRead, pBufferOut);
+    if (pBufferOut != NULL) {
+        ma_uint32 bytesPerFrame = ma_dr_wav_get_bytes_per_pcm_frame(pWav);
+        if (bytesPerFrame == 0) {
+            return 0;
+        }
+        ma_dr_wav__bswap_samples(pBufferOut, framesRead*pWav->channels, bytesPerFrame/pWav->channels);
+    }
+    return framesRead;
+}
+MA_API ma_uint64 ma_dr_wav_read_pcm_frames(ma_dr_wav* pWav, ma_uint64 framesToRead, void* pBufferOut)
+{
+    ma_uint64 framesRead = 0;
+    if (ma_dr_wav_is_container_be(pWav->container)) {
+        if (pWav->container != ma_dr_wav_container_aiff || pWav->aiff.isLE == MA_FALSE) {
+            if (ma_dr_wav__is_little_endian()) {
+                framesRead = ma_dr_wav_read_pcm_frames_be(pWav, framesToRead, pBufferOut);
+            } else {
+                framesRead = ma_dr_wav_read_pcm_frames_le(pWav, framesToRead, pBufferOut);
+            }
+            goto post_process;
+        }
+    }
+    if (ma_dr_wav__is_little_endian()) {
+        framesRead = ma_dr_wav_read_pcm_frames_le(pWav, framesToRead, pBufferOut);
+    } else {
+        framesRead = ma_dr_wav_read_pcm_frames_be(pWav, framesToRead, pBufferOut);
+    }
+    post_process:
+    {
+        if (pWav->container == ma_dr_wav_container_aiff && pWav->bitsPerSample == 8 && pWav->aiff.isUnsigned == MA_FALSE) {
+            if (pBufferOut != NULL) {
+                ma_uint64 iSample;
+                for (iSample = 0; iSample < framesRead * pWav->channels; iSample += 1) {
+                    ((ma_uint8*)pBufferOut)[iSample] += 128;
+                }
+            }
+        }
+    }
+    return framesRead;
+}
+MA_PRIVATE ma_bool32 ma_dr_wav_seek_to_first_pcm_frame(ma_dr_wav* pWav)
+{
+    if (pWav->onWrite != NULL) {
+        return MA_FALSE;
+    }
+    if (!pWav->onSeek(pWav->pUserData, (int)pWav->dataChunkDataPos, ma_dr_wav_seek_origin_start)) {
+        return MA_FALSE;
+    }
+    if (ma_dr_wav__is_compressed_format_tag(pWav->translatedFormatTag)) {
+        if (pWav->translatedFormatTag == MA_DR_WAVE_FORMAT_ADPCM) {
+            MA_DR_WAV_ZERO_OBJECT(&pWav->msadpcm);
+        } else if (pWav->translatedFormatTag == MA_DR_WAVE_FORMAT_DVI_ADPCM) {
+            MA_DR_WAV_ZERO_OBJECT(&pWav->ima);
+        } else {
+            MA_DR_WAV_ASSERT(MA_FALSE);
+        }
+    }
+    pWav->readCursorInPCMFrames = 0;
+    pWav->bytesRemaining = pWav->dataChunkDataSize;
+    return MA_TRUE;
+}
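+/* Seeking: compressed formats (MS-ADPCM, IMA ADPCM) cannot be addressed by byte offset, so a backwards seek
+   rewinds to the first frame and decoding then runs forward into a throwaway buffer until the target frame
+   is reached. Uncompressed formats seek directly by byte offset. */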
+MA_API ma_bool32 ma_dr_wav_seek_to_pcm_frame(ma_dr_wav* pWav, ma_uint64 targetFrameIndex)
+{
+    if (pWav == NULL || pWav->onSeek == NULL) {
+        return MA_FALSE;
+    }
+    if (pWav->onWrite != NULL) {
+        return MA_FALSE;
+    }
+    if (pWav->totalPCMFrameCount == 0) {
+        return MA_TRUE;
+    }
+    if (targetFrameIndex > pWav->totalPCMFrameCount) {
+        targetFrameIndex = pWav->totalPCMFrameCount;
+    }
+    if (ma_dr_wav__is_compressed_format_tag(pWav->translatedFormatTag)) {
+        if (targetFrameIndex < pWav->readCursorInPCMFrames) {
+            if (!ma_dr_wav_seek_to_first_pcm_frame(pWav)) {
+                return MA_FALSE;
+            }
+        }
+        if (targetFrameIndex > pWav->readCursorInPCMFrames) {
+            ma_uint64 offsetInFrames = targetFrameIndex - pWav->readCursorInPCMFrames;
+            ma_int16 devnull[2048];
+            while (offsetInFrames > 0) {
+                ma_uint64 framesRead = 0;
+                ma_uint64 framesToRead = offsetInFrames;
+                if (framesToRead > ma_dr_wav_countof(devnull)/pWav->channels) {
+                    framesToRead = ma_dr_wav_countof(devnull)/pWav->channels;
+                }
+                if (pWav->translatedFormatTag == MA_DR_WAVE_FORMAT_ADPCM) {
+                    framesRead = ma_dr_wav_read_pcm_frames_s16__msadpcm(pWav, framesToRead, devnull);
+                } else if (pWav->translatedFormatTag == MA_DR_WAVE_FORMAT_DVI_ADPCM) {
+                    framesRead = ma_dr_wav_read_pcm_frames_s16__ima(pWav, framesToRead, devnull);
+                } else {
+                    MA_DR_WAV_ASSERT(MA_FALSE);
+                }
+                if (framesRead != framesToRead) {
+                    return MA_FALSE;
+                }
+                offsetInFrames -= framesRead;
+            }
+        }
+    } else {
+        ma_uint64 totalSizeInBytes;
+        ma_uint64 currentBytePos;
+        ma_uint64 targetBytePos;
+        ma_uint64 offset;
+        ma_uint32 bytesPerFrame;
+        bytesPerFrame = ma_dr_wav_get_bytes_per_pcm_frame(pWav);
+        if (bytesPerFrame == 0) {
+            return MA_FALSE;
+        }
+        totalSizeInBytes = pWav->totalPCMFrameCount * bytesPerFrame;
+        currentBytePos = totalSizeInBytes - pWav->bytesRemaining;
+        targetBytePos = targetFrameIndex * bytesPerFrame;
+        if (currentBytePos < targetBytePos) {
+            offset = (targetBytePos - currentBytePos);
+        } else {
+            if (!ma_dr_wav_seek_to_first_pcm_frame(pWav)) {
+                return MA_FALSE;
+            }
+            offset = targetBytePos;
+        }
+        while (offset > 0) {
+            int offset32 = ((offset > INT_MAX) ? INT_MAX : (int)offset);
+            if (!pWav->onSeek(pWav->pUserData, offset32, ma_dr_wav_seek_origin_current)) {
+                return MA_FALSE;
+            }
+            pWav->readCursorInPCMFrames += offset32 / bytesPerFrame;
+            pWav->bytesRemaining -= offset32;
+            offset -= offset32;
+        }
+    }
+    return MA_TRUE;
+}
+MA_API ma_result ma_dr_wav_get_cursor_in_pcm_frames(ma_dr_wav* pWav, ma_uint64* pCursor)
+{
+    if (pCursor == NULL) {
+        return MA_INVALID_ARGS;
+    }
+    *pCursor = 0;
+    if (pWav == NULL) {
+        return MA_INVALID_ARGS;
+    }
+    *pCursor = pWav->readCursorInPCMFrames;
+    return MA_SUCCESS;
+}
+MA_API ma_result ma_dr_wav_get_length_in_pcm_frames(ma_dr_wav* pWav, ma_uint64* pLength)
+{
+    if (pLength == NULL) {
+        return MA_INVALID_ARGS;
+    }
+    *pLength = 0;
+    if (pWav == NULL) {
+        return MA_INVALID_ARGS;
+    }
+    *pLength = pWav->totalPCMFrameCount;
+    return MA_SUCCESS;
+}
+MA_API size_t ma_dr_wav_write_raw(ma_dr_wav* pWav, size_t bytesToWrite, const void* pData)
+{
+    size_t bytesWritten;
+    if (pWav == NULL || bytesToWrite == 0 || pData == NULL) {
+        return 0;
+    }
+    bytesWritten = pWav->onWrite(pWav->pUserData, pData, bytesToWrite);
+    pWav->dataChunkDataSize += bytesWritten;
+    return bytesWritten;
+}
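+/* Frame writers: the _le variant writes the caller's bytes verbatim, while the _be variant byte-swaps each
+   sample through a 4096-byte stack buffer before writing. Both convert bytes written back to whole frames
+   for the return value. */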
+MA_API ma_uint64 ma_dr_wav_write_pcm_frames_le(ma_dr_wav* pWav, ma_uint64 framesToWrite, const void* pData)
+{
+    ma_uint64 bytesToWrite;
+    ma_uint64 bytesWritten;
+    const ma_uint8* pRunningData;
+    if (pWav == NULL || framesToWrite == 0 || pData == NULL) {
+        return 0;
+    }
+    bytesToWrite = ((framesToWrite * pWav->channels * pWav->bitsPerSample) / 8);
+    if (bytesToWrite > MA_SIZE_MAX) {
+        return 0;
+    }
+    bytesWritten = 0;
+    pRunningData = (const ma_uint8*)pData;
+    while (bytesToWrite > 0) {
+        size_t bytesJustWritten;
+        ma_uint64 bytesToWriteThisIteration;
+        bytesToWriteThisIteration = bytesToWrite;
+        MA_DR_WAV_ASSERT(bytesToWriteThisIteration <= MA_SIZE_MAX);
+        bytesJustWritten = ma_dr_wav_write_raw(pWav, (size_t)bytesToWriteThisIteration, pRunningData);
+        if (bytesJustWritten == 0) {
+            break;
+        }
+        bytesToWrite -= bytesJustWritten;
+        bytesWritten += bytesJustWritten;
+        pRunningData += bytesJustWritten;
+    }
+    return (bytesWritten * 8) / pWav->bitsPerSample / pWav->channels;
+}
+MA_API ma_uint64 ma_dr_wav_write_pcm_frames_be(ma_dr_wav* pWav, ma_uint64 framesToWrite, const void* pData)
+{
+    ma_uint64 bytesToWrite;
+    ma_uint64 bytesWritten;
+    ma_uint32 bytesPerSample;
+    const ma_uint8* pRunningData;
+    if (pWav == NULL || framesToWrite == 0 || pData == NULL) {
+        return 0;
+    }
+    bytesToWrite = ((framesToWrite * pWav->channels * pWav->bitsPerSample) / 8);
+    if (bytesToWrite > MA_SIZE_MAX) {
+        return 0;
+    }
+    bytesWritten = 0;
+    pRunningData = (const ma_uint8*)pData;
+    bytesPerSample = ma_dr_wav_get_bytes_per_pcm_frame(pWav) / pWav->channels;
+    if (bytesPerSample == 0) {
+        return 0;
+    }
+    while (bytesToWrite > 0) {
+        ma_uint8 temp[4096];
+        ma_uint32 sampleCount;
+        size_t bytesJustWritten;
+        ma_uint64 bytesToWriteThisIteration;
+        bytesToWriteThisIteration = bytesToWrite;
+        MA_DR_WAV_ASSERT(bytesToWriteThisIteration <= MA_SIZE_MAX);
+        sampleCount = sizeof(temp)/bytesPerSample;
+        if (bytesToWriteThisIteration > ((ma_uint64)sampleCount)*bytesPerSample) {
+            bytesToWriteThisIteration = ((ma_uint64)sampleCount)*bytesPerSample;
+        }
+        MA_DR_WAV_COPY_MEMORY(temp, pRunningData, (size_t)bytesToWriteThisIteration);
+        ma_dr_wav__bswap_samples(temp, sampleCount, bytesPerSample);
+        bytesJustWritten = ma_dr_wav_write_raw(pWav, (size_t)bytesToWriteThisIteration, temp);
+        if (bytesJustWritten == 0) {
+            break;
+        }
+        bytesToWrite -= bytesJustWritten;
+        bytesWritten += bytesJustWritten;
+        pRunningData += bytesJustWritten;
+    }
+    return (bytesWritten * 8) / pWav->bitsPerSample / pWav->channels;
+}
+MA_API ma_uint64 ma_dr_wav_write_pcm_frames(ma_dr_wav* pWav, ma_uint64 framesToWrite, const void* pData)
+{
+    if (ma_dr_wav__is_little_endian()) {
+        return ma_dr_wav_write_pcm_frames_le(pWav, framesToWrite, pData);
+    } else {
+        return ma_dr_wav_write_pcm_frames_be(pWav, framesToWrite, pData);
+    }
+}
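+/* MS-ADPCM decoder: each block starts with a header carrying, per channel, a predictor (an index into the
+   coefficient tables), an initial delta and two seed samples. Every subsequent byte packs two 4-bit nibbles;
+   each nibble is sign-extended, scaled by the current delta and added to the coefficient-weighted prediction,
+   and the delta is then rescaled via the adaptation table with a floor of 16. */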
+MA_PRIVATE ma_uint64 ma_dr_wav_read_pcm_frames_s16__msadpcm(ma_dr_wav* pWav, ma_uint64 framesToRead, ma_int16* pBufferOut)
+{
+    ma_uint64 totalFramesRead = 0;
+    static ma_int32 adaptationTable[] = {
+        230, 230, 230, 230, 307, 409, 512, 614,
+        768, 614, 512, 409, 307, 230, 230, 230
+    };
+    static ma_int32 coeff1Table[] = { 256, 512, 0, 192, 240, 460, 392 };
+    static ma_int32 coeff2Table[] = { 0, -256, 0, 64, 0, -208, -232 };
+    MA_DR_WAV_ASSERT(pWav != NULL);
+    MA_DR_WAV_ASSERT(framesToRead > 0);
+    while (pWav->readCursorInPCMFrames < pWav->totalPCMFrameCount) {
+        MA_DR_WAV_ASSERT(framesToRead > 0);
+        if (pWav->msadpcm.cachedFrameCount == 0 && pWav->msadpcm.bytesRemainingInBlock == 0) {
+            if (pWav->channels == 1) {
+                ma_uint8 header[7];
+                if (pWav->onRead(pWav->pUserData, header, sizeof(header)) != sizeof(header)) {
+                    return totalFramesRead;
+                }
+                pWav->msadpcm.bytesRemainingInBlock = pWav->fmt.blockAlign - sizeof(header);
+                pWav->msadpcm.predictor[0] = header[0];
+                pWav->msadpcm.delta[0] = ma_dr_wav_bytes_to_s16(header + 1);
+                pWav->msadpcm.prevFrames[0][1] = (ma_int32)ma_dr_wav_bytes_to_s16(header + 3);
+                pWav->msadpcm.prevFrames[0][0] = (ma_int32)ma_dr_wav_bytes_to_s16(header + 5);
+                pWav->msadpcm.cachedFrames[2] = pWav->msadpcm.prevFrames[0][0];
+                pWav->msadpcm.cachedFrames[3] = pWav->msadpcm.prevFrames[0][1];
+                pWav->msadpcm.cachedFrameCount = 2;
+                if (pWav->msadpcm.predictor[0] >= ma_dr_wav_countof(coeff1Table)) {
+                    return totalFramesRead;
+                }
+            } else {
+                ma_uint8 header[14];
+                if (pWav->onRead(pWav->pUserData, header, sizeof(header)) != sizeof(header)) {
+                    return totalFramesRead;
+                }
+                pWav->msadpcm.bytesRemainingInBlock = pWav->fmt.blockAlign - sizeof(header);
+                pWav->msadpcm.predictor[0] = header[0];
+                pWav->msadpcm.predictor[1] = header[1];
+                pWav->msadpcm.delta[0] = ma_dr_wav_bytes_to_s16(header + 2);
+                pWav->msadpcm.delta[1] = ma_dr_wav_bytes_to_s16(header + 4);
+                pWav->msadpcm.prevFrames[0][1] = (ma_int32)ma_dr_wav_bytes_to_s16(header + 6);
+                pWav->msadpcm.prevFrames[1][1] = (ma_int32)ma_dr_wav_bytes_to_s16(header + 8);
+                pWav->msadpcm.prevFrames[0][0] = (ma_int32)ma_dr_wav_bytes_to_s16(header + 10);
+                pWav->msadpcm.prevFrames[1][0] = (ma_int32)ma_dr_wav_bytes_to_s16(header + 12);
+                pWav->msadpcm.cachedFrames[0] = pWav->msadpcm.prevFrames[0][0];
+                pWav->msadpcm.cachedFrames[1] = pWav->msadpcm.prevFrames[1][0];
+                pWav->msadpcm.cachedFrames[2] = pWav->msadpcm.prevFrames[0][1];
+                pWav->msadpcm.cachedFrames[3] = pWav->msadpcm.prevFrames[1][1];
+                pWav->msadpcm.cachedFrameCount = 2;
+                if (pWav->msadpcm.predictor[0] >= ma_dr_wav_countof(coeff1Table) || pWav->msadpcm.predictor[1] >= ma_dr_wav_countof(coeff2Table)) {
+                    return totalFramesRead;
+                }
+            }
+        }
+        while (framesToRead > 0 && pWav->msadpcm.cachedFrameCount > 0 && pWav->readCursorInPCMFrames < pWav->totalPCMFrameCount) {
+            if (pBufferOut != NULL) {
+                ma_uint32 iSample = 0;
+                for (iSample = 0; iSample < pWav->channels; iSample += 1) {
+                    pBufferOut[iSample] = (ma_int16)pWav->msadpcm.cachedFrames[(ma_dr_wav_countof(pWav->msadpcm.cachedFrames) - (pWav->msadpcm.cachedFrameCount*pWav->channels)) + iSample];
+                }
+                pBufferOut += pWav->channels;
+            }
+            framesToRead -= 1;
+            totalFramesRead += 1;
+            pWav->readCursorInPCMFrames += 1;
+            pWav->msadpcm.cachedFrameCount -= 1;
+        }
+        if (framesToRead == 0) {
+            break;
+        }
+        if (pWav->msadpcm.cachedFrameCount == 0) {
+            if (pWav->msadpcm.bytesRemainingInBlock == 0) {
+                continue;
+            } else {
+                ma_uint8 nibbles;
+                ma_int32 nibble0;
+                ma_int32 nibble1;
+                if (pWav->onRead(pWav->pUserData, &nibbles, 1) != 1) {
+                    return totalFramesRead;
+                }
+                pWav->msadpcm.bytesRemainingInBlock -= 1;
+                nibble0 = ((nibbles & 0xF0) >> 4); if ((nibbles & 0x80)) { nibble0 |= 0xFFFFFFF0UL; }
+                nibble1 = ((nibbles & 0x0F) >> 0); if ((nibbles & 0x08)) { nibble1 |= 0xFFFFFFF0UL; }
+                if (pWav->channels == 1) {
+                    ma_int32 newSample0;
+                    ma_int32 newSample1;
+                    newSample0 = ((pWav->msadpcm.prevFrames[0][1] * coeff1Table[pWav->msadpcm.predictor[0]]) + (pWav->msadpcm.prevFrames[0][0] * coeff2Table[pWav->msadpcm.predictor[0]])) >> 8;
+                    newSample0 += nibble0 * pWav->msadpcm.delta[0];
+                    newSample0 = ma_dr_wav_clamp(newSample0, -32768, 32767);
+                    pWav->msadpcm.delta[0] = (adaptationTable[((nibbles & 0xF0) >> 4)] * pWav->msadpcm.delta[0]) >> 8;
+                    if (pWav->msadpcm.delta[0] < 16) {
+                        pWav->msadpcm.delta[0] = 16;
+                    }
+                    pWav->msadpcm.prevFrames[0][0] = pWav->msadpcm.prevFrames[0][1];
+                    pWav->msadpcm.prevFrames[0][1] = newSample0;
+                    newSample1 = ((pWav->msadpcm.prevFrames[0][1] * coeff1Table[pWav->msadpcm.predictor[0]]) + (pWav->msadpcm.prevFrames[0][0] * coeff2Table[pWav->msadpcm.predictor[0]])) >> 8;
+                    newSample1 += nibble1 * pWav->msadpcm.delta[0];
+                    newSample1 = ma_dr_wav_clamp(newSample1, -32768, 32767);
+                    pWav->msadpcm.delta[0] = (adaptationTable[((nibbles & 0x0F) >> 0)] * pWav->msadpcm.delta[0]) >> 8;
+                    if (pWav->msadpcm.delta[0] < 16) {
+                        pWav->msadpcm.delta[0] = 16;
+                    }
+                    pWav->msadpcm.prevFrames[0][0] = pWav->msadpcm.prevFrames[0][1];
+                    pWav->msadpcm.prevFrames[0][1] = newSample1;
+                    pWav->msadpcm.cachedFrames[2] = newSample0;
+                    pWav->msadpcm.cachedFrames[3] = newSample1;
+                    pWav->msadpcm.cachedFrameCount = 2;
+                } else {
+                    ma_int32 newSample0;
+                    ma_int32 newSample1;
+                    newSample0 = ((pWav->msadpcm.prevFrames[0][1] * coeff1Table[pWav->msadpcm.predictor[0]]) + (pWav->msadpcm.prevFrames[0][0] * coeff2Table[pWav->msadpcm.predictor[0]])) >> 8;
+                    newSample0 += nibble0 * pWav->msadpcm.delta[0];
+                    newSample0 = ma_dr_wav_clamp(newSample0, -32768, 32767);
+                    pWav->msadpcm.delta[0] = (adaptationTable[((nibbles & 0xF0) >> 4)] * pWav->msadpcm.delta[0]) >> 8;
+                    if (pWav->msadpcm.delta[0] < 16) {
+                        pWav->msadpcm.delta[0] = 16;
+                    }
+                    pWav->msadpcm.prevFrames[0][0] = pWav->msadpcm.prevFrames[0][1];
+                    pWav->msadpcm.prevFrames[0][1] = newSample0;
+                    newSample1 = ((pWav->msadpcm.prevFrames[1][1] * coeff1Table[pWav->msadpcm.predictor[1]]) + (pWav->msadpcm.prevFrames[1][0] * coeff2Table[pWav->msadpcm.predictor[1]])) >> 8;
+                    newSample1 += nibble1 * pWav->msadpcm.delta[1];
+                    newSample1 = ma_dr_wav_clamp(newSample1, -32768, 32767);
+                    pWav->msadpcm.delta[1] = (adaptationTable[((nibbles & 0x0F) >> 0)] * pWav->msadpcm.delta[1]) >> 8;
+                    if (pWav->msadpcm.delta[1] < 16) {
+                        pWav->msadpcm.delta[1] = 16;
+                    }
+                    pWav->msadpcm.prevFrames[1][0] = pWav->msadpcm.prevFrames[1][1];
+                    pWav->msadpcm.prevFrames[1][1] = newSample1;
+                    pWav->msadpcm.cachedFrames[2] = newSample0;
+                    pWav->msadpcm.cachedFrames[3] = newSample1;
+                    pWav->msadpcm.cachedFrameCount = 1;
+                }
+            }
+        }
+    }
+    return totalFramesRead;
+}
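+/* IMA ADPCM decoder: a 4-byte (mono) or 8-byte (stereo) block header seeds each channel's predictor and
+   step-table index. Each nibble adds a step-scaled difference to the predictor, clamped to 16-bit range,
+   and moves the step index by the index table, clamped to [0, 88]. Data bytes are read as interleaved
+   4-byte groups per channel, each group yielding 8 decoded samples for that channel. */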
+MA_PRIVATE ma_uint64 ma_dr_wav_read_pcm_frames_s16__ima(ma_dr_wav* pWav, ma_uint64 framesToRead, ma_int16* pBufferOut)
+{
+    ma_uint64 totalFramesRead = 0;
+    ma_uint32 iChannel;
+    static ma_int32 indexTable[16] = {
+        -1, -1, -1, -1, 2, 4, 6, 8,
+        -1, -1, -1, -1, 2, 4, 6, 8
+    };
+    static ma_int32 stepTable[89] = {
+        7, 8, 9, 10, 11, 12, 13, 14, 16, 17,
+        19, 21, 23, 25, 28, 31, 34, 37, 41, 45,
+        50, 55, 60, 66, 73, 80, 88, 97, 107, 118,
+        130, 143, 157, 173, 190, 209, 230, 253, 279, 307,
+        337, 371, 408, 449, 494, 544, 598, 658, 724, 796,
+        876, 963, 1060, 1166, 1282, 1411, 1552, 1707, 1878, 2066,
+        2272, 2499, 2749, 3024, 3327, 3660, 4026, 4428, 4871, 5358,
+        5894, 6484, 7132, 7845, 8630, 9493, 10442, 11487, 12635, 13899,
+        15289, 16818, 18500, 20350, 22385, 24623, 27086, 29794, 32767
+    };
+    MA_DR_WAV_ASSERT(pWav != NULL);
+    MA_DR_WAV_ASSERT(framesToRead > 0);
+    while (pWav->readCursorInPCMFrames < pWav->totalPCMFrameCount) {
+        MA_DR_WAV_ASSERT(framesToRead > 0);
+        if (pWav->ima.cachedFrameCount == 0 && pWav->ima.bytesRemainingInBlock == 0) {
+            if (pWav->channels == 1) {
+                ma_uint8 header[4];
+                if (pWav->onRead(pWav->pUserData, header, sizeof(header)) != sizeof(header)) {
+                    return totalFramesRead;
+                }
+                pWav->ima.bytesRemainingInBlock = pWav->fmt.blockAlign - sizeof(header);
+                if (header[2] >= ma_dr_wav_countof(stepTable)) {
+                    pWav->onSeek(pWav->pUserData, pWav->ima.bytesRemainingInBlock, ma_dr_wav_seek_origin_current);
+                    pWav->ima.bytesRemainingInBlock = 0;
+                    return totalFramesRead;
+                }
+                pWav->ima.predictor[0] = (ma_int16)ma_dr_wav_bytes_to_u16(header + 0);
+                pWav->ima.stepIndex[0] = ma_dr_wav_clamp(header[2], 0, (ma_int32)ma_dr_wav_countof(stepTable)-1);
+                pWav->ima.cachedFrames[ma_dr_wav_countof(pWav->ima.cachedFrames) - 1] = pWav->ima.predictor[0];
+                pWav->ima.cachedFrameCount = 1;
+            } else {
+                ma_uint8 header[8];
+                if (pWav->onRead(pWav->pUserData, header, sizeof(header)) != sizeof(header)) {
+                    return totalFramesRead;
+                }
+                pWav->ima.bytesRemainingInBlock = pWav->fmt.blockAlign - sizeof(header);
+                if (header[2] >= ma_dr_wav_countof(stepTable) || header[6] >= ma_dr_wav_countof(stepTable)) {
+                    pWav->onSeek(pWav->pUserData, pWav->ima.bytesRemainingInBlock, ma_dr_wav_seek_origin_current);
+                    pWav->ima.bytesRemainingInBlock = 0;
+                    return totalFramesRead;
+                }
+                pWav->ima.predictor[0] = ma_dr_wav_bytes_to_s16(header + 0);
+                pWav->ima.stepIndex[0] = ma_dr_wav_clamp(header[2], 0, (ma_int32)ma_dr_wav_countof(stepTable)-1);
+                pWav->ima.predictor[1] = ma_dr_wav_bytes_to_s16(header + 4);
+                pWav->ima.stepIndex[1] = ma_dr_wav_clamp(header[6], 0, (ma_int32)ma_dr_wav_countof(stepTable)-1);
+                pWav->ima.cachedFrames[ma_dr_wav_countof(pWav->ima.cachedFrames) - 2] = pWav->ima.predictor[0];
+                pWav->ima.cachedFrames[ma_dr_wav_countof(pWav->ima.cachedFrames) - 1] = pWav->ima.predictor[1];
+                pWav->ima.cachedFrameCount = 1;
+            }
+        }
+        while (framesToRead > 0 && pWav->ima.cachedFrameCount > 0 && pWav->readCursorInPCMFrames < pWav->totalPCMFrameCount) {
+            if (pBufferOut != NULL) {
+                ma_uint32 iSample;
+                for (iSample = 0; iSample < pWav->channels; iSample += 1) {
+                    pBufferOut[iSample] = (ma_int16)pWav->ima.cachedFrames[(ma_dr_wav_countof(pWav->ima.cachedFrames) - (pWav->ima.cachedFrameCount*pWav->channels)) + iSample];
+                }
+                pBufferOut += pWav->channels;
+            }
+            framesToRead -= 1;
+            totalFramesRead += 1;
+            pWav->readCursorInPCMFrames += 1;
+            pWav->ima.cachedFrameCount -= 1;
+        }
+        if (framesToRead == 0) {
+            break;
+        }
+        if (pWav->ima.cachedFrameCount == 0) {
+            if (pWav->ima.bytesRemainingInBlock == 0) {
+                continue;
+            } else {
+                pWav->ima.cachedFrameCount = 8;
+                for (iChannel = 0; iChannel < pWav->channels; ++iChannel) {
+                    ma_uint32 iByte;
+                    ma_uint8 nibbles[4];
+                    if (pWav->onRead(pWav->pUserData, &nibbles, 4) != 4) {
+                        pWav->ima.cachedFrameCount = 0;
+                        return totalFramesRead;
+                    }
+                    pWav->ima.bytesRemainingInBlock -= 4;
+                    for (iByte = 0; iByte < 4; ++iByte) {
+                        ma_uint8 nibble0 = ((nibbles[iByte] & 0x0F) >> 0);
+                        ma_uint8 nibble1 = ((nibbles[iByte] & 0xF0) >> 4);
+                        ma_int32 step = stepTable[pWav->ima.stepIndex[iChannel]];
+                        ma_int32 predictor = pWav->ima.predictor[iChannel];
+                        ma_int32 diff = step >> 3;
+                        if (nibble0 & 1) diff += step >> 2;
+                        if (nibble0 & 2) diff += step >> 1;
+                        if (nibble0 & 4) diff += step;
+                        if (nibble0 & 8) diff = -diff;
+                        predictor = ma_dr_wav_clamp(predictor + diff, -32768, 32767);
+                        pWav->ima.predictor[iChannel] = predictor;
+                        pWav->ima.stepIndex[iChannel] = ma_dr_wav_clamp(pWav->ima.stepIndex[iChannel] + indexTable[nibble0], 0, (ma_int32)ma_dr_wav_countof(stepTable)-1);
+                        pWav->ima.cachedFrames[(ma_dr_wav_countof(pWav->ima.cachedFrames) - (pWav->ima.cachedFrameCount*pWav->channels)) + (iByte*2+0)*pWav->channels + iChannel] = predictor;
+                        step = stepTable[pWav->ima.stepIndex[iChannel]];
+                        predictor = pWav->ima.predictor[iChannel];
+                        diff = step >> 3;
+                        if (nibble1 & 1) diff += step >> 2;
+                        if (nibble1 & 2) diff += step >> 1;
+                        if (nibble1 & 4) diff += step;
+                        if (nibble1 & 8) diff = -diff;
+                        predictor = ma_dr_wav_clamp(predictor + diff, -32768, 32767);
+                        pWav->ima.predictor[iChannel] = predictor;
+                        pWav->ima.stepIndex[iChannel] = ma_dr_wav_clamp(pWav->ima.stepIndex[iChannel] + indexTable[nibble1], 0, (ma_int32)ma_dr_wav_countof(stepTable)-1);
+                        pWav->ima.cachedFrames[(ma_dr_wav_countof(pWav->ima.cachedFrames) - (pWav->ima.cachedFrameCount*pWav->channels)) + (iByte*2+1)*pWav->channels + iChannel] = predictor;
+                    }
+                }
+            }
+        }
+    }
+    return totalFramesRead;
+}
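+/* Conversion API: a-law and mu-law bytes are decoded with 256-entry lookup tables that map every possible
+   input byte straight to its 16-bit PCM value, so per-sample decoding is a single indexed load rather than
+   an evaluation of the companding formula. */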
+#ifndef MA_DR_WAV_NO_CONVERSION_API
+static unsigned short g_ma_dr_wavAlawTable[256] = {
+    0xEA80, 0xEB80, 0xE880, 0xE980, 0xEE80, 0xEF80, 0xEC80, 0xED80, 0xE280, 0xE380, 0xE080, 0xE180, 0xE680, 0xE780, 0xE480, 0xE580,
+    0xF540, 0xF5C0, 0xF440, 0xF4C0, 0xF740, 0xF7C0, 0xF640, 0xF6C0, 0xF140, 0xF1C0, 0xF040, 0xF0C0, 0xF340, 0xF3C0, 0xF240, 0xF2C0,
+    0xAA00, 0xAE00, 0xA200, 0xA600, 0xBA00, 0xBE00, 0xB200, 0xB600, 0x8A00, 0x8E00, 0x8200, 0x8600, 0x9A00, 0x9E00, 0x9200, 0x9600,
+    0xD500, 0xD700, 0xD100, 0xD300, 0xDD00, 0xDF00, 0xD900, 0xDB00, 0xC500, 0xC700, 0xC100, 0xC300, 0xCD00, 0xCF00, 0xC900, 0xCB00,
+    0xFEA8, 0xFEB8, 0xFE88, 0xFE98, 0xFEE8, 0xFEF8, 0xFEC8, 0xFED8, 0xFE28, 0xFE38, 0xFE08, 0xFE18, 0xFE68, 0xFE78, 0xFE48, 0xFE58,
+    0xFFA8, 0xFFB8, 0xFF88, 0xFF98, 0xFFE8, 0xFFF8, 0xFFC8, 0xFFD8, 0xFF28, 0xFF38, 0xFF08, 0xFF18, 0xFF68, 0xFF78, 0xFF48, 0xFF58,
+    0xFAA0, 0xFAE0, 0xFA20, 0xFA60, 0xFBA0, 0xFBE0, 0xFB20, 0xFB60, 0xF8A0, 0xF8E0, 0xF820, 0xF860, 0xF9A0, 0xF9E0, 0xF920, 0xF960,
+    0xFD50, 0xFD70, 0xFD10, 0xFD30, 0xFDD0, 0xFDF0, 0xFD90, 0xFDB0, 0xFC50, 0xFC70, 0xFC10, 0xFC30, 0xFCD0, 0xFCF0, 0xFC90, 0xFCB0,
+    0x1580, 0x1480, 0x1780, 0x1680, 0x1180, 0x1080, 0x1380, 0x1280, 0x1D80, 0x1C80, 0x1F80, 0x1E80, 0x1980, 0x1880, 0x1B80, 0x1A80,
+    0x0AC0, 0x0A40, 0x0BC0, 0x0B40, 0x08C0, 0x0840, 0x09C0, 0x0940, 0x0EC0, 0x0E40, 0x0FC0, 0x0F40, 0x0CC0, 0x0C40, 0x0DC0, 0x0D40,
+    0x5600, 0x5200, 0x5E00, 0x5A00, 0x4600, 0x4200, 0x4E00, 0x4A00, 0x7600, 0x7200, 0x7E00, 0x7A00, 0x6600, 0x6200, 0x6E00, 0x6A00,
+    0x2B00, 0x2900, 0x2F00, 0x2D00, 0x2300, 0x2100, 0x2700, 0x2500, 0x3B00, 0x3900, 0x3F00, 0x3D00, 0x3300, 0x3100, 0x3700, 0x3500,
+    0x0158, 0x0148, 0x0178, 0x0168, 0x0118, 0x0108, 0x0138, 0x0128, 0x01D8, 0x01C8, 0x01F8, 0x01E8, 0x0198, 0x0188, 0x01B8, 0x01A8,
+    0x0058, 0x0048, 0x0078, 0x0068, 0x0018, 0x0008, 0x0038, 0x0028, 0x00D8, 0x00C8, 0x00F8, 0x00E8, 0x0098, 0x0088, 0x00B8, 0x00A8,
+    0x0560, 0x0520, 0x05E0, 0x05A0, 0x0460, 0x0420, 0x04E0, 0x04A0, 0x0760, 0x0720, 0x07E0, 0x07A0, 0x0660, 0x0620, 0x06E0, 0x06A0,
+    0x02B0, 0x0290, 0x02F0, 0x02D0, 0x0230, 0x0210, 0x0270, 0x0250, 0x03B0, 0x0390, 0x03F0, 0x03D0, 0x0330, 0x0310, 0x0370, 0x0350
+};
+static unsigned short g_ma_dr_wavMulawTable[256] = {
+    0x8284, 0x8684, 0x8A84, 0x8E84, 0x9284, 0x9684, 0x9A84, 0x9E84, 0xA284, 0xA684, 0xAA84, 0xAE84, 0xB284, 0xB684, 0xBA84, 0xBE84,
+    0xC184, 0xC384, 0xC584, 0xC784, 0xC984, 0xCB84, 0xCD84, 0xCF84, 0xD184, 0xD384, 0xD584, 0xD784, 0xD984, 0xDB84, 0xDD84, 0xDF84,
+    0xE104, 0xE204, 0xE304, 0xE404, 0xE504, 0xE604, 0xE704, 0xE804, 0xE904, 0xEA04, 0xEB04, 0xEC04, 0xED04, 0xEE04, 0xEF04, 0xF004,
+    0xF0C4, 0xF144, 0xF1C4, 0xF244, 0xF2C4, 0xF344, 0xF3C4, 0xF444, 0xF4C4, 0xF544, 0xF5C4, 0xF644, 0xF6C4, 0xF744, 0xF7C4, 0xF844,
+    0xF8A4, 0xF8E4, 0xF924, 0xF964, 0xF9A4, 0xF9E4, 0xFA24, 0xFA64, 0xFAA4, 0xFAE4, 0xFB24, 0xFB64, 0xFBA4, 0xFBE4, 0xFC24, 0xFC64,
+    0xFC94, 0xFCB4, 0xFCD4, 0xFCF4, 0xFD14, 0xFD34, 0xFD54, 0xFD74, 0xFD94, 0xFDB4, 0xFDD4, 0xFDF4, 0xFE14, 0xFE34, 0xFE54, 0xFE74,
+    0xFE8C, 0xFE9C, 0xFEAC, 0xFEBC, 0xFECC, 0xFEDC, 0xFEEC, 0xFEFC, 0xFF0C, 0xFF1C, 0xFF2C, 0xFF3C, 0xFF4C, 0xFF5C, 0xFF6C, 0xFF7C,
+    0xFF88, 0xFF90, 0xFF98, 0xFFA0, 0xFFA8, 0xFFB0, 0xFFB8, 0xFFC0, 0xFFC8, 0xFFD0, 0xFFD8, 0xFFE0, 0xFFE8, 0xFFF0, 0xFFF8, 0x0000,
+    0x7D7C, 0x797C, 0x757C, 0x717C, 0x6D7C, 0x697C, 0x657C, 0x617C, 0x5D7C, 0x597C, 0x557C, 0x517C, 0x4D7C, 0x497C, 0x457C, 0x417C,
+    0x3E7C, 0x3C7C, 0x3A7C, 0x387C, 0x367C, 0x347C, 0x327C, 0x307C, 0x2E7C, 0x2C7C, 0x2A7C, 0x287C, 0x267C, 0x247C, 0x227C, 0x207C,
+    0x1EFC, 0x1DFC, 0x1CFC, 0x1BFC, 0x1AFC, 0x19FC, 0x18FC, 0x17FC, 0x16FC, 0x15FC, 0x14FC, 0x13FC, 0x12FC, 0x11FC, 0x10FC, 0x0FFC,
+    0x0F3C, 0x0EBC, 0x0E3C, 0x0DBC, 0x0D3C, 0x0CBC, 0x0C3C, 0x0BBC, 0x0B3C, 0x0ABC, 0x0A3C, 0x09BC, 0x093C, 0x08BC, 0x083C, 0x07BC,
+    0x075C, 0x071C, 0x06DC, 0x069C, 0x065C, 0x061C, 0x05DC, 0x059C, 0x055C, 0x051C, 0x04DC, 0x049C, 0x045C, 0x041C, 0x03DC, 0x039C,
+    0x036C, 0x034C, 0x032C, 0x030C, 0x02EC, 0x02CC, 0x02AC, 0x028C, 0x026C, 0x024C, 0x022C, 0x020C, 0x01EC, 0x01CC, 0x01AC, 0x018C,
+    0x0174, 0x0164, 0x0154, 0x0144, 0x0134, 0x0124, 0x0114, 0x0104, 0x00F4, 0x00E4, 0x00D4, 0x00C4, 0x00B4, 0x00A4, 0x0094, 0x0084,
+    0x0078, 0x0070, 0x0068, 0x0060, 0x0058, 0x0050, 0x0048, 0x0040, 0x0038, 0x0030, 0x0028, 0x0020, 0x0018, 0x0010, 0x0008, 0x0000
+};
+static MA_INLINE ma_int16 ma_dr_wav__alaw_to_s16(ma_uint8 sampleIn)
+{
+    return (short)g_ma_dr_wavAlawTable[sampleIn];
+}
+static MA_INLINE ma_int16 ma_dr_wav__mulaw_to_s16(ma_uint8 sampleIn)
+{
+    return (short)g_ma_dr_wavMulawTable[sampleIn];
+}
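+/* Generic PCM -> s16: widths of 1, 2, 3 and 4 bytes take dedicated fast paths; any other width up to 8
+   bytes is assembled MSB-aligned into a 64-bit sample and arithmetically shifted down to 16 bits. Widths
+   above 8 bytes are unsupported and the output is zeroed. */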
+MA_PRIVATE void ma_dr_wav__pcm_to_s16(ma_int16* pOut, const ma_uint8* pIn, size_t totalSampleCount, unsigned int bytesPerSample)
+{
+    size_t i;
+    if (bytesPerSample == 1) {
+        ma_dr_wav_u8_to_s16(pOut, pIn, totalSampleCount);
+        return;
+    }
+    if (bytesPerSample == 2) {
+        for (i = 0; i < totalSampleCount; ++i) {
+            *pOut++ = ((const ma_int16*)pIn)[i];
+        }
+        return;
+    }
+    if (bytesPerSample == 3) {
+        ma_dr_wav_s24_to_s16(pOut, pIn, totalSampleCount);
+        return;
+    }
+    if (bytesPerSample == 4) {
+        ma_dr_wav_s32_to_s16(pOut, (const ma_int32*)pIn, totalSampleCount);
+        return;
+    }
+    if (bytesPerSample > 8) {
+        MA_DR_WAV_ZERO_MEMORY(pOut, totalSampleCount * sizeof(*pOut));
+        return;
+    }
+    for (i = 0; i < totalSampleCount; ++i) {
+        ma_uint64 sample = 0;
+        unsigned int shift = (8 - bytesPerSample) * 8;
+        unsigned int j;
+        for (j = 0; j < bytesPerSample; j += 1) {
+            MA_DR_WAV_ASSERT(j < 8);
+            sample |= (ma_uint64)(pIn[j]) << shift;
+            shift += 8;
+        }
+        pIn += j;
+        *pOut++ = (ma_int16)((ma_int64)sample >> 48);
+    }
+}
+MA_PRIVATE void ma_dr_wav__ieee_to_s16(ma_int16* pOut, const ma_uint8* pIn, size_t totalSampleCount, unsigned int bytesPerSample)
+{
+    if (bytesPerSample == 4) {
+        ma_dr_wav_f32_to_s16(pOut, (const float*)pIn, totalSampleCount);
+        return;
+    } else if (bytesPerSample == 8) {
+        ma_dr_wav_f64_to_s16(pOut, (const double*)pIn, totalSampleCount);
+        return;
+    } else {
+        MA_DR_WAV_ZERO_MEMORY(pOut, totalSampleCount * sizeof(*pOut));
+        return;
+    }
+}
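+/* Staged s16 readers: each reads raw frames into a 4096-byte stack buffer in chunks and converts into the
+   caller's buffer, so conversion needs no heap allocation. A NULL output buffer degrades to a plain skip
+   via ma_dr_wav_read_pcm_frames. */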
+MA_PRIVATE ma_uint64 ma_dr_wav_read_pcm_frames_s16__pcm(ma_dr_wav* pWav, ma_uint64 framesToRead, ma_int16* pBufferOut)
+{
+    ma_uint64 totalFramesRead;
+    ma_uint8 sampleData[4096] = {0};
+    ma_uint32 bytesPerFrame;
+    ma_uint32 bytesPerSample;
+    ma_uint64 samplesRead;
+    if ((pWav->translatedFormatTag == MA_DR_WAVE_FORMAT_PCM && pWav->bitsPerSample == 16) || pBufferOut == NULL) {
+        return ma_dr_wav_read_pcm_frames(pWav, framesToRead, pBufferOut);
+    }
+    bytesPerFrame = ma_dr_wav_get_bytes_per_pcm_frame(pWav);
+    if (bytesPerFrame == 0) {
+        return 0;
+    }
+    bytesPerSample = bytesPerFrame / pWav->channels;
+    if (bytesPerSample == 0 || (bytesPerFrame % pWav->channels) != 0) {
+        return 0;
+    }
+    totalFramesRead = 0;
+    while (framesToRead > 0) {
+        ma_uint64 framesToReadThisIteration = ma_dr_wav_min(framesToRead, sizeof(sampleData)/bytesPerFrame);
+        ma_uint64 framesRead = ma_dr_wav_read_pcm_frames(pWav, framesToReadThisIteration, sampleData);
+        if (framesRead == 0) {
+            break;
+        }
+        MA_DR_WAV_ASSERT(framesRead <= framesToReadThisIteration);
+        samplesRead = framesRead * pWav->channels;
+        if ((samplesRead * bytesPerSample) > sizeof(sampleData)) {
+            MA_DR_WAV_ASSERT(MA_FALSE);
+            break;
+        }
+        ma_dr_wav__pcm_to_s16(pBufferOut, sampleData, (size_t)samplesRead, bytesPerSample);
+        pBufferOut += samplesRead;
+        framesToRead -= framesRead;
+        totalFramesRead += framesRead;
+    }
+    return totalFramesRead;
+}
+MA_PRIVATE ma_uint64 ma_dr_wav_read_pcm_frames_s16__ieee(ma_dr_wav* pWav, ma_uint64 framesToRead, ma_int16* pBufferOut)
+{
+    ma_uint64 totalFramesRead;
+    ma_uint8 sampleData[4096] = {0};
+    ma_uint32 bytesPerFrame;
+    ma_uint32 bytesPerSample;
+    ma_uint64 samplesRead;
+    if (pBufferOut == NULL) {
+        return ma_dr_wav_read_pcm_frames(pWav, framesToRead, NULL);
+    }
+    bytesPerFrame = ma_dr_wav_get_bytes_per_pcm_frame(pWav);
+    if (bytesPerFrame == 0) {
+        return 0;
+    }
+    bytesPerSample = bytesPerFrame / pWav->channels;
+    if (bytesPerSample == 0 || (bytesPerFrame % pWav->channels) != 0) {
+        return 0;
+    }
+    totalFramesRead = 0;
+    while (framesToRead > 0) {
+        ma_uint64 framesToReadThisIteration = ma_dr_wav_min(framesToRead, sizeof(sampleData)/bytesPerFrame);
+        ma_uint64 framesRead = ma_dr_wav_read_pcm_frames(pWav, framesToReadThisIteration, sampleData);
+        if (framesRead == 0) {
+            break;
+        }
+        MA_DR_WAV_ASSERT(framesRead <= framesToReadThisIteration);
+        samplesRead = framesRead * pWav->channels;
+        if ((samplesRead * bytesPerSample) > sizeof(sampleData)) {
+            MA_DR_WAV_ASSERT(MA_FALSE);
+            break;
+        }
+        ma_dr_wav__ieee_to_s16(pBufferOut, sampleData, (size_t)samplesRead, bytesPerSample);
+        pBufferOut += samplesRead;
+        framesToRead -= framesRead;
+        totalFramesRead += framesRead;
+    }
+    return totalFramesRead;
+}
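+/* When MA_DR_WAV_LIBSNDFILE_COMPAT is defined, a-law and mu-law samples decoded from AIFF files are negated
+   below so the output matches libsndfile's behaviour. */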
+MA_PRIVATE ma_uint64 ma_dr_wav_read_pcm_frames_s16__alaw(ma_dr_wav* pWav, ma_uint64 framesToRead, ma_int16* pBufferOut)
+{
+    ma_uint64 totalFramesRead;
+    ma_uint8 sampleData[4096] = {0};
+    ma_uint32 bytesPerFrame;
+    ma_uint32 bytesPerSample;
+    ma_uint64 samplesRead;
+    if (pBufferOut == NULL) {
+        return ma_dr_wav_read_pcm_frames(pWav, framesToRead, NULL);
+    }
+    bytesPerFrame = ma_dr_wav_get_bytes_per_pcm_frame(pWav);
+    if (bytesPerFrame == 0) {
+        return 0;
+    }
+    bytesPerSample = bytesPerFrame / pWav->channels;
+    if (bytesPerSample == 0 || (bytesPerFrame % pWav->channels) != 0) {
+        return 0;
+    }
+    totalFramesRead = 0;
+    while (framesToRead > 0) {
+        ma_uint64 framesToReadThisIteration = ma_dr_wav_min(framesToRead, sizeof(sampleData)/bytesPerFrame);
+        ma_uint64 framesRead = ma_dr_wav_read_pcm_frames(pWav, framesToReadThisIteration, sampleData);
+        if (framesRead == 0) {
+            break;
+        }
+        MA_DR_WAV_ASSERT(framesRead <= framesToReadThisIteration);
+        samplesRead = framesRead * pWav->channels;
+        if ((samplesRead * bytesPerSample) > sizeof(sampleData)) {
+            MA_DR_WAV_ASSERT(MA_FALSE);
+            break;
+        }
+        ma_dr_wav_alaw_to_s16(pBufferOut, sampleData, (size_t)samplesRead);
+        #ifdef MA_DR_WAV_LIBSNDFILE_COMPAT
+        {
+            if (pWav->container == ma_dr_wav_container_aiff) {
+                ma_uint64 iSample;
+                for (iSample = 0; iSample < samplesRead; iSample += 1) {
+                    pBufferOut[iSample] = -pBufferOut[iSample];
+                }
+            }
+        }
+        #endif
+        pBufferOut += samplesRead;
+        framesToRead -= framesRead;
+        totalFramesRead += framesRead;
+    }
+    return totalFramesRead;
+}
+MA_PRIVATE ma_uint64 ma_dr_wav_read_pcm_frames_s16__mulaw(ma_dr_wav* pWav, ma_uint64 framesToRead, ma_int16* pBufferOut)
+{
+    ma_uint64 totalFramesRead;
+    ma_uint8 sampleData[4096] = {0};
+    ma_uint32 bytesPerFrame;
+    ma_uint32 bytesPerSample;
+    ma_uint64 samplesRead;
+    if (pBufferOut == NULL) {
+        return ma_dr_wav_read_pcm_frames(pWav, framesToRead, NULL);
+    }
+    bytesPerFrame = ma_dr_wav_get_bytes_per_pcm_frame(pWav);
+    if (bytesPerFrame == 0) {
+        return 0;
+    }
+    bytesPerSample = bytesPerFrame / pWav->channels;
+    if (bytesPerSample == 0 || (bytesPerFrame % pWav->channels) != 0) {
+        return 0;
+    }
+    totalFramesRead = 0;
+    while (framesToRead > 0) {
+        ma_uint64 framesToReadThisIteration = ma_dr_wav_min(framesToRead, sizeof(sampleData)/bytesPerFrame);
+        ma_uint64 framesRead = ma_dr_wav_read_pcm_frames(pWav, framesToReadThisIteration, sampleData);
+        if (framesRead == 0) {
+            break;
+        }
+        MA_DR_WAV_ASSERT(framesRead <= framesToReadThisIteration);
+        samplesRead = framesRead * pWav->channels;
+        if ((samplesRead * bytesPerSample) > sizeof(sampleData)) {
+            MA_DR_WAV_ASSERT(MA_FALSE);
+            break;
+        }
+        ma_dr_wav_mulaw_to_s16(pBufferOut, sampleData, (size_t)samplesRead);
+        #ifdef MA_DR_WAV_LIBSNDFILE_COMPAT
+        {
+            if (pWav->container == ma_dr_wav_container_aiff) {
+                ma_uint64 iSample;
+                for (iSample = 0; iSample < samplesRead; iSample += 1) {
+                    pBufferOut[iSample] = -pBufferOut[iSample];
+                }
+            }
+        }
+        #endif
+        pBufferOut += samplesRead;
+        framesToRead -= framesRead;
+        totalFramesRead += framesRead;
+    }
+    return totalFramesRead;
+}
+MA_API ma_uint64 ma_dr_wav_read_pcm_frames_s16(ma_dr_wav* pWav, ma_uint64 framesToRead, ma_int16* pBufferOut)
+{
+    if (pWav == NULL || framesToRead == 0) {
+        return 0;
+    }
+    if (pBufferOut == NULL) {
+        return ma_dr_wav_read_pcm_frames(pWav, framesToRead, NULL);
+    }
+    if (framesToRead * pWav->channels * sizeof(ma_int16) > MA_SIZE_MAX) {
+        framesToRead = MA_SIZE_MAX / sizeof(ma_int16) / pWav->channels;
+    }
+    if (pWav->translatedFormatTag == MA_DR_WAVE_FORMAT_PCM) {
+        return ma_dr_wav_read_pcm_frames_s16__pcm(pWav, framesToRead, pBufferOut);
+    }
+    if (pWav->translatedFormatTag == MA_DR_WAVE_FORMAT_IEEE_FLOAT) {
+        return ma_dr_wav_read_pcm_frames_s16__ieee(pWav, framesToRead, pBufferOut);
+    }
+    if (pWav->translatedFormatTag == MA_DR_WAVE_FORMAT_ALAW) {
+        return ma_dr_wav_read_pcm_frames_s16__alaw(pWav, framesToRead, pBufferOut);
+    }
+    if (pWav->translatedFormatTag == MA_DR_WAVE_FORMAT_MULAW) {
+        return ma_dr_wav_read_pcm_frames_s16__mulaw(pWav, framesToRead, pBufferOut);
+    }
+    if (pWav->translatedFormatTag == MA_DR_WAVE_FORMAT_ADPCM) {
+        return ma_dr_wav_read_pcm_frames_s16__msadpcm(pWav, framesToRead, pBufferOut);
+    }
+    if (pWav->translatedFormatTag == MA_DR_WAVE_FORMAT_DVI_ADPCM) {
+        return ma_dr_wav_read_pcm_frames_s16__ima(pWav, framesToRead, pBufferOut);
+    }
+    return 0;
+}
+MA_API ma_uint64 ma_dr_wav_read_pcm_frames_s16le(ma_dr_wav* pWav, ma_uint64 framesToRead, ma_int16* pBufferOut)
+{
+    ma_uint64 framesRead = ma_dr_wav_read_pcm_frames_s16(pWav, framesToRead, pBufferOut);
+    if (pBufferOut != NULL && ma_dr_wav__is_little_endian() == MA_FALSE) {
+        ma_dr_wav__bswap_samples_s16(pBufferOut, framesRead*pWav->channels);
+    }
+    return framesRead;
+}
+MA_API ma_uint64 ma_dr_wav_read_pcm_frames_s16be(ma_dr_wav* pWav, ma_uint64 framesToRead, ma_int16* pBufferOut)
+{
+    ma_uint64 framesRead = ma_dr_wav_read_pcm_frames_s16(pWav, framesToRead, pBufferOut);
+    if (pBufferOut != NULL && ma_dr_wav__is_little_endian() == MA_TRUE) {
+        ma_dr_wav__bswap_samples_s16(pBufferOut, framesRead*pWav->channels);
+    }
+    return framesRead;
+}
+MA_API void ma_dr_wav_u8_to_s16(ma_int16* pOut, const ma_uint8* pIn, size_t sampleCount)
+{
+    int r;
+    size_t i;
+    for (i = 0; i < sampleCount; ++i) {
+        int x = pIn[i];
+        r = x << 8;
+        r = r - 32768;
+        pOut[i] = (short)r;
+    }
+}
+MA_API void ma_dr_wav_s24_to_s16(ma_int16* pOut, const ma_uint8* pIn, size_t sampleCount)
+{
+    int r;
+    size_t i;
+    for (i = 0; i < sampleCount; ++i) {
+        int x = ((int)(((unsigned int)(((const ma_uint8*)pIn)[i*3+0]) << 8) | ((unsigned int)(((const ma_uint8*)pIn)[i*3+1]) << 16) | ((unsigned int)(((const ma_uint8*)pIn)[i*3+2])) << 24)) >> 8;
+        r = x >> 8;
+        pOut[i] = (short)r;
+    }
+}
+MA_API void ma_dr_wav_s32_to_s16(ma_int16* pOut, const ma_int32* pIn, size_t sampleCount)
+{
+    int r;
+    size_t i;
+    for (i = 0; i < sampleCount; ++i) {
+        int x = pIn[i];
+        r = x >> 16;
+        pOut[i] = (short)r;
+    }
+}
+MA_API void ma_dr_wav_f32_to_s16(ma_int16* pOut, const float* pIn, size_t sampleCount)
+{
+    int r;
+    size_t i;
+    for (i = 0; i < sampleCount; ++i) {
+        float x = pIn[i];
+        float c;
+        c = ((x < -1) ? -1 : ((x > 1) ? 1 : x));
+        c = c + 1;
+        r = (int)(c * 32767.5f);
+        r = r - 32768;
+        pOut[i] = (short)r;
+    }
+}
+MA_API void ma_dr_wav_f64_to_s16(ma_int16* pOut, const double* pIn, size_t sampleCount)
+{
+    int r;
+    size_t i;
+    for (i = 0; i < sampleCount; ++i) {
+        double x = pIn[i];
+        double c;
+        c = ((x < -1) ? -1 : ((x > 1) ? 1 : x));
+        c = c + 1;
+        r = (int)(c * 32767.5);
+        r = r - 32768;
+        pOut[i] = (short)r;
+    }
+}
+MA_API void ma_dr_wav_alaw_to_s16(ma_int16* pOut, const ma_uint8* pIn, size_t sampleCount)
+{
+    size_t i;
+    for (i = 0; i < sampleCount; ++i) {
+        pOut[i] = ma_dr_wav__alaw_to_s16(pIn[i]);
+    }
+}
+MA_API void ma_dr_wav_mulaw_to_s16(ma_int16* pOut, const ma_uint8* pIn, size_t sampleCount)
+{
+    size_t i;
+    for (i = 0; i < sampleCount; ++i) {
+        pOut[i] = ma_dr_wav__mulaw_to_s16(pIn[i]);
+    }
+}
+MA_PRIVATE void ma_dr_wav__pcm_to_f32(float* pOut, const ma_uint8* pIn, size_t sampleCount, unsigned int bytesPerSample)
+{
+    unsigned int i;
+    if (bytesPerSample == 1) {
+        ma_dr_wav_u8_to_f32(pOut, pIn, sampleCount);
+        return;
+    }
+    if (bytesPerSample == 2) {
+        ma_dr_wav_s16_to_f32(pOut, (const ma_int16*)pIn, sampleCount);
+        return;
+    }
+    if (bytesPerSample == 3) {
+        ma_dr_wav_s24_to_f32(pOut, pIn, sampleCount);
+        return;
+    }
+    if (bytesPerSample == 4) {
+        ma_dr_wav_s32_to_f32(pOut, (const ma_int32*)pIn, sampleCount);
+        return;
+    }
+    if (bytesPerSample > 8) {
+        MA_DR_WAV_ZERO_MEMORY(pOut, sampleCount * sizeof(*pOut));
+        return;
+    }
+    for (i = 0; i < sampleCount; ++i) {
+        ma_uint64 sample = 0;
+        unsigned int shift = (8 - bytesPerSample) * 8;
+        unsigned int j;
+        for (j = 0; j < bytesPerSample; j += 1) {
+            MA_DR_WAV_ASSERT(j < 8);
+            sample |= (ma_uint64)(pIn[j]) << shift;
+            shift += 8;
+        }
+        pIn += j;
+        *pOut++ = (float)((ma_int64)sample / 9223372036854775807.0);
+    }
+}
+MA_PRIVATE void ma_dr_wav__ieee_to_f32(float* pOut, const ma_uint8* pIn, size_t sampleCount, unsigned int bytesPerSample)
+{
+    if (bytesPerSample == 4) {
+        unsigned int i;
+        for (i = 0; i < sampleCount; ++i) {
+            *pOut++ = ((const float*)pIn)[i];
+        }
+        return;
+    } else if (bytesPerSample == 8) {
+        ma_dr_wav_f64_to_f32(pOut, (const double*)pIn, sampleCount);
+        return;
+    } else {
+        MA_DR_WAV_ZERO_MEMORY(pOut, sampleCount * sizeof(*pOut));
+        return;
+    }
+}
+MA_PRIVATE ma_uint64 ma_dr_wav_read_pcm_frames_f32__pcm(ma_dr_wav* pWav, ma_uint64 framesToRead, float* pBufferOut)
+{
+    ma_uint64 totalFramesRead;
+    ma_uint8 sampleData[4096] = {0};
+    ma_uint32 bytesPerFrame;
+    ma_uint32 bytesPerSample;
+    ma_uint64 samplesRead;
+    bytesPerFrame = ma_dr_wav_get_bytes_per_pcm_frame(pWav);
+    if (bytesPerFrame == 0) {
+        return 0;
+    }
+    bytesPerSample = bytesPerFrame / pWav->channels;
+    if (bytesPerSample == 0 || (bytesPerFrame % pWav->channels) != 0) {
+        return 0;
+    }
+    totalFramesRead = 0;
+    while (framesToRead > 0) {
+        ma_uint64 framesToReadThisIteration = ma_dr_wav_min(framesToRead, sizeof(sampleData)/bytesPerFrame);
+        ma_uint64 framesRead = ma_dr_wav_read_pcm_frames(pWav, framesToReadThisIteration, sampleData);
+        if (framesRead == 0) {
+            break;
+        }
+        MA_DR_WAV_ASSERT(framesRead <= framesToReadThisIteration);
+        samplesRead = framesRead * pWav->channels;
+        if ((samplesRead * bytesPerSample) > sizeof(sampleData)) {
+            MA_DR_WAV_ASSERT(MA_FALSE);
+            break;
+        }
+        ma_dr_wav__pcm_to_f32(pBufferOut, sampleData, (size_t)samplesRead, bytesPerSample);
+        pBufferOut += samplesRead;
+        framesToRead -= framesRead;
+        totalFramesRead += framesRead;
+    }
+    return totalFramesRead;
+}
+MA_PRIVATE ma_uint64 ma_dr_wav_read_pcm_frames_f32__msadpcm_ima(ma_dr_wav* pWav, ma_uint64 framesToRead, float* pBufferOut)
+{
+    ma_uint64 totalFramesRead;
+    ma_int16 samples16[2048];
+    totalFramesRead = 0;
+    while (framesToRead > 0) {
+        ma_uint64 framesToReadThisIteration = ma_dr_wav_min(framesToRead, ma_dr_wav_countof(samples16)/pWav->channels);
+        ma_uint64 framesRead = ma_dr_wav_read_pcm_frames_s16(pWav, framesToReadThisIteration, samples16);
+        if (framesRead == 0) {
+            break;
+        }
+        MA_DR_WAV_ASSERT(framesRead <= framesToReadThisIteration);
+        ma_dr_wav_s16_to_f32(pBufferOut, samples16, (size_t)(framesRead*pWav->channels));
+        pBufferOut += framesRead*pWav->channels;
+        framesToRead -= framesRead;
+        totalFramesRead += framesRead;
+    }
+    return totalFramesRead;
+}
+MA_PRIVATE ma_uint64 ma_dr_wav_read_pcm_frames_f32__ieee(ma_dr_wav* pWav, ma_uint64 framesToRead, float* pBufferOut)
+{
+    ma_uint64 totalFramesRead;
+    ma_uint8 sampleData[4096] = {0};
+    ma_uint32 bytesPerFrame;
+    ma_uint32 bytesPerSample;
+    ma_uint64 samplesRead;
+    if (pWav->translatedFormatTag == MA_DR_WAVE_FORMAT_IEEE_FLOAT && pWav->bitsPerSample == 32) {
+        return ma_dr_wav_read_pcm_frames(pWav, framesToRead, pBufferOut);
+    }
+    bytesPerFrame = ma_dr_wav_get_bytes_per_pcm_frame(pWav);
+    if (bytesPerFrame == 0) {
+        return 0;
+    }
+    bytesPerSample = bytesPerFrame / pWav->channels;
+    if (bytesPerSample == 0 || (bytesPerFrame % pWav->channels) != 0) {
+        return 0;
+    }
+    totalFramesRead = 0;
+    while (framesToRead > 0) {
+        ma_uint64 framesToReadThisIteration = ma_dr_wav_min(framesToRead, sizeof(sampleData)/bytesPerFrame);
+        ma_uint64 framesRead = ma_dr_wav_read_pcm_frames(pWav, framesToReadThisIteration, sampleData);
+        if (framesRead == 0) {
+            break;
+        }
+        MA_DR_WAV_ASSERT(framesRead <= framesToReadThisIteration);
+        samplesRead = framesRead * pWav->channels;
+        if ((samplesRead * bytesPerSample) > sizeof(sampleData)) {
+            MA_DR_WAV_ASSERT(MA_FALSE);
+            break;
+        }
+        ma_dr_wav__ieee_to_f32(pBufferOut, sampleData, (size_t)samplesRead, bytesPerSample);
+        pBufferOut += samplesRead;
+        framesToRead -= framesRead;
+        totalFramesRead += framesRead;
+    }
+    return totalFramesRead;
+}
+MA_PRIVATE ma_uint64 ma_dr_wav_read_pcm_frames_f32__alaw(ma_dr_wav* pWav, ma_uint64 framesToRead, float* pBufferOut)
+{
+    ma_uint64 totalFramesRead;
+    ma_uint8 sampleData[4096] = {0};
+    ma_uint32 bytesPerFrame;
+    ma_uint32 bytesPerSample;
+    ma_uint64 samplesRead;
+    bytesPerFrame = ma_dr_wav_get_bytes_per_pcm_frame(pWav);
+    if (bytesPerFrame == 0) {
+        return 0;
+    }
+    bytesPerSample = bytesPerFrame / pWav->channels;
+    if (bytesPerSample == 0 || (bytesPerFrame % pWav->channels) != 0) {
+        return 0;
+    }
+    totalFramesRead = 0;
+    while (framesToRead > 0) {
+        ma_uint64 framesToReadThisIteration = ma_dr_wav_min(framesToRead, sizeof(sampleData)/bytesPerFrame);
+        ma_uint64 framesRead = ma_dr_wav_read_pcm_frames(pWav, framesToReadThisIteration, sampleData);
+        if (framesRead == 0) {
+            break;
+        }
+        MA_DR_WAV_ASSERT(framesRead <= framesToReadThisIteration);
+        samplesRead = framesRead * pWav->channels;
+        if ((samplesRead * bytesPerSample) > sizeof(sampleData)) {
+            MA_DR_WAV_ASSERT(MA_FALSE);
+            break;
+        }
+        ma_dr_wav_alaw_to_f32(pBufferOut, sampleData, (size_t)samplesRead);
+        #ifdef MA_DR_WAV_LIBSNDFILE_COMPAT
+        {
+            if (pWav->container == ma_dr_wav_container_aiff) {
+                ma_uint64 iSample;
+                for (iSample = 0; iSample < samplesRead; iSample += 1) {
+                    pBufferOut[iSample] = -pBufferOut[iSample];
+                }
+            }
+        }
+        #endif
+        pBufferOut += samplesRead;
+        framesToRead -= framesRead;
+        totalFramesRead += framesRead;
+    }
+    return totalFramesRead;
+}
+MA_PRIVATE ma_uint64 ma_dr_wav_read_pcm_frames_f32__mulaw(ma_dr_wav* pWav, ma_uint64 framesToRead, float* pBufferOut)
+{
+    ma_uint64 totalFramesRead;
+    ma_uint8 sampleData[4096] = {0};
+    ma_uint32 bytesPerFrame;
+    ma_uint32 bytesPerSample;
+    ma_uint64 samplesRead;
+    bytesPerFrame = ma_dr_wav_get_bytes_per_pcm_frame(pWav);
+    if (bytesPerFrame == 0) {
+        return 0;
+    }
+    bytesPerSample = bytesPerFrame / pWav->channels;
+    if (bytesPerSample == 0 || (bytesPerFrame % pWav->channels) != 0) {
+        return 0;
+    }
+    totalFramesRead = 0;
+    while (framesToRead > 0) {
+        ma_uint64 framesToReadThisIteration = ma_dr_wav_min(framesToRead, sizeof(sampleData)/bytesPerFrame);
+        ma_uint64 framesRead = ma_dr_wav_read_pcm_frames(pWav, framesToReadThisIteration, sampleData);
+        if (framesRead == 0) {
+            break;
+        }
+        MA_DR_WAV_ASSERT(framesRead <= framesToReadThisIteration);
+        samplesRead = framesRead * pWav->channels;
+        if ((samplesRead * bytesPerSample) > sizeof(sampleData)) {
+            MA_DR_WAV_ASSERT(MA_FALSE);
+            break;
+        }
+        ma_dr_wav_mulaw_to_f32(pBufferOut, sampleData, (size_t)samplesRead);
+        #ifdef MA_DR_WAV_LIBSNDFILE_COMPAT
+        {
+            if (pWav->container == ma_dr_wav_container_aiff) {
+                ma_uint64 iSample;
+                for (iSample = 0; iSample < samplesRead; iSample += 1) {
+                    pBufferOut[iSample] = -pBufferOut[iSample];
+                }
+            }
+        }
+        #endif
+        pBufferOut += samplesRead;
+        framesToRead -= framesRead;
+        totalFramesRead += framesRead;
+    }
+    return totalFramesRead;
+}
+MA_API ma_uint64 ma_dr_wav_read_pcm_frames_f32(ma_dr_wav* pWav, ma_uint64 framesToRead, float* pBufferOut)
+{
+    if (pWav == NULL || framesToRead == 0) {
+        return 0;
+    }
+    if (pBufferOut == NULL) {
+        return ma_dr_wav_read_pcm_frames(pWav, framesToRead, NULL);
+    }
+    if (framesToRead * pWav->channels * sizeof(float) > MA_SIZE_MAX) {
+        framesToRead = MA_SIZE_MAX / sizeof(float) / pWav->channels;
+    }
+    if (pWav->translatedFormatTag == MA_DR_WAVE_FORMAT_PCM) {
+        return ma_dr_wav_read_pcm_frames_f32__pcm(pWav, framesToRead, pBufferOut);
+    }
+    if (pWav->translatedFormatTag == MA_DR_WAVE_FORMAT_ADPCM || pWav->translatedFormatTag == MA_DR_WAVE_FORMAT_DVI_ADPCM) {
+        return ma_dr_wav_read_pcm_frames_f32__msadpcm_ima(pWav, framesToRead, pBufferOut);
+    }
+    if (pWav->translatedFormatTag == MA_DR_WAVE_FORMAT_IEEE_FLOAT) {
+        return ma_dr_wav_read_pcm_frames_f32__ieee(pWav, framesToRead, pBufferOut);
+    }
+    if (pWav->translatedFormatTag == MA_DR_WAVE_FORMAT_ALAW) {
+        return ma_dr_wav_read_pcm_frames_f32__alaw(pWav, framesToRead, pBufferOut);
+    }
+    if (pWav->translatedFormatTag == MA_DR_WAVE_FORMAT_MULAW) {
+        return ma_dr_wav_read_pcm_frames_f32__mulaw(pWav, framesToRead, pBufferOut);
+    }
+    return 0;
+}
(pBufferOut != NULL && ma_dr_wav__is_little_endian() == MA_FALSE) { + ma_dr_wav__bswap_samples_f32(pBufferOut, framesRead*pWav->channels); + } + return framesRead; +} +MA_API ma_uint64 ma_dr_wav_read_pcm_frames_f32be(ma_dr_wav* pWav, ma_uint64 framesToRead, float* pBufferOut) +{ + ma_uint64 framesRead = ma_dr_wav_read_pcm_frames_f32(pWav, framesToRead, pBufferOut); + if (pBufferOut != NULL && ma_dr_wav__is_little_endian() == MA_TRUE) { + ma_dr_wav__bswap_samples_f32(pBufferOut, framesRead*pWav->channels); + } + return framesRead; +} +MA_API void ma_dr_wav_u8_to_f32(float* pOut, const ma_uint8* pIn, size_t sampleCount) +{ + size_t i; + if (pOut == NULL || pIn == NULL) { + return; + } +#ifdef MA_DR_WAV_LIBSNDFILE_COMPAT + for (i = 0; i < sampleCount; ++i) { + *pOut++ = (pIn[i] / 256.0f) * 2 - 1; + } +#else + for (i = 0; i < sampleCount; ++i) { + float x = pIn[i]; + x = x * 0.00784313725490196078f; + x = x - 1; + *pOut++ = x; + } +#endif +} +MA_API void ma_dr_wav_s16_to_f32(float* pOut, const ma_int16* pIn, size_t sampleCount) +{ + size_t i; + if (pOut == NULL || pIn == NULL) { + return; + } + for (i = 0; i < sampleCount; ++i) { + *pOut++ = pIn[i] * 0.000030517578125f; + } +} +MA_API void ma_dr_wav_s24_to_f32(float* pOut, const ma_uint8* pIn, size_t sampleCount) +{ + size_t i; + if (pOut == NULL || pIn == NULL) { + return; + } + for (i = 0; i < sampleCount; ++i) { + double x; + ma_uint32 a = ((ma_uint32)(pIn[i*3+0]) << 8); + ma_uint32 b = ((ma_uint32)(pIn[i*3+1]) << 16); + ma_uint32 c = ((ma_uint32)(pIn[i*3+2]) << 24); + x = (double)((ma_int32)(a | b | c) >> 8); + *pOut++ = (float)(x * 0.00000011920928955078125); + } +} +MA_API void ma_dr_wav_s32_to_f32(float* pOut, const ma_int32* pIn, size_t sampleCount) +{ + size_t i; + if (pOut == NULL || pIn == NULL) { + return; + } + for (i = 0; i < sampleCount; ++i) { + *pOut++ = (float)(pIn[i] / 2147483648.0); + } +} +MA_API void ma_dr_wav_f64_to_f32(float* pOut, const double* pIn, size_t sampleCount) +{ + size_t i; + if (pOut == NULL || pIn == NULL) { + return; + } + for (i = 0; i < sampleCount; ++i) { + *pOut++ = (float)pIn[i]; + } +} +MA_API void ma_dr_wav_alaw_to_f32(float* pOut, const ma_uint8* pIn, size_t sampleCount) +{ + size_t i; + if (pOut == NULL || pIn == NULL) { + return; + } + for (i = 0; i < sampleCount; ++i) { + *pOut++ = ma_dr_wav__alaw_to_s16(pIn[i]) / 32768.0f; + } +} +MA_API void ma_dr_wav_mulaw_to_f32(float* pOut, const ma_uint8* pIn, size_t sampleCount) +{ + size_t i; + if (pOut == NULL || pIn == NULL) { + return; + } + for (i = 0; i < sampleCount; ++i) { + *pOut++ = ma_dr_wav__mulaw_to_s16(pIn[i]) / 32768.0f; + } +} +MA_PRIVATE void ma_dr_wav__pcm_to_s32(ma_int32* pOut, const ma_uint8* pIn, size_t totalSampleCount, unsigned int bytesPerSample) +{ + unsigned int i; + if (bytesPerSample == 1) { + ma_dr_wav_u8_to_s32(pOut, pIn, totalSampleCount); + return; + } + if (bytesPerSample == 2) { + ma_dr_wav_s16_to_s32(pOut, (const ma_int16*)pIn, totalSampleCount); + return; + } + if (bytesPerSample == 3) { + ma_dr_wav_s24_to_s32(pOut, pIn, totalSampleCount); + return; + } + if (bytesPerSample == 4) { + for (i = 0; i < totalSampleCount; ++i) { + *pOut++ = ((const ma_int32*)pIn)[i]; + } + return; + } + if (bytesPerSample > 8) { + MA_DR_WAV_ZERO_MEMORY(pOut, totalSampleCount * sizeof(*pOut)); + return; + } + for (i = 0; i < totalSampleCount; ++i) { + ma_uint64 sample = 0; + unsigned int shift = (8 - bytesPerSample) * 8; + unsigned int j; + for (j = 0; j < bytesPerSample; j += 1) { + MA_DR_WAV_ASSERT(j < 8); + sample |= (ma_uint64)(pIn[j]) << 
shift; + shift += 8; + } + pIn += j; + *pOut++ = (ma_int32)((ma_int64)sample >> 32); + } +} +MA_PRIVATE void ma_dr_wav__ieee_to_s32(ma_int32* pOut, const ma_uint8* pIn, size_t totalSampleCount, unsigned int bytesPerSample) +{ + if (bytesPerSample == 4) { + ma_dr_wav_f32_to_s32(pOut, (const float*)pIn, totalSampleCount); + return; + } else if (bytesPerSample == 8) { + ma_dr_wav_f64_to_s32(pOut, (const double*)pIn, totalSampleCount); + return; + } else { + MA_DR_WAV_ZERO_MEMORY(pOut, totalSampleCount * sizeof(*pOut)); + return; + } +} +MA_PRIVATE ma_uint64 ma_dr_wav_read_pcm_frames_s32__pcm(ma_dr_wav* pWav, ma_uint64 framesToRead, ma_int32* pBufferOut) +{ + ma_uint64 totalFramesRead; + ma_uint8 sampleData[4096] = {0}; + ma_uint32 bytesPerFrame; + ma_uint32 bytesPerSample; + ma_uint64 samplesRead; + if (pWav->translatedFormatTag == MA_DR_WAVE_FORMAT_PCM && pWav->bitsPerSample == 32) { + return ma_dr_wav_read_pcm_frames(pWav, framesToRead, pBufferOut); + } + bytesPerFrame = ma_dr_wav_get_bytes_per_pcm_frame(pWav); + if (bytesPerFrame == 0) { + return 0; + } + bytesPerSample = bytesPerFrame / pWav->channels; + if (bytesPerSample == 0 || (bytesPerFrame % pWav->channels) != 0) { + return 0; + } + totalFramesRead = 0; + while (framesToRead > 0) { + ma_uint64 framesToReadThisIteration = ma_dr_wav_min(framesToRead, sizeof(sampleData)/bytesPerFrame); + ma_uint64 framesRead = ma_dr_wav_read_pcm_frames(pWav, framesToReadThisIteration, sampleData); + if (framesRead == 0) { + break; + } + MA_DR_WAV_ASSERT(framesRead <= framesToReadThisIteration); + samplesRead = framesRead * pWav->channels; + if ((samplesRead * bytesPerSample) > sizeof(sampleData)) { + MA_DR_WAV_ASSERT(MA_FALSE); + break; + } + ma_dr_wav__pcm_to_s32(pBufferOut, sampleData, (size_t)samplesRead, bytesPerSample); + pBufferOut += samplesRead; + framesToRead -= framesRead; + totalFramesRead += framesRead; + } + return totalFramesRead; +} +MA_PRIVATE ma_uint64 ma_dr_wav_read_pcm_frames_s32__msadpcm_ima(ma_dr_wav* pWav, ma_uint64 framesToRead, ma_int32* pBufferOut) +{ + ma_uint64 totalFramesRead = 0; + ma_int16 samples16[2048]; + while (framesToRead > 0) { + ma_uint64 framesToReadThisIteration = ma_dr_wav_min(framesToRead, ma_dr_wav_countof(samples16)/pWav->channels); + ma_uint64 framesRead = ma_dr_wav_read_pcm_frames_s16(pWav, framesToReadThisIteration, samples16); + if (framesRead == 0) { + break; + } + MA_DR_WAV_ASSERT(framesRead <= framesToReadThisIteration); + ma_dr_wav_s16_to_s32(pBufferOut, samples16, (size_t)(framesRead*pWav->channels)); + pBufferOut += framesRead*pWav->channels; + framesToRead -= framesRead; + totalFramesRead += framesRead; + } + return totalFramesRead; +} +MA_PRIVATE ma_uint64 ma_dr_wav_read_pcm_frames_s32__ieee(ma_dr_wav* pWav, ma_uint64 framesToRead, ma_int32* pBufferOut) +{ + ma_uint64 totalFramesRead; + ma_uint8 sampleData[4096] = {0}; + ma_uint32 bytesPerFrame; + ma_uint32 bytesPerSample; + ma_uint64 samplesRead; + bytesPerFrame = ma_dr_wav_get_bytes_per_pcm_frame(pWav); + if (bytesPerFrame == 0) { + return 0; + } + bytesPerSample = bytesPerFrame / pWav->channels; + if (bytesPerSample == 0 || (bytesPerFrame % pWav->channels) != 0) { + return 0; + } + totalFramesRead = 0; + while (framesToRead > 0) { + ma_uint64 framesToReadThisIteration = ma_dr_wav_min(framesToRead, sizeof(sampleData)/bytesPerFrame); + ma_uint64 framesRead = ma_dr_wav_read_pcm_frames(pWav, framesToReadThisIteration, sampleData); + if (framesRead == 0) { + break; + } + MA_DR_WAV_ASSERT(framesRead <= framesToReadThisIteration); + samplesRead = 
framesRead * pWav->channels; + if ((samplesRead * bytesPerSample) > sizeof(sampleData)) { + MA_DR_WAV_ASSERT(MA_FALSE); + break; + } + ma_dr_wav__ieee_to_s32(pBufferOut, sampleData, (size_t)samplesRead, bytesPerSample); + pBufferOut += samplesRead; + framesToRead -= framesRead; + totalFramesRead += framesRead; + } + return totalFramesRead; +} +MA_PRIVATE ma_uint64 ma_dr_wav_read_pcm_frames_s32__alaw(ma_dr_wav* pWav, ma_uint64 framesToRead, ma_int32* pBufferOut) +{ + ma_uint64 totalFramesRead; + ma_uint8 sampleData[4096] = {0}; + ma_uint32 bytesPerFrame; + ma_uint32 bytesPerSample; + ma_uint64 samplesRead; + bytesPerFrame = ma_dr_wav_get_bytes_per_pcm_frame(pWav); + if (bytesPerFrame == 0) { + return 0; + } + bytesPerSample = bytesPerFrame / pWav->channels; + if (bytesPerSample == 0 || (bytesPerFrame % pWav->channels) != 0) { + return 0; + } + totalFramesRead = 0; + while (framesToRead > 0) { + ma_uint64 framesToReadThisIteration = ma_dr_wav_min(framesToRead, sizeof(sampleData)/bytesPerFrame); + ma_uint64 framesRead = ma_dr_wav_read_pcm_frames(pWav, framesToReadThisIteration, sampleData); + if (framesRead == 0) { + break; + } + MA_DR_WAV_ASSERT(framesRead <= framesToReadThisIteration); + samplesRead = framesRead * pWav->channels; + if ((samplesRead * bytesPerSample) > sizeof(sampleData)) { + MA_DR_WAV_ASSERT(MA_FALSE); + break; + } + ma_dr_wav_alaw_to_s32(pBufferOut, sampleData, (size_t)samplesRead); + #ifdef MA_DR_WAV_LIBSNDFILE_COMPAT + { + if (pWav->container == ma_dr_wav_container_aiff) { + ma_uint64 iSample; + for (iSample = 0; iSample < samplesRead; iSample += 1) { + pBufferOut[iSample] = -pBufferOut[iSample]; + } + } + } + #endif + pBufferOut += samplesRead; + framesToRead -= framesRead; + totalFramesRead += framesRead; + } + return totalFramesRead; +} +MA_PRIVATE ma_uint64 ma_dr_wav_read_pcm_frames_s32__mulaw(ma_dr_wav* pWav, ma_uint64 framesToRead, ma_int32* pBufferOut) +{ + ma_uint64 totalFramesRead; + ma_uint8 sampleData[4096] = {0}; + ma_uint32 bytesPerFrame; + ma_uint32 bytesPerSample; + ma_uint64 samplesRead; + bytesPerFrame = ma_dr_wav_get_bytes_per_pcm_frame(pWav); + if (bytesPerFrame == 0) { + return 0; + } + bytesPerSample = bytesPerFrame / pWav->channels; + if (bytesPerSample == 0 || (bytesPerFrame % pWav->channels) != 0) { + return 0; + } + totalFramesRead = 0; + while (framesToRead > 0) { + ma_uint64 framesToReadThisIteration = ma_dr_wav_min(framesToRead, sizeof(sampleData)/bytesPerFrame); + ma_uint64 framesRead = ma_dr_wav_read_pcm_frames(pWav, framesToReadThisIteration, sampleData); + if (framesRead == 0) { + break; + } + MA_DR_WAV_ASSERT(framesRead <= framesToReadThisIteration); + samplesRead = framesRead * pWav->channels; + if ((samplesRead * bytesPerSample) > sizeof(sampleData)) { + MA_DR_WAV_ASSERT(MA_FALSE); + break; + } + ma_dr_wav_mulaw_to_s32(pBufferOut, sampleData, (size_t)samplesRead); + #ifdef MA_DR_WAV_LIBSNDFILE_COMPAT + { + if (pWav->container == ma_dr_wav_container_aiff) { + ma_uint64 iSample; + for (iSample = 0; iSample < samplesRead; iSample += 1) { + pBufferOut[iSample] = -pBufferOut[iSample]; + } + } + } + #endif + pBufferOut += samplesRead; + framesToRead -= framesRead; + totalFramesRead += framesRead; + } + return totalFramesRead; +} +MA_API ma_uint64 ma_dr_wav_read_pcm_frames_s32(ma_dr_wav* pWav, ma_uint64 framesToRead, ma_int32* pBufferOut) +{ + if (pWav == NULL || framesToRead == 0) { + return 0; + } + if (pBufferOut == NULL) { + return ma_dr_wav_read_pcm_frames(pWav, framesToRead, NULL); + } + if (framesToRead * pWav->channels * 
sizeof(ma_int32) > MA_SIZE_MAX) { + framesToRead = MA_SIZE_MAX / sizeof(ma_int32) / pWav->channels; + } + if (pWav->translatedFormatTag == MA_DR_WAVE_FORMAT_PCM) { + return ma_dr_wav_read_pcm_frames_s32__pcm(pWav, framesToRead, pBufferOut); + } + if (pWav->translatedFormatTag == MA_DR_WAVE_FORMAT_ADPCM || pWav->translatedFormatTag == MA_DR_WAVE_FORMAT_DVI_ADPCM) { + return ma_dr_wav_read_pcm_frames_s32__msadpcm_ima(pWav, framesToRead, pBufferOut); + } + if (pWav->translatedFormatTag == MA_DR_WAVE_FORMAT_IEEE_FLOAT) { + return ma_dr_wav_read_pcm_frames_s32__ieee(pWav, framesToRead, pBufferOut); + } + if (pWav->translatedFormatTag == MA_DR_WAVE_FORMAT_ALAW) { + return ma_dr_wav_read_pcm_frames_s32__alaw(pWav, framesToRead, pBufferOut); + } + if (pWav->translatedFormatTag == MA_DR_WAVE_FORMAT_MULAW) { + return ma_dr_wav_read_pcm_frames_s32__mulaw(pWav, framesToRead, pBufferOut); + } + return 0; +} +MA_API ma_uint64 ma_dr_wav_read_pcm_frames_s32le(ma_dr_wav* pWav, ma_uint64 framesToRead, ma_int32* pBufferOut) +{ + ma_uint64 framesRead = ma_dr_wav_read_pcm_frames_s32(pWav, framesToRead, pBufferOut); + if (pBufferOut != NULL && ma_dr_wav__is_little_endian() == MA_FALSE) { + ma_dr_wav__bswap_samples_s32(pBufferOut, framesRead*pWav->channels); + } + return framesRead; +} +MA_API ma_uint64 ma_dr_wav_read_pcm_frames_s32be(ma_dr_wav* pWav, ma_uint64 framesToRead, ma_int32* pBufferOut) +{ + ma_uint64 framesRead = ma_dr_wav_read_pcm_frames_s32(pWav, framesToRead, pBufferOut); + if (pBufferOut != NULL && ma_dr_wav__is_little_endian() == MA_TRUE) { + ma_dr_wav__bswap_samples_s32(pBufferOut, framesRead*pWav->channels); + } + return framesRead; +} +MA_API void ma_dr_wav_u8_to_s32(ma_int32* pOut, const ma_uint8* pIn, size_t sampleCount) +{ + size_t i; + if (pOut == NULL || pIn == NULL) { + return; + } + for (i = 0; i < sampleCount; ++i) { + *pOut++ = ((int)pIn[i] - 128) << 24; + } +} +MA_API void ma_dr_wav_s16_to_s32(ma_int32* pOut, const ma_int16* pIn, size_t sampleCount) +{ + size_t i; + if (pOut == NULL || pIn == NULL) { + return; + } + for (i = 0; i < sampleCount; ++i) { + *pOut++ = pIn[i] << 16; + } +} +MA_API void ma_dr_wav_s24_to_s32(ma_int32* pOut, const ma_uint8* pIn, size_t sampleCount) +{ + size_t i; + if (pOut == NULL || pIn == NULL) { + return; + } + for (i = 0; i < sampleCount; ++i) { + unsigned int s0 = pIn[i*3 + 0]; + unsigned int s1 = pIn[i*3 + 1]; + unsigned int s2 = pIn[i*3 + 2]; + ma_int32 sample32 = (ma_int32)((s0 << 8) | (s1 << 16) | (s2 << 24)); + *pOut++ = sample32; + } +} +MA_API void ma_dr_wav_f32_to_s32(ma_int32* pOut, const float* pIn, size_t sampleCount) +{ + size_t i; + if (pOut == NULL || pIn == NULL) { + return; + } + for (i = 0; i < sampleCount; ++i) { + *pOut++ = (ma_int32)(2147483648.0f * pIn[i]); + } +} +MA_API void ma_dr_wav_f64_to_s32(ma_int32* pOut, const double* pIn, size_t sampleCount) +{ + size_t i; + if (pOut == NULL || pIn == NULL) { + return; + } + for (i = 0; i < sampleCount; ++i) { + *pOut++ = (ma_int32)(2147483648.0 * pIn[i]); + } +} +MA_API void ma_dr_wav_alaw_to_s32(ma_int32* pOut, const ma_uint8* pIn, size_t sampleCount) +{ + size_t i; + if (pOut == NULL || pIn == NULL) { + return; + } + for (i = 0; i < sampleCount; ++i) { + *pOut++ = ((ma_int32)ma_dr_wav__alaw_to_s16(pIn[i])) << 16; + } +} +MA_API void ma_dr_wav_mulaw_to_s32(ma_int32* pOut, const ma_uint8* pIn, size_t sampleCount) +{ + size_t i; + if (pOut == NULL || pIn == NULL) { + return; + } + for (i= 0; i < sampleCount; ++i) { + *pOut++ = ((ma_int32)ma_dr_wav__mulaw_to_s16(pIn[i])) << 16; + } +} 
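+/*
+Editor's note, not part of the vendored dr_wav sources: the per-sample
+converters above back the f32/s16/s32 frame readers, and a typical consumer
+goes through the one-shot open-and-read helpers that follow. A minimal usage
+sketch of that API; "input.wav" is a hypothetical file name and the block is
+kept under #if 0, matching this file's convention for non-compiled code.
+*/
+#if 0
+static void example_load_whole_wav_as_f32(void)
+{
+    unsigned int channels;
+    unsigned int sampleRate;
+    ma_uint64 frameCount;
+    /* Decodes the entire file to interleaved f32 frames, nominally in [-1, 1]. */
+    float* pFrames = ma_dr_wav_open_file_and_read_pcm_frames_f32("input.wav", &channels, &sampleRate, &frameCount, NULL);
+    if (pFrames != NULL) {
+        /* ... consume frameCount * channels samples here ... */
+        ma_dr_wav_free(pFrames, NULL);  /* free with the same allocation callbacks (NULL = defaults) */
+    }
+}
+#endif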
+MA_PRIVATE ma_int16* ma_dr_wav__read_pcm_frames_and_close_s16(ma_dr_wav* pWav, unsigned int* channels, unsigned int* sampleRate, ma_uint64* totalFrameCount) +{ + ma_uint64 sampleDataSize; + ma_int16* pSampleData; + ma_uint64 framesRead; + MA_DR_WAV_ASSERT(pWav != NULL); + sampleDataSize = pWav->totalPCMFrameCount * pWav->channels * sizeof(ma_int16); + if (sampleDataSize > MA_SIZE_MAX) { + ma_dr_wav_uninit(pWav); + return NULL; + } + pSampleData = (ma_int16*)ma_dr_wav__malloc_from_callbacks((size_t)sampleDataSize, &pWav->allocationCallbacks); + if (pSampleData == NULL) { + ma_dr_wav_uninit(pWav); + return NULL; + } + framesRead = ma_dr_wav_read_pcm_frames_s16(pWav, (size_t)pWav->totalPCMFrameCount, pSampleData); + if (framesRead != pWav->totalPCMFrameCount) { + ma_dr_wav__free_from_callbacks(pSampleData, &pWav->allocationCallbacks); + ma_dr_wav_uninit(pWav); + return NULL; + } + ma_dr_wav_uninit(pWav); + if (sampleRate) { + *sampleRate = pWav->sampleRate; + } + if (channels) { + *channels = pWav->channels; + } + if (totalFrameCount) { + *totalFrameCount = pWav->totalPCMFrameCount; + } + return pSampleData; +} +MA_PRIVATE float* ma_dr_wav__read_pcm_frames_and_close_f32(ma_dr_wav* pWav, unsigned int* channels, unsigned int* sampleRate, ma_uint64* totalFrameCount) +{ + ma_uint64 sampleDataSize; + float* pSampleData; + ma_uint64 framesRead; + MA_DR_WAV_ASSERT(pWav != NULL); + sampleDataSize = pWav->totalPCMFrameCount * pWav->channels * sizeof(float); + if (sampleDataSize > MA_SIZE_MAX) { + ma_dr_wav_uninit(pWav); + return NULL; + } + pSampleData = (float*)ma_dr_wav__malloc_from_callbacks((size_t)sampleDataSize, &pWav->allocationCallbacks); + if (pSampleData == NULL) { + ma_dr_wav_uninit(pWav); + return NULL; + } + framesRead = ma_dr_wav_read_pcm_frames_f32(pWav, (size_t)pWav->totalPCMFrameCount, pSampleData); + if (framesRead != pWav->totalPCMFrameCount) { + ma_dr_wav__free_from_callbacks(pSampleData, &pWav->allocationCallbacks); + ma_dr_wav_uninit(pWav); + return NULL; + } + ma_dr_wav_uninit(pWav); + if (sampleRate) { + *sampleRate = pWav->sampleRate; + } + if (channels) { + *channels = pWav->channels; + } + if (totalFrameCount) { + *totalFrameCount = pWav->totalPCMFrameCount; + } + return pSampleData; +} +MA_PRIVATE ma_int32* ma_dr_wav__read_pcm_frames_and_close_s32(ma_dr_wav* pWav, unsigned int* channels, unsigned int* sampleRate, ma_uint64* totalFrameCount) +{ + ma_uint64 sampleDataSize; + ma_int32* pSampleData; + ma_uint64 framesRead; + MA_DR_WAV_ASSERT(pWav != NULL); + sampleDataSize = pWav->totalPCMFrameCount * pWav->channels * sizeof(ma_int32); + if (sampleDataSize > MA_SIZE_MAX) { + ma_dr_wav_uninit(pWav); + return NULL; + } + pSampleData = (ma_int32*)ma_dr_wav__malloc_from_callbacks((size_t)sampleDataSize, &pWav->allocationCallbacks); + if (pSampleData == NULL) { + ma_dr_wav_uninit(pWav); + return NULL; + } + framesRead = ma_dr_wav_read_pcm_frames_s32(pWav, (size_t)pWav->totalPCMFrameCount, pSampleData); + if (framesRead != pWav->totalPCMFrameCount) { + ma_dr_wav__free_from_callbacks(pSampleData, &pWav->allocationCallbacks); + ma_dr_wav_uninit(pWav); + return NULL; + } + ma_dr_wav_uninit(pWav); + if (sampleRate) { + *sampleRate = pWav->sampleRate; + } + if (channels) { + *channels = pWav->channels; + } + if (totalFrameCount) { + *totalFrameCount = pWav->totalPCMFrameCount; + } + return pSampleData; +} +MA_API ma_int16* ma_dr_wav_open_and_read_pcm_frames_s16(ma_dr_wav_read_proc onRead, ma_dr_wav_seek_proc onSeek, void* pUserData, unsigned int* channelsOut, unsigned int* sampleRateOut, 
ma_uint64* totalFrameCountOut, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_dr_wav wav; + if (channelsOut) { + *channelsOut = 0; + } + if (sampleRateOut) { + *sampleRateOut = 0; + } + if (totalFrameCountOut) { + *totalFrameCountOut = 0; + } + if (!ma_dr_wav_init(&wav, onRead, onSeek, pUserData, pAllocationCallbacks)) { + return NULL; + } + return ma_dr_wav__read_pcm_frames_and_close_s16(&wav, channelsOut, sampleRateOut, totalFrameCountOut); +} +MA_API float* ma_dr_wav_open_and_read_pcm_frames_f32(ma_dr_wav_read_proc onRead, ma_dr_wav_seek_proc onSeek, void* pUserData, unsigned int* channelsOut, unsigned int* sampleRateOut, ma_uint64* totalFrameCountOut, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_dr_wav wav; + if (channelsOut) { + *channelsOut = 0; + } + if (sampleRateOut) { + *sampleRateOut = 0; + } + if (totalFrameCountOut) { + *totalFrameCountOut = 0; + } + if (!ma_dr_wav_init(&wav, onRead, onSeek, pUserData, pAllocationCallbacks)) { + return NULL; + } + return ma_dr_wav__read_pcm_frames_and_close_f32(&wav, channelsOut, sampleRateOut, totalFrameCountOut); +} +MA_API ma_int32* ma_dr_wav_open_and_read_pcm_frames_s32(ma_dr_wav_read_proc onRead, ma_dr_wav_seek_proc onSeek, void* pUserData, unsigned int* channelsOut, unsigned int* sampleRateOut, ma_uint64* totalFrameCountOut, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_dr_wav wav; + if (channelsOut) { + *channelsOut = 0; + } + if (sampleRateOut) { + *sampleRateOut = 0; + } + if (totalFrameCountOut) { + *totalFrameCountOut = 0; + } + if (!ma_dr_wav_init(&wav, onRead, onSeek, pUserData, pAllocationCallbacks)) { + return NULL; + } + return ma_dr_wav__read_pcm_frames_and_close_s32(&wav, channelsOut, sampleRateOut, totalFrameCountOut); +} +#ifndef MA_DR_WAV_NO_STDIO +MA_API ma_int16* ma_dr_wav_open_file_and_read_pcm_frames_s16(const char* filename, unsigned int* channelsOut, unsigned int* sampleRateOut, ma_uint64* totalFrameCountOut, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_dr_wav wav; + if (channelsOut) { + *channelsOut = 0; + } + if (sampleRateOut) { + *sampleRateOut = 0; + } + if (totalFrameCountOut) { + *totalFrameCountOut = 0; + } + if (!ma_dr_wav_init_file(&wav, filename, pAllocationCallbacks)) { + return NULL; + } + return ma_dr_wav__read_pcm_frames_and_close_s16(&wav, channelsOut, sampleRateOut, totalFrameCountOut); +} +MA_API float* ma_dr_wav_open_file_and_read_pcm_frames_f32(const char* filename, unsigned int* channelsOut, unsigned int* sampleRateOut, ma_uint64* totalFrameCountOut, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_dr_wav wav; + if (channelsOut) { + *channelsOut = 0; + } + if (sampleRateOut) { + *sampleRateOut = 0; + } + if (totalFrameCountOut) { + *totalFrameCountOut = 0; + } + if (!ma_dr_wav_init_file(&wav, filename, pAllocationCallbacks)) { + return NULL; + } + return ma_dr_wav__read_pcm_frames_and_close_f32(&wav, channelsOut, sampleRateOut, totalFrameCountOut); +} +MA_API ma_int32* ma_dr_wav_open_file_and_read_pcm_frames_s32(const char* filename, unsigned int* channelsOut, unsigned int* sampleRateOut, ma_uint64* totalFrameCountOut, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_dr_wav wav; + if (channelsOut) { + *channelsOut = 0; + } + if (sampleRateOut) { + *sampleRateOut = 0; + } + if (totalFrameCountOut) { + *totalFrameCountOut = 0; + } + if (!ma_dr_wav_init_file(&wav, filename, pAllocationCallbacks)) { + return NULL; + } + return ma_dr_wav__read_pcm_frames_and_close_s32(&wav, channelsOut, sampleRateOut, 
totalFrameCountOut); +} +#ifndef MA_DR_WAV_NO_WCHAR +MA_API ma_int16* ma_dr_wav_open_file_and_read_pcm_frames_s16_w(const wchar_t* filename, unsigned int* channelsOut, unsigned int* sampleRateOut, ma_uint64* totalFrameCountOut, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_dr_wav wav; + if (sampleRateOut) { + *sampleRateOut = 0; + } + if (channelsOut) { + *channelsOut = 0; + } + if (totalFrameCountOut) { + *totalFrameCountOut = 0; + } + if (!ma_dr_wav_init_file_w(&wav, filename, pAllocationCallbacks)) { + return NULL; + } + return ma_dr_wav__read_pcm_frames_and_close_s16(&wav, channelsOut, sampleRateOut, totalFrameCountOut); +} +MA_API float* ma_dr_wav_open_file_and_read_pcm_frames_f32_w(const wchar_t* filename, unsigned int* channelsOut, unsigned int* sampleRateOut, ma_uint64* totalFrameCountOut, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_dr_wav wav; + if (sampleRateOut) { + *sampleRateOut = 0; + } + if (channelsOut) { + *channelsOut = 0; + } + if (totalFrameCountOut) { + *totalFrameCountOut = 0; + } + if (!ma_dr_wav_init_file_w(&wav, filename, pAllocationCallbacks)) { + return NULL; + } + return ma_dr_wav__read_pcm_frames_and_close_f32(&wav, channelsOut, sampleRateOut, totalFrameCountOut); +} +MA_API ma_int32* ma_dr_wav_open_file_and_read_pcm_frames_s32_w(const wchar_t* filename, unsigned int* channelsOut, unsigned int* sampleRateOut, ma_uint64* totalFrameCountOut, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_dr_wav wav; + if (sampleRateOut) { + *sampleRateOut = 0; + } + if (channelsOut) { + *channelsOut = 0; + } + if (totalFrameCountOut) { + *totalFrameCountOut = 0; + } + if (!ma_dr_wav_init_file_w(&wav, filename, pAllocationCallbacks)) { + return NULL; + } + return ma_dr_wav__read_pcm_frames_and_close_s32(&wav, channelsOut, sampleRateOut, totalFrameCountOut); +} +#endif +#endif +MA_API ma_int16* ma_dr_wav_open_memory_and_read_pcm_frames_s16(const void* data, size_t dataSize, unsigned int* channelsOut, unsigned int* sampleRateOut, ma_uint64* totalFrameCountOut, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_dr_wav wav; + if (channelsOut) { + *channelsOut = 0; + } + if (sampleRateOut) { + *sampleRateOut = 0; + } + if (totalFrameCountOut) { + *totalFrameCountOut = 0; + } + if (!ma_dr_wav_init_memory(&wav, data, dataSize, pAllocationCallbacks)) { + return NULL; + } + return ma_dr_wav__read_pcm_frames_and_close_s16(&wav, channelsOut, sampleRateOut, totalFrameCountOut); +} +MA_API float* ma_dr_wav_open_memory_and_read_pcm_frames_f32(const void* data, size_t dataSize, unsigned int* channelsOut, unsigned int* sampleRateOut, ma_uint64* totalFrameCountOut, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_dr_wav wav; + if (channelsOut) { + *channelsOut = 0; + } + if (sampleRateOut) { + *sampleRateOut = 0; + } + if (totalFrameCountOut) { + *totalFrameCountOut = 0; + } + if (!ma_dr_wav_init_memory(&wav, data, dataSize, pAllocationCallbacks)) { + return NULL; + } + return ma_dr_wav__read_pcm_frames_and_close_f32(&wav, channelsOut, sampleRateOut, totalFrameCountOut); +} +MA_API ma_int32* ma_dr_wav_open_memory_and_read_pcm_frames_s32(const void* data, size_t dataSize, unsigned int* channelsOut, unsigned int* sampleRateOut, ma_uint64* totalFrameCountOut, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_dr_wav wav; + if (channelsOut) { + *channelsOut = 0; + } + if (sampleRateOut) { + *sampleRateOut = 0; + } + if (totalFrameCountOut) { + *totalFrameCountOut = 0; + } + if (!ma_dr_wav_init_memory(&wav, data, dataSize, 
pAllocationCallbacks)) {
+        return NULL;
+    }
+    return ma_dr_wav__read_pcm_frames_and_close_s32(&wav, channelsOut, sampleRateOut, totalFrameCountOut);
+}
+#endif
+MA_API void ma_dr_wav_free(void* p, const ma_allocation_callbacks* pAllocationCallbacks)
+{
+    if (pAllocationCallbacks != NULL) {
+        ma_dr_wav__free_from_callbacks(p, pAllocationCallbacks);
+    } else {
+        ma_dr_wav__free_default(p, NULL);
+    }
+}
+MA_API ma_uint16 ma_dr_wav_bytes_to_u16(const ma_uint8* data)
+{
+    return ((ma_uint16)data[0] << 0) | ((ma_uint16)data[1] << 8);
+}
+MA_API ma_int16 ma_dr_wav_bytes_to_s16(const ma_uint8* data)
+{
+    return (ma_int16)ma_dr_wav_bytes_to_u16(data);
+}
+MA_API ma_uint32 ma_dr_wav_bytes_to_u32(const ma_uint8* data)
+{
+    return ma_dr_wav_bytes_to_u32_le(data);
+}
+MA_API float ma_dr_wav_bytes_to_f32(const ma_uint8* data)
+{
+    union {
+        ma_uint32 u32;
+        float f32;
+    } value;
+    value.u32 = ma_dr_wav_bytes_to_u32(data);
+    return value.f32;
+}
+MA_API ma_int32 ma_dr_wav_bytes_to_s32(const ma_uint8* data)
+{
+    return (ma_int32)ma_dr_wav_bytes_to_u32(data);
+}
+MA_API ma_uint64 ma_dr_wav_bytes_to_u64(const ma_uint8* data)
+{
+    return
+        ((ma_uint64)data[0] << 0) | ((ma_uint64)data[1] << 8) | ((ma_uint64)data[2] << 16) | ((ma_uint64)data[3] << 24) |
+        ((ma_uint64)data[4] << 32) | ((ma_uint64)data[5] << 40) | ((ma_uint64)data[6] << 48) | ((ma_uint64)data[7] << 56);
+}
+MA_API ma_int64 ma_dr_wav_bytes_to_s64(const ma_uint8* data)
+{
+    return (ma_int64)ma_dr_wav_bytes_to_u64(data);
+}
+MA_API ma_bool32 ma_dr_wav_guid_equal(const ma_uint8 a[16], const ma_uint8 b[16])
+{
+    int i;
+    for (i = 0; i < 16; i += 1) {
+        if (a[i] != b[i]) {
+            return MA_FALSE;
+        }
+    }
+    return MA_TRUE;
+}
+MA_API ma_bool32 ma_dr_wav_fourcc_equal(const ma_uint8* a, const char* b)
+{
+    return
+        a[0] == b[0] &&
+        a[1] == b[1] &&
+        a[2] == b[2] &&
+        a[3] == b[3];
+}
+#ifdef __MRC__
+#pragma options opt reset
+#endif
+#endif
+/* dr_wav_c end */
+#endif /* MA_DR_WAV_IMPLEMENTATION */
+#endif /* MA_NO_WAV */
+
+#if !defined(MA_NO_FLAC) && !defined(MA_NO_DECODING)
+#if !defined(MA_DR_FLAC_IMPLEMENTATION)
+/* dr_flac_c begin */
+#ifndef ma_dr_flac_c
+#define ma_dr_flac_c
+#if defined(__clang__) || (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)))
+    #pragma GCC diagnostic push
+    #if __GNUC__ >= 7
+    #pragma GCC diagnostic ignored "-Wimplicit-fallthrough"
+    #endif
+#endif
+#ifdef __linux__
+    #ifndef _BSD_SOURCE
+        #define _BSD_SOURCE
+    #endif
+    #ifndef _DEFAULT_SOURCE
+        #define _DEFAULT_SOURCE
+    #endif
+    #ifndef __USE_BSD
+        #define __USE_BSD
+    #endif
+    #include <endian.h>
+#endif
+#include <stdlib.h>
+#include <string.h>
+#if !defined(MA_DR_FLAC_NO_SIMD)
+    #if defined(MA_X64) || defined(MA_X86)
+        #if defined(_MSC_VER) && !defined(__clang__)
+            #if _MSC_VER >= 1400 && !defined(MA_DR_FLAC_NO_SSE2)
+                #define MA_DR_FLAC_SUPPORT_SSE2
+            #endif
+            #if _MSC_VER >= 1600 && !defined(MA_DR_FLAC_NO_SSE41)
+                #define MA_DR_FLAC_SUPPORT_SSE41
+            #endif
+        #elif defined(__clang__) || (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)))
+            #if defined(__SSE2__) && !defined(MA_DR_FLAC_NO_SSE2)
+                #define MA_DR_FLAC_SUPPORT_SSE2
+            #endif
+            #if defined(__SSE4_1__) && !defined(MA_DR_FLAC_NO_SSE41)
+                #define MA_DR_FLAC_SUPPORT_SSE41
+            #endif
+        #endif
+        #if !defined(__GNUC__) && !defined(__clang__) && defined(__has_include)
+            #if !defined(MA_DR_FLAC_SUPPORT_SSE2) && !defined(MA_DR_FLAC_NO_SSE2) && __has_include(<emmintrin.h>)
+                #define MA_DR_FLAC_SUPPORT_SSE2
+            #endif
+            #if !defined(MA_DR_FLAC_SUPPORT_SSE41) && !defined(MA_DR_FLAC_NO_SSE41) && __has_include(<smmintrin.h>)
+                #define MA_DR_FLAC_SUPPORT_SSE41
+            #endif
+        #endif
+        #if defined(MA_DR_FLAC_SUPPORT_SSE41)
+            #include <smmintrin.h>
+        #elif defined(MA_DR_FLAC_SUPPORT_SSE2)
+            #include <emmintrin.h>
+        #endif
+    #endif
+    #if defined(MA_ARM)
+        #if !defined(MA_DR_FLAC_NO_NEON) && (defined(__ARM_NEON) || defined(__aarch64__) || defined(_M_ARM64))
+            #define MA_DR_FLAC_SUPPORT_NEON
+            #include <arm_neon.h>
+        #endif
+    #endif
+#endif
+#if !defined(MA_DR_FLAC_NO_SIMD) && (defined(MA_X86) || defined(MA_X64))
+    #if defined(_MSC_VER) && !defined(__clang__)
+        #if _MSC_VER >= 1400
+            #include <intrin.h>
+            static void ma_dr_flac__cpuid(int info[4], int fid)
+            {
+                __cpuid(info, fid);
+            }
+        #else
+            #define MA_DR_FLAC_NO_CPUID
+        #endif
+    #else
+        #if defined(__GNUC__) || defined(__clang__)
+            static void ma_dr_flac__cpuid(int info[4], int fid)
+            {
+                #if defined(MA_X86) && defined(__PIC__)
+                    __asm__ __volatile__ (
+                        "xchg{l} {%%}ebx, %k1;"
+                        "cpuid;"
+                        "xchg{l} {%%}ebx, %k1;"
+                        : "=a"(info[0]), "=&r"(info[1]), "=c"(info[2]), "=d"(info[3]) : "a"(fid), "c"(0)
+                    );
+                #else
+                    __asm__ __volatile__ (
+                        "cpuid" : "=a"(info[0]), "=b"(info[1]), "=c"(info[2]), "=d"(info[3]) : "a"(fid), "c"(0)
+                    );
+                #endif
+            }
+        #else
+            #define MA_DR_FLAC_NO_CPUID
+        #endif
+    #endif
+#else
+    #define MA_DR_FLAC_NO_CPUID
+#endif
+static MA_INLINE ma_bool32 ma_dr_flac_has_sse2(void)
+{
+#if defined(MA_DR_FLAC_SUPPORT_SSE2)
+    #if (defined(MA_X64) || defined(MA_X86)) && !defined(MA_DR_FLAC_NO_SSE2)
+        #if defined(MA_X64)
+            return MA_TRUE;
+        #elif (defined(_M_IX86_FP) && _M_IX86_FP == 2) || defined(__SSE2__)
+            return MA_TRUE;
+        #else
+            #if defined(MA_DR_FLAC_NO_CPUID)
+                return MA_FALSE;
+            #else
+                int info[4];
+                ma_dr_flac__cpuid(info, 1);
+                return (info[3] & (1 << 26)) != 0;
+            #endif
+        #endif
+    #else
+        return MA_FALSE;
+    #endif
+#else
+    return MA_FALSE;
+#endif
+}
+static MA_INLINE ma_bool32 ma_dr_flac_has_sse41(void)
+{
+#if defined(MA_DR_FLAC_SUPPORT_SSE41)
+    #if (defined(MA_X64) || defined(MA_X86)) && !defined(MA_DR_FLAC_NO_SSE41)
+        #if defined(__SSE4_1__) || defined(__AVX__)
+            return MA_TRUE;
+        #else
+            #if defined(MA_DR_FLAC_NO_CPUID)
+                return MA_FALSE;
+            #else
+                int info[4];
+                ma_dr_flac__cpuid(info, 1);
+                return (info[2] & (1 << 19)) != 0;
+            #endif
+        #endif
+    #else
+        return MA_FALSE;
+    #endif
+#else
+    return MA_FALSE;
+#endif
+}
+#if defined(_MSC_VER) && _MSC_VER >= 1500 && (defined(MA_X86) || defined(MA_X64)) && !defined(__clang__)
+    #define MA_DR_FLAC_HAS_LZCNT_INTRINSIC
+#elif (defined(__GNUC__) && ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7)))
+    #define MA_DR_FLAC_HAS_LZCNT_INTRINSIC
+#elif defined(__clang__)
+    #if defined(__has_builtin)
+        #if __has_builtin(__builtin_clzll) || __has_builtin(__builtin_clzl)
+            #define MA_DR_FLAC_HAS_LZCNT_INTRINSIC
+        #endif
+    #endif
+#endif
+#if defined(_MSC_VER) && _MSC_VER >= 1400 && !defined(__clang__)
+    #define MA_DR_FLAC_HAS_BYTESWAP16_INTRINSIC
+    #define MA_DR_FLAC_HAS_BYTESWAP32_INTRINSIC
+    #define MA_DR_FLAC_HAS_BYTESWAP64_INTRINSIC
+#elif defined(__clang__)
+    #if defined(__has_builtin)
+        #if __has_builtin(__builtin_bswap16)
+            #define MA_DR_FLAC_HAS_BYTESWAP16_INTRINSIC
+        #endif
+        #if __has_builtin(__builtin_bswap32)
+            #define MA_DR_FLAC_HAS_BYTESWAP32_INTRINSIC
+        #endif
+        #if __has_builtin(__builtin_bswap64)
+            #define MA_DR_FLAC_HAS_BYTESWAP64_INTRINSIC
+        #endif
+    #endif
+#elif defined(__GNUC__)
+    #if ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3))
+        #define MA_DR_FLAC_HAS_BYTESWAP32_INTRINSIC
+        #define MA_DR_FLAC_HAS_BYTESWAP64_INTRINSIC
+    #endif
+    #if ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8))
+        #define MA_DR_FLAC_HAS_BYTESWAP16_INTRINSIC
+    #endif
+#elif defined(__WATCOMC__) && defined(__386__)
+    #define MA_DR_FLAC_HAS_BYTESWAP16_INTRINSIC
+    #define MA_DR_FLAC_HAS_BYTESWAP32_INTRINSIC
+    #define MA_DR_FLAC_HAS_BYTESWAP64_INTRINSIC
+    extern __inline ma_uint16 _watcom_bswap16(ma_uint16);
+    extern __inline ma_uint32 _watcom_bswap32(ma_uint32);
+    extern __inline ma_uint64 _watcom_bswap64(ma_uint64);
+#pragma aux _watcom_bswap16 = \
+    "xchg al, ah" \
+    parm [ax] \
+    value [ax] \
+    modify nomemory;
+#pragma aux _watcom_bswap32 = \
+    "bswap eax" \
+    parm [eax] \
+    value [eax] \
+    modify nomemory;
+#pragma aux _watcom_bswap64 = \
+    "bswap eax" \
+    "bswap edx" \
+    "xchg eax,edx" \
+    parm [eax edx] \
+    value [eax edx] \
+    modify nomemory;
+#endif
+#ifndef MA_DR_FLAC_ASSERT
+#include <assert.h>
+#define MA_DR_FLAC_ASSERT(expression) assert(expression)
+#endif
+#ifndef MA_DR_FLAC_MALLOC
+#define MA_DR_FLAC_MALLOC(sz) malloc((sz))
+#endif
+#ifndef MA_DR_FLAC_REALLOC
+#define MA_DR_FLAC_REALLOC(p, sz) realloc((p), (sz))
+#endif
+#ifndef MA_DR_FLAC_FREE
+#define MA_DR_FLAC_FREE(p) free((p))
+#endif
+#ifndef MA_DR_FLAC_COPY_MEMORY
+#define MA_DR_FLAC_COPY_MEMORY(dst, src, sz) memcpy((dst), (src), (sz))
+#endif
+#ifndef MA_DR_FLAC_ZERO_MEMORY
+#define MA_DR_FLAC_ZERO_MEMORY(p, sz) memset((p), 0, (sz))
+#endif
+#ifndef MA_DR_FLAC_ZERO_OBJECT
+#define MA_DR_FLAC_ZERO_OBJECT(p) MA_DR_FLAC_ZERO_MEMORY((p), sizeof(*(p)))
+#endif
+#define MA_DR_FLAC_MAX_SIMD_VECTOR_SIZE 64
+#define MA_DR_FLAC_SUBFRAME_CONSTANT 0
+#define MA_DR_FLAC_SUBFRAME_VERBATIM 1
+#define MA_DR_FLAC_SUBFRAME_FIXED 8
+#define MA_DR_FLAC_SUBFRAME_LPC 32
+#define MA_DR_FLAC_SUBFRAME_RESERVED 255
+#define MA_DR_FLAC_RESIDUAL_CODING_METHOD_PARTITIONED_RICE 0
+#define MA_DR_FLAC_RESIDUAL_CODING_METHOD_PARTITIONED_RICE2 1
+#define MA_DR_FLAC_CHANNEL_ASSIGNMENT_INDEPENDENT 0
+#define MA_DR_FLAC_CHANNEL_ASSIGNMENT_LEFT_SIDE 8
+#define MA_DR_FLAC_CHANNEL_ASSIGNMENT_RIGHT_SIDE 9
+#define MA_DR_FLAC_CHANNEL_ASSIGNMENT_MID_SIDE 10
+#define MA_DR_FLAC_SEEKPOINT_SIZE_IN_BYTES 18
+#define MA_DR_FLAC_CUESHEET_TRACK_SIZE_IN_BYTES 36
+#define MA_DR_FLAC_CUESHEET_TRACK_INDEX_SIZE_IN_BYTES 12
+#define ma_dr_flac_align(x, a) ((((x) + (a) - 1) / (a)) * (a))
+MA_API void ma_dr_flac_version(ma_uint32* pMajor, ma_uint32* pMinor, ma_uint32* pRevision)
+{
+    if (pMajor) {
+        *pMajor = MA_DR_FLAC_VERSION_MAJOR;
+    }
+    if (pMinor) {
+        *pMinor = MA_DR_FLAC_VERSION_MINOR;
+    }
+    if (pRevision) {
+        *pRevision = MA_DR_FLAC_VERSION_REVISION;
+    }
+}
+MA_API const char* ma_dr_flac_version_string(void)
+{
+    return MA_DR_FLAC_VERSION_STRING;
+}
+#if defined(__has_feature)
+    #if __has_feature(thread_sanitizer)
+        #define MA_DR_FLAC_NO_THREAD_SANITIZE __attribute__((no_sanitize("thread")))
+    #else
+        #define MA_DR_FLAC_NO_THREAD_SANITIZE
+    #endif
+#else
+    #define MA_DR_FLAC_NO_THREAD_SANITIZE
+#endif
+#if defined(MA_DR_FLAC_HAS_LZCNT_INTRINSIC)
+static ma_bool32 ma_dr_flac__gIsLZCNTSupported = MA_FALSE;
+#endif
+#ifndef MA_DR_FLAC_NO_CPUID
+static ma_bool32 ma_dr_flac__gIsSSE2Supported = MA_FALSE;
+static ma_bool32 ma_dr_flac__gIsSSE41Supported = MA_FALSE;
+MA_DR_FLAC_NO_THREAD_SANITIZE static void ma_dr_flac__init_cpu_caps(void)
+{
+    static ma_bool32 isCPUCapsInitialized = MA_FALSE;
+    if (!isCPUCapsInitialized) {
+#if defined(MA_DR_FLAC_HAS_LZCNT_INTRINSIC)
+        int info[4] = {0};
+        ma_dr_flac__cpuid(info, 0x80000001);
+        ma_dr_flac__gIsLZCNTSupported = (info[2] & (1 << 5)) != 0;
+#endif
+        ma_dr_flac__gIsSSE2Supported = ma_dr_flac_has_sse2();
+        ma_dr_flac__gIsSSE41Supported = ma_dr_flac_has_sse41();
+ isCPUCapsInitialized = MA_TRUE; + } +} +#else +static ma_bool32 ma_dr_flac__gIsNEONSupported = MA_FALSE; +static MA_INLINE ma_bool32 ma_dr_flac__has_neon(void) +{ +#if defined(MA_DR_FLAC_SUPPORT_NEON) + #if defined(MA_ARM) && !defined(MA_DR_FLAC_NO_NEON) + #if (defined(__ARM_NEON) || defined(__aarch64__) || defined(_M_ARM64)) + return MA_TRUE; + #else + return MA_FALSE; + #endif + #else + return MA_FALSE; + #endif +#else + return MA_FALSE; +#endif +} +MA_DR_FLAC_NO_THREAD_SANITIZE static void ma_dr_flac__init_cpu_caps(void) +{ + ma_dr_flac__gIsNEONSupported = ma_dr_flac__has_neon(); +#if defined(MA_DR_FLAC_HAS_LZCNT_INTRINSIC) && defined(MA_ARM) && (defined(__ARM_ARCH) && __ARM_ARCH >= 5) + ma_dr_flac__gIsLZCNTSupported = MA_TRUE; +#endif +} +#endif +static MA_INLINE ma_bool32 ma_dr_flac__is_little_endian(void) +{ +#if defined(MA_X86) || defined(MA_X64) + return MA_TRUE; +#elif defined(__BYTE_ORDER) && defined(__LITTLE_ENDIAN) && __BYTE_ORDER == __LITTLE_ENDIAN + return MA_TRUE; +#else + int n = 1; + return (*(char*)&n) == 1; +#endif +} +static MA_INLINE ma_uint16 ma_dr_flac__swap_endian_uint16(ma_uint16 n) +{ +#ifdef MA_DR_FLAC_HAS_BYTESWAP16_INTRINSIC + #if defined(_MSC_VER) && !defined(__clang__) + return _byteswap_ushort(n); + #elif defined(__GNUC__) || defined(__clang__) + return __builtin_bswap16(n); + #elif defined(__WATCOMC__) && defined(__386__) + return _watcom_bswap16(n); + #else + #error "This compiler does not support the byte swap intrinsic." + #endif +#else + return ((n & 0xFF00) >> 8) | + ((n & 0x00FF) << 8); +#endif +} +static MA_INLINE ma_uint32 ma_dr_flac__swap_endian_uint32(ma_uint32 n) +{ +#ifdef MA_DR_FLAC_HAS_BYTESWAP32_INTRINSIC + #if defined(_MSC_VER) && !defined(__clang__) + return _byteswap_ulong(n); + #elif defined(__GNUC__) || defined(__clang__) + #if defined(MA_ARM) && (defined(__ARM_ARCH) && __ARM_ARCH >= 6) && !defined(__ARM_ARCH_6M__) && !defined(MA_64BIT) + ma_uint32 r; + __asm__ __volatile__ ( + #if defined(MA_64BIT) + "rev %w[out], %w[in]" : [out]"=r"(r) : [in]"r"(n) + #else + "rev %[out], %[in]" : [out]"=r"(r) : [in]"r"(n) + #endif + ); + return r; + #else + return __builtin_bswap32(n); + #endif + #elif defined(__WATCOMC__) && defined(__386__) + return _watcom_bswap32(n); + #else + #error "This compiler does not support the byte swap intrinsic." + #endif +#else + return ((n & 0xFF000000) >> 24) | + ((n & 0x00FF0000) >> 8) | + ((n & 0x0000FF00) << 8) | + ((n & 0x000000FF) << 24); +#endif +} +static MA_INLINE ma_uint64 ma_dr_flac__swap_endian_uint64(ma_uint64 n) +{ +#ifdef MA_DR_FLAC_HAS_BYTESWAP64_INTRINSIC + #if defined(_MSC_VER) && !defined(__clang__) + return _byteswap_uint64(n); + #elif defined(__GNUC__) || defined(__clang__) + return __builtin_bswap64(n); + #elif defined(__WATCOMC__) && defined(__386__) + return _watcom_bswap64(n); + #else + #error "This compiler does not support the byte swap intrinsic." 
+ #endif +#else + return ((n & ((ma_uint64)0xFF000000 << 32)) >> 56) | + ((n & ((ma_uint64)0x00FF0000 << 32)) >> 40) | + ((n & ((ma_uint64)0x0000FF00 << 32)) >> 24) | + ((n & ((ma_uint64)0x000000FF << 32)) >> 8) | + ((n & ((ma_uint64)0xFF000000 )) << 8) | + ((n & ((ma_uint64)0x00FF0000 )) << 24) | + ((n & ((ma_uint64)0x0000FF00 )) << 40) | + ((n & ((ma_uint64)0x000000FF )) << 56); +#endif +} +static MA_INLINE ma_uint16 ma_dr_flac__be2host_16(ma_uint16 n) +{ + if (ma_dr_flac__is_little_endian()) { + return ma_dr_flac__swap_endian_uint16(n); + } + return n; +} +static MA_INLINE ma_uint32 ma_dr_flac__be2host_32(ma_uint32 n) +{ + if (ma_dr_flac__is_little_endian()) { + return ma_dr_flac__swap_endian_uint32(n); + } + return n; +} +static MA_INLINE ma_uint32 ma_dr_flac__be2host_32_ptr_unaligned(const void* pData) +{ + const ma_uint8* pNum = (ma_uint8*)pData; + return *(pNum) << 24 | *(pNum+1) << 16 | *(pNum+2) << 8 | *(pNum+3); +} +static MA_INLINE ma_uint64 ma_dr_flac__be2host_64(ma_uint64 n) +{ + if (ma_dr_flac__is_little_endian()) { + return ma_dr_flac__swap_endian_uint64(n); + } + return n; +} +static MA_INLINE ma_uint32 ma_dr_flac__le2host_32(ma_uint32 n) +{ + if (!ma_dr_flac__is_little_endian()) { + return ma_dr_flac__swap_endian_uint32(n); + } + return n; +} +static MA_INLINE ma_uint32 ma_dr_flac__le2host_32_ptr_unaligned(const void* pData) +{ + const ma_uint8* pNum = (ma_uint8*)pData; + return *pNum | *(pNum+1) << 8 | *(pNum+2) << 16 | *(pNum+3) << 24; +} +static MA_INLINE ma_uint32 ma_dr_flac__unsynchsafe_32(ma_uint32 n) +{ + ma_uint32 result = 0; + result |= (n & 0x7F000000) >> 3; + result |= (n & 0x007F0000) >> 2; + result |= (n & 0x00007F00) >> 1; + result |= (n & 0x0000007F) >> 0; + return result; +} +static ma_uint8 ma_dr_flac__crc8_table[] = { + 0x00, 0x07, 0x0E, 0x09, 0x1C, 0x1B, 0x12, 0x15, 0x38, 0x3F, 0x36, 0x31, 0x24, 0x23, 0x2A, 0x2D, + 0x70, 0x77, 0x7E, 0x79, 0x6C, 0x6B, 0x62, 0x65, 0x48, 0x4F, 0x46, 0x41, 0x54, 0x53, 0x5A, 0x5D, + 0xE0, 0xE7, 0xEE, 0xE9, 0xFC, 0xFB, 0xF2, 0xF5, 0xD8, 0xDF, 0xD6, 0xD1, 0xC4, 0xC3, 0xCA, 0xCD, + 0x90, 0x97, 0x9E, 0x99, 0x8C, 0x8B, 0x82, 0x85, 0xA8, 0xAF, 0xA6, 0xA1, 0xB4, 0xB3, 0xBA, 0xBD, + 0xC7, 0xC0, 0xC9, 0xCE, 0xDB, 0xDC, 0xD5, 0xD2, 0xFF, 0xF8, 0xF1, 0xF6, 0xE3, 0xE4, 0xED, 0xEA, + 0xB7, 0xB0, 0xB9, 0xBE, 0xAB, 0xAC, 0xA5, 0xA2, 0x8F, 0x88, 0x81, 0x86, 0x93, 0x94, 0x9D, 0x9A, + 0x27, 0x20, 0x29, 0x2E, 0x3B, 0x3C, 0x35, 0x32, 0x1F, 0x18, 0x11, 0x16, 0x03, 0x04, 0x0D, 0x0A, + 0x57, 0x50, 0x59, 0x5E, 0x4B, 0x4C, 0x45, 0x42, 0x6F, 0x68, 0x61, 0x66, 0x73, 0x74, 0x7D, 0x7A, + 0x89, 0x8E, 0x87, 0x80, 0x95, 0x92, 0x9B, 0x9C, 0xB1, 0xB6, 0xBF, 0xB8, 0xAD, 0xAA, 0xA3, 0xA4, + 0xF9, 0xFE, 0xF7, 0xF0, 0xE5, 0xE2, 0xEB, 0xEC, 0xC1, 0xC6, 0xCF, 0xC8, 0xDD, 0xDA, 0xD3, 0xD4, + 0x69, 0x6E, 0x67, 0x60, 0x75, 0x72, 0x7B, 0x7C, 0x51, 0x56, 0x5F, 0x58, 0x4D, 0x4A, 0x43, 0x44, + 0x19, 0x1E, 0x17, 0x10, 0x05, 0x02, 0x0B, 0x0C, 0x21, 0x26, 0x2F, 0x28, 0x3D, 0x3A, 0x33, 0x34, + 0x4E, 0x49, 0x40, 0x47, 0x52, 0x55, 0x5C, 0x5B, 0x76, 0x71, 0x78, 0x7F, 0x6A, 0x6D, 0x64, 0x63, + 0x3E, 0x39, 0x30, 0x37, 0x22, 0x25, 0x2C, 0x2B, 0x06, 0x01, 0x08, 0x0F, 0x1A, 0x1D, 0x14, 0x13, + 0xAE, 0xA9, 0xA0, 0xA7, 0xB2, 0xB5, 0xBC, 0xBB, 0x96, 0x91, 0x98, 0x9F, 0x8A, 0x8D, 0x84, 0x83, + 0xDE, 0xD9, 0xD0, 0xD7, 0xC2, 0xC5, 0xCC, 0xCB, 0xE6, 0xE1, 0xE8, 0xEF, 0xFA, 0xFD, 0xF4, 0xF3 +}; +static ma_uint16 ma_dr_flac__crc16_table[] = { + 0x0000, 0x8005, 0x800F, 0x000A, 0x801B, 0x001E, 0x0014, 0x8011, + 0x8033, 0x0036, 0x003C, 0x8039, 0x0028, 0x802D, 0x8027, 0x0022, + 0x8063, 0x0066, 
0x006C, 0x8069, 0x0078, 0x807D, 0x8077, 0x0072, + 0x0050, 0x8055, 0x805F, 0x005A, 0x804B, 0x004E, 0x0044, 0x8041, + 0x80C3, 0x00C6, 0x00CC, 0x80C9, 0x00D8, 0x80DD, 0x80D7, 0x00D2, + 0x00F0, 0x80F5, 0x80FF, 0x00FA, 0x80EB, 0x00EE, 0x00E4, 0x80E1, + 0x00A0, 0x80A5, 0x80AF, 0x00AA, 0x80BB, 0x00BE, 0x00B4, 0x80B1, + 0x8093, 0x0096, 0x009C, 0x8099, 0x0088, 0x808D, 0x8087, 0x0082, + 0x8183, 0x0186, 0x018C, 0x8189, 0x0198, 0x819D, 0x8197, 0x0192, + 0x01B0, 0x81B5, 0x81BF, 0x01BA, 0x81AB, 0x01AE, 0x01A4, 0x81A1, + 0x01E0, 0x81E5, 0x81EF, 0x01EA, 0x81FB, 0x01FE, 0x01F4, 0x81F1, + 0x81D3, 0x01D6, 0x01DC, 0x81D9, 0x01C8, 0x81CD, 0x81C7, 0x01C2, + 0x0140, 0x8145, 0x814F, 0x014A, 0x815B, 0x015E, 0x0154, 0x8151, + 0x8173, 0x0176, 0x017C, 0x8179, 0x0168, 0x816D, 0x8167, 0x0162, + 0x8123, 0x0126, 0x012C, 0x8129, 0x0138, 0x813D, 0x8137, 0x0132, + 0x0110, 0x8115, 0x811F, 0x011A, 0x810B, 0x010E, 0x0104, 0x8101, + 0x8303, 0x0306, 0x030C, 0x8309, 0x0318, 0x831D, 0x8317, 0x0312, + 0x0330, 0x8335, 0x833F, 0x033A, 0x832B, 0x032E, 0x0324, 0x8321, + 0x0360, 0x8365, 0x836F, 0x036A, 0x837B, 0x037E, 0x0374, 0x8371, + 0x8353, 0x0356, 0x035C, 0x8359, 0x0348, 0x834D, 0x8347, 0x0342, + 0x03C0, 0x83C5, 0x83CF, 0x03CA, 0x83DB, 0x03DE, 0x03D4, 0x83D1, + 0x83F3, 0x03F6, 0x03FC, 0x83F9, 0x03E8, 0x83ED, 0x83E7, 0x03E2, + 0x83A3, 0x03A6, 0x03AC, 0x83A9, 0x03B8, 0x83BD, 0x83B7, 0x03B2, + 0x0390, 0x8395, 0x839F, 0x039A, 0x838B, 0x038E, 0x0384, 0x8381, + 0x0280, 0x8285, 0x828F, 0x028A, 0x829B, 0x029E, 0x0294, 0x8291, + 0x82B3, 0x02B6, 0x02BC, 0x82B9, 0x02A8, 0x82AD, 0x82A7, 0x02A2, + 0x82E3, 0x02E6, 0x02EC, 0x82E9, 0x02F8, 0x82FD, 0x82F7, 0x02F2, + 0x02D0, 0x82D5, 0x82DF, 0x02DA, 0x82CB, 0x02CE, 0x02C4, 0x82C1, + 0x8243, 0x0246, 0x024C, 0x8249, 0x0258, 0x825D, 0x8257, 0x0252, + 0x0270, 0x8275, 0x827F, 0x027A, 0x826B, 0x026E, 0x0264, 0x8261, + 0x0220, 0x8225, 0x822F, 0x022A, 0x823B, 0x023E, 0x0234, 0x8231, + 0x8213, 0x0216, 0x021C, 0x8219, 0x0208, 0x820D, 0x8207, 0x0202 +}; +static MA_INLINE ma_uint8 ma_dr_flac_crc8_byte(ma_uint8 crc, ma_uint8 data) +{ + return ma_dr_flac__crc8_table[crc ^ data]; +} +static MA_INLINE ma_uint8 ma_dr_flac_crc8(ma_uint8 crc, ma_uint32 data, ma_uint32 count) +{ +#ifdef MA_DR_FLAC_NO_CRC + (void)crc; + (void)data; + (void)count; + return 0; +#else +#if 0 + ma_uint8 p = 0x07; + for (int i = count-1; i >= 0; --i) { + ma_uint8 bit = (data & (1 << i)) >> i; + if (crc & 0x80) { + crc = ((crc << 1) | bit) ^ p; + } else { + crc = ((crc << 1) | bit); + } + } + return crc; +#else + ma_uint32 wholeBytes; + ma_uint32 leftoverBits; + ma_uint64 leftoverDataMask; + static ma_uint64 leftoverDataMaskTable[8] = { + 0x00, 0x01, 0x03, 0x07, 0x0F, 0x1F, 0x3F, 0x7F + }; + MA_DR_FLAC_ASSERT(count <= 32); + wholeBytes = count >> 3; + leftoverBits = count - (wholeBytes*8); + leftoverDataMask = leftoverDataMaskTable[leftoverBits]; + switch (wholeBytes) { + case 4: crc = ma_dr_flac_crc8_byte(crc, (ma_uint8)((data & (0xFF000000UL << leftoverBits)) >> (24 + leftoverBits))); + case 3: crc = ma_dr_flac_crc8_byte(crc, (ma_uint8)((data & (0x00FF0000UL << leftoverBits)) >> (16 + leftoverBits))); + case 2: crc = ma_dr_flac_crc8_byte(crc, (ma_uint8)((data & (0x0000FF00UL << leftoverBits)) >> ( 8 + leftoverBits))); + case 1: crc = ma_dr_flac_crc8_byte(crc, (ma_uint8)((data & (0x000000FFUL << leftoverBits)) >> ( 0 + leftoverBits))); + case 0: if (leftoverBits > 0) crc = (ma_uint8)((crc << leftoverBits) ^ ma_dr_flac__crc8_table[(crc >> (8 - leftoverBits)) ^ (data & leftoverDataMask)]); + } + return crc; +#endif +#endif +} +static MA_INLINE 
ma_uint16 ma_dr_flac_crc16_byte(ma_uint16 crc, ma_uint8 data) +{ + return (crc << 8) ^ ma_dr_flac__crc16_table[(ma_uint8)(crc >> 8) ^ data]; +} +static MA_INLINE ma_uint16 ma_dr_flac_crc16_cache(ma_uint16 crc, ma_dr_flac_cache_t data) +{ +#ifdef MA_64BIT + crc = ma_dr_flac_crc16_byte(crc, (ma_uint8)((data >> 56) & 0xFF)); + crc = ma_dr_flac_crc16_byte(crc, (ma_uint8)((data >> 48) & 0xFF)); + crc = ma_dr_flac_crc16_byte(crc, (ma_uint8)((data >> 40) & 0xFF)); + crc = ma_dr_flac_crc16_byte(crc, (ma_uint8)((data >> 32) & 0xFF)); +#endif + crc = ma_dr_flac_crc16_byte(crc, (ma_uint8)((data >> 24) & 0xFF)); + crc = ma_dr_flac_crc16_byte(crc, (ma_uint8)((data >> 16) & 0xFF)); + crc = ma_dr_flac_crc16_byte(crc, (ma_uint8)((data >> 8) & 0xFF)); + crc = ma_dr_flac_crc16_byte(crc, (ma_uint8)((data >> 0) & 0xFF)); + return crc; +} +static MA_INLINE ma_uint16 ma_dr_flac_crc16_bytes(ma_uint16 crc, ma_dr_flac_cache_t data, ma_uint32 byteCount) +{ + switch (byteCount) + { +#ifdef MA_64BIT + case 8: crc = ma_dr_flac_crc16_byte(crc, (ma_uint8)((data >> 56) & 0xFF)); + case 7: crc = ma_dr_flac_crc16_byte(crc, (ma_uint8)((data >> 48) & 0xFF)); + case 6: crc = ma_dr_flac_crc16_byte(crc, (ma_uint8)((data >> 40) & 0xFF)); + case 5: crc = ma_dr_flac_crc16_byte(crc, (ma_uint8)((data >> 32) & 0xFF)); +#endif + case 4: crc = ma_dr_flac_crc16_byte(crc, (ma_uint8)((data >> 24) & 0xFF)); + case 3: crc = ma_dr_flac_crc16_byte(crc, (ma_uint8)((data >> 16) & 0xFF)); + case 2: crc = ma_dr_flac_crc16_byte(crc, (ma_uint8)((data >> 8) & 0xFF)); + case 1: crc = ma_dr_flac_crc16_byte(crc, (ma_uint8)((data >> 0) & 0xFF)); + } + return crc; +} +#if 0 +static MA_INLINE ma_uint16 ma_dr_flac_crc16__32bit(ma_uint16 crc, ma_uint32 data, ma_uint32 count) +{ +#ifdef MA_DR_FLAC_NO_CRC + (void)crc; + (void)data; + (void)count; + return 0; +#else +#if 0 + ma_uint16 p = 0x8005; + for (int i = count-1; i >= 0; --i) { + ma_uint16 bit = (data & (1ULL << i)) >> i; + if (r & 0x8000) { + r = ((r << 1) | bit) ^ p; + } else { + r = ((r << 1) | bit); + } + } + return crc; +#else + ma_uint32 wholeBytes; + ma_uint32 leftoverBits; + ma_uint64 leftoverDataMask; + static ma_uint64 leftoverDataMaskTable[8] = { + 0x00, 0x01, 0x03, 0x07, 0x0F, 0x1F, 0x3F, 0x7F + }; + MA_DR_FLAC_ASSERT(count <= 64); + wholeBytes = count >> 3; + leftoverBits = count & 7; + leftoverDataMask = leftoverDataMaskTable[leftoverBits]; + switch (wholeBytes) { + default: + case 4: crc = ma_dr_flac_crc16_byte(crc, (ma_uint8)((data & (0xFF000000UL << leftoverBits)) >> (24 + leftoverBits))); + case 3: crc = ma_dr_flac_crc16_byte(crc, (ma_uint8)((data & (0x00FF0000UL << leftoverBits)) >> (16 + leftoverBits))); + case 2: crc = ma_dr_flac_crc16_byte(crc, (ma_uint8)((data & (0x0000FF00UL << leftoverBits)) >> ( 8 + leftoverBits))); + case 1: crc = ma_dr_flac_crc16_byte(crc, (ma_uint8)((data & (0x000000FFUL << leftoverBits)) >> ( 0 + leftoverBits))); + case 0: if (leftoverBits > 0) crc = (crc << leftoverBits) ^ ma_dr_flac__crc16_table[(crc >> (16 - leftoverBits)) ^ (data & leftoverDataMask)]; + } + return crc; +#endif +#endif +} +static MA_INLINE ma_uint16 ma_dr_flac_crc16__64bit(ma_uint16 crc, ma_uint64 data, ma_uint32 count) +{ +#ifdef MA_DR_FLAC_NO_CRC + (void)crc; + (void)data; + (void)count; + return 0; +#else + ma_uint32 wholeBytes; + ma_uint32 leftoverBits; + ma_uint64 leftoverDataMask; + static ma_uint64 leftoverDataMaskTable[8] = { + 0x00, 0x01, 0x03, 0x07, 0x0F, 0x1F, 0x3F, 0x7F + }; + MA_DR_FLAC_ASSERT(count <= 64); + wholeBytes = count >> 3; + leftoverBits = count & 7; + 
leftoverDataMask = leftoverDataMaskTable[leftoverBits]; + switch (wholeBytes) { + default: + case 8: crc = ma_dr_flac_crc16_byte(crc, (ma_uint8)((data & (((ma_uint64)0xFF000000 << 32) << leftoverBits)) >> (56 + leftoverBits))); + case 7: crc = ma_dr_flac_crc16_byte(crc, (ma_uint8)((data & (((ma_uint64)0x00FF0000 << 32) << leftoverBits)) >> (48 + leftoverBits))); + case 6: crc = ma_dr_flac_crc16_byte(crc, (ma_uint8)((data & (((ma_uint64)0x0000FF00 << 32) << leftoverBits)) >> (40 + leftoverBits))); + case 5: crc = ma_dr_flac_crc16_byte(crc, (ma_uint8)((data & (((ma_uint64)0x000000FF << 32) << leftoverBits)) >> (32 + leftoverBits))); + case 4: crc = ma_dr_flac_crc16_byte(crc, (ma_uint8)((data & (((ma_uint64)0xFF000000 ) << leftoverBits)) >> (24 + leftoverBits))); + case 3: crc = ma_dr_flac_crc16_byte(crc, (ma_uint8)((data & (((ma_uint64)0x00FF0000 ) << leftoverBits)) >> (16 + leftoverBits))); + case 2: crc = ma_dr_flac_crc16_byte(crc, (ma_uint8)((data & (((ma_uint64)0x0000FF00 ) << leftoverBits)) >> ( 8 + leftoverBits))); + case 1: crc = ma_dr_flac_crc16_byte(crc, (ma_uint8)((data & (((ma_uint64)0x000000FF ) << leftoverBits)) >> ( 0 + leftoverBits))); + case 0: if (leftoverBits > 0) crc = (crc << leftoverBits) ^ ma_dr_flac__crc16_table[(crc >> (16 - leftoverBits)) ^ (data & leftoverDataMask)]; + } + return crc; +#endif +} +static MA_INLINE ma_uint16 ma_dr_flac_crc16(ma_uint16 crc, ma_dr_flac_cache_t data, ma_uint32 count) +{ +#ifdef MA_64BIT + return ma_dr_flac_crc16__64bit(crc, data, count); +#else + return ma_dr_flac_crc16__32bit(crc, data, count); +#endif +} +#endif +#ifdef MA_64BIT +#define ma_dr_flac__be2host__cache_line ma_dr_flac__be2host_64 +#else +#define ma_dr_flac__be2host__cache_line ma_dr_flac__be2host_32 +#endif +#define MA_DR_FLAC_CACHE_L1_SIZE_BYTES(bs) (sizeof((bs)->cache)) +#define MA_DR_FLAC_CACHE_L1_SIZE_BITS(bs) (sizeof((bs)->cache)*8) +#define MA_DR_FLAC_CACHE_L1_BITS_REMAINING(bs) (MA_DR_FLAC_CACHE_L1_SIZE_BITS(bs) - (bs)->consumedBits) +#define MA_DR_FLAC_CACHE_L1_SELECTION_MASK(_bitCount) (~((~(ma_dr_flac_cache_t)0) >> (_bitCount))) +#define MA_DR_FLAC_CACHE_L1_SELECTION_SHIFT(bs, _bitCount) (MA_DR_FLAC_CACHE_L1_SIZE_BITS(bs) - (_bitCount)) +#define MA_DR_FLAC_CACHE_L1_SELECT(bs, _bitCount) (((bs)->cache) & MA_DR_FLAC_CACHE_L1_SELECTION_MASK(_bitCount)) +#define MA_DR_FLAC_CACHE_L1_SELECT_AND_SHIFT(bs, _bitCount) (MA_DR_FLAC_CACHE_L1_SELECT((bs), (_bitCount)) >> MA_DR_FLAC_CACHE_L1_SELECTION_SHIFT((bs), (_bitCount))) +#define MA_DR_FLAC_CACHE_L1_SELECT_AND_SHIFT_SAFE(bs, _bitCount)(MA_DR_FLAC_CACHE_L1_SELECT((bs), (_bitCount)) >> (MA_DR_FLAC_CACHE_L1_SELECTION_SHIFT((bs), (_bitCount)) & (MA_DR_FLAC_CACHE_L1_SIZE_BITS(bs)-1))) +#define MA_DR_FLAC_CACHE_L2_SIZE_BYTES(bs) (sizeof((bs)->cacheL2)) +#define MA_DR_FLAC_CACHE_L2_LINE_COUNT(bs) (MA_DR_FLAC_CACHE_L2_SIZE_BYTES(bs) / sizeof((bs)->cacheL2[0])) +#define MA_DR_FLAC_CACHE_L2_LINES_REMAINING(bs) (MA_DR_FLAC_CACHE_L2_LINE_COUNT(bs) - (bs)->nextL2Line) +#ifndef MA_DR_FLAC_NO_CRC +static MA_INLINE void ma_dr_flac__reset_crc16(ma_dr_flac_bs* bs) +{ + bs->crc16 = 0; + bs->crc16CacheIgnoredBytes = bs->consumedBits >> 3; +} +static MA_INLINE void ma_dr_flac__update_crc16(ma_dr_flac_bs* bs) +{ + if (bs->crc16CacheIgnoredBytes == 0) { + bs->crc16 = ma_dr_flac_crc16_cache(bs->crc16, bs->crc16Cache); + } else { + bs->crc16 = ma_dr_flac_crc16_bytes(bs->crc16, bs->crc16Cache, MA_DR_FLAC_CACHE_L1_SIZE_BYTES(bs) - bs->crc16CacheIgnoredBytes); + bs->crc16CacheIgnoredBytes = 0; + } +} +static MA_INLINE ma_uint16 
ma_dr_flac__flush_crc16(ma_dr_flac_bs* bs) +{ + MA_DR_FLAC_ASSERT((MA_DR_FLAC_CACHE_L1_BITS_REMAINING(bs) & 7) == 0); + if (MA_DR_FLAC_CACHE_L1_BITS_REMAINING(bs) == 0) { + ma_dr_flac__update_crc16(bs); + } else { + bs->crc16 = ma_dr_flac_crc16_bytes(bs->crc16, bs->crc16Cache >> MA_DR_FLAC_CACHE_L1_BITS_REMAINING(bs), (bs->consumedBits >> 3) - bs->crc16CacheIgnoredBytes); + bs->crc16CacheIgnoredBytes = bs->consumedBits >> 3; + } + return bs->crc16; +} +#endif +static MA_INLINE ma_bool32 ma_dr_flac__reload_l1_cache_from_l2(ma_dr_flac_bs* bs) +{ + size_t bytesRead; + size_t alignedL1LineCount; + if (bs->nextL2Line < MA_DR_FLAC_CACHE_L2_LINE_COUNT(bs)) { + bs->cache = bs->cacheL2[bs->nextL2Line++]; + return MA_TRUE; + } + if (bs->unalignedByteCount > 0) { + return MA_FALSE; + } + bytesRead = bs->onRead(bs->pUserData, bs->cacheL2, MA_DR_FLAC_CACHE_L2_SIZE_BYTES(bs)); + bs->nextL2Line = 0; + if (bytesRead == MA_DR_FLAC_CACHE_L2_SIZE_BYTES(bs)) { + bs->cache = bs->cacheL2[bs->nextL2Line++]; + return MA_TRUE; + } + alignedL1LineCount = bytesRead / MA_DR_FLAC_CACHE_L1_SIZE_BYTES(bs); + bs->unalignedByteCount = bytesRead - (alignedL1LineCount * MA_DR_FLAC_CACHE_L1_SIZE_BYTES(bs)); + if (bs->unalignedByteCount > 0) { + bs->unalignedCache = bs->cacheL2[alignedL1LineCount]; + } + if (alignedL1LineCount > 0) { + size_t offset = MA_DR_FLAC_CACHE_L2_LINE_COUNT(bs) - alignedL1LineCount; + size_t i; + for (i = alignedL1LineCount; i > 0; --i) { + bs->cacheL2[i-1 + offset] = bs->cacheL2[i-1]; + } + bs->nextL2Line = (ma_uint32)offset; + bs->cache = bs->cacheL2[bs->nextL2Line++]; + return MA_TRUE; + } else { + bs->nextL2Line = MA_DR_FLAC_CACHE_L2_LINE_COUNT(bs); + return MA_FALSE; + } +} +static ma_bool32 ma_dr_flac__reload_cache(ma_dr_flac_bs* bs) +{ + size_t bytesRead; +#ifndef MA_DR_FLAC_NO_CRC + ma_dr_flac__update_crc16(bs); +#endif + if (ma_dr_flac__reload_l1_cache_from_l2(bs)) { + bs->cache = ma_dr_flac__be2host__cache_line(bs->cache); + bs->consumedBits = 0; +#ifndef MA_DR_FLAC_NO_CRC + bs->crc16Cache = bs->cache; +#endif + return MA_TRUE; + } + bytesRead = bs->unalignedByteCount; + if (bytesRead == 0) { + bs->consumedBits = MA_DR_FLAC_CACHE_L1_SIZE_BITS(bs); + return MA_FALSE; + } + MA_DR_FLAC_ASSERT(bytesRead < MA_DR_FLAC_CACHE_L1_SIZE_BYTES(bs)); + bs->consumedBits = (ma_uint32)(MA_DR_FLAC_CACHE_L1_SIZE_BYTES(bs) - bytesRead) * 8; + bs->cache = ma_dr_flac__be2host__cache_line(bs->unalignedCache); + bs->cache &= MA_DR_FLAC_CACHE_L1_SELECTION_MASK(MA_DR_FLAC_CACHE_L1_BITS_REMAINING(bs)); + bs->unalignedByteCount = 0; +#ifndef MA_DR_FLAC_NO_CRC + bs->crc16Cache = bs->cache >> bs->consumedBits; + bs->crc16CacheIgnoredBytes = bs->consumedBits >> 3; +#endif + return MA_TRUE; +} +static void ma_dr_flac__reset_cache(ma_dr_flac_bs* bs) +{ + bs->nextL2Line = MA_DR_FLAC_CACHE_L2_LINE_COUNT(bs); + bs->consumedBits = MA_DR_FLAC_CACHE_L1_SIZE_BITS(bs); + bs->cache = 0; + bs->unalignedByteCount = 0; + bs->unalignedCache = 0; +#ifndef MA_DR_FLAC_NO_CRC + bs->crc16Cache = 0; + bs->crc16CacheIgnoredBytes = 0; +#endif +} +static MA_INLINE ma_bool32 ma_dr_flac__read_uint32(ma_dr_flac_bs* bs, unsigned int bitCount, ma_uint32* pResultOut) +{ + MA_DR_FLAC_ASSERT(bs != NULL); + MA_DR_FLAC_ASSERT(pResultOut != NULL); + MA_DR_FLAC_ASSERT(bitCount > 0); + MA_DR_FLAC_ASSERT(bitCount <= 32); + if (bs->consumedBits == MA_DR_FLAC_CACHE_L1_SIZE_BITS(bs)) { + if (!ma_dr_flac__reload_cache(bs)) { + return MA_FALSE; + } + } + if (bitCount <= MA_DR_FLAC_CACHE_L1_BITS_REMAINING(bs)) { +#ifdef MA_64BIT + *pResultOut = 
(ma_uint32)MA_DR_FLAC_CACHE_L1_SELECT_AND_SHIFT(bs, bitCount); + bs->consumedBits += bitCount; + bs->cache <<= bitCount; +#else + if (bitCount < MA_DR_FLAC_CACHE_L1_SIZE_BITS(bs)) { + *pResultOut = (ma_uint32)MA_DR_FLAC_CACHE_L1_SELECT_AND_SHIFT(bs, bitCount); + bs->consumedBits += bitCount; + bs->cache <<= bitCount; + } else { + *pResultOut = (ma_uint32)bs->cache; + bs->consumedBits = MA_DR_FLAC_CACHE_L1_SIZE_BITS(bs); + bs->cache = 0; + } +#endif + return MA_TRUE; + } else { + ma_uint32 bitCountHi = MA_DR_FLAC_CACHE_L1_BITS_REMAINING(bs); + ma_uint32 bitCountLo = bitCount - bitCountHi; + ma_uint32 resultHi; + MA_DR_FLAC_ASSERT(bitCountHi > 0); + MA_DR_FLAC_ASSERT(bitCountHi < 32); + resultHi = (ma_uint32)MA_DR_FLAC_CACHE_L1_SELECT_AND_SHIFT(bs, bitCountHi); + if (!ma_dr_flac__reload_cache(bs)) { + return MA_FALSE; + } + if (bitCountLo > MA_DR_FLAC_CACHE_L1_BITS_REMAINING(bs)) { + return MA_FALSE; + } + *pResultOut = (resultHi << bitCountLo) | (ma_uint32)MA_DR_FLAC_CACHE_L1_SELECT_AND_SHIFT(bs, bitCountLo); + bs->consumedBits += bitCountLo; + bs->cache <<= bitCountLo; + return MA_TRUE; + } +} +static ma_bool32 ma_dr_flac__read_int32(ma_dr_flac_bs* bs, unsigned int bitCount, ma_int32* pResult) +{ + ma_uint32 result; + MA_DR_FLAC_ASSERT(bs != NULL); + MA_DR_FLAC_ASSERT(pResult != NULL); + MA_DR_FLAC_ASSERT(bitCount > 0); + MA_DR_FLAC_ASSERT(bitCount <= 32); + if (!ma_dr_flac__read_uint32(bs, bitCount, &result)) { + return MA_FALSE; + } + if (bitCount < 32) { + ma_uint32 signbit; + signbit = ((result >> (bitCount-1)) & 0x01); + result |= (~signbit + 1) << bitCount; + } + *pResult = (ma_int32)result; + return MA_TRUE; +} +#ifdef MA_64BIT +static ma_bool32 ma_dr_flac__read_uint64(ma_dr_flac_bs* bs, unsigned int bitCount, ma_uint64* pResultOut) +{ + ma_uint32 resultHi; + ma_uint32 resultLo; + MA_DR_FLAC_ASSERT(bitCount <= 64); + MA_DR_FLAC_ASSERT(bitCount > 32); + if (!ma_dr_flac__read_uint32(bs, bitCount - 32, &resultHi)) { + return MA_FALSE; + } + if (!ma_dr_flac__read_uint32(bs, 32, &resultLo)) { + return MA_FALSE; + } + *pResultOut = (((ma_uint64)resultHi) << 32) | ((ma_uint64)resultLo); + return MA_TRUE; +} +#endif +#if 0 +static ma_bool32 ma_dr_flac__read_int64(ma_dr_flac_bs* bs, unsigned int bitCount, ma_int64* pResultOut) +{ + ma_uint64 result; + ma_uint64 signbit; + MA_DR_FLAC_ASSERT(bitCount <= 64); + if (!ma_dr_flac__read_uint64(bs, bitCount, &result)) { + return MA_FALSE; + } + signbit = ((result >> (bitCount-1)) & 0x01); + result |= (~signbit + 1) << bitCount; + *pResultOut = (ma_int64)result; + return MA_TRUE; +} +#endif +static ma_bool32 ma_dr_flac__read_uint16(ma_dr_flac_bs* bs, unsigned int bitCount, ma_uint16* pResult) +{ + ma_uint32 result; + MA_DR_FLAC_ASSERT(bs != NULL); + MA_DR_FLAC_ASSERT(pResult != NULL); + MA_DR_FLAC_ASSERT(bitCount > 0); + MA_DR_FLAC_ASSERT(bitCount <= 16); + if (!ma_dr_flac__read_uint32(bs, bitCount, &result)) { + return MA_FALSE; + } + *pResult = (ma_uint16)result; + return MA_TRUE; +} +#if 0 +static ma_bool32 ma_dr_flac__read_int16(ma_dr_flac_bs* bs, unsigned int bitCount, ma_int16* pResult) +{ + ma_int32 result; + MA_DR_FLAC_ASSERT(bs != NULL); + MA_DR_FLAC_ASSERT(pResult != NULL); + MA_DR_FLAC_ASSERT(bitCount > 0); + MA_DR_FLAC_ASSERT(bitCount <= 16); + if (!ma_dr_flac__read_int32(bs, bitCount, &result)) { + return MA_FALSE; + } + *pResult = (ma_int16)result; + return MA_TRUE; +} +#endif +static ma_bool32 ma_dr_flac__read_uint8(ma_dr_flac_bs* bs, unsigned int bitCount, ma_uint8* pResult) +{ + ma_uint32 result; + MA_DR_FLAC_ASSERT(bs != NULL); + 
MA_DR_FLAC_ASSERT(pResult != NULL); + MA_DR_FLAC_ASSERT(bitCount > 0); + MA_DR_FLAC_ASSERT(bitCount <= 8); + if (!ma_dr_flac__read_uint32(bs, bitCount, &result)) { + return MA_FALSE; + } + *pResult = (ma_uint8)result; + return MA_TRUE; +} +static ma_bool32 ma_dr_flac__read_int8(ma_dr_flac_bs* bs, unsigned int bitCount, ma_int8* pResult) +{ + ma_int32 result; + MA_DR_FLAC_ASSERT(bs != NULL); + MA_DR_FLAC_ASSERT(pResult != NULL); + MA_DR_FLAC_ASSERT(bitCount > 0); + MA_DR_FLAC_ASSERT(bitCount <= 8); + if (!ma_dr_flac__read_int32(bs, bitCount, &result)) { + return MA_FALSE; + } + *pResult = (ma_int8)result; + return MA_TRUE; +} +static ma_bool32 ma_dr_flac__seek_bits(ma_dr_flac_bs* bs, size_t bitsToSeek) +{ + if (bitsToSeek <= MA_DR_FLAC_CACHE_L1_BITS_REMAINING(bs)) { + bs->consumedBits += (ma_uint32)bitsToSeek; + bs->cache <<= bitsToSeek; + return MA_TRUE; + } else { + bitsToSeek -= MA_DR_FLAC_CACHE_L1_BITS_REMAINING(bs); + bs->consumedBits += MA_DR_FLAC_CACHE_L1_BITS_REMAINING(bs); + bs->cache = 0; +#ifdef MA_64BIT + while (bitsToSeek >= MA_DR_FLAC_CACHE_L1_SIZE_BITS(bs)) { + ma_uint64 bin; + if (!ma_dr_flac__read_uint64(bs, MA_DR_FLAC_CACHE_L1_SIZE_BITS(bs), &bin)) { + return MA_FALSE; + } + bitsToSeek -= MA_DR_FLAC_CACHE_L1_SIZE_BITS(bs); + } +#else + while (bitsToSeek >= MA_DR_FLAC_CACHE_L1_SIZE_BITS(bs)) { + ma_uint32 bin; + if (!ma_dr_flac__read_uint32(bs, MA_DR_FLAC_CACHE_L1_SIZE_BITS(bs), &bin)) { + return MA_FALSE; + } + bitsToSeek -= MA_DR_FLAC_CACHE_L1_SIZE_BITS(bs); + } +#endif + while (bitsToSeek >= 8) { + ma_uint8 bin; + if (!ma_dr_flac__read_uint8(bs, 8, &bin)) { + return MA_FALSE; + } + bitsToSeek -= 8; + } + if (bitsToSeek > 0) { + ma_uint8 bin; + if (!ma_dr_flac__read_uint8(bs, (ma_uint32)bitsToSeek, &bin)) { + return MA_FALSE; + } + bitsToSeek = 0; + } + MA_DR_FLAC_ASSERT(bitsToSeek == 0); + return MA_TRUE; + } +} +static ma_bool32 ma_dr_flac__find_and_seek_to_next_sync_code(ma_dr_flac_bs* bs) +{ + MA_DR_FLAC_ASSERT(bs != NULL); + if (!ma_dr_flac__seek_bits(bs, MA_DR_FLAC_CACHE_L1_BITS_REMAINING(bs) & 7)) { + return MA_FALSE; + } + for (;;) { + ma_uint8 hi; +#ifndef MA_DR_FLAC_NO_CRC + ma_dr_flac__reset_crc16(bs); +#endif + if (!ma_dr_flac__read_uint8(bs, 8, &hi)) { + return MA_FALSE; + } + if (hi == 0xFF) { + ma_uint8 lo; + if (!ma_dr_flac__read_uint8(bs, 6, &lo)) { + return MA_FALSE; + } + if (lo == 0x3E) { + return MA_TRUE; + } else { + if (!ma_dr_flac__seek_bits(bs, MA_DR_FLAC_CACHE_L1_BITS_REMAINING(bs) & 7)) { + return MA_FALSE; + } + } + } + } +} +#if defined(MA_DR_FLAC_HAS_LZCNT_INTRINSIC) +#define MA_DR_FLAC_IMPLEMENT_CLZ_LZCNT +#endif +#if defined(_MSC_VER) && _MSC_VER >= 1400 && (defined(MA_X64) || defined(MA_X86)) && !defined(__clang__) +#define MA_DR_FLAC_IMPLEMENT_CLZ_MSVC +#endif +#if defined(__WATCOMC__) && defined(__386__) +#define MA_DR_FLAC_IMPLEMENT_CLZ_WATCOM +#endif +#ifdef __MRC__ +#include <intrinsics.h> +#define MA_DR_FLAC_IMPLEMENT_CLZ_MRC +#endif +static MA_INLINE ma_uint32 ma_dr_flac__clz_software(ma_dr_flac_cache_t x) +{ + ma_uint32 n; + static ma_uint32 clz_table_4[] = { + 0, + 4, + 3, 3, + 2, 2, 2, 2, + 1, 1, 1, 1, 1, 1, 1, 1 + }; + if (x == 0) { + return sizeof(x)*8; + } + n = clz_table_4[x >> (sizeof(x)*8 - 4)]; + if (n == 0) { +#ifdef MA_64BIT + if ((x & ((ma_uint64)0xFFFFFFFF << 32)) == 0) { n = 32; x <<= 32; } + if ((x & ((ma_uint64)0xFFFF0000 << 32)) == 0) { n += 16; x <<= 16; } + if ((x & ((ma_uint64)0xFF000000 << 32)) == 0) { n += 8; x <<= 8; } + if ((x & ((ma_uint64)0xF0000000 << 32)) == 0) { n += 4; x <<= 4; } +#else + if ((x & 0xFFFF0000) == 0) { 
n = 16; x <<= 16; } + if ((x & 0xFF000000) == 0) { n += 8; x <<= 8; } + if ((x & 0xF0000000) == 0) { n += 4; x <<= 4; } +#endif + n += clz_table_4[x >> (sizeof(x)*8 - 4)]; + } + return n - 1; +} +#ifdef MA_DR_FLAC_IMPLEMENT_CLZ_LZCNT +static MA_INLINE ma_bool32 ma_dr_flac__is_lzcnt_supported(void) +{ +#if defined(MA_DR_FLAC_HAS_LZCNT_INTRINSIC) && defined(MA_ARM) && (defined(__ARM_ARCH) && __ARM_ARCH >= 5) + return MA_TRUE; +#elif defined(__MRC__) + return MA_TRUE; +#else + #ifdef MA_DR_FLAC_HAS_LZCNT_INTRINSIC + return ma_dr_flac__gIsLZCNTSupported; + #else + return MA_FALSE; + #endif +#endif +} +static MA_INLINE ma_uint32 ma_dr_flac__clz_lzcnt(ma_dr_flac_cache_t x) +{ +#if defined(_MSC_VER) + #ifdef MA_64BIT + return (ma_uint32)__lzcnt64(x); + #else + return (ma_uint32)__lzcnt(x); + #endif +#else + #if defined(__GNUC__) || defined(__clang__) + #if defined(MA_X64) + { + ma_uint64 r; + __asm__ __volatile__ ( + "lzcnt{ %1, %0| %0, %1}" : "=r"(r) : "r"(x) : "cc" + ); + return (ma_uint32)r; + } + #elif defined(MA_X86) + { + ma_uint32 r; + __asm__ __volatile__ ( + "lzcnt{l %1, %0| %0, %1}" : "=r"(r) : "r"(x) : "cc" + ); + return r; + } + #elif defined(MA_ARM) && (defined(__ARM_ARCH) && __ARM_ARCH >= 5) && !defined(__ARM_ARCH_6M__) && !defined(MA_64BIT) + { + unsigned int r; + __asm__ __volatile__ ( + #if defined(MA_64BIT) + "clz %w[out], %w[in]" : [out]"=r"(r) : [in]"r"(x) + #else + "clz %[out], %[in]" : [out]"=r"(r) : [in]"r"(x) + #endif + ); + return r; + } + #else + if (x == 0) { + return sizeof(x)*8; + } + #ifdef MA_64BIT + return (ma_uint32)__builtin_clzll((ma_uint64)x); + #else + return (ma_uint32)__builtin_clzl((ma_uint32)x); + #endif + #endif + #else + #error "This compiler does not support the lzcnt intrinsic." + #endif +#endif +} +#endif +#ifdef MA_DR_FLAC_IMPLEMENT_CLZ_MSVC +#include <intrin.h> +static MA_INLINE ma_uint32 ma_dr_flac__clz_msvc(ma_dr_flac_cache_t x) +{ + ma_uint32 n; + if (x == 0) { + return sizeof(x)*8; + } +#ifdef MA_64BIT + _BitScanReverse64((unsigned long*)&n, x); +#else + _BitScanReverse((unsigned long*)&n, x); +#endif + return sizeof(x)*8 - n - 1; +} +#endif +#ifdef MA_DR_FLAC_IMPLEMENT_CLZ_WATCOM +static __inline ma_uint32 ma_dr_flac__clz_watcom (ma_uint32); +#ifdef MA_DR_FLAC_IMPLEMENT_CLZ_WATCOM_LZCNT +#pragma aux ma_dr_flac__clz_watcom_lzcnt = \ + "db 0F3h, 0Fh, 0BDh, 0C0h" \ + parm [eax] \ + value [eax] \ + modify nomemory; +#else +#pragma aux ma_dr_flac__clz_watcom = \ + "bsr eax, eax" \ + "xor eax, 31" \ + parm [eax] nomemory \ + value [eax] \ + modify exact [eax] nomemory; +#endif +#endif +static MA_INLINE ma_uint32 ma_dr_flac__clz(ma_dr_flac_cache_t x) +{ +#ifdef MA_DR_FLAC_IMPLEMENT_CLZ_LZCNT + if (ma_dr_flac__is_lzcnt_supported()) { + return ma_dr_flac__clz_lzcnt(x); + } else +#endif + { +#ifdef MA_DR_FLAC_IMPLEMENT_CLZ_MSVC + return ma_dr_flac__clz_msvc(x); +#elif defined(MA_DR_FLAC_IMPLEMENT_CLZ_WATCOM_LZCNT) + return ma_dr_flac__clz_watcom_lzcnt(x); +#elif defined(MA_DR_FLAC_IMPLEMENT_CLZ_WATCOM) + return (x == 0) ? 
sizeof(x)*8 : ma_dr_flac__clz_watcom(x); +#elif defined(__MRC__) + return __cntlzw(x); +#else + return ma_dr_flac__clz_software(x); +#endif + } +} +static MA_INLINE ma_bool32 ma_dr_flac__seek_past_next_set_bit(ma_dr_flac_bs* bs, unsigned int* pOffsetOut) +{ + ma_uint32 zeroCounter = 0; + ma_uint32 setBitOffsetPlus1; + while (bs->cache == 0) { + zeroCounter += (ma_uint32)MA_DR_FLAC_CACHE_L1_BITS_REMAINING(bs); + if (!ma_dr_flac__reload_cache(bs)) { + return MA_FALSE; + } + } + if (bs->cache == 1) { + *pOffsetOut = zeroCounter + (ma_uint32)MA_DR_FLAC_CACHE_L1_BITS_REMAINING(bs) - 1; + if (!ma_dr_flac__reload_cache(bs)) { + return MA_FALSE; + } + return MA_TRUE; + } + setBitOffsetPlus1 = ma_dr_flac__clz(bs->cache); + setBitOffsetPlus1 += 1; + if (setBitOffsetPlus1 > MA_DR_FLAC_CACHE_L1_BITS_REMAINING(bs)) { + return MA_FALSE; + } + bs->consumedBits += setBitOffsetPlus1; + bs->cache <<= setBitOffsetPlus1; + *pOffsetOut = zeroCounter + setBitOffsetPlus1 - 1; + return MA_TRUE; +} +static ma_bool32 ma_dr_flac__seek_to_byte(ma_dr_flac_bs* bs, ma_uint64 offsetFromStart) +{ + MA_DR_FLAC_ASSERT(bs != NULL); + MA_DR_FLAC_ASSERT(offsetFromStart > 0); + if (offsetFromStart > 0x7FFFFFFF) { + ma_uint64 bytesRemaining = offsetFromStart; + if (!bs->onSeek(bs->pUserData, 0x7FFFFFFF, ma_dr_flac_seek_origin_start)) { + return MA_FALSE; + } + bytesRemaining -= 0x7FFFFFFF; + while (bytesRemaining > 0x7FFFFFFF) { + if (!bs->onSeek(bs->pUserData, 0x7FFFFFFF, ma_dr_flac_seek_origin_current)) { + return MA_FALSE; + } + bytesRemaining -= 0x7FFFFFFF; + } + if (bytesRemaining > 0) { + if (!bs->onSeek(bs->pUserData, (int)bytesRemaining, ma_dr_flac_seek_origin_current)) { + return MA_FALSE; + } + } + } else { + if (!bs->onSeek(bs->pUserData, (int)offsetFromStart, ma_dr_flac_seek_origin_start)) { + return MA_FALSE; + } + } + ma_dr_flac__reset_cache(bs); + return MA_TRUE; +} +static ma_result ma_dr_flac__read_utf8_coded_number(ma_dr_flac_bs* bs, ma_uint64* pNumberOut, ma_uint8* pCRCOut) +{ + ma_uint8 crc; + ma_uint64 result; + ma_uint8 utf8[7] = {0}; + int byteCount; + int i; + MA_DR_FLAC_ASSERT(bs != NULL); + MA_DR_FLAC_ASSERT(pNumberOut != NULL); + MA_DR_FLAC_ASSERT(pCRCOut != NULL); + crc = *pCRCOut; + if (!ma_dr_flac__read_uint8(bs, 8, utf8)) { + *pNumberOut = 0; + return MA_AT_END; + } + crc = ma_dr_flac_crc8(crc, utf8[0], 8); + if ((utf8[0] & 0x80) == 0) { + *pNumberOut = utf8[0]; + *pCRCOut = crc; + return MA_SUCCESS; + } + if ((utf8[0] & 0xE0) == 0xC0) { + byteCount = 2; + } else if ((utf8[0] & 0xF0) == 0xE0) { + byteCount = 3; + } else if ((utf8[0] & 0xF8) == 0xF0) { + byteCount = 4; + } else if ((utf8[0] & 0xFC) == 0xF8) { + byteCount = 5; + } else if ((utf8[0] & 0xFE) == 0xFC) { + byteCount = 6; + } else if ((utf8[0] & 0xFF) == 0xFE) { + byteCount = 7; + } else { + *pNumberOut = 0; + return MA_CRC_MISMATCH; + } + MA_DR_FLAC_ASSERT(byteCount > 1); + result = (ma_uint64)(utf8[0] & (0xFF >> (byteCount + 1))); + for (i = 1; i < byteCount; ++i) { + if (!ma_dr_flac__read_uint8(bs, 8, utf8 + i)) { + *pNumberOut = 0; + return MA_AT_END; + } + crc = ma_dr_flac_crc8(crc, utf8[i], 8); + result = (result << 6) | (utf8[i] & 0x3F); + } + *pNumberOut = result; + *pCRCOut = crc; + return MA_SUCCESS; +} +static MA_INLINE ma_uint32 ma_dr_flac__ilog2_u32(ma_uint32 x) +{ +#if 1 + ma_uint32 result = 0; + while (x > 0) { + result += 1; + x >>= 1; + } + return result; +#endif +} +static MA_INLINE ma_bool32 ma_dr_flac__use_64_bit_prediction(ma_uint32 bitsPerSample, ma_uint32 order, ma_uint32 precision) +{ + return bitsPerSample + 
precision + ma_dr_flac__ilog2_u32(order) > 32; +} +#if defined(__clang__) +__attribute__((no_sanitize("signed-integer-overflow"))) +#endif +static MA_INLINE ma_int32 ma_dr_flac__calculate_prediction_32(ma_uint32 order, ma_int32 shift, const ma_int32* coefficients, ma_int32* pDecodedSamples) +{ + ma_int32 prediction = 0; + MA_DR_FLAC_ASSERT(order <= 32); + switch (order) + { + case 32: prediction += coefficients[31] * pDecodedSamples[-32]; + case 31: prediction += coefficients[30] * pDecodedSamples[-31]; + case 30: prediction += coefficients[29] * pDecodedSamples[-30]; + case 29: prediction += coefficients[28] * pDecodedSamples[-29]; + case 28: prediction += coefficients[27] * pDecodedSamples[-28]; + case 27: prediction += coefficients[26] * pDecodedSamples[-27]; + case 26: prediction += coefficients[25] * pDecodedSamples[-26]; + case 25: prediction += coefficients[24] * pDecodedSamples[-25]; + case 24: prediction += coefficients[23] * pDecodedSamples[-24]; + case 23: prediction += coefficients[22] * pDecodedSamples[-23]; + case 22: prediction += coefficients[21] * pDecodedSamples[-22]; + case 21: prediction += coefficients[20] * pDecodedSamples[-21]; + case 20: prediction += coefficients[19] * pDecodedSamples[-20]; + case 19: prediction += coefficients[18] * pDecodedSamples[-19]; + case 18: prediction += coefficients[17] * pDecodedSamples[-18]; + case 17: prediction += coefficients[16] * pDecodedSamples[-17]; + case 16: prediction += coefficients[15] * pDecodedSamples[-16]; + case 15: prediction += coefficients[14] * pDecodedSamples[-15]; + case 14: prediction += coefficients[13] * pDecodedSamples[-14]; + case 13: prediction += coefficients[12] * pDecodedSamples[-13]; + case 12: prediction += coefficients[11] * pDecodedSamples[-12]; + case 11: prediction += coefficients[10] * pDecodedSamples[-11]; + case 10: prediction += coefficients[ 9] * pDecodedSamples[-10]; + case 9: prediction += coefficients[ 8] * pDecodedSamples[- 9]; + case 8: prediction += coefficients[ 7] * pDecodedSamples[- 8]; + case 7: prediction += coefficients[ 6] * pDecodedSamples[- 7]; + case 6: prediction += coefficients[ 5] * pDecodedSamples[- 6]; + case 5: prediction += coefficients[ 4] * pDecodedSamples[- 5]; + case 4: prediction += coefficients[ 3] * pDecodedSamples[- 4]; + case 3: prediction += coefficients[ 2] * pDecodedSamples[- 3]; + case 2: prediction += coefficients[ 1] * pDecodedSamples[- 2]; + case 1: prediction += coefficients[ 0] * pDecodedSamples[- 1]; + } + return (ma_int32)(prediction >> shift); +} +static MA_INLINE ma_int32 ma_dr_flac__calculate_prediction_64(ma_uint32 order, ma_int32 shift, const ma_int32* coefficients, ma_int32* pDecodedSamples) +{ + ma_int64 prediction; + MA_DR_FLAC_ASSERT(order <= 32); +#ifndef MA_64BIT + if (order == 8) + { + prediction = coefficients[0] * (ma_int64)pDecodedSamples[-1]; + prediction += coefficients[1] * (ma_int64)pDecodedSamples[-2]; + prediction += coefficients[2] * (ma_int64)pDecodedSamples[-3]; + prediction += coefficients[3] * (ma_int64)pDecodedSamples[-4]; + prediction += coefficients[4] * (ma_int64)pDecodedSamples[-5]; + prediction += coefficients[5] * (ma_int64)pDecodedSamples[-6]; + prediction += coefficients[6] * (ma_int64)pDecodedSamples[-7]; + prediction += coefficients[7] * (ma_int64)pDecodedSamples[-8]; + } + else if (order == 7) + { + prediction = coefficients[0] * (ma_int64)pDecodedSamples[-1]; + prediction += coefficients[1] * (ma_int64)pDecodedSamples[-2]; + prediction += coefficients[2] * (ma_int64)pDecodedSamples[-3]; + prediction += 
coefficients[3] * (ma_int64)pDecodedSamples[-4]; + prediction += coefficients[4] * (ma_int64)pDecodedSamples[-5]; + prediction += coefficients[5] * (ma_int64)pDecodedSamples[-6]; + prediction += coefficients[6] * (ma_int64)pDecodedSamples[-7]; + } + else if (order == 3) + { + prediction = coefficients[0] * (ma_int64)pDecodedSamples[-1]; + prediction += coefficients[1] * (ma_int64)pDecodedSamples[-2]; + prediction += coefficients[2] * (ma_int64)pDecodedSamples[-3]; + } + else if (order == 6) + { + prediction = coefficients[0] * (ma_int64)pDecodedSamples[-1]; + prediction += coefficients[1] * (ma_int64)pDecodedSamples[-2]; + prediction += coefficients[2] * (ma_int64)pDecodedSamples[-3]; + prediction += coefficients[3] * (ma_int64)pDecodedSamples[-4]; + prediction += coefficients[4] * (ma_int64)pDecodedSamples[-5]; + prediction += coefficients[5] * (ma_int64)pDecodedSamples[-6]; + } + else if (order == 5) + { + prediction = coefficients[0] * (ma_int64)pDecodedSamples[-1]; + prediction += coefficients[1] * (ma_int64)pDecodedSamples[-2]; + prediction += coefficients[2] * (ma_int64)pDecodedSamples[-3]; + prediction += coefficients[3] * (ma_int64)pDecodedSamples[-4]; + prediction += coefficients[4] * (ma_int64)pDecodedSamples[-5]; + } + else if (order == 4) + { + prediction = coefficients[0] * (ma_int64)pDecodedSamples[-1]; + prediction += coefficients[1] * (ma_int64)pDecodedSamples[-2]; + prediction += coefficients[2] * (ma_int64)pDecodedSamples[-3]; + prediction += coefficients[3] * (ma_int64)pDecodedSamples[-4]; + } + else if (order == 12) + { + prediction = coefficients[0] * (ma_int64)pDecodedSamples[-1]; + prediction += coefficients[1] * (ma_int64)pDecodedSamples[-2]; + prediction += coefficients[2] * (ma_int64)pDecodedSamples[-3]; + prediction += coefficients[3] * (ma_int64)pDecodedSamples[-4]; + prediction += coefficients[4] * (ma_int64)pDecodedSamples[-5]; + prediction += coefficients[5] * (ma_int64)pDecodedSamples[-6]; + prediction += coefficients[6] * (ma_int64)pDecodedSamples[-7]; + prediction += coefficients[7] * (ma_int64)pDecodedSamples[-8]; + prediction += coefficients[8] * (ma_int64)pDecodedSamples[-9]; + prediction += coefficients[9] * (ma_int64)pDecodedSamples[-10]; + prediction += coefficients[10] * (ma_int64)pDecodedSamples[-11]; + prediction += coefficients[11] * (ma_int64)pDecodedSamples[-12]; + } + else if (order == 2) + { + prediction = coefficients[0] * (ma_int64)pDecodedSamples[-1]; + prediction += coefficients[1] * (ma_int64)pDecodedSamples[-2]; + } + else if (order == 1) + { + prediction = coefficients[0] * (ma_int64)pDecodedSamples[-1]; + } + else if (order == 10) + { + prediction = coefficients[0] * (ma_int64)pDecodedSamples[-1]; + prediction += coefficients[1] * (ma_int64)pDecodedSamples[-2]; + prediction += coefficients[2] * (ma_int64)pDecodedSamples[-3]; + prediction += coefficients[3] * (ma_int64)pDecodedSamples[-4]; + prediction += coefficients[4] * (ma_int64)pDecodedSamples[-5]; + prediction += coefficients[5] * (ma_int64)pDecodedSamples[-6]; + prediction += coefficients[6] * (ma_int64)pDecodedSamples[-7]; + prediction += coefficients[7] * (ma_int64)pDecodedSamples[-8]; + prediction += coefficients[8] * (ma_int64)pDecodedSamples[-9]; + prediction += coefficients[9] * (ma_int64)pDecodedSamples[-10]; + } + else if (order == 9) + { + prediction = coefficients[0] * (ma_int64)pDecodedSamples[-1]; + prediction += coefficients[1] * (ma_int64)pDecodedSamples[-2]; + prediction += coefficients[2] * (ma_int64)pDecodedSamples[-3]; + prediction += coefficients[3] * 
(ma_int64)pDecodedSamples[-4]; + prediction += coefficients[4] * (ma_int64)pDecodedSamples[-5]; + prediction += coefficients[5] * (ma_int64)pDecodedSamples[-6]; + prediction += coefficients[6] * (ma_int64)pDecodedSamples[-7]; + prediction += coefficients[7] * (ma_int64)pDecodedSamples[-8]; + prediction += coefficients[8] * (ma_int64)pDecodedSamples[-9]; + } + else if (order == 11) + { + prediction = coefficients[0] * (ma_int64)pDecodedSamples[-1]; + prediction += coefficients[1] * (ma_int64)pDecodedSamples[-2]; + prediction += coefficients[2] * (ma_int64)pDecodedSamples[-3]; + prediction += coefficients[3] * (ma_int64)pDecodedSamples[-4]; + prediction += coefficients[4] * (ma_int64)pDecodedSamples[-5]; + prediction += coefficients[5] * (ma_int64)pDecodedSamples[-6]; + prediction += coefficients[6] * (ma_int64)pDecodedSamples[-7]; + prediction += coefficients[7] * (ma_int64)pDecodedSamples[-8]; + prediction += coefficients[8] * (ma_int64)pDecodedSamples[-9]; + prediction += coefficients[9] * (ma_int64)pDecodedSamples[-10]; + prediction += coefficients[10] * (ma_int64)pDecodedSamples[-11]; + } + else + { + int j; + prediction = 0; + for (j = 0; j < (int)order; ++j) { + prediction += coefficients[j] * (ma_int64)pDecodedSamples[-j-1]; + } + } +#endif +#ifdef MA_64BIT + prediction = 0; + switch (order) + { + case 32: prediction += coefficients[31] * (ma_int64)pDecodedSamples[-32]; + case 31: prediction += coefficients[30] * (ma_int64)pDecodedSamples[-31]; + case 30: prediction += coefficients[29] * (ma_int64)pDecodedSamples[-30]; + case 29: prediction += coefficients[28] * (ma_int64)pDecodedSamples[-29]; + case 28: prediction += coefficients[27] * (ma_int64)pDecodedSamples[-28]; + case 27: prediction += coefficients[26] * (ma_int64)pDecodedSamples[-27]; + case 26: prediction += coefficients[25] * (ma_int64)pDecodedSamples[-26]; + case 25: prediction += coefficients[24] * (ma_int64)pDecodedSamples[-25]; + case 24: prediction += coefficients[23] * (ma_int64)pDecodedSamples[-24]; + case 23: prediction += coefficients[22] * (ma_int64)pDecodedSamples[-23]; + case 22: prediction += coefficients[21] * (ma_int64)pDecodedSamples[-22]; + case 21: prediction += coefficients[20] * (ma_int64)pDecodedSamples[-21]; + case 20: prediction += coefficients[19] * (ma_int64)pDecodedSamples[-20]; + case 19: prediction += coefficients[18] * (ma_int64)pDecodedSamples[-19]; + case 18: prediction += coefficients[17] * (ma_int64)pDecodedSamples[-18]; + case 17: prediction += coefficients[16] * (ma_int64)pDecodedSamples[-17]; + case 16: prediction += coefficients[15] * (ma_int64)pDecodedSamples[-16]; + case 15: prediction += coefficients[14] * (ma_int64)pDecodedSamples[-15]; + case 14: prediction += coefficients[13] * (ma_int64)pDecodedSamples[-14]; + case 13: prediction += coefficients[12] * (ma_int64)pDecodedSamples[-13]; + case 12: prediction += coefficients[11] * (ma_int64)pDecodedSamples[-12]; + case 11: prediction += coefficients[10] * (ma_int64)pDecodedSamples[-11]; + case 10: prediction += coefficients[ 9] * (ma_int64)pDecodedSamples[-10]; + case 9: prediction += coefficients[ 8] * (ma_int64)pDecodedSamples[- 9]; + case 8: prediction += coefficients[ 7] * (ma_int64)pDecodedSamples[- 8]; + case 7: prediction += coefficients[ 6] * (ma_int64)pDecodedSamples[- 7]; + case 6: prediction += coefficients[ 5] * (ma_int64)pDecodedSamples[- 6]; + case 5: prediction += coefficients[ 4] * (ma_int64)pDecodedSamples[- 5]; + case 4: prediction += coefficients[ 3] * (ma_int64)pDecodedSamples[- 4]; + case 3: prediction += 
coefficients[ 2] * (ma_int64)pDecodedSamples[- 3]; + case 2: prediction += coefficients[ 1] * (ma_int64)pDecodedSamples[- 2]; + case 1: prediction += coefficients[ 0] * (ma_int64)pDecodedSamples[- 1]; + } +#endif + return (ma_int32)(prediction >> shift); +} +#if 0 +static ma_bool32 ma_dr_flac__decode_samples_with_residual__rice__reference(ma_dr_flac_bs* bs, ma_uint32 bitsPerSample, ma_uint32 count, ma_uint8 riceParam, ma_uint32 lpcOrder, ma_int32 lpcShift, ma_uint32 lpcPrecision, const ma_int32* coefficients, ma_int32* pSamplesOut) +{ + ma_uint32 i; + MA_DR_FLAC_ASSERT(bs != NULL); + MA_DR_FLAC_ASSERT(pSamplesOut != NULL); + for (i = 0; i < count; ++i) { + ma_uint32 zeroCounter = 0; + for (;;) { + ma_uint8 bit; + if (!ma_dr_flac__read_uint8(bs, 1, &bit)) { + return MA_FALSE; + } + if (bit == 0) { + zeroCounter += 1; + } else { + break; + } + } + ma_uint32 decodedRice; + if (riceParam > 0) { + if (!ma_dr_flac__read_uint32(bs, riceParam, &decodedRice)) { + return MA_FALSE; + } + } else { + decodedRice = 0; + } + decodedRice |= (zeroCounter << riceParam); + if ((decodedRice & 0x01)) { + decodedRice = ~(decodedRice >> 1); + } else { + decodedRice = (decodedRice >> 1); + } + if (ma_dr_flac__use_64_bit_prediction(bitsPerSample, lpcOrder, lpcPrecision)) { + pSamplesOut[i] = decodedRice + ma_dr_flac__calculate_prediction_64(lpcOrder, lpcShift, coefficients, pSamplesOut + i); + } else { + pSamplesOut[i] = decodedRice + ma_dr_flac__calculate_prediction_32(lpcOrder, lpcShift, coefficients, pSamplesOut + i); + } + } + return MA_TRUE; +} +#endif +#if 0 +static ma_bool32 ma_dr_flac__read_rice_parts__reference(ma_dr_flac_bs* bs, ma_uint8 riceParam, ma_uint32* pZeroCounterOut, ma_uint32* pRiceParamPartOut) +{ + ma_uint32 zeroCounter = 0; + ma_uint32 decodedRice; + for (;;) { + ma_uint8 bit; + if (!ma_dr_flac__read_uint8(bs, 1, &bit)) { + return MA_FALSE; + } + if (bit == 0) { + zeroCounter += 1; + } else { + break; + } + } + if (riceParam > 0) { + if (!ma_dr_flac__read_uint32(bs, riceParam, &decodedRice)) { + return MA_FALSE; + } + } else { + decodedRice = 0; + } + *pZeroCounterOut = zeroCounter; + *pRiceParamPartOut = decodedRice; + return MA_TRUE; +} +#endif +#if 0 +static MA_INLINE ma_bool32 ma_dr_flac__read_rice_parts(ma_dr_flac_bs* bs, ma_uint8 riceParam, ma_uint32* pZeroCounterOut, ma_uint32* pRiceParamPartOut) +{ + ma_dr_flac_cache_t riceParamMask; + ma_uint32 zeroCounter; + ma_uint32 setBitOffsetPlus1; + ma_uint32 riceParamPart; + ma_uint32 riceLength; + MA_DR_FLAC_ASSERT(riceParam > 0); + riceParamMask = MA_DR_FLAC_CACHE_L1_SELECTION_MASK(riceParam); + zeroCounter = 0; + while (bs->cache == 0) { + zeroCounter += (ma_uint32)MA_DR_FLAC_CACHE_L1_BITS_REMAINING(bs); + if (!ma_dr_flac__reload_cache(bs)) { + return MA_FALSE; + } + } + setBitOffsetPlus1 = ma_dr_flac__clz(bs->cache); + zeroCounter += setBitOffsetPlus1; + setBitOffsetPlus1 += 1; + riceLength = setBitOffsetPlus1 + riceParam; + if (riceLength < MA_DR_FLAC_CACHE_L1_BITS_REMAINING(bs)) { + riceParamPart = (ma_uint32)((bs->cache & (riceParamMask >> setBitOffsetPlus1)) >> MA_DR_FLAC_CACHE_L1_SELECTION_SHIFT(bs, riceLength)); + bs->consumedBits += riceLength; + bs->cache <<= riceLength; + } else { + ma_uint32 bitCountLo; + ma_dr_flac_cache_t resultHi; + bs->consumedBits += riceLength; + bs->cache <<= setBitOffsetPlus1 & (MA_DR_FLAC_CACHE_L1_SIZE_BITS(bs)-1); + bitCountLo = bs->consumedBits - MA_DR_FLAC_CACHE_L1_SIZE_BITS(bs); + resultHi = MA_DR_FLAC_CACHE_L1_SELECT_AND_SHIFT(bs, riceParam); + if (bs->nextL2Line < 
MA_DR_FLAC_CACHE_L2_LINE_COUNT(bs)) { +#ifndef MA_DR_FLAC_NO_CRC + ma_dr_flac__update_crc16(bs); +#endif + bs->cache = ma_dr_flac__be2host__cache_line(bs->cacheL2[bs->nextL2Line++]); + bs->consumedBits = 0; +#ifndef MA_DR_FLAC_NO_CRC + bs->crc16Cache = bs->cache; +#endif + } else { + if (!ma_dr_flac__reload_cache(bs)) { + return MA_FALSE; + } + if (bitCountLo > MA_DR_FLAC_CACHE_L1_BITS_REMAINING(bs)) { + return MA_FALSE; + } + } + riceParamPart = (ma_uint32)(resultHi | MA_DR_FLAC_CACHE_L1_SELECT_AND_SHIFT_SAFE(bs, bitCountLo)); + bs->consumedBits += bitCountLo; + bs->cache <<= bitCountLo; + } + pZeroCounterOut[0] = zeroCounter; + pRiceParamPartOut[0] = riceParamPart; + return MA_TRUE; +} +#endif +static MA_INLINE ma_bool32 ma_dr_flac__read_rice_parts_x1(ma_dr_flac_bs* bs, ma_uint8 riceParam, ma_uint32* pZeroCounterOut, ma_uint32* pRiceParamPartOut) +{ + ma_uint32 riceParamPlus1 = riceParam + 1; + ma_uint32 riceParamPlus1Shift = MA_DR_FLAC_CACHE_L1_SELECTION_SHIFT(bs, riceParamPlus1); + ma_uint32 riceParamPlus1MaxConsumedBits = MA_DR_FLAC_CACHE_L1_SIZE_BITS(bs) - riceParamPlus1; + ma_dr_flac_cache_t bs_cache = bs->cache; + ma_uint32 bs_consumedBits = bs->consumedBits; + ma_uint32 lzcount = ma_dr_flac__clz(bs_cache); + if (lzcount < sizeof(bs_cache)*8) { + pZeroCounterOut[0] = lzcount; + extract_rice_param_part: + bs_cache <<= lzcount; + bs_consumedBits += lzcount; + if (bs_consumedBits <= riceParamPlus1MaxConsumedBits) { + pRiceParamPartOut[0] = (ma_uint32)(bs_cache >> riceParamPlus1Shift); + bs_cache <<= riceParamPlus1; + bs_consumedBits += riceParamPlus1; + } else { + ma_uint32 riceParamPartHi; + ma_uint32 riceParamPartLo; + ma_uint32 riceParamPartLoBitCount; + riceParamPartHi = (ma_uint32)(bs_cache >> riceParamPlus1Shift); + riceParamPartLoBitCount = bs_consumedBits - riceParamPlus1MaxConsumedBits; + MA_DR_FLAC_ASSERT(riceParamPartLoBitCount > 0 && riceParamPartLoBitCount < 32); + if (bs->nextL2Line < MA_DR_FLAC_CACHE_L2_LINE_COUNT(bs)) { + #ifndef MA_DR_FLAC_NO_CRC + ma_dr_flac__update_crc16(bs); + #endif + bs_cache = ma_dr_flac__be2host__cache_line(bs->cacheL2[bs->nextL2Line++]); + bs_consumedBits = riceParamPartLoBitCount; + #ifndef MA_DR_FLAC_NO_CRC + bs->crc16Cache = bs_cache; + #endif + } else { + if (!ma_dr_flac__reload_cache(bs)) { + return MA_FALSE; + } + if (riceParamPartLoBitCount > MA_DR_FLAC_CACHE_L1_BITS_REMAINING(bs)) { + return MA_FALSE; + } + bs_cache = bs->cache; + bs_consumedBits = bs->consumedBits + riceParamPartLoBitCount; + } + riceParamPartLo = (ma_uint32)(bs_cache >> (MA_DR_FLAC_CACHE_L1_SELECTION_SHIFT(bs, riceParamPartLoBitCount))); + pRiceParamPartOut[0] = riceParamPartHi | riceParamPartLo; + bs_cache <<= riceParamPartLoBitCount; + } + } else { + ma_uint32 zeroCounter = (ma_uint32)(MA_DR_FLAC_CACHE_L1_SIZE_BITS(bs) - bs_consumedBits); + for (;;) { + if (bs->nextL2Line < MA_DR_FLAC_CACHE_L2_LINE_COUNT(bs)) { + #ifndef MA_DR_FLAC_NO_CRC + ma_dr_flac__update_crc16(bs); + #endif + bs_cache = ma_dr_flac__be2host__cache_line(bs->cacheL2[bs->nextL2Line++]); + bs_consumedBits = 0; + #ifndef MA_DR_FLAC_NO_CRC + bs->crc16Cache = bs_cache; + #endif + } else { + if (!ma_dr_flac__reload_cache(bs)) { + return MA_FALSE; + } + bs_cache = bs->cache; + bs_consumedBits = bs->consumedBits; + } + lzcount = ma_dr_flac__clz(bs_cache); + zeroCounter += lzcount; + if (lzcount < sizeof(bs_cache)*8) { + break; + } + } + pZeroCounterOut[0] = zeroCounter; + goto extract_rice_param_part; + } + bs->cache = bs_cache; + bs->consumedBits = bs_consumedBits; + return MA_TRUE; +} +static MA_INLINE 
ma_bool32 ma_dr_flac__seek_rice_parts(ma_dr_flac_bs* bs, ma_uint8 riceParam) +{ + ma_uint32 riceParamPlus1 = riceParam + 1; + ma_uint32 riceParamPlus1MaxConsumedBits = MA_DR_FLAC_CACHE_L1_SIZE_BITS(bs) - riceParamPlus1; + ma_dr_flac_cache_t bs_cache = bs->cache; + ma_uint32 bs_consumedBits = bs->consumedBits; + ma_uint32 lzcount = ma_dr_flac__clz(bs_cache); + if (lzcount < sizeof(bs_cache)*8) { + extract_rice_param_part: + bs_cache <<= lzcount; + bs_consumedBits += lzcount; + if (bs_consumedBits <= riceParamPlus1MaxConsumedBits) { + bs_cache <<= riceParamPlus1; + bs_consumedBits += riceParamPlus1; + } else { + ma_uint32 riceParamPartLoBitCount = bs_consumedBits - riceParamPlus1MaxConsumedBits; + MA_DR_FLAC_ASSERT(riceParamPartLoBitCount > 0 && riceParamPartLoBitCount < 32); + if (bs->nextL2Line < MA_DR_FLAC_CACHE_L2_LINE_COUNT(bs)) { + #ifndef MA_DR_FLAC_NO_CRC + ma_dr_flac__update_crc16(bs); + #endif + bs_cache = ma_dr_flac__be2host__cache_line(bs->cacheL2[bs->nextL2Line++]); + bs_consumedBits = riceParamPartLoBitCount; + #ifndef MA_DR_FLAC_NO_CRC + bs->crc16Cache = bs_cache; + #endif + } else { + if (!ma_dr_flac__reload_cache(bs)) { + return MA_FALSE; + } + if (riceParamPartLoBitCount > MA_DR_FLAC_CACHE_L1_BITS_REMAINING(bs)) { + return MA_FALSE; + } + bs_cache = bs->cache; + bs_consumedBits = bs->consumedBits + riceParamPartLoBitCount; + } + bs_cache <<= riceParamPartLoBitCount; + } + } else { + for (;;) { + if (bs->nextL2Line < MA_DR_FLAC_CACHE_L2_LINE_COUNT(bs)) { + #ifndef MA_DR_FLAC_NO_CRC + ma_dr_flac__update_crc16(bs); + #endif + bs_cache = ma_dr_flac__be2host__cache_line(bs->cacheL2[bs->nextL2Line++]); + bs_consumedBits = 0; + #ifndef MA_DR_FLAC_NO_CRC + bs->crc16Cache = bs_cache; + #endif + } else { + if (!ma_dr_flac__reload_cache(bs)) { + return MA_FALSE; + } + bs_cache = bs->cache; + bs_consumedBits = bs->consumedBits; + } + lzcount = ma_dr_flac__clz(bs_cache); + if (lzcount < sizeof(bs_cache)*8) { + break; + } + } + goto extract_rice_param_part; + } + bs->cache = bs_cache; + bs->consumedBits = bs_consumedBits; + return MA_TRUE; +} +static ma_bool32 ma_dr_flac__decode_samples_with_residual__rice__scalar_zeroorder(ma_dr_flac_bs* bs, ma_uint32 bitsPerSample, ma_uint32 count, ma_uint8 riceParam, ma_uint32 order, ma_int32 shift, const ma_int32* coefficients, ma_int32* pSamplesOut) +{ + ma_uint32 t[2] = {0x00000000, 0xFFFFFFFF}; + ma_uint32 zeroCountPart0; + ma_uint32 riceParamPart0; + ma_uint32 riceParamMask; + ma_uint32 i; + MA_DR_FLAC_ASSERT(bs != NULL); + MA_DR_FLAC_ASSERT(pSamplesOut != NULL); + (void)bitsPerSample; + (void)order; + (void)shift; + (void)coefficients; + riceParamMask = (ma_uint32)~((~0UL) << riceParam); + i = 0; + while (i < count) { + if (!ma_dr_flac__read_rice_parts_x1(bs, riceParam, &zeroCountPart0, &riceParamPart0)) { + return MA_FALSE; + } + riceParamPart0 &= riceParamMask; + riceParamPart0 |= (zeroCountPart0 << riceParam); + riceParamPart0 = (riceParamPart0 >> 1) ^ t[riceParamPart0 & 0x01]; + pSamplesOut[i] = riceParamPart0; + i += 1; + } + return MA_TRUE; +} +static ma_bool32 ma_dr_flac__decode_samples_with_residual__rice__scalar(ma_dr_flac_bs* bs, ma_uint32 bitsPerSample, ma_uint32 count, ma_uint8 riceParam, ma_uint32 lpcOrder, ma_int32 lpcShift, ma_uint32 lpcPrecision, const ma_int32* coefficients, ma_int32* pSamplesOut) +{ + ma_uint32 t[2] = {0x00000000, 0xFFFFFFFF}; + ma_uint32 zeroCountPart0 = 0; + ma_uint32 zeroCountPart1 = 0; + ma_uint32 zeroCountPart2 = 0; + ma_uint32 zeroCountPart3 = 0; + ma_uint32 riceParamPart0 = 0; + ma_uint32 riceParamPart1 
= 0; + ma_uint32 riceParamPart2 = 0; + ma_uint32 riceParamPart3 = 0; + ma_uint32 riceParamMask; + const ma_int32* pSamplesOutEnd; + ma_uint32 i; + MA_DR_FLAC_ASSERT(bs != NULL); + MA_DR_FLAC_ASSERT(pSamplesOut != NULL); + if (lpcOrder == 0) { + return ma_dr_flac__decode_samples_with_residual__rice__scalar_zeroorder(bs, bitsPerSample, count, riceParam, lpcOrder, lpcShift, coefficients, pSamplesOut); + } + riceParamMask = (ma_uint32)~((~0UL) << riceParam); + pSamplesOutEnd = pSamplesOut + (count & ~3); + if (ma_dr_flac__use_64_bit_prediction(bitsPerSample, lpcOrder, lpcPrecision)) { + while (pSamplesOut < pSamplesOutEnd) { + if (!ma_dr_flac__read_rice_parts_x1(bs, riceParam, &zeroCountPart0, &riceParamPart0) || + !ma_dr_flac__read_rice_parts_x1(bs, riceParam, &zeroCountPart1, &riceParamPart1) || + !ma_dr_flac__read_rice_parts_x1(bs, riceParam, &zeroCountPart2, &riceParamPart2) || + !ma_dr_flac__read_rice_parts_x1(bs, riceParam, &zeroCountPart3, &riceParamPart3)) { + return MA_FALSE; + } + riceParamPart0 &= riceParamMask; + riceParamPart1 &= riceParamMask; + riceParamPart2 &= riceParamMask; + riceParamPart3 &= riceParamMask; + riceParamPart0 |= (zeroCountPart0 << riceParam); + riceParamPart1 |= (zeroCountPart1 << riceParam); + riceParamPart2 |= (zeroCountPart2 << riceParam); + riceParamPart3 |= (zeroCountPart3 << riceParam); + riceParamPart0 = (riceParamPart0 >> 1) ^ t[riceParamPart0 & 0x01]; + riceParamPart1 = (riceParamPart1 >> 1) ^ t[riceParamPart1 & 0x01]; + riceParamPart2 = (riceParamPart2 >> 1) ^ t[riceParamPart2 & 0x01]; + riceParamPart3 = (riceParamPart3 >> 1) ^ t[riceParamPart3 & 0x01]; + pSamplesOut[0] = riceParamPart0 + ma_dr_flac__calculate_prediction_64(lpcOrder, lpcShift, coefficients, pSamplesOut + 0); + pSamplesOut[1] = riceParamPart1 + ma_dr_flac__calculate_prediction_64(lpcOrder, lpcShift, coefficients, pSamplesOut + 1); + pSamplesOut[2] = riceParamPart2 + ma_dr_flac__calculate_prediction_64(lpcOrder, lpcShift, coefficients, pSamplesOut + 2); + pSamplesOut[3] = riceParamPart3 + ma_dr_flac__calculate_prediction_64(lpcOrder, lpcShift, coefficients, pSamplesOut + 3); + pSamplesOut += 4; + } + } else { + while (pSamplesOut < pSamplesOutEnd) { + if (!ma_dr_flac__read_rice_parts_x1(bs, riceParam, &zeroCountPart0, &riceParamPart0) || + !ma_dr_flac__read_rice_parts_x1(bs, riceParam, &zeroCountPart1, &riceParamPart1) || + !ma_dr_flac__read_rice_parts_x1(bs, riceParam, &zeroCountPart2, &riceParamPart2) || + !ma_dr_flac__read_rice_parts_x1(bs, riceParam, &zeroCountPart3, &riceParamPart3)) { + return MA_FALSE; + } + riceParamPart0 &= riceParamMask; + riceParamPart1 &= riceParamMask; + riceParamPart2 &= riceParamMask; + riceParamPart3 &= riceParamMask; + riceParamPart0 |= (zeroCountPart0 << riceParam); + riceParamPart1 |= (zeroCountPart1 << riceParam); + riceParamPart2 |= (zeroCountPart2 << riceParam); + riceParamPart3 |= (zeroCountPart3 << riceParam); + riceParamPart0 = (riceParamPart0 >> 1) ^ t[riceParamPart0 & 0x01]; + riceParamPart1 = (riceParamPart1 >> 1) ^ t[riceParamPart1 & 0x01]; + riceParamPart2 = (riceParamPart2 >> 1) ^ t[riceParamPart2 & 0x01]; + riceParamPart3 = (riceParamPart3 >> 1) ^ t[riceParamPart3 & 0x01]; + pSamplesOut[0] = riceParamPart0 + ma_dr_flac__calculate_prediction_32(lpcOrder, lpcShift, coefficients, pSamplesOut + 0); + pSamplesOut[1] = riceParamPart1 + ma_dr_flac__calculate_prediction_32(lpcOrder, lpcShift, coefficients, pSamplesOut + 1); + pSamplesOut[2] = riceParamPart2 + ma_dr_flac__calculate_prediction_32(lpcOrder, lpcShift, coefficients, pSamplesOut + 2); 
+ pSamplesOut[3] = riceParamPart3 + ma_dr_flac__calculate_prediction_32(lpcOrder, lpcShift, coefficients, pSamplesOut + 3); + pSamplesOut += 4; + } + } + i = (count & ~3); + while (i < count) { + if (!ma_dr_flac__read_rice_parts_x1(bs, riceParam, &zeroCountPart0, &riceParamPart0)) { + return MA_FALSE; + } + riceParamPart0 &= riceParamMask; + riceParamPart0 |= (zeroCountPart0 << riceParam); + riceParamPart0 = (riceParamPart0 >> 1) ^ t[riceParamPart0 & 0x01]; + if (ma_dr_flac__use_64_bit_prediction(bitsPerSample, lpcOrder, lpcPrecision)) { + pSamplesOut[0] = riceParamPart0 + ma_dr_flac__calculate_prediction_64(lpcOrder, lpcShift, coefficients, pSamplesOut + 0); + } else { + pSamplesOut[0] = riceParamPart0 + ma_dr_flac__calculate_prediction_32(lpcOrder, lpcShift, coefficients, pSamplesOut + 0); + } + i += 1; + pSamplesOut += 1; + } + return MA_TRUE; +} +#if defined(MA_DR_FLAC_SUPPORT_SSE2) +static MA_INLINE __m128i ma_dr_flac__mm_packs_interleaved_epi32(__m128i a, __m128i b) +{ + __m128i r; + r = _mm_packs_epi32(a, b); + r = _mm_shuffle_epi32(r, _MM_SHUFFLE(3, 1, 2, 0)); + r = _mm_shufflehi_epi16(r, _MM_SHUFFLE(3, 1, 2, 0)); + r = _mm_shufflelo_epi16(r, _MM_SHUFFLE(3, 1, 2, 0)); + return r; +} +#endif +#if defined(MA_DR_FLAC_SUPPORT_SSE41) +static MA_INLINE __m128i ma_dr_flac__mm_not_si128(__m128i a) +{ + return _mm_xor_si128(a, _mm_cmpeq_epi32(_mm_setzero_si128(), _mm_setzero_si128())); +} +static MA_INLINE __m128i ma_dr_flac__mm_hadd_epi32(__m128i x) +{ + __m128i x64 = _mm_add_epi32(x, _mm_shuffle_epi32(x, _MM_SHUFFLE(1, 0, 3, 2))); + __m128i x32 = _mm_shufflelo_epi16(x64, _MM_SHUFFLE(1, 0, 3, 2)); + return _mm_add_epi32(x64, x32); +} +static MA_INLINE __m128i ma_dr_flac__mm_hadd_epi64(__m128i x) +{ + return _mm_add_epi64(x, _mm_shuffle_epi32(x, _MM_SHUFFLE(1, 0, 3, 2))); +} +static MA_INLINE __m128i ma_dr_flac__mm_srai_epi64(__m128i x, int count) +{ + __m128i lo = _mm_srli_epi64(x, count); + __m128i hi = _mm_srai_epi32(x, count); + hi = _mm_and_si128(hi, _mm_set_epi32(0xFFFFFFFF, 0, 0xFFFFFFFF, 0)); + return _mm_or_si128(lo, hi); +} +static ma_bool32 ma_dr_flac__decode_samples_with_residual__rice__sse41_32(ma_dr_flac_bs* bs, ma_uint32 count, ma_uint8 riceParam, ma_uint32 order, ma_int32 shift, const ma_int32* coefficients, ma_int32* pSamplesOut) +{ + int i; + ma_uint32 riceParamMask; + ma_int32* pDecodedSamples = pSamplesOut; + ma_int32* pDecodedSamplesEnd = pSamplesOut + (count & ~3); + ma_uint32 zeroCountParts0 = 0; + ma_uint32 zeroCountParts1 = 0; + ma_uint32 zeroCountParts2 = 0; + ma_uint32 zeroCountParts3 = 0; + ma_uint32 riceParamParts0 = 0; + ma_uint32 riceParamParts1 = 0; + ma_uint32 riceParamParts2 = 0; + ma_uint32 riceParamParts3 = 0; + __m128i coefficients128_0; + __m128i coefficients128_4; + __m128i coefficients128_8; + __m128i samples128_0; + __m128i samples128_4; + __m128i samples128_8; + __m128i riceParamMask128; + const ma_uint32 t[2] = {0x00000000, 0xFFFFFFFF}; + riceParamMask = (ma_uint32)~((~0UL) << riceParam); + riceParamMask128 = _mm_set1_epi32(riceParamMask); + coefficients128_0 = _mm_setzero_si128(); + coefficients128_4 = _mm_setzero_si128(); + coefficients128_8 = _mm_setzero_si128(); + samples128_0 = _mm_setzero_si128(); + samples128_4 = _mm_setzero_si128(); + samples128_8 = _mm_setzero_si128(); +#if 1 + { + int runningOrder = order; + if (runningOrder >= 4) { + coefficients128_0 = _mm_loadu_si128((const __m128i*)(coefficients + 0)); + samples128_0 = _mm_loadu_si128((const __m128i*)(pSamplesOut - 4)); + runningOrder -= 4; + } else { + switch (runningOrder) { + case 
3: coefficients128_0 = _mm_set_epi32(0, coefficients[2], coefficients[1], coefficients[0]); samples128_0 = _mm_set_epi32(pSamplesOut[-1], pSamplesOut[-2], pSamplesOut[-3], 0); break; + case 2: coefficients128_0 = _mm_set_epi32(0, 0, coefficients[1], coefficients[0]); samples128_0 = _mm_set_epi32(pSamplesOut[-1], pSamplesOut[-2], 0, 0); break; + case 1: coefficients128_0 = _mm_set_epi32(0, 0, 0, coefficients[0]); samples128_0 = _mm_set_epi32(pSamplesOut[-1], 0, 0, 0); break; + } + runningOrder = 0; + } + if (runningOrder >= 4) { + coefficients128_4 = _mm_loadu_si128((const __m128i*)(coefficients + 4)); + samples128_4 = _mm_loadu_si128((const __m128i*)(pSamplesOut - 8)); + runningOrder -= 4; + } else { + switch (runningOrder) { + case 3: coefficients128_4 = _mm_set_epi32(0, coefficients[6], coefficients[5], coefficients[4]); samples128_4 = _mm_set_epi32(pSamplesOut[-5], pSamplesOut[-6], pSamplesOut[-7], 0); break; + case 2: coefficients128_4 = _mm_set_epi32(0, 0, coefficients[5], coefficients[4]); samples128_4 = _mm_set_epi32(pSamplesOut[-5], pSamplesOut[-6], 0, 0); break; + case 1: coefficients128_4 = _mm_set_epi32(0, 0, 0, coefficients[4]); samples128_4 = _mm_set_epi32(pSamplesOut[-5], 0, 0, 0); break; + } + runningOrder = 0; + } + if (runningOrder == 4) { + coefficients128_8 = _mm_loadu_si128((const __m128i*)(coefficients + 8)); + samples128_8 = _mm_loadu_si128((const __m128i*)(pSamplesOut - 12)); + runningOrder -= 4; + } else { + switch (runningOrder) { + case 3: coefficients128_8 = _mm_set_epi32(0, coefficients[10], coefficients[9], coefficients[8]); samples128_8 = _mm_set_epi32(pSamplesOut[-9], pSamplesOut[-10], pSamplesOut[-11], 0); break; + case 2: coefficients128_8 = _mm_set_epi32(0, 0, coefficients[9], coefficients[8]); samples128_8 = _mm_set_epi32(pSamplesOut[-9], pSamplesOut[-10], 0, 0); break; + case 1: coefficients128_8 = _mm_set_epi32(0, 0, 0, coefficients[8]); samples128_8 = _mm_set_epi32(pSamplesOut[-9], 0, 0, 0); break; + } + runningOrder = 0; + } + coefficients128_0 = _mm_shuffle_epi32(coefficients128_0, _MM_SHUFFLE(0, 1, 2, 3)); + coefficients128_4 = _mm_shuffle_epi32(coefficients128_4, _MM_SHUFFLE(0, 1, 2, 3)); + coefficients128_8 = _mm_shuffle_epi32(coefficients128_8, _MM_SHUFFLE(0, 1, 2, 3)); + } +#else + switch (order) + { + case 12: ((ma_int32*)&coefficients128_8)[0] = coefficients[11]; ((ma_int32*)&samples128_8)[0] = pDecodedSamples[-12]; + case 11: ((ma_int32*)&coefficients128_8)[1] = coefficients[10]; ((ma_int32*)&samples128_8)[1] = pDecodedSamples[-11]; + case 10: ((ma_int32*)&coefficients128_8)[2] = coefficients[ 9]; ((ma_int32*)&samples128_8)[2] = pDecodedSamples[-10]; + case 9: ((ma_int32*)&coefficients128_8)[3] = coefficients[ 8]; ((ma_int32*)&samples128_8)[3] = pDecodedSamples[- 9]; + case 8: ((ma_int32*)&coefficients128_4)[0] = coefficients[ 7]; ((ma_int32*)&samples128_4)[0] = pDecodedSamples[- 8]; + case 7: ((ma_int32*)&coefficients128_4)[1] = coefficients[ 6]; ((ma_int32*)&samples128_4)[1] = pDecodedSamples[- 7]; + case 6: ((ma_int32*)&coefficients128_4)[2] = coefficients[ 5]; ((ma_int32*)&samples128_4)[2] = pDecodedSamples[- 6]; + case 5: ((ma_int32*)&coefficients128_4)[3] = coefficients[ 4]; ((ma_int32*)&samples128_4)[3] = pDecodedSamples[- 5]; + case 4: ((ma_int32*)&coefficients128_0)[0] = coefficients[ 3]; ((ma_int32*)&samples128_0)[0] = pDecodedSamples[- 4]; + case 3: ((ma_int32*)&coefficients128_0)[1] = coefficients[ 2]; ((ma_int32*)&samples128_0)[1] = pDecodedSamples[- 3]; + case 2: ((ma_int32*)&coefficients128_0)[2] = coefficients[ 1]; 
((ma_int32*)&samples128_0)[2] = pDecodedSamples[- 2]; + case 1: ((ma_int32*)&coefficients128_0)[3] = coefficients[ 0]; ((ma_int32*)&samples128_0)[3] = pDecodedSamples[- 1]; + } +#endif + while (pDecodedSamples < pDecodedSamplesEnd) { + __m128i prediction128; + __m128i zeroCountPart128; + __m128i riceParamPart128; + if (!ma_dr_flac__read_rice_parts_x1(bs, riceParam, &zeroCountParts0, &riceParamParts0) || + !ma_dr_flac__read_rice_parts_x1(bs, riceParam, &zeroCountParts1, &riceParamParts1) || + !ma_dr_flac__read_rice_parts_x1(bs, riceParam, &zeroCountParts2, &riceParamParts2) || + !ma_dr_flac__read_rice_parts_x1(bs, riceParam, &zeroCountParts3, &riceParamParts3)) { + return MA_FALSE; + } + zeroCountPart128 = _mm_set_epi32(zeroCountParts3, zeroCountParts2, zeroCountParts1, zeroCountParts0); + riceParamPart128 = _mm_set_epi32(riceParamParts3, riceParamParts2, riceParamParts1, riceParamParts0); + riceParamPart128 = _mm_and_si128(riceParamPart128, riceParamMask128); + riceParamPart128 = _mm_or_si128(riceParamPart128, _mm_slli_epi32(zeroCountPart128, riceParam)); + riceParamPart128 = _mm_xor_si128(_mm_srli_epi32(riceParamPart128, 1), _mm_add_epi32(ma_dr_flac__mm_not_si128(_mm_and_si128(riceParamPart128, _mm_set1_epi32(0x01))), _mm_set1_epi32(0x01))); + if (order <= 4) { + for (i = 0; i < 4; i += 1) { + prediction128 = _mm_mullo_epi32(coefficients128_0, samples128_0); + prediction128 = ma_dr_flac__mm_hadd_epi32(prediction128); + prediction128 = _mm_srai_epi32(prediction128, shift); + prediction128 = _mm_add_epi32(riceParamPart128, prediction128); + samples128_0 = _mm_alignr_epi8(prediction128, samples128_0, 4); + riceParamPart128 = _mm_alignr_epi8(_mm_setzero_si128(), riceParamPart128, 4); + } + } else if (order <= 8) { + for (i = 0; i < 4; i += 1) { + prediction128 = _mm_mullo_epi32(coefficients128_4, samples128_4); + prediction128 = _mm_add_epi32(prediction128, _mm_mullo_epi32(coefficients128_0, samples128_0)); + prediction128 = ma_dr_flac__mm_hadd_epi32(prediction128); + prediction128 = _mm_srai_epi32(prediction128, shift); + prediction128 = _mm_add_epi32(riceParamPart128, prediction128); + samples128_4 = _mm_alignr_epi8(samples128_0, samples128_4, 4); + samples128_0 = _mm_alignr_epi8(prediction128, samples128_0, 4); + riceParamPart128 = _mm_alignr_epi8(_mm_setzero_si128(), riceParamPart128, 4); + } + } else { + for (i = 0; i < 4; i += 1) { + prediction128 = _mm_mullo_epi32(coefficients128_8, samples128_8); + prediction128 = _mm_add_epi32(prediction128, _mm_mullo_epi32(coefficients128_4, samples128_4)); + prediction128 = _mm_add_epi32(prediction128, _mm_mullo_epi32(coefficients128_0, samples128_0)); + prediction128 = ma_dr_flac__mm_hadd_epi32(prediction128); + prediction128 = _mm_srai_epi32(prediction128, shift); + prediction128 = _mm_add_epi32(riceParamPart128, prediction128); + samples128_8 = _mm_alignr_epi8(samples128_4, samples128_8, 4); + samples128_4 = _mm_alignr_epi8(samples128_0, samples128_4, 4); + samples128_0 = _mm_alignr_epi8(prediction128, samples128_0, 4); + riceParamPart128 = _mm_alignr_epi8(_mm_setzero_si128(), riceParamPart128, 4); + } + } + _mm_storeu_si128((__m128i*)pDecodedSamples, samples128_0); + pDecodedSamples += 4; + } + i = (count & ~3); + while (i < (int)count) { + if (!ma_dr_flac__read_rice_parts_x1(bs, riceParam, &zeroCountParts0, &riceParamParts0)) { + return MA_FALSE; + } + riceParamParts0 &= riceParamMask; + riceParamParts0 |= (zeroCountParts0 << riceParam); + riceParamParts0 = (riceParamParts0 >> 1) ^ t[riceParamParts0 & 0x01]; + pDecodedSamples[0] = 
riceParamParts0 + ma_dr_flac__calculate_prediction_32(order, shift, coefficients, pDecodedSamples); + i += 1; + pDecodedSamples += 1; + } + return MA_TRUE; +} +static ma_bool32 ma_dr_flac__decode_samples_with_residual__rice__sse41_64(ma_dr_flac_bs* bs, ma_uint32 count, ma_uint8 riceParam, ma_uint32 order, ma_int32 shift, const ma_int32* coefficients, ma_int32* pSamplesOut) +{ + int i; + ma_uint32 riceParamMask; + ma_int32* pDecodedSamples = pSamplesOut; + ma_int32* pDecodedSamplesEnd = pSamplesOut + (count & ~3); + ma_uint32 zeroCountParts0 = 0; + ma_uint32 zeroCountParts1 = 0; + ma_uint32 zeroCountParts2 = 0; + ma_uint32 zeroCountParts3 = 0; + ma_uint32 riceParamParts0 = 0; + ma_uint32 riceParamParts1 = 0; + ma_uint32 riceParamParts2 = 0; + ma_uint32 riceParamParts3 = 0; + __m128i coefficients128_0; + __m128i coefficients128_4; + __m128i coefficients128_8; + __m128i samples128_0; + __m128i samples128_4; + __m128i samples128_8; + __m128i prediction128; + __m128i riceParamMask128; + const ma_uint32 t[2] = {0x00000000, 0xFFFFFFFF}; + MA_DR_FLAC_ASSERT(order <= 12); + riceParamMask = (ma_uint32)~((~0UL) << riceParam); + riceParamMask128 = _mm_set1_epi32(riceParamMask); + prediction128 = _mm_setzero_si128(); + coefficients128_0 = _mm_setzero_si128(); + coefficients128_4 = _mm_setzero_si128(); + coefficients128_8 = _mm_setzero_si128(); + samples128_0 = _mm_setzero_si128(); + samples128_4 = _mm_setzero_si128(); + samples128_8 = _mm_setzero_si128(); +#if 1 + { + int runningOrder = order; + if (runningOrder >= 4) { + coefficients128_0 = _mm_loadu_si128((const __m128i*)(coefficients + 0)); + samples128_0 = _mm_loadu_si128((const __m128i*)(pSamplesOut - 4)); + runningOrder -= 4; + } else { + switch (runningOrder) { + case 3: coefficients128_0 = _mm_set_epi32(0, coefficients[2], coefficients[1], coefficients[0]); samples128_0 = _mm_set_epi32(pSamplesOut[-1], pSamplesOut[-2], pSamplesOut[-3], 0); break; + case 2: coefficients128_0 = _mm_set_epi32(0, 0, coefficients[1], coefficients[0]); samples128_0 = _mm_set_epi32(pSamplesOut[-1], pSamplesOut[-2], 0, 0); break; + case 1: coefficients128_0 = _mm_set_epi32(0, 0, 0, coefficients[0]); samples128_0 = _mm_set_epi32(pSamplesOut[-1], 0, 0, 0); break; + } + runningOrder = 0; + } + if (runningOrder >= 4) { + coefficients128_4 = _mm_loadu_si128((const __m128i*)(coefficients + 4)); + samples128_4 = _mm_loadu_si128((const __m128i*)(pSamplesOut - 8)); + runningOrder -= 4; + } else { + switch (runningOrder) { + case 3: coefficients128_4 = _mm_set_epi32(0, coefficients[6], coefficients[5], coefficients[4]); samples128_4 = _mm_set_epi32(pSamplesOut[-5], pSamplesOut[-6], pSamplesOut[-7], 0); break; + case 2: coefficients128_4 = _mm_set_epi32(0, 0, coefficients[5], coefficients[4]); samples128_4 = _mm_set_epi32(pSamplesOut[-5], pSamplesOut[-6], 0, 0); break; + case 1: coefficients128_4 = _mm_set_epi32(0, 0, 0, coefficients[4]); samples128_4 = _mm_set_epi32(pSamplesOut[-5], 0, 0, 0); break; + } + runningOrder = 0; + } + if (runningOrder == 4) { + coefficients128_8 = _mm_loadu_si128((const __m128i*)(coefficients + 8)); + samples128_8 = _mm_loadu_si128((const __m128i*)(pSamplesOut - 12)); + runningOrder -= 4; + } else { + switch (runningOrder) { + case 3: coefficients128_8 = _mm_set_epi32(0, coefficients[10], coefficients[9], coefficients[8]); samples128_8 = _mm_set_epi32(pSamplesOut[-9], pSamplesOut[-10], pSamplesOut[-11], 0); break; + case 2: coefficients128_8 = _mm_set_epi32(0, 0, coefficients[9], coefficients[8]); samples128_8 = _mm_set_epi32(pSamplesOut[-9], 
pSamplesOut[-10], 0, 0); break; + case 1: coefficients128_8 = _mm_set_epi32(0, 0, 0, coefficients[8]); samples128_8 = _mm_set_epi32(pSamplesOut[-9], 0, 0, 0); break; + } + runningOrder = 0; + } + coefficients128_0 = _mm_shuffle_epi32(coefficients128_0, _MM_SHUFFLE(0, 1, 2, 3)); + coefficients128_4 = _mm_shuffle_epi32(coefficients128_4, _MM_SHUFFLE(0, 1, 2, 3)); + coefficients128_8 = _mm_shuffle_epi32(coefficients128_8, _MM_SHUFFLE(0, 1, 2, 3)); + } +#else + switch (order) + { + case 12: ((ma_int32*)&coefficients128_8)[0] = coefficients[11]; ((ma_int32*)&samples128_8)[0] = pDecodedSamples[-12]; + case 11: ((ma_int32*)&coefficients128_8)[1] = coefficients[10]; ((ma_int32*)&samples128_8)[1] = pDecodedSamples[-11]; + case 10: ((ma_int32*)&coefficients128_8)[2] = coefficients[ 9]; ((ma_int32*)&samples128_8)[2] = pDecodedSamples[-10]; + case 9: ((ma_int32*)&coefficients128_8)[3] = coefficients[ 8]; ((ma_int32*)&samples128_8)[3] = pDecodedSamples[- 9]; + case 8: ((ma_int32*)&coefficients128_4)[0] = coefficients[ 7]; ((ma_int32*)&samples128_4)[0] = pDecodedSamples[- 8]; + case 7: ((ma_int32*)&coefficients128_4)[1] = coefficients[ 6]; ((ma_int32*)&samples128_4)[1] = pDecodedSamples[- 7]; + case 6: ((ma_int32*)&coefficients128_4)[2] = coefficients[ 5]; ((ma_int32*)&samples128_4)[2] = pDecodedSamples[- 6]; + case 5: ((ma_int32*)&coefficients128_4)[3] = coefficients[ 4]; ((ma_int32*)&samples128_4)[3] = pDecodedSamples[- 5]; + case 4: ((ma_int32*)&coefficients128_0)[0] = coefficients[ 3]; ((ma_int32*)&samples128_0)[0] = pDecodedSamples[- 4]; + case 3: ((ma_int32*)&coefficients128_0)[1] = coefficients[ 2]; ((ma_int32*)&samples128_0)[1] = pDecodedSamples[- 3]; + case 2: ((ma_int32*)&coefficients128_0)[2] = coefficients[ 1]; ((ma_int32*)&samples128_0)[2] = pDecodedSamples[- 2]; + case 1: ((ma_int32*)&coefficients128_0)[3] = coefficients[ 0]; ((ma_int32*)&samples128_0)[3] = pDecodedSamples[- 1]; + } +#endif + while (pDecodedSamples < pDecodedSamplesEnd) { + __m128i zeroCountPart128; + __m128i riceParamPart128; + if (!ma_dr_flac__read_rice_parts_x1(bs, riceParam, &zeroCountParts0, &riceParamParts0) || + !ma_dr_flac__read_rice_parts_x1(bs, riceParam, &zeroCountParts1, &riceParamParts1) || + !ma_dr_flac__read_rice_parts_x1(bs, riceParam, &zeroCountParts2, &riceParamParts2) || + !ma_dr_flac__read_rice_parts_x1(bs, riceParam, &zeroCountParts3, &riceParamParts3)) { + return MA_FALSE; + } + zeroCountPart128 = _mm_set_epi32(zeroCountParts3, zeroCountParts2, zeroCountParts1, zeroCountParts0); + riceParamPart128 = _mm_set_epi32(riceParamParts3, riceParamParts2, riceParamParts1, riceParamParts0); + riceParamPart128 = _mm_and_si128(riceParamPart128, riceParamMask128); + riceParamPart128 = _mm_or_si128(riceParamPart128, _mm_slli_epi32(zeroCountPart128, riceParam)); + riceParamPart128 = _mm_xor_si128(_mm_srli_epi32(riceParamPart128, 1), _mm_add_epi32(ma_dr_flac__mm_not_si128(_mm_and_si128(riceParamPart128, _mm_set1_epi32(1))), _mm_set1_epi32(1))); + for (i = 0; i < 4; i += 1) { + prediction128 = _mm_xor_si128(prediction128, prediction128); + switch (order) + { + case 12: + case 11: prediction128 = _mm_add_epi64(prediction128, _mm_mul_epi32(_mm_shuffle_epi32(coefficients128_8, _MM_SHUFFLE(1, 1, 0, 0)), _mm_shuffle_epi32(samples128_8, _MM_SHUFFLE(1, 1, 0, 0)))); + case 10: + case 9: prediction128 = _mm_add_epi64(prediction128, _mm_mul_epi32(_mm_shuffle_epi32(coefficients128_8, _MM_SHUFFLE(3, 3, 2, 2)), _mm_shuffle_epi32(samples128_8, _MM_SHUFFLE(3, 3, 2, 2)))); + case 8: + case 7: prediction128 = 
_mm_add_epi64(prediction128, _mm_mul_epi32(_mm_shuffle_epi32(coefficients128_4, _MM_SHUFFLE(1, 1, 0, 0)), _mm_shuffle_epi32(samples128_4, _MM_SHUFFLE(1, 1, 0, 0)))); + case 6: + case 5: prediction128 = _mm_add_epi64(prediction128, _mm_mul_epi32(_mm_shuffle_epi32(coefficients128_4, _MM_SHUFFLE(3, 3, 2, 2)), _mm_shuffle_epi32(samples128_4, _MM_SHUFFLE(3, 3, 2, 2)))); + case 4: + case 3: prediction128 = _mm_add_epi64(prediction128, _mm_mul_epi32(_mm_shuffle_epi32(coefficients128_0, _MM_SHUFFLE(1, 1, 0, 0)), _mm_shuffle_epi32(samples128_0, _MM_SHUFFLE(1, 1, 0, 0)))); + case 2: + case 1: prediction128 = _mm_add_epi64(prediction128, _mm_mul_epi32(_mm_shuffle_epi32(coefficients128_0, _MM_SHUFFLE(3, 3, 2, 2)), _mm_shuffle_epi32(samples128_0, _MM_SHUFFLE(3, 3, 2, 2)))); + } + prediction128 = ma_dr_flac__mm_hadd_epi64(prediction128); + prediction128 = ma_dr_flac__mm_srai_epi64(prediction128, shift); + prediction128 = _mm_add_epi32(riceParamPart128, prediction128); + samples128_8 = _mm_alignr_epi8(samples128_4, samples128_8, 4); + samples128_4 = _mm_alignr_epi8(samples128_0, samples128_4, 4); + samples128_0 = _mm_alignr_epi8(prediction128, samples128_0, 4); + riceParamPart128 = _mm_alignr_epi8(_mm_setzero_si128(), riceParamPart128, 4); + } + _mm_storeu_si128((__m128i*)pDecodedSamples, samples128_0); + pDecodedSamples += 4; + } + i = (count & ~3); + while (i < (int)count) { + if (!ma_dr_flac__read_rice_parts_x1(bs, riceParam, &zeroCountParts0, &riceParamParts0)) { + return MA_FALSE; + } + riceParamParts0 &= riceParamMask; + riceParamParts0 |= (zeroCountParts0 << riceParam); + riceParamParts0 = (riceParamParts0 >> 1) ^ t[riceParamParts0 & 0x01]; + pDecodedSamples[0] = riceParamParts0 + ma_dr_flac__calculate_prediction_64(order, shift, coefficients, pDecodedSamples); + i += 1; + pDecodedSamples += 1; + } + return MA_TRUE; +} +static ma_bool32 ma_dr_flac__decode_samples_with_residual__rice__sse41(ma_dr_flac_bs* bs, ma_uint32 bitsPerSample, ma_uint32 count, ma_uint8 riceParam, ma_uint32 lpcOrder, ma_int32 lpcShift, ma_uint32 lpcPrecision, const ma_int32* coefficients, ma_int32* pSamplesOut) +{ + MA_DR_FLAC_ASSERT(bs != NULL); + MA_DR_FLAC_ASSERT(pSamplesOut != NULL); + if (lpcOrder > 0 && lpcOrder <= 12) { + if (ma_dr_flac__use_64_bit_prediction(bitsPerSample, lpcOrder, lpcPrecision)) { + return ma_dr_flac__decode_samples_with_residual__rice__sse41_64(bs, count, riceParam, lpcOrder, lpcShift, coefficients, pSamplesOut); + } else { + return ma_dr_flac__decode_samples_with_residual__rice__sse41_32(bs, count, riceParam, lpcOrder, lpcShift, coefficients, pSamplesOut); + } + } else { + return ma_dr_flac__decode_samples_with_residual__rice__scalar(bs, bitsPerSample, count, riceParam, lpcOrder, lpcShift, lpcPrecision, coefficients, pSamplesOut); + } +} +#endif +#if defined(MA_DR_FLAC_SUPPORT_NEON) +static MA_INLINE void ma_dr_flac__vst2q_s32(ma_int32* p, int32x4x2_t x) +{ + vst1q_s32(p+0, x.val[0]); + vst1q_s32(p+4, x.val[1]); +} +static MA_INLINE void ma_dr_flac__vst2q_u32(ma_uint32* p, uint32x4x2_t x) +{ + vst1q_u32(p+0, x.val[0]); + vst1q_u32(p+4, x.val[1]); +} +static MA_INLINE void ma_dr_flac__vst2q_f32(float* p, float32x4x2_t x) +{ + vst1q_f32(p+0, x.val[0]); + vst1q_f32(p+4, x.val[1]); +} +static MA_INLINE void ma_dr_flac__vst2q_s16(ma_int16* p, int16x4x2_t x) +{ + vst1q_s16(p, vcombine_s16(x.val[0], x.val[1])); +} +static MA_INLINE void ma_dr_flac__vst2q_u16(ma_uint16* p, uint16x4x2_t x) +{ + vst1q_u16(p, vcombine_u16(x.val[0], x.val[1])); +} +static MA_INLINE int32x4_t ma_dr_flac__vdupq_n_s32x4(ma_int32 
x3, ma_int32 x2, ma_int32 x1, ma_int32 x0) +{ + ma_int32 x[4]; + x[3] = x3; + x[2] = x2; + x[1] = x1; + x[0] = x0; + return vld1q_s32(x); +} +static MA_INLINE int32x4_t ma_dr_flac__valignrq_s32_1(int32x4_t a, int32x4_t b) +{ + return vextq_s32(b, a, 1); +} +static MA_INLINE uint32x4_t ma_dr_flac__valignrq_u32_1(uint32x4_t a, uint32x4_t b) +{ + return vextq_u32(b, a, 1); +} +static MA_INLINE int32x2_t ma_dr_flac__vhaddq_s32(int32x4_t x) +{ + int32x2_t r = vadd_s32(vget_high_s32(x), vget_low_s32(x)); + return vpadd_s32(r, r); +} +static MA_INLINE int64x1_t ma_dr_flac__vhaddq_s64(int64x2_t x) +{ + return vadd_s64(vget_high_s64(x), vget_low_s64(x)); +} +static MA_INLINE int32x4_t ma_dr_flac__vrevq_s32(int32x4_t x) +{ + return vrev64q_s32(vcombine_s32(vget_high_s32(x), vget_low_s32(x))); +} +static MA_INLINE int32x4_t ma_dr_flac__vnotq_s32(int32x4_t x) +{ + return veorq_s32(x, vdupq_n_s32(0xFFFFFFFF)); +} +static MA_INLINE uint32x4_t ma_dr_flac__vnotq_u32(uint32x4_t x) +{ + return veorq_u32(x, vdupq_n_u32(0xFFFFFFFF)); +} +static ma_bool32 ma_dr_flac__decode_samples_with_residual__rice__neon_32(ma_dr_flac_bs* bs, ma_uint32 count, ma_uint8 riceParam, ma_uint32 order, ma_int32 shift, const ma_int32* coefficients, ma_int32* pSamplesOut) +{ + int i; + ma_uint32 riceParamMask; + ma_int32* pDecodedSamples = pSamplesOut; + ma_int32* pDecodedSamplesEnd = pSamplesOut + (count & ~3); + ma_uint32 zeroCountParts[4]; + ma_uint32 riceParamParts[4]; + int32x4_t coefficients128_0; + int32x4_t coefficients128_4; + int32x4_t coefficients128_8; + int32x4_t samples128_0; + int32x4_t samples128_4; + int32x4_t samples128_8; + uint32x4_t riceParamMask128; + int32x4_t riceParam128; + int32x2_t shift64; + uint32x4_t one128; + const ma_uint32 t[2] = {0x00000000, 0xFFFFFFFF}; + riceParamMask = (ma_uint32)~((~0UL) << riceParam); + riceParamMask128 = vdupq_n_u32(riceParamMask); + riceParam128 = vdupq_n_s32(riceParam); + shift64 = vdup_n_s32(-shift); + one128 = vdupq_n_u32(1); + { + int runningOrder = order; + ma_int32 tempC[4] = {0, 0, 0, 0}; + ma_int32 tempS[4] = {0, 0, 0, 0}; + if (runningOrder >= 4) { + coefficients128_0 = vld1q_s32(coefficients + 0); + samples128_0 = vld1q_s32(pSamplesOut - 4); + runningOrder -= 4; + } else { + switch (runningOrder) { + case 3: tempC[2] = coefficients[2]; tempS[1] = pSamplesOut[-3]; + case 2: tempC[1] = coefficients[1]; tempS[2] = pSamplesOut[-2]; + case 1: tempC[0] = coefficients[0]; tempS[3] = pSamplesOut[-1]; + } + coefficients128_0 = vld1q_s32(tempC); + samples128_0 = vld1q_s32(tempS); + runningOrder = 0; + } + if (runningOrder >= 4) { + coefficients128_4 = vld1q_s32(coefficients + 4); + samples128_4 = vld1q_s32(pSamplesOut - 8); + runningOrder -= 4; + } else { + switch (runningOrder) { + case 3: tempC[2] = coefficients[6]; tempS[1] = pSamplesOut[-7]; + case 2: tempC[1] = coefficients[5]; tempS[2] = pSamplesOut[-6]; + case 1: tempC[0] = coefficients[4]; tempS[3] = pSamplesOut[-5]; + } + coefficients128_4 = vld1q_s32(tempC); + samples128_4 = vld1q_s32(tempS); + runningOrder = 0; + } + if (runningOrder == 4) { + coefficients128_8 = vld1q_s32(coefficients + 8); + samples128_8 = vld1q_s32(pSamplesOut - 12); + runningOrder -= 4; + } else { + switch (runningOrder) { + case 3: tempC[2] = coefficients[10]; tempS[1] = pSamplesOut[-11]; + case 2: tempC[1] = coefficients[ 9]; tempS[2] = pSamplesOut[-10]; + case 1: tempC[0] = coefficients[ 8]; tempS[3] = pSamplesOut[- 9]; + } + coefficients128_8 = vld1q_s32(tempC); + samples128_8 = vld1q_s32(tempS); + runningOrder = 0; + } + coefficients128_0 
= ma_dr_flac__vrevq_s32(coefficients128_0); + coefficients128_4 = ma_dr_flac__vrevq_s32(coefficients128_4); + coefficients128_8 = ma_dr_flac__vrevq_s32(coefficients128_8); + } + while (pDecodedSamples < pDecodedSamplesEnd) { + int32x4_t prediction128; + int32x2_t prediction64; + uint32x4_t zeroCountPart128; + uint32x4_t riceParamPart128; + if (!ma_dr_flac__read_rice_parts_x1(bs, riceParam, &zeroCountParts[0], &riceParamParts[0]) || + !ma_dr_flac__read_rice_parts_x1(bs, riceParam, &zeroCountParts[1], &riceParamParts[1]) || + !ma_dr_flac__read_rice_parts_x1(bs, riceParam, &zeroCountParts[2], &riceParamParts[2]) || + !ma_dr_flac__read_rice_parts_x1(bs, riceParam, &zeroCountParts[3], &riceParamParts[3])) { + return MA_FALSE; + } + zeroCountPart128 = vld1q_u32(zeroCountParts); + riceParamPart128 = vld1q_u32(riceParamParts); + riceParamPart128 = vandq_u32(riceParamPart128, riceParamMask128); + riceParamPart128 = vorrq_u32(riceParamPart128, vshlq_u32(zeroCountPart128, riceParam128)); + riceParamPart128 = veorq_u32(vshrq_n_u32(riceParamPart128, 1), vaddq_u32(ma_dr_flac__vnotq_u32(vandq_u32(riceParamPart128, one128)), one128)); + if (order <= 4) { + for (i = 0; i < 4; i += 1) { + prediction128 = vmulq_s32(coefficients128_0, samples128_0); + prediction64 = ma_dr_flac__vhaddq_s32(prediction128); + prediction64 = vshl_s32(prediction64, shift64); + prediction64 = vadd_s32(prediction64, vget_low_s32(vreinterpretq_s32_u32(riceParamPart128))); + samples128_0 = ma_dr_flac__valignrq_s32_1(vcombine_s32(prediction64, vdup_n_s32(0)), samples128_0); + riceParamPart128 = ma_dr_flac__valignrq_u32_1(vdupq_n_u32(0), riceParamPart128); + } + } else if (order <= 8) { + for (i = 0; i < 4; i += 1) { + prediction128 = vmulq_s32(coefficients128_4, samples128_4); + prediction128 = vmlaq_s32(prediction128, coefficients128_0, samples128_0); + prediction64 = ma_dr_flac__vhaddq_s32(prediction128); + prediction64 = vshl_s32(prediction64, shift64); + prediction64 = vadd_s32(prediction64, vget_low_s32(vreinterpretq_s32_u32(riceParamPart128))); + samples128_4 = ma_dr_flac__valignrq_s32_1(samples128_0, samples128_4); + samples128_0 = ma_dr_flac__valignrq_s32_1(vcombine_s32(prediction64, vdup_n_s32(0)), samples128_0); + riceParamPart128 = ma_dr_flac__valignrq_u32_1(vdupq_n_u32(0), riceParamPart128); + } + } else { + for (i = 0; i < 4; i += 1) { + prediction128 = vmulq_s32(coefficients128_8, samples128_8); + prediction128 = vmlaq_s32(prediction128, coefficients128_4, samples128_4); + prediction128 = vmlaq_s32(prediction128, coefficients128_0, samples128_0); + prediction64 = ma_dr_flac__vhaddq_s32(prediction128); + prediction64 = vshl_s32(prediction64, shift64); + prediction64 = vadd_s32(prediction64, vget_low_s32(vreinterpretq_s32_u32(riceParamPart128))); + samples128_8 = ma_dr_flac__valignrq_s32_1(samples128_4, samples128_8); + samples128_4 = ma_dr_flac__valignrq_s32_1(samples128_0, samples128_4); + samples128_0 = ma_dr_flac__valignrq_s32_1(vcombine_s32(prediction64, vdup_n_s32(0)), samples128_0); + riceParamPart128 = ma_dr_flac__valignrq_u32_1(vdupq_n_u32(0), riceParamPart128); + } + } + vst1q_s32(pDecodedSamples, samples128_0); + pDecodedSamples += 4; + } + i = (count & ~3); + while (i < (int)count) { + if (!ma_dr_flac__read_rice_parts_x1(bs, riceParam, &zeroCountParts[0], &riceParamParts[0])) { + return MA_FALSE; + } + riceParamParts[0] &= riceParamMask; + riceParamParts[0] |= (zeroCountParts[0] << riceParam); + riceParamParts[0] = (riceParamParts[0] >> 1) ^ t[riceParamParts[0] & 0x01]; + pDecodedSamples[0] = riceParamParts[0] + 
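+/*
+** Residual assembly: each Rice code is rebuilt as (zeroCount << riceParam) | lowBits and
+** then "unfolded" from the zigzag mapping back to a signed value. The NEON form above
+** computes (x >> 1) ^ -(x & 1) with veorq/vshrq plus a two's-complement negate built
+** from vnot + 1; the scalar tail does the same thing with the t[] XOR table. Equivalent
+** scalar sketch (hypothetical helper name):
+**
+**   static ma_int32 rice_unfold(ma_uint32 zeroCount, ma_uint32 lowBits, ma_uint8 riceParam)
+**   {
+**       ma_uint32 u = (zeroCount << riceParam) | lowBits;
+**       return (ma_int32)((u >> 1) ^ (~(u & 1) + 1));
+**   }
+**
+** So 0 -> 0, 1 -> -1, 2 -> 1, 3 -> -2, and so on.
+*/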
ma_dr_flac__calculate_prediction_32(order, shift, coefficients, pDecodedSamples); + i += 1; + pDecodedSamples += 1; + } + return MA_TRUE; +} +static ma_bool32 ma_dr_flac__decode_samples_with_residual__rice__neon_64(ma_dr_flac_bs* bs, ma_uint32 count, ma_uint8 riceParam, ma_uint32 order, ma_int32 shift, const ma_int32* coefficients, ma_int32* pSamplesOut) +{ + int i; + ma_uint32 riceParamMask; + ma_int32* pDecodedSamples = pSamplesOut; + ma_int32* pDecodedSamplesEnd = pSamplesOut + (count & ~3); + ma_uint32 zeroCountParts[4]; + ma_uint32 riceParamParts[4]; + int32x4_t coefficients128_0; + int32x4_t coefficients128_4; + int32x4_t coefficients128_8; + int32x4_t samples128_0; + int32x4_t samples128_4; + int32x4_t samples128_8; + uint32x4_t riceParamMask128; + int32x4_t riceParam128; + int64x1_t shift64; + uint32x4_t one128; + int64x2_t prediction128 = { 0 }; + uint32x4_t zeroCountPart128; + uint32x4_t riceParamPart128; + const ma_uint32 t[2] = {0x00000000, 0xFFFFFFFF}; + riceParamMask = (ma_uint32)~((~0UL) << riceParam); + riceParamMask128 = vdupq_n_u32(riceParamMask); + riceParam128 = vdupq_n_s32(riceParam); + shift64 = vdup_n_s64(-shift); + one128 = vdupq_n_u32(1); + { + int runningOrder = order; + ma_int32 tempC[4] = {0, 0, 0, 0}; + ma_int32 tempS[4] = {0, 0, 0, 0}; + if (runningOrder >= 4) { + coefficients128_0 = vld1q_s32(coefficients + 0); + samples128_0 = vld1q_s32(pSamplesOut - 4); + runningOrder -= 4; + } else { + switch (runningOrder) { + case 3: tempC[2] = coefficients[2]; tempS[1] = pSamplesOut[-3]; + case 2: tempC[1] = coefficients[1]; tempS[2] = pSamplesOut[-2]; + case 1: tempC[0] = coefficients[0]; tempS[3] = pSamplesOut[-1]; + } + coefficients128_0 = vld1q_s32(tempC); + samples128_0 = vld1q_s32(tempS); + runningOrder = 0; + } + if (runningOrder >= 4) { + coefficients128_4 = vld1q_s32(coefficients + 4); + samples128_4 = vld1q_s32(pSamplesOut - 8); + runningOrder -= 4; + } else { + switch (runningOrder) { + case 3: tempC[2] = coefficients[6]; tempS[1] = pSamplesOut[-7]; + case 2: tempC[1] = coefficients[5]; tempS[2] = pSamplesOut[-6]; + case 1: tempC[0] = coefficients[4]; tempS[3] = pSamplesOut[-5]; + } + coefficients128_4 = vld1q_s32(tempC); + samples128_4 = vld1q_s32(tempS); + runningOrder = 0; + } + if (runningOrder == 4) { + coefficients128_8 = vld1q_s32(coefficients + 8); + samples128_8 = vld1q_s32(pSamplesOut - 12); + runningOrder -= 4; + } else { + switch (runningOrder) { + case 3: tempC[2] = coefficients[10]; tempS[1] = pSamplesOut[-11]; + case 2: tempC[1] = coefficients[ 9]; tempS[2] = pSamplesOut[-10]; + case 1: tempC[0] = coefficients[ 8]; tempS[3] = pSamplesOut[- 9]; + } + coefficients128_8 = vld1q_s32(tempC); + samples128_8 = vld1q_s32(tempS); + runningOrder = 0; + } + coefficients128_0 = ma_dr_flac__vrevq_s32(coefficients128_0); + coefficients128_4 = ma_dr_flac__vrevq_s32(coefficients128_4); + coefficients128_8 = ma_dr_flac__vrevq_s32(coefficients128_8); + } + while (pDecodedSamples < pDecodedSamplesEnd) { + if (!ma_dr_flac__read_rice_parts_x1(bs, riceParam, &zeroCountParts[0], &riceParamParts[0]) || + !ma_dr_flac__read_rice_parts_x1(bs, riceParam, &zeroCountParts[1], &riceParamParts[1]) || + !ma_dr_flac__read_rice_parts_x1(bs, riceParam, &zeroCountParts[2], &riceParamParts[2]) || + !ma_dr_flac__read_rice_parts_x1(bs, riceParam, &zeroCountParts[3], &riceParamParts[3])) { + return MA_FALSE; + } + zeroCountPart128 = vld1q_u32(zeroCountParts); + riceParamPart128 = vld1q_u32(riceParamParts); + riceParamPart128 = vandq_u32(riceParamPart128, riceParamMask128); + 
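+/*
+** Why a second, 64-bit variant: the predictor accumulates order products of a sample and
+** a coefficient, so the partial sums need roughly bitsPerSample + lpcPrecision +
+** log2(order) bits. ma_dr_flac__use_64_bit_prediction() (defined earlier in this file)
+** selects this path when that budget can exceed 32 bits. A rough sketch of the idea,
+** not the library's exact rule:
+**
+**   static ma_bool32 needs_wide_accumulator(ma_uint32 bps, ma_uint32 order, ma_uint32 precision)
+**   {
+**       ma_uint32 log2Order = 0;
+**       while ((1u << log2Order) < order) { log2Order += 1; }
+**       return (bps + precision + log2Order) > 32;
+**   }
+*/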
riceParamPart128 = vorrq_u32(riceParamPart128, vshlq_u32(zeroCountPart128, riceParam128)); + riceParamPart128 = veorq_u32(vshrq_n_u32(riceParamPart128, 1), vaddq_u32(ma_dr_flac__vnotq_u32(vandq_u32(riceParamPart128, one128)), one128)); + for (i = 0; i < 4; i += 1) { + int64x1_t prediction64; + prediction128 = veorq_s64(prediction128, prediction128); + switch (order) + { + case 12: + case 11: prediction128 = vaddq_s64(prediction128, vmull_s32(vget_low_s32(coefficients128_8), vget_low_s32(samples128_8))); + case 10: + case 9: prediction128 = vaddq_s64(prediction128, vmull_s32(vget_high_s32(coefficients128_8), vget_high_s32(samples128_8))); + case 8: + case 7: prediction128 = vaddq_s64(prediction128, vmull_s32(vget_low_s32(coefficients128_4), vget_low_s32(samples128_4))); + case 6: + case 5: prediction128 = vaddq_s64(prediction128, vmull_s32(vget_high_s32(coefficients128_4), vget_high_s32(samples128_4))); + case 4: + case 3: prediction128 = vaddq_s64(prediction128, vmull_s32(vget_low_s32(coefficients128_0), vget_low_s32(samples128_0))); + case 2: + case 1: prediction128 = vaddq_s64(prediction128, vmull_s32(vget_high_s32(coefficients128_0), vget_high_s32(samples128_0))); + } + prediction64 = ma_dr_flac__vhaddq_s64(prediction128); + prediction64 = vshl_s64(prediction64, shift64); + prediction64 = vadd_s64(prediction64, vdup_n_s64(vgetq_lane_u32(riceParamPart128, 0))); + samples128_8 = ma_dr_flac__valignrq_s32_1(samples128_4, samples128_8); + samples128_4 = ma_dr_flac__valignrq_s32_1(samples128_0, samples128_4); + samples128_0 = ma_dr_flac__valignrq_s32_1(vcombine_s32(vreinterpret_s32_s64(prediction64), vdup_n_s32(0)), samples128_0); + riceParamPart128 = ma_dr_flac__valignrq_u32_1(vdupq_n_u32(0), riceParamPart128); + } + vst1q_s32(pDecodedSamples, samples128_0); + pDecodedSamples += 4; + } + i = (count & ~3); + while (i < (int)count) { + if (!ma_dr_flac__read_rice_parts_x1(bs, riceParam, &zeroCountParts[0], &riceParamParts[0])) { + return MA_FALSE; + } + riceParamParts[0] &= riceParamMask; + riceParamParts[0] |= (zeroCountParts[0] << riceParam); + riceParamParts[0] = (riceParamParts[0] >> 1) ^ t[riceParamParts[0] & 0x01]; + pDecodedSamples[0] = riceParamParts[0] + ma_dr_flac__calculate_prediction_64(order, shift, coefficients, pDecodedSamples); + i += 1; + pDecodedSamples += 1; + } + return MA_TRUE; +} +static ma_bool32 ma_dr_flac__decode_samples_with_residual__rice__neon(ma_dr_flac_bs* bs, ma_uint32 bitsPerSample, ma_uint32 count, ma_uint8 riceParam, ma_uint32 lpcOrder, ma_int32 lpcShift, ma_uint32 lpcPrecision, const ma_int32* coefficients, ma_int32* pSamplesOut) +{ + MA_DR_FLAC_ASSERT(bs != NULL); + MA_DR_FLAC_ASSERT(pSamplesOut != NULL); + if (lpcOrder > 0 && lpcOrder <= 12) { + if (ma_dr_flac__use_64_bit_prediction(bitsPerSample, lpcOrder, lpcPrecision)) { + return ma_dr_flac__decode_samples_with_residual__rice__neon_64(bs, count, riceParam, lpcOrder, lpcShift, coefficients, pSamplesOut); + } else { + return ma_dr_flac__decode_samples_with_residual__rice__neon_32(bs, count, riceParam, lpcOrder, lpcShift, coefficients, pSamplesOut); + } + } else { + return ma_dr_flac__decode_samples_with_residual__rice__scalar(bs, bitsPerSample, count, riceParam, lpcOrder, lpcShift, lpcPrecision, coefficients, pSamplesOut); + } +} +#endif +static ma_bool32 ma_dr_flac__decode_samples_with_residual__rice(ma_dr_flac_bs* bs, ma_uint32 bitsPerSample, ma_uint32 count, ma_uint8 riceParam, ma_uint32 lpcOrder, ma_int32 lpcShift, ma_uint32 lpcPrecision, const ma_int32* coefficients, ma_int32* pSamplesOut) +{ +#if 
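+/*
+** Dispatch pattern: SIMD support is a compile-time gate (MA_DR_FLAC_SUPPORT_SSE41 /
+** MA_DR_FLAC_SUPPORT_NEON) combined with a runtime flag (ma_dr_flac__gIsSSE41Supported /
+** ma_dr_flac__gIsNEONSupported) probed once at init, falling back to the scalar decoder
+** otherwise. General shape of the pattern, with hypothetical names:
+**
+**   typedef ma_bool32 (*decode_fn)(void* args);
+**   static decode_fn choose_decoder(void)
+**   {
+**   #if defined(HAVE_SIMD)
+**       if (cpu_has_simd()) { return decode_simd; }    -- runtime check, cached at startup
+**   #endif
+**       return decode_scalar;
+**   }
+*/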
defined(MA_DR_FLAC_SUPPORT_SSE41) + if (ma_dr_flac__gIsSSE41Supported) { + return ma_dr_flac__decode_samples_with_residual__rice__sse41(bs, bitsPerSample, count, riceParam, lpcOrder, lpcShift, lpcPrecision, coefficients, pSamplesOut); + } else +#elif defined(MA_DR_FLAC_SUPPORT_NEON) + if (ma_dr_flac__gIsNEONSupported) { + return ma_dr_flac__decode_samples_with_residual__rice__neon(bs, bitsPerSample, count, riceParam, lpcOrder, lpcShift, lpcPrecision, coefficients, pSamplesOut); + } else +#endif + { + #if 0 + return ma_dr_flac__decode_samples_with_residual__rice__reference(bs, bitsPerSample, count, riceParam, lpcOrder, lpcShift, lpcPrecision, coefficients, pSamplesOut); + #else + return ma_dr_flac__decode_samples_with_residual__rice__scalar(bs, bitsPerSample, count, riceParam, lpcOrder, lpcShift, lpcPrecision, coefficients, pSamplesOut); + #endif + } +} +static ma_bool32 ma_dr_flac__read_and_seek_residual__rice(ma_dr_flac_bs* bs, ma_uint32 count, ma_uint8 riceParam) +{ + ma_uint32 i; + MA_DR_FLAC_ASSERT(bs != NULL); + for (i = 0; i < count; ++i) { + if (!ma_dr_flac__seek_rice_parts(bs, riceParam)) { + return MA_FALSE; + } + } + return MA_TRUE; +} +#if defined(__clang__) +__attribute__((no_sanitize("signed-integer-overflow"))) +#endif +static ma_bool32 ma_dr_flac__decode_samples_with_residual__unencoded(ma_dr_flac_bs* bs, ma_uint32 bitsPerSample, ma_uint32 count, ma_uint8 unencodedBitsPerSample, ma_uint32 lpcOrder, ma_int32 lpcShift, ma_uint32 lpcPrecision, const ma_int32* coefficients, ma_int32* pSamplesOut) +{ + ma_uint32 i; + MA_DR_FLAC_ASSERT(bs != NULL); + MA_DR_FLAC_ASSERT(unencodedBitsPerSample <= 31); + MA_DR_FLAC_ASSERT(pSamplesOut != NULL); + for (i = 0; i < count; ++i) { + if (unencodedBitsPerSample > 0) { + if (!ma_dr_flac__read_int32(bs, unencodedBitsPerSample, pSamplesOut + i)) { + return MA_FALSE; + } + } else { + pSamplesOut[i] = 0; + } + if (ma_dr_flac__use_64_bit_prediction(bitsPerSample, lpcOrder, lpcPrecision)) { + pSamplesOut[i] += ma_dr_flac__calculate_prediction_64(lpcOrder, lpcShift, coefficients, pSamplesOut + i); + } else { + pSamplesOut[i] += ma_dr_flac__calculate_prediction_32(lpcOrder, lpcShift, coefficients, pSamplesOut + i); + } + } + return MA_TRUE; +} +static ma_bool32 ma_dr_flac__decode_samples_with_residual(ma_dr_flac_bs* bs, ma_uint32 bitsPerSample, ma_uint32 blockSize, ma_uint32 lpcOrder, ma_int32 lpcShift, ma_uint32 lpcPrecision, const ma_int32* coefficients, ma_int32* pDecodedSamples) +{ + ma_uint8 residualMethod; + ma_uint8 partitionOrder; + ma_uint32 samplesInPartition; + ma_uint32 partitionsRemaining; + MA_DR_FLAC_ASSERT(bs != NULL); + MA_DR_FLAC_ASSERT(blockSize != 0); + MA_DR_FLAC_ASSERT(pDecodedSamples != NULL); + if (!ma_dr_flac__read_uint8(bs, 2, &residualMethod)) { + return MA_FALSE; + } + if (residualMethod != MA_DR_FLAC_RESIDUAL_CODING_METHOD_PARTITIONED_RICE && residualMethod != MA_DR_FLAC_RESIDUAL_CODING_METHOD_PARTITIONED_RICE2) { + return MA_FALSE; + } + pDecodedSamples += lpcOrder; + if (!ma_dr_flac__read_uint8(bs, 4, &partitionOrder)) { + return MA_FALSE; + } + if (partitionOrder > 8) { + return MA_FALSE; + } + if ((blockSize / (1 << partitionOrder)) < lpcOrder) { + return MA_FALSE; + } + samplesInPartition = (blockSize / (1 << partitionOrder)) - lpcOrder; + partitionsRemaining = (1 << partitionOrder); + for (;;) { + ma_uint8 riceParam = 0; + if (residualMethod == MA_DR_FLAC_RESIDUAL_CODING_METHOD_PARTITIONED_RICE) { + if (!ma_dr_flac__read_uint8(bs, 4, &riceParam)) { + return MA_FALSE; + } + if (riceParam == 15) { + riceParam = 0xFF; + 
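+/*
+** Partitioned Rice layout: the residual is split into 2^partitionOrder partitions, each
+** with its own Rice parameter (4 bits for method RICE, 5 for RICE2; the all-ones value,
+** 15 or 31, escapes to a raw fixed-width partition). The first partition is short by the
+** predictor order because the warm-up samples are stored verbatim. Sample-count sketch:
+**
+**   partitionSamples = blockSize >> partitionOrder;
+**   firstPartition   = partitionSamples - lpcOrder;    -- later partitions use the full count
+**
+** Hence the validation above that blockSize / 2^partitionOrder is not smaller than the
+** predictor order.
+*/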
} + } else if (residualMethod == MA_DR_FLAC_RESIDUAL_CODING_METHOD_PARTITIONED_RICE2) { + if (!ma_dr_flac__read_uint8(bs, 5, &riceParam)) { + return MA_FALSE; + } + if (riceParam == 31) { + riceParam = 0xFF; + } + } + if (riceParam != 0xFF) { + if (!ma_dr_flac__decode_samples_with_residual__rice(bs, bitsPerSample, samplesInPartition, riceParam, lpcOrder, lpcShift, lpcPrecision, coefficients, pDecodedSamples)) { + return MA_FALSE; + } + } else { + ma_uint8 unencodedBitsPerSample = 0; + if (!ma_dr_flac__read_uint8(bs, 5, &unencodedBitsPerSample)) { + return MA_FALSE; + } + if (!ma_dr_flac__decode_samples_with_residual__unencoded(bs, bitsPerSample, samplesInPartition, unencodedBitsPerSample, lpcOrder, lpcShift, lpcPrecision, coefficients, pDecodedSamples)) { + return MA_FALSE; + } + } + pDecodedSamples += samplesInPartition; + if (partitionsRemaining == 1) { + break; + } + partitionsRemaining -= 1; + if (partitionOrder != 0) { + samplesInPartition = blockSize / (1 << partitionOrder); + } + } + return MA_TRUE; +} +static ma_bool32 ma_dr_flac__read_and_seek_residual(ma_dr_flac_bs* bs, ma_uint32 blockSize, ma_uint32 order) +{ + ma_uint8 residualMethod; + ma_uint8 partitionOrder; + ma_uint32 samplesInPartition; + ma_uint32 partitionsRemaining; + MA_DR_FLAC_ASSERT(bs != NULL); + MA_DR_FLAC_ASSERT(blockSize != 0); + if (!ma_dr_flac__read_uint8(bs, 2, &residualMethod)) { + return MA_FALSE; + } + if (residualMethod != MA_DR_FLAC_RESIDUAL_CODING_METHOD_PARTITIONED_RICE && residualMethod != MA_DR_FLAC_RESIDUAL_CODING_METHOD_PARTITIONED_RICE2) { + return MA_FALSE; + } + if (!ma_dr_flac__read_uint8(bs, 4, &partitionOrder)) { + return MA_FALSE; + } + if (partitionOrder > 8) { + return MA_FALSE; + } + if ((blockSize / (1 << partitionOrder)) <= order) { + return MA_FALSE; + } + samplesInPartition = (blockSize / (1 << partitionOrder)) - order; + partitionsRemaining = (1 << partitionOrder); + for (;;) + { + ma_uint8 riceParam = 0; + if (residualMethod == MA_DR_FLAC_RESIDUAL_CODING_METHOD_PARTITIONED_RICE) { + if (!ma_dr_flac__read_uint8(bs, 4, &riceParam)) { + return MA_FALSE; + } + if (riceParam == 15) { + riceParam = 0xFF; + } + } else if (residualMethod == MA_DR_FLAC_RESIDUAL_CODING_METHOD_PARTITIONED_RICE2) { + if (!ma_dr_flac__read_uint8(bs, 5, &riceParam)) { + return MA_FALSE; + } + if (riceParam == 31) { + riceParam = 0xFF; + } + } + if (riceParam != 0xFF) { + if (!ma_dr_flac__read_and_seek_residual__rice(bs, samplesInPartition, riceParam)) { + return MA_FALSE; + } + } else { + ma_uint8 unencodedBitsPerSample = 0; + if (!ma_dr_flac__read_uint8(bs, 5, &unencodedBitsPerSample)) { + return MA_FALSE; + } + if (!ma_dr_flac__seek_bits(bs, unencodedBitsPerSample * samplesInPartition)) { + return MA_FALSE; + } + } + if (partitionsRemaining == 1) { + break; + } + partitionsRemaining -= 1; + samplesInPartition = blockSize / (1 << partitionOrder); + } + return MA_TRUE; +} +static ma_bool32 ma_dr_flac__decode_samples__constant(ma_dr_flac_bs* bs, ma_uint32 blockSize, ma_uint32 subframeBitsPerSample, ma_int32* pDecodedSamples) +{ + ma_uint32 i; + ma_int32 sample; + if (!ma_dr_flac__read_int32(bs, subframeBitsPerSample, &sample)) { + return MA_FALSE; + } + for (i = 0; i < blockSize; ++i) { + pDecodedSamples[i] = sample; + } + return MA_TRUE; +} +static ma_bool32 ma_dr_flac__decode_samples__verbatim(ma_dr_flac_bs* bs, ma_uint32 blockSize, ma_uint32 subframeBitsPerSample, ma_int32* pDecodedSamples) +{ + ma_uint32 i; + for (i = 0; i < blockSize; ++i) { + ma_int32 sample; + if (!ma_dr_flac__read_int32(bs, 
subframeBitsPerSample, &sample)) { + return MA_FALSE; + } + pDecodedSamples[i] = sample; + } + return MA_TRUE; +} +static ma_bool32 ma_dr_flac__decode_samples__fixed(ma_dr_flac_bs* bs, ma_uint32 blockSize, ma_uint32 subframeBitsPerSample, ma_uint8 lpcOrder, ma_int32* pDecodedSamples) +{ + ma_uint32 i; + static ma_int32 lpcCoefficientsTable[5][4] = { + {0, 0, 0, 0}, + {1, 0, 0, 0}, + {2, -1, 0, 0}, + {3, -3, 1, 0}, + {4, -6, 4, -1} + }; + for (i = 0; i < lpcOrder; ++i) { + ma_int32 sample; + if (!ma_dr_flac__read_int32(bs, subframeBitsPerSample, &sample)) { + return MA_FALSE; + } + pDecodedSamples[i] = sample; + } + if (!ma_dr_flac__decode_samples_with_residual(bs, subframeBitsPerSample, blockSize, lpcOrder, 0, 4, lpcCoefficientsTable[lpcOrder], pDecodedSamples)) { + return MA_FALSE; + } + return MA_TRUE; +} +static ma_bool32 ma_dr_flac__decode_samples__lpc(ma_dr_flac_bs* bs, ma_uint32 blockSize, ma_uint32 bitsPerSample, ma_uint8 lpcOrder, ma_int32* pDecodedSamples) +{ + ma_uint8 i; + ma_uint8 lpcPrecision; + ma_int8 lpcShift; + ma_int32 coefficients[32]; + for (i = 0; i < lpcOrder; ++i) { + ma_int32 sample; + if (!ma_dr_flac__read_int32(bs, bitsPerSample, &sample)) { + return MA_FALSE; + } + pDecodedSamples[i] = sample; + } + if (!ma_dr_flac__read_uint8(bs, 4, &lpcPrecision)) { + return MA_FALSE; + } + if (lpcPrecision == 15) { + return MA_FALSE; + } + lpcPrecision += 1; + if (!ma_dr_flac__read_int8(bs, 5, &lpcShift)) { + return MA_FALSE; + } + if (lpcShift < 0) { + return MA_FALSE; + } + MA_DR_FLAC_ZERO_MEMORY(coefficients, sizeof(coefficients)); + for (i = 0; i < lpcOrder; ++i) { + if (!ma_dr_flac__read_int32(bs, lpcPrecision, coefficients + i)) { + return MA_FALSE; + } + } + if (!ma_dr_flac__decode_samples_with_residual(bs, bitsPerSample, blockSize, lpcOrder, lpcShift, lpcPrecision, coefficients, pDecodedSamples)) { + return MA_FALSE; + } + return MA_TRUE; +} +static ma_bool32 ma_dr_flac__read_next_flac_frame_header(ma_dr_flac_bs* bs, ma_uint8 streaminfoBitsPerSample, ma_dr_flac_frame_header* header) +{ + const ma_uint32 sampleRateTable[12] = {0, 88200, 176400, 192000, 8000, 16000, 22050, 24000, 32000, 44100, 48000, 96000}; + const ma_uint8 bitsPerSampleTable[8] = {0, 8, 12, (ma_uint8)-1, 16, 20, 24, (ma_uint8)-1}; + MA_DR_FLAC_ASSERT(bs != NULL); + MA_DR_FLAC_ASSERT(header != NULL); + for (;;) { + ma_uint8 crc8 = 0xCE; + ma_uint8 reserved = 0; + ma_uint8 blockingStrategy = 0; + ma_uint8 blockSize = 0; + ma_uint8 sampleRate = 0; + ma_uint8 channelAssignment = 0; + ma_uint8 bitsPerSample = 0; + ma_bool32 isVariableBlockSize; + if (!ma_dr_flac__find_and_seek_to_next_sync_code(bs)) { + return MA_FALSE; + } + if (!ma_dr_flac__read_uint8(bs, 1, &reserved)) { + return MA_FALSE; + } + if (reserved == 1) { + continue; + } + crc8 = ma_dr_flac_crc8(crc8, reserved, 1); + if (!ma_dr_flac__read_uint8(bs, 1, &blockingStrategy)) { + return MA_FALSE; + } + crc8 = ma_dr_flac_crc8(crc8, blockingStrategy, 1); + if (!ma_dr_flac__read_uint8(bs, 4, &blockSize)) { + return MA_FALSE; + } + if (blockSize == 0) { + continue; + } + crc8 = ma_dr_flac_crc8(crc8, blockSize, 4); + if (!ma_dr_flac__read_uint8(bs, 4, &sampleRate)) { + return MA_FALSE; + } + crc8 = ma_dr_flac_crc8(crc8, sampleRate, 4); + if (!ma_dr_flac__read_uint8(bs, 4, &channelAssignment)) { + return MA_FALSE; + } + if (channelAssignment > 10) { + continue; + } + crc8 = ma_dr_flac_crc8(crc8, channelAssignment, 4); + if (!ma_dr_flac__read_uint8(bs, 3, &bitsPerSample)) { + return MA_FALSE; + } + if (bitsPerSample == 3 || bitsPerSample == 7) { + 
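+/*
+** Fixed subframes use the hard-coded polynomial predictors in lpcCoefficientsTable; the
+** rows are binomial coefficients with alternating signs (Pascal's triangle), so order 2
+** predicts 2*s[n-1] - s[n-2] and order 4 predicts 4*s[n-1] - 6*s[n-2] + 4*s[n-3] - s[n-4].
+** With shift = 0 they run through the same residual path as real LPC coefficients.
+** Worked example for order 2:
+**
+**   s[n-2] = 10, s[n-1] = 14, residual = -1
+**   prediction = 2*14 - 10 = 18
+**   s[n] = prediction + residual = 17
+*/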
continue; + } + crc8 = ma_dr_flac_crc8(crc8, bitsPerSample, 3); + if (!ma_dr_flac__read_uint8(bs, 1, &reserved)) { + return MA_FALSE; + } + if (reserved == 1) { + continue; + } + crc8 = ma_dr_flac_crc8(crc8, reserved, 1); + isVariableBlockSize = blockingStrategy == 1; + if (isVariableBlockSize) { + ma_uint64 pcmFrameNumber; + ma_result result = ma_dr_flac__read_utf8_coded_number(bs, &pcmFrameNumber, &crc8); + if (result != MA_SUCCESS) { + if (result == MA_AT_END) { + return MA_FALSE; + } else { + continue; + } + } + header->flacFrameNumber = 0; + header->pcmFrameNumber = pcmFrameNumber; + } else { + ma_uint64 flacFrameNumber = 0; + ma_result result = ma_dr_flac__read_utf8_coded_number(bs, &flacFrameNumber, &crc8); + if (result != MA_SUCCESS) { + if (result == MA_AT_END) { + return MA_FALSE; + } else { + continue; + } + } + header->flacFrameNumber = (ma_uint32)flacFrameNumber; + header->pcmFrameNumber = 0; + } + MA_DR_FLAC_ASSERT(blockSize > 0); + if (blockSize == 1) { + header->blockSizeInPCMFrames = 192; + } else if (blockSize <= 5) { + MA_DR_FLAC_ASSERT(blockSize >= 2); + header->blockSizeInPCMFrames = 576 * (1 << (blockSize - 2)); + } else if (blockSize == 6) { + if (!ma_dr_flac__read_uint16(bs, 8, &header->blockSizeInPCMFrames)) { + return MA_FALSE; + } + crc8 = ma_dr_flac_crc8(crc8, header->blockSizeInPCMFrames, 8); + header->blockSizeInPCMFrames += 1; + } else if (blockSize == 7) { + if (!ma_dr_flac__read_uint16(bs, 16, &header->blockSizeInPCMFrames)) { + return MA_FALSE; + } + crc8 = ma_dr_flac_crc8(crc8, header->blockSizeInPCMFrames, 16); + if (header->blockSizeInPCMFrames == 0xFFFF) { + return MA_FALSE; + } + header->blockSizeInPCMFrames += 1; + } else { + MA_DR_FLAC_ASSERT(blockSize >= 8); + header->blockSizeInPCMFrames = 256 * (1 << (blockSize - 8)); + } + if (sampleRate <= 11) { + header->sampleRate = sampleRateTable[sampleRate]; + } else if (sampleRate == 12) { + if (!ma_dr_flac__read_uint32(bs, 8, &header->sampleRate)) { + return MA_FALSE; + } + crc8 = ma_dr_flac_crc8(crc8, header->sampleRate, 8); + header->sampleRate *= 1000; + } else if (sampleRate == 13) { + if (!ma_dr_flac__read_uint32(bs, 16, &header->sampleRate)) { + return MA_FALSE; + } + crc8 = ma_dr_flac_crc8(crc8, header->sampleRate, 16); + } else if (sampleRate == 14) { + if (!ma_dr_flac__read_uint32(bs, 16, &header->sampleRate)) { + return MA_FALSE; + } + crc8 = ma_dr_flac_crc8(crc8, header->sampleRate, 16); + header->sampleRate *= 10; + } else { + continue; + } + header->channelAssignment = channelAssignment; + header->bitsPerSample = bitsPerSampleTable[bitsPerSample]; + if (header->bitsPerSample == 0) { + header->bitsPerSample = streaminfoBitsPerSample; + } + if (header->bitsPerSample != streaminfoBitsPerSample) { + return MA_FALSE; + } + if (!ma_dr_flac__read_uint8(bs, 8, &header->crc8)) { + return MA_FALSE; + } +#ifndef MA_DR_FLAC_NO_CRC + if (header->crc8 != crc8) { + continue; + } +#endif + return MA_TRUE; + } +} +static ma_bool32 ma_dr_flac__read_subframe_header(ma_dr_flac_bs* bs, ma_dr_flac_subframe* pSubframe) +{ + ma_uint8 header; + int type; + if (!ma_dr_flac__read_uint8(bs, 8, &header)) { + return MA_FALSE; + } + if ((header & 0x80) != 0) { + return MA_FALSE; + } + pSubframe->lpcOrder = 0; + type = (header & 0x7E) >> 1; + if (type == 0) { + pSubframe->subframeType = MA_DR_FLAC_SUBFRAME_CONSTANT; + } else if (type == 1) { + pSubframe->subframeType = MA_DR_FLAC_SUBFRAME_VERBATIM; + } else { + if ((type & 0x20) != 0) { + pSubframe->subframeType = MA_DR_FLAC_SUBFRAME_LPC; + pSubframe->lpcOrder = 
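+/*
+** Frame header size/rate codes, as decoded above: blockSize 0b0001 means 192 frames,
+** 0b0010..0b0101 mean 576 << (n-2), 0b0110/0b0111 read an 8/16-bit value plus one from
+** the stream, and 0b1000..0b1111 mean 256 << (n-8). sampleRate 0..11 index
+** sampleRateTable, 12 reads kHz from 8 bits (so *1000), 13 reads Hz from 16 bits, and
+** 14 reads Hz/10 from 16 bits. The CRC-8 accumulator is pre-seeded with 0xCE, the CRC
+** state of the 14-bit sync pattern 0b11111111111110, so the sync bits never need to be
+** re-hashed. Examples:
+**
+**   blockSize code 0b0101          ->  576 << 3 = 4608 PCM frames
+**   sampleRate code 12, byte 44    ->  44 * 1000 = 44000 Hz
+*/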
(ma_uint8)(type & 0x1F) + 1; + } else if ((type & 0x08) != 0) { + pSubframe->subframeType = MA_DR_FLAC_SUBFRAME_FIXED; + pSubframe->lpcOrder = (ma_uint8)(type & 0x07); + if (pSubframe->lpcOrder > 4) { + pSubframe->subframeType = MA_DR_FLAC_SUBFRAME_RESERVED; + pSubframe->lpcOrder = 0; + } + } else { + pSubframe->subframeType = MA_DR_FLAC_SUBFRAME_RESERVED; + } + } + if (pSubframe->subframeType == MA_DR_FLAC_SUBFRAME_RESERVED) { + return MA_FALSE; + } + pSubframe->wastedBitsPerSample = 0; + if ((header & 0x01) == 1) { + unsigned int wastedBitsPerSample; + if (!ma_dr_flac__seek_past_next_set_bit(bs, &wastedBitsPerSample)) { + return MA_FALSE; + } + pSubframe->wastedBitsPerSample = (ma_uint8)wastedBitsPerSample + 1; + } + return MA_TRUE; +} +static ma_bool32 ma_dr_flac__decode_subframe(ma_dr_flac_bs* bs, ma_dr_flac_frame* frame, int subframeIndex, ma_int32* pDecodedSamplesOut) +{ + ma_dr_flac_subframe* pSubframe; + ma_uint32 subframeBitsPerSample; + MA_DR_FLAC_ASSERT(bs != NULL); + MA_DR_FLAC_ASSERT(frame != NULL); + pSubframe = frame->subframes + subframeIndex; + if (!ma_dr_flac__read_subframe_header(bs, pSubframe)) { + return MA_FALSE; + } + subframeBitsPerSample = frame->header.bitsPerSample; + if ((frame->header.channelAssignment == MA_DR_FLAC_CHANNEL_ASSIGNMENT_LEFT_SIDE || frame->header.channelAssignment == MA_DR_FLAC_CHANNEL_ASSIGNMENT_MID_SIDE) && subframeIndex == 1) { + subframeBitsPerSample += 1; + } else if (frame->header.channelAssignment == MA_DR_FLAC_CHANNEL_ASSIGNMENT_RIGHT_SIDE && subframeIndex == 0) { + subframeBitsPerSample += 1; + } + if (subframeBitsPerSample > 32) { + return MA_FALSE; + } + if (pSubframe->wastedBitsPerSample >= subframeBitsPerSample) { + return MA_FALSE; + } + subframeBitsPerSample -= pSubframe->wastedBitsPerSample; + pSubframe->pSamplesS32 = pDecodedSamplesOut; + if (frame->header.blockSizeInPCMFrames < pSubframe->lpcOrder) { + return MA_FALSE; + } + switch (pSubframe->subframeType) + { + case MA_DR_FLAC_SUBFRAME_CONSTANT: + { + ma_dr_flac__decode_samples__constant(bs, frame->header.blockSizeInPCMFrames, subframeBitsPerSample, pSubframe->pSamplesS32); + } break; + case MA_DR_FLAC_SUBFRAME_VERBATIM: + { + ma_dr_flac__decode_samples__verbatim(bs, frame->header.blockSizeInPCMFrames, subframeBitsPerSample, pSubframe->pSamplesS32); + } break; + case MA_DR_FLAC_SUBFRAME_FIXED: + { + ma_dr_flac__decode_samples__fixed(bs, frame->header.blockSizeInPCMFrames, subframeBitsPerSample, pSubframe->lpcOrder, pSubframe->pSamplesS32); + } break; + case MA_DR_FLAC_SUBFRAME_LPC: + { + ma_dr_flac__decode_samples__lpc(bs, frame->header.blockSizeInPCMFrames, subframeBitsPerSample, pSubframe->lpcOrder, pSubframe->pSamplesS32); + } break; + default: return MA_FALSE; + } + return MA_TRUE; +} +static ma_bool32 ma_dr_flac__seek_subframe(ma_dr_flac_bs* bs, ma_dr_flac_frame* frame, int subframeIndex) +{ + ma_dr_flac_subframe* pSubframe; + ma_uint32 subframeBitsPerSample; + MA_DR_FLAC_ASSERT(bs != NULL); + MA_DR_FLAC_ASSERT(frame != NULL); + pSubframe = frame->subframes + subframeIndex; + if (!ma_dr_flac__read_subframe_header(bs, pSubframe)) { + return MA_FALSE; + } + subframeBitsPerSample = frame->header.bitsPerSample; + if ((frame->header.channelAssignment == MA_DR_FLAC_CHANNEL_ASSIGNMENT_LEFT_SIDE || frame->header.channelAssignment == MA_DR_FLAC_CHANNEL_ASSIGNMENT_MID_SIDE) && subframeIndex == 1) { + subframeBitsPerSample += 1; + } else if (frame->header.channelAssignment == MA_DR_FLAC_CHANNEL_ASSIGNMENT_RIGHT_SIDE && subframeIndex == 0) { + subframeBitsPerSample += 1; + } + if 
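+/*
+** Subframe header byte, matching the parse above: bit 7 must be zero, bits 6..1 carry
+** the type code, and bit 0 flags wasted bits (followed by a unary count). Type codes:
+**
+**   000000   CONSTANT
+**   000001   VERBATIM
+**   001xxx   FIXED, order = xxx (0..4)
+**   1xxxxx   LPC,   order = xxxxx + 1 (1..32)
+**
+** Example: header byte 0x48 = 0b01001000 -> type = 0b100100, so LPC with
+** order (0x24 & 0x1F) + 1 = 5 and no wasted bits.
+*/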
(pSubframe->wastedBitsPerSample >= subframeBitsPerSample) { + return MA_FALSE; + } + subframeBitsPerSample -= pSubframe->wastedBitsPerSample; + pSubframe->pSamplesS32 = NULL; + switch (pSubframe->subframeType) + { + case MA_DR_FLAC_SUBFRAME_CONSTANT: + { + if (!ma_dr_flac__seek_bits(bs, subframeBitsPerSample)) { + return MA_FALSE; + } + } break; + case MA_DR_FLAC_SUBFRAME_VERBATIM: + { + unsigned int bitsToSeek = frame->header.blockSizeInPCMFrames * subframeBitsPerSample; + if (!ma_dr_flac__seek_bits(bs, bitsToSeek)) { + return MA_FALSE; + } + } break; + case MA_DR_FLAC_SUBFRAME_FIXED: + { + unsigned int bitsToSeek = pSubframe->lpcOrder * subframeBitsPerSample; + if (!ma_dr_flac__seek_bits(bs, bitsToSeek)) { + return MA_FALSE; + } + if (!ma_dr_flac__read_and_seek_residual(bs, frame->header.blockSizeInPCMFrames, pSubframe->lpcOrder)) { + return MA_FALSE; + } + } break; + case MA_DR_FLAC_SUBFRAME_LPC: + { + ma_uint8 lpcPrecision; + unsigned int bitsToSeek = pSubframe->lpcOrder * subframeBitsPerSample; + if (!ma_dr_flac__seek_bits(bs, bitsToSeek)) { + return MA_FALSE; + } + if (!ma_dr_flac__read_uint8(bs, 4, &lpcPrecision)) { + return MA_FALSE; + } + if (lpcPrecision == 15) { + return MA_FALSE; + } + lpcPrecision += 1; + bitsToSeek = (pSubframe->lpcOrder * lpcPrecision) + 5; + if (!ma_dr_flac__seek_bits(bs, bitsToSeek)) { + return MA_FALSE; + } + if (!ma_dr_flac__read_and_seek_residual(bs, frame->header.blockSizeInPCMFrames, pSubframe->lpcOrder)) { + return MA_FALSE; + } + } break; + default: return MA_FALSE; + } + return MA_TRUE; +} +static MA_INLINE ma_uint8 ma_dr_flac__get_channel_count_from_channel_assignment(ma_int8 channelAssignment) +{ + ma_uint8 lookup[] = {1, 2, 3, 4, 5, 6, 7, 8, 2, 2, 2}; + MA_DR_FLAC_ASSERT(channelAssignment <= 10); + return lookup[channelAssignment]; +} +static ma_result ma_dr_flac__decode_flac_frame(ma_dr_flac* pFlac) +{ + int channelCount; + int i; + ma_uint8 paddingSizeInBits; + ma_uint16 desiredCRC16; +#ifndef MA_DR_FLAC_NO_CRC + ma_uint16 actualCRC16; +#endif + MA_DR_FLAC_ZERO_MEMORY(pFlac->currentFLACFrame.subframes, sizeof(pFlac->currentFLACFrame.subframes)); + if (pFlac->currentFLACFrame.header.blockSizeInPCMFrames > pFlac->maxBlockSizeInPCMFrames) { + return MA_ERROR; + } + channelCount = ma_dr_flac__get_channel_count_from_channel_assignment(pFlac->currentFLACFrame.header.channelAssignment); + if (channelCount != (int)pFlac->channels) { + return MA_ERROR; + } + for (i = 0; i < channelCount; ++i) { + if (!ma_dr_flac__decode_subframe(&pFlac->bs, &pFlac->currentFLACFrame, i, pFlac->pDecodedSamples + (pFlac->currentFLACFrame.header.blockSizeInPCMFrames * i))) { + return MA_ERROR; + } + } + paddingSizeInBits = (ma_uint8)(MA_DR_FLAC_CACHE_L1_BITS_REMAINING(&pFlac->bs) & 7); + if (paddingSizeInBits > 0) { + ma_uint8 padding = 0; + if (!ma_dr_flac__read_uint8(&pFlac->bs, paddingSizeInBits, &padding)) { + return MA_AT_END; + } + } +#ifndef MA_DR_FLAC_NO_CRC + actualCRC16 = ma_dr_flac__flush_crc16(&pFlac->bs); +#endif + if (!ma_dr_flac__read_uint16(&pFlac->bs, 16, &desiredCRC16)) { + return MA_AT_END; + } +#ifndef MA_DR_FLAC_NO_CRC + if (actualCRC16 != desiredCRC16) { + return MA_CRC_MISMATCH; + } +#endif + pFlac->currentFLACFrame.pcmFramesRemaining = pFlac->currentFLACFrame.header.blockSizeInPCMFrames; + return MA_SUCCESS; +} +static ma_result ma_dr_flac__seek_flac_frame(ma_dr_flac* pFlac) +{ + int channelCount; + int i; + ma_uint16 desiredCRC16; +#ifndef MA_DR_FLAC_NO_CRC + ma_uint16 actualCRC16; +#endif + channelCount = 
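+/*
+** The +1 bit above is stereo decorrelation: in left/side, right/side and mid/side modes
+** the side channel stores a difference (left - right), which can need one more bit than
+** the source bit depth. A reconstruction sketch for mid/side, assuming the conventions
+** used elsewhere in this decoder:
+**
+**   mid   = (mid << 1) | (side & 1);    -- recover the dropped low bit
+**   left  = (mid + side) >> 1;
+**   right = (mid - side) >> 1;
+*/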
ma_dr_flac__get_channel_count_from_channel_assignment(pFlac->currentFLACFrame.header.channelAssignment); + for (i = 0; i < channelCount; ++i) { + if (!ma_dr_flac__seek_subframe(&pFlac->bs, &pFlac->currentFLACFrame, i)) { + return MA_ERROR; + } + } + if (!ma_dr_flac__seek_bits(&pFlac->bs, MA_DR_FLAC_CACHE_L1_BITS_REMAINING(&pFlac->bs) & 7)) { + return MA_ERROR; + } +#ifndef MA_DR_FLAC_NO_CRC + actualCRC16 = ma_dr_flac__flush_crc16(&pFlac->bs); +#endif + if (!ma_dr_flac__read_uint16(&pFlac->bs, 16, &desiredCRC16)) { + return MA_AT_END; + } +#ifndef MA_DR_FLAC_NO_CRC + if (actualCRC16 != desiredCRC16) { + return MA_CRC_MISMATCH; + } +#endif + return MA_SUCCESS; +} +static ma_bool32 ma_dr_flac__read_and_decode_next_flac_frame(ma_dr_flac* pFlac) +{ + MA_DR_FLAC_ASSERT(pFlac != NULL); + for (;;) { + ma_result result; + if (!ma_dr_flac__read_next_flac_frame_header(&pFlac->bs, pFlac->bitsPerSample, &pFlac->currentFLACFrame.header)) { + return MA_FALSE; + } + result = ma_dr_flac__decode_flac_frame(pFlac); + if (result != MA_SUCCESS) { + if (result == MA_CRC_MISMATCH) { + continue; + } else { + return MA_FALSE; + } + } + return MA_TRUE; + } +} +static void ma_dr_flac__get_pcm_frame_range_of_current_flac_frame(ma_dr_flac* pFlac, ma_uint64* pFirstPCMFrame, ma_uint64* pLastPCMFrame) +{ + ma_uint64 firstPCMFrame; + ma_uint64 lastPCMFrame; + MA_DR_FLAC_ASSERT(pFlac != NULL); + firstPCMFrame = pFlac->currentFLACFrame.header.pcmFrameNumber; + if (firstPCMFrame == 0) { + firstPCMFrame = ((ma_uint64)pFlac->currentFLACFrame.header.flacFrameNumber) * pFlac->maxBlockSizeInPCMFrames; + } + lastPCMFrame = firstPCMFrame + pFlac->currentFLACFrame.header.blockSizeInPCMFrames; + if (lastPCMFrame > 0) { + lastPCMFrame -= 1; + } + if (pFirstPCMFrame) { + *pFirstPCMFrame = firstPCMFrame; + } + if (pLastPCMFrame) { + *pLastPCMFrame = lastPCMFrame; + } +} +static ma_bool32 ma_dr_flac__seek_to_first_frame(ma_dr_flac* pFlac) +{ + ma_bool32 result; + MA_DR_FLAC_ASSERT(pFlac != NULL); + result = ma_dr_flac__seek_to_byte(&pFlac->bs, pFlac->firstFLACFramePosInBytes); + MA_DR_FLAC_ZERO_MEMORY(&pFlac->currentFLACFrame, sizeof(pFlac->currentFLACFrame)); + pFlac->currentPCMFrame = 0; + return result; +} +static MA_INLINE ma_result ma_dr_flac__seek_to_next_flac_frame(ma_dr_flac* pFlac) +{ + MA_DR_FLAC_ASSERT(pFlac != NULL); + return ma_dr_flac__seek_flac_frame(pFlac); +} +static ma_uint64 ma_dr_flac__seek_forward_by_pcm_frames(ma_dr_flac* pFlac, ma_uint64 pcmFramesToSeek) +{ + ma_uint64 pcmFramesRead = 0; + while (pcmFramesToSeek > 0) { + if (pFlac->currentFLACFrame.pcmFramesRemaining == 0) { + if (!ma_dr_flac__read_and_decode_next_flac_frame(pFlac)) { + break; + } + } else { + if (pFlac->currentFLACFrame.pcmFramesRemaining > pcmFramesToSeek) { + pcmFramesRead += pcmFramesToSeek; + pFlac->currentFLACFrame.pcmFramesRemaining -= (ma_uint32)pcmFramesToSeek; + pcmFramesToSeek = 0; + } else { + pcmFramesRead += pFlac->currentFLACFrame.pcmFramesRemaining; + pcmFramesToSeek -= pFlac->currentFLACFrame.pcmFramesRemaining; + pFlac->currentFLACFrame.pcmFramesRemaining = 0; + } + } + } + pFlac->currentPCMFrame += pcmFramesRead; + return pcmFramesRead; +} +static ma_bool32 ma_dr_flac__seek_to_pcm_frame__brute_force(ma_dr_flac* pFlac, ma_uint64 pcmFrameIndex) +{ + ma_bool32 isMidFrame = MA_FALSE; + ma_uint64 runningPCMFrameCount; + MA_DR_FLAC_ASSERT(pFlac != NULL); + if (pcmFrameIndex >= pFlac->currentPCMFrame) { + runningPCMFrameCount = pFlac->currentPCMFrame; + if (pFlac->currentPCMFrame == 0 && pFlac->currentFLACFrame.pcmFramesRemaining == 0) 
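+/*
+** Channel assignment codes 0..7 mean 1..8 independent channels (code + 1); 8, 9 and 10
+** are left/side, right/side and mid/side, which is why the lookup table above tails off
+** with {2, 2, 2}. After the per-channel subframes, the frame is padded to a byte
+** boundary and closed by a CRC-16 over everything since the sync code; on mismatch the
+** frame is skipped and the caller resyncs instead of aborting, roughly:
+**
+**   if (decode_frame(pFlac) == MA_CRC_MISMATCH) { continue; }    -- resync on next header
+*/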
{ + if (!ma_dr_flac__read_next_flac_frame_header(&pFlac->bs, pFlac->bitsPerSample, &pFlac->currentFLACFrame.header)) { + return MA_FALSE; + } + } else { + isMidFrame = MA_TRUE; + } + } else { + runningPCMFrameCount = 0; + if (!ma_dr_flac__seek_to_first_frame(pFlac)) { + return MA_FALSE; + } + if (!ma_dr_flac__read_next_flac_frame_header(&pFlac->bs, pFlac->bitsPerSample, &pFlac->currentFLACFrame.header)) { + return MA_FALSE; + } + } + for (;;) { + ma_uint64 pcmFrameCountInThisFLACFrame; + ma_uint64 firstPCMFrameInFLACFrame = 0; + ma_uint64 lastPCMFrameInFLACFrame = 0; + ma_dr_flac__get_pcm_frame_range_of_current_flac_frame(pFlac, &firstPCMFrameInFLACFrame, &lastPCMFrameInFLACFrame); + pcmFrameCountInThisFLACFrame = (lastPCMFrameInFLACFrame - firstPCMFrameInFLACFrame) + 1; + if (pcmFrameIndex < (runningPCMFrameCount + pcmFrameCountInThisFLACFrame)) { + ma_uint64 pcmFramesToDecode = pcmFrameIndex - runningPCMFrameCount; + if (!isMidFrame) { + ma_result result = ma_dr_flac__decode_flac_frame(pFlac); + if (result == MA_SUCCESS) { + return ma_dr_flac__seek_forward_by_pcm_frames(pFlac, pcmFramesToDecode) == pcmFramesToDecode; + } else { + if (result == MA_CRC_MISMATCH) { + goto next_iteration; + } else { + return MA_FALSE; + } + } + } else { + return ma_dr_flac__seek_forward_by_pcm_frames(pFlac, pcmFramesToDecode) == pcmFramesToDecode; + } + } else { + if (!isMidFrame) { + ma_result result = ma_dr_flac__seek_to_next_flac_frame(pFlac); + if (result == MA_SUCCESS) { + runningPCMFrameCount += pcmFrameCountInThisFLACFrame; + } else { + if (result == MA_CRC_MISMATCH) { + goto next_iteration; + } else { + return MA_FALSE; + } + } + } else { + runningPCMFrameCount += pFlac->currentFLACFrame.pcmFramesRemaining; + pFlac->currentFLACFrame.pcmFramesRemaining = 0; + isMidFrame = MA_FALSE; + } + if (pcmFrameIndex == pFlac->totalPCMFrameCount && runningPCMFrameCount == pFlac->totalPCMFrameCount) { + return MA_TRUE; + } + } + next_iteration: + if (!ma_dr_flac__read_next_flac_frame_header(&pFlac->bs, pFlac->bitsPerSample, &pFlac->currentFLACFrame.header)) { + return MA_FALSE; + } + } +} +#if !defined(MA_DR_FLAC_NO_CRC) +#define MA_DR_FLAC_BINARY_SEARCH_APPROX_COMPRESSION_RATIO 0.6f +static ma_bool32 ma_dr_flac__seek_to_approximate_flac_frame_to_byte(ma_dr_flac* pFlac, ma_uint64 targetByte, ma_uint64 rangeLo, ma_uint64 rangeHi, ma_uint64* pLastSuccessfulSeekOffset) +{ + MA_DR_FLAC_ASSERT(pFlac != NULL); + MA_DR_FLAC_ASSERT(pLastSuccessfulSeekOffset != NULL); + MA_DR_FLAC_ASSERT(targetByte >= rangeLo); + MA_DR_FLAC_ASSERT(targetByte <= rangeHi); + *pLastSuccessfulSeekOffset = pFlac->firstFLACFramePosInBytes; + for (;;) { + ma_uint64 lastTargetByte = targetByte; + if (!ma_dr_flac__seek_to_byte(&pFlac->bs, targetByte)) { + if (targetByte == 0) { + ma_dr_flac__seek_to_first_frame(pFlac); + return MA_FALSE; + } + targetByte = rangeLo + ((rangeHi - rangeLo)/2); + rangeHi = targetByte; + } else { + MA_DR_FLAC_ZERO_MEMORY(&pFlac->currentFLACFrame, sizeof(pFlac->currentFLACFrame)); +#if 1 + if (!ma_dr_flac__read_and_decode_next_flac_frame(pFlac)) { + targetByte = rangeLo + ((rangeHi - rangeLo)/2); + rangeHi = targetByte; + } else { + break; + } +#else + if (!ma_dr_flac__read_next_flac_frame_header(&pFlac->bs, pFlac->bitsPerSample, &pFlac->currentFLACFrame.header)) { + targetByte = rangeLo + ((rangeHi - rangeLo)/2); + rangeHi = targetByte; + } else { + break; + } +#endif + } + if(targetByte == lastTargetByte) { + return MA_FALSE; + } + } + ma_dr_flac__get_pcm_frame_range_of_current_flac_frame(pFlac, 
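+/*
+** Seeking without a seek table: the binary search first guesses a byte offset from the
+** requested PCM position and an assumed compression ratio
+** (MA_DR_FLAC_BINARY_SEARCH_APPROX_COMPRESSION_RATIO, 0.6), then refines using the ratio
+** actually observed at each landing point. Initial guess, roughly:
+**
+**   targetByte = rangeLo + (ma_uint64)(pcmDelta * channels * (bitsPerSample / 8.0f) * 0.6f);
+**
+** Whenever a landing fails to parse as a frame, the range is halved toward rangeLo, and
+** once the target lies within seekForwardThreshold PCM frames the code stops bisecting
+** and simply decodes forward.
+*/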
&pFlac->currentPCMFrame, NULL); + MA_DR_FLAC_ASSERT(targetByte <= rangeHi); + *pLastSuccessfulSeekOffset = targetByte; + return MA_TRUE; +} +static ma_bool32 ma_dr_flac__decode_flac_frame_and_seek_forward_by_pcm_frames(ma_dr_flac* pFlac, ma_uint64 offset) +{ +#if 0 + if (ma_dr_flac__decode_flac_frame(pFlac) != MA_SUCCESS) { + if (ma_dr_flac__read_and_decode_next_flac_frame(pFlac) == MA_FALSE) { + return MA_FALSE; + } + } +#endif + return ma_dr_flac__seek_forward_by_pcm_frames(pFlac, offset) == offset; +} +static ma_bool32 ma_dr_flac__seek_to_pcm_frame__binary_search_internal(ma_dr_flac* pFlac, ma_uint64 pcmFrameIndex, ma_uint64 byteRangeLo, ma_uint64 byteRangeHi) +{ + ma_uint64 targetByte; + ma_uint64 pcmRangeLo = pFlac->totalPCMFrameCount; + ma_uint64 pcmRangeHi = 0; + ma_uint64 lastSuccessfulSeekOffset = (ma_uint64)-1; + ma_uint64 closestSeekOffsetBeforeTargetPCMFrame = byteRangeLo; + ma_uint32 seekForwardThreshold = (pFlac->maxBlockSizeInPCMFrames != 0) ? pFlac->maxBlockSizeInPCMFrames*2 : 4096; + targetByte = byteRangeLo + (ma_uint64)(((ma_int64)((pcmFrameIndex - pFlac->currentPCMFrame) * pFlac->channels * pFlac->bitsPerSample)/8.0f) * MA_DR_FLAC_BINARY_SEARCH_APPROX_COMPRESSION_RATIO); + if (targetByte > byteRangeHi) { + targetByte = byteRangeHi; + } + for (;;) { + if (ma_dr_flac__seek_to_approximate_flac_frame_to_byte(pFlac, targetByte, byteRangeLo, byteRangeHi, &lastSuccessfulSeekOffset)) { + ma_uint64 newPCMRangeLo; + ma_uint64 newPCMRangeHi; + ma_dr_flac__get_pcm_frame_range_of_current_flac_frame(pFlac, &newPCMRangeLo, &newPCMRangeHi); + if (pcmRangeLo == newPCMRangeLo) { + if (!ma_dr_flac__seek_to_approximate_flac_frame_to_byte(pFlac, closestSeekOffsetBeforeTargetPCMFrame, closestSeekOffsetBeforeTargetPCMFrame, byteRangeHi, &lastSuccessfulSeekOffset)) { + break; + } + if (ma_dr_flac__decode_flac_frame_and_seek_forward_by_pcm_frames(pFlac, pcmFrameIndex - pFlac->currentPCMFrame)) { + return MA_TRUE; + } else { + break; + } + } + pcmRangeLo = newPCMRangeLo; + pcmRangeHi = newPCMRangeHi; + if (pcmRangeLo <= pcmFrameIndex && pcmRangeHi >= pcmFrameIndex) { + if (ma_dr_flac__decode_flac_frame_and_seek_forward_by_pcm_frames(pFlac, pcmFrameIndex - pFlac->currentPCMFrame) ) { + return MA_TRUE; + } else { + break; + } + } else { + const float approxCompressionRatio = (ma_int64)(lastSuccessfulSeekOffset - pFlac->firstFLACFramePosInBytes) / ((ma_int64)(pcmRangeLo * pFlac->channels * pFlac->bitsPerSample)/8.0f); + if (pcmRangeLo > pcmFrameIndex) { + byteRangeHi = lastSuccessfulSeekOffset; + if (byteRangeLo > byteRangeHi) { + byteRangeLo = byteRangeHi; + } + targetByte = byteRangeLo + ((byteRangeHi - byteRangeLo) / 2); + if (targetByte < byteRangeLo) { + targetByte = byteRangeLo; + } + } else { + if ((pcmFrameIndex - pcmRangeLo) < seekForwardThreshold) { + if (ma_dr_flac__decode_flac_frame_and_seek_forward_by_pcm_frames(pFlac, pcmFrameIndex - pFlac->currentPCMFrame)) { + return MA_TRUE; + } else { + break; + } + } else { + byteRangeLo = lastSuccessfulSeekOffset; + if (byteRangeHi < byteRangeLo) { + byteRangeHi = byteRangeLo; + } + targetByte = lastSuccessfulSeekOffset + (ma_uint64)(((ma_int64)((pcmFrameIndex-pcmRangeLo) * pFlac->channels * pFlac->bitsPerSample)/8.0f) * approxCompressionRatio); + if (targetByte > byteRangeHi) { + targetByte = byteRangeHi; + } + if (closestSeekOffsetBeforeTargetPCMFrame < lastSuccessfulSeekOffset) { + closestSeekOffsetBeforeTargetPCMFrame = lastSuccessfulSeekOffset; + } + } + } + } + } else { + break; + } + } + ma_dr_flac__seek_to_first_frame(pFlac); + return 
MA_FALSE; +} +static ma_bool32 ma_dr_flac__seek_to_pcm_frame__binary_search(ma_dr_flac* pFlac, ma_uint64 pcmFrameIndex) +{ + ma_uint64 byteRangeLo; + ma_uint64 byteRangeHi; + ma_uint32 seekForwardThreshold = (pFlac->maxBlockSizeInPCMFrames != 0) ? pFlac->maxBlockSizeInPCMFrames*2 : 4096; + if (ma_dr_flac__seek_to_first_frame(pFlac) == MA_FALSE) { + return MA_FALSE; + } + if (pcmFrameIndex < seekForwardThreshold) { + return ma_dr_flac__seek_forward_by_pcm_frames(pFlac, pcmFrameIndex) == pcmFrameIndex; + } + byteRangeLo = pFlac->firstFLACFramePosInBytes; + byteRangeHi = pFlac->firstFLACFramePosInBytes + (ma_uint64)((ma_int64)(pFlac->totalPCMFrameCount * pFlac->channels * pFlac->bitsPerSample)/8.0f); + return ma_dr_flac__seek_to_pcm_frame__binary_search_internal(pFlac, pcmFrameIndex, byteRangeLo, byteRangeHi); +} +#endif +static ma_bool32 ma_dr_flac__seek_to_pcm_frame__seek_table(ma_dr_flac* pFlac, ma_uint64 pcmFrameIndex) +{ + ma_uint32 iClosestSeekpoint = 0; + ma_bool32 isMidFrame = MA_FALSE; + ma_uint64 runningPCMFrameCount; + ma_uint32 iSeekpoint; + MA_DR_FLAC_ASSERT(pFlac != NULL); + if (pFlac->pSeekpoints == NULL || pFlac->seekpointCount == 0) { + return MA_FALSE; + } + if (pFlac->pSeekpoints[0].firstPCMFrame > pcmFrameIndex) { + return MA_FALSE; + } + for (iSeekpoint = 0; iSeekpoint < pFlac->seekpointCount; ++iSeekpoint) { + if (pFlac->pSeekpoints[iSeekpoint].firstPCMFrame >= pcmFrameIndex) { + break; + } + iClosestSeekpoint = iSeekpoint; + } + if (pFlac->pSeekpoints[iClosestSeekpoint].pcmFrameCount == 0 || pFlac->pSeekpoints[iClosestSeekpoint].pcmFrameCount > pFlac->maxBlockSizeInPCMFrames) { + return MA_FALSE; + } + if (pFlac->pSeekpoints[iClosestSeekpoint].firstPCMFrame > pFlac->totalPCMFrameCount && pFlac->totalPCMFrameCount > 0) { + return MA_FALSE; + } +#if !defined(MA_DR_FLAC_NO_CRC) + if (pFlac->totalPCMFrameCount > 0) { + ma_uint64 byteRangeLo; + ma_uint64 byteRangeHi; + byteRangeHi = pFlac->firstFLACFramePosInBytes + (ma_uint64)((ma_int64)(pFlac->totalPCMFrameCount * pFlac->channels * pFlac->bitsPerSample)/8.0f); + byteRangeLo = pFlac->firstFLACFramePosInBytes + pFlac->pSeekpoints[iClosestSeekpoint].flacFrameOffset; + if (iClosestSeekpoint < pFlac->seekpointCount-1) { + ma_uint32 iNextSeekpoint = iClosestSeekpoint + 1; + if (pFlac->pSeekpoints[iClosestSeekpoint].flacFrameOffset >= pFlac->pSeekpoints[iNextSeekpoint].flacFrameOffset || pFlac->pSeekpoints[iNextSeekpoint].pcmFrameCount == 0) { + return MA_FALSE; + } + if (pFlac->pSeekpoints[iNextSeekpoint].firstPCMFrame != (((ma_uint64)0xFFFFFFFF << 32) | 0xFFFFFFFF)) { + byteRangeHi = pFlac->firstFLACFramePosInBytes + pFlac->pSeekpoints[iNextSeekpoint].flacFrameOffset - 1; + } + } + if (ma_dr_flac__seek_to_byte(&pFlac->bs, pFlac->firstFLACFramePosInBytes + pFlac->pSeekpoints[iClosestSeekpoint].flacFrameOffset)) { + if (ma_dr_flac__read_next_flac_frame_header(&pFlac->bs, pFlac->bitsPerSample, &pFlac->currentFLACFrame.header)) { + ma_dr_flac__get_pcm_frame_range_of_current_flac_frame(pFlac, &pFlac->currentPCMFrame, NULL); + if (ma_dr_flac__seek_to_pcm_frame__binary_search_internal(pFlac, pcmFrameIndex, byteRangeLo, byteRangeHi)) { + return MA_TRUE; + } + } + } + } +#endif + if (pcmFrameIndex >= pFlac->currentPCMFrame && pFlac->pSeekpoints[iClosestSeekpoint].firstPCMFrame <= pFlac->currentPCMFrame) { + runningPCMFrameCount = pFlac->currentPCMFrame; + if (pFlac->currentPCMFrame == 0 && pFlac->currentFLACFrame.pcmFramesRemaining == 0) { + if (!ma_dr_flac__read_next_flac_frame_header(&pFlac->bs, pFlac->bitsPerSample, 
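+/*
+** SEEKTABLE-based seeking: each 18-byte on-disk seekpoint stores, big-endian, the first
+** PCM frame of a target FLAC frame, its byte offset relative to the first frame, and
+** its length in PCM frames; firstPCMFrame == 0xFFFFFFFFFFFFFFFF marks a placeholder.
+** In-memory layout matching the fields used below:
+**
+**   typedef struct
+**   {
+**       ma_uint64 firstPCMFrame;
+**       ma_uint64 flacFrameOffset;    -- relative to the first FLAC frame
+**       ma_uint16 pcmFrameCount;
+**   } ma_dr_flac_seekpoint;
+**
+** The code below picks the closest seekpoint at or before the target, then searches or
+** walks only within the byte range that seekpoint bounds.
+*/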
&pFlac->currentFLACFrame.header)) { + return MA_FALSE; + } + } else { + isMidFrame = MA_TRUE; + } + } else { + runningPCMFrameCount = pFlac->pSeekpoints[iClosestSeekpoint].firstPCMFrame; + if (!ma_dr_flac__seek_to_byte(&pFlac->bs, pFlac->firstFLACFramePosInBytes + pFlac->pSeekpoints[iClosestSeekpoint].flacFrameOffset)) { + return MA_FALSE; + } + if (!ma_dr_flac__read_next_flac_frame_header(&pFlac->bs, pFlac->bitsPerSample, &pFlac->currentFLACFrame.header)) { + return MA_FALSE; + } + } + for (;;) { + ma_uint64 pcmFrameCountInThisFLACFrame; + ma_uint64 firstPCMFrameInFLACFrame = 0; + ma_uint64 lastPCMFrameInFLACFrame = 0; + ma_dr_flac__get_pcm_frame_range_of_current_flac_frame(pFlac, &firstPCMFrameInFLACFrame, &lastPCMFrameInFLACFrame); + pcmFrameCountInThisFLACFrame = (lastPCMFrameInFLACFrame - firstPCMFrameInFLACFrame) + 1; + if (pcmFrameIndex < (runningPCMFrameCount + pcmFrameCountInThisFLACFrame)) { + ma_uint64 pcmFramesToDecode = pcmFrameIndex - runningPCMFrameCount; + if (!isMidFrame) { + ma_result result = ma_dr_flac__decode_flac_frame(pFlac); + if (result == MA_SUCCESS) { + return ma_dr_flac__seek_forward_by_pcm_frames(pFlac, pcmFramesToDecode) == pcmFramesToDecode; + } else { + if (result == MA_CRC_MISMATCH) { + goto next_iteration; + } else { + return MA_FALSE; + } + } + } else { + return ma_dr_flac__seek_forward_by_pcm_frames(pFlac, pcmFramesToDecode) == pcmFramesToDecode; + } + } else { + if (!isMidFrame) { + ma_result result = ma_dr_flac__seek_to_next_flac_frame(pFlac); + if (result == MA_SUCCESS) { + runningPCMFrameCount += pcmFrameCountInThisFLACFrame; + } else { + if (result == MA_CRC_MISMATCH) { + goto next_iteration; + } else { + return MA_FALSE; + } + } + } else { + runningPCMFrameCount += pFlac->currentFLACFrame.pcmFramesRemaining; + pFlac->currentFLACFrame.pcmFramesRemaining = 0; + isMidFrame = MA_FALSE; + } + if (pcmFrameIndex == pFlac->totalPCMFrameCount && runningPCMFrameCount == pFlac->totalPCMFrameCount) { + return MA_TRUE; + } + } + next_iteration: + if (!ma_dr_flac__read_next_flac_frame_header(&pFlac->bs, pFlac->bitsPerSample, &pFlac->currentFLACFrame.header)) { + return MA_FALSE; + } + } +} +#ifndef MA_DR_FLAC_NO_OGG +typedef struct +{ + ma_uint8 capturePattern[4]; + ma_uint8 structureVersion; + ma_uint8 headerType; + ma_uint64 granulePosition; + ma_uint32 serialNumber; + ma_uint32 sequenceNumber; + ma_uint32 checksum; + ma_uint8 segmentCount; + ma_uint8 segmentTable[255]; +} ma_dr_flac_ogg_page_header; +#endif +typedef struct +{ + ma_dr_flac_read_proc onRead; + ma_dr_flac_seek_proc onSeek; + ma_dr_flac_meta_proc onMeta; + ma_dr_flac_container container; + void* pUserData; + void* pUserDataMD; + ma_uint32 sampleRate; + ma_uint8 channels; + ma_uint8 bitsPerSample; + ma_uint64 totalPCMFrameCount; + ma_uint16 maxBlockSizeInPCMFrames; + ma_uint64 runningFilePos; + ma_bool32 hasStreamInfoBlock; + ma_bool32 hasMetadataBlocks; + ma_dr_flac_bs bs; + ma_dr_flac_frame_header firstFrameHeader; +#ifndef MA_DR_FLAC_NO_OGG + ma_uint32 oggSerial; + ma_uint64 oggFirstBytePos; + ma_dr_flac_ogg_page_header oggBosHeader; +#endif +} ma_dr_flac_init_info; +static MA_INLINE void ma_dr_flac__decode_block_header(ma_uint32 blockHeader, ma_uint8* isLastBlock, ma_uint8* blockType, ma_uint32* blockSize) +{ + blockHeader = ma_dr_flac__be2host_32(blockHeader); + *isLastBlock = (ma_uint8)((blockHeader & 0x80000000UL) >> 31); + *blockType = (ma_uint8)((blockHeader & 0x7F000000UL) >> 24); + *blockSize = (blockHeader & 0x00FFFFFFUL); +} +static MA_INLINE ma_bool32 
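+/*
+** Metadata block headers are a single big-endian 32-bit word: the top bit flags the last
+** block, the next 7 bits are the block type (0 STREAMINFO, 1 PADDING, 2 APPLICATION,
+** 3 SEEKTABLE, 4 VORBIS_COMMENT, 5 CUESHEET, 6 PICTURE, 127 invalid) and the low 24 bits
+** give the block size. Worked example for the unpack above:
+**
+**   0x84000022  ->  isLastBlock = 1, blockType = 4 (VORBIS_COMMENT), blockSize = 34
+*/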
ma_dr_flac__read_and_decode_block_header(ma_dr_flac_read_proc onRead, void* pUserData, ma_uint8* isLastBlock, ma_uint8* blockType, ma_uint32* blockSize) +{ + ma_uint32 blockHeader; + *blockSize = 0; + if (onRead(pUserData, &blockHeader, 4) != 4) { + return MA_FALSE; + } + ma_dr_flac__decode_block_header(blockHeader, isLastBlock, blockType, blockSize); + return MA_TRUE; +} +static ma_bool32 ma_dr_flac__read_streaminfo(ma_dr_flac_read_proc onRead, void* pUserData, ma_dr_flac_streaminfo* pStreamInfo) +{ + ma_uint32 blockSizes; + ma_uint64 frameSizes = 0; + ma_uint64 importantProps; + ma_uint8 md5[16]; + if (onRead(pUserData, &blockSizes, 4) != 4) { + return MA_FALSE; + } + if (onRead(pUserData, &frameSizes, 6) != 6) { + return MA_FALSE; + } + if (onRead(pUserData, &importantProps, 8) != 8) { + return MA_FALSE; + } + if (onRead(pUserData, md5, sizeof(md5)) != sizeof(md5)) { + return MA_FALSE; + } + blockSizes = ma_dr_flac__be2host_32(blockSizes); + frameSizes = ma_dr_flac__be2host_64(frameSizes); + importantProps = ma_dr_flac__be2host_64(importantProps); + pStreamInfo->minBlockSizeInPCMFrames = (ma_uint16)((blockSizes & 0xFFFF0000) >> 16); + pStreamInfo->maxBlockSizeInPCMFrames = (ma_uint16) (blockSizes & 0x0000FFFF); + pStreamInfo->minFrameSizeInPCMFrames = (ma_uint32)((frameSizes & (((ma_uint64)0x00FFFFFF << 16) << 24)) >> 40); + pStreamInfo->maxFrameSizeInPCMFrames = (ma_uint32)((frameSizes & (((ma_uint64)0x00FFFFFF << 16) << 0)) >> 16); + pStreamInfo->sampleRate = (ma_uint32)((importantProps & (((ma_uint64)0x000FFFFF << 16) << 28)) >> 44); + pStreamInfo->channels = (ma_uint8 )((importantProps & (((ma_uint64)0x0000000E << 16) << 24)) >> 41) + 1; + pStreamInfo->bitsPerSample = (ma_uint8 )((importantProps & (((ma_uint64)0x0000001F << 16) << 20)) >> 36) + 1; + pStreamInfo->totalPCMFrameCount = ((importantProps & ((((ma_uint64)0x0000000F << 16) << 16) | 0xFFFFFFFF))); + MA_DR_FLAC_COPY_MEMORY(pStreamInfo->md5, md5, sizeof(md5)); + return MA_TRUE; +} +static void* ma_dr_flac__malloc_default(size_t sz, void* pUserData) +{ + (void)pUserData; + return MA_DR_FLAC_MALLOC(sz); +} +static void* ma_dr_flac__realloc_default(void* p, size_t sz, void* pUserData) +{ + (void)pUserData; + return MA_DR_FLAC_REALLOC(p, sz); +} +static void ma_dr_flac__free_default(void* p, void* pUserData) +{ + (void)pUserData; + MA_DR_FLAC_FREE(p); +} +static void* ma_dr_flac__malloc_from_callbacks(size_t sz, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pAllocationCallbacks == NULL) { + return NULL; + } + if (pAllocationCallbacks->onMalloc != NULL) { + return pAllocationCallbacks->onMalloc(sz, pAllocationCallbacks->pUserData); + } + if (pAllocationCallbacks->onRealloc != NULL) { + return pAllocationCallbacks->onRealloc(NULL, sz, pAllocationCallbacks->pUserData); + } + return NULL; +} +static void* ma_dr_flac__realloc_from_callbacks(void* p, size_t szNew, size_t szOld, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pAllocationCallbacks == NULL) { + return NULL; + } + if (pAllocationCallbacks->onRealloc != NULL) { + return pAllocationCallbacks->onRealloc(p, szNew, pAllocationCallbacks->pUserData); + } + if (pAllocationCallbacks->onMalloc != NULL && pAllocationCallbacks->onFree != NULL) { + void* p2; + p2 = pAllocationCallbacks->onMalloc(szNew, pAllocationCallbacks->pUserData); + if (p2 == NULL) { + return NULL; + } + if (p != NULL) { + MA_DR_FLAC_COPY_MEMORY(p2, p, szOld); + pAllocationCallbacks->onFree(p, pAllocationCallbacks->pUserData); + } + return p2; + } + return NULL; +} +static void 
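+/*
+** STREAMINFO is 34 bytes, read above as a 32-bit word, a 48-bit group (the low 6 bytes
+** of frameSizes) and a 64-bit group before the MD5. Bit budget:
+**
+**   min/max block size    16 + 16
+**   min/max frame size    24 + 24
+**   sample rate           20
+**   channels - 1           3
+**   bits per sample - 1    5
+**   total PCM frames      36
+**   MD5                  128
+**
+** 16+16+24+24+20+3+5+36 = 144 bits = 18 bytes, plus 16 bytes of MD5 = 34, which is the
+** blockSize the STREAMINFO check elsewhere insists on.
+*/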
ma_dr_flac__free_from_callbacks(void* p, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (p == NULL || pAllocationCallbacks == NULL) { + return; + } + if (pAllocationCallbacks->onFree != NULL) { + pAllocationCallbacks->onFree(p, pAllocationCallbacks->pUserData); + } +} +static ma_bool32 ma_dr_flac__read_and_decode_metadata(ma_dr_flac_read_proc onRead, ma_dr_flac_seek_proc onSeek, ma_dr_flac_meta_proc onMeta, void* pUserData, void* pUserDataMD, ma_uint64* pFirstFramePos, ma_uint64* pSeektablePos, ma_uint32* pSeekpointCount, ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_uint64 runningFilePos = 42; + ma_uint64 seektablePos = 0; + ma_uint32 seektableSize = 0; + for (;;) { + ma_dr_flac_metadata metadata; + ma_uint8 isLastBlock = 0; + ma_uint8 blockType = 0; + ma_uint32 blockSize; + if (ma_dr_flac__read_and_decode_block_header(onRead, pUserData, &isLastBlock, &blockType, &blockSize) == MA_FALSE) { + return MA_FALSE; + } + runningFilePos += 4; + metadata.type = blockType; + metadata.pRawData = NULL; + metadata.rawDataSize = 0; + switch (blockType) + { + case MA_DR_FLAC_METADATA_BLOCK_TYPE_APPLICATION: + { + if (blockSize < 4) { + return MA_FALSE; + } + if (onMeta) { + void* pRawData = ma_dr_flac__malloc_from_callbacks(blockSize, pAllocationCallbacks); + if (pRawData == NULL) { + return MA_FALSE; + } + if (onRead(pUserData, pRawData, blockSize) != blockSize) { + ma_dr_flac__free_from_callbacks(pRawData, pAllocationCallbacks); + return MA_FALSE; + } + metadata.pRawData = pRawData; + metadata.rawDataSize = blockSize; + metadata.data.application.id = ma_dr_flac__be2host_32(*(ma_uint32*)pRawData); + metadata.data.application.pData = (const void*)((ma_uint8*)pRawData + sizeof(ma_uint32)); + metadata.data.application.dataSize = blockSize - sizeof(ma_uint32); + onMeta(pUserDataMD, &metadata); + ma_dr_flac__free_from_callbacks(pRawData, pAllocationCallbacks); + } + } break; + case MA_DR_FLAC_METADATA_BLOCK_TYPE_SEEKTABLE: + { + seektablePos = runningFilePos; + seektableSize = blockSize; + if (onMeta) { + ma_uint32 seekpointCount; + ma_uint32 iSeekpoint; + void* pRawData; + seekpointCount = blockSize/MA_DR_FLAC_SEEKPOINT_SIZE_IN_BYTES; + pRawData = ma_dr_flac__malloc_from_callbacks(seekpointCount * sizeof(ma_dr_flac_seekpoint), pAllocationCallbacks); + if (pRawData == NULL) { + return MA_FALSE; + } + for (iSeekpoint = 0; iSeekpoint < seekpointCount; ++iSeekpoint) { + ma_dr_flac_seekpoint* pSeekpoint = (ma_dr_flac_seekpoint*)pRawData + iSeekpoint; + if (onRead(pUserData, pSeekpoint, MA_DR_FLAC_SEEKPOINT_SIZE_IN_BYTES) != MA_DR_FLAC_SEEKPOINT_SIZE_IN_BYTES) { + ma_dr_flac__free_from_callbacks(pRawData, pAllocationCallbacks); + return MA_FALSE; + } + pSeekpoint->firstPCMFrame = ma_dr_flac__be2host_64(pSeekpoint->firstPCMFrame); + pSeekpoint->flacFrameOffset = ma_dr_flac__be2host_64(pSeekpoint->flacFrameOffset); + pSeekpoint->pcmFrameCount = ma_dr_flac__be2host_16(pSeekpoint->pcmFrameCount); + } + metadata.pRawData = pRawData; + metadata.rawDataSize = blockSize; + metadata.data.seektable.seekpointCount = seekpointCount; + metadata.data.seektable.pSeekpoints = (const ma_dr_flac_seekpoint*)pRawData; + onMeta(pUserDataMD, &metadata); + ma_dr_flac__free_from_callbacks(pRawData, pAllocationCallbacks); + } + } break; + case MA_DR_FLAC_METADATA_BLOCK_TYPE_VORBIS_COMMENT: + { + if (blockSize < 8) { + return MA_FALSE; + } + if (onMeta) { + void* pRawData; + const char* pRunningData; + const char* pRunningDataEnd; + ma_uint32 i; + pRawData = ma_dr_flac__malloc_from_callbacks(blockSize, 
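+/*
+** Note runningFilePos starts at 42 below: 4 bytes of "fLaC" marker, 4 bytes of
+** STREAMINFO block header and 34 bytes of STREAMINFO have already been consumed by the
+** time this function runs. Also note the VORBIS_COMMENT block is the one little-endian
+** structure in FLAC:
+**
+**   u32le vendorLength, vendor bytes,
+**   u32le commentCount,
+**   commentCount * { u32le length, "KEY=value" UTF-8 bytes }
+**
+** which is why its parser switches to ma_dr_flac__le2host_32_ptr_unaligned while every
+** other block uses big-endian reads.
+*/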
pAllocationCallbacks); + if (pRawData == NULL) { + return MA_FALSE; + } + if (onRead(pUserData, pRawData, blockSize) != blockSize) { + ma_dr_flac__free_from_callbacks(pRawData, pAllocationCallbacks); + return MA_FALSE; + } + metadata.pRawData = pRawData; + metadata.rawDataSize = blockSize; + pRunningData = (const char*)pRawData; + pRunningDataEnd = (const char*)pRawData + blockSize; + metadata.data.vorbis_comment.vendorLength = ma_dr_flac__le2host_32_ptr_unaligned(pRunningData); pRunningData += 4; + if ((pRunningDataEnd - pRunningData) - 4 < (ma_int64)metadata.data.vorbis_comment.vendorLength) { + ma_dr_flac__free_from_callbacks(pRawData, pAllocationCallbacks); + return MA_FALSE; + } + metadata.data.vorbis_comment.vendor = pRunningData; pRunningData += metadata.data.vorbis_comment.vendorLength; + metadata.data.vorbis_comment.commentCount = ma_dr_flac__le2host_32_ptr_unaligned(pRunningData); pRunningData += 4; + if ((pRunningDataEnd - pRunningData) / sizeof(ma_uint32) < metadata.data.vorbis_comment.commentCount) { + ma_dr_flac__free_from_callbacks(pRawData, pAllocationCallbacks); + return MA_FALSE; + } + metadata.data.vorbis_comment.pComments = pRunningData; + for (i = 0; i < metadata.data.vorbis_comment.commentCount; ++i) { + ma_uint32 commentLength; + if (pRunningDataEnd - pRunningData < 4) { + ma_dr_flac__free_from_callbacks(pRawData, pAllocationCallbacks); + return MA_FALSE; + } + commentLength = ma_dr_flac__le2host_32_ptr_unaligned(pRunningData); pRunningData += 4; + if (pRunningDataEnd - pRunningData < (ma_int64)commentLength) { + ma_dr_flac__free_from_callbacks(pRawData, pAllocationCallbacks); + return MA_FALSE; + } + pRunningData += commentLength; + } + onMeta(pUserDataMD, &metadata); + ma_dr_flac__free_from_callbacks(pRawData, pAllocationCallbacks); + } + } break; + case MA_DR_FLAC_METADATA_BLOCK_TYPE_CUESHEET: + { + if (blockSize < 396) { + return MA_FALSE; + } + if (onMeta) { + void* pRawData; + const char* pRunningData; + const char* pRunningDataEnd; + size_t bufferSize; + ma_uint8 iTrack; + ma_uint8 iIndex; + void* pTrackData; + pRawData = ma_dr_flac__malloc_from_callbacks(blockSize, pAllocationCallbacks); + if (pRawData == NULL) { + return MA_FALSE; + } + if (onRead(pUserData, pRawData, blockSize) != blockSize) { + ma_dr_flac__free_from_callbacks(pRawData, pAllocationCallbacks); + return MA_FALSE; + } + metadata.pRawData = pRawData; + metadata.rawDataSize = blockSize; + pRunningData = (const char*)pRawData; + pRunningDataEnd = (const char*)pRawData + blockSize; + MA_DR_FLAC_COPY_MEMORY(metadata.data.cuesheet.catalog, pRunningData, 128); pRunningData += 128; + metadata.data.cuesheet.leadInSampleCount = ma_dr_flac__be2host_64(*(const ma_uint64*)pRunningData); pRunningData += 8; + metadata.data.cuesheet.isCD = (pRunningData[0] & 0x80) != 0; pRunningData += 259; + metadata.data.cuesheet.trackCount = pRunningData[0]; pRunningData += 1; + metadata.data.cuesheet.pTrackData = NULL; + { + const char* pRunningDataSaved = pRunningData; + bufferSize = metadata.data.cuesheet.trackCount * MA_DR_FLAC_CUESHEET_TRACK_SIZE_IN_BYTES; + for (iTrack = 0; iTrack < metadata.data.cuesheet.trackCount; ++iTrack) { + ma_uint8 indexCount; + ma_uint32 indexPointSize; + if (pRunningDataEnd - pRunningData < MA_DR_FLAC_CUESHEET_TRACK_SIZE_IN_BYTES) { + ma_dr_flac__free_from_callbacks(pRawData, pAllocationCallbacks); + return MA_FALSE; + } + pRunningData += 35; + indexCount = pRunningData[0]; + pRunningData += 1; + bufferSize += indexCount * sizeof(ma_dr_flac_cuesheet_track_index); + indexPointSize = 
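+/*
+** CUESHEET parsing is two-pass: the first walk only validates bounds and totals the
+** buffer size (trackCount fixed-size track records plus all of their index points); the
+** second walk copies into the freshly allocated pTrackData and byte-swaps the offsets.
+** Size arithmetic, using the constants referenced above:
+**
+**   bufferSize = trackCount * MA_DR_FLAC_CUESHEET_TRACK_SIZE_IN_BYTES
+**              + totalIndexPoints * sizeof(ma_dr_flac_cuesheet_track_index);
+**
+** Validating first means a malformed block fails cleanly before the second allocation.
+*/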
indexCount * MA_DR_FLAC_CUESHEET_TRACK_INDEX_SIZE_IN_BYTES; + if (pRunningDataEnd - pRunningData < (ma_int64)indexPointSize) { + ma_dr_flac__free_from_callbacks(pRawData, pAllocationCallbacks); + return MA_FALSE; + } + pRunningData += indexPointSize; + } + pRunningData = pRunningDataSaved; + } + { + char* pRunningTrackData; + pTrackData = ma_dr_flac__malloc_from_callbacks(bufferSize, pAllocationCallbacks); + if (pTrackData == NULL) { + ma_dr_flac__free_from_callbacks(pRawData, pAllocationCallbacks); + return MA_FALSE; + } + pRunningTrackData = (char*)pTrackData; + for (iTrack = 0; iTrack < metadata.data.cuesheet.trackCount; ++iTrack) { + ma_uint8 indexCount; + MA_DR_FLAC_COPY_MEMORY(pRunningTrackData, pRunningData, MA_DR_FLAC_CUESHEET_TRACK_SIZE_IN_BYTES); + pRunningData += MA_DR_FLAC_CUESHEET_TRACK_SIZE_IN_BYTES-1; + pRunningTrackData += MA_DR_FLAC_CUESHEET_TRACK_SIZE_IN_BYTES-1; + indexCount = pRunningData[0]; + pRunningData += 1; + pRunningTrackData += 1; + for (iIndex = 0; iIndex < indexCount; ++iIndex) { + ma_dr_flac_cuesheet_track_index* pTrackIndex = (ma_dr_flac_cuesheet_track_index*)pRunningTrackData; + MA_DR_FLAC_COPY_MEMORY(pRunningTrackData, pRunningData, MA_DR_FLAC_CUESHEET_TRACK_INDEX_SIZE_IN_BYTES); + pRunningData += MA_DR_FLAC_CUESHEET_TRACK_INDEX_SIZE_IN_BYTES; + pRunningTrackData += sizeof(ma_dr_flac_cuesheet_track_index); + pTrackIndex->offset = ma_dr_flac__be2host_64(pTrackIndex->offset); + } + } + metadata.data.cuesheet.pTrackData = pTrackData; + } + ma_dr_flac__free_from_callbacks(pRawData, pAllocationCallbacks); + pRawData = NULL; + onMeta(pUserDataMD, &metadata); + ma_dr_flac__free_from_callbacks(pTrackData, pAllocationCallbacks); + pTrackData = NULL; + } + } break; + case MA_DR_FLAC_METADATA_BLOCK_TYPE_PICTURE: + { + if (blockSize < 32) { + return MA_FALSE; + } + if (onMeta) { + void* pRawData; + const char* pRunningData; + const char* pRunningDataEnd; + pRawData = ma_dr_flac__malloc_from_callbacks(blockSize, pAllocationCallbacks); + if (pRawData == NULL) { + return MA_FALSE; + } + if (onRead(pUserData, pRawData, blockSize) != blockSize) { + ma_dr_flac__free_from_callbacks(pRawData, pAllocationCallbacks); + return MA_FALSE; + } + metadata.pRawData = pRawData; + metadata.rawDataSize = blockSize; + pRunningData = (const char*)pRawData; + pRunningDataEnd = (const char*)pRawData + blockSize; + metadata.data.picture.type = ma_dr_flac__be2host_32_ptr_unaligned(pRunningData); pRunningData += 4; + metadata.data.picture.mimeLength = ma_dr_flac__be2host_32_ptr_unaligned(pRunningData); pRunningData += 4; + if ((pRunningDataEnd - pRunningData) - 24 < (ma_int64)metadata.data.picture.mimeLength) { + ma_dr_flac__free_from_callbacks(pRawData, pAllocationCallbacks); + return MA_FALSE; + } + metadata.data.picture.mime = pRunningData; pRunningData += metadata.data.picture.mimeLength; + metadata.data.picture.descriptionLength = ma_dr_flac__be2host_32_ptr_unaligned(pRunningData); pRunningData += 4; + if ((pRunningDataEnd - pRunningData) - 20 < (ma_int64)metadata.data.picture.descriptionLength) { + ma_dr_flac__free_from_callbacks(pRawData, pAllocationCallbacks); + return MA_FALSE; + } + metadata.data.picture.description = pRunningData; pRunningData += metadata.data.picture.descriptionLength; + metadata.data.picture.width = ma_dr_flac__be2host_32_ptr_unaligned(pRunningData); pRunningData += 4; + metadata.data.picture.height = ma_dr_flac__be2host_32_ptr_unaligned(pRunningData); pRunningData += 4; + metadata.data.picture.colorDepth = ma_dr_flac__be2host_32_ptr_unaligned(pRunningData); 
pRunningData += 4; + metadata.data.picture.indexColorCount = ma_dr_flac__be2host_32_ptr_unaligned(pRunningData); pRunningData += 4; + metadata.data.picture.pictureDataSize = ma_dr_flac__be2host_32_ptr_unaligned(pRunningData); pRunningData += 4; + metadata.data.picture.pPictureData = (const ma_uint8*)pRunningData; + if (pRunningDataEnd - pRunningData < (ma_int64)metadata.data.picture.pictureDataSize) { + ma_dr_flac__free_from_callbacks(pRawData, pAllocationCallbacks); + return MA_FALSE; + } + onMeta(pUserDataMD, &metadata); + ma_dr_flac__free_from_callbacks(pRawData, pAllocationCallbacks); + } + } break; + case MA_DR_FLAC_METADATA_BLOCK_TYPE_PADDING: + { + if (onMeta) { + metadata.data.padding.unused = 0; + if (!onSeek(pUserData, blockSize, ma_dr_flac_seek_origin_current)) { + isLastBlock = MA_TRUE; + } else { + onMeta(pUserDataMD, &metadata); + } + } + } break; + case MA_DR_FLAC_METADATA_BLOCK_TYPE_INVALID: + { + if (onMeta) { + if (!onSeek(pUserData, blockSize, ma_dr_flac_seek_origin_current)) { + isLastBlock = MA_TRUE; + } + } + } break; + default: + { + if (onMeta) { + void* pRawData = ma_dr_flac__malloc_from_callbacks(blockSize, pAllocationCallbacks); + if (pRawData == NULL) { + return MA_FALSE; + } + if (onRead(pUserData, pRawData, blockSize) != blockSize) { + ma_dr_flac__free_from_callbacks(pRawData, pAllocationCallbacks); + return MA_FALSE; + } + metadata.pRawData = pRawData; + metadata.rawDataSize = blockSize; + onMeta(pUserDataMD, &metadata); + ma_dr_flac__free_from_callbacks(pRawData, pAllocationCallbacks); + } + } break; + } + if (onMeta == NULL && blockSize > 0) { + if (!onSeek(pUserData, blockSize, ma_dr_flac_seek_origin_current)) { + isLastBlock = MA_TRUE; + } + } + runningFilePos += blockSize; + if (isLastBlock) { + break; + } + } + *pSeektablePos = seektablePos; + *pSeekpointCount = seektableSize / MA_DR_FLAC_SEEKPOINT_SIZE_IN_BYTES; + *pFirstFramePos = runningFilePos; + return MA_TRUE; +} +static ma_bool32 ma_dr_flac__init_private__native(ma_dr_flac_init_info* pInit, ma_dr_flac_read_proc onRead, ma_dr_flac_seek_proc onSeek, ma_dr_flac_meta_proc onMeta, void* pUserData, void* pUserDataMD, ma_bool32 relaxed) +{ + ma_uint8 isLastBlock; + ma_uint8 blockType; + ma_uint32 blockSize; + (void)onSeek; + pInit->container = ma_dr_flac_container_native; + if (!ma_dr_flac__read_and_decode_block_header(onRead, pUserData, &isLastBlock, &blockType, &blockSize)) { + return MA_FALSE; + } + if (blockType != MA_DR_FLAC_METADATA_BLOCK_TYPE_STREAMINFO || blockSize != 34) { + if (!relaxed) { + return MA_FALSE; + } else { + pInit->hasStreamInfoBlock = MA_FALSE; + pInit->hasMetadataBlocks = MA_FALSE; + if (!ma_dr_flac__read_next_flac_frame_header(&pInit->bs, 0, &pInit->firstFrameHeader)) { + return MA_FALSE; + } + if (pInit->firstFrameHeader.bitsPerSample == 0) { + return MA_FALSE; + } + pInit->sampleRate = pInit->firstFrameHeader.sampleRate; + pInit->channels = ma_dr_flac__get_channel_count_from_channel_assignment(pInit->firstFrameHeader.channelAssignment); + pInit->bitsPerSample = pInit->firstFrameHeader.bitsPerSample; + pInit->maxBlockSizeInPCMFrames = 65535; + return MA_TRUE; + } + } else { + ma_dr_flac_streaminfo streaminfo; + if (!ma_dr_flac__read_streaminfo(onRead, pUserData, &streaminfo)) { + return MA_FALSE; + } + pInit->hasStreamInfoBlock = MA_TRUE; + pInit->sampleRate = streaminfo.sampleRate; + pInit->channels = streaminfo.channels; + pInit->bitsPerSample = streaminfo.bitsPerSample; + pInit->totalPCMFrameCount = streaminfo.totalPCMFrameCount; + pInit->maxBlockSizeInPCMFrames = 
streaminfo.maxBlockSizeInPCMFrames; + pInit->hasMetadataBlocks = !isLastBlock; + if (onMeta) { + ma_dr_flac_metadata metadata; + metadata.type = MA_DR_FLAC_METADATA_BLOCK_TYPE_STREAMINFO; + metadata.pRawData = NULL; + metadata.rawDataSize = 0; + metadata.data.streaminfo = streaminfo; + onMeta(pUserDataMD, &metadata); + } + return MA_TRUE; + } +} +#ifndef MA_DR_FLAC_NO_OGG +#define MA_DR_FLAC_OGG_MAX_PAGE_SIZE 65307 +#define MA_DR_FLAC_OGG_CAPTURE_PATTERN_CRC32 1605413199 +typedef enum +{ + ma_dr_flac_ogg_recover_on_crc_mismatch, + ma_dr_flac_ogg_fail_on_crc_mismatch +} ma_dr_flac_ogg_crc_mismatch_recovery; +#ifndef MA_DR_FLAC_NO_CRC +static ma_uint32 ma_dr_flac__crc32_table[] = { + 0x00000000L, 0x04C11DB7L, 0x09823B6EL, 0x0D4326D9L, + 0x130476DCL, 0x17C56B6BL, 0x1A864DB2L, 0x1E475005L, + 0x2608EDB8L, 0x22C9F00FL, 0x2F8AD6D6L, 0x2B4BCB61L, + 0x350C9B64L, 0x31CD86D3L, 0x3C8EA00AL, 0x384FBDBDL, + 0x4C11DB70L, 0x48D0C6C7L, 0x4593E01EL, 0x4152FDA9L, + 0x5F15ADACL, 0x5BD4B01BL, 0x569796C2L, 0x52568B75L, + 0x6A1936C8L, 0x6ED82B7FL, 0x639B0DA6L, 0x675A1011L, + 0x791D4014L, 0x7DDC5DA3L, 0x709F7B7AL, 0x745E66CDL, + 0x9823B6E0L, 0x9CE2AB57L, 0x91A18D8EL, 0x95609039L, + 0x8B27C03CL, 0x8FE6DD8BL, 0x82A5FB52L, 0x8664E6E5L, + 0xBE2B5B58L, 0xBAEA46EFL, 0xB7A96036L, 0xB3687D81L, + 0xAD2F2D84L, 0xA9EE3033L, 0xA4AD16EAL, 0xA06C0B5DL, + 0xD4326D90L, 0xD0F37027L, 0xDDB056FEL, 0xD9714B49L, + 0xC7361B4CL, 0xC3F706FBL, 0xCEB42022L, 0xCA753D95L, + 0xF23A8028L, 0xF6FB9D9FL, 0xFBB8BB46L, 0xFF79A6F1L, + 0xE13EF6F4L, 0xE5FFEB43L, 0xE8BCCD9AL, 0xEC7DD02DL, + 0x34867077L, 0x30476DC0L, 0x3D044B19L, 0x39C556AEL, + 0x278206ABL, 0x23431B1CL, 0x2E003DC5L, 0x2AC12072L, + 0x128E9DCFL, 0x164F8078L, 0x1B0CA6A1L, 0x1FCDBB16L, + 0x018AEB13L, 0x054BF6A4L, 0x0808D07DL, 0x0CC9CDCAL, + 0x7897AB07L, 0x7C56B6B0L, 0x71159069L, 0x75D48DDEL, + 0x6B93DDDBL, 0x6F52C06CL, 0x6211E6B5L, 0x66D0FB02L, + 0x5E9F46BFL, 0x5A5E5B08L, 0x571D7DD1L, 0x53DC6066L, + 0x4D9B3063L, 0x495A2DD4L, 0x44190B0DL, 0x40D816BAL, + 0xACA5C697L, 0xA864DB20L, 0xA527FDF9L, 0xA1E6E04EL, + 0xBFA1B04BL, 0xBB60ADFCL, 0xB6238B25L, 0xB2E29692L, + 0x8AAD2B2FL, 0x8E6C3698L, 0x832F1041L, 0x87EE0DF6L, + 0x99A95DF3L, 0x9D684044L, 0x902B669DL, 0x94EA7B2AL, + 0xE0B41DE7L, 0xE4750050L, 0xE9362689L, 0xEDF73B3EL, + 0xF3B06B3BL, 0xF771768CL, 0xFA325055L, 0xFEF34DE2L, + 0xC6BCF05FL, 0xC27DEDE8L, 0xCF3ECB31L, 0xCBFFD686L, + 0xD5B88683L, 0xD1799B34L, 0xDC3ABDEDL, 0xD8FBA05AL, + 0x690CE0EEL, 0x6DCDFD59L, 0x608EDB80L, 0x644FC637L, + 0x7A089632L, 0x7EC98B85L, 0x738AAD5CL, 0x774BB0EBL, + 0x4F040D56L, 0x4BC510E1L, 0x46863638L, 0x42472B8FL, + 0x5C007B8AL, 0x58C1663DL, 0x558240E4L, 0x51435D53L, + 0x251D3B9EL, 0x21DC2629L, 0x2C9F00F0L, 0x285E1D47L, + 0x36194D42L, 0x32D850F5L, 0x3F9B762CL, 0x3B5A6B9BL, + 0x0315D626L, 0x07D4CB91L, 0x0A97ED48L, 0x0E56F0FFL, + 0x1011A0FAL, 0x14D0BD4DL, 0x19939B94L, 0x1D528623L, + 0xF12F560EL, 0xF5EE4BB9L, 0xF8AD6D60L, 0xFC6C70D7L, + 0xE22B20D2L, 0xE6EA3D65L, 0xEBA91BBCL, 0xEF68060BL, + 0xD727BBB6L, 0xD3E6A601L, 0xDEA580D8L, 0xDA649D6FL, + 0xC423CD6AL, 0xC0E2D0DDL, 0xCDA1F604L, 0xC960EBB3L, + 0xBD3E8D7EL, 0xB9FF90C9L, 0xB4BCB610L, 0xB07DABA7L, + 0xAE3AFBA2L, 0xAAFBE615L, 0xA7B8C0CCL, 0xA379DD7BL, + 0x9B3660C6L, 0x9FF77D71L, 0x92B45BA8L, 0x9675461FL, + 0x8832161AL, 0x8CF30BADL, 0x81B02D74L, 0x857130C3L, + 0x5D8A9099L, 0x594B8D2EL, 0x5408ABF7L, 0x50C9B640L, + 0x4E8EE645L, 0x4A4FFBF2L, 0x470CDD2BL, 0x43CDC09CL, + 0x7B827D21L, 0x7F436096L, 0x7200464FL, 0x76C15BF8L, + 0x68860BFDL, 0x6C47164AL, 0x61043093L, 0x65C52D24L, + 0x119B4BE9L, 0x155A565EL, 0x18197087L, 0x1CD86D30L, + 
0x029F3D35L, 0x065E2082L, 0x0B1D065BL, 0x0FDC1BECL, + 0x3793A651L, 0x3352BBE6L, 0x3E119D3FL, 0x3AD08088L, + 0x2497D08DL, 0x2056CD3AL, 0x2D15EBE3L, 0x29D4F654L, + 0xC5A92679L, 0xC1683BCEL, 0xCC2B1D17L, 0xC8EA00A0L, + 0xD6AD50A5L, 0xD26C4D12L, 0xDF2F6BCBL, 0xDBEE767CL, + 0xE3A1CBC1L, 0xE760D676L, 0xEA23F0AFL, 0xEEE2ED18L, + 0xF0A5BD1DL, 0xF464A0AAL, 0xF9278673L, 0xFDE69BC4L, + 0x89B8FD09L, 0x8D79E0BEL, 0x803AC667L, 0x84FBDBD0L, + 0x9ABC8BD5L, 0x9E7D9662L, 0x933EB0BBL, 0x97FFAD0CL, + 0xAFB010B1L, 0xAB710D06L, 0xA6322BDFL, 0xA2F33668L, + 0xBCB4666DL, 0xB8757BDAL, 0xB5365D03L, 0xB1F740B4L +}; +#endif +static MA_INLINE ma_uint32 ma_dr_flac_crc32_byte(ma_uint32 crc32, ma_uint8 data) +{ +#ifndef MA_DR_FLAC_NO_CRC + return (crc32 << 8) ^ ma_dr_flac__crc32_table[(ma_uint8)((crc32 >> 24) & 0xFF) ^ data]; +#else + (void)data; + return crc32; +#endif +} +#if 0 +static MA_INLINE ma_uint32 ma_dr_flac_crc32_uint32(ma_uint32 crc32, ma_uint32 data) +{ + crc32 = ma_dr_flac_crc32_byte(crc32, (ma_uint8)((data >> 24) & 0xFF)); + crc32 = ma_dr_flac_crc32_byte(crc32, (ma_uint8)((data >> 16) & 0xFF)); + crc32 = ma_dr_flac_crc32_byte(crc32, (ma_uint8)((data >> 8) & 0xFF)); + crc32 = ma_dr_flac_crc32_byte(crc32, (ma_uint8)((data >> 0) & 0xFF)); + return crc32; +} +static MA_INLINE ma_uint32 ma_dr_flac_crc32_uint64(ma_uint32 crc32, ma_uint64 data) +{ + crc32 = ma_dr_flac_crc32_uint32(crc32, (ma_uint32)((data >> 32) & 0xFFFFFFFF)); + crc32 = ma_dr_flac_crc32_uint32(crc32, (ma_uint32)((data >> 0) & 0xFFFFFFFF)); + return crc32; +} +#endif +static MA_INLINE ma_uint32 ma_dr_flac_crc32_buffer(ma_uint32 crc32, ma_uint8* pData, ma_uint32 dataSize) +{ + ma_uint32 i; + for (i = 0; i < dataSize; ++i) { + crc32 = ma_dr_flac_crc32_byte(crc32, pData[i]); + } + return crc32; +} +static MA_INLINE ma_bool32 ma_dr_flac_ogg__is_capture_pattern(ma_uint8 pattern[4]) +{ + return pattern[0] == 'O' && pattern[1] == 'g' && pattern[2] == 'g' && pattern[3] == 'S'; +} +static MA_INLINE ma_uint32 ma_dr_flac_ogg__get_page_header_size(ma_dr_flac_ogg_page_header* pHeader) +{ + return 27 + pHeader->segmentCount; +} +static MA_INLINE ma_uint32 ma_dr_flac_ogg__get_page_body_size(ma_dr_flac_ogg_page_header* pHeader) +{ + ma_uint32 pageBodySize = 0; + int i; + for (i = 0; i < pHeader->segmentCount; ++i) { + pageBodySize += pHeader->segmentTable[i]; + } + return pageBodySize; +} +static ma_result ma_dr_flac_ogg__read_page_header_after_capture_pattern(ma_dr_flac_read_proc onRead, void* pUserData, ma_dr_flac_ogg_page_header* pHeader, ma_uint32* pBytesRead, ma_uint32* pCRC32) +{ + ma_uint8 data[23]; + ma_uint32 i; + MA_DR_FLAC_ASSERT(*pCRC32 == MA_DR_FLAC_OGG_CAPTURE_PATTERN_CRC32); + if (onRead(pUserData, data, 23) != 23) { + return MA_AT_END; + } + *pBytesRead += 23; + pHeader->capturePattern[0] = 'O'; + pHeader->capturePattern[1] = 'g'; + pHeader->capturePattern[2] = 'g'; + pHeader->capturePattern[3] = 'S'; + pHeader->structureVersion = data[0]; + pHeader->headerType = data[1]; + MA_DR_FLAC_COPY_MEMORY(&pHeader->granulePosition, &data[ 2], 8); + MA_DR_FLAC_COPY_MEMORY(&pHeader->serialNumber, &data[10], 4); + MA_DR_FLAC_COPY_MEMORY(&pHeader->sequenceNumber, &data[14], 4); + MA_DR_FLAC_COPY_MEMORY(&pHeader->checksum, &data[18], 4); + pHeader->segmentCount = data[22]; + data[18] = 0; + data[19] = 0; + data[20] = 0; + data[21] = 0; + for (i = 0; i < 23; ++i) { + *pCRC32 = ma_dr_flac_crc32_byte(*pCRC32, data[i]); + } + if (onRead(pUserData, pHeader->segmentTable, pHeader->segmentCount) != pHeader->segmentCount) { + return MA_AT_END; + } + *pBytesRead += 
pHeader->segmentCount; + for (i = 0; i < pHeader->segmentCount; ++i) { + *pCRC32 = ma_dr_flac_crc32_byte(*pCRC32, pHeader->segmentTable[i]); + } + return MA_SUCCESS; +} +static ma_result ma_dr_flac_ogg__read_page_header(ma_dr_flac_read_proc onRead, void* pUserData, ma_dr_flac_ogg_page_header* pHeader, ma_uint32* pBytesRead, ma_uint32* pCRC32) +{ + ma_uint8 id[4]; + *pBytesRead = 0; + if (onRead(pUserData, id, 4) != 4) { + return MA_AT_END; + } + *pBytesRead += 4; + for (;;) { + if (ma_dr_flac_ogg__is_capture_pattern(id)) { + ma_result result; + *pCRC32 = MA_DR_FLAC_OGG_CAPTURE_PATTERN_CRC32; + result = ma_dr_flac_ogg__read_page_header_after_capture_pattern(onRead, pUserData, pHeader, pBytesRead, pCRC32); + if (result == MA_SUCCESS) { + return MA_SUCCESS; + } else { + if (result == MA_CRC_MISMATCH) { + continue; + } else { + return result; + } + } + } else { + id[0] = id[1]; + id[1] = id[2]; + id[2] = id[3]; + if (onRead(pUserData, &id[3], 1) != 1) { + return MA_AT_END; + } + *pBytesRead += 1; + } + } +} +typedef struct +{ + ma_dr_flac_read_proc onRead; + ma_dr_flac_seek_proc onSeek; + void* pUserData; + ma_uint64 currentBytePos; + ma_uint64 firstBytePos; + ma_uint32 serialNumber; + ma_dr_flac_ogg_page_header bosPageHeader; + ma_dr_flac_ogg_page_header currentPageHeader; + ma_uint32 bytesRemainingInPage; + ma_uint32 pageDataSize; + ma_uint8 pageData[MA_DR_FLAC_OGG_MAX_PAGE_SIZE]; +} ma_dr_flac_oggbs; +static size_t ma_dr_flac_oggbs__read_physical(ma_dr_flac_oggbs* oggbs, void* bufferOut, size_t bytesToRead) +{ + size_t bytesActuallyRead = oggbs->onRead(oggbs->pUserData, bufferOut, bytesToRead); + oggbs->currentBytePos += bytesActuallyRead; + return bytesActuallyRead; +} +static ma_bool32 ma_dr_flac_oggbs__seek_physical(ma_dr_flac_oggbs* oggbs, ma_uint64 offset, ma_dr_flac_seek_origin origin) +{ + if (origin == ma_dr_flac_seek_origin_start) { + if (offset <= 0x7FFFFFFF) { + if (!oggbs->onSeek(oggbs->pUserData, (int)offset, ma_dr_flac_seek_origin_start)) { + return MA_FALSE; + } + oggbs->currentBytePos = offset; + return MA_TRUE; + } else { + if (!oggbs->onSeek(oggbs->pUserData, 0x7FFFFFFF, ma_dr_flac_seek_origin_start)) { + return MA_FALSE; + } + oggbs->currentBytePos = offset; + return ma_dr_flac_oggbs__seek_physical(oggbs, offset - 0x7FFFFFFF, ma_dr_flac_seek_origin_current); + } + } else { + while (offset > 0x7FFFFFFF) { + if (!oggbs->onSeek(oggbs->pUserData, 0x7FFFFFFF, ma_dr_flac_seek_origin_current)) { + return MA_FALSE; + } + oggbs->currentBytePos += 0x7FFFFFFF; + offset -= 0x7FFFFFFF; + } + if (!oggbs->onSeek(oggbs->pUserData, (int)offset, ma_dr_flac_seek_origin_current)) { + return MA_FALSE; + } + oggbs->currentBytePos += offset; + return MA_TRUE; + } +} +static ma_bool32 ma_dr_flac_oggbs__goto_next_page(ma_dr_flac_oggbs* oggbs, ma_dr_flac_ogg_crc_mismatch_recovery recoveryMethod) +{ + ma_dr_flac_ogg_page_header header; + for (;;) { + ma_uint32 crc32 = 0; + ma_uint32 bytesRead; + ma_uint32 pageBodySize; +#ifndef MA_DR_FLAC_NO_CRC + ma_uint32 actualCRC32; +#endif + if (ma_dr_flac_ogg__read_page_header(oggbs->onRead, oggbs->pUserData, &header, &bytesRead, &crc32) != MA_SUCCESS) { + return MA_FALSE; + } + oggbs->currentBytePos += bytesRead; + pageBodySize = ma_dr_flac_ogg__get_page_body_size(&header); + if (pageBodySize > MA_DR_FLAC_OGG_MAX_PAGE_SIZE) { + continue; + } + if (header.serialNumber != oggbs->serialNumber) { + if (pageBodySize > 0 && !ma_dr_flac_oggbs__seek_physical(oggbs, pageBodySize, ma_dr_flac_seek_origin_current)) { + return MA_FALSE; + } + continue; + } + if 
(ma_dr_flac_oggbs__read_physical(oggbs, oggbs->pageData, pageBodySize) != pageBodySize) { + return MA_FALSE; + } + oggbs->pageDataSize = pageBodySize; +#ifndef MA_DR_FLAC_NO_CRC + actualCRC32 = ma_dr_flac_crc32_buffer(crc32, oggbs->pageData, oggbs->pageDataSize); + if (actualCRC32 != header.checksum) { + if (recoveryMethod == ma_dr_flac_ogg_recover_on_crc_mismatch) { + continue; + } else { + ma_dr_flac_oggbs__goto_next_page(oggbs, ma_dr_flac_ogg_recover_on_crc_mismatch); + return MA_FALSE; + } + } +#else + (void)recoveryMethod; +#endif + oggbs->currentPageHeader = header; + oggbs->bytesRemainingInPage = pageBodySize; + return MA_TRUE; + } +} +#if 0 +static ma_uint8 ma_dr_flac_oggbs__get_current_segment_index(ma_dr_flac_oggbs* oggbs, ma_uint8* pBytesRemainingInSeg) +{ + ma_uint32 bytesConsumedInPage = ma_dr_flac_ogg__get_page_body_size(&oggbs->currentPageHeader) - oggbs->bytesRemainingInPage; + ma_uint8 iSeg = 0; + ma_uint32 iByte = 0; + while (iByte < bytesConsumedInPage) { + ma_uint8 segmentSize = oggbs->currentPageHeader.segmentTable[iSeg]; + if (iByte + segmentSize > bytesConsumedInPage) { + break; + } else { + iSeg += 1; + iByte += segmentSize; + } + } + *pBytesRemainingInSeg = oggbs->currentPageHeader.segmentTable[iSeg] - (ma_uint8)(bytesConsumedInPage - iByte); + return iSeg; +} +static ma_bool32 ma_dr_flac_oggbs__seek_to_next_packet(ma_dr_flac_oggbs* oggbs) +{ + for (;;) { + ma_bool32 atEndOfPage = MA_FALSE; + ma_uint8 bytesRemainingInSeg; + ma_uint8 iFirstSeg = ma_dr_flac_oggbs__get_current_segment_index(oggbs, &bytesRemainingInSeg); + ma_uint32 bytesToEndOfPacketOrPage = bytesRemainingInSeg; + for (ma_uint8 iSeg = iFirstSeg; iSeg < oggbs->currentPageHeader.segmentCount; ++iSeg) { + ma_uint8 segmentSize = oggbs->currentPageHeader.segmentTable[iSeg]; + if (segmentSize < 255) { + if (iSeg == oggbs->currentPageHeader.segmentCount-1) { + atEndOfPage = MA_TRUE; + } + break; + } + bytesToEndOfPacketOrPage += segmentSize; + } + ma_dr_flac_oggbs__seek_physical(oggbs, bytesToEndOfPacketOrPage, ma_dr_flac_seek_origin_current); + oggbs->bytesRemainingInPage -= bytesToEndOfPacketOrPage; + if (atEndOfPage) { + if (!ma_dr_flac_oggbs__goto_next_page(oggbs)) { + return MA_FALSE; + } + if ((oggbs->currentPageHeader.headerType & 0x01) == 0) { + return MA_TRUE; + } + } else { + return MA_TRUE; + } + } +} +static ma_bool32 ma_dr_flac_oggbs__seek_to_next_frame(ma_dr_flac_oggbs* oggbs) +{ + return ma_dr_flac_oggbs__seek_to_next_packet(oggbs); +} +#endif +static size_t ma_dr_flac__on_read_ogg(void* pUserData, void* bufferOut, size_t bytesToRead) +{ + ma_dr_flac_oggbs* oggbs = (ma_dr_flac_oggbs*)pUserData; + ma_uint8* pRunningBufferOut = (ma_uint8*)bufferOut; + size_t bytesRead = 0; + MA_DR_FLAC_ASSERT(oggbs != NULL); + MA_DR_FLAC_ASSERT(pRunningBufferOut != NULL); + while (bytesRead < bytesToRead) { + size_t bytesRemainingToRead = bytesToRead - bytesRead; + if (oggbs->bytesRemainingInPage >= bytesRemainingToRead) { + MA_DR_FLAC_COPY_MEMORY(pRunningBufferOut, oggbs->pageData + (oggbs->pageDataSize - oggbs->bytesRemainingInPage), bytesRemainingToRead); + bytesRead += bytesRemainingToRead; + oggbs->bytesRemainingInPage -= (ma_uint32)bytesRemainingToRead; + break; + } + if (oggbs->bytesRemainingInPage > 0) { + MA_DR_FLAC_COPY_MEMORY(pRunningBufferOut, oggbs->pageData + (oggbs->pageDataSize - oggbs->bytesRemainingInPage), oggbs->bytesRemainingInPage); + bytesRead += oggbs->bytesRemainingInPage; + pRunningBufferOut += oggbs->bytesRemainingInPage; + oggbs->bytesRemainingInPage = 0; + } + 
MA_DR_FLAC_ASSERT(bytesRemainingToRead > 0); + if (!ma_dr_flac_oggbs__goto_next_page(oggbs, ma_dr_flac_ogg_recover_on_crc_mismatch)) { + break; + } + } + return bytesRead; +} +static ma_bool32 ma_dr_flac__on_seek_ogg(void* pUserData, int offset, ma_dr_flac_seek_origin origin) +{ + ma_dr_flac_oggbs* oggbs = (ma_dr_flac_oggbs*)pUserData; + int bytesSeeked = 0; + MA_DR_FLAC_ASSERT(oggbs != NULL); + MA_DR_FLAC_ASSERT(offset >= 0); + if (origin == ma_dr_flac_seek_origin_start) { + if (!ma_dr_flac_oggbs__seek_physical(oggbs, (int)oggbs->firstBytePos, ma_dr_flac_seek_origin_start)) { + return MA_FALSE; + } + if (!ma_dr_flac_oggbs__goto_next_page(oggbs, ma_dr_flac_ogg_fail_on_crc_mismatch)) { + return MA_FALSE; + } + return ma_dr_flac__on_seek_ogg(pUserData, offset, ma_dr_flac_seek_origin_current); + } + MA_DR_FLAC_ASSERT(origin == ma_dr_flac_seek_origin_current); + while (bytesSeeked < offset) { + int bytesRemainingToSeek = offset - bytesSeeked; + MA_DR_FLAC_ASSERT(bytesRemainingToSeek >= 0); + if (oggbs->bytesRemainingInPage >= (size_t)bytesRemainingToSeek) { + bytesSeeked += bytesRemainingToSeek; + (void)bytesSeeked; + oggbs->bytesRemainingInPage -= bytesRemainingToSeek; + break; + } + if (oggbs->bytesRemainingInPage > 0) { + bytesSeeked += (int)oggbs->bytesRemainingInPage; + oggbs->bytesRemainingInPage = 0; + } + MA_DR_FLAC_ASSERT(bytesRemainingToSeek > 0); + if (!ma_dr_flac_oggbs__goto_next_page(oggbs, ma_dr_flac_ogg_fail_on_crc_mismatch)) { + return MA_FALSE; + } + } + return MA_TRUE; +} +static ma_bool32 ma_dr_flac_ogg__seek_to_pcm_frame(ma_dr_flac* pFlac, ma_uint64 pcmFrameIndex) +{ + ma_dr_flac_oggbs* oggbs = (ma_dr_flac_oggbs*)pFlac->_oggbs; + ma_uint64 originalBytePos; + ma_uint64 runningGranulePosition; + ma_uint64 runningFrameBytePos; + ma_uint64 runningPCMFrameCount; + MA_DR_FLAC_ASSERT(oggbs != NULL); + originalBytePos = oggbs->currentBytePos; + if (!ma_dr_flac__seek_to_byte(&pFlac->bs, pFlac->firstFLACFramePosInBytes)) { + return MA_FALSE; + } + oggbs->bytesRemainingInPage = 0; + runningGranulePosition = 0; + for (;;) { + if (!ma_dr_flac_oggbs__goto_next_page(oggbs, ma_dr_flac_ogg_recover_on_crc_mismatch)) { + ma_dr_flac_oggbs__seek_physical(oggbs, originalBytePos, ma_dr_flac_seek_origin_start); + return MA_FALSE; + } + runningFrameBytePos = oggbs->currentBytePos - ma_dr_flac_ogg__get_page_header_size(&oggbs->currentPageHeader) - oggbs->pageDataSize; + if (oggbs->currentPageHeader.granulePosition >= pcmFrameIndex) { + break; + } + if ((oggbs->currentPageHeader.headerType & 0x01) == 0) { + if (oggbs->currentPageHeader.segmentTable[0] >= 2) { + ma_uint8 firstBytesInPage[2]; + firstBytesInPage[0] = oggbs->pageData[0]; + firstBytesInPage[1] = oggbs->pageData[1]; + if ((firstBytesInPage[0] == 0xFF) && (firstBytesInPage[1] & 0xFC) == 0xF8) { + runningGranulePosition = oggbs->currentPageHeader.granulePosition; + } + continue; + } + } + } + if (!ma_dr_flac_oggbs__seek_physical(oggbs, runningFrameBytePos, ma_dr_flac_seek_origin_start)) { + return MA_FALSE; + } + if (!ma_dr_flac_oggbs__goto_next_page(oggbs, ma_dr_flac_ogg_recover_on_crc_mismatch)) { + return MA_FALSE; + } + runningPCMFrameCount = runningGranulePosition; + for (;;) { + ma_uint64 firstPCMFrameInFLACFrame = 0; + ma_uint64 lastPCMFrameInFLACFrame = 0; + ma_uint64 pcmFrameCountInThisFrame; + if (!ma_dr_flac__read_next_flac_frame_header(&pFlac->bs, pFlac->bitsPerSample, &pFlac->currentFLACFrame.header)) { + return MA_FALSE; + } + ma_dr_flac__get_pcm_frame_range_of_current_flac_frame(pFlac, &firstPCMFrameInFLACFrame, 
&lastPCMFrameInFLACFrame); + pcmFrameCountInThisFrame = (lastPCMFrameInFLACFrame - firstPCMFrameInFLACFrame) + 1; + if (pcmFrameIndex == pFlac->totalPCMFrameCount && (runningPCMFrameCount + pcmFrameCountInThisFrame) == pFlac->totalPCMFrameCount) { + ma_result result = ma_dr_flac__decode_flac_frame(pFlac); + if (result == MA_SUCCESS) { + pFlac->currentPCMFrame = pcmFrameIndex; + pFlac->currentFLACFrame.pcmFramesRemaining = 0; + return MA_TRUE; + } else { + return MA_FALSE; + } + } + if (pcmFrameIndex < (runningPCMFrameCount + pcmFrameCountInThisFrame)) { + ma_result result = ma_dr_flac__decode_flac_frame(pFlac); + if (result == MA_SUCCESS) { + ma_uint64 pcmFramesToDecode = (size_t)(pcmFrameIndex - runningPCMFrameCount); + if (pcmFramesToDecode == 0) { + return MA_TRUE; + } + pFlac->currentPCMFrame = runningPCMFrameCount; + return ma_dr_flac__seek_forward_by_pcm_frames(pFlac, pcmFramesToDecode) == pcmFramesToDecode; + } else { + if (result == MA_CRC_MISMATCH) { + continue; + } else { + return MA_FALSE; + } + } + } else { + ma_result result = ma_dr_flac__seek_to_next_flac_frame(pFlac); + if (result == MA_SUCCESS) { + runningPCMFrameCount += pcmFrameCountInThisFrame; + } else { + if (result == MA_CRC_MISMATCH) { + continue; + } else { + return MA_FALSE; + } + } + } + } +} +static ma_bool32 ma_dr_flac__init_private__ogg(ma_dr_flac_init_info* pInit, ma_dr_flac_read_proc onRead, ma_dr_flac_seek_proc onSeek, ma_dr_flac_meta_proc onMeta, void* pUserData, void* pUserDataMD, ma_bool32 relaxed) +{ + ma_dr_flac_ogg_page_header header; + ma_uint32 crc32 = MA_DR_FLAC_OGG_CAPTURE_PATTERN_CRC32; + ma_uint32 bytesRead = 0; + (void)relaxed; + pInit->container = ma_dr_flac_container_ogg; + pInit->oggFirstBytePos = 0; + if (ma_dr_flac_ogg__read_page_header_after_capture_pattern(onRead, pUserData, &header, &bytesRead, &crc32) != MA_SUCCESS) { + return MA_FALSE; + } + pInit->runningFilePos += bytesRead; + for (;;) { + int pageBodySize; + if ((header.headerType & 0x02) == 0) { + return MA_FALSE; + } + pageBodySize = ma_dr_flac_ogg__get_page_body_size(&header); + if (pageBodySize == 51) { + ma_uint32 bytesRemainingInPage = pageBodySize; + ma_uint8 packetType; + if (onRead(pUserData, &packetType, 1) != 1) { + return MA_FALSE; + } + bytesRemainingInPage -= 1; + if (packetType == 0x7F) { + ma_uint8 sig[4]; + if (onRead(pUserData, sig, 4) != 4) { + return MA_FALSE; + } + bytesRemainingInPage -= 4; + if (sig[0] == 'F' && sig[1] == 'L' && sig[2] == 'A' && sig[3] == 'C') { + ma_uint8 mappingVersion[2]; + if (onRead(pUserData, mappingVersion, 2) != 2) { + return MA_FALSE; + } + if (mappingVersion[0] != 1) { + return MA_FALSE; + } + if (!onSeek(pUserData, 2, ma_dr_flac_seek_origin_current)) { + return MA_FALSE; + } + if (onRead(pUserData, sig, 4) != 4) { + return MA_FALSE; + } + if (sig[0] == 'f' && sig[1] == 'L' && sig[2] == 'a' && sig[3] == 'C') { + ma_dr_flac_streaminfo streaminfo; + ma_uint8 isLastBlock; + ma_uint8 blockType; + ma_uint32 blockSize; + if (!ma_dr_flac__read_and_decode_block_header(onRead, pUserData, &isLastBlock, &blockType, &blockSize)) { + return MA_FALSE; + } + if (blockType != MA_DR_FLAC_METADATA_BLOCK_TYPE_STREAMINFO || blockSize != 34) { + return MA_FALSE; + } + if (ma_dr_flac__read_streaminfo(onRead, pUserData, &streaminfo)) { + pInit->hasStreamInfoBlock = MA_TRUE; + pInit->sampleRate = streaminfo.sampleRate; + pInit->channels = streaminfo.channels; + pInit->bitsPerSample = streaminfo.bitsPerSample; + pInit->totalPCMFrameCount = streaminfo.totalPCMFrameCount; + pInit->maxBlockSizeInPCMFrames = 
streaminfo.maxBlockSizeInPCMFrames; + pInit->hasMetadataBlocks = !isLastBlock; + if (onMeta) { + ma_dr_flac_metadata metadata; + metadata.type = MA_DR_FLAC_METADATA_BLOCK_TYPE_STREAMINFO; + metadata.pRawData = NULL; + metadata.rawDataSize = 0; + metadata.data.streaminfo = streaminfo; + onMeta(pUserDataMD, &metadata); + } + pInit->runningFilePos += pageBodySize; + pInit->oggFirstBytePos = pInit->runningFilePos - 79; + pInit->oggSerial = header.serialNumber; + pInit->oggBosHeader = header; + break; + } else { + return MA_FALSE; + } + } else { + return MA_FALSE; + } + } else { + if (!onSeek(pUserData, bytesRemainingInPage, ma_dr_flac_seek_origin_current)) { + return MA_FALSE; + } + } + } else { + if (!onSeek(pUserData, bytesRemainingInPage, ma_dr_flac_seek_origin_current)) { + return MA_FALSE; + } + } + } else { + if (!onSeek(pUserData, pageBodySize, ma_dr_flac_seek_origin_current)) { + return MA_FALSE; + } + } + pInit->runningFilePos += pageBodySize; + if (ma_dr_flac_ogg__read_page_header(onRead, pUserData, &header, &bytesRead, &crc32) != MA_SUCCESS) { + return MA_FALSE; + } + pInit->runningFilePos += bytesRead; + } + pInit->hasMetadataBlocks = MA_TRUE; + return MA_TRUE; +} +#endif +static ma_bool32 ma_dr_flac__init_private(ma_dr_flac_init_info* pInit, ma_dr_flac_read_proc onRead, ma_dr_flac_seek_proc onSeek, ma_dr_flac_meta_proc onMeta, ma_dr_flac_container container, void* pUserData, void* pUserDataMD) +{ + ma_bool32 relaxed; + ma_uint8 id[4]; + if (pInit == NULL || onRead == NULL || onSeek == NULL) { + return MA_FALSE; + } + MA_DR_FLAC_ZERO_MEMORY(pInit, sizeof(*pInit)); + pInit->onRead = onRead; + pInit->onSeek = onSeek; + pInit->onMeta = onMeta; + pInit->container = container; + pInit->pUserData = pUserData; + pInit->pUserDataMD = pUserDataMD; + pInit->bs.onRead = onRead; + pInit->bs.onSeek = onSeek; + pInit->bs.pUserData = pUserData; + ma_dr_flac__reset_cache(&pInit->bs); + relaxed = container != ma_dr_flac_container_unknown; + for (;;) { + if (onRead(pUserData, id, 4) != 4) { + return MA_FALSE; + } + pInit->runningFilePos += 4; + if (id[0] == 'I' && id[1] == 'D' && id[2] == '3') { + ma_uint8 header[6]; + ma_uint8 flags; + ma_uint32 headerSize; + if (onRead(pUserData, header, 6) != 6) { + return MA_FALSE; + } + pInit->runningFilePos += 6; + flags = header[1]; + MA_DR_FLAC_COPY_MEMORY(&headerSize, header+2, 4); + headerSize = ma_dr_flac__unsynchsafe_32(ma_dr_flac__be2host_32(headerSize)); + if (flags & 0x10) { + headerSize += 10; + } + if (!onSeek(pUserData, headerSize, ma_dr_flac_seek_origin_current)) { + return MA_FALSE; + } + pInit->runningFilePos += headerSize; + } else { + break; + } + } + if (id[0] == 'f' && id[1] == 'L' && id[2] == 'a' && id[3] == 'C') { + return ma_dr_flac__init_private__native(pInit, onRead, onSeek, onMeta, pUserData, pUserDataMD, relaxed); + } +#ifndef MA_DR_FLAC_NO_OGG + if (id[0] == 'O' && id[1] == 'g' && id[2] == 'g' && id[3] == 'S') { + return ma_dr_flac__init_private__ogg(pInit, onRead, onSeek, onMeta, pUserData, pUserDataMD, relaxed); + } +#endif + if (relaxed) { + if (container == ma_dr_flac_container_native) { + return ma_dr_flac__init_private__native(pInit, onRead, onSeek, onMeta, pUserData, pUserDataMD, relaxed); + } +#ifndef MA_DR_FLAC_NO_OGG + if (container == ma_dr_flac_container_ogg) { + return ma_dr_flac__init_private__ogg(pInit, onRead, onSeek, onMeta, pUserData, pUserDataMD, relaxed); + } +#endif + } + return MA_FALSE; +} +static void ma_dr_flac__init_from_info(ma_dr_flac* pFlac, const ma_dr_flac_init_info* pInit) +{ + MA_DR_FLAC_ASSERT(pFlac 
!= NULL); + MA_DR_FLAC_ASSERT(pInit != NULL); + MA_DR_FLAC_ZERO_MEMORY(pFlac, sizeof(*pFlac)); + pFlac->bs = pInit->bs; + pFlac->onMeta = pInit->onMeta; + pFlac->pUserDataMD = pInit->pUserDataMD; + pFlac->maxBlockSizeInPCMFrames = pInit->maxBlockSizeInPCMFrames; + pFlac->sampleRate = pInit->sampleRate; + pFlac->channels = (ma_uint8)pInit->channels; + pFlac->bitsPerSample = (ma_uint8)pInit->bitsPerSample; + pFlac->totalPCMFrameCount = pInit->totalPCMFrameCount; + pFlac->container = pInit->container; +} +static ma_dr_flac* ma_dr_flac_open_with_metadata_private(ma_dr_flac_read_proc onRead, ma_dr_flac_seek_proc onSeek, ma_dr_flac_meta_proc onMeta, ma_dr_flac_container container, void* pUserData, void* pUserDataMD, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_dr_flac_init_info init; + ma_uint32 allocationSize; + ma_uint32 wholeSIMDVectorCountPerChannel; + ma_uint32 decodedSamplesAllocationSize; +#ifndef MA_DR_FLAC_NO_OGG + ma_dr_flac_oggbs* pOggbs = NULL; +#endif + ma_uint64 firstFramePos; + ma_uint64 seektablePos; + ma_uint32 seekpointCount; + ma_allocation_callbacks allocationCallbacks; + ma_dr_flac* pFlac; + ma_dr_flac__init_cpu_caps(); + if (!ma_dr_flac__init_private(&init, onRead, onSeek, onMeta, container, pUserData, pUserDataMD)) { + return NULL; + } + if (pAllocationCallbacks != NULL) { + allocationCallbacks = *pAllocationCallbacks; + if (allocationCallbacks.onFree == NULL || (allocationCallbacks.onMalloc == NULL && allocationCallbacks.onRealloc == NULL)) { + return NULL; + } + } else { + allocationCallbacks.pUserData = NULL; + allocationCallbacks.onMalloc = ma_dr_flac__malloc_default; + allocationCallbacks.onRealloc = ma_dr_flac__realloc_default; + allocationCallbacks.onFree = ma_dr_flac__free_default; + } + allocationSize = sizeof(ma_dr_flac); + if ((init.maxBlockSizeInPCMFrames % (MA_DR_FLAC_MAX_SIMD_VECTOR_SIZE / sizeof(ma_int32))) == 0) { + wholeSIMDVectorCountPerChannel = (init.maxBlockSizeInPCMFrames / (MA_DR_FLAC_MAX_SIMD_VECTOR_SIZE / sizeof(ma_int32))); + } else { + wholeSIMDVectorCountPerChannel = (init.maxBlockSizeInPCMFrames / (MA_DR_FLAC_MAX_SIMD_VECTOR_SIZE / sizeof(ma_int32))) + 1; + } + decodedSamplesAllocationSize = wholeSIMDVectorCountPerChannel * MA_DR_FLAC_MAX_SIMD_VECTOR_SIZE * init.channels; + allocationSize += decodedSamplesAllocationSize; + allocationSize += MA_DR_FLAC_MAX_SIMD_VECTOR_SIZE; +#ifndef MA_DR_FLAC_NO_OGG + if (init.container == ma_dr_flac_container_ogg) { + allocationSize += sizeof(ma_dr_flac_oggbs); + pOggbs = (ma_dr_flac_oggbs*)ma_dr_flac__malloc_from_callbacks(sizeof(*pOggbs), &allocationCallbacks); + if (pOggbs == NULL) { + return NULL; + } + MA_DR_FLAC_ZERO_MEMORY(pOggbs, sizeof(*pOggbs)); + pOggbs->onRead = onRead; + pOggbs->onSeek = onSeek; + pOggbs->pUserData = pUserData; + pOggbs->currentBytePos = init.oggFirstBytePos; + pOggbs->firstBytePos = init.oggFirstBytePos; + pOggbs->serialNumber = init.oggSerial; + pOggbs->bosPageHeader = init.oggBosHeader; + pOggbs->bytesRemainingInPage = 0; + } +#endif + firstFramePos = 42; + seektablePos = 0; + seekpointCount = 0; + if (init.hasMetadataBlocks) { + ma_dr_flac_read_proc onReadOverride = onRead; + ma_dr_flac_seek_proc onSeekOverride = onSeek; + void* pUserDataOverride = pUserData; +#ifndef MA_DR_FLAC_NO_OGG + if (init.container == ma_dr_flac_container_ogg) { + onReadOverride = ma_dr_flac__on_read_ogg; + onSeekOverride = ma_dr_flac__on_seek_ogg; + pUserDataOverride = (void*)pOggbs; + } +#endif + if (!ma_dr_flac__read_and_decode_metadata(onReadOverride, onSeekOverride, onMeta, 
pUserDataOverride, pUserDataMD, &firstFramePos, &seektablePos, &seekpointCount, &allocationCallbacks)) { + #ifndef MA_DR_FLAC_NO_OGG + ma_dr_flac__free_from_callbacks(pOggbs, &allocationCallbacks); + #endif + return NULL; + } + allocationSize += seekpointCount * sizeof(ma_dr_flac_seekpoint); + } + pFlac = (ma_dr_flac*)ma_dr_flac__malloc_from_callbacks(allocationSize, &allocationCallbacks); + if (pFlac == NULL) { + #ifndef MA_DR_FLAC_NO_OGG + ma_dr_flac__free_from_callbacks(pOggbs, &allocationCallbacks); + #endif + return NULL; + } + ma_dr_flac__init_from_info(pFlac, &init); + pFlac->allocationCallbacks = allocationCallbacks; + pFlac->pDecodedSamples = (ma_int32*)ma_dr_flac_align((size_t)pFlac->pExtraData, MA_DR_FLAC_MAX_SIMD_VECTOR_SIZE); +#ifndef MA_DR_FLAC_NO_OGG + if (init.container == ma_dr_flac_container_ogg) { + ma_dr_flac_oggbs* pInternalOggbs = (ma_dr_flac_oggbs*)((ma_uint8*)pFlac->pDecodedSamples + decodedSamplesAllocationSize + (seekpointCount * sizeof(ma_dr_flac_seekpoint))); + MA_DR_FLAC_COPY_MEMORY(pInternalOggbs, pOggbs, sizeof(*pOggbs)); + ma_dr_flac__free_from_callbacks(pOggbs, &allocationCallbacks); + pOggbs = NULL; + pFlac->bs.onRead = ma_dr_flac__on_read_ogg; + pFlac->bs.onSeek = ma_dr_flac__on_seek_ogg; + pFlac->bs.pUserData = (void*)pInternalOggbs; + pFlac->_oggbs = (void*)pInternalOggbs; + } +#endif + pFlac->firstFLACFramePosInBytes = firstFramePos; +#ifndef MA_DR_FLAC_NO_OGG + if (init.container == ma_dr_flac_container_ogg) + { + pFlac->pSeekpoints = NULL; + pFlac->seekpointCount = 0; + } + else +#endif + { + if (seektablePos != 0) { + pFlac->seekpointCount = seekpointCount; + pFlac->pSeekpoints = (ma_dr_flac_seekpoint*)((ma_uint8*)pFlac->pDecodedSamples + decodedSamplesAllocationSize); + MA_DR_FLAC_ASSERT(pFlac->bs.onSeek != NULL); + MA_DR_FLAC_ASSERT(pFlac->bs.onRead != NULL); + if (pFlac->bs.onSeek(pFlac->bs.pUserData, (int)seektablePos, ma_dr_flac_seek_origin_start)) { + ma_uint32 iSeekpoint; + for (iSeekpoint = 0; iSeekpoint < seekpointCount; iSeekpoint += 1) { + if (pFlac->bs.onRead(pFlac->bs.pUserData, pFlac->pSeekpoints + iSeekpoint, MA_DR_FLAC_SEEKPOINT_SIZE_IN_BYTES) == MA_DR_FLAC_SEEKPOINT_SIZE_IN_BYTES) { + pFlac->pSeekpoints[iSeekpoint].firstPCMFrame = ma_dr_flac__be2host_64(pFlac->pSeekpoints[iSeekpoint].firstPCMFrame); + pFlac->pSeekpoints[iSeekpoint].flacFrameOffset = ma_dr_flac__be2host_64(pFlac->pSeekpoints[iSeekpoint].flacFrameOffset); + pFlac->pSeekpoints[iSeekpoint].pcmFrameCount = ma_dr_flac__be2host_16(pFlac->pSeekpoints[iSeekpoint].pcmFrameCount); + } else { + pFlac->pSeekpoints = NULL; + pFlac->seekpointCount = 0; + break; + } + } + if (!pFlac->bs.onSeek(pFlac->bs.pUserData, (int)pFlac->firstFLACFramePosInBytes, ma_dr_flac_seek_origin_start)) { + ma_dr_flac__free_from_callbacks(pFlac, &allocationCallbacks); + return NULL; + } + } else { + pFlac->pSeekpoints = NULL; + pFlac->seekpointCount = 0; + } + } + } + if (!init.hasStreamInfoBlock) { + pFlac->currentFLACFrame.header = init.firstFrameHeader; + for (;;) { + ma_result result = ma_dr_flac__decode_flac_frame(pFlac); + if (result == MA_SUCCESS) { + break; + } else { + if (result == MA_CRC_MISMATCH) { + if (!ma_dr_flac__read_next_flac_frame_header(&pFlac->bs, pFlac->bitsPerSample, &pFlac->currentFLACFrame.header)) { + ma_dr_flac__free_from_callbacks(pFlac, &allocationCallbacks); + return NULL; + } + continue; + } else { + ma_dr_flac__free_from_callbacks(pFlac, &allocationCallbacks); + return NULL; + } + } + } + } + return pFlac; +} +#ifndef MA_DR_FLAC_NO_STDIO +#include <stdio.h> +#ifndef
MA_DR_FLAC_NO_WCHAR +#include <wchar.h> +#endif +static size_t ma_dr_flac__on_read_stdio(void* pUserData, void* bufferOut, size_t bytesToRead) +{ + return fread(bufferOut, 1, bytesToRead, (FILE*)pUserData); +} +static ma_bool32 ma_dr_flac__on_seek_stdio(void* pUserData, int offset, ma_dr_flac_seek_origin origin) +{ + MA_DR_FLAC_ASSERT(offset >= 0); + return fseek((FILE*)pUserData, offset, (origin == ma_dr_flac_seek_origin_current) ? SEEK_CUR : SEEK_SET) == 0; +} +MA_API ma_dr_flac* ma_dr_flac_open_file(const char* pFileName, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_dr_flac* pFlac; + FILE* pFile; + if (ma_fopen(&pFile, pFileName, "rb") != MA_SUCCESS) { + return NULL; + } + pFlac = ma_dr_flac_open(ma_dr_flac__on_read_stdio, ma_dr_flac__on_seek_stdio, (void*)pFile, pAllocationCallbacks); + if (pFlac == NULL) { + fclose(pFile); + return NULL; + } + return pFlac; +} +#ifndef MA_DR_FLAC_NO_WCHAR +MA_API ma_dr_flac* ma_dr_flac_open_file_w(const wchar_t* pFileName, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_dr_flac* pFlac; + FILE* pFile; + if (ma_wfopen(&pFile, pFileName, L"rb", pAllocationCallbacks) != MA_SUCCESS) { + return NULL; + } + pFlac = ma_dr_flac_open(ma_dr_flac__on_read_stdio, ma_dr_flac__on_seek_stdio, (void*)pFile, pAllocationCallbacks); + if (pFlac == NULL) { + fclose(pFile); + return NULL; + } + return pFlac; +} +#endif +MA_API ma_dr_flac* ma_dr_flac_open_file_with_metadata(const char* pFileName, ma_dr_flac_meta_proc onMeta, void* pUserData, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_dr_flac* pFlac; + FILE* pFile; + if (ma_fopen(&pFile, pFileName, "rb") != MA_SUCCESS) { + return NULL; + } + pFlac = ma_dr_flac_open_with_metadata_private(ma_dr_flac__on_read_stdio, ma_dr_flac__on_seek_stdio, onMeta, ma_dr_flac_container_unknown, (void*)pFile, pUserData, pAllocationCallbacks); + if (pFlac == NULL) { + fclose(pFile); + return pFlac; + } + return pFlac; +} +#ifndef MA_DR_FLAC_NO_WCHAR +MA_API ma_dr_flac* ma_dr_flac_open_file_with_metadata_w(const wchar_t* pFileName, ma_dr_flac_meta_proc onMeta, void* pUserData, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_dr_flac* pFlac; + FILE* pFile; + if (ma_wfopen(&pFile, pFileName, L"rb", pAllocationCallbacks) != MA_SUCCESS) { + return NULL; + } + pFlac = ma_dr_flac_open_with_metadata_private(ma_dr_flac__on_read_stdio, ma_dr_flac__on_seek_stdio, onMeta, ma_dr_flac_container_unknown, (void*)pFile, pUserData, pAllocationCallbacks); + if (pFlac == NULL) { + fclose(pFile); + return pFlac; + } + return pFlac; +} +#endif +#endif +static size_t ma_dr_flac__on_read_memory(void* pUserData, void* bufferOut, size_t bytesToRead) +{ + ma_dr_flac__memory_stream* memoryStream = (ma_dr_flac__memory_stream*)pUserData; + size_t bytesRemaining; + MA_DR_FLAC_ASSERT(memoryStream != NULL); + MA_DR_FLAC_ASSERT(memoryStream->dataSize >= memoryStream->currentReadPos); + bytesRemaining = memoryStream->dataSize - memoryStream->currentReadPos; + if (bytesToRead > bytesRemaining) { + bytesToRead = bytesRemaining; + } + if (bytesToRead > 0) { + MA_DR_FLAC_COPY_MEMORY(bufferOut, memoryStream->data + memoryStream->currentReadPos, bytesToRead); + memoryStream->currentReadPos += bytesToRead; + } + return bytesToRead; +} +static ma_bool32 ma_dr_flac__on_seek_memory(void* pUserData, int offset, ma_dr_flac_seek_origin origin) +{ + ma_dr_flac__memory_stream* memoryStream = (ma_dr_flac__memory_stream*)pUserData; + MA_DR_FLAC_ASSERT(memoryStream != NULL); + MA_DR_FLAC_ASSERT(offset >= 0); + if (offset >
(ma_int64)memoryStream->dataSize) { + return MA_FALSE; + } + if (origin == ma_dr_flac_seek_origin_current) { + if (memoryStream->currentReadPos + offset <= memoryStream->dataSize) { + memoryStream->currentReadPos += offset; + } else { + return MA_FALSE; + } + } else { + if ((ma_uint32)offset <= memoryStream->dataSize) { + memoryStream->currentReadPos = offset; + } else { + return MA_FALSE; + } + } + return MA_TRUE; +} +MA_API ma_dr_flac* ma_dr_flac_open_memory(const void* pData, size_t dataSize, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_dr_flac__memory_stream memoryStream; + ma_dr_flac* pFlac; + memoryStream.data = (const ma_uint8*)pData; + memoryStream.dataSize = dataSize; + memoryStream.currentReadPos = 0; + pFlac = ma_dr_flac_open(ma_dr_flac__on_read_memory, ma_dr_flac__on_seek_memory, &memoryStream, pAllocationCallbacks); + if (pFlac == NULL) { + return NULL; + } + pFlac->memoryStream = memoryStream; +#ifndef MA_DR_FLAC_NO_OGG + if (pFlac->container == ma_dr_flac_container_ogg) + { + ma_dr_flac_oggbs* oggbs = (ma_dr_flac_oggbs*)pFlac->_oggbs; + oggbs->pUserData = &pFlac->memoryStream; + } + else +#endif + { + pFlac->bs.pUserData = &pFlac->memoryStream; + } + return pFlac; +} +MA_API ma_dr_flac* ma_dr_flac_open_memory_with_metadata(const void* pData, size_t dataSize, ma_dr_flac_meta_proc onMeta, void* pUserData, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_dr_flac__memory_stream memoryStream; + ma_dr_flac* pFlac; + memoryStream.data = (const ma_uint8*)pData; + memoryStream.dataSize = dataSize; + memoryStream.currentReadPos = 0; + pFlac = ma_dr_flac_open_with_metadata_private(ma_dr_flac__on_read_memory, ma_dr_flac__on_seek_memory, onMeta, ma_dr_flac_container_unknown, &memoryStream, pUserData, pAllocationCallbacks); + if (pFlac == NULL) { + return NULL; + } + pFlac->memoryStream = memoryStream; +#ifndef MA_DR_FLAC_NO_OGG + if (pFlac->container == ma_dr_flac_container_ogg) + { + ma_dr_flac_oggbs* oggbs = (ma_dr_flac_oggbs*)pFlac->_oggbs; + oggbs->pUserData = &pFlac->memoryStream; + } + else +#endif + { + pFlac->bs.pUserData = &pFlac->memoryStream; + } + return pFlac; +} +MA_API ma_dr_flac* ma_dr_flac_open(ma_dr_flac_read_proc onRead, ma_dr_flac_seek_proc onSeek, void* pUserData, const ma_allocation_callbacks* pAllocationCallbacks) +{ + return ma_dr_flac_open_with_metadata_private(onRead, onSeek, NULL, ma_dr_flac_container_unknown, pUserData, pUserData, pAllocationCallbacks); +} +MA_API ma_dr_flac* ma_dr_flac_open_relaxed(ma_dr_flac_read_proc onRead, ma_dr_flac_seek_proc onSeek, ma_dr_flac_container container, void* pUserData, const ma_allocation_callbacks* pAllocationCallbacks) +{ + return ma_dr_flac_open_with_metadata_private(onRead, onSeek, NULL, container, pUserData, pUserData, pAllocationCallbacks); +} +MA_API ma_dr_flac* ma_dr_flac_open_with_metadata(ma_dr_flac_read_proc onRead, ma_dr_flac_seek_proc onSeek, ma_dr_flac_meta_proc onMeta, void* pUserData, const ma_allocation_callbacks* pAllocationCallbacks) +{ + return ma_dr_flac_open_with_metadata_private(onRead, onSeek, onMeta, ma_dr_flac_container_unknown, pUserData, pUserData, pAllocationCallbacks); +} +MA_API ma_dr_flac* ma_dr_flac_open_with_metadata_relaxed(ma_dr_flac_read_proc onRead, ma_dr_flac_seek_proc onSeek, ma_dr_flac_meta_proc onMeta, ma_dr_flac_container container, void* pUserData, const ma_allocation_callbacks* pAllocationCallbacks) +{ + return ma_dr_flac_open_with_metadata_private(onRead, onSeek, onMeta, container, pUserData, pUserData, pAllocationCallbacks); +} +MA_API void 
ma_dr_flac_close(ma_dr_flac* pFlac) +{ + if (pFlac == NULL) { + return; + } +#ifndef MA_DR_FLAC_NO_STDIO + if (pFlac->bs.onRead == ma_dr_flac__on_read_stdio) { + fclose((FILE*)pFlac->bs.pUserData); + } +#ifndef MA_DR_FLAC_NO_OGG + if (pFlac->container == ma_dr_flac_container_ogg) { + ma_dr_flac_oggbs* oggbs = (ma_dr_flac_oggbs*)pFlac->_oggbs; + MA_DR_FLAC_ASSERT(pFlac->bs.onRead == ma_dr_flac__on_read_ogg); + if (oggbs->onRead == ma_dr_flac__on_read_stdio) { + fclose((FILE*)oggbs->pUserData); + } + } +#endif +#endif + ma_dr_flac__free_from_callbacks(pFlac, &pFlac->allocationCallbacks); +} +#if 0 +static MA_INLINE void ma_dr_flac_read_pcm_frames_s32__decode_left_side__reference(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, ma_int32* pOutputSamples) +{ + ma_uint64 i; + for (i = 0; i < frameCount; ++i) { + ma_uint32 left = (ma_uint32)pInputSamples0[i] << (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample); + ma_uint32 side = (ma_uint32)pInputSamples1[i] << (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample); + ma_uint32 right = left - side; + pOutputSamples[i*2+0] = (ma_int32)left; + pOutputSamples[i*2+1] = (ma_int32)right; + } +} +#endif +static MA_INLINE void ma_dr_flac_read_pcm_frames_s32__decode_left_side__scalar(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, ma_int32* pOutputSamples) +{ + ma_uint64 i; + ma_uint64 frameCount4 = frameCount >> 2; + const ma_uint32* pInputSamples0U32 = (const ma_uint32*)pInputSamples0; + const ma_uint32* pInputSamples1U32 = (const ma_uint32*)pInputSamples1; + ma_uint32 shift0 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 shift1 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + for (i = 0; i < frameCount4; ++i) { + ma_uint32 left0 = pInputSamples0U32[i*4+0] << shift0; + ma_uint32 left1 = pInputSamples0U32[i*4+1] << shift0; + ma_uint32 left2 = pInputSamples0U32[i*4+2] << shift0; + ma_uint32 left3 = pInputSamples0U32[i*4+3] << shift0; + ma_uint32 side0 = pInputSamples1U32[i*4+0] << shift1; + ma_uint32 side1 = pInputSamples1U32[i*4+1] << shift1; + ma_uint32 side2 = pInputSamples1U32[i*4+2] << shift1; + ma_uint32 side3 = pInputSamples1U32[i*4+3] << shift1; + ma_uint32 right0 = left0 - side0; + ma_uint32 right1 = left1 - side1; + ma_uint32 right2 = left2 - side2; + ma_uint32 right3 = left3 - side3; + pOutputSamples[i*8+0] = (ma_int32)left0; + pOutputSamples[i*8+1] = (ma_int32)right0; + pOutputSamples[i*8+2] = (ma_int32)left1; + pOutputSamples[i*8+3] = (ma_int32)right1; + pOutputSamples[i*8+4] = (ma_int32)left2; + pOutputSamples[i*8+5] = (ma_int32)right2; + pOutputSamples[i*8+6] = (ma_int32)left3; + pOutputSamples[i*8+7] = (ma_int32)right3; + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + ma_uint32 left = pInputSamples0U32[i] << shift0; + ma_uint32 side = pInputSamples1U32[i] << shift1; + ma_uint32 right = left - side; + pOutputSamples[i*2+0] = (ma_int32)left; + pOutputSamples[i*2+1] = (ma_int32)right; + } +} +#if defined(MA_DR_FLAC_SUPPORT_SSE2) +static MA_INLINE void ma_dr_flac_read_pcm_frames_s32__decode_left_side__sse2(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, ma_int32* pOutputSamples) +{ + ma_uint64 i; + ma_uint64 frameCount4 = frameCount >> 2; 
+ const ma_uint32* pInputSamples0U32 = (const ma_uint32*)pInputSamples0; + const ma_uint32* pInputSamples1U32 = (const ma_uint32*)pInputSamples1; + ma_uint32 shift0 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 shift1 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + MA_DR_FLAC_ASSERT(pFlac->bitsPerSample <= 24); + for (i = 0; i < frameCount4; ++i) { + __m128i left = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples0 + i), shift0); + __m128i side = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples1 + i), shift1); + __m128i right = _mm_sub_epi32(left, side); + _mm_storeu_si128((__m128i*)(pOutputSamples + i*8 + 0), _mm_unpacklo_epi32(left, right)); + _mm_storeu_si128((__m128i*)(pOutputSamples + i*8 + 4), _mm_unpackhi_epi32(left, right)); + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + ma_uint32 left = pInputSamples0U32[i] << shift0; + ma_uint32 side = pInputSamples1U32[i] << shift1; + ma_uint32 right = left - side; + pOutputSamples[i*2+0] = (ma_int32)left; + pOutputSamples[i*2+1] = (ma_int32)right; + } +} +#endif +#if defined(MA_DR_FLAC_SUPPORT_NEON) +static MA_INLINE void ma_dr_flac_read_pcm_frames_s32__decode_left_side__neon(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, ma_int32* pOutputSamples) +{ + ma_uint64 i; + ma_uint64 frameCount4 = frameCount >> 2; + const ma_uint32* pInputSamples0U32 = (const ma_uint32*)pInputSamples0; + const ma_uint32* pInputSamples1U32 = (const ma_uint32*)pInputSamples1; + ma_uint32 shift0 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 shift1 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + int32x4_t shift0_4; + int32x4_t shift1_4; + MA_DR_FLAC_ASSERT(pFlac->bitsPerSample <= 24); + shift0_4 = vdupq_n_s32(shift0); + shift1_4 = vdupq_n_s32(shift1); + for (i = 0; i < frameCount4; ++i) { + uint32x4_t left; + uint32x4_t side; + uint32x4_t right; + left = vshlq_u32(vld1q_u32(pInputSamples0U32 + i*4), shift0_4); + side = vshlq_u32(vld1q_u32(pInputSamples1U32 + i*4), shift1_4); + right = vsubq_u32(left, side); + ma_dr_flac__vst2q_u32((ma_uint32*)pOutputSamples + i*8, vzipq_u32(left, right)); + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + ma_uint32 left = pInputSamples0U32[i] << shift0; + ma_uint32 side = pInputSamples1U32[i] << shift1; + ma_uint32 right = left - side; + pOutputSamples[i*2+0] = (ma_int32)left; + pOutputSamples[i*2+1] = (ma_int32)right; + } +} +#endif +static MA_INLINE void ma_dr_flac_read_pcm_frames_s32__decode_left_side(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, ma_int32* pOutputSamples) +{ +#if defined(MA_DR_FLAC_SUPPORT_SSE2) + if (ma_dr_flac__gIsSSE2Supported && pFlac->bitsPerSample <= 24) { + ma_dr_flac_read_pcm_frames_s32__decode_left_side__sse2(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); + } else +#elif defined(MA_DR_FLAC_SUPPORT_NEON) + if (ma_dr_flac__gIsNEONSupported && pFlac->bitsPerSample <= 24) { + ma_dr_flac_read_pcm_frames_s32__decode_left_side__neon(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); + } else +#endif + { +#if 0 + ma_dr_flac_read_pcm_frames_s32__decode_left_side__reference(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, 
pOutputSamples); +#else + ma_dr_flac_read_pcm_frames_s32__decode_left_side__scalar(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); +#endif + } +} +#if 0 +static MA_INLINE void ma_dr_flac_read_pcm_frames_s32__decode_right_side__reference(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, ma_int32* pOutputSamples) +{ + ma_uint64 i; + for (i = 0; i < frameCount; ++i) { + ma_uint32 side = (ma_uint32)pInputSamples0[i] << (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample); + ma_uint32 right = (ma_uint32)pInputSamples1[i] << (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample); + ma_uint32 left = right + side; + pOutputSamples[i*2+0] = (ma_int32)left; + pOutputSamples[i*2+1] = (ma_int32)right; + } +} +#endif +static MA_INLINE void ma_dr_flac_read_pcm_frames_s32__decode_right_side__scalar(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, ma_int32* pOutputSamples) +{ + ma_uint64 i; + ma_uint64 frameCount4 = frameCount >> 2; + const ma_uint32* pInputSamples0U32 = (const ma_uint32*)pInputSamples0; + const ma_uint32* pInputSamples1U32 = (const ma_uint32*)pInputSamples1; + ma_uint32 shift0 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 shift1 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + for (i = 0; i < frameCount4; ++i) { + ma_uint32 side0 = pInputSamples0U32[i*4+0] << shift0; + ma_uint32 side1 = pInputSamples0U32[i*4+1] << shift0; + ma_uint32 side2 = pInputSamples0U32[i*4+2] << shift0; + ma_uint32 side3 = pInputSamples0U32[i*4+3] << shift0; + ma_uint32 right0 = pInputSamples1U32[i*4+0] << shift1; + ma_uint32 right1 = pInputSamples1U32[i*4+1] << shift1; + ma_uint32 right2 = pInputSamples1U32[i*4+2] << shift1; + ma_uint32 right3 = pInputSamples1U32[i*4+3] << shift1; + ma_uint32 left0 = right0 + side0; + ma_uint32 left1 = right1 + side1; + ma_uint32 left2 = right2 + side2; + ma_uint32 left3 = right3 + side3; + pOutputSamples[i*8+0] = (ma_int32)left0; + pOutputSamples[i*8+1] = (ma_int32)right0; + pOutputSamples[i*8+2] = (ma_int32)left1; + pOutputSamples[i*8+3] = (ma_int32)right1; + pOutputSamples[i*8+4] = (ma_int32)left2; + pOutputSamples[i*8+5] = (ma_int32)right2; + pOutputSamples[i*8+6] = (ma_int32)left3; + pOutputSamples[i*8+7] = (ma_int32)right3; + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + ma_uint32 side = pInputSamples0U32[i] << shift0; + ma_uint32 right = pInputSamples1U32[i] << shift1; + ma_uint32 left = right + side; + pOutputSamples[i*2+0] = (ma_int32)left; + pOutputSamples[i*2+1] = (ma_int32)right; + } +} +#if defined(MA_DR_FLAC_SUPPORT_SSE2) +static MA_INLINE void ma_dr_flac_read_pcm_frames_s32__decode_right_side__sse2(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, ma_int32* pOutputSamples) +{ + ma_uint64 i; + ma_uint64 frameCount4 = frameCount >> 2; + const ma_uint32* pInputSamples0U32 = (const ma_uint32*)pInputSamples0; + const ma_uint32* pInputSamples1U32 = (const ma_uint32*)pInputSamples1; + ma_uint32 shift0 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 shift1 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + MA_DR_FLAC_ASSERT(pFlac->bitsPerSample <= 24); + 
for (i = 0; i < frameCount4; ++i) { + __m128i side = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples0 + i), shift0); + __m128i right = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples1 + i), shift1); + __m128i left = _mm_add_epi32(right, side); + _mm_storeu_si128((__m128i*)(pOutputSamples + i*8 + 0), _mm_unpacklo_epi32(left, right)); + _mm_storeu_si128((__m128i*)(pOutputSamples + i*8 + 4), _mm_unpackhi_epi32(left, right)); + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + ma_uint32 side = pInputSamples0U32[i] << shift0; + ma_uint32 right = pInputSamples1U32[i] << shift1; + ma_uint32 left = right + side; + pOutputSamples[i*2+0] = (ma_int32)left; + pOutputSamples[i*2+1] = (ma_int32)right; + } +} +#endif +#if defined(MA_DR_FLAC_SUPPORT_NEON) +static MA_INLINE void ma_dr_flac_read_pcm_frames_s32__decode_right_side__neon(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, ma_int32* pOutputSamples) +{ + ma_uint64 i; + ma_uint64 frameCount4 = frameCount >> 2; + const ma_uint32* pInputSamples0U32 = (const ma_uint32*)pInputSamples0; + const ma_uint32* pInputSamples1U32 = (const ma_uint32*)pInputSamples1; + ma_uint32 shift0 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 shift1 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + int32x4_t shift0_4; + int32x4_t shift1_4; + MA_DR_FLAC_ASSERT(pFlac->bitsPerSample <= 24); + shift0_4 = vdupq_n_s32(shift0); + shift1_4 = vdupq_n_s32(shift1); + for (i = 0; i < frameCount4; ++i) { + uint32x4_t side; + uint32x4_t right; + uint32x4_t left; + side = vshlq_u32(vld1q_u32(pInputSamples0U32 + i*4), shift0_4); + right = vshlq_u32(vld1q_u32(pInputSamples1U32 + i*4), shift1_4); + left = vaddq_u32(right, side); + ma_dr_flac__vst2q_u32((ma_uint32*)pOutputSamples + i*8, vzipq_u32(left, right)); + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + ma_uint32 side = pInputSamples0U32[i] << shift0; + ma_uint32 right = pInputSamples1U32[i] << shift1; + ma_uint32 left = right + side; + pOutputSamples[i*2+0] = (ma_int32)left; + pOutputSamples[i*2+1] = (ma_int32)right; + } +} +#endif +static MA_INLINE void ma_dr_flac_read_pcm_frames_s32__decode_right_side(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, ma_int32* pOutputSamples) +{ +#if defined(MA_DR_FLAC_SUPPORT_SSE2) + if (ma_dr_flac__gIsSSE2Supported && pFlac->bitsPerSample <= 24) { + ma_dr_flac_read_pcm_frames_s32__decode_right_side__sse2(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); + } else +#elif defined(MA_DR_FLAC_SUPPORT_NEON) + if (ma_dr_flac__gIsNEONSupported && pFlac->bitsPerSample <= 24) { + ma_dr_flac_read_pcm_frames_s32__decode_right_side__neon(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); + } else +#endif + { +#if 0 + ma_dr_flac_read_pcm_frames_s32__decode_right_side__reference(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); +#else + ma_dr_flac_read_pcm_frames_s32__decode_right_side__scalar(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); +#endif + } +} +#if 0 +static MA_INLINE void ma_dr_flac_read_pcm_frames_s32__decode_mid_side__reference(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* 
pInputSamples1, ma_int32* pOutputSamples) +{ + for (ma_uint64 i = 0; i < frameCount; ++i) { + ma_uint32 mid = pInputSamples0U32[i] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 side = pInputSamples1U32[i] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + mid = (mid << 1) | (side & 0x01); + pOutputSamples[i*2+0] = (ma_int32)((ma_uint32)((ma_int32)(mid + side) >> 1) << unusedBitsPerSample); + pOutputSamples[i*2+1] = (ma_int32)((ma_uint32)((ma_int32)(mid - side) >> 1) << unusedBitsPerSample); + } +} +#endif +static MA_INLINE void ma_dr_flac_read_pcm_frames_s32__decode_mid_side__scalar(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, ma_int32* pOutputSamples) +{ + ma_uint64 i; + ma_uint64 frameCount4 = frameCount >> 2; + const ma_uint32* pInputSamples0U32 = (const ma_uint32*)pInputSamples0; + const ma_uint32* pInputSamples1U32 = (const ma_uint32*)pInputSamples1; + ma_int32 shift = unusedBitsPerSample; + if (shift > 0) { + shift -= 1; + for (i = 0; i < frameCount4; ++i) { + ma_uint32 temp0L; + ma_uint32 temp1L; + ma_uint32 temp2L; + ma_uint32 temp3L; + ma_uint32 temp0R; + ma_uint32 temp1R; + ma_uint32 temp2R; + ma_uint32 temp3R; + ma_uint32 mid0 = pInputSamples0U32[i*4+0] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 mid1 = pInputSamples0U32[i*4+1] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 mid2 = pInputSamples0U32[i*4+2] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 mid3 = pInputSamples0U32[i*4+3] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 side0 = pInputSamples1U32[i*4+0] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + ma_uint32 side1 = pInputSamples1U32[i*4+1] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + ma_uint32 side2 = pInputSamples1U32[i*4+2] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + ma_uint32 side3 = pInputSamples1U32[i*4+3] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + mid0 = (mid0 << 1) | (side0 & 0x01); + mid1 = (mid1 << 1) | (side1 & 0x01); + mid2 = (mid2 << 1) | (side2 & 0x01); + mid3 = (mid3 << 1) | (side3 & 0x01); + temp0L = (mid0 + side0) << shift; + temp1L = (mid1 + side1) << shift; + temp2L = (mid2 + side2) << shift; + temp3L = (mid3 + side3) << shift; + temp0R = (mid0 - side0) << shift; + temp1R = (mid1 - side1) << shift; + temp2R = (mid2 - side2) << shift; + temp3R = (mid3 - side3) << shift; + pOutputSamples[i*8+0] = (ma_int32)temp0L; + pOutputSamples[i*8+1] = (ma_int32)temp0R; + pOutputSamples[i*8+2] = (ma_int32)temp1L; + pOutputSamples[i*8+3] = (ma_int32)temp1R; + pOutputSamples[i*8+4] = (ma_int32)temp2L; + pOutputSamples[i*8+5] = (ma_int32)temp2R; + pOutputSamples[i*8+6] = (ma_int32)temp3L; + pOutputSamples[i*8+7] = (ma_int32)temp3R; + } + } else { + for (i = 0; i < frameCount4; ++i) { + ma_uint32 temp0L; + ma_uint32 temp1L; + ma_uint32 temp2L; + ma_uint32 temp3L; + ma_uint32 temp0R; + ma_uint32 temp1R; + ma_uint32 temp2R; + ma_uint32 temp3R; + ma_uint32 mid0 = pInputSamples0U32[i*4+0] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 mid1 = pInputSamples0U32[i*4+1] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 mid2 = pInputSamples0U32[i*4+2] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 mid3 = pInputSamples0U32[i*4+3] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 
side0 = pInputSamples1U32[i*4+0] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + ma_uint32 side1 = pInputSamples1U32[i*4+1] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + ma_uint32 side2 = pInputSamples1U32[i*4+2] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + ma_uint32 side3 = pInputSamples1U32[i*4+3] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + mid0 = (mid0 << 1) | (side0 & 0x01); + mid1 = (mid1 << 1) | (side1 & 0x01); + mid2 = (mid2 << 1) | (side2 & 0x01); + mid3 = (mid3 << 1) | (side3 & 0x01); + temp0L = (ma_uint32)((ma_int32)(mid0 + side0) >> 1); + temp1L = (ma_uint32)((ma_int32)(mid1 + side1) >> 1); + temp2L = (ma_uint32)((ma_int32)(mid2 + side2) >> 1); + temp3L = (ma_uint32)((ma_int32)(mid3 + side3) >> 1); + temp0R = (ma_uint32)((ma_int32)(mid0 - side0) >> 1); + temp1R = (ma_uint32)((ma_int32)(mid1 - side1) >> 1); + temp2R = (ma_uint32)((ma_int32)(mid2 - side2) >> 1); + temp3R = (ma_uint32)((ma_int32)(mid3 - side3) >> 1); + pOutputSamples[i*8+0] = (ma_int32)temp0L; + pOutputSamples[i*8+1] = (ma_int32)temp0R; + pOutputSamples[i*8+2] = (ma_int32)temp1L; + pOutputSamples[i*8+3] = (ma_int32)temp1R; + pOutputSamples[i*8+4] = (ma_int32)temp2L; + pOutputSamples[i*8+5] = (ma_int32)temp2R; + pOutputSamples[i*8+6] = (ma_int32)temp3L; + pOutputSamples[i*8+7] = (ma_int32)temp3R; + } + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + ma_uint32 mid = pInputSamples0U32[i] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 side = pInputSamples1U32[i] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + mid = (mid << 1) | (side & 0x01); + pOutputSamples[i*2+0] = (ma_int32)((ma_uint32)((ma_int32)(mid + side) >> 1) << unusedBitsPerSample); + pOutputSamples[i*2+1] = (ma_int32)((ma_uint32)((ma_int32)(mid - side) >> 1) << unusedBitsPerSample); + } +} +#if defined(MA_DR_FLAC_SUPPORT_SSE2) +static MA_INLINE void ma_dr_flac_read_pcm_frames_s32__decode_mid_side__sse2(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, ma_int32* pOutputSamples) +{ + ma_uint64 i; + ma_uint64 frameCount4 = frameCount >> 2; + const ma_uint32* pInputSamples0U32 = (const ma_uint32*)pInputSamples0; + const ma_uint32* pInputSamples1U32 = (const ma_uint32*)pInputSamples1; + ma_int32 shift = unusedBitsPerSample; + MA_DR_FLAC_ASSERT(pFlac->bitsPerSample <= 24); + if (shift == 0) { + for (i = 0; i < frameCount4; ++i) { + __m128i mid; + __m128i side; + __m128i left; + __m128i right; + mid = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples0 + i), pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample); + side = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples1 + i), pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample); + mid = _mm_or_si128(_mm_slli_epi32(mid, 1), _mm_and_si128(side, _mm_set1_epi32(0x01))); + left = _mm_srai_epi32(_mm_add_epi32(mid, side), 1); + right = _mm_srai_epi32(_mm_sub_epi32(mid, side), 1); + _mm_storeu_si128((__m128i*)(pOutputSamples + i*8 + 0), _mm_unpacklo_epi32(left, right)); + _mm_storeu_si128((__m128i*)(pOutputSamples + i*8 + 4), _mm_unpackhi_epi32(left, right)); + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + ma_uint32 mid = pInputSamples0U32[i] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 side = pInputSamples1U32[i] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + mid = (mid << 1) | (side & 0x01); + pOutputSamples[i*2+0] = (ma_int32)(mid 
+ side) >> 1; + pOutputSamples[i*2+1] = (ma_int32)(mid - side) >> 1; + } + } else { + shift -= 1; + for (i = 0; i < frameCount4; ++i) { + __m128i mid; + __m128i side; + __m128i left; + __m128i right; + mid = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples0 + i), pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample); + side = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples1 + i), pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample); + mid = _mm_or_si128(_mm_slli_epi32(mid, 1), _mm_and_si128(side, _mm_set1_epi32(0x01))); + left = _mm_slli_epi32(_mm_add_epi32(mid, side), shift); + right = _mm_slli_epi32(_mm_sub_epi32(mid, side), shift); + _mm_storeu_si128((__m128i*)(pOutputSamples + i*8 + 0), _mm_unpacklo_epi32(left, right)); + _mm_storeu_si128((__m128i*)(pOutputSamples + i*8 + 4), _mm_unpackhi_epi32(left, right)); + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + ma_uint32 mid = pInputSamples0U32[i] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 side = pInputSamples1U32[i] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + mid = (mid << 1) | (side & 0x01); + pOutputSamples[i*2+0] = (ma_int32)((mid + side) << shift); + pOutputSamples[i*2+1] = (ma_int32)((mid - side) << shift); + } + } +} +#endif +#if defined(MA_DR_FLAC_SUPPORT_NEON) +static MA_INLINE void ma_dr_flac_read_pcm_frames_s32__decode_mid_side__neon(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, ma_int32* pOutputSamples) +{ + ma_uint64 i; + ma_uint64 frameCount4 = frameCount >> 2; + const ma_uint32* pInputSamples0U32 = (const ma_uint32*)pInputSamples0; + const ma_uint32* pInputSamples1U32 = (const ma_uint32*)pInputSamples1; + ma_int32 shift = unusedBitsPerSample; + int32x4_t wbpsShift0_4; + int32x4_t wbpsShift1_4; + uint32x4_t one4; + MA_DR_FLAC_ASSERT(pFlac->bitsPerSample <= 24); + wbpsShift0_4 = vdupq_n_s32(pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample); + wbpsShift1_4 = vdupq_n_s32(pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample); + one4 = vdupq_n_u32(1); + if (shift == 0) { + for (i = 0; i < frameCount4; ++i) { + uint32x4_t mid; + uint32x4_t side; + int32x4_t left; + int32x4_t right; + mid = vshlq_u32(vld1q_u32(pInputSamples0U32 + i*4), wbpsShift0_4); + side = vshlq_u32(vld1q_u32(pInputSamples1U32 + i*4), wbpsShift1_4); + mid = vorrq_u32(vshlq_n_u32(mid, 1), vandq_u32(side, one4)); + left = vshrq_n_s32(vreinterpretq_s32_u32(vaddq_u32(mid, side)), 1); + right = vshrq_n_s32(vreinterpretq_s32_u32(vsubq_u32(mid, side)), 1); + ma_dr_flac__vst2q_s32(pOutputSamples + i*8, vzipq_s32(left, right)); + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + ma_uint32 mid = pInputSamples0U32[i] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 side = pInputSamples1U32[i] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + mid = (mid << 1) | (side & 0x01); + pOutputSamples[i*2+0] = (ma_int32)(mid + side) >> 1; + pOutputSamples[i*2+1] = (ma_int32)(mid - side) >> 1; + } + } else { + int32x4_t shift4; + shift -= 1; + shift4 = vdupq_n_s32(shift); + for (i = 0; i < frameCount4; ++i) { + uint32x4_t mid; + uint32x4_t side; + int32x4_t left; + int32x4_t right; + mid = vshlq_u32(vld1q_u32(pInputSamples0U32 + i*4), wbpsShift0_4); + side = vshlq_u32(vld1q_u32(pInputSamples1U32 + i*4), wbpsShift1_4); + mid = vorrq_u32(vshlq_n_u32(mid, 1), vandq_u32(side, one4)); + left = vreinterpretq_s32_u32(vshlq_u32(vaddq_u32(mid, side), 
shift4)); + right = vreinterpretq_s32_u32(vshlq_u32(vsubq_u32(mid, side), shift4)); + ma_dr_flac__vst2q_s32(pOutputSamples + i*8, vzipq_s32(left, right)); + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + ma_uint32 mid = pInputSamples0U32[i] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 side = pInputSamples1U32[i] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + mid = (mid << 1) | (side & 0x01); + pOutputSamples[i*2+0] = (ma_int32)((mid + side) << shift); + pOutputSamples[i*2+1] = (ma_int32)((mid - side) << shift); + } + } +} +#endif +static MA_INLINE void ma_dr_flac_read_pcm_frames_s32__decode_mid_side(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, ma_int32* pOutputSamples) +{ +#if defined(MA_DR_FLAC_SUPPORT_SSE2) + if (ma_dr_flac__gIsSSE2Supported && pFlac->bitsPerSample <= 24) { + ma_dr_flac_read_pcm_frames_s32__decode_mid_side__sse2(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); + } else +#elif defined(MA_DR_FLAC_SUPPORT_NEON) + if (ma_dr_flac__gIsNEONSupported && pFlac->bitsPerSample <= 24) { + ma_dr_flac_read_pcm_frames_s32__decode_mid_side__neon(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); + } else +#endif + { +#if 0 + ma_dr_flac_read_pcm_frames_s32__decode_mid_side__reference(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); +#else + ma_dr_flac_read_pcm_frames_s32__decode_mid_side__scalar(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); +#endif + } +} +#if 0 +static MA_INLINE void ma_dr_flac_read_pcm_frames_s32__decode_independent_stereo__reference(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, ma_int32* pOutputSamples) +{ + for (ma_uint64 i = 0; i < frameCount; ++i) { + pOutputSamples[i*2+0] = (ma_int32)((ma_uint32)pInputSamples0[i] << (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample)); + pOutputSamples[i*2+1] = (ma_int32)((ma_uint32)pInputSamples1[i] << (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample)); + } +} +#endif +static MA_INLINE void ma_dr_flac_read_pcm_frames_s32__decode_independent_stereo__scalar(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, ma_int32* pOutputSamples) +{ + ma_uint64 i; + ma_uint64 frameCount4 = frameCount >> 2; + const ma_uint32* pInputSamples0U32 = (const ma_uint32*)pInputSamples0; + const ma_uint32* pInputSamples1U32 = (const ma_uint32*)pInputSamples1; + ma_uint32 shift0 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 shift1 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + for (i = 0; i < frameCount4; ++i) { + ma_uint32 tempL0 = pInputSamples0U32[i*4+0] << shift0; + ma_uint32 tempL1 = pInputSamples0U32[i*4+1] << shift0; + ma_uint32 tempL2 = pInputSamples0U32[i*4+2] << shift0; + ma_uint32 tempL3 = pInputSamples0U32[i*4+3] << shift0; + ma_uint32 tempR0 = pInputSamples1U32[i*4+0] << shift1; + ma_uint32 tempR1 = pInputSamples1U32[i*4+1] << shift1; + ma_uint32 tempR2 = pInputSamples1U32[i*4+2] << shift1; + ma_uint32 tempR3 = pInputSamples1U32[i*4+3] << shift1; + pOutputSamples[i*8+0] = (ma_int32)tempL0; + pOutputSamples[i*8+1] = 
(ma_int32)tempR0; + pOutputSamples[i*8+2] = (ma_int32)tempL1; + pOutputSamples[i*8+3] = (ma_int32)tempR1; + pOutputSamples[i*8+4] = (ma_int32)tempL2; + pOutputSamples[i*8+5] = (ma_int32)tempR2; + pOutputSamples[i*8+6] = (ma_int32)tempL3; + pOutputSamples[i*8+7] = (ma_int32)tempR3; + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + pOutputSamples[i*2+0] = (ma_int32)(pInputSamples0U32[i] << shift0); + pOutputSamples[i*2+1] = (ma_int32)(pInputSamples1U32[i] << shift1); + } +} +#if defined(MA_DR_FLAC_SUPPORT_SSE2) +static MA_INLINE void ma_dr_flac_read_pcm_frames_s32__decode_independent_stereo__sse2(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, ma_int32* pOutputSamples) +{ + ma_uint64 i; + ma_uint64 frameCount4 = frameCount >> 2; + const ma_uint32* pInputSamples0U32 = (const ma_uint32*)pInputSamples0; + const ma_uint32* pInputSamples1U32 = (const ma_uint32*)pInputSamples1; + ma_uint32 shift0 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 shift1 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + for (i = 0; i < frameCount4; ++i) { + __m128i left = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples0 + i), shift0); + __m128i right = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples1 + i), shift1); + _mm_storeu_si128((__m128i*)(pOutputSamples + i*8 + 0), _mm_unpacklo_epi32(left, right)); + _mm_storeu_si128((__m128i*)(pOutputSamples + i*8 + 4), _mm_unpackhi_epi32(left, right)); + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + pOutputSamples[i*2+0] = (ma_int32)(pInputSamples0U32[i] << shift0); + pOutputSamples[i*2+1] = (ma_int32)(pInputSamples1U32[i] << shift1); + } +} +#endif +#if defined(MA_DR_FLAC_SUPPORT_NEON) +static MA_INLINE void ma_dr_flac_read_pcm_frames_s32__decode_independent_stereo__neon(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, ma_int32* pOutputSamples) +{ + ma_uint64 i; + ma_uint64 frameCount4 = frameCount >> 2; + const ma_uint32* pInputSamples0U32 = (const ma_uint32*)pInputSamples0; + const ma_uint32* pInputSamples1U32 = (const ma_uint32*)pInputSamples1; + ma_uint32 shift0 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 shift1 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + int32x4_t shift4_0 = vdupq_n_s32(shift0); + int32x4_t shift4_1 = vdupq_n_s32(shift1); + for (i = 0; i < frameCount4; ++i) { + int32x4_t left; + int32x4_t right; + left = vreinterpretq_s32_u32(vshlq_u32(vld1q_u32(pInputSamples0U32 + i*4), shift4_0)); + right = vreinterpretq_s32_u32(vshlq_u32(vld1q_u32(pInputSamples1U32 + i*4), shift4_1)); + ma_dr_flac__vst2q_s32(pOutputSamples + i*8, vzipq_s32(left, right)); + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + pOutputSamples[i*2+0] = (ma_int32)(pInputSamples0U32[i] << shift0); + pOutputSamples[i*2+1] = (ma_int32)(pInputSamples1U32[i] << shift1); + } +} +#endif +static MA_INLINE void ma_dr_flac_read_pcm_frames_s32__decode_independent_stereo(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, ma_int32* pOutputSamples) +{ +#if defined(MA_DR_FLAC_SUPPORT_SSE2) + if (ma_dr_flac__gIsSSE2Supported && pFlac->bitsPerSample <= 24) { + ma_dr_flac_read_pcm_frames_s32__decode_independent_stereo__sse2(pFlac, 
frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); + } else +#elif defined(MA_DR_FLAC_SUPPORT_NEON) + if (ma_dr_flac__gIsNEONSupported && pFlac->bitsPerSample <= 24) { + ma_dr_flac_read_pcm_frames_s32__decode_independent_stereo__neon(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); + } else +#endif + { +#if 0 + ma_dr_flac_read_pcm_frames_s32__decode_independent_stereo__reference(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); +#else + ma_dr_flac_read_pcm_frames_s32__decode_independent_stereo__scalar(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); +#endif + } +} +MA_API ma_uint64 ma_dr_flac_read_pcm_frames_s32(ma_dr_flac* pFlac, ma_uint64 framesToRead, ma_int32* pBufferOut) +{ + ma_uint64 framesRead; + ma_uint32 unusedBitsPerSample; + if (pFlac == NULL || framesToRead == 0) { + return 0; + } + if (pBufferOut == NULL) { + return ma_dr_flac__seek_forward_by_pcm_frames(pFlac, framesToRead); + } + MA_DR_FLAC_ASSERT(pFlac->bitsPerSample <= 32); + unusedBitsPerSample = 32 - pFlac->bitsPerSample; + framesRead = 0; + while (framesToRead > 0) { + if (pFlac->currentFLACFrame.pcmFramesRemaining == 0) { + if (!ma_dr_flac__read_and_decode_next_flac_frame(pFlac)) { + break; + } + } else { + unsigned int channelCount = ma_dr_flac__get_channel_count_from_channel_assignment(pFlac->currentFLACFrame.header.channelAssignment); + ma_uint64 iFirstPCMFrame = pFlac->currentFLACFrame.header.blockSizeInPCMFrames - pFlac->currentFLACFrame.pcmFramesRemaining; + ma_uint64 frameCountThisIteration = framesToRead; + if (frameCountThisIteration > pFlac->currentFLACFrame.pcmFramesRemaining) { + frameCountThisIteration = pFlac->currentFLACFrame.pcmFramesRemaining; + } + if (channelCount == 2) { + const ma_int32* pDecodedSamples0 = pFlac->currentFLACFrame.subframes[0].pSamplesS32 + iFirstPCMFrame; + const ma_int32* pDecodedSamples1 = pFlac->currentFLACFrame.subframes[1].pSamplesS32 + iFirstPCMFrame; + switch (pFlac->currentFLACFrame.header.channelAssignment) + { + case MA_DR_FLAC_CHANNEL_ASSIGNMENT_LEFT_SIDE: + { + ma_dr_flac_read_pcm_frames_s32__decode_left_side(pFlac, frameCountThisIteration, unusedBitsPerSample, pDecodedSamples0, pDecodedSamples1, pBufferOut); + } break; + case MA_DR_FLAC_CHANNEL_ASSIGNMENT_RIGHT_SIDE: + { + ma_dr_flac_read_pcm_frames_s32__decode_right_side(pFlac, frameCountThisIteration, unusedBitsPerSample, pDecodedSamples0, pDecodedSamples1, pBufferOut); + } break; + case MA_DR_FLAC_CHANNEL_ASSIGNMENT_MID_SIDE: + { + ma_dr_flac_read_pcm_frames_s32__decode_mid_side(pFlac, frameCountThisIteration, unusedBitsPerSample, pDecodedSamples0, pDecodedSamples1, pBufferOut); + } break; + case MA_DR_FLAC_CHANNEL_ASSIGNMENT_INDEPENDENT: + default: + { + ma_dr_flac_read_pcm_frames_s32__decode_independent_stereo(pFlac, frameCountThisIteration, unusedBitsPerSample, pDecodedSamples0, pDecodedSamples1, pBufferOut); + } break; + } + } else { + ma_uint64 i; + for (i = 0; i < frameCountThisIteration; ++i) { + unsigned int j; + for (j = 0; j < channelCount; ++j) { + pBufferOut[(i*channelCount)+j] = (ma_int32)((ma_uint32)(pFlac->currentFLACFrame.subframes[j].pSamplesS32[iFirstPCMFrame + i]) << (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[j].wastedBitsPerSample)); + } + } + } + framesRead += frameCountThisIteration; + pBufferOut += frameCountThisIteration * channelCount; + framesToRead -= frameCountThisIteration; + pFlac->currentPCMFrame += frameCountThisIteration; 
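
ma_dr_flac_read_pcm_frames_s32, whose main loop this hunk is completing, decodes FLAC frames on demand, routes stereo frames to the left-side/right-side/mid-side/independent decoders by channel assignment, falls back to a generic per-sample loop for other channel counts, and treats a NULL output buffer as a seek-forward. A hedged usage sketch, assuming the usual ma_dr_flac_open_file and ma_dr_flac_close entry points defined elsewhere in this same vendored header:

    /* Drain a FLAC file in fixed-size chunks of interleaved s32 samples. */
    void drain_flac_s32(const char* path)
    {
        ma_dr_flac* pFlac = ma_dr_flac_open_file(path, NULL); /* NULL: default allocator */
        if (pFlac == NULL) {
            return; /* open or header parse failed */
        }
        ma_int32 buffer[4096];
        ma_uint64 chunk = 4096 / pFlac->channels;
        for (;;) {
            ma_uint64 framesRead = ma_dr_flac_read_pcm_frames_s32(pFlac, chunk, buffer);
            if (framesRead == 0) {
                break; /* end of stream (or decode failure) */
            }
            /* buffer now holds framesRead * pFlac->channels interleaved samples */
        }
        ma_dr_flac_close(pFlac);
    }
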
+ pFlac->currentFLACFrame.pcmFramesRemaining -= (ma_uint32)frameCountThisIteration; + } + } + return framesRead; +} +#if 0 +static MA_INLINE void ma_dr_flac_read_pcm_frames_s16__decode_left_side__reference(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, ma_int16* pOutputSamples) +{ + ma_uint64 i; + for (i = 0; i < frameCount; ++i) { + ma_uint32 left = (ma_uint32)pInputSamples0[i] << (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample); + ma_uint32 side = (ma_uint32)pInputSamples1[i] << (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample); + ma_uint32 right = left - side; + left >>= 16; + right >>= 16; + pOutputSamples[i*2+0] = (ma_int16)left; + pOutputSamples[i*2+1] = (ma_int16)right; + } +} +#endif +static MA_INLINE void ma_dr_flac_read_pcm_frames_s16__decode_left_side__scalar(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, ma_int16* pOutputSamples) +{ + ma_uint64 i; + ma_uint64 frameCount4 = frameCount >> 2; + const ma_uint32* pInputSamples0U32 = (const ma_uint32*)pInputSamples0; + const ma_uint32* pInputSamples1U32 = (const ma_uint32*)pInputSamples1; + ma_uint32 shift0 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 shift1 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + for (i = 0; i < frameCount4; ++i) { + ma_uint32 left0 = pInputSamples0U32[i*4+0] << shift0; + ma_uint32 left1 = pInputSamples0U32[i*4+1] << shift0; + ma_uint32 left2 = pInputSamples0U32[i*4+2] << shift0; + ma_uint32 left3 = pInputSamples0U32[i*4+3] << shift0; + ma_uint32 side0 = pInputSamples1U32[i*4+0] << shift1; + ma_uint32 side1 = pInputSamples1U32[i*4+1] << shift1; + ma_uint32 side2 = pInputSamples1U32[i*4+2] << shift1; + ma_uint32 side3 = pInputSamples1U32[i*4+3] << shift1; + ma_uint32 right0 = left0 - side0; + ma_uint32 right1 = left1 - side1; + ma_uint32 right2 = left2 - side2; + ma_uint32 right3 = left3 - side3; + left0 >>= 16; + left1 >>= 16; + left2 >>= 16; + left3 >>= 16; + right0 >>= 16; + right1 >>= 16; + right2 >>= 16; + right3 >>= 16; + pOutputSamples[i*8+0] = (ma_int16)left0; + pOutputSamples[i*8+1] = (ma_int16)right0; + pOutputSamples[i*8+2] = (ma_int16)left1; + pOutputSamples[i*8+3] = (ma_int16)right1; + pOutputSamples[i*8+4] = (ma_int16)left2; + pOutputSamples[i*8+5] = (ma_int16)right2; + pOutputSamples[i*8+6] = (ma_int16)left3; + pOutputSamples[i*8+7] = (ma_int16)right3; + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + ma_uint32 left = pInputSamples0U32[i] << shift0; + ma_uint32 side = pInputSamples1U32[i] << shift1; + ma_uint32 right = left - side; + left >>= 16; + right >>= 16; + pOutputSamples[i*2+0] = (ma_int16)left; + pOutputSamples[i*2+1] = (ma_int16)right; + } +} +#if defined(MA_DR_FLAC_SUPPORT_SSE2) +static MA_INLINE void ma_dr_flac_read_pcm_frames_s16__decode_left_side__sse2(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, ma_int16* pOutputSamples) +{ + ma_uint64 i; + ma_uint64 frameCount4 = frameCount >> 2; + const ma_uint32* pInputSamples0U32 = (const ma_uint32*)pInputSamples0; + const ma_uint32* pInputSamples1U32 = (const ma_uint32*)pInputSamples1; + ma_uint32 shift0 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 shift1 = 
unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + MA_DR_FLAC_ASSERT(pFlac->bitsPerSample <= 24); + for (i = 0; i < frameCount4; ++i) { + __m128i left = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples0 + i), shift0); + __m128i side = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples1 + i), shift1); + __m128i right = _mm_sub_epi32(left, side); + left = _mm_srai_epi32(left, 16); + right = _mm_srai_epi32(right, 16); + _mm_storeu_si128((__m128i*)(pOutputSamples + i*8), ma_dr_flac__mm_packs_interleaved_epi32(left, right)); + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + ma_uint32 left = pInputSamples0U32[i] << shift0; + ma_uint32 side = pInputSamples1U32[i] << shift1; + ma_uint32 right = left - side; + left >>= 16; + right >>= 16; + pOutputSamples[i*2+0] = (ma_int16)left; + pOutputSamples[i*2+1] = (ma_int16)right; + } +} +#endif +#if defined(MA_DR_FLAC_SUPPORT_NEON) +static MA_INLINE void ma_dr_flac_read_pcm_frames_s16__decode_left_side__neon(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, ma_int16* pOutputSamples) +{ + ma_uint64 i; + ma_uint64 frameCount4 = frameCount >> 2; + const ma_uint32* pInputSamples0U32 = (const ma_uint32*)pInputSamples0; + const ma_uint32* pInputSamples1U32 = (const ma_uint32*)pInputSamples1; + ma_uint32 shift0 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 shift1 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + int32x4_t shift0_4; + int32x4_t shift1_4; + MA_DR_FLAC_ASSERT(pFlac->bitsPerSample <= 24); + shift0_4 = vdupq_n_s32(shift0); + shift1_4 = vdupq_n_s32(shift1); + for (i = 0; i < frameCount4; ++i) { + uint32x4_t left; + uint32x4_t side; + uint32x4_t right; + left = vshlq_u32(vld1q_u32(pInputSamples0U32 + i*4), shift0_4); + side = vshlq_u32(vld1q_u32(pInputSamples1U32 + i*4), shift1_4); + right = vsubq_u32(left, side); + left = vshrq_n_u32(left, 16); + right = vshrq_n_u32(right, 16); + ma_dr_flac__vst2q_u16((ma_uint16*)pOutputSamples + i*8, vzip_u16(vmovn_u32(left), vmovn_u32(right))); + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + ma_uint32 left = pInputSamples0U32[i] << shift0; + ma_uint32 side = pInputSamples1U32[i] << shift1; + ma_uint32 right = left - side; + left >>= 16; + right >>= 16; + pOutputSamples[i*2+0] = (ma_int16)left; + pOutputSamples[i*2+1] = (ma_int16)right; + } +} +#endif +static MA_INLINE void ma_dr_flac_read_pcm_frames_s16__decode_left_side(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, ma_int16* pOutputSamples) +{ +#if defined(MA_DR_FLAC_SUPPORT_SSE2) + if (ma_dr_flac__gIsSSE2Supported && pFlac->bitsPerSample <= 24) { + ma_dr_flac_read_pcm_frames_s16__decode_left_side__sse2(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); + } else +#elif defined(MA_DR_FLAC_SUPPORT_NEON) + if (ma_dr_flac__gIsNEONSupported && pFlac->bitsPerSample <= 24) { + ma_dr_flac_read_pcm_frames_s16__decode_left_side__neon(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); + } else +#endif + { +#if 0 + ma_dr_flac_read_pcm_frames_s16__decode_left_side__reference(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); +#else + ma_dr_flac_read_pcm_frames_s16__decode_left_side__scalar(pFlac, frameCount, unusedBitsPerSample, 
pInputSamples0, pInputSamples1, pOutputSamples); +#endif + } +} +#if 0 +static MA_INLINE void ma_dr_flac_read_pcm_frames_s16__decode_right_side__reference(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, ma_int16* pOutputSamples) +{ + ma_uint64 i; + for (i = 0; i < frameCount; ++i) { + ma_uint32 side = (ma_uint32)pInputSamples0[i] << (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample); + ma_uint32 right = (ma_uint32)pInputSamples1[i] << (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample); + ma_uint32 left = right + side; + left >>= 16; + right >>= 16; + pOutputSamples[i*2+0] = (ma_int16)left; + pOutputSamples[i*2+1] = (ma_int16)right; + } +} +#endif +static MA_INLINE void ma_dr_flac_read_pcm_frames_s16__decode_right_side__scalar(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, ma_int16* pOutputSamples) +{ + ma_uint64 i; + ma_uint64 frameCount4 = frameCount >> 2; + const ma_uint32* pInputSamples0U32 = (const ma_uint32*)pInputSamples0; + const ma_uint32* pInputSamples1U32 = (const ma_uint32*)pInputSamples1; + ma_uint32 shift0 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 shift1 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + for (i = 0; i < frameCount4; ++i) { + ma_uint32 side0 = pInputSamples0U32[i*4+0] << shift0; + ma_uint32 side1 = pInputSamples0U32[i*4+1] << shift0; + ma_uint32 side2 = pInputSamples0U32[i*4+2] << shift0; + ma_uint32 side3 = pInputSamples0U32[i*4+3] << shift0; + ma_uint32 right0 = pInputSamples1U32[i*4+0] << shift1; + ma_uint32 right1 = pInputSamples1U32[i*4+1] << shift1; + ma_uint32 right2 = pInputSamples1U32[i*4+2] << shift1; + ma_uint32 right3 = pInputSamples1U32[i*4+3] << shift1; + ma_uint32 left0 = right0 + side0; + ma_uint32 left1 = right1 + side1; + ma_uint32 left2 = right2 + side2; + ma_uint32 left3 = right3 + side3; + left0 >>= 16; + left1 >>= 16; + left2 >>= 16; + left3 >>= 16; + right0 >>= 16; + right1 >>= 16; + right2 >>= 16; + right3 >>= 16; + pOutputSamples[i*8+0] = (ma_int16)left0; + pOutputSamples[i*8+1] = (ma_int16)right0; + pOutputSamples[i*8+2] = (ma_int16)left1; + pOutputSamples[i*8+3] = (ma_int16)right1; + pOutputSamples[i*8+4] = (ma_int16)left2; + pOutputSamples[i*8+5] = (ma_int16)right2; + pOutputSamples[i*8+6] = (ma_int16)left3; + pOutputSamples[i*8+7] = (ma_int16)right3; + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + ma_uint32 side = pInputSamples0U32[i] << shift0; + ma_uint32 right = pInputSamples1U32[i] << shift1; + ma_uint32 left = right + side; + left >>= 16; + right >>= 16; + pOutputSamples[i*2+0] = (ma_int16)left; + pOutputSamples[i*2+1] = (ma_int16)right; + } +} +#if defined(MA_DR_FLAC_SUPPORT_SSE2) +static MA_INLINE void ma_dr_flac_read_pcm_frames_s16__decode_right_side__sse2(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, ma_int16* pOutputSamples) +{ + ma_uint64 i; + ma_uint64 frameCount4 = frameCount >> 2; + const ma_uint32* pInputSamples0U32 = (const ma_uint32*)pInputSamples0; + const ma_uint32* pInputSamples1U32 = (const ma_uint32*)pInputSamples1; + ma_uint32 shift0 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 shift1 = unusedBitsPerSample + 
pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + MA_DR_FLAC_ASSERT(pFlac->bitsPerSample <= 24); + for (i = 0; i < frameCount4; ++i) { + __m128i side = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples0 + i), shift0); + __m128i right = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples1 + i), shift1); + __m128i left = _mm_add_epi32(right, side); + left = _mm_srai_epi32(left, 16); + right = _mm_srai_epi32(right, 16); + _mm_storeu_si128((__m128i*)(pOutputSamples + i*8), ma_dr_flac__mm_packs_interleaved_epi32(left, right)); + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + ma_uint32 side = pInputSamples0U32[i] << shift0; + ma_uint32 right = pInputSamples1U32[i] << shift1; + ma_uint32 left = right + side; + left >>= 16; + right >>= 16; + pOutputSamples[i*2+0] = (ma_int16)left; + pOutputSamples[i*2+1] = (ma_int16)right; + } +} +#endif +#if defined(MA_DR_FLAC_SUPPORT_NEON) +static MA_INLINE void ma_dr_flac_read_pcm_frames_s16__decode_right_side__neon(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, ma_int16* pOutputSamples) +{ + ma_uint64 i; + ma_uint64 frameCount4 = frameCount >> 2; + const ma_uint32* pInputSamples0U32 = (const ma_uint32*)pInputSamples0; + const ma_uint32* pInputSamples1U32 = (const ma_uint32*)pInputSamples1; + ma_uint32 shift0 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 shift1 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + int32x4_t shift0_4; + int32x4_t shift1_4; + MA_DR_FLAC_ASSERT(pFlac->bitsPerSample <= 24); + shift0_4 = vdupq_n_s32(shift0); + shift1_4 = vdupq_n_s32(shift1); + for (i = 0; i < frameCount4; ++i) { + uint32x4_t side; + uint32x4_t right; + uint32x4_t left; + side = vshlq_u32(vld1q_u32(pInputSamples0U32 + i*4), shift0_4); + right = vshlq_u32(vld1q_u32(pInputSamples1U32 + i*4), shift1_4); + left = vaddq_u32(right, side); + left = vshrq_n_u32(left, 16); + right = vshrq_n_u32(right, 16); + ma_dr_flac__vst2q_u16((ma_uint16*)pOutputSamples + i*8, vzip_u16(vmovn_u32(left), vmovn_u32(right))); + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + ma_uint32 side = pInputSamples0U32[i] << shift0; + ma_uint32 right = pInputSamples1U32[i] << shift1; + ma_uint32 left = right + side; + left >>= 16; + right >>= 16; + pOutputSamples[i*2+0] = (ma_int16)left; + pOutputSamples[i*2+1] = (ma_int16)right; + } +} +#endif +static MA_INLINE void ma_dr_flac_read_pcm_frames_s16__decode_right_side(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, ma_int16* pOutputSamples) +{ +#if defined(MA_DR_FLAC_SUPPORT_SSE2) + if (ma_dr_flac__gIsSSE2Supported && pFlac->bitsPerSample <= 24) { + ma_dr_flac_read_pcm_frames_s16__decode_right_side__sse2(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); + } else +#elif defined(MA_DR_FLAC_SUPPORT_NEON) + if (ma_dr_flac__gIsNEONSupported && pFlac->bitsPerSample <= 24) { + ma_dr_flac_read_pcm_frames_s16__decode_right_side__neon(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); + } else +#endif + { +#if 0 + ma_dr_flac_read_pcm_frames_s16__decode_right_side__reference(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); +#else + ma_dr_flac_read_pcm_frames_s16__decode_right_side__scalar(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, 
pInputSamples1, pOutputSamples); +#endif + } +} +#if 0 +static MA_INLINE void ma_dr_flac_read_pcm_frames_s16__decode_mid_side__reference(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, ma_int16* pOutputSamples) +{ + for (ma_uint64 i = 0; i < frameCount; ++i) { + ma_uint32 mid = (ma_uint32)pInputSamples0[i] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 side = (ma_uint32)pInputSamples1[i] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + mid = (mid << 1) | (side & 0x01); + pOutputSamples[i*2+0] = (ma_int16)(((ma_uint32)((ma_int32)(mid + side) >> 1) << unusedBitsPerSample) >> 16); + pOutputSamples[i*2+1] = (ma_int16)(((ma_uint32)((ma_int32)(mid - side) >> 1) << unusedBitsPerSample) >> 16); + } +} +#endif +static MA_INLINE void ma_dr_flac_read_pcm_frames_s16__decode_mid_side__scalar(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, ma_int16* pOutputSamples) +{ + ma_uint64 i; + ma_uint64 frameCount4 = frameCount >> 2; + const ma_uint32* pInputSamples0U32 = (const ma_uint32*)pInputSamples0; + const ma_uint32* pInputSamples1U32 = (const ma_uint32*)pInputSamples1; + ma_uint32 shift = unusedBitsPerSample; + if (shift > 0) { + shift -= 1; + for (i = 0; i < frameCount4; ++i) { + ma_uint32 temp0L; + ma_uint32 temp1L; + ma_uint32 temp2L; + ma_uint32 temp3L; + ma_uint32 temp0R; + ma_uint32 temp1R; + ma_uint32 temp2R; + ma_uint32 temp3R; + ma_uint32 mid0 = pInputSamples0U32[i*4+0] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 mid1 = pInputSamples0U32[i*4+1] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 mid2 = pInputSamples0U32[i*4+2] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 mid3 = pInputSamples0U32[i*4+3] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 side0 = pInputSamples1U32[i*4+0] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + ma_uint32 side1 = pInputSamples1U32[i*4+1] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + ma_uint32 side2 = pInputSamples1U32[i*4+2] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + ma_uint32 side3 = pInputSamples1U32[i*4+3] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + mid0 = (mid0 << 1) | (side0 & 0x01); + mid1 = (mid1 << 1) | (side1 & 0x01); + mid2 = (mid2 << 1) | (side2 & 0x01); + mid3 = (mid3 << 1) | (side3 & 0x01); + temp0L = (mid0 + side0) << shift; + temp1L = (mid1 + side1) << shift; + temp2L = (mid2 + side2) << shift; + temp3L = (mid3 + side3) << shift; + temp0R = (mid0 - side0) << shift; + temp1R = (mid1 - side1) << shift; + temp2R = (mid2 - side2) << shift; + temp3R = (mid3 - side3) << shift; + temp0L >>= 16; + temp1L >>= 16; + temp2L >>= 16; + temp3L >>= 16; + temp0R >>= 16; + temp1R >>= 16; + temp2R >>= 16; + temp3R >>= 16; + pOutputSamples[i*8+0] = (ma_int16)temp0L; + pOutputSamples[i*8+1] = (ma_int16)temp0R; + pOutputSamples[i*8+2] = (ma_int16)temp1L; + pOutputSamples[i*8+3] = (ma_int16)temp1R; + pOutputSamples[i*8+4] = (ma_int16)temp2L; + pOutputSamples[i*8+5] = (ma_int16)temp2R; + pOutputSamples[i*8+6] = (ma_int16)temp3L; + pOutputSamples[i*8+7] = (ma_int16)temp3R; + } + } else { + for (i = 0; i < frameCount4; ++i) { + ma_uint32 temp0L; + ma_uint32 temp1L; + ma_uint32 temp2L; + ma_uint32 temp3L; + ma_uint32 temp0R; + ma_uint32 temp1R; + ma_uint32 temp2R; + ma_uint32 temp3R; + 
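
All of the mid-side decoders in this file share one reconstruction: the encoder stores mid = (left + right) >> 1 and side = left - right, and the bit dropped by the mid shift always equals the low bit of side, so the decoder rebuilds mid as (mid << 1) | (side & 1) and then takes left = (mid + side) >> 1 and right = (mid - side) >> 1. A small self-checking sketch of that round trip (illustrative only; like the vendored code, it relies on the usual two's-complement arithmetic-shift behavior):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        int32_t L = 32761, R = -12345;
        int32_t mid  = (L + R) >> 1;                /* encoder: halved sum */
        int32_t side = L - R;                       /* encoder: difference */
        uint32_t m = ((uint32_t)mid << 1) | ((uint32_t)side & 1); /* restore dropped bit */
        int32_t Lr = (int32_t)(m + (uint32_t)side) >> 1;
        int32_t Rr = (int32_t)(m - (uint32_t)side) >> 1;
        assert(Lr == L && Rr == R);                 /* lossless round trip */
        return 0;
    }
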
ma_uint32 mid0 = pInputSamples0U32[i*4+0] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 mid1 = pInputSamples0U32[i*4+1] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 mid2 = pInputSamples0U32[i*4+2] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 mid3 = pInputSamples0U32[i*4+3] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 side0 = pInputSamples1U32[i*4+0] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + ma_uint32 side1 = pInputSamples1U32[i*4+1] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + ma_uint32 side2 = pInputSamples1U32[i*4+2] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + ma_uint32 side3 = pInputSamples1U32[i*4+3] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + mid0 = (mid0 << 1) | (side0 & 0x01); + mid1 = (mid1 << 1) | (side1 & 0x01); + mid2 = (mid2 << 1) | (side2 & 0x01); + mid3 = (mid3 << 1) | (side3 & 0x01); + temp0L = ((ma_int32)(mid0 + side0) >> 1); + temp1L = ((ma_int32)(mid1 + side1) >> 1); + temp2L = ((ma_int32)(mid2 + side2) >> 1); + temp3L = ((ma_int32)(mid3 + side3) >> 1); + temp0R = ((ma_int32)(mid0 - side0) >> 1); + temp1R = ((ma_int32)(mid1 - side1) >> 1); + temp2R = ((ma_int32)(mid2 - side2) >> 1); + temp3R = ((ma_int32)(mid3 - side3) >> 1); + temp0L >>= 16; + temp1L >>= 16; + temp2L >>= 16; + temp3L >>= 16; + temp0R >>= 16; + temp1R >>= 16; + temp2R >>= 16; + temp3R >>= 16; + pOutputSamples[i*8+0] = (ma_int16)temp0L; + pOutputSamples[i*8+1] = (ma_int16)temp0R; + pOutputSamples[i*8+2] = (ma_int16)temp1L; + pOutputSamples[i*8+3] = (ma_int16)temp1R; + pOutputSamples[i*8+4] = (ma_int16)temp2L; + pOutputSamples[i*8+5] = (ma_int16)temp2R; + pOutputSamples[i*8+6] = (ma_int16)temp3L; + pOutputSamples[i*8+7] = (ma_int16)temp3R; + } + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + ma_uint32 mid = pInputSamples0U32[i] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 side = pInputSamples1U32[i] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + mid = (mid << 1) | (side & 0x01); + pOutputSamples[i*2+0] = (ma_int16)(((ma_uint32)((ma_int32)(mid + side) >> 1) << unusedBitsPerSample) >> 16); + pOutputSamples[i*2+1] = (ma_int16)(((ma_uint32)((ma_int32)(mid - side) >> 1) << unusedBitsPerSample) >> 16); + } +} +#if defined(MA_DR_FLAC_SUPPORT_SSE2) +static MA_INLINE void ma_dr_flac_read_pcm_frames_s16__decode_mid_side__sse2(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, ma_int16* pOutputSamples) +{ + ma_uint64 i; + ma_uint64 frameCount4 = frameCount >> 2; + const ma_uint32* pInputSamples0U32 = (const ma_uint32*)pInputSamples0; + const ma_uint32* pInputSamples1U32 = (const ma_uint32*)pInputSamples1; + ma_uint32 shift = unusedBitsPerSample; + MA_DR_FLAC_ASSERT(pFlac->bitsPerSample <= 24); + if (shift == 0) { + for (i = 0; i < frameCount4; ++i) { + __m128i mid; + __m128i side; + __m128i left; + __m128i right; + mid = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples0 + i), pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample); + side = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples1 + i), pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample); + mid = _mm_or_si128(_mm_slli_epi32(mid, 1), _mm_and_si128(side, _mm_set1_epi32(0x01))); + left = _mm_srai_epi32(_mm_add_epi32(mid, side), 1); + right = _mm_srai_epi32(_mm_sub_epi32(mid, side), 1); + left = 
_mm_srai_epi32(left, 16); + right = _mm_srai_epi32(right, 16); + _mm_storeu_si128((__m128i*)(pOutputSamples + i*8), ma_dr_flac__mm_packs_interleaved_epi32(left, right)); + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + ma_uint32 mid = pInputSamples0U32[i] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 side = pInputSamples1U32[i] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + mid = (mid << 1) | (side & 0x01); + pOutputSamples[i*2+0] = (ma_int16)(((ma_int32)(mid + side) >> 1) >> 16); + pOutputSamples[i*2+1] = (ma_int16)(((ma_int32)(mid - side) >> 1) >> 16); + } + } else { + shift -= 1; + for (i = 0; i < frameCount4; ++i) { + __m128i mid; + __m128i side; + __m128i left; + __m128i right; + mid = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples0 + i), pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample); + side = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples1 + i), pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample); + mid = _mm_or_si128(_mm_slli_epi32(mid, 1), _mm_and_si128(side, _mm_set1_epi32(0x01))); + left = _mm_slli_epi32(_mm_add_epi32(mid, side), shift); + right = _mm_slli_epi32(_mm_sub_epi32(mid, side), shift); + left = _mm_srai_epi32(left, 16); + right = _mm_srai_epi32(right, 16); + _mm_storeu_si128((__m128i*)(pOutputSamples + i*8), ma_dr_flac__mm_packs_interleaved_epi32(left, right)); + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + ma_uint32 mid = pInputSamples0U32[i] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 side = pInputSamples1U32[i] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + mid = (mid << 1) | (side & 0x01); + pOutputSamples[i*2+0] = (ma_int16)(((mid + side) << shift) >> 16); + pOutputSamples[i*2+1] = (ma_int16)(((mid - side) << shift) >> 16); + } + } +} +#endif +#if defined(MA_DR_FLAC_SUPPORT_NEON) +static MA_INLINE void ma_dr_flac_read_pcm_frames_s16__decode_mid_side__neon(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, ma_int16* pOutputSamples) +{ + ma_uint64 i; + ma_uint64 frameCount4 = frameCount >> 2; + const ma_uint32* pInputSamples0U32 = (const ma_uint32*)pInputSamples0; + const ma_uint32* pInputSamples1U32 = (const ma_uint32*)pInputSamples1; + ma_uint32 shift = unusedBitsPerSample; + int32x4_t wbpsShift0_4; + int32x4_t wbpsShift1_4; + MA_DR_FLAC_ASSERT(pFlac->bitsPerSample <= 24); + wbpsShift0_4 = vdupq_n_s32(pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample); + wbpsShift1_4 = vdupq_n_s32(pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample); + if (shift == 0) { + for (i = 0; i < frameCount4; ++i) { + uint32x4_t mid; + uint32x4_t side; + int32x4_t left; + int32x4_t right; + mid = vshlq_u32(vld1q_u32(pInputSamples0U32 + i*4), wbpsShift0_4); + side = vshlq_u32(vld1q_u32(pInputSamples1U32 + i*4), wbpsShift1_4); + mid = vorrq_u32(vshlq_n_u32(mid, 1), vandq_u32(side, vdupq_n_u32(1))); + left = vshrq_n_s32(vreinterpretq_s32_u32(vaddq_u32(mid, side)), 1); + right = vshrq_n_s32(vreinterpretq_s32_u32(vsubq_u32(mid, side)), 1); + left = vshrq_n_s32(left, 16); + right = vshrq_n_s32(right, 16); + ma_dr_flac__vst2q_s16(pOutputSamples + i*8, vzip_s16(vmovn_s32(left), vmovn_s32(right))); + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + ma_uint32 mid = pInputSamples0U32[i] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 side = pInputSamples1U32[i] << 
pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + mid = (mid << 1) | (side & 0x01); + pOutputSamples[i*2+0] = (ma_int16)(((ma_int32)(mid + side) >> 1) >> 16); + pOutputSamples[i*2+1] = (ma_int16)(((ma_int32)(mid - side) >> 1) >> 16); + } + } else { + int32x4_t shift4; + shift -= 1; + shift4 = vdupq_n_s32(shift); + for (i = 0; i < frameCount4; ++i) { + uint32x4_t mid; + uint32x4_t side; + int32x4_t left; + int32x4_t right; + mid = vshlq_u32(vld1q_u32(pInputSamples0U32 + i*4), wbpsShift0_4); + side = vshlq_u32(vld1q_u32(pInputSamples1U32 + i*4), wbpsShift1_4); + mid = vorrq_u32(vshlq_n_u32(mid, 1), vandq_u32(side, vdupq_n_u32(1))); + left = vreinterpretq_s32_u32(vshlq_u32(vaddq_u32(mid, side), shift4)); + right = vreinterpretq_s32_u32(vshlq_u32(vsubq_u32(mid, side), shift4)); + left = vshrq_n_s32(left, 16); + right = vshrq_n_s32(right, 16); + ma_dr_flac__vst2q_s16(pOutputSamples + i*8, vzip_s16(vmovn_s32(left), vmovn_s32(right))); + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + ma_uint32 mid = pInputSamples0U32[i] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 side = pInputSamples1U32[i] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + mid = (mid << 1) | (side & 0x01); + pOutputSamples[i*2+0] = (ma_int16)(((mid + side) << shift) >> 16); + pOutputSamples[i*2+1] = (ma_int16)(((mid - side) << shift) >> 16); + } + } +} +#endif +static MA_INLINE void ma_dr_flac_read_pcm_frames_s16__decode_mid_side(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, ma_int16* pOutputSamples) +{ +#if defined(MA_DR_FLAC_SUPPORT_SSE2) + if (ma_dr_flac__gIsSSE2Supported && pFlac->bitsPerSample <= 24) { + ma_dr_flac_read_pcm_frames_s16__decode_mid_side__sse2(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); + } else +#elif defined(MA_DR_FLAC_SUPPORT_NEON) + if (ma_dr_flac__gIsNEONSupported && pFlac->bitsPerSample <= 24) { + ma_dr_flac_read_pcm_frames_s16__decode_mid_side__neon(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); + } else +#endif + { +#if 0 + ma_dr_flac_read_pcm_frames_s16__decode_mid_side__reference(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); +#else + ma_dr_flac_read_pcm_frames_s16__decode_mid_side__scalar(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); +#endif + } +} +#if 0 +static MA_INLINE void ma_dr_flac_read_pcm_frames_s16__decode_independent_stereo__reference(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, ma_int16* pOutputSamples) +{ + for (ma_uint64 i = 0; i < frameCount; ++i) { + pOutputSamples[i*2+0] = (ma_int16)((ma_int32)((ma_uint32)pInputSamples0[i] << (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample)) >> 16); + pOutputSamples[i*2+1] = (ma_int16)((ma_int32)((ma_uint32)pInputSamples1[i] << (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample)) >> 16); + } +} +#endif +static MA_INLINE void ma_dr_flac_read_pcm_frames_s16__decode_independent_stereo__scalar(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, ma_int16* pOutputSamples) +{ + ma_uint64 i; + ma_uint64 frameCount4 = frameCount >> 2; + const ma_uint32* pInputSamples0U32 = (const ma_uint32*)pInputSamples0; + 
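
Each ..._decode_* wrapper above applies the same two-level dispatch: a compile-time MA_DR_FLAC_SUPPORT_SSE2 / MA_DR_FLAC_SUPPORT_NEON check around a runtime CPU-capability flag, plus a bitsPerSample <= 24 guard because the vector paths assume at least 8 bits of headroom when realigning samples; anything else falls through to the scalar path, and the #if 0 reference versions remain only as documentation. A distilled sketch of the pattern, with illustrative names:

    #include <stddef.h>
    #include <stdint.h>

    typedef void (*decode_fn)(const int32_t* in0, const int32_t* in1,
                              int16_t* out, size_t frameCount);

    /* Pick a decode path: SIMD only when compiled in, detected at runtime,
     * and when the stream leaves enough headroom; otherwise scalar. */
    static decode_fn select_decoder(int cpuHasSimd, unsigned bitsPerSample,
                                    decode_fn simdPath, decode_fn scalarPath)
    {
    #if defined(SUPPORT_SIMD)   /* stand-in for MA_DR_FLAC_SUPPORT_SSE2/NEON */
        if (cpuHasSimd && bitsPerSample <= 24) {
            return simdPath;
        }
    #endif
        (void)cpuHasSimd; (void)bitsPerSample; (void)simdPath;
        return scalarPath;
    }
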
const ma_uint32* pInputSamples1U32 = (const ma_uint32*)pInputSamples1; + ma_uint32 shift0 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 shift1 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + for (i = 0; i < frameCount4; ++i) { + ma_uint32 tempL0 = pInputSamples0U32[i*4+0] << shift0; + ma_uint32 tempL1 = pInputSamples0U32[i*4+1] << shift0; + ma_uint32 tempL2 = pInputSamples0U32[i*4+2] << shift0; + ma_uint32 tempL3 = pInputSamples0U32[i*4+3] << shift0; + ma_uint32 tempR0 = pInputSamples1U32[i*4+0] << shift1; + ma_uint32 tempR1 = pInputSamples1U32[i*4+1] << shift1; + ma_uint32 tempR2 = pInputSamples1U32[i*4+2] << shift1; + ma_uint32 tempR3 = pInputSamples1U32[i*4+3] << shift1; + tempL0 >>= 16; + tempL1 >>= 16; + tempL2 >>= 16; + tempL3 >>= 16; + tempR0 >>= 16; + tempR1 >>= 16; + tempR2 >>= 16; + tempR3 >>= 16; + pOutputSamples[i*8+0] = (ma_int16)tempL0; + pOutputSamples[i*8+1] = (ma_int16)tempR0; + pOutputSamples[i*8+2] = (ma_int16)tempL1; + pOutputSamples[i*8+3] = (ma_int16)tempR1; + pOutputSamples[i*8+4] = (ma_int16)tempL2; + pOutputSamples[i*8+5] = (ma_int16)tempR2; + pOutputSamples[i*8+6] = (ma_int16)tempL3; + pOutputSamples[i*8+7] = (ma_int16)tempR3; + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + pOutputSamples[i*2+0] = (ma_int16)((pInputSamples0U32[i] << shift0) >> 16); + pOutputSamples[i*2+1] = (ma_int16)((pInputSamples1U32[i] << shift1) >> 16); + } +} +#if defined(MA_DR_FLAC_SUPPORT_SSE2) +static MA_INLINE void ma_dr_flac_read_pcm_frames_s16__decode_independent_stereo__sse2(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, ma_int16* pOutputSamples) +{ + ma_uint64 i; + ma_uint64 frameCount4 = frameCount >> 2; + const ma_uint32* pInputSamples0U32 = (const ma_uint32*)pInputSamples0; + const ma_uint32* pInputSamples1U32 = (const ma_uint32*)pInputSamples1; + ma_uint32 shift0 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 shift1 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + for (i = 0; i < frameCount4; ++i) { + __m128i left = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples0 + i), shift0); + __m128i right = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples1 + i), shift1); + left = _mm_srai_epi32(left, 16); + right = _mm_srai_epi32(right, 16); + _mm_storeu_si128((__m128i*)(pOutputSamples + i*8), ma_dr_flac__mm_packs_interleaved_epi32(left, right)); + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + pOutputSamples[i*2+0] = (ma_int16)((pInputSamples0U32[i] << shift0) >> 16); + pOutputSamples[i*2+1] = (ma_int16)((pInputSamples1U32[i] << shift1) >> 16); + } +} +#endif +#if defined(MA_DR_FLAC_SUPPORT_NEON) +static MA_INLINE void ma_dr_flac_read_pcm_frames_s16__decode_independent_stereo__neon(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, ma_int16* pOutputSamples) +{ + ma_uint64 i; + ma_uint64 frameCount4 = frameCount >> 2; + const ma_uint32* pInputSamples0U32 = (const ma_uint32*)pInputSamples0; + const ma_uint32* pInputSamples1U32 = (const ma_uint32*)pInputSamples1; + ma_uint32 shift0 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 shift1 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + int32x4_t shift0_4 = vdupq_n_s32(shift0); + 
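
The s16 variants collected here reuse the s32 reconstruction: each sample is realigned to full 32-bit scale and then only the top 16 bits are kept with an arithmetic shift, which is plain truncation, with no rounding or dithering. A one-function sketch of the narrowing step (illustrative names):

    #include <stdint.h>

    /* Realign a decoded sample to 32-bit scale, then truncate to s16. */
    static int16_t narrow_to_s16(int32_t sample, uint32_t realignShift)
    {
        uint32_t full = (uint32_t)sample << realignShift; /* 32-bit aligned */
        return (int16_t)((int32_t)full >> 16);            /* keep top 16 bits */
    }
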
int32x4_t shift1_4 = vdupq_n_s32(shift1); + for (i = 0; i < frameCount4; ++i) { + int32x4_t left; + int32x4_t right; + left = vreinterpretq_s32_u32(vshlq_u32(vld1q_u32(pInputSamples0U32 + i*4), shift0_4)); + right = vreinterpretq_s32_u32(vshlq_u32(vld1q_u32(pInputSamples1U32 + i*4), shift1_4)); + left = vshrq_n_s32(left, 16); + right = vshrq_n_s32(right, 16); + ma_dr_flac__vst2q_s16(pOutputSamples + i*8, vzip_s16(vmovn_s32(left), vmovn_s32(right))); + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + pOutputSamples[i*2+0] = (ma_int16)((pInputSamples0U32[i] << shift0) >> 16); + pOutputSamples[i*2+1] = (ma_int16)((pInputSamples1U32[i] << shift1) >> 16); + } +} +#endif +static MA_INLINE void ma_dr_flac_read_pcm_frames_s16__decode_independent_stereo(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, ma_int16* pOutputSamples) +{ +#if defined(MA_DR_FLAC_SUPPORT_SSE2) + if (ma_dr_flac__gIsSSE2Supported && pFlac->bitsPerSample <= 24) { + ma_dr_flac_read_pcm_frames_s16__decode_independent_stereo__sse2(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); + } else +#elif defined(MA_DR_FLAC_SUPPORT_NEON) + if (ma_dr_flac__gIsNEONSupported && pFlac->bitsPerSample <= 24) { + ma_dr_flac_read_pcm_frames_s16__decode_independent_stereo__neon(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); + } else +#endif + { +#if 0 + ma_dr_flac_read_pcm_frames_s16__decode_independent_stereo__reference(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); +#else + ma_dr_flac_read_pcm_frames_s16__decode_independent_stereo__scalar(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); +#endif + } +} +MA_API ma_uint64 ma_dr_flac_read_pcm_frames_s16(ma_dr_flac* pFlac, ma_uint64 framesToRead, ma_int16* pBufferOut) +{ + ma_uint64 framesRead; + ma_uint32 unusedBitsPerSample; + if (pFlac == NULL || framesToRead == 0) { + return 0; + } + if (pBufferOut == NULL) { + return ma_dr_flac__seek_forward_by_pcm_frames(pFlac, framesToRead); + } + MA_DR_FLAC_ASSERT(pFlac->bitsPerSample <= 32); + unusedBitsPerSample = 32 - pFlac->bitsPerSample; + framesRead = 0; + while (framesToRead > 0) { + if (pFlac->currentFLACFrame.pcmFramesRemaining == 0) { + if (!ma_dr_flac__read_and_decode_next_flac_frame(pFlac)) { + break; + } + } else { + unsigned int channelCount = ma_dr_flac__get_channel_count_from_channel_assignment(pFlac->currentFLACFrame.header.channelAssignment); + ma_uint64 iFirstPCMFrame = pFlac->currentFLACFrame.header.blockSizeInPCMFrames - pFlac->currentFLACFrame.pcmFramesRemaining; + ma_uint64 frameCountThisIteration = framesToRead; + if (frameCountThisIteration > pFlac->currentFLACFrame.pcmFramesRemaining) { + frameCountThisIteration = pFlac->currentFLACFrame.pcmFramesRemaining; + } + if (channelCount == 2) { + const ma_int32* pDecodedSamples0 = pFlac->currentFLACFrame.subframes[0].pSamplesS32 + iFirstPCMFrame; + const ma_int32* pDecodedSamples1 = pFlac->currentFLACFrame.subframes[1].pSamplesS32 + iFirstPCMFrame; + switch (pFlac->currentFLACFrame.header.channelAssignment) + { + case MA_DR_FLAC_CHANNEL_ASSIGNMENT_LEFT_SIDE: + { + ma_dr_flac_read_pcm_frames_s16__decode_left_side(pFlac, frameCountThisIteration, unusedBitsPerSample, pDecodedSamples0, pDecodedSamples1, pBufferOut); + } break; + case MA_DR_FLAC_CHANNEL_ASSIGNMENT_RIGHT_SIDE: + { + ma_dr_flac_read_pcm_frames_s16__decode_right_side(pFlac, 
frameCountThisIteration, unusedBitsPerSample, pDecodedSamples0, pDecodedSamples1, pBufferOut); + } break; + case MA_DR_FLAC_CHANNEL_ASSIGNMENT_MID_SIDE: + { + ma_dr_flac_read_pcm_frames_s16__decode_mid_side(pFlac, frameCountThisIteration, unusedBitsPerSample, pDecodedSamples0, pDecodedSamples1, pBufferOut); + } break; + case MA_DR_FLAC_CHANNEL_ASSIGNMENT_INDEPENDENT: + default: + { + ma_dr_flac_read_pcm_frames_s16__decode_independent_stereo(pFlac, frameCountThisIteration, unusedBitsPerSample, pDecodedSamples0, pDecodedSamples1, pBufferOut); + } break; + } + } else { + ma_uint64 i; + for (i = 0; i < frameCountThisIteration; ++i) { + unsigned int j; + for (j = 0; j < channelCount; ++j) { + ma_int32 sampleS32 = (ma_int32)((ma_uint32)(pFlac->currentFLACFrame.subframes[j].pSamplesS32[iFirstPCMFrame + i]) << (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[j].wastedBitsPerSample)); + pBufferOut[(i*channelCount)+j] = (ma_int16)(sampleS32 >> 16); + } + } + } + framesRead += frameCountThisIteration; + pBufferOut += frameCountThisIteration * channelCount; + framesToRead -= frameCountThisIteration; + pFlac->currentPCMFrame += frameCountThisIteration; + pFlac->currentFLACFrame.pcmFramesRemaining -= (ma_uint32)frameCountThisIteration; + } + } + return framesRead; +} +#if 0 +static MA_INLINE void ma_dr_flac_read_pcm_frames_f32__decode_left_side__reference(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, float* pOutputSamples) +{ + ma_uint64 i; + for (i = 0; i < frameCount; ++i) { + ma_uint32 left = (ma_uint32)pInputSamples0[i] << (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample); + ma_uint32 side = (ma_uint32)pInputSamples1[i] << (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample); + ma_uint32 right = left - side; + pOutputSamples[i*2+0] = (float)((ma_int32)left / 2147483648.0); + pOutputSamples[i*2+1] = (float)((ma_int32)right / 2147483648.0); + } +} +#endif +static MA_INLINE void ma_dr_flac_read_pcm_frames_f32__decode_left_side__scalar(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, float* pOutputSamples) +{ + ma_uint64 i; + ma_uint64 frameCount4 = frameCount >> 2; + const ma_uint32* pInputSamples0U32 = (const ma_uint32*)pInputSamples0; + const ma_uint32* pInputSamples1U32 = (const ma_uint32*)pInputSamples1; + ma_uint32 shift0 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 shift1 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + float factor = 1 / 2147483648.0; + for (i = 0; i < frameCount4; ++i) { + ma_uint32 left0 = pInputSamples0U32[i*4+0] << shift0; + ma_uint32 left1 = pInputSamples0U32[i*4+1] << shift0; + ma_uint32 left2 = pInputSamples0U32[i*4+2] << shift0; + ma_uint32 left3 = pInputSamples0U32[i*4+3] << shift0; + ma_uint32 side0 = pInputSamples1U32[i*4+0] << shift1; + ma_uint32 side1 = pInputSamples1U32[i*4+1] << shift1; + ma_uint32 side2 = pInputSamples1U32[i*4+2] << shift1; + ma_uint32 side3 = pInputSamples1U32[i*4+3] << shift1; + ma_uint32 right0 = left0 - side0; + ma_uint32 right1 = left1 - side1; + ma_uint32 right2 = left2 - side2; + ma_uint32 right3 = left3 - side3; + pOutputSamples[i*8+0] = (ma_int32)left0 * factor; + pOutputSamples[i*8+1] = (ma_int32)right0 * factor; + pOutputSamples[i*8+2] = (ma_int32)left1 * factor; + pOutputSamples[i*8+3] = (ma_int32)right1 * 
factor; + pOutputSamples[i*8+4] = (ma_int32)left2 * factor; + pOutputSamples[i*8+5] = (ma_int32)right2 * factor; + pOutputSamples[i*8+6] = (ma_int32)left3 * factor; + pOutputSamples[i*8+7] = (ma_int32)right3 * factor; + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + ma_uint32 left = pInputSamples0U32[i] << shift0; + ma_uint32 side = pInputSamples1U32[i] << shift1; + ma_uint32 right = left - side; + pOutputSamples[i*2+0] = (ma_int32)left * factor; + pOutputSamples[i*2+1] = (ma_int32)right * factor; + } +} +#if defined(MA_DR_FLAC_SUPPORT_SSE2) +static MA_INLINE void ma_dr_flac_read_pcm_frames_f32__decode_left_side__sse2(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, float* pOutputSamples) +{ + ma_uint64 i; + ma_uint64 frameCount4 = frameCount >> 2; + const ma_uint32* pInputSamples0U32 = (const ma_uint32*)pInputSamples0; + const ma_uint32* pInputSamples1U32 = (const ma_uint32*)pInputSamples1; + ma_uint32 shift0 = (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample) - 8; + ma_uint32 shift1 = (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample) - 8; + __m128 factor; + MA_DR_FLAC_ASSERT(pFlac->bitsPerSample <= 24); + factor = _mm_set1_ps(1.0f / 8388608.0f); + for (i = 0; i < frameCount4; ++i) { + __m128i left = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples0 + i), shift0); + __m128i side = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples1 + i), shift1); + __m128i right = _mm_sub_epi32(left, side); + __m128 leftf = _mm_mul_ps(_mm_cvtepi32_ps(left), factor); + __m128 rightf = _mm_mul_ps(_mm_cvtepi32_ps(right), factor); + _mm_storeu_ps(pOutputSamples + i*8 + 0, _mm_unpacklo_ps(leftf, rightf)); + _mm_storeu_ps(pOutputSamples + i*8 + 4, _mm_unpackhi_ps(leftf, rightf)); + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + ma_uint32 left = pInputSamples0U32[i] << shift0; + ma_uint32 side = pInputSamples1U32[i] << shift1; + ma_uint32 right = left - side; + pOutputSamples[i*2+0] = (ma_int32)left / 8388608.0f; + pOutputSamples[i*2+1] = (ma_int32)right / 8388608.0f; + } +} +#endif +#if defined(MA_DR_FLAC_SUPPORT_NEON) +static MA_INLINE void ma_dr_flac_read_pcm_frames_f32__decode_left_side__neon(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, float* pOutputSamples) +{ + ma_uint64 i; + ma_uint64 frameCount4 = frameCount >> 2; + const ma_uint32* pInputSamples0U32 = (const ma_uint32*)pInputSamples0; + const ma_uint32* pInputSamples1U32 = (const ma_uint32*)pInputSamples1; + ma_uint32 shift0 = (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample) - 8; + ma_uint32 shift1 = (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample) - 8; + float32x4_t factor4; + int32x4_t shift0_4; + int32x4_t shift1_4; + MA_DR_FLAC_ASSERT(pFlac->bitsPerSample <= 24); + factor4 = vdupq_n_f32(1.0f / 8388608.0f); + shift0_4 = vdupq_n_s32(shift0); + shift1_4 = vdupq_n_s32(shift1); + for (i = 0; i < frameCount4; ++i) { + uint32x4_t left; + uint32x4_t side; + uint32x4_t right; + float32x4_t leftf; + float32x4_t rightf; + left = vshlq_u32(vld1q_u32(pInputSamples0U32 + i*4), shift0_4); + side = vshlq_u32(vld1q_u32(pInputSamples1U32 + i*4), shift1_4); + right = vsubq_u32(left, side); + leftf = vmulq_f32(vcvtq_f32_s32(vreinterpretq_s32_u32(left)), factor4); + rightf = 
vmulq_f32(vcvtq_f32_s32(vreinterpretq_s32_u32(right)), factor4); + ma_dr_flac__vst2q_f32(pOutputSamples + i*8, vzipq_f32(leftf, rightf)); + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + ma_uint32 left = pInputSamples0U32[i] << shift0; + ma_uint32 side = pInputSamples1U32[i] << shift1; + ma_uint32 right = left - side; + pOutputSamples[i*2+0] = (ma_int32)left / 8388608.0f; + pOutputSamples[i*2+1] = (ma_int32)right / 8388608.0f; + } +} +#endif +static MA_INLINE void ma_dr_flac_read_pcm_frames_f32__decode_left_side(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, float* pOutputSamples) +{ +#if defined(MA_DR_FLAC_SUPPORT_SSE2) + if (ma_dr_flac__gIsSSE2Supported && pFlac->bitsPerSample <= 24) { + ma_dr_flac_read_pcm_frames_f32__decode_left_side__sse2(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); + } else +#elif defined(MA_DR_FLAC_SUPPORT_NEON) + if (ma_dr_flac__gIsNEONSupported && pFlac->bitsPerSample <= 24) { + ma_dr_flac_read_pcm_frames_f32__decode_left_side__neon(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); + } else +#endif + { +#if 0 + ma_dr_flac_read_pcm_frames_f32__decode_left_side__reference(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); +#else + ma_dr_flac_read_pcm_frames_f32__decode_left_side__scalar(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); +#endif + } +} +#if 0 +static MA_INLINE void ma_dr_flac_read_pcm_frames_f32__decode_right_side__reference(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, float* pOutputSamples) +{ + ma_uint64 i; + for (i = 0; i < frameCount; ++i) { + ma_uint32 side = (ma_uint32)pInputSamples0[i] << (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample); + ma_uint32 right = (ma_uint32)pInputSamples1[i] << (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample); + ma_uint32 left = right + side; + pOutputSamples[i*2+0] = (float)((ma_int32)left / 2147483648.0); + pOutputSamples[i*2+1] = (float)((ma_int32)right / 2147483648.0); + } +} +#endif +static MA_INLINE void ma_dr_flac_read_pcm_frames_f32__decode_right_side__scalar(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, float* pOutputSamples) +{ + ma_uint64 i; + ma_uint64 frameCount4 = frameCount >> 2; + const ma_uint32* pInputSamples0U32 = (const ma_uint32*)pInputSamples0; + const ma_uint32* pInputSamples1U32 = (const ma_uint32*)pInputSamples1; + ma_uint32 shift0 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 shift1 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + float factor = 1 / 2147483648.0; + for (i = 0; i < frameCount4; ++i) { + ma_uint32 side0 = pInputSamples0U32[i*4+0] << shift0; + ma_uint32 side1 = pInputSamples0U32[i*4+1] << shift0; + ma_uint32 side2 = pInputSamples0U32[i*4+2] << shift0; + ma_uint32 side3 = pInputSamples0U32[i*4+3] << shift0; + ma_uint32 right0 = pInputSamples1U32[i*4+0] << shift1; + ma_uint32 right1 = pInputSamples1U32[i*4+1] << shift1; + ma_uint32 right2 = pInputSamples1U32[i*4+2] << shift1; + ma_uint32 right3 = pInputSamples1U32[i*4+3] << shift1; + ma_uint32 left0 = right0 + side0; + ma_uint32 left1 = right1 + side1; 
+ ma_uint32 left2 = right2 + side2; + ma_uint32 left3 = right3 + side3; + pOutputSamples[i*8+0] = (ma_int32)left0 * factor; + pOutputSamples[i*8+1] = (ma_int32)right0 * factor; + pOutputSamples[i*8+2] = (ma_int32)left1 * factor; + pOutputSamples[i*8+3] = (ma_int32)right1 * factor; + pOutputSamples[i*8+4] = (ma_int32)left2 * factor; + pOutputSamples[i*8+5] = (ma_int32)right2 * factor; + pOutputSamples[i*8+6] = (ma_int32)left3 * factor; + pOutputSamples[i*8+7] = (ma_int32)right3 * factor; + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + ma_uint32 side = pInputSamples0U32[i] << shift0; + ma_uint32 right = pInputSamples1U32[i] << shift1; + ma_uint32 left = right + side; + pOutputSamples[i*2+0] = (ma_int32)left * factor; + pOutputSamples[i*2+1] = (ma_int32)right * factor; + } +} +#if defined(MA_DR_FLAC_SUPPORT_SSE2) +static MA_INLINE void ma_dr_flac_read_pcm_frames_f32__decode_right_side__sse2(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, float* pOutputSamples) +{ + ma_uint64 i; + ma_uint64 frameCount4 = frameCount >> 2; + const ma_uint32* pInputSamples0U32 = (const ma_uint32*)pInputSamples0; + const ma_uint32* pInputSamples1U32 = (const ma_uint32*)pInputSamples1; + ma_uint32 shift0 = (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample) - 8; + ma_uint32 shift1 = (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample) - 8; + __m128 factor; + MA_DR_FLAC_ASSERT(pFlac->bitsPerSample <= 24); + factor = _mm_set1_ps(1.0f / 8388608.0f); + for (i = 0; i < frameCount4; ++i) { + __m128i side = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples0 + i), shift0); + __m128i right = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples1 + i), shift1); + __m128i left = _mm_add_epi32(right, side); + __m128 leftf = _mm_mul_ps(_mm_cvtepi32_ps(left), factor); + __m128 rightf = _mm_mul_ps(_mm_cvtepi32_ps(right), factor); + _mm_storeu_ps(pOutputSamples + i*8 + 0, _mm_unpacklo_ps(leftf, rightf)); + _mm_storeu_ps(pOutputSamples + i*8 + 4, _mm_unpackhi_ps(leftf, rightf)); + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + ma_uint32 side = pInputSamples0U32[i] << shift0; + ma_uint32 right = pInputSamples1U32[i] << shift1; + ma_uint32 left = right + side; + pOutputSamples[i*2+0] = (ma_int32)left / 8388608.0f; + pOutputSamples[i*2+1] = (ma_int32)right / 8388608.0f; + } +} +#endif +#if defined(MA_DR_FLAC_SUPPORT_NEON) +static MA_INLINE void ma_dr_flac_read_pcm_frames_f32__decode_right_side__neon(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, float* pOutputSamples) +{ + ma_uint64 i; + ma_uint64 frameCount4 = frameCount >> 2; + const ma_uint32* pInputSamples0U32 = (const ma_uint32*)pInputSamples0; + const ma_uint32* pInputSamples1U32 = (const ma_uint32*)pInputSamples1; + ma_uint32 shift0 = (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample) - 8; + ma_uint32 shift1 = (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample) - 8; + float32x4_t factor4; + int32x4_t shift0_4; + int32x4_t shift1_4; + MA_DR_FLAC_ASSERT(pFlac->bitsPerSample <= 24); + factor4 = vdupq_n_f32(1.0f / 8388608.0f); + shift0_4 = vdupq_n_s32(shift0); + shift1_4 = vdupq_n_s32(shift1); + for (i = 0; i < frameCount4; ++i) { + uint32x4_t side; + uint32x4_t right; + uint32x4_t left; + float32x4_t leftf; + float32x4_t rightf; + side = 
vshlq_u32(vld1q_u32(pInputSamples0U32 + i*4), shift0_4); + right = vshlq_u32(vld1q_u32(pInputSamples1U32 + i*4), shift1_4); + left = vaddq_u32(right, side); + leftf = vmulq_f32(vcvtq_f32_s32(vreinterpretq_s32_u32(left)), factor4); + rightf = vmulq_f32(vcvtq_f32_s32(vreinterpretq_s32_u32(right)), factor4); + ma_dr_flac__vst2q_f32(pOutputSamples + i*8, vzipq_f32(leftf, rightf)); + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + ma_uint32 side = pInputSamples0U32[i] << shift0; + ma_uint32 right = pInputSamples1U32[i] << shift1; + ma_uint32 left = right + side; + pOutputSamples[i*2+0] = (ma_int32)left / 8388608.0f; + pOutputSamples[i*2+1] = (ma_int32)right / 8388608.0f; + } +} +#endif +static MA_INLINE void ma_dr_flac_read_pcm_frames_f32__decode_right_side(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, float* pOutputSamples) +{ +#if defined(MA_DR_FLAC_SUPPORT_SSE2) + if (ma_dr_flac__gIsSSE2Supported && pFlac->bitsPerSample <= 24) { + ma_dr_flac_read_pcm_frames_f32__decode_right_side__sse2(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); + } else +#elif defined(MA_DR_FLAC_SUPPORT_NEON) + if (ma_dr_flac__gIsNEONSupported && pFlac->bitsPerSample <= 24) { + ma_dr_flac_read_pcm_frames_f32__decode_right_side__neon(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); + } else +#endif + { +#if 0 + ma_dr_flac_read_pcm_frames_f32__decode_right_side__reference(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); +#else + ma_dr_flac_read_pcm_frames_f32__decode_right_side__scalar(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); +#endif + } +} +#if 0 +static MA_INLINE void ma_dr_flac_read_pcm_frames_f32__decode_mid_side__reference(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, float* pOutputSamples) +{ + for (ma_uint64 i = 0; i < frameCount; ++i) { + ma_uint32 mid = (ma_uint32)pInputSamples0[i] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 side = (ma_uint32)pInputSamples1[i] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + mid = (mid << 1) | (side & 0x01); + pOutputSamples[i*2+0] = (float)((((ma_int32)(mid + side) >> 1) << (unusedBitsPerSample)) / 2147483648.0); + pOutputSamples[i*2+1] = (float)((((ma_int32)(mid - side) >> 1) << (unusedBitsPerSample)) / 2147483648.0); + } +} +#endif +static MA_INLINE void ma_dr_flac_read_pcm_frames_f32__decode_mid_side__scalar(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, float* pOutputSamples) +{ + ma_uint64 i; + ma_uint64 frameCount4 = frameCount >> 2; + const ma_uint32* pInputSamples0U32 = (const ma_uint32*)pInputSamples0; + const ma_uint32* pInputSamples1U32 = (const ma_uint32*)pInputSamples1; + ma_uint32 shift = unusedBitsPerSample; + float factor = 1 / 2147483648.0; + if (shift > 0) { + shift -= 1; + for (i = 0; i < frameCount4; ++i) { + ma_uint32 temp0L; + ma_uint32 temp1L; + ma_uint32 temp2L; + ma_uint32 temp3L; + ma_uint32 temp0R; + ma_uint32 temp1R; + ma_uint32 temp2R; + ma_uint32 temp3R; + ma_uint32 mid0 = pInputSamples0U32[i*4+0] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 mid1 = pInputSamples0U32[i*4+1] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + 
ma_uint32 mid2 = pInputSamples0U32[i*4+2] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 mid3 = pInputSamples0U32[i*4+3] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 side0 = pInputSamples1U32[i*4+0] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + ma_uint32 side1 = pInputSamples1U32[i*4+1] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + ma_uint32 side2 = pInputSamples1U32[i*4+2] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + ma_uint32 side3 = pInputSamples1U32[i*4+3] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + mid0 = (mid0 << 1) | (side0 & 0x01); + mid1 = (mid1 << 1) | (side1 & 0x01); + mid2 = (mid2 << 1) | (side2 & 0x01); + mid3 = (mid3 << 1) | (side3 & 0x01); + temp0L = (mid0 + side0) << shift; + temp1L = (mid1 + side1) << shift; + temp2L = (mid2 + side2) << shift; + temp3L = (mid3 + side3) << shift; + temp0R = (mid0 - side0) << shift; + temp1R = (mid1 - side1) << shift; + temp2R = (mid2 - side2) << shift; + temp3R = (mid3 - side3) << shift; + pOutputSamples[i*8+0] = (ma_int32)temp0L * factor; + pOutputSamples[i*8+1] = (ma_int32)temp0R * factor; + pOutputSamples[i*8+2] = (ma_int32)temp1L * factor; + pOutputSamples[i*8+3] = (ma_int32)temp1R * factor; + pOutputSamples[i*8+4] = (ma_int32)temp2L * factor; + pOutputSamples[i*8+5] = (ma_int32)temp2R * factor; + pOutputSamples[i*8+6] = (ma_int32)temp3L * factor; + pOutputSamples[i*8+7] = (ma_int32)temp3R * factor; + } + } else { + for (i = 0; i < frameCount4; ++i) { + ma_uint32 temp0L; + ma_uint32 temp1L; + ma_uint32 temp2L; + ma_uint32 temp3L; + ma_uint32 temp0R; + ma_uint32 temp1R; + ma_uint32 temp2R; + ma_uint32 temp3R; + ma_uint32 mid0 = pInputSamples0U32[i*4+0] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 mid1 = pInputSamples0U32[i*4+1] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 mid2 = pInputSamples0U32[i*4+2] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 mid3 = pInputSamples0U32[i*4+3] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 side0 = pInputSamples1U32[i*4+0] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + ma_uint32 side1 = pInputSamples1U32[i*4+1] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + ma_uint32 side2 = pInputSamples1U32[i*4+2] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + ma_uint32 side3 = pInputSamples1U32[i*4+3] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + mid0 = (mid0 << 1) | (side0 & 0x01); + mid1 = (mid1 << 1) | (side1 & 0x01); + mid2 = (mid2 << 1) | (side2 & 0x01); + mid3 = (mid3 << 1) | (side3 & 0x01); + temp0L = (ma_uint32)((ma_int32)(mid0 + side0) >> 1); + temp1L = (ma_uint32)((ma_int32)(mid1 + side1) >> 1); + temp2L = (ma_uint32)((ma_int32)(mid2 + side2) >> 1); + temp3L = (ma_uint32)((ma_int32)(mid3 + side3) >> 1); + temp0R = (ma_uint32)((ma_int32)(mid0 - side0) >> 1); + temp1R = (ma_uint32)((ma_int32)(mid1 - side1) >> 1); + temp2R = (ma_uint32)((ma_int32)(mid2 - side2) >> 1); + temp3R = (ma_uint32)((ma_int32)(mid3 - side3) >> 1); + pOutputSamples[i*8+0] = (ma_int32)temp0L * factor; + pOutputSamples[i*8+1] = (ma_int32)temp0R * factor; + pOutputSamples[i*8+2] = (ma_int32)temp1L * factor; + pOutputSamples[i*8+3] = (ma_int32)temp1R * factor; + pOutputSamples[i*8+4] = (ma_int32)temp2L * factor; + pOutputSamples[i*8+5] = (ma_int32)temp2R * factor; + pOutputSamples[i*8+6] = (ma_int32)temp3L * factor; + pOutputSamples[i*8+7] = 
(ma_int32)temp3R * factor; + } + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + ma_uint32 mid = pInputSamples0U32[i] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 side = pInputSamples1U32[i] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + mid = (mid << 1) | (side & 0x01); + pOutputSamples[i*2+0] = (ma_int32)((ma_uint32)((ma_int32)(mid + side) >> 1) << unusedBitsPerSample) * factor; + pOutputSamples[i*2+1] = (ma_int32)((ma_uint32)((ma_int32)(mid - side) >> 1) << unusedBitsPerSample) * factor; + } +} +#if defined(MA_DR_FLAC_SUPPORT_SSE2) +static MA_INLINE void ma_dr_flac_read_pcm_frames_f32__decode_mid_side__sse2(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, float* pOutputSamples) +{ + ma_uint64 i; + ma_uint64 frameCount4 = frameCount >> 2; + const ma_uint32* pInputSamples0U32 = (const ma_uint32*)pInputSamples0; + const ma_uint32* pInputSamples1U32 = (const ma_uint32*)pInputSamples1; + ma_uint32 shift = unusedBitsPerSample - 8; + float factor; + __m128 factor128; + MA_DR_FLAC_ASSERT(pFlac->bitsPerSample <= 24); + factor = 1.0f / 8388608.0f; + factor128 = _mm_set1_ps(factor); + if (shift == 0) { + for (i = 0; i < frameCount4; ++i) { + __m128i mid; + __m128i side; + __m128i tempL; + __m128i tempR; + __m128 leftf; + __m128 rightf; + mid = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples0 + i), pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample); + side = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples1 + i), pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample); + mid = _mm_or_si128(_mm_slli_epi32(mid, 1), _mm_and_si128(side, _mm_set1_epi32(0x01))); + tempL = _mm_srai_epi32(_mm_add_epi32(mid, side), 1); + tempR = _mm_srai_epi32(_mm_sub_epi32(mid, side), 1); + leftf = _mm_mul_ps(_mm_cvtepi32_ps(tempL), factor128); + rightf = _mm_mul_ps(_mm_cvtepi32_ps(tempR), factor128); + _mm_storeu_ps(pOutputSamples + i*8 + 0, _mm_unpacklo_ps(leftf, rightf)); + _mm_storeu_ps(pOutputSamples + i*8 + 4, _mm_unpackhi_ps(leftf, rightf)); + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + ma_uint32 mid = pInputSamples0U32[i] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 side = pInputSamples1U32[i] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + mid = (mid << 1) | (side & 0x01); + pOutputSamples[i*2+0] = ((ma_int32)(mid + side) >> 1) * factor; + pOutputSamples[i*2+1] = ((ma_int32)(mid - side) >> 1) * factor; + } + } else { + shift -= 1; + for (i = 0; i < frameCount4; ++i) { + __m128i mid; + __m128i side; + __m128i tempL; + __m128i tempR; + __m128 leftf; + __m128 rightf; + mid = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples0 + i), pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample); + side = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples1 + i), pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample); + mid = _mm_or_si128(_mm_slli_epi32(mid, 1), _mm_and_si128(side, _mm_set1_epi32(0x01))); + tempL = _mm_slli_epi32(_mm_add_epi32(mid, side), shift); + tempR = _mm_slli_epi32(_mm_sub_epi32(mid, side), shift); + leftf = _mm_mul_ps(_mm_cvtepi32_ps(tempL), factor128); + rightf = _mm_mul_ps(_mm_cvtepi32_ps(tempR), factor128); + _mm_storeu_ps(pOutputSamples + i*8 + 0, _mm_unpacklo_ps(leftf, rightf)); + _mm_storeu_ps(pOutputSamples + i*8 + 4, _mm_unpackhi_ps(leftf, rightf)); + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + ma_uint32 mid = 
pInputSamples0U32[i] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 side = pInputSamples1U32[i] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + mid = (mid << 1) | (side & 0x01); + pOutputSamples[i*2+0] = (ma_int32)((mid + side) << shift) * factor; + pOutputSamples[i*2+1] = (ma_int32)((mid - side) << shift) * factor; + } + } +} +#endif +#if defined(MA_DR_FLAC_SUPPORT_NEON) +static MA_INLINE void ma_dr_flac_read_pcm_frames_f32__decode_mid_side__neon(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, float* pOutputSamples) +{ + ma_uint64 i; + ma_uint64 frameCount4 = frameCount >> 2; + const ma_uint32* pInputSamples0U32 = (const ma_uint32*)pInputSamples0; + const ma_uint32* pInputSamples1U32 = (const ma_uint32*)pInputSamples1; + ma_uint32 shift = unusedBitsPerSample - 8; + float factor; + float32x4_t factor4; + int32x4_t shift4; + int32x4_t wbps0_4; + int32x4_t wbps1_4; + MA_DR_FLAC_ASSERT(pFlac->bitsPerSample <= 24); + factor = 1.0f / 8388608.0f; + factor4 = vdupq_n_f32(factor); + wbps0_4 = vdupq_n_s32(pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample); + wbps1_4 = vdupq_n_s32(pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample); + if (shift == 0) { + for (i = 0; i < frameCount4; ++i) { + int32x4_t lefti; + int32x4_t righti; + float32x4_t leftf; + float32x4_t rightf; + uint32x4_t mid = vshlq_u32(vld1q_u32(pInputSamples0U32 + i*4), wbps0_4); + uint32x4_t side = vshlq_u32(vld1q_u32(pInputSamples1U32 + i*4), wbps1_4); + mid = vorrq_u32(vshlq_n_u32(mid, 1), vandq_u32(side, vdupq_n_u32(1))); + lefti = vshrq_n_s32(vreinterpretq_s32_u32(vaddq_u32(mid, side)), 1); + righti = vshrq_n_s32(vreinterpretq_s32_u32(vsubq_u32(mid, side)), 1); + leftf = vmulq_f32(vcvtq_f32_s32(lefti), factor4); + rightf = vmulq_f32(vcvtq_f32_s32(righti), factor4); + ma_dr_flac__vst2q_f32(pOutputSamples + i*8, vzipq_f32(leftf, rightf)); + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + ma_uint32 mid = pInputSamples0U32[i] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 side = pInputSamples1U32[i] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + mid = (mid << 1) | (side & 0x01); + pOutputSamples[i*2+0] = ((ma_int32)(mid + side) >> 1) * factor; + pOutputSamples[i*2+1] = ((ma_int32)(mid - side) >> 1) * factor; + } + } else { + shift -= 1; + shift4 = vdupq_n_s32(shift); + for (i = 0; i < frameCount4; ++i) { + uint32x4_t mid; + uint32x4_t side; + int32x4_t lefti; + int32x4_t righti; + float32x4_t leftf; + float32x4_t rightf; + mid = vshlq_u32(vld1q_u32(pInputSamples0U32 + i*4), wbps0_4); + side = vshlq_u32(vld1q_u32(pInputSamples1U32 + i*4), wbps1_4); + mid = vorrq_u32(vshlq_n_u32(mid, 1), vandq_u32(side, vdupq_n_u32(1))); + lefti = vreinterpretq_s32_u32(vshlq_u32(vaddq_u32(mid, side), shift4)); + righti = vreinterpretq_s32_u32(vshlq_u32(vsubq_u32(mid, side), shift4)); + leftf = vmulq_f32(vcvtq_f32_s32(lefti), factor4); + rightf = vmulq_f32(vcvtq_f32_s32(righti), factor4); + ma_dr_flac__vst2q_f32(pOutputSamples + i*8, vzipq_f32(leftf, rightf)); + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + ma_uint32 mid = pInputSamples0U32[i] << pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 side = pInputSamples1U32[i] << pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + mid = (mid << 1) | (side & 0x01); + pOutputSamples[i*2+0] = (ma_int32)((mid + side) << shift) * factor; + pOutputSamples[i*2+1] = (ma_int32)((mid - 
side) << shift) * factor; + } + } +} +#endif +static MA_INLINE void ma_dr_flac_read_pcm_frames_f32__decode_mid_side(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, float* pOutputSamples) +{ +#if defined(MA_DR_FLAC_SUPPORT_SSE2) + if (ma_dr_flac__gIsSSE2Supported && pFlac->bitsPerSample <= 24) { + ma_dr_flac_read_pcm_frames_f32__decode_mid_side__sse2(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); + } else +#elif defined(MA_DR_FLAC_SUPPORT_NEON) + if (ma_dr_flac__gIsNEONSupported && pFlac->bitsPerSample <= 24) { + ma_dr_flac_read_pcm_frames_f32__decode_mid_side__neon(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); + } else +#endif + { +#if 0 + ma_dr_flac_read_pcm_frames_f32__decode_mid_side__reference(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); +#else + ma_dr_flac_read_pcm_frames_f32__decode_mid_side__scalar(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); +#endif + } +} +#if 0 +static MA_INLINE void ma_dr_flac_read_pcm_frames_f32__decode_independent_stereo__reference(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, float* pOutputSamples) +{ + for (ma_uint64 i = 0; i < frameCount; ++i) { + pOutputSamples[i*2+0] = (float)((ma_int32)((ma_uint32)pInputSamples0[i] << (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample)) / 2147483648.0); + pOutputSamples[i*2+1] = (float)((ma_int32)((ma_uint32)pInputSamples1[i] << (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample)) / 2147483648.0); + } +} +#endif +static MA_INLINE void ma_dr_flac_read_pcm_frames_f32__decode_independent_stereo__scalar(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, float* pOutputSamples) +{ + ma_uint64 i; + ma_uint64 frameCount4 = frameCount >> 2; + const ma_uint32* pInputSamples0U32 = (const ma_uint32*)pInputSamples0; + const ma_uint32* pInputSamples1U32 = (const ma_uint32*)pInputSamples1; + ma_uint32 shift0 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample; + ma_uint32 shift1 = unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample; + float factor = 1 / 2147483648.0; + for (i = 0; i < frameCount4; ++i) { + ma_uint32 tempL0 = pInputSamples0U32[i*4+0] << shift0; + ma_uint32 tempL1 = pInputSamples0U32[i*4+1] << shift0; + ma_uint32 tempL2 = pInputSamples0U32[i*4+2] << shift0; + ma_uint32 tempL3 = pInputSamples0U32[i*4+3] << shift0; + ma_uint32 tempR0 = pInputSamples1U32[i*4+0] << shift1; + ma_uint32 tempR1 = pInputSamples1U32[i*4+1] << shift1; + ma_uint32 tempR2 = pInputSamples1U32[i*4+2] << shift1; + ma_uint32 tempR3 = pInputSamples1U32[i*4+3] << shift1; + pOutputSamples[i*8+0] = (ma_int32)tempL0 * factor; + pOutputSamples[i*8+1] = (ma_int32)tempR0 * factor; + pOutputSamples[i*8+2] = (ma_int32)tempL1 * factor; + pOutputSamples[i*8+3] = (ma_int32)tempR1 * factor; + pOutputSamples[i*8+4] = (ma_int32)tempL2 * factor; + pOutputSamples[i*8+5] = (ma_int32)tempR2 * factor; + pOutputSamples[i*8+6] = (ma_int32)tempL3 * factor; + pOutputSamples[i*8+7] = (ma_int32)tempR3 * factor; + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + pOutputSamples[i*2+0] = (ma_int32)(pInputSamples0U32[i] << shift0) * 
factor; + pOutputSamples[i*2+1] = (ma_int32)(pInputSamples1U32[i] << shift1) * factor; + } +} +#if defined(MA_DR_FLAC_SUPPORT_SSE2) +static MA_INLINE void ma_dr_flac_read_pcm_frames_f32__decode_independent_stereo__sse2(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, float* pOutputSamples) +{ + ma_uint64 i; + ma_uint64 frameCount4 = frameCount >> 2; + const ma_uint32* pInputSamples0U32 = (const ma_uint32*)pInputSamples0; + const ma_uint32* pInputSamples1U32 = (const ma_uint32*)pInputSamples1; + ma_uint32 shift0 = (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample) - 8; + ma_uint32 shift1 = (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample) - 8; + float factor = 1.0f / 8388608.0f; + __m128 factor128 = _mm_set1_ps(factor); + for (i = 0; i < frameCount4; ++i) { + __m128i lefti; + __m128i righti; + __m128 leftf; + __m128 rightf; + lefti = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples0 + i), shift0); + righti = _mm_slli_epi32(_mm_loadu_si128((const __m128i*)pInputSamples1 + i), shift1); + leftf = _mm_mul_ps(_mm_cvtepi32_ps(lefti), factor128); + rightf = _mm_mul_ps(_mm_cvtepi32_ps(righti), factor128); + _mm_storeu_ps(pOutputSamples + i*8 + 0, _mm_unpacklo_ps(leftf, rightf)); + _mm_storeu_ps(pOutputSamples + i*8 + 4, _mm_unpackhi_ps(leftf, rightf)); + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + pOutputSamples[i*2+0] = (ma_int32)(pInputSamples0U32[i] << shift0) * factor; + pOutputSamples[i*2+1] = (ma_int32)(pInputSamples1U32[i] << shift1) * factor; + } +} +#endif +#if defined(MA_DR_FLAC_SUPPORT_NEON) +static MA_INLINE void ma_dr_flac_read_pcm_frames_f32__decode_independent_stereo__neon(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, float* pOutputSamples) +{ + ma_uint64 i; + ma_uint64 frameCount4 = frameCount >> 2; + const ma_uint32* pInputSamples0U32 = (const ma_uint32*)pInputSamples0; + const ma_uint32* pInputSamples1U32 = (const ma_uint32*)pInputSamples1; + ma_uint32 shift0 = (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[0].wastedBitsPerSample) - 8; + ma_uint32 shift1 = (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[1].wastedBitsPerSample) - 8; + float factor = 1.0f / 8388608.0f; + float32x4_t factor4 = vdupq_n_f32(factor); + int32x4_t shift0_4 = vdupq_n_s32(shift0); + int32x4_t shift1_4 = vdupq_n_s32(shift1); + for (i = 0; i < frameCount4; ++i) { + int32x4_t lefti; + int32x4_t righti; + float32x4_t leftf; + float32x4_t rightf; + lefti = vreinterpretq_s32_u32(vshlq_u32(vld1q_u32(pInputSamples0U32 + i*4), shift0_4)); + righti = vreinterpretq_s32_u32(vshlq_u32(vld1q_u32(pInputSamples1U32 + i*4), shift1_4)); + leftf = vmulq_f32(vcvtq_f32_s32(lefti), factor4); + rightf = vmulq_f32(vcvtq_f32_s32(righti), factor4); + ma_dr_flac__vst2q_f32(pOutputSamples + i*8, vzipq_f32(leftf, rightf)); + } + for (i = (frameCount4 << 2); i < frameCount; ++i) { + pOutputSamples[i*2+0] = (ma_int32)(pInputSamples0U32[i] << shift0) * factor; + pOutputSamples[i*2+1] = (ma_int32)(pInputSamples1U32[i] << shift1) * factor; + } +} +#endif +static MA_INLINE void ma_dr_flac_read_pcm_frames_f32__decode_independent_stereo(ma_dr_flac* pFlac, ma_uint64 frameCount, ma_uint32 unusedBitsPerSample, const ma_int32* pInputSamples0, const ma_int32* pInputSamples1, float* pOutputSamples) +{ +#if defined(MA_DR_FLAC_SUPPORT_SSE2) + if 
(ma_dr_flac__gIsSSE2Supported && pFlac->bitsPerSample <= 24) { + ma_dr_flac_read_pcm_frames_f32__decode_independent_stereo__sse2(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); + } else +#elif defined(MA_DR_FLAC_SUPPORT_NEON) + if (ma_dr_flac__gIsNEONSupported && pFlac->bitsPerSample <= 24) { + ma_dr_flac_read_pcm_frames_f32__decode_independent_stereo__neon(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); + } else +#endif + { +#if 0 + ma_dr_flac_read_pcm_frames_f32__decode_independent_stereo__reference(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); +#else + ma_dr_flac_read_pcm_frames_f32__decode_independent_stereo__scalar(pFlac, frameCount, unusedBitsPerSample, pInputSamples0, pInputSamples1, pOutputSamples); +#endif + } +} +MA_API ma_uint64 ma_dr_flac_read_pcm_frames_f32(ma_dr_flac* pFlac, ma_uint64 framesToRead, float* pBufferOut) +{ + ma_uint64 framesRead; + ma_uint32 unusedBitsPerSample; + if (pFlac == NULL || framesToRead == 0) { + return 0; + } + if (pBufferOut == NULL) { + return ma_dr_flac__seek_forward_by_pcm_frames(pFlac, framesToRead); + } + MA_DR_FLAC_ASSERT(pFlac->bitsPerSample <= 32); + unusedBitsPerSample = 32 - pFlac->bitsPerSample; + framesRead = 0; + while (framesToRead > 0) { + if (pFlac->currentFLACFrame.pcmFramesRemaining == 0) { + if (!ma_dr_flac__read_and_decode_next_flac_frame(pFlac)) { + break; + } + } else { + unsigned int channelCount = ma_dr_flac__get_channel_count_from_channel_assignment(pFlac->currentFLACFrame.header.channelAssignment); + ma_uint64 iFirstPCMFrame = pFlac->currentFLACFrame.header.blockSizeInPCMFrames - pFlac->currentFLACFrame.pcmFramesRemaining; + ma_uint64 frameCountThisIteration = framesToRead; + if (frameCountThisIteration > pFlac->currentFLACFrame.pcmFramesRemaining) { + frameCountThisIteration = pFlac->currentFLACFrame.pcmFramesRemaining; + } + if (channelCount == 2) { + const ma_int32* pDecodedSamples0 = pFlac->currentFLACFrame.subframes[0].pSamplesS32 + iFirstPCMFrame; + const ma_int32* pDecodedSamples1 = pFlac->currentFLACFrame.subframes[1].pSamplesS32 + iFirstPCMFrame; + switch (pFlac->currentFLACFrame.header.channelAssignment) + { + case MA_DR_FLAC_CHANNEL_ASSIGNMENT_LEFT_SIDE: + { + ma_dr_flac_read_pcm_frames_f32__decode_left_side(pFlac, frameCountThisIteration, unusedBitsPerSample, pDecodedSamples0, pDecodedSamples1, pBufferOut); + } break; + case MA_DR_FLAC_CHANNEL_ASSIGNMENT_RIGHT_SIDE: + { + ma_dr_flac_read_pcm_frames_f32__decode_right_side(pFlac, frameCountThisIteration, unusedBitsPerSample, pDecodedSamples0, pDecodedSamples1, pBufferOut); + } break; + case MA_DR_FLAC_CHANNEL_ASSIGNMENT_MID_SIDE: + { + ma_dr_flac_read_pcm_frames_f32__decode_mid_side(pFlac, frameCountThisIteration, unusedBitsPerSample, pDecodedSamples0, pDecodedSamples1, pBufferOut); + } break; + case MA_DR_FLAC_CHANNEL_ASSIGNMENT_INDEPENDENT: + default: + { + ma_dr_flac_read_pcm_frames_f32__decode_independent_stereo(pFlac, frameCountThisIteration, unusedBitsPerSample, pDecodedSamples0, pDecodedSamples1, pBufferOut); + } break; + } + } else { + ma_uint64 i; + for (i = 0; i < frameCountThisIteration; ++i) { + unsigned int j; + for (j = 0; j < channelCount; ++j) { + ma_int32 sampleS32 = (ma_int32)((ma_uint32)(pFlac->currentFLACFrame.subframes[j].pSamplesS32[iFirstPCMFrame + i]) << (unusedBitsPerSample + pFlac->currentFLACFrame.subframes[j].wastedBitsPerSample)); + pBufferOut[(i*channelCount)+j] = (float)(sampleS32 / 2147483648.0); + } + } + } + 
framesRead += frameCountThisIteration; + pBufferOut += frameCountThisIteration * channelCount; + framesToRead -= frameCountThisIteration; + pFlac->currentPCMFrame += frameCountThisIteration; + pFlac->currentFLACFrame.pcmFramesRemaining -= (unsigned int)frameCountThisIteration; + } + } + return framesRead; +} +MA_API ma_bool32 ma_dr_flac_seek_to_pcm_frame(ma_dr_flac* pFlac, ma_uint64 pcmFrameIndex) +{ + if (pFlac == NULL) { + return MA_FALSE; + } + if (pFlac->currentPCMFrame == pcmFrameIndex) { + return MA_TRUE; + } + if (pFlac->firstFLACFramePosInBytes == 0) { + return MA_FALSE; + } + if (pcmFrameIndex == 0) { + pFlac->currentPCMFrame = 0; + return ma_dr_flac__seek_to_first_frame(pFlac); + } else { + ma_bool32 wasSuccessful = MA_FALSE; + ma_uint64 originalPCMFrame = pFlac->currentPCMFrame; + if (pcmFrameIndex > pFlac->totalPCMFrameCount) { + pcmFrameIndex = pFlac->totalPCMFrameCount; + } + if (pcmFrameIndex > pFlac->currentPCMFrame) { + ma_uint32 offset = (ma_uint32)(pcmFrameIndex - pFlac->currentPCMFrame); + if (pFlac->currentFLACFrame.pcmFramesRemaining > offset) { + pFlac->currentFLACFrame.pcmFramesRemaining -= offset; + pFlac->currentPCMFrame = pcmFrameIndex; + return MA_TRUE; + } + } else { + ma_uint32 offsetAbs = (ma_uint32)(pFlac->currentPCMFrame - pcmFrameIndex); + ma_uint32 currentFLACFramePCMFrameCount = pFlac->currentFLACFrame.header.blockSizeInPCMFrames; + ma_uint32 currentFLACFramePCMFramesConsumed = currentFLACFramePCMFrameCount - pFlac->currentFLACFrame.pcmFramesRemaining; + if (currentFLACFramePCMFramesConsumed > offsetAbs) { + pFlac->currentFLACFrame.pcmFramesRemaining += offsetAbs; + pFlac->currentPCMFrame = pcmFrameIndex; + return MA_TRUE; + } + } +#ifndef MA_DR_FLAC_NO_OGG + if (pFlac->container == ma_dr_flac_container_ogg) + { + wasSuccessful = ma_dr_flac_ogg__seek_to_pcm_frame(pFlac, pcmFrameIndex); + } + else +#endif + { + if (!pFlac->_noSeekTableSeek) { + wasSuccessful = ma_dr_flac__seek_to_pcm_frame__seek_table(pFlac, pcmFrameIndex); + } +#if !defined(MA_DR_FLAC_NO_CRC) + if (!wasSuccessful && !pFlac->_noBinarySearchSeek && pFlac->totalPCMFrameCount > 0) { + wasSuccessful = ma_dr_flac__seek_to_pcm_frame__binary_search(pFlac, pcmFrameIndex); + } +#endif + if (!wasSuccessful && !pFlac->_noBruteForceSeek) { + wasSuccessful = ma_dr_flac__seek_to_pcm_frame__brute_force(pFlac, pcmFrameIndex); + } + } + if (wasSuccessful) { + pFlac->currentPCMFrame = pcmFrameIndex; + } else { + if (ma_dr_flac_seek_to_pcm_frame(pFlac, originalPCMFrame) == MA_FALSE) { + ma_dr_flac_seek_to_pcm_frame(pFlac, 0); + } + } + return wasSuccessful; + } +} +#define MA_DR_FLAC_DEFINE_FULL_READ_AND_CLOSE(extension, type) \ +static type* ma_dr_flac__full_read_and_close_ ## extension (ma_dr_flac* pFlac, unsigned int* channelsOut, unsigned int* sampleRateOut, ma_uint64* totalPCMFrameCountOut)\ +{ \ + type* pSampleData = NULL; \ + ma_uint64 totalPCMFrameCount; \ + \ + MA_DR_FLAC_ASSERT(pFlac != NULL); \ + \ + totalPCMFrameCount = pFlac->totalPCMFrameCount; \ + \ + if (totalPCMFrameCount == 0) { \ + type buffer[4096]; \ + ma_uint64 pcmFramesRead; \ + size_t sampleDataBufferSize = sizeof(buffer); \ + \ + pSampleData = (type*)ma_dr_flac__malloc_from_callbacks(sampleDataBufferSize, &pFlac->allocationCallbacks); \ + if (pSampleData == NULL) { \ + goto on_error; \ + } \ + \ + while ((pcmFramesRead = (ma_uint64)ma_dr_flac_read_pcm_frames_##extension(pFlac, sizeof(buffer)/sizeof(buffer[0])/pFlac->channels, buffer)) > 0) { \ + if (((totalPCMFrameCount + pcmFramesRead) * pFlac->channels * sizeof(type)) > 
sampleDataBufferSize) { \ + type* pNewSampleData; \ + size_t newSampleDataBufferSize; \ + \ + newSampleDataBufferSize = sampleDataBufferSize * 2; \ + pNewSampleData = (type*)ma_dr_flac__realloc_from_callbacks(pSampleData, newSampleDataBufferSize, sampleDataBufferSize, &pFlac->allocationCallbacks); \ + if (pNewSampleData == NULL) { \ + ma_dr_flac__free_from_callbacks(pSampleData, &pFlac->allocationCallbacks); \ + goto on_error; \ + } \ + \ + sampleDataBufferSize = newSampleDataBufferSize; \ + pSampleData = pNewSampleData; \ + } \ + \ + MA_DR_FLAC_COPY_MEMORY(pSampleData + (totalPCMFrameCount*pFlac->channels), buffer, (size_t)(pcmFramesRead*pFlac->channels*sizeof(type))); \ + totalPCMFrameCount += pcmFramesRead; \ + } \ + \ + \ + MA_DR_FLAC_ZERO_MEMORY(pSampleData + (totalPCMFrameCount*pFlac->channels), (size_t)(sampleDataBufferSize - totalPCMFrameCount*pFlac->channels*sizeof(type))); \ + } else { \ + ma_uint64 dataSize = totalPCMFrameCount*pFlac->channels*sizeof(type); \ + if (dataSize > (ma_uint64)MA_SIZE_MAX) { \ + goto on_error; \ + } \ + \ + pSampleData = (type*)ma_dr_flac__malloc_from_callbacks((size_t)dataSize, &pFlac->allocationCallbacks); \ + if (pSampleData == NULL) { \ + goto on_error; \ + } \ + \ + totalPCMFrameCount = ma_dr_flac_read_pcm_frames_##extension(pFlac, pFlac->totalPCMFrameCount, pSampleData); \ + } \ + \ + if (sampleRateOut) *sampleRateOut = pFlac->sampleRate; \ + if (channelsOut) *channelsOut = pFlac->channels; \ + if (totalPCMFrameCountOut) *totalPCMFrameCountOut = totalPCMFrameCount; \ + \ + ma_dr_flac_close(pFlac); \ + return pSampleData; \ + \ +on_error: \ + ma_dr_flac_close(pFlac); \ + return NULL; \ +} +MA_DR_FLAC_DEFINE_FULL_READ_AND_CLOSE(s32, ma_int32) +MA_DR_FLAC_DEFINE_FULL_READ_AND_CLOSE(s16, ma_int16) +MA_DR_FLAC_DEFINE_FULL_READ_AND_CLOSE(f32, float) +MA_API ma_int32* ma_dr_flac_open_and_read_pcm_frames_s32(ma_dr_flac_read_proc onRead, ma_dr_flac_seek_proc onSeek, void* pUserData, unsigned int* channelsOut, unsigned int* sampleRateOut, ma_uint64* totalPCMFrameCountOut, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_dr_flac* pFlac; + if (channelsOut) { + *channelsOut = 0; + } + if (sampleRateOut) { + *sampleRateOut = 0; + } + if (totalPCMFrameCountOut) { + *totalPCMFrameCountOut = 0; + } + pFlac = ma_dr_flac_open(onRead, onSeek, pUserData, pAllocationCallbacks); + if (pFlac == NULL) { + return NULL; + } + return ma_dr_flac__full_read_and_close_s32(pFlac, channelsOut, sampleRateOut, totalPCMFrameCountOut); +} +MA_API ma_int16* ma_dr_flac_open_and_read_pcm_frames_s16(ma_dr_flac_read_proc onRead, ma_dr_flac_seek_proc onSeek, void* pUserData, unsigned int* channelsOut, unsigned int* sampleRateOut, ma_uint64* totalPCMFrameCountOut, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_dr_flac* pFlac; + if (channelsOut) { + *channelsOut = 0; + } + if (sampleRateOut) { + *sampleRateOut = 0; + } + if (totalPCMFrameCountOut) { + *totalPCMFrameCountOut = 0; + } + pFlac = ma_dr_flac_open(onRead, onSeek, pUserData, pAllocationCallbacks); + if (pFlac == NULL) { + return NULL; + } + return ma_dr_flac__full_read_and_close_s16(pFlac, channelsOut, sampleRateOut, totalPCMFrameCountOut); +} +MA_API float* ma_dr_flac_open_and_read_pcm_frames_f32(ma_dr_flac_read_proc onRead, ma_dr_flac_seek_proc onSeek, void* pUserData, unsigned int* channelsOut, unsigned int* sampleRateOut, ma_uint64* totalPCMFrameCountOut, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_dr_flac* pFlac; + if (channelsOut) { + *channelsOut = 0; + } + if (sampleRateOut) { + 
*sampleRateOut = 0; + } + if (totalPCMFrameCountOut) { + *totalPCMFrameCountOut = 0; + } + pFlac = ma_dr_flac_open(onRead, onSeek, pUserData, pAllocationCallbacks); + if (pFlac == NULL) { + return NULL; + } + return ma_dr_flac__full_read_and_close_f32(pFlac, channelsOut, sampleRateOut, totalPCMFrameCountOut); +} +#ifndef MA_DR_FLAC_NO_STDIO +MA_API ma_int32* ma_dr_flac_open_file_and_read_pcm_frames_s32(const char* filename, unsigned int* channels, unsigned int* sampleRate, ma_uint64* totalPCMFrameCount, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_dr_flac* pFlac; + if (sampleRate) { + *sampleRate = 0; + } + if (channels) { + *channels = 0; + } + if (totalPCMFrameCount) { + *totalPCMFrameCount = 0; + } + pFlac = ma_dr_flac_open_file(filename, pAllocationCallbacks); + if (pFlac == NULL) { + return NULL; + } + return ma_dr_flac__full_read_and_close_s32(pFlac, channels, sampleRate, totalPCMFrameCount); +} +MA_API ma_int16* ma_dr_flac_open_file_and_read_pcm_frames_s16(const char* filename, unsigned int* channels, unsigned int* sampleRate, ma_uint64* totalPCMFrameCount, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_dr_flac* pFlac; + if (sampleRate) { + *sampleRate = 0; + } + if (channels) { + *channels = 0; + } + if (totalPCMFrameCount) { + *totalPCMFrameCount = 0; + } + pFlac = ma_dr_flac_open_file(filename, pAllocationCallbacks); + if (pFlac == NULL) { + return NULL; + } + return ma_dr_flac__full_read_and_close_s16(pFlac, channels, sampleRate, totalPCMFrameCount); +} +MA_API float* ma_dr_flac_open_file_and_read_pcm_frames_f32(const char* filename, unsigned int* channels, unsigned int* sampleRate, ma_uint64* totalPCMFrameCount, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_dr_flac* pFlac; + if (sampleRate) { + *sampleRate = 0; + } + if (channels) { + *channels = 0; + } + if (totalPCMFrameCount) { + *totalPCMFrameCount = 0; + } + pFlac = ma_dr_flac_open_file(filename, pAllocationCallbacks); + if (pFlac == NULL) { + return NULL; + } + return ma_dr_flac__full_read_and_close_f32(pFlac, channels, sampleRate, totalPCMFrameCount); +} +#endif +MA_API ma_int32* ma_dr_flac_open_memory_and_read_pcm_frames_s32(const void* data, size_t dataSize, unsigned int* channels, unsigned int* sampleRate, ma_uint64* totalPCMFrameCount, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_dr_flac* pFlac; + if (sampleRate) { + *sampleRate = 0; + } + if (channels) { + *channels = 0; + } + if (totalPCMFrameCount) { + *totalPCMFrameCount = 0; + } + pFlac = ma_dr_flac_open_memory(data, dataSize, pAllocationCallbacks); + if (pFlac == NULL) { + return NULL; + } + return ma_dr_flac__full_read_and_close_s32(pFlac, channels, sampleRate, totalPCMFrameCount); +} +MA_API ma_int16* ma_dr_flac_open_memory_and_read_pcm_frames_s16(const void* data, size_t dataSize, unsigned int* channels, unsigned int* sampleRate, ma_uint64* totalPCMFrameCount, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_dr_flac* pFlac; + if (sampleRate) { + *sampleRate = 0; + } + if (channels) { + *channels = 0; + } + if (totalPCMFrameCount) { + *totalPCMFrameCount = 0; + } + pFlac = ma_dr_flac_open_memory(data, dataSize, pAllocationCallbacks); + if (pFlac == NULL) { + return NULL; + } + return ma_dr_flac__full_read_and_close_s16(pFlac, channels, sampleRate, totalPCMFrameCount); +} +MA_API float* ma_dr_flac_open_memory_and_read_pcm_frames_f32(const void* data, size_t dataSize, unsigned int* channels, unsigned int* sampleRate, ma_uint64* totalPCMFrameCount, const ma_allocation_callbacks* 
pAllocationCallbacks) +{ + ma_dr_flac* pFlac; + if (sampleRate) { + *sampleRate = 0; + } + if (channels) { + *channels = 0; + } + if (totalPCMFrameCount) { + *totalPCMFrameCount = 0; + } + pFlac = ma_dr_flac_open_memory(data, dataSize, pAllocationCallbacks); + if (pFlac == NULL) { + return NULL; + } + return ma_dr_flac__full_read_and_close_f32(pFlac, channels, sampleRate, totalPCMFrameCount); +} +MA_API void ma_dr_flac_free(void* p, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pAllocationCallbacks != NULL) { + ma_dr_flac__free_from_callbacks(p, pAllocationCallbacks); + } else { + ma_dr_flac__free_default(p, NULL); + } +} +MA_API void ma_dr_flac_init_vorbis_comment_iterator(ma_dr_flac_vorbis_comment_iterator* pIter, ma_uint32 commentCount, const void* pComments) +{ + if (pIter == NULL) { + return; + } + pIter->countRemaining = commentCount; + pIter->pRunningData = (const char*)pComments; +} +MA_API const char* ma_dr_flac_next_vorbis_comment(ma_dr_flac_vorbis_comment_iterator* pIter, ma_uint32* pCommentLengthOut) +{ + ma_int32 length; + const char* pComment; + if (pCommentLengthOut) { + *pCommentLengthOut = 0; + } + if (pIter == NULL || pIter->countRemaining == 0 || pIter->pRunningData == NULL) { + return NULL; + } + length = ma_dr_flac__le2host_32_ptr_unaligned(pIter->pRunningData); + pIter->pRunningData += 4; + pComment = pIter->pRunningData; + pIter->pRunningData += length; + pIter->countRemaining -= 1; + if (pCommentLengthOut) { + *pCommentLengthOut = length; + } + return pComment; +} +MA_API void ma_dr_flac_init_cuesheet_track_iterator(ma_dr_flac_cuesheet_track_iterator* pIter, ma_uint32 trackCount, const void* pTrackData) +{ + if (pIter == NULL) { + return; + } + pIter->countRemaining = trackCount; + pIter->pRunningData = (const char*)pTrackData; +} +MA_API ma_bool32 ma_dr_flac_next_cuesheet_track(ma_dr_flac_cuesheet_track_iterator* pIter, ma_dr_flac_cuesheet_track* pCuesheetTrack) +{ + ma_dr_flac_cuesheet_track cuesheetTrack; + const char* pRunningData; + ma_uint64 offsetHi; + ma_uint64 offsetLo; + if (pIter == NULL || pIter->countRemaining == 0 || pIter->pRunningData == NULL) { + return MA_FALSE; + } + pRunningData = pIter->pRunningData; + offsetHi = ma_dr_flac__be2host_32(*(const ma_uint32*)pRunningData); pRunningData += 4; + offsetLo = ma_dr_flac__be2host_32(*(const ma_uint32*)pRunningData); pRunningData += 4; + cuesheetTrack.offset = offsetLo | (offsetHi << 32); + cuesheetTrack.trackNumber = pRunningData[0]; pRunningData += 1; + MA_DR_FLAC_COPY_MEMORY(cuesheetTrack.ISRC, pRunningData, sizeof(cuesheetTrack.ISRC)); pRunningData += 12; + cuesheetTrack.isAudio = (pRunningData[0] & 0x80) != 0; + cuesheetTrack.preEmphasis = (pRunningData[0] & 0x40) != 0; pRunningData += 14; + cuesheetTrack.indexCount = pRunningData[0]; pRunningData += 1; + cuesheetTrack.pIndexPoints = (const ma_dr_flac_cuesheet_track_index*)pRunningData; pRunningData += cuesheetTrack.indexCount * sizeof(ma_dr_flac_cuesheet_track_index); + pIter->pRunningData = pRunningData; + pIter->countRemaining -= 1; + if (pCuesheetTrack) { + *pCuesheetTrack = cuesheetTrack; + } + return MA_TRUE; +} +#if defined(__clang__) || (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6))) + #pragma GCC diagnostic pop +#endif +#endif +/* dr_flac_c end */ +#endif /* MA_DR_FLAC_IMPLEMENTATION */ +#endif /* MA_NO_FLAC */ + +#if !defined(MA_NO_MP3) && !defined(MA_NO_DECODING) +#if !defined(MA_DR_MP3_IMPLEMENTATION) +/* dr_mp3_c begin */ +#ifndef ma_dr_mp3_c +#define ma_dr_mp3_c +#include <stdlib.h> +#include <string.h>
+#include <limits.h> +MA_API void ma_dr_mp3_version(ma_uint32* pMajor, ma_uint32* pMinor, ma_uint32* pRevision) +{ + if (pMajor) { + *pMajor = MA_DR_MP3_VERSION_MAJOR; + } + if (pMinor) { + *pMinor = MA_DR_MP3_VERSION_MINOR; + } + if (pRevision) { + *pRevision = MA_DR_MP3_VERSION_REVISION; + } +} +MA_API const char* ma_dr_mp3_version_string(void) +{ + return MA_DR_MP3_VERSION_STRING; +} +#if defined(__TINYC__) +#define MA_DR_MP3_NO_SIMD +#endif +#define MA_DR_MP3_OFFSET_PTR(p, offset) ((void*)((ma_uint8*)(p) + (offset))) +#define MA_DR_MP3_MAX_FREE_FORMAT_FRAME_SIZE 2304 +#ifndef MA_DR_MP3_MAX_FRAME_SYNC_MATCHES +#define MA_DR_MP3_MAX_FRAME_SYNC_MATCHES 10 +#endif +#define MA_DR_MP3_MAX_L3_FRAME_PAYLOAD_BYTES MA_DR_MP3_MAX_FREE_FORMAT_FRAME_SIZE +#define MA_DR_MP3_MAX_BITRESERVOIR_BYTES 511 +#define MA_DR_MP3_SHORT_BLOCK_TYPE 2 +#define MA_DR_MP3_STOP_BLOCK_TYPE 3 +#define MA_DR_MP3_MODE_MONO 3 +#define MA_DR_MP3_MODE_JOINT_STEREO 1 +#define MA_DR_MP3_HDR_SIZE 4 +#define MA_DR_MP3_HDR_IS_MONO(h) (((h[3]) & 0xC0) == 0xC0) +#define MA_DR_MP3_HDR_IS_MS_STEREO(h) (((h[3]) & 0xE0) == 0x60) +#define MA_DR_MP3_HDR_IS_FREE_FORMAT(h) (((h[2]) & 0xF0) == 0) +#define MA_DR_MP3_HDR_IS_CRC(h) (!((h[1]) & 1)) +#define MA_DR_MP3_HDR_TEST_PADDING(h) ((h[2]) & 0x2) +#define MA_DR_MP3_HDR_TEST_MPEG1(h) ((h[1]) & 0x8) +#define MA_DR_MP3_HDR_TEST_NOT_MPEG25(h) ((h[1]) & 0x10) +#define MA_DR_MP3_HDR_TEST_I_STEREO(h) ((h[3]) & 0x10) +#define MA_DR_MP3_HDR_TEST_MS_STEREO(h) ((h[3]) & 0x20) +#define MA_DR_MP3_HDR_GET_STEREO_MODE(h) (((h[3]) >> 6) & 3) +#define MA_DR_MP3_HDR_GET_STEREO_MODE_EXT(h) (((h[3]) >> 4) & 3) +#define MA_DR_MP3_HDR_GET_LAYER(h) (((h[1]) >> 1) & 3) +#define MA_DR_MP3_HDR_GET_BITRATE(h) ((h[2]) >> 4) +#define MA_DR_MP3_HDR_GET_SAMPLE_RATE(h) (((h[2]) >> 2) & 3) +#define MA_DR_MP3_HDR_GET_MY_SAMPLE_RATE(h) (MA_DR_MP3_HDR_GET_SAMPLE_RATE(h) + (((h[1] >> 3) & 1) + ((h[1] >> 4) & 1))*3) +#define MA_DR_MP3_HDR_IS_FRAME_576(h) ((h[1] & 14) == 2) +#define MA_DR_MP3_HDR_IS_LAYER_1(h) ((h[1] & 6) == 6) +#define MA_DR_MP3_BITS_DEQUANTIZER_OUT -1 +#define MA_DR_MP3_MAX_SCF (255 + MA_DR_MP3_BITS_DEQUANTIZER_OUT*4 - 210) +#define MA_DR_MP3_MAX_SCFI ((MA_DR_MP3_MAX_SCF + 3) & ~3) +#define MA_DR_MP3_MIN(a, b) ((a) > (b) ? (b) : (a)) +#define MA_DR_MP3_MAX(a, b) ((a) < (b) ? 
(b) : (a)) +#if !defined(MA_DR_MP3_NO_SIMD) +#if !defined(MA_DR_MP3_ONLY_SIMD) && (defined(_M_X64) || defined(__x86_64__) || defined(__aarch64__) || defined(_M_ARM64) || defined(_M_ARM64EC)) +#define MA_DR_MP3_ONLY_SIMD +#endif +#if ((defined(_MSC_VER) && _MSC_VER >= 1400) && defined(_M_X64)) || ((defined(__i386) || defined(_M_IX86) || defined(__i386__) || defined(__x86_64__)) && ((defined(_M_IX86_FP) && _M_IX86_FP == 2) || defined(__SSE2__))) +#if defined(_MSC_VER) +#include <intrin.h> +#endif +#include <immintrin.h> +#define MA_DR_MP3_HAVE_SSE 1 +#define MA_DR_MP3_HAVE_SIMD 1 +#define MA_DR_MP3_VSTORE _mm_storeu_ps +#define MA_DR_MP3_VLD _mm_loadu_ps +#define MA_DR_MP3_VSET _mm_set1_ps +#define MA_DR_MP3_VADD _mm_add_ps +#define MA_DR_MP3_VSUB _mm_sub_ps +#define MA_DR_MP3_VMUL _mm_mul_ps +#define MA_DR_MP3_VMAC(a, x, y) _mm_add_ps(a, _mm_mul_ps(x, y)) +#define MA_DR_MP3_VMSB(a, x, y) _mm_sub_ps(a, _mm_mul_ps(x, y)) +#define MA_DR_MP3_VMUL_S(x, s) _mm_mul_ps(x, _mm_set1_ps(s)) +#define MA_DR_MP3_VREV(x) _mm_shuffle_ps(x, x, _MM_SHUFFLE(0, 1, 2, 3)) +typedef __m128 ma_dr_mp3_f4; +#if defined(_MSC_VER) || defined(MA_DR_MP3_ONLY_SIMD) +#define ma_dr_mp3_cpuid __cpuid +#else +static __inline__ __attribute__((always_inline)) void ma_dr_mp3_cpuid(int CPUInfo[], const int InfoType) +{ +#if defined(__PIC__) + __asm__ __volatile__( +#if defined(__x86_64__) + "push %%rbx\n" + "cpuid\n" + "xchgl %%ebx, %1\n" + "pop %%rbx\n" +#else + "xchgl %%ebx, %1\n" + "cpuid\n" + "xchgl %%ebx, %1\n" +#endif + : "=a" (CPUInfo[0]), "=r" (CPUInfo[1]), "=c" (CPUInfo[2]), "=d" (CPUInfo[3]) + : "a" (InfoType)); +#else + __asm__ __volatile__( + "cpuid" + : "=a" (CPUInfo[0]), "=b" (CPUInfo[1]), "=c" (CPUInfo[2]), "=d" (CPUInfo[3]) + : "a" (InfoType)); +#endif +} +#endif +static int ma_dr_mp3_have_simd(void) +{ +#ifdef MA_DR_MP3_ONLY_SIMD + return 1; +#else + static int g_have_simd; + int CPUInfo[4]; +#ifdef MINIMP3_TEST + static int g_counter; + if (g_counter++ > 100) + return 0; +#endif + if (g_have_simd) + goto end; + ma_dr_mp3_cpuid(CPUInfo, 0); + if (CPUInfo[0] > 0) + { + ma_dr_mp3_cpuid(CPUInfo, 1); + g_have_simd = (CPUInfo[3] & (1 << 26)) + 1; + return g_have_simd - 1; + } +end: + return g_have_simd - 1; +#endif +} +#elif defined(__ARM_NEON) || defined(__aarch64__) || defined(_M_ARM64) || defined(_M_ARM64EC) +#include <arm_neon.h> +#define MA_DR_MP3_HAVE_SSE 0 +#define MA_DR_MP3_HAVE_SIMD 1 +#define MA_DR_MP3_VSTORE vst1q_f32 +#define MA_DR_MP3_VLD vld1q_f32 +#define MA_DR_MP3_VSET vmovq_n_f32 +#define MA_DR_MP3_VADD vaddq_f32 +#define MA_DR_MP3_VSUB vsubq_f32 +#define MA_DR_MP3_VMUL vmulq_f32 +#define MA_DR_MP3_VMAC(a, x, y) vmlaq_f32(a, x, y) +#define MA_DR_MP3_VMSB(a, x, y) vmlsq_f32(a, x, y) +#define MA_DR_MP3_VMUL_S(x, s) vmulq_f32(x, vmovq_n_f32(s)) +#define MA_DR_MP3_VREV(x) vcombine_f32(vget_high_f32(vrev64q_f32(x)), vget_low_f32(vrev64q_f32(x))) +typedef float32x4_t ma_dr_mp3_f4; +static int ma_dr_mp3_have_simd(void) +{ + return 1; +} +#else +#define MA_DR_MP3_HAVE_SSE 0 +#define MA_DR_MP3_HAVE_SIMD 0 +#ifdef MA_DR_MP3_ONLY_SIMD +#error MA_DR_MP3_ONLY_SIMD used, but SSE/NEON not enabled +#endif +#endif +#else +#define MA_DR_MP3_HAVE_SIMD 0 +#endif +#if defined(__ARM_ARCH) && (__ARM_ARCH >= 6) && !defined(__aarch64__) && !defined(_M_ARM64) && !defined(_M_ARM64EC) && !defined(__ARM_ARCH_6M__) +#define MA_DR_MP3_HAVE_ARMV6 1 +static __inline__ __attribute__((always_inline)) ma_int32 ma_dr_mp3_clip_int16_arm(ma_int32 a) +{ + ma_int32 x = 0; + __asm__ ("ssat %0, #16, %1" : "=r"(x) : "r"(a)); + return x; +} +#else +#define MA_DR_MP3_HAVE_ARMV6 0 
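+/* No ARMv6 "ssat" saturating instruction on this target, so the inline ma_dr_mp3_clip_int16_arm helper above is not defined. */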
+#endif +#ifndef MA_DR_MP3_ASSERT +#include <assert.h> +#define MA_DR_MP3_ASSERT(expression) assert(expression) +#endif +#ifndef MA_DR_MP3_COPY_MEMORY +#define MA_DR_MP3_COPY_MEMORY(dst, src, sz) memcpy((dst), (src), (sz)) +#endif +#ifndef MA_DR_MP3_MOVE_MEMORY +#define MA_DR_MP3_MOVE_MEMORY(dst, src, sz) memmove((dst), (src), (sz)) +#endif +#ifndef MA_DR_MP3_ZERO_MEMORY +#define MA_DR_MP3_ZERO_MEMORY(p, sz) memset((p), 0, (sz)) +#endif +#define MA_DR_MP3_ZERO_OBJECT(p) MA_DR_MP3_ZERO_MEMORY((p), sizeof(*(p))) +#ifndef MA_DR_MP3_MALLOC +#define MA_DR_MP3_MALLOC(sz) malloc((sz)) +#endif +#ifndef MA_DR_MP3_REALLOC +#define MA_DR_MP3_REALLOC(p, sz) realloc((p), (sz)) +#endif +#ifndef MA_DR_MP3_FREE +#define MA_DR_MP3_FREE(p) free((p)) +#endif +typedef struct +{ + const ma_uint8 *buf; + int pos, limit; +} ma_dr_mp3_bs; +typedef struct +{ + float scf[3*64]; + ma_uint8 total_bands, stereo_bands, bitalloc[64], scfcod[64]; +} ma_dr_mp3_L12_scale_info; +typedef struct +{ + ma_uint8 tab_offset, code_tab_width, band_count; +} ma_dr_mp3_L12_subband_alloc; +typedef struct +{ + const ma_uint8 *sfbtab; + ma_uint16 part_23_length, big_values, scalefac_compress; + ma_uint8 global_gain, block_type, mixed_block_flag, n_long_sfb, n_short_sfb; + ma_uint8 table_select[3], region_count[3], subblock_gain[3]; + ma_uint8 preflag, scalefac_scale, count1_table, scfsi; +} ma_dr_mp3_L3_gr_info; +typedef struct +{ + ma_dr_mp3_bs bs; + ma_uint8 maindata[MA_DR_MP3_MAX_BITRESERVOIR_BYTES + MA_DR_MP3_MAX_L3_FRAME_PAYLOAD_BYTES]; + ma_dr_mp3_L3_gr_info gr_info[4]; + float grbuf[2][576], scf[40], syn[18 + 15][2*32]; + ma_uint8 ist_pos[2][39]; +} ma_dr_mp3dec_scratch; +static void ma_dr_mp3_bs_init(ma_dr_mp3_bs *bs, const ma_uint8 *data, int bytes) +{ + bs->buf = data; + bs->pos = 0; + bs->limit = bytes*8; +} +static ma_uint32 ma_dr_mp3_bs_get_bits(ma_dr_mp3_bs *bs, int n) +{ + ma_uint32 next, cache = 0, s = bs->pos & 7; + int shl = n + s; + const ma_uint8 *p = bs->buf + (bs->pos >> 3); + if ((bs->pos += n) > bs->limit) + return 0; + next = *p++ & (255 >> s); + while ((shl -= 8) > 0) + { + cache |= next << shl; + next = *p++; + } + return cache | (next >> -shl); +} +static int ma_dr_mp3_hdr_valid(const ma_uint8 *h) +{ + return h[0] == 0xff && + ((h[1] & 0xF0) == 0xf0 || (h[1] & 0xFE) == 0xe2) && + (MA_DR_MP3_HDR_GET_LAYER(h) != 0) && + (MA_DR_MP3_HDR_GET_BITRATE(h) != 15) && + (MA_DR_MP3_HDR_GET_SAMPLE_RATE(h) != 3); +} +static int ma_dr_mp3_hdr_compare(const ma_uint8 *h1, const ma_uint8 *h2) +{ + return ma_dr_mp3_hdr_valid(h2) && + ((h1[1] ^ h2[1]) & 0xFE) == 0 && + ((h1[2] ^ h2[2]) & 0x0C) == 0 && + !(MA_DR_MP3_HDR_IS_FREE_FORMAT(h1) ^ MA_DR_MP3_HDR_IS_FREE_FORMAT(h2)); +} +static unsigned ma_dr_mp3_hdr_bitrate_kbps(const ma_uint8 *h) +{ + static const ma_uint8 halfrate[2][3][15] = { + { { 0,4,8,12,16,20,24,28,32,40,48,56,64,72,80 }, { 0,4,8,12,16,20,24,28,32,40,48,56,64,72,80 }, { 0,16,24,28,32,40,48,56,64,72,80,88,96,112,128 } }, + { { 0,16,20,24,28,32,40,48,56,64,80,96,112,128,160 }, { 0,16,24,28,32,40,48,56,64,80,96,112,128,160,192 }, { 0,16,32,48,64,80,96,112,128,144,160,176,192,208,224 } }, + }; + return 2*halfrate[!!MA_DR_MP3_HDR_TEST_MPEG1(h)][MA_DR_MP3_HDR_GET_LAYER(h) - 1][MA_DR_MP3_HDR_GET_BITRATE(h)]; +} +static unsigned ma_dr_mp3_hdr_sample_rate_hz(const ma_uint8 *h) +{ + static const unsigned g_hz[3] = { 44100, 48000, 32000 }; + return g_hz[MA_DR_MP3_HDR_GET_SAMPLE_RATE(h)] >> (int)!MA_DR_MP3_HDR_TEST_MPEG1(h) >> (int)!MA_DR_MP3_HDR_TEST_NOT_MPEG25(h); +} +static unsigned ma_dr_mp3_hdr_frame_samples(const ma_uint8 *h) +{ 
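+ /* Layer I frames carry 384 samples; Layer II/III carry 1152, halved to 576 for MPEG-2/2.5 Layer III. */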
+ return MA_DR_MP3_HDR_IS_LAYER_1(h) ? 384 : (1152 >> (int)MA_DR_MP3_HDR_IS_FRAME_576(h)); +} +static int ma_dr_mp3_hdr_frame_bytes(const ma_uint8 *h, int free_format_size) +{ + int frame_bytes = ma_dr_mp3_hdr_frame_samples(h)*ma_dr_mp3_hdr_bitrate_kbps(h)*125/ma_dr_mp3_hdr_sample_rate_hz(h); + if (MA_DR_MP3_HDR_IS_LAYER_1(h)) + { + frame_bytes &= ~3; + } + return frame_bytes ? frame_bytes : free_format_size; +} +static int ma_dr_mp3_hdr_padding(const ma_uint8 *h) +{ + return MA_DR_MP3_HDR_TEST_PADDING(h) ? (MA_DR_MP3_HDR_IS_LAYER_1(h) ? 4 : 1) : 0; +} +#ifndef MA_DR_MP3_ONLY_MP3 +static const ma_dr_mp3_L12_subband_alloc *ma_dr_mp3_L12_subband_alloc_table(const ma_uint8 *hdr, ma_dr_mp3_L12_scale_info *sci) +{ + const ma_dr_mp3_L12_subband_alloc *alloc; + int mode = MA_DR_MP3_HDR_GET_STEREO_MODE(hdr); + int nbands, stereo_bands = (mode == MA_DR_MP3_MODE_MONO) ? 0 : (mode == MA_DR_MP3_MODE_JOINT_STEREO) ? (MA_DR_MP3_HDR_GET_STEREO_MODE_EXT(hdr) << 2) + 4 : 32; + if (MA_DR_MP3_HDR_IS_LAYER_1(hdr)) + { + static const ma_dr_mp3_L12_subband_alloc g_alloc_L1[] = { { 76, 4, 32 } }; + alloc = g_alloc_L1; + nbands = 32; + } else if (!MA_DR_MP3_HDR_TEST_MPEG1(hdr)) + { + static const ma_dr_mp3_L12_subband_alloc g_alloc_L2M2[] = { { 60, 4, 4 }, { 44, 3, 7 }, { 44, 2, 19 } }; + alloc = g_alloc_L2M2; + nbands = 30; + } else + { + static const ma_dr_mp3_L12_subband_alloc g_alloc_L2M1[] = { { 0, 4, 3 }, { 16, 4, 8 }, { 32, 3, 12 }, { 40, 2, 7 } }; + int sample_rate_idx = MA_DR_MP3_HDR_GET_SAMPLE_RATE(hdr); + unsigned kbps = ma_dr_mp3_hdr_bitrate_kbps(hdr) >> (int)(mode != MA_DR_MP3_MODE_MONO); + if (!kbps) + { + kbps = 192; + } + alloc = g_alloc_L2M1; + nbands = 27; + if (kbps < 56) + { + static const ma_dr_mp3_L12_subband_alloc g_alloc_L2M1_lowrate[] = { { 44, 4, 2 }, { 44, 3, 10 } }; + alloc = g_alloc_L2M1_lowrate; + nbands = sample_rate_idx == 2 ? 12 : 8; + } else if (kbps >= 96 && sample_rate_idx != 1) + { + nbands = 30; + } + } + sci->total_bands = (ma_uint8)nbands; + sci->stereo_bands = (ma_uint8)MA_DR_MP3_MIN(stereo_bands, nbands); + return alloc; +} +static void ma_dr_mp3_L12_read_scalefactors(ma_dr_mp3_bs *bs, ma_uint8 *pba, ma_uint8 *scfcod, int bands, float *scf) +{ + static const float g_deq_L12[18*3] = { +#define MA_DR_MP3_DQ(x) 9.53674316e-07f/x, 7.56931807e-07f/x, 6.00777173e-07f/x + MA_DR_MP3_DQ(3),MA_DR_MP3_DQ(7),MA_DR_MP3_DQ(15),MA_DR_MP3_DQ(31),MA_DR_MP3_DQ(63),MA_DR_MP3_DQ(127),MA_DR_MP3_DQ(255),MA_DR_MP3_DQ(511),MA_DR_MP3_DQ(1023),MA_DR_MP3_DQ(2047),MA_DR_MP3_DQ(4095),MA_DR_MP3_DQ(8191),MA_DR_MP3_DQ(16383),MA_DR_MP3_DQ(32767),MA_DR_MP3_DQ(65535),MA_DR_MP3_DQ(3),MA_DR_MP3_DQ(5),MA_DR_MP3_DQ(9) + }; + int i, m; + for (i = 0; i < bands; i++) + { + float s = 0; + int ba = *pba++; + int mask = ba ? 
4 + ((19 >> scfcod[i]) & 3) : 0; + for (m = 4; m; m >>= 1) + { + if (mask & m) + { + int b = ma_dr_mp3_bs_get_bits(bs, 6); + s = g_deq_L12[ba*3 - 6 + b % 3]*(int)(1 << 21 >> b/3); + } + *scf++ = s; + } + } +} +static void ma_dr_mp3_L12_read_scale_info(const ma_uint8 *hdr, ma_dr_mp3_bs *bs, ma_dr_mp3_L12_scale_info *sci) +{ + static const ma_uint8 g_bitalloc_code_tab[] = { + 0,17, 3, 4, 5,6,7, 8,9,10,11,12,13,14,15,16, + 0,17,18, 3,19,4,5, 6,7, 8, 9,10,11,12,13,16, + 0,17,18, 3,19,4,5,16, + 0,17,18,16, + 0,17,18,19, 4,5,6, 7,8, 9,10,11,12,13,14,15, + 0,17,18, 3,19,4,5, 6,7, 8, 9,10,11,12,13,14, + 0, 2, 3, 4, 5,6,7, 8,9,10,11,12,13,14,15,16 + }; + const ma_dr_mp3_L12_subband_alloc *subband_alloc = ma_dr_mp3_L12_subband_alloc_table(hdr, sci); + int i, k = 0, ba_bits = 0; + const ma_uint8 *ba_code_tab = g_bitalloc_code_tab; + for (i = 0; i < sci->total_bands; i++) + { + ma_uint8 ba; + if (i == k) + { + k += subband_alloc->band_count; + ba_bits = subband_alloc->code_tab_width; + ba_code_tab = g_bitalloc_code_tab + subband_alloc->tab_offset; + subband_alloc++; + } + ba = ba_code_tab[ma_dr_mp3_bs_get_bits(bs, ba_bits)]; + sci->bitalloc[2*i] = ba; + if (i < sci->stereo_bands) + { + ba = ba_code_tab[ma_dr_mp3_bs_get_bits(bs, ba_bits)]; + } + sci->bitalloc[2*i + 1] = sci->stereo_bands ? ba : 0; + } + for (i = 0; i < 2*sci->total_bands; i++) + { + sci->scfcod[i] = (ma_uint8)(sci->bitalloc[i] ? MA_DR_MP3_HDR_IS_LAYER_1(hdr) ? 2 : ma_dr_mp3_bs_get_bits(bs, 2) : 6); + } + ma_dr_mp3_L12_read_scalefactors(bs, sci->bitalloc, sci->scfcod, sci->total_bands*2, sci->scf); + for (i = sci->stereo_bands; i < sci->total_bands; i++) + { + sci->bitalloc[2*i + 1] = 0; + } +} +static int ma_dr_mp3_L12_dequantize_granule(float *grbuf, ma_dr_mp3_bs *bs, ma_dr_mp3_L12_scale_info *sci, int group_size) +{ + int i, j, k, choff = 576; + for (j = 0; j < 4; j++) + { + float *dst = grbuf + group_size*j; + for (i = 0; i < 2*sci->total_bands; i++) + { + int ba = sci->bitalloc[i]; + if (ba != 0) + { + if (ba < 17) + { + int half = (1 << (ba - 1)) - 1; + for (k = 0; k < group_size; k++) + { + dst[k] = (float)((int)ma_dr_mp3_bs_get_bits(bs, ba) - half); + } + } else + { + unsigned mod = (2 << (ba - 17)) + 1; + unsigned code = ma_dr_mp3_bs_get_bits(bs, mod + 2 - (mod >> 3)); + for (k = 0; k < group_size; k++, code /= mod) + { + dst[k] = (float)((int)(code % mod - mod/2)); + } + } + } + dst += choff; + choff = 18 - choff; + } + } + return group_size*4; +} +static void ma_dr_mp3_L12_apply_scf_384(ma_dr_mp3_L12_scale_info *sci, const float *scf, float *dst) +{ + int i, k; + MA_DR_MP3_COPY_MEMORY(dst + 576 + sci->stereo_bands*18, dst + sci->stereo_bands*18, (sci->total_bands - sci->stereo_bands)*18*sizeof(float)); + for (i = 0; i < sci->total_bands; i++, dst += 18, scf += 6) + { + for (k = 0; k < 12; k++) + { + dst[k + 0] *= scf[0]; + dst[k + 576] *= scf[3]; + } + } +} +#endif +static int ma_dr_mp3_L3_read_side_info(ma_dr_mp3_bs *bs, ma_dr_mp3_L3_gr_info *gr, const ma_uint8 *hdr) +{ + static const ma_uint8 g_scf_long[8][23] = { + { 6,6,6,6,6,6,8,10,12,14,16,20,24,28,32,38,46,52,60,68,58,54,0 }, + { 12,12,12,12,12,12,16,20,24,28,32,40,48,56,64,76,90,2,2,2,2,2,0 }, + { 6,6,6,6,6,6,8,10,12,14,16,20,24,28,32,38,46,52,60,68,58,54,0 }, + { 6,6,6,6,6,6,8,10,12,14,16,18,22,26,32,38,46,54,62,70,76,36,0 }, + { 6,6,6,6,6,6,8,10,12,14,16,20,24,28,32,38,46,52,60,68,58,54,0 }, + { 4,4,4,4,4,4,6,6,8,8,10,12,16,20,24,28,34,42,50,54,76,158,0 }, + { 4,4,4,4,4,4,6,6,6,8,10,12,16,18,22,28,34,40,46,54,54,192,0 }, + { 
4,4,4,4,4,4,6,6,8,10,12,16,20,24,30,38,46,56,68,84,102,26,0 } + }; + static const ma_uint8 g_scf_short[8][40] = { + { 4,4,4,4,4,4,4,4,4,6,6,6,8,8,8,10,10,10,12,12,12,14,14,14,18,18,18,24,24,24,30,30,30,40,40,40,18,18,18,0 }, + { 8,8,8,8,8,8,8,8,8,12,12,12,16,16,16,20,20,20,24,24,24,28,28,28,36,36,36,2,2,2,2,2,2,2,2,2,26,26,26,0 }, + { 4,4,4,4,4,4,4,4,4,6,6,6,6,6,6,8,8,8,10,10,10,14,14,14,18,18,18,26,26,26,32,32,32,42,42,42,18,18,18,0 }, + { 4,4,4,4,4,4,4,4,4,6,6,6,8,8,8,10,10,10,12,12,12,14,14,14,18,18,18,24,24,24,32,32,32,44,44,44,12,12,12,0 }, + { 4,4,4,4,4,4,4,4,4,6,6,6,8,8,8,10,10,10,12,12,12,14,14,14,18,18,18,24,24,24,30,30,30,40,40,40,18,18,18,0 }, + { 4,4,4,4,4,4,4,4,4,4,4,4,6,6,6,8,8,8,10,10,10,12,12,12,14,14,14,18,18,18,22,22,22,30,30,30,56,56,56,0 }, + { 4,4,4,4,4,4,4,4,4,4,4,4,6,6,6,6,6,6,10,10,10,12,12,12,14,14,14,16,16,16,20,20,20,26,26,26,66,66,66,0 }, + { 4,4,4,4,4,4,4,4,4,4,4,4,6,6,6,8,8,8,12,12,12,16,16,16,20,20,20,26,26,26,34,34,34,42,42,42,12,12,12,0 } + }; + static const ma_uint8 g_scf_mixed[8][40] = { + { 6,6,6,6,6,6,6,6,6,8,8,8,10,10,10,12,12,12,14,14,14,18,18,18,24,24,24,30,30,30,40,40,40,18,18,18,0 }, + { 12,12,12,4,4,4,8,8,8,12,12,12,16,16,16,20,20,20,24,24,24,28,28,28,36,36,36,2,2,2,2,2,2,2,2,2,26,26,26,0 }, + { 6,6,6,6,6,6,6,6,6,6,6,6,8,8,8,10,10,10,14,14,14,18,18,18,26,26,26,32,32,32,42,42,42,18,18,18,0 }, + { 6,6,6,6,6,6,6,6,6,8,8,8,10,10,10,12,12,12,14,14,14,18,18,18,24,24,24,32,32,32,44,44,44,12,12,12,0 }, + { 6,6,6,6,6,6,6,6,6,8,8,8,10,10,10,12,12,12,14,14,14,18,18,18,24,24,24,30,30,30,40,40,40,18,18,18,0 }, + { 4,4,4,4,4,4,6,6,4,4,4,6,6,6,8,8,8,10,10,10,12,12,12,14,14,14,18,18,18,22,22,22,30,30,30,56,56,56,0 }, + { 4,4,4,4,4,4,6,6,4,4,4,6,6,6,6,6,6,10,10,10,12,12,12,14,14,14,16,16,16,20,20,20,26,26,26,66,66,66,0 }, + { 4,4,4,4,4,4,6,6,4,4,4,6,6,6,8,8,8,12,12,12,16,16,16,20,20,20,26,26,26,34,34,34,42,42,42,12,12,12,0 } + }; + unsigned tables, scfsi = 0; + int main_data_begin, part_23_sum = 0; + int gr_count = MA_DR_MP3_HDR_IS_MONO(hdr) ? 1 : 2; + int sr_idx = MA_DR_MP3_HDR_GET_MY_SAMPLE_RATE(hdr); sr_idx -= (sr_idx != 0); + if (MA_DR_MP3_HDR_TEST_MPEG1(hdr)) + { + gr_count *= 2; + main_data_begin = ma_dr_mp3_bs_get_bits(bs, 9); + scfsi = ma_dr_mp3_bs_get_bits(bs, 7 + gr_count); + } else + { + main_data_begin = ma_dr_mp3_bs_get_bits(bs, 8 + gr_count) >> gr_count; + } + do + { + if (MA_DR_MP3_HDR_IS_MONO(hdr)) + { + scfsi <<= 4; + } + gr->part_23_length = (ma_uint16)ma_dr_mp3_bs_get_bits(bs, 12); + part_23_sum += gr->part_23_length; + gr->big_values = (ma_uint16)ma_dr_mp3_bs_get_bits(bs, 9); + if (gr->big_values > 288) + { + return -1; + } + gr->global_gain = (ma_uint8)ma_dr_mp3_bs_get_bits(bs, 8); + gr->scalefac_compress = (ma_uint16)ma_dr_mp3_bs_get_bits(bs, MA_DR_MP3_HDR_TEST_MPEG1(hdr) ? 4 : 9); + gr->sfbtab = g_scf_long[sr_idx]; + gr->n_long_sfb = 22; + gr->n_short_sfb = 0; + if (ma_dr_mp3_bs_get_bits(bs, 1)) + { + gr->block_type = (ma_uint8)ma_dr_mp3_bs_get_bits(bs, 2); + if (!gr->block_type) + { + return -1; + } + gr->mixed_block_flag = (ma_uint8)ma_dr_mp3_bs_get_bits(bs, 1); + gr->region_count[0] = 7; + gr->region_count[1] = 255; + if (gr->block_type == MA_DR_MP3_SHORT_BLOCK_TYPE) + { + scfsi &= 0x0F0F; + if (!gr->mixed_block_flag) + { + gr->region_count[0] = 8; + gr->sfbtab = g_scf_short[sr_idx]; + gr->n_long_sfb = 0; + gr->n_short_sfb = 39; + } else + { + gr->sfbtab = g_scf_mixed[sr_idx]; + gr->n_long_sfb = MA_DR_MP3_HDR_TEST_MPEG1(hdr) ? 
8 : 6; + gr->n_short_sfb = 30; + } + } + tables = ma_dr_mp3_bs_get_bits(bs, 10); + tables <<= 5; + gr->subblock_gain[0] = (ma_uint8)ma_dr_mp3_bs_get_bits(bs, 3); + gr->subblock_gain[1] = (ma_uint8)ma_dr_mp3_bs_get_bits(bs, 3); + gr->subblock_gain[2] = (ma_uint8)ma_dr_mp3_bs_get_bits(bs, 3); + } else + { + gr->block_type = 0; + gr->mixed_block_flag = 0; + tables = ma_dr_mp3_bs_get_bits(bs, 15); + gr->region_count[0] = (ma_uint8)ma_dr_mp3_bs_get_bits(bs, 4); + gr->region_count[1] = (ma_uint8)ma_dr_mp3_bs_get_bits(bs, 3); + gr->region_count[2] = 255; + } + gr->table_select[0] = (ma_uint8)(tables >> 10); + gr->table_select[1] = (ma_uint8)((tables >> 5) & 31); + gr->table_select[2] = (ma_uint8)((tables) & 31); + gr->preflag = (ma_uint8)(MA_DR_MP3_HDR_TEST_MPEG1(hdr) ? ma_dr_mp3_bs_get_bits(bs, 1) : (gr->scalefac_compress >= 500)); + gr->scalefac_scale = (ma_uint8)ma_dr_mp3_bs_get_bits(bs, 1); + gr->count1_table = (ma_uint8)ma_dr_mp3_bs_get_bits(bs, 1); + gr->scfsi = (ma_uint8)((scfsi >> 12) & 15); + scfsi <<= 4; + gr++; + } while(--gr_count); + if (part_23_sum + bs->pos > bs->limit + main_data_begin*8) + { + return -1; + } + return main_data_begin; +} +static void ma_dr_mp3_L3_read_scalefactors(ma_uint8 *scf, ma_uint8 *ist_pos, const ma_uint8 *scf_size, const ma_uint8 *scf_count, ma_dr_mp3_bs *bitbuf, int scfsi) +{ + int i, k; + for (i = 0; i < 4 && scf_count[i]; i++, scfsi *= 2) + { + int cnt = scf_count[i]; + if (scfsi & 8) + { + MA_DR_MP3_COPY_MEMORY(scf, ist_pos, cnt); + } else + { + int bits = scf_size[i]; + if (!bits) + { + MA_DR_MP3_ZERO_MEMORY(scf, cnt); + MA_DR_MP3_ZERO_MEMORY(ist_pos, cnt); + } else + { + int max_scf = (scfsi < 0) ? (1 << bits) - 1 : -1; + for (k = 0; k < cnt; k++) + { + int s = ma_dr_mp3_bs_get_bits(bitbuf, bits); + ist_pos[k] = (ma_uint8)(s == max_scf ? 
-1 : s); + scf[k] = (ma_uint8)s; + } + } + } + ist_pos += cnt; + scf += cnt; + } + scf[0] = scf[1] = scf[2] = 0; +} +static float ma_dr_mp3_L3_ldexp_q2(float y, int exp_q2) +{ + static const float g_expfrac[4] = { 9.31322575e-10f,7.83145814e-10f,6.58544508e-10f,5.53767716e-10f }; + int e; + do + { + e = MA_DR_MP3_MIN(30*4, exp_q2); + y *= g_expfrac[e & 3]*(1 << 30 >> (e >> 2)); + } while ((exp_q2 -= e) > 0); + return y; +} +static void ma_dr_mp3_L3_decode_scalefactors(const ma_uint8 *hdr, ma_uint8 *ist_pos, ma_dr_mp3_bs *bs, const ma_dr_mp3_L3_gr_info *gr, float *scf, int ch) +{ + static const ma_uint8 g_scf_partitions[3][28] = { + { 6,5,5, 5,6,5,5,5,6,5, 7,3,11,10,0,0, 7, 7, 7,0, 6, 6,6,3, 8, 8,5,0 }, + { 8,9,6,12,6,9,9,9,6,9,12,6,15,18,0,0, 6,15,12,0, 6,12,9,6, 6,18,9,0 }, + { 9,9,6,12,9,9,9,9,9,9,12,6,18,18,0,0,12,12,12,0,12, 9,9,6,15,12,9,0 } + }; + const ma_uint8 *scf_partition = g_scf_partitions[!!gr->n_short_sfb + !gr->n_long_sfb]; + ma_uint8 scf_size[4], iscf[40]; + int i, scf_shift = gr->scalefac_scale + 1, gain_exp, scfsi = gr->scfsi; + float gain; + if (MA_DR_MP3_HDR_TEST_MPEG1(hdr)) + { + static const ma_uint8 g_scfc_decode[16] = { 0,1,2,3, 12,5,6,7, 9,10,11,13, 14,15,18,19 }; + int part = g_scfc_decode[gr->scalefac_compress]; + scf_size[1] = scf_size[0] = (ma_uint8)(part >> 2); + scf_size[3] = scf_size[2] = (ma_uint8)(part & 3); + } else + { + static const ma_uint8 g_mod[6*4] = { 5,5,4,4,5,5,4,1,4,3,1,1,5,6,6,1,4,4,4,1,4,3,1,1 }; + int k, modprod, sfc, ist = MA_DR_MP3_HDR_TEST_I_STEREO(hdr) && ch; + sfc = gr->scalefac_compress >> ist; + for (k = ist*3*4; sfc >= 0; sfc -= modprod, k += 4) + { + for (modprod = 1, i = 3; i >= 0; i--) + { + scf_size[i] = (ma_uint8)(sfc / modprod % g_mod[k + i]); + modprod *= g_mod[k + i]; + } + } + scf_partition += k; + scfsi = -16; + } + ma_dr_mp3_L3_read_scalefactors(iscf, ist_pos, scf_size, scf_partition, bs, scfsi); + if (gr->n_short_sfb) + { + int sh = 3 - scf_shift; + for (i = 0; i < gr->n_short_sfb; i += 3) + { + iscf[gr->n_long_sfb + i + 0] = (ma_uint8)(iscf[gr->n_long_sfb + i + 0] + (gr->subblock_gain[0] << sh)); + iscf[gr->n_long_sfb + i + 1] = (ma_uint8)(iscf[gr->n_long_sfb + i + 1] + (gr->subblock_gain[1] << sh)); + iscf[gr->n_long_sfb + i + 2] = (ma_uint8)(iscf[gr->n_long_sfb + i + 2] + (gr->subblock_gain[2] << sh)); + } + } else if (gr->preflag) + { + static const ma_uint8 g_preamp[10] = { 1,1,1,1,2,2,3,3,3,2 }; + for (i = 0; i < 10; i++) + { + iscf[11 + i] = (ma_uint8)(iscf[11 + i] + g_preamp[i]); + } + } + gain_exp = gr->global_gain + MA_DR_MP3_BITS_DEQUANTIZER_OUT*4 - 210 - (MA_DR_MP3_HDR_IS_MS_STEREO(hdr) ? 
2 : 0); + gain = ma_dr_mp3_L3_ldexp_q2(1 << (MA_DR_MP3_MAX_SCFI/4), MA_DR_MP3_MAX_SCFI - gain_exp); + for (i = 0; i < (int)(gr->n_long_sfb + gr->n_short_sfb); i++) + { + scf[i] = ma_dr_mp3_L3_ldexp_q2(gain, iscf[i] << scf_shift); + } +} +static const float g_ma_dr_mp3_pow43[129 + 16] = { + 0,-1,-2.519842f,-4.326749f,-6.349604f,-8.549880f,-10.902724f,-13.390518f,-16.000000f,-18.720754f,-21.544347f,-24.463781f,-27.473142f,-30.567351f,-33.741992f,-36.993181f, + 0,1,2.519842f,4.326749f,6.349604f,8.549880f,10.902724f,13.390518f,16.000000f,18.720754f,21.544347f,24.463781f,27.473142f,30.567351f,33.741992f,36.993181f,40.317474f,43.711787f,47.173345f,50.699631f,54.288352f,57.937408f,61.644865f,65.408941f,69.227979f,73.100443f,77.024898f,81.000000f,85.024491f,89.097188f,93.216975f,97.382800f,101.593667f,105.848633f,110.146801f,114.487321f,118.869381f,123.292209f,127.755065f,132.257246f,136.798076f,141.376907f,145.993119f,150.646117f,155.335327f,160.060199f,164.820202f,169.614826f,174.443577f,179.305980f,184.201575f,189.129918f,194.090580f,199.083145f,204.107210f,209.162385f,214.248292f,219.364564f,224.510845f,229.686789f,234.892058f,240.126328f,245.389280f,250.680604f,256.000000f,261.347174f,266.721841f,272.123723f,277.552547f,283.008049f,288.489971f,293.998060f,299.532071f,305.091761f,310.676898f,316.287249f,321.922592f,327.582707f,333.267377f,338.976394f,344.709550f,350.466646f,356.247482f,362.051866f,367.879608f,373.730522f,379.604427f,385.501143f,391.420496f,397.362314f,403.326427f,409.312672f,415.320884f,421.350905f,427.402579f,433.475750f,439.570269f,445.685987f,451.822757f,457.980436f,464.158883f,470.357960f,476.577530f,482.817459f,489.077615f,495.357868f,501.658090f,507.978156f,514.317941f,520.677324f,527.056184f,533.454404f,539.871867f,546.308458f,552.764065f,559.238575f,565.731879f,572.243870f,578.774440f,585.323483f,591.890898f,598.476581f,605.080431f,611.702349f,618.342238f,625.000000f,631.675540f,638.368763f,645.079578f +}; +static float ma_dr_mp3_L3_pow_43(int x) +{ + float frac; + int sign, mult = 256; + if (x < 129) + { + return g_ma_dr_mp3_pow43[16 + x]; + } + if (x < 1024) + { + mult = 16; + x <<= 3; + } + sign = 2*x & 64; + frac = (float)((x & 63) - sign) / ((x & ~63) + sign); + return g_ma_dr_mp3_pow43[16 + ((x + sign) >> 6)]*(1.f + frac*((4.f/3) + frac*(2.f/9)))*mult; +} +static void ma_dr_mp3_L3_huffman(float *dst, ma_dr_mp3_bs *bs, const ma_dr_mp3_L3_gr_info *gr_info, const float *scf, int layer3gr_limit) +{ + static const ma_int16 tabs[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 785,785,785,785,784,784,784,784,513,513,513,513,513,513,513,513,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256, + -255,1313,1298,1282,785,785,785,785,784,784,784,784,769,769,769,769,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,290,288, + -255,1313,1298,1282,769,769,769,769,529,529,529,529,529,529,529,529,528,528,528,528,528,528,528,528,512,512,512,512,512,512,512,512,290,288, + -253,-318,-351,-367,785,785,785,785,784,784,784,784,769,769,769,769,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,819,818,547,547,275,275,275,275,561,560,515,546,289,274,288,258, + -254,-287,1329,1299,1314,1312,1057,1057,1042,1042,1026,1026,784,784,784,784,529,529,529,529,529,529,529,529,769,769,769,769,768,768,768,768,563,560,306,306,291,259, + 
-252,-413,-477,-542,1298,-575,1041,1041,784,784,784,784,769,769,769,769,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,-383,-399,1107,1092,1106,1061,849,849,789,789,1104,1091,773,773,1076,1075,341,340,325,309,834,804,577,577,532,532,516,516,832,818,803,816,561,561,531,531,515,546,289,289,288,258, + -252,-429,-493,-559,1057,1057,1042,1042,529,529,529,529,529,529,529,529,784,784,784,784,769,769,769,769,512,512,512,512,512,512,512,512,-382,1077,-415,1106,1061,1104,849,849,789,789,1091,1076,1029,1075,834,834,597,581,340,340,339,324,804,833,532,532,832,772,818,803,817,787,816,771,290,290,290,290,288,258, + -253,-349,-414,-447,-463,1329,1299,-479,1314,1312,1057,1057,1042,1042,1026,1026,785,785,785,785,784,784,784,784,769,769,769,769,768,768,768,768,-319,851,821,-335,836,850,805,849,341,340,325,336,533,533,579,579,564,564,773,832,578,548,563,516,321,276,306,291,304,259, + -251,-572,-733,-830,-863,-879,1041,1041,784,784,784,784,769,769,769,769,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,-511,-527,-543,1396,1351,1381,1366,1395,1335,1380,-559,1334,1138,1138,1063,1063,1350,1392,1031,1031,1062,1062,1364,1363,1120,1120,1333,1348,881,881,881,881,375,374,359,373,343,358,341,325,791,791,1123,1122,-703,1105,1045,-719,865,865,790,790,774,774,1104,1029,338,293,323,308,-799,-815,833,788,772,818,803,816,322,292,307,320,561,531,515,546,289,274,288,258, + -251,-525,-605,-685,-765,-831,-846,1298,1057,1057,1312,1282,785,785,785,785,784,784,784,784,769,769,769,769,512,512,512,512,512,512,512,512,1399,1398,1383,1367,1382,1396,1351,-511,1381,1366,1139,1139,1079,1079,1124,1124,1364,1349,1363,1333,882,882,882,882,807,807,807,807,1094,1094,1136,1136,373,341,535,535,881,775,867,822,774,-591,324,338,-671,849,550,550,866,864,609,609,293,336,534,534,789,835,773,-751,834,804,308,307,833,788,832,772,562,562,547,547,305,275,560,515,290,290, + -252,-397,-477,-557,-622,-653,-719,-735,-750,1329,1299,1314,1057,1057,1042,1042,1312,1282,1024,1024,785,785,785,785,784,784,784,784,769,769,769,769,-383,1127,1141,1111,1126,1140,1095,1110,869,869,883,883,1079,1109,882,882,375,374,807,868,838,881,791,-463,867,822,368,263,852,837,836,-543,610,610,550,550,352,336,534,534,865,774,851,821,850,805,593,533,579,564,773,832,578,578,548,548,577,577,307,276,306,291,516,560,259,259, + 
-250,-2107,-2507,-2764,-2909,-2974,-3007,-3023,1041,1041,1040,1040,769,769,769,769,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,-767,-1052,-1213,-1277,-1358,-1405,-1469,-1535,-1550,-1582,-1614,-1647,-1662,-1694,-1726,-1759,-1774,-1807,-1822,-1854,-1886,1565,-1919,-1935,-1951,-1967,1731,1730,1580,1717,-1983,1729,1564,-1999,1548,-2015,-2031,1715,1595,-2047,1714,-2063,1610,-2079,1609,-2095,1323,1323,1457,1457,1307,1307,1712,1547,1641,1700,1699,1594,1685,1625,1442,1442,1322,1322,-780,-973,-910,1279,1278,1277,1262,1276,1261,1275,1215,1260,1229,-959,974,974,989,989,-943,735,478,478,495,463,506,414,-1039,1003,958,1017,927,942,987,957,431,476,1272,1167,1228,-1183,1256,-1199,895,895,941,941,1242,1227,1212,1135,1014,1014,490,489,503,487,910,1013,985,925,863,894,970,955,1012,847,-1343,831,755,755,984,909,428,366,754,559,-1391,752,486,457,924,997,698,698,983,893,740,740,908,877,739,739,667,667,953,938,497,287,271,271,683,606,590,712,726,574,302,302,738,736,481,286,526,725,605,711,636,724,696,651,589,681,666,710,364,467,573,695,466,466,301,465,379,379,709,604,665,679,316,316,634,633,436,436,464,269,424,394,452,332,438,363,347,408,393,448,331,422,362,407,392,421,346,406,391,376,375,359,1441,1306,-2367,1290,-2383,1337,-2399,-2415,1426,1321,-2431,1411,1336,-2447,-2463,-2479,1169,1169,1049,1049,1424,1289,1412,1352,1319,-2495,1154,1154,1064,1064,1153,1153,416,390,360,404,403,389,344,374,373,343,358,372,327,357,342,311,356,326,1395,1394,1137,1137,1047,1047,1365,1392,1287,1379,1334,1364,1349,1378,1318,1363,792,792,792,792,1152,1152,1032,1032,1121,1121,1046,1046,1120,1120,1030,1030,-2895,1106,1061,1104,849,849,789,789,1091,1076,1029,1090,1060,1075,833,833,309,324,532,532,832,772,818,803,561,561,531,560,515,546,289,274,288,258, + -250,-1179,-1579,-1836,-1996,-2124,-2253,-2333,-2413,-2477,-2542,-2574,-2607,-2622,-2655,1314,1313,1298,1312,1282,785,785,785,785,1040,1040,1025,1025,768,768,768,768,-766,-798,-830,-862,-895,-911,-927,-943,-959,-975,-991,-1007,-1023,-1039,-1055,-1070,1724,1647,-1103,-1119,1631,1767,1662,1738,1708,1723,-1135,1780,1615,1779,1599,1677,1646,1778,1583,-1151,1777,1567,1737,1692,1765,1722,1707,1630,1751,1661,1764,1614,1736,1676,1763,1750,1645,1598,1721,1691,1762,1706,1582,1761,1566,-1167,1749,1629,767,766,751,765,494,494,735,764,719,749,734,763,447,447,748,718,477,506,431,491,446,476,461,505,415,430,475,445,504,399,460,489,414,503,383,474,429,459,502,502,746,752,488,398,501,473,413,472,486,271,480,270,-1439,-1455,1357,-1471,-1487,-1503,1341,1325,-1519,1489,1463,1403,1309,-1535,1372,1448,1418,1476,1356,1462,1387,-1551,1475,1340,1447,1402,1386,-1567,1068,1068,1474,1461,455,380,468,440,395,425,410,454,364,467,466,464,453,269,409,448,268,432,1371,1473,1432,1417,1308,1460,1355,1446,1459,1431,1083,1083,1401,1416,1458,1445,1067,1067,1370,1457,1051,1051,1291,1430,1385,1444,1354,1415,1400,1443,1082,1082,1173,1113,1186,1066,1185,1050,-1967,1158,1128,1172,1097,1171,1081,-1983,1157,1112,416,266,375,400,1170,1142,1127,1065,793,793,1169,1033,1156,1096,1141,1111,1155,1080,1126,1140,898,898,808,808,897,897,792,792,1095,1152,1032,1125,1110,1139,1079,1124,882,807,838,881,853,791,-2319,867,368,263,822,852,837,866,806,865,-2399,851,352,262,534,534,821,836,594,594,549,549,593,593,533,533,848,773,579,579,564,578,548,563,276,276,577,576,306,291,516,560,305,305,275,259, + 
-251,-892,-2058,-2620,-2828,-2957,-3023,-3039,1041,1041,1040,1040,769,769,769,769,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,-511,-527,-543,-559,1530,-575,-591,1528,1527,1407,1526,1391,1023,1023,1023,1023,1525,1375,1268,1268,1103,1103,1087,1087,1039,1039,1523,-604,815,815,815,815,510,495,509,479,508,463,507,447,431,505,415,399,-734,-782,1262,-815,1259,1244,-831,1258,1228,-847,-863,1196,-879,1253,987,987,748,-767,493,493,462,477,414,414,686,669,478,446,461,445,474,429,487,458,412,471,1266,1264,1009,1009,799,799,-1019,-1276,-1452,-1581,-1677,-1757,-1821,-1886,-1933,-1997,1257,1257,1483,1468,1512,1422,1497,1406,1467,1496,1421,1510,1134,1134,1225,1225,1466,1451,1374,1405,1252,1252,1358,1480,1164,1164,1251,1251,1238,1238,1389,1465,-1407,1054,1101,-1423,1207,-1439,830,830,1248,1038,1237,1117,1223,1148,1236,1208,411,426,395,410,379,269,1193,1222,1132,1235,1221,1116,976,976,1192,1162,1177,1220,1131,1191,963,963,-1647,961,780,-1663,558,558,994,993,437,408,393,407,829,978,813,797,947,-1743,721,721,377,392,844,950,828,890,706,706,812,859,796,960,948,843,934,874,571,571,-1919,690,555,689,421,346,539,539,944,779,918,873,932,842,903,888,570,570,931,917,674,674,-2575,1562,-2591,1609,-2607,1654,1322,1322,1441,1441,1696,1546,1683,1593,1669,1624,1426,1426,1321,1321,1639,1680,1425,1425,1305,1305,1545,1668,1608,1623,1667,1592,1638,1666,1320,1320,1652,1607,1409,1409,1304,1304,1288,1288,1664,1637,1395,1395,1335,1335,1622,1636,1394,1394,1319,1319,1606,1621,1392,1392,1137,1137,1137,1137,345,390,360,375,404,373,1047,-2751,-2767,-2783,1062,1121,1046,-2799,1077,-2815,1106,1061,789,789,1105,1104,263,355,310,340,325,354,352,262,339,324,1091,1076,1029,1090,1060,1075,833,833,788,788,1088,1028,818,818,803,803,561,561,531,531,816,771,546,546,289,274,288,258, + -253,-317,-381,-446,-478,-509,1279,1279,-811,-1179,-1451,-1756,-1900,-2028,-2189,-2253,-2333,-2414,-2445,-2511,-2526,1313,1298,-2559,1041,1041,1040,1040,1025,1025,1024,1024,1022,1007,1021,991,1020,975,1019,959,687,687,1018,1017,671,671,655,655,1016,1015,639,639,758,758,623,623,757,607,756,591,755,575,754,559,543,543,1009,783,-575,-621,-685,-749,496,-590,750,749,734,748,974,989,1003,958,988,973,1002,942,987,957,972,1001,926,986,941,971,956,1000,910,985,925,999,894,970,-1071,-1087,-1102,1390,-1135,1436,1509,1451,1374,-1151,1405,1358,1480,1420,-1167,1507,1494,1389,1342,1465,1435,1450,1326,1505,1310,1493,1373,1479,1404,1492,1464,1419,428,443,472,397,736,526,464,464,486,457,442,471,484,482,1357,1449,1434,1478,1388,1491,1341,1490,1325,1489,1463,1403,1309,1477,1372,1448,1418,1433,1476,1356,1462,1387,-1439,1475,1340,1447,1402,1474,1324,1461,1371,1473,269,448,1432,1417,1308,1460,-1711,1459,-1727,1441,1099,1099,1446,1386,1431,1401,-1743,1289,1083,1083,1160,1160,1458,1445,1067,1067,1370,1457,1307,1430,1129,1129,1098,1098,268,432,267,416,266,400,-1887,1144,1187,1082,1173,1113,1186,1066,1050,1158,1128,1143,1172,1097,1171,1081,420,391,1157,1112,1170,1142,1127,1065,1169,1049,1156,1096,1141,1111,1155,1080,1126,1154,1064,1153,1140,1095,1048,-2159,1125,1110,1137,-2175,823,823,1139,1138,807,807,384,264,368,263,868,838,853,791,867,822,852,837,866,806,865,790,-2319,851,821,836,352,262,850,805,849,-2399,533,533,835,820,336,261,578,548,563,577,532,532,832,772,562,562,547,547,305,275,560,515,290,290,288,258 }; + static const ma_uint8 tab32[] = { 130,162,193,209,44,28,76,140,9,9,9,9,9,9,9,9,190,254,222,238,126,94,157,157,109,61,173,205}; + static const ma_uint8 tab33[] = { 252,236,220,204,188,172,156,140,124,108,92,76,60,44,28,12 }; + static const ma_int16 
tabindex[2*16] = { 0,32,64,98,0,132,180,218,292,364,426,538,648,746,0,1126,1460,1460,1460,1460,1460,1460,1460,1460,1842,1842,1842,1842,1842,1842,1842,1842 }; + static const ma_uint8 g_linbits[] = { 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,3,4,6,8,10,13,4,5,6,7,8,9,11,13 }; +#define MA_DR_MP3_PEEK_BITS(n) (bs_cache >> (32 - (n))) +#define MA_DR_MP3_FLUSH_BITS(n) { bs_cache <<= (n); bs_sh += (n); } +#define MA_DR_MP3_CHECK_BITS while (bs_sh >= 0) { bs_cache |= (ma_uint32)*bs_next_ptr++ << bs_sh; bs_sh -= 8; } +#define MA_DR_MP3_BSPOS ((bs_next_ptr - bs->buf)*8 - 24 + bs_sh) + float one = 0.0f; + int ireg = 0, big_val_cnt = gr_info->big_values; + const ma_uint8 *sfb = gr_info->sfbtab; + const ma_uint8 *bs_next_ptr = bs->buf + bs->pos/8; + ma_uint32 bs_cache = (((bs_next_ptr[0]*256u + bs_next_ptr[1])*256u + bs_next_ptr[2])*256u + bs_next_ptr[3]) << (bs->pos & 7); + int pairs_to_decode, np, bs_sh = (bs->pos & 7) - 8; + bs_next_ptr += 4; + while (big_val_cnt > 0) + { + int tab_num = gr_info->table_select[ireg]; + int sfb_cnt = gr_info->region_count[ireg++]; + const ma_int16 *codebook = tabs + tabindex[tab_num]; + int linbits = g_linbits[tab_num]; + if (linbits) + { + do + { + np = *sfb++ / 2; + pairs_to_decode = MA_DR_MP3_MIN(big_val_cnt, np); + one = *scf++; + do + { + int j, w = 5; + int leaf = codebook[MA_DR_MP3_PEEK_BITS(w)]; + while (leaf < 0) + { + MA_DR_MP3_FLUSH_BITS(w); + w = leaf & 7; + leaf = codebook[MA_DR_MP3_PEEK_BITS(w) - (leaf >> 3)]; + } + MA_DR_MP3_FLUSH_BITS(leaf >> 8); + for (j = 0; j < 2; j++, dst++, leaf >>= 4) + { + int lsb = leaf & 0x0F; + if (lsb == 15) + { + lsb += MA_DR_MP3_PEEK_BITS(linbits); + MA_DR_MP3_FLUSH_BITS(linbits); + MA_DR_MP3_CHECK_BITS; + *dst = one*ma_dr_mp3_L3_pow_43(lsb)*((ma_int32)bs_cache < 0 ? -1: 1); + } else + { + *dst = g_ma_dr_mp3_pow43[16 + lsb - 16*(bs_cache >> 31)]*one; + } + MA_DR_MP3_FLUSH_BITS(lsb ? 1 : 0); + } + MA_DR_MP3_CHECK_BITS; + } while (--pairs_to_decode); + } while ((big_val_cnt -= np) > 0 && --sfb_cnt >= 0); + } else + { + do + { + np = *sfb++ / 2; + pairs_to_decode = MA_DR_MP3_MIN(big_val_cnt, np); + one = *scf++; + do + { + int j, w = 5; + int leaf = codebook[MA_DR_MP3_PEEK_BITS(w)]; + while (leaf < 0) + { + MA_DR_MP3_FLUSH_BITS(w); + w = leaf & 7; + leaf = codebook[MA_DR_MP3_PEEK_BITS(w) - (leaf >> 3)]; + } + MA_DR_MP3_FLUSH_BITS(leaf >> 8); + for (j = 0; j < 2; j++, dst++, leaf >>= 4) + { + int lsb = leaf & 0x0F; + *dst = g_ma_dr_mp3_pow43[16 + lsb - 16*(bs_cache >> 31)]*one; + MA_DR_MP3_FLUSH_BITS(lsb ? 1 : 0); + } + MA_DR_MP3_CHECK_BITS; + } while (--pairs_to_decode); + } while ((big_val_cnt -= np) > 0 && --sfb_cnt >= 0); + } + } + for (np = 1 - big_val_cnt;; dst += 4) + { + const ma_uint8 *codebook_count1 = (gr_info->count1_table) ? tab33 : tab32; + int leaf = codebook_count1[MA_DR_MP3_PEEK_BITS(4)]; + if (!(leaf & 8)) + { + leaf = codebook_count1[(leaf >> 3) + (bs_cache << 4 >> (32 - (leaf & 3)))]; + } + MA_DR_MP3_FLUSH_BITS(leaf & 7); + if (MA_DR_MP3_BSPOS > layer3gr_limit) + { + break; + } +#define MA_DR_MP3_RELOAD_SCALEFACTOR if (!--np) { np = *sfb++/2; if (!np) break; one = *scf++; } +#define MA_DR_MP3_DEQ_COUNT1(s) if (leaf & (128 >> s)) { dst[s] = ((ma_int32)bs_cache < 0) ? 
-one : one; MA_DR_MP3_FLUSH_BITS(1) } + MA_DR_MP3_RELOAD_SCALEFACTOR; + MA_DR_MP3_DEQ_COUNT1(0); + MA_DR_MP3_DEQ_COUNT1(1); + MA_DR_MP3_RELOAD_SCALEFACTOR; + MA_DR_MP3_DEQ_COUNT1(2); + MA_DR_MP3_DEQ_COUNT1(3); + MA_DR_MP3_CHECK_BITS; + } + bs->pos = layer3gr_limit; +} +static void ma_dr_mp3_L3_midside_stereo(float *left, int n) +{ + int i = 0; + float *right = left + 576; +#if MA_DR_MP3_HAVE_SIMD + if (ma_dr_mp3_have_simd()) + { + for (; i < n - 3; i += 4) + { + ma_dr_mp3_f4 vl = MA_DR_MP3_VLD(left + i); + ma_dr_mp3_f4 vr = MA_DR_MP3_VLD(right + i); + MA_DR_MP3_VSTORE(left + i, MA_DR_MP3_VADD(vl, vr)); + MA_DR_MP3_VSTORE(right + i, MA_DR_MP3_VSUB(vl, vr)); + } +#ifdef __GNUC__ + if (__builtin_constant_p(n % 4 == 0) && n % 4 == 0) + return; +#endif + } +#endif + for (; i < n; i++) + { + float a = left[i]; + float b = right[i]; + left[i] = a + b; + right[i] = a - b; + } +} +static void ma_dr_mp3_L3_intensity_stereo_band(float *left, int n, float kl, float kr) +{ + int i; + for (i = 0; i < n; i++) + { + left[i + 576] = left[i]*kr; + left[i] = left[i]*kl; + } +} +static void ma_dr_mp3_L3_stereo_top_band(const float *right, const ma_uint8 *sfb, int nbands, int max_band[3]) +{ + int i, k; + max_band[0] = max_band[1] = max_band[2] = -1; + for (i = 0; i < nbands; i++) + { + for (k = 0; k < sfb[i]; k += 2) + { + if (right[k] != 0 || right[k + 1] != 0) + { + max_band[i % 3] = i; + break; + } + } + right += sfb[i]; + } +} +static void ma_dr_mp3_L3_stereo_process(float *left, const ma_uint8 *ist_pos, const ma_uint8 *sfb, const ma_uint8 *hdr, int max_band[3], int mpeg2_sh) +{ + static const float g_pan[7*2] = { 0,1,0.21132487f,0.78867513f,0.36602540f,0.63397460f,0.5f,0.5f,0.63397460f,0.36602540f,0.78867513f,0.21132487f,1,0 }; + unsigned i, max_pos = MA_DR_MP3_HDR_TEST_MPEG1(hdr) ? 7 : 64; + for (i = 0; sfb[i]; i++) + { + unsigned ipos = ist_pos[i]; + if ((int)i > max_band[i % 3] && ipos < max_pos) + { + float kl, kr, s = MA_DR_MP3_HDR_TEST_MS_STEREO(hdr) ? 1.41421356f : 1; + if (MA_DR_MP3_HDR_TEST_MPEG1(hdr)) + { + kl = g_pan[2*ipos]; + kr = g_pan[2*ipos + 1]; + } else + { + kl = 1; + kr = ma_dr_mp3_L3_ldexp_q2(1, (ipos + 1) >> 1 << mpeg2_sh); + if (ipos & 1) + { + kl = kr; + kr = 1; + } + } + ma_dr_mp3_L3_intensity_stereo_band(left, sfb[i], kl*s, kr*s); + } else if (MA_DR_MP3_HDR_TEST_MS_STEREO(hdr)) + { + ma_dr_mp3_L3_midside_stereo(left, sfb[i]); + } + left += sfb[i]; + } +} +static void ma_dr_mp3_L3_intensity_stereo(float *left, ma_uint8 *ist_pos, const ma_dr_mp3_L3_gr_info *gr, const ma_uint8 *hdr) +{ + int max_band[3], n_sfb = gr->n_long_sfb + gr->n_short_sfb; + int i, max_blocks = gr->n_short_sfb ? 3 : 1; + ma_dr_mp3_L3_stereo_top_band(left + 576, gr->sfbtab, n_sfb, max_band); + if (gr->n_long_sfb) + { + max_band[0] = max_band[1] = max_band[2] = MA_DR_MP3_MAX(MA_DR_MP3_MAX(max_band[0], max_band[1]), max_band[2]); + } + for (i = 0; i < max_blocks; i++) + { + int default_pos = MA_DR_MP3_HDR_TEST_MPEG1(hdr) ? 3 : 0; + int itop = n_sfb - max_blocks + i; + int prev = itop - max_blocks; + ist_pos[itop] = (ma_uint8)(max_band[i] >= prev ? 
default_pos : ist_pos[prev]); + } + ma_dr_mp3_L3_stereo_process(left, ist_pos, gr->sfbtab, hdr, max_band, gr[1].scalefac_compress & 1); +} +static void ma_dr_mp3_L3_reorder(float *grbuf, float *scratch, const ma_uint8 *sfb) +{ + int i, len; + float *src = grbuf, *dst = scratch; + for (;0 != (len = *sfb); sfb += 3, src += 2*len) + { + for (i = 0; i < len; i++, src++) + { + *dst++ = src[0*len]; + *dst++ = src[1*len]; + *dst++ = src[2*len]; + } + } + MA_DR_MP3_COPY_MEMORY(grbuf, scratch, (dst - scratch)*sizeof(float)); +} +static void ma_dr_mp3_L3_antialias(float *grbuf, int nbands) +{ + static const float g_aa[2][8] = { + {0.85749293f,0.88174200f,0.94962865f,0.98331459f,0.99551782f,0.99916056f,0.99989920f,0.99999316f}, + {0.51449576f,0.47173197f,0.31337745f,0.18191320f,0.09457419f,0.04096558f,0.01419856f,0.00369997f} + }; + for (; nbands > 0; nbands--, grbuf += 18) + { + int i = 0; +#if MA_DR_MP3_HAVE_SIMD + if (ma_dr_mp3_have_simd()) for (; i < 8; i += 4) + { + ma_dr_mp3_f4 vu = MA_DR_MP3_VLD(grbuf + 18 + i); + ma_dr_mp3_f4 vd = MA_DR_MP3_VLD(grbuf + 14 - i); + ma_dr_mp3_f4 vc0 = MA_DR_MP3_VLD(g_aa[0] + i); + ma_dr_mp3_f4 vc1 = MA_DR_MP3_VLD(g_aa[1] + i); + vd = MA_DR_MP3_VREV(vd); + MA_DR_MP3_VSTORE(grbuf + 18 + i, MA_DR_MP3_VSUB(MA_DR_MP3_VMUL(vu, vc0), MA_DR_MP3_VMUL(vd, vc1))); + vd = MA_DR_MP3_VADD(MA_DR_MP3_VMUL(vu, vc1), MA_DR_MP3_VMUL(vd, vc0)); + MA_DR_MP3_VSTORE(grbuf + 14 - i, MA_DR_MP3_VREV(vd)); + } +#endif +#ifndef MA_DR_MP3_ONLY_SIMD + for(; i < 8; i++) + { + float u = grbuf[18 + i]; + float d = grbuf[17 - i]; + grbuf[18 + i] = u*g_aa[0][i] - d*g_aa[1][i]; + grbuf[17 - i] = u*g_aa[1][i] + d*g_aa[0][i]; + } +#endif + } +} +static void ma_dr_mp3_L3_dct3_9(float *y) +{ + float s0, s1, s2, s3, s4, s5, s6, s7, s8, t0, t2, t4; + s0 = y[0]; s2 = y[2]; s4 = y[4]; s6 = y[6]; s8 = y[8]; + t0 = s0 + s6*0.5f; + s0 -= s6; + t4 = (s4 + s2)*0.93969262f; + t2 = (s8 + s2)*0.76604444f; + s6 = (s4 - s8)*0.17364818f; + s4 += s8 - s2; + s2 = s0 - s4*0.5f; + y[4] = s4 + s0; + s8 = t0 - t2 + s6; + s0 = t0 - t4 + t2; + s4 = t0 + t4 - s6; + s1 = y[1]; s3 = y[3]; s5 = y[5]; s7 = y[7]; + s3 *= 0.86602540f; + t0 = (s5 + s1)*0.98480775f; + t4 = (s5 - s7)*0.34202014f; + t2 = (s1 + s7)*0.64278761f; + s1 = (s1 - s5 - s7)*0.86602540f; + s5 = t0 - s3 - t2; + s7 = t4 - s3 - t0; + s3 = t4 + s3 - t2; + y[0] = s4 - s7; + y[1] = s2 + s1; + y[2] = s0 - s3; + y[3] = s8 + s5; + y[5] = s8 - s5; + y[6] = s0 + s3; + y[7] = s2 - s1; + y[8] = s4 + s7; +} +static void ma_dr_mp3_L3_imdct36(float *grbuf, float *overlap, const float *window, int nbands) +{ + int i, j; + static const float g_twid9[18] = { + 0.73727734f,0.79335334f,0.84339145f,0.88701083f,0.92387953f,0.95371695f,0.97629601f,0.99144486f,0.99904822f,0.67559021f,0.60876143f,0.53729961f,0.46174861f,0.38268343f,0.30070580f,0.21643961f,0.13052619f,0.04361938f + }; + for (j = 0; j < nbands; j++, grbuf += 18, overlap += 9) + { + float co[9], si[9]; + co[0] = -grbuf[0]; + si[0] = grbuf[17]; + for (i = 0; i < 4; i++) + { + si[8 - 2*i] = grbuf[4*i + 1] - grbuf[4*i + 2]; + co[1 + 2*i] = grbuf[4*i + 1] + grbuf[4*i + 2]; + si[7 - 2*i] = grbuf[4*i + 4] - grbuf[4*i + 3]; + co[2 + 2*i] = -(grbuf[4*i + 3] + grbuf[4*i + 4]); + } + ma_dr_mp3_L3_dct3_9(co); + ma_dr_mp3_L3_dct3_9(si); + si[1] = -si[1]; + si[3] = -si[3]; + si[5] = -si[5]; + si[7] = -si[7]; + i = 0; +#if MA_DR_MP3_HAVE_SIMD + if (ma_dr_mp3_have_simd()) for (; i < 8; i += 4) + { + ma_dr_mp3_f4 vovl = MA_DR_MP3_VLD(overlap + i); + ma_dr_mp3_f4 vc = MA_DR_MP3_VLD(co + i); + ma_dr_mp3_f4 vs = MA_DR_MP3_VLD(si + i); + 
ma_dr_mp3_f4 vr0 = MA_DR_MP3_VLD(g_twid9 + i); + ma_dr_mp3_f4 vr1 = MA_DR_MP3_VLD(g_twid9 + 9 + i); + ma_dr_mp3_f4 vw0 = MA_DR_MP3_VLD(window + i); + ma_dr_mp3_f4 vw1 = MA_DR_MP3_VLD(window + 9 + i); + ma_dr_mp3_f4 vsum = MA_DR_MP3_VADD(MA_DR_MP3_VMUL(vc, vr1), MA_DR_MP3_VMUL(vs, vr0)); + MA_DR_MP3_VSTORE(overlap + i, MA_DR_MP3_VSUB(MA_DR_MP3_VMUL(vc, vr0), MA_DR_MP3_VMUL(vs, vr1))); + MA_DR_MP3_VSTORE(grbuf + i, MA_DR_MP3_VSUB(MA_DR_MP3_VMUL(vovl, vw0), MA_DR_MP3_VMUL(vsum, vw1))); + vsum = MA_DR_MP3_VADD(MA_DR_MP3_VMUL(vovl, vw1), MA_DR_MP3_VMUL(vsum, vw0)); + MA_DR_MP3_VSTORE(grbuf + 14 - i, MA_DR_MP3_VREV(vsum)); + } +#endif + for (; i < 9; i++) + { + float ovl = overlap[i]; + float sum = co[i]*g_twid9[9 + i] + si[i]*g_twid9[0 + i]; + overlap[i] = co[i]*g_twid9[0 + i] - si[i]*g_twid9[9 + i]; + grbuf[i] = ovl*window[0 + i] - sum*window[9 + i]; + grbuf[17 - i] = ovl*window[9 + i] + sum*window[0 + i]; + } + } +} +static void ma_dr_mp3_L3_idct3(float x0, float x1, float x2, float *dst) +{ + float m1 = x1*0.86602540f; + float a1 = x0 - x2*0.5f; + dst[1] = x0 + x2; + dst[0] = a1 + m1; + dst[2] = a1 - m1; +} +static void ma_dr_mp3_L3_imdct12(float *x, float *dst, float *overlap) +{ + static const float g_twid3[6] = { 0.79335334f,0.92387953f,0.99144486f, 0.60876143f,0.38268343f,0.13052619f }; + float co[3], si[3]; + int i; + ma_dr_mp3_L3_idct3(-x[0], x[6] + x[3], x[12] + x[9], co); + ma_dr_mp3_L3_idct3(x[15], x[12] - x[9], x[6] - x[3], si); + si[1] = -si[1]; + for (i = 0; i < 3; i++) + { + float ovl = overlap[i]; + float sum = co[i]*g_twid3[3 + i] + si[i]*g_twid3[0 + i]; + overlap[i] = co[i]*g_twid3[0 + i] - si[i]*g_twid3[3 + i]; + dst[i] = ovl*g_twid3[2 - i] - sum*g_twid3[5 - i]; + dst[5 - i] = ovl*g_twid3[5 - i] + sum*g_twid3[2 - i]; + } +} +static void ma_dr_mp3_L3_imdct_short(float *grbuf, float *overlap, int nbands) +{ + for (;nbands > 0; nbands--, overlap += 9, grbuf += 18) + { + float tmp[18]; + MA_DR_MP3_COPY_MEMORY(tmp, grbuf, sizeof(tmp)); + MA_DR_MP3_COPY_MEMORY(grbuf, overlap, 6*sizeof(float)); + ma_dr_mp3_L3_imdct12(tmp, grbuf + 6, overlap + 6); + ma_dr_mp3_L3_imdct12(tmp + 1, grbuf + 12, overlap + 6); + ma_dr_mp3_L3_imdct12(tmp + 2, overlap, overlap + 6); + } +} +static void ma_dr_mp3_L3_change_sign(float *grbuf) +{ + int b, i; + for (b = 0, grbuf += 18; b < 32; b += 2, grbuf += 36) + for (i = 1; i < 18; i += 2) + grbuf[i] = -grbuf[i]; +} +static void ma_dr_mp3_L3_imdct_gr(float *grbuf, float *overlap, unsigned block_type, unsigned n_long_bands) +{ + static const float g_mdct_window[2][18] = { + { 0.99904822f,0.99144486f,0.97629601f,0.95371695f,0.92387953f,0.88701083f,0.84339145f,0.79335334f,0.73727734f,0.04361938f,0.13052619f,0.21643961f,0.30070580f,0.38268343f,0.46174861f,0.53729961f,0.60876143f,0.67559021f }, + { 1,1,1,1,1,1,0.99144486f,0.92387953f,0.79335334f,0,0,0,0,0,0,0.13052619f,0.38268343f,0.60876143f } + }; + if (n_long_bands) + { + ma_dr_mp3_L3_imdct36(grbuf, overlap, g_mdct_window[0], n_long_bands); + grbuf += 18*n_long_bands; + overlap += 9*n_long_bands; + } + if (block_type == MA_DR_MP3_SHORT_BLOCK_TYPE) + ma_dr_mp3_L3_imdct_short(grbuf, overlap, 32 - n_long_bands); + else + ma_dr_mp3_L3_imdct36(grbuf, overlap, g_mdct_window[block_type == MA_DR_MP3_STOP_BLOCK_TYPE], 32 - n_long_bands); +} +static void ma_dr_mp3_L3_save_reservoir(ma_dr_mp3dec *h, ma_dr_mp3dec_scratch *s) +{ + int pos = (s->bs.pos + 7)/8u; + int remains = s->bs.limit/8u - pos; + if (remains > MA_DR_MP3_MAX_BITRESERVOIR_BYTES) + { + pos += remains - MA_DR_MP3_MAX_BITRESERVOIR_BYTES; + remains = 
MA_DR_MP3_MAX_BITRESERVOIR_BYTES; + } + if (remains > 0) + { + MA_DR_MP3_MOVE_MEMORY(h->reserv_buf, s->maindata + pos, remains); + } + h->reserv = remains; +} +static int ma_dr_mp3_L3_restore_reservoir(ma_dr_mp3dec *h, ma_dr_mp3_bs *bs, ma_dr_mp3dec_scratch *s, int main_data_begin) +{ + int frame_bytes = (bs->limit - bs->pos)/8; + int bytes_have = MA_DR_MP3_MIN(h->reserv, main_data_begin); + MA_DR_MP3_COPY_MEMORY(s->maindata, h->reserv_buf + MA_DR_MP3_MAX(0, h->reserv - main_data_begin), MA_DR_MP3_MIN(h->reserv, main_data_begin)); + MA_DR_MP3_COPY_MEMORY(s->maindata + bytes_have, bs->buf + bs->pos/8, frame_bytes); + ma_dr_mp3_bs_init(&s->bs, s->maindata, bytes_have + frame_bytes); + return h->reserv >= main_data_begin; +} +static void ma_dr_mp3_L3_decode(ma_dr_mp3dec *h, ma_dr_mp3dec_scratch *s, ma_dr_mp3_L3_gr_info *gr_info, int nch) +{ + int ch; + for (ch = 0; ch < nch; ch++) + { + int layer3gr_limit = s->bs.pos + gr_info[ch].part_23_length; + ma_dr_mp3_L3_decode_scalefactors(h->header, s->ist_pos[ch], &s->bs, gr_info + ch, s->scf, ch); + ma_dr_mp3_L3_huffman(s->grbuf[ch], &s->bs, gr_info + ch, s->scf, layer3gr_limit); + } + if (MA_DR_MP3_HDR_TEST_I_STEREO(h->header)) + { + ma_dr_mp3_L3_intensity_stereo(s->grbuf[0], s->ist_pos[1], gr_info, h->header); + } else if (MA_DR_MP3_HDR_IS_MS_STEREO(h->header)) + { + ma_dr_mp3_L3_midside_stereo(s->grbuf[0], 576); + } + for (ch = 0; ch < nch; ch++, gr_info++) + { + int aa_bands = 31; + int n_long_bands = (gr_info->mixed_block_flag ? 2 : 0) << (int)(MA_DR_MP3_HDR_GET_MY_SAMPLE_RATE(h->header) == 2); + if (gr_info->n_short_sfb) + { + aa_bands = n_long_bands - 1; + ma_dr_mp3_L3_reorder(s->grbuf[ch] + n_long_bands*18, s->syn[0], gr_info->sfbtab + gr_info->n_long_sfb); + } + ma_dr_mp3_L3_antialias(s->grbuf[ch], aa_bands); + ma_dr_mp3_L3_imdct_gr(s->grbuf[ch], h->mdct_overlap[ch], gr_info->block_type, n_long_bands); + ma_dr_mp3_L3_change_sign(s->grbuf[ch]); + } +} +static void ma_dr_mp3d_DCT_II(float *grbuf, int n) +{ + static const float g_sec[24] = { + 10.19000816f,0.50060302f,0.50241929f,3.40760851f,0.50547093f,0.52249861f,2.05778098f,0.51544732f,0.56694406f,1.48416460f,0.53104258f,0.64682180f,1.16943991f,0.55310392f,0.78815460f,0.97256821f,0.58293498f,1.06067765f,0.83934963f,0.62250412f,1.72244716f,0.74453628f,0.67480832f,5.10114861f + }; + int i, k = 0; +#if MA_DR_MP3_HAVE_SIMD + if (ma_dr_mp3_have_simd()) for (; k < n; k += 4) + { + ma_dr_mp3_f4 t[4][8], *x; + float *y = grbuf + k; + for (x = t[0], i = 0; i < 8; i++, x++) + { + ma_dr_mp3_f4 x0 = MA_DR_MP3_VLD(&y[i*18]); + ma_dr_mp3_f4 x1 = MA_DR_MP3_VLD(&y[(15 - i)*18]); + ma_dr_mp3_f4 x2 = MA_DR_MP3_VLD(&y[(16 + i)*18]); + ma_dr_mp3_f4 x3 = MA_DR_MP3_VLD(&y[(31 - i)*18]); + ma_dr_mp3_f4 t0 = MA_DR_MP3_VADD(x0, x3); + ma_dr_mp3_f4 t1 = MA_DR_MP3_VADD(x1, x2); + ma_dr_mp3_f4 t2 = MA_DR_MP3_VMUL_S(MA_DR_MP3_VSUB(x1, x2), g_sec[3*i + 0]); + ma_dr_mp3_f4 t3 = MA_DR_MP3_VMUL_S(MA_DR_MP3_VSUB(x0, x3), g_sec[3*i + 1]); + x[0] = MA_DR_MP3_VADD(t0, t1); + x[8] = MA_DR_MP3_VMUL_S(MA_DR_MP3_VSUB(t0, t1), g_sec[3*i + 2]); + x[16] = MA_DR_MP3_VADD(t3, t2); + x[24] = MA_DR_MP3_VMUL_S(MA_DR_MP3_VSUB(t3, t2), g_sec[3*i + 2]); + } + for (x = t[0], i = 0; i < 4; i++, x += 8) + { + ma_dr_mp3_f4 x0 = x[0], x1 = x[1], x2 = x[2], x3 = x[3], x4 = x[4], x5 = x[5], x6 = x[6], x7 = x[7], xt; + xt = MA_DR_MP3_VSUB(x0, x7); x0 = MA_DR_MP3_VADD(x0, x7); + x7 = MA_DR_MP3_VSUB(x1, x6); x1 = MA_DR_MP3_VADD(x1, x6); + x6 = MA_DR_MP3_VSUB(x2, x5); x2 = MA_DR_MP3_VADD(x2, x5); + x5 = MA_DR_MP3_VSUB(x3, x4); x3 = MA_DR_MP3_VADD(x3, 
x4); + x4 = MA_DR_MP3_VSUB(x0, x3); x0 = MA_DR_MP3_VADD(x0, x3); + x3 = MA_DR_MP3_VSUB(x1, x2); x1 = MA_DR_MP3_VADD(x1, x2); + x[0] = MA_DR_MP3_VADD(x0, x1); + x[4] = MA_DR_MP3_VMUL_S(MA_DR_MP3_VSUB(x0, x1), 0.70710677f); + x5 = MA_DR_MP3_VADD(x5, x6); + x6 = MA_DR_MP3_VMUL_S(MA_DR_MP3_VADD(x6, x7), 0.70710677f); + x7 = MA_DR_MP3_VADD(x7, xt); + x3 = MA_DR_MP3_VMUL_S(MA_DR_MP3_VADD(x3, x4), 0.70710677f); + x5 = MA_DR_MP3_VSUB(x5, MA_DR_MP3_VMUL_S(x7, 0.198912367f)); + x7 = MA_DR_MP3_VADD(x7, MA_DR_MP3_VMUL_S(x5, 0.382683432f)); + x5 = MA_DR_MP3_VSUB(x5, MA_DR_MP3_VMUL_S(x7, 0.198912367f)); + x0 = MA_DR_MP3_VSUB(xt, x6); xt = MA_DR_MP3_VADD(xt, x6); + x[1] = MA_DR_MP3_VMUL_S(MA_DR_MP3_VADD(xt, x7), 0.50979561f); + x[2] = MA_DR_MP3_VMUL_S(MA_DR_MP3_VADD(x4, x3), 0.54119611f); + x[3] = MA_DR_MP3_VMUL_S(MA_DR_MP3_VSUB(x0, x5), 0.60134488f); + x[5] = MA_DR_MP3_VMUL_S(MA_DR_MP3_VADD(x0, x5), 0.89997619f); + x[6] = MA_DR_MP3_VMUL_S(MA_DR_MP3_VSUB(x4, x3), 1.30656302f); + x[7] = MA_DR_MP3_VMUL_S(MA_DR_MP3_VSUB(xt, x7), 2.56291556f); + } + if (k > n - 3) + { +#if MA_DR_MP3_HAVE_SSE +#define MA_DR_MP3_VSAVE2(i, v) _mm_storel_pi((__m64 *)(void*)&y[i*18], v) +#else +#define MA_DR_MP3_VSAVE2(i, v) vst1_f32((float32_t *)&y[(i)*18], vget_low_f32(v)) +#endif + for (i = 0; i < 7; i++, y += 4*18) + { + ma_dr_mp3_f4 s = MA_DR_MP3_VADD(t[3][i], t[3][i + 1]); + MA_DR_MP3_VSAVE2(0, t[0][i]); + MA_DR_MP3_VSAVE2(1, MA_DR_MP3_VADD(t[2][i], s)); + MA_DR_MP3_VSAVE2(2, MA_DR_MP3_VADD(t[1][i], t[1][i + 1])); + MA_DR_MP3_VSAVE2(3, MA_DR_MP3_VADD(t[2][1 + i], s)); + } + MA_DR_MP3_VSAVE2(0, t[0][7]); + MA_DR_MP3_VSAVE2(1, MA_DR_MP3_VADD(t[2][7], t[3][7])); + MA_DR_MP3_VSAVE2(2, t[1][7]); + MA_DR_MP3_VSAVE2(3, t[3][7]); + } else + { +#define MA_DR_MP3_VSAVE4(i, v) MA_DR_MP3_VSTORE(&y[(i)*18], v) + for (i = 0; i < 7; i++, y += 4*18) + { + ma_dr_mp3_f4 s = MA_DR_MP3_VADD(t[3][i], t[3][i + 1]); + MA_DR_MP3_VSAVE4(0, t[0][i]); + MA_DR_MP3_VSAVE4(1, MA_DR_MP3_VADD(t[2][i], s)); + MA_DR_MP3_VSAVE4(2, MA_DR_MP3_VADD(t[1][i], t[1][i + 1])); + MA_DR_MP3_VSAVE4(3, MA_DR_MP3_VADD(t[2][1 + i], s)); + } + MA_DR_MP3_VSAVE4(0, t[0][7]); + MA_DR_MP3_VSAVE4(1, MA_DR_MP3_VADD(t[2][7], t[3][7])); + MA_DR_MP3_VSAVE4(2, t[1][7]); + MA_DR_MP3_VSAVE4(3, t[3][7]); + } + } else +#endif +#ifdef MA_DR_MP3_ONLY_SIMD + {} +#else + for (; k < n; k++) + { + float t[4][8], *x, *y = grbuf + k; + for (x = t[0], i = 0; i < 8; i++, x++) + { + float x0 = y[i*18]; + float x1 = y[(15 - i)*18]; + float x2 = y[(16 + i)*18]; + float x3 = y[(31 - i)*18]; + float t0 = x0 + x3; + float t1 = x1 + x2; + float t2 = (x1 - x2)*g_sec[3*i + 0]; + float t3 = (x0 - x3)*g_sec[3*i + 1]; + x[0] = t0 + t1; + x[8] = (t0 - t1)*g_sec[3*i + 2]; + x[16] = t3 + t2; + x[24] = (t3 - t2)*g_sec[3*i + 2]; + } + for (x = t[0], i = 0; i < 4; i++, x += 8) + { + float x0 = x[0], x1 = x[1], x2 = x[2], x3 = x[3], x4 = x[4], x5 = x[5], x6 = x[6], x7 = x[7], xt; + xt = x0 - x7; x0 += x7; + x7 = x1 - x6; x1 += x6; + x6 = x2 - x5; x2 += x5; + x5 = x3 - x4; x3 += x4; + x4 = x0 - x3; x0 += x3; + x3 = x1 - x2; x1 += x2; + x[0] = x0 + x1; + x[4] = (x0 - x1)*0.70710677f; + x5 = x5 + x6; + x6 = (x6 + x7)*0.70710677f; + x7 = x7 + xt; + x3 = (x3 + x4)*0.70710677f; + x5 -= x7*0.198912367f; + x7 += x5*0.382683432f; + x5 -= x7*0.198912367f; + x0 = xt - x6; xt += x6; + x[1] = (xt + x7)*0.50979561f; + x[2] = (x4 + x3)*0.54119611f; + x[3] = (x0 - x5)*0.60134488f; + x[5] = (x0 + x5)*0.89997619f; + x[6] = (x4 - x3)*1.30656302f; + x[7] = (xt - x7)*2.56291556f; + } + for (i = 0; i < 7; i++, y += 4*18) + { + y[0*18] = 
t[0][i]; + y[1*18] = t[2][i] + t[3][i] + t[3][i + 1]; + y[2*18] = t[1][i] + t[1][i + 1]; + y[3*18] = t[2][i + 1] + t[3][i] + t[3][i + 1]; + } + y[0*18] = t[0][7]; + y[1*18] = t[2][7] + t[3][7]; + y[2*18] = t[1][7]; + y[3*18] = t[3][7]; + } +#endif +} +#ifndef MA_DR_MP3_FLOAT_OUTPUT +typedef ma_int16 ma_dr_mp3d_sample_t; +static ma_int16 ma_dr_mp3d_scale_pcm(float sample) +{ + ma_int16 s; +#if MA_DR_MP3_HAVE_ARMV6 + ma_int32 s32 = (ma_int32)(sample + .5f); + s32 -= (s32 < 0); + s = (ma_int16)ma_dr_mp3_clip_int16_arm(s32); +#else + if (sample >= 32766.5f) return (ma_int16) 32767; + if (sample <= -32767.5f) return (ma_int16)-32768; + s = (ma_int16)(sample + .5f); + s -= (s < 0); +#endif + return s; +} +#else +typedef float ma_dr_mp3d_sample_t; +static float ma_dr_mp3d_scale_pcm(float sample) +{ + return sample*(1.f/32768.f); +} +#endif +static void ma_dr_mp3d_synth_pair(ma_dr_mp3d_sample_t *pcm, int nch, const float *z) +{ + float a; + a = (z[14*64] - z[ 0]) * 29; + a += (z[ 1*64] + z[13*64]) * 213; + a += (z[12*64] - z[ 2*64]) * 459; + a += (z[ 3*64] + z[11*64]) * 2037; + a += (z[10*64] - z[ 4*64]) * 5153; + a += (z[ 5*64] + z[ 9*64]) * 6574; + a += (z[ 8*64] - z[ 6*64]) * 37489; + a += z[ 7*64] * 75038; + pcm[0] = ma_dr_mp3d_scale_pcm(a); + z += 2; + a = z[14*64] * 104; + a += z[12*64] * 1567; + a += z[10*64] * 9727; + a += z[ 8*64] * 64019; + a += z[ 6*64] * -9975; + a += z[ 4*64] * -45; + a += z[ 2*64] * 146; + a += z[ 0*64] * -5; + pcm[16*nch] = ma_dr_mp3d_scale_pcm(a); +} +static void ma_dr_mp3d_synth(float *xl, ma_dr_mp3d_sample_t *dstl, int nch, float *lins) +{ + int i; + float *xr = xl + 576*(nch - 1); + ma_dr_mp3d_sample_t *dstr = dstl + (nch - 1); + static const float g_win[] = { + -1,26,-31,208,218,401,-519,2063,2000,4788,-5517,7134,5959,35640,-39336,74992, + -1,24,-35,202,222,347,-581,2080,1952,4425,-5879,7640,5288,33791,-41176,74856, + -1,21,-38,196,225,294,-645,2087,1893,4063,-6237,8092,4561,31947,-43006,74630, + -1,19,-41,190,227,244,-711,2085,1822,3705,-6589,8492,3776,30112,-44821,74313, + -1,17,-45,183,228,197,-779,2075,1739,3351,-6935,8840,2935,28289,-46617,73908, + -1,16,-49,176,228,153,-848,2057,1644,3004,-7271,9139,2037,26482,-48390,73415, + -2,14,-53,169,227,111,-919,2032,1535,2663,-7597,9389,1082,24694,-50137,72835, + -2,13,-58,161,224,72,-991,2001,1414,2330,-7910,9592,70,22929,-51853,72169, + -2,11,-63,154,221,36,-1064,1962,1280,2006,-8209,9750,-998,21189,-53534,71420, + -2,10,-68,147,215,2,-1137,1919,1131,1692,-8491,9863,-2122,19478,-55178,70590, + -3,9,-73,139,208,-29,-1210,1870,970,1388,-8755,9935,-3300,17799,-56778,69679, + -3,8,-79,132,200,-57,-1283,1817,794,1095,-8998,9966,-4533,16155,-58333,68692, + -4,7,-85,125,189,-83,-1356,1759,605,814,-9219,9959,-5818,14548,-59838,67629, + -4,7,-91,117,177,-106,-1428,1698,402,545,-9416,9916,-7154,12980,-61289,66494, + -5,6,-97,111,163,-127,-1498,1634,185,288,-9585,9838,-8540,11455,-62684,65290 + }; + float *zlin = lins + 15*64; + const float *w = g_win; + zlin[4*15] = xl[18*16]; + zlin[4*15 + 1] = xr[18*16]; + zlin[4*15 + 2] = xl[0]; + zlin[4*15 + 3] = xr[0]; + zlin[4*31] = xl[1 + 18*16]; + zlin[4*31 + 1] = xr[1 + 18*16]; + zlin[4*31 + 2] = xl[1]; + zlin[4*31 + 3] = xr[1]; + ma_dr_mp3d_synth_pair(dstr, nch, lins + 4*15 + 1); + ma_dr_mp3d_synth_pair(dstr + 32*nch, nch, lins + 4*15 + 64 + 1); + ma_dr_mp3d_synth_pair(dstl, nch, lins + 4*15); + ma_dr_mp3d_synth_pair(dstl + 32*nch, nch, lins + 4*15 + 64); +#if MA_DR_MP3_HAVE_SIMD + if (ma_dr_mp3_have_simd()) for (i = 14; i >= 0; i--) + { +#define MA_DR_MP3_VLOAD(k) ma_dr_mp3_f4 
w0 = MA_DR_MP3_VSET(*w++); ma_dr_mp3_f4 w1 = MA_DR_MP3_VSET(*w++); ma_dr_mp3_f4 vz = MA_DR_MP3_VLD(&zlin[4*i - 64*k]); ma_dr_mp3_f4 vy = MA_DR_MP3_VLD(&zlin[4*i - 64*(15 - k)]); +#define MA_DR_MP3_V0(k) { MA_DR_MP3_VLOAD(k) b = MA_DR_MP3_VADD(MA_DR_MP3_VMUL(vz, w1), MA_DR_MP3_VMUL(vy, w0)) ; a = MA_DR_MP3_VSUB(MA_DR_MP3_VMUL(vz, w0), MA_DR_MP3_VMUL(vy, w1)); } +#define MA_DR_MP3_V1(k) { MA_DR_MP3_VLOAD(k) b = MA_DR_MP3_VADD(b, MA_DR_MP3_VADD(MA_DR_MP3_VMUL(vz, w1), MA_DR_MP3_VMUL(vy, w0))); a = MA_DR_MP3_VADD(a, MA_DR_MP3_VSUB(MA_DR_MP3_VMUL(vz, w0), MA_DR_MP3_VMUL(vy, w1))); } +#define MA_DR_MP3_V2(k) { MA_DR_MP3_VLOAD(k) b = MA_DR_MP3_VADD(b, MA_DR_MP3_VADD(MA_DR_MP3_VMUL(vz, w1), MA_DR_MP3_VMUL(vy, w0))); a = MA_DR_MP3_VADD(a, MA_DR_MP3_VSUB(MA_DR_MP3_VMUL(vy, w1), MA_DR_MP3_VMUL(vz, w0))); } + ma_dr_mp3_f4 a, b; + zlin[4*i] = xl[18*(31 - i)]; + zlin[4*i + 1] = xr[18*(31 - i)]; + zlin[4*i + 2] = xl[1 + 18*(31 - i)]; + zlin[4*i + 3] = xr[1 + 18*(31 - i)]; + zlin[4*i + 64] = xl[1 + 18*(1 + i)]; + zlin[4*i + 64 + 1] = xr[1 + 18*(1 + i)]; + zlin[4*i - 64 + 2] = xl[18*(1 + i)]; + zlin[4*i - 64 + 3] = xr[18*(1 + i)]; + MA_DR_MP3_V0(0) MA_DR_MP3_V2(1) MA_DR_MP3_V1(2) MA_DR_MP3_V2(3) MA_DR_MP3_V1(4) MA_DR_MP3_V2(5) MA_DR_MP3_V1(6) MA_DR_MP3_V2(7) + { +#ifndef MA_DR_MP3_FLOAT_OUTPUT +#if MA_DR_MP3_HAVE_SSE + static const ma_dr_mp3_f4 g_max = { 32767.0f, 32767.0f, 32767.0f, 32767.0f }; + static const ma_dr_mp3_f4 g_min = { -32768.0f, -32768.0f, -32768.0f, -32768.0f }; + __m128i pcm8 = _mm_packs_epi32(_mm_cvtps_epi32(_mm_max_ps(_mm_min_ps(a, g_max), g_min)), + _mm_cvtps_epi32(_mm_max_ps(_mm_min_ps(b, g_max), g_min))); + dstr[(15 - i)*nch] = (ma_int16)_mm_extract_epi16(pcm8, 1); + dstr[(17 + i)*nch] = (ma_int16)_mm_extract_epi16(pcm8, 5); + dstl[(15 - i)*nch] = (ma_int16)_mm_extract_epi16(pcm8, 0); + dstl[(17 + i)*nch] = (ma_int16)_mm_extract_epi16(pcm8, 4); + dstr[(47 - i)*nch] = (ma_int16)_mm_extract_epi16(pcm8, 3); + dstr[(49 + i)*nch] = (ma_int16)_mm_extract_epi16(pcm8, 7); + dstl[(47 - i)*nch] = (ma_int16)_mm_extract_epi16(pcm8, 2); + dstl[(49 + i)*nch] = (ma_int16)_mm_extract_epi16(pcm8, 6); +#else + int16x4_t pcma, pcmb; + a = MA_DR_MP3_VADD(a, MA_DR_MP3_VSET(0.5f)); + b = MA_DR_MP3_VADD(b, MA_DR_MP3_VSET(0.5f)); + pcma = vqmovn_s32(vqaddq_s32(vcvtq_s32_f32(a), vreinterpretq_s32_u32(vcltq_f32(a, MA_DR_MP3_VSET(0))))); + pcmb = vqmovn_s32(vqaddq_s32(vcvtq_s32_f32(b), vreinterpretq_s32_u32(vcltq_f32(b, MA_DR_MP3_VSET(0))))); + vst1_lane_s16(dstr + (15 - i)*nch, pcma, 1); + vst1_lane_s16(dstr + (17 + i)*nch, pcmb, 1); + vst1_lane_s16(dstl + (15 - i)*nch, pcma, 0); + vst1_lane_s16(dstl + (17 + i)*nch, pcmb, 0); + vst1_lane_s16(dstr + (47 - i)*nch, pcma, 3); + vst1_lane_s16(dstr + (49 + i)*nch, pcmb, 3); + vst1_lane_s16(dstl + (47 - i)*nch, pcma, 2); + vst1_lane_s16(dstl + (49 + i)*nch, pcmb, 2); +#endif +#else + #if MA_DR_MP3_HAVE_SSE + static const ma_dr_mp3_f4 g_scale = { 1.0f/32768.0f, 1.0f/32768.0f, 1.0f/32768.0f, 1.0f/32768.0f }; + #else + const ma_dr_mp3_f4 g_scale = vdupq_n_f32(1.0f/32768.0f); + #endif + a = MA_DR_MP3_VMUL(a, g_scale); + b = MA_DR_MP3_VMUL(b, g_scale); +#if MA_DR_MP3_HAVE_SSE + _mm_store_ss(dstr + (15 - i)*nch, _mm_shuffle_ps(a, a, _MM_SHUFFLE(1, 1, 1, 1))); + _mm_store_ss(dstr + (17 + i)*nch, _mm_shuffle_ps(b, b, _MM_SHUFFLE(1, 1, 1, 1))); + _mm_store_ss(dstl + (15 - i)*nch, _mm_shuffle_ps(a, a, _MM_SHUFFLE(0, 0, 0, 0))); + _mm_store_ss(dstl + (17 + i)*nch, _mm_shuffle_ps(b, b, _MM_SHUFFLE(0, 0, 0, 0))); + _mm_store_ss(dstr + (47 - i)*nch, _mm_shuffle_ps(a, a, 
_MM_SHUFFLE(3, 3, 3, 3))); + _mm_store_ss(dstr + (49 + i)*nch, _mm_shuffle_ps(b, b, _MM_SHUFFLE(3, 3, 3, 3))); + _mm_store_ss(dstl + (47 - i)*nch, _mm_shuffle_ps(a, a, _MM_SHUFFLE(2, 2, 2, 2))); + _mm_store_ss(dstl + (49 + i)*nch, _mm_shuffle_ps(b, b, _MM_SHUFFLE(2, 2, 2, 2))); +#else + vst1q_lane_f32(dstr + (15 - i)*nch, a, 1); + vst1q_lane_f32(dstr + (17 + i)*nch, b, 1); + vst1q_lane_f32(dstl + (15 - i)*nch, a, 0); + vst1q_lane_f32(dstl + (17 + i)*nch, b, 0); + vst1q_lane_f32(dstr + (47 - i)*nch, a, 3); + vst1q_lane_f32(dstr + (49 + i)*nch, b, 3); + vst1q_lane_f32(dstl + (47 - i)*nch, a, 2); + vst1q_lane_f32(dstl + (49 + i)*nch, b, 2); +#endif +#endif + } + } else +#endif +#ifdef MA_DR_MP3_ONLY_SIMD + {} +#else + for (i = 14; i >= 0; i--) + { +#define MA_DR_MP3_LOAD(k) float w0 = *w++; float w1 = *w++; float *vz = &zlin[4*i - k*64]; float *vy = &zlin[4*i - (15 - k)*64]; +#define MA_DR_MP3_S0(k) { int j; MA_DR_MP3_LOAD(k); for (j = 0; j < 4; j++) b[j] = vz[j]*w1 + vy[j]*w0, a[j] = vz[j]*w0 - vy[j]*w1; } +#define MA_DR_MP3_S1(k) { int j; MA_DR_MP3_LOAD(k); for (j = 0; j < 4; j++) b[j] += vz[j]*w1 + vy[j]*w0, a[j] += vz[j]*w0 - vy[j]*w1; } +#define MA_DR_MP3_S2(k) { int j; MA_DR_MP3_LOAD(k); for (j = 0; j < 4; j++) b[j] += vz[j]*w1 + vy[j]*w0, a[j] += vy[j]*w1 - vz[j]*w0; } + float a[4], b[4]; + zlin[4*i] = xl[18*(31 - i)]; + zlin[4*i + 1] = xr[18*(31 - i)]; + zlin[4*i + 2] = xl[1 + 18*(31 - i)]; + zlin[4*i + 3] = xr[1 + 18*(31 - i)]; + zlin[4*(i + 16)] = xl[1 + 18*(1 + i)]; + zlin[4*(i + 16) + 1] = xr[1 + 18*(1 + i)]; + zlin[4*(i - 16) + 2] = xl[18*(1 + i)]; + zlin[4*(i - 16) + 3] = xr[18*(1 + i)]; + MA_DR_MP3_S0(0) MA_DR_MP3_S2(1) MA_DR_MP3_S1(2) MA_DR_MP3_S2(3) MA_DR_MP3_S1(4) MA_DR_MP3_S2(5) MA_DR_MP3_S1(6) MA_DR_MP3_S2(7) + dstr[(15 - i)*nch] = ma_dr_mp3d_scale_pcm(a[1]); + dstr[(17 + i)*nch] = ma_dr_mp3d_scale_pcm(b[1]); + dstl[(15 - i)*nch] = ma_dr_mp3d_scale_pcm(a[0]); + dstl[(17 + i)*nch] = ma_dr_mp3d_scale_pcm(b[0]); + dstr[(47 - i)*nch] = ma_dr_mp3d_scale_pcm(a[3]); + dstr[(49 + i)*nch] = ma_dr_mp3d_scale_pcm(b[3]); + dstl[(47 - i)*nch] = ma_dr_mp3d_scale_pcm(a[2]); + dstl[(49 + i)*nch] = ma_dr_mp3d_scale_pcm(b[2]); + } +#endif +} +static void ma_dr_mp3d_synth_granule(float *qmf_state, float *grbuf, int nbands, int nch, ma_dr_mp3d_sample_t *pcm, float *lins) +{ + int i; + for (i = 0; i < nch; i++) + { + ma_dr_mp3d_DCT_II(grbuf + 576*i, nbands); + } + MA_DR_MP3_COPY_MEMORY(lins, qmf_state, sizeof(float)*15*64); + for (i = 0; i < nbands; i += 2) + { + ma_dr_mp3d_synth(grbuf + i, pcm + 32*nch*i, nch, lins + i*64); + } +#ifndef MA_DR_MP3_NONSTANDARD_BUT_LOGICAL + if (nch == 1) + { + for (i = 0; i < 15*64; i += 2) + { + qmf_state[i] = lins[nbands*64 + i]; + } + } else +#endif + { + MA_DR_MP3_COPY_MEMORY(qmf_state, lins + nbands*64, sizeof(float)*15*64); + } +} +static int ma_dr_mp3d_match_frame(const ma_uint8 *hdr, int mp3_bytes, int frame_bytes) +{ + int i, nmatch; + for (i = 0, nmatch = 0; nmatch < MA_DR_MP3_MAX_FRAME_SYNC_MATCHES; nmatch++) + { + i += ma_dr_mp3_hdr_frame_bytes(hdr + i, frame_bytes) + ma_dr_mp3_hdr_padding(hdr + i); + if (i + MA_DR_MP3_HDR_SIZE > mp3_bytes) + return nmatch > 0; + if (!ma_dr_mp3_hdr_compare(hdr, hdr + i)) + return 0; + } + return 1; +} +static int ma_dr_mp3d_find_frame(const ma_uint8 *mp3, int mp3_bytes, int *free_format_bytes, int *ptr_frame_bytes) +{ + int i, k; + for (i = 0; i < mp3_bytes - MA_DR_MP3_HDR_SIZE; i++, mp3++) + { + if (ma_dr_mp3_hdr_valid(mp3)) + { + int frame_bytes = ma_dr_mp3_hdr_frame_bytes(mp3, *free_format_bytes); + int 
frame_and_padding = frame_bytes + ma_dr_mp3_hdr_padding(mp3); + for (k = MA_DR_MP3_HDR_SIZE; !frame_bytes && k < MA_DR_MP3_MAX_FREE_FORMAT_FRAME_SIZE && i + 2*k < mp3_bytes - MA_DR_MP3_HDR_SIZE; k++) + { + if (ma_dr_mp3_hdr_compare(mp3, mp3 + k)) + { + int fb = k - ma_dr_mp3_hdr_padding(mp3); + int nextfb = fb + ma_dr_mp3_hdr_padding(mp3 + k); + if (i + k + nextfb + MA_DR_MP3_HDR_SIZE > mp3_bytes || !ma_dr_mp3_hdr_compare(mp3, mp3 + k + nextfb)) + continue; + frame_and_padding = k; + frame_bytes = fb; + *free_format_bytes = fb; + } + } + if ((frame_bytes && i + frame_and_padding <= mp3_bytes && + ma_dr_mp3d_match_frame(mp3, mp3_bytes - i, frame_bytes)) || + (!i && frame_and_padding == mp3_bytes)) + { + *ptr_frame_bytes = frame_and_padding; + return i; + } + *free_format_bytes = 0; + } + } + *ptr_frame_bytes = 0; + return mp3_bytes; +} +MA_API void ma_dr_mp3dec_init(ma_dr_mp3dec *dec) +{ + dec->header[0] = 0; +} +MA_API int ma_dr_mp3dec_decode_frame(ma_dr_mp3dec *dec, const ma_uint8 *mp3, int mp3_bytes, void *pcm, ma_dr_mp3dec_frame_info *info) +{ + int i = 0, igr, frame_size = 0, success = 1; + const ma_uint8 *hdr; + ma_dr_mp3_bs bs_frame[1]; + ma_dr_mp3dec_scratch scratch; + if (mp3_bytes > 4 && dec->header[0] == 0xff && ma_dr_mp3_hdr_compare(dec->header, mp3)) + { + frame_size = ma_dr_mp3_hdr_frame_bytes(mp3, dec->free_format_bytes) + ma_dr_mp3_hdr_padding(mp3); + if (frame_size != mp3_bytes && (frame_size + MA_DR_MP3_HDR_SIZE > mp3_bytes || !ma_dr_mp3_hdr_compare(mp3, mp3 + frame_size))) + { + frame_size = 0; + } + } + if (!frame_size) + { + MA_DR_MP3_ZERO_MEMORY(dec, sizeof(ma_dr_mp3dec)); + i = ma_dr_mp3d_find_frame(mp3, mp3_bytes, &dec->free_format_bytes, &frame_size); + if (!frame_size || i + frame_size > mp3_bytes) + { + info->frame_bytes = i; + return 0; + } + } + hdr = mp3 + i; + MA_DR_MP3_COPY_MEMORY(dec->header, hdr, MA_DR_MP3_HDR_SIZE); + info->frame_bytes = i + frame_size; + info->channels = MA_DR_MP3_HDR_IS_MONO(hdr) ? 1 : 2; + info->hz = ma_dr_mp3_hdr_sample_rate_hz(hdr); + info->layer = 4 - MA_DR_MP3_HDR_GET_LAYER(hdr); + info->bitrate_kbps = ma_dr_mp3_hdr_bitrate_kbps(hdr); + ma_dr_mp3_bs_init(bs_frame, hdr + MA_DR_MP3_HDR_SIZE, frame_size - MA_DR_MP3_HDR_SIZE); + if (MA_DR_MP3_HDR_IS_CRC(hdr)) + { + ma_dr_mp3_bs_get_bits(bs_frame, 16); + } + if (info->layer == 3) + { + int main_data_begin = ma_dr_mp3_L3_read_side_info(bs_frame, scratch.gr_info, hdr); + if (main_data_begin < 0 || bs_frame->pos > bs_frame->limit) + { + ma_dr_mp3dec_init(dec); + return 0; + } + success = ma_dr_mp3_L3_restore_reservoir(dec, bs_frame, &scratch, main_data_begin); + if (success && pcm != NULL) + { + for (igr = 0; igr < (MA_DR_MP3_HDR_TEST_MPEG1(hdr) ? 
2 : 1); igr++, pcm = MA_DR_MP3_OFFSET_PTR(pcm, sizeof(ma_dr_mp3d_sample_t)*576*info->channels)) + { + MA_DR_MP3_ZERO_MEMORY(scratch.grbuf[0], 576*2*sizeof(float)); + ma_dr_mp3_L3_decode(dec, &scratch, scratch.gr_info + igr*info->channels, info->channels); + ma_dr_mp3d_synth_granule(dec->qmf_state, scratch.grbuf[0], 18, info->channels, (ma_dr_mp3d_sample_t*)pcm, scratch.syn[0]); + } + } + ma_dr_mp3_L3_save_reservoir(dec, &scratch); + } else + { +#ifdef MA_DR_MP3_ONLY_MP3 + return 0; +#else + ma_dr_mp3_L12_scale_info sci[1]; + if (pcm == NULL) { + return ma_dr_mp3_hdr_frame_samples(hdr); + } + ma_dr_mp3_L12_read_scale_info(hdr, bs_frame, sci); + MA_DR_MP3_ZERO_MEMORY(scratch.grbuf[0], 576*2*sizeof(float)); + for (i = 0, igr = 0; igr < 3; igr++) + { + if (12 == (i += ma_dr_mp3_L12_dequantize_granule(scratch.grbuf[0] + i, bs_frame, sci, info->layer | 1))) + { + i = 0; + ma_dr_mp3_L12_apply_scf_384(sci, sci->scf + igr, scratch.grbuf[0]); + ma_dr_mp3d_synth_granule(dec->qmf_state, scratch.grbuf[0], 12, info->channels, (ma_dr_mp3d_sample_t*)pcm, scratch.syn[0]); + MA_DR_MP3_ZERO_MEMORY(scratch.grbuf[0], 576*2*sizeof(float)); + pcm = MA_DR_MP3_OFFSET_PTR(pcm, sizeof(ma_dr_mp3d_sample_t)*384*info->channels); + } + if (bs_frame->pos > bs_frame->limit) + { + ma_dr_mp3dec_init(dec); + return 0; + } + } +#endif + } + return success*ma_dr_mp3_hdr_frame_samples(dec->header); +} +MA_API void ma_dr_mp3dec_f32_to_s16(const float *in, ma_int16 *out, size_t num_samples) +{ + size_t i = 0; +#if MA_DR_MP3_HAVE_SIMD + size_t aligned_count = num_samples & ~7; + for(; i < aligned_count; i+=8) + { + ma_dr_mp3_f4 scale = MA_DR_MP3_VSET(32768.0f); + ma_dr_mp3_f4 a = MA_DR_MP3_VMUL(MA_DR_MP3_VLD(&in[i ]), scale); + ma_dr_mp3_f4 b = MA_DR_MP3_VMUL(MA_DR_MP3_VLD(&in[i+4]), scale); +#if MA_DR_MP3_HAVE_SSE + ma_dr_mp3_f4 s16max = MA_DR_MP3_VSET( 32767.0f); + ma_dr_mp3_f4 s16min = MA_DR_MP3_VSET(-32768.0f); + __m128i pcm8 = _mm_packs_epi32(_mm_cvtps_epi32(_mm_max_ps(_mm_min_ps(a, s16max), s16min)), + _mm_cvtps_epi32(_mm_max_ps(_mm_min_ps(b, s16max), s16min))); + out[i ] = (ma_int16)_mm_extract_epi16(pcm8, 0); + out[i+1] = (ma_int16)_mm_extract_epi16(pcm8, 1); + out[i+2] = (ma_int16)_mm_extract_epi16(pcm8, 2); + out[i+3] = (ma_int16)_mm_extract_epi16(pcm8, 3); + out[i+4] = (ma_int16)_mm_extract_epi16(pcm8, 4); + out[i+5] = (ma_int16)_mm_extract_epi16(pcm8, 5); + out[i+6] = (ma_int16)_mm_extract_epi16(pcm8, 6); + out[i+7] = (ma_int16)_mm_extract_epi16(pcm8, 7); +#else + int16x4_t pcma, pcmb; + a = MA_DR_MP3_VADD(a, MA_DR_MP3_VSET(0.5f)); + b = MA_DR_MP3_VADD(b, MA_DR_MP3_VSET(0.5f)); + pcma = vqmovn_s32(vqaddq_s32(vcvtq_s32_f32(a), vreinterpretq_s32_u32(vcltq_f32(a, MA_DR_MP3_VSET(0))))); + pcmb = vqmovn_s32(vqaddq_s32(vcvtq_s32_f32(b), vreinterpretq_s32_u32(vcltq_f32(b, MA_DR_MP3_VSET(0))))); + vst1_lane_s16(out+i , pcma, 0); + vst1_lane_s16(out+i+1, pcma, 1); + vst1_lane_s16(out+i+2, pcma, 2); + vst1_lane_s16(out+i+3, pcma, 3); + vst1_lane_s16(out+i+4, pcmb, 0); + vst1_lane_s16(out+i+5, pcmb, 1); + vst1_lane_s16(out+i+6, pcmb, 2); + vst1_lane_s16(out+i+7, pcmb, 3); +#endif + } +#endif + for(; i < num_samples; i++) + { + float sample = in[i] * 32768.0f; + if (sample >= 32766.5f) + out[i] = (ma_int16) 32767; + else if (sample <= -32767.5f) + out[i] = (ma_int16)-32768; + else + { + short s = (ma_int16)(sample + .5f); + s -= (s < 0); + out[i] = s; + } + } +} +#ifndef MA_DR_MP3_SEEK_LEADING_MP3_FRAMES +#define MA_DR_MP3_SEEK_LEADING_MP3_FRAMES 2 +#endif +#define MA_DR_MP3_MIN_DATA_CHUNK_SIZE 16384 +#ifndef 
MA_DR_MP3_DATA_CHUNK_SIZE +#define MA_DR_MP3_DATA_CHUNK_SIZE (MA_DR_MP3_MIN_DATA_CHUNK_SIZE*4) +#endif +#define MA_DR_MP3_COUNTOF(x) (sizeof(x) / sizeof(x[0])) +#define MA_DR_MP3_CLAMP(x, lo, hi) (MA_DR_MP3_MAX(lo, MA_DR_MP3_MIN(x, hi))) +#ifndef MA_DR_MP3_PI_D +#define MA_DR_MP3_PI_D 3.14159265358979323846264 +#endif +#define MA_DR_MP3_DEFAULT_RESAMPLER_LPF_ORDER 2 +static MA_INLINE float ma_dr_mp3_mix_f32(float x, float y, float a) +{ + return x*(1-a) + y*a; +} +static MA_INLINE float ma_dr_mp3_mix_f32_fast(float x, float y, float a) +{ + float r0 = (y - x); + float r1 = r0*a; + return x + r1; +} +static MA_INLINE ma_uint32 ma_dr_mp3_gcf_u32(ma_uint32 a, ma_uint32 b) +{ + for (;;) { + if (b == 0) { + break; + } else { + ma_uint32 t = a; + a = b; + b = t % a; + } + } + return a; +} +static void* ma_dr_mp3__malloc_default(size_t sz, void* pUserData) +{ + (void)pUserData; + return MA_DR_MP3_MALLOC(sz); +} +static void* ma_dr_mp3__realloc_default(void* p, size_t sz, void* pUserData) +{ + (void)pUserData; + return MA_DR_MP3_REALLOC(p, sz); +} +static void ma_dr_mp3__free_default(void* p, void* pUserData) +{ + (void)pUserData; + MA_DR_MP3_FREE(p); +} +static void* ma_dr_mp3__malloc_from_callbacks(size_t sz, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pAllocationCallbacks == NULL) { + return NULL; + } + if (pAllocationCallbacks->onMalloc != NULL) { + return pAllocationCallbacks->onMalloc(sz, pAllocationCallbacks->pUserData); + } + if (pAllocationCallbacks->onRealloc != NULL) { + return pAllocationCallbacks->onRealloc(NULL, sz, pAllocationCallbacks->pUserData); + } + return NULL; +} +static void* ma_dr_mp3__realloc_from_callbacks(void* p, size_t szNew, size_t szOld, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pAllocationCallbacks == NULL) { + return NULL; + } + if (pAllocationCallbacks->onRealloc != NULL) { + return pAllocationCallbacks->onRealloc(p, szNew, pAllocationCallbacks->pUserData); + } + if (pAllocationCallbacks->onMalloc != NULL && pAllocationCallbacks->onFree != NULL) { + void* p2; + p2 = pAllocationCallbacks->onMalloc(szNew, pAllocationCallbacks->pUserData); + if (p2 == NULL) { + return NULL; + } + if (p != NULL) { + MA_DR_MP3_COPY_MEMORY(p2, p, szOld); + pAllocationCallbacks->onFree(p, pAllocationCallbacks->pUserData); + } + return p2; + } + return NULL; +} +static void ma_dr_mp3__free_from_callbacks(void* p, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (p == NULL || pAllocationCallbacks == NULL) { + return; + } + if (pAllocationCallbacks->onFree != NULL) { + pAllocationCallbacks->onFree(p, pAllocationCallbacks->pUserData); + } +} +static ma_allocation_callbacks ma_dr_mp3_copy_allocation_callbacks_or_defaults(const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pAllocationCallbacks != NULL) { + return *pAllocationCallbacks; + } else { + ma_allocation_callbacks allocationCallbacks; + allocationCallbacks.pUserData = NULL; + allocationCallbacks.onMalloc = ma_dr_mp3__malloc_default; + allocationCallbacks.onRealloc = ma_dr_mp3__realloc_default; + allocationCallbacks.onFree = ma_dr_mp3__free_default; + return allocationCallbacks; + } +} +static size_t ma_dr_mp3__on_read(ma_dr_mp3* pMP3, void* pBufferOut, size_t bytesToRead) +{ + size_t bytesRead = pMP3->onRead(pMP3->pUserData, pBufferOut, bytesToRead); + pMP3->streamCursor += bytesRead; + return bytesRead; +} +static ma_bool32 ma_dr_mp3__on_seek(ma_dr_mp3* pMP3, int offset, ma_dr_mp3_seek_origin origin) +{ + MA_DR_MP3_ASSERT(offset >= 0); + if (!pMP3->onSeek(pMP3->pUserData, 
offset, origin)) { + return MA_FALSE; + } + if (origin == ma_dr_mp3_seek_origin_start) { + pMP3->streamCursor = (ma_uint64)offset; + } else { + pMP3->streamCursor += offset; + } + return MA_TRUE; +} +static ma_bool32 ma_dr_mp3__on_seek_64(ma_dr_mp3* pMP3, ma_uint64 offset, ma_dr_mp3_seek_origin origin) +{ + if (offset <= 0x7FFFFFFF) { + return ma_dr_mp3__on_seek(pMP3, (int)offset, origin); + } + if (!ma_dr_mp3__on_seek(pMP3, 0x7FFFFFFF, ma_dr_mp3_seek_origin_start)) { + return MA_FALSE; + } + offset -= 0x7FFFFFFF; + while (offset > 0) { + if (offset <= 0x7FFFFFFF) { + if (!ma_dr_mp3__on_seek(pMP3, (int)offset, ma_dr_mp3_seek_origin_current)) { + return MA_FALSE; + } + offset = 0; + } else { + if (!ma_dr_mp3__on_seek(pMP3, 0x7FFFFFFF, ma_dr_mp3_seek_origin_current)) { + return MA_FALSE; + } + offset -= 0x7FFFFFFF; + } + } + return MA_TRUE; +} +static ma_uint32 ma_dr_mp3_decode_next_frame_ex__callbacks(ma_dr_mp3* pMP3, ma_dr_mp3d_sample_t* pPCMFrames) +{ + ma_uint32 pcmFramesRead = 0; + MA_DR_MP3_ASSERT(pMP3 != NULL); + MA_DR_MP3_ASSERT(pMP3->onRead != NULL); + if (pMP3->atEnd) { + return 0; + } + for (;;) { + ma_dr_mp3dec_frame_info info; + if (pMP3->dataSize < MA_DR_MP3_MIN_DATA_CHUNK_SIZE) { + size_t bytesRead; + if (pMP3->pData != NULL) { + MA_DR_MP3_MOVE_MEMORY(pMP3->pData, pMP3->pData + pMP3->dataConsumed, pMP3->dataSize); + } + pMP3->dataConsumed = 0; + if (pMP3->dataCapacity < MA_DR_MP3_DATA_CHUNK_SIZE) { + ma_uint8* pNewData; + size_t newDataCap; + newDataCap = MA_DR_MP3_DATA_CHUNK_SIZE; + pNewData = (ma_uint8*)ma_dr_mp3__realloc_from_callbacks(pMP3->pData, newDataCap, pMP3->dataCapacity, &pMP3->allocationCallbacks); + if (pNewData == NULL) { + return 0; + } + pMP3->pData = pNewData; + pMP3->dataCapacity = newDataCap; + } + bytesRead = ma_dr_mp3__on_read(pMP3, pMP3->pData + pMP3->dataSize, (pMP3->dataCapacity - pMP3->dataSize)); + if (bytesRead == 0) { + if (pMP3->dataSize == 0) { + pMP3->atEnd = MA_TRUE; + return 0; + } + } + pMP3->dataSize += bytesRead; + } + if (pMP3->dataSize > INT_MAX) { + pMP3->atEnd = MA_TRUE; + return 0; + } + MA_DR_MP3_ASSERT(pMP3->pData != NULL); + MA_DR_MP3_ASSERT(pMP3->dataCapacity > 0); + if (pMP3->pData == NULL) { + return 0; + } + pcmFramesRead = ma_dr_mp3dec_decode_frame(&pMP3->decoder, pMP3->pData + pMP3->dataConsumed, (int)pMP3->dataSize, pPCMFrames, &info); + if (info.frame_bytes > 0) { + pMP3->dataConsumed += (size_t)info.frame_bytes; + pMP3->dataSize -= (size_t)info.frame_bytes; + } + if (pcmFramesRead > 0) { + pcmFramesRead = ma_dr_mp3_hdr_frame_samples(pMP3->decoder.header); + pMP3->pcmFramesConsumedInMP3Frame = 0; + pMP3->pcmFramesRemainingInMP3Frame = pcmFramesRead; + pMP3->mp3FrameChannels = info.channels; + pMP3->mp3FrameSampleRate = info.hz; + break; + } else if (info.frame_bytes == 0) { + size_t bytesRead; + MA_DR_MP3_MOVE_MEMORY(pMP3->pData, pMP3->pData + pMP3->dataConsumed, pMP3->dataSize); + pMP3->dataConsumed = 0; + if (pMP3->dataCapacity == pMP3->dataSize) { + ma_uint8* pNewData; + size_t newDataCap; + newDataCap = pMP3->dataCapacity + MA_DR_MP3_DATA_CHUNK_SIZE; + pNewData = (ma_uint8*)ma_dr_mp3__realloc_from_callbacks(pMP3->pData, newDataCap, pMP3->dataCapacity, &pMP3->allocationCallbacks); + if (pNewData == NULL) { + return 0; + } + pMP3->pData = pNewData; + pMP3->dataCapacity = newDataCap; + } + bytesRead = ma_dr_mp3__on_read(pMP3, pMP3->pData + pMP3->dataSize, (pMP3->dataCapacity - pMP3->dataSize)); + if (bytesRead == 0) { + pMP3->atEnd = MA_TRUE; + return 0; + } + pMP3->dataSize += bytesRead; + } + }; + return pcmFramesRead; +} 
+static ma_uint32 ma_dr_mp3_decode_next_frame_ex__memory(ma_dr_mp3* pMP3, ma_dr_mp3d_sample_t* pPCMFrames) +{ + ma_uint32 pcmFramesRead = 0; + ma_dr_mp3dec_frame_info info; + MA_DR_MP3_ASSERT(pMP3 != NULL); + MA_DR_MP3_ASSERT(pMP3->memory.pData != NULL); + if (pMP3->atEnd) { + return 0; + } + for (;;) { + pcmFramesRead = ma_dr_mp3dec_decode_frame(&pMP3->decoder, pMP3->memory.pData + pMP3->memory.currentReadPos, (int)(pMP3->memory.dataSize - pMP3->memory.currentReadPos), pPCMFrames, &info); + if (pcmFramesRead > 0) { + pcmFramesRead = ma_dr_mp3_hdr_frame_samples(pMP3->decoder.header); + pMP3->pcmFramesConsumedInMP3Frame = 0; + pMP3->pcmFramesRemainingInMP3Frame = pcmFramesRead; + pMP3->mp3FrameChannels = info.channels; + pMP3->mp3FrameSampleRate = info.hz; + break; + } else if (info.frame_bytes > 0) { + pMP3->memory.currentReadPos += (size_t)info.frame_bytes; + } else { + break; + } + } + pMP3->memory.currentReadPos += (size_t)info.frame_bytes; + return pcmFramesRead; +} +static ma_uint32 ma_dr_mp3_decode_next_frame_ex(ma_dr_mp3* pMP3, ma_dr_mp3d_sample_t* pPCMFrames) +{ + if (pMP3->memory.pData != NULL && pMP3->memory.dataSize > 0) { + return ma_dr_mp3_decode_next_frame_ex__memory(pMP3, pPCMFrames); + } else { + return ma_dr_mp3_decode_next_frame_ex__callbacks(pMP3, pPCMFrames); + } +} +static ma_uint32 ma_dr_mp3_decode_next_frame(ma_dr_mp3* pMP3) +{ + MA_DR_MP3_ASSERT(pMP3 != NULL); + return ma_dr_mp3_decode_next_frame_ex(pMP3, (ma_dr_mp3d_sample_t*)pMP3->pcmFrames); +} +#if 0 +static ma_uint32 ma_dr_mp3_seek_next_frame(ma_dr_mp3* pMP3) +{ + ma_uint32 pcmFrameCount; + MA_DR_MP3_ASSERT(pMP3 != NULL); + pcmFrameCount = ma_dr_mp3_decode_next_frame_ex(pMP3, NULL); + if (pcmFrameCount == 0) { + return 0; + } + pMP3->currentPCMFrame += pcmFrameCount; + pMP3->pcmFramesConsumedInMP3Frame = pcmFrameCount; + pMP3->pcmFramesRemainingInMP3Frame = 0; + return pcmFrameCount; +} +#endif +static ma_bool32 ma_dr_mp3_init_internal(ma_dr_mp3* pMP3, ma_dr_mp3_read_proc onRead, ma_dr_mp3_seek_proc onSeek, void* pUserData, const ma_allocation_callbacks* pAllocationCallbacks) +{ + MA_DR_MP3_ASSERT(pMP3 != NULL); + MA_DR_MP3_ASSERT(onRead != NULL); + ma_dr_mp3dec_init(&pMP3->decoder); + pMP3->onRead = onRead; + pMP3->onSeek = onSeek; + pMP3->pUserData = pUserData; + pMP3->allocationCallbacks = ma_dr_mp3_copy_allocation_callbacks_or_defaults(pAllocationCallbacks); + if (pMP3->allocationCallbacks.onFree == NULL || (pMP3->allocationCallbacks.onMalloc == NULL && pMP3->allocationCallbacks.onRealloc == NULL)) { + return MA_FALSE; + } + if (ma_dr_mp3_decode_next_frame(pMP3) == 0) { + ma_dr_mp3__free_from_callbacks(pMP3->pData, &pMP3->allocationCallbacks); + return MA_FALSE; + } + pMP3->channels = pMP3->mp3FrameChannels; + pMP3->sampleRate = pMP3->mp3FrameSampleRate; + return MA_TRUE; +} +MA_API ma_bool32 ma_dr_mp3_init(ma_dr_mp3* pMP3, ma_dr_mp3_read_proc onRead, ma_dr_mp3_seek_proc onSeek, void* pUserData, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pMP3 == NULL || onRead == NULL) { + return MA_FALSE; + } + MA_DR_MP3_ZERO_OBJECT(pMP3); + return ma_dr_mp3_init_internal(pMP3, onRead, onSeek, pUserData, pAllocationCallbacks); +} +static size_t ma_dr_mp3__on_read_memory(void* pUserData, void* pBufferOut, size_t bytesToRead) +{ + ma_dr_mp3* pMP3 = (ma_dr_mp3*)pUserData; + size_t bytesRemaining; + MA_DR_MP3_ASSERT(pMP3 != NULL); + MA_DR_MP3_ASSERT(pMP3->memory.dataSize >= pMP3->memory.currentReadPos); + bytesRemaining = pMP3->memory.dataSize - pMP3->memory.currentReadPos; + if (bytesToRead > 
bytesRemaining) { + bytesToRead = bytesRemaining; + } + if (bytesToRead > 0) { + MA_DR_MP3_COPY_MEMORY(pBufferOut, pMP3->memory.pData + pMP3->memory.currentReadPos, bytesToRead); + pMP3->memory.currentReadPos += bytesToRead; + } + return bytesToRead; +} +static ma_bool32 ma_dr_mp3__on_seek_memory(void* pUserData, int byteOffset, ma_dr_mp3_seek_origin origin) +{ + ma_dr_mp3* pMP3 = (ma_dr_mp3*)pUserData; + MA_DR_MP3_ASSERT(pMP3 != NULL); + if (origin == ma_dr_mp3_seek_origin_current) { + if (byteOffset > 0) { + if (pMP3->memory.currentReadPos + byteOffset > pMP3->memory.dataSize) { + byteOffset = (int)(pMP3->memory.dataSize - pMP3->memory.currentReadPos); + } + } else { + if (pMP3->memory.currentReadPos < (size_t)-byteOffset) { + byteOffset = -(int)pMP3->memory.currentReadPos; + } + } + pMP3->memory.currentReadPos += byteOffset; + } else { + if ((ma_uint32)byteOffset <= pMP3->memory.dataSize) { + pMP3->memory.currentReadPos = byteOffset; + } else { + pMP3->memory.currentReadPos = pMP3->memory.dataSize; + } + } + return MA_TRUE; +} +MA_API ma_bool32 ma_dr_mp3_init_memory(ma_dr_mp3* pMP3, const void* pData, size_t dataSize, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pMP3 == NULL) { + return MA_FALSE; + } + MA_DR_MP3_ZERO_OBJECT(pMP3); + if (pData == NULL || dataSize == 0) { + return MA_FALSE; + } + pMP3->memory.pData = (const ma_uint8*)pData; + pMP3->memory.dataSize = dataSize; + pMP3->memory.currentReadPos = 0; + return ma_dr_mp3_init_internal(pMP3, ma_dr_mp3__on_read_memory, ma_dr_mp3__on_seek_memory, pMP3, pAllocationCallbacks); +} +#ifndef MA_DR_MP3_NO_STDIO +#include <stdio.h> +#include <wchar.h> +static size_t ma_dr_mp3__on_read_stdio(void* pUserData, void* pBufferOut, size_t bytesToRead) +{ + return fread(pBufferOut, 1, bytesToRead, (FILE*)pUserData); +} +static ma_bool32 ma_dr_mp3__on_seek_stdio(void* pUserData, int offset, ma_dr_mp3_seek_origin origin) +{ + return fseek((FILE*)pUserData, offset, (origin == ma_dr_mp3_seek_origin_current) ?
SEEK_CUR : SEEK_SET) == 0; +} +MA_API ma_bool32 ma_dr_mp3_init_file(ma_dr_mp3* pMP3, const char* pFilePath, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_bool32 result; + FILE* pFile; + if (ma_fopen(&pFile, pFilePath, "rb") != MA_SUCCESS) { + return MA_FALSE; + } + result = ma_dr_mp3_init(pMP3, ma_dr_mp3__on_read_stdio, ma_dr_mp3__on_seek_stdio, (void*)pFile, pAllocationCallbacks); + if (result != MA_TRUE) { + fclose(pFile); + return result; + } + return MA_TRUE; +} +MA_API ma_bool32 ma_dr_mp3_init_file_w(ma_dr_mp3* pMP3, const wchar_t* pFilePath, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_bool32 result; + FILE* pFile; + if (ma_wfopen(&pFile, pFilePath, L"rb", pAllocationCallbacks) != MA_SUCCESS) { + return MA_FALSE; + } + result = ma_dr_mp3_init(pMP3, ma_dr_mp3__on_read_stdio, ma_dr_mp3__on_seek_stdio, (void*)pFile, pAllocationCallbacks); + if (result != MA_TRUE) { + fclose(pFile); + return result; + } + return MA_TRUE; +} +#endif +MA_API void ma_dr_mp3_uninit(ma_dr_mp3* pMP3) +{ + if (pMP3 == NULL) { + return; + } +#ifndef MA_DR_MP3_NO_STDIO + if (pMP3->onRead == ma_dr_mp3__on_read_stdio) { + FILE* pFile = (FILE*)pMP3->pUserData; + if (pFile != NULL) { + fclose(pFile); + pMP3->pUserData = NULL; + } + } +#endif + ma_dr_mp3__free_from_callbacks(pMP3->pData, &pMP3->allocationCallbacks); +} +#if defined(MA_DR_MP3_FLOAT_OUTPUT) +static void ma_dr_mp3_f32_to_s16(ma_int16* dst, const float* src, ma_uint64 sampleCount) +{ + ma_uint64 i; + ma_uint64 i4; + ma_uint64 sampleCount4; + i = 0; + sampleCount4 = sampleCount >> 2; + for (i4 = 0; i4 < sampleCount4; i4 += 1) { + float x0 = src[i+0]; + float x1 = src[i+1]; + float x2 = src[i+2]; + float x3 = src[i+3]; + x0 = ((x0 < -1) ? -1 : ((x0 > 1) ? 1 : x0)); + x1 = ((x1 < -1) ? -1 : ((x1 > 1) ? 1 : x1)); + x2 = ((x2 < -1) ? -1 : ((x2 > 1) ? 1 : x2)); + x3 = ((x3 < -1) ? -1 : ((x3 > 1) ? 1 : x3)); + x0 = x0 * 32767.0f; + x1 = x1 * 32767.0f; + x2 = x2 * 32767.0f; + x3 = x3 * 32767.0f; + dst[i+0] = (ma_int16)x0; + dst[i+1] = (ma_int16)x1; + dst[i+2] = (ma_int16)x2; + dst[i+3] = (ma_int16)x3; + i += 4; + } + for (; i < sampleCount; i += 1) { + float x = src[i]; + x = ((x < -1) ? -1 : ((x > 1) ? 
1 : x)); + x = x * 32767.0f; + dst[i] = (ma_int16)x; + } +} +#endif +#if !defined(MA_DR_MP3_FLOAT_OUTPUT) +static void ma_dr_mp3_s16_to_f32(float* dst, const ma_int16* src, ma_uint64 sampleCount) +{ + ma_uint64 i; + for (i = 0; i < sampleCount; i += 1) { + float x = (float)src[i]; + x = x * 0.000030517578125f; + dst[i] = x; + } +} +#endif +static ma_uint64 ma_dr_mp3_read_pcm_frames_raw(ma_dr_mp3* pMP3, ma_uint64 framesToRead, void* pBufferOut) +{ + ma_uint64 totalFramesRead = 0; + MA_DR_MP3_ASSERT(pMP3 != NULL); + MA_DR_MP3_ASSERT(pMP3->onRead != NULL); + while (framesToRead > 0) { + ma_uint32 framesToConsume = (ma_uint32)MA_DR_MP3_MIN(pMP3->pcmFramesRemainingInMP3Frame, framesToRead); + if (pBufferOut != NULL) { + #if defined(MA_DR_MP3_FLOAT_OUTPUT) + float* pFramesOutF32 = (float*)MA_DR_MP3_OFFSET_PTR(pBufferOut, sizeof(float) * totalFramesRead * pMP3->channels); + float* pFramesInF32 = (float*)MA_DR_MP3_OFFSET_PTR(&pMP3->pcmFrames[0], sizeof(float) * pMP3->pcmFramesConsumedInMP3Frame * pMP3->mp3FrameChannels); + MA_DR_MP3_COPY_MEMORY(pFramesOutF32, pFramesInF32, sizeof(float) * framesToConsume * pMP3->channels); + #else + ma_int16* pFramesOutS16 = (ma_int16*)MA_DR_MP3_OFFSET_PTR(pBufferOut, sizeof(ma_int16) * totalFramesRead * pMP3->channels); + ma_int16* pFramesInS16 = (ma_int16*)MA_DR_MP3_OFFSET_PTR(&pMP3->pcmFrames[0], sizeof(ma_int16) * pMP3->pcmFramesConsumedInMP3Frame * pMP3->mp3FrameChannels); + MA_DR_MP3_COPY_MEMORY(pFramesOutS16, pFramesInS16, sizeof(ma_int16) * framesToConsume * pMP3->channels); + #endif + } + pMP3->currentPCMFrame += framesToConsume; + pMP3->pcmFramesConsumedInMP3Frame += framesToConsume; + pMP3->pcmFramesRemainingInMP3Frame -= framesToConsume; + totalFramesRead += framesToConsume; + framesToRead -= framesToConsume; + if (framesToRead == 0) { + break; + } + MA_DR_MP3_ASSERT(pMP3->pcmFramesRemainingInMP3Frame == 0); + if (ma_dr_mp3_decode_next_frame(pMP3) == 0) { + break; + } + } + return totalFramesRead; +} +MA_API ma_uint64 ma_dr_mp3_read_pcm_frames_f32(ma_dr_mp3* pMP3, ma_uint64 framesToRead, float* pBufferOut) +{ + if (pMP3 == NULL || pMP3->onRead == NULL) { + return 0; + } +#if defined(MA_DR_MP3_FLOAT_OUTPUT) + return ma_dr_mp3_read_pcm_frames_raw(pMP3, framesToRead, pBufferOut); +#else + { + ma_int16 pTempS16[8192]; + ma_uint64 totalPCMFramesRead = 0; + while (totalPCMFramesRead < framesToRead) { + ma_uint64 framesJustRead; + ma_uint64 framesRemaining = framesToRead - totalPCMFramesRead; + ma_uint64 framesToReadNow = MA_DR_MP3_COUNTOF(pTempS16) / pMP3->channels; + if (framesToReadNow > framesRemaining) { + framesToReadNow = framesRemaining; + } + framesJustRead = ma_dr_mp3_read_pcm_frames_raw(pMP3, framesToReadNow, pTempS16); + if (framesJustRead == 0) { + break; + } + ma_dr_mp3_s16_to_f32((float*)MA_DR_MP3_OFFSET_PTR(pBufferOut, sizeof(float) * totalPCMFramesRead * pMP3->channels), pTempS16, framesJustRead * pMP3->channels); + totalPCMFramesRead += framesJustRead; + } + return totalPCMFramesRead; + } +#endif +} +MA_API ma_uint64 ma_dr_mp3_read_pcm_frames_s16(ma_dr_mp3* pMP3, ma_uint64 framesToRead, ma_int16* pBufferOut) +{ + if (pMP3 == NULL || pMP3->onRead == NULL) { + return 0; + } +#if !defined(MA_DR_MP3_FLOAT_OUTPUT) + return ma_dr_mp3_read_pcm_frames_raw(pMP3, framesToRead, pBufferOut); +#else + { + float pTempF32[4096]; + ma_uint64 totalPCMFramesRead = 0; + while (totalPCMFramesRead < framesToRead) { + ma_uint64 framesJustRead; + ma_uint64 framesRemaining = framesToRead - totalPCMFramesRead; + ma_uint64 framesToReadNow = 
MA_DR_MP3_COUNTOF(pTempF32) / pMP3->channels; + if (framesToReadNow > framesRemaining) { + framesToReadNow = framesRemaining; + } + framesJustRead = ma_dr_mp3_read_pcm_frames_raw(pMP3, framesToReadNow, pTempF32); + if (framesJustRead == 0) { + break; + } + ma_dr_mp3_f32_to_s16((ma_int16*)MA_DR_MP3_OFFSET_PTR(pBufferOut, sizeof(ma_int16) * totalPCMFramesRead * pMP3->channels), pTempF32, framesJustRead * pMP3->channels); + totalPCMFramesRead += framesJustRead; + } + return totalPCMFramesRead; + } +#endif +} +static void ma_dr_mp3_reset(ma_dr_mp3* pMP3) +{ + MA_DR_MP3_ASSERT(pMP3 != NULL); + pMP3->pcmFramesConsumedInMP3Frame = 0; + pMP3->pcmFramesRemainingInMP3Frame = 0; + pMP3->currentPCMFrame = 0; + pMP3->dataSize = 0; + pMP3->atEnd = MA_FALSE; + ma_dr_mp3dec_init(&pMP3->decoder); +} +static ma_bool32 ma_dr_mp3_seek_to_start_of_stream(ma_dr_mp3* pMP3) +{ + MA_DR_MP3_ASSERT(pMP3 != NULL); + MA_DR_MP3_ASSERT(pMP3->onSeek != NULL); + if (!ma_dr_mp3__on_seek(pMP3, 0, ma_dr_mp3_seek_origin_start)) { + return MA_FALSE; + } + ma_dr_mp3_reset(pMP3); + return MA_TRUE; +} +static ma_bool32 ma_dr_mp3_seek_forward_by_pcm_frames__brute_force(ma_dr_mp3* pMP3, ma_uint64 frameOffset) +{ + ma_uint64 framesRead; +#if defined(MA_DR_MP3_FLOAT_OUTPUT) + framesRead = ma_dr_mp3_read_pcm_frames_f32(pMP3, frameOffset, NULL); +#else + framesRead = ma_dr_mp3_read_pcm_frames_s16(pMP3, frameOffset, NULL); +#endif + if (framesRead != frameOffset) { + return MA_FALSE; + } + return MA_TRUE; +} +static ma_bool32 ma_dr_mp3_seek_to_pcm_frame__brute_force(ma_dr_mp3* pMP3, ma_uint64 frameIndex) +{ + MA_DR_MP3_ASSERT(pMP3 != NULL); + if (frameIndex == pMP3->currentPCMFrame) { + return MA_TRUE; + } + if (frameIndex < pMP3->currentPCMFrame) { + if (!ma_dr_mp3_seek_to_start_of_stream(pMP3)) { + return MA_FALSE; + } + } + MA_DR_MP3_ASSERT(frameIndex >= pMP3->currentPCMFrame); + return ma_dr_mp3_seek_forward_by_pcm_frames__brute_force(pMP3, (frameIndex - pMP3->currentPCMFrame)); +} +static ma_bool32 ma_dr_mp3_find_closest_seek_point(ma_dr_mp3* pMP3, ma_uint64 frameIndex, ma_uint32* pSeekPointIndex) +{ + ma_uint32 iSeekPoint; + MA_DR_MP3_ASSERT(pSeekPointIndex != NULL); + *pSeekPointIndex = 0; + if (frameIndex < pMP3->pSeekPoints[0].pcmFrameIndex) { + return MA_FALSE; + } + for (iSeekPoint = 0; iSeekPoint < pMP3->seekPointCount; ++iSeekPoint) { + if (pMP3->pSeekPoints[iSeekPoint].pcmFrameIndex > frameIndex) { + break; + } + *pSeekPointIndex = iSeekPoint; + } + return MA_TRUE; +} +static ma_bool32 ma_dr_mp3_seek_to_pcm_frame__seek_table(ma_dr_mp3* pMP3, ma_uint64 frameIndex) +{ + ma_dr_mp3_seek_point seekPoint; + ma_uint32 priorSeekPointIndex; + ma_uint16 iMP3Frame; + ma_uint64 leftoverFrames; + MA_DR_MP3_ASSERT(pMP3 != NULL); + MA_DR_MP3_ASSERT(pMP3->pSeekPoints != NULL); + MA_DR_MP3_ASSERT(pMP3->seekPointCount > 0); + if (ma_dr_mp3_find_closest_seek_point(pMP3, frameIndex, &priorSeekPointIndex)) { + seekPoint = pMP3->pSeekPoints[priorSeekPointIndex]; + } else { + seekPoint.seekPosInBytes = 0; + seekPoint.pcmFrameIndex = 0; + seekPoint.mp3FramesToDiscard = 0; + seekPoint.pcmFramesToDiscard = 0; + } + if (!ma_dr_mp3__on_seek_64(pMP3, seekPoint.seekPosInBytes, ma_dr_mp3_seek_origin_start)) { + return MA_FALSE; + } + ma_dr_mp3_reset(pMP3); + for (iMP3Frame = 0; iMP3Frame < seekPoint.mp3FramesToDiscard; ++iMP3Frame) { + ma_uint32 pcmFramesRead; + ma_dr_mp3d_sample_t* pPCMFrames; + pPCMFrames = NULL; + if (iMP3Frame == seekPoint.mp3FramesToDiscard-1) { + pPCMFrames = (ma_dr_mp3d_sample_t*)pMP3->pcmFrames; + } + pcmFramesRead = 
ma_dr_mp3_decode_next_frame_ex(pMP3, pPCMFrames); + if (pcmFramesRead == 0) { + return MA_FALSE; + } + } + pMP3->currentPCMFrame = seekPoint.pcmFrameIndex - seekPoint.pcmFramesToDiscard; + leftoverFrames = frameIndex - pMP3->currentPCMFrame; + return ma_dr_mp3_seek_forward_by_pcm_frames__brute_force(pMP3, leftoverFrames); +} +MA_API ma_bool32 ma_dr_mp3_seek_to_pcm_frame(ma_dr_mp3* pMP3, ma_uint64 frameIndex) +{ + if (pMP3 == NULL || pMP3->onSeek == NULL) { + return MA_FALSE; + } + if (frameIndex == 0) { + return ma_dr_mp3_seek_to_start_of_stream(pMP3); + } + if (pMP3->pSeekPoints != NULL && pMP3->seekPointCount > 0) { + return ma_dr_mp3_seek_to_pcm_frame__seek_table(pMP3, frameIndex); + } else { + return ma_dr_mp3_seek_to_pcm_frame__brute_force(pMP3, frameIndex); + } +} +MA_API ma_bool32 ma_dr_mp3_get_mp3_and_pcm_frame_count(ma_dr_mp3* pMP3, ma_uint64* pMP3FrameCount, ma_uint64* pPCMFrameCount) +{ + ma_uint64 currentPCMFrame; + ma_uint64 totalPCMFrameCount; + ma_uint64 totalMP3FrameCount; + if (pMP3 == NULL) { + return MA_FALSE; + } + if (pMP3->onSeek == NULL) { + return MA_FALSE; + } + currentPCMFrame = pMP3->currentPCMFrame; + if (!ma_dr_mp3_seek_to_start_of_stream(pMP3)) { + return MA_FALSE; + } + totalPCMFrameCount = 0; + totalMP3FrameCount = 0; + for (;;) { + ma_uint32 pcmFramesInCurrentMP3Frame; + pcmFramesInCurrentMP3Frame = ma_dr_mp3_decode_next_frame_ex(pMP3, NULL); + if (pcmFramesInCurrentMP3Frame == 0) { + break; + } + totalPCMFrameCount += pcmFramesInCurrentMP3Frame; + totalMP3FrameCount += 1; + } + if (!ma_dr_mp3_seek_to_start_of_stream(pMP3)) { + return MA_FALSE; + } + if (!ma_dr_mp3_seek_to_pcm_frame(pMP3, currentPCMFrame)) { + return MA_FALSE; + } + if (pMP3FrameCount != NULL) { + *pMP3FrameCount = totalMP3FrameCount; + } + if (pPCMFrameCount != NULL) { + *pPCMFrameCount = totalPCMFrameCount; + } + return MA_TRUE; +} +MA_API ma_uint64 ma_dr_mp3_get_pcm_frame_count(ma_dr_mp3* pMP3) +{ + ma_uint64 totalPCMFrameCount; + if (!ma_dr_mp3_get_mp3_and_pcm_frame_count(pMP3, NULL, &totalPCMFrameCount)) { + return 0; + } + return totalPCMFrameCount; +} +MA_API ma_uint64 ma_dr_mp3_get_mp3_frame_count(ma_dr_mp3* pMP3) +{ + ma_uint64 totalMP3FrameCount; + if (!ma_dr_mp3_get_mp3_and_pcm_frame_count(pMP3, &totalMP3FrameCount, NULL)) { + return 0; + } + return totalMP3FrameCount; +} +static void ma_dr_mp3__accumulate_running_pcm_frame_count(ma_dr_mp3* pMP3, ma_uint32 pcmFrameCountIn, ma_uint64* pRunningPCMFrameCount, float* pRunningPCMFrameCountFractionalPart) +{ + float srcRatio; + float pcmFrameCountOutF; + ma_uint32 pcmFrameCountOut; + srcRatio = (float)pMP3->mp3FrameSampleRate / (float)pMP3->sampleRate; + MA_DR_MP3_ASSERT(srcRatio > 0); + pcmFrameCountOutF = *pRunningPCMFrameCountFractionalPart + (pcmFrameCountIn / srcRatio); + pcmFrameCountOut = (ma_uint32)pcmFrameCountOutF; + *pRunningPCMFrameCountFractionalPart = pcmFrameCountOutF - pcmFrameCountOut; + *pRunningPCMFrameCount += pcmFrameCountOut; +} +typedef struct +{ + ma_uint64 bytePos; + ma_uint64 pcmFrameIndex; +} ma_dr_mp3__seeking_mp3_frame_info; +MA_API ma_bool32 ma_dr_mp3_calculate_seek_points(ma_dr_mp3* pMP3, ma_uint32* pSeekPointCount, ma_dr_mp3_seek_point* pSeekPoints) +{ + ma_uint32 seekPointCount; + ma_uint64 currentPCMFrame; + ma_uint64 totalMP3FrameCount; + ma_uint64 totalPCMFrameCount; + if (pMP3 == NULL || pSeekPointCount == NULL || pSeekPoints == NULL) { + return MA_FALSE; + } + seekPointCount = *pSeekPointCount; + if (seekPointCount == 0) { + return MA_FALSE; + } + currentPCMFrame = pMP3->currentPCMFrame; + if 
(!ma_dr_mp3_get_mp3_and_pcm_frame_count(pMP3, &totalMP3FrameCount, &totalPCMFrameCount)) { + return MA_FALSE; + } + if (totalMP3FrameCount < MA_DR_MP3_SEEK_LEADING_MP3_FRAMES+1) { + seekPointCount = 1; + pSeekPoints[0].seekPosInBytes = 0; + pSeekPoints[0].pcmFrameIndex = 0; + pSeekPoints[0].mp3FramesToDiscard = 0; + pSeekPoints[0].pcmFramesToDiscard = 0; + } else { + ma_uint64 pcmFramesBetweenSeekPoints; + ma_dr_mp3__seeking_mp3_frame_info mp3FrameInfo[MA_DR_MP3_SEEK_LEADING_MP3_FRAMES+1]; + ma_uint64 runningPCMFrameCount = 0; + float runningPCMFrameCountFractionalPart = 0; + ma_uint64 nextTargetPCMFrame; + ma_uint32 iMP3Frame; + ma_uint32 iSeekPoint; + if (seekPointCount > totalMP3FrameCount-1) { + seekPointCount = (ma_uint32)totalMP3FrameCount-1; + } + pcmFramesBetweenSeekPoints = totalPCMFrameCount / (seekPointCount+1); + if (!ma_dr_mp3_seek_to_start_of_stream(pMP3)) { + return MA_FALSE; + } + for (iMP3Frame = 0; iMP3Frame < MA_DR_MP3_SEEK_LEADING_MP3_FRAMES+1; ++iMP3Frame) { + ma_uint32 pcmFramesInCurrentMP3FrameIn; + MA_DR_MP3_ASSERT(pMP3->streamCursor >= pMP3->dataSize); + mp3FrameInfo[iMP3Frame].bytePos = pMP3->streamCursor - pMP3->dataSize; + mp3FrameInfo[iMP3Frame].pcmFrameIndex = runningPCMFrameCount; + pcmFramesInCurrentMP3FrameIn = ma_dr_mp3_decode_next_frame_ex(pMP3, NULL); + if (pcmFramesInCurrentMP3FrameIn == 0) { + return MA_FALSE; + } + ma_dr_mp3__accumulate_running_pcm_frame_count(pMP3, pcmFramesInCurrentMP3FrameIn, &runningPCMFrameCount, &runningPCMFrameCountFractionalPart); + } + nextTargetPCMFrame = 0; + for (iSeekPoint = 0; iSeekPoint < seekPointCount; ++iSeekPoint) { + nextTargetPCMFrame += pcmFramesBetweenSeekPoints; + for (;;) { + if (nextTargetPCMFrame < runningPCMFrameCount) { + pSeekPoints[iSeekPoint].seekPosInBytes = mp3FrameInfo[0].bytePos; + pSeekPoints[iSeekPoint].pcmFrameIndex = nextTargetPCMFrame; + pSeekPoints[iSeekPoint].mp3FramesToDiscard = MA_DR_MP3_SEEK_LEADING_MP3_FRAMES; + pSeekPoints[iSeekPoint].pcmFramesToDiscard = (ma_uint16)(nextTargetPCMFrame - mp3FrameInfo[MA_DR_MP3_SEEK_LEADING_MP3_FRAMES-1].pcmFrameIndex); + break; + } else { + size_t i; + ma_uint32 pcmFramesInCurrentMP3FrameIn; + for (i = 0; i < MA_DR_MP3_COUNTOF(mp3FrameInfo)-1; ++i) { + mp3FrameInfo[i] = mp3FrameInfo[i+1]; + } + mp3FrameInfo[MA_DR_MP3_COUNTOF(mp3FrameInfo)-1].bytePos = pMP3->streamCursor - pMP3->dataSize; + mp3FrameInfo[MA_DR_MP3_COUNTOF(mp3FrameInfo)-1].pcmFrameIndex = runningPCMFrameCount; + pcmFramesInCurrentMP3FrameIn = ma_dr_mp3_decode_next_frame_ex(pMP3, NULL); + if (pcmFramesInCurrentMP3FrameIn == 0) { + pSeekPoints[iSeekPoint].seekPosInBytes = mp3FrameInfo[0].bytePos; + pSeekPoints[iSeekPoint].pcmFrameIndex = nextTargetPCMFrame; + pSeekPoints[iSeekPoint].mp3FramesToDiscard = MA_DR_MP3_SEEK_LEADING_MP3_FRAMES; + pSeekPoints[iSeekPoint].pcmFramesToDiscard = (ma_uint16)(nextTargetPCMFrame - mp3FrameInfo[MA_DR_MP3_SEEK_LEADING_MP3_FRAMES-1].pcmFrameIndex); + break; + } + ma_dr_mp3__accumulate_running_pcm_frame_count(pMP3, pcmFramesInCurrentMP3FrameIn, &runningPCMFrameCount, &runningPCMFrameCountFractionalPart); + } + } + } + if (!ma_dr_mp3_seek_to_start_of_stream(pMP3)) { + return MA_FALSE; + } + if (!ma_dr_mp3_seek_to_pcm_frame(pMP3, currentPCMFrame)) { + return MA_FALSE; + } + } + *pSeekPointCount = seekPointCount; + return MA_TRUE; +} +MA_API ma_bool32 ma_dr_mp3_bind_seek_table(ma_dr_mp3* pMP3, ma_uint32 seekPointCount, ma_dr_mp3_seek_point* pSeekPoints) +{ + if (pMP3 == NULL) { + return MA_FALSE; + } + if (seekPointCount == 0 || pSeekPoints == NULL) { + 
pMP3->seekPointCount = 0; + pMP3->pSeekPoints = NULL; + } else { + pMP3->seekPointCount = seekPointCount; + pMP3->pSeekPoints = pSeekPoints; + } + return MA_TRUE; +} +static float* ma_dr_mp3__full_read_and_close_f32(ma_dr_mp3* pMP3, ma_dr_mp3_config* pConfig, ma_uint64* pTotalFrameCount) +{ + ma_uint64 totalFramesRead = 0; + ma_uint64 framesCapacity = 0; + float* pFrames = NULL; + float temp[4096]; + MA_DR_MP3_ASSERT(pMP3 != NULL); + for (;;) { + ma_uint64 framesToReadRightNow = MA_DR_MP3_COUNTOF(temp) / pMP3->channels; + ma_uint64 framesJustRead = ma_dr_mp3_read_pcm_frames_f32(pMP3, framesToReadRightNow, temp); + if (framesJustRead == 0) { + break; + } + if (framesCapacity < totalFramesRead + framesJustRead) { + ma_uint64 oldFramesBufferSize; + ma_uint64 newFramesBufferSize; + ma_uint64 newFramesCap; + float* pNewFrames; + newFramesCap = framesCapacity * 2; + if (newFramesCap < totalFramesRead + framesJustRead) { + newFramesCap = totalFramesRead + framesJustRead; + } + oldFramesBufferSize = framesCapacity * pMP3->channels * sizeof(float); + newFramesBufferSize = newFramesCap * pMP3->channels * sizeof(float); + if (newFramesBufferSize > (ma_uint64)MA_SIZE_MAX) { + break; + } + pNewFrames = (float*)ma_dr_mp3__realloc_from_callbacks(pFrames, (size_t)newFramesBufferSize, (size_t)oldFramesBufferSize, &pMP3->allocationCallbacks); + if (pNewFrames == NULL) { + ma_dr_mp3__free_from_callbacks(pFrames, &pMP3->allocationCallbacks); + break; + } + pFrames = pNewFrames; + framesCapacity = newFramesCap; + } + MA_DR_MP3_COPY_MEMORY(pFrames + totalFramesRead*pMP3->channels, temp, (size_t)(framesJustRead*pMP3->channels*sizeof(float))); + totalFramesRead += framesJustRead; + if (framesJustRead != framesToReadRightNow) { + break; + } + } + if (pConfig != NULL) { + pConfig->channels = pMP3->channels; + pConfig->sampleRate = pMP3->sampleRate; + } + ma_dr_mp3_uninit(pMP3); + if (pTotalFrameCount) { + *pTotalFrameCount = totalFramesRead; + } + return pFrames; +} +static ma_int16* ma_dr_mp3__full_read_and_close_s16(ma_dr_mp3* pMP3, ma_dr_mp3_config* pConfig, ma_uint64* pTotalFrameCount) +{ + ma_uint64 totalFramesRead = 0; + ma_uint64 framesCapacity = 0; + ma_int16* pFrames = NULL; + ma_int16 temp[4096]; + MA_DR_MP3_ASSERT(pMP3 != NULL); + for (;;) { + ma_uint64 framesToReadRightNow = MA_DR_MP3_COUNTOF(temp) / pMP3->channels; + ma_uint64 framesJustRead = ma_dr_mp3_read_pcm_frames_s16(pMP3, framesToReadRightNow, temp); + if (framesJustRead == 0) { + break; + } + if (framesCapacity < totalFramesRead + framesJustRead) { + ma_uint64 newFramesBufferSize; + ma_uint64 oldFramesBufferSize; + ma_uint64 newFramesCap; + ma_int16* pNewFrames; + newFramesCap = framesCapacity * 2; + if (newFramesCap < totalFramesRead + framesJustRead) { + newFramesCap = totalFramesRead + framesJustRead; + } + oldFramesBufferSize = framesCapacity * pMP3->channels * sizeof(ma_int16); + newFramesBufferSize = newFramesCap * pMP3->channels * sizeof(ma_int16); + if (newFramesBufferSize > (ma_uint64)MA_SIZE_MAX) { + break; + } + pNewFrames = (ma_int16*)ma_dr_mp3__realloc_from_callbacks(pFrames, (size_t)newFramesBufferSize, (size_t)oldFramesBufferSize, &pMP3->allocationCallbacks); + if (pNewFrames == NULL) { + ma_dr_mp3__free_from_callbacks(pFrames, &pMP3->allocationCallbacks); + break; + } + pFrames = pNewFrames; + framesCapacity = newFramesCap; + } + MA_DR_MP3_COPY_MEMORY(pFrames + totalFramesRead*pMP3->channels, temp, (size_t)(framesJustRead*pMP3->channels*sizeof(ma_int16))); + totalFramesRead += framesJustRead; + if (framesJustRead != 
framesToReadRightNow) { + break; + } + } + if (pConfig != NULL) { + pConfig->channels = pMP3->channels; + pConfig->sampleRate = pMP3->sampleRate; + } + ma_dr_mp3_uninit(pMP3); + if (pTotalFrameCount) { + *pTotalFrameCount = totalFramesRead; + } + return pFrames; +} +MA_API float* ma_dr_mp3_open_and_read_pcm_frames_f32(ma_dr_mp3_read_proc onRead, ma_dr_mp3_seek_proc onSeek, void* pUserData, ma_dr_mp3_config* pConfig, ma_uint64* pTotalFrameCount, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_dr_mp3 mp3; + if (!ma_dr_mp3_init(&mp3, onRead, onSeek, pUserData, pAllocationCallbacks)) { + return NULL; + } + return ma_dr_mp3__full_read_and_close_f32(&mp3, pConfig, pTotalFrameCount); +} +MA_API ma_int16* ma_dr_mp3_open_and_read_pcm_frames_s16(ma_dr_mp3_read_proc onRead, ma_dr_mp3_seek_proc onSeek, void* pUserData, ma_dr_mp3_config* pConfig, ma_uint64* pTotalFrameCount, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_dr_mp3 mp3; + if (!ma_dr_mp3_init(&mp3, onRead, onSeek, pUserData, pAllocationCallbacks)) { + return NULL; + } + return ma_dr_mp3__full_read_and_close_s16(&mp3, pConfig, pTotalFrameCount); +} +MA_API float* ma_dr_mp3_open_memory_and_read_pcm_frames_f32(const void* pData, size_t dataSize, ma_dr_mp3_config* pConfig, ma_uint64* pTotalFrameCount, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_dr_mp3 mp3; + if (!ma_dr_mp3_init_memory(&mp3, pData, dataSize, pAllocationCallbacks)) { + return NULL; + } + return ma_dr_mp3__full_read_and_close_f32(&mp3, pConfig, pTotalFrameCount); +} +MA_API ma_int16* ma_dr_mp3_open_memory_and_read_pcm_frames_s16(const void* pData, size_t dataSize, ma_dr_mp3_config* pConfig, ma_uint64* pTotalFrameCount, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_dr_mp3 mp3; + if (!ma_dr_mp3_init_memory(&mp3, pData, dataSize, pAllocationCallbacks)) { + return NULL; + } + return ma_dr_mp3__full_read_and_close_s16(&mp3, pConfig, pTotalFrameCount); +} +#ifndef MA_DR_MP3_NO_STDIO +MA_API float* ma_dr_mp3_open_file_and_read_pcm_frames_f32(const char* filePath, ma_dr_mp3_config* pConfig, ma_uint64* pTotalFrameCount, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_dr_mp3 mp3; + if (!ma_dr_mp3_init_file(&mp3, filePath, pAllocationCallbacks)) { + return NULL; + } + return ma_dr_mp3__full_read_and_close_f32(&mp3, pConfig, pTotalFrameCount); +} +MA_API ma_int16* ma_dr_mp3_open_file_and_read_pcm_frames_s16(const char* filePath, ma_dr_mp3_config* pConfig, ma_uint64* pTotalFrameCount, const ma_allocation_callbacks* pAllocationCallbacks) +{ + ma_dr_mp3 mp3; + if (!ma_dr_mp3_init_file(&mp3, filePath, pAllocationCallbacks)) { + return NULL; + } + return ma_dr_mp3__full_read_and_close_s16(&mp3, pConfig, pTotalFrameCount); +} +#endif +MA_API void* ma_dr_mp3_malloc(size_t sz, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pAllocationCallbacks != NULL) { + return ma_dr_mp3__malloc_from_callbacks(sz, pAllocationCallbacks); + } else { + return ma_dr_mp3__malloc_default(sz, NULL); + } +} +MA_API void ma_dr_mp3_free(void* p, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pAllocationCallbacks != NULL) { + ma_dr_mp3__free_from_callbacks(p, pAllocationCallbacks); + } else { + ma_dr_mp3__free_default(p, NULL); + } +} +#endif +/* dr_mp3_c end */ +#endif /* MA_DR_MP3_IMPLEMENTATION */ +#endif /* MA_NO_MP3 */ + + +/* End globally disabled warnings. 
*/ +#if defined(_MSC_VER) + #pragma warning(pop) +#endif + +#endif /* miniaudio_c */ +#endif /* MINIAUDIO_IMPLEMENTATION */ + + +/* +This software is available as a choice of the following licenses. Choose +whichever you prefer. + +=============================================================================== +ALTERNATIVE 1 - Public Domain (www.unlicense.org) +=============================================================================== +This is free and unencumbered software released into the public domain. + +Anyone is free to copy, modify, publish, use, compile, sell, or distribute this +software, either in source code form or as a compiled binary, for any purpose, +commercial or non-commercial, and by any means. + +In jurisdictions that recognize copyright laws, the author or authors of this +software dedicate any and all copyright interest in the software to the public +domain. We make this dedication for the benefit of the public at large and to +the detriment of our heirs and successors. We intend this dedication to be an +overt act of relinquishment in perpetuity of all present and future rights to +this software under copyright law. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +For more information, please refer to <https://unlicense.org/> + +=============================================================================== +ALTERNATIVE 2 - MIT No Attribution +=============================================================================== +Copyright 2025 David Reid + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE.
+*/ diff --git a/llama/llama.cpp/common/json.hpp b/llama/llama.cpp/vendor/nlohmann/json.hpp similarity index 94% rename from llama/llama.cpp/common/json.hpp rename to llama/llama.cpp/vendor/nlohmann/json.hpp index a858728c4..82d69f7c5 100644 --- a/llama/llama.cpp/common/json.hpp +++ b/llama/llama.cpp/vendor/nlohmann/json.hpp @@ -1,9 +1,9 @@ // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT /****************************************************************************\ @@ -34,10 +34,10 @@ // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -47,10 +47,10 @@ // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -59,20 +59,24 @@ #ifndef JSON_SKIP_LIBRARY_VERSION_CHECK #if defined(NLOHMANN_JSON_VERSION_MAJOR) && defined(NLOHMANN_JSON_VERSION_MINOR) && defined(NLOHMANN_JSON_VERSION_PATCH) - #if NLOHMANN_JSON_VERSION_MAJOR != 3 || NLOHMANN_JSON_VERSION_MINOR != 11 || NLOHMANN_JSON_VERSION_PATCH != 3 + #if NLOHMANN_JSON_VERSION_MAJOR != 3 || NLOHMANN_JSON_VERSION_MINOR != 12 || NLOHMANN_JSON_VERSION_PATCH != 0 #warning "Already included a different version of the library!" 
#endif #endif #endif #define NLOHMANN_JSON_VERSION_MAJOR 3 // NOLINT(modernize-macro-to-enum) -#define NLOHMANN_JSON_VERSION_MINOR 11 // NOLINT(modernize-macro-to-enum) -#define NLOHMANN_JSON_VERSION_PATCH 3 // NOLINT(modernize-macro-to-enum) +#define NLOHMANN_JSON_VERSION_MINOR 12 // NOLINT(modernize-macro-to-enum) +#define NLOHMANN_JSON_VERSION_PATCH 0 // NOLINT(modernize-macro-to-enum) #ifndef JSON_DIAGNOSTICS #define JSON_DIAGNOSTICS 0 #endif +#ifndef JSON_DIAGNOSTIC_POSITIONS + #define JSON_DIAGNOSTIC_POSITIONS 0 +#endif + #ifndef JSON_USE_LEGACY_DISCARDED_VALUE_COMPARISON #define JSON_USE_LEGACY_DISCARDED_VALUE_COMPARISON 0 #endif @@ -83,6 +87,12 @@ #define NLOHMANN_JSON_ABI_TAG_DIAGNOSTICS #endif +#if JSON_DIAGNOSTIC_POSITIONS + #define NLOHMANN_JSON_ABI_TAG_DIAGNOSTIC_POSITIONS _dp +#else + #define NLOHMANN_JSON_ABI_TAG_DIAGNOSTIC_POSITIONS +#endif + #if JSON_USE_LEGACY_DISCARDED_VALUE_COMPARISON #define NLOHMANN_JSON_ABI_TAG_LEGACY_DISCARDED_VALUE_COMPARISON _ldvcmp #else @@ -94,14 +104,15 @@ #endif // Construct the namespace ABI tags component -#define NLOHMANN_JSON_ABI_TAGS_CONCAT_EX(a, b) json_abi ## a ## b -#define NLOHMANN_JSON_ABI_TAGS_CONCAT(a, b) \ - NLOHMANN_JSON_ABI_TAGS_CONCAT_EX(a, b) +#define NLOHMANN_JSON_ABI_TAGS_CONCAT_EX(a, b, c) json_abi ## a ## b ## c +#define NLOHMANN_JSON_ABI_TAGS_CONCAT(a, b, c) \ + NLOHMANN_JSON_ABI_TAGS_CONCAT_EX(a, b, c) #define NLOHMANN_JSON_ABI_TAGS \ NLOHMANN_JSON_ABI_TAGS_CONCAT( \ NLOHMANN_JSON_ABI_TAG_DIAGNOSTICS, \ - NLOHMANN_JSON_ABI_TAG_LEGACY_DISCARDED_VALUE_COMPARISON) + NLOHMANN_JSON_ABI_TAG_LEGACY_DISCARDED_VALUE_COMPARISON, \ + NLOHMANN_JSON_ABI_TAG_DIAGNOSTIC_POSITIONS) // Construct the namespace version component #define NLOHMANN_JSON_NAMESPACE_VERSION_CONCAT_EX(major, minor, patch) \ @@ -149,10 +160,10 @@ // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -162,6 +173,9 @@ #include <forward_list> // forward_list #include <iterator> // inserter, front_inserter, end #include <map> // map +#ifdef JSON_HAS_CPP_17 + #include <optional> // optional +#endif #include <string> // string #include <tuple> // tuple, make_tuple #include <type_traits> // is_arithmetic, is_same, is_enum, underlying_type, is_convertible @@ -172,10 +186,10 @@ // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -192,10 +206,10 @@ // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -208,10 +222,10 @@ // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann //
SPDX-License-Identifier: MIT @@ -220,10 +234,10 @@ // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -233,10 +247,10 @@ // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -320,11 +334,11 @@ NLOHMANN_JSON_NAMESPACE_END // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann -// SPDX-FileCopyrightText: 2016-2021 Evan Nemerson +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann +// SPDX-FileCopyrightText: 2016 - 2021 Evan Nemerson // SPDX-License-Identifier: MIT /* Hedley - https://nemequ.github.io/hedley @@ -2384,15 +2398,20 @@ JSON_HEDLEY_DIAGNOSTIC_POP // C++ language standard detection // if the user manually specified the used c++ version this is skipped -#if !defined(JSON_HAS_CPP_20) && !defined(JSON_HAS_CPP_17) && !defined(JSON_HAS_CPP_14) && !defined(JSON_HAS_CPP_11) - #if (defined(__cplusplus) && __cplusplus >= 202002L) || (defined(_MSVC_LANG) && _MSVC_LANG >= 202002L) +#if !defined(JSON_HAS_CPP_23) && !defined(JSON_HAS_CPP_20) && !defined(JSON_HAS_CPP_17) && !defined(JSON_HAS_CPP_14) && !defined(JSON_HAS_CPP_11) + #if (defined(__cplusplus) && __cplusplus > 202002L) || (defined(_MSVC_LANG) && _MSVC_LANG > 202002L) + #define JSON_HAS_CPP_23 #define JSON_HAS_CPP_20 #define JSON_HAS_CPP_17 #define JSON_HAS_CPP_14 - #elif (defined(__cplusplus) && __cplusplus >= 201703L) || (defined(_HAS_CXX17) && _HAS_CXX17 == 1) // fix for issue #464 + #elif (defined(__cplusplus) && __cplusplus > 201703L) || (defined(_MSVC_LANG) && _MSVC_LANG > 201703L) + #define JSON_HAS_CPP_20 #define JSON_HAS_CPP_17 #define JSON_HAS_CPP_14 - #elif (defined(__cplusplus) && __cplusplus >= 201402L) || (defined(_HAS_CXX14) && _HAS_CXX14 == 1) + #elif (defined(__cplusplus) && __cplusplus > 201402L) || (defined(_HAS_CXX17) && _HAS_CXX17 == 1) // fix for issue #464 + #define JSON_HAS_CPP_17 + #define JSON_HAS_CPP_14 + #elif (defined(__cplusplus) && __cplusplus > 201103L) || (defined(_HAS_CXX14) && _HAS_CXX14 == 1) #define JSON_HAS_CPP_14 #endif // the cpp 11 flag is always specified because it is the minimal required version @@ -2568,7 +2587,9 @@ JSON_HEDLEY_DIAGNOSTIC_POP template<typename BasicJsonType> \ inline void to_json(BasicJsonType& j, const ENUM_TYPE& e) \ { \ + /* NOLINTNEXTLINE(modernize-type-traits) we use C++11 */ \ static_assert(std::is_enum<ENUM_TYPE>::value, #ENUM_TYPE " must be an enum!"); \ + /* NOLINTNEXTLINE(modernize-avoid-c-arrays) we don't want to depend on <array> */ \ static const std::pair<ENUM_TYPE, BasicJsonType> m[] = __VA_ARGS__; \ auto it = std::find_if(std::begin(m), std::end(m), \ [e](const std::pair<ENUM_TYPE, BasicJsonType>& ej_pair) -> bool \ @@ -2580,7 +2601,9 @@ JSON_HEDLEY_DIAGNOSTIC_POP template<typename BasicJsonType> \ inline void from_json(const BasicJsonType& j, ENUM_TYPE& e) \ { \ + /* NOLINTNEXTLINE(modernize-type-traits) we use C++11 */ \ static_assert(std::is_enum<ENUM_TYPE>::value, #ENUM_TYPE "
must be an enum!"); \ + /* NOLINTNEXTLINE(modernize-avoid-c-arrays) we don't want to depend on <array> */ \ static const std::pair<ENUM_TYPE, BasicJsonType> m[] = __VA_ARGS__; \ auto it = std::find_if(std::begin(m), std::end(m), \ [&j](const std::pair<ENUM_TYPE, BasicJsonType>& ej_pair) -> bool \ @@ -2743,42 +2766,146 @@ JSON_HEDLEY_DIAGNOSTIC_POP #define NLOHMANN_JSON_TO(v1) nlohmann_json_j[#v1] = nlohmann_json_t.v1; #define NLOHMANN_JSON_FROM(v1) nlohmann_json_j.at(#v1).get_to(nlohmann_json_t.v1); -#define NLOHMANN_JSON_FROM_WITH_DEFAULT(v1) nlohmann_json_t.v1 = nlohmann_json_j.value(#v1, nlohmann_json_default_obj.v1); +#define NLOHMANN_JSON_FROM_WITH_DEFAULT(v1) nlohmann_json_t.v1 = !nlohmann_json_j.is_null() ? nlohmann_json_j.value(#v1, nlohmann_json_default_obj.v1) : nlohmann_json_default_obj.v1; /*! @brief macro @def NLOHMANN_DEFINE_TYPE_INTRUSIVE @since version 3.9.0 +@sa https://json.nlohmann.me/api/macros/nlohmann_define_type_intrusive/ */ #define NLOHMANN_DEFINE_TYPE_INTRUSIVE(Type, ...) \ - friend void to_json(nlohmann::json& nlohmann_json_j, const Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } \ - friend void from_json(const nlohmann::json& nlohmann_json_j, Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_FROM, __VA_ARGS__)) } + template<typename BasicJsonType, nlohmann::detail::enable_if_t<nlohmann::detail::is_basic_json<BasicJsonType>::value, int> = 0> \ + friend void to_json(BasicJsonType& nlohmann_json_j, const Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } \ + template<typename BasicJsonType, nlohmann::detail::enable_if_t<nlohmann::detail::is_basic_json<BasicJsonType>::value, int> = 0> \ + friend void from_json(const BasicJsonType& nlohmann_json_j, Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_FROM, __VA_ARGS__)) } +/*! +@brief macro +@def NLOHMANN_DEFINE_TYPE_INTRUSIVE_WITH_DEFAULT +@since version 3.11.0 +@sa https://json.nlohmann.me/api/macros/nlohmann_define_type_intrusive/ +*/ #define NLOHMANN_DEFINE_TYPE_INTRUSIVE_WITH_DEFAULT(Type, ...) \ - friend void to_json(nlohmann::json& nlohmann_json_j, const Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } \ - friend void from_json(const nlohmann::json& nlohmann_json_j, Type& nlohmann_json_t) { const Type nlohmann_json_default_obj{}; NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_FROM_WITH_DEFAULT, __VA_ARGS__)) } + template<typename BasicJsonType, nlohmann::detail::enable_if_t<nlohmann::detail::is_basic_json<BasicJsonType>::value, int> = 0> \ + friend void to_json(BasicJsonType& nlohmann_json_j, const Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } \ + template<typename BasicJsonType, nlohmann::detail::enable_if_t<nlohmann::detail::is_basic_json<BasicJsonType>::value, int> = 0> \ + friend void from_json(const BasicJsonType& nlohmann_json_j, Type& nlohmann_json_t) { const Type nlohmann_json_default_obj{}; NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_FROM_WITH_DEFAULT, __VA_ARGS__)) } +/*! +@brief macro +@def NLOHMANN_DEFINE_TYPE_INTRUSIVE_ONLY_SERIALIZE +@since version 3.11.3 +@sa https://json.nlohmann.me/api/macros/nlohmann_define_type_intrusive/ +*/ #define NLOHMANN_DEFINE_TYPE_INTRUSIVE_ONLY_SERIALIZE(Type, ...) \ - friend void to_json(nlohmann::json& nlohmann_json_j, const Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } + template<typename BasicJsonType, nlohmann::detail::enable_if_t<nlohmann::detail::is_basic_json<BasicJsonType>::value, int> = 0> \ + friend void to_json(BasicJsonType& nlohmann_json_j, const Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } /*! @brief macro @def NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE @since version 3.9.0 +@sa https://json.nlohmann.me/api/macros/nlohmann_define_type_non_intrusive/ */ #define NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE(Type, ...)
\ - inline void to_json(nlohmann::json& nlohmann_json_j, const Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } \ - inline void from_json(const nlohmann::json& nlohmann_json_j, Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_FROM, __VA_ARGS__)) } - -#define NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE_ONLY_SERIALIZE(Type, ...) \ - inline void to_json(nlohmann::json& nlohmann_json_j, const Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } + template::value, int> = 0> \ + void to_json(BasicJsonType& nlohmann_json_j, const Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } \ + template::value, int> = 0> \ + void from_json(const BasicJsonType& nlohmann_json_j, Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_FROM, __VA_ARGS__)) } +/*! +@brief macro +@def NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE_WITH_DEFAULT +@since version 3.11.0 +@sa https://json.nlohmann.me/api/macros/nlohmann_define_type_non_intrusive/ +*/ #define NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE_WITH_DEFAULT(Type, ...) \ - inline void to_json(nlohmann::json& nlohmann_json_j, const Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } \ - inline void from_json(const nlohmann::json& nlohmann_json_j, Type& nlohmann_json_t) { const Type nlohmann_json_default_obj{}; NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_FROM_WITH_DEFAULT, __VA_ARGS__)) } + template::value, int> = 0> \ + void to_json(BasicJsonType& nlohmann_json_j, const Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } \ + template::value, int> = 0> \ + void from_json(const BasicJsonType& nlohmann_json_j, Type& nlohmann_json_t) { const Type nlohmann_json_default_obj{}; NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_FROM_WITH_DEFAULT, __VA_ARGS__)) } + +/*! +@brief macro +@def NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE_ONLY_SERIALIZE +@since version 3.11.3 +@sa https://json.nlohmann.me/api/macros/nlohmann_define_type_non_intrusive/ +*/ +#define NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE_ONLY_SERIALIZE(Type, ...) \ + template::value, int> = 0> \ + void to_json(BasicJsonType& nlohmann_json_j, const Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } + +/*! +@brief macro +@def NLOHMANN_DEFINE_DERIVED_TYPE_INTRUSIVE +@since version 3.12.0 +@sa https://json.nlohmann.me/api/macros/nlohmann_define_derived_type/ +*/ +#define NLOHMANN_DEFINE_DERIVED_TYPE_INTRUSIVE(Type, BaseType, ...) \ + template::value, int> = 0> \ + friend void to_json(BasicJsonType& nlohmann_json_j, const Type& nlohmann_json_t) { nlohmann::to_json(nlohmann_json_j, static_cast(nlohmann_json_t)); NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } \ + template::value, int> = 0> \ + friend void from_json(const BasicJsonType& nlohmann_json_j, Type& nlohmann_json_t) { nlohmann::from_json(nlohmann_json_j, static_cast(nlohmann_json_t)); NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_FROM, __VA_ARGS__)) } + +/*! +@brief macro +@def NLOHMANN_DEFINE_DERIVED_TYPE_INTRUSIVE_WITH_DEFAULT +@since version 3.12.0 +@sa https://json.nlohmann.me/api/macros/nlohmann_define_derived_type/ +*/ +#define NLOHMANN_DEFINE_DERIVED_TYPE_INTRUSIVE_WITH_DEFAULT(Type, BaseType, ...) 
\
+    template<typename BasicJsonType, nlohmann::detail::enable_if_t<nlohmann::detail::is_basic_json<BasicJsonType>::value, int> = 0> \
+    friend void to_json(BasicJsonType& nlohmann_json_j, const Type& nlohmann_json_t) { nlohmann::to_json(nlohmann_json_j, static_cast<const BaseType&>(nlohmann_json_t)); NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } \
+    template<typename BasicJsonType, nlohmann::detail::enable_if_t<nlohmann::detail::is_basic_json<BasicJsonType>::value, int> = 0> \
+    friend void from_json(const BasicJsonType& nlohmann_json_j, Type& nlohmann_json_t) { nlohmann::from_json(nlohmann_json_j, static_cast<BaseType&>(nlohmann_json_t)); const Type nlohmann_json_default_obj{}; NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_FROM_WITH_DEFAULT, __VA_ARGS__)) }
+
+/*!
+@brief macro
+@def NLOHMANN_DEFINE_DERIVED_TYPE_INTRUSIVE_ONLY_SERIALIZE
+@since version 3.12.0
+@sa https://json.nlohmann.me/api/macros/nlohmann_define_derived_type/
+*/
+#define NLOHMANN_DEFINE_DERIVED_TYPE_INTRUSIVE_ONLY_SERIALIZE(Type, BaseType, ...) \
+    template<typename BasicJsonType, nlohmann::detail::enable_if_t<nlohmann::detail::is_basic_json<BasicJsonType>::value, int> = 0> \
+    friend void to_json(BasicJsonType& nlohmann_json_j, const Type& nlohmann_json_t) { nlohmann::to_json(nlohmann_json_j, static_cast<const BaseType&>(nlohmann_json_t)); NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) }
+
+/*!
+@brief macro
+@def NLOHMANN_DEFINE_DERIVED_TYPE_NON_INTRUSIVE
+@since version 3.12.0
+@sa https://json.nlohmann.me/api/macros/nlohmann_define_derived_type/
+*/
+#define NLOHMANN_DEFINE_DERIVED_TYPE_NON_INTRUSIVE(Type, BaseType, ...) \
+    template<typename BasicJsonType, nlohmann::detail::enable_if_t<nlohmann::detail::is_basic_json<BasicJsonType>::value, int> = 0> \
+    void to_json(BasicJsonType& nlohmann_json_j, const Type& nlohmann_json_t) { nlohmann::to_json(nlohmann_json_j, static_cast<const BaseType&>(nlohmann_json_t)); NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } \
+    template<typename BasicJsonType, nlohmann::detail::enable_if_t<nlohmann::detail::is_basic_json<BasicJsonType>::value, int> = 0> \
+    void from_json(const BasicJsonType& nlohmann_json_j, Type& nlohmann_json_t) { nlohmann::from_json(nlohmann_json_j, static_cast<BaseType&>(nlohmann_json_t)); NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_FROM, __VA_ARGS__)) }
+
+/*!
+@brief macro
+@def NLOHMANN_DEFINE_DERIVED_TYPE_NON_INTRUSIVE_WITH_DEFAULT
+@since version 3.12.0
+@sa https://json.nlohmann.me/api/macros/nlohmann_define_derived_type/
+*/
+#define NLOHMANN_DEFINE_DERIVED_TYPE_NON_INTRUSIVE_WITH_DEFAULT(Type, BaseType, ...) \
+    template<typename BasicJsonType, nlohmann::detail::enable_if_t<nlohmann::detail::is_basic_json<BasicJsonType>::value, int> = 0> \
+    void to_json(BasicJsonType& nlohmann_json_j, const Type& nlohmann_json_t) { nlohmann::to_json(nlohmann_json_j, static_cast<const BaseType&>(nlohmann_json_t)); NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } \
+    template<typename BasicJsonType, nlohmann::detail::enable_if_t<nlohmann::detail::is_basic_json<BasicJsonType>::value, int> = 0> \
+    void from_json(const BasicJsonType& nlohmann_json_j, Type& nlohmann_json_t) { nlohmann::from_json(nlohmann_json_j, static_cast<BaseType&>(nlohmann_json_t)); const Type nlohmann_json_default_obj{}; NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_FROM_WITH_DEFAULT, __VA_ARGS__)) }
+
+/*!
+@brief macro
+@def NLOHMANN_DEFINE_DERIVED_TYPE_NON_INTRUSIVE_ONLY_SERIALIZE
+@since version 3.12.0
+@sa https://json.nlohmann.me/api/macros/nlohmann_define_derived_type/
+*/
+#define NLOHMANN_DEFINE_DERIVED_TYPE_NON_INTRUSIVE_ONLY_SERIALIZE(Type, BaseType, ...) \
+    template<typename BasicJsonType, nlohmann::detail::enable_if_t<nlohmann::detail::is_basic_json<BasicJsonType>::value, int> = 0> \
+    void to_json(BasicJsonType& nlohmann_json_j, const Type& nlohmann_json_t) { nlohmann::to_json(nlohmann_json_j, static_cast<const BaseType&>(nlohmann_json_t)); NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) }
 
 // inspired from https://stackoverflow.com/a/26745591
-// allows to call any std function as if (e.g.
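The derived-type macros defined above first delegate to the base class serializer through the static_cast in the macro body, then handle the derived members. An illustrative round-trip, assuming the single-header include (not part of the patch):

#include <nlohmann/json.hpp>
#include <string>

struct Base
{
    std::string id;
    NLOHMANN_DEFINE_TYPE_INTRUSIVE(Base, id)
};

struct Derived : Base
{
    int extra = 0;
    NLOHMANN_DEFINE_DERIVED_TYPE_INTRUSIVE(Derived, Base, extra)
};

int main()
{
    Derived d;
    d.id = "abc";
    d.extra = 42;
    const nlohmann::json j = d;          // {"extra":42,"id":"abc"}
    const auto back = j.get<Derived>();  // round-trips both layers
    return (back.extra == 42 && back.id == "abc") ? 0 : 1;
}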
with begin): +// allows calling any std function as if (e.g., with begin): // using std::begin; begin(x); // // it allows using the detected idiom to retrieve the return type @@ -2939,10 +3066,10 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -3014,10 +3141,10 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -3056,10 +3183,10 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-FileCopyrightText: 2018 The Abseil Authors // SPDX-License-Identifier: MIT @@ -3219,7 +3346,7 @@ struct static_const #endif template -inline constexpr std::array make_array(Args&& ... args) +constexpr std::array make_array(Args&& ... args) { return std::array {{static_cast(std::forward(args))...}}; } @@ -3230,27 +3357,27 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT #include // numeric_limits +#include // char_traits +#include // tuple #include // false_type, is_constructible, is_integral, is_same, true_type #include // declval -#include // tuple -#include // char_traits // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -3293,7 +3420,7 @@ struct iterator_traits template struct iterator_traits < T, enable_if_t < !std::is_pointer::value >> - : iterator_types + : iterator_types { }; @@ -3315,10 +3442,10 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -3335,10 +3462,10 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// 
SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -3359,10 +3486,10 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT #ifndef INCLUDE_NLOHMANN_JSON_FWD_HPP_ @@ -3624,7 +3751,7 @@ struct char_traits : std::char_traits static constexpr int_type eof() noexcept { - return static_cast(EOF); + return static_cast(std::char_traits::eof()); } }; @@ -3648,7 +3775,7 @@ struct char_traits : std::char_traits static constexpr int_type eof() noexcept { - return static_cast(EOF); + return static_cast(std::char_traits::eof()); } }; @@ -3674,19 +3801,19 @@ struct is_default_constructible : std::is_default_constructible {}; template struct is_default_constructible> - : conjunction, is_default_constructible> {}; + : conjunction, is_default_constructible> {}; template struct is_default_constructible> - : conjunction, is_default_constructible> {}; + : conjunction, is_default_constructible> {}; template struct is_default_constructible> - : conjunction...> {}; + : conjunction...> {}; template struct is_default_constructible> - : conjunction...> {}; + : conjunction...> {}; template struct is_constructible : std::is_constructible {}; @@ -3884,8 +4011,8 @@ is_detected::value&& // special case for types like std::filesystem::path whose iterator's value_type are themselves // c.f. https://github.com/nlohmann/json/pull/3073 !std::is_same>::value&& - is_complete_type < - detected_t>::value >> +is_complete_type < +detected_t>::value >> { using value_type = range_value_t; @@ -4008,12 +4135,12 @@ using is_usable_as_key_type = typename std::conditional < template> using is_usable_as_basic_json_key_type = typename std::conditional < - is_usable_as_key_type::value - && !is_json_iterator_of::value, - std::true_type, - std::false_type >::type; + is_usable_as_key_type::value + && !is_json_iterator_of::value, + std::true_type, + std::false_type >::type; template using detect_erase_with_key_type = decltype(std::declval().erase(std::declval())); @@ -4147,7 +4274,7 @@ struct value_in_range_of_impl1 }; template -inline constexpr bool value_in_range_of(T val) +constexpr bool value_in_range_of(T val) { return value_in_range_of_impl1::test(val); } @@ -4163,7 +4290,7 @@ namespace impl { template -inline constexpr bool is_c_string() +constexpr bool is_c_string() { using TUnExt = typename std::remove_extent::type; using TUnCVExt = typename std::remove_cv::type; @@ -4191,7 +4318,7 @@ namespace impl { template -inline constexpr bool is_transparent() +constexpr bool is_transparent() { return is_detected::value; } @@ -4210,10 +4337,10 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -4358,6 +4485,18 @@ inline OutStringType concat(Args && ... 
args) NLOHMANN_JSON_NAMESPACE_END +// With -Wweak-vtables, Clang will complain about the exception classes as they +// have no out-of-line virtual method definitions and their vtable will be +// emitted in every translation unit. This issue cannot be fixed with a +// header-only library as there is no implementation file to move these +// functions to. As a result, we suppress this warning here to avoid client +// code to stumble over this. See https://github.com/nlohmann/json/issues/4087 +// for a discussion. +#if defined(__clang__) + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wweak-vtables" +#endif + NLOHMANN_JSON_NAMESPACE_BEGIN namespace detail { @@ -4452,16 +4591,34 @@ class exception : public std::exception { return concat(a, '/', detail::escape(b)); }); - return concat('(', str, ") "); + + return concat('(', str, ") ", get_byte_positions(leaf_element)); #else - static_cast(leaf_element); - return ""; + return get_byte_positions(leaf_element); #endif } private: /// an exception object as storage for error messages std::runtime_error m; +#if JSON_DIAGNOSTIC_POSITIONS + template + static std::string get_byte_positions(const BasicJsonType* leaf_element) + { + if ((leaf_element->start_pos() != std::string::npos) && (leaf_element->end_pos() != std::string::npos)) + { + return concat("(bytes ", std::to_string(leaf_element->start_pos()), "-", std::to_string(leaf_element->end_pos()), ") "); + } + return ""; + } +#else + template + static std::string get_byte_positions(const BasicJsonType* leaf_element) + { + static_cast(leaf_element); + return ""; + } +#endif }; /// @brief exception indicating a parse error @@ -4589,6 +4746,10 @@ class other_error : public exception } // namespace detail NLOHMANN_JSON_NAMESPACE_END +#if defined(__clang__) + #pragma clang diagnostic pop +#endif + // #include // #include @@ -4596,10 +4757,10 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -4620,10 +4781,10 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -4640,7 +4801,7 @@ namespace std_fs = std::experimental::filesystem; } // namespace detail NLOHMANN_JSON_NAMESPACE_END #elif JSON_HAS_FILESYSTEM -#include +#include // NOLINT(build/c++17) NLOHMANN_JSON_NAMESPACE_BEGIN namespace detail { @@ -4670,6 +4831,24 @@ inline void from_json(const BasicJsonType& j, typename std::nullptr_t& n) n = nullptr; } +#ifdef JSON_HAS_CPP_17 +#ifndef JSON_USE_IMPLICIT_CONVERSIONS +template +void from_json(const BasicJsonType& j, std::optional& opt) +{ + if (j.is_null()) + { + opt = std::nullopt; + } + else + { + opt.emplace(j.template get()); + } +} + +#endif // JSON_USE_IMPLICIT_CONVERSIONS +#endif // JSON_HAS_CPP_17 + // overloads for basic_json template parameters template < typename BasicJsonType, typename ArithmeticType, enable_if_t < std::is_arithmetic::value&& @@ -4817,6 +4996,54 @@ auto from_json(const BasicJsonType& j, T 
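The std::optional overload above maps JSON null to std::nullopt and anything else through get<T>(); note it sits behind the JSON_HAS_CPP_17 and JSON_USE_IMPLICIT_CONVERSIONS guards shown in the hunk. A hedged sketch of the serialization side (the matching to_json overload appears later in this patch):

#include <nlohmann/json.hpp>
#include <optional>

int main()
{
    const std::optional<int> none;      // serializes as JSON null
    const std::optional<int> some = 7;  // serializes as 7
    const nlohmann::json j = { {"a", none}, {"b", some} };
    return (j["a"].is_null() && j["b"] == 7) ? 0 : 1;
}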
(&arr)[N]) // NOLINT(cppcoreguidelines } } +template +auto from_json(const BasicJsonType& j, T (&arr)[N1][N2]) // NOLINT(cppcoreguidelines-avoid-c-arrays,hicpp-avoid-c-arrays,modernize-avoid-c-arrays) +-> decltype(j.template get(), void()) +{ + for (std::size_t i1 = 0; i1 < N1; ++i1) + { + for (std::size_t i2 = 0; i2 < N2; ++i2) + { + arr[i1][i2] = j.at(i1).at(i2).template get(); + } + } +} + +template +auto from_json(const BasicJsonType& j, T (&arr)[N1][N2][N3]) // NOLINT(cppcoreguidelines-avoid-c-arrays,hicpp-avoid-c-arrays,modernize-avoid-c-arrays) +-> decltype(j.template get(), void()) +{ + for (std::size_t i1 = 0; i1 < N1; ++i1) + { + for (std::size_t i2 = 0; i2 < N2; ++i2) + { + for (std::size_t i3 = 0; i3 < N3; ++i3) + { + arr[i1][i2][i3] = j.at(i1).at(i2).at(i3).template get(); + } + } + } +} + +template +auto from_json(const BasicJsonType& j, T (&arr)[N1][N2][N3][N4]) // NOLINT(cppcoreguidelines-avoid-c-arrays,hicpp-avoid-c-arrays,modernize-avoid-c-arrays) +-> decltype(j.template get(), void()) +{ + for (std::size_t i1 = 0; i1 < N1; ++i1) + { + for (std::size_t i2 = 0; i2 < N2; ++i2) + { + for (std::size_t i3 = 0; i3 < N3; ++i3) + { + for (std::size_t i4 = 0; i4 < N4; ++i4) + { + arr[i1][i2][i3][i4] = j.at(i1).at(i2).at(i3).at(i4).template get(); + } + } + } + } +} + template inline void from_json_array_impl(const BasicJsonType& j, typename BasicJsonType::array_t& arr, priority_tag<3> /*unused*/) { @@ -4902,7 +5129,7 @@ void()) template < typename BasicJsonType, typename T, std::size_t... Idx > std::array from_json_inplace_array_impl(BasicJsonType&& j, - identity_tag> /*unused*/, index_sequence /*unused*/) + identity_tag> /*unused*/, index_sequence /*unused*/) { return { { std::forward(j).at(Idx).template get()... } }; } @@ -5006,6 +5233,12 @@ std::tuple from_json_tuple_impl_base(BasicJsonType&& j, index_sequence< return std::make_tuple(std::forward(j).at(Idx).template get()...); } +template +std::tuple<> from_json_tuple_impl_base(BasicJsonType& /*unused*/, index_sequence<> /*unused*/) +{ + return {}; +} + template < typename BasicJsonType, class A1, class A2 > std::pair from_json_tuple_impl(BasicJsonType&& j, identity_tag> /*unused*/, priority_tag<0> /*unused*/) { @@ -5091,7 +5324,12 @@ inline void from_json(const BasicJsonType& j, std_fs::path& p) { JSON_THROW(type_error::create(302, concat("type must be string, but is ", j.type_name()), &j)); } - p = *j.template get_ptr(); + const auto& s = *j.template get_ptr(); +#ifdef JSON_HAS_CPP_20 + p = std_fs::path(std::u8string_view(reinterpret_cast(s.data()), s.size())); +#else + p = std_fs::u8path(s); // accepts UTF-8 encoded std::string in C++17, deprecated in C++20 +#endif } #endif @@ -5126,14 +5364,20 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT +// #include +// JSON_HAS_CPP_17 +#ifdef JSON_HAS_CPP_17 + #include // optional +#endif + #include // copy #include // begin, end #include // string @@ -5146,17 +5390,16 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 
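The new overloads above deserialize directly into nested C arrays of up to four dimensions, and the std::filesystem::path conversion now routes through u8string handling so UTF-8 survives under C++20. A minimal sketch of the array side (illustrative, not part of the patch):

#include <nlohmann/json.hpp>

int main()
{
    const nlohmann::json j = { {1, 2}, {3, 4} };
    int m[2][2];
    j.get_to(m);  // dispatches to the two-dimensional overload added above
    return (m[0][0] == 1 && m[1][1] == 4) ? 0 : 1;
}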
Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT #include // size_t -#include // input_iterator_tag -#include // string, to_string +#include // forward_iterator_tag #include // tuple_size, get, tuple_element #include // move @@ -5168,6 +5411,46 @@ NLOHMANN_JSON_NAMESPACE_END // #include +// #include +// __ _____ _____ _____ +// __| | __| | | | JSON for Modern C++ +// | | |__ | | | | | | version 3.12.0 +// |_____|_____|_____|_|___| https://github.com/nlohmann/json +// +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann +// SPDX-License-Identifier: MIT + + + +#include // size_t +#include // string, to_string + +// #include + + +NLOHMANN_JSON_NAMESPACE_BEGIN +namespace detail +{ + +template +void int_to_string(StringType& target, std::size_t value) +{ + // For ADL + using std::to_string; + target = to_string(value); +} + +template +StringType to_string(std::size_t value) +{ + StringType result; + int_to_string(result, value); + return result; +} + +} // namespace detail +NLOHMANN_JSON_NAMESPACE_END + // #include @@ -5175,13 +5458,6 @@ NLOHMANN_JSON_NAMESPACE_BEGIN namespace detail { -template -void int_to_string( string_type& target, std::size_t value ) -{ - // For ADL - using std::to_string; - target = to_string(value); -} template class iteration_proxy_value { public: @@ -5189,7 +5465,7 @@ template class iteration_proxy_value using value_type = iteration_proxy_value; using pointer = value_type *; using reference = value_type &; - using iterator_category = std::input_iterator_tag; + using iterator_category = std::forward_iterator_tag; using string_type = typename std::remove_cv< typename std::remove_reference().key() ) >::type >::type; private: @@ -5369,7 +5645,7 @@ namespace std #endif template class tuple_size<::nlohmann::detail::iteration_proxy_value> // NOLINT(cert-dcl58-cpp) - : public std::integral_constant {}; + : public std::integral_constant {}; template class tuple_element> // NOLINT(cert-dcl58-cpp) @@ -5390,8 +5666,6 @@ class tuple_element> inline constexpr bool ::std::ranges::enable_borrowed_range<::nlohmann::detail::iteration_proxy> = true; #endif -// #include - // #include // #include @@ -5637,6 +5911,22 @@ struct external_constructor // to_json // ///////////// +#ifdef JSON_HAS_CPP_17 +template::value, int> = 0> +void to_json(BasicJsonType& j, const std::optional& opt) +{ + if (opt.has_value()) + { + j = *opt; + } + else + { + j = nullptr; + } +} +#endif + template::value, int> = 0> inline void to_json(BasicJsonType& j, T b) noexcept @@ -5783,6 +6073,13 @@ inline void to_json_tuple_impl(BasicJsonType& j, const Tuple& t, index_sequence< j = { std::get(t)... 
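int_to_string() and detail::to_string() now live in their own header, and iteration_proxy_value advertises std::forward_iterator_tag instead of input_iterator_tag. The user-visible surface of this machinery is items(), sketched below; for arrays, key() is the element index rendered through int_to_string().

#include <nlohmann/json.hpp>
#include <iostream>

int main()
{
    const nlohmann::json j = {10, 20, 30};
    for (const auto& item : j.items())
    {
        std::cout << item.key() << " -> " << item.value() << '\n';  // "0 -> 10", ...
    }
    return 0;
}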
}; } +template +inline void to_json_tuple_impl(BasicJsonType& j, const Tuple& /*unused*/, index_sequence<> /*unused*/) +{ + using array_t = typename BasicJsonType::array_t; + j = array_t(); +} + template::value, int > = 0> inline void to_json(BasicJsonType& j, const T& t) { @@ -5793,7 +6090,12 @@ inline void to_json(BasicJsonType& j, const T& t) template inline void to_json(BasicJsonType& j, const std_fs::path& p) { - j = p.string(); +#ifdef JSON_HAS_CPP_20 + const std::u8string s = p.u8string(); + j = std::string(s.begin(), s.end()); +#else + j = p.u8string(); // returns std::string in C++17 +#endif } #endif @@ -5868,10 +6170,10 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -5980,10 +6282,10 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -6113,10 +6415,10 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -6133,16 +6435,19 @@ NLOHMANN_JSON_NAMESPACE_END #include // char_traits, string #include // make_pair, move #include // vector +#ifdef __cpp_lib_byteswap + #include //byteswap +#endif // #include // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -6162,6 +6467,8 @@ NLOHMANN_JSON_NAMESPACE_END #include // istream #endif // JSON_NO_IO +// #include + // #include // #include @@ -6209,6 +6516,13 @@ class file_input_adapter return std::fgetc(m_file); } + // returns the number of characters successfully read + template + std::size_t get_elements(T* dest, std::size_t count = 1) + { + return fread(dest, 1, sizeof(T) * count, m_file); + } + private: /// the file pointer to read from std::FILE* m_file; @@ -6268,6 +6582,17 @@ class input_stream_adapter return res; } + template + std::size_t get_elements(T* dest, std::size_t count = 1) + { + auto res = static_cast(sb->sgetn(reinterpret_cast(dest), static_cast(count * sizeof(T)))); + if (JSON_HEDLEY_UNLIKELY(res < count * sizeof(T))) + { + is->clear(is->rdstate() | std::ios::eofbit); + } + return res; + } + private: /// the associated input stream std::istream* is = nullptr; @@ -6299,6 +6624,26 @@ class iterator_input_adapter return char_traits::eof(); } + // for general iterators, we cannot really do something better than falling back to processing the range one-by-one + template + std::size_t get_elements(T* dest, std::size_t count = 1) + { 
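The get_elements() members added above let the binary readers pull a whole block of bytes per call instead of issuing one get_character() per byte; user-facing parsing is unchanged. An illustrative CBOR round-trip through a stream, which exercises the input_stream_adapter path:

#include <nlohmann/json.hpp>
#include <cstdint>
#include <sstream>
#include <string>
#include <vector>

int main()
{
    const nlohmann::json original = { {"k", 1} };
    const std::vector<std::uint8_t> bytes = nlohmann::json::to_cbor(original);
    std::stringstream ss(std::string(bytes.begin(), bytes.end()));
    return (nlohmann::json::from_cbor(ss) == original) ? 0 : 1;
}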
+ auto* ptr = reinterpret_cast(dest); + for (std::size_t read_index = 0; read_index < count * sizeof(T); ++read_index) + { + if (JSON_HEDLEY_LIKELY(current != end)) + { + ptr[read_index] = static_cast(*current); + std::advance(current, 1); + } + else + { + return read_index; + } + } + return count * sizeof(T); + } + private: IteratorType current; IteratorType end; @@ -6462,6 +6807,13 @@ class wide_string_input_adapter return utf8_bytes[utf8_bytes_index++]; } + // parsing binary with wchar doesn't make sense, but since the parsing mode can be runtime, we need something here + template + std::size_t get_elements(T* /*dest*/, std::size_t /*count*/ = 1) + { + JSON_THROW(parse_error::create(112, 1, "wide string type cannot be interpreted as binary data", nullptr)); + } + private: BaseInputAdapter base_adapter; @@ -6558,10 +6910,17 @@ typename container_input_adapter_factory_impl::container_input_adapter_factory::create(container); } +// specialization for std::string +using string_input_adapter_type = decltype(input_adapter(std::declval())); + #ifndef JSON_NO_IO // Special cases with fast paths inline file_input_adapter input_adapter(std::FILE* file) { + if (file == nullptr) + { + JSON_THROW(parse_error::create(101, 0, "attempting to parse an empty input; check that your input string or stream contains the expected JSON", nullptr)); + } return file_input_adapter(file); } @@ -6588,9 +6947,13 @@ template < typename CharT, int >::type = 0 > contiguous_bytes_input_adapter input_adapter(CharT b) { + if (b == nullptr) + { + JSON_THROW(parse_error::create(101, 0, "attempting to parse an empty input; check that your input string or stream contains the expected JSON", nullptr)); + } auto length = std::strlen(reinterpret_cast(b)); const auto* ptr = reinterpret_cast(b); - return input_adapter(ptr, ptr + length); + return input_adapter(ptr, ptr + length); // cppcheck-suppress[nullPointerArithmeticRedundantCheck] } template @@ -6636,742 +6999,29 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT #include #include // string +#include // enable_if_t #include // move #include // vector // #include -// #include - -// #include - - -NLOHMANN_JSON_NAMESPACE_BEGIN - -/*! -@brief SAX interface - -This class describes the SAX interface used by @ref nlohmann::json::sax_parse. -Each function is called in different situations while the input is parsed. The -boolean return value informs the parser whether to continue processing the -input. -*/ -template -struct json_sax -{ - using number_integer_t = typename BasicJsonType::number_integer_t; - using number_unsigned_t = typename BasicJsonType::number_unsigned_t; - using number_float_t = typename BasicJsonType::number_float_t; - using string_t = typename BasicJsonType::string_t; - using binary_t = typename BasicJsonType::binary_t; - - /*! - @brief a null value was read - @return whether parsing should proceed - */ - virtual bool null() = 0; - - /*! - @brief a boolean value was read - @param[in] val boolean value - @return whether parsing should proceed - */ - virtual bool boolean(bool val) = 0; - - /*! 
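The nullptr checks above turn what used to be undefined behavior (std::strlen or fread on a null pointer) into an ordinary parse_error with id 101. A hedged sketch of the observable behavior:

#include <nlohmann/json.hpp>

int main()
{
    const char* input = nullptr;
    try
    {
        (void)nlohmann::json::parse(input);  // previously reached strlen(nullptr)
    }
    catch (const nlohmann::json::parse_error& e)
    {
        return e.id == 101 ? 0 : 1;  // "attempting to parse an empty input; ..."
    }
    return 1;
}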
- @brief an integer number was read - @param[in] val integer value - @return whether parsing should proceed - */ - virtual bool number_integer(number_integer_t val) = 0; - - /*! - @brief an unsigned integer number was read - @param[in] val unsigned integer value - @return whether parsing should proceed - */ - virtual bool number_unsigned(number_unsigned_t val) = 0; - - /*! - @brief a floating-point number was read - @param[in] val floating-point value - @param[in] s raw token value - @return whether parsing should proceed - */ - virtual bool number_float(number_float_t val, const string_t& s) = 0; - - /*! - @brief a string value was read - @param[in] val string value - @return whether parsing should proceed - @note It is safe to move the passed string value. - */ - virtual bool string(string_t& val) = 0; - - /*! - @brief a binary value was read - @param[in] val binary value - @return whether parsing should proceed - @note It is safe to move the passed binary value. - */ - virtual bool binary(binary_t& val) = 0; - - /*! - @brief the beginning of an object was read - @param[in] elements number of object elements or -1 if unknown - @return whether parsing should proceed - @note binary formats may report the number of elements - */ - virtual bool start_object(std::size_t elements) = 0; - - /*! - @brief an object key was read - @param[in] val object key - @return whether parsing should proceed - @note It is safe to move the passed string. - */ - virtual bool key(string_t& val) = 0; - - /*! - @brief the end of an object was read - @return whether parsing should proceed - */ - virtual bool end_object() = 0; - - /*! - @brief the beginning of an array was read - @param[in] elements number of array elements or -1 if unknown - @return whether parsing should proceed - @note binary formats may report the number of elements - */ - virtual bool start_array(std::size_t elements) = 0; - - /*! - @brief the end of an array was read - @return whether parsing should proceed - */ - virtual bool end_array() = 0; - - /*! - @brief a parse error occurred - @param[in] position the position in the input where the error occurs - @param[in] last_token the last read token - @param[in] ex an exception object describing the error - @return whether parsing should proceed (must return false) - */ - virtual bool parse_error(std::size_t position, - const std::string& last_token, - const detail::exception& ex) = 0; - - json_sax() = default; - json_sax(const json_sax&) = default; - json_sax(json_sax&&) noexcept = default; - json_sax& operator=(const json_sax&) = default; - json_sax& operator=(json_sax&&) noexcept = default; - virtual ~json_sax() = default; -}; - -namespace detail -{ -/*! -@brief SAX implementation to create a JSON value from SAX events - -This class implements the @ref json_sax interface and processes the SAX events -to create a JSON value which makes it basically a DOM parser. The structure or -hierarchy of the JSON value is managed by the stack `ref_stack` which contains -a pointer to the respective array or object for each recursion depth. - -After successful parsing, the value that is passed by reference to the -constructor contains the parsed value. 
- -@tparam BasicJsonType the JSON type -*/ -template -class json_sax_dom_parser -{ - public: - using number_integer_t = typename BasicJsonType::number_integer_t; - using number_unsigned_t = typename BasicJsonType::number_unsigned_t; - using number_float_t = typename BasicJsonType::number_float_t; - using string_t = typename BasicJsonType::string_t; - using binary_t = typename BasicJsonType::binary_t; - - /*! - @param[in,out] r reference to a JSON value that is manipulated while - parsing - @param[in] allow_exceptions_ whether parse errors yield exceptions - */ - explicit json_sax_dom_parser(BasicJsonType& r, const bool allow_exceptions_ = true) - : root(r), allow_exceptions(allow_exceptions_) - {} - - // make class move-only - json_sax_dom_parser(const json_sax_dom_parser&) = delete; - json_sax_dom_parser(json_sax_dom_parser&&) = default; // NOLINT(hicpp-noexcept-move,performance-noexcept-move-constructor) - json_sax_dom_parser& operator=(const json_sax_dom_parser&) = delete; - json_sax_dom_parser& operator=(json_sax_dom_parser&&) = default; // NOLINT(hicpp-noexcept-move,performance-noexcept-move-constructor) - ~json_sax_dom_parser() = default; - - bool null() - { - handle_value(nullptr); - return true; - } - - bool boolean(bool val) - { - handle_value(val); - return true; - } - - bool number_integer(number_integer_t val) - { - handle_value(val); - return true; - } - - bool number_unsigned(number_unsigned_t val) - { - handle_value(val); - return true; - } - - bool number_float(number_float_t val, const string_t& /*unused*/) - { - handle_value(val); - return true; - } - - bool string(string_t& val) - { - handle_value(val); - return true; - } - - bool binary(binary_t& val) - { - handle_value(std::move(val)); - return true; - } - - bool start_object(std::size_t len) - { - ref_stack.push_back(handle_value(BasicJsonType::value_t::object)); - - if (JSON_HEDLEY_UNLIKELY(len != static_cast(-1) && len > ref_stack.back()->max_size())) - { - JSON_THROW(out_of_range::create(408, concat("excessive object size: ", std::to_string(len)), ref_stack.back())); - } - - return true; - } - - bool key(string_t& val) - { - JSON_ASSERT(!ref_stack.empty()); - JSON_ASSERT(ref_stack.back()->is_object()); - - // add null at given key and store the reference for later - object_element = &(ref_stack.back()->m_data.m_value.object->operator[](val)); - return true; - } - - bool end_object() - { - JSON_ASSERT(!ref_stack.empty()); - JSON_ASSERT(ref_stack.back()->is_object()); - - ref_stack.back()->set_parents(); - ref_stack.pop_back(); - return true; - } - - bool start_array(std::size_t len) - { - ref_stack.push_back(handle_value(BasicJsonType::value_t::array)); - - if (JSON_HEDLEY_UNLIKELY(len != static_cast(-1) && len > ref_stack.back()->max_size())) - { - JSON_THROW(out_of_range::create(408, concat("excessive array size: ", std::to_string(len)), ref_stack.back())); - } - - return true; - } - - bool end_array() - { - JSON_ASSERT(!ref_stack.empty()); - JSON_ASSERT(ref_stack.back()->is_array()); - - ref_stack.back()->set_parents(); - ref_stack.pop_back(); - return true; - } - - template - bool parse_error(std::size_t /*unused*/, const std::string& /*unused*/, - const Exception& ex) - { - errored = true; - static_cast(ex); - if (allow_exceptions) - { - JSON_THROW(ex); - } - return false; - } - - constexpr bool is_errored() const - { - return errored; - } - - private: - /*! - @invariant If the ref stack is empty, then the passed value will be the new - root. 
- @invariant If the ref stack contains a value, then it is an array or an - object to which we can add elements - */ - template - JSON_HEDLEY_RETURNS_NON_NULL - BasicJsonType* handle_value(Value&& v) - { - if (ref_stack.empty()) - { - root = BasicJsonType(std::forward(v)); - return &root; - } - - JSON_ASSERT(ref_stack.back()->is_array() || ref_stack.back()->is_object()); - - if (ref_stack.back()->is_array()) - { - ref_stack.back()->m_data.m_value.array->emplace_back(std::forward(v)); - return &(ref_stack.back()->m_data.m_value.array->back()); - } - - JSON_ASSERT(ref_stack.back()->is_object()); - JSON_ASSERT(object_element); - *object_element = BasicJsonType(std::forward(v)); - return object_element; - } - - /// the parsed JSON value - BasicJsonType& root; - /// stack to model hierarchy of values - std::vector ref_stack {}; - /// helper to hold the reference for the next object element - BasicJsonType* object_element = nullptr; - /// whether a syntax error occurred - bool errored = false; - /// whether to throw exceptions in case of errors - const bool allow_exceptions = true; -}; - -template -class json_sax_dom_callback_parser -{ - public: - using number_integer_t = typename BasicJsonType::number_integer_t; - using number_unsigned_t = typename BasicJsonType::number_unsigned_t; - using number_float_t = typename BasicJsonType::number_float_t; - using string_t = typename BasicJsonType::string_t; - using binary_t = typename BasicJsonType::binary_t; - using parser_callback_t = typename BasicJsonType::parser_callback_t; - using parse_event_t = typename BasicJsonType::parse_event_t; - - json_sax_dom_callback_parser(BasicJsonType& r, - const parser_callback_t cb, - const bool allow_exceptions_ = true) - : root(r), callback(cb), allow_exceptions(allow_exceptions_) - { - keep_stack.push_back(true); - } - - // make class move-only - json_sax_dom_callback_parser(const json_sax_dom_callback_parser&) = delete; - json_sax_dom_callback_parser(json_sax_dom_callback_parser&&) = default; // NOLINT(hicpp-noexcept-move,performance-noexcept-move-constructor) - json_sax_dom_callback_parser& operator=(const json_sax_dom_callback_parser&) = delete; - json_sax_dom_callback_parser& operator=(json_sax_dom_callback_parser&&) = default; // NOLINT(hicpp-noexcept-move,performance-noexcept-move-constructor) - ~json_sax_dom_callback_parser() = default; - - bool null() - { - handle_value(nullptr); - return true; - } - - bool boolean(bool val) - { - handle_value(val); - return true; - } - - bool number_integer(number_integer_t val) - { - handle_value(val); - return true; - } - - bool number_unsigned(number_unsigned_t val) - { - handle_value(val); - return true; - } - - bool number_float(number_float_t val, const string_t& /*unused*/) - { - handle_value(val); - return true; - } - - bool string(string_t& val) - { - handle_value(val); - return true; - } - - bool binary(binary_t& val) - { - handle_value(std::move(val)); - return true; - } - - bool start_object(std::size_t len) - { - // check callback for object start - const bool keep = callback(static_cast(ref_stack.size()), parse_event_t::object_start, discarded); - keep_stack.push_back(keep); - - auto val = handle_value(BasicJsonType::value_t::object, true); - ref_stack.push_back(val.second); - - // check object limit - if (ref_stack.back() && JSON_HEDLEY_UNLIKELY(len != static_cast(-1) && len > ref_stack.back()->max_size())) - { - JSON_THROW(out_of_range::create(408, concat("excessive object size: ", std::to_string(len)), ref_stack.back())); - } - - return true; - } - - bool 
key(string_t& val) - { - BasicJsonType k = BasicJsonType(val); - - // check callback for key - const bool keep = callback(static_cast(ref_stack.size()), parse_event_t::key, k); - key_keep_stack.push_back(keep); - - // add discarded value at given key and store the reference for later - if (keep && ref_stack.back()) - { - object_element = &(ref_stack.back()->m_data.m_value.object->operator[](val) = discarded); - } - - return true; - } - - bool end_object() - { - if (ref_stack.back()) - { - if (!callback(static_cast(ref_stack.size()) - 1, parse_event_t::object_end, *ref_stack.back())) - { - // discard object - *ref_stack.back() = discarded; - } - else - { - ref_stack.back()->set_parents(); - } - } - - JSON_ASSERT(!ref_stack.empty()); - JSON_ASSERT(!keep_stack.empty()); - ref_stack.pop_back(); - keep_stack.pop_back(); - - if (!ref_stack.empty() && ref_stack.back() && ref_stack.back()->is_structured()) - { - // remove discarded value - for (auto it = ref_stack.back()->begin(); it != ref_stack.back()->end(); ++it) - { - if (it->is_discarded()) - { - ref_stack.back()->erase(it); - break; - } - } - } - - return true; - } - - bool start_array(std::size_t len) - { - const bool keep = callback(static_cast(ref_stack.size()), parse_event_t::array_start, discarded); - keep_stack.push_back(keep); - - auto val = handle_value(BasicJsonType::value_t::array, true); - ref_stack.push_back(val.second); - - // check array limit - if (ref_stack.back() && JSON_HEDLEY_UNLIKELY(len != static_cast(-1) && len > ref_stack.back()->max_size())) - { - JSON_THROW(out_of_range::create(408, concat("excessive array size: ", std::to_string(len)), ref_stack.back())); - } - - return true; - } - - bool end_array() - { - bool keep = true; - - if (ref_stack.back()) - { - keep = callback(static_cast(ref_stack.size()) - 1, parse_event_t::array_end, *ref_stack.back()); - if (keep) - { - ref_stack.back()->set_parents(); - } - else - { - // discard array - *ref_stack.back() = discarded; - } - } - - JSON_ASSERT(!ref_stack.empty()); - JSON_ASSERT(!keep_stack.empty()); - ref_stack.pop_back(); - keep_stack.pop_back(); - - // remove discarded value - if (!keep && !ref_stack.empty() && ref_stack.back()->is_array()) - { - ref_stack.back()->m_data.m_value.array->pop_back(); - } - - return true; - } - - template - bool parse_error(std::size_t /*unused*/, const std::string& /*unused*/, - const Exception& ex) - { - errored = true; - static_cast(ex); - if (allow_exceptions) - { - JSON_THROW(ex); - } - return false; - } - - constexpr bool is_errored() const - { - return errored; - } - - private: - /*! - @param[in] v value to add to the JSON value we build during parsing - @param[in] skip_callback whether we should skip calling the callback - function; this is required after start_array() and - start_object() SAX events, because otherwise we would call the - callback function with an empty array or object, respectively. - - @invariant If the ref stack is empty, then the passed value will be the new - root. 
- @invariant If the ref stack contains a value, then it is an array or an - object to which we can add elements - - @return pair of boolean (whether value should be kept) and pointer (to the - passed value in the ref_stack hierarchy; nullptr if not kept) - */ - template - std::pair handle_value(Value&& v, const bool skip_callback = false) - { - JSON_ASSERT(!keep_stack.empty()); - - // do not handle this value if we know it would be added to a discarded - // container - if (!keep_stack.back()) - { - return {false, nullptr}; - } - - // create value - auto value = BasicJsonType(std::forward(v)); - - // check callback - const bool keep = skip_callback || callback(static_cast(ref_stack.size()), parse_event_t::value, value); - - // do not handle this value if we just learnt it shall be discarded - if (!keep) - { - return {false, nullptr}; - } - - if (ref_stack.empty()) - { - root = std::move(value); - return {true, & root}; - } - - // skip this value if we already decided to skip the parent - // (https://github.com/nlohmann/json/issues/971#issuecomment-413678360) - if (!ref_stack.back()) - { - return {false, nullptr}; - } - - // we now only expect arrays and objects - JSON_ASSERT(ref_stack.back()->is_array() || ref_stack.back()->is_object()); - - // array - if (ref_stack.back()->is_array()) - { - ref_stack.back()->m_data.m_value.array->emplace_back(std::move(value)); - return {true, & (ref_stack.back()->m_data.m_value.array->back())}; - } - - // object - JSON_ASSERT(ref_stack.back()->is_object()); - // check if we should store an element for the current key - JSON_ASSERT(!key_keep_stack.empty()); - const bool store_element = key_keep_stack.back(); - key_keep_stack.pop_back(); - - if (!store_element) - { - return {false, nullptr}; - } - - JSON_ASSERT(object_element); - *object_element = std::move(value); - return {true, object_element}; - } - - /// the parsed JSON value - BasicJsonType& root; - /// stack to model hierarchy of values - std::vector ref_stack {}; - /// stack to manage which values to keep - std::vector keep_stack {}; // NOLINT(readability-redundant-member-init) - /// stack to manage which object keys to keep - std::vector key_keep_stack {}; // NOLINT(readability-redundant-member-init) - /// helper to hold the reference for the next object element - BasicJsonType* object_element = nullptr; - /// whether a syntax error occurred - bool errored = false; - /// callback function - const parser_callback_t callback = nullptr; - /// whether to throw exceptions in case of errors - const bool allow_exceptions = true; - /// a discarded value for the callback - BasicJsonType discarded = BasicJsonType::value_t::discarded; -}; - -template -class json_sax_acceptor -{ - public: - using number_integer_t = typename BasicJsonType::number_integer_t; - using number_unsigned_t = typename BasicJsonType::number_unsigned_t; - using number_float_t = typename BasicJsonType::number_float_t; - using string_t = typename BasicJsonType::string_t; - using binary_t = typename BasicJsonType::binary_t; - - bool null() - { - return true; - } - - bool boolean(bool /*unused*/) - { - return true; - } - - bool number_integer(number_integer_t /*unused*/) - { - return true; - } - - bool number_unsigned(number_unsigned_t /*unused*/) - { - return true; - } - - bool number_float(number_float_t /*unused*/, const string_t& /*unused*/) - { - return true; - } - - bool string(string_t& /*unused*/) - { - return true; - } - - bool binary(binary_t& /*unused*/) - { - return true; - } - - bool start_object(std::size_t /*unused*/ = 
static_cast(-1)) - { - return true; - } - - bool key(string_t& /*unused*/) - { - return true; - } - - bool end_object() - { - return true; - } - - bool start_array(std::size_t /*unused*/ = static_cast(-1)) - { - return true; - } - - bool end_array() - { - return true; - } - - bool parse_error(std::size_t /*unused*/, const std::string& /*unused*/, const detail::exception& /*unused*/) - { - return false; - } -}; - -} // namespace detail -NLOHMANN_JSON_NAMESPACE_END - // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -8339,7 +7989,7 @@ class lexer : public lexer_base locale's decimal point is used instead of `.` to work with the locale-dependent converters. */ - token_type scan_number() // lgtm [cpp/use-of-goto] + token_type scan_number() // lgtm [cpp/use-of-goto] `goto` is used in this function to implement the number-parsing state machine described above. By design, any finite input will eventually reach the "done" state or return token_type::parse_error. In each intermediate state, 1 byte of the input is appended to the token_buffer vector, and only the already initialized variables token_buffer, number_type, and error_message are manipulated. { // reset token_buffer to store the number's bytes reset(); @@ -8421,6 +8071,7 @@ scan_number_zero: case '.': { add(decimal_point_char); + decimal_point_position = token_buffer.size() - 1; goto scan_number_decimal1; } @@ -8457,6 +8108,7 @@ scan_number_any1: case '.': { add(decimal_point_char); + decimal_point_position = token_buffer.size() - 1; goto scan_number_decimal1; } @@ -8617,7 +8269,7 @@ scan_number_done: // we are done scanning a number) unget(); - char* endptr = nullptr; // NOLINT(cppcoreguidelines-pro-type-vararg,hicpp-vararg) + char* endptr = nullptr; // NOLINT(misc-const-correctness,cppcoreguidelines-pro-type-vararg,hicpp-vararg) errno = 0; // try to parse integers first and fall back to floats @@ -8628,7 +8280,7 @@ scan_number_done: // we checked the number format before JSON_ASSERT(endptr == token_buffer.data() + token_buffer.size()); - if (errno == 0) + if (errno != ERANGE) { value_unsigned = static_cast(x); if (value_unsigned == x) @@ -8644,7 +8296,7 @@ scan_number_done: // we checked the number format before JSON_ASSERT(endptr == token_buffer.data() + token_buffer.size()); - if (errno == 0) + if (errno != ERANGE) { value_integer = static_cast(x); if (value_integer == x) @@ -8694,6 +8346,7 @@ scan_number_done: { token_buffer.clear(); token_string.clear(); + decimal_point_position = std::string::npos; token_string.push_back(char_traits::to_char_type(current)); } @@ -8802,6 +8455,11 @@ scan_number_done: /// return current string value (implicitly resets the token; useful only once) string_t& get_string() { + // translate decimal points from locale back to '.' (#4084) + if (decimal_point_char != '.' 
&& decimal_point_position != std::string::npos) + { + token_buffer[decimal_point_position] = '.'; + } return token_buffer; } @@ -8999,6 +8657,8 @@ scan_number_done: /// the decimal point const char_int_type decimal_point_char = '.'; + /// the position of the decimal point in the input + std::size_t decimal_point_position = std::string::npos; }; } // namespace detail @@ -9006,13 +8666,986 @@ NLOHMANN_JSON_NAMESPACE_END // #include +// #include + +NLOHMANN_JSON_NAMESPACE_BEGIN + +/*! +@brief SAX interface + +This class describes the SAX interface used by @ref nlohmann::json::sax_parse. +Each function is called in different situations while the input is parsed. The +boolean return value informs the parser whether to continue processing the +input. +*/ +template +struct json_sax +{ + using number_integer_t = typename BasicJsonType::number_integer_t; + using number_unsigned_t = typename BasicJsonType::number_unsigned_t; + using number_float_t = typename BasicJsonType::number_float_t; + using string_t = typename BasicJsonType::string_t; + using binary_t = typename BasicJsonType::binary_t; + + /*! + @brief a null value was read + @return whether parsing should proceed + */ + virtual bool null() = 0; + + /*! + @brief a boolean value was read + @param[in] val boolean value + @return whether parsing should proceed + */ + virtual bool boolean(bool val) = 0; + + /*! + @brief an integer number was read + @param[in] val integer value + @return whether parsing should proceed + */ + virtual bool number_integer(number_integer_t val) = 0; + + /*! + @brief an unsigned integer number was read + @param[in] val unsigned integer value + @return whether parsing should proceed + */ + virtual bool number_unsigned(number_unsigned_t val) = 0; + + /*! + @brief a floating-point number was read + @param[in] val floating-point value + @param[in] s raw token value + @return whether parsing should proceed + */ + virtual bool number_float(number_float_t val, const string_t& s) = 0; + + /*! + @brief a string value was read + @param[in] val string value + @return whether parsing should proceed + @note It is safe to move the passed string value. + */ + virtual bool string(string_t& val) = 0; + + /*! + @brief a binary value was read + @param[in] val binary value + @return whether parsing should proceed + @note It is safe to move the passed binary value. + */ + virtual bool binary(binary_t& val) = 0; + + /*! + @brief the beginning of an object was read + @param[in] elements number of object elements or -1 if unknown + @return whether parsing should proceed + @note binary formats may report the number of elements + */ + virtual bool start_object(std::size_t elements) = 0; + + /*! + @brief an object key was read + @param[in] val object key + @return whether parsing should proceed + @note It is safe to move the passed string. + */ + virtual bool key(string_t& val) = 0; + + /*! + @brief the end of an object was read + @return whether parsing should proceed + */ + virtual bool end_object() = 0; + + /*! + @brief the beginning of an array was read + @param[in] elements number of array elements or -1 if unknown + @return whether parsing should proceed + @note binary formats may report the number of elements + */ + virtual bool start_array(std::size_t elements) = 0; + + /*! + @brief the end of an array was read + @return whether parsing should proceed + */ + virtual bool end_array() = 0; + + /*! 
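Two lexer changes come together above: the scanner now records where the decimal point landed so get_string() can translate a locale-specific separator back to '.' (issue #4084), and the integer paths now fall back to floating point only on a genuine ERANGE overflow rather than on any nonzero errno. The overflow fall-back is observable (illustrative):

#include <nlohmann/json.hpp>

int main()
{
    const auto big  = nlohmann::json::parse("18446744073709551616");  // 2^64, overflows uint64_t
    const auto fits = nlohmann::json::parse("18446744073709551615");  // 2^64 - 1
    return (big.is_number_float() && fits.is_number_unsigned()) ? 0 : 1;
}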
+ @brief a parse error occurred + @param[in] position the position in the input where the error occurs + @param[in] last_token the last read token + @param[in] ex an exception object describing the error + @return whether parsing should proceed (must return false) + */ + virtual bool parse_error(std::size_t position, + const std::string& last_token, + const detail::exception& ex) = 0; + + json_sax() = default; + json_sax(const json_sax&) = default; + json_sax(json_sax&&) noexcept = default; + json_sax& operator=(const json_sax&) = default; + json_sax& operator=(json_sax&&) noexcept = default; + virtual ~json_sax() = default; +}; + +namespace detail +{ +constexpr std::size_t unknown_size() +{ + return (std::numeric_limits::max)(); +} + +/*! +@brief SAX implementation to create a JSON value from SAX events + +This class implements the @ref json_sax interface and processes the SAX events +to create a JSON value which makes it basically a DOM parser. The structure or +hierarchy of the JSON value is managed by the stack `ref_stack` which contains +a pointer to the respective array or object for each recursion depth. + +After successful parsing, the value that is passed by reference to the +constructor contains the parsed value. + +@tparam BasicJsonType the JSON type +*/ +template +class json_sax_dom_parser +{ + public: + using number_integer_t = typename BasicJsonType::number_integer_t; + using number_unsigned_t = typename BasicJsonType::number_unsigned_t; + using number_float_t = typename BasicJsonType::number_float_t; + using string_t = typename BasicJsonType::string_t; + using binary_t = typename BasicJsonType::binary_t; + using lexer_t = lexer; + + /*! + @param[in,out] r reference to a JSON value that is manipulated while + parsing + @param[in] allow_exceptions_ whether parse errors yield exceptions + */ + explicit json_sax_dom_parser(BasicJsonType& r, const bool allow_exceptions_ = true, lexer_t* lexer_ = nullptr) + : root(r), allow_exceptions(allow_exceptions_), m_lexer_ref(lexer_) + {} + + // make class move-only + json_sax_dom_parser(const json_sax_dom_parser&) = delete; + json_sax_dom_parser(json_sax_dom_parser&&) = default; // NOLINT(hicpp-noexcept-move,performance-noexcept-move-constructor) + json_sax_dom_parser& operator=(const json_sax_dom_parser&) = delete; + json_sax_dom_parser& operator=(json_sax_dom_parser&&) = default; // NOLINT(hicpp-noexcept-move,performance-noexcept-move-constructor) + ~json_sax_dom_parser() = default; + + bool null() + { + handle_value(nullptr); + return true; + } + + bool boolean(bool val) + { + handle_value(val); + return true; + } + + bool number_integer(number_integer_t val) + { + handle_value(val); + return true; + } + + bool number_unsigned(number_unsigned_t val) + { + handle_value(val); + return true; + } + + bool number_float(number_float_t val, const string_t& /*unused*/) + { + handle_value(val); + return true; + } + + bool string(string_t& val) + { + handle_value(val); + return true; + } + + bool binary(binary_t& val) + { + handle_value(std::move(val)); + return true; + } + + bool start_object(std::size_t len) + { + ref_stack.push_back(handle_value(BasicJsonType::value_t::object)); + +#if JSON_DIAGNOSTIC_POSITIONS + // Manually set the start position of the object here. + // Ensure this is after the call to handle_value to ensure correct start position. + if (m_lexer_ref) + { + // Lexer has read the first character of the object, so + // subtract 1 from the position to get the correct start position. 
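The json_sax interface re-declared above is the same contract it was before the move; a minimal consumer that counts object keys via json::sax_parse (illustrative sketch, not part of the patch):

#include <nlohmann/json.hpp>
#include <cstddef>
#include <string>

struct key_counter : nlohmann::json_sax<nlohmann::json>
{
    std::size_t keys = 0;
    bool null() override { return true; }
    bool boolean(bool) override { return true; }
    bool number_integer(number_integer_t) override { return true; }
    bool number_unsigned(number_unsigned_t) override { return true; }
    bool number_float(number_float_t, const string_t&) override { return true; }
    bool string(string_t&) override { return true; }
    bool binary(binary_t&) override { return true; }
    bool start_object(std::size_t) override { return true; }
    bool key(string_t&) override { ++keys; return true; }      // count every object key
    bool end_object() override { return true; }
    bool start_array(std::size_t) override { return true; }
    bool end_array() override { return true; }
    bool parse_error(std::size_t, const std::string&, const nlohmann::detail::exception&) override { return false; }
};

int main()
{
    key_counter counter;
    nlohmann::json::sax_parse(R"({"a": 1, "b": {"c": 2}})", &counter);
    return counter.keys == 3 ? 0 : 1;
}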
+ ref_stack.back()->start_position = m_lexer_ref->get_position() - 1; + } +#endif + + if (JSON_HEDLEY_UNLIKELY(len != detail::unknown_size() && len > ref_stack.back()->max_size())) + { + JSON_THROW(out_of_range::create(408, concat("excessive object size: ", std::to_string(len)), ref_stack.back())); + } + + return true; + } + + bool key(string_t& val) + { + JSON_ASSERT(!ref_stack.empty()); + JSON_ASSERT(ref_stack.back()->is_object()); + + // add null at given key and store the reference for later + object_element = &(ref_stack.back()->m_data.m_value.object->operator[](val)); + return true; + } + + bool end_object() + { + JSON_ASSERT(!ref_stack.empty()); + JSON_ASSERT(ref_stack.back()->is_object()); + +#if JSON_DIAGNOSTIC_POSITIONS + if (m_lexer_ref) + { + // Lexer's position is past the closing brace, so set that as the end position. + ref_stack.back()->end_position = m_lexer_ref->get_position(); + } +#endif + + ref_stack.back()->set_parents(); + ref_stack.pop_back(); + return true; + } + + bool start_array(std::size_t len) + { + ref_stack.push_back(handle_value(BasicJsonType::value_t::array)); + +#if JSON_DIAGNOSTIC_POSITIONS + // Manually set the start position of the array here. + // Ensure this is after the call to handle_value to ensure correct start position. + if (m_lexer_ref) + { + ref_stack.back()->start_position = m_lexer_ref->get_position() - 1; + } +#endif + + if (JSON_HEDLEY_UNLIKELY(len != detail::unknown_size() && len > ref_stack.back()->max_size())) + { + JSON_THROW(out_of_range::create(408, concat("excessive array size: ", std::to_string(len)), ref_stack.back())); + } + + return true; + } + + bool end_array() + { + JSON_ASSERT(!ref_stack.empty()); + JSON_ASSERT(ref_stack.back()->is_array()); + +#if JSON_DIAGNOSTIC_POSITIONS + if (m_lexer_ref) + { + // Lexer's position is past the closing bracket, so set that as the end position. + ref_stack.back()->end_position = m_lexer_ref->get_position(); + } +#endif + + ref_stack.back()->set_parents(); + ref_stack.pop_back(); + return true; + } + + template + bool parse_error(std::size_t /*unused*/, const std::string& /*unused*/, + const Exception& ex) + { + errored = true; + static_cast(ex); + if (allow_exceptions) + { + JSON_THROW(ex); + } + return false; + } + + constexpr bool is_errored() const + { + return errored; + } + + private: + +#if JSON_DIAGNOSTIC_POSITIONS + void handle_diagnostic_positions_for_json_value(BasicJsonType& v) + { + if (m_lexer_ref) + { + // Lexer has read past the current field value, so set the end position to the current position. + // The start position will be set below based on the length of the string representation + // of the value. + v.end_position = m_lexer_ref->get_position(); + + switch (v.type()) + { + case value_t::boolean: + { + // 4 and 5 are the string length of "true" and "false" + v.start_position = v.end_position - (v.m_data.m_value.boolean ? 4 : 5); + break; + } + + case value_t::null: + { + // 4 is the string length of "null" + v.start_position = v.end_position - 4; + break; + } + + case value_t::string: + { + // include the length of the quotes, which is 2 + v.start_position = v.end_position - v.m_data.m_value.string->size() - 2; + break; + } + + // As we handle the start and end positions for values created during parsing, + // we do not expect the following value type to be called. Regardless, set the positions + // in case this is created manually or through a different constructor. Exclude from lcov + // since the exact condition of this switch is esoteric. 
+ // LCOV_EXCL_START + case value_t::discarded: + { + v.end_position = std::string::npos; + v.start_position = v.end_position; + break; + } + // LCOV_EXCL_STOP + case value_t::binary: + case value_t::number_integer: + case value_t::number_unsigned: + case value_t::number_float: + { + v.start_position = v.end_position - m_lexer_ref->get_string().size(); + break; + } + case value_t::object: + case value_t::array: + { + // object and array are handled in start_object() and start_array() handlers + // skip setting the values here. + break; + } + default: // LCOV_EXCL_LINE + // Handle all possible types discretely, default handler should never be reached. + JSON_ASSERT(false); // NOLINT(cert-dcl03-c,hicpp-static-assert,misc-static-assert,-warnings-as-errors) LCOV_EXCL_LINE + } + } + } +#endif + + /*! + @invariant If the ref stack is empty, then the passed value will be the new + root. + @invariant If the ref stack contains a value, then it is an array or an + object to which we can add elements + */ + template + JSON_HEDLEY_RETURNS_NON_NULL + BasicJsonType* handle_value(Value&& v) + { + if (ref_stack.empty()) + { + root = BasicJsonType(std::forward(v)); + +#if JSON_DIAGNOSTIC_POSITIONS + handle_diagnostic_positions_for_json_value(root); +#endif + + return &root; + } + + JSON_ASSERT(ref_stack.back()->is_array() || ref_stack.back()->is_object()); + + if (ref_stack.back()->is_array()) + { + ref_stack.back()->m_data.m_value.array->emplace_back(std::forward(v)); + +#if JSON_DIAGNOSTIC_POSITIONS + handle_diagnostic_positions_for_json_value(ref_stack.back()->m_data.m_value.array->back()); +#endif + + return &(ref_stack.back()->m_data.m_value.array->back()); + } + + JSON_ASSERT(ref_stack.back()->is_object()); + JSON_ASSERT(object_element); + *object_element = BasicJsonType(std::forward(v)); + +#if JSON_DIAGNOSTIC_POSITIONS + handle_diagnostic_positions_for_json_value(*object_element); +#endif + + return object_element; + } + + /// the parsed JSON value + BasicJsonType& root; + /// stack to model hierarchy of values + std::vector ref_stack {}; + /// helper to hold the reference for the next object element + BasicJsonType* object_element = nullptr; + /// whether a syntax error occurred + bool errored = false; + /// whether to throw exceptions in case of errors + const bool allow_exceptions = true; + /// the lexer reference to obtain the current position + lexer_t* m_lexer_ref = nullptr; +}; + +template +class json_sax_dom_callback_parser +{ + public: + using number_integer_t = typename BasicJsonType::number_integer_t; + using number_unsigned_t = typename BasicJsonType::number_unsigned_t; + using number_float_t = typename BasicJsonType::number_float_t; + using string_t = typename BasicJsonType::string_t; + using binary_t = typename BasicJsonType::binary_t; + using parser_callback_t = typename BasicJsonType::parser_callback_t; + using parse_event_t = typename BasicJsonType::parse_event_t; + using lexer_t = lexer; + + json_sax_dom_callback_parser(BasicJsonType& r, + parser_callback_t cb, + const bool allow_exceptions_ = true, + lexer_t* lexer_ = nullptr) + : root(r), callback(std::move(cb)), allow_exceptions(allow_exceptions_), m_lexer_ref(lexer_) + { + keep_stack.push_back(true); + } + + // make class move-only + json_sax_dom_callback_parser(const json_sax_dom_callback_parser&) = delete; + json_sax_dom_callback_parser(json_sax_dom_callback_parser&&) = default; // NOLINT(hicpp-noexcept-move,performance-noexcept-move-constructor) + json_sax_dom_callback_parser& operator=(const json_sax_dom_callback_parser&) 
+    json_sax_dom_callback_parser& operator=(json_sax_dom_callback_parser&&) = default; // NOLINT(hicpp-noexcept-move,performance-noexcept-move-constructor)
+    ~json_sax_dom_callback_parser() = default;
+
+    bool null()
+    {
+        handle_value(nullptr);
+        return true;
+    }
+
+    bool boolean(bool val)
+    {
+        handle_value(val);
+        return true;
+    }
+
+    bool number_integer(number_integer_t val)
+    {
+        handle_value(val);
+        return true;
+    }
+
+    bool number_unsigned(number_unsigned_t val)
+    {
+        handle_value(val);
+        return true;
+    }
+
+    bool number_float(number_float_t val, const string_t& /*unused*/)
+    {
+        handle_value(val);
+        return true;
+    }
+
+    bool string(string_t& val)
+    {
+        handle_value(val);
+        return true;
+    }
+
+    bool binary(binary_t& val)
+    {
+        handle_value(std::move(val));
+        return true;
+    }
+
+    bool start_object(std::size_t len)
+    {
+        // check callback for object start
+        const bool keep = callback(static_cast<int>(ref_stack.size()), parse_event_t::object_start, discarded);
+        keep_stack.push_back(keep);
+
+        auto val = handle_value(BasicJsonType::value_t::object, true);
+        ref_stack.push_back(val.second);
+
+        if (ref_stack.back())
+        {
+
+#if JSON_DIAGNOSTIC_POSITIONS
+            // Manually set the start position of the object here.
+            // Ensure this is after the call to handle_value to ensure correct start position.
+            if (m_lexer_ref)
+            {
+                // Lexer has read the first character of the object, so
+                // subtract 1 from the position to get the correct start position.
+                ref_stack.back()->start_position = m_lexer_ref->get_position() - 1;
+            }
+#endif
+
+            // check object limit
+            if (JSON_HEDLEY_UNLIKELY(len != detail::unknown_size() && len > ref_stack.back()->max_size()))
+            {
+                JSON_THROW(out_of_range::create(408, concat("excessive object size: ", std::to_string(len)), ref_stack.back()));
+            }
+        }
+        return true;
+    }
+
+    bool key(string_t& val)
+    {
+        BasicJsonType k = BasicJsonType(val);
+
+        // check callback for key
+        const bool keep = callback(static_cast<int>(ref_stack.size()), parse_event_t::key, k);
+        key_keep_stack.push_back(keep);
+
+        // add discarded value at given key and store the reference for later
+        if (keep && ref_stack.back())
+        {
+            object_element = &(ref_stack.back()->m_data.m_value.object->operator[](val) = discarded);
+        }
+
+        return true;
+    }
+
+    bool end_object()
+    {
+        if (ref_stack.back())
+        {
+            if (!callback(static_cast<int>(ref_stack.size()) - 1, parse_event_t::object_end, *ref_stack.back()))
+            {
+                // discard object
+                *ref_stack.back() = discarded;
+
+#if JSON_DIAGNOSTIC_POSITIONS
+                // Set start/end positions for discarded object.
+                handle_diagnostic_positions_for_json_value(*ref_stack.back());
+#endif
+            }
+            else
+            {
+
+#if JSON_DIAGNOSTIC_POSITIONS
+                if (m_lexer_ref)
+                {
+                    // Lexer's position is past the closing brace, so set that as the end position.
+                    ref_stack.back()->end_position = m_lexer_ref->get_position();
+                }
+#endif
+
+                ref_stack.back()->set_parents();
+            }
+        }
+
+        JSON_ASSERT(!ref_stack.empty());
+        JSON_ASSERT(!keep_stack.empty());
+        ref_stack.pop_back();
+        keep_stack.pop_back();
+
+        if (!ref_stack.empty() && ref_stack.back() && ref_stack.back()->is_structured())
+        {
+            // remove discarded value
+            for (auto it = ref_stack.back()->begin(); it != ref_stack.back()->end(); ++it)
+            {
+                if (it->is_discarded())
+                {
+                    ref_stack.back()->erase(it);
+                    break;
+                }
+            }
+        }
+
+        return true;
+    }
+
+    bool start_array(std::size_t len)
+    {
+        const bool keep = callback(static_cast<int>(ref_stack.size()), parse_event_t::array_start, discarded);
+        keep_stack.push_back(keep);
+
+        auto val = handle_value(BasicJsonType::value_t::array, true);
+        ref_stack.push_back(val.second);
+
+        if (ref_stack.back())
+        {
+
+#if JSON_DIAGNOSTIC_POSITIONS
+            // Manually set the start position of the array here.
+            // Ensure this is after the call to handle_value to ensure correct start position.
+            if (m_lexer_ref)
+            {
+                // Lexer has read the first character of the array, so
+                // subtract 1 from the position to get the correct start position.
+                ref_stack.back()->start_position = m_lexer_ref->get_position() - 1;
+            }
+#endif
+
+            // check array limit
+            if (JSON_HEDLEY_UNLIKELY(len != detail::unknown_size() && len > ref_stack.back()->max_size()))
+            {
+                JSON_THROW(out_of_range::create(408, concat("excessive array size: ", std::to_string(len)), ref_stack.back()));
+            }
+        }
+
+        return true;
+    }
+
+    bool end_array()
+    {
+        bool keep = true;
+
+        if (ref_stack.back())
+        {
+            keep = callback(static_cast<int>(ref_stack.size()) - 1, parse_event_t::array_end, *ref_stack.back());
+            if (keep)
+            {
+
+#if JSON_DIAGNOSTIC_POSITIONS
+                if (m_lexer_ref)
+                {
+                    // Lexer's position is past the closing bracket, so set that as the end position.
+                    ref_stack.back()->end_position = m_lexer_ref->get_position();
+                }
+#endif
+
+                ref_stack.back()->set_parents();
+            }
+            else
+            {
+                // discard array
+                *ref_stack.back() = discarded;
+
+#if JSON_DIAGNOSTIC_POSITIONS
+                // Set start/end positions for discarded array.
+                handle_diagnostic_positions_for_json_value(*ref_stack.back());
+#endif
+            }
+        }
+
+        JSON_ASSERT(!ref_stack.empty());
+        JSON_ASSERT(!keep_stack.empty());
+        ref_stack.pop_back();
+        keep_stack.pop_back();
+
+        // remove discarded value
+        if (!keep && !ref_stack.empty() && ref_stack.back()->is_array())
+        {
+            ref_stack.back()->m_data.m_value.array->pop_back();
+        }
+
+        return true;
+    }
+
+    template<class Exception>
+    bool parse_error(std::size_t /*unused*/, const std::string& /*unused*/,
+                     const Exception& ex)
+    {
+        errored = true;
+        static_cast<void>(ex);
+        if (allow_exceptions)
+        {
+            JSON_THROW(ex);
+        }
+        return false;
+    }
+
+    constexpr bool is_errored() const
+    {
+        return errored;
+    }
+
+  private:
+
+#if JSON_DIAGNOSTIC_POSITIONS
+    void handle_diagnostic_positions_for_json_value(BasicJsonType& v)
+    {
+        if (m_lexer_ref)
+        {
+            // Lexer has read past the current field value, so set the end position to the current position.
+            // The start position will be set below based on the length of the string representation
+            // of the value.
+            v.end_position = m_lexer_ref->get_position();
+
+            switch (v.type())
+            {
+                case value_t::boolean:
+                {
+                    // 4 and 5 are the string length of "true" and "false"
+                    v.start_position = v.end_position - (v.m_data.m_value.boolean ? 4 : 5);
+                    break;
+                }
+
+                case value_t::null:
+                {
+                    // 4 is the string length of "null"
+                    v.start_position = v.end_position - 4;
+                    break;
+                }
+
+                case value_t::string:
+                {
+                    // include the length of the quotes, which is 2
+                    v.start_position = v.end_position - v.m_data.m_value.string->size() - 2;
+                    break;
+                }
+
+                case value_t::discarded:
+                {
+                    v.end_position = std::string::npos;
+                    v.start_position = v.end_position;
+                    break;
+                }
+
+                case value_t::binary:
+                case value_t::number_integer:
+                case value_t::number_unsigned:
+                case value_t::number_float:
+                {
+                    v.start_position = v.end_position - m_lexer_ref->get_string().size();
+                    break;
+                }
+
+                case value_t::object:
+                case value_t::array:
+                {
+                    // object and array are handled in start_object() and start_array() handlers
+                    // skip setting the values here.
+                    break;
+                }
+                default: // LCOV_EXCL_LINE
+                    // Handle all possible types discretely, default handler should never be reached.
+                    JSON_ASSERT(false); // NOLINT(cert-dcl03-c,hicpp-static-assert,misc-static-assert,-warnings-as-errors) LCOV_EXCL_LINE
+            }
+        }
+    }
+#endif
+
+    /*!
+    @param[in] v  value to add to the JSON value we build during parsing
+    @param[in] skip_callback  whether we should skip calling the callback
+               function; this is required after start_array() and
+               start_object() SAX events, because otherwise we would call the
+               callback function with an empty array or object, respectively.
+
+    @invariant If the ref stack is empty, then the passed value will be the new
+               root.
+    @invariant If the ref stack contains a value, then it is an array or an
+               object to which we can add elements
+
+    @return pair of boolean (whether value should be kept) and pointer (to the
+            passed value in the ref_stack hierarchy; nullptr if not kept)
+    */
+    template<typename Value>
+    std::pair<bool, BasicJsonType*> handle_value(Value&& v, const bool skip_callback = false)
+    {
+        JSON_ASSERT(!keep_stack.empty());
+
+        // do not handle this value if we know it would be added to a discarded
+        // container
+        if (!keep_stack.back())
+        {
+            return {false, nullptr};
+        }
+
+        // create value
+        auto value = BasicJsonType(std::forward<Value>(v));
+
+#if JSON_DIAGNOSTIC_POSITIONS
+        handle_diagnostic_positions_for_json_value(value);
+#endif
+
+        // check callback
+        const bool keep = skip_callback || callback(static_cast<int>(ref_stack.size()), parse_event_t::value, value);
+
+        // do not handle this value if we just learnt it shall be discarded
+        if (!keep)
+        {
+            return {false, nullptr};
+        }
+
+        if (ref_stack.empty())
+        {
+            root = std::move(value);
+            return {true, &root};
+        }
+
+        // skip this value if we already decided to skip the parent
+        // (https://github.com/nlohmann/json/issues/971#issuecomment-413678360)
+        if (!ref_stack.back())
+        {
+            return {false, nullptr};
+        }
+
+        // we now only expect arrays and objects
+        JSON_ASSERT(ref_stack.back()->is_array() || ref_stack.back()->is_object());
+
+        // array
+        if (ref_stack.back()->is_array())
+        {
+            ref_stack.back()->m_data.m_value.array->emplace_back(std::move(value));
+            return {true, &(ref_stack.back()->m_data.m_value.array->back())};
+        }
+
+        // object
+        JSON_ASSERT(ref_stack.back()->is_object());
+        // check if we should store an element for the current key
+        JSON_ASSERT(!key_keep_stack.empty());
+        const bool store_element = key_keep_stack.back();
+        key_keep_stack.pop_back();
+
+        if (!store_element)
+        {
+            return {false, nullptr};
+        }
+
+        JSON_ASSERT(object_element);
+        *object_element = std::move(value);
+        return {true, object_element};
+    }
+
+    /// the parsed JSON value
+    BasicJsonType& root;
+    /// stack to model hierarchy of values
+    std::vector<BasicJsonType*> ref_stack {};
+    /// stack to manage which values to keep
+    std::vector<bool> keep_stack {}; // NOLINT(readability-redundant-member-init)
+    /// stack to manage which object keys to keep
+    std::vector<bool> key_keep_stack {}; // NOLINT(readability-redundant-member-init)
+    /// helper to hold the reference for the next object element
+    BasicJsonType* object_element = nullptr;
+    /// whether a syntax error occurred
+    bool errored = false;
+    /// callback function
+    const parser_callback_t callback = nullptr;
+    /// whether to throw exceptions in case of errors
+    const bool allow_exceptions = true;
+    /// a discarded value for the callback
+    BasicJsonType discarded = BasicJsonType::value_t::discarded;
+    /// the lexer reference to obtain the current position
+    lexer_t* m_lexer_ref = nullptr;
+};
+
+template<typename BasicJsonType>
+class json_sax_acceptor
+{
+  public:
+    using number_integer_t = typename BasicJsonType::number_integer_t;
+    using number_unsigned_t = typename BasicJsonType::number_unsigned_t;
+    using number_float_t = typename BasicJsonType::number_float_t;
+    using string_t = typename BasicJsonType::string_t;
+    using binary_t = typename BasicJsonType::binary_t;
+
+    bool null()
+    {
+        return true;
+    }
+
+    bool boolean(bool /*unused*/)
+    {
+        return true;
+    }
+
+    bool number_integer(number_integer_t /*unused*/)
+    {
+        return true;
+    }
+
+    bool number_unsigned(number_unsigned_t /*unused*/)
+    {
+        return true;
+    }
+
+    bool number_float(number_float_t /*unused*/, const string_t& /*unused*/)
+    {
+        return true;
+    }
+
+    bool string(string_t& /*unused*/)
+    {
+        return true;
+    }
+
+    bool binary(binary_t& /*unused*/)
+    {
+        return true;
+    }
+
+    bool start_object(std::size_t /*unused*/ = detail::unknown_size())
+    {
+        return true;
+    }
+
+    bool key(string_t& /*unused*/)
+    {
+        return true;
+    }
+
+    bool end_object()
+    {
+        return true;
+    }
+
+    bool start_array(std::size_t /*unused*/ = detail::unknown_size())
+    {
+        return true;
+    }
+
+    bool end_array()
+    {
+        return true;
+    }
+
+    bool parse_error(std::size_t /*unused*/, const std::string& /*unused*/, const detail::exception& /*unused*/)
+    {
+        return false;
+    }
+};
+
+}  // namespace detail
+NLOHMANN_JSON_NAMESPACE_END
+
+// #include
+
+// #include
+
 // #include
 // __ _____ _____ _____
 // __| | __| | | | JSON for Modern C++
-// | | |__ | | | | | | version 3.11.3
+// | | |__ | | | | | | version 3.12.0
 // |_____|_____|_____|_|___| https://github.com/nlohmann/json
 //
-// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann
+// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann
 // SPDX-License-Identifier: MIT
 
@@ -9208,7 +9841,7 @@ static inline bool little_endianness(int num = 1) noexcept
 /*!
 @brief deserialization of CBOR, MessagePack, and UBJSON values
 */
-template<typename BasicJsonType, typename InputAdapterType, typename SAX = json_sax_dom_parser<BasicJsonType>>
+template<typename BasicJsonType, typename InputAdapterType, typename SAX = json_sax_dom_parser<BasicJsonType, InputAdapterType>>
 class binary_reader
 {
     using number_integer_t = typename BasicJsonType::number_integer_t;
@@ -9315,7 +9948,7 @@ class binary_reader
         std::int32_t document_size{};
         get_number<std::int32_t, true>(input_format_t::bson, document_size);
 
-        if (JSON_HEDLEY_UNLIKELY(!sax->start_object(static_cast<std::size_t>(-1))))
+        if (JSON_HEDLEY_UNLIKELY(!sax->start_object(detail::unknown_size())))
         {
             return false;
         }
@@ -9471,6 +10104,12 @@ class binary_reader
                 return get_number<std::int64_t, true>(input_format_t::bson, value) && sax->number_integer(value);
             }
 
+            case 0x11: // uint64
+            {
+                std::uint64_t value{};
+                return get_number<std::uint64_t, true>(input_format_t::bson, value) && sax->number_unsigned(value);
+            }
+
            default: // anything else not supported (yet)
            {
                std::array<char, 3> cr{{}};
@@ -9537,7 +10176,7 @@ class binary_reader
         std::int32_t document_size{};
         get_number<std::int32_t, true>(input_format_t::bson, document_size);
 
-        if (JSON_HEDLEY_UNLIKELY(!sax->start_array(static_cast<std::size_t>(-1))))
+        if (JSON_HEDLEY_UNLIKELY(!sax->start_array(detail::unknown_size())))
        {
            return false;
        }
@@ -9797,7 +10436,7 @@ class binary_reader
             }
 
             case 0x9F: // array (indefinite length)
-                return get_cbor_array(static_cast<std::size_t>(-1), tag_handler);
+                return get_cbor_array(detail::unknown_size(), tag_handler);
 
             // map (0x00..0x17 pairs of data items follow)
             case 0xA0:
@@ -9851,7 +10490,7 @@ class binary_reader
             }
 
             case 0xBF: // map (indefinite length)
-                return get_cbor_object(static_cast<std::size_t>(-1), tag_handler);
+                return get_cbor_object(detail::unknown_size(), tag_handler);
 
             case 0xC6: // tagged item
             case 0xC7:
@@ -10239,7 +10878,7 @@ class binary_reader
     }
 
     /*!
-    @param[in] len  the length of the array or static_cast<std::size_t>(-1) for an
+    @param[in] len  the length of the array or detail::unknown_size() for an
                     array of indefinite size
     @param[in] tag_handler  how CBOR tags should be treated
     @return whether array creation completed
@@ -10252,7 +10891,7 @@ class binary_reader
             return false;
         }
 
-        if (len != static_cast<std::size_t>(-1))
+        if (len != detail::unknown_size())
         {
             for (std::size_t i = 0; i < len; ++i)
             {
@@ -10277,7 +10916,7 @@ class binary_reader
     }
 
     /*!
-    @param[in] len  the length of the object or static_cast<std::size_t>(-1) for an
+    @param[in] len  the length of the object or detail::unknown_size() for an
                     object of indefinite size
     @param[in] tag_handler  how CBOR tags should be treated
     @return whether object creation completed
@@ -10293,7 +10932,7 @@ class binary_reader
         if (len != 0)
         {
             string_t key;
-            if (len != static_cast<std::size_t>(-1))
+            if (len != detail::unknown_size())
             {
                 for (std::size_t i = 0; i < len; ++i)
                 {
@@ -11456,6 +12095,16 @@ class binary_reader
             case 'Z': // null
                 return sax->null();
 
+            case 'B': // byte
+            {
+                if (input_format != input_format_t::bjdata)
+                {
+                    break;
+                }
+                std::uint8_t number{};
+                return get_number(input_format, number) && sax->number_unsigned(number);
+            }
+
             case 'U':
             {
                 std::uint8_t number{};
@@ -11656,7 +12305,7 @@ class binary_reader
                 return false;
             }
 
-            if (size_and_type.second == 'C')
+            if (size_and_type.second == 'C' || size_and_type.second == 'B')
             {
                 size_and_type.second = 'U';
             }
@@ -11678,6 +12327,13 @@ class binary_reader
             return (sax->end_array() && sax->end_object());
         }
 
+        // If BJData type marker is 'B' decode as binary
+        if (input_format == input_format_t::bjdata && size_and_type.first != npos && size_and_type.second == 'B')
+        {
+            binary_t result;
+            return get_binary(input_format, size_and_type.first, result) && sax->binary(result);
+        }
+
         if (size_and_type.first != npos)
         {
             if (JSON_HEDLEY_UNLIKELY(!sax->start_array(size_and_type.first)))
@@ -11711,7 +12367,7 @@ class binary_reader
         }
         else
         {
-            if (JSON_HEDLEY_UNLIKELY(!sax->start_array(static_cast<std::size_t>(-1))))
+            if (JSON_HEDLEY_UNLIKELY(!sax->start_array(detail::unknown_size())))
             {
                 return false;
             }
@@ -11789,7 +12445,7 @@ class binary_reader
         }
         else
         {
-            if (JSON_HEDLEY_UNLIKELY(!sax->start_object(static_cast<std::size_t>(-1))))
+            if (JSON_HEDLEY_UNLIKELY(!sax->start_object(detail::unknown_size())))
             {
                 return false;
             }
@@ -11900,6 +12556,29 @@ class binary_reader
         return current = ia.get_character();
     }
 
+    /*!
+    @brief get_to read into a primitive type
+
+    This function provides the interface to the used input adapter. It does
+    not throw in case the input reached EOF, but returns false instead
+
+    @return bool, whether the read was successful
+    */
+    template<typename T>
+    bool get_to(T& dest, const input_format_t format, const char* context)
+    {
+        auto new_chars_read = ia.get_elements(&dest);
+        chars_read += new_chars_read;
+        if (JSON_HEDLEY_UNLIKELY(new_chars_read < sizeof(T)))
+        {
+            // in case of failure, advance position by 1 to report failing location
+            ++chars_read;
+            sax->parse_error(chars_read, "", parse_error::create(110, chars_read, exception_message(format, "unexpected end of input", context), nullptr));
+            return false;
+        }
+        return true;
+    }
+
     /*!
     @return character read from the input after ignoring all 'N' entries
     */
@@ -11914,6 +12593,28 @@ class binary_reader
         return current;
     }
 
+    template<typename NumberType>
+    static void byte_swap(NumberType& number)
+    {
+        constexpr std::size_t sz = sizeof(number);
+#ifdef __cpp_lib_byteswap
+        if constexpr (sz == 1)
+        {
+            return;
+        }
+        if constexpr(std::is_integral_v<NumberType>)
+        {
+            number = std::byteswap(number);
+            return;
+        }
+#endif
+        auto* ptr = reinterpret_cast<std::uint8_t*>(&number);
+        for (std::size_t i = 0; i < sz / 2; ++i)
+        {
+            std::swap(ptr[i], ptr[sz - i - 1]);
+        }
+    }
+
     /*
     @brief read a number from the input
 
@@ -11932,29 +12633,16 @@ class binary_reader
     template<typename NumberType, bool InputIsLittleEndian = false>
     bool get_number(const input_format_t format, NumberType& result)
     {
-        // step 1: read input into array with system's byte order
-        std::array<std::uint8_t, sizeof(NumberType)> vec{};
-        for (std::size_t i = 0; i < sizeof(NumberType); ++i)
+        // read in the original format
+
+        if (JSON_HEDLEY_UNLIKELY(!get_to(result, format, "number")))
         {
-            get();
-            if (JSON_HEDLEY_UNLIKELY(!unexpect_eof(format, "number")))
-            {
-                return false;
-            }
-
-            // reverse byte order prior to conversion if necessary
-            if (is_little_endian != (InputIsLittleEndian || format == input_format_t::bjdata))
-            {
-                vec[sizeof(NumberType) - i - 1] = static_cast<std::uint8_t>(current);
-            }
-            else
-            {
-                vec[i] = static_cast<std::uint8_t>(current); // LCOV_EXCL_LINE
-            }
+            return false;
+        }
+        if (is_little_endian != (InputIsLittleEndian || format == input_format_t::bjdata))
+        {
+            byte_swap(result);
         }
-
-        // step 2: convert array into number of type T and return
-        std::memcpy(&result, vec.data(), sizeof(NumberType));
         return true;
     }
 
@@ -12093,7 +12781,7 @@ class binary_reader
     }
 
   private:
-    static JSON_INLINE_VARIABLE constexpr std::size_t npos = static_cast<std::size_t>(-1);
+    static JSON_INLINE_VARIABLE constexpr std::size_t npos = detail::unknown_size();
 
     /// input adapter
    InputAdapterType ia;
@@ -12119,6 +12807,7 @@ class binary_reader
 
 #define JSON_BINARY_READER_MAKE_BJD_TYPES_MAP_ \
     make_array<bjd_type>( \
+    bjd_type{'B', "byte"}, \
     bjd_type{'C', "char"}, \
     bjd_type{'D', "double"}, \
     bjd_type{'I', "int16"}, \
@@ -12161,10 +12850,10 @@ NLOHMANN_JSON_NAMESPACE_END
 
 // #include
 // __ _____ _____ _____
 // __| | __| | | | JSON for Modern C++
-// | | |__ | | | | | | version 3.11.3
+// | | |__ | | | | | | version 3.12.0
 // |_____|_____|_____|_|___| https://github.com/nlohmann/json
 //
-// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann
+// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann
 // SPDX-License-Identifier: MIT
 
@@ -12238,10 +12927,10 @@ class parser
   public:
     /// a parser reading from an input adapter
     explicit parser(InputAdapterType&& adapter,
-                    const parser_callback_t cb = nullptr,
+                    parser_callback_t cb = nullptr,
                     const bool allow_exceptions_ = true,
                     const bool skip_comments = false)
-        : callback(cb)
+        : callback(std::move(cb))
         , m_lexer(std::move(adapter), skip_comments)
         , allow_exceptions(allow_exceptions_)
     {
@@ -12263,7 +12952,7 @@ class parser
     {
         if (callback)
        {
-            json_sax_dom_callback_parser<BasicJsonType> sdp(result, callback, allow_exceptions);
+            json_sax_dom_callback_parser<BasicJsonType, InputAdapterType> sdp(result, callback, allow_exceptions, &m_lexer);
             sax_parse_internal(&sdp);
 
             // in strict mode, input must be completely read
@@ -12291,7 +12980,7 @@ class parser
         }
         else
         {
-            json_sax_dom_parser<BasicJsonType> sdp(result, allow_exceptions);
+            json_sax_dom_parser<BasicJsonType, InputAdapterType> sdp(result, allow_exceptions, &m_lexer);
             sax_parse_internal(&sdp);
 
             // in strict mode, input must be completely read
@@ -12363,7 +13052,7 @@ class parser
         {
             case token_type::begin_object:
             {
-                if (JSON_HEDLEY_UNLIKELY(!sax->start_object(static_cast<std::size_t>(-1))))
+                if
(JSON_HEDLEY_UNLIKELY(!sax->start_object(detail::unknown_size()))) { return false; } @@ -12408,7 +13097,7 @@ class parser case token_type::begin_array: { - if (JSON_HEDLEY_UNLIKELY(!sax->start_array(static_cast(-1)))) + if (JSON_HEDLEY_UNLIKELY(!sax->start_array(detail::unknown_size()))) { return false; } @@ -12690,10 +13379,10 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -12703,10 +13392,10 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -12862,10 +13551,10 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -13332,7 +14021,7 @@ class iter_impl // NOLINT(cppcoreguidelines-special-member-functions,hicpp-speci /*! @brief comparison: equal - @pre The iterator is initialized; i.e. `m_object != nullptr`. + @pre (1) Both iterators are initialized to point to the same object, or (2) both iterators are value-initialized. */ template < typename IterImpl, detail::enable_if_t < (std::is_same::value || std::is_same::value), std::nullptr_t > = nullptr > bool operator==(const IterImpl& other) const @@ -13343,7 +14032,11 @@ class iter_impl // NOLINT(cppcoreguidelines-special-member-functions,hicpp-speci JSON_THROW(invalid_iterator::create(212, "cannot compare iterators of different containers", m_object)); } - JSON_ASSERT(m_object != nullptr); + // value-initialized forward iterators can be compared, and must compare equal to other value-initialized iterators of the same type #4493 + if (m_object == nullptr) + { + return true; + } switch (m_object->m_data.m_type) { @@ -13368,7 +14061,7 @@ class iter_impl // NOLINT(cppcoreguidelines-special-member-functions,hicpp-speci /*! @brief comparison: not equal - @pre The iterator is initialized; i.e. `m_object != nullptr`. + @pre (1) Both iterators are initialized to point to the same object, or (2) both iterators are value-initialized. */ template < typename IterImpl, detail::enable_if_t < (std::is_same::value || std::is_same::value), std::nullptr_t > = nullptr > bool operator!=(const IterImpl& other) const @@ -13378,7 +14071,7 @@ class iter_impl // NOLINT(cppcoreguidelines-special-member-functions,hicpp-speci /*! @brief comparison: smaller - @pre The iterator is initialized; i.e. `m_object != nullptr`. + @pre (1) Both iterators are initialized to point to the same object, or (2) both iterators are value-initialized. 
*/ bool operator<(const iter_impl& other) const { @@ -13388,7 +14081,12 @@ class iter_impl // NOLINT(cppcoreguidelines-special-member-functions,hicpp-speci JSON_THROW(invalid_iterator::create(212, "cannot compare iterators of different containers", m_object)); } - JSON_ASSERT(m_object != nullptr); + // value-initialized forward iterators can be compared, and must compare equal to other value-initialized iterators of the same type #4493 + if (m_object == nullptr) + { + // the iterators are both value-initialized and are to be considered equal, but this function checks for smaller, so we return false + return false; + } switch (m_object->m_data.m_type) { @@ -13413,7 +14111,7 @@ class iter_impl // NOLINT(cppcoreguidelines-special-member-functions,hicpp-speci /*! @brief comparison: less than or equal - @pre The iterator is initialized; i.e. `m_object != nullptr`. + @pre (1) Both iterators are initialized to point to the same object, or (2) both iterators are value-initialized. */ bool operator<=(const iter_impl& other) const { @@ -13422,7 +14120,7 @@ class iter_impl // NOLINT(cppcoreguidelines-special-member-functions,hicpp-speci /*! @brief comparison: greater than - @pre The iterator is initialized; i.e. `m_object != nullptr`. + @pre (1) Both iterators are initialized to point to the same object, or (2) both iterators are value-initialized. */ bool operator>(const iter_impl& other) const { @@ -13431,7 +14129,7 @@ class iter_impl // NOLINT(cppcoreguidelines-special-member-functions,hicpp-speci /*! @brief comparison: greater than or equal - @pre The iterator is initialized; i.e. `m_object != nullptr`. + @pre (1) The iterator is initialized; i.e. `m_object != nullptr`, or (2) both iterators are value-initialized. */ bool operator>=(const iter_impl& other) const { @@ -13624,10 +14322,10 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -13759,10 +14457,10 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -13801,10 +14499,10 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -14034,7 +14732,7 @@ class json_pointer } const char* p = s.c_str(); - char* p_end = nullptr; + char* p_end = nullptr; // NOLINT(misc-const-correctness) errno = 0; // strtoull doesn't reset errno const unsigned long long res = std::strtoull(p, &p_end, 10); // NOLINT(runtime/int) if (p == p_end // invalid input or empty string @@ -14556,7 +15254,7 @@ class json_pointer // iterate array and use index as reference string for (std::size_t i = 0; i < value.m_data.m_value.array->size(); ++i) { - 
flatten(detail::concat(reference_string, '/', std::to_string(i)), + flatten(detail::concat(reference_string, '/', std::to_string(i)), value.m_data.m_value.array->operator[](i), result); } } @@ -14575,7 +15273,7 @@ class json_pointer // iterate object and use keys as reference string for (const auto& element : *value.m_data.m_value.object) { - flatten(detail::concat(reference_string, '/', detail::escape(element.first)), element.second, result); + flatten(detail::concat(reference_string, '/', detail::escape(element.first)), element.second, result); } } break; @@ -14796,10 +15494,10 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -14881,6 +15579,8 @@ NLOHMANN_JSON_NAMESPACE_END // #include +// #include + // #include // #include @@ -14888,10 +15588,10 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -14914,10 +15614,10 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -15068,6 +15768,13 @@ NLOHMANN_JSON_NAMESPACE_BEGIN namespace detail { +/// how to encode BJData +enum class bjdata_version_t +{ + draft2, + draft3, +}; + /////////////////// // binary writer // /////////////////// @@ -15652,7 +16359,7 @@ class binary_writer case value_t::binary: { // step 0: determine if the binary type has a set subtype to - // determine whether or not to use the ext or fixext types + // determine whether to use the ext or fixext types const bool use_ext = j.m_data.m_value.binary->has_subtype(); // step 1: write control byte and the byte string length @@ -15775,11 +16482,14 @@ class binary_writer @param[in] use_type whether to use '$' prefixes (optimized format) @param[in] add_prefix whether prefixes need to be used for this value @param[in] use_bjdata whether write in BJData format, default is false + @param[in] bjdata_version which BJData version to use, default is draft2 */ void write_ubjson(const BasicJsonType& j, const bool use_count, const bool use_type, const bool add_prefix = true, - const bool use_bjdata = false) + const bool use_bjdata = false, const bjdata_version_t bjdata_version = bjdata_version_t::draft2) { + const bool bjdata_draft3 = use_bjdata && bjdata_version == bjdata_version_t::draft3; + switch (j.type()) { case value_t::null: @@ -15869,7 +16579,7 @@ class binary_writer for (const auto& el : *j.m_data.m_value.array) { - write_ubjson(el, use_count, use_type, prefix_required, use_bjdata); + write_ubjson(el, use_count, use_type, prefix_required, use_bjdata, bjdata_version); } if (!use_count) @@ -15887,11 +16597,11 @@ class binary_writer oa->write_character(to_char_type('[')); } - if 
(use_type && !j.m_data.m_value.binary->empty()) + if (use_type && (bjdata_draft3 || !j.m_data.m_value.binary->empty())) { JSON_ASSERT(use_count); oa->write_character(to_char_type('$')); - oa->write_character('U'); + oa->write_character(bjdata_draft3 ? 'B' : 'U'); } if (use_count) @@ -15910,7 +16620,7 @@ class binary_writer { for (size_t i = 0; i < j.m_data.m_value.binary->size(); ++i) { - oa->write_character(to_char_type('U')); + oa->write_character(to_char_type(bjdata_draft3 ? 'B' : 'U')); oa->write_character(j.m_data.m_value.binary->data()[i]); } } @@ -15927,7 +16637,7 @@ class binary_writer { if (use_bjdata && j.m_data.m_value.object->size() == 3 && j.m_data.m_value.object->find("_ArrayType_") != j.m_data.m_value.object->end() && j.m_data.m_value.object->find("_ArraySize_") != j.m_data.m_value.object->end() && j.m_data.m_value.object->find("_ArrayData_") != j.m_data.m_value.object->end()) { - if (!write_bjdata_ndarray(*j.m_data.m_value.object, use_count, use_type)) // decode bjdata ndarray in the JData format (https://github.com/NeuroJSON/jdata) + if (!write_bjdata_ndarray(*j.m_data.m_value.object, use_count, use_type, bjdata_version)) // decode bjdata ndarray in the JData format (https://github.com/NeuroJSON/jdata) { break; } @@ -15971,7 +16681,7 @@ class binary_writer oa->write_characters( reinterpret_cast(el.first.c_str()), el.first.size()); - write_ubjson(el.second, use_count, use_type, prefix_required, use_bjdata); + write_ubjson(el.second, use_count, use_type, prefix_required, use_bjdata, bjdata_version); } if (!use_count) @@ -16127,7 +16837,8 @@ class binary_writer } else { - JSON_THROW(out_of_range::create(407, concat("integer number ", std::to_string(j.m_data.m_value.number_unsigned), " cannot be represented by BSON as it does not fit int64"), &j)); + write_bson_entry_header(name, 0x11 /* uint64 */); + write_number(static_cast(j.m_data.m_value.number_unsigned), true); } } @@ -16655,10 +17366,11 @@ class binary_writer /*! 
@return false if the object is successfully converted to a bjdata ndarray, true if the type or size is invalid */ - bool write_bjdata_ndarray(const typename BasicJsonType::object_t& value, const bool use_count, const bool use_type) + bool write_bjdata_ndarray(const typename BasicJsonType::object_t& value, const bool use_count, const bool use_type, const bjdata_version_t bjdata_version) { std::map bjdtype = {{"uint8", 'U'}, {"int8", 'i'}, {"uint16", 'u'}, {"int16", 'I'}, - {"uint32", 'm'}, {"int32", 'l'}, {"uint64", 'M'}, {"int64", 'L'}, {"single", 'd'}, {"double", 'D'}, {"char", 'C'} + {"uint32", 'm'}, {"int32", 'l'}, {"uint64", 'M'}, {"int64", 'L'}, {"single", 'd'}, {"double", 'D'}, + {"char", 'C'}, {"byte", 'B'} }; string_t key = "_ArrayType_"; @@ -16688,10 +17400,10 @@ class binary_writer oa->write_character('#'); key = "_ArraySize_"; - write_ubjson(value.at(key), use_count, use_type, true, true); + write_ubjson(value.at(key), use_count, use_type, true, true, bjdata_version); key = "_ArrayData_"; - if (dtype == 'U' || dtype == 'C') + if (dtype == 'U' || dtype == 'C' || dtype == 'B') { for (const auto& el : value.at(key)) { @@ -16882,11 +17594,11 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2008-2009 Björn Hoehrmann -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2008 - 2009 Björn Hoehrmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -16907,11 +17619,11 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // // SPDX-FileCopyrightText: 2009 Florian Loitsch -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -17147,10 +17859,10 @@ boundaries compute_boundaries(FloatType value) // v- m- v m+ v+ const bool lower_boundary_is_closer = F == 0 && E > 1; - const diyfp m_plus = diyfp(2 * v.f + 1, v.e - 1); + const diyfp m_plus = diyfp((2 * v.f) + 1, v.e - 1); const diyfp m_minus = lower_boundary_is_closer - ? diyfp(4 * v.f - 1, v.e - 2) // (B) - : diyfp(2 * v.f - 1, v.e - 1); // (A) + ? diyfp((4 * v.f) - 1, v.e - 2) // (B) + : diyfp((2 * v.f) - 1, v.e - 1); // (A) // Determine the normalized w+ = m+. 
const diyfp w_plus = diyfp::normalize(m_plus); @@ -17380,7 +18092,7 @@ inline cached_power get_cached_power_for_binary_exponent(int e) JSON_ASSERT(e >= -1500); JSON_ASSERT(e <= 1500); const int f = kAlpha - e - 1; - const int k = (f * 78913) / (1 << 18) + static_cast(f > 0); + const int k = ((f * 78913) / (1 << 18)) + static_cast(f > 0); const int index = (-kCachedPowersMinDecExp + k + (kCachedPowersDecStep - 1)) / kCachedPowersDecStep; JSON_ASSERT(index >= 0); @@ -17858,15 +18570,15 @@ inline char* append_exponent(char* buf, int e) } else if (k < 100) { - *buf++ = static_cast('0' + k / 10); + *buf++ = static_cast('0' + (k / 10)); k %= 10; *buf++ = static_cast('0' + k); } else { - *buf++ = static_cast('0' + k / 100); + *buf++ = static_cast('0' + (k / 100)); k %= 100; - *buf++ = static_cast('0' + k / 10); + *buf++ = static_cast('0' + (k / 10)); k %= 10; *buf++ = static_cast('0' + k); } @@ -18652,7 +19364,7 @@ class serializer @param[in] x unsigned integer number to count its digits @return number of decimal digits */ - inline unsigned int count_digits(number_unsigned_t x) noexcept + unsigned int count_digits(number_unsigned_t x) noexcept { unsigned int n_digits = 1; for (;;) @@ -18935,7 +19647,7 @@ class serializer ? (byte & 0x3fu) | (codep << 6u) : (0xFFu >> type) & (byte); - const std::size_t index = 256u + static_cast(state) * 16u + static_cast(type); + const std::size_t index = 256u + (static_cast(state) * 16u) + static_cast(type); JSON_ASSERT(index < utf8d.size()); state = utf8d[index]; return state; @@ -18961,7 +19673,7 @@ class serializer * absolute values of INT_MIN and INT_MAX are usually not the same. See * #1708 for details. */ - inline number_unsigned_t remove_sign(number_integer_t x) noexcept + number_unsigned_t remove_sign(number_integer_t x) noexcept { JSON_ASSERT(x < 0 && x < (std::numeric_limits::max)()); // NOLINT(misc-redundant-expression) return static_cast(-(x + 1)) + 1; @@ -19003,10 +19715,10 @@ NLOHMANN_JSON_NAMESPACE_END // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -19031,7 +19743,7 @@ NLOHMANN_JSON_NAMESPACE_BEGIN /// for use within nlohmann::basic_json template , class Allocator = std::allocator>> - struct ordered_map : std::vector, Allocator> + struct ordered_map : std::vector, Allocator> { using key_type = Key; using mapped_type = T; @@ -19346,7 +20058,7 @@ template , template using require_input_iter = typename std::enable_if::iterator_category, - std::input_iterator_tag>::value>::type; + std::input_iterator_tag>::value>::type; template> void insert(InputIt first, InputIt last) @@ -19417,9 +20129,9 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec friend class ::nlohmann::detail::binary_writer; template friend class ::nlohmann::detail::binary_reader; - template + template friend class ::nlohmann::detail::json_sax_dom_parser; - template + template friend class ::nlohmann::detail::json_sax_dom_callback_parser; friend class ::nlohmann::detail::exception; @@ -19440,7 +20152,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec ) { return ::nlohmann::detail::parser(std::move(adapter), - std::move(cb), allow_exceptions, ignore_comments); + std::move(cb), allow_exceptions, ignore_comments); } 
private: @@ -19473,6 +20185,8 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec using error_handler_t = detail::error_handler_t; /// how to treat CBOR tags using cbor_tag_handler_t = detail::cbor_tag_handler_t; + /// how to encode BJData + using bjdata_version_t = detail::bjdata_version_t; /// helper type for initializer lists of basic_json values using initializer_list_t = std::initializer_list>; @@ -19552,7 +20266,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec { basic_json result; - result["copyright"] = "(C) 2013-2023 Niels Lohmann"; + result["copyright"] = "(C) 2013-2025 Niels Lohmann"; result["name"] = "JSON for Modern C++"; result["url"] = "https://github.com/nlohmann/json"; result["version"]["string"] = @@ -19817,7 +20531,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec object = nullptr; // silence warning, see #821 if (JSON_HEDLEY_UNLIKELY(t == value_t::null)) { - JSON_THROW(other_error::create(500, "961c151d2e87f2686a955a9be24d316f1362bf21 3.11.3", nullptr)); // LCOV_EXCL_LINE + JSON_THROW(other_error::create(500, "961c151d2e87f2686a955a9be24d316f1362bf21 3.12.0", nullptr)); // LCOV_EXCL_LINE } break; } @@ -20053,10 +20767,10 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec return it; } - reference set_parent(reference j, std::size_t old_capacity = static_cast(-1)) + reference set_parent(reference j, std::size_t old_capacity = detail::unknown_size()) { #if JSON_DIAGNOSTICS - if (old_capacity != static_cast(-1)) + if (old_capacity != detail::unknown_size()) { // see https://github.com/nlohmann/json/issues/2838 JSON_ASSERT(type() == value_t::array); @@ -20136,8 +20850,8 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec detail::enable_if_t < !detail::is_basic_json::value && detail::is_compatible_type::value, int > = 0 > basic_json(CompatibleType && val) noexcept(noexcept( // NOLINT(bugprone-forwarding-reference-overload,bugprone-exception-escape) - JSONSerializer::to_json(std::declval(), - std::forward(val)))) + JSONSerializer::to_json(std::declval(), + std::forward(val)))) { JSONSerializer::to_json(*this, std::forward(val)); set_parents(); @@ -20150,6 +20864,10 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec detail::enable_if_t < detail::is_basic_json::value&& !std::is_same::value, int > = 0 > basic_json(const BasicJsonType& val) +#if JSON_DIAGNOSTIC_POSITIONS + : start_position(val.start_pos()), + end_position(val.end_pos()) +#endif { using other_boolean_t = typename BasicJsonType::boolean_t; using other_number_float_t = typename BasicJsonType::number_float_t; @@ -20196,6 +20914,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec JSON_ASSERT(false); // NOLINT(cert-dcl03-c,hicpp-static-assert,misc-static-assert) LCOV_EXCL_LINE } JSON_ASSERT(m_data.m_type == val.type()); + set_parents(); assert_invariant(); } @@ -20332,7 +21051,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec template < class InputIT, typename std::enable_if < std::is_same::value || std::is_same::value, int >::type = 0 > - basic_json(InputIT first, InputIT last) + basic_json(InputIT first, InputIT last) // NOLINT(performance-unnecessary-value-param) { JSON_ASSERT(first.m_object != nullptr); JSON_ASSERT(last.m_object != nullptr); @@ -20447,6 +21166,10 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec /// 
@sa https://json.nlohmann.me/api/basic_json/basic_json/ basic_json(const basic_json& other) : json_base_class_t(other) +#if JSON_DIAGNOSTIC_POSITIONS + , start_position(other.start_position) + , end_position(other.end_position) +#endif { m_data.m_type = other.m_data.m_type; // check of passed value is valid @@ -20516,15 +21239,24 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec /// @sa https://json.nlohmann.me/api/basic_json/basic_json/ basic_json(basic_json&& other) noexcept : json_base_class_t(std::forward(other)), - m_data(std::move(other.m_data)) + m_data(std::move(other.m_data)) // cppcheck-suppress[accessForwarded] TODO check +#if JSON_DIAGNOSTIC_POSITIONS + , start_position(other.start_position) // cppcheck-suppress[accessForwarded] TODO check + , end_position(other.end_position) // cppcheck-suppress[accessForwarded] TODO check +#endif { // check that passed value is valid - other.assert_invariant(false); + other.assert_invariant(false); // cppcheck-suppress[accessForwarded] // invalidate payload other.m_data.m_type = value_t::null; other.m_data.m_value = {}; +#if JSON_DIAGNOSTIC_POSITIONS + other.start_position = std::string::npos; + other.end_position = std::string::npos; +#endif + set_parents(); assert_invariant(); } @@ -20545,6 +21277,12 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec using std::swap; swap(m_data.m_type, other.m_data.m_type); swap(m_data.m_value, other.m_data.m_value); + +#if JSON_DIAGNOSTIC_POSITIONS + swap(start_position, other.start_position); + swap(end_position, other.end_position); +#endif + json_base_class_t::operator=(std::move(other)); set_parents(); @@ -20766,13 +21504,13 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec /// get a pointer to the value (integer number) number_integer_t* get_impl_ptr(number_integer_t* /*unused*/) noexcept { - return is_number_integer() ? &m_data.m_value.number_integer : nullptr; + return m_data.m_type == value_t::number_integer ? &m_data.m_value.number_integer : nullptr; } /// get a pointer to the value (integer number) constexpr const number_integer_t* get_impl_ptr(const number_integer_t* /*unused*/) const noexcept { - return is_number_integer() ? &m_data.m_value.number_integer : nullptr; + return m_data.m_type == value_t::number_integer ? 
&m_data.m_value.number_integer : nullptr; } /// get a pointer to the value (unsigned number) @@ -20907,7 +21645,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec detail::has_from_json::value, int > = 0 > ValueType get_impl(detail::priority_tag<0> /*unused*/) const noexcept(noexcept( - JSONSerializer::from_json(std::declval(), std::declval()))) + JSONSerializer::from_json(std::declval(), std::declval()))) { auto ret = ValueType(); JSONSerializer::from_json(*this, ret); @@ -20949,7 +21687,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec detail::has_non_default_from_json::value, int > = 0 > ValueType get_impl(detail::priority_tag<1> /*unused*/) const noexcept(noexcept( - JSONSerializer::from_json(std::declval()))) + JSONSerializer::from_json(std::declval()))) { return JSONSerializer::from_json(*this); } @@ -21099,7 +21837,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec detail::has_from_json::value, int > = 0 > ValueType & get_to(ValueType& v) const noexcept(noexcept( - JSONSerializer::from_json(std::declval(), v))) + JSONSerializer::from_json(std::declval(), v))) { JSONSerializer::from_json(*this, v); return v; @@ -21251,7 +21989,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec { // create better exception explanation JSON_THROW(out_of_range::create(401, detail::concat("array index ", std::to_string(idx), " is out of range"), this)); - } + } // cppcheck-suppress[missingReturn] } else { @@ -21274,7 +22012,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec { // create better exception explanation JSON_THROW(out_of_range::create(401, detail::concat("array index ", std::to_string(idx), " is out of range"), this)); - } + } // cppcheck-suppress[missingReturn] } else { @@ -21419,7 +22157,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec /// @brief access specified object element /// @sa https://json.nlohmann.me/api/basic_json/operator%5B%5D/ - reference operator[](typename object_t::key_type key) + reference operator[](typename object_t::key_type key) // NOLINT(performance-unnecessary-value-param) { // implicitly convert null value to an empty object if (is_null()) @@ -21729,7 +22467,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec template < class IteratorType, detail::enable_if_t < std::is_same::value || std::is_same::value, int > = 0 > - IteratorType erase(IteratorType pos) + IteratorType erase(IteratorType pos) // NOLINT(performance-unnecessary-value-param) { // make sure iterator fits the current value if (JSON_HEDLEY_UNLIKELY(this != pos.m_object)) @@ -21799,7 +22537,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec template < class IteratorType, detail::enable_if_t < std::is_same::value || std::is_same::value, int > = 0 > - IteratorType erase(IteratorType first, IteratorType last) + IteratorType erase(IteratorType first, IteratorType last) // NOLINT(performance-unnecessary-value-param) { // make sure iterator fits the current value if (JSON_HEDLEY_UNLIKELY(this != first.m_object || this != last.m_object)) @@ -22566,7 +23304,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec /// @note: This uses std::distance to support GCC 4.8, /// see https://github.com/nlohmann/json/pull/1257 template - iterator insert_iterator(const_iterator pos, Args&& ... 
args) + iterator insert_iterator(const_iterator pos, Args&& ... args) // NOLINT(performance-unnecessary-value-param) { iterator result(this); JSON_ASSERT(m_data.m_value.array != nullptr); @@ -22585,7 +23323,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec /// @brief inserts element into array /// @sa https://json.nlohmann.me/api/basic_json/insert/ - iterator insert(const_iterator pos, const basic_json& val) + iterator insert(const_iterator pos, const basic_json& val) // NOLINT(performance-unnecessary-value-param) { // insert only works for arrays if (JSON_HEDLEY_LIKELY(is_array())) @@ -22605,14 +23343,14 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec /// @brief inserts element into array /// @sa https://json.nlohmann.me/api/basic_json/insert/ - iterator insert(const_iterator pos, basic_json&& val) + iterator insert(const_iterator pos, basic_json&& val) // NOLINT(performance-unnecessary-value-param) { return insert(pos, val); } /// @brief inserts copies of element into array /// @sa https://json.nlohmann.me/api/basic_json/insert/ - iterator insert(const_iterator pos, size_type cnt, const basic_json& val) + iterator insert(const_iterator pos, size_type cnt, const basic_json& val) // NOLINT(performance-unnecessary-value-param) { // insert only works for arrays if (JSON_HEDLEY_LIKELY(is_array())) @@ -22632,7 +23370,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec /// @brief inserts range of elements into array /// @sa https://json.nlohmann.me/api/basic_json/insert/ - iterator insert(const_iterator pos, const_iterator first, const_iterator last) + iterator insert(const_iterator pos, const_iterator first, const_iterator last) // NOLINT(performance-unnecessary-value-param) { // insert only works for arrays if (JSON_HEDLEY_UNLIKELY(!is_array())) @@ -22663,7 +23401,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec /// @brief inserts elements from initializer list into array /// @sa https://json.nlohmann.me/api/basic_json/insert/ - iterator insert(const_iterator pos, initializer_list_t ilist) + iterator insert(const_iterator pos, initializer_list_t ilist) // NOLINT(performance-unnecessary-value-param) { // insert only works for arrays if (JSON_HEDLEY_UNLIKELY(!is_array())) @@ -22683,7 +23421,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec /// @brief inserts range of elements into object /// @sa https://json.nlohmann.me/api/basic_json/insert/ - void insert(const_iterator first, const_iterator last) + void insert(const_iterator first, const_iterator last) // NOLINT(performance-unnecessary-value-param) { // insert only works for objects if (JSON_HEDLEY_UNLIKELY(!is_object())) @@ -22704,6 +23442,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec } m_data.m_value.object->insert(first.m_it.object_iterator, last.m_it.object_iterator); + set_parents(); } /// @brief updates a JSON object from another object, overwriting existing keys @@ -22715,7 +23454,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec /// @brief updates a JSON object from another object, overwriting existing keys /// @sa https://json.nlohmann.me/api/basic_json/update/ - void update(const_iterator first, const_iterator last, bool merge_objects = false) + void update(const_iterator first, const_iterator last, bool merge_objects = false) // NOLINT(performance-unnecessary-value-param) { // 
implicitly convert null value to an empty object if (is_null()) @@ -23316,12 +24055,12 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec template JSON_HEDLEY_WARN_UNUSED_RESULT static basic_json parse(InputType&& i, - const parser_callback_t cb = nullptr, + parser_callback_t cb = nullptr, const bool allow_exceptions = true, const bool ignore_comments = false) { basic_json result; - parser(detail::input_adapter(std::forward(i)), cb, allow_exceptions, ignore_comments).parse(true, result); + parser(detail::input_adapter(std::forward(i)), std::move(cb), allow_exceptions, ignore_comments).parse(true, result); // cppcheck-suppress[accessMoved,accessForwarded] return result; } @@ -23331,24 +24070,24 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec JSON_HEDLEY_WARN_UNUSED_RESULT static basic_json parse(IteratorType first, IteratorType last, - const parser_callback_t cb = nullptr, + parser_callback_t cb = nullptr, const bool allow_exceptions = true, const bool ignore_comments = false) { basic_json result; - parser(detail::input_adapter(std::move(first), std::move(last)), cb, allow_exceptions, ignore_comments).parse(true, result); + parser(detail::input_adapter(std::move(first), std::move(last)), std::move(cb), allow_exceptions, ignore_comments).parse(true, result); // cppcheck-suppress[accessMoved] return result; } JSON_HEDLEY_WARN_UNUSED_RESULT JSON_HEDLEY_DEPRECATED_FOR(3.8.0, parse(ptr, ptr + len)) static basic_json parse(detail::span_input_adapter&& i, - const parser_callback_t cb = nullptr, + parser_callback_t cb = nullptr, const bool allow_exceptions = true, const bool ignore_comments = false) { basic_json result; - parser(i.get(), cb, allow_exceptions, ignore_comments).parse(true, result); + parser(i.get(), std::move(cb), allow_exceptions, ignore_comments).parse(true, result); // cppcheck-suppress[accessMoved] return result; } @@ -23527,6 +24266,23 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec basic_json* m_parent = nullptr; #endif +#if JSON_DIAGNOSTIC_POSITIONS + /// the start position of the value + std::size_t start_position = std::string::npos; + /// the end position of the value + std::size_t end_position = std::string::npos; + public: + constexpr std::size_t start_pos() const noexcept + { + return start_position; + } + + constexpr std::size_t end_pos() const noexcept + { + return end_position; + } +#endif + ////////////////////////////////////////// // binary serialization/deserialization // ////////////////////////////////////////// @@ -23612,27 +24368,30 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec /// @sa https://json.nlohmann.me/api/basic_json/to_bjdata/ static std::vector to_bjdata(const basic_json& j, const bool use_size = false, - const bool use_type = false) + const bool use_type = false, + const bjdata_version_t version = bjdata_version_t::draft2) { std::vector result; - to_bjdata(j, result, use_size, use_type); + to_bjdata(j, result, use_size, use_type, version); return result; } /// @brief create a BJData serialization of a given JSON value /// @sa https://json.nlohmann.me/api/basic_json/to_bjdata/ static void to_bjdata(const basic_json& j, detail::output_adapter o, - const bool use_size = false, const bool use_type = false) + const bool use_size = false, const bool use_type = false, + const bjdata_version_t version = bjdata_version_t::draft2) { - binary_writer(o).write_ubjson(j, use_size, use_type, true, true); + 
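
The JSON_DIAGNOSTIC_POSITIONS members introduced above record where each parsed value sat in the input; start_pos()/end_pos() return byte offsets, or std::string::npos for values that did not come from a parse. A sketch, assuming the macro is defined before the single header is included:

    #define JSON_DIAGNOSTIC_POSITIONS 1
    #include <nlohmann/json.hpp>
    #include <iostream>

    int main() {
        const auto j = nlohmann::json::parse(R"({"arch": "gptoss"})");
        const auto & v = j.at("arch");
        // byte offsets into the original text buffer that produced this value
        std::cout << v.start_pos() << ".." << v.end_pos() << '\n';
    }
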
binary_writer(o).write_ubjson(j, use_size, use_type, true, true, version); } /// @brief create a BJData serialization of a given JSON value /// @sa https://json.nlohmann.me/api/basic_json/to_bjdata/ static void to_bjdata(const basic_json& j, detail::output_adapter o, - const bool use_size = false, const bool use_type = false) + const bool use_size = false, const bool use_type = false, + const bjdata_version_t version = bjdata_version_t::draft2) { - binary_writer(o).write_ubjson(j, use_size, use_type, true, true); + binary_writer(o).write_ubjson(j, use_size, use_type, true, true, version); } /// @brief create a BSON serialization of a given JSON value @@ -23668,9 +24427,9 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec const cbor_tag_handler_t tag_handler = cbor_tag_handler_t::error) { basic_json result; - detail::json_sax_dom_parser sdp(result, allow_exceptions); auto ia = detail::input_adapter(std::forward(i)); - const bool res = binary_reader(std::move(ia), input_format_t::cbor).sax_parse(input_format_t::cbor, &sdp, strict, tag_handler); + detail::json_sax_dom_parser sdp(result, allow_exceptions); + const bool res = binary_reader(std::move(ia), input_format_t::cbor).sax_parse(input_format_t::cbor, &sdp, strict, tag_handler); // cppcheck-suppress[accessMoved] return res ? result : basic_json(value_t::discarded); } @@ -23684,9 +24443,9 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec const cbor_tag_handler_t tag_handler = cbor_tag_handler_t::error) { basic_json result; - detail::json_sax_dom_parser sdp(result, allow_exceptions); auto ia = detail::input_adapter(std::move(first), std::move(last)); - const bool res = binary_reader(std::move(ia), input_format_t::cbor).sax_parse(input_format_t::cbor, &sdp, strict, tag_handler); + detail::json_sax_dom_parser sdp(result, allow_exceptions); + const bool res = binary_reader(std::move(ia), input_format_t::cbor).sax_parse(input_format_t::cbor, &sdp, strict, tag_handler); // cppcheck-suppress[accessMoved] return res ? result : basic_json(value_t::discarded); } @@ -23709,10 +24468,10 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec const cbor_tag_handler_t tag_handler = cbor_tag_handler_t::error) { basic_json result; - detail::json_sax_dom_parser sdp(result, allow_exceptions); auto ia = i.get(); + detail::json_sax_dom_parser sdp(result, allow_exceptions); // NOLINTNEXTLINE(hicpp-move-const-arg,performance-move-const-arg) - const bool res = binary_reader(std::move(ia), input_format_t::cbor).sax_parse(input_format_t::cbor, &sdp, strict, tag_handler); + const bool res = binary_reader(std::move(ia), input_format_t::cbor).sax_parse(input_format_t::cbor, &sdp, strict, tag_handler); // cppcheck-suppress[accessMoved] return res ? result : basic_json(value_t::discarded); } @@ -23725,9 +24484,9 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec const bool allow_exceptions = true) { basic_json result; - detail::json_sax_dom_parser sdp(result, allow_exceptions); auto ia = detail::input_adapter(std::forward(i)); - const bool res = binary_reader(std::move(ia), input_format_t::msgpack).sax_parse(input_format_t::msgpack, &sdp, strict); + detail::json_sax_dom_parser sdp(result, allow_exceptions); + const bool res = binary_reader(std::move(ia), input_format_t::msgpack).sax_parse(input_format_t::msgpack, &sdp, strict); // cppcheck-suppress[accessMoved] return res ? 
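
The bjdata_version_t parameter threaded through the to_bjdata() overloads above selects the BJData spec revision at serialization time; draft2 stays the default, so existing callers compile unchanged. A hedged usage sketch:

    #include <nlohmann/json.hpp>
    #include <cstdint>
    #include <vector>

    int main() {
        const nlohmann::json j = {{"values", {1, 2, 3}}};
        // old behaviour: draft-2 encoding by default
        const std::vector<std::uint8_t> v2 = nlohmann::json::to_bjdata(j, true, true);
        // opt in to the draft-3 encoding via the new trailing argument
        const std::vector<std::uint8_t> v3 =
            nlohmann::json::to_bjdata(j, true, true, nlohmann::json::bjdata_version_t::draft3);
    }
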
result : basic_json(value_t::discarded); } @@ -23740,9 +24499,9 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec const bool allow_exceptions = true) { basic_json result; - detail::json_sax_dom_parser sdp(result, allow_exceptions); auto ia = detail::input_adapter(std::move(first), std::move(last)); - const bool res = binary_reader(std::move(ia), input_format_t::msgpack).sax_parse(input_format_t::msgpack, &sdp, strict); + detail::json_sax_dom_parser sdp(result, allow_exceptions); + const bool res = binary_reader(std::move(ia), input_format_t::msgpack).sax_parse(input_format_t::msgpack, &sdp, strict); // cppcheck-suppress[accessMoved] return res ? result : basic_json(value_t::discarded); } @@ -23763,10 +24522,10 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec const bool allow_exceptions = true) { basic_json result; - detail::json_sax_dom_parser sdp(result, allow_exceptions); auto ia = i.get(); + detail::json_sax_dom_parser sdp(result, allow_exceptions); // NOLINTNEXTLINE(hicpp-move-const-arg,performance-move-const-arg) - const bool res = binary_reader(std::move(ia), input_format_t::msgpack).sax_parse(input_format_t::msgpack, &sdp, strict); + const bool res = binary_reader(std::move(ia), input_format_t::msgpack).sax_parse(input_format_t::msgpack, &sdp, strict); // cppcheck-suppress[accessMoved] return res ? result : basic_json(value_t::discarded); } @@ -23779,9 +24538,9 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec const bool allow_exceptions = true) { basic_json result; - detail::json_sax_dom_parser sdp(result, allow_exceptions); auto ia = detail::input_adapter(std::forward(i)); - const bool res = binary_reader(std::move(ia), input_format_t::ubjson).sax_parse(input_format_t::ubjson, &sdp, strict); + detail::json_sax_dom_parser sdp(result, allow_exceptions); + const bool res = binary_reader(std::move(ia), input_format_t::ubjson).sax_parse(input_format_t::ubjson, &sdp, strict); // cppcheck-suppress[accessMoved] return res ? result : basic_json(value_t::discarded); } @@ -23794,9 +24553,9 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec const bool allow_exceptions = true) { basic_json result; - detail::json_sax_dom_parser sdp(result, allow_exceptions); auto ia = detail::input_adapter(std::move(first), std::move(last)); - const bool res = binary_reader(std::move(ia), input_format_t::ubjson).sax_parse(input_format_t::ubjson, &sdp, strict); + detail::json_sax_dom_parser sdp(result, allow_exceptions); + const bool res = binary_reader(std::move(ia), input_format_t::ubjson).sax_parse(input_format_t::ubjson, &sdp, strict); // cppcheck-suppress[accessMoved] return res ? result : basic_json(value_t::discarded); } @@ -23817,10 +24576,10 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec const bool allow_exceptions = true) { basic_json result; - detail::json_sax_dom_parser sdp(result, allow_exceptions); auto ia = i.get(); + detail::json_sax_dom_parser sdp(result, allow_exceptions); // NOLINTNEXTLINE(hicpp-move-const-arg,performance-move-const-arg) - const bool res = binary_reader(std::move(ia), input_format_t::ubjson).sax_parse(input_format_t::ubjson, &sdp, strict); + const bool res = binary_reader(std::move(ia), input_format_t::ubjson).sax_parse(input_format_t::ubjson, &sdp, strict); // cppcheck-suppress[accessMoved] return res ? 
result : basic_json(value_t::discarded); } @@ -23833,9 +24592,9 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec const bool allow_exceptions = true) { basic_json result; - detail::json_sax_dom_parser sdp(result, allow_exceptions); auto ia = detail::input_adapter(std::forward(i)); - const bool res = binary_reader(std::move(ia), input_format_t::bjdata).sax_parse(input_format_t::bjdata, &sdp, strict); + detail::json_sax_dom_parser sdp(result, allow_exceptions); + const bool res = binary_reader(std::move(ia), input_format_t::bjdata).sax_parse(input_format_t::bjdata, &sdp, strict); // cppcheck-suppress[accessMoved] return res ? result : basic_json(value_t::discarded); } @@ -23848,9 +24607,9 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec const bool allow_exceptions = true) { basic_json result; - detail::json_sax_dom_parser sdp(result, allow_exceptions); auto ia = detail::input_adapter(std::move(first), std::move(last)); - const bool res = binary_reader(std::move(ia), input_format_t::bjdata).sax_parse(input_format_t::bjdata, &sdp, strict); + detail::json_sax_dom_parser sdp(result, allow_exceptions); + const bool res = binary_reader(std::move(ia), input_format_t::bjdata).sax_parse(input_format_t::bjdata, &sdp, strict); // cppcheck-suppress[accessMoved] return res ? result : basic_json(value_t::discarded); } @@ -23863,9 +24622,9 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec const bool allow_exceptions = true) { basic_json result; - detail::json_sax_dom_parser sdp(result, allow_exceptions); auto ia = detail::input_adapter(std::forward(i)); - const bool res = binary_reader(std::move(ia), input_format_t::bson).sax_parse(input_format_t::bson, &sdp, strict); + detail::json_sax_dom_parser sdp(result, allow_exceptions); + const bool res = binary_reader(std::move(ia), input_format_t::bson).sax_parse(input_format_t::bson, &sdp, strict); // cppcheck-suppress[accessMoved] return res ? result : basic_json(value_t::discarded); } @@ -23878,9 +24637,9 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec const bool allow_exceptions = true) { basic_json result; - detail::json_sax_dom_parser sdp(result, allow_exceptions); auto ia = detail::input_adapter(std::move(first), std::move(last)); - const bool res = binary_reader(std::move(ia), input_format_t::bson).sax_parse(input_format_t::bson, &sdp, strict); + detail::json_sax_dom_parser sdp(result, allow_exceptions); + const bool res = binary_reader(std::move(ia), input_format_t::bson).sax_parse(input_format_t::bson, &sdp, strict); // cppcheck-suppress[accessMoved] return res ? result : basic_json(value_t::discarded); } @@ -23901,10 +24660,10 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec const bool allow_exceptions = true) { basic_json result; - detail::json_sax_dom_parser sdp(result, allow_exceptions); auto ia = i.get(); + detail::json_sax_dom_parser sdp(result, allow_exceptions); // NOLINTNEXTLINE(hicpp-move-const-arg,performance-move-const-arg) - const bool res = binary_reader(std::move(ia), input_format_t::bson).sax_parse(input_format_t::bson, &sdp, strict); + const bool res = binary_reader(std::move(ia), input_format_t::bson).sax_parse(input_format_t::bson, &sdp, strict); // cppcheck-suppress[accessMoved] return res ? 
result : basic_json(value_t::discarded); } /// @} @@ -24005,7 +24764,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec // the valid JSON Patch operations enum class patch_operations {add, remove, replace, move, copy, test, invalid}; - const auto get_op = [](const std::string & op) + const auto get_op = [](const string_t& op) { if (op == "add") { @@ -24036,7 +24795,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec }; // wrapper for "add" operation; add value at ptr - const auto operation_add = [&result](json_pointer & ptr, basic_json val) + const auto operation_add = [&result](json_pointer & ptr, const basic_json & val) { // adding to the root of the target document means replacing it if (ptr.empty()) @@ -24142,8 +24901,8 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec for (const auto& val : json_patch) { // wrapper to get a value for an operation - const auto get_value = [&val](const std::string & op, - const std::string & member, + const auto get_value = [&val](const string_t& op, + const string_t& member, bool string_type) -> basic_json & { // find value @@ -24177,8 +24936,8 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec } // collect mandatory members - const auto op = get_value("op", "op", true).template get(); - const auto path = get_value(op, "path", true).template get(); + const auto op = get_value("op", "op", true).template get(); + const auto path = get_value(op, "path", true).template get(); json_pointer ptr(path); switch (get_op(op)) @@ -24204,7 +24963,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec case patch_operations::move: { - const auto from_path = get_value("move", "from", true).template get(); + const auto from_path = get_value("move", "from", true).template get(); json_pointer from_ptr(from_path); // the "from" location must exist - use at() @@ -24221,7 +24980,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec case patch_operations::copy: { - const auto from_path = get_value("copy", "from", true).template get(); + const auto from_path = get_value("copy", "from", true).template get(); const json_pointer from_ptr(from_path); // the "from" location must exist - use at() @@ -24281,7 +25040,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec /// @sa https://json.nlohmann.me/api/basic_json/diff/ JSON_HEDLEY_WARN_UNUSED_RESULT static basic_json diff(const basic_json& source, const basic_json& target, - const std::string& path = "") + const string_t& path = "") { // the patch basic_json result(value_t::array); @@ -24311,7 +25070,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec while (i < source.size() && i < target.size()) { // recursive call to compare array values at index i - auto temp_diff = diff(source[i], target[i], detail::concat(path, '/', std::to_string(i))); + auto temp_diff = diff(source[i], target[i], detail::concat(path, '/', detail::to_string(i))); result.insert(result.end(), temp_diff.begin(), temp_diff.end()); ++i; } @@ -24328,7 +25087,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec result.insert(result.begin() + end_index, object( { {"op", "remove"}, - {"path", detail::concat(path, '/', std::to_string(i))} + {"path", detail::concat(path, '/', detail::to_string(i))} })); ++i; } @@ -24339,7 +25098,7 @@ class basic_json // 
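
The JSON Patch plumbing above now keys operations off string_t instead of a hard-coded std::string, so basic_json specializations with custom string types flow through diff() and patch() as well. The round trip those helpers implement, sketched with the default json type:

    #include <nlohmann/json.hpp>
    #include <cassert>

    int main() {
        const nlohmann::json source = {{"name", "llama"}, {"layers", 32}};
        const nlohmann::json target = {{"name", "llama"}, {"layers", 48}};
        const auto p = nlohmann::json::diff(source, target); // RFC 6902 ops, e.g. "replace" at /layers
        assert(source.patch(p) == target);                   // applying the patch reproduces target
    }
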
NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec result.push_back( { {"op", "add"}, - {"path", detail::concat(path, "/-")}, + {"path", detail::concat(path, "/-")}, {"value", target[i]} }); ++i; @@ -24354,7 +25113,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec for (auto it = source.cbegin(); it != source.cend(); ++it) { // escape the key name to be used in a JSON patch - const auto path_key = detail::concat(path, '/', detail::escape(it.key())); + const auto path_key = detail::concat(path, '/', detail::escape(it.key())); if (target.find(it.key()) != target.end()) { @@ -24378,7 +25137,7 @@ class basic_json // NOLINT(cppcoreguidelines-special-member-functions,hicpp-spec if (source.find(it.key()) == source.end()) { // found a key that is not in this -> add it - const auto path_key = detail::concat(path, '/', detail::escape(it.key())); + const auto path_key = detail::concat(path, '/', detail::escape(it.key())); result.push_back( { {"op", "add"}, {"path", path_key}, @@ -24559,10 +25318,10 @@ inline void swap(nlohmann::NLOHMANN_BASIC_JSON_TPL& j1, nlohmann::NLOHMANN_BASIC // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT @@ -24593,6 +25352,7 @@ inline void swap(nlohmann::NLOHMANN_BASIC_JSON_TPL& j1, nlohmann::NLOHMANN_BASIC #undef JSON_HAS_CPP_14 #undef JSON_HAS_CPP_17 #undef JSON_HAS_CPP_20 + #undef JSON_HAS_CPP_23 #undef JSON_HAS_FILESYSTEM #undef JSON_HAS_EXPERIMENTAL_FILESYSTEM #undef JSON_HAS_THREE_WAY_COMPARISON @@ -24604,10 +25364,10 @@ inline void swap(nlohmann::NLOHMANN_BASIC_JSON_TPL& j1, nlohmann::NLOHMANN_BASIC // #include // __ _____ _____ _____ // __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.3 +// | | |__ | | | | | | version 3.12.0 // |_____|_____|_____|_|___| https://github.com/nlohmann/json // -// SPDX-FileCopyrightText: 2013-2023 Niels Lohmann +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann // SPDX-License-Identifier: MIT diff --git a/llama/llama.cpp/vendor/nlohmann/json_fwd.hpp b/llama/llama.cpp/vendor/nlohmann/json_fwd.hpp new file mode 100644 index 000000000..942917139 --- /dev/null +++ b/llama/llama.cpp/vendor/nlohmann/json_fwd.hpp @@ -0,0 +1,187 @@ +// __ _____ _____ _____ +// __| | __| | | | JSON for Modern C++ +// | | |__ | | | | | | version 3.12.0 +// |_____|_____|_____|_|___| https://github.com/nlohmann/json +// +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann +// SPDX-License-Identifier: MIT + +#ifndef INCLUDE_NLOHMANN_JSON_FWD_HPP_ +#define INCLUDE_NLOHMANN_JSON_FWD_HPP_ + +#include // int64_t, uint64_t +#include // map +#include // allocator +#include // string +#include // vector + +// #include +// __ _____ _____ _____ +// __| | __| | | | JSON for Modern C++ +// | | |__ | | | | | | version 3.12.0 +// |_____|_____|_____|_|___| https://github.com/nlohmann/json +// +// SPDX-FileCopyrightText: 2013 - 2025 Niels Lohmann +// SPDX-License-Identifier: MIT + + + +// This file contains all macro definitions affecting or depending on the ABI + +#ifndef JSON_SKIP_LIBRARY_VERSION_CHECK + #if defined(NLOHMANN_JSON_VERSION_MAJOR) && defined(NLOHMANN_JSON_VERSION_MINOR) && defined(NLOHMANN_JSON_VERSION_PATCH) + #if NLOHMANN_JSON_VERSION_MAJOR != 3 || NLOHMANN_JSON_VERSION_MINOR != 12 || 
NLOHMANN_JSON_VERSION_PATCH != 0 + #warning "Already included a different version of the library!" + #endif + #endif +#endif + +#define NLOHMANN_JSON_VERSION_MAJOR 3 // NOLINT(modernize-macro-to-enum) +#define NLOHMANN_JSON_VERSION_MINOR 12 // NOLINT(modernize-macro-to-enum) +#define NLOHMANN_JSON_VERSION_PATCH 0 // NOLINT(modernize-macro-to-enum) + +#ifndef JSON_DIAGNOSTICS + #define JSON_DIAGNOSTICS 0 +#endif + +#ifndef JSON_DIAGNOSTIC_POSITIONS + #define JSON_DIAGNOSTIC_POSITIONS 0 +#endif + +#ifndef JSON_USE_LEGACY_DISCARDED_VALUE_COMPARISON + #define JSON_USE_LEGACY_DISCARDED_VALUE_COMPARISON 0 +#endif + +#if JSON_DIAGNOSTICS + #define NLOHMANN_JSON_ABI_TAG_DIAGNOSTICS _diag +#else + #define NLOHMANN_JSON_ABI_TAG_DIAGNOSTICS +#endif + +#if JSON_DIAGNOSTIC_POSITIONS + #define NLOHMANN_JSON_ABI_TAG_DIAGNOSTIC_POSITIONS _dp +#else + #define NLOHMANN_JSON_ABI_TAG_DIAGNOSTIC_POSITIONS +#endif + +#if JSON_USE_LEGACY_DISCARDED_VALUE_COMPARISON + #define NLOHMANN_JSON_ABI_TAG_LEGACY_DISCARDED_VALUE_COMPARISON _ldvcmp +#else + #define NLOHMANN_JSON_ABI_TAG_LEGACY_DISCARDED_VALUE_COMPARISON +#endif + +#ifndef NLOHMANN_JSON_NAMESPACE_NO_VERSION + #define NLOHMANN_JSON_NAMESPACE_NO_VERSION 0 +#endif + +// Construct the namespace ABI tags component +#define NLOHMANN_JSON_ABI_TAGS_CONCAT_EX(a, b, c) json_abi ## a ## b ## c +#define NLOHMANN_JSON_ABI_TAGS_CONCAT(a, b, c) \ + NLOHMANN_JSON_ABI_TAGS_CONCAT_EX(a, b, c) + +#define NLOHMANN_JSON_ABI_TAGS \ + NLOHMANN_JSON_ABI_TAGS_CONCAT( \ + NLOHMANN_JSON_ABI_TAG_DIAGNOSTICS, \ + NLOHMANN_JSON_ABI_TAG_LEGACY_DISCARDED_VALUE_COMPARISON, \ + NLOHMANN_JSON_ABI_TAG_DIAGNOSTIC_POSITIONS) + +// Construct the namespace version component +#define NLOHMANN_JSON_NAMESPACE_VERSION_CONCAT_EX(major, minor, patch) \ + _v ## major ## _ ## minor ## _ ## patch +#define NLOHMANN_JSON_NAMESPACE_VERSION_CONCAT(major, minor, patch) \ + NLOHMANN_JSON_NAMESPACE_VERSION_CONCAT_EX(major, minor, patch) + +#if NLOHMANN_JSON_NAMESPACE_NO_VERSION +#define NLOHMANN_JSON_NAMESPACE_VERSION +#else +#define NLOHMANN_JSON_NAMESPACE_VERSION \ + NLOHMANN_JSON_NAMESPACE_VERSION_CONCAT(NLOHMANN_JSON_VERSION_MAJOR, \ + NLOHMANN_JSON_VERSION_MINOR, \ + NLOHMANN_JSON_VERSION_PATCH) +#endif + +// Combine namespace components +#define NLOHMANN_JSON_NAMESPACE_CONCAT_EX(a, b) a ## b +#define NLOHMANN_JSON_NAMESPACE_CONCAT(a, b) \ + NLOHMANN_JSON_NAMESPACE_CONCAT_EX(a, b) + +#ifndef NLOHMANN_JSON_NAMESPACE +#define NLOHMANN_JSON_NAMESPACE \ + nlohmann::NLOHMANN_JSON_NAMESPACE_CONCAT( \ + NLOHMANN_JSON_ABI_TAGS, \ + NLOHMANN_JSON_NAMESPACE_VERSION) +#endif + +#ifndef NLOHMANN_JSON_NAMESPACE_BEGIN +#define NLOHMANN_JSON_NAMESPACE_BEGIN \ + namespace nlohmann \ + { \ + inline namespace NLOHMANN_JSON_NAMESPACE_CONCAT( \ + NLOHMANN_JSON_ABI_TAGS, \ + NLOHMANN_JSON_NAMESPACE_VERSION) \ + { +#endif + +#ifndef NLOHMANN_JSON_NAMESPACE_END +#define NLOHMANN_JSON_NAMESPACE_END \ + } /* namespace (inline namespace) NOLINT(readability/namespace) */ \ + } // namespace nlohmann +#endif + + +/*! +@brief namespace for Niels Lohmann +@see https://github.com/nlohmann +@since version 1.0.0 +*/ +NLOHMANN_JSON_NAMESPACE_BEGIN + +/*! +@brief default JSONSerializer template argument + +This serializer ignores the template arguments and uses ADL +([argument-dependent lookup](https://en.cppreference.com/w/cpp/language/adl)) +for serialization. 
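
The newly vendored json_fwd.hpp exists so headers can name the nlohmann types without paying for the full single-header include; only translation units that use the definitions pull in json.hpp. A typical split (the handler names are illustrative, not from this repo):

    // handler.h - cheap to include everywhere
    #include <nlohmann/json_fwd.hpp>
    void handle_request(const nlohmann::json & body);

    // handler.cpp - the full definition is needed only here
    #include <nlohmann/json.hpp>
    void handle_request(const nlohmann::json & body) {
        (void) body.dump(); // requires the complete basic_json type
    }
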
+*/ +template +struct adl_serializer; + +/// a class to store JSON values +/// @sa https://json.nlohmann.me/api/basic_json/ +template class ObjectType = + std::map, + template class ArrayType = std::vector, + class StringType = std::string, class BooleanType = bool, + class NumberIntegerType = std::int64_t, + class NumberUnsignedType = std::uint64_t, + class NumberFloatType = double, + template class AllocatorType = std::allocator, + template class JSONSerializer = + adl_serializer, + class BinaryType = std::vector, // cppcheck-suppress syntaxError + class CustomBaseClass = void> +class basic_json; + +/// @brief JSON Pointer defines a string syntax for identifying a specific value within a JSON document +/// @sa https://json.nlohmann.me/api/json_pointer/ +template +class json_pointer; + +/*! +@brief default specialization +@sa https://json.nlohmann.me/api/json/ +*/ +using json = basic_json<>; + +/// @brief a minimal map-like container that preserves insertion order +/// @sa https://json.nlohmann.me/api/ordered_map/ +template +struct ordered_map; + +/// @brief specialization that maintains the insertion order of object keys +/// @sa https://json.nlohmann.me/api/ordered_json/ +using ordered_json = basic_json; + +NLOHMANN_JSON_NAMESPACE_END + +#endif // INCLUDE_NLOHMANN_JSON_FWD_HPP_ diff --git a/llama/llama.cpp/common/stb_image.h b/llama/llama.cpp/vendor/stb/stb_image.h similarity index 100% rename from llama/llama.cpp/common/stb_image.h rename to llama/llama.cpp/vendor/stb/stb_image.h diff --git a/llama/llama.go b/llama/llama.go index 0dc64e57e..4885949b7 100644 --- a/llama/llama.go +++ b/llama/llama.go @@ -6,6 +6,7 @@ package llama #cgo CXXFLAGS: -std=c++17 #cgo CPPFLAGS: -I${SRCDIR}/llama.cpp/include #cgo CPPFLAGS: -I${SRCDIR}/llama.cpp/common +#cgo CPPFLAGS: -I${SRCDIR}/llama.cpp/vendor #cgo CPPFLAGS: -I${SRCDIR}/llama.cpp/tools/mtmd #cgo CPPFLAGS: -I${SRCDIR}/llama.cpp/src #cgo CPPFLAGS: -I${SRCDIR}/../ml/backend/ggml/ggml/include @@ -13,8 +14,8 @@ package llama #include #include "ggml.h" #include "llama.h" -#include "clip.h" -#include "llava.h" +#include "mtmd.h" +#include "mtmd-helper.h" #include "gguf.h" #include "sampling_ext.h" @@ -148,27 +149,23 @@ func (c *Context) Model() *Model { } func (c *Context) KvCacheSeqAdd(seqId int, p0 int, p1 int, delta int) { - C.llama_kv_self_seq_add(c.c, C.int(seqId), C.int(p0), C.int(p1), C.int(delta)) + C.llama_memory_seq_add(C.llama_get_memory(c.c), C.int(seqId), C.int(p0), C.int(p1), C.int(delta)) } func (c *Context) KvCacheSeqRm(seqId int, p0 int, p1 int) bool { - return bool(C.llama_kv_self_seq_rm(c.c, C.int(seqId), C.int(p0), C.int(p1))) + return bool(C.llama_memory_seq_rm(C.llama_get_memory(c.c), C.int(seqId), C.int(p0), C.int(p1))) } func (c *Context) KvCacheSeqCp(srcSeqId int, dstSeqId int, p0 int, p1 int) { - C.llama_kv_self_seq_cp(c.c, C.int(srcSeqId), C.int(dstSeqId), C.int(p0), C.int(p1)) + C.llama_memory_seq_cp(C.llama_get_memory(c.c), C.int(srcSeqId), C.int(dstSeqId), C.int(p0), C.int(p1)) } func (c *Context) KvCacheClear() { - C.llama_kv_self_clear(c.c) -} - -func (c *Context) KvCacheDefrag() { - C.llama_kv_self_defrag(c.c) + C.llama_memory_clear(C.llama_get_memory(c.c), true) } func (c *Context) KvCacheCanShift() bool { - return bool(C.llama_kv_self_can_shift(c.c)) + return bool(C.llama_memory_can_shift(C.llama_get_memory(c.c))) } // Get the embeddings for a sequence id @@ -460,52 +457,75 @@ func (m *Model) NEmbd() int { } // vision processing -type ClipContext struct { - c *C.struct_clip_ctx +type MtmdContext struct { + c 
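
The rewritten KV-cache wrappers above track llama.cpp's move from the removed llama_kv_self_* entry points to the llama_memory_* API, which operates on a handle returned by llama_get_memory(). The classic context-shift sequence looks like this from the C API; a sketch, assuming a valid context and that n_discard tokens should be dropped from the front of the sequence:

    #include "llama.h"

    void shift_context(llama_context * ctx, llama_seq_id seq, llama_pos n_discard) {
        llama_memory_t mem = llama_get_memory(ctx);
        // drop the oldest n_discard positions of this sequence...
        llama_memory_seq_rm(mem, seq, 0, n_discard);
        // ...then slide the remainder down so decoding can continue
        if (llama_memory_can_shift(mem)) {
            llama_memory_seq_add(mem, seq, n_discard, -1, -n_discard);
        }
    }
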
*C.struct_mtmd_context
}

-func NewClipContext(llamaContext *Context, modelPath string) (*ClipContext, error) {
+func NewMtmdContext(llamaContext *Context, modelPath string) (*MtmdContext, error) {
 	mp := C.CString(modelPath)
 	defer C.free(unsafe.Pointer(mp))
-	c := C.clip_model_load(mp, 1)
+	// TODO: Support non-default params
+	cp := C.mtmd_context_params_default()
+
+	// NOTE: The model and projector embedding lengths are checked during init
+	c := C.mtmd_init_from_file(mp, C.llama_get_model(llamaContext.c), cp)
 	if c == nil {
-		return nil, fmt.Errorf("unable to load clip model: %v", modelPath)
+		return nil, fmt.Errorf("unable to load mtmd model: %v", modelPath)
 	}
-	projEmbedSize := int(C.clip_n_mmproj_embd(c))
-	modelEmbedSize := llamaContext.Model().NEmbd()
-	if projEmbedSize != modelEmbedSize {
-		return nil, fmt.Errorf("projector embedding size (%d) does not match model (%d)", projEmbedSize, modelEmbedSize)
-	}
-
-	return &ClipContext{c: c}, nil
+	return &MtmdContext{c: c}, nil
 }

-func (c *ClipContext) Free() {
-	C.clip_free(c.c)
+func (c *MtmdContext) Free() {
+	C.mtmd_free(c.c)
 }

-func (c *ClipContext) NewEmbed(llamaContext *Context, data []byte) ([][]float32, error) {
-	l := C.llava_image_embed_make_with_bytes(c.c, C.int(llamaContext.numThreads), (*C.uchar)(unsafe.Pointer(&data[0])), C.int(len(data)))
-	if l == nil {
-		return nil, errors.New("unable to make llava embedding from image")
-	}
+func (c *MtmdContext) NewEmbed(llamaContext *Context, data []byte) ([][]float32, error) {
+	// Initialize the input chunks pointer
+	ic := C.mtmd_input_chunks_init()
+	defer C.mtmd_input_chunks_free(ic)

-	numTokens := int(l.n_image_pos)
+	// Initialize an empty text prompt so we can tokenize
+	it := C.mtmd_input_text_init(C.mtmd_default_marker(), true, true)
+	defer C.mtmd_input_text_free(it)
+
+	// Initialize a bitmap with the image data
+	bm := C.mtmd_helper_bitmap_init_from_buf(c.c, (*C.uchar)(unsafe.Pointer(&data[0])), C.size_t(len(data)))
+	defer C.mtmd_bitmap_free(bm)
+
+	// Tokenize the image
+	if C.int32_t(0) != C.mtmd_tokenize(c.c, ic, it, &bm, 1) {
+		return nil, errors.New("unable to tokenize mtmd embedding from image")
+	}
+
+	nChunks := C.mtmd_input_chunks_size(ic)
 	numEmbed := llamaContext.Model().NEmbd()
+	lastChunkSize := 0
+	for i := range int(nChunks) {
+		chunk := C.mtmd_input_chunks_get(ic, C.size_t(i))
+		numTokens := int(C.mtmd_input_chunk_get_n_tokens(chunk))
+		lastChunkSize = numTokens

-	s := unsafe.Slice((*float32)(l.embed), numEmbed*numTokens)
+		// Encode the chunk
+		if C.int32_t(0) != C.mtmd_encode_chunk(c.c, chunk) {
+			return nil, errors.New("unable to encode mtmd image chunk")
+		}
+	}

-	embed := make([][]float32, numTokens)
+	// Get the embeddings
+	embed := make([][]float32, lastChunkSize)
+	embd := C.mtmd_get_output_embd(c.c)
+	if nil == embd {
+		return nil, errors.New("failed to get image embedding")
+	}
+
+	// Extend the embedding array for each token
+	s := unsafe.Slice((*float32)(embd), numEmbed*lastChunkSize)
 	rows := make([]float32, len(s))
 	copy(rows, s)
-
-	for i := range embed {
+	for i := range lastChunkSize {
 		embed[i] = rows[i*numEmbed : (i+1)*numEmbed]
 	}

-	C.llava_image_embed_free(l)
-
 	return embed, nil
 }
diff --git a/llama/patches/.gitignore b/llama/patches/.gitignore
new file mode 100644
index 000000000..9beb6e1a1
--- /dev/null
+++ b/llama/patches/.gitignore
@@ -0,0 +1 @@
+*.patched
diff --git a/llama/patches/0001-ggml-backend-malloc-and-free-using-the-same-compiler.patch b/llama/patches/0001-ggml-backend-malloc-and-free-using-the-same-compiler.patch
index
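
NewEmbed above drives the new mtmd C API end to end: tokenize the bitmap into chunks, encode each chunk, then read the output embedding owned by the context. The same sequence from C++, as a sketch using only the calls the Go wrapper binds (assumes an initialized mtmd_context * mctx and an encoded image buffer):

    #include "mtmd.h"
    #include "mtmd-helper.h"

    const float * embed_image(mtmd_context * mctx, const unsigned char * buf, size_t len) {
        mtmd_input_chunks * chunks = mtmd_input_chunks_init();
        mtmd_input_text * text = mtmd_input_text_init(mtmd_default_marker(), true, true);
        mtmd_bitmap * bmp = mtmd_helper_bitmap_init_from_buf(mctx, buf, len);
        const mtmd_bitmap * bitmaps[] = { bmp };

        // tokenize the image into chunks, then encode each chunk in order
        bool ok = mtmd_tokenize(mctx, chunks, text, bitmaps, 1) == 0;
        for (size_t i = 0; ok && i < mtmd_input_chunks_size(chunks); i++) {
            ok = mtmd_encode_chunk(mctx, mtmd_input_chunks_get(chunks, i)) == 0;
        }
        // n_tokens * n_embd floats, owned by the context (do not free)
        const float * embd = ok ? mtmd_get_output_embd(mctx) : nullptr;

        mtmd_bitmap_free(bmp);
        mtmd_input_text_free(text);
        mtmd_input_chunks_free(chunks);
        return embd;
    }
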
edeeb4ffa..d62331d0f 100644 --- a/llama/patches/0001-ggml-backend-malloc-and-free-using-the-same-compiler.patch +++ b/llama/patches/0001-ggml-backend-malloc-and-free-using-the-same-compiler.patch @@ -12,19 +12,18 @@ MSVC and freed by Clang, which can cause problems. This moves freeing of the buffers into the backends to avoid the problem. --- - ggml/src/ggml-backend.cpp | 9 +++++++-- - ggml/src/ggml-cann/ggml-cann.cpp | 2 ++ - ggml/src/ggml-cuda/ggml-cuda.cu | 3 +++ - ggml/src/ggml-kompute/ggml-kompute.cpp | 1 + - ggml/src/ggml-metal/ggml-metal.m | 1 + - ggml/src/ggml-opencl/ggml-opencl.cpp | 1 + - ggml/src/ggml-rpc/ggml-rpc.cpp | 1 + - ggml/src/ggml-sycl/ggml-sycl.cpp | 3 +++ - ggml/src/ggml-vulkan/ggml-vulkan.cpp | 2 ++ - 9 files changed, 21 insertions(+), 2 deletions(-) + ggml/src/ggml-backend.cpp | 9 +++++++-- + ggml/src/ggml-cann/ggml-cann.cpp | 2 ++ + ggml/src/ggml-cuda/ggml-cuda.cu | 3 +++ + ggml/src/ggml-metal/ggml-metal.m | 1 + + ggml/src/ggml-opencl/ggml-opencl.cpp | 1 + + ggml/src/ggml-rpc/ggml-rpc.cpp | 1 + + ggml/src/ggml-sycl/ggml-sycl.cpp | 3 +++ + ggml/src/ggml-vulkan/ggml-vulkan.cpp | 2 ++ + 8 files changed, 20 insertions(+), 2 deletions(-) diff --git a/ggml/src/ggml-backend.cpp b/ggml/src/ggml-backend.cpp -index b30b4cb3..0ce73a99 100644 +index 1b9d29e9..97f47abd 100644 --- a/ggml/src/ggml-backend.cpp +++ b/ggml/src/ggml-backend.cpp @@ -107,7 +107,6 @@ void ggml_backend_buffer_free(ggml_backend_buffer_t buffer) { @@ -35,7 +34,7 @@ index b30b4cb3..0ce73a99 100644 } size_t ggml_backend_buffer_get_size(ggml_backend_buffer_t buffer) { -@@ -544,6 +543,7 @@ static void ggml_backend_multi_buffer_free_buffer(ggml_backend_buffer_t buffer) +@@ -529,6 +528,7 @@ static void ggml_backend_multi_buffer_free_buffer(ggml_backend_buffer_t buffer) free(ctx->buffers); free(ctx); @@ -43,7 +42,7 @@ index b30b4cb3..0ce73a99 100644 } static void ggml_backend_multi_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) { -@@ -1871,6 +1871,11 @@ static void * ggml_backend_cpu_buffer_get_base(ggml_backend_buffer_t buffer) { +@@ -1890,6 +1890,11 @@ static void * ggml_backend_cpu_buffer_get_base(ggml_backend_buffer_t buffer) { static void ggml_backend_cpu_buffer_free_buffer(ggml_backend_buffer_t buffer) { ggml_aligned_free(buffer->context, buffer->size); @@ -55,7 +54,7 @@ index b30b4cb3..0ce73a99 100644 } static void ggml_backend_cpu_buffer_memset_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, uint8_t value, size_t offset, size_t size) { -@@ -1918,7 +1923,7 @@ static const struct ggml_backend_buffer_i ggml_backend_cpu_buffer_i = { +@@ -1937,7 +1942,7 @@ static const struct ggml_backend_buffer_i ggml_backend_cpu_buffer_i = { }; static const struct ggml_backend_buffer_i ggml_backend_cpu_buffer_from_ptr_i = { @@ -65,10 +64,10 @@ index b30b4cb3..0ce73a99 100644 /* .init_tensor = */ NULL, // no initialization required /* .memset_tensor = */ ggml_backend_cpu_buffer_memset_tensor, diff --git a/ggml/src/ggml-cann/ggml-cann.cpp b/ggml/src/ggml-cann/ggml-cann.cpp -index e2617b06..242e50a7 100644 +index cf575b36..ca1addfa 100755 --- a/ggml/src/ggml-cann/ggml-cann.cpp +++ b/ggml/src/ggml-cann/ggml-cann.cpp -@@ -800,6 +800,7 @@ static void ggml_backend_cann_buffer_free_buffer( +@@ -826,6 +826,7 @@ static void ggml_backend_cann_buffer_free_buffer( ggml_backend_cann_buffer_context* ctx = (ggml_backend_cann_buffer_context*)buffer->context; delete ctx; @@ -76,7 +75,7 @@ index e2617b06..242e50a7 100644 } /** -@@ -1472,6 +1473,7 @@ static const char * 
ggml_backend_cann_host_buffer_name(ggml_backend_buffer_t buf +@@ -1572,6 +1573,7 @@ static const char * ggml_backend_cann_host_buffer_name(ggml_backend_buffer_t buf */ static void ggml_backend_cann_host_buffer_free(ggml_backend_buffer_t buffer) { ACL_CHECK(aclrtFreeHost(buffer->context)); @@ -85,10 +84,10 @@ index e2617b06..242e50a7 100644 /** diff --git a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu -index b4b85abc..cb0d8528 100644 +index d9110491..37ee2a6d 100644 --- a/ggml/src/ggml-cuda/ggml-cuda.cu +++ b/ggml/src/ggml-cuda/ggml-cuda.cu -@@ -534,6 +534,7 @@ struct ggml_backend_cuda_buffer_context { +@@ -567,6 +567,7 @@ struct ggml_backend_cuda_buffer_context { static void ggml_backend_cuda_buffer_free_buffer(ggml_backend_buffer_t buffer) { ggml_backend_cuda_buffer_context * ctx = (ggml_backend_cuda_buffer_context *)buffer->context; delete ctx; @@ -96,7 +95,7 @@ index b4b85abc..cb0d8528 100644 } static bool ggml_backend_buffer_is_cuda(ggml_backend_buffer_t buffer) { -@@ -790,6 +791,7 @@ struct ggml_backend_cuda_split_buffer_context { +@@ -822,6 +823,7 @@ struct ggml_backend_cuda_split_buffer_context { static void ggml_backend_cuda_split_buffer_free_buffer(ggml_backend_buffer_t buffer) { ggml_backend_cuda_split_buffer_context * ctx = (ggml_backend_cuda_split_buffer_context *)buffer->context; delete ctx; @@ -104,7 +103,7 @@ index b4b85abc..cb0d8528 100644 } static void * ggml_backend_cuda_split_buffer_get_base(ggml_backend_buffer_t buffer) { -@@ -1067,6 +1069,7 @@ static const char * ggml_backend_cuda_host_buffer_type_name(ggml_backend_buffer_ +@@ -1103,6 +1105,7 @@ static bool ggml_backend_buft_is_cuda_host(ggml_backend_buffer_type_t buft) { static void ggml_backend_cuda_host_buffer_free_buffer(ggml_backend_buffer_t buffer) { CUDA_CHECK(cudaFreeHost(buffer->context)); @@ -112,23 +111,11 @@ index b4b85abc..cb0d8528 100644 } static void * ggml_cuda_host_malloc(size_t size) { -diff --git a/ggml/src/ggml-kompute/ggml-kompute.cpp b/ggml/src/ggml-kompute/ggml-kompute.cpp -index 50579227..2799a0a5 100644 ---- a/ggml/src/ggml-kompute/ggml-kompute.cpp -+++ b/ggml/src/ggml-kompute/ggml-kompute.cpp -@@ -1911,6 +1911,7 @@ static void ggml_backend_kompute_buffer_free_buffer(ggml_backend_buffer_t buffer - ggml_vk_free_memory(*memory); - } - delete memory; -+ delete buffer; - } - - static void * ggml_backend_kompute_buffer_get_base(ggml_backend_buffer_t buffer) { diff --git a/ggml/src/ggml-metal/ggml-metal.m b/ggml/src/ggml-metal/ggml-metal.m -index 576f9581..1b56f858 100644 +index cb8eff4a..7bccc7bf 100644 --- a/ggml/src/ggml-metal/ggml-metal.m +++ b/ggml/src/ggml-metal/ggml-metal.m -@@ -5214,6 +5214,7 @@ static void ggml_backend_metal_buffer_free_buffer(ggml_backend_buffer_t buffer) +@@ -6032,6 +6032,7 @@ static void ggml_backend_metal_buffer_free_buffer(ggml_backend_buffer_t buffer) } free(ctx); @@ -137,10 +124,10 @@ index 576f9581..1b56f858 100644 static void * ggml_backend_metal_buffer_get_base(ggml_backend_buffer_t buffer) { diff --git a/ggml/src/ggml-opencl/ggml-opencl.cpp b/ggml/src/ggml-opencl/ggml-opencl.cpp -index 05a2f4e6..392cc18d 100644 +index 8ba1e00d..8163e8dc 100644 --- a/ggml/src/ggml-opencl/ggml-opencl.cpp +++ b/ggml/src/ggml-opencl/ggml-opencl.cpp -@@ -1940,6 +1940,7 @@ struct ggml_backend_opencl_buffer_context { +@@ -2745,6 +2745,7 @@ struct ggml_backend_opencl_buffer_context { static void ggml_backend_opencl_buffer_free_buffer(ggml_backend_buffer_t buffer) { ggml_backend_opencl_buffer_context * ctx = (ggml_backend_opencl_buffer_context *) buffer->context; 
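
Patch 0001's hunks, several of which appear above, all make the same change: the final delete of the ggml_backend_buffer struct moves out of ggml_backend_buffer_free() and into each backend's free_buffer callback, so the struct is allocated and freed by the same compiler runtime (Ollama's Windows builds mix MSVC- and Clang-compiled objects). The resulting pattern, sketched for a hypothetical backend rather than any real one:

    // illustrative backend context, not a real ggml backend
    struct my_buffer_context { /* device allocations, staging memory, ... */ };

    static void my_backend_buffer_free_buffer(ggml_backend_buffer_t buffer) {
        auto * ctx = (my_buffer_context *) buffer->context;
        delete ctx;     // free the backend-owned context, as before
        delete buffer;  // new with this patch: the backend also frees the buffer struct
    }
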
delete ctx; @@ -149,22 +136,22 @@ index 05a2f4e6..392cc18d 100644 static void * ggml_backend_opencl_buffer_get_base(ggml_backend_buffer_t buffer) { diff --git a/ggml/src/ggml-rpc/ggml-rpc.cpp b/ggml/src/ggml-rpc/ggml-rpc.cpp -index 4f0abb5a..de1ec184 100644 +index df6ba540..2e395968 100644 --- a/ggml/src/ggml-rpc/ggml-rpc.cpp +++ b/ggml/src/ggml-rpc/ggml-rpc.cpp -@@ -483,6 +483,7 @@ static void ggml_backend_rpc_buffer_free_buffer(ggml_backend_buffer_t buffer) { +@@ -486,6 +486,7 @@ static void ggml_backend_rpc_buffer_free_buffer(ggml_backend_buffer_t buffer) { bool status = send_rpc_cmd(ctx->sock, RPC_CMD_FREE_BUFFER, &request, sizeof(request), nullptr, 0); - GGML_ASSERT(status); + RPC_STATUS_ASSERT(status); delete ctx; + delete buffer; } static void * ggml_backend_rpc_buffer_get_base(ggml_backend_buffer_t buffer) { diff --git a/ggml/src/ggml-sycl/ggml-sycl.cpp b/ggml/src/ggml-sycl/ggml-sycl.cpp -index 0ea72994..ae3a3c33 100644 +index 3992dad0..67503951 100644 --- a/ggml/src/ggml-sycl/ggml-sycl.cpp +++ b/ggml/src/ggml-sycl/ggml-sycl.cpp -@@ -320,6 +320,7 @@ ggml_backend_sycl_buffer_free_buffer(ggml_backend_buffer_t buffer) try { +@@ -331,6 +331,7 @@ ggml_backend_sycl_buffer_free_buffer(ggml_backend_buffer_t buffer) try { ggml_sycl_set_device(ctx->device); delete ctx; @@ -172,7 +159,7 @@ index 0ea72994..ae3a3c33 100644 } catch (sycl::exception const &exc) { std::cerr << exc.what() << "Exception caught at file:" << __FILE__ -@@ -765,6 +766,7 @@ struct ggml_backend_sycl_split_buffer_context { +@@ -792,6 +793,7 @@ struct ggml_backend_sycl_split_buffer_context { static void ggml_backend_sycl_split_buffer_free_buffer(ggml_backend_buffer_t buffer) { ggml_backend_sycl_split_buffer_context * ctx = (ggml_backend_sycl_split_buffer_context *)buffer->context; delete ctx; @@ -180,7 +167,7 @@ index 0ea72994..ae3a3c33 100644 } static void * ggml_backend_sycl_split_buffer_get_base(ggml_backend_buffer_t buffer) { -@@ -1099,6 +1101,7 @@ static const char * ggml_backend_sycl_host_buffer_type_name(ggml_backend_buffer_ +@@ -1134,6 +1136,7 @@ static const char * ggml_backend_sycl_host_buffer_type_name(ggml_backend_buffer_ static void ggml_backend_sycl_host_buffer_free_buffer(ggml_backend_buffer_t buffer) { ggml_sycl_host_free(buffer->context); @@ -189,10 +176,10 @@ index 0ea72994..ae3a3c33 100644 static ggml_backend_buffer_t ggml_backend_sycl_host_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) { diff --git a/ggml/src/ggml-vulkan/ggml-vulkan.cpp b/ggml/src/ggml-vulkan/ggml-vulkan.cpp -index e2b357fd..68768029 100644 +index 4070e248..394a2839 100644 --- a/ggml/src/ggml-vulkan/ggml-vulkan.cpp +++ b/ggml/src/ggml-vulkan/ggml-vulkan.cpp -@@ -8962,6 +8962,7 @@ static void ggml_backend_vk_buffer_free_buffer(ggml_backend_buffer_t buffer) { +@@ -10209,6 +10209,7 @@ static void ggml_backend_vk_buffer_free_buffer(ggml_backend_buffer_t buffer) { ggml_backend_vk_buffer_context * ctx = (ggml_backend_vk_buffer_context *)buffer->context; ggml_vk_destroy_buffer(ctx->dev_buffer); delete ctx; @@ -200,7 +187,7 @@ index e2b357fd..68768029 100644 } static void * ggml_backend_vk_buffer_get_base(ggml_backend_buffer_t buffer) { -@@ -9105,6 +9106,7 @@ static const char * ggml_backend_vk_host_buffer_name(ggml_backend_buffer_t buffe +@@ -10352,6 +10353,7 @@ static const char * ggml_backend_vk_host_buffer_name(ggml_backend_buffer_t buffe static void ggml_backend_vk_host_buffer_free_buffer(ggml_backend_buffer_t buffer) { VK_LOG_MEMORY("ggml_backend_vk_host_buffer_free_buffer()"); 
ggml_vk_host_free(vk_instance.devices[0], buffer->context); diff --git a/llama/patches/0002-pretokenizer.patch b/llama/patches/0002-pretokenizer.patch index 07aa4b0ea..15dcbc6ce 100644 --- a/llama/patches/0002-pretokenizer.patch +++ b/llama/patches/0002-pretokenizer.patch @@ -10,10 +10,10 @@ logs instead of throwing an error 1 file changed, 3 insertions(+), 11 deletions(-) diff --git a/src/llama-vocab.cpp b/src/llama-vocab.cpp -index 9389ca80..806c1b3d 100644 +index f7e03e70..8ebe11cf 100644 --- a/src/llama-vocab.cpp +++ b/src/llama-vocab.cpp -@@ -1503,16 +1503,7 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) { +@@ -1804,16 +1804,7 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) { if (type == LLAMA_VOCAB_TYPE_BPE) { add_space_prefix = false; clean_spaces = true; @@ -31,8 +31,8 @@ index 9389ca80..806c1b3d 100644 pre_type = LLAMA_VOCAB_PRE_TYPE_DEFAULT; } else if ( tokenizer_pre == "llama3" || -@@ -1651,7 +1642,8 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) { - pre_type = LLAMA_VOCAB_PRE_TYPE_SEED_CODER; +@@ -1975,7 +1966,8 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) { + pre_type = LLAMA_VOCAB_PRE_TYPE_KIMI_K2; clean_spaces = false; } else { - throw std::runtime_error(format("unknown pre-tokenizer type: '%s'", tokenizer_pre.c_str())); diff --git a/llama/patches/0004-clip-unicode.patch b/llama/patches/0003-clip-unicode.patch similarity index 94% rename from llama/patches/0004-clip-unicode.patch rename to llama/patches/0003-clip-unicode.patch index 957109783..548a0da30 100644 --- a/llama/patches/0004-clip-unicode.patch +++ b/llama/patches/0003-clip-unicode.patch @@ -10,10 +10,10 @@ filesystems for paths that include wide characters 1 file changed, 39 insertions(+) diff --git a/tools/mtmd/clip.cpp b/tools/mtmd/clip.cpp -index 41ba45a7..cdd8ca44 100644 +index 20c21733..f4f69cfc 100644 --- a/tools/mtmd/clip.cpp +++ b/tools/mtmd/clip.cpp -@@ -31,6 +31,19 @@ +@@ -28,6 +28,19 @@ #include #include @@ -33,7 +33,7 @@ index 41ba45a7..cdd8ca44 100644 struct clip_logger_state g_logger_state = {GGML_LOG_LEVEL_CONT, clip_log_callback_default, NULL}; enum ffn_op_type { -@@ -2190,7 +2203,29 @@ struct clip_model_loader { +@@ -2597,7 +2610,29 @@ struct clip_model_loader { { std::vector read_buf; @@ -63,7 +63,7 @@ index 41ba45a7..cdd8ca44 100644 if (!fin) { throw std::runtime_error(string_format("%s: failed to open %s\n", __func__, fname.c_str())); } -@@ -2217,7 +2252,11 @@ struct clip_model_loader { +@@ -2624,7 +2659,11 @@ struct clip_model_loader { ggml_backend_tensor_set(cur, read_buf.data(), 0, num_bytes); } } diff --git a/llama/patches/0003-embeddings.patch b/llama/patches/0003-embeddings.patch deleted file mode 100644 index 80d6b55e5..000000000 --- a/llama/patches/0003-embeddings.patch +++ /dev/null @@ -1,43 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: jmorganca -Date: Tue, 8 Apr 2025 15:28:34 -0700 -Subject: [PATCH] embeddings - -allow a loaded model in llama.cpp to be used for -both embeddings and causal attention text generation -instead of forcing one or the error ---- - src/llama-context.cpp | 6 +++--- - 1 file changed, 3 insertions(+), 3 deletions(-) - -diff --git a/src/llama-context.cpp b/src/llama-context.cpp -index 62246c10..dca22d8b 100644 ---- a/src/llama-context.cpp -+++ b/src/llama-context.cpp -@@ -901,7 +901,7 @@ int llama_context::decode(llama_batch & inp_batch) { - int64_t n_outputs_all = 0; - - // count outputs -- if (batch.logits 
&& !embd_pooled) { -+ if (batch.logits) { - for (uint32_t i = 0; i < n_tokens_all; ++i) { - n_outputs_all += batch.logits[i] != 0; - } -@@ -982,7 +982,7 @@ int llama_context::decode(llama_batch & inp_batch) { - // ggml_graph_dump_dot(gf, NULL, "llama.dot"); - //} - -- auto * t_logits = cparams.embeddings ? nullptr : res->get_logits(); -+ auto * t_logits = cparams.causal_attn ? res->get_logits() : nullptr; - auto * t_embd = cparams.embeddings ? res->get_embd() : nullptr; - - if (t_embd && res->get_embd_pooled()) { -@@ -1151,7 +1151,7 @@ int32_t llama_context::output_reserve(int32_t n_outputs) { - const auto n_embd = hparams.n_embd; - - // TODO: use a per-batch flag for logits presence instead -- bool has_logits = !cparams.embeddings; -+ bool has_logits = cparams.causal_attn; - bool has_embd = cparams.embeddings && (cparams.pooling_type == LLAMA_POOLING_TYPE_NONE); - - // TODO: hacky enc-dec support diff --git a/llama/patches/0005-solar-pro.patch b/llama/patches/0004-solar-pro.patch similarity index 87% rename from llama/patches/0005-solar-pro.patch rename to llama/patches/0004-solar-pro.patch index b4553149e..e2ece004b 100644 --- a/llama/patches/0005-solar-pro.patch +++ b/llama/patches/0004-solar-pro.patch @@ -15,18 +15,18 @@ adds support for the Solar Pro architecture 7 files changed, 248 insertions(+) diff --git a/src/llama-arch.cpp b/src/llama-arch.cpp -index f2bc8ca7..5ab3f572 100644 +index 18dcc6dd..4b285646 100644 --- a/src/llama-arch.cpp +++ b/src/llama-arch.cpp -@@ -69,6 +69,7 @@ static const std::map LLM_ARCH_NAMES = { - { LLM_ARCH_GRANITE, "granite" }, +@@ -78,6 +78,7 @@ static const std::map LLM_ARCH_NAMES = { { LLM_ARCH_GRANITE_MOE, "granitemoe" }, + { LLM_ARCH_GRANITE_HYBRID, "granitehybrid" }, { LLM_ARCH_CHAMELEON, "chameleon" }, + { LLM_ARCH_SOLAR, "solar" }, { LLM_ARCH_WAVTOKENIZER_DEC, "wavtokenizer-dec" }, { LLM_ARCH_PLM, "plm" }, { LLM_ARCH_BAILINGMOE, "bailingmoe" }, -@@ -142,6 +143,7 @@ static const std::map LLM_KV_NAMES = { +@@ -164,6 +165,7 @@ static const std::map LLM_KV_NAMES = { { LLM_KV_ATTENTION_RELATIVE_BUCKETS_COUNT, "%s.attention.relative_buckets_count" }, { LLM_KV_ATTENTION_SLIDING_WINDOW, "%s.attention.sliding_window" }, { LLM_KV_ATTENTION_SCALE, "%s.attention.scale" }, @@ -34,7 +34,7 @@ index f2bc8ca7..5ab3f572 100644 { LLM_KV_ATTENTION_KEY_LENGTH_MLA, "%s.attention.key_length_mla" }, { LLM_KV_ATTENTION_VALUE_LENGTH_MLA, "%s.attention.value_length_mla" }, -@@ -1502,6 +1504,24 @@ static const std::map> LLM_TENSOR_N +@@ -1794,6 +1796,24 @@ static const std::map> LLM_TENSOR_N { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" }, }, }, @@ -59,8 +59,8 @@ index f2bc8ca7..5ab3f572 100644 { LLM_ARCH_WAVTOKENIZER_DEC, { -@@ -1680,6 +1700,7 @@ static const std::map LLM_TENSOR_INFOS = { - {LLM_TENSOR_FFN_EXP_PROBS_B, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}}, +@@ -2219,6 +2239,7 @@ static const std::map LLM_TENSOR_INFOS = { + {LLM_TENSOR_LAUREL_POST_NORM, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, // this tensor is loaded for T5, but never used {LLM_TENSOR_DEC_CROSS_ATTN_REL_B, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_NONE}}, + {LLM_TENSOR_BSKCN_TV, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, @@ -68,18 +68,18 @@ index f2bc8ca7..5ab3f572 100644 {LLM_TENSOR_POS_NET_NORM, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, {LLM_TENSOR_POS_NET_NORM1, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, diff --git a/src/llama-arch.h b/src/llama-arch.h -index 41a023da..525c1b7d 100644 +index 7af587e7..3ea994c7 100644 --- a/src/llama-arch.h +++ b/src/llama-arch.h -@@ -73,6 +73,7 @@ enum llm_arch { 
- LLM_ARCH_GRANITE, +@@ -82,6 +82,7 @@ enum llm_arch { LLM_ARCH_GRANITE_MOE, + LLM_ARCH_GRANITE_HYBRID, LLM_ARCH_CHAMELEON, + LLM_ARCH_SOLAR, LLM_ARCH_WAVTOKENIZER_DEC, LLM_ARCH_PLM, LLM_ARCH_BAILINGMOE, -@@ -146,6 +147,7 @@ enum llm_kv { +@@ -168,6 +169,7 @@ enum llm_kv { LLM_KV_ATTENTION_RELATIVE_BUCKETS_COUNT, LLM_KV_ATTENTION_SLIDING_WINDOW, LLM_KV_ATTENTION_SCALE, @@ -87,7 +87,7 @@ index 41a023da..525c1b7d 100644 LLM_KV_ATTENTION_KEY_LENGTH_MLA, LLM_KV_ATTENTION_VALUE_LENGTH_MLA, -@@ -346,6 +348,7 @@ enum llm_tensor { +@@ -394,6 +396,7 @@ enum llm_tensor { LLM_TENSOR_ENC_OUTPUT_NORM, LLM_TENSOR_CLS, LLM_TENSOR_CLS_OUT, @@ -96,11 +96,11 @@ index 41a023da..525c1b7d 100644 LLM_TENSOR_CONVNEXT_DW, LLM_TENSOR_CONVNEXT_NORM, diff --git a/src/llama-hparams.cpp b/src/llama-hparams.cpp -index 90dfe7a7..8a667960 100644 +index 7a06368d..35fc054f 100644 --- a/src/llama-hparams.cpp +++ b/src/llama-hparams.cpp -@@ -70,6 +70,14 @@ uint32_t llama_hparams::n_embd_v_s() const { - return ssm_d_state * ssm_d_inner; +@@ -146,6 +146,14 @@ uint32_t llama_hparams::n_pos_per_embd() const { + return rope_type == LLAMA_ROPE_TYPE_MROPE ? 4 : 1; } +bool llama_hparams::n_bskcn(uint32_t n, uint32_t il) const { @@ -113,12 +113,12 @@ index 90dfe7a7..8a667960 100644 + bool llama_hparams::is_swa(uint32_t il) const { if (il < n_layer) { - return n_swa > 0 && n_swa_pattern > 0 && il % n_swa_pattern < (n_swa_pattern - 1); + return swa_layers[il]; diff --git a/src/llama-hparams.h b/src/llama-hparams.h -index 7ee6a5b7..48dce407 100644 +index bd231224..29bd9056 100644 --- a/src/llama-hparams.h +++ b/src/llama-hparams.h -@@ -55,6 +55,8 @@ struct llama_hparams { +@@ -62,6 +62,8 @@ struct llama_hparams { std::array n_head_kv_arr; std::array n_ff_arr; @@ -127,9 +127,9 @@ index 7ee6a5b7..48dce407 100644 uint32_t n_layer_dense_lead = 0; uint32_t n_lora_q = 0; uint32_t n_lora_kv = 0; -@@ -154,6 +156,9 @@ struct llama_hparams { - // dimension of the recurrent state embeddings - uint32_t n_embd_v_s() const; +@@ -220,6 +222,9 @@ struct llama_hparams { + + uint32_t n_pos_per_embd() const; + // Block skip connection + bool n_bskcn(uint32_t n, uint32_t il) const; @@ -138,10 +138,10 @@ index 7ee6a5b7..48dce407 100644 }; diff --git a/src/llama-model-loader.cpp b/src/llama-model-loader.cpp -index 4cce5166..7f6617fa 100644 +index f71c40f8..7eab9b68 100644 --- a/src/llama-model-loader.cpp +++ b/src/llama-model-loader.cpp -@@ -439,6 +439,7 @@ namespace GGUFMeta { +@@ -465,6 +465,7 @@ namespace GGUFMeta { // TODO: this is not very clever - figure out something better template bool llama_model_loader::get_key_or_arr>(enum llm_kv kid, std::array & result, uint32_t n, bool required); template bool llama_model_loader::get_key_or_arr>(enum llm_kv kid, std::array & result, uint32_t n, bool required); @@ -150,10 +150,10 @@ index 4cce5166..7f6617fa 100644 llama_model_loader::llama_model_loader( const std::string & fname, diff --git a/src/llama-model.cpp b/src/llama-model.cpp -index 3a4e72a3..db62973f 100644 +index 58ca7df7..280129e1 100644 --- a/src/llama-model.cpp +++ b/src/llama-model.cpp -@@ -1402,6 +1402,21 @@ void llama_model::load_hparams(llama_model_loader & ml) { +@@ -1706,6 +1706,21 @@ void llama_model::load_hparams(llama_model_loader & ml) { default: type = LLM_TYPE_UNKNOWN; } } break; @@ -175,7 +175,7 @@ index 3a4e72a3..db62973f 100644 case LLM_ARCH_WAVTOKENIZER_DEC: { ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps); -@@ -3774,6 +3789,34 @@ bool llama_model::load_tensors(llama_model_loader & ml) { +@@ -4793,6 +4808,34 @@ bool 
llama_model::load_tensors(llama_model_loader & ml) { layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0); @@ -210,12 +210,12 @@ index 3a4e72a3..db62973f 100644 layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0); layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0); layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0); -@@ -12397,6 +12440,165 @@ struct llm_build_chameleon : public llm_graph_context { +@@ -15495,6 +15538,165 @@ struct llm_build_granite_hybrid : public llm_graph_context_mamba { } }; +struct llm_build_solar : public llm_graph_context { -+ llm_build_solar(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { ++ llm_build_solar(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) { + const int64_t n_embd_head = hparams.n_embd_head_v; + GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); + GGML_ASSERT(n_embd_head == hparams.n_rot); @@ -270,7 +270,7 @@ index 3a4e72a3..db62973f 100644 + // self-attention + { + // rope freq factors for llama3; may return nullptr for llama2 and other models -+ ggml_tensor * rope_factors = model.get_rope_factors(n_ctx_per_seq, il); ++ ggml_tensor * rope_factors = model.get_rope_factors(cparams, il); + + // compute Q and K and RoPE them + ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur); @@ -314,7 +314,7 @@ index 3a4e72a3..db62973f 100644 + cb(Kcur, "Kcur", il); + cb(Vcur, "Vcur", il); + -+ cur = build_attn(inp_attn, gf, ++ cur = build_attn(inp_attn, + model.layers[il].wo, model.layers[il].bo, + Qcur, Kcur, Vcur, nullptr, nullptr, kq_scale, il); + cb(cur, "attn_out", il); @@ -373,33 +373,33 @@ index 3a4e72a3..db62973f 100644 + } +}; + - struct llm_build_wavtokenizer_dec : public llm_graph_context { - llm_build_wavtokenizer_dec(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf) : llm_graph_context(params) { - ggml_tensor * cur; -@@ -13157,6 +13359,10 @@ llm_graph_result_ptr llama_model::build_graph( + // ref: https://github.com/facebookresearch/chameleon + // based on the original build_llama() function, changes: + // * qk-norm +@@ -18439,6 +18641,10 @@ ggml_cgraph * llama_model::build_graph(const llm_graph_params & params) const { { - llm = std::make_unique(*this, params, gf); + llm = std::make_unique(*this, params); } break; + case LLM_ARCH_SOLAR: + { -+ llm = std::make_unique(*this, params, gf); ++ llm = std::make_unique(*this, params); + } break; case LLM_ARCH_WAVTOKENIZER_DEC: { - llm = std::make_unique(*this, params, gf); -@@ -13301,6 +13507,7 @@ llama_rope_type llama_model_rope_type(const llama_model * model) { - case LLM_ARCH_GRANITE: + llm = std::make_unique(*this, params); +@@ -18652,6 +18858,7 @@ llama_rope_type llama_model_rope_type(const llama_model * model) { case LLM_ARCH_GRANITE_MOE: + case LLM_ARCH_GRANITE_HYBRID: case LLM_ARCH_CHAMELEON: + case LLM_ARCH_SOLAR: case LLM_ARCH_BAILINGMOE: - return LLAMA_ROPE_TYPE_NORM; - + case LLM_ARCH_NEO_BERT: + case LLM_ARCH_SMOLLM3: diff --git a/src/llama-model.h b/src/llama-model.h -index 6bdec263..43746c7d 100644 +index 6fcd74d5..09964533 100644 --- a/src/llama-model.h +++ b/src/llama-model.h -@@ -65,6 +65,7 @@ enum llm_type { +@@ -70,6 +70,7 @@ enum llm_type { LLM_TYPE_15B, LLM_TYPE_16B, LLM_TYPE_20B, @@ -407,9 +407,9 @@ index 6bdec263..43746c7d 100644 LLM_TYPE_27B, LLM_TYPE_30B, LLM_TYPE_32B, -@@ -315,6 +316,8 @@ struct llama_layer { - 
struct ggml_tensor * ffn_up_scale = nullptr; - struct ggml_tensor * ffn_down_scale = nullptr; +@@ -367,6 +368,8 @@ struct llama_layer { + // openai-moe + struct ggml_tensor * attn_sinks = nullptr; + struct ggml_tensor * bskcn_tv = nullptr; + diff --git a/llama/patches/0006-fix-deepseek-deseret-regex.patch b/llama/patches/0005-fix-deepseek-deseret-regex.patch similarity index 94% rename from llama/patches/0006-fix-deepseek-deseret-regex.patch rename to llama/patches/0005-fix-deepseek-deseret-regex.patch index ff4b57577..1f8b55426 100644 --- a/llama/patches/0006-fix-deepseek-deseret-regex.patch +++ b/llama/patches/0005-fix-deepseek-deseret-regex.patch @@ -12,10 +12,10 @@ regex 2 files changed, 22 insertions(+), 1 deletion(-) diff --git a/src/llama-vocab.cpp b/src/llama-vocab.cpp -index 806c1b3d..10f34d33 100644 +index 8ebe11cf..c011008f 100644 --- a/src/llama-vocab.cpp +++ b/src/llama-vocab.cpp -@@ -298,7 +298,7 @@ struct llm_tokenizer_bpe : llm_tokenizer { +@@ -299,7 +299,7 @@ struct llm_tokenizer_bpe : llm_tokenizer { case LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_LLM: regex_exprs = { "[\r\n]", @@ -25,7 +25,7 @@ index 806c1b3d..10f34d33 100644 "\\s+$", "[一-龥ࠀ-一가-퟿]+", diff --git a/src/unicode.cpp b/src/unicode.cpp -index e63bb4ab..73cb2b1a 100644 +index 65f36651..ce336a22 100644 --- a/src/unicode.cpp +++ b/src/unicode.cpp @@ -2,6 +2,11 @@ @@ -62,7 +62,7 @@ index e63bb4ab..73cb2b1a 100644 #if defined(__clang__) // disable C++17 deprecation warning for std::codecvt_utf8 # pragma clang diagnostic push -@@ -213,6 +233,7 @@ static inline std::wstring unicode_wstring_from_utf8(const std::string & s) { +@@ -218,6 +238,7 @@ static inline std::wstring unicode_wstring_from_utf8(const std::string & s) { #endif return conv.from_bytes(s); diff --git a/llama/patches/0007-maintain-ordering-for-rules-for-grammar.patch b/llama/patches/0006-maintain-ordering-for-rules-for-grammar.patch similarity index 93% rename from llama/patches/0007-maintain-ordering-for-rules-for-grammar.patch rename to llama/patches/0006-maintain-ordering-for-rules-for-grammar.patch index 4c2192887..17bd3989d 100644 --- a/llama/patches/0007-maintain-ordering-for-rules-for-grammar.patch +++ b/llama/patches/0006-maintain-ordering-for-rules-for-grammar.patch @@ -8,10 +8,10 @@ Subject: [PATCH] maintain ordering for rules for grammar 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/common/json-schema-to-grammar.cpp b/common/json-schema-to-grammar.cpp -index 5b3059c2..656b3eca 100644 +index 637891f5..98b8280f 100644 --- a/common/json-schema-to-grammar.cpp +++ b/common/json-schema-to-grammar.cpp -@@ -349,7 +349,7 @@ private: +@@ -307,7 +307,7 @@ private: friend std::string build_grammar(const std::function & cb, const common_grammar_options & options); std::function _fetch_json; bool _dotall; diff --git a/llama/patches/0009-sort-devices-by-score.patch b/llama/patches/0007-sort-devices-by-score.patch similarity index 89% rename from llama/patches/0009-sort-devices-by-score.patch rename to llama/patches/0007-sort-devices-by-score.patch index e27d1ae92..fa3522f97 100644 --- a/llama/patches/0009-sort-devices-by-score.patch +++ b/llama/patches/0007-sort-devices-by-score.patch @@ -11,10 +11,10 @@ with the fastest acceleration is loaded 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/ggml/src/ggml-backend-reg.cpp b/ggml/src/ggml-backend-reg.cpp -index 405d8e31..4e67d243 100644 +index 6c315137..3040b2aa 100644 --- a/ggml/src/ggml-backend-reg.cpp +++ b/ggml/src/ggml-backend-reg.cpp -@@ -157,7 +157,7 @@ struct ggml_backend_reg_entry { +@@ 
-162,7 +162,7 @@ struct ggml_backend_reg_entry { struct ggml_backend_registry { std::vector backends; @@ -23,7 +23,7 @@ index 405d8e31..4e67d243 100644 ggml_backend_registry() { #ifdef GGML_USE_CUDA -@@ -202,7 +202,7 @@ struct ggml_backend_registry { +@@ -207,7 +207,7 @@ struct ggml_backend_registry { } } @@ -32,7 +32,7 @@ index 405d8e31..4e67d243 100644 if (!reg) { return; } -@@ -213,15 +213,20 @@ struct ggml_backend_registry { +@@ -218,15 +218,20 @@ struct ggml_backend_registry { #endif backends.push_back({ reg, std::move(handle) }); for (size_t i = 0; i < ggml_backend_reg_dev_count(reg); i++) { @@ -56,7 +56,7 @@ index 405d8e31..4e67d243 100644 } ggml_backend_reg_t load_backend(const fs::path & path, bool silent) { -@@ -265,7 +270,7 @@ struct ggml_backend_registry { +@@ -270,7 +275,7 @@ struct ggml_backend_registry { GGML_LOG_INFO("%s: loaded %s backend from %s\n", __func__, ggml_backend_reg_name(reg), path_str(path).c_str()); @@ -65,7 +65,7 @@ index 405d8e31..4e67d243 100644 return reg; } -@@ -288,7 +293,7 @@ struct ggml_backend_registry { +@@ -293,7 +298,7 @@ struct ggml_backend_registry { // remove devices devices.erase( std::remove_if(devices.begin(), devices.end(), @@ -74,7 +74,7 @@ index 405d8e31..4e67d243 100644 devices.end()); // remove backend -@@ -346,7 +351,7 @@ size_t ggml_backend_dev_count() { +@@ -351,7 +356,7 @@ size_t ggml_backend_dev_count() { ggml_backend_dev_t ggml_backend_dev_get(size_t index) { GGML_ASSERT(index < ggml_backend_dev_count()); diff --git a/llama/patches/0010-add-phony-target-ggml-cpu-for-all-cpu-variants.patch b/llama/patches/0008-add-phony-target-ggml-cpu-for-all-cpu-variants.patch similarity index 58% rename from llama/patches/0010-add-phony-target-ggml-cpu-for-all-cpu-variants.patch rename to llama/patches/0008-add-phony-target-ggml-cpu-for-all-cpu-variants.patch index 21c1fc42f..aa64e1ed7 100644 --- a/llama/patches/0010-add-phony-target-ggml-cpu-for-all-cpu-variants.patch +++ b/llama/patches/0008-add-phony-target-ggml-cpu-for-all-cpu-variants.patch @@ -8,22 +8,22 @@ Subject: [PATCH] add phony target ggml-cpu for all cpu variants 1 file changed, 2 insertions(+) diff --git a/ggml/src/CMakeLists.txt b/ggml/src/CMakeLists.txt -index ddea5ad3..45918bf6 100644 +index 177fb282..f5a5079a 100644 --- a/ggml/src/CMakeLists.txt +++ b/ggml/src/CMakeLists.txt -@@ -279,6 +279,7 @@ function(ggml_add_cpu_backend_variant tag_name) - endforeach() +@@ -304,6 +304,7 @@ function(ggml_add_cpu_backend_variant tag_name) + endif() ggml_add_cpu_backend_variant_impl(${tag_name}) + add_dependencies(ggml-cpu ggml-cpu-${tag_name}) endfunction() ggml_add_backend(CPU) -@@ -287,6 +288,7 @@ if (GGML_CPU_ALL_VARIANTS) - if (NOT GGML_BACKEND_DL) - message(FATAL_ERROR "GGML_CPU_ALL_VARIANTS requires GGML_BACKEND_DL") +@@ -314,6 +315,7 @@ if (GGML_CPU_ALL_VARIANTS) + elseif (GGML_CPU_ARM_ARCH) + message(FATAL_ERROR "Cannot use both GGML_CPU_ARM_ARCH and GGML_CPU_ALL_VARIANTS") endif() + add_custom_target(ggml-cpu) - ggml_add_cpu_backend_variant(x64) - ggml_add_cpu_backend_variant(sse42 SSE42) - ggml_add_cpu_backend_variant(sandybridge SSE42 AVX) + if (GGML_SYSTEM_ARCH STREQUAL "x86") + ggml_add_cpu_backend_variant(x64) + ggml_add_cpu_backend_variant(sse42 SSE42) diff --git a/llama/patches/0008-ensure-KV-cache-is-fully-defragmented.patch b/llama/patches/0008-ensure-KV-cache-is-fully-defragmented.patch deleted file mode 100644 index 82fe219c0..000000000 --- a/llama/patches/0008-ensure-KV-cache-is-fully-defragmented.patch +++ /dev/null @@ -1,352 +0,0 @@ -From 
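The sort-devices-by-score patch above keeps the backend registry's device list ordered so that the device with the fastest acceleration comes first. A minimal sketch of that ordering idea, with a made-up dev struct and register_device helper standing in for the registry's internal (device, score) bookkeeping, not the patch's exact code:

#include <algorithm>
#include <cstdio>
#include <string>
#include <vector>

// Hypothetical stand-in for the registry's per-device score bookkeeping.
struct dev {
    std::string name;
    int         score; // higher = faster acceleration
};

// Insert each new device so the vector stays sorted by descending score;
// index 0 is then always the most capable device.
void register_device(std::vector<dev> & devices, dev d) {
    auto it = std::upper_bound(devices.begin(), devices.end(), d,
        [](const dev & a, const dev & b) { return a.score > b.score; });
    devices.insert(it, d);
}

int main() {
    std::vector<dev> devices;
    register_device(devices, {"CPU", 1});
    register_device(devices, {"CUDA0", 100});
    register_device(devices, {"Metal", 50});
    std::printf("best: %s\n", devices[0].name.c_str()); // prints CUDA0
}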
0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
-From: jmorganca
-Date: Tue, 15 Apr 2025 14:27:40 -0400
-Subject: [PATCH] ensure KV cache is fully defragmented
-
-Sometimes the KV cache requires defragmentation even without
-triggering the threshold heuristic. In this case, decoding
-will not be able to find a KV cache slot. This is particularly
-difficult for the caller to handle if it happens in between
-ubatches. To avoid this, we should immediately trigger a defrag.
-
-In addition, a heavily fragmented cache can require more than
-max_moves to defragment. Currently, we stop when we hit the limit,
-but this can leave a cache that still does not have adequate space
-even after defragmentation is triggered. Instead, we should do
-multiple batches of processing until everything is complete.
----
- src/llama-context.cpp | 18 ++++---
- src/llama-context.h | 1 +
- src/llama-kv-cache.cpp | 107 ++++++++++++++---------------------------
- src/llama-kv-cache.h | 12 ++++-
- 4 files changed, 59 insertions(+), 79 deletions(-)
-
-diff --git a/src/llama-context.cpp b/src/llama-context.cpp
-index dca22d8b..1f3a3956 100644
---- a/src/llama-context.cpp
-+++ b/src/llama-context.cpp
-@@ -947,9 +947,12 @@ int llama_context::decode(llama_batch & inp_batch) {
-
- // find KV slot
- if (!kv_self->find_slot(ubatch)) {
-- LLAMA_LOG_WARN("%s: failed to find KV cache slot for ubatch of size %d\n", __func__, ubatch.n_tokens);
--
-- return 1;
-+ kv_self->defrag_sched(-1.0f);
-+ kv_self->update(*this);
-+ if (!kv_self->find_slot(ubatch)) {
-+ LLAMA_LOG_WARN("%s: failed to find KV cache slot for ubatch of size %d\n", __func__, ubatch.n_tokens);
-+ return 1;
-+ }
- }
-
- ggml_backend_sched_reset(sched.get());
-@@ -1965,9 +1968,12 @@ void llama_context::opt_epoch_iter(
-
- // TODO: not sure if this is needed
- if (!kv_self->find_slot(ubatch)) {
-- LLAMA_LOG_WARN("%s: failed to find KV cache slot for ubatch of size %d\n", __func__, ubatch.n_tokens);
--
-- GGML_ABORT("TODO: handle this error");
-+ kv_self->defrag_sched(-1.0f);
-+ kv_self->update(*this);
-+ if (!kv_self->find_slot(ubatch)) {
-+ LLAMA_LOG_WARN("%s: failed to find KV cache slot for ubatch of size %d\n", __func__, ubatch.n_tokens);
-+ GGML_ABORT("TODO: handle this error");
-+ }
- }
-
- auto * gf = graph_init();
-diff --git a/src/llama-context.h b/src/llama-context.h
-index c0ceacb1..0264e937 100644
---- a/src/llama-context.h
-+++ b/src/llama-context.h
-@@ -5,6 +5,7 @@
- #include "llama-cparams.h"
- #include "llama-graph.h"
- #include "llama-adapter.h"
-+#include "llama-kv-cache.h"
-
- #include "ggml-cpp.h"
- #include "ggml-opt.h"
-diff --git a/src/llama-kv-cache.cpp b/src/llama-kv-cache.cpp
-index 3dcad65b..60e67b03 100644
---- a/src/llama-kv-cache.cpp
-+++ b/src/llama-kv-cache.cpp
-@@ -364,8 +364,6 @@ void llama_kv_cache_unified::commit() {
- }
-
- bool llama_kv_cache_unified::update(llama_context & lctx) {
-- bool need_reserve = false;
--
- auto * sched = lctx.get_sched();
-
- if (has_shift) {
-@@ -388,8 +386,6 @@ bool llama_kv_cache_unified::update(llama_context & lctx) {
- res->set_inputs(nullptr);
-
- lctx.graph_compute(gf, false);
--
-- need_reserve = true;
- }
-
- {
-@@ -403,27 +399,36 @@ bool llama_kv_cache_unified::update(llama_context & lctx) {
-
- if (do_defrag) {
- LLAMA_LOG_DEBUG("%s: defragmenting KV cache\n", __func__);
-+ const uint32_t n_max_nodes = lctx.graph_max_nodes();
-+ const uint32_t max_moves = (n_max_nodes - 2*model.hparams.n_layer)/(6*model.hparams.n_layer);
-+ if (!defrag_prepare(n_max_nodes)) {
-+
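The core idea of this deleted defragmentation patch is batching: rather than stopping once max_moves is reached, the full move list in defrag_info.moves is processed in chunks so that each compute graph stays under the node budget. A self-contained sketch of that loop, with kv_move and process_chunk as stand-ins for the patch's llama_kv_defrag_move and graph-building code:

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <vector>

// Stand-in for the patch's llama_kv_defrag_move {src, dst, len}.
struct kv_move { uint32_t src, dst, len; };

// Process moves in batches of at most max_moves, building one graph per
// batch instead of abandoning whatever does not fit in a single graph.
void process_in_batches(const std::vector<kv_move> & moves, size_t max_moves,
                        const std::function<void(const std::vector<kv_move> &)> & process_chunk) {
    for (size_t i = 0; i < moves.size(); i += max_moves) {
        const size_t end = std::min(i + max_moves, moves.size());
        std::vector<kv_move> chunk(moves.begin() + i, moves.begin() + end);
        process_chunk(chunk); // in the patch: build and compute one defrag graph
    }
}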
LLAMA_LOG_ERROR("%s: failed to prepare defragmentation\n", __func__); -+ return false; -+ } -+ -+ for (std::size_t i = 0; i < defrag_info.moves.size(); i += max_moves) { -+ std::vector chunk; -+ auto end = std::min(i + max_moves, defrag_info.moves.size()); -+ chunk.assign(defrag_info.moves.begin() + i, defrag_info.moves.begin() + end); - -- if (defrag_prepare(lctx.graph_max_nodes())) { - ggml_backend_sched_reset(sched); - - auto * gf = lctx.graph_init(); - -- auto res = build_graph_defrag(lctx.get_cparams(), lctx.get_ctx_compute(), gf); -+ auto res = build_graph_defrag(lctx.get_cparams(), lctx.get_ctx_compute(), gf, chunk); - - ggml_backend_sched_alloc_graph(sched, gf); - - res->set_inputs(nullptr); - - lctx.graph_compute(gf, false); -- -- need_reserve = true; - } - - do_defrag = false; - } - -- return need_reserve; -+ // we never need to reserve a worst case graph -+ return false; - } - - void llama_kv_cache_unified::defrag_sched(float thold) { -@@ -707,11 +712,10 @@ llm_graph_result_ptr llama_kv_cache_unified::build_graph_shift( - llm_graph_result_ptr llama_kv_cache_unified::build_graph_defrag( - const llama_cparams & cparams, - ggml_context * ctx, -- ggml_cgraph * gf) const { -+ ggml_cgraph * gf, -+ const std::vector & moves) const { - auto res = std::make_unique(); - -- const auto & ids = defrag_info.ids; -- - #if 0 - // CPU defrag - // -@@ -783,32 +787,20 @@ llm_graph_result_ptr llama_kv_cache_unified::build_graph_defrag( - ggml_backend_tensor_set(v_l[il], buf_v.data(), 0, buf_v.size()); - } - #else -- for (uint32_t i = 0; i < ids.size(); ++i) { -- const uint32_t id = ids[i]; -- -- if (i == id || id == ids.size()) { -- continue; -- } -- -- uint32_t nm = 1; -- -- while (i + nm < ids.size() && ids[i + nm] == id + nm) { -- nm++; -- } -- -+ for (const auto & move : moves) { - for (uint32_t il = 0; il < hparams.n_layer; ++il) { // NOLINT - const int64_t n_embd_k_gqa = hparams.n_embd_k_gqa(il); - const int64_t n_embd_v_gqa = hparams.n_embd_v_gqa(il); - - ggml_tensor * view_k_src = ggml_view_2d(ctx, k_l[il], -- n_embd_k_gqa, nm, -+ n_embd_k_gqa, move.len, - ggml_row_size(k_l[il]->type, n_embd_k_gqa), -- ggml_row_size(k_l[il]->type, n_embd_k_gqa*i)); -+ ggml_row_size(k_l[il]->type, n_embd_k_gqa*move.src)); - - ggml_tensor * view_k_dst = ggml_view_2d(ctx, k_l[il], -- n_embd_k_gqa, nm, -+ n_embd_k_gqa, move.len, - ggml_row_size(k_l[il]->type, n_embd_k_gqa), -- ggml_row_size(k_l[il]->type, n_embd_k_gqa*id)); -+ ggml_row_size(k_l[il]->type, n_embd_k_gqa*move.dst)); - - ggml_tensor * view_v_src; - ggml_tensor * view_v_dst; -@@ -816,31 +808,29 @@ llm_graph_result_ptr llama_kv_cache_unified::build_graph_defrag( - if (cparams.flash_attn) { - // NOTE: the V cache is not transposed when using flash attention - view_v_src = ggml_view_2d(ctx, v_l[il], -- n_embd_v_gqa, nm, -+ n_embd_v_gqa, move.len, - ggml_row_size(v_l[il]->type, n_embd_v_gqa), -- ggml_row_size(v_l[il]->type, n_embd_v_gqa*i)); -+ ggml_row_size(v_l[il]->type, n_embd_v_gqa*move.dst)); - - view_v_dst = ggml_view_2d(ctx, v_l[il], -- n_embd_v_gqa, nm, -+ move.len, n_embd_v_gqa, - ggml_row_size(v_l[il]->type, n_embd_v_gqa), -- ggml_row_size(v_l[il]->type, n_embd_v_gqa*id)); -+ ggml_row_size(v_l[il]->type, move.src)); - } else { - view_v_src = ggml_view_2d(ctx, v_l[il], -- nm, n_embd_v_gqa, -+ move.len, n_embd_v_gqa, - ggml_row_size(v_l[il]->type, size), -- ggml_row_size(v_l[il]->type, i)); -+ ggml_row_size(v_l[il]->type, move.src)); - - view_v_dst = ggml_view_2d(ctx, v_l[il], -- nm, n_embd_v_gqa, -+ move.len, n_embd_v_gqa, - 
ggml_row_size(v_l[il]->type, size), -- ggml_row_size(v_l[il]->type, id)); -+ ggml_row_size(v_l[il]->type, move.dst)); - } - - ggml_build_forward_expand(gf, ggml_cpy(ctx, view_k_src, view_k_dst)); - ggml_build_forward_expand(gf, ggml_cpy(ctx, view_v_src, view_v_dst)); - } -- -- i += nm - 1; - } - - //LLAMA_LOG_INFO("gf->n_nodes = %d\n", gf->n_nodes); -@@ -857,17 +847,7 @@ bool llama_kv_cache_unified::defrag_prepare(int32_t n_max_nodes) { - - assert(n_used <= n_kv); - -- //const int64_t t_start = ggml_time_us(); -- -- // number of cells moved -- uint32_t n_moves = 0; -- -- // each move requires 6*n_layer tensors (see graph_build_kv_self_defrag) -- // - source view, destination view, copy operation -- // - x2 for keys and values -- //const uint32_t max_moves = max_nodes()/(6*n_layer); -- // TODO: tmp fix https://github.com/ggerganov/llama.cpp/issues/6685#issuecomment-2057579516 -- const uint32_t max_moves = (n_max_nodes - 2*n_layer)/(6*n_layer); -+ defrag_info.moves.clear(); - - // determine which KV cells to move where - // -@@ -875,10 +855,7 @@ bool llama_kv_cache_unified::defrag_prepare(int32_t n_max_nodes) { - // - // if ids[i] == i || ids[i] == n_kv, then cell i is not moved - // -- auto & ids = defrag_info.ids; -- -- ids.clear(); -- ids.resize(n_kv, n_kv); -+ std::vector ids(n_kv, n_kv); - - for (uint32_t i0 = 0; i0 < n_used; ++i0) { - const auto & cell0 = cells[i0]; -@@ -927,19 +904,11 @@ bool llama_kv_cache_unified::defrag_prepare(int32_t n_max_nodes) { - // are we moving a continuous block of memory? - bool cont = false; - -- // should we stop searching for the next move? -- bool stop = false; -- - // go back and move the nf cells to the hole - for (; i1 < n_kv; ++i1) { - auto & cell1 = cells[i1]; - - if (cell1.is_empty() || ids[i1] != n_kv) { -- if (n_moves == max_moves) { -- stop = true; -- break; -- } -- - cont = false; - continue; - } -@@ -955,8 +924,10 @@ bool llama_kv_cache_unified::defrag_prepare(int32_t n_max_nodes) { - head = n_used; - - if (!cont) { -- n_moves++; -+ defrag_info.moves.push_back({i1, i0 + nf, 1}); - cont = true; -+ } else { -+ defrag_info.moves.back().len++; - } - - nf++; -@@ -966,22 +937,16 @@ bool llama_kv_cache_unified::defrag_prepare(int32_t n_max_nodes) { - } - } - -- if (stop || n_moves == max_moves) { -- break; -- } -- - //LLAMA_LOG_INFO("(tmp log) KV defrag: move [%u, %u) to [%u, %u)\n", is, i1 + 1, i0, i0 + nh); - - i0 += nh - 1; - } - -- if (n_moves == 0) { -+ if (defrag_info.moves.size() == 0) { - return false; - } - -- LLAMA_LOG_DEBUG("%s: (tmp log) KV defrag cell moves: %u\n", __func__, n_moves); -- -- LLAMA_LOG_DEBUG("%s: expected gf nodes: %u\n", __func__, 6*n_moves*n_layer); -+ // LLAMA_LOG_DEBUG("(tmp log) KV defrag cell moves: %u\n", n_moves); - - return true; - } -diff --git a/src/llama-kv-cache.h b/src/llama-kv-cache.h -index bf3b4b6a..928b9712 100644 ---- a/src/llama-kv-cache.h -+++ b/src/llama-kv-cache.h -@@ -82,6 +82,13 @@ struct llama_kv_cache_guard { - private: - llama_kv_cache * kv; - }; -+ -+// block of KV slots to move when defragging -+struct llama_kv_defrag_move { -+ uint32_t src; -+ uint32_t dst; -+ uint32_t len; -+}; - - // - // llama_kv_cache_unified -@@ -207,7 +214,7 @@ private: - - // defrag - struct { -- std::vector ids; -+ std::vector moves; - } defrag_info; - - // return true if cells have been moved -@@ -249,7 +256,8 @@ private: - llm_graph_result_ptr build_graph_defrag( - const llama_cparams & cparams, - ggml_context * ctx, -- ggml_cgraph * gf) const; -+ ggml_cgraph * gf, -+ const std::vector & moves) const; - - void 
state_write_meta(llama_io_write_i & io, const std::vector> & cell_ranges, llama_seq_id seq_id = -1) const; - void state_write_data(llama_io_write_i & io, const std::vector> & cell_ranges) const; diff --git a/llama/patches/0009-remove-amx.patch b/llama/patches/0009-remove-amx.patch new file mode 100644 index 000000000..cde880ce0 --- /dev/null +++ b/llama/patches/0009-remove-amx.patch @@ -0,0 +1,25 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: jmorganca +Date: Thu, 1 May 2025 15:05:08 -0700 +Subject: [PATCH] remove amx + +disable amx as it reduces performance on some systems +--- + ggml/src/CMakeLists.txt | 4 ---- + 1 file changed, 4 deletions(-) + +diff --git a/ggml/src/CMakeLists.txt b/ggml/src/CMakeLists.txt +index f5a5079a..5158acd6 100644 +--- a/ggml/src/CMakeLists.txt ++++ b/ggml/src/CMakeLists.txt +@@ -324,10 +324,6 @@ if (GGML_CPU_ALL_VARIANTS) + ggml_add_cpu_backend_variant(skylakex SSE42 AVX F16C AVX2 BMI2 FMA AVX512) + ggml_add_cpu_backend_variant(icelake SSE42 AVX F16C AVX2 BMI2 FMA AVX512 AVX512_VBMI AVX512_VNNI) + ggml_add_cpu_backend_variant(alderlake SSE42 AVX F16C AVX2 BMI2 FMA AVX_VNNI) +- if (NOT MSVC) +- # MSVC doesn't support AMX +- ggml_add_cpu_backend_variant(sapphirerapids SSE42 AVX F16C AVX2 BMI2 FMA AVX512 AVX512_VBMI AVX512_VNNI AVX512_BF16 AMX_TILE AMX_INT8) +- endif() + elseif(GGML_SYSTEM_ARCH STREQUAL "ARM") + if (CMAKE_SYSTEM_NAME MATCHES "Linux") + # Many of these features are optional so we build versions with popular diff --git a/llama/patches/0012-fix-string-arr-kv-loading.patch b/llama/patches/0010-fix-string-arr-kv-loading.patch similarity index 92% rename from llama/patches/0012-fix-string-arr-kv-loading.patch rename to llama/patches/0010-fix-string-arr-kv-loading.patch index f879c50ee..b6cf2e916 100644 --- a/llama/patches/0012-fix-string-arr-kv-loading.patch +++ b/llama/patches/0010-fix-string-arr-kv-loading.patch @@ -25,10 +25,10 @@ index 79ee2020..3efb22f0 100644 // get ith C string from array with given key_id GGML_API const char * gguf_get_arr_str (const struct gguf_context * ctx, int64_t key_id, size_t i); diff --git a/ggml/src/gguf.cpp b/ggml/src/gguf.cpp -index 381a9c7d..e45b453d 100644 +index 53504399..0f71d5f3 100644 --- a/ggml/src/gguf.cpp +++ b/ggml/src/gguf.cpp -@@ -777,10 +777,14 @@ enum gguf_type gguf_get_arr_type(const struct gguf_context * ctx, int64_t key_id +@@ -805,10 +805,14 @@ enum gguf_type gguf_get_arr_type(const struct gguf_context * ctx, int64_t key_id const void * gguf_get_arr_data(const struct gguf_context * ctx, int64_t key_id) { GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx)); @@ -44,7 +44,7 @@ index 381a9c7d..e45b453d 100644 const char * gguf_get_arr_str(const struct gguf_context * ctx, int64_t key_id, size_t i) { GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx)); GGML_ASSERT(ctx->kv[key_id].get_type() == GGUF_TYPE_STRING); -@@ -874,7 +878,6 @@ const char * gguf_get_val_str(const struct gguf_context * ctx, int64_t key_id) { +@@ -902,7 +906,6 @@ const char * gguf_get_val_str(const struct gguf_context * ctx, int64_t key_id) { const void * gguf_get_val_data(const struct gguf_context * ctx, int64_t key_id) { GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx)); GGML_ASSERT(ctx->kv[key_id].get_ne() == 1); @@ -53,10 +53,10 @@ index 381a9c7d..e45b453d 100644 } diff --git a/src/llama-vocab.cpp b/src/llama-vocab.cpp -index 10f34d33..9f5fd57b 100644 +index c011008f..fa388b03 100644 --- a/src/llama-vocab.cpp +++ b/src/llama-vocab.cpp -@@ -1469,9 +1469,7 @@ void 
llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) { +@@ -1760,9 +1760,7 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) { const int precompiled_charsmap_keyidx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_PRECOMPILED_CHARSMAP).c_str()); if (precompiled_charsmap_keyidx != -1) { const gguf_type pc_type = gguf_get_arr_type(ctx, precompiled_charsmap_keyidx); diff --git a/llama/patches/0013-ollama-debug-tensor.patch b/llama/patches/0011-ollama-debug-tensor.patch similarity index 91% rename from llama/patches/0013-ollama-debug-tensor.patch rename to llama/patches/0011-ollama-debug-tensor.patch index 53d911277..5dcd6ee0f 100644 --- a/llama/patches/0013-ollama-debug-tensor.patch +++ b/llama/patches/0011-ollama-debug-tensor.patch @@ -8,7 +8,7 @@ Subject: [PATCH] ollama debug tensor 1 file changed, 6 insertions(+) diff --git a/ggml/src/ggml-cpu/ggml-cpu.c b/ggml/src/ggml-cpu/ggml-cpu.c -index a30e67f2..2462d2b8 100644 +index d89cd8f4..a5689c18 100644 --- a/ggml/src/ggml-cpu/ggml-cpu.c +++ b/ggml/src/ggml-cpu/ggml-cpu.c @@ -15,6 +15,8 @@ @@ -20,7 +20,7 @@ index a30e67f2..2462d2b8 100644 #if defined(_MSC_VER) || defined(__MINGW32__) #include // using malloc.h with MSC/MINGW #elif !defined(__FreeBSD__) && !defined(__NetBSD__) && !defined(__OpenBSD__) -@@ -2841,6 +2843,10 @@ static thread_ret_t ggml_graph_compute_thread(void * data) { +@@ -2858,6 +2860,10 @@ static thread_ret_t ggml_graph_compute_thread(void * data) { ggml_compute_forward(¶ms, node); diff --git a/llama/patches/0011-remove-amx.patch b/llama/patches/0011-remove-amx.patch deleted file mode 100644 index 296a37612..000000000 --- a/llama/patches/0011-remove-amx.patch +++ /dev/null @@ -1,25 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: jmorganca -Date: Thu, 1 May 2025 15:05:08 -0700 -Subject: [PATCH] remove amx - -disable amx as it reduces performance on some systems ---- - ggml/src/CMakeLists.txt | 4 ---- - 1 file changed, 4 deletions(-) - -diff --git a/ggml/src/CMakeLists.txt b/ggml/src/CMakeLists.txt -index 45918bf6..0beaed86 100644 ---- a/ggml/src/CMakeLists.txt -+++ b/ggml/src/CMakeLists.txt -@@ -296,10 +296,6 @@ if (GGML_CPU_ALL_VARIANTS) - ggml_add_cpu_backend_variant(skylakex SSE42 AVX F16C AVX2 BMI2 FMA AVX512) - ggml_add_cpu_backend_variant(icelake SSE42 AVX F16C AVX2 BMI2 FMA AVX512 AVX512_VBMI AVX512_VNNI) - ggml_add_cpu_backend_variant(alderlake SSE42 AVX F16C AVX2 BMI2 FMA AVX_VNNI) -- if (NOT MSVC) -- # MSVC doesn't support AMX -- ggml_add_cpu_backend_variant(sapphirerapids SSE42 AVX F16C AVX2 BMI2 FMA AVX512 AVX512_VBMI AVX512_VNNI AVX512_BF16 AMX_TILE AMX_INT8) -- endif() - elseif (GGML_CPU) - ggml_add_cpu_backend_variant_impl("") - endif() diff --git a/llama/patches/0014-add-ollama-vocab-for-grammar-support.patch b/llama/patches/0012-add-ollama-vocab-for-grammar-support.patch similarity index 97% rename from llama/patches/0014-add-ollama-vocab-for-grammar-support.patch rename to llama/patches/0012-add-ollama-vocab-for-grammar-support.patch index ee81800e2..2d3731236 100644 --- a/llama/patches/0014-add-ollama-vocab-for-grammar-support.patch +++ b/llama/patches/0012-add-ollama-vocab-for-grammar-support.patch @@ -10,7 +10,7 @@ Subject: [PATCH] add ollama vocab for grammar support 3 files changed, 58 insertions(+), 9 deletions(-) diff --git a/src/llama-grammar.cpp b/src/llama-grammar.cpp -index 973b47ae..60d58236 100644 +index bed706bb..b51cee09 100644 --- a/src/llama-grammar.cpp +++ b/src/llama-grammar.cpp @@ -907,6 +907,7 @@ 
llama_grammar_candidates llama_grammar_reject_candidates_for_stack( @@ -90,7 +90,7 @@ index 973b47ae..60d58236 100644 if (grammar.awaiting_trigger) { if (std::find(grammar.trigger_tokens.begin(), grammar.trigger_tokens.end(), token) != grammar.trigger_tokens.end()) { -@@ -1191,13 +1200,14 @@ void llama_grammar_accept_impl(struct llama_grammar & grammar, llama_token token +@@ -1201,13 +1210,14 @@ void llama_grammar_accept_impl(struct llama_grammar & grammar, llama_token token } } @@ -107,7 +107,7 @@ index 973b47ae..60d58236 100644 } llama_grammar_accept_str(grammar, piece); -@@ -1217,3 +1227,28 @@ void llama_grammar_accept_str(struct llama_grammar & grammar, const std::string +@@ -1227,3 +1237,28 @@ void llama_grammar_accept_str(struct llama_grammar & grammar, const std::string throw std::runtime_error("Unexpected empty grammar stack after accepting piece: " + piece); } } @@ -184,7 +184,7 @@ index f8c291de..2a3a62db 100644 const char * grammar_root, bool lazy, diff --git a/src/llama-sampling.cpp b/src/llama-sampling.cpp -index 804b11e0..15a10ca8 100644 +index bfbf5fa2..11f93f42 100644 --- a/src/llama-sampling.cpp +++ b/src/llama-sampling.cpp @@ -1466,7 +1466,7 @@ static void llama_sampler_grammar_reset(struct llama_sampler * smpl) { diff --git a/llama/patches/0015-add-argsort-and-cuda-copy-for-i32.patch b/llama/patches/0013-add-argsort-and-cuda-copy-for-i32.patch similarity index 68% rename from llama/patches/0015-add-argsort-and-cuda-copy-for-i32.patch rename to llama/patches/0013-add-argsort-and-cuda-copy-for-i32.patch index 174c45a5d..7e821c1e4 100644 --- a/llama/patches/0015-add-argsort-and-cuda-copy-for-i32.patch +++ b/llama/patches/0013-add-argsort-and-cuda-copy-for-i32.patch @@ -4,16 +4,17 @@ Date: Thu, 1 May 2025 13:45:12 -0700 Subject: [PATCH] add argsort and cuda copy for i32 --- - ggml/src/ggml-cpu/ops.cpp | 43 ++++++++++++++ - ggml/src/ggml-cuda/argsort.cu | 102 +++++++++++++++++++++++++++++++++- - ggml/src/ggml-cuda/cpy.cu | 49 ++++++++++++++++ - 3 files changed, 192 insertions(+), 2 deletions(-) + ggml/src/ggml-cpu/ops.cpp | 43 +++++++++++++ + ggml/src/ggml-cuda/argsort.cu | 102 ++++++++++++++++++++++++++++++- + ggml/src/ggml-cuda/cpy-utils.cuh | 6 ++ + ggml/src/ggml-cuda/cpy.cu | 43 +++++++++++++ + 4 files changed, 192 insertions(+), 2 deletions(-) diff --git a/ggml/src/ggml-cpu/ops.cpp b/ggml/src/ggml-cpu/ops.cpp -index 955fec59..654e2f28 100644 +index 854f1c2b..a2924757 100644 --- a/ggml/src/ggml-cpu/ops.cpp +++ b/ggml/src/ggml-cpu/ops.cpp -@@ -6822,6 +6822,45 @@ static void ggml_compute_forward_argsort_f32( +@@ -8146,6 +8146,45 @@ static void ggml_compute_forward_argsort_f32( } } @@ -59,7 +60,7 @@ index 955fec59..654e2f28 100644 void ggml_compute_forward_argsort( const ggml_compute_params * params, ggml_tensor * dst) { -@@ -6833,6 +6872,10 @@ void ggml_compute_forward_argsort( +@@ -8157,6 +8196,10 @@ void ggml_compute_forward_argsort( { ggml_compute_forward_argsort_f32(params, dst); } break; @@ -194,84 +195,78 @@ index 607ded85..53b02634 100644 + argsort_f32_i32_cuda(src0_d, (int *)dst_d, ncols, nrows, order, stream); + } } +diff --git a/ggml/src/ggml-cuda/cpy-utils.cuh b/ggml/src/ggml-cuda/cpy-utils.cuh +index 410c12b7..b8e9e107 100644 +--- a/ggml/src/ggml-cuda/cpy-utils.cuh ++++ b/ggml/src/ggml-cuda/cpy-utils.cuh +@@ -223,3 +223,9 @@ template + static __device__ void cpy_1_flt(const char * cxi, char * cdsti) { + convert_flt((const src_t *)cxi, (dst_t *)cdsti); + } ++ ++static __device__ void cpy_1_i32_i32(const char * cxi, char * cdsti) { ++ const int32_t * src = (const 
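For context on the argsort half of this patch: argsort does not reorder the values, it writes out the permutation of row indices that would sort them. A CPU sketch of the same contract for I32 data, assuming an ascending/descending flag in the spirit of GGML's sort-order enum:

#include <algorithm>
#include <cstdint>
#include <numeric>
#include <vector>

// Return the permutation that sorts `row`: result[k] is the index of the
// k-th smallest (or largest) element of the input.
std::vector<int32_t> argsort_i32(const std::vector<int32_t> & row, bool ascending) {
    std::vector<int32_t> idx(row.size());
    std::iota(idx.begin(), idx.end(), 0); // 0, 1, 2, ...
    std::sort(idx.begin(), idx.end(), [&](int32_t a, int32_t b) {
        return ascending ? row[a] < row[b] : row[a] > row[b];
    });
    return idx;
}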
int32_t *)cxi; ++ int32_t * dst = (int32_t *)cdsti; ++ *dst = *src; ++} diff --git a/ggml/src/ggml-cuda/cpy.cu b/ggml/src/ggml-cuda/cpy.cu -index d027271f..4abd01d7 100644 +index f9bb0256..9c3774e5 100644 --- a/ggml/src/ggml-cuda/cpy.cu +++ b/ggml/src/ggml-cuda/cpy.cu -@@ -38,6 +38,13 @@ static __device__ void cpy_1_f16_f32(const char * cxi, char * cdsti) { - *dsti = *xi; +@@ -278,6 +278,47 @@ static void ggml_cpy_f32_iq4_nl_cuda( + (cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, cdst_indirect, graph_cpynode_index++); } -+static __device__ void cpy_1_i32_i32(const char * cxi, char * cdsti) { -+ const int32_t * xi = (const int32_t *) cxi; -+ int32_t * dsti = (int32_t *) cdsti; -+ -+ *dsti = *xi; -+} -+ - template - static __global__ void cpy_f32_f16(const char * cx, char * cdst_direct, const int ne, - const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02, -@@ -68,6 +75,44 @@ static __global__ void cpy_f32_f16(const char * cx, char * cdst_direct, const in - cpy_1(cx + x_offset, cdst + dst_offset); - } - -+// First, add this template function after the other template functions +template -+static __global__ void cpy_i32_i32(const char * cx, char * cdst, const int ne, -+ const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02, -+ const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, -+ const int nb12, const int nb13) { -+ const int64_t i = blockDim.x*blockIdx.x + threadIdx.x; ++static __global__ void cpy_i32_i32( ++ const char *cx, char *cdst, const int ne, ++ const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02, const int nb03, ++ const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, ++ cudaStream_t stream, char ** cdst_indirect, int & graph_cpynode_index) { ++ ++ const int64_t i = blockDim.x * blockIdx.x + threadIdx.x; + + if (i >= ne) { + return; + } + -+ const int64_t i03 = i/(ne00 * ne01 * ne02); -+ const int64_t i02 = (i - i03*ne00*ne01*ne02 )/ (ne00*ne01); -+ const int64_t i01 = (i - i03*ne00*ne01*ne02 - i02*ne01*ne00) / ne00; -+ const int64_t i00 = i - i03*ne00*ne01*ne02 - i02*ne01*ne00 - i01*ne00; -+ const int64_t x_offset = i00*nb00 + i01*nb01 + i02*nb02 + i03 * nb03; ++ const int64_t i03 = i / (ne00 * ne01 * ne02); ++ const int64_t i02 = (i - i03 * ne00 * ne01 * ne02) / (ne00 * ne01); ++ const int64_t i01 = (i - i03 * ne00 * ne01 * ne02 - i02 * ne01 * ne00) / ne00; ++ const int64_t i00 = i - i03 * ne00 * ne01 * ne02 - i02 * ne01 * ne00 - i01 * ne00; ++ const int64_t x_offset = i00 * nb00 + i01 * nb01 + i02 * nb02 + i03 * nb03; + -+ const int64_t i13 = i/(ne10 * ne11 * ne12); -+ const int64_t i12 = (i - i13*ne10*ne11*ne12) / (ne10*ne11); -+ const int64_t i11 = (i - i13*ne10*ne11*ne12 - i12*ne10*ne11) / ne10; -+ const int64_t i10 = i - i13*ne10*ne11*ne12 - i12*ne10*ne11 - i11*ne10; -+ const int64_t dst_offset = i10*nb10 + i11*nb11 + i12*nb12 + i13 * nb13; ++ const int64_t i13 = i / (ne10 * ne11 * ne12); ++ const int64_t i12 = (i - i13 * ne10 * ne11 * ne12) / (ne10 * ne11); ++ const int64_t i11 = (i - i13 * ne10 * ne11 * ne12 - i12 * ne10 * ne11) / ne10; ++ const int64_t i10 = i - i13 * ne10 * ne11 * ne12 - i12 * ne10 * ne11 - i11 * ne10; ++ const int64_t dst_offset = i10 * nb10 + i11 * nb11 + i12 * nb12 + i13 * nb13; + -+ cpy_1(cx + x_offset, cdst + dst_offset); ++ char * cdst_ptr = (cdst_indirect != nullptr) ? 
cdst_indirect[graph_cpynode_index] : cdst; ++ cpy_1(cx + x_offset, cdst_ptr + dst_offset); +} + -+// Then modify the ggml_cpy_i32_i32_cuda function to use the new template ++ +static void ggml_cpy_i32_i32_cuda( + const char * cx, char * cdst, const int ne, -+ const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02, -+ const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, cudaStream_t stream, char ** cdst_indirect, int graph_cpynode_index) { ++ const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02, const int nb03, ++ const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, ++ cudaStream_t stream, char ** cdst_indirect, int graph_cpynode_index) { + + const int num_blocks = (ne + CUDA_CPY_BLOCK_SIZE - 1) / CUDA_CPY_BLOCK_SIZE; + cpy_i32_i32<<>> -+ (cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13); ++ (cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, stream, cdst_indirect, graph_cpynode_index); +} + - static __device__ void cpy_blck_f32_q8_0(const char * cxi, char * cdsti) { - const float * xi = (const float *) cxi; - block_q8_0 * dsti = (block_q8_0 *) cdsti; -@@ -633,6 +678,8 @@ void ggml_cuda_cpy(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, gg - ggml_cpy_f16_f16_cuda (src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream, dest_ptrs_d, graph_cpynode_index); + void ggml_cuda_cpy(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, ggml_tensor * src1, bool disable_indirection_for_this_node) { + const int64_t ne = ggml_nelements(src0); + GGML_ASSERT(ne == ggml_nelements(src1)); +@@ -369,6 +410,8 @@ void ggml_cuda_cpy(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, gg + ggml_cpy_flt_cuda (src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream, dest_ptrs_d, graph_cpynode_index); } else if (src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_F32) { - ggml_cpy_f16_f32_cuda (src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream, dest_ptrs_d, graph_cpynode_index); + ggml_cpy_flt_cuda (src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream, dest_ptrs_d, graph_cpynode_index); + } else if (src0->type == GGML_TYPE_I32 && src1->type == GGML_TYPE_I32) { + ggml_cpy_i32_i32_cuda(src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream, dest_ptrs_d, graph_cpynode_index); - } else { - GGML_ABORT("%s: unsupported type combination (%s to %s)\n", __func__, - ggml_type_name(src0->type), ggml_type_name(src1->type)); -@@ -688,6 +735,8 @@ void* ggml_cuda_cpy_fn(const ggml_tensor * src0, ggml_tensor * src1) { - return (void*) cpy_f32_f16; - } else if (src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_F32) { - return (void*) cpy_f32_f16; -+ } else if (src0->type == GGML_TYPE_I32 && src1->type == GGML_TYPE_I32) { -+ return (void*) cpy_i32_i32; - } else { - GGML_ABORT("%s: unsupported type combination (%s to %s)\n", __func__, - ggml_type_name(src0->type), ggml_type_name(src1->type)); + } else if (src0->type == GGML_TYPE_BF16 && src1->type == GGML_TYPE_BF16) { + 
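The index arithmetic in the i32 copy kernel above is easier to read outside CUDA: a flat element index is decomposed into four coordinates, slowest-varying dimension first, and the coordinates are then combined with the per-dimension byte strides. A host-side sketch of just the decomposition, with ne0..ne2 as the three fastest-varying extents:

#include <cstdint>

struct coord4 { int64_t i0, i1, i2, i3; };

// Decompose flat index i for a tensor with extents ne0 x ne1 x ne2 x ne3:
// divide out the slower dimensions, then take remainders for the faster ones.
coord4 unflatten(int64_t i, int64_t ne0, int64_t ne1, int64_t ne2) {
    coord4 c;
    c.i3 = i / (ne0 * ne1 * ne2);
    c.i2 = (i - c.i3 * ne0 * ne1 * ne2) / (ne0 * ne1);
    c.i1 = (i - c.i3 * ne0 * ne1 * ne2 - c.i2 * ne0 * ne1) / ne0;
    c.i0 = i - c.i3 * ne0 * ne1 * ne2 - c.i2 * ne0 * ne1 - c.i1 * ne0;
    return c; // the byte offset is then i0*nb0 + i1*nb1 + i2*nb2 + i3*nb3
}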
ggml_cpy_flt_cuda (src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream, dest_ptrs_d, graph_cpynode_index); + } else if (src0->type == GGML_TYPE_BF16 && src1->type == GGML_TYPE_F16) { diff --git a/llama/patches/0016-graph-memory-reporting-on-failure.patch b/llama/patches/0014-graph-memory-reporting-on-failure.patch similarity index 92% rename from llama/patches/0016-graph-memory-reporting-on-failure.patch rename to llama/patches/0014-graph-memory-reporting-on-failure.patch index 921882249..26fe8a8e0 100644 --- a/llama/patches/0016-graph-memory-reporting-on-failure.patch +++ b/llama/patches/0014-graph-memory-reporting-on-failure.patch @@ -28,7 +28,7 @@ index 2cb150fd..781b1e10 100644 // Create a buffer and allocate all the tensors in a ggml_context GGML_API struct ggml_backend_buffer * ggml_backend_alloc_ctx_tensors_from_buft(struct ggml_context * ctx, ggml_backend_buffer_type_t buft); diff --git a/ggml/include/ggml-backend.h b/ggml/include/ggml-backend.h -index 778927f6..74e46716 100644 +index a2977ea2..8a91b381 100644 --- a/ggml/include/ggml-backend.h +++ b/ggml/include/ggml-backend.h @@ -304,6 +304,12 @@ extern "C" { @@ -45,10 +45,10 @@ index 778927f6..74e46716 100644 GGML_API ggml_backend_t ggml_backend_sched_get_tensor_backend(ggml_backend_sched_t sched, struct ggml_tensor * node); diff --git a/ggml/src/ggml-alloc.c b/ggml/src/ggml-alloc.c -index 5fd379f6..04812990 100644 +index 8b6e6028..41c8c4a2 100644 --- a/ggml/src/ggml-alloc.c +++ b/ggml/src/ggml-alloc.c -@@ -364,6 +364,7 @@ struct node_alloc { +@@ -350,6 +350,7 @@ struct node_alloc { struct ggml_gallocr { ggml_backend_buffer_type_t * bufts; // [n_buffers] ggml_backend_buffer_t * buffers; // [n_buffers] @@ -56,7 +56,7 @@ index 5fd379f6..04812990 100644 struct ggml_dyn_tallocr ** buf_tallocs; // [n_buffers] int n_buffers; -@@ -387,6 +388,9 @@ ggml_gallocr_t ggml_gallocr_new_n(ggml_backend_buffer_type_t * bufts, int n_bufs +@@ -373,6 +374,9 @@ ggml_gallocr_t ggml_gallocr_new_n(ggml_backend_buffer_type_t * bufts, int n_bufs galloc->buffers = calloc(n_bufs, sizeof(ggml_backend_buffer_t)); GGML_ASSERT(galloc->buffers != NULL); @@ -66,7 +66,7 @@ index 5fd379f6..04812990 100644 galloc->buf_tallocs = calloc(n_bufs, sizeof(struct ggml_dyn_tallocr *)); GGML_ASSERT(galloc->buf_tallocs != NULL); -@@ -453,6 +457,7 @@ void ggml_gallocr_free(ggml_gallocr_t galloc) { +@@ -439,6 +443,7 @@ void ggml_gallocr_free(ggml_gallocr_t galloc) { ggml_hash_set_free(&galloc->hash_set); free(galloc->hash_values); free(galloc->bufts); @@ -74,7 +74,7 @@ index 5fd379f6..04812990 100644 free(galloc->buffers); free(galloc->buf_tallocs); free(galloc->node_allocs); -@@ -748,6 +753,8 @@ bool ggml_gallocr_reserve_n(ggml_gallocr_t galloc, struct ggml_cgraph * graph, c +@@ -734,6 +739,8 @@ bool ggml_gallocr_reserve_n(ggml_gallocr_t galloc, struct ggml_cgraph * graph, c } } @@ -83,7 +83,7 @@ index 5fd379f6..04812990 100644 // reallocate buffers if needed for (int i = 0; i < galloc->n_buffers; i++) { // if the buffer type is used multiple times, we reuse the same buffer -@@ -769,15 +776,20 @@ bool ggml_gallocr_reserve_n(ggml_gallocr_t galloc, struct ggml_cgraph * graph, c +@@ -755,15 +762,20 @@ bool ggml_gallocr_reserve_n(ggml_gallocr_t galloc, struct ggml_cgraph * graph, c ggml_backend_buffer_free(galloc->buffers[i]); galloc->buffers[i] = ggml_backend_buft_alloc_buffer(galloc->bufts[i], new_size); @@ -108,7 +108,7 @@ index 5fd379f6..04812990 100644 } bool ggml_gallocr_reserve(ggml_gallocr_t galloc, struct 
ggml_cgraph *graph) { -@@ -934,6 +946,24 @@ size_t ggml_gallocr_get_buffer_size(ggml_gallocr_t galloc, int buffer_id) { +@@ -920,6 +932,24 @@ size_t ggml_gallocr_get_buffer_size(ggml_gallocr_t galloc, int buffer_id) { return ggml_backend_buffer_get_size(galloc->buffers[buffer_id]); } @@ -134,10 +134,10 @@ index 5fd379f6..04812990 100644 static void free_buffers(ggml_backend_buffer_t ** buffers, const size_t * n_buffers) { diff --git a/ggml/src/ggml-backend.cpp b/ggml/src/ggml-backend.cpp -index 0ce73a99..be335e8c 100644 +index 97f47abd..eded0291 100644 --- a/ggml/src/ggml-backend.cpp +++ b/ggml/src/ggml-backend.cpp -@@ -1629,6 +1629,16 @@ size_t ggml_backend_sched_get_buffer_size(ggml_backend_sched_t sched, ggml_backe +@@ -1631,6 +1631,16 @@ size_t ggml_backend_sched_get_buffer_size(ggml_backend_sched_t sched, ggml_backe return ggml_gallocr_get_buffer_size(sched->galloc, backend_index); } diff --git a/llama/patches/0017-ggml-Export-GPU-UUIDs.patch b/llama/patches/0015-ggml-Export-GPU-UUIDs.patch similarity index 89% rename from llama/patches/0017-ggml-Export-GPU-UUIDs.patch rename to llama/patches/0015-ggml-Export-GPU-UUIDs.patch index 2bd938a3f..22e1a724b 100644 --- a/llama/patches/0017-ggml-Export-GPU-UUIDs.patch +++ b/llama/patches/0015-ggml-Export-GPU-UUIDs.patch @@ -12,7 +12,7 @@ with tools (e.g. nvidia-smi) and system management libraries (e.g. nvml). 3 files changed, 63 insertions(+), 6 deletions(-) diff --git a/ggml/include/ggml-backend.h b/ggml/include/ggml-backend.h -index 74e467163..48839339d 100644 +index 8a91b381..9424394e 100644 --- a/ggml/include/ggml-backend.h +++ b/ggml/include/ggml-backend.h @@ -152,6 +152,7 @@ extern "C" { @@ -24,17 +24,17 @@ index 74e467163..48839339d 100644 size_t memory_total; enum ggml_backend_dev_type type; diff --git a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu -index cb0d8528d..1492368de 100644 +index 37ee2a6d..57eae461 100644 --- a/ggml/src/ggml-cuda/ggml-cuda.cu +++ b/ggml/src/ggml-cuda/ggml-cuda.cu -@@ -173,6 +173,51 @@ static int ggml_cuda_parse_id(char devName[]) { +@@ -179,6 +179,51 @@ static int ggml_cuda_parse_id(char devName[]) { } - #endif // defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__) + #endif // defined(GGML_USE_HIP) +static std::string ggml_cuda_parse_uuid(cudaDeviceProp prop, int device_num) { + char id[64]; + -+ #if !defined(GGML_USE_HIP) ++#if !defined(GGML_USE_HIP) + snprintf(id, sizeof(id), + "GPU-%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x", + (unsigned char)prop.uuid.bytes[0], @@ -54,10 +54,10 @@ index cb0d8528d..1492368de 100644 + (unsigned char)prop.uuid.bytes[14], + (unsigned char)prop.uuid.bytes[15] + ); -+ #else -+ #ifdef _WIN32 ++#else ++#ifdef _WIN32 + snprintf(id, sizeof(id), "%d", device_num); -+ #else ++#else + try { + std::string uuid = std::string(prop.uuid.bytes, 16); + @@ -70,16 +70,16 @@ index cb0d8528d..1492368de 100644 + } catch (const std::exception &e) { + snprintf(id, sizeof(id), "%d", device_num); + } -+ #endif -+ #endif ++#endif ++#endif + + return id; +} + static ggml_cuda_device_info ggml_cuda_init() { - #ifdef __HIP_PLATFORM_AMD__ + #if defined(GGML_USE_HIP) // Workaround for a rocBLAS bug when using multiple graphics cards: -@@ -261,22 +306,24 @@ static ggml_cuda_device_info ggml_cuda_init() { +@@ -267,22 +312,24 @@ static ggml_cuda_device_info ggml_cuda_init() { info.devices[id].cc += prop.minor * 0x10; } } @@ -107,10 +107,10 @@ index cb0d8528d..1492368de 100644 + GGML_LOG_INFO(" Device %d: %s, compute capability %d.%d, VMM: %s, ID: %s\n", + 
id, prop.name, prop.major, prop.minor, device_vmm ? "yes" : "no", + ggml_cuda_parse_uuid(prop, id).c_str()); - #endif // defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__) + #endif // defined(GGML_USE_HIP) } -@@ -2884,6 +2931,7 @@ struct ggml_backend_cuda_device_context { +@@ -3144,6 +3191,7 @@ struct ggml_backend_cuda_device_context { int device; std::string name; std::string description; @@ -118,7 +118,7 @@ index cb0d8528d..1492368de 100644 }; static const char * ggml_backend_cuda_device_get_name(ggml_backend_dev_t dev) { -@@ -2896,6 +2944,11 @@ static const char * ggml_backend_cuda_device_get_description(ggml_backend_dev_t +@@ -3156,6 +3204,11 @@ static const char * ggml_backend_cuda_device_get_description(ggml_backend_dev_t return ctx->description.c_str(); } @@ -130,7 +130,7 @@ index cb0d8528d..1492368de 100644 static void ggml_backend_cuda_device_get_memory(ggml_backend_dev_t dev, size_t * free, size_t * total) { ggml_backend_cuda_device_context * ctx = (ggml_backend_cuda_device_context *)dev->context; ggml_cuda_set_device(ctx->device); -@@ -2910,6 +2963,7 @@ static enum ggml_backend_dev_type ggml_backend_cuda_device_get_type(ggml_backend +@@ -3170,6 +3223,7 @@ static enum ggml_backend_dev_type ggml_backend_cuda_device_get_type(ggml_backend static void ggml_backend_cuda_device_get_props(ggml_backend_dev_t dev, ggml_backend_dev_props * props) { props->name = ggml_backend_cuda_device_get_name(dev); props->description = ggml_backend_cuda_device_get_description(dev); @@ -138,7 +138,7 @@ index cb0d8528d..1492368de 100644 props->type = ggml_backend_cuda_device_get_type(dev); ggml_backend_cuda_device_get_memory(dev, &props->memory_free, &props->memory_total); -@@ -3457,6 +3511,7 @@ ggml_backend_reg_t ggml_backend_cuda_reg() { +@@ -3767,6 +3821,7 @@ ggml_backend_reg_t ggml_backend_cuda_reg() { cudaDeviceProp prop; CUDA_CHECK(cudaGetDeviceProperties(&prop, i)); dev_ctx->description = prop.name; @@ -147,10 +147,10 @@ index cb0d8528d..1492368de 100644 ggml_backend_dev_t dev = new ggml_backend_device { /* .iface = */ ggml_backend_cuda_device_interface, diff --git a/ggml/src/ggml-metal/ggml-metal.m b/ggml/src/ggml-metal/ggml-metal.m -index 1b56f858c..a9eeebc6a 100644 +index 7bccc7bf..fe7b2f0a 100644 --- a/ggml/src/ggml-metal/ggml-metal.m +++ b/ggml/src/ggml-metal/ggml-metal.m -@@ -5703,6 +5703,7 @@ static enum ggml_backend_dev_type ggml_backend_metal_device_get_type(ggml_backen +@@ -6522,6 +6522,7 @@ static enum ggml_backend_dev_type ggml_backend_metal_device_get_type(ggml_backen static void ggml_backend_metal_device_get_props(ggml_backend_dev_t dev, struct ggml_backend_dev_props * props) { props->name = ggml_backend_metal_device_get_name(dev); props->description = ggml_backend_metal_device_get_description(dev); diff --git a/llama/patches/0018-temporary-prevent-rocm-cuda-mixed-loading.patch b/llama/patches/0016-temporary-prevent-rocm-cuda-mixed-loading.patch similarity index 87% rename from llama/patches/0018-temporary-prevent-rocm-cuda-mixed-loading.patch rename to llama/patches/0016-temporary-prevent-rocm-cuda-mixed-loading.patch index 205dc64ae..f085e0c7c 100644 --- a/llama/patches/0018-temporary-prevent-rocm-cuda-mixed-loading.patch +++ b/llama/patches/0016-temporary-prevent-rocm-cuda-mixed-loading.patch @@ -8,10 +8,10 @@ Subject: [PATCH] temporary prevent rocm+cuda mixed loading 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/ggml/src/ggml-backend-reg.cpp b/ggml/src/ggml-backend-reg.cpp -index 4e67d243..8f49f084 100644 +index 3040b2aa..f1e9c180 100644 --- 
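The export-GPU-UUIDs patch above renders the sixteen raw UUID bytes from the device properties in the canonical GPU-xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx form that tools such as nvidia-smi report. The formatting step in isolation, with format_gpu_uuid as a hypothetical helper name:

#include <cstdio>
#include <string>

// Format 16 UUID bytes as "GPU-xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx",
// grouping the bytes 4-2-2-2-6 as in the canonical UUID text form.
std::string format_gpu_uuid(const unsigned char b[16]) {
    char id[64];
    std::snprintf(id, sizeof(id),
        "GPU-%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x",
        b[0], b[1], b[2],  b[3],  b[4],  b[5],  b[6],  b[7],
        b[8], b[9], b[10], b[11], b[12], b[13], b[14], b[15]);
    return id;
}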
a/ggml/src/ggml-backend-reg.cpp +++ b/ggml/src/ggml-backend-reg.cpp -@@ -573,8 +573,16 @@ void ggml_backend_load_all_from_path(const char * dir_path) { +@@ -581,8 +581,16 @@ void ggml_backend_load_all_from_path(const char * dir_path) { ggml_backend_load_best("blas", silent, dir_path); ggml_backend_load_best("cann", silent, dir_path); @@ -20,13 +20,13 @@ index 4e67d243..8f49f084 100644 + + // Avoid mixed hip+cuda configurations + const char * hip_devices = std::getenv("HIP_VISIBLE_DEVICES"); -+ const char * rocr_devices = std::getenv("ROCR_VISIBLE_DEVICES"); ++ const char * rocr_devices = std::getenv("ROCR_VISIBLE_DEVICES"); + if (!hip_devices && !rocr_devices) { + ggml_backend_load_best("cuda", silent, dir_path); + } else { + ggml_backend_load_best("hip", silent, dir_path); + } -+ - ggml_backend_load_best("kompute", silent, dir_path); ++ ggml_backend_load_best("metal", silent, dir_path); ggml_backend_load_best("rpc", silent, dir_path); + ggml_backend_load_best("sycl", silent, dir_path); diff --git a/llama/patches/0017-add-C-API-for-mtmd_input_text.patch b/llama/patches/0017-add-C-API-for-mtmd_input_text.patch new file mode 100644 index 000000000..2c19ae6db --- /dev/null +++ b/llama/patches/0017-add-C-API-for-mtmd_input_text.patch @@ -0,0 +1,46 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Gabe Goodhart +Date: Tue, 24 Jun 2025 16:55:31 -0600 +Subject: [PATCH] add C API for mtmd_input_text + +Signed-off-by: Gabe Goodhart +--- + tools/mtmd/mtmd.cpp | 10 ++++++++++ + tools/mtmd/mtmd.h | 3 +++ + 2 files changed, 13 insertions(+) + +diff --git a/tools/mtmd/mtmd.cpp b/tools/mtmd/mtmd.cpp +index a05373d5..6f70f7f4 100644 +--- a/tools/mtmd/mtmd.cpp ++++ b/tools/mtmd/mtmd.cpp +@@ -79,6 +79,16 @@ enum mtmd_slice_tmpl { + // TODO @ngxson : add support for idefics (SmolVLM) + }; + ++mtmd_input_text* mtmd_input_text_init(const char * text, bool add_special, bool parse_special) { ++ return new mtmd_input_text{text, add_special, parse_special}; ++} ++ ++void mtmd_input_text_free(mtmd_input_text* input_text) { ++ if (input_text) { ++ delete input_text; ++ } ++} ++ + const char * mtmd_default_marker() { + return "<__media__>"; + } +diff --git a/tools/mtmd/mtmd.h b/tools/mtmd/mtmd.h +index f4ea07d3..cf287224 100644 +--- a/tools/mtmd/mtmd.h ++++ b/tools/mtmd/mtmd.h +@@ -75,6 +75,9 @@ typedef struct mtmd_input_chunk mtmd_input_chunk; + typedef struct mtmd_input_chunks mtmd_input_chunks; + typedef struct mtmd_input_text mtmd_input_text; + ++MTMD_API mtmd_input_text* mtmd_input_text_init(const char * text, bool add_special, bool parse_special); ++MTMD_API void mtmd_input_text_free(mtmd_input_text* input_text); ++ + struct mtmd_context_params { + bool use_gpu; + bool print_timings; diff --git a/llama/patches/0018-no-power-throttling-win32-with-gnuc.patch b/llama/patches/0018-no-power-throttling-win32-with-gnuc.patch new file mode 100644 index 000000000..14c50ca0c --- /dev/null +++ b/llama/patches/0018-no-power-throttling-win32-with-gnuc.patch @@ -0,0 +1,22 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Gabe Goodhart +Date: Fri, 11 Jul 2025 15:59:19 -0600 +Subject: [PATCH] no power throttling win32 with gnuc + +--- + ggml/src/ggml-cpu/ggml-cpu.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/ggml/src/ggml-cpu/ggml-cpu.c b/ggml/src/ggml-cpu/ggml-cpu.c +index a5689c18..85af19a3 100644 +--- a/ggml/src/ggml-cpu/ggml-cpu.c ++++ b/ggml/src/ggml-cpu/ggml-cpu.c +@@ -2412,7 +2412,7 @@ static bool 
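The new mtmd_input_text C API above follows the usual opaque-handle pattern: C callers cannot aggregate-initialize the C++ struct directly, so the patch pairs an allocating _init function with a matching _free. The same pattern in miniature, using a made-up my_text type rather than the patch's own:

// Opaque-handle pattern: C sees only a forward declaration, C++ owns the
// definition, and lifetime is managed through paired init/free functions.
struct my_text {
    const char * text;
    bool         add_special;
    bool         parse_special;
};

extern "C" my_text * my_text_init(const char * text, bool add_special, bool parse_special) {
    return new my_text{text, add_special, parse_special};
}

extern "C" void my_text_free(my_text * p) {
    delete p; // delete on a null pointer is a no-op, so no null check is required
}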
ggml_thread_apply_priority(int32_t prio) { + // Newer Windows 11 versions aggresively park (offline) CPU cores and often place + // all our threads onto the first 4 cores which results in terrible performance with + // n_threads > 4 +- #if _WIN32_WINNT >= 0x0602 ++ #if (_WIN32_WINNT >= 0x0602) && !defined(__GNUC__) + THREAD_POWER_THROTTLING_STATE t; + ZeroMemory(&t, sizeof(t)); + t.Version = THREAD_POWER_THROTTLING_CURRENT_VERSION; diff --git a/llama/patches/0022-BF16-macos-version-guard.patch b/llama/patches/0019-BF16-macos-version-guard.patch similarity index 86% rename from llama/patches/0022-BF16-macos-version-guard.patch rename to llama/patches/0019-BF16-macos-version-guard.patch index 88e4f7cb0..6ebc33768 100644 --- a/llama/patches/0022-BF16-macos-version-guard.patch +++ b/llama/patches/0019-BF16-macos-version-guard.patch @@ -9,10 +9,10 @@ Only enable BF16 on supported MacOS versions (v14+) 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/ggml/src/ggml-metal/ggml-metal.m b/ggml/src/ggml-metal/ggml-metal.m -index 110c9ece..ab46f6e3 100644 +index fe7b2f0a..e4c31268 100644 --- a/ggml/src/ggml-metal/ggml-metal.m +++ b/ggml/src/ggml-metal/ggml-metal.m -@@ -89,7 +89,11 @@ static id ggml_backend_metal_device_acq(struct ggml_backend_metal_dev +@@ -106,7 +106,11 @@ static id ggml_backend_metal_device_acq(struct ggml_backend_metal_dev ctx->has_bfloat |= [ctx->mtl_device supportsFamily:MTLGPUFamilyApple6]; #if defined(GGML_METAL_USE_BF16) diff --git a/llama/patches/0019-metal-add-mean-kernel-14267.patch b/llama/patches/0019-metal-add-mean-kernel-14267.patch deleted file mode 100644 index e65aeb7b4..000000000 --- a/llama/patches/0019-metal-add-mean-kernel-14267.patch +++ /dev/null @@ -1,169 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Georgi Gerganov -Date: Thu, 19 Jun 2025 08:05:21 +0300 -Subject: [PATCH] metal : add mean kernel (#14267) - -* metal : add mean kernel - -ggml-ci - -* cont : dedup implementation - -ggml-ci ---- - ggml/src/ggml-metal/ggml-metal.m | 33 ++++++++++++++++--- - ggml/src/ggml-metal/ggml-metal.metal | 48 ++++++++++++++++++++++------ - 2 files changed, 67 insertions(+), 14 deletions(-) - -diff --git a/ggml/src/ggml-metal/ggml-metal.m b/ggml/src/ggml-metal/ggml-metal.m -index a9eeebc6..110c9ece 100644 ---- a/ggml/src/ggml-metal/ggml-metal.m -+++ b/ggml/src/ggml-metal/ggml-metal.m -@@ -489,6 +489,7 @@ enum ggml_metal_kernel_type { - GGML_METAL_KERNEL_TYPE_COS, - GGML_METAL_KERNEL_TYPE_NEG, - GGML_METAL_KERNEL_TYPE_SUM_ROWS, -+ GGML_METAL_KERNEL_TYPE_MEAN, - GGML_METAL_KERNEL_TYPE_POOL_2D_AVG_F32, - GGML_METAL_KERNEL_TYPE_POOL_2D_MAX_F32, - GGML_METAL_KERNEL_TYPE_ARGMAX, -@@ -1436,6 +1437,7 @@ static struct ggml_backend_metal_context * ggml_metal_init(ggml_backend_dev_t de - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_COS, cos, true); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_NEG, neg, true); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SUM_ROWS, sum_rows, true); -+ GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MEAN, mean, true); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ARGMAX, argmax, true); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_POOL_2D_AVG_F32, pool_2d_avg_f32, true); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_POOL_2D_MAX_F32, pool_2d_max_f32, true); -@@ -1634,6 +1636,7 @@ static bool ggml_metal_supports_op(const struct ggml_backend_metal_device_contex - case GGML_OP_LOG: - return false; // TODO: implement - case GGML_OP_SUM_ROWS: -+ case GGML_OP_MEAN: - case GGML_OP_SOFT_MAX: - case 
GGML_OP_GROUP_NORM: - return has_simdgroup_reduction && ggml_is_contiguous(op->src[0]); -@@ -2362,11 +2365,30 @@ static bool ggml_metal_encode_node( - [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)]; - } break; - case GGML_OP_SUM_ROWS: -+ case GGML_OP_MEAN: - { - GGML_ASSERT(src0->nb[0] == ggml_type_size(src0->type)); - -- id pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SUM_ROWS].pipeline; -+ id pipeline = nil; -+ -+ switch (dst->op) { -+ case GGML_OP_SUM_ROWS: -+ pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SUM_ROWS].pipeline; -+ break; -+ case GGML_OP_MEAN: -+ pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MEAN].pipeline; -+ break; -+ default: -+ GGML_ABORT("fatal error"); -+ } -+ -+ int nth = 32; // SIMD width -+ -+ while (nth < ne00 && nth < (int) pipeline.maxTotalThreadsPerThreadgroup) { -+ nth *= 2; -+ } - -+ nth = MIN(nth, ne00); - - ggml_metal_kargs_sum_rows args = { - /*.ne00 =*/ ne00, -@@ -2396,11 +2418,12 @@ static bool ggml_metal_encode_node( - }; - - [encoder setComputePipelineState:pipeline]; -- [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0]; -- [encoder setBuffer:id_dst offset:offs_dst atIndex:1]; -- [encoder setBytes:&args length:sizeof(args) atIndex:2]; -+ [encoder setBytes:&args length:sizeof(args) atIndex:0]; -+ [encoder setBuffer:id_src0 offset:offs_src0 atIndex:1]; -+ [encoder setBuffer:id_dst offset:offs_dst atIndex:2]; -+ [encoder setThreadgroupMemoryLength:32*sizeof(float) atIndex:0]; - -- [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)]; -+ [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)]; - } break; - case GGML_OP_SOFT_MAX: - { -diff --git a/ggml/src/ggml-metal/ggml-metal.metal b/ggml/src/ggml-metal/ggml-metal.metal -index 9cfddf45..08e8d807 100644 ---- a/ggml/src/ggml-metal/ggml-metal.metal -+++ b/ggml/src/ggml-metal/ggml-metal.metal -@@ -956,31 +956,61 @@ kernel void kernel_neg( - dst[tpig] = -src0[tpig]; - } - -+template - kernel void kernel_sum_rows( -+ constant ggml_metal_kargs_sum_rows & args, - device const float * src0, - device float * dst, -- constant ggml_metal_kargs_sum_rows & args, -- uint3 tpig[[thread_position_in_grid]]) { -- int64_t i3 = tpig.z; -- int64_t i2 = tpig.y; -- int64_t i1 = tpig.x; -+ threadgroup float * shmem_f32 [[threadgroup(0)]], -+ uint3 tgpig[[threadgroup_position_in_grid]], -+ ushort3 tpitg[[thread_position_in_threadgroup]], -+ ushort sgitg[[simdgroup_index_in_threadgroup]], -+ ushort tiisg[[thread_index_in_simdgroup]], -+ ushort3 ntg[[threads_per_threadgroup]]) { -+ int64_t i3 = tgpig.z; -+ int64_t i2 = tgpig.y; -+ int64_t i1 = tgpig.x; - - if (i3 >= args.ne03 || i2 >= args.ne02 || i1 >= args.ne01) { - return; - } - -+ if (sgitg == 0) { -+ shmem_f32[tiisg] = 0.0f; -+ } -+ - device const float * src_row = (device const float *) ((device const char *) src0 + i1*args.nb01 + i2*args.nb02 + i3*args.nb03); - device float * dst_row = (device float *) ((device char *) dst + i1*args.nb1 + i2*args.nb2 + i3*args.nb3); - -- float row_sum = 0; -+ float sumf = 0; - -- for (int64_t i0 = 0; i0 < args.ne00; i0++) { -- row_sum += src_row[i0]; -+ for (int64_t i0 = tpitg.x; i0 < args.ne00; i0 += ntg.x) { -+ sumf += src_row[i0]; - } - -- dst_row[0] = row_sum; -+ sumf = simd_sum(sumf); -+ -+ threadgroup_barrier(mem_flags::mem_threadgroup); -+ -+ if (tiisg == 0) { -+ shmem_f32[sgitg] = sumf; -+ } -+ -+ threadgroup_barrier(mem_flags::mem_threadgroup); -+ -+ sumf = shmem_f32[tiisg]; -+ 
sumf = simd_sum(sumf); -+ -+ if (tpitg.x == 0) { -+ dst_row[0] = norm ? sumf / args.ne00 : sumf; -+ } - } - -+typedef decltype(kernel_sum_rows) kernel_sum_rows_t; -+ -+template [[host_name("kernel_sum_rows")]] kernel kernel_sum_rows_t kernel_sum_rows; -+template [[host_name("kernel_mean")]] kernel kernel_sum_rows_t kernel_sum_rows; -+ - template - kernel void kernel_soft_max( - device const char * src0, diff --git a/llama/patches/0020-CUDA-add-mean-operation-14313.patch b/llama/patches/0020-CUDA-add-mean-operation-14313.patch deleted file mode 100644 index 2f4e37949..000000000 --- a/llama/patches/0020-CUDA-add-mean-operation-14313.patch +++ /dev/null @@ -1,5089 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Aman Gupta -Date: Sun, 22 Jun 2025 12:39:54 +0800 -Subject: [PATCH] CUDA: add mean operation (#14313) - -* CUDA: add mean operation - -* add back sum_rows_f32_cuda - -* Review: early exit if col!=0 ---- - ggml/src/ggml-cuda/common.cuh | 20 + - ggml/src/ggml-cuda/ggml-cuda.cu | 5 + - ggml/src/ggml-cuda/mean.cu | 19 + - ggml/src/ggml-cuda/mean.cuh | 3 + - ggml/src/ggml-cuda/sumrows.cu | 23 +- - ggml/src/ggml-cuda/sumrows.cuh | 1 - - tests/test-backend-ops.cpp | 2990 ++++++++++++++++--------------- - 7 files changed, 1554 insertions(+), 1507 deletions(-) - create mode 100644 ggml/src/ggml-cuda/mean.cu - create mode 100644 ggml/src/ggml-cuda/mean.cuh - -diff --git a/ggml/src/ggml-cuda/common.cuh b/ggml/src/ggml-cuda/common.cuh -index 64fb4ff4..5b9a0fe3 100644 ---- a/ggml/src/ggml-cuda/common.cuh -+++ b/ggml/src/ggml-cuda/common.cuh -@@ -362,6 +362,26 @@ static __device__ __forceinline__ half2 warp_reduce_sum(half2 a) { - #endif // FP16_AVAILABLE - } - -+// Row reduction kernel template - compute sum (norm=false) or mean (norm=true) -+template -+static __global__ void reduce_rows_f32(const float * x, float * dst, const int ncols) { -+ const int row = blockIdx.x; -+ const int col = threadIdx.x; -+ -+ float sum = 0.0f; -+ for (int i = col; i < ncols; i += blockDim.x) { -+ sum += x[row * ncols + i]; -+ } -+ -+ sum = warp_reduce_sum(sum); -+ -+ if (col != 0) { -+ return; -+ } -+ -+ dst[row] = norm ? 
sum / ncols : sum; -+} -+ - template - static __device__ __forceinline__ float warp_reduce_max(float x) { - #pragma unroll -diff --git a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu -index d6960174..2b9fabf4 100644 ---- a/ggml/src/ggml-cuda/ggml-cuda.cu -+++ b/ggml/src/ggml-cuda/ggml-cuda.cu -@@ -35,6 +35,7 @@ - #include "ggml-cuda/ssm-scan.cuh" - #include "ggml-cuda/sum.cuh" - #include "ggml-cuda/sumrows.cuh" -+#include "ggml-cuda/mean.cuh" - #include "ggml-cuda/tsembd.cuh" - #include "ggml-cuda/unary.cuh" - #include "ggml-cuda/upscale.cuh" -@@ -2322,6 +2323,9 @@ static bool ggml_cuda_compute_forward(ggml_backend_cuda_context & ctx, struct gg - case GGML_OP_SUM_ROWS: - ggml_cuda_op_sum_rows(ctx, dst); - break; -+ case GGML_OP_MEAN: -+ ggml_cuda_op_mean(ctx, dst); -+ break; - case GGML_OP_SSM_CONV: - ggml_cuda_op_ssm_conv(ctx, dst); - break; -@@ -3211,6 +3215,7 @@ static bool ggml_backend_cuda_device_supports_op(ggml_backend_dev_t dev, const g - case GGML_OP_POOL_2D: - case GGML_OP_SUM: - case GGML_OP_SUM_ROWS: -+ case GGML_OP_MEAN: - case GGML_OP_ARGSORT: - case GGML_OP_ACC: - return true; -diff --git a/ggml/src/ggml-cuda/mean.cu b/ggml/src/ggml-cuda/mean.cu -new file mode 100644 -index 00000000..4b238a39 ---- /dev/null -+++ b/ggml/src/ggml-cuda/mean.cu -@@ -0,0 +1,19 @@ -+#include "mean.cuh" -+ -+void ggml_cuda_op_mean(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { -+ const ggml_tensor * src0 = dst->src[0]; -+ const float * src0_d = (const float *) src0->data; -+ float * dst_d = (float *) dst->data; -+ cudaStream_t stream = ctx.stream(); -+ -+ GGML_ASSERT(src0->type == GGML_TYPE_F32); -+ GGML_ASSERT(dst->type == GGML_TYPE_F32); -+ GGML_ASSERT(ggml_is_contiguous(src0)); -+ -+ const int64_t ncols = src0->ne[0]; -+ const int64_t nrows = ggml_nrows(src0); -+ -+ const dim3 block_dims(WARP_SIZE, 1, 1); -+ const dim3 block_nums(nrows, 1, 1); -+ reduce_rows_f32<<>>(src0_d, dst_d, ncols); -+} -diff --git a/ggml/src/ggml-cuda/mean.cuh b/ggml/src/ggml-cuda/mean.cuh -new file mode 100644 -index 00000000..2b9b1043 ---- /dev/null -+++ b/ggml/src/ggml-cuda/mean.cuh -@@ -0,0 +1,3 @@ -+#include "common.cuh" -+ -+void ggml_cuda_op_mean(ggml_backend_cuda_context & ctx, ggml_tensor * dst); -diff --git a/ggml/src/ggml-cuda/sumrows.cu b/ggml/src/ggml-cuda/sumrows.cu -index 38dbf1b5..2eee08fa 100644 ---- a/ggml/src/ggml-cuda/sumrows.cu -+++ b/ggml/src/ggml-cuda/sumrows.cu -@@ -1,25 +1,9 @@ - #include "sumrows.cuh" - --static __global__ void k_sum_rows_f32(const float * x, float * dst, const int ncols) { -- const int row = blockIdx.x; -- const int col = threadIdx.x; -- -- float sum = 0.0f; -- for (int i = col; i < ncols; i += blockDim.x) { -- sum += x[row * ncols + i]; -- } -- -- sum = warp_reduce_sum(sum); -- -- if (col == 0) { -- dst[row] = sum; -- } --} -- - void sum_rows_f32_cuda(const float * x, float * dst, const int ncols, const int nrows, cudaStream_t stream) { - const dim3 block_dims(WARP_SIZE, 1, 1); - const dim3 block_nums(nrows, 1, 1); -- k_sum_rows_f32<<>>(x, dst, ncols); -+ reduce_rows_f32<<>>(x, dst, ncols); - } - - void ggml_cuda_op_sum_rows(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { -@@ -35,5 +19,8 @@ void ggml_cuda_op_sum_rows(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { - const int64_t ncols = src0->ne[0]; - const int64_t nrows = ggml_nrows(src0); - -- sum_rows_f32_cuda(src0_d, dst_d, ncols, nrows, stream); -+ const dim3 block_dims(WARP_SIZE, 1, 1); -+ const dim3 block_nums(nrows, 1, 1); -+ -+ reduce_rows_f32<<>>(src0_d, dst_d, ncols); - } -diff 
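The two deleted mean patches above were upstream cherry-picks, presumably dropped because the new FETCH_HEAD already carries them. Their shared kernel shape is still worth noting: a single reduce_rows_f32<norm> template computes either the row sum or the row mean from the same loop. A CPU analogue of that template, not the CUDA kernel itself:

#include <cstdio>
#include <vector>

// CPU analogue of reduce_rows_f32<norm>: the same loop yields the row sum
// when norm is false and the row mean when norm is true.
template <bool norm>
void reduce_rows_f32(const float * x, float * dst, int ncols, int nrows) {
    for (int row = 0; row < nrows; ++row) {
        float sum = 0.0f;
        for (int i = 0; i < ncols; ++i) {
            sum += x[row * ncols + i];
        }
        dst[row] = norm ? sum / ncols : sum;
    }
}

int main() {
    const std::vector<float> x = {1, 2, 3, 4, 5, 6}; // two rows of three
    float out[2];
    reduce_rows_f32<true>(x.data(), out, 3, 2);
    std::printf("means: %g %g\n", out[0], out[1]); // 2 and 5
}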
-diff --git a/ggml/src/ggml-cuda/sumrows.cuh b/ggml/src/ggml-cuda/sumrows.cuh
-index 191db1c1..3431c599 100644
---- a/ggml/src/ggml-cuda/sumrows.cuh
-+++ b/ggml/src/ggml-cuda/sumrows.cuh
-@@ -1,5 +1,4 @@
- #include "common.cuh"
- 
- void sum_rows_f32_cuda(const float * x, float * dst, const int ncols, const int nrows, cudaStream_t stream);
--
- void ggml_cuda_op_sum_rows(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
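At the graph level, `GGML_OP_MEAN` is what `ggml_mean()` lowers to: a reduction of dimension 0 down to a single column. A small illustrative snippet (hypothetical standalone usage, not part of the patch):

```cpp
#include "ggml.h"

// Reduce each row of x (shape [ncols, nrows, 1, 1]) to its mean, giving [1, nrows, 1, 1].
// With this patch the CUDA backend executes the resulting GGML_OP_MEAN node directly
// instead of falling back to the CPU.
ggml_tensor * row_means(ggml_context * ctx, ggml_tensor * x) {
    return ggml_mean(ctx, x);
}
```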
-diff --git a/tests/test-backend-ops.cpp b/tests/test-backend-ops.cpp
-index 543db934..58bdc874 100644
---- a/tests/test-backend-ops.cpp
-+++ b/tests/test-backend-ops.cpp
-@@ -9,16 +9,14 @@
- // Quick start for adding a new GGML op: Go to section 2 and create a struct that inherits from test_case,
- // then go to section 3 and add an instantiation of your struct.
- 
--
- // ##############################
- // ## Section 1: General Setup ##
- // ##############################
- 
--
--#include
- #include
- #include
- #include
-+#include
- 
- #include
- #include
-@@ -37,24 +35,26 @@
- #include
- 
- static void init_tensor_uniform(ggml_tensor * tensor, float min = -1.0f, float max = 1.0f) {
--    size_t nels = ggml_nelements(tensor);
-+    size_t             nels = ggml_nelements(tensor);
-     std::vector<float> data(nels);
-     {
-         // parallel initialization
--        static const size_t n_threads = std::thread::hardware_concurrency();
-+        static const size_t n_threads = std::thread::hardware_concurrency();
-         // static RNG initialization (revisit if n_threads stops being constant)
-         static std::vector<std::default_random_engine> generators = []() {
--            std::random_device rd;
-+            std::random_device                      rd;
-             std::vector<std::default_random_engine> vec;
-             vec.reserve(n_threads);
-             //for (size_t i = 0; i < n_threads; i++) { vec.emplace_back(1234 + i); } // fixed seed
--            for (size_t i = 0; i < n_threads; i++) { vec.emplace_back(rd()); }
-+            for (size_t i = 0; i < n_threads; i++) {
-+                vec.emplace_back(rd());
-+            }
-             return vec;
-         }();
- 
-         auto init_thread = [&](size_t ith, size_t start, size_t end) {
-             std::uniform_real_distribution<float> distribution(min, max);
--            auto & gen = generators[ith];
-+            auto &                                gen = generators[ith];
-             for (size_t i = start; i < end; i++) {
-                 data[i] = distribution(gen);
-             }
-@@ -63,8 +63,8 @@ static void init_tensor_uniform(ggml_tensor * tensor, float min = -1.0f, float m
-         std::vector<std::future<void>> tasks;
-         tasks.reserve(n_threads);
-         for (size_t i = 0; i < n_threads; i++) {
--            size_t start = i*nels/n_threads;
--            size_t end = (i+1)*nels/n_threads;
-+            size_t start = i * nels / n_threads;
-+            size_t end   = (i + 1) * nels / n_threads;
-             tasks.push_back(std::async(std::launch::async, init_thread, i, start, end));
-         }
-         for (auto & t : tasks) {
-@@ -77,13 +77,13 @@ static void init_tensor_uniform(ggml_tensor * tensor, float min = -1.0f, float m
-     } else if (ggml_is_quantized(tensor->type) || tensor->type == GGML_TYPE_F16 || tensor->type == GGML_TYPE_BF16) {
-         GGML_ASSERT(nels % ggml_blck_size(tensor->type) == 0);
- 
--        // dummy importance matrix
-+        // dummy importance matrix
-         std::vector<float> imatrix(tensor->ne[0], 1.0f);
--        const float * im = imatrix.data();
-+        const float *      im = imatrix.data();
-         if (!ggml_quantize_requires_imatrix(tensor->type)) {
-             // when the imatrix is optional, we want to test both quantization with and without imatrix
-             // use one of the random numbers to decide
--            if (data[0] > 0.5f*(min + max)) {
-+            if (data[0] > 0.5f * (min + max)) {
-                 im = nullptr;
-             }
-         }
-@@ -92,21 +92,21 @@ static void init_tensor_uniform(ggml_tensor * tensor, float min = -1.0f, float m
-         {
-             // parallel quantization by block
-             size_t blck_size = ggml_blck_size(tensor->type);
--            size_t n_blocks = nels / blck_size;
-+            size_t n_blocks  = nels / blck_size;
- 
-             auto quantize_thread = [&](size_t start, size_t end) {
--                ggml_quantize_chunk(tensor->type, data.data(), dataq.data(),
--                    start * blck_size, end - start, blck_size, im);
-+                ggml_quantize_chunk(tensor->type, data.data(), dataq.data(), start * blck_size, end - start, blck_size,
-+                                    im);
-             };
- 
--            const size_t min_blocks_per_thread = 1;
--            const size_t n_threads = std::min<size_t>(std::thread::hardware_concurrency()/2,
--                                                      std::max<size_t>(1, n_blocks / min_blocks_per_thread));
-+            const size_t min_blocks_per_thread = 1;
-+            const size_t n_threads             = std::min<size_t>(std::thread::hardware_concurrency() / 2,
-+                                                                  std::max<size_t>(1, n_blocks / min_blocks_per_thread));
-             std::vector<std::future<void>> tasks;
-             tasks.reserve(n_threads);
-             for (size_t i = 0; i < n_threads; i++) {
--                size_t start = i*n_blocks/n_threads;
--                size_t end = (i+1)*n_blocks/n_threads;
-+                size_t start = i * n_blocks / n_threads;
-+                size_t end   = (i + 1) * n_blocks / n_threads;
-                 tasks.push_back(std::async(std::launch::async, quantize_thread, start, end));
-             }
-             for (auto & t : tasks) {
-@@ -119,9 +119,9 @@ static void init_tensor_uniform(ggml_tensor * tensor, float min = -1.0f, float m
-         ggml_backend_tensor_set(tensor, data.data(), 0, ggml_nbytes(tensor));
-     } else if (tensor->type == GGML_TYPE_I64) {
-         // Integers with a size of 8 bytes can be set by mirroring the float data, the specific values are again not really meaningful.
--        const size_t nbytes_half = ggml_nbytes(tensor)/2;
--        ggml_backend_tensor_set(tensor, data.data(), 0*nbytes_half, nbytes_half);
--        ggml_backend_tensor_set(tensor, data.data(), 1*nbytes_half, nbytes_half);
-+        const size_t nbytes_half = ggml_nbytes(tensor) / 2;
-+        ggml_backend_tensor_set(tensor, data.data(), 0 * nbytes_half, nbytes_half);
-+        ggml_backend_tensor_set(tensor, data.data(), 1 * nbytes_half, nbytes_half);
-     } else {
-         GGML_ABORT("fatal error");
-     }
-@@ -134,31 +134,31 @@ static std::vector<float> tensor_to_float(const ggml_tensor * t) {
-     std::vector<uint8_t> buf(ggml_nbytes(t));
-     ggml_backend_tensor_get(t, buf.data(), 0, ggml_nbytes(t));
- 
--    const auto * tt = ggml_get_type_traits(t->type);
--    size_t bs = ggml_blck_size(t->type);
-+    const auto *       tt = ggml_get_type_traits(t->type);
-+    size_t             bs = ggml_blck_size(t->type);
-     std::vector<float> vq(ggml_blck_size(t->type));
--    bool quantized = ggml_is_quantized(t->type);
-+    bool               quantized = ggml_is_quantized(t->type);
- 
-     // access elements by index to avoid gaps in views
-     for (int64_t i3 = 0; i3 < t->ne[3]; i3++) {
-         for (int64_t i2 = 0; i2 < t->ne[2]; i2++) {
-             for (int64_t i1 = 0; i1 < t->ne[1]; i1++) {
-                 for (int64_t i0 = 0; i0 < t->ne[0]; i0 += bs) {
--                    size_t i = i3*t->nb[3] + i2*t->nb[2] + i1*t->nb[1] + i0/bs*t->nb[0];
-+                    size_t i = i3 * t->nb[3] + i2 * t->nb[2] + i1 * t->nb[1] + i0 / bs * t->nb[0];
-                     if (t->type == GGML_TYPE_F16) {
--                        tv.push_back(ggml_fp16_to_fp32(*(ggml_fp16_t*)&buf[i]));
-+                        tv.push_back(ggml_fp16_to_fp32(*(ggml_fp16_t *) &buf[i]));
-                     } else if (t->type == GGML_TYPE_BF16) {
--                        tv.push_back(ggml_bf16_to_fp32(*(ggml_bf16_t*)&buf[i]));
-+                        tv.push_back(ggml_bf16_to_fp32(*(ggml_bf16_t *) &buf[i]));
-                     } else if (t->type == GGML_TYPE_F32) {
-                         tv.push_back(*(float *) &buf[i]);
-                     } else if (t->type == GGML_TYPE_I64) {
--                        tv.push_back((float)*(int64_t *) &buf[i]);
-+                        tv.push_back((float) *(int64_t *) &buf[i]);
-                     } else if (t->type == GGML_TYPE_I32) {
--                        tv.push_back((float)*(int32_t *) &buf[i]);
-+                        tv.push_back((float) *(int32_t *) &buf[i]);
-                     } else if (t->type == GGML_TYPE_I16) {
--                        tv.push_back((float)*(int16_t *) &buf[i]);
-+                        tv.push_back((float) *(int16_t *) &buf[i]);
-                     } else if (t->type == GGML_TYPE_I8) {
--                        tv.push_back((float)*(int8_t *) &buf[i]);
-+                        tv.push_back((float) *(int8_t *) &buf[i]);
-                     } else if (quantized) {
-                         tt->to_float(&buf[i], vq.data(), bs);
-                         tv.insert(tv.end(), vq.begin(), vq.end());
-@@ -195,7 +195,8 @@ static double nmse(const float * a, const float * b, size_t n) {
- // n: number of values to compare.
- // expected_vals: optional vector of expected values for a. If expected_vals is not empty, filter out all comparisons where
- // a does not match any of the expected values. Needed for noncontinuous gradients where the numerical calculation can fail.
--static double mean_abs_asymm(const float * a, const float * b, const size_t n, const std::vector<float> & expected_vals) {
-+static double mean_abs_asymm(const float * a, const float * b, const size_t n,
-+                             const std::vector<float> & expected_vals) {
-     double sum = 0.0f;
- 
-     size_t nvalid = 0;
-@@ -219,18 +220,16 @@ static double mean_abs_asymm(const float * a, const float * b, const size_t n, c
-         nvalid++;
-     }
- 
--    return sum/nvalid;
-+    return sum / nvalid;
- }
- 
- // utils for printing the variables of the test cases
- 
--template <typename T>
--static std::string var_to_str(const T & x) {
-+template <typename T> static std::string var_to_str(const T & x) {
-     return std::to_string(x);
- }
- 
--template <typename T, size_t N>
--static std::string var_to_str(const T (&x)[N]) {
-+template <typename T, size_t N> static std::string var_to_str(const T (&x)[N]) {
-     std::string s = "[";
-     for (size_t i = 0; i < N; i++) {
-         if (i > 0) {
-@@ -242,8 +241,7 @@ static std::string var_to_str(const T (&x)[N]) {
-     return s;
- }
- 
--template <typename T, size_t N>
--static std::string var_to_str(const std::array<T, N> & x) {
-+template <typename T, size_t N> static std::string var_to_str(const std::array<T, N> & x) {
-     std::string s = "[";
-     for (size_t i = 0; i < N; i++) {
-         if (i > 0) {
-@@ -265,41 +263,50 @@ static std::string var_to_str(ggml_prec prec) {
- 
- static std::string var_to_str(ggml_op_pool pool) {
-     switch (pool) {
--        case GGML_OP_POOL_AVG: return "avg";
--        case GGML_OP_POOL_MAX: return "max";
--        default: return std::to_string(pool);
-+        case GGML_OP_POOL_AVG:
-+            return "avg";
-+        case GGML_OP_POOL_MAX:
-+            return "max";
-+        default:
-+            return std::to_string(pool);
-     }
- }
- 
- static std::string var_to_str(ggml_scale_mode mode) {
-     switch (mode) {
--        case GGML_SCALE_MODE_NEAREST: return "nearest";
--        case GGML_SCALE_MODE_BILINEAR: return "bilinear";
--        default: return std::to_string(mode);
-+        case GGML_SCALE_MODE_NEAREST:
-+            return "nearest";
-+        case GGML_SCALE_MODE_BILINEAR:
-+            return "bilinear";
-+        default:
-+            return std::to_string(mode);
-     }
- }
- 
- #define VAR_TO_STR(x) (#x "=" + var_to_str(x))
- 
--#define VARS_TO_STR1(a) VAR_TO_STR(a)
--#define VARS_TO_STR2(a, b) VAR_TO_STR(a) + "," + VAR_TO_STR(b)
--#define VARS_TO_STR3(a, b, c) VAR_TO_STR(a) + "," + VARS_TO_STR2(b, c)
--#define VARS_TO_STR4(a, b, c, d) VAR_TO_STR(a) + "," + VARS_TO_STR3(b, c, d)
--#define VARS_TO_STR5(a, b, c, d, e) VAR_TO_STR(a) + "," + VARS_TO_STR4(b, c, d, e)
--#define VARS_TO_STR6(a, b, c, d, e, f) VAR_TO_STR(a) + "," + VARS_TO_STR5(b, c, d, e, f)
--#define VARS_TO_STR7(a, b, c, d, e, f, g) VAR_TO_STR(a) + "," + VARS_TO_STR6(b, c, d, e, f, g)
--#define VARS_TO_STR8(a, b, c, d, e, f, g, h) VAR_TO_STR(a) + "," + VARS_TO_STR7(b, c, d, e, f, g, h)
--#define VARS_TO_STR9(a, b, c, d, e, f, g, h, i) VAR_TO_STR(a) + "," + VARS_TO_STR8(b, c, d, e, f, g, h, i)
--#define VARS_TO_STR10(a, b, c, d, e, f, g, h, i, j) VAR_TO_STR(a) + "," + VARS_TO_STR9(b, c, d, e, f, g, h, i, j)
-+#define VARS_TO_STR1(a) VAR_TO_STR(a)
-+#define VARS_TO_STR2(a, b) VAR_TO_STR(a) + "," + VAR_TO_STR(b)
-+#define VARS_TO_STR3(a, b, c) VAR_TO_STR(a) + "," + VARS_TO_STR2(b, c)
-+#define VARS_TO_STR4(a, b, c, d) VAR_TO_STR(a) + "," + VARS_TO_STR3(b, c, d)
-+#define VARS_TO_STR5(a, b, c, d, e) VAR_TO_STR(a) + "," + VARS_TO_STR4(b, c, d, e)
-+#define VARS_TO_STR6(a, b, c, d, e, f) VAR_TO_STR(a) + "," + VARS_TO_STR5(b, c, d, e, f)
-+#define VARS_TO_STR7(a, b, c, d, e, f, g) VAR_TO_STR(a) + "," + VARS_TO_STR6(b, c, d, e, f, g)
-+#define VARS_TO_STR8(a, b, c, d, e, f, g, h) VAR_TO_STR(a) + "," + VARS_TO_STR7(b, c, d, e, f, g, h)
-+#define VARS_TO_STR9(a, b, c, d, e, f, g, h, i) VAR_TO_STR(a) + "," + VARS_TO_STR8(b, c, d, e, f, g, h, i)
-+#define VARS_TO_STR10(a, b, c, d, e, f, g, h, i, j) VAR_TO_STR(a) + "," + VARS_TO_STR9(b, c, d, e, f, g, h, i, j)
- #define VARS_TO_STR11(a, b, c, d, e, f, g, h, i, j, k) VAR_TO_STR(a) + "," + VARS_TO_STR10(b, c, d, e, f, g, h, i, j, k)
--#define VARS_TO_STR12(a, b, c, d, e, f, g, h, i, j, k, l) VAR_TO_STR(a) + "," + VARS_TO_STR11(b, c, d, e, f, g, h, i, j, k, l)
-+#define VARS_TO_STR12(a, b, c, d, e, f, g, h, i, j, k, l) \
-+    VAR_TO_STR(a) + "," + VARS_TO_STR11(b, c, d, e, f, g, h, i, j, k, l)
- 
- #ifdef GGML_USE_SYCL
- static bool inline _isinf(float f) {
--    return (*(uint32_t *)&f & 0x7fffffff) == 0x7f800000;
-+    return (*(uint32_t *) &f & 0x7fffffff) == 0x7f800000;
- }
- #else
--static bool inline _isinf(float f) { return std::isinf(f); }
-+static bool inline _isinf(float f) {
-+    return std::isinf(f);
-+}
- #endif
- 
- // accept FLT_MAX as infinity
-@@ -320,45 +327,29 @@ enum test_mode {
- struct test_case {
-     virtual ~test_case() {}
- 
--    virtual std::string op_desc(ggml_tensor * t) {
--        return ggml_op_desc(t);
--    }
-+    virtual std::string op_desc(ggml_tensor * t) { return ggml_op_desc(t); }
- 
--    virtual std::string vars() {
--        return "";
--    }
-+    virtual std::string vars() { return ""; }
- 
-     virtual ggml_tensor * build_graph(ggml_context * ctx) = 0;
- 
--    virtual double max_nmse_err() {
--        return 1e-7;
--    }
-+    virtual double max_nmse_err() { return 1e-7; }
- 
--    virtual double max_maa_err() {
--        return 1e-4;
--    }
-+    virtual double max_maa_err() { return 1e-4; }
- 
--    virtual float grad_eps() {
--        return 1e-1f;
--    }
-+    virtual float grad_eps() { return 1e-1f; }
- 
-     // If false, estimate gradient with 2 points, neglects 3rd order derivative and higher.
-     // If true, estimate gradient with 4 points, neglects 5th order derivative and higher.
--    virtual bool grad_precise() {
--        return false;
--    }
-+    virtual bool grad_precise() { return false; }
- 
-     // Skip gradient checks if total number of gradients to be checked is larger than this (to speed up the tests).
--    virtual int64_t grad_nmax() {
--        return 10000;
--    }
-+    virtual int64_t grad_nmax() { return 10000; }
- 
-     // No effect if empty.
-     // If not empty, skip all gradient checks where the numerical result does not match any of the values.
-     // Needed for dealing with noncontinuous gradients (e.g. ReLU) where estimation using finite differences is unreliable.
--    virtual std::vector<float> grad_expect() {
--        return {};
--    }
-+    virtual std::vector<float> grad_expect() { return {}; }
- 
-     virtual void initialize_tensors(ggml_context * ctx) {
-         for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != nullptr; t = ggml_get_next_tensor(ctx, t)) {
-@@ -426,7 +417,8 @@ struct test_case {
-         return t;
-     }
- 
--    ggml_tensor * ggml_new_tensor_4d(ggml_context * ctx, ggml_type type, int64_t ne0, int64_t ne1, int64_t ne2, int64_t ne3) {
-+    ggml_tensor * ggml_new_tensor_4d(ggml_context * ctx, ggml_type type, int64_t ne0, int64_t ne1, int64_t ne2,
-+                                     int64_t ne3) {
-         ggml_tensor * t = ::ggml_new_tensor_4d(ctx, type, ne0, ne1, ne2, ne3);
-         add_sentinel(ctx);
-         return t;
-@@ -436,7 +428,7 @@ struct test_case {
-         mode = MODE_TEST;
- 
-         ggml_init_params params = {
--            /* .mem_size = */ ggml_tensor_overhead()*128 + ggml_graph_overhead(),
-+            /* .mem_size = */ ggml_tensor_overhead() * 128 + ggml_graph_overhead(),
-             /* .mem_base = */ NULL,
-             /* .no_alloc = */ true,
-         };
-@@ -461,7 +453,7 @@ struct test_case {
- 
-         // check if the backends support the ops
-         bool supported = true;
--        for (ggml_backend_t backend : {backend1, backend2}) {
-+        for (ggml_backend_t backend : { backend1, backend2 }) {
-             for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
-                 if (!ggml_backend_supports_op(backend, t)) {
-                     printf("not supported [%s] ", ggml_backend_name(backend));
-@@ -501,23 +493,18 @@ struct test_case {
- 
-         // compare
-         struct callback_userdata {
--            bool ok;
--            double max_err;
-+            bool           ok;
-+            double         max_err;
-             ggml_backend_t backend1;
-             ggml_backend_t backend2;
-         };
- 
--        callback_userdata ud {
--            true,
--            max_nmse_err(),
--            backend1,
--            backend2
--        };
-+        callback_userdata ud{ true, max_nmse_err(), backend1, backend2 };
- 
-         auto callback = [](int index, ggml_tensor * t1, ggml_tensor * t2, void * user_data) -> bool {
--            callback_userdata * ud = (callback_userdata *) user_data;
--            const char * bn1 = ggml_backend_name(ud->backend1);
--            const char * bn2 = ggml_backend_name(ud->backend2);
-+            callback_userdata * ud  = (callback_userdata *) user_data;
-+            const char *        bn1 = ggml_backend_name(ud->backend1);
-+            const char *        bn2 = ggml_backend_name(ud->backend2);
- 
-             if (t1->op == GGML_OP_NONE) {
-                 // sentinels must be unchanged
-@@ -599,11 +586,11 @@ struct test_case {
-         static const size_t graph_nodes = 8192;
- 
-         ggml_init_params params = {
--            /* .mem_size = */ ggml_tensor_overhead()*128 + ggml_graph_overhead_custom(graph_nodes, false),
-+            /* .mem_size = */ ggml_tensor_overhead() * 128 + ggml_graph_overhead_custom(graph_nodes, false),
-             /* .mem_base = */ NULL,
-             /* .no_alloc = */ true,
-         };
--        ggml_context_ptr ctx(ggml_init(params)); // smart ptr
-+        ggml_context_ptr ctx(ggml_init(params));  // smart ptr
-         GGML_ASSERT(ctx);
- 
-         ggml_tensor * out = build_graph(ctx.get());
-@@ -624,14 +611,14 @@ struct test_case {
- 
-         // align while also leaving some margin for variations in parameters
-         int align = 8;
--        int last = (len + align - 1) / align * align;
-+        int last  = (len + align - 1) / align * align;
-         if (last - len < 5) {
-             last += align;
-         }
-         printf("%*s", last - len, "");
- 
-         // allocate
--        ggml_backend_buffer_ptr buf(ggml_backend_alloc_ctx_tensors(ctx.get(), backend)); // smart ptr
-+        ggml_backend_buffer_ptr buf(ggml_backend_alloc_ctx_tensors(ctx.get(), backend));  // smart ptr
- 
-         if (buf == NULL) {
-             printf("failed to allocate tensors\n");
-@@ -648,26 +635,27 @@ struct test_case {
-         // warmup run
-         ggml_status status = ggml_backend_graph_compute(backend, gf);
-         if (status != GGML_STATUS_SUCCESS) {
--            fprintf(stderr, "%s: ggml_backend_graph_compute failed. status=%s \n", __func__, ggml_status_to_string(status));
-+            fprintf(stderr, "%s: ggml_backend_graph_compute failed. status=%s \n", __func__,
-+                    ggml_status_to_string(status));
-             return false;
-         }
- 
-         // determine number of runs
--        int n_runs;
-+        int  n_runs;
-         bool is_cpu = ggml_backend_dev_type(ggml_backend_get_device(backend)) == GGML_BACKEND_DEVICE_TYPE_CPU;
-         if (op_flops(out) > 0) {
-             // based on flops
--            const uint64_t GFLOP = 1000 * 1000 * 1000;
--            const uint64_t target_flops_cpu = 8ULL * GFLOP;
-+            const uint64_t GFLOP            = 1000 * 1000 * 1000;
-+            const uint64_t target_flops_cpu = 8ULL * GFLOP;
-             const uint64_t target_flops_gpu = 100ULL * GFLOP;
--            uint64_t target_flops = is_cpu ? target_flops_cpu : target_flops_gpu;
-+            uint64_t       target_flops     = is_cpu ? target_flops_cpu : target_flops_gpu;
-             n_runs = std::min<int>(ggml_graph_size(gf) - ggml_graph_n_nodes(gf), target_flops / op_flops(out)) + 1;
-         } else {
-             // based on memory size
--            const size_t GB = 1ULL << 30;
--            const size_t target_size_cpu = 8 * GB;
-+            const size_t GB              = 1ULL << 30;
-+            const size_t target_size_cpu = 8 * GB;
-             const size_t target_size_gpu = 32 * GB;
--            size_t target_size = is_cpu ? target_size_cpu : target_size_gpu;
-+            size_t       target_size     = is_cpu ? target_size_cpu : target_size_gpu;
-             n_runs = std::min<int>(ggml_graph_size(gf) - ggml_graph_n_nodes(gf), target_size / op_size(out)) + 1;
-         }
- 
-@@ -677,8 +665,8 @@ struct test_case {
-         }
- 
-         // calculate memory
--        size_t mem = n_runs * op_size(out);
--        auto tensor_op_size = [](ggml_tensor * t) {
-+        size_t mem            = n_runs * op_size(out);
-+        auto   tensor_op_size = [](ggml_tensor * t) {
-             size_t size = ggml_nbytes(t);
-             // add source tensors
-             for (int i = 0; i < GGML_MAX_SRC; i++) {
-@@ -697,13 +685,14 @@ struct test_case {
- 
-         // run
-         int64_t total_time_us = 0;
--        int64_t total_mem = 0;
--        int total_runs = 0;
-+        int64_t total_mem     = 0;
-+        int     total_runs    = 0;
-         do {
--            int64_t start_time = ggml_time_us();
--            ggml_status status = ggml_backend_graph_compute(backend, gf);
-+            int64_t     start_time = ggml_time_us();
-+            ggml_status status     = ggml_backend_graph_compute(backend, gf);
-             if (status != GGML_STATUS_SUCCESS) {
--                fprintf(stderr, "%s: ggml_backend_graph_compute failed. status=%s \n", __func__, ggml_status_to_string(status));
-+                fprintf(stderr, "%s: ggml_backend_graph_compute failed. status=%s \n", __func__,
-+                        ggml_status_to_string(status));
-                 return false;
-             }
-             int64_t end_time = ggml_time_us();
-@@ -711,15 +700,13 @@ struct test_case {
-             total_time_us += end_time - start_time;
-             total_mem += mem;
-             total_runs += n_runs;
--        } while (total_time_us < 1000*1000); // run for at least 1 second
-+        } while (total_time_us < 1000 * 1000);  // run for at least 1 second
- 
--        printf(" %8d runs - %8.2f us/run - ",
--               total_runs,
--               (double)total_time_us / total_runs);
-+        printf(" %8d runs - %8.2f us/run - ", total_runs, (double) total_time_us / total_runs);
- 
-         if (op_flops(out) > 0) {
-             double flops_per_sec = (op_flops(out) * total_runs) / (total_time_us / 1e6);
--            auto format_flops = [](double flops) -> std::string {
-+            auto   format_flops  = [](double flops) -> std::string {
-                 char buf[256];
-                 if (flops >= 1e12) {
-                     snprintf(buf, sizeof(buf), "%6.2f TFLOP", flops / 1e12);
-@@ -732,14 +719,12 @@ struct test_case {
-                 }
-                 return buf;
-             };
--            printf("%s/run - \033[1;34m%sS\033[0m",
--                   format_flops(op_flops(out)).c_str(),
--                   format_flops(flops_per_sec).c_str());
-+            printf("%s/run - \033[1;34m%sS\033[0m", format_flops(op_flops(out)).c_str(),
-+                   format_flops(flops_per_sec).c_str());
- 
-         } else {
--            printf("%8zu kB/run - \033[1;34m%7.2f GB/s\033[0m",
--                   op_size(out) / 1024,
--                   total_mem / (total_time_us / 1e6) / 1024.0 / 1024.0 / 1024.0);
-+            printf("%8zu kB/run - \033[1;34m%7.2f GB/s\033[0m", op_size(out) / 1024,
-+                   total_mem / (total_time_us / 1e6) / 1024.0 / 1024.0 / 1024.0);
-         }
-         printf("\n");
- 
-@@ -747,15 +732,16 @@ struct test_case {
-     }
- 
-     bool eval_grad(ggml_backend_t backend, const char * op_name) {
--        mode = MODE_GRAD;
-+        mode                            = MODE_GRAD;
-         const std::vector<float> expect = grad_expect();
- 
-         ggml_init_params params = {
--            /* .mem_size = */ ggml_tensor_overhead()*128 + 2*ggml_graph_overhead_custom(GGML_DEFAULT_GRAPH_SIZE, true),
-+            /* .mem_size = */ ggml_tensor_overhead() * 128 +
-+                2 * ggml_graph_overhead_custom(GGML_DEFAULT_GRAPH_SIZE, true),
-             /* .mem_base = */ NULL,
-             /* .no_alloc = */ true,
-         };
--        ggml_context_ptr ctx(ggml_init(params)); // smart ptr
-+        ggml_context_ptr ctx(ggml_init(params));  // smart ptr
-         GGML_ASSERT(ctx);
- 
-         gf = ggml_new_graph_custom(ctx.get(), GGML_DEFAULT_GRAPH_SIZE, true);
-@@ -777,7 +763,7 @@ struct test_case {
-         }
- 
-         // check if the backend supports the ops
--        bool supported = true;
-+        bool supported  = true;
-         bool any_params = false;
-         for (ggml_tensor * t = ggml_get_first_tensor(ctx.get()); t != NULL; t = ggml_get_next_tensor(ctx.get(), t)) {
-             if (!ggml_backend_supports_op(backend, t)) {
-@@ -814,7 +800,6 @@ struct test_case {
-             return true;
-         }
- 
--
-         if (!ggml_is_scalar(out)) {
-             out = ggml_sum(ctx.get(), out);
-             ggml_set_name(out, "sum_of_out");
-@@ -826,7 +811,8 @@ struct test_case {
-         ggml_build_backward_expand(ctx.get(), gb, nullptr);
-         if (expect.size() != 1 || expect[0] != 0.0f) {
-             GGML_ASSERT(ggml_graph_n_nodes(gb) > ggml_graph_n_nodes(gf));
--            for (ggml_tensor * t = ggml_get_first_tensor(ctx.get()); t != NULL; t = ggml_get_next_tensor(ctx.get(), t)) {
-+            for (ggml_tensor * t = ggml_get_first_tensor(ctx.get()); t != NULL;
-+                 t = ggml_get_next_tensor(ctx.get(), t)) {
-                 GGML_ASSERT(!(t->flags & GGML_TENSOR_FLAG_PARAM) || ggml_graph_get_grad(gb, t)->op != GGML_OP_NONE);
-             }
-         }
-@@ -849,44 +835,47 @@ struct test_case {
-         }
- 
-         // allocate
--        ggml_backend_buffer_ptr buf(ggml_backend_alloc_ctx_tensors(ctx.get(), backend)); // smart ptr
-+        ggml_backend_buffer_ptr buf(ggml_backend_alloc_ctx_tensors(ctx.get(), backend));  // smart ptr
-         if (buf == NULL) {
-             printf("failed to allocate tensors [%s] ", ggml_backend_name(backend));
-             return false;
-         }
- 
--        initialize_tensors(ctx.get()); // Randomizes all tensors (including gradients).
--        ggml_graph_reset(gb);          // Sets gradients to 1 if loss, 0 otherwise.
-+        initialize_tensors(ctx.get());  // Randomizes all tensors (including gradients).
-+        ggml_graph_reset(gb);           // Sets gradients to 1 if loss, 0 otherwise.
- 
-         ggml_status status = ggml_backend_graph_compute(backend, gf);
-         if (status != GGML_STATUS_SUCCESS) {
--            fprintf(stderr, "%s: ggml_backend_graph_compute failed. status=%s \n", __func__, ggml_status_to_string(status));
-+            fprintf(stderr, "%s: ggml_backend_graph_compute failed. status=%s \n", __func__,
-+                    ggml_status_to_string(status));
-             return false;
-         }
-         status = ggml_backend_graph_compute(backend, gb);
-         if (status != GGML_STATUS_SUCCESS) {
--            fprintf(stderr, "%s: ggml_backend_graph_compute failed. status=%s \n", __func__, ggml_status_to_string(status));
-+            fprintf(stderr, "%s: ggml_backend_graph_compute failed. status=%s \n", __func__,
-+                    ggml_status_to_string(status));
-             return false;
-         }
- 
-         bool ok = true;
--        for (struct ggml_tensor * t = ggml_get_first_tensor(ctx.get()); t != nullptr; t = ggml_get_next_tensor(ctx.get(), t)) {
-+        for (struct ggml_tensor * t = ggml_get_first_tensor(ctx.get()); t != nullptr;
-+             t = ggml_get_next_tensor(ctx.get(), t)) {
-             if (!(t->flags & GGML_TENSOR_FLAG_PARAM)) {
-                 continue;
-             }
- 
--            const char * bn = ggml_backend_name(backend);
-+            const char *  bn = ggml_backend_name(backend);
-             const int64_t ne = ggml_nelements(t);
- 
--            std::vector<float> ga;
-+            std::vector<float>   ga;
-             struct ggml_tensor * grad = ggml_graph_get_grad(gb, t);
-             if (grad) {
-                 ga = tensor_to_float(grad);
-             } else {
--                ga.resize(ne); // default value is 0.0f
-+                ga.resize(ne);  // default value is 0.0f
-             }
- 
--            for (int64_t i = 0; i < ne; ++i) { // gradient algebraic
-+            for (int64_t i = 0; i < ne; ++i) {  // gradient algebraic
-                 // check for nans
-                 if (!std::isfinite(ga[i])) {
-                     printf("[%s] nonfinite gradient at index %" PRId64 " (%s=%f) ", ggml_op_desc(t), i, bn, ga[i]);
-@@ -898,58 +887,63 @@ struct test_case {
-                 break;
-             }
- 
--            std::vector<float> gn(ne); // gradient numeric
-+            std::vector<float> gn(ne);  // gradient numeric
-             GGML_ASSERT(ga.size() == gn.size());
- 
--            std::vector<float> x0 = tensor_to_float(t); // original t data
-+            std::vector<float> x0 = tensor_to_float(t);  // original t data
-             GGML_ASSERT(ggml_is_scalar(out));
-             GGML_ASSERT(out->type == GGML_TYPE_F32);
- 
-             const float eps = grad_eps();
-             for (int64_t i = 0; i < ne; ++i) {
--                const float xiu = x0[i] + 1.0f*eps; // x, index i, up
--                const float xiuh = x0[i] + 0.5f*eps; // x, index i, up half
--                const float xidh = x0[i] - 0.5f*eps; // x, index i, down half
--                const float xid = x0[i] - 1.0f*eps; // x, index i, down
-+                const float xiu  = x0[i] + 1.0f * eps;  // x, index i, up
-+                const float xiuh = x0[i] + 0.5f * eps;  // x, index i, up half
-+                const float xidh = x0[i] - 0.5f * eps;  // x, index i, down half
-+                const float xid  = x0[i] - 1.0f * eps;  // x, index i, down
- 
--                float fu, fuh, fdh, fd; // output values for xiu, xiuh, xid, xidh
-+                float fu, fuh, fdh, fd;  // output values for xiu, xiuh, xid, xidh
- 
--                ggml_backend_tensor_set(t, &xiu, i*sizeof(float), sizeof(float));
-+                ggml_backend_tensor_set(t, &xiu, i * sizeof(float), sizeof(float));
-                 status = ggml_backend_graph_compute(backend, gf);
-                 if (status != GGML_STATUS_SUCCESS) {
--                    fprintf(stderr, "%s: ggml_backend_graph_compute failed. status=%s \n", __func__, ggml_status_to_string(status));
-+                    fprintf(stderr, "%s: ggml_backend_graph_compute failed. status=%s \n", __func__,
-+                            ggml_status_to_string(status));
-                     return false;
-                 }
-                 ggml_backend_tensor_get(out, &fu, 0, ggml_nbytes(out));
- 
--                ggml_backend_tensor_set(t, &xid, i*sizeof(float), sizeof(float));
-+                ggml_backend_tensor_set(t, &xid, i * sizeof(float), sizeof(float));
-                 status = ggml_backend_graph_compute(backend, gf);
-                 if (status != GGML_STATUS_SUCCESS) {
--                    fprintf(stderr, "%s: ggml_backend_graph_compute failed. status=%s \n", __func__, ggml_status_to_string(status));
-+                    fprintf(stderr, "%s: ggml_backend_graph_compute failed. status=%s \n", __func__,
-+                            ggml_status_to_string(status));
-                     return false;
-                 }
-                 ggml_backend_tensor_get(out, &fd, 0, ggml_nbytes(out));
- 
-                 if (grad_precise()) {
--                    ggml_backend_tensor_set(t, &xiuh, i*sizeof(float), sizeof(float));
-+                    ggml_backend_tensor_set(t, &xiuh, i * sizeof(float), sizeof(float));
-                     status = ggml_backend_graph_compute(backend, gf);
-                     if (status != GGML_STATUS_SUCCESS) {
--                        fprintf(stderr, "%s: ggml_backend_graph_compute failed. status=%s \n", __func__, ggml_status_to_string(status));
-+                        fprintf(stderr, "%s: ggml_backend_graph_compute failed. status=%s \n", __func__,
-+                                ggml_status_to_string(status));
-                         return false;
-                     }
-                     ggml_backend_tensor_get(out, &fuh, 0, ggml_nbytes(out));
- 
--                    ggml_backend_tensor_set(t, &xidh, i*sizeof(float), sizeof(float));
-+                    ggml_backend_tensor_set(t, &xidh, i * sizeof(float), sizeof(float));
-                     status = ggml_backend_graph_compute(backend, gf);
-                     if (status != GGML_STATUS_SUCCESS) {
--                        fprintf(stderr, "%s: ggml_backend_graph_compute failed. status=%s \n", __func__, ggml_status_to_string(status));
-+                        fprintf(stderr, "%s: ggml_backend_graph_compute failed. status=%s \n", __func__,
-+                                ggml_status_to_string(status));
-                         return false;
-                     }
-                     ggml_backend_tensor_get(out, &fdh, 0, ggml_nbytes(out));
- 
--                    gn[i] = (8.0*(double)fuh + (double)fd - (8.0*(double)fdh + (double)fu)) / (6.0*(double)eps);
-+                    gn[i] =
-+                        (8.0 * (double) fuh + (double) fd - (8.0 * (double) fdh + (double) fu)) / (6.0 * (double) eps);
-                 } else {
--                    gn[i] = (fu - fd) / (2.0f*eps);
-+                    gn[i] = (fu - fd) / (2.0f * eps);
-                 }
- 
-                 ggml_backend_tensor_set(t, x0.data(), 0, ggml_nbytes(t));
-@@ -980,82 +974,77 @@ struct test_case {
-     }
- };
- 
--
- // ###################################
- // ## Section 2: GGML Op Defintions ##
- // ###################################
- 
--
- // The following is an example showing the bare minimum for creating a test for a GGML op.
- 
- // GGML_OP_EXAMPLE
- struct test_example : public test_case {
-     // Always define these 2 or variants thereof:
--    const ggml_type type; // The type of the input tensors.
--    const std::array<int64_t, 4> ne; // The shape of the input tensors.
-+    const ggml_type              type;  // The type of the input tensors.
-+    const std::array<int64_t, 4> ne;    // The shape of the input tensors.
-+
-     // For some ops it's necessary to define multiple types or shapes for the inputs.
-     // Or they may need additional parameters.
- 
-     // Put all parameters needed to fully define the test into one of the VARS_TO_STR macros.
-     // In most cases these are just the properties of the struct that you defined above.
-     // This is needed for info prints.
--    std::string vars() override {
--        return VARS_TO_STR2(type, ne);
--    }
-+    std::string vars() override { return VARS_TO_STR2(type, ne); }
- 
-     // Define a constructor for the struct.
-     // In most cases it will be sufficient to have the same arguments as the struct has properties
-     // and just use initializer lists.
--    test_example(ggml_type type = GGML_TYPE_F32,
--            std::array<int64_t, 4> ne = {10, 5, 4, 3})
--        : type(type), ne(ne) {}
-+    test_example(ggml_type type = GGML_TYPE_F32, std::array<int64_t, 4> ne = { 10, 5, 4, 3 }) : type(type), ne(ne) {}
- 
-     // Define how a simple GGML compute graph can be constructed for the new GGML op.
-     ggml_tensor * build_graph(ggml_context * ctx) override {
-         // Step 1: create input tensors that don't depend on any other tensors:
-         ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
--        ggml_set_name(a, "a"); // Setting names is optional but it's useful for debugging.
-+        ggml_set_name(a, "a");  // Setting names is optional but it's useful for debugging.
- 
-         ggml_tensor * b = ggml_new_tensor(ctx, type, 4, ne.data());
-         ggml_set_name(b, "b");
- 
-         // Step 2: use the op that you want to test in the GGML compute graph.
--        ggml_tensor * out = ggml_add(ctx, a, b); // For this example we're just doing a simple addition.
-+        ggml_tensor * out = ggml_add(ctx, a, b);  // For this example we're just doing a simple addition.
-         ggml_set_name(out, "out");
- 
-         // Step 3: return the output tensor.
-         return out;
-     }
-+
-     // In order to also check the gradients for your op, add calls like ggml_set_param(a)
-     // immediately after you create the tensors.
-     // This is optional and only makes sense if a backward pass has actually been implemented for the new op.
- };
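Since this patch enables the CUDA backend for `GGML_OP_MEAN`, a natural companion would be a backend test following the `test_example` recipe above. A hypothetical sketch (`test_mean` is an assumed name, not part of this patch):

```cpp
// Hypothetical test case for GGML_OP_MEAN, modeled on test_example above (not in the patch).
struct test_mean : public test_case {
    const ggml_type              type;
    const std::array<int64_t, 4> ne;

    std::string vars() override { return VARS_TO_STR2(type, ne); }

    test_mean(ggml_type type = GGML_TYPE_F32, std::array<int64_t, 4> ne = { 10, 5, 4, 3 }) : type(type), ne(ne) {}

    ggml_tensor * build_graph(ggml_context * ctx) override {
        ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
        ggml_set_name(a, "a");

        ggml_tensor * out = ggml_mean(ctx, a);  // lowers to GGML_OP_MEAN, now supported on CUDA
        ggml_set_name(out, "out");

        return out;
    }
};
```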
- 
--
- // GGML_OP_UNARY
- struct test_unary : public test_case {
--    const ggml_unary_op op;
--    const ggml_type type;
-+    const ggml_unary_op          op;
-+    const ggml_type              type;
-     const std::array<int64_t, 4> ne_a;
--    int v; // view (1 : non-contiguous a)
-+    int                          v;  // view (1 : non-contiguous a)
- 
--    std::string vars() override {
--        return VARS_TO_STR3(type, ne_a, v);
--    }
-+    std::string vars() override { return VARS_TO_STR3(type, ne_a, v); }
- 
--    test_unary(ggml_unary_op op,
--            ggml_type type = GGML_TYPE_F32,
--            std::array<int64_t, 4> ne_a = {128, 2, 2, 2},
--            int v = 0)
--        : op(op), type(type), ne_a(ne_a), v(v) {}
-+    test_unary(ggml_unary_op op, ggml_type type = GGML_TYPE_F32, std::array<int64_t, 4> ne_a = { 128, 2, 2, 2 },
-+               int v = 0) :
-+        op(op),
-+        type(type),
-+        ne_a(ne_a),
-+        v(v) {}
- 
-     ggml_tensor * build_graph(ggml_context * ctx) override {
-         const bool grad_supported = op == GGML_UNARY_OP_ABS || op == GGML_UNARY_OP_SGN || op == GGML_UNARY_OP_NEG ||
--            op == GGML_UNARY_OP_STEP || op == GGML_UNARY_OP_RELU || op == GGML_UNARY_OP_SILU;
-+                                    op == GGML_UNARY_OP_STEP || op == GGML_UNARY_OP_RELU || op == GGML_UNARY_OP_SILU;
- 
-         ggml_tensor * a;
-         if (v & 1) {
--            auto ne = ne_a; ne[0] *= 3;
-+            auto ne = ne_a;
-+            ne[0] *= 3;
-             a = ggml_new_tensor(ctx, type, 4, ne.data());
-             if (grad_supported) {
-                 ggml_set_param(a);
-@@ -1085,40 +1074,40 @@ struct test_unary : public test_case {
-         }
-     }
- 
--    float grad_eps() override {
--        return 15.0f;
--    }
-+    float grad_eps() override { return 15.0f; }
- 
-     std::vector<float> grad_expect() override {
-         if (op == GGML_UNARY_OP_ABS) {
--            return {-1.0f, 1.0f};
-+            return { -1.0f, 1.0f };
-         }
-         if (op == GGML_UNARY_OP_SGN || op == GGML_UNARY_OP_STEP) {
--            return {0.0f};
-+            return { 0.0f };
-         }
-         if (op == GGML_UNARY_OP_RELU) {
--            return {0.0f, 1.0f};
-+            return { 0.0f, 1.0f };
-         }
-         return {};
-     }
--
- };
- 
- // GGML_OP_GET_ROWS
- struct test_get_rows : public test_case {
-     const ggml_type type;
--    const int n; // cols
--    const int m; // rows
--    const int r; // rows to get
--    const int b; // batch size
--    const bool v; // view (non-contiguous src1)
--
--    std::string vars() override {
--        return VARS_TO_STR6(type, n, m, r, b, v);
--    }
--
--    test_get_rows(ggml_type type = GGML_TYPE_F32, int n = 10, int m = 5, int r = 3, int b = 1, bool v = false)
--        : type(type), n(n), m(m), r(r), b(b), v(v) {}
-+    const int  n;  // cols
-+    const int  m;  // rows
-+    const int  r;  // rows to get
-+    const int  b;  // batch size
-+    const bool v;  // view (non-contiguous src1)
-+
-+    std::string vars() override { return VARS_TO_STR6(type, n, m, r, b, v); }
-+
-+    test_get_rows(ggml_type type = GGML_TYPE_F32, int n = 10, int m = 5, int r = 3, int b = 1, bool v = false) :
-+        type(type),
-+        n(n),
-+        m(m),
-+        r(r),
-+        b(b),
-+        v(v) {}
- 
-     ggml_tensor * build_graph(ggml_context * ctx) override {
-         ggml_tensor * in = ggml_new_tensor_3d(ctx, type, n, m, b);
-@@ -1127,7 +1116,7 @@ struct test_get_rows : public test_case {
-         ggml_tensor * rows = ggml_new_tensor_2d(ctx, GGML_TYPE_I32, r, b);
-         ggml_set_name(rows, "rows");
-         if (v) {
--            rows = ggml_view_2d(ctx, rows, r/2, b, rows->nb[1], 0);
-+            rows = ggml_view_2d(ctx, rows, r / 2, b, rows->nb[1], 0);
-             ggml_set_name(rows, "view_of_rows");
-         }
- 
-@@ -1146,10 +1135,12 @@ struct test_get_rows : public test_case {
-     void initialize_tensors(ggml_context * ctx) override {
-         for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
-             if (t->type == GGML_TYPE_I32) {
--                if (ggml_is_view_op(t->op)) { continue; }
-+                if (ggml_is_view_op(t->op)) {
-+                    continue;
-+                }
-                 // rows
--                std::vector<int> data(r*b);
--                for (int i = 0; i < r*b; i++) {
-+                std::vector<int> data(r * b);
-+                for (int i = 0; i < r * b; i++) {
-                     data[i] = rand() % m;
-                 }
-                 ggml_backend_tensor_set(t, data.data(), 0, r * b * sizeof(int));
-@@ -1163,18 +1154,21 @@
- 
- // GGML_OP_GET_ROWS_BACK
- struct test_get_rows_back : public test_case {
-     const ggml_type type;
--    const int n; // cols
--    const int m; // rows
--    const int r; // rows to get
--    const int b; // batch size
--    const bool v; // view (non-contiguous src1)
--
--    std::string vars() override {
--        return VARS_TO_STR6(type, n, m, r, b, v);
--    }
--
--    test_get_rows_back(ggml_type type = GGML_TYPE_F32, int n = 10, int m = 5, int r = 3, int b = 1, bool v = false)
--        : type(type), n(n), m(m), r(r), b(b), v(v) {}
-+    const int  n;  // cols
-+    const int  m;  // rows
-+    const int  r;  // rows to get
-+    const int  b;  // batch size
-+    const bool v;  // view (non-contiguous src1)
-+
-+    std::string vars() override { return VARS_TO_STR6(type, n, m, r, b, v); }
-+
-+    test_get_rows_back(ggml_type type = GGML_TYPE_F32, int n = 10, int m = 5, int r = 3, int b = 1, bool v = false) :
-+        type(type),
-+        n(n),
-+        m(m),
-+        r(r),
-+        b(b),
-+        v(v) {}
- 
-     ggml_tensor * build_graph(ggml_context * ctx) override {
-         ggml_tensor * in_forward = ggml_new_tensor_3d(ctx, type, n, m, b);
-@@ -1183,7 +1177,7 @@ struct test_get_rows_back : public test_case {
-         ggml_tensor * rows = ggml_new_tensor_2d(ctx, GGML_TYPE_I32, r, b);
-         ggml_set_name(rows, "rows");
-         if (v) {
--            rows = ggml_view_2d(ctx, rows, r/2, b, rows->nb[1], 0);
-+            rows = ggml_view_2d(ctx, rows, r / 2, b, rows->nb[1], 0);
-             ggml_set_name(rows, "view_of_rows");
-         }
- 
-@@ -1199,10 +1193,12 @@ struct test_get_rows_back : public test_case {
-     void initialize_tensors(ggml_context * ctx) override {
-         for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
-             if (t->type == GGML_TYPE_I32) {
--                if (ggml_is_view_op(t->op)) { continue; }
-+                if (ggml_is_view_op(t->op)) {
-+                    continue;
-+                }
-                 // rows
--                std::vector<int> data(r*b);
--                for (int i = 0; i < r*b; i++) {
-+                std::vector<int> data(r * b);
-+                for (int i = 0; i < r * b; i++) {
-                     data[i] = rand() % m;
-                 }
-                 ggml_backend_tensor_set(t, data.data(), 0, r * b * sizeof(int));
-@@ -1215,16 +1211,12 @@
- 
- // GGML_OP_ARGMAX
- struct test_argmax : public test_case {
--    const ggml_type type;
-+    const ggml_type              type;
-     const std::array<int64_t, 4> ne;
- 
--    std::string vars() override {
--        return VARS_TO_STR2(type, ne);
--    }
-+    std::string vars() override { return VARS_TO_STR2(type, ne); }
- 
--    test_argmax(ggml_type type = GGML_TYPE_F32,
--            std::array<int64_t, 4> ne = {10, 100, 1, 1})
--        : type(type), ne(ne) {}
-+    test_argmax(ggml_type type = GGML_TYPE_F32, std::array<int64_t, 4> ne = { 10, 100, 1, 1 }) : type(type), ne(ne) {}
- 
-     ggml_tensor * build_graph(ggml_context * ctx) override {
-         ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
-@@ -1237,7 +1229,7 @@ struct test_argmax : public test_case {
-     }
- 
-     void initialize_tensors(ggml_context * ctx) override {
--        std::random_device rd;
-+        std::random_device         rd;
-         std::default_random_engine rng(rd());
-         for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
-             if (t->type == GGML_TYPE_F32) {
-@@ -1256,23 +1248,19 @@ struct test_argmax : public test_case {
-         }
-     }
- 
--    double max_nmse_err() override {
--        return 0.0;
--    }
-+    double max_nmse_err() override { return 0.0; }
- };
- 
- // GGML_OP_COUNT_EQUAL
- struct test_count_equal : public test_case {
--    const ggml_type type;
-+    const ggml_type              type;
-     const std::array<int64_t, 4> ne;
- 
--    std::string vars() override {
--        return VARS_TO_STR2(type, ne);
--    }
-+    std::string vars() override { return VARS_TO_STR2(type, ne); }
- 
--    test_count_equal(ggml_type type = GGML_TYPE_F32,
--            std::array<int64_t, 4> ne = {4, 500, 1, 1})
--        : type(type), ne(ne) {}
-+    test_count_equal(ggml_type type = GGML_TYPE_F32, std::array<int64_t, 4> ne = { 4, 500, 1, 1 }) :
-+        type(type),
-+        ne(ne) {}
- 
-     ggml_tensor * build_graph(ggml_context * ctx) override {
-         ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
-@@ -1293,32 +1281,28 @@ struct test_count_equal : public test_case {
-         return out;
-     }
- 
--    double max_nmse_err() override {
--        return 0.0;
--    }
-+    double max_nmse_err() override { return 0.0; }
- };
- 
- // GGML_OP_REPEAT
- struct test_repeat : public test_case {
--    const ggml_type type;
-+    const ggml_type              type;
-     const std::array<int64_t, 4> ne;
--    const std::array<int, 4> nr;
-+    const std::array<int, 4>     nr;
- 
--    std::string vars() override {
--        return VARS_TO_STR3(type, ne, nr);
--    }
-+    std::string vars() override { return VARS_TO_STR3(type, ne, nr); }
- 
--    size_t op_size(ggml_tensor * t) override {
--        return ggml_nbytes(t) * 2;
--    }
-+    size_t op_size(ggml_tensor * t) override { return ggml_nbytes(t) * 2; }
- 
--    test_repeat(ggml_type type = GGML_TYPE_F32,
--            std::array<int64_t, 4> ne = {10, 5, 4, 3},
--            std::array<int, 4> nr = {2, 2, 2, 2})
--        : type(type), ne(ne), nr(nr) {}
-+    test_repeat(ggml_type type = GGML_TYPE_F32, std::array<int64_t, 4> ne = { 10, 5, 4, 3 },
-+                std::array<int, 4> nr = { 2, 2, 2, 2 }) :
-+        type(type),
-+        ne(ne),
-+        nr(nr) {}
- 
-     ggml_tensor * build_graph(ggml_context * ctx) override {
--        ggml_tensor * target = ggml_new_tensor_4d(ctx, type, ne[0]*nr[0], ne[1]*nr[1], ne[2]*nr[2], ne[3]*nr[3]);
-+        ggml_tensor * target =
-+            ggml_new_tensor_4d(ctx, type, ne[0] * nr[0], ne[1] * nr[1], ne[2] * nr[2], ne[3] * nr[3]);
-         ggml_set_name(target, "target");
- 
-         ggml_tensor * src = ggml_new_tensor(ctx, type, 4, ne.data());
-@@ -1334,27 +1318,24 @@ struct test_repeat : public test_case {
- 
- // GGML_OP_REPEAT_BACK
- struct test_repeat_back : public test_case {
--    const ggml_type type;
-+    const ggml_type              type;
-     const std::array<int64_t, 4> ne;
--    const std::array<int, 4> nr;
--    const bool v; // whether src is a noncontiguous view
-+    const std::array<int, 4>     nr;
-+    const bool                   v;  // whether src is a noncontiguous view
- 
--    std::string vars() override {
--        return VARS_TO_STR4(type, ne, nr, v);
--    }
-+    std::string vars() override { return VARS_TO_STR4(type, ne, nr, v); }
- 
--    size_t op_size(ggml_tensor * t) override {
--        return ggml_nbytes(t) * 2;
--    }
-+    size_t op_size(ggml_tensor * t) override { return ggml_nbytes(t) * 2; }
- 
--    test_repeat_back(ggml_type type = GGML_TYPE_F32,
--            std::array<int64_t, 4> ne = {8, 6, 4, 2},
--            std::array<int, 4> nr = {2, 2, 2, 2},
--            bool v = false)
--        : type(type), ne(ne), nr(nr), v(v) {}
-+    test_repeat_back(ggml_type type = GGML_TYPE_F32, std::array<int64_t, 4> ne = { 8, 6, 4, 2 },
-+                     std::array<int, 4> nr = { 2, 2, 2, 2 }, bool v = false) :
-+        type(type),
-+        ne(ne),
-+        nr(nr),
-+        v(v) {}
- 
-     ggml_tensor * build_graph(ggml_context * ctx) override {
--        ggml_tensor * src = ggml_new_tensor_4d(ctx, type, ne[0]*nr[0], ne[1]*nr[1], ne[2]*nr[2], ne[3]*nr[3]);
-+        ggml_tensor * src = ggml_new_tensor_4d(ctx, type, ne[0] * nr[0], ne[1] * nr[1], ne[2] * nr[2], ne[3] * nr[3]);
-         ggml_set_name(src, "src");
- 
-         if (v) {
-@@ -1387,22 +1368,25 @@ struct test_repeat_back : public test_case {
- 
- // GGML_OP_DUP
- struct test_dup : public test_case {
--    const ggml_type type;
-+    const ggml_type              type;
-     const std::array<int64_t, 4> ne;
-     const std::array<int64_t, 4> permute;
--    bool _use_permute;
-+    bool                         _use_permute;
- 
-     std::string vars() override {
-         std::string v = VARS_TO_STR2(type, ne);
--        if (_use_permute) v += "," + VAR_TO_STR(permute);
-+        if (_use_permute) {
-+            v += "," + VAR_TO_STR(permute);
-+        }
-         return v;
-     }
- 
--    test_dup(ggml_type type = GGML_TYPE_F32,
--            std::array<int64_t, 4> ne = {10, 10, 20, 1},
--            std::array<int64_t, 4> permute = {0, 0, 0, 0})
--        : type(type), ne(ne), permute(permute),
--        _use_permute(permute[0] + permute[1] + permute[2] + permute[3] > 0) {}
-+    test_dup(ggml_type type = GGML_TYPE_F32, std::array<int64_t, 4> ne = { 10, 10, 20, 1 },
-+             std::array<int64_t, 4> permute = { 0, 0, 0, 0 }) :
-+        type(type),
-+        ne(ne),
-+        permute(permute),
-+        _use_permute(permute[0] + permute[1] + permute[2] + permute[3] > 0) {}
- 
-     ggml_tensor * build_graph(ggml_context * ctx) override {
-         ggml_tensor * src = ggml_new_tensor(ctx, type, 4, ne.data());
-@@ -1423,22 +1407,21 @@ struct test_dup : public test_case {
- 
- // GGML_OP_SET
- struct test_set : public test_case {
--    const ggml_type type_src;
--    const ggml_type type_dst;
-+    const ggml_type              type_src;
-+    const ggml_type              type_dst;
-     const std::array<int64_t, 4> ne;
--    const int dim;
-+    const int                    dim;
- 
--    std::string vars() override {
--        return VARS_TO_STR4(type_src, type_dst, ne, dim);
--    }
-+    std::string vars() override { return VARS_TO_STR4(type_src, type_dst, ne, dim); }
- 
--    size_t op_size(ggml_tensor * t) override {
--        return ggml_nbytes(t) + ggml_nbytes(t->src[0]);
--    }
-+    size_t op_size(ggml_tensor * t) override { return ggml_nbytes(t) + ggml_nbytes(t->src[0]); }
- 
-     test_set(ggml_type type_src = GGML_TYPE_F32, ggml_type type_dst = GGML_TYPE_F32,
--            std::array<int64_t, 4> ne = {6, 5, 4, 3}, int dim = 1)
--        : type_src(type_src), type_dst(type_dst), ne(ne), dim(dim) {}
-+             std::array<int64_t, 4> ne = { 6, 5, 4, 3 }, int dim = 1) :
-+        type_src(type_src),
-+        type_dst(type_dst),
-+        ne(ne),
-+        dim(dim) {}
- 
-     ggml_tensor * build_graph(ggml_context * ctx) override {
-         ggml_tensor * src = ggml_new_tensor(ctx, type_src, 4, ne.data());
-@@ -1449,17 +1432,17 @@ struct test_set : public test_case {
-         for (int i = 0; i < dim; ++i) {
-             ne_dst[i] *= 2;
-         }
--        ggml_tensor* dst = ggml_new_tensor(ctx, type_dst, 4, ne_dst.data());
-+        ggml_tensor * dst = ggml_new_tensor(ctx, type_dst, 4, ne_dst.data());
-         ggml_set_param(dst);
-         ggml_set_name(dst, "dst");
- 
-         size_t offset = 0;
-         for (int i = 0; i < dim; ++i) {
--            offset += ((ne_dst[i] - ne[i])/2)*dst->nb[i];
-+            offset += ((ne_dst[i] - ne[i]) / 2) * dst->nb[i];
-         }
-         ggml_tensor * out = ggml_set(ctx, dst, src,
--                // The backward pass requires setting a contiguous region:
--                src->nb[1], src->nb[2], src->nb[3], offset);
-+                                     // The backward pass requires setting a contiguous region:
-+                                     src->nb[1], src->nb[2], src->nb[3], offset);
-         ggml_set_name(out, "out");
- 
-         return out;
-@@ -1468,33 +1451,30 @@ struct test_set : public test_case {
- 
- // GGML_OP_CPY
- struct test_cpy : public test_case {
--    const ggml_type type_src;
--    const ggml_type type_dst;
-+    const ggml_type              type_src;
-+    const ggml_type              type_dst;
-     const std::array<int64_t, 4> ne;
-     const std::array<int64_t, 4> permute_src;
-     const std::array<int64_t, 4> permute_dst;
--    bool _src_use_permute;
--    bool _dst_use_permute;
-+    bool                         _src_use_permute;
-+    bool                         _dst_use_permute;
- 
--    std::string vars() override {
--        return VARS_TO_STR5(type_src, type_dst, ne, permute_src, permute_dst);
--    }
-+    std::string vars() override { return VARS_TO_STR5(type_src, type_dst, ne, permute_src, permute_dst); }
- 
--    double max_nmse_err() override {
--        return 1e-6;
--    }
-+    double max_nmse_err() override { return 1e-6; }
- 
--    size_t op_size(ggml_tensor * t) override {
--        return ggml_nbytes(t) + ggml_nbytes(t->src[0]);
--    }
-+    size_t op_size(ggml_tensor * t) override { return ggml_nbytes(t) + ggml_nbytes(t->src[0]); }
- 
-     test_cpy(ggml_type type_src = GGML_TYPE_F32, ggml_type type_dst = GGML_TYPE_F32,
--            std::array<int64_t, 4> ne = {10, 10, 10, 1},
--            std::array<int64_t, 4> permute_src = {0, 0, 0, 0},
--            std::array<int64_t, 4> permute_dst = {0, 0, 0, 0})
--        : type_src(type_src), type_dst(type_dst), ne(ne), permute_src(permute_src), permute_dst(permute_dst),
--        _src_use_permute(permute_src[0] + permute_src[1] + permute_src[2] + permute_src[3] > 0),
--        _dst_use_permute(permute_dst[0] + permute_dst[1] + permute_dst[2] + permute_dst[3] > 0) {}
-+             std::array<int64_t, 4> ne = { 10, 10, 10, 1 }, std::array<int64_t, 4> permute_src = { 0, 0, 0, 0 },
-+             std::array<int64_t, 4> permute_dst = { 0, 0, 0, 0 }) :
-+        type_src(type_src),
-+        type_dst(type_dst),
-+        ne(ne),
-+        permute_src(permute_src),
-+        permute_dst(permute_dst),
-+        _src_use_permute(permute_src[0] + permute_src[1] + permute_src[2] + permute_src[3] > 0),
-+        _dst_use_permute(permute_dst[0] + permute_dst[1] + permute_dst[2] + permute_dst[3] > 0) {}
- 
-     ggml_tensor * build_graph(ggml_context * ctx) override {
-         ggml_tensor * src = ggml_new_tensor(ctx, type_src, 4, ne.data());
-@@ -1523,16 +1503,12 @@ struct test_cpy : public test_case {
- 
- // GGML_OP_CONT
- struct test_cont : public test_case {
--    const ggml_type type;
-+    const ggml_type              type;
-     const std::array<int64_t, 4> ne;
- 
--    std::string vars() override {
--        return VARS_TO_STR2(type, ne);
--    }
-+    std::string vars() override { return VARS_TO_STR2(type, ne); }
- 
--    test_cont(ggml_type type = GGML_TYPE_F32,
--            std::array<int64_t, 4> ne = {10, 10, 10, 1})
--        : type(type), ne(ne) {}
-+    test_cont(ggml_type type = GGML_TYPE_F32, std::array<int64_t, 4> ne = { 10, 10, 10, 1 }) : type(type), ne(ne) {}
- 
-     ggml_tensor * build_graph(ggml_context * ctx) override {
-         ggml_tensor * src = ggml_new_tensor(ctx, type, 4, ne.data());
-@@ -1555,26 +1531,24 @@ struct test_cont : public test_case {
- // GGML_OP_DIV
- struct test_bin_bcast : public test_case {
-     using op_t = ggml_tensor * (*) (ggml_context *, ggml_tensor *, ggml_tensor *);
--    op_t op;
--    const ggml_type type;
-+    op_t                         op;
-+    const ggml_type              type;
-     const std::array<int64_t, 4> ne;
--    const std::array<int, 4> nr;
-+    const std::array<int, 4>     nr;
- 
--    std::string vars() override {
--        return VARS_TO_STR3(type, ne, nr);
--    }
-+    std::string vars() override { return VARS_TO_STR3(type, ne, nr); }
- 
--    size_t op_size(ggml_tensor * t) override {
--        return ggml_nbytes(t) * 3;
--    }
-+    size_t op_size(ggml_tensor * t) override { return ggml_nbytes(t) * 3; }
- 
--    test_bin_bcast(op_t op, ggml_type type = GGML_TYPE_F32,
--            std::array<int64_t, 4> ne = {10, 10, 1, 1},
--            std::array<int, 4> nr = {1, 2, 1, 1})
--        : op(op), type(type), ne(ne), nr(nr) {}
-+    test_bin_bcast(op_t op, ggml_type type = GGML_TYPE_F32, std::array<int64_t, 4> ne = { 10, 10, 1, 1 },
-+                   std::array<int, 4> nr = { 1, 2, 1, 1 }) :
-+        op(op),
-+        type(type),
-+        ne(ne),
-+        nr(nr) {}
- 
-     ggml_tensor * build_graph(ggml_context * ctx) override {
--        ggml_tensor * a = ggml_new_tensor_4d(ctx, type, ne[0]*nr[0], ne[1]*nr[1], ne[2]*nr[2], ne[3]*nr[3]);
-+        ggml_tensor * a = ggml_new_tensor_4d(ctx, type, ne[0] * nr[0], ne[1] * nr[1], ne[2] * nr[2], ne[3] * nr[3]);
-         ggml_set_name(a, "a");
- 
-         ggml_tensor * b = ggml_new_tensor(ctx, type, 4, ne.data());
-@@ -1604,31 +1578,21 @@ struct test_bin_bcast : public test_case {
-         }
-     }
- 
--    float grad_eps() override {
--        return 0.1f * (op == ggml_mul ? ne[0]*ne[1]*ne[2]*ne[3] : 1);
--    }
-+    float grad_eps() override { return 0.1f * (op == ggml_mul ? ne[0] * ne[1] * ne[2] * ne[3] : 1); }
- 
--    bool grad_precise() override {
--        return op == ggml_div;
--    }
-+    bool grad_precise() override { return op == ggml_div; }
- 
--    double max_maa_err() override {
--        return op == ggml_add ? 1e-4 : 1e-3;
--    }
-+    double max_maa_err() override { return op == ggml_add ? 1e-4 : 1e-3; }
- };
- 
- // GGML_OP_ADD1
- struct test_add1 : public test_case {
--    const ggml_type type;
-+    const ggml_type              type;
-     const std::array<int64_t, 4> ne;
- 
--    std::string vars() override {
--        return VARS_TO_STR2(type, ne);
--    }
-+    std::string vars() override { return VARS_TO_STR2(type, ne); }
- 
--    test_add1(ggml_type type = GGML_TYPE_F32,
--            std::array<int64_t, 4> ne = {10, 5, 4, 3})
--        : type(type), ne(ne) {}
-+    test_add1(ggml_type type = GGML_TYPE_F32, std::array<int64_t, 4> ne = { 10, 5, 4, 3 }) : type(type), ne(ne) {}
- 
-     ggml_tensor * build_graph(ggml_context * ctx) override {
-         ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
-@@ -1645,25 +1609,21 @@ struct test_add1 : public test_case {
-         return out;
-     }
- 
--    float grad_eps() override {
--        return 0.1f * ne[0]*ne[1]*ne[2]*ne[3];
--    }
-+    float grad_eps() override { return 0.1f * ne[0] * ne[1] * ne[2] * ne[3]; }
- };
- 
- // GGML_OP_SCALE
- struct test_scale : public test_case {
--    const ggml_type type;
-+    const ggml_type              type;
-     const std::array<int64_t, 4> ne;
--    float scale;
-+    float                        scale;
- 
--    std::string vars() override {
--        return VARS_TO_STR3(type, ne, scale);
--    }
-+    std::string vars() override { return VARS_TO_STR3(type, ne, scale); }
- 
--    test_scale(ggml_type type = GGML_TYPE_F32,
--            std::array<int64_t, 4> ne = {10, 10, 10, 10},
--            float scale = 2.0f)
--        : type(type), ne(ne), scale(scale) {}
-+    test_scale(ggml_type type = GGML_TYPE_F32, std::array<int64_t, 4> ne = { 10, 10, 10, 10 }, float scale = 2.0f) :
-+        type(type),
-+        ne(ne),
-+        scale(scale) {}
- 
-     ggml_tensor * build_graph(ggml_context * ctx) override {
-         ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
-@@ -1679,18 +1639,16 @@ struct test_scale : public test_case {
- 
- // GGML_OP_SILU_BACK
- struct test_silu_back : public test_case {
--    const ggml_type type;
-+    const ggml_type              type;
-     const std::array<int64_t, 4> ne;
--    float eps;
-+    float                        eps;
- 
--    std::string vars() override {
--        return VARS_TO_STR3(type, ne, eps);
--    }
-+    std::string vars() override { return VARS_TO_STR3(type, ne, eps); }
- 
--    test_silu_back(ggml_type type = GGML_TYPE_F32,
--            std::array<int64_t, 4> ne = {64, 5, 4, 3},
--            float eps = 1e-6f)
--        : type(type), ne(ne), eps(eps) {}
-+    test_silu_back(ggml_type type = GGML_TYPE_F32, std::array<int64_t, 4> ne = { 64, 5, 4, 3 }, float eps = 1e-6f) :
-+        type(type),
-+        ne(ne),
-+        eps(eps) {}
- 
-     ggml_tensor * build_graph(ggml_context * ctx) override {
-         ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
-@@ -1705,34 +1663,32 @@ struct test_silu_back : public test_case {
-         return out;
-     }
- 
--    bool grad_precise() override {
--        return true;
--    }
-+    bool grad_precise() override { return true; }
- };
- 
- // GGML_OP_NORM
- struct test_norm : public test_case {
--    const ggml_type type;
-+    const ggml_type              type;
-     const std::array<int64_t, 4> ne;
--    const bool v; // whether a is a non-contiguous view
--    const float eps;
-+    const bool                   v;  // whether a is a non-contiguous view
-+    const float                  eps;
- 
--    std::string vars() override {
--        return VARS_TO_STR4(type, ne, v, eps);
--    }
-+    std::string vars() override { return VARS_TO_STR4(type, ne, v, eps); }
- 
--    test_norm(ggml_type type = GGML_TYPE_F32,
--            std::array<int64_t, 4> ne = {64, 5, 4, 3},
--            bool v = false,
--            float eps = 1e-6f)
--        : type(type), ne(ne), v(v), eps(eps) {}
-+    test_norm(ggml_type type = GGML_TYPE_F32, std::array<int64_t, 4> ne = { 64, 5, 4, 3 }, bool v = false,
-+              float eps = 1e-6f) :
-+        type(type),
-+        ne(ne),
-+        v(v),
-+        eps(eps) {}
- 
-     ggml_tensor * build_graph(ggml_context * ctx) override {
-         ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
-         ggml_set_name(a, "a");
- 
-         if (v) {
--            a = ggml_view_4d(ctx, a, a->ne[0]/2, a->ne[1]/2, a->ne[2]/2, a->ne[3]/2, a->nb[1], a->nb[2], a->nb[3], 0);
-+            a = ggml_view_4d(ctx, a, a->ne[0] / 2, a->ne[1] / 2, a->ne[2] / 2, a->ne[3] / 2, a->nb[1], a->nb[2],
-+                             a->nb[3], 0);
-             ggml_set_name(a, "view of a");
-         }
- 
-@@ -1745,20 +1701,19 @@ struct test_norm : public test_case {
- 
- // GGML_OP_RMS_NORM
- struct test_rms_norm : public test_case {
--    const ggml_type type;
-+    const ggml_type              type;
-     const std::array<int64_t, 4> ne;
--    const bool v; // whether a is a non-contiguous view
--    const float eps;
-+    const bool                   v;  // whether a is a non-contiguous view
-+    const float                  eps;
- 
--    std::string vars() override {
--        return VARS_TO_STR4(type, ne, v, eps);
--    }
-+    std::string vars() override { return VARS_TO_STR4(type, ne, v, eps); }
- 
--    test_rms_norm(ggml_type type = GGML_TYPE_F32,
--            std::array<int64_t, 4> ne = {64, 5, 4, 3},
--            bool v = false,
--            float eps = 1e-6f)
--        : type(type), ne(ne), v(v), eps(eps) {}
-+    test_rms_norm(ggml_type type = GGML_TYPE_F32, std::array<int64_t, 4> ne = { 64, 5, 4, 3 }, bool v = false,
-+                  float eps = 1e-6f) :
-+        type(type),
-+        ne(ne),
-+        v(v),
-+        eps(eps) {}
- 
-     ggml_tensor * build_graph(ggml_context * ctx) override {
-         ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
-@@ -1766,7 +1721,8 @@ struct test_rms_norm : public test_case {
-         ggml_set_name(a, "a");
- 
-         if (v) {
--            a = ggml_view_4d(ctx, a, a->ne[0]/2, a->ne[1]/2, a->ne[2]/2, a->ne[3]/2, a->nb[1], a->nb[2], a->nb[3], 0);
-+            a = ggml_view_4d(ctx, a, a->ne[0] / 2, a->ne[1] / 2, a->ne[2] / 2, a->ne[3] / 2, a->nb[1], a->nb[2],
-+                             a->nb[3], 0);
-             ggml_set_name(a, "view of a");
-         }
- 
-@@ -1782,29 +1738,23 @@ struct test_rms_norm : public test_case {
-         }
-     }
- 
--    float grad_eps() override {
--        return 1.0f;
--    }
-+    float grad_eps() override { return 1.0f; }
- 
--    bool grad_precise() override {
--        return true;
--    }
-+    bool grad_precise() override { return true; }
- };
- 
- // GGML_OP_RMS_NORM_BACK
- struct test_rms_norm_back : public test_case {
--    const ggml_type type;
-+    const ggml_type              type;
-     const std::array<int64_t, 4> ne;
--    const float eps;
-+    const float                  eps;
- 
--    std::string vars() override {
--        return VARS_TO_STR3(type, ne, eps);
--    }
-+    std::string vars() override { return VARS_TO_STR3(type, ne, eps); }
- 
--    test_rms_norm_back(ggml_type type = GGML_TYPE_F32,
--            std::array<int64_t, 4> ne = {64, 5, 4, 3},
--            float eps = 1e-6f)
--        : type(type), ne(ne), eps(eps) {}
-+    test_rms_norm_back(ggml_type type = GGML_TYPE_F32, std::array<int64_t, 4> ne = { 64, 5, 4, 3 }, float eps = 1e-6f) :
-+        type(type),
-+        ne(ne),
-+        eps(eps) {}
- 
-     ggml_tensor * build_graph(ggml_context * ctx) override {
-         ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
-@@ -1828,18 +1778,17 @@ struct test_rms_norm_back : public test_case {
- 
- // GGML_OP_SSM_CONV
- struct test_ssm_conv : public test_case {
--    const ggml_type type;
-+    const ggml_type              type;
-     const std::array<int64_t, 4> ne_a;
-     const std::array<int64_t, 4> ne_b;
- 
--    std::string vars() override {
--        return VARS_TO_STR3(type, ne_a, ne_b);
--    }
-+    std::string vars() override { return VARS_TO_STR3(type, ne_a, ne_b); }
- 
--    test_ssm_conv(ggml_type type = GGML_TYPE_F32,
--            std::array<int64_t, 4> ne_a = {10, 10, 10, 1},
--            std::array<int64_t, 4> ne_b = {3, 3, 1, 1})
--        : type(type), ne_a(ne_a), ne_b(ne_b) {}
-+    test_ssm_conv(ggml_type type = GGML_TYPE_F32, std::array<int64_t, 4> ne_a = { 10, 10, 10, 1 },
-+                  std::array<int64_t, 4> ne_b = { 3, 3, 1, 1 }) :
-+        type(type),
-+        ne_a(ne_a),
-+        ne_b(ne_b) {}
- 
-     ggml_tensor * build_graph(ggml_context * ctx) override {
-         ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne_a.data());
-@@ -1858,21 +1807,27 @@ struct test_ssm_scan : public test_case {
-     const int64_t n_seq_tokens;
-     const int64_t n_seqs;
- 
--    std::string vars() override {
--        return VARS_TO_STR5(type, d_state, d_inner, n_seq_tokens, n_seqs);
--    }
-+    std::string vars() override { return VARS_TO_STR5(type, d_state, d_inner, n_seq_tokens, n_seqs); }
- 
--    test_ssm_scan(ggml_type type = GGML_TYPE_F32,
--            int64_t d_state = 32, int64_t d_inner = 32, int64_t n_seq_tokens = 32, int64_t n_seqs = 32)
--        : type(type), d_state(d_state), d_inner(d_inner), n_seq_tokens(n_seq_tokens), n_seqs(n_seqs) {}
-+    test_ssm_scan(ggml_type type = GGML_TYPE_F32, int64_t d_state = 32, int64_t d_inner = 32, int64_t n_seq_tokens = 32,
-+                  int64_t n_seqs = 32) :
-+        type(type),
-+        d_state(d_state),
-+        d_inner(d_inner),
-+        n_seq_tokens(n_seq_tokens),
-+        n_seqs(n_seqs) {}
- 
-     ggml_tensor * build_graph(ggml_context * ctx) override {
--        ggml_tensor * s = ggml_new_tensor(ctx, type, 4, std::vector<int64_t>{ d_state, d_inner, n_seqs, 1 }.data());
--        ggml_tensor * x = ggml_new_tensor(ctx, type, 4, std::vector<int64_t>{ d_inner, n_seq_tokens, n_seqs, 1 }.data());
--        ggml_tensor * dt = ggml_new_tensor(ctx, type, 4, std::vector<int64_t>{ d_inner, n_seq_tokens, n_seqs, 1 }.data());
--        ggml_tensor * A = ggml_new_tensor(ctx, type, 4, std::vector<int64_t>{ d_state, d_inner, 1 , 1 }.data());
--        ggml_tensor * B = ggml_new_tensor(ctx, type, 4, std::vector<int64_t>{ d_state, n_seq_tokens, n_seqs, 1 }.data());
--        ggml_tensor * C = ggml_new_tensor(ctx, type, 4, std::vector<int64_t>{ d_state, n_seq_tokens, n_seqs, 1 }.data());
-+        ggml_tensor * s  = ggml_new_tensor(ctx, type, 4, std::vector<int64_t>{ d_state, d_inner, n_seqs, 1 }.data());
-+        ggml_tensor * x  =
-+            ggml_new_tensor(ctx, type, 4, std::vector<int64_t>{ d_inner, n_seq_tokens, n_seqs, 1 }.data());
-+        ggml_tensor * dt =
-+            ggml_new_tensor(ctx, type, 4, std::vector<int64_t>{ d_inner, n_seq_tokens, n_seqs, 1 }.data());
-+        ggml_tensor * A  = ggml_new_tensor(ctx, type, 4, std::vector<int64_t>{ d_state, d_inner, 1, 1 }.data());
-+        ggml_tensor * B  =
-+            ggml_new_tensor(ctx, type, 4, std::vector<int64_t>{ d_state, n_seq_tokens, n_seqs, 1 }.data());
-+        ggml_tensor * C  =
-+            ggml_new_tensor(ctx, type, 4, std::vector<int64_t>{ d_state, n_seq_tokens, n_seqs, 1 }.data());
-         ggml_tensor * out = ggml_ssm_scan(ctx, s, x, dt, A, B, C);
-         return out;
-     }
-@@ -1887,22 +1842,26 @@ struct test_rwkv_wkv6 : public test_case {
-     const int64_t n_seq_tokens;
-     const int64_t n_seqs;
- 
--    std::string vars() override {
--        return VARS_TO_STR5(type, head_count, head_size, n_seq_tokens, n_seqs);
--    }
-+    std::string vars() override { return VARS_TO_STR5(type, head_count, head_size, n_seq_tokens, n_seqs); }
- 
--    test_rwkv_wkv6(ggml_type type = GGML_TYPE_F32,
--            int64_t head_count = 32, int64_t head_size = 64, int64_t n_seq_tokens = 32, int64_t n_seqs = 32)
--        : type(type), head_count(head_count), head_size(head_size), n_seq_tokens(n_seq_tokens), n_seqs(n_seqs) {}
-+    test_rwkv_wkv6(ggml_type type = GGML_TYPE_F32, int64_t head_count = 32, int64_t head_size = 64,
-+                   int64_t n_seq_tokens = 32, int64_t n_seqs = 32) :
-+        type(type),
-+        head_count(head_count),
-+        head_size(head_size),
-+        n_seq_tokens(n_seq_tokens),
-+        n_seqs(n_seqs) {}
- 
-     ggml_tensor * build_graph(ggml_context * ctx) override {
-         const int64_t n_tokens = n_seq_tokens * n_seqs;
--        ggml_tensor * r = ggml_new_tensor(ctx, type, 3, std::vector<int64_t>{ head_size, head_count, n_tokens }.data());
--        ggml_tensor * k = ggml_new_tensor(ctx, type, 3, std::vector<int64_t>{ head_size, head_count, n_tokens }.data());
--        ggml_tensor * v = ggml_new_tensor(ctx, type, 3, std::vector<int64_t>{ head_size, head_count, n_tokens }.data());
--        ggml_tensor * tf = ggml_new_tensor(ctx, type, 2, std::vector<int64_t>{ head_size, head_count }.data());
ggml_new_tensor(ctx, type, 2, std::vector{ head_size, head_count }.data()); -- ggml_tensor * td = ggml_new_tensor(ctx, type, 3, std::vector{ head_size, head_count, n_tokens }.data()); -- ggml_tensor * s = ggml_new_tensor(ctx, type, 2, std::vector{ head_size * head_size * head_count, n_seqs }.data()); -+ ggml_tensor * r = ggml_new_tensor(ctx, type, 3, std::vector{ head_size, head_count, n_tokens }.data()); -+ ggml_tensor * k = ggml_new_tensor(ctx, type, 3, std::vector{ head_size, head_count, n_tokens }.data()); -+ ggml_tensor * v = ggml_new_tensor(ctx, type, 3, std::vector{ head_size, head_count, n_tokens }.data()); -+ ggml_tensor * tf = ggml_new_tensor(ctx, type, 2, std::vector{ head_size, head_count }.data()); -+ ggml_tensor * td = -+ ggml_new_tensor(ctx, type, 3, std::vector{ head_size, head_count, n_tokens }.data()); -+ ggml_tensor * s = -+ ggml_new_tensor(ctx, type, 2, std::vector{ head_size * head_size * head_count, n_seqs }.data()); - ggml_tensor * out = ggml_rwkv_wkv6(ctx, k, v, r, tf, td, s); - return out; - } -@@ -1917,21 +1876,24 @@ struct test_gla : public test_case { - const int64_t n_seq_tokens; - const int64_t n_seqs; - -- std::string vars() override { -- return VARS_TO_STR5(type, head_count, head_size, n_seq_tokens, n_seqs); -- } -+ std::string vars() override { return VARS_TO_STR5(type, head_count, head_size, n_seq_tokens, n_seqs); } - -- test_gla(ggml_type type = GGML_TYPE_F32, -- int64_t head_count = 32, int64_t head_size = 64, int64_t n_seq_tokens = 32, int64_t n_seqs = 32) -- : type(type), head_count(head_count), head_size(head_size), n_seq_tokens(n_seq_tokens), n_seqs(n_seqs) {} -+ test_gla(ggml_type type = GGML_TYPE_F32, int64_t head_count = 32, int64_t head_size = 64, int64_t n_seq_tokens = 32, -+ int64_t n_seqs = 32) : -+ type(type), -+ head_count(head_count), -+ head_size(head_size), -+ n_seq_tokens(n_seq_tokens), -+ n_seqs(n_seqs) {} - - ggml_tensor * build_graph(ggml_context * ctx) override { - const int64_t n_tokens = n_seq_tokens * n_seqs; -- ggml_tensor * q = ggml_new_tensor(ctx, type, 3, std::vector{ head_size, head_count, n_tokens }.data()); -- ggml_tensor * k = ggml_new_tensor(ctx, type, 3, std::vector{ head_size, head_count, n_tokens }.data()); -- ggml_tensor * v = ggml_new_tensor(ctx, type, 3, std::vector{ head_size, head_count, n_tokens }.data()); -- ggml_tensor * g = ggml_new_tensor(ctx, type, 3, std::vector{ head_size, head_count, n_tokens }.data()); -- ggml_tensor * s = ggml_new_tensor(ctx, type, 2, std::vector{ head_size * head_size * head_count, n_seqs }.data()); -+ ggml_tensor * q = ggml_new_tensor(ctx, type, 3, std::vector{ head_size, head_count, n_tokens }.data()); -+ ggml_tensor * k = ggml_new_tensor(ctx, type, 3, std::vector{ head_size, head_count, n_tokens }.data()); -+ ggml_tensor * v = ggml_new_tensor(ctx, type, 3, std::vector{ head_size, head_count, n_tokens }.data()); -+ ggml_tensor * g = ggml_new_tensor(ctx, type, 3, std::vector{ head_size, head_count, n_tokens }.data()); -+ ggml_tensor * s = -+ ggml_new_tensor(ctx, type, 2, std::vector{ head_size * head_size * head_count, n_seqs }.data()); - ggml_tensor * out = ggml_gated_linear_attn(ctx, k, v, q, g, s, pow(head_size, -0.5)); - return out; - } -@@ -1946,26 +1908,29 @@ struct test_rwkv_wkv7 : public test_case { - const int64_t n_seq_tokens; - const int64_t n_seqs; - -- std::string vars() override { -- return VARS_TO_STR5(type, head_count, head_size, n_seq_tokens, n_seqs); -- } -+ std::string vars() override { return VARS_TO_STR5(type, head_count, head_size, n_seq_tokens, n_seqs); } - -- 
test_rwkv_wkv7(ggml_type type = GGML_TYPE_F32, -- int64_t head_count = 32, int64_t head_size = 64, int64_t n_seq_tokens = 32, int64_t n_seqs = 32) -- : type(type), head_count(head_count), head_size(head_size), n_seq_tokens(n_seq_tokens), n_seqs(n_seqs) {} -+ test_rwkv_wkv7(ggml_type type = GGML_TYPE_F32, int64_t head_count = 32, int64_t head_size = 64, -+ int64_t n_seq_tokens = 32, int64_t n_seqs = 32) : -+ type(type), -+ head_count(head_count), -+ head_size(head_size), -+ n_seq_tokens(n_seq_tokens), -+ n_seqs(n_seqs) {} - - ggml_tensor * build_graph(ggml_context * ctx) override { - const int64_t n_tokens = n_seq_tokens * n_seqs; -- ggml_tensor * r = ggml_new_tensor(ctx, type, 3, std::vector{ head_size, head_count, n_tokens }.data()); -- ggml_tensor * w = ggml_new_tensor(ctx, type, 3, std::vector{ head_size, head_count, n_tokens }.data()); -- ggml_tensor * k = ggml_new_tensor(ctx, type, 3, std::vector{ head_size, head_count, n_tokens }.data()); -- ggml_tensor * v = ggml_new_tensor(ctx, type, 3, std::vector{ head_size, head_count, n_tokens }.data()); -- ggml_tensor * a = ggml_new_tensor(ctx, type, 3, std::vector{ head_size, head_count, n_tokens }.data()); -- ggml_tensor * b = ggml_new_tensor(ctx, type, 3, std::vector{ head_size, head_count, n_tokens }.data()); -+ ggml_tensor * r = ggml_new_tensor(ctx, type, 3, std::vector{ head_size, head_count, n_tokens }.data()); -+ ggml_tensor * w = ggml_new_tensor(ctx, type, 3, std::vector{ head_size, head_count, n_tokens }.data()); -+ ggml_tensor * k = ggml_new_tensor(ctx, type, 3, std::vector{ head_size, head_count, n_tokens }.data()); -+ ggml_tensor * v = ggml_new_tensor(ctx, type, 3, std::vector{ head_size, head_count, n_tokens }.data()); -+ ggml_tensor * a = ggml_new_tensor(ctx, type, 3, std::vector{ head_size, head_count, n_tokens }.data()); -+ ggml_tensor * b = ggml_new_tensor(ctx, type, 3, std::vector{ head_size, head_count, n_tokens }.data()); - // Outputs may become NaN with long seqlen without these normalization -- a = ggml_l2_norm(ctx, a, 1e-7F); -- b = ggml_l2_norm(ctx, b, 1e-7F); -- ggml_tensor * s = ggml_new_tensor(ctx, type, 2, std::vector{ head_size * head_size * head_count, n_seqs }.data()); -+ a = ggml_l2_norm(ctx, a, 1e-7F); -+ b = ggml_l2_norm(ctx, b, 1e-7F); -+ ggml_tensor * s = -+ ggml_new_tensor(ctx, type, 2, std::vector{ head_size * head_size * head_count, n_seqs }.data()); - ggml_tensor * out = ggml_rwkv_wkv7(ctx, r, w, k, v, a, b, s); - return out; - } -@@ -1973,40 +1938,39 @@ struct test_rwkv_wkv7 : public test_case { - - // GGML_OP_MUL_MAT - struct test_mul_mat : public test_case { -- const ggml_type type_a; -- const ggml_type type_b; -- const int64_t m; -- const int64_t n; -- const int64_t k; -- const std::array bs; // dims 3 and 4 -- const std::array nr; // repeat in dims 3 and 4 -- const std::array per; // permutation of dimensions -- const bool v; // whether a and b are non-contiguous views -+ const ggml_type type_a; -+ const ggml_type type_b; -+ const int64_t m; -+ const int64_t n; -+ const int64_t k; -+ const std::array bs; // dims 3 and 4 -+ const std::array nr; // repeat in dims 3 and 4 -+ const std::array per; // permutation of dimensions -+ const bool v; // whether a and b are non-contiguous views - -- std::string vars() override { -- return VARS_TO_STR9(type_a, type_b, m, n, k, bs, nr, per, v); -- } -+ std::string vars() override { return VARS_TO_STR9(type_a, type_b, m, n, k, bs, nr, per, v); } - -- double max_nmse_err() override { -- return 5e-4; -- } -+ double max_nmse_err() override { return 5e-4; } - -- 
int64_t grad_nmax() override { -- return 20000; -- } -+ int64_t grad_nmax() override { return 20000; } - - uint64_t op_flops(ggml_tensor * t) override { - GGML_UNUSED(t); - return 2 * m * n * k * bs[0] * nr[0] * bs[1] * nr[1]; - } - -- test_mul_mat(ggml_type type_a = GGML_TYPE_F32, ggml_type type_b = GGML_TYPE_F32, -- int64_t m = 32, int64_t n = 32, int64_t k = 32, -- std::array bs = {10, 10}, -- std::array nr = {2, 2}, -- std::array per = {0, 1, 2, 3}, -- bool v = false) -- : type_a(type_a), type_b(type_b), m(m), n(n), k(k), bs(bs), nr(nr), per(per), v(v) {} -+ test_mul_mat(ggml_type type_a = GGML_TYPE_F32, ggml_type type_b = GGML_TYPE_F32, int64_t m = 32, int64_t n = 32, -+ int64_t k = 32, std::array bs = { 10, 10 }, std::array nr = { 2, 2 }, -+ std::array per = { 0, 1, 2, 3 }, bool v = false) : -+ type_a(type_a), -+ type_b(type_b), -+ m(m), -+ n(n), -+ k(k), -+ bs(bs), -+ nr(nr), -+ per(per), -+ v(v) {} - - ggml_tensor * build_graph(ggml_context * ctx) override { - // C^T = A * B^T: (k, m) * (k, n) => (m, n) -@@ -2016,13 +1980,13 @@ struct test_mul_mat : public test_case { - const int npermuted = (per[0] != 0) + (per[1] != 1) + (per[2] != 2) + (per[3] != 3); - if (npermuted > 0) { - GGML_ASSERT(npermuted == 2); -- GGML_ASSERT(!v); // not handled -+ GGML_ASSERT(!v); // not handled - GGML_ASSERT(!ggml_is_quantized(type_a) || per[0] == 0); - GGML_ASSERT(!ggml_is_quantized(type_b) || per[0] == 0); - - // Create tensors with the permuted dimensions, then permute them back to the dimensions given by m,n,k. -- const int64_t ne_a[4] = {k, m, bs[0], bs[1]}; -- const int64_t ne_b[4] = {k, n, bs[0]*nr[0], bs[1]*nr[1]}; -+ const int64_t ne_a[4] = { k, m, bs[0], bs[1] }; -+ const int64_t ne_b[4] = { k, n, bs[0] * nr[0], bs[1] * nr[1] }; - - a = ggml_new_tensor_4d(ctx, type_a, ne_a[per[0]], ne_a[per[1]], ne_a[per[2]], ne_a[per[3]]); - b = ggml_new_tensor_4d(ctx, type_b, ne_b[per[0]], ne_b[per[1]], ne_b[per[2]], ne_b[per[3]]); -@@ -2041,8 +2005,8 @@ struct test_mul_mat : public test_case { - ggml_set_name(b, "b_permuted"); - } else { - if (v) { -- a = ggml_new_tensor_4d(ctx, type_a, k*2, m, bs[0], bs[1]); -- b = ggml_new_tensor_4d(ctx, type_b, k*2, n, bs[0]*nr[0], bs[1]*nr[1]); -+ a = ggml_new_tensor_4d(ctx, type_a, k * 2, m, bs[0], bs[1]); -+ b = ggml_new_tensor_4d(ctx, type_b, k * 2, n, bs[0] * nr[0], bs[1] * nr[1]); - - if (!ggml_is_quantized(type_a)) { - if (bs[1] == 1 && nr[1] == 1) { -@@ -2051,11 +2015,11 @@ struct test_mul_mat : public test_case { - ggml_set_param(b); - } - -- a = ggml_view_4d(ctx, a, k, m, bs[0], bs[1], a->nb[1], a->nb[2], a->nb[3], 0); -- b = ggml_view_4d(ctx, b, k, n, bs[0]*nr[0], bs[1]*nr[1], b->nb[1], b->nb[2], b->nb[3], 0); -+ a = ggml_view_4d(ctx, a, k, m, bs[0], bs[1], a->nb[1], a->nb[2], a->nb[3], 0); -+ b = ggml_view_4d(ctx, b, k, n, bs[0] * nr[0], bs[1] * nr[1], b->nb[1], b->nb[2], b->nb[3], 0); - } else { -- a = ggml_new_tensor_4d(ctx, type_a, k, m, bs[0], bs[1]); -- b = ggml_new_tensor_4d(ctx, type_b, k, n, bs[0]*nr[0], bs[1]*nr[1]); -+ a = ggml_new_tensor_4d(ctx, type_a, k, m, bs[0], bs[1]); -+ b = ggml_new_tensor_4d(ctx, type_b, k, n, bs[0] * nr[0], bs[1] * nr[1]); - - if (!ggml_is_quantized(type_a)) { - if (bs[1] == 1 && nr[1] == 1) { -@@ -2079,33 +2043,34 @@ struct test_mul_mat : public test_case { - struct test_mul_mat_id : public test_case { - const ggml_type type_a; - const ggml_type type_b; -- const int n_mats; -- const int n_used; -- const bool b; // broadcast b matrix -- const int64_t m; -- const int64_t n; -- const int64_t k; -+ const int n_mats; -+ 
const int n_used; -+ const bool b; // broadcast b matrix -+ const int64_t m; -+ const int64_t n; -+ const int64_t k; - -- std::string vars() override { -- return VARS_TO_STR8(type_a, type_b, n_mats, n_used, b, m, n, k); -- } -+ std::string vars() override { return VARS_TO_STR8(type_a, type_b, n_mats, n_used, b, m, n, k); } - -- double max_nmse_err() override { -- return 5e-4; -- } -+ double max_nmse_err() override { return 5e-4; } - - uint64_t op_flops(ggml_tensor * t) override { - GGML_UNUSED(t); - return 2 * m * k * n * n_used; - } - -- test_mul_mat_id(ggml_type type_a = GGML_TYPE_F32, ggml_type type_b = GGML_TYPE_F32, -- int n_mats = 8, int n_used = 2, bool b = false, -- int64_t m = 32, int64_t n = 32, int64_t k = 32) -- : type_a(type_a), type_b(type_b), n_mats(n_mats), n_used(n_used), b(b), -- m(m), n(n), k(k) { -- GGML_ASSERT(n_used <= n_mats); -- } -+ test_mul_mat_id(ggml_type type_a = GGML_TYPE_F32, ggml_type type_b = GGML_TYPE_F32, int n_mats = 8, int n_used = 2, -+ bool b = false, int64_t m = 32, int64_t n = 32, int64_t k = 32) : -+ type_a(type_a), -+ type_b(type_b), -+ n_mats(n_mats), -+ n_used(n_used), -+ b(b), -+ m(m), -+ n(n), -+ k(k) { -+ GGML_ASSERT(n_used <= n_mats); -+ } - - ggml_tensor * build_graph(ggml_context * ctx) override { - // C^T = A * B^T: (k, m) * (k, n) => (m, n) -@@ -2129,11 +2094,13 @@ struct test_mul_mat_id : public test_case { - } - - void initialize_tensors(ggml_context * ctx) override { -- std::random_device rd; -+ std::random_device rd; - std::default_random_engine rng(rd()); - for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) { - if (t->type == GGML_TYPE_I32) { -- if (ggml_is_view_op(t->op)) { continue; } -+ if (ggml_is_view_op(t->op)) { -+ continue; -+ } - // ids - for (int64_t r = 0; r < ggml_nrows(t); r++) { - std::vector data(t->ne[0]); -@@ -2152,29 +2119,30 @@ struct test_mul_mat_id : public test_case { - - // GGML_OP_OUT_PROD - struct test_out_prod : public test_case { -- const ggml_type type_a; -- const ggml_type type_b; -- const int64_t m; -- const int64_t n; -- const int64_t k; -- const std::array bs; // dims 3 and 4 -- const std::array nr; // repeat in dims 3 and 4 -- const bool trans_b; -+ const ggml_type type_a; -+ const ggml_type type_b; -+ const int64_t m; -+ const int64_t n; -+ const int64_t k; -+ const std::array bs; // dims 3 and 4 -+ const std::array nr; // repeat in dims 3 and 4 -+ const bool trans_b; - -- std::string vars() override { -- return VARS_TO_STR8(type_a, type_b, m, n, k, bs, nr, trans_b); -- } -+ std::string vars() override { return VARS_TO_STR8(type_a, type_b, m, n, k, bs, nr, trans_b); } - -- double max_nmse_err() override { -- return 5e-4; -- } -+ double max_nmse_err() override { return 5e-4; } - -- test_out_prod(ggml_type type_a = GGML_TYPE_F32, ggml_type type_b = GGML_TYPE_F32, -- int64_t m = 32, int64_t n = 32, int64_t k = 32, -- std::array bs = {10, 10}, -- std::array nr = {2, 2}, -- bool trans_b = false) -- : type_a(type_a), type_b(type_b), m(m), n(n), k(k), bs(bs), nr(nr), trans_b(trans_b) {} -+ test_out_prod(ggml_type type_a = GGML_TYPE_F32, ggml_type type_b = GGML_TYPE_F32, int64_t m = 32, int64_t n = 32, -+ int64_t k = 32, std::array bs = { 10, 10 }, std::array nr = { 2, 2 }, -+ bool trans_b = false) : -+ type_a(type_a), -+ type_b(type_b), -+ m(m), -+ n(n), -+ k(k), -+ bs(bs), -+ nr(nr), -+ trans_b(trans_b) {} - - ggml_tensor * build_graph(ggml_context * ctx) override { - ggml_tensor * a = ggml_new_tensor_4d(ctx, type_a, m, k, bs[0], bs[1]); -@@ -2182,10 +2150,10 
@@ struct test_out_prod : public test_case { - - ggml_tensor * b; - if (trans_b) { -- b = ggml_new_tensor_4d(ctx, type_b, k, n, bs[0]*nr[0], bs[1]*nr[1]); -+ b = ggml_new_tensor_4d(ctx, type_b, k, n, bs[0] * nr[0], bs[1] * nr[1]); - b = ggml_transpose(ctx, b); - } else { -- b = ggml_new_tensor_4d(ctx, type_b, n, k, bs[0]*nr[0], bs[1]*nr[1]); -+ b = ggml_new_tensor_4d(ctx, type_b, n, k, bs[0] * nr[0], bs[1] * nr[1]); - } - ggml_set_name(b, "b"); - -@@ -2198,16 +2166,12 @@ struct test_out_prod : public test_case { - - // GGML_OP_SQR - struct test_sqr : public test_case { -- const ggml_type type; -+ const ggml_type type; - const std::array ne; - -- std::string vars() override { -- return VARS_TO_STR2(type, ne); -- } -+ std::string vars() override { return VARS_TO_STR2(type, ne); } - -- test_sqr(ggml_type type = GGML_TYPE_F32, -- std::array ne = {10, 5, 4, 3}) -- : type(type), ne(ne) {} -+ test_sqr(ggml_type type = GGML_TYPE_F32, std::array ne = { 10, 5, 4, 3 }) : type(type), ne(ne) {} - - ggml_tensor * build_graph(ggml_context * ctx) override { - ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data()); -@@ -2221,22 +2185,18 @@ struct test_sqr : public test_case { - } - - float grad_eps() override { -- return 0.1f * 0.25f*ne[0]*ne[1]*ne[2]*ne[3]; // 10% of expected value of sum. -+ return 0.1f * 0.25f * ne[0] * ne[1] * ne[2] * ne[3]; // 10% of expected value of sum. - } - }; - - // GGML_OP_SQRT - struct test_sqrt : public test_case { -- const ggml_type type; -+ const ggml_type type; - const std::array ne; - -- std::string vars() override { -- return VARS_TO_STR2(type, ne); -- } -+ std::string vars() override { return VARS_TO_STR2(type, ne); } - -- test_sqrt(ggml_type type = GGML_TYPE_F32, -- std::array ne = {10, 3, 3, 2}) -- : type(type), ne(ne) {} -+ test_sqrt(ggml_type type = GGML_TYPE_F32, std::array ne = { 10, 3, 3, 2 }) : type(type), ne(ne) {} - - ggml_tensor * build_graph(ggml_context * ctx) override { - ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data()); -@@ -2256,27 +2216,19 @@ struct test_sqrt : public test_case { - } - } - -- float grad_eps() override { -- return 20.0f; -- } -+ float grad_eps() override { return 20.0f; } - -- bool grad_precise() override { -- return true; -- } -+ bool grad_precise() override { return true; } - }; - - // GGML_OP_LOG - struct test_log : public test_case { -- const ggml_type type; -+ const ggml_type type; - const std::array ne; - -- std::string vars() override { -- return VARS_TO_STR2(type, ne); -- } -+ std::string vars() override { return VARS_TO_STR2(type, ne); } - -- test_log(ggml_type type = GGML_TYPE_F32, -- std::array ne = {10, 5, 4, 3}) -- : type(type), ne(ne) {} -+ test_log(ggml_type type = GGML_TYPE_F32, std::array ne = { 10, 5, 4, 3 }) : type(type), ne(ne) {} - - ggml_tensor * build_graph(ggml_context * ctx) override { - ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data()); -@@ -2296,23 +2248,17 @@ struct test_log : public test_case { - } - } - -- bool grad_precise() override { -- return true; -- } -+ bool grad_precise() override { return true; } - }; - - // GGML_OP_SIN - struct test_sin : public test_case { -- const ggml_type type; -+ const ggml_type type; - const std::array ne; - -- std::string vars() override { -- return VARS_TO_STR2(type, ne); -- } -+ std::string vars() override { return VARS_TO_STR2(type, ne); } - -- test_sin(ggml_type type = GGML_TYPE_F32, -- std::array ne = {10, 2, 2, 2}) -- : type(type), ne(ne) {} -+ test_sin(ggml_type type = GGML_TYPE_F32, std::array ne = { 10, 2, 2, 2 }) : type(type), ne(ne) {} - - 
ggml_tensor * build_graph(ggml_context * ctx) override { - ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data()); -@@ -2327,35 +2273,25 @@ struct test_sin : public test_case { - - void initialize_tensors(ggml_context * ctx) override { - for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) { -- init_tensor_uniform(t, -6.5f, 6.5f); // Covers interval [-2*pi, 2*pi]. -+ init_tensor_uniform(t, -6.5f, 6.5f); // Covers interval [-2*pi, 2*pi]. - } - } - -- double max_maa_err() override { -- return 1e-3; -- } -+ double max_maa_err() override { return 1e-3; } - -- float grad_eps() override { -- return 0.2f; -- } -+ float grad_eps() override { return 0.2f; } - -- bool grad_precise() override { -- return true; -- } -+ bool grad_precise() override { return true; } - }; - - // GGML_OP_COS - struct test_cos : public test_case { -- const ggml_type type; -+ const ggml_type type; - const std::array ne; - -- std::string vars() override { -- return VARS_TO_STR2(type, ne); -- } -+ std::string vars() override { return VARS_TO_STR2(type, ne); } - -- test_cos(ggml_type type = GGML_TYPE_F32, -- std::array ne = {10, 2, 2, 2}) -- : type(type), ne(ne) {} -+ test_cos(ggml_type type = GGML_TYPE_F32, std::array ne = { 10, 2, 2, 2 }) : type(type), ne(ne) {} - - ggml_tensor * build_graph(ggml_context * ctx) override { - ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data()); -@@ -2370,38 +2306,32 @@ struct test_cos : public test_case { - - void initialize_tensors(ggml_context * ctx) override { - for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) { -- init_tensor_uniform(t, -6.5f, 6.5f); // Covers interval [-2*pi, 2*pi]. -+ init_tensor_uniform(t, -6.5f, 6.5f); // Covers interval [-2*pi, 2*pi]. 
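The ±6.5f bound used by the sin/cos initializers here works because 2π ≈ 6.2832, so a uniform draw from [-6.5, 6.5] always covers at least one full period; the comparatively coarse grad_eps of 0.2 then keeps the central-difference gradient estimate away from floating-point cancellation. A minimal standalone check of both points, assuming nothing beyond the C++ standard library:

```cpp
#include <cmath>
#include <cstdio>

int main() {
    // 2*pi fits inside the [-6.5, 6.5] initialization interval.
    const float two_pi = 2.0f * 3.14159265358979f;
    std::printf("2*pi = %.4f < 6.5 -> full period sampled\n", two_pi);

    // Central difference with the test's grad_eps of 0.2: the estimate of
    // d/dx sin(x) stays close to the analytic derivative cos(x).
    const float x = 1.234f, eps = 0.2f;
    const float fd = (std::sin(x + eps) - std::sin(x - eps)) / (2.0f * eps);
    std::printf("finite diff = %.6f vs cos(x) = %.6f\n", fd, std::cos(x));
    return 0;
}
```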
- } - } - -- double max_maa_err() override { -- return 1e-3; -- } -+ double max_maa_err() override { return 1e-3; } - -- float grad_eps() override { -- return 0.2f; -- } -+ float grad_eps() override { return 0.2f; } - -- bool grad_precise() override { -- return true; -- } -+ bool grad_precise() override { return true; } - }; - - // GGML_OP_CLAMP - struct test_clamp : public test_case { -- const ggml_type type; -+ const ggml_type type; - const std::array ne; -- float min; -- float max; -+ float min; -+ float max; - -- std::string vars() override { -- return VARS_TO_STR4(type, ne, min, max); -- } -+ std::string vars() override { return VARS_TO_STR4(type, ne, min, max); } - -- test_clamp(ggml_type type = GGML_TYPE_F32, -- std::array ne = {10, 5, 4, 3}, -- float min = -0.5f, float max = 0.5f) -- : type(type), ne(ne), min(min), max(max) {} -+ test_clamp(ggml_type type = GGML_TYPE_F32, std::array ne = { 10, 5, 4, 3 }, float min = -0.5f, -+ float max = 0.5f) : -+ type(type), -+ ne(ne), -+ min(min), -+ max(max) {} - - ggml_tensor * build_graph(ggml_context * ctx) override { - ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data()); -@@ -2413,29 +2343,23 @@ struct test_clamp : public test_case { - return out; - } - -- float grad_eps() override { -- return 1e-2f; -- } -+ float grad_eps() override { return 1e-2f; } - -- std::vector grad_expect() override { -- return {0.0f, 1.0f}; -- } -+ std::vector grad_expect() override { return { 0.0f, 1.0f }; } - }; - - // GGML_OP_DIAG_MASK_INF - struct test_diag_mask_inf : public test_case { -- const ggml_type type; -+ const ggml_type type; - const std::array ne; -- const int n_past; -+ const int n_past; - -- std::string vars() override { -- return VARS_TO_STR3(type, ne, n_past); -- } -+ std::string vars() override { return VARS_TO_STR3(type, ne, n_past); } - -- test_diag_mask_inf(ggml_type type = GGML_TYPE_F32, -- std::array ne = {10, 10, 3, 2}, -- int n_past = 5) -- : type(type), ne(ne), n_past(n_past) {} -+ test_diag_mask_inf(ggml_type type = GGML_TYPE_F32, std::array ne = { 10, 10, 3, 2 }, int n_past = 5) : -+ type(type), -+ ne(ne), -+ n_past(n_past) {} - - ggml_tensor * build_graph(ggml_context * ctx) override { - ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data()); -@@ -2451,30 +2375,27 @@ struct test_diag_mask_inf : public test_case { - - // GGML_OP_SOFT_MAX - struct test_soft_max : public test_case { -- const ggml_type type; -+ const ggml_type type; - const std::array ne; -- const bool mask; -- const ggml_type m_prec; -- const float scale; -- const float max_bias; -+ const bool mask; -+ const ggml_type m_prec; -+ const float scale; -+ const float max_bias; - -- std::string vars() override { -- return VARS_TO_STR6(type, ne, mask, m_prec, scale, max_bias); -- } -+ std::string vars() override { return VARS_TO_STR6(type, ne, mask, m_prec, scale, max_bias); } - - // the 1024 test with bias occasionally fails: - // SOFT_MAX(type=f32,ne=[1024,16,1,1],mask=1,scale=1.000000,max_bias=8.000000): [SOFT_MAX] NMSE = 0.000000103 > 0.000000100 FAIL -- virtual double max_nmse_err() override { -- return 1e-6; -- } -+ virtual double max_nmse_err() override { return 1e-6; } - -- test_soft_max(ggml_type type = GGML_TYPE_F32, -- std::array ne = {10, 5, 4, 3}, -- bool mask = false, -- ggml_type m_prec = GGML_TYPE_F32, -- float scale = 1.0f, -- float max_bias = 0.0f) -- : type(type), ne(ne), mask(mask), m_prec(m_prec), scale(scale), max_bias(max_bias) {} -+ test_soft_max(ggml_type type = GGML_TYPE_F32, std::array ne = { 10, 5, 4, 3 }, bool mask = false, -+ ggml_type 
m_prec = GGML_TYPE_F32, float scale = 1.0f, float max_bias = 0.0f) : -+ type(type), -+ ne(ne), -+ mask(mask), -+ m_prec(m_prec), -+ scale(scale), -+ max_bias(max_bias) {} - - ggml_tensor * build_graph(ggml_context * ctx) override { - ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data()); -@@ -2493,27 +2414,24 @@ struct test_soft_max : public test_case { - return out; - } - -- bool grad_precise() override { -- return true; -- } -+ bool grad_precise() override { return true; } - }; - - // GGML_OP_SOFT_MAX_BACK - struct test_soft_max_back : public test_case { -- const ggml_type type; -+ const ggml_type type; - const std::array ne; -- const float scale; -- const float max_bias; -+ const float scale; -+ const float max_bias; - -- std::string vars() override { -- return VARS_TO_STR4(type, ne, scale, max_bias); -- } -+ std::string vars() override { return VARS_TO_STR4(type, ne, scale, max_bias); } - -- test_soft_max_back(ggml_type type = GGML_TYPE_F32, -- std::array ne = {10, 5, 4, 3}, -- float scale = 1.0f, -- float max_bias = 0.0f) -- : type(type), ne(ne), scale(scale), max_bias(max_bias) {} -+ test_soft_max_back(ggml_type type = GGML_TYPE_F32, std::array ne = { 10, 5, 4, 3 }, float scale = 1.0f, -+ float max_bias = 0.0f) : -+ type(type), -+ ne(ne), -+ scale(scale), -+ max_bias(max_bias) {} - - ggml_tensor * build_graph(ggml_context * ctx) override { - ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data()); -@@ -2531,33 +2449,45 @@ struct test_soft_max_back : public test_case { - - // GGML_OP_ROPE + GGML_OP_ROPE_BACK - struct test_rope : public test_case { -- const ggml_type type; -+ const ggml_type type; - const std::array ne_a; -- int n_dims; -- int mode; -- int n_ctx; // used to generate positions -- float fs; // freq_scale -- float ef; // ext_factor -- float af; // attn_factor -- bool ff; -- int v; // view (1 : non-contiguous a) -- bool forward; -+ int n_dims; -+ int mode; -+ int n_ctx; // used to generate positions -+ float fs; // freq_scale -+ float ef; // ext_factor -+ float af; // attn_factor -+ bool ff; -+ int v; // view (1 : non-contiguous a) -+ bool forward; - - std::string vars() override { - // forward can be inferred from the op, does not need to be printed - return VARS_TO_STR10(type, ne_a, n_dims, mode, n_ctx, fs, ef, af, ff, v); - } - -- test_rope(ggml_type type = GGML_TYPE_F32, -- std::array ne_a = {10, 5, 3, 1}, -- int n_dims = 10, int mode = 0, int n_ctx = 512, float fs = 1.0f, -- float ef = 0.0f, float af = 0.0f, bool ff = false, int v = 0, bool forward = true) -- : type(type), ne_a(ne_a), n_dims(n_dims), mode(mode), n_ctx(n_ctx), fs(fs), ef(ef), af(af), ff(ff), v(v), forward(forward) {} -+ test_rope(ggml_type type = GGML_TYPE_F32, std::array ne_a = { 10, 5, 3, 1 }, int n_dims = 10, -+ int mode = 0, int n_ctx = 512, float fs = 1.0f, float ef = 0.0f, float af = 0.0f, bool ff = false, -+ int v = 0, bool forward = true) : -+ type(type), -+ ne_a(ne_a), -+ n_dims(n_dims), -+ mode(mode), -+ n_ctx(n_ctx), -+ fs(fs), -+ ef(ef), -+ af(af), -+ ff(ff), -+ v(v), -+ forward(forward) {} - - ggml_tensor * build_graph(ggml_context * ctx) override { - ggml_tensor * a; - if (v & 1) { -- auto ne = ne_a; ne[0] *= 2; ne[1] *= 4; ne[2] *= 3; -+ auto ne = ne_a; -+ ne[0] *= 2; -+ ne[1] *= 4; -+ ne[2] *= 3; - a = ggml_new_tensor(ctx, type, 4, ne.data()); - if (forward) { - ggml_set_param(a); -@@ -2574,7 +2504,7 @@ struct test_rope : public test_case { - ggml_set_name(a, "a"); - } - -- const bool is_mrope = mode & GGML_ROPE_TYPE_MROPE; -+ const bool is_mrope = mode & GGML_ROPE_TYPE_MROPE; - 
const bool is_vision = mode == GGML_ROPE_TYPE_VISION; - - ggml_tensor * pos; -@@ -2587,32 +2517,37 @@ struct test_rope : public test_case { - - ggml_tensor * freq = nullptr; - if (ff) { -- freq = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_dims/2); -+ freq = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_dims / 2); - ggml_set_name(freq, "freq"); - } - - ggml_tensor * out; - if (is_mrope) { - if (is_vision) { -- GGML_ASSERT(n_dims/4 > 0); -- int rope_sections[4] = {n_dims/4, n_dims/4, 0, 0}; // Vision-RoPE only use first two dimension for image (x, y) coordinate -+ GGML_ASSERT(n_dims / 4 > 0); -+ int rope_sections[4] = { n_dims / 4, n_dims / 4, 0, -+ 0 }; // Vision-RoPE only use first two dimension for image (x, y) coordinate - if (forward) { -- out = ggml_rope_multi (ctx, a, pos, freq, n_dims/2, rope_sections, mode, 0, 10000.0f, fs, ef, af, 1.0f, 1.0f); -+ out = ggml_rope_multi(ctx, a, pos, freq, n_dims / 2, rope_sections, mode, 0, 10000.0f, fs, ef, af, -+ 1.0f, 1.0f); - } else { -- out = ggml_rope_multi_back(ctx, a, pos, freq, n_dims/2, rope_sections, mode, 0, 10000.0f, fs, ef, af, 1.0f, 1.0f); -+ out = ggml_rope_multi_back(ctx, a, pos, freq, n_dims / 2, rope_sections, mode, 0, 10000.0f, fs, ef, -+ af, 1.0f, 1.0f); - } - } else { -- GGML_ASSERT(n_dims/3 > 0); -- int rope_sections[4] = {n_dims/3, n_dims/3, n_dims/3, 0}; -+ GGML_ASSERT(n_dims / 3 > 0); -+ int rope_sections[4] = { n_dims / 3, n_dims / 3, n_dims / 3, 0 }; - if (forward) { -- out = ggml_rope_multi (ctx, a, pos, freq, n_dims, rope_sections, mode, 0, 10000.0f, fs, ef, af, 1.0f, 1.0f); -+ out = ggml_rope_multi(ctx, a, pos, freq, n_dims, rope_sections, mode, 0, 10000.0f, fs, ef, af, 1.0f, -+ 1.0f); - } else { -- out = ggml_rope_multi_back(ctx, a, pos, freq, n_dims, rope_sections, mode, 0, 10000.0f, fs, ef, af, 1.0f, 1.0f); -+ out = ggml_rope_multi_back(ctx, a, pos, freq, n_dims, rope_sections, mode, 0, 10000.0f, fs, ef, af, -+ 1.0f, 1.0f); - } - } - } else { - if (forward) { -- out = ggml_rope_ext (ctx, a, pos, freq, n_dims, mode, 0, 10000.0f, fs, ef, af, 1.0f, 1.0f); -+ out = ggml_rope_ext(ctx, a, pos, freq, n_dims, mode, 0, 10000.0f, fs, ef, af, 1.0f, 1.0f); - } else { - out = ggml_rope_ext_back(ctx, a, pos, freq, n_dims, mode, 0, 10000.0f, fs, ef, af, 1.0f, 1.0f); - } -@@ -2628,14 +2563,14 @@ struct test_rope : public test_case { - for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) { - if (t->type == GGML_TYPE_I32) { - // pos -- const int num_pos_ids = (mode & GGML_ROPE_TYPE_MROPE) ? ne_a[2] * 4 : ne_a[2]; -+ const int num_pos_ids = (mode & GGML_ROPE_TYPE_MROPE) ? 
ne_a[2] * 4 : ne_a[2]; - std::vector data(num_pos_ids); - for (int i = 0; i < num_pos_ids; i++) { - data[i] = rand() % n_ctx; - } - ggml_backend_tensor_set(t, data.data(), 0, num_pos_ids * sizeof(int)); - } else { -- if (t->ne[0] == n_dims/2) { -+ if (t->ne[0] == n_dims / 2) { - // frequency factors in the range [0.9f, 1.1f] - init_tensor_uniform(t, 0.9f, 1.1f); - } else { -@@ -2645,41 +2580,40 @@ struct test_rope : public test_case { - } - } - -- double max_maa_err() override { -- return 1e-3; -- } -+ double max_maa_err() override { return 1e-3; } - -- bool grad_precise() override { -- return true; -- } -+ bool grad_precise() override { return true; } - }; - - // GGML_OP_POOL2D - struct test_pool2d : public test_case { -- enum ggml_op_pool pool_type; -- const ggml_type type_input; -+ enum ggml_op_pool pool_type; -+ const ggml_type type_input; - const std::array ne_input; - // kernel size -- const int k0; -- const int k1; -+ const int k0; -+ const int k1; - // stride -- const int s0; -- const int s1; -+ const int s0; -+ const int s1; - // padding -- const int p0; -- const int p1; -- -- std::string vars() override { -- return VARS_TO_STR9(pool_type, type_input, ne_input, k0, k1, s0, s1, p0, p1); -- } -- -- test_pool2d(ggml_op_pool pool_type = GGML_OP_POOL_AVG, -- ggml_type type_input = GGML_TYPE_F32, -- std::array ne_input = {10, 10, 3, 1}, // [input_width, input_height, input_channels, 1] -- int k0 = 3, int k1 = 3, -- int s0 = 1, int s1 = 1, -- int p0 = 1, int p1 = 1) -- : pool_type(pool_type), type_input(type_input), ne_input(ne_input), k0(k0), k1(k1), s0(s0), s1(s1), p0(p0), p1(p1) {} -+ const int p0; -+ const int p1; -+ -+ std::string vars() override { return VARS_TO_STR9(pool_type, type_input, ne_input, k0, k1, s0, s1, p0, p1); } -+ -+ test_pool2d(ggml_op_pool pool_type = GGML_OP_POOL_AVG, ggml_type type_input = GGML_TYPE_F32, -+ std::array ne_input = { 10, 10, 3, 1 }, // [input_width, input_height, input_channels, 1] -+ int k0 = 3, int k1 = 3, int s0 = 1, int s1 = 1, int p0 = 1, int p1 = 1) : -+ pool_type(pool_type), -+ type_input(type_input), -+ ne_input(ne_input), -+ k0(k0), -+ k1(k1), -+ s0(s0), -+ s1(s1), -+ p0(p0), -+ p1(p1) {} - - ggml_tensor * build_graph(ggml_context * ctx) override { - ggml_tensor * input = ggml_new_tensor(ctx, type_input, 4, ne_input.data()); -@@ -2698,18 +2632,21 @@ struct test_conv_transpose_1d : public test_case { - const std::array ne_input; - const std::array ne_kernel; - -- const int s0; // stride -- const int p0; // padding -- const int d0; // dilation -+ const int s0; // stride -+ const int p0; // padding -+ const int d0; // dilation - -- std::string vars() override { -- return VARS_TO_STR5(ne_input, ne_kernel, s0, p0, d0); -- } -+ std::string vars() override { return VARS_TO_STR5(ne_input, ne_kernel, s0, p0, d0); } - -- test_conv_transpose_1d(std::array ne_input = {197, 32, 1, 1}, // [input_width, input_height, input_channels, 1] -- std::array ne_kernel = {16, 32, 32, 1}, // [kernel_width, kernel_height, input_channels, 1] -- int s0 = 1, int p0 = 0, int d0 = 1) -- : ne_input(ne_input), ne_kernel(ne_kernel), s0(s0), p0(p0), d0(d0) {} -+ test_conv_transpose_1d( -+ std::array ne_input = { 197, 32, 1, 1 }, // [input_width, input_height, input_channels, 1] -+ std::array ne_kernel = { 16, 32, 32, 1 }, // [kernel_width, kernel_height, input_channels, 1] -+ int s0 = 1, int p0 = 0, int d0 = 1) : -+ ne_input(ne_input), -+ ne_kernel(ne_kernel), -+ s0(s0), -+ p0(p0), -+ d0(d0) {} - - ggml_tensor * build_graph(ggml_context * ctx) override { - ggml_tensor * input 
= ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne_input.data()); -@@ -2727,35 +2664,44 @@ struct test_conv_transpose_1d : public test_case { - - // GGML_OP_IM2COL - struct test_im2col : public test_case { -- const ggml_type type_input; -- const ggml_type type_kernel; -- const ggml_type dst_type; -+ const ggml_type type_input; -+ const ggml_type type_kernel; -+ const ggml_type dst_type; - const std::array ne_input; - const std::array ne_kernel; - // stride -- const int s0; -- const int s1; -+ const int s0; -+ const int s1; - // padding -- const int p0; -- const int p1; -+ const int p0; -+ const int p1; - // dilation -- const int d0; -- const int d1; -+ const int d0; -+ const int d1; - // mode -- const bool is_2D; -+ const bool is_2D; - - std::string vars() override { - return VARS_TO_STR12(type_input, type_kernel, dst_type, ne_input, ne_kernel, s0, s1, p0, p1, d0, d1, is_2D); - } - -- test_im2col(ggml_type type_input = GGML_TYPE_F32, ggml_type type_kernel = GGML_TYPE_F16, ggml_type dst_type = GGML_TYPE_F32, -- std::array ne_input = {10, 10, 3, 1}, // [input_width, input_height, input_channels, 1] -- std::array ne_kernel = {3, 3, 3, 1}, // [kernel_width, kernel_height, input_channels, 1] -- int s0 = 1, int s1 = 1, -- int p0 = 1, int p1 = 1, -- int d0 = 1, int d1 = 1, -- bool is_2D = true) -- : type_input(type_input), type_kernel(type_kernel), dst_type(dst_type), ne_input(ne_input), ne_kernel(ne_kernel), s0(s0), s1(s1), p0(p0), p1(p1), d0(d0), d1(d1), is_2D(is_2D) {} -+ test_im2col(ggml_type type_input = GGML_TYPE_F32, ggml_type type_kernel = GGML_TYPE_F16, -+ ggml_type dst_type = GGML_TYPE_F32, -+ std::array ne_input = { 10, 10, 3, 1 }, // [input_width, input_height, input_channels, 1] -+ std::array ne_kernel = { 3, 3, 3, 1 }, // [kernel_width, kernel_height, input_channels, 1] -+ int s0 = 1, int s1 = 1, int p0 = 1, int p1 = 1, int d0 = 1, int d1 = 1, bool is_2D = true) : -+ type_input(type_input), -+ type_kernel(type_kernel), -+ dst_type(dst_type), -+ ne_input(ne_input), -+ ne_kernel(ne_kernel), -+ s0(s0), -+ s1(s1), -+ p0(p0), -+ p1(p1), -+ d0(d0), -+ d1(d1), -+ is_2D(is_2D) {} - - ggml_tensor * build_graph(ggml_context * ctx) override { - ggml_tensor * input = ggml_new_tensor(ctx, type_input, 4, ne_input.data()); -@@ -2776,19 +2722,22 @@ struct test_im2col : public test_case { - struct test_conv_2d_dw : public test_case { - const std::array ne_input; - const std::array ne_kernel; -- const int stride; -- const int padding; -- const int dilation; -- const bool cwhn; -- -- std::string vars() override { -- return VARS_TO_STR6(ne_input, ne_kernel, stride, padding, dilation, cwhn); -- } -- -- test_conv_2d_dw(std::array ne_input = {64, 64, 16, 1}, -- std::array ne_kernel = {3, 3, 1, 16}, -- int stride = 1, int padding = 0, int dilation = 1, bool cwhn = false) -- : ne_input(ne_input), ne_kernel(ne_kernel), stride(stride), padding(padding), dilation(dilation), cwhn(cwhn) {} -+ const int stride; -+ const int padding; -+ const int dilation; -+ const bool cwhn; -+ -+ std::string vars() override { return VARS_TO_STR6(ne_input, ne_kernel, stride, padding, dilation, cwhn); } -+ -+ test_conv_2d_dw(std::array ne_input = { 64, 64, 16, 1 }, -+ std::array ne_kernel = { 3, 3, 1, 16 }, int stride = 1, int padding = 0, -+ int dilation = 1, bool cwhn = false) : -+ ne_input(ne_input), -+ ne_kernel(ne_kernel), -+ stride(stride), -+ padding(padding), -+ dilation(dilation), -+ cwhn(cwhn) {} - - ggml_tensor * build_graph(ggml_context * ctx) override { - ggml_tensor * input = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, 
ne_input.data()); -@@ -2800,15 +2749,14 @@ struct test_conv_2d_dw : public test_case { - if (cwhn) { - // change memory layout to channel-most-contiguous (CWHN), - // then permute it back so NE matches the original input -- input = ggml_cont(ctx, ggml_permute(ctx, input, 1, 2, 0, 3)); -- input = ggml_permute(ctx, input, 2, 0, 1, 3); -+ input = ggml_cont(ctx, ggml_permute(ctx, input, 1, 2, 0, 3)); -+ input = ggml_permute(ctx, input, 2, 0, 1, 3); - kernel = ggml_cont(ctx, ggml_permute(ctx, kernel, 2, 3, 1, 0)); - kernel = ggml_permute(ctx, kernel, 3, 2, 0, 1); - } - -- ggml_tensor * out = ggml_conv_2d_dw_direct( -- ctx, kernel, input, -- stride, stride, padding, padding, dilation, dilation); -+ ggml_tensor * out = -+ ggml_conv_2d_dw_direct(ctx, kernel, input, stride, stride, padding, padding, dilation, dilation); - ggml_set_name(out, "out"); - return out; - } -@@ -2816,28 +2764,31 @@ struct test_conv_2d_dw : public test_case { - - // GGML_OP_CONCAT - struct test_concat : public test_case { -- const ggml_type type; -+ const ggml_type type; - const std::array ne_a; -- const int64_t ne_b_d; -- const int dim; -- const int v; // view (1 << 0: non-cont a, 1 << 1: non-cont b) -+ const int64_t ne_b_d; -+ const int dim; -+ const int v; // view (1 << 0: non-cont a, 1 << 1: non-cont b) - -- std::string vars() override { -- return VARS_TO_STR5(type, ne_a, ne_b_d, dim, v); -- } -+ std::string vars() override { return VARS_TO_STR5(type, ne_a, ne_b_d, dim, v); } - -- test_concat(ggml_type type = GGML_TYPE_F32, -- std::array ne_a = {10, 5, 5, 5}, -- int64_t ne_b_d = 5, -- int dim = 2, int v = 0) -- : type(type), ne_a(ne_a), ne_b_d(ne_b_d), dim(dim), v(v) {} -+ test_concat(ggml_type type = GGML_TYPE_F32, std::array ne_a = { 10, 5, 5, 5 }, int64_t ne_b_d = 5, -+ int dim = 2, int v = 0) : -+ type(type), -+ ne_a(ne_a), -+ ne_b_d(ne_b_d), -+ dim(dim), -+ v(v) {} - - ggml_tensor * build_graph(ggml_context * ctx) override { - auto ne_b = ne_a; - ne_b[dim] = ne_b_d; - ggml_tensor * a; - if (v & 1) { -- auto ne = ne_a; ne[0] *= 2; ne[1] *= 4; ne[2] *= 3; -+ auto ne = ne_a; -+ ne[0] *= 2; -+ ne[1] *= 4; -+ ne[2] *= 3; - a = ggml_new_tensor(ctx, type, 4, ne.data()); - ggml_set_name(a, "a"); - -@@ -2849,7 +2800,10 @@ struct test_concat : public test_case { - } - ggml_tensor * b; - if (v & 2) { -- auto ne = ne_b; ne[0] *= 3; ne[1] *= 2; ne[2] *= 4; -+ auto ne = ne_b; -+ ne[0] *= 3; -+ ne[1] *= 2; -+ ne[2] *= 4; - b = ggml_new_tensor(ctx, type, 4, ne.data()); - ggml_set_name(b, "b"); - -@@ -2869,18 +2823,17 @@ struct test_concat : public test_case { - - // GGML_OP_ARGSORT - struct test_argsort : public test_case { -- const ggml_type type; -+ const ggml_type type; - const std::array ne; -- ggml_sort_order order; -+ ggml_sort_order order; - -- std::string vars() override { -- return VARS_TO_STR3(type, ne, order); -- } -+ std::string vars() override { return VARS_TO_STR3(type, ne, order); } - -- test_argsort(ggml_type type = GGML_TYPE_F32, -- std::array ne = {16, 10, 10, 10}, -- ggml_sort_order order = GGML_SORT_ORDER_ASC) -- : type(type), ne(ne), order(order) {} -+ test_argsort(ggml_type type = GGML_TYPE_F32, std::array ne = { 16, 10, 10, 10 }, -+ ggml_sort_order order = GGML_SORT_ORDER_ASC) : -+ type(type), -+ ne(ne), -+ order(order) {} - - ggml_tensor * build_graph(ggml_context * ctx) override { - ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data()); -@@ -2893,7 +2846,7 @@ struct test_argsort : public test_case { - } - - void initialize_tensors(ggml_context * ctx) override { -- std::random_device rd; -+ 
std::random_device rd; - std::default_random_engine rng(rd()); - for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) { - if (t->type == GGML_TYPE_I32) { -@@ -2903,7 +2856,7 @@ struct test_argsort : public test_case { - data[i] = rand(); - } - std::shuffle(data.begin(), data.end(), rng); -- ggml_backend_tensor_set(t, data.data(), 0, ne[0]*ne[1]*ne[2]*ne[3] * sizeof(int)); -+ ggml_backend_tensor_set(t, data.data(), 0, ne[0] * ne[1] * ne[2] * ne[3] * sizeof(int)); - } else if (t->type == GGML_TYPE_F32) { - // initialize with unique values to avoid ties - for (int64_t r = 0; r < ggml_nrows(t); r++) { -@@ -2923,16 +2876,12 @@ struct test_argsort : public test_case { - - // GGML_OP_SUM - struct test_sum : public test_case { -- const ggml_type type; -+ const ggml_type type; - const std::array ne; - -- std::string vars() override { -- return VARS_TO_STR2(type, ne); -- } -+ std::string vars() override { return VARS_TO_STR2(type, ne); } - -- test_sum(ggml_type type = GGML_TYPE_F32, -- std::array ne = {10, 5, 4, 3}) -- : type(type), ne(ne) {} -+ test_sum(ggml_type type = GGML_TYPE_F32, std::array ne = { 10, 5, 4, 3 }) : type(type), ne(ne) {} - - ggml_tensor * build_graph(ggml_context * ctx) override { - ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data()); -@@ -2945,23 +2894,17 @@ struct test_sum : public test_case { - return out; - } - -- float grad_eps() override { -- return 0.1f * sqrtf(ne[0]*ne[1]*ne[2]*ne[3]); -- } -+ float grad_eps() override { return 0.1f * sqrtf(ne[0] * ne[1] * ne[2] * ne[3]); } - }; - - // GGML_OP_SUM_ROWS - struct test_sum_rows : public test_case { -- const ggml_type type; -+ const ggml_type type; - const std::array ne; - -- std::string vars() override { -- return VARS_TO_STR2(type, ne); -- } -+ std::string vars() override { return VARS_TO_STR2(type, ne); } - -- test_sum_rows(ggml_type type = GGML_TYPE_F32, -- std::array ne = {10, 5, 4, 3}) -- : type(type), ne(ne) {} -+ test_sum_rows(ggml_type type = GGML_TYPE_F32, std::array ne = { 10, 5, 4, 3 }) : type(type), ne(ne) {} - - ggml_tensor * build_graph(ggml_context * ctx) override { - ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data()); -@@ -2977,16 +2920,12 @@ struct test_sum_rows : public test_case { - - // GGML_OP_MEAN - struct test_mean : public test_case { -- const ggml_type type; -+ const ggml_type type; - const std::array ne; - -- std::string vars() override { -- return VARS_TO_STR2(type, ne); -- } -+ std::string vars() override { return VARS_TO_STR2(type, ne); } - -- test_mean(ggml_type type = GGML_TYPE_F32, -- std::array ne = {10, 5, 4, 3}) -- : type(type), ne(ne) {} -+ test_mean(ggml_type type = GGML_TYPE_F32, std::array ne = { 10, 5, 4, 3 }) : type(type), ne(ne) {} - - ggml_tensor * build_graph(ggml_context * ctx) override { - ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data()); -@@ -2999,27 +2938,26 @@ struct test_mean : public test_case { - return out; - } - -- float grad_eps() override { -- return 0.1f * ne[0]*ne[1]*ne[2]*ne[3]; -- } -+ float grad_eps() override { return 0.1f * ne[0] * ne[1] * ne[2] * ne[3]; } - }; - - // GGML_OP_UPSCALE - struct test_upscale : public test_case { -- const ggml_type type; -+ const ggml_type type; - const std::array ne; -- const int32_t scale_factor; -- const bool transpose; -- const ggml_scale_mode mode; -+ const int32_t scale_factor; -+ const bool transpose; -+ const ggml_scale_mode mode; - -- std::string vars() override { -- return VARS_TO_STR5(type, ne, scale_factor, mode, transpose); -- } -+ std::string vars() 
override { return VARS_TO_STR5(type, ne, scale_factor, mode, transpose); } - -- test_upscale(ggml_type type = GGML_TYPE_F32, -- std::array ne = {512, 512, 3, 1}, -- int32_t scale_factor = 2, ggml_scale_mode mode = GGML_SCALE_MODE_NEAREST, bool transpose = false) -- : type(type), ne(ne), scale_factor(scale_factor), transpose(transpose), mode(mode) {} -+ test_upscale(ggml_type type = GGML_TYPE_F32, std::array ne = { 512, 512, 3, 1 }, -+ int32_t scale_factor = 2, ggml_scale_mode mode = GGML_SCALE_MODE_NEAREST, bool transpose = false) : -+ type(type), -+ ne(ne), -+ scale_factor(scale_factor), -+ transpose(transpose), -+ mode(mode) {} - - ggml_tensor * build_graph(ggml_context * ctx) override { - ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data()); -@@ -3039,26 +2977,25 @@ struct test_upscale : public test_case { - - // GGML_OP_UPSCALE (ext) - struct test_upscale_ext : public test_case { -- const ggml_type type; -+ const ggml_type type; - const std::array ne; - const std::array ne_tgt; -- const ggml_scale_mode mode = GGML_SCALE_MODE_NEAREST; -+ const ggml_scale_mode mode = GGML_SCALE_MODE_NEAREST; - -- std::string vars() override { -- return VARS_TO_STR4(type, ne, ne_tgt, mode); -- } -+ std::string vars() override { return VARS_TO_STR4(type, ne, ne_tgt, mode); } - -- test_upscale_ext(ggml_type type = GGML_TYPE_F32, -- std::array ne = {2, 5, 7, 11}, -- std::array ne_tgt = {5, 7, 11, 13}, -- ggml_scale_mode mode = GGML_SCALE_MODE_NEAREST) -- : type(type), ne(ne), ne_tgt(ne_tgt), mode(mode) {} -+ test_upscale_ext(ggml_type type = GGML_TYPE_F32, std::array ne = { 2, 5, 7, 11 }, -+ std::array ne_tgt = { 5, 7, 11, 13 }, ggml_scale_mode mode = GGML_SCALE_MODE_NEAREST) : -+ type(type), -+ ne(ne), -+ ne_tgt(ne_tgt), -+ mode(mode) {} - - ggml_tensor * build_graph(ggml_context * ctx) override { - ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data()); - ggml_set_name(a, "a"); - -- ggml_tensor * out = ggml_upscale_ext(ctx, a, ne_tgt[0], ne_tgt[1],ne_tgt[2], ne_tgt[3], mode); -+ ggml_tensor * out = ggml_upscale_ext(ctx, a, ne_tgt[0], ne_tgt[1], ne_tgt[2], ne_tgt[3], mode); - ggml_set_name(out, "out"); - - return out; -@@ -3067,20 +3004,19 @@ struct test_upscale_ext : public test_case { - - // GGML_OP_GROUP_NORM - struct test_group_norm : public test_case { -- const ggml_type type; -+ const ggml_type type; - const std::array ne; -- const int32_t num_groups; -- const float eps; -+ const int32_t num_groups; -+ const float eps; - -- std::string vars() override { -- return VARS_TO_STR4(type, ne, num_groups, eps); -- } -+ std::string vars() override { return VARS_TO_STR4(type, ne, num_groups, eps); } - -- test_group_norm(ggml_type type = GGML_TYPE_F32, -- std::array ne = {64, 64, 320, 1}, -- int32_t num_groups = 32, -- float eps = 1e-6f) -- : type(type), ne(ne), num_groups(num_groups), eps(eps) {} -+ test_group_norm(ggml_type type = GGML_TYPE_F32, std::array ne = { 64, 64, 320, 1 }, -+ int32_t num_groups = 32, float eps = 1e-6f) : -+ type(type), -+ ne(ne), -+ num_groups(num_groups), -+ eps(eps) {} - - ggml_tensor * build_graph(ggml_context * ctx) override { - ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data()); -@@ -3095,18 +3031,16 @@ struct test_group_norm : public test_case { - - // GGML_OP_L2_NORM - struct test_l2_norm : public test_case { -- const ggml_type type; -+ const ggml_type type; - const std::array ne; -- const float eps; -+ const float eps; - -- std::string vars() override { -- return VARS_TO_STR2(type, ne); -- } -+ std::string vars() override { return VARS_TO_STR2(type, ne); } - 
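The GGML_OP_L2_NORM case here carries an epsilon (1e-12f in the constructor just below) that guards the all-zero row. As a sketch of the operation itself — using an epsilon floor on the norm, which is one common formulation rather than necessarily ggml's exact one:

```cpp
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

// Row-wise L2 normalization: x <- x / max(||x||_2, eps). The floor form is
// an assumption for illustration; the epsilon could equally be added under
// the square root.
static void l2_normalize(std::vector<float> & row, float eps) {
    float sumsq = 0.0f;
    for (float v : row) {
        sumsq += v * v;
    }
    const float scale = 1.0f / std::max(std::sqrt(sumsq), eps);
    for (float & v : row) {
        v *= scale;
    }
}

int main() {
    std::vector<float> row = { 3.0f, 4.0f };     // ||row||_2 = 5
    l2_normalize(row, 1e-12f);                   // eps matching the test default
    std::printf("%.3f %.3f\n", row[0], row[1]);  // 0.600 0.800
    return 0;
}
```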
-- test_l2_norm(ggml_type type = GGML_TYPE_F32, -- std::array ne = {64, 64, 320, 1}, -- float eps = 1e-12f) -- : type(type), ne(ne), eps(eps) {} -+ test_l2_norm(ggml_type type = GGML_TYPE_F32, std::array ne = { 64, 64, 320, 1 }, float eps = 1e-12f) : -+ type(type), -+ ne(ne), -+ eps(eps) {} - - ggml_tensor * build_graph(ggml_context * ctx) override { - ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data()); -@@ -3121,18 +3055,17 @@ struct test_l2_norm : public test_case { - - // GGML_OP_ACC - struct test_acc : public test_case { -- const ggml_type type; -+ const ggml_type type; - const std::array ne_a; - const std::array ne_b; - -- std::string vars() override { -- return VARS_TO_STR3(type, ne_a, ne_b); -- } -+ std::string vars() override { return VARS_TO_STR3(type, ne_a, ne_b); } - -- test_acc(ggml_type type = GGML_TYPE_F32, -- std::array ne_a = {256, 17, 1, 1}, -- std::array ne_b = {256, 16, 1, 1}) -- : type(type), ne_a(ne_a), ne_b(ne_b) {} -+ test_acc(ggml_type type = GGML_TYPE_F32, std::array ne_a = { 256, 17, 1, 1 }, -+ std::array ne_b = { 256, 16, 1, 1 }) : -+ type(type), -+ ne_a(ne_a), -+ ne_b(ne_b) {} - - ggml_tensor * build_graph(ggml_context * ctx) override { - ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne_a.data()); -@@ -3152,19 +3085,19 @@ struct test_acc : public test_case { - - // GGML_OP_PAD - struct test_pad : public test_case { -- const ggml_type type; -+ const ggml_type type; - const std::array ne_a; -- const int pad_0; -- const int pad_1; -+ const int pad_0; -+ const int pad_1; - -- std::string vars() override { -- return VARS_TO_STR4(type, ne_a, pad_0, pad_1); -- } -+ std::string vars() override { return VARS_TO_STR4(type, ne_a, pad_0, pad_1); } - -- test_pad(ggml_type type = GGML_TYPE_F32, -- std::array ne_a = {512, 512, 1, 1}, -- int pad_0 = 1, int pad_1 = 1) -- : type(type), ne_a(ne_a), pad_0(pad_0), pad_1(pad_1) {} -+ test_pad(ggml_type type = GGML_TYPE_F32, std::array ne_a = { 512, 512, 1, 1 }, int pad_0 = 1, -+ int pad_1 = 1) : -+ type(type), -+ ne_a(ne_a), -+ pad_0(pad_0), -+ pad_1(pad_1) {} - - ggml_tensor * build_graph(ggml_context * ctx) override { - ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne_a.data()); -@@ -3179,19 +3112,19 @@ struct test_pad : public test_case { - - // GGML_OP_PAD_REFLECT_1D - struct test_pad_reflect_1d : public test_case { -- const ggml_type type; -+ const ggml_type type; - const std::array ne_a; -- const int pad_0; -- const int pad_1; -+ const int pad_0; -+ const int pad_1; - -- std::string vars() override { -- return VARS_TO_STR4(type, ne_a, pad_0, pad_1); -- } -+ std::string vars() override { return VARS_TO_STR4(type, ne_a, pad_0, pad_1); } - -- test_pad_reflect_1d(ggml_type type = GGML_TYPE_F32, -- std::array ne_a = {512, 34, 2, 1}, -- int pad_0 = 10, int pad_1 = 9) -- : type(type), ne_a(ne_a), pad_0(pad_0), pad_1(pad_1) {} -+ test_pad_reflect_1d(ggml_type type = GGML_TYPE_F32, std::array ne_a = { 512, 34, 2, 1 }, int pad_0 = 10, -+ int pad_1 = 9) : -+ type(type), -+ ne_a(ne_a), -+ pad_0(pad_0), -+ pad_1(pad_1) {} - - ggml_tensor * build_graph(ggml_context * ctx) override { - ggml_tensor * a = ggml_new_tensor(ctx, type, 2, ne_a.data()); -@@ -3207,17 +3140,17 @@ struct test_pad_reflect_1d : public test_case { - // GGML_OP_ARANGE - struct test_arange : public test_case { - const ggml_type type; -- const float start; -- const float stop; -- const float step; -+ const float start; -+ const float stop; -+ const float step; - -- std::string vars() override { -- return VARS_TO_STR4(type, start, stop, step); -- } -+ std::string 
vars() override { return VARS_TO_STR4(type, start, stop, step); } - -- test_arange(ggml_type type = GGML_TYPE_F32, -- float start = 0.f, float stop = 10.f, float step = 1.f) -- : type(type), start(start), stop(stop), step(step) {} -+ test_arange(ggml_type type = GGML_TYPE_F32, float start = 0.f, float stop = 10.f, float step = 1.f) : -+ type(type), -+ start(start), -+ stop(stop), -+ step(step) {} - - ggml_tensor * build_graph(ggml_context * ctx) override { - ggml_tensor * out = ggml_arange(ctx, start, stop, step); -@@ -3229,19 +3162,19 @@ struct test_arange : public test_case { - - // GGML_OP_TIMESTEP_EMBEDDING - struct test_timestep_embedding : public test_case { -- const ggml_type type; -+ const ggml_type type; - const std::array ne_a; -- const int dim; -- const int max_period; -+ const int dim; -+ const int max_period; - -- std::string vars() override { -- return VARS_TO_STR4(type, ne_a, dim, max_period); -- } -+ std::string vars() override { return VARS_TO_STR4(type, ne_a, dim, max_period); } - -- test_timestep_embedding(ggml_type type = GGML_TYPE_F32, -- std::array ne_a = {2, 1, 1, 1}, -- int dim = 320, int max_period=10000) -- : type(type), ne_a(ne_a), dim(dim), max_period(max_period) {} -+ test_timestep_embedding(ggml_type type = GGML_TYPE_F32, std::array ne_a = { 2, 1, 1, 1 }, int dim = 320, -+ int max_period = 10000) : -+ type(type), -+ ne_a(ne_a), -+ dim(dim), -+ max_period(max_period) {} - - ggml_tensor * build_graph(ggml_context * ctx) override { - ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne_a.data()); -@@ -3256,18 +3189,17 @@ struct test_timestep_embedding : public test_case { - - // GGML_OP_LEAKY_RELU - struct test_leaky_relu : public test_case { -- const ggml_type type; -+ const ggml_type type; - const std::array ne_a; -- const float negative_slope; -+ const float negative_slope; - -- std::string vars() override { -- return VARS_TO_STR3(type, ne_a, negative_slope); -- } -+ std::string vars() override { return VARS_TO_STR3(type, ne_a, negative_slope); } - -- test_leaky_relu(ggml_type type = GGML_TYPE_F32, -- std::array ne_a = {10, 5, 4, 3}, -- float negative_slope = 0.1f) -- : type(type), ne_a(ne_a), negative_slope(negative_slope) {} -+ test_leaky_relu(ggml_type type = GGML_TYPE_F32, std::array ne_a = { 10, 5, 4, 3 }, -+ float negative_slope = 0.1f) : -+ type(type), -+ ne_a(ne_a), -+ negative_slope(negative_slope) {} - - ggml_tensor * build_graph(ggml_context * ctx) override { - ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne_a.data()); -@@ -3282,66 +3214,77 @@ struct test_leaky_relu : public test_case { - - // GGML_OP_FLASH_ATTN_EXT - struct test_flash_attn_ext : public test_case { -- const int64_t hsk; // K head size -- const int64_t hsv; // V head size -- const int64_t nh; // num heads -- const int64_t nr; // repeat in Q, tests for grouped-query attention -- const int64_t kv; // kv size -- const int64_t nb; // batch size -+ const int64_t hsk; // K head size -+ const int64_t hsv; // V head size -+ const int64_t nh; // num heads -+ const int64_t nr; // repeat in Q, tests for grouped-query attention -+ const int64_t kv; // kv size -+ const int64_t nb; // batch size - -- const bool mask; // use mask -+ const bool mask; // use mask - -- const float max_bias; // ALiBi -- const float logit_softcap; // Gemma 2 -+ const float max_bias; // ALiBi -+ const float logit_softcap; // Gemma 2 - -- const ggml_prec prec; -- const ggml_type type_KV; -+ const ggml_prec prec; -+ const ggml_type type_KV; - std::array permute; - - std::string vars() override { - return 
VARS_TO_STR12(hsk, hsv, nh, nr, kv, nb, mask, max_bias, logit_softcap, prec, type_KV, permute); - } - -- double max_nmse_err() override { -- return 5e-4; -- } -+ double max_nmse_err() override { return 5e-4; } - - uint64_t op_flops(ggml_tensor * t) override { - GGML_UNUSED(t); - // Just counting matmul costs: - // Q*K^T is nb x hsk x kv, P*V is nb x kv x hsv, per head -- return 2 * nh*nr * nb * (hsk + hsv) * kv; -- } -- -- test_flash_attn_ext(int64_t hsk = 128, int64_t hsv = 128, int64_t nh = 32, int64_t nr = 1, int64_t kv = 96, int64_t nb = 8, -- bool mask = true, float max_bias = 0.0f, float logit_softcap = 0.0f, ggml_prec prec = GGML_PREC_F32, -- ggml_type type_KV = GGML_TYPE_F16, std::array permute = {0, 1, 2, 3}) -- : hsk(hsk), hsv(hsv), nh(nh), nr(nr), kv(kv), nb(nb), mask(mask), max_bias(max_bias), logit_softcap(logit_softcap), prec(prec), type_KV(type_KV), permute(permute) {} -+ return 2 * nh * nr * nb * (hsk + hsv) * kv; -+ } -+ -+ test_flash_attn_ext(int64_t hsk = 128, int64_t hsv = 128, int64_t nh = 32, int64_t nr = 1, int64_t kv = 96, -+ int64_t nb = 8, bool mask = true, float max_bias = 0.0f, float logit_softcap = 0.0f, -+ ggml_prec prec = GGML_PREC_F32, ggml_type type_KV = GGML_TYPE_F16, -+ std::array permute = { 0, 1, 2, 3 }) : -+ hsk(hsk), -+ hsv(hsv), -+ nh(nh), -+ nr(nr), -+ kv(kv), -+ nb(nb), -+ mask(mask), -+ max_bias(max_bias), -+ logit_softcap(logit_softcap), -+ prec(prec), -+ type_KV(type_KV), -+ permute(permute) {} - - ggml_tensor * build_graph(ggml_context * ctx) override { - const int64_t hsk_padded = GGML_PAD(hsk, ggml_blck_size(type_KV)); - const int64_t hsv_padded = GGML_PAD(hsv, ggml_blck_size(type_KV)); - -- auto const &create_permuted = [&](ggml_type type, int64_t ne0, int64_t ne1, int64_t ne2, int64_t ne3) -> ggml_tensor * { -- int64_t ne[4] = {ne0, ne1, ne2, ne3}; -+ const auto & create_permuted = [&](ggml_type type, int64_t ne0, int64_t ne1, int64_t ne2, -+ int64_t ne3) -> ggml_tensor * { -+ int64_t ne[4] = { ne0, ne1, ne2, ne3 }; - int64_t ne_perm[4]; - for (int i = 0; i < 4; ++i) { - ne_perm[permute[i]] = ne[i]; - } - ggml_tensor * t = ggml_new_tensor_4d(ctx, type, ne_perm[0], ne_perm[1], ne_perm[2], ne_perm[3]); -- if (permute != std::array{0, 1, 2, 3}) { -+ if (permute != std::array{ 0, 1, 2, 3 }) { - t = ggml_permute(ctx, t, permute[0], permute[1], permute[2], permute[3]); - } - return t; - }; - -- ggml_tensor * q = create_permuted(GGML_TYPE_F32, hsk_padded, nb, nh*nr, 1); -+ ggml_tensor * q = create_permuted(GGML_TYPE_F32, hsk_padded, nb, nh * nr, 1); - ggml_set_name(q, "q"); - -- ggml_tensor * k = create_permuted(type_KV, hsk_padded, kv, nh, 1); -+ ggml_tensor * k = create_permuted(type_KV, hsk_padded, kv, nh, 1); - ggml_set_name(k, "k"); - -- ggml_tensor * v = create_permuted(type_KV, hsv_padded, kv, nh, 1); -+ ggml_tensor * v = create_permuted(type_KV, hsv_padded, kv, nh, 1); - ggml_set_name(v, "v"); - - ggml_tensor * m = nullptr; -@@ -3350,30 +3293,26 @@ struct test_flash_attn_ext : public test_case { - ggml_set_name(m, "m"); - } - -- ggml_tensor * out = ggml_flash_attn_ext(ctx, q, k, v, m, 1.0f/sqrtf(hsk), max_bias, logit_softcap); -+ ggml_tensor * out = ggml_flash_attn_ext(ctx, q, k, v, m, 1.0f / sqrtf(hsk), max_bias, logit_softcap); - ggml_flash_attn_ext_set_prec(out, prec); - ggml_set_name(out, "out"); - - return out; - } - -- bool grad_precise() override { -- return true; -- } -+ bool grad_precise() override { return true; } - }; - - // GGML_OP_CROSS_ENTROPY_LOSS - struct test_cross_entropy_loss : public test_case { -- const ggml_type type; 
-+ const ggml_type type; - const std::array ne; - -- std::string vars() override { -- return VARS_TO_STR2(type, ne); -- } -+ std::string vars() override { return VARS_TO_STR2(type, ne); } - -- test_cross_entropy_loss(ggml_type type = GGML_TYPE_F32, -- std::array ne = {10, 5, 4, 3}) -- : type(type), ne(ne) {} -+ test_cross_entropy_loss(ggml_type type = GGML_TYPE_F32, std::array ne = { 10, 5, 4, 3 }) : -+ type(type), -+ ne(ne) {} - - ggml_tensor * build_graph(ggml_context * ctx) override { - ggml_tensor * logits = ggml_new_tensor(ctx, type, 4, ne.data()); -@@ -3401,27 +3340,21 @@ struct test_cross_entropy_loss : public test_case { - } - } - -- float grad_eps() override { -- return 1.0f; -- } -+ float grad_eps() override { return 1.0f; } - -- bool grad_precise() override { -- return true; -- } -+ bool grad_precise() override { return true; } - }; - - // GGML_OP_CROSS_ENTROPY_LOSS_BACK - struct test_cross_entropy_loss_back : public test_case { -- const ggml_type type; -+ const ggml_type type; - const std::array ne; - -- std::string vars() override { -- return VARS_TO_STR2(type, ne); -- } -+ std::string vars() override { return VARS_TO_STR2(type, ne); } - -- test_cross_entropy_loss_back(ggml_type type = GGML_TYPE_F32, -- std::array ne = {10, 5, 4, 3}) -- : type(type), ne(ne) {} -+ test_cross_entropy_loss_back(ggml_type type = GGML_TYPE_F32, std::array ne = { 10, 5, 4, 3 }) : -+ type(type), -+ ne(ne) {} - - ggml_tensor * build_graph(ggml_context * ctx) override { - ggml_tensor * grad = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1); -@@ -3446,20 +3379,18 @@ struct test_cross_entropy_loss_back : public test_case { - - // GGML_OP_OPT_STEP_ADAMW - struct test_opt_step_adamw : public test_case { -- const ggml_type type; -+ const ggml_type type; - const std::array ne; - -- std::string vars() override { -- return VARS_TO_STR2(type, ne); -- } -+ std::string vars() override { return VARS_TO_STR2(type, ne); } - -- test_opt_step_adamw(ggml_type type = GGML_TYPE_F32, -- std::array ne = {10, 5, 4, 3}) -- : type(type), ne(ne) {} -+ test_opt_step_adamw(ggml_type type = GGML_TYPE_F32, std::array ne = { 10, 5, 4, 3 }) : -+ type(type), -+ ne(ne) {} - - ggml_tensor * build_graph(ggml_context * ctx) override { - ggml_tensor * a = ggml_new_tensor_4d(ctx, type, ne[0], ne[1], ne[2], ne[3]); -- ggml_set_param(a); // Despite tensor a having gradients the output tensor will not. -+ ggml_set_param(a); // Despite tensor a having gradients the output tensor will not. - ggml_set_name(a, "a"); - - ggml_tensor * grad = ggml_new_tensor_4d(ctx, type, ne[0], ne[1], ne[2], ne[3]); -@@ -3482,13 +3413,11 @@ struct test_opt_step_adamw : public test_case { - - void initialize_tensors(ggml_context * ctx) override { - for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) { -- init_tensor_uniform(t, 0.0f, 1.0f); // grad_v and adamw_params need non-negative values. -+ init_tensor_uniform(t, 0.0f, 1.0f); // grad_v and adamw_params need non-negative values. 
- } - } - -- bool grad_precise() override { -- return true; -- } -+ bool grad_precise() override { return true; } - }; - - enum llm_norm_type { -@@ -3497,30 +3426,30 @@ enum llm_norm_type { - }; - - struct llama_hparams { -- uint32_t n_vocab; -- uint32_t n_embd; -- uint32_t n_head; -- uint32_t n_head_kv; -+ uint32_t n_vocab; -+ uint32_t n_embd; -+ uint32_t n_head; -+ uint32_t n_head_kv; - static constexpr uint32_t n_layer = 1; -- uint32_t n_rot; -- uint32_t n_embd_head; // dimension of values (d_v) -- uint32_t n_ff; -+ uint32_t n_rot; -+ uint32_t n_embd_head; // dimension of values (d_v) -+ uint32_t n_ff; - - float f_norm_eps; - float f_norm_rms_eps; - - // cparams -- static constexpr uint32_t n_ctx = 512; // user-specified context size -+ static constexpr uint32_t n_ctx = 512; // user-specified context size - static constexpr uint32_t n_ctx_orig = n_ctx; - - // batch - int32_t n_tokens; - - // llm_build_context -- static constexpr int32_t n_kv = 32; // size of KV cache to consider (n_kv <= n_ctx -- static constexpr int32_t kv_head = 1; // index of where we store new KV data in the cache -+ static constexpr int32_t n_kv = 32; // size of KV cache to consider (n_kv <= n_ctx -+ static constexpr int32_t kv_head = 1; // index of where we store new KV data in the cache - -- uint32_t n_embd_gqa() const { // dimension of key embeddings across all k-v heads -+ uint32_t n_embd_gqa() const { // dimension of key embeddings across all k-v heads - return n_embd_head * n_head_kv; - } - }; -@@ -3529,21 +3458,19 @@ struct llama_hparams { - struct test_llm : public test_case { - llama_hparams hp; - --protected: -- test_llm(llama_hparams hp) -- : hp(std::move(hp)) { -- } -+ protected: -+ test_llm(llama_hparams hp) : hp(std::move(hp)) {} - --public: -- struct ggml_tensor * llm_build_norm( -- struct ggml_context * ctx, -- struct ggml_tensor * cur, -- struct ggml_tensor * mw, -- struct ggml_tensor * mb, -- llm_norm_type type) { -+ public: -+ struct ggml_tensor * llm_build_norm(struct ggml_context * ctx, struct ggml_tensor * cur, struct ggml_tensor * mw, -+ struct ggml_tensor * mb, llm_norm_type type) { - switch (type) { -- case LLM_NORM: cur = ggml_norm (ctx, cur, hp.f_norm_eps); break; -- case LLM_NORM_RMS: cur = ggml_rms_norm(ctx, cur, hp.f_norm_rms_eps); break; -+ case LLM_NORM: -+ cur = ggml_norm(ctx, cur, hp.f_norm_eps); -+ break; -+ case LLM_NORM_RMS: -+ cur = ggml_rms_norm(ctx, cur, hp.f_norm_rms_eps); -+ break; - } - cur = ggml_mul(ctx, cur, mw); - if (mb) { -@@ -3552,42 +3479,30 @@ public: - return cur; - } - -- void llm_build_kv_store( -- struct ggml_context * ctx, -- struct ggml_tensor * k_l, -- struct ggml_tensor * v_l, -- struct ggml_tensor * k_cur, -- struct ggml_tensor * v_cur) { -+ void llm_build_kv_store(struct ggml_context * ctx, struct ggml_tensor * k_l, struct ggml_tensor * v_l, -+ struct ggml_tensor * k_cur, struct ggml_tensor * v_cur) { - // compute the transposed [n_tokens, n_embd] V matrix - struct ggml_tensor * v_cur_t = ggml_transpose(ctx, ggml_reshape_2d(ctx, v_cur, hp.n_embd_gqa(), hp.n_tokens)); - -- struct ggml_tensor * k_cache_view = ggml_view_1d(ctx, k_l, hp.n_tokens*hp.n_embd_gqa(), -- (ggml_row_size(k_l->type, hp.n_embd_gqa()))*hp.kv_head); -+ struct ggml_tensor * k_cache_view = ggml_view_1d(ctx, k_l, hp.n_tokens * hp.n_embd_gqa(), -+ (ggml_row_size(k_l->type, hp.n_embd_gqa())) *hp.kv_head); - -- struct ggml_tensor * v_cache_view = ggml_view_2d(ctx, v_l, hp.n_tokens, hp.n_embd_gqa(), -- ( hp.n_ctx)*ggml_element_size(v_l), -- (hp.kv_head)*ggml_element_size(v_l)); -+ struct 
ggml_tensor * v_cache_view = -+ ggml_view_2d(ctx, v_l, hp.n_tokens, hp.n_embd_gqa(), (hp.n_ctx) * ggml_element_size(v_l), -+ (hp.kv_head) * ggml_element_size(v_l)); - - // important: storing RoPE-ed version of K in the KV cache! -- ggml_cpy(ctx, k_cur, k_cache_view); -+ ggml_cpy(ctx, k_cur, k_cache_view); - ggml_cpy(ctx, v_cur_t, v_cache_view); - } - -- struct ggml_tensor * llm_build_kqv( -- struct ggml_context * ctx, -- struct ggml_tensor * k_l, -- struct ggml_tensor * v_l, -- struct ggml_tensor * q_cur, -- struct ggml_tensor * kq_mask, -- float kq_scale) { -+ struct ggml_tensor * llm_build_kqv(struct ggml_context * ctx, struct ggml_tensor * k_l, struct ggml_tensor * v_l, -+ struct ggml_tensor * q_cur, struct ggml_tensor * kq_mask, float kq_scale) { - struct ggml_tensor * q = ggml_permute(ctx, q_cur, 0, 2, 1, 3); - - struct ggml_tensor * k = -- ggml_view_3d(ctx, k_l, -- hp.n_embd_head, hp.n_kv, hp.n_head_kv, -- ggml_row_size(k_l->type, hp.n_embd_gqa()), -- ggml_row_size(k_l->type, hp.n_embd_head), -- 0); -+ ggml_view_3d(ctx, k_l, hp.n_embd_head, hp.n_kv, hp.n_head_kv, ggml_row_size(k_l->type, hp.n_embd_gqa()), -+ ggml_row_size(k_l->type, hp.n_embd_head), 0); - - struct ggml_tensor * kq = ggml_mul_mat(ctx, k, q); - -@@ -3595,20 +3510,17 @@ public: - - // split cached v into n_head heads - struct ggml_tensor * v = -- ggml_view_3d(ctx, v_l, -- hp.n_kv, hp.n_embd_head, hp.n_head_kv, -- ggml_element_size(v_l)*hp.n_ctx, -- ggml_element_size(v_l)*hp.n_ctx*hp.n_embd_head, -- 0); -+ ggml_view_3d(ctx, v_l, hp.n_kv, hp.n_embd_head, hp.n_head_kv, ggml_element_size(v_l) * hp.n_ctx, -+ ggml_element_size(v_l) * hp.n_ctx * hp.n_embd_head, 0); - - struct ggml_tensor * kqv = ggml_mul_mat(ctx, v, kq); - - struct ggml_tensor * kqv_merged = ggml_permute(ctx, kqv, 0, 2, 1, 3); - -- struct ggml_tensor * cur = ggml_cont_2d(ctx, kqv_merged, hp.n_embd_head*hp.n_head, hp.n_tokens); -+ struct ggml_tensor * cur = ggml_cont_2d(ctx, kqv_merged, hp.n_embd_head * hp.n_head, hp.n_tokens); - - struct ggml_tensor * wo = ggml_new_tensor_2d(ctx, GGML_TYPE_Q4_0, hp.n_embd, hp.n_embd); -- cur = ggml_mul_mat(ctx, wo, cur); -+ cur = ggml_mul_mat(ctx, wo, cur); - - return cur; - } -@@ -3631,12 +3543,12 @@ public: - - // Llama - struct test_llama : public test_llm { -- static constexpr float freq_base = 10000.0f; -- static constexpr float freq_scale = 1.0f; -- static constexpr float ext_factor = 0.0f; -+ static constexpr float freq_base = 10000.0f; -+ static constexpr float freq_scale = 1.0f; -+ static constexpr float ext_factor = 0.0f; - static constexpr float attn_factor = 1.0f; -- static constexpr float beta_fast = 32.0f; -- static constexpr float beta_slow = 1.0f; -+ static constexpr float beta_fast = 32.0f; -+ static constexpr float beta_slow = 1.0f; - - std::string op_desc(ggml_tensor * t) override { - GGML_UNUSED(t); -@@ -3648,24 +3560,21 @@ struct test_llama : public test_llm { - return VARS_TO_STR1(n_tokens); - } - -- double max_nmse_err() override { -- return 2e-3; -- } -+ double max_nmse_err() override { return 2e-3; } - -- test_llama(int n_tokens = 1) -- : test_llm({ -- /*n_vocab =*/ 32000, -- /*n_embd =*/ 3200, -- /*n_head =*/ 32, -- /*n_head_kv =*/ 32, -- /*n_rot =*/ 100, -- /*n_embd_head =*/ 100, -- /*n_ff =*/ 8640, -- /*f_norm_eps =*/ 0.f, -- /*f_norm_rms_eps =*/ 1e-5f, -- /*n_tokens =*/ n_tokens, -- }) { -- } -+ test_llama(int n_tokens = 1) : -+ test_llm({ -+ /*n_vocab =*/32000, -+ /*n_embd =*/3200, -+ /*n_head =*/32, -+ /*n_head_kv =*/32, -+ /*n_rot =*/100, -+ /*n_embd_head =*/100, -+ /*n_ff =*/8640, -+ 
/*f_norm_eps =*/0.f, -+ /*f_norm_rms_eps =*/1e-5f, -+ /*n_tokens =*/n_tokens, -+ }) {} - - ggml_tensor * build_graph(ggml_context * ctx) override { - struct ggml_tensor * cur; -@@ -3687,7 +3596,7 @@ struct test_llama : public test_llm { - - // norm - ggml_tensor * attn_norm = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hp.n_embd); -- cur = llm_build_norm(ctx, inpL, attn_norm, nullptr, LLM_NORM_RMS); -+ cur = llm_build_norm(ctx, inpL, attn_norm, nullptr, LLM_NORM_RMS); - - // self-attention - { -@@ -3700,37 +3609,33 @@ struct test_llama : public test_llm { - struct ggml_tensor * Kcur = ggml_mul_mat(ctx, wk, cur); - struct ggml_tensor * Vcur = ggml_mul_mat(ctx, wv, cur); - -- Qcur = ggml_rope_ext( -- ctx, ggml_reshape_3d(ctx, Qcur, hp.n_embd_head, hp.n_head, hp.n_tokens), inp_pos, nullptr, -- hp.n_rot, 0, hp.n_ctx_orig, freq_base, freq_scale, -- ext_factor, attn_factor, beta_fast, beta_slow -- ); -+ Qcur = ggml_rope_ext(ctx, ggml_reshape_3d(ctx, Qcur, hp.n_embd_head, hp.n_head, hp.n_tokens), inp_pos, -+ nullptr, hp.n_rot, 0, hp.n_ctx_orig, freq_base, freq_scale, ext_factor, -+ attn_factor, beta_fast, beta_slow); - -- Kcur = ggml_rope_ext( -- ctx, ggml_reshape_3d(ctx, Kcur, hp.n_embd_head, hp.n_head_kv, hp.n_tokens), inp_pos, nullptr, -- hp.n_rot, 0, hp.n_ctx_orig, freq_base, freq_scale, -- ext_factor, attn_factor, beta_fast, beta_slow -- ); -+ Kcur = ggml_rope_ext(ctx, ggml_reshape_3d(ctx, Kcur, hp.n_embd_head, hp.n_head_kv, hp.n_tokens), -+ inp_pos, nullptr, hp.n_rot, 0, hp.n_ctx_orig, freq_base, freq_scale, ext_factor, -+ attn_factor, beta_fast, beta_slow); - - llm_build_kv_store(ctx, k_l, v_l, Kcur, Vcur); - -- cur = llm_build_kqv(ctx, k_l, v_l, Qcur, KQ_mask, 1.0f/sqrtf(float(hp.n_embd_head))); -+ cur = llm_build_kqv(ctx, k_l, v_l, Qcur, KQ_mask, 1.0f / sqrtf(float(hp.n_embd_head))); - } - - struct ggml_tensor * ffn_inp = ggml_add(ctx, cur, inpSA); - - // feed-forward network - ggml_tensor * ffn_norm = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hp.n_embd); -- cur = llm_build_norm(ctx, ffn_inp, ffn_norm, nullptr, LLM_NORM_RMS); -+ cur = llm_build_norm(ctx, ffn_inp, ffn_norm, nullptr, LLM_NORM_RMS); - -- ggml_tensor * ffn_gate = ggml_new_tensor_2d(ctx, GGML_TYPE_Q4_0, hp.n_embd, hp.n_ff); -- ggml_tensor * ffn_down = ggml_new_tensor_2d(ctx, GGML_TYPE_Q4_0, hp.n_ff, hp.n_embd); -- ggml_tensor * ffn_up = ggml_new_tensor_2d(ctx, GGML_TYPE_Q4_0, hp.n_embd, hp.n_ff); -- struct ggml_tensor * tmp = ggml_mul_mat(ctx, ffn_up, cur); -- cur = ggml_mul_mat(ctx, ffn_gate, cur); -- cur = ggml_silu(ctx, cur); -- cur = ggml_mul(ctx, cur, tmp); -- cur = ggml_mul_mat(ctx, ffn_down, cur); -+ ggml_tensor * ffn_gate = ggml_new_tensor_2d(ctx, GGML_TYPE_Q4_0, hp.n_embd, hp.n_ff); -+ ggml_tensor * ffn_down = ggml_new_tensor_2d(ctx, GGML_TYPE_Q4_0, hp.n_ff, hp.n_embd); -+ ggml_tensor * ffn_up = ggml_new_tensor_2d(ctx, GGML_TYPE_Q4_0, hp.n_embd, hp.n_ff); -+ struct ggml_tensor * tmp = ggml_mul_mat(ctx, ffn_up, cur); -+ cur = ggml_mul_mat(ctx, ffn_gate, cur); -+ cur = ggml_silu(ctx, cur); -+ cur = ggml_mul(ctx, cur, tmp); -+ cur = ggml_mul_mat(ctx, ffn_down, cur); - - cur = ggml_add(ctx, cur, ffn_inp); - -@@ -3741,11 +3646,11 @@ struct test_llama : public test_llm { - cur = inpL; - - ggml_tensor * output_norm = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hp.n_embd); -- cur = llm_build_norm(ctx, cur, output_norm, nullptr, LLM_NORM_RMS); -+ cur = llm_build_norm(ctx, cur, output_norm, nullptr, LLM_NORM_RMS); - - // lm_head - ggml_tensor * output = ggml_new_tensor_2d(ctx, GGML_TYPE_Q4_0, hp.n_embd, hp.n_vocab); -- cur = 
ggml_mul_mat(ctx, output, cur); -+ cur = ggml_mul_mat(ctx, output, cur); - - return cur; - } -@@ -3753,12 +3658,12 @@ struct test_llama : public test_llm { - - // Falcon - struct test_falcon : public test_llm { -- static constexpr float freq_base = 10000.0f; -- static constexpr float freq_scale = 1.0f; -- static constexpr float ext_factor = 0.0f; -+ static constexpr float freq_base = 10000.0f; -+ static constexpr float freq_scale = 1.0f; -+ static constexpr float ext_factor = 0.0f; - static constexpr float attn_factor = 1.0f; -- static constexpr float beta_fast = 32.0f; -- static constexpr float beta_slow = 1.0f; -+ static constexpr float beta_fast = 32.0f; -+ static constexpr float beta_slow = 1.0f; - - std::string op_desc(ggml_tensor * t) override { - GGML_UNUSED(t); -@@ -3770,24 +3675,21 @@ struct test_falcon : public test_llm { - return VARS_TO_STR1(n_tokens); - } - -- double max_nmse_err() override { -- return 2e-3; -- } -+ double max_nmse_err() override { return 2e-3; } - -- test_falcon(int n_tokens = 1) -- : test_llm({ -- /*n_vocab =*/ 32000, -- /*n_embd =*/ 3200, -- /*n_head =*/ 50, -- /*n_head_kv =*/ 1, -- /*n_rot =*/ 64, -- /*n_embd_head =*/ 64, -- /*n_ff =*/ 8640, -- /*f_norm_eps =*/ 1e-5f, -- /*f_norm_rms_eps =*/ 0.f, -- /*n_tokens =*/ n_tokens, -- }) { -- } -+ test_falcon(int n_tokens = 1) : -+ test_llm({ -+ /*n_vocab =*/32000, -+ /*n_embd =*/3200, -+ /*n_head =*/50, -+ /*n_head_kv =*/1, -+ /*n_rot =*/64, -+ /*n_embd_head =*/64, -+ /*n_ff =*/8640, -+ /*f_norm_eps =*/1e-5f, -+ /*f_norm_rms_eps =*/0.f, -+ /*n_tokens =*/n_tokens, -+ }) {} - - ggml_tensor * build_graph(ggml_context * ctx) override { - struct ggml_tensor * cur; -@@ -3808,37 +3710,38 @@ struct test_falcon : public test_llm { - // norm - ggml_tensor * attn_norm_w = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hp.n_embd); - ggml_tensor * attn_norm_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hp.n_embd); -- ggml_tensor * attn_norm = llm_build_norm(ctx, inpL, attn_norm_w, attn_norm_b, LLM_NORM); -+ ggml_tensor * attn_norm = llm_build_norm(ctx, inpL, attn_norm_w, attn_norm_b, LLM_NORM); - - // self-attention - { - cur = attn_norm; - -- ggml_tensor * wqkv = ggml_new_tensor_2d(ctx, GGML_TYPE_Q4_0, hp.n_embd, hp.n_embd + 2*hp.n_embd_gqa()); -+ ggml_tensor * wqkv = -+ ggml_new_tensor_2d(ctx, GGML_TYPE_Q4_0, hp.n_embd, hp.n_embd + 2 * hp.n_embd_gqa()); - - cur = ggml_mul_mat(ctx, wqkv, cur); - -- struct ggml_tensor * Qcur = ggml_cont(ctx, ggml_view_2d(ctx, cur, hp.n_embd, hp.n_tokens, cur->nb[1], 0*sizeof(float)*(hp.n_embd))); -- struct ggml_tensor * Kcur = ggml_cont(ctx, ggml_view_2d(ctx, cur, hp.n_embd_gqa(), hp.n_tokens, cur->nb[1], 1*sizeof(float)*(hp.n_embd))); -- struct ggml_tensor * Vcur = ggml_cont(ctx, ggml_view_2d(ctx, cur, hp.n_embd_gqa(), hp.n_tokens, cur->nb[1], 1*sizeof(float)*(hp.n_embd + hp.n_embd_gqa()))); -+ struct ggml_tensor * Qcur = ggml_cont( -+ ctx, ggml_view_2d(ctx, cur, hp.n_embd, hp.n_tokens, cur->nb[1], 0 * sizeof(float) * (hp.n_embd))); -+ struct ggml_tensor * Kcur = ggml_cont(ctx, ggml_view_2d(ctx, cur, hp.n_embd_gqa(), hp.n_tokens, -+ cur->nb[1], 1 * sizeof(float) * (hp.n_embd))); -+ struct ggml_tensor * Vcur = -+ ggml_cont(ctx, ggml_view_2d(ctx, cur, hp.n_embd_gqa(), hp.n_tokens, cur->nb[1], -+ 1 * sizeof(float) * (hp.n_embd + hp.n_embd_gqa()))); - -- Qcur = ggml_reshape_3d(ctx, Qcur, hp.n_embd_head, hp.n_head, hp.n_tokens); -+ Qcur = ggml_reshape_3d(ctx, Qcur, hp.n_embd_head, hp.n_head, hp.n_tokens); - Kcur = ggml_reshape_3d(ctx, Kcur, hp.n_embd_head, hp.n_head_kv, hp.n_tokens); - - // using mode = 2 
for neox mode -- Qcur = ggml_rope_ext( -- ctx, Qcur, inp_pos, nullptr, hp.n_rot, 2, hp.n_ctx_orig, -- freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow -- ); -+ Qcur = ggml_rope_ext(ctx, Qcur, inp_pos, nullptr, hp.n_rot, 2, hp.n_ctx_orig, freq_base, freq_scale, -+ ext_factor, attn_factor, beta_fast, beta_slow); - -- Kcur = ggml_rope_ext( -- ctx, Kcur, inp_pos, nullptr, hp.n_rot, 2, hp.n_ctx_orig, -- freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow -- ); -+ Kcur = ggml_rope_ext(ctx, Kcur, inp_pos, nullptr, hp.n_rot, 2, hp.n_ctx_orig, freq_base, freq_scale, -+ ext_factor, attn_factor, beta_fast, beta_slow); - - llm_build_kv_store(ctx, k_l, v_l, Kcur, Vcur); - -- cur = llm_build_kqv(ctx, k_l, v_l, Qcur, KQ_mask, 1.0f/sqrtf(float(hp.n_embd_head))); -+ cur = llm_build_kqv(ctx, k_l, v_l, Qcur, KQ_mask, 1.0f / sqrtf(float(hp.n_embd_head))); - } - - struct ggml_tensor * ffn_inp = cur; -@@ -3847,10 +3750,10 @@ struct test_falcon : public test_llm { - { - ggml_tensor * ffn_up = ggml_new_tensor_2d(ctx, GGML_TYPE_Q4_0, hp.n_embd, hp.n_ff); - ggml_tensor * ffn_down = ggml_new_tensor_2d(ctx, GGML_TYPE_Q4_0, hp.n_ff, hp.n_embd); -- cur = attn_norm; -- cur = ggml_mul_mat(ctx, ffn_up, cur); -- cur = ggml_gelu(ctx, cur); -- cur = ggml_mul_mat(ctx, ffn_down, cur); -+ cur = attn_norm; -+ cur = ggml_mul_mat(ctx, ffn_up, cur); -+ cur = ggml_gelu(ctx, cur); -+ cur = ggml_mul_mat(ctx, ffn_down, cur); - } - - cur = ggml_add(ctx, cur, ffn_inp); -@@ -3865,65 +3768,80 @@ struct test_falcon : public test_llm { - - ggml_tensor * output_norm = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hp.n_embd); - ggml_tensor * output_norm_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hp.n_embd); -- cur = llm_build_norm(ctx, cur, output_norm, output_norm_b, LLM_NORM); -+ cur = llm_build_norm(ctx, cur, output_norm, output_norm_b, LLM_NORM); - - // lm_head - ggml_tensor * output = ggml_new_tensor_2d(ctx, GGML_TYPE_Q8_0, hp.n_embd, hp.n_vocab); -- cur = ggml_mul_mat(ctx, output, cur); -+ cur = ggml_mul_mat(ctx, output, cur); - - return cur; - } - }; - -- - // ########################################### - // ## Section 3: GGML Op Test Instantiation ## - // ########################################### - static const ggml_type all_types[] = { -- GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_BF16, -- GGML_TYPE_Q4_0, GGML_TYPE_Q4_1, -- GGML_TYPE_Q5_0, GGML_TYPE_Q5_1, -+ GGML_TYPE_F32, -+ GGML_TYPE_F16, -+ GGML_TYPE_BF16, -+ GGML_TYPE_Q4_0, -+ GGML_TYPE_Q4_1, -+ GGML_TYPE_Q5_0, -+ GGML_TYPE_Q5_1, - GGML_TYPE_Q8_0, -- GGML_TYPE_Q2_K, GGML_TYPE_Q3_K, -- GGML_TYPE_Q4_K, GGML_TYPE_Q5_K, -+ GGML_TYPE_Q2_K, -+ GGML_TYPE_Q3_K, -+ GGML_TYPE_Q4_K, -+ GGML_TYPE_Q5_K, - GGML_TYPE_Q6_K, - // GGML_TYPE_TQ1_0, GGML_TYPE_TQ2_0, // TODO: implement for all backends -- GGML_TYPE_IQ2_XXS, GGML_TYPE_IQ2_XS, GGML_TYPE_IQ2_S, -- GGML_TYPE_IQ3_XXS, GGML_TYPE_IQ1_S, GGML_TYPE_IQ1_M, -- GGML_TYPE_IQ4_NL, GGML_TYPE_IQ3_S, GGML_TYPE_IQ4_XS, -+ GGML_TYPE_IQ2_XXS, -+ GGML_TYPE_IQ2_XS, -+ GGML_TYPE_IQ2_S, -+ GGML_TYPE_IQ3_XXS, -+ GGML_TYPE_IQ1_S, -+ GGML_TYPE_IQ1_M, -+ GGML_TYPE_IQ4_NL, -+ GGML_TYPE_IQ3_S, -+ GGML_TYPE_IQ4_XS, - }; - --static const ggml_type base_types[] = { -- GGML_TYPE_F32, GGML_TYPE_F16, -- GGML_TYPE_Q8_0, // for I8MM tests -- GGML_TYPE_Q4_0, -- GGML_TYPE_Q4_1, // for I8MM tests -- GGML_TYPE_Q4_K, -- GGML_TYPE_IQ2_XXS --}; -+static const ggml_type base_types[] = { GGML_TYPE_F32, GGML_TYPE_F16, -+ GGML_TYPE_Q8_0, // for I8MM tests -+ GGML_TYPE_Q4_0, -+ GGML_TYPE_Q4_1, // for I8MM tests -+ GGML_TYPE_Q4_K, GGML_TYPE_IQ2_XXS }; - - 
static const ggml_type other_types[] = { - GGML_TYPE_Q4_1, -- GGML_TYPE_Q5_0, GGML_TYPE_Q5_1, -+ GGML_TYPE_Q5_0, -+ GGML_TYPE_Q5_1, - GGML_TYPE_Q8_0, -- GGML_TYPE_Q2_K, GGML_TYPE_Q3_K, -+ GGML_TYPE_Q2_K, -+ GGML_TYPE_Q3_K, - GGML_TYPE_Q5_K, - GGML_TYPE_Q6_K, - // GGML_TYPE_TQ1_0, GGML_TYPE_TQ2_0, // TODO: implement for all backends -- GGML_TYPE_IQ2_XS, GGML_TYPE_IQ2_S, -- GGML_TYPE_IQ3_XXS, GGML_TYPE_IQ1_S, GGML_TYPE_IQ1_M, -- GGML_TYPE_IQ4_NL, GGML_TYPE_IQ3_S, GGML_TYPE_IQ4_XS, -+ GGML_TYPE_IQ2_XS, -+ GGML_TYPE_IQ2_S, -+ GGML_TYPE_IQ3_XXS, -+ GGML_TYPE_IQ1_S, -+ GGML_TYPE_IQ1_M, -+ GGML_TYPE_IQ4_NL, -+ GGML_TYPE_IQ3_S, -+ GGML_TYPE_IQ4_XS, - GGML_TYPE_BF16, - }; - - // Test cases for evaluation: should try to cover edge cases while using small input sizes to keep the runtime low - static std::vector> make_test_cases_eval() { - std::vector> test_cases; -- std::default_random_engine rng(0); -+ std::default_random_engine rng(0); - - // unary ops -- for (ggml_type type : {GGML_TYPE_F16, GGML_TYPE_F32}) { -- for (int v : {0, 1}) { -+ for (ggml_type type : { GGML_TYPE_F16, GGML_TYPE_F32 }) { -+ for (int v : { 0, 1 }) { - for (int op = 0; op < GGML_UNARY_OP_COUNT; op++) { - test_cases.emplace_back(new test_unary((ggml_unary_op) op, type, { 128, 2, 2, 2 }, v)); - test_cases.emplace_back(new test_unary((ggml_unary_op) op, type, { 5, 7, 11, 13 }, v)); -@@ -3933,37 +3851,38 @@ static std::vector> make_test_cases_eval() { - - test_cases.emplace_back(new test_get_rows(GGML_TYPE_F32, 1, 8, 2, 1, false)); - for (ggml_type type : all_types) { -- for (int b : {1, 7}) { -- for (bool v : {false, true}) { -+ for (int b : { 1, 7 }) { -+ for (bool v : { false, true }) { - test_cases.emplace_back(new test_get_rows(type, 256, 5, 4, b, v)); - } - } - } -- for (int b : {1, 7}) { -- for (bool v : {false, true}) { -+ for (int b : { 1, 7 }) { -+ for (bool v : { false, true }) { - test_cases.emplace_back(new test_get_rows(GGML_TYPE_I32, 256, 5, 4, b, v)); - } - } - - test_cases.emplace_back(new test_get_rows_back(GGML_TYPE_F32, 1, 8, 2, 1, false)); - for (ggml_type type : all_types) { -- for (bool v : {false, true}) { -+ for (bool v : { false, true }) { - test_cases.emplace_back(new test_get_rows_back(type, 256, 5, 4, 1, v)); - } - } -- for (bool v : {false, true}) { -+ for (bool v : { false, true }) { - test_cases.emplace_back(new test_get_rows_back(GGML_TYPE_I32, 256, 5, 4, 1, v)); - } - -- for (ggml_type type_input : {GGML_TYPE_F32}) { -- for (ggml_op_pool pool_type : {GGML_OP_POOL_AVG, GGML_OP_POOL_MAX}) { -- for (int k0 : {1, 3}) { -- for (int k1 : {1, 3}) { -- for (int s0 : {1, 2}) { -- for (int s1 : {1, 2}) { -- for (int p0 : {0, 1}) { -- for (int p1 : {0, 1}) { -- test_cases.emplace_back(new test_pool2d(pool_type, type_input, {10, 10, 3, 1}, k0, k1, s0, s1, p0, p1)); -+ for (ggml_type type_input : { GGML_TYPE_F32 }) { -+ for (ggml_op_pool pool_type : { GGML_OP_POOL_AVG, GGML_OP_POOL_MAX }) { -+ for (int k0 : { 1, 3 }) { -+ for (int k1 : { 1, 3 }) { -+ for (int s0 : { 1, 2 }) { -+ for (int s1 : { 1, 2 }) { -+ for (int p0 : { 0, 1 }) { -+ for (int p1 : { 0, 1 }) { -+ test_cases.emplace_back(new test_pool2d(pool_type, type_input, { 10, 10, 3, 1 }, k0, -+ k1, s0, s1, p0, p1)); - } - } - } -@@ -3974,15 +3893,17 @@ static std::vector> make_test_cases_eval() { - } - - // im2col 1D -- test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F32, GGML_TYPE_F32, {3000, 128, 1, 1}, {3, 128, 1280, 1}, 1, 0, 1, 0, 1, 0, false)); -- test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_F32, 
{3000, 128, 1, 1}, {3, 128, 1280, 1}, 1, 0, 1, 0, 1, 0, false)); -- test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_F16, {3000, 128, 1, 1}, {3, 128, 1280, 1}, 1, 0, 1, 0, 1, 0, false)); -- for (int s0 : {1, 3}) { -- for (int p0 : {0, 3}) { -- for (int d0 : {1, 3}) { -- test_cases.emplace_back(new test_im2col( -- GGML_TYPE_F32, GGML_TYPE_F32, GGML_TYPE_F32, {20, 2, 2, 1}, {3, 2, 2, 1}, -- s0, 0, p0, 0, d0, 0, false)); -+ test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F32, GGML_TYPE_F32, { 3000, 128, 1, 1 }, -+ { 3, 128, 1280, 1 }, 1, 0, 1, 0, 1, 0, false)); -+ test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_F32, { 3000, 128, 1, 1 }, -+ { 3, 128, 1280, 1 }, 1, 0, 1, 0, 1, 0, false)); -+ test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_F16, { 3000, 128, 1, 1 }, -+ { 3, 128, 1280, 1 }, 1, 0, 1, 0, 1, 0, false)); -+ for (int s0 : { 1, 3 }) { -+ for (int p0 : { 0, 3 }) { -+ for (int d0 : { 1, 3 }) { -+ test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F32, GGML_TYPE_F32, { 20, 2, 2, 1 }, -+ { 3, 2, 2, 1 }, s0, 0, p0, 0, d0, 0, false)); - } - } - } -@@ -3991,15 +3912,15 @@ static std::vector> make_test_cases_eval() { - test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F32, GGML_TYPE_F32)); - test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_F32)); - test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_F16)); -- for (int s0 : {1, 3}) { -- for (int s1 : {1, 3}) { -- for (int p0 : {0, 3}) { -- for (int p1 : {0, 3}) { -- for (int d0 : {1, 3}) { -- for (int d1 : {1, 3}) { -- test_cases.emplace_back(new test_im2col( -- GGML_TYPE_F32, GGML_TYPE_F32, GGML_TYPE_F32, {20, 20, 2, 2}, {3, 3, 2, 2}, -- s0, s1, p0, p1, d0, d1, true)); -+ for (int s0 : { 1, 3 }) { -+ for (int s1 : { 1, 3 }) { -+ for (int p0 : { 0, 3 }) { -+ for (int p1 : { 0, 3 }) { -+ for (int d0 : { 1, 3 }) { -+ for (int d1 : { 1, 3 }) { -+ test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F32, GGML_TYPE_F32, -+ { 20, 20, 2, 2 }, { 3, 3, 2, 2 }, s0, s1, p0, p1, -+ d0, d1, true)); - } - } - } -@@ -4008,14 +3929,22 @@ static std::vector> make_test_cases_eval() { - } - - // extra tests for im2col 2D -- test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_F16, {12, 12, 1, 32}, {3, 3, 1, 32}, 1, 1, 1, 1, 1, 1, true)); -- test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_F16, {12, 12, 2, 32}, {3, 3, 2, 32}, 1, 1, 1, 1, 1, 1, true)); -- test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_F16, {12, 12, 1, 1024}, {3, 3, 1, 1024}, 1, 1, 1, 1, 1, 1, true)); -- test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_F16, {12, 12, 2, 1024}, {3, 3, 2, 1024}, 1, 1, 1, 1, 1, 1, true)); -- test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_F16, {12, 12, 1, 2048}, {3, 3, 1, 2048}, 1, 1, 1, 1, 1, 1, true)); -- test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_F16, {12, 12, 2, 2048}, {3, 3, 2, 2048}, 1, 1, 1, 1, 1, 1, true)); -- test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_F16, {12, 12, 1, 2560}, {3, 3, 1, 2560}, 1, 1, 1, 1, 1, 1, true)); -- test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_F16, {12, 12, 2, 2560}, {3, 3, 2, 2560}, 1, 1, 1, 1, 1, 1, true)); -+ test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, 
GGML_TYPE_F16, GGML_TYPE_F16, { 12, 12, 1, 32 }, -+ { 3, 3, 1, 32 }, 1, 1, 1, 1, 1, 1, true)); -+ test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_F16, { 12, 12, 2, 32 }, -+ { 3, 3, 2, 32 }, 1, 1, 1, 1, 1, 1, true)); -+ test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_F16, { 12, 12, 1, 1024 }, -+ { 3, 3, 1, 1024 }, 1, 1, 1, 1, 1, 1, true)); -+ test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_F16, { 12, 12, 2, 1024 }, -+ { 3, 3, 2, 1024 }, 1, 1, 1, 1, 1, 1, true)); -+ test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_F16, { 12, 12, 1, 2048 }, -+ { 3, 3, 1, 2048 }, 1, 1, 1, 1, 1, 1, true)); -+ test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_F16, { 12, 12, 2, 2048 }, -+ { 3, 3, 2, 2048 }, 1, 1, 1, 1, 1, 1, true)); -+ test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_F16, { 12, 12, 1, 2560 }, -+ { 3, 3, 1, 2560 }, 1, 1, 1, 1, 1, 1, true)); -+ test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_F16, { 12, 12, 2, 2560 }, -+ { 3, 3, 2, 2560 }, 1, 1, 1, 1, 1, 1, true)); - - // sycl backend will limit task global_range < MAX_INT - // test cases for 2D im2col with large input W and H (occurs in stable-diffusion) -@@ -4024,65 +3953,65 @@ static std::vector> make_test_cases_eval() { - // test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_F16, {1024, 1024, 256, 1}, {3, 3, 256, 1}, 1, 1, 1, 1, 1, 1, true)); - // test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_F32, {1024, 1024, 256, 1}, {3, 3, 256, 1}, 1, 1, 1, 1, 1, 1, true)); - -- test_cases.emplace_back(new test_conv_2d_dw({17, 34, 9, 1}, {3, 3, 1, 9}, 1, 0, 1, false)); -- test_cases.emplace_back(new test_conv_2d_dw({17, 34, 9, 1}, {3, 3, 1, 9}, 1, 0, 1, true)); -- test_cases.emplace_back(new test_conv_2d_dw({32, 8, 64, 1}, {3, 3, 1, 64}, 2, 1, 1, false)); -- test_cases.emplace_back(new test_conv_2d_dw({32, 8, 64, 1}, {3, 3, 1, 64}, 2, 1, 1, true)); -+ test_cases.emplace_back(new test_conv_2d_dw({ 17, 34, 9, 1 }, { 3, 3, 1, 9 }, 1, 0, 1, false)); -+ test_cases.emplace_back(new test_conv_2d_dw({ 17, 34, 9, 1 }, { 3, 3, 1, 9 }, 1, 0, 1, true)); -+ test_cases.emplace_back(new test_conv_2d_dw({ 32, 8, 64, 1 }, { 3, 3, 1, 64 }, 2, 1, 1, false)); -+ test_cases.emplace_back(new test_conv_2d_dw({ 32, 8, 64, 1 }, { 3, 3, 1, 64 }, 2, 1, 1, true)); - - test_cases.emplace_back(new test_conv_transpose_1d()); -- test_cases.emplace_back(new test_conv_transpose_1d({3,2,1,1}, {2,3,2,1}, 3, 0, 1)); -- test_cases.emplace_back(new test_conv_transpose_1d({3,2,1,1}, {2,3,2,1}, 2, 0, 1)); -- test_cases.emplace_back(new test_conv_transpose_1d({3,2,1,1}, {2,3,2,1}, 1, 0, 1)); -- test_cases.emplace_back(new test_conv_transpose_1d({3,2,1,1}, {3,2,2,1}, 2, 0, 1)); -- test_cases.emplace_back(new test_conv_transpose_1d({3,2,1,1}, {3,2,2,1}, 1, 0, 1)); -- test_cases.emplace_back(new test_conv_transpose_1d({3,2,1,1}, {3,1,2,1}, 1, 0, 1)); -- test_cases.emplace_back(new test_conv_transpose_1d({2,1,1,1}, {3,1,1,1}, 1, 0, 1)); -- -- test_cases.emplace_back(new test_count_equal(GGML_TYPE_F32, {4, 500, 1, 1})); -- test_cases.emplace_back(new test_count_equal(GGML_TYPE_F32, {4, 5000, 1, 1})); -- -- test_cases.emplace_back(new test_argmax(GGML_TYPE_F32, {32, 1, 1, 1})); -- test_cases.emplace_back(new test_argmax(GGML_TYPE_F32, {100, 10, 1, 1})); -- test_cases.emplace_back(new test_argmax(GGML_TYPE_F32, {1024, 10, 1, 
1})); -- test_cases.emplace_back(new test_argmax(GGML_TYPE_F32, {1024, 12, 1, 1})); -- test_cases.emplace_back(new test_argmax(GGML_TYPE_F32, {2000, 10, 1, 1})); -- test_cases.emplace_back(new test_argmax(GGML_TYPE_F32, {5438, 3, 1, 1})); -- -- for (int ne3 : {1, 3}) { // CUDA backward pass only supports ne3 == 1 -- test_cases.emplace_back(new test_repeat(GGML_TYPE_F32, {10, 5, 4, ne3}, {1, 1, 1, 1})); -- test_cases.emplace_back(new test_repeat(GGML_TYPE_F32, {10, 5, 4, ne3}, {2, 1, 1, 1})); -- test_cases.emplace_back(new test_repeat(GGML_TYPE_F32, {10, 5, 4, ne3}, {1, 2, 1, 1})); -- test_cases.emplace_back(new test_repeat(GGML_TYPE_F32, {10, 5, 4, ne3}, {1, 1, 2, 1})); -- test_cases.emplace_back(new test_repeat(GGML_TYPE_F32, {10, 5, 4, ne3}, {1, 1, 1, 2})); -- test_cases.emplace_back(new test_repeat(GGML_TYPE_I32, {10, 5, 4, ne3}, {2, 1, 1, 1})); -- test_cases.emplace_back(new test_repeat(GGML_TYPE_I16, {10, 5, 4, ne3}, {1, 1, 1, 2})); -- } -- -- for (bool view : {false, true}) { -- test_cases.emplace_back(new test_repeat_back(GGML_TYPE_F32, {8, 6, 4, 2}, {1, 1, 1, 1}, view)); -- test_cases.emplace_back(new test_repeat_back(GGML_TYPE_F32, {8, 6, 4, 2}, {2, 1, 1, 1}, view)); -- test_cases.emplace_back(new test_repeat_back(GGML_TYPE_F32, {8, 6, 4, 2}, {1, 2, 1, 1}, view)); -- test_cases.emplace_back(new test_repeat_back(GGML_TYPE_F32, {8, 6, 4, 2}, {1, 1, 2, 1}, view)); -- test_cases.emplace_back(new test_repeat_back(GGML_TYPE_F32, {8, 6, 4, 2}, {1, 1, 1, 2}, view)); -+ test_cases.emplace_back(new test_conv_transpose_1d({ 3, 2, 1, 1 }, { 2, 3, 2, 1 }, 3, 0, 1)); -+ test_cases.emplace_back(new test_conv_transpose_1d({ 3, 2, 1, 1 }, { 2, 3, 2, 1 }, 2, 0, 1)); -+ test_cases.emplace_back(new test_conv_transpose_1d({ 3, 2, 1, 1 }, { 2, 3, 2, 1 }, 1, 0, 1)); -+ test_cases.emplace_back(new test_conv_transpose_1d({ 3, 2, 1, 1 }, { 3, 2, 2, 1 }, 2, 0, 1)); -+ test_cases.emplace_back(new test_conv_transpose_1d({ 3, 2, 1, 1 }, { 3, 2, 2, 1 }, 1, 0, 1)); -+ test_cases.emplace_back(new test_conv_transpose_1d({ 3, 2, 1, 1 }, { 3, 1, 2, 1 }, 1, 0, 1)); -+ test_cases.emplace_back(new test_conv_transpose_1d({ 2, 1, 1, 1 }, { 3, 1, 1, 1 }, 1, 0, 1)); -+ -+ test_cases.emplace_back(new test_count_equal(GGML_TYPE_F32, { 4, 500, 1, 1 })); -+ test_cases.emplace_back(new test_count_equal(GGML_TYPE_F32, { 4, 5000, 1, 1 })); -+ -+ test_cases.emplace_back(new test_argmax(GGML_TYPE_F32, { 32, 1, 1, 1 })); -+ test_cases.emplace_back(new test_argmax(GGML_TYPE_F32, { 100, 10, 1, 1 })); -+ test_cases.emplace_back(new test_argmax(GGML_TYPE_F32, { 1024, 10, 1, 1 })); -+ test_cases.emplace_back(new test_argmax(GGML_TYPE_F32, { 1024, 12, 1, 1 })); -+ test_cases.emplace_back(new test_argmax(GGML_TYPE_F32, { 2000, 10, 1, 1 })); -+ test_cases.emplace_back(new test_argmax(GGML_TYPE_F32, { 5438, 3, 1, 1 })); -+ -+ for (int ne3 : { 1, 3 }) { // CUDA backward pass only supports ne3 == 1 -+ test_cases.emplace_back(new test_repeat(GGML_TYPE_F32, { 10, 5, 4, ne3 }, { 1, 1, 1, 1 })); -+ test_cases.emplace_back(new test_repeat(GGML_TYPE_F32, { 10, 5, 4, ne3 }, { 2, 1, 1, 1 })); -+ test_cases.emplace_back(new test_repeat(GGML_TYPE_F32, { 10, 5, 4, ne3 }, { 1, 2, 1, 1 })); -+ test_cases.emplace_back(new test_repeat(GGML_TYPE_F32, { 10, 5, 4, ne3 }, { 1, 1, 2, 1 })); -+ test_cases.emplace_back(new test_repeat(GGML_TYPE_F32, { 10, 5, 4, ne3 }, { 1, 1, 1, 2 })); -+ test_cases.emplace_back(new test_repeat(GGML_TYPE_I32, { 10, 5, 4, ne3 }, { 2, 1, 1, 1 })); -+ test_cases.emplace_back(new test_repeat(GGML_TYPE_I16, { 10, 5, 4, ne3 }, { 1, 1, 1, 
2 })); -+ } -+ -+ for (bool view : { false, true }) { -+ test_cases.emplace_back(new test_repeat_back(GGML_TYPE_F32, { 8, 6, 4, 2 }, { 1, 1, 1, 1 }, view)); -+ test_cases.emplace_back(new test_repeat_back(GGML_TYPE_F32, { 8, 6, 4, 2 }, { 2, 1, 1, 1 }, view)); -+ test_cases.emplace_back(new test_repeat_back(GGML_TYPE_F32, { 8, 6, 4, 2 }, { 1, 2, 1, 1 }, view)); -+ test_cases.emplace_back(new test_repeat_back(GGML_TYPE_F32, { 8, 6, 4, 2 }, { 1, 1, 2, 1 }, view)); -+ test_cases.emplace_back(new test_repeat_back(GGML_TYPE_F32, { 8, 6, 4, 2 }, { 1, 1, 1, 2 }, view)); - } - - test_cases.emplace_back(new test_dup(GGML_TYPE_F32)); - test_cases.emplace_back(new test_dup(GGML_TYPE_F16)); - test_cases.emplace_back(new test_dup(GGML_TYPE_I32)); - test_cases.emplace_back(new test_dup(GGML_TYPE_I16)); -- test_cases.emplace_back(new test_dup(GGML_TYPE_F32, {10, 10, 5, 1}, {0, 2, 1, 3})); -- test_cases.emplace_back(new test_dup(GGML_TYPE_F16, {10, 10, 5, 1}, {0, 2, 1, 3})); // dup by rows -- test_cases.emplace_back(new test_dup(GGML_TYPE_F32, {10, 10, 5, 1}, {1, 0, 2, 3})); -- test_cases.emplace_back(new test_dup(GGML_TYPE_F16, {10, 10, 5, 1}, {1, 0, 2, 3})); // dup dst not-contiguous -- test_cases.emplace_back(new test_dup(GGML_TYPE_I16, {10, 8, 3, 1}, {0, 2, 1, 3})); -- test_cases.emplace_back(new test_dup(GGML_TYPE_I16, {10, 8, 3, 1}, {1, 2, 0, 3})); -+ test_cases.emplace_back(new test_dup(GGML_TYPE_F32, { 10, 10, 5, 1 }, { 0, 2, 1, 3 })); -+ test_cases.emplace_back(new test_dup(GGML_TYPE_F16, { 10, 10, 5, 1 }, { 0, 2, 1, 3 })); // dup by rows -+ test_cases.emplace_back(new test_dup(GGML_TYPE_F32, { 10, 10, 5, 1 }, { 1, 0, 2, 3 })); -+ test_cases.emplace_back(new test_dup(GGML_TYPE_F16, { 10, 10, 5, 1 }, { 1, 0, 2, 3 })); // dup dst not-contiguous -+ test_cases.emplace_back(new test_dup(GGML_TYPE_I16, { 10, 8, 3, 1 }, { 0, 2, 1, 3 })); -+ test_cases.emplace_back(new test_dup(GGML_TYPE_I16, { 10, 8, 3, 1 }, { 1, 2, 0, 3 })); - - for (int dim = 1; dim < GGML_MAX_DIMS; ++dim) { -- test_cases.emplace_back(new test_set(GGML_TYPE_F32, GGML_TYPE_F32, {6, 5, 4, 3}, dim)); -+ test_cases.emplace_back(new test_set(GGML_TYPE_F32, GGML_TYPE_F32, { 6, 5, 4, 3 }, dim)); - } - - for (int dim = 1; dim < GGML_MAX_DIMS; ++dim) { -- test_cases.emplace_back(new test_set(GGML_TYPE_I32, GGML_TYPE_I32, {6, 5, 4, 3}, dim)); -+ test_cases.emplace_back(new test_set(GGML_TYPE_I32, GGML_TYPE_I32, { 6, 5, 4, 3 }, dim)); - } - - // same-type copy -@@ -4090,75 +4019,76 @@ static std::vector> make_test_cases_eval() { - const auto nk = ggml_blck_size(type); - - for (int k = 1; k < 4; ++k) { -- test_cases.emplace_back(new test_cpy(type, type, {k*nk, 2, 3, 4})); -- test_cases.emplace_back(new test_cpy(type, type, {k*nk, 2, 3, 4}, {0, 2, 1, 3})); -- test_cases.emplace_back(new test_cpy(type, type, {k*nk, 2, 3, 4}, {0, 3, 1, 2}, {0, 2, 1, 3})); -+ test_cases.emplace_back(new test_cpy(type, type, { k * nk, 2, 3, 4 })); -+ test_cases.emplace_back(new test_cpy(type, type, { k * nk, 2, 3, 4 }, { 0, 2, 1, 3 })); -+ test_cases.emplace_back(new test_cpy(type, type, { k * nk, 2, 3, 4 }, { 0, 3, 1, 2 }, { 0, 2, 1, 3 })); - } - } - -- for (ggml_type type_src : {GGML_TYPE_F16, GGML_TYPE_BF16, GGML_TYPE_F32}) { -+ for (ggml_type type_src : { GGML_TYPE_F16, GGML_TYPE_BF16, GGML_TYPE_F32 }) { - for (ggml_type type_dst : all_types) { -- test_cases.emplace_back(new test_cpy(type_src, type_dst, {256, 4, 4, 4})); -- test_cases.emplace_back(new test_cpy(type_src, type_dst, {256, 2, 3, 4}, {0, 2, 1, 3})); // cpy by rows -+ test_cases.emplace_back(new 
test_cpy(type_src, type_dst, { 256, 4, 4, 4 })); -+ test_cases.emplace_back(new test_cpy(type_src, type_dst, { 256, 2, 3, 4 }, { 0, 2, 1, 3 })); // cpy by rows - } - } - for (ggml_type type_src : all_types) { -- for (ggml_type type_dst : {GGML_TYPE_F32}) { -- test_cases.emplace_back(new test_cpy(type_src, type_dst, {256, 4, 4, 4})); -- test_cases.emplace_back(new test_cpy(type_src, type_dst, {256, 2, 3, 4}, {0, 2, 1, 3})); // cpy by rows -+ for (ggml_type type_dst : { GGML_TYPE_F32 }) { -+ test_cases.emplace_back(new test_cpy(type_src, type_dst, { 256, 4, 4, 4 })); -+ test_cases.emplace_back(new test_cpy(type_src, type_dst, { 256, 2, 3, 4 }, { 0, 2, 1, 3 })); // cpy by rows - } - } -- for (ggml_type type_src : {GGML_TYPE_F16, GGML_TYPE_F32}) { -- for (ggml_type type_dst : {GGML_TYPE_F16, GGML_TYPE_F32}) { -- test_cases.emplace_back(new test_cpy(type_src, type_dst, {256, 2, 3, 4}, {1, 0, 2, 3})); // cpy not-contiguous -+ for (ggml_type type_src : { GGML_TYPE_F16, GGML_TYPE_F32 }) { -+ for (ggml_type type_dst : { GGML_TYPE_F16, GGML_TYPE_F32 }) { -+ test_cases.emplace_back( -+ new test_cpy(type_src, type_dst, { 256, 2, 3, 4 }, { 1, 0, 2, 3 })); // cpy not-contiguous - } - } - - test_cases.emplace_back(new test_cont()); -- test_cases.emplace_back(new test_cont(GGML_TYPE_F32, {2, 1, 1 ,1})); -- test_cases.emplace_back(new test_cont(GGML_TYPE_F32, {2, 1, 3 ,5})); -- test_cases.emplace_back(new test_cont(GGML_TYPE_F32, {2, 3, 5 ,7})); -- test_cases.emplace_back(new test_cont(GGML_TYPE_F16, {2, 1, 1 ,1})); -- test_cases.emplace_back(new test_cont(GGML_TYPE_F16, {2, 1, 3 ,5})); -- test_cases.emplace_back(new test_cont(GGML_TYPE_F16, {2, 3, 5 ,7})); -- test_cases.emplace_back(new test_cont(GGML_TYPE_BF16, {2, 1, 1 ,1})); -- test_cases.emplace_back(new test_cont(GGML_TYPE_BF16, {2, 1, 3 ,5})); -- test_cases.emplace_back(new test_cont(GGML_TYPE_BF16, {2, 3, 5 ,7})); -+ test_cases.emplace_back(new test_cont(GGML_TYPE_F32, { 2, 1, 1, 1 })); -+ test_cases.emplace_back(new test_cont(GGML_TYPE_F32, { 2, 1, 3, 5 })); -+ test_cases.emplace_back(new test_cont(GGML_TYPE_F32, { 2, 3, 5, 7 })); -+ test_cases.emplace_back(new test_cont(GGML_TYPE_F16, { 2, 1, 1, 1 })); -+ test_cases.emplace_back(new test_cont(GGML_TYPE_F16, { 2, 1, 3, 5 })); -+ test_cases.emplace_back(new test_cont(GGML_TYPE_F16, { 2, 3, 5, 7 })); -+ test_cases.emplace_back(new test_cont(GGML_TYPE_BF16, { 2, 1, 1, 1 })); -+ test_cases.emplace_back(new test_cont(GGML_TYPE_BF16, { 2, 1, 3, 5 })); -+ test_cases.emplace_back(new test_cont(GGML_TYPE_BF16, { 2, 3, 5, 7 })); - - auto add_test_bin_bcast = [&](ggml_type type, std::array ne, std::array nr) { -- for (auto op : {ggml_add, ggml_sub, ggml_mul, ggml_div}) { -+ for (auto op : { ggml_add, ggml_sub, ggml_mul, ggml_div }) { - test_cases.emplace_back(new test_bin_bcast(op, type, ne, nr)); - } - }; -- for (ggml_type type : {GGML_TYPE_F16, GGML_TYPE_F32}) { -- add_test_bin_bcast(type, {1, 1, 8, 1}, {1, 1, 1, 1}); -- add_test_bin_bcast(type, {1, 1, 1, 1}, {32, 1, 1, 1}); -- add_test_bin_bcast(type, {1, 1, 320, 320}, {1, 1, 1, 1}); -- add_test_bin_bcast(type, {10, 5, 1, 1}, {1, 1, 1, 1}); -- add_test_bin_bcast(type, {10, 5, 4, 1}, {1, 1, 1, 1}); -- add_test_bin_bcast(type, {10, 5, 4, 3}, {1, 1, 1, 1}); -- add_test_bin_bcast(type, {10, 5, 4, 3}, {2, 1, 1, 1}); -- add_test_bin_bcast(type, {10, 5, 4, 3}, {1, 2, 1, 1}); -- add_test_bin_bcast(type, {10, 5, 4, 3}, {1, 1, 2, 1}); -- add_test_bin_bcast(type, {10, 5, 4, 3}, {1, 1, 1, 2}); -- add_test_bin_bcast(type, {10, 5, 4, 3}, {1, 1, 2, 2}); -- 
add_test_bin_bcast(type, {10, 5, 4, 3}, {1, 2, 2, 2}); -- add_test_bin_bcast(type, {10, 5, 4, 3}, {2, 2, 2, 2}); -+ for (ggml_type type : { GGML_TYPE_F16, GGML_TYPE_F32 }) { -+ add_test_bin_bcast(type, { 1, 1, 8, 1 }, { 1, 1, 1, 1 }); -+ add_test_bin_bcast(type, { 1, 1, 1, 1 }, { 32, 1, 1, 1 }); -+ add_test_bin_bcast(type, { 1, 1, 320, 320 }, { 1, 1, 1, 1 }); -+ add_test_bin_bcast(type, { 10, 5, 1, 1 }, { 1, 1, 1, 1 }); -+ add_test_bin_bcast(type, { 10, 5, 4, 1 }, { 1, 1, 1, 1 }); -+ add_test_bin_bcast(type, { 10, 5, 4, 3 }, { 1, 1, 1, 1 }); -+ add_test_bin_bcast(type, { 10, 5, 4, 3 }, { 2, 1, 1, 1 }); -+ add_test_bin_bcast(type, { 10, 5, 4, 3 }, { 1, 2, 1, 1 }); -+ add_test_bin_bcast(type, { 10, 5, 4, 3 }, { 1, 1, 2, 1 }); -+ add_test_bin_bcast(type, { 10, 5, 4, 3 }, { 1, 1, 1, 2 }); -+ add_test_bin_bcast(type, { 10, 5, 4, 3 }, { 1, 1, 2, 2 }); -+ add_test_bin_bcast(type, { 10, 5, 4, 3 }, { 1, 2, 2, 2 }); -+ add_test_bin_bcast(type, { 10, 5, 4, 3 }, { 2, 2, 2, 2 }); - - // stable diffusion -- add_test_bin_bcast(type, {1280, 1, 1, 1}, {1, 1, 1, 1}); -- add_test_bin_bcast(type, {1280, 1, 1, 1}, {1, 16, 16, 1}); -- add_test_bin_bcast(type, {1280, 16, 16, 1}, {1, 1, 1, 1}); -- add_test_bin_bcast(type, {1280, 1, 1, 1}, {1, 256, 1, 1}); -- add_test_bin_bcast(type, {1, 1, 1280, 1}, {16, 16, 1, 1}); -- add_test_bin_bcast(type, {16, 16, 1280, 1}, {1, 1, 1, 1}); -- add_test_bin_bcast(type, {1, 1, 1920, 1}, {16, 16, 1, 1}); -- add_test_bin_bcast(type, {1, 1, 2560, 1}, {16, 16, 1, 1}); -- add_test_bin_bcast(type, {1, 1, 1280, 1}, {32, 32, 1, 1}); -- add_test_bin_bcast(type, {1, 1, 1920, 1}, {32, 32, 1, 1}); -- add_test_bin_bcast(type, {1, 1, 640, 1}, {32, 32, 1, 1}); -- add_test_bin_bcast(type, {5120, 1, 1, 1}, {1, 256, 1, 1}); -- add_test_bin_bcast(type, {640, 1, 1, 1}, {1, 1, 1, 1}); -+ add_test_bin_bcast(type, { 1280, 1, 1, 1 }, { 1, 1, 1, 1 }); -+ add_test_bin_bcast(type, { 1280, 1, 1, 1 }, { 1, 16, 16, 1 }); -+ add_test_bin_bcast(type, { 1280, 16, 16, 1 }, { 1, 1, 1, 1 }); -+ add_test_bin_bcast(type, { 1280, 1, 1, 1 }, { 1, 256, 1, 1 }); -+ add_test_bin_bcast(type, { 1, 1, 1280, 1 }, { 16, 16, 1, 1 }); -+ add_test_bin_bcast(type, { 16, 16, 1280, 1 }, { 1, 1, 1, 1 }); -+ add_test_bin_bcast(type, { 1, 1, 1920, 1 }, { 16, 16, 1, 1 }); -+ add_test_bin_bcast(type, { 1, 1, 2560, 1 }, { 16, 16, 1, 1 }); -+ add_test_bin_bcast(type, { 1, 1, 1280, 1 }, { 32, 32, 1, 1 }); -+ add_test_bin_bcast(type, { 1, 1, 1920, 1 }, { 32, 32, 1, 1 }); -+ add_test_bin_bcast(type, { 1, 1, 640, 1 }, { 32, 32, 1, 1 }); -+ add_test_bin_bcast(type, { 5120, 1, 1, 1 }, { 1, 256, 1, 1 }); -+ add_test_bin_bcast(type, { 640, 1, 1, 1 }, { 1, 1, 1, 1 }); - //add_test_bin_bcast(type, {3, 3, 2560, 1280}, {1, 1, 1, 1}); - //add_test_bin_bcast(type, {3, 3, 2560, 1280}, {2, 1, 1, 1}); - } -@@ -4167,20 +4097,20 @@ static std::vector> make_test_cases_eval() { - test_cases.emplace_back(new test_scale()); - test_cases.emplace_back(new test_silu_back()); - -- for (float eps : {0.0f, 1e-6f, 1e-4f, 1e-1f}) { -- for (bool v : {false, true}) { -- test_cases.emplace_back(new test_norm (GGML_TYPE_F32, {64, 5, 4, 3}, v, eps)); -- test_cases.emplace_back(new test_rms_norm(GGML_TYPE_F32, {64, 5, 4, 3}, v, eps)); -+ for (float eps : { 0.0f, 1e-6f, 1e-4f, 1e-1f }) { -+ for (bool v : { false, true }) { -+ test_cases.emplace_back(new test_norm(GGML_TYPE_F32, { 64, 5, 4, 3 }, v, eps)); -+ test_cases.emplace_back(new test_rms_norm(GGML_TYPE_F32, { 64, 5, 4, 3 }, v, eps)); - } -- test_cases.emplace_back(new test_rms_norm_back(GGML_TYPE_F32, {64, 5, 4, 3}, 
eps)); -- test_cases.emplace_back(new test_l2_norm (GGML_TYPE_F32, {64, 5, 4, 3}, eps)); -+ test_cases.emplace_back(new test_rms_norm_back(GGML_TYPE_F32, { 64, 5, 4, 3 }, eps)); -+ test_cases.emplace_back(new test_l2_norm(GGML_TYPE_F32, { 64, 5, 4, 3 }, eps)); - } - -- test_cases.emplace_back(new test_l2_norm(GGML_TYPE_F32, {64, 5, 4, 3}, 1e-12f)); -+ test_cases.emplace_back(new test_l2_norm(GGML_TYPE_F32, { 64, 5, 4, 3 }, 1e-12f)); - -- test_cases.emplace_back(new test_ssm_conv(GGML_TYPE_F32, {4, 1536, 1, 1}, {4, 1536, 1, 1})); -- test_cases.emplace_back(new test_ssm_conv(GGML_TYPE_F32, {8, 1536, 1, 1}, {4, 1536, 1, 1})); -- test_cases.emplace_back(new test_ssm_conv(GGML_TYPE_F32, {4, 1536, 4, 1}, {4, 1536, 1, 1})); -+ test_cases.emplace_back(new test_ssm_conv(GGML_TYPE_F32, { 4, 1536, 1, 1 }, { 4, 1536, 1, 1 })); -+ test_cases.emplace_back(new test_ssm_conv(GGML_TYPE_F32, { 8, 1536, 1, 1 }, { 4, 1536, 1, 1 })); -+ test_cases.emplace_back(new test_ssm_conv(GGML_TYPE_F32, { 4, 1536, 4, 1 }, { 4, 1536, 1, 1 })); - - test_cases.emplace_back(new test_ssm_scan(GGML_TYPE_F32, 16, 1024, 32, 4)); - -@@ -4201,59 +4131,60 @@ static std::vector> make_test_cases_eval() { - - for (ggml_type type_a : all_types) { - for (int i = 1; i < 10; ++i) { -- test_cases.emplace_back(new test_mul_mat(type_a, GGML_TYPE_F32, 16, i, 256, { 1, 1}, {1, 1})); -+ test_cases.emplace_back(new test_mul_mat(type_a, GGML_TYPE_F32, 16, i, 256, { 1, 1 }, { 1, 1 })); - } - } - - #if 1 - for (ggml_type type_a : base_types) { -- for (ggml_type type_b : {GGML_TYPE_F32, GGML_TYPE_F16}) { -+ for (ggml_type type_b : { GGML_TYPE_F32, GGML_TYPE_F16 }) { - // test cases without permutation -- test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, {1, 1}, {1, 1})); -- test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, {1, 1}, {2, 1})); -- test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, {1, 1}, {1, 2})); -- test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, {3, 1}, {1, 1})); -- test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, {3, 1}, {2, 1})); -- test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, {3, 2}, {1, 1})); -- test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, {3, 2}, {2, 1})); -- test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, {3, 2}, {1, 2})); -- test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, {3, 2}, {2, 2})); -- -- test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, {1, 1}, {1, 1})); -- test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, {1, 1}, {2, 1})); -- test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, {1, 1}, {1, 2})); -- test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, {3, 1}, {1, 1})); -- test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, {3, 1}, {2, 1})); -- test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, {3, 2}, {1, 1})); -- test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, {3, 2}, {2, 1})); -- test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, {3, 2}, {1, 2})); -- test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, {3, 2}, {2, 2})); -+ test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, { 1, 1 }, { 1, 1 })); -+ test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, { 1, 1 }, { 2, 1 })); -+ test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 
1, 256, { 1, 1 }, { 1, 2 })); -+ test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, { 3, 1 }, { 1, 1 })); -+ test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, { 3, 1 }, { 2, 1 })); -+ test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, { 3, 2 }, { 1, 1 })); -+ test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, { 3, 2 }, { 2, 1 })); -+ test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, { 3, 2 }, { 1, 2 })); -+ test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, { 3, 2 }, { 2, 2 })); -+ -+ test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, { 1, 1 }, { 1, 1 })); -+ test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, { 1, 1 }, { 2, 1 })); -+ test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, { 1, 1 }, { 1, 2 })); -+ test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, { 3, 1 }, { 1, 1 })); -+ test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, { 3, 1 }, { 2, 1 })); -+ test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, { 3, 2 }, { 1, 1 })); -+ test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, { 3, 2 }, { 2, 1 })); -+ test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, { 3, 2 }, { 1, 2 })); -+ test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, { 3, 2 }, { 2, 2 })); - - // test cases with permutation -- test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, {2, 3}, {1, 1}, {0, 2, 1, 3})); -- test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, {2, 3}, {1, 1}, {0, 1, 3, 2})); -- test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, {2, 3}, {1, 1}, {0, 3, 2, 1})); -+ test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, { 2, 3 }, { 1, 1 }, { 0, 2, 1, 3 })); -+ test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, { 2, 3 }, { 1, 1 }, { 0, 1, 3, 2 })); -+ test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, { 2, 3 }, { 1, 1 }, { 0, 3, 2, 1 })); - -- test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 8, 256, {2, 3}, {1, 1}, {0, 2, 1, 3})); -- test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 8, 256, {2, 3}, {1, 1}, {0, 1, 3, 2})); -- test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 8, 256, {2, 3}, {1, 1}, {0, 3, 2, 1})); -+ test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 8, 256, { 2, 3 }, { 1, 1 }, { 0, 2, 1, 3 })); -+ test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 8, 256, { 2, 3 }, { 1, 1 }, { 0, 1, 3, 2 })); -+ test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 8, 256, { 2, 3 }, { 1, 1 }, { 0, 3, 2, 1 })); - -- test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, {2, 3}, {1, 1}, {0, 2, 1, 3})); -- test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, {2, 3}, {1, 1}, {0, 1, 3, 2})); -- test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, {2, 3}, {1, 1}, {0, 3, 2, 1})); -+ test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, { 2, 3 }, { 1, 1 }, { 0, 2, 1, 3 })); -+ test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, { 2, 3 }, { 1, 1 }, { 0, 1, 3, 2 })); -+ test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, { 2, 3 }, { 1, 1 }, { 0, 3, 2, 1 })); - - // test cases with large ne00/ne10 to cover stream-k fixup -- test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 
1024, {3, 2}, {1, 1})); -- test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 8, 1024, {3, 2}, {1, 1})); -- test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 1024, {3, 2}, {1, 1})); -+ test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 1024, { 3, 2 }, { 1, 1 })); -+ test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 8, 1024, { 3, 2 }, { 1, 1 })); -+ test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 1024, { 3, 2 }, { 1, 1 })); - } - } - for (ggml_type type_a : other_types) { -- for (ggml_type type_b : {GGML_TYPE_F32}) { -+ for (ggml_type type_b : { GGML_TYPE_F32 }) { - if (ggml_blck_size(type_a) != 256) { -- test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, ggml_blck_size(type_a), {1, 1}, {1, 1})); -+ test_cases.emplace_back( -+ new test_mul_mat(type_a, type_b, 16, 1, ggml_blck_size(type_a), { 1, 1 }, { 1, 1 })); - } -- test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, {1, 1}, {1, 1})); -+ test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, { 1, 1 }, { 1, 1 })); - } - } - #else -@@ -4265,31 +4196,35 @@ static std::vector> make_test_cases_eval() { - std::uniform_int_distribution<> dist_k(1, 16); - for (int i = 0; i < 1000; i++) { - for (ggml_type type_a : all_types) { -- for (ggml_type type_b : {GGML_TYPE_F32}) { -+ for (ggml_type type_b : { GGML_TYPE_F32 }) { - int m = dist_m(rng); - int n = dist_n(rng); - int k = dist_k(rng) * ggml_blck_size(type_a); -- test_cases.emplace_back(new test_mul_mat(type_a, type_b, m, n, k, { 1, 1}, {1, 1})); -+ test_cases.emplace_back(new test_mul_mat(type_a, type_b, m, n, k, { 1, 1 }, { 1, 1 })); - } - } - } - #endif - -- test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32, 64, 2, 128, { 8, 1}, {1, 1})); -- test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32, 83, 2, 128, { 8, 1}, {4, 1})); -- test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32, 64, 2, 64, { 8, 1}, {4, 1})); -- test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32, 83, 2, 64, { 8, 1}, {4, 1})); -- test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32, 64, 45, 128, { 8, 1}, {4, 1})); -- test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32, 128, 45, 64, { 8, 1}, {4, 1})); -- test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32, 1056, 1, 193, {1, 1}, {4, 1}, {0, 2, 1, 3})); -- test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32, 1056, 1, 67, {1, 1}, {4, 1}, {0, 2, 1, 3})); -- -- for (auto bs : {1,2,4,8}) { -- for (auto nr : {1,4}) { -+ test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32, 64, 2, 128, { 8, 1 }, { 1, 1 })); -+ test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32, 83, 2, 128, { 8, 1 }, { 4, 1 })); -+ test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32, 64, 2, 64, { 8, 1 }, { 4, 1 })); -+ test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32, 83, 2, 64, { 8, 1 }, { 4, 1 })); -+ test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32, 64, 45, 128, { 8, 1 }, { 4, 1 })); -+ test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32, 128, 45, 64, { 8, 1 }, { 4, 1 })); -+ test_cases.emplace_back( -+ new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32, 1056, 1, 193, { 1, 1 }, { 4, 1 }, { 0, 2, 1, 3 })); -+ test_cases.emplace_back( -+ new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32, 1056, 1, 67, { 1, 1 }, { 4, 1 }, { 0, 2, 1, 3 })); -+ -+ for (auto bs : { 1, 2, 
4, 8 }) { -+ for (auto nr : { 1, 4 }) { - for (uint32_t m = 0; m < 2; ++m) { - for (uint32_t k = 0; k < 2; ++k) { -- test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32, 1056 + m, 1, 128 + k, {bs, 1}, {nr, 1}, {0, 2, 1, 3})); -- test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32, 128 + m, 1, 1056 + k, {bs, 1}, {nr, 1}, {0, 1, 2, 3}, true)); -+ test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32, 1056 + m, 1, 128 + k, -+ { bs, 1 }, { nr, 1 }, { 0, 2, 1, 3 })); -+ test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32, 128 + m, 1, 1056 + k, -+ { bs, 1 }, { nr, 1 }, { 0, 1, 2, 3 }, true)); - } - } - } -@@ -4302,11 +4237,11 @@ static std::vector> make_test_cases_eval() { - // test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F16, 512, 262144, 9216, {1, 1}, {1, 1})); - - for (ggml_type type_a : base_types) { -- for (ggml_type type_b : {GGML_TYPE_F32 /*, GGML_TYPE_F16 */}) { -- for (int n_mats : {4, 8}) { -- for (int n_used : {1, 2, 4}) { -- for (bool b : {false, true}) { -- for (int n : {1, 32, 129}) { -+ for (ggml_type type_b : { GGML_TYPE_F32 /*, GGML_TYPE_F16 */ }) { -+ for (int n_mats : { 4, 8 }) { -+ for (int n_used : { 1, 2, 4 }) { -+ for (bool b : { false, true }) { -+ for (int n : { 1, 32, 129 }) { - int m = 512; - int k = 256; - test_cases.emplace_back(new test_mul_mat_id(type_a, type_b, n_mats, n_used, b, m, n, k)); -@@ -4318,11 +4253,11 @@ static std::vector> make_test_cases_eval() { - } - - for (ggml_type type_a : other_types) { -- for (ggml_type type_b : {GGML_TYPE_F32 /*, GGML_TYPE_F16 */}) { -- for (int n_mats : {4}) { -- for (int n_used : {2}) { -- for (bool b : {false}) { -- for (int n : {1, 32}) { -+ for (ggml_type type_b : { GGML_TYPE_F32 /*, GGML_TYPE_F16 */ }) { -+ for (int n_mats : { 4 }) { -+ for (int n_used : { 2 }) { -+ for (bool b : { false }) { -+ for (int n : { 1, 32 }) { - int m = 512; - int k = 256; - test_cases.emplace_back(new test_mul_mat_id(type_a, type_b, n_mats, n_used, b, m, n, k)); -@@ -4334,14 +4269,15 @@ static std::vector> make_test_cases_eval() { - } - - for (ggml_type type_a : base_types) { -- for (ggml_type type_b : {GGML_TYPE_F32, GGML_TYPE_F16}) { -- for (int n : {1, 16}) { -- for (int k : {1, 16}) { -- for (int bs2 : {1, 3}) { -- for (int bs3 : {1, 3}) { -- for (int nr2 : {1, 2}) { -- for (int nr3 : {1, 2}) { -- test_cases.emplace_back(new test_out_prod(type_a, type_b, 256, n, k, {bs2, bs3}, {nr2, nr3})); -+ for (ggml_type type_b : { GGML_TYPE_F32, GGML_TYPE_F16 }) { -+ for (int n : { 1, 16 }) { -+ for (int k : { 1, 16 }) { -+ for (int bs2 : { 1, 3 }) { -+ for (int bs3 : { 1, 3 }) { -+ for (int nr2 : { 1, 2 }) { -+ for (int nr3 : { 1, 2 }) { -+ test_cases.emplace_back( -+ new test_out_prod(type_a, type_b, 256, n, k, { bs2, bs3 }, { nr2, nr3 })); - } - } - } -@@ -4351,7 +4287,7 @@ static std::vector> make_test_cases_eval() { - } - } - -- for (ggml_type type : {GGML_TYPE_F16, GGML_TYPE_F32}) { -+ for (ggml_type type : { GGML_TYPE_F16, GGML_TYPE_F32 }) { - test_cases.emplace_back(new test_sqr(type)); - test_cases.emplace_back(new test_sqrt(type)); - test_cases.emplace_back(new test_log(type)); -@@ -4360,9 +4296,9 @@ static std::vector> make_test_cases_eval() { - test_cases.emplace_back(new test_clamp(type)); - } - -- test_cases.emplace_back(new test_diag_mask_inf(GGML_TYPE_F32, {10, 10, 1, 1}, 5)); -- test_cases.emplace_back(new test_diag_mask_inf(GGML_TYPE_F32, {10, 10, 3, 1}, 5)); -- test_cases.emplace_back(new test_diag_mask_inf(GGML_TYPE_F32, {10, 10, 3, 2}, 5)); 
-+ test_cases.emplace_back(new test_diag_mask_inf(GGML_TYPE_F32, { 10, 10, 1, 1 }, 5)); -+ test_cases.emplace_back(new test_diag_mask_inf(GGML_TYPE_F32, { 10, 10, 3, 1 }, 5)); -+ test_cases.emplace_back(new test_diag_mask_inf(GGML_TYPE_F32, { 10, 10, 3, 2 }, 5)); - - #if 0 - std::uniform_int_distribution<> dist_ne1(1, 50); -@@ -4379,78 +4315,101 @@ static std::vector> make_test_cases_eval() { - exponent <<= 1; - } - #endif -- for (bool mask : {false, true}) { -- for (float max_bias : {0.0f, 8.0f}) { -- if (!mask && max_bias > 0.0f) continue; -- for (float scale : {1.0f, 0.1f}) { -- for (int64_t ne0 : {16, 1024}) { -- for (int64_t ne1 : {16, 1024}) { -+ for (bool mask : { false, true }) { -+ for (float max_bias : { 0.0f, 8.0f }) { -+ if (!mask && max_bias > 0.0f) { -+ continue; -+ } -+ for (float scale : { 1.0f, 0.1f }) { -+ for (int64_t ne0 : { 16, 1024 }) { -+ for (int64_t ne1 : { 16, 1024 }) { - if (mask) { -- for (ggml_type m_prec : {GGML_TYPE_F32, GGML_TYPE_F16}) { -- test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {ne0, ne1, 1, 1}, mask, m_prec, scale, max_bias)); -- test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {ne0-1, ne1-1, 1, 1}, mask, m_prec, scale, max_bias)); -+ for (ggml_type m_prec : { GGML_TYPE_F32, GGML_TYPE_F16 }) { -+ test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, { ne0, ne1, 1, 1 }, mask, -+ m_prec, scale, max_bias)); -+ test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, { ne0 - 1, ne1 - 1, 1, 1 }, -+ mask, m_prec, scale, max_bias)); - } - } else { - /* The precision of mask here doesn't matter as boolean mask is false */ -- test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {ne0, ne1, 1, 1}, mask, GGML_TYPE_F32, scale, max_bias)); -- test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {ne0-1, ne1-1, 1, 1}, mask, GGML_TYPE_F32, scale, max_bias)); -+ test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, { ne0, ne1, 1, 1 }, mask, -+ GGML_TYPE_F32, scale, max_bias)); -+ test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, { ne0 - 1, ne1 - 1, 1, 1 }, mask, -+ GGML_TYPE_F32, scale, max_bias)); - } - } - } - } - } - } -- test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {16, 2, 32, 1}, true, GGML_TYPE_F32, 0.1f, 0.0f)); -- test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {16, 2, 32, 1}, true, GGML_TYPE_F16, 0.1f, 0.0f)); -- test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {16, 2, 32, 1}, false, GGML_TYPE_F32, 0.1f, 0.0f)); -- test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {32, 2, 32, 1}, true, GGML_TYPE_F32, 0.1f, 0.0f)); -- test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {32, 2, 32, 1}, true, GGML_TYPE_F16, 0.1f, 0.0f)); -- test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {32, 2, 32, 1}, true, GGML_TYPE_F32, 0.1f, 8.0f)); -- test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {32, 2, 32, 1}, true, GGML_TYPE_F16, 0.1f, 8.0f)); -- -- for (float max_bias : {0.0f, 8.0f}) { -- for (float scale : {1.0f, 0.1f}) { -- for (int64_t ne0 : {16, 1024}) { -- for (int64_t ne1 : {16, 1024}) { -- test_cases.emplace_back(new test_soft_max_back(GGML_TYPE_F32, {ne0, ne1, 1, 1}, scale, max_bias)); -- test_cases.emplace_back(new test_soft_max_back(GGML_TYPE_F32, {ne0-1, ne1-1, 1, 1}, scale, max_bias)); -+ test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, { 16, 2, 32, 1 }, true, GGML_TYPE_F32, 0.1f, 0.0f)); -+ test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, { 16, 2, 32, 1 }, true, GGML_TYPE_F16, 0.1f, 0.0f)); -+ test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, { 16, 
2, 32, 1 }, false, GGML_TYPE_F32, 0.1f, 0.0f)); -+ test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, { 32, 2, 32, 1 }, true, GGML_TYPE_F32, 0.1f, 0.0f)); -+ test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, { 32, 2, 32, 1 }, true, GGML_TYPE_F16, 0.1f, 0.0f)); -+ test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, { 32, 2, 32, 1 }, true, GGML_TYPE_F32, 0.1f, 8.0f)); -+ test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, { 32, 2, 32, 1 }, true, GGML_TYPE_F16, 0.1f, 8.0f)); -+ -+ for (float max_bias : { 0.0f, 8.0f }) { -+ for (float scale : { 1.0f, 0.1f }) { -+ for (int64_t ne0 : { 16, 1024 }) { -+ for (int64_t ne1 : { 16, 1024 }) { -+ test_cases.emplace_back(new test_soft_max_back(GGML_TYPE_F32, { ne0, ne1, 1, 1 }, scale, max_bias)); -+ test_cases.emplace_back( -+ new test_soft_max_back(GGML_TYPE_F32, { ne0 - 1, ne1 - 1, 1, 1 }, scale, max_bias)); - } - } - } - } - -- for (bool fw : {true, false}) { // fw == forward -+ for (bool fw : { true, false }) { // fw == forward - bool all = true; - - for (float v : { 0, 1 }) { - for (float fs : { 1.0f, 1.4245f }) { - for (float ef : { 0.0f, 0.7465f }) { - for (float af : { 1.0f, 1.4245f }) { -- for (ggml_type type : {GGML_TYPE_F32, GGML_TYPE_F16}) { -- for (bool ff : {false, true}) { // freq_factors -- test_cases.emplace_back(new test_rope(type, {128, 32, 2, 1}, 128, 0, 512, fs, ef, af, ff, v, fw)); // llama 7B -+ for (ggml_type type : { GGML_TYPE_F32, GGML_TYPE_F16 }) { -+ for (bool ff : { false, true }) { // freq_factors -+ test_cases.emplace_back(new test_rope(type, { 128, 32, 2, 1 }, 128, 0, 512, fs, ef, af, -+ ff, v, fw)); // llama 7B - - if (all) { -- test_cases.emplace_back(new test_rope(type, {128, 40, 2, 1}, 128, 0, 512, fs, ef, af, ff, v, fw)); // llama 13B -- test_cases.emplace_back(new test_rope(type, {128, 52, 2, 1}, 128, 0, 512, fs, ef, af, ff, v, fw)); // llama 30B -- test_cases.emplace_back(new test_rope(type, {128, 64, 2, 1}, 128, 0, 512, fs, ef, af, ff, v, fw)); // llama 65B -+ test_cases.emplace_back(new test_rope(type, { 128, 40, 2, 1 }, 128, 0, 512, fs, ef, -+ af, ff, v, fw)); // llama 13B -+ test_cases.emplace_back(new test_rope(type, { 128, 52, 2, 1 }, 128, 0, 512, fs, ef, -+ af, ff, v, fw)); // llama 30B -+ test_cases.emplace_back(new test_rope(type, { 128, 64, 2, 1 }, 128, 0, 512, fs, ef, -+ af, ff, v, fw)); // llama 65B - } - - if (all) { -- test_cases.emplace_back(new test_rope(type, { 64, 1, 2, 1}, 64, 2, 512, fs, ef, af, ff, v, fw)); // neox (falcon 7B) -- test_cases.emplace_back(new test_rope(type, { 64, 71, 2, 1}, 64, 2, 512, fs, ef, af, ff, v, fw)); // neox (falcon 7B) -- test_cases.emplace_back(new test_rope(type, { 64, 8, 2, 1}, 64, 2, 512, fs, ef, af, ff, v, fw)); // neox (falcon 40B) -- test_cases.emplace_back(new test_rope(type, { 80, 32, 2, 1}, 20, 2, 512, fs, ef, af, ff, v, fw)); // neox (stablelm) -- test_cases.emplace_back(new test_rope(type, { 80, 32, 2, 1}, 32, 2, 512, fs, ef, af, ff, v, fw)); // neox (phi-2) -+ test_cases.emplace_back(new test_rope(type, { 64, 1, 2, 1 }, 64, 2, 512, fs, ef, af, -+ ff, v, fw)); // neox (falcon 7B) -+ test_cases.emplace_back(new test_rope(type, { 64, 71, 2, 1 }, 64, 2, 512, fs, ef, -+ af, ff, v, fw)); // neox (falcon 7B) -+ test_cases.emplace_back(new test_rope(type, { 64, 8, 2, 1 }, 64, 2, 512, fs, ef, af, -+ ff, v, fw)); // neox (falcon 40B) -+ test_cases.emplace_back(new test_rope(type, { 80, 32, 2, 1 }, 20, 2, 512, fs, ef, -+ af, ff, v, fw)); // neox (stablelm) -+ test_cases.emplace_back(new test_rope(type, { 80, 32, 2, 1 }, 32, 2, 512, fs, ef, -+ 
af, ff, v, fw)); // neox (phi-2) - } - - if (all) { -- test_cases.emplace_back(new test_rope(type, {128, 12, 2, 1}, 128, GGML_ROPE_TYPE_MROPE, 512, fs, ef, af, ff, v, fw)); // rope_multi,m-rope (qwen2vl 2B) -- test_cases.emplace_back(new test_rope(type, {128, 28, 2, 1}, 128, GGML_ROPE_TYPE_MROPE, 512, fs, ef, af, ff, v, fw)); // rope_multi,m-rope (qwen2vl 7B) -- test_cases.emplace_back(new test_rope(type, { 80, 16, 2, 1}, 80, GGML_ROPE_TYPE_VISION, 512, fs, ef, af, ff, v, fw)); // rope_multi,m-rope (qwen2vl ViT) -+ test_cases.emplace_back(new test_rope(type, { 128, 12, 2, 1 }, 128, -+ GGML_ROPE_TYPE_MROPE, 512, fs, ef, af, ff, v, -+ fw)); // rope_multi,m-rope (qwen2vl 2B) -+ test_cases.emplace_back(new test_rope(type, { 128, 28, 2, 1 }, 128, -+ GGML_ROPE_TYPE_MROPE, 512, fs, ef, af, ff, v, -+ fw)); // rope_multi,m-rope (qwen2vl 7B) -+ test_cases.emplace_back(new test_rope(type, { 80, 16, 2, 1 }, 80, -+ GGML_ROPE_TYPE_VISION, 512, fs, ef, af, ff, v, -+ fw)); // rope_multi,m-rope (qwen2vl ViT) - } - -- test_cases.emplace_back(new test_rope(type, { 64, 128, 2, 1}, 64, 2, 512, fs, ef, af, ff, v, fw)); // neox (falcon 40B) -+ test_cases.emplace_back(new test_rope(type, { 64, 128, 2, 1 }, 64, 2, 512, fs, ef, af, -+ ff, v, fw)); // neox (falcon 40B) - } - } - -@@ -4462,29 +4421,34 @@ static std::vector> make_test_cases_eval() { - } - - for (int v : { 0, 1, 2, 3 }) { -- for (int dim : { 0, 1, 2, 3, }) { -- test_cases.emplace_back(new test_concat(GGML_TYPE_F32, {11, 12, 13, 14}, 7, dim, v)); -- test_cases.emplace_back(new test_concat(GGML_TYPE_I32, {11, 12, 13, 14}, 7, dim, v)); -+ for (int dim : { -+ 0, -+ 1, -+ 2, -+ 3, -+ }) { -+ test_cases.emplace_back(new test_concat(GGML_TYPE_F32, { 11, 12, 13, 14 }, 7, dim, v)); -+ test_cases.emplace_back(new test_concat(GGML_TYPE_I32, { 11, 12, 13, 14 }, 7, dim, v)); - } - } - -- for (ggml_sort_order order : {GGML_SORT_ORDER_ASC, GGML_SORT_ORDER_DESC}) { -- test_cases.emplace_back(new test_argsort(GGML_TYPE_F32, {8, 1, 1, 1}, order)); -- test_cases.emplace_back(new test_argsort(GGML_TYPE_F32, {16, 10, 10, 10}, order)); -- test_cases.emplace_back(new test_argsort(GGML_TYPE_F32, {60, 10, 10, 10}, order)); // qwen -+ for (ggml_sort_order order : { GGML_SORT_ORDER_ASC, GGML_SORT_ORDER_DESC }) { -+ test_cases.emplace_back(new test_argsort(GGML_TYPE_F32, { 8, 1, 1, 1 }, order)); -+ test_cases.emplace_back(new test_argsort(GGML_TYPE_F32, { 16, 10, 10, 10 }, order)); -+ test_cases.emplace_back(new test_argsort(GGML_TYPE_F32, { 60, 10, 10, 10 }, order)); // qwen - } - -- for (ggml_scale_mode mode : {GGML_SCALE_MODE_NEAREST, GGML_SCALE_MODE_BILINEAR}) { -- test_cases.emplace_back(new test_upscale(GGML_TYPE_F32, {512, 512, 3, 2}, 2, mode)); -- test_cases.emplace_back(new test_upscale(GGML_TYPE_F32, {512, 512, 3, 2}, 2, mode, true)); -- test_cases.emplace_back(new test_upscale_ext(GGML_TYPE_F32, {2, 5, 7, 11}, {5, 7, 11, 13}, mode)); -+ for (ggml_scale_mode mode : { GGML_SCALE_MODE_NEAREST, GGML_SCALE_MODE_BILINEAR }) { -+ test_cases.emplace_back(new test_upscale(GGML_TYPE_F32, { 512, 512, 3, 2 }, 2, mode)); -+ test_cases.emplace_back(new test_upscale(GGML_TYPE_F32, { 512, 512, 3, 2 }, 2, mode, true)); -+ test_cases.emplace_back(new test_upscale_ext(GGML_TYPE_F32, { 2, 5, 7, 11 }, { 5, 7, 11, 13 }, mode)); - } - - test_cases.emplace_back(new test_sum()); - test_cases.emplace_back(new test_sum_rows()); - test_cases.emplace_back(new test_mean()); -- test_cases.emplace_back(new test_group_norm(GGML_TYPE_F32, {64, 64, 320, 1})); -- test_cases.emplace_back(new 
test_group_norm(GGML_TYPE_F32, {9, 9, 1280, 1})); -+ test_cases.emplace_back(new test_group_norm(GGML_TYPE_F32, { 64, 64, 320, 1 })); -+ test_cases.emplace_back(new test_group_norm(GGML_TYPE_F32, { 9, 9, 1280, 1 })); - test_cases.emplace_back(new test_acc()); - test_cases.emplace_back(new test_pad()); - test_cases.emplace_back(new test_pad_reflect_1d()); -@@ -4494,30 +4458,60 @@ static std::vector> make_test_cases_eval() { - - for (int hsk : { 64, 80, 128, 192, 256, 576 }) { - for (int hsv : { 64, 80, 128, 192, 256, 512 }) { -- if (hsk != 192 && hsk != 576 && hsk != hsv) continue; -- if (hsk == 192 && (hsv != 128 && hsv != 192)) continue; -- if (hsk == 576 && hsv != 512) continue; // DeepSeek MLA -+ if (hsk != 192 && hsk != 576 && hsk != hsv) { -+ continue; -+ } -+ if (hsk == 192 && (hsv != 128 && hsv != 192)) { -+ continue; -+ } -+ if (hsk == 576 && hsv != 512) { -+ continue; // DeepSeek MLA -+ } - -- for (bool mask : { true, false } ) { -+ for (bool mask : { true, false }) { - for (float max_bias : { 0.0f, 8.0f }) { -- if (!mask && max_bias > 0.0f) continue; -- for (float logit_softcap : {0.0f, 10.0f}) { -- if (hsk != 128 && logit_softcap != 0.0f) continue; -- for (int nh : { 4, }) { -+ if (!mask && max_bias > 0.0f) { -+ continue; -+ } -+ for (float logit_softcap : { 0.0f, 10.0f }) { -+ if (hsk != 128 && logit_softcap != 0.0f) { -+ continue; -+ } -+ for (int nh : { -+ 4, -+ }) { - for (int nr : { 1, 4, 16 }) { -- if (nr == 16 && hsk != 128) continue; -- for (int kv : { 512, 1024, }) { -- if (nr != 1 && kv != 512) continue; -- for (int nb : { 1, 3, 32, 35, }) { -- for (ggml_prec prec : {GGML_PREC_F32, GGML_PREC_DEFAULT}) { -- if (hsk != 128 && prec == GGML_PREC_DEFAULT) continue; -- for (ggml_type type_KV : {GGML_TYPE_F16, GGML_TYPE_BF16, GGML_TYPE_Q8_0, GGML_TYPE_Q4_0}) { -- test_cases.emplace_back(new test_flash_attn_ext( -- hsk, hsv, nh, nr, kv, nb, mask, max_bias, logit_softcap, prec, type_KV)); -+ if (nr == 16 && hsk != 128) { -+ continue; -+ } -+ for (int kv : { -+ 512, -+ 1024, -+ }) { -+ if (nr != 1 && kv != 512) { -+ continue; -+ } -+ for (int nb : { -+ 1, -+ 3, -+ 32, -+ 35, -+ }) { -+ for (ggml_prec prec : { GGML_PREC_F32, GGML_PREC_DEFAULT }) { -+ if (hsk != 128 && prec == GGML_PREC_DEFAULT) { -+ continue; -+ } -+ for (ggml_type type_KV : -+ { GGML_TYPE_F16, GGML_TYPE_BF16, GGML_TYPE_Q8_0, GGML_TYPE_Q4_0 }) { -+ test_cases.emplace_back( -+ new test_flash_attn_ext(hsk, hsv, nh, nr, kv, nb, mask, max_bias, -+ logit_softcap, prec, type_KV)); - // run fewer test cases permuted -- if (mask == true && max_bias == 0.0f && logit_softcap == 0 && kv == 512) { -+ if (mask == true && max_bias == 0.0f && logit_softcap == 0 && -+ kv == 512) { - test_cases.emplace_back(new test_flash_attn_ext( -- hsk, hsv, nh, nr, kv, nb, mask, max_bias, logit_softcap, prec, type_KV, {0, 2, 1, 3})); -+ hsk, hsv, nh, nr, kv, nb, mask, max_bias, logit_softcap, prec, -+ type_KV, { 0, 2, 1, 3 })); - } - } - } -@@ -4531,12 +4525,12 @@ static std::vector> make_test_cases_eval() { - } - } - -- test_cases.emplace_back(new test_cross_entropy_loss (GGML_TYPE_F32, { 10, 5, 4, 3})); -- test_cases.emplace_back(new test_cross_entropy_loss (GGML_TYPE_F32, {30000, 1, 1, 1})); -- test_cases.emplace_back(new test_cross_entropy_loss_back(GGML_TYPE_F32, { 10, 5, 4, 3})); -- test_cases.emplace_back(new test_cross_entropy_loss_back(GGML_TYPE_F32, {30000, 1, 1, 1})); -+ test_cases.emplace_back(new test_cross_entropy_loss(GGML_TYPE_F32, { 10, 5, 4, 3 })); -+ test_cases.emplace_back(new test_cross_entropy_loss(GGML_TYPE_F32, { 
30000, 1, 1, 1 })); -+ test_cases.emplace_back(new test_cross_entropy_loss_back(GGML_TYPE_F32, { 10, 5, 4, 3 })); -+ test_cases.emplace_back(new test_cross_entropy_loss_back(GGML_TYPE_F32, { 30000, 1, 1, 1 })); - -- test_cases.emplace_back(new test_opt_step_adamw(GGML_TYPE_F32, {10, 5, 4, 3})); -+ test_cases.emplace_back(new test_opt_step_adamw(GGML_TYPE_F32, { 10, 5, 4, 3 })); - - // these tests are disabled to save execution time, but they can be handy for debugging - #if 0 -@@ -4553,58 +4547,77 @@ static std::vector> make_test_cases_eval() { - static std::vector> make_test_cases_perf() { - std::vector> test_cases; - -- test_cases.emplace_back(new test_bin_bcast(ggml_add, GGML_TYPE_F32, {4096, 1, 1, 1}, {1, 1, 1, 1})); -- test_cases.emplace_back(new test_bin_bcast(ggml_add, GGML_TYPE_F32, {4096, 1, 1, 1}, {1, 512, 1, 1})); -+ test_cases.emplace_back(new test_bin_bcast(ggml_add, GGML_TYPE_F32, { 4096, 1, 1, 1 }, { 1, 1, 1, 1 })); -+ test_cases.emplace_back(new test_bin_bcast(ggml_add, GGML_TYPE_F32, { 4096, 1, 1, 1 }, { 1, 512, 1, 1 })); - -- test_cases.emplace_back(new test_cpy(GGML_TYPE_F32, GGML_TYPE_F16, {512, 3072, 1, 1})); -- test_cases.emplace_back(new test_cpy(GGML_TYPE_F32, GGML_TYPE_F32, {8192, 512, 2, 1}, {0, 2, 1, 3})); -- test_cases.emplace_back(new test_cpy(GGML_TYPE_F32, GGML_TYPE_F32, {3072, 512, 2, 1}, {0, 2, 1, 3})); -+ test_cases.emplace_back(new test_cpy(GGML_TYPE_F32, GGML_TYPE_F16, { 512, 3072, 1, 1 })); -+ test_cases.emplace_back(new test_cpy(GGML_TYPE_F32, GGML_TYPE_F32, { 8192, 512, 2, 1 }, { 0, 2, 1, 3 })); -+ test_cases.emplace_back(new test_cpy(GGML_TYPE_F32, GGML_TYPE_F32, { 3072, 512, 2, 1 }, { 0, 2, 1, 3 })); - -- test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {4096, 4096, 5, 1}, false, GGML_TYPE_F32, 1.0f, 0.0f)); -- test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {77, 4096, 5, 1}, false, GGML_TYPE_F32, 1.0f, 0.0f)); -- test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {1024, 1024, 10, 1}, false, GGML_TYPE_F32, 1.0f, 0.0f)); -- test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {77, 1024, 10, 1}, false, GGML_TYPE_F32, 1.0f, 0.0f)); -- test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {256, 256, 20, 1}, false, GGML_TYPE_F32, 1.0f, 0.0f)); -- test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {64, 64, 20, 1}, false, GGML_TYPE_F32, 1.0f, 0.0f)); -- test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {77, 64, 20, 1}, false, GGML_TYPE_F32, 1.0f, 0.0f)); -+ test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, { 4096, 4096, 5, 1 }, false, GGML_TYPE_F32, 1.0f, 0.0f)); -+ test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, { 77, 4096, 5, 1 }, false, GGML_TYPE_F32, 1.0f, 0.0f)); -+ test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, { 1024, 1024, 10, 1 }, false, GGML_TYPE_F32, 1.0f, 0.0f)); -+ test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, { 77, 1024, 10, 1 }, false, GGML_TYPE_F32, 1.0f, 0.0f)); -+ test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, { 256, 256, 20, 1 }, false, GGML_TYPE_F32, 1.0f, 0.0f)); -+ test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, { 64, 64, 20, 1 }, false, GGML_TYPE_F32, 1.0f, 0.0f)); -+ test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, { 77, 64, 20, 1 }, false, GGML_TYPE_F32, 1.0f, 0.0f)); - -- test_cases.emplace_back(new test_argmax(GGML_TYPE_F32, {32, 10, 1, 1})); -- test_cases.emplace_back(new test_argmax(GGML_TYPE_F32, {1024, 10, 1, 1})); -- test_cases.emplace_back(new test_argmax(GGML_TYPE_F32, {32000, 512, 1, 1})); -+ 
test_cases.emplace_back(new test_argmax(GGML_TYPE_F32, { 32, 10, 1, 1 })); -+ test_cases.emplace_back(new test_argmax(GGML_TYPE_F32, { 1024, 10, 1, 1 })); -+ test_cases.emplace_back(new test_argmax(GGML_TYPE_F32, { 32000, 512, 1, 1 })); - -- test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32, 16416, 1, 128, {8, 1}, {4, 1}, {0, 2, 1, 3})); -- test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32, 128, 1, 16416, {8, 1}, {4, 1}, {0, 1, 2, 3}, true)); -+ test_cases.emplace_back( -+ new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32, 16416, 1, 128, { 8, 1 }, { 4, 1 }, { 0, 2, 1, 3 })); -+ test_cases.emplace_back( -+ new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32, 128, 1, 16416, { 8, 1 }, { 4, 1 }, { 0, 1, 2, 3 }, true)); - -- for (int bs : {1, 2, 3, 4, 5, 8, 512}) { -+ for (int bs : { 1, 2, 3, 4, 5, 8, 512 }) { - for (ggml_type type_a : all_types) { -- for (ggml_type type_b : {GGML_TYPE_F32}) { -- test_cases.emplace_back(new test_mul_mat(type_a, type_b, 4096, bs, 14336, {1, 1}, {1, 1})); -+ for (ggml_type type_b : { GGML_TYPE_F32 }) { -+ test_cases.emplace_back(new test_mul_mat(type_a, type_b, 4096, bs, 14336, { 1, 1 }, { 1, 1 })); - } - } - } - -- for (int K : {3, 5}) { -- for (int IC : {256, 2560}) { -- for (int IW_IH : {32, 64, 256}) { -+ for (int K : { 3, 5 }) { -+ for (int IC : { 256, 2560 }) { -+ for (int IW_IH : { 32, 64, 256 }) { - if (IC == 2560 && IW_IH == 256) { - // too big - continue; - } -- test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_F32, {IW_IH, IW_IH, IC, 1}, {K, K, IC, 1}, 1, 1, 1, 1, 1, 1, true)); -+ test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_F32, -+ { IW_IH, IW_IH, IC, 1 }, { K, K, IC, 1 }, 1, 1, 1, 1, 1, 1, -+ true)); - } - } - } - -- for (int kv : { 4096, 8192, 16384, }) { -- for (int hs : { 64, 128, }) { -- for (int nr : { 1, 4, }) { -- test_cases.emplace_back(new test_flash_attn_ext(hs, hs, 8, nr, kv, 1, true, 0, 0, GGML_PREC_F32, GGML_TYPE_F16)); -+ for (int kv : { -+ 4096, -+ 8192, -+ 16384, -+ }) { -+ for (int hs : { -+ 64, -+ 128, -+ }) { -+ for (int nr : { -+ 1, -+ 4, -+ }) { -+ test_cases.emplace_back( -+ new test_flash_attn_ext(hs, hs, 8, nr, kv, 1, true, 0, 0, GGML_PREC_F32, GGML_TYPE_F16)); - } - } - } - -- test_cases.emplace_back(new test_conv_2d_dw({512, 512, 256, 1}, {3, 3, 1, 256}, 1, 1, 1, false)); -- test_cases.emplace_back(new test_conv_2d_dw({512, 512, 256, 1}, {3, 3, 1, 256}, 1, 1, 1, true)); -+ test_cases.emplace_back(new test_conv_2d_dw({ 512, 512, 256, 1 }, { 3, 3, 1, 256 }, 1, 1, 1, false)); -+ test_cases.emplace_back(new test_conv_2d_dw({ 512, 512, 256, 1 }, { 3, 3, 1, 256 }, 1, 1, 1, true)); -+ -+ test_cases.emplace_back(new test_conv_transpose_2d({ 256, 256, 256, 1 }, { 3, 3, 16, 256 }, 1)); -+ -+ test_cases.emplace_back(new test_mean(GGML_TYPE_F32, { 256, 256, 3, 1 })); - - return test_cases; - } -@@ -4685,10 +4698,10 @@ static void usage(char ** argv) { - } - - int main(int argc, char ** argv) { -- test_mode mode = MODE_TEST; -+ test_mode mode = MODE_TEST; - const char * op_name_filter = nullptr; - const char * backend_filter = nullptr; -- const char * params_filter = nullptr; -+ const char * params_filter = nullptr; - - for (int i = 1; i < argc; i++) { - if (strcmp(argv[i], "test") == 0) { -@@ -4752,14 +4765,15 @@ int main(int argc, char ** argv) { - GGML_ASSERT(backend != NULL); - - ggml_backend_reg_t reg = ggml_backend_dev_backend_reg(dev); -- auto ggml_backend_set_n_threads_fn = (ggml_backend_set_n_threads_t) 
ggml_backend_reg_get_proc_address(reg, "ggml_backend_set_n_threads"); -+ auto ggml_backend_set_n_threads_fn = -+ (ggml_backend_set_n_threads_t) ggml_backend_reg_get_proc_address(reg, "ggml_backend_set_n_threads"); - if (ggml_backend_set_n_threads_fn) { - // TODO: better value for n_threads - ggml_backend_set_n_threads_fn(backend, std::thread::hardware_concurrency()); - } - - printf(" Device description: %s\n", ggml_backend_dev_description(dev)); -- size_t free, total; // NOLINT -+ size_t free, total; // NOLINT - ggml_backend_dev_memory(dev, &free, &total); - printf(" Device memory: %zu MB (%zu MB free)\n", total / 1024 / 1024, free / 1024 / 1024); - printf("\n"); diff --git a/llama/patches/0020-Enable-CUDA-Graphs-for-gemma3n.patch b/llama/patches/0020-Enable-CUDA-Graphs-for-gemma3n.patch new file mode 100644 index 000000000..c8b227c0c --- /dev/null +++ b/llama/patches/0020-Enable-CUDA-Graphs-for-gemma3n.patch @@ -0,0 +1,56 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Oliver Simons +Date: Tue, 22 Jul 2025 11:02:28 +0200 +Subject: [PATCH] Enable CUDA Graphs for gemma3n. + +Similar to +https://github.com/ggml-org/llama.cpp/pull/14741, +though ollama has a slightly different model graph +than llama.cpp which requires different workaround +checks. +--- + ggml/src/ggml-cuda/ggml-cuda.cu | 18 ++++++++++++++++++ + 1 file changed, 18 insertions(+) + +diff --git a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu +index 57eae461..9db0c8b5 100644 +--- a/ggml/src/ggml-cuda/ggml-cuda.cu ++++ b/ggml/src/ggml-cuda/ggml-cuda.cu +@@ -2671,12 +2671,24 @@ static bool check_node_graph_compatibility_and_refresh_copy_ops(ggml_backend_cud + // Loop over nodes in GGML graph to obtain info needed for CUDA graph + cuda_ctx->cuda_graph->cpy_dest_ptrs.clear(); + ++ // This fix was added in llama.cpp and Ollama in parallel, but with ++ // different tensor names. ++ // llama.cpp: https://github.com/ggml-org/llama.cpp/pull/14741 ++ // ollama: https://github.com/ollama/ollama/pull/11525 ++ ++ const std::string gemma3n_per_layer_proj_src1_name_ollama = " (reshaped)"; ++ const std::string gemma3n_node_name_ollama = "node_"; ++ + const std::string gemma3n_per_layer_proj_src0_name = "inp_per_layer_selected"; + const std::string gemma3n_per_layer_proj_src1_name = "per_layer_proj"; ++ ++ const std::string ffn_moe_bias_suffix = "_exps.bias"; ++ + const std::string ffn_moe_gate_bias_prefix = "ffn_moe_gate_biased"; + const std::string ffn_moe_up_bias_prefix = "ffn_moe_up_biased"; + const std::string ffn_moe_down_bias_prefix = "ffn_moe_down_biased"; + ++ + for (int i = 0; i < cgraph->n_nodes; i++) { + ggml_tensor * node = cgraph->nodes[i]; + +@@ -2700,6 +2712,12 @@ static bool check_node_graph_compatibility_and_refresh_copy_ops(ggml_backend_cud + + if (node->op == GGML_OP_ADD && + node->src[1] && node->src[1]->ne[1] > 1 && ++ // ollama ++ // workarounds to exclude Gemma3n's `project_per_layer_input` operation from the batch-size heuristic, specific to ollama's implementation of gemma3n ++ // number of layers is different for per_layer_proj between gemma3n:2b and gemma3n:4b, which is why we don't check that value here ++ !(node->ne[0] == 256 && node->ne[2] == 1 && node->ne[3] == 1 && node->src[0] ? std::string(node->src[0]->name).find(gemma3n_node_name_ollama) != std::string::npos : false && node->src[1] ? node->src[1]->name == gemma3n_per_layer_proj_src1_name_ollama : false) && ++ node->src[1] ? 
std::string(node->src[1]->name).find(ffn_moe_bias_suffix) == std::string::npos : false && ++ // upstream + (node->src[0] ? node->src[0]->name != gemma3n_per_layer_proj_src0_name : true) && + (node->src[1] ? node->src[1]->name != gemma3n_per_layer_proj_src1_name : true) && + strncmp(node->name, ffn_moe_gate_bias_prefix.c_str(), ffn_moe_gate_bias_prefix.size()) != 0 && diff --git a/llama/patches/0025-Disable-ggml-blas-on-macos-v13-and-older.patch b/llama/patches/0021-Disable-ggml-blas-on-macos-v13-and-older.patch similarity index 96% rename from llama/patches/0025-Disable-ggml-blas-on-macos-v13-and-older.patch rename to llama/patches/0021-Disable-ggml-blas-on-macos-v13-and-older.patch index 465792600..fbcbfd4fc 100644 --- a/llama/patches/0025-Disable-ggml-blas-on-macos-v13-and-older.patch +++ b/llama/patches/0021-Disable-ggml-blas-on-macos-v13-and-older.patch @@ -8,7 +8,7 @@ Subject: [PATCH] Disable ggml-blas on macos v13 and older 1 file changed, 5 insertions(+) diff --git a/ggml/src/ggml-blas/ggml-blas.cpp b/ggml/src/ggml-blas/ggml-blas.cpp -index ec158dfa..22926d75 100644 +index aeac2e57..40738d5b 100644 --- a/ggml/src/ggml-blas/ggml-blas.cpp +++ b/ggml/src/ggml-blas/ggml-blas.cpp @@ -505,6 +505,11 @@ static const struct ggml_backend_reg_i ggml_backend_blas_reg_i = { diff --git a/llama/patches/0021-Enable-CUDA-Graphs-for-gemma3n.patch b/llama/patches/0021-Enable-CUDA-Graphs-for-gemma3n.patch deleted file mode 100644 index b9dd6cdc6..000000000 --- a/llama/patches/0021-Enable-CUDA-Graphs-for-gemma3n.patch +++ /dev/null @@ -1,50 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Oliver Simons -Date: Tue, 22 Jul 2025 11:02:28 +0200 -Subject: [PATCH] Enable CUDA Graphs for gemma3n. - -Similar to -https://github.com/ggml-org/llama.cpp/pull/14741, -though ollama has a slightly different model graph -than llama.cpp which requires different workaround -checks. ---- - ggml/src/ggml-cuda/ggml-cuda.cu | 16 ++++++++++++---- - 1 file changed, 12 insertions(+), 4 deletions(-) - -diff --git a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu -index 2b9fabf4..28ccf4be 100644 ---- a/ggml/src/ggml-cuda/ggml-cuda.cu -+++ b/ggml/src/ggml-cuda/ggml-cuda.cu -@@ -2474,6 +2474,9 @@ static bool check_node_graph_compatibility_and_refresh_copy_ops(ggml_backend_cud - // Loop over nodes in GGML graph to obtain info needed for CUDA graph - cuda_ctx->cuda_graph->cpy_dest_ptrs.clear(); - -+ const std::string gemma3n_per_layer_proj_src1_name = " (reshaped)"; -+ const std::string gemma3n_node_name = "node_"; -+ - for (int i = 0; i < cgraph->n_nodes; i++) { - ggml_tensor * node = cgraph->nodes[i]; - -@@ -2495,12 +2498,17 @@ static bool check_node_graph_compatibility_and_refresh_copy_ops(ggml_backend_cud - #endif - } - -- if (node->op == GGML_OP_ADD && node->src[1] && node->src[1]->ne[1] > 1) { -- // disable CUDA graphs for batch size > 1 for now. -- // Changes in batch size or context size can cause changes to the grid size of some kernels. -+ // workarounds to exclude Gemma3n's `project_per_layer_input` operation from the batch-size heuristic, specific to ollama's implementation of gemma3n -+ // number of layers is different for per_layer_proj between gemma3n:2b and gemma3n:4b, which is why we don't check that value here -+ if (node->op == GGML_OP_ADD && node->src[1] && node->src[1]->ne[1] > 1 && !(node->ne[0] == 256 -+ && node->ne[2] == 1 -+ && node->ne[3] == 1 -+ && node->src[0] ? 
std::string(node->src[0]->name).find(gemma3n_node_name) != std::string::npos : false -+ && node->src[1] ? node->src[1]->name == gemma3n_per_layer_proj_src1_name : false)) { -+ // Generally, changes in batch size or context size can cause changes to the grid size of some kernels. - use_cuda_graph = false; - #ifndef NDEBUG -- GGML_LOG_DEBUG("%s: disabling CUDA graphs due to batch size > 1 [%s] [%ld %ld %ld %ld]\n", __func__, node->name, node->ne[0], node->ne[1], node->ne[2], node->ne[3]); -+ GGML_LOG_INFO("%s: disabling CUDA graphs due to batch size > 1 [%s] [%ld %ld %ld %ld]\n", __func__, node->name, node->ne[0], node->ne[1], node->ne[2], node->ne[3]); - #endif - } - diff --git a/llama/patches/0022-fix-mtmd-audio.cpp-build-on-windows.patch b/llama/patches/0022-fix-mtmd-audio.cpp-build-on-windows.patch new file mode 100644 index 000000000..8c2468c0c --- /dev/null +++ b/llama/patches/0022-fix-mtmd-audio.cpp-build-on-windows.patch @@ -0,0 +1,21 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Daniel Hiltgen +Date: Wed, 6 Aug 2025 12:35:29 -0700 +Subject: [PATCH] fix mtmd-audio.cpp build on windows + +--- + tools/mtmd/mtmd-audio.cpp | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/tools/mtmd/mtmd-audio.cpp b/tools/mtmd/mtmd-audio.cpp +index 4d053895..84bdc277 100644 +--- a/tools/mtmd/mtmd-audio.cpp ++++ b/tools/mtmd/mtmd-audio.cpp +@@ -1,6 +1,6 @@ ++#define _USE_MATH_DEFINES // for M_PI + #include "mtmd-audio.h" + +-#define _USE_MATH_DEFINES // for M_PI + #include + #include + #include diff --git a/llama/patches/0023-MXFP4.patch b/llama/patches/0023-MXFP4.patch deleted file mode 100644 index 2beb1518d..000000000 --- a/llama/patches/0023-MXFP4.patch +++ /dev/null @@ -1,1293 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Daniel Hiltgen -Date: Mon, 21 Jul 2025 12:06:13 -0700 -Subject: [PATCH] MXFP4 - -Partial implementation of MXFP4 tensor type ---- - ggml/include/ggml.h | 2 +- - ggml/src/ggml-common.h | 7 + - ggml/src/ggml-cpu/ggml-cpu-quants.h | 2 + - ggml/src/ggml-cpu/ggml-cpu.c | 5 + - ggml/src/ggml-cpu/ops.cpp | 1 + - ggml/src/ggml-cpu/vec.cpp | 90 ++++++++ - ggml/src/ggml-cpu/vec.h | 2 + - ggml/src/ggml-cuda/convert.cu | 80 +++++++ - ggml/src/ggml-cuda/ggml-cuda.cu | 16 +- - ggml/src/ggml-cuda/mmvmxfp4.cu | 307 ++++++++++++++++++++++++++ - ggml/src/ggml-cuda/mmvmxfp4.cuh | 9 + - ggml/src/ggml-metal/ggml-metal-impl.h | 3 + - ggml/src/ggml-metal/ggml-metal.m | 25 ++- - ggml/src/ggml-metal/ggml-metal.metal | 173 ++++++++++++++- - ggml/src/ggml-quants.c | 142 +++++++++++- - ggml/src/ggml-quants.h | 6 + - ggml/src/ggml.c | 13 +- - 17 files changed, 868 insertions(+), 15 deletions(-) - create mode 100644 ggml/src/ggml-cuda/mmvmxfp4.cu - create mode 100644 ggml/src/ggml-cuda/mmvmxfp4.cuh - -diff --git a/ggml/include/ggml.h b/ggml/include/ggml.h -index e91dedf1..873baa24 100644 ---- a/ggml/include/ggml.h -+++ b/ggml/include/ggml.h -@@ -353,7 +353,7 @@ extern "C" { - GGML_TYPE_F16 = 1, - GGML_TYPE_Q4_0 = 2, - GGML_TYPE_Q4_1 = 3, -- // GGML_TYPE_Q4_2 = 4, support has been removed -+ GGML_TYPE_MXFP4 = 4, // Formerly removed type GGML_TYPE_Q4_2 - // GGML_TYPE_Q4_3 = 5, support has been removed - GGML_TYPE_Q5_0 = 6, - GGML_TYPE_Q5_1 = 7, -diff --git a/ggml/src/ggml-common.h b/ggml/src/ggml-common.h -index 086c822d..e0d71451 100644 ---- a/ggml/src/ggml-common.h -+++ b/ggml/src/ggml-common.h -@@ -417,6 +417,13 @@ typedef struct { - } block_iq4_xs; - static_assert(sizeof(block_iq4_xs) == sizeof(ggml_half) + 
sizeof(uint16_t) + QK_K/64 + QK_K/2, "wrong iq4_xs block size/padding"); - -+#define MXFP4 32 -+typedef struct { -+ uint8_t d; // scale E8M0 float -+ uint8_t qs[MXFP4 / 2]; // (32) 4 bit elements E2M1 float -+} block_mxfp4; -+static_assert(sizeof(block_mxfp4) == sizeof(uint8_t) + MXFP4/2, "wrong mxfp4 block size/padding"); -+ - #endif // GGML_COMMON_DECL - #endif // GGML_COMMON_DECL - -diff --git a/ggml/src/ggml-cpu/ggml-cpu-quants.h b/ggml/src/ggml-cpu/ggml-cpu-quants.h -index e33d9d47..6a25d062 100644 ---- a/ggml/src/ggml-cpu/ggml-cpu-quants.h -+++ b/ggml/src/ggml-cpu/ggml-cpu-quants.h -@@ -58,6 +58,8 @@ void ggml_vec_dot_iq4_nl_q8_0 (int n, float * GGML_RESTRICT s, size_t bs, const - void ggml_vec_dot_iq4_xs_q8_K (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); - void ggml_vec_dot_iq3_s_q8_K (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); - -+void ggml_vec_dot_mxfp4(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const float * GGML_RESTRICT y, size_t by, int nrc); -+ - #ifdef __cplusplus - } - #endif -diff --git a/ggml/src/ggml-cpu/ggml-cpu.c b/ggml/src/ggml-cpu/ggml-cpu.c -index 2462d2b8..bff9c426 100644 ---- a/ggml/src/ggml-cpu/ggml-cpu.c -+++ b/ggml/src/ggml-cpu/ggml-cpu.c -@@ -362,6 +362,11 @@ static const struct ggml_type_traits_cpu type_traits_cpu[GGML_TYPE_COUNT] = { - .vec_dot_type = GGML_TYPE_Q8_K, - .nrows = 1, - }, -+ [GGML_TYPE_MXFP4] = { -+ .vec_dot = (ggml_vec_dot_t) ggml_vec_dot_mxfp4, -+ .vec_dot_type = GGML_TYPE_F32, -+ .nrows = 1, -+ }, - }; - - const struct ggml_type_traits_cpu * ggml_get_type_traits_cpu(enum ggml_type type) { -diff --git a/ggml/src/ggml-cpu/ops.cpp b/ggml/src/ggml-cpu/ops.cpp -index 654e2f28..be0aa683 100644 ---- a/ggml/src/ggml-cpu/ops.cpp -+++ b/ggml/src/ggml-cpu/ops.cpp -@@ -4965,6 +4965,7 @@ void ggml_compute_forward_clamp( - case GGML_TYPE_I32: - case GGML_TYPE_I64: - case GGML_TYPE_F64: -+ case GGML_TYPE_MXFP4: - case GGML_TYPE_COUNT: - { - GGML_ABORT("fatal error"); -diff --git a/ggml/src/ggml-cpu/vec.cpp b/ggml/src/ggml-cpu/vec.cpp -index 02d40618..ec3ec9b1 100644 ---- a/ggml/src/ggml-cpu/vec.cpp -+++ b/ggml/src/ggml-cpu/vec.cpp -@@ -250,3 +250,93 @@ ggml_float ggml_vec_log_soft_max_f32(const int n, float * y, const float * x, fl - } - return sum = (ggml_float)logf(sum); - } -+ -+#define MXFP4 32 -+typedef struct { -+ uint8_t d; // scale E8M0 float -+ uint8_t qs[MXFP4 / 2]; // (32) 4 bit elements E2M1 float -+} block_mxfp4; -+static_assert(sizeof(block_mxfp4) == sizeof(uint8_t) + MXFP4/2, "wrong mxfp4 block size/padding"); -+#define MXFP4_VALS {0.0, 0.5, 1.0, 1.5, 2.0, 3.0, 4.0, 6.0, 0.0, -0.5, -1.0, -1.5, -2.0, -3.0, -4.0, -6.0} -+ -+void ggml_vec_dot_mxfp4(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const float * GGML_RESTRICT y, size_t by, int nrc) { -+ assert(nrc == 1); -+ GGML_UNUSED(nrc); -+ GGML_UNUSED(bx); -+ GGML_UNUSED(by); -+ GGML_UNUSED(bs); -+ ggml_float mxfp4_table[] = MXFP4_VALS; -+ -+#if defined(GGML_SIMD) -+ float sumf = 0.0f; -+ const int np = (n & ~(GGML_F32_STEP - 1)); -+ const block_mxfp4 * GGML_RESTRICT xx = (const block_mxfp4 *) vx; -+ GGML_F32_VEC sum[GGML_F32_ARR] = { GGML_F32_VEC_ZERO }; -+ -+ GGML_F32_VEC scalev; -+ GGML_F32_VEC ax[GGML_F32_ARR]; -+ GGML_F32_VEC ay[GGML_F32_ARR]; -+ for (int i = 0; i < np; i += GGML_F32_STEP) { // ARM: +16 AVX512: +64 -+ for (int 
j = 0; j < GGML_F32_ARR; j++) { // ARM: 0 .. 4 AVX512: 0 .. 4 -+ // convert GGML_F32_ARR X elements -+ const int ib = (i + j*GGML_F32_EPR) / MXFP4; -+ const block_mxfp4 * GGML_RESTRICT x = &xx[ib]; -+ union { -+ uint32_t as_bits; -+ float as_value; -+ } scale; -+ scale.as_bits = (((uint32_t)x->d) << 23); -+ scalev = GGML_F32_VEC_SET1(scale.as_value); -+ float xf[GGML_F32_EPR]= {0.f}; -+ assert(((i+j*GGML_F32_EPR) % MXFP4)+GGML_F32_ARR < MXFP4 && "block overrun"); -+ for (int qi = 0; qi < GGML_F32_EPR/2 ; ++qi) { -+ xf[qi*2] = mxfp4_table[(x->qs[((i+j*GGML_F32_EPR)%MXFP4)/2+qi] & 0xf)]; -+ xf[qi*2+1] = mxfp4_table[(x->qs[((i+j*GGML_F32_EPR)%MXFP4)/2+qi] & 0xf0) >> 4]; -+ } -+ -+ ax[j] = GGML_F32_VEC_MUL(GGML_F32_VEC_LOAD(xf), scalev); -+ ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR); -+ sum[j] = GGML_F32_VEC_FMA(sum[j], ax[j], ay[j]); -+ } -+ } -+ GGML_F32_VEC_REDUCE(sumf, sum); -+ -+ // leftovers -+ for (int i = np; i < n; i+=2) { -+ const int ib = i / MXFP4; -+ const block_mxfp4 * GGML_RESTRICT x = &xx[ib]; -+ union { -+ uint32_t as_bits; -+ float as_value; -+ } scale; -+ scale.as_bits = (((uint32_t)x->d) << 23); -+ sumf += y[i] * scale.as_value * mxfp4_table[(x->qs[(i%MXFP4)/2] & 0xf)]; -+ sumf += y[i+1] * scale.as_value * mxfp4_table[(x->qs[(i%MXFP4)/2] & 0xf0) >> 4]; -+ } -+ -+ -+#else // defined(GGML_SIMD) -+ const int nb = n / MXFP4; -+ assert(n % MXFP4 == 0); -+ -+ int yi = 0; -+ -+ const block_mxfp4 * GGML_RESTRICT xx = (const block_mxfp4 *) vx; -+ -+ ggml_float sumf = 0.0; -+ for (int ib = 0; ib < nb; ++ib) { -+ const block_mxfp4 * GGML_RESTRICT x = &xx[ib + 0]; -+ union { -+ uint32_t as_bits; -+ float as_value; -+ } scale; -+ scale.as_bits = (((uint32_t)x->d) << 23); -+ for (int i = 0; i < MXFP4/2; ++i) { -+ sumf += mxfp4_table[(x->qs[i] & 0xf)] * (ggml_float)(scale.as_value) * (ggml_float)(y[ib*MXFP4 + i*2]); -+ sumf += mxfp4_table[(x->qs[i] & 0xf0) >> 4] * (ggml_float)(scale.as_value) * (ggml_float)(y[ib*MXFP4 + i*2+1]); -+ } -+ } -+#endif -+ -+ *s = sumf; -+} -diff --git a/ggml/src/ggml-cpu/vec.h b/ggml/src/ggml-cpu/vec.h -index 23cbb305..7480ca08 100644 ---- a/ggml/src/ggml-cpu/vec.h -+++ b/ggml/src/ggml-cpu/vec.h -@@ -42,6 +42,8 @@ void ggml_vec_dot_f32(int n, float * GGML_RESTRICT s, size_t bs, const float * G - void ggml_vec_dot_bf16(int n, float * GGML_RESTRICT s, size_t bs, ggml_bf16_t * GGML_RESTRICT x, size_t bx, ggml_bf16_t * GGML_RESTRICT y, size_t by, int nrc); - void ggml_vec_dot_f16(int n, float * GGML_RESTRICT s, size_t bs, ggml_fp16_t * GGML_RESTRICT x, size_t bx, ggml_fp16_t * GGML_RESTRICT y, size_t by, int nrc); - -+void ggml_vec_dot_mxfp4(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const float * GGML_RESTRICT y, size_t by, int nrc); -+ - void ggml_vec_silu_f32(const int n, float * y, const float * x); - ggml_float ggml_vec_soft_max_f32(const int n, float * y, const float * x, float max); - ggml_float ggml_vec_log_soft_max_f32(const int n, float * y, const float * x, float max); -diff --git a/ggml/src/ggml-cuda/convert.cu b/ggml/src/ggml-cuda/convert.cu -index c6dec427..0e016ccc 100644 ---- a/ggml/src/ggml-cuda/convert.cu -+++ b/ggml/src/ggml-cuda/convert.cu -@@ -571,6 +571,82 @@ static void dequantize_row_iq4_xs_cuda(const void * vx, dst_t * y, const int64_t - dequantize_block_iq4_xs<<>>(vx, y); - } - -+// MXFP4 dequantize derived from dequantize_block_q4_0 -+template -+static __global__ void dequantize_block_mxfp4(const void * __restrict__ vx, dst_t * __restrict__ yy, int nb32) { -+ const uint16_t dst_bias = 15; 
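// Summary of what these dst_* constants encode (the block_mxfp4 layout is
// declared in the ggml-common.h hunk earlier in this patch): each block
// stores one E8M0 scale byte `d`, decoded by placing `d` in the float32
// exponent field, i.e. 2^(d - 127) for in-range `d`, followed by 32 signed
// E2M1 nibbles. E2M1 has 1 sign bit, 2 exponent bits (bias 1) and 1
// mantissa bit, so a nibble can only encode {0, 0.5, 1, 1.5, 2, 3, 4, 6}
// and their negatives. The remap to IEEE half below rebases normal values
// by adding (dst_bias - 1) << dst_m_bits (FP16 uses bias 15 and 10
// mantissa bits), and maps the single subnormal E2M1 pattern 0bS001 to
// +/-0.5, whose FP16 bit pattern is dst_0p5 (0x3800).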
-+ const uint16_t dst_0p5 = 0x3800; -+ const uint16_t dst_m_bits = 10; -+ const int64_t i = blockIdx.x; -+ -+ // assume 32 threads -+ const int64_t tid = threadIdx.x; -+ const int64_t il = tid/8; -+ const int64_t ir = tid%8; -+ const int64_t ib = 8*i + ir; -+ if (ib >= nb32) { -+ return; -+ } -+ -+ const uint64_t offset = 256*i + MXFP4*ir + 8*il; -+ dst_t * y = yy + offset; -+ -+ const block_mxfp4 * x = (const block_mxfp4 *)vx + ib; -+ union { -+ uint32_t as_bits; -+ float as_value; -+ } scale; -+ scale.as_bits = (((uint32_t)x->d) << 23); -+ -+ // offset within the block 1/4 chunks (8 items) -+ const uint8_t * q = x->qs + 4*il; -+ -+ for (int l = 0; l < 4; ++l) { -+ uint16_t em0 = q[l] & 0x07; -+ uint16_t em1 = q[l] & 0x70; -+ // float16 values -+ iq1m_scale_t x0; -+ iq1m_scale_t x1; -+ -+ x0.u16 = (em0 << (dst_m_bits - 1)) | ((q[l] & 0x08) << 12); -+ x1.u16 = (em1 << (dst_m_bits - 5)) | ((q[l] & 0x80) << 8); -+ -+ // Three cases: -+ // x is normal and non-zero: Correct bias -+ if ((em0 & 0x06) != 0) { -+ x0.u16 = x0.u16 + ((dst_bias - 1) << dst_m_bits); -+ } -+ if ((em1 & 0x60) != 0) { -+ x1.u16 = x1.u16 + ((dst_bias - 1) << dst_m_bits); -+ } -+ // x is subnormal (x == 0bs001 where s is the sign): Map to +-0.5 in the dst type -+ if (em0 == 0x01) { -+ x0.u16 = dst_0p5 | (x0.u16 & 0x8000); -+ } -+ if (em1 == 0x10) { -+ x1.u16 = dst_0p5 | (x1.u16 & 0x8000); -+ } -+ // x is zero, do nothing -+ -+ // XXX it looks correct here - but mulmat still gives bad results... -+ // printf("i:%lld ir:%lld il:%lld l:%d y_offset:[%3lld +%d] = %f \n", -+ // i, ir, il, l, 256*i + 32*ir + 4*il, l*2+ 0, scale * float(x0.f16)); -+ // printf("i:%lld ir:%lld il:%lld l:%d y_offset:[%3lld +%d] = %f \n", -+ // i, ir, il, l, 256*i + 32*ir + 4*il, l*2+ 1, scale * float(x1.f16)); -+ -+ y[l*2] = scale.as_value * float(x0.f16); -+ y[l*2+1] = scale.as_value * float(x1.f16); -+ } -+} -+ -+// derived from dequantize_row_q4_0_cuda -+template -+static void dequantize_row_mxfp4_cuda(const void * vx, dst_t * y, const int64_t k, cudaStream_t stream) { -+ const int nb32 = k / 32; -+ const int nb = (k + 255) / 256; -+ dequantize_block_mxfp4<<>>(vx, y, nb32); -+} -+ - template - static __global__ void convert_unary( - const void * __restrict__ vx, dst_t * __restrict__ y, const int64_t ne00, const int64_t ne01, const int64_t ne02, -@@ -664,6 +740,8 @@ to_fp16_cuda_t ggml_get_to_fp16_cuda(ggml_type type) { - return convert_unary_cont_cuda; - case GGML_TYPE_BF16: - return convert_unary_cont_cuda; -+ case GGML_TYPE_MXFP4: -+ return dequantize_row_mxfp4_cuda; - default: - return nullptr; - } -@@ -713,6 +791,8 @@ to_fp32_cuda_t ggml_get_to_fp32_cuda(ggml_type type) { - return convert_unary_cont_cuda; - case GGML_TYPE_BF16: - return convert_unary_cont_cuda; -+ case GGML_TYPE_MXFP4: -+ return dequantize_row_mxfp4_cuda; - default: - return nullptr; - } -diff --git a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu -index 28ccf4be..bb19b06e 100644 ---- a/ggml/src/ggml-cuda/ggml-cuda.cu -+++ b/ggml/src/ggml-cuda/ggml-cuda.cu -@@ -21,6 +21,7 @@ - #include "ggml-cuda/im2col.cuh" - #include "ggml-cuda/mmq.cuh" - #include "ggml-cuda/mmv.cuh" -+#include "ggml-cuda/mmvmxfp4.cuh" - #include "ggml-cuda/mmvq.cuh" - #include "ggml-cuda/norm.cuh" - #include "ggml-cuda/opt-step-adamw.cuh" -@@ -1202,7 +1203,7 @@ static void ggml_cuda_op_mul_mat_cublas( - - const int cc = ggml_cuda_info().devices[id].cc; - -- const bool use_fp16 = (src0->type == GGML_TYPE_F16 || ggml_is_quantized(src0->type)) && ggml_is_contiguous(src0) && row_diff == 
src0->ne[1] && dst->op_params[0] == GGML_PREC_DEFAULT; -+ const bool use_fp16 = (src0->type == GGML_TYPE_F16 || ggml_is_quantized(src0->type)) && ggml_is_contiguous(src0) && row_diff == src0->ne[1] && dst->op_params[0] == GGML_PREC_DEFAULT && src0->type != GGML_TYPE_MXFP4; - - if (src0->type == GGML_TYPE_BF16 && ggml_is_contiguous(src0) && row_diff == src0->ne[1]) { - ggml_cuda_pool_alloc src1_as_bf16(ctx.pool(id)); -@@ -1924,7 +1925,11 @@ static void ggml_cuda_mul_mat(ggml_backend_cuda_context & ctx, const ggml_tensor - && src0->ne[0] % 2 == 0 && src1->ne[1] == 1; - bool use_mul_mat_vec_q = ggml_is_quantized(src0->type) && !bad_padding_clear - && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32 -- && src1->ne[1] <= MMVQ_MAX_BATCH_SIZE; -+ && src1->ne[1] <= MMVQ_MAX_BATCH_SIZE -+ && src0->type != GGML_TYPE_MXFP4; -+ bool use_mul_mat_vec_mxfp4 = src0->type == GGML_TYPE_MXFP4 -+ && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32 -+ && src0->ne[0] % 2 == 0 && src1->ne[1] == 1; - bool use_mul_mat_q = ggml_is_quantized(src0->type) && !bad_padding_clear - && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32; - -@@ -1978,6 +1983,8 @@ static void ggml_cuda_mul_mat(ggml_backend_cuda_context & ctx, const ggml_tensor - ggml_cuda_op_mul_mat(ctx, src0, src1, dst, ggml_cuda_op_mul_mat_vec_q, quantize_row_q8_1_cuda); - } else if (use_mul_mat_q) { - ggml_cuda_op_mul_mat(ctx, src0, src1, dst, ggml_cuda_op_mul_mat_q, quantize_mmq_q8_1_cuda); -+ } else if (use_mul_mat_vec_mxfp4) { -+ ggml_cuda_op_mul_mat(ctx, src0, src1, dst, ggml_cuda_op_mul_mat_vec_mxfp4, nullptr); - } else { - ggml_cuda_op_mul_mat(ctx, src0, src1, dst, ggml_cuda_op_mul_mat_cublas, nullptr); - } -@@ -1997,6 +2004,10 @@ static void ggml_cuda_mul_mat_id(ggml_backend_cuda_context & ctx, ggml_tensor * - const int cc = ggml_cuda_info().devices[ggml_cuda_get_device()].cc; - - if (src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) { -+ if (ne2 == 1 && src0->type == GGML_TYPE_MXFP4) { -+ ggml_cuda_mul_mat_vec_mxfp4(ctx, src0, src1, ids, dst); -+ return; -+ } - if (ne2 == 1) { - if (ggml_is_quantized(src0->type)) { - ggml_cuda_mul_mat_vec_q(ctx, src0, src1, ids, dst); -@@ -3056,6 +3067,7 @@ static bool ggml_backend_cuda_device_supports_op(ggml_backend_dev_t dev, const g - case GGML_TYPE_IQ4_NL: - case GGML_TYPE_IQ4_XS: - case GGML_TYPE_BF16: -+ case GGML_TYPE_MXFP4: - #ifdef GGML_USE_MUSA - if (a->type == GGML_TYPE_Q3_K) { - return false; -diff --git a/ggml/src/ggml-cuda/mmvmxfp4.cu b/ggml/src/ggml-cuda/mmvmxfp4.cu -new file mode 100644 -index 00000000..da62062b ---- /dev/null -+++ b/ggml/src/ggml-cuda/mmvmxfp4.cu -@@ -0,0 +1,307 @@ -+#include "ggml.h" -+#include "common.cuh" -+#include "mmvmxfp4.cuh" -+ -+// MXFP4 implementation derived from mmv.cu float32 code paths -+typedef union { -+ half f16; -+ uint16_t u16; -+} f16_t; -+ -+template // TODO type_acc unused - consider bf16 support -+static __global__ void mul_mat_vec_mxfp4( -+ const block_mxfp4 * __restrict__ x, const float * __restrict__ y, const int32_t * __restrict__ ids, float * __restrict__ dst, -+ const int64_t ncols2, const int64_t nchannels_y, const int64_t stride_row, -+ const int64_t channel_ratio, const int64_t stride_channel_x, const int64_t stride_channel_y, const int64_t stride_channel_dst, -+ const int64_t sample_ratio, const int64_t stride_sample_x, const int64_t stride_sample_y, const int64_t stride_sample_dst) { -+ const int64_t row = blockIdx.x; -+ const int64_t channel_dst = blockIdx.y; -+ const int64_t channel_x = ids ? 
ids[channel_dst] : channel_dst / channel_ratio; -+ const int64_t channel_y = ids ? channel_dst % nchannels_y : channel_dst; -+ const int64_t sample_dst = blockIdx.z; -+ const int64_t sample_x = sample_dst / sample_ratio; -+ const int64_t sample_y = sample_dst; -+ const int tid = threadIdx.x; -+ constexpr int warp_size = ggml_cuda_get_physical_warp_size(); -+ -+ const uint16_t dst_bias = 15; -+ const uint16_t dst_0p5 = 0x3800; -+ const uint16_t dst_m_bits = 10; -+ -+ x += sample_x *stride_sample_x + channel_x *stride_channel_x + row*stride_row; -+ y += sample_y *stride_sample_y + channel_y *stride_channel_y; -+ dst += sample_dst*stride_sample_dst + channel_dst*stride_channel_dst; -+ -+ const float2 * y2 = (const float2 *) y; -+ -+ extern __shared__ char data_mmv[]; // allocated in GPU shared memory: warp_size*sizeof(float) -+ float * buf_iw = (float *) data_mmv; -+ -+ if (block_size > warp_size) { -+ if (tid < warp_size) { -+ buf_iw[tid] = 0.0f; -+ } -+ __syncthreads(); -+ } -+ -+ float sumf = 0.0f; -+ -+ for (int64_t col2 = tid; col2 < ncols2; col2 += block_size) { -+ int offset0 = col2 / (MXFP4/2); -+ int i = col2 % (MXFP4/2); -+ const block_mxfp4 *x2 = x+offset0; -+ -+ union { -+ uint32_t as_bits; -+ float as_value; -+ } scale; -+ scale.as_bits = (((uint32_t)x2->d) << 23); -+ uint16_t em0 = x2->qs[i] & 0x07; -+ uint16_t em1 = x2->qs[i] & 0x70; -+ // float16 values -+ f16_t x0; -+ f16_t x1; -+ x0.u16 = (em0 << (dst_m_bits - 1)) | ((x2->qs[i] & 0x08) << 12); -+ x1.u16 = (em1 << (dst_m_bits - 5)) | ((x2->qs[i] & 0x80) << 8); -+ -+ // Three cases: -+ // x is normal and non-zero: Correct bias -+ if ((em0 & 0x06) != 0) { -+ x0.u16 = x0.u16 + ((dst_bias - 1) << dst_m_bits); -+ } -+ if ((em1 & 0x60) != 0) { -+ x1.u16 = x1.u16 + ((dst_bias - 1) << dst_m_bits); -+ } -+ // x is subnormal (x == 0bs001 where s is the sign): Map to +-0.5 in the dst type -+ if (em0 == 0x01) { -+ x0.u16 = dst_0p5 | (x0.u16 & 0x8000); -+ } -+ if (em1 == 0x10) { -+ x1.u16 = dst_0p5 | (x1.u16 & 0x8000); -+ } -+ // x is zero, do nothing -+ -+ if (isnan(scale.as_value)) { -+ sumf = scale.as_value; -+ break; -+ } -+ -+ const float2 tmpx = {x0.f16, x1.f16}; -+ const float2 tmpy = y2[col2]; -+ sumf += tmpx.x*tmpy.x*scale.as_value; -+ sumf += tmpx.y*tmpy.y*scale.as_value; -+ } -+ -+ sumf = warp_reduce_sum(sumf); -+ -+ if (block_size > warp_size) { -+ buf_iw[tid/warp_size] = sumf; -+ __syncthreads(); -+ if (tid >= warp_size) { -+ return; -+ } -+ sumf = buf_iw[tid]; -+ sumf = warp_reduce_sum(sumf); -+ } -+ -+ if (tid != 0) { -+ return; -+ } -+ -+ dst[row] = sumf; -+} -+ -+template -+static void launch_mul_mat_vec_cuda_mxfp4( -+ const block_mxfp4 * x, const float * y, const int32_t * ids, float * dst, -+ const int64_t ncols, const int64_t nrows, const int64_t stride_row, const int64_t nchannels_x, const int64_t nchannels_y, const int64_t nchannels_dst, -+ const int64_t stride_channel_x, const int64_t stride_channel_y, const int64_t stride_channel_dst, const int64_t nsamples_x, -+ const int64_t nsamples_dst, const int64_t stride_sample_x, const int64_t stride_sample_y, const int64_t stride_sample_dst, -+ cudaStream_t stream) { -+ GGML_ASSERT(ncols % 2 == 0); -+ // GGML_ASSERT(stride_row % 2 == 0); // TODO -+ GGML_ASSERT(ids || nchannels_dst % nchannels_x == 0); -+ GGML_ASSERT( nsamples_dst % nsamples_x == 0); -+ const int64_t channel_ratio = nchannels_dst / nchannels_x; -+ const int64_t sample_ratio = nsamples_dst / nsamples_x; -+ int device; -+ int warp_size; -+ -+ CUDA_CHECK(cudaGetDevice(&device)); -+ warp_size = 
ggml_cuda_info().devices[device].warp_size; -+ -+ int64_t block_size_best = warp_size; -+ int64_t niter_best = (ncols + 2*warp_size - 1) / (2*warp_size); -+ int64_t max_block_size = 256; -+ if(ggml_cuda_info().devices[device].cc > GGML_CUDA_CC_OFFSET_AMD && ggml_cuda_info().devices[device].cc < GGML_CUDA_CC_RDNA1) { -+ max_block_size = 128; -+ } -+ for (int64_t block_size = 2*warp_size; block_size <= max_block_size; block_size += warp_size) { -+ const int64_t niter = (ncols + 2*block_size - 1) / (2*block_size); -+ if (niter < niter_best) { -+ niter_best = niter; -+ block_size_best = block_size; -+ } -+ } -+ -+ const int smem = warp_size*sizeof(float); -+ const dim3 block_nums(nrows, nchannels_dst, nsamples_dst); -+ const dim3 block_dims(block_size_best, 1, 1); -+ -+ switch (block_size_best) { -+ case 32: { -+ mul_mat_vec_mxfp4<<>> -+ (x, y, ids, dst, ncols/2, nchannels_y, stride_row, channel_ratio, stride_channel_x, stride_channel_y, -+ stride_channel_dst, sample_ratio, stride_sample_x, stride_sample_y, stride_sample_dst); -+ } break; -+ case 64: { -+ mul_mat_vec_mxfp4<<>> -+ (x, y, ids, dst, ncols/2, nchannels_y, stride_row, channel_ratio, stride_channel_x, stride_channel_y, -+ stride_channel_dst, sample_ratio, stride_sample_x, stride_sample_y, stride_sample_dst); -+ } break; -+ case 96: { -+ mul_mat_vec_mxfp4<<>> -+ (x, y, ids, dst, ncols/2, nchannels_y, stride_row, channel_ratio, stride_channel_x, stride_channel_y, -+ stride_channel_dst, sample_ratio, stride_sample_x, stride_sample_y, stride_sample_dst); -+ } break; -+ case 128: { -+ mul_mat_vec_mxfp4<<>> -+ (x, y, ids, dst, ncols/2, nchannels_y, stride_row, channel_ratio, stride_channel_x, stride_channel_y, -+ stride_channel_dst, sample_ratio, stride_sample_x, stride_sample_y, stride_sample_dst); -+ } break; -+ case 160: { -+ mul_mat_vec_mxfp4<<>> -+ (x, y, ids, dst, ncols/2, nchannels_y, stride_row, channel_ratio, stride_channel_x, stride_channel_y, -+ stride_channel_dst, sample_ratio, stride_sample_x, stride_sample_y, stride_sample_dst); -+ } break; -+ case 192: { -+ mul_mat_vec_mxfp4<<>> -+ (x, y, ids, dst, ncols/2, nchannels_y, stride_row, channel_ratio, stride_channel_x, stride_channel_y, -+ stride_channel_dst, sample_ratio, stride_sample_x, stride_sample_y, stride_sample_dst); -+ } break; -+ case 224: { -+ mul_mat_vec_mxfp4<<>> -+ (x, y, ids, dst, ncols/2, nchannels_y, stride_row, channel_ratio, stride_channel_x, stride_channel_y, -+ stride_channel_dst, sample_ratio, stride_sample_x, stride_sample_y, stride_sample_dst); -+ } break; -+ case 256: { -+ mul_mat_vec_mxfp4<<>> -+ (x, y, ids, dst, ncols/2, nchannels_y, stride_row, channel_ratio, stride_channel_x, stride_channel_y, -+ stride_channel_dst, sample_ratio, stride_sample_x, stride_sample_y, stride_sample_dst); -+ } break; -+ default: { -+ GGML_ABORT("fatal error"); -+ } break; -+ } -+} -+ -+static void mul_mat_vec_cuda_mxfp4( -+ const block_mxfp4 * x, const float * y, const int32_t * ids, float * dst, -+ const int64_t ncols, const int64_t nrows, const int64_t stride_row, const int64_t nchannels_x, const int64_t nchannels_y, const int64_t nchannels_dst, -+ const int64_t stride_channel_x, const int64_t stride_channel_y, const int64_t stride_channel_dst, const int64_t nsamples_x, -+ const int64_t nsamples_dst, const int64_t stride_sample_x, const int64_t stride_sample_y, const int64_t stride_sample_dst, -+ enum ggml_prec prec, cudaStream_t stream) { -+ launch_mul_mat_vec_cuda_mxfp4 -+ (x, y, ids, dst, ncols, nrows, stride_row, nchannels_x, nchannels_y, nchannels_dst, 
stride_channel_x, stride_channel_y, -+ stride_channel_dst, nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream); -+} -+ -+void ggml_cuda_mul_mat_vec_mxfp4(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * ids, ggml_tensor * dst) { -+ GGML_ASSERT( src1->type == GGML_TYPE_F32); -+ GGML_ASSERT(!ids || ids->type == GGML_TYPE_I32); -+ GGML_ASSERT( dst->type == GGML_TYPE_F32); -+ -+ GGML_TENSOR_BINARY_OP_LOCALS; -+ -+ const size_t ts_src0 = ggml_type_size(src0->type); -+ const size_t ts_src1 = ggml_type_size(src1->type); -+ const size_t ts_dst = ggml_type_size(dst->type); -+ -+ GGML_ASSERT(!ids || ne12 == 1); // Implementation is only correct for batch size 1. -+ GGML_ASSERT(ne13 == ne3); -+ -+ // GGML_ASSERT( nb00 == ts_src0); // TODO adjust for block sizing logic -+ GGML_ASSERT( nb10 == ts_src1); -+ GGML_ASSERT(!ids || ids->nb[0] == ggml_type_size(ids->type)); -+ GGML_ASSERT( nb0 == ts_dst); -+ -+ const int cc = ggml_cuda_info().devices[ggml_cuda_get_device()].cc; -+ const enum ggml_prec prec = fast_fp16_available(cc) ? ggml_prec(dst->op_params[0]) : GGML_PREC_F32; -+ -+ const float * src1_d = (const float *) src1->data; -+ const int32_t * ids_d = ids ? (const int32_t *) ids->data : nullptr; -+ float * dst_d = (float *) dst->data; -+ -+ const int64_t stride_row = src0->nb[1] / ts_src0; -+ const int64_t s11 = src1->nb[1] / ts_src1; -+ const int64_t s1 = dst->nb[1] / ts_dst; -+ const int64_t stride_channel_x = src0->nb[2] / ts_src0; -+ const int64_t s12 = src1->nb[2] / ts_src1; -+ const int64_t s2 = dst->nb[2] / ts_dst; -+ const int64_t stride_sample_x = src0->nb[3] / ts_src0; -+ const int64_t stride_sample_y = src1->nb[3] / ts_src1; -+ const int64_t stride_sample_dst = dst->nb[3] / ts_dst; -+ const int64_t nsamples_dst = ne3; -+ const int64_t nsamples_x = ne03; -+ const int64_t nchannels_x = ne02; -+ const int64_t nrows = ne01; -+ const int64_t ncols = ne00; -+ -+ // For MUL_MAT_ID the memory layout is different than for MUL_MAT: -+ const int64_t ncols_dst = ids ? ne2 : ne1; -+ const int64_t nchannels_y = ids ? ne11 : ne12; -+ const int64_t nchannels_dst = ids ? ne1 : ne2; -+ const int64_t stride_channel_dst = ids ? s1 : s2; -+ const int64_t stride_channel_y = ids ? s11 : s12; -+ -+ GGML_ASSERT(ncols_dst == 1); -+ -+ const block_mxfp4 * src0_d = (const block_mxfp4 *) src0->data; -+ mul_mat_vec_cuda_mxfp4(src0_d, src1_d, ids_d, dst_d, ncols, nrows, stride_row, -+ nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst, -+ nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, prec, ctx.stream()); -+} -+ -+void ggml_cuda_op_mul_mat_vec_mxfp4( -+ ggml_backend_cuda_context & ctx, -+ const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, const char * src0_dd_i, const float * src1_ddf_i, -+ const char * src1_ddq_i, float * dst_dd_i, const int64_t row_low, const int64_t row_high, const int64_t src1_ncols, -+ const int64_t src1_padded_row_size, cudaStream_t stream) { -+ -+ GGML_ASSERT(src1->type == GGML_TYPE_F32); -+ GGML_ASSERT(dst->type == GGML_TYPE_F32); -+ -+ const int64_t ne00 = src0->ne[0]; -+ const int64_t row_diff = row_high - row_low; -+ -+ GGML_ASSERT(src1_ncols == 1); -+ -+ const int cc = ggml_cuda_info().devices[ggml_cuda_get_device()].cc; -+ const enum ggml_prec prec = fast_fp16_available(cc) ? 
ggml_prec(dst->op_params[0]) : GGML_PREC_F32; -+ -+ // ggml_cuda_op provides single, contiguous matrices -+ const int64_t stride_row = ne00 / MXFP4; -+ const int64_t nchannels_x = 1; -+ const int64_t nchannels_y = 1; -+ const int64_t nchannels_dst = 1; -+ const int64_t stride_channel_x = 0; -+ const int64_t stride_channel_y = 0; -+ const int64_t stride_channel_dst = 0; -+ const int64_t nsamples_x = 1; -+ const int64_t nsamples_dst = 1; -+ const int64_t stride_sample_x = 0; -+ const int64_t stride_sample_y = 0; -+ const int64_t stride_sample_dst = 0; -+ -+ const block_mxfp4 * src0_d = (const block_mxfp4 *) src0_dd_i; -+ mul_mat_vec_cuda_mxfp4(src0_d, src1_ddf_i, nullptr, dst_dd_i, ne00, row_diff, stride_row, -+ nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst, -+ nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, prec, stream); -+ -+ GGML_UNUSED(ctx); -+ GGML_UNUSED(src1); -+ GGML_UNUSED(dst); -+ GGML_UNUSED(src1_ddq_i); -+ GGML_UNUSED(src1_ncols); -+ GGML_UNUSED(src1_padded_row_size); -+} -diff --git a/ggml/src/ggml-cuda/mmvmxfp4.cuh b/ggml/src/ggml-cuda/mmvmxfp4.cuh -new file mode 100644 -index 00000000..a08fc780 ---- /dev/null -+++ b/ggml/src/ggml-cuda/mmvmxfp4.cuh -@@ -0,0 +1,9 @@ -+#include "common.cuh" -+ -+void ggml_cuda_mul_mat_vec_mxfp4(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * ids, ggml_tensor * dst); -+ -+void ggml_cuda_op_mul_mat_vec_mxfp4( -+ ggml_backend_cuda_context & ctx, -+ const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, const char * src0_dd_i, const float * src1_ddf_i, -+ const char * src1_ddq_i, float * dst_dd_i, const int64_t row_low, const int64_t row_high, const int64_t src1_ncols, -+ const int64_t src1_padded_row_size, cudaStream_t stream); -diff --git a/ggml/src/ggml-metal/ggml-metal-impl.h b/ggml/src/ggml-metal/ggml-metal-impl.h -index 17eab976..938386ba 100644 ---- a/ggml/src/ggml-metal/ggml-metal-impl.h -+++ b/ggml/src/ggml-metal/ggml-metal-impl.h -@@ -65,6 +65,9 @@ - #define N_R0_IQ4_XS 2 - #define N_SG_IQ4_XS 2 - -+#define N_R0_MXFP4 4 -+#define N_SG_MXFP4 2 -+ - // kernel argument structs - // - // - element counters (e.g. 
ne00) typically use int32_t to reduce register usage -diff --git a/ggml/src/ggml-metal/ggml-metal.m b/ggml/src/ggml-metal/ggml-metal.m -index ab46f6e3..d8e05a21 100644 ---- a/ggml/src/ggml-metal/ggml-metal.m -+++ b/ggml/src/ggml-metal/ggml-metal.m -@@ -40,6 +40,7 @@ static const NSInteger MTLGPUFamilyMetal3_GGML = 5001; - static struct ggml_backend_reg g_ggml_backend_metal_reg; - static struct ggml_backend_device g_ggml_backend_metal_device; - -+ - // information about a Metal device - // note: assumes single GPU device - the default one - // TODO: support multiple GPU devices -@@ -209,6 +210,7 @@ enum ggml_metal_kernel_type { - GGML_METAL_KERNEL_TYPE_MUL_MV_Q5_0_F32, - GGML_METAL_KERNEL_TYPE_MUL_MV_Q5_1_F32, - GGML_METAL_KERNEL_TYPE_MUL_MV_Q8_0_F32, -+ GGML_METAL_KERNEL_TYPE_MUL_MV_MXFP4_F32, - GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_F16_F32_R1_2, - GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_F16_F32_R1_3, - GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_F16_F32_R1_4, -@@ -288,6 +290,7 @@ enum ggml_metal_kernel_type { - GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ1_M_F32, - GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ4_NL_F32, - GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ4_XS_F32, -+ GGML_METAL_KERNEL_TYPE_MUL_MV_ID_MXFP4_F32, - GGML_METAL_KERNEL_TYPE_MUL_MM_F32_F32, - GGML_METAL_KERNEL_TYPE_MUL_MM_F16_F32, - GGML_METAL_KERNEL_TYPE_MUL_MM_BF16_F32, -@@ -310,6 +313,7 @@ enum ggml_metal_kernel_type { - GGML_METAL_KERNEL_TYPE_MUL_MM_IQ1_M_F32, - GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_NL_F32, - GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_XS_F32, -+ GGML_METAL_KERNEL_TYPE_MUL_MM_MXFP4_F32, - GGML_METAL_KERNEL_TYPE_MUL_MM_ID_MAP0_F16, - GGML_METAL_KERNEL_TYPE_MUL_MM_ID_MAP1_F32, - GGML_METAL_KERNEL_TYPE_MUL_MM_ID_F32_F16, -@@ -334,6 +338,7 @@ enum ggml_metal_kernel_type { - GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ1_M_F16, - GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_NL_F16, - GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_XS_F16, -+ GGML_METAL_KERNEL_TYPE_MUL_MM_ID_MXFP4_F16, - GGML_METAL_KERNEL_TYPE_ROPE_NORM_F32, - GGML_METAL_KERNEL_TYPE_ROPE_NORM_F16, - GGML_METAL_KERNEL_TYPE_ROPE_MULTI_F32, -@@ -934,7 +939,7 @@ static id ggml_metal_load_library(id device, bool use_bfl - - MTLCompileOptions * options = [MTLCompileOptions new]; - options.preprocessorMacros = prep; -- -+ - //[options setFastMathEnabled:false]; - - metal_library = [device newLibraryWithSource:src options:options error:&error]; -@@ -1157,6 +1162,7 @@ static struct ggml_backend_metal_context * ggml_metal_init(ggml_backend_dev_t de - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q5_0_F32, mul_mv_q5_0_f32, has_simdgroup_reduction); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q5_1_F32, mul_mv_q5_1_f32, has_simdgroup_reduction); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q8_0_F32, mul_mv_q8_0_f32, has_simdgroup_reduction); -+ GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_MXFP4_F32, mul_mv_mxfp4_f32, has_simdgroup_reduction); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_F16_F32_R1_2, mul_mv_ext_f16_f32_r1_2, has_simdgroup_reduction); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_F16_F32_R1_3, mul_mv_ext_f16_f32_r1_3, has_simdgroup_reduction); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_F16_F32_R1_4, mul_mv_ext_f16_f32_r1_4, has_simdgroup_reduction); -@@ -1236,6 +1242,7 @@ static struct ggml_backend_metal_context * ggml_metal_init(ggml_backend_dev_t de - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ1_M_F32, mul_mv_id_iq1_m_f32, has_simdgroup_reduction); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ4_NL_F32, mul_mv_id_iq4_nl_f32, 
has_simdgroup_reduction); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ4_XS_F32, mul_mv_id_iq4_xs_f32, has_simdgroup_reduction); -+ GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_MXFP4_F32, mul_mv_id_mxfp4_f32, has_simdgroup_reduction); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_F32_F32, mul_mm_f32_f32, has_simdgroup_mm); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_F16_F32, mul_mm_f16_f32, has_simdgroup_mm); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_BF16_F32, mul_mm_bf16_f32, has_simdgroup_mm && use_bfloat); -@@ -1258,6 +1265,7 @@ static struct ggml_backend_metal_context * ggml_metal_init(ggml_backend_dev_t de - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ1_M_F32, mul_mm_iq1_m_f32, has_simdgroup_mm); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_NL_F32, mul_mm_iq4_nl_f32, has_simdgroup_mm); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_XS_F32, mul_mm_iq4_xs_f32, has_simdgroup_mm); -+ GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_MXFP4_F32, mul_mm_mxfp4_f32, has_simdgroup_mm); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_MAP0_F16, mul_mm_id_map0_f16, has_simdgroup_mm); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_MAP1_F32, mul_mm_id_map1_f32, has_simdgroup_mm); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_F32_F16, mul_mm_id_f32_f16, has_simdgroup_mm); -@@ -1282,6 +1290,7 @@ static struct ggml_backend_metal_context * ggml_metal_init(ggml_backend_dev_t de - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ1_M_F16, mul_mm_id_iq1_m_f16, has_simdgroup_mm); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_NL_F16, mul_mm_id_iq4_nl_f16, has_simdgroup_mm); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_XS_F16, mul_mm_id_iq4_xs_f16, has_simdgroup_mm); -+ GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_MXFP4_F16, mul_mm_id_mxfp4_f16, has_simdgroup_mm); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ROPE_NORM_F32, rope_norm_f32, true); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ROPE_NORM_F16, rope_norm_f16, true); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ROPE_MULTI_F32, rope_multi_f32, true); -@@ -3007,6 +3016,7 @@ static bool ggml_metal_encode_node( - case GGML_TYPE_IQ1_M: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ1_M_F32 ].pipeline; break; - case GGML_TYPE_IQ4_NL: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_NL_F32 ].pipeline; break; - case GGML_TYPE_IQ4_XS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_XS_F32 ].pipeline; break; -+ case GGML_TYPE_MXFP4: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_MXFP4_F32 ].pipeline; break; - default: GGML_ABORT("MUL MAT-MAT not implemented"); - } - -@@ -3212,6 +3222,12 @@ static bool ggml_metal_encode_node( - smem = 32*sizeof(float); - pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_IQ4_XS_F32].pipeline; - } break; -+ case GGML_TYPE_MXFP4: -+ { -+ nsg = N_SG_MXFP4; -+ nr0 = N_R0_MXFP4; -+ pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_MXFP4_F32].pipeline; -+ } break; - default: - { - GGML_LOG_ERROR("Asserting on type %d\n", (int)src0t); -@@ -3396,6 +3412,7 @@ static bool ggml_metal_encode_node( - case GGML_TYPE_IQ1_M: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ1_M_F16 ].pipeline; break; - case GGML_TYPE_IQ4_NL: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_NL_F16 ].pipeline; break; - case GGML_TYPE_IQ4_XS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_XS_F16 ].pipeline; break; -+ 
case GGML_TYPE_MXFP4: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_MXFP4_F16 ].pipeline; break;
- default: GGML_ABORT("MUL_MAT_ID not implemented");
- }
-
-@@ -3607,6 +3624,12 @@ static bool ggml_metal_encode_node(
- smem = 32*sizeof(float);
- pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ4_XS_F32].pipeline;
- } break;
-+ case GGML_TYPE_MXFP4:
-+ {
-+ nsg = N_SG_MXFP4;
-+ nr0 = N_R0_MXFP4;
-+ pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_MXFP4_F32].pipeline;
-+ } break;
- default:
- {
- GGML_LOG_ERROR("Asserting on type %d\n", (int)src2t);
-diff --git a/ggml/src/ggml-metal/ggml-metal.metal b/ggml/src/ggml-metal/ggml-metal.metal
-index 08e8d807..69fa17de 100644
---- a/ggml/src/ggml-metal/ggml-metal.metal
-+++ b/ggml/src/ggml-metal/ggml-metal.metal
-@@ -1902,16 +1902,16 @@ void mul_vec_q_n_f32_impl(
- device const char * src1,
- device char * dst,
- threadgroup char * shmem,
-- uint3 tgpig,
-- ushort tiisg,
-- ushort sgitg) {
-- const int nb = args.ne00/QK4_0;
-+ uint3 tgpig, // Threadgroup Position in Grid
-+ ushort tiisg, // Thread Index in SIMD Group
-+ ushort sgitg) { // SIMD Group Index in ThreadGroup
-+ const int nb = args.ne00/QK4_0; // src0->ne[0] / 32
-
- const int r0 = tgpig.x;
- const int r1 = tgpig.y;
- const int im = tgpig.z;
-
-- const int first_row = (r0 * nsg + sgitg) * nr0;
-+ const int first_row = (r0 * nsg + sgitg) * nr0; // nsg=2 nr0=4
-
- const uint i12 = im%args.ne12;
- const uint i13 = im/args.ne12;
-@@ -6744,6 +6744,49 @@ kernel void kernel_mul_mm_id(
- }
- }
-
-+template <typename type4x4>
-+void dequantize_mxfp4(device const block_mxfp4 * xb, short il, thread type4x4 & reg) {
-+ float4x4 reg_f;
-+ const ushort dst_bias = 15;
-+ const ushort dst_0p5 = 0x3800;
-+ const ushort dst_m_bits = 10;
-+ const half scale = (half)(as_type<float>(((uint32_t)xb->d) << 23));
-+ // il:0 first 16, il:1 last 16
-+ for (int i = 0; i < 8; i++) {
-+ ushort em0 = xb->qs[il*8 + i] & 0x07;
-+ ushort em1 = xb->qs[il*8 + i] & 0x70;
-+ // float16 values
-+ ushort x0 = (em0 << (dst_m_bits - 1)) | ((xb->qs[il*8 + i] & 0x08) << 12);
-+ ushort x1 = (em1 << (dst_m_bits - 5)) | ((xb->qs[il*8 + i] & 0x80) << 8);
-+
-+ // Three cases:
-+ // x is normal and non-zero: Correct bias
-+ if ((em0 & 0x06) != 0) {
-+ x0 = x0 + ((dst_bias - 1) << dst_m_bits);
-+ }
-+ if ((em1 & 0x60) != 0) {
-+ x1 = x1 + ((dst_bias - 1) << dst_m_bits);
-+ }
-+ // x is subnormal (x == 0bs001 where s is the sign): Map to +-0.5 in the dst type
-+ if (em0 == 0x01) {
-+ x0 = dst_0p5 | (x0 & 0x8000);
-+ }
-+ if (em1 == 0x10) {
-+ x1 = dst_0p5 | (x1 & 0x8000);
-+ }
-+ // x is zero, do nothing
-+
-+ if (isnan(scale)) {
-+ reg_f[i/2][2*(i%2) + 0] = scale;
-+ reg_f[i/2][2*(i%2) + 1] = scale;
-+ } else {
-+ reg_f[i/2][2*(i%2) + 0] = scale * as_type<half>(x0);
-+ reg_f[i/2][2*(i%2) + 1] = scale * as_type<half>(x1);
-+ }
-+ }
-+ reg = (type4x4) reg_f;
-+}
-+
- #define QK_NL 16
-
- //
-@@ -6811,6 +6854,8 @@ template [[host_name("kernel_mul_mm_iq1_m_f32")]] kernel mul_mm_t kernel_mul_m
- template [[host_name("kernel_mul_mm_iq4_nl_f32")]] kernel mul_mm_t kernel_mul_mm<block_iq4_nl, 2, dequantize_iq4_nl>;
- template [[host_name("kernel_mul_mm_iq4_xs_f32")]] kernel mul_mm_t kernel_mul_mm<block_iq4_xs, 2, dequantize_iq4_xs>;
-
-+template [[host_name("kernel_mul_mm_mxfp4_f32")]] kernel mul_mm_t kernel_mul_mm<block_mxfp4, 2, dequantize_mxfp4>;
-+
- //
- // indirect matrix-matrix multiplication
- //
-@@ -6842,6 +6887,8 @@ template [[host_name("kernel_mul_mm_id_iq1_m_f16")]] kernel mul_mm_id kernel_m
- template [[host_name("kernel_mul_mm_id_iq4_nl_f16")]] kernel mul_mm_id kernel_mul_mm_id<block_iq4_nl, 2, dequantize_iq4_nl>;
- template [[host_name("kernel_mul_mm_id_iq4_xs_f16")]] kernel mul_mm_id kernel_mul_mm_id<block_iq4_xs, 2, dequantize_iq4_xs>;
-
-+template [[host_name("kernel_mul_mm_id_mxfp4_f16")]] kernel mul_mm_id kernel_mul_mm_id<block_mxfp4, 2, dequantize_mxfp4>;
-+
-
- //
- // matrix-vector multiplication
- //
-@@ -6958,6 +7005,120 @@ kernel void kernel_mul_mv_id(
- sgitg);
- }
-
-+// MXFP4 implementation derived from mul_vec_q_n_f32_impl and block_q_n_dot_y
-+void mul_mv_mxfp4_f32_impl(
-+ ggml_metal_kargs_mul_mv args,
-+ device const char * src0,
-+ device const char * src1,
-+ device char * dst,
-+ threadgroup char * shmem,
-+ uint3 tgpig,
-+ ushort tiisg,
-+ ushort sgitg) {
-+ const ushort dst_bias = 15;
-+ const ushort dst_0p5 = 0x3800;
-+ const ushort dst_m_bits = 10;
-+ const int nr0 = N_R0_MXFP4;
-+ const int nsg = N_SG_MXFP4;
-+ const int nw = N_SIMDWIDTH;
-+ const int nb = args.ne00/MXFP4;
-+
-+ const int r0 = tgpig.x;
-+ const int r1 = tgpig.y;
-+ const int im = tgpig.z;
-+
-+ const int first_row = (r0 * nsg + sgitg) * nr0;
-+
-+ const uint i12 = im%args.ne12;
-+ const uint i13 = im/args.ne12;
-+
-+ const uint64_t offset1 = r1*args.nb11 + (i12 )*args.nb12 + (i13 )*args.nb13;
-+
-+ device const float * y = (device const float *) (src1 + offset1);
-+
-+ // pointers to src0 rows
-+ device const block_mxfp4 * ax[nr0];
-+ for (int row = 0; row < nr0; ++row) {
-+ const uint64_t offset0 = (first_row + row)*args.nb01 + (i12/args.r2)*args.nb02 + (i13/args.r3)*args.nb03;
-+
-+ ax[row] = (device const block_mxfp4 *) ((device char *) src0 + offset0);
-+ }
-+
-+ float yl[16]; // src1 vector cache
-+ float sumf[nr0] = {0.f};
-+
-+ const short ix = (tiisg/2);
-+ const short il = (tiisg%2)*16;
-+
-+ device const float * yb = y + ix*MXFP4 + il;
-+
-+ // each thread in a SIMD group deals with half a block.
-+ for (int ib = ix; ib < nb; ib += nw/2) {
-+
-+#pragma unroll
-+ for (short row = 0; row < nr0; row++) {
-+ // Processes 16 items
-+ device const block_mxfp4 * qb_curr = ax[row] + ib;
-+ float d = as_type<float>(((uint32_t)(ax[row] + ib)->d) << 23);
-+ // il = 0 or 16
-+ device const uint8_t *qs = ((device const uint8_t *) qb_curr + 1 + il/2);
-+ for (int i = 0; i < 8; ++i) {
-+ ushort em0 = qs[i] & 0x07;
-+ ushort em1 = qs[i] & 0x70;
-+ ushort x0 = (em0 << (dst_m_bits - 1)) | ((qs[i] & 0x08) << 12);
-+ ushort x1 = (em1 << (dst_m_bits - 5)) | ((qs[i] & 0x80) << 8);
-+ // Three cases:
-+ // x is normal and non-zero: Correct bias
-+ if ((em0 & 0x06) != 0) {
-+ x0 = x0 + ((dst_bias - 1) << dst_m_bits);
-+ }
-+ if ((em1 & 0x60) != 0) {
-+ x1 = x1 + ((dst_bias - 1) << dst_m_bits);
-+ }
-+ // x is subnormal (x == 0bs001 where s is the sign): Map to +-0.5 in the dst type
-+ if (em0 == 0x01) {
-+ x0 = dst_0p5 | (x0 & 0x8000);
-+ }
-+ if (em1 == 0x10) {
-+ x1 = dst_0p5 | (x1 & 0x8000);
-+ }
-+ // x is zero, do nothing
-+ if (!isnan(d)) {
-+ sumf[row] += yb[i*2] * as_type<half>(x0) * d
-+ + yb[i*2+1] * as_type<half>(x1) * d;
-+ } else {
-+ sumf[row] = d;
-+ }
-+ }
-+ }
-+
-+ yb += MXFP4 * 16;
-+ }
-+
-+ device float * dst_f32 = (device float *) dst + im*args.ne0*args.ne1 + r1*args.ne0;
-+
-+ for (int row = 0; row < nr0; ++row) {
-+ const float tot = simd_sum(sumf[row]);
-+
-+ if (tiisg == 0 && first_row + row < args.ne01) {
-+ dst_f32[first_row + row] = tot;
-+ }
-+ }
-+}
-+
-+[[host_name("kernel_mul_mv_mxfp4_f32")]]
-+kernel void kernel_mul_mv_mxfp4_f32(
-+ constant ggml_metal_kargs_mul_mv & args,
-+ device const char * src0,
-+ device const char * src1,
-+ device char * dst,
-+ threadgroup char * shmem [[threadgroup(0)]],
-+ uint3 tgpig[[threadgroup_position_in_grid]],
-+ ushort tiisg[[thread_index_in_simdgroup]],
-+ ushort sgitg[[simdgroup_index_in_threadgroup]]) {
-+ mul_mv_mxfp4_f32_impl(args, src0, src1, dst, shmem, tgpig, tiisg, sgitg);
-+}
-+
- typedef decltype(kernel_mul_mv_id<mmv_fn<kernel_mul_mv_impl<float, float4, float, float4>>>) kernel_mul_mv_id_t;
-
- template [[host_name("kernel_mul_mv_id_f32_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id<mmv_fn<kernel_mul_mv_impl<float, float4, float, float4>>>;
-@@ -6987,6 +7148,8 @@ template [[host_name("kernel_mul_mv_id_iq2_s_f32")]] kernel kernel_mul_mv_id_t
- template [[host_name("kernel_mul_mv_id_iq4_nl_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id<mmv_fn<kernel_mul_mv_iq4_nl_f32_impl<N_R0_IQ4_NL, N_SG_IQ4_NL, N_SIMDWIDTH>>>;
- template [[host_name("kernel_mul_mv_id_iq4_xs_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id<mmv_fn<kernel_mul_mv_iq4_xs_f32_impl<N_R0_IQ4_XS, N_SG_IQ4_XS, N_SIMDWIDTH>>>;
-
-+template [[host_name("kernel_mul_mv_id_mxfp4_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id<mmv_fn<mul_mv_mxfp4_f32_impl>>;
-+
- kernel void kernel_pool_2d_max_f32(
- device const float * src0,
- device float * dst,
-diff --git a/ggml/src/ggml-quants.c b/ggml/src/ggml-quants.c
-index 84ec6dfe..17c308aa 100644
---- a/ggml/src/ggml-quants.c
-+++ b/ggml/src/ggml-quants.c
-@@ -4925,6 +4925,144 @@ void quantize_row_iq2_s_ref(const float * GGML_RESTRICT x, block_iq2_s * GGML_RE
- quantize_iq2_s(x, y, 1, k, NULL);
- }
-
-+// =============================== mxfp4 (de)-quantization
-+
-+void quantize_row_mxfp4_ref(const float * GGML_RESTRICT x, block_mxfp4 * GGML_RESTRICT y, int64_t k) {
-+ static const int qk = MXFP4;
-+ static const uint32_t E8_BIAS = 127;
-+ static const uint32_t E2_BIAS = 1;
-+
-+ assert(k % qk == 0);
-+
-+ const int nb = k / qk;
-+
-+ for (int i = 0; i < nb; i++) {
-+ float amax = 0.0f; // absolute max
-+
-+ for (int j = 0; j < qk; j++) {
-+ const float v = x[i*qk + j];
-+ if (amax < fabsf(v)) {
-+ amax = fabsf(v);
-+ }
-+ }
-+
-+ const float dequant_scale = amax / 6.0f;
-+ uint32_t dequant_scale_exponent = 0;
-+ memcpy(&dequant_scale_exponent, &dequant_scale, sizeof(dequant_scale_exponent));
-+
-+ // Rounding up
-+ dequant_scale_exponent = (dequant_scale_exponent + 0x007FFFFF) & 0x7F800000;
-+ // Rounding down
-+ // dequant_scale_exponent = dequant_scale_exponent & 0x7F800000;
-+
-+ float dequant_scale_rounded = 0.0f;
-+ memcpy(&dequant_scale_rounded, &dequant_scale_exponent, sizeof(dequant_scale_rounded));
-+ float quant_scale = 0.0f;
-+ if (dequant_scale_rounded != 0.0f) {
-+ quant_scale = 1.0f / dequant_scale_rounded;
-+ }
-+
-+ y[i].d = (uint8_t)(dequant_scale_exponent >> 23);
-+
-+ for (int j = 0; j < qk/2; ++j) {
-+ const float x0 = x[i*qk + j*2]*quant_scale;
-+ const float x1 = x[i*qk + j*2+1]*quant_scale;
-+
-+ uint32_t xi0 = 0;
-+ uint32_t xi1 = 0;
-+ memcpy(&xi0, &x0, sizeof(xi0));
-+ memcpy(&xi1, &x1, sizeof(xi1));
-+
-+ uint32_t s0 = xi0 & 0x80000000;
-+ uint32_t s1 = xi1 & 0x80000000;
-+ uint32_t e0 = (xi0 >> 23) & 0xFF;
-+ uint32_t e1 = (xi1 >> 23) & 0xFF;
-+ uint32_t m0 = (xi0 & 0x7FFFFF);
-+ uint32_t m1 = (xi1 & 0x7FFFFF);
-+
-+ // 0.25 <= x < 0.75 maps to 0.5, a denormal number
-+ // Move implicit bit 1 at the beginning to mantissa for denormals
-+ // adjusted_exponents
-+ uint32_t ae0 = E8_BIAS - (e0 + 1);
-+ uint32_t ae1 = E8_BIAS - (e1 + 1);
-+ if (e0 < E8_BIAS) {
-+ m0 = (0x400000 | (m0 >> 1)) >> ae0;
-+ }
-+ if (e1 < E8_BIAS) {
-+ m1 = (0x400000 | (m1 >> 1)) >> ae1;
-+ }
-+
-+ // For normal numbers, we change the bias from 127 to 1, and for subnormals, we keep exponent as 0.
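The E8M0 block scale that this quantizer stores in y[i].d, and that the Metal and CUDA kernels decode with as_type<float>(((uint32_t)d) << 23), is nothing more than a raw IEEE-754 exponent byte. A self-contained sketch of the decode, illustrative rather than part of the patch:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Decode an MXFP4 shared scale byte (E8M0): the byte is a bare IEEE-754
 * exponent, so placing it at bit 23 of a float with a zero mantissa yields
 * 2^(d - 127) for normal exponents (d == 0 collapses to 0.0f). */
static float mxfp4_scale(uint8_t d) {
    uint32_t bits = (uint32_t)d << 23;
    float f;
    memcpy(&f, &bits, sizeof(f));
    return f;
}

int main(void) {
    printf("%g %g %g\n", mxfp4_scale(126), mxfp4_scale(127), mxfp4_scale(128));
    /* prints: 0.5 1 2 */
    return 0;
}
```

This is also why the quantizer rounds dequant_scale to a power of two by adding 0x007FFFFF and masking with 0x7F800000: only the exponent byte survives into y[i].d.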
-+ e0 = MAX(e0, E8_BIAS - E2_BIAS) - (E8_BIAS - E2_BIAS); -+ e1 = MAX(e1, E8_BIAS - E2_BIAS) - (E8_BIAS - E2_BIAS); -+ -+ // Combine sign, exponent, and mantissa, while saturating -+ // rounding nearest with tie breaking up by adding +1 to one bit right of the LSB, then shift right -+ uint32_t tmp0 = MIN((((e0 << 2) | (m0 >> 21)) + 1) >> 1, 0x7); -+ uint32_t tmp1 = MIN((((e1 << 2) | (m1 >> 21)) + 1) >> 1, 0x7); -+ uint8_t v0 = (uint8_t)((s0 >> 28) | tmp0); -+ uint8_t v1 = (uint8_t)((s1 >> 28) | tmp1); -+ y[i].qs[j] = v0; -+ y[i].qs[j] |= v1 << 4; -+ } -+ } -+} -+ -+void dequantize_row_mxfp4(const block_mxfp4 * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k) { -+ assert(k % MXFP4 == 0); -+ -+ const int nb = k / MXFP4; -+ const uint16_t dst_bias = 15; -+ const uint16_t dst_0p5 = 0x3800; -+ const uint16_t dst_m_bits = 10; -+ -+ for (int i = 0; i < nb; i++) { -+ union { -+ uint32_t as_bits; -+ float as_value; -+ } scale; -+ scale.as_bits = (((uint32_t)x[i].d) << 23); -+ for (int j = 0; j < MXFP4/2; ++j) { -+ uint16_t em0 = x[i].qs[j] & 0x07; -+ uint16_t em1 = x[i].qs[j] & 0x70; -+ // float16 values -+ uint16_t x0 = (em0 << (dst_m_bits - 1)) | ((x[i].qs[j] & 0x08) << 12); -+ uint16_t x1 = (em1 << (dst_m_bits - 5)) | ((x[i].qs[j] & 0x80) << 8); -+ -+ // Three cases: -+ // x is normal and non-zero: Correct bias -+ if ((em0 & 0x06) != 0) { -+ x0 = x0 + ((dst_bias - 1) << dst_m_bits); -+ } -+ if ((em1 & 0x60) != 0) { -+ x1 = x1 + ((dst_bias - 1) << dst_m_bits); -+ } -+ // x is subnormal (x == 0bs001 where s is the sign): Map to +-0.5 in the dst type -+ if (em0 == 0x01) { -+ x0 = dst_0p5 | (x0 & 0x8000); -+ } -+ if (em1 == 0x10) { -+ x1 = dst_0p5 | (x1 & 0x8000); -+ } -+ // x is zero, do nothing -+ -+ if (isnan(scale.as_value)) { -+ y[i*MXFP4 + j*2] = scale.as_value; -+ y[i*MXFP4 + j*2+1] = scale.as_value; -+ } else { -+ y[i*MXFP4 + j*2] = GGML_FP16_TO_FP32(x0)*scale.as_value; -+ y[i*MXFP4 + j*2+1] = GGML_FP16_TO_FP32(x1)*scale.as_value; -+ } -+ } -+ } -+} -+ -+ -+size_t quantize_mxfp4(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrow, int64_t n_per_row, const float * quant_weights) { -+ quantize_row_mxfp4_ref(src, dst, (int64_t)nrow*n_per_row); -+ return nrow * ggml_row_size(GGML_TYPE_MXFP4, n_per_row); -+} -+ - // =============================== data validation - - static bool validate_float(float f, size_t i) { -@@ -5214,7 +5352,9 @@ bool ggml_validate_row_data(enum ggml_type type, const void * data, size_t nbyte - { - VALIDATE_ROW_DATA_D_F16_IMPL(block_iq4_nl, data, nb); - } break; -- -+ case GGML_TYPE_MXFP4: -+ // TODO - anything to validate? 
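Because an E2M1 nibble has only sixteen possible codes, the bit manipulation in dequantize_row_mxfp4 above is equivalent to a small table lookup, which makes the format easy to eyeball. A sketch under the same block layout (not code from the patch):

```c
/* Every value representable by a 4-bit E2M1 code: sign in bit 3, two
 * exponent bits (bias 1), one mantissa bit. An element dequantizes to
 * scale * kMXFP4Values[code], with scale = 2^(d - 127) from the block's
 * E8M0 byte. */
static const float kMXFP4Values[16] = {
    +0.0f, +0.5f, +1.0f, +1.5f, +2.0f, +3.0f, +4.0f, +6.0f,
    -0.0f, -0.5f, -1.0f, -1.5f, -2.0f, -3.0f, -4.0f, -6.0f,
};
```

The largest representable magnitude, 6, is also why quantize_row_mxfp4_ref computes its scale as amax / 6.0f.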
-+ break; - case GGML_TYPE_I8: - case GGML_TYPE_I16: - case GGML_TYPE_I32: -diff --git a/ggml/src/ggml-quants.h b/ggml/src/ggml-quants.h -index d09173e1..2fc40f75 100644 ---- a/ggml/src/ggml-quants.h -+++ b/ggml/src/ggml-quants.h -@@ -37,6 +37,8 @@ GGML_API void quantize_row_iq4_xs_ref (const float * GGML_RESTRICT x, block_iq4_ - GGML_API void quantize_row_iq3_s_ref (const float * GGML_RESTRICT x, block_iq3_s * GGML_RESTRICT y, int64_t k); - GGML_API void quantize_row_iq2_s_ref (const float * GGML_RESTRICT x, block_iq2_s * GGML_RESTRICT y, int64_t k); - -+GGML_API void quantize_row_mxfp4_ref(const float * GGML_RESTRICT x, block_mxfp4 * GGML_RESTRICT y, int64_t k); -+ - // Dequantization - GGML_API void dequantize_row_q4_0(const block_q4_0 * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); - GGML_API void dequantize_row_q4_1(const block_q4_1 * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); -@@ -65,6 +67,8 @@ GGML_API void dequantize_row_iq4_nl (const block_iq4_nl * GGML_RESTRICT x, floa - GGML_API void dequantize_row_iq4_xs (const block_iq4_xs * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); - GGML_API void dequantize_row_iq3_s (const block_iq3_s * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); - -+GGML_API void dequantize_row_mxfp4(const block_mxfp4 * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); -+ - // Quantization utilizing an importance matrix (a.k.a. "Activation aWare Quantization") - GGML_API size_t quantize_iq2_xxs(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix); - GGML_API size_t quantize_iq2_xs (const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix); -@@ -90,6 +94,8 @@ GGML_API size_t quantize_q5_0(const float * GGML_RESTRICT src, void * GGML_RESTR - GGML_API size_t quantize_q5_1(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix); - GGML_API size_t quantize_q8_0(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix); - -+GGML_API size_t quantize_mxfp4(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix); -+ - GGML_API void iq2xs_init_impl(enum ggml_type type); - GGML_API void iq2xs_free_impl(enum ggml_type type); - GGML_API void iq3xs_init_impl(int grid_size); -diff --git a/ggml/src/ggml.c b/ggml/src/ggml.c -index 8a654624..0f3c9834 100644 ---- a/ggml/src/ggml.c -+++ b/ggml/src/ggml.c -@@ -589,11 +589,13 @@ static const struct ggml_type_traits type_traits[GGML_TYPE_COUNT] = { - .to_float = (ggml_to_float_t) dequantize_row_q4_1, - .from_float_ref = (ggml_from_float_t) quantize_row_q4_1_ref, - }, -- [4] = { // GGML_TYPE_Q4_2 -- .type_name = "DEPRECATED", -- .blck_size = 0, -- .type_size = 0, -- .is_quantized = false, -+ [GGML_TYPE_MXFP4] = { // formerly deprecated GGML_TYPE_Q4_2 -+ .type_name = "mxfp4", -+ .blck_size = MXFP4, -+ .type_size = sizeof(block_mxfp4), -+ .is_quantized = true, -+ .to_float = (ggml_to_float_t) dequantize_row_mxfp4, -+ .from_float_ref = (ggml_from_float_t) quantize_row_mxfp4_ref, - }, - [5] = { // GGML_TYPE_Q4_3 - .type_name = "DEPRECATED", -@@ -6446,6 +6448,7 @@ size_t ggml_quantize_chunk( - case GGML_TYPE_IQ1_M: result = quantize_iq1_m (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break; - case GGML_TYPE_IQ4_NL: result = quantize_iq4_nl (src + start, (char *) dst + start_row * 
row_size, nrows, n_per_row, imatrix); break; - case GGML_TYPE_IQ4_XS: result = quantize_iq4_xs (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break; -+ case GGML_TYPE_MXFP4: result = quantize_mxfp4 (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break; - case GGML_TYPE_F16: - { - size_t elemsize = sizeof(ggml_fp16_t); diff --git a/llama/patches/0026-ggml-No-alloc-mode.patch b/llama/patches/0023-ggml-No-alloc-mode.patch similarity index 98% rename from llama/patches/0026-ggml-No-alloc-mode.patch rename to llama/patches/0023-ggml-No-alloc-mode.patch index 2a8dd07ed..fa738452f 100644 --- a/llama/patches/0026-ggml-No-alloc-mode.patch +++ b/llama/patches/0023-ggml-No-alloc-mode.patch @@ -16,7 +16,7 @@ Defaults to false for newly created backend buffer types. 3 files changed, 21 insertions(+), 1 deletion(-) diff --git a/ggml/include/ggml-backend.h b/ggml/include/ggml-backend.h -index 48839339..3903c3cb 100644 +index 9424394e..b602a7c7 100644 --- a/ggml/include/ggml-backend.h +++ b/ggml/include/ggml-backend.h @@ -35,6 +35,7 @@ extern "C" { @@ -48,7 +48,7 @@ index c36c12d6..81749a5a 100644 GGML_API ggml_backend_buffer_t ggml_backend_buffer_init( diff --git a/ggml/src/ggml-backend.cpp b/ggml/src/ggml-backend.cpp -index be335e8c..84928bc3 100644 +index eded0291..05a842ed 100644 --- a/ggml/src/ggml-backend.cpp +++ b/ggml/src/ggml-backend.cpp @@ -35,12 +35,22 @@ const char * ggml_backend_buft_name(ggml_backend_buffer_type_t buft) { diff --git a/llama/patches/0024-cuda-disable-graph-compat-check-for-OP_ADD.patch b/llama/patches/0024-cuda-disable-graph-compat-check-for-OP_ADD.patch deleted file mode 100644 index 535b09eb1..000000000 --- a/llama/patches/0024-cuda-disable-graph-compat-check-for-OP_ADD.patch +++ /dev/null @@ -1,34 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Michael Yang -Date: Thu, 31 Jul 2025 12:31:58 -0700 -Subject: [PATCH] cuda: disable graph compat check for OP_ADD - ---- - ggml/src/ggml-cuda/ggml-cuda.cu | 14 -------------- - 1 file changed, 14 deletions(-) - -diff --git a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu -index bb19b06e..080e7467 100644 ---- a/ggml/src/ggml-cuda/ggml-cuda.cu -+++ b/ggml/src/ggml-cuda/ggml-cuda.cu -@@ -2509,20 +2509,6 @@ static bool check_node_graph_compatibility_and_refresh_copy_ops(ggml_backend_cud - #endif - } - -- // workarounds to exclude Gemma3n's `project_per_layer_input` operation from the batch-size heuristic, specific to ollama's implementation of gemma3n -- // number of layers is different for per_layer_proj between gemma3n:2b and gemma3n:4b, which is why we don't check that value here -- if (node->op == GGML_OP_ADD && node->src[1] && node->src[1]->ne[1] > 1 && !(node->ne[0] == 256 -- && node->ne[2] == 1 -- && node->ne[3] == 1 -- && node->src[0] ? std::string(node->src[0]->name).find(gemma3n_node_name) != std::string::npos : false -- && node->src[1] ? node->src[1]->name == gemma3n_per_layer_proj_src1_name : false)) { -- // Generally, changes in batch size or context size can cause changes to the grid size of some kernels. 
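For reference while reading the type_traits and ggml_quantize_chunk changes above: type 39 packs 32 elements into 17 bytes, one E8M0 scale byte plus 16 bytes of packed nibbles. A compile-time layout check, using a hypothetical stand-in struct for block_mxfp4:

```c
#include <assert.h>
#include <stdint.h>

#define MXFP4_BLOCK 32 /* elements per block (the MXFP4 macro in ggml) */

/* Stand-in mirroring block_mxfp4: an E8M0 scale byte followed by
 * 32 packed 4-bit E2M1 values. */
typedef struct {
    uint8_t d;
    uint8_t qs[MXFP4_BLOCK / 2];
} block_mxfp4_example;

int main(void) {
    /* 1 + 16 = 17 bytes per 32 elements; a 4096-wide row therefore
     * occupies 4096 / 32 * 17 = 2176 bytes, which is what
     * ggml_row_size(GGML_TYPE_MXFP4, 4096) reports. */
    assert(sizeof(block_mxfp4_example) == 17);
    return 0;
}
```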
-- use_cuda_graph = false; --#ifndef NDEBUG -- GGML_LOG_INFO("%s: disabling CUDA graphs due to batch size > 1 [%s] [%ld %ld %ld %ld]\n", __func__, node->name, node->ne[0], node->ne[1], node->ne[2], node->ne[3]); --#endif -- } -- - if (node->op == GGML_OP_CPY) { - - // Store the pointers which are updated for each token, such that these can be sent diff --git a/llama/sampling_ext.cpp b/llama/sampling_ext.cpp index 1cacd3015..e04fb5a3d 100644 --- a/llama/sampling_ext.cpp +++ b/llama/sampling_ext.cpp @@ -6,6 +6,7 @@ #include "llama-model.h" #include "llama-model-loader.h" #include "llama-grammar.h" +#include "nlohmann/json.hpp" struct common_sampler *common_sampler_cinit(const struct llama_model *model, struct common_sampler_cparams *params) { try { @@ -94,7 +95,7 @@ struct llama_grammar *grammar_init(char* grammar, uint32_t* tokens, size_t n_tok ollama_vocab *vocab = new ollama_vocab(); vocab->set_eog_tokens(eog_tokens, n_eog_tokens); vocab->add_token_pieces(tokens, n_tokens, pieces); - + struct llama_grammar *g = llama_grammar_init_impl(nullptr, vocab, grammar, "root", false, nullptr, 0, nullptr, 0); if (g == nullptr) { LLAMA_LOG_ERROR("%s: failed to initialize grammar\n", __func__); diff --git a/ml/backend.go b/ml/backend.go index 6e76d32d5..3eb84c726 100644 --- a/ml/backend.go +++ b/ml/backend.go @@ -263,6 +263,7 @@ type Tensor interface { Mulmat(ctx Context, t2 Tensor) Tensor MulmatFullPrec(ctx Context, t2 Tensor) Tensor MulmatID(ctx Context, t2, ids Tensor) Tensor + AddID(ctx Context, t2, ids Tensor) Tensor Softmax(ctx Context) Tensor LayerNorm(ctx Context, weight, bias Tensor, eps float32) Tensor @@ -283,6 +284,7 @@ type Tensor interface { SILU(ctx Context) Tensor RELU(ctx Context) Tensor Sigmoid(ctx Context) Tensor + SwiGLU(ctx Context, up Tensor, alpha, limit float32) Tensor Reshape(ctx Context, shape ...int) Tensor View(ctx Context, offset int, shape ...int) Tensor @@ -332,7 +334,7 @@ type Tensor interface { // kqv := value.Mulmat(ctx, kq) // return kqv.Permute(ctx, 0, 2, 1, 3).Contiguous(ctx) type ScaledDotProductAttention interface { - ScaledDotProductAttention(ctx Context, key, value, mask Tensor, scale float64) Tensor + ScaledDotProductAttention(ctx Context, key, value, mask, sinks Tensor, scale float64) Tensor } type number interface { diff --git a/ml/backend/ggml/ggml.go b/ml/backend/ggml/ggml.go index aa241e9b6..d64e27fa7 100644 --- a/ml/backend/ggml/ggml.go +++ b/ml/backend/ggml/ggml.go @@ -267,7 +267,16 @@ func New(modelPath string, params ml.BackendParams) (ml.Backend, error) { return tt } - tt := C.ggml_new_tensor(ctxs[bt], t.source.Kind, C.int(len(t.source.Shape)), (*C.int64_t)(unsafe.Pointer(&t.source.Shape[0]))) + kind := t.source.Kind + if t.source.Kind == 4 { + // transform raw mxfp4 stream to ggml mxfp4 format + kind = 39 + } else if t.source.Kind == uint32(fsggml.TensorTypeBF16) && strings.HasSuffix(t.source.Name, "_exps.bias") { + // transform "_exps.bias" from bf16 to fp32; add_ids only supports fp32 tensors + kind = uint32(fsggml.TensorTypeF32) + } + + tt := C.ggml_new_tensor(ctxs[bt], kind, C.int(len(t.source.Shape)), (*C.int64_t)(unsafe.Pointer(&t.source.Shape[0]))) C.ggml_set_name(tt, cname) slog.Log(context.TODO(), logutil.LevelTrace, "created tensor", "name", name, "shape", t.source.Shape, "dtype", t.source.Kind, "buffer_type", C.GoString(C.ggml_backend_buft_name(bt))) @@ -507,6 +516,99 @@ func (b *Backend) Load(ctx context.Context, progress func(float32)) error { } defer file.Close() sr := io.NewSectionReader(file, int64(b.meta.Tensors().Offset+t.Offset), 
int64(t.Size())) + + if t.Kind == 4 && tts[0]._type == 39 { + // source is mxfp4, target is ggml mxfp4 + + const BS = 17 // MXFP4 block size + bts := make([]byte, 8*BS*format.KibiByte) // ~128k block aligned + var s uint64 + for s < t.Size() { + // Stop if either the parent context has been canceled or if any of the other tensors returned an error + if err := ctx.Err(); err != nil { + return err + } + n, err := io.ReadFull(sr, bts[:min(len(bts), int(t.Size()-s))]) + if err != nil { + slog.Warn("file read error", "file", b.modelPath, "error", err) + return err + } + for j := range n / BS { + for i := 1; i < BS; i++ { + // swap nibbles + t_lo := bts[j*BS+i] & 0x0F + t_hi := bts[j*BS+i] & 0xF0 + bts[j*BS+i] = (t_lo << 4) | (t_hi >> 4) + } + // transform aaaa...bbbb... to abababab... + oi := 0 + tmp := [16]byte{} + for i := 1; i < 9; i++ { + blk_a0 := bts[j*BS+i] & 0xF0 + blk_a1 := bts[j*BS+i] << 4 + blk_b0 := bts[j*BS+i+8] >> 4 + blk_b1 := bts[j*BS+i+8] & 0x0F + // swap once more + out0 := blk_a0 | blk_b0 + out1 := blk_a1 | blk_b1 + out_h0 := out0 & 0xF0 + out_l0 := out0 & 0x0F + out_h1 := out1 & 0xF0 + out_l1 := out1 & 0x0F + out0 = (out_h0 >> 4) | (out_l0 << 4) + out1 = (out_h1 >> 4) | (out_l1 << 4) + tmp[oi] = out0 + oi++ + tmp[oi] = out1 + oi++ + } + for i := range tmp { + bts[j*BS+i+1] = tmp[i] + } + } + + for _, tt := range tts { + C.ggml_backend_tensor_set(tt, unsafe.Pointer(&bts[0]), C.size_t(s), C.size_t(n)) + } + + s += uint64(n) + + if progress != nil { + done := doneBytes.Add(uint64(n)) + progress(float32(done) / float32(totalBytes)) + } + } + return nil + } else if strings.HasSuffix(t.Name, "_exps.bias") && t.Kind == 30 && tts[0]._type == 0 { + // source is bf16, target is ggml fp32 + + // data is bf16 but we need to convert to fp32 + bts := make([]byte, 128*format.KibiByte) + var e uint64 + for e < t.Elements() { + // Stop if either the parent context has been canceled or if any of the other tensors returned an error + if err := ctx.Err(); err != nil { + return err + } + n, err := io.ReadFull(sr, bts[:min(len(bts), int(t.Elements()-e)*2)]) + if err != nil { + slog.Warn("file read error", "file", b.modelPath, "error", err) + return err + } + fp32 := ConvertToF32(bts, uint32(fsggml.TensorTypeBF16), uint64(n/2)) + + for _, tt := range tts { + C.ggml_backend_tensor_set(tt, unsafe.Pointer(&fp32[0]), C.size_t(e*4), C.size_t(n*2)) + } + e += uint64(n / 2) + if progress != nil { + done := doneBytes.Add(uint64(n)) + progress(float32(done) / float32(totalBytes)) + } + } + return nil + } + bts := make([]byte, 128*format.KibiByte) var s uint64 @@ -1063,6 +1165,13 @@ func (t *Tensor) MulmatID(ctx ml.Context, t2, ids ml.Tensor) ml.Tensor { } } +func (t *Tensor) AddID(ctx ml.Context, t2, ids ml.Tensor) ml.Tensor { + return &Tensor{ + b: t.b, + t: C.ggml_add_id(ctx.(*Context).ctx, t.t, t2.(*Tensor).t, ids.(*Tensor).t), + } +} + func (t *Tensor) LayerNorm(ctx ml.Context, w, b ml.Tensor, eps float32) ml.Tensor { tt := C.ggml_norm(ctx.(*Context).ctx, t.t, C.float(eps)) if w != nil { @@ -1310,6 +1419,13 @@ func (t *Tensor) RELU(ctx ml.Context) ml.Tensor { } } +func (t *Tensor) SwiGLU(ctx ml.Context, up ml.Tensor, alpha, limit float32) ml.Tensor { + return &Tensor{ + b: t.b, + t: C.ggml_swiglu_oai(ctx.(*Context).ctx, t.t, up.(*Tensor).t, C.float(alpha), C.float(limit)), + } +} + func (t *Tensor) Conv2D(ctx ml.Context, t2 ml.Tensor, s0, s1, p0, p1, d0, d1 int) ml.Tensor { return &Tensor{ b: t.b, @@ -1338,7 +1454,7 @@ func (t *Tensor) Set(ctx ml.Context, t2 ml.Tensor, offset int, strides ...int) m 
return &Tensor{b: t.b, t: tt} } -func (t *Tensor) ScaledDotProductAttention(ctx ml.Context, key, value, mask ml.Tensor, scale float64) ml.Tensor { +func (t *Tensor) ScaledDotProductAttention(ctx ml.Context, key, value, mask, sinks ml.Tensor, scale float64) ml.Tensor { var kqMask *C.struct_ggml_tensor if mask != nil { kqMask = mask.(*Tensor).t @@ -1351,6 +1467,9 @@ func (t *Tensor) ScaledDotProductAttention(ctx ml.Context, key, value, mask ml.T value = value.Permute(ctx, 0, 2, 1, 3) kqv := C.ggml_flash_attn_ext(ctx.(*Context).ctx, query.(*Tensor).t, key.(*Tensor).t, value.(*Tensor).t, kqMask, C.float(scale), 0, 0) + if sinks != nil { + C.ggml_flash_attn_ext_add_sinks(kqv, sinks.(*Tensor).t) + } C.ggml_flash_attn_ext_set_prec(kqv, C.GGML_PREC_F32) return &Tensor{b: t.b, t: kqv} } else { @@ -1359,6 +1478,9 @@ func (t *Tensor) ScaledDotProductAttention(ctx ml.Context, key, value, mask ml.T b: t.b, t: C.ggml_soft_max_ext(ctx.(*Context).ctx, kq.(*Tensor).t, kqMask, C.float(scale), 0), } + if sinks != nil { + C.ggml_soft_max_add_sinks(kq.(*Tensor).t, sinks.(*Tensor).t) + } kqv := value.Mulmat(ctx, kq) return kqv.Permute(ctx, 0, 2, 1, 3).Contiguous(ctx) diff --git a/ml/backend/ggml/ggml/.rsync-filter b/ml/backend/ggml/ggml/.rsync-filter index ddad16e21..a2b1b7d94 100644 --- a/ml/backend/ggml/ggml/.rsync-filter +++ b/ml/backend/ggml/ggml/.rsync-filter @@ -1,23 +1,29 @@ +protect .rsync-filter protect *.go protect *-embed.* -include include/ -include src/ -include src/CMakeLists.txt -include src/**/CMakeLists.txt -include src/ggml-blas/ -include src/ggml-cpu/ -include src/ggml-cpu/amx/ -include src/ggml-cpu/llamafile/ -include src/ggml-cuda/ -include src/ggml-cuda/vendors/ -include src/ggml-cuda/template-instances/ -include src/ggml-hip/ -include src/ggml-metal/ -include *.c -include *.h +protect ollama-* +hide /CMakeLists.txt +hide /include/ggml-webgpu.h +include /cmake/ +include /cmake/common.cmake +include /include/ +include /src/ +include /src/ggml-blas/ +include /src/ggml-cpu/ +include /src/ggml-cpu/amx/ +include /src/ggml-cpu/arch/ +include /src/ggml-cpu/arch/arm/ +include /src/ggml-cpu/arch/x86/ +include /src/ggml-cpu/llamafile/ +include /src/ggml-cuda/ +include /src/ggml-cuda/vendors/ +include /src/ggml-cuda/template-instances/ +include /src/ggml-hip/ +include /src/ggml-metal/ +include CMakeLists.txt +include *.[chm] include *.cpp include *.cu include *.cuh -include *.m include *.metal -exclude * +hide * diff --git a/ml/backend/ggml/ggml/cmake/common.cmake b/ml/backend/ggml/ggml/cmake/common.cmake index 1976d0ae9..cb6638833 100644 --- a/ml/backend/ggml/ggml/cmake/common.cmake +++ b/ml/backend/ggml/ggml/cmake/common.cmake @@ -24,3 +24,27 @@ function(ggml_get_flags CCID CCVER) set(GF_C_FLAGS ${C_FLAGS} PARENT_SCOPE) set(GF_CXX_FLAGS ${CXX_FLAGS} PARENT_SCOPE) endfunction() + +function(ggml_get_system_arch) + if (CMAKE_OSX_ARCHITECTURES STREQUAL "arm64" OR + CMAKE_GENERATOR_PLATFORM_LWR STREQUAL "arm64" OR + (NOT CMAKE_OSX_ARCHITECTURES AND NOT CMAKE_GENERATOR_PLATFORM_LWR AND + CMAKE_SYSTEM_PROCESSOR MATCHES "^(aarch64|arm.*|ARM64)$")) + set(GGML_SYSTEM_ARCH "ARM" PARENT_SCOPE) + elseif (CMAKE_OSX_ARCHITECTURES STREQUAL "x86_64" OR + CMAKE_GENERATOR_PLATFORM_LWR MATCHES "^(x86_64|i686|amd64|x64|win32)$" OR + (NOT CMAKE_OSX_ARCHITECTURES AND NOT CMAKE_GENERATOR_PLATFORM_LWR AND + CMAKE_SYSTEM_PROCESSOR MATCHES "^(x86_64|i686|AMD64|amd64)$")) + set(GGML_SYSTEM_ARCH "x86" PARENT_SCOPE) + elseif (${CMAKE_SYSTEM_PROCESSOR} MATCHES "ppc|power") + set(GGML_SYSTEM_ARCH "PowerPC" PARENT_SCOPE) + elseif 
(${CMAKE_SYSTEM_PROCESSOR} MATCHES "loongarch64") + set(GGML_SYSTEM_ARCH "loongarch64" PARENT_SCOPE) + elseif (${CMAKE_SYSTEM_PROCESSOR} MATCHES "riscv64") + set(GGML_SYSTEM_ARCH "riscv64" PARENT_SCOPE) + elseif (${CMAKE_SYSTEM_PROCESSOR} MATCHES "s390x") + set(GGML_SYSTEM_ARCH "s390x" PARENT_SCOPE) + else() + set(GGML_SYSTEM_ARCH "UNKNOWN" PARENT_SCOPE) + endif() +endfunction() diff --git a/ml/backend/ggml/ggml/include/ggml-backend.h b/ml/backend/ggml/ggml/include/ggml-backend.h index 3903c3cbc..b602a7c78 100644 --- a/ml/backend/ggml/ggml/include/ggml-backend.h +++ b/ml/backend/ggml/ggml/include/ggml-backend.h @@ -347,7 +347,7 @@ extern "C" { typedef bool (*ggml_backend_eval_callback)(int node_index, struct ggml_tensor * t1, struct ggml_tensor * t2, void * user_data); // Compare the output of two backends - GGML_API bool ggml_backend_compare_graph_backend(ggml_backend_t backend1, ggml_backend_t backend2, struct ggml_cgraph * graph, ggml_backend_eval_callback callback, void * user_data); + GGML_API bool ggml_backend_compare_graph_backend(ggml_backend_t backend1, ggml_backend_t backend2, struct ggml_cgraph * graph, ggml_backend_eval_callback callback, void * user_data, struct ggml_tensor * test_node); // Tensor initialization GGML_API enum ggml_status ggml_backend_tensor_alloc(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, void * addr); diff --git a/ml/backend/ggml/ggml/include/ggml-cpu.h b/ml/backend/ggml/ggml/include/ggml-cpu.h index de77a875e..be40b1009 100644 --- a/ml/backend/ggml/ggml/include/ggml-cpu.h +++ b/ml/backend/ggml/ggml/include/ggml-cpu.h @@ -101,6 +101,7 @@ extern "C" { GGML_BACKEND_API int ggml_cpu_has_riscv_v (void); GGML_BACKEND_API int ggml_cpu_has_vsx (void); GGML_BACKEND_API int ggml_cpu_has_vxe (void); + GGML_BACKEND_API int ggml_cpu_has_nnpa (void); GGML_BACKEND_API int ggml_cpu_has_wasm_simd (void); GGML_BACKEND_API int ggml_cpu_has_llamafile (void); @@ -133,6 +134,7 @@ extern "C" { GGML_BACKEND_API ggml_backend_reg_t ggml_backend_cpu_reg(void); + GGML_BACKEND_API void ggml_cpu_fp32_to_fp32(const float *, float *, int64_t); GGML_BACKEND_API void ggml_cpu_fp32_to_fp16(const float *, ggml_fp16_t *, int64_t); GGML_BACKEND_API void ggml_cpu_fp16_to_fp32(const ggml_fp16_t *, float *, int64_t); GGML_BACKEND_API void ggml_cpu_fp32_to_bf16(const float *, ggml_bf16_t *, int64_t); diff --git a/ml/backend/ggml/ggml/include/ggml-kompute.h b/ml/backend/ggml/ggml/include/ggml-kompute.h deleted file mode 100644 index 154aa56a7..000000000 --- a/ml/backend/ggml/ggml/include/ggml-kompute.h +++ /dev/null @@ -1,50 +0,0 @@ -#pragma once - -#include "ggml.h" -#include "ggml-backend.h" - -#include -#include -#include - -#ifdef __cplusplus -extern "C" { -#endif - -#define GGML_KOMPUTE_MAX_DEVICES 16 - -struct ggml_vk_device { - int index; - int type; // same as VkPhysicalDeviceType - size_t heapSize; - const char * name; - const char * vendor; - int subgroupSize; - uint64_t bufferAlignment; - uint64_t maxAlloc; -}; - -struct ggml_vk_device * ggml_vk_available_devices(size_t memoryRequired, size_t * count); -bool ggml_vk_get_device(struct ggml_vk_device * device, size_t memoryRequired, const char * name); -bool ggml_vk_has_vulkan(void); -bool ggml_vk_has_device(void); -struct ggml_vk_device ggml_vk_current_device(void); - -// -// backend API -// - -// forward declaration -typedef struct ggml_backend * ggml_backend_t; - -GGML_BACKEND_API ggml_backend_t ggml_backend_kompute_init(int device); - -GGML_BACKEND_API bool ggml_backend_is_kompute(ggml_backend_t backend); - 
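The conversion helpers declared in ggml-cpu.h above sit alongside the loader change that widens the BF16 "_exps.bias" tensors to FP32. BF16 is simply the top sixteen bits of an IEEE-754 float, so widening is a single shift; a minimal sketch of that conversion (illustrative, with a hypothetical helper name):

```c
#include <stdint.h>
#include <string.h>

/* BF16 keeps the sign, the 8 exponent bits, and the top 7 mantissa bits
 * of an FP32 value, so widening to FP32 is a 16-bit left shift. */
static float bf16_to_f32(uint16_t h) {
    uint32_t bits = (uint32_t)h << 16;
    float f;
    memcpy(&f, &bits, sizeof(f));
    return f;
}

/* Examples: 0x3F80 -> 1.0f, 0x4000 -> 2.0f, 0xBF80 -> -1.0f */
```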
-GGML_BACKEND_API ggml_backend_buffer_type_t ggml_backend_kompute_buffer_type(int device); - -GGML_BACKEND_API ggml_backend_reg_t ggml_backend_kompute_reg(void); - -#ifdef __cplusplus -} -#endif diff --git a/ml/backend/ggml/ggml/include/ggml-opt.h b/ml/backend/ggml/ggml/include/ggml-opt.h index da0c24b46..74ec080a0 100644 --- a/ml/backend/ggml/ggml/include/ggml-opt.h +++ b/ml/backend/ggml/ggml/include/ggml-opt.h @@ -128,6 +128,8 @@ extern "C" { // set gradients to zero, initilize loss, and optionally reset the optimizer GGML_API void ggml_opt_reset(ggml_opt_context_t opt_ctx, bool optimizer); + GGML_API bool ggml_opt_static_graphs(ggml_opt_context_t opt_ctx); // whether the graphs are allocated_statically + // get underlying tensors that store data // if not using static graphs these pointers become invalid with the next call to ggml_opt_alloc GGML_API struct ggml_tensor * ggml_opt_inputs( ggml_opt_context_t opt_ctx); // forward graph input tensor diff --git a/ml/backend/ggml/ggml/include/ggml.h b/ml/backend/ggml/ggml/include/ggml.h index 873baa24f..2f06e1e39 100644 --- a/ml/backend/ggml/ggml/include/ggml.h +++ b/ml/backend/ggml/ggml/include/ggml.h @@ -304,6 +304,16 @@ GGML_TENSOR_LOCALS(int64_t, ne, dst, ne) \ GGML_TENSOR_LOCALS(size_t, nb, dst, nb) +#define GGML_TENSOR_TERNARY_OP_LOCALS \ + GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne) \ + GGML_TENSOR_LOCALS(size_t, nb0, src0, nb) \ + GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne) \ + GGML_TENSOR_LOCALS(size_t, nb1, src1, nb) \ + GGML_TENSOR_LOCALS(int64_t, ne2, src2, ne) \ + GGML_TENSOR_LOCALS(size_t, nb2, src2, nb) \ + GGML_TENSOR_LOCALS(int64_t, ne, dst, ne) \ + GGML_TENSOR_LOCALS(size_t, nb, dst, nb) + #define GGML_TENSOR_BINARY_OP_LOCALS01 \ GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne) \ GGML_TENSOR_LOCALS(size_t, nb0, src0, nb) \ @@ -314,6 +324,13 @@ extern "C" { #endif + // Function type used in fatal error callbacks + typedef void (*ggml_abort_callback_t)(const char * error_message); + + // Set the abort callback (passing null will restore original abort functionality: printing a message to stdout) + // Returns the old callback for chaining + GGML_API ggml_abort_callback_t ggml_set_abort_callback(ggml_abort_callback_t callback); + GGML_NORETURN GGML_ATTRIBUTE_FORMAT(3, 4) GGML_API void ggml_abort(const char * file, int line, const char * fmt, ...); @@ -353,7 +370,7 @@ extern "C" { GGML_TYPE_F16 = 1, GGML_TYPE_Q4_0 = 2, GGML_TYPE_Q4_1 = 3, - GGML_TYPE_MXFP4 = 4, // Formerly removed type GGML_TYPE_Q4_2 + // GGML_TYPE_Q4_2 = 4, support has been removed // GGML_TYPE_Q4_3 = 5, support has been removed GGML_TYPE_Q5_0 = 6, GGML_TYPE_Q5_1 = 7, @@ -388,7 +405,8 @@ extern "C" { // GGML_TYPE_IQ4_NL_4_4 = 36, // GGML_TYPE_IQ4_NL_4_8 = 37, // GGML_TYPE_IQ4_NL_8_8 = 38, - GGML_TYPE_COUNT = 39, + GGML_TYPE_MXFP4 = 39, // MXFP4 (1 block) + GGML_TYPE_COUNT = 40, }; // precision @@ -423,6 +441,7 @@ extern "C" { GGML_FTYPE_MOSTLY_IQ4_XS = 22, // except 1d tensors GGML_FTYPE_MOSTLY_IQ1_M = 23, // except 1d tensors GGML_FTYPE_MOSTLY_BF16 = 24, // except 1d tensors + GGML_FTYPE_MOSTLY_MXFP4 = 25, // except 1d tensors }; // available tensor operations: @@ -431,6 +450,7 @@ extern "C" { GGML_OP_DUP, GGML_OP_ADD, + GGML_OP_ADD_ID, GGML_OP_ADD1, GGML_OP_ACC, GGML_OP_SUB, @@ -470,6 +490,7 @@ extern "C" { GGML_OP_TRANSPOSE, GGML_OP_GET_ROWS, GGML_OP_GET_ROWS_BACK, + GGML_OP_SET_ROWS, GGML_OP_DIAG, GGML_OP_DIAG_MASK_INF, GGML_OP_DIAG_MASK_ZERO, @@ -481,14 +502,16 @@ extern "C" { GGML_OP_CONV_TRANSPOSE_1D, GGML_OP_IM2COL, GGML_OP_IM2COL_BACK, + GGML_OP_CONV_2D, 
GGML_OP_CONV_2D_DW, GGML_OP_CONV_TRANSPOSE_2D, GGML_OP_POOL_1D, GGML_OP_POOL_2D, GGML_OP_POOL_2D_BACK, - GGML_OP_UPSCALE, // nearest interpolate + GGML_OP_UPSCALE, GGML_OP_PAD, GGML_OP_PAD_REFLECT_1D, + GGML_OP_ROLL, GGML_OP_ARANGE, GGML_OP_TIMESTEP_EMBEDDING, GGML_OP_ARGSORT, @@ -518,6 +541,8 @@ extern "C" { GGML_OP_CROSS_ENTROPY_LOSS_BACK, GGML_OP_OPT_STEP_ADAMW, + GGML_OP_GLU, + GGML_OP_COUNT, }; @@ -536,10 +561,22 @@ extern "C" { GGML_UNARY_OP_HARDSWISH, GGML_UNARY_OP_HARDSIGMOID, GGML_UNARY_OP_EXP, + GGML_UNARY_OP_GELU_ERF, GGML_UNARY_OP_COUNT, }; + enum ggml_glu_op { + GGML_GLU_OP_REGLU, + GGML_GLU_OP_GEGLU, + GGML_GLU_OP_SWIGLU, + GGML_GLU_OP_SWIGLU_OAI, + GGML_GLU_OP_GEGLU_ERF, + GGML_GLU_OP_GEGLU_QUICK, + + GGML_GLU_OP_COUNT, + }; + enum ggml_object_type { GGML_OBJECT_TYPE_TENSOR, GGML_OBJECT_TYPE_GRAPH, @@ -625,6 +662,9 @@ extern "C" { // misc + GGML_API const char * ggml_version(void); + GGML_API const char * ggml_commit(void); + GGML_API void ggml_time_init(void); // call this once at the beginning of the program GGML_API int64_t ggml_time_ms(void); GGML_API int64_t ggml_time_us(void); @@ -655,6 +695,7 @@ extern "C" { GGML_API const char * ggml_op_symbol(enum ggml_op op); GGML_API const char * ggml_unary_op_name(enum ggml_unary_op op); + GGML_API const char * ggml_glu_op_name(enum ggml_glu_op op); GGML_API const char * ggml_op_desc(const struct ggml_tensor * t); // unary or op name GGML_API size_t ggml_element_size(const struct ggml_tensor * tensor); @@ -685,6 +726,9 @@ extern "C" { // true for tensor that is stored in memory as CxWxHxN and has been permuted to WxHxCxN GGML_API bool ggml_is_contiguous_channels(const struct ggml_tensor * tensor); + // true if the elements in dimension 0 are contiguous, or there is just 1 block of elements + GGML_API bool ggml_is_contiguous_rows(const struct ggml_tensor * tensor); + GGML_API bool ggml_are_same_shape (const struct ggml_tensor * t0, const struct ggml_tensor * t1); GGML_API bool ggml_are_same_stride(const struct ggml_tensor * t0, const struct ggml_tensor * t1); @@ -756,6 +800,7 @@ extern "C" { GGML_API void ggml_unravel_index(const struct ggml_tensor * tensor, int64_t i, int64_t * i0, int64_t * i1, int64_t * i2, int64_t * i3); GGML_API enum ggml_unary_op ggml_get_unary_op(const struct ggml_tensor * tensor); + GGML_API enum ggml_glu_op ggml_get_glu_op(const struct ggml_tensor * tensor); GGML_API void * ggml_get_data (const struct ggml_tensor * tensor); GGML_API float * ggml_get_data_f32(const struct ggml_tensor * tensor); @@ -800,6 +845,13 @@ extern "C" { struct ggml_tensor * b, enum ggml_type type); + // dst[i0, i1, i2] = a[i0, i1, i2] + b[i0, ids[i1, i2]] + GGML_API struct ggml_tensor * ggml_add_id( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b, + struct ggml_tensor * ids); + GGML_API struct ggml_tensor * ggml_add1( struct ggml_context * ctx, struct ggml_tensor * a, @@ -934,6 +986,15 @@ extern "C" { struct ggml_tensor * a, struct ggml_tensor * b); + // repeat a to the specified shape + GGML_API struct ggml_tensor * ggml_repeat_4d( + struct ggml_context * ctx, + struct ggml_tensor * a, + int64_t ne0, + int64_t ne1, + int64_t ne2, + int64_t ne3); + // sums repetitions in a into shape of b GGML_API struct ggml_tensor * ggml_repeat_back( struct ggml_context * ctx, @@ -1024,6 +1085,16 @@ extern "C" { struct ggml_context * ctx, struct ggml_tensor * a); + // GELU using erf (error function) when possible + // some backends may fallback to approximation based on Abramowitz and Stegun formula + GGML_API 
struct ggml_tensor * ggml_gelu_erf( + struct ggml_context * ctx, + struct ggml_tensor * a); + + GGML_API struct ggml_tensor * ggml_gelu_erf_inplace( + struct ggml_context * ctx, + struct ggml_tensor * a); + GGML_API struct ggml_tensor * ggml_gelu_quick( struct ggml_context * ctx, struct ggml_tensor * a); @@ -1065,6 +1136,96 @@ extern "C" { struct ggml_context * ctx, struct ggml_tensor * a); + // gated linear unit ops + // A: n columns, r rows, + // result is n / 2 columns, r rows, + // expects gate in second half of row, unless swapped is true + GGML_API struct ggml_tensor * ggml_glu( + struct ggml_context * ctx, + struct ggml_tensor * a, + enum ggml_glu_op op, + bool swapped); + + GGML_API struct ggml_tensor * ggml_reglu( + struct ggml_context * ctx, + struct ggml_tensor * a); + + GGML_API struct ggml_tensor * ggml_reglu_swapped( + struct ggml_context * ctx, + struct ggml_tensor * a); + + GGML_API struct ggml_tensor * ggml_geglu( + struct ggml_context * ctx, + struct ggml_tensor * a); + + GGML_API struct ggml_tensor * ggml_geglu_swapped( + struct ggml_context * ctx, + struct ggml_tensor * a); + + GGML_API struct ggml_tensor * ggml_swiglu( + struct ggml_context * ctx, + struct ggml_tensor * a); + + GGML_API struct ggml_tensor * ggml_swiglu_swapped( + struct ggml_context * ctx, + struct ggml_tensor * a); + + GGML_API struct ggml_tensor * ggml_geglu_erf( + struct ggml_context * ctx, + struct ggml_tensor * a); + + GGML_API struct ggml_tensor * ggml_geglu_erf_swapped( + struct ggml_context * ctx, + struct ggml_tensor * a); + + GGML_API struct ggml_tensor * ggml_geglu_quick( + struct ggml_context * ctx, + struct ggml_tensor * a); + + GGML_API struct ggml_tensor * ggml_geglu_quick_swapped( + struct ggml_context * ctx, + struct ggml_tensor * a); + + // A: n columns, r rows, + // B: n columns, r rows, + GGML_API struct ggml_tensor * ggml_glu_split( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b, + enum ggml_glu_op op); + + GGML_API struct ggml_tensor * ggml_reglu_split( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b); + + GGML_API struct ggml_tensor * ggml_geglu_split( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b); + + GGML_API struct ggml_tensor * ggml_swiglu_split( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b); + + GGML_API struct ggml_tensor * ggml_geglu_erf_split( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b); + + GGML_API struct ggml_tensor * ggml_geglu_quick_split( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b); + + GGML_API struct ggml_tensor * ggml_swiglu_oai( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b, + float alpha, + float limit); + // normalize along rows GGML_API struct ggml_tensor * ggml_norm( struct ggml_context * ctx, @@ -1164,6 +1325,19 @@ extern "C" { struct ggml_tensor * a, float s); + // x = s * a + b + GGML_API struct ggml_tensor * ggml_scale_bias( + struct ggml_context * ctx, + struct ggml_tensor * a, + float s, + float b); + + GGML_API struct ggml_tensor * ggml_scale_bias_inplace( + struct ggml_context * ctx, + struct ggml_tensor * a, + float s, + float b); + // b -> view(a,offset,nb1,nb2,3), return modified a GGML_API struct ggml_tensor * ggml_set( struct ggml_context * ctx, @@ -1354,6 +1528,23 @@ extern "C" { struct ggml_tensor * b, // row indices struct ggml_tensor * c); // data for ggml_get_rows, only used for its shape + // a TD 
[n_embd, ne1, ne2, ne3] + // b TS [n_embd, n_rows, ne02, ne03] | ne02 == ne2, ne03 == ne3 + // c I64 [n_rows, ne11, ne12, 1] | c[i] in [0, ne1) + // + // undefined behavior if destination rows overlap + // + // broadcast: + // ne2 % ne11 == 0 + // ne3 % ne12 == 0 + // + // return view(a) + GGML_API struct ggml_tensor * ggml_set_rows( + struct ggml_context * ctx, + struct ggml_tensor * a, // destination + struct ggml_tensor * b, // source + struct ggml_tensor * c); // row indices + GGML_API struct ggml_tensor * ggml_diag( struct ggml_context * ctx, struct ggml_tensor * a); @@ -1391,8 +1582,14 @@ extern "C" { struct ggml_context * ctx, struct ggml_tensor * a); + // a [ne0, ne01, ne02, ne03] + // mask [ne0, ne11, ne12, ne13] | ne11 >= ne01, F16 or F32, optional + // + // broadcast: + // ne02 % ne12 == 0 + // ne03 % ne13 == 0 + // // fused soft_max(a*scale + mask*(ALiBi slope)) - // mask is optional // max_bias = 0.0f for no ALiBi GGML_API struct ggml_tensor * ggml_soft_max_ext( struct ggml_context * ctx, @@ -1401,6 +1598,10 @@ extern "C" { float scale, float max_bias); + GGML_API void ggml_soft_max_add_sinks( + struct ggml_tensor * a, + struct ggml_tensor * sinks); + GGML_API struct ggml_tensor * ggml_soft_max_ext_back( struct ggml_context * ctx, struct ggml_tensor * a, @@ -1702,6 +1903,17 @@ extern "C" { struct ggml_tensor * b, int stride); + GGML_API struct ggml_tensor * ggml_conv_2d_direct( + struct ggml_context * ctx, + struct ggml_tensor * a, // convolution kernel [KW, KH, IC, OC] + struct ggml_tensor * b, // input data [W, H, C, N] + int s0, // stride dimension 0 + int s1, // stride dimension 1 + int p0, // padding dimension 0 + int p1, // padding dimension 1 + int d0, // dilation dimension 0 + int d1); // dilation dimension 1 + enum ggml_op_pool { GGML_OP_POOL_MAX, GGML_OP_POOL_AVG, @@ -1744,6 +1956,12 @@ extern "C" { enum ggml_scale_mode { GGML_SCALE_MODE_NEAREST = 0, GGML_SCALE_MODE_BILINEAR = 1, + + GGML_SCALE_MODE_COUNT + }; + + enum ggml_scale_flag { + GGML_SCALE_FLAG_ALIGN_CORNERS = (1 << 8) }; // interpolate @@ -1756,14 +1974,26 @@ extern "C" { // interpolate // interpolate scale to specified dimensions - GGML_API struct ggml_tensor * ggml_upscale_ext( + GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_upscale_ext( struct ggml_context * ctx, struct ggml_tensor * a, int ne0, int ne1, int ne2, int ne3, - enum ggml_scale_mode mode); + enum ggml_scale_mode mode), + "use ggml_interpolate instead"); + + // Up- or downsamples the input to the specified size. + // 2D scale modes (eg. bilinear) are applied to the first two dimensions. + GGML_API struct ggml_tensor * ggml_interpolate( + struct ggml_context * ctx, + struct ggml_tensor * a, + int64_t ne0, + int64_t ne1, + int64_t ne2, + int64_t ne3, + uint32_t mode); // ggml_scale_mode [ | ggml_scale_flag...] // pad each dimension with zeros: [x, ..., x] -> [x, ..., x, 0, ..., 0] GGML_API struct ggml_tensor * ggml_pad( @@ -1781,6 +2011,17 @@ extern "C" { int p0, int p1); + // Move tensor elements by an offset given for each dimension. Elements that + // are shifted beyond the last position are wrapped around to the beginning. 
+ GGML_API struct ggml_tensor * ggml_roll( + struct ggml_context * ctx, + struct ggml_tensor * a, + int shift0, + int shift1, + int shift2, + int shift3); + + // Ref: https://github.com/CompVis/stable-diffusion/blob/main/ldm/modules/diffusionmodules/util.py#L151 // timesteps: [N,] // return: [N, dim] @@ -1815,11 +2056,17 @@ extern "C" { #define GGML_KQ_MASK_PAD 64 - // q: [n_embd_k, n_batch, n_head, 1] - // k: [n_embd_k, n_kv, n_head_kv, 1] - // v: [n_embd_v, n_kv, n_head_kv, 1] !! not transposed !! - // mask: [n_kv, n_batch_pad, 1, 1] !! n_batch_pad = GGML_PAD(n_batch, GGML_KQ_MASK_PAD) !! - // res: [n_embd_v, n_head, n_batch, 1] !! permuted !! + // q: [n_embd_k, n_batch, n_head, ne3 ] + // k: [n_embd_k, n_kv, n_head_kv, ne3 ] + // v: [n_embd_v, n_kv, n_head_kv, ne3 ] !! not transposed !! + // mask: [n_kv, n_batch_pad, ne32, ne33] !! n_batch_pad = GGML_PAD(n_batch, GGML_KQ_MASK_PAD) !! + // res: [n_embd_v, n_head, n_batch, ne3 ] !! permuted !! + // + // broadcast: + // n_head % n_head_kv == 0 + // n_head % ne32 == 0 + // ne3 % ne33 == 0 + // GGML_API struct ggml_tensor * ggml_flash_attn_ext( struct ggml_context * ctx, struct ggml_tensor * q, @@ -1837,6 +2084,10 @@ extern "C" { GGML_API enum ggml_prec ggml_flash_attn_ext_get_prec( const struct ggml_tensor * a); + GGML_API void ggml_flash_attn_ext_add_sinks( + struct ggml_tensor * a, + struct ggml_tensor * sinks); + // TODO: needs to be adapted to ggml_flash_attn_ext GGML_API struct ggml_tensor * ggml_flash_attn_back( struct ggml_context * ctx, @@ -1858,7 +2109,8 @@ extern "C" { struct ggml_tensor * dt, struct ggml_tensor * A, struct ggml_tensor * B, - struct ggml_tensor * C); + struct ggml_tensor * C, + struct ggml_tensor * ids); // partition into non-overlapping windows with padding if needed // example: @@ -2075,9 +2327,6 @@ extern "C" { GGML_API struct ggml_tensor * ggml_graph_get_grad (const struct ggml_cgraph * cgraph, const struct ggml_tensor * node); GGML_API struct ggml_tensor * ggml_graph_get_grad_acc(const struct ggml_cgraph * cgraph, const struct ggml_tensor * node); - GGML_API void ggml_graph_export(const struct ggml_cgraph * cgraph, const char * fname); - GGML_API struct ggml_cgraph * ggml_graph_import(const char * fname, struct ggml_context ** ctx_data, struct ggml_context ** ctx_eval); - // print info and performance information for the graph GGML_API void ggml_graph_print(const struct ggml_cgraph * cgraph); @@ -2161,6 +2410,7 @@ extern "C" { // scheduling priorities enum ggml_sched_priority { + GGML_SCHED_PRIO_LOW = -1, GGML_SCHED_PRIO_NORMAL, GGML_SCHED_PRIO_MEDIUM, GGML_SCHED_PRIO_HIGH, diff --git a/ml/backend/ggml/ggml/src/CMakeLists.txt b/ml/backend/ggml/ggml/src/CMakeLists.txt index 0beaed866..5158acd6a 100644 --- a/ml/backend/ggml/ggml/src/CMakeLists.txt +++ b/ml/backend/ggml/ggml/src/CMakeLists.txt @@ -109,6 +109,8 @@ if (MSVC) else () set(CMAKE_GENERATOR_PLATFORM_LWR "") endif () +ggml_get_system_arch() +message(STATUS "GGML_SYSTEM_ARCH: ${GGML_SYSTEM_ARCH}") if (NOT MSVC) if (GGML_STATIC) @@ -123,7 +125,6 @@ if (NOT MSVC) endif() if (MINGW) - # Target Windows 8 for PrefetchVirtualMemory add_compile_definitions(_WIN32_WINNT=${GGML_WIN_VER}) endif() @@ -194,6 +195,7 @@ add_library(ggml-base ../include/ggml-opt.h ../include/gguf.h ggml.c + ggml.cpp ggml-alloc.c ggml-backend.cpp ggml-opt.cpp @@ -210,6 +212,14 @@ endif() add_library(ggml ggml-backend-reg.cpp) +add_library(ggml::ggml ALIAS ggml) + +if (GGML_BACKEND_DIR) + if (NOT GGML_BACKEND_DL) + message(FATAL_ERROR "GGML_BACKEND_DIR requires GGML_BACKEND_DL") + 
endif() + target_compile_definitions(ggml PUBLIC GGML_BACKEND_DIR="${GGML_BACKEND_DIR}") +endif() target_link_libraries(ggml PUBLIC ggml-base) @@ -224,6 +234,11 @@ function(ggml_add_backend_library backend) set_target_properties(${backend} PROPERTIES LIBRARY_OUTPUT_DIRECTORY ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}) target_compile_definitions(${backend} PRIVATE GGML_BACKEND_DL) add_dependencies(ggml ${backend}) + if (GGML_BACKEND_DIR) + install(TARGETS ${backend} LIBRARY DESTINATION ${GGML_BACKEND_DIR}) + else() + install(TARGETS ${backend} LIBRARY DESTINATION ${CMAKE_INSTALL_BINDIR}) + endif() else() add_library(${backend} ${ARGN}) target_link_libraries(ggml PUBLIC ${backend}) @@ -266,17 +281,27 @@ endfunction() function(ggml_add_cpu_backend_variant tag_name) set(GGML_CPU_TAG_NAME ${tag_name}) # other: OPENMP LLAMAFILE CPU_HBM - foreach (feat NATIVE - SSE42 - AVX AVX2 BMI2 AVX_VNNI FMA F16C - AVX512 AVX512_VBMI AVX512_VNNI AVX512_BF16 - AMX_TILE AMX_INT8 AMX_BF16) - set(GGML_${feat} OFF) - endforeach() + if (GGML_SYSTEM_ARCH STREQUAL "x86") + foreach (feat NATIVE + SSE42 + AVX AVX2 BMI2 AVX_VNNI FMA F16C + AVX512 AVX512_VBMI AVX512_VNNI AVX512_BF16 + AMX_TILE AMX_INT8 AMX_BF16) + set(GGML_${feat} OFF) + endforeach() - foreach (feat ${ARGN}) - set(GGML_${feat} ON) - endforeach() + foreach (feat ${ARGN}) + set(GGML_${feat} ON) + endforeach() + elseif (GGML_SYSTEM_ARCH STREQUAL "ARM") + foreach (feat ${ARGN}) + set(GGML_INTERNAL_${feat} ON) + endforeach() + elseif (GGML_SYSTEM_ARCH STREQUAL "PowerPC") + foreach (feat ${ARGN}) + set(GGML_INTERNAL_${feat} ON) + endforeach() + endif() ggml_add_cpu_backend_variant_impl(${tag_name}) add_dependencies(ggml-cpu ggml-cpu-${tag_name}) @@ -287,15 +312,60 @@ ggml_add_backend(CPU) if (GGML_CPU_ALL_VARIANTS) if (NOT GGML_BACKEND_DL) message(FATAL_ERROR "GGML_CPU_ALL_VARIANTS requires GGML_BACKEND_DL") + elseif (GGML_CPU_ARM_ARCH) + message(FATAL_ERROR "Cannot use both GGML_CPU_ARM_ARCH and GGML_CPU_ALL_VARIANTS") endif() add_custom_target(ggml-cpu) - ggml_add_cpu_backend_variant(x64) - ggml_add_cpu_backend_variant(sse42 SSE42) - ggml_add_cpu_backend_variant(sandybridge SSE42 AVX) - ggml_add_cpu_backend_variant(haswell SSE42 AVX F16C AVX2 BMI2 FMA) - ggml_add_cpu_backend_variant(skylakex SSE42 AVX F16C AVX2 BMI2 FMA AVX512) - ggml_add_cpu_backend_variant(icelake SSE42 AVX F16C AVX2 BMI2 FMA AVX512 AVX512_VBMI AVX512_VNNI) - ggml_add_cpu_backend_variant(alderlake SSE42 AVX F16C AVX2 BMI2 FMA AVX_VNNI) + if (GGML_SYSTEM_ARCH STREQUAL "x86") + ggml_add_cpu_backend_variant(x64) + ggml_add_cpu_backend_variant(sse42 SSE42) + ggml_add_cpu_backend_variant(sandybridge SSE42 AVX) + ggml_add_cpu_backend_variant(haswell SSE42 AVX F16C AVX2 BMI2 FMA) + ggml_add_cpu_backend_variant(skylakex SSE42 AVX F16C AVX2 BMI2 FMA AVX512) + ggml_add_cpu_backend_variant(icelake SSE42 AVX F16C AVX2 BMI2 FMA AVX512 AVX512_VBMI AVX512_VNNI) + ggml_add_cpu_backend_variant(alderlake SSE42 AVX F16C AVX2 BMI2 FMA AVX_VNNI) + elseif(GGML_SYSTEM_ARCH STREQUAL "ARM") + if (CMAKE_SYSTEM_NAME MATCHES "Linux") + # Many of these features are optional so we build versions with popular + # combinations and name the backends based on the version they were + # first released with + ggml_add_cpu_backend_variant(armv8.0_1) + ggml_add_cpu_backend_variant(armv8.2_1 DOTPROD) + ggml_add_cpu_backend_variant(armv8.2_2 DOTPROD FP16_VECTOR_ARITHMETIC) + ggml_add_cpu_backend_variant(armv8.2_3 DOTPROD FP16_VECTOR_ARITHMETIC SVE) + ggml_add_cpu_backend_variant(armv8.6_1 DOTPROD FP16_VECTOR_ARITHMETIC SVE MATMUL_INT8) + 
ggml_add_cpu_backend_variant(armv8.6_2 DOTPROD FP16_VECTOR_ARITHMETIC SVE MATMUL_INT8 SVE2) + ggml_add_cpu_backend_variant(armv9.2_1 DOTPROD FP16_VECTOR_ARITHMETIC SVE MATMUL_INT8 SME) + ggml_add_cpu_backend_variant(armv9.2_2 DOTPROD FP16_VECTOR_ARITHMETIC SVE MATMUL_INT8 SVE2 SME) + elseif (CMAKE_SYSTEM_NAME MATCHES "Android") + # Android-specific backends with SoC-compatible feature sets + ggml_add_cpu_backend_variant(android_armv8.0_1) + ggml_add_cpu_backend_variant(android_armv8.2_1 DOTPROD) + ggml_add_cpu_backend_variant(android_armv8.2_2 DOTPROD FP16_VECTOR_ARITHMETIC) + ggml_add_cpu_backend_variant(android_armv8.6_1 DOTPROD FP16_VECTOR_ARITHMETIC MATMUL_INT8) + elseif (APPLE) + ggml_add_cpu_backend_variant(apple_m1 DOTPROD) + ggml_add_cpu_backend_variant(apple_m2_m3 DOTPROD MATMUL_INT8) + ggml_add_cpu_backend_variant(apple_m4 DOTPROD MATMUL_INT8 NOSVE SME) + else() + message(FATAL_ERROR "Unsupported ARM target OS: ${CMAKE_SYSTEM_NAME}") + endif() + elseif (GGML_SYSTEM_ARCH STREQUAL "PowerPC") + if (CMAKE_SYSTEM_NAME MATCHES "Linux") + ggml_add_cpu_backend_variant(power0) + ggml_add_cpu_backend_variant(power7_1 POWER7) + ggml_add_cpu_backend_variant(power7_2 POWER7 VSX) + ggml_add_cpu_backend_variant(power8_1 POWER8) + ggml_add_cpu_backend_variant(power8_2 POWER8 VSX) + ggml_add_cpu_backend_variant(power9 POWER9 VSX) + ggml_add_cpu_backend_variant(power10 POWER10 VSX) + ggml_add_cpu_backend_variant(power11 POWER11 VSX) + else() + message(FATAL_ERROR "Unsupported PowerPC target OS: ${CMAKE_SYSTEM_NAME}") + endif() + else() + message(FATAL_ERROR "GGML_CPU_ALL_VARIANTS not yet supported with ${GGML_SYSTEM_ARCH} on ${CMAKE_SYSTEM_NAME}") + endif() elseif (GGML_CPU) ggml_add_cpu_backend_variant_impl("") endif() @@ -304,12 +374,12 @@ ggml_add_backend(BLAS) ggml_add_backend(CANN) ggml_add_backend(CUDA) ggml_add_backend(HIP) -ggml_add_backend(Kompute) ggml_add_backend(METAL) ggml_add_backend(MUSA) ggml_add_backend(RPC) ggml_add_backend(SYCL) ggml_add_backend(Vulkan) +ggml_add_backend(WebGPU) ggml_add_backend(OpenCL) foreach (target ggml-base ggml) diff --git a/ml/backend/ggml/ggml/src/ggml-alloc.c b/ml/backend/ggml/ggml/src/ggml-alloc.c index 048129908..41c8c4a2f 100644 --- a/ml/backend/ggml/ggml/src/ggml-alloc.c +++ b/ml/backend/ggml/ggml/src/ggml-alloc.c @@ -22,21 +22,6 @@ static bool ggml_is_view(const struct ggml_tensor * t) { return t->view_src != NULL; } -static bool ggml_are_same_layout(const struct ggml_tensor * a, const struct ggml_tensor * b) { - if (a->type != b->type) { - return false; - } - for (int i = 0; i < GGML_MAX_DIMS; i++) { - if (a->ne[i] != b->ne[i]) { - return false; - } - if (a->nb[i] != b->nb[i]) { - return false; - } - } - return true; -} - // ops that return true for this function must not use restrict pointers for their backend implementations static bool ggml_op_can_inplace(enum ggml_op op) { switch (op) { @@ -44,6 +29,7 @@ static bool ggml_op_can_inplace(enum ggml_op op) { case GGML_OP_DIAG_MASK_ZERO: case GGML_OP_DIAG_MASK_INF: case GGML_OP_ADD: + case GGML_OP_ADD_ID: case GGML_OP_ADD1: case GGML_OP_SUB: case GGML_OP_MUL: diff --git a/ml/backend/ggml/ggml/src/ggml-backend-reg.cpp b/ml/backend/ggml/ggml/src/ggml-backend-reg.cpp index 8f49f0846..f1e9c1801 100644 --- a/ml/backend/ggml/ggml/src/ggml-backend-reg.cpp +++ b/ml/backend/ggml/ggml/src/ggml-backend-reg.cpp @@ -45,6 +45,10 @@ #include "ggml-vulkan.h" #endif +#ifdef GGML_USE_WEBGPU +#include "ggml-webgpu.h" +#endif + #ifdef GGML_USE_OPENCL #include "ggml-opencl.h" #endif @@ -61,14 +65,13 @@ #include 
"ggml-cann.h" #endif -#ifdef GGML_USE_KOMPUTE -#include "ggml-kompute.h" -#endif - // disable C++17 deprecation warning for std::codecvt_utf8 #if defined(__clang__) # pragma clang diagnostic push # pragma clang diagnostic ignored "-Wdeprecated-declarations" +#elif defined(__GNUC__) +# pragma GCC diagnostic push +# pragma GCC diagnostic ignored "-Wdeprecated-declarations" #endif namespace fs = std::filesystem; @@ -91,6 +94,8 @@ static std::string path_str(const fs::path & path) { #if defined(__clang__) # pragma clang diagnostic pop +#elif defined(__GNUC__) +# pragma GCC diagnostic pop #endif #ifdef _WIN32 @@ -172,6 +177,9 @@ struct ggml_backend_registry { #ifdef GGML_USE_VULKAN register_backend(ggml_backend_vk_reg()); #endif +#ifdef GGML_USE_WEBGPU + register_backend(ggml_backend_webgpu_reg()); +#endif #ifdef GGML_USE_OPENCL register_backend(ggml_backend_opencl_reg()); #endif @@ -184,9 +192,6 @@ struct ggml_backend_registry { #ifdef GGML_USE_RPC register_backend(ggml_backend_rpc_reg()); #endif -#ifdef GGML_USE_KOMPUTE - register_backend(ggml_backend_kompute_reg()); -#endif #ifdef GGML_USE_CPU register_backend(ggml_backend_cpu_reg()); #endif @@ -498,6 +503,9 @@ static ggml_backend_reg_t ggml_backend_load_best(const char * name, bool silent, std::vector search_paths; if (user_search_path == nullptr) { +#ifdef GGML_BACKEND_DIR + search_paths.push_back(fs::u8path(GGML_BACKEND_DIR)); +#endif // default search paths: executable directory, current directory search_paths.push_back(get_executable_path()); search_paths.push_back(fs::current_path()); @@ -576,14 +584,13 @@ void ggml_backend_load_all_from_path(const char * dir_path) { // Avoid mixed hip+cuda configurations const char * hip_devices = std::getenv("HIP_VISIBLE_DEVICES"); - const char * rocr_devices = std::getenv("ROCR_VISIBLE_DEVICES"); + const char * rocr_devices = std::getenv("ROCR_VISIBLE_DEVICES"); if (!hip_devices && !rocr_devices) { ggml_backend_load_best("cuda", silent, dir_path); } else { ggml_backend_load_best("hip", silent, dir_path); } - - ggml_backend_load_best("kompute", silent, dir_path); + ggml_backend_load_best("metal", silent, dir_path); ggml_backend_load_best("rpc", silent, dir_path); ggml_backend_load_best("sycl", silent, dir_path); diff --git a/ml/backend/ggml/ggml/src/ggml-backend.cpp b/ml/backend/ggml/ggml/src/ggml-backend.cpp index 84928bc3b..05a842ed5 100644 --- a/ml/backend/ggml/ggml/src/ggml-backend.cpp +++ b/ml/backend/ggml/ggml/src/ggml-backend.cpp @@ -368,21 +368,6 @@ ggml_backend_dev_t ggml_backend_get_device(ggml_backend_t backend) { // backend copy -static bool ggml_are_same_layout(const struct ggml_tensor * a, const struct ggml_tensor * b) { - if (a->type != b->type) { - return false; - } - for (int i = 0; i < GGML_MAX_DIMS; i++) { - if (a->ne[i] != b->ne[i]) { - return false; - } - if (a->nb[i] != b->nb[i]) { - return false; - } - } - return true; -} - void ggml_backend_tensor_copy(struct ggml_tensor * src, struct ggml_tensor * dst) { GGML_ASSERT(ggml_are_same_layout(src, dst) && "cannot copy tensors with different layouts"); @@ -679,6 +664,7 @@ struct ggml_backend_sched { // pipeline parallelism support int n_copies; int cur_copy; + int next_copy; ggml_backend_event_t events[GGML_SCHED_MAX_BACKENDS][GGML_SCHED_MAX_COPIES]; struct ggml_tensor * graph_inputs[GGML_SCHED_MAX_SPLIT_INPUTS]; int n_graph_inputs; @@ -834,8 +820,9 @@ static void ggml_backend_sched_print_assignments(ggml_backend_sched_t sched, str } if (sched->debug > 1) { ggml_backend_t tensor_backend = ggml_backend_sched_get_tensor_backend(sched, 
node); - GGML_LOG_DEBUG("node #%3d (%10.10s): %20.20s (%5.5s) [%5.5s %8.8s]:", i, ggml_op_name(node->op), node->name, - fmt_size(ggml_nbytes(node)), tensor_backend ? ggml_backend_name(tensor_backend) : "NULL", GET_CAUSE(node)); + GGML_LOG_DEBUG("node #%3d (%10.10s): %20.20s (%5.5s) [%5.5s %8.8s] use=%d:", i, ggml_op_name(node->op), node->name, + fmt_size(ggml_nbytes(node)), tensor_backend ? ggml_backend_name(tensor_backend) : "NULL", GET_CAUSE(node), + graph->use_counts[ggml_hash_find(&graph->visited_hash_set, node)]); for (int j = 0; j < GGML_MAX_SRC; j++) { struct ggml_tensor * src = node->src[j]; if (src == NULL) { @@ -1101,6 +1088,11 @@ static void ggml_backend_sched_split_graph(ggml_backend_sched_t sched, struct gg } } } + // if the node is still unassigned, assign it to the first backend that supports it + for (int b = 0; b < sched->n_backends && *cur_backend_id == -1; b++) { + ggml_backend_sched_set_if_supported(sched, node, b, cur_backend_id); + } + GGML_ASSERT(*cur_backend_id != -1); } // pass 5: split graph, find tensors that need to be copied @@ -1128,7 +1120,7 @@ static void ggml_backend_sched_split_graph(ggml_backend_sched_t sched, struct gg const int node_backend_id = tensor_backend_id(node); - assert(node_backend_id != -1); // all nodes should be assigned by now, this can happen if there is no CPU fallback + GGML_ASSERT(node_backend_id != -1); // all nodes should be assigned by now, this can happen if there is no CPU fallback // check if we should start a new split based on the sources of the current node bool need_new_split = false; @@ -1186,7 +1178,7 @@ static void ggml_backend_sched_split_graph(ggml_backend_sched_t sched, struct gg size_t src_id = hash_id(src); const int src_backend_id = sched->hv_tensor_backend_ids[src_id]; - assert(src_backend_id != -1); // all inputs should be assigned by now + GGML_ASSERT(src_backend_id != -1); // all inputs should be assigned by now if (src->flags & GGML_TENSOR_FLAG_INPUT && sched->n_copies > 1) { if (tensor_id_copy(src_id, src_backend_id, 0) == NULL) { @@ -1357,7 +1349,10 @@ static bool ggml_backend_sched_alloc_splits(ggml_backend_sched_t sched) { // allocate graph if (backend_ids_changed || !ggml_gallocr_alloc_graph(sched->galloc, &sched->graph)) { // the re-allocation may cause the split inputs to be moved to a different address - ggml_backend_sched_synchronize(sched); + // synchronize without ggml_backend_sched_synchronize to avoid changing cur_copy + for (int i = 0; i < sched->n_backends; i++) { + ggml_backend_synchronize(sched->backends[i]); + } #ifndef NDEBUG GGML_LOG_DEBUG("%s: failed to allocate graph, reserving (backend_ids_changed = %d)\n", __func__, backend_ids_changed); #endif @@ -1461,8 +1456,6 @@ static enum ggml_status ggml_backend_sched_compute_splits(ggml_backend_sched_t s } } - sched->cur_copy = (sched->cur_copy + 1) % sched->n_copies; - return GGML_STATUS_SUCCESS; } @@ -1563,10 +1556,10 @@ void ggml_backend_sched_reset(ggml_backend_sched_t sched) { bool ggml_backend_sched_reserve(ggml_backend_sched_t sched, struct ggml_cgraph * measure_graph) { GGML_ASSERT((int)sched->hash_set.size >= measure_graph->n_nodes + measure_graph->n_leafs); - ggml_backend_sched_split_graph(sched, measure_graph); - ggml_backend_sched_synchronize(sched); + ggml_backend_sched_split_graph(sched, measure_graph); + if (!ggml_gallocr_reserve_n(sched->galloc, &sched->graph, sched->node_backend_ids, sched->leaf_backend_ids)) { return false; } @@ -1578,10 +1571,13 @@ bool ggml_backend_sched_reserve(ggml_backend_sched_t sched, struct ggml_cgraph * 
bool ggml_backend_sched_alloc_graph(ggml_backend_sched_t sched, struct ggml_cgraph * graph) { GGML_ASSERT((int)sched->hash_set.size >= graph->n_nodes + graph->n_leafs); + GGML_ASSERT(!sched->is_alloc); + + sched->cur_copy = sched->next_copy; + sched->next_copy = (sched->next_copy + 1) % sched->n_copies; ggml_backend_sched_split_graph(sched, graph); - if (!ggml_backend_sched_alloc_splits(sched)) { return false; } @@ -1615,6 +1611,12 @@ void ggml_backend_sched_synchronize(ggml_backend_sched_t sched) { for (int i = 0; i < sched->n_backends; i++) { ggml_backend_synchronize(sched->backends[i]); } + if (!sched->is_alloc) { + // if the graph is not already allocated, always use copy 0 after a synchronization + // this ensures that during generation the same copy is used every time, + // which avoids changes in the graph that could cause CUDA or other graphs to be disabled + sched->next_copy = 0; + } } void ggml_backend_sched_set_eval_callback(ggml_backend_sched_t sched, ggml_backend_sched_eval_callback callback, void * user_data) { @@ -1845,7 +1847,7 @@ void ggml_backend_graph_copy_free(struct ggml_backend_graph_copy copy) { ggml_free(copy.ctx_unallocated); } -bool ggml_backend_compare_graph_backend(ggml_backend_t backend1, ggml_backend_t backend2, struct ggml_cgraph * graph, ggml_backend_eval_callback callback, void * user_data) { +bool ggml_backend_compare_graph_backend(ggml_backend_t backend1, ggml_backend_t backend2, struct ggml_cgraph * graph, ggml_backend_eval_callback callback, void * user_data, struct ggml_tensor * test_node) { struct ggml_backend_graph_copy copy = ggml_backend_graph_copy(backend2, graph); if (copy.buffer == NULL) { return false; @@ -1856,28 +1858,45 @@ bool ggml_backend_compare_graph_backend(ggml_backend_t backend1, ggml_backend_t assert(g1->n_nodes == g2->n_nodes); - for (int i = 0; i < g1->n_nodes; i++) { - struct ggml_tensor * t1 = g1->nodes[i]; - struct ggml_tensor * t2 = g2->nodes[i]; + if (test_node != nullptr) { + // Compute the whole graph and only test the output for a specific tensor + ggml_backend_graph_compute(backend1, g1); + ggml_backend_graph_compute(backend2, g2); - assert(t1->op == t2->op && ggml_are_same_layout(t1, t2)); - - struct ggml_cgraph g1v = ggml_graph_view(g1, i, i + 1); - struct ggml_cgraph g2v = ggml_graph_view(g2, i, i + 1); - - ggml_backend_graph_compute(backend1, &g1v); - ggml_backend_graph_compute(backend2, &g2v); - - if (ggml_is_view_op(t1->op)) { - continue; + int test_node_idx = -1; + for (int i = 0; i < g1->n_nodes; i++) { + struct ggml_tensor * t1 = g1->nodes[i]; + if (t1 == test_node) { + test_node_idx = i; + break; + } } + GGML_ASSERT(test_node_idx != -1); - // compare results, calculate rms etc - if (!callback(i, t1, t2, user_data)) { - break; + callback(test_node_idx, g1->nodes[test_node_idx], g2->nodes[test_node_idx], user_data); + } else { + for (int i = 0; i < g1->n_nodes; i++) { + struct ggml_tensor * t1 = g1->nodes[i]; + struct ggml_tensor * t2 = g2->nodes[i]; + + assert(t1->op == t2->op && ggml_are_same_layout(t1, t2)); + + struct ggml_cgraph g1v = ggml_graph_view(g1, i, i + 1); + struct ggml_cgraph g2v = ggml_graph_view(g2, i, i + 1); + + ggml_backend_graph_compute(backend1, &g1v); + ggml_backend_graph_compute(backend2, &g2v); + + if (ggml_is_view_op(t1->op)) { + continue; + } + + // compare results, calculate rms etc + if (!callback(i, t1, t2, user_data)) { + break; + } } } - ggml_backend_graph_copy_free(copy); return true; diff --git a/ml/backend/ggml/ggml/src/ggml-blas/CMakeLists.txt 
b/ml/backend/ggml/ggml/src/ggml-blas/CMakeLists.txt index 0bf3c05d9..76064c3fd 100644 --- a/ml/backend/ggml/ggml/src/ggml-blas/CMakeLists.txt +++ b/ml/backend/ggml/ggml/src/ggml-blas/CMakeLists.txt @@ -81,7 +81,7 @@ if (BLAS_FOUND) target_link_libraries (ggml-blas PRIVATE ${BLAS_LIBRARIES}) target_include_directories(ggml-blas PRIVATE ${BLAS_INCLUDE_DIRS}) else() - message(ERROR "BLAS not found, please refer to " - "https://cmake.org/cmake/help/latest/module/FindBLAS.html#blas-lapack-vendors" - " to set correct GGML_BLAS_VENDOR") + message(FATAL_ERROR "BLAS not found, please refer to " + "https://cmake.org/cmake/help/latest/module/FindBLAS.html#blas-lapack-vendors" + " to set correct GGML_BLAS_VENDOR") endif() diff --git a/ml/backend/ggml/ggml/src/ggml-blas/ggml-blas.cpp b/ml/backend/ggml/ggml/src/ggml-blas/ggml-blas.cpp index 22926d758..40738d5b4 100644 --- a/ml/backend/ggml/ggml/src/ggml-blas/ggml-blas.cpp +++ b/ml/backend/ggml/ggml/src/ggml-blas/ggml-blas.cpp @@ -281,10 +281,10 @@ ggml_backend_t ggml_backend_blas_init(void) { ggml_backend_blas_context * ctx = new ggml_backend_blas_context; ggml_backend_t backend = new ggml_backend { - /* .guid = */ ggml_backend_blas_guid(), - /* .interface = */ blas_backend_i, - /* .device = */ ggml_backend_reg_dev_get(ggml_backend_blas_reg(), 0), - /* .context = */ ctx, + /* .guid = */ ggml_backend_blas_guid(), + /* .iface = */ blas_backend_i, + /* .device = */ ggml_backend_reg_dev_get(ggml_backend_blas_reg(), 0), + /* .context = */ ctx, }; #if defined(OPENBLAS_VERSION) && defined(GGML_USE_OPENMP) diff --git a/ml/backend/ggml/ggml/src/ggml-common.h b/ml/backend/ggml/ggml/src/ggml-common.h index e0d71451b..93ab7ea44 100644 --- a/ml/backend/ggml/ggml/src/ggml-common.h +++ b/ml/backend/ggml/ggml/src/ggml-common.h @@ -99,6 +99,9 @@ typedef sycl::half2 ggml_half2; #define QI4_1 (QK4_1 / (4 * QR4_1)) #define QR4_1 2 +#define QI_MXFP4 (QK_MXFP4 / (4 * QR_MXFP4)) +#define QR_MXFP4 2 + #define QI5_0 (QK5_0 / (4 * QR5_0)) #define QR5_0 2 @@ -184,6 +187,13 @@ typedef struct { } block_q4_1; static_assert(sizeof(block_q4_1) == 2 * sizeof(ggml_half) + QK4_1 / 2, "wrong q4_1 block size/padding"); +#define QK_MXFP4 32 +typedef struct { + uint8_t e; // E8M0 + uint8_t qs[QK_MXFP4/2]; +} block_mxfp4; +static_assert(sizeof(block_mxfp4) == sizeof(uint8_t) + QK_MXFP4/2, "wrong mxfp4 block size/padding"); + #define QK5_0 32 typedef struct { ggml_half d; // delta @@ -417,13 +427,6 @@ typedef struct { } block_iq4_xs; static_assert(sizeof(block_iq4_xs) == sizeof(ggml_half) + sizeof(uint16_t) + QK_K/64 + QK_K/2, "wrong iq4_xs block size/padding"); -#define MXFP4 32 -typedef struct { - uint8_t d; // scale E8M0 float - uint8_t qs[MXFP4 / 2]; // (32) 4 bit elements E2M1 float -} block_mxfp4; -static_assert(sizeof(block_mxfp4) == sizeof(uint8_t) + MXFP4/2, "wrong mxfp4 block size/padding"); - #endif // GGML_COMMON_DECL #endif // GGML_COMMON_DECL @@ -1081,6 +1084,17 @@ GGML_TABLE_BEGIN(uint32_t, iq3s_grid, 512) 0x0f090307, 0x0f090501, 0x0f090b01, 0x0f0b0505, 0x0f0b0905, 0x0f0d0105, 0x0f0d0703, 0x0f0f0101, GGML_TABLE_END() +// TODO: fix name to kvalues_iq4_nl +GGML_TABLE_BEGIN(int8_t, kvalues_iq4nl, 16) + -127, -104, -83, -65, -49, -35, -22, -10, 1, 13, 25, 38, 53, 69, 89, 113, +GGML_TABLE_END() + +// e2m1 values (doubled) +// ref: https://www.opencompute.org/documents/ocp-microscaling-formats-mx-v1-0-spec-final-pdf +GGML_TABLE_BEGIN(int8_t, kvalues_mxfp4, 16) + 0, 1, 2, 3, 4, 6, 8, 12, 0, -1, -2, -3, -4, -6, -8, -12, +GGML_TABLE_END() + #define NGRID_IQ1S 2048 #define IQ1S_DELTA 
0.125f #define IQ1M_DELTA 0.125f diff --git a/ml/backend/ggml/ggml/src/ggml-cpu/CMakeLists.txt b/ml/backend/ggml/ggml/src/ggml-cpu/CMakeLists.txt index bdaec2881..f188d1638 100644 --- a/ml/backend/ggml/ggml/src/ggml-cpu/CMakeLists.txt +++ b/ml/backend/ggml/ggml/src/ggml-cpu/CMakeLists.txt @@ -1,3 +1,17 @@ +function(ggml_add_cpu_backend_features cpu_name arch) + # The feature detection code is compiled as a separate target so that + # it can be built without the architecture flags + # Since multiple variants of the CPU backend may be included in the same + # build, using set_source_files_properties() to set the arch flags is not possible + set(GGML_CPU_FEATS_NAME ${cpu_name}-feats) + add_library(${GGML_CPU_FEATS_NAME} OBJECT ggml-cpu/arch/${arch}/cpu-feats.cpp) + target_include_directories(${GGML_CPU_FEATS_NAME} PRIVATE . ../include) + target_compile_definitions(${GGML_CPU_FEATS_NAME} PRIVATE ${ARGN}) + target_compile_definitions(${GGML_CPU_FEATS_NAME} PRIVATE GGML_BACKEND_DL GGML_BACKEND_BUILD GGML_BACKEND_SHARED) + set_target_properties(${GGML_CPU_FEATS_NAME} PROPERTIES POSITION_INDEPENDENT_CODE ON) + target_link_libraries(${cpu_name} PRIVATE ${GGML_CPU_FEATS_NAME}) +endfunction() + function(ggml_add_cpu_backend_variant_impl tag_name) if (tag_name) set(GGML_CPU_NAME ggml-cpu-${tag_name}) @@ -10,14 +24,14 @@ function(ggml_add_cpu_backend_variant_impl tag_name) list (APPEND GGML_CPU_SOURCES ggml-cpu/ggml-cpu.c ggml-cpu/ggml-cpu.cpp - ggml-cpu/ggml-cpu-aarch64.cpp - ggml-cpu/ggml-cpu-aarch64.h - ggml-cpu/ggml-cpu-hbm.cpp - ggml-cpu/ggml-cpu-hbm.h - ggml-cpu/ggml-cpu-quants.c - ggml-cpu/ggml-cpu-quants.h - ggml-cpu/ggml-cpu-traits.cpp - ggml-cpu/ggml-cpu-traits.h + ggml-cpu/repack.cpp + ggml-cpu/repack.h + ggml-cpu/hbm.cpp + ggml-cpu/hbm.h + ggml-cpu/quants.c + ggml-cpu/quants.h + ggml-cpu/traits.cpp + ggml-cpu/traits.h ggml-cpu/amx/amx.cpp ggml-cpu/amx/amx.h ggml-cpu/amx/mmq.cpp @@ -56,10 +70,12 @@ function(ggml_add_cpu_backend_variant_impl tag_name) if (GGML_OPENMP) find_package(OpenMP) if (OpenMP_FOUND) + set(GGML_OPENMP_ENABLED "ON" CACHE INTERNAL "") target_compile_definitions(${GGML_CPU_NAME} PRIVATE GGML_USE_OPENMP) target_link_libraries(${GGML_CPU_NAME} PRIVATE OpenMP::OpenMP_C OpenMP::OpenMP_CXX) else() + set(GGML_OPENMP_ENABLED "OFF" CACHE INTERNAL "") message(WARNING "OpenMP not found") endif() endif() @@ -82,12 +98,12 @@ function(ggml_add_cpu_backend_variant_impl tag_name) target_link_libraries(${GGML_CPU_NAME} PUBLIC memkind) endif() - if (CMAKE_OSX_ARCHITECTURES STREQUAL "arm64" OR - CMAKE_GENERATOR_PLATFORM_LWR STREQUAL "arm64" OR - (NOT CMAKE_OSX_ARCHITECTURES AND NOT CMAKE_GENERATOR_PLATFORM_LWR AND - CMAKE_SYSTEM_PROCESSOR MATCHES "^(aarch64|arm.*|ARM64)$")) - + if (GGML_SYSTEM_ARCH STREQUAL "ARM") message(STATUS "ARM detected") + list(APPEND GGML_CPU_SOURCES + ggml-cpu/arch/arm/quants.c + ggml-cpu/arch/arm/repack.cpp + ) if (MSVC AND NOT CMAKE_C_COMPILER_ID STREQUAL "Clang") message(FATAL_ERROR "MSVC is not supported for ARM, use clang") @@ -143,6 +159,49 @@ function(ggml_add_cpu_backend_variant_impl tag_name) else() if (GGML_CPU_ARM_ARCH) list(APPEND ARCH_FLAGS -march=${GGML_CPU_ARM_ARCH}) + elseif(GGML_CPU_ALL_VARIANTS) + # Begin with the lowest baseline + set(ARM_MCPU "armv8-a") + set(ARCH_TAGS "") + set(ARCH_DEFINITIONS "") + + # When a feature is selected, bump the MCPU to the first + # version that supported it + if (GGML_INTERNAL_DOTPROD) + set(ARM_MCPU "armv8.2-a") + set(ARCH_TAGS "${ARCH_TAGS}+dotprod") + list(APPEND ARCH_DEFINITIONS GGML_USE_DOTPROD) + endif() + 
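        # For instance, assuming a variant declared with DOTPROD and
        # FP16_VECTOR_ARITHMETIC only, the feature blocks above and below
        # compose to -march=armv8.2-a+dotprod+fp16, and define
        # GGML_USE_DOTPROD / GGML_USE_FP16_VECTOR_ARITHMETIC for the
        # runtime feature-score check in cpu-feats.cpp.
+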
if (GGML_INTERNAL_FP16_VECTOR_ARITHMETIC) + set(ARM_MCPU "armv8.2-a") + set(ARCH_TAGS "${ARCH_TAGS}+fp16") + list(APPEND ARCH_DEFINITIONS GGML_USE_FP16_VECTOR_ARITHMETIC) + endif() + if (GGML_INTERNAL_SVE) + set(ARM_MCPU "armv8.2-a") + set(ARCH_TAGS "${ARCH_TAGS}+sve") + list(APPEND ARCH_DEFINITIONS GGML_USE_SVE) + endif() + if (GGML_INTERNAL_MATMUL_INT8) + set(ARM_MCPU "armv8.6-a") + set(ARCH_TAGS "${ARCH_TAGS}+i8mm") + list(APPEND ARCH_DEFINITIONS GGML_USE_MATMUL_INT8) + endif() + if (GGML_INTERNAL_SVE2) + set(ARM_MCPU "armv8.6-a") + set(ARCH_TAGS "${ARCH_TAGS}+sve2") + list(APPEND ARCH_DEFINITIONS GGML_USE_SVE2) + endif() + if (GGML_INTERNAL_NOSVE) + set(ARCH_TAGS "${ARCH_TAGS}+nosve") + endif() + if (GGML_INTERNAL_SME) + set(ARM_MCPU "armv9.2-a") + set(ARCH_TAGS "${ARCH_TAGS}+sme") + list(APPEND ARCH_DEFINITIONS GGML_USE_SME) + endif() + list(APPEND ARCH_FLAGS "-march=${ARM_MCPU}${ARCH_TAGS}") + ggml_add_cpu_backend_features(${GGML_CPU_NAME} arm ${ARCH_DEFINITIONS}) endif() endif() @@ -170,11 +229,12 @@ function(ggml_add_cpu_backend_variant_impl tag_name) endforeach() endif() endif() - elseif (CMAKE_OSX_ARCHITECTURES STREQUAL "x86_64" OR CMAKE_GENERATOR_PLATFORM_LWR MATCHES "^(x86_64|i686|amd64|x64|win32)$" OR - (NOT CMAKE_OSX_ARCHITECTURES AND NOT CMAKE_GENERATOR_PLATFORM_LWR AND - CMAKE_SYSTEM_PROCESSOR MATCHES "^(x86_64|i686|AMD64|amd64)$")) - + elseif (GGML_SYSTEM_ARCH STREQUAL "x86") message(STATUS "x86 detected") + list(APPEND GGML_CPU_SOURCES + ggml-cpu/arch/x86/quants.c + ggml-cpu/arch/x86/repack.cpp + ) if (MSVC) # instruction set detection for MSVC only @@ -299,8 +359,17 @@ function(ggml_add_cpu_backend_variant_impl tag_name) endif() endif() endif() - elseif ("${CMAKE_SYSTEM_PROCESSOR} " STREQUAL "ppc64le " OR "${CMAKE_SYSTEM_PROCESSOR} " STREQUAL "powerpc ") + + if (GGML_BACKEND_DL) + if (GGML_NATIVE) + # the feature check relies on ARCH_DEFINITIONS, but it is not set with GGML_NATIVE + message(FATAL_ERROR "GGML_NATIVE is not compatible with GGML_BACKEND_DL, consider using GGML_CPU_ALL_VARIANTS") + endif() + ggml_add_cpu_backend_features(${GGML_CPU_NAME} x86 ${ARCH_DEFINITIONS}) + endif() + elseif (GGML_SYSTEM_ARCH STREQUAL "PowerPC") message(STATUS "PowerPC detected") + list(APPEND GGML_CPU_SOURCES ggml-cpu/arch/powerpc/quants.c) if (GGML_NATIVE) if (${CMAKE_SYSTEM_PROCESSOR} MATCHES "ppc64") file(READ "/proc/cpuinfo" POWER10_M) @@ -308,7 +377,8 @@ function(ggml_add_cpu_backend_variant_impl tag_name) execute_process(COMMAND bash -c "prtconf |grep 'Implementation' | head -n 1" OUTPUT_VARIABLE POWER10_M) endif() - string(REGEX MATCHALL "POWER *([0-9]+)" MATCHED_STRING "${POWER10_M}") + string(TOUPPER "${POWER10_M}" POWER10_M_UPPER) + string(REGEX MATCHALL "POWER *([0-9]+)" MATCHED_STRING "${POWER10_M_UPPER}") string(REGEX REPLACE "POWER *([0-9]+)" "\\1" EXTRACTED_NUMBER "${MATCHED_STRING}") if (EXTRACTED_NUMBER GREATER_EQUAL 10) @@ -320,13 +390,35 @@ function(ggml_add_cpu_backend_variant_impl tag_name) else() list(APPEND ARCH_FLAGS -mcpu=native -mtune=native -mpowerpc64) endif() + elseif(GGML_CPU_ALL_VARIANTS) + # Begin with the lowest baseline + set(ARCH_DEFINITIONS "") + + # When a feature is selected, bump the MCPU to the first + # version that supported it + foreach(PVER RANGE 7 11) + if(DEFINED GGML_INTERNAL_POWER${PVER}) + set(POWERPC_MCPU "power${PVER}") + list(APPEND ARCH_DEFINITIONS GGML_USE_POWER${PVER}) + endif() + endforeach() + if (GGML_INTERNAL_VSX) + list(APPEND ARCH_DEFINITIONS GGML_USE_VSX) + list(APPEND ARCH_FLAGS -mvsx) + endif() + + if (DEFINED 
POWERPC_MCPU) + list(APPEND ARCH_FLAGS -mcpu=${POWERPC_MCPU}) + endif() + ggml_add_cpu_backend_features(${GGML_CPU_NAME} powerpc ${ARCH_DEFINITIONS}) else() if (GGML_CPU_POWERPC_CPUTYPE) list(APPEND ARCH_FLAGS -mcpu=${GGML_CPU_POWERPC_CPUTYPE}) endif() endif() - elseif (${CMAKE_SYSTEM_PROCESSOR} MATCHES "loongarch64") + elseif (GGML_SYSTEM_ARCH STREQUAL "loongarch64") message(STATUS "loongarch64 detected") + list(APPEND GGML_CPU_SOURCES ggml-cpu/arch/loongarch/quants.c) list(APPEND ARCH_FLAGS -march=loongarch64) if (GGML_LASX) @@ -335,22 +427,30 @@ function(ggml_add_cpu_backend_variant_impl tag_name) if (GGML_LSX) list(APPEND ARCH_FLAGS -mlsx) endif() - elseif (${CMAKE_SYSTEM_PROCESSOR} MATCHES "riscv64") - message(STATUS "RISC-V detected") + elseif (GGML_SYSTEM_ARCH STREQUAL "riscv64") + message(STATUS "riscv64 detected") + list(APPEND GGML_CPU_SOURCES + ggml-cpu/arch/riscv/quants.c + ggml-cpu/arch/riscv/repack.cpp + ) if (GGML_RVV) - if (GGML_RV_ZFH) - list(APPEND ARCH_FLAGS -march=rv64gcv_zfhmin -DGGML_RV_ZFH -mabi=lp64d) + if (GGML_XTHEADVECTOR) + list(APPEND ARCH_FLAGS -march=rv64gc_xtheadvector -mabi=lp64d) + elseif (GGML_RV_ZFH) + list(APPEND ARCH_FLAGS -march=rv64gcv_zfhmin -mabi=lp64d) else() list(APPEND ARCH_FLAGS -march=rv64gcv -mabi=lp64d) endif() endif() - elseif (${CMAKE_SYSTEM_PROCESSOR} MATCHES "s390x") + elseif (GGML_SYSTEM_ARCH STREQUAL "s390x") message(STATUS "s390x detected") + list(APPEND GGML_CPU_SOURCES ggml-cpu/arch/s390/quants.c) file(READ "/proc/cpuinfo" CPUINFO_CONTENTS) string(REGEX REPLACE "machine[ \t\r\n]*=[ \t\r\n]*([0-9]+)" "\\1" S390X_M ${CPUINFO_CONTENTS}) # TODO: Separation to determine activation of VX/VXE/VXE2 if (${S390X_M} MATCHES "8561|8562") + set(GGML_NNPA OFF) message(STATUS "z15 target") list(APPEND ARCH_FLAGS -march=z15) elseif (${S390X_M} MATCHES "3931") @@ -358,6 +458,7 @@ function(ggml_add_cpu_backend_variant_impl tag_name) list(APPEND ARCH_FLAGS -march=z16) elseif (${S390X_M} MATCHES "9175|9176") # NOTE: Only available from GCC 15.1.0 onwards. Any z17 machine with compile issues must first verify their GCC version. + # binutils must also be updated to the latest for the -march=z17 flag to work. Otherwise, use -march=arch15. message(STATUS "z17 target") list(APPEND ARCH_FLAGS -march=z17) else() @@ -367,14 +468,25 @@ function(ggml_add_cpu_backend_variant_impl tag_name) endif() if (GGML_VXE) + message(STATUS "VX/VXE/VXE2 enabled") list(APPEND ARCH_FLAGS -mvx -mzvector) + list(APPEND ARCH_DEFINITIONS GGML_VXE) endif() + + if (GGML_NNPA) + message(STATUS "NNPA enabled") + list(APPEND ARCH_DEFINITIONS GGML_NNPA) + endif() + elseif (CMAKE_SYSTEM_PROCESSOR MATCHES "wasm") + message(STATUS "Wasm detected") + list (APPEND GGML_CPU_SOURCES ggml-cpu/arch/wasm/quants.c) else() - message(STATUS "Unknown architecture") + message(WARNING "Unknown CPU architecture. 
Falling back to generic implementations.")
+        list(APPEND ARCH_FLAGS -DGGML_CPU_GENERIC)
     endif()

-    if (GGML_CPU_AARCH64)
-        target_compile_definitions(${GGML_CPU_NAME} PRIVATE GGML_USE_CPU_AARCH64)
+    if (GGML_CPU_REPACK)
+        target_compile_definitions(${GGML_CPU_NAME} PRIVATE GGML_USE_CPU_REPACK)
     endif()

     if (GGML_CPU_KLEIDIAI)
@@ -385,9 +497,9 @@

         # Fetch KleidiAI sources:
         include(FetchContent)
-        set(KLEIDIAI_COMMIT_TAG "v1.5.0")
+        set(KLEIDIAI_COMMIT_TAG "v1.11.0")
         set(KLEIDIAI_DOWNLOAD_URL "https://github.com/ARM-software/kleidiai/archive/refs/tags/${KLEIDIAI_COMMIT_TAG}.tar.gz")
-        set(KLEIDIAI_ARCHIVE_MD5  "ea22e1aefb800e9bc8c74d91633cc58e")
+        set(KLEIDIAI_ARCHIVE_MD5  "3fe9e5ab964c375c53839296eb71eaa2")

         if (POLICY CMP0135)
             cmake_policy(SET CMP0135 NEW)
@@ -477,26 +589,12 @@
     target_compile_options(${GGML_CPU_NAME} PRIVATE ${ARCH_FLAGS})
     target_compile_definitions(${GGML_CPU_NAME} PRIVATE ${ARCH_DEFINITIONS})

-    if (GGML_BACKEND_DL)
-        if (GGML_NATIVE)
-            # the feature check relies on ARCH_DEFINITIONS, but it is not set with GGML_NATIVE
-            message(FATAL_ERROR "GGML_NATIVE is not compatible with GGML_BACKEND_DL, consider using GGML_CPU_ALL_VARIANTS")
-        endif()
-
-        # The feature detection code is compiled as a separate target so that
-        # it can be built without the architecture flags
-        # Since multiple variants of the CPU backend may be included in the same
-        # build, using set_source_files_properties() to set the arch flags is not possible
-        set(GGML_CPU_FEATS_NAME ${GGML_CPU_NAME}-feats)
-        add_library(${GGML_CPU_FEATS_NAME} OBJECT ggml-cpu/cpu-feats-x86.cpp)
-        target_include_directories(${GGML_CPU_FEATS_NAME} PRIVATE . .. ../include)
-        target_compile_definitions(${GGML_CPU_FEATS_NAME} PRIVATE ${ARCH_DEFINITIONS})
-        target_compile_definitions(${GGML_CPU_FEATS_NAME} PRIVATE GGML_BACKEND_DL GGML_BACKEND_BUILD GGML_BACKEND_SHARED)
-        set_target_properties(${GGML_CPU_FEATS_NAME} PROPERTIES POSITION_INDEPENDENT_CODE ON)
-        target_link_libraries(${GGML_CPU_NAME} PRIVATE ${GGML_CPU_FEATS_NAME})
-    endif()
-
     if (EMSCRIPTEN)
         set_target_properties(${GGML_CPU_NAME} PROPERTIES COMPILE_FLAGS "-msimd128")
     endif()
+
+    if (CMAKE_CXX_COMPILER_ID STREQUAL "IntelLLVM")
+        # The compiler automatically enables "-ffast-math" which can cause NaNs in tests due to "-fassociative-math"
+        target_compile_options(${GGML_CPU_NAME} PRIVATE "-fno-associative-math")
+    endif()
 endfunction()
diff --git a/ml/backend/ggml/ggml/src/ggml-cpu/amx/amx.cpp b/ml/backend/ggml/ggml/src/ggml-cpu/amx/amx.cpp
index 0f067137d..258857b00 100644
--- a/ml/backend/ggml/ggml/src/ggml-cpu/amx/amx.cpp
+++ b/ml/backend/ggml/ggml/src/ggml-cpu/amx/amx.cpp
@@ -5,7 +5,7 @@
 #include "ggml-backend.h"
 #include "ggml-impl.h"
 #include "ggml-cpu.h"
-#include "ggml-cpu-traits.h"
+#include "traits.h"

 #if defined(__gnu_linux__)
 #include <sys/syscall.h>
diff --git a/ml/backend/ggml/ggml/src/ggml-cpu/amx/mmq.cpp b/ml/backend/ggml/ggml/src/ggml-cpu/amx/mmq.cpp
index 0ea91596b..47c61b881 100644
--- a/ml/backend/ggml/ggml/src/ggml-cpu/amx/mmq.cpp
+++ b/ml/backend/ggml/ggml/src/ggml-cpu/amx/mmq.cpp
@@ -8,7 +8,8 @@
 #include "mmq.h"
 #include "ggml-impl.h"
 #include "ggml-cpu-impl.h"
-#include "ggml-cpu-quants.h"
+#include "simd-mappings.h"
+#include "quants.h"
 #include "ggml-quants.h"
 #include <algorithm>
 #include <type_traits>
@@ -453,7 +454,7 @@ void quantize_row_q8_K_vnni(const float * RESTRICT x, void * RESTRICT vy, int64_

     // Quantize these floats
     const float iscale = 127.f / amax;
-    y[i].d = GGML_FP32_TO_FP16(1 / iscale);
+    y[i].d = GGML_CPU_FP32_TO_FP16(1 / iscale);
     const float id = ( amax != 0.0f ) ? iscale : 0.f;
     const __m512 vscale = _mm512_set1_ps(id);
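// In plain C, the per-block scale logic in the hunk above amounts to the
// following sketch (names as in the code; assumes amax > 0):
//
//   const float iscale = 127.f / amax;   // map the block onto the signed 8-bit range
//   y[i].d = 1 / iscale;                 // store d = amax/127 (as fp16) for dequantization
//   // quantize:   q     = nearbyintf(x * iscale)
//   // dequantize: x_hat = q * y[i].d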
@@ -1090,7 +1091,7 @@ struct acc_C {
         const __m512 vd0 = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)((const char *)packed_B + offset)));

         for (int m = 0; m < nr; ++m) {
-            const __m512 vd1 = _mm512_set1_ps(GGML_FP16_TO_FP32(A[m * lda].d));
+            const __m512 vd1 = _mm512_set1_ps(GGML_CPU_FP16_TO_FP32(A[m * lda].d));
             const __m512 vtile = _mm512_cvtepi32_ps(_mm512_loadu_si512(tile + m * TILE_N));

             __m512 vsum;
@@ -1113,8 +1114,8 @@ struct acc_C {
         const __m512 vm0 = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)((const char *)packed_B + offset + TILE_N * sizeof(ggml_half))));

         for (int m = 0; m < nr; ++m) {
-            const __m512 vd1 = _mm512_set1_ps(GGML_FP16_TO_FP32(A[m * lda].d));
-            const __m512 vs1 = _mm512_set1_ps(GGML_FP16_TO_FP32(A[m * lda].s));
+            const __m512 vd1 = _mm512_set1_ps(GGML_CPU_FP16_TO_FP32(A[m * lda].d));
+            const __m512 vs1 = _mm512_set1_ps(GGML_CPU_FP16_TO_FP32(A[m * lda].s));
             const __m512 vtile = _mm512_cvtepi32_ps(_mm512_loadu_si512(tile + m * TILE_N));

             __m512 vsum;
@@ -1137,7 +1138,7 @@ struct acc_C {
         const __m512 vd0 = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)((const char *)packed_B + offset)));

         for (int m = 0; m < nr; ++m) {
-            const __m512 vd1 = _mm512_set1_ps(GGML_FP16_TO_FP32(A[m * lda].d));
+            const __m512 vd1 = _mm512_set1_ps(GGML_CPU_FP16_TO_FP32(A[m * lda].d));
             const __m512 vtile = _mm512_cvtepi32_ps(_mm512_loadu_si512(tile + m * TILE_N));

             __m512 vsum;
@@ -1437,7 +1438,7 @@ struct tinygemm_kernel_vnni
             for (int k = 0; k < 8; ++k) {
                 va[k] = _mm512_set1_epi32(a_ptr[k]);
             }
-            vd1 = _mm512_set1_ps(GGML_FP16_TO_FP32(A[0 * KB + i].d));
-            vs1 = _mm512_set1_ps(GGML_FP16_TO_FP32(A[0 * KB + i].s));
+            vd1 = _mm512_set1_ps(GGML_CPU_FP16_TO_FP32(A[0 * KB + i].d));
+            vs1 = _mm512_set1_ps(GGML_CPU_FP16_TO_FP32(A[0 * KB + i].s));
         }

         // load b
@@ -1571,7 +1572,7 @@ struct tinygemm_kernel_vnni
+#elif defined(__APPLE__)
+#include <sys/sysctl.h>
+#endif
+
+#if !defined(HWCAP2_I8MM)
+#define HWCAP2_I8MM (1 << 13)
+#endif
+
+#if !defined(HWCAP2_SME)
+#define HWCAP2_SME (1 << 23)
+#endif
+
+struct aarch64_features {
+    // has_neon not needed, aarch64 has NEON guaranteed
+    bool has_dotprod = false;
+    bool has_fp16_va = false;
+    bool has_sve = false;
+    bool has_sve2 = false;
+    bool has_i8mm = false;
+    bool has_sme = false;
+
+    aarch64_features() {
+#if defined(__linux__)
+        uint32_t hwcap = getauxval(AT_HWCAP);
+        uint32_t hwcap2 = getauxval(AT_HWCAP2);
+
+        has_dotprod = !!(hwcap & HWCAP_ASIMDDP);
+        has_fp16_va = !!(hwcap & HWCAP_FPHP);
+        has_sve = !!(hwcap & HWCAP_SVE);
+        has_sve2 = !!(hwcap2 & HWCAP2_SVE2);
+        has_i8mm = !!(hwcap2 & HWCAP2_I8MM);
+        has_sme = !!(hwcap2 & HWCAP2_SME);
+#elif defined(__APPLE__)
+        int oldp = 0;
+        size_t size = sizeof(oldp);
+
+        if (sysctlbyname("hw.optional.arm.FEAT_DotProd", &oldp, &size, NULL, 0) == 0) {
+            has_dotprod = static_cast<bool>(oldp);
+        }
+
+        if (sysctlbyname("hw.optional.arm.FEAT_I8MM", &oldp, &size, NULL, 0) == 0) {
+            has_i8mm = static_cast<bool>(oldp);
+        }
+
+        if (sysctlbyname("hw.optional.arm.FEAT_SME", &oldp, &size, NULL, 0) == 0) {
+            has_sme = static_cast<bool>(oldp);
+        }
+
+        // Apple apparently does not implement SVE yet
+#endif
+    }
+};
+
+static int ggml_backend_cpu_aarch64_score() {
+    int score = 1;
+    aarch64_features af;
+
+#ifdef GGML_USE_DOTPROD
+    if (!af.has_dotprod) { return 0; }
+    score += 1<<1;
+#endif
+#ifdef GGML_USE_FP16_VECTOR_ARITHMETIC
+    if (!af.has_fp16_va) { return 0; }
+    score += 1<<2;
+#endif
+#ifdef GGML_USE_SVE
+    if (!af.has_sve) { return 0; }
+    score += 1<<3;
+#endif
+#ifdef GGML_USE_MATMUL_INT8
+    if (!af.has_i8mm) { return 0; }
+    score += 1<<4;
+#endif
+#ifdef GGML_USE_SVE2
+    if (!af.has_sve2) { return 0; }
+    score += 1<<5;
+#endif
+#ifdef GGML_USE_SME
+    if (!af.has_sme) { return 0; }
+    score += 1<<6;
+#endif
+
+    return score;
+}
+
+GGML_BACKEND_DL_SCORE_IMPL(ggml_backend_cpu_aarch64_score)
+
+# endif // defined(__aarch64__)
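// A sketch of how these scores drive variant selection at load time: every
// feature compiled into a variant must be present on the running CPU (else the
// score is 0 and the variant is skipped), and each present feature contributes
// a distinct power of two, so the most featureful supported variant always
// scores highest. For example, on a CPU with dotprod and fp16 but no SVE:
//
//   armv8.0_1 (baseline)           -> 1
//   armv8.2_1 (DOTPROD)            -> 1 + (1<<1)          = 3
//   armv8.2_2 (DOTPROD, FP16)      -> 1 + (1<<1) + (1<<2) = 7   (selected)
//   armv8.2_3 (DOTPROD, FP16, SVE) -> 0   (SVE missing)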
diff --git a/ml/backend/ggml/ggml/src/ggml-cpu/arch/arm/quants.c b/ml/backend/ggml/ggml/src/ggml-cpu/arch/arm/quants.c
new file mode 100644
index 000000000..aadbb487e
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-cpu/arch/arm/quants.c
@@ -0,0 +1,3650 @@
+#define GGML_COMMON_IMPL_C
+#include "ggml-common.h"
+#include "ggml-quants.h"
+#include "ggml-impl.h"
+#include "ggml-cpu.h"
+#include "simd-mappings.h"
+
+#include "../../quants.h"
+#include "../../ggml-cpu-impl.h"
+
+#include <math.h>
+#include <string.h>
+#include <assert.h>
+#include <float.h>
+#include <stdlib.h> // for qsort
+#include <stdio.h>  // for GGML_ASSERT
+
+#define GROUP_MAX_EPS 1e-15f
+#define GROUP_MAX_EPS_IQ3_XXS 1e-8f
+#define GROUP_MAX_EPS_IQ2_S 1e-8f
+#define GROUP_MAX_EPS_IQ1_M 1e-7f
+#define GROUP_MAX_EPS_IQ1_S 1e-12f
+
+#define UNUSED GGML_UNUSED
+
+#if defined(__ARM_NEON)
+#define B1(c,s,n)  0x ## n ## c ,  0x ## n ## s
+#define B2(c,s,n) B1(c,s,n ## c), B1(c,s,n ## s)
+#define B3(c,s,n) B2(c,s,n ## c), B2(c,s,n ## s)
+#define B4(c,s,n) B3(c,s,n ## c), B3(c,s,n ## s)
+#define B5(c,s,n) B4(c,s,n ## c), B4(c,s,n ## s)
+#define B6(c,s,n) B5(c,s,n ## c), B5(c,s,n ## s)
+#define B7(c,s,n) B6(c,s,n ## c), B6(c,s,n ## s)
+#define B8(c,s  ) B7(c,s,     c), B7(c,s,     s)
+
+// precomputed tables for expanding 8bits to 8 bytes:
+static const uint64_t table_b2b_0[1 << 8] = { B8(00, 10) }; // ( b) << 4
+static const uint64_t table_b2b_1[1 << 8] = { B8(10, 00) }; // (!b) << 4
+#endif
+
+void quantize_row_q8_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) {
+    assert(QK8_0 == 32);
+    assert(k % QK8_0 == 0);
+    const int nb = k / QK8_0;
+
+    block_q8_0 * GGML_RESTRICT y = vy;
+
+#if defined(__ARM_NEON)
+    for (int i = 0; i < nb; i++) {
+        float32x4_t srcv [8];
+        float32x4_t asrcv[8];
+        float32x4_t amaxv[8];
+
+        for (int j = 0; j < 8; j++) srcv[j]  = vld1q_f32(x + i*32 + 4*j);
+        for (int j = 0; j < 8; j++) asrcv[j] = vabsq_f32(srcv[j]);
+
+        for (int j = 0; j < 4; j++) amaxv[2*j] = vmaxq_f32(asrcv[2*j], asrcv[2*j+1]);
+        for (int j = 0; j < 2; j++) amaxv[4*j] = vmaxq_f32(amaxv[4*j], amaxv[4*j+2]);
+        for (int j = 0; j < 1; j++) amaxv[8*j] = vmaxq_f32(amaxv[8*j], amaxv[8*j+4]);
+
+        const float amax = vmaxvq_f32(amaxv[0]);
+
+        const float d = amax / ((1 << 7) - 1);
+        const float id = d ?
1.0f/d : 0.0f; + + y[i].d = GGML_CPU_FP32_TO_FP16(d); + + for (int j = 0; j < 8; j++) { + const float32x4_t v = vmulq_n_f32(srcv[j], id); + const int32x4_t vi = vcvtnq_s32_f32(v); + + y[i].qs[4*j + 0] = vgetq_lane_s32(vi, 0); + y[i].qs[4*j + 1] = vgetq_lane_s32(vi, 1); + y[i].qs[4*j + 2] = vgetq_lane_s32(vi, 2); + y[i].qs[4*j + 3] = vgetq_lane_s32(vi, 3); + } + } +#else + GGML_UNUSED(nb); + // scalar + quantize_row_q8_0_ref(x, y, k); +#endif +} + +void quantize_row_q8_1(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { + assert(k % QK8_1 == 0); + const int nb = k / QK8_1; + + block_q8_1 * GGML_RESTRICT y = vy; +#if defined(__ARM_NEON) + for (int i = 0; i < nb; i++) { + float32x4_t srcv [8]; + float32x4_t asrcv[8]; + float32x4_t amaxv[8]; + + for (int j = 0; j < 8; j++) srcv[j] = vld1q_f32(x + i*32 + 4*j); + for (int j = 0; j < 8; j++) asrcv[j] = vabsq_f32(srcv[j]); + + for (int j = 0; j < 4; j++) amaxv[2*j] = vmaxq_f32(asrcv[2*j], asrcv[2*j+1]); + for (int j = 0; j < 2; j++) amaxv[4*j] = vmaxq_f32(amaxv[4*j], amaxv[4*j+2]); + for (int j = 0; j < 1; j++) amaxv[8*j] = vmaxq_f32(amaxv[8*j], amaxv[8*j+4]); + + const float amax = vmaxvq_f32(amaxv[0]); + + const float d = amax / ((1 << 7) - 1); + const float id = d ? 1.0f/d : 0.0f; + + y[i].d = GGML_CPU_FP32_TO_FP16(d); + + int32x4_t accv = vdupq_n_s32(0); + + for (int j = 0; j < 8; j++) { + const float32x4_t v = vmulq_n_f32(srcv[j], id); + const int32x4_t vi = vcvtnq_s32_f32(v); + + y[i].qs[4*j + 0] = vgetq_lane_s32(vi, 0); + y[i].qs[4*j + 1] = vgetq_lane_s32(vi, 1); + y[i].qs[4*j + 2] = vgetq_lane_s32(vi, 2); + y[i].qs[4*j + 3] = vgetq_lane_s32(vi, 3); + + accv = vaddq_s32(accv, vi); + } + + y[i].s = GGML_CPU_FP32_TO_FP16(d * vaddvq_s32(accv)); + } +#else + GGML_UNUSED(nb); + // scalar + quantize_row_q8_1_ref(x, y, k); +#endif +} + +// placeholder implementation for Apple targets +void quantize_row_q8_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k) { + quantize_row_q8_K_ref(x, y, k); +} + +//===================================== Dot products ================================= + +void ggml_vec_dot_q4_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + const int qk = QK8_0; + const int nb = n / qk; + + assert(n % qk == 0); +#if defined(__ARM_FEATURE_MATMUL_INT8) + assert((nrc == 2) || (nrc == 1)); +#else + assert(nrc == 1); +#endif + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q4_0 * GGML_RESTRICT x = vx; + const block_q8_0 * GGML_RESTRICT y = vy; + +#if defined(__ARM_FEATURE_MATMUL_INT8) + if (nrc == 2) { + const block_q4_0 * GGML_RESTRICT vx0 = vx; + const block_q4_0 * GGML_RESTRICT vx1 = (const block_q4_0 *) ((const uint8_t*)vx + bx); + const block_q8_0 * GGML_RESTRICT vy0 = vy; + const block_q8_0 * GGML_RESTRICT vy1 = (const block_q8_0 *) ((const uint8_t*)vy + by); + + float32x4_t sumv0 = vdupq_n_f32(0.0f); + + for (int i = 0; i < nb; i++) { + const block_q4_0 * GGML_RESTRICT b_x0 = &vx0[i]; + const block_q4_0 * GGML_RESTRICT b_x1 = &vx1[i]; + const block_q8_0 * GGML_RESTRICT b_y0 = &vy0[i]; + const block_q8_0 * GGML_RESTRICT b_y1 = &vy1[i]; + + const uint8x16_t m4b = vdupq_n_u8(0x0F); + const int8x16_t s8b = vdupq_n_s8(0x8); + + const uint8x16_t v0_0 = vld1q_u8(b_x0->qs); + const uint8x16_t v0_1 = vld1q_u8(b_x1->qs); + + // 4-bit -> 8-bit + const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b)); + const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4)); + const 
int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b)); + const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4)); + + // sub 8 + const int8x16_t x0_l = vsubq_s8(v0_0l, s8b); + const int8x16_t x0_h = vsubq_s8(v0_0h, s8b); + const int8x16_t x1_l = vsubq_s8(v0_1l, s8b); + const int8x16_t x1_h = vsubq_s8(v0_1h, s8b); + + // load y + const int8x16_t y0_l = vld1q_s8(b_y0->qs); + const int8x16_t y0_h = vld1q_s8(b_y0->qs + 16); + const int8x16_t y1_l = vld1q_s8(b_y1->qs); + const int8x16_t y1_h = vld1q_s8(b_y1->qs + 16); + + float32_t _scale[4] = { + GGML_CPU_FP16_TO_FP32(b_x0->d)*GGML_CPU_FP16_TO_FP32(b_y0->d), + GGML_CPU_FP16_TO_FP32(b_x0->d)*GGML_CPU_FP16_TO_FP32(b_y1->d), + GGML_CPU_FP16_TO_FP32(b_x1->d)*GGML_CPU_FP16_TO_FP32(b_y0->d), + GGML_CPU_FP16_TO_FP32(b_x1->d)*GGML_CPU_FP16_TO_FP32(b_y1->d) + }; + float32x4_t scale = vld1q_f32(_scale); + + int8x16_t l0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l))); + int8x16_t l1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l))); + + int8x16_t l2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_h), vreinterpretq_s64_s8(x1_h))); + int8x16_t l3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_h), vreinterpretq_s64_s8(x1_h))); + + int8x16_t r0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l))); + int8x16_t r1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l))); + + int8x16_t r2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h))); + int8x16_t r3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h))); + + sumv0 = vmlaq_f32(sumv0,(vcvtq_f32_s32(vmmlaq_s32((vmmlaq_s32((vmmlaq_s32((vmmlaq_s32(vdupq_n_s32(0), l0, r0)), + l1, r1)), l2, r2)), l3, r3))), scale); + } + + float32x4_t sumv1 = vextq_f32 (sumv0, sumv0, 2); + float32x4_t sumv2 = vzip1q_f32(sumv0, sumv1); + + vst1_f32(s, vget_low_f32 (sumv2)); + vst1_f32(s + bs, vget_high_f32(sumv2)); + + return; + } +#endif + + int ib = 0; + float sumf = 0; + +#if defined(__ARM_FEATURE_SVE) + svfloat32_t sumv0 = svdup_n_f32(0.0f); + svfloat32_t sumv1 = svdup_n_f32(0.0f); + + const int vector_length = ggml_cpu_get_sve_cnt()*8; + + // VLA Implementation using switch case + switch (vector_length) { + case 128: + { + // predicate for activating higher lanes for 4 float32 elements + const svbool_t ph4 = svptrue_pat_b32(SV_VL4); + + for (; ib + 1 < nb; ib += 2) { + const block_q4_0 * GGML_RESTRICT x0 = &x[ib + 0]; + const block_q4_0 * GGML_RESTRICT x1 = &x[ib + 1]; + const block_q8_0 * GGML_RESTRICT y0 = &y[ib + 0]; + const block_q8_0 * GGML_RESTRICT y1 = &y[ib + 1]; + + // load x + const svuint8_t qx0r = svld1rq_u8(svptrue_b8(), x0->qs); + const svuint8_t qx1r = svld1rq_u8(svptrue_b8(), x1->qs); + + // 4-bit -> 8-bit + const svint8_t qx0l = svreinterpret_s8_u8(svand_n_u8_m(svptrue_b8(), qx0r, 0x0F)); + const svint8_t qx0h = svreinterpret_s8_u8(svlsr_n_u8_m(svptrue_b8(), qx0r, 0x04)); + const svint8_t qx1l = svreinterpret_s8_u8(svand_n_u8_m(svptrue_b8(), qx1r, 0x0F)); + const svint8_t qx1h = svreinterpret_s8_u8(svlsr_n_u8_m(svptrue_b8(), qx1r, 0x04)); + + // sub 8 + const svint8_t qx0ls = svsub_n_s8_x(svptrue_b8(), qx0h, 8); + const svint8_t qx0hs = svsub_n_s8_x(svptrue_b8(), qx0l, 8); + const svint8_t qx1ls = svsub_n_s8_x(svptrue_b8(), qx1h, 8); + const svint8_t qx1hs = svsub_n_s8_x(svptrue_b8(), qx1l, 8); + + // load y + const svint8_t qy0h = 
svld1_s8(svptrue_b8(), y0->qs); + const svint8_t qy0l = svld1_s8(svptrue_b8(), y0->qs + 16); + const svint8_t qy1h = svld1_s8(svptrue_b8(), y1->qs); + const svint8_t qy1l = svld1_s8(svptrue_b8(), y1->qs + 16); + + // dot product + sumv0 = svmla_n_f32_x(ph4, sumv0, svcvt_f32_s32_x(ph4, svadd_x(ph4, + svdot_s32(svdup_n_s32(0), qx0ls, qy0l), + svdot_s32(svdup_n_s32(0), qx0hs, qy0h))), GGML_CPU_FP16_TO_FP32(x0->d)*GGML_CPU_FP16_TO_FP32(y0->d)); + sumv1 = svmla_n_f32_x(ph4, sumv1, svcvt_f32_s32_x(ph4, svadd_x(ph4, + svdot_s32(svdup_n_s32(0), qx1ls, qy1l), + svdot_s32(svdup_n_s32(0), qx1hs, qy1h))), GGML_CPU_FP16_TO_FP32(x1->d)*GGML_CPU_FP16_TO_FP32(y1->d)); + } + + sumf = svaddv_f32(svptrue_b32(), svadd_f32_x(svptrue_b32(), sumv0, sumv1)); + } break; + case 256: + { + // predicate for activating higher lanes for 16 int8 elements + const svbool_t ph16 = svptrue_pat_b8(SV_VL16); + // predicate for activating lower lanes for 16 int8 elements + const svbool_t pl16 = svnot_b_z(svptrue_b8(), ph16); + + for (; ib + 1 < nb; ib += 2) { + const block_q4_0 * GGML_RESTRICT x0 = &x[ib + 0]; + const block_q4_0 * GGML_RESTRICT x1 = &x[ib + 1]; + const block_q8_0 * GGML_RESTRICT y0 = &y[ib + 0]; + const block_q8_0 * GGML_RESTRICT y1 = &y[ib + 1]; + + // load x + const svuint8_t qx0r = svld1rq_u8(svptrue_b8(), x0->qs); + const svuint8_t qx1r = svld1rq_u8(svptrue_b8(), x1->qs); + + // 4-bit -> 8-bit + const svint8_t qx0 = svreinterpret_s8_u8(svlsr_n_u8_m(pl16, svand_n_u8_m(ph16, qx0r, 0x0F), 0x04)); + const svint8_t qx1 = svreinterpret_s8_u8(svlsr_n_u8_m(pl16, svand_n_u8_m(ph16, qx1r, 0x0F), 0x04)); + + // sub 8 + const svint8_t qx0s = svsub_n_s8_x(svptrue_b8(), qx0, 8); + const svint8_t qx1s = svsub_n_s8_x(svptrue_b8(), qx1, 8); + + // load y + const svint8_t qy0 = svld1_s8(svptrue_b8(), y0->qs); + const svint8_t qy1 = svld1_s8(svptrue_b8(), y1->qs); + + // dot product + sumv0 = svmla_n_f32_x(svptrue_b32(), sumv0, svcvt_f32_s32_x(svptrue_b32(), + svdot_s32(svdup_n_s32(0), qx0s, qy0)), GGML_CPU_FP16_TO_FP32(x0->d)*GGML_CPU_FP16_TO_FP32(y0->d)); + sumv1 = svmla_n_f32_x(svptrue_b32(), sumv1, svcvt_f32_s32_x(svptrue_b32(), + svdot_s32(svdup_n_s32(0), qx1s, qy1)), GGML_CPU_FP16_TO_FP32(x1->d)*GGML_CPU_FP16_TO_FP32(y1->d)); + } + + sumf = svaddv_f32(svptrue_b32(), svadd_f32_x(svptrue_b32(), sumv0, sumv1)); + } break; + case 512: + { + // predicate for activating higher lanes for 32 int8 elements + const svbool_t ph32 = svptrue_pat_b8(SV_VL32); + + // predicate for activating higher lanes for 16 int8 elements + const svbool_t ph16 = svptrue_pat_b8(SV_VL16); + // predicate for activating lower lanes for 16 int8 elements from first 32 int8 activated lanes + const svbool_t pl16 = svnot_b_z(ph32, ph16); + + for (; ib + 1 < nb; ib += 2) { + const block_q4_0 * GGML_RESTRICT x0 = &x[ib + 0]; + const block_q4_0 * GGML_RESTRICT x1 = &x[ib + 1]; + const block_q8_0 * GGML_RESTRICT y0 = &y[ib + 0]; + const block_q8_0 * GGML_RESTRICT y1 = &y[ib + 1]; + + // load x + const svuint8_t qx0r = svld1rq_u8(ph32, x0->qs); + const svuint8_t qx1r = svld1rq_u8(ph32, x1->qs); + + // 4-bit -> 8-bit + const svint8_t qx0 = svreinterpret_s8_u8(svlsr_n_u8_m(pl16, svand_n_u8_m(ph16, qx0r, 0x0F), 0x04)); + const svint8_t qx1 = svreinterpret_s8_u8(svlsr_n_u8_m(pl16, svand_n_u8_m(ph16, qx1r, 0x0F), 0x04)); + + // sub 8 + const svint8_t qx0s = svsub_n_s8_x(ph32, qx0, 8); + const svint8_t qx1s = svsub_n_s8_x(ph32, qx1, 8); + + // load y + const svint8_t qy0 = svld1_s8(ph32, y0->qs); + const svint8_t qy1 = svld1_s8(ph32, y1->qs); + + // dot product + 
sumv0 = svmla_n_f32_x(ph32, sumv0, svcvt_f32_s32_x(ph32, + svdot_s32(svdup_n_s32(0), qx0s, qy0)), GGML_CPU_FP16_TO_FP32(x0->d)*GGML_CPU_FP16_TO_FP32(y0->d)); + sumv1 = svmla_n_f32_x(ph32, sumv1, svcvt_f32_s32_x(ph32, + svdot_s32(svdup_n_s32(0), qx1s, qy1)), GGML_CPU_FP16_TO_FP32(x1->d)*GGML_CPU_FP16_TO_FP32(y1->d)); + } + + sumf = svaddv_f32(ph32, svadd_f32_x(ph32, sumv0, sumv1)); + } break; + default: + assert(false && "Unsupported vector length"); + break; + } + +#elif defined(__ARM_NEON) + float32x4_t sumv0 = vdupq_n_f32(0.0f); + float32x4_t sumv1 = vdupq_n_f32(0.0f); + + for (; ib + 1 < nb; ib += 2) { + const block_q4_0 * GGML_RESTRICT x0 = &x[ib + 0]; + const block_q4_0 * GGML_RESTRICT x1 = &x[ib + 1]; + const block_q8_0 * GGML_RESTRICT y0 = &y[ib + 0]; + const block_q8_0 * GGML_RESTRICT y1 = &y[ib + 1]; + + const uint8x16_t m4b = vdupq_n_u8(0x0F); + const int8x16_t s8b = vdupq_n_s8(0x8); + + const uint8x16_t v0_0 = vld1q_u8(x0->qs); + const uint8x16_t v0_1 = vld1q_u8(x1->qs); + + // 4-bit -> 8-bit + const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b)); + const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4)); + const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b)); + const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4)); + + // sub 8 + const int8x16_t v0_0ls = vsubq_s8(v0_0l, s8b); + const int8x16_t v0_0hs = vsubq_s8(v0_0h, s8b); + const int8x16_t v0_1ls = vsubq_s8(v0_1l, s8b); + const int8x16_t v0_1hs = vsubq_s8(v0_1h, s8b); + + // load y + const int8x16_t v1_0l = vld1q_s8(y0->qs); + const int8x16_t v1_0h = vld1q_s8(y0->qs + 16); + const int8x16_t v1_1l = vld1q_s8(y1->qs); + const int8x16_t v1_1h = vld1q_s8(y1->qs + 16); + + // dot product into int32x4_t + const int32x4_t p_0 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), v0_0ls, v1_0l), v0_0hs, v1_0h); + const int32x4_t p_1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), v0_1ls, v1_1l), v0_1hs, v1_1h); + + sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(p_0), GGML_CPU_FP16_TO_FP32(x0->d)*GGML_CPU_FP16_TO_FP32(y0->d)); + sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(p_1), GGML_CPU_FP16_TO_FP32(x1->d)*GGML_CPU_FP16_TO_FP32(y1->d)); + } + + sumf = vaddvq_f32(sumv0) + vaddvq_f32(sumv1); +#endif + for (; ib < nb; ++ib) { + int sumi0 = 0; + int sumi1 = 0; + + for (int j = 0; j < qk/2; ++j) { + const int v0 = (x[ib].qs[j] & 0x0F) - 8; + const int v1 = (x[ib].qs[j] >> 4) - 8; + + sumi0 += (v0 * y[ib].qs[j]); + sumi1 += (v1 * y[ib].qs[j + qk/2]); + } + + int sumi = sumi0 + sumi1; + sumf += sumi*GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d); + } + + *s = sumf; +} + +void ggml_vec_dot_q4_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + const int qk = QK8_1; + const int nb = n / qk; + + assert(n % qk == 0); +#if defined(__ARM_FEATURE_MATMUL_INT8) + assert((nrc == 2) || (nrc == 1)); +#else + assert(nrc == 1); +#endif + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q4_1 * GGML_RESTRICT x = vx; + const block_q8_1 * GGML_RESTRICT y = vy; + +#if defined(__ARM_FEATURE_MATMUL_INT8) + if (nrc == 2) { + const block_q4_1 * GGML_RESTRICT vx0 = vx; + const block_q4_1 * GGML_RESTRICT vx1 = (const block_q4_1 *) ((const uint8_t*)vx + bx); + const block_q8_1 * GGML_RESTRICT vy0 = vy; + const block_q8_1 * GGML_RESTRICT vy1 = (const block_q8_1 *) ((const uint8_t*)vy + by); + + float32x4_t sumv0 = vdupq_n_f32(0.0f); + float32x4_t summs0 = vdupq_n_f32(0.0f); + + for (int i = 0; i < nb; i++) { 
+ const block_q4_1 * GGML_RESTRICT b_x0 = &vx0[i]; + const block_q4_1 * GGML_RESTRICT b_x1 = &vx1[i]; + const block_q8_1 * GGML_RESTRICT b_y0 = &vy0[i]; + const block_q8_1 * GGML_RESTRICT b_y1 = &vy1[i]; + + float32_t summs_t[4] = { + GGML_CPU_FP16_TO_FP32(b_x0->m) * GGML_CPU_FP16_TO_FP32(b_y0->s), + GGML_CPU_FP16_TO_FP32(b_x1->m) * GGML_CPU_FP16_TO_FP32(b_y0->s), + GGML_CPU_FP16_TO_FP32(b_x0->m) * GGML_CPU_FP16_TO_FP32(b_y1->s), + GGML_CPU_FP16_TO_FP32(b_x1->m) * GGML_CPU_FP16_TO_FP32(b_y1->s) + }; + summs0 = vaddq_f32(summs0, vld1q_f32(summs_t)); + + const uint8x16_t m4b = vdupq_n_u8(0x0F); + + const uint8x16_t v0_0 = vld1q_u8(b_x0->qs); + const uint8x16_t v0_1 = vld1q_u8(b_x1->qs); + + // 4-bit -> 8-bit + const int8x16_t x0_l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b)); + const int8x16_t x0_h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4)); + const int8x16_t x1_l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b)); + const int8x16_t x1_h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4)); + + // load y + const int8x16_t y0_l = vld1q_s8(b_y0->qs); + const int8x16_t y0_h = vld1q_s8(b_y0->qs + 16); + const int8x16_t y1_l = vld1q_s8(b_y1->qs); + const int8x16_t y1_h = vld1q_s8(b_y1->qs + 16); + + // mmla into int32x4_t + float32_t _scale[4] = { + GGML_CPU_FP16_TO_FP32(b_x0->d)*GGML_CPU_FP16_TO_FP32(b_y0->d), + GGML_CPU_FP16_TO_FP32(b_x0->d)*GGML_CPU_FP16_TO_FP32(b_y1->d), + GGML_CPU_FP16_TO_FP32(b_x1->d)*GGML_CPU_FP16_TO_FP32(b_y0->d), + GGML_CPU_FP16_TO_FP32(b_x1->d)*GGML_CPU_FP16_TO_FP32(b_y1->d) + }; + float32x4_t scale = vld1q_f32(_scale); + + int8x16_t l0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l))); + int8x16_t l1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l))); + + int8x16_t l2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_h), vreinterpretq_s64_s8(x1_h))); + int8x16_t l3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_h), vreinterpretq_s64_s8(x1_h))); + + int8x16_t r0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l))); + int8x16_t r1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l))); + + int8x16_t r2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h))); + int8x16_t r3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h))); + sumv0 = vmlaq_f32(sumv0,(vcvtq_f32_s32(vmmlaq_s32((vmmlaq_s32((vmmlaq_s32((vmmlaq_s32(vdupq_n_s32(0), l0, r0)), + l1, r1)), l2, r2)), l3, r3))), scale); + } + + float32x4_t sumv1 = vextq_f32 (sumv0, sumv0, 2); + float32x4_t sumv2 = vzip1q_f32(sumv0, sumv1); + + sumv2 = vaddq_f32(sumv2, summs0); + + vst1_f32(s, vget_low_f32 (sumv2)); + vst1_f32(s + bs, vget_high_f32(sumv2)); + + return; + } +#endif + + int ib = 0; + float sumf = 0; + +#if defined(__ARM_NEON) + float32x4_t sumv0 = vdupq_n_f32(0.0f); + float32x4_t sumv1 = vdupq_n_f32(0.0f); + + float summs = 0; + + for (; ib + 1 < nb; ib += 2) { + const block_q4_1 * GGML_RESTRICT x0 = &x[ib + 0]; + const block_q4_1 * GGML_RESTRICT x1 = &x[ib + 1]; + const block_q8_1 * GGML_RESTRICT y0 = &y[ib + 0]; + const block_q8_1 * GGML_RESTRICT y1 = &y[ib + 1]; + + summs += GGML_CPU_FP16_TO_FP32(x0->m) * GGML_CPU_FP16_TO_FP32(y0->s) + GGML_CPU_FP16_TO_FP32(x1->m) * GGML_CPU_FP16_TO_FP32(y1->s); + + const uint8x16_t m4b = vdupq_n_u8(0x0F); + + const uint8x16_t v0_0 = vld1q_u8(x0->qs); + const uint8x16_t v0_1 = vld1q_u8(x1->qs); + + // 4-bit -> 8-bit + const int8x16_t v0_0l = 
+        const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
+        const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
+        const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));
+
+        // load y
+        const int8x16_t v1_0l = vld1q_s8(y0->qs);
+        const int8x16_t v1_0h = vld1q_s8(y0->qs + 16);
+        const int8x16_t v1_1l = vld1q_s8(y1->qs);
+        const int8x16_t v1_1h = vld1q_s8(y1->qs + 16);
+
+        // dot product into int32x4_t
+        const int32x4_t p_0 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), v0_0l, v1_0l), v0_0h, v1_0h);
+        const int32x4_t p_1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), v0_1l, v1_1l), v0_1h, v1_1h);
+
+        sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(p_0), GGML_CPU_FP16_TO_FP32(x0->d)*GGML_CPU_FP16_TO_FP32(y0->d));
+        sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(p_1), GGML_CPU_FP16_TO_FP32(x1->d)*GGML_CPU_FP16_TO_FP32(y1->d));
+    }
+
+    sumf = vaddvq_f32(sumv0) + vaddvq_f32(sumv1) + summs;
+
+#endif
+    for (; ib < nb; ++ib) {
+        int sumi0 = 0;
+        int sumi1 = 0;
+
+        for (int j = 0; j < qk/2; ++j) {
+            const int v0 = (x[ib].qs[j] & 0x0F);
+            const int v1 = (x[ib].qs[j] >> 4);
+
+            sumi0 += (v0 * y[ib].qs[j]);
+            sumi1 += (v1 * y[ib].qs[j + qk/2]);
+        }
+
+        int sumi = sumi0 + sumi1;
+        sumf += (GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d))*sumi + GGML_CPU_FP16_TO_FP32(x[ib].m)*GGML_CPU_FP16_TO_FP32(y[ib].s);
+    }
+
+    *s = sumf;
+}
+
+void ggml_vec_dot_mxfp4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
+    assert(nrc == 1);
+    UNUSED(nrc);
+    UNUSED(bx);
+    UNUSED(by);
+    UNUSED(bs);
+    assert(n % QK_MXFP4 == 0);
+    static_assert(QK_MXFP4 == QK8_0, "QK_MXFP4 and QK8_0 must be the same");
+
+    const block_mxfp4 * GGML_RESTRICT x = vx;
+    const block_q8_0 * GGML_RESTRICT y = vy;
+
+    const int nb = n / QK_MXFP4;
+
+    int ib = 0;
+    float sumf = 0;
+
+#if defined __ARM_NEON
+    const int8x16_t values = vld1q_s8(kvalues_mxfp4);
+    const uint8x16_t m4b = vdupq_n_u8(0x0f);
+    uint8x16x2_t q4bits;
+    int8x16x4_t q4b;
+    int8x16x4_t q8b;
+    int32x4_t prod_1;
+    int32x4_t prod_2;
+
+    for (; ib + 1 < nb; ib += 2) {
+        q4bits.val[0] = vld1q_u8(x[ib + 0].qs);
+        q4bits.val[1] = vld1q_u8(x[ib + 1].qs);
+        q8b.val[0] = vld1q_s8(y[ib + 0].qs);
+        q8b.val[1] = vld1q_s8(y[ib + 0].qs + 16);
+        q8b.val[2] = vld1q_s8(y[ib + 1].qs);
+        q8b.val[3] = vld1q_s8(y[ib + 1].qs + 16);
+
+        q4b.val[0] = ggml_vqtbl1q_s8(values, vandq_u8 (q4bits.val[0], m4b));
+        q4b.val[1] = ggml_vqtbl1q_s8(values, vshrq_n_u8(q4bits.val[0], 4));
+        q4b.val[2] = ggml_vqtbl1q_s8(values, vandq_u8 (q4bits.val[1], m4b));
+        q4b.val[3] = ggml_vqtbl1q_s8(values, vshrq_n_u8(q4bits.val[1], 4));
+
+        prod_1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q4b.val[0], q8b.val[0]), q4b.val[1], q8b.val[1]);
+        prod_2 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q4b.val[2], q8b.val[2]), q4b.val[3], q8b.val[3]);
+
+        sumf +=
+            GGML_E8M0_TO_FP32_HALF(x[ib + 0].e) * GGML_CPU_FP16_TO_FP32(y[ib + 0].d) * vaddvq_s32(prod_1) +
+            GGML_E8M0_TO_FP32_HALF(x[ib + 1].e) * GGML_CPU_FP16_TO_FP32(y[ib + 1].d) * vaddvq_s32(prod_2);
+    }
+
+#endif
+    for (; ib < nb; ++ib) {
+        const float d = GGML_CPU_FP16_TO_FP32(y[ib].d)*GGML_E8M0_TO_FP32_HALF(x[ib].e);
+        int sumi1 = 0;
+        int sumi2 = 0;
+        for (int j = 0; j < QK_MXFP4/2; ++j) {
+            sumi1 += y[ib].qs[j + 0] * kvalues_mxfp4[x[ib].qs[j] & 0xf];
+            sumi2 += y[ib].qs[j + QK_MXFP4/2] * kvalues_mxfp4[x[ib].qs[j] >> 4];
+        }
+        sumf += d * (sumi1 + sumi2);
+    }
+    *s = sumf;
+}
+
+void ggml_vec_dot_q5_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
+    const int qk = QK8_0;
+    const int nb = n / qk;
+
+    int ib = 0;
+    float sumf = 0;
+
+    assert(n % qk == 0);
+    assert(qk == QK5_0);
+    assert(nrc == 1);
+    UNUSED(nrc);
+    UNUSED(bx);
+    UNUSED(by);
+    UNUSED(bs);
+
+    const block_q5_0 * GGML_RESTRICT x = vx;
+    const block_q8_0 * GGML_RESTRICT y = vy;
+
+#if defined(__ARM_NEON)
+    float32x4_t sumv0 = vdupq_n_f32(0.0f);
+    float32x4_t sumv1 = vdupq_n_f32(0.0f);
+
+    uint32_t qh0;
+    uint32_t qh1;
+
+    uint64_t tmp0[4];
+    uint64_t tmp1[4];
+
+    for (; ib + 1 < nb; ib += 2) {
+        const block_q5_0 * GGML_RESTRICT x0 = &x[ib];
+        const block_q5_0 * GGML_RESTRICT x1 = &x[ib + 1];
+        const block_q8_0 * GGML_RESTRICT y0 = &y[ib];
+        const block_q8_0 * GGML_RESTRICT y1 = &y[ib + 1];
+
+        const uint8x16_t m4b = vdupq_n_u8(0x0F);
+
+        // extract the 5th bit via lookup table ((!b) << 4)
+        memcpy(&qh0, x0->qh, sizeof(qh0));
+        memcpy(&qh1, x1->qh, sizeof(qh1));
+
+        tmp0[0] = table_b2b_1[(qh0 >>  0) & 0xFF];
+        tmp0[1] = table_b2b_1[(qh0 >>  8) & 0xFF];
+        tmp0[2] = table_b2b_1[(qh0 >> 16) & 0xFF];
+        tmp0[3] = table_b2b_1[(qh0 >> 24)       ];
+
+        tmp1[0] = table_b2b_1[(qh1 >>  0) & 0xFF];
+        tmp1[1] = table_b2b_1[(qh1 >>  8) & 0xFF];
+        tmp1[2] = table_b2b_1[(qh1 >> 16) & 0xFF];
+        tmp1[3] = table_b2b_1[(qh1 >> 24)       ];
+
+        const int8x16_t qhl0 = vld1q_s8((const int8_t *)(tmp0 + 0));
+        const int8x16_t qhh0 = vld1q_s8((const int8_t *)(tmp0 + 2));
+        const int8x16_t qhl1 = vld1q_s8((const int8_t *)(tmp1 + 0));
+        const int8x16_t qhh1 = vld1q_s8((const int8_t *)(tmp1 + 2));
+
+        const uint8x16_t v0_0 = vld1q_u8(x0->qs);
+        const uint8x16_t v0_1 = vld1q_u8(x1->qs);
+
+        // 4-bit -> 8-bit
+        int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b));
+        int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
+        int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
+        int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));
+
+        // add high bit and sub 16 (equivalent to sub 0x10 when bit is zero)
+        const int8x16_t v0_0lf = vsubq_s8(v0_0l, qhl0);
+        const int8x16_t v0_0hf = vsubq_s8(v0_0h, qhh0);
+        const int8x16_t v0_1lf = vsubq_s8(v0_1l, qhl1);
+        const int8x16_t v0_1hf = vsubq_s8(v0_1h, qhh1);
+
+        // load y
+        const int8x16_t v1_0l = vld1q_s8(y0->qs);
+        const int8x16_t v1_0h = vld1q_s8(y0->qs + 16);
+        const int8x16_t v1_1l = vld1q_s8(y1->qs);
+        const int8x16_t v1_1h = vld1q_s8(y1->qs + 16);
+
+        sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(
+                        ggml_vdotq_s32(vdupq_n_s32(0), v0_0lf, v1_0l),
+                        ggml_vdotq_s32(vdupq_n_s32(0), v0_0hf, v1_0h))), GGML_CPU_FP16_TO_FP32(x0->d)*GGML_CPU_FP16_TO_FP32(y0->d));
+        sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(
+                        ggml_vdotq_s32(vdupq_n_s32(0), v0_1lf, v1_1l),
+                        ggml_vdotq_s32(vdupq_n_s32(0), v0_1hf, v1_1h))), GGML_CPU_FP16_TO_FP32(x1->d)*GGML_CPU_FP16_TO_FP32(y1->d));
+    }
+
+    sumf = vaddvq_f32(sumv0) + vaddvq_f32(sumv1);
+
+#endif
+    for (; ib < nb; ++ib) {
+        uint32_t qh;
+        memcpy(&qh, x[ib].qh, sizeof(qh));
+
+        int sumi0 = 0;
+        int sumi1 = 0;
+
+        for (int j = 0; j < qk/2; ++j) {
+            const uint8_t xh_0 = ((qh & (1u << (j + 0 ))) >> (j + 0 )) << 4;
+            const uint8_t xh_1 = ((qh & (1u << (j + 16))) >> (j + 12));
+
+            const int32_t x0 = (int8_t)(((x[ib].qs[j] & 0x0F) | xh_0) - 16);
+            const int32_t x1 = (int8_t)(((x[ib].qs[j] >> 4) | xh_1) - 16);
+
+            sumi0 += (x0 * y[ib].qs[j]);
+            sumi1 += (x1 * y[ib].qs[j + qk/2]);
+        }
+
+        int sumi = sumi0 + sumi1;
+        sumf += (GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d)) * sumi;
+    }
+
+    *s = sumf;
+}
+
+void ggml_vec_dot_q5_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
+    const int qk = QK8_1;
+    const int nb = n / qk;
+
+    int ib = 0;
+    float sumf = 0;
+
+    assert(n % qk == 0);
+    assert(qk == QK5_1);
+    assert(nrc == 1);
+    UNUSED(nrc);
+    UNUSED(bx);
+    UNUSED(by);
+    UNUSED(bs);
+
+    const block_q5_1 * GGML_RESTRICT x = vx;
+    const block_q8_1 * GGML_RESTRICT y = vy;
+
+#if defined(__ARM_NEON)
+    float32x4_t sumv0 = vdupq_n_f32(0.0f);
+    float32x4_t sumv1 = vdupq_n_f32(0.0f);
+
+    float summs0 = 0.0f;
+    float summs1 = 0.0f;
+
+    uint32_t qh0;
+    uint32_t qh1;
+
+    uint64_t tmp0[4];
+    uint64_t tmp1[4];
+
+    for (; ib + 1 < nb; ib += 2) {
+        const block_q5_1 * GGML_RESTRICT x0 = &x[ib];
+        const block_q5_1 * GGML_RESTRICT x1 = &x[ib + 1];
+        const block_q8_1 * GGML_RESTRICT y0 = &y[ib];
+        const block_q8_1 * GGML_RESTRICT y1 = &y[ib + 1];
+
+        const uint8x16_t m4b = vdupq_n_u8(0x0F);
+
+        summs0 += GGML_CPU_FP16_TO_FP32(x0->m) * GGML_CPU_FP16_TO_FP32(y0->s);
+        summs1 += GGML_CPU_FP16_TO_FP32(x1->m) * GGML_CPU_FP16_TO_FP32(y1->s);
+
+        // extract the 5th bit via lookup table ((b) << 4)
+        memcpy(&qh0, x0->qh, sizeof(qh0));
+        memcpy(&qh1, x1->qh, sizeof(qh1));
+
+        tmp0[0] = table_b2b_0[(qh0 >>  0) & 0xFF];
+        tmp0[1] = table_b2b_0[(qh0 >>  8) & 0xFF];
+        tmp0[2] = table_b2b_0[(qh0 >> 16) & 0xFF];
+        tmp0[3] = table_b2b_0[(qh0 >> 24)       ];
+
+        tmp1[0] = table_b2b_0[(qh1 >>  0) & 0xFF];
+        tmp1[1] = table_b2b_0[(qh1 >>  8) & 0xFF];
+        tmp1[2] = table_b2b_0[(qh1 >> 16) & 0xFF];
+        tmp1[3] = table_b2b_0[(qh1 >> 24)       ];
+
+        const int8x16_t qhl0 = vld1q_s8((const int8_t *)(tmp0 + 0));
+        const int8x16_t qhh0 = vld1q_s8((const int8_t *)(tmp0 + 2));
+        const int8x16_t qhl1 = vld1q_s8((const int8_t *)(tmp1 + 0));
+        const int8x16_t qhh1 = vld1q_s8((const int8_t *)(tmp1 + 2));
+
+        const uint8x16_t v0_0 = vld1q_u8(x0->qs);
+        const uint8x16_t v0_1 = vld1q_u8(x1->qs);
+
+        // 4-bit -> 8-bit
+        const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b));
+        const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
+        const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
+        const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));
+
+        // add high bit
+        const int8x16_t v0_0lf = vorrq_s8(v0_0l, qhl0);
+        const int8x16_t v0_0hf = vorrq_s8(v0_0h, qhh0);
+        const int8x16_t v0_1lf = vorrq_s8(v0_1l, qhl1);
+        const int8x16_t v0_1hf = vorrq_s8(v0_1h, qhh1);
+
+        // load y
+        const int8x16_t v1_0l = vld1q_s8(y0->qs);
+        const int8x16_t v1_0h = vld1q_s8(y0->qs + 16);
+        const int8x16_t v1_1l = vld1q_s8(y1->qs);
+        const int8x16_t v1_1h = vld1q_s8(y1->qs + 16);
+
+        sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(
+                        ggml_vdotq_s32(vdupq_n_s32(0), v0_0lf, v1_0l),
+                        ggml_vdotq_s32(vdupq_n_s32(0), v0_0hf, v1_0h))), GGML_CPU_FP16_TO_FP32(x0->d)*GGML_CPU_FP16_TO_FP32(y0->d));
+        sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(
+                        ggml_vdotq_s32(vdupq_n_s32(0), v0_1lf, v1_1l),
+                        ggml_vdotq_s32(vdupq_n_s32(0), v0_1hf, v1_1h))), GGML_CPU_FP16_TO_FP32(x1->d)*GGML_CPU_FP16_TO_FP32(y1->d));
+    }
+
+    sumf = vaddvq_f32(sumv0) + vaddvq_f32(sumv1) + summs0 + summs1;
+
+#endif
+    for (; ib < nb; ++ib) {
+        uint32_t qh;
+        memcpy(&qh, x[ib].qh, sizeof(qh));
+
+        int sumi0 = 0;
+        int sumi1 = 0;
+
+        for (int j = 0; j < qk/2; ++j) {
+            const uint8_t xh_0 = ((qh >> (j + 0)) << 4) & 0x10;
+            const uint8_t xh_1 = ((qh >> (j + 12))    ) & 0x10;
+
+            const int32_t x0 = (x[ib].qs[j] & 0xF) | xh_0;
+            const int32_t x1 = (x[ib].qs[j] >> 4) | xh_1;
+
+            sumi0 += (x0 * y[ib].qs[j]);
+            sumi1 += (x1 * y[ib].qs[j + qk/2]);
+        }
+
+        int sumi = sumi0 + sumi1;
+        sumf += (GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d))*sumi + GGML_CPU_FP16_TO_FP32(x[ib].m)*GGML_CPU_FP16_TO_FP32(y[ib].s);
+    }
+
+    *s = sumf;
+}
+
+void ggml_vec_dot_q8_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
+    const int qk = QK8_0;
+    const int nb = n / qk;
+
+    assert(n % qk == 0);
+#if defined(__ARM_FEATURE_MATMUL_INT8)
+    assert((nrc == 2) || (nrc == 1));
+#else
+    assert(nrc == 1);
+#endif
+    UNUSED(nrc);
+    UNUSED(bx);
+    UNUSED(by);
+    UNUSED(bs);
+
+    const block_q8_0 * GGML_RESTRICT x = vx;
+    const block_q8_0 * GGML_RESTRICT y = vy;
+
+#if defined(__ARM_FEATURE_MATMUL_INT8)
+    if (nrc == 2) {
+        const block_q8_0 * GGML_RESTRICT vx0 = vx;
+        const block_q8_0 * GGML_RESTRICT vx1 = (const block_q8_0 *) ((const uint8_t*)vx + bx);
+        const block_q8_0 * GGML_RESTRICT vy0 = vy;
+        const block_q8_0 * GGML_RESTRICT vy1 = (const block_q8_0 *) ((const uint8_t*)vy + by);
+
+        float32x4_t sumv0 = vdupq_n_f32(0.0f);
+
+        for (int i = 0; i < nb; i++) {
+            const block_q8_0 * GGML_RESTRICT b_x0 = &vx0[i];
+            const block_q8_0 * GGML_RESTRICT b_y0 = &vy0[i];
+
+            const block_q8_0 * GGML_RESTRICT b_x1 = &vx1[i];
+            const block_q8_0 * GGML_RESTRICT b_y1 = &vy1[i];
+
+            const int8x16_t x0_l = vld1q_s8(b_x0->qs);
+            const int8x16_t x0_h = vld1q_s8(b_x0->qs + 16);
+            const int8x16_t x1_l = vld1q_s8(b_x1->qs);
+            const int8x16_t x1_h = vld1q_s8(b_x1->qs + 16);
+
+            // load y
+            const int8x16_t y0_l = vld1q_s8(b_y0->qs);
+            const int8x16_t y0_h = vld1q_s8(b_y0->qs + 16);
+            const int8x16_t y1_l = vld1q_s8(b_y1->qs);
+            const int8x16_t y1_h = vld1q_s8(b_y1->qs + 16);
+
+            float32_t _scale[4] = {
+                GGML_CPU_FP16_TO_FP32(b_x0->d)*GGML_CPU_FP16_TO_FP32(b_y0->d),
+                GGML_CPU_FP16_TO_FP32(b_x0->d)*GGML_CPU_FP16_TO_FP32(b_y1->d),
+                GGML_CPU_FP16_TO_FP32(b_x1->d)*GGML_CPU_FP16_TO_FP32(b_y0->d),
+                GGML_CPU_FP16_TO_FP32(b_x1->d)*GGML_CPU_FP16_TO_FP32(b_y1->d)
+            };
+            float32x4_t scale = vld1q_f32(_scale);
+
+            int8x16_t l0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l)));
+            int8x16_t l1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l)));
+
+            int8x16_t l2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_h), vreinterpretq_s64_s8(x1_h)));
+            int8x16_t l3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_h), vreinterpretq_s64_s8(x1_h)));
+
+            int8x16_t r0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l)));
+            int8x16_t r1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l)));
+
+            int8x16_t r2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h)));
+            int8x16_t r3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h)));
+
+            sumv0 = vmlaq_f32(sumv0,(vcvtq_f32_s32(vmmlaq_s32((vmmlaq_s32((vmmlaq_s32((vmmlaq_s32(vdupq_n_s32(0), l0, r0)),
+                    l1, r1)), l2, r2)), l3, r3))), scale);
+        }
+
+        float32x4_t sumv1 = vextq_f32 (sumv0, sumv0, 2);
+        float32x4_t sumv2 = vzip1q_f32(sumv0, sumv1);
+
+        vst1_f32(s,      vget_low_f32 (sumv2));
+        vst1_f32(s + bs, vget_high_f32(sumv2));
+
+        return;
+    }
+#endif
+
+    int ib = 0;
+    float sumf = 0;
+
+#if defined(__ARM_FEATURE_SVE)
+    svfloat32_t sumv0 = svdup_n_f32(0.0f);
+    svfloat32_t sumv1 = svdup_n_f32(0.0f);
+
+    const int vector_length = ggml_cpu_get_sve_cnt()*8;
+
+    // VLA implementation for SVE
+    switch (vector_length) {
+        case 128:
+            {
+                // predicate for activating lanes for 16 Int8 elements
+                const svbool_t ph16 = svptrue_pat_b8 (SV_VL16);
+                const svbool_t pl16 = svptrue_pat_b32(SV_VL4);
+
+                for (; ib + 1 < nb; ib += 2) {
+                    const block_q8_0 * GGML_RESTRICT x0 = &x[ib + 0];
+                    const block_q8_0 * GGML_RESTRICT x1 = &x[ib + 1];
+                    const block_q8_0 * GGML_RESTRICT y0 = &y[ib + 0];
+                    const block_q8_0 * GGML_RESTRICT y1 = &y[ib + 1];
+
+                    // load x
+                    const svint8_t qx0_0 = svld1_s8(ph16, x0->qs);
+                    const svint8_t qx0_1 = svld1_s8(ph16, x0->qs+16);
+                    const svint8_t qx1_0 = svld1_s8(ph16, x1->qs);
+                    const svint8_t qx1_1 = svld1_s8(ph16, x1->qs+16);
+
+                    // load y
+                    const svint8_t qy0_0 = svld1_s8(ph16, y0->qs);
+                    const svint8_t qy0_1 = svld1_s8(ph16, y0->qs+16);
+                    const svint8_t qy1_0 = svld1_s8(ph16, y1->qs);
+                    const svint8_t qy1_1 = svld1_s8(ph16, y1->qs+16);
+
+                    sumv0 = svmla_n_f32_x(pl16, sumv0, svcvt_f32_s32_x(pl16, svadd_x(pl16,
+                                    svdot_s32(svdup_n_s32(0), qx0_0, qy0_0),
+                                    svdot_s32(svdup_n_s32(0), qx0_1, qy0_1))), GGML_CPU_FP16_TO_FP32(x0->d)*GGML_CPU_FP16_TO_FP32(y0->d));
+                    sumv1 = svmla_n_f32_x(pl16, sumv1, svcvt_f32_s32_x(pl16, svadd_x(pl16,
+                                    svdot_s32(svdup_n_s32(0), qx1_0, qy1_0),
+                                    svdot_s32(svdup_n_s32(0), qx1_1, qy1_1))), GGML_CPU_FP16_TO_FP32(x1->d)*GGML_CPU_FP16_TO_FP32(y1->d));
+                }
+
+                sumf = svaddv_f32(pl16, svadd_f32_x(pl16, sumv0, sumv1));
+            } break;
+        case 256:
+            {
+                //printf("sve256");
+                for (; ib + 1 < nb; ib += 2) {
+                    const block_q8_0 * GGML_RESTRICT x0 = &x[ib + 0];
+                    const block_q8_0 * GGML_RESTRICT x1 = &x[ib + 1];
+                    const block_q8_0 * GGML_RESTRICT y0 = &y[ib + 0];
+                    const block_q8_0 * GGML_RESTRICT y1 = &y[ib + 1];
+
+                    // load x
+                    const svint8_t qx0 = svld1_s8(svptrue_b8(), x0->qs);
+                    const svint8_t qx1 = svld1_s8(svptrue_b8(), x1->qs);
+
+                    // load y
+                    const svint8_t qy0 = svld1_s8(svptrue_b8(), y0->qs);
+                    const svint8_t qy1 = svld1_s8(svptrue_b8(), y1->qs);
+
+                    sumv0 = svmla_n_f32_x(svptrue_b32(), sumv0, svcvt_f32_s32_x(svptrue_b32(),
+                                svdot_s32(svdup_n_s32(0), qx0, qy0)), GGML_CPU_FP16_TO_FP32(x0->d)*GGML_CPU_FP16_TO_FP32(y0->d));
+                    sumv1 = svmla_n_f32_x(svptrue_b32(), sumv1, svcvt_f32_s32_x(svptrue_b32(),
+                                svdot_s32(svdup_n_s32(0), qx1, qy1)), GGML_CPU_FP16_TO_FP32(x1->d)*GGML_CPU_FP16_TO_FP32(y1->d));
+                }
+
+                sumf = svaddv_f32(svptrue_b32(), svadd_f32_x(svptrue_b32(), sumv0, sumv1));
+            } break;
+        case 512:
+            {
+                // predicate for activating high 256 bit
+                const svbool_t ph32 = svptrue_pat_b8(SV_VL32);
+                // predicate for activating low 256 bit
+                const svbool_t pl32 = svnot_b_z(svptrue_b8(), ph32);
+
+                // predicate for activating high lanes for 8 float32 elements
+                const svbool_t ph8 = svptrue_pat_b32(SV_VL8);
+                // predicate for activating low lanes for 8 float32 elements
+                const svbool_t pl8 = svnot_b_z(svptrue_b32(), ph8);
+
+                svfloat32_t sumv00 = svdup_n_f32(0.0f);
+
+                for (; ib + 1 < nb; ib += 2) {
+                    const block_q8_0 * GGML_RESTRICT x0 = &x[ib + 0];
+                    const block_q8_0 * GGML_RESTRICT x1 = &x[ib + 1];
+                    const block_q8_0 * GGML_RESTRICT y0 = &y[ib + 0];
+                    const block_q8_0 * GGML_RESTRICT y1 = &y[ib + 1];
+
+                    //load 32 int8_t in first half of vector and put another 32 int8_t in second vector lower bits
+                    // and add them to make one 64 element vector
+                    // load x
+                    const svint8_t qx_32 = svld1_s8(ph32, x0->qs);
+                    svint8_t qx_64 = svld1_s8(pl32, x0->qs + 2);
+
+                    qx_64 = svadd_s8_x(svptrue_b8(), qx_32, qx_64);
+
+                    // load y
+                    const svint8_t qy_32 = svld1_s8(ph32, y0->qs);
+                    svint8_t qy_64 = svld1_s8(pl32, y0->qs + 2);
+
+                    qy_64 = svadd_s8_x(svptrue_b8(), qy_32, qy_64);
+
+                    // scale creation
+                    const float32_t deq1 = GGML_CPU_FP16_TO_FP32(x0->d)*GGML_CPU_FP16_TO_FP32(y0->d);
+                    const float32_t deq2 = GGML_CPU_FP16_TO_FP32(x1->d)*GGML_CPU_FP16_TO_FP32(y1->d);
+
+                    // duplicate deq1 in first half of vector and deq2 in second half of vector
+                    const svfloat32_t temp = svdup_f32_m(svdup_f32_z(ph8, deq1), pl8, deq2);
+
+                    const svfloat32_t sumvt = svcvt_f32_s32_x(svptrue_b32(), svdot_s32(svdup_n_s32(0), qx_64, qy_64));
+
+                    sumv00 = svmla_f32_m(svptrue_b32(), sumv00, sumvt, temp);
+                }
+
+                sumf = svaddv_f32(svptrue_b32(), sumv00);
+                break;
+            }
+        default:
+            assert(false && "Unsupported vector length");
+            break;
+    }
+#elif defined(__ARM_NEON)
+    float32x4_t sumv0 = vdupq_n_f32(0.0f);
+    float32x4_t sumv1 = vdupq_n_f32(0.0f);
+
+    for (; ib + 1 < nb; ib += 2) {
+        const block_q8_0 * GGML_RESTRICT x0 = &x[ib + 0];
+        const block_q8_0 * GGML_RESTRICT x1 = &x[ib + 1];
+        const block_q8_0 * GGML_RESTRICT y0 = &y[ib + 0];
+        const block_q8_0 * GGML_RESTRICT y1 = &y[ib + 1];
+
+        const int8x16_t x0_0 = vld1q_s8(x0->qs);
+        const int8x16_t x0_1 = vld1q_s8(x0->qs + 16);
+        const int8x16_t x1_0 = vld1q_s8(x1->qs);
+        const int8x16_t x1_1 = vld1q_s8(x1->qs + 16);
+
+        // load y
+        const int8x16_t y0_0 = vld1q_s8(y0->qs);
+        const int8x16_t y0_1 = vld1q_s8(y0->qs + 16);
+        const int8x16_t y1_0 = vld1q_s8(y1->qs);
+        const int8x16_t y1_1 = vld1q_s8(y1->qs + 16);
+
+        sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(
+                        ggml_vdotq_s32(vdupq_n_s32(0), x0_0, y0_0),
+                        ggml_vdotq_s32(vdupq_n_s32(0), x0_1, y0_1))), GGML_CPU_FP16_TO_FP32(x0->d)*GGML_CPU_FP16_TO_FP32(y0->d));
+
+        sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(
+                        ggml_vdotq_s32(vdupq_n_s32(0), x1_0, y1_0),
+                        ggml_vdotq_s32(vdupq_n_s32(0), x1_1, y1_1))), GGML_CPU_FP16_TO_FP32(x1->d)*GGML_CPU_FP16_TO_FP32(y1->d));
+    }
+
+    sumf = vaddvq_f32(sumv0) + vaddvq_f32(sumv1);
+#endif
+    for (; ib < nb; ++ib) {
+        int sumi = 0;
+
+        for (int j = 0; j < qk; j++) {
+            sumi += x[ib].qs[j]*y[ib].qs[j];
+        }
+
+        sumf += sumi*(GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d));
+    }
+
+    *s = sumf;
+}
+
+void ggml_vec_dot_tq1_0_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
+    assert(nrc == 1);
+    UNUSED(nrc);
+    UNUSED(bx);
+    UNUSED(by);
+    UNUSED(bs);
+
+    const block_tq1_0 * GGML_RESTRICT x = vx;
+    const block_q8_K * GGML_RESTRICT y = vy;
+
+    const int nb = n / QK_K;
+
+#if defined(__ARM_NEON)
+    float sumf = 0.0f;
+
+    uint8_t k_shift[16] = {1, 1, 1, 1, 3, 3, 3, 3, 9, 9, 9, 9, 27, 27, 27, 27};
+
+    const uint8x16_t shift = vld1q_u8(k_shift);
+
+    for (int i = 0; i < nb; ++i) {
+#if defined(__ARM_FEATURE_DOTPROD)
+        int32x4_t sumi0 = vdupq_n_s32(0);
+        int32x4_t sumi1 = vdupq_n_s32(0);
+#else
+        int16x8_t sumi0 = vdupq_n_s16(0);
+        int16x8_t sumi1 = vdupq_n_s16(0);
+#endif
+
+        // first 32 bytes of 5 elements
+        {
+            uint8x16_t qx0 = vld1q_u8(x[i].qs + 0);
+            uint8x16_t qx1 = vld1q_u8(x[i].qs + 16);
+            uint8x16_t qx2 = vmulq_u8(qx0, vdupq_n_u8(3));
+            uint8x16_t qx3 = vmulq_u8(qx1, vdupq_n_u8(3));
+            uint8x16_t qx4 = vmulq_u8(qx0, vdupq_n_u8(9));
+            uint8x16_t qx5 = vmulq_u8(qx1, vdupq_n_u8(9));
+            uint8x16_t qx6 = vmulq_u8(qx0, vdupq_n_u8(27));
+            uint8x16_t qx7 = vmulq_u8(qx1, vdupq_n_u8(27));
+            uint8x16_t qx8 = vmulq_u8(qx0, vdupq_n_u8(81));
+            uint8x16_t qx9 = vmulq_u8(qx1, vdupq_n_u8(81));
+
+            // multiply by 3 and keep the 2 bits above 8 bits
+            int8x16_t sqx0 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx0, vshrq_n_u8(qx0, 1)), 6));
+            int8x16_t sqx1 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx1, vshrq_n_u8(qx1, 1)), 6));
+            int8x16_t sqx2 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx2, vshrq_n_u8(qx2, 1)), 6));
+            int8x16_t sqx3 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx3, vshrq_n_u8(qx3, 1)), 6));
+            int8x16_t sqx4 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx4, vshrq_n_u8(qx4, 1)), 6));
+            int8x16_t sqx5 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx5, vshrq_n_u8(qx5, 1)), 6));
+            int8x16_t sqx6 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx6, vshrq_n_u8(qx6, 1)), 6));
+            int8x16_t sqx7 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx7, vshrq_n_u8(qx7, 1)), 6));
+            int8x16_t sqx8 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx8, vshrq_n_u8(qx8, 1)), 6));
+            int8x16_t sqx9 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx9, vshrq_n_u8(qx9, 1)), 6));
+
+            const int8x16_t qy0 = vld1q_s8(y[i].qs + 0);
+            const int8x16_t qy1 = vld1q_s8(y[i].qs + 16);
+            const int8x16_t qy2 = vld1q_s8(y[i].qs + 32);
+            const int8x16_t qy3 = vld1q_s8(y[i].qs + 48);
+            const int8x16_t qy4 = vld1q_s8(y[i].qs + 64);
+            const int8x16_t qy5 = vld1q_s8(y[i].qs + 80);
+            const int8x16_t qy6 = vld1q_s8(y[i].qs + 96);
+            const int8x16_t qy7 = vld1q_s8(y[i].qs + 112);
+            const int8x16_t qy8 = vld1q_s8(y[i].qs + 128);
+            const int8x16_t qy9 = vld1q_s8(y[i].qs + 144);
+
+#if defined(__ARM_FEATURE_DOTPROD)
+            sumi0 = vdotq_s32(sumi0, sqx0, qy0);
+            sumi1 = vdotq_s32(sumi1, sqx1, qy1);
+            sumi0 = vdotq_s32(sumi0, sqx2, qy2);
+            sumi1 = vdotq_s32(sumi1, sqx3, qy3);
+            sumi0 = vdotq_s32(sumi0, sqx4, qy4);
+            sumi1 = vdotq_s32(sumi1, sqx5, qy5);
+            sumi0 = vdotq_s32(sumi0, sqx6, qy6);
+            sumi1 = vdotq_s32(sumi1, sqx7, qy7);
+            sumi0 = vdotq_s32(sumi0, sqx8, qy8);
+            sumi1 = vdotq_s32(sumi1, sqx9, qy9);
+#else
+            sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx0), vget_low_s8(qy0));
+            sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx0), vget_high_s8(qy0));
+            sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx1), vget_low_s8(qy1));
+            sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx1), vget_high_s8(qy1));
+            sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx2), vget_low_s8(qy2));
+            sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx2), vget_high_s8(qy2));
+            sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx3), vget_low_s8(qy3));
+            sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx3), vget_high_s8(qy3));
+            sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx4), vget_low_s8(qy4));
+            sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx4), vget_high_s8(qy4));
+            sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx5), vget_low_s8(qy5));
+            sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx5), vget_high_s8(qy5));
+            sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx6), vget_low_s8(qy6));
+            sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx6), vget_high_s8(qy6));
+            sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx7), vget_low_s8(qy7));
+            sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx7), vget_high_s8(qy7));
+            sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx8), vget_low_s8(qy8));
+            sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx8), vget_high_s8(qy8));
+            sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx9), vget_low_s8(qy9));
+            sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx9), vget_high_s8(qy9));
+#endif
+        }
+
+        // last 16 bytes of 5-element, along with the 4 bytes of 4 elements
+        {
+            uint8x16_t qx0 = vld1q_u8(x[i].qs + 32);
+            uint8x16_t qx1 = vmulq_u8(qx0, vdupq_n_u8(3));
+            uint8x16_t qx2 = vmulq_u8(qx0, vdupq_n_u8(9));
+            uint8x16_t qx3 = vmulq_u8(qx0, vdupq_n_u8(27));
+            uint8x16_t qx4 = vmulq_u8(qx0, vdupq_n_u8(81));
+            uint32_t qh;
+            memcpy(&qh, x[i].qh, sizeof(qh)); // potentially unaligned
+            uint8x16_t qx5 = vreinterpretq_u8_u32(vdupq_n_u32(qh));
+            qx5 = vmulq_u8(qx5, shift);
+
+            // multiply by 3 and keep the 2 bits above 8 bits
+            int8x16_t sqx0 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx0, vshrq_n_u8(qx0, 1)), 6));
+            int8x16_t sqx1 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx1, vshrq_n_u8(qx1, 1)), 6));
+            int8x16_t sqx2 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx2, vshrq_n_u8(qx2, 1)), 6));
+            int8x16_t sqx3 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx3, vshrq_n_u8(qx3, 1)), 6));
+            int8x16_t sqx4 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx4, vshrq_n_u8(qx4, 1)), 6));
+            int8x16_t sqx5 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx5, vshrq_n_u8(qx5, 1)), 6));
+
+            const int8x16_t qy0 = vld1q_s8(y[i].qs + 160);
+            const int8x16_t qy1 = vld1q_s8(y[i].qs + 176);
+            const int8x16_t qy2 = vld1q_s8(y[i].qs + 192);
+            const int8x16_t qy3 = vld1q_s8(y[i].qs + 208);
+            const int8x16_t qy4 = vld1q_s8(y[i].qs + 224);
+            const int8x16_t qy5 = vld1q_s8(y[i].qs + 240);
+
+#if defined(__ARM_FEATURE_DOTPROD)
+            sumi0 = vdotq_s32(sumi0, sqx0, qy0);
+            sumi1 = vdotq_s32(sumi1, sqx1, qy1);
+            sumi0 = vdotq_s32(sumi0, sqx2, qy2);
+            sumi1 = vdotq_s32(sumi1, sqx3, qy3);
+            sumi0 = vdotq_s32(sumi0, sqx4, qy4);
+            sumi1 = vdotq_s32(sumi1, sqx5, qy5);
+#else
+            sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx0), vget_low_s8(qy0));
+            sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx0), vget_high_s8(qy0));
+            sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx1), vget_low_s8(qy1));
+            sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx1), vget_high_s8(qy1));
+            sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx2), vget_low_s8(qy2));
+            sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx2), vget_high_s8(qy2));
+            sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx3), vget_low_s8(qy3));
+            sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx3), vget_high_s8(qy3));
+            sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx4), vget_low_s8(qy4));
+            sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx4), vget_high_s8(qy4));
+            sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx5), vget_low_s8(qy5));
+            sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx5), vget_high_s8(qy5));
+#endif
+        }
+
+        const int16x8_t ysum0 = vld1q_s16(y[i].bsums);
+        const int16x8_t ysum1 = vld1q_s16(y[i].bsums + 8);
+
+        const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
+
+#if defined(__ARM_FEATURE_DOTPROD)
+        sumi0 = vaddq_s32(sumi0, sumi1);
+        sumi0 = vsubq_s32(sumi0, vpaddlq_s16(vaddq_s16(ysum0, ysum1)));
+
+        sumf += d * (float) vaddvq_s32(sumi0);
+#else
+        sumi0 = vaddq_s16(sumi0, sumi1);
+        sumi0 = vsubq_s16(sumi0, vaddq_s16(ysum0, ysum1));
+
+        sumf += d * (float) vaddlvq_s16(sumi0);
+#endif
+    }
+
+    *s = sumf;
+
+#else
+    UNUSED(x);
+    UNUSED(y);
+    UNUSED(nb);
+    ggml_vec_dot_tq1_0_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
+#endif
+}
+
+void ggml_vec_dot_tq2_0_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
+    assert(nrc == 1);
+    UNUSED(nrc);
+    UNUSED(bx);
+    UNUSED(by);
+    UNUSED(bs);
+
+    const block_tq2_0 * GGML_RESTRICT x = vx;
+    const block_q8_K * GGML_RESTRICT y = vy;
+
+    const int nb = n / QK_K;
+
+#if defined(__ARM_NEON)
+    float sumf = 0.0f;
+
+    const uint8x16_t m3 = vdupq_n_u8(3);
+
+    for (int i = 0; i < nb; ++i) {
+#if defined(__ARM_FEATURE_DOTPROD)
+        int32x4_t sumi0 = vdupq_n_s32(0);
+        int32x4_t sumi1 = vdupq_n_s32(0);
+#else
+        int16x8_t sumi0 = vdupq_n_s16(0);
+        int16x8_t sumi1 = vdupq_n_s16(0);
+#endif
+
+        for (size_t j = 0; j < sizeof(x->qs); j += 32) {
+            uint8x16_t qx0 = vld1q_u8(x[i].qs + j);
+            uint8x16_t qx1 = vld1q_u8(x[i].qs + j + 16);
+            uint8x16_t qx2 = vshrq_n_u8(qx0, 2);
+            uint8x16_t qx3 = vshrq_n_u8(qx1, 2);
+            uint8x16_t qx4 = vshrq_n_u8(qx0, 4);
+            uint8x16_t qx5 = vshrq_n_u8(qx1, 4);
+            uint8x16_t qx6 = vshrq_n_u8(qx0, 6);
+            uint8x16_t qx7 = vshrq_n_u8(qx1, 6);
+
+            int8x16_t sqx0 = vreinterpretq_s8_u8(vandq_u8(qx0, m3));
+            int8x16_t sqx1 = vreinterpretq_s8_u8(vandq_u8(qx1, m3));
+            int8x16_t sqx2 = vreinterpretq_s8_u8(vandq_u8(qx2, m3));
+            int8x16_t sqx3 = vreinterpretq_s8_u8(vandq_u8(qx3, m3));
+            int8x16_t sqx4 = vreinterpretq_s8_u8(vandq_u8(qx4, m3));
+            int8x16_t sqx5 = vreinterpretq_s8_u8(vandq_u8(qx5, m3));
+            int8x16_t sqx6 = vreinterpretq_s8_u8(vandq_u8(qx6, m3));
+            int8x16_t sqx7 = vreinterpretq_s8_u8(vandq_u8(qx7, m3));
+
+            const int8x16_t qy0 = vld1q_s8(y[i].qs + j*4 + 0);
+            const int8x16_t qy1 = vld1q_s8(y[i].qs + j*4 + 16);
+            const int8x16_t qy2 = vld1q_s8(y[i].qs + j*4 + 32);
+            const int8x16_t qy3 = vld1q_s8(y[i].qs + j*4 + 48);
+            const int8x16_t qy4 = vld1q_s8(y[i].qs + j*4 + 64);
+            const int8x16_t qy5 = vld1q_s8(y[i].qs + j*4 + 80);
+            const int8x16_t qy6 = vld1q_s8(y[i].qs + j*4 + 96);
+            const int8x16_t qy7 = vld1q_s8(y[i].qs + j*4 + 112);
+
+#if defined(__ARM_FEATURE_DOTPROD)
+            sumi0 = vdotq_s32(sumi0, sqx0, qy0);
+            sumi1 = vdotq_s32(sumi1, sqx1, qy1);
+            sumi0 = vdotq_s32(sumi0, sqx2, qy2);
+            sumi1 = vdotq_s32(sumi1, sqx3, qy3);
+            sumi0 = vdotq_s32(sumi0, sqx4, qy4);
+            sumi1 = vdotq_s32(sumi1, sqx5, qy5);
+            sumi0 = vdotq_s32(sumi0, sqx6, qy6);
+            sumi1 = vdotq_s32(sumi1, sqx7, qy7);
+#else
+            sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx0), vget_low_s8(qy0));
+            sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx0), vget_high_s8(qy0));
+            sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx1), vget_low_s8(qy1));
+            sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx1), vget_high_s8(qy1));
+            sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx2), vget_low_s8(qy2));
+            sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx2), vget_high_s8(qy2));
+            sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx3), vget_low_s8(qy3));
+            sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx3), vget_high_s8(qy3));
+            sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx4), vget_low_s8(qy4));
+            sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx4), vget_high_s8(qy4));
+            sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx5), vget_low_s8(qy5));
+            sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx5), vget_high_s8(qy5));
+            sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx6), vget_low_s8(qy6));
+            sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx6), vget_high_s8(qy6));
+            sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx7), vget_low_s8(qy7));
+            sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx7), vget_high_s8(qy7));
+#endif
+        }
+
+        const int16x8_t ysum0 = vld1q_s16(y[i].bsums);
+        const int16x8_t ysum1 = vld1q_s16(y[i].bsums + 8);
+
+        const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
+
+#if defined(__ARM_FEATURE_DOTPROD)
+        sumi0 = vaddq_s32(sumi0, sumi1);
+        sumi0 = vsubq_s32(sumi0, vpaddlq_s16(vaddq_s16(ysum0, ysum1)));
+
+        sumf += d * (float) vaddvq_s32(sumi0);
+#else
+        sumi0 = vaddq_s16(sumi0, sumi1);
+        sumi0 = vsubq_s16(sumi0, vaddq_s16(ysum0, ysum1));
+
+        sumf += d * (float) vaddlvq_s16(sumi0);
+#endif
+    }
+
+    *s = sumf;
+
+#else
+    UNUSED(x);
+    UNUSED(y);
+    UNUSED(nb);
+    ggml_vec_dot_tq2_0_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
+#endif
+}
+
+void ggml_vec_dot_q2_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
+    assert(nrc == 1);
+    UNUSED(nrc);
+    UNUSED(bx);
+    UNUSED(by);
+    UNUSED(bs);
+
+    const block_q2_K * GGML_RESTRICT x = vx;
+    const block_q8_K * GGML_RESTRICT y = vy;
+
+    const int nb = n / QK_K;
+
+#ifdef __ARM_FEATURE_SVE
+    const int vector_length = svcntb()*8;
+    const svuint8_t m3s = svdup_n_u8(0x3);
+    const svuint32_t m4s = svdup_n_u32(0xF);
+    const svint32_t vzero_sv = svdup_n_s32(0);
+    svfloat32_t acc_sum = svdup_n_f32(0);
+    svbool_t pred_s32 = svptrue_pat_b32(SV_VL4);
+
+    switch (vector_length) {
+        case 128:
+            for (int i = 0; i < nb; ++i) {
+                const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d);
+                svfloat32_t d_broad = svdup_n_f32((float32_t)d);
+                const float dmin = -y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin);
+                svfloat32_t dmin_broad = svdup_n_f32((float32_t)dmin);
+
+                const uint8_t * GGML_RESTRICT q2 = x[i].qs;
+                const int8_t * GGML_RESTRICT q8_sv = y[i].qs;
+                const uint8_t * GGML_RESTRICT sc = x[i].scales;
+
+                svuint32_t mins_and_scales_sve = svld1ub_u32(svptrue_b32(), sc);
+                const svint32_t mins_sv_1 = svreinterpret_s32_u32(svlsr_n_u32_x(svptrue_b32(), mins_and_scales_sve, 4));
+
+                mins_and_scales_sve = svld1ub_u32(svptrue_b32(), sc+4);
+                const svint32_t mins_sv_2 = svreinterpret_s32_u32(svlsr_n_u32_x(svptrue_b32(), mins_and_scales_sve, 4));
+
+                svint32_t q8sums_sv_1 = svld1sh_s32(svptrue_b32(), y[i].bsums);
+                svint32_t q8sums_sv_2 = svld1sh_s32(svptrue_b32(), y[i].bsums+4);
+
+                const svint32_t s0 = svadd_s32_x(svptrue_b32(), svmul_s32_x(svptrue_b32(), mins_sv_1, q8sums_sv_1), svmul_s32_x(svptrue_b32(), mins_sv_2, q8sums_sv_2));
+
+                mins_and_scales_sve = svld1ub_u32(svptrue_b32(), sc+8);
+                const svint32_t mins_sv_3 = svreinterpret_s32_u32(svlsr_n_u32_x(svptrue_b32(), mins_and_scales_sve, 4));
+
+                mins_and_scales_sve = svld1ub_u32(svptrue_b32(), sc+12);
+                const svint32_t mins_sv_4 = svreinterpret_s32_u32(svlsr_n_u32_x(svptrue_b32(), mins_and_scales_sve, 4));
+
+                q8sums_sv_1 = svld1sh_s32(svptrue_b32(), y[i].bsums+8);
+                q8sums_sv_2 = svld1sh_s32(svptrue_b32(), y[i].bsums+12);
+
+                svint32_t s1 = svadd_s32_x(svptrue_b32(), svmul_s32_x(svptrue_b32(), mins_sv_3, q8sums_sv_1), svmul_s32_x(svptrue_b32(), mins_sv_4, q8sums_sv_2));
+
+                svfloat32_t temp = svcvt_f32_s32_x(svptrue_b32(), svadd_s32_x(svptrue_b32(), s0, s1));
+
+                acc_sum = svmla_f32_m(svptrue_b32(), acc_sum, temp, dmin_broad);
+
+                svint32_t sumi1 = svdup_n_s32(0);
+
+                {
+                    const svuint8_t q2bits_1 = svld1_u8(svptrue_b8(), q2);
+                    svint8_t q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), q2bits_1, m3s));
+                    svint8_t q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16;
+                    const svint32_t scales_sv = svreinterpret_s32_u32(svand_u32_m(svptrue_b32(), svld1ub_u32(svptrue_b32(), sc), m4s));
+
+                    sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv, 0));
+
+                    const svuint8_t q2bits_3 = svld1_u8(svptrue_b8(), q2+16);
+                    q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), q2bits_3, m3s));
+                    q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16;
+
+                    sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv, 1));
+
+                    q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_1, 2), m3s));
+                    q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16;
+
+                    sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv, 2));
+
+                    q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_3, 2), m3s));
+                    q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16;
+
+                    sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv, 3));
+
+                    const svint32_t scales_sv_1 = svreinterpret_s32_u32(svand_u32_m(svptrue_b32(), svld1ub_u32(svptrue_b32(), sc+4), m4s));
+
+                    q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_1, 4), m3s));
+                    q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16;
+
+                    sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_1, 0));
+
+                    q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_3, 4), m3s));
+                    q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16;
+
+                    sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_1, 1));
+
+                    q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_1, 6), m3s));
+                    q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16;
+
+                    sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_1, 2));
+
+                    q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_3, 6), m3s));
+                    q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16;
+
+                    sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_1, 3));
+
+                    //-------------------------------
+
+                    q2 += 32;
+                    const svint32_t scales_sv_2 = svreinterpret_s32_u32(svand_u32_m(svptrue_b32(), svld1ub_u32(svptrue_b32(), sc+8), m4s));
+                    const svuint8_t q2bits_2 = svld1_u8(svptrue_b8(), q2);
+
+                    q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), q2bits_2, m3s));
+                    q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16;
+
+                    sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_2, 0));
+
+                    const svuint8_t q2bits_4 = svld1_u8(svptrue_b8(), q2+16);
+                    q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), q2bits_4, m3s));
+                    q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16;
+
+                    sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_2, 1));
+
+                    q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_2, 2), m3s));
+                    q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16;
+
+                    sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_2, 2));
+
+                    q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_4, 2), m3s));
+                    q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16;
+
+                    sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_2, 3));
+
+                    const svint32_t scales_sv_3 = svreinterpret_s32_u32(svand_u32_m(svptrue_b32(), svld1ub_u32(svptrue_b32(), sc+12), m4s));
+
+                    q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_2, 4), m3s));
+                    q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16;
+
+                    sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_3, 0));
+
+                    q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_4, 4), m3s));
+                    q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16;
+
+                    sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_3, 1));
+
+                    q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_2, 6), m3s));
+                    q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16;
+
+                    sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_3, 2));
+
+                    q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_4, 6), m3s));
+                    q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16;
+
+                    sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_3, 3));
+                }
+                acc_sum = svmla_f32_m(svptrue_b32(), acc_sum, svcvt_f32_s32_x(svptrue_b32(), sumi1), d_broad);
+            }
+            *s = svaddv_f32(svptrue_b32(), acc_sum);
+            break;
+
+        case 256:
+        case 512:
+            for (int i = 0; i < nb; ++i) {
+                const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d);
+                svfloat32_t d_broad = svdup_n_f32((float32_t)d);
+                const float dmin = -y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin);
+                svfloat32_t dmin_broad = svdup_n_f32((float32_t)dmin);
+
+                const uint8_t * GGML_RESTRICT q2 = x[i].qs;
+                const int8_t * GGML_RESTRICT q8_sv = y[i].qs;
+                const uint8_t * GGML_RESTRICT sc = x[i].scales;
+
+                const svuint32_t mins_and_scales_sve = svld1ub_u32(svptrue_pat_b32(SV_VL8), sc); sc += 8;
+                const svint32_t scales_sv = svreinterpret_s32_u32(svand_u32_m(svptrue_pat_b32(SV_VL8), mins_and_scales_sve, m4s));
+                const svint32_t mins_sv_1 = svreinterpret_s32_u32(svlsr_n_u32_x(svptrue_pat_b32(SV_VL8), mins_and_scales_sve, 4));
+                svint32_t q8sums_sv_1 = svld1sh_s32(svptrue_pat_b32(SV_VL8), y[i].bsums);
+
+                const svuint32_t mins_and_scales_sve_1 = svld1ub_u32(svptrue_pat_b32(SV_VL8), sc);
+                const svint32_t scales_sv_1 = svreinterpret_s32_u32(svand_u32_m(svptrue_pat_b32(SV_VL8), mins_and_scales_sve_1, m4s));
+                const svint32_t mins_sv_2 = svreinterpret_s32_u32(svlsr_n_u32_x(svptrue_pat_b32(SV_VL8), mins_and_scales_sve_1, 4));
+
+                svint32_t q8sums_sv_2 = svld1sh_s32(svptrue_pat_b32(SV_VL8), y[i].bsums+8);
+
+                svfloat32_t temp = svcvt_f32_s32_x(svptrue_pat_b32(SV_VL8), svadd_s32_x(svptrue_pat_b32(SV_VL8), svmul_s32_x(svptrue_pat_b32(SV_VL8), mins_sv_1, q8sums_sv_1), svmul_s32_x(svptrue_pat_b32(SV_VL8), mins_sv_2, q8sums_sv_2)));
+
+                acc_sum = svmla_f32_m(svptrue_pat_b32(SV_VL8), acc_sum, temp, dmin_broad);
+
+                svint32_t sumi1 = svdup_n_s32(0);
+
+                {
+                    const svuint8_t q2bits_1 = svld1_u8(svptrue_pat_b8(SV_VL32), q2);
+                    svint8_t q2bytes_sv = svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), q2bits_1, m3s));
+                    svint8_t q8bytes_sv = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); q8_sv += 32;
+
+                    svint32_t scale_1 = svsel(pred_s32, svdup_lane_s32(scales_sv, 0), svdup_lane_s32(scales_sv, 1));
+                    sumi1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), scale_1);
+
+                    q2bytes_sv = svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q2bits_1, 2), m3s));
+                    q8bytes_sv = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); q8_sv += 32;
+
+                    svint32_t scale_2 = svsel(pred_s32, svdup_lane_s32(scales_sv, 2), svdup_lane_s32(scales_sv, 3));
+                    sumi1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1, svdot_s32(svdup_n_s32(0), q2bytes_sv, q8bytes_sv), scale_2);
+
+                    q2bytes_sv = svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q2bits_1, 4), m3s));
+                    q8bytes_sv = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); q8_sv += 32;
+
+                    scale_1 = svsel(pred_s32, svdup_lane_s32(scales_sv, 4), svdup_lane_s32(scales_sv, 5));
+                    sumi1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), scale_1);
+
+                    q2bytes_sv = svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q2bits_1, 6), m3s));
+                    q8bytes_sv = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); q8_sv += 32;
+
+                    scale_2 = svsel(pred_s32, svdup_lane_s32(scales_sv, 6), svdup_lane_s32(scales_sv, 7));
+                    sumi1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), scale_2);
+
+                    q2 += 32;
+
+                    const svuint8_t q2bits_2 = svld1_u8(svptrue_pat_b8(SV_VL32), q2);
+                    q2bytes_sv = svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), q2bits_2, m3s));
+                    q8bytes_sv = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); q8_sv += 32;
+
+                    scale_1 = svsel(pred_s32, svdup_lane_s32(scales_sv_1, 0), svdup_lane_s32(scales_sv_1, 1));
+                    sumi1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), scale_1);
+
+                    q2bytes_sv = svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q2bits_2, 2), m3s));
+                    q8bytes_sv = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); q8_sv += 32;
+
+                    scale_2 = svsel(pred_s32, svdup_lane_s32(scales_sv_1, 2), svdup_lane_s32(scales_sv_1, 3));
+                    sumi1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), scale_2);
+
+                    q2bytes_sv = svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q2bits_2, 4), m3s));
+                    q8bytes_sv = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); q8_sv += 32;
+
+                    scale_1 = svsel(pred_s32, svdup_lane_s32(scales_sv_1, 4), svdup_lane_s32(scales_sv_1, 5));
+                    sumi1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), scale_1);
+
+                    q2bytes_sv = svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q2bits_2, 6), m3s));
+                    q8bytes_sv = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); q8_sv += 32;
+
+                    scale_2 = svsel(pred_s32, svdup_lane_s32(scales_sv_1, 6), svdup_lane_s32(scales_sv_1, 7));
+                    sumi1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), scale_2);
+                }
+                acc_sum = svmla_f32_m(svptrue_pat_b32(SV_VL8), acc_sum, svcvt_f32_s32_x(svptrue_pat_b32(SV_VL8), sumi1), d_broad);
+            }
+            *s = svaddv_f32(svptrue_pat_b32(SV_VL8), acc_sum);
+            break;
+
+        default:
+            assert(false && "Unsupported vector length");
+            break;
+    }
+
+#elif __ARM_NEON
+    const uint8x16_t m3 = vdupq_n_u8(0x3);
+    const uint8x16_t m4 = vdupq_n_u8(0xF);
+
+    const int32x4_t vzero = vdupq_n_s32(0);
+
+    ggml_int8x16x2_t q2bytes;
+    uint8_t aux[16];
+
+    float sum = 0;
+
+    for (int i = 0; i < nb; ++i) {
+        const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d);
+        const float dmin = -y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin);
+
+        const uint8_t * GGML_RESTRICT q2 = x[i].qs;
+        const int8_t * GGML_RESTRICT q8 = y[i].qs;
+        const uint8_t * GGML_RESTRICT sc = x[i].scales;
+
+        const uint8x16_t mins_and_scales = vld1q_u8(sc);
+        const uint8x16_t scales = vandq_u8(mins_and_scales, m4);
+        vst1q_u8(aux, scales);
+
+        const uint8x16_t mins = vshrq_n_u8(mins_and_scales, 4);
+        const ggml_int16x8x2_t q8sums = ggml_vld1q_s16_x2(y[i].bsums);
+        const ggml_int16x8x2_t mins16 = {{vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(mins))), vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(mins)))}};
+        const int32x4_t s0 = vaddq_s32(vmull_s16(vget_low_s16 (mins16.val[0]), vget_low_s16 (q8sums.val[0])),
+                                       vmull_s16(vget_high_s16(mins16.val[0]), vget_high_s16(q8sums.val[0])));
+        const int32x4_t s1 = vaddq_s32(vmull_s16(vget_low_s16 (mins16.val[1]), vget_low_s16 (q8sums.val[1])),
+                                       vmull_s16(vget_high_s16(mins16.val[1]), vget_high_s16(q8sums.val[1])));
+        sum += dmin * vaddvq_s32(vaddq_s32(s0, s1));
+
+        int isum = 0;
+        int is = 0;
+
+// We use this macro instead of a function call because for some reason
+// the code runs 2-3% slower, even if the function is declared inline
+#define MULTIPLY_ACCUM_WITH_SCALE(index)\
+    isum += vaddvq_s32(ggml_vdotq_s32(vzero, q2bytes.val[0], q8bytes.val[0])) * aux[is+(index)];\
+    isum += vaddvq_s32(ggml_vdotq_s32(vzero, q2bytes.val[1], q8bytes.val[1])) * aux[is+1+(index)];
+
+#define SHIFT_MULTIPLY_ACCUM_WITH_SCALE(shift, index)\
+    q8bytes = ggml_vld1q_s8_x2(q8); q8 += 32;\
+    q2bytes.val[0] = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits.val[0], (shift)), m3));\
+    q2bytes.val[1] = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits.val[1], (shift)), m3));\
+    MULTIPLY_ACCUM_WITH_SCALE((index));
+
+        for (int j = 0; j < QK_K/128; ++j) {
+            const ggml_uint8x16x2_t q2bits = ggml_vld1q_u8_x2(q2); q2 += 32;
+
+            ggml_int8x16x2_t q8bytes = ggml_vld1q_s8_x2(q8); q8 += 32;
+            q2bytes.val[0] = vreinterpretq_s8_u8(vandq_u8(q2bits.val[0], m3));
+            q2bytes.val[1] = vreinterpretq_s8_u8(vandq_u8(q2bits.val[1], m3));
+
+            MULTIPLY_ACCUM_WITH_SCALE(0);
+
+            SHIFT_MULTIPLY_ACCUM_WITH_SCALE(2, 2);
+            SHIFT_MULTIPLY_ACCUM_WITH_SCALE(4, 4);
+            SHIFT_MULTIPLY_ACCUM_WITH_SCALE(6, 6);
+
+            is += 8;
+        }
+
+        sum += d * isum;
+    }
+
+    *s = sum;
+
+#else
+    UNUSED(x);
+    UNUSED(y);
+    UNUSED(nb);
+    ggml_vec_dot_q2_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
+#endif
+}
+
+void ggml_vec_dot_q3_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
+    assert(n % QK_K == 0);
+    assert(nrc == 1);
+    UNUSED(nrc);
+    UNUSED(bx);
+    UNUSED(by);
+    UNUSED(bs);
+
+    const uint32_t kmask1 = 0x03030303;
+    const uint32_t kmask2 = 0x0f0f0f0f;
+
+    const block_q3_K * GGML_RESTRICT x = vx;
+    const block_q8_K * GGML_RESTRICT y = vy;
+
+    const int nb = n / QK_K;
+
+#if defined(__ARM_FEATURE_SVE)
+
+    uint32_t aux[3];
+    uint32_t utmp[4];
+
+    const int8_t m32 = 32;
+    const int vector_length = svcntb()*8;
+    const svuint8_t m3b_sv = svdup_n_u8(0x3);
+    const svint32_t vzero_sv = svdup_n_s32(0);
+
+    const svuint8_t m0_sv = svdup_n_u8(1);
+    const svuint8_t m1_sv = svlsl_n_u8_x(svptrue_b8(), m0_sv, 1);
+    const svuint8_t m2_sv = svlsl_n_u8_x(svptrue_b8(), m0_sv, 2);
+    const svuint8_t m3_sv = svlsl_n_u8_x(svptrue_b8(), m0_sv, 3);
+
+    float sum = 0;
+
+    for (int i = 0; i < nb; ++i) {
+
+        const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d);
+
+        const uint8_t * GGML_RESTRICT q3_sv = x[i].qs;
+        const uint8_t * GGML_RESTRICT qh_sv = x[i].hmask;
+        const int8_t * GGML_RESTRICT q8_sv = y[i].qs;
+
+        // Set up scales
+        memcpy(aux, x[i].scales, 12);
+        utmp[3] = ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4);
+        utmp[2] = ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4);
+        utmp[1] = (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4);
+        utmp[0] = (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4);
+
+        int8_t * scale = (int8_t *)utmp;
+
+        for (int j = 0; j < 16; ++j) scale[j] -= m32;
+
+        switch (vector_length) {
+            case 128:
+                {
+                    svuint8_t qhbits_sv_1 = svld1_u8(svptrue_b8(), qh_sv);
+                    svuint8_t qhbits_sv_2 = svld1_u8(svptrue_b8(), qh_sv+16);
+                    svuint8_t q3h_sv;
+
+                    svint32_t sumi1_1 = svdup_n_s32(0);
+                    svint8_t q3bytes_sv;
+
+                    for (int j = 0; j < QK_K/128; ++j) {
+
+                        const svuint8_t q3bits_sv = svld1_u8(svptrue_b8(), q3_sv); q3_sv += 16;
+                        const svuint8_t q3bits_sv_1 = svld1_u8(svptrue_b8(), q3_sv); q3_sv += 16;
+                        svint8_t q8bytes_1_sv_1 = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16;
+                        svint8_t q8bytes_1_sv_2 = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16;
+
+                        q3h_sv = svlsl_n_u8_x(svptrue_b8(), svbic_u8_x(svptrue_b8(), m0_sv, qhbits_sv_1), 2);
+                        q3bytes_sv = svsub_s8_x(svptrue_b8(), svreinterpret_s8_u8(svand_u8_m(svptrue_b8(), q3bits_sv, m3b_sv)), svreinterpret_s8_u8(q3h_sv));
+
+                        sumi1_1 = svmla_s32_m(svptrue_b32(), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_1), svdup_n_s32((int32_t)scale[0]));
+
+                        q3h_sv = svlsl_n_u8_x(svptrue_b8(), svbic_u8_x(svptrue_b8(), m0_sv, qhbits_sv_2), 2);
+                        q3bytes_sv = svsub_s8_x(svptrue_b8(), svreinterpret_s8_u8(svand_u8_m(svptrue_b8(), q3bits_sv_1, m3b_sv)), svreinterpret_s8_u8(q3h_sv));
+
+                        sumi1_1 = svmla_s32_m(svptrue_b32(), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_2), svdup_n_s32((int32_t)scale[1]));
+
+                        q8bytes_1_sv_1 = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16;
+                        q8bytes_1_sv_2 = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16;
+
+                        q3h_sv = svlsl_n_u8_x(svptrue_b8(), svbic_u8_x(svptrue_b8(), m1_sv, qhbits_sv_1), 1);
+                        q3bytes_sv = svsub_s8_x(svptrue_b8(), svreinterpret_s8_u8(svand_u8_m(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q3bits_sv, 2), m3b_sv)), svreinterpret_s8_u8(q3h_sv));
+
+                        sumi1_1 = svmla_s32_m(svptrue_b32(), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_1), svdup_n_s32((int32_t)scale[2]));
+
+                        q3h_sv = svlsl_n_u8_x(svptrue_b8(), svbic_u8_x(svptrue_b8(), m1_sv, qhbits_sv_2), 1);
+                        q3bytes_sv = svsub_s8_x(svptrue_b8(), svreinterpret_s8_u8(svand_u8_m(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q3bits_sv_1, 2), m3b_sv)), svreinterpret_s8_u8(q3h_sv));
+
+                        sumi1_1 = svmla_s32_m(svptrue_b32(), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_2), svdup_n_s32((int32_t)scale[3]));
+
+                        scale += 4;
+                        q8bytes_1_sv_1 = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16;
+                        q8bytes_1_sv_2 = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16;
+
+                        q3h_sv = svbic_u8_x(svptrue_b8(), m2_sv, qhbits_sv_1);
+                        q3bytes_sv = svsub_s8_x(svptrue_b8(), svreinterpret_s8_u8(svand_u8_m(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q3bits_sv, 4), m3b_sv)), svreinterpret_s8_u8(q3h_sv));
+
+                        sumi1_1 = svmla_s32_m(svptrue_b32(), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_1), svdup_n_s32((int32_t)scale[0]));
+
+                        q3h_sv = svbic_u8_x(svptrue_b8(), m2_sv, qhbits_sv_2);
+                        q3bytes_sv = svsub_s8_x(svptrue_b8(), svreinterpret_s8_u8(svand_u8_m(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q3bits_sv_1, 4), m3b_sv)), svreinterpret_s8_u8(q3h_sv));
+
+                        sumi1_1 = svmla_s32_m(svptrue_b32(), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_2), svdup_n_s32((int32_t)scale[1]));
+
+                        q8bytes_1_sv_1 = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16;
+                        q8bytes_1_sv_2 = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16;
+
+                        q3h_sv = svlsr_n_u8_x(svptrue_b8(), svbic_u8_x(svptrue_b8(), m3_sv, qhbits_sv_1), 1);
+                        q3bytes_sv = svsub_s8_x(svptrue_b8(), svreinterpret_s8_u8(svand_u8_m(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q3bits_sv, 6), m3b_sv)), svreinterpret_s8_u8(q3h_sv));
+
+                        sumi1_1 = svmla_s32_m(svptrue_b32(), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_1), svdup_n_s32((int32_t)scale[2]));
+
+                        q3h_sv = svlsr_n_u8_x(svptrue_b8(), svbic_u8_x(svptrue_b8(), m3_sv, qhbits_sv_2), 1);
+                        q3bytes_sv = svsub_s8_x(svptrue_b8(), svreinterpret_s8_u8(svand_u8_m(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q3bits_sv_1, 6), m3b_sv)), svreinterpret_s8_u8(q3h_sv));
+
+                        sumi1_1 = svmla_s32_m(svptrue_b32(), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_2), svdup_n_s32((int32_t)scale[3]));
+
+                        if (j == 0) {
+                            qhbits_sv_1 = svlsr_n_u8_x(svptrue_b8(), qhbits_sv_1, 4);
+                            qhbits_sv_2 = svlsr_n_u8_x(svptrue_b8(), qhbits_sv_2, 4);
+                        }
+
+                        scale += 4;
+                    }
+
+                    sum += d * (svaddv_s32(svptrue_b32(), sumi1_1));
+                } break;
+            case 256:
+            case 512:
+                {
+                    svuint8_t qhbits_sv = svld1_u8(svptrue_pat_b8(SV_VL32), qh_sv);
+                    svuint8_t q3h_sv;
+
+                    svint32_t sumi1_1 = svdup_n_s32(0);
+                    svint8_t q3bytes_sv;
+
+                    for (int j = 0; j < QK_K/128; ++j) {
+
+                        const svuint8_t q3bits_sv = svld1_u8(svptrue_pat_b8(SV_VL32), q3_sv); q3_sv += 32;
+                        svint8_t q8bytes_1_sv_1 = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); q8_sv += 32;
+                        svint8_t q8bytes_1_sv_2 = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); q8_sv += 32;
+
+                        q3h_sv = svlsl_n_u8_x(svptrue_pat_b8(SV_VL32), svbic_u8_x(svptrue_pat_b8(SV_VL32), m0_sv, qhbits_sv), 2);
+                        q3bytes_sv = svsub_s8_x(svptrue_pat_b8(SV_VL32), svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), q3bits_sv, m3b_sv)), svreinterpret_s8_u8(q3h_sv));
+
+                        svint32_t scale_1 = svsel_s32(svptrue_pat_b32(SV_VL4), svdup_n_s32((int32_t)scale[0]), svdup_n_s32((int32_t)scale[1]));
+                        sumi1_1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_1), scale_1);
+
+                        q3h_sv = svlsl_n_u8_x(svptrue_pat_b8(SV_VL32), svbic_u8_x(svptrue_pat_b8(SV_VL32), m1_sv, qhbits_sv), 1);
+                        q3bytes_sv = svsub_s8_x(svptrue_pat_b8(SV_VL32), svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q3bits_sv, 2), m3b_sv)), svreinterpret_s8_u8(q3h_sv));
+
+                        scale_1 = svsel_s32(svptrue_pat_b32(SV_VL4), svdup_n_s32((int32_t)scale[2]), svdup_n_s32((int32_t)scale[3]));
+                        sumi1_1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_2), scale_1);
+
+                        scale += 4;
+                        q8bytes_1_sv_1 = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); q8_sv += 32;
+                        q8bytes_1_sv_2 = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); q8_sv += 32;
+
+                        q3h_sv = svbic_u8_x(svptrue_pat_b8(SV_VL32), m2_sv, qhbits_sv);
+                        q3bytes_sv = svsub_s8_x(svptrue_pat_b8(SV_VL32), svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q3bits_sv, 4), m3b_sv)), svreinterpret_s8_u8(q3h_sv));
+
+                        scale_1 = svsel_s32(svptrue_pat_b32(SV_VL4), svdup_n_s32((int32_t)scale[0]), svdup_n_s32((int32_t)scale[1]));
+                        sumi1_1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_1), scale_1);
+
+                        q3h_sv = svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), svbic_u8_x(svptrue_pat_b8(SV_VL32), m3_sv, qhbits_sv), 1);
+                        q3bytes_sv = svsub_s8_x(svptrue_pat_b8(SV_VL32), svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q3bits_sv, 6), m3b_sv)), svreinterpret_s8_u8(q3h_sv));
+
+                        scale_1 = svsel_s32(svptrue_pat_b32(SV_VL4), svdup_n_s32((int32_t)scale[2]), svdup_n_s32((int32_t)scale[3]));
+                        sumi1_1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_2), scale_1);
+
+                        if (j == 0) {
+                            qhbits_sv = svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), qhbits_sv, 4);
+                        }
+
+                        scale += 4;
+                    }
+
+                    sum += d * (svaddv_s32(svptrue_pat_b32(SV_VL8), sumi1_1));
+                } break;
+            default:
+                assert(false && "Unsupported vector length");
+                break;
+        }
+    }
+    *s = sum;
+
+#elif __ARM_NEON
+
+    uint32_t aux[3];
+    uint32_t utmp[4];
+
+    const uint8x16_t m3b = vdupq_n_u8(0x3);
+    const int32x4_t vzero = vdupq_n_s32(0);
+
+    const uint8x16_t m0 = vdupq_n_u8(1);
+    const uint8x16_t m1 = vshlq_n_u8(m0, 1);
+    const uint8x16_t m2 = vshlq_n_u8(m0, 2);
+    const uint8x16_t m3 = vshlq_n_u8(m0, 3);
+    const int8_t m32 = 32;
+
+    ggml_int8x16x4_t q3bytes;
+
+    float sum = 0;
+
+    for (int i = 0; i < nb; ++i) {
+
+        const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d);
+
+        const uint8_t * GGML_RESTRICT q3 = x[i].qs;
+        const uint8_t * GGML_RESTRICT qh = x[i].hmask;
+        const int8_t * GGML_RESTRICT q8 = y[i].qs;
+
+        ggml_uint8x16x2_t qhbits = ggml_vld1q_u8_x2(qh);
+
+        ggml_uint8x16x4_t q3h;
+
+        int32_t isum = 0;
+
+        // Set up scales
+        memcpy(aux, x[i].scales, 12);
+        utmp[3] = ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4);
+        utmp[2] = ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4);
+        utmp[1] = (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4);
+        utmp[0] = (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4);
+
+        int8_t * scale = (int8_t *)utmp;
+        for (int j = 0; j < 16; ++j) scale[j] -= m32;
+
+        for (int j = 0; j < QK_K/128; ++j) {
+
+            const ggml_uint8x16x2_t q3bits = ggml_vld1q_u8_x2(q3); q3 += 32;
+            const ggml_int8x16x4_t q8bytes_1 = ggml_vld1q_s8_x4(q8); q8 += 64;
+            const ggml_int8x16x4_t q8bytes_2 = ggml_vld1q_s8_x4(q8); q8 += 64;
+
+            q3h.val[0] = vshlq_n_u8(vbicq_u8(m0, qhbits.val[0]), 2);
+            q3h.val[1] = vshlq_n_u8(vbicq_u8(m0, qhbits.val[1]), 2);
+            q3h.val[2] = vshlq_n_u8(vbicq_u8(m1, qhbits.val[0]), 1);
+            q3h.val[3] = vshlq_n_u8(vbicq_u8(m1, qhbits.val[1]), 1);
+
+            q3bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(q3bits.val[0], m3b)), vreinterpretq_s8_u8(q3h.val[0]));
+            q3bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(q3bits.val[1], m3b)), vreinterpretq_s8_u8(q3h.val[1]));
+            q3bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[0], 2), m3b)), vreinterpretq_s8_u8(q3h.val[2]));
+            q3bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[1], 2), m3b)), vreinterpretq_s8_u8(q3h.val[3]));
+
+            isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[0], q8bytes_1.val[0])) * scale[0];
+            isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[1], q8bytes_1.val[1])) * scale[1];
+            isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[2], q8bytes_1.val[2])) * scale[2];
+            isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[3], q8bytes_1.val[3])) * scale[3];
+
+            scale += 4;
+
+            q3h.val[0] = vbicq_u8(m2, qhbits.val[0]);
+            q3h.val[1] = vbicq_u8(m2, qhbits.val[1]);
+            q3h.val[2] = vshrq_n_u8(vbicq_u8(m3, qhbits.val[0]), 1);
+            q3h.val[3] = vshrq_n_u8(vbicq_u8(m3, qhbits.val[1]), 1);
+
+            q3bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[0], 4), m3b)), vreinterpretq_s8_u8(q3h.val[0]));
+            q3bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[1], 4), m3b)), vreinterpretq_s8_u8(q3h.val[1]));
+            q3bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[0], 6), m3b)), vreinterpretq_s8_u8(q3h.val[2]));
+            q3bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[1], 6), m3b)), vreinterpretq_s8_u8(q3h.val[3]));
+
+            isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[0], q8bytes_2.val[0])) * scale[0];
+            isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[1], q8bytes_2.val[1])) * scale[1];
+            isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[2], q8bytes_2.val[2])) * scale[2];
+            isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[3], q8bytes_2.val[3])) * scale[3];
+
+            scale += 4;
+
+            if (j == 0) {
+                qhbits.val[0] = vshrq_n_u8(qhbits.val[0], 4);
+                qhbits.val[1] = vshrq_n_u8(qhbits.val[1], 4);
+            }
+
+        }
+        sum += d * isum;
+
+    }
+
+    *s = sum;
+
+#else
+    UNUSED(kmask1);
+    UNUSED(kmask2);
+    UNUSED(x);
+    UNUSED(y);
+    UNUSED(nb);
+    ggml_vec_dot_q3_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
+#endif
+
+}
+
+void ggml_vec_dot_q4_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); +#ifdef __ARM_FEATURE_MATMUL_INT8 + assert((nrc == 2) || (nrc == 1)); +#else + assert(nrc == 1); +#endif + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q4_K * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + + static const uint32_t kmask1 = 0x3f3f3f3f; + static const uint32_t kmask2 = 0x0f0f0f0f; + static const uint32_t kmask3 = 0x03030303; + + uint32_t utmp[4]; + +#if defined(__ARM_FEATURE_MATMUL_INT8) + if (nrc == 2) { + const block_q4_K * GGML_RESTRICT x0 = x; + const block_q4_K * GGML_RESTRICT x1 = (const block_q4_K *) ((const uint8_t *)vx + bx); + const block_q8_K * GGML_RESTRICT y0 = y; + const block_q8_K * GGML_RESTRICT y1 = (const block_q8_K *) ((const uint8_t *)vy + by); + + const uint8x16_t m4b = vdupq_n_u8(0x0f); + + float32x4_t vfsum = vdupq_n_f32(0.0f); + + for (int i = 0; i < nb; ++i, ++x0, ++x1, ++y0, ++y1) { + const uint8_t * GGML_RESTRICT qx0 = x0->qs; + const uint8_t * GGML_RESTRICT qx1 = x1->qs; + const int8_t * GGML_RESTRICT qy0 = y0->qs; + const int8_t * GGML_RESTRICT qy1 = y1->qs; + + // decode scales and mins + int8_t x0_scales[8], x1_scales[8]; + int16x8_t x0_mins, x1_mins; + { + uint32_t scales_mins[3]; + memcpy(scales_mins, x0->scales, 12); + const uint32_t mins_0_3 = scales_mins[1] & kmask1; + const uint32_t mins_4_7 = ((scales_mins[2] >> 4) & kmask2) | (((scales_mins[1] >> 6) & kmask3) << 4); + const uint32x2_t mins = {mins_0_3, mins_4_7}; + x0_mins = vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_u32(mins))); + uint32_t scales[2]; + scales[0] = scales_mins[0] & kmask1; // scales 0~3 + scales[1] = (scales_mins[2] & kmask2) | (((scales_mins[0] >> 6) & kmask3) << 4); // scales 4~7 + memcpy(x0_scales, scales, 8); + } + { + uint32_t scales_mins[3]; + memcpy(scales_mins, x1->scales, 12); + const uint32_t mins_0_3 = scales_mins[1] & kmask1; + const uint32_t mins_4_7 = ((scales_mins[2] >> 4) & kmask2) | (((scales_mins[1] >> 6) & kmask3) << 4); + const uint32x2_t mins = {mins_0_3, mins_4_7}; + x1_mins = vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_u32(mins))); + uint32_t scales[2]; + scales[0] = scales_mins[0] & kmask1; // scales 0~3 + scales[1] = (scales_mins[2] & kmask2) | (((scales_mins[0] >> 6) & kmask3) << 4); // scales 4~7 + memcpy(x1_scales, scales, 8); + } + + int32x4_t visum = {0}; + + // process 64 data points per iteration, totally 256 data points + for (int j = 0; j < QK_K / 64; ++j, qx0 += 32, qx1 += 32, qy0 += 64, qy1 += 64) { + const int8x16x4_t vy0 = vld1q_s8_x4(qy0); + const int8x16x4_t vy1 = vld1q_s8_x4(qy1); + + int8x16_t vx0[4], vx1[4]; + { + const uint8x16x2_t vv = vld1q_u8_x2(qx0); + vx0[0] = vreinterpretq_s8_u8(vandq_u8(vv.val[0], m4b)); + vx0[1] = vreinterpretq_s8_u8(vandq_u8(vv.val[1], m4b)); + vx0[2] = vreinterpretq_s8_u8(vshrq_n_u8(vv.val[0], 4)); + vx0[3] = vreinterpretq_s8_u8(vshrq_n_u8(vv.val[1], 4)); + } + { + const uint8x16x2_t vv = vld1q_u8_x2(qx1); + vx1[0] = vreinterpretq_s8_u8(vandq_u8(vv.val[0], m4b)); + vx1[1] = vreinterpretq_s8_u8(vandq_u8(vv.val[1], m4b)); + vx1[2] = vreinterpretq_s8_u8(vshrq_n_u8(vv.val[0], 4)); + vx1[3] = vreinterpretq_s8_u8(vshrq_n_u8(vv.val[1], 4)); + } + + // process 32 data points (share same block scale) per iteration + for (int k = 0; k < 2; ++k) { + const int blk = j * 2 + k; + const int32x4_t block_scale = { + x0_scales[blk], + x0_scales[blk], + x1_scales[blk], + x1_scales[blk], + }; + + int32x4_t vr = {0}; + for (int l = 0; 
l < 2; ++l) { + const int idx = k * 2 + l; + const int64x2_t vx0_s64 = vreinterpretq_s64_s8(vx0[idx]); + const int64x2_t vx1_s64 = vreinterpretq_s64_s8(vx1[idx]); + const int64x2_t vy0_s64 = vreinterpretq_s64_s8(vy0.val[idx]); + const int64x2_t vy1_s64 = vreinterpretq_s64_s8(vy1.val[idx]); + const int8x16_t vx_l = vreinterpretq_s8_s64(vzip1q_s64(vx0_s64, vx1_s64)); + const int8x16_t vx_h = vreinterpretq_s8_s64(vzip2q_s64(vx0_s64, vx1_s64)); + const int8x16_t vy_l = vreinterpretq_s8_s64(vzip1q_s64(vy0_s64, vy1_s64)); + const int8x16_t vy_h = vreinterpretq_s8_s64(vzip2q_s64(vy0_s64, vy1_s64)); + vr = vmmlaq_s32(vr, vx_l, vy_l); + vr = vmmlaq_s32(vr, vx_h, vy_h); + } + // apply block scale, will NOT overflow + // block_scale * sum_256(int4*int8) <= 2^(8+8+4+8) = 28 bits + visum = vmlaq_s32(visum, vr, block_scale); + } + } + + // adjust bias, apply superblock scale + { + int32_t bias[4]; + // no obvious uplift from sve sdot-16, just use neon mul add + const int16x8_t y0_sums = vpaddq_s16(vld1q_s16(y0->bsums), vld1q_s16(y0->bsums+8)); + const int16x8_t y1_sums = vpaddq_s16(vld1q_s16(y1->bsums), vld1q_s16(y1->bsums+8)); + bias[0] = vaddvq_s32(vaddq_s32(vmull_s16(vget_low_s16(y0_sums), vget_low_s16(x0_mins)), + vmull_s16(vget_high_s16(y0_sums), vget_high_s16(x0_mins)))); + bias[1] = vaddvq_s32(vaddq_s32(vmull_s16(vget_low_s16(y1_sums), vget_low_s16(x0_mins)), + vmull_s16(vget_high_s16(y1_sums), vget_high_s16(x0_mins)))); + bias[2] = vaddvq_s32(vaddq_s32(vmull_s16(vget_low_s16(y0_sums), vget_low_s16(x1_mins)), + vmull_s16(vget_high_s16(y0_sums), vget_high_s16(x1_mins)))); + bias[3] = vaddvq_s32(vaddq_s32(vmull_s16(vget_low_s16(y1_sums), vget_low_s16(x1_mins)), + vmull_s16(vget_high_s16(y1_sums), vget_high_s16(x1_mins)))); + const float32x4_t dmins = { + GGML_CPU_FP16_TO_FP32(x0->dmin) * y0->d, + GGML_CPU_FP16_TO_FP32(x0->dmin) * y1->d, + GGML_CPU_FP16_TO_FP32(x1->dmin) * y0->d, + GGML_CPU_FP16_TO_FP32(x1->dmin) * y1->d, + }; + vfsum = vmlsq_f32(vfsum, vcvtq_f32_s32(vld1q_s32(bias)), dmins); + + const float32x4_t superblock_scale = { + GGML_CPU_FP16_TO_FP32(x0->d) * y0->d, + GGML_CPU_FP16_TO_FP32(x0->d) * y1->d, + GGML_CPU_FP16_TO_FP32(x1->d) * y0->d, + GGML_CPU_FP16_TO_FP32(x1->d) * y1->d, + }; + vfsum = vmlaq_f32(vfsum, vcvtq_f32_s32(visum), superblock_scale); + } + } + + // vfsum = ABCD -> ACBD + // AC -> s, BD -> (s+bs) + vfsum = vzip1q_f32(vfsum, vextq_f32(vfsum, vfsum, 2)); + vst1_f32(s, vget_low_f32 (vfsum)); + vst1_f32(s + bs, vget_high_f32(vfsum)); + + return; + } +#endif + +#ifdef __ARM_FEATURE_SVE + float sumf = 0; + for (int i = 0; i < nb; ++i) { + + const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); + const float dmin = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin); + + const int16x8_t q8sums = vpaddq_s16(vld1q_s16(y[i].bsums), vld1q_s16(y[i].bsums + 8)); + + memcpy(utmp, x[i].scales, K_SCALE_SIZE); + + uint32x2_t mins8 = { 0 }; + mins8 = vset_lane_u32(utmp[1] & kmask1, mins8, 0); + mins8 = vset_lane_u32(((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4), mins8, 1); + + utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); + utmp[0] &= kmask1; + + const int16x8_t mins = vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_u32(mins8))); + const int32x4_t prod = vaddq_s32(vmull_s16(vget_low_s16 (q8sums), vget_low_s16 (mins)), + vmull_s16(vget_high_s16(q8sums), vget_high_s16(mins))); + sumf -= dmin * vaddvq_s32(prod); + + const uint8_t * scales = (const uint8_t *)utmp; + + const uint8_t * GGML_RESTRICT q4 = x[i].qs; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + + const 
int vector_length = ggml_cpu_get_sve_cnt()*8; + const svuint8_t m4b = svdup_n_u8(0xf); + const svint32_t mzero = svdup_n_s32(0); + svint32_t sumi1 = svdup_n_s32(0); + svint32_t sumi1_1 = svdup_n_s32(0); + svint32_t sumi1_2 = svdup_n_s32(0); + svint32_t sumi2 = svdup_n_s32(0); + svint32_t sumi2_1 = svdup_n_s32(0); + svint32_t sumi2_2 = svdup_n_s32(0); + switch (vector_length) { + case 128: + { + for (int j = 0; j < QK_K/64; ++j) { + svint8_t q4bytes = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svld1_u8(svptrue_b8(), q4), m4b)); + svint8_t q8bytes = svld1_s8(svptrue_b8(), q8); q8 += 16; + sumi1_1 = svmla_n_s32_x(svptrue_b32(), sumi1_1, svdot_s32(mzero, q4bytes, q8bytes), scales[2*j+0]); + q4bytes = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svld1_u8(svptrue_b8(), q4+16), m4b)); + q8bytes = svld1_s8(svptrue_b8(), q8); q8 += 16; + sumi1_2 = svmla_n_s32_x(svptrue_b32(), sumi1_2, svdot_s32(mzero, q4bytes, q8bytes), scales[2*j+0]); + + q4bytes = svreinterpret_s8_u8(svlsr_n_u8_x(svptrue_b8(), svld1_u8(svptrue_b8(), q4), 4)); + q8bytes = svld1_s8(svptrue_b8(), q8); q8 += 16; + sumi2_1 = svmla_n_s32_x(svptrue_b32(), sumi2_1, svdot_s32(mzero, q4bytes, q8bytes), scales[2*j+1]); + q4bytes = svreinterpret_s8_u8(svlsr_n_u8_x(svptrue_b8(), svld1_u8(svptrue_b8(), q4+16), 4)); + q8bytes = svld1_s8(svptrue_b8(), q8); q8 += 16; + sumi2_2 = svmla_n_s32_x(svptrue_b32(), sumi2_2, svdot_s32(mzero, q4bytes, q8bytes), scales[2*j+1]); + q4 += 32; + } + sumi1 = svadd_s32_x(svptrue_b32(), sumi1_1, sumi1_2); + sumi2 = svadd_s32_x(svptrue_b32(), sumi2_1, sumi2_2); + sumf += d * (svaddv_s32(svptrue_b32(), svadd_s32_x(svptrue_b32(), sumi1, sumi2))); + } break; + case 256: + case 512: + { + for (int j = 0; j < QK_K/64; ++j) { + const svuint8_t q4bits = svld1_u8(svptrue_pat_b8(SV_VL32), q4); q4 += 32; + svint8_t q4bytes = svreinterpret_s8_u8(svand_u8_x(svptrue_pat_b8(SV_VL32), q4bits, m4b)); + svint8_t q8bytes = svld1_s8(svptrue_pat_b8(SV_VL32), q8); q8 += 32; + sumi1 = svmla_n_s32_x(svptrue_pat_b32(SV_VL8), sumi1, svdot_s32(mzero, q4bytes, q8bytes), scales[2*j+0]); + + q4bytes = svreinterpret_s8_u8(svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q4bits, 4)); + q8bytes = svld1_s8(svptrue_pat_b8(SV_VL32), q8); q8 += 32; + sumi2 = svmla_n_s32_x(svptrue_pat_b32(SV_VL8), sumi2, svdot_s32(mzero, q4bytes, q8bytes), scales[2*j+1]); + } + sumf += d * (svaddv_s32(svptrue_pat_b32(SV_VL8), svadd_s32_x(svptrue_pat_b32(SV_VL8), sumi1, sumi2))); + } break; + default: + assert(false && "Unsupported vector length"); + break; + } + } + *s = sumf; +#elif defined __ARM_NEON + const uint8x16_t m4b = vdupq_n_u8(0xf); + const int32x4_t mzero = vdupq_n_s32(0); + + ggml_int8x16x2_t q4bytes; + ggml_int8x16x2_t q8bytes; + + float sumf = 0; + + for (int i = 0; i < nb; ++i) { + + const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); + const float dmin = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin); + + const int16x8_t q8sums = vpaddq_s16(vld1q_s16(y[i].bsums), vld1q_s16(y[i].bsums + 8)); + + memcpy(utmp, x[i].scales, 12); + + uint32x2_t mins8 = { 0 }; + mins8 = vset_lane_u32(utmp[1] & kmask1, mins8, 0); + mins8 = vset_lane_u32(((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4), mins8, 1); + + utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); + utmp[0] &= kmask1; + + const int16x8_t mins = vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_u32(mins8))); + const int32x4_t prod = vaddq_s32(vmull_s16(vget_low_s16 (q8sums), vget_low_s16 (mins)), + vmull_s16(vget_high_s16(q8sums), vget_high_s16(mins))); + sumf -= dmin * vaddvq_s32(prod); + + 
const uint8_t * scales = (const uint8_t *)utmp; + + const uint8_t * GGML_RESTRICT q4 = x[i].qs; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + + int32_t sumi1 = 0; + int32_t sumi2 = 0; + + for (int j = 0; j < QK_K/64; ++j) { + const ggml_uint8x16x2_t q4bits = ggml_vld1q_u8_x2(q4); q4 += 32; + + q8bytes = ggml_vld1q_s8_x2(q8); q8 += 32; + q4bytes.val[0] = vreinterpretq_s8_u8(vandq_u8 (q4bits.val[0], m4b)); + q4bytes.val[1] = vreinterpretq_s8_u8(vandq_u8 (q4bits.val[1], m4b)); + + const int32x4_t p1 = ggml_vdotq_s32(ggml_vdotq_s32(mzero, q4bytes.val[0], q8bytes.val[0]), q4bytes.val[1], q8bytes.val[1]); + sumi1 += vaddvq_s32(p1) * scales[2*j+0]; + + q8bytes = ggml_vld1q_s8_x2(q8); q8 += 32; + q4bytes.val[0] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[0], 4)); + q4bytes.val[1] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[1], 4)); + + const int32x4_t p2 = ggml_vdotq_s32(ggml_vdotq_s32(mzero, q4bytes.val[0], q8bytes.val[0]), q4bytes.val[1], q8bytes.val[1]); + + sumi2 += vaddvq_s32(p2) * scales[2*j+1]; + } + + sumf += d * (sumi1 + sumi2); + + } + + *s = sumf; + +#else + UNUSED(x); + UNUSED(y); + UNUSED(nb); + UNUSED(kmask1); + UNUSED(kmask2); + UNUSED(kmask3); + UNUSED(utmp); + ggml_vec_dot_q4_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); +#endif +} + +void ggml_vec_dot_q5_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q5_K * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + + static const uint32_t kmask1 = 0x3f3f3f3f; + static const uint32_t kmask2 = 0x0f0f0f0f; + static const uint32_t kmask3 = 0x03030303; + + uint32_t utmp[4]; + + +#ifdef __ARM_NEON + const uint8x16_t m4b = vdupq_n_u8(0xf); + const uint8x16_t mone = vdupq_n_u8(1); + const uint8x16_t mtwo = vdupq_n_u8(2); + const int32x4_t mzero = vdupq_n_s32(0); + + ggml_int8x16x4_t q5bytes; + + float sumf = 0; + + for (int i = 0; i < nb; ++i) { + + const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); + const float dmin = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin); + + const int16x8_t q8sums = vpaddq_s16(vld1q_s16(y[i].bsums), vld1q_s16(y[i].bsums + 8)); + + memcpy(utmp, x[i].scales, 12); + utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); + const uint32_t uaux = utmp[1] & kmask1; + utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); + utmp[2] = uaux; + utmp[0] &= kmask1; + + const uint8x8_t mins8 = vld1_u8((const uint8_t*)utmp + 8); + const int16x8_t mins = vreinterpretq_s16_u16(vmovl_u8(mins8)); + const int32x4_t prod = vaddq_s32(vmull_s16(vget_low_s16 (q8sums), vget_low_s16 (mins)), + vmull_s16(vget_high_s16(q8sums), vget_high_s16(mins))); + int32_t sumi_mins = vaddvq_s32(prod); + + const uint8_t * scales = (const uint8_t *)utmp; + + const uint8_t * GGML_RESTRICT q5 = x[i].qs; + const uint8_t * GGML_RESTRICT qh = x[i].qh; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + + ggml_uint8x16x2_t qhbits = ggml_vld1q_u8_x2(qh); + + ggml_uint8x16x4_t q5h; + + int32_t sumi = 0; + + for (int j = 0; j < QK_K/64; ++j) { + + const ggml_uint8x16x2_t q5bits = ggml_vld1q_u8_x2(q5); q5 += 32; + const ggml_int8x16x4_t q8bytes = ggml_vld1q_s8_x4(q8); q8 += 64; + + q5h.val[0] = vshlq_n_u8(vandq_u8(mone, qhbits.val[0]), 4); + q5h.val[1] = vshlq_n_u8(vandq_u8(mone, qhbits.val[1]), 4); + q5h.val[2] = vshlq_n_u8(vandq_u8(mtwo, qhbits.val[0]), 3); + q5h.val[3] = 
vshlq_n_u8(vandq_u8(mtwo, qhbits.val[1]), 3); + qhbits.val[0] = vshrq_n_u8(qhbits.val[0], 2); + qhbits.val[1] = vshrq_n_u8(qhbits.val[1], 2); + + q5bytes.val[0] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q5bits.val[0], m4b), q5h.val[0])); + q5bytes.val[1] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q5bits.val[1], m4b), q5h.val[1])); + q5bytes.val[2] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q5bits.val[0], 4), q5h.val[2])); + q5bytes.val[3] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q5bits.val[1], 4), q5h.val[3])); + + sumi += vaddvq_s32(ggml_vdotq_s32(ggml_vdotq_s32(mzero, q5bytes.val[0], q8bytes.val[0]), q5bytes.val[1], q8bytes.val[1])) * *scales++; + sumi += vaddvq_s32(ggml_vdotq_s32(ggml_vdotq_s32(mzero, q5bytes.val[2], q8bytes.val[2]), q5bytes.val[3], q8bytes.val[3])) * *scales++; + } + + sumf += d * sumi - dmin * sumi_mins; + } + + *s = sumf; + +#else + UNUSED(x); + UNUSED(y); + UNUSED(nb); + UNUSED(kmask1); + UNUSED(kmask2); + UNUSED(kmask3); + UNUSED(utmp); + ggml_vec_dot_q5_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); +#endif +} + +void ggml_vec_dot_q6_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); +#ifdef __ARM_FEATURE_MATMUL_INT8 + assert((nrc == 2) || (nrc == 1)); +#else + assert(nrc == 1); +#endif + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q6_K * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + +#if defined(__ARM_FEATURE_MATMUL_INT8) + if (nrc == 2) { + const block_q6_K * GGML_RESTRICT x0 = x; + const block_q6_K * GGML_RESTRICT x1 = (const block_q6_K *) ((const uint8_t *)vx + bx); + const block_q8_K * GGML_RESTRICT y0 = y; + const block_q8_K * GGML_RESTRICT y1 = (const block_q8_K *) ((const uint8_t *)vy + by); + + float32x4_t vfsum = vdupq_n_f32(0.0f); + + for (int i = 0; i < nb; ++i, ++x0, ++x1, ++y0, ++y1) { + const uint8_t * GGML_RESTRICT ql0 = x0->ql; + const uint8_t * GGML_RESTRICT ql1 = x1->ql; + const uint8_t * GGML_RESTRICT qh0 = x0->qh; + const uint8_t * GGML_RESTRICT qh1 = x1->qh; + const int8_t * GGML_RESTRICT qy0 = y0->qs; + const int8_t * GGML_RESTRICT qy1 = y1->qs; + + const uint8x16_t mone = vdupq_n_u8(0x30); + const uint8x16_t m4b = vdupq_n_u8(0x0f); + + int32x4_t visum = vdupq_n_s32(0); + + // process 8 blocks per iteration, totally 16 blocks + for (int j = 0; j < 2; ++j, qh0 += 32, ql0 += 64, qh1 += 32, ql1 += 64) { + int8x16_t vx0[8], vx1[8]; + + // de-quantize vx0[8] + { + const uint8x16x2_t qh_bits = vld1q_u8_x2(qh0); + const uint8x16x4_t ql_bits = vld1q_u8_x4(ql0); + + uint8x16_t q6h_0 = vandq_u8(mone, vshlq_n_u8(qh_bits.val[0], 4)); + uint8x16_t q6h_1 = vandq_u8(mone, vshlq_n_u8(qh_bits.val[1], 4)); + uint8x16_t q6h_2 = vandq_u8(mone, vshlq_n_u8(qh_bits.val[0], 2)); + uint8x16_t q6h_3 = vandq_u8(mone, vshlq_n_u8(qh_bits.val[1], 2)); + + vx0[0] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(ql_bits.val[0], m4b), q6h_0)); + vx0[1] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(ql_bits.val[1], m4b), q6h_1)); + vx0[2] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(ql_bits.val[2], m4b), q6h_2)); + vx0[3] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(ql_bits.val[3], m4b), q6h_3)); + + q6h_0 = vandq_u8(mone, qh_bits.val[0]); + q6h_1 = vandq_u8(mone, qh_bits.val[1]); + q6h_2 = vandq_u8(mone, vshrq_n_u8(qh_bits.val[0], 2)); + q6h_3 = vandq_u8(mone, vshrq_n_u8(qh_bits.val[1], 2)); + + vx0[4] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(ql_bits.val[0], 4), q6h_0)); + vx0[5] = 
vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(ql_bits.val[1], 4), q6h_1)); + vx0[6] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(ql_bits.val[2], 4), q6h_2)); + vx0[7] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(ql_bits.val[3], 4), q6h_3)); + } + + // de-quantize vx1[8] + { + const uint8x16x2_t qh_bits = vld1q_u8_x2(qh1); + const uint8x16x4_t ql_bits = vld1q_u8_x4(ql1); + + uint8x16_t q6h_0 = vandq_u8(mone, vshlq_n_u8(qh_bits.val[0], 4)); + uint8x16_t q6h_1 = vandq_u8(mone, vshlq_n_u8(qh_bits.val[1], 4)); + uint8x16_t q6h_2 = vandq_u8(mone, vshlq_n_u8(qh_bits.val[0], 2)); + uint8x16_t q6h_3 = vandq_u8(mone, vshlq_n_u8(qh_bits.val[1], 2)); + + vx1[0] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(ql_bits.val[0], m4b), q6h_0)); + vx1[1] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(ql_bits.val[1], m4b), q6h_1)); + vx1[2] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(ql_bits.val[2], m4b), q6h_2)); + vx1[3] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(ql_bits.val[3], m4b), q6h_3)); + + q6h_0 = vandq_u8(mone, qh_bits.val[0]); + q6h_1 = vandq_u8(mone, qh_bits.val[1]); + q6h_2 = vandq_u8(mone, vshrq_n_u8(qh_bits.val[0], 2)); + q6h_3 = vandq_u8(mone, vshrq_n_u8(qh_bits.val[1], 2)); + + vx1[4] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(ql_bits.val[0], 4), q6h_0)); + vx1[5] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(ql_bits.val[1], 4), q6h_1)); + vx1[6] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(ql_bits.val[2], 4), q6h_2)); + vx1[7] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(ql_bits.val[3], 4), q6h_3)); + } + + // process 16 elements (one block with same scale) per iteration + // - vx = concat(ql, qh) - 32 + // - r1,r2,r3,r4 = smmla(vx, vy) + for (int k = 0; k < 8; ++k) { + const int blk = j * 8 + k; + + const int8x16_t vy0 = vld1q_s8(qy0); + const int8x16_t vy1 = vld1q_s8(qy1); + qy0 += 16; + qy1 += 16; + + const int32x4_t block_scale = { + x0->scales[blk], + x0->scales[blk], + x1->scales[blk], + x1->scales[blk], + }; + + // calculate four results at once with outer product + const int8x16_t vx_l = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(vx0[k]), vreinterpretq_s64_s8(vx1[k]))); + const int8x16_t vx_h = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(vx0[k]), vreinterpretq_s64_s8(vx1[k]))); + const int8x16_t vy_l = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(vy0), vreinterpretq_s64_s8(vy1))); + const int8x16_t vy_h = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(vy0), vreinterpretq_s64_s8(vy1))); + int32x4_t vr = vdupq_n_s32(0); + vr = vmmlaq_s32(vr, vx_l, vy_l); + vr = vmmlaq_s32(vr, vx_h, vy_h); + + // apply block scale, will NOT overflow + // block_scale * sum_256(int6*int8) <= 2^(8+8+6+8) = 30 bits + visum = vmlaq_s32(visum, vr, block_scale); + } + } + + // adjust bias, apply superblock scale + { + int32_t bias[4]; +#ifdef __ARM_FEATURE_SVE + const svbool_t pg16_8 = svptrue_pat_b16(SV_VL8); + const svbool_t pg8_8 = svptrue_pat_b8(SV_VL8); + const svint16_t y0_q8sums_0 = svld1_s16(pg16_8, y0->bsums); + const svint16_t y0_q8sums_1 = svld1_s16(pg16_8, y0->bsums + 8); + const svint16_t y1_q8sums_0 = svld1_s16(pg16_8, y1->bsums); + const svint16_t y1_q8sums_1 = svld1_s16(pg16_8, y1->bsums + 8); + const svint16_t x0_q6scales_0 = svunpklo_s16(svld1_s8(pg8_8, x0->scales)); + const svint16_t x0_q6scales_1 = svunpklo_s16(svld1_s8(pg8_8, x0->scales + 8)); + const svint16_t x1_q6scales_0 = svunpklo_s16(svld1_s8(pg8_8, x1->scales)); + const svint16_t x1_q6scales_1 = svunpklo_s16(svld1_s8(pg8_8, x1->scales + 8)); + const svint64_t zero = svdup_n_s64(0); + bias[0] = svaddv_s64(svptrue_b64(), 
svadd_s64_x(svptrue_b64(), svdot_s64(zero, y0_q8sums_0, x0_q6scales_0), + svdot_s64(zero, y0_q8sums_1, x0_q6scales_1))); + bias[1] = svaddv_s64(svptrue_b64(), svadd_s64_x(svptrue_b64(), svdot_s64(zero, y1_q8sums_0, x0_q6scales_0), + svdot_s64(zero, y1_q8sums_1, x0_q6scales_1))); + bias[2] = svaddv_s64(svptrue_b64(), svadd_s64_x(svptrue_b64(), svdot_s64(zero, y0_q8sums_0, x1_q6scales_0), + svdot_s64(zero, y0_q8sums_1, x1_q6scales_1))); + bias[3] = svaddv_s64(svptrue_b64(), svadd_s64_x(svptrue_b64(), svdot_s64(zero, y1_q8sums_0, x1_q6scales_0), + svdot_s64(zero, y1_q8sums_1, x1_q6scales_1))); +#else + // NEON doesn't support int16 dot product, fallback to separated mul and add + const int16x8x2_t q8sums0 = vld1q_s16_x2(y0->bsums); + const int16x8x2_t q8sums1 = vld1q_s16_x2(y1->bsums); + + int8x16_t scales_s8 = vld1q_s8(x0->scales); + const int16x8x2_t q6scales0 = {{vmovl_s8(vget_low_s8(scales_s8)), vmovl_s8(vget_high_s8(scales_s8))}}; + scales_s8 = vld1q_s8(x1->scales); + const int16x8x2_t q6scales1 = {{vmovl_s8(vget_low_s8(scales_s8)), vmovl_s8(vget_high_s8(scales_s8))}}; + + int32x4_t prod; + prod = vaddq_s32(vaddq_s32(vmull_s16(vget_low_s16 (q8sums0.val[0]), vget_low_s16 (q6scales0.val[0])), + vmull_s16(vget_high_s16(q8sums0.val[0]), vget_high_s16(q6scales0.val[0]))), + vaddq_s32(vmull_s16(vget_low_s16 (q8sums0.val[1]), vget_low_s16 (q6scales0.val[1])), + vmull_s16(vget_high_s16(q8sums0.val[1]), vget_high_s16(q6scales0.val[1])))); + bias[0] = vaddvq_s32(prod); + prod = vaddq_s32(vaddq_s32(vmull_s16(vget_low_s16 (q8sums1.val[0]), vget_low_s16 (q6scales0.val[0])), + vmull_s16(vget_high_s16(q8sums1.val[0]), vget_high_s16(q6scales0.val[0]))), + vaddq_s32(vmull_s16(vget_low_s16 (q8sums1.val[1]), vget_low_s16 (q6scales0.val[1])), + vmull_s16(vget_high_s16(q8sums1.val[1]), vget_high_s16(q6scales0.val[1])))); + bias[1] = vaddvq_s32(prod); + prod = vaddq_s32(vaddq_s32(vmull_s16(vget_low_s16 (q8sums0.val[0]), vget_low_s16 (q6scales1.val[0])), + vmull_s16(vget_high_s16(q8sums0.val[0]), vget_high_s16(q6scales1.val[0]))), + vaddq_s32(vmull_s16(vget_low_s16 (q8sums0.val[1]), vget_low_s16 (q6scales1.val[1])), + vmull_s16(vget_high_s16(q8sums0.val[1]), vget_high_s16(q6scales1.val[1])))); + bias[2] = vaddvq_s32(prod); + prod = vaddq_s32(vaddq_s32(vmull_s16(vget_low_s16 (q8sums1.val[0]), vget_low_s16 (q6scales1.val[0])), + vmull_s16(vget_high_s16(q8sums1.val[0]), vget_high_s16(q6scales1.val[0]))), + vaddq_s32(vmull_s16(vget_low_s16 (q8sums1.val[1]), vget_low_s16 (q6scales1.val[1])), + vmull_s16(vget_high_s16(q8sums1.val[1]), vget_high_s16(q6scales1.val[1])))); + bias[3] = vaddvq_s32(prod); + +#endif + const int32x4_t vibias = vmulq_n_s32(vld1q_s32(bias), 32); + + const float32x4_t superblock_scale = { + GGML_CPU_FP16_TO_FP32(x0->d) * y0->d, + GGML_CPU_FP16_TO_FP32(x0->d) * y1->d, + GGML_CPU_FP16_TO_FP32(x1->d) * y0->d, + GGML_CPU_FP16_TO_FP32(x1->d) * y1->d, + }; + + visum = vsubq_s32(visum, vibias); + vfsum = vmlaq_f32(vfsum, vcvtq_f32_s32(visum), superblock_scale); + } + } + + // vfsum = ABCD -> ACBD + // AC -> s, BD -> (s+bs) + vfsum = vzip1q_f32(vfsum, vextq_f32(vfsum, vfsum, 2)); + vst1_f32(s, vget_low_f32 (vfsum)); + vst1_f32(s + bs, vget_high_f32(vfsum)); + + return; + } +#endif + +#ifdef __ARM_FEATURE_SVE + const int vector_length = ggml_cpu_get_sve_cnt()*8; + float sum = 0; + svuint8_t m4b = svdup_n_u8(0xf); + svint32_t vzero = svdup_n_s32(0); + svuint8_t mone = svdup_n_u8(0x30); + svint8_t q6bytes_1, q6bytes_2, q6bytes_3, q6bytes_4; + svuint8_t q6h_1, q6h_2, q6h_3, q6h_4; + + for (int i = 0; i < 
nb; ++i) { + const float d_all = GGML_CPU_FP16_TO_FP32(x[i].d); + + const uint8_t * GGML_RESTRICT q6 = x[i].ql; + const uint8_t * GGML_RESTRICT qh = x[i].qh; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + + const int8_t * GGML_RESTRICT scale = x[i].scales; + + const svbool_t pg16_8 = svptrue_pat_b16(SV_VL8); + const svint16_t q8sums_1 = svld1_s16(pg16_8, y[i].bsums); + const svint16_t q8sums_2 = svld1_s16(pg16_8, y[i].bsums + 8); + const svint16_t q6scales_1 = svunpklo_s16(svld1_s8(svptrue_pat_b8(SV_VL8), scale)); + const svint16_t q6scales_2 = svunpklo_s16(svld1_s8(svptrue_pat_b8(SV_VL8), scale + 8)); + const svint64_t prod = svdup_n_s64(0); + int32_t isum_mins = svaddv_s64(svptrue_b64(), svadd_s64_x(svptrue_b64(), svdot_s64(prod, q8sums_1, q6scales_1), + svdot_s64(prod, q8sums_2, q6scales_2))); + int32_t isum = 0; + + switch (vector_length) { + case 128: + { + const svbool_t pg32_4 = svptrue_pat_b32(SV_VL4); + const svbool_t pg8_16 = svptrue_pat_b8(SV_VL16); + svint32_t isum_tmp = svdup_n_s32(0); + for (int j = 0; j < QK_K/128; ++j) { + svuint8_t qhbits_1 = svld1_u8(pg8_16, qh); + svuint8_t qhbits_2 = svld1_u8(pg8_16, qh+16); + qh += 32; + svuint8_t q6bits_1 = svld1_u8(pg8_16, q6); + svuint8_t q6bits_2 = svld1_u8(pg8_16, q6+16); + svuint8_t q6bits_3 = svld1_u8(pg8_16, q6+32); + svuint8_t q6bits_4 = svld1_u8(pg8_16, q6+48); + q6 += 64; + svint8_t q8bytes_1 = svld1_s8(pg8_16, q8); + svint8_t q8bytes_2 = svld1_s8(pg8_16, q8+16); + svint8_t q8bytes_3 = svld1_s8(pg8_16, q8+32); + svint8_t q8bytes_4 = svld1_s8(pg8_16, q8+48); + q8 += 64; + + q6h_1 = svand_u8_x(pg16_8, mone, svlsl_n_u8_x(pg16_8, qhbits_1, 4)); + q6h_2 = svand_u8_x(pg16_8, mone, svlsl_n_u8_x(pg16_8, qhbits_2, 4)); + q6h_3 = svand_u8_x(pg16_8, mone, svlsl_n_u8_x(pg16_8, qhbits_1, 2)); + q6h_4 = svand_u8_x(pg16_8, mone, svlsl_n_u8_x(pg16_8, qhbits_2, 2)); + q6bytes_1 = svreinterpret_s8_u8(svorr_u8_x(pg8_16, svand_u8_x(pg8_16, q6bits_1, m4b), q6h_1)); + q6bytes_2 = svreinterpret_s8_u8(svorr_u8_x(pg8_16, svand_u8_x(pg8_16, q6bits_2, m4b), q6h_2)); + q6bytes_3 = svreinterpret_s8_u8(svorr_u8_x(pg8_16, svand_u8_x(pg8_16, q6bits_3, m4b), q6h_3)); + q6bytes_4 = svreinterpret_s8_u8(svorr_u8_x(pg8_16, svand_u8_x(pg8_16, q6bits_4, m4b), q6h_4)); + isum_tmp = svmla_n_s32_x(pg32_4, isum_tmp, svdot_s32(vzero, q6bytes_1, q8bytes_1), scale[0]); + isum_tmp = svmla_n_s32_x(pg32_4, isum_tmp, svdot_s32(vzero, q6bytes_2, q8bytes_2), scale[1]); + isum_tmp = svmla_n_s32_x(pg32_4, isum_tmp, svdot_s32(vzero, q6bytes_3, q8bytes_3), scale[2]); + isum_tmp = svmla_n_s32_x(pg32_4, isum_tmp, svdot_s32(vzero, q6bytes_4, q8bytes_4), scale[3]); + + scale += 4; + q8bytes_1 = svld1_s8(pg8_16, q8); + q8bytes_2 = svld1_s8(pg8_16, q8+16); + q8bytes_3 = svld1_s8(pg8_16, q8+32); + q8bytes_4 = svld1_s8(pg8_16, q8+48); + q8 += 64; + + q6h_1 = svand_u8_x(pg16_8, mone, qhbits_1); + q6h_2 = svand_u8_x(pg16_8, mone, qhbits_2); + q6h_3 = svand_u8_x(pg16_8, mone, svlsr_n_u8_x(pg16_8, qhbits_1, 2)); + q6h_4 = svand_u8_x(pg16_8, mone, svlsr_n_u8_x(pg16_8, qhbits_2, 2)); + q6bytes_1 = svreinterpret_s8_u8(svorr_u8_x(pg8_16, svlsr_n_u8_x(pg8_16, q6bits_1, 4), q6h_1)); + q6bytes_2 = svreinterpret_s8_u8(svorr_u8_x(pg8_16, svlsr_n_u8_x(pg8_16, q6bits_2, 4), q6h_2)); + q6bytes_3 = svreinterpret_s8_u8(svorr_u8_x(pg8_16, svlsr_n_u8_x(pg8_16, q6bits_3, 4), q6h_3)); + q6bytes_4 = svreinterpret_s8_u8(svorr_u8_x(pg8_16, svlsr_n_u8_x(pg8_16, q6bits_4, 4), q6h_4)); + isum_tmp = svmla_n_s32_x(pg32_4, isum_tmp, svdot_s32(vzero, q6bytes_1, q8bytes_1), scale[0]); + isum_tmp = svmla_n_s32_x(pg32_4, 
isum_tmp, svdot_s32(vzero, q6bytes_2, q8bytes_2), scale[1]); + isum_tmp = svmla_n_s32_x(pg32_4, isum_tmp, svdot_s32(vzero, q6bytes_3, q8bytes_3), scale[2]); + isum_tmp = svmla_n_s32_x(pg32_4, isum_tmp, svdot_s32(vzero, q6bytes_4, q8bytes_4), scale[3]); + scale += 4; + } + isum += svaddv_s32(pg32_4, isum_tmp); + sum += d_all * y[i].d * (isum - 32 * isum_mins); + } + break; + case 256: + case 512: + { + const svbool_t pg8_2 = svptrue_pat_b8(SV_VL2); + const svbool_t pg32_8 = svptrue_pat_b32(SV_VL8); + const svbool_t pg8_32 = svptrue_pat_b8(SV_VL32); + svint32_t isum_tmp = svdup_n_s32(0); + for (int j = 0; j < QK_K/128; j++) { + svuint8_t qhbits_1 = svld1_u8(pg8_32, qh); + qh += 32; + svuint8_t q6bits_1 = svld1_u8(pg8_32, q6); + svuint8_t q6bits_2 = svld1_u8(pg8_32, q6+32); + q6 += 64; + svint8_t q8bytes_1 = svld1_s8(pg8_32, q8); + svint8_t q8bytes_2 = svld1_s8(pg8_32, q8+32); + svint8_t q8bytes_3 = svld1_s8(pg8_32, q8+64); + svint8_t q8bytes_4 = svld1_s8(pg8_32, q8+96); + q8 += 128; + q6h_1 = svand_u8_x(pg8_32, mone, svlsl_n_u8_x(pg8_32, qhbits_1, 4)); + q6h_2 = svand_u8_x(pg8_32, mone, svlsl_n_u8_x(pg8_32, qhbits_1, 2)); + q6h_3 = svand_u8_x(pg8_32, mone, qhbits_1); + q6h_4 = svand_u8_x(pg8_32, mone, svlsr_n_u8_x(pg8_32, qhbits_1, 2)); + q6bytes_1 = svreinterpret_s8_u8(svorr_u8_x(pg8_32, svand_u8_x(pg8_32, q6bits_1, m4b), q6h_1)); + q6bytes_2 = svreinterpret_s8_u8(svorr_u8_x(pg8_32, svand_u8_x(pg8_32, q6bits_2, m4b), q6h_2)); + q6bytes_3 = svreinterpret_s8_u8(svorr_u8_x(pg8_32, svlsr_n_u8_x(pg8_32, q6bits_1, 4), q6h_3)); + q6bytes_4 = svreinterpret_s8_u8(svorr_u8_x(pg8_32, svlsr_n_u8_x(pg8_32, q6bits_2, 4), q6h_4)); + + svint8_t scale_lane_1_tmp = svld1_s8(pg8_2, scale); + scale_lane_1_tmp= svzip1_s8(scale_lane_1_tmp, scale_lane_1_tmp); + scale_lane_1_tmp= svzip1_s8(scale_lane_1_tmp, scale_lane_1_tmp); + svint8_t scale_lane_2_tmp = svld1_s8(pg8_2, scale+2); + scale_lane_2_tmp = svzip1_s8(scale_lane_2_tmp, scale_lane_2_tmp); + scale_lane_2_tmp = svzip1_s8(scale_lane_2_tmp, scale_lane_2_tmp); + svint8_t scale_lane_3_tmp = svld1_s8(pg8_2, scale+4); + scale_lane_3_tmp = svzip1_s8(scale_lane_3_tmp, scale_lane_3_tmp); + scale_lane_3_tmp = svzip1_s8(scale_lane_3_tmp, scale_lane_3_tmp); + svint8_t scale_lane_4_tmp = svld1_s8(pg8_2, scale+6); + scale_lane_4_tmp = svzip1_s8(scale_lane_4_tmp, scale_lane_4_tmp); + scale_lane_4_tmp = svzip1_s8(scale_lane_4_tmp, scale_lane_4_tmp); + svint32_t scale_lane_1 = svunpklo_s32(svunpklo_s16(scale_lane_1_tmp)); + svint32_t scale_lane_2 = svunpklo_s32(svunpklo_s16(scale_lane_2_tmp)); + svint32_t scale_lane_3 = svunpklo_s32(svunpklo_s16(scale_lane_3_tmp)); + svint32_t scale_lane_4 = svunpklo_s32(svunpklo_s16(scale_lane_4_tmp)); + + isum_tmp = svmla_s32_x(pg32_8, isum_tmp, svdot_s32(vzero, q6bytes_1, q8bytes_1), scale_lane_1); + isum_tmp = svmla_s32_x(pg32_8, isum_tmp, svdot_s32(vzero, q6bytes_2, q8bytes_2), scale_lane_2); + isum_tmp = svmla_s32_x(pg32_8, isum_tmp, svdot_s32(vzero, q6bytes_3, q8bytes_3), scale_lane_3); + isum_tmp = svmla_s32_x(pg32_8, isum_tmp, svdot_s32(vzero, q6bytes_4, q8bytes_4), scale_lane_4); + scale += 8; + } + isum += svaddv_s32(pg32_8, isum_tmp); + sum += d_all * y[i].d * (isum - 32 * isum_mins); + } + break; + default: + assert(false && "Unsupported vector length"); + break; + } + } + + *s = sum; + +#elif __ARM_NEON + float sum = 0; + + const uint8x16_t m4b = vdupq_n_u8(0xF); + const int32x4_t vzero = vdupq_n_s32(0); + //const int8x16_t m32s = vdupq_n_s8(32); + + const uint8x16_t mone = vdupq_n_u8(3); + + ggml_int8x16x4_t q6bytes; + 
ggml_uint8x16x4_t q6h; + + for (int i = 0; i < nb; ++i) { + + const float d_all = GGML_CPU_FP16_TO_FP32(x[i].d); + + const uint8_t * GGML_RESTRICT q6 = x[i].ql; + const uint8_t * GGML_RESTRICT qh = x[i].qh; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + + const int8_t * GGML_RESTRICT scale = x[i].scales; + + const ggml_int16x8x2_t q8sums = ggml_vld1q_s16_x2(y[i].bsums); + const int8x16_t scales = vld1q_s8(scale); + const ggml_int16x8x2_t q6scales = {{vmovl_s8(vget_low_s8(scales)), vmovl_s8(vget_high_s8(scales))}}; + + const int32x4_t prod = vaddq_s32(vaddq_s32(vmull_s16(vget_low_s16 (q8sums.val[0]), vget_low_s16 (q6scales.val[0])), + vmull_s16(vget_high_s16(q8sums.val[0]), vget_high_s16(q6scales.val[0]))), + vaddq_s32(vmull_s16(vget_low_s16 (q8sums.val[1]), vget_low_s16 (q6scales.val[1])), + vmull_s16(vget_high_s16(q8sums.val[1]), vget_high_s16(q6scales.val[1])))); + int32_t isum_mins = vaddvq_s32(prod); + + int32_t isum = 0; + + for (int j = 0; j < QK_K/128; ++j) { + + ggml_uint8x16x2_t qhbits = ggml_vld1q_u8_x2(qh); qh += 32; + ggml_uint8x16x4_t q6bits = ggml_vld1q_u8_x4(q6); q6 += 64; + ggml_int8x16x4_t q8bytes = ggml_vld1q_s8_x4(q8); q8 += 64; + + q6h.val[0] = vshlq_n_u8(vandq_u8(mone, qhbits.val[0]), 4); + q6h.val[1] = vshlq_n_u8(vandq_u8(mone, qhbits.val[1]), 4); + uint8x16_t shifted = vshrq_n_u8(qhbits.val[0], 2); + q6h.val[2] = vshlq_n_u8(vandq_u8(mone, shifted), 4); + shifted = vshrq_n_u8(qhbits.val[1], 2); + q6h.val[3] = vshlq_n_u8(vandq_u8(mone, shifted), 4); + + //q6bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[0], m4b), q6h.val[0])), m32s); + //q6bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[1], m4b), q6h.val[1])), m32s); + //q6bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[2], m4b), q6h.val[2])), m32s); + //q6bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[3], m4b), q6h.val[3])), m32s); + q6bytes.val[0] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[0], m4b), q6h.val[0])); + q6bytes.val[1] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[1], m4b), q6h.val[1])); + q6bytes.val[2] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[2], m4b), q6h.val[2])); + q6bytes.val[3] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[3], m4b), q6h.val[3])); + + isum += vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[0], q8bytes.val[0])) * scale[0] + + vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[1], q8bytes.val[1])) * scale[1] + + vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[2], q8bytes.val[2])) * scale[2] + + vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[3], q8bytes.val[3])) * scale[3]; + + scale += 4; + + q8bytes = ggml_vld1q_s8_x4(q8); q8 += 64; + + shifted = vshrq_n_u8(qhbits.val[0], 4); + q6h.val[0] = vshlq_n_u8(vandq_u8(mone, shifted), 4); + shifted = vshrq_n_u8(qhbits.val[1], 4); + q6h.val[1] = vshlq_n_u8(vandq_u8(mone, shifted), 4); + shifted = vshrq_n_u8(qhbits.val[0], 6); + q6h.val[2] = vshlq_n_u8(vandq_u8(mone, shifted), 4); + shifted = vshrq_n_u8(qhbits.val[1], 6); + q6h.val[3] = vshlq_n_u8(vandq_u8(mone, shifted), 4); + + //q6bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[0], 4), q6h.val[0])), m32s); + //q6bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[1], 4), q6h.val[1])), m32s); + //q6bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[2], 4), q6h.val[2])), m32s); + //q6bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[3], 4), q6h.val[3])), m32s); + q6bytes.val[0] = 
vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[0], 4), q6h.val[0])); + q6bytes.val[1] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[1], 4), q6h.val[1])); + q6bytes.val[2] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[2], 4), q6h.val[2])); + q6bytes.val[3] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[3], 4), q6h.val[3])); + + isum += vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[0], q8bytes.val[0])) * scale[0] + + vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[1], q8bytes.val[1])) * scale[1] + + vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[2], q8bytes.val[2])) * scale[2] + + vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[3], q8bytes.val[3])) * scale[3]; + scale += 4; + } + //sum += isum * d_all * y[i].d; + sum += d_all * y[i].d * (isum - 32 * isum_mins); + + } + *s = sum; +#else + UNUSED(x); + UNUSED(y); + UNUSED(nb); + ggml_vec_dot_q6_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); +#endif +} + +#if defined (__ARM_NEON) +static const int8_t keven_signs_q2xs[1024] = { + 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, 1, + 1, 1, -1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, 1, 1, 1, -1, -1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, -1, + 1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, 1, 1, -1, 1, -1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, -1, + 1, 1, -1, -1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, 1, + 1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, 1, -1, 1, 1, 1, 1, -1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, -1, + 1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, -1, 1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, 1, + 1, 1, 1, -1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, 1, + 1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, 1, 1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, -1, + 1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, 1, -1, 1, -1, + 1, 1, -1, 1, 1, -1, 1, 1, -1, 1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, 1, + 1, 1, 1, -1, 1, -1, 1, 1, -1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, 1, + 1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, 1, 1, -1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, -1, + 1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, 1, + 1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, 1, 1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, -1, + 1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, -1, + 1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, -1, 1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, 1, + 1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, -1, 1, 1, -1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, 1, -1, -1, + 1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, -1, 1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, 1, + 1, 1, 1, -1, 1, 1, -1, 1, -1, 1, 1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, 1, + 1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, 1, 1, -1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, -1, + 1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, -1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, 1, + 1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, -1, + 1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, 1, 1, -1, 1, -1, -1, 1, -1, 
1, -1, -1, 1, -1, -1, 1, -1, -1, + 1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, 1, + 1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, 1, -1, -1, -1, 1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, -1, 1, + 1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, + 1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, -1, + 1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, -1, 1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, 1, + 1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, 1, 1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, -1, + 1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, -1, 1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, 1, + 1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, -1, 1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, + 1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, 1, 1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1, +}; +#endif + +void ggml_vec_dot_iq2_xxs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_iq2_xxs * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + +#if defined(__ARM_NEON) + + const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs; + + uint32_t aux32[4]; + const uint8_t * aux8 = (const uint8_t *)aux32; + + ggml_int8x16x4_t q2u; + ggml_int8x16x4_t q2s; + ggml_int8x16x4_t q8b; + + float sumf = 0; + for (int i = 0; i < nb; ++i) { + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; + const uint16_t * GGML_RESTRICT q2 = x[i].qs; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + float sumf1 = 0, sumf2 = 0; + for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { + q8b = ggml_vld1q_s8_x4(q8); q8 += 64; + memcpy(aux32, q2, 4*sizeof(uint32_t)); q2 += 8; + q2u.val[0] = vcombine_s8(vld1_s8((const void *)(iq2xxs_grid + aux8[ 0])), vld1_s8((const void *)(iq2xxs_grid + aux8[ 1]))); + q2u.val[1] = vcombine_s8(vld1_s8((const void *)(iq2xxs_grid + aux8[ 2])), vld1_s8((const void *)(iq2xxs_grid + aux8[ 3]))); + q2u.val[2] = vcombine_s8(vld1_s8((const void *)(iq2xxs_grid + aux8[ 8])), vld1_s8((const void *)(iq2xxs_grid + aux8[ 9]))); + q2u.val[3] = vcombine_s8(vld1_s8((const void *)(iq2xxs_grid + aux8[10])), vld1_s8((const void *)(iq2xxs_grid + aux8[11]))); + q2s.val[0] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[1] >> 0) & 127))), vld1_s8((const void *)(signs64 + ((aux32[1] >> 7) & 127)))); + q2s.val[1] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[1] >> 14) & 127))), vld1_s8((const void *)(signs64 + ((aux32[1] >> 21) & 127)))); + q2s.val[2] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[3] >> 0) & 127))), vld1_s8((const void *)(signs64 + ((aux32[3] >> 7) & 127)))); + q2s.val[3] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[3] >> 14) & 127))), vld1_s8((const void *)(signs64 + ((aux32[3] >> 21) & 127)))); + q2u.val[0] = vmulq_s8(q2u.val[0], q2s.val[0]); + q2u.val[1] = vmulq_s8(q2u.val[1], q2s.val[1]); + q2u.val[2] = vmulq_s8(q2u.val[2], q2s.val[2]); + q2u.val[3] = vmulq_s8(q2u.val[3], q2s.val[3]); + const int32x4_t p1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q2u.val[0], q8b.val[0]), q2u.val[1], q8b.val[1]); + const int32x4_t p2 = 
ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q2u.val[2], q8b.val[2]), q2u.val[3], q8b.val[3]); + sumf1 += vaddvq_s32(p1) * (0.5f + (aux32[1] >> 28)); + sumf2 += vaddvq_s32(p2) * (0.5f + (aux32[3] >> 28)); + } + sumf += d*(sumf1 + sumf2); + } + *s = 0.25f * sumf; + +#else + UNUSED(x); + UNUSED(y); + UNUSED(nb); + ggml_vec_dot_iq2_xxs_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); +#endif +} + +void ggml_vec_dot_iq2_xs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_iq2_xs * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + +#if defined(__ARM_NEON) + + const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs; + + ggml_int8x16x4_t q2u; + ggml_int8x16x4_t q2s; + ggml_int8x16x4_t q8b; + + int32x4x4_t scales32; + + float sumf = 0; + for (int i = 0; i < nb; ++i) { + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; + const uint16_t * GGML_RESTRICT q2 = x[i].qs; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + const uint8x8_t scales8 = vld1_u8(x[i].scales); + const uint8x8_t scales_l = vand_u8(scales8, vdup_n_u8(0xf)); + const uint8x8_t scales_h = vshr_n_u8(scales8, 4); + uint8x16_t scales = vcombine_u8(vzip1_u8(scales_l, scales_h), vzip2_u8(scales_l, scales_h)); + scales = vaddq_u8(vshlq_n_u8(scales, 1), vdupq_n_u8(1)); + const uint16x8_t scales1 = vmovl_u8(vget_low_u8(scales)); + const uint16x8_t scales2 = vmovl_u8(vget_high_u8(scales)); + scales32.val[0] = vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(scales1))); + scales32.val[1] = vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(scales1))); + scales32.val[2] = vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(scales2))); + scales32.val[3] = vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(scales2))); + int32x4_t sumi = vdupq_n_s32(0); + for (int ib64 = 0; ib64 < QK_K/64; ++ib64) { + q8b = ggml_vld1q_s8_x4(q8); q8 += 64; + q2u.val[0] = vcombine_s8(vld1_s8((const void *)(iq2xs_grid + (q2[0] & 511))), vld1_s8((const void *)(iq2xs_grid + (q2[1] & 511)))); + q2u.val[1] = vcombine_s8(vld1_s8((const void *)(iq2xs_grid + (q2[2] & 511))), vld1_s8((const void *)(iq2xs_grid + (q2[3] & 511)))); + q2u.val[2] = vcombine_s8(vld1_s8((const void *)(iq2xs_grid + (q2[4] & 511))), vld1_s8((const void *)(iq2xs_grid + (q2[5] & 511)))); + q2u.val[3] = vcombine_s8(vld1_s8((const void *)(iq2xs_grid + (q2[6] & 511))), vld1_s8((const void *)(iq2xs_grid + (q2[7] & 511)))); + q2s.val[0] = vcombine_s8(vld1_s8((const void *)(signs64 + (q2[0] >> 9))), vld1_s8((const void *)(signs64 + (q2[1] >> 9)))); + q2s.val[1] = vcombine_s8(vld1_s8((const void *)(signs64 + (q2[2] >> 9))), vld1_s8((const void *)(signs64 + (q2[3] >> 9)))); + q2s.val[2] = vcombine_s8(vld1_s8((const void *)(signs64 + (q2[4] >> 9))), vld1_s8((const void *)(signs64 + (q2[5] >> 9)))); + q2s.val[3] = vcombine_s8(vld1_s8((const void *)(signs64 + (q2[6] >> 9))), vld1_s8((const void *)(signs64 + (q2[7] >> 9)))); + q2u.val[0] = vmulq_s8(q2u.val[0], q2s.val[0]); + q2u.val[1] = vmulq_s8(q2u.val[1], q2s.val[1]); + q2u.val[2] = vmulq_s8(q2u.val[2], q2s.val[2]); + q2u.val[3] = vmulq_s8(q2u.val[3], q2s.val[3]); + const int32x4_t p1 = ggml_vdotq_s32(vdupq_n_s32(0), q2u.val[0], q8b.val[0]); + const int32x4_t p2 = ggml_vdotq_s32(vdupq_n_s32(0), q2u.val[1], q8b.val[1]); + const int32x4_t p3 = ggml_vdotq_s32(vdupq_n_s32(0), q2u.val[2], q8b.val[2]); + const int32x4_t p4 = 
ggml_vdotq_s32(vdupq_n_s32(0), q2u.val[3], q8b.val[3]); + const int32x4_t p = vpaddq_s32(vpaddq_s32(p1, p2), vpaddq_s32(p3, p4)); + sumi = vmlaq_s32(sumi, p, scales32.val[ib64]); + q2 += 8; + } + sumf += d*vaddvq_s32(sumi); + } + *s = 0.125f * sumf; + +#else + UNUSED(x); + UNUSED(y); + UNUSED(nb); + ggml_vec_dot_iq2_xs_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); +#endif +} + +void ggml_vec_dot_iq2_s_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_iq2_s * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + +#if defined(__ARM_NEON) + + static const uint8_t k_mask1[32] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03 + }; + + static const uint8_t k_mask2[16] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,}; + + const ggml_uint8x16x2_t mask1 = ggml_vld1q_u8_x2(k_mask1); + const uint8x16_t mask2 = vld1q_u8(k_mask2); + const uint8x16_t m1 = vdupq_n_u8(1); + const int32x4_t vzero = vdupq_n_s32(0); + + uint8x16x2_t vs; + ggml_int8x16x4_t q2s; + ggml_int8x16x4_t q8b; + + float sumf = 0; + for (int i = 0; i < nb; ++i) { + + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; + + const uint8_t * GGML_RESTRICT qs = x[i].qs; + const uint8_t * GGML_RESTRICT qh = x[i].qh; + const uint16_t * GGML_RESTRICT signs = (const uint16_t *)(x[i].qs + QK_K/8); + const int8_t * GGML_RESTRICT q8 = y[i].qs; + + int sumi1 = 0, sumi2 = 0; + for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { + q8b = ggml_vld1q_s8_x4(q8); q8 += 64; + q2s.val[0] = vcombine_s8(vld1_s8((const int8_t *)(iq2s_grid + (qs[0] | ((qh[ib32+0] << 8) & 0x300)))), + vld1_s8((const int8_t *)(iq2s_grid + (qs[1] | ((qh[ib32+0] << 6) & 0x300))))); + q2s.val[1] = vcombine_s8(vld1_s8((const int8_t *)(iq2s_grid + (qs[2] | ((qh[ib32+0] << 4) & 0x300)))), + vld1_s8((const int8_t *)(iq2s_grid + (qs[3] | ((qh[ib32+0] << 2) & 0x300))))); + q2s.val[2] = vcombine_s8(vld1_s8((const int8_t *)(iq2s_grid + (qs[4] | ((qh[ib32+1] << 8) & 0x300)))), + vld1_s8((const int8_t *)(iq2s_grid + (qs[5] | ((qh[ib32+1] << 6) & 0x300))))); + q2s.val[3] = vcombine_s8(vld1_s8((const int8_t *)(iq2s_grid + (qs[6] | ((qh[ib32+1] << 4) & 0x300)))), + vld1_s8((const int8_t *)(iq2s_grid + (qs[7] | ((qh[ib32+1] << 2) & 0x300))))); + qs += 8; + + vs.val[0] = vreinterpretq_u8_u32(vdupq_n_u32(signs[0] | ((uint32_t) signs[1] << 16))); + vs.val[1] = vandq_u8(ggml_vqtbl1q_u8(vs.val[0], mask1.val[1]), mask2); + vs.val[0] = vandq_u8(ggml_vqtbl1q_u8(vs.val[0], mask1.val[0]), mask2); + vs.val[0] = vceqq_u8(vs.val[0], mask2); + vs.val[1] = vceqq_u8(vs.val[1], mask2); + + q2s.val[0] = vmulq_s8(vreinterpretq_s8_u8(vorrq_u8(vs.val[0], m1)), q2s.val[0]); + q2s.val[1] = vmulq_s8(vreinterpretq_s8_u8(vorrq_u8(vs.val[1], m1)), q2s.val[1]); + + vs.val[0] = vreinterpretq_u8_u32(vdupq_n_u32(signs[2] | ((uint32_t) signs[3] << 16))); + vs.val[1] = vandq_u8(ggml_vqtbl1q_u8(vs.val[0], mask1.val[1]), mask2); + vs.val[0] = vandq_u8(ggml_vqtbl1q_u8(vs.val[0], mask1.val[0]), mask2); + vs.val[0] = vceqq_u8(vs.val[0], mask2); + vs.val[1] = vceqq_u8(vs.val[1], mask2); + + signs += 4; + + q2s.val[2] = vmulq_s8(vreinterpretq_s8_u8(vorrq_u8(vs.val[0], m1)), q2s.val[2]); + 
q2s.val[3] = vmulq_s8(vreinterpretq_s8_u8(vorrq_u8(vs.val[1], m1)), q2s.val[3]); + + const int32x4_t p1 = ggml_vdotq_s32(vzero, q2s.val[0], q8b.val[0]); + const int32x4_t p2 = ggml_vdotq_s32(vzero, q2s.val[1], q8b.val[1]); + const int32x4_t p3 = ggml_vdotq_s32(vzero, q2s.val[2], q8b.val[2]); + const int32x4_t p4 = ggml_vdotq_s32(vzero, q2s.val[3], q8b.val[3]); + + sumi1 += vaddvq_s32(p1) * (1 + 2*(x[i].scales[ib32+0] & 0xf)); + sumi2 += vaddvq_s32(p2) * (1 + 2*(x[i].scales[ib32+0] >> 4)); + sumi1 += vaddvq_s32(p3) * (1 + 2*(x[i].scales[ib32+1] & 0xf)); + sumi2 += vaddvq_s32(p4) * (1 + 2*(x[i].scales[ib32+1] >> 4)); + } + sumf += d*(sumi1 + sumi2); + } + + *s = 0.125f * sumf; + +#else + UNUSED(x); + UNUSED(y); + UNUSED(nb); + ggml_vec_dot_iq2_s_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); +#endif + +} + +void ggml_vec_dot_iq3_xxs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_iq3_xxs * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + +#if defined(__ARM_NEON) + + const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs; + + uint32_t aux32[2]; + + ggml_int8x16x4_t q3s; + ggml_int8x16x4_t q8b; + + float sumf = 0; + for (int i = 0; i < nb; ++i) { + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; + const uint8_t * GGML_RESTRICT q3 = x[i].qs; + const uint8_t * GGML_RESTRICT gas = x[i].qs + QK_K/4; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + float sumf1 = 0, sumf2 = 0; + for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { + q8b = ggml_vld1q_s8_x4(q8); q8 += 64; + memcpy(aux32, gas, 2*sizeof(uint32_t)); gas += 2*sizeof(uint32_t); + const uint32x4_t aux32x4_0 = ggml_vld1q_u32(iq3xxs_grid[q3[ 0]], iq3xxs_grid[q3[ 1]], iq3xxs_grid[q3[ 2]], iq3xxs_grid[q3[ 3]]); + const uint32x4_t aux32x4_1 = ggml_vld1q_u32(iq3xxs_grid[q3[ 4]], iq3xxs_grid[q3[ 5]], iq3xxs_grid[q3[ 6]], iq3xxs_grid[q3[ 7]]); + const uint32x4_t aux32x4_2 = ggml_vld1q_u32(iq3xxs_grid[q3[ 8]], iq3xxs_grid[q3[ 9]], iq3xxs_grid[q3[10]], iq3xxs_grid[q3[11]]); + const uint32x4_t aux32x4_3 = ggml_vld1q_u32(iq3xxs_grid[q3[12]], iq3xxs_grid[q3[13]], iq3xxs_grid[q3[14]], iq3xxs_grid[q3[15]]); + q3 += 16; + q3s.val[0] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[0] >> 0) & 127))), vld1_s8((const void *)(signs64 + ((aux32[0] >> 7) & 127)))); + q3s.val[1] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[0] >> 14) & 127))), vld1_s8((const void *)(signs64 + ((aux32[0] >> 21) & 127)))); + q3s.val[2] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[1] >> 0) & 127))), vld1_s8((const void *)(signs64 + ((aux32[1] >> 7) & 127)))); + q3s.val[3] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[1] >> 14) & 127))), vld1_s8((const void *)(signs64 + ((aux32[1] >> 21) & 127)))); + q3s.val[0] = vmulq_s8(q3s.val[0], vreinterpretq_s8_u32(aux32x4_0)); + q3s.val[1] = vmulq_s8(q3s.val[1], vreinterpretq_s8_u32(aux32x4_1)); + q3s.val[2] = vmulq_s8(q3s.val[2], vreinterpretq_s8_u32(aux32x4_2)); + q3s.val[3] = vmulq_s8(q3s.val[3], vreinterpretq_s8_u32(aux32x4_3)); + const int32x4_t p1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q3s.val[0], q8b.val[0]), q3s.val[1], q8b.val[1]); + const int32x4_t p2 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q3s.val[2], q8b.val[2]), q3s.val[3], q8b.val[3]); + sumf1 += vaddvq_s32(p1) * (0.5f + (aux32[0] >> 28)); + sumf2 += vaddvq_s32(p2) * 
(0.5f + (aux32[1] >> 28)); + } + sumf += d*(sumf1 + sumf2); + } + *s = 0.5f * sumf; + +#else + UNUSED(x); + UNUSED(y); + UNUSED(nb); + ggml_vec_dot_iq3_xxs_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); +#endif +} + +void ggml_vec_dot_iq3_s_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_iq3_s * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + +#if defined(__ARM_NEON) + + typedef union { + uint16x8_t vec_index; + uint16_t index[8]; + } vec_index_t; + + static const uint8_t k_mask1[32] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03 + }; + + static const uint8_t k_mask2[16] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,}; + + static const int16_t k_shift[8] = {8, 7, 6, 5, 4, 3, 2, 1}; + + const ggml_uint8x16x2_t mask1 = ggml_vld1q_u8_x2(k_mask1); + const uint8x16_t mask2 = vld1q_u8(k_mask2); + + const int16x8_t hshift = vld1q_s16(k_shift); + const uint16x8_t m256 = vdupq_n_u16(256); + const uint8x16_t m1 = vdupq_n_u8(1); + + uint8x16x2_t vs; + ggml_int8x16x4_t q3s; + ggml_int8x16x4_t q8b; + vec_index_t idx; + + uint32_t scales32[2]; + const uint8_t * scales8 = (const uint8_t *)scales32; + + float sumf = 0; + for (int i = 0; i < nb; ++i) { + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; + const uint8_t * GGML_RESTRICT qs = x[i].qs; + const uint8_t * GGML_RESTRICT qh = x[i].qh; + const uint16_t * GGML_RESTRICT signs = (const uint16_t *)x[i].signs; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + + memcpy(scales32, x[i].scales, 4); + scales32[1] = (((scales32[0] >> 4) & 0x0f0f0f0f) << 1) | 0x01010101; + scales32[0] = ((scales32[0] & 0x0f0f0f0f) << 1) | 0x01010101; + + int sumi1 = 0, sumi2 = 0; + for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { + q8b = ggml_vld1q_s8_x4(q8); q8 += 64; + + const uint8x16_t idx_l = vld1q_u8(qs); qs += 16; + idx.vec_index = vorrq_u16(vmovl_u8(vget_low_u8 (idx_l)), vandq_u16(vshlq_u16(vdupq_n_u16(qh[ib32+0]), hshift), m256)); + const uint32x4_t aux32x4_0 = ggml_vld1q_u32(iq3s_grid[idx.index[0]], iq3s_grid[idx.index[1]], + iq3s_grid[idx.index[2]], iq3s_grid[idx.index[3]]); + const uint32x4_t aux32x4_1 = ggml_vld1q_u32(iq3s_grid[idx.index[4]], iq3s_grid[idx.index[5]], + iq3s_grid[idx.index[6]], iq3s_grid[idx.index[7]]); + idx.vec_index = vorrq_u16(vmovl_u8(vget_high_u8(idx_l)), vandq_u16(vshlq_u16(vdupq_n_u16(qh[ib32+1]), hshift), m256)); + const uint32x4_t aux32x4_2 = ggml_vld1q_u32(iq3s_grid[idx.index[0]], iq3s_grid[idx.index[1]], + iq3s_grid[idx.index[2]], iq3s_grid[idx.index[3]]); + const uint32x4_t aux32x4_3 = ggml_vld1q_u32(iq3s_grid[idx.index[4]], iq3s_grid[idx.index[5]], + iq3s_grid[idx.index[6]], iq3s_grid[idx.index[7]]); + + + vs.val[0] = vreinterpretq_u8_u32(vdupq_n_u32(signs[0] | ((uint32_t) signs[1] << 16))); + vs.val[1] = vandq_u8(ggml_vqtbl1q_u8(vs.val[0], mask1.val[1]), mask2); + vs.val[0] = vandq_u8(ggml_vqtbl1q_u8(vs.val[0], mask1.val[0]), mask2); + vs.val[0] = vorrq_u8(vceqq_u8(vs.val[0], mask2), m1); + vs.val[1] = vorrq_u8(vceqq_u8(vs.val[1], mask2), m1); + + q3s.val[0] = vmulq_s8(vreinterpretq_s8_u8(vs.val[0]), vreinterpretq_s8_u32(aux32x4_0)); + q3s.val[1] = 
vmulq_s8(vreinterpretq_s8_u8(vs.val[1]), vreinterpretq_s8_u32(aux32x4_1)); + + vs.val[0] = vreinterpretq_u8_u32(vdupq_n_u32(signs[2] | ((uint32_t) signs[3] << 16))); + vs.val[1] = vandq_u8(ggml_vqtbl1q_u8(vs.val[0], mask1.val[1]), mask2); + vs.val[0] = vandq_u8(ggml_vqtbl1q_u8(vs.val[0], mask1.val[0]), mask2); + vs.val[0] = vorrq_u8(vceqq_u8(vs.val[0], mask2), m1); + vs.val[1] = vorrq_u8(vceqq_u8(vs.val[1], mask2), m1); + + signs += 4; + + q3s.val[2] = vmulq_s8(vreinterpretq_s8_u8(vs.val[0]), vreinterpretq_s8_u32(aux32x4_2)); + q3s.val[3] = vmulq_s8(vreinterpretq_s8_u8(vs.val[1]), vreinterpretq_s8_u32(aux32x4_3)); + + const int32x4_t p1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q3s.val[0], q8b.val[0]), q3s.val[1], q8b.val[1]); + const int32x4_t p2 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q3s.val[2], q8b.val[2]), q3s.val[3], q8b.val[3]); + + sumi1 += vaddvq_s32(p1) * scales8[ib32/2+0]; + sumi2 += vaddvq_s32(p2) * scales8[ib32/2+4]; + } + sumf += d*(sumi1 + sumi2); + } + *s = sumf; + +#else + UNUSED(x); + UNUSED(y); + UNUSED(nb); + ggml_vec_dot_iq3_s_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); +#endif +} + +void ggml_vec_dot_iq1_s_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_iq1_s * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + +#if defined __ARM_NEON + + ggml_int8x16x4_t q1b; + ggml_int8x16x4_t q8b; + + float sumf = 0; + for (int i = 0; i < nb; ++i) { + + const int8_t * q8 = y[i].qs; + const uint8_t * qs = x[i].qs; + const uint16_t * qh = x[i].qh; + + int sumi1 = 0, sumi2 = 0, sumi3 = 0; + + for (int ib = 0; ib < QK_K/32; ib += 2) { + + q1b.val[0] = vcombine_s8(vld1_s8((const int8_t *)(iq1s_grid + (qs[0] | ((qh[ib+0] << 8) & 0x700)))), + vld1_s8((const int8_t *)(iq1s_grid + (qs[1] | ((qh[ib+0] << 5) & 0x700))))); + q1b.val[1] = vcombine_s8(vld1_s8((const int8_t *)(iq1s_grid + (qs[2] | ((qh[ib+0] << 2) & 0x700)))), + vld1_s8((const int8_t *)(iq1s_grid + (qs[3] | ((qh[ib+0] >> 1) & 0x700))))); + q1b.val[2] = vcombine_s8(vld1_s8((const int8_t *)(iq1s_grid + (qs[4] | ((qh[ib+1] << 8) & 0x700)))), + vld1_s8((const int8_t *)(iq1s_grid + (qs[5] | ((qh[ib+1] << 5) & 0x700))))); + q1b.val[3] = vcombine_s8(vld1_s8((const int8_t *)(iq1s_grid + (qs[6] | ((qh[ib+1] << 2) & 0x700)))), + vld1_s8((const int8_t *)(iq1s_grid + (qs[7] | ((qh[ib+1] >> 1) & 0x700))))); + qs += 8; + + q8b = ggml_vld1q_s8_x4(q8); q8 += 64; + + const int32x4_t p1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q1b.val[0], q8b.val[0]), q1b.val[1], q8b.val[1]); + const int32x4_t p2 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q1b.val[2], q8b.val[2]), q1b.val[3], q8b.val[3]); + + const int ls1 = 2*((qh[ib+0] >> 12) & 7) + 1; + const int ls2 = 2*((qh[ib+1] >> 12) & 7) + 1; + sumi1 += vaddvq_s32(p1) * ls1; + sumi2 += vaddvq_s32(p2) * ls2; + sumi3 += (y[i].bsums[2*ib+0] + y[i].bsums[2*ib+1]) * ls1 * (qh[ib+0] & 0x8000 ? -1 : 1) + + (y[i].bsums[2*ib+2] + y[i].bsums[2*ib+3]) * ls2 * (qh[ib+1] & 0x8000 ? 
-1 : 1); + + } + + sumf += y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d) * (sumi1 + sumi2 + IQ1S_DELTA * sumi3); + } + + *s = sumf; + +#else + UNUSED(x); + UNUSED(y); + UNUSED(nb); + ggml_vec_dot_iq1_s_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); +#endif +} + +void ggml_vec_dot_iq1_m_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_iq1_m * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + + iq1m_scale_t scale; + +#if defined __ARM_NEON + const int32x4_t mask = vdupq_n_s32(0x7); + const int32x4_t mone = vdupq_n_s32(1); + const int32x4_t mzero = vdupq_n_s32(0); + + ggml_int8x16x4_t deltas; + deltas.val[0] = vcombine_s8(vdup_n_s8(+1), vdup_n_s8(+1)); + deltas.val[1] = vcombine_s8(vdup_n_s8(-1), vdup_n_s8(+1)); + deltas.val[2] = vcombine_s8(vdup_n_s8(+1), vdup_n_s8(-1)); + deltas.val[3] = vcombine_s8(vdup_n_s8(-1), vdup_n_s8(-1)); + + ggml_int8x16x4_t q1b; + ggml_int8x16x4_t q8b; + + uint32_t aux32; + const uint8_t * aux8 = (const uint8_t *)&aux32; + + float sumf = 0; + for (int i = 0; i < nb; ++i) { + + const int8_t * q8 = y[i].qs; + const uint8_t * qs = x[i].qs; + const uint8_t * qh = x[i].qh; + const uint16_t * sc = (const uint16_t *)x[i].scales; + + scale.u16 = (sc[0] >> 12) | ((sc[1] >> 8) & 0x00f0) | ((sc[2] >> 4) & 0x0f00) | (sc[3] & 0xf000); + + int32x4_t sumi1 = mzero; + int32x4_t sumi2 = mzero; + + for (int ib = 0; ib < QK_K/32; ib += 2) { + + q1b.val[0] = vcombine_s8(vld1_s8((const int8_t *)(iq1s_grid + (qs[0] | ((qh[0] << 8) & 0x700)))), + vld1_s8((const int8_t *)(iq1s_grid + (qs[1] | ((qh[0] << 4) & 0x700))))); + q1b.val[1] = vcombine_s8(vld1_s8((const int8_t *)(iq1s_grid + (qs[2] | ((qh[1] << 8) & 0x700)))), + vld1_s8((const int8_t *)(iq1s_grid + (qs[3] | ((qh[1] << 4) & 0x700))))); + q1b.val[2] = vcombine_s8(vld1_s8((const int8_t *)(iq1s_grid + (qs[4] | ((qh[2] << 8) & 0x700)))), + vld1_s8((const int8_t *)(iq1s_grid + (qs[5] | ((qh[2] << 4) & 0x700))))); + q1b.val[3] = vcombine_s8(vld1_s8((const int8_t *)(iq1s_grid + (qs[6] | ((qh[3] << 8) & 0x700)))), + vld1_s8((const int8_t *)(iq1s_grid + (qs[7] | ((qh[3] << 4) & 0x700))))); + + q8b = ggml_vld1q_s8_x4(q8); q8 += 64; + + const int32x4_t p1 = vpaddq_s32(ggml_vdotq_s32(mzero, q1b.val[0], q8b.val[0]), ggml_vdotq_s32(mzero, q1b.val[1], q8b.val[1])); + const int32x4_t p2 = vpaddq_s32(ggml_vdotq_s32(mzero, q1b.val[2], q8b.val[2]), ggml_vdotq_s32(mzero, q1b.val[3], q8b.val[3])); + const int32x4_t p12 = vpaddq_s32(p1, p2); + + const uint32_t * qh32 = (const uint32_t *)qh; // we are 4-byte aligned, so we can do that + aux32 = ((qh32[0] >> 3) & 0x01010101) | ((qh32[0] >> 6) & 0x02020202); + + const int32x4_t p3 = vpaddq_s32(ggml_vdotq_s32(mzero, deltas.val[aux8[0]], q8b.val[0]), ggml_vdotq_s32(mzero, deltas.val[aux8[1]], q8b.val[1])); + const int32x4_t p4 = vpaddq_s32(ggml_vdotq_s32(mzero, deltas.val[aux8[2]], q8b.val[2]), ggml_vdotq_s32(mzero, deltas.val[aux8[3]], q8b.val[3])); + const int32x4_t p34 = vpaddq_s32(p3, p4); + + int32x4_t scales_4 = ggml_vld1q_u32(sc[ib/2] >> 0, sc[ib/2] >> 3, sc[ib/2] >> 6, sc[ib/2] >> 9); + + scales_4 = vaddq_s32(vshlq_n_s32(vandq_s32(scales_4, mask), 1), mone); + + sumi1 = vmlaq_s32(sumi1, scales_4, p12); + sumi2 = vmlaq_s32(sumi2, scales_4, p34); + + qs += 8; qh += 4; + + } + + sumf += y[i].d * GGML_CPU_FP16_TO_FP32(scale.f16) * (vaddvq_s32(sumi1) + 
IQ1M_DELTA * vaddvq_s32(sumi2)); + } + + *s = sumf; + +#else + UNUSED(x); + UNUSED(y); + UNUSED(nb); + UNUSED(scale); + ggml_vec_dot_iq1_m_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); +#endif +} + +void ggml_vec_dot_iq4_nl_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + assert(n % QK4_NL == 0); + static_assert(QK4_NL == QK8_0, "QK4_NL and QK8_0 must be the same"); + + const block_iq4_nl * GGML_RESTRICT x = vx; + const block_q8_0 * GGML_RESTRICT y = vy; + + const int nb = n / QK4_NL; + + int ib = 0; + float sumf = 0; + +#if defined __ARM_NEON + const int8x16_t values = vld1q_s8(kvalues_iq4nl); + const uint8x16_t m4b = vdupq_n_u8(0x0f); + uint8x16x2_t q4bits; + int8x16x4_t q4b; + int8x16x4_t q8b; + int32x4_t prod_1, prod_2; + + for (; ib + 1 < nb; ib += 2) { + + q4bits.val[0] = vld1q_u8(x[ib + 0].qs); + q4bits.val[1] = vld1q_u8(x[ib + 1].qs); + q8b.val[0] = vld1q_s8(y[ib + 0].qs); + q8b.val[1] = vld1q_s8(y[ib + 0].qs + 16); + q8b.val[2] = vld1q_s8(y[ib + 1].qs); + q8b.val[3] = vld1q_s8(y[ib + 1].qs + 16); + + q4b.val[0] = ggml_vqtbl1q_s8(values, vandq_u8 (q4bits.val[0], m4b)); + q4b.val[1] = ggml_vqtbl1q_s8(values, vshrq_n_u8(q4bits.val[0], 4)); + q4b.val[2] = ggml_vqtbl1q_s8(values, vandq_u8 (q4bits.val[1], m4b)); + q4b.val[3] = ggml_vqtbl1q_s8(values, vshrq_n_u8(q4bits.val[1], 4)); + + prod_1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q4b.val[0], q8b.val[0]), q4b.val[1], q8b.val[1]); + prod_2 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q4b.val[2], q8b.val[2]), q4b.val[3], q8b.val[3]); + + sumf += + GGML_CPU_FP16_TO_FP32(x[ib+0].d) * GGML_CPU_FP16_TO_FP32(y[ib + 0].d) * vaddvq_s32(prod_1) + + GGML_CPU_FP16_TO_FP32(x[ib+1].d) * GGML_CPU_FP16_TO_FP32(y[ib + 1].d) * vaddvq_s32(prod_2); + } + +#endif + for (; ib < nb; ++ib) { + const float d = GGML_CPU_FP16_TO_FP32(y[ib].d)*GGML_CPU_FP16_TO_FP32(x[ib].d); + int sumi1 = 0, sumi2 = 0; + for (int j = 0; j < QK4_NL/2; ++j) { + sumi1 += y[ib].qs[j+ 0] * kvalues_iq4nl[x[ib].qs[j] & 0xf]; + sumi2 += y[ib].qs[j+QK4_NL/2] * kvalues_iq4nl[x[ib].qs[j] >> 4]; + } + sumf += d * (sumi1 + sumi2); + } + *s = sumf; +} + +void ggml_vec_dot_iq4_xs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + assert(n % QK_K == 0); + + const block_iq4_xs * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + +#if defined __ARM_NEON + const int8x16_t values = vld1q_s8(kvalues_iq4nl); + const uint8x16_t m4b = vdupq_n_u8(0x0f); + ggml_uint8x16x2_t q4bits; + ggml_int8x16x4_t q4b; + ggml_int8x16x4_t q8b; + int32x4_t prod_1, prod_2; + + float sumf = 0; + + for (int ibl = 0; ibl < nb; ++ibl) { + + const int8_t * q8 = y[ibl].qs; + const uint8_t * q4 = x[ibl].qs; + uint16_t h = x[ibl].scales_h; + + int sumi1 = 0, sumi2 = 0; + for (int ib = 0; ib < QK_K/64; ++ib) { + + q4bits = ggml_vld1q_u8_x2(q4); q4 += 32; + q8b = ggml_vld1q_s8_x4(q8); q8 += 64; + + q4b.val[0] = ggml_vqtbl1q_s8(values, vandq_u8 (q4bits.val[0], m4b)); + q4b.val[1] = ggml_vqtbl1q_s8(values, vshrq_n_u8(q4bits.val[0], 4)); + q4b.val[2] = ggml_vqtbl1q_s8(values, vandq_u8 (q4bits.val[1], m4b)); + q4b.val[3] = ggml_vqtbl1q_s8(values, vshrq_n_u8(q4bits.val[1], 4)); + + prod_1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), 
q4b.val[0], q8b.val[0]), q4b.val[1], q8b.val[1]);
+            prod_2 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q4b.val[2], q8b.val[2]), q4b.val[3], q8b.val[3]);
+
+            int ls1 = ((x[ibl].scales_l[ib] & 0xf) | ((h << 4) & 0x30)) - 32;
+            int ls2 = ((x[ibl].scales_l[ib] >> 4) | ((h << 2) & 0x30)) - 32;
+            h >>= 4;
+            sumi1 += vaddvq_s32(prod_1) * ls1;
+            sumi2 += vaddvq_s32(prod_2) * ls2;
+
+        }
+
+        sumf += GGML_CPU_FP16_TO_FP32(x[ibl].d) * y[ibl].d * (sumi1 + sumi2);
+    }
+
+    *s = sumf;
+
+#else
+    UNUSED(x);
+    UNUSED(y);
+    UNUSED(nb);
+    ggml_vec_dot_iq4_xs_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
+#endif
+}
+
diff --git a/ml/backend/ggml/ggml/src/ggml-cpu/arch/arm/repack.cpp b/ml/backend/ggml/ggml/src/ggml-cpu/arch/arm/repack.cpp
new file mode 100644
index 000000000..fdd0a513b
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-cpu/arch/arm/repack.cpp
@@ -0,0 +1,1891 @@
+#define GGML_COMMON_IMPL_CPP
+#define GGML_COMMON_DECL_CPP
+#include "ggml-common.h"
+#include "ggml-backend-impl.h"
+
+#include "ggml-impl.h"
+#include "ggml-cpu.h"
+#include "ggml-cpu-impl.h"
+#include "simd-mappings.h"
+#include "traits.h"
+
+#include <cmath>
+#include <cstring>
+#include <cstdlib> // for qsort
+#include <cstdio>  // for GGML_ASSERT
+
+#define GGML_CPU_CLANG_WORKAROUND
+#include "../../repack.h"
+
+#if defined(__GNUC__)
+#pragma GCC diagnostic ignored "-Woverlength-strings"
+#endif
+
+#define UNUSED GGML_UNUSED
+
+void ggml_quantize_mat_q8_0_4x4(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) {
+    assert(QK8_0 == 32);
+    assert(k % QK8_0 == 0);
+    const int nb = k / QK8_0;
+
+    block_q8_0x4 * GGML_RESTRICT y = (block_q8_0x4 *) vy;
+
+#if defined(__ARM_NEON)
+    float32x4_t srcv[4][8];
+    float id[4];
+
+    for (int i = 0; i < nb; i++) {
+        float32x4_t asrcv[8];
+        float32x4_t amaxv[8];
+
+        for (int row_iter = 0; row_iter < 4; row_iter++) {
+            for (int j = 0; j < 8; j++) srcv[row_iter][j] = vld1q_f32(x + row_iter * k + i * 32 + 4 * j);
+            for (int j = 0; j < 8; j++) asrcv[j] = vabsq_f32(srcv[row_iter][j]);
+
+            for (int j = 0; j < 4; j++) amaxv[2 * j] = vmaxq_f32(asrcv[2 * j], asrcv[2 * j + 1]);
+            for (int j = 0; j < 2; j++) amaxv[4 * j] = vmaxq_f32(amaxv[4 * j], amaxv[4 * j + 2]);
+            for (int j = 0; j < 1; j++) amaxv[8 * j] = vmaxq_f32(amaxv[8 * j], amaxv[8 * j + 4]);
+
+            const float amax = vmaxvq_f32(amaxv[0]);
+
+            const float d = amax / ((1 << 7) - 1);
+            id[row_iter] = d ? 1.0f / d : 0.0f;
+
+            y[i].d[row_iter] = GGML_CPU_FP32_TO_FP16(d);
+        }
+
+        for (int j = 0; j < 8; j++) {
+            float32x4_t v = vmulq_n_f32(srcv[0][j], id[0]);
+            int32x4_t vi = vcvtnq_s32_f32(v);
+            y[i].qs[16 * j + 0] = vgetq_lane_s32(vi, 0);
+            y[i].qs[16 * j + 1] = vgetq_lane_s32(vi, 1);
+            y[i].qs[16 * j + 2] = vgetq_lane_s32(vi, 2);
+            y[i].qs[16 * j + 3] = vgetq_lane_s32(vi, 3);
+
+            v = vmulq_n_f32(srcv[1][j], id[1]);
+            vi = vcvtnq_s32_f32(v);
+            y[i].qs[16 * j + 4] = vgetq_lane_s32(vi, 0);
+            y[i].qs[16 * j + 5] = vgetq_lane_s32(vi, 1);
+            y[i].qs[16 * j + 6] = vgetq_lane_s32(vi, 2);
+            y[i].qs[16 * j + 7] = vgetq_lane_s32(vi, 3);
+
+            v = vmulq_n_f32(srcv[2][j], id[2]);
+            vi = vcvtnq_s32_f32(v);
+            y[i].qs[16 * j + 8] = vgetq_lane_s32(vi, 0);
+            y[i].qs[16 * j + 9] = vgetq_lane_s32(vi, 1);
+            y[i].qs[16 * j + 10] = vgetq_lane_s32(vi, 2);
+            y[i].qs[16 * j + 11] = vgetq_lane_s32(vi, 3);
+
+            v = vmulq_n_f32(srcv[3][j], id[3]);
+            vi = vcvtnq_s32_f32(v);
+            y[i].qs[16 * j + 12] = vgetq_lane_s32(vi, 0);
+            y[i].qs[16 * j + 13] = vgetq_lane_s32(vi, 1);
+            y[i].qs[16 * j + 14] = vgetq_lane_s32(vi, 2);
+            y[i].qs[16 * j + 15] = vgetq_lane_s32(vi, 3);
+        }
+    }
+#else
+    UNUSED(nb);
+    UNUSED(y);
+    ggml_quantize_mat_q8_0_4x4_generic(x, vy, k);
+#endif
+}
+
+void ggml_quantize_mat_q8_0_4x8(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) {
+    assert(QK8_0 == 32);
+    assert(k % QK8_0 == 0);
+    const int nb = k / QK8_0;
+
+    block_q8_0x4 * GGML_RESTRICT y = (block_q8_0x4 *) vy;
+
+#if defined(__ARM_NEON)
+    float32x4_t srcv[4][8];
+    float id[4];
+
+    for (int i = 0; i < nb; i++) {
+        float32x4_t asrcv[8];
+        float32x4_t amaxv[8];
+
+        for (int row_iter = 0; row_iter < 4; row_iter++) {
+            for (int j = 0; j < 8; j++) srcv[row_iter][j] = vld1q_f32(x + row_iter * k + i * 32 + 4 * j);
+            for (int j = 0; j < 8; j++) asrcv[j] = vabsq_f32(srcv[row_iter][j]);
+
+            for (int j = 0; j < 4; j++) amaxv[2 * j] = vmaxq_f32(asrcv[2 * j], asrcv[2 * j + 1]);
+            for (int j = 0; j < 2; j++) amaxv[4 * j] = vmaxq_f32(amaxv[4 * j], amaxv[4 * j + 2]);
+            for (int j = 0; j < 1; j++) amaxv[8 * j] = vmaxq_f32(amaxv[8 * j], amaxv[8 * j + 4]);
+
+            const float amax = vmaxvq_f32(amaxv[0]);
+
+            const float d = amax / ((1 << 7) - 1);
+            id[row_iter] = d ?
1.0f / d : 0.0f; + + y[i].d[row_iter] = GGML_CPU_FP32_TO_FP16(d); + } + + for (int j = 0; j < 4; j++) { + float32x4_t v = vmulq_n_f32(srcv[0][2 * j], id[0]); + int32x4_t vi = vcvtnq_s32_f32(v); + y[i].qs[32 * j + 0] = vgetq_lane_s32(vi, 0); + y[i].qs[32 * j + 1] = vgetq_lane_s32(vi, 1); + y[i].qs[32 * j + 2] = vgetq_lane_s32(vi, 2); + y[i].qs[32 * j + 3] = vgetq_lane_s32(vi, 3); + v = vmulq_n_f32(srcv[0][2 * j + 1], id[0]); + vi = vcvtnq_s32_f32(v); + y[i].qs[32 * j + 4] = vgetq_lane_s32(vi, 0); + y[i].qs[32 * j + 5] = vgetq_lane_s32(vi, 1); + y[i].qs[32 * j + 6] = vgetq_lane_s32(vi, 2); + y[i].qs[32 * j + 7] = vgetq_lane_s32(vi, 3); + + v = vmulq_n_f32(srcv[1][2 * j], id[1]); + vi = vcvtnq_s32_f32(v); + y[i].qs[32 * j + 8] = vgetq_lane_s32(vi, 0); + y[i].qs[32 * j + 9] = vgetq_lane_s32(vi, 1); + y[i].qs[32 * j + 10] = vgetq_lane_s32(vi, 2); + y[i].qs[32 * j + 11] = vgetq_lane_s32(vi, 3); + v = vmulq_n_f32(srcv[1][2 * j + 1], id[1]); + vi = vcvtnq_s32_f32(v); + y[i].qs[32 * j + 12] = vgetq_lane_s32(vi, 0); + y[i].qs[32 * j + 13] = vgetq_lane_s32(vi, 1); + y[i].qs[32 * j + 14] = vgetq_lane_s32(vi, 2); + y[i].qs[32 * j + 15] = vgetq_lane_s32(vi, 3); + + v = vmulq_n_f32(srcv[2][2 * j], id[2]); + vi = vcvtnq_s32_f32(v); + y[i].qs[32 * j + 16] = vgetq_lane_s32(vi, 0); + y[i].qs[32 * j + 17] = vgetq_lane_s32(vi, 1); + y[i].qs[32 * j + 18] = vgetq_lane_s32(vi, 2); + y[i].qs[32 * j + 19] = vgetq_lane_s32(vi, 3); + v = vmulq_n_f32(srcv[2][2 * j + 1], id[2]); + vi = vcvtnq_s32_f32(v); + y[i].qs[32 * j + 20] = vgetq_lane_s32(vi, 0); + y[i].qs[32 * j + 21] = vgetq_lane_s32(vi, 1); + y[i].qs[32 * j + 22] = vgetq_lane_s32(vi, 2); + y[i].qs[32 * j + 23] = vgetq_lane_s32(vi, 3); + + v = vmulq_n_f32(srcv[3][2 * j], id[3]); + vi = vcvtnq_s32_f32(v); + y[i].qs[32 * j + 24] = vgetq_lane_s32(vi, 0); + y[i].qs[32 * j + 25] = vgetq_lane_s32(vi, 1); + y[i].qs[32 * j + 26] = vgetq_lane_s32(vi, 2); + y[i].qs[32 * j + 27] = vgetq_lane_s32(vi, 3); + v = vmulq_n_f32(srcv[3][2 * j + 1], id[3]); + vi = vcvtnq_s32_f32(v); + y[i].qs[32 * j + 28] = vgetq_lane_s32(vi, 0); + y[i].qs[32 * j + 29] = vgetq_lane_s32(vi, 1); + y[i].qs[32 * j + 30] = vgetq_lane_s32(vi, 2); + y[i].qs[32 * j + 31] = vgetq_lane_s32(vi, 3); + } + } + +#else + UNUSED(nb); + UNUSED(y); + ggml_quantize_mat_q8_0_4x8_generic(x, vy, k); +#endif +} + +void ggml_gemv_q4_0_4x4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { + const int qk = QK8_0; + const int nb = n / qk; + const int ncols_interleaved = 4; + const int blocklen = 4; + + assert (n % qk == 0); + assert (nc % ncols_interleaved == 0); + + UNUSED(s); + UNUSED(bs); + UNUSED(vx); + UNUSED(vy); + UNUSED(nr); + UNUSED(nc); + UNUSED(nb); + UNUSED(ncols_interleaved); + UNUSED(blocklen); + +#if ! ((defined(_MSC_VER)) && ! 
defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD) + const block_q4_0x4 * b_ptr = (const block_q4_0x4 *) vx; + + for (int c = 0; c < nc; c += ncols_interleaved) { + const block_q8_0 * a_ptr = (const block_q8_0 *) vy; + float32x4_t acc = vdupq_n_f32(0); + for (int b = 0; b < nb; b++) { + int8x16_t b0 = vld1q_s8((const int8_t *) b_ptr->qs); + int8x16_t b1 = vld1q_s8((const int8_t *) b_ptr->qs + 16); + int8x16_t b2 = vld1q_s8((const int8_t *) b_ptr->qs + 32); + int8x16_t b3 = vld1q_s8((const int8_t *) b_ptr->qs + 48); + float16x4_t bd = vld1_f16((const __fp16 *) b_ptr->d); + + int8x16_t a0 = vld1q_s8(a_ptr->qs); + int8x16_t a1 = vld1q_s8(a_ptr->qs + qk/2); + float16x4_t ad = vld1_dup_f16((const __fp16 *) &a_ptr->d); + + int32x4_t ret = vdupq_n_s32(0); + + ret = vdotq_laneq_s32(ret, b0 << 4, a0, 0); + ret = vdotq_laneq_s32(ret, b1 << 4, a0, 1); + ret = vdotq_laneq_s32(ret, b2 << 4, a0, 2); + ret = vdotq_laneq_s32(ret, b3 << 4, a0, 3); + + ret = vdotq_laneq_s32(ret, b0 & 0xf0U, a1, 0); + ret = vdotq_laneq_s32(ret, b1 & 0xf0U, a1, 1); + ret = vdotq_laneq_s32(ret, b2 & 0xf0U, a1, 2); + ret = vdotq_laneq_s32(ret, b3 & 0xf0U, a1, 3); + + acc = vfmaq_f32(acc, vcvtq_n_f32_s32(ret, 4), + vmulq_f32(vcvt_f32_f16(ad), vcvt_f32_f16(bd))); + a_ptr++; + b_ptr++; + } + vst1q_f32(s, acc); + s += ncols_interleaved; + } + return; +#endif // #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD) + ggml_gemv_q4_0_4x4_q8_0_generic(n, s, bs, vx, vy, nr, nc); +} + +void ggml_gemv_q4_0_4x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { + const int qk = QK8_0; + const int nb = n / qk; + const int ncols_interleaved = 4; + const int blocklen = 8; + + assert (n % qk == 0); + assert (nc % ncols_interleaved == 0); + + UNUSED(s); + UNUSED(bs); + UNUSED(vx); + UNUSED(vy); + UNUSED(nr); + UNUSED(nc); + UNUSED(nb); + UNUSED(ncols_interleaved); + UNUSED(blocklen); + +#if ! ((defined(_MSC_VER)) && ! 
defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD) + const block_q4_0x4 * b_ptr = (const block_q4_0x4 *) vx; + + for (int c = 0; c < nc; c += ncols_interleaved) { + const block_q8_0 * a_ptr = (const block_q8_0 *) vy; + float32x4_t acc = vdupq_n_f32(0); + for (int b = 0; b < nb; b++) { + int8x16_t b0 = vld1q_s8((const int8_t *) b_ptr->qs); + int8x16_t b1 = vld1q_s8((const int8_t *) b_ptr->qs + 16); + int8x16_t b2 = vld1q_s8((const int8_t *) b_ptr->qs + 32); + int8x16_t b3 = vld1q_s8((const int8_t *) b_ptr->qs + 48); + float16x4_t bd = vld1_f16((const __fp16 *) b_ptr->d); + + int8x16_t a0 = (int8x16_t) vld1q_dup_s64((const int64_t *) a_ptr->qs); + int8x16_t a1 = (int8x16_t) vld1q_dup_s64((const int64_t *) a_ptr->qs + 1); + int8x16_t a2 = (int8x16_t) vld1q_dup_s64((const int64_t *) a_ptr->qs + 2); + int8x16_t a3 = (int8x16_t) vld1q_dup_s64((const int64_t *) a_ptr->qs + 3); + float16x4_t ad = vld1_dup_f16((const __fp16 *) &a_ptr->d); + + int32x4_t ret0 = vdupq_n_s32(0); + int32x4_t ret1 = vdupq_n_s32(0); + + ret0 = vdotq_s32(ret0, b0 << 4, a0); + ret1 = vdotq_s32(ret1, b1 << 4, a0); + ret0 = vdotq_s32(ret0, b2 << 4, a1); + ret1 = vdotq_s32(ret1, b3 << 4, a1); + + ret0 = vdotq_s32(ret0, b0 & 0xf0U, a2); + ret1 = vdotq_s32(ret1, b1 & 0xf0U, a2); + ret0 = vdotq_s32(ret0, b2 & 0xf0U, a3); + ret1 = vdotq_s32(ret1, b3 & 0xf0U, a3); + + int32x4_t ret = vpaddq_s32(ret0, ret1); + + acc = vfmaq_f32(acc, vcvtq_n_f32_s32(ret, 4), + vmulq_f32(vcvt_f32_f16(ad), vcvt_f32_f16(bd))); + a_ptr++; + b_ptr++; + } + vst1q_f32(s, acc); + s += ncols_interleaved; + } + return; +#endif // #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD) + ggml_gemv_q4_0_4x8_q8_0_generic(n, s, bs, vx, vy, nr, nc); +} + +void ggml_gemv_q4_0_8x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { + const int qk = QK8_0; + const int nb = n / qk; + const int ncols_interleaved = 8; + const int blocklen = 8; + + assert (n % qk == 0); + assert (nc % ncols_interleaved == 0); + + UNUSED(s); + UNUSED(bs); + UNUSED(vx); + UNUSED(vy); + UNUSED(nr); + UNUSED(nc); + UNUSED(nb); + UNUSED(ncols_interleaved); + UNUSED(blocklen); + +#if ! ((defined(_MSC_VER)) && ! 
defined(__clang__)) && defined(__aarch64__) +#if defined(__ARM_FEATURE_SVE) + if (ggml_cpu_get_sve_cnt() == QK8_0) { + const void * b_ptr = vx; + const void * a_ptr = vy; + float * res_ptr = s; + + __asm__ __volatile__( + "ptrue p0.b\n" + "add %x[b_ptr], %x[b_ptr], #0x10\n" + "1:" // Column loop + "add x22, %x[a_ptr], #0x2\n" + "mov z31.b, #0x0\n" + "mov x21, %x[nb]\n" + "2:" // Block loop + "ld1b { z30.b }, p0/Z, [%x[b_ptr]]\n" + "ld1b { z29.b }, p0/Z, [%x[b_ptr], #1, MUL VL]\n" + "mov z28.s, #0x0\n" + "mov z27.s, #0x0\n" + "ld1rd { z26.d }, p0/Z, [x22]\n" + "ld1b { z25.b }, p0/Z, [%x[b_ptr], #2, MUL VL]\n" + "sub x20, x22, #0x2\n" + "sub x21, x21, #0x1\n" + "ld1b { z24.b }, p0/Z, [%x[b_ptr], #3, MUL VL]\n" + "ld1rd { z23.d }, p0/Z, [x22, #8]\n" + "lsl z22.b, z30.b, #0x4\n" + "lsl z16.b, z29.b, #0x4\n" + "and z30.b, z30.b, #0xf0\n" + "and z29.b, z29.b, #0xf0\n" + "ld1rd { z21.d }, p0/Z, [x22, #16]\n" + "ld1rd { z20.d }, p0/Z, [x22, #24]\n" + "lsl z19.b, z25.b, #0x4\n" + "and z25.b, z25.b, #0xf0\n" + "ld1rh { z17.h }, p0/Z, [x20]\n" + "ld1h { z18.s }, p0/Z, [%x[b_ptr], #-1, MUL VL]\n" + "sdot z28.s, z22.b, z26.b\n" + "sdot z27.s, z16.b, z26.b\n" + "lsl z16.b, z24.b, #0x4\n" + "add x22, x22, #0x22\n" + "and z24.b, z24.b, #0xf0\n" + "add %x[b_ptr], %x[b_ptr], #0x90\n" + "fcvt z17.s, p0/m, z17.h\n" + "fcvt z18.s, p0/m, z18.h\n" + "sdot z28.s, z19.b, z23.b\n" + "sdot z27.s, z16.b, z23.b\n" + "fmul z18.s, z18.s, z17.s\n" + "sdot z28.s, z30.b, z21.b\n" + "sdot z27.s, z29.b, z21.b\n" + "sdot z28.s, z25.b, z20.b\n" + "sdot z27.s, z24.b, z20.b\n" + "uzp1 z17.s, z28.s, z27.s\n" + "uzp2 z16.s, z28.s, z27.s\n" + "add z17.s, z17.s, z16.s\n" + "asr z17.s, z17.s, #0x4\n" + "scvtf z17.s, p0/m, z17.s\n" + "fmla z31.s, p0/M, z17.s, z18.s\n" + "cbnz x21, 2b\n" + "sub %x[nc], %x[nc], #0x8\n" + "st1w { z31.s }, p0, [%x[res_ptr]]\n" + "add %x[res_ptr], %x[res_ptr], #0x20\n" + "cbnz %x[nc], 1b\n" + : [b_ptr] "+&r" (b_ptr), [res_ptr] "+&r" (res_ptr), [nc] "+&r" (nc) + : [a_ptr] "r" (a_ptr), [nb] "r" (nb) + : "memory", "p0", "x20", "x21", "x22", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31" + ); + return; + } +#endif // #if defined(__ARM_FEATURE_SVE) + +#endif // #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) + ggml_gemv_q4_0_8x8_q8_0_generic(n, s, bs, vx, vy, nr, nc); +} + +void ggml_gemv_iq4_nl_4x4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { + const int qk = QK8_0; + const int nb = n / qk; + const int ncols_interleaved = 4; + const int blocklen = 4; + + assert (n % qk == 0); + assert (nc % ncols_interleaved == 0); + + UNUSED(s); + UNUSED(bs); + UNUSED(vx); + UNUSED(vy); + UNUSED(nr); + UNUSED(nc); + UNUSED(nb); + UNUSED(ncols_interleaved); + UNUSED(blocklen); + +#if ! ((defined(_MSC_VER)) && ! 
defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD) + const int8x16_t kvalues = vld1q_s8(kvalues_iq4nl); + const block_q8_0 * a_ptr = (const block_q8_0 *) vy; + float * res_ptr = s; + + for (int x = 0; x < nc / ncols_interleaved; x++) { + const block_iq4_nlx4 * b_ptr = (const block_iq4_nlx4 *) vx + (x * nb); + + float32x4_t sumf = vdupq_n_f32(0); + for (int l = 0; l < nb; l++) { + uint8x16_t b_0 = vld1q_u8(b_ptr[l].qs + 0); + uint8x16_t b_1 = vld1q_u8(b_ptr[l].qs + 16); + uint8x16_t b_2 = vld1q_u8(b_ptr[l].qs + 32); + uint8x16_t b_3 = vld1q_u8(b_ptr[l].qs + 48); + + int8x16_t b_0_hi = vqtbl1q_s8(kvalues, b_0 >> 4); + int8x16_t b_0_lo = vqtbl1q_s8(kvalues, b_0 & 0x0F); + int8x16_t b_1_hi = vqtbl1q_s8(kvalues, b_1 >> 4); + int8x16_t b_1_lo = vqtbl1q_s8(kvalues, b_1 & 0x0F); + int8x16_t b_2_hi = vqtbl1q_s8(kvalues, b_2 >> 4); + int8x16_t b_2_lo = vqtbl1q_s8(kvalues, b_2 & 0x0F); + int8x16_t b_3_hi = vqtbl1q_s8(kvalues, b_3 >> 4); + int8x16_t b_3_lo = vqtbl1q_s8(kvalues, b_3 & 0x0F); + + int8x16_t a_0 = vld1q_s8(a_ptr[l].qs + 0); + int8x16_t a_1 = vld1q_s8(a_ptr[l].qs + 16); + + int32x4_t sumi = vdupq_n_s32(0); + sumi = vdotq_laneq_s32(sumi, b_0_lo, a_0, 0); + sumi = vdotq_laneq_s32(sumi, b_0_hi, a_1, 0); + sumi = vdotq_laneq_s32(sumi, b_1_lo, a_0, 1); + sumi = vdotq_laneq_s32(sumi, b_1_hi, a_1, 1); + sumi = vdotq_laneq_s32(sumi, b_2_lo, a_0, 2); + sumi = vdotq_laneq_s32(sumi, b_2_hi, a_1, 2); + sumi = vdotq_laneq_s32(sumi, b_3_lo, a_0, 3); + sumi = vdotq_laneq_s32(sumi, b_3_hi, a_1, 3); + + float32x4_t a_d = vcvt_f32_f16(vld1_dup_f16((const float16_t *)&a_ptr[l].d)); + float32x4_t b_d = vcvt_f32_f16(vld1_f16((const float16_t *)b_ptr[l].d)); + float32x4_t d = a_d * b_d; + + sumf = vmlaq_f32(sumf, d, vcvtq_f32_s32(sumi)); + } + + vst1q_f32(res_ptr + x * 4, sumf); + } + return; +#endif // #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) + ggml_gemv_iq4_nl_4x4_q8_0_generic(n, s, bs, vx, vy, nr, nc); +} + +void ggml_gemm_q4_0_4x4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { + const int qk = QK8_0; + const int nb = n / qk; + const int ncols_interleaved = 4; + const int blocklen = 4; + + assert (n % qk == 0); + assert (nr % 4 == 0); + assert (nc % ncols_interleaved == 0); + + UNUSED(s); + UNUSED(bs); + UNUSED(vx); + UNUSED(vy); + UNUSED(nr); + UNUSED(nc); + UNUSED(nb); + UNUSED(ncols_interleaved); + UNUSED(blocklen); + +#if ! ((defined(_MSC_VER)) && ! 
defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD) + const void * b_ptr = vx; + const void * a_ptr = vy; + float * res_ptr = s; + size_t res_stride = bs * sizeof(float); + + __asm__ __volatile__( + "mov x10, %x[nr]\n" + "mov x9, #0x88\n" + "cmp x10, #0x10\n" + "mul x9, %x[nb], x9\n" + "blt 4f\n" + "1:" // Row loop + "add x28, %x[b_ptr], #0x8\n" + "mov x27, %x[nc]\n" + "add x26, %x[res_ptr], %x[res_stride], LSL #4\n" + "2:" // Column loop + "add x25, %x[a_ptr], #0x8\n" + "movi v15.16b, #0x0\n" + "movi v19.16b, #0x0\n" + "mov x24, %x[nb]\n" + "add x23, x25, x9\n" + "movi v18.16b, #0x0\n" + "movi v14.16b, #0x0\n" + "add x22, x23, x9\n" + "movi v11.16b, #0x0\n" + "movi v13.16b, #0x0\n" + "add x21, x22, x9\n" + "movi v23.16b, #0x0\n" + "movi v16.16b, #0x0\n" + "movi v25.16b, #0x0\n" + "movi v7.16b, #0x0\n" + "movi v0.16b, #0x0\n" + "movi v4.16b, #0x0\n" + "movi v5.16b, #0x0\n" + "movi v21.16b, #0x0\n" + "movi v8.16b, #0x0\n" + "movi v1.16b, #0x0\n" + "3:" // Block loop + "ldr q3, [x28, #0x0]\n" + "ldr q31, [x25, #0x0]\n" + "movi v28.16b, #0x4\n" + "movi v10.4s, #0x0\n" + "ldr q22, [x28, #0x10]\n" + "ldr q6, [x25, #0x10]\n" + "movi v29.4s, #0x0\n" + "movi v9.4s, #0x0\n" + "ldr q27, [x28, #0x20]\n" + "ldr q30, [x28, #0x30]\n" + "movi v20.4s, #0x0\n" + "movi v24.16b, #0xf0\n" + "ldr d2, [x25, #-0x8]\n" + "ldr d26, [x23, #-0x8]\n" + "sshl v12.16b, v3.16b, v28.16b\n" + "sub x20, x28, #0x8\n" + "ldr d17, [x20, #0x0]\n" + "and v3.16b, v3.16b, v24.16b\n" + "subs x24, x24, #0x1\n" + "add x28, x28, #0x48\n" + ".inst 0x4f9fe18a // sdot v10.4s, v12.16b, v31.4b[0]\n" + ".inst 0x4fbfe19d // sdot v29.4s, v12.16b, v31.4b[1]\n" + ".inst 0x4f9fe989 // sdot v9.4s, v12.16b, v31.4b[2]\n" + ".inst 0x4fbfe994 // sdot v20.4s, v12.16b, v31.4b[3]\n" + "sshl v31.16b, v22.16b, v28.16b\n" + "and v22.16b, v22.16b, v24.16b\n" + "fcvtl v17.4s, v17.4h\n" + "fcvtl v2.4s, v2.4h\n" + "fcvtl v26.4s, v26.4h\n" + ".inst 0x4f86e3ea // sdot v10.4s, v31.16b, v6.4b[0]\n" + ".inst 0x4fa6e3fd // sdot v29.4s, v31.16b, v6.4b[1]\n" + ".inst 0x4f86ebe9 // sdot v9.4s, v31.16b, v6.4b[2]\n" + ".inst 0x4fa6ebf4 // sdot v20.4s, v31.16b, v6.4b[3]\n" + "sshl v6.16b, v27.16b, v28.16b\n" + "sshl v28.16b, v30.16b, v28.16b\n" + "and v27.16b, v27.16b, v24.16b\n" + "and v30.16b, v30.16b, v24.16b\n" + "ldr q24, [x25, #0x20]\n" + ".inst 0x4f98e0ca // sdot v10.4s, v6.16b, v24.4b[0]\n" + ".inst 0x4fb8e0dd // sdot v29.4s, v6.16b, v24.4b[1]\n" + ".inst 0x4f98e8c9 // sdot v9.4s, v6.16b, v24.4b[2]\n" + ".inst 0x4fb8e8d4 // sdot v20.4s, v6.16b, v24.4b[3]\n" + "ldr q24, [x25, #0x30]\n" + ".inst 0x4f98e38a // sdot v10.4s, v28.16b, v24.4b[0]\n" + ".inst 0x4fb8e39d // sdot v29.4s, v28.16b, v24.4b[1]\n" + ".inst 0x4f98eb89 // sdot v9.4s, v28.16b, v24.4b[2]\n" + ".inst 0x4fb8eb94 // sdot v20.4s, v28.16b, v24.4b[3]\n" + "ldr q24, [x25, #0x40]\n" + ".inst 0x4f98e06a // sdot v10.4s, v3.16b, v24.4b[0]\n" + ".inst 0x4fb8e07d // sdot v29.4s, v3.16b, v24.4b[1]\n" + ".inst 0x4f98e869 // sdot v9.4s, v3.16b, v24.4b[2]\n" + ".inst 0x4fb8e874 // sdot v20.4s, v3.16b, v24.4b[3]\n" + "ldr q24, [x25, #0x50]\n" + ".inst 0x4f98e2ca // sdot v10.4s, v22.16b, v24.4b[0]\n" + ".inst 0x4fb8e2dd // sdot v29.4s, v22.16b, v24.4b[1]\n" + ".inst 0x4f98eac9 // sdot v9.4s, v22.16b, v24.4b[2]\n" + ".inst 0x4fb8ead4 // sdot v20.4s, v22.16b, v24.4b[3]\n" + "ldr q24, [x25, #0x60]\n" + ".inst 0x4f98e36a // sdot v10.4s, v27.16b, v24.4b[0]\n" + ".inst 0x4fb8e37d // sdot v29.4s, v27.16b, v24.4b[1]\n" + ".inst 0x4f98eb69 // sdot v9.4s, v27.16b, v24.4b[2]\n" + 
".inst 0x4fb8eb74 // sdot v20.4s, v27.16b, v24.4b[3]\n" + "ldr q24, [x25, #0x70]\n" + "add x25, x25, #0x88\n" + ".inst 0x4f98e3ca // sdot v10.4s, v30.16b, v24.4b[0]\n" + ".inst 0x4fb8e3dd // sdot v29.4s, v30.16b, v24.4b[1]\n" + ".inst 0x4f98ebc9 // sdot v9.4s, v30.16b, v24.4b[2]\n" + ".inst 0x4fb8ebd4 // sdot v20.4s, v30.16b, v24.4b[3]\n" + "fmul v24.4s, v17.4s, v2.s[0]\n" + "scvtf v10.4s, v10.4s, #0x4\n" + "scvtf v29.4s, v29.4s, #0x4\n" + "scvtf v9.4s, v9.4s, #0x4\n" + "scvtf v20.4s, v20.4s, #0x4\n" + "fmla v15.4s, v10.4s, v24.4s\n" + "ldr q24, [x23, #0x0]\n" + "fmul v10.4s, v17.4s, v2.s[1]\n" + "fmla v19.4s, v29.4s, v10.4s\n" + "ldr q10, [x23, #0x10]\n" + "fmul v29.4s, v17.4s, v2.s[2]\n" + "fmul v2.4s, v17.4s, v2.s[3]\n" + "fmla v18.4s, v9.4s, v29.4s\n" + "movi v9.4s, #0x0\n" + "movi v29.4s, #0x0\n" + ".inst 0x4f98e189 // sdot v9.4s, v12.16b, v24.4b[0]\n" + ".inst 0x4fb8e19d // sdot v29.4s, v12.16b, v24.4b[1]\n" + "fmla v14.4s, v20.4s, v2.4s\n" + "movi v20.4s, #0x0\n" + "movi v2.4s, #0x0\n" + ".inst 0x4f98e994 // sdot v20.4s, v12.16b, v24.4b[2]\n" + ".inst 0x4fb8e982 // sdot v2.4s, v12.16b, v24.4b[3]\n" + "ldr q24, [x23, #0x20]\n" + ".inst 0x4f8ae3e9 // sdot v9.4s, v31.16b, v10.4b[0]\n" + ".inst 0x4faae3fd // sdot v29.4s, v31.16b, v10.4b[1]\n" + ".inst 0x4f8aebf4 // sdot v20.4s, v31.16b, v10.4b[2]\n" + ".inst 0x4faaebe2 // sdot v2.4s, v31.16b, v10.4b[3]\n" + "ldr q10, [x23, #0x30]\n" + ".inst 0x4f98e0c9 // sdot v9.4s, v6.16b, v24.4b[0]\n" + ".inst 0x4fb8e0dd // sdot v29.4s, v6.16b, v24.4b[1]\n" + ".inst 0x4f98e8d4 // sdot v20.4s, v6.16b, v24.4b[2]\n" + ".inst 0x4fb8e8c2 // sdot v2.4s, v6.16b, v24.4b[3]\n" + "ldr q24, [x23, #0x40]\n" + ".inst 0x4f8ae389 // sdot v9.4s, v28.16b, v10.4b[0]\n" + ".inst 0x4faae39d // sdot v29.4s, v28.16b, v10.4b[1]\n" + ".inst 0x4f8aeb94 // sdot v20.4s, v28.16b, v10.4b[2]\n" + ".inst 0x4faaeb82 // sdot v2.4s, v28.16b, v10.4b[3]\n" + "ldr q10, [x23, #0x50]\n" + ".inst 0x4f98e069 // sdot v9.4s, v3.16b, v24.4b[0]\n" + ".inst 0x4fb8e07d // sdot v29.4s, v3.16b, v24.4b[1]\n" + ".inst 0x4f98e874 // sdot v20.4s, v3.16b, v24.4b[2]\n" + ".inst 0x4fb8e862 // sdot v2.4s, v3.16b, v24.4b[3]\n" + "ldr q24, [x23, #0x60]\n" + ".inst 0x4f8ae2c9 // sdot v9.4s, v22.16b, v10.4b[0]\n" + ".inst 0x4faae2dd // sdot v29.4s, v22.16b, v10.4b[1]\n" + ".inst 0x4f8aead4 // sdot v20.4s, v22.16b, v10.4b[2]\n" + ".inst 0x4faaeac2 // sdot v2.4s, v22.16b, v10.4b[3]\n" + "ldr q10, [x23, #0x70]\n" + "add x23, x23, #0x88\n" + ".inst 0x4f98e369 // sdot v9.4s, v27.16b, v24.4b[0]\n" + ".inst 0x4fb8e37d // sdot v29.4s, v27.16b, v24.4b[1]\n" + ".inst 0x4f98eb74 // sdot v20.4s, v27.16b, v24.4b[2]\n" + ".inst 0x4fb8eb62 // sdot v2.4s, v27.16b, v24.4b[3]\n" + "ldr q24, [x22, #0x0]\n" + ".inst 0x4f8ae3c9 // sdot v9.4s, v30.16b, v10.4b[0]\n" + ".inst 0x4faae3dd // sdot v29.4s, v30.16b, v10.4b[1]\n" + ".inst 0x4f8aebd4 // sdot v20.4s, v30.16b, v10.4b[2]\n" + ".inst 0x4faaebc2 // sdot v2.4s, v30.16b, v10.4b[3]\n" + "fmul v10.4s, v17.4s, v26.s[0]\n" + "scvtf v9.4s, v9.4s, #0x4\n" + "scvtf v29.4s, v29.4s, #0x4\n" + "scvtf v20.4s, v20.4s, #0x4\n" + "scvtf v2.4s, v2.4s, #0x4\n" + "fmla v11.4s, v9.4s, v10.4s\n" + "ldr q9, [x22, #0x10]\n" + "fmul v10.4s, v17.4s, v26.s[1]\n" + "fmla v13.4s, v29.4s, v10.4s\n" + "ldr d29, [x22, #-0x8]\n" + "fmul v10.4s, v17.4s, v26.s[2]\n" + "fmul v26.4s, v17.4s, v26.s[3]\n" + "fcvtl v29.4s, v29.4h\n" + "fmla v23.4s, v20.4s, v10.4s\n" + "movi v20.4s, #0x0\n" + "movi v10.4s, #0x0\n" + "fmla v16.4s, v2.4s, v26.4s\n" + "movi v26.4s, #0x0\n" + "movi v2.4s, #0x0\n" + ".inst 0x4f98e194 // 
sdot v20.4s, v12.16b, v24.4b[0]\n" + ".inst 0x4fb8e18a // sdot v10.4s, v12.16b, v24.4b[1]\n" + ".inst 0x4f98e99a // sdot v26.4s, v12.16b, v24.4b[2]\n" + ".inst 0x4fb8e982 // sdot v2.4s, v12.16b, v24.4b[3]\n" + "ldr q24, [x22, #0x20]\n" + ".inst 0x4f89e3f4 // sdot v20.4s, v31.16b, v9.4b[0]\n" + ".inst 0x4fa9e3ea // sdot v10.4s, v31.16b, v9.4b[1]\n" + ".inst 0x4f89ebfa // sdot v26.4s, v31.16b, v9.4b[2]\n" + ".inst 0x4fa9ebe2 // sdot v2.4s, v31.16b, v9.4b[3]\n" + "ldr q9, [x22, #0x30]\n" + ".inst 0x4f98e0d4 // sdot v20.4s, v6.16b, v24.4b[0]\n" + ".inst 0x4fb8e0ca // sdot v10.4s, v6.16b, v24.4b[1]\n" + ".inst 0x4f98e8da // sdot v26.4s, v6.16b, v24.4b[2]\n" + ".inst 0x4fb8e8c2 // sdot v2.4s, v6.16b, v24.4b[3]\n" + "ldr q24, [x22, #0x40]\n" + ".inst 0x4f89e394 // sdot v20.4s, v28.16b, v9.4b[0]\n" + ".inst 0x4fa9e38a // sdot v10.4s, v28.16b, v9.4b[1]\n" + ".inst 0x4f89eb9a // sdot v26.4s, v28.16b, v9.4b[2]\n" + ".inst 0x4fa9eb82 // sdot v2.4s, v28.16b, v9.4b[3]\n" + "ldr q9, [x22, #0x50]\n" + ".inst 0x4f98e074 // sdot v20.4s, v3.16b, v24.4b[0]\n" + ".inst 0x4fb8e06a // sdot v10.4s, v3.16b, v24.4b[1]\n" + ".inst 0x4f98e87a // sdot v26.4s, v3.16b, v24.4b[2]\n" + ".inst 0x4fb8e862 // sdot v2.4s, v3.16b, v24.4b[3]\n" + "ldr q24, [x22, #0x60]\n" + ".inst 0x4f89e2d4 // sdot v20.4s, v22.16b, v9.4b[0]\n" + ".inst 0x4fa9e2ca // sdot v10.4s, v22.16b, v9.4b[1]\n" + ".inst 0x4f89eada // sdot v26.4s, v22.16b, v9.4b[2]\n" + ".inst 0x4fa9eac2 // sdot v2.4s, v22.16b, v9.4b[3]\n" + "ldr q9, [x22, #0x70]\n" + "add x22, x22, #0x88\n" + ".inst 0x4f98e374 // sdot v20.4s, v27.16b, v24.4b[0]\n" + ".inst 0x4fb8e36a // sdot v10.4s, v27.16b, v24.4b[1]\n" + ".inst 0x4f98eb7a // sdot v26.4s, v27.16b, v24.4b[2]\n" + ".inst 0x4fb8eb62 // sdot v2.4s, v27.16b, v24.4b[3]\n" + "ldr q24, [x21, #0x0]\n" + ".inst 0x4f89e3d4 // sdot v20.4s, v30.16b, v9.4b[0]\n" + ".inst 0x4fa9e3ca // sdot v10.4s, v30.16b, v9.4b[1]\n" + ".inst 0x4f89ebda // sdot v26.4s, v30.16b, v9.4b[2]\n" + ".inst 0x4fa9ebc2 // sdot v2.4s, v30.16b, v9.4b[3]\n" + "fmul v9.4s, v17.4s, v29.s[0]\n" + "scvtf v20.4s, v20.4s, #0x4\n" + "scvtf v10.4s, v10.4s, #0x4\n" + "scvtf v26.4s, v26.4s, #0x4\n" + "scvtf v2.4s, v2.4s, #0x4\n" + "fmla v25.4s, v20.4s, v9.4s\n" + "ldr q9, [x21, #0x10]\n" + "fmul v20.4s, v17.4s, v29.s[1]\n" + "fmla v7.4s, v10.4s, v20.4s\n" + "ldr d20, [x21, #-0x8]\n" + "fmul v10.4s, v17.4s, v29.s[2]\n" + "fmul v29.4s, v17.4s, v29.s[3]\n" + "fcvtl v20.4s, v20.4h\n" + "fmla v0.4s, v26.4s, v10.4s\n" + "movi v26.4s, #0x0\n" + "movi v10.4s, #0x0\n" + "fmla v4.4s, v2.4s, v29.4s\n" + "movi v2.4s, #0x0\n" + "movi v29.4s, #0x0\n" + ".inst 0x4f98e19a // sdot v26.4s, v12.16b, v24.4b[0]\n" + ".inst 0x4fb8e18a // sdot v10.4s, v12.16b, v24.4b[1]\n" + ".inst 0x4f98e982 // sdot v2.4s, v12.16b, v24.4b[2]\n" + ".inst 0x4fb8e99d // sdot v29.4s, v12.16b, v24.4b[3]\n" + "ldr q12, [x21, #0x20]\n" + "fmul v24.4s, v17.4s, v20.s[0]\n" + ".inst 0x4f89e3fa // sdot v26.4s, v31.16b, v9.4b[0]\n" + ".inst 0x4fa9e3ea // sdot v10.4s, v31.16b, v9.4b[1]\n" + ".inst 0x4f89ebe2 // sdot v2.4s, v31.16b, v9.4b[2]\n" + ".inst 0x4fa9ebfd // sdot v29.4s, v31.16b, v9.4b[3]\n" + "ldr q9, [x21, #0x30]\n" + "fmul v31.4s, v17.4s, v20.s[1]\n" + ".inst 0x4f8ce0da // sdot v26.4s, v6.16b, v12.4b[0]\n" + ".inst 0x4face0ca // sdot v10.4s, v6.16b, v12.4b[1]\n" + ".inst 0x4f8ce8c2 // sdot v2.4s, v6.16b, v12.4b[2]\n" + ".inst 0x4face8dd // sdot v29.4s, v6.16b, v12.4b[3]\n" + "ldr q12, [x21, #0x40]\n" + "fmul v6.4s, v17.4s, v20.s[2]\n" + "fmul v20.4s, v17.4s, v20.s[3]\n" + ".inst 0x4f89e39a // sdot v26.4s, 
v28.16b, v9.4b[0]\n" + ".inst 0x4fa9e38a // sdot v10.4s, v28.16b, v9.4b[1]\n" + ".inst 0x4f89eb82 // sdot v2.4s, v28.16b, v9.4b[2]\n" + ".inst 0x4fa9eb9d // sdot v29.4s, v28.16b, v9.4b[3]\n" + "ldr q9, [x21, #0x50]\n" + ".inst 0x4f8ce07a // sdot v26.4s, v3.16b, v12.4b[0]\n" + ".inst 0x4face06a // sdot v10.4s, v3.16b, v12.4b[1]\n" + ".inst 0x4f8ce862 // sdot v2.4s, v3.16b, v12.4b[2]\n" + ".inst 0x4face87d // sdot v29.4s, v3.16b, v12.4b[3]\n" + "ldr q12, [x21, #0x60]\n" + ".inst 0x4f89e2da // sdot v26.4s, v22.16b, v9.4b[0]\n" + ".inst 0x4fa9e2ca // sdot v10.4s, v22.16b, v9.4b[1]\n" + ".inst 0x4f89eac2 // sdot v2.4s, v22.16b, v9.4b[2]\n" + ".inst 0x4fa9eadd // sdot v29.4s, v22.16b, v9.4b[3]\n" + "ldr q17, [x21, #0x70]\n" + "add x21, x21, #0x88\n" + ".inst 0x4f8ce37a // sdot v26.4s, v27.16b, v12.4b[0]\n" + ".inst 0x4face36a // sdot v10.4s, v27.16b, v12.4b[1]\n" + ".inst 0x4f8ceb62 // sdot v2.4s, v27.16b, v12.4b[2]\n" + ".inst 0x4faceb7d // sdot v29.4s, v27.16b, v12.4b[3]\n" + ".inst 0x4f91e3da // sdot v26.4s, v30.16b, v17.4b[0]\n" + ".inst 0x4fb1e3ca // sdot v10.4s, v30.16b, v17.4b[1]\n" + ".inst 0x4f91ebc2 // sdot v2.4s, v30.16b, v17.4b[2]\n" + ".inst 0x4fb1ebdd // sdot v29.4s, v30.16b, v17.4b[3]\n" + "scvtf v26.4s, v26.4s, #0x4\n" + "scvtf v10.4s, v10.4s, #0x4\n" + "fmla v5.4s, v26.4s, v24.4s\n" + "scvtf v2.4s, v2.4s, #0x4\n" + "scvtf v29.4s, v29.4s, #0x4\n" + "fmla v21.4s, v10.4s, v31.4s\n" + "fmla v8.4s, v2.4s, v6.4s\n" + "fmla v1.4s, v29.4s, v20.4s\n" + "bgt 3b\n" + "mov x20, %x[res_ptr]\n" + "subs x27, x27, #0x4\n" + "add %x[res_ptr], %x[res_ptr], #0x10\n" + "str q15, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q19, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q18, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q14, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q11, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q13, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q23, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q16, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q25, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q7, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q0, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q4, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q5, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q21, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q8, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q1, [x20, #0x0]\n" + "bne 2b\n" + "mov x20, #0x4\n" + "sub x10, x10, #0x10\n" + "cmp x10, #0x10\n" + "mov %x[res_ptr], x26\n" + "madd %x[a_ptr], x20, x9, %x[a_ptr]\n" + "bge 1b\n" + "4:" // Row loop skip + "cbz x10, 9f\n" + "5:" // Row tail: Row loop + "add x24, %x[b_ptr], #0x8\n" + "mov x23, %x[nc]\n" + "add x22, %x[res_ptr], %x[res_stride], LSL #2\n" + "6:" // Row tail: Column loop + "movi v15.16b, #0x0\n" + "movi v19.16b, #0x0\n" + "add x25, %x[a_ptr], #0x8\n" + "mov x21, %x[nb]\n" + "movi v18.16b, #0x0\n" + "movi v14.16b, #0x0\n" + "7:" // Row tail: Block loop + "ldr q7, [x24, #0x0]\n" + "ldr q5, [x25, #0x0]\n" + "movi v9.16b, #0x4\n" + "movi v4.4s, #0x0\n" + "ldr q3, [x24, #0x10]\n" + "ldr q2, [x25, #0x10]\n" + "movi v1.4s, #0x0\n" + "movi v0.4s, #0x0\n" + "ldr q13, [x24, #0x20]\n" + "ldr q31, [x25, #0x20]\n" + "movi v30.4s, #0x0\n" + "movi v29.16b, #0xf0\n" + "ldr q28, [x24, #0x30]\n" + "ldr q27, [x25, #0x30]\n" + "sshl v20.16b, v7.16b, v9.16b\n" + "sub x20, x24, #0x8\n" + "ldr q26, [x25, #0x40]\n" + "ldr q25, [x25, 
#0x50]\n" + "sshl v17.16b, v3.16b, v9.16b\n" + "and v7.16b, v7.16b, v29.16b\n" + "ldr q24, [x25, #0x60]\n" + "ldr q16, [x25, #0x70]\n" + "sshl v22.16b, v13.16b, v9.16b\n" + "and v3.16b, v3.16b, v29.16b\n" + "ldr d21, [x20, #0x0]\n" + "ldr d12, [x25, #-0x8]\n" + ".inst 0x4f85e284 // sdot v4.4s, v20.16b, v5.4b[0]\n" + ".inst 0x4fa5e281 // sdot v1.4s, v20.16b, v5.4b[1]\n" + ".inst 0x4f85ea80 // sdot v0.4s, v20.16b, v5.4b[2]\n" + ".inst 0x4fa5ea9e // sdot v30.4s, v20.16b, v5.4b[3]\n" + "sshl v9.16b, v28.16b, v9.16b\n" + "subs x21, x21, #0x1\n" + "and v13.16b, v13.16b, v29.16b\n" + "and v28.16b, v28.16b, v29.16b\n" + "add x25, x25, #0x88\n" + "add x24, x24, #0x48\n" + "fcvtl v21.4s, v21.4h\n" + "fcvtl v12.4s, v12.4h\n" + ".inst 0x4f82e224 // sdot v4.4s, v17.16b, v2.4b[0]\n" + ".inst 0x4fa2e221 // sdot v1.4s, v17.16b, v2.4b[1]\n" + ".inst 0x4f82ea20 // sdot v0.4s, v17.16b, v2.4b[2]\n" + ".inst 0x4fa2ea3e // sdot v30.4s, v17.16b, v2.4b[3]\n" + "fmul v11.4s, v21.4s, v12.s[0]\n" + "fmul v23.4s, v21.4s, v12.s[1]\n" + "fmul v17.4s, v21.4s, v12.s[2]\n" + ".inst 0x4f9fe2c4 // sdot v4.4s, v22.16b, v31.4b[0]\n" + "fmul v6.4s, v21.4s, v12.s[3]\n" + ".inst 0x4fbfe2c1 // sdot v1.4s, v22.16b, v31.4b[1]\n" + ".inst 0x4f9feac0 // sdot v0.4s, v22.16b, v31.4b[2]\n" + ".inst 0x4fbfeade // sdot v30.4s, v22.16b, v31.4b[3]\n" + ".inst 0x4f9be124 // sdot v4.4s, v9.16b, v27.4b[0]\n" + ".inst 0x4fbbe121 // sdot v1.4s, v9.16b, v27.4b[1]\n" + ".inst 0x4f9be920 // sdot v0.4s, v9.16b, v27.4b[2]\n" + ".inst 0x4fbbe93e // sdot v30.4s, v9.16b, v27.4b[3]\n" + ".inst 0x4f9ae0e4 // sdot v4.4s, v7.16b, v26.4b[0]\n" + ".inst 0x4fbae0e1 // sdot v1.4s, v7.16b, v26.4b[1]\n" + ".inst 0x4f9ae8e0 // sdot v0.4s, v7.16b, v26.4b[2]\n" + ".inst 0x4fbae8fe // sdot v30.4s, v7.16b, v26.4b[3]\n" + ".inst 0x4f99e064 // sdot v4.4s, v3.16b, v25.4b[0]\n" + ".inst 0x4fb9e061 // sdot v1.4s, v3.16b, v25.4b[1]\n" + ".inst 0x4f99e860 // sdot v0.4s, v3.16b, v25.4b[2]\n" + ".inst 0x4fb9e87e // sdot v30.4s, v3.16b, v25.4b[3]\n" + ".inst 0x4f98e1a4 // sdot v4.4s, v13.16b, v24.4b[0]\n" + ".inst 0x4fb8e1a1 // sdot v1.4s, v13.16b, v24.4b[1]\n" + ".inst 0x4f98e9a0 // sdot v0.4s, v13.16b, v24.4b[2]\n" + ".inst 0x4fb8e9be // sdot v30.4s, v13.16b, v24.4b[3]\n" + ".inst 0x4f90e384 // sdot v4.4s, v28.16b, v16.4b[0]\n" + ".inst 0x4fb0e381 // sdot v1.4s, v28.16b, v16.4b[1]\n" + ".inst 0x4f90eb80 // sdot v0.4s, v28.16b, v16.4b[2]\n" + ".inst 0x4fb0eb9e // sdot v30.4s, v28.16b, v16.4b[3]\n" + "scvtf v4.4s, v4.4s, #0x4\n" + "scvtf v1.4s, v1.4s, #0x4\n" + "scvtf v0.4s, v0.4s, #0x4\n" + "fmla v15.4s, v4.4s, v11.4s\n" + "scvtf v30.4s, v30.4s, #0x4\n" + "fmla v19.4s, v1.4s, v23.4s\n" + "fmla v18.4s, v0.4s, v17.4s\n" + "fmla v14.4s, v30.4s, v6.4s\n" + "bgt 7b\n" + "mov x20, %x[res_ptr]\n" + "cmp x10, #0x1\n" + "str q15, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "ble 8f\n" + "cmp x10, #0x2\n" + "str q19, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "ble 8f\n" + "cmp x10, #0x3\n" + "str q18, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "ble 8f\n" + "str q14, [x20, #0x0]\n" + "8:" // Row tail: Accumulator store skip + "subs x23, x23, #0x4\n" + "add %x[res_ptr], %x[res_ptr], #0x10\n" + "bne 6b\n" + "subs x10, x10, #0x4\n" + "add %x[a_ptr], %x[a_ptr], x9\n" + "mov %x[res_ptr], x22\n" + "bgt 5b\n" + "9:" // Row tail: Row loop skip + : [a_ptr] "+&r" (a_ptr), [res_ptr] "+&r" (res_ptr) + : [b_ptr] "r" (b_ptr), [nr] "r" (nr), [nb] "r" (nb), [res_stride] "r" (res_stride), [nc] "r" (nc) + : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", 
"v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28" + ); + return; +#endif // #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) + ggml_gemm_q4_0_4x4_q8_0_generic(n, s, bs, vx, vy, nr, nc); +} + +void ggml_gemm_q4_0_4x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { + const int qk = QK8_0; + const int nb = n / qk; + const int ncols_interleaved = 4; + const int blocklen = 8; + + assert (n % qk == 0); + assert (nr % 4 == 0); + assert (nc % ncols_interleaved == 0); + + UNUSED(s); + UNUSED(bs); + UNUSED(vx); + UNUSED(vy); + UNUSED(nr); + UNUSED(nc); + UNUSED(nb); + UNUSED(ncols_interleaved); + UNUSED(blocklen); + +#if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_MATMUL_INT8) + const void * b_ptr = vx; + const void * a_ptr = vy; + float * res_ptr = s; + size_t res_stride = bs * sizeof(float); + + __asm__ __volatile__( + "mov x10, %x[nr]\n" + "mov x9, #0x88\n" + "cmp x10, #0x10\n" + "mul x9, %x[nb], x9\n" + "blt 4f\n" + "1:" // Row loop + "add x28, %x[b_ptr], #0x8\n" + "mov x27, %x[nc]\n" + "add x26, %x[res_ptr], %x[res_stride], LSL #4\n" + "2:" // Column loop + "add x25, %x[a_ptr], #0x8\n" + "movi v2.16b, #0x0\n" + "movi v10.16b, #0x0\n" + "mov x24, %x[nb]\n" + "add x23, x25, x9\n" + "movi v12.16b, #0x0\n" + "movi v28.16b, #0x0\n" + "add x22, x23, x9\n" + "movi v11.16b, #0x0\n" + "movi v13.16b, #0x0\n" + "add x21, x22, x9\n" + "movi v22.16b, #0x0\n" + "movi v23.16b, #0x0\n" + "movi v25.16b, #0x0\n" + "movi v5.16b, #0x0\n" + "movi v7.16b, #0x0\n" + "movi v4.16b, #0x0\n" + "movi v6.16b, #0x0\n" + "movi v30.16b, #0x0\n" + "movi v24.16b, #0x0\n" + "movi v14.16b, #0x0\n" + "3:" // Block loop + "ldr q21, [x28, #0x0]\n" + "ldr q16, [x28, #0x10]\n" + "movi v1.16b, #0x4\n" + "movi v19.4s, #0x0\n" + "ldr q27, [x25, #0x0]\n" + "ldr q15, [x25, #0x10]\n" + "movi v26.4s, #0x0\n" + "movi v18.4s, #0x0\n" + "ldr q29, [x28, #0x20]\n" + "ldr q3, [x28, #0x30]\n" + "movi v17.4s, #0x0\n" + "movi v0.16b, #0xf0\n" + "ldr d20, [x25, #-0x8]\n" + "ldr d9, [x23, #-0x8]\n" + "sshl v8.16b, v21.16b, v1.16b\n" + "sshl v31.16b, v16.16b, v1.16b\n" + "and v21.16b, v21.16b, v0.16b\n" + "and v16.16b, v16.16b, v0.16b\n" + "sub x20, x28, #0x8\n" + "subs x24, x24, #0x1\n" + "add x28, x28, #0x48\n" + ".inst 0x4e88a773 // smmla v19.4s, v27.16b, v8.16b\n" + ".inst 0x4e9fa77a // smmla v26.4s, v27.16b, v31.16b\n" + "ldr q27, [x25, #0x20]\n" + ".inst 0x4e88a5f2 // smmla v18.4s, v15.16b, v8.16b\n" + ".inst 0x4e9fa5f1 // smmla v17.4s, v15.16b, v31.16b\n" + "sshl v15.16b, v29.16b, v1.16b\n" + "sshl v1.16b, v3.16b, v1.16b\n" + "and v29.16b, v29.16b, v0.16b\n" + "and v3.16b, v3.16b, v0.16b\n" + "ldr q0, [x25, #0x30]\n" + "fcvtl v20.4s, v20.4h\n" + ".inst 0x4e8fa773 // smmla v19.4s, v27.16b, v15.16b\n" + "fcvtl v9.4s, v9.4h\n" + ".inst 0x4e81a77a // smmla v26.4s, v27.16b, v1.16b\n" + "ldr q27, [x25, #0x40]\n" + ".inst 0x4e8fa412 // smmla v18.4s, v0.16b, v15.16b\n" + ".inst 0x4e81a411 // smmla v17.4s, v0.16b, v1.16b\n" + "ldr q0, [x25, #0x50]\n" + ".inst 0x4e95a773 // smmla v19.4s, v27.16b, v21.16b\n" + ".inst 0x4e90a77a // smmla v26.4s, v27.16b, v16.16b\n" + "ldr q27, [x25, #0x60]\n" + ".inst 0x4e95a412 // smmla v18.4s, v0.16b, v21.16b\n" + ".inst 0x4e90a411 // 
smmla v17.4s, v0.16b, v16.16b\n" + "ldr q0, [x25, #0x70]\n" + "add x25, x25, #0x88\n" + ".inst 0x4e9da773 // smmla v19.4s, v27.16b, v29.16b\n" + ".inst 0x4e83a77a // smmla v26.4s, v27.16b, v3.16b\n" + "ldr d27, [x20, #0x0]\n" + ".inst 0x4e9da412 // smmla v18.4s, v0.16b, v29.16b\n" + ".inst 0x4e83a411 // smmla v17.4s, v0.16b, v3.16b\n" + "fcvtl v27.4s, v27.4h\n" + "uzp1 v0.2d, v19.2d, v26.2d\n" + "uzp2 v26.2d, v19.2d, v26.2d\n" + "fmul v19.4s, v27.4s, v20.s[0]\n" + "scvtf v0.4s, v0.4s, #0x4\n" + "scvtf v26.4s, v26.4s, #0x4\n" + "fmla v2.4s, v0.4s, v19.4s\n" + "ldr q19, [x23, #0x0]\n" + "uzp1 v0.2d, v18.2d, v17.2d\n" + "uzp2 v18.2d, v18.2d, v17.2d\n" + "fmul v17.4s, v27.4s, v20.s[1]\n" + "scvtf v0.4s, v0.4s, #0x4\n" + "scvtf v18.4s, v18.4s, #0x4\n" + "fmla v10.4s, v26.4s, v17.4s\n" + "ldr q17, [x23, #0x10]\n" + "fmul v26.4s, v27.4s, v20.s[2]\n" + "fmul v20.4s, v27.4s, v20.s[3]\n" + "fmla v12.4s, v0.4s, v26.4s\n" + "ldr d0, [x22, #-0x8]\n" + "ldr d26, [x21, #-0x8]\n" + "fcvtl v0.4s, v0.4h\n" + "fmla v28.4s, v18.4s, v20.4s\n" + "movi v20.4s, #0x0\n" + "movi v18.4s, #0x0\n" + ".inst 0x4e88a674 // smmla v20.4s, v19.16b, v8.16b\n" + ".inst 0x4e9fa672 // smmla v18.4s, v19.16b, v31.16b\n" + "ldr q19, [x23, #0x20]\n" + "fcvtl v26.4s, v26.4h\n" + ".inst 0x4e8fa674 // smmla v20.4s, v19.16b, v15.16b\n" + ".inst 0x4e81a672 // smmla v18.4s, v19.16b, v1.16b\n" + "ldr q19, [x23, #0x40]\n" + ".inst 0x4e95a674 // smmla v20.4s, v19.16b, v21.16b\n" + ".inst 0x4e90a672 // smmla v18.4s, v19.16b, v16.16b\n" + "ldr q19, [x23, #0x60]\n" + ".inst 0x4e9da674 // smmla v20.4s, v19.16b, v29.16b\n" + ".inst 0x4e83a672 // smmla v18.4s, v19.16b, v3.16b\n" + "uzp1 v19.2d, v20.2d, v18.2d\n" + "scvtf v19.4s, v19.4s, #0x4\n" + "uzp2 v20.2d, v20.2d, v18.2d\n" + "fmul v18.4s, v27.4s, v9.s[0]\n" + "scvtf v20.4s, v20.4s, #0x4\n" + "fmla v11.4s, v19.4s, v18.4s\n" + "ldr q18, [x22, #0x0]\n" + "fmul v19.4s, v27.4s, v9.s[1]\n" + "fmla v13.4s, v20.4s, v19.4s\n" + "movi v19.4s, #0x0\n" + "movi v20.4s, #0x0\n" + ".inst 0x4e88a633 // smmla v19.4s, v17.16b, v8.16b\n" + ".inst 0x4e9fa634 // smmla v20.4s, v17.16b, v31.16b\n" + "ldr q17, [x23, #0x30]\n" + ".inst 0x4e8fa633 // smmla v19.4s, v17.16b, v15.16b\n" + ".inst 0x4e81a634 // smmla v20.4s, v17.16b, v1.16b\n" + "ldr q17, [x23, #0x50]\n" + ".inst 0x4e95a633 // smmla v19.4s, v17.16b, v21.16b\n" + ".inst 0x4e90a634 // smmla v20.4s, v17.16b, v16.16b\n" + "ldr q17, [x23, #0x70]\n" + "add x23, x23, #0x88\n" + ".inst 0x4e9da633 // smmla v19.4s, v17.16b, v29.16b\n" + ".inst 0x4e83a634 // smmla v20.4s, v17.16b, v3.16b\n" + "uzp1 v17.2d, v19.2d, v20.2d\n" + "scvtf v17.4s, v17.4s, #0x4\n" + "uzp2 v20.2d, v19.2d, v20.2d\n" + "fmul v19.4s, v27.4s, v9.s[2]\n" + "fmul v9.4s, v27.4s, v9.s[3]\n" + "scvtf v20.4s, v20.4s, #0x4\n" + "fmla v22.4s, v17.4s, v19.4s\n" + "ldr q17, [x22, #0x10]\n" + "movi v19.4s, #0x0\n" + ".inst 0x4e88a653 // smmla v19.4s, v18.16b, v8.16b\n" + "fmla v23.4s, v20.4s, v9.4s\n" + "movi v20.4s, #0x0\n" + "movi v9.4s, #0x0\n" + ".inst 0x4e9fa654 // smmla v20.4s, v18.16b, v31.16b\n" + "ldr q18, [x22, #0x20]\n" + ".inst 0x4e88a629 // smmla v9.4s, v17.16b, v8.16b\n" + ".inst 0x4e8fa653 // smmla v19.4s, v18.16b, v15.16b\n" + ".inst 0x4e81a654 // smmla v20.4s, v18.16b, v1.16b\n" + "ldr q18, [x22, #0x40]\n" + ".inst 0x4e95a653 // smmla v19.4s, v18.16b, v21.16b\n" + ".inst 0x4e90a654 // smmla v20.4s, v18.16b, v16.16b\n" + "ldr q18, [x22, #0x60]\n" + ".inst 0x4e9da653 // smmla v19.4s, v18.16b, v29.16b\n" + ".inst 0x4e83a654 // smmla v20.4s, v18.16b, v3.16b\n" + "movi v18.4s, #0x0\n" + ".inst 
0x4e9fa632 // smmla v18.4s, v17.16b, v31.16b\n" + "ldr q17, [x22, #0x30]\n" + ".inst 0x4e8fa629 // smmla v9.4s, v17.16b, v15.16b\n" + ".inst 0x4e81a632 // smmla v18.4s, v17.16b, v1.16b\n" + "ldr q17, [x22, #0x50]\n" + ".inst 0x4e95a629 // smmla v9.4s, v17.16b, v21.16b\n" + ".inst 0x4e90a632 // smmla v18.4s, v17.16b, v16.16b\n" + "ldr q17, [x22, #0x70]\n" + "add x22, x22, #0x88\n" + ".inst 0x4e9da629 // smmla v9.4s, v17.16b, v29.16b\n" + ".inst 0x4e83a632 // smmla v18.4s, v17.16b, v3.16b\n" + "uzp1 v17.2d, v19.2d, v20.2d\n" + "uzp2 v20.2d, v19.2d, v20.2d\n" + "fmul v19.4s, v27.4s, v0.s[0]\n" + "scvtf v17.4s, v17.4s, #0x4\n" + "scvtf v20.4s, v20.4s, #0x4\n" + "fmla v25.4s, v17.4s, v19.4s\n" + "ldr q19, [x21, #0x0]\n" + "fmul v17.4s, v27.4s, v0.s[1]\n" + "fmla v5.4s, v20.4s, v17.4s\n" + "ldr q17, [x21, #0x10]\n" + "uzp1 v20.2d, v9.2d, v18.2d\n" + "uzp2 v9.2d, v9.2d, v18.2d\n" + "fmul v18.4s, v27.4s, v0.s[2]\n" + "fmul v0.4s, v27.4s, v0.s[3]\n" + "scvtf v20.4s, v20.4s, #0x4\n" + "scvtf v9.4s, v9.4s, #0x4\n" + "fmla v7.4s, v20.4s, v18.4s\n" + "movi v20.4s, #0x0\n" + "movi v18.4s, #0x0\n" + ".inst 0x4e88a674 // smmla v20.4s, v19.16b, v8.16b\n" + ".inst 0x4e9fa672 // smmla v18.4s, v19.16b, v31.16b\n" + "ldr q19, [x21, #0x20]\n" + "fmla v4.4s, v9.4s, v0.4s\n" + "movi v9.4s, #0x0\n" + "movi v0.4s, #0x0\n" + ".inst 0x4e88a629 // smmla v9.4s, v17.16b, v8.16b\n" + "fmul v8.4s, v27.4s, v26.s[0]\n" + ".inst 0x4e9fa620 // smmla v0.4s, v17.16b, v31.16b\n" + "ldr q17, [x21, #0x30]\n" + ".inst 0x4e8fa674 // smmla v20.4s, v19.16b, v15.16b\n" + "fmul v31.4s, v27.4s, v26.s[1]\n" + ".inst 0x4e81a672 // smmla v18.4s, v19.16b, v1.16b\n" + "ldr q19, [x21, #0x40]\n" + ".inst 0x4e8fa629 // smmla v9.4s, v17.16b, v15.16b\n" + "fmul v15.4s, v27.4s, v26.s[2]\n" + "fmul v27.4s, v27.4s, v26.s[3]\n" + ".inst 0x4e81a620 // smmla v0.4s, v17.16b, v1.16b\n" + "ldr q1, [x21, #0x50]\n" + ".inst 0x4e95a674 // smmla v20.4s, v19.16b, v21.16b\n" + ".inst 0x4e90a672 // smmla v18.4s, v19.16b, v16.16b\n" + "ldr q26, [x21, #0x60]\n" + ".inst 0x4e95a429 // smmla v9.4s, v1.16b, v21.16b\n" + ".inst 0x4e90a420 // smmla v0.4s, v1.16b, v16.16b\n" + "ldr q21, [x21, #0x70]\n" + "add x21, x21, #0x88\n" + ".inst 0x4e9da754 // smmla v20.4s, v26.16b, v29.16b\n" + ".inst 0x4e83a752 // smmla v18.4s, v26.16b, v3.16b\n" + ".inst 0x4e9da6a9 // smmla v9.4s, v21.16b, v29.16b\n" + ".inst 0x4e83a6a0 // smmla v0.4s, v21.16b, v3.16b\n" + "uzp1 v29.2d, v20.2d, v18.2d\n" + "uzp2 v21.2d, v20.2d, v18.2d\n" + "scvtf v29.4s, v29.4s, #0x4\n" + "uzp1 v18.2d, v9.2d, v0.2d\n" + "uzp2 v16.2d, v9.2d, v0.2d\n" + "scvtf v21.4s, v21.4s, #0x4\n" + "fmla v6.4s, v29.4s, v8.4s\n" + "scvtf v18.4s, v18.4s, #0x4\n" + "scvtf v16.4s, v16.4s, #0x4\n" + "fmla v30.4s, v21.4s, v31.4s\n" + "fmla v24.4s, v18.4s, v15.4s\n" + "fmla v14.4s, v16.4s, v27.4s\n" + "bgt 3b\n" + "mov x20, %x[res_ptr]\n" + "subs x27, x27, #0x4\n" + "add %x[res_ptr], %x[res_ptr], #0x10\n" + "str q2, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q10, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q12, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q28, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q11, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q13, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q22, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q23, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q25, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q5, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q7, [x20, 
#0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q4, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q6, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q30, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q24, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "str q14, [x20, #0x0]\n" + "bne 2b\n" + "mov x20, #0x4\n" + "sub x10, x10, #0x10\n" + "cmp x10, #0x10\n" + "mov %x[res_ptr], x26\n" + "madd %x[a_ptr], x20, x9, %x[a_ptr]\n" + "bge 1b\n" + "4:" // Row loop skip + "cbz x10, 9f\n" + "5:" // Row tail: Row loop + "add x24, %x[b_ptr], #0x8\n" + "mov x23, %x[nc]\n" + "add x22, %x[res_ptr], %x[res_stride], LSL #2\n" + "6:" // Row tail: Column loop + "movi v2.16b, #0x0\n" + "movi v10.16b, #0x0\n" + "add x25, %x[a_ptr], #0x8\n" + "mov x21, %x[nb]\n" + "movi v12.16b, #0x0\n" + "movi v28.16b, #0x0\n" + "7:" // Row tail: Block loop + "ldr q6, [x24, #0x0]\n" + "ldr q5, [x24, #0x10]\n" + "movi v17.16b, #0x4\n" + "movi v8.4s, #0x0\n" + "ldr q4, [x25, #0x0]\n" + "ldr q13, [x25, #0x10]\n" + "movi v27.4s, #0x0\n" + "movi v0.4s, #0x0\n" + "ldr q31, [x24, #0x20]\n" + "ldr q14, [x24, #0x30]\n" + "movi v29.4s, #0x0\n" + "movi v22.16b, #0xf0\n" + "ldr q11, [x25, #0x20]\n" + "ldr q23, [x25, #0x30]\n" + "sshl v21.16b, v6.16b, v17.16b\n" + "sshl v16.16b, v5.16b, v17.16b\n" + "ldr q20, [x25, #0x40]\n" + "ldr q26, [x25, #0x50]\n" + "and v6.16b, v6.16b, v22.16b\n" + "and v5.16b, v5.16b, v22.16b\n" + "ldr q25, [x25, #0x60]\n" + "ldr q3, [x25, #0x70]\n" + "sshl v19.16b, v31.16b, v17.16b\n" + "sshl v18.16b, v14.16b, v17.16b\n" + "ldr d17, [x25, #-0x8]\n" + ".inst 0x4e95a488 // smmla v8.4s, v4.16b, v21.16b\n" + ".inst 0x4e90a49b // smmla v27.4s, v4.16b, v16.16b\n" + "and v31.16b, v31.16b, v22.16b\n" + ".inst 0x4e95a5a0 // smmla v0.4s, v13.16b, v21.16b\n" + ".inst 0x4e90a5bd // smmla v29.4s, v13.16b, v16.16b\n" + "and v14.16b, v14.16b, v22.16b\n" + "sub x20, x24, #0x8\n" + "ldr d16, [x20, #0x0]\n" + "subs x21, x21, #0x1\n" + "add x25, x25, #0x88\n" + "fcvtl v17.4s, v17.4h\n" + "add x24, x24, #0x48\n" + ".inst 0x4e93a568 // smmla v8.4s, v11.16b, v19.16b\n" + ".inst 0x4e92a57b // smmla v27.4s, v11.16b, v18.16b\n" + ".inst 0x4e93a6e0 // smmla v0.4s, v23.16b, v19.16b\n" + ".inst 0x4e92a6fd // smmla v29.4s, v23.16b, v18.16b\n" + "fcvtl v16.4s, v16.4h\n" + ".inst 0x4e86a688 // smmla v8.4s, v20.16b, v6.16b\n" + ".inst 0x4e85a69b // smmla v27.4s, v20.16b, v5.16b\n" + "fmul v23.4s, v16.4s, v17.s[0]\n" + "fmul v21.4s, v16.4s, v17.s[1]\n" + "fmul v1.4s, v16.4s, v17.s[2]\n" + "fmul v20.4s, v16.4s, v17.s[3]\n" + ".inst 0x4e86a740 // smmla v0.4s, v26.16b, v6.16b\n" + ".inst 0x4e85a75d // smmla v29.4s, v26.16b, v5.16b\n" + ".inst 0x4e9fa728 // smmla v8.4s, v25.16b, v31.16b\n" + ".inst 0x4e8ea73b // smmla v27.4s, v25.16b, v14.16b\n" + ".inst 0x4e9fa460 // smmla v0.4s, v3.16b, v31.16b\n" + ".inst 0x4e8ea47d // smmla v29.4s, v3.16b, v14.16b\n" + "uzp1 v19.2d, v8.2d, v27.2d\n" + "uzp2 v18.2d, v8.2d, v27.2d\n" + "scvtf v19.4s, v19.4s, #0x4\n" + "uzp1 v17.2d, v0.2d, v29.2d\n" + "uzp2 v16.2d, v0.2d, v29.2d\n" + "scvtf v18.4s, v18.4s, #0x4\n" + "fmla v2.4s, v19.4s, v23.4s\n" + "scvtf v17.4s, v17.4s, #0x4\n" + "scvtf v16.4s, v16.4s, #0x4\n" + "fmla v10.4s, v18.4s, v21.4s\n" + "fmla v12.4s, v17.4s, v1.4s\n" + "fmla v28.4s, v16.4s, v20.4s\n" + "bgt 7b\n" + "mov x20, %x[res_ptr]\n" + "cmp x10, #0x1\n" + "str q2, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "ble 8f\n" + "cmp x10, #0x2\n" + "str q10, [x20, #0x0]\n" + "add x20, x20, %x[res_stride]\n" + "ble 8f\n" + "cmp x10, #0x3\n" + "str q12, [x20, #0x0]\n" 
+ "add x20, x20, %x[res_stride]\n" + "ble 8f\n" + "str q28, [x20, #0x0]\n" + "8:" // Row tail: Accumulator store skip + "subs x23, x23, #0x4\n" + "add %x[res_ptr], %x[res_ptr], #0x10\n" + "bne 6b\n" + "subs x10, x10, #0x4\n" + "add %x[a_ptr], %x[a_ptr], x9\n" + "mov %x[res_ptr], x22\n" + "bgt 5b\n" + "9:" // Row tail: Row loop skip + : [a_ptr] "+&r" (a_ptr), [res_ptr] "+&r" (res_ptr) + : [b_ptr] "r" (b_ptr), [nr] "r" (nr), [nb] "r" (nb), [res_stride] "r" (res_stride), [nc] "r" (nc) + : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28" + ); + return; +#endif // #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_MATMUL_INT8) + ggml_gemm_q4_0_4x8_q8_0_generic(n, s, bs, vx, vy, nr, nc); +} + +void ggml_gemm_q4_0_8x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { + const int qk = QK8_0; + const int nb = n / qk; + const int ncols_interleaved = 8; + const int blocklen = 8; + + assert (n % qk == 0); + assert (nr % 4 == 0); + assert (nc % ncols_interleaved == 0); + + UNUSED(s); + UNUSED(bs); + UNUSED(vx); + UNUSED(vy); + UNUSED(nr); + UNUSED(nc); + UNUSED(nb); + UNUSED(ncols_interleaved); + UNUSED(blocklen); + +#if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) +#if defined(__ARM_FEATURE_SVE) && defined(__ARM_FEATURE_MATMUL_INT8) + if (ggml_cpu_get_sve_cnt() == QK8_0) { + const void * b_ptr = vx; + const void * a_ptr = vy; + float * res_ptr = s; + size_t res_stride = bs * sizeof(float); + + __asm__ __volatile__( + "mov x20, #0x4\n" + "mov x13, %x[nr]\n" + "mov z28.s, #-0x4\n" + "mov x12, #0x88\n" + "ptrue p1.b\n" + "whilelt p0.s, XZR, x20\n" + "cmp x13, #0x10\n" + "mul x12, %x[nb], x12\n" + "blt 4f\n" + "1:" // Row loop + "add x11, %x[b_ptr], #0x10\n" + "mov x10, %x[nc]\n" + "add x9, %x[res_ptr], %x[res_stride], LSL #4\n" + "2:" // Column loop + "add x28, %x[a_ptr], #0x8\n" + "mov z24.b, #0x0\n" + "mov z15.b, #0x0\n" + "mov x27, %x[nb]\n" + "add x26, x28, x12\n" + "mov z12.b, #0x0\n" + "mov z0.b, #0x0\n" + "add x25, x26, x12\n" + "mov z13.b, #0x0\n" + "mov z1.b, #0x0\n" + "add x24, x25, x12\n" + "mov z20.b, #0x0\n" + "mov z25.b, #0x0\n" + "mov z11.b, #0x0\n" + "mov z16.b, #0x0\n" + "mov z19.b, #0x0\n" + "mov z26.b, #0x0\n" + "mov z8.b, #0x0\n" + "mov z29.b, #0x0\n" + "mov z27.b, #0x0\n" + "mov z10.b, #0x0\n" + "3:" // Block loop + "ld1b { z30.b }, p1/Z, [x11]\n" + "ld1b { z21.b }, p1/Z, [x11, #1, MUL VL]\n" + "mov z18.s, #0x0\n" + "mov z7.s, #0x0\n" + "ld1rqb { z3.b }, p1/Z, [x28]\n" + "ld1rqb { z5.b }, p1/Z, [x28, #16]\n" + "mov z9.s, #0x0\n" + "mov z22.s, #0x0\n" + "ld1b { z4.b }, p1/Z, [x11, #2, MUL VL]\n" + "ld1b { z17.b }, p1/Z, [x11, #3, MUL VL]\n" + "sub x20, x11, #0x10\n" + "sub x23, x28, #0x8\n" + "lsl z31.b, z30.b, #0x4\n" + "lsl z6.b, z21.b, #0x4\n" + "ld1h { z23.s }, p1/Z, [x20]\n" + "sub x22, x26, #0x8\n" + "and z30.b, z30.b, #0xf0\n" + "and z21.b, z21.b, #0xf0\n" + "sub x21, x25, #0x8\n" + "sub x20, x24, #0x8\n" + "lsl z14.b, z4.b, #0x4\n" + "lsl z2.b, z17.b, #0x4\n" + "subs x27, x27, #0x1\n" + "add x11, x11, #0x90\n" + ".inst 0x451f9872 // smmla z18.s, z3.b, z31.b\n" + ".inst 0x45069867 // smmla z7.s, z3.b, z6.b\n" + "ld1rqb { z3.b }, p1/Z, [x28, #32]\n" + 
"and z4.b, z4.b, #0xf0\n" + ".inst 0x451f98a9 // smmla z9.s, z5.b, z31.b\n" + ".inst 0x450698b6 // smmla z22.s, z5.b, z6.b\n" + "ld1rqb { z5.b }, p1/Z, [x28, #48]\n" + "and z17.b, z17.b, #0xf0\n" + "fcvt z23.s, p1/m, z23.h\n" + ".inst 0x450e9872 // smmla z18.s, z3.b, z14.b\n" + ".inst 0x45029867 // smmla z7.s, z3.b, z2.b\n" + "ld1rqb { z3.b }, p1/Z, [x28, #64]\n" + ".inst 0x450e98a9 // smmla z9.s, z5.b, z14.b\n" + ".inst 0x450298b6 // smmla z22.s, z5.b, z2.b\n" + "ld1rqb { z5.b }, p1/Z, [x28, #80]\n" + "fscale z23.s, p1/m, z23.s, z28.s\n" + ".inst 0x451e9872 // smmla z18.s, z3.b, z30.b\n" + ".inst 0x45159867 // smmla z7.s, z3.b, z21.b\n" + "ld1rqb { z3.b }, p1/Z, [x28, #96]\n" + ".inst 0x451e98a9 // smmla z9.s, z5.b, z30.b\n" + ".inst 0x451598b6 // smmla z22.s, z5.b, z21.b\n" + "ld1rqb { z5.b }, p1/Z, [x28, #112]\n" + "add x28, x28, #0x88\n" + ".inst 0x45049872 // smmla z18.s, z3.b, z4.b\n" + ".inst 0x45119867 // smmla z7.s, z3.b, z17.b\n" + "ld1h { z3.s }, p0/Z, [x23]\n" + ".inst 0x450498a9 // smmla z9.s, z5.b, z4.b\n" + ".inst 0x451198b6 // smmla z22.s, z5.b, z17.b\n" + "fcvt z3.s, p1/m, z3.h\n" + "uzp1 z5.d, z18.d, z7.d\n" + "uzp2 z18.d, z18.d, z7.d\n" + "mov z3.q, z3.q[0]\n" + "uzp1 z7.d, z9.d, z22.d\n" + "uzp2 z22.d, z9.d, z22.d\n" + "fmul z9.s, z23.s, z3.s[0]\n" + "scvtf z5.s, p1/m, z5.s\n" + "scvtf z18.s, p1/m, z18.s\n" + "scvtf z7.s, p1/m, z7.s\n" + "scvtf z22.s, p1/m, z22.s\n" + "fmla z24.s, p1/M, z5.s, z9.s\n" + "ld1rqb { z5.b }, p1/Z, [x26]\n" + "fmul z9.s, z23.s, z3.s[1]\n" + "fmla z15.s, p1/M, z18.s, z9.s\n" + "ld1rqb { z18.b }, p1/Z, [x26, #16]\n" + "fmul z9.s, z23.s, z3.s[2]\n" + "fmul z3.s, z23.s, z3.s[3]\n" + "fmla z12.s, p1/M, z7.s, z9.s\n" + "mov z9.s, #0x0\n" + "ld1h { z7.s }, p0/Z, [x22]\n" + ".inst 0x451f98a9 // smmla z9.s, z5.b, z31.b\n" + "fmla z0.s, p1/M, z22.s, z3.s\n" + "mov z22.s, #0x0\n" + "ld1h { z3.s }, p0/Z, [x21]\n" + ".inst 0x450698b6 // smmla z22.s, z5.b, z6.b\n" + "ld1rqb { z5.b }, p1/Z, [x26, #32]\n" + "fcvt z7.s, p1/m, z7.h\n" + "fcvt z3.s, p1/m, z3.h\n" + ".inst 0x450e98a9 // smmla z9.s, z5.b, z14.b\n" + ".inst 0x450298b6 // smmla z22.s, z5.b, z2.b\n" + "ld1rqb { z5.b }, p1/Z, [x26, #64]\n" + "mov z7.q, z7.q[0]\n" + "mov z3.q, z3.q[0]\n" + ".inst 0x451e98a9 // smmla z9.s, z5.b, z30.b\n" + ".inst 0x451598b6 // smmla z22.s, z5.b, z21.b\n" + "ld1rqb { z5.b }, p1/Z, [x26, #96]\n" + ".inst 0x450498a9 // smmla z9.s, z5.b, z4.b\n" + ".inst 0x451198b6 // smmla z22.s, z5.b, z17.b\n" + "uzp1 z5.d, z9.d, z22.d\n" + "scvtf z5.s, p1/m, z5.s\n" + "uzp2 z22.d, z9.d, z22.d\n" + "fmul z9.s, z23.s, z7.s[0]\n" + "scvtf z22.s, p1/m, z22.s\n" + "fmla z13.s, p1/M, z5.s, z9.s\n" + "ld1rqb { z9.b }, p1/Z, [x25]\n" + "fmul z5.s, z23.s, z7.s[1]\n" + "fmla z1.s, p1/M, z22.s, z5.s\n" + "mov z5.s, #0x0\n" + "mov z22.s, #0x0\n" + ".inst 0x451f9a45 // smmla z5.s, z18.b, z31.b\n" + ".inst 0x45069a56 // smmla z22.s, z18.b, z6.b\n" + "ld1rqb { z18.b }, p1/Z, [x26, #48]\n" + ".inst 0x450e9a45 // smmla z5.s, z18.b, z14.b\n" + ".inst 0x45029a56 // smmla z22.s, z18.b, z2.b\n" + "ld1rqb { z18.b }, p1/Z, [x26, #80]\n" + ".inst 0x451e9a45 // smmla z5.s, z18.b, z30.b\n" + ".inst 0x45159a56 // smmla z22.s, z18.b, z21.b\n" + "ld1rqb { z18.b }, p1/Z, [x26, #112]\n" + "add x26, x26, #0x88\n" + ".inst 0x45049a45 // smmla z5.s, z18.b, z4.b\n" + ".inst 0x45119a56 // smmla z22.s, z18.b, z17.b\n" + "uzp1 z18.d, z5.d, z22.d\n" + "scvtf z18.s, p1/m, z18.s\n" + "uzp2 z22.d, z5.d, z22.d\n" + "fmul z5.s, z23.s, z7.s[2]\n" + "fmul z7.s, z23.s, z7.s[3]\n" + "scvtf z22.s, p1/m, z22.s\n" + "fmla z20.s, p1/M, 
z18.s, z5.s\n" + "ld1rqb { z18.b }, p1/Z, [x25, #16]\n" + "ld1h { z5.s }, p0/Z, [x20]\n" + "fcvt z5.s, p1/m, z5.h\n" + "fmla z25.s, p1/M, z22.s, z7.s\n" + "mov z22.s, #0x0\n" + "mov z7.s, #0x0\n" + ".inst 0x451f9936 // smmla z22.s, z9.b, z31.b\n" + ".inst 0x45069927 // smmla z7.s, z9.b, z6.b\n" + "ld1rqb { z9.b }, p1/Z, [x25, #32]\n" + "mov z5.q, z5.q[0]\n" + ".inst 0x450e9936 // smmla z22.s, z9.b, z14.b\n" + ".inst 0x45029927 // smmla z7.s, z9.b, z2.b\n" + "ld1rqb { z9.b }, p1/Z, [x25, #64]\n" + ".inst 0x451e9936 // smmla z22.s, z9.b, z30.b\n" + ".inst 0x45159927 // smmla z7.s, z9.b, z21.b\n" + "ld1rqb { z9.b }, p1/Z, [x25, #96]\n" + ".inst 0x45049936 // smmla z22.s, z9.b, z4.b\n" + ".inst 0x45119927 // smmla z7.s, z9.b, z17.b\n" + "uzp1 z9.d, z22.d, z7.d\n" + "scvtf z9.s, p1/m, z9.s\n" + "uzp2 z22.d, z22.d, z7.d\n" + "fmul z7.s, z23.s, z3.s[0]\n" + "scvtf z22.s, p1/m, z22.s\n" + "fmla z11.s, p1/M, z9.s, z7.s\n" + "ld1rqb { z9.b }, p1/Z, [x24]\n" + "fmul z7.s, z23.s, z3.s[1]\n" + "fmla z16.s, p1/M, z22.s, z7.s\n" + "mov z22.s, #0x0\n" + "mov z7.s, #0x0\n" + ".inst 0x451f9a56 // smmla z22.s, z18.b, z31.b\n" + ".inst 0x45069a47 // smmla z7.s, z18.b, z6.b\n" + "ld1rqb { z18.b }, p1/Z, [x25, #48]\n" + ".inst 0x450e9a56 // smmla z22.s, z18.b, z14.b\n" + ".inst 0x45029a47 // smmla z7.s, z18.b, z2.b\n" + "ld1rqb { z18.b }, p1/Z, [x25, #80]\n" + ".inst 0x451e9a56 // smmla z22.s, z18.b, z30.b\n" + ".inst 0x45159a47 // smmla z7.s, z18.b, z21.b\n" + "ld1rqb { z18.b }, p1/Z, [x25, #112]\n" + "add x25, x25, #0x88\n" + ".inst 0x45049a56 // smmla z22.s, z18.b, z4.b\n" + ".inst 0x45119a47 // smmla z7.s, z18.b, z17.b\n" + "uzp1 z18.d, z22.d, z7.d\n" + "scvtf z18.s, p1/m, z18.s\n" + "uzp2 z7.d, z22.d, z7.d\n" + "fmul z22.s, z23.s, z3.s[2]\n" + "fmul z3.s, z23.s, z3.s[3]\n" + "scvtf z7.s, p1/m, z7.s\n" + "fmla z19.s, p1/M, z18.s, z22.s\n" + "ld1rqb { z18.b }, p1/Z, [x24, #16]\n" + "fmul z22.s, z23.s, z5.s[0]\n" + "fmla z26.s, p1/M, z7.s, z3.s\n" + "mov z3.s, #0x0\n" + "mov z7.s, #0x0\n" + ".inst 0x451f9923 // smmla z3.s, z9.b, z31.b\n" + ".inst 0x45069927 // smmla z7.s, z9.b, z6.b\n" + "ld1rqb { z9.b }, p1/Z, [x24, #32]\n" + ".inst 0x450e9923 // smmla z3.s, z9.b, z14.b\n" + ".inst 0x45029927 // smmla z7.s, z9.b, z2.b\n" + "mov z9.s, #0x0\n" + ".inst 0x451f9a49 // smmla z9.s, z18.b, z31.b\n" + "mov z31.s, #0x0\n" + ".inst 0x45069a5f // smmla z31.s, z18.b, z6.b\n" + "ld1rqb { z6.b }, p1/Z, [x24, #48]\n" + "ld1rqb { z18.b }, p1/Z, [x24, #64]\n" + ".inst 0x450e98c9 // smmla z9.s, z6.b, z14.b\n" + "fmul z14.s, z23.s, z5.s[1]\n" + ".inst 0x450298df // smmla z31.s, z6.b, z2.b\n" + "ld1rqb { z6.b }, p1/Z, [x24, #80]\n" + "fmul z2.s, z23.s, z5.s[2]\n" + "fmul z23.s, z23.s, z5.s[3]\n" + ".inst 0x451e9a43 // smmla z3.s, z18.b, z30.b\n" + ".inst 0x45159a47 // smmla z7.s, z18.b, z21.b\n" + "ld1rqb { z5.b }, p1/Z, [x24, #96]\n" + ".inst 0x451e98c9 // smmla z9.s, z6.b, z30.b\n" + ".inst 0x451598df // smmla z31.s, z6.b, z21.b\n" + "ld1rqb { z18.b }, p1/Z, [x24, #112]\n" + "add x24, x24, #0x88\n" + ".inst 0x450498a3 // smmla z3.s, z5.b, z4.b\n" + ".inst 0x451198a7 // smmla z7.s, z5.b, z17.b\n" + ".inst 0x45049a49 // smmla z9.s, z18.b, z4.b\n" + ".inst 0x45119a5f // smmla z31.s, z18.b, z17.b\n" + "uzp1 z18.d, z3.d, z7.d\n" + "uzp2 z5.d, z3.d, z7.d\n" + "scvtf z18.s, p1/m, z18.s\n" + "uzp1 z6.d, z9.d, z31.d\n" + "uzp2 z9.d, z9.d, z31.d\n" + "scvtf z5.s, p1/m, z5.s\n" + "fmla z8.s, p1/M, z18.s, z22.s\n" + "scvtf z6.s, p1/m, z6.s\n" + "scvtf z9.s, p1/m, z9.s\n" + "fmla z29.s, p1/M, z5.s, z14.s\n" + "fmla z27.s, p1/M, z6.s, 
z2.s\n" + "fmla z10.s, p1/M, z9.s, z23.s\n" + "bgt 3b\n" + "mov x20, %x[res_ptr]\n" + "subs x10, x10, #0x8\n" + "add %x[res_ptr], %x[res_ptr], #0x20\n" + "st1w { z24.s }, p1, [x20]\n" + "add x20, x20, %x[res_stride]\n" + "st1w { z15.s }, p1, [x20]\n" + "add x20, x20, %x[res_stride]\n" + "st1w { z12.s }, p1, [x20]\n" + "add x20, x20, %x[res_stride]\n" + "st1w { z0.s }, p1, [x20]\n" + "add x20, x20, %x[res_stride]\n" + "st1w { z13.s }, p1, [x20]\n" + "add x20, x20, %x[res_stride]\n" + "st1w { z1.s }, p1, [x20]\n" + "add x20, x20, %x[res_stride]\n" + "st1w { z20.s }, p1, [x20]\n" + "add x20, x20, %x[res_stride]\n" + "st1w { z25.s }, p1, [x20]\n" + "add x20, x20, %x[res_stride]\n" + "st1w { z11.s }, p1, [x20]\n" + "add x20, x20, %x[res_stride]\n" + "st1w { z16.s }, p1, [x20]\n" + "add x20, x20, %x[res_stride]\n" + "st1w { z19.s }, p1, [x20]\n" + "add x20, x20, %x[res_stride]\n" + "st1w { z26.s }, p1, [x20]\n" + "add x20, x20, %x[res_stride]\n" + "st1w { z8.s }, p1, [x20]\n" + "add x20, x20, %x[res_stride]\n" + "st1w { z29.s }, p1, [x20]\n" + "add x20, x20, %x[res_stride]\n" + "st1w { z27.s }, p1, [x20]\n" + "add x20, x20, %x[res_stride]\n" + "st1w { z10.s }, p1, [x20]\n" + "bne 2b\n" + "mov x20, #0x4\n" + "sub x13, x13, #0x10\n" + "cmp x13, #0x10\n" + "mov %x[res_ptr], x9\n" + "madd %x[a_ptr], x20, x12, %x[a_ptr]\n" + "bge 1b\n" + "4:" // Row loop skip + "cbz x13, 9f\n" + "5:" // Row tail: Row loop + "add x25, %x[b_ptr], #0x10\n" + "mov x24, %x[nc]\n" + "add x23, %x[res_ptr], %x[res_stride], LSL #2\n" + "6:" // Row tail: Column loop + "mov z24.b, #0x0\n" + "mov z15.b, #0x0\n" + "add x28, %x[a_ptr], #0x8\n" + "mov x22, %x[nb]\n" + "mov z12.b, #0x0\n" + "mov z0.b, #0x0\n" + "7:" // Row tail: Block loop + "ld1b { z3.b }, p1/Z, [x25]\n" + "ld1b { z6.b }, p1/Z, [x25, #1, MUL VL]\n" + "mov z2.s, #0x0\n" + "mov z25.s, #0x0\n" + "ld1rqb { z26.b }, p1/Z, [x28]\n" + "ld1rqb { z21.b }, p1/Z, [x28, #16]\n" + "mov z27.s, #0x0\n" + "mov z19.s, #0x0\n" + "ld1b { z29.b }, p1/Z, [x25, #2, MUL VL]\n" + "ld1b { z16.b }, p1/Z, [x25, #3, MUL VL]\n" + "sub x21, x25, #0x10\n" + "sub x20, x28, #0x8\n" + "lsl z20.b, z3.b, #0x4\n" + "lsl z4.b, z6.b, #0x4\n" + "ld1rqb { z10.b }, p1/Z, [x28, #32]\n" + "ld1rqb { z23.b }, p1/Z, [x28, #48]\n" + "and z3.b, z3.b, #0xf0\n" + "and z6.b, z6.b, #0xf0\n" + "ld1rqb { z11.b }, p1/Z, [x28, #64]\n" + "ld1rqb { z7.b }, p1/Z, [x28, #80]\n" + "lsl z8.b, z29.b, #0x4\n" + "lsl z14.b, z16.b, #0x4\n" + "ld1rqb { z18.b }, p1/Z, [x28, #96]\n" + "ld1rqb { z30.b }, p1/Z, [x28, #112]\n" + ".inst 0x45149b42 // smmla z2.s, z26.b, z20.b\n" + ".inst 0x45049b59 // smmla z25.s, z26.b, z4.b\n" + "and z29.b, z29.b, #0xf0\n" + "ld1h { z17.s }, p1/Z, [x21]\n" + ".inst 0x45149abb // smmla z27.s, z21.b, z20.b\n" + ".inst 0x45049ab3 // smmla z19.s, z21.b, z4.b\n" + "and z16.b, z16.b, #0xf0\n" + "ld1h { z4.s }, p0/Z, [x20]\n" + "subs x22, x22, #0x1\n" + "add x28, x28, #0x88\n" + "fcvt z17.s, p1/m, z17.h\n" + "add x25, x25, #0x90\n" + ".inst 0x45089942 // smmla z2.s, z10.b, z8.b\n" + ".inst 0x450e9959 // smmla z25.s, z10.b, z14.b\n" + "fcvt z4.s, p1/m, z4.h\n" + ".inst 0x45089afb // smmla z27.s, z23.b, z8.b\n" + ".inst 0x450e9af3 // smmla z19.s, z23.b, z14.b\n" + "fscale z17.s, p1/m, z17.s, z28.s\n" + "mov z4.q, z4.q[0]\n" + ".inst 0x45039962 // smmla z2.s, z11.b, z3.b\n" + ".inst 0x45069979 // smmla z25.s, z11.b, z6.b\n" + "fmul z23.s, z17.s, z4.s[0]\n" + "fmul z9.s, z17.s, z4.s[1]\n" + "fmul z21.s, z17.s, z4.s[2]\n" + "fmul z4.s, z17.s, z4.s[3]\n" + ".inst 0x450398fb // smmla z27.s, z7.b, z3.b\n" + ".inst 
0x450698f3 // smmla z19.s, z7.b, z6.b\n" + ".inst 0x451d9a42 // smmla z2.s, z18.b, z29.b\n" + ".inst 0x45109a59 // smmla z25.s, z18.b, z16.b\n" + ".inst 0x451d9bdb // smmla z27.s, z30.b, z29.b\n" + ".inst 0x45109bd3 // smmla z19.s, z30.b, z16.b\n" + "uzp1 z31.d, z2.d, z25.d\n" + "uzp2 z13.d, z2.d, z25.d\n" + "scvtf z31.s, p1/m, z31.s\n" + "uzp1 z17.d, z27.d, z19.d\n" + "uzp2 z18.d, z27.d, z19.d\n" + "scvtf z13.s, p1/m, z13.s\n" + "fmla z24.s, p1/M, z31.s, z23.s\n" + "scvtf z17.s, p1/m, z17.s\n" + "scvtf z18.s, p1/m, z18.s\n" + "fmla z15.s, p1/M, z13.s, z9.s\n" + "fmla z12.s, p1/M, z17.s, z21.s\n" + "fmla z0.s, p1/M, z18.s, z4.s\n" + "bgt 7b\n" + "mov x20, %x[res_ptr]\n" + "cmp x13, #0x1\n" + "st1w { z24.s }, p1, [x20]\n" + "add x20, x20, %x[res_stride]\n" + "ble 8f\n" + "cmp x13, #0x2\n" + "st1w { z15.s }, p1, [x20]\n" + "add x20, x20, %x[res_stride]\n" + "ble 8f\n" + "cmp x13, #0x3\n" + "st1w { z12.s }, p1, [x20]\n" + "add x20, x20, %x[res_stride]\n" + "ble 8f\n" + "st1w { z0.s }, p1, [x20]\n" + "8:" // Row tail: Accumulator store skip + "subs x24, x24, #0x8\n" + "add %x[res_ptr], %x[res_ptr], #0x20\n" + "bne 6b\n" + "subs x13, x13, #0x4\n" + "add %x[a_ptr], %x[a_ptr], x12\n" + "mov %x[res_ptr], x23\n" + "bgt 5b\n" + "9:" // Row tail: Row loop skip + : [a_ptr] "+&r" (a_ptr), [res_ptr] "+&r" (res_ptr) + : [b_ptr] "r" (b_ptr), [nr] "r" (nr), [nb] "r" (nb), [res_stride] "r" (res_stride), [nc] "r" (nc) + : "cc", "memory", "p0", "p1", "x9", "x10", "x11", "x12", "x13", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31" + ); + return; + } +#endif // #if defined(__ARM_FEATURE_SVE) && defined(__ARM_FEATURE_MATMUL_INT8) + +#endif // #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) + ggml_gemm_q4_0_8x8_q8_0_generic(n, s, bs, vx, vy, nr, nc); +} + +void ggml_gemm_iq4_nl_4x4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { + const int qk = QK8_0; + const int nb = n / qk; + const int ncols_interleaved = 4; + const int blocklen = 4; + + assert (n % qk == 0); + assert (nr % 4 == 0); + assert (nc % ncols_interleaved == 0); + + UNUSED(s); + UNUSED(bs); + UNUSED(vx); + UNUSED(vy); + UNUSED(nr); + UNUSED(nc); + UNUSED(nb); + UNUSED(ncols_interleaved); + UNUSED(blocklen); + +#if ! ((defined(_MSC_VER)) && ! 
defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD) + const int8x16_t kvalues = vld1q_s8(kvalues_iq4nl); + + for (int y = 0; y < nr / 4; y++) { + const block_q8_0x4 * a_ptr = (const block_q8_0x4 *) vy + (y * nb); + for (int x = 0; x < nc / ncols_interleaved; x++) { + const block_iq4_nlx4 * b_ptr = (const block_iq4_nlx4 *) vx + (x * nb); + + float32x4_t sumf[4]; + for (int m = 0; m < 4; m++) { + sumf[m] = vdupq_n_f32(0); + } + + for (int l = 0; l < nb; l++) { + float32x4_t a_d = vcvt_f32_f16(vld1_f16((const float16_t *)a_ptr[l].d)); + float32x4_t b_d = vcvt_f32_f16(vld1_f16((const float16_t *)b_ptr[l].d)); + + int32x4_t sumi_0 = vdupq_n_s32(0); + int32x4_t sumi_1 = vdupq_n_s32(0); + int32x4_t sumi_2 = vdupq_n_s32(0); + int32x4_t sumi_3 = vdupq_n_s32(0); + + for (int k = 0; k < 4; k++) { + int8x16_t a_0 = vld1q_s8(a_ptr[l].qs + 16 * k + 0); + int8x16_t a_1 = vld1q_s8(a_ptr[l].qs + 16 * k + 64); + + uint8x16_t b = vld1q_u8(b_ptr[l].qs + 16 * k); + int8x16_t b_hi = vqtbl1q_s8(kvalues, b >> 4); + int8x16_t b_lo = vqtbl1q_s8(kvalues, b & 0xF); + + sumi_0 = vdotq_laneq_s32(sumi_0, b_lo, a_0, 0); + sumi_1 = vdotq_laneq_s32(sumi_1, b_lo, a_0, 1); + sumi_2 = vdotq_laneq_s32(sumi_2, b_lo, a_0, 2); + sumi_3 = vdotq_laneq_s32(sumi_3, b_lo, a_0, 3); + sumi_0 = vdotq_laneq_s32(sumi_0, b_hi, a_1, 0); + sumi_1 = vdotq_laneq_s32(sumi_1, b_hi, a_1, 1); + sumi_2 = vdotq_laneq_s32(sumi_2, b_hi, a_1, 2); + sumi_3 = vdotq_laneq_s32(sumi_3, b_hi, a_1, 3); + } + + sumf[0] = vmlaq_f32(sumf[0], vmulq_laneq_f32(b_d, a_d, 0), vcvtq_f32_s32(sumi_0)); + sumf[1] = vmlaq_f32(sumf[1], vmulq_laneq_f32(b_d, a_d, 1), vcvtq_f32_s32(sumi_1)); + sumf[2] = vmlaq_f32(sumf[2], vmulq_laneq_f32(b_d, a_d, 2), vcvtq_f32_s32(sumi_2)); + sumf[3] = vmlaq_f32(sumf[3], vmulq_laneq_f32(b_d, a_d, 3), vcvtq_f32_s32(sumi_3)); + } + + for (int m = 0; m < 4; m++) { + vst1q_f32(s + (y * 4 + m) * bs + x * 4, sumf[m]); + } + } + } + return; +#endif // #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD) + ggml_gemm_iq4_nl_4x4_q8_0_generic(n, s, bs, vx, vy, nr, nc); +} diff --git a/ml/backend/ggml/ggml/src/ggml-cpu/cpu-feats-x86.cpp b/ml/backend/ggml/ggml/src/ggml-cpu/arch/x86/cpu-feats.cpp similarity index 100% rename from ml/backend/ggml/ggml/src/ggml-cpu/cpu-feats-x86.cpp rename to ml/backend/ggml/ggml/src/ggml-cpu/arch/x86/cpu-feats.cpp diff --git a/ml/backend/ggml/ggml/src/ggml-cpu/arch/x86/quants.c b/ml/backend/ggml/ggml/src/ggml-cpu/arch/x86/quants.c new file mode 100644 index 000000000..cb49320a6 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cpu/arch/x86/quants.c @@ -0,0 +1,3820 @@ +#define GGML_COMMON_IMPL_C +#include "ggml-common.h" +#include "ggml-quants.h" +#include "ggml-impl.h" +#include "ggml-cpu.h" +#include "simd-mappings.h" + +#include "../../quants.h" +#include "../../ggml-cpu-impl.h" + +#include <math.h> +#include <string.h> +#include <assert.h> +#include <stdlib.h> // for qsort +#include <stdio.h> // for GGML_ASSERT + +#define GROUP_MAX_EPS 1e-15f +#define GROUP_MAX_EPS_IQ3_XXS 1e-8f +#define GROUP_MAX_EPS_IQ2_S 1e-8f +#define GROUP_MAX_EPS_IQ1_M 1e-7f +#define GROUP_MAX_EPS_IQ1_S 1e-12f + +#define UNUSED GGML_UNUSED + +// some compilers don't provide _mm256_set_m128i, e.g.
gcc 7 +#define MM256_SET_M128I(a, b) _mm256_insertf128_si256(_mm256_castsi128_si256(b), (a), 1) + +#if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__) +// multiply int8_t, add results pairwise twice +static inline __m128i mul_sum_i8_pairs(const __m128i x, const __m128i y) { + // Get absolute values of x vectors + const __m128i ax = _mm_sign_epi8(x, x); + // Sign the values of the y vectors + const __m128i sy = _mm_sign_epi8(y, x); + // Perform multiplication and create 16-bit values + const __m128i dot = _mm_maddubs_epi16(ax, sy); + const __m128i ones = _mm_set1_epi16(1); + return _mm_madd_epi16(ones, dot); +} + +#if __AVX__ || __AVX2__ || __AVX512F__ +// horizontally add 8 floats +static inline float hsum_float_8(const __m256 x) { + __m128 res = _mm256_extractf128_ps(x, 1); + res = _mm_add_ps(res, _mm256_castps256_ps128(x)); + res = _mm_add_ps(res, _mm_movehl_ps(res, res)); + res = _mm_add_ss(res, _mm_movehdup_ps(res)); + return _mm_cvtss_f32(res); +} + +// horizontally add 8 int32_t +static inline int hsum_i32_8(const __m256i a) { + const __m128i sum128 = _mm_add_epi32(_mm256_castsi256_si128(a), _mm256_extractf128_si256(a, 1)); + const __m128i hi64 = _mm_unpackhi_epi64(sum128, sum128); + const __m128i sum64 = _mm_add_epi32(hi64, sum128); + const __m128i hi32 = _mm_shuffle_epi32(sum64, _MM_SHUFFLE(2, 3, 0, 1)); + return _mm_cvtsi128_si32(_mm_add_epi32(sum64, hi32)); +} + +// horizontally add 4 int32_t +static inline int hsum_i32_4(const __m128i a) { + const __m128i hi64 = _mm_unpackhi_epi64(a, a); + const __m128i sum64 = _mm_add_epi32(hi64, a); + const __m128i hi32 = _mm_shuffle_epi32(sum64, _MM_SHUFFLE(2, 3, 0, 1)); + return _mm_cvtsi128_si32(_mm_add_epi32(sum64, hi32)); +} + +#if defined(__AVX2__) || defined(__AVX512F__) +static inline __m256i mul_add_epi8(const __m256i x, const __m256i y) { + const __m256i ax = _mm256_sign_epi8(x, x); + const __m256i sy = _mm256_sign_epi8(y, x); + return _mm256_maddubs_epi16(ax, sy); +} + +// spread 32 bits to 32 bytes { 0x00, 0xFF } +static inline __m256i bytes_from_bits_32(const uint8_t * x) { + uint32_t x32; + memcpy(&x32, x, sizeof(uint32_t)); + const __m256i shuf_mask = _mm256_set_epi64x( + 0x0303030303030303, 0x0202020202020202, + 0x0101010101010101, 0x0000000000000000); + __m256i bytes = _mm256_shuffle_epi8(_mm256_set1_epi32(x32), shuf_mask); + const __m256i bit_mask = _mm256_set1_epi64x(0x7fbfdfeff7fbfdfe); + bytes = _mm256_or_si256(bytes, bit_mask); + return _mm256_cmpeq_epi8(bytes, _mm256_set1_epi64x(-1)); +} + +// Unpack 32 4-bit fields into 32 bytes +// The output vector contains 32 bytes, each one in [ 0 .. 
15 ] interval +static inline __m256i bytes_from_nibbles_32(const uint8_t * rsi) +{ + const __m128i tmp = _mm_loadu_si128((const __m128i *)rsi); + const __m256i bytes = MM256_SET_M128I(_mm_srli_epi16(tmp, 4), tmp); + const __m256i lowMask = _mm256_set1_epi8( 0xF ); + return _mm256_and_si256(lowMask, bytes); +} + +// add int16_t pairwise and return as float vector +static inline __m256 sum_i16_pairs_float(const __m256i x) { + const __m256i ones = _mm256_set1_epi16(1); + const __m256i summed_pairs = _mm256_madd_epi16(ones, x); + return _mm256_cvtepi32_ps(summed_pairs); +} + +static inline __m256 mul_sum_us8_pairs_float(const __m256i ax, const __m256i sy) { +#if defined(__AVX512VNNI__) && defined(__AVX512VL__) + const __m256i zero = _mm256_setzero_si256(); + const __m256i summed_pairs = _mm256_dpbusd_epi32(zero, ax, sy); + return _mm256_cvtepi32_ps(summed_pairs); +#elif defined(__AVXVNNI__) + const __m256i zero = _mm256_setzero_si256(); + const __m256i summed_pairs = _mm256_dpbusd_avx_epi32(zero, ax, sy); + return _mm256_cvtepi32_ps(summed_pairs); +#else + // Perform multiplication and create 16-bit values + const __m256i dot = _mm256_maddubs_epi16(ax, sy); + return sum_i16_pairs_float(dot); +#endif +} + +// multiply int8_t, add results pairwise twice and return as float vector +static inline __m256 mul_sum_i8_pairs_float(const __m256i x, const __m256i y) { +#if __AVXVNNIINT8__ + const __m256i zero = _mm256_setzero_si256(); + const __m256i summed_pairs = _mm256_dpbssd_epi32(zero, x, y); + return _mm256_cvtepi32_ps(summed_pairs); +#else + // Get absolute values of x vectors + const __m256i ax = _mm256_sign_epi8(x, x); + // Sign the values of the y vectors + const __m256i sy = _mm256_sign_epi8(y, x); + return mul_sum_us8_pairs_float(ax, sy); +#endif +} + +static inline __m128i packNibbles( __m256i bytes ) +{ + // Move bits within 16-bit lanes from 0000_abcd_0000_efgh into 0000_0000_abcd_efgh +#if __AVX512F__ + const __m256i bytes_srli_4 = _mm256_srli_epi16(bytes, 4); // 0000_0000_abcd_0000 + bytes = _mm256_or_si256(bytes, bytes_srli_4); // 0000_abcd_abcd_efgh + return _mm256_cvtepi16_epi8(bytes); // abcd_efgh +#else + const __m256i lowByte = _mm256_set1_epi16( 0xFF ); + __m256i high = _mm256_andnot_si256( lowByte, bytes ); + __m256i low = _mm256_and_si256( lowByte, bytes ); + high = _mm256_srli_epi16( high, 4 ); + bytes = _mm256_or_si256( low, high ); + + // Compress uint16_t lanes into bytes + __m128i r0 = _mm256_castsi256_si128( bytes ); + __m128i r1 = _mm256_extracti128_si256( bytes, 1 ); + return _mm_packus_epi16( r0, r1 ); +#endif +} +#elif defined(__AVX__) +static inline __m128i packNibbles( __m128i bytes1, __m128i bytes2 ) +{ + // Move bits within 16-bit lanes from 0000_abcd_0000_efgh into 0000_0000_abcd_efgh + const __m128i lowByte = _mm_set1_epi16( 0xFF ); + __m128i high = _mm_andnot_si128( lowByte, bytes1 ); + __m128i low = _mm_and_si128( lowByte, bytes1 ); + high = _mm_srli_epi16( high, 4 ); + bytes1 = _mm_or_si128( low, high ); + high = _mm_andnot_si128( lowByte, bytes2 ); + low = _mm_and_si128( lowByte, bytes2 ); + high = _mm_srli_epi16( high, 4 ); + bytes2 = _mm_or_si128( low, high ); + + return _mm_packus_epi16( bytes1, bytes2); +} + +static inline __m128i mul_add_epi8_sse(const __m128i x, const __m128i y) { + const __m128i ax = _mm_sign_epi8(x, x); + const __m128i sy = _mm_sign_epi8(y, x); + return _mm_maddubs_epi16(ax, sy); +} + +// spread 32 bits to 32 bytes { 0x00, 0xFF } +static inline __m256i bytes_from_bits_32(const uint8_t * x) { + uint32_t x32; + memcpy(&x32, x, 
sizeof(uint32_t)); + const __m128i shuf_maskl = _mm_set_epi64x(0x0101010101010101, 0x0000000000000000); + const __m128i shuf_maskh = _mm_set_epi64x(0x0303030303030303, 0x0202020202020202); + __m128i bytesl = _mm_shuffle_epi8(_mm_set1_epi32(x32), shuf_maskl); + __m128i bytesh = _mm_shuffle_epi8(_mm_set1_epi32(x32), shuf_maskh); + const __m128i bit_mask = _mm_set1_epi64x(0x7fbfdfeff7fbfdfe); + bytesl = _mm_or_si128(bytesl, bit_mask); + bytesh = _mm_or_si128(bytesh, bit_mask); + bytesl = _mm_cmpeq_epi8(bytesl, _mm_set1_epi64x(-1)); + bytesh = _mm_cmpeq_epi8(bytesh, _mm_set1_epi64x(-1)); + return MM256_SET_M128I(bytesh, bytesl); +} + +// Unpack 32 4-bit fields into 32 bytes +// The output vector contains 32 bytes, each one in [ 0 .. 15 ] interval +static inline __m256i bytes_from_nibbles_32(const uint8_t * rsi) +{ + // Load 16 bytes from memory + __m128i tmpl = _mm_loadu_si128((const __m128i *)rsi); + __m128i tmph = _mm_srli_epi16(tmpl, 4); + const __m128i lowMask = _mm_set1_epi8(0xF); + tmpl = _mm_and_si128(lowMask, tmpl); + tmph = _mm_and_si128(lowMask, tmph); + return MM256_SET_M128I(tmph, tmpl); +} + +// add int16_t pairwise and return as float vector +static inline __m256 sum_i16_pairs_float(const __m128i xh, const __m128i xl) { + const __m128i ones = _mm_set1_epi16(1); + const __m128i summed_pairsl = _mm_madd_epi16(ones, xl); + const __m128i summed_pairsh = _mm_madd_epi16(ones, xh); + const __m256i summed_pairs = MM256_SET_M128I(summed_pairsh, summed_pairsl); + return _mm256_cvtepi32_ps(summed_pairs); +} + +static inline __m256 mul_sum_us8_pairs_float(const __m256i ax, const __m256i sy) { + const __m128i axl = _mm256_castsi256_si128(ax); + const __m128i axh = _mm256_extractf128_si256(ax, 1); + const __m128i syl = _mm256_castsi256_si128(sy); + const __m128i syh = _mm256_extractf128_si256(sy, 1); + // Perform multiplication and create 16-bit values + const __m128i dotl = _mm_maddubs_epi16(axl, syl); + const __m128i doth = _mm_maddubs_epi16(axh, syh); + return sum_i16_pairs_float(doth, dotl); +} + +// multiply int8_t, add results pairwise twice and return as float vector +static inline __m256 mul_sum_i8_pairs_float(const __m256i x, const __m256i y) { + const __m128i xl = _mm256_castsi256_si128(x); + const __m128i xh = _mm256_extractf128_si256(x, 1); + const __m128i yl = _mm256_castsi256_si128(y); + const __m128i yh = _mm256_extractf128_si256(y, 1); + // Get absolute values of x vectors + const __m128i axl = _mm_sign_epi8(xl, xl); + const __m128i axh = _mm_sign_epi8(xh, xh); + // Sign the values of the y vectors + const __m128i syl = _mm_sign_epi8(yl, xl); + const __m128i syh = _mm_sign_epi8(yh, xh); + // Perform multiplication and create 16-bit values + const __m128i dotl = _mm_maddubs_epi16(axl, syl); + const __m128i doth = _mm_maddubs_epi16(axh, syh); + return sum_i16_pairs_float(doth, dotl); +} + +// larger version of mul_sum_i8_pairs_float where x and y are each represented by four 128-bit vectors +static inline __m256 mul_sum_i8_quad_float(const __m128i x_1_0, const __m128i x_1_1, const __m128i x_2_0, const __m128i x_2_1, + const __m128i y_1_0, const __m128i y_1_1, const __m128i y_2_0, const __m128i y_2_1) { + const __m128i mone = _mm_set1_epi16(1); + + const __m128i p16_1_0 = mul_add_epi8_sse(x_1_0, y_1_0); + const __m128i p16_1_1 = mul_add_epi8_sse(x_1_1, y_1_1); + const __m128i p16_2_0 = mul_add_epi8_sse(x_2_0, y_2_0); + const __m128i p16_2_1 = mul_add_epi8_sse(x_2_1, y_2_1); + const __m128i p_1_0 = _mm_madd_epi16(p16_1_0, mone); + const __m128i p_1_1 = _mm_madd_epi16(p16_1_1, 
mone); + const __m128i p_2_0 = _mm_madd_epi16(p16_2_0, mone); + const __m128i p_2_1 = _mm_madd_epi16(p16_2_1, mone); + const __m128i p_1 = _mm_add_epi32(p_1_0, p_1_1); + const __m128i p_2 = _mm_add_epi32(p_2_0, p_2_1); + return _mm256_cvtepi32_ps(MM256_SET_M128I(p_2, p_1)); +} + +// quad fp16 delta calculation +static inline __m256 quad_fp16_delta_float(const float x0, const float y0, const float x1, const float y1) { + // GGML_CPU_FP16_TO_FP32 is faster than Intel F16C + return _mm256_set_m128(_mm_set1_ps(GGML_CPU_FP16_TO_FP32(x1) * GGML_CPU_FP16_TO_FP32(y1)), + _mm_set1_ps(GGML_CPU_FP16_TO_FP32(x0) * GGML_CPU_FP16_TO_FP32(y0))); +} + +static inline __m256 quad_mx_delta_float(const int8_t x0, const float y0, const int8_t x1, const float y1) { + return _mm256_set_m128(_mm_set1_ps(GGML_E8M0_TO_FP32_HALF(x1) * GGML_CPU_FP16_TO_FP32(y1)), + _mm_set1_ps(GGML_E8M0_TO_FP32_HALF(x0) * GGML_CPU_FP16_TO_FP32(y0))); +} +#endif +#elif defined(__SSSE3__) +// horizontally add 4x4 floats +static inline float hsum_float_4x4(const __m128 a, const __m128 b, const __m128 c, const __m128 d) { + __m128 res_0 =_mm_hadd_ps(a, b); + __m128 res_1 =_mm_hadd_ps(c, d); + __m128 res =_mm_hadd_ps(res_0, res_1); + res =_mm_hadd_ps(res, res); + res =_mm_hadd_ps(res, res); + + return _mm_cvtss_f32(res); +} +#endif // __AVX__ || __AVX2__ || __AVX512F__ +#endif // defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__) + +void quantize_row_q8_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { + assert(QK8_0 == 32); + assert(k % QK8_0 == 0); + const int nb = k / QK8_0; + + block_q8_0 * GGML_RESTRICT y = vy; + +#if defined(__AVX2__) || defined(__AVX__) + for (int i = 0; i < nb; i++) { + // Load elements into 4 AVX vectors + __m256 v0 = _mm256_loadu_ps( x ); + __m256 v1 = _mm256_loadu_ps( x + 8 ); + __m256 v2 = _mm256_loadu_ps( x + 16 ); + __m256 v3 = _mm256_loadu_ps( x + 24 ); + x += 32; + + // Compute max(abs(e)) for the block + const __m256 signBit = _mm256_set1_ps( -0.0f ); + __m256 maxAbs = _mm256_andnot_ps( signBit, v0 ); + maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v1 ) ); + maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v2 ) ); + maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v3 ) ); + + __m128 max4 = _mm_max_ps( _mm256_extractf128_ps( maxAbs, 1 ), _mm256_castps256_ps128( maxAbs ) ); + max4 = _mm_max_ps( max4, _mm_movehl_ps( max4, max4 ) ); + max4 = _mm_max_ss( max4, _mm_movehdup_ps( max4 ) ); + const float maxScalar = _mm_cvtss_f32( max4 ); + + // Quantize these floats + const float d = maxScalar / 127.f; + y[i].d = GGML_CPU_FP32_TO_FP16(d); + const float id = ( maxScalar != 0.0f ) ? 
127.f / maxScalar : 0.0f; + const __m256 mul = _mm256_set1_ps( id ); + + // Apply the multiplier + v0 = _mm256_mul_ps( v0, mul ); + v1 = _mm256_mul_ps( v1, mul ); + v2 = _mm256_mul_ps( v2, mul ); + v3 = _mm256_mul_ps( v3, mul ); + + // Round to nearest integer + v0 = _mm256_round_ps( v0, _MM_ROUND_NEAREST ); + v1 = _mm256_round_ps( v1, _MM_ROUND_NEAREST ); + v2 = _mm256_round_ps( v2, _MM_ROUND_NEAREST ); + v3 = _mm256_round_ps( v3, _MM_ROUND_NEAREST ); + + // Convert floats to integers + __m256i i0 = _mm256_cvtps_epi32( v0 ); + __m256i i1 = _mm256_cvtps_epi32( v1 ); + __m256i i2 = _mm256_cvtps_epi32( v2 ); + __m256i i3 = _mm256_cvtps_epi32( v3 ); + +#if defined(__AVX2__) + // Convert int32 to int16 + i0 = _mm256_packs_epi32( i0, i1 ); // 0, 1, 2, 3, 8, 9, 10, 11, 4, 5, 6, 7, 12, 13, 14, 15 + i2 = _mm256_packs_epi32( i2, i3 ); // 16, 17, 18, 19, 24, 25, 26, 27, 20, 21, 22, 23, 28, 29, 30, 31 + // Convert int16 to int8 + i0 = _mm256_packs_epi16( i0, i2 ); // 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27, 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31 + + // We got our precious signed bytes, but the order is now wrong + // These AVX2 pack instructions process 16-byte pieces independently + // The following instruction fixes the order + const __m256i perm = _mm256_setr_epi32( 0, 4, 1, 5, 2, 6, 3, 7 ); + i0 = _mm256_permutevar8x32_epi32( i0, perm ); + + _mm256_storeu_si256((__m256i *)y[i].qs, i0); +#else + // Since AVX lacks some of the integer instructions we need, + // we split the registers in half and use the SSE analogs of the AVX2 ops + __m128i ni0 = _mm256_castsi256_si128( i0 ); + __m128i ni1 = _mm256_extractf128_si256( i0, 1); + __m128i ni2 = _mm256_castsi256_si128( i1 ); + __m128i ni3 = _mm256_extractf128_si256( i1, 1); + __m128i ni4 = _mm256_castsi256_si128( i2 ); + __m128i ni5 = _mm256_extractf128_si256( i2, 1); + __m128i ni6 = _mm256_castsi256_si128( i3 ); + __m128i ni7 = _mm256_extractf128_si256( i3, 1); + + // Convert int32 to int16 + ni0 = _mm_packs_epi32( ni0, ni1 ); + ni2 = _mm_packs_epi32( ni2, ni3 ); + ni4 = _mm_packs_epi32( ni4, ni5 ); + ni6 = _mm_packs_epi32( ni6, ni7 ); + // Convert int16 to int8 + ni0 = _mm_packs_epi16( ni0, ni2 ); + ni4 = _mm_packs_epi16( ni4, ni6 ); + + _mm_storeu_si128((__m128i *)(y[i].qs + 0), ni0); + _mm_storeu_si128((__m128i *)(y[i].qs + 16), ni4); +#endif + } +#else + GGML_UNUSED(nb); + // scalar + quantize_row_q8_0_ref(x, y, k); +#endif +} + +void quantize_row_q8_1(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { + assert(k % QK8_1 == 0); + const int nb = k / QK8_1; + + block_q8_1 * GGML_RESTRICT y = vy; +#if defined(__AVX2__) || defined(__AVX__) + for (int i = 0; i < nb; i++) { + // Load elements into 4 AVX vectors + __m256 v0 = _mm256_loadu_ps( x ); + __m256 v1 = _mm256_loadu_ps( x + 8 ); + __m256 v2 = _mm256_loadu_ps( x + 16 ); + __m256 v3 = _mm256_loadu_ps( x + 24 ); + x += 32; + + // Compute max(abs(e)) for the block + const __m256 signBit = _mm256_set1_ps( -0.0f ); + __m256 maxAbs = _mm256_andnot_ps( signBit, v0 ); + maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v1 ) ); + maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v2 ) ); + maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v3 ) ); + + __m128 max4 = _mm_max_ps( _mm256_extractf128_ps( maxAbs, 1 ), _mm256_castps256_ps128( maxAbs ) ); + max4 = _mm_max_ps( max4, _mm_movehl_ps( max4, max4 ) ); + max4 = _mm_max_ss( max4, _mm_movehdup_ps( max4 ) ); + const float max_scalar = _mm_cvtss_f32( max4 ); + + // Quantize these floats +
const float d = max_scalar / 127.f; + y[i].d = GGML_CPU_FP32_TO_FP16(d); + const float id = ( max_scalar != 0.0f ) ? 127.f / max_scalar : 0.0f; + const __m256 mul = _mm256_set1_ps( id ); + + // Apply the multiplier + v0 = _mm256_mul_ps( v0, mul ); + v1 = _mm256_mul_ps( v1, mul ); + v2 = _mm256_mul_ps( v2, mul ); + v3 = _mm256_mul_ps( v3, mul ); + + // Round to nearest integer + v0 = _mm256_round_ps( v0, _MM_ROUND_NEAREST ); + v1 = _mm256_round_ps( v1, _MM_ROUND_NEAREST ); + v2 = _mm256_round_ps( v2, _MM_ROUND_NEAREST ); + v3 = _mm256_round_ps( v3, _MM_ROUND_NEAREST ); + + // Convert floats to integers + __m256i i0 = _mm256_cvtps_epi32( v0 ); + __m256i i1 = _mm256_cvtps_epi32( v1 ); + __m256i i2 = _mm256_cvtps_epi32( v2 ); + __m256i i3 = _mm256_cvtps_epi32( v3 ); + +#if defined(__AVX2__) + // Compute the sum of the quants and set y[i].s + y[i].s = GGML_CPU_FP32_TO_FP16(d * hsum_i32_8(_mm256_add_epi32(_mm256_add_epi32(i0, i1), _mm256_add_epi32(i2, i3)))); + + // Convert int32 to int16 + i0 = _mm256_packs_epi32( i0, i1 ); // 0, 1, 2, 3, 8, 9, 10, 11, 4, 5, 6, 7, 12, 13, 14, 15 + i2 = _mm256_packs_epi32( i2, i3 ); // 16, 17, 18, 19, 24, 25, 26, 27, 20, 21, 22, 23, 28, 29, 30, 31 + // Convert int16 to int8 + i0 = _mm256_packs_epi16( i0, i2 ); // 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27, 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31 + + // We got our precious signed bytes, but the order is now wrong + // These AVX2 pack instructions process 16-byte pieces independently + // The following instruction fixes the order + const __m256i perm = _mm256_setr_epi32( 0, 4, 1, 5, 2, 6, 3, 7 ); + i0 = _mm256_permutevar8x32_epi32( i0, perm ); + + _mm256_storeu_si256((__m256i *)y[i].qs, i0); +#else + // Since AVX lacks some of the integer instructions we need, + // we split the registers in half and use the SSE analogs of the AVX2 ops + __m128i ni0 = _mm256_castsi256_si128( i0 ); + __m128i ni1 = _mm256_extractf128_si256( i0, 1); + __m128i ni2 = _mm256_castsi256_si128( i1 ); + __m128i ni3 = _mm256_extractf128_si256( i1, 1); + __m128i ni4 = _mm256_castsi256_si128( i2 ); + __m128i ni5 = _mm256_extractf128_si256( i2, 1); + __m128i ni6 = _mm256_castsi256_si128( i3 ); + __m128i ni7 = _mm256_extractf128_si256( i3, 1); + + // Compute the sum of the quants and set y[i].s + const __m128i s0 = _mm_add_epi32(_mm_add_epi32(ni0, ni1), _mm_add_epi32(ni2, ni3)); + const __m128i s1 = _mm_add_epi32(_mm_add_epi32(ni4, ni5), _mm_add_epi32(ni6, ni7)); + y[i].s = GGML_CPU_FP32_TO_FP16(d * hsum_i32_4(_mm_add_epi32(s0, s1))); + + // Convert int32 to int16 + ni0 = _mm_packs_epi32( ni0, ni1 ); + ni2 = _mm_packs_epi32( ni2, ni3 ); + ni4 = _mm_packs_epi32( ni4, ni5 ); + ni6 = _mm_packs_epi32( ni6, ni7 ); + // Convert int16 to int8 + ni0 = _mm_packs_epi16( ni0, ni2 ); + ni4 = _mm_packs_epi16( ni4, ni6 ); + + _mm_storeu_si128((__m128i *)(y[i].qs + 0), ni0); + _mm_storeu_si128((__m128i *)(y[i].qs + 16), ni4); +#endif + } +#else + GGML_UNUSED(nb); + // scalar + quantize_row_q8_1_ref(x, y, k); +#endif +} + +// placeholder implementation for Apple targets +void quantize_row_q8_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k) { + quantize_row_q8_K_ref(x, y, k); +} + +//===================================== Dot products ================================= + +// +// Helper functions +// + +#if __AVX__ || __AVX2__ || __AVX512F__ + +// shuffles to pick the required scales in dot products +static inline __m256i get_scale_shuffle_q3k(int i) { + static const uint8_t k_shuffle[128] = { + 0, 1, 0, 1, 0, 1, 0, 1, 0, 1,
0, 1, 0, 1, 0, 1, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, + 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, + 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11, + 12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13, 14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15, + }; + return _mm256_loadu_si256((const __m256i*)k_shuffle + i); +} +static inline __m256i get_scale_shuffle_k4(int i) { + static const uint8_t k_shuffle[256] = { + 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, + 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, + 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, + 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, + 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, + 10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11, + 12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13, + 14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15 + }; + return _mm256_loadu_si256((const __m256i*)k_shuffle + i); +} +static inline __m128i get_scale_shuffle(int i) { + static const uint8_t k_shuffle[128] = { + 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, + 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, + 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, + 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, + 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, + 10,10,10,10,10,10,10,10, 11,11,11,11,11,11,11,11, + 12,12,12,12,12,12,12,12, 13,13,13,13,13,13,13,13, + 14,14,14,14,14,14,14,14, 15,15,15,15,15,15,15,15 + }; + return _mm_loadu_si128((const __m128i*)k_shuffle + i); +} +#endif + +void ggml_vec_dot_q4_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + const int qk = QK8_0; + const int nb = n / qk; + + assert(n % qk == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q4_0 * GGML_RESTRICT x = vx; + const block_q8_0 * GGML_RESTRICT y = vy; + + int ib = 0; + float sumf = 0; + +#if defined(__AVX2__) + // Initialize accumulator with zeros + __m256 acc = _mm256_setzero_ps(); + + // Main loop + for (; ib < nb; ++ib) { + /* Compute combined scale for the block */ + const __m256 d = _mm256_set1_ps( GGML_CPU_FP16_TO_FP32(x[ib].d) * GGML_CPU_FP16_TO_FP32(y[ib].d) ); + + __m256i qx = bytes_from_nibbles_32(x[ib].qs); + + // Now we have a vector with bytes in [ 0 .. 15 ] interval. Offset them into [ -8 .. +7 ] interval. 
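+ // (Q4_0 stores unsigned nibbles with an implicit -8 offset; this is the same
+ // (qs[j] & 0x0F) - 8 step that the scalar tail loop below computes per element)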
+ const __m256i off = _mm256_set1_epi8( 8 ); + qx = _mm256_sub_epi8( qx, off ); + + __m256i qy = _mm256_loadu_si256((const __m256i *)y[ib].qs); + + const __m256 q = mul_sum_i8_pairs_float(qx, qy); + + /* Multiply q with scale and accumulate */ + acc = _mm256_fmadd_ps( d, q, acc ); + } + + sumf = hsum_float_8(acc); +#elif defined(__AVX__) + __m256 accum = _mm256_setzero_ps(); + for (; ib + 1 < nb; ib += 2) { + const __m128i q4bits_1 = _mm_loadu_si128((const __m128i *)x[ib + 0].qs); + const __m128i q4bits_2 = _mm_loadu_si128((const __m128i *)x[ib + 1].qs); + const __m128i q8b_1_0 = _mm_loadu_si128((const __m128i *)y[ib + 0].qs); + const __m128i q8b_1_1 = _mm_loadu_si128((const __m128i *)y[ib + 0].qs + 1); + const __m128i q8b_2_0 = _mm_loadu_si128((const __m128i *)y[ib + 1].qs); + const __m128i q8b_2_1 = _mm_loadu_si128((const __m128i *)y[ib + 1].qs + 1); + + const __m128i q4b_1_0 = _mm_sub_epi8(_mm_and_si128(_mm_set1_epi8(15), q4bits_1), _mm_set1_epi8(8)); + const __m128i q4b_1_1 = _mm_sub_epi8(_mm_and_si128(_mm_set1_epi8(15), _mm_srli_epi16(q4bits_1, 4)), _mm_set1_epi8(8)); + const __m128i q4b_2_0 = _mm_sub_epi8(_mm_and_si128(_mm_set1_epi8(15), q4bits_2), _mm_set1_epi8(8)); + const __m128i q4b_2_1 = _mm_sub_epi8(_mm_and_si128(_mm_set1_epi8(15), _mm_srli_epi16(q4bits_2, 4)), _mm_set1_epi8(8)); + + const __m128i p16_1_0 = mul_add_epi8_sse(q4b_1_0, q8b_1_0); + const __m128i p16_1_1 = mul_add_epi8_sse(q4b_1_1, q8b_1_1); + const __m128i p16_2_0 = mul_add_epi8_sse(q4b_2_0, q8b_2_0); + const __m128i p16_2_1 = mul_add_epi8_sse(q4b_2_1, q8b_2_1); + const __m128i p_1 = _mm_add_epi16(p16_1_0, p16_1_1); + const __m128i p_2 = _mm_add_epi16(p16_2_0, p16_2_1); + const __m256 p = sum_i16_pairs_float(p_2, p_1); + + const __m256 deltas = quad_fp16_delta_float(x[ib].d, y[ib].d, x[ib + 1].d, y[ib + 1].d); + accum = _mm256_add_ps(_mm256_mul_ps(deltas, p), accum); + } + + sumf = hsum_float_8(accum); +#elif defined(__SSSE3__) + // set constants + const __m128i lowMask = _mm_set1_epi8(0xF); + const __m128i off = _mm_set1_epi8(8); + + // Initialize accumulator with zeros + __m128 acc_0 = _mm_setzero_ps(); + __m128 acc_1 = _mm_setzero_ps(); + __m128 acc_2 = _mm_setzero_ps(); + __m128 acc_3 = _mm_setzero_ps(); + + for (; ib + 1 < nb; ib += 2) { + _mm_prefetch(&x[ib] + sizeof(block_q4_0), _MM_HINT_T0); + _mm_prefetch(&y[ib] + sizeof(block_q8_0), _MM_HINT_T0); + + // Compute combined scale for the block 0 and 1 + const __m128 d_0_1 = _mm_set1_ps( GGML_CPU_FP16_TO_FP32(x[ib].d) * GGML_CPU_FP16_TO_FP32(y[ib].d) ); + + const __m128i tmp_0_1 = _mm_loadu_si128((const __m128i *)x[ib].qs); + + __m128i bx_0 = _mm_and_si128(lowMask, tmp_0_1); + __m128i by_0 = _mm_loadu_si128((const __m128i *)y[ib].qs); + bx_0 = _mm_sub_epi8(bx_0, off); + const __m128i i32_0 = mul_sum_i8_pairs(bx_0, by_0); + + __m128i bx_1 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_0_1, 4)); + __m128i by_1 = _mm_loadu_si128((const __m128i *)(y[ib].qs + 16)); + bx_1 = _mm_sub_epi8(bx_1, off); + const __m128i i32_1 = mul_sum_i8_pairs(bx_1, by_1); + + _mm_prefetch(&x[ib] + 2 * sizeof(block_q4_0), _MM_HINT_T0); + _mm_prefetch(&y[ib] + 2 * sizeof(block_q8_0), _MM_HINT_T0); + + // Compute combined scale for the block 2 and 3 + const __m128 d_2_3 = _mm_set1_ps( GGML_CPU_FP16_TO_FP32(x[ib + 1].d) * GGML_CPU_FP16_TO_FP32(y[ib + 1].d) ); + + const __m128i tmp_2_3 = _mm_loadu_si128((const __m128i *)x[ib + 1].qs); + + __m128i bx_2 = _mm_and_si128(lowMask, tmp_2_3); + __m128i by_2 = _mm_loadu_si128((const __m128i *)y[ib + 1].qs); + bx_2 = _mm_sub_epi8(bx_2, off); + const 
__m128i i32_2 = mul_sum_i8_pairs(bx_2, by_2); + + __m128i bx_3 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_2_3, 4)); + __m128i by_3 = _mm_loadu_si128((const __m128i *)(y[ib + 1].qs + 16)); + bx_3 = _mm_sub_epi8(bx_3, off); + const __m128i i32_3 = mul_sum_i8_pairs(bx_3, by_3); + + // Convert int32_t to float + __m128 p0 = _mm_cvtepi32_ps(i32_0); + __m128 p1 = _mm_cvtepi32_ps(i32_1); + __m128 p2 = _mm_cvtepi32_ps(i32_2); + __m128 p3 = _mm_cvtepi32_ps(i32_3); + + // Apply the scale + __m128 p0_d = _mm_mul_ps( d_0_1, p0 ); + __m128 p1_d = _mm_mul_ps( d_0_1, p1 ); + __m128 p2_d = _mm_mul_ps( d_2_3, p2 ); + __m128 p3_d = _mm_mul_ps( d_2_3, p3 ); + + // Accumulate + acc_0 = _mm_add_ps(p0_d, acc_0); + acc_1 = _mm_add_ps(p1_d, acc_1); + acc_2 = _mm_add_ps(p2_d, acc_2); + acc_3 = _mm_add_ps(p3_d, acc_3); + } + + sumf = hsum_float_4x4(acc_0, acc_1, acc_2, acc_3); + +#endif + for (; ib < nb; ++ib) { + int sumi0 = 0; + int sumi1 = 0; + + for (int j = 0; j < qk/2; ++j) { + const int v0 = (x[ib].qs[j] & 0x0F) - 8; + const int v1 = (x[ib].qs[j] >> 4) - 8; + + sumi0 += (v0 * y[ib].qs[j]); + sumi1 += (v1 * y[ib].qs[j + qk/2]); + } + + int sumi = sumi0 + sumi1; + sumf += sumi*GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d); + } + + *s = sumf; +} + +void ggml_vec_dot_q4_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + const int qk = QK8_1; + const int nb = n / qk; + + assert(n % qk == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q4_1 * GGML_RESTRICT x = vx; + const block_q8_1 * GGML_RESTRICT y = vy; + + int ib = 0; + +#if defined(__AVX2__) || defined(__AVX__) + // Initialize accumulator with zeros + __m256 acc = _mm256_setzero_ps(); + + float summs = 0; + + // Main loop + for (; ib < nb; ++ib) { + const float d0 = GGML_CPU_FP16_TO_FP32(x[ib].d); + const float d1 = GGML_CPU_FP16_TO_FP32(y[ib].d); + + summs += GGML_CPU_FP16_TO_FP32(x[ib].m) * GGML_CPU_FP16_TO_FP32(y[ib].s); + + const __m256 d0v = _mm256_set1_ps( d0 ); + const __m256 d1v = _mm256_set1_ps( d1 ); + + // Compute combined scales + const __m256 d0d1 = _mm256_mul_ps( d0v, d1v ); + + // Load 16 bytes, and unpack 4 bit fields into bytes, making 32 bytes + const __m256i qx = bytes_from_nibbles_32(x[ib].qs); + const __m256i qy = _mm256_loadu_si256( (const __m256i *)y[ib].qs ); + + const __m256 xy = mul_sum_us8_pairs_float(qx, qy); + + // Accumulate d0*d1*x*y +#if defined(__AVX2__) + acc = _mm256_fmadd_ps( d0d1, xy, acc ); +#else + acc = _mm256_add_ps( _mm256_mul_ps( d0d1, xy ), acc ); +#endif + } + + *s = hsum_float_8(acc) + summs; +#else + UNUSED(nb); + UNUSED(x); + UNUSED(y); + UNUSED(ib); + ggml_vec_dot_q4_1_q8_1_generic(n, s, bs, vx, bx, vy, by, nrc); +#endif +} + +void ggml_vec_dot_mxfp4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + assert(n % QK_MXFP4 == 0); + static_assert(QK_MXFP4 == QK8_0, "QK_MXFP4 and QK8_0 must be the same"); + + const block_mxfp4 * GGML_RESTRICT x = vx; + const block_q8_0 * GGML_RESTRICT y = vy; + + const int nb = n / QK_MXFP4; + + int ib = 0; + float sumf = 0; + +#if defined __AVX2__ + + const __m128i values128 = _mm_loadu_si128((const __m128i*)kvalues_mxfp4); + const __m128i m4b = _mm_set1_epi8(0x0f); + const __m256i mone = _mm256_set1_epi16(1); + + __m256 accum1 = _mm256_setzero_ps(); +
__m256 accum2 = _mm256_setzero_ps(); + for (; ib + 1 < nb; ib += 2) { + const __m128i q4bits_1 = _mm_loadu_si128((const __m128i*)x[ib + 0].qs); + const __m128i q4bits_2 = _mm_loadu_si128((const __m128i*)x[ib + 1].qs); + const __m256i q8b_1 = _mm256_loadu_si256((const __m256i *)y[ib + 0].qs); + const __m256i q8b_2 = _mm256_loadu_si256((const __m256i *)y[ib + 1].qs); + const __m256i q4b_1 = MM256_SET_M128I(_mm_shuffle_epi8(values128, _mm_and_si128(_mm_srli_epi16(q4bits_1, 4), m4b)), + _mm_shuffle_epi8(values128, _mm_and_si128(q4bits_1, m4b))); + const __m256i q4b_2 = MM256_SET_M128I(_mm_shuffle_epi8(values128, _mm_and_si128(_mm_srli_epi16(q4bits_2, 4), m4b)), + _mm_shuffle_epi8(values128, _mm_and_si128(q4bits_2, m4b))); + const __m256i p16_1 = mul_add_epi8(q4b_1, q8b_1); + const __m256i p16_2 = mul_add_epi8(q4b_2, q8b_2); + const __m256i p_1 = _mm256_madd_epi16(p16_1, mone); + const __m256i p_2 = _mm256_madd_epi16(p16_2, mone); + accum1 = _mm256_fmadd_ps(_mm256_set1_ps(GGML_CPU_FP16_TO_FP32(y[ib + 0].d)*GGML_E8M0_TO_FP32_HALF(x[ib + 0].e)), + _mm256_cvtepi32_ps(p_1), accum1); + accum2 = _mm256_fmadd_ps(_mm256_set1_ps(GGML_CPU_FP16_TO_FP32(y[ib + 1].d)*GGML_E8M0_TO_FP32_HALF(x[ib + 1].e)), + _mm256_cvtepi32_ps(p_2), accum2); + } + + sumf = hsum_float_8(_mm256_add_ps(accum1, accum2)); + +#elif defined __AVX__ + const __m128i values128 = _mm_loadu_si128((const __m128i*)kvalues_mxfp4); + const __m128i m4b = _mm_set1_epi8(0x0f); + + __m256 accum = _mm256_setzero_ps(); + for (; ib + 1 < nb; ib += 2) { + const __m128i q4bits_1 = _mm_loadu_si128((const __m128i *)x[ib + 0].qs); + const __m128i q4bits_2 = _mm_loadu_si128((const __m128i *)x[ib + 1].qs); + const __m128i q8b_1_0 = _mm_loadu_si128((const __m128i *)y[ib + 0].qs); + const __m128i q8b_1_1 = _mm_loadu_si128((const __m128i *)y[ib + 0].qs + 1); + const __m128i q8b_2_0 = _mm_loadu_si128((const __m128i *)y[ib + 1].qs); + const __m128i q8b_2_1 = _mm_loadu_si128((const __m128i *)y[ib + 1].qs + 1); + + const __m128i q4b_1_0 = _mm_shuffle_epi8(values128, _mm_and_si128(q4bits_1, m4b)); + const __m128i q4b_1_1 = _mm_shuffle_epi8(values128, _mm_and_si128(_mm_srli_epi16(q4bits_1, 4), m4b)); + const __m128i q4b_2_0 = _mm_shuffle_epi8(values128, _mm_and_si128(q4bits_2, m4b)); + const __m128i q4b_2_1 = _mm_shuffle_epi8(values128, _mm_and_si128(_mm_srli_epi16(q4bits_2, 4), m4b)); + + const __m256 p = mul_sum_i8_quad_float(q4b_1_0, q4b_1_1, q4b_2_0, q4b_2_1, q8b_1_0, q8b_1_1, q8b_2_0, q8b_2_1); + const __m256 deltas = quad_mx_delta_float(x[ib].e, y[ib].d, x[ib + 1].e, y[ib + 1].d); + accum = _mm256_add_ps(_mm256_mul_ps(deltas, p), accum); + } + + sumf = hsum_float_8(accum); + +#endif + for (; ib < nb; ++ib) { + const float d = GGML_CPU_FP16_TO_FP32(y[ib].d)*GGML_E8M0_TO_FP32_HALF(x[ib].e); + int sumi1 = 0; + int sumi2 = 0; + for (int j = 0; j < QK_MXFP4/2; ++j) { + sumi1 += y[ib].qs[j + 0] * kvalues_mxfp4[x[ib].qs[j] & 0xf]; + sumi2 += y[ib].qs[j + QK_MXFP4/2] * kvalues_mxfp4[x[ib].qs[j] >> 4]; + } + sumf += d * (sumi1 + sumi2); + } + *s = sumf; +} + +void ggml_vec_dot_q5_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + const int qk = QK8_0; + const int nb = n / qk; + + int ib = 0; + + assert(n % qk == 0); + assert(qk == QK5_0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q5_0 * GGML_RESTRICT x = vx; + const block_q8_0 * GGML_RESTRICT y = vy; + +#if defined(__AVX2__) + // Initialize accumulator with zeros + __m256 
acc = _mm256_setzero_ps(); + + // Main loop + for (; ib < nb; ++ib) { + /* Compute combined scale for the block */ + const __m256 d = _mm256_set1_ps(GGML_CPU_FP16_TO_FP32(x[ib].d) * GGML_CPU_FP16_TO_FP32(y[ib].d)); + + __m256i qx = bytes_from_nibbles_32(x[ib].qs); + __m256i bxhi = bytes_from_bits_32(x[ib].qh); + bxhi = _mm256_andnot_si256(bxhi, _mm256_set1_epi8((char)0xF0)); + qx = _mm256_or_si256(qx, bxhi); + + __m256i qy = _mm256_loadu_si256((const __m256i *)y[ib].qs); + + const __m256 q = mul_sum_i8_pairs_float(qx, qy); + + /* Multiply q with scale and accumulate */ + acc = _mm256_fmadd_ps(d, q, acc); + } + + *s = hsum_float_8(acc); +#elif defined(__AVX__) + // Initialize accumulator with zeros + __m256 acc = _mm256_setzero_ps(); + __m128i mask = _mm_set1_epi8((char)0xF0); + + // Main loop + for (; ib < nb; ++ib) { + /* Compute combined scale for the block */ + const __m256 d = _mm256_set1_ps(GGML_CPU_FP16_TO_FP32(x[ib].d) * GGML_CPU_FP16_TO_FP32(y[ib].d)); + + __m256i bx_0 = bytes_from_nibbles_32(x[ib].qs); + const __m256i bxhi = bytes_from_bits_32(x[ib].qh); + __m128i bxhil = _mm256_castsi256_si128(bxhi); + __m128i bxhih = _mm256_extractf128_si256(bxhi, 1); + bxhil = _mm_andnot_si128(bxhil, mask); + bxhih = _mm_andnot_si128(bxhih, mask); + __m128i bxl = _mm256_castsi256_si128(bx_0); + __m128i bxh = _mm256_extractf128_si256(bx_0, 1); + bxl = _mm_or_si128(bxl, bxhil); + bxh = _mm_or_si128(bxh, bxhih); + bx_0 = MM256_SET_M128I(bxh, bxl); + + const __m256i by_0 = _mm256_loadu_si256((const __m256i *)y[ib].qs); + + const __m256 q = mul_sum_i8_pairs_float(bx_0, by_0); + + /* Multiply q with scale and accumulate */ + acc = _mm256_add_ps(_mm256_mul_ps(d, q), acc); + } + + *s = hsum_float_8(acc); +#else + UNUSED(nb); + UNUSED(ib); + UNUSED(x); + UNUSED(y); + ggml_vec_dot_q5_0_q8_0_generic(n, s, bs, vx, bx, vy, by, nrc); +#endif +} + +void ggml_vec_dot_q5_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + const int qk = QK8_1; + const int nb = n / qk; + + int ib = 0; + + assert(n % qk == 0); + assert(qk == QK5_1); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q5_1 * GGML_RESTRICT x = vx; + const block_q8_1 * GGML_RESTRICT y = vy; + +#if defined(__AVX2__) + // Initialize accumulator with zeros + __m256 acc = _mm256_setzero_ps(); + + float summs = 0.0f; + + // Main loop + for (; ib < nb; ++ib) { + const __m256 dx = _mm256_set1_ps(GGML_CPU_FP16_TO_FP32(x[ib].d)); + + summs += GGML_CPU_FP16_TO_FP32(x[ib].m) * GGML_CPU_FP16_TO_FP32(y[ib].s); + + __m256i qx = bytes_from_nibbles_32(x[ib].qs); + __m256i bxhi = bytes_from_bits_32(x[ib].qh); + bxhi = _mm256_and_si256(bxhi, _mm256_set1_epi8(0x10)); + qx = _mm256_or_si256(qx, bxhi); + + const __m256 dy = _mm256_set1_ps(GGML_CPU_FP16_TO_FP32(y[ib].d)); + const __m256i qy = _mm256_loadu_si256((const __m256i *)y[ib].qs); + + const __m256 q = mul_sum_us8_pairs_float(qx, qy); + + acc = _mm256_fmadd_ps(q, _mm256_mul_ps(dx, dy), acc); + } + + *s = hsum_float_8(acc) + summs; +#elif defined(__AVX__) + // Initialize accumulator with zeros + __m256 acc = _mm256_setzero_ps(); + __m128i mask = _mm_set1_epi8(0x10); + + float summs = 0.0f; + + // Main loop + for (; ib < nb; ++ib) { + const __m256 dx = _mm256_set1_ps(GGML_CPU_FP16_TO_FP32(x[ib].d)); + + summs += GGML_CPU_FP16_TO_FP32(x[ib].m) * GGML_CPU_FP16_TO_FP32(y[ib].s); + + __m256i bx_0 = bytes_from_nibbles_32(x[ib].qs); + const __m256i bxhi = bytes_from_bits_32(x[ib].qh); + 
__m128i bxhil = _mm256_castsi256_si128(bxhi); + __m128i bxhih = _mm256_extractf128_si256(bxhi, 1); + bxhil = _mm_and_si128(bxhil, mask); + bxhih = _mm_and_si128(bxhih, mask); + __m128i bxl = _mm256_castsi256_si128(bx_0); + __m128i bxh = _mm256_extractf128_si256(bx_0, 1); + bxl = _mm_or_si128(bxl, bxhil); + bxh = _mm_or_si128(bxh, bxhih); + bx_0 = MM256_SET_M128I(bxh, bxl); + + const __m256 dy = _mm256_set1_ps(GGML_CPU_FP16_TO_FP32(y[ib].d)); + const __m256i by_0 = _mm256_loadu_si256((const __m256i *)y[ib].qs); + + const __m256 q = mul_sum_us8_pairs_float(bx_0, by_0); + + acc = _mm256_add_ps(_mm256_mul_ps(q, _mm256_mul_ps(dx, dy)), acc); + } + + *s = hsum_float_8(acc) + summs; +#else + UNUSED(nb); + UNUSED(ib); + UNUSED(x); + UNUSED(y); + ggml_vec_dot_q5_1_q8_1_generic(n, s, bs, vx, bx, vy, by, nrc); +#endif +} + +void ggml_vec_dot_q8_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + const int qk = QK8_0; + const int nb = n / qk; + + assert(n % qk == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q8_0 * GGML_RESTRICT x = vx; + const block_q8_0 * GGML_RESTRICT y = vy; + + int ib = 0; + float sumf = 0; + +#if defined(__AVX2__) + // Initialize accumulator with zeros + __m256 acc = _mm256_setzero_ps(); + + // Main loop + for (; ib < nb; ++ib) { + // Compute combined scale for the block + const __m256 d = _mm256_set1_ps(GGML_CPU_FP16_TO_FP32(x[ib].d) * GGML_CPU_FP16_TO_FP32(y[ib].d)); + __m256i qx = _mm256_loadu_si256((const __m256i *)x[ib].qs); + __m256i qy = _mm256_loadu_si256((const __m256i *)y[ib].qs); + + const __m256 q = mul_sum_i8_pairs_float(qx, qy); + + // Multiply q with scale and accumulate + acc = _mm256_fmadd_ps( d, q, acc ); + } + + sumf = hsum_float_8(acc); +#elif defined(__AVX__) + __m256 accum = _mm256_setzero_ps(); + + for (; ib + 1 < nb; ib += 2) { + const __m128i qx_1_0 = _mm_loadu_si128((const __m128i *)x[ib].qs); + const __m128i qx_1_1 = _mm_loadu_si128((const __m128i *)x[ib].qs + 1); + const __m128i qx_2_0 = _mm_loadu_si128((const __m128i *)x[ib + 1].qs); + const __m128i qx_2_1 = _mm_loadu_si128((const __m128i *)x[ib + 1].qs + 1); + const __m128i qy_1_0 = _mm_loadu_si128((const __m128i *)y[ib].qs); + const __m128i qy_1_1 = _mm_loadu_si128((const __m128i *)y[ib].qs + 1); + const __m128i qy_2_0 = _mm_loadu_si128((const __m128i *)y[ib + 1].qs); + const __m128i qy_2_1 = _mm_loadu_si128((const __m128i *)y[ib + 1].qs + 1); + + const __m256 p = mul_sum_i8_quad_float(qx_1_0, qx_1_1, qx_2_0, qx_2_1, qy_1_0, qy_1_1, qy_2_0, qy_2_1); + const __m256 deltas = quad_fp16_delta_float(x[ib].d, y[ib].d, x[ib + 1].d, y[ib + 1].d); + accum = _mm256_add_ps(_mm256_mul_ps(deltas, p), accum); + } + + sumf = hsum_float_8(accum); +#endif + for (; ib < nb; ++ib) { + int sumi = 0; + + for (int j = 0; j < qk; j++) { + sumi += x[ib].qs[j]*y[ib].qs[j]; + } + + sumf += sumi*(GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d)); + } + + *s = sumf; +} + +void ggml_vec_dot_tq1_0_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_tq1_0 * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + +#if defined(__AVX2__) + __m256 sumf = _mm256_setzero_ps(); + + for (int i = 0; i < nb; ++i) { + // 16-bit sums + __m256i sumi0 = 
_mm256_setzero_si256(); + __m256i sumi1 = _mm256_setzero_si256(); + __m256i sumi2 = _mm256_setzero_si256(); + + // first 32 bytes: 5 ternary elements packed per byte + { + __m256i qx0 = _mm256_loadu_si256((const __m256i *) (x[i].qs)); + // 8-bit multiplies with shifts, masks and adds + __m256i qx1 = _mm256_add_epi8(qx0, _mm256_add_epi8(qx0, qx0)); // 1 * 3 + __m256i qx2 = _mm256_add_epi8(_mm256_and_si256(_mm256_slli_epi16(qx0, 3), _mm256_set1_epi8(-8)), qx0); // 1 * 9 + __m256i qx3 = _mm256_add_epi8(_mm256_and_si256(_mm256_slli_epi16(qx1, 3), _mm256_set1_epi8(-8)), qx1); // 3 * 9 + __m256i qx4 = _mm256_add_epi8(_mm256_and_si256(_mm256_slli_epi16(qx2, 3), _mm256_set1_epi8(-8)), qx2); // 9 * 9 + + // TODO: can _mm256_mulhi_epu16 be faster even if 16-bits? + + // Cancel the +1 from avg so that it behaves like a halving add + qx0 = _mm256_subs_epu8(qx0, _mm256_set1_epi8(1)); + qx1 = _mm256_subs_epu8(qx1, _mm256_set1_epi8(1)); + qx2 = _mm256_subs_epu8(qx2, _mm256_set1_epi8(1)); + qx3 = _mm256_subs_epu8(qx3, _mm256_set1_epi8(1)); + qx4 = _mm256_subs_epu8(qx4, _mm256_set1_epi8(1)); + // Multiply by 3 and get the top 2 bits + qx0 = _mm256_avg_epu8(qx0, _mm256_avg_epu8(qx0, _mm256_setzero_si256())); + qx1 = _mm256_avg_epu8(qx1, _mm256_avg_epu8(qx1, _mm256_setzero_si256())); + qx2 = _mm256_avg_epu8(qx2, _mm256_avg_epu8(qx2, _mm256_setzero_si256())); + qx3 = _mm256_avg_epu8(qx3, _mm256_avg_epu8(qx3, _mm256_setzero_si256())); + qx4 = _mm256_avg_epu8(qx4, _mm256_avg_epu8(qx4, _mm256_setzero_si256())); + qx0 = _mm256_and_si256(_mm256_srli_epi16(qx0, 6), _mm256_set1_epi8(3)); + qx1 = _mm256_and_si256(_mm256_srli_epi16(qx1, 6), _mm256_set1_epi8(3)); + qx2 = _mm256_and_si256(_mm256_srli_epi16(qx2, 6), _mm256_set1_epi8(3)); + qx3 = _mm256_and_si256(_mm256_srli_epi16(qx3, 6), _mm256_set1_epi8(3)); + qx4 = _mm256_and_si256(_mm256_srli_epi16(qx4, 6), _mm256_set1_epi8(3)); + + const __m256i qy0 = _mm256_loadu_si256((const __m256i *) (y[i].qs + 0)); + const __m256i qy1 = _mm256_loadu_si256((const __m256i *) (y[i].qs + 32)); + const __m256i qy2 = _mm256_loadu_si256((const __m256i *) (y[i].qs + 64)); + const __m256i qy3 = _mm256_loadu_si256((const __m256i *) (y[i].qs + 96)); + const __m256i qy4 = _mm256_loadu_si256((const __m256i *) (y[i].qs + 128)); + + qx0 = _mm256_maddubs_epi16(qx0, qy0); + qx1 = _mm256_maddubs_epi16(qx1, qy1); + qx2 = _mm256_maddubs_epi16(qx2, qy2); + qx3 = _mm256_maddubs_epi16(qx3, qy3); + qx4 = _mm256_maddubs_epi16(qx4, qy4); + + sumi0 = _mm256_add_epi16(sumi0, _mm256_add_epi16(qx0, qx1)); + sumi1 = _mm256_add_epi16(sumi1, _mm256_add_epi16(qx2, qx3)); + sumi2 = _mm256_add_epi16(sumi2, qx4); + } + + // last 16 bytes with 5 elements per byte, along with the 4 qh bytes holding 4 elements each + { + __m128i qx0 = _mm_loadu_si128((const __m128i *) (x[i].qs + 32)); + uint32_t qh; + memcpy(&qh, x[i].qh, sizeof(qh)); // potentially unaligned + __m256i qx5_l = _mm256_cvtepu8_epi16(_mm_set1_epi32(qh)); + __m128i qx1 = _mm_add_epi8(qx0, _mm_add_epi8(qx0, qx0)); // 1 * 3 + __m128i qx2 = _mm_add_epi8(_mm_and_si128(_mm_slli_epi16(qx0, 3), _mm_set1_epi8(-8)), qx0); // 1 * 9 + __m128i qx3 = _mm_add_epi8(_mm_and_si128(_mm_slli_epi16(qx1, 3), _mm_set1_epi8(-8)), qx1); // 3 * 9 + __m128i qx4 = _mm_add_epi8(_mm_and_si128(_mm_slli_epi16(qx2, 3), _mm_set1_epi8(-8)), qx2); // 9 * 9 + __m256i qx01 = MM256_SET_M128I(qx1, qx0); + __m256i qx23 = MM256_SET_M128I(qx3, qx2); + + // AVX2 does not have 8-bit multiplies, so 16-bit it is.
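+ // each qh byte effectively stores 4 ternary digits in base-3 fixed point;
+ // scaling a byte by 3^k (the 1, 3, 9, 27 factors below) moves digit k into the
+ // top position so the shared "multiply by 3 and take the top 2 bits" step extracts it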
+ qx5_l = _mm256_mullo_epi16(qx5_l, _mm256_set_epi16(27, 27, 27, 27, 9, 9, 9, 9, 3, 3, 3, 3, 1, 1, 1, 1)); + qx5_l = _mm256_and_si256(qx5_l, _mm256_set1_epi16(0xFF)); + __m128i qx5 = _mm_packus_epi16(_mm256_castsi256_si128(qx5_l), _mm256_extracti128_si256(qx5_l, 1)); + + __m256i qx45 = MM256_SET_M128I(qx5, qx4); + + // Cancel the +1 from avg so that it behaves like a halving add + qx01 = _mm256_subs_epu8(qx01, _mm256_set1_epi8(1)); + qx23 = _mm256_subs_epu8(qx23, _mm256_set1_epi8(1)); + qx45 = _mm256_subs_epu8(qx45, _mm256_set1_epi8(1)); + // Multiply by 3 and get the top 2 bits + qx01 = _mm256_avg_epu8(qx01, _mm256_avg_epu8(qx01, _mm256_setzero_si256())); + qx23 = _mm256_avg_epu8(qx23, _mm256_avg_epu8(qx23, _mm256_setzero_si256())); + qx45 = _mm256_avg_epu8(qx45, _mm256_avg_epu8(qx45, _mm256_setzero_si256())); + qx01 = _mm256_and_si256(_mm256_srli_epi16(qx01, 6), _mm256_set1_epi8(3)); + qx23 = _mm256_and_si256(_mm256_srli_epi16(qx23, 6), _mm256_set1_epi8(3)); + qx45 = _mm256_and_si256(_mm256_srli_epi16(qx45, 6), _mm256_set1_epi8(3)); + + const __m256i qy01 = _mm256_loadu_si256((const __m256i *) (y[i].qs + 160)); + const __m256i qy23 = _mm256_loadu_si256((const __m256i *) (y[i].qs + 192)); + const __m256i qy45 = _mm256_loadu_si256((const __m256i *) (y[i].qs + 224)); + + qx01 = _mm256_maddubs_epi16(qx01, qy01); + qx23 = _mm256_maddubs_epi16(qx23, qy23); + qx45 = _mm256_maddubs_epi16(qx45, qy45); + + sumi0 = _mm256_add_epi16(sumi0, qx01); + sumi1 = _mm256_add_epi16(sumi1, qx23); + sumi2 = _mm256_add_epi16(sumi2, qx45); + } + + const __m256i ysum = _mm256_loadu_si256((const __m256i *) y[i].bsums); + const __m256 d = _mm256_set1_ps(y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d)); + + sumi0 = _mm256_sub_epi16(sumi0, ysum); + sumi0 = _mm256_add_epi16(sumi0, _mm256_add_epi16(sumi1, sumi2)); + sumi0 = _mm256_madd_epi16(sumi0, _mm256_set1_epi16(1)); + + sumf = _mm256_add_ps(_mm256_mul_ps(_mm256_cvtepi32_ps(sumi0), d), sumf); + } + + *s = hsum_float_8(sumf); + +#else + UNUSED(x); + UNUSED(y); + UNUSED(nb); + ggml_vec_dot_tq1_0_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); +#endif +} + +void ggml_vec_dot_tq2_0_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_tq2_0 * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + +#if defined(__AVX2__) + __m256 sumf = _mm256_setzero_ps(); + + for (int i = 0; i < nb; ++i) { + // 16-bit sums, because 256*127 still fits + __m256i sumi0 = _mm256_setzero_si256(); + __m256i sumi1 = _mm256_setzero_si256(); + + for (size_t j = 0; j < sizeof(x->qs); j += 32) { + __m256i qx0 = _mm256_loadu_si256((const __m256i *) (x[i].qs + j)); + __m256i qx1 = _mm256_srli_epi16(qx0, 2); + __m256i qx2 = _mm256_srli_epi16(qx0, 4); + __m256i qx3 = _mm256_srli_epi16(qx0, 6); + + // 0, 1, 2 (should not be 3) + qx0 = _mm256_and_si256(qx0, _mm256_set1_epi8(3)); + qx1 = _mm256_and_si256(qx1, _mm256_set1_epi8(3)); + qx2 = _mm256_and_si256(qx2, _mm256_set1_epi8(3)); + qx3 = _mm256_and_si256(qx3, _mm256_set1_epi8(3)); + + const __m256i qy0 = _mm256_loadu_si256((const __m256i *) (y[i].qs + j*4 + 0)); + const __m256i qy1 = _mm256_loadu_si256((const __m256i *) (y[i].qs + j*4 + 32)); + const __m256i qy2 = _mm256_loadu_si256((const __m256i *) (y[i].qs + j*4 + 64)); + const __m256i qy3 = _mm256_loadu_si256((const __m256i *) (y[i].qs + j*4 + 96)); + + qx0 = _mm256_maddubs_epi16(qx0, qy0); 
+ qx1 = _mm256_maddubs_epi16(qx1, qy1); + qx2 = _mm256_maddubs_epi16(qx2, qy2); + qx3 = _mm256_maddubs_epi16(qx3, qy3); + + sumi0 = _mm256_add_epi16(sumi0, _mm256_add_epi16(qx0, qx1)); + sumi1 = _mm256_add_epi16(sumi1, _mm256_add_epi16(qx2, qx3)); + } + + const __m256i ysum = _mm256_loadu_si256((const __m256i *) y[i].bsums); + const __m256 d = _mm256_set1_ps(y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d)); + + sumi0 = _mm256_add_epi16(sumi0, sumi1); + sumi0 = _mm256_sub_epi16(sumi0, ysum); + sumi0 = _mm256_madd_epi16(sumi0, _mm256_set1_epi16(1)); + + sumf = _mm256_add_ps(_mm256_mul_ps(_mm256_cvtepi32_ps(sumi0), d), sumf); + } + + *s = hsum_float_8(sumf); + +#else + UNUSED(x); + UNUSED(y); + UNUSED(nb); + ggml_vec_dot_tq2_0_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); +#endif +} + +void ggml_vec_dot_q2_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q2_K * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + +#if defined __AVX2__ + + const __m256i m3 = _mm256_set1_epi8(3); + const __m128i m4 = _mm_set1_epi8(0xF); + + __m256 acc = _mm256_setzero_ps(); + + for (int i = 0; i < nb; ++i) { + + const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); + const float dmin = -y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin); + + const uint8_t * GGML_RESTRICT q2 = x[i].qs; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + + const __m128i mins_and_scales = _mm_loadu_si128((const __m128i*)x[i].scales); + const __m128i scales8 = _mm_and_si128(mins_and_scales, m4); + const __m128i mins8 = _mm_and_si128(_mm_srli_epi16(mins_and_scales, 4), m4); + const __m256i mins = _mm256_cvtepi8_epi16(mins8); + const __m256i prod = _mm256_madd_epi16(mins, _mm256_loadu_si256((const __m256i*)y[i].bsums)); + + acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&dmin), _mm256_cvtepi32_ps(prod), acc); + + const __m256i all_scales = _mm256_cvtepi8_epi16(scales8); + const __m128i l_scales = _mm256_extracti128_si256(all_scales, 0); + const __m128i h_scales = _mm256_extracti128_si256(all_scales, 1); + const __m256i scales[2] = {MM256_SET_M128I(l_scales, l_scales), MM256_SET_M128I(h_scales, h_scales)}; + + __m256i sumi = _mm256_setzero_si256(); + + for (int j = 0; j < QK_K/128; ++j) { + + const __m256i q2bits = _mm256_loadu_si256((const __m256i*)q2); q2 += 32; + + const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; + const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; + const __m256i q8_2 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; + const __m256i q8_3 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; + + const __m256i q2_0 = _mm256_and_si256(q2bits, m3); + const __m256i q2_1 = _mm256_and_si256(_mm256_srli_epi16(q2bits, 2), m3); + const __m256i q2_2 = _mm256_and_si256(_mm256_srli_epi16(q2bits, 4), m3); + const __m256i q2_3 = _mm256_and_si256(_mm256_srli_epi16(q2bits, 6), m3); + + __m256i p0 = _mm256_maddubs_epi16(q2_0, q8_0); + __m256i p1 = _mm256_maddubs_epi16(q2_1, q8_1); + __m256i p2 = _mm256_maddubs_epi16(q2_2, q8_2); + __m256i p3 = _mm256_maddubs_epi16(q2_3, q8_3); + + p0 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(0)), p0); + p1 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(1)), p1); + p2 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(2)), p2); + p3 = 
_mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(3)), p3); + + p0 = _mm256_add_epi32(p0, p1); + p2 = _mm256_add_epi32(p2, p3); + + sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p0, p2)); + } + + acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi), acc); + + } + + *s = hsum_float_8(acc); + +#elif defined __AVX__ + + const __m128i m3 = _mm_set1_epi8(0x3); + const __m128i m4 = _mm_set1_epi8(0xF); + const __m128i m2 = _mm_set1_epi8(0x2); + + __m256 acc = _mm256_setzero_ps(); + + for (int i = 0; i < nb; ++i) { + + const float dall = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); + const float dmin = -y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin); + + const uint8_t * GGML_RESTRICT q2 = x[i].qs; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + + // load mins and scales from block_q2_K.scales[QK_K/16] + const __m128i mins_and_scales = _mm_loadu_si128((const __m128i*)x[i].scales); + const __m128i scales16 = _mm_and_si128(mins_and_scales, m4); + const __m128i mins16 = _mm_and_si128(_mm_srli_epi16(mins_and_scales, 4), m4); + const __m128i mins_0 = _mm_cvtepi8_epi16(mins16); + const __m128i mins_1 = _mm_cvtepi8_epi16(_mm_unpackhi_epi64(mins16, mins16)); + + // summs = y[i].bsums * (x[i].scales >> 4) in 16bits*8*2 to 32bits*4*2 + const __m128i summs_0 = _mm_madd_epi16(mins_0, _mm_loadu_si128((const __m128i*)&y[i].bsums[0])); + const __m128i summs_1 = _mm_madd_epi16(mins_1, _mm_loadu_si128((const __m128i*)&y[i].bsums[8])); + + // sumf += -dmin * summs in 32bits*8 + acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&dmin), _mm256_cvtepi32_ps(MM256_SET_M128I(summs_1, summs_0))), acc); + + const __m128i scales_0 = _mm_cvtepi8_epi16(scales16); + const __m128i scales_1 = _mm_cvtepi8_epi16(_mm_unpackhi_epi64(scales16, scales16)); + const __m128i scales[2] = { scales_0, scales_1 }; + + __m128i sumi_0 = _mm_setzero_si128(); + __m128i sumi_1 = _mm_setzero_si128(); + + for (int j = 0; j < QK_K/128; ++j) { + + // load Q8 quants int8*16*8 from block_q8_K.qs[QK_K] + const __m128i q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; + const __m128i q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; + const __m128i q8_2 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; + const __m128i q8_3 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; + const __m128i q8_4 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; + const __m128i q8_5 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; + const __m128i q8_6 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; + const __m128i q8_7 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; + + // load 2bits*16*8 from block_q2_K.qs[QK_K/4] + __m128i q2bits = _mm_loadu_si128((const __m128i*)q2); q2 += 16; + const __m128i q2_0 = _mm_and_si128(q2bits, m3); + const __m128i q2_2 = _mm_and_si128(_mm_srli_epi16(q2bits, 2), m3); + const __m128i q2_4 = _mm_and_si128(_mm_srli_epi16(q2bits, 4), m3); + const __m128i q2_6 = _mm_and_si128(_mm_srli_epi16(q2bits, 6), m3); + q2bits = _mm_loadu_si128((const __m128i*)q2); q2 += 16; + const __m128i q2_1 = _mm_and_si128(q2bits, m3); + const __m128i q2_3 = _mm_and_si128(_mm_srli_epi16(q2bits, 2), m3); + const __m128i q2_5 = _mm_and_si128(_mm_srli_epi16(q2bits, 4), m3); + const __m128i q2_7 = _mm_and_si128(_mm_srli_epi16(q2bits, 6), m3); + + // isuml = q8[l] * ((q2[l] >> shift) & 3) in 8bits*16*8 to 16bits*8*8 + __m128i p0 = _mm_maddubs_epi16(q2_0, q8_0); + __m128i p1 = _mm_maddubs_epi16(q2_1, q8_1); + __m128i p2 = _mm_maddubs_epi16(q2_2, q8_2); + __m128i p3 = _mm_maddubs_epi16(q2_3, q8_3); + __m128i p4 = _mm_maddubs_epi16(q2_4, 
q8_4); + __m128i p5 = _mm_maddubs_epi16(q2_5, q8_5); + __m128i p6 = _mm_maddubs_epi16(q2_6, q8_6); + __m128i p7 = _mm_maddubs_epi16(q2_7, q8_7); + + // isum += (x[i].scales[is++] & 0xF) * isuml in 16bits*8*8 to 32bits*4*8 + __m128i shuffle = _mm_set1_epi16(0x0100); + p0 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p0); + shuffle = _mm_add_epi16(shuffle, m2); + p1 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p1); + shuffle = _mm_add_epi16(shuffle, m2); + p2 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p2); + shuffle = _mm_add_epi16(shuffle, m2); + p3 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p3); + shuffle = _mm_add_epi16(shuffle, m2); + p4 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p4); + shuffle = _mm_add_epi16(shuffle, m2); + p5 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p5); + shuffle = _mm_add_epi16(shuffle, m2); + p6 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p6); + shuffle = _mm_add_epi16(shuffle, m2); + p7 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p7); + + p0 = _mm_add_epi32(p0, p1); + p2 = _mm_add_epi32(p2, p3); + p4 = _mm_add_epi32(p4, p5); + p6 = _mm_add_epi32(p6, p7); + + // isum in 32bits*4*2 + sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p0, p2)); + sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p4, p6)); + } + + // sumf += dall * isum - dmin * summs in 32bits + __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0); + acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&dall), _mm256_cvtepi32_ps(sumi)), acc); + } + + *s = hsum_float_8(acc); + +#else + UNUSED(x); + UNUSED(y); + UNUSED(nb); + ggml_vec_dot_q2_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); +#endif +} + +void ggml_vec_dot_q3_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const uint32_t kmask1 = 0x03030303; + const uint32_t kmask2 = 0x0f0f0f0f; + + const block_q3_K * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + +#if defined __AVX2__ + + const __m256i m3 = _mm256_set1_epi8(3); + const __m256i mone = _mm256_set1_epi8(1); + const __m128i m32 = _mm_set1_epi8(32); + + __m256 acc = _mm256_setzero_ps(); + + uint32_t aux[3]; + + for (int i = 0; i < nb; ++i) { + + const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); + + const uint8_t * GGML_RESTRICT q3 = x[i].qs; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + + // Set up scales + memcpy(aux, x[i].scales, 12); + __m128i scales128 = _mm_set_epi32( + ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4), + ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4), + (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4), + (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4)); + scales128 = _mm_sub_epi8(scales128, m32); + const __m256i all_scales = _mm256_cvtepi8_epi16(scales128); + const __m128i l_scales = _mm256_extracti128_si256(all_scales, 0); + const __m128i h_scales = _mm256_extracti128_si256(all_scales, 1); + const __m256i scales[2] = {MM256_SET_M128I(l_scales, l_scales), MM256_SET_M128I(h_scales, h_scales)}; + + // high bit + const __m256i hbits = _mm256_loadu_si256((const __m256i*)x[i].hmask); + + // integer accumulator + __m256i sumi = _mm256_setzero_si256(); + + int bit = 0; + int is = 0; + + for (int j = 0; j < QK_K/128; ++j) { + // load low 2 bits + const __m256i q3bits = _mm256_loadu_si256((const __m256i*)q3); q3 += 32; + + 
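+            // q3h_* below ends up as 4 wherever the hmask bit is clear (the andnot inverts
+            // the bit, the trailing <<2 scales it to 4); subtracting q8*q3h from q8*q3l then
+            // applies the q3_K offset of -4 to quants whose high bit is not set.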
// prepare low and high bits + const __m256i q3l_0 = _mm256_and_si256(q3bits, m3); + const __m256i q3h_0 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, bit)), bit), 2); + ++bit; + + const __m256i q3l_1 = _mm256_and_si256(_mm256_srli_epi16(q3bits, 2), m3); + const __m256i q3h_1 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, bit)), bit), 2); + ++bit; + + const __m256i q3l_2 = _mm256_and_si256(_mm256_srli_epi16(q3bits, 4), m3); + const __m256i q3h_2 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, bit)), bit), 2); + ++bit; + + const __m256i q3l_3 = _mm256_and_si256(_mm256_srli_epi16(q3bits, 6), m3); + const __m256i q3h_3 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, bit)), bit), 2); + ++bit; + + // load Q8 quants + const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; + const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; + const __m256i q8_2 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; + const __m256i q8_3 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; + + // Dot product: we multiply the 2 low bits and 1 high bit part separately, so we can use _mm256_maddubs_epi16, + // and then subtract. The high bit part has the 2 already subtracted (and so, it is zero if the high bit was not set, + // and 2 if the high bit was set) + __m256i q8s_0 = _mm256_maddubs_epi16(q3h_0, q8_0); + __m256i q8s_1 = _mm256_maddubs_epi16(q3h_1, q8_1); + __m256i q8s_2 = _mm256_maddubs_epi16(q3h_2, q8_2); + __m256i q8s_3 = _mm256_maddubs_epi16(q3h_3, q8_3); + + __m256i p16_0 = _mm256_maddubs_epi16(q3l_0, q8_0); + __m256i p16_1 = _mm256_maddubs_epi16(q3l_1, q8_1); + __m256i p16_2 = _mm256_maddubs_epi16(q3l_2, q8_2); + __m256i p16_3 = _mm256_maddubs_epi16(q3l_3, q8_3); + + p16_0 = _mm256_sub_epi16(p16_0, q8s_0); + p16_1 = _mm256_sub_epi16(p16_1, q8s_1); + p16_2 = _mm256_sub_epi16(p16_2, q8s_2); + p16_3 = _mm256_sub_epi16(p16_3, q8s_3); + + // multiply with scales + p16_0 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(is + 0)), p16_0); + p16_1 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(is + 1)), p16_1); + p16_2 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(is + 2)), p16_2); + p16_3 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(is + 3)), p16_3); + + // accumulate + p16_0 = _mm256_add_epi32(p16_0, p16_1); + p16_2 = _mm256_add_epi32(p16_2, p16_3); + sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_0, p16_2)); + + } + + // multiply with block scale and accumulate + acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi), acc); + + } + + *s = hsum_float_8(acc); + +#elif defined __AVX__ + + const __m128i m3 = _mm_set1_epi8(3); + const __m128i mone = _mm_set1_epi8(1); + const __m128i m32 = _mm_set1_epi8(32); + const __m128i m2 = _mm_set1_epi8(2); + + __m256 acc = _mm256_setzero_ps(); + + const uint32_t *aux; + + for (int i = 0; i < nb; ++i) { + + const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); + + const uint8_t * GGML_RESTRICT q3 = x[i].qs; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + + // Set up scales + aux = (const uint32_t *)x[i].scales; + __m128i scales128 = _mm_set_epi32( + ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4), + ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4), + (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4), + (aux[0] & kmask2) | (((aux[2] >> 
0) & kmask1) << 4)); + scales128 = _mm_sub_epi8(scales128, m32); + const __m128i scales_0 = _mm_cvtepi8_epi16(scales128); + const __m128i scales_1 = _mm_cvtepi8_epi16(_mm_unpackhi_epi64(scales128, scales128)); + const __m128i scales[2] = { scales_0, scales_1 }; + + // high bit *128*2 from block_q3_K.hmask[QK_K/8] + const __m128i hbits_0 = _mm_loadu_si128((const __m128i*)&x[i].hmask[0]); + const __m128i hbits_1 = _mm_loadu_si128((const __m128i*)&x[i].hmask[16]); + + // integer accumulator + __m128i sumi_0 = _mm_setzero_si128(); + __m128i sumi_1 = _mm_setzero_si128(); + + for (int j = 0; j < QK_K/128; ++j) { + // load low 2 bits *64*2 from block_q3_K.qs[QK_K/4] + const __m128i q3bits_0 = _mm_loadu_si128((const __m128i*)q3); q3 += 16; + const __m128i q3bits_1 = _mm_loadu_si128((const __m128i*)q3); q3 += 16; + + // prepare low and high bits + const int bit = j << 2; + + const __m128i q3l_0 = _mm_and_si128(q3bits_0, m3); + const __m128i q3l_1 = _mm_and_si128(q3bits_1, m3); + const __m128i q3h_0 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_0, _mm_slli_epi16(mone, bit)), bit), 2); + const __m128i q3h_1 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_1, _mm_slli_epi16(mone, bit)), bit), 2); + + const __m128i q3l_2 = _mm_and_si128(_mm_srli_epi16(q3bits_0, 2), m3); + const __m128i q3l_3 = _mm_and_si128(_mm_srli_epi16(q3bits_1, 2), m3); + const __m128i q3h_2 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_0, _mm_slli_epi16(mone, bit+1)), bit+1), 2); + const __m128i q3h_3 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_1, _mm_slli_epi16(mone, bit+1)), bit+1), 2); + + const __m128i q3l_4 = _mm_and_si128(_mm_srli_epi16(q3bits_0, 4), m3); + const __m128i q3l_5 = _mm_and_si128(_mm_srli_epi16(q3bits_1, 4), m3); + const __m128i q3h_4 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_0, _mm_slli_epi16(mone, bit+2)), bit+2), 2); + const __m128i q3h_5 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_1, _mm_slli_epi16(mone, bit+2)), bit+2), 2); + + const __m128i q3l_6 = _mm_and_si128(_mm_srli_epi16(q3bits_0, 6), m3); + const __m128i q3l_7 = _mm_and_si128(_mm_srli_epi16(q3bits_1, 6), m3); + const __m128i q3h_6 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_0, _mm_slli_epi16(mone, bit+3)), bit+3), 2); + const __m128i q3h_7 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_1, _mm_slli_epi16(mone, bit+3)), bit+3), 2); + + // load Q8 quants from block_q8_K.qs[QK_K] + const __m128i q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; + const __m128i q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; + const __m128i q8_2 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; + const __m128i q8_3 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; + const __m128i q8_4 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; + const __m128i q8_5 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; + const __m128i q8_6 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; + const __m128i q8_7 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; + + // Dot product: we multiply the 2 low bits and 1 high bit part separately, so we can use _mm256_maddubs_epi16, + // and then subtract. 
The high bit part has the 2 already subtracted (and so, it is zero if the high bit was not set, + // and 2 if the high bit was set) + __m128i q8s_0 = _mm_maddubs_epi16(q3h_0, q8_0); + __m128i q8s_1 = _mm_maddubs_epi16(q3h_1, q8_1); + __m128i q8s_2 = _mm_maddubs_epi16(q3h_2, q8_2); + __m128i q8s_3 = _mm_maddubs_epi16(q3h_3, q8_3); + __m128i q8s_4 = _mm_maddubs_epi16(q3h_4, q8_4); + __m128i q8s_5 = _mm_maddubs_epi16(q3h_5, q8_5); + __m128i q8s_6 = _mm_maddubs_epi16(q3h_6, q8_6); + __m128i q8s_7 = _mm_maddubs_epi16(q3h_7, q8_7); + + __m128i p16_0 = _mm_maddubs_epi16(q3l_0, q8_0); + __m128i p16_1 = _mm_maddubs_epi16(q3l_1, q8_1); + __m128i p16_2 = _mm_maddubs_epi16(q3l_2, q8_2); + __m128i p16_3 = _mm_maddubs_epi16(q3l_3, q8_3); + __m128i p16_4 = _mm_maddubs_epi16(q3l_4, q8_4); + __m128i p16_5 = _mm_maddubs_epi16(q3l_5, q8_5); + __m128i p16_6 = _mm_maddubs_epi16(q3l_6, q8_6); + __m128i p16_7 = _mm_maddubs_epi16(q3l_7, q8_7); + + p16_0 = _mm_sub_epi16(p16_0, q8s_0); + p16_1 = _mm_sub_epi16(p16_1, q8s_1); + p16_2 = _mm_sub_epi16(p16_2, q8s_2); + p16_3 = _mm_sub_epi16(p16_3, q8s_3); + p16_4 = _mm_sub_epi16(p16_4, q8s_4); + p16_5 = _mm_sub_epi16(p16_5, q8s_5); + p16_6 = _mm_sub_epi16(p16_6, q8s_6); + p16_7 = _mm_sub_epi16(p16_7, q8s_7); + + // multiply with scales + __m128i shuffle = _mm_set1_epi16(0x0100); + p16_0 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_0); + shuffle = _mm_add_epi16(shuffle, m2); + p16_1 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_1); + shuffle = _mm_add_epi16(shuffle, m2); + p16_2 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_2); + shuffle = _mm_add_epi16(shuffle, m2); + p16_3 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_3); + shuffle = _mm_add_epi16(shuffle, m2); + p16_4 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_4); + shuffle = _mm_add_epi16(shuffle, m2); + p16_5 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_5); + shuffle = _mm_add_epi16(shuffle, m2); + p16_6 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_6); + shuffle = _mm_add_epi16(shuffle, m2); + p16_7 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_7); + + // accumulate + p16_0 = _mm_add_epi32(p16_0, p16_1); + p16_2 = _mm_add_epi32(p16_2, p16_3); + p16_4 = _mm_add_epi32(p16_4, p16_5); + p16_6 = _mm_add_epi32(p16_6, p16_7); + sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_0, p16_2)); + sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_4, p16_6)); + + } + + // multiply with block scale and accumulate + __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0); + acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi)), acc); + + } + + *s = hsum_float_8(acc); + +#else + UNUSED(kmask1); + UNUSED(kmask2); + UNUSED(x); + UNUSED(y); + UNUSED(nb); + ggml_vec_dot_q3_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); +#endif +} + +void ggml_vec_dot_q4_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q4_K * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + + static const uint32_t kmask1 = 0x3f3f3f3f; + static const uint32_t kmask2 = 0x0f0f0f0f; + static const uint32_t kmask3 = 0x03030303; + + uint32_t utmp[4]; + +#if defined __AVX2__ + + const __m256i m4 = _mm256_set1_epi8(0xF); + + __m256 acc = _mm256_setzero_ps(); + __m128 acc_m = _mm_setzero_ps(); + + 
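+    // Each q4_K super-block packs 8 sub-block scales and 8 mins as 6-bit values in
+    // 12 bytes; the kmask/utmp shuffling below unpacks them, and the mins are
+    // applied through y[i].bsums (scaled by -dmin) so the 4-bit quants can be fed
+    // to maddubs as unsigned values.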
for (int i = 0; i < nb; ++i) { + + const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); + const float dmin = -y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin); + + memcpy(utmp, x[i].scales, 12); + utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); + const uint32_t uaux = utmp[1] & kmask1; + utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); + utmp[2] = uaux; + utmp[0] &= kmask1; + + const uint8_t * GGML_RESTRICT q4 = x[i].qs; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + + const __m256i mins_and_scales = _mm256_cvtepu8_epi16(_mm_set_epi32(utmp[3], utmp[2], utmp[1], utmp[0])); + + const __m256i q8sums = _mm256_loadu_si256((const __m256i*)y[i].bsums); + const __m128i q8s = _mm_hadd_epi16(_mm256_extracti128_si256(q8sums, 0), _mm256_extracti128_si256(q8sums, 1)); + const __m128i prod = _mm_madd_epi16(_mm256_extracti128_si256(mins_and_scales, 1), q8s); + acc_m = _mm_fmadd_ps(_mm_set1_ps(dmin), _mm_cvtepi32_ps(prod), acc_m); + + const __m128i sc128 = _mm256_extracti128_si256(mins_and_scales, 0); + const __m256i scales = MM256_SET_M128I(sc128, sc128); + + __m256i sumi = _mm256_setzero_si256(); + + for (int j = 0; j < QK_K/64; ++j) { + + const __m256i scale_l = _mm256_shuffle_epi8(scales, get_scale_shuffle_k4(2*j+0)); + const __m256i scale_h = _mm256_shuffle_epi8(scales, get_scale_shuffle_k4(2*j+1)); + + const __m256i q4bits = _mm256_loadu_si256((const __m256i*)q4); q4 += 32; + const __m256i q4l = _mm256_and_si256(q4bits, m4); + const __m256i q4h = _mm256_and_si256(_mm256_srli_epi16(q4bits, 4), m4); + + const __m256i q8l = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; + __m256i p16l = _mm256_maddubs_epi16(q4l, q8l); + p16l = _mm256_madd_epi16(scale_l, p16l); + + const __m256i q8h = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; + __m256i p16h = _mm256_maddubs_epi16(q4h, q8h); + p16h = _mm256_madd_epi16(scale_h, p16h); + const __m256i sumj = _mm256_add_epi32(p16l, p16h); + + sumi = _mm256_add_epi32(sumi, sumj); + } + + __m256 vd = _mm256_set1_ps(d); + acc = _mm256_fmadd_ps(vd, _mm256_cvtepi32_ps(sumi), acc); + + } + + acc_m = _mm_add_ps(acc_m, _mm_movehl_ps(acc_m, acc_m)); + acc_m = _mm_add_ss(acc_m, _mm_movehdup_ps(acc_m)); + + *s = hsum_float_8(acc) + _mm_cvtss_f32(acc_m); + +#elif defined __AVX__ + + const __m128i m4 = _mm_set1_epi8(0xF); + const __m128i m2 = _mm_set1_epi8(0x2); + + __m256 acc = _mm256_setzero_ps(); + __m128 acc_m = _mm_setzero_ps(); + + for (int i = 0; i < nb; ++i) { + + const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); + const float dmin = -y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin); + + const uint8_t * GGML_RESTRICT q4 = x[i].qs; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + + memcpy(utmp, x[i].scales, 12); + utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); + const uint32_t uaux = utmp[1] & kmask1; + utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); + utmp[2] = uaux; + utmp[0] &= kmask1; + + const __m128i utmps = _mm_set_epi32(utmp[3], utmp[2], utmp[1], utmp[0]); + const __m128i scales = _mm_cvtepu8_epi16(utmps); + const __m128i mins = _mm_cvtepu8_epi16(_mm_unpackhi_epi64(utmps, utmps)); + + const __m128i q8sums_0 = _mm_loadu_si128((const __m128i*)&y[i].bsums[0]); + const __m128i q8sums_1 = _mm_loadu_si128((const __m128i*)&y[i].bsums[8]); + const __m128i q8s = _mm_hadd_epi16(q8sums_0, q8sums_1); + const __m128i prod = _mm_madd_epi16(mins, q8s); + acc_m = _mm_add_ps(_mm_mul_ps(_mm_set1_ps(dmin), _mm_cvtepi32_ps(prod)), acc_m); + + __m128i sumi_0 = _mm_setzero_si128(); + __m128i sumi_1 = 
_mm_setzero_si128(); + + __m128i shuffle = _mm_set1_epi16(0x0100); + for (int j = 0; j < QK_K/64; ++j) { + + const __m128i scale_l = _mm_shuffle_epi8(scales, shuffle); + shuffle = _mm_add_epi16(shuffle, m2); + const __m128i scale_h = _mm_shuffle_epi8(scales, shuffle); + shuffle = _mm_add_epi16(shuffle, m2); + + __m128i q4bits = _mm_loadu_si128((const __m128i*)q4); q4 += 16; + const __m128i q4l_0 = _mm_and_si128(q4bits, m4); + const __m128i q4h_0 = _mm_and_si128(_mm_srli_epi16(q4bits, 4), m4); + q4bits = _mm_loadu_si128((const __m128i*)q4); q4 += 16; + const __m128i q4l_1 = _mm_and_si128(q4bits, m4); + const __m128i q4h_1 = _mm_and_si128(_mm_srli_epi16(q4bits, 4), m4); + + const __m128i q8l_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; + __m128i p16l = _mm_maddubs_epi16(q4l_0, q8l_0); + p16l = _mm_madd_epi16(scale_l, p16l); + sumi_0 = _mm_add_epi32(sumi_0, p16l); + const __m128i q8l_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; + p16l = _mm_maddubs_epi16(q4l_1, q8l_1); + p16l = _mm_madd_epi16(scale_l, p16l); + sumi_1 = _mm_add_epi32(sumi_1, p16l); + + const __m128i q8h_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; + __m128i p16h = _mm_maddubs_epi16(q4h_0, q8h_0); + p16h = _mm_madd_epi16(scale_h, p16h); + sumi_0 = _mm_add_epi32(sumi_0, p16h); + const __m128i q8h_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; + p16h = _mm_maddubs_epi16(q4h_1, q8h_1); + p16h = _mm_madd_epi16(scale_h, p16h); + sumi_1 = _mm_add_epi32(sumi_1, p16h); + + } + + __m256 vd = _mm256_set1_ps(d); + __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0); + acc = _mm256_add_ps(_mm256_mul_ps(vd, _mm256_cvtepi32_ps(sumi)), acc); + + } + + acc_m = _mm_add_ps(acc_m, _mm_movehl_ps(acc_m, acc_m)); + acc_m = _mm_add_ss(acc_m, _mm_movehdup_ps(acc_m)); + + *s = hsum_float_8(acc) + _mm_cvtss_f32(acc_m); + +#else + UNUSED(x); + UNUSED(y); + UNUSED(nb); + UNUSED(kmask1); + UNUSED(kmask2); + UNUSED(kmask3); + UNUSED(utmp); + ggml_vec_dot_q4_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); +#endif +} + +void ggml_vec_dot_q5_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q5_K * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + + static const uint32_t kmask1 = 0x3f3f3f3f; + static const uint32_t kmask2 = 0x0f0f0f0f; + static const uint32_t kmask3 = 0x03030303; + + uint32_t utmp[4]; + +#if defined __AVX2__ + + const __m256i m4 = _mm256_set1_epi8(0xF); + const __m128i mzero = _mm_setzero_si128(); + const __m256i mone = _mm256_set1_epi8(1); + + __m256 acc = _mm256_setzero_ps(); + + float summs = 0.f; + + for (int i = 0; i < nb; ++i) { + const uint8_t * GGML_RESTRICT q5 = x[i].qs; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + + const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); + const float dmin = -y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin); + + memcpy(utmp, x[i].scales, 12); + utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); + const uint32_t uaux = utmp[1] & kmask1; + utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); + utmp[2] = uaux; + utmp[0] &= kmask1; + + const __m256i mins_and_scales = _mm256_cvtepu8_epi16(_mm_set_epi32(utmp[3], utmp[2], utmp[1], utmp[0])); + + const __m256i q8sums = _mm256_loadu_si256((const __m256i*)y[i].bsums); + const __m128i q8s = _mm_hadd_epi16(_mm256_extracti128_si256(q8sums, 0), 
_mm256_extracti128_si256(q8sums, 1)); + const __m128i prod = _mm_madd_epi16(_mm256_extracti128_si256(mins_and_scales, 1), q8s); + const __m128i hsum = _mm_hadd_epi32(_mm_hadd_epi32(prod, mzero), mzero); + summs += dmin * _mm_extract_epi32(hsum, 0); + + const __m128i sc128 = _mm256_extracti128_si256(mins_and_scales, 0); + const __m256i scales = MM256_SET_M128I(sc128, sc128); + + const __m256i hbits = _mm256_loadu_si256((const __m256i*)x[i].qh); + __m256i hmask = mone; + + __m256i sumi = _mm256_setzero_si256(); + + int bit = 0; + + for (int j = 0; j < QK_K/64; ++j) { + + const __m256i scale_0 = _mm256_shuffle_epi8(scales, get_scale_shuffle_k4(2*j+0)); + const __m256i scale_1 = _mm256_shuffle_epi8(scales, get_scale_shuffle_k4(2*j+1)); + + const __m256i q5bits = _mm256_loadu_si256((const __m256i*)q5); q5 += 32; + + const __m256i q5l_0 = _mm256_and_si256(q5bits, m4); + const __m256i q5h_0 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_and_si256(hbits, hmask), bit++), 4); + const __m256i q5_0 = _mm256_add_epi8(q5l_0, q5h_0); + hmask = _mm256_slli_epi16(hmask, 1); + + const __m256i q5l_1 = _mm256_and_si256(_mm256_srli_epi16(q5bits, 4), m4); + const __m256i q5h_1 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_and_si256(hbits, hmask), bit++), 4); + const __m256i q5_1 = _mm256_add_epi8(q5l_1, q5h_1); + hmask = _mm256_slli_epi16(hmask, 1); + + const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; + const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; + + __m256i p16_0 = _mm256_maddubs_epi16(q5_0, q8_0); + __m256i p16_1 = _mm256_maddubs_epi16(q5_1, q8_1); + + p16_0 = _mm256_madd_epi16(scale_0, p16_0); + p16_1 = _mm256_madd_epi16(scale_1, p16_1); + + sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_0, p16_1)); + + } + + __m256 vd = _mm256_set1_ps(d); + acc = _mm256_fmadd_ps(vd, _mm256_cvtepi32_ps(sumi), acc); + + } + + *s = hsum_float_8(acc) + summs; + +#elif defined __AVX__ + + const __m128i m4 = _mm_set1_epi8(0xF); + const __m128i mzero = _mm_setzero_si128(); + const __m128i mone = _mm_set1_epi8(1); + const __m128i m2 = _mm_set1_epi8(2); + + __m256 acc = _mm256_setzero_ps(); + + float summs = 0.f; + + for (int i = 0; i < nb; ++i) { + + const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); + const float dmin = -y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin); + + const uint8_t * GGML_RESTRICT q5 = x[i].qs; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + + memcpy(utmp, x[i].scales, 12); + utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); + const uint32_t uaux = utmp[1] & kmask1; + utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); + utmp[2] = uaux; + utmp[0] &= kmask1; + + const __m128i utmps = _mm_set_epi32(utmp[3], utmp[2], utmp[1], utmp[0]); + const __m128i scales = _mm_cvtepu8_epi16(utmps); + const __m128i mins = _mm_cvtepu8_epi16(_mm_unpackhi_epi64(utmps, utmps)); + + const __m128i q8sums_0 = _mm_loadu_si128((const __m128i*)&y[i].bsums[0]); + const __m128i q8sums_1 = _mm_loadu_si128((const __m128i*)&y[i].bsums[8]); + const __m128i q8s = _mm_hadd_epi16(q8sums_0, q8sums_1); + const __m128i prod = _mm_madd_epi16(mins, q8s); + const __m128i hsum = _mm_hadd_epi32(_mm_hadd_epi32(prod, mzero), mzero); + summs += dmin * _mm_extract_epi32(hsum, 0); + + const __m128i hbits_0 = _mm_loadu_si128((const __m128i*)&x[i].qh[0]); + const __m128i hbits_1 = _mm_loadu_si128((const __m128i*)&x[i].qh[16]); + __m128i hmask = mone; + + __m128i sumi_0 = _mm_setzero_si128(); + __m128i sumi_1 = _mm_setzero_si128(); + + int bit = 0; + + __m128i shuffle = 
_mm_set1_epi16(0x0100); + for (int j = 0; j < QK_K/64; ++j) { + + const __m128i scale_0 = _mm_shuffle_epi8(scales, shuffle); + shuffle = _mm_add_epi16(shuffle, m2); + const __m128i scale_1 = _mm_shuffle_epi8(scales, shuffle); + shuffle = _mm_add_epi16(shuffle, m2); + + const __m128i q5bits_0 = _mm_loadu_si128((const __m128i*)q5); q5 += 16; + const __m128i q5bits_1 = _mm_loadu_si128((const __m128i*)q5); q5 += 16; + + __m128i q5l_0 = _mm_and_si128(q5bits_0, m4); + __m128i q5l_1 = _mm_and_si128(q5bits_1, m4); + __m128i q5h_0 = _mm_slli_epi16(_mm_srli_epi16(_mm_and_si128(hbits_0, hmask), bit), 4); + __m128i q5h_1 = _mm_slli_epi16(_mm_srli_epi16(_mm_and_si128(hbits_1, hmask), bit++), 4); + __m128i q5_0 = _mm_add_epi8(q5l_0, q5h_0); + __m128i q5_1 = _mm_add_epi8(q5l_1, q5h_1); + hmask = _mm_slli_epi16(hmask, 1); + + __m128i q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; + __m128i q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; + __m128i p16_0 = _mm_maddubs_epi16(q5_0, q8_0); + __m128i p16_1 = _mm_maddubs_epi16(q5_1, q8_1); + p16_0 = _mm_madd_epi16(scale_0, p16_0); + p16_1 = _mm_madd_epi16(scale_0, p16_1); + + q5l_0 = _mm_and_si128(_mm_srli_epi16(q5bits_0, 4), m4); + q5l_1 = _mm_and_si128(_mm_srli_epi16(q5bits_1, 4), m4); + q5h_0 = _mm_slli_epi16(_mm_srli_epi16(_mm_and_si128(hbits_0, hmask), bit), 4); + q5h_1 = _mm_slli_epi16(_mm_srli_epi16(_mm_and_si128(hbits_1, hmask), bit++), 4); + q5_0 = _mm_add_epi8(q5l_0, q5h_0); + q5_1 = _mm_add_epi8(q5l_1, q5h_1); + hmask = _mm_slli_epi16(hmask, 1); + + q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; + q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; + __m128i p16_2 = _mm_maddubs_epi16(q5_0, q8_0); + __m128i p16_3 = _mm_maddubs_epi16(q5_1, q8_1); + p16_2 = _mm_madd_epi16(scale_1, p16_2); + p16_3 = _mm_madd_epi16(scale_1, p16_3); + + sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_0, p16_2)); + sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_1, p16_3)); + + } + + __m256 vd = _mm256_set1_ps(d); + __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0); + acc = _mm256_add_ps(_mm256_mul_ps(vd, _mm256_cvtepi32_ps(sumi)), acc); + + } + + *s = hsum_float_8(acc) + summs; + +#else + UNUSED(x); + UNUSED(y); + UNUSED(nb); + UNUSED(kmask1); + UNUSED(kmask2); + UNUSED(kmask3); + UNUSED(utmp); + ggml_vec_dot_q5_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); +#endif +} + +void ggml_vec_dot_q6_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q6_K * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + +#if defined __AVX2__ + + const __m256i m4 = _mm256_set1_epi8(0xF); + const __m256i m2 = _mm256_set1_epi8(3); + const __m256i m32s = _mm256_set1_epi8(32); + + __m256 acc = _mm256_setzero_ps(); + + for (int i = 0; i < nb; ++i) { + + const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); + + const uint8_t * GGML_RESTRICT q4 = x[i].ql; + const uint8_t * GGML_RESTRICT qh = x[i].qh; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + + const __m128i scales = _mm_loadu_si128((const __m128i*)x[i].scales); + + __m256i sumi = _mm256_setzero_si256(); + + int is = 0; + + for (int j = 0; j < QK_K/128; ++j) { + + const __m128i scale_0 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 0)); + const __m128i scale_1 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 1)); + const __m128i scale_2 = _mm_shuffle_epi8(scales, 
get_scale_shuffle(is + 2)); + const __m128i scale_3 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 3)); + is += 4; + + const __m256i q4bits1 = _mm256_loadu_si256((const __m256i*)q4); q4 += 32; + const __m256i q4bits2 = _mm256_loadu_si256((const __m256i*)q4); q4 += 32; + const __m256i q4bitsH = _mm256_loadu_si256((const __m256i*)qh); qh += 32; + + const __m256i q4h_0 = _mm256_slli_epi16(_mm256_and_si256(q4bitsH, m2), 4); + const __m256i q4h_1 = _mm256_slli_epi16(_mm256_and_si256(_mm256_srli_epi16(q4bitsH, 2), m2), 4); + const __m256i q4h_2 = _mm256_slli_epi16(_mm256_and_si256(_mm256_srli_epi16(q4bitsH, 4), m2), 4); + const __m256i q4h_3 = _mm256_slli_epi16(_mm256_and_si256(_mm256_srli_epi16(q4bitsH, 6), m2), 4); + + const __m256i q4_0 = _mm256_or_si256(_mm256_and_si256(q4bits1, m4), q4h_0); + const __m256i q4_1 = _mm256_or_si256(_mm256_and_si256(q4bits2, m4), q4h_1); + const __m256i q4_2 = _mm256_or_si256(_mm256_and_si256(_mm256_srli_epi16(q4bits1, 4), m4), q4h_2); + const __m256i q4_3 = _mm256_or_si256(_mm256_and_si256(_mm256_srli_epi16(q4bits2, 4), m4), q4h_3); + + const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; + const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; + const __m256i q8_2 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; + const __m256i q8_3 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; + + __m256i q8s_0 = _mm256_maddubs_epi16(m32s, q8_0); + __m256i q8s_1 = _mm256_maddubs_epi16(m32s, q8_1); + __m256i q8s_2 = _mm256_maddubs_epi16(m32s, q8_2); + __m256i q8s_3 = _mm256_maddubs_epi16(m32s, q8_3); + + __m256i p16_0 = _mm256_maddubs_epi16(q4_0, q8_0); + __m256i p16_1 = _mm256_maddubs_epi16(q4_1, q8_1); + __m256i p16_2 = _mm256_maddubs_epi16(q4_2, q8_2); + __m256i p16_3 = _mm256_maddubs_epi16(q4_3, q8_3); + + p16_0 = _mm256_sub_epi16(p16_0, q8s_0); + p16_1 = _mm256_sub_epi16(p16_1, q8s_1); + p16_2 = _mm256_sub_epi16(p16_2, q8s_2); + p16_3 = _mm256_sub_epi16(p16_3, q8s_3); + + p16_0 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_0), p16_0); + p16_1 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_1), p16_1); + p16_2 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_2), p16_2); + p16_3 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_3), p16_3); + + sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_0, p16_1)); + sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_2, p16_3)); + + } + + acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi), acc); + } + + *s = hsum_float_8(acc); + +#elif defined __AVX__ + + const __m128i m3 = _mm_set1_epi8(3); + const __m128i m15 = _mm_set1_epi8(15); + + __m256 acc = _mm256_setzero_ps(); + + for (int i = 0; i < nb; ++i) { + + const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); + + const uint8_t * GGML_RESTRICT q4 = x[i].ql; + const uint8_t * GGML_RESTRICT qh = x[i].qh; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + + // handle the q6_k -32 offset separately using bsums + const __m128i q8sums_0 = _mm_loadu_si128((const __m128i*)y[i].bsums); + const __m128i q8sums_1 = _mm_loadu_si128((const __m128i*)y[i].bsums + 1); + const __m128i scales = _mm_loadu_si128((const __m128i*)x[i].scales); + const __m128i scales_16_0 = _mm_cvtepi8_epi16(scales); + const __m128i scales_16_1 = _mm_cvtepi8_epi16(_mm_bsrli_si128(scales, 8)); + const __m128i q8sclsub_0 = _mm_slli_epi32(_mm_madd_epi16(q8sums_0, scales_16_0), 5); + const __m128i q8sclsub_1 = _mm_slli_epi32(_mm_madd_epi16(q8sums_1, scales_16_1), 5); + + __m128i sumi_0 = _mm_setzero_si128(); + __m128i sumi_1 = _mm_setzero_si128(); + + int is = 0; + + 
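+        // q8sclsub_* = 32 * scale * sum(q8) per 16-element group (the <<5 is the x32);
+        // subtracting it from sumi after the loop is equivalent to applying the q6_K
+        // -32 offset to every quant while keeping them unsigned for _mm_maddubs_epi16.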
for (int j = 0; j < QK_K/128; ++j) { + + const __m128i q4bitsH_0 = _mm_loadu_si128((const __m128i*)qh); qh += 16; + const __m128i q4bitsH_1 = _mm_loadu_si128((const __m128i*)qh); qh += 16; + + const __m128i q4h_0 = _mm_slli_epi16(_mm_and_si128(q4bitsH_0, m3), 4); + const __m128i q4h_1 = _mm_slli_epi16(_mm_and_si128(q4bitsH_1, m3), 4); + const __m128i q4h_2 = _mm_slli_epi16(_mm_and_si128(q4bitsH_0, _mm_set1_epi8(12)), 2); + const __m128i q4h_3 = _mm_slli_epi16(_mm_and_si128(q4bitsH_1, _mm_set1_epi8(12)), 2); + const __m128i q4h_4 = _mm_and_si128(q4bitsH_0, _mm_set1_epi8(48)); + const __m128i q4h_5 = _mm_and_si128(q4bitsH_1, _mm_set1_epi8(48)); + const __m128i q4h_6 = _mm_srli_epi16(_mm_and_si128(q4bitsH_0, _mm_set1_epi8(-64)), 2); + const __m128i q4h_7 = _mm_srli_epi16(_mm_and_si128(q4bitsH_1, _mm_set1_epi8(-64)), 2); + + const __m128i q4bits1_0 = _mm_loadu_si128((const __m128i*)q4); q4 += 16; + const __m128i q4bits1_1 = _mm_loadu_si128((const __m128i*)q4); q4 += 16; + const __m128i q4bits2_0 = _mm_loadu_si128((const __m128i*)q4); q4 += 16; + const __m128i q4bits2_1 = _mm_loadu_si128((const __m128i*)q4); q4 += 16; + + const __m128i q4_0 = _mm_or_si128(_mm_and_si128(q4bits1_0, m15), q4h_0); + const __m128i q4_1 = _mm_or_si128(_mm_and_si128(q4bits1_1, m15), q4h_1); + const __m128i q4_2 = _mm_or_si128(_mm_and_si128(q4bits2_0, m15), q4h_2); + const __m128i q4_3 = _mm_or_si128(_mm_and_si128(q4bits2_1, m15), q4h_3); + const __m128i q4_4 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits1_0, 4), m15), q4h_4); + const __m128i q4_5 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits1_1, 4), m15), q4h_5); + const __m128i q4_6 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits2_0, 4), m15), q4h_6); + const __m128i q4_7 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits2_1, 4), m15), q4h_7); + + const __m128i q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; + const __m128i q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; + const __m128i q8_2 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; + const __m128i q8_3 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; + const __m128i q8_4 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; + const __m128i q8_5 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; + const __m128i q8_6 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; + const __m128i q8_7 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; + + __m128i p16_0 = _mm_maddubs_epi16(q4_0, q8_0); + __m128i p16_1 = _mm_maddubs_epi16(q4_1, q8_1); + __m128i p16_2 = _mm_maddubs_epi16(q4_2, q8_2); + __m128i p16_3 = _mm_maddubs_epi16(q4_3, q8_3); + __m128i p16_4 = _mm_maddubs_epi16(q4_4, q8_4); + __m128i p16_5 = _mm_maddubs_epi16(q4_5, q8_5); + __m128i p16_6 = _mm_maddubs_epi16(q4_6, q8_6); + __m128i p16_7 = _mm_maddubs_epi16(q4_7, q8_7); + + const __m128i scale_0 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 0)); + const __m128i scale_1 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 1)); + const __m128i scale_2 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 2)); + const __m128i scale_3 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 3)); + is += 4; + + p16_0 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_0), p16_0); + p16_1 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_bsrli_si128(scale_0, 8)), p16_1); + p16_2 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_1), p16_2); + p16_3 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_bsrli_si128(scale_1, 8)), p16_3); + p16_4 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_2), p16_4); + p16_5 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_bsrli_si128(scale_2, 8)), p16_5); + p16_6 = 
_mm_madd_epi16(_mm_cvtepi8_epi16(scale_3), p16_6); + p16_7 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_bsrli_si128(scale_3, 8)), p16_7); + + sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_0, p16_2)); + sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_1, p16_3)); + sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_4, p16_6)); + sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_5, p16_7)); + + } + + sumi_0 = _mm_sub_epi32(sumi_0, q8sclsub_0); + sumi_1 = _mm_sub_epi32(sumi_1, q8sclsub_1); + const __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0); + acc = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(sumi)), acc); + } + + *s = hsum_float_8(acc); + +#else + UNUSED(x); + UNUSED(y); + UNUSED(nb); + ggml_vec_dot_q6_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); +#endif +} + +#if defined (__AVX__) || defined (__AVX2__) +static const int8_t keven_signs_q2xs[1024] = { + 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, 1, + 1, 1, -1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, 1, 1, 1, -1, -1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, -1, + 1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, 1, 1, -1, 1, -1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, -1, + 1, 1, -1, -1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, 1, + 1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, 1, -1, 1, 1, 1, 1, -1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, -1, + 1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, -1, 1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, 1, + 1, 1, 1, -1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, 1, + 1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, 1, 1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, -1, + 1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, 1, -1, 1, -1, + 1, 1, -1, 1, 1, -1, 1, 1, -1, 1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, 1, + 1, 1, 1, -1, 1, -1, 1, 1, -1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, 1, + 1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, 1, 1, -1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, -1, + 1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, 1, + 1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, 1, 1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, -1, + 1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, -1, + 1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, -1, 1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, 1, + 1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, -1, 1, 1, -1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, 1, -1, -1, + 1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, -1, 1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, 1, + 1, 1, 1, -1, 1, 1, -1, 1, -1, 1, 1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, 1, + 1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, 1, 1, -1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, -1, + 1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, -1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, 1, + 1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, -1, + 1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, 1, 1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, -1, + 1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, -1, 1, -1, -1, -1, -1, 1, -1, 
-1, -1, -1, -1, -1, -1, 1, -1, 1, + 1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, 1, -1, -1, -1, 1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, -1, 1, + 1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, + 1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, -1, + 1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, -1, 1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, 1, + 1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, 1, 1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, -1, + 1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, -1, 1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, 1, + 1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, -1, 1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, + 1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, 1, 1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1, +}; +#endif + +void ggml_vec_dot_iq2_xxs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_iq2_xxs * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + +#if defined(__AVX2__) + + const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs; + + uint32_t aux32[4]; + const uint8_t * aux8 = (const uint8_t *)aux32; + + __m256 accumf = _mm256_setzero_ps(); + for (int i = 0; i < nb; ++i) { + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; + const uint16_t * GGML_RESTRICT q2 = x[i].qs; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + __m256i sumi1 = _mm256_setzero_si256(); + __m256i sumi2 = _mm256_setzero_si256(); + for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { + const __m256i q8_1 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32; + const __m256i q8_2 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32; + memcpy(aux32, q2, 4*sizeof(uint32_t)); q2 += 8; + const __m256i q2_1 = _mm256_set_epi64x(iq2xxs_grid[aux8[ 3]], iq2xxs_grid[aux8[ 2]], iq2xxs_grid[aux8[1]], iq2xxs_grid[aux8[0]]); + const __m256i q2_2 = _mm256_set_epi64x(iq2xxs_grid[aux8[11]], iq2xxs_grid[aux8[10]], iq2xxs_grid[aux8[9]], iq2xxs_grid[aux8[8]]); + const __m256i s2_1 = _mm256_set_epi64x(signs64[(aux32[1] >> 21) & 127], signs64[(aux32[1] >> 14) & 127], + signs64[(aux32[1] >> 7) & 127], signs64[(aux32[1] >> 0) & 127]); + const __m256i s2_2 = _mm256_set_epi64x(signs64[(aux32[3] >> 21) & 127], signs64[(aux32[3] >> 14) & 127], + signs64[(aux32[3] >> 7) & 127], signs64[(aux32[3] >> 0) & 127]); + const __m256i q8s_1 = _mm256_sign_epi8(q8_1, s2_1); + const __m256i q8s_2 = _mm256_sign_epi8(q8_2, s2_2); + const __m256i dot1 = _mm256_maddubs_epi16(q2_1, q8s_1); + const __m256i dot2 = _mm256_maddubs_epi16(q2_2, q8s_2); + const uint16_t ls1 = aux32[1] >> 28; + const uint16_t ls2 = aux32[3] >> 28; + const __m256i p1 = _mm256_madd_epi16(dot1, _mm256_set1_epi16(2*ls1+1)); + const __m256i p2 = _mm256_madd_epi16(dot2, _mm256_set1_epi16(2*ls2+1)); + sumi1 = _mm256_add_epi32(sumi1, p1); + sumi2 = _mm256_add_epi32(sumi2, p2); + } + + accumf = _mm256_fmadd_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(_mm256_add_epi32(sumi1, sumi2)), accumf); + + } + + *s = 0.125f * hsum_float_8(accumf); + +#elif defined(__AVX__) + const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs; + + uint32_t aux32[4]; + const uint8_t * aux8 = (const 
uint8_t *)aux32; + + __m256 accumf = _mm256_setzero_ps(); + for (int i = 0; i < nb; ++i) { + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; + const uint16_t * GGML_RESTRICT q2 = x[i].qs; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + __m128i sumi1_0 = _mm_setzero_si128(); + __m128i sumi1_1 = _mm_setzero_si128(); + __m128i sumi2_0 = _mm_setzero_si128(); + __m128i sumi2_1 = _mm_setzero_si128(); + for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { + const __m128i q8_1_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; + const __m128i q8_1_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; + const __m128i q8_2_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; + const __m128i q8_2_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; + memcpy(aux32, q2, 4*sizeof(uint32_t)); q2 += 8; + const __m128i q2_1_0 = _mm_set_epi64x(iq2xxs_grid[aux8[1]], iq2xxs_grid[aux8[0]]); + const __m128i q2_1_1 = _mm_set_epi64x(iq2xxs_grid[aux8[3]], iq2xxs_grid[aux8[2]]); + const __m128i q2_2_0 = _mm_set_epi64x(iq2xxs_grid[aux8[9]], iq2xxs_grid[aux8[8]]); + const __m128i q2_2_1 = _mm_set_epi64x(iq2xxs_grid[aux8[11]], iq2xxs_grid[aux8[10]]); + const __m128i s2_1_0 = _mm_set_epi64x(signs64[(aux32[1] >> 7) & 127], signs64[(aux32[1] >> 0) & 127]); + const __m128i s2_1_1 = _mm_set_epi64x(signs64[(aux32[1] >> 21) & 127], signs64[(aux32[1] >> 14) & 127]); + const __m128i s2_2_0 = _mm_set_epi64x(signs64[(aux32[3] >> 7) & 127], signs64[(aux32[3] >> 0) & 127]); + const __m128i s2_2_1 = _mm_set_epi64x(signs64[(aux32[3] >> 21) & 127], signs64[(aux32[3] >> 14) & 127]); + const __m128i q8s_1_0 = _mm_sign_epi8(q8_1_0, s2_1_0); + const __m128i q8s_1_1 = _mm_sign_epi8(q8_1_1, s2_1_1); + const __m128i q8s_2_0 = _mm_sign_epi8(q8_2_0, s2_2_0); + const __m128i q8s_2_1 = _mm_sign_epi8(q8_2_1, s2_2_1); + const __m128i dot1_0 = _mm_maddubs_epi16(q2_1_0, q8s_1_0); + const __m128i dot1_1 = _mm_maddubs_epi16(q2_1_1, q8s_1_1); + const __m128i dot2_0 = _mm_maddubs_epi16(q2_2_0, q8s_2_0); + const __m128i dot2_1 = _mm_maddubs_epi16(q2_2_1, q8s_2_1); + const uint16_t ls1 = aux32[1] >> 28; + const uint16_t ls2 = aux32[3] >> 28; + const __m128i p1_0 = _mm_madd_epi16(dot1_0, _mm_set1_epi16(2*ls1+1)); + const __m128i p1_1 = _mm_madd_epi16(dot1_1, _mm_set1_epi16(2*ls1+1)); + const __m128i p2_0 = _mm_madd_epi16(dot2_0, _mm_set1_epi16(2*ls2+1)); + const __m128i p2_1 = _mm_madd_epi16(dot2_1, _mm_set1_epi16(2*ls2+1)); + sumi1_0 = _mm_add_epi32(sumi1_0, p1_0); + sumi1_1 = _mm_add_epi32(sumi1_1, p1_1); + sumi2_0 = _mm_add_epi32(sumi2_0, p2_0); + sumi2_1 = _mm_add_epi32(sumi2_1, p2_1); + } + + accumf = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(MM256_SET_M128I(_mm_add_epi32(sumi1_1, sumi2_1), _mm_add_epi32(sumi1_0, sumi2_0)))), accumf); + + } + + *s = 0.125f * hsum_float_8(accumf); + +#else + UNUSED(x); + UNUSED(y); + UNUSED(nb); + ggml_vec_dot_iq2_xxs_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); +#endif +} + +void ggml_vec_dot_iq2_xs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_iq2_xs * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + +#if defined(__AVX2__) + + const __m256i mone = _mm256_set1_epi8(1); + static const char block_sign_shuffle_mask_1[32] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x04, 0x04, 0x04, 
0x04, 0x04, 0x04, 0x04, 0x04, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + }; + static const char block_sign_shuffle_mask_2[32] = { + 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, + 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, + }; + static const uint8_t bit_selector_mask_bytes[32] = { + 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, + 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, + }; + + const __m256i bit_selector_mask = _mm256_loadu_si256((const __m256i*)bit_selector_mask_bytes); + const __m256i block_sign_shuffle_1 = _mm256_loadu_si256((const __m256i*)block_sign_shuffle_mask_1); + const __m256i block_sign_shuffle_2 = _mm256_loadu_si256((const __m256i*)block_sign_shuffle_mask_2); + + static const uint8_t k_bit_helper[32] = { + 0x00, 0x80, 0x80, 0x00, 0x80, 0x00, 0x00, 0x80, 0x80, 0x00, 0x00, 0x80, 0x00, 0x80, 0x80, 0x00, + 0x00, 0x80, 0x80, 0x00, 0x80, 0x00, 0x00, 0x80, 0x80, 0x00, 0x00, 0x80, 0x00, 0x80, 0x80, 0x00, + }; + const __m256i bit_helper = _mm256_loadu_si256((const __m256i*)k_bit_helper); + const __m256i m511 = _mm256_set1_epi16(511); + const __m128i m4 = _mm_set1_epi8(0xf); + const __m128i m1 = _mm_set1_epi8(1); + + uint64_t aux64; + + // somewhat hacky, but gives a significant boost in performance + __m256i aux_gindex; + const uint16_t * gindex = (const uint16_t *)&aux_gindex; + + __m256 accumf = _mm256_setzero_ps(); + for (int i = 0; i < nb; ++i) { + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; + const uint16_t * GGML_RESTRICT q2 = x[i].qs; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + + memcpy(&aux64, x[i].scales, 8); + __m128i stmp = _mm_set1_epi64x(aux64); + stmp = _mm_unpacklo_epi8(_mm_and_si128(stmp, m4), _mm_and_si128(_mm_srli_epi16(stmp, 4), m4)); + const __m128i scales = _mm_add_epi8(_mm_slli_epi16(stmp, 1), m1); + + __m256i sumi1 = _mm256_setzero_si256(); + __m256i sumi2 = _mm256_setzero_si256(); + for (int ib32 = 0; ib32 < QK_K/32; ib32 += 4) { + + const __m256i q2_data = _mm256_loadu_si256((const __m256i*)q2); q2 += 16; + aux_gindex = _mm256_and_si256(q2_data, m511); + + const __m256i partial_sign_bits = _mm256_srli_epi16(q2_data, 9); + const __m256i partial_sign_bits_upper = _mm256_srli_epi16(q2_data, 13); + const __m256i partial_sign_bits_for_counting = _mm256_xor_si256(partial_sign_bits, partial_sign_bits_upper); + + const __m256i odd_bits = _mm256_shuffle_epi8(bit_helper, partial_sign_bits_for_counting); + const __m256i full_sign_bits = _mm256_or_si256(partial_sign_bits, odd_bits); + + const __m256i q8_1 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32; + const __m256i q8_2 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32; + const __m256i q8_3 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32; + const __m256i q8_4 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32; + + const __m256i q2_1 = _mm256_set_epi64x(iq2xs_grid[gindex[ 3]], iq2xs_grid[gindex[ 2]], + iq2xs_grid[gindex[ 1]], iq2xs_grid[gindex[ 0]]); + const __m256i q2_2 = _mm256_set_epi64x(iq2xs_grid[gindex[ 7]], iq2xs_grid[gindex[ 6]], + iq2xs_grid[gindex[ 5]], iq2xs_grid[gindex[ 4]]); + const __m256i q2_3 = _mm256_set_epi64x(iq2xs_grid[gindex[11]], iq2xs_grid[gindex[10]], + iq2xs_grid[gindex[ 9]], iq2xs_grid[gindex[ 8]]); + const __m256i q2_4 = _mm256_set_epi64x(iq2xs_grid[gindex[15]], iq2xs_grid[gindex[14]], + iq2xs_grid[gindex[13]], iq2xs_grid[gindex[12]]); + + const __m128i 
full_signs_l = _mm256_castsi256_si128(full_sign_bits); + const __m128i full_signs_h = _mm256_extractf128_si256(full_sign_bits, 1); + const __m256i full_signs_1 = MM256_SET_M128I(full_signs_l, full_signs_l); + const __m256i full_signs_2 = MM256_SET_M128I(full_signs_h, full_signs_h); + + __m256i signs; + signs = _mm256_shuffle_epi8(full_signs_1, block_sign_shuffle_1); + signs = _mm256_cmpeq_epi8(_mm256_and_si256(signs, bit_selector_mask), bit_selector_mask); + const __m256i q8s_1 = _mm256_sign_epi8(q8_1, _mm256_or_si256(signs, mone)); + + signs = _mm256_shuffle_epi8(full_signs_1, block_sign_shuffle_2); + signs = _mm256_cmpeq_epi8(_mm256_and_si256(signs, bit_selector_mask), bit_selector_mask); + const __m256i q8s_2 = _mm256_sign_epi8(q8_2, _mm256_or_si256(signs, mone)); + + signs = _mm256_shuffle_epi8(full_signs_2, block_sign_shuffle_1); + signs = _mm256_cmpeq_epi8(_mm256_and_si256(signs, bit_selector_mask), bit_selector_mask); + const __m256i q8s_3 = _mm256_sign_epi8(q8_3, _mm256_or_si256(signs, mone)); + + signs = _mm256_shuffle_epi8(full_signs_2, block_sign_shuffle_2); + signs = _mm256_cmpeq_epi8(_mm256_and_si256(signs, bit_selector_mask), bit_selector_mask); + const __m256i q8s_4 = _mm256_sign_epi8(q8_4, _mm256_or_si256(signs, mone)); + + const __m256i dot1 = _mm256_maddubs_epi16(q2_1, q8s_1); + const __m256i dot2 = _mm256_maddubs_epi16(q2_2, q8s_2); + const __m256i dot3 = _mm256_maddubs_epi16(q2_3, q8s_3); + const __m256i dot4 = _mm256_maddubs_epi16(q2_4, q8s_4); + + const __m256i sc1 = _mm256_cvtepi8_epi16(_mm_shuffle_epi8(scales, get_scale_shuffle(ib32+0))); + const __m256i sc2 = _mm256_cvtepi8_epi16(_mm_shuffle_epi8(scales, get_scale_shuffle(ib32+1))); + const __m256i sc3 = _mm256_cvtepi8_epi16(_mm_shuffle_epi8(scales, get_scale_shuffle(ib32+2))); + const __m256i sc4 = _mm256_cvtepi8_epi16(_mm_shuffle_epi8(scales, get_scale_shuffle(ib32+3))); + + sumi1 = _mm256_add_epi32(sumi1, _mm256_madd_epi16(dot1, sc1)); + sumi2 = _mm256_add_epi32(sumi2, _mm256_madd_epi16(dot2, sc2)); + sumi1 = _mm256_add_epi32(sumi1, _mm256_madd_epi16(dot3, sc3)); + sumi2 = _mm256_add_epi32(sumi2, _mm256_madd_epi16(dot4, sc4)); + } + + accumf = _mm256_fmadd_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(_mm256_add_epi32(sumi1, sumi2)), accumf); + + } + + *s = 0.125f * hsum_float_8(accumf); + +#elif defined(__AVX__) + const __m128i mone = _mm_set1_epi8(1); + static const char block_sign_shuffle_mask_1[32] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + }; + static const char block_sign_shuffle_mask_2[32] = { + 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, + 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, + }; + static const uint8_t bit_selector_mask_bytes[32] = { + 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, + 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, + }; + + const __m128i bit_selector_mask_0 = _mm_loadu_si128((const __m128i*)bit_selector_mask_bytes); + const __m128i bit_selector_mask_1 = _mm_loadu_si128((const __m128i*)bit_selector_mask_bytes + 1); + const __m128i block_sign_shuffle_1_0 = _mm_loadu_si128((const __m128i*)block_sign_shuffle_mask_1); + const __m128i block_sign_shuffle_1_1 = _mm_loadu_si128((const __m128i*)block_sign_shuffle_mask_1 + 1); + const __m128i 
block_sign_shuffle_2_0 = _mm_loadu_si128((const __m128i*)block_sign_shuffle_mask_2); + const __m128i block_sign_shuffle_2_1 = _mm_loadu_si128((const __m128i*)block_sign_shuffle_mask_2 + 1); + + static const uint8_t k_bit_helper[32] = { + 0x00, 0x80, 0x80, 0x00, 0x80, 0x00, 0x00, 0x80, 0x80, 0x00, 0x00, 0x80, 0x00, 0x80, 0x80, 0x00, + 0x00, 0x80, 0x80, 0x00, 0x80, 0x00, 0x00, 0x80, 0x80, 0x00, 0x00, 0x80, 0x00, 0x80, 0x80, 0x00, + }; + const __m128i bit_helper_0 = _mm_loadu_si128((const __m128i*)k_bit_helper); + const __m128i bit_helper_1 = _mm_loadu_si128((const __m128i*)k_bit_helper + 1); + const __m128i m511 = _mm_set1_epi16(511); + const __m128i m4 = _mm_set1_epi8(0xf); + const __m128i m1 = _mm_set1_epi8(1); + + uint64_t aux64; + + // somewhat hacky, but gives a significant boost in performance + __m256i aux_gindex; + const uint16_t * gindex = (const uint16_t *)&aux_gindex; + + __m256 accumf = _mm256_setzero_ps(); + for (int i = 0; i < nb; ++i) { + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; + const uint16_t * GGML_RESTRICT q2 = x[i].qs; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + + memcpy(&aux64, x[i].scales, 8); + __m128i stmp = _mm_set1_epi64x(aux64); + stmp = _mm_unpacklo_epi8(_mm_and_si128(stmp, m4), _mm_and_si128(_mm_srli_epi16(stmp, 4), m4)); + const __m128i scales = _mm_add_epi8(_mm_slli_epi16(stmp, 1), m1); + + __m128i sumi1_0 = _mm_setzero_si128(); + __m128i sumi1_1 = _mm_setzero_si128(); + __m128i sumi2_0 = _mm_setzero_si128(); + __m128i sumi2_1 = _mm_setzero_si128(); + for (int ib32 = 0; ib32 < QK_K/32; ib32 += 4) { + + const __m128i q2_data_0 = _mm_loadu_si128((const __m128i*)q2); + const __m128i q2_data_1 = _mm_loadu_si128((const __m128i*)q2 + 1); q2 += 16; + aux_gindex = MM256_SET_M128I(_mm_and_si128(q2_data_1, m511), _mm_and_si128(q2_data_0, m511)); + + const __m128i partial_sign_bits_0 = _mm_srli_epi16(q2_data_0, 9); + const __m128i partial_sign_bits_1 = _mm_srli_epi16(q2_data_1, 9); + const __m128i partial_sign_bits_upper_0 = _mm_srli_epi16(q2_data_0, 13); + const __m128i partial_sign_bits_upper_1 = _mm_srli_epi16(q2_data_1, 13); + const __m128i partial_sign_bits_for_counting_0 = _mm_xor_si128(partial_sign_bits_0, partial_sign_bits_upper_0); + const __m128i partial_sign_bits_for_counting_1 = _mm_xor_si128(partial_sign_bits_1, partial_sign_bits_upper_1); + + const __m128i odd_bits_0 = _mm_shuffle_epi8(bit_helper_0, partial_sign_bits_for_counting_0); + const __m128i odd_bits_1 = _mm_shuffle_epi8(bit_helper_1, partial_sign_bits_for_counting_1); + const __m128i full_sign_bits_0 = _mm_or_si128(partial_sign_bits_0, odd_bits_0); + const __m128i full_sign_bits_1 = _mm_or_si128(partial_sign_bits_1, odd_bits_1); + + const __m128i q8_1_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; + const __m128i q8_1_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; + const __m128i q8_2_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; + const __m128i q8_2_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; + const __m128i q8_3_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; + const __m128i q8_3_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; + const __m128i q8_4_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; + const __m128i q8_4_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; + + const __m128i q2_1_0 = _mm_set_epi64x(iq2xs_grid[gindex[1]], iq2xs_grid[gindex[0]]); + const __m128i q2_1_1 = _mm_set_epi64x(iq2xs_grid[gindex[3]], iq2xs_grid[gindex[2]]); + const __m128i q2_2_0 = _mm_set_epi64x(iq2xs_grid[gindex[5]], iq2xs_grid[gindex[4]]); + const __m128i 
q2_2_1 = _mm_set_epi64x(iq2xs_grid[gindex[7]], iq2xs_grid[gindex[6]]); + const __m128i q2_3_0 = _mm_set_epi64x(iq2xs_grid[gindex[9]], iq2xs_grid[gindex[8]]); + const __m128i q2_3_1 = _mm_set_epi64x(iq2xs_grid[gindex[11]], iq2xs_grid[gindex[10]]); + const __m128i q2_4_0 = _mm_set_epi64x(iq2xs_grid[gindex[13]], iq2xs_grid[gindex[12]]); + const __m128i q2_4_1 = _mm_set_epi64x(iq2xs_grid[gindex[15]], iq2xs_grid[gindex[14]]); + + // AVX2 full_signs_1 is full_sign_bits_0 here + // AVX2 full_signs_2 is full_sign_bits_1 here + __m128i signs_0, signs_1; + signs_0 = _mm_shuffle_epi8(full_sign_bits_0, block_sign_shuffle_1_0); + signs_1 = _mm_shuffle_epi8(full_sign_bits_0, block_sign_shuffle_1_1); + signs_0 = _mm_cmpeq_epi8(_mm_and_si128(signs_0, bit_selector_mask_0), bit_selector_mask_0); + signs_1 = _mm_cmpeq_epi8(_mm_and_si128(signs_1, bit_selector_mask_1), bit_selector_mask_1); + const __m128i q8s_1_0 = _mm_sign_epi8(q8_1_0, _mm_or_si128(signs_0, mone)); + const __m128i q8s_1_1 = _mm_sign_epi8(q8_1_1, _mm_or_si128(signs_1, mone)); + + signs_0 = _mm_shuffle_epi8(full_sign_bits_0, block_sign_shuffle_2_0); + signs_1 = _mm_shuffle_epi8(full_sign_bits_0, block_sign_shuffle_2_1); + signs_0 = _mm_cmpeq_epi8(_mm_and_si128(signs_0, bit_selector_mask_0), bit_selector_mask_0); + signs_1 = _mm_cmpeq_epi8(_mm_and_si128(signs_1, bit_selector_mask_1), bit_selector_mask_1); + const __m128i q8s_2_0 = _mm_sign_epi8(q8_2_0, _mm_or_si128(signs_0, mone)); + const __m128i q8s_2_1 = _mm_sign_epi8(q8_2_1, _mm_or_si128(signs_1, mone)); + + signs_0 = _mm_shuffle_epi8(full_sign_bits_1, block_sign_shuffle_1_0); + signs_1 = _mm_shuffle_epi8(full_sign_bits_1, block_sign_shuffle_1_1); + signs_0 = _mm_cmpeq_epi8(_mm_and_si128(signs_0, bit_selector_mask_0), bit_selector_mask_0); + signs_1 = _mm_cmpeq_epi8(_mm_and_si128(signs_1, bit_selector_mask_1), bit_selector_mask_1); + const __m128i q8s_3_0 = _mm_sign_epi8(q8_3_0, _mm_or_si128(signs_0, mone)); + const __m128i q8s_3_1 = _mm_sign_epi8(q8_3_1, _mm_or_si128(signs_1, mone)); + + signs_0 = _mm_shuffle_epi8(full_sign_bits_1, block_sign_shuffle_2_0); + signs_1 = _mm_shuffle_epi8(full_sign_bits_1, block_sign_shuffle_2_1); + signs_0 = _mm_cmpeq_epi8(_mm_and_si128(signs_0, bit_selector_mask_0), bit_selector_mask_0); + signs_1 = _mm_cmpeq_epi8(_mm_and_si128(signs_1, bit_selector_mask_1), bit_selector_mask_1); + const __m128i q8s_4_0 = _mm_sign_epi8(q8_4_0, _mm_or_si128(signs_0, mone)); + const __m128i q8s_4_1 = _mm_sign_epi8(q8_4_1, _mm_or_si128(signs_1, mone)); + + const __m128i dot1_0 = _mm_maddubs_epi16(q2_1_0, q8s_1_0); + const __m128i dot1_1 = _mm_maddubs_epi16(q2_1_1, q8s_1_1); + const __m128i dot2_0 = _mm_maddubs_epi16(q2_2_0, q8s_2_0); + const __m128i dot2_1 = _mm_maddubs_epi16(q2_2_1, q8s_2_1); + const __m128i dot3_0 = _mm_maddubs_epi16(q2_3_0, q8s_3_0); + const __m128i dot3_1 = _mm_maddubs_epi16(q2_3_1, q8s_3_1); + const __m128i dot4_0 = _mm_maddubs_epi16(q2_4_0, q8s_4_0); + const __m128i dot4_1 = _mm_maddubs_epi16(q2_4_1, q8s_4_1); + + __m128i sc_tmp = _mm_shuffle_epi8(scales, get_scale_shuffle(ib32+0)); + const __m128i sc1_0 = _mm_cvtepi8_epi16(sc_tmp); + const __m128i sc1_1 = _mm_cvtepi8_epi16(_mm_srli_si128(sc_tmp, 8)); + sc_tmp = _mm_shuffle_epi8(scales, get_scale_shuffle(ib32+1)); + const __m128i sc2_0 = _mm_cvtepi8_epi16(sc_tmp); + const __m128i sc2_1 = _mm_cvtepi8_epi16(_mm_srli_si128(sc_tmp, 8)); + sc_tmp = _mm_shuffle_epi8(scales, get_scale_shuffle(ib32+2)); + const __m128i sc3_0 = _mm_cvtepi8_epi16(sc_tmp); + const __m128i sc3_1 = 
_mm_cvtepi8_epi16(_mm_srli_si128(sc_tmp, 8)); + sc_tmp = _mm_shuffle_epi8(scales, get_scale_shuffle(ib32+3)); + const __m128i sc4_0 = _mm_cvtepi8_epi16(sc_tmp); + const __m128i sc4_1 = _mm_cvtepi8_epi16(_mm_srli_si128(sc_tmp, 8)); + + sumi1_0 = _mm_add_epi32(sumi1_0, _mm_madd_epi16(dot1_0, sc1_0)); + sumi1_1 = _mm_add_epi32(sumi1_1, _mm_madd_epi16(dot1_1, sc1_1)); + sumi2_0 = _mm_add_epi32(sumi2_0, _mm_madd_epi16(dot2_0, sc2_0)); + sumi2_1 = _mm_add_epi32(sumi2_1, _mm_madd_epi16(dot2_1, sc2_1)); + sumi1_0 = _mm_add_epi32(sumi1_0, _mm_madd_epi16(dot3_0, sc3_0)); + sumi1_1 = _mm_add_epi32(sumi1_1, _mm_madd_epi16(dot3_1, sc3_1)); + sumi2_0 = _mm_add_epi32(sumi2_0, _mm_madd_epi16(dot4_0, sc4_0)); + sumi2_1 = _mm_add_epi32(sumi2_1, _mm_madd_epi16(dot4_1, sc4_1)); + } + + accumf = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(MM256_SET_M128I(_mm_add_epi32(sumi1_1, sumi2_1), _mm_add_epi32(sumi1_0, sumi2_0)))), accumf); + + } + + *s = 0.125f * hsum_float_8(accumf); + +#else + UNUSED(x); + UNUSED(y); + UNUSED(nb); + ggml_vec_dot_iq2_xs_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); +#endif +} + +void ggml_vec_dot_iq2_s_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_iq2_s * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + +#if defined(__AVX2__) + + static const uint8_t k_mask1[32] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03 + }; + + static const uint8_t k_mask2[32] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, + 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, + }; + + const __m128i m4 = _mm_set1_epi8(0xf); + const __m128i m1 = _mm_set1_epi8(1); + + const __m256i mask1 = _mm256_loadu_si256((const __m256i*)k_mask1); + const __m256i mask2 = _mm256_loadu_si256((const __m256i*)k_mask2); + + uint64_t aux64; + + __m256 accumf = _mm256_setzero_ps(); + for (int i = 0; i < nb; ++i) { + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; + const uint8_t * GGML_RESTRICT qs = x[i].qs; + const uint8_t * GGML_RESTRICT qh = x[i].qh; + const uint16_t * GGML_RESTRICT signs = (const uint16_t *)(x[i].qs + QK_K/8); + const int8_t * GGML_RESTRICT q8 = y[i].qs; + + memcpy(&aux64, x[i].scales, 8); + const __m128i scales8 = _mm_add_epi8(_mm_slli_epi16(_mm_and_si128(_mm_set_epi64x(aux64 >> 4, aux64), m4), 1), m1); + const __m256i scales16 = _mm256_cvtepi8_epi16(scales8); // 0 2 4 6 8 10 12 14 1 3 5 7 9 11 13 15 + + __m256i sumi1 = _mm256_setzero_si256(); + __m256i sumi2 = _mm256_setzero_si256(); + for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { + const __m256i q8_1 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32; + const __m256i q8_2 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32; + const __m256i q2_1 = _mm256_set_epi64x(iq2s_grid[qs[3] | ((qh[ib32+0] << 2) & 0x300)], + iq2s_grid[qs[2] | ((qh[ib32+0] << 4) & 0x300)], + iq2s_grid[qs[1] | ((qh[ib32+0] << 6) & 0x300)], + iq2s_grid[qs[0] | ((qh[ib32+0] << 8) & 0x300)]); + const __m256i q2_2 = _mm256_set_epi64x(iq2s_grid[qs[7] | ((qh[ib32+1] << 2) & 0x300)], + iq2s_grid[qs[6] | ((qh[ib32+1] << 4) & 0x300)], + iq2s_grid[qs[5] | ((qh[ib32+1] << 6) 
& 0x300)], + iq2s_grid[qs[4] | ((qh[ib32+1] << 8) & 0x300)]); + qs += 8; + + __m256i aux256 = _mm256_set1_epi32(signs[0] | ((uint32_t) signs[1] << 16)); + aux256 = _mm256_and_si256(_mm256_shuffle_epi8(aux256,mask1), mask2); + const __m256i s2_1 = _mm256_cmpeq_epi8(aux256, mask2); + const __m256i q8s_1 = _mm256_sub_epi8(_mm256_xor_si256(s2_1, q8_1), s2_1); + + aux256 = _mm256_set1_epi32(signs[2] | ((uint32_t) signs[3] << 16)); + aux256 = _mm256_and_si256(_mm256_shuffle_epi8(aux256,mask1), mask2); + const __m256i s2_2 = _mm256_cmpeq_epi8(aux256, mask2); + const __m256i q8s_2 = _mm256_sub_epi8(_mm256_xor_si256(s2_2, q8_2), s2_2); + + signs += 4; + + const __m256i dot1 = _mm256_maddubs_epi16(q2_1, q8s_1); // blocks 2*ib32+0, 2*ib32+1 + const __m256i dot2 = _mm256_maddubs_epi16(q2_2, q8s_2); // blocks 2*ib32+2, 2*ib32+3 + + const __m256i p1 = _mm256_madd_epi16(dot1, _mm256_shuffle_epi8(scales16, get_scale_shuffle_k4(ib32+0))); + const __m256i p2 = _mm256_madd_epi16(dot2, _mm256_shuffle_epi8(scales16, get_scale_shuffle_k4(ib32+1))); + sumi1 = _mm256_add_epi32(sumi1, p1); + sumi2 = _mm256_add_epi32(sumi2, p2); + } + + accumf = _mm256_fmadd_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(_mm256_add_epi32(sumi1, sumi2)), accumf); + + } + + *s = 0.125f * hsum_float_8(accumf); + +#elif defined(__AVX__) + static const uint8_t k_mask1[32] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03 + }; + + static const uint8_t k_mask2[32] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, + 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, + }; + + const __m128i m4 = _mm_set1_epi8(0xf); + const __m128i m1 = _mm_set1_epi8(1); + + const __m128i mask1_0 = _mm_loadu_si128((const __m128i*)k_mask1); + const __m128i mask1_1 = _mm_loadu_si128((const __m128i*)k_mask1 + 1); + const __m128i mask2_0 = _mm_loadu_si128((const __m128i*)k_mask2); + const __m128i mask2_1 = _mm_loadu_si128((const __m128i*)k_mask2 + 1); + + uint64_t aux64; + + __m256 accumf = _mm256_setzero_ps(); + for (int i = 0; i < nb; ++i) { + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; + const uint8_t * GGML_RESTRICT qs = x[i].qs; + const uint8_t * GGML_RESTRICT qh = x[i].qh; + const uint16_t * GGML_RESTRICT signs = (const uint16_t *)(x[i].qs + QK_K/8); + const int8_t * GGML_RESTRICT q8 = y[i].qs; + + memcpy(&aux64, x[i].scales, 8); + const __m128i scales8 = _mm_add_epi8(_mm_slli_epi16(_mm_and_si128(_mm_set_epi64x(aux64 >> 4, aux64), m4), 1), m1); + const __m128i scales16_0 = _mm_cvtepi8_epi16(scales8); + const __m128i scales16_1 = _mm_cvtepi8_epi16(_mm_srli_si128(scales8, 8)); + + __m128i sumi1_0 = _mm_setzero_si128(); + __m128i sumi1_1 = _mm_setzero_si128(); + __m128i sumi2_0 = _mm_setzero_si128(); + __m128i sumi2_1 = _mm_setzero_si128(); + for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { + const __m128i q8_1_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; + const __m128i q8_1_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; + const __m128i q8_2_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; + const __m128i q8_2_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; + const __m128i q2_1_0 = _mm_set_epi64x(iq2s_grid[qs[1] | ((qh[ib32+0] << 6) & 0x300)], + iq2s_grid[qs[0] | ((qh[ib32+0] << 8) & 0x300)]); + const __m128i q2_1_1 = _mm_set_epi64x(iq2s_grid[qs[3] | ((qh[ib32+0] << 2) & 0x300)], + iq2s_grid[qs[2] | 
((qh[ib32+0] << 4) & 0x300)]); + const __m128i q2_2_0 = _mm_set_epi64x(iq2s_grid[qs[5] | ((qh[ib32+1] << 6) & 0x300)], + iq2s_grid[qs[4] | ((qh[ib32+1] << 8) & 0x300)]); + const __m128i q2_2_1 = _mm_set_epi64x(iq2s_grid[qs[7] | ((qh[ib32+1] << 2) & 0x300)], + iq2s_grid[qs[6] | ((qh[ib32+1] << 4) & 0x300)]); + qs += 8; + + __m128i aux128_0 = _mm_set1_epi32(signs[0] | ((uint32_t) signs[1] << 16)); + __m128i aux128_1 = aux128_0; + aux128_0 = _mm_and_si128(_mm_shuffle_epi8(aux128_0,mask1_0), mask2_0); + aux128_1 = _mm_and_si128(_mm_shuffle_epi8(aux128_1,mask1_1), mask2_1); + const __m128i s2_1_0 = _mm_cmpeq_epi8(aux128_0, mask2_0); + const __m128i s2_1_1 = _mm_cmpeq_epi8(aux128_1, mask2_1); + const __m128i q8s_1_0 = _mm_sub_epi8(_mm_xor_si128(s2_1_0, q8_1_0), s2_1_0); + const __m128i q8s_1_1 = _mm_sub_epi8(_mm_xor_si128(s2_1_1, q8_1_1), s2_1_1); + + aux128_0 = _mm_set1_epi32(signs[2] | ((uint32_t) signs[3] << 16)); + aux128_1 = aux128_0; + aux128_0 = _mm_and_si128(_mm_shuffle_epi8(aux128_0,mask1_0), mask2_0); + aux128_1 = _mm_and_si128(_mm_shuffle_epi8(aux128_1,mask1_1), mask2_1); + const __m128i s2_2_0 = _mm_cmpeq_epi8(aux128_0, mask2_0); + const __m128i s2_2_1 = _mm_cmpeq_epi8(aux128_1, mask2_1); + const __m128i q8s_2_0 = _mm_sub_epi8(_mm_xor_si128(s2_2_0, q8_2_0), s2_2_0); + const __m128i q8s_2_1 = _mm_sub_epi8(_mm_xor_si128(s2_2_1, q8_2_1), s2_2_1); + + signs += 4; + + const __m128i dot1_0 = _mm_maddubs_epi16(q2_1_0, q8s_1_0); + const __m128i dot1_1 = _mm_maddubs_epi16(q2_1_1, q8s_1_1); + const __m128i dot2_0 = _mm_maddubs_epi16(q2_2_0, q8s_2_0); + const __m128i dot2_1 = _mm_maddubs_epi16(q2_2_1, q8s_2_1); + + const __m128i p1_0 = _mm_madd_epi16(dot1_0, _mm_shuffle_epi8(scales16_0, _mm256_extractf128_si256(get_scale_shuffle_k4(ib32+0), 0))); + const __m128i p1_1 = _mm_madd_epi16(dot1_1, _mm_shuffle_epi8(scales16_1, _mm256_extractf128_si256(get_scale_shuffle_k4(ib32+0), 1))); + const __m128i p2_0 = _mm_madd_epi16(dot2_0, _mm_shuffle_epi8(scales16_0, _mm256_extractf128_si256(get_scale_shuffle_k4(ib32+1), 0))); + const __m128i p2_1 = _mm_madd_epi16(dot2_1, _mm_shuffle_epi8(scales16_1, _mm256_extractf128_si256(get_scale_shuffle_k4(ib32+1), 1))); + sumi1_0 = _mm_add_epi32(sumi1_0, p1_0); + sumi1_1 = _mm_add_epi32(sumi1_1, p1_1); + sumi2_0 = _mm_add_epi32(sumi2_0, p2_0); + sumi2_1 = _mm_add_epi32(sumi2_1, p2_1); + } + + accumf = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(MM256_SET_M128I(_mm_add_epi32(sumi1_1, sumi2_1), _mm_add_epi32(sumi1_0, sumi2_0)))), accumf); + + } + + *s = 0.125f * hsum_float_8(accumf); + +#else + UNUSED(x); + UNUSED(y); + UNUSED(nb); + ggml_vec_dot_iq2_s_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); +#endif +} + +void ggml_vec_dot_iq3_xxs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_iq3_xxs * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + +#if defined(__AVX2__) + + const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs; + + uint32_t aux32[2]; + + __m256 accumf = _mm256_setzero_ps(); + for (int i = 0; i < nb; ++i) { + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; + const uint8_t * GGML_RESTRICT q3 = x[i].qs; + const uint8_t * GGML_RESTRICT gas = x[i].qs + QK_K/4; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + __m256i sumi1 = _mm256_setzero_si256(); + __m256i sumi2 = 
_mm256_setzero_si256(); + for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { + const __m256i q8_1 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32; + const __m256i q8_2 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32; + const __m256i q2_1 = _mm256_set_epi32(iq3xxs_grid[q3[7]], iq3xxs_grid[q3[6]], iq3xxs_grid[q3[5]], iq3xxs_grid[q3[4]], + iq3xxs_grid[q3[3]], iq3xxs_grid[q3[2]], iq3xxs_grid[q3[1]], iq3xxs_grid[q3[0]]); + q3 += 8; + const __m256i q2_2 = _mm256_set_epi32(iq3xxs_grid[q3[7]], iq3xxs_grid[q3[6]], iq3xxs_grid[q3[5]], iq3xxs_grid[q3[4]], + iq3xxs_grid[q3[3]], iq3xxs_grid[q3[2]], iq3xxs_grid[q3[1]], iq3xxs_grid[q3[0]]); + q3 += 8; + memcpy(aux32, gas, 8); gas += 8; + const __m256i s2_1 = _mm256_set_epi64x(signs64[(aux32[0] >> 21) & 127], signs64[(aux32[0] >> 14) & 127], + signs64[(aux32[0] >> 7) & 127], signs64[(aux32[0] >> 0) & 127]); + const __m256i s2_2 = _mm256_set_epi64x(signs64[(aux32[1] >> 21) & 127], signs64[(aux32[1] >> 14) & 127], + signs64[(aux32[1] >> 7) & 127], signs64[(aux32[1] >> 0) & 127]); + const __m256i q8s_1 = _mm256_sign_epi8(q8_1, s2_1); + const __m256i q8s_2 = _mm256_sign_epi8(q8_2, s2_2); + const __m256i dot1 = _mm256_maddubs_epi16(q2_1, q8s_1); + const __m256i dot2 = _mm256_maddubs_epi16(q2_2, q8s_2); + const uint16_t ls1 = aux32[0] >> 28; + const uint16_t ls2 = aux32[1] >> 28; + const __m256i p1 = _mm256_madd_epi16(dot1, _mm256_set1_epi16(2*ls1+1)); + const __m256i p2 = _mm256_madd_epi16(dot2, _mm256_set1_epi16(2*ls2+1)); + sumi1 = _mm256_add_epi32(sumi1, p1); + sumi2 = _mm256_add_epi32(sumi2, p2); + } + + accumf = _mm256_fmadd_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(_mm256_add_epi32(sumi1, sumi2)), accumf); + + } + + *s = 0.25f * hsum_float_8(accumf); + +#elif defined(__AVX__) + const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs; + + uint32_t aux32[2]; + + __m256 accumf = _mm256_setzero_ps(); + for (int i = 0; i < nb; ++i) { + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; + const uint8_t * GGML_RESTRICT q3 = x[i].qs; + const uint8_t * GGML_RESTRICT gas = x[i].qs + QK_K/4; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + __m128i sumi1_0 = _mm_setzero_si128(); + __m128i sumi1_1 = _mm_setzero_si128(); + __m128i sumi2_0 = _mm_setzero_si128(); + __m128i sumi2_1 = _mm_setzero_si128(); + for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { + const __m128i q8_1_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; + const __m128i q8_1_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; + const __m128i q8_2_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; + const __m128i q8_2_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; + const __m128i q2_1_0 = _mm_set_epi32(iq3xxs_grid[q3[3]], iq3xxs_grid[q3[2]], iq3xxs_grid[q3[1]], iq3xxs_grid[q3[0]]); + const __m128i q2_1_1 = _mm_set_epi32(iq3xxs_grid[q3[7]], iq3xxs_grid[q3[6]], iq3xxs_grid[q3[5]], iq3xxs_grid[q3[4]]); + q3 += 8; + const __m128i q2_2_0 = _mm_set_epi32(iq3xxs_grid[q3[3]], iq3xxs_grid[q3[2]], iq3xxs_grid[q3[1]], iq3xxs_grid[q3[0]]); + const __m128i q2_2_1 = _mm_set_epi32(iq3xxs_grid[q3[7]], iq3xxs_grid[q3[6]], iq3xxs_grid[q3[5]], iq3xxs_grid[q3[4]]); + q3 += 8; + memcpy(aux32, gas, 8); gas += 8; + const __m128i s2_1_0 = _mm_set_epi64x(signs64[(aux32[0] >> 7) & 127], signs64[(aux32[0] >> 0) & 127]); + const __m128i s2_1_1 = _mm_set_epi64x(signs64[(aux32[0] >> 21) & 127], signs64[(aux32[0] >> 14) & 127]); + const __m128i s2_2_0 = _mm_set_epi64x(signs64[(aux32[1] >> 7) & 127], signs64[(aux32[1] >> 0) & 127]); + const __m128i s2_2_1 = _mm_set_epi64x(signs64[(aux32[1] >> 21) & 127], 
signs64[(aux32[1] >> 14) & 127]); + const __m128i q8s_1_0 = _mm_sign_epi8(q8_1_0, s2_1_0); + const __m128i q8s_1_1 = _mm_sign_epi8(q8_1_1, s2_1_1); + const __m128i q8s_2_0 = _mm_sign_epi8(q8_2_0, s2_2_0); + const __m128i q8s_2_1 = _mm_sign_epi8(q8_2_1, s2_2_1); + const __m128i dot1_0 = _mm_maddubs_epi16(q2_1_0, q8s_1_0); + const __m128i dot1_1 = _mm_maddubs_epi16(q2_1_1, q8s_1_1); + const __m128i dot2_0 = _mm_maddubs_epi16(q2_2_0, q8s_2_0); + const __m128i dot2_1 = _mm_maddubs_epi16(q2_2_1, q8s_2_1); + const uint16_t ls1 = aux32[0] >> 28; + const uint16_t ls2 = aux32[1] >> 28; + const __m128i p1_0 = _mm_madd_epi16(dot1_0, _mm_set1_epi16(2*ls1+1)); + const __m128i p1_1 = _mm_madd_epi16(dot1_1, _mm_set1_epi16(2*ls1+1)); + const __m128i p2_0 = _mm_madd_epi16(dot2_0, _mm_set1_epi16(2*ls2+1)); + const __m128i p2_1 = _mm_madd_epi16(dot2_1, _mm_set1_epi16(2*ls2+1)); + sumi1_0 = _mm_add_epi32(sumi1_0, p1_0); + sumi1_1 = _mm_add_epi32(sumi1_1, p1_1); + sumi2_0 = _mm_add_epi32(sumi2_0, p2_0); + sumi2_1 = _mm_add_epi32(sumi2_1, p2_1); + } + + accumf = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(MM256_SET_M128I(_mm_add_epi32(sumi1_1, sumi2_1), _mm_add_epi32(sumi1_0, sumi2_0)))), accumf); + + } + + *s = 0.25f * hsum_float_8(accumf); + +#else + UNUSED(x); + UNUSED(y); + UNUSED(nb); + ggml_vec_dot_iq3_xxs_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); +#endif +} + +void ggml_vec_dot_iq3_s_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_iq3_s * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + +#if defined(__AVX2__) + + static const uint8_t k_mask1[32] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03 + }; + + static const uint8_t k_mask2[32] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, + 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, + }; + + const __m256i mask1 = _mm256_loadu_si256((const __m256i*)k_mask1); + const __m256i mask2 = _mm256_loadu_si256((const __m256i*)k_mask2); + + const __m256i idx_shift = _mm256_set_epi32(1, 2, 3, 4, 5, 6, 7, 8); + const __m256i idx_mask = _mm256_set1_epi32(256); + + typedef union { + __m256i vec[2]; + uint32_t index[16]; + } index_t; + + index_t idx; + + __m256 accumf = _mm256_setzero_ps(); + for (int i = 0; i < nb; ++i) { + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; + const uint8_t * GGML_RESTRICT qs = x[i].qs; + const uint8_t * GGML_RESTRICT qh = x[i].qh; + const uint16_t * GGML_RESTRICT signs = (const uint16_t *)x[i].signs; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + __m256i sumi1 = _mm256_setzero_si256(); + __m256i sumi2 = _mm256_setzero_si256(); + for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { + const __m256i q8_1 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32; + const __m256i q8_2 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32; + const __m256i idx_l = _mm256_cvtepu8_epi16(_mm_loadu_si128((const __m128i *)qs)); qs += 16; + idx.vec[0] = _mm256_set1_epi32(qh[ib32+0]); + idx.vec[1] = _mm256_set1_epi32(qh[ib32+1]); + idx.vec[0] = _mm256_and_si256(_mm256_sllv_epi32(idx.vec[0], idx_shift), idx_mask); + idx.vec[1] = 
_mm256_and_si256(_mm256_sllv_epi32(idx.vec[1], idx_shift), idx_mask); + idx.vec[0] = _mm256_or_si256(idx.vec[0], _mm256_cvtepi16_epi32(_mm256_castsi256_si128(idx_l))); + idx.vec[1] = _mm256_or_si256(idx.vec[1], _mm256_cvtepi16_epi32(_mm256_extractf128_si256(idx_l, 1))); + + // At least on my CPU (Ryzen 7950X), using _mm256_i32gather_epi32 is slower than _mm256_set_epi32. Strange. + //const __m256i q2_1 = _mm256_i32gather_epi32((const int *)iq3s_grid, idx.vec[0], 4); + //const __m256i q2_2 = _mm256_i32gather_epi32((const int *)iq3s_grid, idx.vec[1], 4); + const __m256i q2_1 = _mm256_set_epi32( + iq3s_grid[idx.index[7]], iq3s_grid[idx.index[6]], iq3s_grid[idx.index[5]], iq3s_grid[idx.index[4]], + iq3s_grid[idx.index[3]], iq3s_grid[idx.index[2]], iq3s_grid[idx.index[1]], iq3s_grid[idx.index[0]] + ); + const __m256i q2_2 = _mm256_set_epi32( + iq3s_grid[idx.index[15]], iq3s_grid[idx.index[14]], iq3s_grid[idx.index[13]], iq3s_grid[idx.index[12]], + iq3s_grid[idx.index[11]], iq3s_grid[idx.index[10]], iq3s_grid[idx.index[ 9]], iq3s_grid[idx.index[ 8]] + ); + + __m256i aux256 = _mm256_set1_epi32(signs[0] | (signs[1] << 16)); + aux256 = _mm256_and_si256(_mm256_shuffle_epi8(aux256,mask1), mask2); + const __m256i s2_1 = _mm256_cmpeq_epi8(aux256, mask2); + const __m256i q8s_1 = _mm256_sub_epi8(_mm256_xor_si256(s2_1, q8_1), s2_1); + + aux256 = _mm256_set1_epi32(signs[2] | (signs[3] << 16)); + aux256 = _mm256_and_si256(_mm256_shuffle_epi8(aux256,mask1), mask2); + const __m256i s2_2 = _mm256_cmpeq_epi8(aux256, mask2); + const __m256i q8s_2 = _mm256_sub_epi8(_mm256_xor_si256(s2_2, q8_2), s2_2); + + signs += 4; + + const __m256i dot1 = _mm256_maddubs_epi16(q2_1, q8s_1); + const __m256i dot2 = _mm256_maddubs_epi16(q2_2, q8s_2); + const uint16_t ls1 = x[i].scales[ib32/2] & 0xf; + const uint16_t ls2 = x[i].scales[ib32/2] >> 4; + const __m256i p1 = _mm256_madd_epi16(dot1, _mm256_set1_epi16(2*ls1+1)); + const __m256i p2 = _mm256_madd_epi16(dot2, _mm256_set1_epi16(2*ls2+1)); + sumi1 = _mm256_add_epi32(sumi1, p1); + sumi2 = _mm256_add_epi32(sumi2, p2); + } + + accumf = _mm256_fmadd_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(_mm256_add_epi32(sumi1, sumi2)), accumf); + + } + + *s = hsum_float_8(accumf); + +#elif defined(__AVX__) + static const uint8_t k_mask1[32] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03 + }; + + static const uint8_t k_mask2[32] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, + 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, + }; + + const __m128i mask1_0 = _mm_loadu_si128((const __m128i*)k_mask1); + const __m128i mask1_1 = _mm_loadu_si128((const __m128i*)k_mask1 + 1); + const __m128i mask2_0 = _mm_loadu_si128((const __m128i*)k_mask2); + const __m128i mask2_1 = _mm_loadu_si128((const __m128i*)k_mask2 + 1); + + const __m128i idx_mul_0 = _mm_set_epi32(32, 64, 128, 256); + const __m128i idx_mul_1 = _mm_set_epi32(2, 4, 8, 16); + const __m128i idx_mask = _mm_set1_epi32(256); + + typedef union { + __m128i vec[4]; + uint32_t index[16]; + } index_t; + + index_t idx; + + __m256 accumf = _mm256_setzero_ps(); + for (int i = 0; i < nb; ++i) { + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; + const uint8_t * GGML_RESTRICT qs = x[i].qs; + const uint8_t * GGML_RESTRICT qh = x[i].qh; + const uint16_t * GGML_RESTRICT signs = (const uint16_t *)x[i].signs; +
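// note: each 16-bit word of 'signs' packs per-weight sign bits; below they are expanded to 0xFF/0x00 byte masks (shuffle against k_mask1, test with k_mask2) and applied to q8 via xor/sub +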
const int8_t * GGML_RESTRICT q8 = y[i].qs; + __m128i sumi1_0 = _mm_setzero_si128(); + __m128i sumi1_1 = _mm_setzero_si128(); + __m128i sumi2_0 = _mm_setzero_si128(); + __m128i sumi2_1 = _mm_setzero_si128(); + for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { + const __m128i q8_1_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; + const __m128i q8_1_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; + const __m128i q8_2_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; + const __m128i q8_2_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; + const __m128i qs_tmp = _mm_loadu_si128((const __m128i *)qs); + const __m128i idx_l_0 = _mm_cvtepu8_epi16(qs_tmp); + const __m128i idx_l_1 = _mm_cvtepu8_epi16(_mm_srli_si128(qs_tmp, 8)); qs += 16; + idx.vec[0] = _mm_set1_epi32(qh[ib32+0]); + idx.vec[1] = idx.vec[0]; + idx.vec[2] = _mm_set1_epi32(qh[ib32+1]); + idx.vec[3] = idx.vec[2]; + + idx.vec[0] = _mm_and_si128(_mm_mullo_epi32(idx.vec[0], idx_mul_0), idx_mask); + idx.vec[1] = _mm_and_si128(_mm_mullo_epi32(idx.vec[1], idx_mul_1), idx_mask); + idx.vec[2] = _mm_and_si128(_mm_mullo_epi32(idx.vec[2], idx_mul_0), idx_mask); + idx.vec[3] = _mm_and_si128(_mm_mullo_epi32(idx.vec[3], idx_mul_1), idx_mask); + + idx.vec[0] = _mm_or_si128(idx.vec[0], _mm_cvtepi16_epi32(idx_l_0)); + idx.vec[1] = _mm_or_si128(idx.vec[1], _mm_cvtepi16_epi32(_mm_srli_si128(idx_l_0, 8))); + idx.vec[2] = _mm_or_si128(idx.vec[2], _mm_cvtepi16_epi32(idx_l_1)); + idx.vec[3] = _mm_or_si128(idx.vec[3], _mm_cvtepi16_epi32(_mm_srli_si128(idx_l_1, 8))); + + const __m128i q2_1_0 = _mm_set_epi32(iq3s_grid[idx.index[3]], iq3s_grid[idx.index[2]], iq3s_grid[idx.index[1]], iq3s_grid[idx.index[0]]); + const __m128i q2_1_1 = _mm_set_epi32(iq3s_grid[idx.index[7]], iq3s_grid[idx.index[6]], iq3s_grid[idx.index[5]], iq3s_grid[idx.index[4]]); + const __m128i q2_2_0 = _mm_set_epi32(iq3s_grid[idx.index[11]], iq3s_grid[idx.index[10]], iq3s_grid[idx.index[9]], iq3s_grid[idx.index[8]]); + const __m128i q2_2_1 = _mm_set_epi32(iq3s_grid[idx.index[15]], iq3s_grid[idx.index[14]], iq3s_grid[idx.index[13]], iq3s_grid[idx.index[12]]); + + __m128i aux128_0 = _mm_set1_epi32(signs[0] | (signs[1] << 16)); + __m128i aux128_1 = aux128_0; + aux128_0 = _mm_and_si128(_mm_shuffle_epi8(aux128_0,mask1_0), mask2_0); + aux128_1 = _mm_and_si128(_mm_shuffle_epi8(aux128_1,mask1_1), mask2_1); + const __m128i s2_1_0 = _mm_cmpeq_epi8(aux128_0, mask2_0); + const __m128i s2_1_1 = _mm_cmpeq_epi8(aux128_1, mask2_1); + const __m128i q8s_1_0 = _mm_sub_epi8(_mm_xor_si128(s2_1_0, q8_1_0), s2_1_0); + const __m128i q8s_1_1 = _mm_sub_epi8(_mm_xor_si128(s2_1_1, q8_1_1), s2_1_1); + + aux128_0 = _mm_set1_epi32(signs[2] | (signs[3] << 16)); + aux128_1 = aux128_0; + aux128_0 = _mm_and_si128(_mm_shuffle_epi8(aux128_0,mask1_0), mask2_0); + aux128_1 = _mm_and_si128(_mm_shuffle_epi8(aux128_1,mask1_1), mask2_1); + const __m128i s2_2_0 = _mm_cmpeq_epi8(aux128_0, mask2_0); + const __m128i s2_2_1 = _mm_cmpeq_epi8(aux128_1, mask2_1); + const __m128i q8s_2_0 = _mm_sub_epi8(_mm_xor_si128(s2_2_0, q8_2_0), s2_2_0); + const __m128i q8s_2_1 = _mm_sub_epi8(_mm_xor_si128(s2_2_1, q8_2_1), s2_2_1); + + signs += 4; + + const __m128i dot1_0 = _mm_maddubs_epi16(q2_1_0, q8s_1_0); + const __m128i dot1_1 = _mm_maddubs_epi16(q2_1_1, q8s_1_1); + const __m128i dot2_0 = _mm_maddubs_epi16(q2_2_0, q8s_2_0); + const __m128i dot2_1 = _mm_maddubs_epi16(q2_2_1, q8s_2_1); + const uint16_t ls1 = x[i].scales[ib32/2] & 0xf; + const uint16_t ls2 = x[i].scales[ib32/2] >> 4; + const __m128i p1_0 = _mm_madd_epi16(dot1_0, 
_mm_set1_epi16(2*ls1+1)); + const __m128i p1_1 = _mm_madd_epi16(dot1_1, _mm_set1_epi16(2*ls1+1)); + const __m128i p2_0 = _mm_madd_epi16(dot2_0, _mm_set1_epi16(2*ls2+1)); + const __m128i p2_1 = _mm_madd_epi16(dot2_1, _mm_set1_epi16(2*ls2+1)); + sumi1_0 = _mm_add_epi32(sumi1_0, p1_0); + sumi1_1 = _mm_add_epi32(sumi1_1, p1_1); + sumi2_0 = _mm_add_epi32(sumi2_0, p2_0); + sumi2_1 = _mm_add_epi32(sumi2_1, p2_1); + } + + accumf = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(MM256_SET_M128I(_mm_add_epi32(sumi1_1, sumi2_1), _mm_add_epi32(sumi1_0, sumi2_0)))), accumf); + + } + + *s = hsum_float_8(accumf); + +#else + UNUSED(x); + UNUSED(y); + UNUSED(nb); + ggml_vec_dot_iq3_s_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); +#endif +} + +void ggml_vec_dot_iq1_s_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_iq1_s * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + +#if defined __AVX2__ + + __m256 accum = _mm256_setzero_ps(); + float accum1 = 0; + for (int i = 0; i < nb; ++i) { + + const int8_t * q8 = y[i].qs; + const uint8_t * qs = x[i].qs; + const uint16_t * qh = x[i].qh; + + __m256i sumi = _mm256_setzero_si256(); + int sumi1 = 0; + for (int ib = 0; ib < QK_K/32; ib += 2) { +#ifdef __BMI2__ + const uint64_t packed_idx1 = _pdep_u64(*(const uint32_t *)qs, 0x00ff00ff00ff00ffULL) | _pdep_u64(qh[ib], 0x700070007000700ULL); + const uint64_t packed_idx2 = _pdep_u64(*(const uint32_t *)(qs + 4), 0x00ff00ff00ff00ffULL) | _pdep_u64(qh[ib + 1], 0x700070007000700ULL); + const uint16_t *idx1 = (const uint16_t *)(&packed_idx1); + const uint16_t *idx2 = (const uint16_t *)(&packed_idx2); + const __m256i q1b_1 = _mm256_set_epi64x(iq1s_grid[idx1[3]], iq1s_grid[idx1[2]], iq1s_grid[idx1[1]], iq1s_grid[idx1[0]]); + const __m256i q1b_2 = _mm256_set_epi64x(iq1s_grid[idx2[3]], iq1s_grid[idx2[2]], iq1s_grid[idx2[1]], iq1s_grid[idx2[0]]); +#else + const __m256i q1b_1 = _mm256_set_epi64x(iq1s_grid[qs[3] | ((qh[ib+0] >> 1) & 0x700)], iq1s_grid[qs[2] | ((qh[ib+0] << 2) & 0x700)], + iq1s_grid[qs[1] | ((qh[ib+0] << 5) & 0x700)], iq1s_grid[qs[0] | ((qh[ib+0] << 8) & 0x700)]); + const __m256i q1b_2 = _mm256_set_epi64x(iq1s_grid[qs[7] | ((qh[ib+1] >> 1) & 0x700)], iq1s_grid[qs[6] | ((qh[ib+1] << 2) & 0x700)], + iq1s_grid[qs[5] | ((qh[ib+1] << 5) & 0x700)], iq1s_grid[qs[4] | ((qh[ib+1] << 8) & 0x700)]); +#endif + qs += 8; + const __m256i q8b_1 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; + const __m256i q8b_2 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; + + const __m256i dot1 = mul_add_epi8(q1b_1, q8b_1); + const __m256i dot2 = mul_add_epi8(q1b_2, q8b_2); + const int16_t ls1 = 2*((qh[ib+0] >> 12) & 7) + 1; + const int16_t ls2 = 2*((qh[ib+1] >> 12) & 7) + 1; + const __m256i p1 = _mm256_madd_epi16(dot1, _mm256_set1_epi16(ls1)); + const __m256i p2 = _mm256_madd_epi16(dot2, _mm256_set1_epi16(ls2)); + + sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p1, p2)); + sumi1 += (y[i].bsums[2*ib+0] + y[i].bsums[2*ib+1]) * (qh[ib+0] & 0x8000 ? -1 : 1) * ls1 + + (y[i].bsums[2*ib+2] + y[i].bsums[2*ib+3]) * (qh[ib+1] & 0x8000 ? 
-1 : 1) * ls2; + } + + const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); + accum = _mm256_fmadd_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(sumi), accum); + accum1 += d * sumi1; + + } + + *s = hsum_float_8(accum) + IQ1S_DELTA * accum1; + +#elif defined __AVX__ + __m256 accum = _mm256_setzero_ps(); + float accum1 = 0; + for (int i = 0; i < nb; ++i) { + + const int8_t * q8 = y[i].qs; + const uint8_t * qs = x[i].qs; + const uint16_t * qh = x[i].qh; + + __m128i sumi1_0 = _mm_setzero_si128(); + __m128i sumi1_1 = _mm_setzero_si128(); + int sumi1 = 0; + for (int ib = 0; ib < QK_K/32; ib += 2) { + const __m128i q1b_1_0 = _mm_set_epi64x(iq1s_grid[qs[1] | ((qh[ib+0] << 5) & 0x700)], iq1s_grid[qs[0] | ((qh[ib+0] << 8) & 0x700)]); + const __m128i q1b_1_1 = _mm_set_epi64x(iq1s_grid[qs[3] | ((qh[ib+0] >> 1) & 0x700)], iq1s_grid[qs[2] | ((qh[ib+0] << 2) & 0x700)]); + const __m128i q1b_2_0 = _mm_set_epi64x(iq1s_grid[qs[5] | ((qh[ib+1] << 5) & 0x700)], iq1s_grid[qs[4] | ((qh[ib+1] << 8) & 0x700)]); + const __m128i q1b_2_1 = _mm_set_epi64x(iq1s_grid[qs[7] | ((qh[ib+1] >> 1) & 0x700)], iq1s_grid[qs[6] | ((qh[ib+1] << 2) & 0x700)]); + qs += 8; + const __m128i q8b_1_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; + const __m128i q8b_1_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; + const __m128i q8b_2_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; + const __m128i q8b_2_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; + + const __m128i dot1_0 = mul_add_epi8_sse(q1b_1_0, q8b_1_0); + const __m128i dot1_1 = mul_add_epi8_sse(q1b_1_1, q8b_1_1); + const __m128i dot2_0 = mul_add_epi8_sse(q1b_2_0, q8b_2_0); + const __m128i dot2_1 = mul_add_epi8_sse(q1b_2_1, q8b_2_1); + const int16_t ls1 = 2*((qh[ib+0] >> 12) & 7) + 1; + const int16_t ls2 = 2*((qh[ib+1] >> 12) & 7) + 1; + const __m128i p1_0 = _mm_madd_epi16(dot1_0, _mm_set1_epi16(ls1)); + const __m128i p1_1 = _mm_madd_epi16(dot1_1, _mm_set1_epi16(ls1)); + const __m128i p2_0 = _mm_madd_epi16(dot2_0, _mm_set1_epi16(ls2)); + const __m128i p2_1 = _mm_madd_epi16(dot2_1, _mm_set1_epi16(ls2)); + + sumi1_0 = _mm_add_epi32(sumi1_0, _mm_add_epi32(p1_0, p2_0)); + sumi1_1 = _mm_add_epi32(sumi1_1, _mm_add_epi32(p1_1, p2_1)); + sumi1 += (y[i].bsums[2*ib+0] + y[i].bsums[2*ib+1]) * (qh[ib+0] & 0x8000 ? -1 : 1) * ls1 + + (y[i].bsums[2*ib+2] + y[i].bsums[2*ib+3]) * (qh[ib+1] & 0x8000 ? -1 : 1) * ls2; + } + + const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); + accum = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(MM256_SET_M128I(sumi1_1, sumi1_0))), accum); + accum1 += d * sumi1; + + } + + *s = hsum_float_8(accum) + IQ1S_DELTA * accum1; + +#else + UNUSED(x); + UNUSED(y); + UNUSED(nb); + ggml_vec_dot_iq1_s_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); +#endif +} + +void ggml_vec_dot_iq1_m_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_iq1_m * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + + iq1m_scale_t scale; + +#if defined __AVX2__ + + const __m256i mask = _mm256_set1_epi16(0x7); + const __m256i mone = _mm256_set1_epi16(1); + const __m256i mone8 = _mm256_set1_epi8(1); + const __m256i mtwo8 = _mm256_set1_epi8(2); + // VPSHUFB cannot cross 128-bit lanes so odd shifts go to upper half. 
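+ // (the per-lane shifts below are {0, 6} in the low 128-bit half and {3, 9} in the upper half, so each 64-bit lane exposes a different 3-bit scale field of the same broadcast scale word)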
+ const __m256i scales_shift = _mm256_set_epi64x(9, 3, 6, 0); + + __m256 accum1 = _mm256_setzero_ps(); + __m256 accum2 = _mm256_setzero_ps(); + for (int i = 0; i < nb; ++i) { + + const int8_t * q8 = y[i].qs; + const uint8_t * qs = x[i].qs; + const uint8_t * qh = x[i].qh; + const uint16_t * sc = (const uint16_t *)x[i].scales; + + scale.u16 = (sc[0] >> 12) | ((sc[1] >> 8) & 0x00f0) | ((sc[2] >> 4) & 0x0f00) | (sc[3] & 0xf000); + // Extract 3-bit scales (16 values) + __m256i scales = _mm256_set1_epi64x(*(const uint64_t*)sc); + scales = _mm256_srlv_epi64(scales, scales_shift); + scales = _mm256_add_epi16(_mm256_slli_epi16(_mm256_and_si256(scales, mask), 1), mone); + + // Indices to repeat each scale 8 times. + __m256i scales_idx1 = _mm256_set1_epi16(0x0100); + __m256i scales_idx2 = _mm256_add_epi8(scales_idx1, _mm256_set1_epi8(8)); + + __m256i sumi1 = _mm256_setzero_si256(); + __m256i sumi2 = _mm256_setzero_si256(); + for (int ib = 0; ib < QK_K/32; ib += 2) { +#ifdef __BMI2__ + const uint64_t packed_idx1 = _pdep_u64(*(const uint32_t *)qs, 0x00ff00ff00ff00ffULL) + | _pdep_u64(*(const uint16_t*)(qh) & 0x7777, 0xf000f000f000f00ULL); + const uint64_t packed_idx2 = _pdep_u64(*(const uint32_t *)(qs + 4), 0x00ff00ff00ff00ffULL) + | _pdep_u64(*(const uint16_t*)(qh + 2) & 0x7777, 0xf000f000f000f00ULL); + const uint16_t *idx1 = (const uint16_t *)(&packed_idx1); + const uint16_t *idx2 = (const uint16_t *)(&packed_idx2); + const __m256i q1b_1 = _mm256_set_epi64x(iq1s_grid[idx1[3]], iq1s_grid[idx1[2]], iq1s_grid[idx1[1]], iq1s_grid[idx1[0]]); + const __m256i q1b_2 = _mm256_set_epi64x(iq1s_grid[idx2[3]], iq1s_grid[idx2[2]], iq1s_grid[idx2[1]], iq1s_grid[idx2[0]]); + + // Convert signs to bytes 0x81 (negative) or 0x01 (positive) + const uint64_t delta_sign = _pdep_u64(*(const uint32_t*)(qh) & 0x88888888, 0xf0f0f0f0f0f0f0f0ULL); + const __m256i delta1 = _mm256_or_si256(mone8, _mm256_cvtepi8_epi64(_mm_set1_epi32(delta_sign))); + const __m256i delta2 = _mm256_or_si256(mone8, _mm256_cvtepi8_epi64(_mm_set1_epi32(delta_sign >> 32))); +#else + const __m256i q1b_1 = _mm256_set_epi64x( + iq1s_grid[qs[3] | (((uint16_t)qh[1] << 4) & 0x700)], iq1s_grid[qs[2] | (((uint16_t)qh[1] << 8) & 0x700)], + iq1s_grid[qs[1] | (((uint16_t)qh[0] << 4) & 0x700)], iq1s_grid[qs[0] | (((uint16_t)qh[0] << 8) & 0x700)] + ); + const __m256i q1b_2 = _mm256_set_epi64x( + iq1s_grid[qs[7] | (((uint16_t)qh[3] << 4) & 0x700)], iq1s_grid[qs[6] | (((uint16_t)qh[3] << 8) & 0x700)], + iq1s_grid[qs[5] | (((uint16_t)qh[2] << 4) & 0x700)], iq1s_grid[qs[4] | (((uint16_t)qh[2] << 8) & 0x700)] + ); + + const __m256i delta1 = _mm256_set_epi64x(qh[1] & 0x80 ? 0xffffffffffffffff : 0x0101010101010101, + qh[1] & 0x08 ? 0xffffffffffffffff : 0x0101010101010101, + qh[0] & 0x80 ? 0xffffffffffffffff : 0x0101010101010101, + qh[0] & 0x08 ? 0xffffffffffffffff : 0x0101010101010101); + const __m256i delta2 = _mm256_set_epi64x(qh[3] & 0x80 ? 0xffffffffffffffff : 0x0101010101010101, + qh[3] & 0x08 ? 0xffffffffffffffff : 0x0101010101010101, + qh[2] & 0x80 ? 0xffffffffffffffff : 0x0101010101010101, + qh[2] & 0x08 ? 
0xffffffffffffffff : 0x0101010101010101); +#endif + const __m256i q8b_1 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; + const __m256i q8b_2 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; + + const __m256i dot1 = mul_add_epi8(q1b_1, q8b_1); + const __m256i dot2 = mul_add_epi8(q1b_2, q8b_2); + const __m256i dot3 = _mm256_maddubs_epi16(mone8, _mm256_sign_epi8(q8b_1, delta1)); + const __m256i dot4 = _mm256_maddubs_epi16(mone8, _mm256_sign_epi8(q8b_2, delta2)); + + __m256i scale1 = _mm256_shuffle_epi8(scales, scales_idx1); + __m256i scale2 = _mm256_shuffle_epi8(scales, scales_idx2); + + scales_idx1 = _mm256_add_epi8(scales_idx1, mtwo8); + scales_idx2 = _mm256_add_epi8(scales_idx2, mtwo8); + + const __m256i p1 = _mm256_madd_epi16(dot1, scale1); + const __m256i p2 = _mm256_madd_epi16(dot2, scale2); + const __m256i p3 = _mm256_madd_epi16(dot3, scale1); + const __m256i p4 = _mm256_madd_epi16(dot4, scale2); + + sumi1 = _mm256_add_epi32(sumi1, _mm256_add_epi32(p1, p2)); + sumi2 = _mm256_add_epi32(sumi2, _mm256_add_epi32(p3, p4)); + + qs += 8; qh += 4; + } + + const __m256 d = _mm256_set1_ps(y[i].d * GGML_CPU_FP16_TO_FP32(scale.f16)); + + accum1 = _mm256_fmadd_ps(d, _mm256_cvtepi32_ps(sumi1), accum1); + accum2 = _mm256_fmadd_ps(d, _mm256_cvtepi32_ps(sumi2), accum2); + } + + *s = hsum_float_8(accum1) + IQ1M_DELTA * hsum_float_8(accum2); + +#elif defined __AVX__ + const __m128i mask = _mm_set1_epi16(0x7); + const __m128i mone = _mm_set1_epi16(1); + + __m256 accum1 = _mm256_setzero_ps(); + __m256 accum2 = _mm256_setzero_ps(); + for (int i = 0; i < nb; ++i) { + + const int8_t * q8 = y[i].qs; + const uint8_t * qs = x[i].qs; + const uint8_t * qh = x[i].qh; + const uint16_t * sc = (const uint16_t *)x[i].scales; + + scale.u16 = (sc[0] >> 12) | ((sc[1] >> 8) & 0x00f0) | ((sc[2] >> 4) & 0x0f00) | (sc[3] & 0xf000); + + __m128i sumi1_0 = _mm_setzero_si128(); + __m128i sumi1_1 = _mm_setzero_si128(); + __m128i sumi2_0 = _mm_setzero_si128(); + __m128i sumi2_1 = _mm_setzero_si128(); + for (int ib = 0; ib < QK_K/32; ib += 2) { + const __m128i q1b_1_0 = _mm_set_epi64x( + iq1s_grid[qs[1] | (((uint16_t)qh[0] << 4) & 0x700)], iq1s_grid[qs[0] | (((uint16_t)qh[0] << 8) & 0x700)]); + const __m128i q1b_1_1 = _mm_set_epi64x( + iq1s_grid[qs[3] | (((uint16_t)qh[1] << 4) & 0x700)], iq1s_grid[qs[2] | (((uint16_t)qh[1] << 8) & 0x700)]); + const __m128i q1b_2_0 = _mm_set_epi64x( + iq1s_grid[qs[5] | (((uint16_t)qh[2] << 4) & 0x700)], iq1s_grid[qs[4] | (((uint16_t)qh[2] << 8) & 0x700)]); + const __m128i q1b_2_1 = _mm_set_epi64x( + iq1s_grid[qs[7] | (((uint16_t)qh[3] << 4) & 0x700)], iq1s_grid[qs[6] | (((uint16_t)qh[3] << 8) & 0x700)]); + const __m128i q8b_1_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; + const __m128i q8b_1_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; + const __m128i q8b_2_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; + const __m128i q8b_2_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; + + const __m128i dot1_0 = mul_add_epi8_sse(q1b_1_0, q8b_1_0); + const __m128i dot1_1 = mul_add_epi8_sse(q1b_1_1, q8b_1_1); + const __m128i dot2_0 = mul_add_epi8_sse(q1b_2_0, q8b_2_0); + const __m128i dot2_1 = mul_add_epi8_sse(q1b_2_1, q8b_2_1); + + const __m128i delta1_0 = _mm_set_epi64x(qh[0] & 0x80 ? 0xffffffffffffffff : 0x0101010101010101, + qh[0] & 0x08 ? 0xffffffffffffffff : 0x0101010101010101); + const __m128i delta1_1 = _mm_set_epi64x(qh[1] & 0x80 ? 0xffffffffffffffff : 0x0101010101010101, + qh[1] & 0x08 ? 
0xffffffffffffffff : 0x0101010101010101); + const __m128i delta2_0 = _mm_set_epi64x(qh[2] & 0x80 ? 0xffffffffffffffff : 0x0101010101010101, + qh[2] & 0x08 ? 0xffffffffffffffff : 0x0101010101010101); + const __m128i delta2_1 = _mm_set_epi64x(qh[3] & 0x80 ? 0xffffffffffffffff : 0x0101010101010101, + qh[3] & 0x08 ? 0xffffffffffffffff : 0x0101010101010101); + + const __m128i dot3_0 = mul_add_epi8_sse(delta1_0, q8b_1_0); + const __m128i dot3_1 = mul_add_epi8_sse(delta1_1, q8b_1_1); + const __m128i dot4_0 = mul_add_epi8_sse(delta2_0, q8b_2_0); + const __m128i dot4_1 = mul_add_epi8_sse(delta2_1, q8b_2_1); + + __m128i scale1_0 = _mm_set1_epi16(sc[ib/2] >> 0); + __m128i scale1_1 = _mm_set1_epi16(sc[ib/2] >> 3); + __m128i scale2_0 = _mm_set1_epi16(sc[ib/2] >> 6); + __m128i scale2_1 = _mm_set1_epi16(sc[ib/2] >> 9); + + scale1_0 = _mm_add_epi16(_mm_slli_epi16(_mm_and_si128(scale1_0, mask), 1), mone); + scale1_1 = _mm_add_epi16(_mm_slli_epi16(_mm_and_si128(scale1_1, mask), 1), mone); + scale2_0 = _mm_add_epi16(_mm_slli_epi16(_mm_and_si128(scale2_0, mask), 1), mone); + scale2_1 = _mm_add_epi16(_mm_slli_epi16(_mm_and_si128(scale2_1, mask), 1), mone); + const __m128i p1_0 = _mm_madd_epi16(dot1_0, scale1_0); + const __m128i p1_1 = _mm_madd_epi16(dot1_1, scale1_1); + const __m128i p2_0 = _mm_madd_epi16(dot2_0, scale2_0); + const __m128i p2_1 = _mm_madd_epi16(dot2_1, scale2_1); + const __m128i p3_0 = _mm_madd_epi16(dot3_0, scale1_0); + const __m128i p3_1 = _mm_madd_epi16(dot3_1, scale1_1); + const __m128i p4_0 = _mm_madd_epi16(dot4_0, scale2_0); + const __m128i p4_1 = _mm_madd_epi16(dot4_1, scale2_1); + + sumi1_0 = _mm_add_epi32(sumi1_0, _mm_add_epi32(p1_0, p2_0)); + sumi1_1 = _mm_add_epi32(sumi1_1, _mm_add_epi32(p1_1, p2_1)); + sumi2_0 = _mm_add_epi32(sumi2_0, _mm_add_epi32(p3_0, p4_0)); + sumi2_1 = _mm_add_epi32(sumi2_1, _mm_add_epi32(p3_1, p4_1)); + + qs += 8; qh += 4; + } + + const __m256 d = _mm256_set1_ps(y[i].d * GGML_CPU_FP16_TO_FP32(scale.f16)); + + accum1 = _mm256_add_ps(_mm256_mul_ps(d, _mm256_cvtepi32_ps(MM256_SET_M128I(sumi1_1, sumi1_0))), accum1); + accum2 = _mm256_add_ps(_mm256_mul_ps(d, _mm256_cvtepi32_ps(MM256_SET_M128I(sumi2_1, sumi2_0))), accum2); + } + + *s = hsum_float_8(accum1) + IQ1M_DELTA * hsum_float_8(accum2); + +#else + UNUSED(x); + UNUSED(y); + UNUSED(nb); + UNUSED(scale); + ggml_vec_dot_iq1_m_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc); +#endif +} + +void ggml_vec_dot_iq4_nl_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + assert(n % QK4_NL == 0); + static_assert(QK4_NL == QK8_0, "QK4_NL and QK8_0 must be the same"); + + const block_iq4_nl * GGML_RESTRICT x = vx; + const block_q8_0 * GGML_RESTRICT y = vy; + + const int nb = n / QK4_NL; + + int ib = 0; + float sumf = 0; + +#if defined __AVX2__ + + const __m128i values128 = _mm_loadu_si128((const __m128i*)kvalues_iq4nl); + const __m128i m4b = _mm_set1_epi8(0x0f); + const __m256i mone = _mm256_set1_epi16(1); + + __m256 accum1 = _mm256_setzero_ps(); + __m256 accum2 = _mm256_setzero_ps(); + for (; ib + 1 < nb; ib += 2) { + const __m128i q4bits_1 = _mm_loadu_si128((const __m128i*)x[ib + 0].qs); + const __m128i q4bits_2 = _mm_loadu_si128((const __m128i*)x[ib + 1].qs); + const __m256i q8b_1 = _mm256_loadu_si256((const __m256i *)y[ib + 0].qs); + const __m256i q8b_2 = _mm256_loadu_si256((const __m256i *)y[ib + 1].qs); + const __m256i q4b_1 = 
MM256_SET_M128I(_mm_shuffle_epi8(values128, _mm_and_si128(_mm_srli_epi16(q4bits_1, 4), m4b)), + _mm_shuffle_epi8(values128, _mm_and_si128(q4bits_1, m4b))); + const __m256i q4b_2 = MM256_SET_M128I(_mm_shuffle_epi8(values128, _mm_and_si128(_mm_srli_epi16(q4bits_2, 4), m4b)), + _mm_shuffle_epi8(values128, _mm_and_si128(q4bits_2, m4b))); + const __m256i p16_1 = mul_add_epi8(q4b_1, q8b_1); + const __m256i p16_2 = mul_add_epi8(q4b_2, q8b_2); + const __m256i p_1 = _mm256_madd_epi16(p16_1, mone); + const __m256i p_2 = _mm256_madd_epi16(p16_2, mone); + accum1 = _mm256_fmadd_ps(_mm256_set1_ps(GGML_CPU_FP16_TO_FP32(y[ib + 0].d)*GGML_CPU_FP16_TO_FP32(x[ib + 0].d)), + _mm256_cvtepi32_ps(p_1), accum1); + accum2 = _mm256_fmadd_ps(_mm256_set1_ps(GGML_CPU_FP16_TO_FP32(y[ib + 1].d)*GGML_CPU_FP16_TO_FP32(x[ib + 1].d)), + _mm256_cvtepi32_ps(p_2), accum2); + } + + sumf = hsum_float_8(_mm256_add_ps(accum1, accum2)); + +#elif defined __AVX__ + const __m128i values128 = _mm_loadu_si128((const __m128i*)kvalues_iq4nl); + const __m128i m4b = _mm_set1_epi8(0x0f); + + __m256 accum = _mm256_setzero_ps(); + for (; ib + 1 < nb; ib += 2) { + const __m128i q4bits_1 = _mm_loadu_si128((const __m128i *)x[ib + 0].qs); + const __m128i q4bits_2 = _mm_loadu_si128((const __m128i *)x[ib + 1].qs); + const __m128i q8b_1_0 = _mm_loadu_si128((const __m128i *)y[ib + 0].qs); + const __m128i q8b_1_1 = _mm_loadu_si128((const __m128i *)y[ib + 0].qs + 1); + const __m128i q8b_2_0 = _mm_loadu_si128((const __m128i *)y[ib + 1].qs); + const __m128i q8b_2_1 = _mm_loadu_si128((const __m128i *)y[ib + 1].qs + 1); + + const __m128i q4b_1_0 = _mm_shuffle_epi8(values128, _mm_and_si128(q4bits_1, m4b)); + const __m128i q4b_1_1 = _mm_shuffle_epi8(values128, _mm_and_si128(_mm_srli_epi16(q4bits_1, 4), m4b)); + const __m128i q4b_2_0 = _mm_shuffle_epi8(values128, _mm_and_si128(q4bits_2, m4b)); + const __m128i q4b_2_1 = _mm_shuffle_epi8(values128, _mm_and_si128(_mm_srli_epi16(q4bits_2, 4), m4b)); + + const __m256 p = mul_sum_i8_quad_float(q4b_1_0, q4b_1_1, q4b_2_0, q4b_2_1, q8b_1_0, q8b_1_1, q8b_2_0, q8b_2_1); + const __m256 deltas = quad_fp16_delta_float(x[ib].d, y[ib].d, x[ib + 1].d, y[ib + 1].d); + accum = _mm256_add_ps(_mm256_mul_ps(deltas, p), accum); + } + + sumf = hsum_float_8(accum); + +#endif + for (; ib < nb; ++ib) { + const float d = GGML_CPU_FP16_TO_FP32(y[ib].d)*GGML_CPU_FP16_TO_FP32(x[ib].d); + int sumi1 = 0, sumi2 = 0; + for (int j = 0; j < QK4_NL/2; ++j) { + sumi1 += y[ib].qs[j+ 0] * kvalues_iq4nl[x[ib].qs[j] & 0xf]; + sumi2 += y[ib].qs[j+QK4_NL/2] * kvalues_iq4nl[x[ib].qs[j] >> 4]; + } + sumf += d * (sumi1 + sumi2); + } + *s = sumf; +} + +void ggml_vec_dot_iq4_xs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + assert(n % QK_K == 0); + + const block_iq4_xs * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + +#if defined __AVX2__ + + const __m128i values128 = _mm_loadu_si128((const __m128i*)kvalues_iq4nl); + const __m128i m4b = _mm_set1_epi8(0x0f); + + __m256 accum = _mm256_setzero_ps(); + for (int ibl = 0; ibl < nb; ++ibl) { + const uint8_t * qs = x[ibl].qs; + const int8_t * q8 = y[ibl].qs; + uint16_t sh = x[ibl].scales_h; + __m256i sumi1 = _mm256_setzero_si256(); + __m256i sumi2 = _mm256_setzero_si256(); + for (int ib = 0; ib < QK_K/32; ib += 2) { + const __m128i q4bits_1 = _mm_loadu_si128((const __m128i*)qs); qs += 16; + const 
__m128i q4bits_2 = _mm_loadu_si128((const __m128i*)qs); qs += 16; + const __m256i q8b_1 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32; + const __m256i q8b_2 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32; + const __m256i q4b_1 = MM256_SET_M128I(_mm_shuffle_epi8(values128, _mm_and_si128(_mm_srli_epi16(q4bits_1, 4), m4b)), + _mm_shuffle_epi8(values128, _mm_and_si128(q4bits_1, m4b))); + const __m256i q4b_2 = MM256_SET_M128I(_mm_shuffle_epi8(values128, _mm_and_si128(_mm_srli_epi16(q4bits_2, 4), m4b)), + _mm_shuffle_epi8(values128, _mm_and_si128(q4bits_2, m4b))); + const __m256i p16_1 = mul_add_epi8(q4b_1, q8b_1); + const __m256i p16_2 = mul_add_epi8(q4b_2, q8b_2); + const int16_t ls1 = ((x[ibl].scales_l[ib/2] & 0xf) | ((sh << 4) & 0x30)) - 32; + const int16_t ls2 = ((x[ibl].scales_l[ib/2] >> 4) | ((sh << 2) & 0x30)) - 32; + sh >>= 4; + const __m256i p_1 = _mm256_madd_epi16(p16_1, _mm256_set1_epi16(ls1)); + const __m256i p_2 = _mm256_madd_epi16(p16_2, _mm256_set1_epi16(ls2)); + sumi1 = _mm256_add_epi32(p_1, sumi1); + sumi2 = _mm256_add_epi32(p_2, sumi2); + } + accum = _mm256_fmadd_ps(_mm256_set1_ps(GGML_CPU_FP16_TO_FP32(x[ibl].d)*y[ibl].d), + _mm256_cvtepi32_ps(_mm256_add_epi32(sumi1, sumi2)), accum); + } + + *s = hsum_float_8(accum); + +#elif defined __AVX__ + const __m128i values128 = _mm_loadu_si128((const __m128i*)kvalues_iq4nl); + const __m128i m4b = _mm_set1_epi8(0x0f); + + __m256 accum = _mm256_setzero_ps(); + for (int ibl = 0; ibl < nb; ++ibl) { + const uint8_t * qs = x[ibl].qs; + const int8_t * q8 = y[ibl].qs; + uint16_t sh = x[ibl].scales_h; + __m128i sumi1_0 = _mm_setzero_si128(); + __m128i sumi1_1 = _mm_setzero_si128(); + __m128i sumi2_0 = _mm_setzero_si128(); + __m128i sumi2_1 = _mm_setzero_si128(); + for (int ib = 0; ib < QK_K/32; ib += 2) { + const __m128i q4bits_1 = _mm_loadu_si128((const __m128i *)qs); qs += 16; + const __m128i q4bits_2 = _mm_loadu_si128((const __m128i *)qs); qs += 16; + const __m128i q8b_1_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; + const __m128i q8b_1_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; + const __m128i q8b_2_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; + const __m128i q8b_2_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; + const __m128i q4b_1_0 = _mm_shuffle_epi8(values128, _mm_and_si128(q4bits_1, m4b)); + const __m128i q4b_1_1 = _mm_shuffle_epi8(values128, _mm_and_si128(_mm_srli_epi16(q4bits_1, 4), m4b)); + const __m128i q4b_2_0 = _mm_shuffle_epi8(values128, _mm_and_si128(q4bits_2, m4b)); + const __m128i q4b_2_1 = _mm_shuffle_epi8(values128, _mm_and_si128(_mm_srli_epi16(q4bits_2, 4), m4b)); + const __m128i p16_1_0 = mul_add_epi8_sse(q4b_1_0, q8b_1_0); + const __m128i p16_1_1 = mul_add_epi8_sse(q4b_1_1, q8b_1_1); + const __m128i p16_2_0 = mul_add_epi8_sse(q4b_2_0, q8b_2_0); + const __m128i p16_2_1 = mul_add_epi8_sse(q4b_2_1, q8b_2_1); + const int16_t ls1 = ((x[ibl].scales_l[ib/2] & 0xf) | ((sh << 4) & 0x30)) - 32; + const int16_t ls2 = ((x[ibl].scales_l[ib/2] >> 4) | ((sh << 2) & 0x30)) - 32; + sh >>= 4; + const __m128i p_1_0 = _mm_madd_epi16(p16_1_0, _mm_set1_epi16(ls1)); + const __m128i p_1_1 = _mm_madd_epi16(p16_1_1, _mm_set1_epi16(ls1)); + const __m128i p_2_0 = _mm_madd_epi16(p16_2_0, _mm_set1_epi16(ls2)); + const __m128i p_2_1 = _mm_madd_epi16(p16_2_1, _mm_set1_epi16(ls2)); + sumi1_0 = _mm_add_epi32(p_1_0, sumi1_0); + sumi1_1 = _mm_add_epi32(p_1_1, sumi1_1); + sumi2_0 = _mm_add_epi32(p_2_0, sumi2_0); + sumi2_1 = _mm_add_epi32(p_2_1, sumi2_1); + } + __m128i sumi12_0 = _mm_add_epi32(sumi1_0, sumi2_0); + 
__m128i sumi12_1 = _mm_add_epi32(sumi1_1, sumi2_1);
+        accum = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(GGML_CPU_FP16_TO_FP32(x[ibl].d)*y[ibl].d),
+                _mm256_cvtepi32_ps(MM256_SET_M128I(sumi12_1, sumi12_0))), accum);
+    }
+
+    *s = hsum_float_8(accum);
+
+#else
+    UNUSED(x);
+    UNUSED(y);
+    UNUSED(nb);
+    ggml_vec_dot_iq4_xs_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
+#endif
+}
+
diff --git a/ml/backend/ggml/ggml/src/ggml-cpu/arch/x86/repack.cpp b/ml/backend/ggml/ggml/src/ggml-cpu/arch/x86/repack.cpp
new file mode 100644
index 000000000..37933a4b2
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-cpu/arch/x86/repack.cpp
@@ -0,0 +1,6248 @@
+#define GGML_COMMON_IMPL_CPP
+#define GGML_COMMON_DECL_CPP
+#include "ggml-common.h"
+#include "ggml-backend-impl.h"
+
+#include "ggml-impl.h"
+#include "ggml-cpu.h"
+#include "ggml-cpu-impl.h"
+#include "simd-mappings.h"
+#include "traits.h"
+
+#include <cmath>
+#include <cstring>
+#include <cassert>
+#include <cstdlib> // for qsort
+#include <cstdio>  // for GGML_ASSERT
+
+#define GGML_CPU_CLANG_WORKAROUND
+#include "../../repack.h"
+
+#if defined(__GNUC__)
+#pragma GCC diagnostic ignored "-Woverlength-strings"
+#endif
+
+#define UNUSED GGML_UNUSED
+
+#if defined(__AVX__)
+#if defined(__F16C__)
+#if defined(__AVX512F__)
+#define GGML_F32Cx8x2_LOAD(x, y)     _mm512_cvtph_ps(_mm256_set_m128i(_mm_loadu_si128((const __m128i *)(y)), _mm_loadu_si128((const __m128i *)(x))))
+#define GGML_F32Cx16_REPEAT_LOAD(x)  _mm512_cvtph_ps(_mm256_set_m128i(x, x))
+#endif
+// the _mm256_cvt intrinsics require F16C
+#define GGML_F32Cx8_LOAD(x)     _mm256_cvtph_ps(_mm_loadu_si128((const __m128i *)(x)))
+#define GGML_F32Cx8_REPEAT_LOAD(x, loadMask)     _mm256_cvtph_ps(_mm_shuffle_epi32(_mm_maskload_epi32((int const*)(x), loadMask), 68))
+#define GGML_F32Cx8_REARRANGE_LOAD(x, arrangeMask)     _mm256_cvtph_ps(_mm_shuffle_epi8(_mm_loadu_si128((const __m128i *) x), arrangeMask))
+#else
+#if defined(__AVX512F__)
+static inline __m512 __avx512_f32cx8x2_load(ggml_fp16_t *x, ggml_fp16_t *y) {
+    float tmp[16];
+
+    for (int i = 0; i < 8; i++) {
+        tmp[i] = GGML_CPU_FP16_TO_FP32(x[i]);
+    }
+
+    for (int i = 0; i < 8; i++) {
+        tmp[i + 8] = GGML_CPU_FP16_TO_FP32(y[i]);
+    }
+
+    return _mm512_loadu_ps(tmp);
+}
+static inline __m512 __avx512_repeat_f32cx16_load(__m128i x) {
+    float tmp[16];
+    uint16_t tmphalf[8];
+    _mm_storeu_si128((__m128i*)tmphalf, x);
+
+    for (int i = 0; i < 4; i++) {
+        tmp[i] = GGML_CPU_FP16_TO_FP32(tmphalf[i]);
+        tmp[i + 4] = GGML_CPU_FP16_TO_FP32(tmphalf[i]);
+        tmp[i + 8] = GGML_CPU_FP16_TO_FP32(tmphalf[i]);
+        tmp[i + 12] = GGML_CPU_FP16_TO_FP32(tmphalf[i]);
+    }
+
+    return _mm512_loadu_ps(tmp);
+}
+#endif
+static inline __m256 __avx_f32cx8_load(ggml_fp16_t *x) {
+    float tmp[8];
+
+    for (int i = 0; i < 8; i++) {
+        tmp[i] = GGML_CPU_FP16_TO_FP32(x[i]);
+    }
+
+    return _mm256_loadu_ps(tmp);
+}
+static inline __m256 __avx_repeat_f32cx8_load(ggml_fp16_t *x) {
+    float tmp[8];
+
+    for (int i = 0; i < 4; i++) {
+        tmp[i] = GGML_CPU_FP16_TO_FP32(x[i]);
+        tmp[i + 4] = GGML_CPU_FP16_TO_FP32(x[i]);
+    }
+
+    return _mm256_loadu_ps(tmp);
+}
+static inline __m256 __avx_rearranged_f32cx8_load(ggml_fp16_t *x, __m128i arrangeMask) {
+    uint16_t tmphalf[8];
+    float tmp[8];
+
+    _mm_storeu_si128((__m128i*)tmphalf, _mm_shuffle_epi8(_mm_loadu_si128((const __m128i *) x), arrangeMask));
+    for (int i = 0; i < 8; i++) {
+        tmp[i] = GGML_CPU_FP16_TO_FP32(tmphalf[i]);
+    }
+
+    return _mm256_loadu_ps(tmp);
+}
+
+#define GGML_F32Cx8_LOAD(x)     __avx_f32cx8_load(x)
+#define GGML_F32Cx8_REPEAT_LOAD(x, loadMask)     __avx_repeat_f32cx8_load(x)
+#define GGML_F32Cx8_REARRANGE_LOAD(x, 
arrangeMask) __avx_rearranged_f32cx8_load(x, arrangeMask) +#if defined(__AVX512F__) +#define GGML_F32Cx8x2_LOAD(x, y) __avx512_f32cx8x2_load(x, y) +#define GGML_F32Cx16_REPEAT_LOAD(x) __avx512_repeat_f32cx16_load(x) +#endif +#endif +#endif + +static inline int nearest_int(float fval) { + assert(fabsf(fval) <= 4194303.f); + float val = fval + 12582912.f; + int i; memcpy(&i, &val, sizeof(int)); + return (i & 0x007fffff) - 0x00400000; +} + +#if defined(__AVX2__) || defined(__AVX512F__) +#if defined(__AVX512F__) +// add int16_t pairwise and return as 512 bit int vector, then add the accumulator +static inline __m512i sum_i16_pairs_acc_int32x16(const __m512i acc, const __m512i x) { + const __m512i ones = _mm512_set1_epi16(1); + return _mm512_add_epi32(acc, _mm512_madd_epi16(ones, x)); +} + +static inline __m512i mul_sum_us8_pairs_acc_int32x16(const __m512i acc, const __m512i ax, const __m512i sy) { +#if defined(__AVX512VNNI__) + return _mm512_dpbusd_epi32(acc, ax, sy); +#else + // Perform multiplication and create 16-bit values + const __m512i dot = _mm512_maddubs_epi16(ax, sy); + return sum_i16_pairs_acc_int32x16(acc, dot); +#endif +} + +// multiply int8_t, add results pairwise twice and return as 512 bit int vector,then add the accumulator +static inline __m512i mul_sum_i8_pairs_acc_int32x16(const __m512i acc, const __m512i x, const __m512i y) { + const __m512i zero = _mm512_setzero_si512(); + // Get absolute values of x vectors + const __m512i ax = _mm512_abs_epi8(x); + // Sign the values of the y vectors + __mmask64 blt0 = _mm512_movepi8_mask(x); + const __m512i sy = _mm512_mask_sub_epi8(y, blt0, zero, y); + return mul_sum_us8_pairs_acc_int32x16(acc, ax, sy); +} +#endif + +// add int16_t pairwise and return as 256 bit int vector, then add the accumulator +static inline __m256i sum_i16_pairs_acc_int32x8(const __m256i acc, const __m256i x) { + const __m256i ones = _mm256_set1_epi16(1); + return _mm256_add_epi32(acc, _mm256_madd_epi16(ones, x)); +} + +static inline __m256i mul_sum_us8_pairs_acc_int32x8(const __m256i acc, const __m256i ax, const __m256i sy) { +#if defined(__AVX512VNNI__) && defined(__AVX512VL__) + return _mm256_dpbusd_epi32(acc, ax, sy); +#elif defined(__AVXVNNI__) + return _mm256_dpbusd_avx_epi32(acc, ax, sy); +#else + // Perform multiplication and create 16-bit values + const __m256i dot = _mm256_maddubs_epi16(ax, sy); + return sum_i16_pairs_acc_int32x8(acc, dot); +#endif +} + +// Integer variant of the function defined in ggml-quants.c +// multiply int8_t, add results pairwise twice and return as 256 bit int vector, then add the accumulator +static inline __m256i mul_sum_i8_pairs_acc_int32x8(const __m256i acc, const __m256i x, const __m256i y) { +#if defined(__AVXVNNIINT8__) + return _mm256_dpbssd_epi32(acc, x, y); +#else + // Get absolute values of x vectors + const __m256i ax = _mm256_sign_epi8(x, x); + // Sign the values of the y vectors + const __m256i sy = _mm256_sign_epi8(y, x); + return mul_sum_us8_pairs_acc_int32x8(acc, ax, sy); +#endif +} +#endif + +void ggml_quantize_mat_q8_0_4x8(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { + assert(QK8_0 == 32); + assert(k % QK8_0 == 0); + const int nb = k / QK8_0; + + block_q8_0x4 * GGML_RESTRICT y = (block_q8_0x4 *) vy; + +#if defined(__AVX2__) || defined(__AVX__) + float id[4]; + __m256 srcv[4][4]; + __m256 idvec[4]; + + for (int i = 0; i < nb; i++) { + for (int row_iter = 0; row_iter < 4; row_iter++) { + // Load elements into 4 AVX vectors + __m256 v0 = _mm256_loadu_ps( x + row_iter * k + i * 32 ); + 
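+            // What the vector code in this row loop computes, as a scalar sketch
+            // (illustrative only; src stands in for x + row_iter * k + i * 32):
+            //   float amax = 0.0f;
+            //   for (int e = 0; e < QK8_0; e++) amax = fmaxf(amax, fabsf(src[e]));
+            //   const float d  = amax / 127.f;                  // stored per-row scale
+            //   const float id = amax != 0.0f ? 127.f / amax : 0.0f;
+            //   // each value then becomes nearest-int(src[e] * id),
+            //   // using round-to-nearest-even in the SIMD path below
+            // The q8 bytes of the four rows are additionally interleaved in 8-byte
+            // chunks further down, which is what distinguishes this from
+            // quantize_row_q8_0.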
__m256 v1 = _mm256_loadu_ps( x + row_iter * k + i * 32 + 8 ); + __m256 v2 = _mm256_loadu_ps( x + row_iter * k + i * 32 + 16 ); + __m256 v3 = _mm256_loadu_ps( x + row_iter * k + i * 32 + 24 ); + + // Compute max(abs(e)) for the block + const __m256 signBit = _mm256_set1_ps( -0.0f ); + __m256 maxAbs = _mm256_andnot_ps( signBit, v0 ); + maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v1 ) ); + maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v2 ) ); + maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v3 ) ); + + __m128 max4 = _mm_max_ps( _mm256_extractf128_ps( maxAbs, 1 ), _mm256_castps256_ps128( maxAbs ) ); + max4 = _mm_max_ps( max4, _mm_movehl_ps( max4, max4 ) ); + max4 = _mm_max_ss( max4, _mm_movehdup_ps( max4 ) ); + const float maxScalar = _mm_cvtss_f32( max4 ); + + // Divided by 127.f to mirror results in quantize_row_q8_0 + const float d = maxScalar / 127.f; + id[row_iter] = ( maxScalar != 0.0f ) ? 127.f / maxScalar : 0.0f; //d ? 1.0f / d : 0.0f; + + // Store the scale for the individual block + y[i].d[row_iter] = GGML_CPU_FP32_TO_FP16(d); + + // Store the values in blocks of eight values - Aim is to use these later for block interleaving + srcv[row_iter][0] = v0; + srcv[row_iter][1] = v1; + srcv[row_iter][2] = v2; + srcv[row_iter][3] = v3; + idvec[row_iter] = _mm256_set1_ps(id[row_iter]); + } + + // The loop iterates four times - The aim is to get 4 corresponding chunks of eight bytes from the original weight blocks that are interleaved + for (int j = 0; j < 4; j++) { + // Apply the multiplier + __m256 v0 = _mm256_mul_ps(srcv[0][j], idvec[0]); + __m256 v1 = _mm256_mul_ps(srcv[1][j], idvec[1]); + __m256 v2 = _mm256_mul_ps(srcv[2][j], idvec[2]); + __m256 v3 = _mm256_mul_ps(srcv[3][j], idvec[3]); + + // Round to nearest integer + v0 = _mm256_round_ps( v0, _MM_ROUND_NEAREST ); + v1 = _mm256_round_ps( v1, _MM_ROUND_NEAREST ); + v2 = _mm256_round_ps( v2, _MM_ROUND_NEAREST ); + v3 = _mm256_round_ps( v3, _MM_ROUND_NEAREST ); + + // Convert floats to integers + __m256i i0 = _mm256_cvtps_epi32( v0 ); + __m256i i1 = _mm256_cvtps_epi32( v1 ); + __m256i i2 = _mm256_cvtps_epi32( v2 ); + __m256i i3 = _mm256_cvtps_epi32( v3 ); + +#if defined(__AVX2__) + // Convert int32 to int16 + i0 = _mm256_packs_epi32( i0, i1 ); + i2 = _mm256_packs_epi32( i2, i3 ); + // Convert int16 to int8 + i0 = _mm256_packs_epi16( i0, i2 ); + + // Permute and store the quantized weights in the required order after the pack instruction + const __m256i perm = _mm256_setr_epi32( 0, 4, 1, 5, 2, 6, 3, 7 ); + i0 = _mm256_permutevar8x32_epi32( i0, perm ); + + _mm256_storeu_si256((__m256i *)(y[i].qs + 32 * j), i0); +#else + // Since we don't have in AVX some necessary functions, + // we split the registers in half and call AVX2 analogs from SSE + __m128i ni0 = _mm256_castsi256_si128( i0 ); + __m128i ni1 = _mm256_extractf128_si256( i0, 1); + __m128i ni2 = _mm256_castsi256_si128( i1 ); + __m128i ni3 = _mm256_extractf128_si256( i1, 1); + __m128i ni4 = _mm256_castsi256_si128( i2 ); + __m128i ni5 = _mm256_extractf128_si256( i2, 1); + __m128i ni6 = _mm256_castsi256_si128( i3 ); + __m128i ni7 = _mm256_extractf128_si256( i3, 1); + + // Convert int32 to int16 + ni0 = _mm_packs_epi32( ni0, ni1 ); + ni2 = _mm_packs_epi32( ni2, ni3 ); + ni4 = _mm_packs_epi32( ni4, ni5 ); + ni6 = _mm_packs_epi32( ni6, ni7 ); + // Convert int16 to int8 + ni0 = _mm_packs_epi16( ni0, ni2 ); + ni4 = _mm_packs_epi16( ni4, ni6 ); + _mm_storeu_si128((__m128i *)(y[i].qs + 32 * j), ni0); + _mm_storeu_si128((__m128i *)(y[i].qs + 32 * j + 16), 
ni4); +#endif + } + } + +#else + UNUSED(nb); + UNUSED(y); + ggml_quantize_mat_q8_0_4x8_generic(x, vy, k); +#endif +} + +void ggml_quantize_mat_q8_K_4x8(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { + assert(QK_K == 256); + assert(k % QK_K == 0); + const int nb = k / QK_K; + + block_q8_Kx4 * GGML_RESTRICT y = (block_q8_Kx4 *) vy; + +#if defined(__AVX2__) + float iscale[4]; + __m256 srcv[4][32]; + __m256 iscale_vec[4]; + + for (int i = 0; i < nb; i++) { + for (int row_iter = 0; row_iter < 4; row_iter++) { + // Load elements into 4 AVX vectors + __m256 v0 = _mm256_loadu_ps( x + row_iter * k + i * 256 ); + __m256 v1 = _mm256_loadu_ps( x + row_iter * k + i * 256 + 8 ); + __m256 v2 = _mm256_loadu_ps( x + row_iter * k + i * 256 + 16 ); + __m256 v3 = _mm256_loadu_ps( x + row_iter * k + i * 256 + 24 ); + + // Compute max(abs(e)) for the block + const __m256 signBit = _mm256_set1_ps( -0.0f ); + __m256 abs0 = _mm256_andnot_ps( signBit, v0 ); + __m256 abs1 = _mm256_andnot_ps( signBit, v1 ); + __m256 abs2 = _mm256_andnot_ps( signBit, v2 ); + __m256 abs3 = _mm256_andnot_ps( signBit, v3 ); + + __m256 maxAbs = _mm256_max_ps( abs0, abs1 ); + maxAbs = _mm256_max_ps( maxAbs, abs2 ); + maxAbs = _mm256_max_ps( maxAbs, abs3 ); + + __m256 mask0 = _mm256_cmp_ps( maxAbs, v0, _CMP_EQ_OQ ); + __m256 mask1 = _mm256_cmp_ps( maxAbs, v1, _CMP_EQ_OQ ); + __m256 mask2 = _mm256_cmp_ps( maxAbs, v2, _CMP_EQ_OQ ); + __m256 mask3 = _mm256_cmp_ps( maxAbs, v3, _CMP_EQ_OQ ); + + __m256 maskAbs = _mm256_or_ps(_mm256_or_ps(mask0, mask1),_mm256_or_ps(mask2, mask3)); + + srcv[row_iter][0] = v0; + srcv[row_iter][1] = v1; + srcv[row_iter][2] = v2; + srcv[row_iter][3] = v3; + + for (int sb = 1; sb < 8; sb++) { + // Temporarily stores absolute quant values + __m256 tempAbs = maxAbs; + + // Load elements into 4 AVX vectors + __m256 v0 = _mm256_loadu_ps( x + row_iter * k + i * 256 + sb * 32); + __m256 v1 = _mm256_loadu_ps( x + row_iter * k + i * 256 + sb * 32 + 8 ); + __m256 v2 = _mm256_loadu_ps( x + row_iter * k + i * 256 + sb * 32 + 16 ); + __m256 v3 = _mm256_loadu_ps( x + row_iter * k + i * 256 + sb * 32 + 24 ); + + // Compute max(abs(e)) for the block + __m256 abs0 = _mm256_andnot_ps( signBit, v0 ); + __m256 abs1 = _mm256_andnot_ps( signBit, v1 ); + __m256 abs2 = _mm256_andnot_ps( signBit, v2 ); + __m256 abs3 = _mm256_andnot_ps( signBit, v3 ); + + maxAbs = _mm256_max_ps( maxAbs, abs0 ); + maxAbs = _mm256_max_ps( maxAbs, abs1 ); + maxAbs = _mm256_max_ps( maxAbs, abs2 ); + maxAbs = _mm256_max_ps( maxAbs, abs3 ); + + __m256 mask_prev = _mm256_cmp_ps( tempAbs, maxAbs, _CMP_EQ_OQ ); + maskAbs = _mm256_and_ps( maskAbs, mask_prev ); + + mask0 = _mm256_cmp_ps( maxAbs, v0, _CMP_EQ_OQ ); + mask1 = _mm256_cmp_ps( maxAbs, v1, _CMP_EQ_OQ ); + mask2 = _mm256_cmp_ps( maxAbs, v2, _CMP_EQ_OQ ); + mask3 = _mm256_cmp_ps( maxAbs, v3, _CMP_EQ_OQ ); + + __m256 mask_curr = _mm256_or_ps(_mm256_or_ps(mask0, mask1),_mm256_or_ps(mask2, mask3)); + maskAbs = _mm256_or_ps(maskAbs, mask_curr); + + srcv[row_iter][sb * 4] = v0; + srcv[row_iter][sb * 4 + 1] = v1; + srcv[row_iter][sb * 4 + 2] = v2; + srcv[row_iter][sb * 4 + 3] = v3; + } + + __m128 max4 = _mm_max_ps( _mm256_extractf128_ps( maxAbs, 1 ), _mm256_castps256_ps128( maxAbs ) ); + max4 = _mm_max_ps( max4, _mm_movehl_ps( max4, max4 ) ); + max4 = _mm_max_ss( max4, _mm_movehdup_ps( max4 ) ); + const float maxScalar = _mm_cvtss_f32( max4 ); + + __m256 maxScalarVec = _mm256_set1_ps(maxScalar); + + __m256 mask_next = _mm256_cmp_ps( maxScalarVec, maxAbs, _CMP_EQ_OQ ); + __m256 finalMask = 
_mm256_and_ps(maskAbs, mask_next); + + const int mask = _mm256_movemask_ps(finalMask); + iscale[row_iter] = ( maxScalar != 0.0f ) ? 127.f / maxScalar : 0.0f; + + if(mask) { + iscale[row_iter] = ( maxScalar != 0.0f ) ? -127.f / maxScalar: 0.0f; + } + + y[i].d[row_iter] = maxScalar ? 1/iscale[row_iter] : 0; + iscale_vec[row_iter] = _mm256_set1_ps(iscale[row_iter]); + } + + __m256i quants_interleaved[32]; + for (int j = 0; j < 32; j++) { + // Apply the multiplier + __m256 v0 = _mm256_mul_ps(srcv[0][j], iscale_vec[0]); + __m256 v1 = _mm256_mul_ps(srcv[1][j], iscale_vec[1]); + __m256 v2 = _mm256_mul_ps(srcv[2][j], iscale_vec[2]); + __m256 v3 = _mm256_mul_ps(srcv[3][j], iscale_vec[3]); + + // Round to nearest integer + v0 = _mm256_round_ps( v0, _MM_ROUND_NEAREST ); + v1 = _mm256_round_ps( v1, _MM_ROUND_NEAREST ); + v2 = _mm256_round_ps( v2, _MM_ROUND_NEAREST ); + v3 = _mm256_round_ps( v3, _MM_ROUND_NEAREST ); + + // Convert floats to integers + __m256i i0 = _mm256_cvtps_epi32( v0 ); + __m256i i1 = _mm256_cvtps_epi32( v1 ); + __m256i i2 = _mm256_cvtps_epi32( v2 ); + __m256i i3 = _mm256_cvtps_epi32( v3 ); + + // Convert int32 to int16 + i0 = _mm256_packs_epi32( i0, i1 ); + i2 = _mm256_packs_epi32( i2, i3 ); + // Convert int16 to int8 + i0 = _mm256_packs_epi16( i0, i2 ); + + // Permute and store the quantized weights in the required order after the pack instruction + const __m256i perm = _mm256_setr_epi32( 0, 4, 1, 5, 2, 6, 3, 7 ); + i0 = _mm256_permutevar8x32_epi32( i0, perm ); + + _mm256_storeu_si256((__m256i *)(y[i].qs + 32 * j), i0); + quants_interleaved[j] = i0; + } + + // Masks to shuffle the quants of corresonding sub blocks for rearraning quants for vectorized bsums computation + __m256i shuffle_mask_sb2 = _mm256_castsi128_si256(_mm_setr_epi8(0, 1, 0, 1, 4, 5, 6, 7, 8, 9, 8, 9, 12, 13, 14, 15)); + shuffle_mask_sb2 = _mm256_permute2f128_si256(shuffle_mask_sb2, shuffle_mask_sb2, 0); + __m256i shuffle_mask_sb3 = _mm256_castsi128_si256(_mm_setr_epi8(0, 1, 2, 3, 0, 1, 6, 7, 8, 9, 10, 11, 8, 9, 14, 15)); + shuffle_mask_sb3 = _mm256_permute2f128_si256(shuffle_mask_sb3, shuffle_mask_sb3, 0); + __m256i shuffle_mask_sb4 = _mm256_castsi128_si256(_mm_setr_epi8(0, 1, 2, 3, 4, 5, 0, 1, 8, 9, 10, 11, 12, 13, 8, 9)); + shuffle_mask_sb4 = _mm256_permute2f128_si256(shuffle_mask_sb4, shuffle_mask_sb4, 0); + + for (int k = 0; k < 4; k++) { + // Quants from four different sub blocks are taken + __m256i q0 = quants_interleaved[k * 8 + 0]; + __m256i q1 = quants_interleaved[k * 8 + 1]; + __m256i q2 = quants_interleaved[k * 8 + 2]; + __m256i q3 = quants_interleaved[k * 8 + 3]; + __m256i q4 = quants_interleaved[k * 8 + 4]; + __m256i q5 = quants_interleaved[k * 8 + 5]; + __m256i q6 = quants_interleaved[k * 8 + 6]; + __m256i q7 = quants_interleaved[k * 8 + 7]; + + + // The below code block has the first half of different sub blocks shuffled and blended so as to process 2 values from each sub block at a time + __m256i sb2_h1_shuffled = _mm256_shuffle_epi8(q2, shuffle_mask_sb2); + __m256i sb_h1_interleaved = _mm256_blend_epi16(q0, sb2_h1_shuffled, 34); + __m256i sb3_h1_shuffled = _mm256_shuffle_epi8(q4, shuffle_mask_sb3); + sb_h1_interleaved = _mm256_blend_epi16(sb_h1_interleaved, sb3_h1_shuffled, 68); + __m256i sb4_h1_shuffled = _mm256_shuffle_epi8(q6, shuffle_mask_sb4); + sb_h1_interleaved = _mm256_blend_epi16(sb_h1_interleaved, sb4_h1_shuffled, 136); + + __m256i one = _mm256_set1_epi8(1); + __m256i bsums_r1 = _mm256_maddubs_epi16(one, sb_h1_interleaved); + + for (int l = 0; l < 3; l++) { + // Quants value shifted to 
process next two values from each sub block + q0 = _mm256_srli_epi64(q0, 16); + q2 = _mm256_srli_epi64(q2, 16); + q4 = _mm256_srli_epi64(q4, 16); + q6 = _mm256_srli_epi64(q6, 16); + + sb2_h1_shuffled = _mm256_shuffle_epi8(q2, shuffle_mask_sb2); + sb_h1_interleaved = _mm256_blend_epi16(q0, sb2_h1_shuffled, 34); + sb3_h1_shuffled = _mm256_shuffle_epi8(q4, shuffle_mask_sb3); + sb_h1_interleaved = _mm256_blend_epi16(sb_h1_interleaved, sb3_h1_shuffled, 68); + sb4_h1_shuffled = _mm256_shuffle_epi8(q6, shuffle_mask_sb4); + sb_h1_interleaved = _mm256_blend_epi16(sb_h1_interleaved, sb4_h1_shuffled, 136); + + bsums_r1 = _mm256_add_epi16(bsums_r1, _mm256_maddubs_epi16(one, sb_h1_interleaved)); + } + + // The below code block has the second half of different sub blocks shuffled and blended so as to process 2 values from each sub block at a time + __m256i sb2_h2_shuffled = _mm256_shuffle_epi8(q3, shuffle_mask_sb2); + __m256i sb_h2_interleaved = _mm256_blend_epi16(q1, sb2_h2_shuffled, 34); + __m256i sb3_h2_shuffled = _mm256_shuffle_epi8(q5, shuffle_mask_sb3); + sb_h2_interleaved = _mm256_blend_epi16(sb_h2_interleaved, sb3_h2_shuffled, 68); + __m256i sb4_h2_shuffled = _mm256_shuffle_epi8(q7, shuffle_mask_sb4); + sb_h2_interleaved = _mm256_blend_epi16(sb_h2_interleaved, sb4_h2_shuffled, 136); + + __m256i bsums_r2 = _mm256_maddubs_epi16(one, sb_h2_interleaved); + + for (int l = 0; l < 3; l++) { + // Quants value shifted to process next two values from each sub block + q1 = _mm256_srli_epi64(q1, 16); + q3 = _mm256_srli_epi64(q3, 16); + q5 = _mm256_srli_epi64(q5, 16); + q7 = _mm256_srli_epi64(q7, 16); + + sb2_h2_shuffled = _mm256_shuffle_epi8(q3, shuffle_mask_sb2); + sb_h2_interleaved = _mm256_blend_epi16(q1, sb2_h2_shuffled, 34); + sb3_h2_shuffled = _mm256_shuffle_epi8(q5, shuffle_mask_sb3); + sb_h2_interleaved = _mm256_blend_epi16(sb_h2_interleaved, sb3_h2_shuffled, 68); + sb4_h2_shuffled = _mm256_shuffle_epi8(q7, shuffle_mask_sb4); + sb_h2_interleaved = _mm256_blend_epi16(sb_h2_interleaved, sb4_h2_shuffled, 136); + + bsums_r2 = _mm256_add_epi16(bsums_r2, _mm256_maddubs_epi16(one, sb_h2_interleaved)); + } + + // Overall bsums in interleaved fashion computed by adding results of both halves + __m256i bsums_r = _mm256_add_epi16(bsums_r1, bsums_r2); + _mm256_storeu_si256((__m256i *)(y[i].bsums + 16 * k), bsums_r); + } + } + +#else + UNUSED(nb); + UNUSED(y); + ggml_quantize_mat_q8_K_4x8_generic(x, vy, k); +#endif +} + +void ggml_gemv_q4_0_8x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { + const int qk = QK8_0; + const int nb = n / qk; + const int ncols_interleaved = 8; + const int blocklen = 8; + + assert (n % qk == 0); + assert (nc % ncols_interleaved == 0); + + UNUSED(s); + UNUSED(bs); + UNUSED(vx); + UNUSED(vy); + UNUSED(nr); + UNUSED(nc); + UNUSED(nb); + UNUSED(ncols_interleaved); + UNUSED(blocklen); + +#if defined(__AVX2__) + // Lookup table to convert signed nibbles to signed bytes + __m256i signextendlut = _mm256_castsi128_si256(_mm_set_epi8(-1, -2, -3, -4, -5, -6, -7, -8, 7, 6, 5, 4, 3, 2, 1, 0)); + signextendlut = _mm256_permute2f128_si256(signextendlut, signextendlut, 0); + __m128i changemask = _mm_set_epi8(15, 14, 7, 6, 13, 12, 5, 4, 11, 10, 3, 2, 9, 8, 1, 0); + __m256i finalpermutemask = _mm256_set_epi32(7, 5, 3, 1, 6, 4, 2, 0); + + // Permute mask used for easier vector processing at later stages + const __m256i m4b = _mm256_set1_epi8(0x0F); + + int64_t b_nb = n / QK4_0; + + const block_q4_0x8 * b_ptr_start = (const 
block_q4_0x8 *)vx; + const block_q8_0 * a_ptr_start = (const block_q8_0 *)vy; + + // Process Q8_0 blocks one by one + for (int64_t y = 0; y < nr; y++) { + + // Pointers to LHS blocks of block_q8_0 format + const block_q8_0 * a_ptr = a_ptr_start + (y * nb); + + // Take group of eight block_q4_0x8 structures at each pass of the loop and perform dot product operation + for (int64_t x = 0; x < nc / 8; x++) { + + // Pointers to RHS blocks + const block_q4_0x8 * b_ptr = b_ptr_start + (x * b_nb); + + // Master FP accumulator + __m256 acc_row = _mm256_setzero_ps(); + + for (int64_t b = 0; b < nb; b++) { + // Load 8 blocks of Q4_0 interleaved as 8 bytes (B0 - B7) + const __m256i rhs_raw_vec_0123_0 = _mm256_loadu_si256((const __m256i *)(b_ptr[b].qs)); + const __m256i rhs_raw_vec_4567_0 = _mm256_loadu_si256((const __m256i *)(b_ptr[b].qs) + 1); + const __m256i rhs_raw_vec_0123_1 = _mm256_loadu_si256((const __m256i *)(b_ptr[b].qs) + 2); + const __m256i rhs_raw_vec_4567_1 = _mm256_loadu_si256((const __m256i *)(b_ptr[b].qs) + 3); + + // 4-bit -> 8-bit - Sign is maintained + const __m256i rhs_vec_0123_0 = _mm256_shuffle_epi8(signextendlut, _mm256_and_si256(rhs_raw_vec_0123_0, m4b)); // B0(0-7) B1(0-7) B2(0-7) B3(0-7) + const __m256i rhs_vec_4567_0 = _mm256_shuffle_epi8(signextendlut, _mm256_and_si256(rhs_raw_vec_4567_0, m4b)); // B4(0-7) B5(0-7) B6(0-7) B7(0-7) + const __m256i rhs_vec_0123_1 = _mm256_shuffle_epi8(signextendlut, _mm256_and_si256(rhs_raw_vec_0123_1, m4b)); // B0(8-15) B1(8-15) B2(8-15) B3(8-15) + const __m256i rhs_vec_4567_1 = _mm256_shuffle_epi8(signextendlut, _mm256_and_si256(rhs_raw_vec_4567_1, m4b)); // B0(8-15) B1(8-15) B2(8-15) B3(8-15) + + const __m256i rhs_vec_0123_2 = _mm256_shuffle_epi8(signextendlut, _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_0123_0, 4), m4b)); // B0(16-23) B1(16-23) B2(16-23) B3(16-23) + const __m256i rhs_vec_4567_2 = _mm256_shuffle_epi8(signextendlut, _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_4567_0, 4), m4b)); // B4(16-23) B5(16-23) B6(16-23) B7(16-23) + const __m256i rhs_vec_0123_3 = _mm256_shuffle_epi8(signextendlut, _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_0123_1, 4), m4b)); // B0(24-31) B1(24-31) B2(24-31) B3(24-31) + const __m256i rhs_vec_4567_3 = _mm256_shuffle_epi8(signextendlut, _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_4567_1, 4), m4b)); // B4(24-31) B5(24-31) B6(24-31) B7(24-31) + + // Load the scale values for the 8 blocks interleaved in block_q4_0x8 + const __m256 col_scale_f32 = GGML_F32Cx8_REARRANGE_LOAD(b_ptr[b].d, changemask); + + // Load and convert to FP32 scale from block_q8_0 + const __m256 row_scale_f32 = _mm256_set1_ps(GGML_CPU_FP16_TO_FP32(a_ptr[b].d)); + + // Load the block values in block_q8_0 in batches of 16 bytes and replicate the same across 256 bit vector + __m256i lhs_vec_0 = _mm256_castsi128_si256(_mm_loadu_si128((const __m128i *)a_ptr[b].qs)); + __m256i lhs_vec_1 = _mm256_castsi128_si256(_mm_loadu_si128((const __m128i *)(a_ptr[b].qs + 16))); + + lhs_vec_0 = _mm256_permute2f128_si256(lhs_vec_0, lhs_vec_0, 0); // A0 (0-15) A0(0-15) + lhs_vec_1 = _mm256_permute2f128_si256(lhs_vec_1, lhs_vec_1, 0); // A0 (16-31) A0(16-31)) + + __m256i iacc = _mm256_setzero_si256(); + + // Dot product done within 32 bit lanes and accumulated in the same vector + // B0(0-3) B4(0-3) B1(0-3) B5(0-3) B2(0-3) B6(0-3) B3(0-3) B7(0-3) with A0(0-3) + // B0(4-7) B4(4-7) B1(4-7) B5(4-7) B2(4-7) B6(4-7) B3(4-7) B7(4-7) with A0(4-7) + // ........................................................................... 
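+                // (imm 177 = 0b10110001 swaps adjacent 32-bit elements within each
+                //  128-bit lane, imm 170 = 0b10101010 blends in the odd lanes of the
+                //  second operand, and imms 0/85/170/255 on the LHS broadcast its
+                //  32-bit group 0/1/2/3 - this is how each 4-byte RHS group is paired
+                //  with the matching 4 bytes of A0)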
+ // B0(28-31) B4(28-31) B1(28-31) B5(28-31) B2(28-31) B6(28-31) B3(28-31) B7(28-31) with A0(28-31) + + iacc = mul_sum_i8_pairs_acc_int32x8(iacc, _mm256_blend_epi32(rhs_vec_0123_0 ,_mm256_shuffle_epi32(rhs_vec_4567_0, 177), 170), _mm256_shuffle_epi32(lhs_vec_0, 0)); + iacc = mul_sum_i8_pairs_acc_int32x8(iacc, _mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_0, 177) ,rhs_vec_4567_0, 170), _mm256_shuffle_epi32(lhs_vec_0, 85)); + + iacc = mul_sum_i8_pairs_acc_int32x8(iacc, _mm256_blend_epi32(rhs_vec_0123_1 ,_mm256_shuffle_epi32(rhs_vec_4567_1, 177), 170), _mm256_shuffle_epi32(lhs_vec_0, 170)); + iacc = mul_sum_i8_pairs_acc_int32x8(iacc, _mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_1, 177) ,rhs_vec_4567_1, 170), _mm256_shuffle_epi32(lhs_vec_0, 255)); + + iacc = mul_sum_i8_pairs_acc_int32x8(iacc, _mm256_blend_epi32(rhs_vec_0123_2 ,_mm256_shuffle_epi32(rhs_vec_4567_2, 177), 170), _mm256_shuffle_epi32(lhs_vec_1, 0)); + iacc = mul_sum_i8_pairs_acc_int32x8(iacc, _mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_2, 177) ,rhs_vec_4567_2, 170), _mm256_shuffle_epi32(lhs_vec_1, 85)); + + iacc = mul_sum_i8_pairs_acc_int32x8(iacc, _mm256_blend_epi32(rhs_vec_0123_3 ,_mm256_shuffle_epi32(rhs_vec_4567_3, 177), 170), _mm256_shuffle_epi32(lhs_vec_1, 170)); + iacc = mul_sum_i8_pairs_acc_int32x8(iacc, _mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_3, 177) ,rhs_vec_4567_3, 170), _mm256_shuffle_epi32(lhs_vec_1, 255)); + + // Accumulated values multipled with appropriate scales + acc_row = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc), _mm256_mul_ps(col_scale_f32, row_scale_f32), acc_row); + } + + // Accumulated output values permuted so as to be stored in appropriate order post accumulation + acc_row = _mm256_permutevar8x32_ps(acc_row, finalpermutemask); + _mm256_storeu_ps(s + (y * nr + x * 8), acc_row); + } + } + return; + +#endif + ggml_gemv_q4_0_8x8_q8_0_generic(n, s, bs, vx, vy, nr, nc); +} + +void ggml_gemv_q4_K_8x8_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { + const int qk = QK_K; + const int nb = n / qk; + const int ncols_interleaved = 8; + const int blocklen = 8; + static const uint32_t kmask1 = 0x3f3f3f3f; + static const uint32_t kmask2 = 0x0f0f0f0f; + static const uint32_t kmask3 = 0x03030303; + + assert (n % qk == 0); + assert (nc % ncols_interleaved == 0); + + UNUSED(s); + UNUSED(bs); + UNUSED(vx); + UNUSED(vy); + UNUSED(nr); + UNUSED(nc); + UNUSED(nb); + UNUSED(ncols_interleaved); + UNUSED(blocklen); + +#if defined(__AVX2__) + // Lookup table to convert signed nibbles to signed bytes + __m256i signextendlut = _mm256_castsi128_si256(_mm_set_epi8(-1, -2, -3, -4, -5, -6, -7, -8, 7, 6, 5, 4, 3, 2, 1, 0)); + signextendlut = _mm256_permute2f128_si256(signextendlut, signextendlut, 0); + // Shuffle masks to rearrange delta and scale values to multiply with appropriate scales + __m128i deltamask = _mm_set_epi8(15, 14, 7, 6, 13, 12, 5, 4, 11, 10, 3, 2, 9, 8, 1, 0); + __m128i scalemask = _mm_set_epi8(7, 7, 3, 3, 6, 6, 2, 2, 5, 5, 1, 1, 4, 4, 0, 0); + // Permute mask used for easier vector processing at later stages + __m256i finalpermutemask = _mm256_set_epi32(7, 5, 3, 1, 6, 4, 2, 0); + + // Mask to extract nibbles from bytes + const __m256i m4b = _mm256_set1_epi8(0x0F); + + int64_t b_nb = n / QK_K; + + const block_q4_Kx8 * b_ptr_start = (const block_q4_Kx8 *)vx; + const block_q8_K * a_ptr_start = (const block_q8_K *)vy; + + // Process Q8_K blocks one by one + for (int64_t y = 0; y < nr; y++) { + + // Pointers to 
LHS blocks of block_q8_K format + const block_q8_K * a_ptr = a_ptr_start + (y * nb); + + // Take group of eight interleaved block_q4_K structures at each pass of the loop and perform dot product operation + for (int64_t x = 0; x < nc / 8; x++) { + + // Pointers to RHS blocks + const block_q4_Kx8 * b_ptr = b_ptr_start + (x * b_nb); + + // Master FP accumulators + __m256 acc_row = _mm256_setzero_ps(); + __m256 acc_min_rows = _mm256_setzero_ps(); + + for (int64_t b = 0; b < nb; b++) { + + // Load and convert to FP32 scale from block_q8_K + const __m256 row_scale_f32 = _mm256_set1_ps((a_ptr[b].d)); + + // Load the scale values for the 8 blocks interleaved in block_q4_Kx8 + // col_scale_f32 rearranged so as to multiply with appropriate quants + const __m256 col_scale_f32 = GGML_F32Cx8_REARRANGE_LOAD(b_ptr[b].d, deltamask); + const __m256 col_dmin_f32 = GGML_F32Cx8_LOAD(b_ptr[b].dmin); + + __m256i iacc_b = _mm256_setzero_si256(); + __m256i iacc_min_b = _mm256_setzero_si256(); + + const __m256i q8sums = _mm256_loadu_si256((const __m256i * )(a_ptr[b].bsums)); + __m256i q8s = _mm256_castsi128_si256(_mm_hadd_epi16(_mm256_castsi256_si128(q8sums), _mm256_extracti128_si256(q8sums, 1))); + q8s = _mm256_permute2f128_si256(q8s, q8s, 0); + + // Processes two sub blocks from each Q4_K in each iteration + for (int sb = 0; sb < QK_K / 64; sb++) { + + // Load the eight block_q4_K for two sub blocks quantized values interleaved with each other in chunks of eight - B0,B1 ....B6,B7 + const __m256i rhs_raw_vec_0123_0 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + sb * 256)); + const __m256i rhs_raw_vec_4567_0 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 32 + sb * 256)); + const __m256i rhs_raw_vec_0123_1 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 64 + sb * 256)); + const __m256i rhs_raw_vec_4567_1 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 96 + sb * 256)); + const __m256i rhs_raw_vec_0123_2 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 128 + sb * 256)); + const __m256i rhs_raw_vec_4567_2 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 160 + sb * 256)); + const __m256i rhs_raw_vec_0123_3 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 192 + sb * 256)); + const __m256i rhs_raw_vec_4567_3 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 224 + sb * 256)); + + // 4-bit -> 8-bit + // Values of the first sub block of eight block_q4_K structures for the sb loop + const __m256i rhs_vec_0123_00 = _mm256_and_si256(rhs_raw_vec_0123_0, m4b); + const __m256i rhs_vec_4567_00 = _mm256_and_si256(rhs_raw_vec_4567_0, m4b); + const __m256i rhs_vec_0123_01 = _mm256_and_si256(rhs_raw_vec_0123_1, m4b); + const __m256i rhs_vec_4567_01 = _mm256_and_si256(rhs_raw_vec_4567_1, m4b); + const __m256i rhs_vec_0123_02 = _mm256_and_si256(rhs_raw_vec_0123_2, m4b); + const __m256i rhs_vec_4567_02 = _mm256_and_si256(rhs_raw_vec_4567_2, m4b); + const __m256i rhs_vec_0123_03 = _mm256_and_si256(rhs_raw_vec_0123_3, m4b); + const __m256i rhs_vec_4567_03 = _mm256_and_si256(rhs_raw_vec_4567_3, m4b); + + // Values of the second sub block of eight block_q4_K structures when sb = 1 + const __m256i rhs_vec_0123_10 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_0123_0, 4), m4b); + const __m256i rhs_vec_4567_10 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_4567_0, 4), m4b); + const __m256i rhs_vec_0123_11 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_0123_1, 4), m4b); + const __m256i rhs_vec_4567_11 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_4567_1, 4), m4b); + const __m256i 
rhs_vec_0123_12 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_0123_2, 4), m4b); + const __m256i rhs_vec_4567_12 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_4567_2, 4), m4b); + const __m256i rhs_vec_0123_13 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_0123_3, 4), m4b); + const __m256i rhs_vec_4567_13 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_4567_3, 4), m4b); + + uint32_t utmp_0[4], utmp_1[4]; + + // Scales and Mins of corresponding sub blocks from different Q8_K structures are stored together + // The below block is for eg to extract first sub block's scales and mins from different Q4_K structures for the sb loop + memcpy(utmp_0, b_ptr[b].scales + 24 * sb, 12); + utmp_0[3] = ((utmp_0[2] >> 4) & kmask2) | (((utmp_0[1] >> 6) & kmask3) << 4); + const uint32_t uaux_0 = utmp_0[1] & kmask1; + utmp_0[1] = (utmp_0[2] & kmask2) | (((utmp_0[0] >> 6) & kmask3) << 4); + utmp_0[2] = uaux_0; + utmp_0[0] &= kmask1; + + // The below block is for eg to extract second sub block's scales and mins from different Q4_K structures for the sb loop + memcpy(utmp_1, b_ptr[b].scales + 12 + sb * 24, 12); + utmp_1[3] = ((utmp_1[2] >> 4) & kmask2) | (((utmp_1[1] >> 6) & kmask3) << 4); + const uint32_t uaux_1 = utmp_1[1] & kmask1; + utmp_1[1] = (utmp_1[2] & kmask2) | (((utmp_1[0] >> 6) & kmask3) << 4); + utmp_1[2] = uaux_1; + utmp_1[0] &= kmask1; + + // Scales of first sub block in the sb loop + const __m128i mins_and_scales_0 = _mm_set_epi32(utmp_0[3], utmp_0[2], utmp_0[1], utmp_0[0]); + __m128i scales_rearrange_0 = _mm_shuffle_epi8(mins_and_scales_0, scalemask); + __m256i scales_0 = _mm256_cvtepu8_epi16(scales_rearrange_0); + + // Scales of second sub block in the sb loop + __m128i mins_and_scales_1 = _mm_set_epi32(utmp_1[3], utmp_1[2], utmp_1[1], utmp_1[0]); + __m128i scales_rearrange_1 = _mm_shuffle_epi8(mins_and_scales_1, scalemask); + __m256i scales_1 = _mm256_cvtepu8_epi16(scales_rearrange_1); + + // Mins of first and second sub block of Q4_K block are arranged side by side + __m256i mins_01 = _mm256_cvtepu8_epi16(_mm_unpacklo_epi8(_mm_shuffle_epi32(mins_and_scales_0, 78), _mm_shuffle_epi32(mins_and_scales_1, 78))); + + // Load the two sub block values corresponding to sb in block_q8_K in batches of 16 bytes and replicate the same across 256 bit vector + __m256i lhs_vec_00 = _mm256_castsi128_si256(_mm_loadu_si128((const __m128i *)(a_ptr[b].qs + sb * 64))); + __m256i lhs_vec_01 = _mm256_castsi128_si256(_mm_loadu_si128((const __m128i *)(a_ptr[b].qs + 16 + sb * 64))); + __m256i lhs_vec_10 = _mm256_castsi128_si256(_mm_loadu_si128((const __m128i *)(a_ptr[b].qs + 32 + sb * 64))); + __m256i lhs_vec_11 = _mm256_castsi128_si256(_mm_loadu_si128((const __m128i *)(a_ptr[b].qs + 48 + sb * 64))); + + lhs_vec_00 = _mm256_permute2f128_si256(lhs_vec_00, lhs_vec_00, 0); + lhs_vec_01 = _mm256_permute2f128_si256(lhs_vec_01, lhs_vec_01, 0); + lhs_vec_10 = _mm256_permute2f128_si256(lhs_vec_10, lhs_vec_10, 0); + lhs_vec_11 = _mm256_permute2f128_si256(lhs_vec_11, lhs_vec_11, 0); + + // Dot product done within 32 bit lanes and accumulated in the same vector + // First done for first sub block and thenn for second sub block in each sb + // B0(0-3) B4(0-3) B1(0-3) B5(0-3) B2(0-3) B6(0-3) B3(0-3) B7(0-3) with A0(0-3) + // B0(4-7) B4(4-7) B1(4-7) B5(4-7) B2(4-7) B6(4-7) B3(4-7) B7(4-7) with A0(4-7) + // ........................................................................... 
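+                    // (here the nibbles stay unsigned 0..15 - valid for
+                    //  _mm256_maddubs_epi16, which needs an unsigned first operand -
+                    //  because Q4_K stores w = d*scale*q - dmin*min; the min part is
+                    //  reconstructed separately below from dmin and the Q8_K bsums)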
+ // B0(28-31) B4(28-31) B1(28-31) B5(28-31) B2(28-31) B6(28-31) B3(28-31) B7(28-31) with A0(28-31) + + + __m256i iacc_0 = _mm256_setzero_si256(); + __m256i iacc_1 = _mm256_setzero_si256(); + + iacc_0 = _mm256_add_epi16(iacc_0, _mm256_maddubs_epi16(_mm256_blend_epi32(rhs_vec_0123_00 ,_mm256_shuffle_epi32(rhs_vec_4567_00, 177), 170), _mm256_shuffle_epi32(lhs_vec_00, 0))); + iacc_0 = _mm256_add_epi16(iacc_0, _mm256_maddubs_epi16(_mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_00, 177) ,rhs_vec_4567_00, 170), _mm256_shuffle_epi32(lhs_vec_00, 85))); + + iacc_0 = _mm256_add_epi16(iacc_0, _mm256_maddubs_epi16(_mm256_blend_epi32(rhs_vec_0123_01 ,_mm256_shuffle_epi32(rhs_vec_4567_01, 177), 170), _mm256_shuffle_epi32(lhs_vec_00, 170))); + iacc_0 = _mm256_add_epi16(iacc_0, _mm256_maddubs_epi16(_mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_01, 177) ,rhs_vec_4567_01, 170), _mm256_shuffle_epi32(lhs_vec_00, 255))); + + iacc_0 = _mm256_add_epi16(iacc_0, _mm256_maddubs_epi16(_mm256_blend_epi32(rhs_vec_0123_02 ,_mm256_shuffle_epi32(rhs_vec_4567_02, 177), 170), _mm256_shuffle_epi32(lhs_vec_01, 0))); + iacc_0 = _mm256_add_epi16(iacc_0, _mm256_maddubs_epi16(_mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_02, 177) ,rhs_vec_4567_02, 170), _mm256_shuffle_epi32(lhs_vec_01, 85))); + + iacc_0 = _mm256_add_epi16(iacc_0, _mm256_maddubs_epi16(_mm256_blend_epi32(rhs_vec_0123_03 ,_mm256_shuffle_epi32(rhs_vec_4567_03, 177), 170), _mm256_shuffle_epi32(lhs_vec_01, 170))); + iacc_0 = _mm256_add_epi16(iacc_0, _mm256_maddubs_epi16(_mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_03, 177) ,rhs_vec_4567_03, 170), _mm256_shuffle_epi32(lhs_vec_01, 255))); + + iacc_0 = _mm256_madd_epi16(iacc_0, scales_0); + + iacc_1 = _mm256_add_epi16(iacc_1, _mm256_maddubs_epi16(_mm256_blend_epi32(rhs_vec_0123_10 ,_mm256_shuffle_epi32(rhs_vec_4567_10, 177), 170), _mm256_shuffle_epi32(lhs_vec_10, 0))); + iacc_1 = _mm256_add_epi16(iacc_1, _mm256_maddubs_epi16(_mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_10, 177) ,rhs_vec_4567_10, 170), _mm256_shuffle_epi32(lhs_vec_10, 85))); + + iacc_1 = _mm256_add_epi16(iacc_1, _mm256_maddubs_epi16(_mm256_blend_epi32(rhs_vec_0123_11 ,_mm256_shuffle_epi32(rhs_vec_4567_11, 177), 170), _mm256_shuffle_epi32(lhs_vec_10, 170))); + iacc_1 = _mm256_add_epi16(iacc_1, _mm256_maddubs_epi16(_mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_11, 177) ,rhs_vec_4567_11, 170), _mm256_shuffle_epi32(lhs_vec_10, 255))); + + iacc_1 = _mm256_add_epi16(iacc_1, _mm256_maddubs_epi16(_mm256_blend_epi32(rhs_vec_0123_12 ,_mm256_shuffle_epi32(rhs_vec_4567_12, 177), 170), _mm256_shuffle_epi32(lhs_vec_11, 0))); + iacc_1 = _mm256_add_epi16(iacc_1, _mm256_maddubs_epi16(_mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_12, 177) ,rhs_vec_4567_12, 170), _mm256_shuffle_epi32(lhs_vec_11, 85))); + + iacc_1 = _mm256_add_epi16(iacc_1, _mm256_maddubs_epi16(_mm256_blend_epi32(rhs_vec_0123_13 ,_mm256_shuffle_epi32(rhs_vec_4567_13, 177), 170), _mm256_shuffle_epi32(lhs_vec_11, 170))); + iacc_1 = _mm256_add_epi16(iacc_1, _mm256_maddubs_epi16(_mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_13, 177) ,rhs_vec_4567_13, 170), _mm256_shuffle_epi32(lhs_vec_11, 255))); + + iacc_1 = _mm256_madd_epi16(iacc_1, scales_1); + + // Accumulate the iacc value for one sb + __m256i iacc_sb = _mm256_add_epi32(iacc_0, iacc_1); + + // Broadcast the bsums of the two sub blocks of the iteration of Q8_K across the vector + // Multiply-Add with corresponding mins of Q4_Kx8 with bsums + __m256i q8s_sb = _mm256_shuffle_epi32(q8s, 0); + __m256i 
iacc_min_sb = _mm256_madd_epi16(q8s_sb, mins_01); + q8s = _mm256_bsrli_epi128(q8s, 4); + + // Accumulate for the complete block + iacc_b = _mm256_add_epi32(iacc_b, iacc_sb); + iacc_min_b = _mm256_add_epi32(iacc_min_b, iacc_min_sb); + } + + // Multiply-Add with scale values for the complete super block + acc_row = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_b), _mm256_mul_ps(col_scale_f32, row_scale_f32), acc_row); + acc_min_rows = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_min_b), _mm256_mul_ps(col_dmin_f32, row_scale_f32), acc_min_rows); + + } + + // Accumulated output values permuted so as to be stored in appropriate order post accumulation + acc_row = _mm256_permutevar8x32_ps(acc_row, finalpermutemask); + _mm256_storeu_ps(s + (y * nr + x * 8), _mm256_sub_ps(acc_row, acc_min_rows)); + } + } + +#else + UNUSED(kmask1); + UNUSED(kmask2); + UNUSED(kmask3); + ggml_gemv_q4_K_8x8_q8_K_generic(n, s, bs, vx, vy, nr, nc); +#endif +} + +void ggml_gemv_q2_K_8x8_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { + const int qk = QK_K; + const int nb = n / qk; + const int ncols_interleaved = 8; + const int blocklen = 8; + + assert (n % qk == 0); + assert (nc % ncols_interleaved == 0); + + UNUSED(s); + UNUSED(bs); + UNUSED(vx); + UNUSED(vy); + UNUSED(nr); + UNUSED(nc); + UNUSED(nb); + UNUSED(ncols_interleaved); + UNUSED(blocklen); + +#if defined(__AVX2__) + // Lookup table to convert signed nibbles to signed bytes + __m256i signextendlut = _mm256_castsi128_si256(_mm_set_epi8(-1, -2, -3, -4, -5, -6, -7, -8, 7, 6, 5, 4, 3, 2, 1, 0)); + signextendlut = _mm256_permute2f128_si256(signextendlut, signextendlut, 0); + // Shuffle masks to rearrange delta values to multiply with appropriate scales + __m128i deltamask = _mm_set_epi8(15, 14, 7, 6, 13, 12, 5, 4, 11, 10, 3, 2, 9, 8, 1, 0); + // Permute mask used for easier vector processing at later stages + __m256i finalpermutemask = _mm256_set_epi32(7, 5, 3, 1, 6, 4, 2, 0); + + const __m256i m3b = _mm256_set1_epi8(3); + const __m128i m4b_sse = _mm_set1_epi8(0xF); + + //Mask to get appropriate scales + __m128i scalemask1 = _mm_set_epi8(14,14,6,6,12,12,4,4,10,10,2,2,8,8,0,0); + __m128i scalemask2 = _mm_set_epi8(15,15,7,7,13,13,5,5,11,11,3,3,9,9,1,1); + + int64_t b_nb = n / QK_K; + + const block_q2_Kx8 * b_ptr_start = (const block_q2_Kx8 *)vx; + const block_q8_K * a_ptr_start = (const block_q8_K *)vy; + + // Process Q8_K blocks one by one + for (int64_t y = 0; y < nr; y++) { + + // Pointers to LHS blocks of block_q8_K format + const block_q8_K * a_ptr = a_ptr_start + (y * nb); + + // Take group of eight interleaved block_q2_K structures at each pass of the loop and perform dot product operation + for(int64_t x = 0; x < nc / 8; x++) { + + // Pointers to RHS blocks + const block_q2_Kx8 * b_ptr = b_ptr_start + (x * b_nb); + + // Master FP accumulators + __m256 acc_row = _mm256_setzero_ps(); + __m256 acc_min_rows = _mm256_setzero_ps(); + + for (int64_t b = 0; b < nb; b++) { + + // Load and convert to FP32 delta from block_q8_K + const __m256 row_scale_f32 = _mm256_set1_ps((a_ptr[b].d)); + + // Load the delta values for the 8 blocks interleaved in block_q2_Kx8 + // col_scale_f32 rearranged so as to multiply with appropriate quants + const __m256 col_scale_f32 = GGML_F32Cx8_REARRANGE_LOAD(b_ptr[b].d, deltamask); + const __m256 col_dmin_f32 = GGML_F32Cx8_LOAD(b_ptr[b].dmin); + + __m256i iacc_b = _mm256_setzero_si256(); + __m256i iacc_min_b = _mm256_setzero_si256(); + + // Processes eight sub blocks from 
each Q2_K in each iteration + for(int sb = 0; sb < QK_K / 128; sb++) { + + // Load the eight block_q2_K for eight sub blocks quantized values interleaved with each other in chunks of eight - B0,B1 ....B6,B7 + const __m256i rhs_raw_vec_0123_0 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + sb * 256)); + const __m256i rhs_raw_vec_4567_0 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 32 + sb * 256)); + const __m256i rhs_raw_vec_0123_1 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 64 + sb * 256)); + const __m256i rhs_raw_vec_4567_1 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 96 + sb * 256)); + const __m256i rhs_raw_vec_0123_2 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 128 + sb * 256)); + const __m256i rhs_raw_vec_4567_2 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 160 + sb * 256)); + const __m256i rhs_raw_vec_0123_3 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 192 + sb * 256)); + const __m256i rhs_raw_vec_4567_3 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 224 + sb * 256)); + + // 2-bit -> 8-bit + // Values of the 0th,2nd,4th,6th sub blocks of eight block_q2_K structures for the sb loop + const __m256i rhs_vec_0123_00 = _mm256_and_si256(rhs_raw_vec_0123_0, m3b); //B00(0-7) B01(0-7) B02(0-7) B03(0-7) + const __m256i rhs_vec_0123_20 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_0123_0, 2), m3b); //B20(0-7) B21(0-7) B22(0-7) B23(0-7) + const __m256i rhs_vec_0123_40 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_0123_0, 4), m3b); //B40(0-7) B41(0-7) B42(0-7) B43(0-7) + const __m256i rhs_vec_0123_60 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_0123_0, 6), m3b); //B60(0-7) B61(0-7) B62(0-7) B63(0-7) + + const __m256i rhs_vec_4567_00 = _mm256_and_si256(rhs_raw_vec_4567_0, m3b); //B04(0-7) B05(0-7) B06(0-7) B07(0-7) + const __m256i rhs_vec_4567_20 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_4567_0, 2), m3b); //B24(0-7) B25(0-7) B26(0-7) B27(0-7) + const __m256i rhs_vec_4567_40 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_4567_0, 4), m3b); //B44(0-7) B45(0-7) B46(0-7) B47(0-7) + const __m256i rhs_vec_4567_60 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_4567_0, 6), m3b); //B64(0-7) B65(0-7) B66(0-7) B67(0-7) + + const __m256i rhs_vec_0123_01 = _mm256_and_si256(rhs_raw_vec_0123_1, m3b); //B00(8-15) B01(8-15) B02(8-15) B03(8-15) + const __m256i rhs_vec_0123_21 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_0123_1, 2), m3b); //B20(8-15) B21(8-15) B22(8-15) B23(8-15) + const __m256i rhs_vec_0123_41 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_0123_1, 4), m3b); //B40(8-15) B41(8-15) B42(8-15) B43(8-15) + const __m256i rhs_vec_0123_61 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_0123_1, 6), m3b); //B60(8-15) B61(8-15) B62(8-15) B63(8-15) + + const __m256i rhs_vec_4567_01 = _mm256_and_si256(rhs_raw_vec_4567_1, m3b); //B04(8-15) B05(8-15) B06(8-15) B07(8-15) + const __m256i rhs_vec_4567_21 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_4567_1, 2), m3b); //B24(8-15) B25(8-15) B26(8-15) B27(8-15) + const __m256i rhs_vec_4567_41 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_4567_1, 4), m3b); //B44(8-15) B45(8-15) B46(8-15) B47(8-15) + const __m256i rhs_vec_4567_61 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_4567_1, 6), m3b); //B64(8-15) B65(8-15) B66(8-15) B67(8-15) + + // Values of the 1st,3rd,5th,7th sub blocks of eight block_q2_K structures for the sb loop + const __m256i rhs_vec_0123_10 = _mm256_and_si256(rhs_raw_vec_0123_2, m3b); //B10(0-7) B11(0-7) B12(0-7) B13(0-7) + const __m256i rhs_vec_0123_30 = 
_mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_0123_2, 2), m3b); //B30(0-7) B31(0-7) B32(0-7) B33(0-7) + const __m256i rhs_vec_0123_50 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_0123_2, 4), m3b); //B50(0-7) B51(0-7) B52(0-7) B53(0-7) + const __m256i rhs_vec_0123_70 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_0123_2, 6), m3b); //B70(0-7) B71(0-7) B72(0-7) B73(0-7) + + const __m256i rhs_vec_4567_10 = _mm256_and_si256(rhs_raw_vec_4567_2, m3b); //B14(0-7) B15(0-7) B16(0-7) B17(0-7) + const __m256i rhs_vec_4567_30 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_4567_2, 2), m3b); //B34(0-7) B35(0-7) B36(0-7) B37(0-7) + const __m256i rhs_vec_4567_50 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_4567_2, 4), m3b); //B54(0-7) B55(0-7) B56(0-7) B57(0-7) + const __m256i rhs_vec_4567_70 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_4567_2, 6), m3b); //B74(0-7) B75(0-7) B76(0-7) B77(0-7) + + const __m256i rhs_vec_0123_11 = _mm256_and_si256(rhs_raw_vec_0123_3, m3b); //B10(8-15) B11(8-15) B12(8-15) B13(8-15) + const __m256i rhs_vec_0123_31 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_0123_3, 2), m3b); //B30(8-15) B31(8-15) B32(8-15) B33(8-15) + const __m256i rhs_vec_0123_51 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_0123_3, 4), m3b); //B50(8-15) B51(8-15) B52(8-15) B53(8-15) + const __m256i rhs_vec_0123_71 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_0123_3, 6), m3b); //B70(8-15) B71(8-15) B72(8-15) B73(8-15) + + const __m256i rhs_vec_4567_11 = _mm256_and_si256(rhs_raw_vec_4567_3, m3b); //B14(8-15) B15(8-15) B16(8-15) B17(8-15) + const __m256i rhs_vec_4567_31 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_4567_3, 2), m3b); //B34(8-15) B35(8-15) B36(8-15) B37(8-15) + const __m256i rhs_vec_4567_51 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_4567_3, 4), m3b); //B54(8-15) B55(8-15) B56(8-15) B57(8-15) + const __m256i rhs_vec_4567_71 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_4567_3, 6), m3b); //B74(8-15) B75(8-15) B76(8-15) B77(8-15) + + //Scales and Mins of corresponding sub blocks from different Q2_K structures are stored together + //s00 m00 s01 m01 s10 m10 s11 m11 s20 m20 s21 m21 s30 m30 s31 m31 s40 m40 s41 m41 s50 m50 s51 m51 s60 m60 s61 m61 s70 m70 s71 m71 + + const __m128i mins_and_scales_01 = _mm_loadu_si128((const __m128i *)(b_ptr[b].scales + sb * 64)); + const __m128i mins_and_scales_23 = _mm_loadu_si128((const __m128i *)(b_ptr[b].scales + 16 + sb * 64)); + const __m128i mins_and_scales_45 = _mm_loadu_si128((const __m128i *)(b_ptr[b].scales + 32 + sb * 64)); + const __m128i mins_and_scales_67 = _mm_loadu_si128((const __m128i *)(b_ptr[b].scales + 48 + sb * 64)); + + // Extract scales which is lower half from mins_and_scales + const __m128i scales_01 = _mm_and_si128(mins_and_scales_01, m4b_sse); + const __m128i scales_23 = _mm_and_si128(mins_and_scales_23, m4b_sse); + const __m128i scales_45 = _mm_and_si128(mins_and_scales_45, m4b_sse); + const __m128i scales_67 = _mm_and_si128(mins_and_scales_67, m4b_sse); + + // Extract mins which is upper half from mins_and_scales + const __m256i mins_01 = _mm256_cvtepu8_epi16(_mm_and_si128(_mm_srli_epi16(mins_and_scales_01, 4), m4b_sse)); + const __m256i mins_23 = _mm256_cvtepu8_epi16(_mm_and_si128(_mm_srli_epi16(mins_and_scales_23, 4), m4b_sse)); + const __m256i mins_45 = _mm256_cvtepu8_epi16(_mm_and_si128(_mm_srli_epi16(mins_and_scales_45, 4), m4b_sse)); + const __m256i mins_67 = _mm256_cvtepu8_epi16(_mm_and_si128(_mm_srli_epi16(mins_and_scales_67, 4), m4b_sse)); + + // Scales of sub blocks in the sb loop + // Scales 
of the 0th sub block from each super block + __m128i scales_rearrange_0 = _mm_shuffle_epi8(scales_01, scalemask1); + __m256i scales_0 = _mm256_cvtepu8_epi16(scales_rearrange_0); + + // Scales of the 1st sub block from each super block + __m128i scales_rearrange_1 = _mm_shuffle_epi8(scales_01, scalemask2); + __m256i scales_1 = _mm256_cvtepu8_epi16(scales_rearrange_1); + + // Scales of the 2nd sub block from each super block + __m128i scales_rearrange_2 = _mm_shuffle_epi8(scales_23, scalemask1); + __m256i scales_2 = _mm256_cvtepu8_epi16(scales_rearrange_2); + + // Scales of the 3rd sub block from each super block + __m128i scales_rearrange_3 = _mm_shuffle_epi8(scales_23, scalemask2); + __m256i scales_3 = _mm256_cvtepu8_epi16(scales_rearrange_3); + + // Scales of the 4th sub block from each super block + __m128i scales_rearrange_4 = _mm_shuffle_epi8(scales_45, scalemask1); + __m256i scales_4 = _mm256_cvtepu8_epi16(scales_rearrange_4); + + // Scales of the 5th sub block from each super block + __m128i scales_rearrange_5 = _mm_shuffle_epi8(scales_45, scalemask2); + __m256i scales_5 = _mm256_cvtepu8_epi16(scales_rearrange_5); + + // Scales of the 6th sub block from each super block + __m128i scales_rearrange_6 = _mm_shuffle_epi8(scales_67, scalemask1); + __m256i scales_6 = _mm256_cvtepu8_epi16(scales_rearrange_6); + + // Scales of the 7th sub block from each super block + __m128i scales_rearrange_7 = _mm_shuffle_epi8(scales_67, scalemask2); + __m256i scales_7 = _mm256_cvtepu8_epi16(scales_rearrange_7); + + // Load the sub block values corresponding to sb in block_q8_K in batches of 16 bytes and replicate the same across 256 bit vector + __m256i lhs_vec_0 = _mm256_castsi128_si256(_mm_loadu_si128((const __m128i *)(a_ptr[b].qs + sb * 128))); + __m256i lhs_vec_1 = _mm256_castsi128_si256(_mm_loadu_si128((const __m128i *)(a_ptr[b].qs + 16 + sb * 128))); + __m256i lhs_vec_2 = _mm256_castsi128_si256(_mm_loadu_si128((const __m128i *)(a_ptr[b].qs + 32 + sb * 128))); + __m256i lhs_vec_3 = _mm256_castsi128_si256(_mm_loadu_si128((const __m128i *)(a_ptr[b].qs + 48 + sb * 128))); + __m256i lhs_vec_4 = _mm256_castsi128_si256(_mm_loadu_si128((const __m128i *)(a_ptr[b].qs + 64 + sb * 128))); + __m256i lhs_vec_5 = _mm256_castsi128_si256(_mm_loadu_si128((const __m128i *)(a_ptr[b].qs + 80 + sb * 128))); + __m256i lhs_vec_6 = _mm256_castsi128_si256(_mm_loadu_si128((const __m128i *)(a_ptr[b].qs + 96 + sb * 128))); + __m256i lhs_vec_7 = _mm256_castsi128_si256(_mm_loadu_si128((const __m128i *)(a_ptr[b].qs + 112 + sb * 128))); + + lhs_vec_0 = _mm256_permute2f128_si256(lhs_vec_0, lhs_vec_0, 0); + lhs_vec_1 = _mm256_permute2f128_si256(lhs_vec_1, lhs_vec_1, 0); + lhs_vec_2 = _mm256_permute2f128_si256(lhs_vec_2, lhs_vec_2, 0); + lhs_vec_3 = _mm256_permute2f128_si256(lhs_vec_3, lhs_vec_3, 0); + lhs_vec_4 = _mm256_permute2f128_si256(lhs_vec_4, lhs_vec_4, 0); + lhs_vec_5 = _mm256_permute2f128_si256(lhs_vec_5, lhs_vec_5, 0); + lhs_vec_6 = _mm256_permute2f128_si256(lhs_vec_6, lhs_vec_6, 0); + lhs_vec_7 = _mm256_permute2f128_si256(lhs_vec_7, lhs_vec_7, 0); + + __m256i iacc_0 = _mm256_setzero_si256(); + __m256i iacc_1 = _mm256_setzero_si256(); + __m256i iacc_2 = _mm256_setzero_si256(); + __m256i iacc_3 = _mm256_setzero_si256(); + __m256i iacc_4 = _mm256_setzero_si256(); + __m256i iacc_5 = _mm256_setzero_si256(); + __m256i iacc_6 = _mm256_setzero_si256(); + __m256i iacc_7 = _mm256_setzero_si256(); + + // Dot product done within 32 bit lanes and accumulated in the same vector + // First done for 0th sub block and then for seven (1st 
- 7th) other sub blocks processed for each sb (sb < QK_K/128 loop) // B0(0-3) B4(0-3) B1(0-3) B5(0-3) B2(0-3) B6(0-3) B3(0-3) B7(0-3) with A0(0-3) + // B0(4-7) B4(4-7) B1(4-7) B5(4-7) B2(4-7) B6(4-7) B3(4-7) B7(4-7) with A0(4-7) + // B0(8-11) B4(8-11) B1(8-11) B5(8-11) B2(8-11) B6(8-11) B3(8-11) B7(8-11) with A0(8-11) + // B0(12-15) B4(12-15) B1(12-15) B5(12-15) B2(12-15) B6(12-15) B3(12-15) B7(12-15) with A0(12-15) + + iacc_0 = _mm256_add_epi16(iacc_0, _mm256_maddubs_epi16(_mm256_blend_epi32(rhs_vec_0123_00 ,_mm256_shuffle_epi32(rhs_vec_4567_00, 177), 170), _mm256_shuffle_epi32(lhs_vec_0, 0))); + iacc_0 = _mm256_add_epi16(iacc_0, _mm256_maddubs_epi16(_mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_00, 177) ,rhs_vec_4567_00, 170), _mm256_shuffle_epi32(lhs_vec_0, 85))); + + iacc_0 = _mm256_add_epi16(iacc_0, _mm256_maddubs_epi16(_mm256_blend_epi32(rhs_vec_0123_01 ,_mm256_shuffle_epi32(rhs_vec_4567_01, 177), 170), _mm256_shuffle_epi32(lhs_vec_0, 170))); + iacc_0 = _mm256_add_epi16(iacc_0, _mm256_maddubs_epi16(_mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_01, 177) ,rhs_vec_4567_01, 170), _mm256_shuffle_epi32(lhs_vec_0, 255))); + + iacc_0 = _mm256_madd_epi16(iacc_0, scales_0); + + iacc_1 = _mm256_add_epi16(iacc_1, _mm256_maddubs_epi16(_mm256_blend_epi32(rhs_vec_0123_10 ,_mm256_shuffle_epi32(rhs_vec_4567_10, 177), 170), _mm256_shuffle_epi32(lhs_vec_1, 0))); + iacc_1 = _mm256_add_epi16(iacc_1, _mm256_maddubs_epi16(_mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_10, 177) ,rhs_vec_4567_10, 170), _mm256_shuffle_epi32(lhs_vec_1, 85))); + + iacc_1 = _mm256_add_epi16(iacc_1, _mm256_maddubs_epi16(_mm256_blend_epi32(rhs_vec_0123_11 ,_mm256_shuffle_epi32(rhs_vec_4567_11, 177), 170), _mm256_shuffle_epi32(lhs_vec_1, 170))); + iacc_1 = _mm256_add_epi16(iacc_1, _mm256_maddubs_epi16(_mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_11, 177) ,rhs_vec_4567_11, 170), _mm256_shuffle_epi32(lhs_vec_1, 255))); + + iacc_1 = _mm256_madd_epi16(iacc_1, scales_1); + + iacc_2 = _mm256_add_epi16(iacc_2, _mm256_maddubs_epi16(_mm256_blend_epi32(rhs_vec_0123_20 ,_mm256_shuffle_epi32(rhs_vec_4567_20, 177), 170), _mm256_shuffle_epi32(lhs_vec_2, 0))); + iacc_2 = _mm256_add_epi16(iacc_2, _mm256_maddubs_epi16(_mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_20, 177) ,rhs_vec_4567_20, 170), _mm256_shuffle_epi32(lhs_vec_2, 85))); + + iacc_2 = _mm256_add_epi16(iacc_2, _mm256_maddubs_epi16(_mm256_blend_epi32(rhs_vec_0123_21 ,_mm256_shuffle_epi32(rhs_vec_4567_21, 177), 170), _mm256_shuffle_epi32(lhs_vec_2, 170))); + iacc_2 = _mm256_add_epi16(iacc_2, _mm256_maddubs_epi16(_mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_21, 177) ,rhs_vec_4567_21, 170), _mm256_shuffle_epi32(lhs_vec_2, 255))); + + iacc_2 = _mm256_madd_epi16(iacc_2, scales_2); + + iacc_3 = _mm256_add_epi16(iacc_3, _mm256_maddubs_epi16(_mm256_blend_epi32(rhs_vec_0123_30 ,_mm256_shuffle_epi32(rhs_vec_4567_30, 177), 170), _mm256_shuffle_epi32(lhs_vec_3, 0))); + iacc_3 = _mm256_add_epi16(iacc_3, _mm256_maddubs_epi16(_mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_30, 177) ,rhs_vec_4567_30, 170), _mm256_shuffle_epi32(lhs_vec_3, 85))); + + iacc_3 = _mm256_add_epi16(iacc_3, _mm256_maddubs_epi16(_mm256_blend_epi32(rhs_vec_0123_31 ,_mm256_shuffle_epi32(rhs_vec_4567_31, 177), 170), _mm256_shuffle_epi32(lhs_vec_3, 170))); + iacc_3 = _mm256_add_epi16(iacc_3, _mm256_maddubs_epi16(_mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_31, 177) ,rhs_vec_4567_31, 170), _mm256_shuffle_epi32(lhs_vec_3, 255))); + + iacc_3 = _mm256_madd_epi16(iacc_3, scales_3); + 
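+                    // (each iacc_* above and below accumulates four maddubs results;
+                    //  with 2-bit quants the int16 partial sums cannot overflow, and
+                    //  the single _mm256_madd_epi16 against the sub-block scales then
+                    //  widens them to int32)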
+                    iacc_4 = _mm256_add_epi16(iacc_4, _mm256_maddubs_epi16(_mm256_blend_epi32(rhs_vec_0123_40 ,_mm256_shuffle_epi32(rhs_vec_4567_40, 177), 170), _mm256_shuffle_epi32(lhs_vec_4, 0)));
+                    iacc_4 = _mm256_add_epi16(iacc_4, _mm256_maddubs_epi16(_mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_40, 177) ,rhs_vec_4567_40, 170), _mm256_shuffle_epi32(lhs_vec_4, 85)));
+
+                    iacc_4 = _mm256_add_epi16(iacc_4, _mm256_maddubs_epi16(_mm256_blend_epi32(rhs_vec_0123_41 ,_mm256_shuffle_epi32(rhs_vec_4567_41, 177), 170), _mm256_shuffle_epi32(lhs_vec_4, 170)));
+                    iacc_4 = _mm256_add_epi16(iacc_4, _mm256_maddubs_epi16(_mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_41, 177) ,rhs_vec_4567_41, 170), _mm256_shuffle_epi32(lhs_vec_4, 255)));
+
+                    iacc_4 = _mm256_madd_epi16(iacc_4, scales_4);
+
+                    iacc_5 = _mm256_add_epi16(iacc_5, _mm256_maddubs_epi16(_mm256_blend_epi32(rhs_vec_0123_50 ,_mm256_shuffle_epi32(rhs_vec_4567_50, 177), 170), _mm256_shuffle_epi32(lhs_vec_5, 0)));
+                    iacc_5 = _mm256_add_epi16(iacc_5, _mm256_maddubs_epi16(_mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_50, 177) ,rhs_vec_4567_50, 170), _mm256_shuffle_epi32(lhs_vec_5, 85)));
+
+                    iacc_5 = _mm256_add_epi16(iacc_5, _mm256_maddubs_epi16(_mm256_blend_epi32(rhs_vec_0123_51 ,_mm256_shuffle_epi32(rhs_vec_4567_51, 177), 170), _mm256_shuffle_epi32(lhs_vec_5, 170)));
+                    iacc_5 = _mm256_add_epi16(iacc_5, _mm256_maddubs_epi16(_mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_51, 177) ,rhs_vec_4567_51, 170), _mm256_shuffle_epi32(lhs_vec_5, 255)));
+
+                    iacc_5 = _mm256_madd_epi16(iacc_5, scales_5);
+
+                    iacc_6 = _mm256_add_epi16(iacc_6, _mm256_maddubs_epi16(_mm256_blend_epi32(rhs_vec_0123_60 ,_mm256_shuffle_epi32(rhs_vec_4567_60, 177), 170), _mm256_shuffle_epi32(lhs_vec_6, 0)));
+                    iacc_6 = _mm256_add_epi16(iacc_6, _mm256_maddubs_epi16(_mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_60, 177) ,rhs_vec_4567_60, 170), _mm256_shuffle_epi32(lhs_vec_6, 85)));
+
+                    iacc_6 = _mm256_add_epi16(iacc_6, _mm256_maddubs_epi16(_mm256_blend_epi32(rhs_vec_0123_61 ,_mm256_shuffle_epi32(rhs_vec_4567_61, 177), 170), _mm256_shuffle_epi32(lhs_vec_6, 170)));
+                    iacc_6 = _mm256_add_epi16(iacc_6, _mm256_maddubs_epi16(_mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_61, 177) ,rhs_vec_4567_61, 170), _mm256_shuffle_epi32(lhs_vec_6, 255)));
+
+                    iacc_6 = _mm256_madd_epi16(iacc_6, scales_6);
+
+                    iacc_7 = _mm256_add_epi16(iacc_7, _mm256_maddubs_epi16(_mm256_blend_epi32(rhs_vec_0123_70 ,_mm256_shuffle_epi32(rhs_vec_4567_70, 177), 170), _mm256_shuffle_epi32(lhs_vec_7, 0)));
+                    iacc_7 = _mm256_add_epi16(iacc_7, _mm256_maddubs_epi16(_mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_70, 177) ,rhs_vec_4567_70, 170), _mm256_shuffle_epi32(lhs_vec_7, 85)));
+
+                    iacc_7 = _mm256_add_epi16(iacc_7, _mm256_maddubs_epi16(_mm256_blend_epi32(rhs_vec_0123_71 ,_mm256_shuffle_epi32(rhs_vec_4567_71, 177), 170), _mm256_shuffle_epi32(lhs_vec_7, 170)));
+                    iacc_7 = _mm256_add_epi16(iacc_7, _mm256_maddubs_epi16(_mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_71, 177) ,rhs_vec_4567_71, 170), _mm256_shuffle_epi32(lhs_vec_7, 255)));
+
+                    iacc_7 = _mm256_madd_epi16(iacc_7, scales_7);
+
+                    // Accumulate the iacc value for one sb
+                    __m256i iacc_sb = _mm256_add_epi32(_mm256_add_epi32(_mm256_add_epi32(iacc_0, iacc_1), _mm256_add_epi32(iacc_2, iacc_3)), _mm256_add_epi32(_mm256_add_epi32(iacc_4, iacc_5), _mm256_add_epi32(iacc_6, iacc_7)));
+
+                    __m128i q8sums = _mm_loadu_si128((const __m128i *)(a_ptr[b].bsums + sb * 8));
+                    __m256i q8s = _mm256_castsi128_si256(q8sums);
+                    q8s = _mm256_permute2f128_si256(q8s, q8s, 0);
+
+                    // Broadcast the bsums of the two corresponding subblocks of q8_k
+                    // Multiply-Add with corresponding mins of Q2_Kx8 with bsums
+                    __m256i iacc_min_sb_01 = _mm256_madd_epi16(_mm256_shuffle_epi32(q8s, 0), mins_01);
+                    __m256i iacc_min_sb_23 = _mm256_madd_epi16(_mm256_shuffle_epi32(q8s, 85), mins_23);
+                    __m256i iacc_min_sb_45 = _mm256_madd_epi16(_mm256_shuffle_epi32(q8s, 170), mins_45);
+                    __m256i iacc_min_sb_67 = _mm256_madd_epi16(_mm256_shuffle_epi32(q8s, 255), mins_67);
+
+                    __m256i iacc_min_sb = _mm256_add_epi32(_mm256_add_epi32(iacc_min_sb_01, iacc_min_sb_23), _mm256_add_epi32(iacc_min_sb_45, iacc_min_sb_67));
+
+                    // Accumulate for the complete block
+                    iacc_b = _mm256_add_epi32(iacc_b, iacc_sb);
+                    iacc_min_b = _mm256_add_epi32(iacc_min_b, iacc_min_sb);
+                }
+
+                // Multiply-Add with scale values for complete super block
+                acc_row = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_b), _mm256_mul_ps(col_scale_f32, row_scale_f32), acc_row);
+                acc_min_rows = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_min_b), _mm256_mul_ps(col_dmin_f32, row_scale_f32), acc_min_rows);
+            }
+            // Accumulated output values permuted so as to be stored in appropriate order post accumulation
+            acc_row = _mm256_permutevar8x32_ps(acc_row, finalpermutemask);
+            _mm256_storeu_ps(s + (y * nr + x * 8), _mm256_sub_ps(acc_row, acc_min_rows));
+        }
+    }
+#else
+
+    ggml_gemv_q2_K_8x8_q8_K_generic(n, s, bs, vx, vy, nr, nc);
+
+#endif
+}
+
+void ggml_gemm_q4_0_8x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) {
+    const int qk = QK8_0;
+    const int nb = n / qk;
+    const int ncols_interleaved = 8;
+    const int blocklen = 8;
+
+    assert (n % qk == 0);
+    assert (nr % 4 == 0);
+    assert (nc % ncols_interleaved == 0);
+
+    UNUSED(s);
+    UNUSED(bs);
+    UNUSED(vx);
+    UNUSED(vy);
+    UNUSED(nr);
+    UNUSED(nc);
+    UNUSED(nb);
+    UNUSED(ncols_interleaved);
+    UNUSED(blocklen);
+
+#if defined(__AVX2__) || defined(__AVX512F__)
+    {
+        const block_q4_0x8 * b_ptr_start = (const block_q4_0x8 *)vx;
+        const block_q8_0x4 * a_ptr_start = (const block_q8_0x4 *)vy;
+        int64_t b_nb = n / QK4_0;
+        int64_t y = 0;
+        // Mask to mask out nibbles from packed bytes
+        const __m256i m4b = _mm256_set1_epi8(0x0F);
+        const __m128i loadMask = _mm_blend_epi32(_mm_setzero_si128(), _mm_set1_epi32(0xFFFFFFFF), 3);
+        // Lookup table to convert signed nibbles to signed bytes
+        __m256i signextendlut = _mm256_castsi128_si256(_mm_set_epi8(-1, -2, -3, -4, -5, -6, -7, -8, 7, 6, 5, 4, 3, 2, 1, 0));
+        signextendlut = _mm256_permute2f128_si256(signextendlut, signextendlut, 0);
+        // Permute mask used for easier vector processing at later stages
+        __m256i requiredOrder = _mm256_set_epi32(3, 2, 1, 0, 7, 6, 5, 4);
+        int64_t xstart = 0;
+        int anr = nr - nr%16; // Used to align nr with boundary of 16
+    #ifdef __AVX512F__
+        int anc = nc - nc%16; // Used to align nc with boundary of 16
+        // Mask to mask out nibbles from packed bytes expanded to 512 bit length
+        const __m512i m4bexpanded = _mm512_set1_epi8(0x0F);
+        // Lookup table to convert signed nibbles to signed bytes expanded to 512 bit length
+        __m512i signextendlutexpanded = _mm512_inserti32x8(_mm512_castsi256_si512(signextendlut), signextendlut, 1);
+
+        // Take group of four block_q8_0x4 structures at each pass of the loop and perform dot product operation
+        for (; y < anr / 4; y += 4) {
+
+            const block_q8_0x4 * a_ptrs[4];
+
+            a_ptrs[0] = a_ptr_start + (y * nb);
+            for (int i = 0; i < 3; ++i) {
+                a_ptrs[i + 1] = a_ptrs[i] + nb;
+            }
+
+            // Take group of two block_q4_0x8
structures at each pass of the loop and perform dot product operation + for (int64_t x = 0; x < anc / 8; x += 2) { + + const block_q4_0x8 * b_ptr_0 = b_ptr_start + ((x) * b_nb); + const block_q4_0x8 * b_ptr_1 = b_ptr_start + ((x + 1) * b_nb); + + // Master FP accumulators + __m512 acc_rows[16]; + for (int i = 0; i < 16; i++) { + acc_rows[i] = _mm512_setzero_ps(); + } + + for (int64_t b = 0; b < nb; b++) { + // Load the sixteen block_q4_0 quantized values interleaved with each other in chunks of eight - B0,B1 ....BE,BF + const __m256i rhs_raw_mat_0123_0 = _mm256_loadu_si256((const __m256i *)(b_ptr_0[b].qs)); + const __m256i rhs_raw_mat_4567_0 = _mm256_loadu_si256((const __m256i *)(b_ptr_0[b].qs + 32)); + const __m256i rhs_raw_mat_0123_1 = _mm256_loadu_si256((const __m256i *)(b_ptr_0[b].qs + 64)); + const __m256i rhs_raw_mat_4567_1 = _mm256_loadu_si256((const __m256i *)(b_ptr_0[b].qs + 96)); + + const __m256i rhs_raw_mat_89AB_0 = _mm256_loadu_si256((const __m256i *)(b_ptr_1[b].qs)); + const __m256i rhs_raw_mat_CDEF_0 = _mm256_loadu_si256((const __m256i *)(b_ptr_1[b].qs + 32)); + const __m256i rhs_raw_mat_89AB_1 = _mm256_loadu_si256((const __m256i *)(b_ptr_1[b].qs + 64)); + const __m256i rhs_raw_mat_CDEF_1 = _mm256_loadu_si256((const __m256i *)(b_ptr_1[b].qs + 96)); + + // Save the values in the following vectors in the formats B0B1B4B5B8B9BCBD, B2B3B6B7BABBBEBF for further processing and storing of values + const __m256i rhs_raw_mat_0145_0 = _mm256_blend_epi32(rhs_raw_mat_0123_0, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_0, requiredOrder), 240); + const __m256i rhs_raw_mat_2367_0 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_0, requiredOrder), rhs_raw_mat_4567_0, 240); + const __m256i rhs_raw_mat_0145_1 = _mm256_blend_epi32(rhs_raw_mat_0123_1, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_1, requiredOrder), 240); + const __m256i rhs_raw_mat_2367_1 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_1, requiredOrder), rhs_raw_mat_4567_1, 240); + + const __m256i rhs_raw_mat_89CD_0 = _mm256_blend_epi32(rhs_raw_mat_89AB_0, _mm256_permutevar8x32_epi32(rhs_raw_mat_CDEF_0, requiredOrder), 240); + const __m256i rhs_raw_mat_ABEF_0 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_89AB_0, requiredOrder), rhs_raw_mat_CDEF_0, 240); + const __m256i rhs_raw_mat_89CD_1 = _mm256_blend_epi32(rhs_raw_mat_89AB_1, _mm256_permutevar8x32_epi32(rhs_raw_mat_CDEF_1, requiredOrder), 240); + const __m256i rhs_raw_mat_ABEF_1 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_89AB_1, requiredOrder), rhs_raw_mat_CDEF_1, 240); + + const __m512i rhs_raw_mat_014589CD_0 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_0145_0), rhs_raw_mat_89CD_0, 1); + const __m512i rhs_raw_mat_2367ABEF_0 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_2367_0), rhs_raw_mat_ABEF_0, 1); + const __m512i rhs_raw_mat_014589CD_1 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_0145_1), rhs_raw_mat_89CD_1, 1); + const __m512i rhs_raw_mat_2367ABEF_1 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_2367_1), rhs_raw_mat_ABEF_1, 1); + + // 4-bit -> 8-bit - Sign is maintained + const __m512i rhs_mat_014589CD_0 = _mm512_shuffle_epi8(signextendlutexpanded, _mm512_and_si512(rhs_raw_mat_014589CD_0, m4bexpanded)); //B0(0-7) B1(0-7) B4(0-7) B5(0-7) B8(0-7) B9(0-7) BC(0-7) BD(0-7) + const __m512i rhs_mat_2367ABEF_0 = _mm512_shuffle_epi8(signextendlutexpanded, _mm512_and_si512(rhs_raw_mat_2367ABEF_0, m4bexpanded)); //B2(0-7) B3(0-7) B6(0-7) B7(0-7) BA(0-7) BB(0-7) BE(0-7) 
BF(0-7) + + const __m512i rhs_mat_014589CD_1 = _mm512_shuffle_epi8(signextendlutexpanded, _mm512_and_si512(rhs_raw_mat_014589CD_1, m4bexpanded)); //B0(8-15) B1(8-15) B4(8-15) B5(8-15) B8(8-15) B9(8-15) BC(8-15) BD(8-15) + const __m512i rhs_mat_2367ABEF_1 = _mm512_shuffle_epi8(signextendlutexpanded, _mm512_and_si512(rhs_raw_mat_2367ABEF_1, m4bexpanded)); //B2(8-15) B3(8-15) B6(8-15) B7(8-15) BA(8-15) BB(8-15) BE(8-15) BF(8-15) + + const __m512i rhs_mat_014589CD_2 = _mm512_shuffle_epi8(signextendlutexpanded, _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_0, 4), m4bexpanded)); //B0(16-23) B1(16-23) B4(16-23) B5(16-23) B8(16-23) B9(16-23) BC(16-23) BD(16-23) + const __m512i rhs_mat_2367ABEF_2 = _mm512_shuffle_epi8(signextendlutexpanded, _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_0, 4), m4bexpanded)); //B2(16-23) B3(16-23) B6(16-23) B7(16-23) BA(16-23) BB(16-23) BE(16-23) BF(16-23) + + const __m512i rhs_mat_014589CD_3 = _mm512_shuffle_epi8(signextendlutexpanded, _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_1, 4), m4bexpanded)); //B0(24-31) B1(24-31) B4(24-31) B5(24-31) B8(24-31) B9(24-31) BC(24-31) BD(24-31) + const __m512i rhs_mat_2367ABEF_3 = _mm512_shuffle_epi8(signextendlutexpanded, _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_1, 4), m4bexpanded)); //B2(24-31) B3(24-31) B6(24-31) B7(24-31) BA(24-31) BB(24-31) BE(24-31) BF(24-31) + + // Shuffle pattern one - right side input + const __m512i rhs_mat_014589CD_0_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_0, (_MM_PERM_ENUM)136); //B0(0-3) B1(0-3) B0(0-3) B1(0-3) B4(0-3) B5(0-3) B4(0-3) B5(0-3) B8(0-3) B9(0-3) B8(0-3) B9(0-3) BC(0-3) BD(0-3) BC(0-3) BD(0-3) + const __m512i rhs_mat_2367ABEF_0_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_0, (_MM_PERM_ENUM)136); //B2(0-3) B3(0-3) B2(0-3) B3(0-3) B6(0-3) B7(0-3) B6(0-3) B7(0-3) BA(0-3) BB(0-3) BA(0-3) BB(0-3) BE(0-3) BF(0-3) BE(0-3) BF(0-3) + + const __m512i rhs_mat_014589CD_1_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_1, (_MM_PERM_ENUM)136); //B0(8-11) B1(8-11) B0(8-11) B1(8-11) B4(8-11) B5(8-11) B4(8-11) B5(8-11) B8(8-11) B9(8-11) B8(8-11) B9(8-11) BC(8-11) BD(8-11) BC(8-11) BD(8-11) + const __m512i rhs_mat_2367ABEF_1_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_1, (_MM_PERM_ENUM)136); //B2(8-11) B3(8-11) B2(8-11) B3(8-11) B6(8-11) B7(8-11) B6(8-11) B7(8-11) BA(8-11) BB(8-11) BA(8-11) BB(8-11) BE(8-11) BF(8-11) BE(8-11) BF(8-11) + + const __m512i rhs_mat_014589CD_2_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_2, (_MM_PERM_ENUM)136); //B0(16-19) B1(16-19) B0(16-19) B1(16-19) B4(16-19) B5(16-19) B4(16-19) B5(16-19) B8(16-19) B9(16-19) B8(16-19) B9(16-19) BC(16-19) BD(16-19) BC(16-19) BD(16-19) + const __m512i rhs_mat_2367ABEF_2_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_2, (_MM_PERM_ENUM)136); //B2(16-19) B3(16-19) B2(16-19) B3(16-19) B6(16-19) B7(16-19) B6(16-19) B7(16-19) BA(16-19) BB(16-19) BA(16-19) BB(16-19) BE(16-19) BF(16-19) BE(16-19) BF(16-19) + + const __m512i rhs_mat_014589CD_3_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_3, (_MM_PERM_ENUM)136); //B0(24-27) B1(24-27) B0(24-27) B1(24-27) B4(24-27) B5(24-27) B4(24-27) B5(24-27) B8(24-27) B9(24-27) B8(24-27) B9(24-27) BC(24-27) BD(24-27) BC(24-27) BD(24-27) + const __m512i rhs_mat_2367ABEF_3_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_3, (_MM_PERM_ENUM)136); //B2(24-27) B3(24-27) B2(24-27) B3(24-27) B6(24-27) B7(24-27) B6(24-27) B7(24-27) BA(24-27) BB(24-27) BA(24-27) BB(24-27) BE(24-27) BF(24-27) BE(24-27) BF(24-27) + + // Shuffle pattern two - right side input + + const __m512i 
rhs_mat_014589CD_0_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_0, (_MM_PERM_ENUM)221); //B0(4-7) B1(4-7) B0(4-7) B1(4-7) B4(4-7) B5(4-7) B4(4-7) B5(4-7) B8(4-7) B9(4-7) B8(4-7) B9(4-7) BC(4-7) BD(4-7) BC(4-7) BD(4-7) + const __m512i rhs_mat_2367ABEF_0_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_0, (_MM_PERM_ENUM)221); //B2(4-7) B3(4-7) B2(4-7) B3(4-7) B6(4-7) B7(4-7) B6(4-7) B7(4-7) BA(4-7) BB(4-7) BA(4-7) BB(4-7) BE(4-7) BF(4-7) BE(4-7) BF(4-7) + + const __m512i rhs_mat_014589CD_1_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_1, (_MM_PERM_ENUM)221); //B0(12-15) B1(12-15) B0(12-15) B1(12-15) B4(12-15) B5(12-15) B4(12-15) B5(12-15) B8(12-15) B9(12-15) B8(12-15) B9(12-15) BC(12-15) BD(12-15) BC(12-15) BD(12-15) + const __m512i rhs_mat_2367ABEF_1_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_1, (_MM_PERM_ENUM)221); //B2(12-15) B3(12-15) B2(12-15) B3(12-15) B6(12-15) B7(12-15) B6(12-15) B7(12-15) BA(12-15) BB(12-15) BA(12-15) BB(12-15) BE(12-15) BF(12-15) BE(12-15) BF(12-15) + + const __m512i rhs_mat_014589CD_2_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_2, (_MM_PERM_ENUM)221); //B0(20-23) B1(20-23) B0(20-23) B1(20-23) B4(20-23) B5(20-23) B4(20-23) B5(20-23) B8(20-23) B9(20-23) B8(20-23) B9(20-23) BC(20-23) BD(20-23) BC(20-23) BD(20-23) + const __m512i rhs_mat_2367ABEF_2_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_2, (_MM_PERM_ENUM)221); //B2(20-23) B3(20-23) B2(20-23) B3(20-23) B6(20-23) B7(20-23) B6(20-23) B7(20-23) BA(20-23) BB(20-23) BA(20-23) BB(20-23) BE(20-23) BF(20-23) BE(20-23) BF(20-23) + + const __m512i rhs_mat_014589CD_3_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_3, (_MM_PERM_ENUM)221); //B0(28-31) B1(28-31) B0(28-31) B1(28-31) B4(28-31) B5(28-31) B4(28-31) B5(28-31) B8(28-31) B9(28-31) B8(28-31) B9(28-31) BC(28-31) BD(28-31) BC(28-31) BD(28-31) + const __m512i rhs_mat_2367ABEF_3_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_3, (_MM_PERM_ENUM)221); //B2(28-31) B3(28-31) B2(28-31) B3(28-31) B6(28-31) B7(28-31) B6(28-31) B7(28-31) BA(28-31) BB(28-31) BA(28-31) BB(28-31) BE(28-31) BF(28-31) BE(28-31) BF(28-31) + + // Scale values - Load the weight scale values of two block_q4_0x8 + const __m512 col_scale_f32 = GGML_F32Cx8x2_LOAD(b_ptr_0[b].d, b_ptr_1[b].d); + + // Process LHS in pairs of rows + for (int rp = 0; rp < 4; rp++) { + + // Load the four block_q4_0 quantized values interleaved with each other in chunks of eight - A0,A1,A2,A3 + // Loaded as set of 128 bit vectors and repeated and stored into a 256 bit vector before again repeating into 512 bit vector + __m256i lhs_mat_ymm_0123_0 = _mm256_loadu_si256((const __m256i *)((a_ptrs[rp][b].qs))); + __m256i lhs_mat_ymm_01_0 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_0, lhs_mat_ymm_0123_0, 0); + __m256i lhs_mat_ymm_23_0 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_0, lhs_mat_ymm_0123_0, 17); + __m256i lhs_mat_ymm_0123_1 = _mm256_loadu_si256((const __m256i *)((a_ptrs[rp][b].qs + 32))); + __m256i lhs_mat_ymm_01_1 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_1, lhs_mat_ymm_0123_1, 0); + __m256i lhs_mat_ymm_23_1 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_1, lhs_mat_ymm_0123_1, 17); + __m256i lhs_mat_ymm_0123_2 = _mm256_loadu_si256((const __m256i *)((a_ptrs[rp][b].qs + 64))); + __m256i lhs_mat_ymm_01_2 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_2, lhs_mat_ymm_0123_2, 0); + __m256i lhs_mat_ymm_23_2 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_2, lhs_mat_ymm_0123_2, 17); + __m256i lhs_mat_ymm_0123_3 = _mm256_loadu_si256((const __m256i *)((a_ptrs[rp][b].qs + 96))); + __m256i lhs_mat_ymm_01_3 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_3, 
lhs_mat_ymm_0123_3, 0); + __m256i lhs_mat_ymm_23_3 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_3, lhs_mat_ymm_0123_3, 17); + + __m512i lhs_mat_01_0 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_0), lhs_mat_ymm_01_0, 1); + __m512i lhs_mat_23_0 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_0), lhs_mat_ymm_23_0, 1); + __m512i lhs_mat_01_1 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_1), lhs_mat_ymm_01_1, 1); + __m512i lhs_mat_23_1 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_1), lhs_mat_ymm_23_1, 1); + __m512i lhs_mat_01_2 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_2), lhs_mat_ymm_01_2, 1); + __m512i lhs_mat_23_2 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_2), lhs_mat_ymm_23_2, 1); + __m512i lhs_mat_01_3 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_3), lhs_mat_ymm_01_3, 1); + __m512i lhs_mat_23_3 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_3), lhs_mat_ymm_23_3, 1); + + // Shuffle pattern one - left side input + + const __m512i lhs_mat_01_0_sp1 = _mm512_shuffle_epi32(lhs_mat_01_0, (_MM_PERM_ENUM)160); //A0(0-3) A0(0-3) A1(0-3) A1(0-3) A0(0-3) A0(0-3) A1(0-3) A1(0-3) A0(0-3) A0(0-3) A1(0-3) A1(0-3) A0(0-3) A0(0-3) A1(0-3) A1(0-3) + const __m512i lhs_mat_23_0_sp1 = _mm512_shuffle_epi32(lhs_mat_23_0, (_MM_PERM_ENUM)160); //A2(0-3) A2(0-3) A3(0-3) A3(0-3) A2(0-3) A2(0-3) A3(0-3) A3(0-3) A2(0-3) A2(0-3) A3(0-3) A3(0-3) A2(0-3) A2(0-3) A3(0-3) A3(0-3) + + const __m512i lhs_mat_01_1_sp1 = _mm512_shuffle_epi32(lhs_mat_01_1, (_MM_PERM_ENUM)160); //A0(8-11) A0(8-11) A1(8-11) A1(8-11) A0(8-11) A0(8-11) A1(8-11) A1(8-11) A0(8-11) A0(8-11) A1(8-11) A1(8-11) A0(8-11) A0(8-11) A1(8-11) A1(8-11) + const __m512i lhs_mat_23_1_sp1 = _mm512_shuffle_epi32(lhs_mat_23_1, (_MM_PERM_ENUM)160); //A2(8-11) A2(8-11) A3(8-11) A3(8-11) A2(8-11) A2(8-11) A3(8-11) A3(8-11) A2(8-11) A2(8-11) A3(8-11) A3(8-11) A2(8-11) A2(8-11) A3(8-11) A3(8-11) + + const __m512i lhs_mat_01_2_sp1 = _mm512_shuffle_epi32(lhs_mat_01_2, (_MM_PERM_ENUM)160); //A0(16-19) A0(16-19) A1(16-19) A1(16-19) A0(16-19) A0(16-19) A1(16-19) A1(16-19) A0(16-19) A0(16-19) A1(16-19) A1(16-19) A0(16-19) A0(16-19) A1(16-19) A1(16-19) + const __m512i lhs_mat_23_2_sp1 = _mm512_shuffle_epi32(lhs_mat_23_2, (_MM_PERM_ENUM)160); //A2(16-19) A2(16-19) A3(16-19) A3(16-19) A2(16-19) A2(16-19) A3(16-19) A3(16-19) A2(16-19) A2(16-19) A3(16-19) A3(16-19) A2(16-19) A2(16-19) A3(16-19) A3(16-19) + + const __m512i lhs_mat_01_3_sp1 = _mm512_shuffle_epi32(lhs_mat_01_3, (_MM_PERM_ENUM)160); //A0(24-27) A0(24-27) A1(24-27) A1(24-27) A0(24-27) A0(24-27) A1(24-27) A1(24-27) A0(24-27) A0(24-27) A1(24-27) A1(24-27) A0(24-27) A0(24-27) A1(24-27) A1(24-27) + const __m512i lhs_mat_23_3_sp1 = _mm512_shuffle_epi32(lhs_mat_23_3, (_MM_PERM_ENUM)160); //A2(24-27) A2(24-27) A3(24-27) A3(24-27) A2(24-27) A2(24-27) A3(24-27) A3(24-27) A2(24-27) A2(24-27) A3(24-27) A3(24-27) A2(24-27) A2(24-27) A3(24-27) A3(24-27) + + // Shuffle pattern two - left side input + + const __m512i lhs_mat_01_0_sp2 = _mm512_shuffle_epi32(lhs_mat_01_0, (_MM_PERM_ENUM)245); //A0(4-7) A0(4-7) A1(4-7) A1(4-7) A0(4-7) A0(4-7) A1(4-7) A1(4-7) A0(4-7) A0(4-7) A1(4-7) A1(4-7) A0(4-7) A0(4-7) A1(4-7) A1(4-7) + const __m512i lhs_mat_23_0_sp2 = _mm512_shuffle_epi32(lhs_mat_23_0, (_MM_PERM_ENUM)245); //A2(4-7) A2(4-7) A3(4-7) A3(4-7) A2(4-7) A2(4-7) A3(4-7) A3(4-7) A2(4-7) A2(4-7) A3(4-7) A3(4-7) A2(4-7) A2(4-7) A3(4-7) A3(4-7) + + const __m512i lhs_mat_01_1_sp2 = _mm512_shuffle_epi32(lhs_mat_01_1, (_MM_PERM_ENUM)245); 
//A0(12-15) A0(12-15) A1(12-15) A1(12-15) A0(12-15) A0(12-15) A1(12-15) A1(12-15) A0(12-15) A0(12-15) A1(12-15) A1(12-15) A0(12-15) A0(12-15) A1(12-15) A1(12-15) + const __m512i lhs_mat_23_1_sp2 = _mm512_shuffle_epi32(lhs_mat_23_1, (_MM_PERM_ENUM)245); //A2(12-15) A2(12-15) A3(12-15) A3(12-15) A2(12-15) A2(12-15) A3(12-15) A3(12-15) A2(12-15) A2(12-15) A3(12-15) A3(12-15) A2(12-15) A2(12-15) A3(12-15) A3(12-15) + + const __m512i lhs_mat_01_2_sp2 = _mm512_shuffle_epi32(lhs_mat_01_2, (_MM_PERM_ENUM)245); //A0(20-23) A0(20-23) A1(20-23) A1(20-23) A0(20-23) A0(20-23) A1(20-23) A1(20-23) A0(20-23) A0(20-23) A1(20-23) A1(20-23) A0(20-23) A0(20-23) A1(20-23) A1(20-23) + const __m512i lhs_mat_23_2_sp2 = _mm512_shuffle_epi32(lhs_mat_23_2, (_MM_PERM_ENUM)245); //A2(20-23) A2(20-23) A3(20-23) A3(20-23) A2(20-23) A2(20-23) A3(20-23) A3(20-23) A2(20-23) A2(20-23) A3(20-23) A3(20-23) A2(20-23) A2(20-23) A3(20-23) A3(20-23) + + const __m512i lhs_mat_01_3_sp2 = _mm512_shuffle_epi32(lhs_mat_01_3, (_MM_PERM_ENUM)245); //A0(28-31) A0(28-31) A1(28-31) A1(28-31) A0(28-31) A0(28-31) A1(28-31) A1(28-31) A0(28-31) A0(28-31) A1(28-31) A1(28-31) A0(28-31) A0(28-31) A1(28-31) A1(28-31) + const __m512i lhs_mat_23_3_sp2 = _mm512_shuffle_epi32(lhs_mat_23_3, (_MM_PERM_ENUM)245); //A2(28-31) A2(28-31) A3(28-31) A3(28-31) A2(28-31) A2(28-31) A3(28-31) A3(28-31) A2(28-31) A2(28-31) A3(28-31) A3(28-31) A2(28-31) A2(28-31) A3(28-31) A3(28-31) + + // The values arranged in shuffle patterns are operated with dot product operation within 32 bit lane i.e corresponding bytes and multiplied and added into 32 bit integers within 32 bit lane + // Resembles MMLAs into 2x2 matrices in ARM Version + const __m512i zero = _mm512_setzero_epi32(); + __m512i iacc_mat_00_sp1 = mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_01_3_sp1, rhs_mat_014589CD_3_sp1), lhs_mat_01_2_sp1, rhs_mat_014589CD_2_sp1), lhs_mat_01_1_sp1, rhs_mat_014589CD_1_sp1), lhs_mat_01_0_sp1, rhs_mat_014589CD_0_sp1); + __m512i iacc_mat_01_sp1 = mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_01_3_sp1, rhs_mat_2367ABEF_3_sp1), lhs_mat_01_2_sp1, rhs_mat_2367ABEF_2_sp1), lhs_mat_01_1_sp1, rhs_mat_2367ABEF_1_sp1), lhs_mat_01_0_sp1, rhs_mat_2367ABEF_0_sp1); + __m512i iacc_mat_10_sp1 = mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_23_3_sp1, rhs_mat_014589CD_3_sp1), lhs_mat_23_2_sp1, rhs_mat_014589CD_2_sp1), lhs_mat_23_1_sp1, rhs_mat_014589CD_1_sp1), lhs_mat_23_0_sp1, rhs_mat_014589CD_0_sp1); + __m512i iacc_mat_11_sp1 = mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_23_3_sp1, rhs_mat_2367ABEF_3_sp1), lhs_mat_23_2_sp1, rhs_mat_2367ABEF_2_sp1), lhs_mat_23_1_sp1, rhs_mat_2367ABEF_1_sp1), lhs_mat_23_0_sp1, rhs_mat_2367ABEF_0_sp1); + __m512i iacc_mat_00_sp2 = mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_01_3_sp2, rhs_mat_014589CD_3_sp2), lhs_mat_01_2_sp2, rhs_mat_014589CD_2_sp2), lhs_mat_01_1_sp2, rhs_mat_014589CD_1_sp2), lhs_mat_01_0_sp2, rhs_mat_014589CD_0_sp2); + __m512i iacc_mat_01_sp2 = mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_01_3_sp2, rhs_mat_2367ABEF_3_sp2), 
lhs_mat_01_2_sp2, rhs_mat_2367ABEF_2_sp2), lhs_mat_01_1_sp2, rhs_mat_2367ABEF_1_sp2), lhs_mat_01_0_sp2, rhs_mat_2367ABEF_0_sp2); + __m512i iacc_mat_10_sp2 = mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_23_3_sp2, rhs_mat_014589CD_3_sp2), lhs_mat_23_2_sp2, rhs_mat_014589CD_2_sp2), lhs_mat_23_1_sp2, rhs_mat_014589CD_1_sp2), lhs_mat_23_0_sp2, rhs_mat_014589CD_0_sp2); + __m512i iacc_mat_11_sp2 = mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_23_3_sp2, rhs_mat_2367ABEF_3_sp2), lhs_mat_23_2_sp2, rhs_mat_2367ABEF_2_sp2), lhs_mat_23_1_sp2, rhs_mat_2367ABEF_1_sp2), lhs_mat_23_0_sp2, rhs_mat_2367ABEF_0_sp2); + + // Output of both shuffle patterns are added in order to sum dot product outputs of all 32 values in block + __m512i iacc_mat_00 = _mm512_add_epi32(iacc_mat_00_sp1, iacc_mat_00_sp2); + __m512i iacc_mat_01 = _mm512_add_epi32(iacc_mat_01_sp1, iacc_mat_01_sp2); + __m512i iacc_mat_10 = _mm512_add_epi32(iacc_mat_10_sp1, iacc_mat_10_sp2); + __m512i iacc_mat_11 = _mm512_add_epi32(iacc_mat_11_sp1, iacc_mat_11_sp2); + + + // Straighten out to make 4 row vectors + __m512i iacc_row_0 = _mm512_mask_blend_epi32(0xCCCC, iacc_mat_00, _mm512_shuffle_epi32(iacc_mat_01, (_MM_PERM_ENUM)78)); + __m512i iacc_row_1 = _mm512_mask_blend_epi32(0xCCCC, _mm512_shuffle_epi32(iacc_mat_00, (_MM_PERM_ENUM)78), iacc_mat_01); + __m512i iacc_row_2 = _mm512_mask_blend_epi32(0xCCCC, iacc_mat_10, _mm512_shuffle_epi32(iacc_mat_11, (_MM_PERM_ENUM)78)); + __m512i iacc_row_3 = _mm512_mask_blend_epi32(0xCCCC, _mm512_shuffle_epi32(iacc_mat_10, (_MM_PERM_ENUM)78), iacc_mat_11); + + // Load the scale(d) values for all the 4 Q8_0 blocks and repeat it across lanes + const __m128i row_scale_f16 = _mm_shuffle_epi32(_mm_maskload_epi32((int const*)(a_ptrs[rp][b].d), loadMask), 68); + const __m512 row_scale_f32 = GGML_F32Cx16_REPEAT_LOAD(row_scale_f16); + + // Multiply with appropiate scales and accumulate + acc_rows[rp * 4] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_0), _mm512_mul_ps(col_scale_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 0)), acc_rows[rp * 4]); + acc_rows[rp * 4 + 1] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_1), _mm512_mul_ps(col_scale_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 85)), acc_rows[rp * 4 + 1]); + acc_rows[rp * 4 + 2] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_2), _mm512_mul_ps(col_scale_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 170)), acc_rows[rp * 4 + 2]); + acc_rows[rp * 4 + 3] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_3), _mm512_mul_ps(col_scale_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 255)), acc_rows[rp * 4 + 3]); + } + } + + // Store the accumulated values + for (int i = 0; i < 16; i++) { + _mm512_storeu_ps((float *)(s + ((y * 4 + i) * bs + x * 8)), acc_rows[i]); + } + } + } + // Take a block_q8_0x4 structures at each pass of the loop and perform dot product operation + for (; y < nr / 4; y ++) { + + const block_q8_0x4 * a_ptr = a_ptr_start + (y * nb); + + // Take group of two block_q4_0x8 structures at each pass of the loop and perform dot product operation + for (int64_t x = 0; x < anc / 8; x += 2) { + + const block_q4_0x8 * b_ptr_0 = b_ptr_start + ((x) * b_nb); + const block_q4_0x8 * b_ptr_1 = b_ptr_start + ((x + 1) * b_nb); + + // Master FP accumulators + __m512 acc_rows[4]; + for (int i = 0; i < 4; i++) { + acc_rows[i] = _mm512_setzero_ps(); + } + + for (int64_t b = 0; b < nb; 
b++) { + // Load the sixteen block_q4_0 quantized values interleaved with each other in chunks of eight - B0,B1 ....BE,BF + const __m256i rhs_raw_mat_0123_0 = _mm256_loadu_si256((const __m256i *)(b_ptr_0[b].qs)); + const __m256i rhs_raw_mat_4567_0 = _mm256_loadu_si256((const __m256i *)(b_ptr_0[b].qs + 32)); + const __m256i rhs_raw_mat_0123_1 = _mm256_loadu_si256((const __m256i *)(b_ptr_0[b].qs + 64)); + const __m256i rhs_raw_mat_4567_1 = _mm256_loadu_si256((const __m256i *)(b_ptr_0[b].qs + 96)); + + const __m256i rhs_raw_mat_89AB_0 = _mm256_loadu_si256((const __m256i *)(b_ptr_1[b].qs)); + const __m256i rhs_raw_mat_CDEF_0 = _mm256_loadu_si256((const __m256i *)(b_ptr_1[b].qs + 32)); + const __m256i rhs_raw_mat_89AB_1 = _mm256_loadu_si256((const __m256i *)(b_ptr_1[b].qs + 64)); + const __m256i rhs_raw_mat_CDEF_1 = _mm256_loadu_si256((const __m256i *)(b_ptr_1[b].qs + 96)); + + // Save the values in the following vectors in the formats B0B1B4B5, B2B3B6B7 for further processing and storing of valuess + const __m256i rhs_raw_mat_0145_0 = _mm256_blend_epi32(rhs_raw_mat_0123_0, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_0, requiredOrder), 240); + const __m256i rhs_raw_mat_2367_0 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_0, requiredOrder), rhs_raw_mat_4567_0, 240); + const __m256i rhs_raw_mat_0145_1 = _mm256_blend_epi32(rhs_raw_mat_0123_1, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_1, requiredOrder), 240); + const __m256i rhs_raw_mat_2367_1 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_1, requiredOrder), rhs_raw_mat_4567_1, 240); + + const __m256i rhs_raw_mat_89CD_0 = _mm256_blend_epi32(rhs_raw_mat_89AB_0, _mm256_permutevar8x32_epi32(rhs_raw_mat_CDEF_0, requiredOrder), 240); + const __m256i rhs_raw_mat_ABEF_0 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_89AB_0, requiredOrder), rhs_raw_mat_CDEF_0, 240); + const __m256i rhs_raw_mat_89CD_1 = _mm256_blend_epi32(rhs_raw_mat_89AB_1, _mm256_permutevar8x32_epi32(rhs_raw_mat_CDEF_1, requiredOrder), 240); + const __m256i rhs_raw_mat_ABEF_1 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_89AB_1, requiredOrder), rhs_raw_mat_CDEF_1, 240); + + const __m512i rhs_raw_mat_014589CD_0 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_0145_0), rhs_raw_mat_89CD_0, 1); + const __m512i rhs_raw_mat_2367ABEF_0 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_2367_0), rhs_raw_mat_ABEF_0, 1); + const __m512i rhs_raw_mat_014589CD_1 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_0145_1), rhs_raw_mat_89CD_1, 1); + const __m512i rhs_raw_mat_2367ABEF_1 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_2367_1), rhs_raw_mat_ABEF_1, 1); + + // 4-bit -> 8-bit - Sign is maintained + const __m512i rhs_mat_014589CD_0 = _mm512_shuffle_epi8(signextendlutexpanded, _mm512_and_si512(rhs_raw_mat_014589CD_0, m4bexpanded)); //B0(0-7) B1(0-7) B4(0-7) B5(0-7) B8(0-7) B9(0-7) BC(0-7) BD(0-7) + const __m512i rhs_mat_2367ABEF_0 = _mm512_shuffle_epi8(signextendlutexpanded, _mm512_and_si512(rhs_raw_mat_2367ABEF_0, m4bexpanded)); //B2(0-7) B3(0-7) B6(0-7) B7(0-7) BA(0-7) BB(0-7) BE(0-7) BF(0-7) + + const __m512i rhs_mat_014589CD_1 = _mm512_shuffle_epi8(signextendlutexpanded, _mm512_and_si512(rhs_raw_mat_014589CD_1, m4bexpanded)); //B0(8-15) B1(8-15) B4(8-15) B5(8-15) B8(8-15) B9(8-15) BC(8-15) BD(8-15) + const __m512i rhs_mat_2367ABEF_1 = _mm512_shuffle_epi8(signextendlutexpanded, _mm512_and_si512(rhs_raw_mat_2367ABEF_1, m4bexpanded)); //B2(8-15) B3(8-15) B6(8-15) B7(8-15) BA(8-15) BB(8-15) BE(8-15) 
BF(8-15) + + const __m512i rhs_mat_014589CD_2 = _mm512_shuffle_epi8(signextendlutexpanded, _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_0, 4), m4bexpanded)); //B0(16-23) B1(16-23) B4(16-23) B5(16-23) B8(16-23) B9(16-23) BC(16-23) BD(16-23) + const __m512i rhs_mat_2367ABEF_2 = _mm512_shuffle_epi8(signextendlutexpanded, _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_0, 4), m4bexpanded)); //B2(16-23) B3(16-23) B6(16-23) B7(16-23) BA(16-23) BB(16-23) BE(16-23) BF(16-23) + + const __m512i rhs_mat_014589CD_3 = _mm512_shuffle_epi8(signextendlutexpanded, _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_1, 4), m4bexpanded)); //B0(24-31) B1(24-31) B4(24-31) B5(24-31) B8(24-31) B9(24-31) BC(24-31) BD(24-31) + const __m512i rhs_mat_2367ABEF_3 = _mm512_shuffle_epi8(signextendlutexpanded, _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_1, 4), m4bexpanded)); //B2(24-31) B3(24-31) B6(24-31) B7(24-31) BA(24-31) BB(24-31) BE(24-31) BF(24-31) + + // Shuffle pattern one - right side input + const __m512i rhs_mat_014589CD_0_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_0, (_MM_PERM_ENUM)136); //B0(0-3) B1(0-3) B0(0-3) B1(0-3) B4(0-3) B5(0-3) B4(0-3) B5(0-3) B8(0-3) B9(0-3) B8(0-3) B9(0-3) BC(0-3) BD(0-3) BC(0-3) BD(0-3) + const __m512i rhs_mat_2367ABEF_0_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_0, (_MM_PERM_ENUM)136); //B2(0-3) B3(0-3) B2(0-3) B3(0-3) B6(0-3) B7(0-3) B6(0-3) B7(0-3) BA(0-3) BB(0-3) BA(0-3) BB(0-3) BE(0-3) BF(0-3) BE(0-3) BF(0-3) + + const __m512i rhs_mat_014589CD_1_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_1, (_MM_PERM_ENUM)136); //B0(8-11) B1(8-11) B0(8-11) B1(8-11) B4(8-11) B5(8-11) B4(8-11) B5(8-11) B8(8-11) B9(8-11) B8(8-11) B9(8-11) BC(8-11) BD(8-11) BC(8-11) BD(8-11) + const __m512i rhs_mat_2367ABEF_1_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_1, (_MM_PERM_ENUM)136); //B2(8-11) B3(8-11) B2(8-11) B3(8-11) B6(8-11) B7(8-11) B6(8-11) B7(8-11) BA(8-11) BB(8-11) BA(8-11) BB(8-11) BE(8-11) BF(8-11) BE(8-11) BF(8-11) + + const __m512i rhs_mat_014589CD_2_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_2, (_MM_PERM_ENUM)136); //B0(16-19) B1(16-19) B0(16-19) B1(16-19) B4(16-19) B5(16-19) B4(16-19) B5(16-19) B8(16-19) B9(16-19) B8(16-19) B9(16-19) BC(16-19) BD(16-19) BC(16-19) BD(16-19) + const __m512i rhs_mat_2367ABEF_2_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_2, (_MM_PERM_ENUM)136); //B2(16-19) B3(16-19) B2(16-19) B3(16-19) B6(16-19) B7(16-19) B6(16-19) B7(16-19) BA(16-19) BB(16-19) BA(16-19) BB(16-19) BE(16-19) BF(16-19) BE(16-19) BF(16-19) + + const __m512i rhs_mat_014589CD_3_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_3, (_MM_PERM_ENUM)136); //B0(24-27) B1(24-27) B0(24-27) B1(24-27) B4(24-27) B5(24-27) B4(24-27) B5(24-27) B8(24-27) B9(24-27) B8(24-27) B9(24-27) BC(24-27) BD(24-27) BC(24-27) BD(24-27) + const __m512i rhs_mat_2367ABEF_3_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_3, (_MM_PERM_ENUM)136); //B2(24-27) B3(24-27) B2(24-27) B3(24-27) B6(24-27) B7(24-27) B6(24-27) B7(24-27) BA(24-27) BB(24-27) BA(24-27) BB(24-27) BE(24-27) BF(24-27) BE(24-27) BF(24-27) + + // Shuffle pattern two - right side input + + const __m512i rhs_mat_014589CD_0_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_0, (_MM_PERM_ENUM)221); //B0(4-7) B1(4-7) B0(4-7) B1(4-7) B4(4-7) B5(4-7) B4(4-7) B5(4-7) B8(4-7) B9(4-7) B8(4-7) B9(4-7) BC(4-7) BD(4-7) BC(4-7) BD(4-7) + const __m512i rhs_mat_2367ABEF_0_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_0, (_MM_PERM_ENUM)221); //B2(4-7) B3(4-7) B2(4-7) B3(4-7) B6(4-7) B7(4-7) B6(4-7) B7(4-7) BA(4-7) BB(4-7) BA(4-7) BB(4-7) BE(4-7) BF(4-7) BE(4-7) 
BF(4-7) + + const __m512i rhs_mat_014589CD_1_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_1, (_MM_PERM_ENUM)221); //B0(12-15) B1(12-15) B0(12-15) B1(12-15) B4(12-15) B5(12-15) B4(12-15) B5(12-15) B8(12-15) B9(12-15) B8(12-15) B9(12-15) BC(12-15) BD(12-15) BC(12-15) BD(12-15) + const __m512i rhs_mat_2367ABEF_1_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_1, (_MM_PERM_ENUM)221); //B2(12-15) B3(12-15) B2(12-15) B3(12-15) B6(12-15) B7(12-15) B6(12-15) B7(12-15) BA(12-15) BB(12-15) BA(12-15) BB(12-15) BE(12-15) BF(12-15) BE(12-15) BF(12-15) + + const __m512i rhs_mat_014589CD_2_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_2, (_MM_PERM_ENUM)221); //B0(20-23) B1(20-23) B0(20-23) B1(20-23) B4(20-23) B5(20-23) B4(20-23) B5(20-23) B8(20-23) B9(20-23) B8(20-23) B9(20-23) BC(20-23) BD(20-23) BC(20-23) BD(20-23) + const __m512i rhs_mat_2367ABEF_2_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_2, (_MM_PERM_ENUM)221); //B2(20-23) B3(20-23) B2(20-23) B3(20-23) B6(20-23) B7(20-23) B6(20-23) B7(20-23) BA(20-23) BB(20-23) BA(20-23) BB(20-23) BE(20-23) BF(20-23) BE(20-23) BF(20-23) + + const __m512i rhs_mat_014589CD_3_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_3, (_MM_PERM_ENUM)221); //B0(28-31) B1(28-31) B0(28-31) B1(28-31) B4(28-31) B5(28-31) B4(28-31) B5(28-31) B8(28-31) B9(28-31) B8(28-31) B9(28-31) BC(28-31) BD(28-31) BC(28-31) BD(28-31) + const __m512i rhs_mat_2367ABEF_3_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_3, (_MM_PERM_ENUM)221); //B2(28-31) B3(28-31) B2(28-31) B3(28-31) B6(28-31) B7(28-31) B6(28-31) B7(28-31) BA(28-31) BB(28-31) BA(28-31) BB(28-31) BE(28-31) BF(28-31) BE(28-31) BF(28-31) + + + // Scale values - Load the weight scale values of two block_q4_0x8 + const __m512 col_scale_f32 = GGML_F32Cx8x2_LOAD(b_ptr_0[b].d, b_ptr_1[b].d); + + // Load the four block_q4_0 quantized values interleaved with each other in chunks of eight - A0,A1,A2,A3 + // Loaded as set of 128 bit vectors and repeated and stored into a 256 bit vector before again repeating into 512 bit vector + __m256i lhs_mat_ymm_0123_0 = _mm256_loadu_si256((const __m256i *)((a_ptr[b].qs))); + __m256i lhs_mat_ymm_01_0 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_0, lhs_mat_ymm_0123_0, 0); + __m256i lhs_mat_ymm_23_0 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_0, lhs_mat_ymm_0123_0, 17); + __m256i lhs_mat_ymm_0123_1 = _mm256_loadu_si256((const __m256i *)((a_ptr[b].qs + 32))); + __m256i lhs_mat_ymm_01_1 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_1, lhs_mat_ymm_0123_1, 0); + __m256i lhs_mat_ymm_23_1 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_1, lhs_mat_ymm_0123_1, 17); + __m256i lhs_mat_ymm_0123_2 = _mm256_loadu_si256((const __m256i *)((a_ptr[b].qs + 64))); + __m256i lhs_mat_ymm_01_2 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_2, lhs_mat_ymm_0123_2, 0); + __m256i lhs_mat_ymm_23_2 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_2, lhs_mat_ymm_0123_2, 17); + __m256i lhs_mat_ymm_0123_3 = _mm256_loadu_si256((const __m256i *)((a_ptr[b].qs + 96))); + __m256i lhs_mat_ymm_01_3 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_3, lhs_mat_ymm_0123_3, 0); + __m256i lhs_mat_ymm_23_3 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_3, lhs_mat_ymm_0123_3, 17); + + __m512i lhs_mat_01_0 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_0), lhs_mat_ymm_01_0, 1); + __m512i lhs_mat_23_0 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_0), lhs_mat_ymm_23_0, 1); + __m512i lhs_mat_01_1 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_1), lhs_mat_ymm_01_1, 1); + __m512i lhs_mat_23_1 = 
_mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_1), lhs_mat_ymm_23_1, 1); + __m512i lhs_mat_01_2 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_2), lhs_mat_ymm_01_2, 1); + __m512i lhs_mat_23_2 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_2), lhs_mat_ymm_23_2, 1); + __m512i lhs_mat_01_3 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_3), lhs_mat_ymm_01_3, 1); + __m512i lhs_mat_23_3 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_3), lhs_mat_ymm_23_3, 1); + + // Shuffle pattern one - left side input + + const __m512i lhs_mat_01_0_sp1 = _mm512_shuffle_epi32(lhs_mat_01_0, (_MM_PERM_ENUM)160); //A0(0-3) A0(0-3) A1(0-3) A1(0-3) A0(0-3) A0(0-3) A1(0-3) A1(0-3) A0(0-3) A0(0-3) A1(0-3) A1(0-3) A0(0-3) A0(0-3) A1(0-3) A1(0-3) + const __m512i lhs_mat_23_0_sp1 = _mm512_shuffle_epi32(lhs_mat_23_0, (_MM_PERM_ENUM)160); //A2(0-3) A2(0-3) A3(0-3) A3(0-3) A2(0-3) A2(0-3) A3(0-3) A3(0-3) A2(0-3) A2(0-3) A3(0-3) A3(0-3) A2(0-3) A2(0-3) A3(0-3) A3(0-3) + + const __m512i lhs_mat_01_1_sp1 = _mm512_shuffle_epi32(lhs_mat_01_1, (_MM_PERM_ENUM)160); //A0(8-11) A0(8-11) A1(8-11) A1(8-11) A0(8-11) A0(8-11) A1(8-11) A1(8-11) A0(8-11) A0(8-11) A1(8-11) A1(8-11) A0(8-11) A0(8-11) A1(8-11) A1(8-11) + const __m512i lhs_mat_23_1_sp1 = _mm512_shuffle_epi32(lhs_mat_23_1, (_MM_PERM_ENUM)160); //A2(8-11) A2(8-11) A3(8-11) A3(8-11) A2(8-11) A2(8-11) A3(8-11) A3(8-11) A2(8-11) A2(8-11) A3(8-11) A3(8-11) A2(8-11) A2(8-11) A3(8-11) A3(8-11) + + const __m512i lhs_mat_01_2_sp1 = _mm512_shuffle_epi32(lhs_mat_01_2, (_MM_PERM_ENUM)160); //A0(16-19) A0(16-19) A1(16-19) A1(16-19) A0(16-19) A0(16-19) A1(16-19) A1(16-19) A0(16-19) A0(16-19) A1(16-19) A1(16-19) A0(16-19) A0(16-19) A1(16-19) A1(16-19) + const __m512i lhs_mat_23_2_sp1 = _mm512_shuffle_epi32(lhs_mat_23_2, (_MM_PERM_ENUM)160); //A2(16-19) A2(16-19) A3(16-19) A3(16-19) A2(16-19) A2(16-19) A3(16-19) A3(16-19) A2(16-19) A2(16-19) A3(16-19) A3(16-19) A2(16-19) A2(16-19) A3(16-19) A3(16-19) + + const __m512i lhs_mat_01_3_sp1 = _mm512_shuffle_epi32(lhs_mat_01_3, (_MM_PERM_ENUM)160); //A0(24-27) A0(24-27) A1(24-27) A1(24-27) A0(24-27) A0(24-27) A1(24-27) A1(24-27) A0(24-27) A0(24-27) A1(24-27) A1(24-27) A0(24-27) A0(24-27) A1(24-27) A1(24-27) + const __m512i lhs_mat_23_3_sp1 = _mm512_shuffle_epi32(lhs_mat_23_3, (_MM_PERM_ENUM)160); //A2(24-27) A2(24-27) A3(24-27) A3(24-27) A2(24-27) A2(24-27) A3(24-27) A3(24-27) A2(24-27) A2(24-27) A3(24-27) A3(24-27) A2(24-27) A2(24-27) A3(24-27) A3(24-27) + + // Shuffle pattern two - left side input + + const __m512i lhs_mat_01_0_sp2 = _mm512_shuffle_epi32(lhs_mat_01_0, (_MM_PERM_ENUM)245); //A0(4-7) A0(4-7) A1(4-7) A1(4-7) A0(4-7) A0(4-7) A1(4-7) A1(4-7) A0(4-7) A0(4-7) A1(4-7) A1(4-7) A0(4-7) A0(4-7) A1(4-7) A1(4-7) + const __m512i lhs_mat_23_0_sp2 = _mm512_shuffle_epi32(lhs_mat_23_0, (_MM_PERM_ENUM)245); //A2(4-7) A2(4-7) A3(4-7) A3(4-7) A2(4-7) A2(4-7) A3(4-7) A3(4-7) A2(4-7) A2(4-7) A3(4-7) A3(4-7) A2(4-7) A2(4-7) A3(4-7) A3(4-7) + + const __m512i lhs_mat_01_1_sp2 = _mm512_shuffle_epi32(lhs_mat_01_1, (_MM_PERM_ENUM)245); //A0(12-15) A0(12-15) A1(12-15) A1(12-15) A0(12-15) A0(12-15) A1(12-15) A1(12-15) A0(12-15) A0(12-15) A1(12-15) A1(12-15) A0(12-15) A0(12-15) A1(12-15) A1(12-15) + const __m512i lhs_mat_23_1_sp2 = _mm512_shuffle_epi32(lhs_mat_23_1, (_MM_PERM_ENUM)245); //A2(12-15) A2(12-15) A3(12-15) A3(12-15) A2(12-15) A2(12-15) A3(12-15) A3(12-15) A2(12-15) A2(12-15) A3(12-15) A3(12-15) A2(12-15) A2(12-15) A3(12-15) A3(12-15) + + const __m512i lhs_mat_01_2_sp2 = 
_mm512_shuffle_epi32(lhs_mat_01_2, (_MM_PERM_ENUM)245); //A0(20-23) A0(20-23) A1(20-23) A1(20-23) A0(20-23) A0(20-23) A1(20-23) A1(20-23) A0(20-23) A0(20-23) A1(20-23) A1(20-23) A0(20-23) A0(20-23) A1(20-23) A1(20-23) + const __m512i lhs_mat_23_2_sp2 = _mm512_shuffle_epi32(lhs_mat_23_2, (_MM_PERM_ENUM)245); //A2(20-23) A2(20-23) A3(20-23) A3(20-23) A2(20-23) A2(20-23) A3(20-23) A3(20-23) A2(20-23) A2(20-23) A3(20-23) A3(20-23) A2(20-23) A2(20-23) A3(20-23) A3(20-23) + + const __m512i lhs_mat_01_3_sp2 = _mm512_shuffle_epi32(lhs_mat_01_3, (_MM_PERM_ENUM)245); //A0(28-31) A0(28-31) A1(28-31) A1(28-31) A0(28-31) A0(28-31) A1(28-31) A1(28-31) A0(28-31) A0(28-31) A1(28-31) A1(28-31) A0(28-31) A0(28-31) A1(28-31) A1(28-31) + const __m512i lhs_mat_23_3_sp2 = _mm512_shuffle_epi32(lhs_mat_23_3, (_MM_PERM_ENUM)245); //A2(28-31) A2(28-31) A3(28-31) A3(28-31) A2(28-31) A2(28-31) A3(28-31) A3(28-31) A2(28-31) A2(28-31) A3(28-31) A3(28-31) A2(28-31) A2(28-31) A3(28-31) A3(28-31) + + // The values arranged in shuffle patterns are operated with dot product operation within 32 bit lane i.e corresponding bytes and multiplied and added into 32 bit integers within 32 bit lane + // Resembles MMLAs into 2x2 matrices in ARM Version + const __m512i zero = _mm512_setzero_epi32(); + __m512i iacc_mat_00_sp1 = mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_01_3_sp1, rhs_mat_014589CD_3_sp1), lhs_mat_01_2_sp1, rhs_mat_014589CD_2_sp1), lhs_mat_01_1_sp1, rhs_mat_014589CD_1_sp1), lhs_mat_01_0_sp1, rhs_mat_014589CD_0_sp1); + __m512i iacc_mat_01_sp1 = mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_01_3_sp1, rhs_mat_2367ABEF_3_sp1), lhs_mat_01_2_sp1, rhs_mat_2367ABEF_2_sp1), lhs_mat_01_1_sp1, rhs_mat_2367ABEF_1_sp1), lhs_mat_01_0_sp1, rhs_mat_2367ABEF_0_sp1); + __m512i iacc_mat_10_sp1 = mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_23_3_sp1, rhs_mat_014589CD_3_sp1), lhs_mat_23_2_sp1, rhs_mat_014589CD_2_sp1), lhs_mat_23_1_sp1, rhs_mat_014589CD_1_sp1), lhs_mat_23_0_sp1, rhs_mat_014589CD_0_sp1); + __m512i iacc_mat_11_sp1 = mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_23_3_sp1, rhs_mat_2367ABEF_3_sp1), lhs_mat_23_2_sp1, rhs_mat_2367ABEF_2_sp1), lhs_mat_23_1_sp1, rhs_mat_2367ABEF_1_sp1), lhs_mat_23_0_sp1, rhs_mat_2367ABEF_0_sp1); + __m512i iacc_mat_00_sp2 = mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_01_3_sp2, rhs_mat_014589CD_3_sp2), lhs_mat_01_2_sp2, rhs_mat_014589CD_2_sp2), lhs_mat_01_1_sp2, rhs_mat_014589CD_1_sp2), lhs_mat_01_0_sp2, rhs_mat_014589CD_0_sp2); + __m512i iacc_mat_01_sp2 = mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_01_3_sp2, rhs_mat_2367ABEF_3_sp2), lhs_mat_01_2_sp2, rhs_mat_2367ABEF_2_sp2), lhs_mat_01_1_sp2, rhs_mat_2367ABEF_1_sp2), lhs_mat_01_0_sp2, rhs_mat_2367ABEF_0_sp2); + __m512i iacc_mat_10_sp2 = mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_23_3_sp2, rhs_mat_014589CD_3_sp2), lhs_mat_23_2_sp2, rhs_mat_014589CD_2_sp2), lhs_mat_23_1_sp2, rhs_mat_014589CD_1_sp2), lhs_mat_23_0_sp2, 
rhs_mat_014589CD_0_sp2); + __m512i iacc_mat_11_sp2 = mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_23_3_sp2, rhs_mat_2367ABEF_3_sp2), lhs_mat_23_2_sp2, rhs_mat_2367ABEF_2_sp2), lhs_mat_23_1_sp2, rhs_mat_2367ABEF_1_sp2), lhs_mat_23_0_sp2, rhs_mat_2367ABEF_0_sp2); + + // Output of both shuffle patterns are added in order to sum dot product outputs of all 32 values in block + __m512i iacc_mat_00 = _mm512_add_epi32(iacc_mat_00_sp1, iacc_mat_00_sp2); + __m512i iacc_mat_01 = _mm512_add_epi32(iacc_mat_01_sp1, iacc_mat_01_sp2); + __m512i iacc_mat_10 = _mm512_add_epi32(iacc_mat_10_sp1, iacc_mat_10_sp2); + __m512i iacc_mat_11 = _mm512_add_epi32(iacc_mat_11_sp1, iacc_mat_11_sp2); + + + // Straighten out to make 4 row vectors + __m512i iacc_row_0 = _mm512_mask_blend_epi32(0xCCCC, iacc_mat_00, _mm512_shuffle_epi32(iacc_mat_01, (_MM_PERM_ENUM)78)); + __m512i iacc_row_1 = _mm512_mask_blend_epi32(0xCCCC, _mm512_shuffle_epi32(iacc_mat_00, (_MM_PERM_ENUM)78), iacc_mat_01); + __m512i iacc_row_2 = _mm512_mask_blend_epi32(0xCCCC, iacc_mat_10, _mm512_shuffle_epi32(iacc_mat_11, (_MM_PERM_ENUM)78)); + __m512i iacc_row_3 = _mm512_mask_blend_epi32(0xCCCC, _mm512_shuffle_epi32(iacc_mat_10, (_MM_PERM_ENUM)78), iacc_mat_11); + + // Load the scale(d) values for all the 4 Q8_0 blocks and repeat it across lanes + const __m128i row_scale_f16 = _mm_shuffle_epi32(_mm_maskload_epi32((int const*)(a_ptr[b].d), loadMask), 68); + const __m512 row_scale_f32 = GGML_F32Cx16_REPEAT_LOAD(row_scale_f16); + + // Multiply with appropiate scales and accumulate + acc_rows[0] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_0), _mm512_mul_ps(col_scale_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 0)), acc_rows[0]); + acc_rows[1] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_1), _mm512_mul_ps(col_scale_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 85)), acc_rows[1]); + acc_rows[2] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_2), _mm512_mul_ps(col_scale_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 170)), acc_rows[2]); + acc_rows[3] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_3), _mm512_mul_ps(col_scale_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 255)), acc_rows[3]); + } + + // Store the accumulated values + for (int i = 0; i < 4; i++) { + _mm512_storeu_ps((float *)(s + ((y * 4 + i) * bs + x * 8)), acc_rows[i]); + } + } + } + if (anc != nc) { + xstart = anc/8; + y = 0; + } + #endif // __AVX512F__ + + // Take group of four block_q8_0x4 structures at each pass of the loop and perform dot product operation + + for (; y < anr / 4; y += 4) { + const block_q8_0x4 * a_ptrs[4]; + + a_ptrs[0] = a_ptr_start + (y * nb); + for (int i = 0; i < 3; ++i) { + a_ptrs[i + 1] = a_ptrs[i] + nb; + } + + // Take group of eight block_q4_0x8 structures at each pass of the loop and perform dot product operation + for (int64_t x = xstart; x < nc / 8; x++) { + + const block_q4_0x8 * b_ptr = b_ptr_start + (x * b_nb); + + // Master FP accumulators + __m256 acc_rows[16]; + for (int i = 0; i < 16; i++) { + acc_rows[i] = _mm256_setzero_ps(); + } + + for (int64_t b = 0; b < nb; b++) { + // Load the eight block_q4_0 quantized values interleaved with each other in chunks of eight - B0,B1 ....B6,B7 + const __m256i rhs_raw_mat_0123_0 = _mm256_loadu_si256((const __m256i *)(b_ptr[b].qs)); + const __m256i rhs_raw_mat_4567_0 = _mm256_loadu_si256((const __m256i *)(b_ptr[b].qs + 32)); + const __m256i rhs_raw_mat_0123_1 = _mm256_loadu_si256((const __m256i *)(b_ptr[b].qs 
+ 64)); + const __m256i rhs_raw_mat_4567_1 = _mm256_loadu_si256((const __m256i *)(b_ptr[b].qs + 96)); + + // Save the values in the following vectors in the formats B0B1B4B5, B2B3B6B7 for further processing and storing of values + const __m256i rhs_raw_mat_0145_0 = _mm256_blend_epi32(rhs_raw_mat_0123_0, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_0, requiredOrder), 240); + const __m256i rhs_raw_mat_2367_0 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_0, requiredOrder), rhs_raw_mat_4567_0, 240); + const __m256i rhs_raw_mat_0145_1 = _mm256_blend_epi32(rhs_raw_mat_0123_1, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_1, requiredOrder), 240); + const __m256i rhs_raw_mat_2367_1 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_1, requiredOrder), rhs_raw_mat_4567_1, 240); + + // 4-bit -> 8-bit - Sign is maintained + const __m256i rhs_mat_0145_0 = _mm256_shuffle_epi8(signextendlut, _mm256_and_si256(rhs_raw_mat_0145_0, m4b)); //B0(0-7) B1(0-7) B4(0-7) B5(0-7) + const __m256i rhs_mat_2367_0 = _mm256_shuffle_epi8(signextendlut, _mm256_and_si256(rhs_raw_mat_2367_0, m4b)); //B2(0-7) B3(0-7) B6(0-7) B7(0-7) + + const __m256i rhs_mat_0145_1 = _mm256_shuffle_epi8(signextendlut, _mm256_and_si256(rhs_raw_mat_0145_1, m4b)); //B0(8-15) B1(8-15) B4(8-15) B5(8-15) + const __m256i rhs_mat_2367_1 = _mm256_shuffle_epi8(signextendlut, _mm256_and_si256(rhs_raw_mat_2367_1, m4b)); //B2(8-15) B3(8-15) B6(8-15) B7(8-15) + + const __m256i rhs_mat_0145_2 = _mm256_shuffle_epi8(signextendlut, _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_0, 4), m4b)); //B0(16-23) B1(16-23) B4(16-23) B5(16-23) + const __m256i rhs_mat_2367_2 = _mm256_shuffle_epi8(signextendlut, _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_0, 4), m4b)); //B2(16-23) B3(16-23) B6(16-23) B7(16-23) + + const __m256i rhs_mat_0145_3 = _mm256_shuffle_epi8(signextendlut, _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_1, 4), m4b)); //B0(24-31) B1(24-31) B4(24-31) B5(24-31) + const __m256i rhs_mat_2367_3 = _mm256_shuffle_epi8(signextendlut, _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_1, 4), m4b)); //B2(24-31) B3(24-31) B6(24-31) B7(24-31) + + // Shuffle pattern one - right side input + const __m256i rhs_mat_0145_0_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_0, 136); //B0(0-3) B1(0-3) B0(0-3) B1(0-3) B4(0-3) B5(0-3) B4(0-3) B5(0-3) + const __m256i rhs_mat_2367_0_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_0, 136); //B2(0-3) B3(0-3) B2(0-3) B3(0-3) B6(0-3) B7(0-3) B6(0-3) B7(0-3) + + const __m256i rhs_mat_0145_1_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_1, 136); //B0(8-11) B1(8-11) B0(8-11) B1(8-11) B4(8-11) B5(8-11) B4(8-11) B5(8-11) + const __m256i rhs_mat_2367_1_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_1, 136); //B2(8-11) B3(8-11) B2(8-11) B3(8-11) B6(8-11) B7(8-11) B6(8-11) B7(8-11) + + const __m256i rhs_mat_0145_2_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_2, 136); //B0(16-19) B1(16-19) B0(16-19) B1(16-19) B4(16-19) B5(16-19) B4(16-19) B5(16-19) + const __m256i rhs_mat_2367_2_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_2, 136); //B2(16-19) B3(16-19) B2(16-19) B3(16-19) B6(16-19) B7(16-19) B6(16-19) B7(16-19) + + const __m256i rhs_mat_0145_3_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_3, 136); //B0(24-27) B1(24-27) B0(24-27) B1(24-27) B4(24-27) B5(24-27) B4(24-27) B5(24-27) + const __m256i rhs_mat_2367_3_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_3, 136); //B2(24-27) B3(24-27) B2(24-27) B3(24-27) B6(24-27) B7(24-27) B6(24-27) B7(24-27) + + // Shuffle pattern two - right side input + + const __m256i rhs_mat_0145_0_sp2 = 
_mm256_shuffle_epi32(rhs_mat_0145_0, 221); //B0(4-7) B1(4-7) B0(4-7) B1(4-7) B4(4-7) B5(4-7) B4(4-7) B5(4-7) + const __m256i rhs_mat_2367_0_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_0, 221); //B2(4-7) B3(4-7) B2(4-7) B3(4-7) B6(4-7) B7(4-7) B6(4-7) B7(4-7) + + const __m256i rhs_mat_0145_1_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_1, 221); //B0(12-15) B1(12-15) B0(12-15) B1(12-15) B4(12-15) B5(12-15) B4(12-15) B5(12-15) + const __m256i rhs_mat_2367_1_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_1, 221); //B2(12-15) B3(12-15) B2(12-15) B3(12-15) B6(12-15) B7(12-15) B6(12-15) B7(12-15) + + const __m256i rhs_mat_0145_2_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_2, 221); //B0(20-23) B1(20-23) B0(20-23) B1(20-23) B4(20-23) B5(20-23) B4(20-23) B5(20-23) + const __m256i rhs_mat_2367_2_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_2, 221); //B2(20-23) B3(20-23) B2(20-23) B3(20-23) B6(20-23) B7(20-23) B6(20-23) B7(20-23) + + const __m256i rhs_mat_0145_3_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_3, 221); //B0(28-31) B1(28-31) B0(28-31) B1(28-31) B4(28-31) B5(28-31) B4(28-31) B5(28-31) + const __m256i rhs_mat_2367_3_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_3, 221); //B2(28-31) B3(28-31) B2(28-31) B3(28-31) B6(28-31) B7(28-31) B6(28-31) B7(28-31) + + // Scale values - Load the wight scale values of block_q4_0x8 + const __m256 col_scale_f32 = GGML_F32Cx8_LOAD(b_ptr[b].d); + + // Process LHS in groups of four + for (int rp = 0; rp < 4; rp++) { + // Load the four block_q4_0 quantized values interleaved with each other in chunks of eight - A0,A1,A2,A3 + // Loaded as set of 128 bit vectors and repeated into a 256 bit vector + __m256i lhs_mat_0123_0 = _mm256_loadu_si256((const __m256i *)((a_ptrs[rp][b].qs))); + __m256i lhs_mat_01_0 = _mm256_permute2f128_si256(lhs_mat_0123_0, lhs_mat_0123_0, 0); + __m256i lhs_mat_23_0 = _mm256_permute2f128_si256(lhs_mat_0123_0, lhs_mat_0123_0, 17); + __m256i lhs_mat_0123_1 = _mm256_loadu_si256((const __m256i *)((a_ptrs[rp][b].qs + 32))); + __m256i lhs_mat_01_1 = _mm256_permute2f128_si256(lhs_mat_0123_1, lhs_mat_0123_1, 0); + __m256i lhs_mat_23_1 = _mm256_permute2f128_si256(lhs_mat_0123_1, lhs_mat_0123_1, 17); + __m256i lhs_mat_0123_2 = _mm256_loadu_si256((const __m256i *)((a_ptrs[rp][b].qs + 64))); + __m256i lhs_mat_01_2 = _mm256_permute2f128_si256(lhs_mat_0123_2, lhs_mat_0123_2, 0); + __m256i lhs_mat_23_2 = _mm256_permute2f128_si256(lhs_mat_0123_2, lhs_mat_0123_2, 17); + __m256i lhs_mat_0123_3 = _mm256_loadu_si256((const __m256i *)((a_ptrs[rp][b].qs + 96))); + __m256i lhs_mat_01_3 = _mm256_permute2f128_si256(lhs_mat_0123_3, lhs_mat_0123_3, 0); + __m256i lhs_mat_23_3 = _mm256_permute2f128_si256(lhs_mat_0123_3, lhs_mat_0123_3, 17); + + // Shuffle pattern one - left side input + const __m256i lhs_mat_01_0_sp1 = _mm256_shuffle_epi32(lhs_mat_01_0, 160); //A0(0-3) A0(0-3) A1(0-3) A1(0-3) A0(0-3) A0(0-3) A1(0-3) A1(0-3) + const __m256i lhs_mat_23_0_sp1 = _mm256_shuffle_epi32(lhs_mat_23_0, 160); //A2(0-3) A2(0-3) A3(0-3) A3(0-3) A2(0-3) A2(0-3) A3(0-3) A3(0-3) + + const __m256i lhs_mat_01_1_sp1 = _mm256_shuffle_epi32(lhs_mat_01_1, 160); //A0(8-11) A0(8-11) A1(8-11) A1(8-11) A0(8-11) A0(8-11) A1(8-11) A1(8-11) + const __m256i lhs_mat_23_1_sp1 = _mm256_shuffle_epi32(lhs_mat_23_1, 160); //A2(8-11) A2(8-11) A3(8-11) A3(8-11) A2(8-11) A2(8-11) A3(8-11) A3(8-11) + + const __m256i lhs_mat_01_2_sp1 = _mm256_shuffle_epi32(lhs_mat_01_2, 160); //A0(16-19) A0(16-19) A1(16-19) A1(16-19) A0(16-19) A0(16-19) A1(16-19) A1(16-19) + const __m256i lhs_mat_23_2_sp1 = _mm256_shuffle_epi32(lhs_mat_23_2, 160); //A2(16-19) 
A2(16-19) A3(16-19) A3(16-19) A2(16-19) A2(16-19) A3(16-19) A3(16-19) + + const __m256i lhs_mat_01_3_sp1 = _mm256_shuffle_epi32(lhs_mat_01_3, 160); //A0(24-27) A0(24-27) A1(24-27) A1(24-27) A0(24-27) A0(24-27) A1(24-27) A1(24-27) + const __m256i lhs_mat_23_3_sp1 = _mm256_shuffle_epi32(lhs_mat_23_3, 160); //A2(24-27) A2(24-27) A3(24-27) A3(24-27) A2(24-27) A2(24-27) A3(24-27) A3(24-27) + + // Shuffle pattern two - left side input + const __m256i lhs_mat_01_0_sp2 = _mm256_shuffle_epi32(lhs_mat_01_0, 245); //A0(4-7) A0(4-7) A1(4-7) A1(4-7) A0(4-7) A0(4-7) A1(4-7) A1(4-7) + const __m256i lhs_mat_23_0_sp2 = _mm256_shuffle_epi32(lhs_mat_23_0, 245); //A2(4-7) A2(4-7) A3(4-7) A3(4-7) A2(4-7) A2(4-7) A3(4-7) A3(4-7) + + const __m256i lhs_mat_01_1_sp2 = _mm256_shuffle_epi32(lhs_mat_01_1, 245); //A0(12-15) A0(12-15) A1(12-15) A1(12-15) A0(12-15) A0(12-15) A1(12-15) A1(12-15) + const __m256i lhs_mat_23_1_sp2 = _mm256_shuffle_epi32(lhs_mat_23_1, 245); //A2(12-15) A2(12-15) A3(12-15) A3(12-15) A2(12-15) A2(12-15) A3(12-15) A3(12-15) + + const __m256i lhs_mat_01_2_sp2 = _mm256_shuffle_epi32(lhs_mat_01_2, 245); //A0(20-23) A0(20-23) A1(20-23) A1(20-23) A0(20-23) A0(20-23) A1(20-23) A1(20-23) + const __m256i lhs_mat_23_2_sp2 = _mm256_shuffle_epi32(lhs_mat_23_2, 245); //A2(20-23) A2(20-23) A3(20-23) A3(20-23) A2(20-23) A2(20-23) A3(20-23) A3(20-23) + + const __m256i lhs_mat_01_3_sp2 = _mm256_shuffle_epi32(lhs_mat_01_3, 245); //A0(28-31) A0(28-31) A1(28-31) A1(28-31) A0(28-31) A0(28-31) A1(28-31) A1(28-31) + const __m256i lhs_mat_23_3_sp2 = _mm256_shuffle_epi32(lhs_mat_23_3, 245); //A2(28-31) A2(28-31) A3(28-31) A3(28-31) A2(28-31) A2(28-31) A3(28-31) A3(28-31) + + // The values arranged in shuffle patterns are operated with dot product operation within 32 bit lane i.e corresponding bytes and multiplied and added into 32 bit integers within 32 bit lane + // Resembles MMLAs into 2x2 matrices in ARM Version + const __m256i zero = _mm256_setzero_si256(); + __m256i iacc_mat_00_sp1 = mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, lhs_mat_01_3_sp1, rhs_mat_0145_3_sp1), lhs_mat_01_2_sp1, rhs_mat_0145_2_sp1), lhs_mat_01_1_sp1, rhs_mat_0145_1_sp1), lhs_mat_01_0_sp1, rhs_mat_0145_0_sp1); + __m256i iacc_mat_01_sp1 = mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, lhs_mat_01_3_sp1, rhs_mat_2367_3_sp1), lhs_mat_01_2_sp1, rhs_mat_2367_2_sp1), lhs_mat_01_1_sp1, rhs_mat_2367_1_sp1), lhs_mat_01_0_sp1, rhs_mat_2367_0_sp1); + __m256i iacc_mat_10_sp1 = mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, lhs_mat_23_3_sp1, rhs_mat_0145_3_sp1), lhs_mat_23_2_sp1, rhs_mat_0145_2_sp1), lhs_mat_23_1_sp1, rhs_mat_0145_1_sp1), lhs_mat_23_0_sp1, rhs_mat_0145_0_sp1); + __m256i iacc_mat_11_sp1 = mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, lhs_mat_23_3_sp1, rhs_mat_2367_3_sp1), lhs_mat_23_2_sp1, rhs_mat_2367_2_sp1), lhs_mat_23_1_sp1, rhs_mat_2367_1_sp1), lhs_mat_23_0_sp1, rhs_mat_2367_0_sp1); + __m256i iacc_mat_00_sp2 = mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, lhs_mat_01_3_sp2, rhs_mat_0145_3_sp2), lhs_mat_01_2_sp2, rhs_mat_0145_2_sp2), lhs_mat_01_1_sp2, rhs_mat_0145_1_sp2), lhs_mat_01_0_sp2, rhs_mat_0145_0_sp2); + __m256i iacc_mat_01_sp2 = 
mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, lhs_mat_01_3_sp2, rhs_mat_2367_3_sp2), lhs_mat_01_2_sp2, rhs_mat_2367_2_sp2), lhs_mat_01_1_sp2, rhs_mat_2367_1_sp2), lhs_mat_01_0_sp2, rhs_mat_2367_0_sp2); + __m256i iacc_mat_10_sp2 = mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, lhs_mat_23_3_sp2, rhs_mat_0145_3_sp2), lhs_mat_23_2_sp2, rhs_mat_0145_2_sp2), lhs_mat_23_1_sp2, rhs_mat_0145_1_sp2), lhs_mat_23_0_sp2, rhs_mat_0145_0_sp2); + __m256i iacc_mat_11_sp2 = mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, lhs_mat_23_3_sp2, rhs_mat_2367_3_sp2), lhs_mat_23_2_sp2, rhs_mat_2367_2_sp2), lhs_mat_23_1_sp2, rhs_mat_2367_1_sp2), lhs_mat_23_0_sp2, rhs_mat_2367_0_sp2); + + // Output of both shuffle patterns are added in order to sum dot product outputs of all 32 values in block + __m256i iacc_mat_00 = _mm256_add_epi32(iacc_mat_00_sp1, iacc_mat_00_sp2); + __m256i iacc_mat_01 = _mm256_add_epi32(iacc_mat_01_sp1, iacc_mat_01_sp2); + __m256i iacc_mat_10 = _mm256_add_epi32(iacc_mat_10_sp1, iacc_mat_10_sp2); + __m256i iacc_mat_11 = _mm256_add_epi32(iacc_mat_11_sp1, iacc_mat_11_sp2); + + // Straighten out to make 4 row vectors + __m256i iacc_row_0 = _mm256_blend_epi32(iacc_mat_00, _mm256_shuffle_epi32(iacc_mat_01, 78), 204); + __m256i iacc_row_1 = _mm256_blend_epi32(_mm256_shuffle_epi32(iacc_mat_00, 78), iacc_mat_01, 204); + __m256i iacc_row_2 = _mm256_blend_epi32(iacc_mat_10, _mm256_shuffle_epi32(iacc_mat_11, 78), 204); + __m256i iacc_row_3 = _mm256_blend_epi32(_mm256_shuffle_epi32(iacc_mat_10, 78), iacc_mat_11, 204); + + // Load the scale(d) values for all the 4 Q8_0 blocks and repeat it across lanes + const __m256 row_scale_f32 = GGML_F32Cx8_REPEAT_LOAD(a_ptrs[rp][b].d, loadMask); + + // Multiply with appropiate scales and accumulate + acc_rows[rp * 4] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_0), _mm256_mul_ps(col_scale_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 0)), acc_rows[rp * 4]); + acc_rows[rp * 4 + 1] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_1), _mm256_mul_ps(col_scale_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 85)), acc_rows[rp * 4 + 1]); + acc_rows[rp * 4 + 2] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_2), _mm256_mul_ps(col_scale_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 170)), acc_rows[rp * 4 + 2]); + acc_rows[rp * 4 + 3] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_3), _mm256_mul_ps(col_scale_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 255)), acc_rows[rp * 4 + 3]); + } + } + + // Store the accumulated values + for (int i = 0; i < 16; i++) { + _mm256_storeu_ps((float *)(s + ((y * 4 + i) * bs + x * 8)), acc_rows[i]); + } + } + } + + // Take a block_q8_0x4 structures at each pass of the loop and perform dot product operation + for (; y < nr / 4; y ++) { + + const block_q8_0x4 * a_ptr = a_ptr_start + (y * nb); + + // Load the eight block_q4_0 quantized values interleaved with each other in chunks of eight - B0,B1 ....B6,B7 + for (int64_t x = xstart; x < nc / 8; x++) { + + const block_q4_0x8 * b_ptr = b_ptr_start + (x * b_nb); + + // Master FP accumulators + __m256 acc_rows[4]; + for (int i = 0; i < 4; i++) { + acc_rows[i] = _mm256_setzero_ps(); + } + + for (int64_t b = 0; b < nb; b++) { + // Load the eight block_q8_0 quantized values interleaved with each other in chunks of eight - B0,B1 ....B6,B7 + const __m256i 
rhs_raw_mat_0123_0 = _mm256_loadu_si256((const __m256i *)(b_ptr[b].qs)); + const __m256i rhs_raw_mat_4567_0 = _mm256_loadu_si256((const __m256i *)(b_ptr[b].qs + 32)); + const __m256i rhs_raw_mat_0123_1 = _mm256_loadu_si256((const __m256i *)(b_ptr[b].qs + 64)); + const __m256i rhs_raw_mat_4567_1 = _mm256_loadu_si256((const __m256i *)(b_ptr[b].qs + 96)); + + // Save the values in the following vectors in the formats B0B1B4B5, B2B3B6B7 for further processing and storing of values + const __m256i rhs_raw_mat_0145_0 = _mm256_blend_epi32(rhs_raw_mat_0123_0, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_0, requiredOrder), 240); + const __m256i rhs_raw_mat_2367_0 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_0, requiredOrder), rhs_raw_mat_4567_0, 240); + const __m256i rhs_raw_mat_0145_1 = _mm256_blend_epi32(rhs_raw_mat_0123_1, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_1, requiredOrder), 240); + const __m256i rhs_raw_mat_2367_1 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_1, requiredOrder), rhs_raw_mat_4567_1, 240); + + // 4-bit -> 8-bit - Sign is maintained + const __m256i rhs_mat_0145_0 = _mm256_shuffle_epi8(signextendlut, _mm256_and_si256(rhs_raw_mat_0145_0, m4b)); //B0(0-7) B1(0-7) B4(0-7) B5(0-7) + const __m256i rhs_mat_2367_0 = _mm256_shuffle_epi8(signextendlut, _mm256_and_si256(rhs_raw_mat_2367_0, m4b)); //B2(0-7) B3(0-7) B6(0-7) B7(0-7) + + const __m256i rhs_mat_0145_1 = _mm256_shuffle_epi8(signextendlut, _mm256_and_si256(rhs_raw_mat_0145_1, m4b)); //B0(8-15) B1(8-15) B4(8-15) B5(8-15) + const __m256i rhs_mat_2367_1 = _mm256_shuffle_epi8(signextendlut, _mm256_and_si256(rhs_raw_mat_2367_1, m4b)); //B2(8-15) B3(8-15) B6(8-15) B7(8-15) + + const __m256i rhs_mat_0145_2 = _mm256_shuffle_epi8(signextendlut, _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_0, 4), m4b)); //B0(16-23) B1(16-23) B4(16-23) B5(16-23) + const __m256i rhs_mat_2367_2 = _mm256_shuffle_epi8(signextendlut, _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_0, 4), m4b)); //B2(16-23) B3(16-23) B6(16-23) B7(16-23) + + const __m256i rhs_mat_0145_3 = _mm256_shuffle_epi8(signextendlut, _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_1, 4), m4b)); //B0(24-31) B1(24-31) B4(24-31) B5(24-31) + const __m256i rhs_mat_2367_3 = _mm256_shuffle_epi8(signextendlut, _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_1, 4), m4b)); //B2(24-31) B3(24-31) B6(24-31) B7(24-31) + + // Shuffle pattern one - right side input + const __m256i rhs_mat_0145_0_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_0, 136); //B0(0-3) B1(0-3) B0(0-3) B1(0-3) B4(0-3) B5(0-3) B4(0-3) B5(0-3) + const __m256i rhs_mat_2367_0_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_0, 136); //B2(0-3) B3(0-3) B2(0-3) B3(0-3) B6(0-3) B7(0-3) B6(0-3) B7(0-3) + + const __m256i rhs_mat_0145_1_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_1, 136); //B0(8-11) B1(8-11) B0(8-11) B1(8-11) B4(8-11) B5(8-11) B4(8-11) B5(8-11) + const __m256i rhs_mat_2367_1_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_1, 136); //B2(8-11) B3(8-11) B2(8-11) B3(8-11) B6(8-11) B7(8-11) B6(8-11) B7(8-11) + + const __m256i rhs_mat_0145_2_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_2, 136); //B0(16-19) B1(16-19) B0(16-19) B1(16-19) B4(16-19) B5(16-19) B4(16-19) B5(16-19) + const __m256i rhs_mat_2367_2_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_2, 136); //B2(16-19) B3(16-19) B2(16-19) B3(16-19) B6(16-19) B7(16-19) B6(16-19) B7(16-19) + + const __m256i rhs_mat_0145_3_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_3, 136); //B0(24-27) B1(24-27) B0(24-27) B1(24-27) B4(24-27) B5(24-27) B4(24-27) B5(24-27) +
const __m256i rhs_mat_2367_3_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_3, 136); //B2(24-27) B3(24-27) B2(24-27) B3(24-27) B6(24-27) B7(24-27) B6(24-27) B7(24-27) + + // Shuffle pattern two - right side input + + const __m256i rhs_mat_0145_0_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_0, 221); //B0(4-7) B1(4-7) B0(4-7) B1(4-7) B4(4-7) B5(4-7) B4(4-7) B5(4-7) + const __m256i rhs_mat_2367_0_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_0, 221); //B2(4-7) B3(4-7) B2(4-7) B3(4-7) B6(4-7) B7(4-7) B6(4-7) B7(4-7) + + const __m256i rhs_mat_0145_1_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_1, 221); //B0(12-15) B1(12-15) B0(12-15) B1(12-15) B4(12-15) B5(12-15) B4(12-15) B5(12-15) + const __m256i rhs_mat_2367_1_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_1, 221); //B2(12-15) B3(12-15) B2(12-15) B3(12-15) B6(12-15) B7(12-15) B6(12-15) B7(12-15) + + const __m256i rhs_mat_0145_2_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_2, 221); //B0(20-23) B1(20-23) B0(20-23) B1(20-23) B4(20-23) B5(20-23) B4(20-23) B5(20-23) + const __m256i rhs_mat_2367_2_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_2, 221); //B2(20-23) B3(20-23) B2(20-23) B3(20-23) B6(20-23) B7(20-23) B6(20-23) B7(20-23) + + const __m256i rhs_mat_0145_3_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_3, 221); //B0(28-31) B1(28-31) B0(28-31) B1(28-31) B4(28-31) B5(28-31) B4(28-31) B5(28-31) + const __m256i rhs_mat_2367_3_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_3, 221); //B2(28-31) B3(28-31) B2(28-31) B3(28-31) B6(28-31) B7(28-31) B6(28-31) B7(28-31) + + // Scale values - Load the weight scale values of block_q4_0x8 + const __m256 col_scale_f32 = GGML_F32Cx8_LOAD(b_ptr[b].d); + + // Load the four block_q8_0 quantized values interleaved with each other in chunks of eight - A0,A1,A2,A3 + // Loaded as set of 128 bit vectors and repeated into a 256 bit vector + __m256i lhs_mat_0123_0 = _mm256_loadu_si256((const __m256i *)((a_ptr[b].qs))); + __m256i lhs_mat_01_0 = _mm256_permute2f128_si256(lhs_mat_0123_0, lhs_mat_0123_0, 0); + __m256i lhs_mat_23_0 = _mm256_permute2f128_si256(lhs_mat_0123_0, lhs_mat_0123_0, 17); + __m256i lhs_mat_0123_1 = _mm256_loadu_si256((const __m256i *)((a_ptr[b].qs + 32))); + __m256i lhs_mat_01_1 = _mm256_permute2f128_si256(lhs_mat_0123_1, lhs_mat_0123_1, 0); + __m256i lhs_mat_23_1 = _mm256_permute2f128_si256(lhs_mat_0123_1, lhs_mat_0123_1, 17); + __m256i lhs_mat_0123_2 = _mm256_loadu_si256((const __m256i *)((a_ptr[b].qs + 64))); + __m256i lhs_mat_01_2 = _mm256_permute2f128_si256(lhs_mat_0123_2, lhs_mat_0123_2, 0); + __m256i lhs_mat_23_2 = _mm256_permute2f128_si256(lhs_mat_0123_2, lhs_mat_0123_2, 17); + __m256i lhs_mat_0123_3 = _mm256_loadu_si256((const __m256i *)((a_ptr[b].qs + 96))); + __m256i lhs_mat_01_3 = _mm256_permute2f128_si256(lhs_mat_0123_3, lhs_mat_0123_3, 0); + __m256i lhs_mat_23_3 = _mm256_permute2f128_si256(lhs_mat_0123_3, lhs_mat_0123_3, 17); + + // Shuffle pattern one - left side input + + const __m256i lhs_mat_01_0_sp1 = _mm256_shuffle_epi32(lhs_mat_01_0, 160); //A0(0-3) A0(0-3) A1(0-3) A1(0-3) A0(0-3) A0(0-3) A1(0-3) A1(0-3) + const __m256i lhs_mat_23_0_sp1 = _mm256_shuffle_epi32(lhs_mat_23_0, 160); //A2(0-3) A2(0-3) A3(0-3) A3(0-3) A2(0-3) A2(0-3) A3(0-3) A3(0-3) + + const __m256i lhs_mat_01_1_sp1 = _mm256_shuffle_epi32(lhs_mat_01_1, 160); //A0(8-11) A0(8-11) A1(8-11) A1(8-11) A0(8-11) A0(8-11) A1(8-11) A1(8-11) + const __m256i lhs_mat_23_1_sp1 = _mm256_shuffle_epi32(lhs_mat_23_1, 160); //A2(8-11) A2(8-11) A3(8-11) A3(8-11) A2(8-11) A2(8-11) A3(8-11) A3(8-11) + + const __m256i lhs_mat_01_2_sp1 = _mm256_shuffle_epi32(lhs_mat_01_2, 160); //A0(16-19)
A0(16-19) A1(16-19) A1(16-19) A0(16-19) A0(16-19) A1(16-19) A1(16-19) + const __m256i lhs_mat_23_2_sp1 = _mm256_shuffle_epi32(lhs_mat_23_2, 160); //A2(16-19) A2(16-19) A3(16-19) A3(16-19) A2(16-19) A2(16-19) A3(16-19) A3(16-19) + + const __m256i lhs_mat_01_3_sp1 = _mm256_shuffle_epi32(lhs_mat_01_3, 160); //A0(24-27) A0(24-27) A1(24-27) A1(24-27) A0(24-27) A0(24-27) A1(24-27) A1(24-27) + const __m256i lhs_mat_23_3_sp1 = _mm256_shuffle_epi32(lhs_mat_23_3, 160); //A2(24-27) A2(24-27) A3(24-27) A3(24-27) A2(24-27) A2(24-27) A3(24-27) A3(24-27) + + // Shuffle pattern two - left side input + + const __m256i lhs_mat_01_0_sp2 = _mm256_shuffle_epi32(lhs_mat_01_0, 245); //A0(4-7) A0(4-7) A1(4-7) A1(4-7) A0(4-7) A0(4-7) A1(4-7) A1(4-7) + const __m256i lhs_mat_23_0_sp2 = _mm256_shuffle_epi32(lhs_mat_23_0, 245); //A2(4-7) A2(4-7) A3(4-7) A3(4-7) A2(4-7) A2(4-7) A3(4-7) A3(4-7) + + const __m256i lhs_mat_01_1_sp2 = _mm256_shuffle_epi32(lhs_mat_01_1, 245); //A0(12-15) A0(12-15) A1(12-15) A1(12-15) A0(12-15) A0(12-15) A1(12-15) A1(12-15) + const __m256i lhs_mat_23_1_sp2 = _mm256_shuffle_epi32(lhs_mat_23_1, 245); //A2(12-15) A2(12-15) A3(12-15) A3(12-15) A2(12-15) A2(12-15) A3(12-15) A3(12-15) + + const __m256i lhs_mat_01_2_sp2 = _mm256_shuffle_epi32(lhs_mat_01_2, 245); //A0(20-23) A0(20-23) A1(20-23) A1(20-23) A0(20-23) A0(20-23) A1(20-23) A1(20-23) + const __m256i lhs_mat_23_2_sp2 = _mm256_shuffle_epi32(lhs_mat_23_2, 245); //A2(20-23) A2(20-23) A3(20-23) A3(20-23) A2(20-23) A2(20-23) A3(20-23) A3(20-23) + + const __m256i lhs_mat_01_3_sp2 = _mm256_shuffle_epi32(lhs_mat_01_3, 245); //A0(28-31) A0(28-31) A1(28-31) A1(28-31) A0(28-31) A0(28-31) A1(28-31) A1(28-31) + const __m256i lhs_mat_23_3_sp2 = _mm256_shuffle_epi32(lhs_mat_23_3, 245); //A2(28-31) A2(28-31) A3(28-31) A3(28-31) A2(28-31) A2(28-31) A3(28-31) A3(28-31) + + // The values arranged in shuffle patterns are operated with dot product operation within 32 bit lane i.e corresponding bytes and multiplied and added into 32 bit integers within 32 bit lane + // Resembles MMLAs into 2x2 matrices in ARM Version + const __m256i zero = _mm256_setzero_si256(); + __m256i iacc_mat_00_sp1 = mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, lhs_mat_01_3_sp1, rhs_mat_0145_3_sp1), lhs_mat_01_2_sp1, rhs_mat_0145_2_sp1), lhs_mat_01_1_sp1, rhs_mat_0145_1_sp1), lhs_mat_01_0_sp1, rhs_mat_0145_0_sp1); + __m256i iacc_mat_01_sp1 = mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, lhs_mat_01_3_sp1, rhs_mat_2367_3_sp1), lhs_mat_01_2_sp1, rhs_mat_2367_2_sp1), lhs_mat_01_1_sp1, rhs_mat_2367_1_sp1), lhs_mat_01_0_sp1, rhs_mat_2367_0_sp1); + __m256i iacc_mat_10_sp1 = mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, lhs_mat_23_3_sp1, rhs_mat_0145_3_sp1), lhs_mat_23_2_sp1, rhs_mat_0145_2_sp1), lhs_mat_23_1_sp1, rhs_mat_0145_1_sp1), lhs_mat_23_0_sp1, rhs_mat_0145_0_sp1); + __m256i iacc_mat_11_sp1 = mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, lhs_mat_23_3_sp1, rhs_mat_2367_3_sp1), lhs_mat_23_2_sp1, rhs_mat_2367_2_sp1), lhs_mat_23_1_sp1, rhs_mat_2367_1_sp1), lhs_mat_23_0_sp1, rhs_mat_2367_0_sp1); + __m256i iacc_mat_00_sp2 = mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, lhs_mat_01_3_sp2, rhs_mat_0145_3_sp2), 
lhs_mat_01_2_sp2, rhs_mat_0145_2_sp2), lhs_mat_01_1_sp2, rhs_mat_0145_1_sp2), lhs_mat_01_0_sp2, rhs_mat_0145_0_sp2); + __m256i iacc_mat_01_sp2 = mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, lhs_mat_01_3_sp2, rhs_mat_2367_3_sp2), lhs_mat_01_2_sp2, rhs_mat_2367_2_sp2), lhs_mat_01_1_sp2, rhs_mat_2367_1_sp2), lhs_mat_01_0_sp2, rhs_mat_2367_0_sp2); + __m256i iacc_mat_10_sp2 = mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, lhs_mat_23_3_sp2, rhs_mat_0145_3_sp2), lhs_mat_23_2_sp2, rhs_mat_0145_2_sp2), lhs_mat_23_1_sp2, rhs_mat_0145_1_sp2), lhs_mat_23_0_sp2, rhs_mat_0145_0_sp2); + __m256i iacc_mat_11_sp2 = mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, lhs_mat_23_3_sp2, rhs_mat_2367_3_sp2), lhs_mat_23_2_sp2, rhs_mat_2367_2_sp2), lhs_mat_23_1_sp2, rhs_mat_2367_1_sp2), lhs_mat_23_0_sp2, rhs_mat_2367_0_sp2); + + // Output of both shuffle patterns are added in order to sum dot product outputs of all 32 values in block + __m256i iacc_mat_00 = _mm256_add_epi32(iacc_mat_00_sp1, iacc_mat_00_sp2); + __m256i iacc_mat_01 = _mm256_add_epi32(iacc_mat_01_sp1, iacc_mat_01_sp2); + __m256i iacc_mat_10 = _mm256_add_epi32(iacc_mat_10_sp1, iacc_mat_10_sp2); + __m256i iacc_mat_11 = _mm256_add_epi32(iacc_mat_11_sp1, iacc_mat_11_sp2); + + + // Straighten out to make 4 row vectors + __m256i iacc_row_0 = _mm256_blend_epi32(iacc_mat_00, _mm256_shuffle_epi32(iacc_mat_01, 78), 204); + __m256i iacc_row_1 = _mm256_blend_epi32(_mm256_shuffle_epi32(iacc_mat_00, 78), iacc_mat_01, 204); + __m256i iacc_row_2 = _mm256_blend_epi32(iacc_mat_10, _mm256_shuffle_epi32(iacc_mat_11, 78), 204); + __m256i iacc_row_3 = _mm256_blend_epi32(_mm256_shuffle_epi32(iacc_mat_10, 78), iacc_mat_11, 204); + + // Load the scale(d) values for all the 4 Q8_0 blocks and repeat it across lanes + const __m256 row_scale_f32 = GGML_F32Cx8_REPEAT_LOAD(a_ptr[b].d, loadMask); + + // Multiply with appropriate scales and accumulate + acc_rows[0] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_0), _mm256_mul_ps(col_scale_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 0)), acc_rows[0]); + acc_rows[1] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_1), _mm256_mul_ps(col_scale_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 85)), acc_rows[1]); + acc_rows[2] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_2), _mm256_mul_ps(col_scale_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 170)), acc_rows[2]); + acc_rows[3] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_3), _mm256_mul_ps(col_scale_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 255)), acc_rows[3]); + } + + // Store the accumulated values + for (int i = 0; i < 4; i++) { + _mm256_storeu_ps((float *)(s + ((y * 4 + i) * bs + x * 8)), acc_rows[i]); + } + } + } + return; + } + +#endif // #if ! ((defined(_MSC_VER)) && !
defined(__clang__)) && defined(__aarch64__) + ggml_gemm_q4_0_8x8_q8_0_generic(n, s, bs, vx, vy, nr, nc); +} + +void ggml_gemm_q4_K_8x8_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { + const int qk = QK_K; + const int nb = n / qk; + const int ncols_interleaved = 8; + const int blocklen = 8; + static const uint32_t kmask1 = 0x3f3f3f3f; + static const uint32_t kmask2 = 0x0f0f0f0f; + static const uint32_t kmask3 = 0x03030303; + + assert (n % qk == 0); + assert (nr % 4 == 0); + assert (nc % ncols_interleaved == 0); + + UNUSED(s); + UNUSED(bs); + UNUSED(vx); + UNUSED(vy); + UNUSED(nr); + UNUSED(nc); + UNUSED(nb); + UNUSED(ncols_interleaved); + UNUSED(blocklen); + +#if defined(__AVX2__) || defined(__AVX512F__) + const block_q4_Kx8 * b_ptr_start = (const block_q4_Kx8 * ) vx; + const block_q8_Kx4 * a_ptr_start = (const block_q8_Kx4 * ) vy; + int64_t b_nb = n / QK_K; + int64_t y = 0; + + // Mask to mask out nibbles from packed bytes + const __m256i m4b = _mm256_set1_epi8(0x0F); + // Permute mask used for easier vector processing at later stages + __m256i requiredOrder = _mm256_set_epi32(3, 2, 1, 0, 7, 6, 5, 4); + int64_t xstart = 0; + int anr = nr - nr % 16; // Used to align nr with boundary of 16 +#ifdef __AVX512F__ + int anc = nc - nc % 16; // Used to align nc with boundary of 16 + // Mask to mask out nibbles from packed bytes expanded to 512 bit length + const __m512i m4bexpanded = _mm512_set1_epi8(0x0F); + //Take group of four block_q8_Kx4 structures at each pass of the loop and perform dot product operation + for (; y < anr / 4; y += 4) { + + const block_q8_Kx4 * a_ptrs[4]; + + a_ptrs[0] = a_ptr_start + (y * nb); + for (int i = 0; i < 3; ++i) { + a_ptrs[i + 1] = a_ptrs[i] + nb; + } + + // Take group of eight block_q4_kx8 structures at each pass of the loop and perform dot product operation + for (int64_t x = 0; x < anc / 8; x += 2) { + + const block_q4_Kx8 * b_ptr_0 = b_ptr_start + ((x) * b_nb); + const block_q4_Kx8 * b_ptr_1 = b_ptr_start + ((x + 1) * b_nb); + + // Master FP accumulators + __m512 acc_rows[16]; + for (int i = 0; i < 16; i++) { + acc_rows[i] = _mm512_setzero_ps(); + } + + __m512 acc_min_rows[16]; + for (int i = 0; i < 16; i++) { + acc_min_rows[i] = _mm512_setzero_ps(); + } + + // For super block + for (int64_t b = 0; b < nb; b++) { + // Scale values - Load the sixteen scale values from two block_q4_kx8 structures + const __m512 col_scale_f32 = GGML_F32Cx8x2_LOAD(b_ptr_0[b].d, b_ptr_1[b].d); + + // dmin values - Load the sixteen dmin values from two block_q4_kx8 structures + const __m512 col_dmin_f32 = GGML_F32Cx8x2_LOAD(b_ptr_0[b].dmin, b_ptr_1[b].dmin); + + // Loop to iterate over the eight sub blocks of a super block - two sub blocks are processed per iteration + for (int sb = 0; sb < QK_K / 64; sb++) { + + const __m256i rhs_raw_mat_0123_0 = _mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + sb * 256)); + const __m256i rhs_raw_mat_4567_0 = _mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + 32 + sb * 256)); + const __m256i rhs_raw_mat_0123_1 = _mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + 64 + sb * 256)); + const __m256i rhs_raw_mat_4567_1 = _mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + 96 + sb * 256)); + const __m256i rhs_raw_mat_0123_2 = _mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + 128 + sb * 256)); + const __m256i rhs_raw_mat_4567_2 = _mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + 160 + sb * 256)); + const __m256i rhs_raw_mat_0123_3 =
_mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + 192 + sb * 256)); + const __m256i rhs_raw_mat_4567_3 = _mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + 224 + sb * 256)); + + const __m256i rhs_raw_mat_89AB_0 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + sb * 256)); + const __m256i rhs_raw_mat_CDEF_0 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + 32 + sb * 256)); + const __m256i rhs_raw_mat_89AB_1 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + 64 + sb * 256)); + const __m256i rhs_raw_mat_CDEF_1 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + 96 + sb * 256)); + const __m256i rhs_raw_mat_89AB_2 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + 128 + sb * 256)); + const __m256i rhs_raw_mat_CDEF_2 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + 160 + sb * 256)); + const __m256i rhs_raw_mat_89AB_3 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + 192 + sb * 256)); + const __m256i rhs_raw_mat_CDEF_3 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + 224 + sb * 256)); + + const __m256i rhs_raw_mat_0145_0 = _mm256_blend_epi32(rhs_raw_mat_0123_0, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_0, requiredOrder), 240); + const __m256i rhs_raw_mat_2367_0 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_0, requiredOrder), rhs_raw_mat_4567_0, 240); + const __m256i rhs_raw_mat_0145_1 = _mm256_blend_epi32(rhs_raw_mat_0123_1, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_1, requiredOrder), 240); + const __m256i rhs_raw_mat_2367_1 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_1, requiredOrder), rhs_raw_mat_4567_1, 240); + const __m256i rhs_raw_mat_0145_2 = _mm256_blend_epi32(rhs_raw_mat_0123_2, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_2, requiredOrder), 240); + const __m256i rhs_raw_mat_2367_2 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_2, requiredOrder), rhs_raw_mat_4567_2, 240); + const __m256i rhs_raw_mat_0145_3 = _mm256_blend_epi32(rhs_raw_mat_0123_3, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_3, requiredOrder), 240); + const __m256i rhs_raw_mat_2367_3 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_3, requiredOrder), rhs_raw_mat_4567_3, 240); + + const __m256i rhs_raw_mat_89CD_0 = _mm256_blend_epi32(rhs_raw_mat_89AB_0, _mm256_permutevar8x32_epi32(rhs_raw_mat_CDEF_0, requiredOrder), 240); + const __m256i rhs_raw_mat_ABEF_0 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_89AB_0, requiredOrder), rhs_raw_mat_CDEF_0, 240); + const __m256i rhs_raw_mat_89CD_1 = _mm256_blend_epi32(rhs_raw_mat_89AB_1, _mm256_permutevar8x32_epi32(rhs_raw_mat_CDEF_1, requiredOrder), 240); + const __m256i rhs_raw_mat_ABEF_1 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_89AB_1, requiredOrder), rhs_raw_mat_CDEF_1, 240); + const __m256i rhs_raw_mat_89CD_2 = _mm256_blend_epi32(rhs_raw_mat_89AB_2, _mm256_permutevar8x32_epi32(rhs_raw_mat_CDEF_2, requiredOrder), 240); + const __m256i rhs_raw_mat_ABEF_2 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_89AB_2, requiredOrder), rhs_raw_mat_CDEF_2, 240); + const __m256i rhs_raw_mat_89CD_3 = _mm256_blend_epi32(rhs_raw_mat_89AB_3, _mm256_permutevar8x32_epi32(rhs_raw_mat_CDEF_3, requiredOrder), 240); + const __m256i rhs_raw_mat_ABEF_3 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_89AB_3, requiredOrder), rhs_raw_mat_CDEF_3, 240); + + const __m512i rhs_raw_mat_014589CD_0 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_0145_0), rhs_raw_mat_89CD_0, 1); + const __m512i rhs_raw_mat_2367ABEF_0 
= _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_2367_0), rhs_raw_mat_ABEF_0, 1); + const __m512i rhs_raw_mat_014589CD_1 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_0145_1), rhs_raw_mat_89CD_1, 1); + const __m512i rhs_raw_mat_2367ABEF_1 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_2367_1), rhs_raw_mat_ABEF_1, 1); + + const __m512i rhs_raw_mat_014589CD_2 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_0145_2), rhs_raw_mat_89CD_2, 1); + const __m512i rhs_raw_mat_2367ABEF_2 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_2367_2), rhs_raw_mat_ABEF_2, 1); + const __m512i rhs_raw_mat_014589CD_3 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_0145_3), rhs_raw_mat_89CD_3, 1); + const __m512i rhs_raw_mat_2367ABEF_3 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_2367_3), rhs_raw_mat_ABEF_3, 1); + + //4-bit -> 8-bit + const __m512i rhs_mat_014589CD_00 = _mm512_and_si512(rhs_raw_mat_014589CD_0, m4bexpanded); //B00(0-7) B01(0-7) B04(0-7) B05(0-7) B08(0-7) B09(0-7) B0C(0-7) B0D(0-7) + const __m512i rhs_mat_2367ABEF_00 = _mm512_and_si512(rhs_raw_mat_2367ABEF_0, m4bexpanded); //B02(0-7) B03(0-7) B06(0-7) B07(0-7) B0A(0-7) B0B(0-7) B0E(0-7) B0F(0-7) + const __m512i rhs_mat_014589CD_01 = _mm512_and_si512(rhs_raw_mat_014589CD_1, m4bexpanded); //B00(8-15) B01(8-15) B04(8-15) B05(8-15) B08(8-15) B09(8-15) B0C(8-15) B0D(8-15) + const __m512i rhs_mat_2367ABEF_01 = _mm512_and_si512(rhs_raw_mat_2367ABEF_1, m4bexpanded); //B02(8-15) B03(8-15) B06(8-15) B07(8-15) B0A(8-15) B0B(8-15) B0E(8-15) B0F(8-15) + + const __m512i rhs_mat_014589CD_02 = _mm512_and_si512(rhs_raw_mat_014589CD_2, m4bexpanded); //B00(16-23) B01(16-23) B04(16-23) B05(16-23) B08(16-23) B09(16-23) B0C(16-23) B0D(16-23) + const __m512i rhs_mat_2367ABEF_02 = _mm512_and_si512(rhs_raw_mat_2367ABEF_2, m4bexpanded); //B02(16-23) B03(16-23) B06(16-23) B07(16-23) B0A(16-23) B0B(16-23) B0E(16-23) B0F(16-23) + const __m512i rhs_mat_014589CD_03 = _mm512_and_si512(rhs_raw_mat_014589CD_3, m4bexpanded); //B00(24-31) B01(24-31) B04(24-31) B05(24-31) B08(24-31) B09(24-31) B0C(24-31) B0D(24-31) + const __m512i rhs_mat_2367ABEF_03 = _mm512_and_si512(rhs_raw_mat_2367ABEF_3, m4bexpanded); //B02(24-31) B03(24-31) B06(24-31) B07(24-31) B0A(24-31) B0B(24-31) B0E(24-31) B0F(24-31) + + const __m512i rhs_mat_014589CD_10 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_0, 4), m4bexpanded); //B10(0-7) B11(0-7) B14(0-7) B15(0-7) B18(0-7) B19(0-7) B1C(0-7) B1D(0-7) + const __m512i rhs_mat_2367ABEF_10 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_0, 4), m4bexpanded); //B12(0-7) B13(0-7) B16(0-7) B17(0-7) B1A(0-7) B1B(0-7) B1E(0-7) B1F(0-7) + const __m512i rhs_mat_014589CD_11 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_1, 4), m4bexpanded); //B10(8-15) B11(8-15) B14(8-15) B15(8-15) B18(8-15) B19(8-15) B1C(8-15) B1D(8-15) + const __m512i rhs_mat_2367ABEF_11 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_1, 4), m4bexpanded); //B12(8-15) B13(8-15) B16(8-15) B17(8-15) B1A(8-15) B1B(8-15) B1E(8-15) B1F(8-15) + + const __m512i rhs_mat_014589CD_12 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_2, 4), m4bexpanded); //B10(16-23) B11(16-23) B14(16-23) B15(16-23) B18(16-23) B19(16-23) B1C(16-23) B1D(16-23) + const __m512i rhs_mat_2367ABEF_12 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_2, 4), m4bexpanded); //B12(16-23) B13(16-23) B16(16-23) B17(16-23) B1A(16-23) B1B(16-23) B1E(16-23) B1F(16-23) + const __m512i rhs_mat_014589CD_13 = 
_mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_3, 4), m4bexpanded); //B10(24-31) B11(24-31) B14(24-31) B15(24-31) B18(24-31) B19(24-31) B1C(24-31) B1D(24-31) + const __m512i rhs_mat_2367ABEF_13 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_3, 4), m4bexpanded); //B12(24-31) B13(24-31) B16(24-31) B17(24-31) B1A(24-31) B1B(24-31) B1E(24-31) B1F(24-31) + + // Shuffle pattern one - right side input + const __m512i rhs_mat_014589CD_00_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_00, (_MM_PERM_ENUM)136); //B00(0-3) B01(0-3) B00(0-3) B01(0-3) B04(0-3) B05(0-3) B04(0-3) B05(0-3) B08(0-3) B09(0-3) B08(0-3) B09(0-3) B0C(0-3) B0D(0-3) B0C(0-3) B0D(0-3) + const __m512i rhs_mat_2367ABEF_00_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_00, (_MM_PERM_ENUM)136); //B02(0-3) B03(0-3) B02(0-3) B03(0-3) B06(0-3) B07(0-3) B06(0-3) B07(0-3) B0A(0-3) B0B(0-3) B0A(0-3) B0B(0-3) B0E(0-3) B0F(0-3) B0E(0-3) B0F(0-3) + const __m512i rhs_mat_014589CD_01_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_01, (_MM_PERM_ENUM)136); //B00(8-11) B01(8-11) B00(8-11) B01(8-11) B04(8-11) B05(8-11) B04(8-11) B05(8-11) B08(8-11) B09(8-11) B08(8-11) B09(8-11) B0C(8-11) B0D(8-11) B0C(8-11) B0D(8-11) + const __m512i rhs_mat_2367ABEF_01_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_01, (_MM_PERM_ENUM)136); //B02(8-11) B03(8-11) B02(8-11) B03(8-11) B06(8-11) B07(8-11) B06(8-11) B07(8-11) B0A(8-11) B0B(8-11) B0A(8-11) B0B(8-11) B0E(8-11) B0F(8-11) B0E(8-11) B0F(8-11) + const __m512i rhs_mat_014589CD_02_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_02, (_MM_PERM_ENUM)136); //B00(16-19) B01(16-19) B00(16-19) B01(16-19) B04(16-19) B05(16-19) B04(16-19) B05(16-19) B08(16-19) B09(16-19) B08(16-19) B09(16-19) B0C(16-19) B0D(16-19) B0C(16-19) B0D(16-19) + const __m512i rhs_mat_2367ABEF_02_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_02, (_MM_PERM_ENUM)136); //B02(16-19) B03(16-19) B02(16-19) B03(16-19) B06(16-19) B07(16-19) B06(16-19) B07(16-19) B0A(16-19) B0B(16-19) B0A(16-19) B0B(16-19) B0E(16-19) B0F(16-19) B0E(16-19) B0F(16-19) + const __m512i rhs_mat_014589CD_03_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_03, (_MM_PERM_ENUM)136); //B00(24-27) B01(24-27) B00(24-27) B01(24-27) B04(24-27) B05(24-27) B04(24-27) B05(24-27) B08(24-27) B09(24-27) B08(24-27) B09(24-27) B0C(24-27) B0D(24-27) B0C(24-27) B0D(24-27) + const __m512i rhs_mat_2367ABEF_03_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_03, (_MM_PERM_ENUM)136); //B02(24-27) B03(24-27) B02(24-27) B03(24-27) B06(24-27) B07(24-27) B06(24-27) B07(24-27) B0A(24-27) B0B(24-27) B0A(24-27) B0B(24-27) B0E(24-27) B0F(24-27) B0E(24-27) B0F(24-27) + + const __m512i rhs_mat_014589CD_10_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_10, (_MM_PERM_ENUM)136); //B10(0-3) B11(0-3) B10(0-3) B11(0-3) B14(0-3) B15(0-3) B14(0-3) B15(0-3) B18(0-3) B19(0-3) B18(0-3) B19(0-3) B1C(0-3) B1D(0-3) B1C(0-3) B1D(0-3) + const __m512i rhs_mat_2367ABEF_10_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_10, (_MM_PERM_ENUM)136); //B12(0-3) B13(0-3) B12(0-3) B13(0-3) B16(0-3) B17(0-3) B16(0-3) B17(0-3) B1A(0-3) B1B(0-3) B1A(0-3) B1B(0-3) B1E(0-3) B1F(0-3) B1E(0-3) B1F(0-3) + const __m512i rhs_mat_014589CD_11_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_11, (_MM_PERM_ENUM)136); //B10(8-11) B11(8-11) B10(8-11) B11(8-11) B14(8-11) B15(8-11) B14(8-11) B15(8-11) B18(8-11) B19(8-11) B18(8-11) B19(8-11) B1C(8-11) B1D(8-11) B1C(8-11) B1D(8-11) + const __m512i rhs_mat_2367ABEF_11_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_11, (_MM_PERM_ENUM)136); //B12(8-11) B13(8-11) B12(8-11) B13(8-11) B16(8-11) B17(8-11) B16(8-11) B17(8-11) B1A(8-11) 
B1B(8-11) B1A(8-11) B1B(8-11) B1E(8-11) B1F(8-11) B1E(8-11) B1F(8-11) + const __m512i rhs_mat_014589CD_12_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_12, (_MM_PERM_ENUM)136); //B10(16-19) B11(16-19) B10(16-19) B11(16-19) B14(16-19) B15(16-19) B14(16-19) B15(16-19) B18(16-19) B19(16-19) B18(16-19) B19(16-19) B1C(16-19) B1D(16-19) B1C(16-19) B1D(16-19) + const __m512i rhs_mat_2367ABEF_12_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_12, (_MM_PERM_ENUM)136); //B12(16-19) B13(16-19) B12(16-19) B13(16-19) B16(16-19) B17(16-19) B16(16-19) B17(16-19) B1A(16-19) B1B(16-19) B1A(16-19) B1B(16-19) B1E(16-19) B1F(16-19) B1E(16-19) B1F(16-19) + const __m512i rhs_mat_014589CD_13_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_13, (_MM_PERM_ENUM)136); //B10(24-27) B11(24-27) B10(24-27) B11(24-27) B14(24-27) B15(24-27) B14(24-27) B15(24-27) B18(24-27) B19(24-27) B18(24-27) B19(24-27) B1C(24-27) B1D(24-27) B1C(24-27) B1D(24-27) + const __m512i rhs_mat_2367ABEF_13_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_13, (_MM_PERM_ENUM)136); //B12(24-27) B13(24-27) B12(24-27) B13(24-27) B16(24-27) B17(24-27) B16(24-27) B17(24-27) B1A(24-27) B1B(24-27) B1A(24-27) B1B(24-27) B1E(24-27) B1F(24-27) B1E(24-27) B1F(24-27) + + // Shuffle pattern two - right side input + const __m512i rhs_mat_014589CD_00_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_00, (_MM_PERM_ENUM)221); //B00(4-7) B01(4-7) B00(4-7) B01(4-7) B04(4-7) B05(4-7) B04(4-7) B05(4-7) B08(4-7) B09(4-7) B08(4-7) B09(4-7) B0C(4-7) B0D(4-7) B0C(4-7) B0D(4-7) + const __m512i rhs_mat_2367ABEF_00_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_00, (_MM_PERM_ENUM)221); //B02(4-7) B03(4-7) B02(4-7) B03(4-7) B06(4-7) B07(4-7) B06(4-7) B07(4-7) B0A(4-7) B0B(4-7) B0A(4-7) B0B(4-7) B0E(4-7) B0F(4-7) B0E(4-7) B0F(4-7) + const __m512i rhs_mat_014589CD_01_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_01, (_MM_PERM_ENUM)221); //B00(12-15) B01(12-15) B00(12-15) B01(12-15) B04(12-15) B05(12-15) B04(12-15) B05(12-15) B08(12-15) B09(12-15) B08(12-15) B09(12-15) B0C(12-15) B0D(12-15) B0C(12-15) B0D(12-15) + const __m512i rhs_mat_2367ABEF_01_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_01, (_MM_PERM_ENUM)221); //B02(12-15) B03(12-15) B02(12-15) B03(12-15) B06(12-15) B07(12-15) B06(12-15) B07(12-15) B0A(12-15) B0B(12-15) B0A(12-15) B0B(12-15) B0E(12-15) B0F(12-15) B0E(12-15) B0F(12-15) + const __m512i rhs_mat_014589CD_02_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_02, (_MM_PERM_ENUM)221); //B00(20-23) B01(20-23) B00(20-23) B01(20-23) B04(20-23) B05(20-23) B04(20-23) B05(20-23) B08(20-23) B09(20-23) B08(20-23) B09(20-23) B0C(20-23) B0D(20-23) B0C(20-23) B0D(20-23) + const __m512i rhs_mat_2367ABEF_02_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_02, (_MM_PERM_ENUM)221); //B02(20-23) B03(20-23) B02(20-23) B03(20-23) B06(20-23) B07(20-23) B06(20-23) B07(20-23) B0A(20-23) B0B(20-23) B0A(20-23) B0B(20-23) B0E(20-23) B0F(20-23) B0E(20-23) B0F(20-23) + const __m512i rhs_mat_014589CD_03_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_03, (_MM_PERM_ENUM)221); //B00(28-31) B01(28-31) B00(28-31) B01(28-31) B04(28-31) B05(28-31) B04(28-31) B05(28-31) B08(28-31) B09(28-31) B08(28-31) B09(28-31) B0C(28-31) B0D(28-31) B0C(28-31) B0D(28-31) + const __m512i rhs_mat_2367ABEF_03_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_03, (_MM_PERM_ENUM)221); //B02(28-31) B03(28-31) B02(28-31) B03(28-31) B06(28-31) B07(28-31) B06(28-31) B07(28-31) B0A(28-31) B0B(28-31) B0A(28-31) B0B(28-31) B0E(28-31) B0F(28-31) B0E(28-31) B0F(28-31) + + const __m512i rhs_mat_014589CD_10_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_10, (_MM_PERM_ENUM)221);
//B10(4-7) B11(4-7) B10(4-7) B11(4-7) B14(4-7) B15(4-7) B14(4-7) B15(4-7) B18(4-7) B19(4-7) B18(4-7) B19(4-7) B1C(4-7) B1D(4-7) B1C(4-7) B1D(4-7) + const __m512i rhs_mat_2367ABEF_10_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_10, (_MM_PERM_ENUM)221); //B12(4-7) B13(4-7) B12(4-7) B13(4-7) B16(4-7) B17(4-7) B16(4-7) B17(4-7) B1A(4-7) B1B(4-7) B1A(4-7) B1B(4-7) B1E(4-7) B1F(4-7) B1E(4-7) B1F(4-7) + const __m512i rhs_mat_014589CD_11_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_11, (_MM_PERM_ENUM)221); //B10(12-15) B11(12-15) B10(12-15) B11(12-15) B14(12-15) B15(12-15) B14(12-15) B15(12-15) B18(12-15) B19(12-15) B18(12-15) B19(12-15) B1C(12-15) B1D(12-15) B1C(12-15) B1D(12-15) + const __m512i rhs_mat_2367ABEF_11_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_11, (_MM_PERM_ENUM)221); //B12(12-15) B13(12-15) B12(12-15) B13(12-15) B16(12-15) B17(12-15) B16(12-15) B17(12-15) B1A(12-15) B1B(12-15) B1A(12-15) B1B(12-15) B1E(12-15) B1F(12-15) B1E(12-15) B1F(12-15) + const __m512i rhs_mat_014589CD_12_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_12, (_MM_PERM_ENUM)221); //B10(20-23) B11(20-23) B10(20-23) B11(20-23) B14(20-23) B15(20-23) B14(20-23) B15(20-23) B18(20-23) B19(20-23) B18(20-23) B19(20-23) B1C(20-23) B1D(20-23) B1C(20-23) B1D(20-23) + const __m512i rhs_mat_2367ABEF_12_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_12, (_MM_PERM_ENUM)221); //B12(20-23) B13(20-23) B12(20-23) B13(20-23) B16(20-23) B17(20-23) B16(20-23) B17(20-23) B1A(20-23) B1B(20-23) B1A(20-23) B1B(20-23) B1E(20-23) B1F(20-23) B1E(20-23) B1F(20-23) + const __m512i rhs_mat_014589CD_13_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_13, (_MM_PERM_ENUM)221); //B10(28-31) B11(28-31) B10(28-31) B11(28-31) B14(28-31) B15(28-31) B14(28-31) B15(28-31) B18(28-31) B19(28-31) B18(28-31) B19(28-31) B1C(28-31) B1D(28-31) B1C(28-31) B1D(28-31) + const __m512i rhs_mat_2367ABEF_13_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_13, (_MM_PERM_ENUM)221); //B12(28-31) B13(28-31) B12(28-31) B13(28-31) B16(28-31) B17(28-31) B16(28-31) B17(28-31) B1A(28-31) B1B(28-31) B1A(28-31) B1B(28-31) B1E(28-31) B1F(28-31) B1E(28-31) B1F(28-31) + + uint32_t utmp_00[4], utmp_01[4], utmp_10[4], utmp_11[4]; + + // Scales and Mins of corresponding sub blocks from different Q4_K structures are stored together + // The below block, for example, extracts the first sub block's scales and mins from different Q4_K structures for the sb loop + memcpy(utmp_00, b_ptr_0[b].scales + 24 * sb, 12); + utmp_00[3] = ((utmp_00[2] >> 4) & kmask2) | (((utmp_00[1] >> 6) & kmask3) << 4); + const uint32_t uaux_00 = utmp_00[1] & kmask1; + utmp_00[1] = (utmp_00[2] & kmask2) | (((utmp_00[0] >> 6) & kmask3) << 4); + utmp_00[2] = uaux_00; + utmp_00[0] &= kmask1; + + // The below block, for example, extracts the second sub block's scales and mins from different Q4_K structures for the sb loop + memcpy(utmp_01, b_ptr_0[b].scales + 12 + sb * 24, 12); + utmp_01[3] = ((utmp_01[2] >> 4) & kmask2) | (((utmp_01[1] >> 6) & kmask3) << 4); + const uint32_t uaux_01 = utmp_01[1] & kmask1; + utmp_01[1] = (utmp_01[2] & kmask2) | (((utmp_01[0] >> 6) & kmask3) << 4); + utmp_01[2] = uaux_01; + utmp_01[0] &= kmask1; + + memcpy(utmp_10, b_ptr_1[b].scales + sb * 24, 12); + utmp_10[3] = ((utmp_10[2] >> 4) & kmask2) | (((utmp_10[1] >> 6) & kmask3) << 4); + const uint32_t uaux_10 = utmp_10[1] & kmask1; + utmp_10[1] = (utmp_10[2] & kmask2) | (((utmp_10[0] >> 6) & kmask3) << 4); + utmp_10[2] = uaux_10; + utmp_10[0] &= kmask1; + + // The below block, for example, extracts the second sub block's scales and mins from different Q4_K structures for the
sb loop + memcpy(utmp_11, b_ptr_1[b].scales + 12 + sb * 24, 12); + utmp_11[3] = ((utmp_11[2] >> 4) & kmask2) | (((utmp_11[1] >> 6) & kmask3) << 4); + const uint32_t uaux_11 = utmp_11[1] & kmask1; + utmp_11[1] = (utmp_11[2] & kmask2) | (((utmp_11[0] >> 6) & kmask3) << 4); + utmp_11[2] = uaux_11; + utmp_11[0] &= kmask1; + + // Scales of first sub block in the sb loop + const __m256i mins_and_scales_0 = _mm256_set_epi32(utmp_10[3], utmp_10[2], utmp_10[1], utmp_10[0], utmp_00[3], utmp_00[2], utmp_00[1], utmp_00[0]); + const __m512i scales_0 = _mm512_cvtepu8_epi16(_mm256_unpacklo_epi8(mins_and_scales_0, mins_and_scales_0)); + + // Scales of second sub block in the sb loop + const __m256i mins_and_scales_1 = _mm256_set_epi32(utmp_11[3], utmp_11[2], utmp_11[1], utmp_11[0], utmp_01[3], utmp_01[2], utmp_01[1], utmp_01[0]); + const __m512i scales_1 = _mm512_cvtepu8_epi16(_mm256_unpacklo_epi8(mins_and_scales_1, mins_and_scales_1)); + + // Mins of first and second sub block of Q4_K block are arranged side by side + const __m512i mins_01 = _mm512_cvtepu8_epi16(_mm256_unpacklo_epi8(_mm256_shuffle_epi32(mins_and_scales_0, 78), _mm256_shuffle_epi32(mins_and_scales_1, 78))); + + const __m512i scale_014589CD_0 = _mm512_shuffle_epi32(scales_0, (_MM_PERM_ENUM)68); + const __m512i scale_2367ABEF_0 = _mm512_shuffle_epi32(scales_0, (_MM_PERM_ENUM)238); + + const __m512i scale_014589CD_1 = _mm512_shuffle_epi32(scales_1, (_MM_PERM_ENUM)68); + const __m512i scale_2367ABEF_1 = _mm512_shuffle_epi32(scales_1, (_MM_PERM_ENUM)238); + + for (int rp = 0; rp < 4; rp++) { + + // Load the four block_q8_k quantized values interleaved with each other in chunks of eight bytes - A0,A1,A2,A3 + // Loaded as set of 128 bit vectors and repeated and stored into a 256 bit vector before again repeating into 512 bit vector + __m256i lhs_mat_ymm_0123_00 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 256 * sb))); + __m256i lhs_mat_ymm_01_00 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_00, lhs_mat_ymm_0123_00, 0); + __m256i lhs_mat_ymm_23_00 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_00, lhs_mat_ymm_0123_00, 17); + __m256i lhs_mat_ymm_0123_01 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 32 + 256 * sb))); + __m256i lhs_mat_ymm_01_01 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_01, lhs_mat_ymm_0123_01, 0); + __m256i lhs_mat_ymm_23_01 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_01, lhs_mat_ymm_0123_01, 17); + __m256i lhs_mat_ymm_0123_02 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 64 + 256 * sb))); + __m256i lhs_mat_ymm_01_02 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_02, lhs_mat_ymm_0123_02, 0); + __m256i lhs_mat_ymm_23_02 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_02, lhs_mat_ymm_0123_02, 17); + __m256i lhs_mat_ymm_0123_03 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 96 + 256 * sb))); + __m256i lhs_mat_ymm_01_03 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_03, lhs_mat_ymm_0123_03, 0); + __m256i lhs_mat_ymm_23_03 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_03, lhs_mat_ymm_0123_03, 17); + __m256i lhs_mat_ymm_0123_10 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 128 + 256 * sb))); + __m256i lhs_mat_ymm_01_10 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_10, lhs_mat_ymm_0123_10, 0); + __m256i lhs_mat_ymm_23_10 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_10, lhs_mat_ymm_0123_10, 17); + __m256i lhs_mat_ymm_0123_11 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 160 + 256 * sb))); + __m256i lhs_mat_ymm_01_11 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_11, 
lhs_mat_ymm_0123_11, 0); + __m256i lhs_mat_ymm_23_11 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_11, lhs_mat_ymm_0123_11, 17); + __m256i lhs_mat_ymm_0123_12 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 192 + 256 * sb))); + __m256i lhs_mat_ymm_01_12 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_12, lhs_mat_ymm_0123_12, 0); + __m256i lhs_mat_ymm_23_12 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_12, lhs_mat_ymm_0123_12, 17); + __m256i lhs_mat_ymm_0123_13 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 224 + 256 * sb))); + __m256i lhs_mat_ymm_01_13 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_13, lhs_mat_ymm_0123_13, 0); + __m256i lhs_mat_ymm_23_13 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_13, lhs_mat_ymm_0123_13, 17); + + __m512i lhs_mat_01_00 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_00), lhs_mat_ymm_01_00, 1); + __m512i lhs_mat_23_00 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_00), lhs_mat_ymm_23_00, 1); + __m512i lhs_mat_01_01 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_01), lhs_mat_ymm_01_01, 1); + __m512i lhs_mat_23_01 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_01), lhs_mat_ymm_23_01, 1); + __m512i lhs_mat_01_02 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_02), lhs_mat_ymm_01_02, 1); + __m512i lhs_mat_23_02 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_02), lhs_mat_ymm_23_02, 1); + __m512i lhs_mat_01_03 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_03), lhs_mat_ymm_01_03, 1); + __m512i lhs_mat_23_03 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_03), lhs_mat_ymm_23_03, 1); + + __m512i lhs_mat_01_10 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_10), lhs_mat_ymm_01_10, 1); + __m512i lhs_mat_23_10 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_10), lhs_mat_ymm_23_10, 1); + __m512i lhs_mat_01_11 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_11), lhs_mat_ymm_01_11, 1); + __m512i lhs_mat_23_11 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_11), lhs_mat_ymm_23_11, 1); + __m512i lhs_mat_01_12 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_12), lhs_mat_ymm_01_12, 1); + __m512i lhs_mat_23_12 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_12), lhs_mat_ymm_23_12, 1); + __m512i lhs_mat_01_13 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_13), lhs_mat_ymm_01_13, 1); + __m512i lhs_mat_23_13 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_13), lhs_mat_ymm_23_13, 1); + + // Bsums are loaded - four bsums are loaded (for two sub blocks) for the different Q8_K blocks + __m256i lhs_bsums_0123_01 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].bsums + 16 * sb))); + __m256i lhs_bsums_hsum_ymm_0123_01 = _mm256_castsi128_si256(_mm_hadd_epi16(_mm256_castsi256_si128(lhs_bsums_0123_01), _mm256_extractf128_si256(lhs_bsums_0123_01, 1))); + lhs_bsums_hsum_ymm_0123_01 = _mm256_permute2x128_si256(lhs_bsums_hsum_ymm_0123_01, lhs_bsums_hsum_ymm_0123_01, 0); + __m512i lhs_bsums_hsum_0123_01 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_bsums_hsum_ymm_0123_01), lhs_bsums_hsum_ymm_0123_01, 1); + + // Shuffle pattern one - left side input + const __m512i lhs_mat_01_00_sp1 = _mm512_shuffle_epi32(lhs_mat_01_00, (_MM_PERM_ENUM)160); //A00(0-3) A00(0-3) A01(0-3) A01(0-3) A00(0-3) A00(0-3) A01(0-3) A01(0-3) A00(0-3) A00(0-3) A01(0-3) A01(0-3) A00(0-3) A00(0-3) A01(0-3) A01(0-3) + const __m512i lhs_mat_23_00_sp1 = _mm512_shuffle_epi32(lhs_mat_23_00, (_MM_PERM_ENUM)160); //A02(0-3) 
A02(0-3) A03(0-3) A03(0-3) A02(0-3) A02(0-3) A03(0-3) A03(0-3) A02(0-3) A02(0-3) A03(0-3) A03(0-3) A02(0-3) A02(0-3) A03(0-3) A03(0-3) + const __m512i lhs_mat_01_01_sp1 = _mm512_shuffle_epi32(lhs_mat_01_01, (_MM_PERM_ENUM)160); //A00(8-11) A00(8-11) A01(8-11) A01(8-11) A00(8-11) A00(8-11) A01(8-11) A01(8-11) A00(8-11) A00(8-11) A01(8-11) A01(8-11) A00(8-11) A00(8-11) A01(8-11) A01(8-11) + const __m512i lhs_mat_23_01_sp1 = _mm512_shuffle_epi32(lhs_mat_23_01, (_MM_PERM_ENUM)160); //A02(8-11) A02(8-11) A03(8-11) A03(8-11) A02(8-11) A02(8-11) A03(8-11) A03(8-11) A02(8-11) A02(8-11) A03(8-11) A03(8-11) A02(8-11) A02(8-11) A03(8-11) A03(8-11) + const __m512i lhs_mat_01_02_sp1 = _mm512_shuffle_epi32(lhs_mat_01_02, (_MM_PERM_ENUM)160); //A00(16-19) A00(16-19) A01(16-19) A01(16-19) A00(16-19) A00(16-19) A01(16-19) A01(16-19) A00(16-19) A00(16-19) A01(16-19) A01(16-19) A00(16-19) A00(16-19) A01(16-19) A01(16-19) + const __m512i lhs_mat_23_02_sp1 = _mm512_shuffle_epi32(lhs_mat_23_02, (_MM_PERM_ENUM)160); //A02(16-19) A02(16-19) A03(16-19) A03(16-19) A02(16-19) A02(16-19) A03(16-19) A03(16-19) A02(16-19) A02(16-19) A03(16-19) A03(16-19) A02(16-19) A02(16-19) A03(16-19) A03(16-19) + const __m512i lhs_mat_01_03_sp1 = _mm512_shuffle_epi32(lhs_mat_01_03, (_MM_PERM_ENUM)160); //A00(24-27) A00(24-27) A01(24-27) A01(24-27) A00(24-27) A00(24-27) A01(24-27) A01(24-27) A00(24-27) A00(24-27) A01(24-27) A01(24-27) A00(24-27) A00(24-27) A01(24-27) A01(24-27) + const __m512i lhs_mat_23_03_sp1 = _mm512_shuffle_epi32(lhs_mat_23_03, (_MM_PERM_ENUM)160); //A02(24-27) A02(24-27) A03(24-27) A03(24-27) A02(24-27) A02(24-27) A03(24-27) A03(24-27) A02(24-27) A02(24-27) A03(24-27) A03(24-27) A02(24-27) A02(24-27) A03(24-27) A03(24-27) + + const __m512i lhs_mat_01_10_sp1 = _mm512_shuffle_epi32(lhs_mat_01_10, (_MM_PERM_ENUM)160); //A10(0-3) A10(0-3) A11(0-3) A11(0-3) A10(0-3) A10(0-3) A11(0-3) A11(0-3) A10(0-3) A10(0-3) A11(0-3) A11(0-3) A10(0-3) A10(0-3) A11(0-3) A11(0-3) + const __m512i lhs_mat_23_10_sp1 = _mm512_shuffle_epi32(lhs_mat_23_10, (_MM_PERM_ENUM)160); //A12(0-3) A12(0-3) A13(0-3) A13(0-3) A12(0-3) A12(0-3) A13(0-3) A13(0-3) A12(0-3) A12(0-3) A13(0-3) A13(0-3) A12(0-3) A12(0-3) A13(0-3) A13(0-3) + const __m512i lhs_mat_01_11_sp1 = _mm512_shuffle_epi32(lhs_mat_01_11, (_MM_PERM_ENUM)160); //A10(8-11) A10(8-11) A11(8-11) A11(8-11) A10(8-11) A10(8-11) A11(8-11) A11(8-11) A10(8-11) A10(8-11) A11(8-11) A11(8-11) A10(8-11) A10(8-11) A11(8-11) A11(8-11) + const __m512i lhs_mat_23_11_sp1 = _mm512_shuffle_epi32(lhs_mat_23_11, (_MM_PERM_ENUM)160); //A12(8-11) A12(8-11) A13(8-11) A13(8-11) A12(8-11) A12(8-11) A13(8-11) A13(8-11) A12(8-11) A12(8-11) A13(8-11) A13(8-11) A12(8-11) A12(8-11) A13(8-11) A13(8-11) + const __m512i lhs_mat_01_12_sp1 = _mm512_shuffle_epi32(lhs_mat_01_12, (_MM_PERM_ENUM)160); //A10(16-19) A10(16-19) A11(16-19) A11(16-19) A10(16-19) A10(16-19) A11(16-19) A11(16-19) A10(16-19) A10(16-19) A11(16-19) A11(16-19) A10(16-19) A10(16-19) A11(16-19) A11(16-19) + const __m512i lhs_mat_23_12_sp1 = _mm512_shuffle_epi32(lhs_mat_23_12, (_MM_PERM_ENUM)160); //A12(16-19) A12(16-19) A13(16-19) A13(16-19) A12(16-19) A12(16-19) A13(16-19) A13(16-19) A12(16-19) A12(16-19) A13(16-19) A13(16-19) A12(16-19) A12(16-19) A13(16-19) A13(16-19) + const __m512i lhs_mat_01_13_sp1 = _mm512_shuffle_epi32(lhs_mat_01_13, (_MM_PERM_ENUM)160); //A10(24-27) A10(24-27) A11(24-27) A11(24-27) A10(24-27) A10(24-27) A11(24-27) A11(24-27) A10(24-27) A10(24-27) A11(24-27) A11(24-27) A10(24-27) A10(24-27) A11(24-27) A11(24-27) + const __m512i 
lhs_mat_23_13_sp1 = _mm512_shuffle_epi32(lhs_mat_23_13, (_MM_PERM_ENUM)160); //A12(24-27) A12(24-27) A13(24-27) A13(24-27) A12(24-27) A12(24-27) A13(24-27) A13(24-27) A12(24-27) A12(24-27) A13(24-27) A13(24-27) A12(24-27) A12(24-27) A13(24-27) A13(24-27) + + const __m512i lhs_mat_01_00_sp2 = _mm512_shuffle_epi32(lhs_mat_01_00, (_MM_PERM_ENUM)245); //A00(4-7) A00(4-7) A01(4-7) A01(4-7) A00(4-7) A00(4-7) A01(4-7) A01(4-7) A00(4-7) A00(4-7) A01(4-7) A01(4-7) A00(4-7) A00(4-7) A01(4-7) A01(4-7) + const __m512i lhs_mat_23_00_sp2 = _mm512_shuffle_epi32(lhs_mat_23_00, (_MM_PERM_ENUM)245); //A02(4-7) A02(4-7) A03(4-7) A03(4-7) A02(4-7) A02(4-7) A03(4-7) A03(4-7) A02(4-7) A02(4-7) A03(4-7) A03(4-7) A02(4-7) A02(4-7) A03(4-7) A03(4-7) + const __m512i lhs_mat_01_01_sp2 = _mm512_shuffle_epi32(lhs_mat_01_01, (_MM_PERM_ENUM)245); //A00(12-15) A00(12-15) A01(12-15) A01(12-15) A00(12-15) A00(12-15) A01(12-15) A01(12-15) A00(12-15) A00(12-15) A01(12-15) A01(12-15) A00(12-15) A00(12-15) A01(12-15) A01(12-15) + const __m512i lhs_mat_23_01_sp2 = _mm512_shuffle_epi32(lhs_mat_23_01, (_MM_PERM_ENUM)245); //A02(12-15) A02(12-15) A03(12-15) A03(12-15) A02(12-15) A02(12-15) A03(12-15) A03(12-15) A02(12-15) A02(12-15) A03(12-15) A03(12-15) A02(12-15) A02(12-15) A03(12-15) A03(12-15) + const __m512i lhs_mat_01_02_sp2 = _mm512_shuffle_epi32(lhs_mat_01_02, (_MM_PERM_ENUM)245); //A00(20-23) A00(20-23) A01(20-23) A01(20-23) A00(20-23) A00(20-23) A01(20-23) A01(20-23) A00(20-23) A00(20-23) A01(20-23) A01(20-23) A00(20-23) A00(20-23) A01(20-23) A01(20-23) + const __m512i lhs_mat_23_02_sp2 = _mm512_shuffle_epi32(lhs_mat_23_02, (_MM_PERM_ENUM)245); //A02(20-23) A02(20-23) A03(20-23) A03(20-23) A02(20-23) A02(20-23) A03(20-23) A03(20-23) A02(20-23) A02(20-23) A03(20-23) A03(20-23) A02(20-23) A02(20-23) A03(20-23) A03(20-23) + const __m512i lhs_mat_01_03_sp2 = _mm512_shuffle_epi32(lhs_mat_01_03, (_MM_PERM_ENUM)245); //A00(28-31) A00(28-31) A01(28-31) A01(28-31) A00(28-31) A00(28-31) A01(28-31) A01(28-31) A00(28-31) A00(28-31) A01(28-31) A01(28-31) A00(28-31) A00(28-31) A01(28-31) A01(28-31) + const __m512i lhs_mat_23_03_sp2 = _mm512_shuffle_epi32(lhs_mat_23_03, (_MM_PERM_ENUM)245); //A02(28-31) A02(28-31) A03(28-31) A03(28-31) A02(28-31) A02(28-31) A03(28-31) A03(28-31) A02(28-31) A02(28-31) A03(28-31) A03(28-31) A02(28-31) A02(28-31) A03(28-31) A03(28-31) + + const __m512i lhs_mat_01_10_sp2 = _mm512_shuffle_epi32(lhs_mat_01_10, (_MM_PERM_ENUM)245); //A10(4-7) A10(4-7) A11(4-7) A11(4-7) A10(4-7) A10(4-7) A11(4-7) A11(4-7) A10(4-7) A10(4-7) A11(4-7) A11(4-7) A10(4-7) A10(4-7) A11(4-7) A11(4-7) + const __m512i lhs_mat_23_10_sp2 = _mm512_shuffle_epi32(lhs_mat_23_10, (_MM_PERM_ENUM)245); //A12(4-7) A12(4-7) A13(4-7) A13(4-7) A12(4-7) A12(4-7) A13(4-7) A13(4-7) A12(4-7) A12(4-7) A13(4-7) A13(4-7) A12(4-7) A12(4-7) A13(4-7) A13(4-7) + const __m512i lhs_mat_01_11_sp2 = _mm512_shuffle_epi32(lhs_mat_01_11, (_MM_PERM_ENUM)245); //A10(12-15) A10(12-15) A11(12-15) A11(12-15) A10(12-15) A10(12-15) A11(12-15) A11(12-15) A10(12-15) A10(12-15) A11(12-15) A11(12-15) A10(12-15) A10(12-15) A11(12-15) A11(12-15) + const __m512i lhs_mat_23_11_sp2 = _mm512_shuffle_epi32(lhs_mat_23_11, (_MM_PERM_ENUM)245); //A12(12-15) A12(12-15) A13(12-15) A13(12-15) A12(12-15) A12(12-15) A13(12-15) A13(12-15) A12(12-15) A12(12-15) A13(12-15) A13(12-15) A12(12-15) A12(12-15) A13(12-15) A13(12-15) + const __m512i lhs_mat_01_12_sp2 = _mm512_shuffle_epi32(lhs_mat_01_12, (_MM_PERM_ENUM)245); //A10(20-23) A10(20-23) A11(20-23) A11(20-23) A10(20-23) A10(20-23) A11(20-23) 
A11(20-23) A10(20-23) A10(20-23) A11(20-23) A11(20-23) A10(20-23) A10(20-23) A11(20-23) A11(20-23) + const __m512i lhs_mat_23_12_sp2 = _mm512_shuffle_epi32(lhs_mat_23_12, (_MM_PERM_ENUM)245); //A12(20-23) A12(20-23) A13(20-23) A13(20-23) A12(20-23) A12(20-23) A13(20-23) A13(20-23) A12(20-23) A12(20-23) A13(20-23) A13(20-23) A12(20-23) A12(20-23) A13(20-23) A13(20-23) + const __m512i lhs_mat_01_13_sp2 = _mm512_shuffle_epi32(lhs_mat_01_13, (_MM_PERM_ENUM)245); //A10(28-31) A10(28-31) A11(28-31) A11(28-31) A10(28-31) A10(28-31) A11(28-31) A11(28-31) A10(28-31) A10(28-31) A11(28-31) A11(28-31) A10(28-31) A10(28-31) A11(28-31) A11(28-31) + const __m512i lhs_mat_23_13_sp2 = _mm512_shuffle_epi32(lhs_mat_23_13, (_MM_PERM_ENUM)245); //A12(28-31) A12(28-31) A13(28-31) A13(28-31) A12(28-31) A12(28-31) A13(28-31) A13(28-31) A12(28-31) A12(28-31) A13(28-31) A13(28-31) A12(28-31) A12(28-31) A13(28-31) A13(28-31) + + // The values arranged in shuffle patterns are operated with dot product operation within 32 bit lane i.e corresponding bytes and multiplied and added into 32 bit integers within 32 bit lane + __m512i iacc_mat_00_0_sp1 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_03_sp1, lhs_mat_01_03_sp1), _mm512_maddubs_epi16(rhs_mat_014589CD_02_sp1, lhs_mat_01_02_sp1)), _mm512_maddubs_epi16(rhs_mat_014589CD_01_sp1, lhs_mat_01_01_sp1)), _mm512_maddubs_epi16(rhs_mat_014589CD_00_sp1, lhs_mat_01_00_sp1)); + __m512i iacc_mat_01_0_sp1 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_03_sp1, lhs_mat_01_03_sp1), _mm512_maddubs_epi16(rhs_mat_2367ABEF_02_sp1, lhs_mat_01_02_sp1)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_01_sp1, lhs_mat_01_01_sp1)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_00_sp1, lhs_mat_01_00_sp1)); + __m512i iacc_mat_10_0_sp1 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_03_sp1, lhs_mat_23_03_sp1), _mm512_maddubs_epi16(rhs_mat_014589CD_02_sp1, lhs_mat_23_02_sp1)), _mm512_maddubs_epi16(rhs_mat_014589CD_01_sp1, lhs_mat_23_01_sp1)), _mm512_maddubs_epi16(rhs_mat_014589CD_00_sp1, lhs_mat_23_00_sp1)); + __m512i iacc_mat_11_0_sp1 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_03_sp1, lhs_mat_23_03_sp1), _mm512_maddubs_epi16(rhs_mat_2367ABEF_02_sp1, lhs_mat_23_02_sp1)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_01_sp1, lhs_mat_23_01_sp1)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_00_sp1, lhs_mat_23_00_sp1)); + __m512i iacc_mat_00_1_sp1 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_13_sp1, lhs_mat_01_13_sp1), _mm512_maddubs_epi16(rhs_mat_014589CD_12_sp1, lhs_mat_01_12_sp1)), _mm512_maddubs_epi16(rhs_mat_014589CD_11_sp1, lhs_mat_01_11_sp1)), _mm512_maddubs_epi16(rhs_mat_014589CD_10_sp1, lhs_mat_01_10_sp1)); + __m512i iacc_mat_01_1_sp1 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_13_sp1, lhs_mat_01_13_sp1), _mm512_maddubs_epi16(rhs_mat_2367ABEF_12_sp1, lhs_mat_01_12_sp1)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_11_sp1, lhs_mat_01_11_sp1)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_10_sp1, lhs_mat_01_10_sp1)); + __m512i iacc_mat_10_1_sp1 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_13_sp1, lhs_mat_23_13_sp1), _mm512_maddubs_epi16(rhs_mat_014589CD_12_sp1, lhs_mat_23_12_sp1)), _mm512_maddubs_epi16(rhs_mat_014589CD_11_sp1, lhs_mat_23_11_sp1)), _mm512_maddubs_epi16(rhs_mat_014589CD_10_sp1, lhs_mat_23_10_sp1)); + __m512i 
iacc_mat_11_1_sp1 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_13_sp1, lhs_mat_23_13_sp1), _mm512_maddubs_epi16(rhs_mat_2367ABEF_12_sp1, lhs_mat_23_12_sp1)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_11_sp1, lhs_mat_23_11_sp1)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_10_sp1, lhs_mat_23_10_sp1)); + + __m512i iacc_mat_00_0_sp2 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_03_sp2, lhs_mat_01_03_sp2), _mm512_maddubs_epi16(rhs_mat_014589CD_02_sp2, lhs_mat_01_02_sp2)), _mm512_maddubs_epi16(rhs_mat_014589CD_01_sp2, lhs_mat_01_01_sp2)), _mm512_maddubs_epi16(rhs_mat_014589CD_00_sp2, lhs_mat_01_00_sp2)); + __m512i iacc_mat_01_0_sp2 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_03_sp2, lhs_mat_01_03_sp2), _mm512_maddubs_epi16(rhs_mat_2367ABEF_02_sp2, lhs_mat_01_02_sp2)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_01_sp2, lhs_mat_01_01_sp2)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_00_sp2, lhs_mat_01_00_sp2)); + __m512i iacc_mat_10_0_sp2 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_03_sp2, lhs_mat_23_03_sp2), _mm512_maddubs_epi16(rhs_mat_014589CD_02_sp2, lhs_mat_23_02_sp2)), _mm512_maddubs_epi16(rhs_mat_014589CD_01_sp2, lhs_mat_23_01_sp2)), _mm512_maddubs_epi16(rhs_mat_014589CD_00_sp2, lhs_mat_23_00_sp2)); + __m512i iacc_mat_11_0_sp2 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_03_sp2, lhs_mat_23_03_sp2), _mm512_maddubs_epi16(rhs_mat_2367ABEF_02_sp2, lhs_mat_23_02_sp2)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_01_sp2, lhs_mat_23_01_sp2)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_00_sp2, lhs_mat_23_00_sp2)); + __m512i iacc_mat_00_1_sp2 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_13_sp2, lhs_mat_01_13_sp2), _mm512_maddubs_epi16(rhs_mat_014589CD_12_sp2, lhs_mat_01_12_sp2)), _mm512_maddubs_epi16(rhs_mat_014589CD_11_sp2, lhs_mat_01_11_sp2)), _mm512_maddubs_epi16(rhs_mat_014589CD_10_sp2, lhs_mat_01_10_sp2)); + __m512i iacc_mat_01_1_sp2 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_13_sp2, lhs_mat_01_13_sp2), _mm512_maddubs_epi16(rhs_mat_2367ABEF_12_sp2, lhs_mat_01_12_sp2)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_11_sp2, lhs_mat_01_11_sp2)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_10_sp2, lhs_mat_01_10_sp2)); + __m512i iacc_mat_10_1_sp2 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_13_sp2, lhs_mat_23_13_sp2), _mm512_maddubs_epi16(rhs_mat_014589CD_12_sp2, lhs_mat_23_12_sp2)), _mm512_maddubs_epi16(rhs_mat_014589CD_11_sp2, lhs_mat_23_11_sp2)), _mm512_maddubs_epi16(rhs_mat_014589CD_10_sp2, lhs_mat_23_10_sp2)); + __m512i iacc_mat_11_1_sp2 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_13_sp2, lhs_mat_23_13_sp2), _mm512_maddubs_epi16(rhs_mat_2367ABEF_12_sp2, lhs_mat_23_12_sp2)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_11_sp2, lhs_mat_23_11_sp2)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_10_sp2, lhs_mat_23_10_sp2)); + + // Output of both shuffle patterns are added in order to sum dot product outputs of all 32 values in block + __m512i iacc_mat_00_0 = _mm512_add_epi16(iacc_mat_00_0_sp1, iacc_mat_00_0_sp2); + __m512i iacc_mat_01_0 = _mm512_add_epi16(iacc_mat_01_0_sp1, iacc_mat_01_0_sp2); + __m512i iacc_mat_10_0 = _mm512_add_epi16(iacc_mat_10_0_sp1, iacc_mat_10_0_sp2); + __m512i iacc_mat_11_0 = _mm512_add_epi16(iacc_mat_11_0_sp1, 
iacc_mat_11_0_sp2); + + __m512i iacc_mat_00_1 = _mm512_add_epi16(iacc_mat_00_1_sp1, iacc_mat_00_1_sp2); + __m512i iacc_mat_01_1 = _mm512_add_epi16(iacc_mat_01_1_sp1, iacc_mat_01_1_sp2); + __m512i iacc_mat_10_1 = _mm512_add_epi16(iacc_mat_10_1_sp1, iacc_mat_10_1_sp2); + __m512i iacc_mat_11_1 = _mm512_add_epi16(iacc_mat_11_1_sp1, iacc_mat_11_1_sp2); + + iacc_mat_00_0 = _mm512_madd_epi16(iacc_mat_00_0, scale_014589CD_0); + iacc_mat_01_0 = _mm512_madd_epi16(iacc_mat_01_0, scale_2367ABEF_0); + iacc_mat_10_0 = _mm512_madd_epi16(iacc_mat_10_0, scale_014589CD_0); + iacc_mat_11_0 = _mm512_madd_epi16(iacc_mat_11_0, scale_2367ABEF_0); + + iacc_mat_00_1 = _mm512_madd_epi16(iacc_mat_00_1, scale_014589CD_1); + iacc_mat_01_1 = _mm512_madd_epi16(iacc_mat_01_1, scale_2367ABEF_1); + iacc_mat_10_1 = _mm512_madd_epi16(iacc_mat_10_1, scale_014589CD_1); + iacc_mat_11_1 = _mm512_madd_epi16(iacc_mat_11_1, scale_2367ABEF_1); + + // Straighten out to make 4 row vectors (4 for each sub block which are accumulated together in the next step) + __m512i iacc_row_0_0 = _mm512_mask_blend_epi32(0xCCCC, iacc_mat_00_0, _mm512_shuffle_epi32(iacc_mat_01_0, (_MM_PERM_ENUM)78)); + __m512i iacc_row_1_0 = _mm512_mask_blend_epi32(0xCCCC, _mm512_shuffle_epi32(iacc_mat_00_0, (_MM_PERM_ENUM)78), iacc_mat_01_0); + __m512i iacc_row_2_0 = _mm512_mask_blend_epi32(0xCCCC, iacc_mat_10_0, _mm512_shuffle_epi32(iacc_mat_11_0, (_MM_PERM_ENUM)78)); + __m512i iacc_row_3_0 = _mm512_mask_blend_epi32(0xCCCC, _mm512_shuffle_epi32(iacc_mat_10_0, (_MM_PERM_ENUM)78), iacc_mat_11_0); + __m512i iacc_row_0_1 = _mm512_mask_blend_epi32(0xCCCC, iacc_mat_00_1, _mm512_shuffle_epi32(iacc_mat_01_1, (_MM_PERM_ENUM)78)); + __m512i iacc_row_1_1 = _mm512_mask_blend_epi32(0xCCCC, _mm512_shuffle_epi32(iacc_mat_00_1, (_MM_PERM_ENUM)78), iacc_mat_01_1); + __m512i iacc_row_2_1 = _mm512_mask_blend_epi32(0xCCCC, iacc_mat_10_1, _mm512_shuffle_epi32(iacc_mat_11_1, (_MM_PERM_ENUM)78)); + __m512i iacc_row_3_1 = _mm512_mask_blend_epi32(0xCCCC, _mm512_shuffle_epi32(iacc_mat_10_1, (_MM_PERM_ENUM)78), iacc_mat_11_1); + + __m512i iacc_row_0 = _mm512_add_epi32(iacc_row_0_0, iacc_row_0_1); + __m512i iacc_row_1 = _mm512_add_epi32(iacc_row_1_0, iacc_row_1_1); + __m512i iacc_row_2 = _mm512_add_epi32(iacc_row_2_0, iacc_row_2_1); + __m512i iacc_row_3 = _mm512_add_epi32(iacc_row_3_0, iacc_row_3_1); + + // Load the scale(d) values for all the 4 Q8_k blocks and repeat it across lanes + const __m128 row_scale_f32_sse = _mm_load_ps(a_ptrs[rp][b].d); + const __m256 row_scale_f32_ymm = _mm256_set_m128(row_scale_f32_sse, row_scale_f32_sse); + const __m512 row_scale_f32 = _mm512_insertf32x8(_mm512_castps256_ps512(row_scale_f32_ymm), row_scale_f32_ymm, 1); + + // Multiply with appropriate scales and accumulate (for both d and dmin) below + acc_rows[rp * 4] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_0), _mm512_mul_ps(col_scale_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 0)), acc_rows[rp * 4]); + acc_rows[rp * 4 + 1] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_1), _mm512_mul_ps(col_scale_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 85)), acc_rows[rp * 4 + 1]); + acc_rows[rp * 4 + 2] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_2), _mm512_mul_ps(col_scale_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 170)), acc_rows[rp * 4 + 2]); + acc_rows[rp * 4 + 3] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_3), _mm512_mul_ps(col_scale_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 255)), acc_rows[rp * 4 + 3]); + + __m512i iacc_row_min_0 =
+
+                        __m512i iacc_row_min_0 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_hsum_0123_01, (_MM_PERM_ENUM)0), mins_01);
+                        __m512i iacc_row_min_1 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_hsum_0123_01, (_MM_PERM_ENUM)85), mins_01);
+                        __m512i iacc_row_min_2 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_hsum_0123_01, (_MM_PERM_ENUM)170), mins_01);
+                        __m512i iacc_row_min_3 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_hsum_0123_01, (_MM_PERM_ENUM)255), mins_01);
+
+                        acc_min_rows[rp * 4] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_min_0), _mm512_mul_ps(col_dmin_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 0)), acc_min_rows[rp * 4]);
+                        acc_min_rows[rp * 4 + 1] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_min_1), _mm512_mul_ps(col_dmin_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 85)), acc_min_rows[rp * 4 + 1]);
+                        acc_min_rows[rp * 4 + 2] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_min_2), _mm512_mul_ps(col_dmin_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 170)), acc_min_rows[rp * 4 + 2]);
+                        acc_min_rows[rp * 4 + 3] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_min_3), _mm512_mul_ps(col_dmin_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 255)), acc_min_rows[rp * 4 + 3]);
+                    }
+                }
+            }
+            // Store the accumulated values
+            for (int i = 0; i < 16; i++) {
+                _mm512_storeu_ps((float * )(s + ((y * 4 + i) * bs + x * 8)), _mm512_sub_ps(acc_rows[i], acc_min_rows[i]));
+            }
+        }
+    }
+
+    for (; y < nr / 4; y++) {
+
+        const block_q8_Kx4 * a_ptr = a_ptr_start + (y * nb);
+
+        // Take group of eight block_q4_kx8 structures at each pass of the loop and perform dot product operation
+        for (int64_t x = 0; x < anc / 8; x += 2) {
+
+            const block_q4_Kx8 * b_ptr_0 = b_ptr_start + ((x) * b_nb);
+            const block_q4_Kx8 * b_ptr_1 = b_ptr_start + ((x + 1) * b_nb);
+
+            // Master FP accumulators
+            __m512 acc_rows[4];
+            for (int i = 0; i < 4; i++) {
+                acc_rows[i] = _mm512_setzero_ps();
+            }
+
+            __m512 acc_min_rows[4];
+            for (int i = 0; i < 4; i++) {
+                acc_min_rows[i] = _mm512_setzero_ps();
+            }
+
+            // For super block
+            for (int64_t b = 0; b < nb; b++) {
+                // Scale values - Load the sixteen scale values from two block_q4_kx8 structures
+                const __m512 col_scale_f32 = GGML_F32Cx8x2_LOAD(b_ptr_0[b].d, b_ptr_1[b].d);
+
+                // dmin values - Load the sixteen dmin values from two block_q4_kx8 structures
+                const __m512 col_dmin_f32 = GGML_F32Cx8x2_LOAD(b_ptr_0[b].dmin, b_ptr_1[b].dmin);
+
+                // Loop to iterate over the eight sub blocks of a super block - two sub blocks are processed per iteration
+                for (int sb = 0; sb < QK_K / 64; sb++) {
+
+                    const __m256i rhs_raw_mat_0123_0 = _mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + sb * 256));
+                    const __m256i rhs_raw_mat_4567_0 = _mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + 32 + sb * 256));
+                    const __m256i rhs_raw_mat_0123_1 = _mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + 64 + sb * 256));
+                    const __m256i rhs_raw_mat_4567_1 = _mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + 96 + sb * 256));
+                    const __m256i rhs_raw_mat_0123_2 = _mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + 128 + sb * 256));
+                    const __m256i rhs_raw_mat_4567_2 = _mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + 160 + sb * 256));
+                    const __m256i rhs_raw_mat_0123_3 = _mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + 192 + sb * 256));
+                    const __m256i rhs_raw_mat_4567_3 = _mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + 224 + sb * 256));
+
+                    const __m256i rhs_raw_mat_89AB_0 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + sb * 256));
+                    const __m256i 
rhs_raw_mat_CDEF_0 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + 32 + sb * 256)); + const __m256i rhs_raw_mat_89AB_1 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + 64 + sb * 256)); + const __m256i rhs_raw_mat_CDEF_1 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + 96 + sb * 256)); + const __m256i rhs_raw_mat_89AB_2 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + 128 + sb * 256)); + const __m256i rhs_raw_mat_CDEF_2 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + 160 + sb * 256)); + const __m256i rhs_raw_mat_89AB_3 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + 192 + sb * 256)); + const __m256i rhs_raw_mat_CDEF_3 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + 224 + sb * 256)); + + const __m256i rhs_raw_mat_0145_0 = _mm256_blend_epi32(rhs_raw_mat_0123_0, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_0, requiredOrder), 240); + const __m256i rhs_raw_mat_2367_0 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_0, requiredOrder), rhs_raw_mat_4567_0, 240); + const __m256i rhs_raw_mat_0145_1 = _mm256_blend_epi32(rhs_raw_mat_0123_1, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_1, requiredOrder), 240); + const __m256i rhs_raw_mat_2367_1 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_1, requiredOrder), rhs_raw_mat_4567_1, 240); + const __m256i rhs_raw_mat_0145_2 = _mm256_blend_epi32(rhs_raw_mat_0123_2, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_2, requiredOrder), 240); + const __m256i rhs_raw_mat_2367_2 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_2, requiredOrder), rhs_raw_mat_4567_2, 240); + const __m256i rhs_raw_mat_0145_3 = _mm256_blend_epi32(rhs_raw_mat_0123_3, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_3, requiredOrder), 240); + const __m256i rhs_raw_mat_2367_3 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_3, requiredOrder), rhs_raw_mat_4567_3, 240); + + const __m256i rhs_raw_mat_89CD_0 = _mm256_blend_epi32(rhs_raw_mat_89AB_0, _mm256_permutevar8x32_epi32(rhs_raw_mat_CDEF_0, requiredOrder), 240); + const __m256i rhs_raw_mat_ABEF_0 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_89AB_0, requiredOrder), rhs_raw_mat_CDEF_0, 240); + const __m256i rhs_raw_mat_89CD_1 = _mm256_blend_epi32(rhs_raw_mat_89AB_1, _mm256_permutevar8x32_epi32(rhs_raw_mat_CDEF_1, requiredOrder), 240); + const __m256i rhs_raw_mat_ABEF_1 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_89AB_1, requiredOrder), rhs_raw_mat_CDEF_1, 240); + const __m256i rhs_raw_mat_89CD_2 = _mm256_blend_epi32(rhs_raw_mat_89AB_2, _mm256_permutevar8x32_epi32(rhs_raw_mat_CDEF_2, requiredOrder), 240); + const __m256i rhs_raw_mat_ABEF_2 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_89AB_2, requiredOrder), rhs_raw_mat_CDEF_2, 240); + const __m256i rhs_raw_mat_89CD_3 = _mm256_blend_epi32(rhs_raw_mat_89AB_3, _mm256_permutevar8x32_epi32(rhs_raw_mat_CDEF_3, requiredOrder), 240); + const __m256i rhs_raw_mat_ABEF_3 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_89AB_3, requiredOrder), rhs_raw_mat_CDEF_3, 240); + + const __m512i rhs_raw_mat_014589CD_0 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_0145_0), rhs_raw_mat_89CD_0, 1); + const __m512i rhs_raw_mat_2367ABEF_0 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_2367_0), rhs_raw_mat_ABEF_0, 1); + const __m512i rhs_raw_mat_014589CD_1 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_0145_1), rhs_raw_mat_89CD_1, 1); + const __m512i rhs_raw_mat_2367ABEF_1 = 
_mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_2367_1), rhs_raw_mat_ABEF_1, 1); + + const __m512i rhs_raw_mat_014589CD_2 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_0145_2), rhs_raw_mat_89CD_2, 1); + const __m512i rhs_raw_mat_2367ABEF_2 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_2367_2), rhs_raw_mat_ABEF_2, 1); + const __m512i rhs_raw_mat_014589CD_3 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_0145_3), rhs_raw_mat_89CD_3, 1); + const __m512i rhs_raw_mat_2367ABEF_3 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_2367_3), rhs_raw_mat_ABEF_3, 1); + + //4-bit -> 8-bit + const __m512i rhs_mat_014589CD_00 = _mm512_and_si512(rhs_raw_mat_014589CD_0, m4bexpanded); //B00(0-7) B01(0-7) B04(0-7) B05(0-7) B08(0-7) B09(0-7) B0C(0-7) B0D(0-7) + const __m512i rhs_mat_2367ABEF_00 = _mm512_and_si512(rhs_raw_mat_2367ABEF_0, m4bexpanded); //B02(0-7) B03(0-7) B06(0-7) B07(0-7) B0A(0-7) B0B(0-7) B0E(0-7) B0F(0-7) + const __m512i rhs_mat_014589CD_01 = _mm512_and_si512(rhs_raw_mat_014589CD_1, m4bexpanded); //B00(8-15) B01(8-15) B04(8-15) B05(8-15) B08(8-15) B09(8-15) B0C(8-15) B0D(8-15) + const __m512i rhs_mat_2367ABEF_01 = _mm512_and_si512(rhs_raw_mat_2367ABEF_1, m4bexpanded); //B02(8-15) B03(8-15) B06(8-15) B07(8-15) B0A(8-15) B0B(8-15) B0E(8-15) B0F(8-15) + + const __m512i rhs_mat_014589CD_02 = _mm512_and_si512(rhs_raw_mat_014589CD_2, m4bexpanded); //B00(16-23) B01(16-23) B04(16-23) B05(16-23) B08(16-23) B09(16-23) B0C(16-23) B0D(16-23) + const __m512i rhs_mat_2367ABEF_02 = _mm512_and_si512(rhs_raw_mat_2367ABEF_2, m4bexpanded); //B02(16-23) B03(16-23) B06(16-23) B07(16-23) B0A(16-23) B0B(16-23) B0E(16-23) B0F(16-23) + const __m512i rhs_mat_014589CD_03 = _mm512_and_si512(rhs_raw_mat_014589CD_3, m4bexpanded); //B00(24-31) B01(24-31) B04(24-31) B05(24-31) B08(24-31) B09(24-31) B0C(24-31) B0D(24-31) + const __m512i rhs_mat_2367ABEF_03 = _mm512_and_si512(rhs_raw_mat_2367ABEF_3, m4bexpanded); //B02(24-31) B03(24-31) B06(24-31) B07(24-31) B0A(24-31) B0B(24-31) B0E(24-31) B0F(24-31) + + const __m512i rhs_mat_014589CD_10 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_0, 4), m4bexpanded); //B10(0-7) B11(0-7) B14(0-7) B15(0-7) B18(0-7) B19(0-7) B1C(0-7) B1D(0-7) + const __m512i rhs_mat_2367ABEF_10 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_0, 4), m4bexpanded); //B12(0-7) B13(0-7) B16(0-7) B17(0-7) B1A(0-7) B1B(0-7) B1E(0-7) B1F(0-7) + const __m512i rhs_mat_014589CD_11 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_1, 4), m4bexpanded); //B10(8-15) B11(8-15) B14(8-15) B15(8-15) B18(8-15) B19(8-15) B1C(8-15) B1D(8-15) + const __m512i rhs_mat_2367ABEF_11 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_1, 4), m4bexpanded); //B12(8-15) B13(8-15) B16(8-15) B17(8-15) B1A(8-15) B1B(8-15) B1E(8-15) B1F(8-15) + + const __m512i rhs_mat_014589CD_12 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_2, 4), m4bexpanded); //B10(16-23) B11(16-23) B14(16-23) B15(16-23) B18(16-23) B19(16-23) B1C(16-23) B1D(16-23) + const __m512i rhs_mat_2367ABEF_12 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_2, 4), m4bexpanded); //B12(16-23) B13(16-23) B16(16-23) B17(16-23) B1A(16-23) B1B(16-23) B1E(16-23) B1F(16-23) + const __m512i rhs_mat_014589CD_13 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_3, 4), m4bexpanded); //B10(24-31) B11(24-31) B14(24-31) B15(24-31) B18(24-31) B19(24-31) B1C(24-31) B1D(24-31) + const __m512i rhs_mat_2367ABEF_13 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_3, 4), m4bexpanded); 
//B12(24-31) B13(24-31) B16(24-31) B17(24-31) B1A(24-31) B1B(24-31) B1E(24-31) B1F(24-31) + + // Shuffle pattern one - right side input + const __m512i rhs_mat_014589CD_00_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_00, (_MM_PERM_ENUM)136); //B00(0-3) B01(0-3) B00(0-3) B01(0-3) B04(0-3) B05(0-3) B04(0-3) B05(0-3) B08(0-3) B09(0-3) B08(0-3) B09(0-3) B0C(0-3) B0D(0-3) B0C(0-3) B0D(0-3) + const __m512i rhs_mat_2367ABEF_00_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_00, (_MM_PERM_ENUM)136); //B02(0-3) B03(0-3) B02(0-3) B03(0-3) B06(0-3) B07(0-3) B06(0-3) B07(0-3) B0A(0-3) B0B(0-3) B0A(0-3) B0B(0-3) B0E(0-3) B0F(0-3) B0E(0-3) B0F(0-3) + const __m512i rhs_mat_014589CD_01_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_01, (_MM_PERM_ENUM)136); //B00(8-11) B01(8-11) B00(8-11) B01(8-11) B04(8-11) B05(8-11) B04(8-11) B05(8-11) B08(8-11) B09(8-11) B08(8-11) B09(8-11) B0C(8-11) B0D(8-11) B0C(8-11) B0D(8-11) + const __m512i rhs_mat_2367ABEF_01_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_01, (_MM_PERM_ENUM)136); //B02(8-11) B03(8-11) B02(8-11) B03(8-11) B06(8-11) B07(8-11) B06(8-11) B07(8-11) B0A(8-11) B0B(8-11) B0A(8-11) B0B(8-11) B0E(8-11) B0F(8-11) B0E(8-11) B0F(8-11) + const __m512i rhs_mat_014589CD_02_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_02, (_MM_PERM_ENUM)136); //B00(16-19) B01(16-19) B00(16-19) B01(16-19) B04(16-19) B05(16-19) B04(16-19) B05(16-19) B08(16-19) B09(16-19) B08(16-19) B09(16-19) B0C(16-19) B0D(16-19) B0C(16-19) B0D(16-19) + const __m512i rhs_mat_2367ABEF_02_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_02, (_MM_PERM_ENUM)136); //B02(16-19) B03(16-19) B02(16-19) B03(16-19) B06(16-19) B07(16-19) B06(16-19) B07(16-19) B0A(16-19) B0B(16-19) B0A(16-19) B0B(16-19) B0E(16-19) B0F(16-19) B0E(16-19) B0F(16-19) + const __m512i rhs_mat_014589CD_03_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_03, (_MM_PERM_ENUM)136); //B00(24-27) B01(24-27) B00(24-27) B01(24-27) B04(24-27) B05(24-27) B04(24-27) B05(24-27) B08(24-27) B09(24-27) B08(24-27) B09(24-27) B0C(24-27) B0D(24-27) B0C(24-27) B0D(24-27) + const __m512i rhs_mat_2367ABEF_03_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_03, (_MM_PERM_ENUM)136); //B02(24-27) B03(24-27) B02(24-27) B03(24-27) B06(24-27) B07(24-27) B06(24-27) B07(24-27) B0A(24-27) B0B(24-27) B0A(24-27) B0B(24-27) B0E(24-27) B0F(24-27) B0E(24-27) B0F(24-27) + + const __m512i rhs_mat_014589CD_10_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_10, (_MM_PERM_ENUM)136); //B10(0-3) B11(0-3) B10(0-3) B11(0-3) B14(0-3) B15(0-3) B14(0-3) B15(0-3) B18(0-3) B19(0-3) B18(0-3) B19(0-3) B1C(0-3) B1D(0-3) B1C(0-3) B1D(0-3) + const __m512i rhs_mat_2367ABEF_10_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_10, (_MM_PERM_ENUM)136); //B12(0-3) B13(0-3) B12(0-3) B13(0-3) B16(0-3) B17(0-3) B16(0-3) B17(0-3) B1A(0-3) B1B(0-3) B1A(0-3) B1B(0-3) B1E(0-3) B1F(0-3) B1E(0-3) B1F(0-3) + const __m512i rhs_mat_014589CD_11_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_11, (_MM_PERM_ENUM)136); //B10(8-11) B11(8-11) B10(8-11) B11(8-11) B14(8-11) B15(8-11) B14(8-11) B15(8-11) B18(8-11) B19(8-11) B18(8-11) B19(8-11) B1C(8-11) B1D(8-11) B1C(8-11) B1D(8-11) + const __m512i rhs_mat_2367ABEF_11_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_11, (_MM_PERM_ENUM)136); //B12(8-11) B13(8-11) B12(8-11) B13(8-11) B16(8-11) B17(8-11) B16(8-11) B17(8-11) B1A(8-11) B1B(8-11) B1A(8-11) B1B(8-11) B1E(8-11) B1F(8-11) B1E(8-11) B1F(8-11) + const __m512i rhs_mat_014589CD_12_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_12, (_MM_PERM_ENUM)136); //B10(16-19) B11(16-19) B10(16-19) B11(16-19) B14(16-19) B15(16-19) B14(16-19) B15(16-19) B18(16-19) 
B19(16-19) B18(16-19) B19(16-19) B1C(16-19) B1D(16-19) B1C(16-19) B1D(16-19)
+                    const __m512i rhs_mat_2367ABEF_12_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_12, (_MM_PERM_ENUM)136); //B12(16-19) B13(16-19) B12(16-19) B13(16-19) B16(16-19) B17(16-19) B16(16-19) B17(16-19) B1A(16-19) B1B(16-19) B1A(16-19) B1B(16-19) B1E(16-19) B1F(16-19) B1E(16-19) B1F(16-19)
+                    const __m512i rhs_mat_014589CD_13_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_13, (_MM_PERM_ENUM)136); //B10(24-27) B11(24-27) B10(24-27) B11(24-27) B14(24-27) B15(24-27) B14(24-27) B15(24-27) B18(24-27) B19(24-27) B18(24-27) B19(24-27) B1C(24-27) B1D(24-27) B1C(24-27) B1D(24-27)
+                    const __m512i rhs_mat_2367ABEF_13_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_13, (_MM_PERM_ENUM)136); //B12(24-27) B13(24-27) B12(24-27) B13(24-27) B16(24-27) B17(24-27) B16(24-27) B17(24-27) B1A(24-27) B1B(24-27) B1A(24-27) B1B(24-27) B1E(24-27) B1F(24-27) B1E(24-27) B1F(24-27)
+
+                    // Shuffle pattern two - right side input
+                    const __m512i rhs_mat_014589CD_00_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_00, (_MM_PERM_ENUM)221); //B00(4-7) B01(4-7) B00(4-7) B01(4-7) B04(4-7) B05(4-7) B04(4-7) B05(4-7) B08(4-7) B09(4-7) B08(4-7) B09(4-7) B0C(4-7) B0D(4-7) B0C(4-7) B0D(4-7)
+                    const __m512i rhs_mat_2367ABEF_00_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_00, (_MM_PERM_ENUM)221); //B02(4-7) B03(4-7) B02(4-7) B03(4-7) B06(4-7) B07(4-7) B06(4-7) B07(4-7) B0A(4-7) B0B(4-7) B0A(4-7) B0B(4-7) B0E(4-7) B0F(4-7) B0E(4-7) B0F(4-7)
+                    const __m512i rhs_mat_014589CD_01_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_01, (_MM_PERM_ENUM)221); //B00(12-15) B01(12-15) B00(12-15) B01(12-15) B04(12-15) B05(12-15) B04(12-15) B05(12-15) B08(12-15) B09(12-15) B08(12-15) B09(12-15) B0C(12-15) B0D(12-15) B0C(12-15) B0D(12-15)
+                    const __m512i rhs_mat_2367ABEF_01_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_01, (_MM_PERM_ENUM)221); //B02(12-15) B03(12-15) B02(12-15) B03(12-15) B06(12-15) B07(12-15) B06(12-15) B07(12-15) B0A(12-15) B0B(12-15) B0A(12-15) B0B(12-15) B0E(12-15) B0F(12-15) B0E(12-15) B0F(12-15)
+                    const __m512i rhs_mat_014589CD_02_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_02, (_MM_PERM_ENUM)221); //B00(20-23) B01(20-23) B00(20-23) B01(20-23) B04(20-23) B05(20-23) B04(20-23) B05(20-23) B08(20-23) B09(20-23) B08(20-23) B09(20-23) B0C(20-23) B0D(20-23) B0C(20-23) B0D(20-23)
+                    const __m512i rhs_mat_2367ABEF_02_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_02, (_MM_PERM_ENUM)221); //B02(20-23) B03(20-23) B02(20-23) B03(20-23) B06(20-23) B07(20-23) B06(20-23) B07(20-23) B0A(20-23) B0B(20-23) B0A(20-23) B0B(20-23) B0E(20-23) B0F(20-23) B0E(20-23) B0F(20-23)
+                    const __m512i rhs_mat_014589CD_03_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_03, (_MM_PERM_ENUM)221); //B00(28-31) B01(28-31) B00(28-31) B01(28-31) B04(28-31) B05(28-31) B04(28-31) B05(28-31) B08(28-31) B09(28-31) B08(28-31) B09(28-31) B0C(28-31) B0D(28-31) B0C(28-31) B0D(28-31)
+                    const __m512i rhs_mat_2367ABEF_03_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_03, (_MM_PERM_ENUM)221); //B02(28-31) B03(28-31) B02(28-31) B03(28-31) B06(28-31) B07(28-31) B06(28-31) B07(28-31) B0A(28-31) B0B(28-31) B0A(28-31) B0B(28-31) B0E(28-31) B0F(28-31) B0E(28-31) B0F(28-31)
+
+                    const __m512i rhs_mat_014589CD_10_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_10, (_MM_PERM_ENUM)221); //B10(4-7) B11(4-7) B10(4-7) B11(4-7) B14(4-7) B15(4-7) B14(4-7) B15(4-7) B18(4-7) B19(4-7) B18(4-7) B19(4-7) B1C(4-7) B1D(4-7) B1C(4-7) B1D(4-7)
+                    const __m512i rhs_mat_2367ABEF_10_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_10, (_MM_PERM_ENUM)221); //B12(4-7) B13(4-7)
B12(4-7) B13(4-7) B16(4-7) B17(4-7) B16(4-7) B17(4-7) B1A(4-7) B1B(4-7) B1A(4-7) B1B(4-7) B1E(4-7) B1F(4-7) B1E(4-7) B1F(4-7) + const __m512i rhs_mat_014589CD_11_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_11, (_MM_PERM_ENUM)221); //B10(12-15) B11(12-15) B10(12-15) B11(12-15) B14(12-15) B15(12-15) B14(12-15) B15(12-15) B18(12-15) B19(12-15) B18(12-15) B19(12-15) B1C(12-15) B1D(12-15) B1C(12-15) B1D(12-15) + const __m512i rhs_mat_2367ABEF_11_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_11, (_MM_PERM_ENUM)221); //B12(12-15) B13(12-15) B12(12-15) B13(12-15) B16(12-15) B17(12-15) B16(12-15) B17(12-15) B1A(12-15) B1B(12-15) B1A(12-15) B1B(12-15) B1E(12-15) B1F(12-15) B1E(12-15) B1F(12-15) + const __m512i rhs_mat_014589CD_12_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_12, (_MM_PERM_ENUM)221); //B10(20-23) B11(20-23) B10(20-23) B11(20-23) B14(20-23) B15(20-23) B14(20-23) B15(20-23) B18(20-23) B19(20-23) B18(20-23) B19(20-23) B1C(20-23) B1D(20-23) B1C(20-23) B1D(20-23) + const __m512i rhs_mat_2367ABEF_12_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_12, (_MM_PERM_ENUM)221); //B12(20-23) B13(20-23) B12(20-23) B13(20-23) B16(20-23) B17(20-23) B16(20-23) B17(20-23) B1A(20-23) B1B(20-23) B1A(20-23) B1B(20-23) B1E(20-23) B1F(20-23) B1E(20-23) B1F(20-23) + const __m512i rhs_mat_014589CD_13_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_13, (_MM_PERM_ENUM)221); //B10(28-31) B11(28-31) B10(28-31) B11(28-31) B14(28-31) B15(28-31) B14(28-31) B15(28-31) B18(28-31) B19(28-31) B18(28-31) B19(28-31) B1C(28-31) B1D(28-31) B1C(28-31) B1D(28-31) + const __m512i rhs_mat_2367ABEF_13_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_13, (_MM_PERM_ENUM)221); //B12(28-31) B13(28-31) B12(28-31) B13(28-31) B16(28-31) B17(28-31) B16(28-31) B17(28-31) B1A(28-31) B1B(28-31) B1A(28-31) B1B(28-31) B1E(28-31) B1F(28-31) B1E(28-31) B1F(28-31) + + uint32_t utmp_00[4], utmp_01[4], utmp_10[4], utmp_11[4]; + + // Scales and Mins of corresponding sub blocks from different Q4_K structures are stored together + // The below block is for eg to extract first sub block's scales and mins from different Q4_K structures for the sb loop + memcpy(utmp_00, b_ptr_0[b].scales + 24 * sb, 12); + utmp_00[3] = ((utmp_00[2] >> 4) & kmask2) | (((utmp_00[1] >> 6) & kmask3) << 4); + const uint32_t uaux_00 = utmp_00[1] & kmask1; + utmp_00[1] = (utmp_00[2] & kmask2) | (((utmp_00[0] >> 6) & kmask3) << 4); + utmp_00[2] = uaux_00; + utmp_00[0] &= kmask1; + + // The below block is for eg to extract second sub block's scales and mins from different Q4_K structures for the sb loop + memcpy(utmp_01, b_ptr_0[b].scales + 12 + sb * 24, 12); + utmp_01[3] = ((utmp_01[2] >> 4) & kmask2) | (((utmp_01[1] >> 6) & kmask3) << 4); + const uint32_t uaux_01 = utmp_01[1] & kmask1; + utmp_01[1] = (utmp_01[2] & kmask2) | (((utmp_01[0] >> 6) & kmask3) << 4); + utmp_01[2] = uaux_01; + utmp_01[0] &= kmask1; + + // The below block is for eg to extract first sub block's scales and mins from different Q4_K structures for the sb loop + memcpy(utmp_10, b_ptr_1[b].scales + sb * 24, 12); + utmp_10[3] = ((utmp_10[2] >> 4) & kmask2) | (((utmp_10[1] >> 6) & kmask3) << 4); + const uint32_t uaux_10 = utmp_10[1] & kmask1; + utmp_10[1] = (utmp_10[2] & kmask2) | (((utmp_10[0] >> 6) & kmask3) << 4); + utmp_10[2] = uaux_10; + utmp_10[0] &= kmask1; + + // The below block is for eg to extract second sub block's scales and mins from different Q4_K structures for the sb loop + memcpy(utmp_11, b_ptr_1[b].scales + 12 + sb * 24, 12); + utmp_11[3] = ((utmp_11[2] >> 4) & kmask2) | (((utmp_11[1] >> 6) & kmask3) << 4); + 
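The utmp_*/kmask manipulation in this hunk (continued just below with uaux_11) is the bulk form of Q4_K's packed scale encoding: every super block stores eight scales and eight mins, 6 bits apiece, in a 12-byte field, and kmask1 = 0x3f3f3f3f, kmask2 = 0x0f0f0f0f, kmask3 = 0x03030303 are the byte-replicated masks that pry four of them apart at once. A scalar sketch of the same unpack, in the shape ggml uses for the non-interleaved Q4_K path:

#include <stdint.h>

// Unpack scale number j (0..7) into *d and min number j into *m from
// the 12-byte Q4_K scales field q. Entries 0-3 sit in the low 6 bits
// of bytes 0-7; entries 4-7 are split between a nibble in bytes 8-11
// and the top 2 bits of bytes 0-7.
static inline void get_scale_min_k4(int j, const uint8_t *q, uint8_t *d, uint8_t *m) {
    if (j < 4) {
        *d = q[j] & 63;
        *m = q[j + 4] & 63;
    } else {
        *d = (q[j + 4] & 0xF) | ((q[j - 4] >> 6) << 4);
        *m = (q[j + 4] >>  4) | ((q[j] >> 6) << 4);
    }
}

After the dword swizzle in each utmp_* block, utmp[0..1] hold the eight 8-bit scales and utmp[2..3] the eight mins, ready for the _mm*_set_epi32 / _mm*_cvtepu8_epi16 broadcasts that follow.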
const uint32_t uaux_11 = utmp_11[1] & kmask1; + utmp_11[1] = (utmp_11[2] & kmask2) | (((utmp_11[0] >> 6) & kmask3) << 4); + utmp_11[2] = uaux_11; + utmp_11[0] &= kmask1; + + // Scales of first sub block in the sb loop + const __m256i mins_and_scales_0 = _mm256_set_epi32(utmp_10[3], utmp_10[2], utmp_10[1], utmp_10[0], utmp_00[3], utmp_00[2], utmp_00[1], utmp_00[0]); + const __m512i scales_0 = _mm512_cvtepu8_epi16(_mm256_unpacklo_epi8(mins_and_scales_0, mins_and_scales_0)); + + // Scales of second sub block in the sb loop + const __m256i mins_and_scales_1 = _mm256_set_epi32(utmp_11[3], utmp_11[2], utmp_11[1], utmp_11[0], utmp_01[3], utmp_01[2], utmp_01[1], utmp_01[0]); + const __m512i scales_1 = _mm512_cvtepu8_epi16(_mm256_unpacklo_epi8(mins_and_scales_1, mins_and_scales_1)); + + // Mins of first and second sub block of Q4_K block are arranged side by side + const __m512i mins_01 = _mm512_cvtepu8_epi16(_mm256_unpacklo_epi8(_mm256_shuffle_epi32(mins_and_scales_0, 78), _mm256_shuffle_epi32(mins_and_scales_1, 78))); + + const __m512i scale_014589CD_0 = _mm512_shuffle_epi32(scales_0, (_MM_PERM_ENUM)68); + const __m512i scale_2367ABEF_0 = _mm512_shuffle_epi32(scales_0, (_MM_PERM_ENUM)238); + + const __m512i scale_014589CD_1 = _mm512_shuffle_epi32(scales_1, (_MM_PERM_ENUM)68); + const __m512i scale_2367ABEF_1 = _mm512_shuffle_epi32(scales_1, (_MM_PERM_ENUM)238); + + // Load the four block_q8_k quantized values interleaved with each other in chunks of eight bytes - A0,A1,A2,A3 + // Loaded as set of 128 bit vectors and repeated into a 256 bit vector + __m256i lhs_mat_ymm_0123_00 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 256 * sb))); + __m256i lhs_mat_ymm_01_00 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_00, lhs_mat_ymm_0123_00, 0); + __m256i lhs_mat_ymm_23_00 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_00, lhs_mat_ymm_0123_00, 17); + __m256i lhs_mat_ymm_0123_01 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 32 + 256 * sb))); + __m256i lhs_mat_ymm_01_01 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_01, lhs_mat_ymm_0123_01, 0); + __m256i lhs_mat_ymm_23_01 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_01, lhs_mat_ymm_0123_01, 17); + __m256i lhs_mat_ymm_0123_02 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 64 + 256 * sb))); + __m256i lhs_mat_ymm_01_02 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_02, lhs_mat_ymm_0123_02, 0); + __m256i lhs_mat_ymm_23_02 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_02, lhs_mat_ymm_0123_02, 17); + __m256i lhs_mat_ymm_0123_03 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 96 + 256 * sb))); + __m256i lhs_mat_ymm_01_03 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_03, lhs_mat_ymm_0123_03, 0); + __m256i lhs_mat_ymm_23_03 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_03, lhs_mat_ymm_0123_03, 17); + __m256i lhs_mat_ymm_0123_10 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 128 + 256 * sb))); + __m256i lhs_mat_ymm_01_10 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_10, lhs_mat_ymm_0123_10, 0); + __m256i lhs_mat_ymm_23_10 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_10, lhs_mat_ymm_0123_10, 17); + __m256i lhs_mat_ymm_0123_11 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 160 + 256 * sb))); + __m256i lhs_mat_ymm_01_11 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_11, lhs_mat_ymm_0123_11, 0); + __m256i lhs_mat_ymm_23_11 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_11, lhs_mat_ymm_0123_11, 17); + __m256i lhs_mat_ymm_0123_12 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 192 + 256 * sb))); + __m256i lhs_mat_ymm_01_12 = 
_mm256_permute2f128_si256(lhs_mat_ymm_0123_12, lhs_mat_ymm_0123_12, 0); + __m256i lhs_mat_ymm_23_12 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_12, lhs_mat_ymm_0123_12, 17); + __m256i lhs_mat_ymm_0123_13 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 224 + 256 * sb))); + __m256i lhs_mat_ymm_01_13 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_13, lhs_mat_ymm_0123_13, 0); + __m256i lhs_mat_ymm_23_13 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_13, lhs_mat_ymm_0123_13, 17); + + //Loaded as set of 128 bit vectors and repeated and stored into a 256 bit vector before again repeating into a 512 bit vector + __m512i lhs_mat_01_00 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_00), lhs_mat_ymm_01_00, 1); + __m512i lhs_mat_23_00 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_00), lhs_mat_ymm_23_00, 1); + __m512i lhs_mat_01_01 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_01), lhs_mat_ymm_01_01, 1); + __m512i lhs_mat_23_01 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_01), lhs_mat_ymm_23_01, 1); + __m512i lhs_mat_01_02 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_02), lhs_mat_ymm_01_02, 1); + __m512i lhs_mat_23_02 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_02), lhs_mat_ymm_23_02, 1); + __m512i lhs_mat_01_03 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_03), lhs_mat_ymm_01_03, 1); + __m512i lhs_mat_23_03 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_03), lhs_mat_ymm_23_03, 1); + + __m512i lhs_mat_01_10 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_10), lhs_mat_ymm_01_10, 1); + __m512i lhs_mat_23_10 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_10), lhs_mat_ymm_23_10, 1); + __m512i lhs_mat_01_11 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_11), lhs_mat_ymm_01_11, 1); + __m512i lhs_mat_23_11 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_11), lhs_mat_ymm_23_11, 1); + __m512i lhs_mat_01_12 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_12), lhs_mat_ymm_01_12, 1); + __m512i lhs_mat_23_12 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_12), lhs_mat_ymm_23_12, 1); + __m512i lhs_mat_01_13 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_13), lhs_mat_ymm_01_13, 1); + __m512i lhs_mat_23_13 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_13), lhs_mat_ymm_23_13, 1); + + // Bsums are loaded - four bsums are loaded (for two sub blocks) for the different Q8_K blocks + __m256i lhs_bsums_0123_01 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].bsums + 16 * sb))); + __m256i lhs_bsums_hsum_ymm_0123_01 = _mm256_castsi128_si256(_mm_hadd_epi16(_mm256_castsi256_si128(lhs_bsums_0123_01), _mm256_extractf128_si256(lhs_bsums_0123_01, 1))); + lhs_bsums_hsum_ymm_0123_01 = _mm256_permute2x128_si256(lhs_bsums_hsum_ymm_0123_01, lhs_bsums_hsum_ymm_0123_01, 0); + __m512i lhs_bsums_hsum_0123_01 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_bsums_hsum_ymm_0123_01), lhs_bsums_hsum_ymm_0123_01, 1); + + // Shuffle pattern one - left side input + const __m512i lhs_mat_01_00_sp1 = _mm512_shuffle_epi32(lhs_mat_01_00, (_MM_PERM_ENUM)160); //A00(0-3) A00(0-3) A01(0-3) A01(0-3) A00(0-3) A00(0-3) A01(0-3) A01(0-3) A00(0-3) A00(0-3) A01(0-3) A01(0-3) A00(0-3) A00(0-3) A01(0-3) A01(0-3) + const __m512i lhs_mat_23_00_sp1 = _mm512_shuffle_epi32(lhs_mat_23_00, (_MM_PERM_ENUM)160); //A02(0-3) A02(0-3) A03(0-3) A03(0-3) A02(0-3) A02(0-3) A03(0-3) A03(0-3) A02(0-3) A02(0-3) A03(0-3) A03(0-3) A02(0-3) A02(0-3) A03(0-3) A03(0-3) + const __m512i 
lhs_mat_01_01_sp1 = _mm512_shuffle_epi32(lhs_mat_01_01, (_MM_PERM_ENUM)160); //A00(8-11) A00(8-11) A01(8-11) A01(8-11) A00(8-11) A00(8-11) A01(8-11) A01(8-11) A00(8-11) A00(8-11) A01(8-11) A01(8-11) A00(8-11) A00(8-11) A01(8-11) A01(8-11) + const __m512i lhs_mat_23_01_sp1 = _mm512_shuffle_epi32(lhs_mat_23_01, (_MM_PERM_ENUM)160); //A02(8-11) A02(8-11) A03(8-11) A03(8-11) A02(8-11) A02(8-11) A03(8-11) A03(8-11) A02(8-11) A02(8-11) A03(8-11) A03(8-11) A02(8-11) A02(8-11) A03(8-11) A03(8-11) + const __m512i lhs_mat_01_02_sp1 = _mm512_shuffle_epi32(lhs_mat_01_02, (_MM_PERM_ENUM)160); //A00(16-19) A00(16-19) A01(16-19) A01(16-19) A00(16-19) A00(16-19) A01(16-19) A01(16-19) A00(16-19) A00(16-19) A01(16-19) A01(16-19) A00(16-19) A00(16-19) A01(16-19) A01(16-19) + const __m512i lhs_mat_23_02_sp1 = _mm512_shuffle_epi32(lhs_mat_23_02, (_MM_PERM_ENUM)160); //A02(16-19) A02(16-19) A03(16-19) A03(16-19) A02(16-19) A02(16-19) A03(16-19) A03(16-19) A02(16-19) A02(16-19) A03(16-19) A03(16-19) A02(16-19) A02(16-19) A03(16-19) A03(16-19) + const __m512i lhs_mat_01_03_sp1 = _mm512_shuffle_epi32(lhs_mat_01_03, (_MM_PERM_ENUM)160); //A00(24-27) A00(24-27) A01(24-27) A01(24-27) A00(24-27) A00(24-27) A01(24-27) A01(24-27) A00(24-27) A00(24-27) A01(24-27) A01(24-27) A00(24-27) A00(24-27) A01(24-27) A01(24-27) + const __m512i lhs_mat_23_03_sp1 = _mm512_shuffle_epi32(lhs_mat_23_03, (_MM_PERM_ENUM)160); //A02(24-27) A02(24-27) A03(24-27) A03(24-27) A02(24-27) A02(24-27) A03(24-27) A03(24-27) A02(24-27) A02(24-27) A03(24-27) A03(24-27) A02(24-27) A02(24-27) A03(24-27) A03(24-27) + + const __m512i lhs_mat_01_10_sp1 = _mm512_shuffle_epi32(lhs_mat_01_10, (_MM_PERM_ENUM)160); //A10(0-3) A10(0-3) A11(0-3) A11(0-3) A10(0-3) A10(0-3) A11(0-3) A11(0-3) A10(0-3) A10(0-3) A11(0-3) A11(0-3) A10(0-3) A10(0-3) A11(0-3) A11(0-3) + const __m512i lhs_mat_23_10_sp1 = _mm512_shuffle_epi32(lhs_mat_23_10, (_MM_PERM_ENUM)160); //A12(0-3) A12(0-3) A13(0-3) A13(0-3) A12(0-3) A12(0-3) A13(0-3) A13(0-3) A12(0-3) A12(0-3) A13(0-3) A13(0-3) A12(0-3) A12(0-3) A13(0-3) A13(0-3) + const __m512i lhs_mat_01_11_sp1 = _mm512_shuffle_epi32(lhs_mat_01_11, (_MM_PERM_ENUM)160); //A10(8-11) A10(8-11) A11(8-11) A11(8-11) A10(8-11) A10(8-11) A11(8-11) A11(8-11) A10(8-11) A10(8-11) A11(8-11) A11(8-11) A10(8-11) A10(8-11) A11(8-11) A11(8-11) + const __m512i lhs_mat_23_11_sp1 = _mm512_shuffle_epi32(lhs_mat_23_11, (_MM_PERM_ENUM)160); //A12(8-11) A12(8-11) A13(8-11) A13(8-11) A12(8-11) A12(8-11) A13(8-11) A13(8-11) A12(8-11) A12(8-11) A13(8-11) A13(8-11) A12(8-11) A12(8-11) A13(8-11) A13(8-11) + const __m512i lhs_mat_01_12_sp1 = _mm512_shuffle_epi32(lhs_mat_01_12, (_MM_PERM_ENUM)160); //A10(16-19) A10(16-19) A11(16-19) A11(16-19) A10(16-19) A10(16-19) A11(16-19) A11(16-19) A10(16-19) A10(16-19) A11(16-19) A11(16-19) A10(16-19) A10(16-19) A11(16-19) A11(16-19) + const __m512i lhs_mat_23_12_sp1 = _mm512_shuffle_epi32(lhs_mat_23_12, (_MM_PERM_ENUM)160); //A12(16-19) A12(16-19) A13(16-19) A13(16-19) A12(16-19) A12(16-19) A13(16-19) A13(16-19) A12(16-19) A12(16-19) A13(16-19) A13(16-19) A12(16-19) A12(16-19) A13(16-19) A13(16-19) + const __m512i lhs_mat_01_13_sp1 = _mm512_shuffle_epi32(lhs_mat_01_13, (_MM_PERM_ENUM)160); //A10(24-27) A10(24-27) A11(24-27) A11(24-27) A10(24-27) A10(24-27) A11(24-27) A11(24-27) A10(24-27) A10(24-27) A11(24-27) A11(24-27) A10(24-27) A10(24-27) A11(24-27) A11(24-27) + const __m512i lhs_mat_23_13_sp1 = _mm512_shuffle_epi32(lhs_mat_23_13, (_MM_PERM_ENUM)160); //A12(24-27) A12(24-27) A13(24-27) A13(24-27) A12(24-27) A12(24-27) A13(24-27) 
A13(24-27) A12(24-27) A12(24-27) A13(24-27) A13(24-27) A12(24-27) A12(24-27) A13(24-27) A13(24-27) + + const __m512i lhs_mat_01_00_sp2 = _mm512_shuffle_epi32(lhs_mat_01_00, (_MM_PERM_ENUM)245); //A00(4-7) A00(4-7) A01(4-7) A01(4-7) A00(4-7) A00(4-7) A01(4-7) A01(4-7) A00(4-7) A00(4-7) A01(4-7) A01(4-7) A00(4-7) A00(4-7) A01(4-7) A01(4-7) + const __m512i lhs_mat_23_00_sp2 = _mm512_shuffle_epi32(lhs_mat_23_00, (_MM_PERM_ENUM)245); //A02(4-7) A02(4-7) A03(4-7) A03(4-7) A02(4-7) A02(4-7) A03(4-7) A03(4-7) A02(4-7) A02(4-7) A03(4-7) A03(4-7) A02(4-7) A02(4-7) A03(4-7) A03(4-7) + const __m512i lhs_mat_01_01_sp2 = _mm512_shuffle_epi32(lhs_mat_01_01, (_MM_PERM_ENUM)245); //A00(12-15) A00(12-15) A01(12-15) A01(12-15) A00(12-15) A00(12-15) A01(12-15) A01(12-15) A00(12-15) A00(12-15) A01(12-15) A01(12-15) A00(12-15) A00(12-15) A01(12-15) A01(12-15) + const __m512i lhs_mat_23_01_sp2 = _mm512_shuffle_epi32(lhs_mat_23_01, (_MM_PERM_ENUM)245); //A02(12-15) A02(12-15) A03(12-15) A03(12-15) A02(12-15) A02(12-15) A03(12-15) A03(12-15) A02(12-15) A02(12-15) A03(12-15) A03(12-15) A02(12-15) A02(12-15) A03(12-15) A03(12-15) + const __m512i lhs_mat_01_02_sp2 = _mm512_shuffle_epi32(lhs_mat_01_02, (_MM_PERM_ENUM)245); //A00(20-23) A00(20-23) A01(20-23) A01(20-23) A00(20-23) A00(20-23) A01(20-23) A01(20-23) A00(20-23) A00(20-23) A01(20-23) A01(20-23) A00(20-23) A00(20-23) A01(20-23) A01(20-23) + const __m512i lhs_mat_23_02_sp2 = _mm512_shuffle_epi32(lhs_mat_23_02, (_MM_PERM_ENUM)245); //A02(20-23) A02(20-23) A03(20-23) A03(20-23) A02(20-23) A02(20-23) A03(20-23) A03(20-23) A02(20-23) A02(20-23) A03(20-23) A03(20-23) A02(20-23) A02(20-23) A03(20-23) A03(20-23) + const __m512i lhs_mat_01_03_sp2 = _mm512_shuffle_epi32(lhs_mat_01_03, (_MM_PERM_ENUM)245); //A00(28-31) A00(28-31) A01(28-31) A01(28-31) A00(28-31) A00(28-31) A01(28-31) A01(28-31) A00(28-31) A00(28-31) A01(28-31) A01(28-31) A00(28-31) A00(28-31) A01(28-31) A01(28-31) + const __m512i lhs_mat_23_03_sp2 = _mm512_shuffle_epi32(lhs_mat_23_03, (_MM_PERM_ENUM)245); //A02(28-31) A02(28-31) A03(28-31) A03(28-31) A02(28-31) A02(28-31) A03(28-31) A03(28-31) A02(28-31) A02(28-31) A03(28-31) A03(28-31) A02(28-31) A02(28-31) A03(28-31) A03(28-31) + + const __m512i lhs_mat_01_10_sp2 = _mm512_shuffle_epi32(lhs_mat_01_10, (_MM_PERM_ENUM)245); //A10(4-7) A10(4-7) A11(4-7) A11(4-7) A10(4-7) A10(4-7) A11(4-7) A11(4-7) A10(4-7) A10(4-7) A11(4-7) A11(4-7) A10(4-7) A10(4-7) A11(4-7) A11(4-7) + const __m512i lhs_mat_23_10_sp2 = _mm512_shuffle_epi32(lhs_mat_23_10, (_MM_PERM_ENUM)245); //A12(4-7) A12(4-7) A13(4-7) A13(4-7) A12(4-7) A12(4-7) A13(4-7) A13(4-7) A12(4-7) A12(4-7) A13(4-7) A13(4-7) A12(4-7) A12(4-7) A13(4-7) A13(4-7) + const __m512i lhs_mat_01_11_sp2 = _mm512_shuffle_epi32(lhs_mat_01_11, (_MM_PERM_ENUM)245); //A10(12-15) A10(12-15) A11(12-15) A11(12-15) A10(12-15) A10(12-15) A11(12-15) A11(12-15) A10(12-15) A10(12-15) A11(12-15) A11(12-15) A10(12-15) A10(12-15) A11(12-15) A11(12-15) + const __m512i lhs_mat_23_11_sp2 = _mm512_shuffle_epi32(lhs_mat_23_11, (_MM_PERM_ENUM)245); //A12(12-15) A12(12-15) A13(12-15) A13(12-15) A12(12-15) A12(12-15) A13(12-15) A13(12-15) A12(12-15) A12(12-15) A13(12-15) A13(12-15) A12(12-15) A12(12-15) A13(12-15) A13(12-15) + const __m512i lhs_mat_01_12_sp2 = _mm512_shuffle_epi32(lhs_mat_01_12, (_MM_PERM_ENUM)245); //A10(20-23) A10(20-23) A11(20-23) A11(20-23) A10(20-23) A10(20-23) A11(20-23) A11(20-23) A10(20-23) A10(20-23) A11(20-23) A11(20-23) A10(20-23) A10(20-23) A11(20-23) A11(20-23) + const __m512i lhs_mat_23_12_sp2 = 
_mm512_shuffle_epi32(lhs_mat_23_12, (_MM_PERM_ENUM)245); //A12(20-23) A12(20-23) A13(20-23) A13(20-23) A12(20-23) A12(20-23) A13(20-23) A13(20-23) A12(20-23) A12(20-23) A13(20-23) A13(20-23) A12(20-23) A12(20-23) A13(20-23) A13(20-23) + const __m512i lhs_mat_01_13_sp2 = _mm512_shuffle_epi32(lhs_mat_01_13, (_MM_PERM_ENUM)245); //A10(28-31) A10(28-31) A11(28-31) A11(28-31) A10(28-31) A10(28-31) A11(28-31) A11(28-31) A10(28-31) A10(28-31) A11(28-31) A11(28-31) A10(28-31) A10(28-31) A11(28-31) A11(28-31) + const __m512i lhs_mat_23_13_sp2 = _mm512_shuffle_epi32(lhs_mat_23_13, (_MM_PERM_ENUM)245); //A12(28-31) A12(28-31) A13(28-31) A13(28-31) A12(28-31) A12(28-31) A13(28-31) A13(28-31) A12(28-31) A12(28-31) A13(28-31) A13(28-31) A12(28-31) A12(28-31) A13(28-31) A13(28-31) + + // The values arranged in shuffle patterns are operated with dot product operation within 32 bit lane i.e corresponding bytes and multiplied and added into 32 bit integers within 32 bit lane + __m512i iacc_mat_00_0_sp1 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_03_sp1, lhs_mat_01_03_sp1), _mm512_maddubs_epi16(rhs_mat_014589CD_02_sp1, lhs_mat_01_02_sp1)), _mm512_maddubs_epi16(rhs_mat_014589CD_01_sp1, lhs_mat_01_01_sp1)), _mm512_maddubs_epi16(rhs_mat_014589CD_00_sp1, lhs_mat_01_00_sp1)); + __m512i iacc_mat_01_0_sp1 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_03_sp1, lhs_mat_01_03_sp1), _mm512_maddubs_epi16(rhs_mat_2367ABEF_02_sp1, lhs_mat_01_02_sp1)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_01_sp1, lhs_mat_01_01_sp1)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_00_sp1, lhs_mat_01_00_sp1)); + __m512i iacc_mat_10_0_sp1 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_03_sp1, lhs_mat_23_03_sp1), _mm512_maddubs_epi16(rhs_mat_014589CD_02_sp1, lhs_mat_23_02_sp1)), _mm512_maddubs_epi16(rhs_mat_014589CD_01_sp1, lhs_mat_23_01_sp1)), _mm512_maddubs_epi16(rhs_mat_014589CD_00_sp1, lhs_mat_23_00_sp1)); + __m512i iacc_mat_11_0_sp1 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_03_sp1, lhs_mat_23_03_sp1), _mm512_maddubs_epi16(rhs_mat_2367ABEF_02_sp1, lhs_mat_23_02_sp1)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_01_sp1, lhs_mat_23_01_sp1)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_00_sp1, lhs_mat_23_00_sp1)); + __m512i iacc_mat_00_1_sp1 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_13_sp1, lhs_mat_01_13_sp1), _mm512_maddubs_epi16(rhs_mat_014589CD_12_sp1, lhs_mat_01_12_sp1)), _mm512_maddubs_epi16(rhs_mat_014589CD_11_sp1, lhs_mat_01_11_sp1)), _mm512_maddubs_epi16(rhs_mat_014589CD_10_sp1, lhs_mat_01_10_sp1)); + __m512i iacc_mat_01_1_sp1 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_13_sp1, lhs_mat_01_13_sp1), _mm512_maddubs_epi16(rhs_mat_2367ABEF_12_sp1, lhs_mat_01_12_sp1)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_11_sp1, lhs_mat_01_11_sp1)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_10_sp1, lhs_mat_01_10_sp1)); + __m512i iacc_mat_10_1_sp1 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_13_sp1, lhs_mat_23_13_sp1), _mm512_maddubs_epi16(rhs_mat_014589CD_12_sp1, lhs_mat_23_12_sp1)), _mm512_maddubs_epi16(rhs_mat_014589CD_11_sp1, lhs_mat_23_11_sp1)), _mm512_maddubs_epi16(rhs_mat_014589CD_10_sp1, lhs_mat_23_10_sp1)); + __m512i iacc_mat_11_1_sp1 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_13_sp1, lhs_mat_23_13_sp1), 
_mm512_maddubs_epi16(rhs_mat_2367ABEF_12_sp1, lhs_mat_23_12_sp1)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_11_sp1, lhs_mat_23_11_sp1)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_10_sp1, lhs_mat_23_10_sp1)); + + __m512i iacc_mat_00_0_sp2 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_03_sp2, lhs_mat_01_03_sp2), _mm512_maddubs_epi16(rhs_mat_014589CD_02_sp2, lhs_mat_01_02_sp2)), _mm512_maddubs_epi16(rhs_mat_014589CD_01_sp2, lhs_mat_01_01_sp2)), _mm512_maddubs_epi16(rhs_mat_014589CD_00_sp2, lhs_mat_01_00_sp2)); + __m512i iacc_mat_01_0_sp2 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_03_sp2, lhs_mat_01_03_sp2), _mm512_maddubs_epi16(rhs_mat_2367ABEF_02_sp2, lhs_mat_01_02_sp2)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_01_sp2, lhs_mat_01_01_sp2)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_00_sp2, lhs_mat_01_00_sp2)); + __m512i iacc_mat_10_0_sp2 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_03_sp2, lhs_mat_23_03_sp2), _mm512_maddubs_epi16(rhs_mat_014589CD_02_sp2, lhs_mat_23_02_sp2)), _mm512_maddubs_epi16(rhs_mat_014589CD_01_sp2, lhs_mat_23_01_sp2)), _mm512_maddubs_epi16(rhs_mat_014589CD_00_sp2, lhs_mat_23_00_sp2)); + __m512i iacc_mat_11_0_sp2 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_03_sp2, lhs_mat_23_03_sp2), _mm512_maddubs_epi16(rhs_mat_2367ABEF_02_sp2, lhs_mat_23_02_sp2)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_01_sp2, lhs_mat_23_01_sp2)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_00_sp2, lhs_mat_23_00_sp2)); + __m512i iacc_mat_00_1_sp2 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_13_sp2, lhs_mat_01_13_sp2), _mm512_maddubs_epi16(rhs_mat_014589CD_12_sp2, lhs_mat_01_12_sp2)), _mm512_maddubs_epi16(rhs_mat_014589CD_11_sp2, lhs_mat_01_11_sp2)), _mm512_maddubs_epi16(rhs_mat_014589CD_10_sp2, lhs_mat_01_10_sp2)); + __m512i iacc_mat_01_1_sp2 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_13_sp2, lhs_mat_01_13_sp2), _mm512_maddubs_epi16(rhs_mat_2367ABEF_12_sp2, lhs_mat_01_12_sp2)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_11_sp2, lhs_mat_01_11_sp2)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_10_sp2, lhs_mat_01_10_sp2)); + __m512i iacc_mat_10_1_sp2 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_13_sp2, lhs_mat_23_13_sp2), _mm512_maddubs_epi16(rhs_mat_014589CD_12_sp2, lhs_mat_23_12_sp2)), _mm512_maddubs_epi16(rhs_mat_014589CD_11_sp2, lhs_mat_23_11_sp2)), _mm512_maddubs_epi16(rhs_mat_014589CD_10_sp2, lhs_mat_23_10_sp2)); + __m512i iacc_mat_11_1_sp2 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_13_sp2, lhs_mat_23_13_sp2), _mm512_maddubs_epi16(rhs_mat_2367ABEF_12_sp2, lhs_mat_23_12_sp2)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_11_sp2, lhs_mat_23_11_sp2)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_10_sp2, lhs_mat_23_10_sp2)); + + // Output of both shuffle patterns are added in order to sum dot product outputs of all 32 values in block + __m512i iacc_mat_00_0 = _mm512_add_epi16(iacc_mat_00_0_sp1, iacc_mat_00_0_sp2); + __m512i iacc_mat_01_0 = _mm512_add_epi16(iacc_mat_01_0_sp1, iacc_mat_01_0_sp2); + __m512i iacc_mat_10_0 = _mm512_add_epi16(iacc_mat_10_0_sp1, iacc_mat_10_0_sp2); + __m512i iacc_mat_11_0 = _mm512_add_epi16(iacc_mat_11_0_sp1, iacc_mat_11_0_sp2); + + __m512i iacc_mat_00_1 = _mm512_add_epi16(iacc_mat_00_1_sp1, iacc_mat_00_1_sp2); + __m512i iacc_mat_01_1 = 
_mm512_add_epi16(iacc_mat_01_1_sp1, iacc_mat_01_1_sp2);
+                    __m512i iacc_mat_10_1 = _mm512_add_epi16(iacc_mat_10_1_sp1, iacc_mat_10_1_sp2);
+                    __m512i iacc_mat_11_1 = _mm512_add_epi16(iacc_mat_11_1_sp1, iacc_mat_11_1_sp2);
+
+                    iacc_mat_00_0 = _mm512_madd_epi16(iacc_mat_00_0, scale_014589CD_0);
+                    iacc_mat_01_0 = _mm512_madd_epi16(iacc_mat_01_0, scale_2367ABEF_0);
+                    iacc_mat_10_0 = _mm512_madd_epi16(iacc_mat_10_0, scale_014589CD_0);
+                    iacc_mat_11_0 = _mm512_madd_epi16(iacc_mat_11_0, scale_2367ABEF_0);
+
+                    iacc_mat_00_1 = _mm512_madd_epi16(iacc_mat_00_1, scale_014589CD_1);
+                    iacc_mat_01_1 = _mm512_madd_epi16(iacc_mat_01_1, scale_2367ABEF_1);
+                    iacc_mat_10_1 = _mm512_madd_epi16(iacc_mat_10_1, scale_014589CD_1);
+                    iacc_mat_11_1 = _mm512_madd_epi16(iacc_mat_11_1, scale_2367ABEF_1);
+
+                    // Straighten out to make 4 row vectors (4 for each sub block which are accumulated together in the next step)
+                    __m512i iacc_row_0_0 = _mm512_mask_blend_epi32(0xCCCC, iacc_mat_00_0, _mm512_shuffle_epi32(iacc_mat_01_0, (_MM_PERM_ENUM)78));
+                    __m512i iacc_row_1_0 = _mm512_mask_blend_epi32(0xCCCC, _mm512_shuffle_epi32(iacc_mat_00_0, (_MM_PERM_ENUM)78), iacc_mat_01_0);
+                    __m512i iacc_row_2_0 = _mm512_mask_blend_epi32(0xCCCC, iacc_mat_10_0, _mm512_shuffle_epi32(iacc_mat_11_0, (_MM_PERM_ENUM)78));
+                    __m512i iacc_row_3_0 = _mm512_mask_blend_epi32(0xCCCC, _mm512_shuffle_epi32(iacc_mat_10_0, (_MM_PERM_ENUM)78), iacc_mat_11_0);
+                    __m512i iacc_row_0_1 = _mm512_mask_blend_epi32(0xCCCC, iacc_mat_00_1, _mm512_shuffle_epi32(iacc_mat_01_1, (_MM_PERM_ENUM)78));
+                    __m512i iacc_row_1_1 = _mm512_mask_blend_epi32(0xCCCC, _mm512_shuffle_epi32(iacc_mat_00_1, (_MM_PERM_ENUM)78), iacc_mat_01_1);
+                    __m512i iacc_row_2_1 = _mm512_mask_blend_epi32(0xCCCC, iacc_mat_10_1, _mm512_shuffle_epi32(iacc_mat_11_1, (_MM_PERM_ENUM)78));
+                    __m512i iacc_row_3_1 = _mm512_mask_blend_epi32(0xCCCC, _mm512_shuffle_epi32(iacc_mat_10_1, (_MM_PERM_ENUM)78), iacc_mat_11_1);
+
+                    __m512i iacc_row_0 = _mm512_add_epi32(iacc_row_0_0, iacc_row_0_1);
+                    __m512i iacc_row_1 = _mm512_add_epi32(iacc_row_1_0, iacc_row_1_1);
+                    __m512i iacc_row_2 = _mm512_add_epi32(iacc_row_2_0, iacc_row_2_1);
+                    __m512i iacc_row_3 = _mm512_add_epi32(iacc_row_3_0, iacc_row_3_1);
+
+                    // Load the scale(d) values for all the 4 Q8_k blocks and repeat it across lanes
+                    const __m128 row_scale_f32_sse = _mm_load_ps(a_ptr[b].d);
+                    const __m256 row_scale_f32_ymm = _mm256_set_m128(row_scale_f32_sse, row_scale_f32_sse);
+                    const __m512 row_scale_f32 = _mm512_insertf32x8(_mm512_castps256_ps512(row_scale_f32_ymm), row_scale_f32_ymm, 1);
+
+                    // Multiply with appropriate scales and accumulate (for both d and dmin) below
+                    acc_rows[0] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_0), _mm512_mul_ps(col_scale_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 0)), acc_rows[0]);
+                    acc_rows[1] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_1), _mm512_mul_ps(col_scale_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 85)), acc_rows[1]);
+                    acc_rows[2] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_2), _mm512_mul_ps(col_scale_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 170)), acc_rows[2]);
+                    acc_rows[3] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_3), _mm512_mul_ps(col_scale_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 255)), acc_rows[3]);
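For reference, everything this kernel accumulates per super block reduces to a short scalar expression: the d term (acc_rows) gathers each sub block's scale times its 32-wide dot product, while the dmin term (acc_min_rows, built from the Q8_K bsums just below) is subtracted at store time by the final _mm512_sub_ps. A scalar sketch under the plain, non-interleaved Q4_K/Q8_K layouts (the kernel itself reads the repacked block_q4_Kx8/block_q8_Kx4 variants but computes the same quantity); names are illustrative, and scales/mins are assumed unpacked as in the earlier aside:

#include <stdint.h>

// One Q4_K x Q8_K super block (QK_K == 256): returns
// d8 * (d * sum_sb(sc[sb] * dot_sb) - dmin * sum_sb(mn[sb] * bsum_sb)).
static float q4k_q8k_superblock(const uint8_t q4[128],   // 256 4-bit weights, low/high nibbles
                                const uint8_t sc[8],     // unpacked 6-bit scales
                                const uint8_t mn[8],     // unpacked 6-bit mins
                                float d, float dmin,     // Q4_K super-block scale factors
                                const int8_t q8[256],    // Q8_K quants
                                const int16_t bsums[16], // Q8_K sums over groups of 16
                                float d8) {              // Q8_K scale
    float acc = 0.0f, acc_min = 0.0f;
    for (int sb = 0; sb < 8; sb++) {
        int32_t dot = 0;
        for (int j = 0; j < 32; j++) {
            const uint8_t byte = q4[(sb / 2) * 32 + j];   // two sub blocks share each byte
            const int32_t w = (sb & 1) ? (byte >> 4) : (byte & 0x0F);
            dot += w * q8[sb * 32 + j];
        }
        acc     += (float)sc[sb] * (float)dot;
        acc_min += (float)mn[sb] * (float)(bsums[2 * sb] + bsums[2 * sb + 1]);
    }
    return d8 * (d * acc - dmin * acc_min);
}

The _mm_hadd_epi16 over bsums earlier in the loop is the vector form of bsums[2*sb] + bsums[2*sb+1]; it works because the per-sub-block min contribution only needs the sum of the activations, not their individual values.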
+
+                    __m512i iacc_row_min_0 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_hsum_0123_01, (_MM_PERM_ENUM)0), mins_01);
+                    __m512i iacc_row_min_1 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_hsum_0123_01, (_MM_PERM_ENUM)85), mins_01);
+                    __m512i iacc_row_min_2 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_hsum_0123_01, (_MM_PERM_ENUM)170), mins_01);
+                    __m512i iacc_row_min_3 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_hsum_0123_01, (_MM_PERM_ENUM)255), mins_01);
+
+                    acc_min_rows[0] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_min_0), _mm512_mul_ps(col_dmin_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 0)), acc_min_rows[0]);
+                    acc_min_rows[1] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_min_1), _mm512_mul_ps(col_dmin_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 85)), acc_min_rows[1]);
+                    acc_min_rows[2] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_min_2), _mm512_mul_ps(col_dmin_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 170)), acc_min_rows[2]);
+                    acc_min_rows[3] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_min_3), _mm512_mul_ps(col_dmin_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 255)), acc_min_rows[3]);
+                }
+            }
+            // Store accumulated values
+            for (int i = 0; i < 4; i++) {
+                _mm512_storeu_ps((float * )(s + ((y * 4 + i) * bs + x * 8)), _mm512_sub_ps(acc_rows[i], acc_min_rows[i]));
+            }
+        }
+    }
+    if (anc != nc) {
+        xstart = anc/8;
+        y = 0;
+    }
+#endif //AVX512F
+
+    // Take group of four block_q8_Kx4 structures at each pass of the loop and perform dot product operation
+    for (; y < anr / 4; y += 4) {
+
+        const block_q8_Kx4 * a_ptrs[4];
+
+        a_ptrs[0] = a_ptr_start + (y * nb);
+        for (int i = 0; i < 3; ++i) {
+            a_ptrs[i + 1] = a_ptrs[i] + nb;
+        }
+
+        // Take group of eight block_q4_kx8 structures at each pass of the loop and perform dot product operation
+        for (int64_t x = xstart; x < nc / 8; x++) {
+
+            const block_q4_Kx8 * b_ptr = b_ptr_start + (x * b_nb);
+
+            // Master FP accumulators
+            __m256 acc_rows[16];
+            for (int i = 0; i < 16; i++) {
+                acc_rows[i] = _mm256_setzero_ps();
+            }
+
+            __m256 acc_min_rows[16];
+            for (int i = 0; i < 16; i++) {
+                acc_min_rows[i] = _mm256_setzero_ps();
+            }
+
+            // For super block
+            for (int64_t b = 0; b < nb; b++) {
+
+                // Scale values - Load the eight scale values of block_q4_kx8
+                const __m256 col_scale_f32 = GGML_F32Cx8_LOAD(b_ptr[b].d);
+
+                // dmin values - Load the eight dmin values of block_q4_kx8
+                const __m256 col_dmin_f32 = GGML_F32Cx8_LOAD(b_ptr[b].dmin);
+
+                // Loop to iterate over the eight sub blocks of a super block - two sub blocks are processed per iteration
+                for (int sb = 0; sb < QK_K / 64; sb++) {
+
+                    // Load the eight block_q4_K for two sub blocks quantized values interleaved with each other in chunks of eight bytes - B0,B1 ....B6,B7
+                    const __m256i rhs_raw_mat_0123_0 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + sb * 256));
+                    const __m256i rhs_raw_mat_4567_0 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 32 + sb * 256));
+                    const __m256i rhs_raw_mat_0123_1 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 64 + sb * 256));
+                    const __m256i rhs_raw_mat_4567_1 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 96 + sb * 256));
+                    const __m256i rhs_raw_mat_0123_2 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 128 + sb * 256));
+                    const __m256i rhs_raw_mat_4567_2 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 160 + sb * 256));
+                    const __m256i rhs_raw_mat_0123_3 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 192 + sb * 256));
+                    const __m256i rhs_raw_mat_4567_3 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 224 + sb * 256));
+
+                    // Save the values in the following vectors in the formats B0B1B4B5, B2B3B6B7 for further processing and storing of values
+                    const __m256i rhs_raw_mat_0145_0 = 
_mm256_blend_epi32(rhs_raw_mat_0123_0, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_0, requiredOrder), 240); + const __m256i rhs_raw_mat_2367_0 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_0, requiredOrder), rhs_raw_mat_4567_0, 240); + const __m256i rhs_raw_mat_0145_1 = _mm256_blend_epi32(rhs_raw_mat_0123_1, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_1, requiredOrder), 240); + const __m256i rhs_raw_mat_2367_1 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_1, requiredOrder), rhs_raw_mat_4567_1, 240); + const __m256i rhs_raw_mat_0145_2 = _mm256_blend_epi32(rhs_raw_mat_0123_2, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_2, requiredOrder), 240); + const __m256i rhs_raw_mat_2367_2 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_2, requiredOrder), rhs_raw_mat_4567_2, 240); + const __m256i rhs_raw_mat_0145_3 = _mm256_blend_epi32(rhs_raw_mat_0123_3, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_3, requiredOrder), 240); + const __m256i rhs_raw_mat_2367_3 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_3, requiredOrder), rhs_raw_mat_4567_3, 240); + + // 4-bit -> 8-bit + // First sub block of the two sub blocks processed in the iteration + const __m256i rhs_mat_0145_00 = _mm256_and_si256(rhs_raw_mat_0145_0, m4b); //B00(0-7) B01(0-7) B04(0-7) B05(0-7) + const __m256i rhs_mat_2367_00 = _mm256_and_si256(rhs_raw_mat_2367_0, m4b); //B02(0-7) B03(0-7) B06(0-7) B07(0-7) + + const __m256i rhs_mat_0145_01 = _mm256_and_si256(rhs_raw_mat_0145_1, m4b); //B00(8-15) B01(8-15) B04(8-15) B05(8-15) + const __m256i rhs_mat_2367_01 = _mm256_and_si256(rhs_raw_mat_2367_1, m4b); //B02(8-15) B03(8-15) B06(8-15) B07(8-15) + + const __m256i rhs_mat_0145_02 = _mm256_and_si256(rhs_raw_mat_0145_2, m4b); //B00(16-23) B01(16-23) B04(16-23) B05(16-23) + const __m256i rhs_mat_2367_02 = _mm256_and_si256(rhs_raw_mat_2367_2, m4b); //B02(16-23) B03(16-23) B06(16-23) B07(16-23) + + const __m256i rhs_mat_0145_03 = _mm256_and_si256(rhs_raw_mat_0145_3, m4b); //B00(24-31) B01(24-31) B04(24-31) B05(24-31) + const __m256i rhs_mat_2367_03 = _mm256_and_si256(rhs_raw_mat_2367_3, m4b); //B02(24-31) B03(24-31) B06(24-31) B07(24-31) + + // Second sub block of the two sub blocks processed in the iteration + const __m256i rhs_mat_0145_10 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_0, 4), m4b); //B10(0-7) B11(0-7) B14(0-7) B15(0-7) + const __m256i rhs_mat_2367_10 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_0, 4), m4b); //B12(0-7) B13(0-7) B16(0-7) B17(0-7) + + const __m256i rhs_mat_0145_11 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_1, 4), m4b); //B10(8-15) B11(8-15) B14(8-15) B15(8-15) + const __m256i rhs_mat_2367_11 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_1, 4), m4b); //B12(8-15) B13(8-15) B16(8-15) B17(8-15) + + const __m256i rhs_mat_0145_12 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_2, 4), m4b); //B10(16-23) B11(16-23) B14(16-23) B15(16-23) + const __m256i rhs_mat_2367_12 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_2, 4), m4b); //B12(16-23) B13(16-23) B16(16-23) B17(16-23) + + const __m256i rhs_mat_0145_13 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_3, 4), m4b); //B10(24-31) B11(24-31) B14(24-31) B15(24-31) + const __m256i rhs_mat_2367_13 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_3, 4), m4b); //B12(24-31) B13(24-31) B16(24-31) B17(24-31) + + // Shuffle pattern one - right side input + const __m256i rhs_mat_0145_00_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_00, 136); //B00(0-3) B01(0-3) 
B00(0-3) B01(0-3) B04(0-3) B05(0-3) B04(0-3) B05(0-3) + const __m256i rhs_mat_2367_00_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_00, 136); //B02(0-3) B03(0-3) B02(0-3) B03(0-3) B06(0-3) B07(0-3) B06(0-3) B07(0-3) + + const __m256i rhs_mat_0145_01_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_01, 136); //B00(8-11) B01(8-11) B00(8-11) B01(8-11) B04(8-11) B05(8-11) B04(8-11) B05(8-11) + const __m256i rhs_mat_2367_01_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_01, 136); //B02(8-11) B03(8-11) B02(8-11) B03(8-11) B06(8-11) B07(8-11) B06(8-11) B07(8-11) + + const __m256i rhs_mat_0145_02_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_02, 136); //B00(16-19) B01(16-19) B00(16-19) B01(16-19) B04(16-19) B05(16-19) B04(16-19) B05(16-19) + const __m256i rhs_mat_2367_02_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_02, 136); //B02(16-19) B03(16-19) B02(16-19) B03(16-19) B06(16-19) B07(16-19) B06(16-19) B07(16-19) + + const __m256i rhs_mat_0145_03_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_03, 136); //B00(24-27) B01(24-27) B00(24-27) B01(24-27) B04(24-27) B05(24-27) B04(24-27) B05(24-27) + const __m256i rhs_mat_2367_03_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_03, 136); //B02(24-27) B03(24-27) B02(24-27) B03(24-27) B06(24-27) B07(24-27) B06(24-27) B07(24-27) + + const __m256i rhs_mat_0145_10_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_10, 136); //B10(0-3) B11(0-3) B10(0-3) B11(0-3) B14(0-3) B15(0-3) B14(0-3) B15(0-3) + const __m256i rhs_mat_2367_10_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_10, 136); //B12(0-3) B13(0-3) B12(0-3) B13(0-3) B16(0-3) B17(0-3) B16(0-3) B17(0-3) + + const __m256i rhs_mat_0145_11_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_11, 136); //B10(8-11) B11(8-11) B10(8-11) B11(8-11) B14(8-11) B15(8-11) B14(8-11) B15(8-11) + const __m256i rhs_mat_2367_11_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_11, 136); //B12(8-11) B13(8-11) B12(8-11) B13(8-11) B16(8-11) B17(8-11) B16(8-11) B17(8-11) + + const __m256i rhs_mat_0145_12_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_12, 136); //B10(16-19) B11(16-19) B10(16-19) B11(16-19) B14(16-19) B15(16-19) B14(16-19) B15(16-19) + const __m256i rhs_mat_2367_12_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_12, 136); //B12(16-19) B13(16-19) B12(16-19) B13(16-19) B16(16-19) B17(16-19) B16(16-19) B17(16-19) + + const __m256i rhs_mat_0145_13_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_13, 136); //B10(24-27) B11(24-27) B10(24-27) B11(24-27) B14(24-27) B15(24-27) B14(24-27) B15(24-27) + const __m256i rhs_mat_2367_13_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_13, 136); //B12(24-27) B13(24-27) B12(24-27) B13(24-27) B16(24-27) B17(24-27) B16(24-27) B17(24-27) + + + // Shuffle pattern two - right side input + const __m256i rhs_mat_0145_00_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_00, 221); //B00(4-7) B01(4-7) B00(4-7) B01(4-7) B04(4-7) B05(4-7) B04(4-7) B05(4-7) + const __m256i rhs_mat_2367_00_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_00, 221); //B02(4-7) B03(4-7) B02(4-7) B03(4-7) B06(4-7) B07(4-7) B06(4-7) B07(4-7) + + const __m256i rhs_mat_0145_01_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_01, 221); //B00(12-15) B01(12-15) B00(12-15) B01(12-15) B04(12-15) B05(12-15) B04(12-15) B05(12-15) + const __m256i rhs_mat_2367_01_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_01, 221); //B02(12-15) B03(12-15) B02(12-15) B03(12-15) B06(12-15) B07(12-15) B06(12-15) B07(12-15) + + const __m256i rhs_mat_0145_02_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_02, 221); //B00(20-23) B01(20-23) B00(20-23) B01(20-23) B04(20-23) B05(20-23) B04(20-23) B05(20-23) + const __m256i rhs_mat_2367_02_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_02, 221); //B02(20-23) B03(20-23) 
B02(20-23) B03(20-23) B06(20-23) B07(20-23) B06(20-23) B07(20-23) + + const __m256i rhs_mat_0145_03_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_03, 221); //B00(28-31) B01(28-31) B00(28-31) B01(28-31) B04(28-31) B05(28-31) B04(28-31) B05(28-31) + const __m256i rhs_mat_2367_03_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_03, 221); //B02(28-31) B03(28-31) B02(28-31) B03(28-31) B06(28-31) B07(28-31) B06(28-31) B07(28-31) + + const __m256i rhs_mat_0145_10_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_10, 221); //B10(4-7) B11(4-7) B10(4-7) B11(4-7) B14(4-7) B15(4-7) B14(4-7) B15(4-7) + const __m256i rhs_mat_2367_10_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_10, 221); //B12(4-7) B13(4-7) B12(4-7) B13(4-7) B16(4-7) B17(4-7) B16(4-7) B17(4-7) + + const __m256i rhs_mat_0145_11_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_11, 221); //B10(12-15) B11(12-15) B10(12-15) B11(12-15) B14(12-15) B15(12-15) B14(12-15) B15(12-15) + const __m256i rhs_mat_2367_11_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_11, 221); //B12(12-15) B13(12-15) B12(12-15) B13(12-15) B16(12-15) B17(12-15) B16(12-15) B17(12-15) + + const __m256i rhs_mat_0145_12_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_12, 221); //B10(20-23) B11(20-23) B10(20-23) B11(20-23) B14(20-23) B15(20-23) B14(20-23) B15(20-23) + const __m256i rhs_mat_2367_12_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_12, 221); //B12(20-23) B13(20-23) B12(20-23) B13(20-23) B16(20-23) B17(20-23) B16(20-23) B17(20-23) + + const __m256i rhs_mat_0145_13_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_13, 221); //B10(28-31) B11(28-31) B10(28-31) B11(28-31) B14(28-31) B15(28-31) B14(28-31) B15(28-31) + const __m256i rhs_mat_2367_13_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_13, 221); //B12(28-31) B13(28-31) B12(28-31) B13(28-31) B16(28-31) B17(28-31) B16(28-31) B17(28-31) + + uint32_t utmp_0[4], utmp_1[4]; + + // Scales and Mins of corresponding sub blocks from different Q4_K structures are stored together + // The below block is for eg to extract first sub block's scales and mins from different Q4_K structures for the sb loop + memcpy(utmp_0, b_ptr[b].scales + 24 * sb, 12); + utmp_0[3] = ((utmp_0[2] >> 4) & kmask2) | (((utmp_0[1] >> 6) & kmask3) << 4); + const uint32_t uaux_0 = utmp_0[1] & kmask1; + utmp_0[1] = (utmp_0[2] & kmask2) | (((utmp_0[0] >> 6) & kmask3) << 4); + utmp_0[2] = uaux_0; + utmp_0[0] &= kmask1; + + // The below block is for eg to extract second sub block's scales and mins from different Q4_K structures for the sb loop + memcpy(utmp_1, b_ptr[b].scales + 12 + sb * 24, 12); + utmp_1[3] = ((utmp_1[2] >> 4) & kmask2) | (((utmp_1[1] >> 6) & kmask3) << 4); + const uint32_t uaux_1 = utmp_1[1] & kmask1; + utmp_1[1] = (utmp_1[2] & kmask2) | (((utmp_1[0] >> 6) & kmask3) << 4); + utmp_1[2] = uaux_1; + utmp_1[0] &= kmask1; + + // Scales of first sub block in the sb loop + const __m128i mins_and_scales_0 = _mm_set_epi32(utmp_0[3], utmp_0[2], utmp_0[1], utmp_0[0]); + const __m256i scales_0 = _mm256_cvtepu8_epi16(_mm_unpacklo_epi8(mins_and_scales_0, mins_and_scales_0)); + + // Scales of second sub block in the sb loop + const __m128i mins_and_scales_1 = _mm_set_epi32(utmp_1[3], utmp_1[2], utmp_1[1], utmp_1[0]); + const __m256i scales_1 = _mm256_cvtepu8_epi16(_mm_unpacklo_epi8(mins_and_scales_1, mins_and_scales_1)); + + // Mins of first and second sub block of Q4_K block are arranged side by side + const __m256i mins_01 = _mm256_cvtepu8_epi16(_mm_unpacklo_epi8(_mm_shuffle_epi32(mins_and_scales_0, 78), _mm_shuffle_epi32(mins_and_scales_1, 78))); + + const __m256i scale_0145_0 = _mm256_shuffle_epi32(scales_0, 68); + const 
__m256i scale_2367_0 = _mm256_shuffle_epi32(scales_0, 238); + + const __m256i scale_0145_1 = _mm256_shuffle_epi32(scales_1, 68); + const __m256i scale_2367_1 = _mm256_shuffle_epi32(scales_1, 238); + + for (int rp = 0; rp < 4; rp++) { + + // Load the four block_q8_k quantized values interleaved with each other in chunks of eight bytes - A0,A1,A2,A3 + // Loaded as set of 128 bit vectors and repeated into a 256 bit vector + __m256i lhs_mat_0123_00 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 256 * sb))); + __m256i lhs_mat_01_00 = _mm256_permute2f128_si256(lhs_mat_0123_00, lhs_mat_0123_00, 0); + __m256i lhs_mat_23_00 = _mm256_permute2f128_si256(lhs_mat_0123_00, lhs_mat_0123_00, 17); + __m256i lhs_mat_0123_01 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 32 + 256 * sb))); + __m256i lhs_mat_01_01 = _mm256_permute2f128_si256(lhs_mat_0123_01, lhs_mat_0123_01, 0); + __m256i lhs_mat_23_01 = _mm256_permute2f128_si256(lhs_mat_0123_01, lhs_mat_0123_01, 17); + __m256i lhs_mat_0123_02 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 64 + 256 * sb))); + __m256i lhs_mat_01_02 = _mm256_permute2f128_si256(lhs_mat_0123_02, lhs_mat_0123_02, 0); + __m256i lhs_mat_23_02 = _mm256_permute2f128_si256(lhs_mat_0123_02, lhs_mat_0123_02, 17); + __m256i lhs_mat_0123_03 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 96 + 256 * sb))); + __m256i lhs_mat_01_03 = _mm256_permute2f128_si256(lhs_mat_0123_03, lhs_mat_0123_03, 0); + __m256i lhs_mat_23_03 = _mm256_permute2f128_si256(lhs_mat_0123_03, lhs_mat_0123_03, 17); + __m256i lhs_mat_0123_10 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 128 + 256 * sb))); + __m256i lhs_mat_01_10 = _mm256_permute2f128_si256(lhs_mat_0123_10, lhs_mat_0123_10, 0); + __m256i lhs_mat_23_10 = _mm256_permute2f128_si256(lhs_mat_0123_10, lhs_mat_0123_10, 17); + __m256i lhs_mat_0123_11 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 160 + 256 * sb))); + __m256i lhs_mat_01_11 = _mm256_permute2f128_si256(lhs_mat_0123_11, lhs_mat_0123_11, 0); + __m256i lhs_mat_23_11 = _mm256_permute2f128_si256(lhs_mat_0123_11, lhs_mat_0123_11, 17); + __m256i lhs_mat_0123_12 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 192 + 256 * sb))); + __m256i lhs_mat_01_12 = _mm256_permute2f128_si256(lhs_mat_0123_12, lhs_mat_0123_12, 0); + __m256i lhs_mat_23_12 = _mm256_permute2f128_si256(lhs_mat_0123_12, lhs_mat_0123_12, 17); + __m256i lhs_mat_0123_13 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 224 + 256 * sb))); + __m256i lhs_mat_01_13 = _mm256_permute2f128_si256(lhs_mat_0123_13, lhs_mat_0123_13, 0); + __m256i lhs_mat_23_13 = _mm256_permute2f128_si256(lhs_mat_0123_13, lhs_mat_0123_13, 17); + + // Bsums are loaded - four bsums are loaded (for two sub blocks) for the different Q8_K blocks + __m256i lhs_bsums_0123_01 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].bsums + 16 * sb))); + __m256i lhs_bsums_hsum_0123_01 = _mm256_castsi128_si256(_mm_hadd_epi16(_mm256_castsi256_si128(lhs_bsums_0123_01), _mm256_extractf128_si256(lhs_bsums_0123_01, 1))); + lhs_bsums_hsum_0123_01 = _mm256_permute2x128_si256(lhs_bsums_hsum_0123_01, lhs_bsums_hsum_0123_01, 0); + + // Shuffle pattern one - left side input + const __m256i lhs_mat_01_00_sp1 = _mm256_shuffle_epi32(lhs_mat_01_00, 160); //A00(0-3) A00(0-3) A01(0-3) A01(0-3) A00(0-3) A00(0-3) A01(0-3) A01(0-3) + const __m256i lhs_mat_23_00_sp1 = _mm256_shuffle_epi32(lhs_mat_23_00, 160); //A02(0-3) A03(0-3) A02(0-3) A03(0-3) A02(0-3) A03(0-3) A02(0-3) A03(0-3) + + const __m256i 
lhs_mat_01_01_sp1 = _mm256_shuffle_epi32(lhs_mat_01_01, 160); //A00(8-11) A00(8-11) A01(8-11) A01(8-11) A00(8-11) A00(8-11) A01(8-11) A01(8-11) + const __m256i lhs_mat_23_01_sp1 = _mm256_shuffle_epi32(lhs_mat_23_01, 160); //A02(8-11) A03(8-11) A02(8-11) A03(8-11) A02(8-11) A03(8-11) A02(8-11) A03(8-11) + + const __m256i lhs_mat_01_02_sp1 = _mm256_shuffle_epi32(lhs_mat_01_02, 160); //A00(16-19) A00(16-19) A01(16-19) A01(16-19) A00(16-19) A00(16-19) A01(16-19) A01(16-19) + const __m256i lhs_mat_23_02_sp1 = _mm256_shuffle_epi32(lhs_mat_23_02, 160); //A02(16-19) A03(16-19) A02(16-19) A03(16-19) A02(16-19) A03(16-19) A02(16-19) A03(16-19) + + const __m256i lhs_mat_01_03_sp1 = _mm256_shuffle_epi32(lhs_mat_01_03, 160); //A00(24-27) A00(24-27) A01(24-27) A01(24-27) A00(24-27) A00(24-27) A01(24-27) A01(24-27) + const __m256i lhs_mat_23_03_sp1 = _mm256_shuffle_epi32(lhs_mat_23_03, 160); //A02(24-27) A03(24-27) A02(24-27) A03(24-27) A02(24-27) A03(24-27) A02(24-27) A03(24-27) + + const __m256i lhs_mat_01_10_sp1 = _mm256_shuffle_epi32(lhs_mat_01_10, 160); //A10(0-3) A10(0-3) A11(0-3) A11(0-3) A10(0-3) A10(0-3) A11(0-3) A11(0-3) + const __m256i lhs_mat_23_10_sp1 = _mm256_shuffle_epi32(lhs_mat_23_10, 160); //A12(0-3) A13(0-3) A12(0-3) A13(0-3) A12(0-3) A13(0-3) A12(0-3) A13(0-3) + + const __m256i lhs_mat_01_11_sp1 = _mm256_shuffle_epi32(lhs_mat_01_11, 160); //A10(8-11) A10(8-11) A11(8-11) A11(8-11) A10(8-11) A10(8-11) A11(8-11) A11(8-11) + const __m256i lhs_mat_23_11_sp1 = _mm256_shuffle_epi32(lhs_mat_23_11, 160); //A12(8-11) A13(8-11) A12(8-11) A13(8-11) A12(8-11) A13(8-11) A12(8-11) A13(8-11) + + const __m256i lhs_mat_01_12_sp1 = _mm256_shuffle_epi32(lhs_mat_01_12, 160); //A10(16-19) A10(16-19) A11(16-19) A11(16-19) A10(16-19) A10(16-19) A11(16-19) A11(16-19) + const __m256i lhs_mat_23_12_sp1 = _mm256_shuffle_epi32(lhs_mat_23_12, 160); //A12(16-19) A13(16-19) A12(16-19) A13(16-19) A12(16-19) A13(16-19) A12(16-19) A13(16-19) + + const __m256i lhs_mat_01_13_sp1 = _mm256_shuffle_epi32(lhs_mat_01_13, 160); //A10(24-27) A10(24-27) A11(24-27) A11(24-27) A10(24-27) A10(24-27) A11(24-27) A11(24-27) + const __m256i lhs_mat_23_13_sp1 = _mm256_shuffle_epi32(lhs_mat_23_13, 160); //A12(24-27) A13(24-27) A12(24-27) A13(24-27) A12(24-27) A13(24-27) A12(24-27) A13(24-27) + + // Shuffle pattern two- left side input + const __m256i lhs_mat_01_00_sp2 = _mm256_shuffle_epi32(lhs_mat_01_00, 245); //A00(4-7) A00(4-7) A01(4-7) A01(4-7) A00(4-7) A00(4-7) A01(4-7) A01(4-7) + const __m256i lhs_mat_23_00_sp2 = _mm256_shuffle_epi32(lhs_mat_23_00, 245); //A02(4-7) A03(4-7) A02(4-7) A03(4-7) A02(4-7) A03(4-7) A02(4-7) A03(4-7) + + const __m256i lhs_mat_01_01_sp2 = _mm256_shuffle_epi32(lhs_mat_01_01, 245); //A00(12-15) A00(12-15) A01(12-15) A01(12-15) A00(12-15) A00(12-15) A01(12-15) A01(12-15) + const __m256i lhs_mat_23_01_sp2 = _mm256_shuffle_epi32(lhs_mat_23_01, 245); //A02(12-15) A03(12-15) A02(12-15) A03(12-15) A02(12-15) A03(12-15) A02(12-15) A03(12-15) + + const __m256i lhs_mat_01_02_sp2 = _mm256_shuffle_epi32(lhs_mat_01_02, 245); //A00(20-23) A00(20-23) A01(20-23) A01(20-23) A00(20-23) A00(20-23) A01(20-23) A01(20-23) + const __m256i lhs_mat_23_02_sp2 = _mm256_shuffle_epi32(lhs_mat_23_02, 245); //A02(20-23) A03(20-23) A02(20-23) A03(20-23) A02(20-23) A03(20-23) A02(20-23) A03(20-23) + + const __m256i lhs_mat_01_03_sp2 = _mm256_shuffle_epi32(lhs_mat_01_03, 245); //A00(28-31) A00(28-31) A01(28-31) A01(28-31) A00(28-31) A00(28-31) A01(28-31) A01(28-31) + const __m256i lhs_mat_23_03_sp2 = _mm256_shuffle_epi32(lhs_mat_23_03, 
245); //A02(28-31) A03(28-31) A02(28-31) A03(28-31) A02(28-31) A03(28-31) A02(28-31) A03(28-31) + + const __m256i lhs_mat_01_10_sp2 = _mm256_shuffle_epi32(lhs_mat_01_10, 245); //A10(4-7) A10(4-7) A11(4-7) A11(4-7) A10(4-7) A10(4-7) A11(4-7) A11(4-7) + const __m256i lhs_mat_23_10_sp2 = _mm256_shuffle_epi32(lhs_mat_23_10, 245); //A12(4-7) A13(4-7) A12(4-7) A13(4-7) A12(4-7) A13(4-7) A12(4-7) A13(4-7) + + const __m256i lhs_mat_01_11_sp2 = _mm256_shuffle_epi32(lhs_mat_01_11, 245); //A10(12-15) A10(12-15) A11(12-15) A11(12-15) A10(12-15) A10(12-15) A11(12-15) A11(12-15) + const __m256i lhs_mat_23_11_sp2 = _mm256_shuffle_epi32(lhs_mat_23_11, 245); //A12(12-15) A13(12-15) A12(12-15) A13(12-15) A12(12-15) A13(12-15) A12(12-15) A13(12-15) + + const __m256i lhs_mat_01_12_sp2 = _mm256_shuffle_epi32(lhs_mat_01_12, 245); //A10(20-23) A10(20-23) A11(20-23) A11(20-23) A10(20-23) A10(20-23) A11(20-23) A11(20-23) + const __m256i lhs_mat_23_12_sp2 = _mm256_shuffle_epi32(lhs_mat_23_12, 245); //A12(20-23) A13(20-23) A12(20-23) A13(20-23) A12(20-23) A13(20-23) A12(20-23) A13(20-23) + + const __m256i lhs_mat_01_13_sp2 = _mm256_shuffle_epi32(lhs_mat_01_13, 245); //A10(28-31) A10(28-31) A11(28-31) A11(28-31) A10(28-31) A10(28-31) A11(28-31) A11(28-31) + const __m256i lhs_mat_23_13_sp2 = _mm256_shuffle_epi32(lhs_mat_23_13, 245); //A12(28-31) A13(28-31) A12(28-31) A13(28-31) A12(28-31) A13(28-31) A12(28-31) A13(28-31) + + // The values arranged in shuffle patterns are operated with dot product operation within 32 bit lane i.e corresponding bytes and multiplied and added into 32 bit integers within 32 bit lane + __m256i iacc_mat_00_0_sp1 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_03_sp1, lhs_mat_01_03_sp1), _mm256_maddubs_epi16(rhs_mat_0145_02_sp1, lhs_mat_01_02_sp1)), _mm256_maddubs_epi16(rhs_mat_0145_01_sp1, lhs_mat_01_01_sp1)), _mm256_maddubs_epi16(rhs_mat_0145_00_sp1, lhs_mat_01_00_sp1)); + __m256i iacc_mat_01_0_sp1 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_03_sp1, lhs_mat_01_03_sp1), _mm256_maddubs_epi16(rhs_mat_2367_02_sp1, lhs_mat_01_02_sp1)), _mm256_maddubs_epi16(rhs_mat_2367_01_sp1, lhs_mat_01_01_sp1)), _mm256_maddubs_epi16(rhs_mat_2367_00_sp1, lhs_mat_01_00_sp1)); + __m256i iacc_mat_10_0_sp1 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_03_sp1, lhs_mat_23_03_sp1), _mm256_maddubs_epi16(rhs_mat_0145_02_sp1, lhs_mat_23_02_sp1)), _mm256_maddubs_epi16(rhs_mat_0145_01_sp1, lhs_mat_23_01_sp1)), _mm256_maddubs_epi16(rhs_mat_0145_00_sp1, lhs_mat_23_00_sp1)); + __m256i iacc_mat_11_0_sp1 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_03_sp1, lhs_mat_23_03_sp1), _mm256_maddubs_epi16(rhs_mat_2367_02_sp1, lhs_mat_23_02_sp1)), _mm256_maddubs_epi16(rhs_mat_2367_01_sp1, lhs_mat_23_01_sp1)), _mm256_maddubs_epi16(rhs_mat_2367_00_sp1, lhs_mat_23_00_sp1)); + __m256i iacc_mat_00_1_sp1 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_13_sp1, lhs_mat_01_13_sp1), _mm256_maddubs_epi16(rhs_mat_0145_12_sp1, lhs_mat_01_12_sp1)), _mm256_maddubs_epi16(rhs_mat_0145_11_sp1, lhs_mat_01_11_sp1)), _mm256_maddubs_epi16(rhs_mat_0145_10_sp1, lhs_mat_01_10_sp1)); + __m256i iacc_mat_01_1_sp1 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_13_sp1, lhs_mat_01_13_sp1), _mm256_maddubs_epi16(rhs_mat_2367_12_sp1, lhs_mat_01_12_sp1)), _mm256_maddubs_epi16(rhs_mat_2367_11_sp1, lhs_mat_01_11_sp1)), 
_mm256_maddubs_epi16(rhs_mat_2367_10_sp1, lhs_mat_01_10_sp1)); + __m256i iacc_mat_10_1_sp1 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_13_sp1, lhs_mat_23_13_sp1), _mm256_maddubs_epi16(rhs_mat_0145_12_sp1, lhs_mat_23_12_sp1)), _mm256_maddubs_epi16(rhs_mat_0145_11_sp1, lhs_mat_23_11_sp1)), _mm256_maddubs_epi16(rhs_mat_0145_10_sp1, lhs_mat_23_10_sp1)); + __m256i iacc_mat_11_1_sp1 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_13_sp1, lhs_mat_23_13_sp1), _mm256_maddubs_epi16(rhs_mat_2367_12_sp1, lhs_mat_23_12_sp1)), _mm256_maddubs_epi16(rhs_mat_2367_11_sp1, lhs_mat_23_11_sp1)), _mm256_maddubs_epi16(rhs_mat_2367_10_sp1, lhs_mat_23_10_sp1)); + + __m256i iacc_mat_00_0_sp2 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_03_sp2, lhs_mat_01_03_sp2), _mm256_maddubs_epi16(rhs_mat_0145_02_sp2, lhs_mat_01_02_sp2)), _mm256_maddubs_epi16(rhs_mat_0145_01_sp2, lhs_mat_01_01_sp2)), _mm256_maddubs_epi16(rhs_mat_0145_00_sp2, lhs_mat_01_00_sp2)); + __m256i iacc_mat_01_0_sp2 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_03_sp2, lhs_mat_01_03_sp2), _mm256_maddubs_epi16(rhs_mat_2367_02_sp2, lhs_mat_01_02_sp2)), _mm256_maddubs_epi16(rhs_mat_2367_01_sp2, lhs_mat_01_01_sp2)), _mm256_maddubs_epi16(rhs_mat_2367_00_sp2, lhs_mat_01_00_sp2)); + __m256i iacc_mat_10_0_sp2 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_03_sp2, lhs_mat_23_03_sp2), _mm256_maddubs_epi16(rhs_mat_0145_02_sp2, lhs_mat_23_02_sp2)), _mm256_maddubs_epi16(rhs_mat_0145_01_sp2, lhs_mat_23_01_sp2)), _mm256_maddubs_epi16(rhs_mat_0145_00_sp2, lhs_mat_23_00_sp2)); + __m256i iacc_mat_11_0_sp2 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_03_sp2, lhs_mat_23_03_sp2), _mm256_maddubs_epi16(rhs_mat_2367_02_sp2, lhs_mat_23_02_sp2)), _mm256_maddubs_epi16(rhs_mat_2367_01_sp2, lhs_mat_23_01_sp2)), _mm256_maddubs_epi16(rhs_mat_2367_00_sp2, lhs_mat_23_00_sp2)); + __m256i iacc_mat_00_1_sp2 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_13_sp2, lhs_mat_01_13_sp2), _mm256_maddubs_epi16(rhs_mat_0145_12_sp2, lhs_mat_01_12_sp2)), _mm256_maddubs_epi16(rhs_mat_0145_11_sp2, lhs_mat_01_11_sp2)), _mm256_maddubs_epi16(rhs_mat_0145_10_sp2, lhs_mat_01_10_sp2)); + __m256i iacc_mat_01_1_sp2 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_13_sp2, lhs_mat_01_13_sp2), _mm256_maddubs_epi16(rhs_mat_2367_12_sp2, lhs_mat_01_12_sp2)), _mm256_maddubs_epi16(rhs_mat_2367_11_sp2, lhs_mat_01_11_sp2)), _mm256_maddubs_epi16(rhs_mat_2367_10_sp2, lhs_mat_01_10_sp2)); + __m256i iacc_mat_10_1_sp2 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_13_sp2, lhs_mat_23_13_sp2), _mm256_maddubs_epi16(rhs_mat_0145_12_sp2, lhs_mat_23_12_sp2)), _mm256_maddubs_epi16(rhs_mat_0145_11_sp2, lhs_mat_23_11_sp2)), _mm256_maddubs_epi16(rhs_mat_0145_10_sp2, lhs_mat_23_10_sp2)); + __m256i iacc_mat_11_1_sp2 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_13_sp2, lhs_mat_23_13_sp2), _mm256_maddubs_epi16(rhs_mat_2367_12_sp2, lhs_mat_23_12_sp2)), _mm256_maddubs_epi16(rhs_mat_2367_11_sp2, lhs_mat_23_11_sp2)), _mm256_maddubs_epi16(rhs_mat_2367_10_sp2, lhs_mat_23_10_sp2)); + + // Output of both shuffle patterns are added in order to sum dot product outputs of all 32 values in block + __m256i iacc_mat_00_0 = 
_mm256_add_epi16(iacc_mat_00_0_sp1, iacc_mat_00_0_sp2); + __m256i iacc_mat_01_0 = _mm256_add_epi16(iacc_mat_01_0_sp1, iacc_mat_01_0_sp2); + __m256i iacc_mat_10_0 = _mm256_add_epi16(iacc_mat_10_0_sp1, iacc_mat_10_0_sp2); + __m256i iacc_mat_11_0 = _mm256_add_epi16(iacc_mat_11_0_sp1, iacc_mat_11_0_sp2); + + __m256i iacc_mat_00_1 = _mm256_add_epi16(iacc_mat_00_1_sp1, iacc_mat_00_1_sp2); + __m256i iacc_mat_01_1 = _mm256_add_epi16(iacc_mat_01_1_sp1, iacc_mat_01_1_sp2); + __m256i iacc_mat_10_1 = _mm256_add_epi16(iacc_mat_10_1_sp1, iacc_mat_10_1_sp2); + __m256i iacc_mat_11_1 = _mm256_add_epi16(iacc_mat_11_1_sp1, iacc_mat_11_1_sp2); + + // The summed dot products are multiplied with the scales of the corresponding sub block, widening the 16 bit sums into 32 bit integers + iacc_mat_00_0 = _mm256_madd_epi16(iacc_mat_00_0, scale_0145_0); + iacc_mat_01_0 = _mm256_madd_epi16(iacc_mat_01_0, scale_2367_0); + iacc_mat_10_0 = _mm256_madd_epi16(iacc_mat_10_0, scale_0145_0); + iacc_mat_11_0 = _mm256_madd_epi16(iacc_mat_11_0, scale_2367_0); + + iacc_mat_00_1 = _mm256_madd_epi16(iacc_mat_00_1, scale_0145_1); + iacc_mat_01_1 = _mm256_madd_epi16(iacc_mat_01_1, scale_2367_1); + iacc_mat_10_1 = _mm256_madd_epi16(iacc_mat_10_1, scale_0145_1); + iacc_mat_11_1 = _mm256_madd_epi16(iacc_mat_11_1, scale_2367_1); + + // Straighten out to make 4 row vectors (4 for each sub block which are accumulated together in the next step) + __m256i iacc_row_0_0 = _mm256_blend_epi32(iacc_mat_00_0, _mm256_shuffle_epi32(iacc_mat_01_0, 78), 204); + __m256i iacc_row_1_0 = _mm256_blend_epi32(_mm256_shuffle_epi32(iacc_mat_00_0, 78), iacc_mat_01_0, 204); + __m256i iacc_row_2_0 = _mm256_blend_epi32(iacc_mat_10_0, _mm256_shuffle_epi32(iacc_mat_11_0, 78), 204); + __m256i iacc_row_3_0 = _mm256_blend_epi32(_mm256_shuffle_epi32(iacc_mat_10_0, 78), iacc_mat_11_0, 204); + __m256i iacc_row_0_1 = _mm256_blend_epi32(iacc_mat_00_1, _mm256_shuffle_epi32(iacc_mat_01_1, 78), 204); + __m256i iacc_row_1_1 = _mm256_blend_epi32(_mm256_shuffle_epi32(iacc_mat_00_1, 78), iacc_mat_01_1, 204); + __m256i iacc_row_2_1 = _mm256_blend_epi32(iacc_mat_10_1, _mm256_shuffle_epi32(iacc_mat_11_1, 78), 204); + __m256i iacc_row_3_1 = _mm256_blend_epi32(_mm256_shuffle_epi32(iacc_mat_10_1, 78), iacc_mat_11_1, 204); + + __m256i iacc_row_0 = _mm256_add_epi32(iacc_row_0_0, iacc_row_0_1); + __m256i iacc_row_1 = _mm256_add_epi32(iacc_row_1_0, iacc_row_1_1); + __m256i iacc_row_2 = _mm256_add_epi32(iacc_row_2_0, iacc_row_2_1); + __m256i iacc_row_3 = _mm256_add_epi32(iacc_row_3_0, iacc_row_3_1); + + // Load the scale (d) values for all the 4 Q8_K blocks and repeat it across lanes + const __m128 row_scale_f32_sse = _mm_load_ps(a_ptrs[rp][b].d); + const __m256 row_scale_f32 = _mm256_set_m128(row_scale_f32_sse, row_scale_f32_sse);//GGML_F32Cx8_REPEAT_LOAD(a_ptrs[rp][b].d, loadMask); + + // Multiply with appropriate scales and accumulate (for both d and dmin) below + acc_rows[rp * 4] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_0), _mm256_mul_ps(col_scale_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 0)), acc_rows[rp * 4]); + acc_rows[rp * 4 + 1] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_1), _mm256_mul_ps(col_scale_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 85)), acc_rows[rp * 4 + 1]); + acc_rows[rp * 4 + 2] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_2), _mm256_mul_ps(col_scale_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 170)), acc_rows[rp * 4 + 2]); + acc_rows[rp * 4 + 3] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_3), _mm256_mul_ps(col_scale_f32, _mm256_shuffle_ps(row_scale_f32, 
row_scale_f32, 255)), acc_rows[rp * 4 + 3]); + + __m256i iacc_row_min_0 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_hsum_0123_01, 0), mins_01); + __m256i iacc_row_min_1 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_hsum_0123_01, 85), mins_01); + __m256i iacc_row_min_2 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_hsum_0123_01, 170), mins_01); + __m256i iacc_row_min_3 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_hsum_0123_01, 255), mins_01); + + acc_min_rows[rp * 4] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_min_0), _mm256_mul_ps(col_dmin_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 0)), acc_min_rows[rp * 4]); + acc_min_rows[rp * 4 + 1] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_min_1), _mm256_mul_ps(col_dmin_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 85)), acc_min_rows[rp * 4 + 1]); + acc_min_rows[rp * 4 + 2] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_min_2), _mm256_mul_ps(col_dmin_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 170)), acc_min_rows[rp * 4 + 2]); + acc_min_rows[rp * 4 + 3] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_min_3), _mm256_mul_ps(col_dmin_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 255)), acc_min_rows[rp * 4 + 3]); + + } + } + } + // Store the accumulated values + for (int i = 0; i < 16; i++) { + _mm256_storeu_ps((float * )(s + ((y * 4 + i) * bs + x * 8)), _mm256_sub_ps(acc_rows[i], acc_min_rows[i])); + } + } + } + for (; y < nr / 4; y++) { + + const block_q8_Kx4 * a_ptr = a_ptr_start + (y * nb); + + for (int64_t x = xstart; x < nc / 8; x++) { + + const block_q4_Kx8 * b_ptr = b_ptr_start + (x * b_nb); + + // Master FP accumulators + __m256 acc_rows[4]; + for (int i = 0; i < 4; i++) { + acc_rows[i] = _mm256_setzero_ps(); + } + + __m256 acc_min_rows[4]; + for (int i = 0; i < 4; i++) { + acc_min_rows[i] = _mm256_setzero_ps(); + } + + for (int64_t b = 0; b < nb; b++) { + + // Scale values - Load the eight scale values of block_q4_Kx8 + const __m256 col_scale_f32 = GGML_F32Cx8_LOAD(b_ptr[b].d); + + // dmin values - Load the eight dmin values of block_q4_Kx8 + const __m256 col_dmin_f32 = GGML_F32Cx8_LOAD(b_ptr[b].dmin); + + // Loop to iterate over the eight sub blocks of a super block - two sub blocks are processed per iteration + for (int sb = 0; sb < QK_K / 64; sb++) { + + // Load the eight block_q4_k for two sub blocks quantized values interleaved with each other in chunks of eight bytes - B0,B1 ....B6,B7 + const __m256i rhs_raw_mat_0123_0 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + sb * 256)); + const __m256i rhs_raw_mat_4567_0 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 32 + sb * 256)); + const __m256i rhs_raw_mat_0123_1 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 64 + sb * 256)); + const __m256i rhs_raw_mat_4567_1 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 96 + sb * 256)); + const __m256i rhs_raw_mat_0123_2 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 128 + sb * 256)); + const __m256i rhs_raw_mat_4567_2 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 160 + sb * 256)); + const __m256i rhs_raw_mat_0123_3 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 192 + sb * 256)); + const __m256i rhs_raw_mat_4567_3 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 224 + sb * 256)); + + // Save the values in the following vectors in the formats B0B1B4B5, B2B3B6B7 for further processing and storing of values + const __m256i rhs_raw_mat_0145_0 = _mm256_blend_epi32(rhs_raw_mat_0123_0, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_0, requiredOrder), 240); 
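+ // A sketch of the regrouping done by this blend/permute pair and the seven below: each 256 bit load holds four interleaved 8 byte column chunks, e.g. rhs_raw_mat_0123_0 = (B0 B1 | B2 B3) with one 64 bit chunk per column and "|" marking the 128 bit lane split.
+ // _mm256_permutevar8x32_epi32 with requiredOrder swaps the two 128 bit halves, and _mm256_blend_epi32 with mask 240 (0b11110000) takes dwords 0-3 from its first operand and dwords 4-7 from its second, so:
+ //   blend((B0 B1 | B2 B3), (B6 B7 | B4 B5), 240) = (B0 B1 | B4 B5)
+ //   blend((B2 B3 | B0 B1), (B4 B5 | B6 B7), 240) = (B2 B3 | B6 B7)
+ // which yields the B0B1B4B5 / B2B3B6B7 layout that the 4-bit unpacking and shuffle patterns below rely on.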
+ const __m256i rhs_raw_mat_2367_0 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_0, requiredOrder), rhs_raw_mat_4567_0, 240); + const __m256i rhs_raw_mat_0145_1 = _mm256_blend_epi32(rhs_raw_mat_0123_1, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_1, requiredOrder), 240); + const __m256i rhs_raw_mat_2367_1 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_1, requiredOrder), rhs_raw_mat_4567_1, 240); + const __m256i rhs_raw_mat_0145_2 = _mm256_blend_epi32(rhs_raw_mat_0123_2, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_2, requiredOrder), 240); + const __m256i rhs_raw_mat_2367_2 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_2, requiredOrder), rhs_raw_mat_4567_2, 240); + const __m256i rhs_raw_mat_0145_3 = _mm256_blend_epi32(rhs_raw_mat_0123_3, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_3, requiredOrder), 240); + const __m256i rhs_raw_mat_2367_3 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_3, requiredOrder), rhs_raw_mat_4567_3, 240); + + // 4-bit -> 8-bit + // First sub block of the two sub blocks processed in the iteration + const __m256i rhs_mat_0145_00 = _mm256_and_si256(rhs_raw_mat_0145_0, m4b); //B00(0-7) B01(0-7) B04(0-7) B05(0-7) + const __m256i rhs_mat_2367_00 = _mm256_and_si256(rhs_raw_mat_2367_0, m4b); //B02(0-7) B03(0-7) B06(0-7) B07(0-7) + + const __m256i rhs_mat_0145_01 = _mm256_and_si256(rhs_raw_mat_0145_1, m4b); //B00(8-15) B01(8-15) B04(8-15) B05(8-15) + const __m256i rhs_mat_2367_01 = _mm256_and_si256(rhs_raw_mat_2367_1, m4b); //B02(8-15) B03(8-15) B06(8-15) B07(8-15) + + const __m256i rhs_mat_0145_02 = _mm256_and_si256(rhs_raw_mat_0145_2, m4b); //B00(16-23) B01(16-23) B04(16-23) B05(16-23) + const __m256i rhs_mat_2367_02 = _mm256_and_si256(rhs_raw_mat_2367_2, m4b); //B02(16-23) B03(16-23) B06(16-23) B07(16-23) + + const __m256i rhs_mat_0145_03 = _mm256_and_si256(rhs_raw_mat_0145_3, m4b); //B00(24-31) B01(24-31) B04(24-31) B05(24-31) + const __m256i rhs_mat_2367_03 = _mm256_and_si256(rhs_raw_mat_2367_3, m4b); //B02(24-31) B03(24-31) B06(24-31) B07(24-31) + + // Second sub block of the two sub blocks processed in the iteration + const __m256i rhs_mat_0145_10 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_0, 4), m4b); //B10(0-7) B11(0-7) B14(0-7) B15(0-7) + const __m256i rhs_mat_2367_10 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_0, 4), m4b); //B12(0-7) B13(0-7) B16(0-7) B17(0-7) + + const __m256i rhs_mat_0145_11 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_1, 4), m4b); //B10(8-15) B11(8-15) B14(8-15) B15(8-15) + const __m256i rhs_mat_2367_11 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_1, 4), m4b); //B12(8-15) B13(8-15) B16(8-15) B17(8-15) + + const __m256i rhs_mat_0145_12 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_2, 4), m4b); //B10(16-23) B11(16-23) B14(16-23) B15(16-23) + const __m256i rhs_mat_2367_12 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_2, 4), m4b); //B12(16-23) B13(16-23) B16(16-23) B17(16-23) + + const __m256i rhs_mat_0145_13 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_3, 4), m4b); //B10(24-31) B11(24-31) B14(24-31) B15(24-31) + const __m256i rhs_mat_2367_13 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_3, 4), m4b); //B12(24-31) B13(24-31) B16(24-31) B17(24-31) + + // Shuffle pattern one - right side input + const __m256i rhs_mat_0145_00_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_00, 136); //B00(0-3) B01(0-3) B00(0-3) B01(0-3) B04(0-3) B05(0-3) B04(0-3) B05(0-3) + const __m256i rhs_mat_2367_00_sp1 = 
_mm256_shuffle_epi32(rhs_mat_2367_00, 136); //B02(0-3) B03(0-3) B02(0-3) B03(0-3) B06(0-3) B07(0-3) B06(0-3) B07(0-3) + + const __m256i rhs_mat_0145_01_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_01, 136); //B00(8-11) B01(8-11) B00(8-11) B01(8-11) B04(8-11) B05(8-11) B04(8-11) B05(8-11) + const __m256i rhs_mat_2367_01_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_01, 136); //B02(8-11) B03(8-11) B02(8-11) B03(8-11) B06(8-11) B07(8-11) B06(8-11) B07(8-11) + + const __m256i rhs_mat_0145_02_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_02, 136); //B00(16-19) B01(16-19) B00(16-19) B01(16-19) B04(16-19) B05(16-19) B04(16-19) B05(16-19) + const __m256i rhs_mat_2367_02_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_02, 136); //B02(16-19) B03(16-19) B02(16-19) B03(16-19) B06(16-19) B07(16-19) B06(16-19) B07(16-19) + + const __m256i rhs_mat_0145_03_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_03, 136); //B00(24-27) B01(24-27) B00(24-27) B01(24-27) B04(24-27) B05(24-27) B04(24-27) B05(24-27) + const __m256i rhs_mat_2367_03_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_03, 136); //B02(24-27) B03(24-27) B02(24-27) B03(24-27) B06(24-27) B07(24-27) B06(24-27) B07(24-27) + + const __m256i rhs_mat_0145_10_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_10, 136); //B10(0-3) B11(0-3) B10(0-3) B11(0-3) B14(0-3) B15(0-3) B14(0-3) B15(0-3) + const __m256i rhs_mat_2367_10_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_10, 136); //B12(0-3) B13(0-3) B12(0-3) B13(0-3) B16(0-3) B17(0-3) B16(0-3) B17(0-3) + + const __m256i rhs_mat_0145_11_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_11, 136); //B10(8-11) B11(8-11) B10(8-11) B11(8-11) B14(8-11) B15(8-11) B14(8-11) B15(8-11) + const __m256i rhs_mat_2367_11_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_11, 136); //B12(8-11) B13(8-11) B12(8-11) B13(8-11) B16(8-11) B17(8-11) B16(8-11) B17(8-11) + + const __m256i rhs_mat_0145_12_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_12, 136); //B10(16-19) B11(16-19) B10(16-19) B11(16-19) B14(16-19) B15(16-19) B14(16-19) B15(16-19) + const __m256i rhs_mat_2367_12_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_12, 136); //B12(16-19) B13(16-19) B12(16-19) B13(16-19) B16(16-19) B17(16-19) B16(16-19) B17(16-19) + + const __m256i rhs_mat_0145_13_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_13, 136); //B10(24-27) B11(24-27) B10(24-27) B11(24-27) B14(24-27) B15(24-27) B14(24-27) B15(24-27) + const __m256i rhs_mat_2367_13_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_13, 136); //B12(24-27) B13(24-27) B12(24-27) B13(24-27) B16(24-27) B17(24-27) B16(24-27) B17(24-27) + + // Shuffle pattern two - right side input + const __m256i rhs_mat_0145_00_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_00, 221); //B00(4-7) B01(4-7) B00(4-7) B01(4-7) B04(4-7) B05(4-7) B04(4-7) B05(4-7) + const __m256i rhs_mat_2367_00_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_00, 221); //B02(4-7) B03(4-7) B02(4-7) B03(4-7) B06(4-7) B07(4-7) B06(4-7) B07(4-7) + + const __m256i rhs_mat_0145_01_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_01, 221); //B00(12-15) B01(12-15) B00(12-15) B01(12-15) B04(12-15) B05(12-15) B04(12-15) B05(12-15) + const __m256i rhs_mat_2367_01_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_01, 221); //B02(12-15) B03(12-15) B02(12-15) B03(12-15) B06(12-15) B07(12-15) B06(12-15) B07(12-15) + + const __m256i rhs_mat_0145_02_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_02, 221); //B00(20-23) B01(20-23) B00(20-23) B01(20-23) B04(20-23) B05(20-23) B04(20-23) B05(20-23) + const __m256i rhs_mat_2367_02_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_02, 221); //B02(20-23) B03(20-23) B02(20-23) B03(20-23) B06(20-23) B07(20-23) B06(20-23) B07(20-23) + + const __m256i 
rhs_mat_0145_03_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_03, 221); //B00(28-31) B01(28-31) B00(28-31) B01(28-31) B04(28-31) B05(28-31) B04(28-31) B05(28-31) + const __m256i rhs_mat_2367_03_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_03, 221); //B02(28-31) B03(28-31) B02(28-31) B03(28-31) B06(28-31) B07(28-31) B06(28-31) B07(28-31) + + const __m256i rhs_mat_0145_10_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_10, 221); //B10(4-7) B11(4-7) B10(4-7) B11(4-7) B14(4-7) B15(4-7) B14(4-7) B15(4-7) + const __m256i rhs_mat_2367_10_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_10, 221); //B12(4-7) B13(4-7) B12(4-7) B13(4-7) B16(4-7) B17(4-7) B16(4-7) B17(4-7) + + const __m256i rhs_mat_0145_11_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_11, 221); //B10(12-15) B11(12-15) B10(12-15) B11(12-15) B14(12-15) B15(12-15) B14(12-15) B15(12-15) + const __m256i rhs_mat_2367_11_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_11, 221); //B12(12-15) B13(12-15) B12(12-15) B13(12-15) B16(12-15) B17(12-15) B16(12-15) B17(12-15) + + const __m256i rhs_mat_0145_12_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_12, 221); //B10(20-23) B11(20-23) B10(20-23) B11(20-23) B14(20-23) B15(20-23) B14(20-23) B15(20-23) + const __m256i rhs_mat_2367_12_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_12, 221); //B12(20-23) B13(20-23) B12(20-23) B13(20-23) B16(20-23) B17(20-23) B16(20-23) B17(20-23) + + const __m256i rhs_mat_0145_13_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_13, 221); //B10(28-31) B11(28-31) B10(28-31) B11(28-31) B14(28-31) B15(28-31) B14(28-31) B15(28-31) + const __m256i rhs_mat_2367_13_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_13, 221); //B12(28-31) B13(28-31) B12(28-31) B13(28-31) B16(28-31) B17(28-31) B16(28-31) B17(28-31) + + uint32_t utmp_0[4], utmp_1[4]; + + // Scales and Mins of corresponding sub blocks from different Q4_K structures are stored together + // The below block is for eg to extract first sub block's scales and mins from different Q4_K structures for the sb loop + memcpy(utmp_0, b_ptr[b].scales + 24 * sb, 12); + utmp_0[3] = ((utmp_0[2] >> 4) & kmask2) | (((utmp_0[1] >> 6) & kmask3) << 4); + const uint32_t uaux_0 = utmp_0[1] & kmask1; + utmp_0[1] = (utmp_0[2] & kmask2) | (((utmp_0[0] >> 6) & kmask3) << 4); + utmp_0[2] = uaux_0; + utmp_0[0] &= kmask1; + + // The below block is for eg to extract second sub block's scales and mins from different Q4_K structures when sb = 1 + memcpy(utmp_1, b_ptr[b].scales + 12 + sb * 24, 12); + utmp_1[3] = ((utmp_1[2] >> 4) & kmask2) | (((utmp_1[1] >> 6) & kmask3) << 4); + const uint32_t uaux_1 = utmp_1[1] & kmask1; + utmp_1[1] = (utmp_1[2] & kmask2) | (((utmp_1[0] >> 6) & kmask3) << 4); + utmp_1[2] = uaux_1; + utmp_1[0] &= kmask1; + + // Scales of first sub block in the sb loop + const __m128i mins_and_scales_0 = _mm_set_epi32(utmp_0[3], utmp_0[2], utmp_0[1], utmp_0[0]); + const __m256i scales_0 = _mm256_cvtepu8_epi16(_mm_unpacklo_epi8(mins_and_scales_0, mins_and_scales_0)); + + // Scales of second sub block in the sb loop + const __m128i mins_and_scales_1 = _mm_set_epi32(utmp_1[3], utmp_1[2], utmp_1[1], utmp_1[0]); + const __m256i scales_1 = _mm256_cvtepu8_epi16(_mm_unpacklo_epi8(mins_and_scales_1, mins_and_scales_1)); + + // Mins of first and second sub block of Q4_K block are arranged side by side + const __m256i mins_01 = _mm256_cvtepu8_epi16(_mm_unpacklo_epi8(_mm_shuffle_epi32(mins_and_scales_0, 78), _mm_shuffle_epi32(mins_and_scales_1, 78))); + + const __m256i scale_0145_0 = _mm256_shuffle_epi32(scales_0, 68); + const __m256i scale_2367_0 = _mm256_shuffle_epi32(scales_0, 238); + + const __m256i scale_0145_1 = 
_mm256_shuffle_epi32(scales_1, 68); + const __m256i scale_2367_1 = _mm256_shuffle_epi32(scales_1, 238); + + // Load the four block_q8_k quantized values interleaved with each other in chunks of eight bytes - A0,A1,A2,A3 + // Loaded as set of 128 bit vectors and repeated into a 256 bit vector + __m256i lhs_mat_0123_00 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 256 * sb))); + __m256i lhs_mat_01_00 = _mm256_permute2f128_si256(lhs_mat_0123_00, lhs_mat_0123_00, 0); + __m256i lhs_mat_23_00 = _mm256_permute2f128_si256(lhs_mat_0123_00, lhs_mat_0123_00, 17); + __m256i lhs_mat_0123_01 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 32 + 256 * sb))); + __m256i lhs_mat_01_01 = _mm256_permute2f128_si256(lhs_mat_0123_01, lhs_mat_0123_01, 0); + __m256i lhs_mat_23_01 = _mm256_permute2f128_si256(lhs_mat_0123_01, lhs_mat_0123_01, 17); + __m256i lhs_mat_0123_02 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 64 + 256 * sb))); + __m256i lhs_mat_01_02 = _mm256_permute2f128_si256(lhs_mat_0123_02, lhs_mat_0123_02, 0); + __m256i lhs_mat_23_02 = _mm256_permute2f128_si256(lhs_mat_0123_02, lhs_mat_0123_02, 17); + __m256i lhs_mat_0123_03 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 96 + 256 * sb))); + __m256i lhs_mat_01_03 = _mm256_permute2f128_si256(lhs_mat_0123_03, lhs_mat_0123_03, 0); + __m256i lhs_mat_23_03 = _mm256_permute2f128_si256(lhs_mat_0123_03, lhs_mat_0123_03, 17); + __m256i lhs_mat_0123_10 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 128 + 256 * sb))); + __m256i lhs_mat_01_10 = _mm256_permute2f128_si256(lhs_mat_0123_10, lhs_mat_0123_10, 0); + __m256i lhs_mat_23_10 = _mm256_permute2f128_si256(lhs_mat_0123_10, lhs_mat_0123_10, 17); + __m256i lhs_mat_0123_11 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 160 + 256 * sb))); + __m256i lhs_mat_01_11 = _mm256_permute2f128_si256(lhs_mat_0123_11, lhs_mat_0123_11, 0); + __m256i lhs_mat_23_11 = _mm256_permute2f128_si256(lhs_mat_0123_11, lhs_mat_0123_11, 17); + __m256i lhs_mat_0123_12 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 192 + 256 * sb))); + __m256i lhs_mat_01_12 = _mm256_permute2f128_si256(lhs_mat_0123_12, lhs_mat_0123_12, 0); + __m256i lhs_mat_23_12 = _mm256_permute2f128_si256(lhs_mat_0123_12, lhs_mat_0123_12, 17); + __m256i lhs_mat_0123_13 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 224 + 256 * sb))); + __m256i lhs_mat_01_13 = _mm256_permute2f128_si256(lhs_mat_0123_13, lhs_mat_0123_13, 0); + __m256i lhs_mat_23_13 = _mm256_permute2f128_si256(lhs_mat_0123_13, lhs_mat_0123_13, 17); + + // Bsums are loaded - four bsums are loaded (for two sub blocks) for the different Q8_K blocks + __m256i lhs_bsums_0123_01 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].bsums + 16 * sb))); + __m256i lhs_bsums_hsum_0123_01 = _mm256_castsi128_si256(_mm_hadd_epi16(_mm256_castsi256_si128(lhs_bsums_0123_01), _mm256_extractf128_si256(lhs_bsums_0123_01, 1))); + lhs_bsums_hsum_0123_01 = _mm256_permute2x128_si256(lhs_bsums_hsum_0123_01, lhs_bsums_hsum_0123_01, 0); + + // Shuffle pattern one - left side input + const __m256i lhs_mat_01_00_sp1 = _mm256_shuffle_epi32(lhs_mat_01_00, 160); //A00(0-3) A00(0-3) A01(0-3) A01(0-3) A00(0-3) A00(0-3) A01(0-3) A01(0-3) + const __m256i lhs_mat_23_00_sp1 = _mm256_shuffle_epi32(lhs_mat_23_00, 160); //A02(0-3) A03(0-3) A02(0-3) A03(0-3) A02(0-3) A03(0-3) A02(0-3) A03(0-3) + + const __m256i lhs_mat_01_01_sp1 = _mm256_shuffle_epi32(lhs_mat_01_01, 160); //A00(8-11) A00(8-11) A01(8-11) A01(8-11) A00(8-11) A00(8-11) A01(8-11) A01(8-11) + const __m256i lhs_mat_23_01_sp1 = 
_mm256_shuffle_epi32(lhs_mat_23_01, 160); //A02(8-11) A03(8-11) A02(8-11) A03(8-11) A02(8-11) A03(8-11) A02(8-11) A03(8-11) + + const __m256i lhs_mat_01_02_sp1 = _mm256_shuffle_epi32(lhs_mat_01_02, 160); //A00(16-19) A00(16-19) A01(16-19) A01(16-19) A00(16-19) A00(16-19) A01(16-19) A01(16-19) + const __m256i lhs_mat_23_02_sp1 = _mm256_shuffle_epi32(lhs_mat_23_02, 160); //A02(16-19) A03(16-19) A02(16-19) A03(16-19) A02(16-19) A03(16-19) A02(16-19) A03(16-19) + + const __m256i lhs_mat_01_03_sp1 = _mm256_shuffle_epi32(lhs_mat_01_03, 160); //A00(24-27) A00(24-27) A01(24-27) A01(24-27) A00(24-27) A00(24-27) A01(24-27) A01(24-27) + const __m256i lhs_mat_23_03_sp1 = _mm256_shuffle_epi32(lhs_mat_23_03, 160); //A02(24-27) A03(24-27) A02(24-27) A03(24-27) A02(24-27) A03(24-27) A02(24-27) A03(24-27) + + const __m256i lhs_mat_01_10_sp1 = _mm256_shuffle_epi32(lhs_mat_01_10, 160); //A10(0-3) A10(0-3) A11(0-3) A11(0-3) A10(0-3) A10(0-3) A11(0-3) A11(0-3) + const __m256i lhs_mat_23_10_sp1 = _mm256_shuffle_epi32(lhs_mat_23_10, 160); //A12(0-3) A13(0-3) A12(0-3) A13(0-3) A12(0-3) A13(0-3) A12(0-3) A13(0-3) + + const __m256i lhs_mat_01_11_sp1 = _mm256_shuffle_epi32(lhs_mat_01_11, 160); //A10(8-11) A10(8-11) A11(8-11) A11(8-11) A10(8-11) A10(8-11) A11(8-11) A11(8-11) + const __m256i lhs_mat_23_11_sp1 = _mm256_shuffle_epi32(lhs_mat_23_11, 160); //A12(8-11) A13(8-11) A12(8-11) A13(8-11) A12(8-11) A13(8-11) A12(8-11) A13(8-11) + + const __m256i lhs_mat_01_12_sp1 = _mm256_shuffle_epi32(lhs_mat_01_12, 160); //A10(16-19) A10(16-19) A11(16-19) A11(16-19) A10(16-19) A10(16-19) A11(16-19) A11(16-19) + const __m256i lhs_mat_23_12_sp1 = _mm256_shuffle_epi32(lhs_mat_23_12, 160); //A12(16-19) A13(16-19) A12(16-19) A13(16-19) A12(16-19) A13(16-19) A12(16-19) A13(16-19) + + const __m256i lhs_mat_01_13_sp1 = _mm256_shuffle_epi32(lhs_mat_01_13, 160); //A10(24-27) A10(24-27) A11(24-27) A11(24-27) A10(24-27) A10(24-27) A11(24-27) A11(24-27) + const __m256i lhs_mat_23_13_sp1 = _mm256_shuffle_epi32(lhs_mat_23_13, 160); //A12(24-27) A13(24-27) A12(24-27) A13(24-27) A12(24-27) A13(24-27) A12(24-27) A13(24-27) + + // Shuffle pattern two- left side input + const __m256i lhs_mat_01_00_sp2 = _mm256_shuffle_epi32(lhs_mat_01_00, 245); //A00(4-7) A00(4-7) A01(4-7) A01(4-7) A00(4-7) A00(4-7) A01(4-7) A01(4-7) + const __m256i lhs_mat_23_00_sp2 = _mm256_shuffle_epi32(lhs_mat_23_00, 245); //A02(4-7) A03(4-7) A02(4-7) A03(4-7) A02(4-7) A03(4-7) A02(4-7) A03(4-7) + + const __m256i lhs_mat_01_01_sp2 = _mm256_shuffle_epi32(lhs_mat_01_01, 245); //A00(12-15) A00(12-15) A01(12-15) A01(12-15) A00(12-15) A00(12-15) A01(12-15) A01(12-15) + const __m256i lhs_mat_23_01_sp2 = _mm256_shuffle_epi32(lhs_mat_23_01, 245); //A02(12-15) A03(12-15) A02(12-15) A03(12-15) A02(12-15) A03(12-15) A02(12-15) A03(12-15) + + const __m256i lhs_mat_01_02_sp2 = _mm256_shuffle_epi32(lhs_mat_01_02, 245); //A00(20-23) A00(20-23) A01(20-23) A01(20-23) A00(20-23) A00(20-23) A01(20-23) A01(20-23) + const __m256i lhs_mat_23_02_sp2 = _mm256_shuffle_epi32(lhs_mat_23_02, 245); //A02(20-23) A03(20-23) A02(20-23) A03(20-23) A02(20-23) A03(20-23) A02(20-23) A03(20-23) + + const __m256i lhs_mat_01_03_sp2 = _mm256_shuffle_epi32(lhs_mat_01_03, 245); //A00(28-31) A00(28-31) A01(28-31) A01(28-31) A00(28-31) A00(28-31) A01(28-31) A01(28-31) + const __m256i lhs_mat_23_03_sp2 = _mm256_shuffle_epi32(lhs_mat_23_03, 245); //A02(28-31) A03(28-31) A02(28-31) A03(28-31) A02(28-31) A03(28-31) A02(28-31) A03(28-31) + + const __m256i lhs_mat_01_10_sp2 = _mm256_shuffle_epi32(lhs_mat_01_10, 245); 
//A10(4-7) A10(4-7) A11(4-7) A11(4-7) A10(4-7) A10(4-7) A11(4-7) A11(4-7) + const __m256i lhs_mat_23_10_sp2 = _mm256_shuffle_epi32(lhs_mat_23_10, 245); //A12(4-7) A13(4-7) A12(4-7) A13(4-7) A12(4-7) A13(4-7) A12(4-7) A13(4-7) + + const __m256i lhs_mat_01_11_sp2 = _mm256_shuffle_epi32(lhs_mat_01_11, 245); //A10(12-15) A10(12-15) A11(12-15) A11(12-15) A10(12-15) A10(12-15) A11(12-15) A11(12-15) + const __m256i lhs_mat_23_11_sp2 = _mm256_shuffle_epi32(lhs_mat_23_11, 245); //A12(12-15) A13(12-15) A12(12-15) A13(12-15) A12(12-15) A13(12-15) A12(12-15) A13(12-15) + + const __m256i lhs_mat_01_12_sp2 = _mm256_shuffle_epi32(lhs_mat_01_12, 245); //A10(20-23) A10(20-23) A11(20-23) A11(20-23) A10(20-23) A10(20-23) A11(20-23) A11(20-23) + const __m256i lhs_mat_23_12_sp2 = _mm256_shuffle_epi32(lhs_mat_23_12, 245); //A12(20-23) A13(20-23) A12(20-23) A13(20-23) A12(20-23) A13(20-23) A12(20-23) A13(20-23) + + const __m256i lhs_mat_01_13_sp2 = _mm256_shuffle_epi32(lhs_mat_01_13, 245); //A10(28-31) A10(28-31) A11(28-31) A11(28-31) A10(28-31) A10(28-31) A11(28-31) A11(28-31) + const __m256i lhs_mat_23_13_sp2 = _mm256_shuffle_epi32(lhs_mat_23_13, 245); //A12(28-31) A13(28-31) A12(28-31) A13(28-31) A12(28-31) A13(28-31) A12(28-31) A13(28-31) + + // The values arranged in shuffle patterns are operated with dot product operation within 32 bit lane i.e corresponding bytes and multiplied and added into 32 bit integers within 32 bit lane + __m256i iacc_mat_00_0_sp1 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_03_sp1, lhs_mat_01_03_sp1), _mm256_maddubs_epi16(rhs_mat_0145_02_sp1, lhs_mat_01_02_sp1)), _mm256_maddubs_epi16(rhs_mat_0145_01_sp1, lhs_mat_01_01_sp1)), _mm256_maddubs_epi16(rhs_mat_0145_00_sp1, lhs_mat_01_00_sp1)); + __m256i iacc_mat_01_0_sp1 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_03_sp1, lhs_mat_01_03_sp1), _mm256_maddubs_epi16(rhs_mat_2367_02_sp1, lhs_mat_01_02_sp1)), _mm256_maddubs_epi16(rhs_mat_2367_01_sp1, lhs_mat_01_01_sp1)), _mm256_maddubs_epi16(rhs_mat_2367_00_sp1, lhs_mat_01_00_sp1)); + __m256i iacc_mat_10_0_sp1 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_03_sp1, lhs_mat_23_03_sp1), _mm256_maddubs_epi16(rhs_mat_0145_02_sp1, lhs_mat_23_02_sp1)), _mm256_maddubs_epi16(rhs_mat_0145_01_sp1, lhs_mat_23_01_sp1)), _mm256_maddubs_epi16(rhs_mat_0145_00_sp1, lhs_mat_23_00_sp1)); + __m256i iacc_mat_11_0_sp1 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_03_sp1, lhs_mat_23_03_sp1), _mm256_maddubs_epi16(rhs_mat_2367_02_sp1, lhs_mat_23_02_sp1)), _mm256_maddubs_epi16(rhs_mat_2367_01_sp1, lhs_mat_23_01_sp1)), _mm256_maddubs_epi16(rhs_mat_2367_00_sp1, lhs_mat_23_00_sp1)); + __m256i iacc_mat_00_1_sp1 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_13_sp1, lhs_mat_01_13_sp1), _mm256_maddubs_epi16(rhs_mat_0145_12_sp1, lhs_mat_01_12_sp1)), _mm256_maddubs_epi16(rhs_mat_0145_11_sp1, lhs_mat_01_11_sp1)), _mm256_maddubs_epi16(rhs_mat_0145_10_sp1, lhs_mat_01_10_sp1)); + __m256i iacc_mat_01_1_sp1 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_13_sp1, lhs_mat_01_13_sp1), _mm256_maddubs_epi16(rhs_mat_2367_12_sp1, lhs_mat_01_12_sp1)), _mm256_maddubs_epi16(rhs_mat_2367_11_sp1, lhs_mat_01_11_sp1)), _mm256_maddubs_epi16(rhs_mat_2367_10_sp1, lhs_mat_01_10_sp1)); + __m256i iacc_mat_10_1_sp1 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_13_sp1, 
lhs_mat_23_13_sp1), _mm256_maddubs_epi16(rhs_mat_0145_12_sp1, lhs_mat_23_12_sp1)), _mm256_maddubs_epi16(rhs_mat_0145_11_sp1, lhs_mat_23_11_sp1)), _mm256_maddubs_epi16(rhs_mat_0145_10_sp1, lhs_mat_23_10_sp1)); + __m256i iacc_mat_11_1_sp1 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_13_sp1, lhs_mat_23_13_sp1), _mm256_maddubs_epi16(rhs_mat_2367_12_sp1, lhs_mat_23_12_sp1)), _mm256_maddubs_epi16(rhs_mat_2367_11_sp1, lhs_mat_23_11_sp1)), _mm256_maddubs_epi16(rhs_mat_2367_10_sp1, lhs_mat_23_10_sp1)); + + __m256i iacc_mat_00_0_sp2 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_03_sp2, lhs_mat_01_03_sp2), _mm256_maddubs_epi16(rhs_mat_0145_02_sp2, lhs_mat_01_02_sp2)), _mm256_maddubs_epi16(rhs_mat_0145_01_sp2, lhs_mat_01_01_sp2)), _mm256_maddubs_epi16(rhs_mat_0145_00_sp2, lhs_mat_01_00_sp2)); + __m256i iacc_mat_01_0_sp2 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_03_sp2, lhs_mat_01_03_sp2), _mm256_maddubs_epi16(rhs_mat_2367_02_sp2, lhs_mat_01_02_sp2)), _mm256_maddubs_epi16(rhs_mat_2367_01_sp2, lhs_mat_01_01_sp2)), _mm256_maddubs_epi16(rhs_mat_2367_00_sp2, lhs_mat_01_00_sp2)); + __m256i iacc_mat_10_0_sp2 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_03_sp2, lhs_mat_23_03_sp2), _mm256_maddubs_epi16(rhs_mat_0145_02_sp2, lhs_mat_23_02_sp2)), _mm256_maddubs_epi16(rhs_mat_0145_01_sp2, lhs_mat_23_01_sp2)), _mm256_maddubs_epi16(rhs_mat_0145_00_sp2, lhs_mat_23_00_sp2)); + __m256i iacc_mat_11_0_sp2 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_03_sp2, lhs_mat_23_03_sp2), _mm256_maddubs_epi16(rhs_mat_2367_02_sp2, lhs_mat_23_02_sp2)), _mm256_maddubs_epi16(rhs_mat_2367_01_sp2, lhs_mat_23_01_sp2)), _mm256_maddubs_epi16(rhs_mat_2367_00_sp2, lhs_mat_23_00_sp2)); + __m256i iacc_mat_00_1_sp2 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_13_sp2, lhs_mat_01_13_sp2), _mm256_maddubs_epi16(rhs_mat_0145_12_sp2, lhs_mat_01_12_sp2)), _mm256_maddubs_epi16(rhs_mat_0145_11_sp2, lhs_mat_01_11_sp2)), _mm256_maddubs_epi16(rhs_mat_0145_10_sp2, lhs_mat_01_10_sp2)); + __m256i iacc_mat_01_1_sp2 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_13_sp2, lhs_mat_01_13_sp2), _mm256_maddubs_epi16(rhs_mat_2367_12_sp2, lhs_mat_01_12_sp2)), _mm256_maddubs_epi16(rhs_mat_2367_11_sp2, lhs_mat_01_11_sp2)), _mm256_maddubs_epi16(rhs_mat_2367_10_sp2, lhs_mat_01_10_sp2)); + __m256i iacc_mat_10_1_sp2 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_13_sp2, lhs_mat_23_13_sp2), _mm256_maddubs_epi16(rhs_mat_0145_12_sp2, lhs_mat_23_12_sp2)), _mm256_maddubs_epi16(rhs_mat_0145_11_sp2, lhs_mat_23_11_sp2)), _mm256_maddubs_epi16(rhs_mat_0145_10_sp2, lhs_mat_23_10_sp2)); + __m256i iacc_mat_11_1_sp2 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_13_sp2, lhs_mat_23_13_sp2), _mm256_maddubs_epi16(rhs_mat_2367_12_sp2, lhs_mat_23_12_sp2)), _mm256_maddubs_epi16(rhs_mat_2367_11_sp2, lhs_mat_23_11_sp2)), _mm256_maddubs_epi16(rhs_mat_2367_10_sp2, lhs_mat_23_10_sp2)); + + // Output of both shuffle patterns are added in order to sum dot product outputs of all 32 values in block + __m256i iacc_mat_00_0 = _mm256_add_epi16(iacc_mat_00_0_sp1, iacc_mat_00_0_sp2); + __m256i iacc_mat_01_0 = _mm256_add_epi16(iacc_mat_01_0_sp1, iacc_mat_01_0_sp2); + __m256i iacc_mat_10_0 = _mm256_add_epi16(iacc_mat_10_0_sp1, 
iacc_mat_10_0_sp2); + __m256i iacc_mat_11_0 = _mm256_add_epi16(iacc_mat_11_0_sp1, iacc_mat_11_0_sp2); + + __m256i iacc_mat_00_1 = _mm256_add_epi16(iacc_mat_00_1_sp1, iacc_mat_00_1_sp2); + __m256i iacc_mat_01_1 = _mm256_add_epi16(iacc_mat_01_1_sp1, iacc_mat_01_1_sp2); + __m256i iacc_mat_10_1 = _mm256_add_epi16(iacc_mat_10_1_sp1, iacc_mat_10_1_sp2); + __m256i iacc_mat_11_1 = _mm256_add_epi16(iacc_mat_11_1_sp1, iacc_mat_11_1_sp2); + + // The summed dot products are multiplied with the scales of the corresponding sub block, widening the 16 bit sums into 32 bit integers + iacc_mat_00_0 = _mm256_madd_epi16(iacc_mat_00_0, scale_0145_0); + iacc_mat_01_0 = _mm256_madd_epi16(iacc_mat_01_0, scale_2367_0); + iacc_mat_10_0 = _mm256_madd_epi16(iacc_mat_10_0, scale_0145_0); + iacc_mat_11_0 = _mm256_madd_epi16(iacc_mat_11_0, scale_2367_0); + + iacc_mat_00_1 = _mm256_madd_epi16(iacc_mat_00_1, scale_0145_1); + iacc_mat_01_1 = _mm256_madd_epi16(iacc_mat_01_1, scale_2367_1); + iacc_mat_10_1 = _mm256_madd_epi16(iacc_mat_10_1, scale_0145_1); + iacc_mat_11_1 = _mm256_madd_epi16(iacc_mat_11_1, scale_2367_1); + + // Straighten out to make 4 row vectors (4 for each sub block which are accumulated together in the next step) + __m256i iacc_row_0_0 = _mm256_blend_epi32(iacc_mat_00_0, _mm256_shuffle_epi32(iacc_mat_01_0, 78), 204); + __m256i iacc_row_1_0 = _mm256_blend_epi32(_mm256_shuffle_epi32(iacc_mat_00_0, 78), iacc_mat_01_0, 204); + __m256i iacc_row_2_0 = _mm256_blend_epi32(iacc_mat_10_0, _mm256_shuffle_epi32(iacc_mat_11_0, 78), 204); + __m256i iacc_row_3_0 = _mm256_blend_epi32(_mm256_shuffle_epi32(iacc_mat_10_0, 78), iacc_mat_11_0, 204); + __m256i iacc_row_0_1 = _mm256_blend_epi32(iacc_mat_00_1, _mm256_shuffle_epi32(iacc_mat_01_1, 78), 204); + __m256i iacc_row_1_1 = _mm256_blend_epi32(_mm256_shuffle_epi32(iacc_mat_00_1, 78), iacc_mat_01_1, 204); + __m256i iacc_row_2_1 = _mm256_blend_epi32(iacc_mat_10_1, _mm256_shuffle_epi32(iacc_mat_11_1, 78), 204); + __m256i iacc_row_3_1 = _mm256_blend_epi32(_mm256_shuffle_epi32(iacc_mat_10_1, 78), iacc_mat_11_1, 204); + + __m256i iacc_row_0 = _mm256_add_epi32(iacc_row_0_0, iacc_row_0_1); + __m256i iacc_row_1 = _mm256_add_epi32(iacc_row_1_0, iacc_row_1_1); + __m256i iacc_row_2 = _mm256_add_epi32(iacc_row_2_0, iacc_row_2_1); + __m256i iacc_row_3 = _mm256_add_epi32(iacc_row_3_0, iacc_row_3_1); + + // Load the scale (d) values for all the 4 Q8_K blocks and repeat it across lanes + const __m128 row_scale_f32_sse = _mm_load_ps(a_ptr[b].d); + const __m256 row_scale_f32 = _mm256_set_m128(row_scale_f32_sse, row_scale_f32_sse); //GGML_F32Cx8_REPEAT_LOAD(a_ptrs[rp][b].d, loadMask); + + // Multiply with appropriate scales and accumulate (for both d and dmin) below + acc_rows[0] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_0), _mm256_mul_ps(col_scale_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 0)), acc_rows[0]); + acc_rows[1] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_1), _mm256_mul_ps(col_scale_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 85)), acc_rows[1]); + acc_rows[2] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_2), _mm256_mul_ps(col_scale_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 170)), acc_rows[2]); + acc_rows[3] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_3), _mm256_mul_ps(col_scale_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 255)), acc_rows[3]); + + __m256i iacc_row_min_0 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_hsum_0123_01, 0), mins_01); + __m256i iacc_row_min_1 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_hsum_0123_01, 85), mins_01); + __m256i 
iacc_row_min_2 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_hsum_0123_01, 170), mins_01); + __m256i iacc_row_min_3 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_hsum_0123_01, 255), mins_01); + + acc_min_rows[0] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_min_0), _mm256_mul_ps(col_dmin_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 0)), acc_min_rows[0]); + acc_min_rows[1] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_min_1), _mm256_mul_ps(col_dmin_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 85)), acc_min_rows[1]); + acc_min_rows[2] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_min_2), _mm256_mul_ps(col_dmin_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 170)), acc_min_rows[2]); + acc_min_rows[3] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_min_3), _mm256_mul_ps(col_dmin_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 255)), acc_min_rows[3]); + } + } + + // Store the accumulated values + for (int i = 0; i < 4; i++) { + _mm256_storeu_ps((float * )(s + ((y * 4 + i) * bs + x * 8)), _mm256_sub_ps(acc_rows[i], acc_min_rows[i])); + } + } + } + +#else + UNUSED(kmask1); + UNUSED(kmask2); + UNUSED(kmask3); + ggml_gemm_q4_K_8x8_q8_K_generic(n, s, bs, vx, vy, nr, nc); +#endif +} + +void ggml_gemm_q2_K_8x8_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { + const int qk = QK_K; + const int nb = n / qk; + const int ncols_interleaved = 8; + const int blocklen = 8; + + assert (n % qk == 0); + assert (nr % 4 == 0); + assert (nc % ncols_interleaved == 0); + + UNUSED(s); + UNUSED(bs); + UNUSED(vx); + UNUSED(vy); + UNUSED(nr); + UNUSED(nc); + UNUSED(nb); + UNUSED(ncols_interleaved); + UNUSED(blocklen); + +#if defined(__AVX2__) || defined(__AVX512F__) + const block_q2_Kx8 * b_ptr_start = (const block_q2_Kx8 * ) vx; + const block_q8_Kx4 * a_ptr_start = (const block_q8_Kx4 * ) vy; + int64_t b_nb = n / QK_K; + int64_t y = 0; + + // Permute mask used for easier vector processing at later stages + __m256i requiredOrder = _mm256_set_epi32(3, 2, 1, 0, 7, 6, 5, 4); + int64_t xstart = 0; + int anr = nr - nr % 16; // Used to align nr with boundary of 16 + + // Masks to convert 2 bit and 4 bit values into bytes + const __m256i m3b = _mm256_set1_epi8(3); + const __m128i m4b_sse = _mm_set1_epi8(0xF); + + // Masks to get the appropriate scales + __m128i scalesmask1_sse = _mm_set_epi8(14,14,12,12,10,10,8,8,6,6,4,4,2,2,0,0); + __m128i scalesmask2_sse = _mm_set_epi8(15,15,13,13,11,11,9,9,7,7,5,5,3,3,1,1); + + __m256i scalesmask1 = _mm256_castsi128_si256(scalesmask1_sse); + scalesmask1 = _mm256_permute2f128_si256(scalesmask1, scalesmask1, 0); + __m256i scalesmask2 = _mm256_castsi128_si256(scalesmask2_sse); + scalesmask2 = _mm256_permute2f128_si256(scalesmask2, scalesmask2, 0); + +#ifdef __AVX512F__ + + int anc = nc - nc % 16; // Used to align nc with boundary of 16 + + // Mask to mask out nibbles from packed bytes + const __m256i m4b = _mm256_set1_epi8(0x0F); + // Mask to mask out 2 bit values from packed bytes, expanded to 512 bit length + const __m512i m3bexpanded = _mm512_set1_epi8(3); + // Take group of four block_q8_Kx4 structures at each pass of the loop and perform dot product operation + for (; y < anr / 4; y += 4) { + + const block_q8_Kx4 * a_ptrs[4]; + + a_ptrs[0] = a_ptr_start + (y * nb); + for (int i = 0; i < 3; ++i) { + a_ptrs[i + 1] = a_ptrs[i] + nb; + } + + // Take group of two block_q2_Kx8 structures at each pass of the loop and perform dot product operation + for (int64_t x = 0; x < anc / 8; x += 2) { + 
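// A sketch of the accumulation scheme used in this Q2_K path (mirroring the Q4_K path above): with Q2_K dequantization roughly x = d * sc * q - dmin * m per sub block, each output element is kept as two running sums,
+ //   out[r][c] = sum_b d8[r] * ( d[c] * sum_sb( sc_sb * dot(q2_sb, q8_sb) ) - dmin[c] * sum_sb( m_sb * bsum_sb ) )
+ // writing d8 for the Q8_K row scale and bsum_sb for the precomputed per sub block sum of the Q8_K quants; acc_rows accumulates the first term, acc_min_rows the second, and the final store subtracts them. +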
const block_q2_Kx8 * b_ptr_0 = b_ptr_start + ((x) * b_nb); + const block_q2_Kx8 * b_ptr_1 = b_ptr_start + ((x + 1) * b_nb); + + // Master FP accumulators + __m512 acc_rows[16]; + for (int i = 0; i < 16; i++) { + acc_rows[i] = _mm512_setzero_ps(); + } + + __m512 acc_min_rows[16]; + for (int i = 0; i < 16; i++) { + acc_min_rows[i] = _mm512_setzero_ps(); + } + // For super block + for (int64_t b = 0; b < nb; b++) { + // Delta values - Load the sixteen scale values from two block_q2_kx8 structures + const __m512 col_scale_f32 = GGML_F32Cx8x2_LOAD(b_ptr_0[b].d, b_ptr_1[b].d); + + // dmin values - Load the sixteen dmin values from two block_q2_kx8 structures + const __m512 col_dmin_f32 = GGML_F32Cx8x2_LOAD(b_ptr_0[b].dmin, b_ptr_1[b].dmin); + + // Loop to iterate over the sixteen sub blocks of a super block - eight sub blocks are processed per iteration + for (int sb = 0; sb < QK_K / 128; sb++) { + + // Load the eight block_q2_k for eight sub blocks quantized values interleaved with each other in chunks of eight bytes - B0,B1 ....B6,B7 + const __m256i rhs_raw_mat_0123_0 = _mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + sb * 256)); + const __m256i rhs_raw_mat_4567_0 = _mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + 32 + sb * 256)); + const __m256i rhs_raw_mat_0123_1 = _mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + 64 + sb * 256)); + const __m256i rhs_raw_mat_4567_1 = _mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + 96 + sb * 256)); + const __m256i rhs_raw_mat_0123_2 = _mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + 128 + sb * 256)); + const __m256i rhs_raw_mat_4567_2 = _mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + 160 + sb * 256)); + const __m256i rhs_raw_mat_0123_3 = _mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + 192 + sb * 256)); + const __m256i rhs_raw_mat_4567_3 = _mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + 224 + sb * 256)); + + const __m256i rhs_raw_mat_89AB_0 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + sb * 256)); + const __m256i rhs_raw_mat_CDEF_0 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + 32 + sb * 256)); + const __m256i rhs_raw_mat_89AB_1 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + 64 + sb * 256)); + const __m256i rhs_raw_mat_CDEF_1 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + 96 + sb * 256)); + const __m256i rhs_raw_mat_89AB_2 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + 128 + sb * 256)); + const __m256i rhs_raw_mat_CDEF_2 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + 160 + sb * 256)); + const __m256i rhs_raw_mat_89AB_3 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + 192 + sb * 256)); + const __m256i rhs_raw_mat_CDEF_3 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + 224 + sb * 256)); + + const __m256i rhs_raw_mat_0145_0 = _mm256_blend_epi32(rhs_raw_mat_0123_0, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_0, requiredOrder), 240); + const __m256i rhs_raw_mat_2367_0 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_0, requiredOrder), rhs_raw_mat_4567_0, 240); + const __m256i rhs_raw_mat_0145_1 = _mm256_blend_epi32(rhs_raw_mat_0123_1, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_1, requiredOrder), 240); + const __m256i rhs_raw_mat_2367_1 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_1, requiredOrder), rhs_raw_mat_4567_1, 240); + const __m256i rhs_raw_mat_0145_2 = _mm256_blend_epi32(rhs_raw_mat_0123_2, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_2, requiredOrder), 240); + const __m256i rhs_raw_mat_2367_2 = 
_mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_2, requiredOrder), rhs_raw_mat_4567_2, 240); + const __m256i rhs_raw_mat_0145_3 = _mm256_blend_epi32(rhs_raw_mat_0123_3, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_3, requiredOrder), 240); + const __m256i rhs_raw_mat_2367_3 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_3, requiredOrder), rhs_raw_mat_4567_3, 240); + + const __m256i rhs_raw_mat_89CD_0 = _mm256_blend_epi32(rhs_raw_mat_89AB_0, _mm256_permutevar8x32_epi32(rhs_raw_mat_CDEF_0, requiredOrder), 240); + const __m256i rhs_raw_mat_ABEF_0 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_89AB_0, requiredOrder), rhs_raw_mat_CDEF_0, 240); + const __m256i rhs_raw_mat_89CD_1 = _mm256_blend_epi32(rhs_raw_mat_89AB_1, _mm256_permutevar8x32_epi32(rhs_raw_mat_CDEF_1, requiredOrder), 240); + const __m256i rhs_raw_mat_ABEF_1 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_89AB_1, requiredOrder), rhs_raw_mat_CDEF_1, 240); + const __m256i rhs_raw_mat_89CD_2 = _mm256_blend_epi32(rhs_raw_mat_89AB_2, _mm256_permutevar8x32_epi32(rhs_raw_mat_CDEF_2, requiredOrder), 240); + const __m256i rhs_raw_mat_ABEF_2 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_89AB_2, requiredOrder), rhs_raw_mat_CDEF_2, 240); + const __m256i rhs_raw_mat_89CD_3 = _mm256_blend_epi32(rhs_raw_mat_89AB_3, _mm256_permutevar8x32_epi32(rhs_raw_mat_CDEF_3, requiredOrder), 240); + const __m256i rhs_raw_mat_ABEF_3 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_89AB_3, requiredOrder), rhs_raw_mat_CDEF_3, 240); + + const __m512i rhs_raw_mat_014589CD_0 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_0145_0), rhs_raw_mat_89CD_0, 1); + const __m512i rhs_raw_mat_2367ABEF_0 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_2367_0), rhs_raw_mat_ABEF_0, 1); + const __m512i rhs_raw_mat_014589CD_1 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_0145_1), rhs_raw_mat_89CD_1, 1); + const __m512i rhs_raw_mat_2367ABEF_1 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_2367_1), rhs_raw_mat_ABEF_1, 1); + + const __m512i rhs_raw_mat_014589CD_2 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_0145_2), rhs_raw_mat_89CD_2, 1); + const __m512i rhs_raw_mat_2367ABEF_2 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_2367_2), rhs_raw_mat_ABEF_2, 1); + const __m512i rhs_raw_mat_014589CD_3 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_0145_3), rhs_raw_mat_89CD_3, 1); + const __m512i rhs_raw_mat_2367ABEF_3 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_2367_3), rhs_raw_mat_ABEF_3, 1); + + //2-bit -> 8-bit + const __m512i rhs_mat_014589CD_00 = _mm512_and_si512(rhs_raw_mat_014589CD_0,m3bexpanded); //B00(0-7) B01(0-7) B04(0-7) B05(0-7) B08(0-7) B09(0-7) B0C(0-7) B0D(0-7) + const __m512i rhs_mat_2367ABEF_00 = _mm512_and_si512(rhs_raw_mat_2367ABEF_0,m3bexpanded); //B02(0-7) B03(0-7) B06(0-7) B07(0-7) B0A(0-7) B0B(0-7) B0E(0-7) B0F(0-7) + const __m512i rhs_mat_014589CD_01 = _mm512_and_si512(rhs_raw_mat_014589CD_1,m3bexpanded); //B00(8-15) B01(8-15) B04(8-15) B05(8-15) B08(8-15) B09(8-15) B0C(8-15) B0D(8-15) + const __m512i rhs_mat_2367ABEF_01 = _mm512_and_si512(rhs_raw_mat_2367ABEF_1,m3bexpanded); //B02(8-15) B03(8-15) B06(8-15) B07(8-15) B0A(8-15) B0B(8-15) B0E(8-15) B0F(8-15) + const __m512i rhs_mat_014589CD_10 = _mm512_and_si512(rhs_raw_mat_014589CD_2,m3bexpanded); //B10(0-7) B11(0-7) B14(0-7) B15(0-7) B18(0-7) B19(0-7) B1C(0-7) B1D(0-7) + const __m512i rhs_mat_2367ABEF_10 = 
+            const __m512i rhs_mat_014589CD_00 = _mm512_and_si512(rhs_raw_mat_014589CD_0,m3bexpanded); //B00(0-7) B01(0-7) B04(0-7) B05(0-7) B08(0-7) B09(0-7) B0C(0-7) B0D(0-7)
+            const __m512i rhs_mat_2367ABEF_00 = _mm512_and_si512(rhs_raw_mat_2367ABEF_0,m3bexpanded); //B02(0-7) B03(0-7) B06(0-7) B07(0-7) B0A(0-7) B0B(0-7) B0E(0-7) B0F(0-7)
+            const __m512i rhs_mat_014589CD_01 = _mm512_and_si512(rhs_raw_mat_014589CD_1,m3bexpanded); //B00(8-15) B01(8-15) B04(8-15) B05(8-15) B08(8-15) B09(8-15) B0C(8-15) B0D(8-15)
+            const __m512i rhs_mat_2367ABEF_01 = _mm512_and_si512(rhs_raw_mat_2367ABEF_1,m3bexpanded); //B02(8-15) B03(8-15) B06(8-15) B07(8-15) B0A(8-15) B0B(8-15) B0E(8-15) B0F(8-15)
+            const __m512i rhs_mat_014589CD_10 = _mm512_and_si512(rhs_raw_mat_014589CD_2,m3bexpanded); //B10(0-7) B11(0-7) B14(0-7) B15(0-7) B18(0-7) B19(0-7) B1C(0-7) B1D(0-7)
+            const __m512i rhs_mat_2367ABEF_10 = _mm512_and_si512(rhs_raw_mat_2367ABEF_2,m3bexpanded); //B12(0-7) B13(0-7) B16(0-7) B17(0-7) B1A(0-7) B1B(0-7) B1E(0-7) B1F(0-7)
+            const __m512i rhs_mat_014589CD_11 = _mm512_and_si512(rhs_raw_mat_014589CD_3,m3bexpanded); //B10(8-15) B11(8-15) B14(8-15) B15(8-15) B18(8-15) B19(8-15) B1C(8-15) B1D(8-15)
+            const __m512i rhs_mat_2367ABEF_11 = _mm512_and_si512(rhs_raw_mat_2367ABEF_3,m3bexpanded); //B12(8-15) B13(8-15) B16(8-15) B17(8-15) B1A(8-15) B1B(8-15) B1E(8-15) B1F(8-15)
+
+            const __m512i rhs_mat_014589CD_20 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_0, 2), m3bexpanded); //B20(0-7) B21(0-7) B24(0-7) B25(0-7) B28(0-7) B29(0-7) B2C(0-7) B2D(0-7)
+            const __m512i rhs_mat_2367ABEF_20 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_0, 2), m3bexpanded); //B22(0-7) B23(0-7) B26(0-7) B27(0-7) B2A(0-7) B2B(0-7) B2E(0-7) B2F(0-7)
+
+            const __m512i rhs_mat_014589CD_21 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_1, 2), m3bexpanded); //B20(8-15) B21(8-15) B24(8-15) B25(8-15) B28(8-15) B29(8-15) B2C(8-15) B2D(8-15)
+            const __m512i rhs_mat_2367ABEF_21 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_1, 2), m3bexpanded); //B22(8-15) B23(8-15) B26(8-15) B27(8-15) B2A(8-15) B2B(8-15) B2E(8-15) B2F(8-15)
+
+            const __m512i rhs_mat_014589CD_30 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_2, 2), m3bexpanded); //B30(0-7) B31(0-7) B34(0-7) B35(0-7) B38(0-7) B39(0-7) B3C(0-7) B3D(0-7)
+            const __m512i rhs_mat_2367ABEF_30 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_2, 2), m3bexpanded); //B32(0-7) B33(0-7) B36(0-7) B37(0-7) B3A(0-7) B3B(0-7) B3E(0-7) B3F(0-7)
+
+            const __m512i rhs_mat_014589CD_31 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_3, 2), m3bexpanded); //B30(8-15) B31(8-15) B34(8-15) B35(8-15) B38(8-15) B39(8-15) B3C(8-15) B3D(8-15)
+            const __m512i rhs_mat_2367ABEF_31 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_3, 2), m3bexpanded); //B32(8-15) B33(8-15) B36(8-15) B37(8-15) B3A(8-15) B3B(8-15) B3E(8-15) B3F(8-15)
+
+            const __m512i rhs_mat_014589CD_40 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_0, 4), m3bexpanded); //B40(0-7) B41(0-7) B44(0-7) B45(0-7) B48(0-7) B49(0-7) B4C(0-7) B4D(0-7)
+            const __m512i rhs_mat_2367ABEF_40 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_0, 4), m3bexpanded); //B42(0-7) B43(0-7) B46(0-7) B47(0-7) B4A(0-7) B4B(0-7) B4E(0-7) B4F(0-7)
+
+            const __m512i rhs_mat_014589CD_41 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_1, 4), m3bexpanded); //B40(8-15) B41(8-15) B44(8-15) B45(8-15) B48(8-15) B49(8-15) B4C(8-15) B4D(8-15)
+            const __m512i rhs_mat_2367ABEF_41 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_1, 4), m3bexpanded); //B42(8-15) B43(8-15) B46(8-15) B47(8-15) B4A(8-15) B4B(8-15) B4E(8-15) B4F(8-15)
+
+            const __m512i rhs_mat_014589CD_50 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_2, 4), m3bexpanded); //B50(0-7) B51(0-7) B54(0-7) B55(0-7) B58(0-7) B59(0-7) B5C(0-7) B5D(0-7)
+            const __m512i rhs_mat_2367ABEF_50 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_2, 4), m3bexpanded); //B52(0-7) B53(0-7) B56(0-7) B57(0-7) B5A(0-7) B5B(0-7) B5E(0-7) B5F(0-7)
+
+            const __m512i rhs_mat_014589CD_51 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_3, 4), m3bexpanded); //B50(8-15) B51(8-15) B54(8-15) B55(8-15) B58(8-15) B59(8-15) B5C(8-15) B5D(8-15)
+            const __m512i rhs_mat_2367ABEF_51 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_3, 4), m3bexpanded); //B52(8-15) B53(8-15) B56(8-15) B57(8-15) B5A(8-15) B5B(8-15) B5E(8-15) B5F(8-15)
+
+            const __m512i rhs_mat_014589CD_60 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_0, 6), m3bexpanded); //B60(0-7) B61(0-7) B64(0-7) B65(0-7) B68(0-7) B69(0-7) B6C(0-7) B6D(0-7)
+            const __m512i rhs_mat_2367ABEF_60 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_0, 6), m3bexpanded); //B62(0-7) B63(0-7) B66(0-7) B67(0-7) B6A(0-7) B6B(0-7) B6E(0-7) B6F(0-7)
+
+            const __m512i rhs_mat_014589CD_61 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_1, 6), m3bexpanded); //B60(8-15) B61(8-15) B64(8-15) B65(8-15) B68(8-15) B69(8-15) B6C(8-15) B6D(8-15)
+            const __m512i rhs_mat_2367ABEF_61 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_1, 6), m3bexpanded); //B62(8-15) B63(8-15) B66(8-15) B67(8-15) B6A(8-15) B6B(8-15) B6E(8-15) B6F(8-15)
+
+            const __m512i rhs_mat_014589CD_70 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_2, 6), m3bexpanded); //B70(0-7) B71(0-7) B74(0-7) B75(0-7) B78(0-7) B79(0-7) B7C(0-7) B7D(0-7)
+            const __m512i rhs_mat_2367ABEF_70 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_2, 6), m3bexpanded); //B72(0-7) B73(0-7) B76(0-7) B77(0-7) B7A(0-7) B7B(0-7) B7E(0-7) B7F(0-7)
+
+            const __m512i rhs_mat_014589CD_71 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_3, 6), m3bexpanded); //B70(8-15) B71(8-15) B74(8-15) B75(8-15) B78(8-15) B79(8-15) B7C(8-15) B7D(8-15)
+            const __m512i rhs_mat_2367ABEF_71 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_3, 6), m3bexpanded); //B72(8-15) B73(8-15) B76(8-15) B77(8-15) B7A(8-15) B7B(8-15) B7E(8-15) B7F(8-15)
+
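+            // Added note: immediate 136 (0b10001000) repeats the even 32-bit words of each
+            // 128-bit lane and 221 (0b11011101) repeats the odd ones, so the two shuffle
+            // patterns together expose all sixteen bytes of a group to the maddubs dot products.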
+            const __m512i rhs_mat_014589CD_00_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_00, (_MM_PERM_ENUM)136); //B00(0-3) B01(0-3) B00(0-3) B01(0-3) B04(0-3) B05(0-3) B04(0-3) B05(0-3) B08(0-3) B09(0-3) B08(0-3) B09(0-3) B0C(0-3) B0D(0-3) B0C(0-3) B0D(0-3)
+            const __m512i rhs_mat_2367ABEF_00_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_00, (_MM_PERM_ENUM)136); //B02(0-3) B03(0-3) B02(0-3) B03(0-3) B06(0-3) B07(0-3) B06(0-3) B07(0-3) B0A(0-3) B0B(0-3) B0A(0-3) B0B(0-3) B0E(0-3) B0F(0-3) B0E(0-3) B0F(0-3)
+
+            const __m512i rhs_mat_014589CD_01_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_01, (_MM_PERM_ENUM)136); //B00(8-11) B01(8-11) B00(8-11) B01(8-11) B04(8-11) B05(8-11) B04(8-11) B05(8-11) B08(8-11) B09(8-11) B08(8-11) B09(8-11) B0C(8-11) B0D(8-11) B0C(8-11) B0D(8-11)
+            const __m512i rhs_mat_2367ABEF_01_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_01, (_MM_PERM_ENUM)136); //B02(8-11) B03(8-11) B02(8-11) B03(8-11) B06(8-11) B07(8-11) B06(8-11) B07(8-11) B0A(8-11) B0B(8-11) B0A(8-11) B0B(8-11) B0E(8-11) B0F(8-11) B0E(8-11) B0F(8-11)
+
+            const __m512i rhs_mat_014589CD_10_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_10, (_MM_PERM_ENUM)136); //B10(0-3) B11(0-3) B10(0-3) B11(0-3) B14(0-3) B15(0-3) B14(0-3) B15(0-3) B18(0-3) B19(0-3) B18(0-3) B19(0-3) B1C(0-3) B1D(0-3) B1C(0-3) B1D(0-3)
+            const __m512i rhs_mat_2367ABEF_10_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_10, (_MM_PERM_ENUM)136); //B12(0-3) B13(0-3) B12(0-3) B13(0-3) B16(0-3) B17(0-3) B16(0-3) B17(0-3) B1A(0-3) B1B(0-3) B1A(0-3) B1B(0-3) B1E(0-3) B1F(0-3) B1E(0-3) B1F(0-3)
+
+            const __m512i rhs_mat_014589CD_11_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_11, (_MM_PERM_ENUM)136); //B10(8-11) B11(8-11) B10(8-11) B11(8-11) B14(8-11) B15(8-11) B14(8-11) B15(8-11) B18(8-11) B19(8-11) B18(8-11) B19(8-11) B1C(8-11) B1D(8-11) B1C(8-11) B1D(8-11)
+            const __m512i rhs_mat_2367ABEF_11_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_11, (_MM_PERM_ENUM)136); //B12(8-11) B13(8-11) B12(8-11) B13(8-11) B16(8-11) B17(8-11) B16(8-11) B17(8-11) B1A(8-11) B1B(8-11) B1A(8-11) B1B(8-11) B1E(8-11) B1F(8-11) B1E(8-11) B1F(8-11)
+
+            const __m512i rhs_mat_014589CD_20_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_20, (_MM_PERM_ENUM)136); //B20(0-3) B21(0-3) B20(0-3) B21(0-3) B24(0-3) B25(0-3) B24(0-3) B25(0-3) B28(0-3) B29(0-3) B28(0-3) B29(0-3) B2C(0-3) B2D(0-3) B2C(0-3) B2D(0-3)
+            const __m512i rhs_mat_2367ABEF_20_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_20, (_MM_PERM_ENUM)136); //B22(0-3) B23(0-3) B22(0-3) B23(0-3) B26(0-3) B27(0-3) B26(0-3) B27(0-3) B2A(0-3) B2B(0-3) B2A(0-3) B2B(0-3) B2E(0-3) B2F(0-3) B2E(0-3) B2F(0-3)
+
+            const __m512i rhs_mat_014589CD_21_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_21, (_MM_PERM_ENUM)136); //B20(8-11) B21(8-11) B20(8-11) B21(8-11) B24(8-11) B25(8-11) B24(8-11) B25(8-11) B28(8-11) B29(8-11) B28(8-11) B29(8-11) B2C(8-11) B2D(8-11) B2C(8-11) B2D(8-11)
+            const __m512i rhs_mat_2367ABEF_21_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_21, (_MM_PERM_ENUM)136); //B22(8-11) B23(8-11) B22(8-11) B23(8-11) B26(8-11) B27(8-11) B26(8-11) B27(8-11) B2A(8-11) B2B(8-11) B2A(8-11) B2B(8-11) B2E(8-11) B2F(8-11) B2E(8-11) B2F(8-11)
+
+            const __m512i rhs_mat_014589CD_30_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_30, (_MM_PERM_ENUM)136); //B30(0-3) B31(0-3) B30(0-3) B31(0-3) B34(0-3) B35(0-3) B34(0-3) B35(0-3) B38(0-3) B39(0-3) B38(0-3) B39(0-3) B3C(0-3) B3D(0-3) B3C(0-3) B3D(0-3)
+            const __m512i rhs_mat_2367ABEF_30_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_30, (_MM_PERM_ENUM)136); //B32(0-3) B33(0-3) B32(0-3) B33(0-3) B36(0-3) B37(0-3) B36(0-3) B37(0-3) B3A(0-3) B3B(0-3) B3A(0-3) B3B(0-3) B3E(0-3) B3F(0-3) B3E(0-3) B3F(0-3)
+
+            const __m512i rhs_mat_014589CD_31_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_31, (_MM_PERM_ENUM)136); //B30(8-11) B31(8-11) B30(8-11) B31(8-11) B34(8-11) B35(8-11) B34(8-11) B35(8-11) B38(8-11) B39(8-11) B38(8-11) B39(8-11) B3C(8-11) B3D(8-11) B3C(8-11) B3D(8-11)
+            const __m512i rhs_mat_2367ABEF_31_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_31, (_MM_PERM_ENUM)136); //B32(8-11) B33(8-11) B32(8-11) B33(8-11) B36(8-11) B37(8-11) B36(8-11) B37(8-11) B3A(8-11) B3B(8-11) B3A(8-11) B3B(8-11) B3E(8-11) B3F(8-11) B3E(8-11) B3F(8-11)
+
+            const __m512i rhs_mat_014589CD_40_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_40, (_MM_PERM_ENUM)136); //B40(0-3) B41(0-3) B40(0-3) B41(0-3) B44(0-3) B45(0-3) B44(0-3) B45(0-3) B48(0-3) B49(0-3) B48(0-3) B49(0-3) B4C(0-3) B4D(0-3) B4C(0-3) B4D(0-3)
+            const __m512i rhs_mat_2367ABEF_40_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_40, (_MM_PERM_ENUM)136); //B42(0-3) B43(0-3) B42(0-3) B43(0-3) B46(0-3) B47(0-3) B46(0-3) B47(0-3) B4A(0-3) B4B(0-3) B4A(0-3) B4B(0-3) B4E(0-3) B4F(0-3) B4E(0-3) B4F(0-3)
+
+            const __m512i rhs_mat_014589CD_41_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_41, (_MM_PERM_ENUM)136); //B40(8-11) B41(8-11) B40(8-11) B41(8-11) B44(8-11) B45(8-11) B44(8-11) B45(8-11) B48(8-11) B49(8-11) B48(8-11) B49(8-11) B4C(8-11) B4D(8-11) B4C(8-11) B4D(8-11)
+            const __m512i rhs_mat_2367ABEF_41_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_41, (_MM_PERM_ENUM)136); //B42(8-11) B43(8-11) B42(8-11) B43(8-11) B46(8-11) B47(8-11) B46(8-11) B47(8-11) B4A(8-11) B4B(8-11) B4A(8-11) B4B(8-11) B4E(8-11) B4F(8-11) B4E(8-11) B4F(8-11)
+
+            const __m512i rhs_mat_014589CD_50_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_50, (_MM_PERM_ENUM)136); //B50(0-3) B51(0-3) B50(0-3) B51(0-3) B54(0-3) B55(0-3) B54(0-3) B55(0-3) B58(0-3) B59(0-3) B58(0-3) B59(0-3) B5C(0-3) B5D(0-3) B5C(0-3) B5D(0-3)
+            const __m512i rhs_mat_2367ABEF_50_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_50, (_MM_PERM_ENUM)136); //B52(0-3) B53(0-3) B52(0-3) B53(0-3) B56(0-3) B57(0-3) B56(0-3) B57(0-3) B5A(0-3) B5B(0-3) B5A(0-3) B5B(0-3) B5E(0-3) B5F(0-3) B5E(0-3) B5F(0-3)
+
+            const __m512i rhs_mat_014589CD_51_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_51, (_MM_PERM_ENUM)136); //B50(8-11) B51(8-11) B50(8-11) B51(8-11) B54(8-11) B55(8-11) B54(8-11) B55(8-11) B58(8-11) B59(8-11) B58(8-11) B59(8-11) B5C(8-11) B5D(8-11) B5C(8-11) B5D(8-11)
+            const __m512i rhs_mat_2367ABEF_51_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_51, (_MM_PERM_ENUM)136); //B52(8-11) B53(8-11) B52(8-11) B53(8-11) B56(8-11) B57(8-11) B56(8-11) B57(8-11) B5A(8-11) B5B(8-11) B5A(8-11) B5B(8-11) B5E(8-11) B5F(8-11) B5E(8-11) B5F(8-11)
+
+            const __m512i rhs_mat_014589CD_60_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_60, (_MM_PERM_ENUM)136); //B60(0-3) B61(0-3) B60(0-3) B61(0-3) B64(0-3) B65(0-3) B64(0-3) B65(0-3) B68(0-3) B69(0-3) B68(0-3) B69(0-3) B6C(0-3) B6D(0-3) B6C(0-3) B6D(0-3)
+            const __m512i rhs_mat_2367ABEF_60_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_60, (_MM_PERM_ENUM)136); //B62(0-3) B63(0-3) B62(0-3) B63(0-3) B66(0-3) B67(0-3) B66(0-3) B67(0-3) B6A(0-3) B6B(0-3) B6A(0-3) B6B(0-3) B6E(0-3) B6F(0-3) B6E(0-3) B6F(0-3)
+
+            const __m512i rhs_mat_014589CD_61_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_61, (_MM_PERM_ENUM)136); //B60(8-11) B61(8-11) B60(8-11) B61(8-11) B64(8-11) B65(8-11) B64(8-11) B65(8-11) B68(8-11) B69(8-11) B68(8-11) B69(8-11) B6C(8-11) B6D(8-11) B6C(8-11) B6D(8-11)
+            const __m512i rhs_mat_2367ABEF_61_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_61, (_MM_PERM_ENUM)136); //B62(8-11) B63(8-11) B62(8-11) B63(8-11) B66(8-11) B67(8-11) B66(8-11) B67(8-11) B6A(8-11) B6B(8-11) B6A(8-11) B6B(8-11) B6E(8-11) B6F(8-11) B6E(8-11) B6F(8-11)
+
+            const __m512i rhs_mat_014589CD_70_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_70, (_MM_PERM_ENUM)136); //B70(0-3) B71(0-3) B70(0-3) B71(0-3) B74(0-3) B75(0-3) B74(0-3) B75(0-3) B78(0-3) B79(0-3) B78(0-3) B79(0-3) B7C(0-3) B7D(0-3) B7C(0-3) B7D(0-3)
+            const __m512i rhs_mat_2367ABEF_70_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_70, (_MM_PERM_ENUM)136); //B72(0-3) B73(0-3) B72(0-3) B73(0-3) B76(0-3) B77(0-3) B76(0-3) B77(0-3) B7A(0-3) B7B(0-3) B7A(0-3) B7B(0-3) B7E(0-3) B7F(0-3) B7E(0-3) B7F(0-3)
+
+            const __m512i rhs_mat_014589CD_71_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_71, (_MM_PERM_ENUM)136); //B70(8-11) B71(8-11) B70(8-11) B71(8-11) B74(8-11) B75(8-11) B74(8-11) B75(8-11) B78(8-11) B79(8-11) B78(8-11) B79(8-11) B7C(8-11) B7D(8-11) B7C(8-11) B7D(8-11)
+            const __m512i rhs_mat_2367ABEF_71_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_71, (_MM_PERM_ENUM)136); //B72(8-11) B73(8-11) B72(8-11) B73(8-11) B76(8-11) B77(8-11) B76(8-11) B77(8-11) B7A(8-11) B7B(8-11) B7A(8-11) B7B(8-11) B7E(8-11) B7F(8-11) B7E(8-11) B7F(8-11)
+
+            const __m512i rhs_mat_014589CD_00_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_00, (_MM_PERM_ENUM)221); //B00(4-7) B01(4-7) B00(4-7) B01(4-7) B04(4-7) B05(4-7) B04(4-7) B05(4-7) B08(4-7) B09(4-7) B08(4-7) B09(4-7) B0C(4-7) B0D(4-7) B0C(4-7) B0D(4-7)
+            const __m512i rhs_mat_2367ABEF_00_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_00, (_MM_PERM_ENUM)221); //B02(4-7) B03(4-7) B02(4-7) B03(4-7) B06(4-7) B07(4-7) B06(4-7) B07(4-7) B0A(4-7) B0B(4-7) B0A(4-7) B0B(4-7) B0E(4-7) B0F(4-7) B0E(4-7) B0F(4-7)
+
+            const __m512i rhs_mat_014589CD_01_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_01, (_MM_PERM_ENUM)221); //B00(12-15) B01(12-15) B00(12-15) B01(12-15) B04(12-15) B05(12-15) B04(12-15) B05(12-15) B08(12-15) B09(12-15) B08(12-15) B09(12-15) B0C(12-15) B0D(12-15) B0C(12-15) B0D(12-15)
+            const __m512i rhs_mat_2367ABEF_01_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_01, (_MM_PERM_ENUM)221); //B02(12-15) B03(12-15) B02(12-15) B03(12-15) B06(12-15) B07(12-15) B06(12-15) B07(12-15) B0A(12-15) B0B(12-15) B0A(12-15) B0B(12-15) B0E(12-15) B0F(12-15) B0E(12-15) B0F(12-15)
+
+            const __m512i rhs_mat_014589CD_10_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_10, (_MM_PERM_ENUM)221); //B10(4-7) B11(4-7) B10(4-7) B11(4-7) B14(4-7) B15(4-7) B14(4-7) B15(4-7) B18(4-7) B19(4-7) B18(4-7) B19(4-7) B1C(4-7) B1D(4-7) B1C(4-7) B1D(4-7)
+            const __m512i rhs_mat_2367ABEF_10_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_10, (_MM_PERM_ENUM)221); //B12(4-7) B13(4-7) B12(4-7) B13(4-7) B16(4-7) B17(4-7) B16(4-7) B17(4-7) B1A(4-7) B1B(4-7) B1A(4-7) B1B(4-7) B1E(4-7) B1F(4-7) B1E(4-7) B1F(4-7)
+
+            const __m512i rhs_mat_014589CD_11_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_11, (_MM_PERM_ENUM)221); //B10(12-15) B11(12-15) B10(12-15) B11(12-15) B14(12-15) B15(12-15) B14(12-15) B15(12-15) B18(12-15) B19(12-15) B18(12-15) B19(12-15) B1C(12-15) B1D(12-15) B1C(12-15) B1D(12-15)
+            const __m512i rhs_mat_2367ABEF_11_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_11, (_MM_PERM_ENUM)221); //B12(12-15) B13(12-15) B12(12-15) B13(12-15) B16(12-15) B17(12-15) B16(12-15) B17(12-15) B1A(12-15) B1B(12-15) B1A(12-15) B1B(12-15) B1E(12-15) B1F(12-15) B1E(12-15) B1F(12-15)
+
+            const __m512i rhs_mat_014589CD_20_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_20, (_MM_PERM_ENUM)221); //B20(4-7) B21(4-7) B20(4-7) B21(4-7) B24(4-7) B25(4-7) B24(4-7) B25(4-7) B28(4-7) B29(4-7) B28(4-7) B29(4-7) B2C(4-7) B2D(4-7) B2C(4-7) B2D(4-7)
+            const __m512i rhs_mat_2367ABEF_20_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_20, (_MM_PERM_ENUM)221); //B22(4-7) B23(4-7) B22(4-7) B23(4-7) B26(4-7) B27(4-7) B26(4-7) B27(4-7) B2A(4-7) B2B(4-7) B2A(4-7) B2B(4-7) B2E(4-7) B2F(4-7) B2E(4-7) B2F(4-7)
+
+            const __m512i rhs_mat_014589CD_21_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_21, (_MM_PERM_ENUM)221); //B20(12-15) B21(12-15) B20(12-15) B21(12-15) B24(12-15) B25(12-15) B24(12-15) B25(12-15) B28(12-15) B29(12-15) B28(12-15) B29(12-15) B2C(12-15) B2D(12-15) B2C(12-15) B2D(12-15)
+            const __m512i rhs_mat_2367ABEF_21_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_21, (_MM_PERM_ENUM)221); //B22(12-15) B23(12-15) B22(12-15) B23(12-15) B26(12-15) B27(12-15) B26(12-15) B27(12-15) B2A(12-15) B2B(12-15) B2A(12-15) B2B(12-15) B2E(12-15) B2F(12-15) B2E(12-15) B2F(12-15)
+
+            const __m512i rhs_mat_014589CD_30_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_30, (_MM_PERM_ENUM)221); //B30(4-7) B31(4-7) B30(4-7) B31(4-7) B34(4-7) B35(4-7) B34(4-7) B35(4-7) B38(4-7) B39(4-7) B38(4-7) B39(4-7) B3C(4-7) B3D(4-7) B3C(4-7) B3D(4-7)
+            const __m512i rhs_mat_2367ABEF_30_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_30, (_MM_PERM_ENUM)221); //B32(4-7) B33(4-7) B32(4-7) B33(4-7) B36(4-7) B37(4-7) B36(4-7) B37(4-7) B3A(4-7) B3B(4-7) B3A(4-7) B3B(4-7) B3E(4-7) B3F(4-7) B3E(4-7) B3F(4-7)
+
+            const __m512i rhs_mat_014589CD_31_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_31, (_MM_PERM_ENUM)221); //B30(12-15) B31(12-15) B30(12-15) B31(12-15) B34(12-15) B35(12-15) B34(12-15) B35(12-15) B38(12-15) B39(12-15) B38(12-15) B39(12-15) B3C(12-15) B3D(12-15) B3C(12-15) B3D(12-15)
+            const __m512i rhs_mat_2367ABEF_31_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_31, (_MM_PERM_ENUM)221); //B32(12-15) B33(12-15) B32(12-15) B33(12-15) B36(12-15) B37(12-15) B36(12-15) B37(12-15) B3A(12-15) B3B(12-15) B3A(12-15) B3B(12-15) B3E(12-15) B3F(12-15) B3E(12-15) B3F(12-15)
+
+            const __m512i rhs_mat_014589CD_40_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_40, (_MM_PERM_ENUM)221); //B40(4-7) B41(4-7) B40(4-7) B41(4-7) B44(4-7) B45(4-7) B44(4-7) B45(4-7) B48(4-7) B49(4-7) B48(4-7) B49(4-7) B4C(4-7) B4D(4-7) B4C(4-7) B4D(4-7)
+            const __m512i rhs_mat_2367ABEF_40_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_40, (_MM_PERM_ENUM)221); //B42(4-7) B43(4-7) B42(4-7) B43(4-7) B46(4-7) B47(4-7) B46(4-7) B47(4-7) B4A(4-7) B4B(4-7) B4A(4-7) B4B(4-7) B4E(4-7) B4F(4-7) B4E(4-7) B4F(4-7)
+
+            const __m512i rhs_mat_014589CD_41_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_41, (_MM_PERM_ENUM)221); //B40(12-15) B41(12-15) B40(12-15) B41(12-15) B44(12-15) B45(12-15) B44(12-15) B45(12-15) B48(12-15) B49(12-15) B48(12-15) B49(12-15) B4C(12-15) B4D(12-15) B4C(12-15) B4D(12-15)
+            const __m512i rhs_mat_2367ABEF_41_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_41, (_MM_PERM_ENUM)221); //B42(12-15) B43(12-15) B42(12-15) B43(12-15) B46(12-15) B47(12-15) B46(12-15) B47(12-15) B4A(12-15) B4B(12-15) B4A(12-15) B4B(12-15) B4E(12-15) B4F(12-15) B4E(12-15) B4F(12-15)
+
+            const __m512i rhs_mat_014589CD_50_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_50, (_MM_PERM_ENUM)221); //B50(4-7) B51(4-7) B50(4-7) B51(4-7) B54(4-7) B55(4-7) B54(4-7) B55(4-7) B58(4-7) B59(4-7) B58(4-7) B59(4-7) B5C(4-7) B5D(4-7) B5C(4-7) B5D(4-7)
+            const __m512i rhs_mat_2367ABEF_50_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_50, (_MM_PERM_ENUM)221); //B52(4-7) B53(4-7) B52(4-7) B53(4-7) B56(4-7) B57(4-7) B56(4-7) B57(4-7) B5A(4-7) B5B(4-7) B5A(4-7) B5B(4-7) B5E(4-7) B5F(4-7) B5E(4-7) B5F(4-7)
+
+            const __m512i rhs_mat_014589CD_51_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_51, (_MM_PERM_ENUM)221); //B50(12-15) B51(12-15) B50(12-15) B51(12-15) B54(12-15) B55(12-15) B54(12-15) B55(12-15) B58(12-15) B59(12-15) B58(12-15) B59(12-15) B5C(12-15) B5D(12-15) B5C(12-15) B5D(12-15)
+            const __m512i rhs_mat_2367ABEF_51_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_51, (_MM_PERM_ENUM)221); //B52(12-15) B53(12-15) B52(12-15) B53(12-15) B56(12-15) B57(12-15) B56(12-15) B57(12-15) B5A(12-15) B5B(12-15) B5A(12-15) B5B(12-15) B5E(12-15) B5F(12-15) B5E(12-15) B5F(12-15)
+
+            const __m512i rhs_mat_014589CD_60_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_60, (_MM_PERM_ENUM)221); //B60(4-7) B61(4-7) B60(4-7) B61(4-7) B64(4-7) B65(4-7) B64(4-7) B65(4-7) B68(4-7) B69(4-7) B68(4-7) B69(4-7) B6C(4-7) B6D(4-7) B6C(4-7) B6D(4-7)
+            const __m512i rhs_mat_2367ABEF_60_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_60, (_MM_PERM_ENUM)221); //B62(4-7) B63(4-7) B62(4-7) B63(4-7) B66(4-7) B67(4-7) B66(4-7) B67(4-7) B6A(4-7) B6B(4-7) B6A(4-7) B6B(4-7) B6E(4-7) B6F(4-7) B6E(4-7) B6F(4-7)
+
+            const __m512i rhs_mat_014589CD_61_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_61, (_MM_PERM_ENUM)221); //B60(12-15) B61(12-15) B60(12-15) B61(12-15) B64(12-15) B65(12-15) B64(12-15) B65(12-15) B68(12-15) B69(12-15) B68(12-15) B69(12-15) B6C(12-15) B6D(12-15) B6C(12-15) B6D(12-15)
+            const __m512i rhs_mat_2367ABEF_61_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_61, (_MM_PERM_ENUM)221); //B62(12-15) B63(12-15) B62(12-15) B63(12-15) B66(12-15) B67(12-15) B66(12-15) B67(12-15) B6A(12-15) B6B(12-15) B6A(12-15) B6B(12-15) B6E(12-15) B6F(12-15) B6E(12-15) B6F(12-15)
+
+            const __m512i rhs_mat_014589CD_70_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_70, (_MM_PERM_ENUM)221); //B70(4-7) B71(4-7) B70(4-7) B71(4-7) B74(4-7) B75(4-7) B74(4-7) B75(4-7) B78(4-7) B79(4-7) B78(4-7) B79(4-7) B7C(4-7) B7D(4-7) B7C(4-7) B7D(4-7)
+            const __m512i rhs_mat_2367ABEF_70_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_70, (_MM_PERM_ENUM)221); //B72(4-7) B73(4-7) B72(4-7) B73(4-7) B76(4-7) B77(4-7) B76(4-7) B77(4-7) B7A(4-7) B7B(4-7) B7A(4-7) B7B(4-7) B7E(4-7) B7F(4-7) B7E(4-7) B7F(4-7)
+
+            const __m512i rhs_mat_014589CD_71_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_71, (_MM_PERM_ENUM)221); //B70(12-15) B71(12-15) B70(12-15) B71(12-15) B74(12-15) B75(12-15) B74(12-15) B75(12-15) B78(12-15) B79(12-15) B78(12-15) B79(12-15) B7C(12-15) B7D(12-15) B7C(12-15) B7D(12-15)
+            const __m512i rhs_mat_2367ABEF_71_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_71, (_MM_PERM_ENUM)221); //B72(12-15) B73(12-15) B72(12-15) B73(12-15) B76(12-15) B77(12-15) B76(12-15) B77(12-15) B7A(12-15) B7B(12-15) B7A(12-15) B7B(12-15) B7E(12-15) B7F(12-15) B7E(12-15) B7F(12-15)
+
+            //notation:superblock subblock
+            //s00 m00 s01 m01 s10 m10 s11 m11 s20 m20 s21 m21 s30 m30 s31 m31 s40 m40 s41 m41 s50 m50 s51 m51 s60 m60 s61 m61 s70 m70 s71 m71
+
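+            // Added note: each Q2_K scales byte carries a 4-bit scale (low nibble) and a 4-bit
+            // min (high nibble) per sub block; the scales feed the dot-product term and the mins
+            // feed the correction term subtracted later, weighted by dmin and the Q8_K bsums.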
+            const __m128i mins_and_scales_01_0 = _mm_loadu_si128((const __m128i *)(b_ptr_0[b].scales + sb * 64));
+            const __m128i mins_and_scales_23_0 = _mm_loadu_si128((const __m128i *)(b_ptr_0[b].scales + 16 + sb * 64));
+            const __m128i mins_and_scales_45_0 = _mm_loadu_si128((const __m128i *)(b_ptr_0[b].scales + 32 + sb * 64));
+            const __m128i mins_and_scales_67_0 = _mm_loadu_si128((const __m128i *)(b_ptr_0[b].scales + 48 + sb * 64));
+
+            const __m128i mins_and_scales_01_1 = _mm_loadu_si128((const __m128i *)(b_ptr_1[b].scales + sb * 64));
+            const __m128i mins_and_scales_23_1 = _mm_loadu_si128((const __m128i *)(b_ptr_1[b].scales + 16 + sb * 64));
+            const __m128i mins_and_scales_45_1 = _mm_loadu_si128((const __m128i *)(b_ptr_1[b].scales + 32 + sb * 64));
+            const __m128i mins_and_scales_67_1 = _mm_loadu_si128((const __m128i *)(b_ptr_1[b].scales + 48 + sb * 64));
+
+            // Combine mins and scales for sub-blocks: 0-1, 2-3, 4-5, 6-7 in the sb loop
+            const __m256i mins_and_scales_01 = _mm256_insertf128_si256(_mm256_castsi128_si256(mins_and_scales_01_0), mins_and_scales_01_1, 1);
+            const __m256i mins_and_scales_23 = _mm256_insertf128_si256(_mm256_castsi128_si256(mins_and_scales_23_0), mins_and_scales_23_1, 1);
+            const __m256i mins_and_scales_45 = _mm256_insertf128_si256(_mm256_castsi128_si256(mins_and_scales_45_0), mins_and_scales_45_1, 1);
+            const __m256i mins_and_scales_67 = _mm256_insertf128_si256(_mm256_castsi128_si256(mins_and_scales_67_0), mins_and_scales_67_1, 1);
+
+            // Extract scales which is lower half from mins_and_scales
+            const __m256i scales_01 = _mm256_and_si256(mins_and_scales_01, m4b);
+            const __m256i scales_23 = _mm256_and_si256(mins_and_scales_23, m4b);
+            const __m256i scales_45 = _mm256_and_si256(mins_and_scales_45, m4b);
+            const __m256i scales_67 = _mm256_and_si256(mins_and_scales_67, m4b);
+
+            // Extract mins which is upper half from mins_and_scales
+            const __m512i mins_01 = _mm512_cvtepu8_epi16(_mm256_and_si256(_mm256_srli_epi16(mins_and_scales_01, 4), m4b));
+            const __m512i mins_23 = _mm512_cvtepu8_epi16(_mm256_and_si256(_mm256_srli_epi16(mins_and_scales_23, 4), m4b));
+            const __m512i mins_45 = _mm512_cvtepu8_epi16(_mm256_and_si256(_mm256_srli_epi16(mins_and_scales_45, 4), m4b));
+            const __m512i mins_67 = _mm512_cvtepu8_epi16(_mm256_and_si256(_mm256_srli_epi16(mins_and_scales_67, 4), m4b));
+
+            const __m512i scales_0 = _mm512_cvtepu8_epi16(_mm256_shuffle_epi8(scales_01,scalesmask1));
+            const __m512i scales_1 = _mm512_cvtepu8_epi16(_mm256_shuffle_epi8(scales_01,scalesmask2));
+            const __m512i scales_2 = _mm512_cvtepu8_epi16(_mm256_shuffle_epi8(scales_23,scalesmask1));
+            const __m512i scales_3 = _mm512_cvtepu8_epi16(_mm256_shuffle_epi8(scales_23,scalesmask2));
+            const __m512i scales_4 = _mm512_cvtepu8_epi16(_mm256_shuffle_epi8(scales_45,scalesmask1));
+            const __m512i scales_5 = _mm512_cvtepu8_epi16(_mm256_shuffle_epi8(scales_45,scalesmask2));
+            const __m512i scales_6 = _mm512_cvtepu8_epi16(_mm256_shuffle_epi8(scales_67,scalesmask1));
+            const __m512i scales_7 = _mm512_cvtepu8_epi16(_mm256_shuffle_epi8(scales_67,scalesmask2));
+
+            const __m512i scale_014589CD_0 = _mm512_shuffle_epi32(scales_0, (_MM_PERM_ENUM)68);
+            const __m512i scale_2367ABEF_0 = _mm512_shuffle_epi32(scales_0, (_MM_PERM_ENUM)238);
+
+            const __m512i scale_014589CD_1 = _mm512_shuffle_epi32(scales_1, (_MM_PERM_ENUM)68);
+            const __m512i scale_2367ABEF_1 = _mm512_shuffle_epi32(scales_1, (_MM_PERM_ENUM)238);
+
+            const __m512i scale_014589CD_2 = _mm512_shuffle_epi32(scales_2, (_MM_PERM_ENUM)68);
+            const __m512i scale_2367ABEF_2 = _mm512_shuffle_epi32(scales_2, (_MM_PERM_ENUM)238);
+
+            const __m512i scale_014589CD_3 = _mm512_shuffle_epi32(scales_3, (_MM_PERM_ENUM)68);
+            const __m512i scale_2367ABEF_3 = _mm512_shuffle_epi32(scales_3, (_MM_PERM_ENUM)238);
+
+            const __m512i scale_014589CD_4 = _mm512_shuffle_epi32(scales_4, (_MM_PERM_ENUM)68);
+            const __m512i scale_2367ABEF_4 = _mm512_shuffle_epi32(scales_4, (_MM_PERM_ENUM)238);
+
+            const __m512i scale_014589CD_5 = _mm512_shuffle_epi32(scales_5, (_MM_PERM_ENUM)68);
+            const __m512i scale_2367ABEF_5 = _mm512_shuffle_epi32(scales_5, (_MM_PERM_ENUM)238);
+
+            const __m512i scale_014589CD_6 = _mm512_shuffle_epi32(scales_6, (_MM_PERM_ENUM)68);
+            const __m512i scale_2367ABEF_6 = _mm512_shuffle_epi32(scales_6, (_MM_PERM_ENUM)238);
+
+            const __m512i scale_014589CD_7 = _mm512_shuffle_epi32(scales_7, (_MM_PERM_ENUM)68);
+            const __m512i scale_2367ABEF_7 = _mm512_shuffle_epi32(scales_7, (_MM_PERM_ENUM)238);
+
+
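+            // Added note: the rp loop walks the four interleaved groups of LHS rows
+            // (a_ptrs[0..3], four Q8_K rows each) against the same unpacked RHS data,
+            // so that all sixteen accumulator rows are filled.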
+            for (int rp = 0; rp < 4; rp++) {
+
+                // Load the four block_q8_k quantized values interleaved with each other in chunks of eight bytes - A0,A1,A2,A3
+                // Loaded as set of 128 bit vectors and repeated and stored into a 256 bit vector before again repeating into 512 bit vector
+                __m256i lhs_mat_ymm_0123_00 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 512 * sb)));
+                __m256i lhs_mat_ymm_01_00 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_00, lhs_mat_ymm_0123_00, 0);
+                __m256i lhs_mat_ymm_23_00 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_00, lhs_mat_ymm_0123_00, 17);
+                __m256i lhs_mat_ymm_0123_01 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 32 + 512 * sb)));
+                __m256i lhs_mat_ymm_01_01 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_01, lhs_mat_ymm_0123_01, 0);
+                __m256i lhs_mat_ymm_23_01 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_01, lhs_mat_ymm_0123_01, 17);
+                __m256i lhs_mat_ymm_0123_10 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 64 + 512 * sb)));
+                __m256i lhs_mat_ymm_01_10 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_10, lhs_mat_ymm_0123_10, 0);
+                __m256i lhs_mat_ymm_23_10 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_10, lhs_mat_ymm_0123_10, 17);
+                __m256i lhs_mat_ymm_0123_11 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 96 + 512 * sb)));
+                __m256i lhs_mat_ymm_01_11 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_11, lhs_mat_ymm_0123_11, 0);
+                __m256i lhs_mat_ymm_23_11 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_11, lhs_mat_ymm_0123_11, 17);
+                __m256i lhs_mat_ymm_0123_20 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 128 + 512 * sb)));
+                __m256i lhs_mat_ymm_01_20 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_20, lhs_mat_ymm_0123_20, 0);
+                __m256i lhs_mat_ymm_23_20 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_20, lhs_mat_ymm_0123_20, 17);
+                __m256i lhs_mat_ymm_0123_21 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 160 + 512 * sb)));
+                __m256i lhs_mat_ymm_01_21 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_21, lhs_mat_ymm_0123_21, 0);
+                __m256i lhs_mat_ymm_23_21 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_21, lhs_mat_ymm_0123_21, 17);
+                __m256i lhs_mat_ymm_0123_30 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 192 + 512 * sb)));
+                __m256i lhs_mat_ymm_01_30 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_30, lhs_mat_ymm_0123_30, 0);
+                __m256i lhs_mat_ymm_23_30 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_30, lhs_mat_ymm_0123_30, 17);
+                __m256i lhs_mat_ymm_0123_31 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 224 + 512 * sb)));
+                __m256i lhs_mat_ymm_01_31 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_31, lhs_mat_ymm_0123_31, 0);
+                __m256i lhs_mat_ymm_23_31 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_31, lhs_mat_ymm_0123_31, 17);
+
+                __m256i lhs_mat_ymm_0123_40 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 256 + 512 * sb)));
+                __m256i lhs_mat_ymm_01_40 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_40, lhs_mat_ymm_0123_40, 0);
+                __m256i lhs_mat_ymm_23_40 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_40, lhs_mat_ymm_0123_40, 17);
+                __m256i lhs_mat_ymm_0123_41 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 288 + 512 * sb)));
+                __m256i lhs_mat_ymm_01_41 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_41, lhs_mat_ymm_0123_41, 0);
+                __m256i lhs_mat_ymm_23_41 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_41, lhs_mat_ymm_0123_41, 17);
+                __m256i lhs_mat_ymm_0123_50 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 320 + 512 * sb)));
+                __m256i lhs_mat_ymm_01_50 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_50, lhs_mat_ymm_0123_50, 0);
+                __m256i lhs_mat_ymm_23_50 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_50, lhs_mat_ymm_0123_50, 17);
+                __m256i lhs_mat_ymm_0123_51 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 352 + 512 * sb)));
+                __m256i lhs_mat_ymm_01_51 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_51, lhs_mat_ymm_0123_51, 0);
+                __m256i lhs_mat_ymm_23_51 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_51, lhs_mat_ymm_0123_51, 17);
+                __m256i lhs_mat_ymm_0123_60 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 384 + 512 * sb)));
+                __m256i lhs_mat_ymm_01_60 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_60, lhs_mat_ymm_0123_60, 0);
+                __m256i lhs_mat_ymm_23_60 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_60, lhs_mat_ymm_0123_60, 17);
+                __m256i lhs_mat_ymm_0123_61 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 416 + 512 * sb)));
+                __m256i lhs_mat_ymm_01_61 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_61, lhs_mat_ymm_0123_61, 0);
+                __m256i lhs_mat_ymm_23_61 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_61, lhs_mat_ymm_0123_61, 17);
+                __m256i lhs_mat_ymm_0123_70 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 448 + 512 * sb)));
+                __m256i lhs_mat_ymm_01_70 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_70, lhs_mat_ymm_0123_70, 0);
+                __m256i lhs_mat_ymm_23_70 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_70, lhs_mat_ymm_0123_70, 17);
+                __m256i lhs_mat_ymm_0123_71 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 480 + 512 * sb)));
+                __m256i lhs_mat_ymm_01_71 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_71, lhs_mat_ymm_0123_71, 0);
+                __m256i lhs_mat_ymm_23_71 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_71, lhs_mat_ymm_0123_71, 17);
+
+
+                __m512i lhs_mat_01_00 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_00), lhs_mat_ymm_01_00, 1);
+                __m512i lhs_mat_23_00 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_00), lhs_mat_ymm_23_00, 1);
+                __m512i lhs_mat_01_01 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_01), lhs_mat_ymm_01_01, 1);
+                __m512i lhs_mat_23_01 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_01), lhs_mat_ymm_23_01, 1);
+
+                __m512i lhs_mat_01_10 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_10), lhs_mat_ymm_01_10, 1);
+                __m512i lhs_mat_23_10 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_10), lhs_mat_ymm_23_10, 1);
+                __m512i lhs_mat_01_11 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_11), lhs_mat_ymm_01_11, 1);
+                __m512i lhs_mat_23_11 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_11), lhs_mat_ymm_23_11, 1);
+
+                __m512i lhs_mat_01_20 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_20), lhs_mat_ymm_01_20, 1);
+                __m512i lhs_mat_23_20 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_20), lhs_mat_ymm_23_20, 1);
+                __m512i lhs_mat_01_21 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_21), lhs_mat_ymm_01_21, 1);
+                __m512i lhs_mat_23_21 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_21), lhs_mat_ymm_23_21, 1);
+
+                __m512i lhs_mat_01_30 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_30), lhs_mat_ymm_01_30, 1);
+                __m512i lhs_mat_23_30 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_30), lhs_mat_ymm_23_30, 1);
+                __m512i lhs_mat_01_31 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_31), lhs_mat_ymm_01_31, 1);
+                __m512i lhs_mat_23_31 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_31), lhs_mat_ymm_23_31, 1);
+
+                __m512i lhs_mat_01_40 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_40), lhs_mat_ymm_01_40, 1);
+                __m512i lhs_mat_23_40 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_40), lhs_mat_ymm_23_40, 1);
+                __m512i lhs_mat_01_41 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_41), lhs_mat_ymm_01_41, 1);
+                __m512i lhs_mat_23_41 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_41), lhs_mat_ymm_23_41, 1);
+
+                __m512i lhs_mat_01_50 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_50), lhs_mat_ymm_01_50, 1);
+                __m512i lhs_mat_23_50 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_50), lhs_mat_ymm_23_50, 1);
+                __m512i lhs_mat_01_51 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_51), lhs_mat_ymm_01_51, 1);
+                __m512i lhs_mat_23_51 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_51), lhs_mat_ymm_23_51, 1);
+
+                __m512i lhs_mat_01_60 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_60), lhs_mat_ymm_01_60, 1);
+                __m512i lhs_mat_23_60 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_60), lhs_mat_ymm_23_60, 1);
+                __m512i lhs_mat_01_61 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_61), lhs_mat_ymm_01_61, 1);
+                __m512i lhs_mat_23_61 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_61), lhs_mat_ymm_23_61, 1);
+
+                __m512i lhs_mat_01_70 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_70), lhs_mat_ymm_01_70, 1);
+                __m512i lhs_mat_23_70 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_70), lhs_mat_ymm_23_70, 1);
+                __m512i lhs_mat_01_71 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_71), lhs_mat_ymm_01_71, 1);
+                __m512i lhs_mat_23_71 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_71), lhs_mat_ymm_23_71, 1);
+
+                // Bsums are loaded for the different Q8_K blocks
+                __m128i lhs_raw_bsums_01_0123 = _mm_loadu_si128((const __m128i *)((a_ptrs[rp][b].bsums + 32 * sb)));
+                __m128i lhs_raw_bsums_23_0123 = _mm_loadu_si128((const __m128i *)(a_ptrs[rp][b].bsums + 8 + 32 * sb));
+                __m128i lhs_raw_bsums_01_4567 = _mm_loadu_si128((const __m128i *)((a_ptrs[rp][b].bsums + 16 + 32 * sb)));
+                __m128i lhs_raw_bsums_23_4567 = _mm_loadu_si128((const __m128i *)(a_ptrs[rp][b].bsums + 24 + 32 * sb));
+
+                __m256i lhs_bsums_ymm_01_0123 = _mm256_inserti128_si256(_mm256_castsi128_si256(lhs_raw_bsums_01_0123), lhs_raw_bsums_01_0123, 1);
+                __m512i lhs_bsums_01_0123 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_bsums_ymm_01_0123), lhs_bsums_ymm_01_0123, 1);
+                __m256i lhs_bsums_ymm_23_0123 = _mm256_inserti128_si256(_mm256_castsi128_si256(lhs_raw_bsums_23_0123), lhs_raw_bsums_23_0123, 1);
+                __m512i lhs_bsums_23_0123 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_bsums_ymm_23_0123), lhs_bsums_ymm_23_0123, 1);
+                __m256i lhs_bsums_ymm_01_4567 = _mm256_inserti128_si256(_mm256_castsi128_si256(lhs_raw_bsums_01_4567), lhs_raw_bsums_01_4567, 1);
+                __m512i lhs_bsums_01_4567 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_bsums_ymm_01_4567), lhs_bsums_ymm_01_4567, 1);
+                __m256i lhs_bsums_ymm_23_4567 = _mm256_inserti128_si256(_mm256_castsi128_si256(lhs_raw_bsums_23_4567), lhs_raw_bsums_23_4567, 1);
+                __m512i lhs_bsums_23_4567 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_bsums_ymm_23_4567), lhs_bsums_ymm_23_4567, 1);
+
+                // Shuffle pattern one - left side input
+                const __m512i lhs_mat_01_00_sp1 = _mm512_shuffle_epi32(lhs_mat_01_00, (_MM_PERM_ENUM)160); //A00(0-3) A00(0-3) A01(0-3) A01(0-3) A00(0-3) A00(0-3) A01(0-3) A01(0-3) A00(0-3) A00(0-3) A01(0-3) A01(0-3) A00(0-3) A00(0-3) A01(0-3) A01(0-3)
+                const __m512i lhs_mat_23_00_sp1 = _mm512_shuffle_epi32(lhs_mat_23_00, (_MM_PERM_ENUM)160); //A02(0-3) A02(0-3) A03(0-3) A03(0-3) A02(0-3) A02(0-3) A03(0-3) A03(0-3) A02(0-3) A02(0-3) A03(0-3) A03(0-3) A02(0-3) A02(0-3) A03(0-3) A03(0-3)
+
+                const __m512i lhs_mat_01_01_sp1 = _mm512_shuffle_epi32(lhs_mat_01_01, (_MM_PERM_ENUM)160); //A00(8-11) A00(8-11) A01(8-11) A01(8-11) A00(8-11) A00(8-11) A01(8-11) A01(8-11) A00(8-11) A00(8-11) A01(8-11) A01(8-11) A00(8-11) A00(8-11) A01(8-11) A01(8-11)
+                const __m512i lhs_mat_23_01_sp1 = _mm512_shuffle_epi32(lhs_mat_23_01, (_MM_PERM_ENUM)160); //A02(8-11) A02(8-11) A03(8-11) A03(8-11) A02(8-11) A02(8-11) A03(8-11) A03(8-11) A02(8-11) A02(8-11) A03(8-11) A03(8-11) A02(8-11) A02(8-11) A03(8-11) A03(8-11)
+
+                const __m512i lhs_mat_01_10_sp1 = _mm512_shuffle_epi32(lhs_mat_01_10, (_MM_PERM_ENUM)160); //A10(0-3) A10(0-3) A11(0-3) A11(0-3) A10(0-3) A10(0-3) A11(0-3) A11(0-3) A10(0-3) A10(0-3) A11(0-3) A11(0-3) A10(0-3) A10(0-3) A11(0-3) A11(0-3)
+                const __m512i lhs_mat_23_10_sp1 = _mm512_shuffle_epi32(lhs_mat_23_10, (_MM_PERM_ENUM)160); //A12(0-3) A12(0-3) A13(0-3) A13(0-3) A12(0-3) A12(0-3) A13(0-3) A13(0-3) A12(0-3) A12(0-3) A13(0-3) A13(0-3) A12(0-3) A12(0-3) A13(0-3) A13(0-3)
+
+                const __m512i lhs_mat_01_11_sp1 = _mm512_shuffle_epi32(lhs_mat_01_11, (_MM_PERM_ENUM)160); //A10(8-11) A10(8-11) A11(8-11) A11(8-11) A10(8-11) A10(8-11) A11(8-11) A11(8-11) A10(8-11) A10(8-11) A11(8-11) A11(8-11) A10(8-11) A10(8-11) A11(8-11) A11(8-11)
+                const __m512i lhs_mat_23_11_sp1 = _mm512_shuffle_epi32(lhs_mat_23_11, (_MM_PERM_ENUM)160); //A12(8-11) A12(8-11) A13(8-11) A13(8-11) A12(8-11) A12(8-11) A13(8-11) A13(8-11) A12(8-11) A12(8-11) A13(8-11) A13(8-11) A12(8-11) A12(8-11) A13(8-11) A13(8-11)
+
+                const __m512i lhs_mat_01_20_sp1 = _mm512_shuffle_epi32(lhs_mat_01_20, (_MM_PERM_ENUM)160); //A20(0-3) A20(0-3) A21(0-3) A21(0-3) A20(0-3) A20(0-3) A21(0-3) A21(0-3) A20(0-3) A20(0-3) A21(0-3) A21(0-3) A20(0-3) A20(0-3) A21(0-3) A21(0-3)
+                const __m512i lhs_mat_23_20_sp1 = _mm512_shuffle_epi32(lhs_mat_23_20, (_MM_PERM_ENUM)160); //A22(0-3) A22(0-3) A23(0-3) A23(0-3) A22(0-3) A22(0-3) A23(0-3) A23(0-3) A22(0-3) A22(0-3) A23(0-3) A23(0-3) A22(0-3) A22(0-3) A23(0-3) A23(0-3)
+
+                const __m512i lhs_mat_01_21_sp1 = _mm512_shuffle_epi32(lhs_mat_01_21, (_MM_PERM_ENUM)160); //A20(8-11) A20(8-11) A21(8-11) A21(8-11) A20(8-11) A20(8-11) A21(8-11) A21(8-11) A20(8-11) A20(8-11) A21(8-11) A21(8-11) A20(8-11) A20(8-11) A21(8-11) A21(8-11)
+                const __m512i lhs_mat_23_21_sp1 = _mm512_shuffle_epi32(lhs_mat_23_21, (_MM_PERM_ENUM)160); //A22(8-11) A22(8-11) A23(8-11) A23(8-11) A22(8-11) A22(8-11) A23(8-11) A23(8-11) A22(8-11) A22(8-11) A23(8-11) A23(8-11) A22(8-11) A22(8-11) A23(8-11) A23(8-11)
+
+                const __m512i lhs_mat_01_30_sp1 = _mm512_shuffle_epi32(lhs_mat_01_30, (_MM_PERM_ENUM)160); //A30(0-3) A30(0-3) A31(0-3) A31(0-3) A30(0-3) A30(0-3) A31(0-3) A31(0-3) A30(0-3) A30(0-3) A31(0-3) A31(0-3) A30(0-3) A30(0-3) A31(0-3) A31(0-3)
+                const __m512i lhs_mat_23_30_sp1 = _mm512_shuffle_epi32(lhs_mat_23_30, (_MM_PERM_ENUM)160); //A32(0-3) A32(0-3) A33(0-3) A33(0-3) A32(0-3) A32(0-3) A33(0-3) A33(0-3) A32(0-3) A32(0-3) A33(0-3) A33(0-3) A32(0-3) A32(0-3) A33(0-3) A33(0-3)
+
+                const __m512i lhs_mat_01_31_sp1 = _mm512_shuffle_epi32(lhs_mat_01_31, (_MM_PERM_ENUM)160); //A30(8-11) A30(8-11) A31(8-11) A31(8-11) A30(8-11) A30(8-11) A31(8-11) A31(8-11) A30(8-11) A30(8-11) A31(8-11) A31(8-11) A30(8-11) A30(8-11) A31(8-11) A31(8-11)
+                const __m512i lhs_mat_23_31_sp1 = _mm512_shuffle_epi32(lhs_mat_23_31, (_MM_PERM_ENUM)160); //A32(8-11) A32(8-11) A33(8-11) A33(8-11) A32(8-11) A32(8-11) A33(8-11) A33(8-11) A32(8-11) A32(8-11) A33(8-11) A33(8-11) A32(8-11) A32(8-11) A33(8-11) A33(8-11)
+
+                const __m512i lhs_mat_01_40_sp1 = _mm512_shuffle_epi32(lhs_mat_01_40, (_MM_PERM_ENUM)160); //A40(0-3) A40(0-3) A41(0-3) A41(0-3) A40(0-3) A40(0-3) A41(0-3) A41(0-3) A40(0-3) A40(0-3) A41(0-3) A41(0-3) A40(0-3) A40(0-3) A41(0-3) A41(0-3)
+                const __m512i lhs_mat_23_40_sp1 = _mm512_shuffle_epi32(lhs_mat_23_40, (_MM_PERM_ENUM)160); //A42(0-3) A42(0-3) A43(0-3) A43(0-3) A42(0-3) A42(0-3) A43(0-3) A43(0-3) A42(0-3) A42(0-3) A43(0-3) A43(0-3) A42(0-3) A42(0-3) A43(0-3) A43(0-3)
+
+                const __m512i lhs_mat_01_41_sp1 = _mm512_shuffle_epi32(lhs_mat_01_41, (_MM_PERM_ENUM)160); //A40(8-11) A40(8-11) A41(8-11) A41(8-11) A40(8-11) A40(8-11) A41(8-11) A41(8-11) A40(8-11) A40(8-11) A41(8-11) A41(8-11) A40(8-11) A40(8-11) A41(8-11) A41(8-11)
+                const __m512i lhs_mat_23_41_sp1 = _mm512_shuffle_epi32(lhs_mat_23_41, (_MM_PERM_ENUM)160); //A42(8-11) A42(8-11) A43(8-11) A43(8-11) A42(8-11) A42(8-11) A43(8-11) A43(8-11) A42(8-11) A42(8-11) A43(8-11) A43(8-11) A42(8-11) A42(8-11) A43(8-11) A43(8-11)
+
+                const __m512i lhs_mat_01_50_sp1 = _mm512_shuffle_epi32(lhs_mat_01_50, (_MM_PERM_ENUM)160); //A50(0-3) A50(0-3) A51(0-3) A51(0-3) A50(0-3) A50(0-3) A51(0-3) A51(0-3) A50(0-3) A50(0-3) A51(0-3) A51(0-3) A50(0-3) A50(0-3) A51(0-3) A51(0-3)
+                const __m512i lhs_mat_23_50_sp1 = _mm512_shuffle_epi32(lhs_mat_23_50, (_MM_PERM_ENUM)160); //A52(0-3) A52(0-3) A53(0-3) A53(0-3) A52(0-3) A52(0-3) A53(0-3) A53(0-3) A52(0-3) A52(0-3) A53(0-3) A53(0-3) A52(0-3) A52(0-3) A53(0-3) A53(0-3)
+
+                const __m512i lhs_mat_01_51_sp1 = _mm512_shuffle_epi32(lhs_mat_01_51, (_MM_PERM_ENUM)160); //A50(8-11) A50(8-11) A51(8-11) A51(8-11) A50(8-11) A50(8-11) A51(8-11) A51(8-11) A50(8-11) A50(8-11) A51(8-11) A51(8-11) A50(8-11) A50(8-11) A51(8-11) A51(8-11)
+                const __m512i lhs_mat_23_51_sp1 = _mm512_shuffle_epi32(lhs_mat_23_51, (_MM_PERM_ENUM)160); //A52(8-11) A52(8-11) A53(8-11) A53(8-11) A52(8-11) A52(8-11) A53(8-11) A53(8-11) A52(8-11) A52(8-11) A53(8-11) A53(8-11) A52(8-11) A52(8-11) A53(8-11) A53(8-11)
+
+                const __m512i lhs_mat_01_60_sp1 = _mm512_shuffle_epi32(lhs_mat_01_60, (_MM_PERM_ENUM)160); //A60(0-3) A60(0-3) A61(0-3) A61(0-3) A60(0-3) A60(0-3) A61(0-3) A61(0-3) A60(0-3) A60(0-3) A61(0-3) A61(0-3) A60(0-3) A60(0-3) A61(0-3) A61(0-3)
+                const __m512i lhs_mat_23_60_sp1 = _mm512_shuffle_epi32(lhs_mat_23_60, (_MM_PERM_ENUM)160); //A62(0-3) A62(0-3) A63(0-3) A63(0-3) A62(0-3) A62(0-3) A63(0-3) A63(0-3) A62(0-3) A62(0-3) A63(0-3) A63(0-3) A62(0-3) A62(0-3) A63(0-3) A63(0-3)
+
+                const __m512i lhs_mat_01_61_sp1 = _mm512_shuffle_epi32(lhs_mat_01_61, (_MM_PERM_ENUM)160); //A60(8-11) A60(8-11) A61(8-11) A61(8-11) A60(8-11) A60(8-11) A61(8-11) A61(8-11) A60(8-11) A60(8-11) A61(8-11) A61(8-11) A60(8-11) A60(8-11) A61(8-11) A61(8-11)
+                const __m512i lhs_mat_23_61_sp1 = _mm512_shuffle_epi32(lhs_mat_23_61, (_MM_PERM_ENUM)160); //A62(8-11) A62(8-11) A63(8-11) A63(8-11) A62(8-11) A62(8-11) A63(8-11) A63(8-11) A62(8-11) A62(8-11) A63(8-11) A63(8-11) A62(8-11) A62(8-11) A63(8-11) A63(8-11)
+
+                const __m512i lhs_mat_01_70_sp1 = _mm512_shuffle_epi32(lhs_mat_01_70, (_MM_PERM_ENUM)160); //A70(0-3) A70(0-3) A71(0-3) A71(0-3) A70(0-3) A70(0-3) A71(0-3) A71(0-3) A70(0-3) A70(0-3) A71(0-3) A71(0-3) A70(0-3) A70(0-3) A71(0-3) A71(0-3)
+                const __m512i lhs_mat_23_70_sp1 = _mm512_shuffle_epi32(lhs_mat_23_70, (_MM_PERM_ENUM)160); //A72(0-3) A72(0-3) A73(0-3) A73(0-3) A72(0-3) A72(0-3) A73(0-3) A73(0-3) A72(0-3) A72(0-3) A73(0-3) A73(0-3) A72(0-3) A72(0-3) A73(0-3) A73(0-3)
+
+                const __m512i lhs_mat_01_71_sp1 = _mm512_shuffle_epi32(lhs_mat_01_71, (_MM_PERM_ENUM)160); //A70(8-11) A70(8-11) A71(8-11) A71(8-11) A70(8-11) A70(8-11) A71(8-11) A71(8-11) A70(8-11) A70(8-11) A71(8-11) A71(8-11) A70(8-11) A70(8-11) A71(8-11) A71(8-11)
+                const __m512i lhs_mat_23_71_sp1 = _mm512_shuffle_epi32(lhs_mat_23_71, (_MM_PERM_ENUM)160); //A72(8-11) A72(8-11) A73(8-11) A73(8-11) A72(8-11) A72(8-11) A73(8-11) A73(8-11) A72(8-11) A72(8-11) A73(8-11) A73(8-11) A72(8-11) A72(8-11) A73(8-11) A73(8-11)
+
+                const __m512i lhs_mat_01_00_sp2 = _mm512_shuffle_epi32(lhs_mat_01_00, (_MM_PERM_ENUM)245); //A00(4-7) A00(4-7) A01(4-7) A01(4-7) A00(4-7) A00(4-7) A01(4-7) A01(4-7) A00(4-7) A00(4-7) A01(4-7) A01(4-7) A00(4-7) A00(4-7) A01(4-7) A01(4-7)
+                const __m512i lhs_mat_23_00_sp2 = _mm512_shuffle_epi32(lhs_mat_23_00, (_MM_PERM_ENUM)245); //A02(4-7) A02(4-7) A03(4-7) A03(4-7) A02(4-7) A02(4-7) A03(4-7) A03(4-7) A02(4-7) A02(4-7) A03(4-7) A03(4-7) A02(4-7) A02(4-7) A03(4-7) A03(4-7)
+
+                const __m512i lhs_mat_01_01_sp2 = _mm512_shuffle_epi32(lhs_mat_01_01, (_MM_PERM_ENUM)245); //A00(12-15) A00(12-15) A01(12-15) A01(12-15) A00(12-15) A00(12-15) A01(12-15) A01(12-15) A00(12-15) A00(12-15) A01(12-15) A01(12-15) A00(12-15) A00(12-15) A01(12-15) A01(12-15)
+                const __m512i lhs_mat_23_01_sp2 = _mm512_shuffle_epi32(lhs_mat_23_01, (_MM_PERM_ENUM)245); //A02(12-15) A02(12-15) A03(12-15) A03(12-15) A02(12-15) A02(12-15) A03(12-15) A03(12-15) A02(12-15) A02(12-15) A03(12-15) A03(12-15) A02(12-15) A02(12-15) A03(12-15) A03(12-15)
+
+                const __m512i lhs_mat_01_10_sp2 = _mm512_shuffle_epi32(lhs_mat_01_10, (_MM_PERM_ENUM)245); //A10(4-7) A10(4-7) A11(4-7) A11(4-7) A10(4-7) A10(4-7) A11(4-7) A11(4-7) A10(4-7) A10(4-7) A11(4-7) A11(4-7) A10(4-7) A10(4-7) A11(4-7) A11(4-7)
+                const __m512i lhs_mat_23_10_sp2 = _mm512_shuffle_epi32(lhs_mat_23_10, (_MM_PERM_ENUM)245); //A12(4-7) A12(4-7) A13(4-7) A13(4-7) A12(4-7) A12(4-7) A13(4-7) A13(4-7) A12(4-7) A12(4-7) A13(4-7) A13(4-7) A12(4-7) A12(4-7) A13(4-7) A13(4-7)
+
+                const __m512i lhs_mat_01_11_sp2 = _mm512_shuffle_epi32(lhs_mat_01_11, (_MM_PERM_ENUM)245); //A10(12-15) A10(12-15) A11(12-15) A11(12-15) A10(12-15) A10(12-15) A11(12-15) A11(12-15) A10(12-15) A10(12-15) A11(12-15) A11(12-15) A10(12-15) A10(12-15) A11(12-15) A11(12-15)
+                const __m512i lhs_mat_23_11_sp2 = _mm512_shuffle_epi32(lhs_mat_23_11, (_MM_PERM_ENUM)245); //A12(12-15) A12(12-15) A13(12-15) A13(12-15) A12(12-15) A12(12-15) A13(12-15) A13(12-15) A12(12-15) A12(12-15) A13(12-15) A13(12-15) A12(12-15) A12(12-15) A13(12-15) A13(12-15)
+
+                const __m512i lhs_mat_01_20_sp2 = _mm512_shuffle_epi32(lhs_mat_01_20, (_MM_PERM_ENUM)245); //A20(4-7) A20(4-7) A21(4-7) A21(4-7) A20(4-7) A20(4-7) A21(4-7) A21(4-7) A20(4-7) A20(4-7) A21(4-7) A21(4-7) A20(4-7) A20(4-7) A21(4-7) A21(4-7)
+                const __m512i lhs_mat_23_20_sp2 = _mm512_shuffle_epi32(lhs_mat_23_20, (_MM_PERM_ENUM)245); //A22(4-7) A22(4-7) A23(4-7) A23(4-7) A22(4-7) A22(4-7) A23(4-7) A23(4-7) A22(4-7) A22(4-7) A23(4-7) A23(4-7) A22(4-7) A22(4-7) A23(4-7) A23(4-7)
+
+                const __m512i lhs_mat_01_21_sp2 = _mm512_shuffle_epi32(lhs_mat_01_21, (_MM_PERM_ENUM)245); //A20(12-15) A20(12-15) A21(12-15) A21(12-15) A20(12-15) A20(12-15) A21(12-15) A21(12-15) A20(12-15) A20(12-15) A21(12-15) A21(12-15) A20(12-15) A20(12-15) A21(12-15) A21(12-15)
+                const __m512i lhs_mat_23_21_sp2 = _mm512_shuffle_epi32(lhs_mat_23_21, (_MM_PERM_ENUM)245); //A22(12-15) A22(12-15) A23(12-15) A23(12-15) A22(12-15) A22(12-15) A23(12-15) A23(12-15) A22(12-15) A22(12-15) A23(12-15) A23(12-15) A22(12-15) A22(12-15) A23(12-15) A23(12-15)
+
+                const __m512i lhs_mat_01_30_sp2 = _mm512_shuffle_epi32(lhs_mat_01_30, (_MM_PERM_ENUM)245); //A30(4-7) A30(4-7) A31(4-7) A31(4-7) A30(4-7) A30(4-7) A31(4-7) A31(4-7) A30(4-7) A30(4-7) A31(4-7) A31(4-7) A30(4-7) A30(4-7) A31(4-7) A31(4-7)
+                const __m512i lhs_mat_23_30_sp2 = _mm512_shuffle_epi32(lhs_mat_23_30, (_MM_PERM_ENUM)245); //A32(4-7) A32(4-7) A33(4-7) A33(4-7) A32(4-7) A32(4-7) A33(4-7) A33(4-7) A32(4-7) A32(4-7) A33(4-7) A33(4-7) A32(4-7) A32(4-7) A33(4-7) A33(4-7)
+
+                const __m512i lhs_mat_01_31_sp2 = _mm512_shuffle_epi32(lhs_mat_01_31, (_MM_PERM_ENUM)245); //A30(12-15) A30(12-15) A31(12-15) A31(12-15) A30(12-15) A30(12-15) A31(12-15) A31(12-15) A30(12-15) A30(12-15) A31(12-15) A31(12-15) A30(12-15) A30(12-15) A31(12-15) A31(12-15)
+                const __m512i lhs_mat_23_31_sp2 = _mm512_shuffle_epi32(lhs_mat_23_31, (_MM_PERM_ENUM)245); //A32(12-15) A32(12-15) A33(12-15) A33(12-15) A32(12-15) A32(12-15) A33(12-15) A33(12-15) A32(12-15) A32(12-15) A33(12-15) A33(12-15) A32(12-15) A32(12-15) A33(12-15) A33(12-15)
+
+                const __m512i lhs_mat_01_40_sp2 = _mm512_shuffle_epi32(lhs_mat_01_40, (_MM_PERM_ENUM)245); //A40(4-7) A40(4-7) A41(4-7) A41(4-7) A40(4-7) A40(4-7) A41(4-7) A41(4-7) A40(4-7) A40(4-7) A41(4-7) A41(4-7) A40(4-7) A40(4-7) A41(4-7) A41(4-7)
+                const __m512i lhs_mat_23_40_sp2 = _mm512_shuffle_epi32(lhs_mat_23_40, (_MM_PERM_ENUM)245); //A42(4-7) A42(4-7) A43(4-7) A43(4-7) A42(4-7) A42(4-7) A43(4-7) A43(4-7) A42(4-7) A42(4-7) A43(4-7) A43(4-7) A42(4-7) A42(4-7) A43(4-7) A43(4-7)
+
+                const __m512i lhs_mat_01_41_sp2 = _mm512_shuffle_epi32(lhs_mat_01_41, (_MM_PERM_ENUM)245); //A40(12-15) A40(12-15) A41(12-15) A41(12-15) A40(12-15) A40(12-15) A41(12-15) A41(12-15) A40(12-15) A40(12-15) A41(12-15) A41(12-15) A40(12-15) A40(12-15) A41(12-15) A41(12-15)
+                const __m512i lhs_mat_23_41_sp2 = _mm512_shuffle_epi32(lhs_mat_23_41, (_MM_PERM_ENUM)245); //A42(12-15) A42(12-15) A43(12-15) A43(12-15) A42(12-15) A42(12-15) A43(12-15) A43(12-15) A42(12-15) A42(12-15) A43(12-15) A43(12-15) A42(12-15) A42(12-15) A43(12-15) A43(12-15)
+
+                const __m512i lhs_mat_01_50_sp2 = _mm512_shuffle_epi32(lhs_mat_01_50, (_MM_PERM_ENUM)245); //A50(4-7) A50(4-7) A51(4-7) A51(4-7) A50(4-7) A50(4-7) A51(4-7) A51(4-7) A50(4-7) A50(4-7) A51(4-7) A51(4-7) A50(4-7) A50(4-7) A51(4-7) A51(4-7)
+                const __m512i lhs_mat_23_50_sp2 = _mm512_shuffle_epi32(lhs_mat_23_50, (_MM_PERM_ENUM)245); //A52(4-7) A52(4-7) A53(4-7) A53(4-7) A52(4-7) A52(4-7) A53(4-7) A53(4-7) A52(4-7) A52(4-7) A53(4-7) A53(4-7) A52(4-7) A52(4-7) A53(4-7) A53(4-7)
+
+                const __m512i lhs_mat_01_51_sp2 = _mm512_shuffle_epi32(lhs_mat_01_51, (_MM_PERM_ENUM)245); //A50(12-15) A50(12-15) A51(12-15) A51(12-15) A50(12-15) A50(12-15) A51(12-15) A51(12-15) A50(12-15) A50(12-15) A51(12-15) A51(12-15) A50(12-15) A50(12-15) A51(12-15) A51(12-15)
+                const __m512i lhs_mat_23_51_sp2 = _mm512_shuffle_epi32(lhs_mat_23_51, (_MM_PERM_ENUM)245); //A52(12-15) A52(12-15) A53(12-15) A53(12-15) A52(12-15) A52(12-15) A53(12-15) A53(12-15) A52(12-15) A52(12-15) A53(12-15) A53(12-15) A52(12-15) A52(12-15) A53(12-15) A53(12-15)
+
+                const __m512i lhs_mat_01_60_sp2 = _mm512_shuffle_epi32(lhs_mat_01_60, (_MM_PERM_ENUM)245); //A60(4-7) A60(4-7) A61(4-7) A61(4-7) A60(4-7) A60(4-7) A61(4-7) A61(4-7) A60(4-7) A60(4-7) A61(4-7) A61(4-7) A60(4-7) A60(4-7) A61(4-7) A61(4-7)
+                const __m512i lhs_mat_23_60_sp2 = _mm512_shuffle_epi32(lhs_mat_23_60, (_MM_PERM_ENUM)245); //A62(4-7) A62(4-7) A63(4-7) A63(4-7) A62(4-7) A62(4-7) A63(4-7) A63(4-7) A62(4-7) A62(4-7) A63(4-7) A63(4-7) A62(4-7) A62(4-7) A63(4-7) A63(4-7)
+
+                const __m512i lhs_mat_01_61_sp2 = _mm512_shuffle_epi32(lhs_mat_01_61, (_MM_PERM_ENUM)245); //A60(12-15) A60(12-15) A61(12-15) A61(12-15) A60(12-15) A60(12-15) A61(12-15) A61(12-15) A60(12-15) A60(12-15) A61(12-15) A61(12-15) A60(12-15) A60(12-15) A61(12-15) A61(12-15)
+                const __m512i lhs_mat_23_61_sp2 = _mm512_shuffle_epi32(lhs_mat_23_61, (_MM_PERM_ENUM)245); //A62(12-15) A62(12-15) A63(12-15) A63(12-15) A62(12-15) A62(12-15) A63(12-15) A63(12-15) A62(12-15) A62(12-15) A63(12-15) A63(12-15) A62(12-15) A62(12-15) A63(12-15) A63(12-15)
+
+                const __m512i lhs_mat_01_70_sp2 = _mm512_shuffle_epi32(lhs_mat_01_70, (_MM_PERM_ENUM)245); //A70(4-7) A70(4-7) A71(4-7) A71(4-7) A70(4-7) A70(4-7) A71(4-7) A71(4-7) A70(4-7) A70(4-7) A71(4-7) A71(4-7) A70(4-7) A70(4-7) A71(4-7) A71(4-7)
+                const __m512i lhs_mat_23_70_sp2 = _mm512_shuffle_epi32(lhs_mat_23_70, (_MM_PERM_ENUM)245); //A72(4-7) A72(4-7) A73(4-7) A73(4-7) A72(4-7) A72(4-7) A73(4-7) A73(4-7) A72(4-7) A72(4-7) A73(4-7) A73(4-7) A72(4-7) A72(4-7) A73(4-7) A73(4-7)
+
+                const __m512i lhs_mat_01_71_sp2 = _mm512_shuffle_epi32(lhs_mat_01_71, (_MM_PERM_ENUM)245); //A70(12-15) A70(12-15) A71(12-15) A71(12-15) A70(12-15) A70(12-15) A71(12-15) A71(12-15) A70(12-15) A70(12-15) A71(12-15) A71(12-15) A70(12-15) A70(12-15) A71(12-15) A71(12-15)
+                const __m512i lhs_mat_23_71_sp2 = _mm512_shuffle_epi32(lhs_mat_23_71, (_MM_PERM_ENUM)245); //A72(12-15) A72(12-15) A73(12-15) A73(12-15) A72(12-15) A72(12-15) A73(12-15) A73(12-15) A72(12-15) A72(12-15) A73(12-15) A73(12-15) A72(12-15) A72(12-15) A73(12-15) A73(12-15)
+
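+                // Added note: maddubs multiplies unsigned RHS quant bytes with signed Q8 LHS bytes
+                // and sums adjacent pairs into 16-bit lanes; adding the results from shuffle
+                // patterns one and two accumulates the partial dot products for each output block.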
+                // The values arranged in shuffle patterns are combined with a dot product operation within each 32-bit lane, i.e. corresponding bytes are multiplied and the products accumulated within the lane
+                __m512i iacc_mat_00_0_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_00_sp1, lhs_mat_01_00_sp1),_mm512_maddubs_epi16(rhs_mat_014589CD_01_sp1, lhs_mat_01_01_sp1));
+                __m512i iacc_mat_01_0_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_00_sp1, lhs_mat_01_00_sp1),_mm512_maddubs_epi16(rhs_mat_2367ABEF_01_sp1, lhs_mat_01_01_sp1));
+
+                __m512i iacc_mat_10_0_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_00_sp1, lhs_mat_23_00_sp1),_mm512_maddubs_epi16(rhs_mat_014589CD_01_sp1, lhs_mat_23_01_sp1));
+                __m512i iacc_mat_11_0_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_00_sp1, lhs_mat_23_00_sp1),_mm512_maddubs_epi16(rhs_mat_2367ABEF_01_sp1, lhs_mat_23_01_sp1));
+
+                __m512i iacc_mat_00_1_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_10_sp1, lhs_mat_01_10_sp1),_mm512_maddubs_epi16(rhs_mat_014589CD_11_sp1, lhs_mat_01_11_sp1));
+                __m512i iacc_mat_01_1_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_10_sp1, lhs_mat_01_10_sp1),_mm512_maddubs_epi16(rhs_mat_2367ABEF_11_sp1, lhs_mat_01_11_sp1));
+
+                __m512i iacc_mat_10_1_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_10_sp1, lhs_mat_23_10_sp1),_mm512_maddubs_epi16(rhs_mat_014589CD_11_sp1, lhs_mat_23_11_sp1));
+                __m512i iacc_mat_11_1_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_10_sp1, lhs_mat_23_10_sp1),_mm512_maddubs_epi16(rhs_mat_2367ABEF_11_sp1, lhs_mat_23_11_sp1));
+
+                __m512i iacc_mat_00_2_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_20_sp1, lhs_mat_01_20_sp1),_mm512_maddubs_epi16(rhs_mat_014589CD_21_sp1, lhs_mat_01_21_sp1));
+                __m512i iacc_mat_01_2_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_20_sp1, lhs_mat_01_20_sp1),_mm512_maddubs_epi16(rhs_mat_2367ABEF_21_sp1, lhs_mat_01_21_sp1));
+
+                __m512i iacc_mat_10_2_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_20_sp1, lhs_mat_23_20_sp1),_mm512_maddubs_epi16(rhs_mat_014589CD_21_sp1, lhs_mat_23_21_sp1));
+                __m512i iacc_mat_11_2_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_20_sp1, lhs_mat_23_20_sp1),_mm512_maddubs_epi16(rhs_mat_2367ABEF_21_sp1, lhs_mat_23_21_sp1));
+
+                __m512i iacc_mat_00_3_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_30_sp1, lhs_mat_01_30_sp1),_mm512_maddubs_epi16(rhs_mat_014589CD_31_sp1, lhs_mat_01_31_sp1));
+                __m512i iacc_mat_01_3_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_30_sp1, lhs_mat_01_30_sp1),_mm512_maddubs_epi16(rhs_mat_2367ABEF_31_sp1, lhs_mat_01_31_sp1));
+
+                __m512i iacc_mat_10_3_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_30_sp1, lhs_mat_23_30_sp1),_mm512_maddubs_epi16(rhs_mat_014589CD_31_sp1, lhs_mat_23_31_sp1));
+                __m512i iacc_mat_11_3_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_30_sp1, lhs_mat_23_30_sp1),_mm512_maddubs_epi16(rhs_mat_2367ABEF_31_sp1, lhs_mat_23_31_sp1));
+
+                __m512i iacc_mat_00_4_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_40_sp1, lhs_mat_01_40_sp1),_mm512_maddubs_epi16(rhs_mat_014589CD_41_sp1, lhs_mat_01_41_sp1));
+                __m512i iacc_mat_01_4_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_40_sp1, lhs_mat_01_40_sp1),_mm512_maddubs_epi16(rhs_mat_2367ABEF_41_sp1, lhs_mat_01_41_sp1));
+
+                __m512i iacc_mat_10_4_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_40_sp1, lhs_mat_23_40_sp1),_mm512_maddubs_epi16(rhs_mat_014589CD_41_sp1, lhs_mat_23_41_sp1));
+                __m512i iacc_mat_11_4_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_40_sp1, lhs_mat_23_40_sp1),_mm512_maddubs_epi16(rhs_mat_2367ABEF_41_sp1, lhs_mat_23_41_sp1));
+
+                __m512i iacc_mat_00_5_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_50_sp1, lhs_mat_01_50_sp1),_mm512_maddubs_epi16(rhs_mat_014589CD_51_sp1, lhs_mat_01_51_sp1));
+                __m512i iacc_mat_01_5_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_50_sp1, lhs_mat_01_50_sp1),_mm512_maddubs_epi16(rhs_mat_2367ABEF_51_sp1, lhs_mat_01_51_sp1));
+
+                __m512i iacc_mat_10_5_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_50_sp1, lhs_mat_23_50_sp1),_mm512_maddubs_epi16(rhs_mat_014589CD_51_sp1, lhs_mat_23_51_sp1));
+                __m512i iacc_mat_11_5_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_50_sp1, lhs_mat_23_50_sp1),_mm512_maddubs_epi16(rhs_mat_2367ABEF_51_sp1, lhs_mat_23_51_sp1));
+
+                __m512i iacc_mat_00_6_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_60_sp1, lhs_mat_01_60_sp1),_mm512_maddubs_epi16(rhs_mat_014589CD_61_sp1, lhs_mat_01_61_sp1));
+                __m512i iacc_mat_01_6_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_60_sp1, lhs_mat_01_60_sp1),_mm512_maddubs_epi16(rhs_mat_2367ABEF_61_sp1, lhs_mat_01_61_sp1));
+
+                __m512i iacc_mat_10_6_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_60_sp1, lhs_mat_23_60_sp1),_mm512_maddubs_epi16(rhs_mat_014589CD_61_sp1, lhs_mat_23_61_sp1));
+                __m512i iacc_mat_11_6_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_60_sp1, lhs_mat_23_60_sp1),_mm512_maddubs_epi16(rhs_mat_2367ABEF_61_sp1, lhs_mat_23_61_sp1));
+
+                __m512i iacc_mat_00_7_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_70_sp1, lhs_mat_01_70_sp1),_mm512_maddubs_epi16(rhs_mat_014589CD_71_sp1, lhs_mat_01_71_sp1));
+                __m512i iacc_mat_01_7_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_70_sp1, lhs_mat_01_70_sp1),_mm512_maddubs_epi16(rhs_mat_2367ABEF_71_sp1, lhs_mat_01_71_sp1));
+
+                __m512i iacc_mat_10_7_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_70_sp1, lhs_mat_23_70_sp1),_mm512_maddubs_epi16(rhs_mat_014589CD_71_sp1, lhs_mat_23_71_sp1));
+                __m512i iacc_mat_11_7_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_70_sp1, lhs_mat_23_70_sp1),_mm512_maddubs_epi16(rhs_mat_2367ABEF_71_sp1, lhs_mat_23_71_sp1));
+
+
+                __m512i iacc_mat_00_0_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_00_sp2, lhs_mat_01_00_sp2),_mm512_maddubs_epi16(rhs_mat_014589CD_01_sp2, lhs_mat_01_01_sp2));
+                __m512i iacc_mat_01_0_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_00_sp2, lhs_mat_01_00_sp2),_mm512_maddubs_epi16(rhs_mat_2367ABEF_01_sp2, lhs_mat_01_01_sp2));
+
+                __m512i iacc_mat_10_0_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_00_sp2, lhs_mat_23_00_sp2),_mm512_maddubs_epi16(rhs_mat_014589CD_01_sp2, lhs_mat_23_01_sp2));
+                __m512i iacc_mat_11_0_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_00_sp2, lhs_mat_23_00_sp2),_mm512_maddubs_epi16(rhs_mat_2367ABEF_01_sp2, lhs_mat_23_01_sp2));
+
+                __m512i iacc_mat_00_1_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_10_sp2, lhs_mat_01_10_sp2),_mm512_maddubs_epi16(rhs_mat_014589CD_11_sp2, lhs_mat_01_11_sp2));
+                __m512i iacc_mat_01_1_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_10_sp2, lhs_mat_01_10_sp2),_mm512_maddubs_epi16(rhs_mat_2367ABEF_11_sp2, lhs_mat_01_11_sp2));
+
+                __m512i iacc_mat_10_1_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_10_sp2, lhs_mat_23_10_sp2),_mm512_maddubs_epi16(rhs_mat_014589CD_11_sp2, lhs_mat_23_11_sp2));
+                __m512i iacc_mat_11_1_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_10_sp2, lhs_mat_23_10_sp2),_mm512_maddubs_epi16(rhs_mat_2367ABEF_11_sp2, lhs_mat_23_11_sp2));
+
+                __m512i iacc_mat_00_2_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_20_sp2, lhs_mat_01_20_sp2),_mm512_maddubs_epi16(rhs_mat_014589CD_21_sp2, lhs_mat_01_21_sp2));
+                __m512i iacc_mat_01_2_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_20_sp2, lhs_mat_01_20_sp2),_mm512_maddubs_epi16(rhs_mat_2367ABEF_21_sp2, lhs_mat_01_21_sp2));
+
+                __m512i iacc_mat_10_2_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_20_sp2, lhs_mat_23_20_sp2),_mm512_maddubs_epi16(rhs_mat_014589CD_21_sp2, lhs_mat_23_21_sp2));
+                __m512i iacc_mat_11_2_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_20_sp2, lhs_mat_23_20_sp2),_mm512_maddubs_epi16(rhs_mat_2367ABEF_21_sp2, lhs_mat_23_21_sp2));
+
+                __m512i iacc_mat_00_3_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_30_sp2, lhs_mat_01_30_sp2),_mm512_maddubs_epi16(rhs_mat_014589CD_31_sp2, lhs_mat_01_31_sp2));
+                __m512i iacc_mat_01_3_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_30_sp2, lhs_mat_01_30_sp2),_mm512_maddubs_epi16(rhs_mat_2367ABEF_31_sp2, lhs_mat_01_31_sp2));
+
+                __m512i iacc_mat_10_3_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_30_sp2, lhs_mat_23_30_sp2),_mm512_maddubs_epi16(rhs_mat_014589CD_31_sp2, lhs_mat_23_31_sp2));
+                __m512i iacc_mat_11_3_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_30_sp2, lhs_mat_23_30_sp2),_mm512_maddubs_epi16(rhs_mat_2367ABEF_31_sp2, lhs_mat_23_31_sp2));
+
+                __m512i iacc_mat_00_4_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_40_sp2, lhs_mat_01_40_sp2),_mm512_maddubs_epi16(rhs_mat_014589CD_41_sp2, lhs_mat_01_41_sp2));
+                __m512i iacc_mat_01_4_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_40_sp2, lhs_mat_01_40_sp2),_mm512_maddubs_epi16(rhs_mat_2367ABEF_41_sp2, lhs_mat_01_41_sp2));
+
+                __m512i iacc_mat_10_4_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_40_sp2, lhs_mat_23_40_sp2),_mm512_maddubs_epi16(rhs_mat_014589CD_41_sp2, lhs_mat_23_41_sp2));
+                __m512i iacc_mat_11_4_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_40_sp2, lhs_mat_23_40_sp2),_mm512_maddubs_epi16(rhs_mat_2367ABEF_41_sp2, lhs_mat_23_41_sp2));
+
+                __m512i iacc_mat_00_5_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_50_sp2, lhs_mat_01_50_sp2),_mm512_maddubs_epi16(rhs_mat_014589CD_51_sp2, lhs_mat_01_51_sp2));
+                __m512i iacc_mat_01_5_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_50_sp2, lhs_mat_01_50_sp2),_mm512_maddubs_epi16(rhs_mat_2367ABEF_51_sp2, lhs_mat_01_51_sp2));
+
+                __m512i iacc_mat_10_5_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_50_sp2, lhs_mat_23_50_sp2),_mm512_maddubs_epi16(rhs_mat_014589CD_51_sp2, lhs_mat_23_51_sp2));
+                __m512i iacc_mat_11_5_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_50_sp2, lhs_mat_23_50_sp2),_mm512_maddubs_epi16(rhs_mat_2367ABEF_51_sp2, lhs_mat_23_51_sp2));
+
+                __m512i iacc_mat_00_6_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_60_sp2, lhs_mat_01_60_sp2),_mm512_maddubs_epi16(rhs_mat_014589CD_61_sp2, lhs_mat_01_61_sp2));
+                __m512i iacc_mat_01_6_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_60_sp2, lhs_mat_01_60_sp2),_mm512_maddubs_epi16(rhs_mat_2367ABEF_61_sp2, lhs_mat_01_61_sp2));
+
+                __m512i iacc_mat_10_6_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_60_sp2, lhs_mat_23_60_sp2),_mm512_maddubs_epi16(rhs_mat_014589CD_61_sp2, lhs_mat_23_61_sp2));
+                __m512i iacc_mat_11_6_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_60_sp2, lhs_mat_23_60_sp2),_mm512_maddubs_epi16(rhs_mat_2367ABEF_61_sp2, lhs_mat_23_61_sp2));
+
+                __m512i iacc_mat_00_7_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_70_sp2, lhs_mat_01_70_sp2),_mm512_maddubs_epi16(rhs_mat_014589CD_71_sp2, lhs_mat_01_71_sp2));
+                __m512i iacc_mat_01_7_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_70_sp2, lhs_mat_01_70_sp2),_mm512_maddubs_epi16(rhs_mat_2367ABEF_71_sp2, lhs_mat_01_71_sp2));
+
+                __m512i iacc_mat_10_7_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_70_sp2, lhs_mat_23_70_sp2),_mm512_maddubs_epi16(rhs_mat_014589CD_71_sp2, lhs_mat_23_71_sp2));
+                __m512i iacc_mat_11_7_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_70_sp2, lhs_mat_23_70_sp2),_mm512_maddubs_epi16(rhs_mat_2367ABEF_71_sp2, lhs_mat_23_71_sp2));
+
+                // Combine results from both shuffle patterns for each output block
+                __m512i iacc_mat_00_0 = _mm512_add_epi16(iacc_mat_00_0_sp1, iacc_mat_00_0_sp2);
+                __m512i iacc_mat_01_0 = _mm512_add_epi16(iacc_mat_01_0_sp1, iacc_mat_01_0_sp2);
+                __m512i iacc_mat_10_0 = _mm512_add_epi16(iacc_mat_10_0_sp1, iacc_mat_10_0_sp2);
+                __m512i iacc_mat_11_0 = _mm512_add_epi16(iacc_mat_11_0_sp1, iacc_mat_11_0_sp2);
+
+                __m512i iacc_mat_00_1 = _mm512_add_epi16(iacc_mat_00_1_sp1, iacc_mat_00_1_sp2);
+                __m512i iacc_mat_01_1 = _mm512_add_epi16(iacc_mat_01_1_sp1, iacc_mat_01_1_sp2);
+                __m512i iacc_mat_10_1 = _mm512_add_epi16(iacc_mat_10_1_sp1, iacc_mat_10_1_sp2);
+                __m512i iacc_mat_11_1 = _mm512_add_epi16(iacc_mat_11_1_sp1, iacc_mat_11_1_sp2);
+
+                __m512i iacc_mat_00_2 = _mm512_add_epi16(iacc_mat_00_2_sp1, iacc_mat_00_2_sp2);
+                __m512i iacc_mat_01_2 = _mm512_add_epi16(iacc_mat_01_2_sp1, iacc_mat_01_2_sp2);
+                __m512i iacc_mat_10_2 = _mm512_add_epi16(iacc_mat_10_2_sp1, iacc_mat_10_2_sp2);
+                __m512i iacc_mat_11_2 = _mm512_add_epi16(iacc_mat_11_2_sp1, iacc_mat_11_2_sp2);
+
+                __m512i iacc_mat_00_3 = _mm512_add_epi16(iacc_mat_00_3_sp1, iacc_mat_00_3_sp2);
+                __m512i iacc_mat_01_3 = _mm512_add_epi16(iacc_mat_01_3_sp1, iacc_mat_01_3_sp2);
+                __m512i iacc_mat_10_3 = _mm512_add_epi16(iacc_mat_10_3_sp1, iacc_mat_10_3_sp2);
+                __m512i iacc_mat_11_3 = _mm512_add_epi16(iacc_mat_11_3_sp1, iacc_mat_11_3_sp2);
+
+                __m512i iacc_mat_00_4 = _mm512_add_epi16(iacc_mat_00_4_sp1, iacc_mat_00_4_sp2);
+                __m512i iacc_mat_01_4 = _mm512_add_epi16(iacc_mat_01_4_sp1, iacc_mat_01_4_sp2);
+                __m512i iacc_mat_10_4 = _mm512_add_epi16(iacc_mat_10_4_sp1, iacc_mat_10_4_sp2);
+                __m512i iacc_mat_11_4 = _mm512_add_epi16(iacc_mat_11_4_sp1, iacc_mat_11_4_sp2);
+
+                __m512i iacc_mat_00_5 = _mm512_add_epi16(iacc_mat_00_5_sp1, iacc_mat_00_5_sp2);
+                __m512i iacc_mat_01_5 = _mm512_add_epi16(iacc_mat_01_5_sp1, iacc_mat_01_5_sp2);
+                __m512i iacc_mat_10_5 = _mm512_add_epi16(iacc_mat_10_5_sp1, iacc_mat_10_5_sp2);
+                __m512i iacc_mat_11_5 = _mm512_add_epi16(iacc_mat_11_5_sp1, iacc_mat_11_5_sp2);
+
+                __m512i iacc_mat_00_6 = _mm512_add_epi16(iacc_mat_00_6_sp1, iacc_mat_00_6_sp2);
+                __m512i iacc_mat_01_6 = _mm512_add_epi16(iacc_mat_01_6_sp1, iacc_mat_01_6_sp2);
+                __m512i iacc_mat_10_6 = _mm512_add_epi16(iacc_mat_10_6_sp1, iacc_mat_10_6_sp2);
+                __m512i iacc_mat_11_6 = 
_mm512_add_epi16(iacc_mat_11_6_sp1, iacc_mat_11_6_sp2);
+
+ __m512i iacc_mat_00_7 = _mm512_add_epi16(iacc_mat_00_7_sp1, iacc_mat_00_7_sp2);
+ __m512i iacc_mat_01_7 = _mm512_add_epi16(iacc_mat_01_7_sp1, iacc_mat_01_7_sp2);
+ __m512i iacc_mat_10_7 = _mm512_add_epi16(iacc_mat_10_7_sp1, iacc_mat_10_7_sp2);
+ __m512i iacc_mat_11_7 = _mm512_add_epi16(iacc_mat_11_7_sp1, iacc_mat_11_7_sp2);
+
+ // Outputs of both shuffle patterns are added in order to sum the dot product outputs of all 32 values in a block
+ iacc_mat_00_0 = _mm512_madd_epi16(iacc_mat_00_0, scale_014589CD_0);
+ iacc_mat_01_0 = _mm512_madd_epi16(iacc_mat_01_0, scale_2367ABEF_0);
+ iacc_mat_10_0 = _mm512_madd_epi16(iacc_mat_10_0, scale_014589CD_0);
+ iacc_mat_11_0 = _mm512_madd_epi16(iacc_mat_11_0, scale_2367ABEF_0);
+
+ iacc_mat_00_1 = _mm512_madd_epi16(iacc_mat_00_1, scale_014589CD_1);
+ iacc_mat_01_1 = _mm512_madd_epi16(iacc_mat_01_1, scale_2367ABEF_1);
+ iacc_mat_10_1 = _mm512_madd_epi16(iacc_mat_10_1, scale_014589CD_1);
+ iacc_mat_11_1 = _mm512_madd_epi16(iacc_mat_11_1, scale_2367ABEF_1);
+
+ iacc_mat_00_2 = _mm512_madd_epi16(iacc_mat_00_2, scale_014589CD_2);
+ iacc_mat_01_2 = _mm512_madd_epi16(iacc_mat_01_2, scale_2367ABEF_2);
+ iacc_mat_10_2 = _mm512_madd_epi16(iacc_mat_10_2, scale_014589CD_2);
+ iacc_mat_11_2 = _mm512_madd_epi16(iacc_mat_11_2, scale_2367ABEF_2);
+
+ iacc_mat_00_3 = _mm512_madd_epi16(iacc_mat_00_3, scale_014589CD_3);
+ iacc_mat_01_3 = _mm512_madd_epi16(iacc_mat_01_3, scale_2367ABEF_3);
+ iacc_mat_10_3 = _mm512_madd_epi16(iacc_mat_10_3, scale_014589CD_3);
+ iacc_mat_11_3 = _mm512_madd_epi16(iacc_mat_11_3, scale_2367ABEF_3);
+
+ iacc_mat_00_4 = _mm512_madd_epi16(iacc_mat_00_4, scale_014589CD_4);
+ iacc_mat_01_4 = _mm512_madd_epi16(iacc_mat_01_4, scale_2367ABEF_4);
+ iacc_mat_10_4 = _mm512_madd_epi16(iacc_mat_10_4, scale_014589CD_4);
+ iacc_mat_11_4 = _mm512_madd_epi16(iacc_mat_11_4, scale_2367ABEF_4);
+
+ iacc_mat_00_5 = _mm512_madd_epi16(iacc_mat_00_5, scale_014589CD_5);
+ iacc_mat_01_5 = _mm512_madd_epi16(iacc_mat_01_5, scale_2367ABEF_5);
+ iacc_mat_10_5 = _mm512_madd_epi16(iacc_mat_10_5, scale_014589CD_5);
+ iacc_mat_11_5 = _mm512_madd_epi16(iacc_mat_11_5, scale_2367ABEF_5);
+
+ iacc_mat_00_6 = _mm512_madd_epi16(iacc_mat_00_6, scale_014589CD_6);
+ iacc_mat_01_6 = _mm512_madd_epi16(iacc_mat_01_6, scale_2367ABEF_6);
+ iacc_mat_10_6 = _mm512_madd_epi16(iacc_mat_10_6, scale_014589CD_6);
+ iacc_mat_11_6 = _mm512_madd_epi16(iacc_mat_11_6, scale_2367ABEF_6);
+
+ iacc_mat_00_7 = _mm512_madd_epi16(iacc_mat_00_7, scale_014589CD_7);
+ iacc_mat_01_7 = _mm512_madd_epi16(iacc_mat_01_7, scale_2367ABEF_7);
+ iacc_mat_10_7 = _mm512_madd_epi16(iacc_mat_10_7, scale_014589CD_7);
+ iacc_mat_11_7 = _mm512_madd_epi16(iacc_mat_11_7, scale_2367ABEF_7);
+
+ __m512i iacc_mat_00 = _mm512_add_epi32(_mm512_add_epi32(_mm512_add_epi32(iacc_mat_00_0, iacc_mat_00_1), _mm512_add_epi32(iacc_mat_00_2, iacc_mat_00_3)), _mm512_add_epi32(_mm512_add_epi32(iacc_mat_00_4, iacc_mat_00_5), _mm512_add_epi32(iacc_mat_00_6, iacc_mat_00_7)));
+ __m512i iacc_mat_01 = _mm512_add_epi32(_mm512_add_epi32(_mm512_add_epi32(iacc_mat_01_0, iacc_mat_01_1), _mm512_add_epi32(iacc_mat_01_2, iacc_mat_01_3)), _mm512_add_epi32(_mm512_add_epi32(iacc_mat_01_4, iacc_mat_01_5), _mm512_add_epi32(iacc_mat_01_6, iacc_mat_01_7)));
+ __m512i iacc_mat_10 = _mm512_add_epi32(_mm512_add_epi32(_mm512_add_epi32(iacc_mat_10_0, iacc_mat_10_1), _mm512_add_epi32(iacc_mat_10_2, iacc_mat_10_3)), _mm512_add_epi32(_mm512_add_epi32(iacc_mat_10_4, iacc_mat_10_5), _mm512_add_epi32(iacc_mat_10_6, iacc_mat_10_7)));
+ __m512i iacc_mat_11 = _mm512_add_epi32(_mm512_add_epi32(_mm512_add_epi32(iacc_mat_11_0, iacc_mat_11_1), _mm512_add_epi32(iacc_mat_11_2, iacc_mat_11_3)), _mm512_add_epi32(_mm512_add_epi32(iacc_mat_11_4, iacc_mat_11_5), _mm512_add_epi32(iacc_mat_11_6, iacc_mat_11_7)));
+
+ // Straighten out to make 4 row vectors
+ __m512i iacc_row_0 = _mm512_mask_blend_epi32(0xCCCC, iacc_mat_00, _mm512_shuffle_epi32(iacc_mat_01, (_MM_PERM_ENUM)78));
+ __m512i iacc_row_1 = _mm512_mask_blend_epi32(0xCCCC, _mm512_shuffle_epi32(iacc_mat_00, (_MM_PERM_ENUM)78), iacc_mat_01);
+ __m512i iacc_row_2 = _mm512_mask_blend_epi32(0xCCCC, iacc_mat_10, _mm512_shuffle_epi32(iacc_mat_11, (_MM_PERM_ENUM)78));
+ __m512i iacc_row_3 = _mm512_mask_blend_epi32(0xCCCC, _mm512_shuffle_epi32(iacc_mat_10, (_MM_PERM_ENUM)78), iacc_mat_11);
+
+ // Load the scale (d) values for all the 4 Q8_K blocks and repeat them across lanes
+ const __m128 row_scale_f32_sse = _mm_load_ps(a_ptrs[rp][b].d);
+ const __m256 row_scale_f32_ymm = _mm256_set_m128(row_scale_f32_sse, row_scale_f32_sse);
+ const __m512 row_scale_f32 = _mm512_insertf32x8(_mm512_castps256_ps512(row_scale_f32_ymm), row_scale_f32_ymm, 1);
+
+ // Multiply with appropriate scales and accumulate (for both d and dmin) below
+ acc_rows[rp * 4] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_0), _mm512_mul_ps(col_scale_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 0)), acc_rows[rp * 4]);
+ acc_rows[rp * 4 + 1] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_1), _mm512_mul_ps(col_scale_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 85)), acc_rows[rp * 4 + 1]);
+ acc_rows[rp * 4 + 2] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_2), _mm512_mul_ps(col_scale_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 170)), acc_rows[rp * 4 + 2]);
+ acc_rows[rp * 4 + 3] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_3), _mm512_mul_ps(col_scale_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 255)), acc_rows[rp * 4 + 3]);
+
+ // Take two bsums from two Q8_Ks at a time and multiply with corresponding mins values from each Q2_K
+ __m512i iacc_row_min_0_01 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_01_0123, (_MM_PERM_ENUM)0), mins_01);
+ __m512i iacc_row_min_1_01 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_01_0123, (_MM_PERM_ENUM)170), mins_01);
+ __m512i iacc_row_min_2_01 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_23_0123, (_MM_PERM_ENUM)0), mins_01);
+ __m512i iacc_row_min_3_01 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_23_0123, (_MM_PERM_ENUM)170), mins_01);
+
+ __m512i iacc_row_min_0_23 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_01_0123, (_MM_PERM_ENUM)85), mins_23);
+ __m512i iacc_row_min_1_23 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_01_0123, (_MM_PERM_ENUM)255), mins_23);
+ __m512i iacc_row_min_2_23 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_23_0123, (_MM_PERM_ENUM)85), mins_23);
+ __m512i iacc_row_min_3_23 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_23_0123, (_MM_PERM_ENUM)255), mins_23);
+
+ __m512i iacc_row_min_0_45 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_01_4567, (_MM_PERM_ENUM)0), mins_45);
+ __m512i iacc_row_min_1_45 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_01_4567, (_MM_PERM_ENUM)170), mins_45);
+ __m512i iacc_row_min_2_45 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_23_4567, (_MM_PERM_ENUM)0), mins_45);
+ __m512i iacc_row_min_3_45 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_23_4567, (_MM_PERM_ENUM)170), mins_45);
+
+ __m512i iacc_row_min_0_67 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_01_4567, (_MM_PERM_ENUM)85), mins_67);
+ __m512i iacc_row_min_1_67 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_01_4567, (_MM_PERM_ENUM)255), mins_67);
+ __m512i iacc_row_min_2_67 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_23_4567, (_MM_PERM_ENUM)85), mins_67);
+ __m512i iacc_row_min_3_67 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_23_4567, (_MM_PERM_ENUM)255), mins_67);
+
+ __m512i iacc_row_min_0 = _mm512_add_epi32(_mm512_add_epi32(iacc_row_min_0_01, iacc_row_min_0_23), _mm512_add_epi32(iacc_row_min_0_45,iacc_row_min_0_67));
+ __m512i iacc_row_min_1 = _mm512_add_epi32(_mm512_add_epi32(iacc_row_min_1_01, iacc_row_min_1_23), _mm512_add_epi32(iacc_row_min_1_45,iacc_row_min_1_67));
+ __m512i iacc_row_min_2 = _mm512_add_epi32(_mm512_add_epi32(iacc_row_min_2_01, iacc_row_min_2_23), _mm512_add_epi32(iacc_row_min_2_45,iacc_row_min_2_67));
+ __m512i iacc_row_min_3 = _mm512_add_epi32(_mm512_add_epi32(iacc_row_min_3_01, iacc_row_min_3_23), _mm512_add_epi32(iacc_row_min_3_45,iacc_row_min_3_67));
+
+ acc_min_rows[rp * 4] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_min_0), _mm512_mul_ps(col_dmin_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 0)), acc_min_rows[rp * 4]);
+ acc_min_rows[rp * 4 + 1] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_min_1), _mm512_mul_ps(col_dmin_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 85)), acc_min_rows[rp * 4 + 1]);
+ acc_min_rows[rp * 4 + 2] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_min_2), _mm512_mul_ps(col_dmin_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 170)), acc_min_rows[rp * 4 + 2]);
+ acc_min_rows[rp * 4 + 3] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_min_3), _mm512_mul_ps(col_dmin_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 255)), acc_min_rows[rp * 4 + 3]);
+ }
+ }
+ }
+ // Store the accumulated values
+ for (int i = 0; i < 16; i++) {
+ _mm512_storeu_ps((float * )(s + ((y * 4 + i) * bs + x * 8)), _mm512_sub_ps(acc_rows[i], acc_min_rows[i]));
+ }
+ }
+ }
+
+ for (; y < nr / 4; y ++) {
+
+ const block_q8_Kx4 * a_ptr = a_ptr_start + (y * nb);
+
+ // Take group of two block_q2_Kx8 structures at each pass of the loop and perform dot product operation
+ for (int64_t x = 0; x < anc / 8; x += 2) {
+
+ const block_q2_Kx8 * b_ptr_0 = b_ptr_start + ((x) * b_nb);
+ const block_q2_Kx8 * b_ptr_1 = b_ptr_start + ((x + 1) * b_nb);
+
+ // Master FP accumulators
+ __m512 acc_rows[4];
+ for (int i = 0; i < 4; i++) {
+ acc_rows[i] = _mm512_setzero_ps();
+ }
+
+ __m512 acc_min_rows[4];
+ for (int i = 0; i < 4; i++) {
+ acc_min_rows[i] = _mm512_setzero_ps();
+ }
+ // For super block
+ for (int64_t b = 0; b < nb; b++) {
+ // Delta values - Load the sixteen scale values from two block_q2_Kx8 structures
+ const __m512 col_scale_f32 = GGML_F32Cx8x2_LOAD(b_ptr_0[b].d, b_ptr_1[b].d);
+
+ // dmin values - Load the sixteen dmin values from two block_q2_Kx8 structures
+ const __m512 col_dmin_f32 = GGML_F32Cx8x2_LOAD(b_ptr_0[b].dmin, b_ptr_1[b].dmin);
+
+ // Loop to iterate over the sixteen sub blocks of a super block - eight sub blocks are processed per iteration
+ for (int sb = 0; sb < QK_K / 128; sb++) {
+
+ // Load the quantized values of eight sub blocks from the eight block_q2_K columns, interleaved with each other in chunks of eight bytes - B0,B1 ....B6,B7
+ const __m256i rhs_raw_mat_0123_0 = _mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + sb * 256));
+ const __m256i rhs_raw_mat_4567_0 = _mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + 32 + sb * 256));
+ const __m256i
rhs_raw_mat_0123_1 = _mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + 64 + sb * 256)); + const __m256i rhs_raw_mat_4567_1 = _mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + 96 + sb * 256)); + const __m256i rhs_raw_mat_0123_2 = _mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + 128 + sb * 256)); + const __m256i rhs_raw_mat_4567_2 = _mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + 160 + sb * 256)); + const __m256i rhs_raw_mat_0123_3 = _mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + 192 + sb * 256)); + const __m256i rhs_raw_mat_4567_3 = _mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + 224 + sb * 256)); + + const __m256i rhs_raw_mat_89AB_0 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + sb * 256)); + const __m256i rhs_raw_mat_CDEF_0 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + 32 + sb * 256)); + const __m256i rhs_raw_mat_89AB_1 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + 64 + sb * 256)); + const __m256i rhs_raw_mat_CDEF_1 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + 96 + sb * 256)); + const __m256i rhs_raw_mat_89AB_2 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + 128 + sb * 256)); + const __m256i rhs_raw_mat_CDEF_2 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + 160 + sb * 256)); + const __m256i rhs_raw_mat_89AB_3 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + 192 + sb * 256)); + const __m256i rhs_raw_mat_CDEF_3 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + 224 + sb * 256)); + + const __m256i rhs_raw_mat_0145_0 = _mm256_blend_epi32(rhs_raw_mat_0123_0, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_0, requiredOrder), 240); + const __m256i rhs_raw_mat_2367_0 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_0, requiredOrder), rhs_raw_mat_4567_0, 240); + const __m256i rhs_raw_mat_0145_1 = _mm256_blend_epi32(rhs_raw_mat_0123_1, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_1, requiredOrder), 240); + const __m256i rhs_raw_mat_2367_1 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_1, requiredOrder), rhs_raw_mat_4567_1, 240); + const __m256i rhs_raw_mat_0145_2 = _mm256_blend_epi32(rhs_raw_mat_0123_2, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_2, requiredOrder), 240); + const __m256i rhs_raw_mat_2367_2 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_2, requiredOrder), rhs_raw_mat_4567_2, 240); + const __m256i rhs_raw_mat_0145_3 = _mm256_blend_epi32(rhs_raw_mat_0123_3, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_3, requiredOrder), 240); + const __m256i rhs_raw_mat_2367_3 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_3, requiredOrder), rhs_raw_mat_4567_3, 240); + + const __m256i rhs_raw_mat_89CD_0 = _mm256_blend_epi32(rhs_raw_mat_89AB_0, _mm256_permutevar8x32_epi32(rhs_raw_mat_CDEF_0, requiredOrder), 240); + const __m256i rhs_raw_mat_ABEF_0 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_89AB_0, requiredOrder), rhs_raw_mat_CDEF_0, 240); + const __m256i rhs_raw_mat_89CD_1 = _mm256_blend_epi32(rhs_raw_mat_89AB_1, _mm256_permutevar8x32_epi32(rhs_raw_mat_CDEF_1, requiredOrder), 240); + const __m256i rhs_raw_mat_ABEF_1 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_89AB_1, requiredOrder), rhs_raw_mat_CDEF_1, 240); + const __m256i rhs_raw_mat_89CD_2 = _mm256_blend_epi32(rhs_raw_mat_89AB_2, _mm256_permutevar8x32_epi32(rhs_raw_mat_CDEF_2, requiredOrder), 240); + const __m256i rhs_raw_mat_ABEF_2 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_89AB_2, requiredOrder), rhs_raw_mat_CDEF_2, 240); + 
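As a compact scalar reference for the interleave above and the `2-bit -> 8-bit` expansion just below, this sketch (hypothetical helper, not part of the patch) shows the per-sub-block arithmetic the kernel vectorizes: the shift-and-mask unpack that the `m3bexpanded` mask implements, the scale/min nibble split done further down with `m4b`, and the d/dmin identity that motivates keeping `acc_rows` and `acc_min_rows` as separate accumulators until the final `_mm512_sub_ps` at the store.

#include <cstdint>

// Q2_K stores w_i = d*scale*q_i - dmin*min with q_i in 0..3, so against a
// Q8_K row with delta d8:
//   dot(w, a) = d8 * (d*scale*sum(q_i*a_i) - dmin*min*sum(a_i))
// sum(a_i) per sub block is the precomputed Q8_K bsum, which is why the min
// term never has to touch the quantized activations themselves.
static float q2k_subblock_dot(float d, float dmin, float d8,
                              uint8_t scale_and_min,        // packed nibbles
                              const uint8_t *qs, int shift, // 16 packed bytes
                              const int8_t *a, int32_t q8_bsum) {
    const uint8_t scale = scale_and_min & 0x0F; // low nibble (cf. m4b mask)
    const uint8_t mn    = scale_and_min >> 4;   // high nibble
    int32_t iacc = 0;
    for (int i = 0; i < 16; i++) {
        iacc += int32_t((qs[i] >> shift) & 0x3) * a[i]; // 2-bit -> 8-bit
    }
    return d8 * (d * scale * float(iacc) - dmin * float(mn) * float(q8_bsum));
}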
const __m256i rhs_raw_mat_89CD_3 = _mm256_blend_epi32(rhs_raw_mat_89AB_3, _mm256_permutevar8x32_epi32(rhs_raw_mat_CDEF_3, requiredOrder), 240); + const __m256i rhs_raw_mat_ABEF_3 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_89AB_3, requiredOrder), rhs_raw_mat_CDEF_3, 240); + + const __m512i rhs_raw_mat_014589CD_0 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_0145_0), rhs_raw_mat_89CD_0, 1); + const __m512i rhs_raw_mat_2367ABEF_0 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_2367_0), rhs_raw_mat_ABEF_0, 1); + const __m512i rhs_raw_mat_014589CD_1 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_0145_1), rhs_raw_mat_89CD_1, 1); + const __m512i rhs_raw_mat_2367ABEF_1 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_2367_1), rhs_raw_mat_ABEF_1, 1); + + const __m512i rhs_raw_mat_014589CD_2 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_0145_2), rhs_raw_mat_89CD_2, 1); + const __m512i rhs_raw_mat_2367ABEF_2 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_2367_2), rhs_raw_mat_ABEF_2, 1); + const __m512i rhs_raw_mat_014589CD_3 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_0145_3), rhs_raw_mat_89CD_3, 1); + const __m512i rhs_raw_mat_2367ABEF_3 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_2367_3), rhs_raw_mat_ABEF_3, 1); + + //2-bit -> 8-bit + const __m512i rhs_mat_014589CD_00 = _mm512_and_si512(rhs_raw_mat_014589CD_0,m3bexpanded); //B00(0-7) B01(0-7) B04(0-7) B05(0-7) B08(0-7) B09(0-7) B0C(0-7) B0D(0-7) + const __m512i rhs_mat_2367ABEF_00 = _mm512_and_si512(rhs_raw_mat_2367ABEF_0,m3bexpanded); //B02(0-7) B03(0-7) B06(0-7) B07(0-7) B0A(0-7) B0B(0-7) B0E(0-7) B0F(0-7) + const __m512i rhs_mat_014589CD_01 = _mm512_and_si512(rhs_raw_mat_014589CD_1,m3bexpanded); //B00(8-15) B01(8-15) B04(8-15) B05(8-15) B08(8-15) B09(8-15) B0C(8-15) B0D(8-15) + const __m512i rhs_mat_2367ABEF_01 = _mm512_and_si512(rhs_raw_mat_2367ABEF_1,m3bexpanded); //B02(8-15) B03(8-15) B06(8-15) B07(8-15) B0A(8-15) B0B(8-15) B0E(8-15) B0F(8-15) + const __m512i rhs_mat_014589CD_10 = _mm512_and_si512(rhs_raw_mat_014589CD_2,m3bexpanded); //B10(0-7) B11(0-7) B14(0-7) B15(0-7) B18(0-7) B19(0-7) B1C(0-7) B1D(0-7) + const __m512i rhs_mat_2367ABEF_10 = _mm512_and_si512(rhs_raw_mat_2367ABEF_2,m3bexpanded); //B12(0-7) B13(0-7) B16(0-7) B17(0-7) B1A(0-7) B1B(0-7) B1E(0-7) B1F(0-7) + const __m512i rhs_mat_014589CD_11 = _mm512_and_si512(rhs_raw_mat_014589CD_3,m3bexpanded); //B10(8-15) B11(8-15) B14(8-15) B15(8-15) B18(8-15) B19(8-15) B1C(8-15) B1D(8-15) + const __m512i rhs_mat_2367ABEF_11 = _mm512_and_si512(rhs_raw_mat_2367ABEF_3,m3bexpanded); //B12(8-15) B13(8-15) B16(8-15) B17(8-15) B1A(8-15) B1B(8-15) B1E(8-15) B1F(8-15) + + const __m512i rhs_mat_014589CD_20 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_0, 2), m3bexpanded); //B20(0-7) B21(0-7) B24(0-7) B25(0-7) B28(0-7) B29(0-7) B2C(0-7) B2D(0-7) + const __m512i rhs_mat_2367ABEF_20 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_0, 2), m3bexpanded); //B22(0-7) B23(0-7) B26(0-7) B27(0-7) B2A(0-7) B2B(0-7) B2E(0-7) B2F(0-7) + + const __m512i rhs_mat_014589CD_21 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_1, 2), m3bexpanded); //B20(8-15) B21(8-15) B24(8-15) B25(8-15) B28(8-15) B29(8-15) B2C(8-15) B2D(8-15) + const __m512i rhs_mat_2367ABEF_21 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_1, 2), m3bexpanded); //B22(8-15) B23(8-15) B26(8-15) B27(8-15) B2A(8-15) B2B(8-15) B2E(8-15) B2F(8-15) + + const __m512i rhs_mat_014589CD_30 = 
_mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_2, 2), m3bexpanded); //B30(0-7) B31(0-7) B34(0-7) B35(0-7) B38(0-7) B39(0-7) B3C(0-7) B3D(0-7) + const __m512i rhs_mat_2367ABEF_30 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_2, 2), m3bexpanded); //B32(0-7) B33(0-7) B36(0-7) B37(0-7) B3A(0-7) B3B(0-7) B3E(0-7) B3F(0-7) + + const __m512i rhs_mat_014589CD_31 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_3, 2), m3bexpanded); //B30(8-15) B31(8-15) B34(8-15) B35(8-15) B38(8-15) B39(8-15) B3C(8-15) B3D(8-15) + const __m512i rhs_mat_2367ABEF_31 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_3, 2), m3bexpanded); //B32(8-15) B33(8-15) B36(8-15) B37(8-15) B3A(8-15) B3B(8-15) B3E(8-15) B3F(8-15) + + const __m512i rhs_mat_014589CD_40 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_0, 4), m3bexpanded); //B40(0-7) B41(0-7) B44(0-7) B45(0-7) B48(0-7) B49(0-7) B4C(0-7) B4D(0-7) + const __m512i rhs_mat_2367ABEF_40 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_0, 4), m3bexpanded); //B42(0-7) B43(0-7) B46(0-7) B47(0-7) B4A(0-7) B4B(0-7) B4E(0-7) B4F(0-7) + + const __m512i rhs_mat_014589CD_41 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_1, 4), m3bexpanded); //B40(8-15) B41(8-15) B44(8-15) B45(8-15) B48(8-15) B49(8-15) B4C(8-15) B4D(8-15) + const __m512i rhs_mat_2367ABEF_41 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_1, 4), m3bexpanded); //B42(8-15) B43(8-15) B46(8-15) B47(8-15) B4A(8-15) B4B(8-15) B4E(8-15) B4F(8-15) + + const __m512i rhs_mat_014589CD_50 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_2, 4), m3bexpanded); //B50(0-7) B51(0-7) B54(0-7) B55(0-7) B58(0-7) B59(0-7) B5C(0-7) B5D(0-7) + const __m512i rhs_mat_2367ABEF_50 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_2, 4), m3bexpanded); //B52(0-7) B53(0-7) B56(0-7) B57(0-7) B5A(0-7) B5B(0-7) B5E(0-7) B5F(0-7) + + const __m512i rhs_mat_014589CD_51 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_3, 4), m3bexpanded); //B50(8-15) B51(8-15) B54(8-15) B55(8-15) B58(8-15) B59(8-15) B5C(8-15) B5D(8-15) + const __m512i rhs_mat_2367ABEF_51 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_3, 4), m3bexpanded); //B52(8-15) B53(8-15) B56(8-15) B57(8-15) B5A(8-15) B5B(8-15) B5E(8-15) B5F(8-15) + + const __m512i rhs_mat_014589CD_60 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_0, 6), m3bexpanded); //B60(0-7) B61(0-7) B64(0-7) B65(0-7) B68(0-7) B69(0-7) B6C(0-7) B6D(0-7) + const __m512i rhs_mat_2367ABEF_60 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_0, 6), m3bexpanded); //B62(0-7) B63(0-7) B66(0-7) B67(0-7) B6A(0-7) B6B(0-7) B6E(0-7) B6F(0-7) + + const __m512i rhs_mat_014589CD_61 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_1, 6), m3bexpanded); //B60(8-15) B61(8-15) B64(8-15) B65(8-15) B68(8-15) B69(8-15) B6C(8-15) B6D(8-15) + const __m512i rhs_mat_2367ABEF_61 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_1, 6), m3bexpanded); //B62(8-15) B63(8-15) B66(8-15) B67(8-15) B6A(8-15) B6B(8-15) B6E(8-15) B6F(8-15) + + const __m512i rhs_mat_014589CD_70 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_2, 6), m3bexpanded); //B70(0-7) B71(0-7) B74(0-7) B75(0-7) B78(0-7) B79(0-7) B7C(0-7) B7D(0-7) + const __m512i rhs_mat_2367ABEF_70 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_2, 6), m3bexpanded); //B72(0-7) B73(0-7) B76(0-7) B77(0-7) B7A(0-7) B7B(0-7) B7E(0-7) B7F(0-7) + + const __m512i rhs_mat_014589CD_71 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_3, 6), 
m3bexpanded); //B70(8-15) B71(8-15) B74(8-15) B75(8-15) B78(8-15) B79(8-15) B7C(8-15) B7D(8-15) + const __m512i rhs_mat_2367ABEF_71 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_3, 6), m3bexpanded); //B72(8-15) B73(8-15) B76(8-15) B77(8-15) B7A(8-15) B7B(8-15) B7E(8-15) B7F(8-15) + + const __m512i rhs_mat_014589CD_00_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_00, (_MM_PERM_ENUM)136); //B00(0-3) B01(0-3) B00(0-3) B01(0-3) B04(0-3) B05(0-3) B04(0-3) B05(0-3) B08(0-3) B09(0-3) B08(0-3) B09(0-3) B0C(0-3) B0D(0-3) B0C(0-3) B0D(0-3) + const __m512i rhs_mat_2367ABEF_00_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_00, (_MM_PERM_ENUM)136); //B02(0-3) B03(0-3) B02(0-3) B03(0-3) B06(0-3) B07(0-3) B06(0-3) B07(0-3) B0A(0-3) B0B(0-3) B0A(0-3) B0B(0-3) B0E(0-3) B0F(0-3) B0E(0-3) B0F(0-3) + + const __m512i rhs_mat_014589CD_01_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_01, (_MM_PERM_ENUM)136); //B00(8-11) B01(8-11) B00(8-11) B01(8-11) B04(8-11) B05(8-11) B04(8-11) B05(8-11) B08(8-11) B09(8-11) B08(8-11) B09(8-11) B0C(8-11) B0D(8-11) B0C(8-11) B0D(8-11) + const __m512i rhs_mat_2367ABEF_01_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_01, (_MM_PERM_ENUM)136); //B02(8-11) B03(8-11) B02(8-11) B03(8-11) B06(8-11) B07(8-11) B06(8-11) B07(8-11) B0A(8-11) B0B(8-11) B0A(8-11) B0B(8-11) B0E(8-11) B0F(8-11) B0E(8-11) B0F(8-11) + + const __m512i rhs_mat_014589CD_10_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_10, (_MM_PERM_ENUM)136); //B10(0-3) B11(0-3) B10(0-3) B11(0-3) B14(0-3) B15(0-3) B14(0-3) B15(0-3) B18(0-3) B19(0-3) B18(0-3) B19(0-3) B1C(0-3) B1D(0-3) B1C(0-3) B1D(0-3) + const __m512i rhs_mat_2367ABEF_10_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_10, (_MM_PERM_ENUM)136); //B12(0-3) B13(0-3) B12(0-3) B13(0-3) B16(0-3) B17(0-3) B16(0-3) B17(0-3) B1A(0-3) B1B(0-3) B1A(0-3) B1B(0-3) B1E(0-3) B1F(0-3) B1E(0-3) B1F(0-3) + + const __m512i rhs_mat_014589CD_11_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_11, (_MM_PERM_ENUM)136); //B10(8-11) B11(8-11) B10(8-11) B11(8-11) B14(8-11) B15(8-11) B14(8-11) B15(8-11) B18(8-11) B19(8-11) B18(8-11) B19(8-11) B1C(8-11) B1D(8-11) B1C(8-11) B1D(8-11) + const __m512i rhs_mat_2367ABEF_11_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_11, (_MM_PERM_ENUM)136); //B12(8-11) B13(8-11) B12(8-11) B13(8-11) B16(8-11) B17(8-11) B16(8-11) B17(8-11) B1A(8-11) B1B(8-11) B1A(8-11) B1B(8-11) B1E(8-11) B1F(8-11) B1E(8-11) B1F(8-11) + + const __m512i rhs_mat_014589CD_20_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_20, (_MM_PERM_ENUM)136); //B20(0-3) B21(0-3) B20(0-3) B21(0-3) B24(0-3) B25(0-3) B24(0-3) B25(0-3) B28(0-3) B29(0-3) B28(0-3) B29(0-3) B2C(0-3) B2D(0-3) B2C(0-3) B2D(0-3) + const __m512i rhs_mat_2367ABEF_20_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_20, (_MM_PERM_ENUM)136); //B22(0-3) B23(0-3) B22(0-3) B23(0-3) B26(0-3) B27(0-3) B26(0-3) B27(0-3) B2A(0-3) B2B(0-3) B2A(0-3) B2B(0-3) B2E(0-3) B2F(0-3) B2E(0-3) B2F(0-3) + + const __m512i rhs_mat_014589CD_21_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_21, (_MM_PERM_ENUM)136); //B20(8-11) B21(8-11) B20(8-11) B21(8-11) B24(8-11) B25(8-11) B24(8-11) B25(8-11) B28(8-11) B29(8-11) B28(8-11) B29(8-11) B2C(8-11) B2D(8-11) B2C(8-11) B2D(8-11) + const __m512i rhs_mat_2367ABEF_21_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_21, (_MM_PERM_ENUM)136); //B22(8-11) B23(8-11) B22(8-11) B23(8-11) B26(8-11) B27(8-11) B26(8-11) B27(8-11) B2A(8-11) B2B(8-11) B2A(8-11) B2B(8-11) B2E(8-11) B2F(8-11) B2E(8-11) B2F(8-11) + const __m512i rhs_mat_014589CD_30_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_30, (_MM_PERM_ENUM)136); ///B30(0-3) B31(0-3) B30(0-3) 
B31(0-3) B34(0-3) B35(0-3) B34(0-3) B35(0-3) B38(0-3) B39(0-3) B38(0-3) B39(0-3) B3C(0-3) B3D(0-3) B3C(0-3) B3D(0-3) + const __m512i rhs_mat_2367ABEF_30_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_30, (_MM_PERM_ENUM)136); //B32(0-3) B33(0-3) B32(0-3) B33(0-3) B36(0-3) B37(0-3) B36(0-3) B37(0-3) B3A(0-3) B3B(0-3) B3A(0-3) B3B(0-3) B3E(0-3) B3F(0-3) B3E(0-3) B3F(0-3) + + const __m512i rhs_mat_014589CD_31_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_31, (_MM_PERM_ENUM)136); //B30(8-11) B31(8-11) B30(8-11) B31(8-11) B34(8-11) B35(8-11) B34(8-11) B35(8-11) B38(8-11) B39(8-11) B38(8-11) B39(8-11) B3C(8-11) B3D(8-11) B3C(8-11) B3D(8-11) + const __m512i rhs_mat_2367ABEF_31_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_31, (_MM_PERM_ENUM)136); //B32(8-11) B33(8-11) B32(8-11) B33(8-11) B36(8-11) B37(8-11) B36(8-11) B37(8-11) B3A(8-11) B3B(8-11) B3A(8-11) B3B(8-11) B3E(8-11) B3F(8-11) B3E(8-11) B3F(8-11) + + const __m512i rhs_mat_014589CD_40_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_40, (_MM_PERM_ENUM)136); //B40(0-3) B41(0-3) B40(0-3) B41(0-3) B44(0-3) B45(0-3) B44(0-3) B45(0-3) B48(0-3) B49(0-3) B48(0-3) B49(0-3) B4C(0-3) B4D(0-3) B4C(0-3) B4D(0-3) + const __m512i rhs_mat_2367ABEF_40_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_40, (_MM_PERM_ENUM)136); //B42(0-3) B43(0-3) B42(0-3) B43(0-3) B46(0-3) B47(0-3) B46(0-3) B47(0-3) B4A(0-3) B4B(0-3) B4A(0-3) B4B(0-3) B4E(0-3) B4F(0-3) B4E(0-3) B4F(0-3) + + const __m512i rhs_mat_014589CD_41_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_41, (_MM_PERM_ENUM)136); //B40(8-11) B41(8-11) B40(8-11) B41(8-11) B44(8-11) B45(8-11) B44(8-11) B45(8-11) B48(8-11) B49(8-11) B48(8-11) B49(8-11) B4C(8-11) B4D(8-11) B4C(8-11) B4D(8-11) + const __m512i rhs_mat_2367ABEF_41_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_41, (_MM_PERM_ENUM)136); //B42(8-11) B43(8-11) B42(8-11) B43(8-11) B46(8-11) B47(8-11) B46(8-11) B47(8-11) B4A(8-11) B4B(8-11) B4A(8-11) B4B(8-11) B4E(8-11) B4F(8-11) B4E(8-11) B4F(8-11) + + const __m512i rhs_mat_014589CD_50_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_50, (_MM_PERM_ENUM)136); //B50(0-3) B51(0-3) B50(0-3) B51(0-3) B54(0-3) B55(0-3) B54(0-3) B55(0-3) B58(0-3) B59(0-3) B58(0-3) B59(0-3) B5C(0-3) B5D(0-3) B5C(0-3) B5D(0-3) + const __m512i rhs_mat_2367ABEF_50_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_50, (_MM_PERM_ENUM)136); //B52(0-3) B53(0-3) B52(0-3) B53(0-3) B56(0-3) B57(0-3) B56(0-3) B57(0-3) B5A(0-3) B5B(0-3) B5A(0-3) B5B(0-3) B5E(0-3) B5F(0-3) B5E(0-3) B5F(0-3) + + const __m512i rhs_mat_014589CD_51_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_51, (_MM_PERM_ENUM)136); //B50(8-11) B51(8-11) B50(8-11) B51(8-11) B54(8-11) B55(8-11) B54(8-11) B55(8-11) B58(8-11) B59(8-11) B58(8-11) B59(8-11) B5C(8-11) B5D(8-11) B5C(8-11) B5D(8-11) + const __m512i rhs_mat_2367ABEF_51_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_51, (_MM_PERM_ENUM)136); //B52(8-11) B53(8-11) B52(8-11) B53(8-11) B56(8-11) B57(8-11) B56(8-11) B57(8-11) B5A(8-11) B5B(8-11) B5A(8-11) B5B(8-11) B5E(8-11) B5F(8-11) B5E(8-11) B5F(8-11) + + const __m512i rhs_mat_014589CD_60_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_60, (_MM_PERM_ENUM)136); //B60(0-3) B61(0-3) B60(0-3) B61(0-3) B64(0-3) B65(0-3) B64(0-3) B65(0-3) B68(0-3) B69(0-3) B68(0-3) B69(0-3) B6C(0-3) B6D(0-3) B6C(0-3) B6D(0-3) + const __m512i rhs_mat_2367ABEF_60_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_60, (_MM_PERM_ENUM)136); //B62(0-3) B63(0-3) B62(0-3) B63(0-3) B66(0-3) B67(0-3) B66(0-3) B67(0-3) B6A(0-3) B6B(0-3) B6A(0-3) B6B(0-3) B6E(0-3) B6F(0-3) B6E(0-3) B6F(0-3) + + const __m512i rhs_mat_014589CD_61_sp1 = 
_mm512_shuffle_epi32(rhs_mat_014589CD_61, (_MM_PERM_ENUM)136); //B60(8-11) B61(8-11) B60(8-11) B61(8-11) B64(8-11) B65(8-11) B64(8-11) B65(8-11) B68(8-11) B69(8-11) B68(8-11) B69(8-11) B6C(8-11) B6D(8-11) B6C(8-11) B6D(8-11)
+ const __m512i rhs_mat_2367ABEF_61_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_61, (_MM_PERM_ENUM)136); //B62(8-11) B63(8-11) B62(8-11) B63(8-11) B66(8-11) B67(8-11) B66(8-11) B67(8-11) B6A(8-11) B6B(8-11) B6A(8-11) B6B(8-11) B6E(8-11) B6F(8-11) B6E(8-11) B6F(8-11)
+
+ const __m512i rhs_mat_014589CD_70_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_70, (_MM_PERM_ENUM)136); //B70(0-3) B71(0-3) B70(0-3) B71(0-3) B74(0-3) B75(0-3) B74(0-3) B75(0-3) B78(0-3) B79(0-3) B78(0-3) B79(0-3) B7C(0-3) B7D(0-3) B7C(0-3) B7D(0-3)
+ const __m512i rhs_mat_2367ABEF_70_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_70, (_MM_PERM_ENUM)136); //B72(0-3) B73(0-3) B72(0-3) B73(0-3) B76(0-3) B77(0-3) B76(0-3) B77(0-3) B7A(0-3) B7B(0-3) B7A(0-3) B7B(0-3) B7E(0-3) B7F(0-3) B7E(0-3) B7F(0-3)
+
+ const __m512i rhs_mat_014589CD_71_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_71, (_MM_PERM_ENUM)136); //B70(8-11) B71(8-11) B70(8-11) B71(8-11) B74(8-11) B75(8-11) B74(8-11) B75(8-11) B78(8-11) B79(8-11) B78(8-11) B79(8-11) B7C(8-11) B7D(8-11) B7C(8-11) B7D(8-11)
+ const __m512i rhs_mat_2367ABEF_71_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_71, (_MM_PERM_ENUM)136); //B72(8-11) B73(8-11) B72(8-11) B73(8-11) B76(8-11) B77(8-11) B76(8-11) B77(8-11) B7A(8-11) B7B(8-11) B7A(8-11) B7B(8-11) B7E(8-11) B7F(8-11) B7E(8-11) B7F(8-11)
+
+ const __m512i rhs_mat_014589CD_00_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_00, (_MM_PERM_ENUM)221); //B00(4-7) B01(4-7) B00(4-7) B01(4-7) B04(4-7) B05(4-7) B04(4-7) B05(4-7) B08(4-7) B09(4-7) B08(4-7) B09(4-7) B0C(4-7) B0D(4-7) B0C(4-7) B0D(4-7)
+ const __m512i rhs_mat_2367ABEF_00_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_00, (_MM_PERM_ENUM)221); //B02(4-7) B03(4-7) B02(4-7) B03(4-7) B06(4-7) B07(4-7) B06(4-7) B07(4-7) B0A(4-7) B0B(4-7) B0A(4-7) B0B(4-7) B0E(4-7) B0F(4-7) B0E(4-7) B0F(4-7)
+
+ const __m512i rhs_mat_014589CD_01_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_01, (_MM_PERM_ENUM)221); //B00(12-15) B01(12-15) B00(12-15) B01(12-15) B04(12-15) B05(12-15) B04(12-15) B05(12-15) B08(12-15) B09(12-15) B08(12-15) B09(12-15) B0C(12-15) B0D(12-15) B0C(12-15) B0D(12-15)
+ const __m512i rhs_mat_2367ABEF_01_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_01, (_MM_PERM_ENUM)221); //B02(12-15) B03(12-15) B02(12-15) B03(12-15) B06(12-15) B07(12-15) B06(12-15) B07(12-15) B0A(12-15) B0B(12-15) B0A(12-15) B0B(12-15) B0E(12-15) B0F(12-15) B0E(12-15) B0F(12-15)
+
+ const __m512i rhs_mat_014589CD_10_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_10, (_MM_PERM_ENUM)221); //B10(4-7) B11(4-7) B10(4-7) B11(4-7) B14(4-7) B15(4-7) B14(4-7) B15(4-7) B18(4-7) B19(4-7) B18(4-7) B19(4-7) B1C(4-7) B1D(4-7) B1C(4-7) B1D(4-7)
+ const __m512i rhs_mat_2367ABEF_10_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_10, (_MM_PERM_ENUM)221); //B12(4-7) B13(4-7) B12(4-7) B13(4-7) B16(4-7) B17(4-7) B16(4-7) B17(4-7) B1A(4-7) B1B(4-7) B1A(4-7) B1B(4-7) B1E(4-7) B1F(4-7) B1E(4-7) B1F(4-7)
+
+ const __m512i rhs_mat_014589CD_11_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_11, (_MM_PERM_ENUM)221); //B10(12-15) B11(12-15) B10(12-15) B11(12-15) B14(12-15) B15(12-15) B14(12-15) B15(12-15) B18(12-15) B19(12-15) B18(12-15) B19(12-15) B1C(12-15) B1D(12-15) B1C(12-15) B1D(12-15)
+ const __m512i rhs_mat_2367ABEF_11_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_11, (_MM_PERM_ENUM)221); //B12(12-15) B13(12-15) B12(12-15) B13(12-15) B16(12-15)
B17(12-15) B16(12-15) B17(12-15) B1A(12-15) B1B(12-15) B1A(12-15) B1B(12-15) B1E(12-15) B1F(12-15) B1E(12-15) B1F(12-15) + + const __m512i rhs_mat_014589CD_20_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_20, (_MM_PERM_ENUM)221); //B20(4-7) B21(4-7) B20(4-7) B21(4-7) B24(4-7) B25(4-7) B24(4-7) B25(4-7) B28(4-7) B29(4-7) B28(4-7) B29(4-7) B2C(4-7) B2D(4-7) B2C(4-7) B2D(4-7) + const __m512i rhs_mat_2367ABEF_20_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_20, (_MM_PERM_ENUM)221); //B22(4-7) B23(4-7) B22(4-7) B23(4-7) B26(4-7) B27(4-7) B26(4-7) B27(4-7) B2A(4-7) B2B(4-7) B2A(4-7) B2B(4-7) B2E(4-7) B2F(4-7) B2E(4-7) B2F(4-7) + + const __m512i rhs_mat_014589CD_21_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_21, (_MM_PERM_ENUM)221); //B20(12-15) B21(12-15) B20(12-15) B21(12-15) B24(12-15) B25(12-15) B24(12-15) B25(12-15) B28(12-15) B29(12-15) B28(12-15) B29(12-15) B2C(12-15) B2D(12-15) B2C(12-15) B2D(12-15) + const __m512i rhs_mat_2367ABEF_21_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_21, (_MM_PERM_ENUM)221); //B22(12-15) B23(12-15) B22(12-15) B23(12-15) B26(12-15) B27(12-15) B26(12-15) B27(12-15) B2A(12-15) B2B(12-15) B2A(12-15) B2B(12-15) B2E(12-15) B2F(12-15) B2E(12-15) B2F(12-15) + + const __m512i rhs_mat_014589CD_30_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_30, (_MM_PERM_ENUM)221); //B30(4-7) B31(4-7) B30(4-7) B31(4-7) B34(4-7) B35(4-7) B34(4-7) B35(4-7) B38(4-7) B39(4-7) B38(4-7) B39(4-7) B3C(4-7) B3D(4-7) B3C(4-7) B3D(4-7) + const __m512i rhs_mat_2367ABEF_30_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_30, (_MM_PERM_ENUM)221); //B32(4-7) B33(4-7) B32(4-7) B33(4-7) B36(4-7) B37(4-7) B36(4-7) B37(4-7) B3A(4-7) B3B(4-7) B3A(4-7) B3B(4-7) B3E(4-7) B3F(4-7) B3E(4-7) B3F(4-7) + + const __m512i rhs_mat_014589CD_31_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_31, (_MM_PERM_ENUM)221); //B30(12-15) B31(12-15) B30(12-15) B31(12-15) B34(12-15) B35(12-15) B34(12-15) B35(12-15) B38(12-15) B39(12-15) B38(12-15) B39(12-15) B3C(12-15) B3D(12-15) B3C(12-15) B3D(12-15) + const __m512i rhs_mat_2367ABEF_31_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_31, (_MM_PERM_ENUM)221); //B32(12-15) B33(12-15) B32(12-15) B33(12-15) B36(12-15) B37(12-15) B36(12-15) B37(12-15) B3A(12-15) B3B(12-15) B3A(12-15) B3B(12-15) B3E(12-15) B3F(12-15) B3E(12-15) B3F(12-15) + + const __m512i rhs_mat_014589CD_40_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_40, (_MM_PERM_ENUM)221); //B40(4-7) B41(4-7) B40(4-7) B41(4-7) B44(4-7) B45(4-7) B44(4-7) B45(4-7) B48(4-7) B49(4-7) B48(4-7) B49(4-7) B4C(4-7) B4D(4-7) B4C(4-7) B4D(4-7) + const __m512i rhs_mat_2367ABEF_40_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_40, (_MM_PERM_ENUM)221); //B42(4-7) B43(4-7) B42(4-7) B43(4-7) B46(4-7) B47(4-7) B46(4-7) B47(4-7) B4A(4-7) B4B(4-7) B4A(4-7) B4B(4-7) B4E(4-7) B4F(4-7) B4E(4-7) B4F(4-7) + + const __m512i rhs_mat_014589CD_41_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_41, (_MM_PERM_ENUM)221); //B40(12-15) B41(12-15) B40(12-15) B41(12-15) B44(12-15) B45(12-15) B44(12-15) B45(12-15) B48(12-15) B49(12-15) B48(12-15) B49(12-15) B4C(12-15) B4D(12-15) B4C(12-15) B4D(12-15) + const __m512i rhs_mat_2367ABEF_41_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_41, (_MM_PERM_ENUM)221); //B42(12-15) B43(12-15) B42(12-15) B43(12-15) B46(12-15) B47(12-15) B46(12-15) B47(12-15) B4A(12-15) B4B(12-15) B4A(12-15) B4B(12-15) B4E(12-15) B4F(12-15) B4E(12-15) B4F(12-15) + + const __m512i rhs_mat_014589CD_50_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_50, (_MM_PERM_ENUM)221); //B50(4-7) B51(4-7) B50(4-7) B51(4-7) B54(4-7) B55(4-7) B54(4-7) B55(4-7) B58(4-7) B59(4-7) B58(4-7) B59(4-7) 
B5C(4-7) B5D(4-7) B5C(4-7) B5D(4-7) + const __m512i rhs_mat_2367ABEF_50_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_50, (_MM_PERM_ENUM)221); //B52(4-7) B53(4-7) B52(4-7) B53(4-7) B56(4-7) B57(4-7) B56(4-7) B57(4-7) B5A(4-7) B5B(4-7) B5A(4-7) B5B(4-7) B5E(4-7) B5F(4-7) B5E(4-7) B5F(4-7) + + const __m512i rhs_mat_014589CD_51_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_51, (_MM_PERM_ENUM)221); //B50(12-15) B51(12-15) B50(12-15) B51(12-15) B54(12-15) B55(12-15) B54(12-15) B55(12-15) B58(12-15) B59(12-15) B58(12-15) B59(12-15) B5C(12-15) B5D(12-15) B5C(12-15) B5D(12-15) + const __m512i rhs_mat_2367ABEF_51_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_51, (_MM_PERM_ENUM)221); //B52(12-15) B53(12-15) B52(12-15) B53(12-15) B56(12-15) B57(12-15) B56(12-15) B57(12-15) B5A(12-15) B5B(12-15) B5A(12-15) B5B(12-15) B5E(12-15) B5F(12-15) B5E(12-15) B5F(12-15) + + const __m512i rhs_mat_014589CD_60_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_60, (_MM_PERM_ENUM)221); //B60(4-7) B61(4-7) B60(4-7) B61(4-7) B64(4-7) B65(4-7) B64(4-7) B65(4-7) B68(4-7) B69(4-7) B68(4-7) B69(4-7) B6C(4-7) B6D(4-7) B6C(4-7) B6D(4-7) + const __m512i rhs_mat_2367ABEF_60_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_60, (_MM_PERM_ENUM)221); //B62(4-7) B63(4-7) B62(4-7) B63(4-7) B66(4-7) B67(4-7) B66(4-7) B67(4-7) B6A(4-7) B6B(4-7) B6A(4-7) B6B(4-7) B6E(4-7) B6F(4-7) B6E(4-7) B6F(4-7) + + const __m512i rhs_mat_014589CD_61_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_61, (_MM_PERM_ENUM)221); //B60(12-15) B61(12-15) B60(12-15) B61(12-15) B64(12-15) B65(12-15) B64(12-15) B65(12-15) B68(12-15) B69(12-15) B68(12-15) B69(12-15) B6C(12-15) B6D(12-15) B6C(12-15) B6D(12-15) + const __m512i rhs_mat_2367ABEF_61_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_61, (_MM_PERM_ENUM)221); //B62(12-15) B63(12-15) B62(12-15) B63(12-15) B66(12-15) B67(12-15) B66(12-15) B67(12-15) B6A(12-15) B6B(12-15) B6A(12-15) B6B(12-15) B6E(12-15) B6F(12-15) B6E(12-15) B6F(12-15) + + const __m512i rhs_mat_014589CD_70_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_70, (_MM_PERM_ENUM)221); //B70(4-7) B71(4-7) B70(4-7) B71(4-7) B74(4-7) B75(4-7) B74(4-7) B75(4-7) B78(4-7) B79(4-7) B78(4-7) B79(4-7) B7C(4-7) B7D(4-7) B7C(4-7) B7D(4-7) + const __m512i rhs_mat_2367ABEF_70_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_70, (_MM_PERM_ENUM)221); //B72(4-7) B73(4-7) B72(4-7) B73(4-7) B76(4-7) B77(4-7) B76(4-7) B77(4-7) B7A(4-7) B7B(4-7) B7A(4-7) B7B(4-7) B7E(4-7) B7F(4-7) B7E(4-7) B7F(4-7) + + const __m512i rhs_mat_014589CD_71_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_71, (_MM_PERM_ENUM)221); //B70(12-15) B71(12-15) B70(12-15) B71(12-15) B74(12-15) B75(12-15) B74(12-15) B75(12-15) B78(12-15) B79(12-15) B78(12-15) B79(12-15) B7C(12-15) B7D(12-15) B7C(12-15) B7D(12-15) + const __m512i rhs_mat_2367ABEF_71_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_71, (_MM_PERM_ENUM)221); //B72(12-15) B73(12-15) B72(12-15) B73(12-15) B76(12-15) B77(12-15) B76(12-15) B77(12-15) B7A(12-15) B7B(12-15) B7A(12-15) B7B(12-15) B7E(12-15) B7F(12-15) B7E(12-15) B7F(12-15) + + //notation:superblock subblock + //s00 m00 s01 m01 s10 m10 s11 m11 s20 m20 s21 m21 s30 m30 s31 m31 s40 m40 s41 m41 s50 m50 s51 m51 s60 m60 s61 m61 s70 m70 s71 m71 + + const __m128i mins_and_scales_01_0 = _mm_loadu_si128((const __m128i *)(b_ptr_0[b].scales + sb * 64)); + const __m128i mins_and_scales_23_0 = _mm_loadu_si128((const __m128i *)(b_ptr_0[b].scales + 16 + sb * 64)); + const __m128i mins_and_scales_45_0 = _mm_loadu_si128((const __m128i *)(b_ptr_0[b].scales + 32 + sb * 64)); + const __m128i mins_and_scales_67_0 = _mm_loadu_si128((const 
__m128i *)(b_ptr_0[b].scales + 48 + sb * 64));
+
+ const __m128i mins_and_scales_01_1 = _mm_loadu_si128((const __m128i *)(b_ptr_1[b].scales + sb * 64));
+ const __m128i mins_and_scales_23_1 = _mm_loadu_si128((const __m128i *)(b_ptr_1[b].scales + 16 + sb * 64));
+ const __m128i mins_and_scales_45_1 = _mm_loadu_si128((const __m128i *)(b_ptr_1[b].scales + 32 + sb * 64));
+ const __m128i mins_and_scales_67_1 = _mm_loadu_si128((const __m128i *)(b_ptr_1[b].scales + 48 + sb * 64));
+
+ // Combine mins and scales for sub-blocks: 0-1, 2-3, 4-5, 6-7 in the sb loop
+ const __m256i mins_and_scales_01 = _mm256_insertf128_si256(_mm256_castsi128_si256(mins_and_scales_01_0), mins_and_scales_01_1, 1);
+ const __m256i mins_and_scales_23 = _mm256_insertf128_si256(_mm256_castsi128_si256(mins_and_scales_23_0), mins_and_scales_23_1, 1);
+ const __m256i mins_and_scales_45 = _mm256_insertf128_si256(_mm256_castsi128_si256(mins_and_scales_45_0), mins_and_scales_45_1, 1);
+ const __m256i mins_and_scales_67 = _mm256_insertf128_si256(_mm256_castsi128_si256(mins_and_scales_67_0), mins_and_scales_67_1, 1);
+
+ // Extract the scales, which are the lower half of each mins_and_scales byte
+ const __m256i scales_01 = _mm256_and_si256(mins_and_scales_01, m4b);
+ const __m256i scales_23 = _mm256_and_si256(mins_and_scales_23, m4b);
+ const __m256i scales_45 = _mm256_and_si256(mins_and_scales_45, m4b);
+ const __m256i scales_67 = _mm256_and_si256(mins_and_scales_67, m4b);
+
+ // Extract the mins, which are the upper half of each mins_and_scales byte
+ const __m512i mins_01 = _mm512_cvtepu8_epi16(_mm256_and_si256(_mm256_srli_epi16(mins_and_scales_01, 4), m4b));
+ const __m512i mins_23 = _mm512_cvtepu8_epi16(_mm256_and_si256(_mm256_srli_epi16(mins_and_scales_23, 4), m4b));
+ const __m512i mins_45 = _mm512_cvtepu8_epi16(_mm256_and_si256(_mm256_srli_epi16(mins_and_scales_45, 4), m4b));
+ const __m512i mins_67 = _mm512_cvtepu8_epi16(_mm256_and_si256(_mm256_srli_epi16(mins_and_scales_67, 4), m4b));
+
+ const __m512i scales_0 = _mm512_cvtepu8_epi16(_mm256_shuffle_epi8(scales_01, scalesmask1));
+ const __m512i scales_1 = _mm512_cvtepu8_epi16(_mm256_shuffle_epi8(scales_01, scalesmask2));
+ const __m512i scales_2 = _mm512_cvtepu8_epi16(_mm256_shuffle_epi8(scales_23, scalesmask1));
+ const __m512i scales_3 = _mm512_cvtepu8_epi16(_mm256_shuffle_epi8(scales_23, scalesmask2));
+ const __m512i scales_4 = _mm512_cvtepu8_epi16(_mm256_shuffle_epi8(scales_45, scalesmask1));
+ const __m512i scales_5 = _mm512_cvtepu8_epi16(_mm256_shuffle_epi8(scales_45, scalesmask2));
+ const __m512i scales_6 = _mm512_cvtepu8_epi16(_mm256_shuffle_epi8(scales_67, scalesmask1));
+ const __m512i scales_7 = _mm512_cvtepu8_epi16(_mm256_shuffle_epi8(scales_67, scalesmask2));
+
+ const __m512i scale_014589CD_0 = _mm512_shuffle_epi32(scales_0, (_MM_PERM_ENUM)68);
+ const __m512i scale_2367ABEF_0 = _mm512_shuffle_epi32(scales_0, (_MM_PERM_ENUM)238);
+
+ const __m512i scale_014589CD_1 = _mm512_shuffle_epi32(scales_1, (_MM_PERM_ENUM)68);
+ const __m512i scale_2367ABEF_1 = _mm512_shuffle_epi32(scales_1, (_MM_PERM_ENUM)238);
+
+ const __m512i scale_014589CD_2 = _mm512_shuffle_epi32(scales_2, (_MM_PERM_ENUM)68);
+ const __m512i scale_2367ABEF_2 = _mm512_shuffle_epi32(scales_2, (_MM_PERM_ENUM)238);
+
+ const __m512i scale_014589CD_3 = _mm512_shuffle_epi32(scales_3, (_MM_PERM_ENUM)68);
+ const __m512i scale_2367ABEF_3 = _mm512_shuffle_epi32(scales_3, (_MM_PERM_ENUM)238);
+
+ const __m512i scale_014589CD_4 = _mm512_shuffle_epi32(scales_4, (_MM_PERM_ENUM)68);
+ const __m512i scale_2367ABEF_4 =
_mm512_shuffle_epi32(scales_4, (_MM_PERM_ENUM)238); + + const __m512i scale_014589CD_5 = _mm512_shuffle_epi32(scales_5, (_MM_PERM_ENUM)68); + const __m512i scale_2367ABEF_5 = _mm512_shuffle_epi32(scales_5, (_MM_PERM_ENUM)238); + + const __m512i scale_014589CD_6 = _mm512_shuffle_epi32(scales_6, (_MM_PERM_ENUM)68); + const __m512i scale_2367ABEF_6 = _mm512_shuffle_epi32(scales_6, (_MM_PERM_ENUM)238); + + const __m512i scale_014589CD_7 = _mm512_shuffle_epi32(scales_7, (_MM_PERM_ENUM)68); + const __m512i scale_2367ABEF_7 = _mm512_shuffle_epi32(scales_7, (_MM_PERM_ENUM)238); + + // Load the four block_q8_k quantized values interleaved with each other in chunks of eight bytes - A0,A1,A2,A3 + // Loaded as set of 128 bit vectors and repeated into a 256 bit vector + __m256i lhs_mat_ymm_0123_00 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 512 * sb))); + __m256i lhs_mat_ymm_01_00 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_00, lhs_mat_ymm_0123_00, 0); + __m256i lhs_mat_ymm_23_00 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_00, lhs_mat_ymm_0123_00, 17); + __m256i lhs_mat_ymm_0123_01 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 32 + 512 * sb))); + __m256i lhs_mat_ymm_01_01 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_01, lhs_mat_ymm_0123_01, 0); + __m256i lhs_mat_ymm_23_01 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_01, lhs_mat_ymm_0123_01, 17); + __m256i lhs_mat_ymm_0123_10 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 64 + 512 * sb))); + __m256i lhs_mat_ymm_01_10 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_10, lhs_mat_ymm_0123_10, 0); + __m256i lhs_mat_ymm_23_10 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_10, lhs_mat_ymm_0123_10, 17); + __m256i lhs_mat_ymm_0123_11 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 96 + 512 * sb))); + __m256i lhs_mat_ymm_01_11 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_11, lhs_mat_ymm_0123_11, 0); + __m256i lhs_mat_ymm_23_11 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_11, lhs_mat_ymm_0123_11, 17); + __m256i lhs_mat_ymm_0123_20 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 128 + 512 * sb))); + __m256i lhs_mat_ymm_01_20 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_20, lhs_mat_ymm_0123_20, 0); + __m256i lhs_mat_ymm_23_20 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_20, lhs_mat_ymm_0123_20, 17); + __m256i lhs_mat_ymm_0123_21 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 160 + 512 * sb))); + __m256i lhs_mat_ymm_01_21 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_21, lhs_mat_ymm_0123_21, 0); + __m256i lhs_mat_ymm_23_21 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_21, lhs_mat_ymm_0123_21, 17); + __m256i lhs_mat_ymm_0123_30 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 192 + 512 * sb))); + __m256i lhs_mat_ymm_01_30 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_30, lhs_mat_ymm_0123_30, 0); + __m256i lhs_mat_ymm_23_30 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_30, lhs_mat_ymm_0123_30, 17); + __m256i lhs_mat_ymm_0123_31 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 224 + 512 * sb))); + __m256i lhs_mat_ymm_01_31 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_31, lhs_mat_ymm_0123_31, 0); + __m256i lhs_mat_ymm_23_31 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_31, lhs_mat_ymm_0123_31, 17); + + __m256i lhs_mat_ymm_0123_40 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 256 + 512 * sb))); + __m256i lhs_mat_ymm_01_40 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_40, lhs_mat_ymm_0123_40, 0); + __m256i lhs_mat_ymm_23_40 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_40, lhs_mat_ymm_0123_40, 17); + __m256i 
lhs_mat_ymm_0123_41 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 288 + 512 * sb))); + __m256i lhs_mat_ymm_01_41 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_41, lhs_mat_ymm_0123_41, 0); + __m256i lhs_mat_ymm_23_41 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_41, lhs_mat_ymm_0123_41, 17); + __m256i lhs_mat_ymm_0123_50 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 320 + 512 * sb))); + __m256i lhs_mat_ymm_01_50 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_50, lhs_mat_ymm_0123_50, 0); + __m256i lhs_mat_ymm_23_50 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_50, lhs_mat_ymm_0123_50, 17); + __m256i lhs_mat_ymm_0123_51 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 352 + 512 * sb))); + __m256i lhs_mat_ymm_01_51 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_51, lhs_mat_ymm_0123_51, 0); + __m256i lhs_mat_ymm_23_51 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_51, lhs_mat_ymm_0123_51, 17); + __m256i lhs_mat_ymm_0123_60 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 384 + 512 * sb))); + __m256i lhs_mat_ymm_01_60 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_60, lhs_mat_ymm_0123_60, 0); + __m256i lhs_mat_ymm_23_60 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_60, lhs_mat_ymm_0123_60, 17); + __m256i lhs_mat_ymm_0123_61 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 416 + 512 * sb))); + __m256i lhs_mat_ymm_01_61 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_61, lhs_mat_ymm_0123_61, 0); + __m256i lhs_mat_ymm_23_61 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_61, lhs_mat_ymm_0123_61, 17); + __m256i lhs_mat_ymm_0123_70 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 448 + 512 * sb))); + __m256i lhs_mat_ymm_01_70 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_70, lhs_mat_ymm_0123_70, 0); + __m256i lhs_mat_ymm_23_70 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_70, lhs_mat_ymm_0123_70, 17); + __m256i lhs_mat_ymm_0123_71 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 480 + 512 * sb))); + __m256i lhs_mat_ymm_01_71 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_71, lhs_mat_ymm_0123_71, 0); + __m256i lhs_mat_ymm_23_71 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_71, lhs_mat_ymm_0123_71, 17); + + __m512i lhs_mat_01_00 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_00), lhs_mat_ymm_01_00, 1); + __m512i lhs_mat_23_00 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_00), lhs_mat_ymm_23_00, 1); + __m512i lhs_mat_01_01 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_01), lhs_mat_ymm_01_01, 1); + __m512i lhs_mat_23_01 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_01), lhs_mat_ymm_23_01, 1); + + __m512i lhs_mat_01_10 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_10), lhs_mat_ymm_01_10, 1); + __m512i lhs_mat_23_10 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_10), lhs_mat_ymm_23_10, 1); + __m512i lhs_mat_01_11 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_11), lhs_mat_ymm_01_11, 1); + __m512i lhs_mat_23_11 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_11), lhs_mat_ymm_23_11, 1); + + __m512i lhs_mat_01_20 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_20), lhs_mat_ymm_01_20, 1); + __m512i lhs_mat_23_20 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_20), lhs_mat_ymm_23_20, 1); + __m512i lhs_mat_01_21 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_21), lhs_mat_ymm_01_21, 1); + __m512i lhs_mat_23_21 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_21), lhs_mat_ymm_23_21, 1); + + __m512i lhs_mat_01_30 = 
_mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_30), lhs_mat_ymm_01_30, 1); + __m512i lhs_mat_23_30 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_30), lhs_mat_ymm_23_30, 1); + __m512i lhs_mat_01_31 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_31), lhs_mat_ymm_01_31, 1); + __m512i lhs_mat_23_31 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_31), lhs_mat_ymm_23_31, 1); + + __m512i lhs_mat_01_40 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_40), lhs_mat_ymm_01_40, 1); + __m512i lhs_mat_23_40 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_40), lhs_mat_ymm_23_40, 1); + __m512i lhs_mat_01_41 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_41), lhs_mat_ymm_01_41, 1); + __m512i lhs_mat_23_41 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_41), lhs_mat_ymm_23_41, 1); + + __m512i lhs_mat_01_50 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_50), lhs_mat_ymm_01_50, 1); + __m512i lhs_mat_23_50 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_50), lhs_mat_ymm_23_50, 1); + __m512i lhs_mat_01_51 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_51), lhs_mat_ymm_01_51, 1); + __m512i lhs_mat_23_51 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_51), lhs_mat_ymm_23_51, 1); + + __m512i lhs_mat_01_60 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_60), lhs_mat_ymm_01_60, 1); + __m512i lhs_mat_23_60 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_60), lhs_mat_ymm_23_60, 1); + __m512i lhs_mat_01_61 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_61), lhs_mat_ymm_01_61, 1); + __m512i lhs_mat_23_61 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_61), lhs_mat_ymm_23_61, 1); + + __m512i lhs_mat_01_70 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_70), lhs_mat_ymm_01_70, 1); + __m512i lhs_mat_23_70 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_70), lhs_mat_ymm_23_70, 1); + __m512i lhs_mat_01_71 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_71), lhs_mat_ymm_01_71, 1); + __m512i lhs_mat_23_71 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_71), lhs_mat_ymm_23_71, 1); + + // Bsums are loaded for the different Q8_K blocks + __m128i lhs_raw_bsums_01_0123 = _mm_loadu_si128((const __m128i *)((a_ptr[b].bsums + 32 * sb))); + __m128i lhs_raw_bsums_23_0123 = _mm_loadu_si128((const __m128i *)(a_ptr[b].bsums + 8 + 32 * sb)); + __m128i lhs_raw_bsums_01_4567 = _mm_loadu_si128((const __m128i *)((a_ptr[b].bsums + 16 + 32 * sb))); + __m128i lhs_raw_bsums_23_4567 = _mm_loadu_si128((const __m128i *)(a_ptr[b].bsums + 24 + 32 * sb)); + + __m256i lhs_bsums_ymm_01_0123 = _mm256_inserti128_si256(_mm256_castsi128_si256(lhs_raw_bsums_01_0123), lhs_raw_bsums_01_0123, 1); + __m512i lhs_bsums_01_0123 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_bsums_ymm_01_0123), lhs_bsums_ymm_01_0123, 1); + __m256i lhs_bsums_ymm_23_0123 = _mm256_inserti128_si256(_mm256_castsi128_si256(lhs_raw_bsums_23_0123), lhs_raw_bsums_23_0123, 1); + __m512i lhs_bsums_23_0123 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_bsums_ymm_23_0123), lhs_bsums_ymm_23_0123, 1); + __m256i lhs_bsums_ymm_01_4567 = _mm256_inserti128_si256(_mm256_castsi128_si256(lhs_raw_bsums_01_4567), lhs_raw_bsums_01_4567, 1); + __m512i lhs_bsums_01_4567 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_bsums_ymm_01_4567), lhs_bsums_ymm_01_4567, 1); + __m256i lhs_bsums_ymm_23_4567 = _mm256_inserti128_si256(_mm256_castsi128_si256(lhs_raw_bsums_23_4567), lhs_raw_bsums_23_4567, 
+
+                    // Shuffle pattern one - left side input
+                    const __m512i lhs_mat_01_00_sp1 = _mm512_shuffle_epi32(lhs_mat_01_00, (_MM_PERM_ENUM)160); //A00(0-3) A00(0-3) A01(0-3) A01(0-3) A00(0-3) A00(0-3) A01(0-3) A01(0-3) A00(0-3) A00(0-3) A01(0-3) A01(0-3) A00(0-3) A00(0-3) A01(0-3) A01(0-3)
+                    const __m512i lhs_mat_23_00_sp1 = _mm512_shuffle_epi32(lhs_mat_23_00, (_MM_PERM_ENUM)160); //A02(0-3) A02(0-3) A03(0-3) A03(0-3) A02(0-3) A02(0-3) A03(0-3) A03(0-3) A02(0-3) A02(0-3) A03(0-3) A03(0-3) A02(0-3) A02(0-3) A03(0-3) A03(0-3)
+
+                    const __m512i lhs_mat_01_01_sp1 = _mm512_shuffle_epi32(lhs_mat_01_01, (_MM_PERM_ENUM)160); //A00(8-11) A00(8-11) A01(8-11) A01(8-11) A00(8-11) A00(8-11) A01(8-11) A01(8-11) A00(8-11) A00(8-11) A01(8-11) A01(8-11) A00(8-11) A00(8-11) A01(8-11) A01(8-11)
+                    const __m512i lhs_mat_23_01_sp1 = _mm512_shuffle_epi32(lhs_mat_23_01, (_MM_PERM_ENUM)160); //A02(8-11) A02(8-11) A03(8-11) A03(8-11) A02(8-11) A02(8-11) A03(8-11) A03(8-11) A02(8-11) A02(8-11) A03(8-11) A03(8-11) A02(8-11) A02(8-11) A03(8-11) A03(8-11)
+
+                    const __m512i lhs_mat_01_10_sp1 = _mm512_shuffle_epi32(lhs_mat_01_10, (_MM_PERM_ENUM)160); //A10(0-3) A10(0-3) A11(0-3) A11(0-3) A10(0-3) A10(0-3) A11(0-3) A11(0-3) A10(0-3) A10(0-3) A11(0-3) A11(0-3) A10(0-3) A10(0-3) A11(0-3) A11(0-3)
+                    const __m512i lhs_mat_23_10_sp1 = _mm512_shuffle_epi32(lhs_mat_23_10, (_MM_PERM_ENUM)160); //A12(0-3) A12(0-3) A13(0-3) A13(0-3) A12(0-3) A12(0-3) A13(0-3) A13(0-3) A12(0-3) A12(0-3) A13(0-3) A13(0-3) A12(0-3) A12(0-3) A13(0-3) A13(0-3)
+
+                    const __m512i lhs_mat_01_11_sp1 = _mm512_shuffle_epi32(lhs_mat_01_11, (_MM_PERM_ENUM)160); //A10(8-11) A10(8-11) A11(8-11) A11(8-11) A10(8-11) A10(8-11) A11(8-11) A11(8-11) A10(8-11) A10(8-11) A11(8-11) A11(8-11) A10(8-11) A10(8-11) A11(8-11) A11(8-11)
+                    const __m512i lhs_mat_23_11_sp1 = _mm512_shuffle_epi32(lhs_mat_23_11, (_MM_PERM_ENUM)160); //A12(8-11) A12(8-11) A13(8-11) A13(8-11) A12(8-11) A12(8-11) A13(8-11) A13(8-11) A12(8-11) A12(8-11) A13(8-11) A13(8-11) A12(8-11) A12(8-11) A13(8-11) A13(8-11)
+
+                    const __m512i lhs_mat_01_20_sp1 = _mm512_shuffle_epi32(lhs_mat_01_20, (_MM_PERM_ENUM)160); //A20(0-3) A20(0-3) A21(0-3) A21(0-3) A20(0-3) A20(0-3) A21(0-3) A21(0-3) A20(0-3) A20(0-3) A21(0-3) A21(0-3) A20(0-3) A20(0-3) A21(0-3) A21(0-3)
+                    const __m512i lhs_mat_23_20_sp1 = _mm512_shuffle_epi32(lhs_mat_23_20, (_MM_PERM_ENUM)160); //A22(0-3) A22(0-3) A23(0-3) A23(0-3) A22(0-3) A22(0-3) A23(0-3) A23(0-3) A22(0-3) A22(0-3) A23(0-3) A23(0-3) A22(0-3) A22(0-3) A23(0-3) A23(0-3)
+
+                    const __m512i lhs_mat_01_21_sp1 = _mm512_shuffle_epi32(lhs_mat_01_21, (_MM_PERM_ENUM)160); //A20(8-11) A20(8-11) A21(8-11) A21(8-11) A20(8-11) A20(8-11) A21(8-11) A21(8-11) A20(8-11) A20(8-11) A21(8-11) A21(8-11) A20(8-11) A20(8-11) A21(8-11) A21(8-11)
+                    const __m512i lhs_mat_23_21_sp1 = _mm512_shuffle_epi32(lhs_mat_23_21, (_MM_PERM_ENUM)160); //A22(8-11) A22(8-11) A23(8-11) A23(8-11) A22(8-11) A22(8-11) A23(8-11) A23(8-11) A22(8-11) A22(8-11) A23(8-11) A23(8-11) A22(8-11) A22(8-11) A23(8-11) A23(8-11)
+
+                    const __m512i lhs_mat_01_30_sp1 = _mm512_shuffle_epi32(lhs_mat_01_30, (_MM_PERM_ENUM)160); //A30(0-3) A30(0-3) A31(0-3) A31(0-3) A30(0-3) A30(0-3) A31(0-3) A31(0-3) A30(0-3) A30(0-3) A31(0-3) A31(0-3) A30(0-3) A30(0-3) A31(0-3) A31(0-3)
+                    const __m512i lhs_mat_23_30_sp1 = _mm512_shuffle_epi32(lhs_mat_23_30, (_MM_PERM_ENUM)160); //A32(0-3) A32(0-3) A33(0-3) A33(0-3) A32(0-3) A32(0-3) A33(0-3) A33(0-3) A32(0-3) A32(0-3) A33(0-3) A33(0-3) A32(0-3) A32(0-3) A33(0-3) A33(0-3)
+
+                    const __m512i lhs_mat_01_31_sp1 = _mm512_shuffle_epi32(lhs_mat_01_31, (_MM_PERM_ENUM)160); //A30(8-11) A30(8-11) A31(8-11) A31(8-11) A30(8-11) A30(8-11) A31(8-11) A31(8-11) A30(8-11) A30(8-11) A31(8-11) A31(8-11) A30(8-11) A30(8-11) A31(8-11) A31(8-11)
+                    const __m512i lhs_mat_23_31_sp1 = _mm512_shuffle_epi32(lhs_mat_23_31, (_MM_PERM_ENUM)160); //A32(8-11) A32(8-11) A33(8-11) A33(8-11) A32(8-11) A32(8-11) A33(8-11) A33(8-11) A32(8-11) A32(8-11) A33(8-11) A33(8-11) A32(8-11) A32(8-11) A33(8-11) A33(8-11)
+
+                    const __m512i lhs_mat_01_40_sp1 = _mm512_shuffle_epi32(lhs_mat_01_40, (_MM_PERM_ENUM)160); //A40(0-3) A40(0-3) A41(0-3) A41(0-3) A40(0-3) A40(0-3) A41(0-3) A41(0-3) A40(0-3) A40(0-3) A41(0-3) A41(0-3) A40(0-3) A40(0-3) A41(0-3) A41(0-3)
+                    const __m512i lhs_mat_23_40_sp1 = _mm512_shuffle_epi32(lhs_mat_23_40, (_MM_PERM_ENUM)160); //A42(0-3) A42(0-3) A43(0-3) A43(0-3) A42(0-3) A42(0-3) A43(0-3) A43(0-3) A42(0-3) A42(0-3) A43(0-3) A43(0-3) A42(0-3) A42(0-3) A43(0-3) A43(0-3)
+
+                    const __m512i lhs_mat_01_41_sp1 = _mm512_shuffle_epi32(lhs_mat_01_41, (_MM_PERM_ENUM)160); //A40(8-11) A40(8-11) A41(8-11) A41(8-11) A40(8-11) A40(8-11) A41(8-11) A41(8-11) A40(8-11) A40(8-11) A41(8-11) A41(8-11) A40(8-11) A40(8-11) A41(8-11) A41(8-11)
+                    const __m512i lhs_mat_23_41_sp1 = _mm512_shuffle_epi32(lhs_mat_23_41, (_MM_PERM_ENUM)160); //A42(8-11) A42(8-11) A43(8-11) A43(8-11) A42(8-11) A42(8-11) A43(8-11) A43(8-11) A42(8-11) A42(8-11) A43(8-11) A43(8-11) A42(8-11) A42(8-11) A43(8-11) A43(8-11)
+
+                    const __m512i lhs_mat_01_50_sp1 = _mm512_shuffle_epi32(lhs_mat_01_50, (_MM_PERM_ENUM)160); //A50(0-3) A50(0-3) A51(0-3) A51(0-3) A50(0-3) A50(0-3) A51(0-3) A51(0-3) A50(0-3) A50(0-3) A51(0-3) A51(0-3) A50(0-3) A50(0-3) A51(0-3) A51(0-3)
+                    const __m512i lhs_mat_23_50_sp1 = _mm512_shuffle_epi32(lhs_mat_23_50, (_MM_PERM_ENUM)160); //A52(0-3) A52(0-3) A53(0-3) A53(0-3) A52(0-3) A52(0-3) A53(0-3) A53(0-3) A52(0-3) A52(0-3) A53(0-3) A53(0-3) A52(0-3) A52(0-3) A53(0-3) A53(0-3)
+
+                    const __m512i lhs_mat_01_51_sp1 = _mm512_shuffle_epi32(lhs_mat_01_51, (_MM_PERM_ENUM)160); //A50(8-11) A50(8-11) A51(8-11) A51(8-11) A50(8-11) A50(8-11) A51(8-11) A51(8-11) A50(8-11) A50(8-11) A51(8-11) A51(8-11) A50(8-11) A50(8-11) A51(8-11) A51(8-11)
+                    const __m512i lhs_mat_23_51_sp1 = _mm512_shuffle_epi32(lhs_mat_23_51, (_MM_PERM_ENUM)160); //A52(8-11) A52(8-11) A53(8-11) A53(8-11) A52(8-11) A52(8-11) A53(8-11) A53(8-11) A52(8-11) A52(8-11) A53(8-11) A53(8-11) A52(8-11) A52(8-11) A53(8-11) A53(8-11)
+
+                    const __m512i lhs_mat_01_60_sp1 = _mm512_shuffle_epi32(lhs_mat_01_60, (_MM_PERM_ENUM)160); //A60(0-3) A60(0-3) A61(0-3) A61(0-3) A60(0-3) A60(0-3) A61(0-3) A61(0-3) A60(0-3) A60(0-3) A61(0-3) A61(0-3) A60(0-3) A60(0-3) A61(0-3) A61(0-3)
+                    const __m512i lhs_mat_23_60_sp1 = _mm512_shuffle_epi32(lhs_mat_23_60, (_MM_PERM_ENUM)160); //A62(0-3) A62(0-3) A63(0-3) A63(0-3) A62(0-3) A62(0-3) A63(0-3) A63(0-3) A62(0-3) A62(0-3) A63(0-3) A63(0-3) A62(0-3) A62(0-3) A63(0-3) A63(0-3)
+
+                    const __m512i lhs_mat_01_61_sp1 = _mm512_shuffle_epi32(lhs_mat_01_61, (_MM_PERM_ENUM)160); //A60(8-11) A60(8-11) A61(8-11) A61(8-11) A60(8-11) A60(8-11) A61(8-11) A61(8-11) A60(8-11) A60(8-11) A61(8-11) A61(8-11) A60(8-11) A60(8-11) A61(8-11) A61(8-11)
+                    const __m512i lhs_mat_23_61_sp1 = _mm512_shuffle_epi32(lhs_mat_23_61, (_MM_PERM_ENUM)160); //A62(8-11) A62(8-11) A63(8-11) A63(8-11) A62(8-11) A62(8-11) A63(8-11) A63(8-11) A62(8-11) A62(8-11) A63(8-11) A63(8-11) A62(8-11) A62(8-11) A63(8-11) A63(8-11)
+
+                    const __m512i lhs_mat_01_70_sp1 = _mm512_shuffle_epi32(lhs_mat_01_70, (_MM_PERM_ENUM)160); //A70(0-3) A70(0-3) A71(0-3) A71(0-3) A70(0-3) A70(0-3) A71(0-3) A71(0-3) A70(0-3) A70(0-3) A71(0-3) A71(0-3) A70(0-3) A70(0-3) A71(0-3) A71(0-3)
+                    const __m512i lhs_mat_23_70_sp1 = _mm512_shuffle_epi32(lhs_mat_23_70, (_MM_PERM_ENUM)160); //A72(0-3) A72(0-3) A73(0-3) A73(0-3) A72(0-3) A72(0-3) A73(0-3) A73(0-3) A72(0-3) A72(0-3) A73(0-3) A73(0-3) A72(0-3) A72(0-3) A73(0-3) A73(0-3)
+
+                    const __m512i lhs_mat_01_71_sp1 = _mm512_shuffle_epi32(lhs_mat_01_71, (_MM_PERM_ENUM)160); //A70(8-11) A70(8-11) A71(8-11) A71(8-11) A70(8-11) A70(8-11) A71(8-11) A71(8-11) A70(8-11) A70(8-11) A71(8-11) A71(8-11) A70(8-11) A70(8-11) A71(8-11) A71(8-11)
+                    const __m512i lhs_mat_23_71_sp1 = _mm512_shuffle_epi32(lhs_mat_23_71, (_MM_PERM_ENUM)160); //A72(8-11) A72(8-11) A73(8-11) A73(8-11) A72(8-11) A72(8-11) A73(8-11) A73(8-11) A72(8-11) A72(8-11) A73(8-11) A73(8-11) A72(8-11) A72(8-11) A73(8-11) A73(8-11)
+
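+                    // Shuffle pattern two - left side input
+                    // Pattern one (imm 160 = 0b10100000) replicates dwords 0 and 2 of each 128-bit
+                    // lane and pattern two (imm 245 = 0b11110101) replicates dwords 1 and 3, so the
+                    // two patterns together pair every 4-byte group of a row exactly once with the
+                    // matching RHS bytes.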
+                    const __m512i lhs_mat_01_00_sp2 = _mm512_shuffle_epi32(lhs_mat_01_00, (_MM_PERM_ENUM)245); //A00(4-7) A00(4-7) A01(4-7) A01(4-7) A00(4-7) A00(4-7) A01(4-7) A01(4-7) A00(4-7) A00(4-7) A01(4-7) A01(4-7) A00(4-7) A00(4-7) A01(4-7) A01(4-7)
+                    const __m512i lhs_mat_23_00_sp2 = _mm512_shuffle_epi32(lhs_mat_23_00, (_MM_PERM_ENUM)245); //A02(4-7) A02(4-7) A03(4-7) A03(4-7) A02(4-7) A02(4-7) A03(4-7) A03(4-7) A02(4-7) A02(4-7) A03(4-7) A03(4-7) A02(4-7) A02(4-7) A03(4-7) A03(4-7)
+
+                    const __m512i lhs_mat_01_01_sp2 = _mm512_shuffle_epi32(lhs_mat_01_01, (_MM_PERM_ENUM)245); //A00(12-15) A00(12-15) A01(12-15) A01(12-15) A00(12-15) A00(12-15) A01(12-15) A01(12-15) A00(12-15) A00(12-15) A01(12-15) A01(12-15) A00(12-15) A00(12-15) A01(12-15) A01(12-15)
+                    const __m512i lhs_mat_23_01_sp2 = _mm512_shuffle_epi32(lhs_mat_23_01, (_MM_PERM_ENUM)245); //A02(12-15) A02(12-15) A03(12-15) A03(12-15) A02(12-15) A02(12-15) A03(12-15) A03(12-15) A02(12-15) A02(12-15) A03(12-15) A03(12-15) A02(12-15) A02(12-15) A03(12-15) A03(12-15)
+
+                    const __m512i lhs_mat_01_10_sp2 = _mm512_shuffle_epi32(lhs_mat_01_10, (_MM_PERM_ENUM)245); //A10(4-7) A10(4-7) A11(4-7) A11(4-7) A10(4-7) A10(4-7) A11(4-7) A11(4-7) A10(4-7) A10(4-7) A11(4-7) A11(4-7) A10(4-7) A10(4-7) A11(4-7) A11(4-7)
+                    const __m512i lhs_mat_23_10_sp2 = _mm512_shuffle_epi32(lhs_mat_23_10, (_MM_PERM_ENUM)245); //A12(4-7) A12(4-7) A13(4-7) A13(4-7) A12(4-7) A12(4-7) A13(4-7) A13(4-7) A12(4-7) A12(4-7) A13(4-7) A13(4-7) A12(4-7) A12(4-7) A13(4-7) A13(4-7)
+
+                    const __m512i lhs_mat_01_11_sp2 = _mm512_shuffle_epi32(lhs_mat_01_11, (_MM_PERM_ENUM)245); //A10(12-15) A10(12-15) A11(12-15) A11(12-15) A10(12-15) A10(12-15) A11(12-15) A11(12-15) A10(12-15) A10(12-15) A11(12-15) A11(12-15) A10(12-15) A10(12-15) A11(12-15) A11(12-15)
+                    const __m512i lhs_mat_23_11_sp2 = _mm512_shuffle_epi32(lhs_mat_23_11, (_MM_PERM_ENUM)245); //A12(12-15) A12(12-15) A13(12-15) A13(12-15) A12(12-15) A12(12-15) A13(12-15) A13(12-15) A12(12-15) A12(12-15) A13(12-15) A13(12-15) A12(12-15) A12(12-15) A13(12-15) A13(12-15)
+
+                    const __m512i lhs_mat_01_20_sp2 = _mm512_shuffle_epi32(lhs_mat_01_20, (_MM_PERM_ENUM)245); //A20(4-7) A20(4-7) A21(4-7) A21(4-7) A20(4-7) A20(4-7) A21(4-7) A21(4-7) A20(4-7) A20(4-7) A21(4-7) A21(4-7) A20(4-7) A20(4-7) A21(4-7) A21(4-7)
+                    const __m512i lhs_mat_23_20_sp2 = _mm512_shuffle_epi32(lhs_mat_23_20, (_MM_PERM_ENUM)245); //A22(4-7) A22(4-7) A23(4-7) A23(4-7) A22(4-7) A22(4-7) A23(4-7) A23(4-7) A22(4-7) A22(4-7) A23(4-7) A23(4-7) A22(4-7) A22(4-7) A23(4-7) A23(4-7)
+
+                    const __m512i lhs_mat_01_21_sp2 = _mm512_shuffle_epi32(lhs_mat_01_21, (_MM_PERM_ENUM)245); //A20(12-15) A20(12-15) A21(12-15) A21(12-15) A20(12-15) A20(12-15) A21(12-15) A21(12-15) A20(12-15) A20(12-15) A21(12-15) A21(12-15) A20(12-15) A20(12-15) A21(12-15) A21(12-15)
+                    const __m512i lhs_mat_23_21_sp2 = _mm512_shuffle_epi32(lhs_mat_23_21, (_MM_PERM_ENUM)245); //A22(12-15) A22(12-15) A23(12-15) A23(12-15) A22(12-15) A22(12-15) A23(12-15) A23(12-15) A22(12-15) A22(12-15) A23(12-15) A23(12-15) A22(12-15) A22(12-15) A23(12-15) A23(12-15)
+
+                    const __m512i lhs_mat_01_30_sp2 = _mm512_shuffle_epi32(lhs_mat_01_30, (_MM_PERM_ENUM)245); //A30(4-7) A30(4-7) A31(4-7) A31(4-7) A30(4-7) A30(4-7) A31(4-7) A31(4-7) A30(4-7) A30(4-7) A31(4-7) A31(4-7) A30(4-7) A30(4-7) A31(4-7) A31(4-7)
+                    const __m512i lhs_mat_23_30_sp2 = _mm512_shuffle_epi32(lhs_mat_23_30, (_MM_PERM_ENUM)245); //A32(4-7) A32(4-7) A33(4-7) A33(4-7) A32(4-7) A32(4-7) A33(4-7) A33(4-7) A32(4-7) A32(4-7) A33(4-7) A33(4-7) A32(4-7) A32(4-7) A33(4-7) A33(4-7)
+
+                    const __m512i lhs_mat_01_31_sp2 = _mm512_shuffle_epi32(lhs_mat_01_31, (_MM_PERM_ENUM)245); //A30(12-15) A30(12-15) A31(12-15) A31(12-15) A30(12-15) A30(12-15) A31(12-15) A31(12-15) A30(12-15) A30(12-15) A31(12-15) A31(12-15) A30(12-15) A30(12-15) A31(12-15) A31(12-15)
+                    const __m512i lhs_mat_23_31_sp2 = _mm512_shuffle_epi32(lhs_mat_23_31, (_MM_PERM_ENUM)245); //A32(12-15) A32(12-15) A33(12-15) A33(12-15) A32(12-15) A32(12-15) A33(12-15) A33(12-15) A32(12-15) A32(12-15) A33(12-15) A33(12-15) A32(12-15) A32(12-15) A33(12-15) A33(12-15)
+
+                    const __m512i lhs_mat_01_40_sp2 = _mm512_shuffle_epi32(lhs_mat_01_40, (_MM_PERM_ENUM)245); //A40(4-7) A40(4-7) A41(4-7) A41(4-7) A40(4-7) A40(4-7) A41(4-7) A41(4-7) A40(4-7) A40(4-7) A41(4-7) A41(4-7) A40(4-7) A40(4-7) A41(4-7) A41(4-7)
+                    const __m512i lhs_mat_23_40_sp2 = _mm512_shuffle_epi32(lhs_mat_23_40, (_MM_PERM_ENUM)245); //A42(4-7) A42(4-7) A43(4-7) A43(4-7) A42(4-7) A42(4-7) A43(4-7) A43(4-7) A42(4-7) A42(4-7) A43(4-7) A43(4-7) A42(4-7) A42(4-7) A43(4-7) A43(4-7)
+
+                    const __m512i lhs_mat_01_41_sp2 = _mm512_shuffle_epi32(lhs_mat_01_41, (_MM_PERM_ENUM)245); //A40(12-15) A40(12-15) A41(12-15) A41(12-15) A40(12-15) A40(12-15) A41(12-15) A41(12-15) A40(12-15) A40(12-15) A41(12-15) A41(12-15) A40(12-15) A40(12-15) A41(12-15) A41(12-15)
+                    const __m512i lhs_mat_23_41_sp2 = _mm512_shuffle_epi32(lhs_mat_23_41, (_MM_PERM_ENUM)245); //A42(12-15) A42(12-15) A43(12-15) A43(12-15) A42(12-15) A42(12-15) A43(12-15) A43(12-15) A42(12-15) A42(12-15) A43(12-15) A43(12-15) A42(12-15) A42(12-15) A43(12-15) A43(12-15)
+
+                    const __m512i lhs_mat_01_50_sp2 = _mm512_shuffle_epi32(lhs_mat_01_50, (_MM_PERM_ENUM)245); //A50(4-7) A50(4-7) A51(4-7) A51(4-7) A50(4-7) A50(4-7) A51(4-7) A51(4-7) A50(4-7) A50(4-7) A51(4-7) A51(4-7) A50(4-7) A50(4-7) A51(4-7) A51(4-7)
+                    const __m512i lhs_mat_23_50_sp2 = _mm512_shuffle_epi32(lhs_mat_23_50, (_MM_PERM_ENUM)245); //A52(4-7) A52(4-7) A53(4-7) A53(4-7) A52(4-7) A52(4-7) A53(4-7) A53(4-7) A52(4-7) A52(4-7) A53(4-7) A53(4-7) A52(4-7) A52(4-7) A53(4-7) A53(4-7)
+
+                    const __m512i lhs_mat_01_51_sp2 = _mm512_shuffle_epi32(lhs_mat_01_51, (_MM_PERM_ENUM)245); //A50(12-15) A50(12-15) A51(12-15) A51(12-15) A50(12-15) A50(12-15) A51(12-15) A51(12-15) A50(12-15) A50(12-15) A51(12-15) A51(12-15) A50(12-15) A50(12-15) A51(12-15) A51(12-15)
+                    const __m512i lhs_mat_23_51_sp2 = _mm512_shuffle_epi32(lhs_mat_23_51, (_MM_PERM_ENUM)245); //A52(12-15) A52(12-15) A53(12-15) A53(12-15) A52(12-15) A52(12-15) A53(12-15) A53(12-15) A52(12-15) A52(12-15) A53(12-15) A53(12-15) A52(12-15) A52(12-15) A53(12-15) A53(12-15)
+
+                    const __m512i lhs_mat_01_60_sp2 = _mm512_shuffle_epi32(lhs_mat_01_60, (_MM_PERM_ENUM)245); //A60(4-7) A60(4-7) A61(4-7) A61(4-7) A60(4-7) A60(4-7) A61(4-7) A61(4-7) A60(4-7) A60(4-7) A61(4-7) A61(4-7) A60(4-7) A60(4-7) A61(4-7) A61(4-7)
+                    const __m512i lhs_mat_23_60_sp2 = _mm512_shuffle_epi32(lhs_mat_23_60, (_MM_PERM_ENUM)245); //A62(4-7) A62(4-7) A63(4-7) A63(4-7) A62(4-7) A62(4-7) A63(4-7) A63(4-7) A62(4-7) A62(4-7) A63(4-7) A63(4-7) A62(4-7) A62(4-7) A63(4-7) A63(4-7)
+
+                    const __m512i lhs_mat_01_61_sp2 = _mm512_shuffle_epi32(lhs_mat_01_61, (_MM_PERM_ENUM)245); //A60(12-15) A60(12-15) A61(12-15) A61(12-15) A60(12-15) A60(12-15) A61(12-15) A61(12-15) A60(12-15) A60(12-15) A61(12-15) A61(12-15) A60(12-15) A60(12-15) A61(12-15) A61(12-15)
+                    const __m512i lhs_mat_23_61_sp2 = _mm512_shuffle_epi32(lhs_mat_23_61, (_MM_PERM_ENUM)245); //A62(12-15) A62(12-15) A63(12-15) A63(12-15) A62(12-15) A62(12-15) A63(12-15) A63(12-15) A62(12-15) A62(12-15) A63(12-15) A63(12-15) A62(12-15) A62(12-15) A63(12-15) A63(12-15)
+
+                    const __m512i lhs_mat_01_70_sp2 = _mm512_shuffle_epi32(lhs_mat_01_70, (_MM_PERM_ENUM)245); //A70(4-7) A70(4-7) A71(4-7) A71(4-7) A70(4-7) A70(4-7) A71(4-7) A71(4-7) A70(4-7) A70(4-7) A71(4-7) A71(4-7) A70(4-7) A70(4-7) A71(4-7) A71(4-7)
+                    const __m512i lhs_mat_23_70_sp2 = _mm512_shuffle_epi32(lhs_mat_23_70, (_MM_PERM_ENUM)245); //A72(4-7) A72(4-7) A73(4-7) A73(4-7) A72(4-7) A72(4-7) A73(4-7) A73(4-7) A72(4-7) A72(4-7) A73(4-7) A73(4-7) A72(4-7) A72(4-7) A73(4-7) A73(4-7)
+
+                    const __m512i lhs_mat_01_71_sp2 = _mm512_shuffle_epi32(lhs_mat_01_71, (_MM_PERM_ENUM)245); //A70(12-15) A70(12-15) A71(12-15) A71(12-15) A70(12-15) A70(12-15) A71(12-15) A71(12-15) A70(12-15) A70(12-15) A71(12-15) A71(12-15) A70(12-15) A70(12-15) A71(12-15) A71(12-15)
+                    const __m512i lhs_mat_23_71_sp2 = _mm512_shuffle_epi32(lhs_mat_23_71, (_MM_PERM_ENUM)245); //A72(12-15) A72(12-15) A73(12-15) A73(12-15) A72(12-15) A72(12-15) A73(12-15) A73(12-15) A72(12-15) A72(12-15) A73(12-15) A73(12-15) A72(12-15) A72(12-15) A73(12-15) A73(12-15)
+
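+                    // _mm512_maddubs_epi16 multiplies unsigned bytes (the masked 2-bit RHS quants,
+                    // 0..3) with signed bytes (the Q8_K LHS quants) and adds adjacent pairs into
+                    // 16-bit lanes; the two maddubs results per 16-byte group are then summed with
+                    // _mm512_add_epi16.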
+                    // The values arranged in shuffle patterns are used in dot product operations within a 32 bit lane, i.e. corresponding bytes are multiplied and added into 32 bit integers within the 32 bit lane
+                    __m512i iacc_mat_00_0_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_00_sp1, lhs_mat_01_00_sp1),_mm512_maddubs_epi16(rhs_mat_014589CD_01_sp1, lhs_mat_01_01_sp1));
+                    __m512i iacc_mat_01_0_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_00_sp1, lhs_mat_01_00_sp1),_mm512_maddubs_epi16(rhs_mat_2367ABEF_01_sp1, lhs_mat_01_01_sp1));
+
+                    __m512i iacc_mat_10_0_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_00_sp1, lhs_mat_23_00_sp1),_mm512_maddubs_epi16(rhs_mat_014589CD_01_sp1, lhs_mat_23_01_sp1));
+                    __m512i iacc_mat_11_0_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_00_sp1, lhs_mat_23_00_sp1),_mm512_maddubs_epi16(rhs_mat_2367ABEF_01_sp1, lhs_mat_23_01_sp1));
+
+                    __m512i iacc_mat_00_1_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_10_sp1, lhs_mat_01_10_sp1),_mm512_maddubs_epi16(rhs_mat_014589CD_11_sp1, lhs_mat_01_11_sp1));
+                    __m512i iacc_mat_01_1_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_10_sp1, lhs_mat_01_10_sp1),_mm512_maddubs_epi16(rhs_mat_2367ABEF_11_sp1, lhs_mat_01_11_sp1));
+
+                    __m512i iacc_mat_10_1_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_10_sp1, lhs_mat_23_10_sp1),_mm512_maddubs_epi16(rhs_mat_014589CD_11_sp1, lhs_mat_23_11_sp1));
+                    __m512i iacc_mat_11_1_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_10_sp1, lhs_mat_23_10_sp1),_mm512_maddubs_epi16(rhs_mat_2367ABEF_11_sp1, lhs_mat_23_11_sp1));
+
+                    __m512i iacc_mat_00_2_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_20_sp1, lhs_mat_01_20_sp1),_mm512_maddubs_epi16(rhs_mat_014589CD_21_sp1, lhs_mat_01_21_sp1));
+                    __m512i iacc_mat_01_2_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_20_sp1, lhs_mat_01_20_sp1),_mm512_maddubs_epi16(rhs_mat_2367ABEF_21_sp1, lhs_mat_01_21_sp1));
+
+                    __m512i iacc_mat_10_2_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_20_sp1, lhs_mat_23_20_sp1),_mm512_maddubs_epi16(rhs_mat_014589CD_21_sp1, lhs_mat_23_21_sp1));
+                    __m512i iacc_mat_11_2_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_20_sp1, lhs_mat_23_20_sp1),_mm512_maddubs_epi16(rhs_mat_2367ABEF_21_sp1, lhs_mat_23_21_sp1));
+
+                    __m512i iacc_mat_00_3_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_30_sp1, lhs_mat_01_30_sp1),_mm512_maddubs_epi16(rhs_mat_014589CD_31_sp1, lhs_mat_01_31_sp1));
+                    __m512i iacc_mat_01_3_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_30_sp1, lhs_mat_01_30_sp1),_mm512_maddubs_epi16(rhs_mat_2367ABEF_31_sp1, lhs_mat_01_31_sp1));
+
+                    __m512i iacc_mat_10_3_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_30_sp1, lhs_mat_23_30_sp1),_mm512_maddubs_epi16(rhs_mat_014589CD_31_sp1, lhs_mat_23_31_sp1));
+                    __m512i iacc_mat_11_3_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_30_sp1, lhs_mat_23_30_sp1),_mm512_maddubs_epi16(rhs_mat_2367ABEF_31_sp1, lhs_mat_23_31_sp1));
+
+                    __m512i iacc_mat_00_4_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_40_sp1, lhs_mat_01_40_sp1),_mm512_maddubs_epi16(rhs_mat_014589CD_41_sp1, lhs_mat_01_41_sp1));
+                    __m512i iacc_mat_01_4_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_40_sp1, lhs_mat_01_40_sp1),_mm512_maddubs_epi16(rhs_mat_2367ABEF_41_sp1, lhs_mat_01_41_sp1));
+
+                    __m512i iacc_mat_10_4_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_40_sp1, lhs_mat_23_40_sp1),_mm512_maddubs_epi16(rhs_mat_014589CD_41_sp1, lhs_mat_23_41_sp1));
+                    __m512i iacc_mat_11_4_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_40_sp1, lhs_mat_23_40_sp1),_mm512_maddubs_epi16(rhs_mat_2367ABEF_41_sp1, lhs_mat_23_41_sp1));
+
+                    __m512i iacc_mat_00_5_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_50_sp1, lhs_mat_01_50_sp1),_mm512_maddubs_epi16(rhs_mat_014589CD_51_sp1, lhs_mat_01_51_sp1));
+                    __m512i iacc_mat_01_5_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_50_sp1, lhs_mat_01_50_sp1),_mm512_maddubs_epi16(rhs_mat_2367ABEF_51_sp1, lhs_mat_01_51_sp1));
+
+                    __m512i iacc_mat_10_5_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_50_sp1, lhs_mat_23_50_sp1),_mm512_maddubs_epi16(rhs_mat_014589CD_51_sp1, lhs_mat_23_51_sp1));
+                    __m512i iacc_mat_11_5_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_50_sp1, lhs_mat_23_50_sp1),_mm512_maddubs_epi16(rhs_mat_2367ABEF_51_sp1, lhs_mat_23_51_sp1));
+
+                    __m512i iacc_mat_00_6_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_60_sp1, lhs_mat_01_60_sp1),_mm512_maddubs_epi16(rhs_mat_014589CD_61_sp1, lhs_mat_01_61_sp1));
+                    __m512i iacc_mat_01_6_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_60_sp1, lhs_mat_01_60_sp1),_mm512_maddubs_epi16(rhs_mat_2367ABEF_61_sp1, lhs_mat_01_61_sp1));
+
+                    __m512i iacc_mat_10_6_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_60_sp1, lhs_mat_23_60_sp1),_mm512_maddubs_epi16(rhs_mat_014589CD_61_sp1, lhs_mat_23_61_sp1));
+                    __m512i iacc_mat_11_6_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_60_sp1, lhs_mat_23_60_sp1),_mm512_maddubs_epi16(rhs_mat_2367ABEF_61_sp1, lhs_mat_23_61_sp1));
+
+                    __m512i iacc_mat_00_7_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_70_sp1, lhs_mat_01_70_sp1),_mm512_maddubs_epi16(rhs_mat_014589CD_71_sp1, lhs_mat_01_71_sp1));
+                    __m512i iacc_mat_01_7_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_70_sp1, lhs_mat_01_70_sp1),_mm512_maddubs_epi16(rhs_mat_2367ABEF_71_sp1, lhs_mat_01_71_sp1));
+
+                    __m512i iacc_mat_10_7_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_70_sp1, lhs_mat_23_70_sp1),_mm512_maddubs_epi16(rhs_mat_014589CD_71_sp1, lhs_mat_23_71_sp1));
+                    __m512i iacc_mat_11_7_sp1 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_70_sp1, lhs_mat_23_70_sp1),_mm512_maddubs_epi16(rhs_mat_2367ABEF_71_sp1, lhs_mat_23_71_sp1));
+
+
+                    __m512i iacc_mat_00_0_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_00_sp2, lhs_mat_01_00_sp2),_mm512_maddubs_epi16(rhs_mat_014589CD_01_sp2, lhs_mat_01_01_sp2));
+                    __m512i iacc_mat_01_0_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_00_sp2, lhs_mat_01_00_sp2),_mm512_maddubs_epi16(rhs_mat_2367ABEF_01_sp2, lhs_mat_01_01_sp2));
+
+                    __m512i iacc_mat_10_0_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_00_sp2, lhs_mat_23_00_sp2),_mm512_maddubs_epi16(rhs_mat_014589CD_01_sp2, lhs_mat_23_01_sp2));
+                    __m512i iacc_mat_11_0_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_00_sp2, lhs_mat_23_00_sp2),_mm512_maddubs_epi16(rhs_mat_2367ABEF_01_sp2, lhs_mat_23_01_sp2));
+
+                    __m512i iacc_mat_00_1_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_10_sp2, lhs_mat_01_10_sp2),_mm512_maddubs_epi16(rhs_mat_014589CD_11_sp2, lhs_mat_01_11_sp2));
+                    __m512i iacc_mat_01_1_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_10_sp2, lhs_mat_01_10_sp2),_mm512_maddubs_epi16(rhs_mat_2367ABEF_11_sp2, lhs_mat_01_11_sp2));
+
+                    __m512i iacc_mat_10_1_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_10_sp2, lhs_mat_23_10_sp2),_mm512_maddubs_epi16(rhs_mat_014589CD_11_sp2, lhs_mat_23_11_sp2));
+                    __m512i iacc_mat_11_1_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_10_sp2, lhs_mat_23_10_sp2),_mm512_maddubs_epi16(rhs_mat_2367ABEF_11_sp2, lhs_mat_23_11_sp2));
+
+                    __m512i iacc_mat_00_2_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_20_sp2, lhs_mat_01_20_sp2),_mm512_maddubs_epi16(rhs_mat_014589CD_21_sp2, lhs_mat_01_21_sp2));
+                    __m512i iacc_mat_01_2_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_20_sp2, lhs_mat_01_20_sp2),_mm512_maddubs_epi16(rhs_mat_2367ABEF_21_sp2, lhs_mat_01_21_sp2));
+
+                    __m512i iacc_mat_10_2_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_20_sp2, lhs_mat_23_20_sp2),_mm512_maddubs_epi16(rhs_mat_014589CD_21_sp2, lhs_mat_23_21_sp2));
+                    __m512i iacc_mat_11_2_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_20_sp2, lhs_mat_23_20_sp2),_mm512_maddubs_epi16(rhs_mat_2367ABEF_21_sp2, lhs_mat_23_21_sp2));
+
+                    __m512i iacc_mat_00_3_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_30_sp2, lhs_mat_01_30_sp2),_mm512_maddubs_epi16(rhs_mat_014589CD_31_sp2, lhs_mat_01_31_sp2));
+                    __m512i iacc_mat_01_3_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_30_sp2, lhs_mat_01_30_sp2),_mm512_maddubs_epi16(rhs_mat_2367ABEF_31_sp2, lhs_mat_01_31_sp2));
+
+                    __m512i iacc_mat_10_3_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_30_sp2, lhs_mat_23_30_sp2),_mm512_maddubs_epi16(rhs_mat_014589CD_31_sp2, lhs_mat_23_31_sp2));
+                    __m512i iacc_mat_11_3_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_30_sp2, lhs_mat_23_30_sp2),_mm512_maddubs_epi16(rhs_mat_2367ABEF_31_sp2, lhs_mat_23_31_sp2));
+
+                    __m512i iacc_mat_00_4_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_40_sp2, lhs_mat_01_40_sp2),_mm512_maddubs_epi16(rhs_mat_014589CD_41_sp2, lhs_mat_01_41_sp2));
+                    __m512i iacc_mat_01_4_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_40_sp2, lhs_mat_01_40_sp2),_mm512_maddubs_epi16(rhs_mat_2367ABEF_41_sp2, lhs_mat_01_41_sp2));
+
+                    __m512i iacc_mat_10_4_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_40_sp2, lhs_mat_23_40_sp2),_mm512_maddubs_epi16(rhs_mat_014589CD_41_sp2, lhs_mat_23_41_sp2));
+                    __m512i iacc_mat_11_4_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_40_sp2, lhs_mat_23_40_sp2),_mm512_maddubs_epi16(rhs_mat_2367ABEF_41_sp2, lhs_mat_23_41_sp2));
+
+                    __m512i iacc_mat_00_5_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_50_sp2, lhs_mat_01_50_sp2),_mm512_maddubs_epi16(rhs_mat_014589CD_51_sp2, lhs_mat_01_51_sp2));
+                    __m512i iacc_mat_01_5_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_50_sp2, lhs_mat_01_50_sp2),_mm512_maddubs_epi16(rhs_mat_2367ABEF_51_sp2, lhs_mat_01_51_sp2));
+
+                    __m512i iacc_mat_10_5_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_50_sp2, lhs_mat_23_50_sp2),_mm512_maddubs_epi16(rhs_mat_014589CD_51_sp2, lhs_mat_23_51_sp2));
+                    __m512i iacc_mat_11_5_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_50_sp2, lhs_mat_23_50_sp2),_mm512_maddubs_epi16(rhs_mat_2367ABEF_51_sp2, lhs_mat_23_51_sp2));
+
+                    __m512i iacc_mat_00_6_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_60_sp2, lhs_mat_01_60_sp2),_mm512_maddubs_epi16(rhs_mat_014589CD_61_sp2, lhs_mat_01_61_sp2));
+                    __m512i iacc_mat_01_6_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_60_sp2, lhs_mat_01_60_sp2),_mm512_maddubs_epi16(rhs_mat_2367ABEF_61_sp2, lhs_mat_01_61_sp2));
+
+                    __m512i iacc_mat_10_6_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_60_sp2, lhs_mat_23_60_sp2),_mm512_maddubs_epi16(rhs_mat_014589CD_61_sp2, lhs_mat_23_61_sp2));
+                    __m512i iacc_mat_11_6_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_60_sp2, lhs_mat_23_60_sp2),_mm512_maddubs_epi16(rhs_mat_2367ABEF_61_sp2, lhs_mat_23_61_sp2));
+
+                    __m512i iacc_mat_00_7_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_70_sp2, lhs_mat_01_70_sp2),_mm512_maddubs_epi16(rhs_mat_014589CD_71_sp2, lhs_mat_01_71_sp2));
+                    __m512i iacc_mat_01_7_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_70_sp2, lhs_mat_01_70_sp2),_mm512_maddubs_epi16(rhs_mat_2367ABEF_71_sp2, lhs_mat_01_71_sp2));
+
+                    __m512i iacc_mat_10_7_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_70_sp2, lhs_mat_23_70_sp2),_mm512_maddubs_epi16(rhs_mat_014589CD_71_sp2, lhs_mat_23_71_sp2));
+                    __m512i iacc_mat_11_7_sp2 = _mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_70_sp2, lhs_mat_23_70_sp2),_mm512_maddubs_epi16(rhs_mat_2367ABEF_71_sp2, lhs_mat_23_71_sp2));
+
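+                    // With 2-bit weights capped at 3, the combined sp1+sp2 sums stay well inside
+                    // the signed 16-bit range; the multiply with the scales below widens them to
+                    // 32 bits.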
+                    // Combine results from both shuffle patterns for each output block
+                    __m512i iacc_mat_00_0 = _mm512_add_epi16(iacc_mat_00_0_sp1, iacc_mat_00_0_sp2);
+                    __m512i iacc_mat_01_0 = _mm512_add_epi16(iacc_mat_01_0_sp1, iacc_mat_01_0_sp2);
+                    __m512i iacc_mat_10_0 = _mm512_add_epi16(iacc_mat_10_0_sp1, iacc_mat_10_0_sp2);
+                    __m512i iacc_mat_11_0 = _mm512_add_epi16(iacc_mat_11_0_sp1, iacc_mat_11_0_sp2);
+
+                    __m512i iacc_mat_00_1 = _mm512_add_epi16(iacc_mat_00_1_sp1, iacc_mat_00_1_sp2);
+                    __m512i iacc_mat_01_1 = _mm512_add_epi16(iacc_mat_01_1_sp1, iacc_mat_01_1_sp2);
+                    __m512i iacc_mat_10_1 = _mm512_add_epi16(iacc_mat_10_1_sp1, iacc_mat_10_1_sp2);
+                    __m512i iacc_mat_11_1 = _mm512_add_epi16(iacc_mat_11_1_sp1, iacc_mat_11_1_sp2);
+
+                    __m512i iacc_mat_00_2 = _mm512_add_epi16(iacc_mat_00_2_sp1, iacc_mat_00_2_sp2);
+                    __m512i iacc_mat_01_2 = _mm512_add_epi16(iacc_mat_01_2_sp1, iacc_mat_01_2_sp2);
+                    __m512i iacc_mat_10_2 = _mm512_add_epi16(iacc_mat_10_2_sp1, iacc_mat_10_2_sp2);
+                    __m512i iacc_mat_11_2 = _mm512_add_epi16(iacc_mat_11_2_sp1, iacc_mat_11_2_sp2);
+
+                    __m512i iacc_mat_00_3 = _mm512_add_epi16(iacc_mat_00_3_sp1, iacc_mat_00_3_sp2);
+                    __m512i iacc_mat_01_3 = _mm512_add_epi16(iacc_mat_01_3_sp1, iacc_mat_01_3_sp2);
+                    __m512i iacc_mat_10_3 = _mm512_add_epi16(iacc_mat_10_3_sp1, iacc_mat_10_3_sp2);
+                    __m512i iacc_mat_11_3 = _mm512_add_epi16(iacc_mat_11_3_sp1, iacc_mat_11_3_sp2);
+
+                    __m512i iacc_mat_00_4 = _mm512_add_epi16(iacc_mat_00_4_sp1, iacc_mat_00_4_sp2);
+                    __m512i iacc_mat_01_4 = _mm512_add_epi16(iacc_mat_01_4_sp1, iacc_mat_01_4_sp2);
+                    __m512i iacc_mat_10_4 = _mm512_add_epi16(iacc_mat_10_4_sp1, iacc_mat_10_4_sp2);
+                    __m512i iacc_mat_11_4 = _mm512_add_epi16(iacc_mat_11_4_sp1, iacc_mat_11_4_sp2);
+
+                    __m512i iacc_mat_00_5 = _mm512_add_epi16(iacc_mat_00_5_sp1, iacc_mat_00_5_sp2);
+                    __m512i iacc_mat_01_5 = _mm512_add_epi16(iacc_mat_01_5_sp1, iacc_mat_01_5_sp2);
+                    __m512i iacc_mat_10_5 = _mm512_add_epi16(iacc_mat_10_5_sp1, iacc_mat_10_5_sp2);
+                    __m512i iacc_mat_11_5 = _mm512_add_epi16(iacc_mat_11_5_sp1, iacc_mat_11_5_sp2);
+
+                    __m512i iacc_mat_00_6 = _mm512_add_epi16(iacc_mat_00_6_sp1, iacc_mat_00_6_sp2);
+                    __m512i iacc_mat_01_6 = _mm512_add_epi16(iacc_mat_01_6_sp1, iacc_mat_01_6_sp2);
+                    __m512i iacc_mat_10_6 = _mm512_add_epi16(iacc_mat_10_6_sp1, iacc_mat_10_6_sp2);
+                    __m512i iacc_mat_11_6 = _mm512_add_epi16(iacc_mat_11_6_sp1, iacc_mat_11_6_sp2);
+
+                    __m512i iacc_mat_00_7 = _mm512_add_epi16(iacc_mat_00_7_sp1, iacc_mat_00_7_sp2);
+                    __m512i iacc_mat_01_7 = _mm512_add_epi16(iacc_mat_01_7_sp1, iacc_mat_01_7_sp2);
+                    __m512i iacc_mat_10_7 = _mm512_add_epi16(iacc_mat_10_7_sp1, iacc_mat_10_7_sp2);
+                    __m512i iacc_mat_11_7 = _mm512_add_epi16(iacc_mat_11_7_sp1, iacc_mat_11_7_sp2);
+
+                    // The combined sums now cover the dot products of all 32 values in each sub block; multiply with the sub block scales
+                    iacc_mat_00_0 = _mm512_madd_epi16(iacc_mat_00_0, scale_014589CD_0);
+                    iacc_mat_01_0 = _mm512_madd_epi16(iacc_mat_01_0, scale_2367ABEF_0);
+                    iacc_mat_10_0 = _mm512_madd_epi16(iacc_mat_10_0, scale_014589CD_0);
+                    iacc_mat_11_0 = _mm512_madd_epi16(iacc_mat_11_0, scale_2367ABEF_0);
+
+                    iacc_mat_00_1 = _mm512_madd_epi16(iacc_mat_00_1, scale_014589CD_1);
+                    iacc_mat_01_1 = _mm512_madd_epi16(iacc_mat_01_1, scale_2367ABEF_1);
+                    iacc_mat_10_1 = _mm512_madd_epi16(iacc_mat_10_1, scale_014589CD_1);
+                    iacc_mat_11_1 = _mm512_madd_epi16(iacc_mat_11_1, scale_2367ABEF_1);
+
+                    iacc_mat_00_2 = _mm512_madd_epi16(iacc_mat_00_2, scale_014589CD_2);
+                    iacc_mat_01_2 = _mm512_madd_epi16(iacc_mat_01_2, scale_2367ABEF_2);
+                    iacc_mat_10_2 = _mm512_madd_epi16(iacc_mat_10_2, scale_014589CD_2);
+                    iacc_mat_11_2 = _mm512_madd_epi16(iacc_mat_11_2, scale_2367ABEF_2);
+
+                    iacc_mat_00_3 = _mm512_madd_epi16(iacc_mat_00_3, scale_014589CD_3);
+                    iacc_mat_01_3 = _mm512_madd_epi16(iacc_mat_01_3, scale_2367ABEF_3);
+                    iacc_mat_10_3 = _mm512_madd_epi16(iacc_mat_10_3, scale_014589CD_3);
+                    iacc_mat_11_3 = _mm512_madd_epi16(iacc_mat_11_3, scale_2367ABEF_3);
+
+                    iacc_mat_00_4 = _mm512_madd_epi16(iacc_mat_00_4, scale_014589CD_4);
+                    iacc_mat_01_4 = _mm512_madd_epi16(iacc_mat_01_4, scale_2367ABEF_4);
+                    iacc_mat_10_4 = _mm512_madd_epi16(iacc_mat_10_4, scale_014589CD_4);
+                    iacc_mat_11_4 = _mm512_madd_epi16(iacc_mat_11_4, scale_2367ABEF_4);
+
+                    iacc_mat_00_5 = _mm512_madd_epi16(iacc_mat_00_5, scale_014589CD_5);
+                    iacc_mat_01_5 = _mm512_madd_epi16(iacc_mat_01_5, scale_2367ABEF_5);
+                    iacc_mat_10_5 = _mm512_madd_epi16(iacc_mat_10_5, scale_014589CD_5);
+                    iacc_mat_11_5 = _mm512_madd_epi16(iacc_mat_11_5, scale_2367ABEF_5);
+
+                    iacc_mat_00_6 = _mm512_madd_epi16(iacc_mat_00_6, scale_014589CD_6);
+                    iacc_mat_01_6 = _mm512_madd_epi16(iacc_mat_01_6, scale_2367ABEF_6);
+                    iacc_mat_10_6 = _mm512_madd_epi16(iacc_mat_10_6, scale_014589CD_6);
+                    iacc_mat_11_6 = _mm512_madd_epi16(iacc_mat_11_6, scale_2367ABEF_6);
+
+                    iacc_mat_00_7 = _mm512_madd_epi16(iacc_mat_00_7, scale_014589CD_7);
+                    iacc_mat_01_7 = _mm512_madd_epi16(iacc_mat_01_7, scale_2367ABEF_7);
+                    iacc_mat_10_7 = _mm512_madd_epi16(iacc_mat_10_7, scale_014589CD_7);
+                    iacc_mat_11_7 = _mm512_madd_epi16(iacc_mat_11_7, scale_2367ABEF_7);
+
+                    __m512i iacc_mat_00 = _mm512_add_epi32(_mm512_add_epi32(_mm512_add_epi32(iacc_mat_00_0, iacc_mat_00_1), _mm512_add_epi32(iacc_mat_00_2, iacc_mat_00_3)), _mm512_add_epi32(_mm512_add_epi32(iacc_mat_00_4, iacc_mat_00_5), _mm512_add_epi32(iacc_mat_00_6, iacc_mat_00_7)));
+                    __m512i iacc_mat_01 = _mm512_add_epi32(_mm512_add_epi32(_mm512_add_epi32(iacc_mat_01_0, iacc_mat_01_1), _mm512_add_epi32(iacc_mat_01_2, iacc_mat_01_3)), _mm512_add_epi32(_mm512_add_epi32(iacc_mat_01_4, iacc_mat_01_5), _mm512_add_epi32(iacc_mat_01_6, iacc_mat_01_7)));
+                    __m512i iacc_mat_10 = _mm512_add_epi32(_mm512_add_epi32(_mm512_add_epi32(iacc_mat_10_0, iacc_mat_10_1), _mm512_add_epi32(iacc_mat_10_2, iacc_mat_10_3)), _mm512_add_epi32(_mm512_add_epi32(iacc_mat_10_4, iacc_mat_10_5), _mm512_add_epi32(iacc_mat_10_6, iacc_mat_10_7)));
+                    __m512i iacc_mat_11 = _mm512_add_epi32(_mm512_add_epi32(_mm512_add_epi32(iacc_mat_11_0, iacc_mat_11_1), _mm512_add_epi32(iacc_mat_11_2, iacc_mat_11_3)), _mm512_add_epi32(_mm512_add_epi32(iacc_mat_11_4, iacc_mat_11_5), _mm512_add_epi32(iacc_mat_11_6, iacc_mat_11_7)));
+
+                    // Straighten out to make 4 row vectors
+                    __m512i iacc_row_0 = _mm512_mask_blend_epi32(0xCCCC, iacc_mat_00, _mm512_shuffle_epi32(iacc_mat_01, (_MM_PERM_ENUM)78));
+                    __m512i iacc_row_1 = _mm512_mask_blend_epi32(0xCCCC, _mm512_shuffle_epi32(iacc_mat_00, (_MM_PERM_ENUM)78), iacc_mat_01);
+                    __m512i iacc_row_2 = _mm512_mask_blend_epi32(0xCCCC, iacc_mat_10, _mm512_shuffle_epi32(iacc_mat_11, (_MM_PERM_ENUM)78));
+                    __m512i iacc_row_3 = _mm512_mask_blend_epi32(0xCCCC, _mm512_shuffle_epi32(iacc_mat_10, (_MM_PERM_ENUM)78), iacc_mat_11);
+
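+                    // imm 78 = 0b01001110 swaps the two 64-bit halves of each 128-bit lane; blended
+                    // with mask 0xCCCC this turns the 2x2 tiled accumulators back into four plain
+                    // row vectors.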
+                    // Load the scale values (d) for all the 4 Q8_K blocks and repeat them across lanes
+                    const __m128 row_scale_f32_sse = _mm_load_ps(a_ptr[b].d);
+                    const __m256 row_scale_f32_ymm = _mm256_set_m128(row_scale_f32_sse, row_scale_f32_sse);
+                    const __m512 row_scale_f32 = _mm512_insertf32x8(_mm512_castps256_ps512(row_scale_f32_ymm), row_scale_f32_ymm, 1);
+
+                    // Multiply with appropriate scales and accumulate (for both d and dmin) below
+                    acc_rows[0] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_0), _mm512_mul_ps(col_scale_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 0)), acc_rows[0]);
+                    acc_rows[1] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_1), _mm512_mul_ps(col_scale_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 85)), acc_rows[1]);
+                    acc_rows[2] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_2), _mm512_mul_ps(col_scale_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 170)), acc_rows[2]);
+                    acc_rows[3] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_3), _mm512_mul_ps(col_scale_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 255)), acc_rows[3]);
+
+                    // Take two bsums from two Q8_Ks at a time and multiply with the corresponding min values from each Q2_K
+                    __m512i iacc_row_min_0_01 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_01_0123, (_MM_PERM_ENUM)0), mins_01);
+                    __m512i iacc_row_min_1_01 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_01_0123, (_MM_PERM_ENUM)170), mins_01);
+                    __m512i iacc_row_min_2_01 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_23_0123, (_MM_PERM_ENUM)0), mins_01);
+                    __m512i iacc_row_min_3_01 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_23_0123, (_MM_PERM_ENUM)170), mins_01);
+
+                    __m512i iacc_row_min_0_23 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_01_0123, (_MM_PERM_ENUM)85), mins_23);
+                    __m512i iacc_row_min_1_23 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_01_0123, (_MM_PERM_ENUM)255), mins_23);
+                    __m512i iacc_row_min_2_23 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_23_0123, (_MM_PERM_ENUM)85), mins_23);
+                    __m512i iacc_row_min_3_23 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_23_0123, (_MM_PERM_ENUM)255), mins_23);
+
+                    __m512i iacc_row_min_0_45 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_01_4567, (_MM_PERM_ENUM)0), mins_45);
+                    __m512i iacc_row_min_1_45 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_01_4567, (_MM_PERM_ENUM)170), mins_45);
+                    __m512i iacc_row_min_2_45 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_23_4567, (_MM_PERM_ENUM)0), mins_45);
+                    __m512i iacc_row_min_3_45 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_23_4567, (_MM_PERM_ENUM)170), mins_45);
+
+                    __m512i iacc_row_min_0_67 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_01_4567, (_MM_PERM_ENUM)85), mins_67);
+                    __m512i iacc_row_min_1_67 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_01_4567, (_MM_PERM_ENUM)255), mins_67);
+                    __m512i iacc_row_min_2_67 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_23_4567, (_MM_PERM_ENUM)85), mins_67);
+                    __m512i iacc_row_min_3_67 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_23_4567, (_MM_PERM_ENUM)255), mins_67);
+
+                    __m512i iacc_row_min_0 = _mm512_add_epi32(_mm512_add_epi32(iacc_row_min_0_01, iacc_row_min_0_23), _mm512_add_epi32(iacc_row_min_0_45,iacc_row_min_0_67));
+                    __m512i iacc_row_min_1 = _mm512_add_epi32(_mm512_add_epi32(iacc_row_min_1_01, iacc_row_min_1_23), _mm512_add_epi32(iacc_row_min_1_45,iacc_row_min_1_67));
+                    __m512i iacc_row_min_2 = _mm512_add_epi32(_mm512_add_epi32(iacc_row_min_2_01, iacc_row_min_2_23), _mm512_add_epi32(iacc_row_min_2_45,iacc_row_min_2_67));
+                    __m512i iacc_row_min_3 = _mm512_add_epi32(_mm512_add_epi32(iacc_row_min_3_01, iacc_row_min_3_23), _mm512_add_epi32(iacc_row_min_3_45,iacc_row_min_3_67));
+
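+                    // Q2_K dequantizes as d * scale * q - dmin * min, so these bsum x min dot
+                    // products are scaled by dmin and subtracted from the main accumulators when
+                    // the tile is stored.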
+                    acc_min_rows[0] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_min_0), _mm512_mul_ps(col_dmin_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 0)), acc_min_rows[0]);
+                    acc_min_rows[1] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_min_1), _mm512_mul_ps(col_dmin_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 85)), acc_min_rows[1]);
+                    acc_min_rows[2] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_min_2), _mm512_mul_ps(col_dmin_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 170)), acc_min_rows[2]);
+                    acc_min_rows[3] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_min_3), _mm512_mul_ps(col_dmin_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 255)), acc_min_rows[3]);
+                }
+            }
+            // Store accumulated values
+            for (int i = 0; i < 4; i++) {
+                _mm512_storeu_ps((float * )(s + ((y * 4 + i) * bs + x * 8)), _mm512_sub_ps(acc_rows[i], acc_min_rows[i]));
+            }
+        }
+    }
+
+    if (anc != nc) {
+        xstart = anc/8;
+        y = 0;
+    }
+
+#endif //AVX512F
+
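+    // 256-bit tail path: if AVX-512 processed only anc of nc columns, xstart and y were
+    // reset above so the remaining columns are handled with AVX2 vectors below.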
+    // Take group of four block_q8_Kx4 structures at each pass of the loop and perform dot product operation
+    for (; y < anr / 4; y += 4) {
+
+        const block_q8_Kx4 * a_ptrs[4];
+
+        a_ptrs[0] = a_ptr_start + (y * nb);
+        for (int i = 0; i < 3; ++i) {
+            a_ptrs[i + 1] = a_ptrs[i] + nb;
+        }
+
+        // Take group of eight block_q2_kx8 structures at each pass of the loop and perform dot product operation
+        for (int64_t x = xstart; x < nc / 8; x++) {
+
+            const block_q2_Kx8 * b_ptr = b_ptr_start + (x * b_nb);
+
+            // Master FP accumulators
+            __m256 acc_rows[16];
+            for (int i = 0; i < 16; i++) {
+                acc_rows[i] = _mm256_setzero_ps();
+            }
+
+            __m256 acc_min_rows[16];
+            for (int i = 0; i < 16; i++) {
+                acc_min_rows[i] = _mm256_setzero_ps();
+            }
+
+            // For super block
+            for (int64_t b = 0; b < nb; b++) {
+                // Delta values - Load the eight scale values of block_q2_kx8
+                const __m256 col_scale_f32 = GGML_F32Cx8_LOAD(b_ptr[b].d);
+
+                // dmin values - Load the eight dmin values of block_q2_kx8
+                const __m256 col_dmin_f32 = GGML_F32Cx8_LOAD(b_ptr[b].dmin);
+
+                // Loop to iterate over the sixteen sub blocks of a super block - eight sub blocks are processed per iteration
+                for (int sb = 0; sb < QK_K / 128; sb++) {
+
+                    // Load the eight block_q2_K for eight sub blocks quantized values interleaved with each other in chunks of eight bytes - B0,B1 ....B6,B7
+                    const __m256i rhs_raw_mat_0123_0 = _mm256_loadu_si256((const __m256i *)(b_ptr[b].qs + sb * 256));
+                    const __m256i rhs_raw_mat_4567_0 = _mm256_loadu_si256((const __m256i *)(b_ptr[b].qs + 32 + sb * 256));
+                    const __m256i rhs_raw_mat_0123_1 = _mm256_loadu_si256((const __m256i *)(b_ptr[b].qs + 64 + sb * 256));
+                    const __m256i rhs_raw_mat_4567_1 = _mm256_loadu_si256((const __m256i *)(b_ptr[b].qs + 96 + sb * 256));
+                    const __m256i rhs_raw_mat_0123_2 = _mm256_loadu_si256((const __m256i *)(b_ptr[b].qs + 128 + sb * 256));
+                    const __m256i rhs_raw_mat_4567_2 = _mm256_loadu_si256((const __m256i *)(b_ptr[b].qs + 160 + sb * 256));
+                    const __m256i rhs_raw_mat_0123_3 = _mm256_loadu_si256((const __m256i *)(b_ptr[b].qs + 192 + sb * 256));
+                    const __m256i rhs_raw_mat_4567_3 = _mm256_loadu_si256((const __m256i *)(b_ptr[b].qs + 224 + sb * 256));
+
+                    // Save the values in the following vectors in the formats B0B1B4B5, B2B3B6B7 for further processing and storing of values
+                    //superblock sub block which part of sub block
+                    const __m256i rhs_raw_mat_0145_0 = _mm256_blend_epi32(rhs_raw_mat_0123_0, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_0, requiredOrder), 240);
+                    const __m256i rhs_raw_mat_2367_0 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_0, requiredOrder), rhs_raw_mat_4567_0, 240);
+
+                    const __m256i rhs_raw_mat_0145_1 = _mm256_blend_epi32(rhs_raw_mat_0123_1, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_1, requiredOrder), 240);
+                    const __m256i rhs_raw_mat_2367_1 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_1, requiredOrder), rhs_raw_mat_4567_1, 240);
+
+                    const __m256i rhs_raw_mat_0145_2 = _mm256_blend_epi32(rhs_raw_mat_0123_2, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_2, requiredOrder), 240);
+                    const __m256i rhs_raw_mat_2367_2 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_2, requiredOrder), rhs_raw_mat_4567_2, 240);
+
+                    const __m256i rhs_raw_mat_0145_3 = _mm256_blend_epi32(rhs_raw_mat_0123_3, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_3, requiredOrder), 240);
+                    const __m256i rhs_raw_mat_2367_3 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_3, requiredOrder), rhs_raw_mat_4567_3, 240);
+
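+                    // Each payload byte packs four 2-bit weights; shifting right by 0/2/4/6 and
+                    // masking with m3b (0x03 in every byte, defined earlier) unpacks the eight sub
+                    // blocks. _mm256_srli_epi16 is safe here because bits shifted across byte
+                    // boundaries are cleared by the mask.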
+                    // 2-bit -> 8-bit
+                    // First sub block of the eight sub blocks processed in the iteration
+                    const __m256i rhs_mat_0145_00 = _mm256_and_si256(rhs_raw_mat_0145_0, m3b); //B00(0-7) B01(0-7) B04(0-7) B05(0-7)
+                    const __m256i rhs_mat_2367_00 = _mm256_and_si256(rhs_raw_mat_2367_0, m3b); //B02(0-7) B03(0-7) B06(0-7) B07(0-7)
+
+                    const __m256i rhs_mat_0145_01 = _mm256_and_si256(rhs_raw_mat_0145_1, m3b); //B00(8-15) B01(8-15) B04(8-15) B05(8-15)
+                    const __m256i rhs_mat_2367_01 = _mm256_and_si256(rhs_raw_mat_2367_1, m3b); //B02(8-15) B03(8-15) B06(8-15) B07(8-15)
+
+                    // Second sub block of the eight sub blocks processed in the iteration
+                    const __m256i rhs_mat_0145_10 = _mm256_and_si256(rhs_raw_mat_0145_2, m3b); //B10(0-7) B11(0-7) B14(0-7) B15(0-7)
+                    const __m256i rhs_mat_2367_10 = _mm256_and_si256(rhs_raw_mat_2367_2, m3b); //B12(0-7) B13(0-7) B16(0-7) B17(0-7)
+
+                    const __m256i rhs_mat_0145_11 = _mm256_and_si256(rhs_raw_mat_0145_3, m3b); //B10(8-15) B11(8-15) B14(8-15) B15(8-15)
+                    const __m256i rhs_mat_2367_11 = _mm256_and_si256(rhs_raw_mat_2367_3, m3b); //B12(8-15) B13(8-15) B16(8-15) B17(8-15)
+
+                    // Third sub block of the eight sub blocks processed in the iteration
+                    const __m256i rhs_mat_0145_20 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_0, 2), m3b); //B20(0-7) B21(0-7) B24(0-7) B25(0-7)
+                    const __m256i rhs_mat_2367_20 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_0, 2), m3b); //B22(0-7) B23(0-7) B26(0-7) B27(0-7)
+
+                    const __m256i rhs_mat_0145_21 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_1, 2), m3b); //B20(8-15) B21(8-15) B24(8-15) B25(8-15)
+                    const __m256i rhs_mat_2367_21 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_1, 2), m3b); //B22(8-15) B23(8-15) B26(8-15) B27(8-15)
+
+                    // Fourth sub block of the eight sub blocks processed in the iteration
+                    const __m256i rhs_mat_0145_30 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_2, 2), m3b); //B30(0-7) B31(0-7) B34(0-7) B35(0-7)
+                    const __m256i rhs_mat_2367_30 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_2, 2), m3b); //B32(0-7) B33(0-7) B36(0-7) B37(0-7)
+
+                    const __m256i rhs_mat_0145_31 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_3, 2), m3b); //B30(8-15) B31(8-15) B34(8-15) B35(8-15)
+                    const __m256i rhs_mat_2367_31 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_3, 2), m3b); //B32(8-15) B33(8-15) B36(8-15) B37(8-15)
+
+                    // Fifth sub block of the eight sub blocks processed in the iteration
+                    const __m256i rhs_mat_0145_40 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_0, 4), m3b); //B40(0-7) B41(0-7) B44(0-7) B45(0-7)
+                    const __m256i rhs_mat_2367_40 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_0, 4), m3b); //B42(0-7) B43(0-7) B46(0-7) B47(0-7)
+
+                    const __m256i rhs_mat_0145_41 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_1, 4), m3b); //B40(8-15) B41(8-15) B44(8-15) B45(8-15)
+                    const __m256i rhs_mat_2367_41 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_1, 4), m3b); //B42(8-15) B43(8-15) B46(8-15) B47(8-15)
+
+                    // Sixth sub block of the eight sub blocks processed in the iteration
+                    const __m256i rhs_mat_0145_50 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_2, 4), m3b); //B50(0-7) B51(0-7) B54(0-7) B55(0-7)
+                    const __m256i rhs_mat_2367_50 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_2, 4), m3b); //B52(0-7) B53(0-7) B56(0-7) B57(0-7)
+
+                    const __m256i rhs_mat_0145_51 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_3, 4), m3b); //B50(8-15) B51(8-15) B54(8-15) B55(8-15)
+                    const __m256i rhs_mat_2367_51 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_3, 4), m3b); //B52(8-15) B53(8-15) B56(8-15) B57(8-15)
+
+                    // Seventh sub block of the eight sub blocks processed in the iteration
+                    const __m256i rhs_mat_0145_60 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_0, 6), m3b); //B60(0-7) B61(0-7) B64(0-7) B65(0-7)
+                    const __m256i rhs_mat_2367_60 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_0, 6), m3b); //B62(0-7) B63(0-7) B66(0-7) B67(0-7)
+
+                    const __m256i rhs_mat_0145_61 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_1, 6), m3b); //B60(8-15) B61(8-15) B64(8-15) B65(8-15)
+                    const __m256i rhs_mat_2367_61 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_1, 6), m3b); //B62(8-15) B63(8-15) B66(8-15) B67(8-15)
+
+                    // Eighth sub block of the eight sub blocks processed in the iteration
+                    const __m256i rhs_mat_0145_70 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_2, 6), m3b); //B70(0-7) B71(0-7) B74(0-7) B75(0-7)
+                    const __m256i rhs_mat_2367_70 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_2, 6), m3b); //B72(0-7) B73(0-7) B76(0-7) B77(0-7)
+
+                    const __m256i rhs_mat_0145_71 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_3, 6), m3b); //B70(8-15) B71(8-15) B74(8-15) B75(8-15)
+                    const __m256i rhs_mat_2367_71 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_3, 6), m3b); //B72(8-15) B73(8-15) B76(8-15) B77(8-15)
+
+                    // Shuffle pattern one - right side input
+                    const __m256i rhs_mat_0145_00_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_00, 136); //B00(0-3) B01(0-3) B00(0-3) B01(0-3) B04(0-3) B05(0-3) B04(0-3) B05(0-3)
+                    const __m256i rhs_mat_2367_00_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_00, 136); //B02(0-3) B03(0-3) B02(0-3) B03(0-3) B06(0-3) B07(0-3) B06(0-3) B07(0-3)
+
+                    const __m256i rhs_mat_0145_01_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_01, 136); //B00(8-11) B01(8-11) B00(8-11) B01(8-11) B04(8-11) B05(8-11) B04(8-11) B05(8-11)
+                    const __m256i rhs_mat_2367_01_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_01, 136); //B02(8-11) B03(8-11) B02(8-11) B03(8-11) B06(8-11) B07(8-11) B06(8-11) B07(8-11)
+
+                    const __m256i rhs_mat_0145_10_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_10, 136); //B10(0-3) B11(0-3) B10(0-3) B11(0-3) B14(0-3) B15(0-3) B14(0-3) B15(0-3)
+                    const __m256i rhs_mat_2367_10_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_10, 136); //B12(0-3) B13(0-3) B12(0-3) B13(0-3) B16(0-3) B17(0-3) B16(0-3) B17(0-3)
+
+                    const __m256i rhs_mat_0145_11_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_11, 136); //B10(8-11) B11(8-11) B10(8-11) B11(8-11) B14(8-11) B15(8-11) B14(8-11) B15(8-11)
+                    const __m256i rhs_mat_2367_11_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_11, 136); //B12(8-11) B13(8-11) B12(8-11) B13(8-11) B16(8-11) B17(8-11) B16(8-11) B17(8-11)
+
+                    const __m256i rhs_mat_0145_20_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_20, 136); //B20(0-3) B21(0-3) B20(0-3) B21(0-3) B24(0-3) B25(0-3) B24(0-3) B25(0-3)
+                    const __m256i rhs_mat_2367_20_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_20, 136); //B22(0-3) B23(0-3) B22(0-3) B23(0-3) B26(0-3) B27(0-3) B26(0-3) B27(0-3)
+
+                    const __m256i rhs_mat_0145_21_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_21, 136); //B20(8-11) B21(8-11) B20(8-11) B21(8-11) B24(8-11) B25(8-11) B24(8-11) B25(8-11)
+                    const __m256i rhs_mat_2367_21_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_21, 136); //B22(8-11) B23(8-11) B22(8-11) B23(8-11) B26(8-11) B27(8-11) B26(8-11) B27(8-11)
+
+                    const __m256i rhs_mat_0145_30_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_30, 136); //B30(0-3) B31(0-3) B30(0-3) B31(0-3) B34(0-3) B35(0-3) B34(0-3) B35(0-3)
+                    const __m256i rhs_mat_2367_30_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_30, 136); //B32(0-3) B33(0-3) B32(0-3) B33(0-3) B36(0-3) B37(0-3) B36(0-3) B37(0-3)
+
+                    const __m256i rhs_mat_0145_31_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_31, 136); //B30(8-11) B31(8-11) B30(8-11) B31(8-11) B34(8-11) B35(8-11) B34(8-11) B35(8-11)
+                    const __m256i rhs_mat_2367_31_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_31, 136); //B32(8-11) B33(8-11) B32(8-11) B33(8-11) B36(8-11) B37(8-11) B36(8-11) B37(8-11)
+
+                    const __m256i rhs_mat_0145_40_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_40, 136); //B40(0-3) B41(0-3) B40(0-3) B41(0-3) B44(0-3) B45(0-3) B44(0-3) B45(0-3)
+                    const __m256i rhs_mat_2367_40_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_40, 136); //B42(0-3) B43(0-3) B42(0-3) B43(0-3) B46(0-3) B47(0-3) B46(0-3) B47(0-3)
+
+                    const __m256i rhs_mat_0145_41_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_41, 136); //B40(8-11) B41(8-11) B40(8-11) B41(8-11) B44(8-11) B45(8-11) B44(8-11) B45(8-11)
+                    const __m256i rhs_mat_2367_41_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_41, 136); //B42(8-11) B43(8-11) B42(8-11) B43(8-11) B46(8-11) B47(8-11) B46(8-11) B47(8-11)
+
+                    const __m256i rhs_mat_0145_50_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_50, 136); //B50(0-3) B51(0-3) B50(0-3) B51(0-3) B54(0-3) B55(0-3) B54(0-3) B55(0-3)
+                    const __m256i rhs_mat_2367_50_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_50, 136); //B52(0-3) B53(0-3) B52(0-3) B53(0-3) B56(0-3) B57(0-3) B56(0-3) B57(0-3)
+
+                    const __m256i rhs_mat_0145_51_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_51, 136); //B50(8-11) B51(8-11) B50(8-11) B51(8-11) B54(8-11) B55(8-11) B54(8-11) B55(8-11)
+                    const __m256i rhs_mat_2367_51_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_51, 136); //B52(8-11) B53(8-11) B52(8-11) B53(8-11) B56(8-11) B57(8-11) B56(8-11) B57(8-11)
+
+                    const __m256i rhs_mat_0145_60_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_60, 136); //B60(0-3) B61(0-3) B60(0-3) B61(0-3) B64(0-3) B65(0-3) B64(0-3) B65(0-3)
+                    const __m256i rhs_mat_2367_60_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_60, 136); //B62(0-3) B63(0-3) B62(0-3) B63(0-3) B66(0-3) B67(0-3) B66(0-3) B67(0-3)
+
+                    const __m256i rhs_mat_0145_61_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_61, 136); //B60(8-11) B61(8-11) B60(8-11) B61(8-11) B64(8-11) B65(8-11) B64(8-11) B65(8-11)
+                    const __m256i rhs_mat_2367_61_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_61, 136); //B62(8-11) B63(8-11) B62(8-11) B63(8-11) B66(8-11) B67(8-11) B66(8-11) B67(8-11)
+
+                    const __m256i rhs_mat_0145_70_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_70, 136); //B70(0-3) B71(0-3) B70(0-3) B71(0-3) B74(0-3) B75(0-3) B74(0-3) B75(0-3)
+                    const __m256i rhs_mat_2367_70_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_70, 136); //B72(0-3) B73(0-3) B72(0-3) B73(0-3) B76(0-3) B77(0-3) B76(0-3) B77(0-3)
+
+                    const __m256i rhs_mat_0145_71_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_71, 136); //B70(8-11) B71(8-11) B70(8-11) B71(8-11) B74(8-11) B75(8-11) B74(8-11) B75(8-11)
+                    const __m256i rhs_mat_2367_71_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_71, 136); //B72(8-11) B73(8-11) B72(8-11) B73(8-11) B76(8-11) B77(8-11) B76(8-11) B77(8-11)
+
+
+                    // Shuffle pattern two - right side input
+                    const __m256i rhs_mat_0145_00_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_00, 221); //B00(4-7) B01(4-7) B00(4-7) B01(4-7) B04(4-7) B05(4-7) B04(4-7) B05(4-7)
+                    const __m256i rhs_mat_2367_00_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_00, 221); //B02(4-7) B03(4-7) B02(4-7) B03(4-7) B06(4-7) B07(4-7) B06(4-7) B07(4-7)
+
+                    const __m256i rhs_mat_0145_01_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_01, 221); //B00(12-15) B01(12-15) B00(12-15) B01(12-15) B04(12-15) B05(12-15) B04(12-15) B05(12-15)
+                    const __m256i rhs_mat_2367_01_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_01, 221); //B02(12-15) B03(12-15) B02(12-15) B03(12-15) B06(12-15) B07(12-15) B06(12-15) B07(12-15)
+
+                    const __m256i rhs_mat_0145_10_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_10, 221); //B10(4-7) B11(4-7) B10(4-7) B11(4-7) B14(4-7) B15(4-7) B14(4-7) B15(4-7)
+                    const __m256i rhs_mat_2367_10_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_10, 221); //B12(4-7) B13(4-7) B12(4-7) B13(4-7) B16(4-7) B17(4-7) B16(4-7) B17(4-7)
+
+                    const __m256i rhs_mat_0145_11_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_11, 221); //B10(12-15) B11(12-15) B10(12-15) B11(12-15) B14(12-15) B15(12-15) B14(12-15) B15(12-15)
+                    const __m256i rhs_mat_2367_11_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_11, 221); //B12(12-15) B13(12-15) B12(12-15) B13(12-15) B16(12-15) B17(12-15) B16(12-15) B17(12-15)
+
+                    const __m256i rhs_mat_0145_20_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_20, 221); //B20(4-7) B21(4-7) B20(4-7) B21(4-7) B24(4-7) B25(4-7) B24(4-7) B25(4-7)
+                    const __m256i rhs_mat_2367_20_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_20, 221); //B22(4-7) B23(4-7) B22(4-7) B23(4-7) B26(4-7) B27(4-7) B26(4-7) B27(4-7)
+
+                    const __m256i rhs_mat_0145_21_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_21, 221); //B20(12-15) B21(12-15) B20(12-15) B21(12-15) B24(12-15) B25(12-15) B24(12-15) B25(12-15)
+                    const __m256i rhs_mat_2367_21_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_21, 221); //B22(12-15) B23(12-15) B22(12-15) B23(12-15) B26(12-15) B27(12-15) B26(12-15) B27(12-15)
+
+                    const __m256i rhs_mat_0145_30_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_30, 221); //B30(4-7) B31(4-7) B30(4-7) B31(4-7) B34(4-7) B35(4-7) B34(4-7) B35(4-7)
+                    const __m256i rhs_mat_2367_30_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_30, 221); //B32(4-7) B33(4-7) B32(4-7) B33(4-7) B36(4-7) B37(4-7) B36(4-7) B37(4-7)
+
+                    const __m256i rhs_mat_0145_31_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_31, 221); //B30(12-15) B31(12-15) B30(12-15) B31(12-15) B34(12-15) B35(12-15) B34(12-15) B35(12-15)
+                    const __m256i rhs_mat_2367_31_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_31, 221); //B32(12-15) B33(12-15) B32(12-15) B33(12-15) B36(12-15) B37(12-15) B36(12-15) B37(12-15)
+
+                    const __m256i rhs_mat_0145_40_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_40, 221); //B40(4-7) B41(4-7) B40(4-7) B41(4-7) B44(4-7) B45(4-7) B44(4-7) B45(4-7)
+                    const __m256i rhs_mat_2367_40_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_40, 221); //B42(4-7) B43(4-7) B42(4-7) B43(4-7) B46(4-7) B47(4-7) B46(4-7) B47(4-7)
+
+                    const __m256i rhs_mat_0145_41_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_41, 221); //B40(12-15) B41(12-15) B40(12-15) B41(12-15) B44(12-15) B45(12-15) B44(12-15) B45(12-15)
+                    const __m256i rhs_mat_2367_41_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_41, 221); //B42(12-15) B43(12-15) B42(12-15) B43(12-15) B46(12-15) B47(12-15) B46(12-15) B47(12-15)
+
+                    const __m256i rhs_mat_0145_50_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_50, 221); //B50(4-7) B51(4-7) B50(4-7) B51(4-7) B54(4-7) B55(4-7) B54(4-7) B55(4-7)
+                    const __m256i rhs_mat_2367_50_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_50, 221); //B52(4-7) B53(4-7) B52(4-7) B53(4-7) B56(4-7) B57(4-7) B56(4-7) B57(4-7)
+
+                    const __m256i rhs_mat_0145_51_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_51, 221); //B50(12-15) B51(12-15) B50(12-15) B51(12-15) B54(12-15) B55(12-15) B54(12-15) B55(12-15)
+                    const __m256i rhs_mat_2367_51_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_51, 221); //B52(12-15) B53(12-15) B52(12-15) B53(12-15) B56(12-15) B57(12-15) B56(12-15) B57(12-15)
+
+                    const __m256i rhs_mat_0145_60_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_60, 221); //B60(4-7) B61(4-7) B60(4-7) B61(4-7) B64(4-7) B65(4-7) B64(4-7) B65(4-7)
+                    const __m256i rhs_mat_2367_60_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_60, 221); //B62(4-7) B63(4-7) B62(4-7) B63(4-7) B66(4-7) B67(4-7) B66(4-7) B67(4-7)
+
+                    const __m256i rhs_mat_0145_61_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_61, 221); //B60(12-15) B61(12-15) B60(12-15) B61(12-15) B64(12-15) B65(12-15) B64(12-15) B65(12-15)
+                    const __m256i rhs_mat_2367_61_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_61, 221); //B62(12-15) B63(12-15) B62(12-15) B63(12-15) B66(12-15) B67(12-15) B66(12-15) B67(12-15)
+
+                    const __m256i rhs_mat_0145_70_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_70, 221); //B70(4-7) B71(4-7) B70(4-7) B71(4-7) B74(4-7) B75(4-7) B74(4-7) B75(4-7)
+                    const __m256i rhs_mat_2367_70_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_70, 221); //B72(4-7) B73(4-7) B72(4-7) B73(4-7) B76(4-7) B77(4-7) B76(4-7) B77(4-7)
+
+                    const __m256i rhs_mat_0145_71_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_71, 221); //B70(12-15) B71(12-15) B70(12-15) B71(12-15) B74(12-15) B75(12-15) B74(12-15) B75(12-15)
+                    const __m256i rhs_mat_2367_71_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_71, 221); //B72(12-15) B73(12-15) B72(12-15) B73(12-15) B76(12-15) B77(12-15) B76(12-15) B77(12-15)
+
+                    //Scales and Mins of corresponding sub blocks from different Q2_K structures are stored together
+                    //s00 m00 s01 m01 s10 m10 s11 m11 s20 m20 s21 m21 s30 m30 s31 m31 s40 m40 s41 m41 s50 m50 s51 m51 s60 m60 s61 m61 s70 m70 s71 m71
+
+                    // Combine mins and scales for sub-blocks: 0-1, 2-3, 4-5, 6-7 in the sb loop
+                    const __m128i mins_and_scales_01 = _mm_loadu_si128((const __m128i *)(b_ptr[b].scales + sb * 64));
+                    const __m128i mins_and_scales_23 = _mm_loadu_si128((const __m128i *)(b_ptr[b].scales + 16 + sb * 64));
+                    const __m128i mins_and_scales_45 = _mm_loadu_si128((const __m128i *)(b_ptr[b].scales + 32 + sb * 64));
+                    const __m128i mins_and_scales_67 = _mm_loadu_si128((const __m128i *)(b_ptr[b].scales + 48 + sb * 64));
+
+                    // Extract scales which is lower half from mins_and_scales
+                    const __m128i scales_01 = _mm_and_si128(mins_and_scales_01, m4b_sse);
+                    const __m128i scales_23 = _mm_and_si128(mins_and_scales_23, m4b_sse);
+                    const __m128i scales_45 = _mm_and_si128(mins_and_scales_45, m4b_sse);
+                    const __m128i scales_67 = _mm_and_si128(mins_and_scales_67, m4b_sse);
+
+                    // Extract mins which is upper half from mins_and_scales
+                    const __m256i mins_01 = _mm256_cvtepu8_epi16(_mm_and_si128(_mm_srli_epi16(mins_and_scales_01, 4), m4b_sse));
+                    const __m256i mins_23 = _mm256_cvtepu8_epi16(_mm_and_si128(_mm_srli_epi16(mins_and_scales_23, 4), m4b_sse));
+                    const __m256i mins_45 = _mm256_cvtepu8_epi16(_mm_and_si128(_mm_srli_epi16(mins_and_scales_45, 4), m4b_sse));
+                    const __m256i mins_67 = _mm256_cvtepu8_epi16(_mm_and_si128(_mm_srli_epi16(mins_and_scales_67, 4), m4b_sse));
+
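+                    // Each 16-byte load above covers two sub blocks' packed scale/min nibbles; the
+                    // scalesmask shuffles (defined earlier) spread each sub block's scales into the
+                    // dword layout the 16-bit multiplies below expect.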
+
+                    const __m256i scale_0145_0 = _mm256_shuffle_epi32(scales_0, 68);
+                    const __m256i scale_2367_0 = _mm256_shuffle_epi32(scales_0, 238);
+
+                    const __m256i scale_0145_1 = _mm256_shuffle_epi32(scales_1, 68);
+                    const __m256i scale_2367_1 = _mm256_shuffle_epi32(scales_1, 238);
+
+                    const __m256i scale_0145_2 = _mm256_shuffle_epi32(scales_2, 68);
+                    const __m256i scale_2367_2 = _mm256_shuffle_epi32(scales_2, 238);
+
+                    const __m256i scale_0145_3 = _mm256_shuffle_epi32(scales_3, 68);
+                    const __m256i scale_2367_3 = _mm256_shuffle_epi32(scales_3, 238);
+
+                    const __m256i scale_0145_4 = _mm256_shuffle_epi32(scales_4, 68);
+                    const __m256i scale_2367_4 = _mm256_shuffle_epi32(scales_4, 238);
+
+                    const __m256i scale_0145_5 = _mm256_shuffle_epi32(scales_5, 68);
+                    const __m256i scale_2367_5 = _mm256_shuffle_epi32(scales_5, 238);
+
+                    const __m256i scale_0145_6 = _mm256_shuffle_epi32(scales_6, 68);
+                    const __m256i scale_2367_6 = _mm256_shuffle_epi32(scales_6, 238);
+
+                    const __m256i scale_0145_7 = _mm256_shuffle_epi32(scales_7, 68);
+                    const __m256i scale_2367_7 = _mm256_shuffle_epi32(scales_7, 238);
+
+
+                    for (int rp = 0; rp < 4; rp++) {
+
+                        // Load the four block_q8_k quantized values interleaved with each other in chunks of eight bytes - A0,A1,A2,A3
+                        // Loaded as set of 128 bit vectors and repeated into a 256 bit vector
+                        __m256i lhs_mat_0123_00 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 512 * sb)));
+                        __m256i lhs_mat_01_00 = _mm256_permute2f128_si256(lhs_mat_0123_00, lhs_mat_0123_00, 0);
+                        __m256i lhs_mat_23_00 = _mm256_permute2f128_si256(lhs_mat_0123_00, lhs_mat_0123_00, 17);
+                        __m256i lhs_mat_0123_01 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 32 + 512 * sb)));
+                        __m256i lhs_mat_01_01 = _mm256_permute2f128_si256(lhs_mat_0123_01, lhs_mat_0123_01, 0);
+                        __m256i lhs_mat_23_01 = _mm256_permute2f128_si256(lhs_mat_0123_01, lhs_mat_0123_01, 17);
+                        __m256i lhs_mat_0123_10 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 64 + 512 * sb)));
+                        __m256i lhs_mat_01_10 = _mm256_permute2f128_si256(lhs_mat_0123_10, lhs_mat_0123_10, 0);
+                        __m256i lhs_mat_23_10 = _mm256_permute2f128_si256(lhs_mat_0123_10, lhs_mat_0123_10, 17);
+                        __m256i lhs_mat_0123_11 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 96 + 512 * sb)));
+                        __m256i lhs_mat_01_11 = _mm256_permute2f128_si256(lhs_mat_0123_11, lhs_mat_0123_11, 0);
+                        __m256i lhs_mat_23_11 = _mm256_permute2f128_si256(lhs_mat_0123_11, lhs_mat_0123_11, 17);
+                        __m256i lhs_mat_0123_20 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 128 + 512 * sb)));
+                        __m256i lhs_mat_01_20 = _mm256_permute2f128_si256(lhs_mat_0123_20, lhs_mat_0123_20, 0);
+                        __m256i lhs_mat_23_20 = _mm256_permute2f128_si256(lhs_mat_0123_20, lhs_mat_0123_20, 17);
+                        __m256i lhs_mat_0123_21 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 160 + 512 * sb)));
+                        __m256i lhs_mat_01_21 = _mm256_permute2f128_si256(lhs_mat_0123_21, lhs_mat_0123_21, 0);
+                        __m256i lhs_mat_23_21 = _mm256_permute2f128_si256(lhs_mat_0123_21, lhs_mat_0123_21, 17);
+                        __m256i lhs_mat_0123_30 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 192 + 512 * sb)));
+                        __m256i lhs_mat_01_30 = _mm256_permute2f128_si256(lhs_mat_0123_30, lhs_mat_0123_30, 0);
+                        __m256i lhs_mat_23_30 = _mm256_permute2f128_si256(lhs_mat_0123_30, lhs_mat_0123_30, 17);
+                        __m256i lhs_mat_0123_31 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 224 + 512 * sb)));
+                        __m256i lhs_mat_01_31 = _mm256_permute2f128_si256(lhs_mat_0123_31, lhs_mat_0123_31, 0);
+                        __m256i lhs_mat_23_31 = _mm256_permute2f128_si256(lhs_mat_0123_31, lhs_mat_0123_31, 17);
+
+                        __m256i lhs_mat_0123_40 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 256 + 512 * sb)));
+                        __m256i lhs_mat_01_40 = _mm256_permute2f128_si256(lhs_mat_0123_40, lhs_mat_0123_40, 0);
+                        __m256i lhs_mat_23_40 = _mm256_permute2f128_si256(lhs_mat_0123_40, lhs_mat_0123_40, 17);
+                        __m256i lhs_mat_0123_41 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 288 + 512 * sb)));
+                        __m256i lhs_mat_01_41 = _mm256_permute2f128_si256(lhs_mat_0123_41, lhs_mat_0123_41, 0);
+                        __m256i lhs_mat_23_41 = _mm256_permute2f128_si256(lhs_mat_0123_41, lhs_mat_0123_41, 17);
+                        __m256i lhs_mat_0123_50 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 320 + 512 * sb)));
+                        __m256i lhs_mat_01_50 = _mm256_permute2f128_si256(lhs_mat_0123_50, lhs_mat_0123_50, 0);
+                        __m256i lhs_mat_23_50 = _mm256_permute2f128_si256(lhs_mat_0123_50, lhs_mat_0123_50, 17);
+                        __m256i lhs_mat_0123_51 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 352 + 512 * sb)));
+                        __m256i lhs_mat_01_51 = _mm256_permute2f128_si256(lhs_mat_0123_51, lhs_mat_0123_51, 0);
+                        __m256i lhs_mat_23_51 = _mm256_permute2f128_si256(lhs_mat_0123_51, lhs_mat_0123_51, 17);
+                        __m256i lhs_mat_0123_60 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 384 + 512 * sb)));
+                        __m256i lhs_mat_01_60 = _mm256_permute2f128_si256(lhs_mat_0123_60, lhs_mat_0123_60, 0);
+                        __m256i lhs_mat_23_60 = _mm256_permute2f128_si256(lhs_mat_0123_60, lhs_mat_0123_60, 17);
+                        __m256i lhs_mat_0123_61 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 416 + 512 * sb)));
+                        __m256i lhs_mat_01_61 = _mm256_permute2f128_si256(lhs_mat_0123_61, lhs_mat_0123_61, 0);
+                        __m256i lhs_mat_23_61 = _mm256_permute2f128_si256(lhs_mat_0123_61, lhs_mat_0123_61, 17);
+                        __m256i lhs_mat_0123_70 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 448 + 512 * sb)));
+                        __m256i lhs_mat_01_70 = _mm256_permute2f128_si256(lhs_mat_0123_70, lhs_mat_0123_70, 0);
+                        __m256i lhs_mat_23_70 = _mm256_permute2f128_si256(lhs_mat_0123_70, lhs_mat_0123_70, 17);
+                        __m256i lhs_mat_0123_71 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 480 + 512 * sb)));
+                        __m256i lhs_mat_01_71 = _mm256_permute2f128_si256(lhs_mat_0123_71, lhs_mat_0123_71, 0);
+                        __m256i lhs_mat_23_71 = _mm256_permute2f128_si256(lhs_mat_0123_71, lhs_mat_0123_71, 17);
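+
+                        // _mm256_permute2f128_si256(x, x, 0) broadcasts the low 128 bits (rows 0 and 1) into both
+                        // lanes and imm8 = 17 (0x11) broadcasts the high 128 bits (rows 2 and 3), so each pair of
+                        // rows is repeated across the full 256-bit vector for the shuffles below.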
+
+                        // Bsums are loaded for the different Q8_K blocks
+                        __m128i lhs_raw_bsums_01_0123 = _mm_loadu_si128((const __m128i *)((a_ptrs[rp][b].bsums + 32 * sb)));
+                        __m128i lhs_raw_bsums_23_0123 = _mm_loadu_si128((const __m128i *)(a_ptrs[rp][b].bsums + 8 + 32 * sb));
+                        __m128i lhs_raw_bsums_01_4567 = _mm_loadu_si128((const __m128i *)((a_ptrs[rp][b].bsums + 16 + 32 * sb)));
+                        __m128i lhs_raw_bsums_23_4567 = _mm_loadu_si128((const __m128i *)(a_ptrs[rp][b].bsums + 24 + 32 * sb));
+
+                        // Shuffle pattern one - left side input
+                        const __m256i lhs_mat_01_00_sp1 = _mm256_shuffle_epi32(lhs_mat_01_00, 160); //A00(0-3) A00(0-3) A01(0-3) A01(0-3) A00(0-3) A00(0-3) A01(0-3) A01(0-3)
+                        const __m256i lhs_mat_23_00_sp1 = _mm256_shuffle_epi32(lhs_mat_23_00, 160); //A02(0-3) A03(0-3) A02(0-3) A03(0-3) A02(0-3) A03(0-3) A02(0-3) A03(0-3)
+
+                        const __m256i lhs_mat_01_01_sp1 = _mm256_shuffle_epi32(lhs_mat_01_01, 160); //A00(8-11) A00(8-11) A01(8-11) A01(8-11) A00(8-11) A00(8-11) A01(8-11) A01(8-11)
+                        const __m256i lhs_mat_23_01_sp1 = _mm256_shuffle_epi32(lhs_mat_23_01, 160); //A02(8-11) A03(8-11) A02(8-11) A03(8-11) A02(8-11) A03(8-11) A02(8-11) A03(8-11)
+
+                        const __m256i lhs_mat_01_10_sp1 = _mm256_shuffle_epi32(lhs_mat_01_10, 160); //A10(0-3) A10(0-3) A11(0-3) A11(0-3) A10(0-3) A10(0-3) A11(0-3) A11(0-3)
+                        const __m256i lhs_mat_23_10_sp1 = _mm256_shuffle_epi32(lhs_mat_23_10, 160); //A12(0-3) A13(0-3) A12(0-3) A13(0-3) A12(0-3) A13(0-3) A12(0-3) A13(0-3)
+
+                        const __m256i lhs_mat_01_11_sp1 = _mm256_shuffle_epi32(lhs_mat_01_11, 160); //A10(8-11) A10(8-11) A11(8-11) A11(8-11) A10(8-11) A10(8-11) A11(8-11) A11(8-11)
+                        const __m256i lhs_mat_23_11_sp1 = _mm256_shuffle_epi32(lhs_mat_23_11, 160); //A12(8-11) A13(8-11) A12(8-11) A13(8-11) A12(8-11) A13(8-11) A12(8-11) A13(8-11)
+
+                        const __m256i lhs_mat_01_20_sp1 = _mm256_shuffle_epi32(lhs_mat_01_20, 160); //A20(0-3) A20(0-3) A21(0-3) A21(0-3) A20(0-3) A20(0-3) A21(0-3) A21(0-3)
+                        const __m256i lhs_mat_23_20_sp1 = _mm256_shuffle_epi32(lhs_mat_23_20, 160); //A22(0-3) A23(0-3) A22(0-3) A23(0-3) A22(0-3) A23(0-3) A22(0-3) A23(0-3)
+
+                        const __m256i lhs_mat_01_21_sp1 = _mm256_shuffle_epi32(lhs_mat_01_21, 160); //A20(8-11) A20(8-11) A21(8-11) A21(8-11) A20(8-11) A20(8-11) A21(8-11) A21(8-11)
+                        const __m256i lhs_mat_23_21_sp1 = _mm256_shuffle_epi32(lhs_mat_23_21, 160); //A22(8-11) A23(8-11) A22(8-11) A23(8-11) A22(8-11) A23(8-11) A22(8-11) A23(8-11)
+
+                        const __m256i lhs_mat_01_30_sp1 = _mm256_shuffle_epi32(lhs_mat_01_30, 160); //A30(0-3) A30(0-3) A31(0-3) A31(0-3) A30(0-3) A30(0-3) A31(0-3) A31(0-3)
+                        const __m256i lhs_mat_23_30_sp1 = _mm256_shuffle_epi32(lhs_mat_23_30, 160); //A32(0-3) A33(0-3) A32(0-3) A33(0-3) A32(0-3) A33(0-3) A32(0-3) A33(0-3)
+
+                        const __m256i lhs_mat_01_31_sp1 = _mm256_shuffle_epi32(lhs_mat_01_31, 160); //A30(8-11) A30(8-11) A31(8-11) A31(8-11) A30(8-11) A30(8-11) A31(8-11) A31(8-11)
+                        const __m256i lhs_mat_23_31_sp1 = _mm256_shuffle_epi32(lhs_mat_23_31, 160); //A32(8-11) A33(8-11) A32(8-11) A33(8-11) A32(8-11) A33(8-11) A32(8-11) A33(8-11)
+
+                        const __m256i lhs_mat_01_40_sp1 = _mm256_shuffle_epi32(lhs_mat_01_40, 160); //A40(0-3) A40(0-3) A41(0-3) A41(0-3) A40(0-3) A40(0-3) A41(0-3) A41(0-3)
+                        const __m256i lhs_mat_23_40_sp1 = _mm256_shuffle_epi32(lhs_mat_23_40, 160); //A42(0-3) A43(0-3) A42(0-3) A43(0-3) A42(0-3) A43(0-3) A42(0-3) A43(0-3)
+
+                        const __m256i lhs_mat_01_41_sp1 = _mm256_shuffle_epi32(lhs_mat_01_41, 160); //A40(8-11) A40(8-11) A41(8-11) A41(8-11) A40(8-11) A40(8-11) A41(8-11) A41(8-11)
+                        const __m256i lhs_mat_23_41_sp1 = _mm256_shuffle_epi32(lhs_mat_23_41, 160); //A42(8-11) A43(8-11) A42(8-11) A43(8-11) A42(8-11) A43(8-11) A42(8-11) A43(8-11)
+
+                        const __m256i lhs_mat_01_50_sp1 = _mm256_shuffle_epi32(lhs_mat_01_50, 160); //A50(0-3) A50(0-3) A51(0-3) A51(0-3) A50(0-3) A50(0-3) A51(0-3) A51(0-3)
+                        const __m256i lhs_mat_23_50_sp1 = _mm256_shuffle_epi32(lhs_mat_23_50, 160); //A52(0-3) A53(0-3) A52(0-3) A53(0-3) A52(0-3) A53(0-3) A52(0-3) A53(0-3)
+
+                        const __m256i lhs_mat_01_51_sp1 = _mm256_shuffle_epi32(lhs_mat_01_51, 160); //A50(8-11) A50(8-11) A51(8-11) A51(8-11) A50(8-11) A50(8-11) A51(8-11) A51(8-11)
+                        const __m256i lhs_mat_23_51_sp1 = _mm256_shuffle_epi32(lhs_mat_23_51, 160); //A52(8-11) A53(8-11) A52(8-11) A53(8-11) A52(8-11) A53(8-11) A52(8-11) A53(8-11)
+
+                        const __m256i lhs_mat_01_60_sp1 = _mm256_shuffle_epi32(lhs_mat_01_60, 160); //A60(0-3) A60(0-3) A61(0-3) A61(0-3) A60(0-3) A60(0-3) A61(0-3) A61(0-3)
+                        const __m256i lhs_mat_23_60_sp1 = _mm256_shuffle_epi32(lhs_mat_23_60, 160); //A62(0-3) A63(0-3) A62(0-3) A63(0-3) A62(0-3) A63(0-3) A62(0-3) A63(0-3)
+
+                        const __m256i lhs_mat_01_61_sp1 = _mm256_shuffle_epi32(lhs_mat_01_61, 160); //A60(8-11) A60(8-11) A61(8-11) A61(8-11) A60(8-11) A60(8-11) A61(8-11) A61(8-11)
+                        const __m256i lhs_mat_23_61_sp1 = _mm256_shuffle_epi32(lhs_mat_23_61, 160); //A62(8-11) A63(8-11) A62(8-11) A63(8-11) A62(8-11) A63(8-11) A62(8-11) A63(8-11)
+
+                        const __m256i lhs_mat_01_70_sp1 = _mm256_shuffle_epi32(lhs_mat_01_70, 160); //A70(0-3) A70(0-3) A71(0-3) A71(0-3) A70(0-3) A70(0-3) A71(0-3) A71(0-3)
+                        const __m256i lhs_mat_23_70_sp1 = _mm256_shuffle_epi32(lhs_mat_23_70, 160); //A72(0-3) A73(0-3) A72(0-3) A73(0-3) A72(0-3) A73(0-3) A72(0-3) A73(0-3)
+
+                        const __m256i lhs_mat_01_71_sp1 = _mm256_shuffle_epi32(lhs_mat_01_71, 160); //A70(8-11) A70(8-11) A71(8-11) A71(8-11) A70(8-11) A70(8-11) A71(8-11) A71(8-11)
+                        const __m256i lhs_mat_23_71_sp1 = _mm256_shuffle_epi32(lhs_mat_23_71, 160); //A72(8-11) A73(8-11) A72(8-11) A73(8-11) A72(8-11) A73(8-11) A72(8-11) A73(8-11)
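+
+                        // imm8 = 160 (0b10100000) duplicates dwords {0,0,2,2} of each lane; paired with the rhs
+                        // {0,2,0,2} pattern, every row fragment meets both column fragments in the multiply-add
+                        // steps below.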
+
+                        // Shuffle pattern two - left side input
+                        const __m256i lhs_mat_01_00_sp2 = _mm256_shuffle_epi32(lhs_mat_01_00, 245); //A00(4-7) A00(4-7) A01(4-7) A01(4-7) A00(4-7) A00(4-7) A01(4-7) A01(4-7)
+                        const __m256i lhs_mat_23_00_sp2 = _mm256_shuffle_epi32(lhs_mat_23_00, 245); //A02(4-7) A03(4-7) A02(4-7) A03(4-7) A02(4-7) A03(4-7) A02(4-7) A03(4-7)
+
+                        const __m256i lhs_mat_01_01_sp2 = _mm256_shuffle_epi32(lhs_mat_01_01, 245); //A00(12-15) A00(12-15) A01(12-15) A01(12-15) A00(12-15) A00(12-15) A01(12-15) A01(12-15)
+                        const __m256i lhs_mat_23_01_sp2 = _mm256_shuffle_epi32(lhs_mat_23_01, 245); //A02(12-15) A03(12-15) A02(12-15) A03(12-15) A02(12-15) A03(12-15) A02(12-15) A03(12-15)
+
+                        const __m256i lhs_mat_01_10_sp2 = _mm256_shuffle_epi32(lhs_mat_01_10, 245); //A10(4-7) A10(4-7) A11(4-7) A11(4-7) A10(4-7) A10(4-7) A11(4-7) A11(4-7)
+                        const __m256i lhs_mat_23_10_sp2 = _mm256_shuffle_epi32(lhs_mat_23_10, 245); //A12(4-7) A13(4-7) A12(4-7) A13(4-7) A12(4-7) A13(4-7) A12(4-7) A13(4-7)
+
+                        const __m256i lhs_mat_01_11_sp2 = _mm256_shuffle_epi32(lhs_mat_01_11, 245); //A10(12-15) A10(12-15) A11(12-15) A11(12-15) A10(12-15) A10(12-15) A11(12-15) A11(12-15)
+                        const __m256i lhs_mat_23_11_sp2 = _mm256_shuffle_epi32(lhs_mat_23_11, 245); //A12(12-15) A13(12-15) A12(12-15) A13(12-15) A12(12-15) A13(12-15) A12(12-15) A13(12-15)
+
+                        const __m256i lhs_mat_01_20_sp2 = _mm256_shuffle_epi32(lhs_mat_01_20, 245); //A20(4-7) A20(4-7) A21(4-7) A21(4-7) A20(4-7) A20(4-7) A21(4-7) A21(4-7)
+                        const __m256i lhs_mat_23_20_sp2 = _mm256_shuffle_epi32(lhs_mat_23_20, 245); //A22(4-7) A23(4-7) A22(4-7) A23(4-7) A22(4-7) A23(4-7) A22(4-7) A23(4-7)
+
+                        const __m256i lhs_mat_01_21_sp2 = _mm256_shuffle_epi32(lhs_mat_01_21, 245); //A20(12-15) A20(12-15) A21(12-15) A21(12-15) A20(12-15) A20(12-15) A21(12-15) A21(12-15)
+                        const __m256i lhs_mat_23_21_sp2 = _mm256_shuffle_epi32(lhs_mat_23_21, 245); //A22(12-15) A23(12-15) A22(12-15) A23(12-15) A22(12-15) A23(12-15) A22(12-15) A23(12-15)
+
+                        const __m256i lhs_mat_01_30_sp2 = _mm256_shuffle_epi32(lhs_mat_01_30, 245); //A30(4-7) A30(4-7) A31(4-7) A31(4-7) A30(4-7) A30(4-7) A31(4-7) A31(4-7)
+                        const __m256i lhs_mat_23_30_sp2 = _mm256_shuffle_epi32(lhs_mat_23_30, 245); //A32(4-7) A33(4-7) A32(4-7) A33(4-7) A32(4-7) A33(4-7) A32(4-7) A33(4-7)
+
+                        const __m256i lhs_mat_01_31_sp2 = _mm256_shuffle_epi32(lhs_mat_01_31, 245); //A30(12-15) A30(12-15) A31(12-15) A31(12-15) A30(12-15) A30(12-15) A31(12-15) A31(12-15)
+                        const __m256i lhs_mat_23_31_sp2 = _mm256_shuffle_epi32(lhs_mat_23_31, 245); //A32(12-15) A33(12-15) A32(12-15) A33(12-15) A32(12-15) A33(12-15) A32(12-15) A33(12-15)
+
+                        const __m256i lhs_mat_01_40_sp2 = _mm256_shuffle_epi32(lhs_mat_01_40, 245); //A40(4-7) A40(4-7) A41(4-7) A41(4-7) A40(4-7) A40(4-7) A41(4-7) A41(4-7)
+                        const __m256i lhs_mat_23_40_sp2 = _mm256_shuffle_epi32(lhs_mat_23_40, 245); //A42(4-7) A43(4-7) A42(4-7) A43(4-7) A42(4-7) A43(4-7) A42(4-7) A43(4-7)
+
+                        const __m256i lhs_mat_01_41_sp2 = _mm256_shuffle_epi32(lhs_mat_01_41, 245); //A40(12-15) A40(12-15) A41(12-15) A41(12-15) A40(12-15) A40(12-15) A41(12-15) A41(12-15)
+                        const __m256i lhs_mat_23_41_sp2 = _mm256_shuffle_epi32(lhs_mat_23_41, 245); //A42(12-15) A43(12-15) A42(12-15) A43(12-15) A42(12-15) A43(12-15) A42(12-15) A43(12-15)
+
+                        const __m256i lhs_mat_01_50_sp2 = _mm256_shuffle_epi32(lhs_mat_01_50, 245); //A50(4-7) A50(4-7) A51(4-7) A51(4-7) A50(4-7) A50(4-7) A51(4-7) A51(4-7)
+                        const __m256i lhs_mat_23_50_sp2 = _mm256_shuffle_epi32(lhs_mat_23_50, 245); //A52(4-7) A53(4-7) A52(4-7) A53(4-7) A52(4-7) A53(4-7) A52(4-7) A53(4-7)
+
+                        const __m256i lhs_mat_01_51_sp2 = _mm256_shuffle_epi32(lhs_mat_01_51, 245); //A50(12-15) A50(12-15) A51(12-15) A51(12-15) A50(12-15) A50(12-15) A51(12-15) A51(12-15)
+                        const __m256i lhs_mat_23_51_sp2 = _mm256_shuffle_epi32(lhs_mat_23_51, 245); //A52(12-15) A53(12-15) A52(12-15) A53(12-15) A52(12-15) A53(12-15) A52(12-15) A53(12-15)
+
+                        const __m256i lhs_mat_01_60_sp2 = _mm256_shuffle_epi32(lhs_mat_01_60, 245); //A60(4-7) A60(4-7) A61(4-7) A61(4-7) A60(4-7) A60(4-7) A61(4-7) A61(4-7)
+                        const __m256i lhs_mat_23_60_sp2 = _mm256_shuffle_epi32(lhs_mat_23_60, 245); //A62(4-7) A63(4-7) A62(4-7) A63(4-7) A62(4-7) A63(4-7) A62(4-7) A63(4-7)
+
+                        const __m256i lhs_mat_01_61_sp2 = _mm256_shuffle_epi32(lhs_mat_01_61, 245); //A60(12-15) A60(12-15) A61(12-15) A61(12-15) A60(12-15) A60(12-15) A61(12-15) A61(12-15)
+                        const __m256i lhs_mat_23_61_sp2 = _mm256_shuffle_epi32(lhs_mat_23_61, 245); //A62(12-15) A63(12-15) A62(12-15) A63(12-15) A62(12-15) A63(12-15) A62(12-15) A63(12-15)
+
+                        const __m256i lhs_mat_01_70_sp2 = _mm256_shuffle_epi32(lhs_mat_01_70, 245); //A70(4-7) A70(4-7) A71(4-7) A71(4-7) A70(4-7) A70(4-7) A71(4-7) A71(4-7)
+                        const __m256i lhs_mat_23_70_sp2 = _mm256_shuffle_epi32(lhs_mat_23_70, 245); //A72(4-7) A73(4-7) A72(4-7) A73(4-7) A72(4-7) A73(4-7) A72(4-7) A73(4-7)
+
+                        const __m256i lhs_mat_01_71_sp2 = _mm256_shuffle_epi32(lhs_mat_01_71, 245); //A70(12-15) A70(12-15) A71(12-15) A71(12-15) A70(12-15) A70(12-15) A71(12-15) A71(12-15)
+                        const __m256i lhs_mat_23_71_sp2 = _mm256_shuffle_epi32(lhs_mat_23_71, 245); //A72(12-15) A73(12-15) A72(12-15) A73(12-15) A72(12-15) A73(12-15) A72(12-15) A73(12-15)
+
+                        // The values arranged in the shuffle patterns are combined with a dot product within each 32-bit lane, i.e. corresponding bytes are multiplied and the products accumulated into 16-bit integers inside the lane
+                        __m256i iacc_mat_00_0_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_00_sp1, lhs_mat_01_00_sp1),_mm256_maddubs_epi16(rhs_mat_0145_01_sp1, lhs_mat_01_01_sp1));
+                        __m256i iacc_mat_01_0_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_00_sp1, lhs_mat_01_00_sp1),_mm256_maddubs_epi16(rhs_mat_2367_01_sp1, lhs_mat_01_01_sp1));
+
+                        __m256i iacc_mat_10_0_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_00_sp1, lhs_mat_23_00_sp1),_mm256_maddubs_epi16(rhs_mat_0145_01_sp1, lhs_mat_23_01_sp1));
+                        __m256i iacc_mat_11_0_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_00_sp1, lhs_mat_23_00_sp1),_mm256_maddubs_epi16(rhs_mat_2367_01_sp1, lhs_mat_23_01_sp1));
+
+                        __m256i iacc_mat_00_1_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_10_sp1, lhs_mat_01_10_sp1),_mm256_maddubs_epi16(rhs_mat_0145_11_sp1, lhs_mat_01_11_sp1));
+                        __m256i iacc_mat_01_1_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_10_sp1, lhs_mat_01_10_sp1),_mm256_maddubs_epi16(rhs_mat_2367_11_sp1, lhs_mat_01_11_sp1));
+
+                        __m256i iacc_mat_10_1_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_10_sp1, lhs_mat_23_10_sp1),_mm256_maddubs_epi16(rhs_mat_0145_11_sp1, lhs_mat_23_11_sp1));
+                        __m256i iacc_mat_11_1_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_10_sp1, lhs_mat_23_10_sp1),_mm256_maddubs_epi16(rhs_mat_2367_11_sp1, lhs_mat_23_11_sp1));
+
+                        __m256i iacc_mat_00_2_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_20_sp1, lhs_mat_01_20_sp1),_mm256_maddubs_epi16(rhs_mat_0145_21_sp1, lhs_mat_01_21_sp1));
+                        __m256i iacc_mat_01_2_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_20_sp1, lhs_mat_01_20_sp1),_mm256_maddubs_epi16(rhs_mat_2367_21_sp1, lhs_mat_01_21_sp1));
+
+                        __m256i iacc_mat_10_2_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_20_sp1, lhs_mat_23_20_sp1),_mm256_maddubs_epi16(rhs_mat_0145_21_sp1, lhs_mat_23_21_sp1));
+                        __m256i iacc_mat_11_2_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_20_sp1, lhs_mat_23_20_sp1),_mm256_maddubs_epi16(rhs_mat_2367_21_sp1, lhs_mat_23_21_sp1));
+
+                        __m256i iacc_mat_00_3_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_30_sp1, lhs_mat_01_30_sp1),_mm256_maddubs_epi16(rhs_mat_0145_31_sp1, lhs_mat_01_31_sp1));
+                        __m256i iacc_mat_01_3_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_30_sp1, lhs_mat_01_30_sp1),_mm256_maddubs_epi16(rhs_mat_2367_31_sp1, lhs_mat_01_31_sp1));
+
+                        __m256i iacc_mat_10_3_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_30_sp1, lhs_mat_23_30_sp1),_mm256_maddubs_epi16(rhs_mat_0145_31_sp1, lhs_mat_23_31_sp1));
+                        __m256i iacc_mat_11_3_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_30_sp1, lhs_mat_23_30_sp1),_mm256_maddubs_epi16(rhs_mat_2367_31_sp1, lhs_mat_23_31_sp1));
+
+                        __m256i iacc_mat_00_4_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_40_sp1, lhs_mat_01_40_sp1),_mm256_maddubs_epi16(rhs_mat_0145_41_sp1, lhs_mat_01_41_sp1));
+                        __m256i iacc_mat_01_4_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_40_sp1, lhs_mat_01_40_sp1),_mm256_maddubs_epi16(rhs_mat_2367_41_sp1, lhs_mat_01_41_sp1));
+
+                        __m256i iacc_mat_10_4_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_40_sp1, lhs_mat_23_40_sp1),_mm256_maddubs_epi16(rhs_mat_0145_41_sp1, lhs_mat_23_41_sp1));
+                        __m256i iacc_mat_11_4_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_40_sp1, lhs_mat_23_40_sp1),_mm256_maddubs_epi16(rhs_mat_2367_41_sp1, lhs_mat_23_41_sp1));
+
+                        __m256i iacc_mat_00_5_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_50_sp1, lhs_mat_01_50_sp1),_mm256_maddubs_epi16(rhs_mat_0145_51_sp1, lhs_mat_01_51_sp1));
+                        __m256i iacc_mat_01_5_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_50_sp1, lhs_mat_01_50_sp1),_mm256_maddubs_epi16(rhs_mat_2367_51_sp1, lhs_mat_01_51_sp1));
+
+                        __m256i iacc_mat_10_5_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_50_sp1, lhs_mat_23_50_sp1),_mm256_maddubs_epi16(rhs_mat_0145_51_sp1, lhs_mat_23_51_sp1));
+                        __m256i iacc_mat_11_5_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_50_sp1, lhs_mat_23_50_sp1),_mm256_maddubs_epi16(rhs_mat_2367_51_sp1, lhs_mat_23_51_sp1));
+
+                        __m256i iacc_mat_00_6_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_60_sp1, lhs_mat_01_60_sp1),_mm256_maddubs_epi16(rhs_mat_0145_61_sp1, lhs_mat_01_61_sp1));
+                        __m256i iacc_mat_01_6_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_60_sp1, lhs_mat_01_60_sp1),_mm256_maddubs_epi16(rhs_mat_2367_61_sp1, lhs_mat_01_61_sp1));
+
+                        __m256i iacc_mat_10_6_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_60_sp1, lhs_mat_23_60_sp1),_mm256_maddubs_epi16(rhs_mat_0145_61_sp1, lhs_mat_23_61_sp1));
+                        __m256i iacc_mat_11_6_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_60_sp1, lhs_mat_23_60_sp1),_mm256_maddubs_epi16(rhs_mat_2367_61_sp1, lhs_mat_23_61_sp1));
+
+                        __m256i iacc_mat_00_7_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_70_sp1, lhs_mat_01_70_sp1),_mm256_maddubs_epi16(rhs_mat_0145_71_sp1, lhs_mat_01_71_sp1));
+                        __m256i iacc_mat_01_7_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_70_sp1, lhs_mat_01_70_sp1),_mm256_maddubs_epi16(rhs_mat_2367_71_sp1, lhs_mat_01_71_sp1));
+
+                        __m256i iacc_mat_10_7_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_70_sp1, lhs_mat_23_70_sp1),_mm256_maddubs_epi16(rhs_mat_0145_71_sp1, lhs_mat_23_71_sp1));
+                        __m256i iacc_mat_11_7_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_70_sp1, lhs_mat_23_70_sp1),_mm256_maddubs_epi16(rhs_mat_2367_71_sp1, lhs_mat_23_71_sp1));
+
+
+                        __m256i iacc_mat_00_0_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_00_sp2, lhs_mat_01_00_sp2),_mm256_maddubs_epi16(rhs_mat_0145_01_sp2, lhs_mat_01_01_sp2));
+                        __m256i iacc_mat_01_0_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_00_sp2, lhs_mat_01_00_sp2),_mm256_maddubs_epi16(rhs_mat_2367_01_sp2, lhs_mat_01_01_sp2));
+
+                        __m256i iacc_mat_10_0_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_00_sp2, lhs_mat_23_00_sp2),_mm256_maddubs_epi16(rhs_mat_0145_01_sp2, lhs_mat_23_01_sp2));
+                        __m256i iacc_mat_11_0_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_00_sp2, lhs_mat_23_00_sp2),_mm256_maddubs_epi16(rhs_mat_2367_01_sp2, lhs_mat_23_01_sp2));
+
+                        __m256i iacc_mat_00_1_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_10_sp2, lhs_mat_01_10_sp2),_mm256_maddubs_epi16(rhs_mat_0145_11_sp2, lhs_mat_01_11_sp2));
+                        __m256i iacc_mat_01_1_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_10_sp2, lhs_mat_01_10_sp2),_mm256_maddubs_epi16(rhs_mat_2367_11_sp2, lhs_mat_01_11_sp2));
+
+                        __m256i iacc_mat_10_1_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_10_sp2, lhs_mat_23_10_sp2),_mm256_maddubs_epi16(rhs_mat_0145_11_sp2, lhs_mat_23_11_sp2));
+                        __m256i iacc_mat_11_1_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_10_sp2, lhs_mat_23_10_sp2),_mm256_maddubs_epi16(rhs_mat_2367_11_sp2, lhs_mat_23_11_sp2));
+
+                        __m256i iacc_mat_00_2_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_20_sp2, lhs_mat_01_20_sp2),_mm256_maddubs_epi16(rhs_mat_0145_21_sp2, lhs_mat_01_21_sp2));
+                        __m256i iacc_mat_01_2_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_20_sp2, lhs_mat_01_20_sp2),_mm256_maddubs_epi16(rhs_mat_2367_21_sp2, lhs_mat_01_21_sp2));
+
+                        __m256i iacc_mat_10_2_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_20_sp2, lhs_mat_23_20_sp2),_mm256_maddubs_epi16(rhs_mat_0145_21_sp2, lhs_mat_23_21_sp2));
+                        __m256i iacc_mat_11_2_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_20_sp2, lhs_mat_23_20_sp2),_mm256_maddubs_epi16(rhs_mat_2367_21_sp2, lhs_mat_23_21_sp2));
+
+                        __m256i iacc_mat_00_3_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_30_sp2, lhs_mat_01_30_sp2),_mm256_maddubs_epi16(rhs_mat_0145_31_sp2, lhs_mat_01_31_sp2));
+                        __m256i iacc_mat_01_3_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_30_sp2, lhs_mat_01_30_sp2),_mm256_maddubs_epi16(rhs_mat_2367_31_sp2, lhs_mat_01_31_sp2));
+
+                        __m256i iacc_mat_10_3_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_30_sp2, lhs_mat_23_30_sp2),_mm256_maddubs_epi16(rhs_mat_0145_31_sp2, lhs_mat_23_31_sp2));
+                        __m256i iacc_mat_11_3_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_30_sp2, lhs_mat_23_30_sp2),_mm256_maddubs_epi16(rhs_mat_2367_31_sp2, lhs_mat_23_31_sp2));
+
+                        __m256i iacc_mat_00_4_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_40_sp2, lhs_mat_01_40_sp2),_mm256_maddubs_epi16(rhs_mat_0145_41_sp2, lhs_mat_01_41_sp2));
+                        __m256i iacc_mat_01_4_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_40_sp2, lhs_mat_01_40_sp2),_mm256_maddubs_epi16(rhs_mat_2367_41_sp2, lhs_mat_01_41_sp2));
+
+                        __m256i iacc_mat_10_4_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_40_sp2, lhs_mat_23_40_sp2),_mm256_maddubs_epi16(rhs_mat_0145_41_sp2, lhs_mat_23_41_sp2));
+                        __m256i iacc_mat_11_4_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_40_sp2, lhs_mat_23_40_sp2),_mm256_maddubs_epi16(rhs_mat_2367_41_sp2, lhs_mat_23_41_sp2));
+
+                        __m256i iacc_mat_00_5_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_50_sp2, lhs_mat_01_50_sp2),_mm256_maddubs_epi16(rhs_mat_0145_51_sp2, lhs_mat_01_51_sp2));
+                        __m256i iacc_mat_01_5_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_50_sp2, lhs_mat_01_50_sp2),_mm256_maddubs_epi16(rhs_mat_2367_51_sp2, lhs_mat_01_51_sp2));
+
+                        __m256i iacc_mat_10_5_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_50_sp2, lhs_mat_23_50_sp2),_mm256_maddubs_epi16(rhs_mat_0145_51_sp2, lhs_mat_23_51_sp2));
+                        __m256i iacc_mat_11_5_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_50_sp2, lhs_mat_23_50_sp2),_mm256_maddubs_epi16(rhs_mat_2367_51_sp2, lhs_mat_23_51_sp2));
+
+                        __m256i iacc_mat_00_6_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_60_sp2, lhs_mat_01_60_sp2),_mm256_maddubs_epi16(rhs_mat_0145_61_sp2, lhs_mat_01_61_sp2));
+                        __m256i iacc_mat_01_6_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_60_sp2, lhs_mat_01_60_sp2),_mm256_maddubs_epi16(rhs_mat_2367_61_sp2, lhs_mat_01_61_sp2));
+
+                        __m256i iacc_mat_10_6_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_60_sp2, lhs_mat_23_60_sp2),_mm256_maddubs_epi16(rhs_mat_0145_61_sp2, lhs_mat_23_61_sp2));
+                        __m256i iacc_mat_11_6_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_60_sp2, lhs_mat_23_60_sp2),_mm256_maddubs_epi16(rhs_mat_2367_61_sp2, lhs_mat_23_61_sp2));
+
+                        __m256i iacc_mat_00_7_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_70_sp2, lhs_mat_01_70_sp2),_mm256_maddubs_epi16(rhs_mat_0145_71_sp2, lhs_mat_01_71_sp2));
+                        __m256i iacc_mat_01_7_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_70_sp2, lhs_mat_01_70_sp2),_mm256_maddubs_epi16(rhs_mat_2367_71_sp2, lhs_mat_01_71_sp2));
+
+                        __m256i iacc_mat_10_7_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_70_sp2, lhs_mat_23_70_sp2),_mm256_maddubs_epi16(rhs_mat_0145_71_sp2, lhs_mat_23_71_sp2));
+                        __m256i iacc_mat_11_7_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_70_sp2, lhs_mat_23_70_sp2),_mm256_maddubs_epi16(rhs_mat_2367_71_sp2, lhs_mat_23_71_sp2));
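+
+                        // _mm256_maddubs_epi16 multiplies the unsigned rhs bytes (2-bit quants, 0..3) with the
+                        // signed Q8_K bytes and adds adjacent products into 16-bit lanes; with operands this
+                        // small the 16-bit sums cannot saturate.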
+
+                        // Combine results from both shuffle patterns for each output block
+                        __m256i iacc_mat_00_0 = _mm256_add_epi16(iacc_mat_00_0_sp1, iacc_mat_00_0_sp2);
+                        __m256i iacc_mat_01_0 = _mm256_add_epi16(iacc_mat_01_0_sp1, iacc_mat_01_0_sp2);
+                        __m256i iacc_mat_10_0 = _mm256_add_epi16(iacc_mat_10_0_sp1, iacc_mat_10_0_sp2);
+                        __m256i iacc_mat_11_0 = _mm256_add_epi16(iacc_mat_11_0_sp1, iacc_mat_11_0_sp2);
+
+                        __m256i iacc_mat_00_1 = _mm256_add_epi16(iacc_mat_00_1_sp1, iacc_mat_00_1_sp2);
+                        __m256i iacc_mat_01_1 = _mm256_add_epi16(iacc_mat_01_1_sp1, iacc_mat_01_1_sp2);
+                        __m256i iacc_mat_10_1 = _mm256_add_epi16(iacc_mat_10_1_sp1, iacc_mat_10_1_sp2);
+                        __m256i iacc_mat_11_1 = _mm256_add_epi16(iacc_mat_11_1_sp1, iacc_mat_11_1_sp2);
+
+                        __m256i iacc_mat_00_2 = _mm256_add_epi16(iacc_mat_00_2_sp1, iacc_mat_00_2_sp2);
+                        __m256i iacc_mat_01_2 = _mm256_add_epi16(iacc_mat_01_2_sp1, iacc_mat_01_2_sp2);
+                        __m256i iacc_mat_10_2 = _mm256_add_epi16(iacc_mat_10_2_sp1, iacc_mat_10_2_sp2);
+                        __m256i iacc_mat_11_2 = _mm256_add_epi16(iacc_mat_11_2_sp1, iacc_mat_11_2_sp2);
+
+                        __m256i iacc_mat_00_3 = _mm256_add_epi16(iacc_mat_00_3_sp1, iacc_mat_00_3_sp2);
+                        __m256i iacc_mat_01_3 = _mm256_add_epi16(iacc_mat_01_3_sp1, iacc_mat_01_3_sp2);
+                        __m256i iacc_mat_10_3 = _mm256_add_epi16(iacc_mat_10_3_sp1, iacc_mat_10_3_sp2);
+                        __m256i iacc_mat_11_3 = _mm256_add_epi16(iacc_mat_11_3_sp1, iacc_mat_11_3_sp2);
+
+                        __m256i iacc_mat_00_4 = _mm256_add_epi16(iacc_mat_00_4_sp1, iacc_mat_00_4_sp2);
+                        __m256i iacc_mat_01_4 = _mm256_add_epi16(iacc_mat_01_4_sp1, iacc_mat_01_4_sp2);
+                        __m256i iacc_mat_10_4 = _mm256_add_epi16(iacc_mat_10_4_sp1, iacc_mat_10_4_sp2);
+                        __m256i iacc_mat_11_4 = _mm256_add_epi16(iacc_mat_11_4_sp1, iacc_mat_11_4_sp2);
+
+                        __m256i iacc_mat_00_5 = _mm256_add_epi16(iacc_mat_00_5_sp1, iacc_mat_00_5_sp2);
+                        __m256i iacc_mat_01_5 = _mm256_add_epi16(iacc_mat_01_5_sp1, iacc_mat_01_5_sp2);
+                        __m256i iacc_mat_10_5 = _mm256_add_epi16(iacc_mat_10_5_sp1, iacc_mat_10_5_sp2);
+                        __m256i iacc_mat_11_5 = _mm256_add_epi16(iacc_mat_11_5_sp1, iacc_mat_11_5_sp2);
+
+                        __m256i iacc_mat_00_6 = _mm256_add_epi16(iacc_mat_00_6_sp1, iacc_mat_00_6_sp2);
+                        __m256i iacc_mat_01_6 = _mm256_add_epi16(iacc_mat_01_6_sp1, iacc_mat_01_6_sp2);
+                        __m256i iacc_mat_10_6 = _mm256_add_epi16(iacc_mat_10_6_sp1, iacc_mat_10_6_sp2);
+                        __m256i iacc_mat_11_6 = _mm256_add_epi16(iacc_mat_11_6_sp1, iacc_mat_11_6_sp2);
+
+                        __m256i iacc_mat_00_7 = _mm256_add_epi16(iacc_mat_00_7_sp1, iacc_mat_00_7_sp2);
+                        __m256i iacc_mat_01_7 = _mm256_add_epi16(iacc_mat_01_7_sp1, iacc_mat_01_7_sp2);
+                        __m256i iacc_mat_10_7 = _mm256_add_epi16(iacc_mat_10_7_sp1, iacc_mat_10_7_sp2);
+                        __m256i iacc_mat_11_7 = _mm256_add_epi16(iacc_mat_11_7_sp1, iacc_mat_11_7_sp2);
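+
+                        // After adding the sp1 and sp2 halves each 16-bit lane holds at most eight byte products,
+                        // bounded by 8 * 3 * 128 = 3072, so the widening to 32 bits can safely wait until the
+                        // scale multiply below.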
+
+                        // Multiply the combined dot products with the corresponding sub-block scales
+                        iacc_mat_00_0 = _mm256_madd_epi16(iacc_mat_00_0, scale_0145_0);
+                        iacc_mat_01_0 = _mm256_madd_epi16(iacc_mat_01_0, scale_2367_0);
+                        iacc_mat_10_0 = _mm256_madd_epi16(iacc_mat_10_0, scale_0145_0);
+                        iacc_mat_11_0 = _mm256_madd_epi16(iacc_mat_11_0, scale_2367_0);
+
+                        iacc_mat_00_1 = _mm256_madd_epi16(iacc_mat_00_1, scale_0145_1);
+                        iacc_mat_01_1 = _mm256_madd_epi16(iacc_mat_01_1, scale_2367_1);
+                        iacc_mat_10_1 = _mm256_madd_epi16(iacc_mat_10_1, scale_0145_1);
+                        iacc_mat_11_1 = _mm256_madd_epi16(iacc_mat_11_1, scale_2367_1);
+
+                        iacc_mat_00_2 = _mm256_madd_epi16(iacc_mat_00_2, scale_0145_2);
+                        iacc_mat_01_2 = _mm256_madd_epi16(iacc_mat_01_2, scale_2367_2);
+                        iacc_mat_10_2 = _mm256_madd_epi16(iacc_mat_10_2, scale_0145_2);
+                        iacc_mat_11_2 = _mm256_madd_epi16(iacc_mat_11_2, scale_2367_2);
+
+                        iacc_mat_00_3 = _mm256_madd_epi16(iacc_mat_00_3, scale_0145_3);
+                        iacc_mat_01_3 = _mm256_madd_epi16(iacc_mat_01_3, scale_2367_3);
+                        iacc_mat_10_3 = _mm256_madd_epi16(iacc_mat_10_3, scale_0145_3);
+                        iacc_mat_11_3 = _mm256_madd_epi16(iacc_mat_11_3, scale_2367_3);
+
+                        iacc_mat_00_4 = _mm256_madd_epi16(iacc_mat_00_4, scale_0145_4);
+                        iacc_mat_01_4 = _mm256_madd_epi16(iacc_mat_01_4, scale_2367_4);
+                        iacc_mat_10_4 = _mm256_madd_epi16(iacc_mat_10_4, scale_0145_4);
+                        iacc_mat_11_4 = _mm256_madd_epi16(iacc_mat_11_4, scale_2367_4);
+
+                        iacc_mat_00_5 = _mm256_madd_epi16(iacc_mat_00_5, scale_0145_5);
+                        iacc_mat_01_5 = _mm256_madd_epi16(iacc_mat_01_5, scale_2367_5);
+                        iacc_mat_10_5 = _mm256_madd_epi16(iacc_mat_10_5, scale_0145_5);
+                        iacc_mat_11_5 = _mm256_madd_epi16(iacc_mat_11_5, scale_2367_5);
+
+                        iacc_mat_00_6 = _mm256_madd_epi16(iacc_mat_00_6, scale_0145_6);
+                        iacc_mat_01_6 = _mm256_madd_epi16(iacc_mat_01_6, scale_2367_6);
+                        iacc_mat_10_6 = _mm256_madd_epi16(iacc_mat_10_6, scale_0145_6);
+                        iacc_mat_11_6 = _mm256_madd_epi16(iacc_mat_11_6, scale_2367_6);
+
+                        iacc_mat_00_7 = _mm256_madd_epi16(iacc_mat_00_7, scale_0145_7);
+                        iacc_mat_01_7 = _mm256_madd_epi16(iacc_mat_01_7, scale_2367_7);
+                        iacc_mat_10_7 = _mm256_madd_epi16(iacc_mat_10_7, scale_0145_7);
+                        iacc_mat_11_7 = _mm256_madd_epi16(iacc_mat_11_7, scale_2367_7);
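+
+                        // _mm256_madd_epi16 multiplies the 16-bit sums with the matching 16-bit sub-block scales
+                        // and adds adjacent pairs, widening the accumulators to 32 bits.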
+
+                        __m256i iacc_mat_00 = _mm256_add_epi32(_mm256_add_epi32(_mm256_add_epi32(iacc_mat_00_0, iacc_mat_00_1), _mm256_add_epi32(iacc_mat_00_2, iacc_mat_00_3)), _mm256_add_epi32(_mm256_add_epi32(iacc_mat_00_4, iacc_mat_00_5), _mm256_add_epi32(iacc_mat_00_6, iacc_mat_00_7)));
+                        __m256i iacc_mat_01 = _mm256_add_epi32(_mm256_add_epi32(_mm256_add_epi32(iacc_mat_01_0, iacc_mat_01_1), _mm256_add_epi32(iacc_mat_01_2, iacc_mat_01_3)), _mm256_add_epi32(_mm256_add_epi32(iacc_mat_01_4, iacc_mat_01_5), _mm256_add_epi32(iacc_mat_01_6, iacc_mat_01_7)));
+                        __m256i iacc_mat_10 = _mm256_add_epi32(_mm256_add_epi32(_mm256_add_epi32(iacc_mat_10_0, iacc_mat_10_1), _mm256_add_epi32(iacc_mat_10_2, iacc_mat_10_3)), _mm256_add_epi32(_mm256_add_epi32(iacc_mat_10_4, iacc_mat_10_5), _mm256_add_epi32(iacc_mat_10_6, iacc_mat_10_7)));
+                        __m256i iacc_mat_11 = _mm256_add_epi32(_mm256_add_epi32(_mm256_add_epi32(iacc_mat_11_0, iacc_mat_11_1), _mm256_add_epi32(iacc_mat_11_2, iacc_mat_11_3)), _mm256_add_epi32(_mm256_add_epi32(iacc_mat_11_4, iacc_mat_11_5), _mm256_add_epi32(iacc_mat_11_6, iacc_mat_11_7)));
+
+                        // Straighten out to make 4 row vectors
+                        __m256i iacc_row_0 = _mm256_blend_epi32(iacc_mat_00, _mm256_shuffle_epi32(iacc_mat_01, 78), 204);
+                        __m256i iacc_row_1 = _mm256_blend_epi32(_mm256_shuffle_epi32(iacc_mat_00, 78), iacc_mat_01, 204);
+                        __m256i iacc_row_2 = _mm256_blend_epi32(iacc_mat_10, _mm256_shuffle_epi32(iacc_mat_11, 78), 204);
+                        __m256i iacc_row_3 = _mm256_blend_epi32(_mm256_shuffle_epi32(iacc_mat_10, 78), iacc_mat_11, 204);
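+
+                        // imm8 = 78 (0b01001110) swaps the two 64-bit halves of each lane; blending with mask 204
+                        // (0b11001100) then interleaves the swapped and unswapped vectors, turning the 2x2 tiled
+                        // sums into four plain row vectors.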
+
+                        // Load the scale (d) values for all the 4 Q8_K blocks and repeat them across lanes
+                        const __m128 row_scale_f32_sse = _mm_load_ps(a_ptrs[rp][b].d);
+                        const __m256 row_scale_f32 = _mm256_set_m128(row_scale_f32_sse, row_scale_f32_sse);
+
+                        // Multiply with appropriate scales and accumulate (for both d and dmin) below
+                        acc_rows[rp * 4] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_0), _mm256_mul_ps(col_scale_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 0)), acc_rows[rp * 4]);
+                        acc_rows[rp * 4 + 1] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_1), _mm256_mul_ps(col_scale_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 85)), acc_rows[rp * 4 + 1]);
+                        acc_rows[rp * 4 + 2] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_2), _mm256_mul_ps(col_scale_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 170)), acc_rows[rp * 4 + 2]);
+                        acc_rows[rp * 4 + 3] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_3), _mm256_mul_ps(col_scale_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 255)), acc_rows[rp * 4 + 3]);
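+
+                        // _mm256_shuffle_ps with imm8 = 0, 85, 170 and 255 broadcasts the d value of row 0, 1, 2
+                        // and 3 respectively (85 = 0b01010101, etc.), pairing each row's Q8_K scale with the
+                        // shared column scales.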
+
+                        __m256i lhs_bsums_01_0123 = _mm256_inserti128_si256(_mm256_castsi128_si256(lhs_raw_bsums_01_0123), lhs_raw_bsums_01_0123, 1);
+                        __m256i lhs_bsums_23_0123 = _mm256_inserti128_si256(_mm256_castsi128_si256(lhs_raw_bsums_23_0123), lhs_raw_bsums_23_0123, 1);
+                        __m256i lhs_bsums_01_4567 = _mm256_inserti128_si256(_mm256_castsi128_si256(lhs_raw_bsums_01_4567), lhs_raw_bsums_01_4567, 1);
+                        __m256i lhs_bsums_23_4567 = _mm256_inserti128_si256(_mm256_castsi128_si256(lhs_raw_bsums_23_4567), lhs_raw_bsums_23_4567, 1);
+
+                        // Take two bsums from two Q8_Ks at a time and multiply with corresponding mins values from each Q2_K
+                        __m256i iacc_row_min_0_01 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_01_0123, 0), mins_01);
+                        __m256i iacc_row_min_1_01 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_01_0123, 170), mins_01);
+                        __m256i iacc_row_min_2_01 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_23_0123, 0), mins_01);
+                        __m256i iacc_row_min_3_01 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_23_0123, 170), mins_01);
+
+                        __m256i iacc_row_min_0_23 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_01_0123, 85), mins_23);
+                        __m256i iacc_row_min_1_23 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_01_0123, 255), mins_23);
+                        __m256i iacc_row_min_2_23 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_23_0123, 85), mins_23);
+                        __m256i iacc_row_min_3_23 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_23_0123, 255), mins_23);
+
+                        __m256i iacc_row_min_0_45 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_01_4567, 0), mins_45);
+                        __m256i iacc_row_min_1_45 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_01_4567, 170), mins_45);
+                        __m256i iacc_row_min_2_45 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_23_4567, 0), mins_45);
+                        __m256i iacc_row_min_3_45 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_23_4567, 170), mins_45);
+
+                        __m256i iacc_row_min_0_67 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_01_4567, 85), mins_67);
+                        __m256i iacc_row_min_1_67 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_01_4567, 255), mins_67);
+                        __m256i iacc_row_min_2_67 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_23_4567, 85), mins_67);
+                        __m256i iacc_row_min_3_67 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_23_4567, 255), mins_67);
+
+                        __m256i iacc_row_min_0 = _mm256_add_epi32(_mm256_add_epi32(iacc_row_min_0_01, iacc_row_min_0_23), _mm256_add_epi32(iacc_row_min_0_45,iacc_row_min_0_67));
+                        __m256i iacc_row_min_1 = _mm256_add_epi32(_mm256_add_epi32(iacc_row_min_1_01, iacc_row_min_1_23), _mm256_add_epi32(iacc_row_min_1_45,iacc_row_min_1_67));
+                        __m256i iacc_row_min_2 = _mm256_add_epi32(_mm256_add_epi32(iacc_row_min_2_01, iacc_row_min_2_23), _mm256_add_epi32(iacc_row_min_2_45,iacc_row_min_2_67));
+                        __m256i iacc_row_min_3 = _mm256_add_epi32(_mm256_add_epi32(iacc_row_min_3_01, iacc_row_min_3_23), _mm256_add_epi32(iacc_row_min_3_45,iacc_row_min_3_67));
+
+                        acc_min_rows[rp * 4] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_min_0), _mm256_mul_ps(col_dmin_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 0)), acc_min_rows[rp * 4]);
+                        acc_min_rows[rp * 4 + 1] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_min_1), _mm256_mul_ps(col_dmin_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 85)), acc_min_rows[rp * 4 + 1]);
+                        acc_min_rows[rp * 4 + 2] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_min_2), _mm256_mul_ps(col_dmin_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 170)), acc_min_rows[rp * 4 + 2]);
+                        acc_min_rows[rp * 4 + 3] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_min_3), _mm256_mul_ps(col_dmin_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 255)), acc_min_rows[rp * 4 + 3]);
+
+                    }
+                }
+            }
+            // Store the accumulated values
+            for (int i = 0; i < 16; i++) {
+                _mm256_storeu_ps((float * )(s + ((y * 4 + i) * bs + x * 8)), _mm256_sub_ps(acc_rows[i], acc_min_rows[i]));
+
+            }
+        }
+    }
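+
+    // Q2_K dequantizes as d * scale * q - dmin * min, so a row result is
+    // sum_j(d * s_j * dot(q2_j, q8_j)) - sum_j(dmin * m_j * bsum_j), with the Q8_K row scale folded in;
+    // acc_rows holds the first term, acc_min_rows the second, and the store above subtracts them.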
+
+    for (; y < nr / 4; y ++) {
+
+        const block_q8_Kx4 * a_ptr = a_ptr_start + (y * nb);
+
+        // Take a group of eight columns (one block_q2_Kx8 structure) at each pass of the loop and perform the dot product operation
+        for (int64_t x = xstart; x < nc / 8; x++) {
+
+            const block_q2_Kx8 * b_ptr = b_ptr_start + (x * b_nb);
+
+            // Master FP accumulators
+            __m256 acc_rows[4];
+            for (int i = 0; i < 4; i++) {
+                acc_rows[i] = _mm256_setzero_ps();
+            }
+
+            __m256 acc_min_rows[4];
+            for (int i = 0; i < 4; i++) {
+                acc_min_rows[i] = _mm256_setzero_ps();
+            }
+
+            for (int64_t b = 0; b < nb; b++) {
+                // Delta values - Load the eight scale values of block_q2_Kx8
+                const __m256 col_scale_f32 = GGML_F32Cx8_LOAD(b_ptr[b].d);
+
+                // dmin values - Load the eight dmin values of block_q2_Kx8
+                const __m256 col_dmin_f32 = GGML_F32Cx8_LOAD(b_ptr[b].dmin);
+
+                // Loop to iterate over the sixteen sub blocks of a super block - eight sub blocks are processed per iteration
+                for (int sb = 0; sb < QK_K / 128; sb++) {
+
+                    // Load the quantized values of the eight sub blocks, interleaved with each other in chunks of eight bytes - B0,B1 ....B6,B7
+                    const __m256i rhs_raw_mat_0123_0 = _mm256_loadu_si256((const __m256i *)(b_ptr[b].qs + sb * 256));
+                    const __m256i rhs_raw_mat_4567_0 = _mm256_loadu_si256((const __m256i *)(b_ptr[b].qs + 32 + sb * 256));
+                    const __m256i rhs_raw_mat_0123_1 = _mm256_loadu_si256((const __m256i *)(b_ptr[b].qs + 64 + sb * 256));
+                    const __m256i rhs_raw_mat_4567_1 = _mm256_loadu_si256((const __m256i *)(b_ptr[b].qs + 96 + sb * 256));
+                    const __m256i rhs_raw_mat_0123_2 = _mm256_loadu_si256((const __m256i *)(b_ptr[b].qs + 128 + sb * 256));
+                    const __m256i rhs_raw_mat_4567_2 = _mm256_loadu_si256((const __m256i *)(b_ptr[b].qs + 160 + sb * 256));
+                    const __m256i rhs_raw_mat_0123_3 = _mm256_loadu_si256((const __m256i *)(b_ptr[b].qs + 192 + sb * 256));
+                    const __m256i rhs_raw_mat_4567_3 = _mm256_loadu_si256((const __m256i *)(b_ptr[b].qs + 224 + sb * 256));
+
+                    // Save the values in the following vectors in the formats B0B1B4B5, B2B3B6B7 for further processing and storing of values
+                    // (the names encode: superblock columns, sub block, and which part of the sub block)
+                    const __m256i rhs_raw_mat_0145_0 = _mm256_blend_epi32(rhs_raw_mat_0123_0, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_0, requiredOrder), 240);
+                    const __m256i rhs_raw_mat_2367_0 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_0, requiredOrder), rhs_raw_mat_4567_0, 240);
+
+                    const __m256i rhs_raw_mat_0145_1 = _mm256_blend_epi32(rhs_raw_mat_0123_1, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_1, requiredOrder), 240);
+                    const __m256i rhs_raw_mat_2367_1 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_1, requiredOrder), rhs_raw_mat_4567_1, 240);
+
+                    const __m256i rhs_raw_mat_0145_2 = _mm256_blend_epi32(rhs_raw_mat_0123_2, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_2, requiredOrder), 240);
+                    const __m256i rhs_raw_mat_2367_2 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_2, requiredOrder), rhs_raw_mat_4567_2, 240);
+
+                    const __m256i rhs_raw_mat_0145_3 = _mm256_blend_epi32(rhs_raw_mat_0123_3, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_3, requiredOrder), 240);
+                    const __m256i rhs_raw_mat_2367_3 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_3, requiredOrder), rhs_raw_mat_4567_3, 240);
+
+                    // 2-bit -> 8-bit
+                    // First sub block of the eight sub blocks processed in the iteration
+                    const __m256i rhs_mat_0145_00 = _mm256_and_si256(rhs_raw_mat_0145_0, m3b); //B00(0-7) B01(0-7) B04(0-7) B05(0-7)
+                    const __m256i rhs_mat_2367_00 = _mm256_and_si256(rhs_raw_mat_2367_0, m3b); //B02(0-7) B03(0-7) B06(0-7) B07(0-7)
+
+                    const __m256i rhs_mat_0145_01 = _mm256_and_si256(rhs_raw_mat_0145_1, m3b); //B00(8-15) B01(8-15) B04(8-15) B05(8-15)
+                    const __m256i rhs_mat_2367_01 = _mm256_and_si256(rhs_raw_mat_2367_1, m3b); //B02(8-15) B03(8-15) B06(8-15) B07(8-15)
+
+                    // Second sub block of the eight sub blocks processed in the iteration
+                    const __m256i rhs_mat_0145_10 = _mm256_and_si256(rhs_raw_mat_0145_2, m3b); //B10(0-7) B11(0-7) B14(0-7) B15(0-7)
+                    const __m256i rhs_mat_2367_10 = _mm256_and_si256(rhs_raw_mat_2367_2, m3b); //B12(0-7) B13(0-7) B16(0-7) B17(0-7)
+
+                    const __m256i rhs_mat_0145_11 = _mm256_and_si256(rhs_raw_mat_0145_3, m3b); //B10(8-15) B11(8-15) B14(8-15) B15(8-15)
+                    const __m256i rhs_mat_2367_11 = _mm256_and_si256(rhs_raw_mat_2367_3, m3b); //B12(8-15) B13(8-15) B16(8-15) B17(8-15)
+
+                    // Third sub block of the eight sub blocks processed in the iteration
+                    const __m256i rhs_mat_0145_20 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_0, 2), m3b); //B20(0-7) B21(0-7) B24(0-7) B25(0-7)
+                    const __m256i rhs_mat_2367_20 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_0, 2), m3b); //B22(0-7) B23(0-7) B26(0-7) B27(0-7)
+
+                    const __m256i rhs_mat_0145_21 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_1, 2), m3b); //B20(8-15) B21(8-15) B24(8-15) B25(8-15)
+                    const __m256i rhs_mat_2367_21 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_1, 2), m3b); //B22(8-15) B23(8-15) B26(8-15) B27(8-15)
+
+                    // Fourth sub block of the eight sub blocks processed in the iteration
+                    const __m256i rhs_mat_0145_30 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_2, 2), m3b); //B30(0-7) B31(0-7) B34(0-7) B35(0-7)
+                    const __m256i rhs_mat_2367_30 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_2, 2), m3b); //B32(0-7) B33(0-7) B36(0-7) B37(0-7)
+
+                    const __m256i rhs_mat_0145_31 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_3, 2), m3b); //B30(8-15) B31(8-15) B34(8-15) B35(8-15)
+                    const __m256i rhs_mat_2367_31 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_3, 2), m3b); //B32(8-15) B33(8-15) B36(8-15) B37(8-15)
+
+                    // Fifth sub block of the eight sub blocks processed in the iteration
+                    const __m256i rhs_mat_0145_40 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_0, 4), m3b); //B40(0-7) B41(0-7) B44(0-7) B45(0-7)
+                    const __m256i rhs_mat_2367_40 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_0, 4), m3b); //B42(0-7) B43(0-7) B46(0-7) B47(0-7)
+
+                    const __m256i rhs_mat_0145_41 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_1, 4), m3b); //B40(8-15) B41(8-15) B44(8-15) B45(8-15)
+                    const __m256i rhs_mat_2367_41 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_1, 4), m3b); //B42(8-15) B43(8-15) B46(8-15) B47(8-15)
+
+                    // Sixth sub block of the eight sub blocks processed in the iteration
+                    const __m256i rhs_mat_0145_50 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_2, 4), m3b); //B50(0-7) B51(0-7) B54(0-7) B55(0-7)
+                    const __m256i rhs_mat_2367_50 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_2, 4), m3b); //B52(0-7) B53(0-7) B56(0-7) B57(0-7)
+
+                    const __m256i rhs_mat_0145_51 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_3, 4), m3b); //B50(8-15) B51(8-15) B54(8-15) B55(8-15)
+                    const __m256i rhs_mat_2367_51 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_3, 4), m3b); //B52(8-15) B53(8-15) B56(8-15) B57(8-15)
+
+                    // Seventh sub block of the eight sub blocks processed in the iteration
+                    const __m256i rhs_mat_0145_60 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_0, 6), m3b); //B60(0-7) B61(0-7) B64(0-7) B65(0-7)
+                    const __m256i rhs_mat_2367_60 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_0, 6), m3b); //B62(0-7) B63(0-7) B66(0-7) B67(0-7)
+
+                    const __m256i rhs_mat_0145_61 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_1, 6), m3b); //B60(8-15) B61(8-15) B64(8-15) B65(8-15)
+                    const __m256i rhs_mat_2367_61 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_1, 6), m3b); //B62(8-15) B63(8-15) B66(8-15) B67(8-15)
+
+                    // Eighth sub block of the eight sub blocks processed in the iteration
+                    const __m256i rhs_mat_0145_70 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_2, 6), m3b); //B70(0-7) B71(0-7) B74(0-7) B75(0-7)
+                    const __m256i rhs_mat_2367_70 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_2, 6), m3b); //B72(0-7) B73(0-7) B76(0-7) B77(0-7)
+
+                    const __m256i rhs_mat_0145_71 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_3, 6), m3b); //B70(8-15) B71(8-15) B74(8-15) B75(8-15)
+                    const __m256i rhs_mat_2367_71 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_3, 6), m3b); //B72(8-15) B73(8-15) B76(8-15) B77(8-15)
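+
+                    // Each packed byte holds four 2-bit quants at bit offsets 0, 2, 4 and 6; e.g. the byte
+                    // 0b10011100 unpacks (from the low bits up) to 0, 3, 1, 2 after the shifts and the mask with
+                    // m3b (assumed to be the usual _mm256_set1_epi8(3) constant defined earlier).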
+
+                    // Shuffle pattern one - right side input
+                    const __m256i rhs_mat_0145_00_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_00, 136); //B00(0-3) B01(0-3) B00(0-3) B01(0-3) B04(0-3) B05(0-3) B04(0-3) B05(0-3)
+                    const __m256i rhs_mat_2367_00_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_00, 136); //B02(0-3) B03(0-3) B02(0-3) B03(0-3) B06(0-3) B07(0-3) B06(0-3) B07(0-3)
+
+                    const __m256i rhs_mat_0145_01_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_01, 136); //B00(8-11) B01(8-11) B00(8-11) B01(8-11) B04(8-11) B05(8-11) B04(8-11) B05(8-11)
+                    const __m256i rhs_mat_2367_01_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_01, 136); //B02(8-11) B03(8-11) B02(8-11) B03(8-11) B06(8-11) B07(8-11) B06(8-11) B07(8-11)
+
+                    const __m256i rhs_mat_0145_10_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_10, 136); //B10(0-3) B11(0-3) B10(0-3) B11(0-3) B14(0-3) B15(0-3) B14(0-3) B15(0-3)
+                    const __m256i rhs_mat_2367_10_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_10, 136); //B12(0-3) B13(0-3) B12(0-3) B13(0-3) B16(0-3) B17(0-3) B16(0-3) B17(0-3)
+
+                    const __m256i rhs_mat_0145_11_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_11, 136); //B10(8-11) B11(8-11) B10(8-11) B11(8-11) B14(8-11) B15(8-11) B14(8-11) B15(8-11)
+                    const __m256i rhs_mat_2367_11_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_11, 136); //B12(8-11) B13(8-11) B12(8-11) B13(8-11) B16(8-11) B17(8-11) B16(8-11) B17(8-11)
+
+                    const __m256i rhs_mat_0145_20_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_20, 136); //B20(0-3) B21(0-3) B20(0-3) B21(0-3) B24(0-3) B25(0-3) B24(0-3) B25(0-3)
+                    const __m256i rhs_mat_2367_20_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_20, 136); //B22(0-3) B23(0-3) B22(0-3) B23(0-3) B26(0-3) B27(0-3) B26(0-3) B27(0-3)
+
+                    const __m256i rhs_mat_0145_21_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_21, 136); //B20(8-11) B21(8-11) B20(8-11) B21(8-11) B24(8-11) B25(8-11) B24(8-11) B25(8-11)
+                    const __m256i rhs_mat_2367_21_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_21, 136); //B22(8-11) B23(8-11) B22(8-11) B23(8-11) B26(8-11) B27(8-11) B26(8-11) B27(8-11)
+
+                    const __m256i rhs_mat_0145_30_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_30, 136); //B30(0-3) B31(0-3) B30(0-3) B31(0-3) B34(0-3) B35(0-3) B34(0-3) B35(0-3)
+                    const __m256i rhs_mat_2367_30_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_30, 136); //B32(0-3) B33(0-3) B32(0-3) B33(0-3) B36(0-3) B37(0-3) B36(0-3) B37(0-3)
+
+                    const __m256i rhs_mat_0145_31_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_31, 136); //B30(8-11) B31(8-11) B30(8-11) B31(8-11) B34(8-11) B35(8-11) B34(8-11) B35(8-11)
+                    const __m256i rhs_mat_2367_31_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_31, 136); //B32(8-11) B33(8-11) B32(8-11) B33(8-11) B36(8-11) B37(8-11) B36(8-11) B37(8-11)
+
+                    const __m256i rhs_mat_0145_40_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_40, 136); //B40(0-3) B41(0-3) B40(0-3) B41(0-3) B44(0-3) B45(0-3) B44(0-3) B45(0-3)
+                    const __m256i rhs_mat_2367_40_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_40, 136); //B42(0-3) B43(0-3) B42(0-3) B43(0-3) B46(0-3) B47(0-3) B46(0-3) B47(0-3)
+
+                    const __m256i rhs_mat_0145_41_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_41, 136); //B40(8-11) B41(8-11) B40(8-11) B41(8-11) B44(8-11) B45(8-11) B44(8-11) B45(8-11)
+                    const __m256i rhs_mat_2367_41_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_41, 136); //B42(8-11) B43(8-11) B42(8-11) B43(8-11) B46(8-11) B47(8-11) B46(8-11) B47(8-11)
+
+                    const __m256i rhs_mat_0145_50_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_50, 136); //B50(0-3) B51(0-3) B50(0-3) B51(0-3) B54(0-3) B55(0-3) B54(0-3) B55(0-3)
+                    const __m256i rhs_mat_2367_50_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_50, 136); //B52(0-3) B53(0-3) B52(0-3) B53(0-3) B56(0-3) B57(0-3) B56(0-3) B57(0-3)
+
+                    const __m256i rhs_mat_0145_51_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_51, 136); //B50(8-11) B51(8-11) B50(8-11) B51(8-11) B54(8-11) B55(8-11) B54(8-11) B55(8-11)
+                    const __m256i rhs_mat_2367_51_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_51, 136); //B52(8-11) B53(8-11) B52(8-11) B53(8-11) B56(8-11) B57(8-11) B56(8-11) B57(8-11)
+
+                    const __m256i rhs_mat_0145_60_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_60, 136); //B60(0-3) B61(0-3) B60(0-3) B61(0-3) B64(0-3) B65(0-3) B64(0-3) B65(0-3)
+                    const __m256i rhs_mat_2367_60_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_60, 136); //B62(0-3) B63(0-3) B62(0-3) B63(0-3) B66(0-3) B67(0-3) B66(0-3) B67(0-3)
+
+                    const __m256i rhs_mat_0145_61_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_61, 136); //B60(8-11) B61(8-11) B60(8-11) B61(8-11) B64(8-11) B65(8-11) B64(8-11) B65(8-11)
+                    const __m256i rhs_mat_2367_61_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_61, 136); //B62(8-11) B63(8-11) B62(8-11) B63(8-11) B66(8-11) B67(8-11) B66(8-11) B67(8-11)
+
+                    const __m256i rhs_mat_0145_70_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_70, 136); //B70(0-3) B71(0-3) B70(0-3) B71(0-3) B74(0-3) B75(0-3) B74(0-3) B75(0-3)
+                    const __m256i rhs_mat_2367_70_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_70, 136); //B72(0-3) B73(0-3) B72(0-3) B73(0-3) B76(0-3) B77(0-3) B76(0-3) B77(0-3)
+
+                    const __m256i rhs_mat_0145_71_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_71, 136); //B70(8-11) B71(8-11) B70(8-11) B71(8-11) B74(8-11) B75(8-11) B74(8-11) B75(8-11)
+                    const __m256i rhs_mat_2367_71_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_71, 136); //B72(8-11) B73(8-11) B72(8-11) B73(8-11) B76(8-11) B77(8-11) B76(8-11) B77(8-11)
+
+
+                    // Shuffle pattern two - right side input
+                    const __m256i rhs_mat_0145_00_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_00, 221); //B00(4-7) B01(4-7) B00(4-7) B01(4-7) B04(4-7) B05(4-7) B04(4-7) B05(4-7)
+                    const __m256i rhs_mat_2367_00_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_00, 221); //B02(4-7) B03(4-7) B02(4-7) B03(4-7) B06(4-7) B07(4-7) B06(4-7) B07(4-7)
+
+                    const __m256i rhs_mat_0145_01_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_01, 221); //B00(12-15) B01(12-15) B00(12-15) B01(12-15) B04(12-15) B05(12-15) B04(12-15) B05(12-15)
+                    const __m256i rhs_mat_2367_01_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_01, 221); //B02(12-15) B03(12-15) B02(12-15) B03(12-15) B06(12-15) B07(12-15) B06(12-15) B07(12-15)
+
+                    const __m256i rhs_mat_0145_10_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_10, 221); //B10(4-7) B11(4-7) B10(4-7) B11(4-7) B14(4-7) B15(4-7) B14(4-7) B15(4-7)
+                    const __m256i rhs_mat_2367_10_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_10, 221); //B12(4-7) B13(4-7) B12(4-7) B13(4-7) B16(4-7) B17(4-7) B16(4-7) B17(4-7)
+
+                    const __m256i rhs_mat_0145_11_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_11, 221); //B10(12-15) B11(12-15) B10(12-15) B11(12-15) B14(12-15) B15(12-15) B14(12-15) B15(12-15)
+                    const __m256i rhs_mat_2367_11_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_11, 221); //B12(12-15) B13(12-15) B12(12-15) B13(12-15) B16(12-15) B17(12-15) B16(12-15) B17(12-15)
+
+                    const __m256i rhs_mat_0145_20_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_20, 221); //B20(4-7) B21(4-7) B20(4-7) B21(4-7) B24(4-7) B25(4-7) B24(4-7) B25(4-7)
+                    const __m256i rhs_mat_2367_20_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_20, 221); //B22(4-7) B23(4-7) B22(4-7) B23(4-7) B26(4-7) B27(4-7) B26(4-7) B27(4-7)
+
+                    const __m256i rhs_mat_0145_21_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_21, 221); //B20(12-15) B21(12-15) B20(12-15) B21(12-15) B24(12-15) B25(12-15) B24(12-15) B25(12-15)
+                    const __m256i rhs_mat_2367_21_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_21, 221); //B22(12-15) B23(12-15) B22(12-15) B23(12-15) B26(12-15) B27(12-15) B26(12-15) B27(12-15)
+
+                    const __m256i rhs_mat_0145_30_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_30, 221); //B30(4-7) B31(4-7) B30(4-7) B31(4-7) B34(4-7) B35(4-7) B34(4-7) B35(4-7)
+                    const __m256i rhs_mat_2367_30_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_30, 221); //B32(4-7) B33(4-7) B32(4-7) B33(4-7) B36(4-7) B37(4-7) B36(4-7) B37(4-7)
+
+                    const __m256i rhs_mat_0145_31_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_31, 221); //B30(12-15) B31(12-15) B30(12-15) B31(12-15) B34(12-15) B35(12-15) B34(12-15) B35(12-15)
+                    const __m256i rhs_mat_2367_31_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_31, 221); //B32(12-15) B33(12-15) B32(12-15) B33(12-15) B36(12-15) B37(12-15) B36(12-15) B37(12-15)
+
+                    const __m256i rhs_mat_0145_40_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_40, 221); //B40(4-7) B41(4-7) B40(4-7) B41(4-7) B44(4-7) B45(4-7) B44(4-7) B45(4-7)
+                    const __m256i rhs_mat_2367_40_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_40, 221); //B42(4-7) B43(4-7) B42(4-7) B43(4-7) B46(4-7) B47(4-7) B46(4-7) B47(4-7)
+
+                    const __m256i rhs_mat_0145_41_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_41, 221); //B40(12-15) B41(12-15) B40(12-15) B41(12-15) B44(12-15) B45(12-15) B44(12-15) B45(12-15)
+                    const __m256i rhs_mat_2367_41_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_41, 221); //B42(12-15) B43(12-15) B42(12-15) B43(12-15) B46(12-15) B47(12-15) B46(12-15) B47(12-15)
+
+                    const __m256i rhs_mat_0145_50_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_50, 221); //B50(4-7) B51(4-7) B50(4-7) B51(4-7) B54(4-7) B55(4-7) B54(4-7) B55(4-7)
+                    const __m256i rhs_mat_2367_50_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_50, 221); //B52(4-7) B53(4-7) B52(4-7) B53(4-7) B56(4-7) B57(4-7) B56(4-7) B57(4-7)
+
+                    const __m256i rhs_mat_0145_51_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_51, 221); //B50(12-15) B51(12-15) B50(12-15) B51(12-15) B54(12-15) B55(12-15) B54(12-15) B55(12-15)
+                    const __m256i rhs_mat_2367_51_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_51, 221); //B52(12-15) B53(12-15) B52(12-15) B53(12-15) B56(12-15) B57(12-15) B56(12-15) B57(12-15)
+
+                    const __m256i rhs_mat_0145_60_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_60, 221); //B60(4-7) B61(4-7) B60(4-7) B61(4-7) B64(4-7) B65(4-7) B64(4-7) B65(4-7)
+                    const __m256i rhs_mat_2367_60_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_60, 221); //B62(4-7) B63(4-7) B62(4-7) B63(4-7) B66(4-7) B67(4-7) B66(4-7) B67(4-7)
+
+                    const __m256i rhs_mat_0145_61_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_61, 221); //B60(12-15) B61(12-15) B60(12-15) B61(12-15) B64(12-15) B65(12-15) B64(12-15) B65(12-15)
+                    const __m256i rhs_mat_2367_61_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_61, 221); //B62(12-15) B63(12-15) B62(12-15) B63(12-15) B66(12-15) B67(12-15) B66(12-15) B67(12-15)
+
+                    const __m256i rhs_mat_0145_70_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_70, 221); //B70(4-7) B71(4-7) B70(4-7) B71(4-7) B74(4-7) B75(4-7) B74(4-7) B75(4-7)
+                    const __m256i rhs_mat_2367_70_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_70, 221); //B72(4-7) B73(4-7) B72(4-7) B73(4-7) B76(4-7) B77(4-7) B76(4-7) B77(4-7)
+
+                    const __m256i rhs_mat_0145_71_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_71, 221); //B70(12-15) B71(12-15) B70(12-15) B71(12-15) B74(12-15) B75(12-15) B74(12-15) B75(12-15)
+                    const __m256i rhs_mat_2367_71_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_71, 221); //B72(12-15) B73(12-15) B72(12-15) B73(12-15) B76(12-15) B77(12-15) B76(12-15) B77(12-15)
+
+
+                    //Scales and Mins of corresponding sub blocks from different Q2_K structures are stored together
+                    //s00 m00 s01 m01 s10 m10 s11 m11 s20 m20 s21 m21 s30 m30 s31 m31 s40 m40 s41 m41 s50 m50 s51 m51 s60 m60 s61 m61 s70 m70 s71 m71
+
+                    // Combine mins and scales for sub-blocks: 0-1, 2-3, 4-5, 6-7 in the sb loop
+                    const __m128i mins_and_scales_01 = _mm_loadu_si128((const __m128i *)(b_ptr[b].scales + sb * 64));
+                    const __m128i mins_and_scales_23 = _mm_loadu_si128((const __m128i *)(b_ptr[b].scales + 16 + sb * 64));
+                    const __m128i mins_and_scales_45 = _mm_loadu_si128((const __m128i *)(b_ptr[b].scales + 32 + sb * 64));
+                    const __m128i mins_and_scales_67 = _mm_loadu_si128((const __m128i *)(b_ptr[b].scales + 48 + sb * 64));
+
+                    // Extract the scales, which are the lower halves of mins_and_scales
+                    const __m128i scales_01 = _mm_and_si128(mins_and_scales_01, m4b_sse);
+                    const __m128i scales_23 = _mm_and_si128(mins_and_scales_23, m4b_sse);
+                    const __m128i scales_45 = _mm_and_si128(mins_and_scales_45, m4b_sse);
+                    const __m128i scales_67 = _mm_and_si128(mins_and_scales_67, m4b_sse);
+
+                    // Extract the mins, which are the upper halves of mins_and_scales
+                    const __m256i mins_01 = _mm256_cvtepu8_epi16(_mm_and_si128(_mm_srli_epi16(mins_and_scales_01, 4), m4b_sse));
+                    const __m256i mins_23 = _mm256_cvtepu8_epi16(_mm_and_si128(_mm_srli_epi16(mins_and_scales_23, 4), m4b_sse));
+                    const __m256i mins_45 = _mm256_cvtepu8_epi16(_mm_and_si128(_mm_srli_epi16(mins_and_scales_45, 4), m4b_sse));
+                    const __m256i mins_67 = _mm256_cvtepu8_epi16(_mm_and_si128(_mm_srli_epi16(mins_and_scales_67, 4), m4b_sse));
_mm256_cvtepu8_epi16(_mm_shuffle_epi8(scales_01, scalesmask1_sse)); + const __m256i scales_1 = _mm256_cvtepu8_epi16(_mm_shuffle_epi8(scales_01, scalesmask2_sse)); + + const __m256i scales_2 = _mm256_cvtepu8_epi16(_mm_shuffle_epi8(scales_23, scalesmask1_sse)); + const __m256i scales_3 = _mm256_cvtepu8_epi16(_mm_shuffle_epi8(scales_23, scalesmask2_sse)); + + const __m256i scales_4 = _mm256_cvtepu8_epi16(_mm_shuffle_epi8(scales_45, scalesmask1_sse)); + const __m256i scales_5 = _mm256_cvtepu8_epi16(_mm_shuffle_epi8(scales_45, scalesmask2_sse)); + + const __m256i scales_6 = _mm256_cvtepu8_epi16(_mm_shuffle_epi8(scales_67, scalesmask1_sse)); + const __m256i scales_7 = _mm256_cvtepu8_epi16(_mm_shuffle_epi8(scales_67, scalesmask2_sse)); + + const __m256i scale_0145_0 = _mm256_shuffle_epi32(scales_0, 68); + const __m256i scale_2367_0 = _mm256_shuffle_epi32(scales_0, 238); + + const __m256i scale_0145_1 = _mm256_shuffle_epi32(scales_1, 68); + const __m256i scale_2367_1 = _mm256_shuffle_epi32(scales_1, 238); + + const __m256i scale_0145_2 = _mm256_shuffle_epi32(scales_2, 68); + const __m256i scale_2367_2 = _mm256_shuffle_epi32(scales_2, 238); + + const __m256i scale_0145_3 = _mm256_shuffle_epi32(scales_3, 68); + const __m256i scale_2367_3 = _mm256_shuffle_epi32(scales_3, 238); + + const __m256i scale_0145_4 = _mm256_shuffle_epi32(scales_4, 68); + const __m256i scale_2367_4 = _mm256_shuffle_epi32(scales_4, 238); + + const __m256i scale_0145_5 = _mm256_shuffle_epi32(scales_5, 68); + const __m256i scale_2367_5 = _mm256_shuffle_epi32(scales_5, 238); + + const __m256i scale_0145_6 = _mm256_shuffle_epi32(scales_6, 68); + const __m256i scale_2367_6 = _mm256_shuffle_epi32(scales_6, 238); + + const __m256i scale_0145_7 = _mm256_shuffle_epi32(scales_7, 68); + const __m256i scale_2367_7 = _mm256_shuffle_epi32(scales_7, 238); + + // Load the four block_q8_k quantized values interleaved with each other in chunks of eight bytes - A0,A1,A2,A3 + // Loaded as set of 128 bit vectors and repeated into a 256 bit vector + __m256i lhs_mat_0123_00 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 512 * sb))); + __m256i lhs_mat_01_00 = _mm256_permute2f128_si256(lhs_mat_0123_00, lhs_mat_0123_00, 0); + __m256i lhs_mat_23_00 = _mm256_permute2f128_si256(lhs_mat_0123_00, lhs_mat_0123_00, 17); + __m256i lhs_mat_0123_01 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 32 + 512 * sb))); + __m256i lhs_mat_01_01 = _mm256_permute2f128_si256(lhs_mat_0123_01, lhs_mat_0123_01, 0); + __m256i lhs_mat_23_01 = _mm256_permute2f128_si256(lhs_mat_0123_01, lhs_mat_0123_01, 17); + __m256i lhs_mat_0123_10 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 64 + 512 * sb))); + __m256i lhs_mat_01_10 = _mm256_permute2f128_si256(lhs_mat_0123_10, lhs_mat_0123_10, 0); + __m256i lhs_mat_23_10 = _mm256_permute2f128_si256(lhs_mat_0123_10, lhs_mat_0123_10, 17); + __m256i lhs_mat_0123_11 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 96 + 512 * sb))); + __m256i lhs_mat_01_11 = _mm256_permute2f128_si256(lhs_mat_0123_11, lhs_mat_0123_11, 0); + __m256i lhs_mat_23_11 = _mm256_permute2f128_si256(lhs_mat_0123_11, lhs_mat_0123_11, 17); + __m256i lhs_mat_0123_20 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 128 + 512 * sb))); + __m256i lhs_mat_01_20 = _mm256_permute2f128_si256(lhs_mat_0123_20, lhs_mat_0123_20, 0); + __m256i lhs_mat_23_20 = _mm256_permute2f128_si256(lhs_mat_0123_20, lhs_mat_0123_20, 17); + __m256i lhs_mat_0123_21 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 160 + 512 * sb))); + __m256i lhs_mat_01_21 = 
_mm256_permute2f128_si256(lhs_mat_0123_21, lhs_mat_0123_21, 0); + __m256i lhs_mat_23_21 = _mm256_permute2f128_si256(lhs_mat_0123_21, lhs_mat_0123_21, 17); + __m256i lhs_mat_0123_30 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 192 + 512 * sb))); + __m256i lhs_mat_01_30 = _mm256_permute2f128_si256(lhs_mat_0123_30, lhs_mat_0123_30, 0); + __m256i lhs_mat_23_30 = _mm256_permute2f128_si256(lhs_mat_0123_30, lhs_mat_0123_30, 17); + __m256i lhs_mat_0123_31 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 224 + 512 * sb))); + __m256i lhs_mat_01_31 = _mm256_permute2f128_si256(lhs_mat_0123_31, lhs_mat_0123_31, 0); + __m256i lhs_mat_23_31 = _mm256_permute2f128_si256(lhs_mat_0123_31, lhs_mat_0123_31, 17); + + __m256i lhs_mat_0123_40 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 256 + 512 * sb))); + __m256i lhs_mat_01_40 = _mm256_permute2f128_si256(lhs_mat_0123_40, lhs_mat_0123_40, 0); + __m256i lhs_mat_23_40 = _mm256_permute2f128_si256(lhs_mat_0123_40, lhs_mat_0123_40, 17); + __m256i lhs_mat_0123_41 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 288 + 512 * sb))); + __m256i lhs_mat_01_41 = _mm256_permute2f128_si256(lhs_mat_0123_41, lhs_mat_0123_41, 0); + __m256i lhs_mat_23_41 = _mm256_permute2f128_si256(lhs_mat_0123_41, lhs_mat_0123_41, 17); + __m256i lhs_mat_0123_50 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 320 + 512 * sb))); + __m256i lhs_mat_01_50 = _mm256_permute2f128_si256(lhs_mat_0123_50, lhs_mat_0123_50, 0); + __m256i lhs_mat_23_50 = _mm256_permute2f128_si256(lhs_mat_0123_50, lhs_mat_0123_50, 17); + __m256i lhs_mat_0123_51 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 352 + 512 * sb))); + __m256i lhs_mat_01_51 = _mm256_permute2f128_si256(lhs_mat_0123_51, lhs_mat_0123_51, 0); + __m256i lhs_mat_23_51 = _mm256_permute2f128_si256(lhs_mat_0123_51, lhs_mat_0123_51, 17); + __m256i lhs_mat_0123_60 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 384 + 512 * sb))); + __m256i lhs_mat_01_60 = _mm256_permute2f128_si256(lhs_mat_0123_60, lhs_mat_0123_60, 0); + __m256i lhs_mat_23_60 = _mm256_permute2f128_si256(lhs_mat_0123_60, lhs_mat_0123_60, 17); + __m256i lhs_mat_0123_61 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 416 + 512 * sb))); + __m256i lhs_mat_01_61 = _mm256_permute2f128_si256(lhs_mat_0123_61, lhs_mat_0123_61, 0); + __m256i lhs_mat_23_61 = _mm256_permute2f128_si256(lhs_mat_0123_61, lhs_mat_0123_61, 17); + __m256i lhs_mat_0123_70 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 448 + 512 * sb))); + __m256i lhs_mat_01_70 = _mm256_permute2f128_si256(lhs_mat_0123_70, lhs_mat_0123_70, 0); + __m256i lhs_mat_23_70 = _mm256_permute2f128_si256(lhs_mat_0123_70, lhs_mat_0123_70, 17); + __m256i lhs_mat_0123_71 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 480 + 512 * sb))); + __m256i lhs_mat_01_71 = _mm256_permute2f128_si256(lhs_mat_0123_71, lhs_mat_0123_71, 0); + __m256i lhs_mat_23_71 = _mm256_permute2f128_si256(lhs_mat_0123_71, lhs_mat_0123_71, 17); + + // Bsums are loaded for the different Q8_K blocks + __m128i lhs_raw_bsums_01_0123 = _mm_loadu_si128((const __m128i *)((a_ptr[b].bsums + 32 * sb))); + __m128i lhs_raw_bsums_23_0123 = _mm_loadu_si128((const __m128i *)(a_ptr[b].bsums + 8 + 32 * sb)); + __m128i lhs_raw_bsums_01_4567 = _mm_loadu_si128((const __m128i *)((a_ptr[b].bsums + 16 + 32 * sb))); + __m128i lhs_raw_bsums_23_4567 = _mm_loadu_si128((const __m128i *)(a_ptr[b].bsums + 24 + 32 * sb)); + + // Shuffle pattern one - left side input + const __m256i lhs_mat_01_00_sp1 = _mm256_shuffle_epi32(lhs_mat_01_00, 160); 
//A00(0-3) A00(0-3) A01(0-3) A01(0-3) A00(0-3) A00(0-3) A01(0-3) A01(0-3) + const __m256i lhs_mat_23_00_sp1 = _mm256_shuffle_epi32(lhs_mat_23_00, 160); //A02(0-3) A03(0-3) A02(0-3) A03(0-3) A02(0-3) A03(0-3) A02(0-3) A03(0-3) + + const __m256i lhs_mat_01_01_sp1 = _mm256_shuffle_epi32(lhs_mat_01_01, 160); //A00(8-11) A00(8-11) A01(8-11) A01(8-11) A00(8-11) A00(8-11) A01(8-11) A01(8-11) + const __m256i lhs_mat_23_01_sp1 = _mm256_shuffle_epi32(lhs_mat_23_01, 160); //A02(8-11) A03(8-11) A02(8-11) A03(8-11) A02(8-11) A03(8-11) A02(8-11) A03(8-11) + + const __m256i lhs_mat_01_10_sp1 = _mm256_shuffle_epi32(lhs_mat_01_10, 160); //A10(0-3) A10(0-3) A11(0-3) A11(0-3) A10(0-3) A10(0-3) A11(0-3) A11(0-3) + const __m256i lhs_mat_23_10_sp1 = _mm256_shuffle_epi32(lhs_mat_23_10, 160); //A12(0-3) A13(0-3) A12(0-3) A13(0-3) A12(0-3) A13(0-3) A12(0-3) A13(0-3) + + const __m256i lhs_mat_01_11_sp1 = _mm256_shuffle_epi32(lhs_mat_01_11, 160); //A10(8-11) A10(8-11) A11(8-11) A11(8-11) A10(8-11) A10(8-11) A11(8-11) A11(8-11) + const __m256i lhs_mat_23_11_sp1 = _mm256_shuffle_epi32(lhs_mat_23_11, 160); //A12(8-11) A13(8-11) A12(8-11) A13(8-11) A12(8-11) A13(8-11) A12(8-11) A13(8-11) + + const __m256i lhs_mat_01_20_sp1 = _mm256_shuffle_epi32(lhs_mat_01_20, 160); //A20(0-3) A20(0-3) A21(0-3) A21(0-3) A20(0-3) A20(0-3) A21(0-3) A21(0-3) + const __m256i lhs_mat_23_20_sp1 = _mm256_shuffle_epi32(lhs_mat_23_20, 160); //A22(0-3) A23(0-3) A22(0-3) A23(0-3) A22(0-3) A23(0-3) A22(0-3) A23(0-3) + + const __m256i lhs_mat_01_21_sp1 = _mm256_shuffle_epi32(lhs_mat_01_21, 160); //A20(8-11) A20(8-11) A21(8-11) A21(8-11) A20(8-11) A20(8-11) A21(8-11) A21(8-11) + const __m256i lhs_mat_23_21_sp1 = _mm256_shuffle_epi32(lhs_mat_23_21, 160); //A22(8-11) A23(8-11) A22(8-11) A23(8-11) A22(8-11) A23(8-11) A22(8-11) A23(8-11) + + const __m256i lhs_mat_01_30_sp1 = _mm256_shuffle_epi32(lhs_mat_01_30, 160); //A30(0-3) A30(0-3) A31(0-3) A31(0-3) A30(0-3) A30(0-3) A31(0-3) A31(0-3) + const __m256i lhs_mat_23_30_sp1 = _mm256_shuffle_epi32(lhs_mat_23_30, 160); //A32(0-3) A33(0-3) A32(0-3) A33(0-3) A32(0-3) A33(0-3) A32(0-3) A33(0-3) + + const __m256i lhs_mat_01_31_sp1 = _mm256_shuffle_epi32(lhs_mat_01_31, 160); //A30(8-11) A30(8-11) A31(8-11) A31(8-11) A30(8-11) A30(8-11) A31(8-11) A31(8-11) + const __m256i lhs_mat_23_31_sp1 = _mm256_shuffle_epi32(lhs_mat_23_31, 160); //A32(8-11) A33(8-11) A32(8-11) A33(8-11) A32(8-11) A33(8-11) A32(8-11) A33(8-11) + + const __m256i lhs_mat_01_40_sp1 = _mm256_shuffle_epi32(lhs_mat_01_40, 160); //A40(0-3) A40(0-3) A41(0-3) A41(0-3) A40(0-3) A40(0-3) A41(0-3) A41(0-3) + const __m256i lhs_mat_23_40_sp1 = _mm256_shuffle_epi32(lhs_mat_23_40, 160); //A42(0-3) A43(0-3) A42(0-3) A43(0-3) A42(0-3) A43(0-3) A42(0-3) A43(0-3) + + const __m256i lhs_mat_01_41_sp1 = _mm256_shuffle_epi32(lhs_mat_01_41, 160); //A40(8-11) A40(8-11) A41(8-11) A41(8-11) A40(8-11) A40(8-11) A41(8-11) A41(8-11) + const __m256i lhs_mat_23_41_sp1 = _mm256_shuffle_epi32(lhs_mat_23_41, 160); //A42(8-11) A43(8-11) A42(8-11) A43(8-11) A42(8-11) A43(8-11) A42(8-11) A43(8-11) + + const __m256i lhs_mat_01_50_sp1 = _mm256_shuffle_epi32(lhs_mat_01_50, 160); //A50(0-3) A50(0-3) A51(0-3) A51(0-3) A50(0-3) A50(0-3) A51(0-3) A51(0-3) + const __m256i lhs_mat_23_50_sp1 = _mm256_shuffle_epi32(lhs_mat_23_50, 160); //A52(0-3) A53(0-3) A52(0-3) A53(0-3) A52(0-3) A53(0-3) A52(0-3) A53(0-3) + + const __m256i lhs_mat_01_51_sp1 = _mm256_shuffle_epi32(lhs_mat_01_51, 160); //A50(8-11) A50(8-11) A51(8-11) A51(8-11) A50(8-11) A50(8-11) A51(8-11) A51(8-11) + const __m256i 
lhs_mat_23_51_sp1 = _mm256_shuffle_epi32(lhs_mat_23_51, 160); //A52(8-11) A53(8-11) A52(8-11) A53(8-11) A52(8-11) A53(8-11) A52(8-11) A53(8-11) + + const __m256i lhs_mat_01_60_sp1 = _mm256_shuffle_epi32(lhs_mat_01_60, 160); //A60(0-3) A60(0-3) A61(0-3) A61(0-3) A60(0-3) A60(0-3) A61(0-3) A61(0-3) + const __m256i lhs_mat_23_60_sp1 = _mm256_shuffle_epi32(lhs_mat_23_60, 160); //A62(0-3) A63(0-3) A62(0-3) A63(0-3) A62(0-3) A63(0-3) A62(0-3) A63(0-3) + + const __m256i lhs_mat_01_61_sp1 = _mm256_shuffle_epi32(lhs_mat_01_61, 160); //A60(8-11) A60(8-11) A61(8-11) A61(8-11) A60(8-11) A60(8-11) A61(8-11) A61(8-11) + const __m256i lhs_mat_23_61_sp1 = _mm256_shuffle_epi32(lhs_mat_23_61, 160); //A62(8-11) A63(8-11) A62(8-11) A63(8-11) A62(8-11) A63(8-11) A62(8-11) A63(8-11) + + const __m256i lhs_mat_01_70_sp1 = _mm256_shuffle_epi32(lhs_mat_01_70, 160); //A70(0-3) A70(0-3) A71(0-3) A71(0-3) A70(0-3) A70(0-3) A71(0-3) A71(0-3) + const __m256i lhs_mat_23_70_sp1 = _mm256_shuffle_epi32(lhs_mat_23_70, 160); //A72(0-3) A73(0-3) A72(0-3) A73(0-3) A72(0-3) A73(0-3) A72(0-3) A73(0-3) + + const __m256i lhs_mat_01_71_sp1 = _mm256_shuffle_epi32(lhs_mat_01_71, 160); //A70(8-11) A70(8-11) A71(8-11) A71(8-11) A70(8-11) A70(8-11) A71(8-11) A71(8-11) + const __m256i lhs_mat_23_71_sp1 = _mm256_shuffle_epi32(lhs_mat_23_71, 160); //A72(8-11) A73(8-11) A72(8-11) A73(8-11) A72(8-11) A73(8-11) A72(8-11) A73(8-11) + + // Shuffle pattern two- left side input + const __m256i lhs_mat_01_00_sp2 = _mm256_shuffle_epi32(lhs_mat_01_00, 245); //A00(4-7) A00(4-7) A01(4-7) A01(4-7) A00(4-7) A00(4-7) A01(4-7) A01(4-7) + const __m256i lhs_mat_23_00_sp2 = _mm256_shuffle_epi32(lhs_mat_23_00, 245); //A02(4-7) A03(4-7) A02(4-7) A03(4-7) A02(4-7) A03(4-7) A02(4-7) A03(4-7) + + const __m256i lhs_mat_01_01_sp2 = _mm256_shuffle_epi32(lhs_mat_01_01, 245); //A00(12-15) A00(12-15) A01(12-15) A01(12-15) A00(12-15) A00(12-15) A01(12-15) A01(12-15) + const __m256i lhs_mat_23_01_sp2 = _mm256_shuffle_epi32(lhs_mat_23_01, 245); //A02(12-15) A03(12-15) A02(12-15) A03(12-15) A02(12-15) A03(12-15) A02(12-15) A03(12-15) + + const __m256i lhs_mat_01_10_sp2 = _mm256_shuffle_epi32(lhs_mat_01_10, 245); //A10(4-7) A10(4-7) A11(4-7) A11(4-7) A10(4-7) A10(4-7) A11(4-7) A11(4-7) + const __m256i lhs_mat_23_10_sp2 = _mm256_shuffle_epi32(lhs_mat_23_10, 245); //A12(4-7) A13(4-7) A12(4-7) A13(4-7) A12(4-7) A13(4-7) A12(4-7) A13(4-7) + + const __m256i lhs_mat_01_11_sp2 = _mm256_shuffle_epi32(lhs_mat_01_11, 245); //A10(12-15) A10(12-15) A11(12-15) A11(12-15) A10(12-15) A10(12-15) A11(12-15) A11(12-15) + const __m256i lhs_mat_23_11_sp2 = _mm256_shuffle_epi32(lhs_mat_23_11, 245); //A12(12-15) A13(12-15) A12(12-15) A13(12-15) A12(12-15) A13(12-15) A12(12-15) A13(12-15) + + const __m256i lhs_mat_01_20_sp2 = _mm256_shuffle_epi32(lhs_mat_01_20, 245); //A20(4-7) A20(4-7) A21(4-7) A21(4-7) A20(4-7) A20(4-7) A21(4-7) A21(4-7) + const __m256i lhs_mat_23_20_sp2 = _mm256_shuffle_epi32(lhs_mat_23_20, 245); //A22(4-7) A23(4-7) A22(4-7) A23(4-7) A22(4-7) A23(4-7) A22(4-7) A23(4-7) + + const __m256i lhs_mat_01_21_sp2 = _mm256_shuffle_epi32(lhs_mat_01_21, 245); //A20(12-15) A20(12-15) A21(12-15) A21(12-15) A20(12-15) A20(12-15) A21(12-15) A21(12-15) + const __m256i lhs_mat_23_21_sp2 = _mm256_shuffle_epi32(lhs_mat_23_21, 245); //A22(12-15) A23(12-15) A22(12-15) A23(12-15) A22(12-15) A23(12-15) A22(12-15) A23(12-15) + + const __m256i lhs_mat_01_30_sp2 = _mm256_shuffle_epi32(lhs_mat_01_30, 245); //A30(4-7) A30(4-7) A31(4-7) A31(4-7) A30(4-7) A30(4-7) A31(4-7) A31(4-7) + const __m256i 
lhs_mat_23_30_sp2 = _mm256_shuffle_epi32(lhs_mat_23_30, 245); //A32(4-7) A33(4-7) A32(4-7) A33(4-7) A32(4-7) A33(4-7) A32(4-7) A33(4-7) + + const __m256i lhs_mat_01_31_sp2 = _mm256_shuffle_epi32(lhs_mat_01_31, 245); //A30(12-15) A30(12-15) A31(12-15) A31(12-15) A30(12-15) A30(12-15) A31(12-15) A31(12-15) + const __m256i lhs_mat_23_31_sp2 = _mm256_shuffle_epi32(lhs_mat_23_31, 245); //A32(12-15) A33(12-15) A32(12-15) A33(12-15) A32(12-15) A33(12-15) A32(12-15) A33(12-15) + + const __m256i lhs_mat_01_40_sp2 = _mm256_shuffle_epi32(lhs_mat_01_40, 245); //A40(4-7) A40(4-7) A41(4-7) A41(4-7) A40(4-7) A40(4-7) A41(4-7) A41(4-7) + const __m256i lhs_mat_23_40_sp2 = _mm256_shuffle_epi32(lhs_mat_23_40, 245); //A42(4-7) A43(4-7) A42(4-7) A43(4-7) A42(4-7) A43(4-7) A42(4-7) A43(4-7) + + const __m256i lhs_mat_01_41_sp2 = _mm256_shuffle_epi32(lhs_mat_01_41, 245); //A40(12-15) A40(12-15) A41(12-15) A41(12-15) A40(12-15) A40(12-15) A41(12-15) A41(12-15) + const __m256i lhs_mat_23_41_sp2 = _mm256_shuffle_epi32(lhs_mat_23_41, 245); //A42(12-15) A43(12-15) A42(12-15) A43(12-15) A42(12-15) A43(12-15) A42(12-15) A43(12-15) + + const __m256i lhs_mat_01_50_sp2 = _mm256_shuffle_epi32(lhs_mat_01_50, 245); //A50(4-7) A50(4-7) A51(4-7) A51(4-7) A50(4-7) A50(4-7) A51(4-7) A51(4-7) + const __m256i lhs_mat_23_50_sp2 = _mm256_shuffle_epi32(lhs_mat_23_50, 245); //A52(4-7) A53(4-7) A52(4-7) A53(4-7) A52(4-7) A53(4-7) A52(4-7) A53(4-7) + + const __m256i lhs_mat_01_51_sp2 = _mm256_shuffle_epi32(lhs_mat_01_51, 245); //A50(12-15) A50(12-15) A51(12-15) A51(12-15) A50(12-15) A50(12-15) A51(12-15) A51(12-15) + const __m256i lhs_mat_23_51_sp2 = _mm256_shuffle_epi32(lhs_mat_23_51, 245); //A52(12-15) A53(12-15) A52(12-15) A53(12-15) A52(12-15) A53(12-15) A52(12-15) A53(12-15) + + const __m256i lhs_mat_01_60_sp2 = _mm256_shuffle_epi32(lhs_mat_01_60, 245); //A60(4-7) A60(4-7) A61(4-7) A61(4-7) A60(4-7) A60(4-7) A61(4-7) A61(4-7) + const __m256i lhs_mat_23_60_sp2 = _mm256_shuffle_epi32(lhs_mat_23_60, 245); //A62(4-7) A63(4-7) A62(4-7) A63(4-7) A62(4-7) A63(4-7) A62(4-7) A63(4-7) + + const __m256i lhs_mat_01_61_sp2 = _mm256_shuffle_epi32(lhs_mat_01_61, 245); //A60(12-15) A60(12-15) A61(12-15) A61(12-15) A60(12-15) A60(12-15) A61(12-15) A61(12-15) + const __m256i lhs_mat_23_61_sp2 = _mm256_shuffle_epi32(lhs_mat_23_61, 245); //A62(12-15) A63(12-15) A62(12-15) A63(12-15) A62(12-15) A63(12-15) A62(12-15) A63(12-15) + + const __m256i lhs_mat_01_70_sp2 = _mm256_shuffle_epi32(lhs_mat_01_70, 245); //A70(4-7) A70(4-7) A71(4-7) A71(4-7) A70(4-7) A70(4-7) A71(4-7) A71(4-7) + const __m256i lhs_mat_23_70_sp2 = _mm256_shuffle_epi32(lhs_mat_23_70, 245); //A72(4-7) A73(4-7) A72(4-7) A73(4-7) A72(4-7) A73(4-7) A72(4-7) A73(4-7) + + const __m256i lhs_mat_01_71_sp2 = _mm256_shuffle_epi32(lhs_mat_01_71, 245); //A70(12-15) A70(12-15) A71(12-15) A71(12-15) A70(12-15) A70(12-15) A71(12-15) A71(12-15) + const __m256i lhs_mat_23_71_sp2 = _mm256_shuffle_epi32(lhs_mat_23_71, 245); //A72(12-15) A73(12-15) A72(12-15) A73(12-15) A72(12-15) A73(12-15) A72(12-15) A73(12-15) + + // The shuffled LHS and RHS values are combined with byte-wise dot products within each 32-bit lane: corresponding bytes are multiplied and the adjacent products summed pairwise into 16-bit integers + __m256i iacc_mat_00_0_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_00_sp1, lhs_mat_01_00_sp1),_mm256_maddubs_epi16(rhs_mat_0145_01_sp1, lhs_mat_01_01_sp1)); + __m256i iacc_mat_01_0_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_00_sp1,
lhs_mat_01_00_sp1),_mm256_maddubs_epi16(rhs_mat_2367_01_sp1, lhs_mat_01_01_sp1)); + + __m256i iacc_mat_10_0_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_00_sp1, lhs_mat_23_00_sp1),_mm256_maddubs_epi16(rhs_mat_0145_01_sp1, lhs_mat_23_01_sp1)); + __m256i iacc_mat_11_0_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_00_sp1, lhs_mat_23_00_sp1),_mm256_maddubs_epi16(rhs_mat_2367_01_sp1, lhs_mat_23_01_sp1)); + + __m256i iacc_mat_00_1_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_10_sp1, lhs_mat_01_10_sp1),_mm256_maddubs_epi16(rhs_mat_0145_11_sp1, lhs_mat_01_11_sp1)); + __m256i iacc_mat_01_1_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_10_sp1, lhs_mat_01_10_sp1),_mm256_maddubs_epi16(rhs_mat_2367_11_sp1, lhs_mat_01_11_sp1)); + + __m256i iacc_mat_10_1_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_10_sp1, lhs_mat_23_10_sp1),_mm256_maddubs_epi16(rhs_mat_0145_11_sp1, lhs_mat_23_11_sp1)); + __m256i iacc_mat_11_1_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_10_sp1, lhs_mat_23_10_sp1),_mm256_maddubs_epi16(rhs_mat_2367_11_sp1, lhs_mat_23_11_sp1)); + + __m256i iacc_mat_00_2_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_20_sp1, lhs_mat_01_20_sp1),_mm256_maddubs_epi16(rhs_mat_0145_21_sp1, lhs_mat_01_21_sp1)); + __m256i iacc_mat_01_2_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_20_sp1, lhs_mat_01_20_sp1),_mm256_maddubs_epi16(rhs_mat_2367_21_sp1, lhs_mat_01_21_sp1)); + + __m256i iacc_mat_10_2_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_20_sp1, lhs_mat_23_20_sp1),_mm256_maddubs_epi16(rhs_mat_0145_21_sp1, lhs_mat_23_21_sp1)); + __m256i iacc_mat_11_2_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_20_sp1, lhs_mat_23_20_sp1),_mm256_maddubs_epi16(rhs_mat_2367_21_sp1, lhs_mat_23_21_sp1)); + + __m256i iacc_mat_00_3_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_30_sp1, lhs_mat_01_30_sp1),_mm256_maddubs_epi16(rhs_mat_0145_31_sp1, lhs_mat_01_31_sp1)); + __m256i iacc_mat_01_3_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_30_sp1, lhs_mat_01_30_sp1),_mm256_maddubs_epi16(rhs_mat_2367_31_sp1, lhs_mat_01_31_sp1)); + + __m256i iacc_mat_10_3_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_30_sp1, lhs_mat_23_30_sp1),_mm256_maddubs_epi16(rhs_mat_0145_31_sp1, lhs_mat_23_31_sp1)); + __m256i iacc_mat_11_3_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_30_sp1, lhs_mat_23_30_sp1),_mm256_maddubs_epi16(rhs_mat_2367_31_sp1, lhs_mat_23_31_sp1)); + + __m256i iacc_mat_00_4_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_40_sp1, lhs_mat_01_40_sp1),_mm256_maddubs_epi16(rhs_mat_0145_41_sp1, lhs_mat_01_41_sp1)); + __m256i iacc_mat_01_4_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_40_sp1, lhs_mat_01_40_sp1),_mm256_maddubs_epi16(rhs_mat_2367_41_sp1, lhs_mat_01_41_sp1)); + + __m256i iacc_mat_10_4_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_40_sp1, lhs_mat_23_40_sp1),_mm256_maddubs_epi16(rhs_mat_0145_41_sp1, lhs_mat_23_41_sp1)); + __m256i iacc_mat_11_4_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_40_sp1, lhs_mat_23_40_sp1),_mm256_maddubs_epi16(rhs_mat_2367_41_sp1, lhs_mat_23_41_sp1)); + + __m256i iacc_mat_00_5_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_50_sp1, lhs_mat_01_50_sp1),_mm256_maddubs_epi16(rhs_mat_0145_51_sp1, lhs_mat_01_51_sp1)); + __m256i iacc_mat_01_5_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_50_sp1, lhs_mat_01_50_sp1),_mm256_maddubs_epi16(rhs_mat_2367_51_sp1, lhs_mat_01_51_sp1)); + + __m256i iacc_mat_10_5_sp1 = 
_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_50_sp1, lhs_mat_23_50_sp1),_mm256_maddubs_epi16(rhs_mat_0145_51_sp1, lhs_mat_23_51_sp1)); + __m256i iacc_mat_11_5_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_50_sp1, lhs_mat_23_50_sp1),_mm256_maddubs_epi16(rhs_mat_2367_51_sp1, lhs_mat_23_51_sp1)); + + __m256i iacc_mat_00_6_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_60_sp1, lhs_mat_01_60_sp1),_mm256_maddubs_epi16(rhs_mat_0145_61_sp1, lhs_mat_01_61_sp1)); + __m256i iacc_mat_01_6_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_60_sp1, lhs_mat_01_60_sp1),_mm256_maddubs_epi16(rhs_mat_2367_61_sp1, lhs_mat_01_61_sp1)); + + __m256i iacc_mat_10_6_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_60_sp1, lhs_mat_23_60_sp1),_mm256_maddubs_epi16(rhs_mat_0145_61_sp1, lhs_mat_23_61_sp1)); + __m256i iacc_mat_11_6_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_60_sp1, lhs_mat_23_60_sp1),_mm256_maddubs_epi16(rhs_mat_2367_61_sp1, lhs_mat_23_61_sp1)); + + __m256i iacc_mat_00_7_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_70_sp1, lhs_mat_01_70_sp1),_mm256_maddubs_epi16(rhs_mat_0145_71_sp1, lhs_mat_01_71_sp1)); + __m256i iacc_mat_01_7_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_70_sp1, lhs_mat_01_70_sp1),_mm256_maddubs_epi16(rhs_mat_2367_71_sp1, lhs_mat_01_71_sp1)); + + __m256i iacc_mat_10_7_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_70_sp1, lhs_mat_23_70_sp1),_mm256_maddubs_epi16(rhs_mat_0145_71_sp1, lhs_mat_23_71_sp1)); + __m256i iacc_mat_11_7_sp1 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_70_sp1, lhs_mat_23_70_sp1),_mm256_maddubs_epi16(rhs_mat_2367_71_sp1, lhs_mat_23_71_sp1)); + + + __m256i iacc_mat_00_0_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_00_sp2, lhs_mat_01_00_sp2),_mm256_maddubs_epi16(rhs_mat_0145_01_sp2, lhs_mat_01_01_sp2)); + __m256i iacc_mat_01_0_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_00_sp2, lhs_mat_01_00_sp2),_mm256_maddubs_epi16(rhs_mat_2367_01_sp2, lhs_mat_01_01_sp2)); + + __m256i iacc_mat_10_0_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_00_sp2, lhs_mat_23_00_sp2),_mm256_maddubs_epi16(rhs_mat_0145_01_sp2, lhs_mat_23_01_sp2)); + __m256i iacc_mat_11_0_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_00_sp2, lhs_mat_23_00_sp2),_mm256_maddubs_epi16(rhs_mat_2367_01_sp2, lhs_mat_23_01_sp2)); + + __m256i iacc_mat_00_1_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_10_sp2, lhs_mat_01_10_sp2),_mm256_maddubs_epi16(rhs_mat_0145_11_sp2, lhs_mat_01_11_sp2)); + __m256i iacc_mat_01_1_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_10_sp2, lhs_mat_01_10_sp2),_mm256_maddubs_epi16(rhs_mat_2367_11_sp2, lhs_mat_01_11_sp2)); + + __m256i iacc_mat_10_1_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_10_sp2, lhs_mat_23_10_sp2),_mm256_maddubs_epi16(rhs_mat_0145_11_sp2, lhs_mat_23_11_sp2)); + __m256i iacc_mat_11_1_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_10_sp2, lhs_mat_23_10_sp2),_mm256_maddubs_epi16(rhs_mat_2367_11_sp2, lhs_mat_23_11_sp2)); + + __m256i iacc_mat_00_2_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_20_sp2, lhs_mat_01_20_sp2),_mm256_maddubs_epi16(rhs_mat_0145_21_sp2, lhs_mat_01_21_sp2)); + __m256i iacc_mat_01_2_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_20_sp2, lhs_mat_01_20_sp2),_mm256_maddubs_epi16(rhs_mat_2367_21_sp2, lhs_mat_01_21_sp2)); + + __m256i iacc_mat_10_2_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_20_sp2, 
lhs_mat_23_20_sp2),_mm256_maddubs_epi16(rhs_mat_0145_21_sp2, lhs_mat_23_21_sp2)); + __m256i iacc_mat_11_2_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_20_sp2, lhs_mat_23_20_sp2),_mm256_maddubs_epi16(rhs_mat_2367_21_sp2, lhs_mat_23_21_sp2)); + + __m256i iacc_mat_00_3_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_30_sp2, lhs_mat_01_30_sp2),_mm256_maddubs_epi16(rhs_mat_0145_31_sp2, lhs_mat_01_31_sp2)); + __m256i iacc_mat_01_3_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_30_sp2, lhs_mat_01_30_sp2),_mm256_maddubs_epi16(rhs_mat_2367_31_sp2, lhs_mat_01_31_sp2)); + + __m256i iacc_mat_10_3_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_30_sp2, lhs_mat_23_30_sp2),_mm256_maddubs_epi16(rhs_mat_0145_31_sp2, lhs_mat_23_31_sp2)); + __m256i iacc_mat_11_3_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_30_sp2, lhs_mat_23_30_sp2),_mm256_maddubs_epi16(rhs_mat_2367_31_sp2, lhs_mat_23_31_sp2)); + + __m256i iacc_mat_00_4_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_40_sp2, lhs_mat_01_40_sp2),_mm256_maddubs_epi16(rhs_mat_0145_41_sp2, lhs_mat_01_41_sp2)); + __m256i iacc_mat_01_4_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_40_sp2, lhs_mat_01_40_sp2),_mm256_maddubs_epi16(rhs_mat_2367_41_sp2, lhs_mat_01_41_sp2)); + + __m256i iacc_mat_10_4_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_40_sp2, lhs_mat_23_40_sp2),_mm256_maddubs_epi16(rhs_mat_0145_41_sp2, lhs_mat_23_41_sp2)); + __m256i iacc_mat_11_4_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_40_sp2, lhs_mat_23_40_sp2),_mm256_maddubs_epi16(rhs_mat_2367_41_sp2, lhs_mat_23_41_sp2)); + + __m256i iacc_mat_00_5_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_50_sp2, lhs_mat_01_50_sp2),_mm256_maddubs_epi16(rhs_mat_0145_51_sp2, lhs_mat_01_51_sp2)); + __m256i iacc_mat_01_5_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_50_sp2, lhs_mat_01_50_sp2),_mm256_maddubs_epi16(rhs_mat_2367_51_sp2, lhs_mat_01_51_sp2)); + + __m256i iacc_mat_10_5_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_50_sp2, lhs_mat_23_50_sp2),_mm256_maddubs_epi16(rhs_mat_0145_51_sp2, lhs_mat_23_51_sp2)); + __m256i iacc_mat_11_5_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_50_sp2, lhs_mat_23_50_sp2),_mm256_maddubs_epi16(rhs_mat_2367_51_sp2, lhs_mat_23_51_sp2)); + + __m256i iacc_mat_00_6_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_60_sp2, lhs_mat_01_60_sp2),_mm256_maddubs_epi16(rhs_mat_0145_61_sp2, lhs_mat_01_61_sp2)); + __m256i iacc_mat_01_6_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_60_sp2, lhs_mat_01_60_sp2),_mm256_maddubs_epi16(rhs_mat_2367_61_sp2, lhs_mat_01_61_sp2)); + + __m256i iacc_mat_10_6_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_60_sp2, lhs_mat_23_60_sp2),_mm256_maddubs_epi16(rhs_mat_0145_61_sp2, lhs_mat_23_61_sp2)); + __m256i iacc_mat_11_6_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_60_sp2, lhs_mat_23_60_sp2),_mm256_maddubs_epi16(rhs_mat_2367_61_sp2, lhs_mat_23_61_sp2)); + + __m256i iacc_mat_00_7_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_70_sp2, lhs_mat_01_70_sp2),_mm256_maddubs_epi16(rhs_mat_0145_71_sp2, lhs_mat_01_71_sp2)); + __m256i iacc_mat_01_7_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_70_sp2, lhs_mat_01_70_sp2),_mm256_maddubs_epi16(rhs_mat_2367_71_sp2, lhs_mat_01_71_sp2)); + + __m256i iacc_mat_10_7_sp2 = _mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_70_sp2, lhs_mat_23_70_sp2),_mm256_maddubs_epi16(rhs_mat_0145_71_sp2, lhs_mat_23_71_sp2)); + __m256i iacc_mat_11_7_sp2 = 
_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_70_sp2, lhs_mat_23_70_sp2),_mm256_maddubs_epi16(rhs_mat_2367_71_sp2, lhs_mat_23_71_sp2)); + + // Combine results from both shuffle patterns for each output block. + __m256i iacc_mat_00_0 = _mm256_add_epi16(iacc_mat_00_0_sp1, iacc_mat_00_0_sp2); + __m256i iacc_mat_01_0 = _mm256_add_epi16(iacc_mat_01_0_sp1, iacc_mat_01_0_sp2); + __m256i iacc_mat_10_0 = _mm256_add_epi16(iacc_mat_10_0_sp1, iacc_mat_10_0_sp2); + __m256i iacc_mat_11_0 = _mm256_add_epi16(iacc_mat_11_0_sp1, iacc_mat_11_0_sp2); + + __m256i iacc_mat_00_1 = _mm256_add_epi16(iacc_mat_00_1_sp1, iacc_mat_00_1_sp2); + __m256i iacc_mat_01_1 = _mm256_add_epi16(iacc_mat_01_1_sp1, iacc_mat_01_1_sp2); + __m256i iacc_mat_10_1 = _mm256_add_epi16(iacc_mat_10_1_sp1, iacc_mat_10_1_sp2); + __m256i iacc_mat_11_1 = _mm256_add_epi16(iacc_mat_11_1_sp1, iacc_mat_11_1_sp2); + + __m256i iacc_mat_00_2 = _mm256_add_epi16(iacc_mat_00_2_sp1, iacc_mat_00_2_sp2); + __m256i iacc_mat_01_2 = _mm256_add_epi16(iacc_mat_01_2_sp1, iacc_mat_01_2_sp2); + __m256i iacc_mat_10_2 = _mm256_add_epi16(iacc_mat_10_2_sp1, iacc_mat_10_2_sp2); + __m256i iacc_mat_11_2 = _mm256_add_epi16(iacc_mat_11_2_sp1, iacc_mat_11_2_sp2); + + __m256i iacc_mat_00_3 = _mm256_add_epi16(iacc_mat_00_3_sp1, iacc_mat_00_3_sp2); + __m256i iacc_mat_01_3 = _mm256_add_epi16(iacc_mat_01_3_sp1, iacc_mat_01_3_sp2); + __m256i iacc_mat_10_3 = _mm256_add_epi16(iacc_mat_10_3_sp1, iacc_mat_10_3_sp2); + __m256i iacc_mat_11_3 = _mm256_add_epi16(iacc_mat_11_3_sp1, iacc_mat_11_3_sp2); + + __m256i iacc_mat_00_4 = _mm256_add_epi16(iacc_mat_00_4_sp1, iacc_mat_00_4_sp2); + __m256i iacc_mat_01_4 = _mm256_add_epi16(iacc_mat_01_4_sp1, iacc_mat_01_4_sp2); + __m256i iacc_mat_10_4 = _mm256_add_epi16(iacc_mat_10_4_sp1, iacc_mat_10_4_sp2); + __m256i iacc_mat_11_4 = _mm256_add_epi16(iacc_mat_11_4_sp1, iacc_mat_11_4_sp2); + + __m256i iacc_mat_00_5 = _mm256_add_epi16(iacc_mat_00_5_sp1, iacc_mat_00_5_sp2); + __m256i iacc_mat_01_5 = _mm256_add_epi16(iacc_mat_01_5_sp1, iacc_mat_01_5_sp2); + __m256i iacc_mat_10_5 = _mm256_add_epi16(iacc_mat_10_5_sp1, iacc_mat_10_5_sp2); + __m256i iacc_mat_11_5 = _mm256_add_epi16(iacc_mat_11_5_sp1, iacc_mat_11_5_sp2); + + __m256i iacc_mat_00_6 = _mm256_add_epi16(iacc_mat_00_6_sp1, iacc_mat_00_6_sp2); + __m256i iacc_mat_01_6 = _mm256_add_epi16(iacc_mat_01_6_sp1, iacc_mat_01_6_sp2); + __m256i iacc_mat_10_6 = _mm256_add_epi16(iacc_mat_10_6_sp1, iacc_mat_10_6_sp2); + __m256i iacc_mat_11_6 = _mm256_add_epi16(iacc_mat_11_6_sp1, iacc_mat_11_6_sp2); + + __m256i iacc_mat_00_7 = _mm256_add_epi16(iacc_mat_00_7_sp1, iacc_mat_00_7_sp2); + __m256i iacc_mat_01_7 = _mm256_add_epi16(iacc_mat_01_7_sp1, iacc_mat_01_7_sp2); + __m256i iacc_mat_10_7 = _mm256_add_epi16(iacc_mat_10_7_sp1, iacc_mat_10_7_sp2); + __m256i iacc_mat_11_7 = _mm256_add_epi16(iacc_mat_11_7_sp1, iacc_mat_11_7_sp2); + + // The combined dot product sums are multiplied with the corresponding sub-block scale values and widened to 32-bit integers + iacc_mat_00_0 = _mm256_madd_epi16(iacc_mat_00_0, scale_0145_0); + iacc_mat_01_0 = _mm256_madd_epi16(iacc_mat_01_0, scale_2367_0); + iacc_mat_10_0 = _mm256_madd_epi16(iacc_mat_10_0, scale_0145_0); + iacc_mat_11_0 = _mm256_madd_epi16(iacc_mat_11_0, scale_2367_0); + + iacc_mat_00_1 = _mm256_madd_epi16(iacc_mat_00_1, scale_0145_1); + iacc_mat_01_1 = _mm256_madd_epi16(iacc_mat_01_1, scale_2367_1); + iacc_mat_10_1 = _mm256_madd_epi16(iacc_mat_10_1, scale_0145_1); + iacc_mat_11_1 = _mm256_madd_epi16(iacc_mat_11_1, scale_2367_1); + + iacc_mat_00_2 = _mm256_madd_epi16(iacc_mat_00_2,
scale_0145_2); + iacc_mat_01_2 = _mm256_madd_epi16(iacc_mat_01_2, scale_2367_2); + iacc_mat_10_2 = _mm256_madd_epi16(iacc_mat_10_2, scale_0145_2); + iacc_mat_11_2 = _mm256_madd_epi16(iacc_mat_11_2, scale_2367_2); + + iacc_mat_00_3 = _mm256_madd_epi16(iacc_mat_00_3, scale_0145_3); + iacc_mat_01_3 = _mm256_madd_epi16(iacc_mat_01_3, scale_2367_3); + iacc_mat_10_3 = _mm256_madd_epi16(iacc_mat_10_3, scale_0145_3); + iacc_mat_11_3 = _mm256_madd_epi16(iacc_mat_11_3, scale_2367_3); + + iacc_mat_00_4 = _mm256_madd_epi16(iacc_mat_00_4, scale_0145_4); + iacc_mat_01_4 = _mm256_madd_epi16(iacc_mat_01_4, scale_2367_4); + iacc_mat_10_4 = _mm256_madd_epi16(iacc_mat_10_4, scale_0145_4); + iacc_mat_11_4 = _mm256_madd_epi16(iacc_mat_11_4, scale_2367_4); + + iacc_mat_00_5 = _mm256_madd_epi16(iacc_mat_00_5, scale_0145_5); + iacc_mat_01_5 = _mm256_madd_epi16(iacc_mat_01_5, scale_2367_5); + iacc_mat_10_5 = _mm256_madd_epi16(iacc_mat_10_5, scale_0145_5); + iacc_mat_11_5 = _mm256_madd_epi16(iacc_mat_11_5, scale_2367_5); + + iacc_mat_00_6 = _mm256_madd_epi16(iacc_mat_00_6, scale_0145_6); + iacc_mat_01_6 = _mm256_madd_epi16(iacc_mat_01_6, scale_2367_6); + iacc_mat_10_6 = _mm256_madd_epi16(iacc_mat_10_6, scale_0145_6); + iacc_mat_11_6 = _mm256_madd_epi16(iacc_mat_11_6, scale_2367_6); + + iacc_mat_00_7 = _mm256_madd_epi16(iacc_mat_00_7, scale_0145_7); + iacc_mat_01_7 = _mm256_madd_epi16(iacc_mat_01_7, scale_2367_7); + iacc_mat_10_7 = _mm256_madd_epi16(iacc_mat_10_7, scale_0145_7); + iacc_mat_11_7 = _mm256_madd_epi16(iacc_mat_11_7, scale_2367_7); + + __m256i iacc_mat_00 = _mm256_add_epi32(_mm256_add_epi32(_mm256_add_epi32(iacc_mat_00_0, iacc_mat_00_1), _mm256_add_epi32(iacc_mat_00_2, iacc_mat_00_3)), _mm256_add_epi32(_mm256_add_epi32(iacc_mat_00_4, iacc_mat_00_5), _mm256_add_epi32(iacc_mat_00_6, iacc_mat_00_7))); + __m256i iacc_mat_01 = _mm256_add_epi32(_mm256_add_epi32(_mm256_add_epi32(iacc_mat_01_0, iacc_mat_01_1), _mm256_add_epi32(iacc_mat_01_2, iacc_mat_01_3)), _mm256_add_epi32(_mm256_add_epi32(iacc_mat_01_4, iacc_mat_01_5), _mm256_add_epi32(iacc_mat_01_6, iacc_mat_01_7))); + __m256i iacc_mat_10 = _mm256_add_epi32(_mm256_add_epi32(_mm256_add_epi32(iacc_mat_10_0, iacc_mat_10_1), _mm256_add_epi32(iacc_mat_10_2, iacc_mat_10_3)), _mm256_add_epi32(_mm256_add_epi32(iacc_mat_10_4, iacc_mat_10_5), _mm256_add_epi32(iacc_mat_10_6, iacc_mat_10_7))); + __m256i iacc_mat_11 = _mm256_add_epi32(_mm256_add_epi32(_mm256_add_epi32(iacc_mat_11_0, iacc_mat_11_1), _mm256_add_epi32(iacc_mat_11_2, iacc_mat_11_3)), _mm256_add_epi32(_mm256_add_epi32(iacc_mat_11_4, iacc_mat_11_5), _mm256_add_epi32(iacc_mat_11_6, iacc_mat_11_7))); + + // Straighten out to make 4 row vectors + __m256i iacc_row_0 = _mm256_blend_epi32(iacc_mat_00, _mm256_shuffle_epi32(iacc_mat_01, 78), 204); + __m256i iacc_row_1 = _mm256_blend_epi32(_mm256_shuffle_epi32(iacc_mat_00, 78), iacc_mat_01, 204); + __m256i iacc_row_2 = _mm256_blend_epi32(iacc_mat_10, _mm256_shuffle_epi32(iacc_mat_11, 78), 204); + __m256i iacc_row_3 = _mm256_blend_epi32(_mm256_shuffle_epi32(iacc_mat_10, 78), iacc_mat_11, 204); + + // Load the scale values for all 4 Q8_K blocks and repeat them across lanes + const __m128 row_scale_f32_sse = _mm_load_ps(a_ptr[b].d); + const __m256 row_scale_f32 = _mm256_set_m128(row_scale_f32_sse, row_scale_f32_sse); + + // Multiply with the appropriate scales and accumulate (for both d and dmin) below + acc_rows[0] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_0), _mm256_mul_ps(col_scale_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 0)), acc_rows[0]); +
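
For readers following the kernel, it helps to spell out the scalar math that this d/dmin split vectorizes. Q2_K dequantizes each 16-value sub-block as d*scale*q - dmin*min, so the dot product against a Q8_K row splits into a scale term over the quant products and a min term that only needs the precomputed Q8_K bsums. A minimal scalar sketch under that assumption; the helper name and flattened arguments are illustrative, not the actual block_q2_K/block_q8_K structs:

```cpp
#include <cstdint>

// Scalar reference for one Q2_K x Q8_K super-block (QK_K = 256:
// 16 sub-blocks of 16 quants). scales[] packs a 4-bit scale in the
// low nibble and a 4-bit min in the high nibble, mirroring the
// m4b_sse masking and the >>4 shift in the kernel above.
static float q2k_q8k_dot_ref(const std::uint8_t scales[16],
                             const std::uint8_t q2[256],   // unpacked 2-bit quants, 0..3
                             const std::int8_t  q8[256],   // Q8_K quants
                             const std::int16_t bsums[16], // per-sub-block sums of q8
                             float d, float dmin,          // Q2_K super-block scales
                             float d8) {                   // Q8_K scale
    std::int32_t isum = 0; // feeds acc_rows in the kernel
    std::int32_t imin = 0; // feeds acc_min_rows in the kernel
    for (int sb = 0; sb < 16; sb++) {
        std::int32_t dot = 0;
        for (int j = 0; j < 16; j++) {
            dot += q2[16 * sb + j] * q8[16 * sb + j];
        }
        isum += (scales[sb] & 0x0F) * dot;     // scale term
        imin += (scales[sb] >> 4) * bsums[sb]; // min term: never touches the quants
    }
    // dequantization is d*scale*q - dmin*min, hence the kernel's final
    // subtraction of acc_min_rows from acc_rows at the store.
    return d8 * (d * isum - dmin * imin);
}
```

The kernel accumulates the two terms separately (acc_rows here, acc_min_rows further down) and only subtracts them at the final store, which is why the bsums loads above never enter the maddubs dot products.
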
acc_rows[1] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_1), _mm256_mul_ps(col_scale_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 85)), acc_rows[1]); + acc_rows[2] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_2), _mm256_mul_ps(col_scale_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 170)), acc_rows[2]); + acc_rows[3] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_3), _mm256_mul_ps(col_scale_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 255)), acc_rows[3]); + + __m256i lhs_bsums_01_0123 = _mm256_inserti128_si256(_mm256_castsi128_si256(lhs_raw_bsums_01_0123), lhs_raw_bsums_01_0123, 1); + __m256i lhs_bsums_23_0123 = _mm256_inserti128_si256(_mm256_castsi128_si256(lhs_raw_bsums_23_0123), lhs_raw_bsums_23_0123, 1); + __m256i lhs_bsums_01_4567 = _mm256_inserti128_si256(_mm256_castsi128_si256(lhs_raw_bsums_01_4567), lhs_raw_bsums_01_4567, 1); + __m256i lhs_bsums_23_4567 = _mm256_inserti128_si256(_mm256_castsi128_si256(lhs_raw_bsums_23_4567), lhs_raw_bsums_23_4567, 1); + + // Take two bsums from two Q8_Ks at a time and multiply with corresponding mins values from each Q2_K + __m256i iacc_row_min_0_01 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_01_0123, 0), mins_01); + __m256i iacc_row_min_1_01 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_01_0123, 170), mins_01); + __m256i iacc_row_min_2_01 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_23_0123, 0), mins_01); + __m256i iacc_row_min_3_01 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_23_0123, 170), mins_01); + + __m256i iacc_row_min_0_23 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_01_0123, 85), mins_23); + __m256i iacc_row_min_1_23 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_01_0123, 255), mins_23); + __m256i iacc_row_min_2_23 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_23_0123, 85), mins_23); + __m256i iacc_row_min_3_23 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_23_0123, 255), mins_23); + + __m256i iacc_row_min_0_45 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_01_4567, 0), mins_45); + __m256i iacc_row_min_1_45 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_01_4567, 170), mins_45); + __m256i iacc_row_min_2_45 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_23_4567, 0), mins_45); + __m256i iacc_row_min_3_45 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_23_4567, 170), mins_45); + + __m256i iacc_row_min_0_67 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_01_4567, 85), mins_67); + __m256i iacc_row_min_1_67 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_01_4567, 255), mins_67); + __m256i iacc_row_min_2_67 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_23_4567, 85), mins_67); + __m256i iacc_row_min_3_67 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_23_4567, 255), mins_67); + + __m256i iacc_row_min_0 = _mm256_add_epi32(_mm256_add_epi32(iacc_row_min_0_01, iacc_row_min_0_23), _mm256_add_epi32(iacc_row_min_0_45,iacc_row_min_0_67)); + __m256i iacc_row_min_1 = _mm256_add_epi32(_mm256_add_epi32(iacc_row_min_1_01, iacc_row_min_1_23), _mm256_add_epi32(iacc_row_min_1_45,iacc_row_min_1_67)); + __m256i iacc_row_min_2 = _mm256_add_epi32(_mm256_add_epi32(iacc_row_min_2_01, iacc_row_min_2_23), _mm256_add_epi32(iacc_row_min_2_45,iacc_row_min_2_67)); + __m256i iacc_row_min_3 = _mm256_add_epi32(_mm256_add_epi32(iacc_row_min_3_01, iacc_row_min_3_23), _mm256_add_epi32(iacc_row_min_3_45,iacc_row_min_3_67)); + + acc_min_rows[0] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_min_0), _mm256_mul_ps(col_dmin_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 0)), 
acc_min_rows[0]); + acc_min_rows[1] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_min_1), _mm256_mul_ps(col_dmin_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 85)), acc_min_rows[1]); + acc_min_rows[2] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_min_2), _mm256_mul_ps(col_dmin_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 170)), acc_min_rows[2]); + acc_min_rows[3] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_min_3), _mm256_mul_ps(col_dmin_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 255)), acc_min_rows[3]); + } + } + // Store the accumulated values + for (int i = 0; i < 4; i++) { + _mm256_storeu_ps((float * )(s + ((y * 4 + i) * bs + x * 8)), _mm256_sub_ps(acc_rows[i], acc_min_rows[i])); + } + } + } +#else + + ggml_gemm_q2_K_8x8_q8_K_generic(n, s, bs, vx, vy, nr, nc); + + +#endif +} diff --git a/ml/backend/ggml/ggml/src/ggml-cpu/arch/x86/x86.go b/ml/backend/ggml/ggml/src/ggml-cpu/arch/x86/x86.go new file mode 100644 index 000000000..96181ebe3 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cpu/arch/x86/x86.go @@ -0,0 +1,5 @@ +package x86 + +// #cgo CXXFLAGS: -std=c++17 +// #cgo CPPFLAGS: -I${SRCDIR}/../.. -I${SRCDIR}/../../.. -I${SRCDIR}/../../../../include +import "C" diff --git a/ml/backend/ggml/ggml/src/ggml-cpu/common.h b/ml/backend/ggml/ggml/src/ggml-cpu/common.h index 3df01c1ed..353563dc3 100644 --- a/ml/backend/ggml/ggml/src/ggml-cpu/common.h +++ b/ml/backend/ggml/ggml/src/ggml-cpu/common.h @@ -1,9 +1,10 @@ #pragma once #include "ggml.h" -#include "ggml-cpu-traits.h" +#include "traits.h" #include "ggml-cpu-impl.h" #include "ggml-impl.h" +#include "simd-mappings.h" #ifdef __cplusplus @@ -12,11 +13,11 @@ // convenience functions/macros for use in template calls // note: these won't be required after the 'traits' lookup table is used. static inline ggml_fp16_t f32_to_f16(float x) { - return GGML_FP32_TO_FP16(x); + return GGML_CPU_FP32_TO_FP16(x); } static inline float f16_to_f32(ggml_fp16_t x) { - return GGML_FP16_TO_FP32(x); + return GGML_CPU_FP16_TO_FP32(x); } static inline ggml_bf16_t f32_to_bf16(float x) { diff --git a/ml/backend/ggml/ggml/src/ggml-cpu/cpu_amd64.go b/ml/backend/ggml/ggml/src/ggml-cpu/cpu_amd64.go new file mode 100644 index 000000000..4743a0d66 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cpu/cpu_amd64.go @@ -0,0 +1,3 @@ +package cpu + +import _ "github.com/ollama/ollama/ml/backend/ggml/ggml/src/ggml-cpu/arch/x86" diff --git a/ml/backend/ggml/ggml/src/ggml-cpu/cpu_arm64.go b/ml/backend/ggml/ggml/src/ggml-cpu/cpu_arm64.go new file mode 100644 index 000000000..170971e2f --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cpu/cpu_arm64.go @@ -0,0 +1,3 @@ +package cpu + +import _ "github.com/ollama/ollama/ml/backend/ggml/ggml/src/ggml-cpu/arch/arm" diff --git a/ml/backend/ggml/ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp b/ml/backend/ggml/ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp deleted file mode 100644 index 8ff6d64a4..000000000 --- a/ml/backend/ggml/ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp +++ /dev/null @@ -1,6431 +0,0 @@ -#define GGML_COMMON_IMPL_CPP -#define GGML_COMMON_DECL_CPP -#include "ggml-common.h" -#include "ggml-backend-impl.h" - -#include "ggml-quants.h" -#include "ggml-impl.h" -#include "ggml-cpu.h" -#include "ggml-cpu-impl.h" -#include "ggml-cpu-traits.h" - -#include <cmath> -#include <cstring> -#include <cassert> -#include <cfloat> -#include <cstdlib> // for qsort -#include <cstdio> // for GGML_ASSERT - -#include "ggml-cpu-aarch64.h" - -// TODO: move to include file?
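
The deletion that follows removes the old CPU repack translation unit, superseded in this sync by the per-arch sources wired up through the Go shims above. The file opens with a templated interleaved-block layout whose size arithmetic is easy to misread; as a self-contained restatement, with ggml_half modeled as uint16_t and the QK4_0/QK8_0 = 32 constants inlined (assumptions for illustration, matching the values in ggml-common.h):

```cpp
#include <cstdint>

using ggml_half = std::uint16_t; // stand-in for ggml's fp16 storage type
constexpr int QK4_0 = 32;        // quants per q4_0 block (4 bits each)
constexpr int QK8_0 = 32;        // quants per q8_0 block (8 bits each)

// K is the quant width in bits, N the number of interleaved blocks.
template <int K> constexpr int QK_0() {
    if constexpr (K == 4) return QK4_0;
    if constexpr (K == 8) return QK8_0;
    return -1;
}

template <int K, int N> struct block {
    ggml_half    d[N];                         // one delta per interleaved block
    std::int8_t  qs[(QK_0<K>() * N * K) / 8];  // N blocks' quants, bit-packed
};

// e.g. block<4, 8> interleaves 8 q4_0 blocks:
// 8 fp16 deltas + (32 quants * 8 blocks * 4 bits) / 8 = 128 quant bytes
static_assert(sizeof(block<4, 8>) == 8 * sizeof(ggml_half) + 128, "unexpected layout");
```

The /8 converts bits to bytes, which is exactly what the static_asserts in the deleted code pin down against QK8_0 multiples.
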
-template <int K> constexpr int QK_0() { - if constexpr (K == 4) { - return QK4_0; - } - if constexpr (K == 8) { - return QK8_0; - } - return -1; -} - -template <int K, int N> struct block { - ggml_half d[N]; // deltas for N qK_0 blocks - int8_t qs[(QK_0<K>() * N * K) / 8]; // quants for N qK_0 blocks -}; - -// control size -static_assert(sizeof(block<4, 4>) == 4 * sizeof(ggml_half) + QK8_0 * 2, "wrong block<4,4> size/padding"); -static_assert(sizeof(block<4, 8>) == 8 * sizeof(ggml_half) + QK8_0 * 4, "wrong block<4,8> size/padding"); -static_assert(sizeof(block<8, 4>) == 4 * sizeof(ggml_half) + QK8_0 * 4, "wrong block<8,4> size/padding"); -static_assert(sizeof(block<8, 8>) == 8 * sizeof(ggml_half) + QK8_0 * 8, "wrong block<8,8> size/padding"); - -using block_q4_0x4 = block<4, 4>; -using block_q4_0x8 = block<4, 8>; -using block_q8_0x4 = block<8, 4>; -using block_q8_0x8 = block<8, 8>; - - -struct block_q4_Kx8 { - ggml_half d[8]; // super-block scale for quantized scales - ggml_half dmin[8]; // super-block scale for quantized mins - uint8_t scales[96]; // scales and mins, quantized with 6 bits - uint8_t qs[1024]; // 4-bit quants -}; - -static_assert(sizeof(block_q4_Kx8) == sizeof(ggml_half) * 16 + K_SCALE_SIZE * 8 + QK_K * 4, "wrong q4_K block size/padding"); - -struct block_q8_Kx4 { - float d[4]; // delta - int8_t qs[QK_K * 4]; // quants - int16_t bsums[QK_K / 4]; // sum of quants in groups of 16 -}; - -static_assert(sizeof(block_q8_Kx4) == sizeof(float) * 4 + QK_K * 4 + (QK_K / 4) * sizeof(int16_t), "wrong q8_K block size/padding"); - -struct block_iq4_nlx4 { - ggml_half d[4]; // deltas for 4 iq4_nl blocks - uint8_t qs[QK4_NL * 2]; // nibbles / quants for 4 iq4_nl blocks -}; - -static_assert(sizeof(block_iq4_nlx4) == 4 * sizeof(ggml_half) + QK4_NL * 2, "wrong iq4_nlx4 block size/padding"); - -#if defined(__GNUC__) -#pragma GCC diagnostic ignored "-Woverlength-strings" -#endif - -#define UNUSED GGML_UNUSED - -static inline int nearest_int(float fval) { - assert(fabsf(fval) <= 4194303.f); - float val = fval + 12582912.f; - int i; memcpy(&i, &val, sizeof(int)); - return (i & 0x007fffff) - 0x00400000; -} - -// Functions to create the interleaved data layout formats - -// interleave 4 block_q4_0s in blocks of blck_size_interleave -// returns an interleaved block_q4_0x4 -// in the interleaved block_q4_0x4, place deltas for 4 block_q4_0 blocks -// first, then interleave quants from 4 block_q4_0s in blocks of blck_size_interleave -// -// - in : an array of block_q4_0 pointers -// - blck_size_interleave : the block_q4_0 quants bytes are interleaved in blocks of -// blck_size_interleave bytes -// - xor_mask : the mask to convert the nibbles in block_q4_0 quants bytes -// from bias offset form to pure sign form (this saves subtract -// operations during unpacking) -// -#if defined(__AVX__) -#if defined(__F16C__) -#if defined(__AVX512F__) -#define GGML_F32Cx8x2_LOAD(x, y) _mm512_cvtph_ps(_mm256_set_m128i(_mm_loadu_si128((const __m128i *)(y)), _mm_loadu_si128((const __m128i *)(x)))) -#define GGML_F32Cx16_REPEAT_LOAD(x) _mm512_cvtph_ps(_mm256_set_m128i(x, x)) -#endif -// the _mm256_cvt intrinsics require F16C -#define GGML_F32Cx8_LOAD(x) _mm256_cvtph_ps(_mm_loadu_si128((const __m128i *)(x))) -#define GGML_F32Cx8_REPEAT_LOAD(x, loadMask) _mm256_cvtph_ps(_mm_shuffle_epi32(_mm_maskload_epi32((int const*)(x), loadMask), 68)) -#define GGML_F32Cx8_REARRANGE_LOAD(x, arrangeMask) _mm256_cvtph_ps(_mm_shuffle_epi8(_mm_loadu_si128((const __m128i *) x), arrangeMask)) -#else -#if defined(__AVX512F__) -static inline __m512
__avx512_f32cx8x2_load(ggml_fp16_t *x, ggml_fp16_t *y) { - float tmp[16]; - - for (int i = 0; i < 8; i++) { - tmp[i] = GGML_FP16_TO_FP32(x[i]); - } - - for (int i = 0; i < 8; i++) { - tmp[i + 8] = GGML_FP16_TO_FP32(y[i]); - } - - return _mm512_loadu_ps(tmp); -} -static inline __m512 __avx512_repeat_f32cx16_load(__m128i x) { - float tmp[16]; - uint16_t tmphalf[8]; - _mm_storeu_si128((__m128i*)tmphalf, x); - - for (int i = 0; i < 4; i++) { - tmp[i] = GGML_FP16_TO_FP32(tmphalf[i]); - tmp[i + 4] = GGML_FP16_TO_FP32(tmphalf[i]); - tmp[i + 8] = GGML_FP16_TO_FP32(tmphalf[i]); - tmp[i + 12] = GGML_FP16_TO_FP32(tmphalf[i]); - } - - return _mm512_loadu_ps(tmp); -} -#endif -static inline __m256 __avx_f32cx8_load(ggml_fp16_t *x) { - float tmp[8]; - - for (int i = 0; i < 8; i++) { - tmp[i] = GGML_FP16_TO_FP32(x[i]); - } - - return _mm256_loadu_ps(tmp); -} -static inline __m256 __avx_repeat_f32cx8_load(ggml_fp16_t *x) { - float tmp[8]; - - for (int i = 0; i < 4; i++) { - tmp[i] = GGML_FP16_TO_FP32(x[i]); - tmp[i + 4] = GGML_FP16_TO_FP32(x[i]); - } - - return _mm256_loadu_ps(tmp); -} -static inline __m256 __avx_rearranged_f32cx8_load(ggml_fp16_t *x, __m128i arrangeMask) { - uint16_t tmphalf[8]; - float tmp[8]; - - _mm_storeu_si128((__m128i*)tmphalf, _mm_shuffle_epi8(_mm_loadu_si128((const __m128i *) x), arrangeMask)); - for (int i = 0; i < 8; i++) { - tmp[i] = GGML_FP16_TO_FP32(tmphalf[i]); - } - - return _mm256_loadu_ps(tmp); -} - -#define GGML_F32Cx8_LOAD(x) __avx_f32cx8_load(x) -#define GGML_F32Cx8_REPEAT_LOAD(x, loadMask) __avx_repeat_f32cx8_load(x) -#define GGML_F32Cx8_REARRANGE_LOAD(x, arrangeMask) __avx_rearranged_f32cx8_load(x, arrangeMask) -#if defined(__AVX512F__) -#define GGML_F32Cx8x2_LOAD(x, y) __avx512_f32cx8x2_load(x, y) -#define GGML_F32Cx16_REPEAT_LOAD(x) __avx512_repeat_f32cx16_load(x) -#endif -#endif -#endif - - -#if defined(__AVX2__) || defined(__AVX512F__) -#if defined(__AVX512F__) -// add int16_t pairwise and return as 512 bit int vector, then add the accumulator -static inline __m512i sum_i16_pairs_acc_int32x16(const __m512i acc, const __m512i x) { - const __m512i ones = _mm512_set1_epi16(1); - return _mm512_add_epi32(acc, _mm512_madd_epi16(ones, x)); -} - -static inline __m512i mul_sum_us8_pairs_acc_int32x16(const __m512i acc, const __m512i ax, const __m512i sy) { -#if defined(__AVX512VNNI__) - return _mm512_dpbusd_epi32(acc, ax, sy); -#else - // Perform multiplication and create 16-bit values - const __m512i dot = _mm512_maddubs_epi16(ax, sy); - return sum_i16_pairs_acc_int32x16(acc, dot); -#endif -} - -// multiply int8_t, add results pairwise twice and return as 512 bit int vector,then add the accumulator -static inline __m512i mul_sum_i8_pairs_acc_int32x16(const __m512i acc, const __m512i x, const __m512i y) { - const __m512i zero = _mm512_setzero_si512(); - // Get absolute values of x vectors - const __m512i ax = _mm512_abs_epi8(x); - // Sign the values of the y vectors - __mmask64 blt0 = _mm512_movepi8_mask(x); - const __m512i sy = _mm512_mask_sub_epi8(y, blt0, zero, y); - return mul_sum_us8_pairs_acc_int32x16(acc, ax, sy); -} -#endif - -// add int16_t pairwise and return as 256 bit int vector, then add the accumulator -static inline __m256i sum_i16_pairs_acc_int32x8(const __m256i acc, const __m256i x) { - const __m256i ones = _mm256_set1_epi16(1); - return _mm256_add_epi32(acc, _mm256_madd_epi16(ones, x)); -} - -static inline __m256i mul_sum_us8_pairs_acc_int32x8(const __m256i acc, const __m256i ax, const __m256i sy) { -#if defined(__AVX512VNNI__) && defined(__AVX512VL__) - 
return _mm256_dpbusd_epi32(acc, ax, sy); -#elif defined(__AVXVNNI__) - return _mm256_dpbusd_avx_epi32(acc, ax, sy); -#else - // Perform multiplication and create 16-bit values - const __m256i dot = _mm256_maddubs_epi16(ax, sy); - return sum_i16_pairs_acc_int32x8(acc, dot); -#endif -} - -// Integer variant of the function defined in ggml-quants.c -// multiply int8_t, add results pairwise twice and return as 256 bit int vector, then add the accumulator -static inline __m256i mul_sum_i8_pairs_acc_int32x8(const __m256i acc, const __m256i x, const __m256i y) { -#if defined(__AVXVNNIINT8__) - return _mm256_dpbssd_epi32(acc, x, y); -#else - // Get absolute values of x vectors - const __m256i ax = _mm256_sign_epi8(x, x); - // Sign the values of the y vectors - const __m256i sy = _mm256_sign_epi8(y, x); - return mul_sum_us8_pairs_acc_int32x8(acc, ax, sy); -#endif -} -#endif - -static const int8_t kvalues_iq4nl[16] = {-127, -104, -83, -65, -49, -35, -22, -10, 1, 13, 25, 38, 53, 69, 89, 113}; - -static void ggml_quantize_mat_q8_0_4x4(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { - assert(QK8_0 == 32); - assert(k % QK8_0 == 0); - const int nb = k / QK8_0; - - block_q8_0x4 * GGML_RESTRICT y = (block_q8_0x4 *) vy; - -#if defined(__ARM_NEON) - float32x4_t srcv[4][8]; - float id[4]; - - for (int i = 0; i < nb; i++) { - float32x4_t asrcv[8]; - float32x4_t amaxv[8]; - - for (int row_iter = 0; row_iter < 4; row_iter++) { - for (int j = 0; j < 8; j++) srcv[row_iter][j] = vld1q_f32(x + row_iter * k + i * 32 + 4 * j); - for (int j = 0; j < 8; j++) asrcv[j] = vabsq_f32(srcv[row_iter][j]); - - for (int j = 0; j < 4; j++) amaxv[2 * j] = vmaxq_f32(asrcv[2 * j], asrcv[2 * j + 1]); - for (int j = 0; j < 2; j++) amaxv[4 * j] = vmaxq_f32(amaxv[4 * j], amaxv[4 * j + 2]); - for (int j = 0; j < 1; j++) amaxv[8 * j] = vmaxq_f32(amaxv[8 * j], amaxv[8 * j + 4]); - - const float amax = vmaxvq_f32(amaxv[0]); - - const float d = amax / ((1 << 7) - 1); - id[row_iter] = d ? 
1.0f / d : 0.0f; - - y[i].d[row_iter] = GGML_FP32_TO_FP16(d); - } - - for (int j = 0; j < 8; j++) { - float32x4_t v = vmulq_n_f32(srcv[0][j], id[0]); - int32x4_t vi = vcvtnq_s32_f32(v); - y[i].qs[16 * j + 0] = vgetq_lane_s32(vi, 0); - y[i].qs[16 * j + 1] = vgetq_lane_s32(vi, 1); - y[i].qs[16 * j + 2] = vgetq_lane_s32(vi, 2); - y[i].qs[16 * j + 3] = vgetq_lane_s32(vi, 3); - - v = vmulq_n_f32(srcv[1][j], id[1]); - vi = vcvtnq_s32_f32(v); - y[i].qs[16 * j + 4] = vgetq_lane_s32(vi, 0); - y[i].qs[16 * j + 5] = vgetq_lane_s32(vi, 1); - y[i].qs[16 * j + 6] = vgetq_lane_s32(vi, 2); - y[i].qs[16 * j + 7] = vgetq_lane_s32(vi, 3); - - v = vmulq_n_f32(srcv[2][j], id[2]); - vi = vcvtnq_s32_f32(v); - y[i].qs[16 * j + 8] = vgetq_lane_s32(vi, 0); - y[i].qs[16 * j + 9] = vgetq_lane_s32(vi, 1); - y[i].qs[16 * j + 10] = vgetq_lane_s32(vi, 2); - y[i].qs[16 * j + 11] = vgetq_lane_s32(vi, 3); - - v = vmulq_n_f32(srcv[3][j], id[3]); - vi = vcvtnq_s32_f32(v); - y[i].qs[16 * j + 12] = vgetq_lane_s32(vi, 0); - y[i].qs[16 * j + 13] = vgetq_lane_s32(vi, 1); - y[i].qs[16 * j + 14] = vgetq_lane_s32(vi, 2); - y[i].qs[16 * j + 15] = vgetq_lane_s32(vi, 3); - } - } -#else - // scalar - const int blck_size_interleave = 4; - float srcv[4][QK8_0]; - float id[4]; - - for (int i = 0; i < nb; i++) { - for (int row_iter = 0; row_iter < 4; row_iter++) { - float amax = 0.0f; // absolute max - - for (int j = 0; j < QK8_0; j++) { - srcv[row_iter][j] = x[row_iter * k + i * QK8_0 + j]; - amax = MAX(amax, fabsf(srcv[row_iter][j])); - } - - const float d = amax / ((1 << 7) - 1); - id[row_iter] = d ? 1.0f / d : 0.0f; - - y[i].d[row_iter] = GGML_FP32_TO_FP16(d); - } - - for (int j = 0; j < QK8_0 * 4; j++) { - int src_offset = (j / (4 * blck_size_interleave)) * blck_size_interleave; - int src_id = (j % (4 * blck_size_interleave)) / blck_size_interleave; - src_offset += (j % blck_size_interleave); - - float x0 = srcv[src_id][src_offset] * id[src_id]; - y[i].qs[j] = roundf(x0); - } - } -#endif -} - -static void ggml_quantize_mat_q8_0_4x8(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { - assert(QK8_0 == 32); - assert(k % QK8_0 == 0); - const int nb = k / QK8_0; - - block_q8_0x4 * GGML_RESTRICT y = (block_q8_0x4 *) vy; - -#if defined(__ARM_NEON) - float32x4_t srcv[4][8]; - float id[4]; - - for (int i = 0; i < nb; i++) { - float32x4_t asrcv[8]; - float32x4_t amaxv[8]; - - for (int row_iter = 0; row_iter < 4; row_iter++) { - for (int j = 0; j < 8; j++) srcv[row_iter][j] = vld1q_f32(x + row_iter * k + i * 32 + 4 * j); - for (int j = 0; j < 8; j++) asrcv[j] = vabsq_f32(srcv[row_iter][j]); - - for (int j = 0; j < 4; j++) amaxv[2 * j] = vmaxq_f32(asrcv[2 * j], asrcv[2 * j + 1]); - for (int j = 0; j < 2; j++) amaxv[4 * j] = vmaxq_f32(amaxv[4 * j], amaxv[4 * j + 2]); - for (int j = 0; j < 1; j++) amaxv[8 * j] = vmaxq_f32(amaxv[8 * j], amaxv[8 * j + 4]); - - const float amax = vmaxvq_f32(amaxv[0]); - - const float d = amax / ((1 << 7) - 1); - id[row_iter] = d ? 
1.0f / d : 0.0f; - - y[i].d[row_iter] = GGML_FP32_TO_FP16(d); - } - - for (int j = 0; j < 4; j++) { - float32x4_t v = vmulq_n_f32(srcv[0][2 * j], id[0]); - int32x4_t vi = vcvtnq_s32_f32(v); - y[i].qs[32 * j + 0] = vgetq_lane_s32(vi, 0); - y[i].qs[32 * j + 1] = vgetq_lane_s32(vi, 1); - y[i].qs[32 * j + 2] = vgetq_lane_s32(vi, 2); - y[i].qs[32 * j + 3] = vgetq_lane_s32(vi, 3); - v = vmulq_n_f32(srcv[0][2 * j + 1], id[0]); - vi = vcvtnq_s32_f32(v); - y[i].qs[32 * j + 4] = vgetq_lane_s32(vi, 0); - y[i].qs[32 * j + 5] = vgetq_lane_s32(vi, 1); - y[i].qs[32 * j + 6] = vgetq_lane_s32(vi, 2); - y[i].qs[32 * j + 7] = vgetq_lane_s32(vi, 3); - - v = vmulq_n_f32(srcv[1][2 * j], id[1]); - vi = vcvtnq_s32_f32(v); - y[i].qs[32 * j + 8] = vgetq_lane_s32(vi, 0); - y[i].qs[32 * j + 9] = vgetq_lane_s32(vi, 1); - y[i].qs[32 * j + 10] = vgetq_lane_s32(vi, 2); - y[i].qs[32 * j + 11] = vgetq_lane_s32(vi, 3); - v = vmulq_n_f32(srcv[1][2 * j + 1], id[1]); - vi = vcvtnq_s32_f32(v); - y[i].qs[32 * j + 12] = vgetq_lane_s32(vi, 0); - y[i].qs[32 * j + 13] = vgetq_lane_s32(vi, 1); - y[i].qs[32 * j + 14] = vgetq_lane_s32(vi, 2); - y[i].qs[32 * j + 15] = vgetq_lane_s32(vi, 3); - - v = vmulq_n_f32(srcv[2][2 * j], id[2]); - vi = vcvtnq_s32_f32(v); - y[i].qs[32 * j + 16] = vgetq_lane_s32(vi, 0); - y[i].qs[32 * j + 17] = vgetq_lane_s32(vi, 1); - y[i].qs[32 * j + 18] = vgetq_lane_s32(vi, 2); - y[i].qs[32 * j + 19] = vgetq_lane_s32(vi, 3); - v = vmulq_n_f32(srcv[2][2 * j + 1], id[2]); - vi = vcvtnq_s32_f32(v); - y[i].qs[32 * j + 20] = vgetq_lane_s32(vi, 0); - y[i].qs[32 * j + 21] = vgetq_lane_s32(vi, 1); - y[i].qs[32 * j + 22] = vgetq_lane_s32(vi, 2); - y[i].qs[32 * j + 23] = vgetq_lane_s32(vi, 3); - - v = vmulq_n_f32(srcv[3][2 * j], id[3]); - vi = vcvtnq_s32_f32(v); - y[i].qs[32 * j + 24] = vgetq_lane_s32(vi, 0); - y[i].qs[32 * j + 25] = vgetq_lane_s32(vi, 1); - y[i].qs[32 * j + 26] = vgetq_lane_s32(vi, 2); - y[i].qs[32 * j + 27] = vgetq_lane_s32(vi, 3); - v = vmulq_n_f32(srcv[3][2 * j + 1], id[3]); - vi = vcvtnq_s32_f32(v); - y[i].qs[32 * j + 28] = vgetq_lane_s32(vi, 0); - y[i].qs[32 * j + 29] = vgetq_lane_s32(vi, 1); - y[i].qs[32 * j + 30] = vgetq_lane_s32(vi, 2); - y[i].qs[32 * j + 31] = vgetq_lane_s32(vi, 3); - } - } -#elif defined(__AVX2__) || defined(__AVX__) - float id[4]; - __m256 srcv[4][4]; - __m256 idvec[4]; - - for (int i = 0; i < nb; i++) { - for (int row_iter = 0; row_iter < 4; row_iter++) { - // Load elements into 4 AVX vectors - __m256 v0 = _mm256_loadu_ps( x + row_iter * k + i * 32 ); - __m256 v1 = _mm256_loadu_ps( x + row_iter * k + i * 32 + 8 ); - __m256 v2 = _mm256_loadu_ps( x + row_iter * k + i * 32 + 16 ); - __m256 v3 = _mm256_loadu_ps( x + row_iter * k + i * 32 + 24 ); - - // Compute max(abs(e)) for the block - const __m256 signBit = _mm256_set1_ps( -0.0f ); - __m256 maxAbs = _mm256_andnot_ps( signBit, v0 ); - maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v1 ) ); - maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v2 ) ); - maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v3 ) ); - - __m128 max4 = _mm_max_ps( _mm256_extractf128_ps( maxAbs, 1 ), _mm256_castps256_ps128( maxAbs ) ); - max4 = _mm_max_ps( max4, _mm_movehl_ps( max4, max4 ) ); - max4 = _mm_max_ss( max4, _mm_movehdup_ps( max4 ) ); - const float maxScalar = _mm_cvtss_f32( max4 ); - - // Divided by 127.f to mirror results in quantize_row_q8_0 - const float d = maxScalar / 127.f; - id[row_iter] = ( maxScalar != 0.0f ) ? 127.f / maxScalar : 0.0f; //d ? 
1.0f / d : 0.0f; - - // Store the scale for the individual block - y[i].d[row_iter] = GGML_FP32_TO_FP16(d); - - // Store the values in blocks of eight values - Aim is to use these later for block interleaving - srcv[row_iter][0] = v0; - srcv[row_iter][1] = v1; - srcv[row_iter][2] = v2; - srcv[row_iter][3] = v3; - idvec[row_iter] = _mm256_set1_ps(id[row_iter]); - } - - // The loop iterates four times - The aim is to get 4 corresponding chunks of eight bytes from the original weight blocks that are interleaved - for (int j = 0; j < 4; j++) { - // Apply the multiplier - __m256 v0 = _mm256_mul_ps(srcv[0][j], idvec[0]); - __m256 v1 = _mm256_mul_ps(srcv[1][j], idvec[1]); - __m256 v2 = _mm256_mul_ps(srcv[2][j], idvec[2]); - __m256 v3 = _mm256_mul_ps(srcv[3][j], idvec[3]); - - // Round to nearest integer - v0 = _mm256_round_ps( v0, _MM_ROUND_NEAREST ); - v1 = _mm256_round_ps( v1, _MM_ROUND_NEAREST ); - v2 = _mm256_round_ps( v2, _MM_ROUND_NEAREST ); - v3 = _mm256_round_ps( v3, _MM_ROUND_NEAREST ); - - // Convert floats to integers - __m256i i0 = _mm256_cvtps_epi32( v0 ); - __m256i i1 = _mm256_cvtps_epi32( v1 ); - __m256i i2 = _mm256_cvtps_epi32( v2 ); - __m256i i3 = _mm256_cvtps_epi32( v3 ); - -#if defined(__AVX2__) - // Convert int32 to int16 - i0 = _mm256_packs_epi32( i0, i1 ); - i2 = _mm256_packs_epi32( i2, i3 ); - // Convert int16 to int8 - i0 = _mm256_packs_epi16( i0, i2 ); - - // Permute and store the quantized weights in the required order after the pack instruction - const __m256i perm = _mm256_setr_epi32( 0, 4, 1, 5, 2, 6, 3, 7 ); - i0 = _mm256_permutevar8x32_epi32( i0, perm ); - - _mm256_storeu_si256((__m256i *)(y[i].qs + 32 * j), i0); -#else - // Since we don't have in AVX some necessary functions, - // we split the registers in half and call AVX2 analogs from SSE - __m128i ni0 = _mm256_castsi256_si128( i0 ); - __m128i ni1 = _mm256_extractf128_si256( i0, 1); - __m128i ni2 = _mm256_castsi256_si128( i1 ); - __m128i ni3 = _mm256_extractf128_si256( i1, 1); - __m128i ni4 = _mm256_castsi256_si128( i2 ); - __m128i ni5 = _mm256_extractf128_si256( i2, 1); - __m128i ni6 = _mm256_castsi256_si128( i3 ); - __m128i ni7 = _mm256_extractf128_si256( i3, 1); - - // Convert int32 to int16 - ni0 = _mm_packs_epi32( ni0, ni1 ); - ni2 = _mm_packs_epi32( ni2, ni3 ); - ni4 = _mm_packs_epi32( ni4, ni5 ); - ni6 = _mm_packs_epi32( ni6, ni7 ); - // Convert int16 to int8 - ni0 = _mm_packs_epi16( ni0, ni2 ); - ni4 = _mm_packs_epi16( ni4, ni6 ); - _mm_storeu_si128((__m128i *)(y[i].qs + 32 * j), ni0); - _mm_storeu_si128((__m128i *)(y[i].qs + 32 * j + 16), ni4); -#endif - } - } -#else - // scalar - const int blck_size_interleave = 8; - float srcv[4][QK8_0]; - float id[4]; - - for (int i = 0; i < nb; i++) { - for (int row_iter = 0; row_iter < 4; row_iter++) { - float amax = 0.0f; // absolute max - - for (int j = 0; j < QK8_0; j++) { - srcv[row_iter][j] = x[row_iter * k + i * QK8_0 + j]; - amax = MAX(amax, fabsf(srcv[row_iter][j])); - } - - const float d = amax / ((1 << 7) - 1); - id[row_iter] = d ? 
1.0f / d : 0.0f; - - y[i].d[row_iter] = GGML_FP32_TO_FP16(d); - } - - for (int j = 0; j < QK8_0 * 4; j++) { - int src_offset = (j / (4 * blck_size_interleave)) * blck_size_interleave; - int src_id = (j % (4 * blck_size_interleave)) / blck_size_interleave; - src_offset += (j % blck_size_interleave); - - float x0 = srcv[src_id][src_offset] * id[src_id]; - y[i].qs[j] = roundf(x0); - } - } -#endif -} - -static void ggml_quantize_mat_q8_K_4x8(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { - assert(QK_K == 256); - assert(k % QK_K == 0); - const int nb = k / QK_K; - - block_q8_Kx4 * GGML_RESTRICT y = (block_q8_Kx4 *) vy; - -#if defined(__AVX2__) - float iscale[4]; - __m256 srcv[4][32]; - __m256 iscale_vec[4]; - - for (int i = 0; i < nb; i++) { - for (int row_iter = 0; row_iter < 4; row_iter++) { - // Load elements into 4 AVX vectors - __m256 v0 = _mm256_loadu_ps( x + row_iter * k + i * 256 ); - __m256 v1 = _mm256_loadu_ps( x + row_iter * k + i * 256 + 8 ); - __m256 v2 = _mm256_loadu_ps( x + row_iter * k + i * 256 + 16 ); - __m256 v3 = _mm256_loadu_ps( x + row_iter * k + i * 256 + 24 ); - - // Compute max(abs(e)) for the block - const __m256 signBit = _mm256_set1_ps( -0.0f ); - __m256 abs0 = _mm256_andnot_ps( signBit, v0 ); - __m256 abs1 = _mm256_andnot_ps( signBit, v1 ); - __m256 abs2 = _mm256_andnot_ps( signBit, v2 ); - __m256 abs3 = _mm256_andnot_ps( signBit, v3 ); - - __m256 maxAbs = _mm256_max_ps( abs0, abs1 ); - maxAbs = _mm256_max_ps( maxAbs, abs2 ); - maxAbs = _mm256_max_ps( maxAbs, abs3 ); - - __m256 mask0 = _mm256_cmp_ps( maxAbs, v0, _CMP_EQ_OQ ); - __m256 mask1 = _mm256_cmp_ps( maxAbs, v1, _CMP_EQ_OQ ); - __m256 mask2 = _mm256_cmp_ps( maxAbs, v2, _CMP_EQ_OQ ); - __m256 mask3 = _mm256_cmp_ps( maxAbs, v3, _CMP_EQ_OQ ); - - __m256 maskAbs = _mm256_or_ps(_mm256_or_ps(mask0, mask1),_mm256_or_ps(mask2, mask3)); - - srcv[row_iter][0] = v0; - srcv[row_iter][1] = v1; - srcv[row_iter][2] = v2; - srcv[row_iter][3] = v3; - - for (int sb = 1; sb < 8; sb++) { - // Temporarily stores absolute quant values - __m256 tempAbs = maxAbs; - - // Load elements into 4 AVX vectors - __m256 v0 = _mm256_loadu_ps( x + row_iter * k + i * 256 + sb * 32); - __m256 v1 = _mm256_loadu_ps( x + row_iter * k + i * 256 + sb * 32 + 8 ); - __m256 v2 = _mm256_loadu_ps( x + row_iter * k + i * 256 + sb * 32 + 16 ); - __m256 v3 = _mm256_loadu_ps( x + row_iter * k + i * 256 + sb * 32 + 24 ); - - // Compute max(abs(e)) for the block - __m256 abs0 = _mm256_andnot_ps( signBit, v0 ); - __m256 abs1 = _mm256_andnot_ps( signBit, v1 ); - __m256 abs2 = _mm256_andnot_ps( signBit, v2 ); - __m256 abs3 = _mm256_andnot_ps( signBit, v3 ); - - maxAbs = _mm256_max_ps( maxAbs, abs0 ); - maxAbs = _mm256_max_ps( maxAbs, abs1 ); - maxAbs = _mm256_max_ps( maxAbs, abs2 ); - maxAbs = _mm256_max_ps( maxAbs, abs3 ); - - __m256 mask_prev = _mm256_cmp_ps( tempAbs, maxAbs, _CMP_EQ_OQ ); - maskAbs = _mm256_and_ps( maskAbs, mask_prev ); - - mask0 = _mm256_cmp_ps( maxAbs, v0, _CMP_EQ_OQ ); - mask1 = _mm256_cmp_ps( maxAbs, v1, _CMP_EQ_OQ ); - mask2 = _mm256_cmp_ps( maxAbs, v2, _CMP_EQ_OQ ); - mask3 = _mm256_cmp_ps( maxAbs, v3, _CMP_EQ_OQ ); - - __m256 mask_curr = _mm256_or_ps(_mm256_or_ps(mask0, mask1),_mm256_or_ps(mask2, mask3)); - maskAbs = _mm256_or_ps(maskAbs, mask_curr); - - srcv[row_iter][sb * 4] = v0; - srcv[row_iter][sb * 4 + 1] = v1; - srcv[row_iter][sb * 4 + 2] = v2; - srcv[row_iter][sb * 4 + 3] = v3; - } - - __m128 max4 = _mm_max_ps( _mm256_extractf128_ps( maxAbs, 1 ), _mm256_castps256_ps128( maxAbs ) ); - max4 = _mm_max_ps( max4, 
_mm_movehl_ps( max4, max4 ) );
-            max4 = _mm_max_ss( max4, _mm_movehdup_ps( max4 ) );
-            const float maxScalar = _mm_cvtss_f32( max4 );
-
-            __m256 maxScalarVec = _mm256_set1_ps(maxScalar);
-
-            __m256 mask_next = _mm256_cmp_ps( maxScalarVec, maxAbs, _CMP_EQ_OQ );
-            __m256 finalMask = _mm256_and_ps(maskAbs, mask_next);
-
-            const int mask = _mm256_movemask_ps(finalMask);
-            iscale[row_iter] = ( maxScalar != 0.0f ) ? 127.f / maxScalar : 0.0f;
-
-            if(mask) {
-                iscale[row_iter] = ( maxScalar != 0.0f ) ? -127.f / maxScalar: 0.0f;
-            }
-
-            y[i].d[row_iter] = maxScalar ? 1/iscale[row_iter] : 0;
-            iscale_vec[row_iter] = _mm256_set1_ps(iscale[row_iter]);
-        }
-
-        __m256i quants_interleaved[32];
-        for (int j = 0; j < 32; j++) {
-            // Apply the multiplier
-            __m256 v0 = _mm256_mul_ps(srcv[0][j], iscale_vec[0]);
-            __m256 v1 = _mm256_mul_ps(srcv[1][j], iscale_vec[1]);
-            __m256 v2 = _mm256_mul_ps(srcv[2][j], iscale_vec[2]);
-            __m256 v3 = _mm256_mul_ps(srcv[3][j], iscale_vec[3]);
-
-            // Round to nearest integer
-            v0 = _mm256_round_ps( v0, _MM_ROUND_NEAREST );
-            v1 = _mm256_round_ps( v1, _MM_ROUND_NEAREST );
-            v2 = _mm256_round_ps( v2, _MM_ROUND_NEAREST );
-            v3 = _mm256_round_ps( v3, _MM_ROUND_NEAREST );
-
-            // Convert floats to integers
-            __m256i i0 = _mm256_cvtps_epi32( v0 );
-            __m256i i1 = _mm256_cvtps_epi32( v1 );
-            __m256i i2 = _mm256_cvtps_epi32( v2 );
-            __m256i i3 = _mm256_cvtps_epi32( v3 );
-
-            // Convert int32 to int16
-            i0 = _mm256_packs_epi32( i0, i1 );
-            i2 = _mm256_packs_epi32( i2, i3 );
-            // Convert int16 to int8
-            i0 = _mm256_packs_epi16( i0, i2 );
-
-            // Permute and store the quantized weights in the required order after the pack instruction
-            const __m256i perm = _mm256_setr_epi32( 0, 4, 1, 5, 2, 6, 3, 7 );
-            i0 = _mm256_permutevar8x32_epi32( i0, perm );
-
-            _mm256_storeu_si256((__m256i *)(y[i].qs + 32 * j), i0);
-            quants_interleaved[j] = i0;
-        }
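The shuffle/blend sequence below computes the per-sub-block sums of quantized values (bsums) directly in the interleaved 4x8 layout. On the plain, non-interleaved block_q8_K layout the same quantity is just a strided sum; a minimal reference sketch under that assumption (q8_K_row_bsums is a hypothetical helper, not part of this patch):

#include <stdint.h>

#define QK_K 256

// Reference bsums for one non-interleaved q8_K row: each entry is the sum of
// 16 consecutive quantized values, later multiplied against the Q4_K mins.
static void q8_K_row_bsums(const int8_t qs[QK_K], int16_t bsums[QK_K / 16]) {
    for (int b = 0; b < QK_K / 16; ++b) {
        int sum = 0;
        for (int j = 0; j < 16; ++j) {
            sum += qs[16 * b + j];
        }
        bsums[b] = (int16_t) sum;
    }
}

The interleaved kernel has to reach the same totals through shuffles and blends because the quants of four rows are woven together in memory.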
-
-        // Masks to shuffle the quants of corresponding sub blocks for rearranging quants for vectorized bsums computation
-        __m256i shuffle_mask_sb2 = _mm256_castsi128_si256(_mm_setr_epi8(0, 1, 0, 1, 4, 5, 6, 7, 8, 9, 8, 9, 12, 13, 14, 15));
-        shuffle_mask_sb2 = _mm256_permute2f128_si256(shuffle_mask_sb2, shuffle_mask_sb2, 0);
-        __m256i shuffle_mask_sb3 = _mm256_castsi128_si256(_mm_setr_epi8(0, 1, 2, 3, 0, 1, 6, 7, 8, 9, 10, 11, 8, 9, 14, 15));
-        shuffle_mask_sb3 = _mm256_permute2f128_si256(shuffle_mask_sb3, shuffle_mask_sb3, 0);
-        __m256i shuffle_mask_sb4 = _mm256_castsi128_si256(_mm_setr_epi8(0, 1, 2, 3, 4, 5, 0, 1, 8, 9, 10, 11, 12, 13, 8, 9));
-        shuffle_mask_sb4 = _mm256_permute2f128_si256(shuffle_mask_sb4, shuffle_mask_sb4, 0);
-
-        for (int k = 0; k < 4; k++) {
-            // Quants from four different sub blocks are taken
-            __m256i q0 = quants_interleaved[k * 8 + 0];
-            __m256i q1 = quants_interleaved[k * 8 + 1];
-            __m256i q2 = quants_interleaved[k * 8 + 2];
-            __m256i q3 = quants_interleaved[k * 8 + 3];
-            __m256i q4 = quants_interleaved[k * 8 + 4];
-            __m256i q5 = quants_interleaved[k * 8 + 5];
-            __m256i q6 = quants_interleaved[k * 8 + 6];
-            __m256i q7 = quants_interleaved[k * 8 + 7];
-
-
-            // The below code block has the first half of different sub blocks shuffled and blended so as to process 2 values from each sub block at a time
-            __m256i sb2_h1_shuffled = _mm256_shuffle_epi8(q2, shuffle_mask_sb2);
-            __m256i sb_h1_interleaved = _mm256_blend_epi16(q0, sb2_h1_shuffled, 34);
-            __m256i sb3_h1_shuffled = _mm256_shuffle_epi8(q4, shuffle_mask_sb3);
-            sb_h1_interleaved = _mm256_blend_epi16(sb_h1_interleaved, sb3_h1_shuffled, 68);
-            __m256i sb4_h1_shuffled = _mm256_shuffle_epi8(q6, shuffle_mask_sb4);
-            sb_h1_interleaved = _mm256_blend_epi16(sb_h1_interleaved, sb4_h1_shuffled, 136);
-
-            __m256i one = _mm256_set1_epi8(1);
-            __m256i bsums_r1 = _mm256_maddubs_epi16(one, sb_h1_interleaved);
-
-            for (int l = 0; l < 3; l++) {
-                // Quant values shifted to process the next two values from each sub block
-                q0 = _mm256_srli_epi64(q0, 16);
-                q2 = _mm256_srli_epi64(q2, 16);
-                q4 = _mm256_srli_epi64(q4, 16);
-                q6 = _mm256_srli_epi64(q6, 16);
-
-                sb2_h1_shuffled = _mm256_shuffle_epi8(q2, shuffle_mask_sb2);
-                sb_h1_interleaved = _mm256_blend_epi16(q0, sb2_h1_shuffled, 34);
-                sb3_h1_shuffled = _mm256_shuffle_epi8(q4, shuffle_mask_sb3);
-                sb_h1_interleaved = _mm256_blend_epi16(sb_h1_interleaved, sb3_h1_shuffled, 68);
-                sb4_h1_shuffled = _mm256_shuffle_epi8(q6, shuffle_mask_sb4);
-                sb_h1_interleaved = _mm256_blend_epi16(sb_h1_interleaved, sb4_h1_shuffled, 136);
-
-                bsums_r1 = _mm256_add_epi16(bsums_r1, _mm256_maddubs_epi16(one, sb_h1_interleaved));
-            }
-
-            // The below code block has the second half of different sub blocks shuffled and blended so as to process 2 values from each sub block at a time
-            __m256i sb2_h2_shuffled = _mm256_shuffle_epi8(q3, shuffle_mask_sb2);
-            __m256i sb_h2_interleaved = _mm256_blend_epi16(q1, sb2_h2_shuffled, 34);
-            __m256i sb3_h2_shuffled = _mm256_shuffle_epi8(q5, shuffle_mask_sb3);
-            sb_h2_interleaved = _mm256_blend_epi16(sb_h2_interleaved, sb3_h2_shuffled, 68);
-            __m256i sb4_h2_shuffled = _mm256_shuffle_epi8(q7, shuffle_mask_sb4);
-            sb_h2_interleaved = _mm256_blend_epi16(sb_h2_interleaved, sb4_h2_shuffled, 136);
-
-            __m256i bsums_r2 = _mm256_maddubs_epi16(one, sb_h2_interleaved);
-
-            for (int l = 0; l < 3; l++) {
-                // Quant values shifted to process the next two values from each sub block
-                q1 = _mm256_srli_epi64(q1, 16);
-                q3 = _mm256_srli_epi64(q3, 16);
-                q5 = _mm256_srli_epi64(q5, 16);
-                q7 = _mm256_srli_epi64(q7, 16);
-
-                sb2_h2_shuffled = _mm256_shuffle_epi8(q3, shuffle_mask_sb2);
-                sb_h2_interleaved = _mm256_blend_epi16(q1, sb2_h2_shuffled, 34);
-                sb3_h2_shuffled = _mm256_shuffle_epi8(q5, shuffle_mask_sb3);
-                sb_h2_interleaved = _mm256_blend_epi16(sb_h2_interleaved, sb3_h2_shuffled, 68);
-                sb4_h2_shuffled = _mm256_shuffle_epi8(q7, shuffle_mask_sb4);
-                sb_h2_interleaved = _mm256_blend_epi16(sb_h2_interleaved, sb4_h2_shuffled, 136);
-
-                bsums_r2 = _mm256_add_epi16(bsums_r2, _mm256_maddubs_epi16(one, sb_h2_interleaved));
-            }
-
-            // Overall bsums in interleaved fashion computed by adding results of both halves
-            __m256i bsums_r = _mm256_add_epi16(bsums_r1, bsums_r2);
-            _mm256_storeu_si256((__m256i *)(y[i].bsums + 16 * k), bsums_r);
-        }
-    }
-
-#else
-
-    // scalar
-    const int blck_size_interleave = 8;
-    float srcv[4][QK_K];
-    float iscale[4];
-
-    for (int i = 0; i < nb; i++) {
-        for (int row_iter = 0; row_iter < 4; row_iter++) {
-            float amax = 0.0f; // absolute max
-            float max = 0;
-
-            for (int j = 0; j < QK_K; j++) {
-                srcv[row_iter][j] = x[row_iter * k + i * QK_K + j];
-                // Update the maximum value of the corresponding super block
-                if(amax < fabsf(srcv[row_iter][j])) {
-                    amax = fabsf(srcv[row_iter][j]);
-                    max = srcv[row_iter][j];
-                }
-            }
-
-            iscale[row_iter] = amax ? -127.f/max : 0;
-
-            y[i].d[row_iter] = amax ? 
1/iscale[row_iter] : 0; - } - - for (int j = 0; j < QK_K / 4; j++) { - y[i].bsums[j] = 0; - } - - // Quants values are interleaved in sequence of eight bytes from corresponding super blocks - // Bsums values are interleaved in sequence of four bsums from each super block taken for interleaving - // i.e first four bsums from the first super block, followed by first four bsums from second super block and so on - for (int j = 0; j < QK_K * 4; j++) { - int src_offset = (j / (4 * blck_size_interleave)) * blck_size_interleave; - int src_id = (j % (4 * blck_size_interleave)) / blck_size_interleave; - src_offset += (j % blck_size_interleave); - int index = (((j & 31) >> 3) << 2) + ((j >> 8) << 4) + ((j >> 6) & 3); - - float x0 = srcv[src_id][src_offset] * iscale[src_id]; - y[i].qs[j] = nearest_int(x0); - y[i].bsums[index] += y[i].qs[j]; - } - } -#endif -} - -template -void ggml_quantize_mat_t(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t nrow, int64_t n_per_row); - -template <> void ggml_quantize_mat_t<4, GGML_TYPE_Q8_0>(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t nrow, int64_t n_per_row) { - assert(nrow == 4); - UNUSED(nrow); - ggml_quantize_mat_q8_0_4x4(x, vy, n_per_row); -} - -template <> void ggml_quantize_mat_t<8, GGML_TYPE_Q8_0>(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t nrow, int64_t n_per_row) { - assert(nrow == 4); - UNUSED(nrow); - ggml_quantize_mat_q8_0_4x8(x, vy, n_per_row); -} - -template <> void ggml_quantize_mat_t<8, GGML_TYPE_Q8_K>(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t nrow, int64_t n_per_row) { - assert(nrow == 4); - UNUSED(nrow); - ggml_quantize_mat_q8_K_4x8(x, vy, n_per_row); -} - -static void ggml_gemv_q4_0_4x4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { - const int qk = QK8_0; - const int nb = n / qk; - const int ncols_interleaved = 4; - const int blocklen = 4; - - assert (n % qk == 0); - assert (nc % ncols_interleaved == 0); - - UNUSED(s); - UNUSED(bs); - UNUSED(vx); - UNUSED(vy); - UNUSED(nr); - UNUSED(nc); - UNUSED(nb); - UNUSED(ncols_interleaved); - UNUSED(blocklen); - -#if ! ((defined(_MSC_VER)) && ! 
defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD) - if (ggml_cpu_has_neon() && ggml_cpu_has_dotprod()) { - const block_q4_0x4 * b_ptr = (const block_q4_0x4 *) vx; - - for (int c = 0; c < nc; c += ncols_interleaved) { - const block_q8_0 * a_ptr = (const block_q8_0 *) vy; - float32x4_t acc = vdupq_n_f32(0); - for (int b = 0; b < nb; b++) { - int8x16_t b0 = vld1q_s8((const int8_t *) b_ptr->qs); - int8x16_t b1 = vld1q_s8((const int8_t *) b_ptr->qs + 16); - int8x16_t b2 = vld1q_s8((const int8_t *) b_ptr->qs + 32); - int8x16_t b3 = vld1q_s8((const int8_t *) b_ptr->qs + 48); - float16x4_t bd = vld1_f16((const __fp16 *) b_ptr->d); - - int8x16_t a0 = vld1q_s8(a_ptr->qs); - int8x16_t a1 = vld1q_s8(a_ptr->qs + qk/2); - float16x4_t ad = vld1_dup_f16((const __fp16 *) &a_ptr->d); - - int32x4_t ret = vdupq_n_s32(0); - - ret = vdotq_laneq_s32(ret, b0 << 4, a0, 0); - ret = vdotq_laneq_s32(ret, b1 << 4, a0, 1); - ret = vdotq_laneq_s32(ret, b2 << 4, a0, 2); - ret = vdotq_laneq_s32(ret, b3 << 4, a0, 3); - - ret = vdotq_laneq_s32(ret, b0 & 0xf0U, a1, 0); - ret = vdotq_laneq_s32(ret, b1 & 0xf0U, a1, 1); - ret = vdotq_laneq_s32(ret, b2 & 0xf0U, a1, 2); - ret = vdotq_laneq_s32(ret, b3 & 0xf0U, a1, 3); - - acc = vfmaq_f32(acc, vcvtq_n_f32_s32(ret, 4), - vmulq_f32(vcvt_f32_f16(ad), vcvt_f32_f16(bd))); - a_ptr++; - b_ptr++; - } - vst1q_f32(s, acc); - s += ncols_interleaved; - } - return; - } -#endif // #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD) - float sumf[4]; - int sumi; - - const block_q8_0 * a_ptr = (const block_q8_0 *) vy; - for (int x = 0; x < nc / ncols_interleaved; x++) { - const block_q4_0x4 * b_ptr = (const block_q4_0x4 *) vx + (x * nb); - - for (int j = 0; j < ncols_interleaved; j++) sumf[j] = 0.0; - for (int l = 0; l < nb; l++) { - for (int k = 0; k < (qk / (2 * blocklen)); k++) { - for (int j = 0; j < ncols_interleaved; j++) { - sumi = 0; - for (int i = 0; i < blocklen; ++i) { - const int v0 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] << 4); - const int v1 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0xF0); - sumi += ((v0 * a_ptr[l].qs[k * blocklen + i]) + (v1 * a_ptr[l].qs[k * blocklen + i + qk / 2])) >> 4; - } - sumf[j] += sumi * GGML_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_FP16_TO_FP32(a_ptr[l].d); - } - } - } - for (int j = 0; j < ncols_interleaved; j++) s[x * ncols_interleaved + j] = sumf[j]; - } -} - -static void ggml_gemv_q4_0_4x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { - const int qk = QK8_0; - const int nb = n / qk; - const int ncols_interleaved = 4; - const int blocklen = 8; - - assert (n % qk == 0); - assert (nc % ncols_interleaved == 0); - - UNUSED(s); - UNUSED(bs); - UNUSED(vx); - UNUSED(vy); - UNUSED(nr); - UNUSED(nc); - UNUSED(nb); - UNUSED(ncols_interleaved); - UNUSED(blocklen); - -#if ! ((defined(_MSC_VER)) && ! 
defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD) - if (ggml_cpu_has_neon() && ggml_cpu_has_dotprod()) { - const block_q4_0x4 * b_ptr = (const block_q4_0x4 *) vx; - - for (int c = 0; c < nc; c += ncols_interleaved) { - const block_q8_0 * a_ptr = (const block_q8_0 *) vy; - float32x4_t acc = vdupq_n_f32(0); - for (int b = 0; b < nb; b++) { - int8x16_t b0 = vld1q_s8((const int8_t *) b_ptr->qs); - int8x16_t b1 = vld1q_s8((const int8_t *) b_ptr->qs + 16); - int8x16_t b2 = vld1q_s8((const int8_t *) b_ptr->qs + 32); - int8x16_t b3 = vld1q_s8((const int8_t *) b_ptr->qs + 48); - float16x4_t bd = vld1_f16((const __fp16 *) b_ptr->d); - - int8x16_t a0 = (int8x16_t) vld1q_dup_s64((const int64_t *) a_ptr->qs); - int8x16_t a1 = (int8x16_t) vld1q_dup_s64((const int64_t *) a_ptr->qs + 1); - int8x16_t a2 = (int8x16_t) vld1q_dup_s64((const int64_t *) a_ptr->qs + 2); - int8x16_t a3 = (int8x16_t) vld1q_dup_s64((const int64_t *) a_ptr->qs + 3); - float16x4_t ad = vld1_dup_f16((const __fp16 *) &a_ptr->d); - - int32x4_t ret0 = vdupq_n_s32(0); - int32x4_t ret1 = vdupq_n_s32(0); - - ret0 = vdotq_s32(ret0, b0 << 4, a0); - ret1 = vdotq_s32(ret1, b1 << 4, a0); - ret0 = vdotq_s32(ret0, b2 << 4, a1); - ret1 = vdotq_s32(ret1, b3 << 4, a1); - - ret0 = vdotq_s32(ret0, b0 & 0xf0U, a2); - ret1 = vdotq_s32(ret1, b1 & 0xf0U, a2); - ret0 = vdotq_s32(ret0, b2 & 0xf0U, a3); - ret1 = vdotq_s32(ret1, b3 & 0xf0U, a3); - - int32x4_t ret = vpaddq_s32(ret0, ret1); - - acc = vfmaq_f32(acc, vcvtq_n_f32_s32(ret, 4), - vmulq_f32(vcvt_f32_f16(ad), vcvt_f32_f16(bd))); - a_ptr++; - b_ptr++; - } - vst1q_f32(s, acc); - s += ncols_interleaved; - } - return; - } -#endif // #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD) - float sumf[4]; - int sumi; - - const block_q8_0 * a_ptr = (const block_q8_0 *) vy; - for (int x = 0; x < nc / ncols_interleaved; x++) { - const block_q4_0x4 * b_ptr = (const block_q4_0x4 *) vx + (x * nb); - - for (int j = 0; j < ncols_interleaved; j++) sumf[j] = 0.0; - for (int l = 0; l < nb; l++) { - for (int k = 0; k < (qk / (2 * blocklen)); k++) { - for (int j = 0; j < ncols_interleaved; j++) { - sumi = 0; - for (int i = 0; i < blocklen; ++i) { - const int v0 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] << 4); - const int v1 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0xF0); - sumi += ((v0 * a_ptr[l].qs[k * blocklen + i]) + (v1 * a_ptr[l].qs[k * blocklen + i + qk / 2])) >> 4; - } - sumf[j] += sumi * GGML_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_FP16_TO_FP32(a_ptr[l].d); - } - } - } - for (int j = 0; j < ncols_interleaved; j++) s[x * ncols_interleaved + j] = sumf[j]; - } -} - -static void ggml_gemv_q4_0_8x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { - const int qk = QK8_0; - const int nb = n / qk; - const int ncols_interleaved = 8; - const int blocklen = 8; - - assert (n % qk == 0); - assert (nc % ncols_interleaved == 0); - - UNUSED(s); - UNUSED(bs); - UNUSED(vx); - UNUSED(vy); - UNUSED(nr); - UNUSED(nc); - UNUSED(nb); - UNUSED(ncols_interleaved); - UNUSED(blocklen); - -#if ! ((defined(_MSC_VER)) && ! 
defined(__clang__)) && defined(__aarch64__) -#if defined(__ARM_FEATURE_SVE) - if (ggml_cpu_has_sve() && ggml_cpu_get_sve_cnt() == QK8_0) { - const void * b_ptr = vx; - const void * a_ptr = vy; - float * res_ptr = s; - - __asm__ __volatile__( - "ptrue p0.b\n" - "add %x[b_ptr], %x[b_ptr], #0x10\n" - "1:" // Column loop - "add x22, %x[a_ptr], #0x2\n" - "mov z31.b, #0x0\n" - "mov x21, %x[nb]\n" - "2:" // Block loop - "ld1b { z30.b }, p0/Z, [%x[b_ptr]]\n" - "ld1b { z29.b }, p0/Z, [%x[b_ptr], #1, MUL VL]\n" - "mov z28.s, #0x0\n" - "mov z27.s, #0x0\n" - "ld1rd { z26.d }, p0/Z, [x22]\n" - "ld1b { z25.b }, p0/Z, [%x[b_ptr], #2, MUL VL]\n" - "sub x20, x22, #0x2\n" - "sub x21, x21, #0x1\n" - "ld1b { z24.b }, p0/Z, [%x[b_ptr], #3, MUL VL]\n" - "ld1rd { z23.d }, p0/Z, [x22, #8]\n" - "lsl z22.b, z30.b, #0x4\n" - "lsl z16.b, z29.b, #0x4\n" - "and z30.b, z30.b, #0xf0\n" - "and z29.b, z29.b, #0xf0\n" - "ld1rd { z21.d }, p0/Z, [x22, #16]\n" - "ld1rd { z20.d }, p0/Z, [x22, #24]\n" - "lsl z19.b, z25.b, #0x4\n" - "and z25.b, z25.b, #0xf0\n" - "ld1rh { z17.h }, p0/Z, [x20]\n" - "ld1h { z18.s }, p0/Z, [%x[b_ptr], #-1, MUL VL]\n" - "sdot z28.s, z22.b, z26.b\n" - "sdot z27.s, z16.b, z26.b\n" - "lsl z16.b, z24.b, #0x4\n" - "add x22, x22, #0x22\n" - "and z24.b, z24.b, #0xf0\n" - "add %x[b_ptr], %x[b_ptr], #0x90\n" - "fcvt z17.s, p0/m, z17.h\n" - "fcvt z18.s, p0/m, z18.h\n" - "sdot z28.s, z19.b, z23.b\n" - "sdot z27.s, z16.b, z23.b\n" - "fmul z18.s, z18.s, z17.s\n" - "sdot z28.s, z30.b, z21.b\n" - "sdot z27.s, z29.b, z21.b\n" - "sdot z28.s, z25.b, z20.b\n" - "sdot z27.s, z24.b, z20.b\n" - "uzp1 z17.s, z28.s, z27.s\n" - "uzp2 z16.s, z28.s, z27.s\n" - "add z17.s, z17.s, z16.s\n" - "asr z17.s, z17.s, #0x4\n" - "scvtf z17.s, p0/m, z17.s\n" - "fmla z31.s, p0/M, z17.s, z18.s\n" - "cbnz x21, 2b\n" - "sub %x[nc], %x[nc], #0x8\n" - "st1w { z31.s }, p0, [%x[res_ptr]]\n" - "add %x[res_ptr], %x[res_ptr], #0x20\n" - "cbnz %x[nc], 1b\n" - : [b_ptr] "+&r" (b_ptr), [res_ptr] "+&r" (res_ptr), [nc] "+&r" (nc) - : [a_ptr] "r" (a_ptr), [nb] "r" (nb) - : "memory", "p0", "x20", "x21", "x22", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31" - ); - return; - } -#endif // #if defined(__ARM_FEATURE_SVE) -#elif defined(__AVX2__) - // Lookup table to convert signed nibbles to signed bytes - __m256i signextendlut = _mm256_castsi128_si256(_mm_set_epi8(-1, -2, -3, -4, -5, -6, -7, -8, 7, 6, 5, 4, 3, 2, 1, 0)); - signextendlut = _mm256_permute2f128_si256(signextendlut, signextendlut, 0); - __m128i changemask = _mm_set_epi8(15, 14, 7, 6, 13, 12, 5, 4, 11, 10, 3, 2, 9, 8, 1, 0); - __m256i finalpermutemask = _mm256_set_epi32(7, 5, 3, 1, 6, 4, 2, 0); - - // Permute mask used for easier vector processing at later stages - const __m256i m4b = _mm256_set1_epi8(0x0F); - - int64_t b_nb = n / QK4_0; - - const block_q4_0x8 * b_ptr_start = (const block_q4_0x8 *)vx; - const block_q8_0 * a_ptr_start = (const block_q8_0 *)vy; - - // Process Q8_0 blocks one by one - for (int64_t y = 0; y < nr; y++) { - - // Pointers to LHS blocks of block_q8_0 format - const block_q8_0 * a_ptr = a_ptr_start + (y * nb); - - // Take group of eight block_q4_0x8 structures at each pass of the loop and perform dot product operation - for (int64_t x = 0; x < nc / 8; x++) { - - // Pointers to RHS blocks - const block_q4_0x8 * b_ptr = b_ptr_start + (x * b_nb); - - // Master FP accumulator - __m256 acc_row = _mm256_setzero_ps(); - - for (int64_t b = 0; b < nb; b++) { - // Load 8 blocks of Q4_0 interleaved as 8 bytes (B0 - B7) - 
const __m256i rhs_raw_vec_0123_0 = _mm256_loadu_si256((const __m256i *)(b_ptr[b].qs));
-                const __m256i rhs_raw_vec_4567_0 = _mm256_loadu_si256((const __m256i *)(b_ptr[b].qs) + 1);
-                const __m256i rhs_raw_vec_0123_1 = _mm256_loadu_si256((const __m256i *)(b_ptr[b].qs) + 2);
-                const __m256i rhs_raw_vec_4567_1 = _mm256_loadu_si256((const __m256i *)(b_ptr[b].qs) + 3);
-
-                // 4-bit -> 8-bit - Sign is maintained
-                const __m256i rhs_vec_0123_0 = _mm256_shuffle_epi8(signextendlut, _mm256_and_si256(rhs_raw_vec_0123_0, m4b)); // B0(0-7) B1(0-7) B2(0-7) B3(0-7)
-                const __m256i rhs_vec_4567_0 = _mm256_shuffle_epi8(signextendlut, _mm256_and_si256(rhs_raw_vec_4567_0, m4b)); // B4(0-7) B5(0-7) B6(0-7) B7(0-7)
-                const __m256i rhs_vec_0123_1 = _mm256_shuffle_epi8(signextendlut, _mm256_and_si256(rhs_raw_vec_0123_1, m4b)); // B0(8-15) B1(8-15) B2(8-15) B3(8-15)
-                const __m256i rhs_vec_4567_1 = _mm256_shuffle_epi8(signextendlut, _mm256_and_si256(rhs_raw_vec_4567_1, m4b)); // B4(8-15) B5(8-15) B6(8-15) B7(8-15)
-
-                const __m256i rhs_vec_0123_2 = _mm256_shuffle_epi8(signextendlut, _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_0123_0, 4), m4b)); // B0(16-23) B1(16-23) B2(16-23) B3(16-23)
-                const __m256i rhs_vec_4567_2 = _mm256_shuffle_epi8(signextendlut, _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_4567_0, 4), m4b)); // B4(16-23) B5(16-23) B6(16-23) B7(16-23)
-                const __m256i rhs_vec_0123_3 = _mm256_shuffle_epi8(signextendlut, _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_0123_1, 4), m4b)); // B0(24-31) B1(24-31) B2(24-31) B3(24-31)
-                const __m256i rhs_vec_4567_3 = _mm256_shuffle_epi8(signextendlut, _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_4567_1, 4), m4b)); // B4(24-31) B5(24-31) B6(24-31) B7(24-31)
-
-                // Load the scale values for the 8 blocks interleaved in block_q4_0x8
-                const __m256 col_scale_f32 = GGML_F32Cx8_REARRANGE_LOAD(b_ptr[b].d, changemask);
-
-                // Load and convert to FP32 scale from block_q8_0
-                const __m256 row_scale_f32 = _mm256_set1_ps(GGML_FP16_TO_FP32(a_ptr[b].d));
-
-                // Load the block values in block_q8_0 in batches of 16 bytes and replicate the same across 256 bit vector
-                __m256i lhs_vec_0 = _mm256_castsi128_si256(_mm_loadu_si128((const __m128i *)a_ptr[b].qs));
-                __m256i lhs_vec_1 = _mm256_castsi128_si256(_mm_loadu_si128((const __m128i *)(a_ptr[b].qs + 16)));
-
-                lhs_vec_0 = _mm256_permute2f128_si256(lhs_vec_0, lhs_vec_0, 0); // A0(0-15) A0(0-15)
-                lhs_vec_1 = _mm256_permute2f128_si256(lhs_vec_1, lhs_vec_1, 0); // A0(16-31) A0(16-31)
-
-                __m256i iacc = _mm256_setzero_si256();
-
-                // Dot product done within 32 bit lanes and accumulated in the same vector
-                // B0(0-3) B4(0-3) B1(0-3) B5(0-3) B2(0-3) B6(0-3) B3(0-3) B7(0-3) with A0(0-3)
-                // B0(4-7) B4(4-7) B1(4-7) B5(4-7) B2(4-7) B6(4-7) B3(4-7) B7(4-7) with A0(4-7)
-                // ...........................................................................
-                // B0(28-31) B4(28-31) B1(28-31) B5(28-31) B2(28-31) B6(28-31) B3(28-31) B7(28-31) with A0(28-31)
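The signextendlut table above turns the masked low nibble of each byte into its signed two's-complement value with a single _mm256_shuffle_epi8: byte n of the table holds the value of nibble n, i.e. {0, 1, ..., 7, -8, -7, ..., -1}. A minimal scalar sketch of the same mapping (nibble_to_signed is a hypothetical helper shown only for illustration; the interleaved Q4_0 repacking already stores nibbles in two's-complement form, so no extra -8 offset is applied here):

#include <stdint.h>

// Scalar equivalent of the signextendlut shuffle: reinterpret the low 4 bits
// of a byte as a signed two's-complement value in -8..7.
static inline int8_t nibble_to_signed(uint8_t b) {
    uint8_t n = b & 0x0F;              // same masking the kernel does with m4b
    return (int8_t) ((n ^ 0x08) - 8);  // 0..7 -> 0..7, 8..15 -> -8..-1
}

Doing this as a table lookup lets one shuffle convert sixteen nibbles per 128-bit lane at once instead of shifting and masking each byte.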
-
-                iacc = mul_sum_i8_pairs_acc_int32x8(iacc, _mm256_blend_epi32(rhs_vec_0123_0 ,_mm256_shuffle_epi32(rhs_vec_4567_0, 177), 170), _mm256_shuffle_epi32(lhs_vec_0, 0));
-                iacc = mul_sum_i8_pairs_acc_int32x8(iacc, _mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_0, 177) ,rhs_vec_4567_0, 170), _mm256_shuffle_epi32(lhs_vec_0, 85));
-
-                iacc = mul_sum_i8_pairs_acc_int32x8(iacc, _mm256_blend_epi32(rhs_vec_0123_1 ,_mm256_shuffle_epi32(rhs_vec_4567_1, 177), 170), _mm256_shuffle_epi32(lhs_vec_0, 170));
-                iacc = mul_sum_i8_pairs_acc_int32x8(iacc, _mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_1, 177) ,rhs_vec_4567_1, 170), _mm256_shuffle_epi32(lhs_vec_0, 255));
-
-                iacc = mul_sum_i8_pairs_acc_int32x8(iacc, _mm256_blend_epi32(rhs_vec_0123_2 ,_mm256_shuffle_epi32(rhs_vec_4567_2, 177), 170), _mm256_shuffle_epi32(lhs_vec_1, 0));
-                iacc = mul_sum_i8_pairs_acc_int32x8(iacc, _mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_2, 177) ,rhs_vec_4567_2, 170), _mm256_shuffle_epi32(lhs_vec_1, 85));
-
-                iacc = mul_sum_i8_pairs_acc_int32x8(iacc, _mm256_blend_epi32(rhs_vec_0123_3 ,_mm256_shuffle_epi32(rhs_vec_4567_3, 177), 170), _mm256_shuffle_epi32(lhs_vec_1, 170));
-                iacc = mul_sum_i8_pairs_acc_int32x8(iacc, _mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_3, 177) ,rhs_vec_4567_3, 170), _mm256_shuffle_epi32(lhs_vec_1, 255));
-
-                // Accumulated values multiplied with appropriate scales
-                acc_row = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc), _mm256_mul_ps(col_scale_f32, row_scale_f32), acc_row);
-            }
-
-            // Accumulated output values permuted so as to be stored in appropriate order post accumulation
-            acc_row = _mm256_permutevar8x32_ps(acc_row, finalpermutemask);
-            _mm256_storeu_ps(s + (y * nr + x * 8), acc_row);
-        }
-    }
-    return;
-#elif defined(__riscv_v_intrinsic)
-    if (__riscv_vlenb() >= QK4_0) {
-        const size_t vl = QK4_0;
-
-        const block_q8_0 * a_ptr = (const block_q8_0 *) vy;
-        for (int x = 0; x < nc / ncols_interleaved; x++) {
-            const block_q4_0x8 * b_ptr = (const block_q4_0x8 *) vx + (x * nb);
-
-            vfloat32m1_t sumf = __riscv_vfmv_v_f_f32m1(0.0, vl / 4);
-            for (int l = 0; l < nb; l++) {
-                const int64_t a0 = *(const int64_t *)&a_ptr[l].qs[0];
-                const int64_t a1 = *(const int64_t *)&a_ptr[l].qs[8];
-                const int64_t a2 = *(const int64_t *)&a_ptr[l].qs[16];
-                const int64_t a3 = *(const int64_t *)&a_ptr[l].qs[24];
-                __asm__ __volatile__("" ::: "memory"); // prevent gcc from emitting fused vlse64, violating alignment
-                const vint8m2_t lhs_0_8 =__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(a0, vl / 4));
-                const vint8m2_t lhs_1_8 =__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(a1, vl / 4));
-                const vint8m2_t lhs_2_8 =__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(a2, vl / 4));
-                const vint8m2_t lhs_3_8 =__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(a3, vl / 4));
-
-                const vint8m4_t rhs_raw_vec = __riscv_vle8_v_i8m4((const int8_t *)b_ptr[l].qs, vl * 4);
-                const vint8m4_t rhs_vec_lo = __riscv_vsra_vx_i8m4(__riscv_vsll_vx_i8m4(rhs_raw_vec, 4, vl * 4), 4, vl * 4);
-                const vint8m4_t rhs_vec_hi = __riscv_vsra_vx_i8m4(rhs_raw_vec, 4, vl * 4);
-                const vint8m2_t rhs_vec_lo_0 = __riscv_vget_v_i8m4_i8m2(rhs_vec_lo, 0);
-                const vint8m2_t rhs_vec_lo_1 = __riscv_vget_v_i8m4_i8m2(rhs_vec_lo, 1);
-                const vint8m2_t rhs_vec_hi_0 = __riscv_vget_v_i8m4_i8m2(rhs_vec_hi, 0);
-                const vint8m2_t rhs_vec_hi_1 = __riscv_vget_v_i8m4_i8m2(rhs_vec_hi, 1);
-
-                const vint16m4_t sumi_lo_0 = 
__riscv_vwmul_vv_i16m4(rhs_vec_lo_0, lhs_0_8, vl * 2); - const vint16m4_t sumi_lo_1 = __riscv_vwmacc_vv_i16m4(sumi_lo_0, rhs_vec_lo_1, lhs_1_8, vl * 2); - const vint16m4_t sumi_hi_0 = __riscv_vwmacc_vv_i16m4(sumi_lo_1, rhs_vec_hi_0, lhs_2_8, vl * 2); - const vint16m4_t sumi_hi_m = __riscv_vwmacc_vv_i16m4(sumi_hi_0, rhs_vec_hi_1, lhs_3_8, vl * 2); - - const vuint32m4_t sumi_i32 = __riscv_vreinterpret_v_i32m4_u32m4(__riscv_vreinterpret_v_i16m4_i32m4(sumi_hi_m)); - const vuint16m2_t sumi_h2_0 = __riscv_vnsrl_wx_u16m2(sumi_i32, 0, vl); - const vuint16m2_t sumi_h2_1 = __riscv_vnsrl_wx_u16m2(sumi_i32, 16, vl); - const vuint16m2_t sumi_h2 = __riscv_vadd_vv_u16m2(sumi_h2_0, sumi_h2_1, vl); - const vuint32m2_t sumi_h2_i32 = __riscv_vreinterpret_v_u16m2_u32m2(sumi_h2); - const vuint16m1_t sumi_h4_0 = __riscv_vnsrl_wx_u16m1(sumi_h2_i32, 0, vl / 2); - const vuint16m1_t sumi_h4_1 = __riscv_vnsrl_wx_u16m1(sumi_h2_i32, 16, vl / 2); - const vuint16m1_t sumi_h4 = __riscv_vadd_vv_u16m1(sumi_h4_0, sumi_h4_1, vl / 2); - const vuint32m1_t sumi_h4_i32 = __riscv_vreinterpret_v_u16m1_u32m1(sumi_h4); - const vint16mf2_t sumi_h8_0 = __riscv_vreinterpret_v_u16mf2_i16mf2(__riscv_vnsrl_wx_u16mf2(sumi_h4_i32, 0, vl / 4)); - const vint16mf2_t sumi_h8_1 = __riscv_vreinterpret_v_u16mf2_i16mf2(__riscv_vnsrl_wx_u16mf2(sumi_h4_i32, 16, vl / 4)); - const vint32m1_t sumi_h8 = __riscv_vwadd_vv_i32m1(sumi_h8_0, sumi_h8_1, vl / 4); - const vfloat32m1_t facc = __riscv_vfcvt_f_x_v_f32m1(sumi_h8, vl / 4); - - // vector version needs Zvfhmin extension - const float a_scale = GGML_FP16_TO_FP32(a_ptr[l].d); - const float b_scales[8] = { - GGML_FP16_TO_FP32(b_ptr[l].d[0]), - GGML_FP16_TO_FP32(b_ptr[l].d[1]), - GGML_FP16_TO_FP32(b_ptr[l].d[2]), - GGML_FP16_TO_FP32(b_ptr[l].d[3]), - GGML_FP16_TO_FP32(b_ptr[l].d[4]), - GGML_FP16_TO_FP32(b_ptr[l].d[5]), - GGML_FP16_TO_FP32(b_ptr[l].d[6]), - GGML_FP16_TO_FP32(b_ptr[l].d[7]) - }; - const vfloat32m1_t b_scales_vec = __riscv_vle32_v_f32m1(b_scales, vl / 4); - const vfloat32m1_t tmp1 = __riscv_vfmul_vf_f32m1(facc, a_scale, vl / 4); - sumf = __riscv_vfmacc_vv_f32m1(sumf, tmp1, b_scales_vec, vl / 4); - } - __riscv_vse32_v_f32m1(s + x * ncols_interleaved, sumf, vl / 4); - } - return; - } -#endif // #if ! ((defined(_MSC_VER)) && ! 
defined(__clang__)) && defined(__aarch64__) - { - float sumf[8]; - int sumi; - - const block_q8_0 * a_ptr = (const block_q8_0 *) vy; - for (int x = 0; x < nc / ncols_interleaved; x++) { - const block_q4_0x8 * b_ptr = (const block_q4_0x8 *) vx + (x * nb); - - for (int j = 0; j < ncols_interleaved; j++) sumf[j] = 0.0; - for (int l = 0; l < nb; l++) { - for (int k = 0; k < (qk / (2 * blocklen)); k++) { - for (int j = 0; j < ncols_interleaved; j++) { - sumi = 0; - for (int i = 0; i < blocklen; ++i) { - const int v0 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] << 4); - const int v1 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0xF0); - sumi += ((v0 * a_ptr[l].qs[k * blocklen + i]) + (v1 * a_ptr[l].qs[k * blocklen + i + qk / 2])) >> 4; - } - sumf[j] += sumi * GGML_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_FP16_TO_FP32(a_ptr[l].d); - } - } - } - for (int j = 0; j < ncols_interleaved; j++) s[x * ncols_interleaved + j] = sumf[j]; - } - } -} - -static void ggml_gemv_q4_K_8x8_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { - const int qk = QK_K; - const int nb = n / qk; - const int ncols_interleaved = 8; - const int blocklen = 8; - static const uint32_t kmask1 = 0x3f3f3f3f; - static const uint32_t kmask2 = 0x0f0f0f0f; - static const uint32_t kmask3 = 0x03030303; - - assert (n % qk == 0); - assert (nc % ncols_interleaved == 0); - - UNUSED(s); - UNUSED(bs); - UNUSED(vx); - UNUSED(vy); - UNUSED(nr); - UNUSED(nc); - UNUSED(nb); - UNUSED(ncols_interleaved); - UNUSED(blocklen); - -#if defined(__AVX2__) - // Lookup table to convert signed nibbles to signed bytes - __m256i signextendlut = _mm256_castsi128_si256(_mm_set_epi8(-1, -2, -3, -4, -5, -6, -7, -8, 7, 6, 5, 4, 3, 2, 1, 0)); - signextendlut = _mm256_permute2f128_si256(signextendlut, signextendlut, 0); - // Shuffle masks to rearrange delta and scale values to multiply with appropriate scales - __m128i deltamask = _mm_set_epi8(15, 14, 7, 6, 13, 12, 5, 4, 11, 10, 3, 2, 9, 8, 1, 0); - __m128i scalemask = _mm_set_epi8(7, 7, 3, 3, 6, 6, 2, 2, 5, 5, 1, 1, 4, 4, 0, 0); - // Permute mask used for easier vector processing at later stages - __m256i finalpermutemask = _mm256_set_epi32(7, 5, 3, 1, 6, 4, 2, 0); - - // Mask to extract nibbles from bytes - const __m256i m4b = _mm256_set1_epi8(0x0F); - - int64_t b_nb = n / QK_K; - - const block_q4_Kx8 * b_ptr_start = (const block_q4_Kx8 *)vx; - const block_q8_K * a_ptr_start = (const block_q8_K *)vy; - - // Process Q8_K blocks one by one - for (int64_t y = 0; y < nr; y++) { - - // Pointers to LHS blocks of block_q8_K format - const block_q8_K * a_ptr = a_ptr_start + (y * nb); - - // Take group of eight interleaved block_q4_K structures at each pass of the loop and perform dot product operation - for (int64_t x = 0; x < nc / 8; x++) { - - // Pointers to RHS blocks - const block_q4_Kx8 * b_ptr = b_ptr_start + (x * b_nb); - - // Master FP accumulators - __m256 acc_row = _mm256_setzero_ps(); - __m256 acc_min_rows = _mm256_setzero_ps(); - - for (int64_t b = 0; b < nb; b++) { - - // Load and convert to FP32 scale from block_q8_K - const __m256 row_scale_f32 = _mm256_set1_ps((a_ptr[b].d)); - - // Load the scale values for the 8 blocks interleaved in block_q4_Kx8 - // col_scale_f32 rearranged so as to multiply with appropriate quants - const __m256 col_scale_f32 = GGML_F32Cx8_REARRANGE_LOAD(b_ptr[b].d, deltamask); - const __m256 col_dmin_f32 = GGML_F32Cx8_LOAD(b_ptr[b].dmin); - - __m256i 
iacc_b = _mm256_setzero_si256(); - __m256i iacc_min_b = _mm256_setzero_si256(); - - const __m256i q8sums = _mm256_loadu_si256((const __m256i * )(a_ptr[b].bsums)); - __m256i q8s = _mm256_castsi128_si256(_mm_hadd_epi16(_mm256_castsi256_si128(q8sums), _mm256_extracti128_si256(q8sums, 1))); - q8s = _mm256_permute2f128_si256(q8s, q8s, 0); - - // Processes two sub blocks from each Q4_K in each iteration - for (int sb = 0; sb < QK_K / 64; sb++) { - - // Load the eight block_q4_K for two sub blocks quantized values interleaved with each other in chunks of eight - B0,B1 ....B6,B7 - const __m256i rhs_raw_vec_0123_0 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + sb * 256)); - const __m256i rhs_raw_vec_4567_0 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 32 + sb * 256)); - const __m256i rhs_raw_vec_0123_1 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 64 + sb * 256)); - const __m256i rhs_raw_vec_4567_1 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 96 + sb * 256)); - const __m256i rhs_raw_vec_0123_2 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 128 + sb * 256)); - const __m256i rhs_raw_vec_4567_2 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 160 + sb * 256)); - const __m256i rhs_raw_vec_0123_3 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 192 + sb * 256)); - const __m256i rhs_raw_vec_4567_3 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 224 + sb * 256)); - - // 4-bit -> 8-bit - // Values of the first sub block of eight block_q4_K structures for the sb loop - const __m256i rhs_vec_0123_00 = _mm256_and_si256(rhs_raw_vec_0123_0, m4b); - const __m256i rhs_vec_4567_00 = _mm256_and_si256(rhs_raw_vec_4567_0, m4b); - const __m256i rhs_vec_0123_01 = _mm256_and_si256(rhs_raw_vec_0123_1, m4b); - const __m256i rhs_vec_4567_01 = _mm256_and_si256(rhs_raw_vec_4567_1, m4b); - const __m256i rhs_vec_0123_02 = _mm256_and_si256(rhs_raw_vec_0123_2, m4b); - const __m256i rhs_vec_4567_02 = _mm256_and_si256(rhs_raw_vec_4567_2, m4b); - const __m256i rhs_vec_0123_03 = _mm256_and_si256(rhs_raw_vec_0123_3, m4b); - const __m256i rhs_vec_4567_03 = _mm256_and_si256(rhs_raw_vec_4567_3, m4b); - - // Values of the second sub block of eight block_q4_K structures when sb = 1 - const __m256i rhs_vec_0123_10 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_0123_0, 4), m4b); - const __m256i rhs_vec_4567_10 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_4567_0, 4), m4b); - const __m256i rhs_vec_0123_11 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_0123_1, 4), m4b); - const __m256i rhs_vec_4567_11 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_4567_1, 4), m4b); - const __m256i rhs_vec_0123_12 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_0123_2, 4), m4b); - const __m256i rhs_vec_4567_12 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_4567_2, 4), m4b); - const __m256i rhs_vec_0123_13 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_0123_3, 4), m4b); - const __m256i rhs_vec_4567_13 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_vec_4567_3, 4), m4b); - - uint32_t utmp_0[4], utmp_1[4]; - - // Scales and Mins of corresponding sub blocks from different Q8_K structures are stored together - // The below block is for eg to extract first sub block's scales and mins from different Q4_K structures for the sb loop - memcpy(utmp_0, b_ptr[b].scales + 24 * sb, 12); - utmp_0[3] = ((utmp_0[2] >> 4) & kmask2) | (((utmp_0[1] >> 6) & kmask3) << 4); - const uint32_t uaux_0 = utmp_0[1] & kmask1; - utmp_0[1] = (utmp_0[2] & kmask2) | (((utmp_0[0] >> 6) & kmask3) << 4); - utmp_0[2] = uaux_0; - 
utmp_0[0] &= kmask1;
-
-                    // The below block is for eg to extract second sub block's scales and mins from different Q4_K structures for the sb loop
-                    memcpy(utmp_1, b_ptr[b].scales + 12 + sb * 24, 12);
-                    utmp_1[3] = ((utmp_1[2] >> 4) & kmask2) | (((utmp_1[1] >> 6) & kmask3) << 4);
-                    const uint32_t uaux_1 = utmp_1[1] & kmask1;
-                    utmp_1[1] = (utmp_1[2] & kmask2) | (((utmp_1[0] >> 6) & kmask3) << 4);
-                    utmp_1[2] = uaux_1;
-                    utmp_1[0] &= kmask1;
-
-                    // Scales of first sub block in the sb loop
-                    const __m128i mins_and_scales_0 = _mm_set_epi32(utmp_0[3], utmp_0[2], utmp_0[1], utmp_0[0]);
-                    __m128i scales_rearrange_0 = _mm_shuffle_epi8(mins_and_scales_0, scalemask);
-                    __m256i scales_0 = _mm256_cvtepu8_epi16(scales_rearrange_0);
-
-                    // Scales of second sub block in the sb loop
-                    __m128i mins_and_scales_1 = _mm_set_epi32(utmp_1[3], utmp_1[2], utmp_1[1], utmp_1[0]);
-                    __m128i scales_rearrange_1 = _mm_shuffle_epi8(mins_and_scales_1, scalemask);
-                    __m256i scales_1 = _mm256_cvtepu8_epi16(scales_rearrange_1);
-
-                    // Mins of first and second sub block of Q4_K block are arranged side by side
-                    __m256i mins_01 = _mm256_cvtepu8_epi16(_mm_unpacklo_epi8(_mm_shuffle_epi32(mins_and_scales_0, 78), _mm_shuffle_epi32(mins_and_scales_1, 78)));
-
-                    // Load the two sub block values corresponding to sb in block_q8_K in batches of 16 bytes and replicate the same across 256 bit vector
-                    __m256i lhs_vec_00 = _mm256_castsi128_si256(_mm_loadu_si128((const __m128i *)(a_ptr[b].qs + sb * 64)));
-                    __m256i lhs_vec_01 = _mm256_castsi128_si256(_mm_loadu_si128((const __m128i *)(a_ptr[b].qs + 16 + sb * 64)));
-                    __m256i lhs_vec_10 = _mm256_castsi128_si256(_mm_loadu_si128((const __m128i *)(a_ptr[b].qs + 32 + sb * 64)));
-                    __m256i lhs_vec_11 = _mm256_castsi128_si256(_mm_loadu_si128((const __m128i *)(a_ptr[b].qs + 48 + sb * 64)));
-
-                    lhs_vec_00 = _mm256_permute2f128_si256(lhs_vec_00, lhs_vec_00, 0);
-                    lhs_vec_01 = _mm256_permute2f128_si256(lhs_vec_01, lhs_vec_01, 0);
-                    lhs_vec_10 = _mm256_permute2f128_si256(lhs_vec_10, lhs_vec_10, 0);
-                    lhs_vec_11 = _mm256_permute2f128_si256(lhs_vec_11, lhs_vec_11, 0);
-
-                    // Dot product done within 32 bit lanes and accumulated in the same vector
-                    // First done for the first sub block and then for the second sub block in each sb
-                    // B0(0-3) B4(0-3) B1(0-3) B5(0-3) B2(0-3) B6(0-3) B3(0-3) B7(0-3) with A0(0-3)
-                    // B0(4-7) B4(4-7) B1(4-7) B5(4-7) B2(4-7) B6(4-7) B3(4-7) B7(4-7) with A0(4-7)
-                    // ...........................................................................
-                    // B0(28-31) B4(28-31) B1(28-31) B5(28-31) B2(28-31) B6(28-31) B3(28-31) B7(28-31) with A0(28-31)
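The kmask1/kmask2/kmask3 manipulation above undoes Q4_K's 12-byte packing of eight 6-bit scales and eight 6-bit mins per super block: sub blocks 0-3 keep their 6 bits in place, while sub blocks 4-7 are reassembled from a low nibble plus two bits spilled into the upper bits of earlier bytes. A scalar sketch of the same unpacking, assuming the usual Q4_K scale layout (unpack_q4k_scales is a hypothetical reference helper, not part of this patch):

#include <stdint.h>

// Expand the 12 packed scale/min bytes of one q4_K super block into
// eight 6-bit scales and eight 6-bit mins.
static void unpack_q4k_scales(const uint8_t q[12], uint8_t sc[8], uint8_t m[8]) {
    for (int j = 0; j < 8; ++j) {
        if (j < 4) {
            sc[j] = q[j] & 63;
            m[j]  = q[j + 4] & 63;
        } else {
            sc[j] = (uint8_t) ((q[j + 4] & 0x0F) | ((q[j - 4] >> 6) << 4));
            m[j]  = (uint8_t) ((q[j + 4] >>   4) | ((q[j]     >> 6) << 4));
        }
    }
}

The vectorized variant reaches the same result with three 32-bit masks so that all eight sub blocks are unpacked in a handful of scalar ops before being broadcast into vector registers.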
-
-
-                    __m256i iacc_0 = _mm256_setzero_si256();
-                    __m256i iacc_1 = _mm256_setzero_si256();
-
-                    iacc_0 = _mm256_add_epi16(iacc_0, _mm256_maddubs_epi16(_mm256_blend_epi32(rhs_vec_0123_00 ,_mm256_shuffle_epi32(rhs_vec_4567_00, 177), 170), _mm256_shuffle_epi32(lhs_vec_00, 0)));
-                    iacc_0 = _mm256_add_epi16(iacc_0, _mm256_maddubs_epi16(_mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_00, 177) ,rhs_vec_4567_00, 170), _mm256_shuffle_epi32(lhs_vec_00, 85)));
-
-                    iacc_0 = _mm256_add_epi16(iacc_0, _mm256_maddubs_epi16(_mm256_blend_epi32(rhs_vec_0123_01 ,_mm256_shuffle_epi32(rhs_vec_4567_01, 177), 170), _mm256_shuffle_epi32(lhs_vec_00, 170)));
-                    iacc_0 = _mm256_add_epi16(iacc_0, _mm256_maddubs_epi16(_mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_01, 177) ,rhs_vec_4567_01, 170), _mm256_shuffle_epi32(lhs_vec_00, 255)));
-
-                    iacc_0 = _mm256_add_epi16(iacc_0, _mm256_maddubs_epi16(_mm256_blend_epi32(rhs_vec_0123_02 ,_mm256_shuffle_epi32(rhs_vec_4567_02, 177), 170), _mm256_shuffle_epi32(lhs_vec_01, 0)));
-                    iacc_0 = _mm256_add_epi16(iacc_0, _mm256_maddubs_epi16(_mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_02, 177) ,rhs_vec_4567_02, 170), _mm256_shuffle_epi32(lhs_vec_01, 85)));
-
-                    iacc_0 = _mm256_add_epi16(iacc_0, _mm256_maddubs_epi16(_mm256_blend_epi32(rhs_vec_0123_03 ,_mm256_shuffle_epi32(rhs_vec_4567_03, 177), 170), _mm256_shuffle_epi32(lhs_vec_01, 170)));
-                    iacc_0 = _mm256_add_epi16(iacc_0, _mm256_maddubs_epi16(_mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_03, 177) ,rhs_vec_4567_03, 170), _mm256_shuffle_epi32(lhs_vec_01, 255)));
-
-                    iacc_0 = _mm256_madd_epi16(iacc_0, scales_0);
-
-                    iacc_1 = _mm256_add_epi16(iacc_1, _mm256_maddubs_epi16(_mm256_blend_epi32(rhs_vec_0123_10 ,_mm256_shuffle_epi32(rhs_vec_4567_10, 177), 170), _mm256_shuffle_epi32(lhs_vec_10, 0)));
-                    iacc_1 = _mm256_add_epi16(iacc_1, _mm256_maddubs_epi16(_mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_10, 177) ,rhs_vec_4567_10, 170), _mm256_shuffle_epi32(lhs_vec_10, 85)));
-
-                    iacc_1 = _mm256_add_epi16(iacc_1, _mm256_maddubs_epi16(_mm256_blend_epi32(rhs_vec_0123_11 ,_mm256_shuffle_epi32(rhs_vec_4567_11, 177), 170), _mm256_shuffle_epi32(lhs_vec_10, 170)));
-                    iacc_1 = _mm256_add_epi16(iacc_1, _mm256_maddubs_epi16(_mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_11, 177) ,rhs_vec_4567_11, 170), _mm256_shuffle_epi32(lhs_vec_10, 255)));
-
-                    iacc_1 = _mm256_add_epi16(iacc_1, _mm256_maddubs_epi16(_mm256_blend_epi32(rhs_vec_0123_12 ,_mm256_shuffle_epi32(rhs_vec_4567_12, 177), 170), _mm256_shuffle_epi32(lhs_vec_11, 0)));
-                    iacc_1 = _mm256_add_epi16(iacc_1, _mm256_maddubs_epi16(_mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_12, 177) ,rhs_vec_4567_12, 170), _mm256_shuffle_epi32(lhs_vec_11, 85)));
-
-                    iacc_1 = _mm256_add_epi16(iacc_1, _mm256_maddubs_epi16(_mm256_blend_epi32(rhs_vec_0123_13 ,_mm256_shuffle_epi32(rhs_vec_4567_13, 177), 170), _mm256_shuffle_epi32(lhs_vec_11, 170)));
-                    iacc_1 = _mm256_add_epi16(iacc_1, _mm256_maddubs_epi16(_mm256_blend_epi32(_mm256_shuffle_epi32(rhs_vec_0123_13, 177) ,rhs_vec_4567_13, 170), _mm256_shuffle_epi32(lhs_vec_11, 255)));
-
-                    iacc_1 = _mm256_madd_epi16(iacc_1, scales_1);
-
-                    // Accumulate the iacc value for one sb
-                    __m256i iacc_sb = _mm256_add_epi32(iacc_0, iacc_1);
-
-                    // Broadcast the bsums of the two sub blocks of the iteration of Q8_K across the vector
-                    // Multiply-Add with corresponding mins of Q4_Kx8 with bsums
-                    __m256i q8s_sb = _mm256_shuffle_epi32(q8s, 0);
-                    __m256i 
iacc_min_sb = _mm256_madd_epi16(q8s_sb, mins_01); - q8s = _mm256_bsrli_epi128(q8s, 4); - - // Accumulate for the complete block - iacc_b = _mm256_add_epi32(iacc_b, iacc_sb); - iacc_min_b = _mm256_add_epi32(iacc_min_b, iacc_min_sb); - } - - // Multiply-Add with scale values for the complete super block - acc_row = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_b), _mm256_mul_ps(col_scale_f32, row_scale_f32), acc_row); - acc_min_rows = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_min_b), _mm256_mul_ps(col_dmin_f32, row_scale_f32), acc_min_rows); - - } - - // Accumulated output values permuted so as to be stored in appropriate order post accumulation - acc_row = _mm256_permutevar8x32_ps(acc_row, finalpermutemask); - _mm256_storeu_ps(s + (y * nr + x * 8), _mm256_sub_ps(acc_row, acc_min_rows)); - } - } - -#else - - float sumf[8]; - float sum_minf[8]; - uint32_t utmp[32]; - int sumi1; - int sumi2; - int sumi; - - const block_q8_K * a_ptr = (const block_q8_K *) vy; - for (int x = 0; x < nc / ncols_interleaved; x++) { - const block_q4_Kx8 * b_ptr = (const block_q4_Kx8 *) vx + (x * nb); - - for (int j = 0; j < ncols_interleaved; j++) { - sumf[j] = 0.0; - sum_minf[j] = 0.0; - } - for (int l = 0; l < nb; l++) { - for (int sb = 0; sb < 8; sb++) { - memcpy(utmp + sb * 4, b_ptr[l].scales + sb * 12, 12); - utmp[sb * 4 + 3] = ((utmp[sb * 4 + 2] >> 4) & kmask2) | (((utmp[sb * 4 + 1] >> 6) & kmask3) << 4); - const uint32_t uaux_0 = utmp[sb * 4 + 1] & kmask1; - utmp[sb * 4 + 1] = (utmp[sb * 4 + 2] & kmask2) | (((utmp[sb * 4 + 0] >> 6) & kmask3) << 4); - utmp[sb * 4 + 2] = uaux_0; - utmp[sb * 4 + 0] &= kmask1; - } - for (int k = 0; k < (qk / (2 * blocklen)); k++) { - uint8_t *scales_0 = (uint8_t*) utmp + (k / 4) * 32; - uint8_t *scales_1 = (uint8_t*) utmp + (k / 4) * 32 + 16; - for (int j = 0; j < ncols_interleaved; j++) { - sumi1 = 0; - sumi2 = 0; - sumi = 0; - for (int i = 0; i < blocklen; ++i) { - const int v0 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0xF); - const int v1 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] >> 4); - sumi1 = (v0 * a_ptr[l].qs[(k >> 2) * 64 + (k % 4) * blocklen + i]); - sumi2 = (v1 * a_ptr[l].qs[(k >> 2) * 64 + (k % 4) * blocklen + i + 32]); - sumi1 = sumi1 * scales_0[j]; - sumi2 = sumi2 * scales_1[j]; - sumi += sumi1 + sumi2; - } - sumf[j] += sumi * GGML_FP16_TO_FP32(b_ptr[l].d[j]) * a_ptr[l].d; - } - } - for (int sb = 0; sb < 8; sb++) { - uint8_t *mins = (uint8_t*) utmp + 8 + sb * 16; - for (int j = 0; j < ncols_interleaved; j++) { - sum_minf[j] += mins[j] * (a_ptr[l].bsums[sb * 2] + a_ptr[l].bsums[sb * 2 + 1]) * GGML_FP16_TO_FP32(b_ptr[l].dmin[j]) * a_ptr[l].d; - } - } - } - for (int j = 0; j < ncols_interleaved; j++) { - s[x * ncols_interleaved + j] = sumf[j] - sum_minf[j]; - } - } -#endif -} - - -static void ggml_gemv_iq4_nl_4x4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { - const int qk = QK8_0; - const int nb = n / qk; - const int ncols_interleaved = 4; - const int blocklen = 4; - - assert (n % qk == 0); - assert (nc % ncols_interleaved == 0); - - UNUSED(s); - UNUSED(bs); - UNUSED(vx); - UNUSED(vy); - UNUSED(nr); - UNUSED(nc); - UNUSED(nb); - UNUSED(ncols_interleaved); - UNUSED(blocklen); - -#if ! ((defined(_MSC_VER)) && ! 
defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD) - if (ggml_cpu_has_neon() && ggml_cpu_has_dotprod()) { - const int8x16_t kvalues = vld1q_s8(kvalues_iq4nl); - const block_q8_0 * a_ptr = (const block_q8_0 *) vy; - float * res_ptr = s; - - for (int x = 0; x < nc / ncols_interleaved; x++) { - const block_iq4_nlx4 * b_ptr = (const block_iq4_nlx4 *) vx + (x * nb); - - float32x4_t sumf = vdupq_n_f32(0); - for (int l = 0; l < nb; l++) { - uint8x16_t b_0 = vld1q_u8(b_ptr[l].qs + 0); - uint8x16_t b_1 = vld1q_u8(b_ptr[l].qs + 16); - uint8x16_t b_2 = vld1q_u8(b_ptr[l].qs + 32); - uint8x16_t b_3 = vld1q_u8(b_ptr[l].qs + 48); - - int8x16_t b_0_hi = vqtbl1q_s8(kvalues, b_0 >> 4); - int8x16_t b_0_lo = vqtbl1q_s8(kvalues, b_0 & 0x0F); - int8x16_t b_1_hi = vqtbl1q_s8(kvalues, b_1 >> 4); - int8x16_t b_1_lo = vqtbl1q_s8(kvalues, b_1 & 0x0F); - int8x16_t b_2_hi = vqtbl1q_s8(kvalues, b_2 >> 4); - int8x16_t b_2_lo = vqtbl1q_s8(kvalues, b_2 & 0x0F); - int8x16_t b_3_hi = vqtbl1q_s8(kvalues, b_3 >> 4); - int8x16_t b_3_lo = vqtbl1q_s8(kvalues, b_3 & 0x0F); - - int8x16_t a_0 = vld1q_s8(a_ptr[l].qs + 0); - int8x16_t a_1 = vld1q_s8(a_ptr[l].qs + 16); - - int32x4_t sumi = vdupq_n_s32(0); - sumi = vdotq_laneq_s32(sumi, b_0_lo, a_0, 0); - sumi = vdotq_laneq_s32(sumi, b_0_hi, a_1, 0); - sumi = vdotq_laneq_s32(sumi, b_1_lo, a_0, 1); - sumi = vdotq_laneq_s32(sumi, b_1_hi, a_1, 1); - sumi = vdotq_laneq_s32(sumi, b_2_lo, a_0, 2); - sumi = vdotq_laneq_s32(sumi, b_2_hi, a_1, 2); - sumi = vdotq_laneq_s32(sumi, b_3_lo, a_0, 3); - sumi = vdotq_laneq_s32(sumi, b_3_hi, a_1, 3); - - float32x4_t a_d = vcvt_f32_f16(vld1_dup_f16((const float16_t *)&a_ptr[l].d)); - float32x4_t b_d = vcvt_f32_f16(vld1_f16((const float16_t *)b_ptr[l].d)); - float32x4_t d = a_d * b_d; - - sumf = vmlaq_f32(sumf, d, vcvtq_f32_s32(sumi)); - } - - vst1q_f32(res_ptr + x * 4, sumf); - } - return; - } -#endif // #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) - { - float sumf[4]; - int sumi; - - const block_q8_0 * a_ptr = (const block_q8_0 *) vy; - for (int x = 0; x < nc / ncols_interleaved; x++) { - const block_iq4_nlx4 * b_ptr = (const block_iq4_nlx4 *) vx + (x * nb); - - for (int j = 0; j < ncols_interleaved; j++) sumf[j] = 0.0; - for (int l = 0; l < nb; l++) { - for (int k = 0; k < (qk / (2 * blocklen)); k++) { - for (int j = 0; j < ncols_interleaved; j++) { - sumi = 0; - for (int i = 0; i < blocklen; ++i) { - const int v0 = kvalues_iq4nl[b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0x0F]; - const int v1 = kvalues_iq4nl[b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] >> 4]; - sumi += ((v0 * a_ptr[l].qs[k * blocklen + i]) + (v1 * a_ptr[l].qs[k * blocklen + i + qk / 2])); - } - sumf[j] += sumi * GGML_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_FP16_TO_FP32(a_ptr[l].d); - } - } - } - for (int j = 0; j < ncols_interleaved; j++) s[x * ncols_interleaved + j] = sumf[j]; - } - } -} - -static void ggml_gemm_q4_0_4x4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { - const int qk = QK8_0; - const int nb = n / qk; - const int ncols_interleaved = 4; - const int blocklen = 4; - - assert (n % qk == 0); - assert (nr % 4 == 0); - assert (nc % ncols_interleaved == 0); - - UNUSED(s); - UNUSED(bs); - UNUSED(vx); - UNUSED(vy); - UNUSED(nr); - UNUSED(nc); - UNUSED(nb); - UNUSED(ncols_interleaved); - UNUSED(blocklen); - -#if ! ((defined(_MSC_VER)) && ! 
defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) - if (ggml_cpu_has_neon() && ggml_cpu_has_dotprod()) { - const void * b_ptr = vx; - const void * a_ptr = vy; - float * res_ptr = s; - size_t res_stride = bs * sizeof(float); - - __asm__ __volatile__( - "mov x10, %x[nr]\n" - "mov x9, #0x88\n" - "cmp x10, #0x10\n" - "mul x9, %x[nb], x9\n" - "blt 4f\n" - "1:" // Row loop - "add x28, %x[b_ptr], #0x8\n" - "mov x27, %x[nc]\n" - "add x26, %x[res_ptr], %x[res_stride], LSL #4\n" - "2:" // Column loop - "add x25, %x[a_ptr], #0x8\n" - "movi v15.16b, #0x0\n" - "movi v19.16b, #0x0\n" - "mov x24, %x[nb]\n" - "add x23, x25, x9\n" - "movi v18.16b, #0x0\n" - "movi v14.16b, #0x0\n" - "add x22, x23, x9\n" - "movi v11.16b, #0x0\n" - "movi v13.16b, #0x0\n" - "add x21, x22, x9\n" - "movi v23.16b, #0x0\n" - "movi v16.16b, #0x0\n" - "movi v25.16b, #0x0\n" - "movi v7.16b, #0x0\n" - "movi v0.16b, #0x0\n" - "movi v4.16b, #0x0\n" - "movi v5.16b, #0x0\n" - "movi v21.16b, #0x0\n" - "movi v8.16b, #0x0\n" - "movi v1.16b, #0x0\n" - "3:" // Block loop - "ldr q3, [x28, #0x0]\n" - "ldr q31, [x25, #0x0]\n" - "movi v28.16b, #0x4\n" - "movi v10.4s, #0x0\n" - "ldr q22, [x28, #0x10]\n" - "ldr q6, [x25, #0x10]\n" - "movi v29.4s, #0x0\n" - "movi v9.4s, #0x0\n" - "ldr q27, [x28, #0x20]\n" - "ldr q30, [x28, #0x30]\n" - "movi v20.4s, #0x0\n" - "movi v24.16b, #0xf0\n" - "ldr d2, [x25, #-0x8]\n" - "ldr d26, [x23, #-0x8]\n" - "sshl v12.16b, v3.16b, v28.16b\n" - "sub x20, x28, #0x8\n" - "ldr d17, [x20, #0x0]\n" - "and v3.16b, v3.16b, v24.16b\n" - "subs x24, x24, #0x1\n" - "add x28, x28, #0x48\n" - ".inst 0x4f9fe18a // sdot v10.4s, v12.16b, v31.4b[0]\n" - ".inst 0x4fbfe19d // sdot v29.4s, v12.16b, v31.4b[1]\n" - ".inst 0x4f9fe989 // sdot v9.4s, v12.16b, v31.4b[2]\n" - ".inst 0x4fbfe994 // sdot v20.4s, v12.16b, v31.4b[3]\n" - "sshl v31.16b, v22.16b, v28.16b\n" - "and v22.16b, v22.16b, v24.16b\n" - "fcvtl v17.4s, v17.4h\n" - "fcvtl v2.4s, v2.4h\n" - "fcvtl v26.4s, v26.4h\n" - ".inst 0x4f86e3ea // sdot v10.4s, v31.16b, v6.4b[0]\n" - ".inst 0x4fa6e3fd // sdot v29.4s, v31.16b, v6.4b[1]\n" - ".inst 0x4f86ebe9 // sdot v9.4s, v31.16b, v6.4b[2]\n" - ".inst 0x4fa6ebf4 // sdot v20.4s, v31.16b, v6.4b[3]\n" - "sshl v6.16b, v27.16b, v28.16b\n" - "sshl v28.16b, v30.16b, v28.16b\n" - "and v27.16b, v27.16b, v24.16b\n" - "and v30.16b, v30.16b, v24.16b\n" - "ldr q24, [x25, #0x20]\n" - ".inst 0x4f98e0ca // sdot v10.4s, v6.16b, v24.4b[0]\n" - ".inst 0x4fb8e0dd // sdot v29.4s, v6.16b, v24.4b[1]\n" - ".inst 0x4f98e8c9 // sdot v9.4s, v6.16b, v24.4b[2]\n" - ".inst 0x4fb8e8d4 // sdot v20.4s, v6.16b, v24.4b[3]\n" - "ldr q24, [x25, #0x30]\n" - ".inst 0x4f98e38a // sdot v10.4s, v28.16b, v24.4b[0]\n" - ".inst 0x4fb8e39d // sdot v29.4s, v28.16b, v24.4b[1]\n" - ".inst 0x4f98eb89 // sdot v9.4s, v28.16b, v24.4b[2]\n" - ".inst 0x4fb8eb94 // sdot v20.4s, v28.16b, v24.4b[3]\n" - "ldr q24, [x25, #0x40]\n" - ".inst 0x4f98e06a // sdot v10.4s, v3.16b, v24.4b[0]\n" - ".inst 0x4fb8e07d // sdot v29.4s, v3.16b, v24.4b[1]\n" - ".inst 0x4f98e869 // sdot v9.4s, v3.16b, v24.4b[2]\n" - ".inst 0x4fb8e874 // sdot v20.4s, v3.16b, v24.4b[3]\n" - "ldr q24, [x25, #0x50]\n" - ".inst 0x4f98e2ca // sdot v10.4s, v22.16b, v24.4b[0]\n" - ".inst 0x4fb8e2dd // sdot v29.4s, v22.16b, v24.4b[1]\n" - ".inst 0x4f98eac9 // sdot v9.4s, v22.16b, v24.4b[2]\n" - ".inst 0x4fb8ead4 // sdot v20.4s, v22.16b, v24.4b[3]\n" - "ldr q24, [x25, #0x60]\n" - ".inst 0x4f98e36a // sdot v10.4s, v27.16b, v24.4b[0]\n" - ".inst 0x4fb8e37d // sdot v29.4s, v27.16b, v24.4b[1]\n" - ".inst 0x4f98eb69 // sdot v9.4s, 
v27.16b, v24.4b[2]\n" - ".inst 0x4fb8eb74 // sdot v20.4s, v27.16b, v24.4b[3]\n" - "ldr q24, [x25, #0x70]\n" - "add x25, x25, #0x88\n" - ".inst 0x4f98e3ca // sdot v10.4s, v30.16b, v24.4b[0]\n" - ".inst 0x4fb8e3dd // sdot v29.4s, v30.16b, v24.4b[1]\n" - ".inst 0x4f98ebc9 // sdot v9.4s, v30.16b, v24.4b[2]\n" - ".inst 0x4fb8ebd4 // sdot v20.4s, v30.16b, v24.4b[3]\n" - "fmul v24.4s, v17.4s, v2.s[0]\n" - "scvtf v10.4s, v10.4s, #0x4\n" - "scvtf v29.4s, v29.4s, #0x4\n" - "scvtf v9.4s, v9.4s, #0x4\n" - "scvtf v20.4s, v20.4s, #0x4\n" - "fmla v15.4s, v10.4s, v24.4s\n" - "ldr q24, [x23, #0x0]\n" - "fmul v10.4s, v17.4s, v2.s[1]\n" - "fmla v19.4s, v29.4s, v10.4s\n" - "ldr q10, [x23, #0x10]\n" - "fmul v29.4s, v17.4s, v2.s[2]\n" - "fmul v2.4s, v17.4s, v2.s[3]\n" - "fmla v18.4s, v9.4s, v29.4s\n" - "movi v9.4s, #0x0\n" - "movi v29.4s, #0x0\n" - ".inst 0x4f98e189 // sdot v9.4s, v12.16b, v24.4b[0]\n" - ".inst 0x4fb8e19d // sdot v29.4s, v12.16b, v24.4b[1]\n" - "fmla v14.4s, v20.4s, v2.4s\n" - "movi v20.4s, #0x0\n" - "movi v2.4s, #0x0\n" - ".inst 0x4f98e994 // sdot v20.4s, v12.16b, v24.4b[2]\n" - ".inst 0x4fb8e982 // sdot v2.4s, v12.16b, v24.4b[3]\n" - "ldr q24, [x23, #0x20]\n" - ".inst 0x4f8ae3e9 // sdot v9.4s, v31.16b, v10.4b[0]\n" - ".inst 0x4faae3fd // sdot v29.4s, v31.16b, v10.4b[1]\n" - ".inst 0x4f8aebf4 // sdot v20.4s, v31.16b, v10.4b[2]\n" - ".inst 0x4faaebe2 // sdot v2.4s, v31.16b, v10.4b[3]\n" - "ldr q10, [x23, #0x30]\n" - ".inst 0x4f98e0c9 // sdot v9.4s, v6.16b, v24.4b[0]\n" - ".inst 0x4fb8e0dd // sdot v29.4s, v6.16b, v24.4b[1]\n" - ".inst 0x4f98e8d4 // sdot v20.4s, v6.16b, v24.4b[2]\n" - ".inst 0x4fb8e8c2 // sdot v2.4s, v6.16b, v24.4b[3]\n" - "ldr q24, [x23, #0x40]\n" - ".inst 0x4f8ae389 // sdot v9.4s, v28.16b, v10.4b[0]\n" - ".inst 0x4faae39d // sdot v29.4s, v28.16b, v10.4b[1]\n" - ".inst 0x4f8aeb94 // sdot v20.4s, v28.16b, v10.4b[2]\n" - ".inst 0x4faaeb82 // sdot v2.4s, v28.16b, v10.4b[3]\n" - "ldr q10, [x23, #0x50]\n" - ".inst 0x4f98e069 // sdot v9.4s, v3.16b, v24.4b[0]\n" - ".inst 0x4fb8e07d // sdot v29.4s, v3.16b, v24.4b[1]\n" - ".inst 0x4f98e874 // sdot v20.4s, v3.16b, v24.4b[2]\n" - ".inst 0x4fb8e862 // sdot v2.4s, v3.16b, v24.4b[3]\n" - "ldr q24, [x23, #0x60]\n" - ".inst 0x4f8ae2c9 // sdot v9.4s, v22.16b, v10.4b[0]\n" - ".inst 0x4faae2dd // sdot v29.4s, v22.16b, v10.4b[1]\n" - ".inst 0x4f8aead4 // sdot v20.4s, v22.16b, v10.4b[2]\n" - ".inst 0x4faaeac2 // sdot v2.4s, v22.16b, v10.4b[3]\n" - "ldr q10, [x23, #0x70]\n" - "add x23, x23, #0x88\n" - ".inst 0x4f98e369 // sdot v9.4s, v27.16b, v24.4b[0]\n" - ".inst 0x4fb8e37d // sdot v29.4s, v27.16b, v24.4b[1]\n" - ".inst 0x4f98eb74 // sdot v20.4s, v27.16b, v24.4b[2]\n" - ".inst 0x4fb8eb62 // sdot v2.4s, v27.16b, v24.4b[3]\n" - "ldr q24, [x22, #0x0]\n" - ".inst 0x4f8ae3c9 // sdot v9.4s, v30.16b, v10.4b[0]\n" - ".inst 0x4faae3dd // sdot v29.4s, v30.16b, v10.4b[1]\n" - ".inst 0x4f8aebd4 // sdot v20.4s, v30.16b, v10.4b[2]\n" - ".inst 0x4faaebc2 // sdot v2.4s, v30.16b, v10.4b[3]\n" - "fmul v10.4s, v17.4s, v26.s[0]\n" - "scvtf v9.4s, v9.4s, #0x4\n" - "scvtf v29.4s, v29.4s, #0x4\n" - "scvtf v20.4s, v20.4s, #0x4\n" - "scvtf v2.4s, v2.4s, #0x4\n" - "fmla v11.4s, v9.4s, v10.4s\n" - "ldr q9, [x22, #0x10]\n" - "fmul v10.4s, v17.4s, v26.s[1]\n" - "fmla v13.4s, v29.4s, v10.4s\n" - "ldr d29, [x22, #-0x8]\n" - "fmul v10.4s, v17.4s, v26.s[2]\n" - "fmul v26.4s, v17.4s, v26.s[3]\n" - "fcvtl v29.4s, v29.4h\n" - "fmla v23.4s, v20.4s, v10.4s\n" - "movi v20.4s, #0x0\n" - "movi v10.4s, #0x0\n" - "fmla v16.4s, v2.4s, v26.4s\n" - "movi v26.4s, #0x0\n" - "movi v2.4s, #0x0\n" - 
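/*
 * Note on the scheme above (mirrors the scalar fallback further below):
 * each Q4_0 byte packs two 4-bit weights. `sshl ..., #0x4` moves the low
 * nibble into the high bits (value << 4, sign preserved) and
 * `and ..., 0xf0` isolates the high nibble in place, so both `sdot`
 * operands carry an extra factor of 16; the trailing `scvtf ..., #0x4`
 * (convert with 4 fractional bits) divides that factor back out before
 * the `fmla` against the block scales (`fcvtl` widens them from f16 to
 * f32). The 0x88 stride is sizeof(block_q8_0x4): 4 f16 scales plus
 * 4 * 32 int8 quants = 136 bytes.
 */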
".inst 0x4f98e194 // sdot v20.4s, v12.16b, v24.4b[0]\n" - ".inst 0x4fb8e18a // sdot v10.4s, v12.16b, v24.4b[1]\n" - ".inst 0x4f98e99a // sdot v26.4s, v12.16b, v24.4b[2]\n" - ".inst 0x4fb8e982 // sdot v2.4s, v12.16b, v24.4b[3]\n" - "ldr q24, [x22, #0x20]\n" - ".inst 0x4f89e3f4 // sdot v20.4s, v31.16b, v9.4b[0]\n" - ".inst 0x4fa9e3ea // sdot v10.4s, v31.16b, v9.4b[1]\n" - ".inst 0x4f89ebfa // sdot v26.4s, v31.16b, v9.4b[2]\n" - ".inst 0x4fa9ebe2 // sdot v2.4s, v31.16b, v9.4b[3]\n" - "ldr q9, [x22, #0x30]\n" - ".inst 0x4f98e0d4 // sdot v20.4s, v6.16b, v24.4b[0]\n" - ".inst 0x4fb8e0ca // sdot v10.4s, v6.16b, v24.4b[1]\n" - ".inst 0x4f98e8da // sdot v26.4s, v6.16b, v24.4b[2]\n" - ".inst 0x4fb8e8c2 // sdot v2.4s, v6.16b, v24.4b[3]\n" - "ldr q24, [x22, #0x40]\n" - ".inst 0x4f89e394 // sdot v20.4s, v28.16b, v9.4b[0]\n" - ".inst 0x4fa9e38a // sdot v10.4s, v28.16b, v9.4b[1]\n" - ".inst 0x4f89eb9a // sdot v26.4s, v28.16b, v9.4b[2]\n" - ".inst 0x4fa9eb82 // sdot v2.4s, v28.16b, v9.4b[3]\n" - "ldr q9, [x22, #0x50]\n" - ".inst 0x4f98e074 // sdot v20.4s, v3.16b, v24.4b[0]\n" - ".inst 0x4fb8e06a // sdot v10.4s, v3.16b, v24.4b[1]\n" - ".inst 0x4f98e87a // sdot v26.4s, v3.16b, v24.4b[2]\n" - ".inst 0x4fb8e862 // sdot v2.4s, v3.16b, v24.4b[3]\n" - "ldr q24, [x22, #0x60]\n" - ".inst 0x4f89e2d4 // sdot v20.4s, v22.16b, v9.4b[0]\n" - ".inst 0x4fa9e2ca // sdot v10.4s, v22.16b, v9.4b[1]\n" - ".inst 0x4f89eada // sdot v26.4s, v22.16b, v9.4b[2]\n" - ".inst 0x4fa9eac2 // sdot v2.4s, v22.16b, v9.4b[3]\n" - "ldr q9, [x22, #0x70]\n" - "add x22, x22, #0x88\n" - ".inst 0x4f98e374 // sdot v20.4s, v27.16b, v24.4b[0]\n" - ".inst 0x4fb8e36a // sdot v10.4s, v27.16b, v24.4b[1]\n" - ".inst 0x4f98eb7a // sdot v26.4s, v27.16b, v24.4b[2]\n" - ".inst 0x4fb8eb62 // sdot v2.4s, v27.16b, v24.4b[3]\n" - "ldr q24, [x21, #0x0]\n" - ".inst 0x4f89e3d4 // sdot v20.4s, v30.16b, v9.4b[0]\n" - ".inst 0x4fa9e3ca // sdot v10.4s, v30.16b, v9.4b[1]\n" - ".inst 0x4f89ebda // sdot v26.4s, v30.16b, v9.4b[2]\n" - ".inst 0x4fa9ebc2 // sdot v2.4s, v30.16b, v9.4b[3]\n" - "fmul v9.4s, v17.4s, v29.s[0]\n" - "scvtf v20.4s, v20.4s, #0x4\n" - "scvtf v10.4s, v10.4s, #0x4\n" - "scvtf v26.4s, v26.4s, #0x4\n" - "scvtf v2.4s, v2.4s, #0x4\n" - "fmla v25.4s, v20.4s, v9.4s\n" - "ldr q9, [x21, #0x10]\n" - "fmul v20.4s, v17.4s, v29.s[1]\n" - "fmla v7.4s, v10.4s, v20.4s\n" - "ldr d20, [x21, #-0x8]\n" - "fmul v10.4s, v17.4s, v29.s[2]\n" - "fmul v29.4s, v17.4s, v29.s[3]\n" - "fcvtl v20.4s, v20.4h\n" - "fmla v0.4s, v26.4s, v10.4s\n" - "movi v26.4s, #0x0\n" - "movi v10.4s, #0x0\n" - "fmla v4.4s, v2.4s, v29.4s\n" - "movi v2.4s, #0x0\n" - "movi v29.4s, #0x0\n" - ".inst 0x4f98e19a // sdot v26.4s, v12.16b, v24.4b[0]\n" - ".inst 0x4fb8e18a // sdot v10.4s, v12.16b, v24.4b[1]\n" - ".inst 0x4f98e982 // sdot v2.4s, v12.16b, v24.4b[2]\n" - ".inst 0x4fb8e99d // sdot v29.4s, v12.16b, v24.4b[3]\n" - "ldr q12, [x21, #0x20]\n" - "fmul v24.4s, v17.4s, v20.s[0]\n" - ".inst 0x4f89e3fa // sdot v26.4s, v31.16b, v9.4b[0]\n" - ".inst 0x4fa9e3ea // sdot v10.4s, v31.16b, v9.4b[1]\n" - ".inst 0x4f89ebe2 // sdot v2.4s, v31.16b, v9.4b[2]\n" - ".inst 0x4fa9ebfd // sdot v29.4s, v31.16b, v9.4b[3]\n" - "ldr q9, [x21, #0x30]\n" - "fmul v31.4s, v17.4s, v20.s[1]\n" - ".inst 0x4f8ce0da // sdot v26.4s, v6.16b, v12.4b[0]\n" - ".inst 0x4face0ca // sdot v10.4s, v6.16b, v12.4b[1]\n" - ".inst 0x4f8ce8c2 // sdot v2.4s, v6.16b, v12.4b[2]\n" - ".inst 0x4face8dd // sdot v29.4s, v6.16b, v12.4b[3]\n" - "ldr q12, [x21, #0x40]\n" - "fmul v6.4s, v17.4s, v20.s[2]\n" - "fmul v20.4s, v17.4s, v20.s[3]\n" - ".inst 0x4f89e39a // 
sdot v26.4s, v28.16b, v9.4b[0]\n" - ".inst 0x4fa9e38a // sdot v10.4s, v28.16b, v9.4b[1]\n" - ".inst 0x4f89eb82 // sdot v2.4s, v28.16b, v9.4b[2]\n" - ".inst 0x4fa9eb9d // sdot v29.4s, v28.16b, v9.4b[3]\n" - "ldr q9, [x21, #0x50]\n" - ".inst 0x4f8ce07a // sdot v26.4s, v3.16b, v12.4b[0]\n" - ".inst 0x4face06a // sdot v10.4s, v3.16b, v12.4b[1]\n" - ".inst 0x4f8ce862 // sdot v2.4s, v3.16b, v12.4b[2]\n" - ".inst 0x4face87d // sdot v29.4s, v3.16b, v12.4b[3]\n" - "ldr q12, [x21, #0x60]\n" - ".inst 0x4f89e2da // sdot v26.4s, v22.16b, v9.4b[0]\n" - ".inst 0x4fa9e2ca // sdot v10.4s, v22.16b, v9.4b[1]\n" - ".inst 0x4f89eac2 // sdot v2.4s, v22.16b, v9.4b[2]\n" - ".inst 0x4fa9eadd // sdot v29.4s, v22.16b, v9.4b[3]\n" - "ldr q17, [x21, #0x70]\n" - "add x21, x21, #0x88\n" - ".inst 0x4f8ce37a // sdot v26.4s, v27.16b, v12.4b[0]\n" - ".inst 0x4face36a // sdot v10.4s, v27.16b, v12.4b[1]\n" - ".inst 0x4f8ceb62 // sdot v2.4s, v27.16b, v12.4b[2]\n" - ".inst 0x4faceb7d // sdot v29.4s, v27.16b, v12.4b[3]\n" - ".inst 0x4f91e3da // sdot v26.4s, v30.16b, v17.4b[0]\n" - ".inst 0x4fb1e3ca // sdot v10.4s, v30.16b, v17.4b[1]\n" - ".inst 0x4f91ebc2 // sdot v2.4s, v30.16b, v17.4b[2]\n" - ".inst 0x4fb1ebdd // sdot v29.4s, v30.16b, v17.4b[3]\n" - "scvtf v26.4s, v26.4s, #0x4\n" - "scvtf v10.4s, v10.4s, #0x4\n" - "fmla v5.4s, v26.4s, v24.4s\n" - "scvtf v2.4s, v2.4s, #0x4\n" - "scvtf v29.4s, v29.4s, #0x4\n" - "fmla v21.4s, v10.4s, v31.4s\n" - "fmla v8.4s, v2.4s, v6.4s\n" - "fmla v1.4s, v29.4s, v20.4s\n" - "bgt 3b\n" - "mov x20, %x[res_ptr]\n" - "subs x27, x27, #0x4\n" - "add %x[res_ptr], %x[res_ptr], #0x10\n" - "str q15, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q19, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q18, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q14, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q11, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q13, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q23, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q16, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q25, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q7, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q0, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q4, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q5, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q21, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q8, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q1, [x20, #0x0]\n" - "bne 2b\n" - "mov x20, #0x4\n" - "sub x10, x10, #0x10\n" - "cmp x10, #0x10\n" - "mov %x[res_ptr], x26\n" - "madd %x[a_ptr], x20, x9, %x[a_ptr]\n" - "bge 1b\n" - "4:" // Row loop skip - "cbz x10, 9f\n" - "5:" // Row tail: Row loop - "add x24, %x[b_ptr], #0x8\n" - "mov x23, %x[nc]\n" - "add x22, %x[res_ptr], %x[res_stride], LSL #2\n" - "6:" // Row tail: Column loop - "movi v15.16b, #0x0\n" - "movi v19.16b, #0x0\n" - "add x25, %x[a_ptr], #0x8\n" - "mov x21, %x[nb]\n" - "movi v18.16b, #0x0\n" - "movi v14.16b, #0x0\n" - "7:" // Row tail: Block loop - "ldr q7, [x24, #0x0]\n" - "ldr q5, [x25, #0x0]\n" - "movi v9.16b, #0x4\n" - "movi v4.4s, #0x0\n" - "ldr q3, [x24, #0x10]\n" - "ldr q2, [x25, #0x10]\n" - "movi v1.4s, #0x0\n" - "movi v0.4s, #0x0\n" - "ldr q13, [x24, #0x20]\n" - "ldr q31, [x25, #0x20]\n" - "movi v30.4s, #0x0\n" - "movi v29.16b, #0xf0\n" - "ldr q28, [x24, #0x30]\n" - "ldr q27, [x25, #0x30]\n" - "sshl v20.16b, v7.16b, v9.16b\n" - "sub x20, x24, #0x8\n" - "ldr q26, [x25, #0x40]\n" - 
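/*
 * Row tail: once fewer than 16 rows remain (nr % 16), they are handled
 * four at a time. x25 points 8 bytes into the current block_q8_0x4, so
 * [x25, #-0x8] fetches the four f16 activation scales while
 * [x25, #0x0]..[x25, #0x70] fetch the 128 quant bytes; the result stores
 * are gated on the remaining row count (`cmp`/`ble 8f`) so only valid
 * rows are written out.
 */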
"ldr q25, [x25, #0x50]\n" - "sshl v17.16b, v3.16b, v9.16b\n" - "and v7.16b, v7.16b, v29.16b\n" - "ldr q24, [x25, #0x60]\n" - "ldr q16, [x25, #0x70]\n" - "sshl v22.16b, v13.16b, v9.16b\n" - "and v3.16b, v3.16b, v29.16b\n" - "ldr d21, [x20, #0x0]\n" - "ldr d12, [x25, #-0x8]\n" - ".inst 0x4f85e284 // sdot v4.4s, v20.16b, v5.4b[0]\n" - ".inst 0x4fa5e281 // sdot v1.4s, v20.16b, v5.4b[1]\n" - ".inst 0x4f85ea80 // sdot v0.4s, v20.16b, v5.4b[2]\n" - ".inst 0x4fa5ea9e // sdot v30.4s, v20.16b, v5.4b[3]\n" - "sshl v9.16b, v28.16b, v9.16b\n" - "subs x21, x21, #0x1\n" - "and v13.16b, v13.16b, v29.16b\n" - "and v28.16b, v28.16b, v29.16b\n" - "add x25, x25, #0x88\n" - "add x24, x24, #0x48\n" - "fcvtl v21.4s, v21.4h\n" - "fcvtl v12.4s, v12.4h\n" - ".inst 0x4f82e224 // sdot v4.4s, v17.16b, v2.4b[0]\n" - ".inst 0x4fa2e221 // sdot v1.4s, v17.16b, v2.4b[1]\n" - ".inst 0x4f82ea20 // sdot v0.4s, v17.16b, v2.4b[2]\n" - ".inst 0x4fa2ea3e // sdot v30.4s, v17.16b, v2.4b[3]\n" - "fmul v11.4s, v21.4s, v12.s[0]\n" - "fmul v23.4s, v21.4s, v12.s[1]\n" - "fmul v17.4s, v21.4s, v12.s[2]\n" - ".inst 0x4f9fe2c4 // sdot v4.4s, v22.16b, v31.4b[0]\n" - "fmul v6.4s, v21.4s, v12.s[3]\n" - ".inst 0x4fbfe2c1 // sdot v1.4s, v22.16b, v31.4b[1]\n" - ".inst 0x4f9feac0 // sdot v0.4s, v22.16b, v31.4b[2]\n" - ".inst 0x4fbfeade // sdot v30.4s, v22.16b, v31.4b[3]\n" - ".inst 0x4f9be124 // sdot v4.4s, v9.16b, v27.4b[0]\n" - ".inst 0x4fbbe121 // sdot v1.4s, v9.16b, v27.4b[1]\n" - ".inst 0x4f9be920 // sdot v0.4s, v9.16b, v27.4b[2]\n" - ".inst 0x4fbbe93e // sdot v30.4s, v9.16b, v27.4b[3]\n" - ".inst 0x4f9ae0e4 // sdot v4.4s, v7.16b, v26.4b[0]\n" - ".inst 0x4fbae0e1 // sdot v1.4s, v7.16b, v26.4b[1]\n" - ".inst 0x4f9ae8e0 // sdot v0.4s, v7.16b, v26.4b[2]\n" - ".inst 0x4fbae8fe // sdot v30.4s, v7.16b, v26.4b[3]\n" - ".inst 0x4f99e064 // sdot v4.4s, v3.16b, v25.4b[0]\n" - ".inst 0x4fb9e061 // sdot v1.4s, v3.16b, v25.4b[1]\n" - ".inst 0x4f99e860 // sdot v0.4s, v3.16b, v25.4b[2]\n" - ".inst 0x4fb9e87e // sdot v30.4s, v3.16b, v25.4b[3]\n" - ".inst 0x4f98e1a4 // sdot v4.4s, v13.16b, v24.4b[0]\n" - ".inst 0x4fb8e1a1 // sdot v1.4s, v13.16b, v24.4b[1]\n" - ".inst 0x4f98e9a0 // sdot v0.4s, v13.16b, v24.4b[2]\n" - ".inst 0x4fb8e9be // sdot v30.4s, v13.16b, v24.4b[3]\n" - ".inst 0x4f90e384 // sdot v4.4s, v28.16b, v16.4b[0]\n" - ".inst 0x4fb0e381 // sdot v1.4s, v28.16b, v16.4b[1]\n" - ".inst 0x4f90eb80 // sdot v0.4s, v28.16b, v16.4b[2]\n" - ".inst 0x4fb0eb9e // sdot v30.4s, v28.16b, v16.4b[3]\n" - "scvtf v4.4s, v4.4s, #0x4\n" - "scvtf v1.4s, v1.4s, #0x4\n" - "scvtf v0.4s, v0.4s, #0x4\n" - "fmla v15.4s, v4.4s, v11.4s\n" - "scvtf v30.4s, v30.4s, #0x4\n" - "fmla v19.4s, v1.4s, v23.4s\n" - "fmla v18.4s, v0.4s, v17.4s\n" - "fmla v14.4s, v30.4s, v6.4s\n" - "bgt 7b\n" - "mov x20, %x[res_ptr]\n" - "cmp x10, #0x1\n" - "str q15, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "ble 8f\n" - "cmp x10, #0x2\n" - "str q19, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "ble 8f\n" - "cmp x10, #0x3\n" - "str q18, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "ble 8f\n" - "str q14, [x20, #0x0]\n" - "8:" // Row tail: Accumulator store skip - "subs x23, x23, #0x4\n" - "add %x[res_ptr], %x[res_ptr], #0x10\n" - "bne 6b\n" - "subs x10, x10, #0x4\n" - "add %x[a_ptr], %x[a_ptr], x9\n" - "mov %x[res_ptr], x22\n" - "bgt 5b\n" - "9:" // Row tail: Row loop skip - : [a_ptr] "+&r" (a_ptr), [res_ptr] "+&r" (res_ptr) - : [b_ptr] "r" (b_ptr), [nr] "r" (nr), [nb] "r" (nb), [res_stride] "r" (res_stride), [nc] "r" (nc) - : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", 
"v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28" - ); - return; - } -#endif // #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) - { - float sumf[4][4]; - int sumi; - - for (int y = 0; y < nr / 4; y++) { - const block_q8_0x4 * a_ptr = (const block_q8_0x4 *) vy + (y * nb); - for (int x = 0; x < nc / ncols_interleaved; x++) { - const block_q4_0x4 * b_ptr = (const block_q4_0x4 *) vx + (x * nb); - for (int m = 0; m < 4; m++) { - for (int j = 0; j < ncols_interleaved; j++) sumf[m][j] = 0.0; - } - for (int l = 0; l < nb; l++) { - for (int k = 0; k < (qk / (2 * blocklen)); k++) { - for (int m = 0; m < 4; m++) { - for (int j = 0; j < ncols_interleaved; j++) { - sumi = 0; - for (int i = 0; i < blocklen; ++i) { - const int v0 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] << 4); - const int v1 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0xF0); - sumi += ((v0 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i]) + - (v1 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i + qk / 2 * 4])) >> 4; - } - sumf[m][j] += sumi * GGML_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_FP16_TO_FP32(a_ptr[l].d[m]); - } - } - } - } - for (int m = 0; m < 4; m++) { - for (int j = 0; j < ncols_interleaved; j++) - s[(y * 4 + m) * bs + x * ncols_interleaved + j] = sumf[m][j]; - } - } - } - } -} - -static void ggml_gemm_q4_0_4x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { - const int qk = QK8_0; - const int nb = n / qk; - const int ncols_interleaved = 4; - const int blocklen = 8; - - assert (n % qk == 0); - assert (nr % 4 == 0); - assert (nc % ncols_interleaved == 0); - - UNUSED(s); - UNUSED(bs); - UNUSED(vx); - UNUSED(vy); - UNUSED(nr); - UNUSED(nc); - UNUSED(nb); - UNUSED(ncols_interleaved); - UNUSED(blocklen); - -#if ! ((defined(_MSC_VER)) && ! 
defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_MATMUL_INT8) - if (ggml_cpu_has_neon() && ggml_cpu_has_matmul_int8()) { - const void * b_ptr = vx; - const void * a_ptr = vy; - float * res_ptr = s; - size_t res_stride = bs * sizeof(float); - - __asm__ __volatile__( - "mov x10, %x[nr]\n" - "mov x9, #0x88\n" - "cmp x10, #0x10\n" - "mul x9, %x[nb], x9\n" - "blt 4f\n" - "1:" // Row loop - "add x28, %x[b_ptr], #0x8\n" - "mov x27, %x[nc]\n" - "add x26, %x[res_ptr], %x[res_stride], LSL #4\n" - "2:" // Column loop - "add x25, %x[a_ptr], #0x8\n" - "movi v2.16b, #0x0\n" - "movi v10.16b, #0x0\n" - "mov x24, %x[nb]\n" - "add x23, x25, x9\n" - "movi v12.16b, #0x0\n" - "movi v28.16b, #0x0\n" - "add x22, x23, x9\n" - "movi v11.16b, #0x0\n" - "movi v13.16b, #0x0\n" - "add x21, x22, x9\n" - "movi v22.16b, #0x0\n" - "movi v23.16b, #0x0\n" - "movi v25.16b, #0x0\n" - "movi v5.16b, #0x0\n" - "movi v7.16b, #0x0\n" - "movi v4.16b, #0x0\n" - "movi v6.16b, #0x0\n" - "movi v30.16b, #0x0\n" - "movi v24.16b, #0x0\n" - "movi v14.16b, #0x0\n" - "3:" // Block loop - "ldr q21, [x28, #0x0]\n" - "ldr q16, [x28, #0x10]\n" - "movi v1.16b, #0x4\n" - "movi v19.4s, #0x0\n" - "ldr q27, [x25, #0x0]\n" - "ldr q15, [x25, #0x10]\n" - "movi v26.4s, #0x0\n" - "movi v18.4s, #0x0\n" - "ldr q29, [x28, #0x20]\n" - "ldr q3, [x28, #0x30]\n" - "movi v17.4s, #0x0\n" - "movi v0.16b, #0xf0\n" - "ldr d20, [x25, #-0x8]\n" - "ldr d9, [x23, #-0x8]\n" - "sshl v8.16b, v21.16b, v1.16b\n" - "sshl v31.16b, v16.16b, v1.16b\n" - "and v21.16b, v21.16b, v0.16b\n" - "and v16.16b, v16.16b, v0.16b\n" - "sub x20, x28, #0x8\n" - "subs x24, x24, #0x1\n" - "add x28, x28, #0x48\n" - ".inst 0x4e88a773 // smmla v19.4s, v27.16b, v8.16b\n" - ".inst 0x4e9fa77a // smmla v26.4s, v27.16b, v31.16b\n" - "ldr q27, [x25, #0x20]\n" - ".inst 0x4e88a5f2 // smmla v18.4s, v15.16b, v8.16b\n" - ".inst 0x4e9fa5f1 // smmla v17.4s, v15.16b, v31.16b\n" - "sshl v15.16b, v29.16b, v1.16b\n" - "sshl v1.16b, v3.16b, v1.16b\n" - "and v29.16b, v29.16b, v0.16b\n" - "and v3.16b, v3.16b, v0.16b\n" - "ldr q0, [x25, #0x30]\n" - "fcvtl v20.4s, v20.4h\n" - ".inst 0x4e8fa773 // smmla v19.4s, v27.16b, v15.16b\n" - "fcvtl v9.4s, v9.4h\n" - ".inst 0x4e81a77a // smmla v26.4s, v27.16b, v1.16b\n" - "ldr q27, [x25, #0x40]\n" - ".inst 0x4e8fa412 // smmla v18.4s, v0.16b, v15.16b\n" - ".inst 0x4e81a411 // smmla v17.4s, v0.16b, v1.16b\n" - "ldr q0, [x25, #0x50]\n" - ".inst 0x4e95a773 // smmla v19.4s, v27.16b, v21.16b\n" - ".inst 0x4e90a77a // smmla v26.4s, v27.16b, v16.16b\n" - "ldr q27, [x25, #0x60]\n" - ".inst 0x4e95a412 // smmla v18.4s, v0.16b, v21.16b\n" - ".inst 0x4e90a411 // smmla v17.4s, v0.16b, v16.16b\n" - "ldr q0, [x25, #0x70]\n" - "add x25, x25, #0x88\n" - ".inst 0x4e9da773 // smmla v19.4s, v27.16b, v29.16b\n" - ".inst 0x4e83a77a // smmla v26.4s, v27.16b, v3.16b\n" - "ldr d27, [x20, #0x0]\n" - ".inst 0x4e9da412 // smmla v18.4s, v0.16b, v29.16b\n" - ".inst 0x4e83a411 // smmla v17.4s, v0.16b, v3.16b\n" - "fcvtl v27.4s, v27.4h\n" - "uzp1 v0.2d, v19.2d, v26.2d\n" - "uzp2 v26.2d, v19.2d, v26.2d\n" - "fmul v19.4s, v27.4s, v20.s[0]\n" - "scvtf v0.4s, v0.4s, #0x4\n" - "scvtf v26.4s, v26.4s, #0x4\n" - "fmla v2.4s, v0.4s, v19.4s\n" - "ldr q19, [x23, #0x0]\n" - "uzp1 v0.2d, v18.2d, v17.2d\n" - "uzp2 v18.2d, v18.2d, v17.2d\n" - "fmul v17.4s, v27.4s, v20.s[1]\n" - "scvtf v0.4s, v0.4s, #0x4\n" - "scvtf v18.4s, v18.4s, #0x4\n" - "fmla v10.4s, v26.4s, v17.4s\n" - "ldr q17, [x23, #0x10]\n" - "fmul v26.4s, v27.4s, v20.s[2]\n" - "fmul v20.4s, v27.4s, v20.s[3]\n" - "fmla v12.4s, 
v0.4s, v26.4s\n" - "ldr d0, [x22, #-0x8]\n" - "ldr d26, [x21, #-0x8]\n" - "fcvtl v0.4s, v0.4h\n" - "fmla v28.4s, v18.4s, v20.4s\n" - "movi v20.4s, #0x0\n" - "movi v18.4s, #0x0\n" - ".inst 0x4e88a674 // smmla v20.4s, v19.16b, v8.16b\n" - ".inst 0x4e9fa672 // smmla v18.4s, v19.16b, v31.16b\n" - "ldr q19, [x23, #0x20]\n" - "fcvtl v26.4s, v26.4h\n" - ".inst 0x4e8fa674 // smmla v20.4s, v19.16b, v15.16b\n" - ".inst 0x4e81a672 // smmla v18.4s, v19.16b, v1.16b\n" - "ldr q19, [x23, #0x40]\n" - ".inst 0x4e95a674 // smmla v20.4s, v19.16b, v21.16b\n" - ".inst 0x4e90a672 // smmla v18.4s, v19.16b, v16.16b\n" - "ldr q19, [x23, #0x60]\n" - ".inst 0x4e9da674 // smmla v20.4s, v19.16b, v29.16b\n" - ".inst 0x4e83a672 // smmla v18.4s, v19.16b, v3.16b\n" - "uzp1 v19.2d, v20.2d, v18.2d\n" - "scvtf v19.4s, v19.4s, #0x4\n" - "uzp2 v20.2d, v20.2d, v18.2d\n" - "fmul v18.4s, v27.4s, v9.s[0]\n" - "scvtf v20.4s, v20.4s, #0x4\n" - "fmla v11.4s, v19.4s, v18.4s\n" - "ldr q18, [x22, #0x0]\n" - "fmul v19.4s, v27.4s, v9.s[1]\n" - "fmla v13.4s, v20.4s, v19.4s\n" - "movi v19.4s, #0x0\n" - "movi v20.4s, #0x0\n" - ".inst 0x4e88a633 // smmla v19.4s, v17.16b, v8.16b\n" - ".inst 0x4e9fa634 // smmla v20.4s, v17.16b, v31.16b\n" - "ldr q17, [x23, #0x30]\n" - ".inst 0x4e8fa633 // smmla v19.4s, v17.16b, v15.16b\n" - ".inst 0x4e81a634 // smmla v20.4s, v17.16b, v1.16b\n" - "ldr q17, [x23, #0x50]\n" - ".inst 0x4e95a633 // smmla v19.4s, v17.16b, v21.16b\n" - ".inst 0x4e90a634 // smmla v20.4s, v17.16b, v16.16b\n" - "ldr q17, [x23, #0x70]\n" - "add x23, x23, #0x88\n" - ".inst 0x4e9da633 // smmla v19.4s, v17.16b, v29.16b\n" - ".inst 0x4e83a634 // smmla v20.4s, v17.16b, v3.16b\n" - "uzp1 v17.2d, v19.2d, v20.2d\n" - "scvtf v17.4s, v17.4s, #0x4\n" - "uzp2 v20.2d, v19.2d, v20.2d\n" - "fmul v19.4s, v27.4s, v9.s[2]\n" - "fmul v9.4s, v27.4s, v9.s[3]\n" - "scvtf v20.4s, v20.4s, #0x4\n" - "fmla v22.4s, v17.4s, v19.4s\n" - "ldr q17, [x22, #0x10]\n" - "movi v19.4s, #0x0\n" - ".inst 0x4e88a653 // smmla v19.4s, v18.16b, v8.16b\n" - "fmla v23.4s, v20.4s, v9.4s\n" - "movi v20.4s, #0x0\n" - "movi v9.4s, #0x0\n" - ".inst 0x4e9fa654 // smmla v20.4s, v18.16b, v31.16b\n" - "ldr q18, [x22, #0x20]\n" - ".inst 0x4e88a629 // smmla v9.4s, v17.16b, v8.16b\n" - ".inst 0x4e8fa653 // smmla v19.4s, v18.16b, v15.16b\n" - ".inst 0x4e81a654 // smmla v20.4s, v18.16b, v1.16b\n" - "ldr q18, [x22, #0x40]\n" - ".inst 0x4e95a653 // smmla v19.4s, v18.16b, v21.16b\n" - ".inst 0x4e90a654 // smmla v20.4s, v18.16b, v16.16b\n" - "ldr q18, [x22, #0x60]\n" - ".inst 0x4e9da653 // smmla v19.4s, v18.16b, v29.16b\n" - ".inst 0x4e83a654 // smmla v20.4s, v18.16b, v3.16b\n" - "movi v18.4s, #0x0\n" - ".inst 0x4e9fa632 // smmla v18.4s, v17.16b, v31.16b\n" - "ldr q17, [x22, #0x30]\n" - ".inst 0x4e8fa629 // smmla v9.4s, v17.16b, v15.16b\n" - ".inst 0x4e81a632 // smmla v18.4s, v17.16b, v1.16b\n" - "ldr q17, [x22, #0x50]\n" - ".inst 0x4e95a629 // smmla v9.4s, v17.16b, v21.16b\n" - ".inst 0x4e90a632 // smmla v18.4s, v17.16b, v16.16b\n" - "ldr q17, [x22, #0x70]\n" - "add x22, x22, #0x88\n" - ".inst 0x4e9da629 // smmla v9.4s, v17.16b, v29.16b\n" - ".inst 0x4e83a632 // smmla v18.4s, v17.16b, v3.16b\n" - "uzp1 v17.2d, v19.2d, v20.2d\n" - "uzp2 v20.2d, v19.2d, v20.2d\n" - "fmul v19.4s, v27.4s, v0.s[0]\n" - "scvtf v17.4s, v17.4s, #0x4\n" - "scvtf v20.4s, v20.4s, #0x4\n" - "fmla v25.4s, v17.4s, v19.4s\n" - "ldr q19, [x21, #0x0]\n" - "fmul v17.4s, v27.4s, v0.s[1]\n" - "fmla v5.4s, v20.4s, v17.4s\n" - "ldr q17, [x21, #0x10]\n" - "uzp1 v20.2d, v9.2d, v18.2d\n" - "uzp2 v9.2d, v9.2d, v18.2d\n" - "fmul v18.4s, 
v27.4s, v0.s[2]\n" - "fmul v0.4s, v27.4s, v0.s[3]\n" - "scvtf v20.4s, v20.4s, #0x4\n" - "scvtf v9.4s, v9.4s, #0x4\n" - "fmla v7.4s, v20.4s, v18.4s\n" - "movi v20.4s, #0x0\n" - "movi v18.4s, #0x0\n" - ".inst 0x4e88a674 // smmla v20.4s, v19.16b, v8.16b\n" - ".inst 0x4e9fa672 // smmla v18.4s, v19.16b, v31.16b\n" - "ldr q19, [x21, #0x20]\n" - "fmla v4.4s, v9.4s, v0.4s\n" - "movi v9.4s, #0x0\n" - "movi v0.4s, #0x0\n" - ".inst 0x4e88a629 // smmla v9.4s, v17.16b, v8.16b\n" - "fmul v8.4s, v27.4s, v26.s[0]\n" - ".inst 0x4e9fa620 // smmla v0.4s, v17.16b, v31.16b\n" - "ldr q17, [x21, #0x30]\n" - ".inst 0x4e8fa674 // smmla v20.4s, v19.16b, v15.16b\n" - "fmul v31.4s, v27.4s, v26.s[1]\n" - ".inst 0x4e81a672 // smmla v18.4s, v19.16b, v1.16b\n" - "ldr q19, [x21, #0x40]\n" - ".inst 0x4e8fa629 // smmla v9.4s, v17.16b, v15.16b\n" - "fmul v15.4s, v27.4s, v26.s[2]\n" - "fmul v27.4s, v27.4s, v26.s[3]\n" - ".inst 0x4e81a620 // smmla v0.4s, v17.16b, v1.16b\n" - "ldr q1, [x21, #0x50]\n" - ".inst 0x4e95a674 // smmla v20.4s, v19.16b, v21.16b\n" - ".inst 0x4e90a672 // smmla v18.4s, v19.16b, v16.16b\n" - "ldr q26, [x21, #0x60]\n" - ".inst 0x4e95a429 // smmla v9.4s, v1.16b, v21.16b\n" - ".inst 0x4e90a420 // smmla v0.4s, v1.16b, v16.16b\n" - "ldr q21, [x21, #0x70]\n" - "add x21, x21, #0x88\n" - ".inst 0x4e9da754 // smmla v20.4s, v26.16b, v29.16b\n" - ".inst 0x4e83a752 // smmla v18.4s, v26.16b, v3.16b\n" - ".inst 0x4e9da6a9 // smmla v9.4s, v21.16b, v29.16b\n" - ".inst 0x4e83a6a0 // smmla v0.4s, v21.16b, v3.16b\n" - "uzp1 v29.2d, v20.2d, v18.2d\n" - "uzp2 v21.2d, v20.2d, v18.2d\n" - "scvtf v29.4s, v29.4s, #0x4\n" - "uzp1 v18.2d, v9.2d, v0.2d\n" - "uzp2 v16.2d, v9.2d, v0.2d\n" - "scvtf v21.4s, v21.4s, #0x4\n" - "fmla v6.4s, v29.4s, v8.4s\n" - "scvtf v18.4s, v18.4s, #0x4\n" - "scvtf v16.4s, v16.4s, #0x4\n" - "fmla v30.4s, v21.4s, v31.4s\n" - "fmla v24.4s, v18.4s, v15.4s\n" - "fmla v14.4s, v16.4s, v27.4s\n" - "bgt 3b\n" - "mov x20, %x[res_ptr]\n" - "subs x27, x27, #0x4\n" - "add %x[res_ptr], %x[res_ptr], #0x10\n" - "str q2, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q10, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q12, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q28, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q11, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q13, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q22, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q23, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q25, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q5, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q7, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q4, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q6, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q30, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q24, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "str q14, [x20, #0x0]\n" - "bne 2b\n" - "mov x20, #0x4\n" - "sub x10, x10, #0x10\n" - "cmp x10, #0x10\n" - "mov %x[res_ptr], x26\n" - "madd %x[a_ptr], x20, x9, %x[a_ptr]\n" - "bge 1b\n" - "4:" // Row loop skip - "cbz x10, 9f\n" - "5:" // Row tail: Row loop - "add x24, %x[b_ptr], #0x8\n" - "mov x23, %x[nc]\n" - "add x22, %x[res_ptr], %x[res_stride], LSL #2\n" - "6:" // Row tail: Column loop - "movi v2.16b, #0x0\n" - "movi v10.16b, #0x0\n" - "add x25, %x[a_ptr], #0x8\n" - "mov x21, %x[nb]\n" - "movi v12.16b, #0x0\n" - "movi v28.16b, #0x0\n" - "7:" // Row tail: Block loop - "ldr q6, [x24, #0x0]\n" - "ldr q5, 
[x24, #0x10]\n" - "movi v17.16b, #0x4\n" - "movi v8.4s, #0x0\n" - "ldr q4, [x25, #0x0]\n" - "ldr q13, [x25, #0x10]\n" - "movi v27.4s, #0x0\n" - "movi v0.4s, #0x0\n" - "ldr q31, [x24, #0x20]\n" - "ldr q14, [x24, #0x30]\n" - "movi v29.4s, #0x0\n" - "movi v22.16b, #0xf0\n" - "ldr q11, [x25, #0x20]\n" - "ldr q23, [x25, #0x30]\n" - "sshl v21.16b, v6.16b, v17.16b\n" - "sshl v16.16b, v5.16b, v17.16b\n" - "ldr q20, [x25, #0x40]\n" - "ldr q26, [x25, #0x50]\n" - "and v6.16b, v6.16b, v22.16b\n" - "and v5.16b, v5.16b, v22.16b\n" - "ldr q25, [x25, #0x60]\n" - "ldr q3, [x25, #0x70]\n" - "sshl v19.16b, v31.16b, v17.16b\n" - "sshl v18.16b, v14.16b, v17.16b\n" - "ldr d17, [x25, #-0x8]\n" - ".inst 0x4e95a488 // smmla v8.4s, v4.16b, v21.16b\n" - ".inst 0x4e90a49b // smmla v27.4s, v4.16b, v16.16b\n" - "and v31.16b, v31.16b, v22.16b\n" - ".inst 0x4e95a5a0 // smmla v0.4s, v13.16b, v21.16b\n" - ".inst 0x4e90a5bd // smmla v29.4s, v13.16b, v16.16b\n" - "and v14.16b, v14.16b, v22.16b\n" - "sub x20, x24, #0x8\n" - "ldr d16, [x20, #0x0]\n" - "subs x21, x21, #0x1\n" - "add x25, x25, #0x88\n" - "fcvtl v17.4s, v17.4h\n" - "add x24, x24, #0x48\n" - ".inst 0x4e93a568 // smmla v8.4s, v11.16b, v19.16b\n" - ".inst 0x4e92a57b // smmla v27.4s, v11.16b, v18.16b\n" - ".inst 0x4e93a6e0 // smmla v0.4s, v23.16b, v19.16b\n" - ".inst 0x4e92a6fd // smmla v29.4s, v23.16b, v18.16b\n" - "fcvtl v16.4s, v16.4h\n" - ".inst 0x4e86a688 // smmla v8.4s, v20.16b, v6.16b\n" - ".inst 0x4e85a69b // smmla v27.4s, v20.16b, v5.16b\n" - "fmul v23.4s, v16.4s, v17.s[0]\n" - "fmul v21.4s, v16.4s, v17.s[1]\n" - "fmul v1.4s, v16.4s, v17.s[2]\n" - "fmul v20.4s, v16.4s, v17.s[3]\n" - ".inst 0x4e86a740 // smmla v0.4s, v26.16b, v6.16b\n" - ".inst 0x4e85a75d // smmla v29.4s, v26.16b, v5.16b\n" - ".inst 0x4e9fa728 // smmla v8.4s, v25.16b, v31.16b\n" - ".inst 0x4e8ea73b // smmla v27.4s, v25.16b, v14.16b\n" - ".inst 0x4e9fa460 // smmla v0.4s, v3.16b, v31.16b\n" - ".inst 0x4e8ea47d // smmla v29.4s, v3.16b, v14.16b\n" - "uzp1 v19.2d, v8.2d, v27.2d\n" - "uzp2 v18.2d, v8.2d, v27.2d\n" - "scvtf v19.4s, v19.4s, #0x4\n" - "uzp1 v17.2d, v0.2d, v29.2d\n" - "uzp2 v16.2d, v0.2d, v29.2d\n" - "scvtf v18.4s, v18.4s, #0x4\n" - "fmla v2.4s, v19.4s, v23.4s\n" - "scvtf v17.4s, v17.4s, #0x4\n" - "scvtf v16.4s, v16.4s, #0x4\n" - "fmla v10.4s, v18.4s, v21.4s\n" - "fmla v12.4s, v17.4s, v1.4s\n" - "fmla v28.4s, v16.4s, v20.4s\n" - "bgt 7b\n" - "mov x20, %x[res_ptr]\n" - "cmp x10, #0x1\n" - "str q2, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "ble 8f\n" - "cmp x10, #0x2\n" - "str q10, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "ble 8f\n" - "cmp x10, #0x3\n" - "str q12, [x20, #0x0]\n" - "add x20, x20, %x[res_stride]\n" - "ble 8f\n" - "str q28, [x20, #0x0]\n" - "8:" // Row tail: Accumulator store skip - "subs x23, x23, #0x4\n" - "add %x[res_ptr], %x[res_ptr], #0x10\n" - "bne 6b\n" - "subs x10, x10, #0x4\n" - "add %x[a_ptr], %x[a_ptr], x9\n" - "mov %x[res_ptr], x22\n" - "bgt 5b\n" - "9:" // Row tail: Row loop skip - : [a_ptr] "+&r" (a_ptr), [res_ptr] "+&r" (res_ptr) - : [b_ptr] "r" (b_ptr), [nr] "r" (nr), [nb] "r" (nb), [res_stride] "r" (res_stride), [nc] "r" (nc) - : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28" - ); - return; - } -#endif // #if ! ((defined(_MSC_VER)) && ! 
defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_MATMUL_INT8) - float sumf[4][4]; - int sumi; - - for (int y = 0; y < nr / 4; y++) { - const block_q8_0x4 * a_ptr = (const block_q8_0x4 *) vy + (y * nb); - for (int x = 0; x < nc / ncols_interleaved; x++) { - const block_q4_0x4 * b_ptr = (const block_q4_0x4 *) vx + (x * nb); - for (int m = 0; m < 4; m++) { - for (int j = 0; j < ncols_interleaved; j++) sumf[m][j] = 0.0; - } - for (int l = 0; l < nb; l++) { - for (int k = 0; k < (qk / (2 * blocklen)); k++) { - for (int m = 0; m < 4; m++) { - for (int j = 0; j < ncols_interleaved; j++) { - sumi = 0; - for (int i = 0; i < blocklen; ++i) { - const int v0 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] << 4); - const int v1 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0xF0); - sumi += ((v0 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i]) + - (v1 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i + qk / 2 * 4])) >> 4; - } - sumf[m][j] += sumi * GGML_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_FP16_TO_FP32(a_ptr[l].d[m]); - } - } - } - } - for (int m = 0; m < 4; m++) { - for (int j = 0; j < ncols_interleaved; j++) - s[(y * 4 + m) * bs + x * ncols_interleaved + j] = sumf[m][j]; - } - } - } -} - -static void ggml_gemm_q4_0_8x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { - const int qk = QK8_0; - const int nb = n / qk; - const int ncols_interleaved = 8; - const int blocklen = 8; - - assert (n % qk == 0); - assert (nr % 4 == 0); - assert (nc % ncols_interleaved == 0); - - UNUSED(s); - UNUSED(bs); - UNUSED(vx); - UNUSED(vy); - UNUSED(nr); - UNUSED(nc); - UNUSED(nb); - UNUSED(ncols_interleaved); - UNUSED(blocklen); - -#if ! ((defined(_MSC_VER)) && ! 
defined(__clang__)) && defined(__aarch64__) -#if defined(__ARM_FEATURE_SVE) && defined(__ARM_FEATURE_MATMUL_INT8) - if (ggml_cpu_has_sve() && ggml_cpu_has_matmul_int8() && ggml_cpu_get_sve_cnt() == QK8_0) { - const void * b_ptr = vx; - const void * a_ptr = vy; - float * res_ptr = s; - size_t res_stride = bs * sizeof(float); - - __asm__ __volatile__( - "mov x20, #0x4\n" - "mov x13, %x[nr]\n" - "mov z28.s, #-0x4\n" - "mov x12, #0x88\n" - "ptrue p1.b\n" - "whilelt p0.s, XZR, x20\n" - "cmp x13, #0x10\n" - "mul x12, %x[nb], x12\n" - "blt 4f\n" - "1:" // Row loop - "add x11, %x[b_ptr], #0x10\n" - "mov x10, %x[nc]\n" - "add x9, %x[res_ptr], %x[res_stride], LSL #4\n" - "2:" // Column loop - "add x28, %x[a_ptr], #0x8\n" - "mov z24.b, #0x0\n" - "mov z15.b, #0x0\n" - "mov x27, %x[nb]\n" - "add x26, x28, x12\n" - "mov z12.b, #0x0\n" - "mov z0.b, #0x0\n" - "add x25, x26, x12\n" - "mov z13.b, #0x0\n" - "mov z1.b, #0x0\n" - "add x24, x25, x12\n" - "mov z20.b, #0x0\n" - "mov z25.b, #0x0\n" - "mov z11.b, #0x0\n" - "mov z16.b, #0x0\n" - "mov z19.b, #0x0\n" - "mov z26.b, #0x0\n" - "mov z8.b, #0x0\n" - "mov z29.b, #0x0\n" - "mov z27.b, #0x0\n" - "mov z10.b, #0x0\n" - "3:" // Block loop - "ld1b { z30.b }, p1/Z, [x11]\n" - "ld1b { z21.b }, p1/Z, [x11, #1, MUL VL]\n" - "mov z18.s, #0x0\n" - "mov z7.s, #0x0\n" - "ld1rqb { z3.b }, p1/Z, [x28]\n" - "ld1rqb { z5.b }, p1/Z, [x28, #16]\n" - "mov z9.s, #0x0\n" - "mov z22.s, #0x0\n" - "ld1b { z4.b }, p1/Z, [x11, #2, MUL VL]\n" - "ld1b { z17.b }, p1/Z, [x11, #3, MUL VL]\n" - "sub x20, x11, #0x10\n" - "sub x23, x28, #0x8\n" - "lsl z31.b, z30.b, #0x4\n" - "lsl z6.b, z21.b, #0x4\n" - "ld1h { z23.s }, p1/Z, [x20]\n" - "sub x22, x26, #0x8\n" - "and z30.b, z30.b, #0xf0\n" - "and z21.b, z21.b, #0xf0\n" - "sub x21, x25, #0x8\n" - "sub x20, x24, #0x8\n" - "lsl z14.b, z4.b, #0x4\n" - "lsl z2.b, z17.b, #0x4\n" - "subs x27, x27, #0x1\n" - "add x11, x11, #0x90\n" - ".inst 0x451f9872 // smmla z18.s, z3.b, z31.b\n" - ".inst 0x45069867 // smmla z7.s, z3.b, z6.b\n" - "ld1rqb { z3.b }, p1/Z, [x28, #32]\n" - "and z4.b, z4.b, #0xf0\n" - ".inst 0x451f98a9 // smmla z9.s, z5.b, z31.b\n" - ".inst 0x450698b6 // smmla z22.s, z5.b, z6.b\n" - "ld1rqb { z5.b }, p1/Z, [x28, #48]\n" - "and z17.b, z17.b, #0xf0\n" - "fcvt z23.s, p1/m, z23.h\n" - ".inst 0x450e9872 // smmla z18.s, z3.b, z14.b\n" - ".inst 0x45029867 // smmla z7.s, z3.b, z2.b\n" - "ld1rqb { z3.b }, p1/Z, [x28, #64]\n" - ".inst 0x450e98a9 // smmla z9.s, z5.b, z14.b\n" - ".inst 0x450298b6 // smmla z22.s, z5.b, z2.b\n" - "ld1rqb { z5.b }, p1/Z, [x28, #80]\n" - "fscale z23.s, p1/m, z23.s, z28.s\n" - ".inst 0x451e9872 // smmla z18.s, z3.b, z30.b\n" - ".inst 0x45159867 // smmla z7.s, z3.b, z21.b\n" - "ld1rqb { z3.b }, p1/Z, [x28, #96]\n" - ".inst 0x451e98a9 // smmla z9.s, z5.b, z30.b\n" - ".inst 0x451598b6 // smmla z22.s, z5.b, z21.b\n" - "ld1rqb { z5.b }, p1/Z, [x28, #112]\n" - "add x28, x28, #0x88\n" - ".inst 0x45049872 // smmla z18.s, z3.b, z4.b\n" - ".inst 0x45119867 // smmla z7.s, z3.b, z17.b\n" - "ld1h { z3.s }, p0/Z, [x23]\n" - ".inst 0x450498a9 // smmla z9.s, z5.b, z4.b\n" - ".inst 0x451198b6 // smmla z22.s, z5.b, z17.b\n" - "fcvt z3.s, p1/m, z3.h\n" - "uzp1 z5.d, z18.d, z7.d\n" - "uzp2 z18.d, z18.d, z7.d\n" - "mov z3.q, z3.q[0]\n" - "uzp1 z7.d, z9.d, z22.d\n" - "uzp2 z22.d, z9.d, z22.d\n" - "fmul z9.s, z23.s, z3.s[0]\n" - "scvtf z5.s, p1/m, z5.s\n" - "scvtf z18.s, p1/m, z18.s\n" - "scvtf z7.s, p1/m, z7.s\n" - "scvtf z22.s, p1/m, z22.s\n" - "fmla z24.s, p1/M, z5.s, z9.s\n" - "ld1rqb { z5.b }, p1/Z, [x26]\n" - "fmul z9.s, z23.s, z3.s[1]\n" 
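/*
 * SVE path notes: this kernel is only entered when the runtime vector
 * length equals QK8_0 (32 bytes, i.e. 256-bit SVE), so one z register
 * holds a full row of eight f32 results. Each `smmla` performs a
 * widening 2x8 by 8x2 int8 matrix multiply-accumulate per 128-bit
 * segment, and the `uzp1`/`uzp2` pairs de-interleave its 2x2 outputs
 * back into per-row vectors. Unlike the NEON kernels above, the /16
 * nibble correction is folded into the B scales up front via `fscale`
 * with z28 = -4 (multiply by 2^-4) instead of using `scvtf #0x4` on the
 * integer accumulators.
 */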
- "fmla z15.s, p1/M, z18.s, z9.s\n" - "ld1rqb { z18.b }, p1/Z, [x26, #16]\n" - "fmul z9.s, z23.s, z3.s[2]\n" - "fmul z3.s, z23.s, z3.s[3]\n" - "fmla z12.s, p1/M, z7.s, z9.s\n" - "mov z9.s, #0x0\n" - "ld1h { z7.s }, p0/Z, [x22]\n" - ".inst 0x451f98a9 // smmla z9.s, z5.b, z31.b\n" - "fmla z0.s, p1/M, z22.s, z3.s\n" - "mov z22.s, #0x0\n" - "ld1h { z3.s }, p0/Z, [x21]\n" - ".inst 0x450698b6 // smmla z22.s, z5.b, z6.b\n" - "ld1rqb { z5.b }, p1/Z, [x26, #32]\n" - "fcvt z7.s, p1/m, z7.h\n" - "fcvt z3.s, p1/m, z3.h\n" - ".inst 0x450e98a9 // smmla z9.s, z5.b, z14.b\n" - ".inst 0x450298b6 // smmla z22.s, z5.b, z2.b\n" - "ld1rqb { z5.b }, p1/Z, [x26, #64]\n" - "mov z7.q, z7.q[0]\n" - "mov z3.q, z3.q[0]\n" - ".inst 0x451e98a9 // smmla z9.s, z5.b, z30.b\n" - ".inst 0x451598b6 // smmla z22.s, z5.b, z21.b\n" - "ld1rqb { z5.b }, p1/Z, [x26, #96]\n" - ".inst 0x450498a9 // smmla z9.s, z5.b, z4.b\n" - ".inst 0x451198b6 // smmla z22.s, z5.b, z17.b\n" - "uzp1 z5.d, z9.d, z22.d\n" - "scvtf z5.s, p1/m, z5.s\n" - "uzp2 z22.d, z9.d, z22.d\n" - "fmul z9.s, z23.s, z7.s[0]\n" - "scvtf z22.s, p1/m, z22.s\n" - "fmla z13.s, p1/M, z5.s, z9.s\n" - "ld1rqb { z9.b }, p1/Z, [x25]\n" - "fmul z5.s, z23.s, z7.s[1]\n" - "fmla z1.s, p1/M, z22.s, z5.s\n" - "mov z5.s, #0x0\n" - "mov z22.s, #0x0\n" - ".inst 0x451f9a45 // smmla z5.s, z18.b, z31.b\n" - ".inst 0x45069a56 // smmla z22.s, z18.b, z6.b\n" - "ld1rqb { z18.b }, p1/Z, [x26, #48]\n" - ".inst 0x450e9a45 // smmla z5.s, z18.b, z14.b\n" - ".inst 0x45029a56 // smmla z22.s, z18.b, z2.b\n" - "ld1rqb { z18.b }, p1/Z, [x26, #80]\n" - ".inst 0x451e9a45 // smmla z5.s, z18.b, z30.b\n" - ".inst 0x45159a56 // smmla z22.s, z18.b, z21.b\n" - "ld1rqb { z18.b }, p1/Z, [x26, #112]\n" - "add x26, x26, #0x88\n" - ".inst 0x45049a45 // smmla z5.s, z18.b, z4.b\n" - ".inst 0x45119a56 // smmla z22.s, z18.b, z17.b\n" - "uzp1 z18.d, z5.d, z22.d\n" - "scvtf z18.s, p1/m, z18.s\n" - "uzp2 z22.d, z5.d, z22.d\n" - "fmul z5.s, z23.s, z7.s[2]\n" - "fmul z7.s, z23.s, z7.s[3]\n" - "scvtf z22.s, p1/m, z22.s\n" - "fmla z20.s, p1/M, z18.s, z5.s\n" - "ld1rqb { z18.b }, p1/Z, [x25, #16]\n" - "ld1h { z5.s }, p0/Z, [x20]\n" - "fcvt z5.s, p1/m, z5.h\n" - "fmla z25.s, p1/M, z22.s, z7.s\n" - "mov z22.s, #0x0\n" - "mov z7.s, #0x0\n" - ".inst 0x451f9936 // smmla z22.s, z9.b, z31.b\n" - ".inst 0x45069927 // smmla z7.s, z9.b, z6.b\n" - "ld1rqb { z9.b }, p1/Z, [x25, #32]\n" - "mov z5.q, z5.q[0]\n" - ".inst 0x450e9936 // smmla z22.s, z9.b, z14.b\n" - ".inst 0x45029927 // smmla z7.s, z9.b, z2.b\n" - "ld1rqb { z9.b }, p1/Z, [x25, #64]\n" - ".inst 0x451e9936 // smmla z22.s, z9.b, z30.b\n" - ".inst 0x45159927 // smmla z7.s, z9.b, z21.b\n" - "ld1rqb { z9.b }, p1/Z, [x25, #96]\n" - ".inst 0x45049936 // smmla z22.s, z9.b, z4.b\n" - ".inst 0x45119927 // smmla z7.s, z9.b, z17.b\n" - "uzp1 z9.d, z22.d, z7.d\n" - "scvtf z9.s, p1/m, z9.s\n" - "uzp2 z22.d, z22.d, z7.d\n" - "fmul z7.s, z23.s, z3.s[0]\n" - "scvtf z22.s, p1/m, z22.s\n" - "fmla z11.s, p1/M, z9.s, z7.s\n" - "ld1rqb { z9.b }, p1/Z, [x24]\n" - "fmul z7.s, z23.s, z3.s[1]\n" - "fmla z16.s, p1/M, z22.s, z7.s\n" - "mov z22.s, #0x0\n" - "mov z7.s, #0x0\n" - ".inst 0x451f9a56 // smmla z22.s, z18.b, z31.b\n" - ".inst 0x45069a47 // smmla z7.s, z18.b, z6.b\n" - "ld1rqb { z18.b }, p1/Z, [x25, #48]\n" - ".inst 0x450e9a56 // smmla z22.s, z18.b, z14.b\n" - ".inst 0x45029a47 // smmla z7.s, z18.b, z2.b\n" - "ld1rqb { z18.b }, p1/Z, [x25, #80]\n" - ".inst 0x451e9a56 // smmla z22.s, z18.b, z30.b\n" - ".inst 0x45159a47 // smmla z7.s, z18.b, z21.b\n" - "ld1rqb { z18.b }, p1/Z, [x25, #112]\n" - "add 
x25, x25, #0x88\n" - ".inst 0x45049a56 // smmla z22.s, z18.b, z4.b\n" - ".inst 0x45119a47 // smmla z7.s, z18.b, z17.b\n" - "uzp1 z18.d, z22.d, z7.d\n" - "scvtf z18.s, p1/m, z18.s\n" - "uzp2 z7.d, z22.d, z7.d\n" - "fmul z22.s, z23.s, z3.s[2]\n" - "fmul z3.s, z23.s, z3.s[3]\n" - "scvtf z7.s, p1/m, z7.s\n" - "fmla z19.s, p1/M, z18.s, z22.s\n" - "ld1rqb { z18.b }, p1/Z, [x24, #16]\n" - "fmul z22.s, z23.s, z5.s[0]\n" - "fmla z26.s, p1/M, z7.s, z3.s\n" - "mov z3.s, #0x0\n" - "mov z7.s, #0x0\n" - ".inst 0x451f9923 // smmla z3.s, z9.b, z31.b\n" - ".inst 0x45069927 // smmla z7.s, z9.b, z6.b\n" - "ld1rqb { z9.b }, p1/Z, [x24, #32]\n" - ".inst 0x450e9923 // smmla z3.s, z9.b, z14.b\n" - ".inst 0x45029927 // smmla z7.s, z9.b, z2.b\n" - "mov z9.s, #0x0\n" - ".inst 0x451f9a49 // smmla z9.s, z18.b, z31.b\n" - "mov z31.s, #0x0\n" - ".inst 0x45069a5f // smmla z31.s, z18.b, z6.b\n" - "ld1rqb { z6.b }, p1/Z, [x24, #48]\n" - "ld1rqb { z18.b }, p1/Z, [x24, #64]\n" - ".inst 0x450e98c9 // smmla z9.s, z6.b, z14.b\n" - "fmul z14.s, z23.s, z5.s[1]\n" - ".inst 0x450298df // smmla z31.s, z6.b, z2.b\n" - "ld1rqb { z6.b }, p1/Z, [x24, #80]\n" - "fmul z2.s, z23.s, z5.s[2]\n" - "fmul z23.s, z23.s, z5.s[3]\n" - ".inst 0x451e9a43 // smmla z3.s, z18.b, z30.b\n" - ".inst 0x45159a47 // smmla z7.s, z18.b, z21.b\n" - "ld1rqb { z5.b }, p1/Z, [x24, #96]\n" - ".inst 0x451e98c9 // smmla z9.s, z6.b, z30.b\n" - ".inst 0x451598df // smmla z31.s, z6.b, z21.b\n" - "ld1rqb { z18.b }, p1/Z, [x24, #112]\n" - "add x24, x24, #0x88\n" - ".inst 0x450498a3 // smmla z3.s, z5.b, z4.b\n" - ".inst 0x451198a7 // smmla z7.s, z5.b, z17.b\n" - ".inst 0x45049a49 // smmla z9.s, z18.b, z4.b\n" - ".inst 0x45119a5f // smmla z31.s, z18.b, z17.b\n" - "uzp1 z18.d, z3.d, z7.d\n" - "uzp2 z5.d, z3.d, z7.d\n" - "scvtf z18.s, p1/m, z18.s\n" - "uzp1 z6.d, z9.d, z31.d\n" - "uzp2 z9.d, z9.d, z31.d\n" - "scvtf z5.s, p1/m, z5.s\n" - "fmla z8.s, p1/M, z18.s, z22.s\n" - "scvtf z6.s, p1/m, z6.s\n" - "scvtf z9.s, p1/m, z9.s\n" - "fmla z29.s, p1/M, z5.s, z14.s\n" - "fmla z27.s, p1/M, z6.s, z2.s\n" - "fmla z10.s, p1/M, z9.s, z23.s\n" - "bgt 3b\n" - "mov x20, %x[res_ptr]\n" - "subs x10, x10, #0x8\n" - "add %x[res_ptr], %x[res_ptr], #0x20\n" - "st1w { z24.s }, p1, [x20]\n" - "add x20, x20, %x[res_stride]\n" - "st1w { z15.s }, p1, [x20]\n" - "add x20, x20, %x[res_stride]\n" - "st1w { z12.s }, p1, [x20]\n" - "add x20, x20, %x[res_stride]\n" - "st1w { z0.s }, p1, [x20]\n" - "add x20, x20, %x[res_stride]\n" - "st1w { z13.s }, p1, [x20]\n" - "add x20, x20, %x[res_stride]\n" - "st1w { z1.s }, p1, [x20]\n" - "add x20, x20, %x[res_stride]\n" - "st1w { z20.s }, p1, [x20]\n" - "add x20, x20, %x[res_stride]\n" - "st1w { z25.s }, p1, [x20]\n" - "add x20, x20, %x[res_stride]\n" - "st1w { z11.s }, p1, [x20]\n" - "add x20, x20, %x[res_stride]\n" - "st1w { z16.s }, p1, [x20]\n" - "add x20, x20, %x[res_stride]\n" - "st1w { z19.s }, p1, [x20]\n" - "add x20, x20, %x[res_stride]\n" - "st1w { z26.s }, p1, [x20]\n" - "add x20, x20, %x[res_stride]\n" - "st1w { z8.s }, p1, [x20]\n" - "add x20, x20, %x[res_stride]\n" - "st1w { z29.s }, p1, [x20]\n" - "add x20, x20, %x[res_stride]\n" - "st1w { z27.s }, p1, [x20]\n" - "add x20, x20, %x[res_stride]\n" - "st1w { z10.s }, p1, [x20]\n" - "bne 2b\n" - "mov x20, #0x4\n" - "sub x13, x13, #0x10\n" - "cmp x13, #0x10\n" - "mov %x[res_ptr], x9\n" - "madd %x[a_ptr], x20, x12, %x[a_ptr]\n" - "bge 1b\n" - "4:" // Row loop skip - "cbz x13, 9f\n" - "5:" // Row tail: Row loop - "add x25, %x[b_ptr], #0x10\n" - "mov x24, %x[nc]\n" - "add x23, %x[res_ptr], %x[res_stride], 
LSL #2\n" - "6:" // Row tail: Column loop - "mov z24.b, #0x0\n" - "mov z15.b, #0x0\n" - "add x28, %x[a_ptr], #0x8\n" - "mov x22, %x[nb]\n" - "mov z12.b, #0x0\n" - "mov z0.b, #0x0\n" - "7:" // Row tail: Block loop - "ld1b { z3.b }, p1/Z, [x25]\n" - "ld1b { z6.b }, p1/Z, [x25, #1, MUL VL]\n" - "mov z2.s, #0x0\n" - "mov z25.s, #0x0\n" - "ld1rqb { z26.b }, p1/Z, [x28]\n" - "ld1rqb { z21.b }, p1/Z, [x28, #16]\n" - "mov z27.s, #0x0\n" - "mov z19.s, #0x0\n" - "ld1b { z29.b }, p1/Z, [x25, #2, MUL VL]\n" - "ld1b { z16.b }, p1/Z, [x25, #3, MUL VL]\n" - "sub x21, x25, #0x10\n" - "sub x20, x28, #0x8\n" - "lsl z20.b, z3.b, #0x4\n" - "lsl z4.b, z6.b, #0x4\n" - "ld1rqb { z10.b }, p1/Z, [x28, #32]\n" - "ld1rqb { z23.b }, p1/Z, [x28, #48]\n" - "and z3.b, z3.b, #0xf0\n" - "and z6.b, z6.b, #0xf0\n" - "ld1rqb { z11.b }, p1/Z, [x28, #64]\n" - "ld1rqb { z7.b }, p1/Z, [x28, #80]\n" - "lsl z8.b, z29.b, #0x4\n" - "lsl z14.b, z16.b, #0x4\n" - "ld1rqb { z18.b }, p1/Z, [x28, #96]\n" - "ld1rqb { z30.b }, p1/Z, [x28, #112]\n" - ".inst 0x45149b42 // smmla z2.s, z26.b, z20.b\n" - ".inst 0x45049b59 // smmla z25.s, z26.b, z4.b\n" - "and z29.b, z29.b, #0xf0\n" - "ld1h { z17.s }, p1/Z, [x21]\n" - ".inst 0x45149abb // smmla z27.s, z21.b, z20.b\n" - ".inst 0x45049ab3 // smmla z19.s, z21.b, z4.b\n" - "and z16.b, z16.b, #0xf0\n" - "ld1h { z4.s }, p0/Z, [x20]\n" - "subs x22, x22, #0x1\n" - "add x28, x28, #0x88\n" - "fcvt z17.s, p1/m, z17.h\n" - "add x25, x25, #0x90\n" - ".inst 0x45089942 // smmla z2.s, z10.b, z8.b\n" - ".inst 0x450e9959 // smmla z25.s, z10.b, z14.b\n" - "fcvt z4.s, p1/m, z4.h\n" - ".inst 0x45089afb // smmla z27.s, z23.b, z8.b\n" - ".inst 0x450e9af3 // smmla z19.s, z23.b, z14.b\n" - "fscale z17.s, p1/m, z17.s, z28.s\n" - "mov z4.q, z4.q[0]\n" - ".inst 0x45039962 // smmla z2.s, z11.b, z3.b\n" - ".inst 0x45069979 // smmla z25.s, z11.b, z6.b\n" - "fmul z23.s, z17.s, z4.s[0]\n" - "fmul z9.s, z17.s, z4.s[1]\n" - "fmul z21.s, z17.s, z4.s[2]\n" - "fmul z4.s, z17.s, z4.s[3]\n" - ".inst 0x450398fb // smmla z27.s, z7.b, z3.b\n" - ".inst 0x450698f3 // smmla z19.s, z7.b, z6.b\n" - ".inst 0x451d9a42 // smmla z2.s, z18.b, z29.b\n" - ".inst 0x45109a59 // smmla z25.s, z18.b, z16.b\n" - ".inst 0x451d9bdb // smmla z27.s, z30.b, z29.b\n" - ".inst 0x45109bd3 // smmla z19.s, z30.b, z16.b\n" - "uzp1 z31.d, z2.d, z25.d\n" - "uzp2 z13.d, z2.d, z25.d\n" - "scvtf z31.s, p1/m, z31.s\n" - "uzp1 z17.d, z27.d, z19.d\n" - "uzp2 z18.d, z27.d, z19.d\n" - "scvtf z13.s, p1/m, z13.s\n" - "fmla z24.s, p1/M, z31.s, z23.s\n" - "scvtf z17.s, p1/m, z17.s\n" - "scvtf z18.s, p1/m, z18.s\n" - "fmla z15.s, p1/M, z13.s, z9.s\n" - "fmla z12.s, p1/M, z17.s, z21.s\n" - "fmla z0.s, p1/M, z18.s, z4.s\n" - "bgt 7b\n" - "mov x20, %x[res_ptr]\n" - "cmp x13, #0x1\n" - "st1w { z24.s }, p1, [x20]\n" - "add x20, x20, %x[res_stride]\n" - "ble 8f\n" - "cmp x13, #0x2\n" - "st1w { z15.s }, p1, [x20]\n" - "add x20, x20, %x[res_stride]\n" - "ble 8f\n" - "cmp x13, #0x3\n" - "st1w { z12.s }, p1, [x20]\n" - "add x20, x20, %x[res_stride]\n" - "ble 8f\n" - "st1w { z0.s }, p1, [x20]\n" - "8:" // Row tail: Accumulator store skip - "subs x24, x24, #0x8\n" - "add %x[res_ptr], %x[res_ptr], #0x20\n" - "bne 6b\n" - "subs x13, x13, #0x4\n" - "add %x[a_ptr], %x[a_ptr], x12\n" - "mov %x[res_ptr], x23\n" - "bgt 5b\n" - "9:" // Row tail: Row loop skip - : [a_ptr] "+&r" (a_ptr), [res_ptr] "+&r" (res_ptr) - : [b_ptr] "r" (b_ptr), [nr] "r" (nr), [nb] "r" (nb), [res_stride] "r" (res_stride), [nc] "r" (nc) - : "cc", "memory", "p0", "p1", "x9", "x10", "x11", "x12", "x13", "x20", "x21", "x22", "x23", 
"x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31" - ); - return; - } -#endif // #if defined(__ARM_FEATURE_SVE) && defined(__ARM_FEATURE_MATMUL_INT8) -#elif defined(__AVX2__) || defined(__AVX512F__) - { - const block_q4_0x8 * b_ptr_start = (const block_q4_0x8 *)vx; - const block_q8_0x4 * a_ptr_start = (const block_q8_0x4 *)vy; - int64_t b_nb = n / QK4_0; - int64_t y = 0; - // Mask to mask out nibbles from packed bytes - const __m256i m4b = _mm256_set1_epi8(0x0F); - const __m128i loadMask = _mm_blend_epi32(_mm_setzero_si128(), _mm_set1_epi32(0xFFFFFFFF), 3); - // Lookup table to convert signed nibbles to signed bytes - __m256i signextendlut = _mm256_castsi128_si256(_mm_set_epi8(-1, -2, -3, -4, -5, -6, -7, -8, 7, 6, 5, 4, 3, 2, 1, 0)); - signextendlut = _mm256_permute2f128_si256(signextendlut, signextendlut, 0); - // Permute mask used for easier vector processing at later stages - __m256i requiredOrder = _mm256_set_epi32(3, 2, 1, 0, 7, 6, 5, 4); - int64_t xstart = 0; - int anr = nr - nr%16; // Used to align nr with boundary of 16 - #ifdef __AVX512F__ - int anc = nc - nc%16; // Used to align nc with boundary of 16 - // Mask to mask out nibbles from packed bytes expanded to 512 bit length - const __m512i m4bexpanded = _mm512_set1_epi8(0x0F); - // Lookup table to convert signed nibbles to signed bytes expanded to 512 bit length - __m512i signextendlutexpanded = _mm512_inserti32x8(_mm512_castsi256_si512(signextendlut), signextendlut, 1); - - // Take group of four block_q8_0x4 structures at each pass of the loop and perform dot product operation - for (; y < anr / 4; y += 4) { - - const block_q8_0x4 * a_ptrs[4]; - - a_ptrs[0] = a_ptr_start + (y * nb); - for (int i = 0; i < 3; ++i) { - a_ptrs[i + 1] = a_ptrs[i] + nb; - } - - // Take group of two block_q4_0x8 structures at each pass of the loop and perform dot product operation - for (int64_t x = 0; x < anc / 8; x += 2) { - - const block_q4_0x8 * b_ptr_0 = b_ptr_start + ((x) * b_nb); - const block_q4_0x8 * b_ptr_1 = b_ptr_start + ((x + 1) * b_nb); - - // Master FP accumulators - __m512 acc_rows[16]; - for (int i = 0; i < 16; i++) { - acc_rows[i] = _mm512_setzero_ps(); - } - - for (int64_t b = 0; b < nb; b++) { - // Load the sixteen block_q4_0 quantized values interleaved with each other in chunks of eight - B0,B1 ....BE,BF - const __m256i rhs_raw_mat_0123_0 = _mm256_loadu_si256((const __m256i *)(b_ptr_0[b].qs)); - const __m256i rhs_raw_mat_4567_0 = _mm256_loadu_si256((const __m256i *)(b_ptr_0[b].qs + 32)); - const __m256i rhs_raw_mat_0123_1 = _mm256_loadu_si256((const __m256i *)(b_ptr_0[b].qs + 64)); - const __m256i rhs_raw_mat_4567_1 = _mm256_loadu_si256((const __m256i *)(b_ptr_0[b].qs + 96)); - - const __m256i rhs_raw_mat_89AB_0 = _mm256_loadu_si256((const __m256i *)(b_ptr_1[b].qs)); - const __m256i rhs_raw_mat_CDEF_0 = _mm256_loadu_si256((const __m256i *)(b_ptr_1[b].qs + 32)); - const __m256i rhs_raw_mat_89AB_1 = _mm256_loadu_si256((const __m256i *)(b_ptr_1[b].qs + 64)); - const __m256i rhs_raw_mat_CDEF_1 = _mm256_loadu_si256((const __m256i *)(b_ptr_1[b].qs + 96)); - - // Save the values in the following vectors in the formats B0B1B4B5B8B9BCBD, B2B3B6B7BABBBEBF for further processing and storing of values - const __m256i rhs_raw_mat_0145_0 = _mm256_blend_epi32(rhs_raw_mat_0123_0, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_0, requiredOrder), 240); - 
const __m256i rhs_raw_mat_2367_0 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_0, requiredOrder), rhs_raw_mat_4567_0, 240); - const __m256i rhs_raw_mat_0145_1 = _mm256_blend_epi32(rhs_raw_mat_0123_1, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_1, requiredOrder), 240); - const __m256i rhs_raw_mat_2367_1 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_1, requiredOrder), rhs_raw_mat_4567_1, 240); - - const __m256i rhs_raw_mat_89CD_0 = _mm256_blend_epi32(rhs_raw_mat_89AB_0, _mm256_permutevar8x32_epi32(rhs_raw_mat_CDEF_0, requiredOrder), 240); - const __m256i rhs_raw_mat_ABEF_0 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_89AB_0, requiredOrder), rhs_raw_mat_CDEF_0, 240); - const __m256i rhs_raw_mat_89CD_1 = _mm256_blend_epi32(rhs_raw_mat_89AB_1, _mm256_permutevar8x32_epi32(rhs_raw_mat_CDEF_1, requiredOrder), 240); - const __m256i rhs_raw_mat_ABEF_1 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_89AB_1, requiredOrder), rhs_raw_mat_CDEF_1, 240); - - const __m512i rhs_raw_mat_014589CD_0 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_0145_0), rhs_raw_mat_89CD_0, 1); - const __m512i rhs_raw_mat_2367ABEF_0 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_2367_0), rhs_raw_mat_ABEF_0, 1); - const __m512i rhs_raw_mat_014589CD_1 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_0145_1), rhs_raw_mat_89CD_1, 1); - const __m512i rhs_raw_mat_2367ABEF_1 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_2367_1), rhs_raw_mat_ABEF_1, 1); - - // 4-bit -> 8-bit - Sign is maintained - const __m512i rhs_mat_014589CD_0 = _mm512_shuffle_epi8(signextendlutexpanded, _mm512_and_si512(rhs_raw_mat_014589CD_0, m4bexpanded)); //B0(0-7) B1(0-7) B4(0-7) B5(0-7) B8(0-7) B9(0-7) BC(0-7) BD(0-7) - const __m512i rhs_mat_2367ABEF_0 = _mm512_shuffle_epi8(signextendlutexpanded, _mm512_and_si512(rhs_raw_mat_2367ABEF_0, m4bexpanded)); //B2(0-7) B3(0-7) B6(0-7) B7(0-7) BA(0-7) BB(0-7) BE(0-7) BF(0-7) - - const __m512i rhs_mat_014589CD_1 = _mm512_shuffle_epi8(signextendlutexpanded, _mm512_and_si512(rhs_raw_mat_014589CD_1, m4bexpanded)); //B0(8-15) B1(8-15) B4(8-15) B5(8-15) B8(8-15) B9(8-15) BC(8-15) BD(8-15) - const __m512i rhs_mat_2367ABEF_1 = _mm512_shuffle_epi8(signextendlutexpanded, _mm512_and_si512(rhs_raw_mat_2367ABEF_1, m4bexpanded)); //B2(8-15) B3(8-15) B6(8-15) B7(8-15) BA(8-15) BB(8-15) BE(8-15) BF(8-15) - - const __m512i rhs_mat_014589CD_2 = _mm512_shuffle_epi8(signextendlutexpanded, _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_0, 4), m4bexpanded)); //B0(16-23) B1(16-23) B4(16-23) B5(16-23) B8(16-23) B9(16-23) BC(16-23) BD(16-23) - const __m512i rhs_mat_2367ABEF_2 = _mm512_shuffle_epi8(signextendlutexpanded, _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_0, 4), m4bexpanded)); //B2(16-23) B3(16-23) B6(16-23) B7(16-23) BA(16-23) BB(16-23) BE(16-23) BF(16-23) - - const __m512i rhs_mat_014589CD_3 = _mm512_shuffle_epi8(signextendlutexpanded, _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_1, 4), m4bexpanded)); //B0(24-31) B1(24-31) B4(24-31) B5(24-31) B8(24-31) B9(24-31) BC(24-31) BD(24-31) - const __m512i rhs_mat_2367ABEF_3 = _mm512_shuffle_epi8(signextendlutexpanded, _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_1, 4), m4bexpanded)); //B2(24-31) B3(24-31) B6(24-31) B7(24-31) BA(24-31) BB(24-31) BE(24-31) BF(24-31) - - // Shuffle pattern one - right side input - const __m512i rhs_mat_014589CD_0_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_0, (_MM_PERM_ENUM)136); //B0(0-3) B1(0-3) B0(0-3) B1(0-3) 
B4(0-3) B5(0-3) B4(0-3) B5(0-3) B8(0-3) B9(0-3) B8(0-3) B9(0-3) BC(0-3) BD(0-3) BC(0-3) BD(0-3) - const __m512i rhs_mat_2367ABEF_0_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_0, (_MM_PERM_ENUM)136); //B2(0-3) B3(0-3) B2(0-3) B3(0-3) B6(0-3) B7(0-3) B6(0-3) B7(0-3) BA(0-3) BB(0-3) BA(0-3) BB(0-3) BE(0-3) BF(0-3) BE(0-3) BF(0-3) - - const __m512i rhs_mat_014589CD_1_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_1, (_MM_PERM_ENUM)136); //B0(8-11) B1(8-11) B0(8-11) B1(8-11) B4(8-11) B5(8-11) B4(8-11) B5(8-11) B8(8-11) B9(8-11) B8(8-11) B9(8-11) BC(8-11) BD(8-11) BC(8-11) BD(8-11) - const __m512i rhs_mat_2367ABEF_1_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_1, (_MM_PERM_ENUM)136); //B2(8-11) B3(8-11) B2(8-11) B3(8-11) B6(8-11) B7(8-11) B6(8-11) B7(8-11) BA(8-11) BB(8-11) BA(8-11) BB(8-11) BE(8-11) BF(8-11) BE(8-11) BF(8-11) - - const __m512i rhs_mat_014589CD_2_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_2, (_MM_PERM_ENUM)136); //B0(16-19) B1(16-19) B0(16-19) B1(16-19) B4(16-19) B5(16-19) B4(16-19) B5(16-19) B8(16-19) B9(16-19) B8(16-19) B9(16-19) BC(16-19) BD(16-19) BC(16-19) BD(16-19) - const __m512i rhs_mat_2367ABEF_2_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_2, (_MM_PERM_ENUM)136); //B2(16-19) B3(16-19) B2(16-19) B3(16-19) B6(16-19) B7(16-19) B6(16-19) B7(16-19) BA(16-19) BB(16-19) BA(16-19) BB(16-19) BE(16-19) BF(16-19) BE(16-19) BF(16-19) - - const __m512i rhs_mat_014589CD_3_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_3, (_MM_PERM_ENUM)136); //B0(24-27) B1(24-27) B0(24-27) B1(24-27) B4(24-27) B5(24-27) B4(24-27) B5(24-27) B8(24-27) B9(24-27) B8(24-27) B9(24-27) BC(24-27) BD(24-27) BC(24-27) BD(24-27) - const __m512i rhs_mat_2367ABEF_3_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_3, (_MM_PERM_ENUM)136); //B2(24-27) B3(24-27) B2(24-27) B3(24-27) B6(24-27) B7(24-27) B6(24-27) B7(24-27) BA(24-27) BB(24-27) BA(24-27) BB(24-27) BE(24-27) BF(24-27) BE(24-27) BF(24-27) - - // Shuffle pattern two - right side input - - const __m512i rhs_mat_014589CD_0_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_0, (_MM_PERM_ENUM)221); //B0(4-7) B1(4-7) B0(4-7) B1(4-7) B4(4-7) B5(4-7) B4(4-7) B5(4-7) B8(4-7) B9(4-7) B8(4-7) B9(4-7) BC(4-7) BD(4-7) BC(4-7) BD(4-7) - const __m512i rhs_mat_2367ABEF_0_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_0, (_MM_PERM_ENUM)221); //B2(4-7) B3(4-7) B2(4-7) B3(4-7) B6(4-7) B7(4-7) B6(4-7) B7(4-7) BA(4-7) BB(4-7) BA(4-7) BB(4-7) BE(4-7) BF(4-7) BE(4-7) BF(4-7) - - const __m512i rhs_mat_014589CD_1_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_1, (_MM_PERM_ENUM)221); //B0(12-15) B1(12-15) B0(12-15) B1(12-15) B4(12-15) B5(12-15) B4(12-15) B5(12-15) B8(12-15) B9(12-15) B8(12-15) B9(12-15) BC(12-15) BD(12-15) BC(12-15) BD(12-15) - const __m512i rhs_mat_2367ABEF_1_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_1, (_MM_PERM_ENUM)221); //B2(12-15) B3(12-15) B2(12-15) B3(12-15) B6(12-15) B7(12-15) B6(12-15) B7(12-15) BA(12-15) BB(12-15) BA(12-15) BB(12-15) BE(12-15) BF(12-15) BE(12-15) BF(12-15) - - const __m512i rhs_mat_014589CD_2_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_2, (_MM_PERM_ENUM)221); //B0(20-23) B1(20-23) B0(20-23) B1(20-23) B4(20-23) B5(20-23) B4(20-23) B5(20-23) B8(20-23) B9(20-23) B8(20-23) B9(20-23) BC(20-23) BD(20-23) BC(20-23) BD(20-23) - const __m512i rhs_mat_2367ABEF_2_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_2, (_MM_PERM_ENUM)221); //B2(20-23) B3(20-23) B2(20-23) B3(20-23) B6(20-23) B7(20-23) B6(20-23) B7(20-23) BA(20-23) BB(20-23) BA(20-23) BB(20-23) BE(20-23) BF(20-23) BE(20-23) BF(20-23) - - const __m512i rhs_mat_014589CD_3_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_3, 
(_MM_PERM_ENUM)221); //B0(28-31) B1(28-31) B0(28-31) B1(28-31) B4(28-31) B5(28-31) B4(28-31) B5(28-31) B8(28-31) B9(28-31) B8(28-31) B9(28-31) BC(28-31) BD(28-31) BC(28-31) BD(28-31) - const __m512i rhs_mat_2367ABEF_3_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_3, (_MM_PERM_ENUM)221); //B2(28-31) B3(28-31) B2(28-31) B3(28-31) B6(28-31) B7(28-31) B6(28-31) B7(28-31) BA(28-31) BB(28-31) BA(28-31) BB(28-31) BE(28-31) BF(28-31) BE(28-31) BF(28-31) - - // Scale values - Load the weight scale values of two block_q4_0x8 - const __m512 col_scale_f32 = GGML_F32Cx8x2_LOAD(b_ptr_0[b].d, b_ptr_1[b].d); - - // Process LHS in pairs of rows - for (int rp = 0; rp < 4; rp++) { - - // Load the four block_q4_0 quantized values interleaved with each other in chunks of eight - A0,A1,A2,A3 - // Loaded as set of 128 bit vectors and repeated and stored into a 256 bit vector before again repeating into 512 bit vector - __m256i lhs_mat_ymm_0123_0 = _mm256_loadu_si256((const __m256i *)((a_ptrs[rp][b].qs))); - __m256i lhs_mat_ymm_01_0 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_0, lhs_mat_ymm_0123_0, 0); - __m256i lhs_mat_ymm_23_0 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_0, lhs_mat_ymm_0123_0, 17); - __m256i lhs_mat_ymm_0123_1 = _mm256_loadu_si256((const __m256i *)((a_ptrs[rp][b].qs + 32))); - __m256i lhs_mat_ymm_01_1 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_1, lhs_mat_ymm_0123_1, 0); - __m256i lhs_mat_ymm_23_1 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_1, lhs_mat_ymm_0123_1, 17); - __m256i lhs_mat_ymm_0123_2 = _mm256_loadu_si256((const __m256i *)((a_ptrs[rp][b].qs + 64))); - __m256i lhs_mat_ymm_01_2 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_2, lhs_mat_ymm_0123_2, 0); - __m256i lhs_mat_ymm_23_2 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_2, lhs_mat_ymm_0123_2, 17); - __m256i lhs_mat_ymm_0123_3 = _mm256_loadu_si256((const __m256i *)((a_ptrs[rp][b].qs + 96))); - __m256i lhs_mat_ymm_01_3 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_3, lhs_mat_ymm_0123_3, 0); - __m256i lhs_mat_ymm_23_3 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_3, lhs_mat_ymm_0123_3, 17); - - __m512i lhs_mat_01_0 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_0), lhs_mat_ymm_01_0, 1); - __m512i lhs_mat_23_0 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_0), lhs_mat_ymm_23_0, 1); - __m512i lhs_mat_01_1 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_1), lhs_mat_ymm_01_1, 1); - __m512i lhs_mat_23_1 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_1), lhs_mat_ymm_23_1, 1); - __m512i lhs_mat_01_2 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_2), lhs_mat_ymm_01_2, 1); - __m512i lhs_mat_23_2 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_2), lhs_mat_ymm_23_2, 1); - __m512i lhs_mat_01_3 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_3), lhs_mat_ymm_01_3, 1); - __m512i lhs_mat_23_3 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_3), lhs_mat_ymm_23_3, 1); - - // Shuffle pattern one - left side input - - const __m512i lhs_mat_01_0_sp1 = _mm512_shuffle_epi32(lhs_mat_01_0, (_MM_PERM_ENUM)160); //A0(0-3) A0(0-3) A1(0-3) A1(0-3) A0(0-3) A0(0-3) A1(0-3) A1(0-3) A0(0-3) A0(0-3) A1(0-3) A1(0-3) A0(0-3) A0(0-3) A1(0-3) A1(0-3) - const __m512i lhs_mat_23_0_sp1 = _mm512_shuffle_epi32(lhs_mat_23_0, (_MM_PERM_ENUM)160); //A2(0-3) A2(0-3) A3(0-3) A3(0-3) A2(0-3) A2(0-3) A3(0-3) A3(0-3) A2(0-3) A2(0-3) A3(0-3) A3(0-3) A2(0-3) A2(0-3) A3(0-3) A3(0-3) - - const __m512i lhs_mat_01_1_sp1 = _mm512_shuffle_epi32(lhs_mat_01_1, (_MM_PERM_ENUM)160); //A0(8-11) A0(8-11) A1(8-11) 
A1(8-11) A0(8-11) A0(8-11) A1(8-11) A1(8-11) A0(8-11) A0(8-11) A1(8-11) A1(8-11) A0(8-11) A0(8-11) A1(8-11) A1(8-11) - const __m512i lhs_mat_23_1_sp1 = _mm512_shuffle_epi32(lhs_mat_23_1, (_MM_PERM_ENUM)160); //A2(8-11) A2(8-11) A3(8-11) A3(8-11) A2(8-11) A2(8-11) A3(8-11) A3(8-11) A2(8-11) A2(8-11) A3(8-11) A3(8-11) A2(8-11) A2(8-11) A3(8-11) A3(8-11) - - const __m512i lhs_mat_01_2_sp1 = _mm512_shuffle_epi32(lhs_mat_01_2, (_MM_PERM_ENUM)160); //A0(16-19) A0(16-19) A1(16-19) A1(16-19) A0(16-19) A0(16-19) A1(16-19) A1(16-19) A0(16-19) A0(16-19) A1(16-19) A1(16-19) A0(16-19) A0(16-19) A1(16-19) A1(16-19) - const __m512i lhs_mat_23_2_sp1 = _mm512_shuffle_epi32(lhs_mat_23_2, (_MM_PERM_ENUM)160); //A2(16-19) A2(16-19) A3(16-19) A3(16-19) A2(16-19) A2(16-19) A3(16-19) A3(16-19) A2(16-19) A2(16-19) A3(16-19) A3(16-19) A2(16-19) A2(16-19) A3(16-19) A3(16-19) - - const __m512i lhs_mat_01_3_sp1 = _mm512_shuffle_epi32(lhs_mat_01_3, (_MM_PERM_ENUM)160); //A0(24-27) A0(24-27) A1(24-27) A1(24-27) A0(24-27) A0(24-27) A1(24-27) A1(24-27) A0(24-27) A0(24-27) A1(24-27) A1(24-27) A0(24-27) A0(24-27) A1(24-27) A1(24-27) - const __m512i lhs_mat_23_3_sp1 = _mm512_shuffle_epi32(lhs_mat_23_3, (_MM_PERM_ENUM)160); //A2(24-27) A2(24-27) A3(24-27) A3(24-27) A2(24-27) A2(24-27) A3(24-27) A3(24-27) A2(24-27) A2(24-27) A3(24-27) A3(24-27) A2(24-27) A2(24-27) A3(24-27) A3(24-27) - - // Shuffle pattern two - left side input - - const __m512i lhs_mat_01_0_sp2 = _mm512_shuffle_epi32(lhs_mat_01_0, (_MM_PERM_ENUM)245); //A0(4-7) A0(4-7) A1(4-7) A1(4-7) A0(4-7) A0(4-7) A1(4-7) A1(4-7) A0(4-7) A0(4-7) A1(4-7) A1(4-7) A0(4-7) A0(4-7) A1(4-7) A1(4-7) - const __m512i lhs_mat_23_0_sp2 = _mm512_shuffle_epi32(lhs_mat_23_0, (_MM_PERM_ENUM)245); //A2(4-7) A2(4-7) A3(4-7) A3(4-7) A2(4-7) A2(4-7) A3(4-7) A3(4-7) A2(4-7) A2(4-7) A3(4-7) A3(4-7) A2(4-7) A2(4-7) A3(4-7) A3(4-7) - - const __m512i lhs_mat_01_1_sp2 = _mm512_shuffle_epi32(lhs_mat_01_1, (_MM_PERM_ENUM)245); //A0(12-15) A0(12-15) A1(12-15) A1(12-15) A0(12-15) A0(12-15) A1(12-15) A1(12-15) A0(12-15) A0(12-15) A1(12-15) A1(12-15) A0(12-15) A0(12-15) A1(12-15) A1(12-15) - const __m512i lhs_mat_23_1_sp2 = _mm512_shuffle_epi32(lhs_mat_23_1, (_MM_PERM_ENUM)245); //A2(12-15) A2(12-15) A3(12-15) A3(12-15) A2(12-15) A2(12-15) A3(12-15) A3(12-15) A2(12-15) A2(12-15) A3(12-15) A3(12-15) A2(12-15) A2(12-15) A3(12-15) A3(12-15) - - const __m512i lhs_mat_01_2_sp2 = _mm512_shuffle_epi32(lhs_mat_01_2, (_MM_PERM_ENUM)245); //A0(20-23) A0(20-23) A1(20-23) A1(20-23) A0(20-23) A0(20-23) A1(20-23) A1(20-23) A0(20-23) A0(20-23) A1(20-23) A1(20-23) A0(20-23) A0(20-23) A1(20-23) A1(20-23) - const __m512i lhs_mat_23_2_sp2 = _mm512_shuffle_epi32(lhs_mat_23_2, (_MM_PERM_ENUM)245); //A2(20-23) A2(20-23) A3(20-23) A3(20-23) A2(20-23) A2(20-23) A3(20-23) A3(20-23) A2(20-23) A2(20-23) A3(20-23) A3(20-23) A2(20-23) A2(20-23) A3(20-23) A3(20-23) - - const __m512i lhs_mat_01_3_sp2 = _mm512_shuffle_epi32(lhs_mat_01_3, (_MM_PERM_ENUM)245); //A0(28-31) A0(28-31) A1(28-31) A1(28-31) A0(28-31) A0(28-31) A1(28-31) A1(28-31) A0(28-31) A0(28-31) A1(28-31) A1(28-31) A0(28-31) A0(28-31) A1(28-31) A1(28-31) - const __m512i lhs_mat_23_3_sp2 = _mm512_shuffle_epi32(lhs_mat_23_3, (_MM_PERM_ENUM)245); //A2(28-31) A2(28-31) A3(28-31) A3(28-31) A2(28-31) A2(28-31) A3(28-31) A3(28-31) A2(28-31) A2(28-31) A3(28-31) A3(28-31) A2(28-31) A2(28-31) A3(28-31) A3(28-31) - - // The values arranged in shuffle patterns are operated with dot product operation within 32 bit lane i.e corresponding bytes and multiplied and added into 32 bit 
integers within 32 bit lane - // Resembles MMLAs into 2x2 matrices in ARM Version - const __m512i zero = _mm512_setzero_epi32(); - __m512i iacc_mat_00_sp1 = mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_01_3_sp1, rhs_mat_014589CD_3_sp1), lhs_mat_01_2_sp1, rhs_mat_014589CD_2_sp1), lhs_mat_01_1_sp1, rhs_mat_014589CD_1_sp1), lhs_mat_01_0_sp1, rhs_mat_014589CD_0_sp1); - __m512i iacc_mat_01_sp1 = mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_01_3_sp1, rhs_mat_2367ABEF_3_sp1), lhs_mat_01_2_sp1, rhs_mat_2367ABEF_2_sp1), lhs_mat_01_1_sp1, rhs_mat_2367ABEF_1_sp1), lhs_mat_01_0_sp1, rhs_mat_2367ABEF_0_sp1); - __m512i iacc_mat_10_sp1 = mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_23_3_sp1, rhs_mat_014589CD_3_sp1), lhs_mat_23_2_sp1, rhs_mat_014589CD_2_sp1), lhs_mat_23_1_sp1, rhs_mat_014589CD_1_sp1), lhs_mat_23_0_sp1, rhs_mat_014589CD_0_sp1); - __m512i iacc_mat_11_sp1 = mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_23_3_sp1, rhs_mat_2367ABEF_3_sp1), lhs_mat_23_2_sp1, rhs_mat_2367ABEF_2_sp1), lhs_mat_23_1_sp1, rhs_mat_2367ABEF_1_sp1), lhs_mat_23_0_sp1, rhs_mat_2367ABEF_0_sp1); - __m512i iacc_mat_00_sp2 = mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_01_3_sp2, rhs_mat_014589CD_3_sp2), lhs_mat_01_2_sp2, rhs_mat_014589CD_2_sp2), lhs_mat_01_1_sp2, rhs_mat_014589CD_1_sp2), lhs_mat_01_0_sp2, rhs_mat_014589CD_0_sp2); - __m512i iacc_mat_01_sp2 = mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_01_3_sp2, rhs_mat_2367ABEF_3_sp2), lhs_mat_01_2_sp2, rhs_mat_2367ABEF_2_sp2), lhs_mat_01_1_sp2, rhs_mat_2367ABEF_1_sp2), lhs_mat_01_0_sp2, rhs_mat_2367ABEF_0_sp2); - __m512i iacc_mat_10_sp2 = mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_23_3_sp2, rhs_mat_014589CD_3_sp2), lhs_mat_23_2_sp2, rhs_mat_014589CD_2_sp2), lhs_mat_23_1_sp2, rhs_mat_014589CD_1_sp2), lhs_mat_23_0_sp2, rhs_mat_014589CD_0_sp2); - __m512i iacc_mat_11_sp2 = mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_23_3_sp2, rhs_mat_2367ABEF_3_sp2), lhs_mat_23_2_sp2, rhs_mat_2367ABEF_2_sp2), lhs_mat_23_1_sp2, rhs_mat_2367ABEF_1_sp2), lhs_mat_23_0_sp2, rhs_mat_2367ABEF_0_sp2); - - // Output of both shuffle patterns are added in order to sum dot product outputs of all 32 values in block - __m512i iacc_mat_00 = _mm512_add_epi32(iacc_mat_00_sp1, iacc_mat_00_sp2); - __m512i iacc_mat_01 = _mm512_add_epi32(iacc_mat_01_sp1, iacc_mat_01_sp2); - __m512i iacc_mat_10 = _mm512_add_epi32(iacc_mat_10_sp1, iacc_mat_10_sp2); - __m512i iacc_mat_11 = _mm512_add_epi32(iacc_mat_11_sp1, iacc_mat_11_sp2); - - - // Straighten out to make 4 row vectors - __m512i iacc_row_0 = _mm512_mask_blend_epi32(0xCCCC, iacc_mat_00, _mm512_shuffle_epi32(iacc_mat_01, (_MM_PERM_ENUM)78)); - __m512i iacc_row_1 = _mm512_mask_blend_epi32(0xCCCC, _mm512_shuffle_epi32(iacc_mat_00, (_MM_PERM_ENUM)78), iacc_mat_01); - __m512i iacc_row_2 = _mm512_mask_blend_epi32(0xCCCC, iacc_mat_10, _mm512_shuffle_epi32(iacc_mat_11, 
(_MM_PERM_ENUM)78)); - __m512i iacc_row_3 = _mm512_mask_blend_epi32(0xCCCC, _mm512_shuffle_epi32(iacc_mat_10, (_MM_PERM_ENUM)78), iacc_mat_11); - - // Load the scale(d) values for all the 4 Q8_0 blocks and repeat it across lanes - const __m128i row_scale_f16 = _mm_shuffle_epi32(_mm_maskload_epi32((int const*)(a_ptrs[rp][b].d), loadMask), 68); - const __m512 row_scale_f32 = GGML_F32Cx16_REPEAT_LOAD(row_scale_f16); - - // Multiply with appropiate scales and accumulate - acc_rows[rp * 4] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_0), _mm512_mul_ps(col_scale_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 0)), acc_rows[rp * 4]); - acc_rows[rp * 4 + 1] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_1), _mm512_mul_ps(col_scale_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 85)), acc_rows[rp * 4 + 1]); - acc_rows[rp * 4 + 2] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_2), _mm512_mul_ps(col_scale_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 170)), acc_rows[rp * 4 + 2]); - acc_rows[rp * 4 + 3] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_3), _mm512_mul_ps(col_scale_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 255)), acc_rows[rp * 4 + 3]); - } - } - - // Store the accumulated values - for (int i = 0; i < 16; i++) { - _mm512_storeu_ps((float *)(s + ((y * 4 + i) * bs + x * 8)), acc_rows[i]); - } - } - } - // Take a block_q8_0x4 structures at each pass of the loop and perform dot product operation - for (; y < nr / 4; y ++) { - - const block_q8_0x4 * a_ptr = a_ptr_start + (y * nb); - - // Take group of two block_q4_0x8 structures at each pass of the loop and perform dot product operation - for (int64_t x = 0; x < anc / 8; x += 2) { - - const block_q4_0x8 * b_ptr_0 = b_ptr_start + ((x) * b_nb); - const block_q4_0x8 * b_ptr_1 = b_ptr_start + ((x + 1) * b_nb); - - // Master FP accumulators - __m512 acc_rows[4]; - for (int i = 0; i < 4; i++) { - acc_rows[i] = _mm512_setzero_ps(); - } - - for (int64_t b = 0; b < nb; b++) { - // Load the sixteen block_q4_0 quantized values interleaved with each other in chunks of eight - B0,B1 ....BE,BF - const __m256i rhs_raw_mat_0123_0 = _mm256_loadu_si256((const __m256i *)(b_ptr_0[b].qs)); - const __m256i rhs_raw_mat_4567_0 = _mm256_loadu_si256((const __m256i *)(b_ptr_0[b].qs + 32)); - const __m256i rhs_raw_mat_0123_1 = _mm256_loadu_si256((const __m256i *)(b_ptr_0[b].qs + 64)); - const __m256i rhs_raw_mat_4567_1 = _mm256_loadu_si256((const __m256i *)(b_ptr_0[b].qs + 96)); - - const __m256i rhs_raw_mat_89AB_0 = _mm256_loadu_si256((const __m256i *)(b_ptr_1[b].qs)); - const __m256i rhs_raw_mat_CDEF_0 = _mm256_loadu_si256((const __m256i *)(b_ptr_1[b].qs + 32)); - const __m256i rhs_raw_mat_89AB_1 = _mm256_loadu_si256((const __m256i *)(b_ptr_1[b].qs + 64)); - const __m256i rhs_raw_mat_CDEF_1 = _mm256_loadu_si256((const __m256i *)(b_ptr_1[b].qs + 96)); - - // Save the values in the following vectors in the formats B0B1B4B5, B2B3B6B7 for further processing and storing of valuess - const __m256i rhs_raw_mat_0145_0 = _mm256_blend_epi32(rhs_raw_mat_0123_0, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_0, requiredOrder), 240); - const __m256i rhs_raw_mat_2367_0 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_0, requiredOrder), rhs_raw_mat_4567_0, 240); - const __m256i rhs_raw_mat_0145_1 = _mm256_blend_epi32(rhs_raw_mat_0123_1, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_1, requiredOrder), 240); - const __m256i rhs_raw_mat_2367_1 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_1, requiredOrder), rhs_raw_mat_4567_1, 
240); - - const __m256i rhs_raw_mat_89CD_0 = _mm256_blend_epi32(rhs_raw_mat_89AB_0, _mm256_permutevar8x32_epi32(rhs_raw_mat_CDEF_0, requiredOrder), 240); - const __m256i rhs_raw_mat_ABEF_0 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_89AB_0, requiredOrder), rhs_raw_mat_CDEF_0, 240); - const __m256i rhs_raw_mat_89CD_1 = _mm256_blend_epi32(rhs_raw_mat_89AB_1, _mm256_permutevar8x32_epi32(rhs_raw_mat_CDEF_1, requiredOrder), 240); - const __m256i rhs_raw_mat_ABEF_1 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_89AB_1, requiredOrder), rhs_raw_mat_CDEF_1, 240); - - const __m512i rhs_raw_mat_014589CD_0 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_0145_0), rhs_raw_mat_89CD_0, 1); - const __m512i rhs_raw_mat_2367ABEF_0 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_2367_0), rhs_raw_mat_ABEF_0, 1); - const __m512i rhs_raw_mat_014589CD_1 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_0145_1), rhs_raw_mat_89CD_1, 1); - const __m512i rhs_raw_mat_2367ABEF_1 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_2367_1), rhs_raw_mat_ABEF_1, 1); - - // 4-bit -> 8-bit - Sign is maintained - const __m512i rhs_mat_014589CD_0 = _mm512_shuffle_epi8(signextendlutexpanded, _mm512_and_si512(rhs_raw_mat_014589CD_0, m4bexpanded)); //B0(0-7) B1(0-7) B4(0-7) B5(0-7) B8(0-7) B9(0-7) BC(0-7) BD(0-7) - const __m512i rhs_mat_2367ABEF_0 = _mm512_shuffle_epi8(signextendlutexpanded, _mm512_and_si512(rhs_raw_mat_2367ABEF_0, m4bexpanded)); //B2(0-7) B3(0-7) B6(0-7) B7(0-7) BA(0-7) BB(0-7) BE(0-7) BF(0-7) - - const __m512i rhs_mat_014589CD_1 = _mm512_shuffle_epi8(signextendlutexpanded, _mm512_and_si512(rhs_raw_mat_014589CD_1, m4bexpanded)); //B0(8-15) B1(8-15) B4(8-15) B5(8-15) B8(8-15) B9(8-15) BC(8-15) BD(8-15) - const __m512i rhs_mat_2367ABEF_1 = _mm512_shuffle_epi8(signextendlutexpanded, _mm512_and_si512(rhs_raw_mat_2367ABEF_1, m4bexpanded)); //B2(8-15) B3(8-15) B6(8-15) B7(8-15) BA(8-15) BB(8-15) BE(8-15) BF(8-15) - - const __m512i rhs_mat_014589CD_2 = _mm512_shuffle_epi8(signextendlutexpanded, _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_0, 4), m4bexpanded)); //B0(16-23) B1(16-23) B4(16-23) B5(16-23) B8(16-23) B9(16-23) BC(16-23) BD(16-23) - const __m512i rhs_mat_2367ABEF_2 = _mm512_shuffle_epi8(signextendlutexpanded, _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_0, 4), m4bexpanded)); //B2(16-23) B3(16-23) B6(16-23) B7(16-23) BA(16-23) BB(16-23) BE(16-23) BF(16-23) - - const __m512i rhs_mat_014589CD_3 = _mm512_shuffle_epi8(signextendlutexpanded, _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_1, 4), m4bexpanded)); //B0(24-31) B1(24-31) B4(24-31) B5(24-31) B8(24-31) B9(24-31) BC(24-31) BD(24-31) - const __m512i rhs_mat_2367ABEF_3 = _mm512_shuffle_epi8(signextendlutexpanded, _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_1, 4), m4bexpanded)); //B2(24-31) B3(24-31) B6(24-31) B7(24-31) BA(24-31) BB(24-31) BE(24-31) BF(24-31) - - // Shuffle pattern one - right side input - const __m512i rhs_mat_014589CD_0_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_0, (_MM_PERM_ENUM)136); //B0(0-3) B1(0-3) B0(0-3) B1(0-3) B4(0-3) B5(0-3) B4(0-3) B5(0-3) B8(0-3) B9(0-3) B8(0-3) B9(0-3) BC(0-3) BD(0-3) BC(0-3) BD(0-3) - const __m512i rhs_mat_2367ABEF_0_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_0, (_MM_PERM_ENUM)136); //B2(0-3) B3(0-3) B2(0-3) B3(0-3) B6(0-3) B7(0-3) B6(0-3) B7(0-3) BA(0-3) BB(0-3) BA(0-3) BB(0-3) BE(0-3) BF(0-3) BE(0-3) BF(0-3) - - const __m512i rhs_mat_014589CD_1_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_1, 
(_MM_PERM_ENUM)136); //B0(8-11) B1(8-11) B0(8-11) B1(8-11) B4(8-11) B5(8-11) B4(8-11) B5(8-11) B8(8-11) B9(8-11) B8(8-11) B9(8-11) BC(8-11) BD(8-11) BC(8-11) BD(8-11) - const __m512i rhs_mat_2367ABEF_1_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_1, (_MM_PERM_ENUM)136); //B2(8-11) B3(8-11) B2(8-11) B3(8-11) B6(8-11) B7(8-11) B6(8-11) B7(8-11) BA(8-11) BB(8-11) BA(8-11) BB(8-11) BE(8-11) BF(8-11) BE(8-11) BF(8-11) - - const __m512i rhs_mat_014589CD_2_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_2, (_MM_PERM_ENUM)136); //B0(16-19) B1(16-19) B0(16-19) B1(16-19) B4(16-19) B5(16-19) B4(16-19) B5(16-19) B8(16-19) B9(16-19) B8(16-19) B9(16-19) BC(16-19) BD(16-19) BC(16-19) BD(16-19) - const __m512i rhs_mat_2367ABEF_2_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_2, (_MM_PERM_ENUM)136); //B2(16-19) B3(16-19) B2(16-19) B3(16-19) B6(16-19) B7(16-19) B6(16-19) B7(16-19) BA(16-19) BB(16-19) BA(16-19) BB(16-19) BE(16-19) BF(16-19) BE(16-19) BF(16-19) - - const __m512i rhs_mat_014589CD_3_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_3, (_MM_PERM_ENUM)136); //B0(24-27) B1(24-27) B0(24-27) B1(24-27) B4(24-27) B5(24-27) B4(24-27) B5(24-27) B8(24-27) B9(24-27) B8(24-27) B9(24-27) BC(24-27) BD(24-27) BC(24-27) BD(24-27) - const __m512i rhs_mat_2367ABEF_3_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_3, (_MM_PERM_ENUM)136); //B2(24-27) B3(24-27) B2(24-27) B3(24-27) B6(24-27) B7(24-27) B6(24-27) B7(24-27) BA(24-27) BB(24-27) BA(24-27) BB(24-27) BE(24-27) BF(24-27) BE(24-27) BF(24-27) - - // Shuffle pattern two - right side input - - const __m512i rhs_mat_014589CD_0_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_0, (_MM_PERM_ENUM)221); //B0(4-7) B1(4-7) B0(4-7) B1(4-7) B4(4-7) B5(4-7) B4(4-7) B5(4-7) B8(4-7) B9(4-7) B8(4-7) B9(4-7) BC(4-7) BD(4-7) BC(4-7) BD(4-7) - const __m512i rhs_mat_2367ABEF_0_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_0, (_MM_PERM_ENUM)221); //B2(4-7) B3(4-7) B2(4-7) B3(4-7) B6(4-7) B7(4-7) B6(4-7) B7(4-7) BA(4-7) BB(4-7) BA(4-7) BB(4-7) BE(4-7) BF(4-7) BE(4-7) BF(4-7) - - const __m512i rhs_mat_014589CD_1_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_1, (_MM_PERM_ENUM)221); //B0(12-15) B1(12-15) B0(12-15) B1(12-15) B4(12-15) B5(12-15) B4(12-15) B5(12-15) B8(12-15) B9(12-15) B8(12-15) B9(12-15) BC(12-15) BD(12-15) BC(12-15) BD(12-15) - const __m512i rhs_mat_2367ABEF_1_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_1, (_MM_PERM_ENUM)221); //B2(12-15) B3(12-15) B2(12-15) B3(12-15) B6(12-15) B7(12-15) B6(12-15) B7(12-15) BA(12-15) BB(12-15) BA(12-15) BB(12-15) BE(12-15) BF(12-15) BE(12-15) BF(12-15) - - const __m512i rhs_mat_014589CD_2_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_2, (_MM_PERM_ENUM)221); //B0(20-23) B1(20-23) B0(20-23) B1(20-23) B4(20-23) B5(20-23) B4(20-23) B5(20-23) B8(20-23) B9(20-23) B8(20-23) B9(20-23) BC(20-23) BD(20-23) BC(20-23) BD(20-23) - const __m512i rhs_mat_2367ABEF_2_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_2, (_MM_PERM_ENUM)221); //B2(20-23) B3(20-23) B2(20-23) B3(20-23) B6(20-23) B7(20-23) B6(20-23) B7(20-23) BA(20-23) BB(20-23) BA(20-23) BB(20-23) BE(20-23) BF(20-23) BE(20-23) BF(20-23) - - const __m512i rhs_mat_014589CD_3_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_3, (_MM_PERM_ENUM)221); //B0(28-31) B1(28-31) B0(28-31) B1(28-31) B4(28-31) B5(28-31) B4(28-31) B5(28-31) B8(28-31) B9(28-31) B8(28-31) B9(28-31) BC(28-31) BD(28-31) BC(28-31) BD(28-31) - const __m512i rhs_mat_2367ABEF_3_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_3, (_MM_PERM_ENUM)221); //B2(28-31) B3(28-31) B2(28-31) B3(28-31) B6(28-31) B7(28-31) B6(28-31) B7(28-31) BA(28-31) BB(28-31) BA(28-31) BB(28-31) 
BE(28-31) BF(28-31) BE(28-31) BF(28-31) - - - // Scale values - Load the weight scale values of two block_q4_0x8 - const __m512 col_scale_f32 = GGML_F32Cx8x2_LOAD(b_ptr_0[b].d, b_ptr_1[b].d); - - // Load the four block_q4_0 quantized values interleaved with each other in chunks of eight - A0,A1,A2,A3 - // Loaded as set of 128 bit vectors and repeated and stored into a 256 bit vector before again repeating into 512 bit vector - __m256i lhs_mat_ymm_0123_0 = _mm256_loadu_si256((const __m256i *)((a_ptr[b].qs))); - __m256i lhs_mat_ymm_01_0 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_0, lhs_mat_ymm_0123_0, 0); - __m256i lhs_mat_ymm_23_0 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_0, lhs_mat_ymm_0123_0, 17); - __m256i lhs_mat_ymm_0123_1 = _mm256_loadu_si256((const __m256i *)((a_ptr[b].qs + 32))); - __m256i lhs_mat_ymm_01_1 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_1, lhs_mat_ymm_0123_1, 0); - __m256i lhs_mat_ymm_23_1 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_1, lhs_mat_ymm_0123_1, 17); - __m256i lhs_mat_ymm_0123_2 = _mm256_loadu_si256((const __m256i *)((a_ptr[b].qs + 64))); - __m256i lhs_mat_ymm_01_2 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_2, lhs_mat_ymm_0123_2, 0); - __m256i lhs_mat_ymm_23_2 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_2, lhs_mat_ymm_0123_2, 17); - __m256i lhs_mat_ymm_0123_3 = _mm256_loadu_si256((const __m256i *)((a_ptr[b].qs + 96))); - __m256i lhs_mat_ymm_01_3 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_3, lhs_mat_ymm_0123_3, 0); - __m256i lhs_mat_ymm_23_3 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_3, lhs_mat_ymm_0123_3, 17); - - __m512i lhs_mat_01_0 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_0), lhs_mat_ymm_01_0, 1); - __m512i lhs_mat_23_0 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_0), lhs_mat_ymm_23_0, 1); - __m512i lhs_mat_01_1 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_1), lhs_mat_ymm_01_1, 1); - __m512i lhs_mat_23_1 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_1), lhs_mat_ymm_23_1, 1); - __m512i lhs_mat_01_2 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_2), lhs_mat_ymm_01_2, 1); - __m512i lhs_mat_23_2 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_2), lhs_mat_ymm_23_2, 1); - __m512i lhs_mat_01_3 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_3), lhs_mat_ymm_01_3, 1); - __m512i lhs_mat_23_3 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_3), lhs_mat_ymm_23_3, 1); - - // Shuffle pattern one - left side input - - const __m512i lhs_mat_01_0_sp1 = _mm512_shuffle_epi32(lhs_mat_01_0, (_MM_PERM_ENUM)160); //A0(0-3) A0(0-3) A1(0-3) A1(0-3) A0(0-3) A0(0-3) A1(0-3) A1(0-3) A0(0-3) A0(0-3) A1(0-3) A1(0-3) A0(0-3) A0(0-3) A1(0-3) A1(0-3) - const __m512i lhs_mat_23_0_sp1 = _mm512_shuffle_epi32(lhs_mat_23_0, (_MM_PERM_ENUM)160); //A2(0-3) A2(0-3) A3(0-3) A3(0-3) A2(0-3) A2(0-3) A3(0-3) A3(0-3) A2(0-3) A2(0-3) A3(0-3) A3(0-3) A2(0-3) A2(0-3) A3(0-3) A3(0-3) - - const __m512i lhs_mat_01_1_sp1 = _mm512_shuffle_epi32(lhs_mat_01_1, (_MM_PERM_ENUM)160); //A0(8-11) A0(8-11) A1(8-11) A1(8-11) A0(8-11) A0(8-11) A1(8-11) A1(8-11) A0(8-11) A0(8-11) A1(8-11) A1(8-11) A0(8-11) A0(8-11) A1(8-11) A1(8-11) - const __m512i lhs_mat_23_1_sp1 = _mm512_shuffle_epi32(lhs_mat_23_1, (_MM_PERM_ENUM)160); //A2(8-11) A2(8-11) A3(8-11) A3(8-11) A2(8-11) A2(8-11) A3(8-11) A3(8-11) A2(8-11) A2(8-11) A3(8-11) A3(8-11) A2(8-11) A2(8-11) A3(8-11) A3(8-11) - - const __m512i lhs_mat_01_2_sp1 = _mm512_shuffle_epi32(lhs_mat_01_2, (_MM_PERM_ENUM)160); //A0(16-19) A0(16-19) A1(16-19) A1(16-19) 
A0(16-19) A0(16-19) A1(16-19) A1(16-19) A0(16-19) A0(16-19) A1(16-19) A1(16-19) A0(16-19) A0(16-19) A1(16-19) A1(16-19) - const __m512i lhs_mat_23_2_sp1 = _mm512_shuffle_epi32(lhs_mat_23_2, (_MM_PERM_ENUM)160); //A2(16-19) A2(16-19) A3(16-19) A3(16-19) A2(16-19) A2(16-19) A3(16-19) A3(16-19) A2(16-19) A2(16-19) A3(16-19) A3(16-19) A2(16-19) A2(16-19) A3(16-19) A3(16-19) - - const __m512i lhs_mat_01_3_sp1 = _mm512_shuffle_epi32(lhs_mat_01_3, (_MM_PERM_ENUM)160); //A0(24-27) A0(24-27) A1(24-27) A1(24-27) A0(24-27) A0(24-27) A1(24-27) A1(24-27) A0(24-27) A0(24-27) A1(24-27) A1(24-27) A0(24-27) A0(24-27) A1(24-27) A1(24-27) - const __m512i lhs_mat_23_3_sp1 = _mm512_shuffle_epi32(lhs_mat_23_3, (_MM_PERM_ENUM)160); //A2(24-27) A2(24-27) A3(24-27) A3(24-27) A2(24-27) A2(24-27) A3(24-27) A3(24-27) A2(24-27) A2(24-27) A3(24-27) A3(24-27) A2(24-27) A2(24-27) A3(24-27) A3(24-27) - - // Shuffle pattern two - left side input - - const __m512i lhs_mat_01_0_sp2 = _mm512_shuffle_epi32(lhs_mat_01_0, (_MM_PERM_ENUM)245); //A0(4-7) A0(4-7) A1(4-7) A1(4-7) A0(4-7) A0(4-7) A1(4-7) A1(4-7) A0(4-7) A0(4-7) A1(4-7) A1(4-7) A0(4-7) A0(4-7) A1(4-7) A1(4-7) - const __m512i lhs_mat_23_0_sp2 = _mm512_shuffle_epi32(lhs_mat_23_0, (_MM_PERM_ENUM)245); //A2(4-7) A2(4-7) A3(4-7) A3(4-7) A2(4-7) A2(4-7) A3(4-7) A3(4-7) A2(4-7) A2(4-7) A3(4-7) A3(4-7) A2(4-7) A2(4-7) A3(4-7) A3(4-7) - - const __m512i lhs_mat_01_1_sp2 = _mm512_shuffle_epi32(lhs_mat_01_1, (_MM_PERM_ENUM)245); //A0(12-15) A0(12-15) A1(12-15) A1(12-15) A0(12-15) A0(12-15) A1(12-15) A1(12-15) A0(12-15) A0(12-15) A1(12-15) A1(12-15) A0(12-15) A0(12-15) A1(12-15) A1(12-15) - const __m512i lhs_mat_23_1_sp2 = _mm512_shuffle_epi32(lhs_mat_23_1, (_MM_PERM_ENUM)245); //A2(12-15) A2(12-15) A3(12-15) A3(12-15) A2(12-15) A2(12-15) A3(12-15) A3(12-15) A2(12-15) A2(12-15) A3(12-15) A3(12-15) A2(12-15) A2(12-15) A3(12-15) A3(12-15) - - const __m512i lhs_mat_01_2_sp2 = _mm512_shuffle_epi32(lhs_mat_01_2, (_MM_PERM_ENUM)245); //A0(20-23) A0(20-23) A1(20-23) A1(20-23) A0(20-23) A0(20-23) A1(20-23) A1(20-23) A0(20-23) A0(20-23) A1(20-23) A1(20-23) A0(20-23) A0(20-23) A1(20-23) A1(20-23) - const __m512i lhs_mat_23_2_sp2 = _mm512_shuffle_epi32(lhs_mat_23_2, (_MM_PERM_ENUM)245); //A2(20-23) A2(20-23) A3(20-23) A3(20-23) A2(20-23) A2(20-23) A3(20-23) A3(20-23) A2(20-23) A2(20-23) A3(20-23) A3(20-23) A2(20-23) A2(20-23) A3(20-23) A3(20-23) - - const __m512i lhs_mat_01_3_sp2 = _mm512_shuffle_epi32(lhs_mat_01_3, (_MM_PERM_ENUM)245); //A0(28-31) A0(28-31) A1(28-31) A1(28-31) A0(28-31) A0(28-31) A1(28-31) A1(28-31) A0(28-31) A0(28-31) A1(28-31) A1(28-31) A0(28-31) A0(28-31) A1(28-31) A1(28-31) - const __m512i lhs_mat_23_3_sp2 = _mm512_shuffle_epi32(lhs_mat_23_3, (_MM_PERM_ENUM)245); //A2(28-31) A2(28-31) A3(28-31) A3(28-31) A2(28-31) A2(28-31) A3(28-31) A3(28-31) A2(28-31) A2(28-31) A3(28-31) A3(28-31) A2(28-31) A2(28-31) A3(28-31) A3(28-31) - - // The values arranged in shuffle patterns are operated with dot product operation within 32 bit lane i.e corresponding bytes and multiplied and added into 32 bit integers within 32 bit lane - // Resembles MMLAs into 2x2 matrices in ARM Version - const __m512i zero = _mm512_setzero_epi32(); - __m512i iacc_mat_00_sp1 = mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_01_3_sp1, rhs_mat_014589CD_3_sp1), lhs_mat_01_2_sp1, rhs_mat_014589CD_2_sp1), lhs_mat_01_1_sp1, rhs_mat_014589CD_1_sp1), lhs_mat_01_0_sp1, rhs_mat_014589CD_0_sp1); - __m512i iacc_mat_01_sp1 = 
mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_01_3_sp1, rhs_mat_2367ABEF_3_sp1), lhs_mat_01_2_sp1, rhs_mat_2367ABEF_2_sp1), lhs_mat_01_1_sp1, rhs_mat_2367ABEF_1_sp1), lhs_mat_01_0_sp1, rhs_mat_2367ABEF_0_sp1); - __m512i iacc_mat_10_sp1 = mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_23_3_sp1, rhs_mat_014589CD_3_sp1), lhs_mat_23_2_sp1, rhs_mat_014589CD_2_sp1), lhs_mat_23_1_sp1, rhs_mat_014589CD_1_sp1), lhs_mat_23_0_sp1, rhs_mat_014589CD_0_sp1); - __m512i iacc_mat_11_sp1 = mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_23_3_sp1, rhs_mat_2367ABEF_3_sp1), lhs_mat_23_2_sp1, rhs_mat_2367ABEF_2_sp1), lhs_mat_23_1_sp1, rhs_mat_2367ABEF_1_sp1), lhs_mat_23_0_sp1, rhs_mat_2367ABEF_0_sp1); - __m512i iacc_mat_00_sp2 = mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_01_3_sp2, rhs_mat_014589CD_3_sp2), lhs_mat_01_2_sp2, rhs_mat_014589CD_2_sp2), lhs_mat_01_1_sp2, rhs_mat_014589CD_1_sp2), lhs_mat_01_0_sp2, rhs_mat_014589CD_0_sp2); - __m512i iacc_mat_01_sp2 = mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_01_3_sp2, rhs_mat_2367ABEF_3_sp2), lhs_mat_01_2_sp2, rhs_mat_2367ABEF_2_sp2), lhs_mat_01_1_sp2, rhs_mat_2367ABEF_1_sp2), lhs_mat_01_0_sp2, rhs_mat_2367ABEF_0_sp2); - __m512i iacc_mat_10_sp2 = mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_23_3_sp2, rhs_mat_014589CD_3_sp2), lhs_mat_23_2_sp2, rhs_mat_014589CD_2_sp2), lhs_mat_23_1_sp2, rhs_mat_014589CD_1_sp2), lhs_mat_23_0_sp2, rhs_mat_014589CD_0_sp2); - __m512i iacc_mat_11_sp2 = mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(mul_sum_i8_pairs_acc_int32x16(zero, lhs_mat_23_3_sp2, rhs_mat_2367ABEF_3_sp2), lhs_mat_23_2_sp2, rhs_mat_2367ABEF_2_sp2), lhs_mat_23_1_sp2, rhs_mat_2367ABEF_1_sp2), lhs_mat_23_0_sp2, rhs_mat_2367ABEF_0_sp2); - - // Output of both shuffle patterns are added in order to sum dot product outputs of all 32 values in block - __m512i iacc_mat_00 = _mm512_add_epi32(iacc_mat_00_sp1, iacc_mat_00_sp2); - __m512i iacc_mat_01 = _mm512_add_epi32(iacc_mat_01_sp1, iacc_mat_01_sp2); - __m512i iacc_mat_10 = _mm512_add_epi32(iacc_mat_10_sp1, iacc_mat_10_sp2); - __m512i iacc_mat_11 = _mm512_add_epi32(iacc_mat_11_sp1, iacc_mat_11_sp2); - - - // Straighten out to make 4 row vectors - __m512i iacc_row_0 = _mm512_mask_blend_epi32(0xCCCC, iacc_mat_00, _mm512_shuffle_epi32(iacc_mat_01, (_MM_PERM_ENUM)78)); - __m512i iacc_row_1 = _mm512_mask_blend_epi32(0xCCCC, _mm512_shuffle_epi32(iacc_mat_00, (_MM_PERM_ENUM)78), iacc_mat_01); - __m512i iacc_row_2 = _mm512_mask_blend_epi32(0xCCCC, iacc_mat_10, _mm512_shuffle_epi32(iacc_mat_11, (_MM_PERM_ENUM)78)); - __m512i iacc_row_3 = _mm512_mask_blend_epi32(0xCCCC, _mm512_shuffle_epi32(iacc_mat_10, (_MM_PERM_ENUM)78), iacc_mat_11); - - // Load the scale(d) values for all the 4 Q8_0 blocks and repeat it across lanes - const __m128i row_scale_f16 = _mm_shuffle_epi32(_mm_maskload_epi32((int const*)(a_ptr[b].d), loadMask), 68); - const __m512 row_scale_f32 = GGML_F32Cx16_REPEAT_LOAD(row_scale_f16); - - // Multiply with appropiate scales and accumulate - acc_rows[0] = 
_mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_0), _mm512_mul_ps(col_scale_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 0)), acc_rows[0]); - acc_rows[1] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_1), _mm512_mul_ps(col_scale_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 85)), acc_rows[1]); - acc_rows[2] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_2), _mm512_mul_ps(col_scale_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 170)), acc_rows[2]); - acc_rows[3] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_3), _mm512_mul_ps(col_scale_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 255)), acc_rows[3]); - } - - // Store the accumulated values - for (int i = 0; i < 4; i++) { - _mm512_storeu_ps((float *)(s + ((y * 4 + i) * bs + x * 8)), acc_rows[i]); - } - } - } - if (anc != nc) { - xstart = anc/8; - y = 0; - } - #endif // __AVX512F__ - - // Take group of four block_q8_0x4 structures at each pass of the loop and perform dot product operation - - for (; y < anr / 4; y += 4) { - const block_q8_0x4 * a_ptrs[4]; - - a_ptrs[0] = a_ptr_start + (y * nb); - for (int i = 0; i < 3; ++i) { - a_ptrs[i + 1] = a_ptrs[i] + nb; - } - - // Take group of eight block_q4_0x8 structures at each pass of the loop and perform dot product operation - for (int64_t x = xstart; x < nc / 8; x++) { - - const block_q4_0x8 * b_ptr = b_ptr_start + (x * b_nb); - - // Master FP accumulators - __m256 acc_rows[16]; - for (int i = 0; i < 16; i++) { - acc_rows[i] = _mm256_setzero_ps(); - } - - for (int64_t b = 0; b < nb; b++) { - // Load the eight block_q4_0 quantized values interleaved with each other in chunks of eight - B0,B1 ....B6,B7 - const __m256i rhs_raw_mat_0123_0 = _mm256_loadu_si256((const __m256i *)(b_ptr[b].qs)); - const __m256i rhs_raw_mat_4567_0 = _mm256_loadu_si256((const __m256i *)(b_ptr[b].qs + 32)); - const __m256i rhs_raw_mat_0123_1 = _mm256_loadu_si256((const __m256i *)(b_ptr[b].qs + 64)); - const __m256i rhs_raw_mat_4567_1 = _mm256_loadu_si256((const __m256i *)(b_ptr[b].qs + 96)); - - // Save the values in the following vectors in the formats B0B1B4B5, B2B3B6B7 for further processing and storing of values - const __m256i rhs_raw_mat_0145_0 = _mm256_blend_epi32(rhs_raw_mat_0123_0, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_0, requiredOrder), 240); - const __m256i rhs_raw_mat_2367_0 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_0, requiredOrder), rhs_raw_mat_4567_0, 240); - const __m256i rhs_raw_mat_0145_1 = _mm256_blend_epi32(rhs_raw_mat_0123_1, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_1, requiredOrder), 240); - const __m256i rhs_raw_mat_2367_1 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_1, requiredOrder), rhs_raw_mat_4567_1, 240); - - // 4-bit -> 8-bit - Sign is maintained - const __m256i rhs_mat_0145_0 = _mm256_shuffle_epi8(signextendlut, _mm256_and_si256(rhs_raw_mat_0145_0, m4b)); //B0(0-7) B1(0-7) B4(0-7) B5(0-7) - const __m256i rhs_mat_2367_0 = _mm256_shuffle_epi8(signextendlut, _mm256_and_si256(rhs_raw_mat_2367_0, m4b)); //B2(0-7) B3(0-7) B6(0-7) B7(0-7) - - const __m256i rhs_mat_0145_1 = _mm256_shuffle_epi8(signextendlut, _mm256_and_si256(rhs_raw_mat_0145_1, m4b)); //B0(8-15) B1(8-15) B4(8-15) B5(8-15) - const __m256i rhs_mat_2367_1 = _mm256_shuffle_epi8(signextendlut, _mm256_and_si256(rhs_raw_mat_2367_1, m4b)); //B2(8-15) B3(8-15) B6(8-15) B7(8-15) - - const __m256i rhs_mat_0145_2 = _mm256_shuffle_epi8(signextendlut, _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_0, 4), m4b)); //B0(16-23) B1(16-23) B4(16-23) B5(16-23) - const 
__m256i rhs_mat_2367_2 = _mm256_shuffle_epi8(signextendlut, _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_0, 4), m4b)); //B2(16-23) B3(16-23) B6(16-23) B7(16-23) - - const __m256i rhs_mat_0145_3 = _mm256_shuffle_epi8(signextendlut, _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_1, 4), m4b)); //B0(24-31) B1(24-31) B4(24-31) B5(24-31) - const __m256i rhs_mat_2367_3 = _mm256_shuffle_epi8(signextendlut, _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_1, 4), m4b)); //B2(24-31) B3(24-31) B6(24-31) B7(24-31) - - // Shuffle pattern one - right side input - const __m256i rhs_mat_0145_0_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_0, 136); //B0(0-3) B1(0-3) B0(0-3) B1(0-3) B4(0-3) B5(0-3) B4(0-3) B5(0-3) - const __m256i rhs_mat_2367_0_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_0, 136); //B2(0-3) B3(0-3) B2(0-3) B3(0-3) B6(0-3) B7(0-3) B6(0-3) B7(0-3) - - const __m256i rhs_mat_0145_1_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_1, 136); //B0(8-11) B1(8-11) B0(8-11) B1(8-11) B4(8-11) B5(8-11) B4(8-11) B5(8-11) - const __m256i rhs_mat_2367_1_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_1, 136); //B2(8-11) B3(8-11) B2(8-11) B3(8-11) B6(8-11) B7(8-11) B6(8-11) B7(8-11) - - const __m256i rhs_mat_0145_2_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_2, 136); //B0(16-19) B1(16-19) B0(16-19) B1(16-19) B4(16-19) B5(16-19) B4(16-19) B5(16-19) - const __m256i rhs_mat_2367_2_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_2, 136); //B2(16-19) B3(16-19) B2(16-19) B3(16-19) B6(16-19) B7(16-19) B6(16-19) B7(16-19) - - const __m256i rhs_mat_0145_3_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_3, 136); //B0(24-27) B1(24-27) B0(24-27) B1(24-27) B4(24-27) B5(24-27) B4(24-27) B5(24-27) - const __m256i rhs_mat_2367_3_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_3, 136); //B2(24-27) B3(24-27) B2(24-27) B3(24-27) B6(24-27) B7(24-27) B6(24-27) B7(24-27) - - // Shuffle pattern two - right side input - - const __m256i rhs_mat_0145_0_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_0, 221); //B0(4-7) B1(4-7) B0(4-7) B1(4-7) B4(4-7) B5(4-7) B4(4-7) B5(4-7) - const __m256i rhs_mat_2367_0_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_0, 221); //B2(4-7) B3(4-7) B2(4-7) B3(4-7) B6(4-7) B7(4-7) B6(4-7) B7(4-7) - - const __m256i rhs_mat_0145_1_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_1, 221); //B0(12-15) B1(12-15) B0(12-15) B1(12-15) B4(12-15) B5(12-15) B4(12-15) B5(12-15) - const __m256i rhs_mat_2367_1_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_1, 221); //B2(12-15) B3(12-15) B2(12-15) B3(12-15) B6(12-15) B7(12-15) B6(12-15) B7(12-15) - - const __m256i rhs_mat_0145_2_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_2, 221); //B0(20-23) B1(20-23) B0(20-23) B1(20-23) B4(20-23) B5(20-23) B4(20-23) B5(20-23) - const __m256i rhs_mat_2367_2_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_2, 221); //B2(20-23) B3(20-23) B2(20-23) B3(20-23) B6(20-23) B7(20-23) B6(20-23) B7(20-23) - - const __m256i rhs_mat_0145_3_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_3, 221); //B0(28-31) B1(28-31) B0(28-31) B1(28-31) B4(28-31) B5(28-31) B4(28-31) B5(28-31) - const __m256i rhs_mat_2367_3_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_3, 221); //B2(28-31) B3(28-31) B2(28-31) B3(28-31) B6(28-31) B7(28-31) B6(28-31) B7(28-31) - - // Scale values - Load the wight scale values of block_q4_0x8 - const __m256 col_scale_f32 = GGML_F32Cx8_LOAD(b_ptr[b].d); - - // Process LHS in groups of four - for (int rp = 0; rp < 4; rp++) { - // Load the four block_q4_0 quantized values interleaved with each other in chunks of eight - A0,A1,A2,A3 - // Loaded as set of 128 bit vectors and repeated into a 256 bit vector - __m256i lhs_mat_0123_0 = 
_mm256_loadu_si256((const __m256i *)((a_ptrs[rp][b].qs))); - __m256i lhs_mat_01_0 = _mm256_permute2f128_si256(lhs_mat_0123_0, lhs_mat_0123_0, 0); - __m256i lhs_mat_23_0 = _mm256_permute2f128_si256(lhs_mat_0123_0, lhs_mat_0123_0, 17); - __m256i lhs_mat_0123_1 = _mm256_loadu_si256((const __m256i *)((a_ptrs[rp][b].qs + 32))); - __m256i lhs_mat_01_1 = _mm256_permute2f128_si256(lhs_mat_0123_1, lhs_mat_0123_1, 0); - __m256i lhs_mat_23_1 = _mm256_permute2f128_si256(lhs_mat_0123_1, lhs_mat_0123_1, 17); - __m256i lhs_mat_0123_2 = _mm256_loadu_si256((const __m256i *)((a_ptrs[rp][b].qs + 64))); - __m256i lhs_mat_01_2 = _mm256_permute2f128_si256(lhs_mat_0123_2, lhs_mat_0123_2, 0); - __m256i lhs_mat_23_2 = _mm256_permute2f128_si256(lhs_mat_0123_2, lhs_mat_0123_2, 17); - __m256i lhs_mat_0123_3 = _mm256_loadu_si256((const __m256i *)((a_ptrs[rp][b].qs + 96))); - __m256i lhs_mat_01_3 = _mm256_permute2f128_si256(lhs_mat_0123_3, lhs_mat_0123_3, 0); - __m256i lhs_mat_23_3 = _mm256_permute2f128_si256(lhs_mat_0123_3, lhs_mat_0123_3, 17); - - // Shuffle pattern one - left side input - const __m256i lhs_mat_01_0_sp1 = _mm256_shuffle_epi32(lhs_mat_01_0, 160); //A0(0-3) A0(0-3) A1(0-3) A1(0-3) A0(0-3) A0(0-3) A1(0-3) A1(0-3) - const __m256i lhs_mat_23_0_sp1 = _mm256_shuffle_epi32(lhs_mat_23_0, 160); //A2(0-3) A2(0-3) A3(0-3) A3(0-3) A2(0-3) A2(0-3) A3(0-3) A3(0-3) - - const __m256i lhs_mat_01_1_sp1 = _mm256_shuffle_epi32(lhs_mat_01_1, 160); //A0(8-11) A0(8-11) A1(8-11) A1(8-11) A0(8-11) A0(8-11) A1(8-11) A1(8-11) - const __m256i lhs_mat_23_1_sp1 = _mm256_shuffle_epi32(lhs_mat_23_1, 160); //A2(8-11) A2(8-11) A3(8-11) A3(8-11) A2(8-11) A2(8-11) A3(8-11) A3(8-11) - - const __m256i lhs_mat_01_2_sp1 = _mm256_shuffle_epi32(lhs_mat_01_2, 160); //A0(16-19) A0(16-19) A1(16-19) A1(16-19) A0(16-19) A0(16-19) A1(16-19) A1(16-19) - const __m256i lhs_mat_23_2_sp1 = _mm256_shuffle_epi32(lhs_mat_23_2, 160); //A2(16-19) A2(16-19) A3(16-19) A3(16-19) A2(16-19) A2(16-19) A3(16-19) A3(16-19) - - const __m256i lhs_mat_01_3_sp1 = _mm256_shuffle_epi32(lhs_mat_01_3, 160); //A0(24-27) A0(24-27) A1(24-27) A1(24-27) A0(24-27) A0(24-27) A1(24-27) A1(24-27) - const __m256i lhs_mat_23_3_sp1 = _mm256_shuffle_epi32(lhs_mat_23_3, 160); //A2(24-27) A2(24-27) A3(24-27) A3(24-27) A2(24-27) A2(24-27) A3(24-27) A3(24-27) - - // Shuffle pattern two - left side input - const __m256i lhs_mat_01_0_sp2 = _mm256_shuffle_epi32(lhs_mat_01_0, 245); //A0(4-7) A0(4-7) A1(4-7) A1(4-7) A0(4-7) A0(4-7) A1(4-7) A1(4-7) - const __m256i lhs_mat_23_0_sp2 = _mm256_shuffle_epi32(lhs_mat_23_0, 245); //A2(4-7) A2(4-7) A3(4-7) A3(4-7) A2(4-7) A2(4-7) A3(4-7) A3(4-7) - - const __m256i lhs_mat_01_1_sp2 = _mm256_shuffle_epi32(lhs_mat_01_1, 245); //A0(12-15) A0(12-15) A1(12-15) A1(12-15) A0(12-15) A0(12-15) A1(12-15) A1(12-15) - const __m256i lhs_mat_23_1_sp2 = _mm256_shuffle_epi32(lhs_mat_23_1, 245); //A2(12-15) A2(12-15) A3(12-15) A3(12-15) A2(12-15) A2(12-15) A3(12-15) A3(12-15) - - const __m256i lhs_mat_01_2_sp2 = _mm256_shuffle_epi32(lhs_mat_01_2, 245); //A0(20-23) A0(20-23) A1(20-23) A1(20-23) A0(20-23) A0(20-23) A1(20-23) A1(20-23) - const __m256i lhs_mat_23_2_sp2 = _mm256_shuffle_epi32(lhs_mat_23_2, 245); //A2(20-23) A2(20-23) A3(20-23) A3(20-23) A2(20-23) A2(20-23) A3(20-23) A3(20-23) - - const __m256i lhs_mat_01_3_sp2 = _mm256_shuffle_epi32(lhs_mat_01_3, 245); //A0(28-31) A0(28-31) A1(28-31) A1(28-31) A0(28-31) A0(28-31) A1(28-31) A1(28-31) - const __m256i lhs_mat_23_3_sp2 = _mm256_shuffle_epi32(lhs_mat_23_3, 245); //A2(28-31) A2(28-31) A3(28-31) A3(28-31) A2(28-31) 
A2(28-31) A3(28-31) A3(28-31) - - // The values arranged in shuffle patterns are operated with dot product operation within 32 bit lane i.e corresponding bytes and multiplied and added into 32 bit integers within 32 bit lane - // Resembles MMLAs into 2x2 matrices in ARM Version - const __m256i zero = _mm256_setzero_si256(); - __m256i iacc_mat_00_sp1 = mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, lhs_mat_01_3_sp1, rhs_mat_0145_3_sp1), lhs_mat_01_2_sp1, rhs_mat_0145_2_sp1), lhs_mat_01_1_sp1, rhs_mat_0145_1_sp1), lhs_mat_01_0_sp1, rhs_mat_0145_0_sp1); - __m256i iacc_mat_01_sp1 = mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, lhs_mat_01_3_sp1, rhs_mat_2367_3_sp1), lhs_mat_01_2_sp1, rhs_mat_2367_2_sp1), lhs_mat_01_1_sp1, rhs_mat_2367_1_sp1), lhs_mat_01_0_sp1, rhs_mat_2367_0_sp1); - __m256i iacc_mat_10_sp1 = mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, lhs_mat_23_3_sp1, rhs_mat_0145_3_sp1), lhs_mat_23_2_sp1, rhs_mat_0145_2_sp1), lhs_mat_23_1_sp1, rhs_mat_0145_1_sp1), lhs_mat_23_0_sp1, rhs_mat_0145_0_sp1); - __m256i iacc_mat_11_sp1 = mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, lhs_mat_23_3_sp1, rhs_mat_2367_3_sp1), lhs_mat_23_2_sp1, rhs_mat_2367_2_sp1), lhs_mat_23_1_sp1, rhs_mat_2367_1_sp1), lhs_mat_23_0_sp1, rhs_mat_2367_0_sp1); - __m256i iacc_mat_00_sp2 = mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, lhs_mat_01_3_sp2, rhs_mat_0145_3_sp2), lhs_mat_01_2_sp2, rhs_mat_0145_2_sp2), lhs_mat_01_1_sp2, rhs_mat_0145_1_sp2), lhs_mat_01_0_sp2, rhs_mat_0145_0_sp2); - __m256i iacc_mat_01_sp2 = mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, lhs_mat_01_3_sp2, rhs_mat_2367_3_sp2), lhs_mat_01_2_sp2, rhs_mat_2367_2_sp2), lhs_mat_01_1_sp2, rhs_mat_2367_1_sp2), lhs_mat_01_0_sp2, rhs_mat_2367_0_sp2); - __m256i iacc_mat_10_sp2 = mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, lhs_mat_23_3_sp2, rhs_mat_0145_3_sp2), lhs_mat_23_2_sp2, rhs_mat_0145_2_sp2), lhs_mat_23_1_sp2, rhs_mat_0145_1_sp2), lhs_mat_23_0_sp2, rhs_mat_0145_0_sp2); - __m256i iacc_mat_11_sp2 = mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, lhs_mat_23_3_sp2, rhs_mat_2367_3_sp2), lhs_mat_23_2_sp2, rhs_mat_2367_2_sp2), lhs_mat_23_1_sp2, rhs_mat_2367_1_sp2), lhs_mat_23_0_sp2, rhs_mat_2367_0_sp2); - - // Output of both shuffle patterns are added in order to sum dot product outputs of all 32 values in block - __m256i iacc_mat_00 = _mm256_add_epi32(iacc_mat_00_sp1, iacc_mat_00_sp2); - __m256i iacc_mat_01 = _mm256_add_epi32(iacc_mat_01_sp1, iacc_mat_01_sp2); - __m256i iacc_mat_10 = _mm256_add_epi32(iacc_mat_10_sp1, iacc_mat_10_sp2); - __m256i iacc_mat_11 = _mm256_add_epi32(iacc_mat_11_sp1, iacc_mat_11_sp2); - - // Straighten out to make 4 row vectors - __m256i iacc_row_0 = _mm256_blend_epi32(iacc_mat_00, _mm256_shuffle_epi32(iacc_mat_01, 78), 204); - __m256i iacc_row_1 = _mm256_blend_epi32(_mm256_shuffle_epi32(iacc_mat_00, 78), iacc_mat_01, 204); - __m256i iacc_row_2 = _mm256_blend_epi32(iacc_mat_10, _mm256_shuffle_epi32(iacc_mat_11, 78), 204); - __m256i iacc_row_3 
= _mm256_blend_epi32(_mm256_shuffle_epi32(iacc_mat_10, 78), iacc_mat_11, 204); - - // Load the scale(d) values for all the 4 Q8_0 blocks and repeat it across lanes - const __m256 row_scale_f32 = GGML_F32Cx8_REPEAT_LOAD(a_ptrs[rp][b].d, loadMask); - - // Multiply with appropiate scales and accumulate - acc_rows[rp * 4] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_0), _mm256_mul_ps(col_scale_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 0)), acc_rows[rp * 4]); - acc_rows[rp * 4 + 1] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_1), _mm256_mul_ps(col_scale_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 85)), acc_rows[rp * 4 + 1]); - acc_rows[rp * 4 + 2] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_2), _mm256_mul_ps(col_scale_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 170)), acc_rows[rp * 4 + 2]); - acc_rows[rp * 4 + 3] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_3), _mm256_mul_ps(col_scale_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 255)), acc_rows[rp * 4 + 3]); - } - } - - // Store the accumulated values - for (int i = 0; i < 16; i++) { - _mm256_storeu_ps((float *)(s + ((y * 4 + i) * bs + x * 8)), acc_rows[i]); - } - } - } - - // Take a block_q8_0x4 structures at each pass of the loop and perform dot product operation - for (; y < nr / 4; y ++) { - - const block_q8_0x4 * a_ptr = a_ptr_start + (y * nb); - - // Load the eight block_q4_0 quantized values interleaved with each other in chunks of eight - B0,B1 ....B6,B7 - for (int64_t x = xstart; x < nc / 8; x++) { - - const block_q4_0x8 * b_ptr = b_ptr_start + (x * b_nb); - - // Master FP accumulators - __m256 acc_rows[4]; - for (int i = 0; i < 4; i++) { - acc_rows[i] = _mm256_setzero_ps(); - } - - for (int64_t b = 0; b < nb; b++) { - // Load the eight block_q8_0 quantized values interleaved with each other in chunks of eight - B0,B1 ....B6,B7 - const __m256i rhs_raw_mat_0123_0 = _mm256_loadu_si256((const __m256i *)(b_ptr[b].qs)); - const __m256i rhs_raw_mat_4567_0 = _mm256_loadu_si256((const __m256i *)(b_ptr[b].qs + 32)); - const __m256i rhs_raw_mat_0123_1 = _mm256_loadu_si256((const __m256i *)(b_ptr[b].qs + 64)); - const __m256i rhs_raw_mat_4567_1 = _mm256_loadu_si256((const __m256i *)(b_ptr[b].qs + 96)); - - // Save the values in the following vectors in the formats B0B1B4B5, B2B3B6B7 for further processing and storing of valuess - const __m256i rhs_raw_mat_0145_0 = _mm256_blend_epi32(rhs_raw_mat_0123_0, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_0, requiredOrder), 240); - const __m256i rhs_raw_mat_2367_0 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_0, requiredOrder), rhs_raw_mat_4567_0, 240); - const __m256i rhs_raw_mat_0145_1 = _mm256_blend_epi32(rhs_raw_mat_0123_1, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_1, requiredOrder), 240); - const __m256i rhs_raw_mat_2367_1 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_1, requiredOrder), rhs_raw_mat_4567_1, 240); - - // 4-bit -> 8-bit - Sign is maintained - const __m256i rhs_mat_0145_0 = _mm256_shuffle_epi8(signextendlut, _mm256_and_si256(rhs_raw_mat_0145_0, m4b)); //B0(0-7) B1(0-7) B4(0-7) B5(0-7) - const __m256i rhs_mat_2367_0 = _mm256_shuffle_epi8(signextendlut, _mm256_and_si256(rhs_raw_mat_2367_0, m4b)); //B2(0-7) B3(0-7) B6(0-7) B7(0-7) - - const __m256i rhs_mat_0145_1 = _mm256_shuffle_epi8(signextendlut, _mm256_and_si256(rhs_raw_mat_0145_1, m4b)); //B0(8-15) B1(8-15) B4(8-15) B5(8-15) - const __m256i rhs_mat_2367_1 = _mm256_shuffle_epi8(signextendlut, _mm256_and_si256(rhs_raw_mat_2367_1, m4b)); //B2(8-15) 
B3(8-15) B6(8-15) B7(8-15) - - const __m256i rhs_mat_0145_2 = _mm256_shuffle_epi8(signextendlut, _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_0, 4), m4b)); //B0(16-23) B1(16-23) B4(16-23) B5(16-23) - const __m256i rhs_mat_2367_2 = _mm256_shuffle_epi8(signextendlut, _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_0, 4), m4b)); //B2(16-23) B3(16-23) B6(16-23) B7(16-23) - - const __m256i rhs_mat_0145_3 = _mm256_shuffle_epi8(signextendlut, _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_1, 4), m4b)); //B0(24-31) B1(24-31) B4(24-31) B5(24-31) - const __m256i rhs_mat_2367_3 = _mm256_shuffle_epi8(signextendlut, _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_1, 4), m4b)); //B2(24-31) B3(24-31) B6(24-31) B7(24-31) - - // Shuffle pattern one - right side input - const __m256i rhs_mat_0145_0_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_0, 136); //B0(0-3) B1(0-3) B0(0-3) B1(0-3) B4(0-3) B5(0-3) B4(0-3) B5(0-3) - const __m256i rhs_mat_2367_0_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_0, 136); //B2(0-3) B3(0-3) B2(0-3) B3(0-3) B6(0-3) B7(0-3) B6(0-3) B7(0-3) - - const __m256i rhs_mat_0145_1_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_1, 136); //B0(8-11) B1(8-11) B0(8-11) B1(8-11) B4(8-11) B5(8-11) B4(8-11) B5(8-11) - const __m256i rhs_mat_2367_1_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_1, 136); //B2(8-11) B3(8-11) B2(8-11) B3(8-11) B6(8-11) B7(8-11) B6(8-11) B7(8-11) - - const __m256i rhs_mat_0145_2_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_2, 136); //B0(16-19) B1(16-19) B0(16-19) B1(16-19) B4(16-19) B5(16-19) B4(16-19) B5(16-19) - const __m256i rhs_mat_2367_2_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_2, 136); //B2(16-19) B3(16-19) B2(16-19) B3(16-19) B6(16-19) B7(16-19) B6(16-19) B7(16-19) - - const __m256i rhs_mat_0145_3_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_3, 136); //B0(24-27) B1(24-27) B0(24-27) B1(24-27) B4(24-27) B5(24-27) B4(24-27) B5(24-27) - const __m256i rhs_mat_2367_3_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_3, 136); //B2(24-27) B3(24-27) B2(24-27) B3(24-27) B6(24-27) B7(24-27) B6(24-27) B7(24-27) - - // Shuffle pattern two - right side input - - const __m256i rhs_mat_0145_0_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_0, 221); //B0(4-7) B1(4-7) B0(4-7) B1(4-7) B4(4-7) B5(4-7) B4(4-7) B5(4-7) - const __m256i rhs_mat_2367_0_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_0, 221); //B2(4-7) B3(4-7) B2(4-7) B3(4-7) B6(4-7) B7(4-7) B6(4-7) B7(4-7) - - const __m256i rhs_mat_0145_1_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_1, 221); //B0(12-15) B1(12-15) B0(12-15) B1(12-15) B4(12-15) B5(12-15) B4(12-15) B5(12-15) - const __m256i rhs_mat_2367_1_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_1, 221); //B2(12-15) B3(12-15) B2(12-15) B3(12-15) B6(12-15) B7(12-15) B6(12-15) B7(12-15) - - const __m256i rhs_mat_0145_2_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_2, 221); //B0(20-23) B1(20-23) B0(20-23) B1(20-23) B4(20-23) B5(20-23) B4(20-23) B5(20-23) - const __m256i rhs_mat_2367_2_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_2, 221); //B2(20-23) B3(20-23) B2(20-23) B3(20-23) B6(20-23) B7(20-23) B6(20-23) B7(20-23) - - const __m256i rhs_mat_0145_3_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_3, 221); //B0(28-31) B1(28-31) B0(28-31) B1(28-31) B4(28-31) B5(28-31) B4(28-31) B5(28-31) - const __m256i rhs_mat_2367_3_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_3, 221); //B2(28-31) B3(28-31) B2(28-31) B3(28-31) B6(28-31) B7(28-31) B6(28-31) B7(28-31) - - // Scale values - Load the wight scale values of block_q4_0x8 - const __m256 col_scale_f32 = GGML_F32Cx8_LOAD(b_ptr[b].d); - - // Load the four block_q4_0 quantized values interleaved with each 
other in chunks of eight - A0,A1,A2,A3 - // Loaded as set of 128 bit vectors and repeated into a 256 bit vector - __m256i lhs_mat_0123_0 = _mm256_loadu_si256((const __m256i *)((a_ptr[b].qs))); - __m256i lhs_mat_01_0 = _mm256_permute2f128_si256(lhs_mat_0123_0, lhs_mat_0123_0, 0); - __m256i lhs_mat_23_0 = _mm256_permute2f128_si256(lhs_mat_0123_0, lhs_mat_0123_0, 17); - __m256i lhs_mat_0123_1 = _mm256_loadu_si256((const __m256i *)((a_ptr[b].qs + 32))); - __m256i lhs_mat_01_1 = _mm256_permute2f128_si256(lhs_mat_0123_1, lhs_mat_0123_1, 0); - __m256i lhs_mat_23_1 = _mm256_permute2f128_si256(lhs_mat_0123_1, lhs_mat_0123_1, 17); - __m256i lhs_mat_0123_2 = _mm256_loadu_si256((const __m256i *)((a_ptr[b].qs + 64))); - __m256i lhs_mat_01_2 = _mm256_permute2f128_si256(lhs_mat_0123_2, lhs_mat_0123_2, 0); - __m256i lhs_mat_23_2 = _mm256_permute2f128_si256(lhs_mat_0123_2, lhs_mat_0123_2, 17); - __m256i lhs_mat_0123_3 = _mm256_loadu_si256((const __m256i *)((a_ptr[b].qs + 96))); - __m256i lhs_mat_01_3 = _mm256_permute2f128_si256(lhs_mat_0123_3, lhs_mat_0123_3, 0); - __m256i lhs_mat_23_3 = _mm256_permute2f128_si256(lhs_mat_0123_3, lhs_mat_0123_3, 17); - - // Shuffle pattern one - left side input - - const __m256i lhs_mat_01_0_sp1 = _mm256_shuffle_epi32(lhs_mat_01_0, 160); //A0(0-3) A0(0-3) A1(0-3) A1(0-3) A0(0-3) A0(0-3) A1(0-3) A1(0-3) - const __m256i lhs_mat_23_0_sp1 = _mm256_shuffle_epi32(lhs_mat_23_0, 160); //A2(0-3) A2(0-3) A3(0-3) A3(0-3) A2(0-3) A2(0-3) A3(0-3) A3(0-3) - - const __m256i lhs_mat_01_1_sp1 = _mm256_shuffle_epi32(lhs_mat_01_1, 160); //A0(8-11) A0(8-11) A1(8-11) A1(8-11) A0(8-11) A0(8-11) A1(8-11) A1(8-11) - const __m256i lhs_mat_23_1_sp1 = _mm256_shuffle_epi32(lhs_mat_23_1, 160); //A2(8-11) A2(8-11) A3(8-11) A3(8-11) A2(8-11) A2(8-11) A3(8-11) A3(8-11) - - const __m256i lhs_mat_01_2_sp1 = _mm256_shuffle_epi32(lhs_mat_01_2, 160); //A0(16-19) A0(16-19) A1(16-19) A1(16-19) A0(16-19) A0(16-19) A1(16-19) A1(16-19) - const __m256i lhs_mat_23_2_sp1 = _mm256_shuffle_epi32(lhs_mat_23_2, 160); //A2(16-19) A2(16-19) A3(16-19) A3(16-19) A2(16-19) A2(16-19) A3(16-19) A3(16-19) - - const __m256i lhs_mat_01_3_sp1 = _mm256_shuffle_epi32(lhs_mat_01_3, 160); //A0(24-27) A0(24-27) A1(24-27) A1(24-27) A0(24-27) A0(24-27) A1(24-27) A1(24-27) - const __m256i lhs_mat_23_3_sp1 = _mm256_shuffle_epi32(lhs_mat_23_3, 160); //A2(24-27) A2(24-27) A3(24-27) A3(24-27) A2(24-27) A2(24-27) A3(24-27) A3(24-27) - - // Shuffle pattern two - left side input - - const __m256i lhs_mat_01_0_sp2 = _mm256_shuffle_epi32(lhs_mat_01_0, 245); //A0(4-7) A0(4-7) A1(4-7) A1(4-7) A0(4-7) A0(4-7) A1(4-7) A1(4-7) - const __m256i lhs_mat_23_0_sp2 = _mm256_shuffle_epi32(lhs_mat_23_0, 245); //A2(4-7) A2(4-7) A3(4-7) A3(4-7) A2(4-7) A2(4-7) A3(4-7) A3(4-7) - - const __m256i lhs_mat_01_1_sp2 = _mm256_shuffle_epi32(lhs_mat_01_1, 245); //A0(12-15) A0(12-15) A1(12-15) A1(12-15) A0(12-15) A0(12-15) A1(12-15) A1(12-15) - const __m256i lhs_mat_23_1_sp2 = _mm256_shuffle_epi32(lhs_mat_23_1, 245); //A2(12-15) A2(12-15) A3(12-15) A3(12-15) A2(12-15) A2(12-15) A3(12-15) A3(12-15) - - const __m256i lhs_mat_01_2_sp2 = _mm256_shuffle_epi32(lhs_mat_01_2, 245); //A0(20-23) A0(20-23) A1(20-23) A1(20-23) A0(20-23) A0(20-23) A1(20-23) A1(20-23) - const __m256i lhs_mat_23_2_sp2 = _mm256_shuffle_epi32(lhs_mat_23_2, 245); //A2(20-23) A2(20-23) A3(20-23) A3(20-23) A2(20-23) A2(20-23) A3(20-23) A3(20-23) - - const __m256i lhs_mat_01_3_sp2 = _mm256_shuffle_epi32(lhs_mat_01_3, 245); //A0(28-31) A0(28-31) A1(28-31) A1(28-31) A0(28-31) A0(28-31) A1(28-31) A1(28-31) - const 
__m256i lhs_mat_23_3_sp2 = _mm256_shuffle_epi32(lhs_mat_23_3, 245); //A2(28-31) A2(28-31) A3(28-31) A3(28-31) A2(28-31) A2(28-31) A3(28-31) A3(28-31) - - // The values arranged in shuffle patterns are operated with dot product operation within 32 bit lane i.e corresponding bytes and multiplied and added into 32 bit integers within 32 bit lane - // Resembles MMLAs into 2x2 matrices in ARM Version - const __m256i zero = _mm256_setzero_si256(); - __m256i iacc_mat_00_sp1 = mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, lhs_mat_01_3_sp1, rhs_mat_0145_3_sp1), lhs_mat_01_2_sp1, rhs_mat_0145_2_sp1), lhs_mat_01_1_sp1, rhs_mat_0145_1_sp1), lhs_mat_01_0_sp1, rhs_mat_0145_0_sp1); - __m256i iacc_mat_01_sp1 = mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, lhs_mat_01_3_sp1, rhs_mat_2367_3_sp1), lhs_mat_01_2_sp1, rhs_mat_2367_2_sp1), lhs_mat_01_1_sp1, rhs_mat_2367_1_sp1), lhs_mat_01_0_sp1, rhs_mat_2367_0_sp1); - __m256i iacc_mat_10_sp1 = mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, lhs_mat_23_3_sp1, rhs_mat_0145_3_sp1), lhs_mat_23_2_sp1, rhs_mat_0145_2_sp1), lhs_mat_23_1_sp1, rhs_mat_0145_1_sp1), lhs_mat_23_0_sp1, rhs_mat_0145_0_sp1); - __m256i iacc_mat_11_sp1 = mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, lhs_mat_23_3_sp1, rhs_mat_2367_3_sp1), lhs_mat_23_2_sp1, rhs_mat_2367_2_sp1), lhs_mat_23_1_sp1, rhs_mat_2367_1_sp1), lhs_mat_23_0_sp1, rhs_mat_2367_0_sp1); - __m256i iacc_mat_00_sp2 = mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, lhs_mat_01_3_sp2, rhs_mat_0145_3_sp2), lhs_mat_01_2_sp2, rhs_mat_0145_2_sp2), lhs_mat_01_1_sp2, rhs_mat_0145_1_sp2), lhs_mat_01_0_sp2, rhs_mat_0145_0_sp2); - __m256i iacc_mat_01_sp2 = mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, lhs_mat_01_3_sp2, rhs_mat_2367_3_sp2), lhs_mat_01_2_sp2, rhs_mat_2367_2_sp2), lhs_mat_01_1_sp2, rhs_mat_2367_1_sp2), lhs_mat_01_0_sp2, rhs_mat_2367_0_sp2); - __m256i iacc_mat_10_sp2 = mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, lhs_mat_23_3_sp2, rhs_mat_0145_3_sp2), lhs_mat_23_2_sp2, rhs_mat_0145_2_sp2), lhs_mat_23_1_sp2, rhs_mat_0145_1_sp2), lhs_mat_23_0_sp2, rhs_mat_0145_0_sp2); - __m256i iacc_mat_11_sp2 = mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(mul_sum_i8_pairs_acc_int32x8(zero, lhs_mat_23_3_sp2, rhs_mat_2367_3_sp2), lhs_mat_23_2_sp2, rhs_mat_2367_2_sp2), lhs_mat_23_1_sp2, rhs_mat_2367_1_sp2), lhs_mat_23_0_sp2, rhs_mat_2367_0_sp2); - - // Output of both shuffle patterns are added in order to sum dot product outputs of all 32 values in block - __m256i iacc_mat_00 = _mm256_add_epi32(iacc_mat_00_sp1, iacc_mat_00_sp2); - __m256i iacc_mat_01 = _mm256_add_epi32(iacc_mat_01_sp1, iacc_mat_01_sp2); - __m256i iacc_mat_10 = _mm256_add_epi32(iacc_mat_10_sp1, iacc_mat_10_sp2); - __m256i iacc_mat_11 = _mm256_add_epi32(iacc_mat_11_sp1, iacc_mat_11_sp2); - - - // Straighten out to make 4 row vectors - __m256i iacc_row_0 = _mm256_blend_epi32(iacc_mat_00, _mm256_shuffle_epi32(iacc_mat_01, 78), 204); - __m256i iacc_row_1 = _mm256_blend_epi32(_mm256_shuffle_epi32(iacc_mat_00, 78), iacc_mat_01, 204); 
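The same structure repeats in every branch of this kernel: shuffle pattern one gathers bytes 0-3/8-11/16-19/24-27 of each row pair while pattern two gathers bytes 4-7/12-15/20-23/28-31, each mul_sum_i8_pairs_acc step dot-products four byte pairs per 32-bit lane, and adding the sp1 and sp2 accumulators therefore covers all 32 values of a block before the 2x2 tiles are blended back into row vectors. Numerically, one 4x8 tile reduces to the scalar loop below. This is a minimal reference sketch, not the kernel's own code: blk_q4_ref, blk_q8_ref and tile_4x8_ref are hypothetical names, the blocks are assumed plain (non-interleaved) q4_0/q8_0 with the usual -8 nibble offset (the x8-interleaved blocks appear to have that offset folded into the stored nibbles during repacking, which is why the SIMD path can use a plain 4-bit sign-extension lookup), and the scales are kept as float where the real blocks store ggml_half.

    #include <stdint.h>

    #define QK4_0 32

    typedef struct { float d; uint8_t qs[QK4_0 / 2]; } blk_q4_ref; /* hypothetical stand-in for block_q4_0 */
    typedef struct { float d; int8_t  qs[QK4_0];     } blk_q8_ref; /* hypothetical stand-in for block_q8_0 */

    /* One K-block of the tile: C[r][c] += dot(A row r, B column c) * dA * dB. */
    static void tile_4x8_ref(float C[4][8], const blk_q8_ref A[4], const blk_q4_ref B[8]) {
        for (int r = 0; r < 4; r++) {
            for (int c = 0; c < 8; c++) {
                int32_t iacc = 0;
                for (int k = 0; k < QK4_0 / 2; k++) {
                    const int lo = (B[c].qs[k] & 0x0F) - 8; /* elements 0..15  */
                    const int hi = (B[c].qs[k] >> 4)   - 8; /* elements 16..31 */
                    iacc += lo * A[r].qs[k] + hi * A[r].qs[k + QK4_0 / 2];
                }
                /* one fused multiply-add per block, as in the fmadd lines here */
                C[r][c] += (float)iacc * A[r].d * B[c].d;
            }
        }
    }

The shuffle_ps immediates 0, 85, 170 and 255 (0b00000000, 0b01010101, ...) broadcast one of the four per-row scales across the vector, so each accumulator row gets its own dA while sharing the eight per-column dB values of col_scale_f32.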
- __m256i iacc_row_2 = _mm256_blend_epi32(iacc_mat_10, _mm256_shuffle_epi32(iacc_mat_11, 78), 204);
- __m256i iacc_row_3 = _mm256_blend_epi32(_mm256_shuffle_epi32(iacc_mat_10, 78), iacc_mat_11, 204);
-
- // Load the scale values (d) for all 4 Q8_0 blocks and repeat them across lanes
- const __m256 row_scale_f32 = GGML_F32Cx8_REPEAT_LOAD(a_ptr[b].d, loadMask);
-
- // Multiply with the appropriate scales and accumulate
- acc_rows[0] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_0), _mm256_mul_ps(col_scale_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 0)), acc_rows[0]);
- acc_rows[1] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_1), _mm256_mul_ps(col_scale_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 85)), acc_rows[1]);
- acc_rows[2] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_2), _mm256_mul_ps(col_scale_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 170)), acc_rows[2]);
- acc_rows[3] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_3), _mm256_mul_ps(col_scale_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 255)), acc_rows[3]);
- }
-
- // Store the accumulated values
- for (int i = 0; i < 4; i++) {
- _mm256_storeu_ps((float *)(s + ((y * 4 + i) * bs + x * 8)), acc_rows[i]);
- }
- }
- }
- return;
- }
-#elif defined(__riscv_v_intrinsic)
- if (__riscv_vlenb() >= QK4_0) {
- const size_t vl = QK4_0;
-
- for (int y = 0; y < nr / 4; y++) {
- const block_q8_0x4 * a_ptr = (const block_q8_0x4 *) vy + (y * nb);
- for (int x = 0; x < nc / ncols_interleaved; x++) {
- const block_q4_0x8 * b_ptr = (const block_q4_0x8 *) vx + (x * nb);
- vfloat32m1_t sumf0 = __riscv_vfmv_v_f_f32m1(0.0, vl / 4);
- vfloat32m1_t sumf1 = __riscv_vfmv_v_f_f32m1(0.0, vl / 4);
- vfloat32m1_t sumf2 = __riscv_vfmv_v_f_f32m1(0.0, vl / 4);
- vfloat32m1_t sumf3 = __riscv_vfmv_v_f_f32m1(0.0, vl / 4);
- for (int l = 0; l < nb; l++) {
- const vint8m4_t rhs_raw_vec = __riscv_vle8_v_i8m4((const int8_t *)b_ptr[l].qs, vl * 4);
- const vint8m4_t rhs_vec_lo = __riscv_vsra_vx_i8m4(__riscv_vsll_vx_i8m4(rhs_raw_vec, 4, vl * 4), 4, vl * 4);
- const vint8m4_t rhs_vec_hi = __riscv_vsra_vx_i8m4(rhs_raw_vec, 4, vl * 4);
- const vint8m2_t rhs_vec_lo_0 = __riscv_vget_v_i8m4_i8m2(rhs_vec_lo, 0);
- const vint8m2_t rhs_vec_lo_1 = __riscv_vget_v_i8m4_i8m2(rhs_vec_lo, 1);
- const vint8m2_t rhs_vec_hi_0 = __riscv_vget_v_i8m4_i8m2(rhs_vec_hi, 0);
- const vint8m2_t rhs_vec_hi_1 = __riscv_vget_v_i8m4_i8m2(rhs_vec_hi, 1);
-
- // vector version needs Zvfhmin extension
- const float a_scales[4] = {
- GGML_FP16_TO_FP32(a_ptr[l].d[0]),
- GGML_FP16_TO_FP32(a_ptr[l].d[1]),
- GGML_FP16_TO_FP32(a_ptr[l].d[2]),
- GGML_FP16_TO_FP32(a_ptr[l].d[3])
- };
- const float b_scales[8] = {
- GGML_FP16_TO_FP32(b_ptr[l].d[0]),
- GGML_FP16_TO_FP32(b_ptr[l].d[1]),
- GGML_FP16_TO_FP32(b_ptr[l].d[2]),
- GGML_FP16_TO_FP32(b_ptr[l].d[3]),
- GGML_FP16_TO_FP32(b_ptr[l].d[4]),
- GGML_FP16_TO_FP32(b_ptr[l].d[5]),
- GGML_FP16_TO_FP32(b_ptr[l].d[6]),
- GGML_FP16_TO_FP32(b_ptr[l].d[7])
- };
- const vfloat32m1_t b_scales_vec = __riscv_vle32_v_f32m1(b_scales, vl / 4);
-
- const int64_t A0 = *(const int64_t *)&a_ptr[l].qs[0];
- const int64_t A4 = *(const int64_t *)&a_ptr[l].qs[32];
- const int64_t A8 = *(const int64_t *)&a_ptr[l].qs[64];
- const int64_t Ac = *(const int64_t *)&a_ptr[l].qs[96];
- __asm__ __volatile__("" ::: "memory"); // prevent gcc from emitting fused vlse64, violating alignment
- vint16m4_t sumi_l0;
- {
- const vint8m2_t lhs_0_8 =__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(A0, vl / 4));
- const vint8m2_t lhs_1_8 
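/* Editorial note: the lhs_*_8 temporaries in this block replicate one 8-byte
   group of Q8 activations across the whole vector: the group is read as a
   single int64, broadcast with __riscv_vmv_v_x_i64m2, and reinterpreted as
   int8, so each vwmul/vwmacc multiplies the same 8 LHS bytes against every
   interleaved RHS column at once. The inline-asm memory barrier above keeps
   GCC from fusing the four scalar loads into a strided vector load (vlse64)
   with stricter alignment requirements than the data guarantees. */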
=__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(A4, vl / 4)); - const vint8m2_t lhs_2_8 =__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(A8, vl / 4)); - const vint8m2_t lhs_3_8 =__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(Ac, vl / 4)); - const vint16m4_t sumi_lo_0 = __riscv_vwmul_vv_i16m4(rhs_vec_lo_0, lhs_0_8, vl * 2); - const vint16m4_t sumi_lo_1 = __riscv_vwmacc_vv_i16m4(sumi_lo_0, rhs_vec_lo_1, lhs_1_8, vl * 2); - const vint16m4_t sumi_hi_0 = __riscv_vwmacc_vv_i16m4(sumi_lo_1, rhs_vec_hi_0, lhs_2_8, vl * 2); - const vint16m4_t sumi_hi_m = __riscv_vwmacc_vv_i16m4(sumi_hi_0, rhs_vec_hi_1, lhs_3_8, vl * 2); - - sumi_l0 = sumi_hi_m; - } - - { - const vuint32m4_t sumi_i32 = __riscv_vreinterpret_v_i32m4_u32m4(__riscv_vreinterpret_v_i16m4_i32m4(sumi_l0)); - const vuint16m2_t sumi_h2_0 = __riscv_vnsrl_wx_u16m2(sumi_i32, 0, vl); - const vuint16m2_t sumi_h2_1 = __riscv_vnsrl_wx_u16m2(sumi_i32, 16, vl); - const vuint16m2_t sumi_h2 = __riscv_vadd_vv_u16m2(sumi_h2_0, sumi_h2_1, vl); - const vuint32m2_t sumi_h2_i32 = __riscv_vreinterpret_v_u16m2_u32m2(sumi_h2); - const vuint16m1_t sumi_h4_0 = __riscv_vnsrl_wx_u16m1(sumi_h2_i32, 0, vl / 2); - const vuint16m1_t sumi_h4_1 = __riscv_vnsrl_wx_u16m1(sumi_h2_i32, 16, vl / 2); - const vuint16m1_t sumi_h4 = __riscv_vadd_vv_u16m1(sumi_h4_0, sumi_h4_1, vl / 2); - const vuint32m1_t sumi_h4_i32 = __riscv_vreinterpret_v_u16m1_u32m1(sumi_h4); - const vint16mf2_t sumi_h8_0 = __riscv_vreinterpret_v_u16mf2_i16mf2(__riscv_vnsrl_wx_u16mf2(sumi_h4_i32, 0, vl / 4)); - const vint16mf2_t sumi_h8_1 = __riscv_vreinterpret_v_u16mf2_i16mf2(__riscv_vnsrl_wx_u16mf2(sumi_h4_i32, 16, vl / 4)); - const vint32m1_t sumi_h8 = __riscv_vwadd_vv_i32m1(sumi_h8_0, sumi_h8_1, vl / 4); - const vfloat32m1_t facc = __riscv_vfcvt_f_x_v_f32m1(sumi_h8, vl / 4); - - const vfloat32m1_t tmp1 = __riscv_vfmul_vf_f32m1(facc, a_scales[0], vl / 4); - sumf0 = __riscv_vfmacc_vv_f32m1(sumf0, tmp1, b_scales_vec, vl / 4); - } - - const int64_t A1 = *(const int64_t *)&a_ptr[l].qs[8]; - const int64_t A5 = *(const int64_t *)&a_ptr[l].qs[40]; - const int64_t A9 = *(const int64_t *)&a_ptr[l].qs[72]; - const int64_t Ad = *(const int64_t *)&a_ptr[l].qs[104]; - __asm__ __volatile__("" ::: "memory"); // prevent gcc from emitting fused vlse64, violating alignment - vint16m4_t sumi_l1; - { - const vint8m2_t lhs_0_8 =__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(A1, vl / 4)); - const vint8m2_t lhs_1_8 =__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(A5, vl / 4)); - const vint8m2_t lhs_2_8 =__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(A9, vl / 4)); - const vint8m2_t lhs_3_8 =__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(Ad, vl / 4)); - const vint16m4_t sumi_lo_0 = __riscv_vwmul_vv_i16m4(rhs_vec_lo_0, lhs_0_8, vl * 2); - const vint16m4_t sumi_lo_1 = __riscv_vwmacc_vv_i16m4(sumi_lo_0, rhs_vec_lo_1, lhs_1_8, vl * 2); - const vint16m4_t sumi_hi_0 = __riscv_vwmacc_vv_i16m4(sumi_lo_1, rhs_vec_hi_0, lhs_2_8, vl * 2); - const vint16m4_t sumi_hi_m = __riscv_vwmacc_vv_i16m4(sumi_hi_0, rhs_vec_hi_1, lhs_3_8, vl * 2); - - sumi_l1 = sumi_hi_m; - } - - { - const vuint32m4_t sumi_i32 = __riscv_vreinterpret_v_i32m4_u32m4(__riscv_vreinterpret_v_i16m4_i32m4(sumi_l1)); - const vuint16m2_t sumi_h2_0 = __riscv_vnsrl_wx_u16m2(sumi_i32, 0, vl); - const vuint16m2_t sumi_h2_1 = __riscv_vnsrl_wx_u16m2(sumi_i32, 16, vl); - const vuint16m2_t sumi_h2 = __riscv_vadd_vv_u16m2(sumi_h2_0, sumi_h2_1, vl); - const vuint32m2_t sumi_h2_i32 = __riscv_vreinterpret_v_u16m2_u32m2(sumi_h2); - 
const vuint16m1_t sumi_h4_0 = __riscv_vnsrl_wx_u16m1(sumi_h2_i32, 0, vl / 2); - const vuint16m1_t sumi_h4_1 = __riscv_vnsrl_wx_u16m1(sumi_h2_i32, 16, vl / 2); - const vuint16m1_t sumi_h4 = __riscv_vadd_vv_u16m1(sumi_h4_0, sumi_h4_1, vl / 2); - const vuint32m1_t sumi_h4_i32 = __riscv_vreinterpret_v_u16m1_u32m1(sumi_h4); - const vint16mf2_t sumi_h8_0 = __riscv_vreinterpret_v_u16mf2_i16mf2(__riscv_vnsrl_wx_u16mf2(sumi_h4_i32, 0, vl / 4)); - const vint16mf2_t sumi_h8_1 = __riscv_vreinterpret_v_u16mf2_i16mf2(__riscv_vnsrl_wx_u16mf2(sumi_h4_i32, 16, vl / 4)); - const vint32m1_t sumi_h8 = __riscv_vwadd_vv_i32m1(sumi_h8_0, sumi_h8_1, vl / 4); - const vfloat32m1_t facc = __riscv_vfcvt_f_x_v_f32m1(sumi_h8, vl / 4); - - const vfloat32m1_t tmp1 = __riscv_vfmul_vf_f32m1(facc, a_scales[1], vl / 4); - sumf1 = __riscv_vfmacc_vv_f32m1(sumf1, tmp1, b_scales_vec, vl / 4); - } - - const int64_t A2 = *(const int64_t *)&a_ptr[l].qs[16]; - const int64_t A6 = *(const int64_t *)&a_ptr[l].qs[48]; - const int64_t Aa = *(const int64_t *)&a_ptr[l].qs[80]; - const int64_t Ae = *(const int64_t *)&a_ptr[l].qs[112]; - __asm__ __volatile__("" ::: "memory"); // prevent gcc from emitting fused vlse64, violating alignment - vint16m4_t sumi_l2; - { - const vint8m2_t lhs_0_8 =__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(A2, vl / 4)); - const vint8m2_t lhs_1_8 =__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(A6, vl / 4)); - const vint8m2_t lhs_2_8 =__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(Aa, vl / 4)); - const vint8m2_t lhs_3_8 =__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(Ae, vl / 4)); - const vint16m4_t sumi_lo_0 = __riscv_vwmul_vv_i16m4(rhs_vec_lo_0, lhs_0_8, vl * 2); - const vint16m4_t sumi_lo_1 = __riscv_vwmacc_vv_i16m4(sumi_lo_0, rhs_vec_lo_1, lhs_1_8, vl * 2); - const vint16m4_t sumi_hi_0 = __riscv_vwmacc_vv_i16m4(sumi_lo_1, rhs_vec_hi_0, lhs_2_8, vl * 2); - const vint16m4_t sumi_hi_m = __riscv_vwmacc_vv_i16m4(sumi_hi_0, rhs_vec_hi_1, lhs_3_8, vl * 2); - - sumi_l2 = sumi_hi_m; - } - - { - const vuint32m4_t sumi_i32 = __riscv_vreinterpret_v_i32m4_u32m4(__riscv_vreinterpret_v_i16m4_i32m4(sumi_l2)); - const vuint16m2_t sumi_h2_0 = __riscv_vnsrl_wx_u16m2(sumi_i32, 0, vl); - const vuint16m2_t sumi_h2_1 = __riscv_vnsrl_wx_u16m2(sumi_i32, 16, vl); - const vuint16m2_t sumi_h2 = __riscv_vadd_vv_u16m2(sumi_h2_0, sumi_h2_1, vl); - const vuint32m2_t sumi_h2_i32 = __riscv_vreinterpret_v_u16m2_u32m2(sumi_h2); - const vuint16m1_t sumi_h4_0 = __riscv_vnsrl_wx_u16m1(sumi_h2_i32, 0, vl / 2); - const vuint16m1_t sumi_h4_1 = __riscv_vnsrl_wx_u16m1(sumi_h2_i32, 16, vl / 2); - const vuint16m1_t sumi_h4 = __riscv_vadd_vv_u16m1(sumi_h4_0, sumi_h4_1, vl / 2); - const vuint32m1_t sumi_h4_i32 = __riscv_vreinterpret_v_u16m1_u32m1(sumi_h4); - const vint16mf2_t sumi_h8_0 = __riscv_vreinterpret_v_u16mf2_i16mf2(__riscv_vnsrl_wx_u16mf2(sumi_h4_i32, 0, vl / 4)); - const vint16mf2_t sumi_h8_1 = __riscv_vreinterpret_v_u16mf2_i16mf2(__riscv_vnsrl_wx_u16mf2(sumi_h4_i32, 16, vl / 4)); - const vint32m1_t sumi_h8 = __riscv_vwadd_vv_i32m1(sumi_h8_0, sumi_h8_1, vl / 4); - const vfloat32m1_t facc = __riscv_vfcvt_f_x_v_f32m1(sumi_h8, vl / 4); - - const vfloat32m1_t tmp1 = __riscv_vfmul_vf_f32m1(facc, a_scales[2], vl / 4); - sumf2 = __riscv_vfmacc_vv_f32m1(sumf2, tmp1, b_scales_vec, vl / 4); - } - - const int64_t A3 = *(const int64_t *)&a_ptr[l].qs[24]; - const int64_t A7 = *(const int64_t *)&a_ptr[l].qs[56]; - const int64_t Ab = *(const int64_t *)&a_ptr[l].qs[88]; - const int64_t Af = *(const int64_t *)&a_ptr[l].qs[120]; 
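/* Editorial note: the reduction blocks above and below collapse the widened
   16-bit products (from vwmul/vwmacc) into one 32-bit sum per output column
   using a log2-depth pairwise tree: vnsrl by 0 and by 16 split the even and
   odd 16-bit partial sums out of a 32-bit container, and vadd merges them,
   halving the element count each round. A scalar sketch of one such
   reduction (illustrative only, not the kernel's code):

       static int32_t pairwise_reduce_model(const int16_t partial[16]) {
           int32_t tmp[16];
           int n = 16;
           for (int i = 0; i < n; i++) tmp[i] = partial[i];
           while (n > 1) {           // one round ~ one vnsrl/vnsrl/vadd triple
               for (int i = 0; i < n / 2; i++) {
                   tmp[i] = tmp[2 * i] + tmp[2 * i + 1];
               }
               n /= 2;
           }
           return tmp[0];
       }
*/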
- __asm__ __volatile__("" ::: "memory"); // prevent gcc from emitting fused vlse64, violating alignment - vint16m4_t sumi_l3; - { - const vint8m2_t lhs_0_8 =__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(A3, vl / 4)); - const vint8m2_t lhs_1_8 =__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(A7, vl / 4)); - const vint8m2_t lhs_2_8 =__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(Ab, vl / 4)); - const vint8m2_t lhs_3_8 =__riscv_vreinterpret_v_i64m2_i8m2(__riscv_vmv_v_x_i64m2(Af, vl / 4)); - const vint16m4_t sumi_lo_0 = __riscv_vwmul_vv_i16m4(rhs_vec_lo_0, lhs_0_8, vl * 2); - const vint16m4_t sumi_lo_1 = __riscv_vwmacc_vv_i16m4(sumi_lo_0, rhs_vec_lo_1, lhs_1_8, vl * 2); - const vint16m4_t sumi_hi_0 = __riscv_vwmacc_vv_i16m4(sumi_lo_1, rhs_vec_hi_0, lhs_2_8, vl * 2); - const vint16m4_t sumi_hi_m = __riscv_vwmacc_vv_i16m4(sumi_hi_0, rhs_vec_hi_1, lhs_3_8, vl * 2); - - sumi_l3 = sumi_hi_m; - } - - { - const vuint32m4_t sumi_i32 = __riscv_vreinterpret_v_i32m4_u32m4(__riscv_vreinterpret_v_i16m4_i32m4(sumi_l3)); - const vuint16m2_t sumi_h2_0 = __riscv_vnsrl_wx_u16m2(sumi_i32, 0, vl); - const vuint16m2_t sumi_h2_1 = __riscv_vnsrl_wx_u16m2(sumi_i32, 16, vl); - const vuint16m2_t sumi_h2 = __riscv_vadd_vv_u16m2(sumi_h2_0, sumi_h2_1, vl); - const vuint32m2_t sumi_h2_i32 = __riscv_vreinterpret_v_u16m2_u32m2(sumi_h2); - const vuint16m1_t sumi_h4_0 = __riscv_vnsrl_wx_u16m1(sumi_h2_i32, 0, vl / 2); - const vuint16m1_t sumi_h4_1 = __riscv_vnsrl_wx_u16m1(sumi_h2_i32, 16, vl / 2); - const vuint16m1_t sumi_h4 = __riscv_vadd_vv_u16m1(sumi_h4_0, sumi_h4_1, vl / 2); - const vuint32m1_t sumi_h4_i32 = __riscv_vreinterpret_v_u16m1_u32m1(sumi_h4); - const vint16mf2_t sumi_h8_0 = __riscv_vreinterpret_v_u16mf2_i16mf2(__riscv_vnsrl_wx_u16mf2(sumi_h4_i32, 0, vl / 4)); - const vint16mf2_t sumi_h8_1 = __riscv_vreinterpret_v_u16mf2_i16mf2(__riscv_vnsrl_wx_u16mf2(sumi_h4_i32, 16, vl / 4)); - const vint32m1_t sumi_h8 = __riscv_vwadd_vv_i32m1(sumi_h8_0, sumi_h8_1, vl / 4); - const vfloat32m1_t facc = __riscv_vfcvt_f_x_v_f32m1(sumi_h8, vl / 4); - - const vfloat32m1_t tmp1 = __riscv_vfmul_vf_f32m1(facc, a_scales[3], vl / 4); - sumf3 = __riscv_vfmacc_vv_f32m1(sumf3, tmp1, b_scales_vec, vl / 4); - } - } - __riscv_vse32_v_f32m1(&s[(y * 4 + 0) * bs + x * ncols_interleaved], sumf0, vl / 4); - __riscv_vse32_v_f32m1(&s[(y * 4 + 1) * bs + x * ncols_interleaved], sumf1, vl / 4); - __riscv_vse32_v_f32m1(&s[(y * 4 + 2) * bs + x * ncols_interleaved], sumf2, vl / 4); - __riscv_vse32_v_f32m1(&s[(y * 4 + 3) * bs + x * ncols_interleaved], sumf3, vl / 4); - } - } - - return; - } -#endif // #if ! ((defined(_MSC_VER)) && ! 
defined(__clang__)) && defined(__aarch64__) - float sumf[4][8]; - int sumi; - - for (int y = 0; y < nr / 4; y++) { - const block_q8_0x4 * a_ptr = (const block_q8_0x4 *) vy + (y * nb); - for (int x = 0; x < nc / ncols_interleaved; x++) { - const block_q4_0x8 * b_ptr = (const block_q4_0x8 *) vx + (x * nb); - for (int m = 0; m < 4; m++) { - for (int j = 0; j < ncols_interleaved; j++) sumf[m][j] = 0.0; - } - for (int l = 0; l < nb; l++) { - for (int k = 0; k < (qk / (2 * blocklen)); k++) { - for (int m = 0; m < 4; m++) { - for (int j = 0; j < ncols_interleaved; j++) { - sumi = 0; - for (int i = 0; i < blocklen; ++i) { - const int v0 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] << 4); - const int v1 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0xF0); - sumi += ((v0 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i]) + - (v1 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i + qk / 2 * 4])) >> 4; - } - sumf[m][j] += sumi * GGML_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_FP16_TO_FP32(a_ptr[l].d[m]); - } - } - } - } - for (int m = 0; m < 4; m++) { - for (int j = 0; j < ncols_interleaved; j++) - s[(y * 4 + m) * bs + x * ncols_interleaved + j] = sumf[m][j]; - } - } - } -} - -static void ggml_gemm_q4_K_8x8_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { - const int qk = QK_K; - const int nb = n / qk; - const int ncols_interleaved = 8; - const int blocklen = 8; - static const uint32_t kmask1 = 0x3f3f3f3f; - static const uint32_t kmask2 = 0x0f0f0f0f; - static const uint32_t kmask3 = 0x03030303; - - assert (n % qk == 0); - assert (nr % 4 == 0); - assert (nc % ncols_interleaved == 0); - - UNUSED(s); - UNUSED(bs); - UNUSED(vx); - UNUSED(vy); - UNUSED(nr); - UNUSED(nc); - UNUSED(nb); - UNUSED(ncols_interleaved); - UNUSED(blocklen); - -#if defined(__AVX2__) || defined(__AVX512F__) - const block_q4_Kx8 * b_ptr_start = (const block_q4_Kx8 * ) vx; - const block_q8_Kx4 * a_ptr_start = (const block_q8_Kx4 * ) vy; - int64_t b_nb = n / QK_K; - int64_t y = 0; - - // Mask to mask out nibbles from packed bytes - const __m256i m4b = _mm256_set1_epi8(0x0F); - // Permute mask used for easier vector processing at later stages - __m256i requiredOrder = _mm256_set_epi32(3, 2, 1, 0, 7, 6, 5, 4); - int64_t xstart = 0; - int anr = nr - nr % 16;; // Used to align nr with boundary of 16 -#ifdef __AVX512F__ - int anc = nc - nc % 16; // Used to align nc with boundary of 16 - // Mask to mask out nibbles from packed bytes expanded to 512 bit length - const __m512i m4bexpanded = _mm512_set1_epi8(0x0F); - //Take group of four block_q8_Kx4 structures at each pass of the loop and perform dot product operation - for (; y < anr / 4; y += 4) { - - const block_q8_Kx4 * a_ptrs[4]; - - a_ptrs[0] = a_ptr_start + (y * nb); - for (int i = 0; i < 3; ++i) { - a_ptrs[i + 1] = a_ptrs[i] + nb; - } - - // Take group of eight block_q4_kx8 structures at each pass of the loop and perform dot product operation - for (int64_t x = 0; x < anc / 8; x += 2) { - - const block_q4_Kx8 * b_ptr_0 = b_ptr_start + ((x) * b_nb); - const block_q4_Kx8 * b_ptr_1 = b_ptr_start + ((x + 1) * b_nb); - - // Master FP accumulators - __m512 acc_rows[16]; - for (int i = 0; i < 16; i++) { - acc_rows[i] = _mm512_setzero_ps(); - } - - __m512 acc_min_rows[16]; - for (int i = 0; i < 16; i++) { - acc_min_rows[i] = _mm512_setzero_ps(); - } - - // For super block - for (int64_t b = 0; b < nb; b++) { - // Scale values - Load the sixteen scale values 
from two block_q4_kx8 structures - const __m512 col_scale_f32 = GGML_F32Cx8x2_LOAD(b_ptr_0[b].d, b_ptr_1[b].d); - - // dmin values - Load the sixteen dmin values from two block_q4_kx8 structures - const __m512 col_dmin_f32 = GGML_F32Cx8x2_LOAD(b_ptr_0[b].dmin, b_ptr_1[b].dmin); - - // Loop to iterate over the eight sub blocks of a super block - two sub blocks are processed per iteration - for (int sb = 0; sb < QK_K / 64; sb++) { - - const __m256i rhs_raw_mat_0123_0 = _mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + sb * 256)); - const __m256i rhs_raw_mat_4567_0 = _mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + 32 + sb * 256)); - const __m256i rhs_raw_mat_0123_1 = _mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + 64 + sb * 256)); - const __m256i rhs_raw_mat_4567_1 = _mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + 96 + sb * 256)); - const __m256i rhs_raw_mat_0123_2 = _mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + 128 + sb * 256)); - const __m256i rhs_raw_mat_4567_2 = _mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + 160 + sb * 256)); - const __m256i rhs_raw_mat_0123_3 = _mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + 192 + sb * 256)); - const __m256i rhs_raw_mat_4567_3 = _mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + 224 + sb * 256)); - - const __m256i rhs_raw_mat_89AB_0 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + sb * 256)); - const __m256i rhs_raw_mat_CDEF_0 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + 32 + sb * 256)); - const __m256i rhs_raw_mat_89AB_1 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + 64 + sb * 256)); - const __m256i rhs_raw_mat_CDEF_1 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + 96 + sb * 256)); - const __m256i rhs_raw_mat_89AB_2 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + 128 + sb * 256)); - const __m256i rhs_raw_mat_CDEF_2 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + 160 + sb * 256)); - const __m256i rhs_raw_mat_89AB_3 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + 192 + sb * 256)); - const __m256i rhs_raw_mat_CDEF_3 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + 224 + sb * 256)); - - const __m256i rhs_raw_mat_0145_0 = _mm256_blend_epi32(rhs_raw_mat_0123_0, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_0, requiredOrder), 240); - const __m256i rhs_raw_mat_2367_0 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_0, requiredOrder), rhs_raw_mat_4567_0, 240); - const __m256i rhs_raw_mat_0145_1 = _mm256_blend_epi32(rhs_raw_mat_0123_1, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_1, requiredOrder), 240); - const __m256i rhs_raw_mat_2367_1 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_1, requiredOrder), rhs_raw_mat_4567_1, 240); - const __m256i rhs_raw_mat_0145_2 = _mm256_blend_epi32(rhs_raw_mat_0123_2, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_2, requiredOrder), 240); - const __m256i rhs_raw_mat_2367_2 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_2, requiredOrder), rhs_raw_mat_4567_2, 240); - const __m256i rhs_raw_mat_0145_3 = _mm256_blend_epi32(rhs_raw_mat_0123_3, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_3, requiredOrder), 240); - const __m256i rhs_raw_mat_2367_3 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_3, requiredOrder), rhs_raw_mat_4567_3, 240); - - const __m256i rhs_raw_mat_89CD_0 = _mm256_blend_epi32(rhs_raw_mat_89AB_0, _mm256_permutevar8x32_epi32(rhs_raw_mat_CDEF_0, requiredOrder), 240); - const __m256i rhs_raw_mat_ABEF_0 = 
_mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_89AB_0, requiredOrder), rhs_raw_mat_CDEF_0, 240); - const __m256i rhs_raw_mat_89CD_1 = _mm256_blend_epi32(rhs_raw_mat_89AB_1, _mm256_permutevar8x32_epi32(rhs_raw_mat_CDEF_1, requiredOrder), 240); - const __m256i rhs_raw_mat_ABEF_1 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_89AB_1, requiredOrder), rhs_raw_mat_CDEF_1, 240); - const __m256i rhs_raw_mat_89CD_2 = _mm256_blend_epi32(rhs_raw_mat_89AB_2, _mm256_permutevar8x32_epi32(rhs_raw_mat_CDEF_2, requiredOrder), 240); - const __m256i rhs_raw_mat_ABEF_2 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_89AB_2, requiredOrder), rhs_raw_mat_CDEF_2, 240); - const __m256i rhs_raw_mat_89CD_3 = _mm256_blend_epi32(rhs_raw_mat_89AB_3, _mm256_permutevar8x32_epi32(rhs_raw_mat_CDEF_3, requiredOrder), 240); - const __m256i rhs_raw_mat_ABEF_3 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_89AB_3, requiredOrder), rhs_raw_mat_CDEF_3, 240); - - const __m512i rhs_raw_mat_014589CD_0 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_0145_0), rhs_raw_mat_89CD_0, 1); - const __m512i rhs_raw_mat_2367ABEF_0 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_2367_0), rhs_raw_mat_ABEF_0, 1); - const __m512i rhs_raw_mat_014589CD_1 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_0145_1), rhs_raw_mat_89CD_1, 1); - const __m512i rhs_raw_mat_2367ABEF_1 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_2367_1), rhs_raw_mat_ABEF_1, 1); - - const __m512i rhs_raw_mat_014589CD_2 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_0145_2), rhs_raw_mat_89CD_2, 1); - const __m512i rhs_raw_mat_2367ABEF_2 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_2367_2), rhs_raw_mat_ABEF_2, 1); - const __m512i rhs_raw_mat_014589CD_3 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_0145_3), rhs_raw_mat_89CD_3, 1); - const __m512i rhs_raw_mat_2367ABEF_3 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_2367_3), rhs_raw_mat_ABEF_3, 1); - - //4-bit -> 8-bit - const __m512i rhs_mat_014589CD_00 = _mm512_and_si512(rhs_raw_mat_014589CD_0, m4bexpanded); //B00(0-7) B01(0-7) B04(0-7) B05(0-7) B08(0-7) B09(0-7) B0C(0-7) B0D(0-7) - const __m512i rhs_mat_2367ABEF_00 = _mm512_and_si512(rhs_raw_mat_2367ABEF_0, m4bexpanded); //B02(0-7) B03(0-7) B06(0-7) B07(0-7) B0A(0-7) B0B(0-7) B0E(0-7) B0F(0-7) - const __m512i rhs_mat_014589CD_01 = _mm512_and_si512(rhs_raw_mat_014589CD_1, m4bexpanded); //B00(8-15) B01(8-15) B04(8-15) B05(8-15) B08(8-15) B09(8-15) B0C(8-15) B0D(8-15) - const __m512i rhs_mat_2367ABEF_01 = _mm512_and_si512(rhs_raw_mat_2367ABEF_1, m4bexpanded); //B02(8-15) B03(8-15) B06(8-15) B07(8-15) B0A(8-15) B0B(8-15) B0E(8-15) B0F(8-15) - - const __m512i rhs_mat_014589CD_02 = _mm512_and_si512(rhs_raw_mat_014589CD_2, m4bexpanded); //B00(16-23) B01(16-23) B04(16-23) B05(16-23) B08(16-23) B09(16-23) B0C(16-23) B0D(16-23) - const __m512i rhs_mat_2367ABEF_02 = _mm512_and_si512(rhs_raw_mat_2367ABEF_2, m4bexpanded); //B02(16-23) B03(16-23) B06(16-23) B07(16-23) B0A(16-23) B0B(16-23) B0E(16-23) B0F(16-23) - const __m512i rhs_mat_014589CD_03 = _mm512_and_si512(rhs_raw_mat_014589CD_3, m4bexpanded); //B00(24-31) B01(24-31) B04(24-31) B05(24-31) B08(24-31) B09(24-31) B0C(24-31) B0D(24-31) - const __m512i rhs_mat_2367ABEF_03 = _mm512_and_si512(rhs_raw_mat_2367ABEF_3, m4bexpanded); //B02(24-31) B03(24-31) B06(24-31) B07(24-31) B0A(24-31) B0B(24-31) B0E(24-31) B0F(24-31) - - const __m512i rhs_mat_014589CD_10 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_0, 4), 
m4bexpanded); //B10(0-7) B11(0-7) B14(0-7) B15(0-7) B18(0-7) B19(0-7) B1C(0-7) B1D(0-7) - const __m512i rhs_mat_2367ABEF_10 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_0, 4), m4bexpanded); //B12(0-7) B13(0-7) B16(0-7) B17(0-7) B1A(0-7) B1B(0-7) B1E(0-7) B1F(0-7) - const __m512i rhs_mat_014589CD_11 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_1, 4), m4bexpanded); //B10(8-15) B11(8-15) B14(8-15) B15(8-15) B18(8-15) B19(8-15) B1C(8-15) B1D(8-15) - const __m512i rhs_mat_2367ABEF_11 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_1, 4), m4bexpanded); //B12(8-15) B13(8-15) B16(8-15) B17(8-15) B1A(8-15) B1B(8-15) B1E(8-15) B1F(8-15) - - const __m512i rhs_mat_014589CD_12 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_2, 4), m4bexpanded); //B10(16-23) B11(16-23) B14(16-23) B15(16-23) B18(16-23) B19(16-23) B1C(16-23) B1D(16-23) - const __m512i rhs_mat_2367ABEF_12 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_2, 4), m4bexpanded); //B12(16-23) B13(16-23) B16(16-23) B17(16-23) B1A(16-23) B1B(16-23) B1E(16-23) B1F(16-23) - const __m512i rhs_mat_014589CD_13 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_3, 4), m4bexpanded); //B10(24-31) B11(24-31) B14(24-31) B15(24-31) B18(24-31) B19(24-31) B1C(24-31) B1D(24-31) - const __m512i rhs_mat_2367ABEF_13 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_3, 4), m4bexpanded); //B12(24-31) B13(24-31) B16(24-31) B17(24-31) B1A(24-31) B1B(24-31) B1E(24-31) B1F(24-31) - - // Shuffle pattern one - right side input - const __m512i rhs_mat_014589CD_00_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_00, (_MM_PERM_ENUM)136); //B00(0-3) B01(0-3) B00(0-3) B01(0-3) B04(0-3) B05(0-3) B04(0-3) B05(0-3) B08(0-3) B09(0-3) B08(0-3) B09(0-3) B0C(0-3) B0D(0-3) B0C(0-3) B0D(0-3) - const __m512i rhs_mat_2367ABEF_00_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_00, (_MM_PERM_ENUM)136); //B02(0-3) B03(0-3) B02(0-3) B03(0-3) B06(0-3) B07(0-3) B06(0-3) B07(0-3) B0A(0-3) B0B(0-3) B0A(0-3) B0B(0-3) B0E(0-3) B0F(0-3) B0E(0-3) B0F(0-3) - const __m512i rhs_mat_014589CD_01_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_01, (_MM_PERM_ENUM)136); //B00(8-11) B01(8-11) B00(8-11) B01(8-11) B04(8-11) B05(8-11) B04(8-11) B05(8-11) B08(8-11) B09(8-11) B08(8-11) B09(8-11) B0C(8-11) B0D(8-11) B0C(8-11) B0D(8-11) - const __m512i rhs_mat_2367ABEF_01_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_01, (_MM_PERM_ENUM)136); //B02(8-11) B03(8-11) B02(8-11) B03(8-11) B06(8-11) B07(8-11) B06(8-11) B07(8-11) B0A(8-11) B0B(8-11) B0A(8-11) B0B(8-11) B0E(8-11) B0F(8-11) B0E(8-11) B0F(8-11) - const __m512i rhs_mat_014589CD_02_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_02, (_MM_PERM_ENUM)136); //B00(16-19) B01(16-19) B00(16-19) B01(16-19) B04(16-19) B05(16-19) B04(16-19) B05(16-19) B08(16-19) B09(16-19) B08(16-19) B09(16-19) B0C(16-19) B0D(16-19) B0C(16-19) B0D(16-19) - const __m512i rhs_mat_2367ABEF_02_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_02, (_MM_PERM_ENUM)136); //B02(16-19) B03(16-19) B02(16-19) B03(16-19) B06(16-19) B07(16-19) B06(16-19) B07(16-19) B0A(16-19) B0B(16-19) B0A(16-19) B0B(16-19) B0E(16-19) B0F(16-19) B0E(16-19) B0F(16-19) - const __m512i rhs_mat_014589CD_03_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_03, (_MM_PERM_ENUM)136); //B00(24-27) B01(24-27) B00(24-27) B01(24-27) B04(24-27) B05(24-27) B04(24-27) B05(24-27) B08(24-27) B09(24-27) B08(24-27) B09(24-27) B0C(24-27) B0D(24-27) B0C(24-27) B0D(24-27) - const __m512i rhs_mat_2367ABEF_03_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_03, (_MM_PERM_ENUM)136); //B02(24-27) B03(24-27) 
B02(24-27) B03(24-27) B06(24-27) B07(24-27) B06(24-27) B07(24-27) B0A(24-27) B0B(24-27) B0A(24-27) B0B(24-27) B0E(24-27) B0F(24-27) B0E(24-27) B0F(24-27) - - const __m512i rhs_mat_014589CD_10_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_10, (_MM_PERM_ENUM)136); //B10(0-3) B11(0-3) B10(0-3) B11(0-3) B14(0-3) B15(0-3) B14(0-3) B15(0-3) B18(0-3) B19(0-3) B18(0-3) B19(0-3) B1C(0-3) B1D(0-3) B1C(0-3) B1D(0-3) - const __m512i rhs_mat_2367ABEF_10_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_10, (_MM_PERM_ENUM)136); //B12(0-3) B13(0-3) B12(0-3) B13(0-3) B16(0-3) B17(0-3) B16(0-3) B17(0-3) B1A(0-3) B1B(0-3) B1A(0-3) B1B(0-3) B1E(0-3) B1F(0-3) B1E(0-3) B1F(0-3) - const __m512i rhs_mat_014589CD_11_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_11, (_MM_PERM_ENUM)136); //B10(8-11) B11(8-11) B10(8-11) B11(8-11) B14(8-11) B15(8-11) B14(8-11) B15(8-11) B18(8-11) B19(8-11) B18(8-11) B19(8-11) B1C(8-11) B1D(8-11) B1C(8-11) B1D(8-11) - const __m512i rhs_mat_2367ABEF_11_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_11, (_MM_PERM_ENUM)136); //B12(8-11) B13(8-11) B12(8-11) B13(8-11) B16(8-11) B17(8-11) B16(8-11) B17(8-11) B1A(8-11) B1B(8-11) B1A(8-11) B1B(8-11) B1E(8-11) B1F(8-11) B1E(8-11) B1F(8-11) - const __m512i rhs_mat_014589CD_12_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_12, (_MM_PERM_ENUM)136); //B10(16-19) B11(16-19) B10(16-19) B11(16-19) B14(16-19) B15(16-19) B14(16-19) B15(16-19) B18(16-19) B19(16-19) B18(16-19) B19(16-19) B1C(16-19) B1D(16-19) B1C(16-19) B1D(16-19) - const __m512i rhs_mat_2367ABEF_12_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_12, (_MM_PERM_ENUM)136); //B12(16-19) B13(16-19) B12(16-19) B13(16-19) B16(16-19) B17(16-19) B16(16-19) B17(16-19) B1A(16-19) B1B(16-19) B1A(16-19) B1B(16-19) B1E(16-19) B1F(16-19) B1E(16-19) B1F(16-19) - const __m512i rhs_mat_014589CD_13_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_13, (_MM_PERM_ENUM)136); //B10(24-27) B11(24-27) B10(24-27) B11(24-27) B14(24-27) B15(24-27) B14(24-27) B15(24-27) B18(24-27) B19(24-27) B18(24-27) B19(24-27) B1C(24-27) B1D(24-27) B1C(24-27) B1D(24-27) - const __m512i rhs_mat_2367ABEF_13_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_13, (_MM_PERM_ENUM)136); //B12(24-27) B13(24-27) B12(24-27) B13(24-27) B16(24-27) B17(24-27) B16(24-27) B17(24-27) B1A(24-27) B1B(24-27) B1A(24-27) B1B(24-27) B1E(24-27) B1F(24-27) B1E(24-27) B1F(24-27) - - // Shuffle pattern two - right side input - const __m512i rhs_mat_014589CD_00_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_00, (_MM_PERM_ENUM)221); //B00(4-7) B01(4-7) B00(4-7) B01(4-7) B04(4-7) B05(4-7) B04(4-7) B05(4-7) B08(4-7) B09(4-7) B08(4-7) B09(4-7) B0C(4-7) B0D(4-7) B0C(4-7) B0D(4-7) - const __m512i rhs_mat_2367ABEF_00_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_00, (_MM_PERM_ENUM)221); //B02(4-7) B03(4-7) B02(4-7) B03(4-7) B06(4-7) B07(4-7) B06(4-7) B07(4-7) B0A(4-7) B0B(4-7) B0A(4-7) B0B(4-7) B0E(4-7) B0F(4-7) B0E(4-7) B0F(4-7) - const __m512i rhs_mat_014589CD_01_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_01, (_MM_PERM_ENUM)221); //B00(12-15) B01(12-15) B00(12-15) B01(12-15) B04(12-15) B05(12-15) B04(12-15) B05(12-15) B08(12-15) B09(12-15) B08(12-15) B09(12-15) B0C(12-15) B0D(12-15) B0C(12-15) B0D(12-15) - const __m512i rhs_mat_2367ABEF_01_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_01, (_MM_PERM_ENUM)221); //B02(12-15) B03(12-15) B02(12-15) B03(12-15) B06(12-15) B07(12-15) B06(12-15) B07(12-15) B0A(12-15) B0B(12-15) B0A(12-15) B0B(12-15) B0E(12-15) B0F(12-15) B0E(12-15) B0F(12-15) - const __m512i rhs_mat_014589CD_02_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_02, (_MM_PERM_ENUM)221); 
//B00(20-23) B01(20-23) B00(20-23) B01(20-23) B04(20-23) B05(20-23) B04(20-23) B05(20-23) B08(20-23) B09(20-23) B08(20-23) B09(20-23) B0C(20-23) B0D(20-23) B0C(20-23) B0D(20-23) - const __m512i rhs_mat_2367ABEF_02_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_02, (_MM_PERM_ENUM)221); //B02(20-23) B03(20-23) B02(20-23) B03(20-23) B06(20-23) B07(20-23) B06(20-23) B07(20-23) B0A(20-23) B0B(20-23) B0A(20-23) B0B(20-23) B0E(20-23) B0F(20-23) B0E(20-23) B0F(20-23) - const __m512i rhs_mat_014589CD_03_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_03, (_MM_PERM_ENUM)221); //B00(28-31) B01(28-31) B00(28-31) B01(28-31) B04(28-31) B05(28-31) B04(28-31) B05(28-31) B08(28-31) B09(28-31) B08(28-31) B09(28-31) B0C(28-31) B0D(28-31) B0C(28-31) 0BD(28-31) - const __m512i rhs_mat_2367ABEF_03_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_03, (_MM_PERM_ENUM)221); //B02(28-31) B03(28-31) B02(28-31) B03(28-31) B06(28-31) B07(28-31) B06(28-31) B07(28-31) B0A(28-31) B0B(28-31) B0A(28-31) B0B(28-31) B0E(28-31) B0F(28-31) B0E(28-31) B0F(28-31) - - const __m512i rhs_mat_014589CD_10_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_10, (_MM_PERM_ENUM)221); //B10(4-7) B11(4-7) B10(4-7) B11(4-7) B14(4-7) B15(4-7) B14(4-7) B15(4-7) B18(4-7) B19(4-7) B18(4-7) B19(4-7) B1C(4-7) B1D(4-7) B1C(4-7) B1D(4-7) - const __m512i rhs_mat_2367ABEF_10_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_10, (_MM_PERM_ENUM)221); //B12(4-7) B13(4-7) B12(4-7) B13(4-7) B16(4-7) B17(4-7) B16(4-7) B17(4-7) B1A(4-7) B1B(4-7) B1A(4-7) B1B(4-7) B1E(4-7) B1F(4-7) B1E(4-7) B1F(4-7) - const __m512i rhs_mat_014589CD_11_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_11, (_MM_PERM_ENUM)221); //B10(12-15) B11(12-15) B10(12-15) B11(12-15) B14(12-15) B15(12-15) B14(12-15) B15(12-15) B18(12-15) B19(12-15) B18(12-15) B19(12-15) B1C(12-15) B1D(12-15) B1C(12-15) B1D(12-15) - const __m512i rhs_mat_2367ABEF_11_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_11, (_MM_PERM_ENUM)221); //B12(12-15) B13(12-15) B12(12-15) B13(12-15) B16(12-15) B17(12-15) B16(12-15) B17(12-15) B1A(12-15) B1B(12-15) B1A(12-15) B1B(12-15) B1E(12-15) B1F(12-15) B1E(12-15) B1F(12-15) - const __m512i rhs_mat_014589CD_12_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_12, (_MM_PERM_ENUM)221); //B10(20-23) B11(20-23) B10(20-23) B11(20-23) B14(20-23) B15(20-23) B14(20-23) B15(20-23) B18(20-23) B19(20-23) B18(20-23) B19(20-23) B1C(20-23) B1D(20-23) B1C(20-23) B1D(20-23) - const __m512i rhs_mat_2367ABEF_12_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_12, (_MM_PERM_ENUM)221); //B12(20-23) B13(20-23) B12(20-23) B13(20-23) B16(20-23) B17(20-23) B16(20-23) B17(20-23) B1A(20-23) B1B(20-23) B1A(20-23) B1B(20-23) B1E(20-23) B1F(20-23) B1E(20-23) B1F(20-23) - const __m512i rhs_mat_014589CD_13_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_13, (_MM_PERM_ENUM)221); //B10(28-31) B11(28-31) B10(28-31) B11(28-31) B14(28-31) B15(28-31) B14(28-31) B15(28-31) B18(28-31) B19(28-31) B18(28-31) B19(28-31) B1C(28-31) B1D(28-31) B1C(28-31) B1D(28-31) - const __m512i rhs_mat_2367ABEF_13_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_13, (_MM_PERM_ENUM)221); //B12(28-31) B13(28-31) B12(28-31) B13(28-31) B16(28-31) B17(28-31) B16(28-31) B17(28-31) B1A(28-31) B1B(28-31) B1A(28-31) B1B(28-31) B1E(28-31) B1F(28-31) B1E(28-31) B1F(28-31) - - uint32_t utmp_00[4], utmp_01[4], utmp_10[4], utmp_11[4]; - - // Scales and Mins of corresponding sub blocks from different Q4_K structures are stored together - // The below block is for eg to extract first sub block's scales and mins from different Q4_K structures for the sb loop - memcpy(utmp_00, b_ptr_0[b].scales + 
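/* Editorial note: each Q4_K superblock packs eight 6-bit scales and eight
   6-bit mins into 12 bytes, and the utmp_* blocks below are the standard
   unpack of that layout: kmask1 = 0x3f3f3f3f keeps the low-6-bit fields,
   while kmask2 = 0x0f0f0f0f and kmask3 = 0x03030303 gather the split high
   bits. A scalar equivalent for one 12-byte group q[] (get_scale_min_model
   is an illustrative helper, not part of this kernel):

       static void get_scale_min_model(int j, const uint8_t * q,
                                       uint8_t * sc, uint8_t * mn) {
           if (j < 4) {   // scales/mins 0-3 live in the low 6 bits
               *sc = q[j] & 63;
               *mn = q[j + 4] & 63;
           } else {       // scales/mins 4-7 are stitched from split fields
               *sc = (q[j + 4] & 0x0F) | ((q[j - 4] >> 6) << 4);
               *mn = (q[j + 4] >>  4) | ((q[j    ] >> 6) << 4);
           }
       }
*/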
24 * sb, 12);
- utmp_00[3] = ((utmp_00[2] >> 4) & kmask2) | (((utmp_00[1] >> 6) & kmask3) << 4);
- const uint32_t uaux_00 = utmp_00[1] & kmask1;
- utmp_00[1] = (utmp_00[2] & kmask2) | (((utmp_00[0] >> 6) & kmask3) << 4);
- utmp_00[2] = uaux_00;
- utmp_00[0] &= kmask1;
-
- // The block below, for example, extracts the second sub block's scales and mins from the different Q4_K structures for this sb iteration
- memcpy(utmp_01, b_ptr_0[b].scales + 12 + sb * 24, 12);
- utmp_01[3] = ((utmp_01[2] >> 4) & kmask2) | (((utmp_01[1] >> 6) & kmask3) << 4);
- const uint32_t uaux_01 = utmp_01[1] & kmask1;
- utmp_01[1] = (utmp_01[2] & kmask2) | (((utmp_01[0] >> 6) & kmask3) << 4);
- utmp_01[2] = uaux_01;
- utmp_01[0] &= kmask1;
-
- memcpy(utmp_10, b_ptr_1[b].scales + sb * 24, 12);
- utmp_10[3] = ((utmp_10[2] >> 4) & kmask2) | (((utmp_10[1] >> 6) & kmask3) << 4);
- const uint32_t uaux_10 = utmp_10[1] & kmask1;
- utmp_10[1] = (utmp_10[2] & kmask2) | (((utmp_10[0] >> 6) & kmask3) << 4);
- utmp_10[2] = uaux_10;
- utmp_10[0] &= kmask1;
-
- // The block below extracts the second sub block's scales and mins from the second set of Q4_K structures for this sb iteration
- memcpy(utmp_11, b_ptr_1[b].scales + 12 + sb * 24, 12);
- utmp_11[3] = ((utmp_11[2] >> 4) & kmask2) | (((utmp_11[1] >> 6) & kmask3) << 4);
- const uint32_t uaux_11 = utmp_11[1] & kmask1;
- utmp_11[1] = (utmp_11[2] & kmask2) | (((utmp_11[0] >> 6) & kmask3) << 4);
- utmp_11[2] = uaux_11;
- utmp_11[0] &= kmask1;
-
- // Scales of first sub block in the sb loop
- const __m256i mins_and_scales_0 = _mm256_set_epi32(utmp_10[3], utmp_10[2], utmp_10[1], utmp_10[0], utmp_00[3], utmp_00[2], utmp_00[1], utmp_00[0]);
- const __m512i scales_0 = _mm512_cvtepu8_epi16(_mm256_unpacklo_epi8(mins_and_scales_0, mins_and_scales_0));
-
- // Scales of second sub block in the sb loop
- const __m256i mins_and_scales_1 = _mm256_set_epi32(utmp_11[3], utmp_11[2], utmp_11[1], utmp_11[0], utmp_01[3], utmp_01[2], utmp_01[1], utmp_01[0]);
- const __m512i scales_1 = _mm512_cvtepu8_epi16(_mm256_unpacklo_epi8(mins_and_scales_1, mins_and_scales_1));
-
- // Mins of first and second sub block of Q4_K block are arranged side by side
- const __m512i mins_01 = _mm512_cvtepu8_epi16(_mm256_unpacklo_epi8(_mm256_shuffle_epi32(mins_and_scales_0, 78), _mm256_shuffle_epi32(mins_and_scales_1, 78)));
-
- const __m512i scale_014589CD_0 = _mm512_shuffle_epi32(scales_0, (_MM_PERM_ENUM)68);
- const __m512i scale_2367ABEF_0 = _mm512_shuffle_epi32(scales_0, (_MM_PERM_ENUM)238);
-
- const __m512i scale_014589CD_1 = _mm512_shuffle_epi32(scales_1, (_MM_PERM_ENUM)68);
- const __m512i scale_2367ABEF_1 = _mm512_shuffle_epi32(scales_1, (_MM_PERM_ENUM)238);
-
- for (int rp = 0; rp < 4; rp++) {
-
- // Load the four block_q8_K quantized values interleaved with each other in chunks of eight bytes - A0,A1,A2,A3
- // Loaded as a set of 128-bit vectors, repeated into a 256-bit vector, and then repeated again into a 512-bit vector
- __m256i lhs_mat_ymm_0123_00 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 256 * sb)));
- __m256i lhs_mat_ymm_01_00 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_00, lhs_mat_ymm_0123_00, 0);
- __m256i lhs_mat_ymm_23_00 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_00, lhs_mat_ymm_0123_00, 17);
- __m256i lhs_mat_ymm_0123_01 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 32 + 256 * sb)));
- __m256i lhs_mat_ymm_01_01 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_01, lhs_mat_ymm_0123_01, 0);
- __m256i lhs_mat_ymm_23_01 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_01, 
lhs_mat_ymm_0123_01, 17); - __m256i lhs_mat_ymm_0123_02 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 64 + 256 * sb))); - __m256i lhs_mat_ymm_01_02 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_02, lhs_mat_ymm_0123_02, 0); - __m256i lhs_mat_ymm_23_02 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_02, lhs_mat_ymm_0123_02, 17); - __m256i lhs_mat_ymm_0123_03 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 96 + 256 * sb))); - __m256i lhs_mat_ymm_01_03 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_03, lhs_mat_ymm_0123_03, 0); - __m256i lhs_mat_ymm_23_03 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_03, lhs_mat_ymm_0123_03, 17); - __m256i lhs_mat_ymm_0123_10 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 128 + 256 * sb))); - __m256i lhs_mat_ymm_01_10 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_10, lhs_mat_ymm_0123_10, 0); - __m256i lhs_mat_ymm_23_10 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_10, lhs_mat_ymm_0123_10, 17); - __m256i lhs_mat_ymm_0123_11 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 160 + 256 * sb))); - __m256i lhs_mat_ymm_01_11 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_11, lhs_mat_ymm_0123_11, 0); - __m256i lhs_mat_ymm_23_11 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_11, lhs_mat_ymm_0123_11, 17); - __m256i lhs_mat_ymm_0123_12 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 192 + 256 * sb))); - __m256i lhs_mat_ymm_01_12 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_12, lhs_mat_ymm_0123_12, 0); - __m256i lhs_mat_ymm_23_12 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_12, lhs_mat_ymm_0123_12, 17); - __m256i lhs_mat_ymm_0123_13 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 224 + 256 * sb))); - __m256i lhs_mat_ymm_01_13 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_13, lhs_mat_ymm_0123_13, 0); - __m256i lhs_mat_ymm_23_13 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_13, lhs_mat_ymm_0123_13, 17); - - __m512i lhs_mat_01_00 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_00), lhs_mat_ymm_01_00, 1); - __m512i lhs_mat_23_00 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_00), lhs_mat_ymm_23_00, 1); - __m512i lhs_mat_01_01 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_01), lhs_mat_ymm_01_01, 1); - __m512i lhs_mat_23_01 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_01), lhs_mat_ymm_23_01, 1); - __m512i lhs_mat_01_02 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_02), lhs_mat_ymm_01_02, 1); - __m512i lhs_mat_23_02 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_02), lhs_mat_ymm_23_02, 1); - __m512i lhs_mat_01_03 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_03), lhs_mat_ymm_01_03, 1); - __m512i lhs_mat_23_03 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_03), lhs_mat_ymm_23_03, 1); - - __m512i lhs_mat_01_10 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_10), lhs_mat_ymm_01_10, 1); - __m512i lhs_mat_23_10 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_10), lhs_mat_ymm_23_10, 1); - __m512i lhs_mat_01_11 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_11), lhs_mat_ymm_01_11, 1); - __m512i lhs_mat_23_11 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_11), lhs_mat_ymm_23_11, 1); - __m512i lhs_mat_01_12 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_12), lhs_mat_ymm_01_12, 1); - __m512i lhs_mat_23_12 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_12), lhs_mat_ymm_23_12, 1); - __m512i lhs_mat_01_13 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_13), 
lhs_mat_ymm_01_13, 1); - __m512i lhs_mat_23_13 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_13), lhs_mat_ymm_23_13, 1); - - // Bsums are loaded - four bsums are loaded (for two sub blocks) for the different Q8_K blocks - __m256i lhs_bsums_0123_01 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].bsums + 16 * sb))); - __m256i lhs_bsums_hsum_ymm_0123_01 = _mm256_castsi128_si256(_mm_hadd_epi16(_mm256_castsi256_si128(lhs_bsums_0123_01), _mm256_extractf128_si256(lhs_bsums_0123_01, 1))); - lhs_bsums_hsum_ymm_0123_01 = _mm256_permute2x128_si256(lhs_bsums_hsum_ymm_0123_01, lhs_bsums_hsum_ymm_0123_01, 0); - __m512i lhs_bsums_hsum_0123_01 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_bsums_hsum_ymm_0123_01), lhs_bsums_hsum_ymm_0123_01, 1); - - // Shuffle pattern one - left side input - const __m512i lhs_mat_01_00_sp1 = _mm512_shuffle_epi32(lhs_mat_01_00, (_MM_PERM_ENUM)160); //A00(0-3) A00(0-3) A01(0-3) A01(0-3) A00(0-3) A00(0-3) A01(0-3) A01(0-3) A00(0-3) A00(0-3) A01(0-3) A01(0-3) A00(0-3) A00(0-3) A01(0-3) A01(0-3) - const __m512i lhs_mat_23_00_sp1 = _mm512_shuffle_epi32(lhs_mat_23_00, (_MM_PERM_ENUM)160); //A02(0-3) A02(0-3) A03(0-3) A03(0-3) A02(0-3) A02(0-3) A03(0-3) A03(0-3) A02(0-3) A02(0-3) A03(0-3) A03(0-3) A02(0-3) A02(0-3) A03(0-3) A03(0-3) - const __m512i lhs_mat_01_01_sp1 = _mm512_shuffle_epi32(lhs_mat_01_01, (_MM_PERM_ENUM)160); //A00(8-11) A00(8-11) A01(8-11) A01(8-11) A00(8-11) A00(8-11) A01(8-11) A01(8-11) A00(8-11) A00(8-11) A01(8-11) A01(8-11) A00(8-11) A00(8-11) A01(8-11) A01(8-11) - const __m512i lhs_mat_23_01_sp1 = _mm512_shuffle_epi32(lhs_mat_23_01, (_MM_PERM_ENUM)160); //A02(8-11) A02(8-11) A03(8-11) A03(8-11) A02(8-11) A02(8-11) A03(8-11) A03(8-11) A02(8-11) A02(8-11) A03(8-11) A03(8-11) A02(8-11) A02(8-11) A03(8-11) A03(8-11) - const __m512i lhs_mat_01_02_sp1 = _mm512_shuffle_epi32(lhs_mat_01_02, (_MM_PERM_ENUM)160); //A00(16-19) A00(16-19) A01(16-19) A01(16-19) A00(16-19) A00(16-19) A01(16-19) A01(16-19) A00(16-19) A00(16-19) A01(16-19) A01(16-19) A00(16-19) A00(16-19) A01(16-19) A01(16-19) - const __m512i lhs_mat_23_02_sp1 = _mm512_shuffle_epi32(lhs_mat_23_02, (_MM_PERM_ENUM)160); //A02(16-19) A02(16-19) A03(16-19) A03(16-19) A02(16-19) A02(16-19) A03(16-19) A03(16-19) A02(16-19) A02(16-19) A03(16-19) A03(16-19) A02(16-19) A02(16-19) A03(16-19) A03(16-19) - const __m512i lhs_mat_01_03_sp1 = _mm512_shuffle_epi32(lhs_mat_01_03, (_MM_PERM_ENUM)160); //A00(24-27) A00(24-27) A01(24-27) A01(24-27) A00(24-27) A00(24-27) A01(24-27) A01(24-27) A00(24-27) A00(24-27) A01(24-27) A01(24-27) A00(24-27) A00(24-27) A01(24-27) A01(24-27) - const __m512i lhs_mat_23_03_sp1 = _mm512_shuffle_epi32(lhs_mat_23_03, (_MM_PERM_ENUM)160); //A02(24-27) A02(24-27) A03(24-27) A03(24-27) A02(24-27) A02(24-27) A03(24-27) A03(24-27) A02(24-27) A02(24-27) A03(24-27) A03(24-27) A02(24-27) A02(24-27) A03(24-27) A03(24-27) - - const __m512i lhs_mat_01_10_sp1 = _mm512_shuffle_epi32(lhs_mat_01_10, (_MM_PERM_ENUM)160); //A10(0-3) A10(0-3) A11(0-3) A11(0-3) A10(0-3) A10(0-3) A11(0-3) A11(0-3) A10(0-3) A10(0-3) A11(0-3) A11(0-3) A10(0-3) A10(0-3) A11(0-3) A11(0-3) - const __m512i lhs_mat_23_10_sp1 = _mm512_shuffle_epi32(lhs_mat_23_10, (_MM_PERM_ENUM)160); //A12(0-3) A12(0-3) A13(0-3) A13(0-3) A12(0-3) A12(0-3) A13(0-3) A13(0-3) A12(0-3) A12(0-3) A13(0-3) A13(0-3) A12(0-3) A12(0-3) A13(0-3) A13(0-3) - const __m512i lhs_mat_01_11_sp1 = _mm512_shuffle_epi32(lhs_mat_01_11, (_MM_PERM_ENUM)160); //A10(8-11) A10(8-11) A11(8-11) A11(8-11) A10(8-11) A10(8-11) A11(8-11) A11(8-11) A10(8-11) A10(8-11) 
A11(8-11) A11(8-11) A10(8-11) A10(8-11) A11(8-11) A11(8-11) - const __m512i lhs_mat_23_11_sp1 = _mm512_shuffle_epi32(lhs_mat_23_11, (_MM_PERM_ENUM)160); //A12(8-11) A12(8-11) A13(8-11) A13(8-11) A12(8-11) A12(8-11) A13(8-11) A13(8-11) A12(8-11) A12(8-11) A13(8-11) A13(8-11) A12(8-11) A12(8-11) A13(8-11) A13(8-11) - const __m512i lhs_mat_01_12_sp1 = _mm512_shuffle_epi32(lhs_mat_01_12, (_MM_PERM_ENUM)160); //A10(16-19) A10(16-19) A11(16-19) A11(16-19) A10(16-19) A10(16-19) A11(16-19) A11(16-19) A10(16-19) A10(16-19) A11(16-19) A11(16-19) A10(16-19) A10(16-19) A11(16-19) A11(16-19) - const __m512i lhs_mat_23_12_sp1 = _mm512_shuffle_epi32(lhs_mat_23_12, (_MM_PERM_ENUM)160); //A12(16-19) A12(16-19) A13(16-19) A13(16-19) A12(16-19) A12(16-19) A13(16-19) A13(16-19) A12(16-19) A12(16-19) A13(16-19) A13(16-19) A12(16-19) A12(16-19) A13(16-19) A13(16-19) - const __m512i lhs_mat_01_13_sp1 = _mm512_shuffle_epi32(lhs_mat_01_13, (_MM_PERM_ENUM)160); //A10(24-27) A10(24-27) A11(24-27) A11(24-27) A10(24-27) A10(24-27) A11(24-27) A11(24-27) A10(24-27) A10(24-27) A11(24-27) A11(24-27) A10(24-27) A10(24-27) A11(24-27) A11(24-27) - const __m512i lhs_mat_23_13_sp1 = _mm512_shuffle_epi32(lhs_mat_23_13, (_MM_PERM_ENUM)160); //A12(24-27) A12(24-27) A13(24-27) A13(24-27) A12(24-27) A12(24-27) A13(24-27) A13(24-27) A12(24-27) A12(24-27) A13(24-27) A13(24-27) A12(24-27) A12(24-27) A13(24-27) A13(24-27) - - const __m512i lhs_mat_01_00_sp2 = _mm512_shuffle_epi32(lhs_mat_01_00, (_MM_PERM_ENUM)245); //A00(4-7) A00(4-7) A01(4-7) A01(4-7) A00(4-7) A00(4-7) A01(4-7) A01(4-7) A00(4-7) A00(4-7) A01(4-7) A01(4-7) A00(4-7) A00(4-7) A01(4-7) A01(4-7) - const __m512i lhs_mat_23_00_sp2 = _mm512_shuffle_epi32(lhs_mat_23_00, (_MM_PERM_ENUM)245); //A02(4-7) A02(4-7) A03(4-7) A03(4-7) A02(4-7) A02(4-7) A03(4-7) A03(4-7) A02(4-7) A02(4-7) A03(4-7) A03(4-7) A02(4-7) A02(4-7) A03(4-7) A03(4-7) - const __m512i lhs_mat_01_01_sp2 = _mm512_shuffle_epi32(lhs_mat_01_01, (_MM_PERM_ENUM)245); //A00(12-15) A00(12-15) A01(12-15) A01(12-15) A00(12-15) A00(12-15) A01(12-15) A01(12-15) A00(12-15) A00(12-15) A01(12-15) A01(12-15) A00(12-15) A00(12-15) A01(12-15) A01(12-15) - const __m512i lhs_mat_23_01_sp2 = _mm512_shuffle_epi32(lhs_mat_23_01, (_MM_PERM_ENUM)245); //A02(12-15) A02(12-15) A03(12-15) A03(12-15) A02(12-15) A02(12-15) A03(12-15) A03(12-15) A02(12-15) A02(12-15) A03(12-15) A03(12-15) A02(12-15) A02(12-15) A03(12-15) A03(12-15) - const __m512i lhs_mat_01_02_sp2 = _mm512_shuffle_epi32(lhs_mat_01_02, (_MM_PERM_ENUM)245); //A00(20-23) A00(20-23) A01(20-23) A01(20-23) A00(20-23) A00(20-23) A01(20-23) A01(20-23) A00(20-23) A00(20-23) A01(20-23) A01(20-23) A00(20-23) A00(20-23) A01(20-23) A01(20-23) - const __m512i lhs_mat_23_02_sp2 = _mm512_shuffle_epi32(lhs_mat_23_02, (_MM_PERM_ENUM)245); //A02(20-23) A02(20-23) A03(20-23) A03(20-23) A02(20-23) A02(20-23) A03(20-23) A03(20-23) A02(20-23) A02(20-23) A03(20-23) A03(20-23) A02(20-23) A02(20-23) A03(20-23) A03(20-23) - const __m512i lhs_mat_01_03_sp2 = _mm512_shuffle_epi32(lhs_mat_01_03, (_MM_PERM_ENUM)245); //A00(28-31) A00(28-31) A01(28-31) A01(28-31) A00(28-31) A00(28-31) A01(28-31) A01(28-31) A00(28-31) A00(28-31) A01(28-31) A01(28-31) A00(28-31) A00(28-31) A01(28-31) A01(28-31) - const __m512i lhs_mat_23_03_sp2 = _mm512_shuffle_epi32(lhs_mat_23_03, (_MM_PERM_ENUM)245); //A02(28-31) A02(28-31) A03(28-31) A03(28-31) A02(28-31) A02(28-31) A03(28-31) A03(28-31) A02(28-31) A02(28-31) A03(28-31) A03(28-31) A02(28-31) A02(28-31) A03(28-31) A03(28-31) - - const __m512i lhs_mat_01_10_sp2 = 
_mm512_shuffle_epi32(lhs_mat_01_10, (_MM_PERM_ENUM)245); //A10(4-7) A10(4-7) A11(4-7) A11(4-7) A10(4-7) A10(4-7) A11(4-7) A11(4-7) A10(4-7) A10(4-7) A11(4-7) A11(4-7) A10(4-7) A10(4-7) A11(4-7) A11(4-7) - const __m512i lhs_mat_23_10_sp2 = _mm512_shuffle_epi32(lhs_mat_23_10, (_MM_PERM_ENUM)245); //A12(4-7) A12(4-7) A13(4-7) A13(4-7) A12(4-7) A12(4-7) A13(4-7) A13(4-7) A12(4-7) A12(4-7) A13(4-7) A13(4-7) A12(4-7) A12(4-7) A13(4-7) A13(4-7) - const __m512i lhs_mat_01_11_sp2 = _mm512_shuffle_epi32(lhs_mat_01_11, (_MM_PERM_ENUM)245); //A10(12-15) A10(12-15) A11(12-15) A11(12-15) A10(12-15) A10(12-15) A11(12-15) A11(12-15) A10(12-15) A10(12-15) A11(12-15) A11(12-15) A10(12-15) A10(12-15) A11(12-15) A11(12-15) - const __m512i lhs_mat_23_11_sp2 = _mm512_shuffle_epi32(lhs_mat_23_11, (_MM_PERM_ENUM)245); //A12(12-15) A12(12-15) A13(12-15) A13(12-15) A12(12-15) A12(12-15) A13(12-15) A13(12-15) A12(12-15) A12(12-15) A13(12-15) A13(12-15) A12(12-15) A12(12-15) A13(12-15) A13(12-15) - const __m512i lhs_mat_01_12_sp2 = _mm512_shuffle_epi32(lhs_mat_01_12, (_MM_PERM_ENUM)245); //A10(20-23) A10(20-23) A11(20-23) A11(20-23) A10(20-23) A10(20-23) A11(20-23) A11(20-23) A10(20-23) A10(20-23) A11(20-23) A11(20-23) A10(20-23) A10(20-23) A11(20-23) A11(20-23) - const __m512i lhs_mat_23_12_sp2 = _mm512_shuffle_epi32(lhs_mat_23_12, (_MM_PERM_ENUM)245); //A12(20-23) A12(20-23) A13(20-23) A13(20-23) A12(20-23) A12(20-23) A13(20-23) A13(20-23) A12(20-23) A12(20-23) A13(20-23) A13(20-23) A12(20-23) A12(20-23) A13(20-23) A13(20-23) - const __m512i lhs_mat_01_13_sp2 = _mm512_shuffle_epi32(lhs_mat_01_13, (_MM_PERM_ENUM)245); //A10(28-31) A10(28-31) A11(28-31) A11(28-31) A10(28-31) A10(28-31) A11(28-31) A11(28-31) A10(28-31) A10(28-31) A11(28-31) A11(28-31) A10(28-31) A10(28-31) A11(28-31) A11(28-31) - const __m512i lhs_mat_23_13_sp2 = _mm512_shuffle_epi32(lhs_mat_23_13, (_MM_PERM_ENUM)245); //A12(28-31) A12(28-31) A13(28-31) A13(28-31) A12(28-31) A12(28-31) A13(28-31) A13(28-31) A12(28-31) A12(28-31) A13(28-31) A13(28-31) A12(28-31) A12(28-31) A13(28-31) A13(28-31) - - // The values arranged in shuffle patterns are operated with dot product operation within 32 bit lane i.e corresponding bytes and multiplied and added into 32 bit integers within 32 bit lane - __m512i iacc_mat_00_0_sp1 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_03_sp1, lhs_mat_01_03_sp1), _mm512_maddubs_epi16(rhs_mat_014589CD_02_sp1, lhs_mat_01_02_sp1)), _mm512_maddubs_epi16(rhs_mat_014589CD_01_sp1, lhs_mat_01_01_sp1)), _mm512_maddubs_epi16(rhs_mat_014589CD_00_sp1, lhs_mat_01_00_sp1)); - __m512i iacc_mat_01_0_sp1 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_03_sp1, lhs_mat_01_03_sp1), _mm512_maddubs_epi16(rhs_mat_2367ABEF_02_sp1, lhs_mat_01_02_sp1)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_01_sp1, lhs_mat_01_01_sp1)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_00_sp1, lhs_mat_01_00_sp1)); - __m512i iacc_mat_10_0_sp1 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_03_sp1, lhs_mat_23_03_sp1), _mm512_maddubs_epi16(rhs_mat_014589CD_02_sp1, lhs_mat_23_02_sp1)), _mm512_maddubs_epi16(rhs_mat_014589CD_01_sp1, lhs_mat_23_01_sp1)), _mm512_maddubs_epi16(rhs_mat_014589CD_00_sp1, lhs_mat_23_00_sp1)); - __m512i iacc_mat_11_0_sp1 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_03_sp1, lhs_mat_23_03_sp1), _mm512_maddubs_epi16(rhs_mat_2367ABEF_02_sp1, lhs_mat_23_02_sp1)), 
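/* Editorial note: _mm512_maddubs_epi16 multiplies unsigned bytes from its
   first operand by signed bytes from its second and adds adjacent pairs with
   signed saturation, which is why the masked 4-bit RHS values (always 0..15)
   are passed first and the signed Q8 LHS bytes second. The worst case per
   pair is 2 * 15 * 128 = 3840, and the eight pair-sums folded together here
   (four maddubs per shuffle pattern, two patterns) reach at most
   8 * 3840 = 30720 < 32767, so the int16 accumulation cannot saturate. */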
_mm512_maddubs_epi16(rhs_mat_2367ABEF_01_sp1, lhs_mat_23_01_sp1)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_00_sp1, lhs_mat_23_00_sp1)); - __m512i iacc_mat_00_1_sp1 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_13_sp1, lhs_mat_01_13_sp1), _mm512_maddubs_epi16(rhs_mat_014589CD_12_sp1, lhs_mat_01_12_sp1)), _mm512_maddubs_epi16(rhs_mat_014589CD_11_sp1, lhs_mat_01_11_sp1)), _mm512_maddubs_epi16(rhs_mat_014589CD_10_sp1, lhs_mat_01_10_sp1)); - __m512i iacc_mat_01_1_sp1 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_13_sp1, lhs_mat_01_13_sp1), _mm512_maddubs_epi16(rhs_mat_2367ABEF_12_sp1, lhs_mat_01_12_sp1)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_11_sp1, lhs_mat_01_11_sp1)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_10_sp1, lhs_mat_01_10_sp1)); - __m512i iacc_mat_10_1_sp1 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_13_sp1, lhs_mat_23_13_sp1), _mm512_maddubs_epi16(rhs_mat_014589CD_12_sp1, lhs_mat_23_12_sp1)), _mm512_maddubs_epi16(rhs_mat_014589CD_11_sp1, lhs_mat_23_11_sp1)), _mm512_maddubs_epi16(rhs_mat_014589CD_10_sp1, lhs_mat_23_10_sp1)); - __m512i iacc_mat_11_1_sp1 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_13_sp1, lhs_mat_23_13_sp1), _mm512_maddubs_epi16(rhs_mat_2367ABEF_12_sp1, lhs_mat_23_12_sp1)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_11_sp1, lhs_mat_23_11_sp1)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_10_sp1, lhs_mat_23_10_sp1)); - - __m512i iacc_mat_00_0_sp2 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_03_sp2, lhs_mat_01_03_sp2), _mm512_maddubs_epi16(rhs_mat_014589CD_02_sp2, lhs_mat_01_02_sp2)), _mm512_maddubs_epi16(rhs_mat_014589CD_01_sp2, lhs_mat_01_01_sp2)), _mm512_maddubs_epi16(rhs_mat_014589CD_00_sp2, lhs_mat_01_00_sp2)); - __m512i iacc_mat_01_0_sp2 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_03_sp2, lhs_mat_01_03_sp2), _mm512_maddubs_epi16(rhs_mat_2367ABEF_02_sp2, lhs_mat_01_02_sp2)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_01_sp2, lhs_mat_01_01_sp2)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_00_sp2, lhs_mat_01_00_sp2)); - __m512i iacc_mat_10_0_sp2 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_03_sp2, lhs_mat_23_03_sp2), _mm512_maddubs_epi16(rhs_mat_014589CD_02_sp2, lhs_mat_23_02_sp2)), _mm512_maddubs_epi16(rhs_mat_014589CD_01_sp2, lhs_mat_23_01_sp2)), _mm512_maddubs_epi16(rhs_mat_014589CD_00_sp2, lhs_mat_23_00_sp2)); - __m512i iacc_mat_11_0_sp2 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_03_sp2, lhs_mat_23_03_sp2), _mm512_maddubs_epi16(rhs_mat_2367ABEF_02_sp2, lhs_mat_23_02_sp2)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_01_sp2, lhs_mat_23_01_sp2)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_00_sp2, lhs_mat_23_00_sp2)); - __m512i iacc_mat_00_1_sp2 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_13_sp2, lhs_mat_01_13_sp2), _mm512_maddubs_epi16(rhs_mat_014589CD_12_sp2, lhs_mat_01_12_sp2)), _mm512_maddubs_epi16(rhs_mat_014589CD_11_sp2, lhs_mat_01_11_sp2)), _mm512_maddubs_epi16(rhs_mat_014589CD_10_sp2, lhs_mat_01_10_sp2)); - __m512i iacc_mat_01_1_sp2 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_13_sp2, lhs_mat_01_13_sp2), _mm512_maddubs_epi16(rhs_mat_2367ABEF_12_sp2, lhs_mat_01_12_sp2)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_11_sp2, lhs_mat_01_11_sp2)), 
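/* Editorial note: Q4_K dequantizes as x = d * sc * q - dmin * m, so the dot
   product against Q8 splits into two terms: the scaled integer dot products
   accumulated into acc_rows, and a correction term sum(m_i * bsum_i), where
   the bsums are the per-sub-block sums of the Q8 values, accumulated into
   acc_min_rows via mins_01. The correction is subtracted once at store time
   with _mm512_sub_ps(acc_rows[i], acc_min_rows[i]). */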
_mm512_maddubs_epi16(rhs_mat_2367ABEF_10_sp2, lhs_mat_01_10_sp2));
-                        __m512i iacc_mat_10_1_sp2 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_13_sp2, lhs_mat_23_13_sp2), _mm512_maddubs_epi16(rhs_mat_014589CD_12_sp2, lhs_mat_23_12_sp2)), _mm512_maddubs_epi16(rhs_mat_014589CD_11_sp2, lhs_mat_23_11_sp2)), _mm512_maddubs_epi16(rhs_mat_014589CD_10_sp2, lhs_mat_23_10_sp2));
-                        __m512i iacc_mat_11_1_sp2 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_13_sp2, lhs_mat_23_13_sp2), _mm512_maddubs_epi16(rhs_mat_2367ABEF_12_sp2, lhs_mat_23_12_sp2)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_11_sp2, lhs_mat_23_11_sp2)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_10_sp2, lhs_mat_23_10_sp2));
-
-                        // Outputs of both shuffle patterns are added in order to sum the dot product outputs of all 32 values in the block
-                        __m512i iacc_mat_00_0 = _mm512_add_epi16(iacc_mat_00_0_sp1, iacc_mat_00_0_sp2);
-                        __m512i iacc_mat_01_0 = _mm512_add_epi16(iacc_mat_01_0_sp1, iacc_mat_01_0_sp2);
-                        __m512i iacc_mat_10_0 = _mm512_add_epi16(iacc_mat_10_0_sp1, iacc_mat_10_0_sp2);
-                        __m512i iacc_mat_11_0 = _mm512_add_epi16(iacc_mat_11_0_sp1, iacc_mat_11_0_sp2);
-
-                        __m512i iacc_mat_00_1 = _mm512_add_epi16(iacc_mat_00_1_sp1, iacc_mat_00_1_sp2);
-                        __m512i iacc_mat_01_1 = _mm512_add_epi16(iacc_mat_01_1_sp1, iacc_mat_01_1_sp2);
-                        __m512i iacc_mat_10_1 = _mm512_add_epi16(iacc_mat_10_1_sp1, iacc_mat_10_1_sp2);
-                        __m512i iacc_mat_11_1 = _mm512_add_epi16(iacc_mat_11_1_sp1, iacc_mat_11_1_sp2);
-
-                        iacc_mat_00_0 = _mm512_madd_epi16(iacc_mat_00_0, scale_014589CD_0);
-                        iacc_mat_01_0 = _mm512_madd_epi16(iacc_mat_01_0, scale_2367ABEF_0);
-                        iacc_mat_10_0 = _mm512_madd_epi16(iacc_mat_10_0, scale_014589CD_0);
-                        iacc_mat_11_0 = _mm512_madd_epi16(iacc_mat_11_0, scale_2367ABEF_0);
-
-                        iacc_mat_00_1 = _mm512_madd_epi16(iacc_mat_00_1, scale_014589CD_1);
-                        iacc_mat_01_1 = _mm512_madd_epi16(iacc_mat_01_1, scale_2367ABEF_1);
-                        iacc_mat_10_1 = _mm512_madd_epi16(iacc_mat_10_1, scale_014589CD_1);
-                        iacc_mat_11_1 = _mm512_madd_epi16(iacc_mat_11_1, scale_2367ABEF_1);
-
-                        // Straighten out to make 4 row vectors (4 for each sub block which are accumulated together in the next step)
-                        __m512i iacc_row_0_0 = _mm512_mask_blend_epi32(0xCCCC, iacc_mat_00_0, _mm512_shuffle_epi32(iacc_mat_01_0, (_MM_PERM_ENUM)78));
-                        __m512i iacc_row_1_0 = _mm512_mask_blend_epi32(0xCCCC, _mm512_shuffle_epi32(iacc_mat_00_0, (_MM_PERM_ENUM)78), iacc_mat_01_0);
-                        __m512i iacc_row_2_0 = _mm512_mask_blend_epi32(0xCCCC, iacc_mat_10_0, _mm512_shuffle_epi32(iacc_mat_11_0, (_MM_PERM_ENUM)78));
-                        __m512i iacc_row_3_0 = _mm512_mask_blend_epi32(0xCCCC, _mm512_shuffle_epi32(iacc_mat_10_0, (_MM_PERM_ENUM)78), iacc_mat_11_0);
-                        __m512i iacc_row_0_1 = _mm512_mask_blend_epi32(0xCCCC, iacc_mat_00_1, _mm512_shuffle_epi32(iacc_mat_01_1, (_MM_PERM_ENUM)78));
-                        __m512i iacc_row_1_1 = _mm512_mask_blend_epi32(0xCCCC, _mm512_shuffle_epi32(iacc_mat_00_1, (_MM_PERM_ENUM)78), iacc_mat_01_1);
-                        __m512i iacc_row_2_1 = _mm512_mask_blend_epi32(0xCCCC, iacc_mat_10_1, _mm512_shuffle_epi32(iacc_mat_11_1, (_MM_PERM_ENUM)78));
-                        __m512i iacc_row_3_1 = _mm512_mask_blend_epi32(0xCCCC, _mm512_shuffle_epi32(iacc_mat_10_1, (_MM_PERM_ENUM)78), iacc_mat_11_1);
-
-                        __m512i iacc_row_0 = _mm512_add_epi32(iacc_row_0_0, iacc_row_0_1);
-                        __m512i iacc_row_1 = _mm512_add_epi32(iacc_row_1_0, iacc_row_1_1);
-                        __m512i iacc_row_2 = _mm512_add_epi32(iacc_row_2_0, iacc_row_2_1);
-                        __m512i iacc_row_3 = _mm512_add_epi32(iacc_row_3_0, iacc_row_3_1);
-
-                        // Load the scale (d) values for all the 4 Q8_K blocks and repeat them across lanes
-                        const __m128 row_scale_f32_sse = _mm_load_ps(a_ptrs[rp][b].d);
-                        const __m256 row_scale_f32_ymm = _mm256_set_m128(row_scale_f32_sse, row_scale_f32_sse);
-                        const __m512 row_scale_f32 = _mm512_insertf32x8(_mm512_castps256_ps512(row_scale_f32_ymm), row_scale_f32_ymm, 1);
-
-                        // Multiply with appropriate scales and accumulate (for both d and dmin) below
-                        acc_rows[rp * 4] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_0), _mm512_mul_ps(col_scale_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 0)), acc_rows[rp * 4]);
-                        acc_rows[rp * 4 + 1] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_1), _mm512_mul_ps(col_scale_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 85)), acc_rows[rp * 4 + 1]);
-                        acc_rows[rp * 4 + 2] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_2), _mm512_mul_ps(col_scale_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 170)), acc_rows[rp * 4 + 2]);
-                        acc_rows[rp * 4 + 3] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_3), _mm512_mul_ps(col_scale_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 255)), acc_rows[rp * 4 + 3]);
-
-                        __m512i iacc_row_min_0 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_hsum_0123_01, (_MM_PERM_ENUM)0), mins_01);
-                        __m512i iacc_row_min_1 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_hsum_0123_01, (_MM_PERM_ENUM)85), mins_01);
-                        __m512i iacc_row_min_2 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_hsum_0123_01, (_MM_PERM_ENUM)170), mins_01);
-                        __m512i iacc_row_min_3 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_hsum_0123_01, (_MM_PERM_ENUM)255), mins_01);
-
-                        acc_min_rows[rp * 4] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_min_0), _mm512_mul_ps(col_dmin_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 0)), acc_min_rows[rp * 4]);
-                        acc_min_rows[rp * 4 + 1] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_min_1), _mm512_mul_ps(col_dmin_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 85)), acc_min_rows[rp * 4 + 1]);
-                        acc_min_rows[rp * 4 + 2] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_min_2), _mm512_mul_ps(col_dmin_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 170)), acc_min_rows[rp * 4 + 2]);
-                        acc_min_rows[rp * 4 + 3] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_min_3), _mm512_mul_ps(col_dmin_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 255)), acc_min_rows[rp * 4 + 3]);
-                    }
-                }
-            }
-            // Store the accumulated values
-            for (int i = 0; i < 16; i++) {
-                _mm512_storeu_ps((float * )(s + ((y * 4 + i) * bs + x * 8)), _mm512_sub_ps(acc_rows[i], acc_min_rows[i]));
-            }
-        }
-    }
-
-    for (; y < nr / 4; y++) {
-
-        const block_q8_Kx4 * a_ptr = a_ptr_start + (y * nb);
-
-        // Take group of eight block_q4_Kx8 structures at each pass of the loop and perform dot product operation
-        for (int64_t x = 0; x < anc / 8; x += 2) {
-
-            const block_q4_Kx8 * b_ptr_0 = b_ptr_start + ((x) * b_nb);
-            const block_q4_Kx8 * b_ptr_1 = b_ptr_start + ((x + 1) * b_nb);
-
-            // Master FP accumulators
-            __m512 acc_rows[4];
-            for (int i = 0; i < 4; i++) {
-                acc_rows[i] = _mm512_setzero_ps();
-            }
-
-            __m512 acc_min_rows[4];
-            for (int i = 0; i < 4; i++) {
-                acc_min_rows[i] = _mm512_setzero_ps();
-            }
-
-            // For super block
-            for (int64_t b = 0; b < nb; b++) {
-                // Scale values - Load the sixteen scale values from two block_q4_Kx8 structures
-                const __m512 col_scale_f32 = GGML_F32Cx8x2_LOAD(b_ptr_0[b].d, b_ptr_1[b].d);
-
-                // dmin values - Load the sixteen dmin values from two block_q4_Kx8 structures
-                const __m512 col_dmin_f32 = GGML_F32Cx8x2_LOAD(b_ptr_0[b].dmin, b_ptr_1[b].dmin);
-
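For reference: the sp1/sp2 pairing above works because _mm512_maddubs_epi16 multiplies within 32-bit lanes, so shuffle pattern one covers bytes 0-3/8-11/16-19/24-27 of a sub block and pattern two the remaining bytes; adding the two partials therefore sums the dot product over all 32 values before _mm512_madd_epi16 applies the 6-bit sub-block scale. A minimal scalar sketch of the per-cell arithmetic, not part of the patch (illustrative names; the 16-bit saturation subtleties of maddubs are ignored):

#include <stdint.h>

// One cell of the output tile, one sub block: dot the 32 unsigned 4-bit
// values against the 32 signed 8-bit values, then weight by the sub block
// scale -- what iacc_mat_* accumulates and _mm512_madd_epi16 scales above.
int32_t subblock_dot_scaled(const uint8_t q4[32], const int8_t q8[32], uint8_t scale) {
    int32_t acc = 0;
    for (int j = 0; j < 32; j++) {
        acc += (int32_t) q4[j] * (int32_t) q8[j];
    }
    return acc * (int32_t) scale;
}

-                // Loop 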
to iterate over the eight sub blocks of a super block - two sub blocks are processed per iteration - for (int sb = 0; sb < QK_K / 64; sb++) { - - const __m256i rhs_raw_mat_0123_0 = _mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + sb * 256)); - const __m256i rhs_raw_mat_4567_0 = _mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + 32 + sb * 256)); - const __m256i rhs_raw_mat_0123_1 = _mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + 64 + sb * 256)); - const __m256i rhs_raw_mat_4567_1 = _mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + 96 + sb * 256)); - const __m256i rhs_raw_mat_0123_2 = _mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + 128 + sb * 256)); - const __m256i rhs_raw_mat_4567_2 = _mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + 160 + sb * 256)); - const __m256i rhs_raw_mat_0123_3 = _mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + 192 + sb * 256)); - const __m256i rhs_raw_mat_4567_3 = _mm256_loadu_si256((const __m256i * )(b_ptr_0[b].qs + 224 + sb * 256)); - - const __m256i rhs_raw_mat_89AB_0 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + sb * 256)); - const __m256i rhs_raw_mat_CDEF_0 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + 32 + sb * 256)); - const __m256i rhs_raw_mat_89AB_1 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + 64 + sb * 256)); - const __m256i rhs_raw_mat_CDEF_1 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + 96 + sb * 256)); - const __m256i rhs_raw_mat_89AB_2 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + 128 + sb * 256)); - const __m256i rhs_raw_mat_CDEF_2 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + 160 + sb * 256)); - const __m256i rhs_raw_mat_89AB_3 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + 192 + sb * 256)); - const __m256i rhs_raw_mat_CDEF_3 = _mm256_loadu_si256((const __m256i * )(b_ptr_1[b].qs + 224 + sb * 256)); - - const __m256i rhs_raw_mat_0145_0 = _mm256_blend_epi32(rhs_raw_mat_0123_0, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_0, requiredOrder), 240); - const __m256i rhs_raw_mat_2367_0 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_0, requiredOrder), rhs_raw_mat_4567_0, 240); - const __m256i rhs_raw_mat_0145_1 = _mm256_blend_epi32(rhs_raw_mat_0123_1, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_1, requiredOrder), 240); - const __m256i rhs_raw_mat_2367_1 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_1, requiredOrder), rhs_raw_mat_4567_1, 240); - const __m256i rhs_raw_mat_0145_2 = _mm256_blend_epi32(rhs_raw_mat_0123_2, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_2, requiredOrder), 240); - const __m256i rhs_raw_mat_2367_2 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_2, requiredOrder), rhs_raw_mat_4567_2, 240); - const __m256i rhs_raw_mat_0145_3 = _mm256_blend_epi32(rhs_raw_mat_0123_3, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_3, requiredOrder), 240); - const __m256i rhs_raw_mat_2367_3 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_3, requiredOrder), rhs_raw_mat_4567_3, 240); - - const __m256i rhs_raw_mat_89CD_0 = _mm256_blend_epi32(rhs_raw_mat_89AB_0, _mm256_permutevar8x32_epi32(rhs_raw_mat_CDEF_0, requiredOrder), 240); - const __m256i rhs_raw_mat_ABEF_0 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_89AB_0, requiredOrder), rhs_raw_mat_CDEF_0, 240); - const __m256i rhs_raw_mat_89CD_1 = _mm256_blend_epi32(rhs_raw_mat_89AB_1, _mm256_permutevar8x32_epi32(rhs_raw_mat_CDEF_1, requiredOrder), 240); - const __m256i rhs_raw_mat_ABEF_1 = 
_mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_89AB_1, requiredOrder), rhs_raw_mat_CDEF_1, 240); - const __m256i rhs_raw_mat_89CD_2 = _mm256_blend_epi32(rhs_raw_mat_89AB_2, _mm256_permutevar8x32_epi32(rhs_raw_mat_CDEF_2, requiredOrder), 240); - const __m256i rhs_raw_mat_ABEF_2 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_89AB_2, requiredOrder), rhs_raw_mat_CDEF_2, 240); - const __m256i rhs_raw_mat_89CD_3 = _mm256_blend_epi32(rhs_raw_mat_89AB_3, _mm256_permutevar8x32_epi32(rhs_raw_mat_CDEF_3, requiredOrder), 240); - const __m256i rhs_raw_mat_ABEF_3 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_89AB_3, requiredOrder), rhs_raw_mat_CDEF_3, 240); - - const __m512i rhs_raw_mat_014589CD_0 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_0145_0), rhs_raw_mat_89CD_0, 1); - const __m512i rhs_raw_mat_2367ABEF_0 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_2367_0), rhs_raw_mat_ABEF_0, 1); - const __m512i rhs_raw_mat_014589CD_1 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_0145_1), rhs_raw_mat_89CD_1, 1); - const __m512i rhs_raw_mat_2367ABEF_1 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_2367_1), rhs_raw_mat_ABEF_1, 1); - - const __m512i rhs_raw_mat_014589CD_2 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_0145_2), rhs_raw_mat_89CD_2, 1); - const __m512i rhs_raw_mat_2367ABEF_2 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_2367_2), rhs_raw_mat_ABEF_2, 1); - const __m512i rhs_raw_mat_014589CD_3 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_0145_3), rhs_raw_mat_89CD_3, 1); - const __m512i rhs_raw_mat_2367ABEF_3 = _mm512_inserti32x8(_mm512_castsi256_si512(rhs_raw_mat_2367_3), rhs_raw_mat_ABEF_3, 1); - - //4-bit -> 8-bit - const __m512i rhs_mat_014589CD_00 = _mm512_and_si512(rhs_raw_mat_014589CD_0, m4bexpanded); //B00(0-7) B01(0-7) B04(0-7) B05(0-7) B08(0-7) B09(0-7) B0C(0-7) B0D(0-7) - const __m512i rhs_mat_2367ABEF_00 = _mm512_and_si512(rhs_raw_mat_2367ABEF_0, m4bexpanded); //B02(0-7) B03(0-7) B06(0-7) B07(0-7) B0A(0-7) B0B(0-7) B0E(0-7) B0F(0-7) - const __m512i rhs_mat_014589CD_01 = _mm512_and_si512(rhs_raw_mat_014589CD_1, m4bexpanded); //B00(8-15) B01(8-15) B04(8-15) B05(8-15) B08(8-15) B09(8-15) B0C(8-15) B0D(8-15) - const __m512i rhs_mat_2367ABEF_01 = _mm512_and_si512(rhs_raw_mat_2367ABEF_1, m4bexpanded); //B02(8-15) B03(8-15) B06(8-15) B07(8-15) B0A(8-15) B0B(8-15) B0E(8-15) B0F(8-15) - - const __m512i rhs_mat_014589CD_02 = _mm512_and_si512(rhs_raw_mat_014589CD_2, m4bexpanded); //B00(16-23) B01(16-23) B04(16-23) B05(16-23) B08(16-23) B09(16-23) B0C(16-23) B0D(16-23) - const __m512i rhs_mat_2367ABEF_02 = _mm512_and_si512(rhs_raw_mat_2367ABEF_2, m4bexpanded); //B02(16-23) B03(16-23) B06(16-23) B07(16-23) B0A(16-23) B0B(16-23) B0E(16-23) B0F(16-23) - const __m512i rhs_mat_014589CD_03 = _mm512_and_si512(rhs_raw_mat_014589CD_3, m4bexpanded); //B00(24-31) B01(24-31) B04(24-31) B05(24-31) B08(24-31) B09(24-31) B0C(24-31) B0D(24-31) - const __m512i rhs_mat_2367ABEF_03 = _mm512_and_si512(rhs_raw_mat_2367ABEF_3, m4bexpanded); //B02(24-31) B03(24-31) B06(24-31) B07(24-31) B0A(24-31) B0B(24-31) B0E(24-31) B0F(24-31) - - const __m512i rhs_mat_014589CD_10 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_0, 4), m4bexpanded); //B10(0-7) B11(0-7) B14(0-7) B15(0-7) B18(0-7) B19(0-7) B1C(0-7) B1D(0-7) - const __m512i rhs_mat_2367ABEF_10 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_0, 4), m4bexpanded); //B12(0-7) B13(0-7) B16(0-7) B17(0-7) B1A(0-7) B1B(0-7) B1E(0-7) B1F(0-7) - const 
__m512i rhs_mat_014589CD_11 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_1, 4), m4bexpanded); //B10(8-15) B11(8-15) B14(8-15) B15(8-15) B18(8-15) B19(8-15) B1C(8-15) B1D(8-15) - const __m512i rhs_mat_2367ABEF_11 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_1, 4), m4bexpanded); //B12(8-15) B13(8-15) B16(8-15) B17(8-15) B1A(8-15) B1B(8-15) B1E(8-15) B1F(8-15) - - const __m512i rhs_mat_014589CD_12 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_2, 4), m4bexpanded); //B10(16-23) B11(16-23) B14(16-23) B15(16-23) B18(16-23) B19(16-23) B1C(16-23) B1D(16-23) - const __m512i rhs_mat_2367ABEF_12 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_2, 4), m4bexpanded); //B12(16-23) B13(16-23) B16(16-23) B17(16-23) B1A(16-23) B1B(16-23) B1E(16-23) B1F(16-23) - const __m512i rhs_mat_014589CD_13 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_014589CD_3, 4), m4bexpanded); //B10(24-31) B11(24-31) B14(24-31) B15(24-31) B18(24-31) B19(24-31) B1C(24-31) B1D(24-31) - const __m512i rhs_mat_2367ABEF_13 = _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_3, 4), m4bexpanded); //B12(24-31) B13(24-31) B16(24-31) B17(24-31) B1A(24-31) B1B(24-31) B1E(24-31) B1F(24-31) - - // Shuffle pattern one - right side input - const __m512i rhs_mat_014589CD_00_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_00, (_MM_PERM_ENUM)136); //B00(0-3) B01(0-3) B00(0-3) B01(0-3) B04(0-3) B05(0-3) B04(0-3) B05(0-3) B08(0-3) B09(0-3) B08(0-3) B09(0-3) B0C(0-3) B0D(0-3) B0C(0-3) B0D(0-3) - const __m512i rhs_mat_2367ABEF_00_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_00, (_MM_PERM_ENUM)136); //B02(0-3) B03(0-3) B02(0-3) B03(0-3) B06(0-3) B07(0-3) B06(0-3) B07(0-3) B0A(0-3) B0B(0-3) B0A(0-3) B0B(0-3) B0E(0-3) B0F(0-3) B0E(0-3) B0F(0-3) - const __m512i rhs_mat_014589CD_01_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_01, (_MM_PERM_ENUM)136); //B00(8-11) B01(8-11) B00(8-11) B01(8-11) B04(8-11) B05(8-11) B04(8-11) B05(8-11) B08(8-11) B09(8-11) B08(8-11) B09(8-11) B0C(8-11) B0D(8-11) B0C(8-11) B0D(8-11) - const __m512i rhs_mat_2367ABEF_01_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_01, (_MM_PERM_ENUM)136); //B02(8-11) B03(8-11) B02(8-11) B03(8-11) B06(8-11) B07(8-11) B06(8-11) B07(8-11) B0A(8-11) B0B(8-11) B0A(8-11) B0B(8-11) B0E(8-11) B0F(8-11) B0E(8-11) B0F(8-11) - const __m512i rhs_mat_014589CD_02_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_02, (_MM_PERM_ENUM)136); //B00(16-19) B01(16-19) B00(16-19) B01(16-19) B04(16-19) B05(16-19) B04(16-19) B05(16-19) B08(16-19) B09(16-19) B08(16-19) B09(16-19) B0C(16-19) B0D(16-19) B0C(16-19) B0D(16-19) - const __m512i rhs_mat_2367ABEF_02_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_02, (_MM_PERM_ENUM)136); //B02(16-19) B03(16-19) B02(16-19) B03(16-19) B06(16-19) B07(16-19) B06(16-19) B07(16-19) B0A(16-19) B0B(16-19) B0A(16-19) B0B(16-19) B0E(16-19) B0F(16-19) B0E(16-19) B0F(16-19) - const __m512i rhs_mat_014589CD_03_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_03, (_MM_PERM_ENUM)136); //B00(24-27) B01(24-27) B00(24-27) B01(24-27) B04(24-27) B05(24-27) B04(24-27) B05(24-27) B08(24-27) B09(24-27) B08(24-27) B09(24-27) B0C(24-27) B0D(24-27) B0C(24-27) B0D(24-27) - const __m512i rhs_mat_2367ABEF_03_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_03, (_MM_PERM_ENUM)136); //B02(24-27) B03(24-27) B02(24-27) B03(24-27) B06(24-27) B07(24-27) B06(24-27) B07(24-27) B0A(24-27) B0B(24-27) B0A(24-27) B0B(24-27) B0E(24-27) B0F(24-27) B0E(24-27) B0F(24-27) - - const __m512i rhs_mat_014589CD_10_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_10, (_MM_PERM_ENUM)136); //B10(0-3) B11(0-3) 
B10(0-3) B11(0-3) B14(0-3) B15(0-3) B14(0-3) B15(0-3) B18(0-3) B19(0-3) B18(0-3) B19(0-3) B1C(0-3) B1D(0-3) B1C(0-3) B1D(0-3) - const __m512i rhs_mat_2367ABEF_10_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_10, (_MM_PERM_ENUM)136); //B12(0-3) B13(0-3) B12(0-3) B13(0-3) B16(0-3) B17(0-3) B16(0-3) B17(0-3) B1A(0-3) B1B(0-3) B1A(0-3) B1B(0-3) B1E(0-3) B1F(0-3) B1E(0-3) B1F(0-3) - const __m512i rhs_mat_014589CD_11_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_11, (_MM_PERM_ENUM)136); //B10(8-11) B11(8-11) B10(8-11) B11(8-11) B14(8-11) B15(8-11) B14(8-11) B15(8-11) B18(8-11) B19(8-11) B18(8-11) B19(8-11) B1C(8-11) B1D(8-11) B1C(8-11) B1D(8-11) - const __m512i rhs_mat_2367ABEF_11_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_11, (_MM_PERM_ENUM)136); //B12(8-11) B13(8-11) B12(8-11) B13(8-11) B16(8-11) B17(8-11) B16(8-11) B17(8-11) B1A(8-11) B1B(8-11) B1A(8-11) B1B(8-11) B1E(8-11) B1F(8-11) B1E(8-11) B1F(8-11) - const __m512i rhs_mat_014589CD_12_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_12, (_MM_PERM_ENUM)136); //B10(16-19) B11(16-19) B10(16-19) B11(16-19) B14(16-19) B15(16-19) B14(16-19) B15(16-19) B18(16-19) B19(16-19) B18(16-19) B19(16-19) B1C(16-19) B1D(16-19) B1C(16-19) B1D(16-19) - const __m512i rhs_mat_2367ABEF_12_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_12, (_MM_PERM_ENUM)136); //B12(16-19) B13(16-19) B12(16-19) B13(16-19) B16(16-19) B17(16-19) B16(16-19) B17(16-19) B1A(16-19) B1B(16-19) B1A(16-19) B1B(16-19) B1E(16-19) B1F(16-19) B1E(16-19) B1F(16-19) - const __m512i rhs_mat_014589CD_13_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_13, (_MM_PERM_ENUM)136); //B10(24-27) B11(24-27) B10(24-27) B11(24-27) B14(24-27) B15(24-27) B14(24-27) B15(24-27) B18(24-27) B19(24-27) B18(24-27) B19(24-27) B1C(24-27) B1D(24-27) B1C(24-27) B1D(24-27) - const __m512i rhs_mat_2367ABEF_13_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_13, (_MM_PERM_ENUM)136); //B12(24-27) B13(24-27) B12(24-27) B13(24-27) B16(24-27) B17(24-27) B16(24-27) B17(24-27) B1A(24-27) B1B(24-27) B1A(24-27) B1B(24-27) B1E(24-27) B1F(24-27) B1E(24-27) B1F(24-27) - - // Shuffle pattern two - right side input - const __m512i rhs_mat_014589CD_00_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_00, (_MM_PERM_ENUM)221); //B00(4-7) B01(4-7) B00(4-7) B01(4-7) B04(4-7) B05(4-7) B04(4-7) B05(4-7) B08(4-7) B09(4-7) B08(4-7) B09(4-7) B0C(4-7) B0D(4-7) B0C(4-7) B0D(4-7) - const __m512i rhs_mat_2367ABEF_00_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_00, (_MM_PERM_ENUM)221); //B02(4-7) B03(4-7) B02(4-7) B03(4-7) B06(4-7) B07(4-7) B06(4-7) B07(4-7) B0A(4-7) B0B(4-7) B0A(4-7) B0B(4-7) B0E(4-7) B0F(4-7) B0E(4-7) B0F(4-7) - const __m512i rhs_mat_014589CD_01_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_01, (_MM_PERM_ENUM)221); //B00(12-15) B01(12-15) B00(12-15) B01(12-15) B04(12-15) B05(12-15) B04(12-15) B05(12-15) B08(12-15) B09(12-15) B08(12-15) B09(12-15) B0C(12-15) B0D(12-15) B0C(12-15) B0D(12-15) - const __m512i rhs_mat_2367ABEF_01_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_01, (_MM_PERM_ENUM)221); //B02(12-15) B03(12-15) B02(12-15) B03(12-15) B06(12-15) B07(12-15) B06(12-15) B07(12-15) B0A(12-15) B0B(12-15) B0A(12-15) B0B(12-15) B0E(12-15) B0F(12-15) B0E(12-15) B0F(12-15) - const __m512i rhs_mat_014589CD_02_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_02, (_MM_PERM_ENUM)221); //B00(20-23) B01(20-23) B00(20-23) B01(20-23) B04(20-23) B05(20-23) B04(20-23) B05(20-23) B08(20-23) B09(20-23) B08(20-23) B09(20-23) B0C(20-23) B0D(20-23) B0C(20-23) B0D(20-23) - const __m512i rhs_mat_2367ABEF_02_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_02, (_MM_PERM_ENUM)221); 
//B02(20-23) B03(20-23) B02(20-23) B03(20-23) B06(20-23) B07(20-23) B06(20-23) B07(20-23) B0A(20-23) B0B(20-23) B0A(20-23) B0B(20-23) B0E(20-23) B0F(20-23) B0E(20-23) B0F(20-23)
-                    const __m512i rhs_mat_014589CD_03_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_03, (_MM_PERM_ENUM)221); //B00(28-31) B01(28-31) B00(28-31) B01(28-31) B04(28-31) B05(28-31) B04(28-31) B05(28-31) B08(28-31) B09(28-31) B08(28-31) B09(28-31) B0C(28-31) B0D(28-31) B0C(28-31) B0D(28-31)
-                    const __m512i rhs_mat_2367ABEF_03_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_03, (_MM_PERM_ENUM)221); //B02(28-31) B03(28-31) B02(28-31) B03(28-31) B06(28-31) B07(28-31) B06(28-31) B07(28-31) B0A(28-31) B0B(28-31) B0A(28-31) B0B(28-31) B0E(28-31) B0F(28-31) B0E(28-31) B0F(28-31)
-
-                    const __m512i rhs_mat_014589CD_10_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_10, (_MM_PERM_ENUM)221); //B10(4-7) B11(4-7) B10(4-7) B11(4-7) B14(4-7) B15(4-7) B14(4-7) B15(4-7) B18(4-7) B19(4-7) B18(4-7) B19(4-7) B1C(4-7) B1D(4-7) B1C(4-7) B1D(4-7)
-                    const __m512i rhs_mat_2367ABEF_10_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_10, (_MM_PERM_ENUM)221); //B12(4-7) B13(4-7) B12(4-7) B13(4-7) B16(4-7) B17(4-7) B16(4-7) B17(4-7) B1A(4-7) B1B(4-7) B1A(4-7) B1B(4-7) B1E(4-7) B1F(4-7) B1E(4-7) B1F(4-7)
-                    const __m512i rhs_mat_014589CD_11_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_11, (_MM_PERM_ENUM)221); //B10(12-15) B11(12-15) B10(12-15) B11(12-15) B14(12-15) B15(12-15) B14(12-15) B15(12-15) B18(12-15) B19(12-15) B18(12-15) B19(12-15) B1C(12-15) B1D(12-15) B1C(12-15) B1D(12-15)
-                    const __m512i rhs_mat_2367ABEF_11_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_11, (_MM_PERM_ENUM)221); //B12(12-15) B13(12-15) B12(12-15) B13(12-15) B16(12-15) B17(12-15) B16(12-15) B17(12-15) B1A(12-15) B1B(12-15) B1A(12-15) B1B(12-15) B1E(12-15) B1F(12-15) B1E(12-15) B1F(12-15)
-                    const __m512i rhs_mat_014589CD_12_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_12, (_MM_PERM_ENUM)221); //B10(20-23) B11(20-23) B10(20-23) B11(20-23) B14(20-23) B15(20-23) B14(20-23) B15(20-23) B18(20-23) B19(20-23) B18(20-23) B19(20-23) B1C(20-23) B1D(20-23) B1C(20-23) B1D(20-23)
-                    const __m512i rhs_mat_2367ABEF_12_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_12, (_MM_PERM_ENUM)221); //B12(20-23) B13(20-23) B12(20-23) B13(20-23) B16(20-23) B17(20-23) B16(20-23) B17(20-23) B1A(20-23) B1B(20-23) B1A(20-23) B1B(20-23) B1E(20-23) B1F(20-23) B1E(20-23) B1F(20-23)
-                    const __m512i rhs_mat_014589CD_13_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_13, (_MM_PERM_ENUM)221); //B10(28-31) B11(28-31) B10(28-31) B11(28-31) B14(28-31) B15(28-31) B14(28-31) B15(28-31) B18(28-31) B19(28-31) B18(28-31) B19(28-31) B1C(28-31) B1D(28-31) B1C(28-31) B1D(28-31)
-                    const __m512i rhs_mat_2367ABEF_13_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_13, (_MM_PERM_ENUM)221); //B12(28-31) B13(28-31) B12(28-31) B13(28-31) B16(28-31) B17(28-31) B16(28-31) B17(28-31) B1A(28-31) B1B(28-31) B1A(28-31) B1B(28-31) B1E(28-31) B1F(28-31) B1E(28-31) B1F(28-31)
-
-                    uint32_t utmp_00[4], utmp_01[4], utmp_10[4], utmp_11[4];
-
-                    // Scales and Mins of corresponding sub blocks from different Q4_K structures are stored together
-                    // For example, the block below extracts the first sub block's scales and mins from the different Q4_K structures for the sb loop
-                    memcpy(utmp_00, b_ptr_0[b].scales + 24 * sb, 12);
-                    utmp_00[3] = ((utmp_00[2] >> 4) & kmask2) | (((utmp_00[1] >> 6) & kmask3) << 4);
-                    const uint32_t uaux_00 = utmp_00[1] & kmask1;
-                    utmp_00[1] = (utmp_00[2] & kmask2) | (((utmp_00[0] >> 6) & kmask3) << 4);
-                    utmp_00[2] = uaux_00;
-                    utmp_00[0] &= kmask1;
-
-                    // For example, the block below extracts the second sub block's scales and mins from the different Q4_K structures for the sb loop
-                    memcpy(utmp_01, b_ptr_0[b].scales + 12 + sb * 24, 12);
-                    utmp_01[3] = ((utmp_01[2] >> 4) & kmask2) | (((utmp_01[1] >> 6) & kmask3) << 4);
-                    const uint32_t uaux_01 = utmp_01[1] & kmask1;
-                    utmp_01[1] = (utmp_01[2] & kmask2) | (((utmp_01[0] >> 6) & kmask3) << 4);
-                    utmp_01[2] = uaux_01;
-                    utmp_01[0] &= kmask1;
-
-                    // For example, the block below extracts the first sub block's scales and mins from the different Q4_K structures for the sb loop
-                    memcpy(utmp_10, b_ptr_1[b].scales + sb * 24, 12);
-                    utmp_10[3] = ((utmp_10[2] >> 4) & kmask2) | (((utmp_10[1] >> 6) & kmask3) << 4);
-                    const uint32_t uaux_10 = utmp_10[1] & kmask1;
-                    utmp_10[1] = (utmp_10[2] & kmask2) | (((utmp_10[0] >> 6) & kmask3) << 4);
-                    utmp_10[2] = uaux_10;
-                    utmp_10[0] &= kmask1;
-
-                    // For example, the block below extracts the second sub block's scales and mins from the different Q4_K structures for the sb loop
-                    memcpy(utmp_11, b_ptr_1[b].scales + 12 + sb * 24, 12);
-                    utmp_11[3] = ((utmp_11[2] >> 4) & kmask2) | (((utmp_11[1] >> 6) & kmask3) << 4);
-                    const uint32_t uaux_11 = utmp_11[1] & kmask1;
-                    utmp_11[1] = (utmp_11[2] & kmask2) | (((utmp_11[0] >> 6) & kmask3) << 4);
-                    utmp_11[2] = uaux_11;
-                    utmp_11[0] &= kmask1;
-
-                    // Scales of first sub block in the sb loop
-                    const __m256i mins_and_scales_0 = _mm256_set_epi32(utmp_10[3], utmp_10[2], utmp_10[1], utmp_10[0], utmp_00[3], utmp_00[2], utmp_00[1], utmp_00[0]);
-                    const __m512i scales_0 = _mm512_cvtepu8_epi16(_mm256_unpacklo_epi8(mins_and_scales_0, mins_and_scales_0));
-
-                    // Scales of second sub block in the sb loop
-                    const __m256i mins_and_scales_1 = _mm256_set_epi32(utmp_11[3], utmp_11[2], utmp_11[1], utmp_11[0], utmp_01[3], utmp_01[2], utmp_01[1], utmp_01[0]);
-                    const __m512i scales_1 = _mm512_cvtepu8_epi16(_mm256_unpacklo_epi8(mins_and_scales_1, mins_and_scales_1));
-
-                    // Mins of first and second sub block of Q4_K block are arranged side by side
-                    const __m512i mins_01 = _mm512_cvtepu8_epi16(_mm256_unpacklo_epi8(_mm256_shuffle_epi32(mins_and_scales_0, 78), _mm256_shuffle_epi32(mins_and_scales_1, 78)));
-
-                    const __m512i scale_014589CD_0 = _mm512_shuffle_epi32(scales_0, (_MM_PERM_ENUM)68);
-                    const __m512i scale_2367ABEF_0 = _mm512_shuffle_epi32(scales_0, (_MM_PERM_ENUM)238);
-
-                    const __m512i scale_014589CD_1 = _mm512_shuffle_epi32(scales_1, (_MM_PERM_ENUM)68);
-                    const __m512i scale_2367ABEF_1 = _mm512_shuffle_epi32(scales_1, (_MM_PERM_ENUM)238);
-
-                    // Load the four block_q8_K quantized values interleaved with each other in chunks of eight bytes - A0,A1,A2,A3
-                    // Loaded as set of 128 bit vectors and repeated into a 256 bit vector
-                    __m256i lhs_mat_ymm_0123_00 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 256 * sb)));
-                    __m256i lhs_mat_ymm_01_00 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_00, lhs_mat_ymm_0123_00, 0);
-                    __m256i lhs_mat_ymm_23_00 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_00, lhs_mat_ymm_0123_00, 17);
-                    __m256i lhs_mat_ymm_0123_01 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 32 + 256 * sb)));
-                    __m256i lhs_mat_ymm_01_01 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_01, lhs_mat_ymm_0123_01, 0);
-                    __m256i lhs_mat_ymm_23_01 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_01, lhs_mat_ymm_0123_01, 17);
-                    __m256i lhs_mat_ymm_0123_02 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 64 + 256 * sb)));
-                    __m256i lhs_mat_ymm_01_02 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_02, lhs_mat_ymm_0123_02, 0);
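For reference: the kmask1/kmask2/kmask3 twiddling above is a 32-bit-at-a-time form of the standard Q4_K scale/min unpacking — the 12-byte scales field of a super block packs eight 6-bit scales and eight 6-bit mins, with the first four of each in the low 6 bits of bytes 0-7 and the last four split between the nibbles of bytes 8-11 and the two high bits of bytes 0-7. A scalar sketch of the same unpacking, following the reference k-quants helper (illustrative, not part of the patch):

#include <stdint.h>

// Extract the 6-bit scale (d) and min (m) of sub block j (0..7) from the
// 12-byte packed scales field of a Q4_K super block -- the scalar
// counterpart of the utmp_*/kmask block above.
void get_scale_min(int j, const uint8_t * q, uint8_t * d, uint8_t * m) {
    if (j < 4) {
        *d = q[j] & 63;                                             // low 6 bits, directly
        *m = q[j + 4] & 63;
    } else {
        *d = (uint8_t)((q[j + 4] & 0xF) | ((q[j - 4] >> 6) << 4)); // low nibble + borrowed high bits
        *m = (uint8_t)((q[j + 4] >>  4) | ((q[j    ] >> 6) << 4));
    }
}

-                    __m256i lhs_mat_ymm_23_02 = 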
_mm256_permute2f128_si256(lhs_mat_ymm_0123_02, lhs_mat_ymm_0123_02, 17); - __m256i lhs_mat_ymm_0123_03 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 96 + 256 * sb))); - __m256i lhs_mat_ymm_01_03 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_03, lhs_mat_ymm_0123_03, 0); - __m256i lhs_mat_ymm_23_03 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_03, lhs_mat_ymm_0123_03, 17); - __m256i lhs_mat_ymm_0123_10 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 128 + 256 * sb))); - __m256i lhs_mat_ymm_01_10 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_10, lhs_mat_ymm_0123_10, 0); - __m256i lhs_mat_ymm_23_10 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_10, lhs_mat_ymm_0123_10, 17); - __m256i lhs_mat_ymm_0123_11 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 160 + 256 * sb))); - __m256i lhs_mat_ymm_01_11 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_11, lhs_mat_ymm_0123_11, 0); - __m256i lhs_mat_ymm_23_11 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_11, lhs_mat_ymm_0123_11, 17); - __m256i lhs_mat_ymm_0123_12 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 192 + 256 * sb))); - __m256i lhs_mat_ymm_01_12 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_12, lhs_mat_ymm_0123_12, 0); - __m256i lhs_mat_ymm_23_12 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_12, lhs_mat_ymm_0123_12, 17); - __m256i lhs_mat_ymm_0123_13 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 224 + 256 * sb))); - __m256i lhs_mat_ymm_01_13 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_13, lhs_mat_ymm_0123_13, 0); - __m256i lhs_mat_ymm_23_13 = _mm256_permute2f128_si256(lhs_mat_ymm_0123_13, lhs_mat_ymm_0123_13, 17); - - //Loaded as set of 128 bit vectors and repeated and stored into a 256 bit vector before again repeating into a 512 bit vector - __m512i lhs_mat_01_00 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_00), lhs_mat_ymm_01_00, 1); - __m512i lhs_mat_23_00 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_00), lhs_mat_ymm_23_00, 1); - __m512i lhs_mat_01_01 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_01), lhs_mat_ymm_01_01, 1); - __m512i lhs_mat_23_01 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_01), lhs_mat_ymm_23_01, 1); - __m512i lhs_mat_01_02 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_02), lhs_mat_ymm_01_02, 1); - __m512i lhs_mat_23_02 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_02), lhs_mat_ymm_23_02, 1); - __m512i lhs_mat_01_03 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_03), lhs_mat_ymm_01_03, 1); - __m512i lhs_mat_23_03 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_03), lhs_mat_ymm_23_03, 1); - - __m512i lhs_mat_01_10 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_10), lhs_mat_ymm_01_10, 1); - __m512i lhs_mat_23_10 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_10), lhs_mat_ymm_23_10, 1); - __m512i lhs_mat_01_11 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_11), lhs_mat_ymm_01_11, 1); - __m512i lhs_mat_23_11 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_11), lhs_mat_ymm_23_11, 1); - __m512i lhs_mat_01_12 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_12), lhs_mat_ymm_01_12, 1); - __m512i lhs_mat_23_12 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_12), lhs_mat_ymm_23_12, 1); - __m512i lhs_mat_01_13 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_01_13), lhs_mat_ymm_01_13, 1); - __m512i lhs_mat_23_13 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_mat_ymm_23_13), lhs_mat_ymm_23_13, 1); - - // Bsums are loaded - 
four bsums are loaded (for two sub blocks) for the different Q8_K blocks - __m256i lhs_bsums_0123_01 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].bsums + 16 * sb))); - __m256i lhs_bsums_hsum_ymm_0123_01 = _mm256_castsi128_si256(_mm_hadd_epi16(_mm256_castsi256_si128(lhs_bsums_0123_01), _mm256_extractf128_si256(lhs_bsums_0123_01, 1))); - lhs_bsums_hsum_ymm_0123_01 = _mm256_permute2x128_si256(lhs_bsums_hsum_ymm_0123_01, lhs_bsums_hsum_ymm_0123_01, 0); - __m512i lhs_bsums_hsum_0123_01 = _mm512_inserti32x8(_mm512_castsi256_si512(lhs_bsums_hsum_ymm_0123_01), lhs_bsums_hsum_ymm_0123_01, 1); - - // Shuffle pattern one - left side input - const __m512i lhs_mat_01_00_sp1 = _mm512_shuffle_epi32(lhs_mat_01_00, (_MM_PERM_ENUM)160); //A00(0-3) A00(0-3) A01(0-3) A01(0-3) A00(0-3) A00(0-3) A01(0-3) A01(0-3) A00(0-3) A00(0-3) A01(0-3) A01(0-3) A00(0-3) A00(0-3) A01(0-3) A01(0-3) - const __m512i lhs_mat_23_00_sp1 = _mm512_shuffle_epi32(lhs_mat_23_00, (_MM_PERM_ENUM)160); //A02(0-3) A02(0-3) A03(0-3) A03(0-3) A02(0-3) A02(0-3) A03(0-3) A03(0-3) A02(0-3) A02(0-3) A03(0-3) A03(0-3) A02(0-3) A02(0-3) A03(0-3) A03(0-3) - const __m512i lhs_mat_01_01_sp1 = _mm512_shuffle_epi32(lhs_mat_01_01, (_MM_PERM_ENUM)160); //A00(8-11) A00(8-11) A01(8-11) A01(8-11) A00(8-11) A00(8-11) A01(8-11) A01(8-11) A00(8-11) A00(8-11) A01(8-11) A01(8-11) A00(8-11) A00(8-11) A01(8-11) A01(8-11) - const __m512i lhs_mat_23_01_sp1 = _mm512_shuffle_epi32(lhs_mat_23_01, (_MM_PERM_ENUM)160); //A02(8-11) A02(8-11) A03(8-11) A03(8-11) A02(8-11) A02(8-11) A03(8-11) A03(8-11) A02(8-11) A02(8-11) A03(8-11) A03(8-11) A02(8-11) A02(8-11) A03(8-11) A03(8-11) - const __m512i lhs_mat_01_02_sp1 = _mm512_shuffle_epi32(lhs_mat_01_02, (_MM_PERM_ENUM)160); //A00(16-19) A00(16-19) A01(16-19) A01(16-19) A00(16-19) A00(16-19) A01(16-19) A01(16-19) A00(16-19) A00(16-19) A01(16-19) A01(16-19) A00(16-19) A00(16-19) A01(16-19) A01(16-19) - const __m512i lhs_mat_23_02_sp1 = _mm512_shuffle_epi32(lhs_mat_23_02, (_MM_PERM_ENUM)160); //A02(16-19) A02(16-19) A03(16-19) A03(16-19) A02(16-19) A02(16-19) A03(16-19) A03(16-19) A02(16-19) A02(16-19) A03(16-19) A03(16-19) A02(16-19) A02(16-19) A03(16-19) A03(16-19) - const __m512i lhs_mat_01_03_sp1 = _mm512_shuffle_epi32(lhs_mat_01_03, (_MM_PERM_ENUM)160); //A00(24-27) A00(24-27) A01(24-27) A01(24-27) A00(24-27) A00(24-27) A01(24-27) A01(24-27) A00(24-27) A00(24-27) A01(24-27) A01(24-27) A00(24-27) A00(24-27) A01(24-27) A01(24-27) - const __m512i lhs_mat_23_03_sp1 = _mm512_shuffle_epi32(lhs_mat_23_03, (_MM_PERM_ENUM)160); //A02(24-27) A02(24-27) A03(24-27) A03(24-27) A02(24-27) A02(24-27) A03(24-27) A03(24-27) A02(24-27) A02(24-27) A03(24-27) A03(24-27) A02(24-27) A02(24-27) A03(24-27) A03(24-27) - - const __m512i lhs_mat_01_10_sp1 = _mm512_shuffle_epi32(lhs_mat_01_10, (_MM_PERM_ENUM)160); //A10(0-3) A10(0-3) A11(0-3) A11(0-3) A10(0-3) A10(0-3) A11(0-3) A11(0-3) A10(0-3) A10(0-3) A11(0-3) A11(0-3) A10(0-3) A10(0-3) A11(0-3) A11(0-3) - const __m512i lhs_mat_23_10_sp1 = _mm512_shuffle_epi32(lhs_mat_23_10, (_MM_PERM_ENUM)160); //A12(0-3) A12(0-3) A13(0-3) A13(0-3) A12(0-3) A12(0-3) A13(0-3) A13(0-3) A12(0-3) A12(0-3) A13(0-3) A13(0-3) A12(0-3) A12(0-3) A13(0-3) A13(0-3) - const __m512i lhs_mat_01_11_sp1 = _mm512_shuffle_epi32(lhs_mat_01_11, (_MM_PERM_ENUM)160); //A10(8-11) A10(8-11) A11(8-11) A11(8-11) A10(8-11) A10(8-11) A11(8-11) A11(8-11) A10(8-11) A10(8-11) A11(8-11) A11(8-11) A10(8-11) A10(8-11) A11(8-11) A11(8-11) - const __m512i lhs_mat_23_11_sp1 = _mm512_shuffle_epi32(lhs_mat_23_11, (_MM_PERM_ENUM)160); //A12(8-11) 
A12(8-11) A13(8-11) A13(8-11) A12(8-11) A12(8-11) A13(8-11) A13(8-11) A12(8-11) A12(8-11) A13(8-11) A13(8-11) A12(8-11) A12(8-11) A13(8-11) A13(8-11) - const __m512i lhs_mat_01_12_sp1 = _mm512_shuffle_epi32(lhs_mat_01_12, (_MM_PERM_ENUM)160); //A10(16-19) A10(16-19) A11(16-19) A11(16-19) A10(16-19) A10(16-19) A11(16-19) A11(16-19) A10(16-19) A10(16-19) A11(16-19) A11(16-19) A10(16-19) A10(16-19) A11(16-19) A11(16-19) - const __m512i lhs_mat_23_12_sp1 = _mm512_shuffle_epi32(lhs_mat_23_12, (_MM_PERM_ENUM)160); //A12(16-19) A12(16-19) A13(16-19) A13(16-19) A12(16-19) A12(16-19) A13(16-19) A13(16-19) A12(16-19) A12(16-19) A13(16-19) A13(16-19) A12(16-19) A12(16-19) A13(16-19) A13(16-19) - const __m512i lhs_mat_01_13_sp1 = _mm512_shuffle_epi32(lhs_mat_01_13, (_MM_PERM_ENUM)160); //A10(24-27) A10(24-27) A11(24-27) A11(24-27) A10(24-27) A10(24-27) A11(24-27) A11(24-27) A10(24-27) A10(24-27) A11(24-27) A11(24-27) A10(24-27) A10(24-27) A11(24-27) A11(24-27) - const __m512i lhs_mat_23_13_sp1 = _mm512_shuffle_epi32(lhs_mat_23_13, (_MM_PERM_ENUM)160); //A12(24-27) A12(24-27) A13(24-27) A13(24-27) A12(24-27) A12(24-27) A13(24-27) A13(24-27) A12(24-27) A12(24-27) A13(24-27) A13(24-27) A12(24-27) A12(24-27) A13(24-27) A13(24-27) - - const __m512i lhs_mat_01_00_sp2 = _mm512_shuffle_epi32(lhs_mat_01_00, (_MM_PERM_ENUM)245); //A00(4-7) A00(4-7) A01(4-7) A01(4-7) A00(4-7) A00(4-7) A01(4-7) A01(4-7) A00(4-7) A00(4-7) A01(4-7) A01(4-7) A00(4-7) A00(4-7) A01(4-7) A01(4-7) - const __m512i lhs_mat_23_00_sp2 = _mm512_shuffle_epi32(lhs_mat_23_00, (_MM_PERM_ENUM)245); //A02(4-7) A02(4-7) A03(4-7) A03(4-7) A02(4-7) A02(4-7) A03(4-7) A03(4-7) A02(4-7) A02(4-7) A03(4-7) A03(4-7) A02(4-7) A02(4-7) A03(4-7) A03(4-7) - const __m512i lhs_mat_01_01_sp2 = _mm512_shuffle_epi32(lhs_mat_01_01, (_MM_PERM_ENUM)245); //A00(12-15) A00(12-15) A01(12-15) A01(12-15) A00(12-15) A00(12-15) A01(12-15) A01(12-15) A00(12-15) A00(12-15) A01(12-15) A01(12-15) A00(12-15) A00(12-15) A01(12-15) A01(12-15) - const __m512i lhs_mat_23_01_sp2 = _mm512_shuffle_epi32(lhs_mat_23_01, (_MM_PERM_ENUM)245); //A02(12-15) A02(12-15) A03(12-15) A03(12-15) A02(12-15) A02(12-15) A03(12-15) A03(12-15) A02(12-15) A02(12-15) A03(12-15) A03(12-15) A02(12-15) A02(12-15) A03(12-15) A03(12-15) - const __m512i lhs_mat_01_02_sp2 = _mm512_shuffle_epi32(lhs_mat_01_02, (_MM_PERM_ENUM)245); //A00(20-23) A00(20-23) A01(20-23) A01(20-23) A00(20-23) A00(20-23) A01(20-23) A01(20-23) A00(20-23) A00(20-23) A01(20-23) A01(20-23) A00(20-23) A00(20-23) A01(20-23) A01(20-23) - const __m512i lhs_mat_23_02_sp2 = _mm512_shuffle_epi32(lhs_mat_23_02, (_MM_PERM_ENUM)245); //A02(20-23) A02(20-23) A03(20-23) A03(20-23) A02(20-23) A02(20-23) A03(20-23) A03(20-23) A02(20-23) A02(20-23) A03(20-23) A03(20-23) A02(20-23) A02(20-23) A03(20-23) A03(20-23) - const __m512i lhs_mat_01_03_sp2 = _mm512_shuffle_epi32(lhs_mat_01_03, (_MM_PERM_ENUM)245); //A00(28-31) A00(28-31) A01(28-31) A01(28-31) A00(28-31) A00(28-31) A01(28-31) A01(28-31) A00(28-31) A00(28-31) A01(28-31) A01(28-31) A00(28-31) A00(28-31) A01(28-31) A01(28-31) - const __m512i lhs_mat_23_03_sp2 = _mm512_shuffle_epi32(lhs_mat_23_03, (_MM_PERM_ENUM)245); //A02(28-31) A02(28-31) A03(28-31) A03(28-31) A02(28-31) A02(28-31) A03(28-31) A03(28-31) A02(28-31) A02(28-31) A03(28-31) A03(28-31) A02(28-31) A02(28-31) A03(28-31) A03(28-31) - - const __m512i lhs_mat_01_10_sp2 = _mm512_shuffle_epi32(lhs_mat_01_10, (_MM_PERM_ENUM)245); //A10(4-7) A10(4-7) A11(4-7) A11(4-7) A10(4-7) A10(4-7) A11(4-7) A11(4-7) A10(4-7) A10(4-7) A11(4-7) A11(4-7) A10(4-7) 
A10(4-7) A11(4-7) A11(4-7) - const __m512i lhs_mat_23_10_sp2 = _mm512_shuffle_epi32(lhs_mat_23_10, (_MM_PERM_ENUM)245); //A12(4-7) A12(4-7) A13(4-7) A13(4-7) A12(4-7) A12(4-7) A13(4-7) A13(4-7) A12(4-7) A12(4-7) A13(4-7) A13(4-7) A12(4-7) A12(4-7) A13(4-7) A13(4-7) - const __m512i lhs_mat_01_11_sp2 = _mm512_shuffle_epi32(lhs_mat_01_11, (_MM_PERM_ENUM)245); //A10(12-15) A10(12-15) A11(12-15) A11(12-15) A10(12-15) A10(12-15) A11(12-15) A11(12-15) A10(12-15) A10(12-15) A11(12-15) A11(12-15) A10(12-15) A10(12-15) A11(12-15) A11(12-15) - const __m512i lhs_mat_23_11_sp2 = _mm512_shuffle_epi32(lhs_mat_23_11, (_MM_PERM_ENUM)245); //A12(12-15) A12(12-15) A13(12-15) A13(12-15) A12(12-15) A12(12-15) A13(12-15) A13(12-15) A12(12-15) A12(12-15) A13(12-15) A13(12-15) A12(12-15) A12(12-15) A13(12-15) A13(12-15) - const __m512i lhs_mat_01_12_sp2 = _mm512_shuffle_epi32(lhs_mat_01_12, (_MM_PERM_ENUM)245); //A10(20-23) A10(20-23) A11(20-23) A11(20-23) A10(20-23) A10(20-23) A11(20-23) A11(20-23) A10(20-23) A10(20-23) A11(20-23) A11(20-23) A10(20-23) A10(20-23) A11(20-23) A11(20-23) - const __m512i lhs_mat_23_12_sp2 = _mm512_shuffle_epi32(lhs_mat_23_12, (_MM_PERM_ENUM)245); //A12(20-23) A12(20-23) A13(20-23) A13(20-23) A12(20-23) A12(20-23) A13(20-23) A13(20-23) A12(20-23) A12(20-23) A13(20-23) A13(20-23) A12(20-23) A12(20-23) A13(20-23) A13(20-23) - const __m512i lhs_mat_01_13_sp2 = _mm512_shuffle_epi32(lhs_mat_01_13, (_MM_PERM_ENUM)245); //A10(28-31) A10(28-31) A11(28-31) A11(28-31) A10(28-31) A10(28-31) A11(28-31) A11(28-31) A10(28-31) A10(28-31) A11(28-31) A11(28-31) A10(28-31) A10(28-31) A11(28-31) A11(28-31) - const __m512i lhs_mat_23_13_sp2 = _mm512_shuffle_epi32(lhs_mat_23_13, (_MM_PERM_ENUM)245); //A12(28-31) A12(28-31) A13(28-31) A13(28-31) A12(28-31) A12(28-31) A13(28-31) A13(28-31) A12(28-31) A12(28-31) A13(28-31) A13(28-31) A12(28-31) A12(28-31) A13(28-31) A13(28-31) - - // The values arranged in shuffle patterns are operated with dot product operation within 32 bit lane i.e corresponding bytes and multiplied and added into 32 bit integers within 32 bit lane - __m512i iacc_mat_00_0_sp1 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_03_sp1, lhs_mat_01_03_sp1), _mm512_maddubs_epi16(rhs_mat_014589CD_02_sp1, lhs_mat_01_02_sp1)), _mm512_maddubs_epi16(rhs_mat_014589CD_01_sp1, lhs_mat_01_01_sp1)), _mm512_maddubs_epi16(rhs_mat_014589CD_00_sp1, lhs_mat_01_00_sp1)); - __m512i iacc_mat_01_0_sp1 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_03_sp1, lhs_mat_01_03_sp1), _mm512_maddubs_epi16(rhs_mat_2367ABEF_02_sp1, lhs_mat_01_02_sp1)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_01_sp1, lhs_mat_01_01_sp1)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_00_sp1, lhs_mat_01_00_sp1)); - __m512i iacc_mat_10_0_sp1 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_03_sp1, lhs_mat_23_03_sp1), _mm512_maddubs_epi16(rhs_mat_014589CD_02_sp1, lhs_mat_23_02_sp1)), _mm512_maddubs_epi16(rhs_mat_014589CD_01_sp1, lhs_mat_23_01_sp1)), _mm512_maddubs_epi16(rhs_mat_014589CD_00_sp1, lhs_mat_23_00_sp1)); - __m512i iacc_mat_11_0_sp1 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_03_sp1, lhs_mat_23_03_sp1), _mm512_maddubs_epi16(rhs_mat_2367ABEF_02_sp1, lhs_mat_23_02_sp1)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_01_sp1, lhs_mat_23_01_sp1)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_00_sp1, lhs_mat_23_00_sp1)); - __m512i iacc_mat_00_1_sp1 = 
_mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_13_sp1, lhs_mat_01_13_sp1), _mm512_maddubs_epi16(rhs_mat_014589CD_12_sp1, lhs_mat_01_12_sp1)), _mm512_maddubs_epi16(rhs_mat_014589CD_11_sp1, lhs_mat_01_11_sp1)), _mm512_maddubs_epi16(rhs_mat_014589CD_10_sp1, lhs_mat_01_10_sp1)); - __m512i iacc_mat_01_1_sp1 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_13_sp1, lhs_mat_01_13_sp1), _mm512_maddubs_epi16(rhs_mat_2367ABEF_12_sp1, lhs_mat_01_12_sp1)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_11_sp1, lhs_mat_01_11_sp1)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_10_sp1, lhs_mat_01_10_sp1)); - __m512i iacc_mat_10_1_sp1 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_13_sp1, lhs_mat_23_13_sp1), _mm512_maddubs_epi16(rhs_mat_014589CD_12_sp1, lhs_mat_23_12_sp1)), _mm512_maddubs_epi16(rhs_mat_014589CD_11_sp1, lhs_mat_23_11_sp1)), _mm512_maddubs_epi16(rhs_mat_014589CD_10_sp1, lhs_mat_23_10_sp1)); - __m512i iacc_mat_11_1_sp1 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_13_sp1, lhs_mat_23_13_sp1), _mm512_maddubs_epi16(rhs_mat_2367ABEF_12_sp1, lhs_mat_23_12_sp1)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_11_sp1, lhs_mat_23_11_sp1)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_10_sp1, lhs_mat_23_10_sp1)); - - __m512i iacc_mat_00_0_sp2 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_03_sp2, lhs_mat_01_03_sp2), _mm512_maddubs_epi16(rhs_mat_014589CD_02_sp2, lhs_mat_01_02_sp2)), _mm512_maddubs_epi16(rhs_mat_014589CD_01_sp2, lhs_mat_01_01_sp2)), _mm512_maddubs_epi16(rhs_mat_014589CD_00_sp2, lhs_mat_01_00_sp2)); - __m512i iacc_mat_01_0_sp2 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_03_sp2, lhs_mat_01_03_sp2), _mm512_maddubs_epi16(rhs_mat_2367ABEF_02_sp2, lhs_mat_01_02_sp2)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_01_sp2, lhs_mat_01_01_sp2)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_00_sp2, lhs_mat_01_00_sp2)); - __m512i iacc_mat_10_0_sp2 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_03_sp2, lhs_mat_23_03_sp2), _mm512_maddubs_epi16(rhs_mat_014589CD_02_sp2, lhs_mat_23_02_sp2)), _mm512_maddubs_epi16(rhs_mat_014589CD_01_sp2, lhs_mat_23_01_sp2)), _mm512_maddubs_epi16(rhs_mat_014589CD_00_sp2, lhs_mat_23_00_sp2)); - __m512i iacc_mat_11_0_sp2 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_03_sp2, lhs_mat_23_03_sp2), _mm512_maddubs_epi16(rhs_mat_2367ABEF_02_sp2, lhs_mat_23_02_sp2)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_01_sp2, lhs_mat_23_01_sp2)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_00_sp2, lhs_mat_23_00_sp2)); - __m512i iacc_mat_00_1_sp2 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_13_sp2, lhs_mat_01_13_sp2), _mm512_maddubs_epi16(rhs_mat_014589CD_12_sp2, lhs_mat_01_12_sp2)), _mm512_maddubs_epi16(rhs_mat_014589CD_11_sp2, lhs_mat_01_11_sp2)), _mm512_maddubs_epi16(rhs_mat_014589CD_10_sp2, lhs_mat_01_10_sp2)); - __m512i iacc_mat_01_1_sp2 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_13_sp2, lhs_mat_01_13_sp2), _mm512_maddubs_epi16(rhs_mat_2367ABEF_12_sp2, lhs_mat_01_12_sp2)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_11_sp2, lhs_mat_01_11_sp2)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_10_sp2, lhs_mat_01_10_sp2)); - __m512i iacc_mat_10_1_sp2 = 
_mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_014589CD_13_sp2, lhs_mat_23_13_sp2), _mm512_maddubs_epi16(rhs_mat_014589CD_12_sp2, lhs_mat_23_12_sp2)), _mm512_maddubs_epi16(rhs_mat_014589CD_11_sp2, lhs_mat_23_11_sp2)), _mm512_maddubs_epi16(rhs_mat_014589CD_10_sp2, lhs_mat_23_10_sp2)); - __m512i iacc_mat_11_1_sp2 = _mm512_add_epi16(_mm512_add_epi16(_mm512_add_epi16(_mm512_maddubs_epi16(rhs_mat_2367ABEF_13_sp2, lhs_mat_23_13_sp2), _mm512_maddubs_epi16(rhs_mat_2367ABEF_12_sp2, lhs_mat_23_12_sp2)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_11_sp2, lhs_mat_23_11_sp2)), _mm512_maddubs_epi16(rhs_mat_2367ABEF_10_sp2, lhs_mat_23_10_sp2)); - - // Output of both shuffle patterns are added in order to sum dot product outputs of all 32 values in block - __m512i iacc_mat_00_0 = _mm512_add_epi16(iacc_mat_00_0_sp1, iacc_mat_00_0_sp2); - __m512i iacc_mat_01_0 = _mm512_add_epi16(iacc_mat_01_0_sp1, iacc_mat_01_0_sp2); - __m512i iacc_mat_10_0 = _mm512_add_epi16(iacc_mat_10_0_sp1, iacc_mat_10_0_sp2); - __m512i iacc_mat_11_0 = _mm512_add_epi16(iacc_mat_11_0_sp1, iacc_mat_11_0_sp2); - - __m512i iacc_mat_00_1 = _mm512_add_epi16(iacc_mat_00_1_sp1, iacc_mat_00_1_sp2); - __m512i iacc_mat_01_1 = _mm512_add_epi16(iacc_mat_01_1_sp1, iacc_mat_01_1_sp2); - __m512i iacc_mat_10_1 = _mm512_add_epi16(iacc_mat_10_1_sp1, iacc_mat_10_1_sp2); - __m512i iacc_mat_11_1 = _mm512_add_epi16(iacc_mat_11_1_sp1, iacc_mat_11_1_sp2); - - iacc_mat_00_0 = _mm512_madd_epi16(iacc_mat_00_0, scale_014589CD_0); - iacc_mat_01_0 = _mm512_madd_epi16(iacc_mat_01_0, scale_2367ABEF_0); - iacc_mat_10_0 = _mm512_madd_epi16(iacc_mat_10_0, scale_014589CD_0); - iacc_mat_11_0 = _mm512_madd_epi16(iacc_mat_11_0, scale_2367ABEF_0); - - iacc_mat_00_1 = _mm512_madd_epi16(iacc_mat_00_1, scale_014589CD_1); - iacc_mat_01_1 = _mm512_madd_epi16(iacc_mat_01_1, scale_2367ABEF_1); - iacc_mat_10_1 = _mm512_madd_epi16(iacc_mat_10_1, scale_014589CD_1); - iacc_mat_11_1 = _mm512_madd_epi16(iacc_mat_11_1, scale_2367ABEF_1); - - // Straighten out to make 4 row vectors (4 for each sub block which are accumulated together in the next step) - __m512i iacc_row_0_0 = _mm512_mask_blend_epi32(0xCCCC, iacc_mat_00_0, _mm512_shuffle_epi32(iacc_mat_01_0, (_MM_PERM_ENUM)78)); - __m512i iacc_row_1_0 = _mm512_mask_blend_epi32(0xCCCC, _mm512_shuffle_epi32(iacc_mat_00_0, (_MM_PERM_ENUM)78), iacc_mat_01_0); - __m512i iacc_row_2_0 = _mm512_mask_blend_epi32(0xCCCC, iacc_mat_10_0, _mm512_shuffle_epi32(iacc_mat_11_0, (_MM_PERM_ENUM)78)); - __m512i iacc_row_3_0 = _mm512_mask_blend_epi32(0xCCCC, _mm512_shuffle_epi32(iacc_mat_10_0, (_MM_PERM_ENUM)78), iacc_mat_11_0); - __m512i iacc_row_0_1 = _mm512_mask_blend_epi32(0xCCCC, iacc_mat_00_1, _mm512_shuffle_epi32(iacc_mat_01_1, (_MM_PERM_ENUM)78)); - __m512i iacc_row_1_1 = _mm512_mask_blend_epi32(0xCCCC, _mm512_shuffle_epi32(iacc_mat_00_1, (_MM_PERM_ENUM)78), iacc_mat_01_1); - __m512i iacc_row_2_1 = _mm512_mask_blend_epi32(0xCCCC, iacc_mat_10_1, _mm512_shuffle_epi32(iacc_mat_11_1, (_MM_PERM_ENUM)78)); - __m512i iacc_row_3_1 = _mm512_mask_blend_epi32(0xCCCC,_mm512_shuffle_epi32(iacc_mat_10_1, (_MM_PERM_ENUM)78), iacc_mat_11_1); - - __m512i iacc_row_0 = _mm512_add_epi32(iacc_row_0_0, iacc_row_0_1); - __m512i iacc_row_1 = _mm512_add_epi32(iacc_row_1_0, iacc_row_1_1); - __m512i iacc_row_2 = _mm512_add_epi32(iacc_row_2_0, iacc_row_2_1); - __m512i iacc_row_3 = _mm512_add_epi32(iacc_row_3_0, iacc_row_3_1); - - // Load the scale(d) values for all the 4 Q8_k blocks and repeat it across lanes - const __m128 row_scale_f32_sse = 
_mm_load_ps(a_ptr[b].d);
-                    const __m256 row_scale_f32_ymm = _mm256_set_m128(row_scale_f32_sse, row_scale_f32_sse);
-                    const __m512 row_scale_f32 = _mm512_insertf32x8(_mm512_castps256_ps512(row_scale_f32_ymm), row_scale_f32_ymm, 1);
-
-                    // Multiply with appropriate scales and accumulate (for both d and dmin) below
-                    acc_rows[0] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_0), _mm512_mul_ps(col_scale_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 0)), acc_rows[0]);
-                    acc_rows[1] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_1), _mm512_mul_ps(col_scale_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 85)), acc_rows[1]);
-                    acc_rows[2] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_2), _mm512_mul_ps(col_scale_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 170)), acc_rows[2]);
-                    acc_rows[3] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_3), _mm512_mul_ps(col_scale_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 255)), acc_rows[3]);
-
-                    __m512i iacc_row_min_0 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_hsum_0123_01, (_MM_PERM_ENUM)0), mins_01);
-                    __m512i iacc_row_min_1 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_hsum_0123_01, (_MM_PERM_ENUM)85), mins_01);
-                    __m512i iacc_row_min_2 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_hsum_0123_01, (_MM_PERM_ENUM)170), mins_01);
-                    __m512i iacc_row_min_3 = _mm512_madd_epi16(_mm512_shuffle_epi32(lhs_bsums_hsum_0123_01, (_MM_PERM_ENUM)255), mins_01);
-
-                    acc_min_rows[0] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_min_0), _mm512_mul_ps(col_dmin_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 0)), acc_min_rows[0]);
-                    acc_min_rows[1] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_min_1), _mm512_mul_ps(col_dmin_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 85)), acc_min_rows[1]);
-                    acc_min_rows[2] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_min_2), _mm512_mul_ps(col_dmin_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 170)), acc_min_rows[2]);
-                    acc_min_rows[3] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(iacc_row_min_3), _mm512_mul_ps(col_dmin_f32, _mm512_shuffle_ps(row_scale_f32, row_scale_f32, 255)), acc_min_rows[3]);
-                }
-            }
-            // Store accumulated values
-            for (int i = 0; i < 4; i++) {
-                _mm512_storeu_ps((float * )(s + ((y * 4 + i) * bs + x * 8)), _mm512_sub_ps(acc_rows[i], acc_min_rows[i]));
-            }
-        }
-    }
-    if (anc != nc) {
-        xstart = anc/8;
-        y = 0;
-    }
-#endif //AVX512F
-
-    // Take group of four block_q8_Kx4 structures at each pass of the loop and perform dot product operation
-    for (; y < anr / 4; y += 4) {
-
-        const block_q8_Kx4 * a_ptrs[4];
-
-        a_ptrs[0] = a_ptr_start + (y * nb);
-        for (int i = 0; i < 3; ++i) {
-            a_ptrs[i + 1] = a_ptrs[i] + nb;
-        }
-
-        // Take group of eight block_q4_Kx8 structures at each pass of the loop and perform dot product operation
-        for (int64_t x = xstart; x < nc / 8; x++) {
-
-            const block_q4_Kx8 * b_ptr = b_ptr_start + (x * b_nb);
-
-            // Master FP accumulators
-            __m256 acc_rows[16];
-            for (int i = 0; i < 16; i++) {
-                acc_rows[i] = _mm256_setzero_ps();
-            }
-
-            __m256 acc_min_rows[16];
-            for (int i = 0; i < 16; i++) {
-                acc_min_rows[i] = _mm256_setzero_ps();
-            }
-
-            // For super block
-            for (int64_t b = 0; b < nb; b++) {
-
-                // Scale values - Load the eight scale values of block_q4_Kx8
-                const __m256 col_scale_f32 = GGML_F32Cx8_LOAD(b_ptr[b].d);
-
-                // dmin values - Load the eight dmin values of block_q4_Kx8
-                const __m256 col_dmin_f32 = GGML_F32Cx8_LOAD(b_ptr[b].dmin);
-
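For reference: the twin accumulators exist because Q4_K dequantizes as w = d*sc*q - dmin*m, so a super block's contribution to one output cell factors into a d term and a dmin term, and the dmin term only needs the Q8_K block sums (bsums) rather than the individual values. A scalar sketch of the value the store step reconstructs (illustrative names, not part of the patch):

#include <stdint.h>

// One output cell, one super block, under w = d*sc*q - dmin*m:
//   dot(w, a) = d_b*d_a * sum_sb(sc[sb] * dot32(q[sb], q8[sb]))
//             - dmin_b*d_a * sum_sb(m[sb] * bsum[sb])
// acc_rows holds the first term, acc_min_rows the second; the final store
// takes their difference, just like the _mm512_sub_ps above.
float cell_value(float d_b, float dmin_b, float d_a,
                 const int32_t scaled_dots[8],  // sc[sb] * dot32(...)
                 const int32_t min_bsums[8]) {  // m[sb] * bsum[sb]
    int32_t dot_acc = 0;
    int32_t min_acc = 0;
    for (int sb = 0; sb < 8; sb++) {
        dot_acc += scaled_dots[sb];
        min_acc += min_bsums[sb];
    }
    return d_b * d_a * (float) dot_acc - dmin_b * d_a * (float) min_acc;
}

-                // Loop to iterate over the eight sub blocks of a super block - two sub blocks are processed per iteration - 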
for (int sb = 0; sb < QK_K / 64; sb++) { - - // Load the eight block_q4_K for two sub blocks quantized values interleaved with each other in chunks of eight bytes - B0,B1 ....B6,B7 - const __m256i rhs_raw_mat_0123_0 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + sb * 256)); - const __m256i rhs_raw_mat_4567_0 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 32 + sb * 256)); - const __m256i rhs_raw_mat_0123_1 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 64 + sb * 256)); - const __m256i rhs_raw_mat_4567_1 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 96 + sb * 256)); - const __m256i rhs_raw_mat_0123_2 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 128 + sb * 256)); - const __m256i rhs_raw_mat_4567_2 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 160 + sb * 256)); - const __m256i rhs_raw_mat_0123_3 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 192 + sb * 256)); - const __m256i rhs_raw_mat_4567_3 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 224 + sb * 256)); - - // Save the values in the following vectors in the formats B0B1B4B5, B2B3B6B7 for further processing and storing of values - const __m256i rhs_raw_mat_0145_0 = _mm256_blend_epi32(rhs_raw_mat_0123_0, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_0, requiredOrder), 240); - const __m256i rhs_raw_mat_2367_0 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_0, requiredOrder), rhs_raw_mat_4567_0, 240); - const __m256i rhs_raw_mat_0145_1 = _mm256_blend_epi32(rhs_raw_mat_0123_1, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_1, requiredOrder), 240); - const __m256i rhs_raw_mat_2367_1 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_1, requiredOrder), rhs_raw_mat_4567_1, 240); - const __m256i rhs_raw_mat_0145_2 = _mm256_blend_epi32(rhs_raw_mat_0123_2, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_2, requiredOrder), 240); - const __m256i rhs_raw_mat_2367_2 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_2, requiredOrder), rhs_raw_mat_4567_2, 240); - const __m256i rhs_raw_mat_0145_3 = _mm256_blend_epi32(rhs_raw_mat_0123_3, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_3, requiredOrder), 240); - const __m256i rhs_raw_mat_2367_3 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_3, requiredOrder), rhs_raw_mat_4567_3, 240); - - // 4-bit -> 8-bit - // First sub block of the two sub blocks processed in the iteration - const __m256i rhs_mat_0145_00 = _mm256_and_si256(rhs_raw_mat_0145_0, m4b); //B00(0-7) B01(0-7) B04(0-7) B05(0-7) - const __m256i rhs_mat_2367_00 = _mm256_and_si256(rhs_raw_mat_2367_0, m4b); //B02(0-7) B03(0-7) B06(0-7) B07(0-7) - - const __m256i rhs_mat_0145_01 = _mm256_and_si256(rhs_raw_mat_0145_1, m4b); //B00(8-15) B01(8-15) B04(8-15) B05(8-15) - const __m256i rhs_mat_2367_01 = _mm256_and_si256(rhs_raw_mat_2367_1, m4b); //B02(8-15) B03(8-15) B06(8-15) B07(8-15) - - const __m256i rhs_mat_0145_02 = _mm256_and_si256(rhs_raw_mat_0145_2, m4b); //B00(16-23) B01(16-23) B04(16-23) B05(16-23) - const __m256i rhs_mat_2367_02 = _mm256_and_si256(rhs_raw_mat_2367_2, m4b); //B02(16-23) B03(16-23) B06(16-23) B07(16-23) - - const __m256i rhs_mat_0145_03 = _mm256_and_si256(rhs_raw_mat_0145_3, m4b); //B00(24-31) B01(24-31) B04(24-31) B05(24-31) - const __m256i rhs_mat_2367_03 = _mm256_and_si256(rhs_raw_mat_2367_3, m4b); //B02(24-31) B03(24-31) B06(24-31) B07(24-31) - - // Second sub block of the two sub blocks processed in the iteration - const __m256i rhs_mat_0145_10 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_0, 4), m4b); 
//B10(0-7) B11(0-7) B14(0-7) B15(0-7)
-                    const __m256i rhs_mat_2367_10 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_0, 4), m4b); //B12(0-7) B13(0-7) B16(0-7) B17(0-7)
-
-                    const __m256i rhs_mat_0145_11 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_1, 4), m4b); //B10(8-15) B11(8-15) B14(8-15) B15(8-15)
-                    const __m256i rhs_mat_2367_11 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_1, 4), m4b); //B12(8-15) B13(8-15) B16(8-15) B17(8-15)
-
-                    const __m256i rhs_mat_0145_12 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_2, 4), m4b); //B10(16-23) B11(16-23) B14(16-23) B15(16-23)
-                    const __m256i rhs_mat_2367_12 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_2, 4), m4b); //B12(16-23) B13(16-23) B16(16-23) B17(16-23)
-
-                    const __m256i rhs_mat_0145_13 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_3, 4), m4b); //B10(24-31) B11(24-31) B14(24-31) B15(24-31)
-                    const __m256i rhs_mat_2367_13 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_3, 4), m4b); //B12(24-31) B13(24-31) B16(24-31) B17(24-31)
-
-                    // Shuffle pattern one - right side input
-                    const __m256i rhs_mat_0145_00_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_00, 136); //B00(0-3) B01(0-3) B00(0-3) B01(0-3) B04(0-3) B05(0-3) B04(0-3) B05(0-3)
-                    const __m256i rhs_mat_2367_00_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_00, 136); //B02(0-3) B03(0-3) B02(0-3) B03(0-3) B06(0-3) B07(0-3) B06(0-3) B07(0-3)
-
-                    const __m256i rhs_mat_0145_01_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_01, 136); //B00(8-11) B01(8-11) B00(8-11) B01(8-11) B04(8-11) B05(8-11) B04(8-11) B05(8-11)
-                    const __m256i rhs_mat_2367_01_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_01, 136); //B02(8-11) B03(8-11) B02(8-11) B03(8-11) B06(8-11) B07(8-11) B06(8-11) B07(8-11)
-
-                    const __m256i rhs_mat_0145_02_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_02, 136); //B00(16-19) B01(16-19) B00(16-19) B01(16-19) B04(16-19) B05(16-19) B04(16-19) B05(16-19)
-                    const __m256i rhs_mat_2367_02_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_02, 136); //B02(16-19) B03(16-19) B02(16-19) B03(16-19) B06(16-19) B07(16-19) B06(16-19) B07(16-19)
-
-                    const __m256i rhs_mat_0145_03_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_03, 136); //B00(24-27) B01(24-27) B00(24-27) B01(24-27) B04(24-27) B05(24-27) B04(24-27) B05(24-27)
-                    const __m256i rhs_mat_2367_03_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_03, 136); //B02(24-27) B03(24-27) B02(24-27) B03(24-27) B06(24-27) B07(24-27) B06(24-27) B07(24-27)
-
-                    const __m256i rhs_mat_0145_10_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_10, 136); //B10(0-3) B11(0-3) B10(0-3) B11(0-3) B14(0-3) B15(0-3) B14(0-3) B15(0-3)
-                    const __m256i rhs_mat_2367_10_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_10, 136); //B12(0-3) B13(0-3) B12(0-3) B13(0-3) B16(0-3) B17(0-3) B16(0-3) B17(0-3)
-
-                    const __m256i rhs_mat_0145_11_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_11, 136); //B10(8-11) B11(8-11) B10(8-11) B11(8-11) B14(8-11) B15(8-11) B14(8-11) B15(8-11)
-                    const __m256i rhs_mat_2367_11_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_11, 136); //B12(8-11) B13(8-11) B12(8-11) B13(8-11) B16(8-11) B17(8-11) B16(8-11) B17(8-11)
-
-                    const __m256i rhs_mat_0145_12_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_12, 136); //B10(16-19) B11(16-19) B10(16-19) B11(16-19) B14(16-19) B15(16-19) B14(16-19) B15(16-19)
-                    const __m256i rhs_mat_2367_12_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_12, 136); //B12(16-19) B13(16-19) B12(16-19) B13(16-19) B16(16-19) B17(16-19) B16(16-19) B17(16-19)
-
-                    const __m256i rhs_mat_0145_13_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_13, 136); //B10(24-27) B11(24-27) B10(24-27) B11(24-27) B14(24-27) B15(24-27) B14(24-27) B15(24-27)
-                    const __m256i rhs_mat_2367_13_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_13, 136); //B12(24-27) B13(24-27) B12(24-27) B13(24-27) B16(24-27) B17(24-27) B16(24-27) B17(24-27)
-
-
-                    // Shuffle pattern two - right side input
-                    const __m256i rhs_mat_0145_00_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_00, 221); //B00(4-7) B01(4-7) B00(4-7) B01(4-7) B04(4-7) B05(4-7) B04(4-7) B05(4-7)
-                    const __m256i rhs_mat_2367_00_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_00, 221); //B02(4-7) B03(4-7) B02(4-7) B03(4-7) B06(4-7) B07(4-7) B06(4-7) B07(4-7)
-
-                    const __m256i rhs_mat_0145_01_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_01, 221); //B00(12-15) B01(12-15) B00(12-15) B01(12-15) B04(12-15) B05(12-15) B04(12-15) B05(12-15)
-                    const __m256i rhs_mat_2367_01_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_01, 221); //B02(12-15) B03(12-15) B02(12-15) B03(12-15) B06(12-15) B07(12-15) B06(12-15) B07(12-15)
-
-                    const __m256i rhs_mat_0145_02_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_02, 221); //B00(20-23) B01(20-23) B00(20-23) B01(20-23) B04(20-23) B05(20-23) B04(20-23) B05(20-23)
-                    const __m256i rhs_mat_2367_02_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_02, 221); //B02(20-23) B03(20-23) B02(20-23) B03(20-23) B06(20-23) B07(20-23) B06(20-23) B07(20-23)
-
-                    const __m256i rhs_mat_0145_03_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_03, 221); //B00(28-31) B01(28-31) B00(28-31) B01(28-31) B04(28-31) B05(28-31) B04(28-31) B05(28-31)
-                    const __m256i rhs_mat_2367_03_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_03, 221); //B02(28-31) B03(28-31) B02(28-31) B03(28-31) B06(28-31) B07(28-31) B06(28-31) B07(28-31)
-
-                    const __m256i rhs_mat_0145_10_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_10, 221); //B10(4-7) B11(4-7) B10(4-7) B11(4-7) B14(4-7) B15(4-7) B14(4-7) B15(4-7)
-                    const __m256i rhs_mat_2367_10_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_10, 221); //B12(4-7) B13(4-7) B12(4-7) B13(4-7) B16(4-7) B17(4-7) B16(4-7) B17(4-7)
-
-                    const __m256i rhs_mat_0145_11_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_11, 221); //B10(12-15) B11(12-15) B10(12-15) B11(12-15) B14(12-15) B15(12-15) B14(12-15) B15(12-15)
-                    const __m256i rhs_mat_2367_11_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_11, 221); //B12(12-15) B13(12-15) B12(12-15) B13(12-15) B16(12-15) B17(12-15) B16(12-15) B17(12-15)
-
-                    const __m256i rhs_mat_0145_12_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_12, 221); //B10(20-23) B11(20-23) B10(20-23) B11(20-23) B14(20-23) B15(20-23) B14(20-23) B15(20-23)
-                    const __m256i rhs_mat_2367_12_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_12, 221); //B12(20-23) B13(20-23) B12(20-23) B13(20-23) B16(20-23) B17(20-23) B16(20-23) B17(20-23)
-
-                    const __m256i rhs_mat_0145_13_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_13, 221); //B10(28-31) B11(28-31) B10(28-31) B11(28-31) B14(28-31) B15(28-31) B14(28-31) B15(28-31)
-                    const __m256i rhs_mat_2367_13_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_13, 221); //B12(28-31) B13(28-31) B12(28-31) B13(28-31) B16(28-31) B17(28-31) B16(28-31) B17(28-31)
-
-                    uint32_t utmp_0[4], utmp_1[4];
-
-                    // Scales and Mins of corresponding sub blocks from different Q4_K structures are stored together
-                    // The below block is for eg to extract first sub block's scales and mins from different Q4_K structures for the sb loop
-                    memcpy(utmp_0, b_ptr[b].scales + 24 * sb, 12);
-                    utmp_0[3] = ((utmp_0[2] >> 4) & kmask2) | (((utmp_0[1] >> 6) & kmask3) << 4);
-                    const uint32_t uaux_0 = utmp_0[1] & kmask1;
-                    utmp_0[1] = (utmp_0[2] & kmask2) | (((utmp_0[0] >> 6) & kmask3) << 4);
-                    utmp_0[2] = uaux_0;
-                    utmp_0[0] &= kmask1;
-
-                    // The below block is for eg to extract second sub block's scales and mins from different Q4_K structures for the sb loop
-                    memcpy(utmp_1, b_ptr[b].scales + 12 + sb * 24, 12);
-                    utmp_1[3] = ((utmp_1[2] >> 4) & kmask2) | (((utmp_1[1] >> 6) & kmask3) << 4);
-                    const uint32_t uaux_1 = utmp_1[1] & kmask1;
-                    utmp_1[1] = (utmp_1[2] & kmask2) | (((utmp_1[0] >> 6) & kmask3) << 4);
-                    utmp_1[2] = uaux_1;
-                    utmp_1[0] &= kmask1;
-
-                    // Scales of first sub block in the sb loop
-                    const __m128i mins_and_scales_0 = _mm_set_epi32(utmp_0[3], utmp_0[2], utmp_0[1], utmp_0[0]);
-                    const __m256i scales_0 = _mm256_cvtepu8_epi16(_mm_unpacklo_epi8(mins_and_scales_0, mins_and_scales_0));
-
-                    // Scales of second sub block in the sb loop
-                    const __m128i mins_and_scales_1 = _mm_set_epi32(utmp_1[3], utmp_1[2], utmp_1[1], utmp_1[0]);
-                    const __m256i scales_1 = _mm256_cvtepu8_epi16(_mm_unpacklo_epi8(mins_and_scales_1, mins_and_scales_1));
-
-                    // Mins of first and second sub block of Q4_K block are arranged side by side
-                    const __m256i mins_01 = _mm256_cvtepu8_epi16(_mm_unpacklo_epi8(_mm_shuffle_epi32(mins_and_scales_0, 78), _mm_shuffle_epi32(mins_and_scales_1, 78)));
-
-                    const __m256i scale_0145_0 = _mm256_shuffle_epi32(scales_0, 68);
-                    const __m256i scale_2367_0 = _mm256_shuffle_epi32(scales_0, 238);
-
-                    const __m256i scale_0145_1 = _mm256_shuffle_epi32(scales_1, 68);
-                    const __m256i scale_2367_1 = _mm256_shuffle_epi32(scales_1, 238);
-
-                    for (int rp = 0; rp < 4; rp++) {
-
-                        // Load the four block_q8_k quantized values interleaved with each other in chunks of eight bytes - A0,A1,A2,A3
-                        // Loaded as set of 128 bit vectors and repeated into a 256 bit vector
-                        __m256i lhs_mat_0123_00 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 256 * sb)));
-                        __m256i lhs_mat_01_00 = _mm256_permute2f128_si256(lhs_mat_0123_00, lhs_mat_0123_00, 0);
-                        __m256i lhs_mat_23_00 = _mm256_permute2f128_si256(lhs_mat_0123_00, lhs_mat_0123_00, 17);
-                        __m256i lhs_mat_0123_01 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 32 + 256 * sb)));
-                        __m256i lhs_mat_01_01 = _mm256_permute2f128_si256(lhs_mat_0123_01, lhs_mat_0123_01, 0);
-                        __m256i lhs_mat_23_01 = _mm256_permute2f128_si256(lhs_mat_0123_01, lhs_mat_0123_01, 17);
-                        __m256i lhs_mat_0123_02 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 64 + 256 * sb)));
-                        __m256i lhs_mat_01_02 = _mm256_permute2f128_si256(lhs_mat_0123_02, lhs_mat_0123_02, 0);
-                        __m256i lhs_mat_23_02 = _mm256_permute2f128_si256(lhs_mat_0123_02, lhs_mat_0123_02, 17);
-                        __m256i lhs_mat_0123_03 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 96 + 256 * sb)));
-                        __m256i lhs_mat_01_03 = _mm256_permute2f128_si256(lhs_mat_0123_03, lhs_mat_0123_03, 0);
-                        __m256i lhs_mat_23_03 = _mm256_permute2f128_si256(lhs_mat_0123_03, lhs_mat_0123_03, 17);
-                        __m256i lhs_mat_0123_10 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 128 + 256 * sb)));
-                        __m256i lhs_mat_01_10 = _mm256_permute2f128_si256(lhs_mat_0123_10, lhs_mat_0123_10, 0);
-                        __m256i lhs_mat_23_10 = _mm256_permute2f128_si256(lhs_mat_0123_10, lhs_mat_0123_10, 17);
-                        __m256i lhs_mat_0123_11 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 160 + 256 * sb)));
-                        __m256i lhs_mat_01_11 = _mm256_permute2f128_si256(lhs_mat_0123_11, lhs_mat_0123_11, 0);
-                        __m256i lhs_mat_23_11 = _mm256_permute2f128_si256(lhs_mat_0123_11, lhs_mat_0123_11, 17);
-                        __m256i lhs_mat_0123_12 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 192 + 256 * sb)));
-                        __m256i lhs_mat_01_12 = _mm256_permute2f128_si256(lhs_mat_0123_12, lhs_mat_0123_12, 0);
-                        __m256i lhs_mat_23_12 = _mm256_permute2f128_si256(lhs_mat_0123_12, lhs_mat_0123_12, 17);
-                        __m256i lhs_mat_0123_13 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].qs + 224 + 256 * sb)));
-                        __m256i lhs_mat_01_13 = _mm256_permute2f128_si256(lhs_mat_0123_13, lhs_mat_0123_13, 0);
-                        __m256i lhs_mat_23_13 = _mm256_permute2f128_si256(lhs_mat_0123_13, lhs_mat_0123_13, 17);
-
-                        // Bsums are loaded - four bsums are loaded (for two sub blocks) for the different Q8_K blocks
-                        __m256i lhs_bsums_0123_01 = _mm256_loadu_si256((const __m256i * )((a_ptrs[rp][b].bsums + 16 * sb)));
-                        __m256i lhs_bsums_hsum_0123_01 = _mm256_castsi128_si256(_mm_hadd_epi16(_mm256_castsi256_si128(lhs_bsums_0123_01), _mm256_extractf128_si256(lhs_bsums_0123_01, 1)));
-                        lhs_bsums_hsum_0123_01 = _mm256_permute2x128_si256(lhs_bsums_hsum_0123_01, lhs_bsums_hsum_0123_01, 0);
-
-                        // Shuffle pattern one - left side input
-                        const __m256i lhs_mat_01_00_sp1 = _mm256_shuffle_epi32(lhs_mat_01_00, 160); //A00(0-3) A00(0-3) A01(0-3) A01(0-3) A00(0-3) A00(0-3) A01(0-3) A01(0-3)
-                        const __m256i lhs_mat_23_00_sp1 = _mm256_shuffle_epi32(lhs_mat_23_00, 160); //A02(0-3) A03(0-3) A02(0-3) A03(0-3) A02(0-3) A03(0-3) A02(0-3) A03(0-3)
-
-                        const __m256i lhs_mat_01_01_sp1 = _mm256_shuffle_epi32(lhs_mat_01_01, 160); //A00(8-11) A00(8-11) A01(8-11) A01(8-11) A00(8-11) A00(8-11) A01(8-11) A01(8-11)
-                        const __m256i lhs_mat_23_01_sp1 = _mm256_shuffle_epi32(lhs_mat_23_01, 160); //A02(8-11) A03(8-11) A02(8-11) A03(8-11) A02(8-11) A03(8-11) A02(8-11) A03(8-11)
-
-                        const __m256i lhs_mat_01_02_sp1 = _mm256_shuffle_epi32(lhs_mat_01_02, 160); //A00(16-19) A00(16-19) A01(16-19) A01(16-19) A00(16-19) A00(16-19) A01(16-19) A01(16-19)
-                        const __m256i lhs_mat_23_02_sp1 = _mm256_shuffle_epi32(lhs_mat_23_02, 160); //A02(16-19) A03(16-19) A02(16-19) A03(16-19) A02(16-19) A03(16-19) A02(16-19) A03(16-19)
-
-                        const __m256i lhs_mat_01_03_sp1 = _mm256_shuffle_epi32(lhs_mat_01_03, 160); //A00(24-27) A00(24-27) A01(24-27) A01(24-27) A00(24-27) A00(24-27) A01(24-27) A01(24-27)
-                        const __m256i lhs_mat_23_03_sp1 = _mm256_shuffle_epi32(lhs_mat_23_03, 160); //A02(24-27) A03(24-27) A02(24-27) A03(24-27) A02(24-27) A03(24-27) A02(24-27) A03(24-27)
-
-                        const __m256i lhs_mat_01_10_sp1 = _mm256_shuffle_epi32(lhs_mat_01_10, 160); //A10(0-3) A10(0-3) A11(0-3) A11(0-3) A10(0-3) A10(0-3) A11(0-3) A11(0-3)
-                        const __m256i lhs_mat_23_10_sp1 = _mm256_shuffle_epi32(lhs_mat_23_10, 160); //A12(0-3) A13(0-3) A12(0-3) A13(0-3) A12(0-3) A13(0-3) A12(0-3) A13(0-3)
-
-                        const __m256i lhs_mat_01_11_sp1 = _mm256_shuffle_epi32(lhs_mat_01_11, 160); //A10(8-11) A10(8-11) A11(8-11) A11(8-11) A10(8-11) A10(8-11) A11(8-11) A11(8-11)
-                        const __m256i lhs_mat_23_11_sp1 = _mm256_shuffle_epi32(lhs_mat_23_11, 160); //A12(8-11) A13(8-11) A12(8-11) A13(8-11) A12(8-11) A13(8-11) A12(8-11) A13(8-11)
-
-                        const __m256i lhs_mat_01_12_sp1 = _mm256_shuffle_epi32(lhs_mat_01_12, 160); //A10(16-19) A10(16-19) A11(16-19) A11(16-19) A10(16-19) A10(16-19) A11(16-19) A11(16-19)
-                        const __m256i lhs_mat_23_12_sp1 = _mm256_shuffle_epi32(lhs_mat_23_12, 160); //A12(16-19) A13(16-19) A12(16-19) A13(16-19) A12(16-19) A13(16-19) A12(16-19) A13(16-19)
-
-                        const __m256i lhs_mat_01_13_sp1 = _mm256_shuffle_epi32(lhs_mat_01_13, 160); //A10(24-27) A10(24-27) A11(24-27) A11(24-27) A10(24-27) A10(24-27) A11(24-27) A11(24-27)
-                        const __m256i lhs_mat_23_13_sp1 = _mm256_shuffle_epi32(lhs_mat_23_13, 160); //A12(24-27) A13(24-27) A12(24-27) A13(24-27) A12(24-27) A13(24-27) A12(24-27) A13(24-27)
-
-                        // Shuffle pattern two- left side input
-                        const __m256i lhs_mat_01_00_sp2 = _mm256_shuffle_epi32(lhs_mat_01_00, 245); //A00(4-7) A00(4-7) A01(4-7) A01(4-7) A00(4-7) A00(4-7) A01(4-7) A01(4-7)
-                        const __m256i lhs_mat_23_00_sp2 = _mm256_shuffle_epi32(lhs_mat_23_00, 245); //A02(4-7) A03(4-7) A02(4-7) A03(4-7) A02(4-7) A03(4-7) A02(4-7) A03(4-7)
-
-                        const __m256i lhs_mat_01_01_sp2 = _mm256_shuffle_epi32(lhs_mat_01_01, 245); //A00(12-15) A00(12-15) A01(12-15) A01(12-15) A00(12-15) A00(12-15) A01(12-15) A01(12-15)
-                        const __m256i lhs_mat_23_01_sp2 = _mm256_shuffle_epi32(lhs_mat_23_01, 245); //A02(12-15) A03(12-15) A02(12-15) A03(12-15) A02(12-15) A03(12-15) A02(12-15) A03(12-15)
-
-                        const __m256i lhs_mat_01_02_sp2 = _mm256_shuffle_epi32(lhs_mat_01_02, 245); //A00(20-23) A00(20-23) A01(20-23) A01(20-23) A00(20-23) A00(20-23) A01(20-23) A01(20-23)
-                        const __m256i lhs_mat_23_02_sp2 = _mm256_shuffle_epi32(lhs_mat_23_02, 245); //A02(20-23) A03(20-23) A02(20-23) A03(20-23) A02(20-23) A03(20-23) A02(20-23) A03(20-23)
-
-                        const __m256i lhs_mat_01_03_sp2 = _mm256_shuffle_epi32(lhs_mat_01_03, 245); //A00(28-31) A00(28-31) A01(28-31) A01(28-31) A00(28-31) A00(28-31) A01(28-31) A01(28-31)
-                        const __m256i lhs_mat_23_03_sp2 = _mm256_shuffle_epi32(lhs_mat_23_03, 245); //A02(28-31) A03(28-31) A02(28-31) A03(28-31) A02(28-31) A03(28-31) A02(28-31) A03(28-31)
-
-                        const __m256i lhs_mat_01_10_sp2 = _mm256_shuffle_epi32(lhs_mat_01_10, 245); //A10(4-7) A10(4-7) A11(4-7) A11(4-7) A10(4-7) A10(4-7) A11(4-7) A11(4-7)
-                        const __m256i lhs_mat_23_10_sp2 = _mm256_shuffle_epi32(lhs_mat_23_10, 245); //A12(4-7) A13(4-7) A12(4-7) A13(4-7) A12(4-7) A13(4-7) A12(4-7) A13(4-7)
-
-                        const __m256i lhs_mat_01_11_sp2 = _mm256_shuffle_epi32(lhs_mat_01_11, 245); //A10(12-15) A10(12-15) A11(12-15) A11(12-15) A10(12-15) A10(12-15) A11(12-15) A11(12-15)
-                        const __m256i lhs_mat_23_11_sp2 = _mm256_shuffle_epi32(lhs_mat_23_11, 245); //A12(12-15) A13(12-15) A12(12-15) A13(12-15) A12(12-15) A13(12-15) A12(12-15) A13(12-15)
-
-                        const __m256i lhs_mat_01_12_sp2 = _mm256_shuffle_epi32(lhs_mat_01_12, 245); //A10(20-23) A10(20-23) A11(20-23) A11(20-23) A10(20-23) A10(20-23) A11(20-23) A11(20-23)
-                        const __m256i lhs_mat_23_12_sp2 = _mm256_shuffle_epi32(lhs_mat_23_12, 245); //A12(20-23) A13(20-23) A12(20-23) A13(20-23) A12(20-23) A13(20-23) A12(20-23) A13(20-23)
-
-                        const __m256i lhs_mat_01_13_sp2 = _mm256_shuffle_epi32(lhs_mat_01_13, 245); //A10(28-31) A10(28-31) A11(28-31) A11(28-31) A10(28-31) A10(28-31) A11(28-31) A11(28-31)
-                        const __m256i lhs_mat_23_13_sp2 = _mm256_shuffle_epi32(lhs_mat_23_13, 245); //A12(28-31) A13(28-31) A12(28-31) A13(28-31) A12(28-31) A13(28-31) A12(28-31) A13(28-31)
-
-                        // The values arranged in shuffle patterns are operated with dot product operation within 32 bit lane i.e corresponding bytes and multiplied and added into 32 bit integers within 32 bit lane
-                        __m256i iacc_mat_00_0_sp1 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_03_sp1, lhs_mat_01_03_sp1), _mm256_maddubs_epi16(rhs_mat_0145_02_sp1, lhs_mat_01_02_sp1)), _mm256_maddubs_epi16(rhs_mat_0145_01_sp1, lhs_mat_01_01_sp1)), _mm256_maddubs_epi16(rhs_mat_0145_00_sp1, lhs_mat_01_00_sp1));
-                        __m256i iacc_mat_01_0_sp1 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_03_sp1, lhs_mat_01_03_sp1), _mm256_maddubs_epi16(rhs_mat_2367_02_sp1, lhs_mat_01_02_sp1)), _mm256_maddubs_epi16(rhs_mat_2367_01_sp1, lhs_mat_01_01_sp1)), _mm256_maddubs_epi16(rhs_mat_2367_00_sp1, lhs_mat_01_00_sp1));
-                        __m256i iacc_mat_10_0_sp1 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_03_sp1, lhs_mat_23_03_sp1), _mm256_maddubs_epi16(rhs_mat_0145_02_sp1, lhs_mat_23_02_sp1)), _mm256_maddubs_epi16(rhs_mat_0145_01_sp1, lhs_mat_23_01_sp1)), _mm256_maddubs_epi16(rhs_mat_0145_00_sp1, lhs_mat_23_00_sp1));
-                        __m256i iacc_mat_11_0_sp1 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_03_sp1, lhs_mat_23_03_sp1), _mm256_maddubs_epi16(rhs_mat_2367_02_sp1, lhs_mat_23_02_sp1)), _mm256_maddubs_epi16(rhs_mat_2367_01_sp1, lhs_mat_23_01_sp1)), _mm256_maddubs_epi16(rhs_mat_2367_00_sp1, lhs_mat_23_00_sp1));
-                        __m256i iacc_mat_00_1_sp1 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_13_sp1, lhs_mat_01_13_sp1), _mm256_maddubs_epi16(rhs_mat_0145_12_sp1, lhs_mat_01_12_sp1)), _mm256_maddubs_epi16(rhs_mat_0145_11_sp1, lhs_mat_01_11_sp1)), _mm256_maddubs_epi16(rhs_mat_0145_10_sp1, lhs_mat_01_10_sp1));
-                        __m256i iacc_mat_01_1_sp1 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_13_sp1, lhs_mat_01_13_sp1), _mm256_maddubs_epi16(rhs_mat_2367_12_sp1, lhs_mat_01_12_sp1)), _mm256_maddubs_epi16(rhs_mat_2367_11_sp1, lhs_mat_01_11_sp1)), _mm256_maddubs_epi16(rhs_mat_2367_10_sp1, lhs_mat_01_10_sp1));
-                        __m256i iacc_mat_10_1_sp1 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_13_sp1, lhs_mat_23_13_sp1), _mm256_maddubs_epi16(rhs_mat_0145_12_sp1, lhs_mat_23_12_sp1)), _mm256_maddubs_epi16(rhs_mat_0145_11_sp1, lhs_mat_23_11_sp1)), _mm256_maddubs_epi16(rhs_mat_0145_10_sp1, lhs_mat_23_10_sp1));
-                        __m256i iacc_mat_11_1_sp1 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_13_sp1, lhs_mat_23_13_sp1), _mm256_maddubs_epi16(rhs_mat_2367_12_sp1, lhs_mat_23_12_sp1)), _mm256_maddubs_epi16(rhs_mat_2367_11_sp1, lhs_mat_23_11_sp1)), _mm256_maddubs_epi16(rhs_mat_2367_10_sp1, lhs_mat_23_10_sp1));
-
-                        __m256i iacc_mat_00_0_sp2 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_03_sp2, lhs_mat_01_03_sp2), _mm256_maddubs_epi16(rhs_mat_0145_02_sp2, lhs_mat_01_02_sp2)), _mm256_maddubs_epi16(rhs_mat_0145_01_sp2, lhs_mat_01_01_sp2)), _mm256_maddubs_epi16(rhs_mat_0145_00_sp2, lhs_mat_01_00_sp2));
-                        __m256i iacc_mat_01_0_sp2 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_03_sp2, lhs_mat_01_03_sp2), _mm256_maddubs_epi16(rhs_mat_2367_02_sp2, lhs_mat_01_02_sp2)), _mm256_maddubs_epi16(rhs_mat_2367_01_sp2, lhs_mat_01_01_sp2)), _mm256_maddubs_epi16(rhs_mat_2367_00_sp2, lhs_mat_01_00_sp2));
-                        __m256i iacc_mat_10_0_sp2 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_03_sp2, lhs_mat_23_03_sp2), _mm256_maddubs_epi16(rhs_mat_0145_02_sp2, lhs_mat_23_02_sp2)), _mm256_maddubs_epi16(rhs_mat_0145_01_sp2, lhs_mat_23_01_sp2)), _mm256_maddubs_epi16(rhs_mat_0145_00_sp2, lhs_mat_23_00_sp2));
-                        __m256i iacc_mat_11_0_sp2 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_03_sp2, lhs_mat_23_03_sp2), _mm256_maddubs_epi16(rhs_mat_2367_02_sp2, lhs_mat_23_02_sp2)), _mm256_maddubs_epi16(rhs_mat_2367_01_sp2, lhs_mat_23_01_sp2)), _mm256_maddubs_epi16(rhs_mat_2367_00_sp2, lhs_mat_23_00_sp2));
-                        __m256i iacc_mat_00_1_sp2 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_13_sp2, lhs_mat_01_13_sp2), _mm256_maddubs_epi16(rhs_mat_0145_12_sp2, lhs_mat_01_12_sp2)), _mm256_maddubs_epi16(rhs_mat_0145_11_sp2, lhs_mat_01_11_sp2)), _mm256_maddubs_epi16(rhs_mat_0145_10_sp2, lhs_mat_01_10_sp2));
-                        __m256i iacc_mat_01_1_sp2 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_13_sp2, lhs_mat_01_13_sp2), _mm256_maddubs_epi16(rhs_mat_2367_12_sp2, lhs_mat_01_12_sp2)), _mm256_maddubs_epi16(rhs_mat_2367_11_sp2, lhs_mat_01_11_sp2)), _mm256_maddubs_epi16(rhs_mat_2367_10_sp2, lhs_mat_01_10_sp2));
-                        __m256i iacc_mat_10_1_sp2 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_13_sp2, lhs_mat_23_13_sp2), _mm256_maddubs_epi16(rhs_mat_0145_12_sp2, lhs_mat_23_12_sp2)), _mm256_maddubs_epi16(rhs_mat_0145_11_sp2, lhs_mat_23_11_sp2)), _mm256_maddubs_epi16(rhs_mat_0145_10_sp2, lhs_mat_23_10_sp2));
-                        __m256i iacc_mat_11_1_sp2 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_13_sp2, lhs_mat_23_13_sp2), _mm256_maddubs_epi16(rhs_mat_2367_12_sp2, lhs_mat_23_12_sp2)), _mm256_maddubs_epi16(rhs_mat_2367_11_sp2, lhs_mat_23_11_sp2)), _mm256_maddubs_epi16(rhs_mat_2367_10_sp2, lhs_mat_23_10_sp2));
-
-                        // Output of both shuffle patterns are added in order to sum dot product outputs of all 32 values in block
-                        __m256i iacc_mat_00_0 = _mm256_add_epi16(iacc_mat_00_0_sp1, iacc_mat_00_0_sp2);
-                        __m256i iacc_mat_01_0 = _mm256_add_epi16(iacc_mat_01_0_sp1, iacc_mat_01_0_sp2);
-                        __m256i iacc_mat_10_0 = _mm256_add_epi16(iacc_mat_10_0_sp1, iacc_mat_10_0_sp2);
-                        __m256i iacc_mat_11_0 = _mm256_add_epi16(iacc_mat_11_0_sp1, iacc_mat_11_0_sp2);
-
-                        __m256i iacc_mat_00_1 = _mm256_add_epi16(iacc_mat_00_1_sp1, iacc_mat_00_1_sp2);
-                        __m256i iacc_mat_01_1 = _mm256_add_epi16(iacc_mat_01_1_sp1, iacc_mat_01_1_sp2);
-                        __m256i iacc_mat_10_1 = _mm256_add_epi16(iacc_mat_10_1_sp1, iacc_mat_10_1_sp2);
-                        __m256i iacc_mat_11_1 = _mm256_add_epi16(iacc_mat_11_1_sp1, iacc_mat_11_1_sp2);
-
-                        // Output of both shuffle patterns are added in order to sum dot product outputs of all 32 values in block
-                        iacc_mat_00_0 = _mm256_madd_epi16(iacc_mat_00_0, scale_0145_0);
-                        iacc_mat_01_0 = _mm256_madd_epi16(iacc_mat_01_0, scale_2367_0);
-                        iacc_mat_10_0 = _mm256_madd_epi16(iacc_mat_10_0, scale_0145_0);
-                        iacc_mat_11_0 = _mm256_madd_epi16(iacc_mat_11_0, scale_2367_0);
-
-                        iacc_mat_00_1 = _mm256_madd_epi16(iacc_mat_00_1, scale_0145_1);
-                        iacc_mat_01_1 = _mm256_madd_epi16(iacc_mat_01_1, scale_2367_1);
-                        iacc_mat_10_1 = _mm256_madd_epi16(iacc_mat_10_1, scale_0145_1);
-                        iacc_mat_11_1 = _mm256_madd_epi16(iacc_mat_11_1, scale_2367_1);
-
-                        // Straighten out to make 4 row vectors (4 for each sub block which are accumulated together in the next step)
-                        __m256i iacc_row_0_0 = _mm256_blend_epi32(iacc_mat_00_0, _mm256_shuffle_epi32(iacc_mat_01_0, 78), 204);
-                        __m256i iacc_row_1_0 = _mm256_blend_epi32(_mm256_shuffle_epi32(iacc_mat_00_0, 78), iacc_mat_01_0, 204);
-                        __m256i iacc_row_2_0 = _mm256_blend_epi32(iacc_mat_10_0, _mm256_shuffle_epi32(iacc_mat_11_0, 78), 204);
-                        __m256i iacc_row_3_0 = _mm256_blend_epi32(_mm256_shuffle_epi32(iacc_mat_10_0, 78), iacc_mat_11_0, 204);
-                        __m256i iacc_row_0_1 = _mm256_blend_epi32(iacc_mat_00_1, _mm256_shuffle_epi32(iacc_mat_01_1, 78), 204);
-                        __m256i iacc_row_1_1 = _mm256_blend_epi32(_mm256_shuffle_epi32(iacc_mat_00_1, 78), iacc_mat_01_1, 204);
-                        __m256i iacc_row_2_1 = _mm256_blend_epi32(iacc_mat_10_1, _mm256_shuffle_epi32(iacc_mat_11_1, 78), 204);
-                        __m256i iacc_row_3_1 = _mm256_blend_epi32(_mm256_shuffle_epi32(iacc_mat_10_1, 78), iacc_mat_11_1, 204);
-
-                        __m256i iacc_row_0 = _mm256_add_epi32(iacc_row_0_0, iacc_row_0_1);
-                        __m256i iacc_row_1 = _mm256_add_epi32(iacc_row_1_0, iacc_row_1_1);
-                        __m256i iacc_row_2 = _mm256_add_epi32(iacc_row_2_0, iacc_row_2_1);
-                        __m256i iacc_row_3 = _mm256_add_epi32(iacc_row_3_0, iacc_row_3_1);
-
-                        // Load the scale(d) values for all the 4 Q8_k blocks and repeat it across lanes
-                        const __m128 row_scale_f32_sse = _mm_load_ps(a_ptrs[rp][b].d);
-                        const __m256 row_scale_f32 = _mm256_set_m128(row_scale_f32_sse, row_scale_f32_sse);//GGML_F32Cx8_REPEAT_LOAD(a_ptrs[rp][b].d, loadMask);
-
-                        // Multiply with appropiate scales and accumulate (for both d and dmin) below
-                        acc_rows[rp * 4] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_0), _mm256_mul_ps(col_scale_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 0)), acc_rows[rp * 4]);
-                        acc_rows[rp * 4 + 1] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_1), _mm256_mul_ps(col_scale_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 85)), acc_rows[rp * 4 + 1]);
-                        acc_rows[rp * 4 + 2] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_2), _mm256_mul_ps(col_scale_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 170)), acc_rows[rp * 4 + 2]);
-                        acc_rows[rp * 4 + 3] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_3), _mm256_mul_ps(col_scale_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 255)), acc_rows[rp * 4 + 3]);
-
-                        __m256i iacc_row_min_0 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_hsum_0123_01, 0), mins_01);
-                        __m256i iacc_row_min_1 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_hsum_0123_01, 85), mins_01);
-                        __m256i iacc_row_min_2 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_hsum_0123_01, 170), mins_01);
-                        __m256i iacc_row_min_3 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_hsum_0123_01, 255), mins_01);
-
-                        acc_min_rows[rp * 4] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_min_0), _mm256_mul_ps(col_dmin_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 0)), acc_min_rows[rp * 4]);
-                        acc_min_rows[rp * 4 + 1] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_min_1), _mm256_mul_ps(col_dmin_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 85)), acc_min_rows[rp * 4 + 1]);
-                        acc_min_rows[rp * 4 + 2] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_min_2), _mm256_mul_ps(col_dmin_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 170)), acc_min_rows[rp * 4 + 2]);
-                        acc_min_rows[rp * 4 + 3] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_min_3), _mm256_mul_ps(col_dmin_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 255)), acc_min_rows[rp * 4 + 3]);
-
-                    }
-                }
-            }
-            // Store the accumulated values
-            for (int i = 0; i < 16; i++) {
-                _mm256_storeu_ps((float * )(s + ((y * 4 + i) * bs + x * 8)), _mm256_sub_ps(acc_rows[i], acc_min_rows[i]));
-            }
-        }
-    }
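The maddubs/madd pairing in the hunk above is the core integer step: unsigned 4-bit quants are multiplied against signed 8-bit activations and pair-summed into 16-bit lanes, then a second pairwise multiply-add applies the per-sub-block scale and widens to 32 bits. A minimal standalone sketch of that one step, assuming only AVX2 (the function name and shapes are illustrative, not part of the patch):

    #include <immintrin.h>

    // rhs_u4: 32 quants already masked to 0..15, one per byte (unsigned).
    // lhs_s8: 32 signed 8-bit activations.
    // scale16: per-pair 16-bit scales, as prepared by the scales_0/scales_1 vectors above.
    static inline __m256i dot_step_u4s8(__m256i rhs_u4, __m256i lhs_s8, __m256i scale16) {
        __m256i p16 = _mm256_maddubs_epi16(rhs_u4, lhs_s8); // u8*s8, adjacent pairs -> i16
        return _mm256_madd_epi16(p16, scale16);             // i16*scale, pairs -> i32
    }

In the kernel, four maddubs results are first summed in 16-bit lanes (safe here, since 4 * 2 * 15 * 127 fits in an int16_t) before the single madd against the scale vector; that is exactly what the nested _mm256_add_epi16 chains above compute.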
-    for (; y < nr / 4; y++) {
-
-        const block_q8_Kx4 * a_ptr = a_ptr_start + (y * nb);
-
-        for (int64_t x = xstart; x < nc / 8; x++) {
-
-            const block_q4_Kx8 * b_ptr = b_ptr_start + (x * b_nb);
-
-            // Master FP accumulators
-            __m256 acc_rows[4];
-            for (int i = 0; i < 4; i++) {
-                acc_rows[i] = _mm256_setzero_ps();
-            }
-
-            __m256 acc_min_rows[4];
-            for (int i = 0; i < 4; i++) {
-                acc_min_rows[i] = _mm256_setzero_ps();
-            }
-
-            for (int64_t b = 0; b < nb; b++) {
-
-                // Scale values - Load the eight scale values of block_q4_Kx8
-                const __m256 col_scale_f32 = GGML_F32Cx8_LOAD(b_ptr[b].d);
-
-                // dmin values - Load the eight dmin values of block_q4_Kx8
-                const __m256 col_dmin_f32 = GGML_F32Cx8_LOAD(b_ptr[b].dmin);
-
-                // Loop to iterate over the eight sub blocks of a super block - two sub blocks are processed per iteration
-                for (int sb = 0; sb < QK_K / 64; sb++) {
-
-                    // Load the eight block_q4_k for two sub blocks quantized values interleaved with each other in chunks of eight bytes - B0,B1 ....B6,B7
-                    const __m256i rhs_raw_mat_0123_0 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + sb * 256));
-                    const __m256i rhs_raw_mat_4567_0 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 32 + sb * 256));
-                    const __m256i rhs_raw_mat_0123_1 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 64 + sb * 256));
-                    const __m256i rhs_raw_mat_4567_1 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 96 + sb * 256));
-                    const __m256i rhs_raw_mat_0123_2 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 128 + sb * 256));
-                    const __m256i rhs_raw_mat_4567_2 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 160 + sb * 256));
-                    const __m256i rhs_raw_mat_0123_3 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 192 + sb * 256));
-                    const __m256i rhs_raw_mat_4567_3 = _mm256_loadu_si256((const __m256i * )(b_ptr[b].qs + 224 + sb * 256));
-
-                    // Save the values in the following vectors in the formats B0B1B4B5, B2B3B6B7 for further processing and storing of values
-                    const __m256i rhs_raw_mat_0145_0 = _mm256_blend_epi32(rhs_raw_mat_0123_0, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_0, requiredOrder), 240);
-                    const __m256i rhs_raw_mat_2367_0 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_0, requiredOrder), rhs_raw_mat_4567_0, 240);
-                    const __m256i rhs_raw_mat_0145_1 = _mm256_blend_epi32(rhs_raw_mat_0123_1, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_1, requiredOrder), 240);
-                    const __m256i rhs_raw_mat_2367_1 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_1, requiredOrder), rhs_raw_mat_4567_1, 240);
-                    const __m256i rhs_raw_mat_0145_2 = _mm256_blend_epi32(rhs_raw_mat_0123_2, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_2, requiredOrder), 240);
-                    const __m256i rhs_raw_mat_2367_2 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_2, requiredOrder), rhs_raw_mat_4567_2, 240);
-                    const __m256i rhs_raw_mat_0145_3 = _mm256_blend_epi32(rhs_raw_mat_0123_3, _mm256_permutevar8x32_epi32(rhs_raw_mat_4567_3, requiredOrder), 240);
-                    const __m256i rhs_raw_mat_2367_3 = _mm256_blend_epi32(_mm256_permutevar8x32_epi32(rhs_raw_mat_0123_3, requiredOrder), rhs_raw_mat_4567_3, 240);
-
-                    // 4-bit -> 8-bit
-                    // First sub block of the two sub blocks processed in the iteration
-                    const __m256i rhs_mat_0145_00 = _mm256_and_si256(rhs_raw_mat_0145_0, m4b); //B00(0-7) B01(0-7) B04(0-7) B05(0-7)
-                    const __m256i rhs_mat_2367_00 = _mm256_and_si256(rhs_raw_mat_2367_0, m4b); //B02(0-7) B03(0-7) B06(0-7) B07(0-7)
-
-                    const __m256i rhs_mat_0145_01 = _mm256_and_si256(rhs_raw_mat_0145_1, m4b); //B00(8-15) B01(8-15) B04(8-15) B05(8-15)
-                    const __m256i rhs_mat_2367_01 = _mm256_and_si256(rhs_raw_mat_2367_1, m4b); //B02(8-15) B03(8-15) B06(8-15) B07(8-15)
-
-                    const __m256i rhs_mat_0145_02 = _mm256_and_si256(rhs_raw_mat_0145_2, m4b); //B00(16-23) B01(16-23) B04(16-23) B05(16-23)
-                    const __m256i rhs_mat_2367_02 = _mm256_and_si256(rhs_raw_mat_2367_2, m4b); //B02(16-23) B03(16-23) B06(16-23) B07(16-23)
-
-                    const __m256i rhs_mat_0145_03 = _mm256_and_si256(rhs_raw_mat_0145_3, m4b); //B00(24-31) B01(24-31) B04(24-31) B05(24-31)
-                    const __m256i rhs_mat_2367_03 = _mm256_and_si256(rhs_raw_mat_2367_3, m4b); //B02(24-31) B03(24-31) B06(24-31) B07(24-31)
-
-                    // Second sub block of the two sub blocks processed in the iteration
-                    const __m256i rhs_mat_0145_10 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_0, 4), m4b); //B10(0-7) B11(0-7) B14(0-7) B15(0-7)
-                    const __m256i rhs_mat_2367_10 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_0, 4), m4b); //B12(0-7) B13(0-7) B16(0-7) B17(0-7)
-
-                    const __m256i rhs_mat_0145_11 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_1, 4), m4b); //B10(8-15) B11(8-15) B14(8-15) B15(8-15)
-                    const __m256i rhs_mat_2367_11 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_1, 4), m4b); //B12(8-15) B13(8-15) B16(8-15) B17(8-15)
-
-                    const __m256i rhs_mat_0145_12 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_2, 4), m4b); //B10(16-23) B11(16-23) B14(16-23) B15(16-23)
-                    const __m256i rhs_mat_2367_12 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_2, 4), m4b); //B12(16-23) B13(16-23) B16(16-23) B17(16-23)
-
-                    const __m256i rhs_mat_0145_13 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_0145_3, 4), m4b); //B10(24-31) B11(24-31) B14(24-31) B15(24-31)
-                    const __m256i rhs_mat_2367_13 = _mm256_and_si256(_mm256_srli_epi16(rhs_raw_mat_2367_3, 4), m4b); //B12(24-31) B13(24-31) B16(24-31) B17(24-31)
-
-                    // Shuffle pattern one - right side input
-                    const __m256i rhs_mat_0145_00_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_00, 136); //B00(0-3) B01(0-3) B00(0-3) B01(0-3) B04(0-3) B05(0-3) B04(0-3) B05(0-3)
-                    const __m256i rhs_mat_2367_00_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_00, 136); //B02(0-3) B03(0-3) B02(0-3) B03(0-3) B06(0-3) B07(0-3) B06(0-3) B07(0-3)
-
-                    const __m256i rhs_mat_0145_01_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_01, 136); //B00(8-11) B01(8-11) B00(8-11) B01(8-11) B04(8-11) B05(8-11) B04(8-11) B05(8-11)
-                    const __m256i rhs_mat_2367_01_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_01, 136); //B02(8-11) B03(8-11) B02(8-11) B03(8-11) B06(8-11) B07(8-11) B06(8-11) B07(8-11)
-
-                    const __m256i rhs_mat_0145_02_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_02, 136); //B00(16-19) B01(16-19) B00(16-19) B01(16-19) B04(16-19) B05(16-19) B04(16-19) B05(16-19)
-                    const __m256i rhs_mat_2367_02_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_02, 136); //B02(16-19) B03(16-19) B02(16-19) B03(16-19) B06(16-19) B07(16-19) B06(16-19) B07(16-19)
-
-                    const __m256i rhs_mat_0145_03_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_03, 136); //B00(24-27) B01(24-27) B00(24-27) B01(24-27) B04(24-27) B05(24-27) B04(24-27) B05(24-27)
-                    const __m256i rhs_mat_2367_03_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_03, 136); //B02(24-27) B03(24-27) B02(24-27) B03(24-27) B06(24-27) B07(24-27) B06(24-27) B07(24-27)
-
-                    const __m256i rhs_mat_0145_10_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_10, 136); //B10(0-3) B11(0-3) B10(0-3) B11(0-3) B14(0-3) B15(0-3) B14(0-3) B15(0-3)
-                    const __m256i rhs_mat_2367_10_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_10, 136); //B12(0-3) B13(0-3) B12(0-3) B13(0-3) B16(0-3) B17(0-3) B16(0-3) B17(0-3)
-
-                    const __m256i rhs_mat_0145_11_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_11, 136); //B10(8-11) B11(8-11) B10(8-11) B11(8-11) B14(8-11) B15(8-11) B14(8-11) B15(8-11)
-                    const __m256i rhs_mat_2367_11_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_11, 136); //B12(8-11) B13(8-11) B12(8-11) B13(8-11) B16(8-11) B17(8-11) B16(8-11) B17(8-11)
-
-                    const __m256i rhs_mat_0145_12_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_12, 136); //B10(16-19) B11(16-19) B10(16-19) B11(16-19) B14(16-19) B15(16-19) B14(16-19) B15(16-19)
-                    const __m256i rhs_mat_2367_12_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_12, 136); //B12(16-19) B13(16-19) B12(16-19) B13(16-19) B16(16-19) B17(16-19) B16(16-19) B17(16-19)
-
-                    const __m256i rhs_mat_0145_13_sp1 = _mm256_shuffle_epi32(rhs_mat_0145_13, 136); //B10(24-27) B11(24-27) B10(24-27) B11(24-27) B14(24-27) B15(24-27) B14(24-27) B15(24-27)
-                    const __m256i rhs_mat_2367_13_sp1 = _mm256_shuffle_epi32(rhs_mat_2367_13, 136); //B12(24-27) B13(24-27) B12(24-27) B13(24-27) B16(24-27) B17(24-27) B16(24-27) B17(24-27)
-
-                    // Shuffle pattern two - right side input
-                    const __m256i rhs_mat_0145_00_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_00, 221); //B00(4-7) B01(4-7) B00(4-7) B01(4-7) B04(4-7) B05(4-7) B04(4-7) B05(4-7)
-                    const __m256i rhs_mat_2367_00_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_00, 221); //B02(4-7) B03(4-7) B02(4-7) B03(4-7) B06(4-7) B07(4-7) B06(4-7) B07(4-7)
-
-                    const __m256i rhs_mat_0145_01_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_01, 221); //B00(12-15) B01(12-15) B00(12-15) B01(12-15) B04(12-15) B05(12-15) B04(12-15) B05(12-15)
-                    const __m256i rhs_mat_2367_01_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_01, 221); //B02(12-15) B03(12-15) B02(12-15) B03(12-15) B06(12-15) B07(12-15) B06(12-15) B07(12-15)
-
-                    const __m256i rhs_mat_0145_02_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_02, 221); //B00(20-23) B01(20-23) B00(20-23) B01(20-23) B04(20-23) B05(20-23) B04(20-23) B05(20-23)
-                    const __m256i rhs_mat_2367_02_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_02, 221); //B02(20-23) B03(20-23) B02(20-23) B03(20-23) B06(20-23) B07(20-23) B06(20-23) B07(20-23)
-
-                    const __m256i rhs_mat_0145_03_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_03, 221); //B00(28-31) B01(28-31) B00(28-31) B01(28-31) B04(28-31) B05(28-31) B04(28-31) B05(28-31)
-                    const __m256i rhs_mat_2367_03_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_03, 221); //B02(28-31) B03(28-31) B02(28-31) B03(28-31) B06(28-31) B07(28-31) B06(28-31) B07(28-31)
-
-                    const __m256i rhs_mat_0145_10_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_10, 221); //B10(4-7) B11(4-7) B10(4-7) B11(4-7) B14(4-7) B15(4-7) B14(4-7) B15(4-7)
-                    const __m256i rhs_mat_2367_10_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_10, 221); //B12(4-7) B13(4-7) B12(4-7) B13(4-7) B16(4-7) B17(4-7) B16(4-7) B17(4-7)
-
-                    const __m256i rhs_mat_0145_11_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_11, 221); //B10(12-15) B11(12-15) B10(12-15) B11(12-15) B14(12-15) B15(12-15) B14(12-15) B15(12-15)
-                    const __m256i rhs_mat_2367_11_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_11, 221); //B12(12-15) B13(12-15) B12(12-15) B13(12-15) B16(12-15) B17(12-15) B16(12-15) B17(12-15)
-
-                    const __m256i rhs_mat_0145_12_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_12, 221); //B10(20-23) B11(20-23) B10(20-23) B11(20-23) B14(20-23) B15(20-23) B14(20-23) B15(20-23)
-                    const __m256i rhs_mat_2367_12_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_12, 221); //B12(20-23) B13(20-23) B12(20-23) B13(20-23) B16(20-23) B17(20-23) B16(20-23) B17(20-23)
-
-                    const __m256i rhs_mat_0145_13_sp2 = _mm256_shuffle_epi32(rhs_mat_0145_13, 221); //B10(28-31) B11(28-31) B10(28-31) B11(28-31) B14(28-31) B15(28-31) B14(28-31) B15(28-31)
-                    const __m256i rhs_mat_2367_13_sp2 = _mm256_shuffle_epi32(rhs_mat_2367_13, 221); //B12(28-31) B13(28-31) B12(28-31) B13(28-31) B16(28-31) B17(28-31) B16(28-31) B17(28-31)
-
-                    uint32_t utmp_0[4], utmp_1[4];
-
-                    // Scales and Mins of corresponding sub blocks from different Q4_K structures are stored together
-                    // The below block is for eg to extract first sub block's scales and mins from different Q4_K structures for the sb loop
-                    memcpy(utmp_0, b_ptr[b].scales + 24 * sb, 12);
-                    utmp_0[3] = ((utmp_0[2] >> 4) & kmask2) | (((utmp_0[1] >> 6) & kmask3) << 4);
-                    const uint32_t uaux_0 = utmp_0[1] & kmask1;
-                    utmp_0[1] = (utmp_0[2] & kmask2) | (((utmp_0[0] >> 6) & kmask3) << 4);
-                    utmp_0[2] = uaux_0;
-                    utmp_0[0] &= kmask1;
-
-                    // The below block is for eg to extract second sub block's scales and mins from different Q4_K structures when sb = 1
-                    memcpy(utmp_1, b_ptr[b].scales + 12 + sb * 24, 12);
-                    utmp_1[3] = ((utmp_1[2] >> 4) & kmask2) | (((utmp_1[1] >> 6) & kmask3) << 4);
-                    const uint32_t uaux_1 = utmp_1[1] & kmask1;
-                    utmp_1[1] = (utmp_1[2] & kmask2) | (((utmp_1[0] >> 6) & kmask3) << 4);
-                    utmp_1[2] = uaux_1;
-                    utmp_1[0] &= kmask1;
-
-                    // Scales of first sub block in the sb loop
-                    const __m128i mins_and_scales_0 = _mm_set_epi32(utmp_0[3], utmp_0[2], utmp_0[1], utmp_0[0]);
-                    const __m256i scales_0 = _mm256_cvtepu8_epi16(_mm_unpacklo_epi8(mins_and_scales_0, mins_and_scales_0));
-
-                    // Scales of second sub block in the sb loop
-                    const __m128i mins_and_scales_1 = _mm_set_epi32(utmp_1[3], utmp_1[2], utmp_1[1], utmp_1[0]);
-                    const __m256i scales_1 = _mm256_cvtepu8_epi16(_mm_unpacklo_epi8(mins_and_scales_1, mins_and_scales_1));
-
-                    // Mins of first and second sub block of Q4_K block are arranged side by side
-                    const __m256i mins_01 = _mm256_cvtepu8_epi16(_mm_unpacklo_epi8(_mm_shuffle_epi32(mins_and_scales_0, 78), _mm_shuffle_epi32(mins_and_scales_1, 78)));
-
-                    const __m256i scale_0145_0 = _mm256_shuffle_epi32(scales_0, 68);
-                    const __m256i scale_2367_0 = _mm256_shuffle_epi32(scales_0, 238);
-
-                    const __m256i scale_0145_1 = _mm256_shuffle_epi32(scales_1, 68);
-                    const __m256i scale_2367_1 = _mm256_shuffle_epi32(scales_1, 238);
-
-                    // Load the four block_q8_k quantized values interleaved with each other in chunks of eight bytes - A0,A1,A2,A3
-                    // Loaded as set of 128 bit vectors and repeated into a 256 bit vector
-                    __m256i lhs_mat_0123_00 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 256 * sb)));
-                    __m256i lhs_mat_01_00 = _mm256_permute2f128_si256(lhs_mat_0123_00, lhs_mat_0123_00, 0);
-                    __m256i lhs_mat_23_00 = _mm256_permute2f128_si256(lhs_mat_0123_00, lhs_mat_0123_00, 17);
-                    __m256i lhs_mat_0123_01 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 32 + 256 * sb)));
-                    __m256i lhs_mat_01_01 = _mm256_permute2f128_si256(lhs_mat_0123_01, lhs_mat_0123_01, 0);
-                    __m256i lhs_mat_23_01 = _mm256_permute2f128_si256(lhs_mat_0123_01, lhs_mat_0123_01, 17);
-                    __m256i lhs_mat_0123_02 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 64 + 256 * sb)));
-                    __m256i lhs_mat_01_02 = _mm256_permute2f128_si256(lhs_mat_0123_02, lhs_mat_0123_02, 0);
-                    __m256i lhs_mat_23_02 = _mm256_permute2f128_si256(lhs_mat_0123_02, lhs_mat_0123_02, 17);
-                    __m256i lhs_mat_0123_03 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 96 + 256 * sb)));
-                    __m256i lhs_mat_01_03 = _mm256_permute2f128_si256(lhs_mat_0123_03, lhs_mat_0123_03, 0);
-                    __m256i lhs_mat_23_03 = _mm256_permute2f128_si256(lhs_mat_0123_03, lhs_mat_0123_03, 17);
-                    __m256i lhs_mat_0123_10 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 128 + 256 * sb)));
-                    __m256i lhs_mat_01_10 = _mm256_permute2f128_si256(lhs_mat_0123_10, lhs_mat_0123_10, 0);
-                    __m256i lhs_mat_23_10 = _mm256_permute2f128_si256(lhs_mat_0123_10, lhs_mat_0123_10, 17);
-                    __m256i lhs_mat_0123_11 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 160 + 256 * sb)));
-                    __m256i lhs_mat_01_11 = _mm256_permute2f128_si256(lhs_mat_0123_11, lhs_mat_0123_11, 0);
-                    __m256i lhs_mat_23_11 = _mm256_permute2f128_si256(lhs_mat_0123_11, lhs_mat_0123_11, 17);
-                    __m256i lhs_mat_0123_12 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 192 + 256 * sb)));
-                    __m256i lhs_mat_01_12 = _mm256_permute2f128_si256(lhs_mat_0123_12, lhs_mat_0123_12, 0);
-                    __m256i lhs_mat_23_12 = _mm256_permute2f128_si256(lhs_mat_0123_12, lhs_mat_0123_12, 17);
-                    __m256i lhs_mat_0123_13 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].qs + 224 + 256 * sb)));
-                    __m256i lhs_mat_01_13 = _mm256_permute2f128_si256(lhs_mat_0123_13, lhs_mat_0123_13, 0);
-                    __m256i lhs_mat_23_13 = _mm256_permute2f128_si256(lhs_mat_0123_13, lhs_mat_0123_13, 17);
-
-                    // Bsums are loaded - four bsums are loaded (for two sub blocks) for the different Q8_K blocks
-                    __m256i lhs_bsums_0123_01 = _mm256_loadu_si256((const __m256i * )((a_ptr[b].bsums + 16 * sb)));
-                    __m256i lhs_bsums_hsum_0123_01 = _mm256_castsi128_si256(_mm_hadd_epi16(_mm256_castsi256_si128(lhs_bsums_0123_01), _mm256_extractf128_si256(lhs_bsums_0123_01, 1)));
-                    lhs_bsums_hsum_0123_01 = _mm256_permute2x128_si256(lhs_bsums_hsum_0123_01, lhs_bsums_hsum_0123_01, 0);
-
-                    // Shuffle pattern one - left side input
-                    const __m256i lhs_mat_01_00_sp1 = _mm256_shuffle_epi32(lhs_mat_01_00, 160); //A00(0-3) A00(0-3) A01(0-3) A01(0-3) A00(0-3) A00(0-3) A01(0-3) A01(0-3)
-                    const __m256i lhs_mat_23_00_sp1 = _mm256_shuffle_epi32(lhs_mat_23_00, 160); //A02(0-3) A03(0-3) A02(0-3) A03(0-3) A02(0-3) A03(0-3) A02(0-3) A03(0-3)
-
-                    const __m256i lhs_mat_01_01_sp1 = _mm256_shuffle_epi32(lhs_mat_01_01, 160); //A00(8-11) A00(8-11) A01(8-11) A01(8-11) A00(8-11) A00(8-11) A01(8-11) A01(8-11)
-                    const __m256i lhs_mat_23_01_sp1 = _mm256_shuffle_epi32(lhs_mat_23_01, 160); //A02(8-11) A03(8-11) A02(8-11) A03(8-11) A02(8-11) A03(8-11) A02(8-11) A03(8-11)
-
-                    const __m256i lhs_mat_01_02_sp1 = _mm256_shuffle_epi32(lhs_mat_01_02, 160); //A00(16-19) A00(16-19) A01(16-19) A01(16-19) A00(16-19) A00(16-19) A01(16-19) A01(16-19)
-                    const __m256i lhs_mat_23_02_sp1 = _mm256_shuffle_epi32(lhs_mat_23_02, 160); //A02(16-19) A03(16-19) A02(16-19) A03(16-19) A02(16-19) A03(16-19) A02(16-19) A03(16-19)
-
-                    const __m256i lhs_mat_01_03_sp1 = _mm256_shuffle_epi32(lhs_mat_01_03, 160); //A00(24-27) A00(24-27) A01(24-27) A01(24-27) A00(24-27) A00(24-27) A01(24-27) A01(24-27)
-                    const __m256i lhs_mat_23_03_sp1 = _mm256_shuffle_epi32(lhs_mat_23_03, 160); //A02(24-27) A03(24-27) A02(24-27) A03(24-27) A02(24-27) A03(24-27) A02(24-27) A03(24-27)
-
-                    const __m256i lhs_mat_01_10_sp1 = _mm256_shuffle_epi32(lhs_mat_01_10, 160); //A10(0-3) A10(0-3) A11(0-3) A11(0-3) A10(0-3) A10(0-3) A11(0-3) A11(0-3)
-                    const __m256i lhs_mat_23_10_sp1 = _mm256_shuffle_epi32(lhs_mat_23_10, 160); //A12(0-3) A13(0-3) A12(0-3) A13(0-3) A12(0-3) A13(0-3) A12(0-3) A13(0-3)
-
-                    const __m256i lhs_mat_01_11_sp1 = _mm256_shuffle_epi32(lhs_mat_01_11, 160); //A10(8-11) A10(8-11) A11(8-11) A11(8-11) A10(8-11) A10(8-11) A11(8-11) A11(8-11)
-                    const __m256i lhs_mat_23_11_sp1 = _mm256_shuffle_epi32(lhs_mat_23_11, 160); //A12(8-11) A13(8-11) A12(8-11) A13(8-11) A12(8-11) A13(8-11) A12(8-11) A13(8-11)
-
-                    const __m256i lhs_mat_01_12_sp1 = _mm256_shuffle_epi32(lhs_mat_01_12, 160); //A10(16-19) A10(16-19) A11(16-19) A11(16-19) A10(16-19) A10(16-19) A11(16-19) A11(16-19)
-                    const __m256i lhs_mat_23_12_sp1 = _mm256_shuffle_epi32(lhs_mat_23_12, 160); //A12(16-19) A13(16-19) A12(16-19) A13(16-19) A12(16-19) A13(16-19) A12(16-19) A13(16-19)
-
-                    const __m256i lhs_mat_01_13_sp1 = _mm256_shuffle_epi32(lhs_mat_01_13, 160); //A10(24-27) A10(24-27) A11(24-27) A11(24-27) A10(24-27) A10(24-27) A11(24-27) A11(24-27)
-                    const __m256i lhs_mat_23_13_sp1 = _mm256_shuffle_epi32(lhs_mat_23_13, 160); //A12(24-27) A13(24-27) A12(24-27) A13(24-27) A12(24-27) A13(24-27) A12(24-27) A13(24-27)
-
-                    // Shuffle pattern two- left side input
-                    const __m256i lhs_mat_01_00_sp2 = _mm256_shuffle_epi32(lhs_mat_01_00, 245); //A00(4-7) A00(4-7) A01(4-7) A01(4-7) A00(4-7) A00(4-7) A01(4-7) A01(4-7)
-                    const __m256i lhs_mat_23_00_sp2 = _mm256_shuffle_epi32(lhs_mat_23_00, 245); //A02(4-7) A03(4-7) A02(4-7) A03(4-7) A02(4-7) A03(4-7) A02(4-7) A03(4-7)
-
-                    const __m256i lhs_mat_01_01_sp2 = _mm256_shuffle_epi32(lhs_mat_01_01, 245); //A00(12-15) A00(12-15) A01(12-15) A01(12-15) A00(12-15) A00(12-15) A01(12-15) A01(12-15)
-                    const __m256i lhs_mat_23_01_sp2 = _mm256_shuffle_epi32(lhs_mat_23_01, 245); //A02(12-15) A03(12-15) A02(12-15) A03(12-15) A02(12-15) A03(12-15) A02(12-15) A03(12-15)
-
-                    const __m256i lhs_mat_01_02_sp2 = _mm256_shuffle_epi32(lhs_mat_01_02, 245); //A00(20-23) A00(20-23) A01(20-23) A01(20-23) A00(20-23) A00(20-23) A01(20-23) A01(20-23)
-                    const __m256i lhs_mat_23_02_sp2 = _mm256_shuffle_epi32(lhs_mat_23_02, 245); //A02(20-23) A03(20-23) A02(20-23) A03(20-23) A02(20-23) A03(20-23) A02(20-23) A03(20-23)
-
-                    const __m256i lhs_mat_01_03_sp2 = _mm256_shuffle_epi32(lhs_mat_01_03, 245); //A00(28-31) A00(28-31) A01(28-31) A01(28-31) A00(28-31) A00(28-31) A01(28-31) A01(28-31)
-                    const __m256i lhs_mat_23_03_sp2 = _mm256_shuffle_epi32(lhs_mat_23_03, 245); //A02(28-31) A03(28-31) A02(28-31) A03(28-31) A02(28-31) A03(28-31) A02(28-31) A03(28-31)
-
-                    const __m256i lhs_mat_01_10_sp2 = _mm256_shuffle_epi32(lhs_mat_01_10, 245); //A10(4-7) A10(4-7) A11(4-7) A11(4-7) A10(4-7) A10(4-7) A11(4-7) A11(4-7)
-                    const __m256i lhs_mat_23_10_sp2 = _mm256_shuffle_epi32(lhs_mat_23_10, 245); //A12(4-7) A13(4-7) A12(4-7) A13(4-7) A12(4-7) A13(4-7) A12(4-7) A13(4-7)
-
-                    const __m256i lhs_mat_01_11_sp2 = _mm256_shuffle_epi32(lhs_mat_01_11, 245); //A10(12-15) A10(12-15) A11(12-15) A11(12-15) A10(12-15) A10(12-15) A11(12-15) A11(12-15)
-                    const __m256i lhs_mat_23_11_sp2 = _mm256_shuffle_epi32(lhs_mat_23_11, 245); //A12(12-15) A13(12-15) A12(12-15) A13(12-15) A12(12-15) A13(12-15) A12(12-15) A13(12-15)
-
-                    const __m256i lhs_mat_01_12_sp2 = _mm256_shuffle_epi32(lhs_mat_01_12, 245); //A10(20-23) A10(20-23) A11(20-23) A11(20-23) A10(20-23) A10(20-23) A11(20-23) A11(20-23)
-                    const __m256i lhs_mat_23_12_sp2 = _mm256_shuffle_epi32(lhs_mat_23_12, 245); //A12(20-23) A13(20-23) A12(20-23) A13(20-23) A12(20-23) A13(20-23) A12(20-23) A13(20-23)
-
-                    const __m256i lhs_mat_01_13_sp2 = _mm256_shuffle_epi32(lhs_mat_01_13, 245); //A10(28-31) A10(28-31) A11(28-31) A11(28-31) A10(28-31) A10(28-31) A11(28-31) A11(28-31)
-                    const __m256i lhs_mat_23_13_sp2 = _mm256_shuffle_epi32(lhs_mat_23_13, 245); //A12(28-31) A13(28-31) A12(28-31) A13(28-31) A12(28-31) A13(28-31) A12(28-31) A13(28-31)
-
-                    // The values arranged in shuffle patterns are operated with dot product operation within 32 bit lane i.e corresponding bytes and multiplied and added into 32 bit integers within 32 bit lane
-                    __m256i iacc_mat_00_0_sp1 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_03_sp1, lhs_mat_01_03_sp1), _mm256_maddubs_epi16(rhs_mat_0145_02_sp1, lhs_mat_01_02_sp1)), _mm256_maddubs_epi16(rhs_mat_0145_01_sp1, lhs_mat_01_01_sp1)), _mm256_maddubs_epi16(rhs_mat_0145_00_sp1, lhs_mat_01_00_sp1));
-                    __m256i iacc_mat_01_0_sp1 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_03_sp1, lhs_mat_01_03_sp1), _mm256_maddubs_epi16(rhs_mat_2367_02_sp1, lhs_mat_01_02_sp1)), _mm256_maddubs_epi16(rhs_mat_2367_01_sp1, lhs_mat_01_01_sp1)), _mm256_maddubs_epi16(rhs_mat_2367_00_sp1, lhs_mat_01_00_sp1));
-                    __m256i iacc_mat_10_0_sp1 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_03_sp1, lhs_mat_23_03_sp1), _mm256_maddubs_epi16(rhs_mat_0145_02_sp1, lhs_mat_23_02_sp1)), _mm256_maddubs_epi16(rhs_mat_0145_01_sp1, lhs_mat_23_01_sp1)), _mm256_maddubs_epi16(rhs_mat_0145_00_sp1, lhs_mat_23_00_sp1));
-                    __m256i iacc_mat_11_0_sp1 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_03_sp1, lhs_mat_23_03_sp1), _mm256_maddubs_epi16(rhs_mat_2367_02_sp1, lhs_mat_23_02_sp1)), _mm256_maddubs_epi16(rhs_mat_2367_01_sp1, lhs_mat_23_01_sp1)), _mm256_maddubs_epi16(rhs_mat_2367_00_sp1, lhs_mat_23_00_sp1));
-                    __m256i iacc_mat_00_1_sp1 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_13_sp1, lhs_mat_01_13_sp1), _mm256_maddubs_epi16(rhs_mat_0145_12_sp1, lhs_mat_01_12_sp1)), _mm256_maddubs_epi16(rhs_mat_0145_11_sp1, lhs_mat_01_11_sp1)), _mm256_maddubs_epi16(rhs_mat_0145_10_sp1, lhs_mat_01_10_sp1));
-                    __m256i iacc_mat_01_1_sp1 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_13_sp1, lhs_mat_01_13_sp1), _mm256_maddubs_epi16(rhs_mat_2367_12_sp1, lhs_mat_01_12_sp1)), _mm256_maddubs_epi16(rhs_mat_2367_11_sp1, lhs_mat_01_11_sp1)), _mm256_maddubs_epi16(rhs_mat_2367_10_sp1, lhs_mat_01_10_sp1));
-                    __m256i iacc_mat_10_1_sp1 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_13_sp1, lhs_mat_23_13_sp1), _mm256_maddubs_epi16(rhs_mat_0145_12_sp1, lhs_mat_23_12_sp1)), _mm256_maddubs_epi16(rhs_mat_0145_11_sp1, lhs_mat_23_11_sp1)), _mm256_maddubs_epi16(rhs_mat_0145_10_sp1, lhs_mat_23_10_sp1));
-                    __m256i iacc_mat_11_1_sp1 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_13_sp1, lhs_mat_23_13_sp1), _mm256_maddubs_epi16(rhs_mat_2367_12_sp1, lhs_mat_23_12_sp1)), _mm256_maddubs_epi16(rhs_mat_2367_11_sp1, lhs_mat_23_11_sp1)), _mm256_maddubs_epi16(rhs_mat_2367_10_sp1, lhs_mat_23_10_sp1));
-
-                    __m256i iacc_mat_00_0_sp2 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_03_sp2, lhs_mat_01_03_sp2), _mm256_maddubs_epi16(rhs_mat_0145_02_sp2, lhs_mat_01_02_sp2)), _mm256_maddubs_epi16(rhs_mat_0145_01_sp2, lhs_mat_01_01_sp2)), _mm256_maddubs_epi16(rhs_mat_0145_00_sp2, lhs_mat_01_00_sp2));
-                    __m256i iacc_mat_01_0_sp2 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_03_sp2, lhs_mat_01_03_sp2), _mm256_maddubs_epi16(rhs_mat_2367_02_sp2, lhs_mat_01_02_sp2)), _mm256_maddubs_epi16(rhs_mat_2367_01_sp2, lhs_mat_01_01_sp2)), _mm256_maddubs_epi16(rhs_mat_2367_00_sp2, lhs_mat_01_00_sp2));
-                    __m256i iacc_mat_10_0_sp2 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_03_sp2, lhs_mat_23_03_sp2), _mm256_maddubs_epi16(rhs_mat_0145_02_sp2, lhs_mat_23_02_sp2)), _mm256_maddubs_epi16(rhs_mat_0145_01_sp2, lhs_mat_23_01_sp2)), _mm256_maddubs_epi16(rhs_mat_0145_00_sp2, lhs_mat_23_00_sp2));
-                    __m256i iacc_mat_11_0_sp2 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_03_sp2, lhs_mat_23_03_sp2), _mm256_maddubs_epi16(rhs_mat_2367_02_sp2, lhs_mat_23_02_sp2)), _mm256_maddubs_epi16(rhs_mat_2367_01_sp2, lhs_mat_23_01_sp2)), _mm256_maddubs_epi16(rhs_mat_2367_00_sp2, lhs_mat_23_00_sp2));
-                    __m256i iacc_mat_00_1_sp2 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_13_sp2, lhs_mat_01_13_sp2), _mm256_maddubs_epi16(rhs_mat_0145_12_sp2, lhs_mat_01_12_sp2)), _mm256_maddubs_epi16(rhs_mat_0145_11_sp2, lhs_mat_01_11_sp2)), _mm256_maddubs_epi16(rhs_mat_0145_10_sp2, lhs_mat_01_10_sp2));
-                    __m256i iacc_mat_01_1_sp2 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_13_sp2, lhs_mat_01_13_sp2), _mm256_maddubs_epi16(rhs_mat_2367_12_sp2, lhs_mat_01_12_sp2)), _mm256_maddubs_epi16(rhs_mat_2367_11_sp2, lhs_mat_01_11_sp2)), _mm256_maddubs_epi16(rhs_mat_2367_10_sp2, lhs_mat_01_10_sp2));
-                    __m256i iacc_mat_10_1_sp2 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_0145_13_sp2, lhs_mat_23_13_sp2), _mm256_maddubs_epi16(rhs_mat_0145_12_sp2, lhs_mat_23_12_sp2)), _mm256_maddubs_epi16(rhs_mat_0145_11_sp2, lhs_mat_23_11_sp2)), _mm256_maddubs_epi16(rhs_mat_0145_10_sp2, lhs_mat_23_10_sp2));
-                    __m256i iacc_mat_11_1_sp2 = _mm256_add_epi16(_mm256_add_epi16(_mm256_add_epi16(_mm256_maddubs_epi16(rhs_mat_2367_13_sp2, lhs_mat_23_13_sp2), _mm256_maddubs_epi16(rhs_mat_2367_12_sp2, lhs_mat_23_12_sp2)), _mm256_maddubs_epi16(rhs_mat_2367_11_sp2, lhs_mat_23_11_sp2)), _mm256_maddubs_epi16(rhs_mat_2367_10_sp2, lhs_mat_23_10_sp2));
-
-                    // Output of both shuffle patterns are added in order to sum dot product outputs of all 32 values in block
-                    __m256i iacc_mat_00_0 = _mm256_add_epi16(iacc_mat_00_0_sp1, iacc_mat_00_0_sp2);
-                    __m256i iacc_mat_01_0 = _mm256_add_epi16(iacc_mat_01_0_sp1, iacc_mat_01_0_sp2);
-                    __m256i iacc_mat_10_0 = _mm256_add_epi16(iacc_mat_10_0_sp1, iacc_mat_10_0_sp2);
-                    __m256i iacc_mat_11_0 = _mm256_add_epi16(iacc_mat_11_0_sp1, iacc_mat_11_0_sp2);
-
-                    __m256i iacc_mat_00_1 = _mm256_add_epi16(iacc_mat_00_1_sp1, iacc_mat_00_1_sp2);
-                    __m256i iacc_mat_01_1 = _mm256_add_epi16(iacc_mat_01_1_sp1, iacc_mat_01_1_sp2);
-                    __m256i iacc_mat_10_1 = _mm256_add_epi16(iacc_mat_10_1_sp1, iacc_mat_10_1_sp2);
-                    __m256i iacc_mat_11_1 = _mm256_add_epi16(iacc_mat_11_1_sp1, iacc_mat_11_1_sp2);
-
-                    // Output of both shuffle patterns are added in order to sum dot product outputs of all 32 values in block
-                    iacc_mat_00_0 = _mm256_madd_epi16(iacc_mat_00_0, scale_0145_0);
-                    iacc_mat_01_0 = _mm256_madd_epi16(iacc_mat_01_0, scale_2367_0);
-                    iacc_mat_10_0 = _mm256_madd_epi16(iacc_mat_10_0, scale_0145_0);
-                    iacc_mat_11_0 = _mm256_madd_epi16(iacc_mat_11_0, scale_2367_0);
-
-                    iacc_mat_00_1 = _mm256_madd_epi16(iacc_mat_00_1, scale_0145_1);
-                    iacc_mat_01_1 = _mm256_madd_epi16(iacc_mat_01_1, scale_2367_1);
-                    iacc_mat_10_1 = _mm256_madd_epi16(iacc_mat_10_1, scale_0145_1);
-                    iacc_mat_11_1 = _mm256_madd_epi16(iacc_mat_11_1, scale_2367_1);
-
-                    // Straighten out to make 4 row vectors (4 for each sub block which are accumulated together in the next step)
-                    __m256i iacc_row_0_0 = _mm256_blend_epi32(iacc_mat_00_0, _mm256_shuffle_epi32(iacc_mat_01_0, 78), 204);
-                    __m256i iacc_row_1_0 = _mm256_blend_epi32(_mm256_shuffle_epi32(iacc_mat_00_0, 78), iacc_mat_01_0, 204);
-                    __m256i iacc_row_2_0 = _mm256_blend_epi32(iacc_mat_10_0, _mm256_shuffle_epi32(iacc_mat_11_0, 78), 204);
-                    __m256i iacc_row_3_0 = _mm256_blend_epi32(_mm256_shuffle_epi32(iacc_mat_10_0, 78), iacc_mat_11_0, 204);
-                    __m256i iacc_row_0_1 = _mm256_blend_epi32(iacc_mat_00_1, _mm256_shuffle_epi32(iacc_mat_01_1, 78), 204);
-                    __m256i iacc_row_1_1 = _mm256_blend_epi32(_mm256_shuffle_epi32(iacc_mat_00_1, 78), iacc_mat_01_1, 204);
-                    __m256i iacc_row_2_1 = _mm256_blend_epi32(iacc_mat_10_1, _mm256_shuffle_epi32(iacc_mat_11_1, 78), 204);
-                    __m256i iacc_row_3_1 = _mm256_blend_epi32(_mm256_shuffle_epi32(iacc_mat_10_1, 78), iacc_mat_11_1, 204);
-
-                    __m256i iacc_row_0 = _mm256_add_epi32(iacc_row_0_0, iacc_row_0_1);
-                    __m256i iacc_row_1 = _mm256_add_epi32(iacc_row_1_0, iacc_row_1_1);
-                    __m256i iacc_row_2 = _mm256_add_epi32(iacc_row_2_0, iacc_row_2_1);
-                    __m256i iacc_row_3 = _mm256_add_epi32(iacc_row_3_0, iacc_row_3_1);
-
-                    // Load the scale(d) values for all the 4 Q8_k blocks and repeat it across lanes
-                    const __m128 row_scale_f32_sse = _mm_load_ps(a_ptr[b].d);
-                    const __m256 row_scale_f32 = _mm256_set_m128(row_scale_f32_sse, row_scale_f32_sse); //GGML_F32Cx8_REPEAT_LOAD(a_ptrs[rp][b].d, loadMask);
-
-                    // Multiply with appropiate scales and accumulate (for both d and dmin) below
-                    acc_rows[0] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_0), _mm256_mul_ps(col_scale_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 0)), acc_rows[0]);
-                    acc_rows[1] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_1), _mm256_mul_ps(col_scale_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 85)), acc_rows[1]);
-                    acc_rows[2] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_2), _mm256_mul_ps(col_scale_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 170)), acc_rows[2]);
-                    acc_rows[3] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_3), _mm256_mul_ps(col_scale_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 255)), acc_rows[3]);
-
-                    __m256i iacc_row_min_0 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_hsum_0123_01, 0), mins_01);
-                    __m256i iacc_row_min_1 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_hsum_0123_01, 85), mins_01);
-                    __m256i iacc_row_min_2 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_hsum_0123_01, 170), mins_01);
-                    __m256i iacc_row_min_3 = _mm256_madd_epi16(_mm256_shuffle_epi32(lhs_bsums_hsum_0123_01, 255), mins_01);
-
-                    acc_min_rows[0] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_min_0), _mm256_mul_ps(col_dmin_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 0)), acc_min_rows[0]);
-                    acc_min_rows[1] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_min_1), _mm256_mul_ps(col_dmin_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 85)), acc_min_rows[1]);
-                    acc_min_rows[2] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_min_2), _mm256_mul_ps(col_dmin_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 170)), acc_min_rows[2]);
-                    acc_min_rows[3] = _mm256_fmadd_ps(_mm256_cvtepi32_ps(iacc_row_min_3), _mm256_mul_ps(col_dmin_f32, _mm256_shuffle_ps(row_scale_f32, row_scale_f32, 255)), acc_min_rows[3]);
-                }
-            }
-
-            // Store the accumulated values
-            for (int i = 0; i < 4; i++) {
-                _mm256_storeu_ps((float * )(s + ((y * 4 + i) * bs + x * 8)), _mm256_sub_ps(acc_rows[i], acc_min_rows[i]));
-            }
-        }
-    }
-
-#else
-
-    float sumf[4][8];
-    float sum_minf[4][8];
-    uint32_t utmp[32];
-    int sumi1;
-    int sumi2;
-    int sumi;
-
-    for (int y = 0; y < nr / 4; y++) {
-        const block_q8_Kx4 * a_ptr = (const block_q8_Kx4 *) vy + (y * nb);
-        for (int x = 0; x < nc / ncols_interleaved; x++) {
-            const block_q4_Kx8 * b_ptr = (const block_q4_Kx8 *) vx + (x * nb);
-            for (int m = 0; m < 4; m++) {
-                for (int j = 0; j < ncols_interleaved; j++) {
-                    sumf[m][j] = 0.0;
-                    sum_minf[m][j] = 0.0;
-                }
-            }
-            for (int l = 0; l < nb; l++) {
-                for (int sb = 0; sb < 8; sb++) {
-                    memcpy(utmp + sb * 4, b_ptr[l].scales + sb * 12, 12);
-                    utmp[sb * 4 + 3] = ((utmp[sb * 4 + 2] >> 4) & kmask2) | (((utmp[sb * 4 + 1] >> 6) & kmask3) << 4);
-                    const uint32_t uaux_0 = utmp[sb * 4 + 1] & kmask1;
-                    utmp[sb * 4 + 1] = (utmp[sb * 4 + 2] & kmask2) | (((utmp[sb * 4 + 0] >> 6) & kmask3) << 4);
-                    utmp[sb * 4 + 2] = uaux_0;
-                    utmp[sb * 4 + 0] &= kmask1;
-                }
-                for (int k = 0; k < (qk / (2 * blocklen)); k++) {
-                    uint8_t *scales_0 = (uint8_t*) utmp + (k / 4) * 32;
-                    uint8_t *scales_1 = (uint8_t*) utmp + (k / 4) * 32 + 16;
-                    for (int m = 0; m < 4; m++) {
-                        for (int j = 0; j < ncols_interleaved; j++) {
-                            sumi1 = 0;
-                            sumi2 = 0;
-                            sumi = 0;
-                            for (int i = 0; i < blocklen; ++i) {
-                                const int v0 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0xF);
-                                const int v1 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] >> 4);
-                                sumi1 = (v0 * a_ptr[l].qs[(k >> 2) * 256 + (k % 4) * 4 * blocklen + m * blocklen + i]);
-                                sumi2 = (v1 * a_ptr[l].qs[(k >> 2) * 256 + (k % 4) * 4 * blocklen + m * blocklen + i + 128]);
-                                sumi1 = sumi1 * scales_0[j];
-                                sumi2 = sumi2 * scales_1[j];
-                                sumi += sumi1 + sumi2;
-                            }
-                            sumf[m][j] += sumi * GGML_FP16_TO_FP32(b_ptr[l].d[j]) * a_ptr[l].d[m];
-                        }
-                    }
-                }
-                for (int sb = 0; sb < 8; sb++) {
-                    uint8_t *mins = (uint8_t*) utmp + 8 + sb * 16;
-                    for(int m = 0; m < 4; m++) {
-                        const int16_t *bsums = a_ptr[l].bsums + (sb * 8) + (m * 4) - ((sb % 2) * 6);
-                        for(int j = 0; j < ncols_interleaved; j++) {
-                            sum_minf[m][j] += mins[j] * (bsums[0] + bsums[1]) * GGML_FP16_TO_FP32(b_ptr[l].dmin[j]) * a_ptr[l].d[m];
-                        }
-                    }
-                }
-            }
-            for (int m = 0; m < 4; m++) {
-                for (int j = 0; j < ncols_interleaved; j++) {
-                    s[(y * 4 + m) * bs + x * ncols_interleaved + j] = sumf[m][j] - sum_minf[m][j];
-                }
-            }
-        }
-    }
-#endif
-}
-
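The utmp_0/utmp_1 mask-and-shift blocks (kmask1/kmask2/kmask3) that recur throughout this kernel decode Q4_K's packed super-block metadata: eight 6-bit scales plus eight 6-bit mins squeezed into 12 bytes. A scalar sketch of the same unpack for a single (scale, min) pair, assuming the usual K-quant packing and mask values (kmask1 = 0x3f3f3f3f, kmask2 = 0x0f0f0f0f, kmask3 = 0x03030303); the vector code above is a four-at-a-time version of this:

    #include <stdint.h>

    // q points at the 12-byte scales field of one Q4_K block.
    // Sub-blocks 0-3 store their 6 bits in place; sub-blocks 4-7 are split
    // between a nibble in bytes 8-11 and the two spare top bits of bytes 0-7.
    static inline void get_scale_min_k4(int j, const uint8_t * q, uint8_t * d, uint8_t * m) {
        if (j < 4) {
            *d = q[j] & 63;
            *m = q[j + 4] & 63;
        } else {
            *d = (q[j + 4] & 0xF) | ((q[j - 4] >> 6) << 4);
            *m = (q[j + 4] >>  4) | ((q[j - 0] >> 6) << 4);
        }
    }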
-static void ggml_gemm_iq4_nl_4x4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) {
-    const int qk = QK8_0;
-    const int nb = n / qk;
-    const int ncols_interleaved = 4;
-    const int blocklen = 4;
-
-    assert (n % qk == 0);
-    assert (nr % 4 == 0);
-    assert (nc % ncols_interleaved == 0);
-
-    UNUSED(s);
-    UNUSED(bs);
-    UNUSED(vx);
-    UNUSED(vy);
-    UNUSED(nr);
-    UNUSED(nc);
-    UNUSED(nb);
-    UNUSED(ncols_interleaved);
-    UNUSED(blocklen);
-
-#if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD)
-    if (ggml_cpu_has_neon() && ggml_cpu_has_dotprod()) {
-        const int8x16_t kvalues = vld1q_s8(kvalues_iq4nl);
-
-        for (int y = 0; y < nr / 4; y++) {
-            const block_q8_0x4 * a_ptr = (const block_q8_0x4 *) vy + (y * nb);
-            for (int x = 0; x < nc / ncols_interleaved; x++) {
-                const block_iq4_nlx4 * b_ptr = (const block_iq4_nlx4 *) vx + (x * nb);
-
-                float32x4_t sumf[4];
-                for (int m = 0; m < 4; m++) {
-                    sumf[m] = vdupq_n_f32(0);
-                }
-
-                for (int l = 0; l < nb; l++) {
-                    float32x4_t a_d = vcvt_f32_f16(vld1_f16((const float16_t *)a_ptr[l].d));
-                    float32x4_t b_d = vcvt_f32_f16(vld1_f16((const float16_t *)b_ptr[l].d));
-
-                    int32x4_t sumi_0 = vdupq_n_s32(0);
-                    int32x4_t sumi_1 = vdupq_n_s32(0);
-                    int32x4_t sumi_2 = vdupq_n_s32(0);
-                    int32x4_t sumi_3 = vdupq_n_s32(0);
-
-                    for (int k = 0; k < 4; k++) {
-                        int8x16_t a_0 = vld1q_s8(a_ptr[l].qs + 16 * k + 0);
-                        int8x16_t a_1 = vld1q_s8(a_ptr[l].qs + 16 * k + 64);
-
-                        uint8x16_t b = vld1q_u8(b_ptr[l].qs + 16 * k);
-                        int8x16_t b_hi = vqtbl1q_s8(kvalues, b >> 4);
-                        int8x16_t b_lo = vqtbl1q_s8(kvalues, b & 0xF);
-
-                        sumi_0 = vdotq_laneq_s32(sumi_0, b_lo, a_0, 0);
-                        sumi_1 = vdotq_laneq_s32(sumi_1, b_lo, a_0, 1);
-                        sumi_2 = vdotq_laneq_s32(sumi_2, b_lo, a_0, 2);
-                        sumi_3 = vdotq_laneq_s32(sumi_3, b_lo, a_0, 3);
-                        sumi_0 = vdotq_laneq_s32(sumi_0, b_hi, a_1, 0);
-                        sumi_1 = vdotq_laneq_s32(sumi_1, b_hi, a_1, 1);
-                        sumi_2 = vdotq_laneq_s32(sumi_2, b_hi, a_1, 2);
-                        sumi_3 = vdotq_laneq_s32(sumi_3, b_hi, a_1, 3);
-                    }
-
-                    sumf[0] = vmlaq_f32(sumf[0], vmulq_laneq_f32(b_d, a_d, 0), vcvtq_f32_s32(sumi_0));
-                    sumf[1] = vmlaq_f32(sumf[1], vmulq_laneq_f32(b_d, a_d, 1), vcvtq_f32_s32(sumi_1));
-                    sumf[2] = vmlaq_f32(sumf[2], vmulq_laneq_f32(b_d, a_d, 2), vcvtq_f32_s32(sumi_2));
-                    sumf[3] = vmlaq_f32(sumf[3], vmulq_laneq_f32(b_d, a_d, 3), vcvtq_f32_s32(sumi_3));
-                }
-
-                for (int m = 0; m < 4; m++) {
-                    vst1q_f32(s + (y * 4 + m) * bs + x * 4, sumf[m]);
-                }
-            }
-        }
-        return;
-    }
-#endif // #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON)
-    {
-        float sumf[4][4];
-        int sumi;
-
-        for (int y = 0; y < nr / 4; y++) {
-            const block_q8_0x4 * a_ptr = (const block_q8_0x4 *) vy + (y * nb);
-            for (int x = 0; x < nc / ncols_interleaved; x++) {
-                const block_iq4_nlx4 * b_ptr = (const block_iq4_nlx4 *) vx + (x * nb);
-                for (int m = 0; m < 4; m++) {
-                    for (int j = 0; j < ncols_interleaved; j++) sumf[m][j] = 0.0;
-                }
-                for (int l = 0; l < nb; l++) {
-                    for (int k = 0; k < (qk / (2 * blocklen)); k++) {
-                        for (int m = 0; m < 4; m++) {
-                            for (int j = 0; j < ncols_interleaved; j++) {
-                                sumi = 0;
-                                for (int i = 0; i < blocklen; ++i) {
-                                    const int v0 = kvalues_iq4nl[b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0x0F];
-                                    const int v1 = kvalues_iq4nl[b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] >> 4];
-                                    sumi += ((v0 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i]) +
-                                             (v1 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i + qk / 2 * 4]));
-                                }
-                                sumf[m][j] += sumi * GGML_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_FP16_TO_FP32(a_ptr[l].d[m]);
-                            }
-                        }
-                    }
-                }
-                for (int m = 0; m < 4; m++) {
-                    for (int j = 0; j < ncols_interleaved; j++)
-                        s[(y * 4 + m) * bs + x * ncols_interleaved + j] = sumf[m][j];
-                }
-            }
-        }
-    }
-}
-
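Both the NEON path (the two vqtbl1q_s8 lookups) and the scalar tail of ggml_gemm_iq4_nl_4x4_q8_0 decode quants through the same 16-entry signed codebook: IQ4_NL replaces Q4_0's linear 4-bit grid with a non-linear one that spends more levels near zero. A scalar sketch of the per-byte decode; the table values are reproduced from memory of ggml-common.h and should be treated as an assumption:

    #include <stdint.h>

    // Non-linear 4-bit codebook: finer steps near zero, coarser at the tails.
    static const int8_t kvalues_iq4nl[16] = {
        -127, -104, -83, -65, -49, -35, -22, -10, 1, 13, 25, 38, 53, 69, 89, 113,
    };

    // Each packed byte carries two quants: low nibble first, high nibble second.
    static inline void iq4nl_decode_byte(uint8_t packed, int8_t * lo, int8_t * hi) {
        *lo = kvalues_iq4nl[packed & 0x0F];
        *hi = kvalues_iq4nl[packed >> 4];
    }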
block_q4_Kx8 make_block_q4_Kx8(block_q4_K * in, unsigned int blck_size_interleave) { - block_q4_Kx8 out; - //Delta(scale) and dmin values of the eight Q4_K structures are copied onto the output interleaved structure - for (int i = 0; i < 8; i++) { - out.d[i] = in[i].GGML_COMMON_AGGR_U.GGML_COMMON_AGGR_S.d; - } - - for (int i = 0; i < 8; i++) { - out.dmin[i] = in[i].GGML_COMMON_AGGR_U.GGML_COMMON_AGGR_S.dmin; - } - - const int end = QK_K * 4 / blck_size_interleave; - - // Interleave Q4_K quants by taking 8 bytes at a time - for (int i = 0; i < end; ++i) { - int src_id = i % 8; - int src_offset = (i / 8) * blck_size_interleave; - int dst_offset = i * blck_size_interleave; - - uint64_t elems; - memcpy(&elems, &in[src_id].qs[src_offset], sizeof(uint64_t)); - memcpy(&out.qs[dst_offset], &elems, sizeof(uint64_t)); - } - - // The below logic is designed so as to unpack and rearrange scales and mins values in Q4_K - // Currently the Q4_K structure has 8 scales and 8 mins packed in 12 bytes ( 6 bits for each value) - // The output Q4_Kx8 structure has 96 bytes - // Every 12 byte is packed such that it contains scales and mins for corresponding sub blocks from Q4_K structure - // For eg - First 12 bytes contains 8 scales and 8 mins - each of first sub block from different Q4_K structures - uint8_t s[8], m[8]; - - for (int i = 0; i < 4; i++) { - for (int j = 0; j < 8; j++) { - s[j] = in[j].scales[i] & 63; - m[j] = in[j].scales[i + 4] & 63; - } - - out.scales[i * 12] = (s[0] & 63) + ((s[4] & 48) << 2); - out.scales[i * 12 + 1] = (s[1] & 63) + ((s[5] & 48) << 2); - out.scales[i * 12 + 2] = (s[2] & 63) + ((s[6] & 48) << 2); - out.scales[i * 12 + 3] = (s[3] & 63) + ((s[7] & 48) << 2); - out.scales[i * 12 + 4] = (m[0] & 63) + ((m[4] & 48) << 2); - out.scales[i * 12 + 5] = (m[1] & 63) + ((m[5] & 48) << 2); - out.scales[i * 12 + 6] = (m[2] & 63) + ((m[6] & 48) << 2); - out.scales[i * 12 + 7] = (m[3] & 63) + ((m[7] & 48) << 2); - out.scales[i * 12 + 8] = (s[4] & 15) + ((m[4] & 15) << 4); - out.scales[i * 12 + 9] = (s[5] & 15) + ((m[5] & 15) << 4); - out.scales[i * 12 + 10] = (s[6] & 15) + ((m[6] & 15) << 4); - out.scales[i * 12 + 11] = (s[7] & 15) + ((m[7] & 15) << 4); - - } - - for (int i = 0; i < 4; i++) { - for (int j = 0; j < 8; j++) { - s[j] = ((in[j].scales[i] & 192) >> 2) | (in[j].scales[i+8] & 15); - m[j] = ((in[j].scales[i + 4] & 192) >> 2) | ((in[j].scales[i+8] & 240) >> 4); - } - - out.scales[i * 12 + 48] = (s[0] & 63) + ((s[4] & 48) << 2); - out.scales[i * 12 + 49] = (s[1] & 63) + ((s[5] & 48) << 2); - out.scales[i * 12 + 50] = (s[2] & 63) + ((s[6] & 48) << 2); - out.scales[i * 12 + 51] = (s[3] & 63) + ((s[7] & 48) << 2); - out.scales[i * 12 + 52] = (m[0] & 63) + ((m[4] & 48) << 2); - out.scales[i * 12 + 53] = (m[1] & 63) + ((m[5] & 48) << 2); - out.scales[i * 12 + 54] = (m[2] & 63) + ((m[6] & 48) << 2); - out.scales[i * 12 + 55] = (m[3] & 63) + ((m[7] & 48) << 2); - out.scales[i * 12 + 56] = (s[4] & 15) + ((m[4] & 15) << 4); - out.scales[i * 12 + 57] = (s[5] & 15) + ((m[5] & 15) << 4); - out.scales[i * 12 + 58] = (s[6] & 15) + ((m[6] & 15) << 4); - out.scales[i * 12 + 59] = (s[7] & 15) + ((m[7] & 15) << 4); - - } - - return out; -} - -static int repack_q4_0_to_q4_0_4_bl(struct ggml_tensor * t, int interleave_block, const void * GGML_RESTRICT data, size_t data_size) { - GGML_ASSERT(t->type == GGML_TYPE_Q4_0); - GGML_ASSERT(interleave_block == 4 || interleave_block == 8); - constexpr int nrows_interleaved = 4; - - block_q4_0x4 * dst = (block_q4_0x4 *)t->data; - const block_q4_0 * src = (const 
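The scale shuffling in make_block_q4_Kx8 above is the inverse of the standard K-quant packing: each Q4_K block stores 8 six-bit scales and 8 six-bit mins in 12 bytes, with the upper two bits of sub-blocks 4..7 stashed in the top bits of bytes 0..7. A hedged scalar sketch of that layout, mirroring llama.cpp's get_scale_min_k4 helper (reconstructed from memory, so treat the exact indexing as an assumption):

#include <cstdint>

// q: the 12 packed bytes of one block_q4_K's scales field.
static void unpack_scale_min_k4(int j, const uint8_t q[12], uint8_t & sc, uint8_t & mn) {
    if (j < 4) {                 // sub-blocks 0..3: low 6 bits of bytes 0..7
        sc = q[j]     & 63;
        mn = q[j + 4] & 63;
    } else {                     // sub-blocks 4..7: bytes 8..11 plus the spare high bits
        sc = (q[j + 4] & 0x0F) | ((q[j - 4] >> 6) << 4);
        mn = (q[j + 4] >>    4) | ((q[j    ] >> 6) << 4);
    }
}

The masks in the code above (& 63, & 192 followed by >> 2, & 15, & 240 followed by >> 4) are exactly these extractions, performed for eight source blocks at a time and re-packed in the same 12-byte pattern per sub-block.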
block_q4_0 *)data; - block_q4_0 dst_tmp[4]; - int nrow = ggml_nrows(t); - int nblocks = t->ne[0] / QK4_0; - - GGML_ASSERT(data_size == nrow * nblocks * sizeof(block_q4_0)); - - if (t->ne[1] % nrows_interleaved != 0 || t->ne[0] % 8 != 0) { - return -1; - } - - for (int b = 0; b < nrow; b += nrows_interleaved) { - for (int64_t x = 0; x < nblocks; x++) { - for (int i = 0; i < nrows_interleaved; i++) { - dst_tmp[i] = src[x + i * nblocks]; - } - *dst++ = make_block_q4_0x4(dst_tmp, interleave_block); - } - src += nrows_interleaved * nblocks; - } - return 0; - - GGML_UNUSED(data_size); -} -static int repack_q4_K_to_q4_K_8_bl(struct ggml_tensor * t, int interleave_block, const void * GGML_RESTRICT data, size_t data_size) { - GGML_ASSERT(t->type == GGML_TYPE_Q4_K); - GGML_ASSERT(interleave_block == 8); - constexpr int nrows_interleaved = 8; - - block_q4_Kx8 * dst = (block_q4_Kx8*)t->data; - const block_q4_K * src = (const block_q4_K*) data; - block_q4_K dst_tmp[8]; - int nrow = ggml_nrows(t); - int nblocks = t->ne[0] / QK_K; - - GGML_ASSERT(data_size == nrow * nblocks * sizeof(block_q4_K)); - - if (t->ne[1] % nrows_interleaved != 0 || t->ne[0] % 8 != 0) { - return -1; - } - - for (int b = 0; b < nrow; b += nrows_interleaved) { - for (int64_t x = 0; x < nblocks; x++) { - for (int i = 0; i < nrows_interleaved; i++ ) { - dst_tmp[i] = src[x + i * nblocks]; - } - *dst++ = make_block_q4_Kx8(dst_tmp, interleave_block); - } - src += nrows_interleaved * nblocks; - } - return 0; - - GGML_UNUSED(data_size); -} - -static int repack_q4_0_to_q4_0_8_bl(struct ggml_tensor * t, int interleave_block, const void * GGML_RESTRICT data, size_t data_size) { - GGML_ASSERT(t->type == GGML_TYPE_Q4_0); - GGML_ASSERT(interleave_block == 8); - constexpr int nrows_interleaved = 8; - - block_q4_0x8 * dst = (block_q4_0x8*)t->data; - const block_q4_0 * src = (const block_q4_0*) data; - block_q4_0 dst_tmp[8]; - int nrow = ggml_nrows(t); - int nblocks = t->ne[0] / QK4_0; - - GGML_ASSERT(data_size == nrow * nblocks * sizeof(block_q4_0)); - - if (t->ne[1] % nrows_interleaved != 0 || t->ne[0] % 8 != 0) { - return -1; - } - - for (int b = 0; b < nrow; b += nrows_interleaved) { - for (int64_t x = 0; x < nblocks; x++) { - for (int i = 0; i < nrows_interleaved; i++ ) { - dst_tmp[i] = src[x + i * nblocks]; - } - *dst++ = make_block_q4_0x8(dst_tmp, interleave_block); - } - src += nrows_interleaved * nblocks; - } - return 0; - - GGML_UNUSED(data_size); -} - -static block_iq4_nlx4 make_block_iq4_nlx4(block_iq4_nl * in, unsigned int blck_size_interleave) { - block_iq4_nlx4 out; - - for (int i = 0; i < 4; i++) { - out.d[i] = in[i].d; - } - - const int end = QK4_NL * 2 / blck_size_interleave; - - // TODO: this branch seems wrong - //if (blck_size_interleave == 8) { - // for (int i = 0; i < end; ++i) { - // int src_id = i % 4; - // int src_offset = (i / 4) * blck_size_interleave; - // int dst_offset = i * blck_size_interleave; - - // // Using memcpy to avoid unaligned memory accesses - // memcpy(&out.qs[dst_offset], &in[src_id].qs[src_offset], sizeof(uint64_t)); - // } - //} else - if (blck_size_interleave == 4) { - for (int i = 0; i < end; ++i) { - int src_id = i % 4; - int src_offset = (i / 4) * blck_size_interleave; - int dst_offset = i * blck_size_interleave; - - memcpy(&out.qs[dst_offset], &in[src_id].qs[src_offset], sizeof(uint32_t)); - } - } else { - GGML_ASSERT(false); - } - - return out; -} - -static int repack_iq4_nl_to_iq4_nl_4_bl(struct ggml_tensor * t, int interleave_block, const void * GGML_RESTRICT data, size_t data_size) { - 
GGML_ASSERT(t->type == GGML_TYPE_IQ4_NL); - //GGML_ASSERT(interleave_block == 4 || interleave_block == 8); - GGML_ASSERT(interleave_block == 4); - - block_iq4_nlx4 * dst = (block_iq4_nlx4 *)t->data; - const block_iq4_nl * src = (const block_iq4_nl *)data; - block_iq4_nl dst_tmp[4]; - int nrow = ggml_nrows(t); - int nrows_interleaved = 4; - int nblocks = t->ne[0] / QK4_0; - - GGML_ASSERT(data_size == nrow * nblocks * sizeof(block_iq4_nl)); - - if (t->ne[1] % nrows_interleaved != 0 || t->ne[0] % 8 != 0) { - return -1; - } - - for (int b = 0; b < nrow; b += nrows_interleaved) { - for (int64_t x = 0; x < nblocks; x++) { - for (int i = 0; i < nrows_interleaved; i++) { - dst_tmp[i] = src[x + i * nblocks]; - } - *dst++ = make_block_iq4_nlx4(dst_tmp, interleave_block); - } - src += nrows_interleaved * nblocks; - } - return 0; - - GGML_UNUSED(data_size); -} - -namespace ggml::cpu::aarch64 { -// repack -template -int repack(struct ggml_tensor *, const void *, size_t); - -// TODO: generalise. -template <> int repack(struct ggml_tensor * t, const void * data, size_t data_size) { - return repack_q4_0_to_q4_0_4_bl(t, 4, data, data_size); -} - -template <> int repack(struct ggml_tensor * t, const void * data, size_t data_size) { - return repack_q4_0_to_q4_0_4_bl(t, 8, data, data_size); -} - -template <> int repack(struct ggml_tensor * t, const void * data, size_t data_size) { - return repack_q4_0_to_q4_0_8_bl(t, 8, data, data_size); -} - -template <> int repack(struct ggml_tensor * t, const void * data, size_t data_size) { - return repack_q4_K_to_q4_K_8_bl(t, 8, data, data_size); -} - -template <> int repack(struct ggml_tensor * t, const void * data, size_t data_size) { - return repack_iq4_nl_to_iq4_nl_4_bl(t, 4, data, data_size); -} - -// TODO: needs to be revisited -//template <> int repack(struct ggml_tensor * t, const void * data, size_t data_size) { -// return repack_iq4_nl_to_iq4_nl_4_bl(t, 8, data, data_size); -//} - -// gemv -template -void gemv(int, float *, size_t, const void *, const void *, int, int); - -template <> void gemv(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) { - ggml_gemv_q4_0_4x4_q8_0(n, s, bs, vx, vy, nr, nc); -} - -template <> void gemv(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) { - ggml_gemv_q4_0_4x8_q8_0(n, s, bs, vx, vy, nr, nc); -} - -template <> void gemv(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) { - ggml_gemv_q4_0_8x8_q8_0(n, s, bs, vx, vy, nr, nc); -} - -template <> void gemv(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) { - ggml_gemv_q4_K_8x8_q8_K(n, s, bs, vx, vy, nr, nc); -} - -template <> void gemv(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) { - ggml_gemv_iq4_nl_4x4_q8_0(n, s, bs, vx, vy, nr, nc); -} - -// gemm -template -void gemm(int, float *, size_t, const void *, const void *, int, int); - -template <> void gemm(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) { - ggml_gemm_q4_0_4x4_q8_0(n, s, bs, vx, vy, nr, nc); -} - -template <> void gemm(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) { - ggml_gemm_q4_0_4x8_q8_0(n, s, bs, vx, vy, nr, nc); -} - -template <> void gemm(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) { - ggml_gemm_q4_0_8x8_q8_0(n, s, bs, vx, vy, nr, nc); -} - -template <> void gemm(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) { - 
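The template argument lists in this section (the repack/gemv/gemm specializations above and the tensor_traits instances below) have lost their angle-bracket contents in this copy of the diff. Going by the upstream llama.cpp source this file vendors, the declarations have roughly the following shape, keying each kernel on the block type, interleave size, and column count; treat the exact parameter order as an assumption reconstructed from memory:

// repack is keyed on <block type, interleave size, column count>:
template <typename BLOC_TYPE, int64_t INTER_SIZE, int64_t NB_COLS>
int repack(struct ggml_tensor *, const void *, size_t);

template <> int repack<block_q4_0, 4, 4>(struct ggml_tensor * t, const void * data, size_t data_size) {
    return repack_q4_0_to_q4_0_4_bl(t, 4, data, data_size);
}

// gemv/gemm additionally carry the activation (param) type:
template <typename BLOC_TYPE, int64_t INTER_SIZE, int64_t NB_COLS, ggml_type PARAM_TYPE>
void gemv(int, float *, size_t, const void *, const void *, int, int);

template <> void gemv<block_q4_0, 4, 4, GGML_TYPE_Q8_0>(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) {
    ggml_gemv_q4_0_4x4_q8_0(n, s, bs, vx, vy, nr, nc);
}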
ggml_gemm_q4_K_8x8_q8_K(n, s, bs, vx, vy, nr, nc); -} - -template <> void gemm<block_iq4_nl, 4, 4, GGML_TYPE_Q8_0>(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) { - ggml_gemm_iq4_nl_4x4_q8_0(n, s, bs, vx, vy, nr, nc); -} - -class tensor_traits_base : public ggml::cpu::tensor_traits { - public: - virtual int repack(struct ggml_tensor * t, const void * data, size_t data_size) = 0; -}; - -template <typename BLOC_TYPE, int64_t INTER_SIZE, int64_t NB_COLS, ggml_type PARAM_TYPE> class tensor_traits : public tensor_traits_base { - - bool work_size(int /* n_threads */, const struct ggml_tensor * op, size_t & size) override { - // not really a GGML_TYPE_Q8_0 but same size. - switch (op->op) { - case GGML_OP_MUL_MAT: - size = ggml_row_size(PARAM_TYPE, ggml_nelements(op->src[1])); - return true; - case GGML_OP_MUL_MAT_ID: - size = ggml_row_size(PARAM_TYPE, ggml_nelements(op->src[1])); - size = GGML_PAD(size, sizeof(int64_t)); // + padding for next block. - size += sizeof(int64_t) * (1+op->src[0]->ne[2]) * op->src[1]->ne[2]; - return true; - default: - // GGML_ABORT("fatal error"); - break; - } - return false; - } - - bool compute_forward(struct ggml_compute_params * params, struct ggml_tensor * op) override { - switch (op->op) { - case GGML_OP_MUL_MAT: - forward_mul_mat(params, op); - return true; - case GGML_OP_MUL_MAT_ID: - forward_mul_mat_id(params, op); - return true; - default: - // GGML_ABORT("fatal error"); - break; - } - return false; - } - - void forward_mul_mat(ggml_compute_params * params, ggml_tensor * op) { - const ggml_tensor * src0 = op->src[0]; - const ggml_tensor * src1 = op->src[1]; - ggml_tensor * dst = op; - - GGML_TENSOR_BINARY_OP_LOCALS - - const int ith = params->ith; - const int nth = params->nth; - - GGML_ASSERT(ne0 == ne01); - GGML_ASSERT(ne1 == ne11); - GGML_ASSERT(ne2 == ne12); - GGML_ASSERT(ne3 == ne13); - - // dst cannot be transposed or permuted - GGML_ASSERT(nb0 == sizeof(float)); - GGML_ASSERT(nb0 <= nb1); - GGML_ASSERT(nb1 <= nb2); - GGML_ASSERT(nb2 <= nb3); - - GGML_ASSERT(src1->type == GGML_TYPE_F32); - - GGML_ASSERT(ggml_n_dims(op->src[0]) == 2); - // GGML_ASSERT(ggml_n_dims(op->src[1]) == 2); - - char * wdata = static_cast<char *>(params->wdata); - const size_t nbw1 = ggml_row_size(PARAM_TYPE, ne10); - - assert(params->wsize >= nbw1 * ne11); - - const ggml_from_float_t from_float = ggml_get_type_traits_cpu(PARAM_TYPE)->from_float; - - int64_t i11_processed = 0; - for (int64_t i11 = ith * 4; i11 < ne11 - ne11 % 4; i11 += nth * 4) { - ggml_quantize_mat_t<4, PARAM_TYPE>((float *) ((char *) src1->data + i11 * nb11), (void *) (wdata + i11 * nbw1), 4, ne10); - } - - i11_processed = ne11 - ne11 % 4; - for (int64_t i11 = i11_processed + ith; i11 < ne11; i11 += nth) { - from_float((float *) ((char *) src1->data + i11 * nb11), (void *) (wdata + i11 * nbw1), ne10); - } - - ggml_barrier(params->threadpool); - - const void * src1_wdata = params->wdata; - const size_t src1_col_stride = ggml_row_size(PARAM_TYPE, ne10); - int64_t src0_start = (ith * ne01) / nth; - int64_t src0_end = ((ith + 1) * ne01) / nth; - src0_start = (src0_start % NB_COLS) ? src0_start + NB_COLS - (src0_start % NB_COLS) : src0_start; - src0_end = (src0_end % NB_COLS) ? src0_end + NB_COLS - (src0_end % NB_COLS) : src0_end; - if (src0_start >= src0_end) { - return; - } - - // If there are more than three rows in src1, use gemm; otherwise, use gemv.
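Before that dispatch, the partition just computed hands each thread a contiguous slice of src0 rows (the output columns), with both ends rounded up to a multiple of NB_COLS so that an interleaved group of rows is never split between two threads. A standalone sketch of the same arithmetic, with illustrative sizes:

#include <cstdint>
#include <cstdio>

static void partition(int ne01, int nth, int nb_cols) {
    for (int ith = 0; ith < nth; ++ith) {
        int64_t s = (int64_t)ith * ne01 / nth;
        int64_t e = (int64_t)(ith + 1) * ne01 / nth;
        s = (s % nb_cols) ? s + nb_cols - (s % nb_cols) : s;   // align slice start
        e = (e % nb_cols) ? e + nb_cols - (e % nb_cols) : e;   // align slice end
        if (s < e) printf("thread %d: rows [%lld, %lld)\n", ith, (long long)s, (long long)e);
    }
}

int main() { partition(96, 5, 8); }

With ne01 = 96, nth = 5, NB_COLS = 8 this prints five disjoint, aligned ranges covering all 96 rows; a thread whose rounded range is empty just returns, which is the src0_start >= src0_end early-out above.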
- if (ne11 > 3) { - gemm(ne00, - (float *) ((char *) dst->data) + src0_start, ne01, - (const char *) src0->data + src0_start * nb01, - (const char *) src1_wdata, ne11 - ne11 % 4, src0_end - src0_start); - } - for (int iter = ne11 - ne11 % 4; iter < ne11; iter++) { - gemv(ne00, - (float *) ((char *) dst->data + (iter * nb1)) + src0_start, ne01, - (const char *) src0->data + src0_start * nb01, - (const char *) src1_wdata + (src1_col_stride * iter), 1, - src0_end - src0_start); - } - } - - void forward_mul_mat_id(ggml_compute_params * params, ggml_tensor * op) { - const ggml_tensor * src0 = op->src[0]; - const ggml_tensor * src1 = op->src[1]; - const ggml_tensor * ids = op->src[2]; - ggml_tensor * dst = op; - - GGML_TENSOR_BINARY_OP_LOCALS - - const int ith = params->ith; - const int nth = params->nth; - - const ggml_from_float_t from_float = ggml_get_type_traits_cpu(PARAM_TYPE)->from_float; - - // we don't support permuted src0 or src1 - GGML_ASSERT(nb00 == ggml_type_size(src0->type)); - GGML_ASSERT(nb10 == ggml_type_size(src1->type)); - - // dst cannot be transposed or permuted - GGML_ASSERT(nb0 == sizeof(float)); - GGML_ASSERT(nb0 <= nb1); - GGML_ASSERT(nb1 <= nb2); - GGML_ASSERT(nb2 <= nb3); - - GGML_ASSERT(ne03 == 1); - GGML_ASSERT(ne13 == 1); - GGML_ASSERT(ne3 == 1); - - GGML_ASSERT(src1->type == GGML_TYPE_F32); - - // row groups - const int n_ids = ids->ne[0]; // n_expert_used - const int n_as = ne02; // n_expert - - const size_t nbw1 = ggml_row_size(PARAM_TYPE, ne10); - const size_t nbw2 = nbw1*ne11; - const size_t nbw3 = nbw2*ne12; - - struct mmid_row_mapping { - int32_t i1; - int32_t i2; - }; - - GGML_ASSERT(params->wsize >= (GGML_PAD(nbw3, sizeof(int64_t)) + n_as * sizeof(int64_t) + - n_as * ne12 * sizeof(mmid_row_mapping))); - - auto * wdata = (char *) params->wdata; - auto * wdata_src1_end = (char *) wdata + GGML_PAD(nbw3, sizeof(int64_t)); - auto * matrix_row_counts = (int64_t *) (wdata_src1_end); // [n_as] - - struct mmid_row_mapping * matrix_rows = (struct mmid_row_mapping *) (matrix_row_counts + n_as); // [n_as][ne12] - - // src1: float32 => param type - for (int64_t i12 = 0; i12 < ne12; ++i12) { - for (int64_t i11 = ith; i11 < ne11; i11 += nth) { - from_float((float *)((char *) src1->data + i12 * nb12 + i11 * nb11), - (void *) (wdata + i12 * nbw2 + i11 * nbw1), - ne10); - } - } - -#define MMID_MATRIX_ROW(row_id, i1) matrix_rows[(row_id) * ne12 + (i1)] - - if (ith == 0) { - // initialize matrix_row_counts - memset(matrix_row_counts, 0, n_as * sizeof(int64_t)); - - // group rows by src0 matrix - for (int32_t iid1 = 0; iid1 < ids->ne[1]; ++iid1) { - for (int32_t id = 0; id < n_ids; ++id) { - const int32_t i02 = - *(const int32_t *) ((const char *) ids->data + iid1 * ids->nb[1] + id * ids->nb[0]); - - GGML_ASSERT(i02 >= 0 && i02 < n_as); - - MMID_MATRIX_ROW(i02, matrix_row_counts[i02]) = { id, iid1 }; - matrix_row_counts[i02] += 1; - } - } - } - - ggml_barrier(params->threadpool); - - // compute each matrix multiplication in sequence - for (int cur_a = 0; cur_a < n_as; ++cur_a) { - const int64_t cne1 = matrix_row_counts[cur_a]; - - if (cne1 == 0) { - continue; - } - - const auto * src0_cur = (const char *) src0->data + cur_a*nb02; - - //const int64_t nr0 = ne01; // src0 rows - const int64_t nr1 = cne1; // src1 rows - - int64_t src0_cur_start = (ith * ne01) / nth; - int64_t src0_cur_end = ((ith + 1) * ne01) / nth; - - src0_cur_start = (src0_cur_start % NB_COLS) ? src0_cur_start + NB_COLS - (src0_cur_start % NB_COLS) : src0_cur_start; - src0_cur_end = (src0_cur_end % NB_COLS) ? 
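The bookkeeping above groups src1 rows by the expert (src0 matrix) that the ids tensor routes them to, so each expert's interleaved weights are streamed once instead of being re-fetched per token. A hedged standalone sketch of the same two-pass structure, with illustrative names and sizes:

#include <cstdio>
#include <vector>

struct row_mapping { int32_t i1, i2; };   // (expert slot within the token, src1 row)

int main() {
    const int n_as = 3;                                        // number of experts
    const int32_t ids[4][2] = { {0,2}, {1,0}, {2,2}, {0,1} };  // per-token expert picks
    std::vector<int64_t> counts(n_as, 0);                      // rows routed to each expert
    std::vector<row_mapping> rows(n_as * 4);                   // [n_as][n_tokens] layout
    for (int32_t t = 0; t < 4; ++t)                            // tokens
        for (int32_t k = 0; k < 2; ++k) {                      // experts used per token
            const int32_t e = ids[t][k];
            rows[e * 4 + counts[e]++] = { k, t };              // cf. MMID_MATRIX_ROW above
        }
    for (int e = 0; e < n_as; ++e)
        printf("expert %d: %lld rows\n", e, (long long)counts[e]);
    return 0;
}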
src0_cur_end + NB_COLS - (src0_cur_end % NB_COLS) : src0_cur_end; - - if (src0_cur_start >= src0_cur_end) { - return; - } - - for (int ir1 = 0; ir1 < nr1; ir1++) { - struct mmid_row_mapping row_mapping = MMID_MATRIX_ROW(cur_a, ir1); - - const int id = row_mapping.i1; // selected expert index - - const int64_t i11 = id % ne11; - const int64_t i12 = row_mapping.i2; // row index in src1 - - const int64_t i1 = id; // selected expert index - const int64_t i2 = i12; // row - - const auto * src1_col = (const char *) wdata + (i11 * nbw1 + i12 * nbw2); - - gemv(ne00, - (float *)((char *) dst->data + (i1 * nb1 + i2 * nb2)) + src0_cur_start, ne01, - src0_cur + src0_cur_start * nb01, - src1_col, 1, src0_cur_end - src0_cur_start); - } - } -#undef MMID_MATRIX_ROW - } - - int repack(struct ggml_tensor * t, const void * data, size_t data_size) override { - GGML_LOG_DEBUG("%s: repack tensor %s with %s_%dx%d\n", __func__, t->name, ggml_type_name(t->type), - (int) NB_COLS, (int) INTER_SIZE); - return ggml::cpu::aarch64::repack(t, data, data_size); - } -}; - -// instance for Q4 -static const tensor_traits q4_0_4x4_q8_0; -static const tensor_traits q4_0_4x8_q8_0; -static const tensor_traits q4_0_8x8_q8_0; -static const tensor_traits q4_K_8x8_q8_K; - -// instance for IQ4 -static const tensor_traits iq4_nl_4x4_q8_0; - -} // namespace ggml::cpu::aarch64 - -static const ggml::cpu::tensor_traits * ggml_aarch64_get_optimal_repack_type(const struct ggml_tensor * cur) { - if (cur->type == GGML_TYPE_Q4_0) { - if (ggml_cpu_has_avx2() || (ggml_cpu_has_sve() && ggml_cpu_has_matmul_int8() && ggml_cpu_get_sve_cnt() == QK8_0)) { - if (cur->ne[1] % 8 == 0) { - return &ggml::cpu::aarch64::q4_0_8x8_q8_0; - } - } - if (ggml_cpu_has_neon() && ggml_cpu_has_matmul_int8()) { - if (cur->ne[1] % 4 == 0) { - return &ggml::cpu::aarch64::q4_0_4x8_q8_0; - } - } - if (ggml_cpu_has_neon() && ggml_cpu_has_dotprod()) { - if (cur->ne[1] % 4 == 0) { - return &ggml::cpu::aarch64::q4_0_4x4_q8_0; - } - } - } else if (cur->type == GGML_TYPE_Q4_K) { - if (ggml_cpu_has_avx2()) { - if (cur->ne[1] % 8 == 0) { - return &ggml::cpu::aarch64::q4_K_8x8_q8_K; - } - } - } else if (cur->type == GGML_TYPE_IQ4_NL) { - if (ggml_cpu_has_neon() && ggml_cpu_has_dotprod()) { - if (cur->ne[1] % 4 == 0) { - return &ggml::cpu::aarch64::iq4_nl_4x4_q8_0; - } - } - } - - return nullptr; -} - -static enum ggml_status ggml_backend_cpu_aarch64_buffer_init_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor) { - tensor->extra = (void *) const_cast(ggml_aarch64_get_optimal_repack_type(tensor)); - - GGML_UNUSED(buffer); - return GGML_STATUS_SUCCESS; -} - -static void ggml_backend_cpu_aarch64_buffer_set_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, - const void * data, size_t offset, size_t size) { - GGML_ASSERT(offset == 0); - GGML_ASSERT(size == ggml_nbytes(tensor)); - - auto tensor_traits = (ggml::cpu::aarch64::tensor_traits_base *) tensor->extra; - auto OK = tensor_traits->repack(tensor, data, size); - - GGML_ASSERT(OK == 0); - GGML_UNUSED(buffer); -} - -static const char * ggml_backend_cpu_aarch64_buffer_type_get_name(ggml_backend_buffer_type_t buft) { - return "CPU_AARCH64"; - - GGML_UNUSED(buft); -} - -static ggml_backend_buffer_t ggml_backend_cpu_aarch64_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) { - ggml_backend_buffer_t buffer = ggml_backend_buft_alloc_buffer(ggml_backend_cpu_buffer_type(), size); - - if (buffer == nullptr) { - return nullptr; - } - - buffer->buft = buft; - buffer->iface.init_tensor = 
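The buffer-type plumbing being wired up here is the whole trick of the CPU_AARCH64 path: init_tensor attaches the optimal repack traits to each tensor, and set_tensor converts the incoming row-major weights into the interleaved layout before any matmul runs. A minimal sketch of that flow with illustrative stand-in types (not the real ggml interfaces):

#include <cstdio>

struct tensor { const char * name; void * extra = nullptr; };
struct traits { const char * kernel; int (*repack)(tensor &); };

static int repack_q4_0_4x4(tensor & t) { printf("repacking %s\n", t.name); return 0; }
static traits q4_0_4x4 = { "q4_0_4x4_q8_0", repack_q4_0_4x4 };

static void init_tensor(tensor & t) { t.extra = &q4_0_4x4; }   // pick the best layout
static void set_tensor(tensor & t) {                           // weight data arrives here
    if (auto * tr = (traits *)t.extra) tr->repack(t);          // convert before first use
}

int main() { tensor w{"blk.0.ffn_up.weight"}; init_tensor(w); set_tensor(w); return 0; }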
ggml_backend_cpu_aarch64_buffer_init_tensor; - buffer->iface.set_tensor = ggml_backend_cpu_aarch64_buffer_set_tensor; - buffer->iface.get_tensor = nullptr; - buffer->iface.cpy_tensor = nullptr; - return buffer; -} - -static size_t ggml_backend_cpu_aarch64_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) { - return TENSOR_ALIGNMENT; - - GGML_UNUSED(buft); -} - -namespace ggml::cpu::aarch64 { -class extra_buffer_type : ggml::cpu::extra_buffer_type { - bool supports_op(ggml_backend_dev_t, const struct ggml_tensor * op) override { - if ( op->op == GGML_OP_MUL_MAT && - op->src[0]->buffer && - (ggml_n_dims(op->src[0]) == 2) && - op->src[0]->buffer->buft == ggml_backend_cpu_aarch64_buffer_type() && - ggml_aarch64_get_optimal_repack_type(op->src[0]) - ) { - if (op->src[1]->buffer && !ggml_backend_buft_is_host(op->src[1]->buffer->buft)) { - return false; - } - if (op->src[1]->type == GGML_TYPE_F32) { - return true; - } - //if (op->src[1]->type == GGML_TYPE_Q8_0) { - // return true; - //} - // may be possible if Q8_0 packed... - } else if (op->op == GGML_OP_MUL_MAT_ID - && op->src[0]->buffer - && (ggml_n_dims(op->src[0]) == 3) - && op->src[0]->buffer->buft == ggml_backend_cpu_aarch64_buffer_type() - && ggml_aarch64_get_optimal_repack_type(op->src[0]) - ) { - if (op->src[1]->buffer && !ggml_backend_buft_is_host(op->src[1]->buffer->buft)) { - return false; - } - if (op->src[1]->type == GGML_TYPE_F32) { - return true; - } - //if (op->src[1]->type == GGML_TYPE_Q8_0) { - // return true; - //} - } - return false; - } - - ggml::cpu::tensor_traits * get_tensor_traits(const struct ggml_tensor * op) override { - if (op->op == GGML_OP_MUL_MAT || op->op == GGML_OP_MUL_MAT_ID) { - if (op->src[0]->buffer && op->src[0]->buffer->buft == ggml_backend_cpu_aarch64_buffer_type()) { - return (ggml::cpu::tensor_traits *) op->src[0]->extra; - } - } - return nullptr; - } -}; -} // namespace ggml::cpu::aarch64 - -ggml_backend_buffer_type_t ggml_backend_cpu_aarch64_buffer_type(void) { - static struct ggml_backend_buffer_type ggml_backend_cpu_buffer_type_aarch64 = { - /* .iface = */ { - /* .get_name = */ ggml_backend_cpu_aarch64_buffer_type_get_name, - /* .alloc_buffer = */ ggml_backend_cpu_aarch64_buffer_type_alloc_buffer, - /* .get_alignment = */ ggml_backend_cpu_aarch64_buffer_type_get_alignment, - /* .get_max_size = */ nullptr, // defaults to SIZE_MAX - /* .get_alloc_size = */ nullptr, // defaults to ggml_nbytes - /* .is_host = */ nullptr, - }, - /* .device = */ ggml_backend_reg_dev_get(ggml_backend_cpu_reg(), 0), - /* .context = */ new ggml::cpu::aarch64::extra_buffer_type(), - }; - - return &ggml_backend_cpu_buffer_type_aarch64; -} diff --git a/ml/backend/ggml/ggml/src/ggml-cpu/ggml-cpu-aarch64.h b/ml/backend/ggml/ggml/src/ggml-cpu/ggml-cpu-aarch64.h deleted file mode 100644 index 6e84c826b..000000000 --- a/ml/backend/ggml/ggml/src/ggml-cpu/ggml-cpu-aarch64.h +++ /dev/null @@ -1,8 +0,0 @@ -#pragma once - -#include "ggml-cpu-traits.h" -#include "ggml.h" - -// GGML internal header - -ggml_backend_buffer_type_t ggml_backend_cpu_aarch64_buffer_type(void); diff --git a/ml/backend/ggml/ggml/src/ggml-cpu/ggml-cpu-impl.h b/ml/backend/ggml/ggml/src/ggml-cpu/ggml-cpu-impl.h index e4af07635..d839cf5c5 100644 --- a/ml/backend/ggml/ggml/src/ggml-cpu/ggml-cpu-impl.h +++ b/ml/backend/ggml/ggml/src/ggml-cpu/ggml-cpu-impl.h @@ -62,11 +62,17 @@ struct ggml_compute_params { #if defined(__s390x__) && defined(__VEC__) #ifndef __VXE__ #define __VXE__ -#endif +#endif // __VXE__ #ifndef __VXE2__ #define __VXE2__ -#endif -#endif 
+#endif // __VXE2__ +#endif // __s390x__ && __VEC__ + +#if defined(__s390x__) && defined(GGML_NNPA) +#ifndef __NNPA__ +#define __NNPA__ +#endif // __NNPA__ +#endif // __s390x__ && GGML_NNPA #if defined(__ARM_FEATURE_SVE) #include <arm_sve.h> @@ -320,21 +326,17 @@ inline static int32x4_t ggml_vdotq_s32(int32x4_t acc, int8x16_t a, int8x16_t b) #ifdef __wasm_simd128__ #include <wasm_simd128.h> -#else +#endif + #ifdef __POWER9_VECTOR__ #include <altivec.h> -#else +#endif + #if defined(_MSC_VER) || defined(__MINGW32__) #include <intrin.h> -#else -#if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__) || defined(__SSE3__) || defined(__SSE__) -#if !defined(__riscv) +#elif defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__) || defined(__SSE3__) || defined(__SSE__) #include <immintrin.h> #endif -#endif -#endif -#endif -#endif #ifdef __riscv_v_intrinsic #include <riscv_vector.h> @@ -375,7 +377,7 @@ inline static int32x4_t ggml_vdotq_s32(int32x4_t acc, int8x16_t a, int8x16_t b) #define vec_xor(a, b) ((a) ^ (b)) // Vector XOR #endif -typedef signed char char8x16_t __attribute__((vector_size(16))); +typedef signed char char8x16_t __attribute__((vector_size(16))); typedef unsigned char uchar8x16_t __attribute__((vector_size(16))); typedef int8_t int8x16_t __attribute__((vector_size(16))); @@ -386,10 +388,10 @@ typedef uint8_t uint8x16_t __attribute__((vector_size(16))); typedef uint16_t uint16x8_t __attribute__((vector_size(16))); typedef uint32_t uint32x4_t __attribute__((vector_size(16))); -typedef float float32x4_t __attribute__((vector_size(16))); -typedef double double64x2_t __attribute((vector_size(16))); +typedef float float32x4_t __attribute__((vector_size(16))); +typedef double double64x2_t __attribute__((vector_size(16))); -typedef signed long long long64x2_t __attribute((vector_size(16))); +typedef signed long long long64x2_t __attribute__((vector_size(16))); typedef unsigned long long ulong64x2_t __attribute__((vector_size(16))); typedef struct ggml_uint8x16x2_t { @@ -507,6 +509,9 @@ static __m256 __lasx_xvreplfr2vr_s(const float val) { // TODO: move to ggml-threading void ggml_barrier(struct ggml_threadpool * tp); +void ggml_threadpool_chunk_set(struct ggml_threadpool * tp, int value); +int ggml_threadpool_chunk_add(struct ggml_threadpool * tp, int value); + #ifdef __cplusplus } #endif diff --git a/ml/backend/ggml/ggml/src/ggml-cpu/ggml-cpu-quants.c b/ml/backend/ggml/ggml/src/ggml-cpu/ggml-cpu-quants.c deleted file mode 100644 index ccd0651eb..000000000 --- a/ml/backend/ggml/ggml/src/ggml-cpu/ggml-cpu-quants.c +++ /dev/null @@ -1,13131 +0,0 @@ -#define GGML_COMMON_IMPL_C -#include "ggml-common.h" - -#include "ggml-quants.h" -#include "ggml-cpu-quants.h" -#include "ggml-impl.h" -#include "ggml-cpu-impl.h" -#include "ggml-cpu.h" - -#include <math.h> -#include <string.h> -#include <assert.h> -#include <float.h> -#include <stdlib.h> // for qsort -#include <stdio.h> // for GGML_ASSERT - -#define GROUP_MAX_EPS 1e-15f -#define GROUP_MAX_EPS_IQ3_XXS 1e-8f -#define GROUP_MAX_EPS_IQ2_S 1e-8f -#define GROUP_MAX_EPS_IQ1_M 1e-7f -#define GROUP_MAX_EPS_IQ1_S 1e-12f - -#define UNUSED GGML_UNUSED - -// some compilers don't provide _mm256_set_m128i, e.g.
gcc 7 -#define MM256_SET_M128I(a, b) _mm256_insertf128_si256(_mm256_castsi128_si256(b), (a), 1) - -#if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__) -// multiply int8_t, add results pairwise twice -static inline __m128i mul_sum_i8_pairs(const __m128i x, const __m128i y) { - // Get absolute values of x vectors - const __m128i ax = _mm_sign_epi8(x, x); - // Sign the values of the y vectors - const __m128i sy = _mm_sign_epi8(y, x); - // Perform multiplication and create 16-bit values - const __m128i dot = _mm_maddubs_epi16(ax, sy); - const __m128i ones = _mm_set1_epi16(1); - return _mm_madd_epi16(ones, dot); -} - -#if __AVX__ || __AVX2__ || __AVX512F__ -// horizontally add 8 floats -static inline float hsum_float_8(const __m256 x) { - __m128 res = _mm256_extractf128_ps(x, 1); - res = _mm_add_ps(res, _mm256_castps256_ps128(x)); - res = _mm_add_ps(res, _mm_movehl_ps(res, res)); - res = _mm_add_ss(res, _mm_movehdup_ps(res)); - return _mm_cvtss_f32(res); -} - -// horizontally add 8 int32_t -static inline int hsum_i32_8(const __m256i a) { - const __m128i sum128 = _mm_add_epi32(_mm256_castsi256_si128(a), _mm256_extractf128_si256(a, 1)); - const __m128i hi64 = _mm_unpackhi_epi64(sum128, sum128); - const __m128i sum64 = _mm_add_epi32(hi64, sum128); - const __m128i hi32 = _mm_shuffle_epi32(sum64, _MM_SHUFFLE(2, 3, 0, 1)); - return _mm_cvtsi128_si32(_mm_add_epi32(sum64, hi32)); -} - -// horizontally add 4 int32_t -static inline int hsum_i32_4(const __m128i a) { - const __m128i hi64 = _mm_unpackhi_epi64(a, a); - const __m128i sum64 = _mm_add_epi32(hi64, a); - const __m128i hi32 = _mm_shuffle_epi32(sum64, _MM_SHUFFLE(2, 3, 0, 1)); - return _mm_cvtsi128_si32(_mm_add_epi32(sum64, hi32)); -} - -#if defined(__AVX2__) || defined(__AVX512F__) -// spread 32 bits to 32 bytes { 0x00, 0xFF } -static inline __m256i bytes_from_bits_32(const uint8_t * x) { - uint32_t x32; - memcpy(&x32, x, sizeof(uint32_t)); - const __m256i shuf_mask = _mm256_set_epi64x( - 0x0303030303030303, 0x0202020202020202, - 0x0101010101010101, 0x0000000000000000); - __m256i bytes = _mm256_shuffle_epi8(_mm256_set1_epi32(x32), shuf_mask); - const __m256i bit_mask = _mm256_set1_epi64x(0x7fbfdfeff7fbfdfe); - bytes = _mm256_or_si256(bytes, bit_mask); - return _mm256_cmpeq_epi8(bytes, _mm256_set1_epi64x(-1)); -} - -// Unpack 32 4-bit fields into 32 bytes -// The output vector contains 32 bytes, each one in [ 0 .. 
15 ] interval -static inline __m256i bytes_from_nibbles_32(const uint8_t * rsi) -{ - const __m128i tmp = _mm_loadu_si128((const __m128i *)rsi); - const __m256i bytes = MM256_SET_M128I(_mm_srli_epi16(tmp, 4), tmp); - const __m256i lowMask = _mm256_set1_epi8( 0xF ); - return _mm256_and_si256(lowMask, bytes); -} - -// add int16_t pairwise and return as float vector -static inline __m256 sum_i16_pairs_float(const __m256i x) { - const __m256i ones = _mm256_set1_epi16(1); - const __m256i summed_pairs = _mm256_madd_epi16(ones, x); - return _mm256_cvtepi32_ps(summed_pairs); -} - -static inline __m256 mul_sum_us8_pairs_float(const __m256i ax, const __m256i sy) { -#if defined(__AVX512VNNI__) && defined(__AVX512VL__) - const __m256i zero = _mm256_setzero_si256(); - const __m256i summed_pairs = _mm256_dpbusd_epi32(zero, ax, sy); - return _mm256_cvtepi32_ps(summed_pairs); -#elif defined(__AVXVNNI__) - const __m256i zero = _mm256_setzero_si256(); - const __m256i summed_pairs = _mm256_dpbusd_avx_epi32(zero, ax, sy); - return _mm256_cvtepi32_ps(summed_pairs); -#else - // Perform multiplication and create 16-bit values - const __m256i dot = _mm256_maddubs_epi16(ax, sy); - return sum_i16_pairs_float(dot); -#endif -} - -// multiply int8_t, add results pairwise twice and return as float vector -static inline __m256 mul_sum_i8_pairs_float(const __m256i x, const __m256i y) { -#if __AVXVNNIINT8__ - const __m256i zero = _mm256_setzero_si256(); - const __m256i summed_pairs = _mm256_dpbssd_epi32(zero, x, y); - return _mm256_cvtepi32_ps(summed_pairs); -#else - // Get absolute values of x vectors - const __m256i ax = _mm256_sign_epi8(x, x); - // Sign the values of the y vectors - const __m256i sy = _mm256_sign_epi8(y, x); - return mul_sum_us8_pairs_float(ax, sy); -#endif -} - -static inline __m128i packNibbles( __m256i bytes ) -{ - // Move bits within 16-bit lanes from 0000_abcd_0000_efgh into 0000_0000_abcd_efgh -#if __AVX512F__ - const __m256i bytes_srli_4 = _mm256_srli_epi16(bytes, 4); // 0000_0000_abcd_0000 - bytes = _mm256_or_si256(bytes, bytes_srli_4); // 0000_abcd_abcd_efgh - return _mm256_cvtepi16_epi8(bytes); // abcd_efgh -#else - const __m256i lowByte = _mm256_set1_epi16( 0xFF ); - __m256i high = _mm256_andnot_si256( lowByte, bytes ); - __m256i low = _mm256_and_si256( lowByte, bytes ); - high = _mm256_srli_epi16( high, 4 ); - bytes = _mm256_or_si256( low, high ); - - // Compress uint16_t lanes into bytes - __m128i r0 = _mm256_castsi256_si128( bytes ); - __m128i r1 = _mm256_extracti128_si256( bytes, 1 ); - return _mm_packus_epi16( r0, r1 ); -#endif -} -#elif defined(__AVX__) -static inline __m128i packNibbles( __m128i bytes1, __m128i bytes2 ) -{ - // Move bits within 16-bit lanes from 0000_abcd_0000_efgh into 0000_0000_abcd_efgh - const __m128i lowByte = _mm_set1_epi16( 0xFF ); - __m128i high = _mm_andnot_si128( lowByte, bytes1 ); - __m128i low = _mm_and_si128( lowByte, bytes1 ); - high = _mm_srli_epi16( high, 4 ); - bytes1 = _mm_or_si128( low, high ); - high = _mm_andnot_si128( lowByte, bytes2 ); - low = _mm_and_si128( lowByte, bytes2 ); - high = _mm_srli_epi16( high, 4 ); - bytes2 = _mm_or_si128( low, high ); - - return _mm_packus_epi16( bytes1, bytes2); -} - -static inline __m128i mul_add_epi8_sse(const __m128i x, const __m128i y) { - const __m128i ax = _mm_sign_epi8(x, x); - const __m128i sy = _mm_sign_epi8(y, x); - return _mm_maddubs_epi16(ax, sy); -} - -// spread 32 bits to 32 bytes { 0x00, 0xFF } -static inline __m256i bytes_from_bits_32(const uint8_t * x) { - uint32_t x32; - memcpy(&x32, x, 
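Both the AVX2 variant above and the split-SSE AVX variant below of mul_sum_i8_pairs_float compute the same thing: each 32-bit lane of the result is the sum of four adjacent int8 products, converted to float. The _mm_sign_epi8/_mm256_sign_epi8 dance exists because maddubs multiplies unsigned by signed, so the sign of x is first moved onto y and |x| becomes the unsigned operand. A scalar reference of the semantics (ignoring the int16 saturation edge case around -128):

#include <cstdint>

static void mul_sum_i8_pairs_ref(const int8_t x[32], const int8_t y[32], float out[8]) {
    for (int lane = 0; lane < 8; ++lane) {
        int32_t acc = 0;
        for (int k = 0; k < 4; ++k)                     // 4 products per 32-bit lane
            acc += (int32_t)x[lane*4 + k] * y[lane*4 + k];
        out[lane] = (float)acc;                         // cvtepi32_ps at the end
    }
}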
sizeof(uint32_t)); - const __m128i shuf_maskl = _mm_set_epi64x(0x0101010101010101, 0x0000000000000000); - const __m128i shuf_maskh = _mm_set_epi64x(0x0303030303030303, 0x0202020202020202); - __m128i bytesl = _mm_shuffle_epi8(_mm_set1_epi32(x32), shuf_maskl); - __m128i bytesh = _mm_shuffle_epi8(_mm_set1_epi32(x32), shuf_maskh); - const __m128i bit_mask = _mm_set1_epi64x(0x7fbfdfeff7fbfdfe); - bytesl = _mm_or_si128(bytesl, bit_mask); - bytesh = _mm_or_si128(bytesh, bit_mask); - bytesl = _mm_cmpeq_epi8(bytesl, _mm_set1_epi64x(-1)); - bytesh = _mm_cmpeq_epi8(bytesh, _mm_set1_epi64x(-1)); - return MM256_SET_M128I(bytesh, bytesl); -} - -// Unpack 32 4-bit fields into 32 bytes -// The output vector contains 32 bytes, each one in [ 0 .. 15 ] interval -static inline __m256i bytes_from_nibbles_32(const uint8_t * rsi) -{ - // Load 16 bytes from memory - __m128i tmpl = _mm_loadu_si128((const __m128i *)rsi); - __m128i tmph = _mm_srli_epi16(tmpl, 4); - const __m128i lowMask = _mm_set1_epi8(0xF); - tmpl = _mm_and_si128(lowMask, tmpl); - tmph = _mm_and_si128(lowMask, tmph); - return MM256_SET_M128I(tmph, tmpl); -} - -// add int16_t pairwise and return as float vector -static inline __m256 sum_i16_pairs_float(const __m128i xh, const __m128i xl) { - const __m128i ones = _mm_set1_epi16(1); - const __m128i summed_pairsl = _mm_madd_epi16(ones, xl); - const __m128i summed_pairsh = _mm_madd_epi16(ones, xh); - const __m256i summed_pairs = MM256_SET_M128I(summed_pairsh, summed_pairsl); - return _mm256_cvtepi32_ps(summed_pairs); -} - -static inline __m256 mul_sum_us8_pairs_float(const __m256i ax, const __m256i sy) { - const __m128i axl = _mm256_castsi256_si128(ax); - const __m128i axh = _mm256_extractf128_si256(ax, 1); - const __m128i syl = _mm256_castsi256_si128(sy); - const __m128i syh = _mm256_extractf128_si256(sy, 1); - // Perform multiplication and create 16-bit values - const __m128i dotl = _mm_maddubs_epi16(axl, syl); - const __m128i doth = _mm_maddubs_epi16(axh, syh); - return sum_i16_pairs_float(doth, dotl); -} - -// multiply int8_t, add results pairwise twice and return as float vector -static inline __m256 mul_sum_i8_pairs_float(const __m256i x, const __m256i y) { - const __m128i xl = _mm256_castsi256_si128(x); - const __m128i xh = _mm256_extractf128_si256(x, 1); - const __m128i yl = _mm256_castsi256_si128(y); - const __m128i yh = _mm256_extractf128_si256(y, 1); - // Get absolute values of x vectors - const __m128i axl = _mm_sign_epi8(xl, xl); - const __m128i axh = _mm_sign_epi8(xh, xh); - // Sign the values of the y vectors - const __m128i syl = _mm_sign_epi8(yl, xl); - const __m128i syh = _mm_sign_epi8(yh, xh); - // Perform multiplication and create 16-bit values - const __m128i dotl = _mm_maddubs_epi16(axl, syl); - const __m128i doth = _mm_maddubs_epi16(axh, syh); - return sum_i16_pairs_float(doth, dotl); -} - -// larger version of mul_sum_i8_pairs_float where x and y are each represented by four 128-bit vectors -static inline __m256 mul_sum_i8_quad_float(const __m128i x_1_0, const __m128i x_1_1, const __m128i x_2_0, const __m128i x_2_1, - const __m128i y_1_0, const __m128i y_1_1, const __m128i y_2_0, const __m128i y_2_1) { - const __m128i mone = _mm_set1_epi16(1); - - const __m128i p16_1_0 = mul_add_epi8_sse(x_1_0, y_1_0); - const __m128i p16_1_1 = mul_add_epi8_sse(x_1_1, y_1_1); - const __m128i p16_2_0 = mul_add_epi8_sse(x_2_0, y_2_0); - const __m128i p16_2_1 = mul_add_epi8_sse(x_2_1, y_2_1); - const __m128i p_1_0 = _mm_madd_epi16(p16_1_0, mone); - const __m128i p_1_1 = _mm_madd_epi16(p16_1_1, 
mone); - const __m128i p_2_0 = _mm_madd_epi16(p16_2_0, mone); - const __m128i p_2_1 = _mm_madd_epi16(p16_2_1, mone); - const __m128i p_1 = _mm_add_epi32(p_1_0, p_1_1); - const __m128i p_2 = _mm_add_epi32(p_2_0, p_2_1); - return _mm256_cvtepi32_ps(MM256_SET_M128I(p_2, p_1)); -} - -// quad fp16 delta calculation -static inline __m256 quad_fp16_delta_float(const float x0, const float y0, const float x1, const float y1) { - // GGML_FP16_TO_FP32 is faster than Intel F16C - return _mm256_set_m128(_mm_set1_ps(GGML_FP16_TO_FP32(x1) * GGML_FP16_TO_FP32(y1)), - _mm_set1_ps(GGML_FP16_TO_FP32(x0) * GGML_FP16_TO_FP32(y0))); -} -#endif -#elif defined(__SSSE3__) -// horizontally add 4x4 floats -static inline float hsum_float_4x4(const __m128 a, const __m128 b, const __m128 c, const __m128 d) { - __m128 res_0 =_mm_hadd_ps(a, b); - __m128 res_1 =_mm_hadd_ps(c, d); - __m128 res =_mm_hadd_ps(res_0, res_1); - res =_mm_hadd_ps(res, res); - res =_mm_hadd_ps(res, res); - - return _mm_cvtss_f32(res); -} -#endif // __AVX__ || __AVX2__ || __AVX512F__ -#endif // defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__) - -#if defined(__ARM_NEON) || defined(__wasm_simd128__) || defined(__POWER9_VECTOR__) -#define B1(c,s,n) 0x ## n ## c , 0x ## n ## s -#define B2(c,s,n) B1(c,s,n ## c), B1(c,s,n ## s) -#define B3(c,s,n) B2(c,s,n ## c), B2(c,s,n ## s) -#define B4(c,s,n) B3(c,s,n ## c), B3(c,s,n ## s) -#define B5(c,s,n) B4(c,s,n ## c), B4(c,s,n ## s) -#define B6(c,s,n) B5(c,s,n ## c), B5(c,s,n ## s) -#define B7(c,s,n) B6(c,s,n ## c), B6(c,s,n ## s) -#define B8(c,s ) B7(c,s, c), B7(c,s, s) - -// precomputed tables for expanding 8bits to 8 bytes: -static const uint64_t table_b2b_0[1 << 8] = { B8(00, 10) }; // ( b) << 4 -static const uint64_t table_b2b_1[1 << 8] = { B8(10, 00) }; // (!b) << 4 -#endif - -#if defined(__loongarch_sx) - -static __m128i lsx_packs_w(__m128i a, __m128i b) { - __m128i tmp, tmp1; - tmp = __lsx_vsat_w(a, 15); - tmp1 = __lsx_vsat_w(b, 15); - return __lsx_vpickev_h(tmp1, tmp); -} - -static __m128i lsx_packs_h(__m128i a, __m128i b) { - __m128i tmp, tmp1; - tmp = __lsx_vsat_h(a, 7); - tmp1 = __lsx_vsat_h(b, 7); - return __lsx_vpickev_b(tmp1, tmp); -} - -static __m128i lsx_packus_h(__m128i a, __m128i b) { - __m128i tmp, tmp1; - tmp = __lsx_vsat_hu(a, 7); - tmp1 = __lsx_vsat_hu(b, 7); - return __lsx_vpickev_b(tmp1, tmp); -} - -static __m128i lsx_maddubs_h(__m128i a, __m128i b) { - __m128i tmp1, tmp2; - tmp1 = __lsx_vmulwev_h_b(a, b); - tmp2 = __lsx_vmulwod_h_b(a, b); - return __lsx_vsadd_h(tmp1, tmp2); -} - -static __m128i lsx_madd_h(__m128i a, __m128i b) { - __m128i tmp1, tmp2; - tmp1 = __lsx_vmulwev_w_h(a, b); - tmp2 = __lsx_vmulwod_w_h(a, b); - return __lsx_vadd_w(tmp1, tmp2); -} - -static __m128i lsx_set_w(int32_t a, int32_t b, int32_t c, int32_t d) { - v4i32 __ret = {d, c, b, a}; - return (__m128i)__ret; -} - -static __m128i lsx_shuffle_b(__m128i a, __m128i b) { - __m128i mask_f, zero, tmp0, tmp2, mask; - int f = 0x8f; - mask_f = __lsx_vreplgr2vr_b(f); - zero = __lsx_vldi(0); - tmp0 = __lsx_vand_v(b, mask_f); // get mask with low 4 bit and sign bits - tmp0 = __lsx_vori_b(tmp0, 0x10); // make each mask or with 0x10 prepare for positive - mask = __lsx_vsle_b(zero, tmp0); // if mask >= 0, set mask - tmp2 = __lsx_vand_v(tmp0, mask); // maskout the in2 < ones - return __lsx_vshuf_b(a, zero, tmp2); -} - -static __m128i lsx_hadd_h(__m128i a, __m128i b) { - __m128i tmp1 = __lsx_vpickev_h(b, a); - __m128i tmp2 = __lsx_vpickod_h(b, a); - return __lsx_vadd_h(tmp1, tmp2); -} - -static 
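The B1..B8 macro pyramid above precomputes 256-entry tables that expand the 8 bits of a byte into 8 output bytes, one per bit: table_b2b_0 places 0x10 in output byte j when bit j of the index is set ("( b) << 4"), and table_b2b_1 does the same for the complemented bit ("(!b) << 4"). These are used to splice the separately-stored fifth bits back into Q5 quants. A hypothetical standalone check of the layout, assuming the little-endian byte order the kernels rely on:

#include <cassert>
#include <cstdint>

static uint64_t b2b_0_entry(uint8_t b) {
    uint64_t out = 0;
    for (int j = 0; j < 8; ++j)                          // bit j -> byte j
        out |= (uint64_t)(((b >> j) & 1) << 4) << (8 * j);
    return out;
}

int main() {
    assert(b2b_0_entry(0x00) == 0x0000000000000000ULL);
    assert(b2b_0_entry(0x81) == 0x1000000000000010ULL);  // bits 0 and 7 set
    return 0;
}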
__m128i lsx_hadd_w(__m128i a, __m128i b) { - __m128i tmp1 = __lsx_vpickev_w(b, a); - __m128i tmp2 = __lsx_vpickod_w(b, a); - return __lsx_vadd_w(tmp1, tmp2); -} - -static __m128 lsx_hadd_s(__m128 a, __m128 b) { - __m128 tmp1 = (__m128)__lsx_vpickev_w((__m128i)b, (__m128i)a); - __m128 tmp2 = (__m128)__lsx_vpickod_w((__m128i)b, (__m128i)a); - - return __lsx_vfadd_s(tmp1, tmp2); -} - -static inline float hsum_float_4x4(const __m128 a, const __m128 b, const __m128 c, const __m128 d) { - __m128 res_0 =lsx_hadd_s(a, b); - __m128 res_1 =lsx_hadd_s(c, d); - __m128 res =lsx_hadd_s(res_0, res_1); - res =lsx_hadd_s(res, res); - res =lsx_hadd_s(res, res); - - return ((v4f32)res)[0]; -} -#endif - -#if defined(__loongarch_asx) - -#ifdef __clang__ -#define VREGS_PREFIX "$vr" -#define XREGS_PREFIX "$xr" -#else // GCC -#define VREGS_PREFIX "$f" -#define XREGS_PREFIX "$f" -#endif -#define __ALL_REGS "0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31" -// Convert __m128i to __m256i -static inline __m256i ____m256i(__m128i in) { - __m256i out = __lasx_xvldi(0); - __asm__ volatile ( - ".irp i," __ALL_REGS "\n\t" - " .ifc %[out], " XREGS_PREFIX"\\i \n\t" - " .irp j," __ALL_REGS "\n\t" - " .ifc %[in], " VREGS_PREFIX "\\j \n\t" - " xvpermi.q $xr\\i, $xr\\j, 0x20 \n\t" - " .endif \n\t" - " .endr \n\t" - " .endif \n\t" - ".endr \n\t" - : [out] "+f" (out) : [in] "f" (in) - ); - return out; -} -// Convert two __m128i to __m256i -static inline __m256i lasx_set_q(__m128i inhi, __m128i inlo) { - __m256i out; - __asm__ volatile ( - ".irp i," __ALL_REGS "\n\t" - " .ifc %[hi], " VREGS_PREFIX "\\i \n\t" - " .irp j," __ALL_REGS "\n\t" - " .ifc %[lo], " VREGS_PREFIX "\\j \n\t" - " xvpermi.q $xr\\i, $xr\\j, 0x20 \n\t" - " .endif \n\t" - " .endr \n\t" - " .endif \n\t" - ".endr \n\t" - ".ifnc %[out], %[hi] \n\t" - ".irp i," __ALL_REGS "\n\t" - " .ifc %[out], " XREGS_PREFIX "\\i \n\t" - " .irp j," __ALL_REGS "\n\t" - " .ifc %[hi], " VREGS_PREFIX "\\j \n\t" - " xvori.b $xr\\i, $xr\\j, 0 \n\t" - " .endif \n\t" - " .endr \n\t" - " .endif \n\t" - ".endr \n\t" - ".endif \n\t" - : [out] "=f" (out), [hi] "+f" (inhi) - : [lo] "f" (inlo) - ); - return out; -} -// Convert __m256i low part to __m128i -static inline __m128i lasx_extracti128_lo(__m256i in) { - __m128i out; - __asm__ volatile ( - ".ifnc %[out], %[in] \n\t" - ".irp i," __ALL_REGS "\n\t" - " .ifc %[out], " VREGS_PREFIX "\\i \n\t" - " .irp j," __ALL_REGS "\n\t" - " .ifc %[in], " XREGS_PREFIX "\\j \n\t" - " vori.b $vr\\i, $vr\\j, 0 \n\t" - " .endif \n\t" - " .endr \n\t" - " .endif \n\t" - ".endr \n\t" - ".endif \n\t" - : [out] "=f" (out) : [in] "f" (in) - ); - return out; -} -// Convert __m256i high part to __m128i -static inline __m128i lasx_extracti128_hi(__m256i in) { - __m128i out; - __asm__ volatile ( - ".irp i," __ALL_REGS "\n\t" - " .ifc %[out], " VREGS_PREFIX "\\i \n\t" - " .irp j," __ALL_REGS "\n\t" - " .ifc %[in], " XREGS_PREFIX "\\j \n\t" - " xvpermi.q $xr\\i, $xr\\j, 0x11 \n\t" - " .endif \n\t" - " .endr \n\t" - " .endif \n\t" - ".endr \n\t" - : [out] "=f" (out) : [in] "f" (in) - ); - return out; -} - -static __m256i lasx_set_w(int e7, int e6, int e5, int e4, int e3, int e2, int e1, int e0) { - v8i32 __ret = {e0, e1, e2, e3, e4, e5, e6, e7}; - return (__m256i)__ret; -} - -static __m256i lasx_set_d(int64_t a, int64_t b, int64_t c, int64_t d) { - v4i64 __ret = {d, c, b, a}; - return (__m256i)__ret; -} - -static __m256i lasx_insertf128( __m128i x, __m128i y) { - return lasx_set_q(x, y); -} - -static __m256i lasx_shuffle_b(__m256i a, 
__m256i b) { - __m256i mask_f, zero, tmp0, tmp2, mask; - int f = 0x8f; - mask_f = __lasx_xvreplgr2vr_b(f); - zero = __lasx_xvldi(0); - tmp0 = __lasx_xvand_v(b, mask_f); // get mask with low 4 bit and sign bits - tmp0 = __lasx_xvori_b(tmp0, 0x10); // make each mask or with 0x10 prepare for positive - mask = __lasx_xvsle_b(zero, tmp0); // if mask >= 0, set mask - tmp2 = __lasx_xvand_v(tmp0, mask); // maskout the in2 < ones - return __lasx_xvshuf_b(a, zero, tmp2); -} - -static __m256i lasx_extu8_16(__m128i a) { - return __lasx_vext2xv_hu_bu(____m256i(a)); -} - -static __m256i lasx_ext8_16(__m128i a) { - return __lasx_vext2xv_h_b(____m256i(a)); -} - -static __m256i lasx_ext16_32(__m128i a) { - return __lasx_vext2xv_w_h(____m256i(a)); -} - -static __m128i lasx_extracti128( __m256i a, int pos) { - __m128i ret; - if( pos == 0) - { - ret = lasx_extracti128_lo(a); - } else { - ret = lasx_extracti128_hi(a); - } - return ret; -} - -static __m128 lasx_extractf128( __m256 a, int pos) { - __m128 ret; - if( pos == 0) - { - ret = (__m128)lasx_extracti128_lo((__m256i)a); - } else { - ret = (__m128)lasx_extracti128_hi((__m256i)a); - } - return ret; -} - -static __m256i lasx_maddubs_h(__m256i a, __m256i b) { - __m256i tmp1, tmp2; - tmp1 = __lasx_xvmulwev_h_b(a, b); - tmp2 = __lasx_xvmulwod_h_b(a, b); - return __lasx_xvsadd_h(tmp1, tmp2); -} - -static __m256i lasx_madd_h(__m256i a, __m256i b) { - __m256i tmp1, tmp2; - tmp1 = __lasx_xvmulwev_w_h(a, b); - tmp2 = __lasx_xvmulwod_w_h(a, b); - return __lasx_xvadd_w(tmp1, tmp2); -} - -static __m256i lasx_packs_w(__m256i a, __m256i b) { - __m256i tmp, tmp1; - tmp = __lasx_xvsat_w(a, 15); - tmp1 = __lasx_xvsat_w(b, 15); - return __lasx_xvpickev_h(tmp1, tmp); -} - -static __m256i lasx_packs_h(__m256i a, __m256i b) { - __m256i tmp, tmp1; - tmp = __lasx_xvsat_h(a, 7); - tmp1 = __lasx_xvsat_h(b, 7); - return __lasx_xvpickev_b(tmp1, tmp); -} - -static inline __m256i lasx_madd_h_b(__m256i a, __m256i b) { - __m256i tmp1, tmp2; - tmp1 = __lasx_xvmulwev_h_b(a, b); - tmp2 = __lasx_xvmulwod_h_b(a, b); - return __lasx_xvadd_h(tmp1, tmp2); -} - -static inline __m256i lasx_xvrepl128vei_h(__m256i a, const unsigned int b) { - switch (b) { - case 0: return __lasx_xvrepl128vei_h(a, 0); - case 1: return __lasx_xvrepl128vei_h(a, 1); - case 2: return __lasx_xvrepl128vei_h(a, 2); - case 3: return __lasx_xvrepl128vei_h(a, 3); - case 4: return __lasx_xvrepl128vei_h(a, 4); - case 5: return __lasx_xvrepl128vei_h(a, 5); - case 6: return __lasx_xvrepl128vei_h(a, 6); - case 7: return __lasx_xvrepl128vei_h(a, 7); - default: __builtin_unreachable(); - } -} - -static inline __m256i lasx_xvandi_b_bit(__m256i a, const unsigned int b) { - switch (b) { - case 0: return __lasx_xvandi_b(a, 1 << 0); - case 1: return __lasx_xvandi_b(a, 1 << 1); - case 2: return __lasx_xvandi_b(a, 1 << 2); - case 3: return __lasx_xvandi_b(a, 1 << 3); - case 4: return __lasx_xvandi_b(a, 1 << 4); - case 5: return __lasx_xvandi_b(a, 1 << 5); - case 6: return __lasx_xvandi_b(a, 1 << 6); - case 7: return __lasx_xvandi_b(a, 1 << 7); - default: __builtin_unreachable(); - } -} - -// multiply int8_t, add results pairwise twice -static inline __m128i mul_sum_i8_pairs(const __m128i x, const __m128i y) { - // Get absolute values of x vectors - const __m128i ax = __lsx_vsigncov_b(x, x); - // Sign the values of the y vectors - const __m128i sy = __lsx_vsigncov_b(x, y); - // Perform multiplication and create 16-bit values - const __m128i dot = lsx_maddubs_h(ax, sy); - const __m128i ones = __lsx_vreplgr2vr_h(1); - return lsx_madd_h(ones, 
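lasx_maddubs_h above rebuilds x86's maddubs out of LoongArch's even/odd widening multiplies: even byte lanes and odd byte lanes are multiplied into two separate 16-bit streams, which are then combined with a saturating add. Scalar semantics for one output lane, treating a as unsigned and b as signed, a sketch rather than the exact intrinsic contract:

#include <cstdint>

// One 16-bit output lane from one pair of adjacent byte lanes.
static int16_t maddubs_ref(uint8_t a0, uint8_t a1, int8_t b0, int8_t b1) {
    int32_t p = (int32_t)a0 * b0 + (int32_t)a1 * b1;   // widen, then add the pair
    if (p >  32767) p =  32767;                        // saturate to int16
    if (p < -32768) p = -32768;
    return (int16_t)p;
}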
dot); -} - -// horizontally add 8 floats -static inline float hsum_float_8(const __m256 x) { - __m128 res = lasx_extractf128(x, 1); - res = __lsx_vfadd_s(res, lasx_extractf128(x, 0)); - res = __lsx_vfadd_s(res, (__m128)__lsx_vpickod_d((__m128i)res, (__m128i)res)); - res = __lsx_vfadd_s(res, (__m128)__lsx_vinsgr2vr_w(__lsx_vldi(0), __lsx_vpickve2gr_w(res, 1), 0)); - return ((v4f32)res)[0]; -} - -// horizontally add 8 int32_t -static inline int hsum_i32_8(const __m256i a) { - - __m256i tmp1 = __lasx_xvpermi_q(a, a, 0x11); - __m256i tmp2 = __lasx_xvpermi_q(a, a, 0x00); - - __m128i tmp1_128 = lasx_extracti128_lo(tmp1); - __m128i tmp2_128 = lasx_extracti128_lo(tmp2); - - __m128i sum128 = __lsx_vadd_w(tmp1_128, tmp2_128); - - __m128i ev = __lsx_vpickev_w(sum128, sum128); - __m128i od = __lsx_vpickod_w(sum128, sum128); - __m128i sum64 = __lsx_vadd_w(ev, od); - - int sum64_1, sum64_2; - sum64_1 = __lsx_vpickve2gr_w(sum64, 0); - sum64_2 = __lsx_vpickve2gr_w(sum64, 1); - - return sum64_1 + sum64_2; -} - -// horizontally add 4 int32_t -static inline int hsum_i32_4(const __m128i a) { - __m128i ev = __lsx_vpickev_w(a, a); - __m128i od = __lsx_vpickod_w(a, a); - __m128i sum64 = __lsx_vadd_w(ev, od); - - int sum64_1, sum64_2; - sum64_1 = __lsx_vpickve2gr_w(sum64, 0); - sum64_2 = __lsx_vpickve2gr_w(sum64, 1); - - return sum64_1 + sum64_2; -} - -// spread 32 bits to 32 bytes { 0x00, 0xFF } -static inline __m256i bytes_from_bits_32(const uint8_t * x) { - - uint32_t x32; - memcpy(&x32, x, sizeof(uint32_t)); - const __m256i shuf_mask = lasx_set_d( - 0x0303030303030303, 0x0202020202020202, - 0x0101010101010101, 0x0000000000000000); - - __m256i bytes = lasx_shuffle_b(__lasx_xvreplgr2vr_w(x32), shuf_mask); - const __m256i bit_mask = __lasx_xvreplgr2vr_d(0x7fbfdfeff7fbfdfe); - bytes = __lasx_xvor_v(bytes, bit_mask); - return __lasx_xvseq_b(bytes, __lasx_xvreplgr2vr_d(-1)); -} - -// Unpack 32 4-bit fields into 32 bytes -// The output vector contains 32 bytes, each one in [ 0 .. 
15 ] interval -static inline __m256i bytes_from_nibbles_32(const uint8_t * rsi) { - const __m128i lo = __lsx_vld((const __m128i *)rsi, 0); - __m128i hi = __lsx_vsrli_h(lo, 4); - return __lasx_xvandi_b(lasx_insertf128(hi, lo), 0xf); -} - -// add int16_t pairwise and return as float vector -static inline __m256 sum_i16_pairs_float(const __m256i x) { - __m256i v = __lasx_xvpackod_h(x, x); - __m256i summed_pairs = __lasx_xvaddwev_w_h(x, v); - return __lasx_xvffint_s_w(summed_pairs); -} - -static inline __m256 mul_sum_us8_pairs_float(const __m256i ax, const __m256i sy) { - // Perform multiplication and create 16-bit values - const __m256i dot = lasx_maddubs_h(ax, sy); - return sum_i16_pairs_float(dot); -} - -// multiply int8_t, add results pairwise twice and return as float vector -static inline __m256 mul_sum_i8_pairs_float(const __m256i x, const __m256i y) { - const __m256i dot = lasx_madd_h_b(x, y); - return sum_i16_pairs_float(dot); -} - -static inline __m128i packNibbles( __m256i bytes ) { - // Move bits within 16-bit lanes from 0000_abcd_0000_efgh into 0000_0000_abcd_efgh - const __m256i lowByte = __lasx_xvreplgr2vr_h(0xFF); - __m256i high = __lasx_xvandn_v(lowByte, bytes); - __m256i low = __lasx_xvand_v(lowByte, bytes); - high = __lasx_xvsrli_h(high, 4); - bytes = __lasx_xvor_v(low, high); - // Compress uint16_t lanes into bytes - __m128i *r0 = (__m128i *)&bytes; - __m256i tmp_h128 = __lasx_xvpermi_q(bytes, bytes, 0x11); - __m128i *r1 = (__m128i *)&tmp_h128; - - __m128i zero = __lsx_vldi(0); - __m128i tmp, tmp2, tmp3; - - tmp = __lsx_vmax_h(zero, *r0); - tmp2 = __lsx_vsat_hu(tmp, 7); - - tmp = __lsx_vmax_h(zero, *r1); - tmp3 = __lsx_vsat_hu(tmp, 7); - return __lsx_vpickev_b(tmp3, tmp2); -} -#endif //__loongarch_asx - -void quantize_row_q4_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k) { - quantize_row_q4_0_ref(x, y, k); -} - -void quantize_row_q4_1(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k) { - quantize_row_q4_1_ref(x, y, k); -} - -void quantize_row_q5_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k) { - quantize_row_q5_0_ref(x, y, k); -} - -void quantize_row_q5_1(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k) { - quantize_row_q5_1_ref(x, y, k); -} - -void quantize_row_q8_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { - assert(QK8_0 == 32); - assert(k % QK8_0 == 0); - const int nb = k / QK8_0; - - block_q8_0 * GGML_RESTRICT y = vy; - -#if defined(__ARM_NEON) - for (int i = 0; i < nb; i++) { - float32x4_t srcv [8]; - float32x4_t asrcv[8]; - float32x4_t amaxv[8]; - - for (int j = 0; j < 8; j++) srcv[j] = vld1q_f32(x + i*32 + 4*j); - for (int j = 0; j < 8; j++) asrcv[j] = vabsq_f32(srcv[j]); - - for (int j = 0; j < 4; j++) amaxv[2*j] = vmaxq_f32(asrcv[2*j], asrcv[2*j+1]); - for (int j = 0; j < 2; j++) amaxv[4*j] = vmaxq_f32(amaxv[4*j], amaxv[4*j+2]); - for (int j = 0; j < 1; j++) amaxv[8*j] = vmaxq_f32(amaxv[8*j], amaxv[8*j+4]); - - const float amax = vmaxvq_f32(amaxv[0]); - - const float d = amax / ((1 << 7) - 1); - const float id = d ? 
1.0f/d : 0.0f; - - y[i].d = GGML_FP32_TO_FP16(d); - - for (int j = 0; j < 8; j++) { - const float32x4_t v = vmulq_n_f32(srcv[j], id); - const int32x4_t vi = vcvtnq_s32_f32(v); - - y[i].qs[4*j + 0] = vgetq_lane_s32(vi, 0); - y[i].qs[4*j + 1] = vgetq_lane_s32(vi, 1); - y[i].qs[4*j + 2] = vgetq_lane_s32(vi, 2); - y[i].qs[4*j + 3] = vgetq_lane_s32(vi, 3); - } - } -#elif defined __wasm_simd128__ - for (int i = 0; i < nb; i++) { - v128_t srcv [8]; - v128_t asrcv[8]; - v128_t amaxv[8]; - - for (int j = 0; j < 8; j++) srcv[j] = wasm_v128_load(x + i*32 + 4*j); - for (int j = 0; j < 8; j++) asrcv[j] = wasm_f32x4_abs(srcv[j]); - - for (int j = 0; j < 4; j++) amaxv[2*j] = wasm_f32x4_max(asrcv[2*j], asrcv[2*j+1]); - for (int j = 0; j < 2; j++) amaxv[4*j] = wasm_f32x4_max(amaxv[4*j], amaxv[4*j+2]); - for (int j = 0; j < 1; j++) amaxv[8*j] = wasm_f32x4_max(amaxv[8*j], amaxv[8*j+4]); - - const float amax = MAX(MAX(wasm_f32x4_extract_lane(amaxv[0], 0), - wasm_f32x4_extract_lane(amaxv[0], 1)), - MAX(wasm_f32x4_extract_lane(amaxv[0], 2), - wasm_f32x4_extract_lane(amaxv[0], 3))); - - const float d = amax / ((1 << 7) - 1); - const float id = d ? 1.0f/d : 0.0f; - - y[i].d = GGML_FP32_TO_FP16(d); - - for (int j = 0; j < 8; j++) { - const v128_t v = wasm_f32x4_mul(srcv[j], wasm_f32x4_splat(id)); - const v128_t vi = wasm_i32x4_trunc_sat_f32x4(v); - - y[i].qs[4*j + 0] = wasm_i32x4_extract_lane(vi, 0); - y[i].qs[4*j + 1] = wasm_i32x4_extract_lane(vi, 1); - y[i].qs[4*j + 2] = wasm_i32x4_extract_lane(vi, 2); - y[i].qs[4*j + 3] = wasm_i32x4_extract_lane(vi, 3); - } - } -#elif defined(__AVX2__) || defined(__AVX__) - for (int i = 0; i < nb; i++) { - // Load elements into 4 AVX vectors - __m256 v0 = _mm256_loadu_ps( x ); - __m256 v1 = _mm256_loadu_ps( x + 8 ); - __m256 v2 = _mm256_loadu_ps( x + 16 ); - __m256 v3 = _mm256_loadu_ps( x + 24 ); - x += 32; - - // Compute max(abs(e)) for the block - const __m256 signBit = _mm256_set1_ps( -0.0f ); - __m256 maxAbs = _mm256_andnot_ps( signBit, v0 ); - maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v1 ) ); - maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v2 ) ); - maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v3 ) ); - - __m128 max4 = _mm_max_ps( _mm256_extractf128_ps( maxAbs, 1 ), _mm256_castps256_ps128( maxAbs ) ); - max4 = _mm_max_ps( max4, _mm_movehl_ps( max4, max4 ) ); - max4 = _mm_max_ss( max4, _mm_movehdup_ps( max4 ) ); - const float maxScalar = _mm_cvtss_f32( max4 ); - - // Quantize these floats - const float d = maxScalar / 127.f; - y[i].d = GGML_FP32_TO_FP16(d); - const float id = ( maxScalar != 0.0f ) ? 
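Every SIMD branch of quantize_row_q8_0, the NEON and WASM paths above and the AVX, RISC-V, POWER9, LoongArch, and VXE paths that follow, implements the same per-block math: find the absolute maximum of 32 floats, derive a scale that maps it to 127, store the scale as fp16, and round each scaled value to int8. A scalar reference for one block (the exact rounding mode varies slightly between paths; this sketch rounds half away from zero):

#include <cmath>
#include <cstdint>

static void quantize_block_q8_0_ref(const float x[32], float & d, int8_t qs[32]) {
    float amax = 0.0f;
    for (int j = 0; j < 32; ++j) amax = std::fmax(amax, std::fabs(x[j])); // max |x|
    d = amax / 127.0f;                        // stored as ggml fp16 in the real block
    const float id = d ? 1.0f / d : 0.0f;     // all-zero block: avoid divide by zero
    for (int j = 0; j < 32; ++j) qs[j] = (int8_t)std::lround(x[j] * id);
}

This is essentially what the quantize_row_q8_0_ref scalar fallback at the bottom of the function does, which is why the #else branch can simply delegate to it.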
127.f / maxScalar : 0.0f; - const __m256 mul = _mm256_set1_ps( id ); - - // Apply the multiplier - v0 = _mm256_mul_ps( v0, mul ); - v1 = _mm256_mul_ps( v1, mul ); - v2 = _mm256_mul_ps( v2, mul ); - v3 = _mm256_mul_ps( v3, mul ); - - // Round to nearest integer - v0 = _mm256_round_ps( v0, _MM_ROUND_NEAREST ); - v1 = _mm256_round_ps( v1, _MM_ROUND_NEAREST ); - v2 = _mm256_round_ps( v2, _MM_ROUND_NEAREST ); - v3 = _mm256_round_ps( v3, _MM_ROUND_NEAREST ); - - // Convert floats to integers - __m256i i0 = _mm256_cvtps_epi32( v0 ); - __m256i i1 = _mm256_cvtps_epi32( v1 ); - __m256i i2 = _mm256_cvtps_epi32( v2 ); - __m256i i3 = _mm256_cvtps_epi32( v3 ); - -#if defined(__AVX2__) - // Convert int32 to int16 - i0 = _mm256_packs_epi32( i0, i1 ); // 0, 1, 2, 3, 8, 9, 10, 11, 4, 5, 6, 7, 12, 13, 14, 15 - i2 = _mm256_packs_epi32( i2, i3 ); // 16, 17, 18, 19, 24, 25, 26, 27, 20, 21, 22, 23, 28, 29, 30, 31 - // Convert int16 to int8 - i0 = _mm256_packs_epi16( i0, i2 ); // 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27, 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31 - - // We got our precious signed bytes, but the order is now wrong - // These AVX2 pack instructions process 16-byte pieces independently - // The following instruction is fixing the order - const __m256i perm = _mm256_setr_epi32( 0, 4, 1, 5, 2, 6, 3, 7 ); - i0 = _mm256_permutevar8x32_epi32( i0, perm ); - - _mm256_storeu_si256((__m256i *)y[i].qs, i0); -#else - // Since we don't have in AVX some necessary functions, - // we split the registers in half and call AVX2 analogs from SSE - __m128i ni0 = _mm256_castsi256_si128( i0 ); - __m128i ni1 = _mm256_extractf128_si256( i0, 1); - __m128i ni2 = _mm256_castsi256_si128( i1 ); - __m128i ni3 = _mm256_extractf128_si256( i1, 1); - __m128i ni4 = _mm256_castsi256_si128( i2 ); - __m128i ni5 = _mm256_extractf128_si256( i2, 1); - __m128i ni6 = _mm256_castsi256_si128( i3 ); - __m128i ni7 = _mm256_extractf128_si256( i3, 1); - - // Convert int32 to int16 - ni0 = _mm_packs_epi32( ni0, ni1 ); - ni2 = _mm_packs_epi32( ni2, ni3 ); - ni4 = _mm_packs_epi32( ni4, ni5 ); - ni6 = _mm_packs_epi32( ni6, ni7 ); - // Convert int16 to int8 - ni0 = _mm_packs_epi16( ni0, ni2 ); - ni4 = _mm_packs_epi16( ni4, ni6 ); - - _mm_storeu_si128((__m128i *)(y[i].qs + 0), ni0); - _mm_storeu_si128((__m128i *)(y[i].qs + 16), ni4); -#endif - } -#elif defined(__riscv_v_intrinsic) - - size_t vl = QK8_0; - - for (int i = 0; i < nb; i++) { - // load elements - vfloat32m8_t v_x = __riscv_vle32_v_f32m8(x+i*QK8_0, vl); - - vfloat32m8_t vfabs = __riscv_vfabs_v_f32m8(v_x, vl); - vfloat32m1_t tmp = __riscv_vfmv_v_f_f32m1(0.0f, vl); - vfloat32m1_t vmax = __riscv_vfredmax_vs_f32m8_f32m1(vfabs, tmp, vl); - float amax = __riscv_vfmv_f_s_f32m1_f32(vmax); - - const float d = amax / ((1 << 7) - 1); - const float id = d ? 
1.0f/d : 0.0f; - - y[i].d = GGML_FP32_TO_FP16(d); - - vfloat32m8_t x0 = __riscv_vfmul_vf_f32m8(v_x, id, vl); - - // convert to integer - vint16m4_t vi = __riscv_vfncvt_x_f_w_i16m4(x0, vl); - vint8m2_t vs = __riscv_vncvt_x_x_w_i8m2(vi, vl); - - // store result - __riscv_vse8_v_i8m2(y[i].qs , vs, vl); - } - -#elif defined(__POWER9_VECTOR__) - for (int i = 0; i < nb; i++) { - vector float srcv [8]; - vector float asrcv[8]; - vector float amaxv[8]; - vector signed int vi[8]; - - for (int j = 0; j < 8; j++) srcv[j] = vec_xl(0, x + i*32 + 4*j); - for (int j = 0; j < 8; j++) asrcv[j] = vec_abs(srcv[j]); - - for (int j = 0; j < 4; j++) amaxv[2*j] = vec_max(asrcv[2*j], asrcv[2*j+1]); - for (int j = 0; j < 2; j++) amaxv[4*j] = vec_max(amaxv[4*j], amaxv[4*j+2]); - for (int j = 0; j < 1; j++) amaxv[8*j] = vec_max(amaxv[8*j], amaxv[8*j+4]); - - const float amax = MAX(MAX(vec_extract(amaxv[0], 0), - vec_extract(amaxv[0], 1)), - MAX(vec_extract(amaxv[0], 2), - vec_extract(amaxv[0], 3))); - - const float d = amax / ((1 << 7) - 1); - const float id = d ? 1.0f/d : 0.0f; - const vector float vid = vec_splats(id); - - y[i].d = GGML_FP32_TO_FP16(d); - - for (int j = 0; j < 8; j++) { - const vector float v = vec_round(vec_mul(srcv[j], vid)); - vi[j] = vec_cts(v, 0); - } - vec_xst(vec_pack(vec_pack(vi[0], vi[1]), vec_pack(vi[2], vi[3])), 0, &y[i].qs[0]); - vec_xst(vec_pack(vec_pack(vi[4], vi[5]), vec_pack(vi[6], vi[7])), 16, &y[i].qs[0]); - } - -#elif defined(__loongarch_asx) - for (int i = 0; i < nb; i++) { - __m256 v0 = (__m256)__lasx_xvld( x , 0); - __m256 v1 = (__m256)__lasx_xvld( x , 32); - __m256 v2 = (__m256)__lasx_xvld( x , 64); - __m256 v3 = (__m256)__lasx_xvld( x , 96); - x += 32; - - // Compute max(abs(e)) for the block - const __m256 sign_bit = __lasx_xvreplfr2vr_s( -0.0f ); - __m256 max_abs = (__m256)__lasx_xvandn_v( (__m256i)sign_bit, (__m256i)v0 ); - max_abs = __lasx_xvfmax_s( max_abs, (__m256)__lasx_xvandn_v( (__m256i)sign_bit, (__m256i)v1 ) ); - max_abs = __lasx_xvfmax_s( max_abs, (__m256)__lasx_xvandn_v( (__m256i)sign_bit, (__m256i)v2 ) ); - max_abs = __lasx_xvfmax_s( max_abs, (__m256)__lasx_xvandn_v( (__m256i)sign_bit, (__m256i)v3 ) ); - - __m128 max4 = __lsx_vfmax_s( lasx_extractf128( max_abs, 1 ), lasx_extractf128( max_abs , 0) ); - max4 = __lsx_vfmax_s( max4, (__m128)__lsx_vpickod_d((__m128i) max4, (__m128i)max4 ) ); - __m128 tmp = max4; - max4 = __lsx_vfmax_s( max4, (__m128)__lsx_vinsgr2vr_w(tmp, __lsx_vpickve2gr_w( max4, 1 ), 0 )); - const float max_scalar = ((v4f32)max4)[0]; - - // Quantize these floats - const float d = max_scalar / 127.f; - y[i].d = GGML_FP32_TO_FP16(d); - const float id = ( max_scalar != 0.0f ) ? 
127.f / max_scalar : 0.0f; - const __m256 mul = (__m256)__lasx_xvreplfr2vr_s( id ); - - // Apply the multiplier - v0 = __lasx_xvfmul_s( v0, mul ); - v1 = __lasx_xvfmul_s( v1, mul ); - v2 = __lasx_xvfmul_s( v2, mul ); - v3 = __lasx_xvfmul_s( v3, mul ); - - // Round to nearest integer - __m256i i0 = __lasx_xvftintrne_w_s( v0 ); - __m256i i1 = __lasx_xvftintrne_w_s( v1 ); - __m256i i2 = __lasx_xvftintrne_w_s( v2 ); - __m256i i3 = __lasx_xvftintrne_w_s( v3 ); - - __m128i ni0 = lasx_extracti128( i0, 0 ); - __m128i ni1 = lasx_extracti128( i0, 1); - __m128i ni2 = lasx_extracti128( i1, 0); - __m128i ni3 = lasx_extracti128( i1, 1); - __m128i ni4 = lasx_extracti128( i2, 0); - __m128i ni5 = lasx_extracti128( i2, 1); - __m128i ni6 = lasx_extracti128( i3, 0); - __m128i ni7 = lasx_extracti128( i3, 1); - - // Convert int32 to int16 - ni0 = lsx_packs_w( ni0, ni1 ); - ni2 = lsx_packs_w( ni2, ni3 ); - ni4 = lsx_packs_w( ni4, ni5 ); - ni6 = lsx_packs_w( ni6, ni7 ); - // Convert int16 to int8 - ni0 = lsx_packs_h( ni0, ni2 ); - ni4 = lsx_packs_h( ni4, ni6 ); - - __lsx_vst(ni0, (__m128i *)(y[i].qs + 0), 0); - __lsx_vst(ni4, (__m128i *)(y[i].qs + 16), 0); - - } -#elif defined(__VXE__) || defined(__VXE2__) - for (int i = 0; i < nb; i++) { - __vector float srcv [8]; - __vector float asrcv[8]; - __vector float amaxv[8]; - - for (int j = 0; j < 8; j++) srcv[j] = vec_xl(0, x + i*32 + 4*j); - for (int j = 0; j < 8; j++) asrcv[j] = vec_abs(srcv[j]); - for (int j = 0; j < 4; j++) amaxv[2*j] = vec_max(asrcv[2*j], asrcv[2*j+1]); - for (int j = 0; j < 2; j++) amaxv[4*j] = vec_max(amaxv[4*j], amaxv[4*j+2]); - for (int j = 0; j < 1; j++) amaxv[8*j] = vec_max(amaxv[8*j], amaxv[8*j+4]); - - const float amax = MAX(MAX(vec_extract(amaxv[0], 0), - vec_extract(amaxv[0], 1)), - MAX(vec_extract(amaxv[0], 2), - vec_extract(amaxv[0], 3))); - - const float d = amax / ((1 << 7) - 1); - const float id = d ? 1.0f / d : 0.0f; - - y[i].d = GGML_FP32_TO_FP16(d); - - for (int j = 0; j < 8; j++) { - const __vector float v = vec_mul(srcv[j], vec_splats(id)); - const __vector int32_t vi = vec_signed(v); - - y[i].qs[4*j + 0] = vec_extract(vi, 0); - y[i].qs[4*j + 1] = vec_extract(vi, 1); - y[i].qs[4*j + 2] = vec_extract(vi, 2); - y[i].qs[4*j + 3] = vec_extract(vi, 3); - } - } -#else - GGML_UNUSED(nb); - // scalar - quantize_row_q8_0_ref(x, y, k); -#endif -} - -void quantize_row_q8_1(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { - assert(k % QK8_1 == 0); - const int nb = k / QK8_1; - - block_q8_1 * GGML_RESTRICT y = vy; - -#if defined(__ARM_NEON) - for (int i = 0; i < nb; i++) { - float32x4_t srcv [8]; - float32x4_t asrcv[8]; - float32x4_t amaxv[8]; - - for (int j = 0; j < 8; j++) srcv[j] = vld1q_f32(x + i*32 + 4*j); - for (int j = 0; j < 8; j++) asrcv[j] = vabsq_f32(srcv[j]); - - for (int j = 0; j < 4; j++) amaxv[2*j] = vmaxq_f32(asrcv[2*j], asrcv[2*j+1]); - for (int j = 0; j < 2; j++) amaxv[4*j] = vmaxq_f32(amaxv[4*j], amaxv[4*j+2]); - for (int j = 0; j < 1; j++) amaxv[8*j] = vmaxq_f32(amaxv[8*j], amaxv[8*j+4]); - - const float amax = vmaxvq_f32(amaxv[0]); - - const float d = amax / ((1 << 7) - 1); - const float id = d ? 
1.0f/d : 0.0f; - - y[i].d = GGML_FP32_TO_FP16(d); - - int32x4_t accv = vdupq_n_s32(0); - - for (int j = 0; j < 8; j++) { - const float32x4_t v = vmulq_n_f32(srcv[j], id); - const int32x4_t vi = vcvtnq_s32_f32(v); - - y[i].qs[4*j + 0] = vgetq_lane_s32(vi, 0); - y[i].qs[4*j + 1] = vgetq_lane_s32(vi, 1); - y[i].qs[4*j + 2] = vgetq_lane_s32(vi, 2); - y[i].qs[4*j + 3] = vgetq_lane_s32(vi, 3); - - accv = vaddq_s32(accv, vi); - } - - y[i].s = GGML_FP32_TO_FP16(d * vaddvq_s32(accv)); - } -#elif defined __wasm_simd128__ - for (int i = 0; i < nb; i++) { - v128_t srcv [8]; - v128_t asrcv[8]; - v128_t amaxv[8]; - - for (int j = 0; j < 8; j++) srcv[j] = wasm_v128_load(x + i*32 + 4*j); - for (int j = 0; j < 8; j++) asrcv[j] = wasm_f32x4_abs(srcv[j]); - - for (int j = 0; j < 4; j++) amaxv[2*j] = wasm_f32x4_max(asrcv[2*j], asrcv[2*j+1]); - for (int j = 0; j < 2; j++) amaxv[4*j] = wasm_f32x4_max(amaxv[4*j], amaxv[4*j+2]); - for (int j = 0; j < 1; j++) amaxv[8*j] = wasm_f32x4_max(amaxv[8*j], amaxv[8*j+4]); - - const float amax = MAX(MAX(wasm_f32x4_extract_lane(amaxv[0], 0), - wasm_f32x4_extract_lane(amaxv[0], 1)), - MAX(wasm_f32x4_extract_lane(amaxv[0], 2), - wasm_f32x4_extract_lane(amaxv[0], 3))); - - const float d = amax / ((1 << 7) - 1); - const float id = d ? 1.0f/d : 0.0f; - - y[i].d = GGML_FP32_TO_FP16(d); - - v128_t accv = wasm_i32x4_splat(0); - - for (int j = 0; j < 8; j++) { - const v128_t v = wasm_f32x4_mul(srcv[j], wasm_f32x4_splat(id)); - const v128_t vi = wasm_i32x4_trunc_sat_f32x4(v); - - y[i].qs[4*j + 0] = wasm_i32x4_extract_lane(vi, 0); - y[i].qs[4*j + 1] = wasm_i32x4_extract_lane(vi, 1); - y[i].qs[4*j + 2] = wasm_i32x4_extract_lane(vi, 2); - y[i].qs[4*j + 3] = wasm_i32x4_extract_lane(vi, 3); - - accv = wasm_i32x4_add(accv, vi); - } - - y[i].s = GGML_FP32_TO_FP16( - d * (wasm_i32x4_extract_lane(accv, 0) + - wasm_i32x4_extract_lane(accv, 1) + - wasm_i32x4_extract_lane(accv, 2) + - wasm_i32x4_extract_lane(accv, 3))); - } -#elif defined(__AVX2__) || defined(__AVX__) - for (int i = 0; i < nb; i++) { - // Load elements into 4 AVX vectors - __m256 v0 = _mm256_loadu_ps( x ); - __m256 v1 = _mm256_loadu_ps( x + 8 ); - __m256 v2 = _mm256_loadu_ps( x + 16 ); - __m256 v3 = _mm256_loadu_ps( x + 24 ); - x += 32; - - // Compute max(abs(e)) for the block - const __m256 signBit = _mm256_set1_ps( -0.0f ); - __m256 maxAbs = _mm256_andnot_ps( signBit, v0 ); - maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v1 ) ); - maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v2 ) ); - maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v3 ) ); - - __m128 max4 = _mm_max_ps( _mm256_extractf128_ps( maxAbs, 1 ), _mm256_castps256_ps128( maxAbs ) ); - max4 = _mm_max_ps( max4, _mm_movehl_ps( max4, max4 ) ); - max4 = _mm_max_ss( max4, _mm_movehdup_ps( max4 ) ); - const float max_scalar = _mm_cvtss_f32( max4 ); - - // Quantize these floats - const float d = max_scalar / 127.f; - y[i].d = GGML_FP32_TO_FP16(d); - const float id = ( max_scalar != 0.0f ) ? 
127.f / max_scalar : 0.0f; - const __m256 mul = _mm256_set1_ps( id ); - - // Apply the multiplier - v0 = _mm256_mul_ps( v0, mul ); - v1 = _mm256_mul_ps( v1, mul ); - v2 = _mm256_mul_ps( v2, mul ); - v3 = _mm256_mul_ps( v3, mul ); - - // Round to nearest integer - v0 = _mm256_round_ps( v0, _MM_ROUND_NEAREST ); - v1 = _mm256_round_ps( v1, _MM_ROUND_NEAREST ); - v2 = _mm256_round_ps( v2, _MM_ROUND_NEAREST ); - v3 = _mm256_round_ps( v3, _MM_ROUND_NEAREST ); - - // Convert floats to integers - __m256i i0 = _mm256_cvtps_epi32( v0 ); - __m256i i1 = _mm256_cvtps_epi32( v1 ); - __m256i i2 = _mm256_cvtps_epi32( v2 ); - __m256i i3 = _mm256_cvtps_epi32( v3 ); - -#if defined(__AVX2__) - // Compute the sum of the quants and set y[i].s - y[i].s = GGML_FP32_TO_FP16(d * hsum_i32_8(_mm256_add_epi32(_mm256_add_epi32(i0, i1), _mm256_add_epi32(i2, i3)))); - - // Convert int32 to int16 - i0 = _mm256_packs_epi32( i0, i1 ); // 0, 1, 2, 3, 8, 9, 10, 11, 4, 5, 6, 7, 12, 13, 14, 15 - i2 = _mm256_packs_epi32( i2, i3 ); // 16, 17, 18, 19, 24, 25, 26, 27, 20, 21, 22, 23, 28, 29, 30, 31 - // Convert int16 to int8 - i0 = _mm256_packs_epi16( i0, i2 ); // 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27, 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31 - - // We got our precious signed bytes, but the order is now wrong - // These AVX2 pack instructions process 16-byte pieces independently - // The following instruction is fixing the order - const __m256i perm = _mm256_setr_epi32( 0, 4, 1, 5, 2, 6, 3, 7 ); - i0 = _mm256_permutevar8x32_epi32( i0, perm ); - - _mm256_storeu_si256((__m256i *)y[i].qs, i0); -#else - // Since we don't have in AVX some necessary functions, - // we split the registers in half and call AVX2 analogs from SSE - __m128i ni0 = _mm256_castsi256_si128( i0 ); - __m128i ni1 = _mm256_extractf128_si256( i0, 1); - __m128i ni2 = _mm256_castsi256_si128( i1 ); - __m128i ni3 = _mm256_extractf128_si256( i1, 1); - __m128i ni4 = _mm256_castsi256_si128( i2 ); - __m128i ni5 = _mm256_extractf128_si256( i2, 1); - __m128i ni6 = _mm256_castsi256_si128( i3 ); - __m128i ni7 = _mm256_extractf128_si256( i3, 1); - - // Compute the sum of the quants and set y[i].s - const __m128i s0 = _mm_add_epi32(_mm_add_epi32(ni0, ni1), _mm_add_epi32(ni2, ni3)); - const __m128i s1 = _mm_add_epi32(_mm_add_epi32(ni4, ni5), _mm_add_epi32(ni6, ni7)); - y[i].s = GGML_FP32_TO_FP16(d * hsum_i32_4(_mm_add_epi32(s0, s1))); - - // Convert int32 to int16 - ni0 = _mm_packs_epi32( ni0, ni1 ); - ni2 = _mm_packs_epi32( ni2, ni3 ); - ni4 = _mm_packs_epi32( ni4, ni5 ); - ni6 = _mm_packs_epi32( ni6, ni7 ); - // Convert int16 to int8 - ni0 = _mm_packs_epi16( ni0, ni2 ); - ni4 = _mm_packs_epi16( ni4, ni6 ); - - _mm_storeu_si128((__m128i *)(y[i].qs + 0), ni0); - _mm_storeu_si128((__m128i *)(y[i].qs + 16), ni4); -#endif - } -#elif defined(__riscv_v_intrinsic) - - size_t vl = QK8_1; - - for (int i = 0; i < nb; i++) { - // load elements - vfloat32m8_t v_x = __riscv_vle32_v_f32m8(x+i*QK8_1, vl); - - vfloat32m8_t vfabs = __riscv_vfabs_v_f32m8(v_x, vl); - vfloat32m1_t tmp = __riscv_vfmv_v_f_f32m1(0.0, vl); - vfloat32m1_t vmax = __riscv_vfredmax_vs_f32m8_f32m1(vfabs, tmp, vl); - float amax = __riscv_vfmv_f_s_f32m1_f32(vmax); - - const float d = amax / ((1 << 7) - 1); - const float id = d ? 
1.0f/d : 0.0f; - - y[i].d = GGML_FP32_TO_FP16(d); - - vfloat32m8_t x0 = __riscv_vfmul_vf_f32m8(v_x, id, vl); - - // convert to integer - vint16m4_t vi = __riscv_vfncvt_x_f_w_i16m4(x0, vl); - vint8m2_t vs = __riscv_vncvt_x_x_w_i8m2(vi, vl); - - // store result - __riscv_vse8_v_i8m2(y[i].qs , vs, vl); - - // compute sum for y[i].s - vint16m1_t tmp2 = __riscv_vmv_v_x_i16m1(0, vl); - vint16m1_t vwrs = __riscv_vwredsum_vs_i8m2_i16m1(vs, tmp2, vl); - - // set y[i].s - int sum = __riscv_vmv_x_s_i16m1_i16(vwrs); - y[i].s = GGML_FP32_TO_FP16(sum*d); - } - -#elif defined(__POWER9_VECTOR__) - for (int i = 0; i < nb; i++) { - vector float srcv [8]; - vector float asrcv[8]; - vector float amaxv[8]; - vector signed int vi[8]; - - for (int j = 0; j < 8; j++) srcv[j] = vec_xl(0, x + i*32 + 4*j); - for (int j = 0; j < 8; j++) asrcv[j] = vec_abs(srcv[j]); - - for (int j = 0; j < 4; j++) amaxv[2*j] = vec_max(asrcv[2*j], asrcv[2*j+1]); - for (int j = 0; j < 2; j++) amaxv[4*j] = vec_max(amaxv[4*j], amaxv[4*j+2]); - for (int j = 0; j < 1; j++) amaxv[8*j] = vec_max(amaxv[8*j], amaxv[8*j+4]); - - const float amax = MAX(MAX(vec_extract(amaxv[0], 0), - vec_extract(amaxv[0], 1)), - MAX(vec_extract(amaxv[0], 2), - vec_extract(amaxv[0], 3))); - - const float d = amax / ((1 << 7) - 1); - const float id = d ? 1.0f/d : 0.0f; - const vector float vid = vec_splats(id); - - y[i].d = GGML_FP32_TO_FP16(d); - - vector int accv = vec_splats(0); - - for (int j = 0; j < 8; j++) { - const vector float v = vec_round(vec_mul(srcv[j], vid)); - vi[j] = vec_cts(v, 0); - - accv = vec_add(accv, vi[j]); - } - vec_xst(vec_pack(vec_pack(vi[0], vi[1]), vec_pack(vi[2], vi[3])), 0, &y[i].qs[0]); - vec_xst(vec_pack(vec_pack(vi[4], vi[5]), vec_pack(vi[6], vi[7])), 16, &y[i].qs[0]); - - accv = vec_add(accv, vec_sld(accv, accv, 4)); - accv = vec_add(accv, vec_sld(accv, accv, 8)); - y[i].s = GGML_FP32_TO_FP16(d * vec_extract(accv, 0)); - } - -#elif defined(__loongarch_asx) - for (int i = 0; i < nb; i++) { - __m256 v0 = (__m256)__lasx_xvld( x , 0 ); - __m256 v1 = (__m256)__lasx_xvld( x , 32 ); - __m256 v2 = (__m256)__lasx_xvld( x , 64 ); - __m256 v3 = (__m256)__lasx_xvld( x , 96 ); - x += 32; - - // Compute max(abs(e)) for the block - const __m256 sign_bit = __lasx_xvreplfr2vr_s( -0.0f ); - __m256 max_abs = (__m256)__lasx_xvandn_v( (__m256i)sign_bit, (__m256i)v0 ); - max_abs = __lasx_xvfmax_s( max_abs, (__m256)__lasx_xvandn_v( (__m256i)sign_bit, (__m256i)v1 ) ); - max_abs = __lasx_xvfmax_s( max_abs, (__m256)__lasx_xvandn_v( (__m256i)sign_bit, (__m256i)v2 ) ); - max_abs = __lasx_xvfmax_s( max_abs, (__m256)__lasx_xvandn_v( (__m256i)sign_bit, (__m256i)v3 ) ); - - __m128 max4 = __lsx_vfmax_s( lasx_extractf128( max_abs, 1 ), lasx_extractf128( max_abs, 0) ); - max4 = __lsx_vfmax_s( max4, (__m128)__lsx_vpickod_d((__m128i) max4, (__m128i)max4 ) ); - __m128 tmp = max4; - max4 = __lsx_vfmax_s( max4, (__m128)__lsx_vextrins_w((__m128i)tmp, (__m128i)max4, 0x10 )); - const float max_scalar = ((v4f32)max4)[0]; - - // Quantize these floats - const float d = max_scalar / 127.f; - y[i].d = GGML_FP32_TO_FP16(d); - const float id = ( max_scalar != 0.0f ) ? 
127.f / max_scalar : 0.0f; - const __m256 mul = __lasx_xvreplfr2vr_s( id ); - - // Apply the multiplier - v0 = __lasx_xvfmul_s( v0, mul ); - v1 = __lasx_xvfmul_s( v1, mul ); - v2 = __lasx_xvfmul_s( v2, mul ); - v3 = __lasx_xvfmul_s( v3, mul ); - - // Round to nearest integer - __m256i i0 = __lasx_xvftintrne_w_s( v0 ); - __m256i i1 = __lasx_xvftintrne_w_s( v1 ); - __m256i i2 = __lasx_xvftintrne_w_s( v2 ); - __m256i i3 = __lasx_xvftintrne_w_s( v3 ); - - __m128i ni0 = lasx_extracti128(i0, 0); - __m128i ni1 = lasx_extracti128( i0, 1); - __m128i ni2 = lasx_extracti128( i1, 0); - __m128i ni3 = lasx_extracti128( i1, 1); - __m128i ni4 = lasx_extracti128( i2, 0 ); - __m128i ni5 = lasx_extracti128( i2, 1); - __m128i ni6 = lasx_extracti128( i3, 0); - __m128i ni7 = lasx_extracti128( i3, 1); - - // Compute the sum of the quants and set y[i].s - const __m128i s0 = __lsx_vadd_w(__lsx_vadd_w(ni0, ni1), __lsx_vadd_w(ni2, ni3)); - const __m128i s1 = __lsx_vadd_w(__lsx_vadd_w(ni4, ni5), __lsx_vadd_w(ni6, ni7)); - y[i].s = GGML_FP32_TO_FP16(d * hsum_i32_4(__lsx_vadd_w(s0, s1))); - - // Convert int32 to int16 - ni0 = lsx_packs_w( ni0, ni1 ); - ni2 = lsx_packs_w( ni2, ni3 ); - ni4 = lsx_packs_w( ni4, ni5 ); - ni6 = lsx_packs_w( ni6, ni7 ); - // Convert int16 to int8 - ni0 = lsx_packs_h( ni0, ni2 ); - ni4 = lsx_packs_h( ni4, ni6 ); - - __lsx_vst(ni0, (__m128i *)(y[i].qs + 0), 0); - __lsx_vst(ni4, (__m128i *)(y[i].qs + 16), 0); - } -#elif defined(__VXE__) || defined(__VXE2__) - for (int i = 0; i < nb; i++) { - __vector float srcv [8]; - __vector float asrcv[8]; - __vector float amaxv[8]; - - for (int j = 0; j < 8; j++) srcv[j] = vec_xl(0, x + i*32 + 4*j); - for (int j = 0; j < 8; j++) asrcv[j] = vec_abs(srcv[j]); - for (int j = 0; j < 4; j++) amaxv[2*j] = vec_max(asrcv[2*j], asrcv[2*j+1]); - for (int j = 0; j < 2; j++) amaxv[4*j] = vec_max(amaxv[4*j], amaxv[4*j+2]); - for (int j = 0; j < 1; j++) amaxv[8*j] = vec_max(amaxv[8*j], amaxv[8*j+4]); - - const float amax = MAX(MAX(vec_extract(amaxv[0], 0), - vec_extract(amaxv[0], 1)), - MAX(vec_extract(amaxv[0], 2), - vec_extract(amaxv[0], 3))); - - const float d = amax / ((1 << 7) - 1); - const float id = d ? 
1.0f / d : 0.0f; - - y[i].d = GGML_FP32_TO_FP16(d); - - __vector int32_t acc = vec_splats(0); - - for (int j = 0; j < 8; j++) { - const __vector float v = vec_mul(srcv[j], vec_splats(id)); - const __vector int32_t vi = vec_signed(v); - - y[i].qs[4*j + 0] = vec_extract(vi, 0); - y[i].qs[4*j + 1] = vec_extract(vi, 1); - y[i].qs[4*j + 2] = vec_extract(vi, 2); - y[i].qs[4*j + 3] = vec_extract(vi, 3); - - acc = vec_add(acc, vi); - } - - y[i].s = GGML_FP32_TO_FP16(d * (acc[0] + acc[1] + acc[2] + acc[3])); - } -#else - GGML_UNUSED(nb); - // scalar - quantize_row_q8_1_ref(x, y, k); -#endif -} - -// -// 2-6 bit quantization in super-blocks -// - -// -// ===================== Helper functions -// -static inline int nearest_int(float fval) { - assert(fabsf(fval) <= 4194303.f); - float val = fval + 12582912.f; - int i; memcpy(&i, &val, sizeof(int)); - return (i & 0x007fffff) - 0x00400000; -} - -static float make_qx_quants(int n, int nmax, const float * GGML_RESTRICT x, int8_t * GGML_RESTRICT L, int rmse_type, - const float * GGML_RESTRICT qw) { - float max = 0; - float amax = 0; - for (int i = 0; i < n; ++i) { - float ax = fabsf(x[i]); - if (ax > amax) { amax = ax; max = x[i]; } - } - if (amax < GROUP_MAX_EPS) { // all zero - for (int i = 0; i < n; ++i) { - L[i] = 0; - } - return 0.f; - } - float iscale = -nmax / max; - if (rmse_type == 0) { - for (int i = 0; i < n; ++i) { - int l = nearest_int(iscale * x[i]); - L[i] = nmax + MAX(-nmax, MIN(nmax-1, l)); - } - return 1/iscale; - } - bool return_early = false; - if (rmse_type < 0) { - rmse_type = -rmse_type; - return_early = true; - } - float sumlx = 0; - float suml2 = 0; -#ifdef HAVE_BUGGY_APPLE_LINKER - // use 'volatile' to prevent unroll and work around a bug in Apple ld64 1015.7 - for (volatile int i = 0; i < n; ++i) { -#else - for (int i = 0; i < n; ++i) { -#endif - int l = nearest_int(iscale * x[i]); - l = MAX(-nmax, MIN(nmax-1, l)); - L[i] = l + nmax; - float w = qw ? qw[i] : rmse_type == 1 ? x[i] * x[i] : rmse_type == 2 ? 1 : rmse_type == 3 ? fabsf(x[i]) : sqrtf(fabsf(x[i])); - sumlx += w*x[i]*l; - suml2 += w*l*l; - } - float scale = suml2 ? sumlx/suml2 : 0.0f; - if (return_early) return suml2 > 0 ? 0.5f*(scale + 1/iscale) : 1/iscale; - float best = scale * sumlx; - for (int is = -9; is <= 9; ++is) { - if (is == 0) { - continue; - } - iscale = -(nmax + 0.1f*is) / max; - sumlx = suml2 = 0; - for (int i = 0; i < n; ++i) { - int l = nearest_int(iscale * x[i]); - l = MAX(-nmax, MIN(nmax-1, l)); - float w = qw ? qw[i] : rmse_type == 1 ? x[i] * x[i] : rmse_type == 2 ? 1 : rmse_type == 3 ? 
fabsf(x[i]) : sqrtf(fabsf(x[i])); - sumlx += w*x[i]*l; - suml2 += w*l*l; - } - if (suml2 > 0 && sumlx*sumlx > best*suml2) { - for (int i = 0; i < n; ++i) { - int l = nearest_int(iscale * x[i]); - L[i] = nmax + MAX(-nmax, MIN(nmax-1, l)); - } - scale = sumlx/suml2; best = scale*sumlx; - } - } - return scale; -} - -static float make_q3_quants(int n, int nmax, const float * GGML_RESTRICT x, int8_t * GGML_RESTRICT L, bool do_rmse) { - float max = 0; - float amax = 0; - for (int i = 0; i < n; ++i) { - float ax = fabsf(x[i]); - if (ax > amax) { amax = ax; max = x[i]; } - } - if (amax < GROUP_MAX_EPS) { // all zero - for (int i = 0; i < n; ++i) { L[i] = 0; } - return 0.f; - } - float iscale = -nmax / max; - if (do_rmse) { - float sumlx = 0; - float suml2 = 0; - for (int i = 0; i < n; ++i) { - int l = nearest_int(iscale * x[i]); - l = MAX(-nmax, MIN(nmax-1, l)); - L[i] = l; - float w = x[i]*x[i]; - sumlx += w*x[i]*l; - suml2 += w*l*l; - } - for (int itry = 0; itry < 5; ++itry) { - int n_changed = 0; - for (int i = 0; i < n; ++i) { - float w = x[i]*x[i]; - float slx = sumlx - w*x[i]*L[i]; - if (slx > 0) { - float sl2 = suml2 - w*L[i]*L[i]; - int new_l = nearest_int(x[i] * sl2 / slx); - new_l = MAX(-nmax, MIN(nmax-1, new_l)); - if (new_l != L[i]) { - slx += w*x[i]*new_l; - sl2 += w*new_l*new_l; - if (sl2 > 0 && slx*slx*suml2 > sumlx*sumlx*sl2) { - L[i] = new_l; sumlx = slx; suml2 = sl2; - ++n_changed; - } - } - } - } - if (!n_changed) { - break; - } - } - for (int i = 0; i < n; ++i) { - L[i] += nmax; - } - return sumlx / suml2; - } - for (int i = 0; i < n; ++i) { - int l = nearest_int(iscale * x[i]); - l = MAX(-nmax, MIN(nmax-1, l)); - L[i] = l + nmax; - } - return 1/iscale; -} - -static float make_qkx1_quants(int n, int nmax, const float * GGML_RESTRICT x, uint8_t * GGML_RESTRICT L, float * GGML_RESTRICT the_min, - int ntry, float alpha) { - float min = x[0]; - float max = x[0]; - for (int i = 1; i < n; ++i) { - if (x[i] < min) min = x[i]; - if (x[i] > max) max = x[i]; - } - if (max == min) { - for (int i = 0; i < n; ++i) L[i] = 0; - *the_min = 0; - return 0.f; - } - if (min > 0) min = 0; - float iscale = nmax/(max - min); - float scale = 1/iscale; - for (int itry = 0; itry < ntry; ++itry) { - float sumlx = 0; int suml2 = 0; - bool did_change = false; - for (int i = 0; i < n; ++i) { - int l = nearest_int(iscale*(x[i] - min)); - l = MAX(0, MIN(nmax, l)); - if (l != L[i]) { - L[i] = l; - did_change = true; - } - sumlx += (x[i] - min)*l; - suml2 += l*l; - } - scale = sumlx/suml2; - float sum = 0; - for (int i = 0; i < n; ++i) { - sum += x[i] - scale*L[i]; - } - min = alpha*min + (1 - alpha)*sum/n; - if (min > 0) min = 0; - iscale = 1/scale; - if (!did_change) break; - } - *the_min = -min; - return scale; -} - -static float make_qkx2_quants(int n, int nmax, const float * GGML_RESTRICT x, const float * GGML_RESTRICT weights, - uint8_t * GGML_RESTRICT L, float * GGML_RESTRICT the_min, uint8_t * GGML_RESTRICT Laux, - float rmin, float rdelta, int nstep, bool use_mad) { - float min = x[0]; - float max = x[0]; - float sum_w = weights[0]; - float sum_x = sum_w * x[0]; -#ifdef HAVE_BUGGY_APPLE_LINKER - // use 'volatile' to prevent unroll and work around a bug in Apple ld64 1015.7 - for (volatile int i = 1; i < n; ++i) { -#else - for (int i = 1; i < n; ++i) { -#endif - if (x[i] < min) min = x[i]; - if (x[i] > max) max = x[i]; - float w = weights[i]; - sum_w += w; - sum_x += w * x[i]; - } - if (min > 0) min = 0; - if (max == min) { - for (int i = 0; i < n; ++i) L[i] = 0; - *the_min = -min; - return 0.f; - } - 
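    // The grid search below is a weighted least-squares fit: each candidate
    // step (indexed by is) fixes an assignment L by rounding, and minimizing
    // E = sum_i w_i*(scale*L_i + min - x_i)^2 over (scale, min) gives, with
    // W = sum(w):
    //     D     = W*sum(w*l^2) - sum(w*l)^2
    //     scale = (W*sum(w*l*x) - sum(w*x)*sum(w*l)) / D
    //     min   = (sum(w*l^2)*sum(w*x) - sum(w*l)*sum(w*l*x)) / D
    // which is exactly what is computed from sum_w, sum_x, sum_l, sum_l2 and
    // sum_xl. A positive fitted min is clamped to zero (the blocks store the
    // offset as a non-negative value), with scale refit as
    // sum(w*l*x)/sum(w*l^2); candidates are compared by weighted MAD or
    // squared error depending on use_mad.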
-    float iscale = nmax/(max - min);
-    float scale = 1/iscale;
-    float best_mad = 0;
-    for (int i = 0; i < n; ++i) {
-        int l = nearest_int(iscale*(x[i] - min));
-        L[i] = MAX(0, MIN(nmax, l));
-        float diff = scale * L[i] + min - x[i];
-        diff = use_mad ? fabsf(diff) : diff * diff;
-        float w = weights[i];
-        best_mad += w * diff;
-    }
-    if (nstep < 1) {
-        *the_min = -min;
-        return scale;
-    }
-    for (int is = 0; is <= nstep; ++is) {
-        iscale = (rmin + rdelta*is + nmax)/(max - min);
-        float sum_l = 0, sum_l2 = 0, sum_xl = 0;
-        for (int i = 0; i < n; ++i) {
-            int l = nearest_int(iscale*(x[i] - min));
-            l = MAX(0, MIN(nmax, l));
-            Laux[i] = l;
-            float w = weights[i];
-            sum_l += w*l;
-            sum_l2 += w*l*l;
-            sum_xl += w*l*x[i];
-        }
-        float D = sum_w * sum_l2 - sum_l * sum_l;
-        if (D > 0) {
-            float this_scale = (sum_w * sum_xl - sum_x * sum_l)/D;
-            float this_min   = (sum_l2 * sum_x - sum_l * sum_xl)/D;
-            if (this_min > 0) {
-                this_min = 0;
-                this_scale = sum_xl / sum_l2;
-            }
-            float mad = 0;
-            for (int i = 0; i < n; ++i) {
-                float diff = this_scale * Laux[i] + this_min - x[i];
-                diff = use_mad ? fabsf(diff) : diff * diff;
-                float w = weights[i];
-                mad += w * diff;
-            }
-            if (mad < best_mad) {
-                for (int i = 0; i < n; ++i) {
-                    L[i] = Laux[i];
-                }
-                best_mad = mad;
-                scale = this_scale;
-                min = this_min;
-            }
-        }
-    }
-    *the_min = -min;
-    return scale;
-}
-
-static inline void get_scale_min_k4(int j, const uint8_t * GGML_RESTRICT q, uint8_t * GGML_RESTRICT d, uint8_t * GGML_RESTRICT m) {
-    if (j < 4) {
-        *d = q[j] & 63; *m = q[j + 4] & 63;
-    } else {
-        *d = (q[j+4] & 0xF) | ((q[j-4] >> 6) << 4);
-        *m = (q[j+4] >>  4) | ((q[j-0] >> 6) << 4);
-    }
-}
-
-//========================- 2-bit (de)-quantization
-
-void quantize_row_q2_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) {
-    quantize_row_q2_K_ref(x, vy, k);
-}
-
-//========================= 3-bit (de)-quantization
-
-void quantize_row_q3_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) {
-    quantize_row_q3_K_ref(x, vy, k);
-}
-
-// ====================== 4-bit (de)-quantization
-
-void quantize_row_q4_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) {
-    assert(k % QK_K == 0);
-    block_q4_K * GGML_RESTRICT y = vy;
-    quantize_row_q4_K_ref(x, y, k);
-}
-
-// ====================== 5-bit (de)-quantization
-
-void quantize_row_q5_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) {
-    assert(k % QK_K == 0);
-    block_q5_K * GGML_RESTRICT y = vy;
-    quantize_row_q5_K_ref(x, y, k);
-}
-
-// ====================== 6-bit (de)-quantization
-
-void quantize_row_q6_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) {
-    assert(k % QK_K == 0);
-    block_q6_K * GGML_RESTRICT y = vy;
-    quantize_row_q6_K_ref(x, y, k);
-}
-
-// ====================== Ternary (de)-quantization (BitNet b1.58 and TriLMs)
-
-void quantize_row_tq1_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) {
-    assert(k % QK_K == 0);
-    block_tq1_0 * GGML_RESTRICT y = vy;
-    quantize_row_tq1_0_ref(x, y, k);
-}
-
-void quantize_row_tq2_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) {
-    assert(k % QK_K == 0);
-    block_tq2_0 * GGML_RESTRICT y = vy;
-    quantize_row_tq2_0_ref(x, y, k);
-}
-
-static const int8_t kvalues_iq4nl[16] = {-127, -104, -83, -65, -49, -35, -22, -10, 1, 13, 25, 38, 53, 69, 89, 113};
-
-//===================================== Q8_K ==============================================
-
-void quantize_row_q8_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k) {
-#ifdef __wasm_simd128__
-    assert(k % QK_K == 0);
-    const int64_t nb = k / QK_K;
-    block_q8_K * GGML_RESTRICT yc = y; // Cast to proper type
-
-    for (int i = 0; i < nb; i++) {
-        const float * x_block = x + i * QK_K;
-
-        v128_t min_vec = wasm_v128_load(x_block);
-        v128_t max_vec = min_vec;
-
-        for (int j = 4; j < QK_K; j += 4) {
-            v128_t x_vec = wasm_v128_load(x_block + j);
-            max_vec = wasm_f32x4_pmax(max_vec, x_vec);
-            min_vec = wasm_f32x4_pmin(min_vec, x_vec);
-        }
-        max_vec = wasm_f32x4_pmax(max_vec, wasm_i32x4_shuffle(max_vec, max_vec, 2, 3, 0, 1));
-        max_vec = wasm_f32x4_pmax(max_vec, wasm_i32x4_shuffle(max_vec, max_vec, 1, 0, 3, 2));
-        min_vec = wasm_f32x4_pmin(min_vec, wasm_i32x4_shuffle(min_vec, min_vec, 2, 3, 0, 1));
-        min_vec = wasm_f32x4_pmin(min_vec, wasm_i32x4_shuffle(min_vec, min_vec, 1, 0, 3, 2));
-        float max = wasm_f32x4_extract_lane(max_vec, 0);
-        float min = wasm_f32x4_extract_lane(min_vec, 0);
-        float amax = -min > max ? min : max;
-
-        if (amax == 0.0f) {
-            yc[i].d = 0.0f;
-            const v128_t zero = wasm_i8x16_splat(0);
-            for (int j = 0; j < QK_K; j += 16) {
-                wasm_v128_store(yc[i].qs + j, zero);
-            }
-            continue;
-        }
-
-        const float iscale = -127.0f / amax;
-        const v128_t scale_vec = wasm_f32x4_splat(iscale);
-
-        // Process 16 elements per iteration
-        for (int j = 0, jb = 0; j < QK_K; j += 16, jb++) {
-            // Load and quantize 16 floats
-            v128_t x0 = wasm_v128_load(x_block + j);
-            v128_t x1 = wasm_v128_load(x_block + j + 4);
-            v128_t x2 = wasm_v128_load(x_block + j + 8);
-            v128_t x3 = wasm_v128_load(x_block + j + 12);
-
-            v128_t q0 = wasm_f32x4_nearest(wasm_f32x4_mul(x0, scale_vec));
-            v128_t q1 = wasm_f32x4_nearest(wasm_f32x4_mul(x1, scale_vec));
-            v128_t q2 = wasm_f32x4_nearest(wasm_f32x4_mul(x2, scale_vec));
-            v128_t q3 = wasm_f32x4_nearest(wasm_f32x4_mul(x3, scale_vec));
-
-            // Convert to i32 with saturation
-            v128_t i0 = wasm_i32x4_trunc_sat_f32x4(q0);
-            v128_t i1 = wasm_i32x4_trunc_sat_f32x4(q1);
-            v128_t i2 = wasm_i32x4_trunc_sat_f32x4(q2);
-            v128_t i3 = wasm_i32x4_trunc_sat_f32x4(q3);
-
-            // Pack into 16 i8 values
-            v128_t i8 = wasm_i8x16_narrow_i16x8(
-                wasm_i16x8_narrow_i32x4(i0, i1),
-                wasm_i16x8_narrow_i32x4(i2, i3)
-            );
-            wasm_v128_store(yc[i].qs + j, i8);
-
-            // Calculate bsums using SIMD
-            v128_t sum16 = wasm_i16x8_add(
-                wasm_i16x8_extend_low_i8x16(i8),
-                wasm_i16x8_extend_high_i8x16(i8)
-            );
-            v128_t sum32 = wasm_i32x4_add(
-                wasm_i32x4_extend_low_i16x8(sum16),
-                wasm_i32x4_extend_high_i16x8(sum16)
-            );
-            sum32 = wasm_i32x4_add(sum32, wasm_i32x4_shuffle(sum32, sum32, 2, 3, 0, 1));
-            sum32 = wasm_i32x4_add(sum32, wasm_i32x4_shuffle(sum32, sum32, 1, 0, 3, 2));
-            yc[i].bsums[jb] = wasm_i32x4_extract_lane(sum32, 0);
-        }
-
-        yc[i].d = 1.0f / iscale;
-    }
-#else
-    quantize_row_q8_K_ref(x, y, k);
-#endif
-}
-
-//===================================== Dot products =================================
-
-//
-// Helper functions
-//
-#if __AVX__ || __AVX2__ || __AVX512F__
-
-// shuffles to pick the required scales in dot products
-static inline __m256i get_scale_shuffle_q3k(int i) {
-    static const uint8_t k_shuffle[128] = {
-         0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1,     2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3,
-         4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5,     6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7,
-         8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9,    10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,
-        12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,    14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,
-    };
-    return _mm256_loadu_si256((const __m256i*)k_shuffle + i);
-}
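These lookup tables trade per-sub-block scalar indexing for a single byte shuffle: row i of the q3k table broadcasts int16 scale 2i across the low 128-bit half of the result and scale 2i+1 across the high half (byte shuffles operate within 128-bit lanes, which is why the table is laid out this way). A scalar sketch of the effect, assuming the eight 16-bit scales appear identically in both halves of the shuffled source vector; the helper is illustrative only, not part of this file:

    // Hypothetical scalar equivalent of shuffling with k_shuffle row i (i in 0..3)
    static void broadcast_scales_q3k_scalar(const int16_t scales[8], int i, int16_t out[16]) {
        for (int j = 0; j < 16; ++j) {
            out[j] = scales[2*i + j/8]; // scales[2i] fills lanes 0..7, scales[2i+1] lanes 8..15
        }
    }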
-static inline __m256i get_scale_shuffle_k4(int i) { - static const uint8_t k_shuffle[256] = { - 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, - 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, - 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, - 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, - 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, - 10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11, - 12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13, - 14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15 - }; - return _mm256_loadu_si256((const __m256i*)k_shuffle + i); -} -static inline __m128i get_scale_shuffle(int i) { - static const uint8_t k_shuffle[128] = { - 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, - 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, - 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, - 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, - 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, - 10,10,10,10,10,10,10,10, 11,11,11,11,11,11,11,11, - 12,12,12,12,12,12,12,12, 13,13,13,13,13,13,13,13, - 14,14,14,14,14,14,14,14, 15,15,15,15,15,15,15,15 - }; - return _mm_loadu_si128((const __m128i*)k_shuffle + i); -} -#elif defined(__loongarch_asx) -// shuffles to pick the required scales in dot products -static inline __m256i get_scale_shuffle_q3k(int i) { - static const uint8_t k_shuffle[128] = { - 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, - 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, - 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11, - 12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13, 14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15, - }; - return __lasx_xvld((const __m256i*)k_shuffle + i, 0); -} -static inline __m256i get_scale_shuffle_k4(int i) { - static const uint8_t k_shuffle[256] = { - 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, - 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, - 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, - 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, - 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, - 10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11, - 12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13, - 14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15 - }; - return __lasx_xvld((const __m256i*)k_shuffle + i, 0); -} -static inline __m128i get_scale_shuffle(int i) { - static const uint8_t k_shuffle[128] = { - 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, - 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, - 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, - 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, - 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, - 10,10,10,10,10,10,10,10, 11,11,11,11,11,11,11,11, - 12,12,12,12,12,12,12,12, 13,13,13,13,13,13,13,13, - 
14,14,14,14,14,14,14,14, 15,15,15,15,15,15,15,15 - }; - return __lsx_vld((const __m128i*)k_shuffle + i, 0); -} -#endif - -void ggml_vec_dot_q4_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { - const int qk = QK8_0; - const int nb = n / qk; - - assert(n % qk == 0); -#if defined(__ARM_FEATURE_MATMUL_INT8) - assert((nrc == 2) || (nrc == 1)); -#else - assert(nrc == 1); -#endif - UNUSED(nrc); - UNUSED(bx); - UNUSED(by); - UNUSED(bs); - - const block_q4_0 * GGML_RESTRICT x = vx; - const block_q8_0 * GGML_RESTRICT y = vy; - -#if defined(__ARM_FEATURE_MATMUL_INT8) - if (nrc == 2) { - const block_q4_0 * GGML_RESTRICT vx0 = vx; - const block_q4_0 * GGML_RESTRICT vx1 = (const block_q4_0 *) ((const uint8_t*)vx + bx); - const block_q8_0 * GGML_RESTRICT vy0 = vy; - const block_q8_0 * GGML_RESTRICT vy1 = (const block_q8_0 *) ((const uint8_t*)vy + by); - - float32x4_t sumv0 = vdupq_n_f32(0.0f); - - for (int i = 0; i < nb; i++) { - const block_q4_0 * GGML_RESTRICT b_x0 = &vx0[i]; - const block_q4_0 * GGML_RESTRICT b_x1 = &vx1[i]; - const block_q8_0 * GGML_RESTRICT b_y0 = &vy0[i]; - const block_q8_0 * GGML_RESTRICT b_y1 = &vy1[i]; - - const uint8x16_t m4b = vdupq_n_u8(0x0F); - const int8x16_t s8b = vdupq_n_s8(0x8); - - const uint8x16_t v0_0 = vld1q_u8(b_x0->qs); - const uint8x16_t v0_1 = vld1q_u8(b_x1->qs); - - // 4-bit -> 8-bit - const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b)); - const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4)); - const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b)); - const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4)); - - // sub 8 - const int8x16_t x0_l = vsubq_s8(v0_0l, s8b); - const int8x16_t x0_h = vsubq_s8(v0_0h, s8b); - const int8x16_t x1_l = vsubq_s8(v0_1l, s8b); - const int8x16_t x1_h = vsubq_s8(v0_1h, s8b); - - // load y - const int8x16_t y0_l = vld1q_s8(b_y0->qs); - const int8x16_t y0_h = vld1q_s8(b_y0->qs + 16); - const int8x16_t y1_l = vld1q_s8(b_y1->qs); - const int8x16_t y1_h = vld1q_s8(b_y1->qs + 16); - - float32_t _scale[4] = { - GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y0->d), - GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y1->d), - GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y0->d), - GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y1->d) - }; - float32x4_t scale = vld1q_f32(_scale); - - int8x16_t l0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l))); - int8x16_t l1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l))); - - int8x16_t l2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_h), vreinterpretq_s64_s8(x1_h))); - int8x16_t l3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_h), vreinterpretq_s64_s8(x1_h))); - - int8x16_t r0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l))); - int8x16_t r1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l))); - - int8x16_t r2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h))); - int8x16_t r3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h))); - - sumv0 = vmlaq_f32(sumv0,(vcvtq_f32_s32(vmmlaq_s32((vmmlaq_s32((vmmlaq_s32((vmmlaq_s32(vdupq_n_s32(0), l0, r0)), - l1, r1)), l2, r2)), l3, r3))), scale); - } - - float32x4_t sumv1 = vextq_f32 (sumv0, sumv0, 2); - float32x4_t sumv2 = vzip1q_f32(sumv0, sumv1); - - vst1_f32(s, 
vget_low_f32 (sumv2)); - vst1_f32(s + bs, vget_high_f32(sumv2)); - - return; - } -#endif - - int ib = 0; - float sumf = 0; - -#if defined(__ARM_FEATURE_SVE) - svfloat32_t sumv0 = svdup_n_f32(0.0f); - svfloat32_t sumv1 = svdup_n_f32(0.0f); - - const int vector_length = ggml_cpu_get_sve_cnt()*8; - - // VLA Implementation using switch case - switch (vector_length) { - case 128: - { - // predicate for activating higher lanes for 4 float32 elements - const svbool_t ph4 = svptrue_pat_b32(SV_VL4); - - for (; ib + 1 < nb; ib += 2) { - const block_q4_0 * GGML_RESTRICT x0 = &x[ib + 0]; - const block_q4_0 * GGML_RESTRICT x1 = &x[ib + 1]; - const block_q8_0 * GGML_RESTRICT y0 = &y[ib + 0]; - const block_q8_0 * GGML_RESTRICT y1 = &y[ib + 1]; - - // load x - const svuint8_t qx0r = svld1rq_u8(svptrue_b8(), x0->qs); - const svuint8_t qx1r = svld1rq_u8(svptrue_b8(), x1->qs); - - // 4-bit -> 8-bit - const svint8_t qx0l = svreinterpret_s8_u8(svand_n_u8_m(svptrue_b8(), qx0r, 0x0F)); - const svint8_t qx0h = svreinterpret_s8_u8(svlsr_n_u8_m(svptrue_b8(), qx0r, 0x04)); - const svint8_t qx1l = svreinterpret_s8_u8(svand_n_u8_m(svptrue_b8(), qx1r, 0x0F)); - const svint8_t qx1h = svreinterpret_s8_u8(svlsr_n_u8_m(svptrue_b8(), qx1r, 0x04)); - - // sub 8 - const svint8_t qx0ls = svsub_n_s8_x(svptrue_b8(), qx0h, 8); - const svint8_t qx0hs = svsub_n_s8_x(svptrue_b8(), qx0l, 8); - const svint8_t qx1ls = svsub_n_s8_x(svptrue_b8(), qx1h, 8); - const svint8_t qx1hs = svsub_n_s8_x(svptrue_b8(), qx1l, 8); - - // load y - const svint8_t qy0h = svld1_s8(svptrue_b8(), y0->qs); - const svint8_t qy0l = svld1_s8(svptrue_b8(), y0->qs + 16); - const svint8_t qy1h = svld1_s8(svptrue_b8(), y1->qs); - const svint8_t qy1l = svld1_s8(svptrue_b8(), y1->qs + 16); - - // dot product - sumv0 = svmla_n_f32_x(ph4, sumv0, svcvt_f32_s32_x(ph4, svadd_x(ph4, - svdot_s32(svdup_n_s32(0), qx0ls, qy0l), - svdot_s32(svdup_n_s32(0), qx0hs, qy0h))), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d)); - sumv1 = svmla_n_f32_x(ph4, sumv1, svcvt_f32_s32_x(ph4, svadd_x(ph4, - svdot_s32(svdup_n_s32(0), qx1ls, qy1l), - svdot_s32(svdup_n_s32(0), qx1hs, qy1h))), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d)); - } - - sumf = svaddv_f32(svptrue_b32(), svadd_f32_x(svptrue_b32(), sumv0, sumv1)); - } break; - case 256: - { - // predicate for activating higher lanes for 16 int8 elements - const svbool_t ph16 = svptrue_pat_b8(SV_VL16); - // predicate for activating lower lanes for 16 int8 elements - const svbool_t pl16 = svnot_b_z(svptrue_b8(), ph16); - - for (; ib + 1 < nb; ib += 2) { - const block_q4_0 * GGML_RESTRICT x0 = &x[ib + 0]; - const block_q4_0 * GGML_RESTRICT x1 = &x[ib + 1]; - const block_q8_0 * GGML_RESTRICT y0 = &y[ib + 0]; - const block_q8_0 * GGML_RESTRICT y1 = &y[ib + 1]; - - // load x - const svuint8_t qx0r = svld1rq_u8(svptrue_b8(), x0->qs); - const svuint8_t qx1r = svld1rq_u8(svptrue_b8(), x1->qs); - - // 4-bit -> 8-bit - const svint8_t qx0 = svreinterpret_s8_u8(svlsr_n_u8_m(pl16, svand_n_u8_m(ph16, qx0r, 0x0F), 0x04)); - const svint8_t qx1 = svreinterpret_s8_u8(svlsr_n_u8_m(pl16, svand_n_u8_m(ph16, qx1r, 0x0F), 0x04)); - - // sub 8 - const svint8_t qx0s = svsub_n_s8_x(svptrue_b8(), qx0, 8); - const svint8_t qx1s = svsub_n_s8_x(svptrue_b8(), qx1, 8); - - // load y - const svint8_t qy0 = svld1_s8(svptrue_b8(), y0->qs); - const svint8_t qy1 = svld1_s8(svptrue_b8(), y1->qs); - - // dot product - sumv0 = svmla_n_f32_x(svptrue_b32(), sumv0, svcvt_f32_s32_x(svptrue_b32(), - svdot_s32(svdup_n_s32(0), qx0s, qy0)), 
GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d)); - sumv1 = svmla_n_f32_x(svptrue_b32(), sumv1, svcvt_f32_s32_x(svptrue_b32(), - svdot_s32(svdup_n_s32(0), qx1s, qy1)), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d)); - } - - sumf = svaddv_f32(svptrue_b32(), svadd_f32_x(svptrue_b32(), sumv0, sumv1)); - } break; - case 512: - { - // predicate for activating higher lanes for 32 int8 elements - const svbool_t ph32 = svptrue_pat_b8(SV_VL32); - - // predicate for activating higher lanes for 16 int8 elements - const svbool_t ph16 = svptrue_pat_b8(SV_VL16); - // predicate for activating lower lanes for 16 int8 elements from first 32 int8 activated lanes - const svbool_t pl16 = svnot_b_z(ph32, ph16); - - for (; ib + 1 < nb; ib += 2) { - const block_q4_0 * GGML_RESTRICT x0 = &x[ib + 0]; - const block_q4_0 * GGML_RESTRICT x1 = &x[ib + 1]; - const block_q8_0 * GGML_RESTRICT y0 = &y[ib + 0]; - const block_q8_0 * GGML_RESTRICT y1 = &y[ib + 1]; - - // load x - const svuint8_t qx0r = svld1rq_u8(ph32, x0->qs); - const svuint8_t qx1r = svld1rq_u8(ph32, x1->qs); - - // 4-bit -> 8-bit - const svint8_t qx0 = svreinterpret_s8_u8(svlsr_n_u8_m(pl16, svand_n_u8_m(ph16, qx0r, 0x0F), 0x04)); - const svint8_t qx1 = svreinterpret_s8_u8(svlsr_n_u8_m(pl16, svand_n_u8_m(ph16, qx1r, 0x0F), 0x04)); - - // sub 8 - const svint8_t qx0s = svsub_n_s8_x(ph32, qx0, 8); - const svint8_t qx1s = svsub_n_s8_x(ph32, qx1, 8); - - // load y - const svint8_t qy0 = svld1_s8(ph32, y0->qs); - const svint8_t qy1 = svld1_s8(ph32, y1->qs); - - // dot product - sumv0 = svmla_n_f32_x(ph32, sumv0, svcvt_f32_s32_x(ph32, - svdot_s32(svdup_n_s32(0), qx0s, qy0)), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d)); - sumv1 = svmla_n_f32_x(ph32, sumv1, svcvt_f32_s32_x(ph32, - svdot_s32(svdup_n_s32(0), qx1s, qy1)), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d)); - } - - sumf = svaddv_f32(ph32, svadd_f32_x(ph32, sumv0, sumv1)); - } break; - default: - assert(false && "Unsupported vector length"); - break; - } - -#elif defined(__ARM_NEON) - float32x4_t sumv0 = vdupq_n_f32(0.0f); - float32x4_t sumv1 = vdupq_n_f32(0.0f); - - for (; ib + 1 < nb; ib += 2) { - const block_q4_0 * GGML_RESTRICT x0 = &x[ib + 0]; - const block_q4_0 * GGML_RESTRICT x1 = &x[ib + 1]; - const block_q8_0 * GGML_RESTRICT y0 = &y[ib + 0]; - const block_q8_0 * GGML_RESTRICT y1 = &y[ib + 1]; - - const uint8x16_t m4b = vdupq_n_u8(0x0F); - const int8x16_t s8b = vdupq_n_s8(0x8); - - const uint8x16_t v0_0 = vld1q_u8(x0->qs); - const uint8x16_t v0_1 = vld1q_u8(x1->qs); - - // 4-bit -> 8-bit - const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b)); - const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4)); - const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b)); - const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4)); - - // sub 8 - const int8x16_t v0_0ls = vsubq_s8(v0_0l, s8b); - const int8x16_t v0_0hs = vsubq_s8(v0_0h, s8b); - const int8x16_t v0_1ls = vsubq_s8(v0_1l, s8b); - const int8x16_t v0_1hs = vsubq_s8(v0_1h, s8b); - - // load y - const int8x16_t v1_0l = vld1q_s8(y0->qs); - const int8x16_t v1_0h = vld1q_s8(y0->qs + 16); - const int8x16_t v1_1l = vld1q_s8(y1->qs); - const int8x16_t v1_1h = vld1q_s8(y1->qs + 16); - - // dot product into int32x4_t - const int32x4_t p_0 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), v0_0ls, v1_0l), v0_0hs, v1_0h); - const int32x4_t p_1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), v0_1ls, v1_1l), v0_1hs, v1_1h); - - sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(p_0), 
GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d)); - sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(p_1), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d)); - } - - sumf = vaddvq_f32(sumv0) + vaddvq_f32(sumv1); -#elif defined __wasm_simd128__ - v128_t sumv = wasm_f32x4_splat(0.0f); - - const v128_t m4b = wasm_i8x16_splat(0x0F); - const v128_t s8b = wasm_i8x16_splat(0x8); - - for (; ib + 1 < nb; ib += 2) { - const block_q4_0 * GGML_RESTRICT x0 = &x[ib]; - const block_q4_0 * GGML_RESTRICT x1 = &x[ib + 1]; - const block_q8_0 * GGML_RESTRICT y0 = &y[ib]; - const block_q8_0 * GGML_RESTRICT y1 = &y[ib + 1]; - - // Load and process x0 - v128_t v0_0 = wasm_v128_load(x0->qs); - v128_t v0_0l = wasm_v128_and(v0_0, m4b); - v128_t v0_0h = wasm_u8x16_shr(v0_0, 4); - v128_t v0_0ls = wasm_i8x16_sub(v0_0l, s8b); - v128_t v0_0hs = wasm_i8x16_sub(v0_0h, s8b); - - // Load y0 vectors - v128_t y0_l = wasm_v128_load(y0->qs); - v128_t y0_h = wasm_v128_load(y0->qs + 16); - - // Extend to i16x8 and compute dot products - v128_t dx0l = wasm_i16x8_extend_low_i8x16(v0_0ls); - v128_t dx0h = wasm_i16x8_extend_high_i8x16(v0_0ls); - v128_t dx0hl = wasm_i16x8_extend_low_i8x16(v0_0hs); - v128_t dx0hh = wasm_i16x8_extend_high_i8x16(v0_0hs); - - v128_t dy0ll = wasm_i16x8_extend_low_i8x16(y0_l); - v128_t dy0lh = wasm_i16x8_extend_high_i8x16(y0_l); - v128_t dy0hl = wasm_i16x8_extend_low_i8x16(y0_h); - v128_t dy0hh = wasm_i16x8_extend_high_i8x16(y0_h); - - v128_t dp0 = wasm_i32x4_add( - wasm_i32x4_add( - wasm_i32x4_dot_i16x8(dx0l, dy0ll), - wasm_i32x4_dot_i16x8(dx0h, dy0lh) - ), - wasm_i32x4_add( - wasm_i32x4_dot_i16x8(dx0hl, dy0hl), - wasm_i32x4_dot_i16x8(dx0hh, dy0hh) - ) - ); - - // Load and process x1 - v128_t v0_1 = wasm_v128_load(x1->qs); - v128_t v0_1l = wasm_v128_and(v0_1, m4b); - v128_t v0_1h = wasm_u8x16_shr(v0_1, 4); - v128_t v0_1ls = wasm_i8x16_sub(v0_1l, s8b); - v128_t v0_1hs = wasm_i8x16_sub(v0_1h, s8b); - - // Load y1 vectors - v128_t y1_l = wasm_v128_load(y1->qs); - v128_t y1_h = wasm_v128_load(y1->qs + 16); - - // Extend to i16x8 and compute dot products - v128_t dx1l = wasm_i16x8_extend_low_i8x16(v0_1ls); - v128_t dx1h = wasm_i16x8_extend_high_i8x16(v0_1ls); - v128_t dx1hl = wasm_i16x8_extend_low_i8x16(v0_1hs); - v128_t dx1hh = wasm_i16x8_extend_high_i8x16(v0_1hs); - - v128_t dy1ll = wasm_i16x8_extend_low_i8x16(y1_l); - v128_t dy1lh = wasm_i16x8_extend_high_i8x16(y1_l); - v128_t dy1hl = wasm_i16x8_extend_low_i8x16(y1_h); - v128_t dy1hh = wasm_i16x8_extend_high_i8x16(y1_h); - - v128_t dp1 = wasm_i32x4_add( - wasm_i32x4_add( - wasm_i32x4_dot_i16x8(dx1l, dy1ll), - wasm_i32x4_dot_i16x8(dx1h, dy1lh) - ), - wasm_i32x4_add( - wasm_i32x4_dot_i16x8(dx1hl, dy1hl), - wasm_i32x4_dot_i16x8(dx1hh, dy1hh) - ) - ); - - // Accumulate results with scaling - float scale0 = GGML_FP16_TO_FP32(x0->d) * GGML_FP16_TO_FP32(y0->d); - float scale1 = GGML_FP16_TO_FP32(x1->d) * GGML_FP16_TO_FP32(y1->d); - - sumv = wasm_f32x4_add(sumv, wasm_f32x4_mul(wasm_f32x4_convert_i32x4(dp0), wasm_f32x4_splat(scale0))); - sumv = wasm_f32x4_add(sumv, wasm_f32x4_mul(wasm_f32x4_convert_i32x4(dp1), wasm_f32x4_splat(scale1))); - } - - sumf = wasm_f32x4_extract_lane(sumv, 0) + wasm_f32x4_extract_lane(sumv, 1) + - wasm_f32x4_extract_lane(sumv, 2) + wasm_f32x4_extract_lane(sumv, 3); -#elif defined(__AVX2__) - // Initialize accumulator with zeros - __m256 acc = _mm256_setzero_ps(); - - // Main loop - for (; ib < nb; ++ib) { - /* Compute combined scale for the block */ - const __m256 d = _mm256_set1_ps( GGML_FP16_TO_FP32(x[ib].d) * GGML_FP16_TO_FP32(y[ib].d) ); - - 
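        // Each block contributes d_x * d_y * sum((q_x - 8) * q_y): the 4-bit
        // nibbles are widened to bytes, re-centered from [0, 15] to [-8, 7],
        // dotted against the int8 activations entirely in integer arithmetic,
        // and only then scaled once by the combined fp16 deltas in the fused
        // multiply-add below.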
__m256i qx = bytes_from_nibbles_32(x[ib].qs); - - // Now we have a vector with bytes in [ 0 .. 15 ] interval. Offset them into [ -8 .. +7 ] interval. - const __m256i off = _mm256_set1_epi8( 8 ); - qx = _mm256_sub_epi8( qx, off ); - - __m256i qy = _mm256_loadu_si256((const __m256i *)y[ib].qs); - - const __m256 q = mul_sum_i8_pairs_float(qx, qy); - - /* Multiply q with scale and accumulate */ - acc = _mm256_fmadd_ps( d, q, acc ); - } - - sumf = hsum_float_8(acc); -#elif defined(__AVX__) - __m256 accum = _mm256_setzero_ps(); - for (; ib + 1 < nb; ib += 2) { - const __m128i q4bits_1 = _mm_loadu_si128((const __m128i *)x[ib + 0].qs); - const __m128i q4bits_2 = _mm_loadu_si128((const __m128i *)x[ib + 1].qs); - const __m128i q8b_1_0 = _mm_loadu_si128((const __m128i *)y[ib + 0].qs); - const __m128i q8b_1_1 = _mm_loadu_si128((const __m128i *)y[ib + 0].qs + 1); - const __m128i q8b_2_0 = _mm_loadu_si128((const __m128i *)y[ib + 1].qs); - const __m128i q8b_2_1 = _mm_loadu_si128((const __m128i *)y[ib + 1].qs + 1); - - const __m128i q4b_1_0 = _mm_sub_epi8(_mm_and_si128(_mm_set1_epi8(15), q4bits_1), _mm_set1_epi8(8)); - const __m128i q4b_1_1 = _mm_sub_epi8(_mm_and_si128(_mm_set1_epi8(15), _mm_srli_epi16(q4bits_1, 4)), _mm_set1_epi8(8)); - const __m128i q4b_2_0 = _mm_sub_epi8(_mm_and_si128(_mm_set1_epi8(15), q4bits_2), _mm_set1_epi8(8)); - const __m128i q4b_2_1 = _mm_sub_epi8(_mm_and_si128(_mm_set1_epi8(15), _mm_srli_epi16(q4bits_2, 4)), _mm_set1_epi8(8)); - - const __m128i p16_1_0 = mul_add_epi8_sse(q4b_1_0, q8b_1_0); - const __m128i p16_1_1 = mul_add_epi8_sse(q4b_1_1, q8b_1_1); - const __m128i p16_2_0 = mul_add_epi8_sse(q4b_2_0, q8b_2_0); - const __m128i p16_2_1 = mul_add_epi8_sse(q4b_2_1, q8b_2_1); - const __m128i p_1 = _mm_add_epi16(p16_1_0, p16_1_1); - const __m128i p_2 = _mm_add_epi16(p16_2_0, p16_2_1); - const __m256 p = sum_i16_pairs_float(p_2, p_1); - - const __m256 deltas = quad_fp16_delta_float(x[ib].d, y[ib].d, x[ib + 1].d, y[ib + 1].d); - accum = _mm256_add_ps(_mm256_mul_ps(deltas, p), accum); - } - - sumf = hsum_float_8(accum); -#elif defined(__SSSE3__) - // set constants - const __m128i lowMask = _mm_set1_epi8(0xF); - const __m128i off = _mm_set1_epi8(8); - - // Initialize accumulator with zeros - __m128 acc_0 = _mm_setzero_ps(); - __m128 acc_1 = _mm_setzero_ps(); - __m128 acc_2 = _mm_setzero_ps(); - __m128 acc_3 = _mm_setzero_ps(); - - for (; ib + 1 < nb; ib += 2) { - _mm_prefetch(&x[ib] + sizeof(block_q4_0), _MM_HINT_T0); - _mm_prefetch(&y[ib] + sizeof(block_q8_0), _MM_HINT_T0); - - // Compute combined scale for the block 0 and 1 - const __m128 d_0_1 = _mm_set1_ps( GGML_FP16_TO_FP32(x[ib].d) * GGML_FP16_TO_FP32(y[ib].d) ); - - const __m128i tmp_0_1 = _mm_loadu_si128((const __m128i *)x[ib].qs); - - __m128i bx_0 = _mm_and_si128(lowMask, tmp_0_1); - __m128i by_0 = _mm_loadu_si128((const __m128i *)y[ib].qs); - bx_0 = _mm_sub_epi8(bx_0, off); - const __m128i i32_0 = mul_sum_i8_pairs(bx_0, by_0); - - __m128i bx_1 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_0_1, 4)); - __m128i by_1 = _mm_loadu_si128((const __m128i *)(y[ib].qs + 16)); - bx_1 = _mm_sub_epi8(bx_1, off); - const __m128i i32_1 = mul_sum_i8_pairs(bx_1, by_1); - - _mm_prefetch(&x[ib] + 2 * sizeof(block_q4_0), _MM_HINT_T0); - _mm_prefetch(&y[ib] + 2 * sizeof(block_q8_0), _MM_HINT_T0); - - // Compute combined scale for the block 2 and 3 - const __m128 d_2_3 = _mm_set1_ps( GGML_FP16_TO_FP32(x[ib + 1].d) * GGML_FP16_TO_FP32(y[ib + 1].d) ); - - const __m128i tmp_2_3 = _mm_loadu_si128((const __m128i *)x[ib + 1].qs); - - __m128i bx_2 = 
-            _mm_and_si128(lowMask, tmp_2_3);
-        __m128i by_2 = _mm_loadu_si128((const __m128i *)y[ib + 1].qs);
-        bx_2 = _mm_sub_epi8(bx_2, off);
-        const __m128i i32_2 = mul_sum_i8_pairs(bx_2, by_2);
-
-        __m128i bx_3 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_2_3, 4));
-        __m128i by_3 = _mm_loadu_si128((const __m128i *)(y[ib + 1].qs + 16));
-        bx_3 = _mm_sub_epi8(bx_3, off);
-        const __m128i i32_3 = mul_sum_i8_pairs(bx_3, by_3);
-
-        // Convert int32_t to float
-        __m128 p0 = _mm_cvtepi32_ps(i32_0);
-        __m128 p1 = _mm_cvtepi32_ps(i32_1);
-        __m128 p2 = _mm_cvtepi32_ps(i32_2);
-        __m128 p3 = _mm_cvtepi32_ps(i32_3);
-
-        // Apply the scale
-        __m128 p0_d = _mm_mul_ps( d_0_1, p0 );
-        __m128 p1_d = _mm_mul_ps( d_0_1, p1 );
-        __m128 p2_d = _mm_mul_ps( d_2_3, p2 );
-        __m128 p3_d = _mm_mul_ps( d_2_3, p3 );
-
-        // Accumulate
-        acc_0 = _mm_add_ps(p0_d, acc_0);
-        acc_1 = _mm_add_ps(p1_d, acc_1);
-        acc_2 = _mm_add_ps(p2_d, acc_2);
-        acc_3 = _mm_add_ps(p3_d, acc_3);
-    }
-
-    sumf = hsum_float_4x4(acc_0, acc_1, acc_2, acc_3);
-#elif defined(__riscv_v_intrinsic)
-    size_t vl = qk / 2;
-
-    for (; ib < nb; ++ib) {
-        // load elements
-        vuint8m1_t tx = __riscv_vle8_v_u8m1(x[ib].qs, vl);
-
-        vint8m1_t y0 = __riscv_vle8_v_i8m1(y[ib].qs, vl);
-        vint8m1_t y1 = __riscv_vle8_v_i8m1(y[ib].qs+16, vl);
-
-        // mask and store lower part of x, and then upper part
-        vuint8m1_t x_a = __riscv_vand_vx_u8m1(tx, 0x0F, vl);
-        vuint8m1_t x_l = __riscv_vsrl_vx_u8m1(tx, 0x04, vl);
-
-        vint8m1_t x_ai = __riscv_vreinterpret_v_u8m1_i8m1(x_a);
-        vint8m1_t x_li = __riscv_vreinterpret_v_u8m1_i8m1(x_l);
-
-        // subtract offset
-        vint8m1_t v0 = __riscv_vsub_vx_i8m1(x_ai, 8, vl);
-        vint8m1_t v1 = __riscv_vsub_vx_i8m1(x_li, 8, vl);
-
-        vint16m2_t vec_mul1 = __riscv_vwmul_vv_i16m2(v0, y0, vl);
-        vint16m2_t vec_mul2 = __riscv_vwmacc_vv_i16m2(vec_mul1, v1, y1, vl);
-
-        vint32m1_t vec_zero = __riscv_vmv_v_x_i32m1(0, vl);
-        vint32m1_t vs2 = __riscv_vwredsum_vs_i16m2_i32m1(vec_mul2, vec_zero, vl);
-
-        int sumi = __riscv_vmv_x_s_i32m1_i32(vs2);
-
-        sumf += sumi*GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d);
-    }
-
-#elif defined(__POWER9_VECTOR__)
-    const vector signed char lowMask = vec_splats((signed char)0xF);
-    const vector signed int v0 = vec_splats((int32_t)0);
-    const vector unsigned char v4 = vec_splats((unsigned char)0x4);
-    const vector signed char v8 = vec_splats((signed char)0x8);
-
-    vector float vsumf0 = vec_splats(0.0f);
-
-#pragma GCC unroll 8
-    for (; ib < nb; ++ib) {
-        __builtin_prefetch(x[ib].qs, 0, 1);
-        __builtin_prefetch(y[ib].qs, 0, 1);
-
-        vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[ib].d));
-        vector float vyd = vec_splats(GGML_FP16_TO_FP32(y[ib].d));
-        vector float vd = vec_mul(vxd, vyd);
-
-        vector signed char qxs = (vector signed char)vec_xl( 0, x[ib].qs);
-        vector signed char q8y0 = vec_xl( 0, y[ib].qs);
-        vector signed char q8y1 = vec_xl(16, y[ib].qs);
-
-        vector signed char q4x0 = vec_and(qxs, lowMask);
-        vector signed char q4x1 = vec_sr(qxs, v4);
-
-        q4x0 = vec_sub(q4x0, v8);
-        q4x1 = vec_sub(q4x1, v8);
-
-        vector signed short qv0 = vec_add(vec_mule(q4x0, q8y0), vec_mulo(q4x0, q8y0));
-        vector signed short qv1 = vec_add(vec_mule(q4x1, q8y1), vec_mulo(q4x1, q8y1));
-
-        vector signed int vsumi0 = v0;
-
-        vsumi0 = vec_sum4s(qv0, vsumi0);
-        vsumi0 = vec_sum4s(qv1, vsumi0);
-
-        vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0);
-    }
-
-    vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4));
-    vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8));
-
-    sumf = vec_extract(vsumf0, 0);
-
-#elif defined(__loongarch_asx)
-    // Initialize
accumulator with zeros
-    __m256 acc = (__m256)__lasx_xvldi(0);
-
-    // Main loop
-    for (; ib < nb; ++ib) {
-        /* Compute combined scale for the block */
-        const __m256 d = __lasx_xvreplfr2vr_s( GGML_FP16_TO_FP32(x[ib].d) * GGML_FP16_TO_FP32(y[ib].d) );
-
-        __m256i qx = bytes_from_nibbles_32(x[ib].qs);
-
-        // Now we have a vector with bytes in [ 0 .. 15 ] interval. Offset them into [ -8 .. +7 ] interval.
-        const __m256i off = __lasx_xvreplgr2vr_b( 8 );
-        qx = __lasx_xvsub_b( qx, off );
-
-        __m256i qy = __lasx_xvld((const __m256i *)y[ib].qs, 0);
-
-        const __m256 q = mul_sum_i8_pairs_float(qx, qy);
-
-        /* Multiply q with scale and accumulate */
-        acc = __lasx_xvfmadd_s( d, q, acc );
-    }
-
-    sumf = hsum_float_8(acc);
-
-#elif defined(__loongarch_sx)
-    // set constants
-    const __m128i low_mask = __lsx_vreplgr2vr_b(0xF);
-    const __m128i off = __lsx_vreplgr2vr_b(8);
-
-    // Initialize accumulator with zeros
-    __m128 acc_0 = (__m128)__lsx_vldi(0);
-    __m128 acc_1 = (__m128)__lsx_vldi(0);
-    __m128 acc_2 = (__m128)__lsx_vldi(0);
-    __m128 acc_3 = (__m128)__lsx_vldi(0);
-
-    for (; ib + 1 < nb; ib += 2) {
-
-        // Compute combined scale for the block 0 and 1
-        const __m128 d_0_1 = (__m128)__lsx_vreplgr2vr_w( GGML_FP16_TO_FP32(x[ib].d) * GGML_FP16_TO_FP32(y[ib].d) );
-
-        const __m128i tmp_0_1 = __lsx_vld((const __m128i *)x[ib].qs, 0);
-
-        __m128i bx_0 = __lsx_vand_v(low_mask, tmp_0_1);
-        __m128i by_0 = __lsx_vld((const __m128i *)y[ib].qs, 0);
-        bx_0 = __lsx_vsub_b(bx_0, off);
-        const __m128i i32_0 = mul_sum_i8_pairs(bx_0, by_0);
-
-        __m128i bx_1 = __lsx_vand_v(low_mask, __lsx_vsrli_d(tmp_0_1, 4));
-        __m128i by_1 = __lsx_vld((const __m128i *)(y[ib].qs + 16), 0);
-        bx_1 = __lsx_vsub_b(bx_1, off);
-        const __m128i i32_1 = mul_sum_i8_pairs(bx_1, by_1);
-
-        //_mm_prefetch(&x[ib] + 2 * sizeof(block_q4_0), _MM_HINT_T0);
-        //_mm_prefetch(&y[ib] + 2 * sizeof(block_q8_0), _MM_HINT_T0);
-
-        // Compute combined scale for the block 2 and 3
-        const __m128 d_2_3 = (__m128)__lsx_vreplgr2vr_w( GGML_FP16_TO_FP32(x[ib + 1].d) * GGML_FP16_TO_FP32(y[ib + 1].d) );
-
-        const __m128i tmp_2_3 = __lsx_vld((const __m128i *)x[ib + 1].qs, 0);
-
-        __m128i bx_2 = __lsx_vand_v(low_mask, tmp_2_3);
-        __m128i by_2 = __lsx_vld((const __m128i *)y[ib + 1].qs, 0);
-        bx_2 = __lsx_vsub_b(bx_2, off);
-        const __m128i i32_2 = mul_sum_i8_pairs(bx_2, by_2);
-
-        __m128i bx_3 = __lsx_vand_v(low_mask, __lsx_vsrli_d(tmp_2_3, 4));
-        __m128i by_3 = __lsx_vld((const __m128i *)(y[ib + 1].qs + 16), 0);
-        bx_3 = __lsx_vsub_b(bx_3, off);
-        const __m128i i32_3 = mul_sum_i8_pairs(bx_3, by_3);
-
-        // Convert int32_t to float
-        __m128 p0 = __lsx_vffint_s_w(i32_0);
-        __m128 p1 = __lsx_vffint_s_w(i32_1);
-        __m128 p2 = __lsx_vffint_s_w(i32_2);
-        __m128 p3 = __lsx_vffint_s_w(i32_3);
-
-        // Apply the scale
-        __m128 p0_d = __lsx_vfmul_s( d_0_1, p0 );
-        __m128 p1_d = __lsx_vfmul_s( d_0_1, p1 );
-        __m128 p2_d = __lsx_vfmul_s( d_2_3, p2 );
-        __m128 p3_d = __lsx_vfmul_s( d_2_3, p3 );
-
-        // Accumulate
-        acc_0 = __lsx_vfadd_s(p0_d, acc_0);
-        acc_1 = __lsx_vfadd_s(p1_d, acc_1);
-        acc_2 = __lsx_vfadd_s(p2_d, acc_2);
-        acc_3 = __lsx_vfadd_s(p3_d, acc_3);
-    }
-
-    sumf = hsum_float_4x4(acc_0, acc_1, acc_2, acc_3);
-#elif defined(__VXE__) || defined(__VXE2__)
-    __vector float acc = vec_splats(0.0f);
-
-    const __vector uint8_t v_m = vec_splats((const uint8_t)0x0F);
-    const __vector int8_t  v_s = vec_splats( (const int8_t)0x08);
-
-    for (; ib < nb; ++ib) {
-        const __vector uint8_t v_x = vec_xl(0, x[ib].qs);
-        const __vector int8_t v_xl = (const __vector int8_t)(v_x & v_m);
-        const __vector int8_t v_xh = (const __vector int8_t)(v_x >> 4);
-
-        const __vector int8_t v_xls = vec_sub(v_xl, v_s);
-        const __vector int8_t v_xhs = vec_sub(v_xh, v_s);
-
-        const __vector int8_t v_yl = vec_xl(0      , y[ib].qs);
-        const __vector int8_t v_yh = vec_xl(QK8_0/2, y[ib].qs);
-
-        const __vector int16_t v_xylso = vec_mulo(v_xls, v_yl);
-        const __vector int16_t v_xylse = vec_mule(v_xls, v_yl);
-        const __vector int16_t v_xyhso = vec_mulo(v_xhs, v_yh);
-        const __vector int16_t v_xyhse = vec_mule(v_xhs, v_yh);
-
-        __vector int16_t v_xy_ = v_xylso + v_xylse + v_xyhso + v_xyhse; v_xy_ += vec_reve(v_xy_);
-
-        const __vector float v_xy = vec_float(vec_unpackh(v_xy_));
-        const __vector float v_d = vec_splats(GGML_FP16_TO_FP32(x[ib].d) * GGML_FP16_TO_FP32(y[ib].d));
-
-        acc = vec_madd(v_xy, v_d, acc);
-    }
-
-    sumf = acc[0] + acc[1] + acc[2] + acc[3];
-#endif
-    for (; ib < nb; ++ib) {
-        int sumi0 = 0;
-        int sumi1 = 0;
-
-        for (int j = 0; j < qk/2; ++j) {
-            const int v0 = (x[ib].qs[j] & 0x0F) - 8;
-            const int v1 = (x[ib].qs[j] >> 4) - 8;
-
-            sumi0 += (v0 * y[ib].qs[j]);
-            sumi1 += (v1 * y[ib].qs[j + qk/2]);
-        }
-
-        int sumi = sumi0 + sumi1;
-        sumf += sumi*GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d);
-    }
-
-    *s = sumf;
-}
-
-void ggml_vec_dot_q4_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
-    const int qk = QK8_1;
-    const int nb = n / qk;
-
-    assert(n % qk == 0);
-#if defined(__ARM_FEATURE_MATMUL_INT8)
-    assert((nrc == 2) || (nrc == 1));
-#else
-    assert(nrc == 1);
-#endif
-    UNUSED(nrc);
-    UNUSED(bx);
-    UNUSED(by);
-    UNUSED(bs);
-
-    const block_q4_1 * GGML_RESTRICT x = vx;
-    const block_q8_1 * GGML_RESTRICT y = vy;
-
-#if defined(__ARM_FEATURE_MATMUL_INT8)
-    if (nrc == 2) {
-        const block_q4_1 * GGML_RESTRICT vx0 = vx;
-        const block_q4_1 * GGML_RESTRICT vx1 = (const block_q4_1 *) ((const uint8_t*)vx + bx);
-        const block_q8_1 * GGML_RESTRICT vy0 = vy;
-        const block_q8_1 * GGML_RESTRICT vy1 = (const block_q8_1 *) ((const uint8_t*)vy + by);
-
-        float32x4_t sumv0 = vdupq_n_f32(0.0f);
-        float32x4_t summs0 = vdupq_n_f32(0.0f);
-
-        for (int i = 0; i < nb; i++) {
-            const block_q4_1 * GGML_RESTRICT b_x0 = &vx0[i];
-            const block_q4_1 * GGML_RESTRICT b_x1 = &vx1[i];
-            const block_q8_1 * GGML_RESTRICT b_y0 = &vy0[i];
-            const block_q8_1 * GGML_RESTRICT b_y1 = &vy1[i];
-
-            float32_t summs_t[4] = {
-                GGML_FP16_TO_FP32(b_x0->m) * GGML_FP16_TO_FP32(b_y0->s),
-                GGML_FP16_TO_FP32(b_x1->m) * GGML_FP16_TO_FP32(b_y0->s),
-                GGML_FP16_TO_FP32(b_x0->m) * GGML_FP16_TO_FP32(b_y1->s),
-                GGML_FP16_TO_FP32(b_x1->m) * GGML_FP16_TO_FP32(b_y1->s)
-            };
-            summs0 = vaddq_f32(summs0, vld1q_f32(summs_t));
-
-            const uint8x16_t m4b = vdupq_n_u8(0x0F);
-
-            const uint8x16_t v0_0 = vld1q_u8(b_x0->qs);
-            const uint8x16_t v0_1 = vld1q_u8(b_x1->qs);
-
-            // 4-bit -> 8-bit
-            const int8x16_t x0_l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b));
-            const int8x16_t x0_h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
-            const int8x16_t x1_l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
-            const int8x16_t x1_h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));
-
-            // load y
-            const int8x16_t y0_l = vld1q_s8(b_y0->qs);
-            const int8x16_t y0_h = vld1q_s8(b_y0->qs + 16);
-            const int8x16_t y1_l = vld1q_s8(b_y1->qs);
-            const int8x16_t y1_h = vld1q_s8(b_y1->qs + 16);
-
-            // mmla into int32x4_t
-            float32_t _scale[4] = {
-                GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y0->d),
-                GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y1->d),
GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y0->d), - GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y1->d) - }; - float32x4_t scale = vld1q_f32(_scale); - - int8x16_t l0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l))); - int8x16_t l1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l))); - - int8x16_t l2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_h), vreinterpretq_s64_s8(x1_h))); - int8x16_t l3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_h), vreinterpretq_s64_s8(x1_h))); - - int8x16_t r0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l))); - int8x16_t r1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l))); - - int8x16_t r2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h))); - int8x16_t r3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h))); - sumv0 = vmlaq_f32(sumv0,(vcvtq_f32_s32(vmmlaq_s32((vmmlaq_s32((vmmlaq_s32((vmmlaq_s32(vdupq_n_s32(0), l0, r0)), - l1, r1)), l2, r2)), l3, r3))), scale); - } - - float32x4_t sumv1 = vextq_f32 (sumv0, sumv0, 2); - float32x4_t sumv2 = vzip1q_f32(sumv0, sumv1); - - sumv2 = vaddq_f32(sumv2, summs0); - - vst1_f32(s, vget_low_f32 (sumv2)); - vst1_f32(s + bs, vget_high_f32(sumv2)); - - return; - } -#endif - - int ib = 0; - float sumf = 0; - - // TODO: add WASM SIMD -#if defined(__ARM_NEON) - float32x4_t sumv0 = vdupq_n_f32(0.0f); - float32x4_t sumv1 = vdupq_n_f32(0.0f); - - float summs = 0; - - for (; ib + 1 < nb; ib += 2) { - const block_q4_1 * GGML_RESTRICT x0 = &x[ib + 0]; - const block_q4_1 * GGML_RESTRICT x1 = &x[ib + 1]; - const block_q8_1 * GGML_RESTRICT y0 = &y[ib + 0]; - const block_q8_1 * GGML_RESTRICT y1 = &y[ib + 1]; - - summs += GGML_FP16_TO_FP32(x0->m) * GGML_FP16_TO_FP32(y0->s) + GGML_FP16_TO_FP32(x1->m) * GGML_FP16_TO_FP32(y1->s); - - const uint8x16_t m4b = vdupq_n_u8(0x0F); - - const uint8x16_t v0_0 = vld1q_u8(x0->qs); - const uint8x16_t v0_1 = vld1q_u8(x1->qs); - - // 4-bit -> 8-bit - const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b)); - const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4)); - const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b)); - const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4)); - - // load y - const int8x16_t v1_0l = vld1q_s8(y0->qs); - const int8x16_t v1_0h = vld1q_s8(y0->qs + 16); - const int8x16_t v1_1l = vld1q_s8(y1->qs); - const int8x16_t v1_1h = vld1q_s8(y1->qs + 16); - - // dot product into int32x4_t - const int32x4_t p_0 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), v0_0l, v1_0l), v0_0h, v1_0h); - const int32x4_t p_1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), v0_1l, v1_1l), v0_1h, v1_1h); - - sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(p_0), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d)); - sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(p_1), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d)); - } - - sumf = vaddvq_f32(sumv0) + vaddvq_f32(sumv1) + summs; -#elif defined(__AVX2__) || defined(__AVX__) - // Initialize accumulator with zeros - __m256 acc = _mm256_setzero_ps(); - - float summs = 0; - - // Main loop - for (; ib < nb; ++ib) { - const float d0 = GGML_FP16_TO_FP32(x[ib].d); - const float d1 = GGML_FP16_TO_FP32(y[ib].d); - - summs += GGML_FP16_TO_FP32(x[ib].m) * GGML_FP16_TO_FP32(y[ib].s); - - const __m256 d0v = _mm256_set1_ps( d0 ); - const __m256 d1v = _mm256_set1_ps( d1 ); - - // 
Compute combined scales - const __m256 d0d1 = _mm256_mul_ps( d0v, d1v ); - - // Load 16 bytes, and unpack 4 bit fields into bytes, making 32 bytes - const __m256i qx = bytes_from_nibbles_32(x[ib].qs); - const __m256i qy = _mm256_loadu_si256( (const __m256i *)y[ib].qs ); - - const __m256 xy = mul_sum_us8_pairs_float(qx, qy); - - // Accumulate d0*d1*x*y -#if defined(__AVX2__) - acc = _mm256_fmadd_ps( d0d1, xy, acc ); -#else - acc = _mm256_add_ps( _mm256_mul_ps( d0d1, xy ), acc ); -#endif - } - - sumf = hsum_float_8(acc) + summs; -#elif defined(__riscv_v_intrinsic) - size_t vl = qk / 2; - - for (; ib < nb; ++ib) { - // load elements - vuint8m1_t tx = __riscv_vle8_v_u8m1(x[ib].qs, vl); - - vint8m1_t y0 = __riscv_vle8_v_i8m1(y[ib].qs, vl); - vint8m1_t y1 = __riscv_vle8_v_i8m1(y[ib].qs+16, vl); - - // mask and store lower part of x, and then upper part - vuint8m1_t x_a = __riscv_vand_vx_u8m1(tx, 0x0F, vl); - vuint8m1_t x_l = __riscv_vsrl_vx_u8m1(tx, 0x04, vl); - - vint8m1_t v0 = __riscv_vreinterpret_v_u8m1_i8m1(x_a); - vint8m1_t v1 = __riscv_vreinterpret_v_u8m1_i8m1(x_l); - - vint16m2_t vec_mul1 = __riscv_vwmul_vv_i16m2(v0, y0, vl); - vint16m2_t vec_mul2 = __riscv_vwmacc_vv_i16m2(vec_mul1, v1, y1, vl); - - vint32m1_t vec_zero = __riscv_vmv_v_x_i32m1(0, vl); - vint32m1_t vs2 = __riscv_vwredsum_vs_i16m2_i32m1(vec_mul2, vec_zero, vl); - - int sumi = __riscv_vmv_x_s_i32m1_i32(vs2); - - sumf += (GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d))*sumi + GGML_FP16_TO_FP32(x[ib].m)*GGML_FP16_TO_FP32(y[ib].s); - } - -#elif defined(__POWER9_VECTOR__) - const vector signed char lowMask = vec_splats((signed char)0xF); - const vector signed int v0 = vec_splats((int32_t)0); - const vector unsigned char v4 = vec_splats((unsigned char)0x4); - - vector float vsumf0 = vec_splats(0.0f); - -#pragma GCC unroll 4 - for (; ib < nb; ++ib) { - __builtin_prefetch(x[ib].qs, 0, 1); - __builtin_prefetch(y[ib].qs, 0, 1); - - vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[ib].d)); - vector float vyd = vec_splats(GGML_FP16_TO_FP32(y[ib].d)); - vector float vd = vec_mul(vxd, vyd); - - vector float vxmin = vec_splats(GGML_FP16_TO_FP32(x[ib].m)); - vector float vys = {GGML_FP16_TO_FP32(y[ib].s), 0.0f, 0.0f, 0.0f}; - vsumf0 = vec_madd(vxmin, vys, vsumf0); - - vector signed char qxs = (vector signed char)vec_xl( 0, x[ib].qs); - vector signed char q8y0 = vec_xl( 0, y[ib].qs); - vector signed char q8y1 = vec_xl(16, y[ib].qs); - - vector unsigned char q4x0 = (vector unsigned char)vec_and(qxs, lowMask); - vector unsigned char q4x1 = (vector unsigned char)vec_sr(qxs, v4); - - vector signed int vsumi0 = v0; - - vsumi0 = vec_msum(q8y0, q4x0, vsumi0); - vsumi0 = vec_msum(q8y1, q4x1, vsumi0); - - vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); - } - - vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); - vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); - - sumf = vec_extract(vsumf0, 0); - -#elif defined(__loongarch_asx) - // Initialize accumulator with zeros - __m256 acc = (__m256)__lasx_xvldi(0); - - float summs = 0; - - // Main loop - for (; ib < nb; ++ib) { - const float d0 = GGML_FP16_TO_FP32(x[ib].d); - const float d1 = GGML_FP16_TO_FP32(y[ib].d); - - summs += GGML_FP16_TO_FP32(x[ib].m) * GGML_FP16_TO_FP32(y[ib].s); - - const __m256 d0v = __lasx_xvreplfr2vr_s( d0 ); - const __m256 d1v = __lasx_xvreplfr2vr_s( d1 ); - - // Compute combined scales - const __m256 d0d1 = __lasx_xvfmul_s( d0v, d1v ); - - // Load 16 bytes, and unpack 4 bit fields into bytes, making 32 bytes - const __m256i qx = 
bytes_from_nibbles_32(x[ib].qs); - const __m256i qy = __lasx_xvld( (const __m256i *)y[ib].qs, 0); - - const __m256 xy = mul_sum_us8_pairs_float(qx, qy); - - // Accumulate d0*d1*x*y - acc = __lasx_xvfmadd_s( d0d1, xy, acc ); - } - - sumf = hsum_float_8(acc) + summs; -#elif defined(__VXE__) || defined(__VXE2__) - float summs = 0; - float32x4_t acc = vec_splats(0.0f); - - const uint8x16_t v_m = vec_splat_u8(0x0F); - -#pragma GCC unroll 4 - for (; ib < nb; ++ib) { - __builtin_prefetch(x[ib].qs, 0, 1); - __builtin_prefetch(y[ib].qs, 0, 1); - - summs += GGML_FP16_TO_FP32(x[ib].m) * GGML_FP16_TO_FP32(y[ib].s); - - const uint8x16_t v_x = vec_xl(0, x[ib].qs); - const int8x16_t v_xl = (const int8x16_t)(v_x & v_m); - const int8x16_t v_xh = (const int8x16_t)(v_x >> 4); - - const int8x16_t v_yl = vec_xl(0 , y[ib].qs); - const int8x16_t v_yh = vec_xl(QK8_1/2, y[ib].qs); - - const int32x4_t v_xy_ = ggml_vec_dot(ggml_vec_dot(vec_splats(0), v_xl, v_yl), v_xh, v_yh); - const float32x4_t v_xy = vec_float(v_xy_); - - const float32x4_t v_d = vec_splats(GGML_FP16_TO_FP32(x[ib].d) * GGML_FP16_TO_FP32(y[ib].d)); - - acc = vec_madd(v_xy, v_d, acc); - } - - sumf = acc[0] + acc[1] + acc[2] + acc[3] + summs; -#endif - for (; ib < nb; ++ib) { - int sumi0 = 0; - int sumi1 = 0; - - for (int j = 0; j < qk/2; ++j) { - const int v0 = (x[ib].qs[j] & 0x0F); - const int v1 = (x[ib].qs[j] >> 4); - - sumi0 += (v0 * y[ib].qs[j]); - sumi1 += (v1 * y[ib].qs[j + qk/2]); - } - - int sumi = sumi0 + sumi1; - sumf += (GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d))*sumi + GGML_FP16_TO_FP32(x[ib].m)*GGML_FP16_TO_FP32(y[ib].s); - } - - *s = sumf; -} - -void ggml_vec_dot_q5_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { - const int qk = QK8_0; - const int nb = n / qk; - - int ib = 0; - float sumf = 0; - - assert(n % qk == 0); - assert(qk == QK5_0); - assert(nrc == 1); - UNUSED(nrc); - UNUSED(bx); - UNUSED(by); - UNUSED(bs); - - const block_q5_0 * GGML_RESTRICT x = vx; - const block_q8_0 * GGML_RESTRICT y = vy; - -#if defined(__ARM_NEON) - float32x4_t sumv0 = vdupq_n_f32(0.0f); - float32x4_t sumv1 = vdupq_n_f32(0.0f); - - uint32_t qh0; - uint32_t qh1; - - uint64_t tmp0[4]; - uint64_t tmp1[4]; - - for (; ib + 1 < nb; ib += 2) { - const block_q5_0 * GGML_RESTRICT x0 = &x[ib]; - const block_q5_0 * GGML_RESTRICT x1 = &x[ib + 1]; - const block_q8_0 * GGML_RESTRICT y0 = &y[ib]; - const block_q8_0 * GGML_RESTRICT y1 = &y[ib + 1]; - - const uint8x16_t m4b = vdupq_n_u8(0x0F); - - // extract the 5th bit via lookup table ((!b) << 4) - memcpy(&qh0, x0->qh, sizeof(qh0)); - memcpy(&qh1, x1->qh, sizeof(qh1)); - - tmp0[0] = table_b2b_1[(qh0 >> 0) & 0xFF]; - tmp0[1] = table_b2b_1[(qh0 >> 8) & 0xFF]; - tmp0[2] = table_b2b_1[(qh0 >> 16) & 0xFF]; - tmp0[3] = table_b2b_1[(qh0 >> 24) ]; - - tmp1[0] = table_b2b_1[(qh1 >> 0) & 0xFF]; - tmp1[1] = table_b2b_1[(qh1 >> 8) & 0xFF]; - tmp1[2] = table_b2b_1[(qh1 >> 16) & 0xFF]; - tmp1[3] = table_b2b_1[(qh1 >> 24) ]; - - const int8x16_t qhl0 = vld1q_s8((const int8_t *)(tmp0 + 0)); - const int8x16_t qhh0 = vld1q_s8((const int8_t *)(tmp0 + 2)); - const int8x16_t qhl1 = vld1q_s8((const int8_t *)(tmp1 + 0)); - const int8x16_t qhh1 = vld1q_s8((const int8_t *)(tmp1 + 2)); - - const uint8x16_t v0_0 = vld1q_u8(x0->qs); - const uint8x16_t v0_1 = vld1q_u8(x1->qs); - - // 4-bit -> 8-bit - int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b)); - int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4)); - int8x16_t v0_1l = 
vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b)); - int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4)); - - // add high bit and sub 16 (equivalent to sub 0x10 when bit is zero) - const int8x16_t v0_0lf = vsubq_s8(v0_0l, qhl0); - const int8x16_t v0_0hf = vsubq_s8(v0_0h, qhh0); - const int8x16_t v0_1lf = vsubq_s8(v0_1l, qhl1); - const int8x16_t v0_1hf = vsubq_s8(v0_1h, qhh1); - - // load y - const int8x16_t v1_0l = vld1q_s8(y0->qs); - const int8x16_t v1_0h = vld1q_s8(y0->qs + 16); - const int8x16_t v1_1l = vld1q_s8(y1->qs); - const int8x16_t v1_1h = vld1q_s8(y1->qs + 16); - - sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32( - ggml_vdotq_s32(vdupq_n_s32(0), v0_0lf, v1_0l), - ggml_vdotq_s32(vdupq_n_s32(0), v0_0hf, v1_0h))), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d)); - sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32( - ggml_vdotq_s32(vdupq_n_s32(0), v0_1lf, v1_1l), - ggml_vdotq_s32(vdupq_n_s32(0), v0_1hf, v1_1h))), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d)); - } - - sumf = vaddvq_f32(sumv0) + vaddvq_f32(sumv1); -#elif defined __wasm_simd128__ - v128_t sumv = wasm_f32x4_splat(0.0f); - - uint32_t qh_; - uint64_t tmp[4]; - - // TODO: check if unrolling this is better - for (; ib < nb; ++ib) { - const block_q5_0 * GGML_RESTRICT x0 = &x[ib]; - const block_q8_0 * GGML_RESTRICT y0 = &y[ib]; - - const v128_t m4b = wasm_i8x16_splat(0x0F); - - // extract the 5th bit - memcpy(&qh_, x0->qh, sizeof(qh_)); - - tmp[0] = table_b2b_1[(qh_ >> 0) & 0xFF]; - tmp[1] = table_b2b_1[(qh_ >> 8) & 0xFF]; - tmp[2] = table_b2b_1[(qh_ >> 16) & 0xFF]; - tmp[3] = table_b2b_1[(qh_ >> 24) ]; - - const v128_t qhl = wasm_v128_load(tmp + 0); - const v128_t qhh = wasm_v128_load(tmp + 2); - - const v128_t v0 = wasm_v128_load(x0->qs); - - // 4-bit -> 8-bit - const v128_t v0l = wasm_v128_and (v0, m4b); - const v128_t v0h = wasm_u8x16_shr(v0, 4); - - // add high bit and sub 16 (equivalent to sub 0x10 when bit is zero) - const v128_t v0lf = wasm_i8x16_sub(v0l, qhl); - const v128_t v0hf = wasm_i8x16_sub(v0h, qhh); - - // load y - const v128_t v1l = wasm_v128_load(y0->qs); - const v128_t v1h = wasm_v128_load(y0->qs + 16); - - // int8x16 -> int16x8 - const v128_t v0lfl = wasm_i16x8_extend_low_i8x16 (v0lf); - const v128_t v0lfh = wasm_i16x8_extend_high_i8x16(v0lf); - const v128_t v0hfl = wasm_i16x8_extend_low_i8x16 (v0hf); - const v128_t v0hfh = wasm_i16x8_extend_high_i8x16(v0hf); - - const v128_t v1ll = wasm_i16x8_extend_low_i8x16 (v1l); - const v128_t v1lh = wasm_i16x8_extend_high_i8x16(v1l); - const v128_t v1hl = wasm_i16x8_extend_low_i8x16 (v1h); - const v128_t v1hh = wasm_i16x8_extend_high_i8x16(v1h); - - // dot product - sumv = wasm_f32x4_add(sumv, wasm_f32x4_mul(wasm_f32x4_convert_i32x4( - wasm_i32x4_add( - wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0lfl, v1ll), - wasm_i32x4_dot_i16x8(v0lfh, v1lh)), - wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0hfl, v1hl), - wasm_i32x4_dot_i16x8(v0hfh, v1hh)))), - wasm_f32x4_splat(GGML_FP16_TO_FP32(x0->d) * GGML_FP16_TO_FP32(y0->d)))); - } - - sumf = wasm_f32x4_extract_lane(sumv, 0) + wasm_f32x4_extract_lane(sumv, 1) + - wasm_f32x4_extract_lane(sumv, 2) + wasm_f32x4_extract_lane(sumv, 3); -#elif defined(__AVX2__) - // Initialize accumulator with zeros - __m256 acc = _mm256_setzero_ps(); - - // Main loop - for (; ib < nb; ++ib) { - /* Compute combined scale for the block */ - const __m256 d = _mm256_set1_ps(GGML_FP16_TO_FP32(x[ib].d) * GGML_FP16_TO_FP32(y[ib].d)); - - __m256i qx = bytes_from_nibbles_32(x[ib].qs); - __m256i bxhi = bytes_from_bits_32(x[ib].qh); - bxhi = 
_mm256_andnot_si256(bxhi, _mm256_set1_epi8((char)0xF0)); - qx = _mm256_or_si256(qx, bxhi); - - __m256i qy = _mm256_loadu_si256((const __m256i *)y[ib].qs); - - const __m256 q = mul_sum_i8_pairs_float(qx, qy); - - /* Multiply q with scale and accumulate */ - acc = _mm256_fmadd_ps(d, q, acc); - } - - sumf = hsum_float_8(acc); -#elif defined(__AVX__) - // Initialize accumulator with zeros - __m256 acc = _mm256_setzero_ps(); - __m128i mask = _mm_set1_epi8((char)0xF0); - - // Main loop - for (; ib < nb; ++ib) { - /* Compute combined scale for the block */ - const __m256 d = _mm256_set1_ps(GGML_FP16_TO_FP32(x[ib].d) * GGML_FP16_TO_FP32(y[ib].d)); - - __m256i bx_0 = bytes_from_nibbles_32(x[ib].qs); - const __m256i bxhi = bytes_from_bits_32(x[ib].qh); - __m128i bxhil = _mm256_castsi256_si128(bxhi); - __m128i bxhih = _mm256_extractf128_si256(bxhi, 1); - bxhil = _mm_andnot_si128(bxhil, mask); - bxhih = _mm_andnot_si128(bxhih, mask); - __m128i bxl = _mm256_castsi256_si128(bx_0); - __m128i bxh = _mm256_extractf128_si256(bx_0, 1); - bxl = _mm_or_si128(bxl, bxhil); - bxh = _mm_or_si128(bxh, bxhih); - bx_0 = MM256_SET_M128I(bxh, bxl); - - const __m256i by_0 = _mm256_loadu_si256((const __m256i *)y[ib].qs); - - const __m256 q = mul_sum_i8_pairs_float(bx_0, by_0); - - /* Multiply q with scale and accumulate */ - acc = _mm256_add_ps(_mm256_mul_ps(d, q), acc); - } - - sumf = hsum_float_8(acc); -#elif defined(__riscv_v_intrinsic) - size_t vl; - size_t vlenb = __riscv_vlenb(); - - for (; ib < nb; ++ib) { - vl = qk / 2; - vuint8m1_t v0 = __riscv_vle8_v_u8m1(x[ib].qs, vl); - vint8m1_t v0l = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(v0, 0x0F, vl)); - vint8m1_t v0h = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vsrl_vx_u8m1(v0, 4, vl)); - vint8m2_t v0c; - if (vlenb == 16) { - v0c = __riscv_vcreate_v_i8m1_i8m2(v0l, v0h); - } else { - v0l = __riscv_vslideup_vx_i8m1(v0l, v0h, 16, 32); - v0c = __riscv_vlmul_ext_v_i8m1_i8m2(v0l); - } - - vl = qk; - vbool4_t qh = __riscv_vlm_v_b4(x[ib].qh, vl); - qh = __riscv_vmnand_mm_b4(qh, qh, vl); - vint8m2_t v0f = __riscv_vsub_vx_i8m2_mu(qh, v0c, v0c, 0x10, vl); - vint8m2_t v1 = __riscv_vle8_v_i8m2(y[ib].qs, vl); - vint16m4_t mul = __riscv_vwmul_vv_i16m4(v0f, v1, vl); - vint32m1_t zero = __riscv_vmv_v_x_i32m1(0, vl); - vint32m1_t sum = __riscv_vwredsum_vs_i16m4_i32m1(mul, zero, vl); - int32_t sumi = __riscv_vmv_x_s_i32m1_i32(sum); - - sumf += (GGML_FP16_TO_FP32(x[ib].d) * GGML_FP16_TO_FP32(y[ib].d)) * sumi; - } - -#elif defined(__POWER9_VECTOR__) - const vector signed char lowMask = vec_splats((signed char)0xF); - const vector unsigned char v4 = vec_splats((unsigned char)4); - - vector float vsumf0 = vec_splats(0.0f); - -#pragma GCC unroll 4 - for (; ib < nb; ++ib) { - __builtin_prefetch(x[ib].qs, 0, 1); - __builtin_prefetch(y[ib].qs, 0, 1); - - vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[ib].d)); - vector float vyd = vec_splats(GGML_FP16_TO_FP32(y[ib].d)); - vector float vd = vec_mul(vxd, vyd); - - vector signed long long aux64x2_0 = {(uint64_t)(table_b2b_1[x[ib].qh[0]]), (uint64_t)(table_b2b_1[x[ib].qh[1]])}; - vector signed long long aux64x2_1 = {(uint64_t)(table_b2b_1[x[ib].qh[2]]), (uint64_t)(table_b2b_1[x[ib].qh[3]])}; - - vector signed char qh0 = (vector signed char)aux64x2_0; - vector signed char qh1 = (vector signed char)aux64x2_1; - - vector signed char qxs = (vector signed char)vec_xl( 0, x[ib].qs); - - vector signed char q5x0 = vec_sub(vec_and (qxs, lowMask), qh0); - vector signed char q5x1 = vec_sub(vec_sr(qxs, v4), qh1); - - vector signed char q8y0 = 
vec_xl( 0, y[ib].qs); - vector signed char q8y1 = vec_xl( 16, y[ib].qs); - - vector signed short qv0 = vec_add(vec_mule(q5x0, q8y0), vec_mulo(q5x0, q8y0)); - vector signed short qv1 = vec_add(vec_mule(q5x1, q8y1), vec_mulo(q5x1, q8y1)); - - qv0 = vec_add(qv0, qv1); - - vector signed int vsumi0 = vec_add(vec_unpackh(qv0), vec_unpackl(qv0)); - - vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); - } - - vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); - vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); - - sumf = vec_extract(vsumf0, 0); - -#elif defined(__loongarch_asx) - // Initialize accumulator with zeros - __m256 acc = (__m256)__lasx_xvldi(0); - - // Main loop - for (; ib < nb; ++ib) { - /* Compute combined scale for the block */ - const __m256 d = __lasx_xvreplfr2vr_s(GGML_FP16_TO_FP32(x[ib].d) * GGML_FP16_TO_FP32(y[ib].d)); //FIXME - - __m256i qx = bytes_from_nibbles_32(x[ib].qs); - __m256i bxhi = bytes_from_bits_32(x[ib].qh); - bxhi = __lasx_xvandn_v(bxhi, __lasx_xvreplgr2vr_b((char)0xF0)); - qx = __lasx_xvor_v(qx, bxhi); - - __m256i qy = __lasx_xvld((const __m256i *)y[ib].qs, 0); - - const __m256 q = mul_sum_i8_pairs_float(qx, qy); - - /* Multiply q with scale and accumulate */ - acc = __lasx_xvfmadd_s(d, q, acc); - } - - sumf = hsum_float_8(acc); -#endif - for (; ib < nb; ++ib) { - uint32_t qh; - memcpy(&qh, x[ib].qh, sizeof(qh)); - - int sumi0 = 0; - int sumi1 = 0; - - for (int j = 0; j < qk/2; ++j) { - const uint8_t xh_0 = ((qh & (1u << (j + 0 ))) >> (j + 0 )) << 4; - const uint8_t xh_1 = ((qh & (1u << (j + 16))) >> (j + 12)); - - const int32_t x0 = (int8_t)(((x[ib].qs[j] & 0x0F) | xh_0) - 16); - const int32_t x1 = (int8_t)(((x[ib].qs[j] >> 4) | xh_1) - 16); - - sumi0 += (x0 * y[ib].qs[j]); - sumi1 += (x1 * y[ib].qs[j + qk/2]); - } - - int sumi = sumi0 + sumi1; - sumf += (GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d)) * sumi; - } - - *s = sumf; -} - -void ggml_vec_dot_q5_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { - const int qk = QK8_1; - const int nb = n / qk; - - int ib = 0; - float sumf = 0; - - assert(n % qk == 0); - assert(qk == QK5_1); - assert(nrc == 1); - UNUSED(nrc); - UNUSED(bx); - UNUSED(by); - UNUSED(bs); - - const block_q5_1 * GGML_RESTRICT x = vx; - const block_q8_1 * GGML_RESTRICT y = vy; - -#if defined(__ARM_NEON) - float32x4_t sumv0 = vdupq_n_f32(0.0f); - float32x4_t sumv1 = vdupq_n_f32(0.0f); - - float summs0 = 0.0f; - float summs1 = 0.0f; - - uint32_t qh0; - uint32_t qh1; - - uint64_t tmp0[4]; - uint64_t tmp1[4]; - - for (; ib + 1 < nb; ib += 2) { - const block_q5_1 * GGML_RESTRICT x0 = &x[ib]; - const block_q5_1 * GGML_RESTRICT x1 = &x[ib + 1]; - const block_q8_1 * GGML_RESTRICT y0 = &y[ib]; - const block_q8_1 * GGML_RESTRICT y1 = &y[ib + 1]; - - const uint8x16_t m4b = vdupq_n_u8(0x0F); - - summs0 += GGML_FP16_TO_FP32(x0->m) * GGML_FP16_TO_FP32(y0->s); - summs1 += GGML_FP16_TO_FP32(x1->m) * GGML_FP16_TO_FP32(y1->s); - - // extract the 5th bit via lookup table ((b) << 4) - memcpy(&qh0, x0->qh, sizeof(qh0)); - memcpy(&qh1, x1->qh, sizeof(qh1)); - - tmp0[0] = table_b2b_0[(qh0 >> 0) & 0xFF]; - tmp0[1] = table_b2b_0[(qh0 >> 8) & 0xFF]; - tmp0[2] = table_b2b_0[(qh0 >> 16) & 0xFF]; - tmp0[3] = table_b2b_0[(qh0 >> 24) ]; - - tmp1[0] = table_b2b_0[(qh1 >> 0) & 0xFF]; - tmp1[1] = table_b2b_0[(qh1 >> 8) & 0xFF]; - tmp1[2] = table_b2b_0[(qh1 >> 16) & 0xFF]; - tmp1[3] = table_b2b_0[(qh1 >> 24) ]; - - const int8x16_t qhl0 = vld1q_s8((const int8_t *)(tmp0 + 
0)); - const int8x16_t qhh0 = vld1q_s8((const int8_t *)(tmp0 + 2)); - const int8x16_t qhl1 = vld1q_s8((const int8_t *)(tmp1 + 0)); - const int8x16_t qhh1 = vld1q_s8((const int8_t *)(tmp1 + 2)); - - const uint8x16_t v0_0 = vld1q_u8(x0->qs); - const uint8x16_t v0_1 = vld1q_u8(x1->qs); - - // 4-bit -> 8-bit - const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b)); - const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4)); - const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b)); - const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4)); - - // add high bit - const int8x16_t v0_0lf = vorrq_s8(v0_0l, qhl0); - const int8x16_t v0_0hf = vorrq_s8(v0_0h, qhh0); - const int8x16_t v0_1lf = vorrq_s8(v0_1l, qhl1); - const int8x16_t v0_1hf = vorrq_s8(v0_1h, qhh1); - - // load y - const int8x16_t v1_0l = vld1q_s8(y0->qs); - const int8x16_t v1_0h = vld1q_s8(y0->qs + 16); - const int8x16_t v1_1l = vld1q_s8(y1->qs); - const int8x16_t v1_1h = vld1q_s8(y1->qs + 16); - - sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32( - ggml_vdotq_s32(vdupq_n_s32(0), v0_0lf, v1_0l), - ggml_vdotq_s32(vdupq_n_s32(0), v0_0hf, v1_0h))), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d)); - sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32( - ggml_vdotq_s32(vdupq_n_s32(0), v0_1lf, v1_1l), - ggml_vdotq_s32(vdupq_n_s32(0), v0_1hf, v1_1h))), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d)); - } - - sumf = vaddvq_f32(sumv0) + vaddvq_f32(sumv1) + summs0 + summs1; -#elif defined __wasm_simd128__ - v128_t sumv = wasm_f32x4_splat(0.0f); - - float summs = 0.0f; - - uint32_t qh_; - uint64_t tmp[4]; - - // TODO: check if unrolling this is better - for (; ib < nb; ++ib) { - const block_q5_1 * GGML_RESTRICT x0 = &x[ib]; - const block_q8_1 * GGML_RESTRICT y0 = &y[ib]; - - summs += GGML_FP16_TO_FP32(x0->m) * GGML_FP16_TO_FP32(y0->s); - - const v128_t m4b = wasm_i8x16_splat(0x0F); - - // extract the 5th bit - memcpy(&qh_, x0->qh, sizeof(qh_)); - - tmp[0] = table_b2b_0[(qh_ >> 0) & 0xFF]; - tmp[1] = table_b2b_0[(qh_ >> 8) & 0xFF]; - tmp[2] = table_b2b_0[(qh_ >> 16) & 0xFF]; - tmp[3] = table_b2b_0[(qh_ >> 24) ]; - - const v128_t qhl = wasm_v128_load(tmp + 0); - const v128_t qhh = wasm_v128_load(tmp + 2); - - const v128_t v0 = wasm_v128_load(x0->qs); - - // 4-bit -> 8-bit - const v128_t v0l = wasm_v128_and (v0, m4b); - const v128_t v0h = wasm_u8x16_shr(v0, 4); - - // add high bit - const v128_t v0lf = wasm_v128_or(v0l, qhl); - const v128_t v0hf = wasm_v128_or(v0h, qhh); - - // load y - const v128_t v1l = wasm_v128_load(y0->qs); - const v128_t v1h = wasm_v128_load(y0->qs + 16); - - // int8x16 -> int16x8 - const v128_t v0lfl = wasm_i16x8_extend_low_i8x16 (v0lf); - const v128_t v0lfh = wasm_i16x8_extend_high_i8x16(v0lf); - const v128_t v0hfl = wasm_i16x8_extend_low_i8x16 (v0hf); - const v128_t v0hfh = wasm_i16x8_extend_high_i8x16(v0hf); - - const v128_t v1ll = wasm_i16x8_extend_low_i8x16 (v1l); - const v128_t v1lh = wasm_i16x8_extend_high_i8x16(v1l); - const v128_t v1hl = wasm_i16x8_extend_low_i8x16 (v1h); - const v128_t v1hh = wasm_i16x8_extend_high_i8x16(v1h); - - // dot product - sumv = wasm_f32x4_add(sumv, - wasm_f32x4_mul(wasm_f32x4_convert_i32x4(wasm_i32x4_add( - wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0lfl, v1ll), - wasm_i32x4_dot_i16x8(v0lfh, v1lh)), - wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0hfl, v1hl), - wasm_i32x4_dot_i16x8(v0hfh, v1hh)))), - wasm_f32x4_splat(GGML_FP16_TO_FP32(x0->d) * GGML_FP16_TO_FP32(y0->d)))); - } - - sumf = wasm_f32x4_extract_lane(sumv, 0) + wasm_f32x4_extract_lane(sumv, 1) + - 
wasm_f32x4_extract_lane(sumv, 2) + wasm_f32x4_extract_lane(sumv, 3) + summs; -#elif defined(__AVX2__) - // Initialize accumulator with zeros - __m256 acc = _mm256_setzero_ps(); - - float summs = 0.0f; - - // Main loop - for (; ib < nb; ++ib) { - const __m256 dx = _mm256_set1_ps(GGML_FP16_TO_FP32(x[ib].d)); - - summs += GGML_FP16_TO_FP32(x[ib].m) * GGML_FP16_TO_FP32(y[ib].s); - - __m256i qx = bytes_from_nibbles_32(x[ib].qs); - __m256i bxhi = bytes_from_bits_32(x[ib].qh); - bxhi = _mm256_and_si256(bxhi, _mm256_set1_epi8(0x10)); - qx = _mm256_or_si256(qx, bxhi); - - const __m256 dy = _mm256_set1_ps(GGML_FP16_TO_FP32(y[ib].d)); - const __m256i qy = _mm256_loadu_si256((const __m256i *)y[ib].qs); - - const __m256 q = mul_sum_us8_pairs_float(qx, qy); - - acc = _mm256_fmadd_ps(q, _mm256_mul_ps(dx, dy), acc); - } - - sumf = hsum_float_8(acc) + summs; -#elif defined(__AVX__) - // Initialize accumulator with zeros - __m256 acc = _mm256_setzero_ps(); - __m128i mask = _mm_set1_epi8(0x10); - - float summs = 0.0f; - - // Main loop - for (; ib < nb; ++ib) { - const __m256 dx = _mm256_set1_ps(GGML_FP16_TO_FP32(x[ib].d)); - - summs += GGML_FP16_TO_FP32(x[ib].m) * GGML_FP16_TO_FP32(y[ib].s); - - __m256i bx_0 = bytes_from_nibbles_32(x[ib].qs); - const __m256i bxhi = bytes_from_bits_32(x[ib].qh); - __m128i bxhil = _mm256_castsi256_si128(bxhi); - __m128i bxhih = _mm256_extractf128_si256(bxhi, 1); - bxhil = _mm_and_si128(bxhil, mask); - bxhih = _mm_and_si128(bxhih, mask); - __m128i bxl = _mm256_castsi256_si128(bx_0); - __m128i bxh = _mm256_extractf128_si256(bx_0, 1); - bxl = _mm_or_si128(bxl, bxhil); - bxh = _mm_or_si128(bxh, bxhih); - bx_0 = MM256_SET_M128I(bxh, bxl); - - const __m256 dy = _mm256_set1_ps(GGML_FP16_TO_FP32(y[ib].d)); - const __m256i by_0 = _mm256_loadu_si256((const __m256i *)y[ib].qs); - - const __m256 q = mul_sum_us8_pairs_float(bx_0, by_0); - - acc = _mm256_add_ps(_mm256_mul_ps(q, _mm256_mul_ps(dx, dy)), acc); - } - - sumf = hsum_float_8(acc) + summs; -#elif defined(__riscv_v_intrinsic) - size_t vl; - size_t vlenb = __riscv_vlenb(); - - for (; ib < nb; ++ib) { - vl = qk / 2; - vuint8m1_t v0 = __riscv_vle8_v_u8m1(x[ib].qs, vl); - vint8m1_t v0l = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(v0, 0x0F, vl)); - vint8m1_t v0h = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vsrl_vx_u8m1(v0, 4, vl)); - vint8m2_t v0c; - if (vlenb == 16) { - v0c = __riscv_vcreate_v_i8m1_i8m2(v0l, v0h); - } else { - v0l = __riscv_vslideup_vx_i8m1(v0l, v0h, 16, 32); - v0c = __riscv_vlmul_ext_v_i8m1_i8m2(v0l); - } - - vl = qk; - vbool4_t qh = __riscv_vlm_v_b4(x[ib].qh, vl); - vint8m2_t v0f = __riscv_vor_vx_i8m2_mu(qh, v0c, v0c, 0x10, vl); - vint8m2_t v1 = __riscv_vle8_v_i8m2(y[ib].qs, vl); - vint16m4_t mul = __riscv_vwmul_vv_i16m4(v0f, v1, vl); - vint32m1_t zero = __riscv_vmv_v_x_i32m1(0, vl); - vint32m1_t sum = __riscv_vwredsum_vs_i16m4_i32m1(mul, zero, vl); - int32_t sumi = __riscv_vmv_x_s_i32m1_i32(sum); - - sumf += (GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d))*sumi + GGML_FP16_TO_FP32(x[ib].m)*GGML_FP16_TO_FP32(y[ib].s); - } - -#elif defined(__POWER9_VECTOR__) - const vector signed char lowMask = vec_splats((signed char)0xF); - const vector signed int v0 = vec_splats((int32_t)0); - const vector unsigned char v4 = vec_splats((unsigned char)0x4); - - vector float vsumf0 = vec_splats(0.0f); - -#pragma GCC unroll 4 - for (; ib < nb; ++ib) { - __builtin_prefetch(x[ib].qs, 0, 1); - __builtin_prefetch(y[ib].qs, 0, 1); - - vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[ib].d)); - vector float vyd = 
vec_splats(GGML_FP16_TO_FP32(y[ib].d)); - vector float vd = vec_mul(vxd, vyd); - - vector float vxmin = vec_splats(GGML_FP16_TO_FP32(x[ib].m)); - vector float vys = {GGML_FP16_TO_FP32(y[ib].s), 0.f, 0.f, 0.f}; - vsumf0 = vec_madd(vxmin, vys, vsumf0); - - vector unsigned long long aux64x2_0 = {(uint64_t)(table_b2b_0[x[ib].qh[0]]), (uint64_t)(table_b2b_0[x[ib].qh[1]])}; - vector unsigned long long aux64x2_1 = {(uint64_t)(table_b2b_0[x[ib].qh[2]]), (uint64_t)(table_b2b_0[x[ib].qh[3]])}; - - vector signed char qh0 = (vector signed char)aux64x2_0; - vector signed char qh1 = (vector signed char)aux64x2_1; - - vector signed char qxs = (vector signed char)vec_xl( 0, x[ib].qs); - - vector unsigned char q5x0 = (vector unsigned char)vec_or(vec_and(qxs, lowMask), qh0); - vector unsigned char q5x1 = (vector unsigned char)vec_or(vec_sr(qxs, v4), qh1); - - vector signed char q8y0 = vec_xl( 0, y[ib].qs); - vector signed char q8y1 = vec_xl( 16, y[ib].qs); - - vector signed int vsumi0 = v0; - - vsumi0 = vec_msum(q8y0, q5x0, vsumi0); - vsumi0 = vec_msum(q8y1, q5x1, vsumi0); - - vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); - } - - vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); - vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); - - sumf = vec_extract(vsumf0, 0); - -#elif defined(__loongarch_asx) - // Initialize accumulator with zeros - __m256 acc = (__m256)__lasx_xvldi(0); - - float summs = 0.0f; - - // Main loop - for (; ib < nb; ++ib) { - const __m256 dx = __lasx_xvreplfr2vr_s(GGML_FP16_TO_FP32(x[ib].d)); - - summs += GGML_FP16_TO_FP32(x[ib].m) * GGML_FP16_TO_FP32(y[ib].s); - - __m256i qx = bytes_from_nibbles_32(x[ib].qs); - __m256i bxhi = bytes_from_bits_32(x[ib].qh); - bxhi = __lasx_xvand_v(bxhi, __lasx_xvreplgr2vr_b(0x10)); - qx = __lasx_xvor_v(qx, bxhi); - - const __m256 dy = __lasx_xvreplfr2vr_s(GGML_FP16_TO_FP32(y[ib].d)); - const __m256i qy = __lasx_xvld((const __m256i *)y[ib].qs, 0); - - const __m256 q = mul_sum_us8_pairs_float(qx, qy); - - acc = __lasx_xvfmadd_s(q, __lasx_xvfmul_s(dx, dy), acc); - } - - sumf = hsum_float_8(acc) + summs; -#endif - for (; ib < nb; ++ib) { - uint32_t qh; - memcpy(&qh, x[ib].qh, sizeof(qh)); - - int sumi0 = 0; - int sumi1 = 0; - - for (int j = 0; j < qk/2; ++j) { - const uint8_t xh_0 = ((qh >> (j + 0)) << 4) & 0x10; - const uint8_t xh_1 = ((qh >> (j + 12)) ) & 0x10; - - const int32_t x0 = (x[ib].qs[j] & 0xF) | xh_0; - const int32_t x1 = (x[ib].qs[j] >> 4) | xh_1; - - sumi0 += (x0 * y[ib].qs[j]); - sumi1 += (x1 * y[ib].qs[j + qk/2]); - } - - int sumi = sumi0 + sumi1; - sumf += (GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d))*sumi + GGML_FP16_TO_FP32(x[ib].m)*GGML_FP16_TO_FP32(y[ib].s); - } - - *s = sumf; -} - -void ggml_vec_dot_q8_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { - const int qk = QK8_0; - const int nb = n / qk; - - assert(n % qk == 0); -#if defined(__ARM_FEATURE_MATMUL_INT8) - assert((nrc == 2) || (nrc == 1)); -#else - assert(nrc == 1); -#endif - UNUSED(nrc); - UNUSED(bx); - UNUSED(by); - UNUSED(bs); - - const block_q8_0 * GGML_RESTRICT x = vx; - const block_q8_0 * GGML_RESTRICT y = vy; - -#if defined(__ARM_FEATURE_MATMUL_INT8) - if (nrc == 2) { - const block_q8_0 * GGML_RESTRICT vx0 = vx; - const block_q8_0 * GGML_RESTRICT vx1 = (const block_q8_0 *) ((const uint8_t*)vx + bx); - const block_q8_0 * GGML_RESTRICT vy0 = vy; - const block_q8_0 * GGML_RESTRICT vy1 = (const block_q8_0 *) ((const uint8_t*)vy + by); - - float32x4_t sumv0 = 
vdupq_n_f32(0.0f); - - for (int i = 0; i < nb; i++) { - const block_q8_0 * GGML_RESTRICT b_x0 = &vx0[i]; - const block_q8_0 * GGML_RESTRICT b_y0 = &vy0[i]; - - const block_q8_0 * GGML_RESTRICT b_x1 = &vx1[i]; - const block_q8_0 * GGML_RESTRICT b_y1 = &vy1[i]; - - const int8x16_t x0_l = vld1q_s8(b_x0->qs); - const int8x16_t x0_h = vld1q_s8(b_x0->qs + 16); - const int8x16_t x1_l = vld1q_s8(b_x1->qs); - const int8x16_t x1_h = vld1q_s8(b_x1->qs + 16); - - // load y - const int8x16_t y0_l = vld1q_s8(b_y0->qs); - const int8x16_t y0_h = vld1q_s8(b_y0->qs + 16); - const int8x16_t y1_l = vld1q_s8(b_y1->qs); - const int8x16_t y1_h = vld1q_s8(b_y1->qs + 16); - - float32_t _scale[4] = { - GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y0->d), - GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y1->d), - GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y0->d), - GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y1->d) - }; - float32x4_t scale = vld1q_f32(_scale); - - int8x16_t l0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l))); - int8x16_t l1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l))); - - int8x16_t l2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_h), vreinterpretq_s64_s8(x1_h))); - int8x16_t l3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_h), vreinterpretq_s64_s8(x1_h))); - - int8x16_t r0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l))); - int8x16_t r1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l))); - - int8x16_t r2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h))); - int8x16_t r3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h))); - - sumv0 = vmlaq_f32(sumv0,(vcvtq_f32_s32(vmmlaq_s32((vmmlaq_s32((vmmlaq_s32((vmmlaq_s32(vdupq_n_s32(0), l0, r0)), - l1, r1)), l2, r2)), l3, r3))), scale); - } - - float32x4_t sumv1 = vextq_f32 (sumv0, sumv0, 2); - float32x4_t sumv2 = vzip1q_f32(sumv0, sumv1); - - vst1_f32(s, vget_low_f32 (sumv2)); - vst1_f32(s + bs, vget_high_f32(sumv2)); - - return; - } -#endif - - int ib = 0; - float sumf = 0; - -#if defined(__ARM_FEATURE_SVE) - svfloat32_t sumv0 = svdup_n_f32(0.0f); - svfloat32_t sumv1 = svdup_n_f32(0.0f); - - const int vector_length = ggml_cpu_get_sve_cnt()*8; - - //VLA Implemenation for SVE - switch (vector_length) { - case 128: - { - // predicate for activating lanes for 16 Int8 elements - const svbool_t ph16 = svptrue_pat_b8 (SV_VL16); - const svbool_t pl16 = svptrue_pat_b32(SV_VL4); - - for (; ib + 1 < nb; ib += 2) { - const block_q8_0 * GGML_RESTRICT x0 = &x[ib + 0]; - const block_q8_0 * GGML_RESTRICT x1 = &x[ib + 1]; - const block_q8_0 * GGML_RESTRICT y0 = &y[ib + 0]; - const block_q8_0 * GGML_RESTRICT y1 = &y[ib + 1]; - - // load x - const svint8_t qx0_0 = svld1_s8(ph16, x0->qs); - const svint8_t qx0_1 = svld1_s8(ph16, x0->qs+16); - const svint8_t qx1_0 = svld1_s8(ph16, x1->qs); - const svint8_t qx1_1 = svld1_s8(ph16, x1->qs+16); - - // load y - const svint8_t qy0_0 = svld1_s8(ph16, y0->qs); - const svint8_t qy0_1 = svld1_s8(ph16, y0->qs+16); - const svint8_t qy1_0 = svld1_s8(ph16, y1->qs); - const svint8_t qy1_1 = svld1_s8(ph16, y1->qs+16); - - sumv0 = svmla_n_f32_x(pl16, sumv0, svcvt_f32_s32_x(pl16, svadd_x(pl16, - svdot_s32(svdup_n_s32(0), qx0_0, qy0_0), - svdot_s32(svdup_n_s32(0), qx0_1, qy0_1))), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d)); - sumv1 = svmla_n_f32_x(pl16, sumv1, 
svcvt_f32_s32_x(pl16, svadd_x(pl16, - svdot_s32(svdup_n_s32(0), qx1_0, qy1_0), - svdot_s32(svdup_n_s32(0), qx1_1, qy1_1))), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d)); - } - - sumf = svaddv_f32(pl16, svadd_f32_x(pl16, sumv0, sumv1)); - } break; - case 256: - { - //printf("sve256"); - for (; ib + 1 < nb; ib += 2) { - const block_q8_0 * GGML_RESTRICT x0 = &x[ib + 0]; - const block_q8_0 * GGML_RESTRICT x1 = &x[ib + 1]; - const block_q8_0 * GGML_RESTRICT y0 = &y[ib + 0]; - const block_q8_0 * GGML_RESTRICT y1 = &y[ib + 1]; - - // load x - const svint8_t qx0 = svld1_s8(svptrue_b8(), x0->qs); - const svint8_t qx1 = svld1_s8(svptrue_b8(), x1->qs); - - // load y - const svint8_t qy0 = svld1_s8(svptrue_b8(), y0->qs); - const svint8_t qy1 = svld1_s8(svptrue_b8(), y1->qs); - - sumv0 = svmla_n_f32_x(svptrue_b32(), sumv0, svcvt_f32_s32_x(svptrue_b32(), - svdot_s32(svdup_n_s32(0), qx0, qy0)), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d)); - sumv1 = svmla_n_f32_x(svptrue_b32(), sumv1, svcvt_f32_s32_x(svptrue_b32(), - svdot_s32(svdup_n_s32(0), qx1, qy1)), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d)); - } - - sumf = svaddv_f32(svptrue_b32(), svadd_f32_x(svptrue_b32(), sumv0, sumv1)); - } break; - case 512: - { - // predicate for activating high 256 bit - const svbool_t ph32 = svptrue_pat_b8(SV_VL32); - // predicate for activating low 256 bit - const svbool_t pl32 = svnot_b_z(svptrue_b8(), ph32); - - // predicate for activating high lanes for 8 float32 elements - const svbool_t ph8 = svptrue_pat_b32(SV_VL8); - // predicate for activating low lanes for 8 float32 elements - const svbool_t pl8 = svnot_b_z(svptrue_b32(), ph8); - - svfloat32_t sumv00 = svdup_n_f32(0.0f); - - for (; ib + 1 < nb; ib += 2) { - const block_q8_0 * GGML_RESTRICT x0 = &x[ib + 0]; - const block_q8_0 * GGML_RESTRICT x1 = &x[ib + 1]; - const block_q8_0 * GGML_RESTRICT y0 = &y[ib + 0]; - const block_q8_0 * GGML_RESTRICT y1 = &y[ib + 1]; - - //load 32 int8_t in first half of vector and put another 32 int8_t in second vector lower bits - // and add them to make one 64 element vector - // load x - const svint8_t qx_32 = svld1_s8(ph32, x0->qs); - svint8_t qx_64 = svld1_s8(pl32, x0->qs + 2); - - qx_64 = svadd_s8_x(svptrue_b8(), qx_32, qx_64); - - // load y - const svint8_t qy_32 = svld1_s8(ph32, y0->qs); - svint8_t qy_64 = svld1_s8(pl32, y0->qs + 2); - - qy_64 = svadd_s8_x(svptrue_b8(), qy_32, qy_64); - - // scale creation - const float32_t deq1 = GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d); - const float32_t deq2 = GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d); - - // duplicate deq1 in first half of vector and deq2 in second half of vector - const svfloat32_t temp = svdup_f32_m(svdup_f32_z(ph8, deq1), pl8, deq2); - - const svfloat32_t sumvt = svcvt_f32_s32_x(svptrue_b32(), svdot_s32(svdup_n_s32(0), qx_64, qy_64)); - - sumv00 = svmla_f32_m(svptrue_b32(), sumv00, sumvt, temp); - } - - sumf = svaddv_f32(svptrue_b32(), sumv00); - break; - } - default: - assert(false && "Unsupported vector length"); - break; - } -#elif defined(__ARM_NEON) - float32x4_t sumv0 = vdupq_n_f32(0.0f); - float32x4_t sumv1 = vdupq_n_f32(0.0f); - - for (; ib + 1 < nb; ib += 2) { - const block_q8_0 * GGML_RESTRICT x0 = &x[ib + 0]; - const block_q8_0 * GGML_RESTRICT x1 = &x[ib + 1]; - const block_q8_0 * GGML_RESTRICT y0 = &y[ib + 0]; - const block_q8_0 * GGML_RESTRICT y1 = &y[ib + 1]; - - const int8x16_t x0_0 = vld1q_s8(x0->qs); - const int8x16_t x0_1 = vld1q_s8(x0->qs + 16); - const int8x16_t x1_0 = vld1q_s8(x1->qs); - const int8x16_t x1_1 = 
vld1q_s8(x1->qs + 16); - - // load y - const int8x16_t y0_0 = vld1q_s8(y0->qs); - const int8x16_t y0_1 = vld1q_s8(y0->qs + 16); - const int8x16_t y1_0 = vld1q_s8(y1->qs); - const int8x16_t y1_1 = vld1q_s8(y1->qs + 16); - - sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32( - ggml_vdotq_s32(vdupq_n_s32(0), x0_0, y0_0), - ggml_vdotq_s32(vdupq_n_s32(0), x0_1, y0_1))), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d)); - - sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32( - ggml_vdotq_s32(vdupq_n_s32(0), x1_0, y1_0), - ggml_vdotq_s32(vdupq_n_s32(0), x1_1, y1_1))), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d)); - } - - sumf = vaddvq_f32(sumv0) + vaddvq_f32(sumv1); -#elif defined __wasm_simd128__ - v128_t sumv = wasm_f32x4_splat(0.0f); - - for (; ib < nb; ++ib) { - const block_q8_0 * GGML_RESTRICT x0 = &x[ib]; - const block_q8_0 * GGML_RESTRICT y0 = &y[ib]; - - const v128_t x0_0 = wasm_v128_load(x0->qs); - const v128_t x0_1 = wasm_v128_load(x0->qs + 16); - const v128_t y0_0 = wasm_v128_load(y0->qs); - const v128_t y0_1 = wasm_v128_load(y0->qs + 16); - - // Extend 8-bit to 16-bit - const v128_t x0_0l = wasm_i16x8_extend_low_i8x16(x0_0); - const v128_t x0_0h = wasm_i16x8_extend_high_i8x16(x0_0); - const v128_t x0_1l = wasm_i16x8_extend_low_i8x16(x0_1); - const v128_t x0_1h = wasm_i16x8_extend_high_i8x16(x0_1); - - const v128_t y0_0l = wasm_i16x8_extend_low_i8x16(y0_0); - const v128_t y0_0h = wasm_i16x8_extend_high_i8x16(y0_0); - const v128_t y0_1l = wasm_i16x8_extend_low_i8x16(y0_1); - const v128_t y0_1h = wasm_i16x8_extend_high_i8x16(y0_1); - - // Compute dot products - const v128_t dx0_0 = wasm_i32x4_dot_i16x8(x0_0l, y0_0l); - const v128_t dx0_1 = wasm_i32x4_dot_i16x8(x0_0h, y0_0h); - const v128_t dx1_0 = wasm_i32x4_dot_i16x8(x0_1l, y0_1l); - const v128_t dx1_1 = wasm_i32x4_dot_i16x8(x0_1h, y0_1h); - - // Sum all dot products - const v128_t sum_dots = wasm_i32x4_add(wasm_i32x4_add(dx0_0, dx0_1), wasm_i32x4_add(dx1_0, dx1_1)); - - // Convert to float and accumulate - const float scale = GGML_FP16_TO_FP32(x0->d) * GGML_FP16_TO_FP32(y0->d); - sumv = wasm_f32x4_add(sumv, wasm_f32x4_mul(wasm_f32x4_convert_i32x4(sum_dots), wasm_f32x4_splat(scale))); - } - - sumf = wasm_f32x4_extract_lane(sumv, 0) + wasm_f32x4_extract_lane(sumv, 1) + - wasm_f32x4_extract_lane(sumv, 2) + wasm_f32x4_extract_lane(sumv, 3); -#elif defined(__AVX2__) - // Initialize accumulator with zeros - __m256 acc = _mm256_setzero_ps(); - - // Main loop - for (; ib < nb; ++ib) { - // Compute combined scale for the block - const __m256 d = _mm256_set1_ps(GGML_FP16_TO_FP32(x[ib].d) * GGML_FP16_TO_FP32(y[ib].d)); - __m256i qx = _mm256_loadu_si256((const __m256i *)x[ib].qs); - __m256i qy = _mm256_loadu_si256((const __m256i *)y[ib].qs); - - const __m256 q = mul_sum_i8_pairs_float(qx, qy); - - // Multiply q with scale and accumulate - acc = _mm256_fmadd_ps( d, q, acc ); - } - - sumf = hsum_float_8(acc); -#elif defined(__AVX__) - __m256 accum = _mm256_setzero_ps(); - - for (; ib + 1 < nb; ib += 2) { - const __m128i qx_1_0 = _mm_loadu_si128((const __m128i *)x[ib].qs); - const __m128i qx_1_1 = _mm_loadu_si128((const __m128i *)x[ib].qs + 1); - const __m128i qx_2_0 = _mm_loadu_si128((const __m128i *)x[ib + 1].qs); - const __m128i qx_2_1 = _mm_loadu_si128((const __m128i *)x[ib + 1].qs + 1); - const __m128i qy_1_0 = _mm_loadu_si128((const __m128i *)y[ib].qs); - const __m128i qy_1_1 = _mm_loadu_si128((const __m128i *)y[ib].qs + 1); - const __m128i qy_2_0 = _mm_loadu_si128((const __m128i *)y[ib + 1].qs); - const __m128i qy_2_1 = 
_mm_loadu_si128((const __m128i *)y[ib + 1].qs + 1); - - const __m256 p = mul_sum_i8_quad_float(qx_1_0, qx_1_1, qx_2_0, qx_2_1, qy_1_0, qy_1_1, qy_2_0, qy_2_1); - const __m256 deltas = quad_fp16_delta_float(x[ib].d, y[ib].d, x[ib + 1].d, y[ib + 1].d); - accum = _mm256_add_ps(_mm256_mul_ps(deltas, p), accum); - } - - sumf = hsum_float_8(accum); -#elif defined(__riscv_v_intrinsic) - size_t vl = qk; - - for (; ib < nb; ++ib) { - // load elements - vint8m2_t bx_0 = __riscv_vle8_v_i8m2(x[ib].qs, vl); - vint8m2_t by_0 = __riscv_vle8_v_i8m2(y[ib].qs, vl); - - vint16m4_t vw_mul = __riscv_vwmul_vv_i16m4(bx_0, by_0, vl); - - vint32m1_t v_zero = __riscv_vmv_v_x_i32m1(0, vl); - vint32m1_t v_sum = __riscv_vwredsum_vs_i16m4_i32m1(vw_mul, v_zero, vl); - - int sumi = __riscv_vmv_x_s_i32m1_i32(v_sum); - - sumf += sumi*(GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d)); - } -#elif defined(__POWER9_VECTOR__) - const vector signed int v0 = vec_splats((int32_t)0); - vector float vsumf0 = vec_splats(0.0f); - -#pragma GCC unroll 8 - for (; ib < nb; ++ib) { - __builtin_prefetch(x[ib].qs, 0, 1); - __builtin_prefetch(y[ib].qs, 0, 1); - - vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[ib].d)); - vector float vyd = vec_splats(GGML_FP16_TO_FP32(y[ib].d)); - vector float vd = vec_mul(vxd, vyd); - - vector signed char q8x0 = vec_xl( 0, x[ib].qs); - vector signed char q8x1 = vec_xl(16, x[ib].qs); - vector signed char q8y0 = vec_xl( 0, y[ib].qs); - vector signed char q8y1 = vec_xl(16, y[ib].qs); - - vector signed short qv0 = vec_mule(q8x0, q8y0); - vector signed short qv1 = vec_mulo(q8x0, q8y0); - vector signed short qv2 = vec_mule(q8x1, q8y1); - vector signed short qv3 = vec_mulo(q8x1, q8y1); - - vector signed int vsumi0 = v0; - vector signed int vsumi1 = v0; - - vsumi0 = vec_sum4s(qv0, vsumi0); - vsumi1 = vec_sum4s(qv1, vsumi1); - vsumi0 = vec_sum4s(qv2, vsumi0); - vsumi1 = vec_sum4s(qv3, vsumi1); - - vsumi0 = vec_add(vsumi0, vsumi1); - - vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); - } - - vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); - vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); - - sumf = vec_extract(vsumf0, 0); - -#elif defined(__loongarch_asx) - // Initialize accumulator with zeros - __m256 acc = (__m256)__lasx_xvldi(0); - - // Main loop - for (; ib < nb; ++ib) { - // Compute combined scale for the block - const __m256 d = __lasx_xvreplfr2vr_s(GGML_FP16_TO_FP32(x[ib].d) * GGML_FP16_TO_FP32(y[ib].d)); - __m256i qx = __lasx_xvld((const __m256i *)x[ib].qs, 0); - __m256i qy = __lasx_xvld((const __m256i *)y[ib].qs, 0); - - const __m256 q = mul_sum_i8_pairs_float(qx, qy); - - // Multiply q with scale and accumulate - acc = __lasx_xvfmadd_s( d, q, acc ); - } - - sumf = hsum_float_8(acc); -#elif defined(__VXE__) || defined(__VXE2__) - __vector float acc = vec_splats(0.0f); - -#pragma GCC unroll 8 - for (; ib < nb; ++ib) { - __builtin_prefetch(x[ib].qs, 0, 1); - __builtin_prefetch(y[ib].qs, 0, 1); - - const int8x16_t v_xl = vec_xl(0 , x[ib].qs); - const int8x16_t v_xh = vec_xl(QK8_0/2, x[ib].qs); - const int8x16_t v_yl = vec_xl(0 , y[ib].qs); - const int8x16_t v_yh = vec_xl(QK8_0/2, y[ib].qs); - - const int32x4_t v_xy_ = ggml_vec_dot(ggml_vec_dot(vec_splats(0), v_xl, v_yl), v_xh, v_yh); - const float32x4_t v_xy = vec_float(v_xy_); - const float32x4_t v_d = vec_splats(GGML_FP16_TO_FP32(x[ib].d) * GGML_FP16_TO_FP32(y[ib].d)); - - acc = vec_madd(v_xy, v_d, acc); - } - - sumf = acc[0] + acc[1] + acc[2] + acc[3]; -#endif - for (; ib < nb; ++ib) { - int sumi = 0; - - for (int j = 0; j < qk; j++) { - 
sumi += x[ib].qs[j]*y[ib].qs[j];
-        }
-
-        sumf += sumi*(GGML_FP16_TO_FP32(x[ib].d)*GGML_FP16_TO_FP32(y[ib].d));
-    }
-
-    *s = sumf;
-}
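
Every SIMD branch of the deleted ggml_vec_dot_q8_0_q8_0 above has to match the semantics of this scalar tail: a plain int8 dot product with one fused fp16 scale per 32-element block. A minimal standalone sketch, not part of the patch; QK8_0 (= 32), block_q8_0 and GGML_FP16_TO_FP32 are the names used by the deleted code, while the helper name is hypothetical:

    /* Hypothetical sketch of the Q8_0 x Q8_0 scalar reference above. */
    static float vec_dot_q8_0_ref(int n, const block_q8_0 * x, const block_q8_0 * y) {
        float sumf = 0.0f;
        for (int ib = 0; ib < n / QK8_0; ++ib) {
            int sumi = 0;                          // exact: 32 * 127 * 127 fits in int
            for (int j = 0; j < QK8_0; ++j) {
                sumi += x[ib].qs[j] * y[ib].qs[j]; // int8 dot product
            }
            // one fused fp16*fp16 scale per block of 32 weights
            sumf += sumi * (GGML_FP16_TO_FP32(x[ib].d) * GGML_FP16_TO_FP32(y[ib].d));
        }
        return sumf;
    }

The integer accumulation stays exact, so only the per-block scaling introduces floating-point rounding.
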
-
-void ggml_vec_dot_tq1_0_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
-    assert(nrc == 1);
-    UNUSED(nrc);
-    UNUSED(bx);
-    UNUSED(by);
-    UNUSED(bs);
-
-    const block_tq1_0 * GGML_RESTRICT x = vx;
-    const block_q8_K * GGML_RESTRICT y = vy;
-
-    const int nb = n / QK_K;
-
-#if defined(__ARM_NEON)
-    float sumf = 0.0f;
-
-    uint8_t k_shift[16] = {1, 1, 1, 1, 3, 3, 3, 3, 9, 9, 9, 9, 27, 27, 27, 27};
-
-    const uint8x16_t shift = vld1q_u8(k_shift);
-
-    for (int i = 0; i < nb; ++i) {
-#if defined(__ARM_FEATURE_DOTPROD)
-        int32x4_t sumi0 = vdupq_n_s32(0);
-        int32x4_t sumi1 = vdupq_n_s32(0);
-#else
-        int16x8_t sumi0 = vdupq_n_s16(0);
-        int16x8_t sumi1 = vdupq_n_s16(0);
-#endif
-
-        // first 32 bytes of 5 elements
-        {
-            uint8x16_t qx0 = vld1q_u8(x[i].qs + 0);
-            uint8x16_t qx1 = vld1q_u8(x[i].qs + 16);
-            uint8x16_t qx2 = vmulq_u8(qx0, vdupq_n_u8(3));
-            uint8x16_t qx3 = vmulq_u8(qx1, vdupq_n_u8(3));
-            uint8x16_t qx4 = vmulq_u8(qx0, vdupq_n_u8(9));
-            uint8x16_t qx5 = vmulq_u8(qx1, vdupq_n_u8(9));
-            uint8x16_t qx6 = vmulq_u8(qx0, vdupq_n_u8(27));
-            uint8x16_t qx7 = vmulq_u8(qx1, vdupq_n_u8(27));
-            uint8x16_t qx8 = vmulq_u8(qx0, vdupq_n_u8(81));
-            uint8x16_t qx9 = vmulq_u8(qx1, vdupq_n_u8(81));
-
-            // multiply by 3 and keep the 2 bits above 8 bits
-            int8x16_t sqx0 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx0, vshrq_n_u8(qx0, 1)), 6));
-            int8x16_t sqx1 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx1, vshrq_n_u8(qx1, 1)), 6));
-            int8x16_t sqx2 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx2, vshrq_n_u8(qx2, 1)), 6));
-            int8x16_t sqx3 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx3, vshrq_n_u8(qx3, 1)), 6));
-            int8x16_t sqx4 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx4, vshrq_n_u8(qx4, 1)), 6));
-            int8x16_t sqx5 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx5, vshrq_n_u8(qx5, 1)), 6));
-            int8x16_t sqx6 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx6, vshrq_n_u8(qx6, 1)), 6));
-            int8x16_t sqx7 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx7, vshrq_n_u8(qx7, 1)), 6));
-            int8x16_t sqx8 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx8, vshrq_n_u8(qx8, 1)), 6));
-            int8x16_t sqx9 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx9, vshrq_n_u8(qx9, 1)), 6));
-
-            const int8x16_t qy0 = vld1q_s8(y[i].qs + 0);
-            const int8x16_t qy1 = vld1q_s8(y[i].qs + 16);
-            const int8x16_t qy2 = vld1q_s8(y[i].qs + 32);
-            const int8x16_t qy3 = vld1q_s8(y[i].qs + 48);
-            const int8x16_t qy4 = vld1q_s8(y[i].qs + 64);
-            const int8x16_t qy5 = vld1q_s8(y[i].qs + 80);
-            const int8x16_t qy6 = vld1q_s8(y[i].qs + 96);
-            const int8x16_t qy7 = vld1q_s8(y[i].qs + 112);
-            const int8x16_t qy8 = vld1q_s8(y[i].qs + 128);
-            const int8x16_t qy9 = vld1q_s8(y[i].qs + 144);
-
-#if defined(__ARM_FEATURE_DOTPROD)
-            sumi0 = vdotq_s32(sumi0, sqx0, qy0);
-            sumi1 = vdotq_s32(sumi1, sqx1, qy1);
-            sumi0 = vdotq_s32(sumi0, sqx2, qy2);
-            sumi1 = vdotq_s32(sumi1, sqx3, qy3);
-            sumi0 = vdotq_s32(sumi0, sqx4, qy4);
-            sumi1 = vdotq_s32(sumi1, sqx5, qy5);
-            sumi0 = vdotq_s32(sumi0, sqx6, qy6);
-            sumi1 = vdotq_s32(sumi1, sqx7, qy7);
-            sumi0 = vdotq_s32(sumi0, sqx8, qy8);
-            sumi1 = vdotq_s32(sumi1, sqx9, qy9);
-#else
-            sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx0), vget_low_s8(qy0));
-            sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx0), vget_high_s8(qy0));
-            sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx1), vget_low_s8(qy1));
-            sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx1), vget_high_s8(qy1));
-            sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx2), vget_low_s8(qy2));
-            sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx2), vget_high_s8(qy2));
-            sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx3), vget_low_s8(qy3));
-            sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx3), vget_high_s8(qy3));
-            sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx4), vget_low_s8(qy4));
-            sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx4), vget_high_s8(qy4));
-            sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx5), vget_low_s8(qy5));
-            sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx5), vget_high_s8(qy5));
-            sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx6), vget_low_s8(qy6));
-            sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx6), vget_high_s8(qy6));
-            sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx7), vget_low_s8(qy7));
-            sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx7), vget_high_s8(qy7));
-            sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx8), vget_low_s8(qy8));
-            sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx8), vget_high_s8(qy8));
-            sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx9), vget_low_s8(qy9));
-            sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx9), vget_high_s8(qy9));
-#endif
-        }
-
-        // last 16 bytes of 5-element, along with the 4 bytes of 4 elements
-        {
-            uint8x16_t qx0 = vld1q_u8(x[i].qs + 32);
-            uint8x16_t qx1 = vmulq_u8(qx0, vdupq_n_u8(3));
-            uint8x16_t qx2 = vmulq_u8(qx0, vdupq_n_u8(9));
-            uint8x16_t qx3 = vmulq_u8(qx0, vdupq_n_u8(27));
-            uint8x16_t qx4 = vmulq_u8(qx0, vdupq_n_u8(81));
-            uint32_t qh;
-            memcpy(&qh, x[i].qh, sizeof(qh)); // potentially unaligned
-            uint8x16_t qx5 = vreinterpretq_u8_u32(vdupq_n_u32(qh));
-            qx5 = vmulq_u8(qx5, shift);
-
-            // multiply by 3 and keep the 2 bits above 8 bits
-            int8x16_t sqx0 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx0, vshrq_n_u8(qx0, 1)), 6));
-            int8x16_t sqx1 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx1, vshrq_n_u8(qx1, 1)), 6));
-            int8x16_t sqx2 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx2, vshrq_n_u8(qx2, 1)), 6));
-            int8x16_t sqx3 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx3, vshrq_n_u8(qx3, 1)), 6));
-            int8x16_t sqx4 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx4, vshrq_n_u8(qx4, 1)), 6));
-            int8x16_t sqx5 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx5, vshrq_n_u8(qx5, 1)), 6));
-
-            const int8x16_t qy0 = vld1q_s8(y[i].qs + 160);
-            const int8x16_t qy1 = vld1q_s8(y[i].qs + 176);
-            const int8x16_t qy2 = vld1q_s8(y[i].qs + 192);
-            const int8x16_t qy3 = vld1q_s8(y[i].qs + 208);
-            const int8x16_t qy4 = vld1q_s8(y[i].qs + 224);
-            const int8x16_t qy5 = vld1q_s8(y[i].qs + 240);
-
-#if defined(__ARM_FEATURE_DOTPROD)
-            sumi0 = vdotq_s32(sumi0, sqx0, qy0);
-            sumi1 = vdotq_s32(sumi1, sqx1, qy1);
-            sumi0 = vdotq_s32(sumi0, sqx2, qy2);
-            sumi1 = vdotq_s32(sumi1, sqx3, qy3);
-            sumi0 = vdotq_s32(sumi0, sqx4, qy4);
-            sumi1 = vdotq_s32(sumi1, sqx5, qy5);
-#else
-            sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx0), vget_low_s8(qy0));
-            sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx0), vget_high_s8(qy0));
-            sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx1), vget_low_s8(qy1));
-            sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx1), vget_high_s8(qy1));
-            sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx2), vget_low_s8(qy2));
-            sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx2), vget_high_s8(qy2));
-            sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx3), vget_low_s8(qy3));
-            sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx3), vget_high_s8(qy3));
-            sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx4), vget_low_s8(qy4));
-            sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx4), vget_high_s8(qy4));
-            sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx5), vget_low_s8(qy5));
-            sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx5), vget_high_s8(qy5));
-#endif
-        }
-
-        const int16x8_t ysum0 = vld1q_s16(y[i].bsums);
-        const int16x8_t ysum1 = vld1q_s16(y[i].bsums + 8);
-
-        const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
-
-#if defined(__ARM_FEATURE_DOTPROD)
-        sumi0 = vaddq_s32(sumi0, sumi1);
-        sumi0 = vsubq_s32(sumi0, vpaddlq_s16(vaddq_s16(ysum0, ysum1)));
-
-        sumf += d * (float) vaddvq_s32(sumi0);
-#else
-        sumi0 = vaddq_s16(sumi0, sumi1);
-        sumi0 = vsubq_s16(sumi0, vaddq_s16(ysum0, ysum1));
-
-        sumf += d * (float) vaddlvq_s16(sumi0);
-#endif
-    }
-
-    *s = sumf;
-
-#elif defined(__AVX2__)
-    __m256 sumf = _mm256_setzero_ps();
-
-    for (int i = 0; i < nb; ++i) {
-        // 16-bit sums
-        __m256i sumi0 = _mm256_setzero_si256();
-        __m256i sumi1 = _mm256_setzero_si256();
-        __m256i sumi2 = _mm256_setzero_si256();
-
-        // first 32 bytes of 5 elements
-        {
-            __m256i qx0 = _mm256_loadu_si256((const __m256i *) (x[i].qs));
-            // 8-bit multiplies with shifts, masks and adds
-            __m256i qx1 = _mm256_add_epi8(qx0, _mm256_add_epi8(qx0, qx0)); // 1 * 3
-            __m256i qx2 = _mm256_add_epi8(_mm256_and_si256(_mm256_slli_epi16(qx0, 3), _mm256_set1_epi8(-8)), qx0); // 1 * 9
-            __m256i qx3 = _mm256_add_epi8(_mm256_and_si256(_mm256_slli_epi16(qx1, 3), _mm256_set1_epi8(-8)), qx1); // 3 * 9
-            __m256i qx4 = _mm256_add_epi8(_mm256_and_si256(_mm256_slli_epi16(qx2, 3), _mm256_set1_epi8(-8)), qx2); // 9 * 9
-
-            // TODO: can _mm256_mulhi_epu16 be faster even if 16-bits?
-
-            // Cancel the +1 from avg so that it behaves like a halving add
-            qx0 = _mm256_subs_epu8(qx0, _mm256_set1_epi8(1));
-            qx1 = _mm256_subs_epu8(qx1, _mm256_set1_epi8(1));
-            qx2 = _mm256_subs_epu8(qx2, _mm256_set1_epi8(1));
-            qx3 = _mm256_subs_epu8(qx3, _mm256_set1_epi8(1));
-            qx4 = _mm256_subs_epu8(qx4, _mm256_set1_epi8(1));
-            // Multiply by 3 and get the top 2 bits
-            qx0 = _mm256_avg_epu8(qx0, _mm256_avg_epu8(qx0, _mm256_setzero_si256()));
-            qx1 = _mm256_avg_epu8(qx1, _mm256_avg_epu8(qx1, _mm256_setzero_si256()));
-            qx2 = _mm256_avg_epu8(qx2, _mm256_avg_epu8(qx2, _mm256_setzero_si256()));
-            qx3 = _mm256_avg_epu8(qx3, _mm256_avg_epu8(qx3, _mm256_setzero_si256()));
-            qx4 = _mm256_avg_epu8(qx4, _mm256_avg_epu8(qx4, _mm256_setzero_si256()));
-            qx0 = _mm256_and_si256(_mm256_srli_epi16(qx0, 6), _mm256_set1_epi8(3));
-            qx1 = _mm256_and_si256(_mm256_srli_epi16(qx1, 6), _mm256_set1_epi8(3));
-            qx2 = _mm256_and_si256(_mm256_srli_epi16(qx2, 6), _mm256_set1_epi8(3));
-            qx3 = _mm256_and_si256(_mm256_srli_epi16(qx3, 6), _mm256_set1_epi8(3));
-            qx4 = _mm256_and_si256(_mm256_srli_epi16(qx4, 6), _mm256_set1_epi8(3));
-
-            const __m256i qy0 = _mm256_loadu_si256((const __m256i *) (y[i].qs + 0));
-            const __m256i qy1 = _mm256_loadu_si256((const __m256i *) (y[i].qs + 32));
-            const __m256i qy2 = _mm256_loadu_si256((const __m256i *) (y[i].qs + 64));
-            const __m256i qy3 = _mm256_loadu_si256((const __m256i *) (y[i].qs + 96));
-            const __m256i qy4 = _mm256_loadu_si256((const __m256i *) (y[i].qs + 128));
-
-            qx0 = _mm256_maddubs_epi16(qx0, qy0);
-            qx1 = _mm256_maddubs_epi16(qx1, qy1);
-            qx2 = _mm256_maddubs_epi16(qx2, qy2);
-            qx3 = _mm256_maddubs_epi16(qx3, qy3);
-            qx4 = _mm256_maddubs_epi16(qx4, qy4);
-
-            sumi0 = _mm256_add_epi16(sumi0, _mm256_add_epi16(qx0, qx1));
-            sumi1 = _mm256_add_epi16(sumi1, _mm256_add_epi16(qx2, qx3));
-            sumi2 = _mm256_add_epi16(sumi2, qx4);
-        }
-
-        // last 16 bytes of 5-element, along with the 4 bytes of 4 elements
-        {
-            __m128i qx0 = _mm_loadu_si128((const __m128i *) (x[i].qs + 32));
-            uint32_t qh;
-            memcpy(&qh, x[i].qh, sizeof(qh)); // potentially unaligned
-            __m256i qx5_l = _mm256_cvtepu8_epi16(_mm_set1_epi32(qh));
-            __m128i qx1 = _mm_add_epi8(qx0, _mm_add_epi8(qx0, qx0)); // 1 * 3
-            __m128i qx2 = _mm_add_epi8(_mm_and_si128(_mm_slli_epi16(qx0, 3), _mm_set1_epi8(-8)), qx0); // 1 * 9
-            __m128i qx3 = _mm_add_epi8(_mm_and_si128(_mm_slli_epi16(qx1, 3), _mm_set1_epi8(-8)), qx1); // 3 * 9
-            __m128i qx4 = _mm_add_epi8(_mm_and_si128(_mm_slli_epi16(qx2, 3), _mm_set1_epi8(-8)), qx2); // 9 * 9
-            __m256i qx01 = MM256_SET_M128I(qx1, qx0);
-            __m256i qx23 = MM256_SET_M128I(qx3, qx2);
-
-            // avx2 does not have 8-bit multiplies, so 16-bit it is.
-            qx5_l = _mm256_mullo_epi16(qx5_l, _mm256_set_epi16(27, 27, 27, 27, 9, 9, 9, 9, 3, 3, 3, 3, 1, 1, 1, 1));
-            qx5_l = _mm256_and_si256(qx5_l, _mm256_set1_epi16(0xFF));
-            __m128i qx5 = _mm_packus_epi16(_mm256_castsi256_si128(qx5_l), _mm256_extracti128_si256(qx5_l, 1));
-
-            __m256i qx45 = MM256_SET_M128I(qx5, qx4);
-
-            // Cancel the +1 from avg so that it behaves like a halving add
-            qx01 = _mm256_subs_epu8(qx01, _mm256_set1_epi8(1));
-            qx23 = _mm256_subs_epu8(qx23, _mm256_set1_epi8(1));
-            qx45 = _mm256_subs_epu8(qx45, _mm256_set1_epi8(1));
-            // Multiply by 3 and get the top 2 bits
-            qx01 = _mm256_avg_epu8(qx01, _mm256_avg_epu8(qx01, _mm256_setzero_si256()));
-            qx23 = _mm256_avg_epu8(qx23, _mm256_avg_epu8(qx23, _mm256_setzero_si256()));
-            qx45 = _mm256_avg_epu8(qx45, _mm256_avg_epu8(qx45, _mm256_setzero_si256()));
-            qx01 = _mm256_and_si256(_mm256_srli_epi16(qx01, 6), _mm256_set1_epi8(3));
-            qx23 = _mm256_and_si256(_mm256_srli_epi16(qx23, 6), _mm256_set1_epi8(3));
-            qx45 = _mm256_and_si256(_mm256_srli_epi16(qx45, 6), _mm256_set1_epi8(3));
-
-            const __m256i qy01 = _mm256_loadu_si256((const __m256i *) (y[i].qs + 160));
-            const __m256i qy23 = _mm256_loadu_si256((const __m256i *) (y[i].qs + 192));
-            const __m256i qy45 = _mm256_loadu_si256((const __m256i *) (y[i].qs + 224));
-
-            qx01 = _mm256_maddubs_epi16(qx01, qy01);
-            qx23 = _mm256_maddubs_epi16(qx23, qy23);
-            qx45 = _mm256_maddubs_epi16(qx45, qy45);
-
-            sumi0 = _mm256_add_epi16(sumi0, qx01);
-            sumi1 = _mm256_add_epi16(sumi1, qx23);
-            sumi2 = _mm256_add_epi16(sumi2, qx45);
-        }
-
-        const __m256i ysum = _mm256_loadu_si256((const __m256i *) y[i].bsums);
-        const __m256 d = _mm256_set1_ps(y[i].d * GGML_FP16_TO_FP32(x[i].d));
-
-        sumi0 = _mm256_sub_epi16(sumi0, ysum);
-        sumi0 = _mm256_add_epi16(sumi0, _mm256_add_epi16(sumi1, sumi2));
-        sumi0 = _mm256_madd_epi16(sumi0, _mm256_set1_epi16(1));
-
-        sumf = _mm256_add_ps(_mm256_mul_ps(_mm256_cvtepi32_ps(sumi0), d), sumf);
-    }
-
-    *s = hsum_float_8(sumf);
-
-#else
-    const uint8_t pow3[6] = {1, 3, 9, 27, 81, 243};
-
-    float sumf = 0.0f;
-
-    for (int i = 0; i < nb; ++i) {
-        int sum = 0;
-
-        for (size_t j = 0; j < sizeof(x->qs) - sizeof(x->qs) % 32; j += 32) {
-            for (size_t l = 0; l < 5; ++l) {
-                for (size_t m = 0; m < 32; ++m) {
-                    uint8_t q = x[i].qs[j + m] * pow3[l];
-                    uint16_t xi = ((uint16_t) q * 3) >> 8;
-                    sum += (xi - 1) * y[i].qs[j*5 + l*32 + m];
-                }
-            }
-        }
-        for (size_t j = sizeof(x->qs) - sizeof(x->qs) % 32; j < sizeof(x->qs); j += 16) {
-            for (size_t l = 0; l < 5; ++l) {
-                for (size_t m = 0; m < 16; ++m) {
-                    uint8_t q = x[i].qs[j + m] * pow3[l];
-                    uint16_t xi = ((uint16_t) q * 3) >> 8;
-                    sum += (xi - 1) * y[i].qs[j*5 + l*16 + m];
-                }
-            }
-        }
-
-        for (size_t l = 0; l < 4; ++l) {
-            for (size_t j = 0; j < sizeof(x->qh); ++j) {
-                uint8_t q = x[i].qh[j] * pow3[l];
-                uint16_t xi = ((uint16_t) q * 3) >> 8;
-                sum += (xi - 1) * y[i].qs[sizeof(x->qs)*5 + l*sizeof(x->qh) + j];
-            }
-        }
-
-        sumf += (float) sum * (GGML_FP16_TO_FP32(x[i].d) * y[i].d);
-    }
-
-    *s = sumf;
-#endif
-}
GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { - assert(nrc == 1); - UNUSED(nrc); - UNUSED(bx); - UNUSED(by); - UNUSED(bs); - - const block_tq2_0 * GGML_RESTRICT x = vx; - const block_q8_K * GGML_RESTRICT y = vy; - - const int nb = n / QK_K; - -#if defined(__ARM_NEON) - float sumf = 0.0f; - - const uint8x16_t m3 = vdupq_n_u8(3); - - for (int i = 0; i < nb; ++i) { -#if defined(__ARM_FEATURE_DOTPROD) - int32x4_t sumi0 = vdupq_n_s32(0); - int32x4_t sumi1 = vdupq_n_s32(0); -#else - int16x8_t sumi0 = vdupq_n_s16(0); - int16x8_t sumi1 = vdupq_n_s16(0); -#endif - - for (size_t j = 0; j < sizeof(x->qs); j += 32) { - uint8x16_t qx0 = vld1q_u8(x[i].qs + j); - uint8x16_t qx1 = vld1q_u8(x[i].qs + j + 16); - uint8x16_t qx2 = vshrq_n_u8(qx0, 2); - uint8x16_t qx3 = vshrq_n_u8(qx1, 2); - uint8x16_t qx4 = vshrq_n_u8(qx0, 4); - uint8x16_t qx5 = vshrq_n_u8(qx1, 4); - uint8x16_t qx6 = vshrq_n_u8(qx0, 6); - uint8x16_t qx7 = vshrq_n_u8(qx1, 6); - - int8x16_t sqx0 = vreinterpretq_s8_u8(vandq_u8(qx0, m3)); - int8x16_t sqx1 = vreinterpretq_s8_u8(vandq_u8(qx1, m3)); - int8x16_t sqx2 = vreinterpretq_s8_u8(vandq_u8(qx2, m3)); - int8x16_t sqx3 = vreinterpretq_s8_u8(vandq_u8(qx3, m3)); - int8x16_t sqx4 = vreinterpretq_s8_u8(vandq_u8(qx4, m3)); - int8x16_t sqx5 = vreinterpretq_s8_u8(vandq_u8(qx5, m3)); - int8x16_t sqx6 = vreinterpretq_s8_u8(vandq_u8(qx6, m3)); - int8x16_t sqx7 = vreinterpretq_s8_u8(vandq_u8(qx7, m3)); - - const int8x16_t qy0 = vld1q_s8(y[i].qs + j*4 + 0); - const int8x16_t qy1 = vld1q_s8(y[i].qs + j*4 + 16); - const int8x16_t qy2 = vld1q_s8(y[i].qs + j*4 + 32); - const int8x16_t qy3 = vld1q_s8(y[i].qs + j*4 + 48); - const int8x16_t qy4 = vld1q_s8(y[i].qs + j*4 + 64); - const int8x16_t qy5 = vld1q_s8(y[i].qs + j*4 + 80); - const int8x16_t qy6 = vld1q_s8(y[i].qs + j*4 + 96); - const int8x16_t qy7 = vld1q_s8(y[i].qs + j*4 + 112); - -#if defined(__ARM_FEATURE_DOTPROD) - sumi0 = vdotq_s32(sumi0, sqx0, qy0); - sumi1 = vdotq_s32(sumi1, sqx1, qy1); - sumi0 = vdotq_s32(sumi0, sqx2, qy2); - sumi1 = vdotq_s32(sumi1, sqx3, qy3); - sumi0 = vdotq_s32(sumi0, sqx4, qy4); - sumi1 = vdotq_s32(sumi1, sqx5, qy5); - sumi0 = vdotq_s32(sumi0, sqx6, qy6); - sumi1 = vdotq_s32(sumi1, sqx7, qy7); -#else - sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx0), vget_low_s8(qy0)); - sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx0), vget_high_s8(qy0)); - sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx1), vget_low_s8(qy1)); - sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx1), vget_high_s8(qy1)); - sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx2), vget_low_s8(qy2)); - sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx2), vget_high_s8(qy2)); - sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx3), vget_low_s8(qy3)); - sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx3), vget_high_s8(qy3)); - sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx4), vget_low_s8(qy4)); - sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx4), vget_high_s8(qy4)); - sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx5), vget_low_s8(qy5)); - sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx5), vget_high_s8(qy5)); - sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx6), vget_low_s8(qy6)); - sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx6), vget_high_s8(qy6)); - sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx7), vget_low_s8(qy7)); - sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx7), vget_high_s8(qy7)); -#endif - } - - const int16x8_t ysum0 = vld1q_s16(y[i].bsums); - const int16x8_t ysum1 = vld1q_s16(y[i].bsums + 8); - - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; - -#if defined(__ARM_FEATURE_DOTPROD) - sumi0 = vaddq_s32(sumi0, 
sumi1); - sumi0 = vsubq_s32(sumi0, vpaddlq_s16(vaddq_s16(ysum0, ysum1))); - - sumf += d * (float) vaddvq_s32(sumi0); -#else - sumi0 = vaddq_s16(sumi0, sumi1); - sumi0 = vsubq_s16(sumi0, vaddq_s16(ysum0, ysum1)); - - sumf += d * (float) vaddlvq_s16(sumi0); -#endif - } - - *s = sumf; - -#elif defined(__AVX2__) - __m256 sumf = _mm256_setzero_ps(); - - for (int i = 0; i < nb; ++i) { - // 16-bit sums, because 256*127 still fits - __m256i sumi0 = _mm256_setzero_si256(); - __m256i sumi1 = _mm256_setzero_si256(); - - for (size_t j = 0; j < sizeof(x->qs); j += 32) { - __m256i qx0 = _mm256_loadu_si256((const __m256i *) (x[i].qs + j)); - __m256i qx1 = _mm256_srli_epi16(qx0, 2); - __m256i qx2 = _mm256_srli_epi16(qx0, 4); - __m256i qx3 = _mm256_srli_epi16(qx0, 6); - - // 0, 1, 2 (should not be 3) - qx0 = _mm256_and_si256(qx0, _mm256_set1_epi8(3)); - qx1 = _mm256_and_si256(qx1, _mm256_set1_epi8(3)); - qx2 = _mm256_and_si256(qx2, _mm256_set1_epi8(3)); - qx3 = _mm256_and_si256(qx3, _mm256_set1_epi8(3)); - - const __m256i qy0 = _mm256_loadu_si256((const __m256i *) (y[i].qs + j*4 + 0)); - const __m256i qy1 = _mm256_loadu_si256((const __m256i *) (y[i].qs + j*4 + 32)); - const __m256i qy2 = _mm256_loadu_si256((const __m256i *) (y[i].qs + j*4 + 64)); - const __m256i qy3 = _mm256_loadu_si256((const __m256i *) (y[i].qs + j*4 + 96)); - - qx0 = _mm256_maddubs_epi16(qx0, qy0); - qx1 = _mm256_maddubs_epi16(qx1, qy1); - qx2 = _mm256_maddubs_epi16(qx2, qy2); - qx3 = _mm256_maddubs_epi16(qx3, qy3); - - sumi0 = _mm256_add_epi16(sumi0, _mm256_add_epi16(qx0, qx1)); - sumi1 = _mm256_add_epi16(sumi1, _mm256_add_epi16(qx2, qx3)); - } - - const __m256i ysum = _mm256_loadu_si256((const __m256i *) y[i].bsums); - const __m256 d = _mm256_set1_ps(y[i].d * GGML_FP16_TO_FP32(x[i].d)); - - sumi0 = _mm256_add_epi16(sumi0, sumi1); - sumi0 = _mm256_sub_epi16(sumi0, ysum); - sumi0 = _mm256_madd_epi16(sumi0, _mm256_set1_epi16(1)); - - sumf = _mm256_add_ps(_mm256_mul_ps(_mm256_cvtepi32_ps(sumi0), d), sumf); - } - - *s = hsum_float_8(sumf); - -#else - float sumf = 0.0f; - - for (int i = 0; i < nb; ++i) { - int32_t sumi = 0; - - for (size_t j = 0; j < sizeof(x->qs); j += 32) { - for (size_t l = 0; l < 4; ++l) { - for (size_t k = 0; k < 32; ++k) { - sumi += y[i].qs[j*4 + l*32 + k] * (((x[i].qs[j + k] >> (l*2)) & 3) - 1); - } - } - } - - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); - - sumf += (float) sumi * d; - } - - *s = sumf; -#endif -} - -void ggml_vec_dot_q2_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { - assert(nrc == 1); - UNUSED(nrc); - UNUSED(bx); - UNUSED(by); - UNUSED(bs); - - const block_q2_K * GGML_RESTRICT x = vx; - const block_q8_K * GGML_RESTRICT y = vy; - - const int nb = n / QK_K; - -#ifdef __ARM_FEATURE_SVE - const int vector_length = svcntb()*8; - const svuint8_t m3s = svdup_n_u8(0x3); - const svuint32_t m4s = svdup_n_u32(0xF); - const svint32_t vzero_sv = svdup_n_s32(0); - svfloat32_t acc_sum = svdup_n_f32(0); - svbool_t pred_s32 = svptrue_pat_b32(SV_VL4); - - switch (vector_length) { - case 128: - for (int i = 0; i < nb; ++i) { - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); - svfloat32_t d_broad = svdup_n_f32((float32_t)d); - const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin); - svfloat32_t dmin_broad = svdup_n_f32((float32_t)dmin); - - const uint8_t * GGML_RESTRICT q2 = x[i].qs; - const int8_t * GGML_RESTRICT q8_sv = y[i].qs; - const uint8_t * GGML_RESTRICT sc = x[i].scales; - - svuint32_t 
mins_and_scales_sve = svld1ub_u32(svptrue_b32(), sc); - const svint32_t mins_sv_1 = svreinterpret_s32_u32(svlsr_n_u32_x(svptrue_b32(), mins_and_scales_sve, 4)); - - mins_and_scales_sve = svld1ub_u32(svptrue_b32(), sc+4); - const svint32_t mins_sv_2 = svreinterpret_s32_u32(svlsr_n_u32_x(svptrue_b32(), mins_and_scales_sve, 4)); - - svint32_t q8sums_sv_1 = svld1sh_s32(svptrue_b32(), y[i].bsums); - svint32_t q8sums_sv_2 = svld1sh_s32(svptrue_b32(), y[i].bsums+4); - - const svint32_t s0 = svadd_s32_x(svptrue_b32(), svmul_s32_x(svptrue_b32(), mins_sv_1, q8sums_sv_1), svmul_s32_x(svptrue_b32(), mins_sv_2, q8sums_sv_2)); - - mins_and_scales_sve = svld1ub_u32(svptrue_b32(), sc+8); - const svint32_t mins_sv_3 = svreinterpret_s32_u32(svlsr_n_u32_x(svptrue_b32(), mins_and_scales_sve, 4)); - - mins_and_scales_sve = svld1ub_u32(svptrue_b32(), sc+12); - const svint32_t mins_sv_4 = svreinterpret_s32_u32(svlsr_n_u32_x(svptrue_b32(), mins_and_scales_sve, 4)); - - q8sums_sv_1 = svld1sh_s32(svptrue_b32(), y[i].bsums+8); - q8sums_sv_2 = svld1sh_s32(svptrue_b32(), y[i].bsums+12); - - svint32_t s1 = svadd_s32_x(svptrue_b32(), svmul_s32_x(svptrue_b32(), mins_sv_3, q8sums_sv_1), svmul_s32_x(svptrue_b32(), mins_sv_4, q8sums_sv_2)); - - svfloat32_t temp = svcvt_f32_s32_x(svptrue_b32(), svadd_s32_x(svptrue_b32(), s0, s1)); - - acc_sum = svmla_f32_m(svptrue_b32(), acc_sum, temp, dmin_broad); - - svint32_t sumi1 = svdup_n_s32(0); - - { - const svuint8_t q2bits_1 = svld1_u8(svptrue_b8(), q2); - svint8_t q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), q2bits_1, m3s)); - svint8_t q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; - const svint32_t scales_sv = svreinterpret_s32_u32(svand_u32_m(svptrue_b32(), svld1ub_u32(svptrue_b32(), sc), m4s)); - - sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv, 0)); - - const svuint8_t q2bits_3 = svld1_u8(svptrue_b8(), q2+16); - q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), q2bits_3, m3s)); - q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; - - sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv, 1)); - - q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_1, 2), m3s)); - q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; - - sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv, 2)); - - q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_3, 2), m3s)); - q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; - - sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv, 3)); - - - const svint32_t scales_sv_1 = svreinterpret_s32_u32(svand_u32_m(svptrue_b32(), svld1ub_u32(svptrue_b32(), sc+4), m4s)); - - q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_1, 4), m3s)); - q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; - - sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_1, 0)); - - q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_3, 4), m3s)); - q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; - - sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_1, 1)); - - q2bytes_sv = 
svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_1, 6), m3s)); - q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; - - sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_1, 2)); - - q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_3, 6), m3s)); - q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; - - sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_1, 3)); - - //------------------------------- - - q2 += 32; - const svint32_t scales_sv_2 = svreinterpret_s32_u32(svand_u32_m(svptrue_b32(), svld1ub_u32(svptrue_b32(), sc+8), m4s)); - const svuint8_t q2bits_2 = svld1_u8(svptrue_b8(), q2); - - q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), q2bits_2, m3s)); - q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; - - sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_2, 0)); - - const svuint8_t q2bits_4 = svld1_u8(svptrue_b8(), q2+16); - q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), q2bits_4, m3s)); - q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; - - sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_2, 1)); - - - q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_2, 2), m3s)); - q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; - - sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_2, 2)); - - q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_4, 2), m3s)); - q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; - - sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_2, 3)); - - - const svint32_t scales_sv_3 = svreinterpret_s32_u32(svand_u32_m(svptrue_b32(), svld1ub_u32(svptrue_b32(), sc+12), m4s)); - - q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_2, 4), m3s)); - q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; - - sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_3, 0)); - - q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_4, 4), m3s)); - q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; - - sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_3, 1)); - - - - q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_2, 6), m3s)); - q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; - - sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_3, 2)); - - q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_4, 6), m3s)); - q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16; - - sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_3, 3)); - } - acc_sum = svmla_f32_m(svptrue_b32(), acc_sum, svcvt_f32_s32_x(svptrue_b32(), sumi1), d_broad); - } - *s = svaddv_f32(svptrue_b32(), acc_sum); - break; - - case 256: - case 512: - for (int i = 0; i < nb; ++i) { - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); - 
-                svfloat32_t d_broad = svdup_n_f32((float32_t)d);
-                const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
-                svfloat32_t dmin_broad = svdup_n_f32((float32_t)dmin);
-
-                const uint8_t * GGML_RESTRICT q2 = x[i].qs;
-                const int8_t * GGML_RESTRICT q8_sv = y[i].qs;
-                const uint8_t * GGML_RESTRICT sc = x[i].scales;
-
-                const svuint32_t mins_and_scales_sve = svld1ub_u32(svptrue_pat_b32(SV_VL8), sc); sc += 8;
-                const svint32_t scales_sv = svreinterpret_s32_u32(svand_u32_m(svptrue_pat_b32(SV_VL8), mins_and_scales_sve, m4s));
-                const svint32_t mins_sv_1 = svreinterpret_s32_u32(svlsr_n_u32_x(svptrue_pat_b32(SV_VL8), mins_and_scales_sve, 4));
-                svint32_t q8sums_sv_1 = svld1sh_s32(svptrue_pat_b32(SV_VL8), y[i].bsums);
-
-                const svuint32_t mins_and_scales_sve_1 = svld1ub_u32(svptrue_pat_b32(SV_VL8), sc);
-                const svint32_t scales_sv_1 = svreinterpret_s32_u32(svand_u32_m(svptrue_pat_b32(SV_VL8), mins_and_scales_sve_1, m4s));
-                const svint32_t mins_sv_2 = svreinterpret_s32_u32(svlsr_n_u32_x(svptrue_pat_b32(SV_VL8), mins_and_scales_sve_1, 4));
-
-                svint32_t q8sums_sv_2 = svld1sh_s32(svptrue_pat_b32(SV_VL8), y[i].bsums+8);
-
-                svfloat32_t temp = svcvt_f32_s32_x(svptrue_pat_b32(SV_VL8), svadd_s32_x(svptrue_pat_b32(SV_VL8), svmul_s32_x(svptrue_pat_b32(SV_VL8), mins_sv_1, q8sums_sv_1), svmul_s32_x(svptrue_pat_b32(SV_VL8), mins_sv_2, q8sums_sv_2)));
-
-                acc_sum = svmla_f32_m(svptrue_pat_b32(SV_VL8), acc_sum, temp, dmin_broad);
-
-                svint32_t sumi1 = svdup_n_s32(0);
-
-                {
-                    const svuint8_t q2bits_1 = svld1_u8(svptrue_pat_b8(SV_VL32), q2);
-                    svint8_t q2bytes_sv = svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), q2bits_1, m3s));
-                    svint8_t q8bytes_sv = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); q8_sv += 32;
-
-                    svint32_t scale_1 = svsel(pred_s32, svdup_lane_s32(scales_sv, 0), svdup_lane_s32(scales_sv, 1));
-                    sumi1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), scale_1);
-
-                    q2bytes_sv = svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q2bits_1, 2), m3s));
-                    q8bytes_sv = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); q8_sv += 32;
-
-                    svint32_t scale_2 = svsel(pred_s32, svdup_lane_s32(scales_sv, 2), svdup_lane_s32(scales_sv, 3));
-                    sumi1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1, svdot_s32(svdup_n_s32(0), q2bytes_sv, q8bytes_sv), scale_2);
-
-                    q2bytes_sv = svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q2bits_1, 4), m3s));
-                    q8bytes_sv = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); q8_sv += 32;
-
-                    scale_1 = svsel(pred_s32, svdup_lane_s32(scales_sv, 4), svdup_lane_s32(scales_sv, 5));
-                    sumi1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), scale_1);
-
-                    q2bytes_sv = svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q2bits_1, 6), m3s));
-                    q8bytes_sv = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); q8_sv += 32;
-
-                    scale_2 = svsel(pred_s32, svdup_lane_s32(scales_sv, 6), svdup_lane_s32(scales_sv, 7));
-                    sumi1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), scale_2);
-
-                    q2 += 32;
-
-                    const svuint8_t q2bits_2 = svld1_u8(svptrue_pat_b8(SV_VL32), q2);
-                    q2bytes_sv = svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), q2bits_2, m3s));
-                    q8bytes_sv = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); q8_sv += 32;
-
-                    scale_1 = svsel(pred_s32, svdup_lane_s32(scales_sv_1, 0), svdup_lane_s32(scales_sv_1, 1));
-                    sumi1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), scale_1);
-
-                    q2bytes_sv = svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q2bits_2, 2), m3s));
-                    q8bytes_sv = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); q8_sv += 32;
-
-                    scale_2 = svsel(pred_s32, svdup_lane_s32(scales_sv_1, 2), svdup_lane_s32(scales_sv_1, 3));
-                    sumi1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), scale_2);
-
-                    q2bytes_sv = svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q2bits_2, 4), m3s));
-                    q8bytes_sv = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); q8_sv += 32;
-
-                    scale_1 = svsel(pred_s32, svdup_lane_s32(scales_sv_1, 4), svdup_lane_s32(scales_sv_1, 5));
-                    sumi1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), scale_1);
-
-                    q2bytes_sv = svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q2bits_2, 6), m3s));
-                    q8bytes_sv = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); q8_sv += 32;
-
-                    scale_2 = svsel(pred_s32, svdup_lane_s32(scales_sv_1, 6), svdup_lane_s32(scales_sv_1, 7));
-                    sumi1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), scale_2);
-                }
-                acc_sum = svmla_f32_m(svptrue_pat_b32(SV_VL8), acc_sum, svcvt_f32_s32_x(svptrue_pat_b32(SV_VL8), sumi1), d_broad);
-            }
-            *s = svaddv_f32(svptrue_pat_b32(SV_VL8), acc_sum);
-            break;
-
-        default:
-            assert(false && "Unsupported vector length");
-            break;
-    }
-
-#elif __ARM_NEON
-    const uint8x16_t m3 = vdupq_n_u8(0x3);
-    const uint8x16_t m4 = vdupq_n_u8(0xF);
-
-    const int32x4_t vzero = vdupq_n_s32(0);
-
-    ggml_int8x16x2_t q2bytes;
-    uint8_t aux[16];
-
-    float sum = 0;
-
-    for (int i = 0; i < nb; ++i) {
-        const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
-        const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
-
-        const uint8_t * GGML_RESTRICT q2 = x[i].qs;
-        const int8_t * GGML_RESTRICT q8 = y[i].qs;
-        const uint8_t * GGML_RESTRICT sc = x[i].scales;
-
-        const uint8x16_t mins_and_scales = vld1q_u8(sc);
-        const uint8x16_t scales = vandq_u8(mins_and_scales, m4);
-        vst1q_u8(aux, scales);
-
-        const uint8x16_t mins = vshrq_n_u8(mins_and_scales, 4);
-        const ggml_int16x8x2_t q8sums = ggml_vld1q_s16_x2(y[i].bsums);
-        const ggml_int16x8x2_t mins16 = {{vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(mins))), vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(mins)))}};
-        const int32x4_t s0 = vaddq_s32(vmull_s16(vget_low_s16 (mins16.val[0]), vget_low_s16 (q8sums.val[0])),
-                                       vmull_s16(vget_high_s16(mins16.val[0]), vget_high_s16(q8sums.val[0])));
-        const int32x4_t s1 = vaddq_s32(vmull_s16(vget_low_s16 (mins16.val[1]), vget_low_s16 (q8sums.val[1])),
-                                       vmull_s16(vget_high_s16(mins16.val[1]), vget_high_s16(q8sums.val[1])));
-        sum += dmin * vaddvq_s32(vaddq_s32(s0, s1));
-
-        int isum = 0;
-        int is = 0;
-
-// We use this macro instead of a function call because for some reason
-// the code runs 2-3% slower, even if the function is declared inline
-#define MULTIPLY_ACCUM_WITH_SCALE(index)\
-    isum += vaddvq_s32(ggml_vdotq_s32(vzero, q2bytes.val[0], q8bytes.val[0])) * aux[is+(index)];\
-    isum += vaddvq_s32(ggml_vdotq_s32(vzero, q2bytes.val[1], q8bytes.val[1])) * aux[is+1+(index)];
-
-#define SHIFT_MULTIPLY_ACCUM_WITH_SCALE(shift, index)\
-    q8bytes = ggml_vld1q_s8_x2(q8); q8 += 32;\
-    q2bytes.val[0] = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits.val[0], (shift)), m3));\
-    q2bytes.val[1] = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits.val[1], (shift)), m3));\
-    MULTIPLY_ACCUM_WITH_SCALE((index));
-
-        for (int j = 0; j < QK_K/128; ++j) {
-            const ggml_uint8x16x2_t q2bits = ggml_vld1q_u8_x2(q2); q2 += 32;
-
-            ggml_int8x16x2_t q8bytes = ggml_vld1q_s8_x2(q8); q8 += 32;
-            q2bytes.val[0] = vreinterpretq_s8_u8(vandq_u8(q2bits.val[0], m3));
-            q2bytes.val[1] = vreinterpretq_s8_u8(vandq_u8(q2bits.val[1], m3));
-
-            MULTIPLY_ACCUM_WITH_SCALE(0);
-
-            SHIFT_MULTIPLY_ACCUM_WITH_SCALE(2, 2);
-            SHIFT_MULTIPLY_ACCUM_WITH_SCALE(4, 4);
-            SHIFT_MULTIPLY_ACCUM_WITH_SCALE(6, 6);
-
-            is += 8;
-        }
-
-        sum += d * isum;
-    }
-
-    *s = sum;
-
-#elif defined __AVX2__
-
-    const __m256i m3 = _mm256_set1_epi8(3);
-    const __m128i m4 = _mm_set1_epi8(0xF);
-
-    __m256 acc = _mm256_setzero_ps();
-
-    for (int i = 0; i < nb; ++i) {
-
-        const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
-        const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
-
-        const uint8_t * GGML_RESTRICT q2 = x[i].qs;
-        const int8_t * GGML_RESTRICT q8 = y[i].qs;
-
-        const __m128i mins_and_scales = _mm_loadu_si128((const __m128i*)x[i].scales);
-        const __m128i scales8 = _mm_and_si128(mins_and_scales, m4);
-        const __m128i mins8 = _mm_and_si128(_mm_srli_epi16(mins_and_scales, 4), m4);
-        const __m256i mins = _mm256_cvtepi8_epi16(mins8);
-        const __m256i prod = _mm256_madd_epi16(mins, _mm256_loadu_si256((const __m256i*)y[i].bsums));
-
-        acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&dmin), _mm256_cvtepi32_ps(prod), acc);
-
-        const __m256i all_scales = _mm256_cvtepi8_epi16(scales8);
-        const __m128i l_scales = _mm256_extracti128_si256(all_scales, 0);
-        const __m128i h_scales = _mm256_extracti128_si256(all_scales, 1);
-        const __m256i scales[2] = {MM256_SET_M128I(l_scales, l_scales), MM256_SET_M128I(h_scales, h_scales)};
-
-        __m256i sumi = _mm256_setzero_si256();
-
-        for (int j = 0; j < QK_K/128; ++j) {
-
-            const __m256i q2bits = _mm256_loadu_si256((const __m256i*)q2); q2 += 32;
-
-            const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
-            const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
-            const __m256i q8_2 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
-            const __m256i q8_3 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
-
-            const __m256i q2_0 = _mm256_and_si256(q2bits, m3);
-            const __m256i q2_1 = _mm256_and_si256(_mm256_srli_epi16(q2bits, 2), m3);
-            const __m256i q2_2 = _mm256_and_si256(_mm256_srli_epi16(q2bits, 4), m3);
-            const __m256i q2_3 = _mm256_and_si256(_mm256_srli_epi16(q2bits, 6), m3);
-
-            __m256i p0 = _mm256_maddubs_epi16(q2_0, q8_0);
-            __m256i p1 = _mm256_maddubs_epi16(q2_1, q8_1);
-            __m256i p2 = _mm256_maddubs_epi16(q2_2, q8_2);
-            __m256i p3 = _mm256_maddubs_epi16(q2_3, q8_3);
-
-            p0 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(0)), p0);
-            p1 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(1)), p1);
-            p2 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(2)), p2);
-            p3 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(3)), p3);
-
-            p0 = _mm256_add_epi32(p0, p1);
-            p2 = _mm256_add_epi32(p2, p3);
-
-            sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p0, p2));
-        }
-
-        acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi), acc);
-
-    }
-
-    *s = hsum_float_8(acc);
-
-#elif defined __AVX__
-
-    const __m128i m3 = _mm_set1_epi8(0x3);
-    const __m128i m4 = _mm_set1_epi8(0xF);
-    const __m128i m2 = _mm_set1_epi8(0x2);
-
-    __m256 acc = _mm256_setzero_ps();
-
-    for (int i = 0; i < nb; ++i) {
-
-        const float dall = y[i].d * GGML_FP16_TO_FP32(x[i].d);
-        const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
-
-        const uint8_t * GGML_RESTRICT q2 = x[i].qs;
-        const int8_t * GGML_RESTRICT q8 = y[i].qs;
-
-        // load mins and scales from block_q2_K.scales[QK_K/16]
-        const __m128i mins_and_scales = _mm_loadu_si128((const __m128i*)x[i].scales);
-        const __m128i scales16 = _mm_and_si128(mins_and_scales, m4);
-        const __m128i mins16 = _mm_and_si128(_mm_srli_epi16(mins_and_scales, 4), m4);
-        const __m128i mins_0 = _mm_cvtepi8_epi16(mins16);
-        const __m128i mins_1 = _mm_cvtepi8_epi16(_mm_unpackhi_epi64(mins16, mins16));
-
-        // summs = y[i].bsums * (x[i].scales >> 4) in 16bits*8*2 to 32bits*4*2
-        const __m128i summs_0 = _mm_madd_epi16(mins_0, _mm_loadu_si128((const __m128i*)&y[i].bsums[0]));
-        const __m128i summs_1 = _mm_madd_epi16(mins_1, _mm_loadu_si128((const __m128i*)&y[i].bsums[8]));
-
-        // sumf += -dmin * summs in 32bits*8
-        acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&dmin), _mm256_cvtepi32_ps(MM256_SET_M128I(summs_1, summs_0))), acc);
-
-        const __m128i scales_0 = _mm_cvtepi8_epi16(scales16);
-        const __m128i scales_1 = _mm_cvtepi8_epi16(_mm_unpackhi_epi64(scales16, scales16));
-        const __m128i scales[2] = { scales_0, scales_1 };
-
-        __m128i sumi_0 = _mm_setzero_si128();
-        __m128i sumi_1 = _mm_setzero_si128();
-
-        for (int j = 0; j < QK_K/128; ++j) {
-
-            // load Q8 quants int8*16*8 from block_q8_K.qs[QK_K]
-            const __m128i q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
-            const __m128i q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
-            const __m128i q8_2 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
-            const __m128i q8_3 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
-            const __m128i q8_4 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
-            const __m128i q8_5 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
-            const __m128i q8_6 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
-            const __m128i q8_7 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
-
-            // load 2bits*16*8 from block_q2_K.qs[QK_K/4]
-            __m128i q2bits = _mm_loadu_si128((const __m128i*)q2); q2 += 16;
-            const __m128i q2_0 = _mm_and_si128(q2bits, m3);
-            const __m128i q2_2 = _mm_and_si128(_mm_srli_epi16(q2bits, 2), m3);
-            const __m128i q2_4 = _mm_and_si128(_mm_srli_epi16(q2bits, 4), m3);
-            const __m128i q2_6 = _mm_and_si128(_mm_srli_epi16(q2bits, 6), m3);
-            q2bits = _mm_loadu_si128((const __m128i*)q2); q2 += 16;
-            const __m128i q2_1 = _mm_and_si128(q2bits, m3);
-            const __m128i q2_3 = _mm_and_si128(_mm_srli_epi16(q2bits, 2), m3);
-            const __m128i q2_5 = _mm_and_si128(_mm_srli_epi16(q2bits, 4), m3);
-            const __m128i q2_7 = _mm_and_si128(_mm_srli_epi16(q2bits, 6), m3);
-
-            // isuml = q8[l] * ((q2[l] >> shift) & 3) in 8bits*16*8 to 16bits*8*8
-            __m128i p0 = _mm_maddubs_epi16(q2_0, q8_0);
-            __m128i p1 = _mm_maddubs_epi16(q2_1, q8_1);
-            __m128i p2 = _mm_maddubs_epi16(q2_2, q8_2);
-            __m128i p3 = _mm_maddubs_epi16(q2_3, q8_3);
-            __m128i p4 = _mm_maddubs_epi16(q2_4, q8_4);
-            __m128i p5 = _mm_maddubs_epi16(q2_5, q8_5);
-            __m128i p6 = _mm_maddubs_epi16(q2_6, q8_6);
-            __m128i p7 = _mm_maddubs_epi16(q2_7, q8_7);
-
-            // isum += (x[i].scales[is++] & 0xF) * isuml in 16bits*8*8 to 32bits*4*8
-            __m128i shuffle = _mm_set1_epi16(0x0100);
-            p0 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p0);
-            shuffle = _mm_add_epi16(shuffle, m2);
-            p1 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p1);
-            shuffle = _mm_add_epi16(shuffle, m2);
-            p2 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p2);
-            shuffle = _mm_add_epi16(shuffle, m2);
-            p3 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p3);
-            shuffle = _mm_add_epi16(shuffle, m2);
-            p4 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p4);
-            shuffle = _mm_add_epi16(shuffle, m2);
-            p5 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p5);
-            shuffle = _mm_add_epi16(shuffle, m2);
-            p6 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p6);
-            shuffle = _mm_add_epi16(shuffle, m2);
-            p7 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p7);
-
-            p0 = _mm_add_epi32(p0, p1);
-            p2 = _mm_add_epi32(p2, p3);
-            p4 = _mm_add_epi32(p4, p5);
-            p6 = _mm_add_epi32(p6, p7);
-
-            // isum in 32bits*4*2
-            sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p0, p2));
-            sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p4, p6));
-        }
-
-        // sumf += dall * isum - dmin * summs in 32bits
-        __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0);
-        acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&dall), _mm256_cvtepi32_ps(sumi)), acc);
-    }
-
-    *s = hsum_float_8(acc);
-
-#elif defined __wasm_simd128__
-    float sumf = 0;
-
-    for (int i = 0; i < nb; ++i) {
-        const uint8_t * q2 = x[i].qs;
-        const int8_t * q8 = y[i].qs;
-        const uint8_t * sc = x[i].scales;
-
-        // Vectorized summs calculation
-        v128_t summs_vec = wasm_i32x4_splat(0);
-        {
-            v128_t sc_vec = wasm_v128_load(sc);
-            v128_t sc_upper = wasm_u8x16_shr(sc_vec, 4);
-
-            v128_t sc_low = wasm_u16x8_extend_low_u8x16(sc_upper);
-            v128_t sc_high = wasm_u16x8_extend_high_u8x16(sc_upper);
-
-            v128_t bsums1 = wasm_v128_load(&y[i].bsums[0]);
-            v128_t bsums2 = wasm_v128_load(&y[i].bsums[8]);
-
-            summs_vec = wasm_i32x4_add(
-                wasm_i32x4_add(wasm_i32x4_dot_i16x8(sc_low, bsums1),
-                               wasm_i32x4_dot_i16x8(sc_high, bsums2)),
-                summs_vec
-            );
-
-            summs_vec = wasm_i32x4_add(summs_vec, wasm_i32x4_shuffle(summs_vec, summs_vec, 2, 3, 0, 1));
-            summs_vec = wasm_i32x4_add(summs_vec, wasm_i32x4_shuffle(summs_vec, summs_vec, 1, 0, 3, 2));
-        }
-        int32_t summs = wasm_i32x4_extract_lane(summs_vec, 0);
-
-        // Vectorized isum calculation
-        int32_t isum = 0;
-        const uint8_t * sc_ptr = sc;
-        const int k_iters = QK_K/128;
-
-        for (int k = 0; k < k_iters; ++k) {
-            v128_t isum_vec = wasm_i32x4_splat(0);
-            int shift = 0;
-
-            for (int j = 0; j < 4; ++j) {
-                const int d0 = (sc_ptr[0] & 0xF);
-                const int d1 = (sc_ptr[1] & 0xF);
-                sc_ptr += 2;
-
-                // Process first 16 elements
-                v128_t q2_0 = wasm_v128_load(q2);
-                v128_t q8_0 = wasm_v128_load(q8);
-                v128_t q2_shift_0 = wasm_u8x16_shr(q2_0, shift);
-                v128_t q2_bits_0 = wasm_v128_and(q2_shift_0, wasm_i8x16_splat(0x03));
-
-                // Process next 16 elements
-                v128_t q2_1 = wasm_v128_load(q2 + 16);
-                v128_t q8_1 = wasm_v128_load(q8 + 16);
-                v128_t q2_shift_1 = wasm_u8x16_shr(q2_1, shift);
-                v128_t q2_bits_1 = wasm_v128_and(q2_shift_1, wasm_i8x16_splat(0x03));
-
-                // Calculate dot products
-                v128_t p0 = wasm_i32x4_dot_i16x8(
-                    wasm_i16x8_extend_low_i8x16(q8_0),
-                    wasm_i16x8_extend_low_i8x16(q2_bits_0)
-                );
-                v128_t p1 = wasm_i32x4_dot_i16x8(
-                    wasm_i16x8_extend_high_i8x16(q8_0),
-                    wasm_i16x8_extend_high_i8x16(q2_bits_0)
-                );
-                v128_t p2 = wasm_i32x4_dot_i16x8(
-                    wasm_i16x8_extend_low_i8x16(q8_1),
-                    wasm_i16x8_extend_low_i8x16(q2_bits_1)
-                );
-                v128_t p3 = wasm_i32x4_dot_i16x8(
-                    wasm_i16x8_extend_high_i8x16(q8_1),
-                    wasm_i16x8_extend_high_i8x16(q2_bits_1)
-                );
-
-                // Accumulate scaled results
-                v128_t scaled = wasm_i32x4_add(
-                    wasm_i32x4_mul(wasm_i32x4_add(p0, p1), wasm_i32x4_splat(d0)),
-                    wasm_i32x4_mul(wasm_i32x4_add(p2, p3), wasm_i32x4_splat(d1))
-                );
-
-                isum_vec = wasm_i32x4_add(isum_vec, scaled);
-                q8 += 32;
-                shift += 2;
-            }
-            q2 += 32;
-
-            // Horizontal sum of isum_vec
-            isum_vec = wasm_i32x4_add(isum_vec, wasm_i32x4_shuffle(isum_vec, isum_vec, 2, 3, 0, 1));
-            isum_vec = wasm_i32x4_add(isum_vec, wasm_i32x4_shuffle(isum_vec, isum_vec, 1, 0, 3, 2));
-            isum += wasm_i32x4_extract_lane(isum_vec, 0);
-        }
-
-        const float dall = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
-        const float dmin = GGML_FP16_TO_FP32(x[i].dmin) * y[i].d;
-        sumf += dall * isum - dmin * summs;
-    }
-
-    *s = sumf;
-
-#elif defined __riscv_v_intrinsic
-
-    const int vector_length = __riscv_vlenb() * 8;
-    float sumf = 0;
-
-    uint8_t temp_01[32] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 };
-    uint8_t atmp[16];
-
-    switch (vector_length) {
-    case 256:
-        for (int i = 0; i < nb; ++i) {
-            const uint8_t * q2 = x[i].qs;
-            const int8_t * q8 = y[i].qs;
-            const uint8_t * sc = x[i].scales;
-
-            const float dall = y[i].d * GGML_FP16_TO_FP32(x[i].d);
-            const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
-
-            size_t vl = 16;
-
-            vuint8m1_t scales = __riscv_vle8_v_u8m1(sc, vl);
-            vuint8m1_t aux = __riscv_vand_vx_u8m1(scales, 0x0F, vl);
-
-            vint16m1_t q8sums = __riscv_vle16_v_i16m1(y[i].bsums, vl);
-
-            vuint8mf2_t scales_2 = __riscv_vle8_v_u8mf2(sc, vl);
-            vuint8mf2_t mins8 = __riscv_vsrl_vx_u8mf2(scales_2, 0x4, vl);
-            vint16m1_t mins = __riscv_vreinterpret_v_u16m1_i16m1(__riscv_vzext_vf2_u16m1(mins8, vl));
-            vint32m2_t prod = __riscv_vwmul_vv_i32m2(q8sums, mins, vl);
-            vint32m1_t vsums = __riscv_vredsum_vs_i32m2_i32m1(prod, __riscv_vmv_v_x_i32m1(0, 1), vl);
-
-            sumf += dmin * __riscv_vmv_x_s_i32m1_i32(vsums);
-
-            vl = 32;
-
-            vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1);
-            vuint8m1_t v_b = __riscv_vle8_v_u8m1(temp_01, vl);
-
-            uint8_t is = 0;
-            int isum = 0;
-
-            for (int j = 0; j < QK_K / 128; ++j) {
-                // load Q2
-                vuint8m1_t q2_x = __riscv_vle8_v_u8m1(q2, vl);
-
-                vuint8m1_t q2_0 = __riscv_vand_vx_u8m1(q2_x, 0x03, vl);
-                vuint8m1_t q2_1 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q2_x, 0x2, vl), 0x03, vl);
-                vuint8m1_t q2_2 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q2_x, 0x4, vl), 0x03, vl);
-                vuint8m1_t q2_3 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q2_x, 0x6, vl), 0x03, vl);
-
-                // duplicate scale elements for product
-                vuint8m1_t sc0 = __riscv_vrgather_vv_u8m1(aux, __riscv_vadd_vx_u8m1(v_b, 0 + is, vl), vl);
-                vuint8m1_t sc1 = __riscv_vrgather_vv_u8m1(aux, __riscv_vadd_vx_u8m1(v_b, 2 + is, vl), vl);
-                vuint8m1_t sc2 = __riscv_vrgather_vv_u8m1(aux, __riscv_vadd_vx_u8m1(v_b, 4 + is, vl), vl);
-                vuint8m1_t sc3 = __riscv_vrgather_vv_u8m1(aux, __riscv_vadd_vx_u8m1(v_b, 6 + is, vl), vl);
-
-                vint16m2_t p0 = __riscv_vreinterpret_v_u16m2_i16m2(__riscv_vwmulu_vv_u16m2(q2_0, sc0, vl));
-                vint16m2_t p1 = __riscv_vreinterpret_v_u16m2_i16m2(__riscv_vwmulu_vv_u16m2(q2_1, sc1, vl));
-                vint16m2_t p2 = __riscv_vreinterpret_v_u16m2_i16m2(__riscv_vwmulu_vv_u16m2(q2_2, sc2, vl));
-                vint16m2_t p3 = __riscv_vreinterpret_v_u16m2_i16m2(__riscv_vwmulu_vv_u16m2(q2_3, sc3, vl));
-
-                // load Q8
-                vint8m1_t q8_0 = __riscv_vle8_v_i8m1(q8, vl);
-                vint8m1_t q8_1 = __riscv_vle8_v_i8m1(q8 + 32, vl);
-                vint8m1_t q8_2 = __riscv_vle8_v_i8m1(q8 + 64, vl);
-                vint8m1_t q8_3 = __riscv_vle8_v_i8m1(q8 + 96, vl);
-
-                vint32m4_t s0 = __riscv_vwmul_vv_i32m4(p0, __riscv_vwcvt_x_x_v_i16m2(q8_0, vl), vl);
-                vint32m4_t s1 = __riscv_vwmul_vv_i32m4(p1, __riscv_vwcvt_x_x_v_i16m2(q8_1, vl), vl);
-                vint32m4_t s2 = __riscv_vwmul_vv_i32m4(p2, __riscv_vwcvt_x_x_v_i16m2(q8_2, vl), vl);
-                vint32m4_t s3 = __riscv_vwmul_vv_i32m4(p3, __riscv_vwcvt_x_x_v_i16m2(q8_3, vl), vl);
-
-                vint32m1_t isum0 = __riscv_vredsum_vs_i32m4_i32m1(__riscv_vadd_vv_i32m4(s0, s1, vl), vzero, vl);
-                vint32m1_t isum1 = __riscv_vredsum_vs_i32m4_i32m1(__riscv_vadd_vv_i32m4(s2, s3, vl), isum0, vl);
-
-                isum += __riscv_vmv_x_s_i32m1_i32(isum1);
-
-                q2 += 32;
-                q8 += 128;
-                is = 8;
-            }
-
-            sumf += dall * isum;
-        }
-        break;
-    case 128:
-        for (int i = 0; i < nb; ++i) {
-            const uint8_t * q2 = x[i].qs;
-            const int8_t * q8 = y[i].qs;
-            const uint8_t * sc = x[i].scales;
-            const float dall = y[i].d * GGML_FP16_TO_FP32(x[i].d);
-            const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
-            uint8_t *patmp = atmp;
-            int vsums;
-            int tmp;
-            __asm__ __volatile__(
-                "vsetivli zero, 16, e8, m1\n\t"
-                "vmv.v.x v8, zero\n\t"
-                "vle8.v v1, (%[sc])\n\t"
-                "vand.vi v0, v1, 0xF\n\t"
-                "vsrl.vi v1, v1, 4\n\t"
-                "vse8.v v0, (%[scale])\n\t"
-                "vsetivli zero, 16, e16, m2\n\t"
-                "vle16.v v2, (%[bsums])\n\t"
-                "vzext.vf2 v0, v1\n\t"
-                "vwmul.vv v4, v0, v2\n\t"
-                "vsetivli zero, 16, e32, m4\n\t"
-                "vredsum.vs v8, v4, v8\n\t"
-                "vmv.x.s %[vsums], v8"
-                : [tmp] "=&r" (tmp), [vsums] "=&r" (vsums)
-                : [sc] "r" (sc), [scale] "r" (atmp), [bsums] "r" (y[i].bsums)
-                : "memory"
-                , "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7"
-                , "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"
-                , "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"
-                , "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
-            );
-            sumf += dmin * vsums;
-            int isum = 0;
-
-            for (int j = 0; j < QK_K/128; ++j) {
-                __asm__ __volatile__(
-                    "vsetvli zero, %[vl32], e8, m2\n\t"
-                    "vle8.v v0, (%[q2])\n\t"
-                    "vsrl.vi v2, v0, 2\n\t"
-                    "vsrl.vi v4, v0, 4\n\t"
-                    "vsrl.vi v6, v0, 6\n\t"
-                    "vand.vi v0, v0, 0x3\n\t"
-                    "vand.vi v2, v2, 0x3\n\t"
-                    "vand.vi v4, v4, 0x3\n\t"
-                    "vsetvli zero, %[vl128], e8, m8\n\t"
-                    "vle8.v v8, (%[q8])\n\t"
-                    "vsetvli zero, %[vl64], e8, m4\n\t"
-                    "vwmul.vv v16, v0, v8\n\t"
-                    "vwmul.vv v24, v4, v12\n\t"
-                    "vsetivli zero, 16, e16, m2\n\t"
-                    "vmv.v.x v0, zero\n\t"
-                    "vwredsum.vs v10, v16, v0\n\t"
-                    "vwredsum.vs v9, v18, v0\n\t"
-                    "vwredsum.vs v8, v20, v0\n\t"
-                    "vwredsum.vs v7, v22, v0\n\t"
-                    "vwredsum.vs v11, v24, v0\n\t"
-                    "vwredsum.vs v12, v26, v0\n\t"
-                    "vwredsum.vs v13, v28, v0\n\t"
-                    "vwredsum.vs v14, v30, v0\n\t"
-                    "vsetivli zero, 4, e32, m1\n\t"
-                    "vslideup.vi v10, v9, 1\n\t"
-                    "vslideup.vi v8, v7, 1\n\t"
-                    "vslideup.vi v11, v12, 1\n\t"
-                    "vslideup.vi v13, v14, 1\n\t"
-                    "vslideup.vi v10, v8, 2\n\t"
-                    "vslideup.vi v11, v13, 2\n\t"
-                    "vsetivli zero, 8, e32, m2\n\t"
-                    "vle8.v v15, (%[scale])\n\t"
-                    "vzext.vf4 v12, v15\n\t"
-                    "vmul.vv v10, v10, v12\n\t"
-                    "vredsum.vs v0, v10, v0\n\t"
-                    "vmv.x.s %[tmp], v0\n\t"
-                    "add %[isum], %[isum], %[tmp]"
-                    : [tmp] "=&r" (tmp), [isum] "+&r" (isum)
-                    : [q2] "r" (q2), [scale] "r" (patmp), [q8] "r" (q8)
-                    , [vl32] "r" (32), [vl64] "r" (64), [vl128] "r" (128)
-                    : "memory"
-                    , "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7"
-                    , "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"
-                    , "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"
-                    , "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
-                );
-                q2 += 32; q8 += 128; patmp += 8;
-            }
-
-            sumf += dall * isum;
-        }
-        break;
-    default:
-        assert(false && "Unsupported vector length");
-        break;
-    }
-
-    *s = sumf;
-
-#elif defined(__POWER9_VECTOR__)
-    const vector signed char lowMask = vec_splats((signed char)0x3);
-    const vector signed char lowScaleMask = vec_splats((signed char)0xF);
-    const vector int v0 = vec_splats((int32_t)0);
-    const vector unsigned char v2 = vec_splats((unsigned char)0x2);
-    const vector unsigned char v6 = vec_splats((unsigned char)0x6);
-    const vector unsigned char v4 = vec_splats((unsigned char)0x4);
-
-    vector float vsumf0 = vec_splats(0.0f);
-    vector float vsumf1 = vec_splats(0.0f);
-    vector float vsumf2 = vec_splats(0.0f);
-    vector float vsumf3 = vec_splats(0.0f);
-
-    for (int i = 0; i < nb; ++i) {
-        vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d));
-        vector float vyd = vec_splats(y[i].d);
-        vector float vd = vec_mul(vxd, vyd);
-
-        vector float vxmin = vec_splats(GGML_FP16_TO_FP32(x[i].dmin));
-        vector float vdmin = vec_mul(vxmin, vyd);
-
-        vector signed short q8ysums0 = vec_xl( 0, y[i].bsums);
-        vector signed short q8ysums1 = vec_xl(16, y[i].bsums);
-
-        vector signed char q2xmins = (vector signed char)vec_xl( 0, x[i].scales);
-        vector signed char vscales = vec_and(q2xmins, lowScaleMask);
-
-        q2xmins = vec_sr(q2xmins, v4);
-        vector signed short q2xmins0 = vec_unpackh(q2xmins);
-        vector signed short q2xmins1 = vec_unpackl(q2xmins);
-
-        vector signed int prod0 = vec_mule(q2xmins0, q8ysums0);
-        vector signed int prod1 = vec_mulo(q2xmins0, q8ysums0);
-        vector signed int prod2 = vec_mule(q2xmins1, q8ysums1);
-        vector signed int prod3 = vec_mulo(q2xmins1, q8ysums1);
-
-        vsumf0 = vec_nmsub(vec_ctf(prod0, 0), vdmin, vsumf0);
-        vsumf1 = vec_nmsub(vec_ctf(prod1, 0), vdmin, vsumf1);
-        vsumf2 = vec_nmsub(vec_ctf(prod2, 0), vdmin, vsumf2);
-        vsumf3 = vec_nmsub(vec_ctf(prod3, 0), vdmin, vsumf3);
-
-        vector signed int vsumi0 = v0;
-        vector signed int vsumi1 = v0;
-        vector signed int vsumi2 = v0;
-        vector signed int vsumi3 = v0;
-        vector signed int vsumi4 = v0;
-        vector signed int vsumi5 = v0;
-        vector signed int vsumi6 = v0;
-        vector signed int vsumi7 = v0;
-
-        const uint8_t * GGML_RESTRICT q2 = x[i].qs;
-        const int8_t * GGML_RESTRICT q8 = y[i].qs;
-
-        for (int j = 0; j < QK_K/128; ++j) {
-            __builtin_prefetch(q2, 0, 1);
-            __builtin_prefetch(q8, 0, 1);
-
-            vector signed char qxs0 = (vector signed char)vec_xl( 0, q2);
-            vector signed char qxs1 = (vector signed char)vec_xl(16, q2);
-            q2 += 32;
-
-            vector unsigned char q2x00 = (vector unsigned char)vec_and(qxs0, lowMask);
-            vector unsigned char q2x01 = (vector unsigned char)vec_and(vec_sr(qxs0, v2), lowMask);
-            vector unsigned char q2x02 = (vector unsigned char)vec_and(vec_sr(qxs0, v4), lowMask);
-            vector unsigned char q2x03 = (vector unsigned char)vec_and(vec_sr(qxs0, v6), lowMask);
-            vector unsigned char q2x10 = (vector unsigned char)vec_and(qxs1, lowMask);
-            vector unsigned char q2x11 = (vector unsigned char)vec_and(vec_sr(qxs1, v2), lowMask);
-            vector unsigned char q2x12 = (vector unsigned char)vec_and(vec_sr(qxs1, v4), lowMask);
-            vector unsigned char q2x13 = (vector unsigned char)vec_and(vec_sr(qxs1, v6), lowMask);
-
-            vector signed char q8y00 = vec_xl(  0, q8);
-            vector signed char q8y10 = vec_xl( 16, q8);
-            vector signed char q8y01 = vec_xl( 32, q8);
-            vector signed char q8y11 = vec_xl( 48, q8);
-            vector signed char q8y02 = vec_xl( 64, q8);
-            vector signed char q8y12 = vec_xl( 80, q8);
-            vector signed char q8y03 = vec_xl( 96, q8);
-            vector signed char q8y13 = vec_xl(112, q8);
-            q8 += 128;
-
-            vector signed int qv0 = vec_msum(q8y00, q2x00, v0);
-            vector signed int qv1 = vec_msum(q8y01, q2x01, v0);
-            vector signed int qv2 = vec_msum(q8y02, q2x02, v0);
-            vector signed int qv3 = vec_msum(q8y03, q2x03, v0);
-            vector signed int qv4 = vec_msum(q8y10, q2x10, v0);
-            vector signed int qv5 = vec_msum(q8y11, q2x11, v0);
-            vector signed int qv6 = vec_msum(q8y12, q2x12, v0);
-            vector signed int qv7 = vec_msum(q8y13, q2x13, v0);
-
-            vector signed short vscales_07 = vec_unpackh(vscales);
-            vector signed int vscales_03 = vec_unpackh(vscales_07);
-            vector signed int vscales_47 = vec_unpackl(vscales_07);
-            vector signed int vs0 = vec_splat(vscales_03, 0);
-            vector signed int vs1 = vec_splat(vscales_03, 1);
-            vector signed int vs2 = vec_splat(vscales_03, 2);
-            vector signed int vs3 = vec_splat(vscales_03, 3);
-            vector signed int vs4 = vec_splat(vscales_47, 0);
-            vector signed int vs5 = vec_splat(vscales_47, 1);
-            vector signed int vs6 = vec_splat(vscales_47, 2);
-            vector signed int vs7 = vec_splat(vscales_47, 3);
-            vscales = vec_sld(vscales, vscales, 8);
-
-            vsumi0 = vec_add(vec_mul(qv0, vs0), vsumi0);
-            vsumi1 = vec_add(vec_mul(qv1, vs2), vsumi1);
-            vsumi2 = vec_add(vec_mul(qv2, vs4), vsumi2);
-            vsumi3 = vec_add(vec_mul(qv3, vs6), vsumi3);
-            vsumi4 = vec_add(vec_mul(qv4, vs1), vsumi4);
-            vsumi5 = vec_add(vec_mul(qv5, vs3), vsumi5);
-            vsumi6 = vec_add(vec_mul(qv6, vs5), vsumi6);
-            vsumi7 = vec_add(vec_mul(qv7, vs7), vsumi7);
-        }
-
-        vsumi0 = vec_add(vsumi0, vsumi4);
-        vsumi1 = vec_add(vsumi1, vsumi5);
-        vsumi2 = vec_add(vsumi2, vsumi6);
-        vsumi3 = vec_add(vsumi3, vsumi7);
-
-        vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0);
-        vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1);
-        vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2);
-        vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3);
-    }
-
-    vsumf0 = vec_add(vsumf0, vsumf2);
-    vsumf1 = vec_add(vsumf1, vsumf3);
-
-    vsumf0 = vec_add(vsumf0, vsumf1);
-
-    vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4));
-    vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8));
-
-    *s = vec_extract(vsumf0, 0);
-
-#elif defined __loongarch_asx
-
-    __m256 acc = (__m256)__lasx_xvldi(0);
-
-    for (int i = 0; i < nb; ++i) {
-
-        const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
-        const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
-
-        const uint8_t * GGML_RESTRICT q2 = x[i].qs;
-        const int8_t * GGML_RESTRICT q8 = y[i].qs;
-
-        const __m128i mins_and_scales128 = __lsx_vld((const __m128i*)x[i].scales, 0);
-        const __m128i scales128 = __lsx_vandi_b(mins_and_scales128, 0xf);
-        const __m256i mins = lasx_ext8_16(__lsx_vsrli_b(mins_and_scales128, 4));
-        const __m256i prod = lasx_madd_h(mins, __lasx_xvld((const __m256i*)y[i].bsums, 0));
-
-        acc = __lasx_xvfmadd_s(__lasx_xvreplfr2vr_s(dmin), __lasx_xvffint_s_w(prod), acc);
-
-        const v16i8 shuffle_mask = {0, 2, 4, 6, 8, 10, 12, 14, 1, 3, 5, 7, 9, 11, 13, 15};
-        const __m256i scales_shuffled = lasx_ext8_16(__lsx_vshuf_b(scales128, scales128, (__m128i)shuffle_mask));
-
-        __m256i sumi = __lasx_xvldi(0);
-
-        for (int j = 0; j < QK_K/128; ++j) {
-
-            const __m256i q2bits = __lasx_xvld((const __m256i*)q2, 0); q2 += 32;
-
-            const __m256i q8_0 = __lasx_xvld((const __m256i*)q8, 0); q8 += 32;
-            const __m256i q8_1 = __lasx_xvld((const __m256i*)q8, 0); q8 += 32;
-            const __m256i q8_2 = __lasx_xvld((const __m256i*)q8, 0); q8 += 32;
-            const __m256i q8_3 = __lasx_xvld((const __m256i*)q8, 0); q8 += 32;
-
-            const __m256i q2_0 = __lasx_xvandi_b(q2bits, 3);
-            const __m256i q2_1 = __lasx_xvandi_b(__lasx_xvsrli_b(q2bits, 2), 3);
-            const __m256i q2_2 = __lasx_xvandi_b(__lasx_xvsrli_b(q2bits, 4), 3);
-            const __m256i q2_3 = __lasx_xvsrli_b(q2bits, 6);
-
-            __m256i p0 = lasx_madd_h_b(q2_0, q8_0);
-            __m256i p1 = lasx_madd_h_b(q2_1, q8_1);
-            __m256i p2 = lasx_madd_h_b(q2_2, q8_2);
-            __m256i p3 = lasx_madd_h_b(q2_3, q8_3);
-
-            p0 = lasx_madd_h(lasx_xvrepl128vei_h(scales_shuffled, 4 * j + 0), p0);
-            p1 = lasx_madd_h(lasx_xvrepl128vei_h(scales_shuffled, 4 * j + 1), p1);
-            p2 = lasx_madd_h(lasx_xvrepl128vei_h(scales_shuffled, 4 * j + 2), p2);
-            p3 = lasx_madd_h(lasx_xvrepl128vei_h(scales_shuffled, 4 * j + 3), p3);
-
-            p0 = __lasx_xvadd_w(p0, p1);
-            p2 = __lasx_xvadd_w(p2, p3);
-
-            sumi = __lasx_xvadd_w(sumi, __lasx_xvadd_w(p0, p2));
-        }
-
-        acc = __lasx_xvfmadd_s(__lasx_xvreplfr2vr_s(d), __lasx_xvffint_s_w(sumi), acc);
-
-    }
-
-    *s = hsum_float_8(acc);
-
-#else
-
-    float sumf = 0;
-
-    for (int i = 0; i < nb; ++i) {
-
-        const uint8_t * q2 = x[i].qs;
-        const int8_t * q8 = y[i].qs;
-        const uint8_t * sc = x[i].scales;
-
-        int summs = 0;
-        for (int j = 0; j < 16; ++j) {
-            summs += y[i].bsums[j] * (sc[j] >> 4);
-        }
-
-        const float dall = y[i].d * GGML_FP16_TO_FP32(x[i].d);
-        const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
-
-        int isum = 0;
-        int is = 0;
-        int d;
-        for (int k = 0; k < QK_K/128; ++k) {
-            int shift = 0;
-            for (int j = 0; j < 4; ++j) {
-                d = sc[is++] & 0xF;
-                int isuml = 0;
-                for (int l = 0; l < 16; ++l) isuml += q8[l] * ((q2[l] >> shift) & 3);
-                isum += d * isuml;
-                d = sc[is++] & 0xF;
-                isuml = 0;
-                for (int l = 16; l < 32; ++l) isuml += q8[l] * ((q2[l] >> shift) & 3);
-                isum += d * isuml;
-                shift += 2;
-                q8 += 32;
-            }
-            q2 += 32;
-        }
-        sumf += dall * isum - dmin * summs;
-    }
-    *s = sumf;
-#endif
-}
-
-void ggml_vec_dot_q3_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
-    assert(n % QK_K == 0);
-    assert(nrc == 1);
-    UNUSED(nrc);
-    UNUSED(bx);
-    UNUSED(by);
-    UNUSED(bs);
-
-    const uint32_t kmask1 = 0x03030303;
-    const uint32_t kmask2 = 0x0f0f0f0f;
-
-    const block_q3_K * GGML_RESTRICT x = vx;
-    const block_q8_K * GGML_RESTRICT y = vy;
-
-    const int nb = n / QK_K;
-
-#if defined(__ARM_FEATURE_SVE)
-
-    uint32_t aux[3];
-    uint32_t utmp[4];
-
-    const int8_t m32 = 32;
-    const int vector_length = svcntb()*8;
-    const svuint8_t m3b_sv = svdup_n_u8(0x3);
-    const svint32_t vzero_sv = svdup_n_s32(0);
-
-    const svuint8_t m0_sv = svdup_n_u8(1);
-    const svuint8_t m1_sv = svlsl_n_u8_x(svptrue_b8(), m0_sv, 1);
-    const svuint8_t m2_sv = svlsl_n_u8_x(svptrue_b8(), m0_sv, 2);
-    const svuint8_t m3_sv = svlsl_n_u8_x(svptrue_b8(), m0_sv, 3);
-
-    float sum = 0;
-
-    for (int i = 0; i < nb; ++i) {
-
-        const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
-
-        const uint8_t * GGML_RESTRICT q3_sv = x[i].qs;
-        const uint8_t * GGML_RESTRICT qh_sv = x[i].hmask;
-        const int8_t * GGML_RESTRICT q8_sv = y[i].qs;
-
-        // Set up scales
-        memcpy(aux, x[i].scales, 12);
-        utmp[3] = ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4);
-        utmp[2] = ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4);
-        utmp[1] = (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4);
-        utmp[0] = (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4);
-
-        int8_t * scale = (int8_t *)utmp;
-
-        for (int j = 0; j < 16; ++j) scale[j] -= m32;
-
-        switch (vector_length) {
-            case 128:
-                {
-                    svuint8_t qhbits_sv_1 = svld1_u8(svptrue_b8(), qh_sv);
-                    svuint8_t qhbits_sv_2 = svld1_u8(svptrue_b8(), qh_sv+16);
-                    svuint8_t q3h_sv;
-
-                    svint32_t sumi1_1 = svdup_n_s32(0);
-                    svint8_t q3bytes_sv;
-
-                    for (int j = 0; j < QK_K/128; ++j) {
-
-                        const svuint8_t q3bits_sv = svld1_u8(svptrue_b8(), q3_sv); q3_sv += 16;
-                        const svuint8_t q3bits_sv_1 = svld1_u8(svptrue_b8(), q3_sv); q3_sv += 16;
-                        svint8_t q8bytes_1_sv_1 = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16;
-                        svint8_t q8bytes_1_sv_2 = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16;
-
-                        q3h_sv = svlsl_n_u8_x(svptrue_b8(), svbic_u8_x(svptrue_b8(), m0_sv, qhbits_sv_1), 2);
-                        q3bytes_sv = svsub_s8_x(svptrue_b8(), svreinterpret_s8_u8(svand_u8_m(svptrue_b8(), q3bits_sv, m3b_sv)), svreinterpret_s8_u8(q3h_sv));
-
-                        sumi1_1 = svmla_s32_m(svptrue_b32(), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_1), svdup_n_s32((int32_t)scale[0]));
-
-                        q3h_sv = svlsl_n_u8_x(svptrue_b8(), svbic_u8_x(svptrue_b8(), m0_sv, qhbits_sv_2), 2);
-                        q3bytes_sv = svsub_s8_x(svptrue_b8(), svreinterpret_s8_u8(svand_u8_m(svptrue_b8(), q3bits_sv_1, m3b_sv)), svreinterpret_s8_u8(q3h_sv));
-
-                        sumi1_1 = svmla_s32_m(svptrue_b32(), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_2), svdup_n_s32((int32_t)scale[1]));
-
-                        q8bytes_1_sv_1 = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16;
-                        q8bytes_1_sv_2 = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16;
-
-                        q3h_sv = svlsl_n_u8_x(svptrue_b8(), svbic_u8_x(svptrue_b8(), m1_sv, qhbits_sv_1), 1);
-                        q3bytes_sv = svsub_s8_x(svptrue_b8(), svreinterpret_s8_u8(svand_u8_m(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q3bits_sv, 2), m3b_sv)), svreinterpret_s8_u8(q3h_sv));
-
-                        sumi1_1 = svmla_s32_m(svptrue_b32(), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_1), svdup_n_s32((int32_t)scale[2]));
-
-                        q3h_sv = svlsl_n_u8_x(svptrue_b8(), svbic_u8_x(svptrue_b8(), m1_sv, qhbits_sv_2), 1);
-                        q3bytes_sv = svsub_s8_x(svptrue_b8(), svreinterpret_s8_u8(svand_u8_m(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q3bits_sv_1, 2), m3b_sv)), svreinterpret_s8_u8(q3h_sv));
-
-                        sumi1_1 = svmla_s32_m(svptrue_b32(), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_2), svdup_n_s32((int32_t)scale[3]));
-
-
-                        scale += 4;
-                        q8bytes_1_sv_1 = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16;
-                        q8bytes_1_sv_2 = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16;
-
-                        q3h_sv = svbic_u8_x(svptrue_b8(), m2_sv, qhbits_sv_1);
-                        q3bytes_sv = svsub_s8_x(svptrue_b8(), svreinterpret_s8_u8(svand_u8_m(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q3bits_sv, 4), m3b_sv)), svreinterpret_s8_u8(q3h_sv));
-
-                        sumi1_1 = svmla_s32_m(svptrue_b32(), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_1), svdup_n_s32((int32_t)scale[0]));
-
-                        q3h_sv = svbic_u8_x(svptrue_b8(), m2_sv, qhbits_sv_2);
-                        q3bytes_sv = svsub_s8_x(svptrue_b8(), svreinterpret_s8_u8(svand_u8_m(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q3bits_sv_1, 4), m3b_sv)), svreinterpret_s8_u8(q3h_sv));
-
-                        sumi1_1 = svmla_s32_m(svptrue_b32(), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_2), svdup_n_s32((int32_t)scale[1]));
-
-
-                        q8bytes_1_sv_1 = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16;
-                        q8bytes_1_sv_2 = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16;
-
-                        q3h_sv = svlsr_n_u8_x(svptrue_b8(), svbic_u8_x(svptrue_b8(), m3_sv, qhbits_sv_1), 1);
-                        q3bytes_sv = svsub_s8_x(svptrue_b8(), svreinterpret_s8_u8(svand_u8_m(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q3bits_sv, 6), m3b_sv)), svreinterpret_s8_u8(q3h_sv));
-
-                        sumi1_1 = svmla_s32_m(svptrue_b32(), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_1), svdup_n_s32((int32_t)scale[2]));
-
-                        q3h_sv = svlsr_n_u8_x(svptrue_b8(), svbic_u8_x(svptrue_b8(), m3_sv, qhbits_sv_2), 1);
-                        q3bytes_sv = svsub_s8_x(svptrue_b8(), svreinterpret_s8_u8(svand_u8_m(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q3bits_sv_1, 6), m3b_sv)), svreinterpret_s8_u8(q3h_sv));
-
-                        sumi1_1 = svmla_s32_m(svptrue_b32(), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_2), svdup_n_s32((int32_t)scale[3]));
-
-                        if (j == 0) {
-                            qhbits_sv_1 = svlsr_n_u8_x(svptrue_b8(), qhbits_sv_1, 4);
-                            qhbits_sv_2 = svlsr_n_u8_x(svptrue_b8(), qhbits_sv_2, 4);
-                        }
-
-                        scale += 4;
-                    }
-
-                    sum += d * (svaddv_s32(svptrue_b32(), sumi1_1));
-                } break;
-            case 256:
-            case 512:
-                {
-                    svuint8_t qhbits_sv = svld1_u8(svptrue_pat_b8(SV_VL32), qh_sv);
-                    svuint8_t q3h_sv;
-
-                    svint32_t sumi1_1 = svdup_n_s32(0);
-                    svint8_t q3bytes_sv;
-
-                    for (int j = 0; j < QK_K/128; ++j) {
-
-                        const svuint8_t q3bits_sv = svld1_u8(svptrue_pat_b8(SV_VL32), q3_sv); q3_sv += 32;
-                        svint8_t q8bytes_1_sv_1 = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); q8_sv += 32;
-                        svint8_t q8bytes_1_sv_2 = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); q8_sv += 32;
-
-                        q3h_sv = svlsl_n_u8_x(svptrue_pat_b8(SV_VL32), svbic_u8_x(svptrue_pat_b8(SV_VL32), m0_sv, qhbits_sv), 2);
-                        q3bytes_sv = svsub_s8_x(svptrue_pat_b8(SV_VL32), svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), q3bits_sv, m3b_sv)), svreinterpret_s8_u8(q3h_sv));
-
-
-                        svint32_t scale_1 = svsel_s32(svptrue_pat_b32(SV_VL4), svdup_n_s32((int32_t)scale[0]), svdup_n_s32((int32_t)scale[1]));
-                        sumi1_1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_1), scale_1);
-
-                        q3h_sv = svlsl_n_u8_x(svptrue_pat_b8(SV_VL32), svbic_u8_x(svptrue_pat_b8(SV_VL32), m1_sv, qhbits_sv), 1);
-                        q3bytes_sv = svsub_s8_x(svptrue_pat_b8(SV_VL32), svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q3bits_sv, 2), m3b_sv)), svreinterpret_s8_u8(q3h_sv));
-
-                        scale_1 = svsel_s32(svptrue_pat_b32(SV_VL4), svdup_n_s32((int32_t)scale[2]), svdup_n_s32((int32_t)scale[3]));
-                        sumi1_1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_2), scale_1);
-
-                        scale += 4;
-                        q8bytes_1_sv_1 = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); q8_sv += 32;
-                        q8bytes_1_sv_2 = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); q8_sv += 32;
-
-                        q3h_sv = svbic_u8_x(svptrue_pat_b8(SV_VL32), m2_sv, qhbits_sv);
-                        q3bytes_sv = svsub_s8_x(svptrue_pat_b8(SV_VL32), svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q3bits_sv, 4), m3b_sv)), svreinterpret_s8_u8(q3h_sv));
-
-                        scale_1 = svsel_s32(svptrue_pat_b32(SV_VL4), svdup_n_s32((int32_t)scale[0]), svdup_n_s32((int32_t)scale[1]));
-                        sumi1_1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_1), scale_1);
-
-                        q3h_sv = svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), svbic_u8_x(svptrue_pat_b8(SV_VL32), m3_sv, qhbits_sv), 1);
-                        q3bytes_sv = svsub_s8_x(svptrue_pat_b8(SV_VL32), svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q3bits_sv, 6), m3b_sv)), svreinterpret_s8_u8(q3h_sv));
-
-                        scale_1 = svsel_s32(svptrue_pat_b32(SV_VL4), svdup_n_s32((int32_t)scale[2]), svdup_n_s32((int32_t)scale[3]));
-                        sumi1_1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_2), scale_1);
-
-                        if (j == 0) {
-                            qhbits_sv = svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), qhbits_sv, 4);
-                        }
-
-                        scale += 4;
-                    }
-
-                    sum += d * (svaddv_s32(svptrue_pat_b32(SV_VL8), sumi1_1));
-                } break;
-            default:
-                assert(false && "Unsupported vector length");
-                break;
-        }
-    }
-    *s = sum;
-
-#elif __ARM_NEON
-
-    uint32_t aux[3];
-    uint32_t utmp[4];
-
-    const uint8x16_t m3b = vdupq_n_u8(0x3);
-    const int32x4_t vzero = vdupq_n_s32(0);
-
-    const uint8x16_t m0 = vdupq_n_u8(1);
-    const uint8x16_t m1 = vshlq_n_u8(m0, 1);
-    const uint8x16_t m2 = vshlq_n_u8(m0, 2);
-    const uint8x16_t m3 = vshlq_n_u8(m0, 3);
-    const int8_t m32 = 32;
-
-    ggml_int8x16x4_t q3bytes;
-
-    float sum = 0;
-
-    for (int i = 0; i < nb; ++i) {
-
-        const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
-
-        const uint8_t * GGML_RESTRICT q3 = x[i].qs;
-        const uint8_t * GGML_RESTRICT qh = x[i].hmask;
-        const int8_t * GGML_RESTRICT q8 = y[i].qs;
-
-        ggml_uint8x16x2_t qhbits = ggml_vld1q_u8_x2(qh);
-
-        ggml_uint8x16x4_t q3h;
-
-        int32_t isum = 0;
-
-        // Set up scales
-        memcpy(aux, x[i].scales, 12);
-        utmp[3] = ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4);
-        utmp[2] = ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4);
-        utmp[1] = (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4);
-        utmp[0] = (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4);
-
-        int8_t * scale = (int8_t *)utmp;
-        for (int j = 0; j < 16; ++j) scale[j] -= m32;
-
-        for (int j = 0; j < QK_K/128; ++j) {
-
-            const ggml_uint8x16x2_t q3bits = ggml_vld1q_u8_x2(q3); q3 += 32;
-            const ggml_int8x16x4_t q8bytes_1 = ggml_vld1q_s8_x4(q8); q8 += 64;
-            const ggml_int8x16x4_t q8bytes_2 = ggml_vld1q_s8_x4(q8); q8 += 64;
-
-            q3h.val[0] = vshlq_n_u8(vbicq_u8(m0, qhbits.val[0]), 2);
-            q3h.val[1] = vshlq_n_u8(vbicq_u8(m0, qhbits.val[1]), 2);
-            q3h.val[2] = vshlq_n_u8(vbicq_u8(m1, qhbits.val[0]), 1);
-            q3h.val[3] = vshlq_n_u8(vbicq_u8(m1, qhbits.val[1]), 1);
-
-            q3bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(q3bits.val[0], m3b)), vreinterpretq_s8_u8(q3h.val[0]));
-            q3bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(q3bits.val[1], m3b)), vreinterpretq_s8_u8(q3h.val[1]));
-            q3bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[0], 2), m3b)), vreinterpretq_s8_u8(q3h.val[2]));
-            q3bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[1], 2), m3b)), vreinterpretq_s8_u8(q3h.val[3]));
-
-            isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[0], q8bytes_1.val[0])) * scale[0];
-            isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[1], q8bytes_1.val[1])) * scale[1];
-            isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[2], q8bytes_1.val[2])) * scale[2];
-            isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[3], q8bytes_1.val[3])) * scale[3];
-
-            scale += 4;
-
-            q3h.val[0] = vbicq_u8(m2, qhbits.val[0]);
-            q3h.val[1] = vbicq_u8(m2, qhbits.val[1]);
-            q3h.val[2] = vshrq_n_u8(vbicq_u8(m3, qhbits.val[0]), 1);
-            q3h.val[3] = vshrq_n_u8(vbicq_u8(m3, qhbits.val[1]), 1);
-
-            q3bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[0], 4), m3b)), vreinterpretq_s8_u8(q3h.val[0]));
-            q3bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[1], 4), m3b)), vreinterpretq_s8_u8(q3h.val[1]));
-            q3bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[0], 6), m3b)), vreinterpretq_s8_u8(q3h.val[2]));
-            q3bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[1], 6), m3b)), vreinterpretq_s8_u8(q3h.val[3]));
-
-            isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[0], q8bytes_2.val[0])) * scale[0];
-            isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[1], q8bytes_2.val[1])) * scale[1];
-            isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[2], q8bytes_2.val[2])) * scale[2];
-            isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[3], q8bytes_2.val[3])) * scale[3];
-
-            scale += 4;
-
-            if (j == 0) {
-                qhbits.val[0] = vshrq_n_u8(qhbits.val[0], 4);
-                qhbits.val[1] = vshrq_n_u8(qhbits.val[1], 4);
-            }
-
-        }
-        sum += d * isum;
-
-    }
-
-    *s = sum;
-
-#elif defined __AVX2__
-
-    const __m256i m3 = _mm256_set1_epi8(3);
-    const __m256i mone = _mm256_set1_epi8(1);
-    const __m128i m32 = _mm_set1_epi8(32);
-
-    __m256 acc = _mm256_setzero_ps();
-
-    uint32_t aux[3];
-
-    for (int i = 0; i < nb; ++i) {
-
-        const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
-
-        const uint8_t * GGML_RESTRICT q3 = x[i].qs;
-        const int8_t * GGML_RESTRICT q8 = y[i].qs;
-
// Set up scales - memcpy(aux, x[i].scales, 12); - __m128i scales128 = _mm_set_epi32( - ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4), - ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4), - (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4), - (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4)); - scales128 = _mm_sub_epi8(scales128, m32); - const __m256i all_scales = _mm256_cvtepi8_epi16(scales128); - const __m128i l_scales = _mm256_extracti128_si256(all_scales, 0); - const __m128i h_scales = _mm256_extracti128_si256(all_scales, 1); - const __m256i scales[2] = {MM256_SET_M128I(l_scales, l_scales), MM256_SET_M128I(h_scales, h_scales)}; - - // high bit - const __m256i hbits = _mm256_loadu_si256((const __m256i*)x[i].hmask); - - // integer accumulator - __m256i sumi = _mm256_setzero_si256(); - - int bit = 0; - int is = 0; - - for (int j = 0; j < QK_K/128; ++j) { - // load low 2 bits - const __m256i q3bits = _mm256_loadu_si256((const __m256i*)q3); q3 += 32; - - // prepare low and high bits - const __m256i q3l_0 = _mm256_and_si256(q3bits, m3); - const __m256i q3h_0 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, bit)), bit), 2); - ++bit; - - const __m256i q3l_1 = _mm256_and_si256(_mm256_srli_epi16(q3bits, 2), m3); - const __m256i q3h_1 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, bit)), bit), 2); - ++bit; - - const __m256i q3l_2 = _mm256_and_si256(_mm256_srli_epi16(q3bits, 4), m3); - const __m256i q3h_2 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, bit)), bit), 2); - ++bit; - - const __m256i q3l_3 = _mm256_and_si256(_mm256_srli_epi16(q3bits, 6), m3); - const __m256i q3h_3 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, bit)), bit), 2); - ++bit; - - // load Q8 quants - const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; - const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; - const __m256i q8_2 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; - const __m256i q8_3 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; - - // Dot product: we multiply the 2 low bits and 1 high bit part separately, so we can use _mm256_maddubs_epi16, - // and then subtract. 
The high bit part has the 2 already subtracted (and so, it is zero if the high bit was not set, - // and 2 if the high bit was set) - __m256i q8s_0 = _mm256_maddubs_epi16(q3h_0, q8_0); - __m256i q8s_1 = _mm256_maddubs_epi16(q3h_1, q8_1); - __m256i q8s_2 = _mm256_maddubs_epi16(q3h_2, q8_2); - __m256i q8s_3 = _mm256_maddubs_epi16(q3h_3, q8_3); - - __m256i p16_0 = _mm256_maddubs_epi16(q3l_0, q8_0); - __m256i p16_1 = _mm256_maddubs_epi16(q3l_1, q8_1); - __m256i p16_2 = _mm256_maddubs_epi16(q3l_2, q8_2); - __m256i p16_3 = _mm256_maddubs_epi16(q3l_3, q8_3); - - p16_0 = _mm256_sub_epi16(p16_0, q8s_0); - p16_1 = _mm256_sub_epi16(p16_1, q8s_1); - p16_2 = _mm256_sub_epi16(p16_2, q8s_2); - p16_3 = _mm256_sub_epi16(p16_3, q8s_3); - - // multiply with scales - p16_0 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(is + 0)), p16_0); - p16_1 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(is + 1)), p16_1); - p16_2 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(is + 2)), p16_2); - p16_3 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(is + 3)), p16_3); - - // accumulate - p16_0 = _mm256_add_epi32(p16_0, p16_1); - p16_2 = _mm256_add_epi32(p16_2, p16_3); - sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_0, p16_2)); - - } - - // multiply with block scale and accumulate - acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi), acc); - - } - - *s = hsum_float_8(acc); - -#elif defined __AVX__ - - const __m128i m3 = _mm_set1_epi8(3); - const __m128i mone = _mm_set1_epi8(1); - const __m128i m32 = _mm_set1_epi8(32); - const __m128i m2 = _mm_set1_epi8(2); - - __m256 acc = _mm256_setzero_ps(); - - const uint32_t *aux; - - for (int i = 0; i < nb; ++i) { - - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); - - const uint8_t * GGML_RESTRICT q3 = x[i].qs; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - - // Set up scales - aux = (const uint32_t *)x[i].scales; - __m128i scales128 = _mm_set_epi32( - ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4), - ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4), - (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4), - (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4)); - scales128 = _mm_sub_epi8(scales128, m32); - const __m128i scales_0 = _mm_cvtepi8_epi16(scales128); - const __m128i scales_1 = _mm_cvtepi8_epi16(_mm_unpackhi_epi64(scales128, scales128)); - const __m128i scales[2] = { scales_0, scales_1 }; - - // high bit *128*2 from block_q3_K.hmask[QK_K/8] - const __m128i hbits_0 = _mm_loadu_si128((const __m128i*)&x[i].hmask[0]); - const __m128i hbits_1 = _mm_loadu_si128((const __m128i*)&x[i].hmask[16]); - - // integer accumulator - __m128i sumi_0 = _mm_setzero_si128(); - __m128i sumi_1 = _mm_setzero_si128(); - - for (int j = 0; j < QK_K/128; ++j) { - // load low 2 bits *64*2 from block_q3_K.qs[QK_K/4] - const __m128i q3bits_0 = _mm_loadu_si128((const __m128i*)q3); q3 += 16; - const __m128i q3bits_1 = _mm_loadu_si128((const __m128i*)q3); q3 += 16; - - // prepare low and high bits - const int bit = j << 2; - - const __m128i q3l_0 = _mm_and_si128(q3bits_0, m3); - const __m128i q3l_1 = _mm_and_si128(q3bits_1, m3); - const __m128i q3h_0 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_0, _mm_slli_epi16(mone, bit)), bit), 2); - const __m128i q3h_1 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_1, _mm_slli_epi16(mone, bit)), bit), 2); - - const __m128i q3l_2 = _mm_and_si128(_mm_srli_epi16(q3bits_0, 2), m3); - const __m128i q3l_3 = 
_mm_and_si128(_mm_srli_epi16(q3bits_1, 2), m3); - const __m128i q3h_2 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_0, _mm_slli_epi16(mone, bit+1)), bit+1), 2); - const __m128i q3h_3 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_1, _mm_slli_epi16(mone, bit+1)), bit+1), 2); - - const __m128i q3l_4 = _mm_and_si128(_mm_srli_epi16(q3bits_0, 4), m3); - const __m128i q3l_5 = _mm_and_si128(_mm_srli_epi16(q3bits_1, 4), m3); - const __m128i q3h_4 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_0, _mm_slli_epi16(mone, bit+2)), bit+2), 2); - const __m128i q3h_5 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_1, _mm_slli_epi16(mone, bit+2)), bit+2), 2); - - const __m128i q3l_6 = _mm_and_si128(_mm_srli_epi16(q3bits_0, 6), m3); - const __m128i q3l_7 = _mm_and_si128(_mm_srli_epi16(q3bits_1, 6), m3); - const __m128i q3h_6 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_0, _mm_slli_epi16(mone, bit+3)), bit+3), 2); - const __m128i q3h_7 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_1, _mm_slli_epi16(mone, bit+3)), bit+3), 2); - - // load Q8 quants from block_q8_K.qs[QK_K] - const __m128i q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; - const __m128i q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; - const __m128i q8_2 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; - const __m128i q8_3 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; - const __m128i q8_4 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; - const __m128i q8_5 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; - const __m128i q8_6 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; - const __m128i q8_7 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; - - // Dot product: we multiply the 2 low bits and 1 high bit part separately, so we can use _mm256_maddubs_epi16, - // and then subtract. 
The high bit part has the 2 already subtracted (and so, it is zero if the high bit was not set, - // and 2 if the high bit was set) - __m128i q8s_0 = _mm_maddubs_epi16(q3h_0, q8_0); - __m128i q8s_1 = _mm_maddubs_epi16(q3h_1, q8_1); - __m128i q8s_2 = _mm_maddubs_epi16(q3h_2, q8_2); - __m128i q8s_3 = _mm_maddubs_epi16(q3h_3, q8_3); - __m128i q8s_4 = _mm_maddubs_epi16(q3h_4, q8_4); - __m128i q8s_5 = _mm_maddubs_epi16(q3h_5, q8_5); - __m128i q8s_6 = _mm_maddubs_epi16(q3h_6, q8_6); - __m128i q8s_7 = _mm_maddubs_epi16(q3h_7, q8_7); - - __m128i p16_0 = _mm_maddubs_epi16(q3l_0, q8_0); - __m128i p16_1 = _mm_maddubs_epi16(q3l_1, q8_1); - __m128i p16_2 = _mm_maddubs_epi16(q3l_2, q8_2); - __m128i p16_3 = _mm_maddubs_epi16(q3l_3, q8_3); - __m128i p16_4 = _mm_maddubs_epi16(q3l_4, q8_4); - __m128i p16_5 = _mm_maddubs_epi16(q3l_5, q8_5); - __m128i p16_6 = _mm_maddubs_epi16(q3l_6, q8_6); - __m128i p16_7 = _mm_maddubs_epi16(q3l_7, q8_7); - - p16_0 = _mm_sub_epi16(p16_0, q8s_0); - p16_1 = _mm_sub_epi16(p16_1, q8s_1); - p16_2 = _mm_sub_epi16(p16_2, q8s_2); - p16_3 = _mm_sub_epi16(p16_3, q8s_3); - p16_4 = _mm_sub_epi16(p16_4, q8s_4); - p16_5 = _mm_sub_epi16(p16_5, q8s_5); - p16_6 = _mm_sub_epi16(p16_6, q8s_6); - p16_7 = _mm_sub_epi16(p16_7, q8s_7); - - // multiply with scales - __m128i shuffle = _mm_set1_epi16(0x0100); - p16_0 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_0); - shuffle = _mm_add_epi16(shuffle, m2); - p16_1 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_1); - shuffle = _mm_add_epi16(shuffle, m2); - p16_2 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_2); - shuffle = _mm_add_epi16(shuffle, m2); - p16_3 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_3); - shuffle = _mm_add_epi16(shuffle, m2); - p16_4 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_4); - shuffle = _mm_add_epi16(shuffle, m2); - p16_5 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_5); - shuffle = _mm_add_epi16(shuffle, m2); - p16_6 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_6); - shuffle = _mm_add_epi16(shuffle, m2); - p16_7 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_7); - - // accumulate - p16_0 = _mm_add_epi32(p16_0, p16_1); - p16_2 = _mm_add_epi32(p16_2, p16_3); - p16_4 = _mm_add_epi32(p16_4, p16_5); - p16_6 = _mm_add_epi32(p16_6, p16_7); - sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_0, p16_2)); - sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_4, p16_6)); - - } - - // multiply with block scale and accumulate - __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0); - acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi)), acc); - - } - - *s = hsum_float_8(acc); - -#elif defined __wasm_simd128__ - int8_t aux8[QK_K]; - float sums[8] = {0}; - uint32_t auxs[4]; - - float sumf = 0; - for (int i = 0; i < nb; ++i) { - const uint8_t * GGML_RESTRICT q3 = x[i].qs; - const uint8_t * GGML_RESTRICT hm = x[i].hmask; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - - // Process blocks with SIMD - int8_t * a = aux8; - uint8_t m = 1; - for (int j = 0; j < QK_K; j += 128) { - for (int shift = 0; shift <= 6; shift += 2) { - v128_t v_m = wasm_i8x16_splat(m); - for (int l = 0; l < 32; l += 16) { - v128_t v_q3 = wasm_v128_load(q3 + l); - v128_t v_shift = wasm_i8x16_shr(v_q3, shift); - v128_t v_low2 = wasm_v128_and(v_shift, wasm_i8x16_splat(0x03)); - - v128_t v_hm = wasm_v128_load(hm + l); - v128_t v_mask = wasm_v128_and(v_hm, v_m); - v_mask = wasm_i8x16_ne(v_mask, wasm_i8x16_splat(0)); - - v_low2 = wasm_i8x16_sub(v_low2, 
wasm_v128_and(wasm_i8x16_splat(4), wasm_v128_not(v_mask))); - wasm_v128_store(a + l, v_low2); - } - a += 32; - m <<= 1; - } - q3 += 32; - } - - // Extract scales - memcpy(auxs, x[i].scales, 12); - uint32_t tmp = auxs[2]; - auxs[2] = ((auxs[0] >> 4) & kmask2) | (((tmp >> 4) & kmask1) << 4); - auxs[3] = ((auxs[1] >> 4) & kmask2) | (((tmp >> 6) & kmask1) << 4); - auxs[0] = (auxs[0] & kmask2) | (((tmp >> 0) & kmask1) << 4); - auxs[1] = (auxs[1] & kmask2) | (((tmp >> 2) & kmask1) << 4); - const int8_t * scales = (const int8_t *)auxs; - - // SIMD dot product with register accumulators - v128_t v_acc0 = wasm_i32x4_splat(0); - v128_t v_acc1 = wasm_i32x4_splat(0); - a = aux8; - for (int j = 0; j < QK_K/16; ++j) { - const v128_t v_scale = wasm_i16x8_splat(scales[j] - 32); - - // Process 16 elements per iteration - for (int k = 0; k < 2; ++k) { - const v128_t v_q8 = wasm_i16x8_load8x8(q8); - const v128_t v_a = wasm_i16x8_load8x8(a); - - v128_t v_prod = wasm_i16x8_mul(v_q8, v_a); - v_prod = wasm_i16x8_mul(v_prod, v_scale); - - v_acc0 = wasm_i32x4_add(v_acc0, wasm_i32x4_extend_low_i16x8(v_prod)); - v_acc1 = wasm_i32x4_add(v_acc1, wasm_i32x4_extend_high_i16x8(v_prod)); - - q8 += 8; - a += 8; - } - } - - // Accumulate results - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; - const v128_t v_d = wasm_f32x4_splat(d); - v128_t v_sum = wasm_f32x4_add( - wasm_f32x4_mul(wasm_f32x4_convert_i32x4(v_acc0), v_d), - wasm_f32x4_mul(wasm_f32x4_convert_i32x4(v_acc1), v_d) - ); - - // Accumulate into sums vector - wasm_v128_store(sums, wasm_f32x4_add(wasm_v128_load(sums), v_sum)); - } - - // Horizontal sum - v128_t v_sum = wasm_f32x4_add(wasm_v128_load(sums), wasm_v128_load(sums + 4)); - sumf = wasm_f32x4_extract_lane(v_sum, 0) + - wasm_f32x4_extract_lane(v_sum, 1) + - wasm_f32x4_extract_lane(v_sum, 2) + - wasm_f32x4_extract_lane(v_sum, 3); - - *s = sumf; - -#elif defined __riscv_v_intrinsic - - uint32_t aux[3]; - uint32_t utmp[4]; - - const int vector_length = __riscv_vlenb() * 8; - float sumf = 0; - - switch (vector_length) { - case 256: - for (int i = 0; i < nb; ++i) { - - const uint8_t * GGML_RESTRICT q3 = x[i].qs; - const uint8_t * GGML_RESTRICT qh = x[i].hmask; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - - memcpy(aux, x[i].scales, 12); - utmp[3] = ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4); - utmp[2] = ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4); - utmp[1] = (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4); - utmp[0] = (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4); - - int8_t * scale = (int8_t *)utmp; - for (int j = 0; j < 16; ++j) scale[j] -= 32; - - - size_t vl = 32; - uint8_t m = 1; - - vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1); - vuint8m1_t vqh = __riscv_vle8_v_u8m1(qh, vl); - - int sum_t = 0; - - for (int j = 0; j < QK_K; j += 128) { - - vl = 32; - - // load Q3 - vuint8m1_t q3_x = __riscv_vle8_v_u8m1(q3, vl); - - vint8m1_t q3_0 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(q3_x, 0x03, vl)); - vint8m1_t q3_1 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q3_x, 0x2, vl), 0x03 , vl)); - vint8m1_t q3_2 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q3_x, 0x4, vl), 0x03 , vl)); - vint8m1_t q3_3 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q3_x, 0x6, vl), 0x03 , vl)); - - // compute mask for subtraction - vuint8m1_t qh_m0 = __riscv_vand_vx_u8m1(vqh, m, vl); - vbool8_t vmask_0 = __riscv_vmseq_vx_u8m1_b8(qh_m0, 0, vl); - vint8m1_t q3_m0 = 
__riscv_vsub_vx_i8m1_mu(vmask_0, q3_0, q3_0, 0x4, vl); - m <<= 1; - - vuint8m1_t qh_m1 = __riscv_vand_vx_u8m1(vqh, m, vl); - vbool8_t vmask_1 = __riscv_vmseq_vx_u8m1_b8(qh_m1, 0, vl); - vint8m1_t q3_m1 = __riscv_vsub_vx_i8m1_mu(vmask_1, q3_1, q3_1, 0x4, vl); - m <<= 1; - - vuint8m1_t qh_m2 = __riscv_vand_vx_u8m1(vqh, m, vl); - vbool8_t vmask_2 = __riscv_vmseq_vx_u8m1_b8(qh_m2, 0, vl); - vint8m1_t q3_m2 = __riscv_vsub_vx_i8m1_mu(vmask_2, q3_2, q3_2, 0x4, vl); - m <<= 1; - - vuint8m1_t qh_m3 = __riscv_vand_vx_u8m1(vqh, m, vl); - vbool8_t vmask_3 = __riscv_vmseq_vx_u8m1_b8(qh_m3, 0, vl); - vint8m1_t q3_m3 = __riscv_vsub_vx_i8m1_mu(vmask_3, q3_3, q3_3, 0x4, vl); - m <<= 1; - - // load Q8 and take product with Q3 - vint16m2_t a0 = __riscv_vwmul_vv_i16m2(q3_m0, __riscv_vle8_v_i8m1(q8, vl), vl); - vint16m2_t a1 = __riscv_vwmul_vv_i16m2(q3_m1, __riscv_vle8_v_i8m1(q8+32, vl), vl); - vint16m2_t a2 = __riscv_vwmul_vv_i16m2(q3_m2, __riscv_vle8_v_i8m1(q8+64, vl), vl); - vint16m2_t a3 = __riscv_vwmul_vv_i16m2(q3_m3, __riscv_vle8_v_i8m1(q8+96, vl), vl); - - vl = 16; - - // retrieve lane to multiply with scale - vint32m2_t aux0_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a0, 0), (scale[0]), vl); - vint32m2_t aux0_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a0, 1), (scale[1]), vl); - vint32m2_t aux1_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a1, 0), (scale[2]), vl); - vint32m2_t aux1_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a1, 1), (scale[3]), vl); - vint32m2_t aux2_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a2, 0), (scale[4]), vl); - vint32m2_t aux2_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a2, 1), (scale[5]), vl); - vint32m2_t aux3_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a3, 0), (scale[6]), vl); - vint32m2_t aux3_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a3, 1), (scale[7]), vl); - - vint32m1_t isum0 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(aux0_0, aux0_1, vl), vzero, vl); - vint32m1_t isum1 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(aux1_0, aux1_1, vl), isum0, vl); - vint32m1_t isum2 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(aux2_0, aux2_1, vl), isum1, vl); - vint32m1_t isum3 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(aux3_0, aux3_1, vl), isum2, vl); - - sum_t += __riscv_vmv_x_s_i32m1_i32(isum3); - - q3 += 32; q8 += 128; scale += 8; - - } - - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; - - sumf += d*sum_t; - - } - break; - case 128: - for (int i = 0; i < nb; ++i) { - const uint8_t * restrict q3 = x[i].qs; - const uint8_t * restrict qh = x[i].hmask; - const int8_t * restrict q8 = y[i].qs; - - int8_t * scale = (int8_t *)utmp; - int tmp; - __asm__ __volatile__( - "vsetivli zero, 12, e8, m1\n\t" - "vle8.v v0, (%[s6b])\n\t" - "vmv1r.v v2, v0\n\t" - "vsetivli zero, 2, e64, m1\n\t" - "vmv.v.x v9, %[sh]\n\t"\ - "vslidedown.vi v1, v0, 1\n\t" - "vslide1up.vx v8, v9, zero\n\t" // {0, 0, 4, 4} - "vslideup.vi v0, v2, 1\n\t" // {aux[0], aux[1], aux[0], aux[1]} - "vsetivli zero, 4, e32, m1\n\t" - "vid.v v9\n\t" - "vmv.x.s %[tmp], v1\n\t" - "vsll.vi v9, v9, 1\n\t" // {0, 2, 4, 6} - "vmv.v.x v1, %[tmp]\n\t" // {aux[2], aux[2], aux[2], aux[2]} - "vsrl.vv v4, v1, v9\n\t" - "vsrl.vv v2, v0, v8\n\t" - "vand.vx v5, v4, %[kmask1]\n\t" - "vand.vx v3, v2, %[kmask2]\n\t" - "vsll.vi v6, v5, 4\n\t" - "vor.vv v7, v6, v3\n\t" - "vsetivli zero, 16, e8, m1\n\t" - "vsub.vx v0, v7, %[c]\n\t" - "vse8.v v0, (%[scale])" - : [tmp] "=&r" (tmp) - : [sh] "r" (0x0000000400000004), [s6b] "r" 
(x[i].scales), [c] "r" (32) - , [scale] "r" (scale), [kmask1] "r" (kmask1), [kmask2] "r" (kmask2) - : "memory" - , "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7" - , "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15" - , "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23" - , "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" - ); - - uint8_t m = 1; - int isum = 0; - for (int j = 0; j < QK_K; j += 128) { - __asm__ __volatile__( - "vsetvli zero, %[vl32], e8, m2, ta, mu\n\t" - "vle8.v v8, (%[q3])\n\t" - "vsrl.vi v10, v8, 2\n\t" - "vsrl.vi v12, v8, 4\n\t" - "vsrl.vi v14, v8, 6\n\t" - "vand.vi v8, v8, 3\n\t" - "vand.vi v10, v10, 3\n\t" - "vand.vi v12, v12, 3\n\t" - "vle8.v v2, (%[qh])\n\t" - "vand.vx v4, v2, %[m]\n\t" - "slli %[m], %[m], 1\n\t" - "vmseq.vx v0, v4, zero\n\t" - "vadd.vi v8, v8, -4, v0.t\n\t" - "vand.vx v4, v2, %[m]\n\t" - "slli %[m], %[m], 1\n\t" - "vmseq.vx v0, v4, zero\n\t" - "vadd.vi v10, v10, -4, v0.t\n\t" - "vand.vx v4, v2, %[m]\n\t" - "slli %[m], %[m], 1\n\t" - "vmseq.vx v0, v4, zero\n\t" - "vadd.vi v12, v12, -4, v0.t\n\t" - "vand.vx v4, v2, %[m]\n\t" - "slli %[m], %[m], 1\n\t" - "vmseq.vx v0, v4, zero\n\t" - "vadd.vi v14, v14, -4, v0.t\n\t" - "vsetvli zero, %[vl128], e8, m8\n\t" - "vle8.v v0, (%[q8])\n\t" - "vsetvli zero, %[vl64], e8, m4\n\t" - "vwmul.vv v16, v0, v8\n\t" - "vwmul.vv v24, v4, v12\n\t" - "vsetivli zero, 16, e16, m2\n\t" - "vmv.v.x v0, zero\n\t" - "vwredsum.vs v10, v16, v0\n\t" - "vwredsum.vs v9, v18, v0\n\t" - "vwredsum.vs v8, v20, v0\n\t" - "vwredsum.vs v7, v22, v0\n\t" - "vwredsum.vs v11, v24, v0\n\t" - "vwredsum.vs v12, v26, v0\n\t" - "vwredsum.vs v13, v28, v0\n\t" - "vwredsum.vs v14, v30, v0\n\t" - "vsetivli zero, 4, e32, m1\n\t" - "vslideup.vi v10, v9, 1\n\t" - "vslideup.vi v8, v7, 1\n\t" - "vslideup.vi v11, v12, 1\n\t" - "vslideup.vi v13, v14, 1\n\t" - "vslideup.vi v10, v8, 2\n\t" - "vslideup.vi v11, v13, 2\n\t" - "vsetivli zero, 8, e32, m2\n\t"\ - "vle8.v v15, (%[scale])\n\t" - "vsext.vf4 v12, v15\n\t" - "vmul.vv v10, v10, v12\n\t" - "vredsum.vs v0, v10, v0\n\t" - "vmv.x.s %[tmp], v0\n\t" - "add %[isum], %[isum], %[tmp]" - : [tmp] "=&r" (tmp), [m] "+&r" (m), [isum] "+&r" (isum) - : [vl128] "r" (128), [vl64] "r" (64), [vl32] "r" (32) - , [q3] "r" (q3), [qh] "r" (qh), [scale] "r" (scale), [q8] "r" (q8) - : "memory" - , "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7" - , "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15" - , "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23" - , "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" - ); - q3 += 32; q8 += 128; scale += 8; - } - - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; - sumf += d * isum; - } - break; - default: - assert(false && "Unsupported vector length"); - break; - } - - *s = sumf; - -#elif defined(__POWER9_VECTOR__) - const vector signed char lowMask = vec_splats((signed char)0x3); - const vector signed char lowMask1 = vec_splats((int8_t)0xf); - const vector signed char lowMask2 = vec_splats((int8_t)0x30); - const vector int v0 = vec_splats((int32_t)0); - const vector signed char v1 = vec_splats((signed char)0x1); - const vector unsigned char v2 = vec_splats((unsigned char)0x2); - const vector unsigned char v3 = vec_splats((unsigned char)0x3); - const vector unsigned char v4 = vec_splats((unsigned char)0x4); - const vector unsigned char v6 = vec_splats((unsigned char)0x6); - const vector signed char off = vec_splats((signed char)0x20); - - vector float vsumf0 = vec_splats(0.0f); - vector float vsumf1 = vec_splats(0.0f); - vector float vsumf2 = vec_splats(0.0f); - vector 
float vsumf3 = vec_splats(0.0f); - - for (int i = 0; i < nb; ++i) { - vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d)); - vector float vyd = vec_splats(y[i].d); - vector float vd = vec_mul(vxd, vyd); - - UNUSED(kmask1); - UNUSED(kmask2); - - vector signed char u0 = (vector signed char)vec_xl_len(x[i].scales, 8); - vector signed char u1 = vec_and(u0, lowMask1); - vector signed char u2 = (vector signed char)vec_xl_len(x[i].scales + 8, 4); - vector signed char u3 = (vector signed char)vec_mergeh((vector signed int)u2, (vector signed int)vec_sr(u2, v2)); - vector signed char u30 = vec_sl(vec_and(u3, lowMask), v4); - vector signed char u31 = vec_and(u3, lowMask2); - - u1 = vec_or(u1, u30); - u2 = vec_or(vec_sr(u0, v4), u31); - - vector signed char vscales = (vector signed char)vec_mergeh((vector signed long long)u1, (vector signed long long)u2); - vector signed char qxhs0 = (vector signed char)vec_xl( 0, x[i].hmask); - vector signed char qxhs1 = (vector signed char)vec_xl(16, x[i].hmask); - - vscales = vec_sub(vscales, off); - - vector signed int vsumi0 = v0; - vector signed int vsumi1 = v0; - vector signed int vsumi2 = v0; - vector signed int vsumi3 = v0; - vector signed int vsumi4 = v0; - vector signed int vsumi5 = v0; - vector signed int vsumi6 = v0; - vector signed int vsumi7 = v0; - - const uint8_t * GGML_RESTRICT q3 = x[i].qs; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - - for (int j = 0; j < QK_K/128; ++j) { - __builtin_prefetch(q3, 0, 1); - __builtin_prefetch(q8, 0, 1); - - vector signed char qxs0 = (vector signed char)vec_xl( 0, q3); - vector signed char qxs1 = (vector signed char)vec_xl(16, q3); - q3 += 32; - - //the low 2 bits - vector signed char qxs00 = vec_and(qxs0, lowMask); - vector signed char qxs01 = vec_and(vec_sr(qxs0, v2), lowMask); - vector signed char qxs02 = vec_and(vec_sr(qxs0, v4), lowMask); - vector signed char qxs03 = vec_and(vec_sr(qxs0, v6), lowMask); - vector signed char qxs10 = vec_and(qxs1, lowMask); - vector signed char qxs11 = vec_and(vec_sr(qxs1, v2), lowMask); - vector signed char qxs12 = vec_and(vec_sr(qxs1, v4), lowMask); - vector signed char qxs13 = vec_and(vec_sr(qxs1, v6), lowMask); - - //the 3rd bit - vector signed char qxh00 = vec_sl(vec_andc(v1, qxhs0), v2); - vector signed char qxh01 = vec_sl(vec_andc(v1, vec_sr(qxhs0, (vector unsigned char)v1)), v2); - vector signed char qxh02 = vec_sl(vec_andc(v1, vec_sr(qxhs0, v2)), v2); - vector signed char qxh03 = vec_sl(vec_andc(v1, vec_sr(qxhs0, v3)), v2); - vector signed char qxh10 = vec_sl(vec_andc(v1, qxhs1), v2); - vector signed char qxh11 = vec_sl(vec_andc(v1, vec_sr(qxhs1, (vector unsigned char)v1)), v2); - vector signed char qxh12 = vec_sl(vec_andc(v1, vec_sr(qxhs1, v2)), v2); - vector signed char qxh13 = vec_sl(vec_andc(v1, vec_sr(qxhs1, v3)), v2); - qxhs0 = vec_sr(qxhs0, v4); - qxhs1 = vec_sr(qxhs1, v4); - - vector signed char q3x00 = vec_sub(qxs00, qxh00); - vector signed char q3x01 = vec_sub(qxs01, qxh01); - vector signed char q3x02 = vec_sub(qxs02, qxh02); - vector signed char q3x03 = vec_sub(qxs03, qxh03); - vector signed char q3x10 = vec_sub(qxs10, qxh10); - vector signed char q3x11 = vec_sub(qxs11, qxh11); - vector signed char q3x12 = vec_sub(qxs12, qxh12); - vector signed char q3x13 = vec_sub(qxs13, qxh13); - - vector signed char q8y00 = vec_xl( 0, q8); - vector signed char q8y10 = vec_xl( 16, q8); - vector signed char q8y01 = vec_xl( 32, q8); - vector signed char q8y11 = vec_xl( 48, q8); - vector signed char q8y02 = vec_xl( 64, q8); - vector signed char q8y12 = vec_xl( 80, q8); - 
vector signed char q8y03 = vec_xl( 96, q8); - vector signed char q8y13 = vec_xl(112, q8); - q8 += 128; - - vector signed short vscales_h = vec_unpackh(vscales); - vector signed short vs0 = vec_splat(vscales_h, 0); - vector signed short vs1 = vec_splat(vscales_h, 1); - vector signed short vs2 = vec_splat(vscales_h, 2); - vector signed short vs3 = vec_splat(vscales_h, 3); - vector signed short vs4 = vec_splat(vscales_h, 4); - vector signed short vs5 = vec_splat(vscales_h, 5); - vector signed short vs6 = vec_splat(vscales_h, 6); - vector signed short vs7 = vec_splat(vscales_h, 7); - vscales = vec_sld(vscales, vscales, 8); - - vector signed short qv00 = vec_add(vec_mule(q3x00, q8y00), vec_mulo(q3x00, q8y00)); - vector signed short qv01 = vec_add(vec_mule(q3x01, q8y01), vec_mulo(q3x01, q8y01)); - vector signed short qv02 = vec_add(vec_mule(q3x02, q8y02), vec_mulo(q3x02, q8y02)); - vector signed short qv03 = vec_add(vec_mule(q3x03, q8y03), vec_mulo(q3x03, q8y03)); - vector signed short qv10 = vec_add(vec_mule(q3x10, q8y10), vec_mulo(q3x10, q8y10)); - vector signed short qv11 = vec_add(vec_mule(q3x11, q8y11), vec_mulo(q3x11, q8y11)); - vector signed short qv12 = vec_add(vec_mule(q3x12, q8y12), vec_mulo(q3x12, q8y12)); - vector signed short qv13 = vec_add(vec_mule(q3x13, q8y13), vec_mulo(q3x13, q8y13)); - - vsumi0 = vec_msum(qv00, vs0, vsumi0); - vsumi1 = vec_msum(qv01, vs2, vsumi1); - vsumi2 = vec_msum(qv02, vs4, vsumi2); - vsumi3 = vec_msum(qv03, vs6, vsumi3); - vsumi4 = vec_msum(qv10, vs1, vsumi4); - vsumi5 = vec_msum(qv11, vs3, vsumi5); - vsumi6 = vec_msum(qv12, vs5, vsumi6); - vsumi7 = vec_msum(qv13, vs7, vsumi7); - } - - vsumi0 = vec_add(vsumi0, vsumi4); - vsumi1 = vec_add(vsumi1, vsumi5); - vsumi2 = vec_add(vsumi2, vsumi6); - vsumi3 = vec_add(vsumi3, vsumi7); - - vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); - vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); - vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); - vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); - } - - vsumf0 = vec_add(vsumf0, vsumf2); - vsumf1 = vec_add(vsumf1, vsumf3); - - vsumf0 = vec_add(vsumf0, vsumf1); - - vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); - vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); - - *s = vec_extract(vsumf0, 0); - -#elif defined __loongarch_asx - - const __m128i m32 = __lsx_vreplgr2vr_b(32); - - __m256 acc = (__m256)__lasx_xvldi(0); - - uint32_t aux[3]; - - for (int i = 0; i < nb; ++i) { - - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); - const uint8_t * GGML_RESTRICT q3 = x[i].qs; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - // Set up scales - memcpy(aux, x[i].scales, 12); - __m128i scales128 = lsx_set_w( - ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4), - ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4), - (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4), - (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4)); - scales128 = __lsx_vsub_b(scales128, m32); - - const v16i8 shuffle_mask = {0, 2, 4, 6, 8, 10, 12, 14, 1, 3, 5, 7, 9, 11, 13, 15}; - const __m256i scales_shuffled = lasx_ext8_16(__lsx_vshuf_b(scales128, scales128, (__m128i)shuffle_mask)); - - // high bit - const __m256i hbits = __lasx_xvld((const __m256i*)x[i].hmask, 0); - - // integer accumulator - __m256i sumi = __lasx_xvldi(0); - - for (int j = 0; j < QK_K/128; ++j) { - // load low 2 bits - const __m256i q3bits = __lasx_xvld((const __m256i*)q3, 0); q3 += 32; - - // prepare low and high bits - const __m256i q3l_0 = __lasx_xvandi_b(q3bits, 3); - const __m256i q3l_1 = 
__lasx_xvandi_b(__lasx_xvsrli_b(q3bits, 2), 3); - const __m256i q3l_2 = __lasx_xvandi_b(__lasx_xvsrli_b(q3bits, 4), 3); - const __m256i q3l_3 = __lasx_xvsrli_b(q3bits, 6); - const __m256i q3h_0 = __lasx_xvslli_b(__lasx_xvseqi_b(lasx_xvandi_b_bit(hbits, 4 * j + 0), 0), 2); - const __m256i q3h_1 = __lasx_xvslli_b(__lasx_xvseqi_b(lasx_xvandi_b_bit(hbits, 4 * j + 1), 0), 2); - const __m256i q3h_2 = __lasx_xvslli_b(__lasx_xvseqi_b(lasx_xvandi_b_bit(hbits, 4 * j + 2), 0), 2); - const __m256i q3h_3 = __lasx_xvslli_b(__lasx_xvseqi_b(lasx_xvandi_b_bit(hbits, 4 * j + 3), 0), 2); - const __m256i q3_0 = __lasx_xvor_v(q3h_0, q3l_0); - const __m256i q3_1 = __lasx_xvor_v(q3h_1, q3l_1); - const __m256i q3_2 = __lasx_xvor_v(q3h_2, q3l_2); - const __m256i q3_3 = __lasx_xvor_v(q3h_3, q3l_3); - - // load Q8 quants - const __m256i q8_0 = __lasx_xvld((const __m256i*)q8, 0); q8 += 32; - const __m256i q8_1 = __lasx_xvld((const __m256i*)q8, 0); q8 += 32; - const __m256i q8_2 = __lasx_xvld((const __m256i*)q8, 0); q8 += 32; - const __m256i q8_3 = __lasx_xvld((const __m256i*)q8, 0); q8 += 32; - - __m256i p16_0 = lasx_madd_h_b(q8_0, q3_0); - __m256i p16_1 = lasx_madd_h_b(q8_1, q3_1); - __m256i p16_2 = lasx_madd_h_b(q8_2, q3_2); - __m256i p16_3 = lasx_madd_h_b(q8_3, q3_3); - - // multiply with scales - p16_0 = lasx_madd_h(lasx_xvrepl128vei_h(scales_shuffled, 4 * j + 0), p16_0); - p16_1 = lasx_madd_h(lasx_xvrepl128vei_h(scales_shuffled, 4 * j + 1), p16_1); - p16_2 = lasx_madd_h(lasx_xvrepl128vei_h(scales_shuffled, 4 * j + 2), p16_2); - p16_3 = lasx_madd_h(lasx_xvrepl128vei_h(scales_shuffled, 4 * j + 3), p16_3); - - // accumulate - p16_0 = __lasx_xvadd_w(p16_0, p16_1); - p16_2 = __lasx_xvadd_w(p16_2, p16_3); - sumi = __lasx_xvadd_w(sumi, __lasx_xvadd_w(p16_0, p16_2)); - } - // multiply with block scale and accumulate - acc = __lasx_xvfmadd_s(__lasx_xvreplfr2vr_s(d), __lasx_xvffint_s_w(sumi), acc); - } - - *s = hsum_float_8(acc); -#elif defined(__VXE__) || defined(__VXE2__) - uint32_t aux[3]; - uint32_t utmp[4]; - - const int32x4_t v_z = vec_splat_s32(0); - const uint8x16_t v_3m = vec_splat_u8(0x03); - - const uint8x16_t v_0c = vec_splat_u8(1); - const uint8x16_t v_1c = vec_sl(v_0c, 1); - const uint8x16_t v_2c = vec_sl(v_0c, 2); - const uint8x16_t v_3c = vec_sl(v_0c, 3); - - uint8x16_t q3h[4]; - uint8x16_t q3b[2]; - int8x16_t q3bytes[4]; - int8x16_t q8bytes[4]; - uint8x16_t qhbits[2]; - - float sum = 0; - - for (int i = 0; i < nb; ++i) { - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); - - const uint8_t * restrict x0l = x[i].qs; - const uint8_t * restrict x0h = x[i].hmask; - const int8_t * restrict y0 = y[i].qs; - - qhbits[0] = vec_xl(0 , x0h); - qhbits[1] = vec_xl(16, x0h); - - int32_t isum = 0; - - memcpy(aux, x[i].scales, 12); - utmp[3] = ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4); - utmp[2] = ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4); - utmp[1] = (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4); - utmp[0] = (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4); - - int8_t * scale = (int8_t *)utmp; - for (int j = 0; j < 16; ++j) scale[j] -= 32; - - for (int j = 0; j < QK_K/128; ++j) { - int32x4_t isum0, isum1, isum2, isum3; - - q3b[0] = vec_xl(0 , x0l); - q3b[1] = vec_xl(16, x0l); - x0l += 32; - - q8bytes[0] = vec_xl(0 , y0); - q8bytes[1] = vec_xl(16 , y0); - q8bytes[2] = vec_xl(32 , y0); - q8bytes[3] = vec_xl(48 , y0); - q8bytes[4] = vec_xl(64 , y0); - q8bytes[5] = vec_xl(80 , y0); - q8bytes[6] = vec_xl(96 , y0); - q8bytes[7] = vec_xl(112, y0); - y0 += 128; - - q3h[0] = 
vec_sl(vec_andc(v_0c, qhbits[0]), 2); - q3h[1] = vec_sl(vec_andc(v_0c, qhbits[1]), 2); - q3h[2] = vec_sl(vec_andc(v_1c, qhbits[0]), 1); - q3h[3] = vec_sl(vec_andc(v_1c, qhbits[1]), 1); - - q3bytes[0] = vec_sub((int8x16_t)vec_and(q3b[0], v_3m), (int8x16_t)q3h[0]); - q3bytes[1] = vec_sub((int8x16_t)vec_and(q3b[1], v_3m), (int8x16_t)q3h[1]); - q3bytes[2] = vec_sub((int8x16_t)vec_and(vec_sr(q3b[0], 2), v_3m), (int8x16_t)q3h[2]); - q3bytes[3] = vec_sub((int8x16_t)vec_and(vec_sr(q3b[1], 2), v_3m), (int8x16_t)q3h[3]); - - isum0 = ggml_vec_dot(v_z, q3bytes[0], q8bytes[0]); - isum1 = ggml_vec_dot(v_z, q3bytes[1], q8bytes[1]); - isum2 = ggml_vec_dot(v_z, q3bytes[2], q8bytes[2]); - isum3 = ggml_vec_dot(v_z, q3bytes[3], q8bytes[3]); - - isum += (isum0[0] + isum0[1] + isum0[2] + isum0[3]) * scale[0]; - isum += (isum1[0] + isum1[1] + isum1[2] + isum1[3]) * scale[1]; - isum += (isum2[0] + isum2[1] + isum2[2] + isum2[3]) * scale[2]; - isum += (isum3[0] + isum3[1] + isum3[2] + isum3[3]) * scale[3]; - - scale += 4; - - q3h[0] = vec_andc(v_2c, qhbits[0]); - q3h[1] = vec_andc(v_2c, qhbits[1]); - q3h[2] = vec_sr(vec_andc(v_3c, qhbits[0]), 1); - q3h[3] = vec_sr(vec_andc(v_3c, qhbits[1]), 1); - - q3bytes[0] = vec_sub((int8x16_t)vec_and(vec_sr(q3b[0], 4), v_3m), (int8x16_t)q3h[0]); - q3bytes[1] = vec_sub((int8x16_t)vec_and(vec_sr(q3b[1], 4), v_3m), (int8x16_t)q3h[1]); - q3bytes[2] = vec_sub((int8x16_t)vec_and(vec_sr(q3b[0], 6), v_3m), (int8x16_t)q3h[2]); - q3bytes[3] = vec_sub((int8x16_t)vec_and(vec_sr(q3b[1], 6), v_3m), (int8x16_t)q3h[3]); - - isum0 = ggml_vec_dot(v_z, q3bytes[0], q8bytes[4]); - isum1 = ggml_vec_dot(v_z, q3bytes[1], q8bytes[5]); - isum2 = ggml_vec_dot(v_z, q3bytes[2], q8bytes[6]); - isum3 = ggml_vec_dot(v_z, q3bytes[3], q8bytes[7]); - - isum += (isum0[0] + isum0[1] + isum0[2] + isum0[3]) * scale[0]; - isum += (isum1[0] + isum1[1] + isum1[2] + isum1[3]) * scale[1]; - isum += (isum2[0] + isum2[1] + isum2[2] + isum2[3]) * scale[2]; - isum += (isum3[0] + isum3[1] + isum3[2] + isum3[3]) * scale[3]; - - scale += 4; - - if (j == 0) { - qhbits[0] = vec_sr(qhbits[0], 4); - qhbits[1] = vec_sr(qhbits[1], 4); - } - } - - sum += d * isum; - } - - *s = sum; -#else - // scalar version - // This function is written like this so the compiler can manage to vectorize most of it - // Using -Ofast, GCC and clang manage to produce code that is within a factor of 2 or so from the - // manually vectorized version above. Every other version I tried would run at least 4 times slower. - // The ideal situation would be if we could just write the code once, and the compiler would - // automatically produce the best possible set of machine instructions, instead of us having to manually - // write vectorized versions for AVX, ARM_NEON, etc. - - int8_t aux8[QK_K]; - int16_t aux16[8]; - float sums [8]; - int32_t aux32[8]; - memset(sums, 0, 8*sizeof(float)); - - uint32_t auxs[4]; - const int8_t * scales = (const int8_t*)auxs; - - float sumf = 0; - for (int i = 0; i < nb; ++i) { - const uint8_t * GGML_RESTRICT q3 = x[i].qs; - const uint8_t * GGML_RESTRICT hm = x[i].hmask; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - memset(aux32, 0, 8*sizeof(int32_t)); - int8_t * GGML_RESTRICT a = aux8; - uint8_t m = 1; - for (int j = 0; j < QK_K; j += 128) { - for (int l = 0; l < 32; ++l) a[l] = q3[l] & 3; - for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4); - a += 32; m <<= 1; - for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 2) & 3; - for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 
0 : 4); - a += 32; m <<= 1; - for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 4) & 3; - for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4); - a += 32; m <<= 1; - for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 6) & 3; - for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4); - a += 32; m <<= 1; - q3 += 32; - } - a = aux8; - - memcpy(auxs, x[i].scales, 12); - uint32_t tmp = auxs[2]; - auxs[2] = ((auxs[0] >> 4) & kmask2) | (((tmp >> 4) & kmask1) << 4); - auxs[3] = ((auxs[1] >> 4) & kmask2) | (((tmp >> 6) & kmask1) << 4); - auxs[0] = (auxs[0] & kmask2) | (((tmp >> 0) & kmask1) << 4); - auxs[1] = (auxs[1] & kmask2) | (((tmp >> 2) & kmask1) << 4); - for (int j = 0; j < QK_K/16; ++j) { - for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; - for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l]; - q8 += 8; a += 8; - for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; - for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l]; - q8 += 8; a += 8; - } - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; - for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; - } - for (int l = 0; l < 8; ++l) sumf += sums[l]; - *s = sumf; - -#endif - -} - -void ggml_vec_dot_q4_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { - assert(n % QK_K == 0); - assert(nrc == 1); - UNUSED(nrc); - UNUSED(bx); - UNUSED(by); - UNUSED(bs); - - const block_q4_K * GGML_RESTRICT x = vx; - const block_q8_K * GGML_RESTRICT y = vy; - - const int nb = n / QK_K; - - static const uint32_t kmask1 = 0x3f3f3f3f; - static const uint32_t kmask2 = 0x0f0f0f0f; - static const uint32_t kmask3 = 0x03030303; - - uint32_t utmp[4]; - -#ifdef __ARM_FEATURE_SVE - float sumf = 0; - for (int i = 0; i < nb; ++i) { - - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); - const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin); - - const int16x8_t q8sums = vpaddq_s16(vld1q_s16(y[i].bsums), vld1q_s16(y[i].bsums + 8)); - - memcpy(utmp, x[i].scales, K_SCALE_SIZE); - - uint32x2_t mins8 = { 0 }; - mins8 = vset_lane_u32(utmp[1] & kmask1, mins8, 0); - mins8 = vset_lane_u32(((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4), mins8, 1); - - utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); - utmp[0] &= kmask1; - - const int16x8_t mins = vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_u32(mins8))); - const int32x4_t prod = vaddq_s32(vmull_s16(vget_low_s16 (q8sums), vget_low_s16 (mins)), - vmull_s16(vget_high_s16(q8sums), vget_high_s16(mins))); - sumf -= dmin * vaddvq_s32(prod); - - const uint8_t * scales = (const uint8_t *)utmp; - - const uint8_t * GGML_RESTRICT q4 = x[i].qs; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - - const int vector_length = ggml_cpu_get_sve_cnt()*8; - const svuint8_t m4b = svdup_n_u8(0xf); - const svint32_t mzero = svdup_n_s32(0); - svint32_t sumi1 = svdup_n_s32(0); - svint32_t sumi1_1 = svdup_n_s32(0); - svint32_t sumi1_2 = svdup_n_s32(0); - svint32_t sumi2 = svdup_n_s32(0); - svint32_t sumi2_1 = svdup_n_s32(0); - svint32_t sumi2_2 = svdup_n_s32(0); - switch (vector_length) { - case 128: - { - for (int j = 0; j < QK_K/64; ++j) { - svint8_t q4bytes = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svld1_u8(svptrue_b8(), q4), m4b)); - svint8_t q8bytes = svld1_s8(svptrue_b8(), q8); q8 += 16; - sumi1_1 = svmla_n_s32_x(svptrue_b32(), sumi1_1, svdot_s32(mzero, q4bytes, q8bytes), scales[2*j+0]); - q4bytes = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svld1_u8(svptrue_b8(), q4+16), m4b)); - q8bytes = 
svld1_s8(svptrue_b8(), q8); q8 += 16; - sumi1_2 = svmla_n_s32_x(svptrue_b32(), sumi1_2, svdot_s32(mzero, q4bytes, q8bytes), scales[2*j+0]); - - q4bytes = svreinterpret_s8_u8(svlsr_n_u8_x(svptrue_b8(), svld1_u8(svptrue_b8(), q4), 4)); - q8bytes = svld1_s8(svptrue_b8(), q8); q8 += 16; - sumi2_1 = svmla_n_s32_x(svptrue_b32(), sumi2_1, svdot_s32(mzero, q4bytes, q8bytes), scales[2*j+1]); - q4bytes = svreinterpret_s8_u8(svlsr_n_u8_x(svptrue_b8(), svld1_u8(svptrue_b8(), q4+16), 4)); - q8bytes = svld1_s8(svptrue_b8(), q8); q8 += 16; - sumi2_2 = svmla_n_s32_x(svptrue_b32(), sumi2_2, svdot_s32(mzero, q4bytes, q8bytes), scales[2*j+1]); - q4 += 32; - } - sumi1 = svadd_s32_x(svptrue_b32(), sumi1_1, sumi1_2); - sumi2 = svadd_s32_x(svptrue_b32(), sumi2_1, sumi2_2); - sumf += d * (svaddv_s32(svptrue_b32(), svadd_s32_x(svptrue_b32(), sumi1, sumi2))); - } break; - case 256: - case 512: - { - for (int j = 0; j < QK_K/64; ++j) { - const svuint8_t q4bits = svld1_u8(svptrue_pat_b8(SV_VL32), q4); q4 += 32; - svint8_t q4bytes = svreinterpret_s8_u8(svand_u8_x(svptrue_pat_b8(SV_VL32), q4bits, m4b)); - svint8_t q8bytes = svld1_s8(svptrue_pat_b8(SV_VL32), q8); q8 += 32; - sumi1 = svmla_n_s32_x(svptrue_pat_b32(SV_VL8), sumi1, svdot_s32(mzero, q4bytes, q8bytes), scales[2*j+0]); - - q4bytes = svreinterpret_s8_u8(svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q4bits, 4)); - q8bytes = svld1_s8(svptrue_pat_b8(SV_VL32), q8); q8 += 32; - sumi2 = svmla_n_s32_x(svptrue_pat_b32(SV_VL8), sumi2, svdot_s32(mzero, q4bytes, q8bytes), scales[2*j+1]); - } - sumf += d * (svaddv_s32(svptrue_pat_b32(SV_VL8), svadd_s32_x(svptrue_pat_b32(SV_VL8), sumi1, sumi2))); - } break; - default: - assert(false && "Unsupported vector length"); - break; - } - } - *s = sumf; -#elif defined __ARM_NEON - const uint8x16_t m4b = vdupq_n_u8(0xf); - const int32x4_t mzero = vdupq_n_s32(0); - - ggml_int8x16x2_t q4bytes; - ggml_int8x16x2_t q8bytes; - - float sumf = 0; - - for (int i = 0; i < nb; ++i) { - - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); - const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin); - - const int16x8_t q8sums = vpaddq_s16(vld1q_s16(y[i].bsums), vld1q_s16(y[i].bsums + 8)); - - memcpy(utmp, x[i].scales, 12); - - uint32x2_t mins8 = { 0 }; - mins8 = vset_lane_u32(utmp[1] & kmask1, mins8, 0); - mins8 = vset_lane_u32(((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4), mins8, 1); - - utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); - utmp[0] &= kmask1; - - const int16x8_t mins = vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_u32(mins8))); - const int32x4_t prod = vaddq_s32(vmull_s16(vget_low_s16 (q8sums), vget_low_s16 (mins)), - vmull_s16(vget_high_s16(q8sums), vget_high_s16(mins))); - sumf -= dmin * vaddvq_s32(prod); - - const uint8_t * scales = (const uint8_t *)utmp; - - const uint8_t * GGML_RESTRICT q4 = x[i].qs; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - - int32_t sumi1 = 0; - int32_t sumi2 = 0; - - for (int j = 0; j < QK_K/64; ++j) { - const ggml_uint8x16x2_t q4bits = ggml_vld1q_u8_x2(q4); q4 += 32; - - q8bytes = ggml_vld1q_s8_x2(q8); q8 += 32; - q4bytes.val[0] = vreinterpretq_s8_u8(vandq_u8 (q4bits.val[0], m4b)); - q4bytes.val[1] = vreinterpretq_s8_u8(vandq_u8 (q4bits.val[1], m4b)); - - const int32x4_t p1 = ggml_vdotq_s32(ggml_vdotq_s32(mzero, q4bytes.val[0], q8bytes.val[0]), q4bytes.val[1], q8bytes.val[1]); - sumi1 += vaddvq_s32(p1) * scales[2*j+0]; - - q8bytes = ggml_vld1q_s8_x2(q8); q8 += 32; - q4bytes.val[0] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[0], 4)); - q4bytes.val[1] = 
vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[1], 4)); - - const int32x4_t p2 = ggml_vdotq_s32(ggml_vdotq_s32(mzero, q4bytes.val[0], q8bytes.val[0]), q4bytes.val[1], q8bytes.val[1]); - - sumi2 += vaddvq_s32(p2) * scales[2*j+1]; - } - - sumf += d * (sumi1 + sumi2); - - } - - *s = sumf; - -#elif defined __wasm_simd128__ - const uint8_t * scales = (const uint8_t*)&utmp[0]; - float sumf = 0; - - for (int i = 0; i < nb; ++i) { - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); - const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin); // Corrected sign - - const uint8_t * GGML_RESTRICT q4 = x[i].qs; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - - // Process scales and mins - memcpy(utmp, x[i].scales, 12); - utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); - const uint32_t uaux = utmp[1] & kmask1; - utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); - utmp[2] = uaux; - utmp[0] &= kmask1; - - // Sum mins * q8sums - int32_t sumi = 0; - const int16_t * GGML_RESTRICT q8sums = y[i].bsums; - const uint8_t * m = (const uint8_t *)&utmp[2]; - for (int j = 0; j < 16; j += 2) { - sumi += (q8sums[j] + q8sums[j+1]) * m[j/2]; - } - sumf -= dmin * sumi; - - int32_t sumi1 = 0; - int32_t sumi2 = 0; - - for (int j = 0; j < QK_K/64; ++j) { - // Load 64 4-bit weights (32 bytes) - const v128_t q4x0 = wasm_v128_load(q4); - const v128_t q4x1 = wasm_v128_load(q4 + 16); - q4 += 32; - - // Split into low/high nibbles - const v128_t q4l0 = wasm_v128_and(q4x0, wasm_i8x16_splat(0x0F)); - const v128_t q4h0 = wasm_u8x16_shr(q4x0, 4); - const v128_t q4l1 = wasm_v128_and(q4x1, wasm_i8x16_splat(0x0F)); - const v128_t q4h1 = wasm_u8x16_shr(q4x1, 4); - - // Load 64 8-bit values (64 bytes) - const v128_t q8x0 = wasm_v128_load(q8); - const v128_t q8x1 = wasm_v128_load(q8 + 16); - const v128_t q8x2 = wasm_v128_load(q8 + 32); - const v128_t q8x3 = wasm_v128_load(q8 + 48); - q8 += 64; - - // Low nibble products - v128_t vacc1 = wasm_i32x4_dot_i16x8( - wasm_i16x8_extend_low_i8x16(q4l0), - wasm_i16x8_extend_low_i8x16(q8x0) - ); - vacc1 = wasm_i32x4_add(vacc1, wasm_i32x4_dot_i16x8( - wasm_i16x8_extend_high_i8x16(q4l0), - wasm_i16x8_extend_high_i8x16(q8x0) - )); - vacc1 = wasm_i32x4_add(vacc1, wasm_i32x4_dot_i16x8( - wasm_i16x8_extend_low_i8x16(q4l1), - wasm_i16x8_extend_low_i8x16(q8x1) - )); - vacc1 = wasm_i32x4_add(vacc1, wasm_i32x4_dot_i16x8( - wasm_i16x8_extend_high_i8x16(q4l1), - wasm_i16x8_extend_high_i8x16(q8x1) - )); - - // High nibble products - v128_t vacc2 = wasm_i32x4_dot_i16x8( - wasm_i16x8_extend_low_i8x16(q4h0), - wasm_i16x8_extend_low_i8x16(q8x2) - ); - vacc2 = wasm_i32x4_add(vacc2, wasm_i32x4_dot_i16x8( - wasm_i16x8_extend_high_i8x16(q4h0), - wasm_i16x8_extend_high_i8x16(q8x2) - )); - vacc2 = wasm_i32x4_add(vacc2, wasm_i32x4_dot_i16x8( - wasm_i16x8_extend_low_i8x16(q4h1), - wasm_i16x8_extend_low_i8x16(q8x3) - )); - vacc2 = wasm_i32x4_add(vacc2, wasm_i32x4_dot_i16x8( - wasm_i16x8_extend_high_i8x16(q4h1), - wasm_i16x8_extend_high_i8x16(q8x3) - )); - - // Accumulate scaled results - int32_t vacc1_sum = wasm_i32x4_extract_lane(vacc1, 0) + wasm_i32x4_extract_lane(vacc1, 1) + - wasm_i32x4_extract_lane(vacc1, 2) + wasm_i32x4_extract_lane(vacc1, 3); - sumi1 += vacc1_sum * scales[2*j]; - - int32_t vacc2_sum = wasm_i32x4_extract_lane(vacc2, 0) + wasm_i32x4_extract_lane(vacc2, 1) + - wasm_i32x4_extract_lane(vacc2, 2) + wasm_i32x4_extract_lane(vacc2, 3); - sumi2 += vacc2_sum * scales[2*j+1]; - } - - sumf += d * (sumi1 + sumi2); - } - - *s = sumf; - -#elif defined __AVX2__ - - const __m256i m4 = 
_mm256_set1_epi8(0xF); - - __m256 acc = _mm256_setzero_ps(); - __m128 acc_m = _mm_setzero_ps(); - - for (int i = 0; i < nb; ++i) { - - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); - const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin); - - memcpy(utmp, x[i].scales, 12); - utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); - const uint32_t uaux = utmp[1] & kmask1; - utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); - utmp[2] = uaux; - utmp[0] &= kmask1; - - const uint8_t * GGML_RESTRICT q4 = x[i].qs; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - - const __m256i mins_and_scales = _mm256_cvtepu8_epi16(_mm_set_epi32(utmp[3], utmp[2], utmp[1], utmp[0])); - - const __m256i q8sums = _mm256_loadu_si256((const __m256i*)y[i].bsums); - const __m128i q8s = _mm_hadd_epi16(_mm256_extracti128_si256(q8sums, 0), _mm256_extracti128_si256(q8sums, 1)); - const __m128i prod = _mm_madd_epi16(_mm256_extracti128_si256(mins_and_scales, 1), q8s); - acc_m = _mm_fmadd_ps(_mm_set1_ps(dmin), _mm_cvtepi32_ps(prod), acc_m); - - const __m128i sc128 = _mm256_extracti128_si256(mins_and_scales, 0); - const __m256i scales = MM256_SET_M128I(sc128, sc128); - - __m256i sumi = _mm256_setzero_si256(); - - for (int j = 0; j < QK_K/64; ++j) { - - const __m256i scale_l = _mm256_shuffle_epi8(scales, get_scale_shuffle_k4(2*j+0)); - const __m256i scale_h = _mm256_shuffle_epi8(scales, get_scale_shuffle_k4(2*j+1)); - - const __m256i q4bits = _mm256_loadu_si256((const __m256i*)q4); q4 += 32; - const __m256i q4l = _mm256_and_si256(q4bits, m4); - const __m256i q4h = _mm256_and_si256(_mm256_srli_epi16(q4bits, 4), m4); - - const __m256i q8l = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; - __m256i p16l = _mm256_maddubs_epi16(q4l, q8l); - p16l = _mm256_madd_epi16(scale_l, p16l); - - const __m256i q8h = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; - __m256i p16h = _mm256_maddubs_epi16(q4h, q8h); - p16h = _mm256_madd_epi16(scale_h, p16h); - const __m256i sumj = _mm256_add_epi32(p16l, p16h); - - sumi = _mm256_add_epi32(sumi, sumj); - } - - __m256 vd = _mm256_set1_ps(d); - acc = _mm256_fmadd_ps(vd, _mm256_cvtepi32_ps(sumi), acc); - - } - - acc_m = _mm_add_ps(acc_m, _mm_movehl_ps(acc_m, acc_m)); - acc_m = _mm_add_ss(acc_m, _mm_movehdup_ps(acc_m)); - - *s = hsum_float_8(acc) + _mm_cvtss_f32(acc_m); - -#elif defined __AVX__ - - const __m128i m4 = _mm_set1_epi8(0xF); - const __m128i m2 = _mm_set1_epi8(0x2); - - __m256 acc = _mm256_setzero_ps(); - __m128 acc_m = _mm_setzero_ps(); - - for (int i = 0; i < nb; ++i) { - - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); - const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin); - - const uint8_t * GGML_RESTRICT q4 = x[i].qs; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - - memcpy(utmp, x[i].scales, 12); - utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); - const uint32_t uaux = utmp[1] & kmask1; - utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); - utmp[2] = uaux; - utmp[0] &= kmask1; - - const __m128i utmps = _mm_set_epi32(utmp[3], utmp[2], utmp[1], utmp[0]); - const __m128i scales = _mm_cvtepu8_epi16(utmps); - const __m128i mins = _mm_cvtepu8_epi16(_mm_unpackhi_epi64(utmps, utmps)); - - const __m128i q8sums_0 = _mm_loadu_si128((const __m128i*)&y[i].bsums[0]); - const __m128i q8sums_1 = _mm_loadu_si128((const __m128i*)&y[i].bsums[8]); - const __m128i q8s = _mm_hadd_epi16(q8sums_0, q8sums_1); - const __m128i prod = _mm_madd_epi16(mins, q8s); - acc_m = _mm_add_ps(_mm_mul_ps(_mm_set1_ps(dmin), 
_mm_cvtepi32_ps(prod)), acc_m); - - __m128i sumi_0 = _mm_setzero_si128(); - __m128i sumi_1 = _mm_setzero_si128(); - - __m128i shuffle = _mm_set1_epi16(0x0100); - for (int j = 0; j < QK_K/64; ++j) { - - const __m128i scale_l = _mm_shuffle_epi8(scales, shuffle); - shuffle = _mm_add_epi16(shuffle, m2); - const __m128i scale_h = _mm_shuffle_epi8(scales, shuffle); - shuffle = _mm_add_epi16(shuffle, m2); - - __m128i q4bits = _mm_loadu_si128((const __m128i*)q4); q4 += 16; - const __m128i q4l_0 = _mm_and_si128(q4bits, m4); - const __m128i q4h_0 = _mm_and_si128(_mm_srli_epi16(q4bits, 4), m4); - q4bits = _mm_loadu_si128((const __m128i*)q4); q4 += 16; - const __m128i q4l_1 = _mm_and_si128(q4bits, m4); - const __m128i q4h_1 = _mm_and_si128(_mm_srli_epi16(q4bits, 4), m4); - - const __m128i q8l_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; - __m128i p16l = _mm_maddubs_epi16(q4l_0, q8l_0); - p16l = _mm_madd_epi16(scale_l, p16l); - sumi_0 = _mm_add_epi32(sumi_0, p16l); - const __m128i q8l_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; - p16l = _mm_maddubs_epi16(q4l_1, q8l_1); - p16l = _mm_madd_epi16(scale_l, p16l); - sumi_1 = _mm_add_epi32(sumi_1, p16l); - - const __m128i q8h_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; - __m128i p16h = _mm_maddubs_epi16(q4h_0, q8h_0); - p16h = _mm_madd_epi16(scale_h, p16h); - sumi_0 = _mm_add_epi32(sumi_0, p16h); - const __m128i q8h_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16; - p16h = _mm_maddubs_epi16(q4h_1, q8h_1); - p16h = _mm_madd_epi16(scale_h, p16h); - sumi_1 = _mm_add_epi32(sumi_1, p16h); - - } - - __m256 vd = _mm256_set1_ps(d); - __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0); - acc = _mm256_add_ps(_mm256_mul_ps(vd, _mm256_cvtepi32_ps(sumi)), acc); - - } - - acc_m = _mm_add_ps(acc_m, _mm_movehl_ps(acc_m, acc_m)); - acc_m = _mm_add_ss(acc_m, _mm_movehdup_ps(acc_m)); - - *s = hsum_float_8(acc) + _mm_cvtss_f32(acc_m); - -#elif defined __riscv_v_intrinsic - - const uint8_t * scales = (const uint8_t*)&utmp[0]; - const uint8_t * mins = (const uint8_t*)&utmp[2]; - - const int vector_length = __riscv_vlenb() * 8; - float sumf = 0; - - switch (vector_length) { - case 256: - for (int i = 0; i < nb; ++i) { - - size_t vl = 8; - - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); - const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin); - - vint16mf2_t q8sums_0 = __riscv_vlse16_v_i16mf2(y[i].bsums, 4, vl); - vint16mf2_t q8sums_1 = __riscv_vlse16_v_i16mf2(y[i].bsums+1, 4, vl); - vint16mf2_t q8sums = __riscv_vadd_vv_i16mf2(q8sums_0, q8sums_1, vl); - - memcpy(utmp, x[i].scales, 12); - utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); - const uint32_t uaux = utmp[1] & kmask1; - utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); - utmp[2] = uaux; - utmp[0] &= kmask1; - - vuint8mf4_t mins8 = __riscv_vle8_v_u8mf4(mins, vl); - vint16mf2_t v_mins = __riscv_vreinterpret_v_u16mf2_i16mf2(__riscv_vzext_vf2_u16mf2(mins8, vl)); - vint32m1_t prod = __riscv_vwmul_vv_i32m1(q8sums, v_mins, vl); - - vint32m1_t sumi = __riscv_vredsum_vs_i32m1_i32m1(prod, __riscv_vmv_v_x_i32m1(0, 1), vl); - sumf -= dmin * __riscv_vmv_x_s_i32m1_i32(sumi); - - const uint8_t * GGML_RESTRICT q4 = x[i].qs; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - - vl = 32; - - int32_t sum_1 = 0; - int32_t sum_2 = 0; - - vint16m1_t vzero = __riscv_vmv_v_x_i16m1(0, 1); - - for (int j = 0; j < QK_K/64; ++j) { - // load Q4 - vuint8m1_t q4_x = __riscv_vle8_v_u8m1(q4, vl); - - // load Q8 and multiply it with lower Q4 nibble - vint8m1_t q8_0 = __riscv_vle8_v_i8m1(q8, vl); - 
-                vint8m1_t q4_0 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(q4_x, 0x0F, vl));
-                vint16m2_t qv_0 = __riscv_vwmul_vv_i16m2(q4_0, q8_0, vl);
-                vint16m1_t vs_0 = __riscv_vredsum_vs_i16m2_i16m1(qv_0, vzero, vl);
-
-                sum_1 += __riscv_vmv_x_s_i16m1_i16(vs_0) * scales[2*j+0];
-
-                // load Q8 and multiply it with upper Q4 nibble
-                vint8m1_t q8_1 = __riscv_vle8_v_i8m1(q8+32, vl);
-                vint8m1_t q4_1 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vsrl_vx_u8m1(q4_x, 0x04, vl));
-                vint16m2_t qv_1 = __riscv_vwmul_vv_i16m2(q4_1, q8_1, vl);
-                vint16m1_t vs_1 = __riscv_vredsum_vs_i16m2_i16m1(qv_1, vzero, vl);
-
-                sum_2 += __riscv_vmv_x_s_i16m1_i16(vs_1) * scales[2*j+1];
-
-                q4 += 32; q8 += 64;
-
-            }
-
-            sumf += d*(sum_1 + sum_2);
-
-        }
-        break;
-    case 128:
-        for (int i = 0; i < nb; ++i) {
-            const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
-            const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
-
-            int tmp, tmp2, sumi;
-            __asm__ __volatile__(
-                "vsetivli zero, 12, e8, m1\n\t"
-                "vle8.v v1, (%[s6b])\n\t" // {aux[0], aux[1], aux[2]}
-                "vsetivli zero, 4, e32, m1\n\t"
-                "vslidedown.vi v2, v1, 2\n\t"
-                "vmv1r.v v3, v2\n\t"
-                "vslideup.vi v2, v3, 1\n\t" // {aux[2], aux[2]}
-                "vsetivli zero, 2, e32, m1\n\t"
-                "vmv.v.i v4, 4\n\t"
-                "vand.vx v8, v1, %[kmask1]\n\t"
-                "vslide1up.vx v5, v4, zero\n\t" // {0, 4}
-                "vsrl.vi v6, v1, 6\n\t"
-                "vsrl.vv v7, v2, v5\n\t"
-                "vand.vx v0, v6, %[kmask3]\n\t"
-                "vand.vx v2, v7, %[kmask2]\n\t"
-                "vsll.vi v6, v0, 4\n\t"
-                "li %[t2], 8\n\t"
-                "addi %[t1], %[utmp], 4\n\t"
-                "vor.vv v1, v6, v2\n\t"
-                "vsse32.v v8, (%[utmp]), %[t2]\n\t"
-                "vsse32.v v1, (%[t1]), %[t2]\n\t"
-                "vsetivli zero, 8, e16, m1\n\t"
-                "vle32.v v2, (%[bsums])\n\t"
-                "vnsrl.wi v0, v2, 0\n\t"
-                "vnsrl.wi v1, v2, 16\n\t"
-                "vadd.vv v2, v0, v1\n\t"
-                "vle8.v v3, (%[mins])\n\t"
-                "vzext.vf2 v4, v3\n\t"
-                "vwmul.vv v6, v4, v2\n\t"
-                "vmv.v.x v0, zero\n\t"
-                "vsetivli zero, 8, e32, m2\n\t"
-                "vredsum.vs v0, v6, v0\n\t"
-                "vmv.x.s %[sumi], v0"
-                : [t1] "=&r" (tmp), [t2] "=&r" (tmp2), [sumi] "=&r" (sumi)
-                : [bsums] "r" (y[i].bsums), [mins] "r" (mins), [utmp] "r" (utmp)
-                , [s6b] "r" (x[i].scales), [kmask1] "r" (kmask1)
-                , [kmask2] "r" (kmask2), [kmask3] "r" (kmask3)
-                : "memory"
-                , "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7"
-                , "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"
-                , "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"
-                , "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
-            );
-            sumf -= dmin * sumi;
-
-            const uint8_t * restrict q4 = x[i].qs;
-            const int8_t * restrict q8 = y[i].qs;
-
-            sumi = 0;
-            const uint8_t * scale = scales;
-
-            for (int j = 0; j < QK_K/128; ++j) {
-                int vl128 = 128, vl64 = 64, vl32 = 32;
-                __asm__ __volatile__(
-                    "vsetvli zero, %[vl128], e8, m8\n\t"
-                    "vle8.v v8, (%[q8])\n\t"
-                    "vsetvli zero, %[vl64], e8, m4\n\t"
-                    "vle8.v v0, (%[q4])\n\t"
-                    "vsrl.vi v4, v0, 4\n\t"
-                    "vand.vi v0, v0, 0xF\n\t"
-                    "vsetvli zero, %[vl32], e8, m2\n\t"
-                    "vwmul.vv v28, v6, v14\n\t"
-                    "vwmul.vv v20, v4, v10\n\t"
-                    "vwmul.vv v24, v2, v12\n\t"
-                    "vwmul.vv v16, v0, v8\n\t"
-                    "vsetivli zero, 4, e32, m1\n\t"
-                    "vle8.v v2, (%[scale])\n\t"
-                    "vmv.v.x v0, zero\n\t"
-                    "vzext.vf4 v1, v2\n\t"
-                    "vsetvli zero, %[vl32], e16, m4\n\t"
-                    "vwredsum.vs v6, v24, v0\n\t"
-                    "vwredsum.vs v7, v28, v0\n\t"
-                    "vwredsum.vs v4, v16, v0\n\t"
-                    "vwredsum.vs v5, v20, v0\n\t"
-                    "vsetivli zero, 4, e32, m1\n\t"
-                    "vslideup.vi v6, v7, 1\n\t"
-                    "vslideup.vi v4, v5, 1\n\t"
-                    "vslideup.vi v4, v6, 2\n\t"
-                    "vmul.vv v8, v4, v1\n\t"
-                    "vredsum.vs v0, v8, v0\n\t"
-                    "vmv.x.s %[tmp], v0\n\t"
-                    "add %[sumi], %[sumi], %[tmp]"
-                    : [tmp] "=&r" (tmp), [sumi] "+&r" (sumi)
-                    : [vl128] "r" (vl128), [vl64] "r" (vl64), [vl32] "r" (vl32)
-                    , [q4] "r" (q4), [q8] "r" (q8), [scale] "r" (scale)
-                    : "memory"
-                    , "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7"
-                    , "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"
-                    , "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"
-                    , "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
-                );
-
-                q4 += 64; q8 += 128; scale += 4;
-            }
-
-            sumf += d * sumi;
-        }
-        break;
-    default:
-        assert(false && "Unsupported vector length");
-        break;
-    }
-
-    *s = sumf;
-
-#elif defined(__POWER9_VECTOR__)
-    const vector signed char lowMask = vec_splats((signed char)0xF);
-    const vector signed char lowMask1 = vec_splats((int8_t)0x3f);
-    const vector signed char lowMask2 = vec_splats((int8_t)0x30);
-    const vector int v0 = vec_splats((int32_t)0);
-    const vector unsigned char v2 = vec_splats((uint8_t)2);
-    const vector unsigned char v4 = vec_splats((unsigned char)0x4);
-
-    vector float vsumf0 = vec_splats(0.0f);
-    vector float vsumf1 = vec_splats(0.0f);
-    vector float vsumf2 = vec_splats(0.0f);
-    vector float vsumf3 = vec_splats(0.0f);
-
-    for (int i = 0; i < nb; ++i) {
-        vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d));
-        vector float vyd = vec_splats(y[i].d);
-        vector float vd = vec_mul(vxd, vyd);
-
-        vector float vxmin = vec_splats(GGML_FP16_TO_FP32(x[i].dmin));
-        vector float vdmin = vec_mul(vxmin, vyd);
-
-        vector signed short q8ysums0 = vec_xl( 0, y[i].bsums);
-        vector signed short q8ysums1 = vec_xl(16, y[i].bsums);
-
-        UNUSED(kmask1);
-        UNUSED(kmask2);
-        UNUSED(kmask3);
-        UNUSED(utmp);
-
-        vector signed char u0 = (vector signed char)vec_xl_len(x[i].scales, 8);
-        vector signed char u1 = vec_and(vec_sr(u0, v2), lowMask2);
-        vector signed char u2 = (vector signed char)vec_xl_len(x[i].scales + 8, 4);
-        vector signed char u3 = vec_sr(u2, v4);
-
-        vector signed char u30 = u1;
-        vector signed char u31 = (vector signed char)vec_mergeh((vector signed int)vec_and(u2, lowMask), (vector signed int)u3);
-
-        u1 = vec_and(u0, lowMask1);
-        u2 = vec_or(u30, u31);
-
-        vector signed char utmps = (vector signed char)vec_mergeh((vector signed int)u1, (vector signed int)u2);
-
-        vector signed short vscales = vec_unpackh(utmps);
-        vector signed short q4xmins = vec_unpackl(utmps);
-        vector signed short q4xmins0 = vec_mergeh(q4xmins, q4xmins);
-        vector signed short q4xmins1 = vec_mergel(q4xmins, q4xmins);
-
-        vector signed int prod0 = vec_mule(q4xmins0, q8ysums0);
-        vector signed int prod1 = vec_mule(q4xmins1, q8ysums1);
-        vector signed int prod2 = vec_mulo(q4xmins0, q8ysums0);
-        vector signed int prod3 = vec_mulo(q4xmins1, q8ysums1);
-
-        vsumf0 = vec_nmsub(vec_ctf(prod0, 0), vdmin, vsumf0);
-        vsumf1 = vec_nmsub(vec_ctf(prod1, 0), vdmin, vsumf1);
-        vsumf2 = vec_nmsub(vec_ctf(prod2, 0), vdmin, vsumf2);
-        vsumf3 = vec_nmsub(vec_ctf(prod3, 0), vdmin, vsumf3);
-
-        vector signed int vsumi0 = v0;
-        vector signed int vsumi1 = v0;
-        vector signed int vsumi2 = v0;
-        vector signed int vsumi3 = v0;
-
-        const uint8_t * GGML_RESTRICT q4 = x[i].qs;
-        const int8_t * GGML_RESTRICT q8 = y[i].qs;
-
-        for (int j = 0; j < QK_K/64; j+=2) {
-            __builtin_prefetch(q4, 0, 1);
-            __builtin_prefetch(q8, 0, 1);
-
-            vector signed char qxs0 = (vector signed char)vec_xl( 0, q4);
-            vector signed char qxs1 = (vector signed char)vec_xl(16, q4);
-            vector signed char qxs2 = (vector signed char)vec_xl(32, q4);
-            vector signed char qxs3 = (vector signed char)vec_xl(48, q4);
-            q4 += 64;
-
-            vector unsigned char q4x00 = (vector unsigned char)vec_and(qxs0, lowMask);
-            vector unsigned char q4x01 = (vector unsigned char)vec_sr(qxs0, v4);
-            vector unsigned char q4x10 = (vector unsigned char)vec_and(qxs1, lowMask);
-            vector unsigned char q4x11 = (vector unsigned char)vec_sr(qxs1, v4);
-            vector unsigned char q4x20 = (vector unsigned char)vec_and(qxs2, lowMask);
-            vector unsigned char q4x21 = (vector unsigned char)vec_sr(qxs2, v4);
-            vector unsigned char q4x30 = (vector unsigned char)vec_and(qxs3, lowMask);
-            vector unsigned char q4x31 = (vector unsigned char)vec_sr(qxs3, v4);
-
-            vector signed char q8y00 = vec_xl(  0, q8);
-            vector signed char q8y10 = vec_xl( 16, q8);
-            vector signed char q8y01 = vec_xl( 32, q8);
-            vector signed char q8y11 = vec_xl( 48, q8);
-            vector signed char q8y20 = vec_xl( 64, q8);
-            vector signed char q8y30 = vec_xl( 80, q8);
-            vector signed char q8y21 = vec_xl( 96, q8);
-            vector signed char q8y31 = vec_xl(112, q8);
-            q8 += 128;
-
-            vector signed int qv00 = vec_msum(q8y00, q4x00, v0);
-            vector signed int qv01 = vec_msum(q8y01, q4x01, v0);
-            vector signed int qv10 = vec_msum(q8y10, q4x10, v0);
-            vector signed int qv11 = vec_msum(q8y11, q4x11, v0);
-            vector signed int qv20 = vec_msum(q8y20, q4x20, v0);
-            vector signed int qv21 = vec_msum(q8y21, q4x21, v0);
-            vector signed int qv30 = vec_msum(q8y30, q4x30, v0);
-            vector signed int qv31 = vec_msum(q8y31, q4x31, v0);
-
-            vector signed int vscales_h = vec_unpackh(vscales);
-            vector signed int vs0 = vec_splat(vscales_h, 0);
-            vector signed int vs1 = vec_splat(vscales_h, 1);
-            vector signed int vs2 = vec_splat(vscales_h, 2);
-            vector signed int vs3 = vec_splat(vscales_h, 3);
-            vscales = vec_sld(vscales, vscales, 8);
-
-            vsumi0 = vec_add(vec_mul(qv00, vs0), vsumi0);
-            vsumi1 = vec_add(vec_mul(qv01, vs1), vsumi1);
-            vsumi2 = vec_add(vec_mul(qv20, vs2), vsumi2);
-            vsumi3 = vec_add(vec_mul(qv21, vs3), vsumi3);
-
-            vsumi0 = vec_add(vec_mul(qv10, vs0), vsumi0);
-            vsumi1 = vec_add(vec_mul(qv11, vs1), vsumi1);
-            vsumi2 = vec_add(vec_mul(qv30, vs2), vsumi2);
-            vsumi3 = vec_add(vec_mul(qv31, vs3), vsumi3);
-        }
-
-        vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0);
-        vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1);
-        vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2);
-        vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3);
-    }
-
-    vsumf0 = vec_add(vsumf0, vsumf2);
-    vsumf1 = vec_add(vsumf1, vsumf3);
-
-    vsumf0 = vec_add(vsumf0, vsumf1);
-
-    vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4));
-    vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8));
-
-    *s = vec_extract(vsumf0, 0);
-
-#elif defined __loongarch_asx
-
-    __m256 acc = (__m256)__lasx_xvldi(0);
-    __m128 acc_m = (__m128)__lsx_vldi(0);
-
-    for (int i = 0; i < nb; ++i) {
-
-        const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
-        const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
-
-        memcpy(utmp, x[i].scales, 12);
-        utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
-        const uint32_t uaux = utmp[1] & kmask1;
-        utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
-        utmp[2] = uaux;
-        utmp[0] &= kmask1;
-
-        const uint8_t * GGML_RESTRICT q4 = x[i].qs;
-        const int8_t * GGML_RESTRICT q8 = y[i].qs;
-
-        const __m128i mins_and_scales128 = lsx_set_w(utmp[3], utmp[2], utmp[1], utmp[0]);
-        const __m128i mins128 = __lsx_vexth_h_b(mins_and_scales128);
-        const __m128i scales128 = __lsx_vsllwil_h_b(mins_and_scales128, 0);
-
-        const __m256i q8sums = __lasx_xvld((const __m256i*)y[i].bsums, 0);
-        const __m128i q8s = lsx_hadd_h(lasx_extracti128(q8sums, 0), lasx_extracti128(q8sums, 1));
-        const __m128i prod = lsx_madd_h(mins128, q8s);
-        acc_m = __lsx_vfmadd_s(__lsx_vreplfr2vr_s(dmin), __lsx_vffint_s_w(prod), acc_m);
-
-        const __m256i scales = lasx_insertf128(scales128, scales128);
-
-        __m256i sumi = __lasx_xvldi(0);
-
-        for (int j = 0; j < QK_K/64; ++j) {
-
-            const __m256i scale_l = lasx_xvrepl128vei_h(scales, 2 * j + 0);
-            const __m256i scale_h = lasx_xvrepl128vei_h(scales, 2 * j + 1);
-
-            const __m256i q4bits = __lasx_xvld((const __m256i*)q4, 0); q4 += 32;
-            const __m256i q4l = __lasx_xvandi_b(q4bits, 0xf);
-            const __m256i q4h = __lasx_xvsrli_b(q4bits, 4);
-
-            const __m256i q8l = __lasx_xvld((const __m256i*)q8, 0); q8 += 32;
-            __m256i p16l = lasx_madd_h_b(q4l, q8l);
-            p16l = lasx_madd_h(scale_l, p16l);
-
-            const __m256i q8h = __lasx_xvld((const __m256i*)q8, 0); q8 += 32;
-            __m256i p16h = lasx_madd_h_b(q4h, q8h);
-            p16h = lasx_madd_h(scale_h, p16h);
-            const __m256i sumj = __lasx_xvadd_w(p16l, p16h);
-
-            sumi = __lasx_xvadd_w(sumi, sumj);
-        }
-
-        __m256 vd = __lasx_xvreplfr2vr_s(d);
-        acc = __lasx_xvfmadd_s(vd, __lasx_xvffint_s_w(sumi), acc);
-
-    }
-
-    acc_m = __lsx_vfadd_s(acc_m, (__m128)__lsx_vpermi_w((__m128i)acc_m, (__m128i)acc_m, 0xee));
-    __m128i tmp1 = __lsx_vinsgr2vr_w(__lsx_vldi(0), __lsx_vpickve2gr_w((__m128i)acc_m, 1), 0);
-    acc_m = __lsx_vfadd_s(acc_m, (__m128)tmp1);
-
-
-    *s = hsum_float_8(acc) + ((v4f32)acc_m)[0];
-#elif defined(__VXE__) || defined(__VXE2__)
-    const uint8x16_t v_lm = vec_splat_u8(0x0F);
-    const int32x4_t v_z = vec_splat_s32(0);
-
-    uint8x16_t v_x[2];
-    int8x16_t v_xl[2];
-    int8x16_t v_y[2];
-
-    float sumf = 0;
-
-    for (int i = 0; i < nb; ++i) {
-        const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
-        const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
-
-        const int16x8_t v_ysumsl = vec_xl(0 , y[i].bsums);
-        const int16x8_t v_ysumsh = vec_xl(16, y[i].bsums);
-        const int16x8_t v_ysums = vec_padd_s16(v_ysumsl, v_ysumsh);
-
-        memcpy(utmp, x[i].scales, 12);
-
-        uint32x4_t v_mins8 = { 0 };
-        v_mins8 = vec_insert(utmp[1] & kmask1, v_mins8, 0);
-        v_mins8 = vec_insert(((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4), v_mins8, 1);
-
-        utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
-        utmp[0] &= kmask1;
-
-        const int16x8_t v_minsh = (int16x8_t)vec_unpackh((uint8x16_t)v_mins8);
-
-        const int32x4_t v_minso = vec_mulo(v_ysums, v_minsh);
-        const int32x4_t v_minse = vec_mule(v_ysums, v_minsh);
-        const int32x4_t v_mins = v_minso + v_minse;
-        sumf -= dmin * (v_mins[0] + v_mins[1] + v_mins[2] + v_mins[3]);
-
-        const uint8_t * scales = (const uint8_t *)utmp;
-        const uint8_t * GGML_RESTRICT x0 = x[i].qs;
-        const int8_t * GGML_RESTRICT y0 = y[i].qs;
-
-        int32_t sumi1 = 0;
-        int32_t sumi2 = 0;
-
-        for (int j = 0; j < QK_K/64; ++j) {
-            v_x[0] = vec_xl(0 , x0);
-            v_x[1] = vec_xl(16, x0);
-            x0 += 32;
-
-            v_y[0] = vec_xl(0 , y0);
-            v_y[1] = vec_xl(16, y0);
-            y0 += 32;
-
-            v_xl[0] = (int8x16_t)vec_and(v_x[0], v_lm);
-            v_xl[1] = (int8x16_t)vec_and(v_x[1], v_lm);
-
-            const int32x4_t p1 = ggml_vec_dot(ggml_vec_dot(v_z, v_xl[0], v_y[0]), v_xl[1], v_y[1]);
-            sumi1 += (p1[0] + p1[1] + p1[2] + p1[3]) * scales[2*j+0];
-
-            v_y[0] = vec_xl(0 , y0);
-            v_y[1] = vec_xl(16, y0);
-            y0 += 32;
-
-            v_xl[0] = (int8x16_t)vec_sr(v_x[0], 4);
-            v_xl[1] = (int8x16_t)vec_sr(v_x[1], 4);
-
-            const int32x4_t p2 = ggml_vec_dot(ggml_vec_dot(v_z, v_xl[0], v_y[0]), v_xl[1], v_y[1]);
-            sumi2 += (p2[0] + p2[1] + p2[2] + p2[3]) * scales[2*j+1];
-        }
-
-        sumf += d * (sumi1 + sumi2);
-    }
-
-    *s = sumf;
-#else
-
-    const uint8_t * scales = (const uint8_t*)&utmp[0];
-    const uint8_t * mins = (const uint8_t*)&utmp[2];
-
-    int8_t aux8[QK_K];
-    int16_t aux16[8];
-    float sums [8];
-    int32_t aux32[8];
-    memset(sums, 0, 8*sizeof(float));
-
-    float sumf = 0;
-    for (int i = 0; i < nb; ++i) {
-        const uint8_t * GGML_RESTRICT q4 = x[i].qs;
-        const int8_t * GGML_RESTRICT q8 = y[i].qs;
-        memset(aux32, 0, 8*sizeof(int32_t));
-        int8_t * GGML_RESTRICT a = aux8;
-        for (int j = 0; j < QK_K/64; ++j) {
-            for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] & 0xF);
-            a += 32;
-            for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] >> 4);
-            a += 32; q4 += 32;
-        }
-        memcpy(utmp, x[i].scales, 12);
-        utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
-        const uint32_t uaux = utmp[1] & kmask1;
-        utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
-        utmp[2] = uaux;
-        utmp[0] &= kmask1;
-
-        int sumi = 0;
-        for (int j = 0; j < QK_K/16; ++j) sumi += y[i].bsums[j] * mins[j/2];
-        a = aux8;
-        int is = 0;
-        for (int j = 0; j < QK_K/32; ++j) {
-            int32_t scale = scales[is++];
-            for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
-            for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
-            q8 += 8; a += 8;
-            for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
-            for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
-            q8 += 8; a += 8;
-            for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
-            for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
-            q8 += 8; a += 8;
-            for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
-            for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
-            q8 += 8; a += 8;
-        }
-        const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
-        for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
-        const float dmin = GGML_FP16_TO_FP32(x[i].dmin) * y[i].d;
-        sumf -= dmin * sumi;
-    }
-    for (int l = 0; l < 8; ++l) sumf += sums[l];
-    *s = sumf;
-#endif
-}
-
-void ggml_vec_dot_q5_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
-    assert(n % QK_K == 0);
-    assert(nrc == 1);
-    UNUSED(nrc);
-    UNUSED(bx);
-    UNUSED(by);
-    UNUSED(bs);
-
-    const block_q5_K * GGML_RESTRICT x = vx;
-    const block_q8_K * GGML_RESTRICT y = vy;
-
-    const int nb = n / QK_K;
-
-    static const uint32_t kmask1 = 0x3f3f3f3f;
-    static const uint32_t kmask2 = 0x0f0f0f0f;
-    static const uint32_t kmask3 = 0x03030303;
-
-    uint32_t utmp[4];
-
-#ifdef __ARM_NEON
-    const uint8x16_t m4b = vdupq_n_u8(0xf);
-    const uint8x16_t mone = vdupq_n_u8(1);
-    const uint8x16_t mtwo = vdupq_n_u8(2);
-    const int32x4_t mzero = vdupq_n_s32(0);
-
-    ggml_int8x16x4_t q5bytes;
-
-    float sumf = 0;
-
-    for (int i = 0; i < nb; ++i) {
-
-        const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
-        const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
-
-        const int16x8_t q8sums = vpaddq_s16(vld1q_s16(y[i].bsums), vld1q_s16(y[i].bsums + 8));
-
-        memcpy(utmp, x[i].scales, 12);
-        utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
-        const uint32_t uaux = utmp[1] & kmask1;
-        utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
-        utmp[2] = uaux;
-        utmp[0] &= kmask1;
-
-        const uint8x8_t mins8 = vld1_u8((const uint8_t*)utmp + 8);
-        const int16x8_t mins = vreinterpretq_s16_u16(vmovl_u8(mins8));
-        const int32x4_t prod = vaddq_s32(vmull_s16(vget_low_s16 (q8sums), vget_low_s16 (mins)),
-                                         vmull_s16(vget_high_s16(q8sums), vget_high_s16(mins)));
-        int32_t sumi_mins = vaddvq_s32(prod);
-
-        const uint8_t * scales = (const uint8_t *)utmp;
-
-        const uint8_t * GGML_RESTRICT q5 = x[i].qs;
-        const uint8_t * GGML_RESTRICT qh = x[i].qh;
-        const int8_t * GGML_RESTRICT q8 = y[i].qs;
-
-        ggml_uint8x16x2_t qhbits = ggml_vld1q_u8_x2(qh);
-
-        ggml_uint8x16x4_t q5h;
-
-        int32_t sumi = 0;
-
-        for (int j = 0; j < QK_K/64; ++j) {
-
-            const ggml_uint8x16x2_t q5bits = ggml_vld1q_u8_x2(q5); q5 += 32;
-            const ggml_int8x16x4_t q8bytes = ggml_vld1q_s8_x4(q8); q8 += 64;
-
-            q5h.val[0] = vshlq_n_u8(vandq_u8(mone, qhbits.val[0]), 4);
-            q5h.val[1] = vshlq_n_u8(vandq_u8(mone, qhbits.val[1]), 4);
-            q5h.val[2] = vshlq_n_u8(vandq_u8(mtwo, qhbits.val[0]), 3);
-            q5h.val[3] = vshlq_n_u8(vandq_u8(mtwo, qhbits.val[1]), 3);
-            qhbits.val[0] = vshrq_n_u8(qhbits.val[0], 2);
-            qhbits.val[1] = vshrq_n_u8(qhbits.val[1], 2);
-
-            q5bytes.val[0] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q5bits.val[0], m4b), q5h.val[0]));
-            q5bytes.val[1] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q5bits.val[1], m4b), q5h.val[1]));
-            q5bytes.val[2] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q5bits.val[0], 4), q5h.val[2]));
-            q5bytes.val[3] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q5bits.val[1], 4), q5h.val[3]));
-
-            sumi += vaddvq_s32(ggml_vdotq_s32(ggml_vdotq_s32(mzero, q5bytes.val[0], q8bytes.val[0]), q5bytes.val[1], q8bytes.val[1])) * *scales++;
-            sumi += vaddvq_s32(ggml_vdotq_s32(ggml_vdotq_s32(mzero, q5bytes.val[2], q8bytes.val[2]), q5bytes.val[3], q8bytes.val[3])) * *scales++;
-        }
-
-        sumf += d * sumi - dmin * sumi_mins;
-    }
-
-    *s = sumf;
-
-#elif defined __AVX2__
-
-    const __m256i m4 = _mm256_set1_epi8(0xF);
-    const __m128i mzero = _mm_setzero_si128();
-    const __m256i mone = _mm256_set1_epi8(1);
-
-    __m256 acc = _mm256_setzero_ps();
-
-    float summs = 0.f;
-
-    for (int i = 0; i < nb; ++i) {
-        const uint8_t * GGML_RESTRICT q5 = x[i].qs;
-        const int8_t * GGML_RESTRICT q8 = y[i].qs;
-
-        const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
-        const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
-
-        memcpy(utmp, x[i].scales, 12);
-        utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
-        const uint32_t uaux = utmp[1] & kmask1;
-        utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
-        utmp[2] = uaux;
-        utmp[0] &= kmask1;
-
-        const __m256i mins_and_scales = _mm256_cvtepu8_epi16(_mm_set_epi32(utmp[3], utmp[2], utmp[1], utmp[0]));
-
-        const __m256i q8sums = _mm256_loadu_si256((const __m256i*)y[i].bsums);
-        const __m128i q8s = _mm_hadd_epi16(_mm256_extracti128_si256(q8sums, 0), _mm256_extracti128_si256(q8sums, 1));
-        const __m128i prod = _mm_madd_epi16(_mm256_extracti128_si256(mins_and_scales, 1), q8s);
-        const __m128i hsum = _mm_hadd_epi32(_mm_hadd_epi32(prod, mzero), mzero);
-        summs += dmin * _mm_extract_epi32(hsum, 0);
-
-        const __m128i sc128 = _mm256_extracti128_si256(mins_and_scales, 0);
-        const __m256i scales = MM256_SET_M128I(sc128, sc128);
-
-        const __m256i hbits = _mm256_loadu_si256((const __m256i*)x[i].qh);
-        __m256i hmask = mone;
-
-        __m256i sumi = _mm256_setzero_si256();
-
-        int bit = 0;
-
-        for (int j = 0; j < QK_K/64; ++j) {
-
-            const __m256i scale_0 = _mm256_shuffle_epi8(scales, get_scale_shuffle_k4(2*j+0));
-            const __m256i scale_1 = _mm256_shuffle_epi8(scales, get_scale_shuffle_k4(2*j+1));
-
-            const __m256i q5bits = _mm256_loadu_si256((const __m256i*)q5); q5 += 32;
-
-            const __m256i q5l_0 = _mm256_and_si256(q5bits, m4);
-            const __m256i q5h_0 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_and_si256(hbits, hmask), bit++), 4);
-            const __m256i q5_0 = _mm256_add_epi8(q5l_0, q5h_0);
-            hmask = _mm256_slli_epi16(hmask, 1);
-
-            const __m256i q5l_1 = _mm256_and_si256(_mm256_srli_epi16(q5bits, 4), m4);
-            const __m256i q5h_1 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_and_si256(hbits, hmask), bit++), 4);
-            const __m256i q5_1 = _mm256_add_epi8(q5l_1, q5h_1);
-            hmask = _mm256_slli_epi16(hmask, 1);
-
-            const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
-            const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
-
-            __m256i p16_0 = _mm256_maddubs_epi16(q5_0, q8_0);
-            __m256i p16_1 = _mm256_maddubs_epi16(q5_1, q8_1);
-
-            p16_0 = _mm256_madd_epi16(scale_0, p16_0);
-            p16_1 = _mm256_madd_epi16(scale_1, p16_1);
-
-            sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_0, p16_1));
-
-        }
-
-        __m256 vd = _mm256_set1_ps(d);
-        acc = _mm256_fmadd_ps(vd, _mm256_cvtepi32_ps(sumi), acc);
-
-    }
-
-    *s = hsum_float_8(acc) + summs;
-
-#elif defined __AVX__
-
-    const __m128i m4 = _mm_set1_epi8(0xF);
-    const __m128i mzero = _mm_setzero_si128();
-    const __m128i mone = _mm_set1_epi8(1);
-    const __m128i m2 = _mm_set1_epi8(2);
-
-    __m256 acc = _mm256_setzero_ps();
-
-    float summs = 0.f;
-
-    for (int i = 0; i < nb; ++i) {
-
-        const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
-        const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
-
-        const uint8_t * GGML_RESTRICT q5 = x[i].qs;
-        const int8_t * GGML_RESTRICT q8 = y[i].qs;
-
-        memcpy(utmp, x[i].scales, 12);
-        utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
-        const uint32_t uaux = utmp[1] & kmask1;
-        utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
-        utmp[2] = uaux;
-        utmp[0] &= kmask1;
-
-        const __m128i utmps = _mm_set_epi32(utmp[3], utmp[2], utmp[1], utmp[0]);
-        const __m128i scales = _mm_cvtepu8_epi16(utmps);
-        const __m128i mins = _mm_cvtepu8_epi16(_mm_unpackhi_epi64(utmps, utmps));
-
-        const __m128i q8sums_0 = _mm_loadu_si128((const __m128i*)&y[i].bsums[0]);
-        const __m128i q8sums_1 = _mm_loadu_si128((const __m128i*)&y[i].bsums[8]);
-        const __m128i q8s = _mm_hadd_epi16(q8sums_0, q8sums_1);
-        const __m128i prod = _mm_madd_epi16(mins, q8s);
-        const __m128i hsum = _mm_hadd_epi32(_mm_hadd_epi32(prod, mzero), mzero);
-        summs += dmin * _mm_extract_epi32(hsum, 0);
-
-        const __m128i hbits_0 = _mm_loadu_si128((const __m128i*)&x[i].qh[0]);
-        const __m128i hbits_1 = _mm_loadu_si128((const __m128i*)&x[i].qh[16]);
-        __m128i hmask = mone;
-
-        __m128i sumi_0 = _mm_setzero_si128();
-        __m128i sumi_1 = _mm_setzero_si128();
-
-        int bit = 0;
-
-        __m128i shuffle = _mm_set1_epi16(0x0100);
-        for (int j = 0; j < QK_K/64; ++j) {
-
-            const __m128i scale_0 = _mm_shuffle_epi8(scales, shuffle);
-            shuffle = _mm_add_epi16(shuffle, m2);
-            const __m128i scale_1 = _mm_shuffle_epi8(scales, shuffle);
-            shuffle = _mm_add_epi16(shuffle, m2);
-
-            const __m128i q5bits_0 = _mm_loadu_si128((const __m128i*)q5); q5 += 16;
-            const __m128i q5bits_1 = _mm_loadu_si128((const __m128i*)q5); q5 += 16;
-
-            __m128i q5l_0 = _mm_and_si128(q5bits_0, m4);
-            __m128i q5l_1 = _mm_and_si128(q5bits_1, m4);
-            __m128i q5h_0 = _mm_slli_epi16(_mm_srli_epi16(_mm_and_si128(hbits_0, hmask), bit), 4);
-            __m128i q5h_1 = _mm_slli_epi16(_mm_srli_epi16(_mm_and_si128(hbits_1, hmask), bit++), 4);
-            __m128i q5_0 = _mm_add_epi8(q5l_0, q5h_0);
-            __m128i q5_1 = _mm_add_epi8(q5l_1, q5h_1);
-            hmask = _mm_slli_epi16(hmask, 1);
-
-            __m128i q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
-            __m128i q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
-            __m128i p16_0 = _mm_maddubs_epi16(q5_0, q8_0);
-            __m128i p16_1 = _mm_maddubs_epi16(q5_1, q8_1);
-            p16_0 = _mm_madd_epi16(scale_0, p16_0);
-            p16_1 = _mm_madd_epi16(scale_0, p16_1);
-
-            q5l_0 = _mm_and_si128(_mm_srli_epi16(q5bits_0, 4), m4);
-            q5l_1 = _mm_and_si128(_mm_srli_epi16(q5bits_1, 4), m4);
-            q5h_0 = _mm_slli_epi16(_mm_srli_epi16(_mm_and_si128(hbits_0, hmask), bit), 4);
-            q5h_1 = _mm_slli_epi16(_mm_srli_epi16(_mm_and_si128(hbits_1, hmask), bit++), 4);
-            q5_0 = _mm_add_epi8(q5l_0, q5h_0);
-            q5_1 = _mm_add_epi8(q5l_1, q5h_1);
-            hmask = _mm_slli_epi16(hmask, 1);
-
-            q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
-            q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
-            __m128i p16_2 = _mm_maddubs_epi16(q5_0, q8_0);
-            __m128i p16_3 = _mm_maddubs_epi16(q5_1, q8_1);
-            p16_2 = _mm_madd_epi16(scale_1, p16_2);
-            p16_3 = _mm_madd_epi16(scale_1, p16_3);
-
-            sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_0, p16_2));
-            sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_1, p16_3));
-
-        }
-
-        __m256 vd = _mm256_set1_ps(d);
-        __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0);
-        acc = _mm256_add_ps(_mm256_mul_ps(vd, _mm256_cvtepi32_ps(sumi)), acc);
-
-    }
-
-    *s = hsum_float_8(acc) + summs;
-
-#elif defined __wasm_simd128__
-    //const uint8_t * scales = (const uint8_t*)&utmp[0];
-    float sumf = 0;
-
-    for (int i = 0; i < nb; ++i) {
-        const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
-        const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin); // Fixed sign
-
-        const uint8_t * GGML_RESTRICT q5 = x[i].qs;
-        const uint8_t * GGML_RESTRICT qh = x[i].qh;
-        const int8_t * GGML_RESTRICT q8 = y[i].qs;
-
-        // Process scales and mins
-        memcpy(utmp, x[i].scales, 12);
-        utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
-        const uint32_t uaux = utmp[1] & kmask1;
-        utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
-        utmp[2] = uaux;
-        utmp[0] &= kmask1;
-
-        // Sum mins * q8sums
-        int32_t sumi_mins = 0;
-        const int16_t * GGML_RESTRICT q8sums = y[i].bsums;
-        const uint8_t * m = (const uint8_t *)&utmp[2];
-        for (int j = 0; j < 16; j += 2) {
-            sumi_mins += (q8sums[j] + q8sums[j+1]) * m[j/2];
-        }
-        sumf -= dmin * sumi_mins; // Correct subtraction
-
-        v128_t qh0 = wasm_v128_load(qh);
-        v128_t qh1 = wasm_v128_load(qh + 16);
-        const uint8_t * sc = (const uint8_t *)utmp;
-
-        int32_t sumi = 0;
-
-        for (int j = 0; j < QK_K/64; ++j) {
-            const int shift = j * 2;
-            v128_t qh_shift0 = wasm_u8x16_shr(qh0, shift);
-            v128_t qh_shift1 = wasm_u8x16_shr(qh1, shift);
-
-            v128_t qh_low0 = wasm_i8x16_shl(wasm_v128_and(qh_shift0, wasm_i8x16_splat(0x01)), 4);
-            v128_t qh_high0 = wasm_i8x16_shl(wasm_v128_and(qh_shift0, wasm_i8x16_splat(0x02)), 3);
-            v128_t qh_low1 = wasm_i8x16_shl(wasm_v128_and(qh_shift1, wasm_i8x16_splat(0x01)), 4);
-            v128_t qh_high1 = wasm_i8x16_shl(wasm_v128_and(qh_shift1, wasm_i8x16_splat(0x02)), 3);
-
-            v128_t q5_0 = wasm_v128_load(q5);
-            v128_t q5_1 = wasm_v128_load(q5 + 16);
-            q5 += 32;
-
-            v128_t q5l_0 = wasm_v128_or(wasm_v128_and(q5_0, wasm_i8x16_splat(0x0F)), qh_low0);
-            v128_t q5h_0 = wasm_v128_or(wasm_u8x16_shr(q5_0, 4), qh_high0);
-            v128_t q5l_1 = wasm_v128_or(wasm_v128_and(q5_1, wasm_i8x16_splat(0x0F)), qh_low1);
-            v128_t q5h_1 = wasm_v128_or(wasm_u8x16_shr(q5_1, 4), qh_high1);
-
-            v128_t q8_0 = wasm_v128_load(q8);
-            v128_t q8_1 = wasm_v128_load(q8 + 16);
-            v128_t q8_2 = wasm_v128_load(q8 + 32);
-            v128_t q8_3 = wasm_v128_load(q8 + 48);
-            q8 += 64;
-
-            // Process low quants
-            v128_t pl0 = wasm_i32x4_dot_i16x8(
-                wasm_i16x8_extend_low_i8x16(q5l_0),
-                wasm_i16x8_extend_low_i8x16(q8_0)
-            );
-            pl0 = wasm_i32x4_add(pl0, wasm_i32x4_dot_i16x8(
-                wasm_i16x8_extend_high_i8x16(q5l_0),
-                wasm_i16x8_extend_high_i8x16(q8_0)
-            ));
-            v128_t pl1 = wasm_i32x4_dot_i16x8(
-                wasm_i16x8_extend_low_i8x16(q5l_1),
-                wasm_i16x8_extend_low_i8x16(q8_1)
-            );
-            pl1 = wasm_i32x4_add(pl1, wasm_i32x4_dot_i16x8(
-                wasm_i16x8_extend_high_i8x16(q5l_1),
-                wasm_i16x8_extend_high_i8x16(q8_1)
-            ));
-            v128_t sum_low = wasm_i32x4_add(pl0, pl1);
-
-            // Process high quants
-            v128_t ph0 = wasm_i32x4_dot_i16x8(
-                wasm_i16x8_extend_low_i8x16(q5h_0),
-                wasm_i16x8_extend_low_i8x16(q8_2)
-            );
-            ph0 = wasm_i32x4_add(ph0, wasm_i32x4_dot_i16x8(
-                wasm_i16x8_extend_high_i8x16(q5h_0),
-                wasm_i16x8_extend_high_i8x16(q8_2)
-            ));
-            v128_t ph1 = wasm_i32x4_dot_i16x8(
-                wasm_i16x8_extend_low_i8x16(q5h_1),
-                wasm_i16x8_extend_low_i8x16(q8_3)
-            );
-            ph1 = wasm_i32x4_add(ph1, wasm_i32x4_dot_i16x8(
-                wasm_i16x8_extend_high_i8x16(q5h_1),
-                wasm_i16x8_extend_high_i8x16(q8_3)
-            ));
-            v128_t sum_high = wasm_i32x4_add(ph0, ph1);
-
-            // Accumulate with scale factors
-            int32_t sl = wasm_i32x4_extract_lane(sum_low, 0) + wasm_i32x4_extract_lane(sum_low, 1) +
-                         wasm_i32x4_extract_lane(sum_low, 2) + wasm_i32x4_extract_lane(sum_low, 3);
-            int32_t sh = wasm_i32x4_extract_lane(sum_high, 0) + wasm_i32x4_extract_lane(sum_high, 1) +
-                         wasm_i32x4_extract_lane(sum_high, 2) + wasm_i32x4_extract_lane(sum_high, 3);
-
-            sumi += sl * sc[2*j] + sh * sc[2*j+1];
-        }
-
-        sumf += d * sumi;
-    }
-
-    *s = sumf;
-
-#elif defined __riscv_v_intrinsic
-
-    const uint8_t * scales = (const uint8_t*)&utmp[0];
-    const uint8_t * mins = (const uint8_t*)&utmp[2];
-
-    float sumf = 0;
-    float sums = 0.0;
-
-    size_t vl;
-
-    for (int i = 0; i < nb; ++i) {
-
-        vl = 8;
-
-        const uint8_t * GGML_RESTRICT q5 = x[i].qs;
-        const uint8_t * GGML_RESTRICT hm = x[i].qh;
-        const int8_t * GGML_RESTRICT q8 = y[i].qs;
-
-        const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
-        const float dmin = GGML_FP16_TO_FP32(x[i].dmin) * y[i].d;
-
-        vint16m1_t q8sums_0 = __riscv_vlse16_v_i16m1(y[i].bsums, 4, vl);
-        vint16m1_t q8sums_1 = __riscv_vlse16_v_i16m1(y[i].bsums+1, 4, vl);
-        vint16m1_t q8sums = __riscv_vadd_vv_i16m1(q8sums_0, q8sums_1, vl);
-
-        memcpy(utmp, x[i].scales, 12);
-        utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
-        const uint32_t uaux = utmp[1] & kmask1;
-        utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
-        utmp[2] = uaux;
-        utmp[0] &= kmask1;
-
-        vuint8mf2_t mins8 = __riscv_vle8_v_u8mf2(mins, vl);
-        vint16m1_t v_mins = __riscv_vreinterpret_v_u16m1_i16m1(__riscv_vzext_vf2_u16m1(mins8, vl));
-        vint32m2_t prod = __riscv_vwmul_vv_i32m2(q8sums, v_mins, vl);
-
-        vint32m1_t sumi = __riscv_vredsum_vs_i32m2_i32m1(prod, __riscv_vmv_v_x_i32m1(0, 1), vl);
-        sumf -= dmin * __riscv_vmv_x_s_i32m1_i32(sumi);
-
-        vl = 32;
-        int32_t aux32 = 0;
-        int is = 0;
-
-        uint8_t m = 1;
-        vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1);
-        vuint8m2_t vqh = __riscv_vle8_v_u8m2(hm, vl);
-
-        for (int j = 0; j < QK_K/64; ++j) {
-            // load Q5 and Q8
-            vuint8m2_t q5_x = __riscv_vle8_v_u8m2(q5, vl);
-            vint8m2_t q8_y1 = __riscv_vle8_v_i8m2(q8, vl);
-            vint8m2_t q8_y2 = __riscv_vle8_v_i8m2(q8+32, vl);
-
-            // compute mask for addition
-            vint8m2_t q5_a = __riscv_vreinterpret_v_u8m2_i8m2(__riscv_vand_vx_u8m2(q5_x, 0x0F, vl));
-            vuint8m2_t qh_m1 = __riscv_vand_vx_u8m2(vqh, m, vl);
-            vbool4_t vmask_1 = __riscv_vmsne_vx_u8m2_b4(qh_m1, 0, vl);
-            vint8m2_t q5_m1 = __riscv_vadd_vx_i8m2_mu(vmask_1, q5_a, q5_a, 16, vl);
-            m <<= 1;
-
-            vint8m2_t q5_l = __riscv_vreinterpret_v_u8m2_i8m2(__riscv_vsrl_vx_u8m2(q5_x, 0x04, vl));
-            vuint8m2_t qh_m2 = __riscv_vand_vx_u8m2(vqh, m, vl);
-            vbool4_t vmask_2 = __riscv_vmsne_vx_u8m2_b4(qh_m2, 0, vl);
-            vint8m2_t q5_m2 = __riscv_vadd_vx_i8m2_mu(vmask_2, q5_l, q5_l, 16, vl);
-            m <<= 1;
-
-            vint16m4_t v0 = __riscv_vwmul_vv_i16m4(q5_m1, q8_y1, vl);
-            vint16m4_t v1 = __riscv_vwmul_vv_i16m4(q5_m2, q8_y2, vl);
-
-            vint32m8_t vs1 = __riscv_vwmul_vx_i32m8(v0, scales[is++], vl);
-            vint32m8_t vs2 = __riscv_vwmul_vx_i32m8(v1, scales[is++], vl);
-
-            vint32m1_t vacc1 = __riscv_vredsum_vs_i32m8_i32m1(vs1, vzero, vl);
-            vint32m1_t vacc2 = __riscv_vredsum_vs_i32m8_i32m1(vs2, vacc1, vl);
-
-            aux32 += __riscv_vmv_x_s_i32m1_i32(vacc2);
-            q5 += 32; q8 += 64;
-
-        }
-
-        sums += aux32 * d;
-
-    }
-
-    *s = sumf+sums;
-
-#elif defined(__POWER9_VECTOR__)
-    const vector signed char lowMask = vec_splats((signed char)0xF);
-    const vector signed char lowMask1 = vec_splats((int8_t)0x3f);
-    const vector signed char lowMask2 = vec_splats((int8_t)0x30);
-    const vector int v0 = vec_splats((int32_t)0);
-    const vector unsigned char v1 = vec_splats((unsigned char)0x1);
-    const vector unsigned char v2 = vec_splats((unsigned char)0x2);
-    const vector unsigned char v3 = vec_splats((unsigned char)0x3);
-    const vector unsigned char v4 = vec_splats((unsigned char)0x4);
-
-    vector float vsumf0 = vec_splats(0.0f);
-    vector float vsumf1 = vec_splats(0.0f);
-    vector float vsumf2 = vec_splats(0.0f);
-    vector float vsumf3 = vec_splats(0.0f);
-
-    for (int i = 0; i < nb; ++i) {
-        vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d));
-        vector float vyd = vec_splats(y[i].d);
-        vector float vd = vec_mul(vxd, vyd);
-
-        vector float vxmin = vec_splats(GGML_FP16_TO_FP32(x[i].dmin));
-        vector float vdmin = vec_mul(vxmin, vyd);
-
-        UNUSED(kmask1);
-        UNUSED(kmask2);
-        UNUSED(kmask3);
-        UNUSED(utmp);
-
-        vector signed char u0 = (vector signed char)vec_xl_len(x[i].scales, 8);
-        vector signed char u1 = vec_and(vec_sr(u0, v2), lowMask2);
-        vector signed char u2 = (vector signed char)vec_xl_len(x[i].scales + 8, 4);
-        vector signed char u3 = vec_sr(u2, v4);
-
-        vector signed char u30 = u1;
-        vector signed char u31 = (vector signed char)vec_mergeh((vector signed int)vec_and(u2, lowMask), (vector signed int)u3);
-
-        u1 = vec_and(u0, lowMask1);
-        u2 = vec_or(u30, u31);
-
-        vector signed char utmps = (vector signed char)vec_mergeh((vector signed int)u1, (vector signed int)u2);
-
-        vector signed short q8ysums0 = vec_xl( 0, y[i].bsums);
-        vector signed short q8ysums1 = vec_xl(16, y[i].bsums);
-
-        vector signed short vscales = vec_unpackh(utmps);
-
-        vector signed short q5xmins = vec_unpackl(utmps);
-        vector signed short q5xmins0 = vec_mergeh(q5xmins, q5xmins);
-        vector signed short q5xmins1 = vec_mergel(q5xmins, q5xmins);
-
-        vector signed int prod0 = vec_mule(q5xmins0, q8ysums0);
-        vector signed int prod1 = vec_mule(q5xmins1, q8ysums1);
-        vector signed int prod2 = vec_mulo(q5xmins0, q8ysums0);
-        vector signed int prod3 = vec_mulo(q5xmins1, q8ysums1);
-
-        vsumf0 = vec_nmsub(vec_ctf(prod0, 0), vdmin, vsumf0);
-        vsumf1 = vec_nmsub(vec_ctf(prod1, 0), vdmin, vsumf1);
-        vsumf2 = vec_nmsub(vec_ctf(prod2, 0), vdmin, vsumf2);
-        vsumf3 = vec_nmsub(vec_ctf(prod3, 0), vdmin, vsumf3);
-
-        vector signed char qxhs0 = (vector signed char)vec_xl( 0, x[i].qh);
-        vector signed char qxhs1 = (vector signed char)vec_xl(16, x[i].qh);
-
-        vector signed int vsumi0 = v0;
-        vector signed int vsumi1 = v0;
-        vector signed int vsumi2 = v0;
-        vector signed int vsumi3 = v0;
-
-        const uint8_t * GGML_RESTRICT q5 = x[i].qs;
-        const int8_t * GGML_RESTRICT q8 = y[i].qs;
-
-        for (int j = 0; j < QK_K/64; ++j) {
-            __builtin_prefetch(q5, 0, 1);
-            __builtin_prefetch(q8, 0, 1);
-
-            vector signed char qxs0 = (vector signed char)vec_xl( 0, q5);
-            vector signed char qxs1 = (vector signed char)vec_xl(16, q5);
-            q5 += 32;
-
-            vector signed char qxs00 = vec_and(qxs0, lowMask);
-            vector signed char qxs01 = vec_sr(qxs0, v4);
-            vector signed char qxs10 = vec_and(qxs1, lowMask);
-            vector signed char qxs11 = vec_sr(qxs1, v4);
-
-            vector signed char q5h00 = vec_sl(vec_and((vector signed char)v1, qxhs0), v4);
-            vector signed char q5h01 = vec_sl(vec_and((vector signed char)v2, qxhs0), v3);
-            vector signed char q5h10 = vec_sl(vec_and((vector signed char)v1, qxhs1), v4);
-            vector signed char q5h11 = vec_sl(vec_and((vector signed char)v2, qxhs1), v3);
-            qxhs0 = vec_sr(qxhs0, v2);
-            qxhs1 = vec_sr(qxhs1, v2);
-
-            vector unsigned char q5x00 = (vector unsigned char)vec_or(q5h00, qxs00);
-            vector unsigned char q5x01 = (vector unsigned char)vec_or(q5h01, qxs01);
-            vector unsigned char q5x10 = (vector unsigned char)vec_or(q5h10, qxs10);
-            vector unsigned char q5x11 = (vector unsigned char)vec_or(q5h11, qxs11);
-
-            vector signed char q8y00 = vec_xl( 0, q8);
-            vector signed char q8y10 = vec_xl(16, q8);
-            vector signed char q8y01 = vec_xl(32, q8);
-            vector signed char q8y11 = vec_xl(48, q8);
-            q8 += 64;
-
-            vector signed int qv00 = vec_msum(q8y00, q5x00, v0);
-            vector signed int qv01 = vec_msum(q8y01, q5x01, v0);
-            vector signed int qv10 = vec_msum(q8y10, q5x10, v0);
-            vector signed int qv11 = vec_msum(q8y11, q5x11, v0);
-
-            vector signed int vscales_h = vec_unpackh(vscales);
-            vector signed int vs0 = vec_splat(vscales_h, 0);
-            vector signed int vs1 = vec_splat(vscales_h, 1);
-            vscales = vec_sld(vscales, vscales, 12);
-
-            vsumi0 = vec_add(vec_mul(qv00, vs0), vsumi0);
-            vsumi1 = vec_add(vec_mul(qv10, vs0), vsumi1);
-            vsumi2 = vec_add(vec_mul(qv01, vs1), vsumi2);
-            vsumi3 = vec_add(vec_mul(qv11, vs1), vsumi3);
-        }
-
-        vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0);
-        vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1);
-        vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2);
-        vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3);
-    }
-
-    vsumf0 = vec_add(vsumf0, vsumf2);
-    vsumf1 = vec_add(vsumf1, vsumf3);
-
-    vsumf0 = vec_add(vsumf0, vsumf1);
-
-    vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4));
-    vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8));
-
-    *s = vec_extract(vsumf0, 0);
-
-#elif defined __loongarch_asx
-
-    __m256 acc = (__m256)__lasx_xvldi(0);
-    __m128 acc_m = (__m128)__lsx_vldi(0);
-
-    for (int i = 0; i < nb; ++i) {
-
-        const uint8_t * GGML_RESTRICT q5 = x[i].qs;
-        const int8_t * GGML_RESTRICT q8 = y[i].qs;
-
-        const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
-        const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
-
-        memcpy(utmp, x[i].scales, 12);
-        utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
-        const uint32_t uaux = utmp[1] & kmask1;
-        utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
-        utmp[2] = uaux;
-        utmp[0] &= kmask1;
-
-        const __m128i mins_and_scales128 = lsx_set_w(utmp[3], utmp[2], utmp[1], utmp[0]);
-        const __m128i mins128 = __lsx_vexth_h_b(mins_and_scales128);
-        const __m128i scales128 = __lsx_vsllwil_h_b(mins_and_scales128, 0);
-
-        const __m256i q8sums = __lasx_xvld((const __m256i*)y[i].bsums, 0);
-        const __m128i q8s = lsx_hadd_h(lasx_extracti128(q8sums, 0), lasx_extracti128(q8sums, 1));
-        const __m128i prod = lsx_madd_h(mins128, q8s);
-        acc_m = __lsx_vfmadd_s(__lsx_vreplfr2vr_s(dmin), __lsx_vffint_s_w(prod), acc_m);
-
-        const __m256i scales = lasx_insertf128(scales128, scales128);
-
-        const __m256i hbits = __lasx_xvld((const __m256i*)x[i].qh, 0);
-
-        __m256i sumi = __lasx_xvldi(0);
-
-        for (int j = 0; j < QK_K/64; ++j) {
-
-            const __m256i scale_0 = lasx_xvrepl128vei_h(scales, 2 * j + 0);
-            const __m256i scale_1 = lasx_xvrepl128vei_h(scales, 2 * j + 1);
-
-            const __m256i q5bits = __lasx_xvld((const __m256i*)q5, 0); q5 += 32;
-
-            const __m256i q5l_0 = __lasx_xvandi_b(q5bits, 0xf);
-            const __m256i q5l_1 = __lasx_xvsrli_b(q5bits, 4);
-            const __m256i q5h_0 = __lasx_xvnori_b(__lasx_xvseqi_b(lasx_xvandi_b_bit(hbits, 2 * j + 0), 0), 0xef);
-            const __m256i q5h_1 = __lasx_xvnori_b(__lasx_xvseqi_b(lasx_xvandi_b_bit(hbits, 2 * j + 1), 0), 0xef);
-            const __m256i q5_0 = __lasx_xvor_v(q5l_0, q5h_0);
-            const __m256i q5_1 = __lasx_xvor_v(q5l_1, q5h_1);
-
-            const __m256i q8_0 = __lasx_xvld((const __m256i*)q8, 0); q8 += 32;
-            const __m256i q8_1 = __lasx_xvld((const __m256i*)q8, 0); q8 += 32;
-
-            __m256i p16_0 = lasx_madd_h_b(q5_0, q8_0);
-            __m256i p16_1 = lasx_madd_h_b(q5_1, q8_1);
-
-            p16_0 = lasx_madd_h(scale_0, p16_0);
-            p16_1 = lasx_madd_h(scale_1, p16_1);
-
-            sumi = __lasx_xvadd_w(sumi, __lasx_xvadd_w(p16_0, p16_1));
-
-        }
-
-        __m256 vd = __lasx_xvreplfr2vr_s(d);
-        acc = __lasx_xvfmadd_s(vd, __lasx_xvffint_s_w(sumi), acc);
-
-    }
-
-    acc_m = __lsx_vfadd_s(acc_m, (__m128)__lsx_vbsrl_v(acc_m, 8));
-    acc_m = __lsx_vfadd_s(acc_m, (__m128)__lsx_vbsrl_v(acc_m, 4));
-
-    *s = hsum_float_8(acc) + ((v4f32)acc_m)[0];
-#elif defined(__VXE__) || defined(__VXE2__)
-    const uint8x16_t v_lm = vec_splat_u8(0x0F);
-    const uint8x16_t v_1m = vec_splat_u8(0x01);
-    const uint8x16_t v_2m = vec_splat_u8(0x02);
-
-    const int32x4_t v_z = vec_splat_s32(0);
-
-    const uchar8x16_t v_minsm = {
-        0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
-        0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
-    };
-
-    int8x16_t q5b[4];
-    uint8x16_t q5h[4];
-
-    uint8x16_t v_xl[2];
-    uint8x16_t v_xh[2];
-    int8x16_t v_y[4];
-
-    float sumf = 0;
-
-    for (int i = 0; i < nb; ++i) {
-        const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
-        const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
-
-        const int16x8_t v_ysumsl = vec_xl(0 , y[i].bsums);
-        const int16x8_t v_ysumsh = vec_xl(16, y[i].bsums);
-        const int16x8_t v_ysums = vec_padd_s16(v_ysumsl, v_ysumsh);
-
-        memcpy(utmp, x[i].scales, 12);
-        utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
-        const uint32_t uaux = utmp[1] & kmask1;
-        utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
-        utmp[2] = uaux;
-        utmp[0] &= kmask1;
-
-        const uint8x16_t v_mins16 = vec_xl(0, (const uint8_t *)utmp);
-        const uint8x16_t v_mins8 = vec_perm(v_mins16, v_mins16, v_minsm);
-        const int16x8_t v_minsh = (int16x8_t)vec_unpackh(v_mins8);
-
-        const int32x4_t v_minsho = vec_mulo(v_ysums, v_minsh);
-        const int32x4_t v_minshe = vec_mule(v_ysums, v_minsh);
-        const int32x4_t v_mins = vec_add(v_minsho, v_minshe);
-        const int32_t mins = v_mins[0] + v_mins[1] + v_mins[2] + v_mins[3];
-
-        const uint8_t * scales = (const uint8_t *)utmp;
-        const uint8_t * GGML_RESTRICT x0l = x[i].qs;
-        const uint8_t * GGML_RESTRICT x0h = x[i].qh;
-        const int8_t * GGML_RESTRICT y0 = y[i].qs;
-
-        v_xh[0] = vec_xl(0 , x0h);
-        v_xh[1] = vec_xl(16, x0h);
-
-        int32_t sumi = 0;
-        for (int j = 0; j < QK_K/64; ++j) {
-            v_xl[0] = vec_xl(0 , x0l);
-            v_xl[1] = vec_xl(16, x0l);
-            x0l += 32;
-
-            v_y[0] = vec_xl(0 , y0);
-            v_y[1] = vec_xl(16, y0);
-            v_y[2] = vec_xl(32, y0);
-            v_y[3] = vec_xl(48, y0);
-            y0 += 64;
-
-            q5h[0] = vec_sl(vec_and(v_1m, v_xh[0]), 4);
-            q5h[1] = vec_sl(vec_and(v_1m, v_xh[1]), 4);
-            q5h[2] = vec_sl(vec_and(v_2m, v_xh[0]), 3);
-            q5h[3] = vec_sl(vec_and(v_2m, v_xh[1]), 3);
-            v_xh[0] = vec_sr(v_xh[0], 2);
-            v_xh[1] = vec_sr(v_xh[1], 2);
-
-            q5b[0] = (int8x16_t)vec_or(vec_and(v_xl[0], v_lm), q5h[0]);
-            q5b[1] = (int8x16_t)vec_or(vec_and(v_xl[1], v_lm), q5h[1]);
-            q5b[2] = (int8x16_t)vec_or(vec_sr(v_xl[0], 4), q5h[2]);
-            q5b[3] = (int8x16_t)vec_or(vec_sr(v_xl[1], 4), q5h[3]);
-
-            int32x4_t sumi0 = ggml_vec_dot(ggml_vec_dot(v_z, q5b[0], v_y[0]), q5b[1], v_y[1]);
-            int32x4_t sumi1 = ggml_vec_dot(ggml_vec_dot(v_z, q5b[2], v_y[2]), q5b[3], v_y[3]);
-
-            sumi += (sumi0[0] + sumi0[1] + sumi0[2] + sumi0[3]) * *scales++;
-            sumi += (sumi1[0] + sumi1[1] + sumi1[2] + sumi1[3]) * *scales++;
-        }
-
-        sumf += d * sumi - dmin * mins;
-    }
-
-    *s = sumf;
-#else
-
-    const uint8_t * scales = (const uint8_t*)&utmp[0];
-    const uint8_t * mins = (const uint8_t*)&utmp[2];
-
-    int8_t aux8[QK_K];
-    int16_t aux16[8];
-    float sums [8];
-    int32_t aux32[8];
-    memset(sums, 0, 8*sizeof(float));
-
-    float sumf = 0;
-    for (int i = 0; i < nb; ++i) {
-        const uint8_t * GGML_RESTRICT q4 = x[i].qs;
-        const uint8_t * GGML_RESTRICT hm = x[i].qh;
-        const int8_t * GGML_RESTRICT q8 = y[i].qs;
-        memset(aux32, 0, 8*sizeof(int32_t));
-        int8_t * GGML_RESTRICT a = aux8;
-        uint8_t m = 1;
-        for (int j = 0; j < QK_K/64; ++j) {
-            for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] & 0xF);
-            for (int l = 0; l < 32; ++l) a[l] += (hm[l] & m ? 16 : 0);
-            a += 32; m <<= 1;
-            for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] >> 4);
-            for (int l = 0; l < 32; ++l) a[l] += (hm[l] & m ? 16 : 0);
-            a += 32; m <<= 1;
-            q4 += 32;
-        }
-        memcpy(utmp, x[i].scales, 12);
-        utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
-        const uint32_t uaux = utmp[1] & kmask1;
-        utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
-        utmp[2] = uaux;
-        utmp[0] &= kmask1;
-
-        int sumi = 0;
-        for (int j = 0; j < QK_K/16; ++j) sumi += y[i].bsums[j] * mins[j/2];
-        a = aux8;
-        int is = 0;
-        for (int j = 0; j < QK_K/32; ++j) {
-            int32_t scale = scales[is++];
-            for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
-            for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
-            q8 += 8; a += 8;
-            for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
-            for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
-            q8 += 8; a += 8;
-            for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
-            for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
-            q8 += 8; a += 8;
-            for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
-            for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
-            q8 += 8; a += 8;
-        }
-        const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
-        for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
-        const float dmin = GGML_FP16_TO_FP32(x[i].dmin) * y[i].d;
-        sumf -= dmin * sumi;
-    }
-    for (int l = 0; l < 8; ++l) sumf += sums[l];
-    *s = sumf;
-#endif
-}
-
-void ggml_vec_dot_q6_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
-    assert(n % QK_K == 0);
-    assert(nrc == 1);
-    UNUSED(nrc);
-    UNUSED(bx);
-    UNUSED(by);
-    UNUSED(bs);
-
-    const block_q6_K * GGML_RESTRICT x = vx;
-    const block_q8_K * GGML_RESTRICT y = vy;
-
-    const int nb = n / QK_K;
-
-#ifdef __ARM_FEATURE_SVE
-    const int vector_length = ggml_cpu_get_sve_cnt()*8;
-    float sum = 0;
-    svuint8_t m4b = svdup_n_u8(0xf);
-    svint32_t vzero = svdup_n_s32(0);
-    svuint8_t mone = svdup_n_u8(0x30);
-    svint8_t q6bytes_1, q6bytes_2, q6bytes_3, q6bytes_4;
-    svuint8_t q6h_1, q6h_2, q6h_3, q6h_4;
-
-    for (int i = 0; i < nb; ++i) {
-        const float d_all = GGML_FP16_TO_FP32(x[i].d);
-
-        const uint8_t * GGML_RESTRICT q6 = x[i].ql;
-        const uint8_t * GGML_RESTRICT qh = x[i].qh;
-        const int8_t * GGML_RESTRICT q8 = y[i].qs;
-
-        const int8_t * GGML_RESTRICT scale = x[i].scales;
-
-        const svbool_t pg16_8 = svptrue_pat_b16(SV_VL8);
-        const svint16_t q8sums_1 = svld1_s16(pg16_8, y[i].bsums);
-        const svint16_t q8sums_2 = svld1_s16(pg16_8, y[i].bsums + 8);
-        const svint16_t q6scales_1 = svunpklo_s16(svld1_s8(svptrue_pat_b8(SV_VL8), scale));
-        const svint16_t q6scales_2 = svunpklo_s16(svld1_s8(svptrue_pat_b8(SV_VL8), scale + 8));
-        const svint64_t prod = svdup_n_s64(0);
-        int32_t isum_mins = svaddv_s64(svptrue_b64(), svadd_s64_x(svptrue_b64(), svdot_s64(prod, q8sums_1, q6scales_1),
-                                                                                 svdot_s64(prod, q8sums_2, q6scales_2)));
-        int32_t isum = 0;
-
-        switch (vector_length) {
-            case 128:
-                {
-                    const svbool_t pg32_4 = svptrue_pat_b32(SV_VL4);
-                    const svbool_t pg8_16 = svptrue_pat_b8(SV_VL16);
-                    svint32_t isum_tmp = svdup_n_s32(0);
-                    for (int j = 0; j < QK_K/128; ++j) {
-                        svuint8_t qhbits_1 = svld1_u8(pg8_16, qh);
-                        svuint8_t qhbits_2 = svld1_u8(pg8_16, qh+16);
-                        qh += 32;
-                        svuint8_t q6bits_1 = svld1_u8(pg8_16, q6);
-                        svuint8_t q6bits_2 = svld1_u8(pg8_16, q6+16);
-                        svuint8_t q6bits_3 = svld1_u8(pg8_16, q6+32);
-                        svuint8_t q6bits_4 = svld1_u8(pg8_16, q6+48);
-                        q6 += 64;
-                        svint8_t q8bytes_1 = svld1_s8(pg8_16, q8);
-                        svint8_t q8bytes_2 = svld1_s8(pg8_16, q8+16);
-                        svint8_t q8bytes_3 = svld1_s8(pg8_16, q8+32);
-                        svint8_t q8bytes_4 = svld1_s8(pg8_16, q8+48);
-                        q8 += 64;
-
-                        q6h_1 = svand_u8_x(pg16_8, mone, svlsl_n_u8_x(pg16_8, qhbits_1, 4));
-                        q6h_2 = svand_u8_x(pg16_8, mone, svlsl_n_u8_x(pg16_8, qhbits_2, 4));
-                        q6h_3 = svand_u8_x(pg16_8, mone, svlsl_n_u8_x(pg16_8, qhbits_1, 2));
-                        q6h_4 = svand_u8_x(pg16_8, mone, svlsl_n_u8_x(pg16_8, qhbits_2, 2));
-                        q6bytes_1 = svreinterpret_s8_u8(svorr_u8_x(pg8_16, svand_u8_x(pg8_16, q6bits_1, m4b), q6h_1));
-                        q6bytes_2 = svreinterpret_s8_u8(svorr_u8_x(pg8_16, svand_u8_x(pg8_16, q6bits_2, m4b), q6h_2));
-                        q6bytes_3 = svreinterpret_s8_u8(svorr_u8_x(pg8_16, svand_u8_x(pg8_16, q6bits_3, m4b), q6h_3));
-                        q6bytes_4 = svreinterpret_s8_u8(svorr_u8_x(pg8_16, svand_u8_x(pg8_16, q6bits_4, m4b), q6h_4));
-                        isum_tmp = svmla_n_s32_x(pg32_4, isum_tmp, svdot_s32(vzero, q6bytes_1, q8bytes_1), scale[0]);
-                        isum_tmp = svmla_n_s32_x(pg32_4, isum_tmp, svdot_s32(vzero, q6bytes_2, q8bytes_2), scale[1]);
-                        isum_tmp = svmla_n_s32_x(pg32_4, isum_tmp, svdot_s32(vzero, q6bytes_3, q8bytes_3), scale[2]);
-                        isum_tmp = svmla_n_s32_x(pg32_4, isum_tmp, svdot_s32(vzero, q6bytes_4, q8bytes_4), scale[3]);
-
-                        scale += 4;
-                        q8bytes_1 = svld1_s8(pg8_16, q8);
-                        q8bytes_2 = svld1_s8(pg8_16, q8+16);
-                        q8bytes_3 = svld1_s8(pg8_16, q8+32);
-                        q8bytes_4 = svld1_s8(pg8_16, q8+48);
-                        q8 += 64;
-
-                        q6h_1 = svand_u8_x(pg16_8, mone, qhbits_1);
-                        q6h_2 = svand_u8_x(pg16_8, mone, qhbits_2);
-                        q6h_3 = svand_u8_x(pg16_8, mone, svlsr_n_u8_x(pg16_8, qhbits_1, 2));
-                        q6h_4 = svand_u8_x(pg16_8, mone, svlsr_n_u8_x(pg16_8, qhbits_2, 2));
-                        q6bytes_1 = svreinterpret_s8_u8(svorr_u8_x(pg8_16, svlsr_n_u8_x(pg8_16, q6bits_1, 4), q6h_1));
-                        q6bytes_2 = svreinterpret_s8_u8(svorr_u8_x(pg8_16, svlsr_n_u8_x(pg8_16, q6bits_2, 4), q6h_2));
-                        q6bytes_3 = svreinterpret_s8_u8(svorr_u8_x(pg8_16, svlsr_n_u8_x(pg8_16, q6bits_3, 4), q6h_3));
-                        q6bytes_4 = svreinterpret_s8_u8(svorr_u8_x(pg8_16, svlsr_n_u8_x(pg8_16, q6bits_4, 4), q6h_4));
-                        isum_tmp = svmla_n_s32_x(pg32_4, isum_tmp, svdot_s32(vzero, q6bytes_1, q8bytes_1), scale[0]);
-                        isum_tmp = svmla_n_s32_x(pg32_4, isum_tmp, svdot_s32(vzero, q6bytes_2, q8bytes_2), scale[1]);
-                        isum_tmp = svmla_n_s32_x(pg32_4, isum_tmp, svdot_s32(vzero, q6bytes_3, q8bytes_3), scale[2]);
-                        isum_tmp = svmla_n_s32_x(pg32_4, isum_tmp, svdot_s32(vzero, q6bytes_4, q8bytes_4), scale[3]);
-                        scale += 4;
-                    }
-                    isum += svaddv_s32(pg32_4, isum_tmp);
-                    sum += d_all * y[i].d * (isum - 32 * isum_mins);
-                }
-                break;
-            case 256:
-            case 512:
-                {
-                    const svbool_t pg8_2 = svptrue_pat_b8(SV_VL2);
-                    const svbool_t pg32_8 = svptrue_pat_b32(SV_VL8);
-                    const svbool_t pg8_32 = svptrue_pat_b8(SV_VL32);
-                    svint32_t isum_tmp = svdup_n_s32(0);
-                    for (int j = 0; j < QK_K/128; j++) {
-                        svuint8_t qhbits_1 = svld1_u8(pg8_32, qh);
-                        qh += 32;
-                        svuint8_t q6bits_1 = svld1_u8(pg8_32, q6);
-                        svuint8_t q6bits_2 = svld1_u8(pg8_32, q6+32);
-                        q6 += 64;
-                        svint8_t q8bytes_1 = svld1_s8(pg8_32, q8);
-                        svint8_t q8bytes_2 = svld1_s8(pg8_32, q8+32);
-                        svint8_t q8bytes_3 = svld1_s8(pg8_32, q8+64);
-                        svint8_t q8bytes_4 = svld1_s8(pg8_32, q8+96);
-                        q8 += 128;
-                        q6h_1 = svand_u8_x(pg8_32, mone, svlsl_n_u8_x(pg8_32, qhbits_1, 4));
-                        q6h_2 = svand_u8_x(pg8_32, mone, svlsl_n_u8_x(pg8_32, qhbits_1, 2));
-                        q6h_3 = svand_u8_x(pg8_32, mone, qhbits_1);
-                        q6h_4 = svand_u8_x(pg8_32, mone, svlsr_n_u8_x(pg8_32, qhbits_1, 2));
-                        q6bytes_1 = svreinterpret_s8_u8(svorr_u8_x(pg8_32, svand_u8_x(pg8_32, q6bits_1, m4b), q6h_1));
-                        q6bytes_2 = svreinterpret_s8_u8(svorr_u8_x(pg8_32, svand_u8_x(pg8_32, q6bits_2, m4b), q6h_2));
-                        q6bytes_3 = svreinterpret_s8_u8(svorr_u8_x(pg8_32, svlsr_n_u8_x(pg8_32, q6bits_1, 4), q6h_3));
-                        q6bytes_4 = svreinterpret_s8_u8(svorr_u8_x(pg8_32, svlsr_n_u8_x(pg8_32, q6bits_2, 4), q6h_4));
-
-                        svint8_t scale_lane_1_tmp = svld1_s8(pg8_2, scale);
-                        scale_lane_1_tmp = svzip1_s8(scale_lane_1_tmp, scale_lane_1_tmp);
-                        scale_lane_1_tmp = svzip1_s8(scale_lane_1_tmp, scale_lane_1_tmp);
-                        svint8_t scale_lane_2_tmp = svld1_s8(pg8_2, scale+2);
-                        scale_lane_2_tmp = svzip1_s8(scale_lane_2_tmp, scale_lane_2_tmp);
-                        scale_lane_2_tmp = svzip1_s8(scale_lane_2_tmp, scale_lane_2_tmp);
-                        svint8_t scale_lane_3_tmp = svld1_s8(pg8_2, scale+4);
-                        scale_lane_3_tmp = svzip1_s8(scale_lane_3_tmp, scale_lane_3_tmp);
-                        scale_lane_3_tmp = svzip1_s8(scale_lane_3_tmp, scale_lane_3_tmp);
-                        svint8_t scale_lane_4_tmp = svld1_s8(pg8_2, scale+6);
-                        scale_lane_4_tmp = svzip1_s8(scale_lane_4_tmp, scale_lane_4_tmp);
-                        scale_lane_4_tmp = svzip1_s8(scale_lane_4_tmp, scale_lane_4_tmp);
-                        svint32_t scale_lane_1 = svunpklo_s32(svunpklo_s16(scale_lane_1_tmp));
-                        svint32_t scale_lane_2 = svunpklo_s32(svunpklo_s16(scale_lane_2_tmp));
-                        svint32_t scale_lane_3 = svunpklo_s32(svunpklo_s16(scale_lane_3_tmp));
-                        svint32_t scale_lane_4 = svunpklo_s32(svunpklo_s16(scale_lane_4_tmp));
-
-                        isum_tmp = svmla_s32_x(pg32_8, isum_tmp, svdot_s32(vzero, q6bytes_1, q8bytes_1), scale_lane_1);
-                        isum_tmp = svmla_s32_x(pg32_8, isum_tmp, svdot_s32(vzero, q6bytes_2, q8bytes_2), scale_lane_2);
-                        isum_tmp = svmla_s32_x(pg32_8, isum_tmp, svdot_s32(vzero, q6bytes_3, q8bytes_3), scale_lane_3);
-                        isum_tmp = svmla_s32_x(pg32_8, isum_tmp, svdot_s32(vzero, q6bytes_4, q8bytes_4), scale_lane_4);
-                        scale += 8;
-                    }
-                    isum += svaddv_s32(pg32_8, isum_tmp);
-                    sum += d_all * y[i].d * (isum - 32 * isum_mins);
-                }
-                break;
-            default:
-                assert(false && "Unsupported vector length");
-                break;
-        }
-    }
-
-    *s = sum;
-
-#elif __ARM_NEON
-    float sum = 0;
-
-    const uint8x16_t m4b = vdupq_n_u8(0xF);
-    const int32x4_t vzero = vdupq_n_s32(0);
-    //const int8x16_t m32s = vdupq_n_s8(32);
-
-    const uint8x16_t mone = vdupq_n_u8(3);
-
-    ggml_int8x16x4_t q6bytes;
-    ggml_uint8x16x4_t q6h;
-
-    for (int i = 0; i < nb; ++i) {
-
-        const float d_all = GGML_FP16_TO_FP32(x[i].d);
-
-        const uint8_t * GGML_RESTRICT q6 = x[i].ql;
-        const uint8_t * GGML_RESTRICT qh = x[i].qh;
-        const int8_t * GGML_RESTRICT q8 = y[i].qs;
-
-        const int8_t * GGML_RESTRICT scale = x[i].scales;
-
-        const ggml_int16x8x2_t q8sums = ggml_vld1q_s16_x2(y[i].bsums);
-        const int8x16_t scales = vld1q_s8(scale);
-        const ggml_int16x8x2_t q6scales = {{vmovl_s8(vget_low_s8(scales)), vmovl_s8(vget_high_s8(scales))}};
-
-        const int32x4_t prod = vaddq_s32(vaddq_s32(vmull_s16(vget_low_s16 (q8sums.val[0]), vget_low_s16 (q6scales.val[0])),
-                                                   vmull_s16(vget_high_s16(q8sums.val[0]), vget_high_s16(q6scales.val[0]))),
-                                         vaddq_s32(vmull_s16(vget_low_s16 (q8sums.val[1]), vget_low_s16 (q6scales.val[1])),
-                                                   vmull_s16(vget_high_s16(q8sums.val[1]), vget_high_s16(q6scales.val[1]))));
-        int32_t isum_mins = vaddvq_s32(prod);
-
-        int32_t isum = 0;
-
-        for (int j = 0; j < QK_K/128; ++j) {
-
-            ggml_uint8x16x2_t qhbits = ggml_vld1q_u8_x2(qh); qh += 32;
-            ggml_uint8x16x4_t q6bits = ggml_vld1q_u8_x4(q6); q6 += 64;
-            ggml_int8x16x4_t q8bytes = ggml_vld1q_s8_x4(q8); q8 += 64;
-
-            q6h.val[0] = vshlq_n_u8(vandq_u8(mone, qhbits.val[0]), 4);
-            q6h.val[1] = vshlq_n_u8(vandq_u8(mone, qhbits.val[1]), 4);
-            uint8x16_t shifted = vshrq_n_u8(qhbits.val[0], 2);
-            q6h.val[2] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
-            shifted = vshrq_n_u8(qhbits.val[1], 2);
-            q6h.val[3] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
-
-            //q6bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[0], m4b), q6h.val[0])), m32s);
-            //q6bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[1], m4b), q6h.val[1])), m32s);
-            //q6bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[2], m4b), q6h.val[2])), m32s);
-            //q6bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[3], m4b), q6h.val[3])), m32s);
-            q6bytes.val[0] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[0], m4b), q6h.val[0]));
-            q6bytes.val[1] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[1], m4b), q6h.val[1]));
-            q6bytes.val[2] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[2], m4b), q6h.val[2]));
-            q6bytes.val[3] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[3], m4b), q6h.val[3]));
-
-            isum += vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[0], q8bytes.val[0])) * scale[0] +
-                    vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[1], q8bytes.val[1])) * scale[1] +
-                    vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[2], q8bytes.val[2])) * scale[2] +
-                    vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[3], q8bytes.val[3])) * scale[3];
-
-            scale += 4;
-
-            q8bytes = ggml_vld1q_s8_x4(q8); q8 += 64;
-
-            shifted = vshrq_n_u8(qhbits.val[0], 4);
-            q6h.val[0] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
-            shifted = vshrq_n_u8(qhbits.val[1], 4);
-            q6h.val[1] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
-            shifted = vshrq_n_u8(qhbits.val[0], 6);
-            q6h.val[2] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
-            shifted = vshrq_n_u8(qhbits.val[1], 6);
-            q6h.val[3] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
-
-            //q6bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[0], 4), q6h.val[0])), m32s);
-            //q6bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[1], 4), q6h.val[1])), m32s);
-            //q6bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[2], 4), q6h.val[2])), m32s);
-            //q6bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[3], 4), q6h.val[3])), m32s);
-            q6bytes.val[0] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[0], 4), q6h.val[0]));
-            q6bytes.val[1] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[1], 4), q6h.val[1]));
-            q6bytes.val[2] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[2], 4), q6h.val[2]));
-            q6bytes.val[3] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[3], 4), q6h.val[3]));
-
-            isum += vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[0], q8bytes.val[0])) * scale[0] +
-                    vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[1], q8bytes.val[1])) * scale[1] +
-                    vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[2], q8bytes.val[2])) * scale[2] +
-                    vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[3], q8bytes.val[3])) * scale[3];
-            scale += 4;
-        }
-        //sum += isum * d_all * y[i].d;
-        sum += d_all * y[i].d * (isum - 32 * isum_mins);
-
-    }
-    *s = sum;
-
-#elif defined __AVX2__
-
-    const __m256i m4 = _mm256_set1_epi8(0xF);
-    const __m256i m2 = _mm256_set1_epi8(3);
-    const __m256i m32s = _mm256_set1_epi8(32);
-
-    __m256 acc = _mm256_setzero_ps();
-
-    for (int i = 0; i < nb; ++i) {
-
-        const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
-
-        const uint8_t * GGML_RESTRICT q4 = x[i].ql;
-        const uint8_t * GGML_RESTRICT qh = x[i].qh;
-        const int8_t * GGML_RESTRICT q8 = y[i].qs;
-
-        const __m128i scales = _mm_loadu_si128((const __m128i*)x[i].scales);
-
-        __m256i sumi = _mm256_setzero_si256();
-
-        int is = 0;
-
-        for (int j = 0; j < QK_K/128; ++j) {
-
-            const __m128i scale_0 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 0));
-            const __m128i scale_1 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 1));
-            const __m128i scale_2 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 2));
-            const __m128i scale_3 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 3));
-            is += 4;
-
-            const __m256i q4bits1 = _mm256_loadu_si256((const __m256i*)q4); q4 += 32;
-            const __m256i q4bits2 = _mm256_loadu_si256((const __m256i*)q4); q4 += 32;
-            const __m256i q4bitsH = _mm256_loadu_si256((const __m256i*)qh); qh += 32;
-
-            const __m256i q4h_0 = _mm256_slli_epi16(_mm256_and_si256(q4bitsH, m2), 4);
-            const __m256i q4h_1 = _mm256_slli_epi16(_mm256_and_si256(_mm256_srli_epi16(q4bitsH, 2), m2), 4);
-            const __m256i q4h_2 = _mm256_slli_epi16(_mm256_and_si256(_mm256_srli_epi16(q4bitsH, 4), m2), 4);
-            const __m256i q4h_3 = _mm256_slli_epi16(_mm256_and_si256(_mm256_srli_epi16(q4bitsH, 6), m2), 4);
-
-            const __m256i q4_0 = _mm256_or_si256(_mm256_and_si256(q4bits1, m4), q4h_0);
-            const __m256i q4_1 = _mm256_or_si256(_mm256_and_si256(q4bits2, m4), q4h_1);
-            const __m256i q4_2 = _mm256_or_si256(_mm256_and_si256(_mm256_srli_epi16(q4bits1, 4), m4), q4h_2);
-            const __m256i q4_3 = _mm256_or_si256(_mm256_and_si256(_mm256_srli_epi16(q4bits2, 4), m4), q4h_3);
-
-            const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
-            const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
-            const __m256i q8_2 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
-            const __m256i q8_3 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
-
-            __m256i q8s_0 = _mm256_maddubs_epi16(m32s, q8_0);
-            __m256i q8s_1 = _mm256_maddubs_epi16(m32s, q8_1);
-            __m256i q8s_2 = _mm256_maddubs_epi16(m32s, q8_2);
-            __m256i q8s_3 = _mm256_maddubs_epi16(m32s, q8_3);
-
-            __m256i p16_0 = _mm256_maddubs_epi16(q4_0, q8_0);
-            __m256i p16_1 = _mm256_maddubs_epi16(q4_1, q8_1);
-            __m256i p16_2 = _mm256_maddubs_epi16(q4_2, q8_2);
-            __m256i p16_3 = _mm256_maddubs_epi16(q4_3, q8_3);
-
-            p16_0 = _mm256_sub_epi16(p16_0, q8s_0);
-            p16_1 = _mm256_sub_epi16(p16_1, q8s_1);
-            p16_2 = _mm256_sub_epi16(p16_2, q8s_2);
-            p16_3 = _mm256_sub_epi16(p16_3, q8s_3);
-
-            p16_0 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_0), p16_0);
-            p16_1 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_1), p16_1);
-            p16_2 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_2), p16_2);
-            p16_3 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_3), p16_3);
-
-            sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_0, p16_1));
-            sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_2, p16_3));
-
-        }
-
-        acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi), acc);
-    }
-
-    *s = hsum_float_8(acc);
-
-#elif defined __AVX__
-
-    const __m128i m3 = _mm_set1_epi8(3);
-    const __m128i m15 = _mm_set1_epi8(15);
-
-    __m256 acc = _mm256_setzero_ps();
-
-    for (int i = 0; i < nb; ++i) {
-
-        const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
-
-        const uint8_t * GGML_RESTRICT q4 = x[i].ql;
-        const uint8_t * GGML_RESTRICT qh = x[i].qh;
-        const int8_t * GGML_RESTRICT q8 = y[i].qs;
-
-        // handle the q6_k -32 offset separately using bsums
-        const __m128i q8sums_0 = _mm_loadu_si128((const __m128i*)y[i].bsums);
-        const __m128i q8sums_1 = _mm_loadu_si128((const __m128i*)y[i].bsums + 1);
-        const __m128i scales = _mm_loadu_si128((const __m128i*)x[i].scales);
-        const __m128i scales_16_0 = _mm_cvtepi8_epi16(scales);
-        const __m128i scales_16_1 = _mm_cvtepi8_epi16(_mm_bsrli_si128(scales, 8));
-        const __m128i q8sclsub_0 = _mm_slli_epi32(_mm_madd_epi16(q8sums_0, scales_16_0), 5);
-        const __m128i q8sclsub_1 = _mm_slli_epi32(_mm_madd_epi16(q8sums_1, scales_16_1), 5);
-
-        __m128i sumi_0 = _mm_setzero_si128();
-        __m128i sumi_1 = _mm_setzero_si128();
-
-        int is = 0;
-
-        for (int j = 0; j < QK_K/128; ++j) {
-
-            const __m128i q4bitsH_0 = _mm_loadu_si128((const __m128i*)qh); qh += 16;
-            const __m128i q4bitsH_1 = _mm_loadu_si128((const __m128i*)qh); qh += 16;
-
-            const __m128i q4h_0 = _mm_slli_epi16(_mm_and_si128(q4bitsH_0, m3), 4);
-            const __m128i q4h_1 = _mm_slli_epi16(_mm_and_si128(q4bitsH_1, m3), 4);
-            const __m128i q4h_2 = _mm_slli_epi16(_mm_and_si128(q4bitsH_0, _mm_set1_epi8(12)), 2);
-            const __m128i q4h_3 = _mm_slli_epi16(_mm_and_si128(q4bitsH_1, _mm_set1_epi8(12)), 2);
-            const __m128i q4h_4 = _mm_and_si128(q4bitsH_0, _mm_set1_epi8(48));
-            const __m128i q4h_5 = _mm_and_si128(q4bitsH_1, _mm_set1_epi8(48));
-            const __m128i q4h_6 = _mm_srli_epi16(_mm_and_si128(q4bitsH_0, _mm_set1_epi8(-64)), 2);
-            const __m128i q4h_7 = _mm_srli_epi16(_mm_and_si128(q4bitsH_1, _mm_set1_epi8(-64)), 2);
-
-            const __m128i q4bits1_0 = _mm_loadu_si128((const __m128i*)q4); q4 += 16;
-            const __m128i q4bits1_1 = _mm_loadu_si128((const __m128i*)q4); q4 += 16;
-            const __m128i q4bits2_0 = _mm_loadu_si128((const __m128i*)q4); q4 += 16;
-            const __m128i q4bits2_1 = _mm_loadu_si128((const __m128i*)q4); q4 += 16;
-
-            const __m128i q4_0 = _mm_or_si128(_mm_and_si128(q4bits1_0, m15), q4h_0);
-            const __m128i q4_1 = _mm_or_si128(_mm_and_si128(q4bits1_1, m15), q4h_1);
-            const __m128i q4_2 = _mm_or_si128(_mm_and_si128(q4bits2_0, m15), q4h_2);
-            const __m128i q4_3 = _mm_or_si128(_mm_and_si128(q4bits2_1, m15), q4h_3);
-            const __m128i q4_4 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits1_0, 4), m15), q4h_4);
-            const __m128i q4_5 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits1_1, 4), m15), q4h_5);
-            const __m128i q4_6 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits2_0, 4), m15), q4h_6);
-            const __m128i q4_7 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits2_1, 4), m15), q4h_7);
-
-            const __m128i q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
-            const __m128i q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
-            const __m128i q8_2 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
-            const __m128i q8_3 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
-            const __m128i q8_4 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
-            const __m128i q8_5 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
-            const __m128i q8_6 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
-            const __m128i q8_7 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
-
-            __m128i p16_0 = _mm_maddubs_epi16(q4_0, q8_0);
-            __m128i p16_1 = _mm_maddubs_epi16(q4_1, q8_1);
-            __m128i p16_2 = _mm_maddubs_epi16(q4_2, q8_2);
-            __m128i p16_3 = _mm_maddubs_epi16(q4_3, q8_3);
-            __m128i p16_4 = _mm_maddubs_epi16(q4_4, q8_4);
-            __m128i p16_5 = _mm_maddubs_epi16(q4_5, q8_5);
-            __m128i p16_6 = _mm_maddubs_epi16(q4_6, q8_6);
-            __m128i p16_7 = _mm_maddubs_epi16(q4_7, q8_7);
-
-            const __m128i scale_0 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 0));
-            const __m128i scale_1 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 1));
-            const __m128i scale_2 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 2));
-            const __m128i scale_3 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 3));
-            is += 4;
-
-            p16_0 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_0), p16_0);
-            p16_1 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_bsrli_si128(scale_0, 8)), p16_1);
-            p16_2 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_1), p16_2);
-            p16_3 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_bsrli_si128(scale_1, 8)), p16_3);
-            p16_4 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_2), p16_4);
-            p16_5 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_bsrli_si128(scale_2, 8)), p16_5);
-            p16_6 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_3), p16_6);
-            p16_7 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_bsrli_si128(scale_3, 8)), p16_7);
-
-            sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_0, p16_2));
-            sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_1, p16_3));
-            sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_4, p16_6));
-            sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_5, p16_7));
-
-        }
-
-        sumi_0 = _mm_sub_epi32(sumi_0, q8sclsub_0);
-        sumi_1 = _mm_sub_epi32(sumi_1, q8sclsub_1);
-        const __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0);
-        acc = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(sumi)), acc);
-    }
-
-    *s = hsum_float_8(acc);
-
-#elif defined __wasm_simd128__
-    int8_t aux8[QK_K] __attribute__((aligned(16)));
-    int32_t aux32[8] __attribute__((aligned(16))) = {0};
-    float sums[8] __attribute__((aligned(16))) = {0};
-
-    for (int i = 0; i < nb; ++i) {
-        // Unpack 6-bit quantized data into aux8 (unchanged)
-        const uint8_t * GGML_RESTRICT q4 = x[i].ql;
-        const uint8_t * GGML_RESTRICT qh = x[i].qh;
-        int8_t * a = aux8;
-        for (int j = 0; j < QK_K; j += 128) {
-            for (int l = 0; l < 32; ++l) {
-                a[l + 0] = (int8_t)((q4[l + 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32;
-                a[l + 32] = (int8_t)((q4[l + 32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32;
-                a[l + 64] = (int8_t)((q4[l + 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32;
-                a[l + 96] = (int8_t)((q4[l + 32] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32;
-            }
-            a += 128;
-            q4 += 64;
-            qh += 32;
-        }
-
-        const int8_t * GGML_RESTRICT a_ptr = aux8;
-        const int8_t * GGML_RESTRICT q8 = y[i].qs;
-        v128_t acc0 = wasm_i32x4_splat(0);
-        v128_t acc1 = wasm_i32x4_splat(0);
-
-        for (int j = 0; j < QK_K/16; ++j) {
-            const int scale = x[i].scales[j];
-            const v128_t vscale = wasm_i32x4_splat(scale);
-
-            // Load 16 elements from a and q8
-            const v128_t a_vec = wasm_v128_load(a_ptr);
-            const v128_t q8_vec = wasm_v128_load(q8);
-
-            // Process low 8 elements
-            v128_t a_low = wasm_i16x8_extend_low_i8x16(a_vec);
-            v128_t q8_low = wasm_i16x8_extend_low_i8x16(q8_vec);
-            v128_t prod_low
= wasm_i16x8_mul(a_low, q8_low); - v128_t prod_lo_lo = wasm_i32x4_extend_low_i16x8(prod_low); - v128_t prod_lo_hi = wasm_i32x4_extend_high_i16x8(prod_low); - - // Process high 8 elements - v128_t a_high = wasm_i16x8_extend_high_i8x16(a_vec); - v128_t q8_high = wasm_i16x8_extend_high_i8x16(q8_vec); - v128_t prod_high = wasm_i16x8_mul(a_high, q8_high); - v128_t prod_hi_lo = wasm_i32x4_extend_low_i16x8(prod_high); - v128_t prod_hi_hi = wasm_i32x4_extend_high_i16x8(prod_high); - - // Scale and accumulate - prod_lo_lo = wasm_i32x4_mul(prod_lo_lo, vscale); - prod_lo_hi = wasm_i32x4_mul(prod_lo_hi, vscale); - prod_hi_lo = wasm_i32x4_mul(prod_hi_lo, vscale); - prod_hi_hi = wasm_i32x4_mul(prod_hi_hi, vscale); - - acc0 = wasm_i32x4_add(acc0, wasm_i32x4_add(prod_lo_lo, prod_hi_lo)); - acc1 = wasm_i32x4_add(acc1, wasm_i32x4_add(prod_lo_hi, prod_hi_hi)); - - a_ptr += 16; - q8 += 16; - } - - // Store accumulated results - wasm_v128_store(&aux32[0], acc0); - wasm_v128_store(&aux32[4], acc1); - - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; - for (int l = 0; l < 8; ++l) { - sums[l] += d * aux32[l]; - } - } - - // Sum final results - float sumf = 0; - for (int l = 0; l < 8; ++l) { - sumf += sums[l]; - } - *s = sumf; - -#elif defined __riscv_v_intrinsic - - const int vector_length = __riscv_vlenb() * 8; - float sumf = 0; - - switch (vector_length) { - case 256: - for (int i = 0; i < nb; ++i) { - - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; - - const uint8_t * GGML_RESTRICT q6 = x[i].ql; - const uint8_t * GGML_RESTRICT qh = x[i].qh; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - - const int8_t * GGML_RESTRICT scale = x[i].scales; - - size_t vl; - - vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1); - - int sum_t = 0; - int is = 0; - - for (int j = 0; j < QK_K/128; ++j) { - - vl = 32; - - // load qh - vuint8m1_t qh_x = __riscv_vle8_v_u8m1(qh, vl); - - // load Q6 - vuint8m1_t q6_0 = __riscv_vle8_v_u8m1(q6, vl); - vuint8m1_t q6_1 = __riscv_vle8_v_u8m1(q6+32, vl); - - vuint8m1_t q6a_0 = __riscv_vand_vx_u8m1(q6_0, 0x0F, vl); - vuint8m1_t q6a_1 = __riscv_vand_vx_u8m1(q6_1, 0x0F, vl); - vuint8m1_t q6s_0 = __riscv_vsrl_vx_u8m1(q6_0, 0x04, vl); - vuint8m1_t q6s_1 = __riscv_vsrl_vx_u8m1(q6_1, 0x04, vl); - - vuint8m1_t qh_0 = __riscv_vand_vx_u8m1(qh_x, 0x03, vl); - vuint8m1_t qh_1 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(qh_x, 0x2, vl), 0x03 , vl); - vuint8m1_t qh_2 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(qh_x, 0x4, vl), 0x03 , vl); - vuint8m1_t qh_3 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(qh_x, 0x6, vl), 0x03 , vl); - - vuint8m1_t qhi_0 = __riscv_vor_vv_u8m1(q6a_0, __riscv_vsll_vx_u8m1(qh_0, 0x04, vl), vl); - vuint8m1_t qhi_1 = __riscv_vor_vv_u8m1(q6a_1, __riscv_vsll_vx_u8m1(qh_1, 0x04, vl), vl); - vuint8m1_t qhi_2 = __riscv_vor_vv_u8m1(q6s_0, __riscv_vsll_vx_u8m1(qh_2, 0x04, vl), vl); - vuint8m1_t qhi_3 = __riscv_vor_vv_u8m1(q6s_1, __riscv_vsll_vx_u8m1(qh_3, 0x04, vl), vl); - - vint8m1_t a_0 = __riscv_vsub_vx_i8m1(__riscv_vreinterpret_v_u8m1_i8m1(qhi_0), 32, vl); - vint8m1_t a_1 = __riscv_vsub_vx_i8m1(__riscv_vreinterpret_v_u8m1_i8m1(qhi_1), 32, vl); - vint8m1_t a_2 = __riscv_vsub_vx_i8m1(__riscv_vreinterpret_v_u8m1_i8m1(qhi_2), 32, vl); - vint8m1_t a_3 = __riscv_vsub_vx_i8m1(__riscv_vreinterpret_v_u8m1_i8m1(qhi_3), 32, vl); - - // load Q8 and take product - vint16m2_t va_q_0 = __riscv_vwmul_vv_i16m2(a_0, __riscv_vle8_v_i8m1(q8, vl), vl); - vint16m2_t va_q_1 = __riscv_vwmul_vv_i16m2(a_1, __riscv_vle8_v_i8m1(q8+32, vl), vl); - vint16m2_t va_q_2 = __riscv_vwmul_vv_i16m2(a_2, 
__riscv_vle8_v_i8m1(q8+64, vl), vl); - vint16m2_t va_q_3 = __riscv_vwmul_vv_i16m2(a_3, __riscv_vle8_v_i8m1(q8+96, vl), vl); - - vl = 16; - - vint32m2_t vaux_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_0, 0), scale[is+0], vl); - vint32m2_t vaux_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_0, 1), scale[is+1], vl); - vint32m2_t vaux_2 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_1, 0), scale[is+2], vl); - vint32m2_t vaux_3 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_1, 1), scale[is+3], vl); - vint32m2_t vaux_4 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_2, 0), scale[is+4], vl); - vint32m2_t vaux_5 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_2, 1), scale[is+5], vl); - vint32m2_t vaux_6 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_3, 0), scale[is+6], vl); - vint32m2_t vaux_7 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_3, 1), scale[is+7], vl); - - vint32m1_t isum0 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(vaux_0, vaux_1, vl), vzero, vl); - vint32m1_t isum1 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(vaux_2, vaux_3, vl), isum0, vl); - vint32m1_t isum2 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(vaux_4, vaux_5, vl), isum1, vl); - vint32m1_t isum3 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(vaux_6, vaux_7, vl), isum2, vl); - - sum_t += __riscv_vmv_x_s_i32m1_i32(isum3); - - q6 += 64; qh += 32; q8 += 128; is=8; - - } - - sumf += d * sum_t; - - } - break; - case 128: - for (int i = 0; i < nb; ++i) { - - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; - - const uint8_t * restrict q6 = x[i].ql; - const uint8_t * restrict qh = x[i].qh; - const int8_t * restrict q8 = y[i].qs; - - const int8_t * restrict scale = x[i].scales; - - int sum_t = 0; - int t0; - - for (int j = 0; j < QK_K/128; ++j) { - __asm__ __volatile__( - "vsetvli zero, %[vl32], e8, m2\n\t" - "vle8.v v4, (%[qh])\n\t" - "vsll.vi v0, v4, 4\n\t" - "vsll.vi v2, v4, 2\n\t" - "vsrl.vi v6, v4, 2\n\t" - "vsetvli zero, %[vl64], e8, m4\n\t" - "vle8.v v8, (%[q6])\n\t" - "vsrl.vi v12, v8, 4\n\t" - "vand.vi v8, v8, 0xF\n\t" - "vsetvli zero, %[vl128], e8, m8\n\t" - "vand.vx v0, v0, %[mask]\n\t" - "vor.vv v8, v8, v0\n\t" - "vle8.v v0, (%[q8])\n\t" - "vsub.vx v8, v8, %[vl32]\n\t" - "vsetvli zero, %[vl64], e8, m4\n\t" - "vwmul.vv v16, v0, v8\n\t" - "vwmul.vv v24, v4, v12\n\t" - "vsetivli zero, 16, e16, m2\n\t" - "vmv.v.x v0, zero\n\t" - "vwredsum.vs v10, v16, v0\n\t" - "vwredsum.vs v9, v18, v0\n\t" - "vwredsum.vs v8, v20, v0\n\t" - "vwredsum.vs v7, v22, v0\n\t" - "vwredsum.vs v11, v24, v0\n\t" - "vwredsum.vs v12, v26, v0\n\t" - "vwredsum.vs v13, v28, v0\n\t" - "vwredsum.vs v14, v30, v0\n\t" - "vsetivli zero, 4, e32, m1\n\t" - "vslideup.vi v10, v9, 1\n\t" - "vslideup.vi v8, v7, 1\n\t" - "vslideup.vi v11, v12, 1\n\t" - "vslideup.vi v13, v14, 1\n\t" - "vslideup.vi v10, v8, 2\n\t" - "vslideup.vi v11, v13, 2\n\t" - "vsetivli zero, 8, e32, m2\n\t" - "vle8.v v2, (%[scale])\n\t" - "vsext.vf4 v4, v2\n\t" - "vmul.vv v2, v4, v10\n\t" - "vredsum.vs v0, v2, v0\n\t" - "vmv.x.s %[t0], v0\n\t" - "add %[sumi], %[sumi], %[t0]" - : [sumi] "+&r" (sum_t), [t0] "=&r" (t0) - : [qh] "r" (qh), [q6] "r" (q6), [q8] "r" (q8), [scale] "r" (scale) - , [vl32] "r" (32), [vl64] "r" (64), [vl128] "r" (128) - , [mask] "r" (0x30) - : "memory" - , "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7" - , "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15" - , "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23" - , "v24", "v25", "v26", "v27", 
"v28", "v29", "v30", "v31" - ); - q6 += 64; qh += 32; q8 += 128; scale += 8; - } - - sumf += d * sum_t; - - } - break; - default: - assert(false && "Unsupported vector length"); - break; - } - - *s = sumf; - -#elif defined(__POWER9_VECTOR__) - const vector signed char lowMask = vec_splats((signed char)0xF); - const vector int v0 = vec_splats((int32_t)0); - const vector unsigned char v2 = vec_splats((unsigned char)0x2); - const vector unsigned char v3 = vec_splats((unsigned char)0x3); - const vector unsigned char v4 = vec_splats((unsigned char)0x4); - const vector unsigned char v6 = vec_splats((unsigned char)0x6); - const vector signed char off = vec_splats((signed char)0x20); - - vector float vsumf0 = vec_splats(0.0f); - vector float vsumf1 = vec_splats(0.0f); - vector float vsumf2 = vec_splats(0.0f); - vector float vsumf3 = vec_splats(0.0f); - - for (int i = 0; i < nb; ++i) { - vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d)); - vector float vyd = vec_splats(y[i].d); - vector float vd = vec_mul(vxd, vyd); - - vector signed int vsumi0 = v0; - vector signed int vsumi1 = v0; - vector signed int vsumi2 = v0; - vector signed int vsumi3 = v0; - vector signed int vsumi4 = v0; - vector signed int vsumi5 = v0; - vector signed int vsumi6 = v0; - vector signed int vsumi7 = v0; - - const uint8_t * GGML_RESTRICT q6 = x[i].ql; - const uint8_t * GGML_RESTRICT qh = x[i].qh; - const int8_t * GGML_RESTRICT qs = x[i].scales; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - - for (int j = 0; j < QK_K/128; ++j) { - __builtin_prefetch(q6, 0, 0); - __builtin_prefetch(qh, 0, 0); - __builtin_prefetch(q8, 0, 0); - - vector signed char qxs0 = (vector signed char)vec_xl( 0, q6); - vector signed char qxs1 = (vector signed char)vec_xl(16, q6); - vector signed char qxs2 = (vector signed char)vec_xl(32, q6); - vector signed char qxs3 = (vector signed char)vec_xl(48, q6); - q6 += 64; - - vector signed char qxs00 = vec_and(qxs0, lowMask); - vector signed char qxs01 = vec_sr(qxs0, v4); - vector signed char qxs10 = vec_and(qxs1, lowMask); - vector signed char qxs11 = vec_sr(qxs1, v4); - vector signed char qxs20 = vec_and(qxs2, lowMask); - vector signed char qxs21 = vec_sr(qxs2, v4); - vector signed char qxs30 = vec_and(qxs3, lowMask); - vector signed char qxs31 = vec_sr(qxs3, v4); - - vector signed char qxhs0 = (vector signed char)vec_xl( 0, qh); - vector signed char qxhs1 = (vector signed char)vec_xl(16, qh); - qh += 32; - - vector signed char qxh00 = vec_sl(vec_and((vector signed char)v3, qxhs0), v4); - vector signed char qxh01 = vec_sl(vec_and((vector signed char)v3, vec_sr(qxhs0, v4)), v4); - vector signed char qxh10 = vec_sl(vec_and((vector signed char)v3, qxhs1), v4); - vector signed char qxh11 = vec_sl(vec_and((vector signed char)v3, vec_sr(qxhs1, v4)), v4); - vector signed char qxh20 = vec_sl(vec_and((vector signed char)v3, vec_sr(qxhs0, v2)), v4); - vector signed char qxh21 = vec_sl(vec_and((vector signed char)v3, vec_sr(qxhs0, v6)), v4); - vector signed char qxh30 = vec_sl(vec_and((vector signed char)v3, vec_sr(qxhs1, v2)), v4); - vector signed char qxh31 = vec_sl(vec_and((vector signed char)v3, vec_sr(qxhs1, v6)), v4); - - vector signed char q6x00 = vec_sub(vec_or(qxh00, qxs00), off); - vector signed char q6x01 = vec_sub(vec_or(qxh01, qxs01), off); - vector signed char q6x10 = vec_sub(vec_or(qxh10, qxs10), off); - vector signed char q6x11 = vec_sub(vec_or(qxh11, qxs11), off); - vector signed char q6x20 = vec_sub(vec_or(qxh20, qxs20), off); - vector signed char q6x21 = vec_sub(vec_or(qxh21, qxs21), off); - 
vector signed char q6x30 = vec_sub(vec_or(qxh30, qxs30), off); - vector signed char q6x31 = vec_sub(vec_or(qxh31, qxs31), off); - - vector signed char q8y00 = vec_xl( 0, q8); - vector signed char q8y10 = vec_xl( 16, q8); - vector signed char q8y20 = vec_xl( 32, q8); - vector signed char q8y30 = vec_xl( 48, q8); - vector signed char q8y01 = vec_xl( 64, q8); - vector signed char q8y11 = vec_xl( 80, q8); - vector signed char q8y21 = vec_xl( 96, q8); - vector signed char q8y31 = vec_xl(112, q8); - q8 += 128; - - vector signed short qv00 = vec_add(vec_mule(q6x00, q8y00), vec_mulo(q6x00, q8y00)); - vector signed short qv10 = vec_add(vec_mule(q6x10, q8y10), vec_mulo(q6x10, q8y10)); - vector signed short qv20 = vec_add(vec_mule(q6x20, q8y20), vec_mulo(q6x20, q8y20)); - vector signed short qv30 = vec_add(vec_mule(q6x30, q8y30), vec_mulo(q6x30, q8y30)); - vector signed short qv01 = vec_add(vec_mule(q6x01, q8y01), vec_mulo(q6x01, q8y01)); - vector signed short qv11 = vec_add(vec_mule(q6x11, q8y11), vec_mulo(q6x11, q8y11)); - vector signed short qv21 = vec_add(vec_mule(q6x21, q8y21), vec_mulo(q6x21, q8y21)); - vector signed short qv31 = vec_add(vec_mule(q6x31, q8y31), vec_mulo(q6x31, q8y31)); - - vector signed short vscales = vec_unpackh(vec_xl_len(qs, 8)); - qs += 8; - - vector signed short vs0 = vec_splat(vscales, 0); - vector signed short vs1 = vec_splat(vscales, 1); - vector signed short vs2 = vec_splat(vscales, 2); - vector signed short vs3 = vec_splat(vscales, 3); - vector signed short vs4 = vec_splat(vscales, 4); - vector signed short vs5 = vec_splat(vscales, 5); - vector signed short vs6 = vec_splat(vscales, 6); - vector signed short vs7 = vec_splat(vscales, 7); - - vsumi0 = vec_msum(qv00, vs0, vsumi0); - vsumi1 = vec_msum(qv01, vs4, vsumi1); - vsumi2 = vec_msum(qv10, vs1, vsumi2); - vsumi3 = vec_msum(qv11, vs5, vsumi3); - vsumi4 = vec_msum(qv20, vs2, vsumi4); - vsumi5 = vec_msum(qv21, vs6, vsumi5); - vsumi6 = vec_msum(qv30, vs3, vsumi6); - vsumi7 = vec_msum(qv31, vs7, vsumi7); - } - - vsumi0 = vec_add(vsumi0, vsumi4); - vsumi1 = vec_add(vsumi1, vsumi5); - vsumi2 = vec_add(vsumi2, vsumi6); - vsumi3 = vec_add(vsumi3, vsumi7); - - vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); - vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); - vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); - vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); - } - - vsumf0 = vec_add(vsumf0, vsumf2); - vsumf1 = vec_add(vsumf1, vsumf3); - - vsumf0 = vec_add(vsumf0, vsumf1); - - vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); - vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); - - *s = vec_extract(vsumf0, 0); - -#elif defined __loongarch_asx - - const __m256i m32s = __lasx_xvreplgr2vr_b(32); - - __m256 acc = (__m256)__lasx_xvldi(0); - - for (int i = 0; i < nb; ++i) { - - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); - - const uint8_t * GGML_RESTRICT q4 = x[i].ql; - const uint8_t * GGML_RESTRICT qh = x[i].qh; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - - const __m128i scales128 = __lsx_vld((const __m128i*)x[i].scales, 0); - const v16i8 shuffle_mask = {0, 2, 4, 6, 8, 10, 12, 14, 1, 3, 5, 7, 9, 11, 13, 15}; - const __m256i scales_shuffled = lasx_ext8_16(__lsx_vshuf_b(scales128, scales128, (__m128i)shuffle_mask)); - - __m256i sumi = __lasx_xvldi(0); - - for (int j = 0; j < QK_K/128; ++j) { - - const __m256i q4bits1 = __lasx_xvld((const __m256i*)q4, 0); q4 += 32; - const __m256i q4bits2 = __lasx_xvld((const __m256i*)q4, 0); q4 += 32; - const __m256i q4bitsH = __lasx_xvld((const __m256i*)qh, 0); qh += 32; - 
-            const __m256i q4h_0 = __lasx_xvslli_b(__lasx_xvandi_b(q4bitsH, 3), 4);
-            const __m256i q4h_1 = __lasx_xvslli_b(__lasx_xvandi_b(q4bitsH, 3 << 2), 2);
-            const __m256i q4h_2 = __lasx_xvandi_b(q4bitsH, 3 << 4);
-            const __m256i q4h_3 = __lasx_xvsrli_b(__lasx_xvandi_b(q4bitsH, 3 << 6), 2);
-
-            const __m256i q4_0 = __lasx_xvor_v(__lasx_xvandi_b(q4bits1, 0xf), q4h_0);
-            const __m256i q4_1 = __lasx_xvor_v(__lasx_xvandi_b(q4bits2, 0xf), q4h_1);
-            const __m256i q4_2 = __lasx_xvor_v(__lasx_xvsrli_b(q4bits1, 4), q4h_2);
-            const __m256i q4_3 = __lasx_xvor_v(__lasx_xvsrli_b(q4bits2, 4), q4h_3);
-
-            const __m256i q8_0 = __lasx_xvld((const __m256i*)q8, 0); q8 += 32;
-            const __m256i q8_1 = __lasx_xvld((const __m256i*)q8, 0); q8 += 32;
-            const __m256i q8_2 = __lasx_xvld((const __m256i*)q8, 0); q8 += 32;
-            const __m256i q8_3 = __lasx_xvld((const __m256i*)q8, 0); q8 += 32;
-
-            __m256i p16_0 = lasx_madd_h_b(__lasx_xvsub_b(q4_0, m32s), q8_0);
-            __m256i p16_1 = lasx_madd_h_b(__lasx_xvsub_b(q4_1, m32s), q8_1);
-            __m256i p16_2 = lasx_madd_h_b(__lasx_xvsub_b(q4_2, m32s), q8_2);
-            __m256i p16_3 = lasx_madd_h_b(__lasx_xvsub_b(q4_3, m32s), q8_3);
-
-            p16_0 = lasx_madd_h(lasx_xvrepl128vei_h(scales_shuffled, 4 * j + 0), p16_0);
-            p16_1 = lasx_madd_h(lasx_xvrepl128vei_h(scales_shuffled, 4 * j + 1), p16_1);
-            p16_2 = lasx_madd_h(lasx_xvrepl128vei_h(scales_shuffled, 4 * j + 2), p16_2);
-            p16_3 = lasx_madd_h(lasx_xvrepl128vei_h(scales_shuffled, 4 * j + 3), p16_3);
-
-            sumi = __lasx_xvadd_w(sumi, __lasx_xvadd_w(p16_0, p16_1));
-            sumi = __lasx_xvadd_w(sumi, __lasx_xvadd_w(p16_2, p16_3));
-        }
-
-        acc = __lasx_xvfmadd_s((__m256)__lasx_xvreplfr2vr_s(d), __lasx_xvffint_s_w(sumi), acc);
-    }
-
-    *s = hsum_float_8(acc);
-#elif defined(__VXE__) || defined(__VXE2__)
-    float sum = 0;
-
-    // Lower 4-bit and upper 2-bit masks
-    const uint8x16_t v_lm = vec_splat_u8(0x0F);
-    const uint8x16_t v_um = vec_splat_u8(0x03);
-
-    const int32x4_t v_z = vec_splat_s32(0);
-
-    int8x16_t q6b[4];
-    uint8x16_t q6h[4];
-
-    uint8x16_t v_xl[4];
-    uint8x16_t v_xh[2];
-    int8x16_t v_y[4];
-
-    for (int i = 0; i < nb; ++i) {
-        const float d_all = GGML_FP16_TO_FP32(x[i].d);
-
-        const uint8_t * GGML_RESTRICT x0l = x[i].ql;
-        const uint8_t * GGML_RESTRICT x0h = x[i].qh;
-        const int8_t * GGML_RESTRICT y0 = y[i].qs;
-
-        const int8_t * GGML_RESTRICT scale = x[i].scales;
-
-        const int16x8_t v_ysumsl = vec_xl(0 , y[i].bsums);
-        const int16x8_t v_ysumsh = vec_xl(16, y[i].bsums);
-
-        const int8x16_t v_scale = vec_xl(0, scale);
-        const int16x8_t v_scalel = vec_unpackh(v_scale);
-        const int16x8_t v_scaleh = vec_unpackl(v_scale);
-
-        const int32x4_t v_minslo = vec_mulo(v_ysumsl, v_scalel);
-        const int32x4_t v_minsle = vec_mule(v_ysumsl, v_scalel);
-        const int32x4_t v_minsho = vec_mulo(v_ysumsh, v_scaleh);
-        const int32x4_t v_minshe = vec_mule(v_ysumsh, v_scaleh);
-        const int32x4_t v_mins = v_minslo + v_minsle + v_minsho + v_minshe;
-
-        const int32_t mins = v_mins[0] + v_mins[1] + v_mins[2] + v_mins[3];
-
-        int32_t isum = 0;
-        for (int j = 0; j < QK_K/128; ++j) {
-            // Load model upper 2 bits
-            v_xh[0] = vec_xl(0 , x0h);
-            v_xh[1] = vec_xl(16, x0h);
-            x0h += 32;
-
-            // Load model lower 4 bits
-            v_xl[0] = vec_xl(0 , x0l);
-            v_xl[1] = vec_xl(16, x0l);
-            v_xl[2] = vec_xl(32, x0l);
-            v_xl[3] = vec_xl(48, x0l);
-            x0l += 64;
-
-            // Load activation quants
-            v_y[0] = vec_xl(0 , y0);
-            v_y[1] = vec_xl(16, y0);
-            v_y[2] = vec_xl(32, y0);
-            v_y[3] = vec_xl(48, y0);
-            y0 += 64;
-
-            q6h[0] = vec_sl(vec_and(v_um, v_xh[0]), 4);
-            q6h[1] = vec_sl(vec_and(v_um, v_xh[1]), 4);
-            uint8x16_t shifted = vec_sr(v_xh[0], 2);
-            q6h[2] = vec_sl(vec_and(v_um, shifted), 4);
-            shifted = vec_sr(v_xh[1], 2);
-            q6h[3] = vec_sl(vec_and(v_um, shifted), 4);
-
-            q6b[0] = (int8x16_t)(vec_or(vec_and(v_xl[0], v_lm), q6h[0]));
-            q6b[1] = (int8x16_t)(vec_or(vec_and(v_xl[1], v_lm), q6h[1]));
-            q6b[2] = (int8x16_t)(vec_or(vec_and(v_xl[2], v_lm), q6h[2]));
-            q6b[3] = (int8x16_t)(vec_or(vec_and(v_xl[3], v_lm), q6h[3]));
-
-            int32x4_t summs0 = ggml_vec_dot(v_z, q6b[0], v_y[0]);
-            int32x4_t summs1 = ggml_vec_dot(v_z, q6b[1], v_y[1]);
-            int32x4_t summs2 = ggml_vec_dot(v_z, q6b[2], v_y[2]);
-            int32x4_t summs3 = ggml_vec_dot(v_z, q6b[3], v_y[3]);
-
-            isum += (summs0[0] + summs0[1] + summs0[2] + summs0[3]) * scale[0] +
-                    (summs1[0] + summs1[1] + summs1[2] + summs1[3]) * scale[1] +
-                    (summs2[0] + summs2[1] + summs2[2] + summs2[3]) * scale[2] +
-                    (summs3[0] + summs3[1] + summs3[2] + summs3[3]) * scale[3];
-
-            scale += 4;
-
-
-            // Load activation quants
-            v_y[0] = vec_xl(0 , y0);
-            v_y[1] = vec_xl(16, y0);
-            v_y[2] = vec_xl(32, y0);
-            v_y[3] = vec_xl(48, y0);
-            y0 += 64;
-
-            shifted = vec_sr(v_xh[0], 4);
-            q6h[0] = vec_sl(vec_and(v_um, shifted), 4);
-            shifted = vec_sr(v_xh[1], 4);
-            q6h[1] = vec_sl(vec_and(v_um, shifted), 4);
-            shifted = vec_sr(v_xh[0], 6);
-            q6h[2] = vec_sl(vec_and(v_um, shifted), 4);
-            shifted = vec_sr(v_xh[1], 6);
-            q6h[3] = vec_sl(vec_and(v_um, shifted), 4);
-
-            q6b[0] = (int8x16_t)(vec_or(vec_sr(v_xl[0], 4), q6h[0]));
-            q6b[1] = (int8x16_t)(vec_or(vec_sr(v_xl[1], 4), q6h[1]));
-            q6b[2] = (int8x16_t)(vec_or(vec_sr(v_xl[2], 4), q6h[2]));
-            q6b[3] = (int8x16_t)(vec_or(vec_sr(v_xl[3], 4), q6h[3]));
-
-            summs0 = ggml_vec_dot(v_z, q6b[0], v_y[0]);
-            summs1 = ggml_vec_dot(v_z, q6b[1], v_y[1]);
-            summs2 = ggml_vec_dot(v_z, q6b[2], v_y[2]);
-            summs3 = ggml_vec_dot(v_z, q6b[3], v_y[3]);
-
-            isum += (summs0[0] + summs0[1] + summs0[2] + summs0[3]) * scale[0] +
-                    (summs1[0] + summs1[1] + summs1[2] + summs1[3]) * scale[1] +
-                    (summs2[0] + summs2[1] + summs2[2] + summs2[3]) * scale[2] +
-                    (summs3[0] + summs3[1] + summs3[2] + summs3[3]) * scale[3];
-
-            scale += 4;
-        }
-
-        sum += d_all * y[i].d * (isum - 32 * mins);
-    }
-
-    *s = sum;
-#else
-
-    int8_t aux8[QK_K];
-    int16_t aux16[8];
-    float sums [8];
-    int32_t aux32[8];
-    memset(sums, 0, 8*sizeof(float));
-
-    float sumf = 0;
-    for (int i = 0; i < nb; ++i) {
-        const uint8_t * GGML_RESTRICT q4 = x[i].ql;
-        const uint8_t * GGML_RESTRICT qh = x[i].qh;
-        const int8_t * GGML_RESTRICT q8 = y[i].qs;
-        memset(aux32, 0, 8*sizeof(int32_t));
-        int8_t * GGML_RESTRICT a = aux8;
-        for (int j = 0; j < QK_K; j += 128) {
-            for (int l = 0; l < 32; ++l) {
-                a[l + 0] = (int8_t)((q4[l + 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32;
-                a[l + 32] = (int8_t)((q4[l + 32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32;
-                a[l + 64] = (int8_t)((q4[l + 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32;
-                a[l + 96] = (int8_t)((q4[l + 32] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32;
-            }
-            a += 128;
-            q4 += 64;
-            qh += 32;
-        }
-        a = aux8;
-        int is = 0;
-        for (int j = 0; j < QK_K/16; ++j) {
-            int scale = x[i].scales[is++];
-            for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
-            for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
-            q8 += 8; a += 8;
-            for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
-            for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
-            q8 += 8; a += 8;
-        }
-        const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
-        for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
-    }
-    for (int l = 0; l < 8; ++l) sumf += sums[l];
-    *s = sumf;
-#endif
-}
-
-#if defined (__AVX__) || defined (__AVX2__) || defined (__ARM_NEON) || defined (__POWER9_VECTOR__) || defined(__loongarch_asx)
-static const int8_t keven_signs_q2xs[1024] = {
-    1, 1, 1, 1, 1, 1, 1, 1, -1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, 1,
-    1, 1, -1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, 1, 1, 1, -1, -1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, -1,
-    1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, 1, 1, -1, 1, -1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, -1,
-    1, 1, -1, -1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, 1,
-    1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, 1, -1, 1, 1, 1, 1, -1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, -1,
-    1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, -1, 1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, 1,
-    1, 1, 1, -1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, 1,
-    1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, 1, 1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, -1,
-    1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, 1, -1, 1, -1,
-    1, 1, -1, 1, 1, -1, 1, 1, -1, 1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, 1,
-    1, 1, 1, -1, 1, -1, 1, 1, -1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, 1,
-    1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, 1, 1, -1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, -1,
-    1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, 1,
-    1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, 1, 1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, -1,
-    1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, -1,
-    1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, -1, 1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, 1,
-    1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, -1, 1, 1, -1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, 1, -1, -1,
-    1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, -1, 1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, 1,
-    1, 1, 1, -1, 1, 1, -1, 1, -1, 1, 1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, 1,
-    1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, 1, 1, -1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, -1,
-    1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, -1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, 1,
-    1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, -1,
-    1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, 1, 1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, -1,
-    1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, 1,
-    1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, 1, -1, -1, -1, 1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, -1, 1,
-    1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1,
-    1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, -1,
-    1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, -1, 1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, 1,
-    1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, 1, 1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, -1,
-    1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, -1, 1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, 1,
-    1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, -1, 1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1,
-    1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, 1, 1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1,
-};
-#endif
-
-void ggml_vec_dot_iq2_xxs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
-    assert(n % QK_K == 0);
-    assert(nrc == 1);
-    UNUSED(nrc);
-    UNUSED(bx);
-    UNUSED(by);
-    UNUSED(bs);
-
-    const block_iq2_xxs * GGML_RESTRICT x = vx;
-    const block_q8_K * GGML_RESTRICT y = vy;
-
-    const int nb = n / QK_K;
-
-#if defined(__ARM_NEON)
-
-    const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs;
-
-    uint32_t aux32[4];
-    const uint8_t * aux8 = (const uint8_t *)aux32;
-
-    ggml_int8x16x4_t q2u;
-    ggml_int8x16x4_t q2s;
-    ggml_int8x16x4_t q8b;
-
-    float sumf = 0;
-    for (int i = 0; i < nb; ++i) {
-        const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
-        const uint16_t * GGML_RESTRICT q2 = x[i].qs;
-        const int8_t * GGML_RESTRICT q8 = y[i].qs;
-        float sumf1 = 0, sumf2 = 0;
-        for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) {
-            q8b = ggml_vld1q_s8_x4(q8); q8 += 64;
-            memcpy(aux32, q2, 4*sizeof(uint32_t)); q2 += 8;
-            q2u.val[0] = vcombine_s8(vld1_s8((const void *)(iq2xxs_grid + aux8[ 0])), vld1_s8((const void *)(iq2xxs_grid + aux8[ 1])));
-            q2u.val[1] = vcombine_s8(vld1_s8((const void *)(iq2xxs_grid + aux8[ 2])), vld1_s8((const void *)(iq2xxs_grid + aux8[ 3])));
-            q2u.val[2] = vcombine_s8(vld1_s8((const void *)(iq2xxs_grid + aux8[ 8])), vld1_s8((const void *)(iq2xxs_grid + aux8[ 9])));
-            q2u.val[3] = vcombine_s8(vld1_s8((const void *)(iq2xxs_grid + aux8[10])), vld1_s8((const void *)(iq2xxs_grid + aux8[11])));
-            q2s.val[0] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[1] >> 0) & 127))), vld1_s8((const void *)(signs64 + ((aux32[1] >> 7) & 127))));
-            q2s.val[1] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[1] >> 14) & 127))), vld1_s8((const void *)(signs64 + ((aux32[1] >> 21) & 127))));
-            q2s.val[2] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[3] >> 0) & 127))), vld1_s8((const void *)(signs64 + ((aux32[3] >> 7) & 127))));
-            q2s.val[3] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[3] >> 14) & 127))), vld1_s8((const void *)(signs64 + ((aux32[3] >> 21) & 127))));
-            q2u.val[0] = vmulq_s8(q2u.val[0], q2s.val[0]);
-            q2u.val[1] = vmulq_s8(q2u.val[1], q2s.val[1]);
-            q2u.val[2] = vmulq_s8(q2u.val[2], q2s.val[2]);
-            q2u.val[3] = vmulq_s8(q2u.val[3], q2s.val[3]);
-            const int32x4_t p1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q2u.val[0], q8b.val[0]), q2u.val[1], q8b.val[1]);
-            const int32x4_t p2 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q2u.val[2], q8b.val[2]), q2u.val[3], q8b.val[3]);
-            sumf1 += vaddvq_s32(p1) * (0.5f + (aux32[1] >> 28));
-            sumf2 += vaddvq_s32(p2) * (0.5f + (aux32[3] >> 28));
-        }
-        sumf += d*(sumf1 + sumf2);
-    }
-    *s = 0.25f * sumf;
-
-#elif defined(__AVX2__)
-
-    const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs;
-
-    uint32_t aux32[4];
-    const uint8_t * aux8 = (const uint8_t *)aux32;
-
-    __m256 accumf = _mm256_setzero_ps();
-    for (int i = 0; i < nb; ++i) {
-        const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
-        const uint16_t * GGML_RESTRICT q2 = x[i].qs;
-        const int8_t * GGML_RESTRICT q8 = y[i].qs;
-        __m256i sumi1 = _mm256_setzero_si256();
-        __m256i sumi2 = _mm256_setzero_si256();
-        for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) {
-            const __m256i q8_1 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32;
-            const __m256i q8_2 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32;
-            memcpy(aux32, q2, 4*sizeof(uint32_t)); q2 += 8;
-            const __m256i q2_1 = _mm256_set_epi64x(iq2xxs_grid[aux8[ 3]], iq2xxs_grid[aux8[ 2]], iq2xxs_grid[aux8[1]], iq2xxs_grid[aux8[0]]);
-            const __m256i q2_2 = _mm256_set_epi64x(iq2xxs_grid[aux8[11]], iq2xxs_grid[aux8[10]], iq2xxs_grid[aux8[9]], iq2xxs_grid[aux8[8]]);
-            const __m256i s2_1 = _mm256_set_epi64x(signs64[(aux32[1] >> 21) & 127], signs64[(aux32[1] >> 14) & 127],
-                                                   signs64[(aux32[1] >> 7) & 127], signs64[(aux32[1] >> 0) & 127]);
-            const __m256i s2_2 = _mm256_set_epi64x(signs64[(aux32[3] >> 21) & 127], signs64[(aux32[3] >> 14) & 127],
-                                                   signs64[(aux32[3] >> 7) & 127], signs64[(aux32[3] >> 0) & 127]);
-            const __m256i q8s_1 = _mm256_sign_epi8(q8_1, s2_1);
-            const __m256i q8s_2 = _mm256_sign_epi8(q8_2, s2_2);
-            const __m256i dot1 = _mm256_maddubs_epi16(q2_1, q8s_1);
-            const __m256i dot2 = _mm256_maddubs_epi16(q2_2, q8s_2);
-            const uint16_t ls1 = aux32[1] >> 28;
-            const uint16_t ls2 = aux32[3] >> 28;
-            const __m256i p1 = _mm256_madd_epi16(dot1, _mm256_set1_epi16(2*ls1+1));
-            const __m256i p2 = _mm256_madd_epi16(dot2, _mm256_set1_epi16(2*ls2+1));
-            sumi1 = _mm256_add_epi32(sumi1, p1);
-            sumi2 = _mm256_add_epi32(sumi2, p2);
-        }
-
-        accumf = _mm256_fmadd_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(_mm256_add_epi32(sumi1, sumi2)), accumf);
-
-    }
-
-    *s = 0.125f * hsum_float_8(accumf);
-
-#elif defined(__AVX__)
-    const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs;
-
-    uint32_t aux32[4];
-    const uint8_t * aux8 = (const uint8_t *)aux32;
-
-    __m256 accumf = _mm256_setzero_ps();
-    for (int i = 0; i < nb; ++i) {
-        const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
-        const uint16_t * GGML_RESTRICT q2 = x[i].qs;
-        const int8_t * GGML_RESTRICT q8 = y[i].qs;
-        __m128i sumi1_0 = _mm_setzero_si128();
-        __m128i sumi1_1 = _mm_setzero_si128();
-        __m128i sumi2_0 = _mm_setzero_si128();
-        __m128i sumi2_1 = _mm_setzero_si128();
-        for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) {
-            const __m128i q8_1_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16;
-            const __m128i q8_1_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16;
-            const __m128i q8_2_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16;
-            const __m128i q8_2_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16;
-            memcpy(aux32, q2, 4*sizeof(uint32_t)); q2 += 8;
-            const __m128i q2_1_0 = _mm_set_epi64x(iq2xxs_grid[aux8[1]], iq2xxs_grid[aux8[0]]);
-            const __m128i q2_1_1 = _mm_set_epi64x(iq2xxs_grid[aux8[3]], iq2xxs_grid[aux8[2]]);
-            const __m128i q2_2_0 = _mm_set_epi64x(iq2xxs_grid[aux8[9]], iq2xxs_grid[aux8[8]]);
-            const __m128i q2_2_1 = _mm_set_epi64x(iq2xxs_grid[aux8[11]], iq2xxs_grid[aux8[10]]);
-            const __m128i s2_1_0 = _mm_set_epi64x(signs64[(aux32[1] >> 7) & 127], signs64[(aux32[1] >> 0) & 127]);
-            const __m128i s2_1_1 = _mm_set_epi64x(signs64[(aux32[1] >> 21) & 127], signs64[(aux32[1] >> 14) & 127]);
-            const __m128i s2_2_0 = _mm_set_epi64x(signs64[(aux32[3] >> 7) & 127], signs64[(aux32[3] >> 0) & 127]);
-            const __m128i s2_2_1 = _mm_set_epi64x(signs64[(aux32[3] >> 21) & 127], signs64[(aux32[3] >> 14) & 127]);
-            const __m128i q8s_1_0 = _mm_sign_epi8(q8_1_0, s2_1_0);
-            const __m128i q8s_1_1 = _mm_sign_epi8(q8_1_1, s2_1_1);
-            const __m128i q8s_2_0 = _mm_sign_epi8(q8_2_0, s2_2_0);
-            const __m128i q8s_2_1 = _mm_sign_epi8(q8_2_1, s2_2_1);
-            const __m128i dot1_0 = _mm_maddubs_epi16(q2_1_0, q8s_1_0);
-            const __m128i dot1_1 = _mm_maddubs_epi16(q2_1_1, q8s_1_1);
-            const __m128i dot2_0 = _mm_maddubs_epi16(q2_2_0, q8s_2_0);
-            const __m128i dot2_1 = _mm_maddubs_epi16(q2_2_1, q8s_2_1);
-            const uint16_t ls1 = aux32[1] >> 28;
-            const uint16_t ls2 = aux32[3] >> 28;
-            const __m128i p1_0 = _mm_madd_epi16(dot1_0, _mm_set1_epi16(2*ls1+1));
-            const __m128i p1_1 = _mm_madd_epi16(dot1_1, _mm_set1_epi16(2*ls1+1));
-            const __m128i p2_0 = _mm_madd_epi16(dot2_0, _mm_set1_epi16(2*ls2+1));
-            const __m128i p2_1 = _mm_madd_epi16(dot2_1, _mm_set1_epi16(2*ls2+1));
-            sumi1_0 = _mm_add_epi32(sumi1_0, p1_0);
-            sumi1_1 = _mm_add_epi32(sumi1_1, p1_1);
-            sumi2_0 = _mm_add_epi32(sumi2_0, p2_0);
-            sumi2_1 = _mm_add_epi32(sumi2_1, p2_1);
-        }
-
-        accumf = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(MM256_SET_M128I(_mm_add_epi32(sumi1_1, sumi2_1), _mm_add_epi32(sumi1_0, sumi2_0)))), accumf);
-
-    }
-
-    *s = 0.125f * hsum_float_8(accumf);
-
-#elif defined(__POWER9_VECTOR__)
-    const vector int v0 = vec_splats((int32_t)0);
-    vector float vsumf0 = vec_splats(0.0f);
-    vector float vsumf1 = vec_splats(0.0f);
-    vector float vsumf2 = vec_splats(0.0f);
-    vector float vsumf3 = vec_splats(0.0f);
-
-    const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs;
-
-    for (int i = 0; i < nb; ++i) {
-        vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d));
-        vector float vyd = vec_splats(y[i].d);
-        vector float vd = vec_mul(vxd, vyd);
-
-        vector signed int vsumi0 = v0;
-        vector signed int vsumi1 = v0;
-        vector signed int vsumi2 = v0;
-        vector signed int vsumi3 = v0;
-
-        const uint16_t * GGML_RESTRICT q2 = x[i].qs;
-        const int8_t * GGML_RESTRICT q8 = y[i].qs;
-
-        for (int j = 0; j < QK_K/32; j += 2) {
-            __builtin_prefetch(q2, 0, 1);
-            __builtin_prefetch(q8, 0, 1);
-
-            uint32_t aux32[4];
-            const uint8_t * aux8 = (const uint8_t *)aux32;
-
-            memcpy(aux32, q2, 4*sizeof(uint32_t));
-            q2 += 8;
-
-            vector signed long long aux64x2_0 = {*(const int64_t *)(iq2xxs_grid + aux8[ 0]), *(const int64_t *)(iq2xxs_grid + aux8[ 1])};
-            vector signed long long aux64x2_1 = {*(const int64_t *)(iq2xxs_grid + aux8[ 2]), *(const int64_t *)(iq2xxs_grid + aux8[ 3])};
-            vector signed long long aux64x2_2 = {*(const int64_t *)(iq2xxs_grid + aux8[ 8]), *(const int64_t *)(iq2xxs_grid + aux8[ 9])};
-            vector signed long long aux64x2_3 = {*(const int64_t *)(iq2xxs_grid + aux8[10]), *(const int64_t *)(iq2xxs_grid + aux8[11])};
-
-            vector signed long long vsigns0 = {*(const int64_t *)(signs64 + ((aux32[1] >> 0) & 127)), *(const int64_t *)(signs64 + ((aux32[1] >> 7) & 127))};
-            vector signed long long vsigns1 = {*(const int64_t *)(signs64 + ((aux32[1] >> 14) & 127)), *(const int64_t *)(signs64 + ((aux32[1] >> 21) & 127))};
-            vector signed long long vsigns2 = {*(const int64_t *)(signs64 + ((aux32[3] >> 0) & 127)), *(const int64_t *)(signs64 + ((aux32[3] >> 7) & 127))};
-            vector signed long long vsigns3 = {*(const int64_t *)(signs64 + ((aux32[3] >> 14) & 127)), *(const int64_t *)(signs64 + ((aux32[3] >> 21) & 127))};
-
-            vector signed char q2x0 = (vector signed char)vec_mul((vector signed char)vsigns0, (vector signed char)aux64x2_0);
-            vector signed char q2x1 = (vector signed char)vec_mul((vector signed char)vsigns1, (vector signed char)aux64x2_1);
-            vector signed char q2x2 = (vector signed char)vec_mul((vector signed char)vsigns2, (vector signed char)aux64x2_2);
-            vector signed char q2x3 = (vector signed char)vec_mul((vector signed char)vsigns3, (vector signed char)aux64x2_3);
-
-            vector signed char q8y0 = vec_xl( 0, q8);
-            vector signed char q8y1 = vec_xl(16, q8);
-            vector signed char q8y2 = vec_xl(32, q8);
-            vector signed char q8y3 = vec_xl(48, q8);
-            q8 += 64;
-
-            vector signed short qv0 = vec_add(vec_mule(q2x0, q8y0), vec_mulo(q2x0, q8y0));
-            vector signed short qv1 = vec_add(vec_mule(q2x1, q8y1), vec_mulo(q2x1, q8y1));
-            vector signed short qv2 = vec_add(vec_mule(q2x2, q8y2), vec_mulo(q2x2, q8y2));
-            vector signed short qv3 = vec_add(vec_mule(q2x3, q8y3), vec_mulo(q2x3, q8y3));
-
-            const uint16_t ls0 = aux32[1] >> 28;
-            const uint16_t ls1 = aux32[3] >> 28;
-
-            vector signed short vscales01 = vec_splats((int16_t)(2*ls0+1));
-            vector signed short vscales23 = vec_splats((int16_t)(2*ls1+1));
-
-            vsumi0 = vec_msum(qv0, vscales01, vsumi0);
-            vsumi1 = vec_msum(qv1, vscales01, vsumi1);
-            vsumi2 = vec_msum(qv2, vscales23, vsumi2);
-            vsumi3 = vec_msum(qv3, vscales23, vsumi3);
-        }
-
-        vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0);
-        vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1);
-        vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2);
-        vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3);
-    }
-
-    vsumf0 = vec_add(vsumf0, vsumf2);
-    vsumf1 = vec_add(vsumf1, vsumf3);
-
-    vsumf0 = vec_add(vsumf0, vsumf1);
-
-    vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4));
-    vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8));
-
-    *s = 0.125f * vec_extract(vsumf0, 0);
-
-#elif defined(__loongarch_asx)
-
-    const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs;
-
-    uint32_t aux32[4];
-    const uint8_t * aux8 = (const uint8_t *)aux32;
-
-    __m256 accumf = (__m256)__lasx_xvldi(0);
-    for (int i = 0; i < nb; ++i) {
-        const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
-        const uint16_t * GGML_RESTRICT q2 = x[i].qs;
-        const int8_t * GGML_RESTRICT q8 = y[i].qs;
-        __m256i sumi1 = __lasx_xvldi(0);
-        __m256i sumi2 = __lasx_xvldi(0);
-        for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) {
-            const __m256i q8_1 = __lasx_xvld((const __m256i *)q8, 0); q8 += 32;
-            const __m256i q8_2 = __lasx_xvld((const __m256i *)q8, 0); q8 += 32;
-            memcpy(aux32, q2, 4*sizeof(uint32_t)); q2 += 8;
-
-            const __m256i q2_1 = lasx_set_d(iq2xxs_grid[aux8[ 3]], iq2xxs_grid[aux8[ 2]], iq2xxs_grid[aux8[1]], iq2xxs_grid[aux8[0]]);
-            const __m256i q2_2 = lasx_set_d(iq2xxs_grid[aux8[11]], iq2xxs_grid[aux8[10]], iq2xxs_grid[aux8[9]], iq2xxs_grid[aux8[8]]);
-            const __m256i s2_1 = lasx_set_d(signs64[(aux32[1] >> 21) & 127], signs64[(aux32[1] >> 14) & 127],
-                                            signs64[(aux32[1] >> 7) & 127], signs64[(aux32[1] >> 0) & 127]);
-            const __m256i s2_2 = lasx_set_d(signs64[(aux32[3] >> 21) & 127], signs64[(aux32[3] >> 14) & 127],
-                                            signs64[(aux32[3] >> 7) & 127], signs64[(aux32[3] >> 0) & 127]);
-            const __m256i q8s_1 = __lasx_xvsigncov_b(s2_1, q8_1);
-            const __m256i q8s_2 = __lasx_xvsigncov_b(s2_2, q8_2);
-            const __m256i dot1 = lasx_maddubs_h(q2_1, q8s_1);
-            const __m256i dot2 = lasx_maddubs_h(q2_2, q8s_2);
-            const uint16_t ls1 = aux32[1] >> 28;
-            const uint16_t ls2 = aux32[3] >> 28;
-            const __m256i p1 = lasx_madd_h(dot1, __lasx_xvreplgr2vr_h(2*ls1+1));
-            const __m256i p2 = lasx_madd_h(dot2, __lasx_xvreplgr2vr_h(2*ls2+1));
-            sumi1 = __lasx_xvadd_w(sumi1, p1);
-            sumi2 = __lasx_xvadd_w(sumi2, p2);
-        }
-
-        accumf = __lasx_xvfmadd_s(__lasx_xvreplfr2vr_s(d), __lasx_xvffint_s_w(__lasx_xvadd_w(sumi1, sumi2)), accumf);
-    }
-
-    *s = 0.125f * hsum_float_8(accumf);
-//#elif defined(__VXE__) || defined(__VXE2__)
-//    const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs;
-//
-//    uint32_t aux32[4];
-//    const uint8_t * aux8 = (const uint8_t *)aux32;
-//
-//    float sumf = 0;
-//
-//    for (int i = 0; i < nb; ++i) {
-//        const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
-//        const uint16_t * GGML_RESTRICT q2 = x[i].qs;
-//        const int8_t * GGML_RESTRICT q8 = y[i].qs;
-//
-//        float sumf1 = 0, sumf2 = 0;
-//
-//        for (int ib32 = 0; ib32 < QK_K/32; ib += 2) {
-//            int8x16_t q8b0 = vec_xl( 0, q8);
-//            int8x16_t qb81 = vec_xl(16, q8);
-//            int8x16_t q8b2 = vec_xl(32, q8);
-//            int8x16_t q8b3 = vec_xl(48, q8);
-//            q8 += 64;
-//
-//            memcpy(aux32, q2, 4 * sizeof(uint32_t));
-//            q2 += 8;
-//
-//            int8x16_t q2u0 = { *(const int64_t *)(iq2xxs_grid + aux8[ 0]), *(const int64_t *)(iq2xxs_grid + aux8[ 1]) };
-//            int8x16_t q2u1 = { *(const int64_t *)(iq2xxs_grid + aux8[ 2]), *(const int64_t *)(iq2xxs_grid + aux8[ 3]) };
-//            int8x16_t q2u2 = { *(const int64_t *)(iq2xxs_grid + aux8[ 8]), *(const int64_t *)(iq2xxs_grid + aux8[ 9]) };
-//            int8x16_t q2u3 = { *(const int64_t *)(iq2xxs_grid + aux8[10]), *(const int64_t *)(iq2xxs_grid + aux8[11]) };
-//
-//            int8x16_t q2s0 = { *(const int64_t *)(signs64 + ((aux32[1] >> 0) & 127)), *(const int64_t *)(signs64 + ((aux32[1] >> 7) & 127)) };
-//            int8x16_t q2s1 = { *(const int64_t *)(signs64 + ((aux32[1] >> 14) & 127)), *(const int64_t *)(signs64 + ((aux32[1] >> 21) & 127)) };
-//            int8x16_t q2s2 = { *(const int64_t *)(signs64 + ((aux32[3] >> 0) & 127)), *(const int64_t *)(signs64 + ((aux32[3] >> 7) & 127)) };
-//            int8x16_t q2s3 = { *(const int64_t *)(signs64 + ((aux32[3] >> 14) & 127)), *(const int64_t *)(signs64 + ((aux32[3] >> 21) & 127)) };
-//
-//            q2u0 = vec_mul(q2u0, q2s0);
-//            q2u1 = vec_mul(q2u1, q2s1);
-//            q2u2 = vec_mul(q2u2, q2s2);
-//            q2u3 = vec_mul(q2u3, q2s3);
-//
-//            const int32x4_t p1 = ggml_vec_dot(ggml_vec_dot(vec_splat_s32(0), q2u0, q8b0), q2u1, q8b1);
-//            const int32x4_t p2 = ggml_vec_dot(ggml_vec_dot(vec_splat_s32(0), q2u2, q8b2), q2u3, q8b3);
-//
-//            sumf1 += (p1[0] + p1[1] + p1[2] + p1[3]) * (0.5f + (aux32[1] >> 28));
-//            sumf2 += (p2[0] + p2[1] + p2[2] + p2[3]) * (0.5f + (aux32[3] >> 28));
-//        }
-//
-//        sumf += d * (sumf1 + sumf2);
-//    }
-//
-//    *s = 0.25f * sumf;
-#else
-
-    uint32_t aux32[2];
-    const uint8_t * aux8 = (const uint8_t *)aux32;
-
-    float sumf = 0.f;
-    for (int i = 0; i < nb; ++i) {
-        const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
-        const uint16_t * GGML_RESTRICT q2 = x[i].qs;
-        const int8_t * GGML_RESTRICT q8 = y[i].qs;
-        int32_t bsum = 0;
-        for (int ib32 = 0; ib32 < QK_K/32; ++ib32) {
-            memcpy(aux32, q2, 2*sizeof(uint32_t));
-            q2 += 4;
-            const uint32_t ls = 2*(aux32[1] >> 28) + 1;
-            int32_t sumi = 0;
-            for (int l = 0; l < 4; ++l) {
-                const uint8_t * grid = (const uint8_t *)(iq2xxs_grid + aux8[l]);
-                const uint8_t signs = ksigns_iq2xs[(aux32[1] >> 7*l) & 127];
-                for (int j = 0; j < 8; ++j) {
-                    sumi += grid[j] * q8[j] * (signs & kmask_iq2xs[j] ? -1 : 1);
-                }
-                q8 += 8;
-            }
-            bsum += sumi * ls;
-        }
-        sumf += d * bsum;
-    }
-    *s = 0.125f * sumf;
-#endif
-}
-
-void ggml_vec_dot_iq2_xs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
-    assert(n % QK_K == 0);
-    assert(nrc == 1);
-    UNUSED(nrc);
-    UNUSED(bx);
-    UNUSED(by);
-    UNUSED(bs);
-
-    const block_iq2_xs * GGML_RESTRICT x = vx;
-    const block_q8_K * GGML_RESTRICT y = vy;
-
-    const int nb = n / QK_K;
-
-#if defined(__ARM_NEON)
-
-    const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs;
-
-    ggml_int8x16x4_t q2u;
-    ggml_int8x16x4_t q2s;
-    ggml_int8x16x4_t q8b;
-
-    int32x4x4_t scales32;
-
-    float sumf = 0;
-    for (int i = 0; i < nb; ++i) {
-        const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
-        const uint16_t * GGML_RESTRICT q2 = x[i].qs;
-        const int8_t * GGML_RESTRICT q8 = y[i].qs;
-        const uint8x8_t scales8 = vld1_u8(x[i].scales);
-        const uint8x8_t scales_l = vand_u8(scales8, vdup_n_u8(0xf));
-        const uint8x8_t scales_h = vshr_n_u8(scales8, 4);
-        uint8x16_t scales = vcombine_u8(vzip1_u8(scales_l, scales_h), vzip2_u8(scales_l, scales_h));
-        scales = vaddq_u8(vshlq_n_u8(scales, 1), vdupq_n_u8(1));
-        const uint16x8_t scales1 = vmovl_u8(vget_low_u8(scales));
-        const uint16x8_t scales2 = vmovl_u8(vget_high_u8(scales));
-        scales32.val[0] = vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(scales1)));
-        scales32.val[1] = vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(scales1)));
-        scales32.val[2] = vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(scales2)));
-        scales32.val[3] = vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(scales2)));
-        int32x4_t sumi = vdupq_n_s32(0);
-        for (int ib64 = 0; ib64 < QK_K/64; ++ib64) {
-            q8b = ggml_vld1q_s8_x4(q8); q8 += 64;
-            q2u.val[0] = vcombine_s8(vld1_s8((const void *)(iq2xs_grid + (q2[0] & 511))), vld1_s8((const void *)(iq2xs_grid + (q2[1] & 511))));
-            q2u.val[1] = vcombine_s8(vld1_s8((const void *)(iq2xs_grid + (q2[2] & 511))), vld1_s8((const void *)(iq2xs_grid + (q2[3] & 511))));
-            q2u.val[2] = vcombine_s8(vld1_s8((const void *)(iq2xs_grid + (q2[4] & 511))), vld1_s8((const void *)(iq2xs_grid + (q2[5] & 511))));
-            q2u.val[3] = vcombine_s8(vld1_s8((const void *)(iq2xs_grid + (q2[6] & 511))), vld1_s8((const void *)(iq2xs_grid + (q2[7] & 511))));
-            q2s.val[0] = vcombine_s8(vld1_s8((const void *)(signs64 + (q2[0] >> 9))), vld1_s8((const void *)(signs64 + (q2[1] >> 9))));
-            q2s.val[1] = vcombine_s8(vld1_s8((const void *)(signs64 + (q2[2] >> 9))), vld1_s8((const void *)(signs64 + (q2[3] >> 9))));
-            q2s.val[2] = vcombine_s8(vld1_s8((const void *)(signs64 + (q2[4] >> 9))), vld1_s8((const void *)(signs64 + (q2[5] >> 9))));
-            q2s.val[3] = vcombine_s8(vld1_s8((const void *)(signs64 + (q2[6] >> 9))), vld1_s8((const void *)(signs64 + (q2[7] >> 9))));
-            q2u.val[0] = vmulq_s8(q2u.val[0], q2s.val[0]);
-            q2u.val[1] = vmulq_s8(q2u.val[1], q2s.val[1]);
-            q2u.val[2] = vmulq_s8(q2u.val[2], q2s.val[2]);
-            q2u.val[3] = vmulq_s8(q2u.val[3], q2s.val[3]);
-            const int32x4_t p1 = ggml_vdotq_s32(vdupq_n_s32(0), q2u.val[0], q8b.val[0]);
-            const int32x4_t p2 = ggml_vdotq_s32(vdupq_n_s32(0), q2u.val[1], q8b.val[1]);
-            const int32x4_t p3 = ggml_vdotq_s32(vdupq_n_s32(0), q2u.val[2], q8b.val[2]);
-            const int32x4_t p4 = ggml_vdotq_s32(vdupq_n_s32(0), q2u.val[3], q8b.val[3]);
-            const int32x4_t p = vpaddq_s32(vpaddq_s32(p1, p2), vpaddq_s32(p3, p4));
-            sumi = vmlaq_s32(sumi, p, scales32.val[ib64]);
-            q2 += 8;
-        }
-        sumf += d*vaddvq_s32(sumi);
-    }
-    *s = 0.125f * sumf;
-
-#elif defined(__AVX2__)
-
-    const __m256i mone = _mm256_set1_epi8(1);
-    static const char block_sign_shuffle_mask_1[32] = {
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
-        0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06,
-    };
-    static const char block_sign_shuffle_mask_2[32] = {
-        0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a,
-        0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e,
-    };
-    static const uint8_t bit_selector_mask_bytes[32] = {
-        0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,
-        0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,
-    };
-
-    const __m256i bit_selector_mask = _mm256_loadu_si256((const __m256i*)bit_selector_mask_bytes);
-    const __m256i block_sign_shuffle_1 = _mm256_loadu_si256((const __m256i*)block_sign_shuffle_mask_1);
-    const __m256i block_sign_shuffle_2 = _mm256_loadu_si256((const __m256i*)block_sign_shuffle_mask_2);
-
-    static const uint8_t k_bit_helper[32] = {
-        0x00, 0x80, 0x80, 0x00, 0x80, 0x00, 0x00, 0x80, 0x80, 0x00, 0x00, 0x80, 0x00, 0x80, 0x80, 0x00,
-        0x00, 0x80, 0x80, 0x00, 0x80, 0x00, 0x00, 0x80, 0x80, 0x00, 0x00, 0x80, 0x00, 0x80, 0x80, 0x00,
-    };
-    const __m256i bit_helper = _mm256_loadu_si256((const __m256i*)k_bit_helper);
-    const __m256i m511 = _mm256_set1_epi16(511);
-    const __m128i m4 = _mm_set1_epi8(0xf);
-    const __m128i m1 = _mm_set1_epi8(1);
-
-    uint64_t aux64;
-
-    // somewhat hacky, but gives a significant boost in performance
-    __m256i aux_gindex;
-    const uint16_t * gindex = (const uint16_t *)&aux_gindex;
-
-    __m256 accumf = _mm256_setzero_ps();
-    for (int i = 0; i < nb; ++i) {
-        const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
-        const uint16_t * GGML_RESTRICT q2 = x[i].qs;
-        const int8_t * GGML_RESTRICT q8 = y[i].qs;
-
-        memcpy(&aux64, x[i].scales, 8);
-        __m128i stmp = _mm_set1_epi64x(aux64);
-        stmp = _mm_unpacklo_epi8(_mm_and_si128(stmp, m4), _mm_and_si128(_mm_srli_epi16(stmp, 4), m4));
-        const __m128i scales = _mm_add_epi8(_mm_slli_epi16(stmp, 1), m1);
-
-        __m256i sumi1 = _mm256_setzero_si256();
-        __m256i sumi2 = _mm256_setzero_si256();
-        for (int ib32 = 0; ib32 < QK_K/32; ib32 += 4) {
-
-            const __m256i q2_data = _mm256_loadu_si256((const __m256i*)q2); q2 += 16;
-            aux_gindex = _mm256_and_si256(q2_data, m511);
-
-            const __m256i partial_sign_bits = _mm256_srli_epi16(q2_data, 9);
-            const __m256i partial_sign_bits_upper = _mm256_srli_epi16(q2_data, 13);
-            const __m256i partial_sign_bits_for_counting = _mm256_xor_si256(partial_sign_bits, partial_sign_bits_upper);
-
-            const __m256i odd_bits = _mm256_shuffle_epi8(bit_helper, partial_sign_bits_for_counting);
-            const __m256i full_sign_bits = _mm256_or_si256(partial_sign_bits, odd_bits);
-
-            const __m256i q8_1 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32;
-            const __m256i q8_2 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32;
-            const __m256i q8_3 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32;
-            const __m256i q8_4 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32;
-
-            const __m256i q2_1 = _mm256_set_epi64x(iq2xs_grid[gindex[ 3]], iq2xs_grid[gindex[ 2]],
-                                                   iq2xs_grid[gindex[ 1]], iq2xs_grid[gindex[ 0]]);
-            const __m256i q2_2 = _mm256_set_epi64x(iq2xs_grid[gindex[ 7]], iq2xs_grid[gindex[ 6]],
-                                                   iq2xs_grid[gindex[ 5]], iq2xs_grid[gindex[ 4]]);
-            const __m256i q2_3 = _mm256_set_epi64x(iq2xs_grid[gindex[11]], iq2xs_grid[gindex[10]],
-                                                   iq2xs_grid[gindex[ 9]], iq2xs_grid[gindex[ 8]]);
-            const __m256i q2_4 = _mm256_set_epi64x(iq2xs_grid[gindex[15]], iq2xs_grid[gindex[14]],
-                                                   iq2xs_grid[gindex[13]], iq2xs_grid[gindex[12]]);
-
-            const __m128i full_signs_l = _mm256_castsi256_si128(full_sign_bits);
-            const __m128i full_signs_h = _mm256_extractf128_si256(full_sign_bits, 1);
-            const __m256i full_signs_1 = MM256_SET_M128I(full_signs_l, full_signs_l);
-            const __m256i full_signs_2 = MM256_SET_M128I(full_signs_h, full_signs_h);
-
-            __m256i signs;
-            signs = _mm256_shuffle_epi8(full_signs_1, block_sign_shuffle_1);
-            signs = _mm256_cmpeq_epi8(_mm256_and_si256(signs, bit_selector_mask), bit_selector_mask);
-            const __m256i q8s_1 = _mm256_sign_epi8(q8_1, _mm256_or_si256(signs, mone));
-
-            signs = _mm256_shuffle_epi8(full_signs_1, block_sign_shuffle_2);
-            signs = _mm256_cmpeq_epi8(_mm256_and_si256(signs, bit_selector_mask), bit_selector_mask);
-            const __m256i q8s_2 = _mm256_sign_epi8(q8_2, _mm256_or_si256(signs, mone));
-
-            signs = _mm256_shuffle_epi8(full_signs_2, block_sign_shuffle_1);
-            signs = _mm256_cmpeq_epi8(_mm256_and_si256(signs, bit_selector_mask), bit_selector_mask);
-            const __m256i q8s_3 = _mm256_sign_epi8(q8_3, _mm256_or_si256(signs, mone));
-
-            signs = _mm256_shuffle_epi8(full_signs_2, block_sign_shuffle_2);
-            signs = _mm256_cmpeq_epi8(_mm256_and_si256(signs, bit_selector_mask), bit_selector_mask);
-            const __m256i q8s_4 = _mm256_sign_epi8(q8_4, _mm256_or_si256(signs, mone));
-
-            const __m256i dot1 = _mm256_maddubs_epi16(q2_1, q8s_1);
-            const __m256i dot2 = _mm256_maddubs_epi16(q2_2, q8s_2);
-            const __m256i dot3 = _mm256_maddubs_epi16(q2_3, q8s_3);
-            const __m256i dot4 = _mm256_maddubs_epi16(q2_4, q8s_4);
-
-            const __m256i sc1 = _mm256_cvtepi8_epi16(_mm_shuffle_epi8(scales, get_scale_shuffle(ib32+0)));
-            const __m256i sc2 = _mm256_cvtepi8_epi16(_mm_shuffle_epi8(scales, get_scale_shuffle(ib32+1)));
-            const __m256i sc3 = _mm256_cvtepi8_epi16(_mm_shuffle_epi8(scales, get_scale_shuffle(ib32+2)));
-            const __m256i sc4 = _mm256_cvtepi8_epi16(_mm_shuffle_epi8(scales, get_scale_shuffle(ib32+3)));
-
-            sumi1 = _mm256_add_epi32(sumi1, _mm256_madd_epi16(dot1, sc1));
-            sumi2 = _mm256_add_epi32(sumi2, _mm256_madd_epi16(dot2, sc2));
-            sumi1 = _mm256_add_epi32(sumi1, _mm256_madd_epi16(dot3, sc3));
-            sumi2 = _mm256_add_epi32(sumi2, _mm256_madd_epi16(dot4, sc4));
-        }
-
-        accumf = _mm256_fmadd_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(_mm256_add_epi32(sumi1, sumi2)), accumf);
-
-    }
-
-    *s = 0.125f * hsum_float_8(accumf);
-
-#elif defined(__AVX__)
-    const __m128i mone = _mm_set1_epi8(1);
-    static const char block_sign_shuffle_mask_1[32] = {
-        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
-        0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06,
-    };
-    static const char block_sign_shuffle_mask_2[32] = {
-        0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a,
-        0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e,
-    };
-    static const uint8_t bit_selector_mask_bytes[32] = {
-        0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,
-        0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,
-    };
-
-    const __m128i bit_selector_mask_0 = _mm_loadu_si128((const __m128i*)bit_selector_mask_bytes);
-    const __m128i bit_selector_mask_1 = _mm_loadu_si128((const __m128i*)bit_selector_mask_bytes + 1);
-    const __m128i block_sign_shuffle_1_0 = _mm_loadu_si128((const __m128i*)block_sign_shuffle_mask_1);
-    const __m128i block_sign_shuffle_1_1 = _mm_loadu_si128((const __m128i*)block_sign_shuffle_mask_1 + 1);
-    const __m128i block_sign_shuffle_2_0 = _mm_loadu_si128((const __m128i*)block_sign_shuffle_mask_2);
-    const __m128i block_sign_shuffle_2_1 = _mm_loadu_si128((const __m128i*)block_sign_shuffle_mask_2 + 1);
-
-    static const uint8_t k_bit_helper[32] = {
-        0x00, 0x80, 0x80, 0x00, 0x80, 0x00, 0x00, 0x80, 0x80, 0x00, 0x00, 0x80, 0x00, 0x80, 0x80, 0x00,
-        0x00, 0x80, 0x80, 0x00, 0x80, 0x00, 0x00, 0x80, 0x80, 0x00, 0x00, 0x80, 0x00, 0x80, 0x80, 0x00,
-    };
-    const __m128i bit_helper_0 = _mm_loadu_si128((const __m128i*)k_bit_helper);
-    const __m128i bit_helper_1 = _mm_loadu_si128((const __m128i*)k_bit_helper + 1);
-    const __m128i m511 = _mm_set1_epi16(511);
-    const __m128i m4 = _mm_set1_epi8(0xf);
-    const __m128i m1 = _mm_set1_epi8(1);
-
-    uint64_t aux64;
-
-    // somewhat hacky, but gives a significant boost in performance
-    __m256i aux_gindex;
-    const uint16_t * gindex = (const uint16_t *)&aux_gindex;
-
-    __m256 accumf = _mm256_setzero_ps();
-    for (int i = 0; i < nb; ++i) {
-        const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
-        const uint16_t * GGML_RESTRICT q2 = x[i].qs;
-        const int8_t * GGML_RESTRICT q8 = y[i].qs;
-
-        memcpy(&aux64, x[i].scales, 8);
-        __m128i stmp = _mm_set1_epi64x(aux64);
-        stmp = _mm_unpacklo_epi8(_mm_and_si128(stmp, m4), _mm_and_si128(_mm_srli_epi16(stmp, 4), m4));
-        const __m128i scales = _mm_add_epi8(_mm_slli_epi16(stmp, 1), m1);
-
-        __m128i sumi1_0 = _mm_setzero_si128();
-        __m128i sumi1_1 = _mm_setzero_si128();
-        __m128i sumi2_0 = _mm_setzero_si128();
-        __m128i sumi2_1 = _mm_setzero_si128();
-        for (int ib32 = 0; ib32 < QK_K/32; ib32 += 4) {
-
-            const __m128i q2_data_0 = _mm_loadu_si128((const __m128i*)q2);
-            const __m128i q2_data_1 = _mm_loadu_si128((const __m128i*)q2 + 1); q2 += 16;
-            aux_gindex = MM256_SET_M128I(_mm_and_si128(q2_data_1, m511), _mm_and_si128(q2_data_0, m511));
-
-            const __m128i partial_sign_bits_0 = _mm_srli_epi16(q2_data_0, 9);
-            const __m128i partial_sign_bits_1 = _mm_srli_epi16(q2_data_1, 9);
-            const __m128i partial_sign_bits_upper_0 = _mm_srli_epi16(q2_data_0, 13);
-            const __m128i partial_sign_bits_upper_1 = _mm_srli_epi16(q2_data_1, 13);
-            const __m128i partial_sign_bits_for_counting_0 = _mm_xor_si128(partial_sign_bits_0, partial_sign_bits_upper_0);
-            const __m128i partial_sign_bits_for_counting_1 = _mm_xor_si128(partial_sign_bits_1, partial_sign_bits_upper_1);
-
-            const __m128i odd_bits_0 = _mm_shuffle_epi8(bit_helper_0, partial_sign_bits_for_counting_0);
-            const __m128i odd_bits_1 = _mm_shuffle_epi8(bit_helper_1, partial_sign_bits_for_counting_1);
-            const __m128i full_sign_bits_0 = _mm_or_si128(partial_sign_bits_0, odd_bits_0);
-            const __m128i full_sign_bits_1 = _mm_or_si128(partial_sign_bits_1, odd_bits_1);
-
-            const __m128i q8_1_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16;
-            const __m128i q8_1_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16;
-            const __m128i q8_2_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16;
-            const __m128i q8_2_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16;
-            const __m128i q8_3_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16;
-            const __m128i q8_3_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16;
-            const __m128i q8_4_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16;
-            const __m128i q8_4_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16;
-
-            const __m128i q2_1_0 = _mm_set_epi64x(iq2xs_grid[gindex[1]], iq2xs_grid[gindex[0]]);
-            const __m128i q2_1_1 = _mm_set_epi64x(iq2xs_grid[gindex[3]], iq2xs_grid[gindex[2]]);
-            const __m128i q2_2_0 = _mm_set_epi64x(iq2xs_grid[gindex[5]], iq2xs_grid[gindex[4]]);
-            const __m128i q2_2_1 = _mm_set_epi64x(iq2xs_grid[gindex[7]], iq2xs_grid[gindex[6]]);
-            const __m128i q2_3_0 = _mm_set_epi64x(iq2xs_grid[gindex[9]], iq2xs_grid[gindex[8]]);
-            const __m128i q2_3_1 = _mm_set_epi64x(iq2xs_grid[gindex[11]], iq2xs_grid[gindex[10]]);
-            const __m128i q2_4_0 = _mm_set_epi64x(iq2xs_grid[gindex[13]], iq2xs_grid[gindex[12]]);
-            const __m128i q2_4_1 = _mm_set_epi64x(iq2xs_grid[gindex[15]], iq2xs_grid[gindex[14]]);
-
-            // AVX2 full_signs_1 is full_sign_bits_0 here
-            // AVX2 full_signs_2 is full_sign_bits_1 here
-            __m128i signs_0, signs_1;
-            signs_0 = _mm_shuffle_epi8(full_sign_bits_0, block_sign_shuffle_1_0);
-            signs_1 = _mm_shuffle_epi8(full_sign_bits_0, block_sign_shuffle_1_1);
-            signs_0 = _mm_cmpeq_epi8(_mm_and_si128(signs_0, bit_selector_mask_0), bit_selector_mask_0);
-            signs_1 = _mm_cmpeq_epi8(_mm_and_si128(signs_1, bit_selector_mask_1), bit_selector_mask_1);
-            const __m128i q8s_1_0 = _mm_sign_epi8(q8_1_0, _mm_or_si128(signs_0, mone));
-            const __m128i q8s_1_1 = _mm_sign_epi8(q8_1_1, _mm_or_si128(signs_1, mone));
-
-            signs_0 = _mm_shuffle_epi8(full_sign_bits_0, block_sign_shuffle_2_0);
-            signs_1 = _mm_shuffle_epi8(full_sign_bits_0, block_sign_shuffle_2_1);
-            signs_0 = _mm_cmpeq_epi8(_mm_and_si128(signs_0, bit_selector_mask_0), bit_selector_mask_0);
-            signs_1 = _mm_cmpeq_epi8(_mm_and_si128(signs_1, bit_selector_mask_1), bit_selector_mask_1);
-            const __m128i q8s_2_0 = _mm_sign_epi8(q8_2_0, _mm_or_si128(signs_0, mone));
-            const __m128i q8s_2_1 = _mm_sign_epi8(q8_2_1, _mm_or_si128(signs_1, mone));
-
-            signs_0 = _mm_shuffle_epi8(full_sign_bits_1, block_sign_shuffle_1_0);
-            signs_1 = _mm_shuffle_epi8(full_sign_bits_1, block_sign_shuffle_1_1);
-            signs_0 = _mm_cmpeq_epi8(_mm_and_si128(signs_0, bit_selector_mask_0), bit_selector_mask_0);
-            signs_1 = _mm_cmpeq_epi8(_mm_and_si128(signs_1, bit_selector_mask_1), bit_selector_mask_1);
-            const __m128i q8s_3_0 = _mm_sign_epi8(q8_3_0, _mm_or_si128(signs_0, mone));
-            const __m128i q8s_3_1 = _mm_sign_epi8(q8_3_1, _mm_or_si128(signs_1, mone));
-
-            signs_0 = _mm_shuffle_epi8(full_sign_bits_1, block_sign_shuffle_2_0);
-            signs_1 = _mm_shuffle_epi8(full_sign_bits_1, block_sign_shuffle_2_1);
-            signs_0 = _mm_cmpeq_epi8(_mm_and_si128(signs_0, bit_selector_mask_0), bit_selector_mask_0);
-            signs_1 = _mm_cmpeq_epi8(_mm_and_si128(signs_1, bit_selector_mask_1), bit_selector_mask_1);
-            const __m128i q8s_4_0 = _mm_sign_epi8(q8_4_0, _mm_or_si128(signs_0, mone));
-            const __m128i q8s_4_1 = _mm_sign_epi8(q8_4_1, _mm_or_si128(signs_1, mone));
-
-            const __m128i dot1_0 = _mm_maddubs_epi16(q2_1_0, q8s_1_0);
-            const __m128i dot1_1 = _mm_maddubs_epi16(q2_1_1, q8s_1_1);
-            const __m128i dot2_0 = _mm_maddubs_epi16(q2_2_0, q8s_2_0);
-            const __m128i dot2_1 = _mm_maddubs_epi16(q2_2_1, q8s_2_1);
-            const __m128i dot3_0 = _mm_maddubs_epi16(q2_3_0, q8s_3_0);
-            const __m128i dot3_1 = _mm_maddubs_epi16(q2_3_1, q8s_3_1);
-            const __m128i dot4_0 = _mm_maddubs_epi16(q2_4_0, q8s_4_0);
-            const __m128i dot4_1 = _mm_maddubs_epi16(q2_4_1, q8s_4_1);
-
-            __m128i sc_tmp = _mm_shuffle_epi8(scales, get_scale_shuffle(ib32+0));
-            const __m128i sc1_0 = _mm_cvtepi8_epi16(sc_tmp);
-            const __m128i sc1_1 = _mm_cvtepi8_epi16(_mm_srli_si128(sc_tmp, 8));
-            sc_tmp = _mm_shuffle_epi8(scales, get_scale_shuffle(ib32+1));
-            const __m128i sc2_0 = _mm_cvtepi8_epi16(sc_tmp);
-            const __m128i sc2_1 =
_mm_cvtepi8_epi16(_mm_srli_si128(sc_tmp, 8)); - sc_tmp = _mm_shuffle_epi8(scales, get_scale_shuffle(ib32+2)); - const __m128i sc3_0 = _mm_cvtepi8_epi16(sc_tmp); - const __m128i sc3_1 = _mm_cvtepi8_epi16(_mm_srli_si128(sc_tmp, 8)); - sc_tmp = _mm_shuffle_epi8(scales, get_scale_shuffle(ib32+3)); - const __m128i sc4_0 = _mm_cvtepi8_epi16(sc_tmp); - const __m128i sc4_1 = _mm_cvtepi8_epi16(_mm_srli_si128(sc_tmp, 8)); - - sumi1_0 = _mm_add_epi32(sumi1_0, _mm_madd_epi16(dot1_0, sc1_0)); - sumi1_1 = _mm_add_epi32(sumi1_1, _mm_madd_epi16(dot1_1, sc1_1)); - sumi2_0 = _mm_add_epi32(sumi2_0, _mm_madd_epi16(dot2_0, sc2_0)); - sumi2_1 = _mm_add_epi32(sumi2_1, _mm_madd_epi16(dot2_1, sc2_1)); - sumi1_0 = _mm_add_epi32(sumi1_0, _mm_madd_epi16(dot3_0, sc3_0)); - sumi1_1 = _mm_add_epi32(sumi1_1, _mm_madd_epi16(dot3_1, sc3_1)); - sumi2_0 = _mm_add_epi32(sumi2_0, _mm_madd_epi16(dot4_0, sc4_0)); - sumi2_1 = _mm_add_epi32(sumi2_1, _mm_madd_epi16(dot4_1, sc4_1)); - } - - accumf = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(MM256_SET_M128I(_mm_add_epi32(sumi1_1, sumi2_1), _mm_add_epi32(sumi1_0, sumi2_0)))), accumf); - - } - - *s = 0.125f * hsum_float_8(accumf); - -#elif defined(__loongarch_asx) - - const __m256i mone = __lasx_xvreplgr2vr_b(1); - static const char block_sign_shuffle_mask_1[32] = { - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, - 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, - }; - static const char block_sign_shuffle_mask_2[32] = { - 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, - 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, - }; - static const uint8_t bit_selector_mask_bytes[32] = { - 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, - 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, - }; - - const __m256i bit_selector_mask = __lasx_xvld((const __m256i*)bit_selector_mask_bytes, 0); - const __m256i block_sign_shuffle_1 = __lasx_xvld((const __m256i*)block_sign_shuffle_mask_1, 0); - const __m256i block_sign_shuffle_2 = __lasx_xvld((const __m256i*)block_sign_shuffle_mask_2, 0); - - static const uint8_t k_bit_helper[32] = { - 0x00, 0x80, 0x80, 0x00, 0x80, 0x00, 0x00, 0x80, 0x80, 0x00, 0x00, 0x80, 0x00, 0x80, 0x80, 0x00, - 0x00, 0x80, 0x80, 0x00, 0x80, 0x00, 0x00, 0x80, 0x80, 0x00, 0x00, 0x80, 0x00, 0x80, 0x80, 0x00, - }; - const __m256i bit_helper = __lasx_xvld((const __m256i*)k_bit_helper, 0); - const __m256i m511 = __lasx_xvreplgr2vr_h(511); - const __m128i m4 = __lsx_vreplgr2vr_b(0xf); - const __m128i m1 = __lsx_vreplgr2vr_b(1); - - uint64_t aux64; - - // somewhat hacky, but gives a significant boost in performance - __m256i aux_gindex; - const uint16_t * gindex = (const uint16_t *)&aux_gindex; - - __m256 accumf = (__m256)__lasx_xvldi(0); - for (int i = 0; i < nb; ++i) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; - const uint16_t * GGML_RESTRICT q2 = x[i].qs; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - - memcpy(&aux64, x[i].scales, 8); - __m128i stmp = __lsx_vreplgr2vr_d(aux64); - stmp = __lsx_vilvl_b( __lsx_vand_v(__lsx_vsrli_h(stmp, 4), m4), __lsx_vand_v(stmp, m4)); - const __m128i scales = __lsx_vadd_b(__lsx_vslli_h(stmp, 1), m1); - - __m256i sumi1 = __lasx_xvldi(0); - __m256i sumi2 = __lasx_xvldi(0); - for (int ib32 = 0; ib32 < QK_K/32; ib32 += 4) { - - const __m256i 
q2_data = __lasx_xvld((const __m256i*)q2, 0); q2 += 16; - aux_gindex = __lasx_xvand_v(q2_data, m511); - - const __m256i partial_sign_bits = __lasx_xvsrli_h(q2_data, 9); - const __m256i partial_sign_bits_upper = __lasx_xvsrli_h(q2_data, 13); - const __m256i partial_sign_bits_for_counting = __lasx_xvxor_v(partial_sign_bits, partial_sign_bits_upper); - - const __m256i odd_bits = lasx_shuffle_b(bit_helper, partial_sign_bits_for_counting); - const __m256i full_sign_bits = __lasx_xvor_v(partial_sign_bits, odd_bits); - - const __m256i q8_1 = __lasx_xvld((const __m256i *)q8, 0); q8 += 32; - const __m256i q8_2 = __lasx_xvld((const __m256i *)q8, 0); q8 += 32; - const __m256i q8_3 = __lasx_xvld((const __m256i *)q8, 0); q8 += 32; - const __m256i q8_4 = __lasx_xvld((const __m256i *)q8, 0); q8 += 32; - - const __m256i q2_1 = lasx_set_d(iq2xs_grid[gindex[ 3]], iq2xs_grid[gindex[ 2]], - iq2xs_grid[gindex[ 1]], iq2xs_grid[gindex[ 0]]); - const __m256i q2_2 = lasx_set_d(iq2xs_grid[gindex[ 7]], iq2xs_grid[gindex[ 6]], - iq2xs_grid[gindex[ 5]], iq2xs_grid[gindex[ 4]]); - const __m256i q2_3 = lasx_set_d(iq2xs_grid[gindex[11]], iq2xs_grid[gindex[10]], - iq2xs_grid[gindex[ 9]], iq2xs_grid[gindex[ 8]]); - const __m256i q2_4 = lasx_set_d(iq2xs_grid[gindex[15]], iq2xs_grid[gindex[14]], - iq2xs_grid[gindex[13]], iq2xs_grid[gindex[12]]); - - const __m128i full_signs_l = lasx_extracti128(full_sign_bits, 0); - const __m128i full_signs_h = lasx_extracti128(full_sign_bits, 1); - const __m256i full_signs_1 = lasx_insertf128(full_signs_l, full_signs_l); - const __m256i full_signs_2 = lasx_insertf128(full_signs_h, full_signs_h); - - __m256i signs; - signs = lasx_shuffle_b(full_signs_1, block_sign_shuffle_1); - signs = __lasx_xvseq_b(__lasx_xvand_v(signs, bit_selector_mask), bit_selector_mask); - const __m256i q8s_1 = __lasx_xvsigncov_b(__lasx_xvor_v(signs, mone), q8_1); - - signs = lasx_shuffle_b(full_signs_1, block_sign_shuffle_2); - signs = __lasx_xvseq_b(__lasx_xvand_v(signs, bit_selector_mask), bit_selector_mask); - const __m256i q8s_2 = __lasx_xvsigncov_b(__lasx_xvor_v(signs, mone), q8_2); - - signs = lasx_shuffle_b(full_signs_2, block_sign_shuffle_1); - signs = __lasx_xvseq_b(__lasx_xvand_v(signs, bit_selector_mask), bit_selector_mask); - const __m256i q8s_3 = __lasx_xvsigncov_b(__lasx_xvor_v(signs, mone), q8_3); - - signs = lasx_shuffle_b(full_signs_2, block_sign_shuffle_2); - signs = __lasx_xvseq_b(__lasx_xvand_v(signs, bit_selector_mask), bit_selector_mask); - const __m256i q8s_4 = __lasx_xvsigncov_b(__lasx_xvor_v(signs, mone), q8_4); - - const __m256i dot1 = lasx_maddubs_h(q2_1, q8s_1); - const __m256i dot2 = lasx_maddubs_h(q2_2, q8s_2); - const __m256i dot3 = lasx_maddubs_h(q2_3, q8s_3); - const __m256i dot4 = lasx_maddubs_h(q2_4, q8s_4); - - const __m256i sc1 = lasx_ext8_16(lsx_shuffle_b(scales, get_scale_shuffle(ib32+0))); - const __m256i sc2 = lasx_ext8_16(lsx_shuffle_b(scales, get_scale_shuffle(ib32+1))); - const __m256i sc3 = lasx_ext8_16(lsx_shuffle_b(scales, get_scale_shuffle(ib32+2))); - const __m256i sc4 = lasx_ext8_16(lsx_shuffle_b(scales, get_scale_shuffle(ib32+3))); - - sumi1 = __lasx_xvadd_w(sumi1, lasx_madd_h(dot1, sc1)); - sumi2 = __lasx_xvadd_w(sumi2, lasx_madd_h(dot2, sc2)); - sumi1 = __lasx_xvadd_w(sumi1, lasx_madd_h(dot3, sc3)); - sumi2 = __lasx_xvadd_w(sumi2, lasx_madd_h(dot4, sc4)); - } - - accumf = __lasx_xvfmadd_s(__lasx_xvreplfr2vr_s(d), __lasx_xvffint_s_w(__lasx_xvadd_w(sumi1, sumi2)), accumf); - - } - - *s = 0.125f * hsum_float_8(accumf); -#elif defined(__POWER9_VECTOR__) - const vector 
int v0 = vec_splats((int32_t)0); - vector float vsumf0 = vec_splats(0.0f); - vector float vsumf1 = vec_splats(0.0f); - vector float vsumf2 = vec_splats(0.0f); - vector float vsumf3 = vec_splats(0.0f); - - const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs; - - for (int i = 0; i < nb; ++i) { - vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d)); - vector float vyd = vec_splats(y[i].d); - vector float vd = vec_mul(vxd, vyd); - - vector signed int vsumi0 = v0; - vector signed int vsumi1 = v0; - vector signed int vsumi2 = v0; - vector signed int vsumi3 = v0; - - const uint16_t * GGML_RESTRICT q2 = x[i].qs; - const uint8_t * GGML_RESTRICT sc = x[i].scales; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - - for (int j = 0; j < QK_K/64; ++j) { - __builtin_prefetch(q2, 0, 1); - __builtin_prefetch(q8, 0, 1); - - vector signed long long aux64x2_0 = {*(const int64_t *)(iq2xs_grid + (q2[0] & 511)), *(const int64_t *)(iq2xs_grid + (q2[1] & 511))}; - vector signed long long aux64x2_1 = {*(const int64_t *)(iq2xs_grid + (q2[2] & 511)), *(const int64_t *)(iq2xs_grid + (q2[3] & 511))}; - vector signed long long aux64x2_2 = {*(const int64_t *)(iq2xs_grid + (q2[4] & 511)), *(const int64_t *)(iq2xs_grid + (q2[5] & 511))}; - vector signed long long aux64x2_3 = {*(const int64_t *)(iq2xs_grid + (q2[6] & 511)), *(const int64_t *)(iq2xs_grid + (q2[7] & 511))}; - - vector signed long long vsigns0 = {*(const int64_t *)(signs64 + ((q2[0] >> 9))), *(const int64_t *)(signs64 + ((q2[1] >> 9)))}; - vector signed long long vsigns1 = {*(const int64_t *)(signs64 + ((q2[2] >> 9))), *(const int64_t *)(signs64 + ((q2[3] >> 9)))}; - vector signed long long vsigns2 = {*(const int64_t *)(signs64 + ((q2[4] >> 9))), *(const int64_t *)(signs64 + ((q2[5] >> 9)))}; - vector signed long long vsigns3 = {*(const int64_t *)(signs64 + ((q2[6] >> 9))), *(const int64_t *)(signs64 + ((q2[7] >> 9)))}; - q2 += 8; - - vector signed char q2x0 = (vector signed char)vec_mul((vector signed char)vsigns0, (vector signed char)aux64x2_0); - vector signed char q2x1 = (vector signed char)vec_mul((vector signed char)vsigns1, (vector signed char)aux64x2_1); - vector signed char q2x2 = (vector signed char)vec_mul((vector signed char)vsigns2, (vector signed char)aux64x2_2); - vector signed char q2x3 = (vector signed char)vec_mul((vector signed char)vsigns3, (vector signed char)aux64x2_3); - - vector signed char q8y0 = vec_xl( 0, q8); - vector signed char q8y1 = vec_xl(16, q8); - vector signed char q8y2 = vec_xl(32, q8); - vector signed char q8y3 = vec_xl(48, q8); - q8 += 64; - - vector signed short qv0 = vec_add(vec_mule(q2x0, q8y0), vec_mulo(q2x0, q8y0)); - vector signed short qv1 = vec_add(vec_mule(q2x1, q8y1), vec_mulo(q2x1, q8y1)); - vector signed short qv2 = vec_add(vec_mule(q2x2, q8y2), vec_mulo(q2x2, q8y2)); - vector signed short qv3 = vec_add(vec_mule(q2x3, q8y3), vec_mulo(q2x3, q8y3)); - - const uint16_t ls0 = (uint16_t)(sc[0] & 0xf); - const uint16_t ls1 = (uint16_t)(sc[0] >> 4); - const uint16_t ls2 = (uint16_t)(sc[1] & 0xf); - const uint16_t ls3 = (uint16_t)(sc[1] >> 4); - sc += 2; - - vector signed short vscales0 = vec_splats((int16_t)(2*ls0+1)); - vector signed short vscales1 = vec_splats((int16_t)(2*ls1+1)); - vector signed short vscales2 = vec_splats((int16_t)(2*ls2+1)); - vector signed short vscales3 = vec_splats((int16_t)(2*ls3+1)); - - vsumi0 = vec_msum(qv0, vscales0, vsumi0); - vsumi1 = vec_msum(qv1, vscales1, vsumi1); - vsumi2 = vec_msum(qv2, vscales2, vsumi2); - vsumi3 = vec_msum(qv3, vscales3, vsumi3); - } - - vsumf0 = 
vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); - vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); - vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); - vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); - } - - vsumf0 = vec_add(vsumf0, vsumf2); - vsumf1 = vec_add(vsumf1, vsumf3); - - vsumf0 = vec_add(vsumf0, vsumf1); - - vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); - vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); - - *s = 0.125f * vec_extract(vsumf0, 0); -#else - - float sumf = 0.f; - for (int i = 0; i < nb; ++i) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; - const uint16_t * GGML_RESTRICT q2 = x[i].qs; - const uint8_t * GGML_RESTRICT sc = x[i].scales; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - int32_t bsum = 0; - for (int ib32 = 0; ib32 < QK_K/32; ++ib32) { - const uint16_t ls1 = 2*(sc[ib32] & 0xf) + 1; - const uint16_t ls2 = 2*(sc[ib32] >> 4) + 1; - int32_t sumi = 0; - for (int l = 0; l < 2; ++l) { - const uint8_t * grid = (const uint8_t *)(iq2xs_grid + (q2[l] & 511)); - const uint8_t signs = ksigns_iq2xs[q2[l] >> 9]; - for (int j = 0; j < 8; ++j) { - sumi += grid[j] * q8[j] * (signs & kmask_iq2xs[j] ? -1 : 1); - } - q8 += 8; - } - bsum += sumi * ls1; - sumi = 0; - for (int l = 2; l < 4; ++l) { - const uint8_t * grid = (const uint8_t *)(iq2xs_grid + (q2[l] & 511)); - const uint8_t signs = ksigns_iq2xs[q2[l] >> 9]; - for (int j = 0; j < 8; ++j) { - sumi += grid[j] * q8[j] * (signs & kmask_iq2xs[j] ? -1 : 1); - } - q8 += 8; - } - bsum += sumi * ls2; - q2 += 4; - } - sumf += d * bsum; - } - *s = 0.125f * sumf; -#endif -} - -void ggml_vec_dot_iq2_s_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { - assert(n % QK_K == 0); - assert(nrc == 1); - UNUSED(nrc); - UNUSED(bx); - UNUSED(by); - UNUSED(bs); - - const block_iq2_s * GGML_RESTRICT x = vx; - const block_q8_K * GGML_RESTRICT y = vy; - - const int nb = n / QK_K; - -#if defined(__ARM_NEON) - - static const uint8_t k_mask1[32] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, - 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03 - }; - - static const uint8_t k_mask2[16] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,}; - - const ggml_uint8x16x2_t mask1 = ggml_vld1q_u8_x2(k_mask1); - const uint8x16_t mask2 = vld1q_u8(k_mask2); - const uint8x16_t m1 = vdupq_n_u8(1); - const int32x4_t vzero = vdupq_n_s32(0); - - uint8x16x2_t vs; - ggml_int8x16x4_t q2s; - ggml_int8x16x4_t q8b; - - float sumf = 0; - for (int i = 0; i < nb; ++i) { - - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; - - const uint8_t * GGML_RESTRICT qs = x[i].qs; - const uint8_t * GGML_RESTRICT qh = x[i].qh; - const uint16_t * GGML_RESTRICT signs = (const uint16_t *)(x[i].qs + QK_K/8); - const int8_t * GGML_RESTRICT q8 = y[i].qs; - - int sumi1 = 0, sumi2 = 0; - for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { - q8b = ggml_vld1q_s8_x4(q8); q8 += 64; - q2s.val[0] = vcombine_s8(vld1_s8((const int8_t *)(iq2s_grid + (qs[0] | ((qh[ib32+0] << 8) & 0x300)))), - vld1_s8((const int8_t *)(iq2s_grid + (qs[1] | ((qh[ib32+0] << 6) & 0x300))))); - q2s.val[1] = vcombine_s8(vld1_s8((const int8_t *)(iq2s_grid + (qs[2] | ((qh[ib32+0] << 4) & 0x300)))), - vld1_s8((const int8_t *)(iq2s_grid + (qs[3] | ((qh[ib32+0] << 2) & 0x300))))); - q2s.val[2] = vcombine_s8(vld1_s8((const int8_t *)(iq2s_grid + (qs[4] | ((qh[ib32+1] << 8) & 0x300)))), - 
vld1_s8((const int8_t *)(iq2s_grid + (qs[5] | ((qh[ib32+1] << 6) & 0x300))))); - q2s.val[3] = vcombine_s8(vld1_s8((const int8_t *)(iq2s_grid + (qs[6] | ((qh[ib32+1] << 4) & 0x300)))), - vld1_s8((const int8_t *)(iq2s_grid + (qs[7] | ((qh[ib32+1] << 2) & 0x300))))); - qs += 8; - - vs.val[0] = vreinterpretq_u8_u32(vdupq_n_u32(signs[0] | ((uint32_t) signs[1] << 16))); - vs.val[1] = vandq_u8(ggml_vqtbl1q_u8(vs.val[0], mask1.val[1]), mask2); - vs.val[0] = vandq_u8(ggml_vqtbl1q_u8(vs.val[0], mask1.val[0]), mask2); - vs.val[0] = vceqq_u8(vs.val[0], mask2); - vs.val[1] = vceqq_u8(vs.val[1], mask2); - - q2s.val[0] = vmulq_s8(vreinterpretq_s8_u8(vorrq_u8(vs.val[0], m1)), q2s.val[0]); - q2s.val[1] = vmulq_s8(vreinterpretq_s8_u8(vorrq_u8(vs.val[1], m1)), q2s.val[1]); - - vs.val[0] = vreinterpretq_u8_u32(vdupq_n_u32(signs[2] | ((uint32_t) signs[3] << 16))); - vs.val[1] = vandq_u8(ggml_vqtbl1q_u8(vs.val[0], mask1.val[1]), mask2); - vs.val[0] = vandq_u8(ggml_vqtbl1q_u8(vs.val[0], mask1.val[0]), mask2); - vs.val[0] = vceqq_u8(vs.val[0], mask2); - vs.val[1] = vceqq_u8(vs.val[1], mask2); - - signs += 4; - - q2s.val[2] = vmulq_s8(vreinterpretq_s8_u8(vorrq_u8(vs.val[0], m1)), q2s.val[2]); - q2s.val[3] = vmulq_s8(vreinterpretq_s8_u8(vorrq_u8(vs.val[1], m1)), q2s.val[3]); - - const int32x4_t p1 = ggml_vdotq_s32(vzero, q2s.val[0], q8b.val[0]); - const int32x4_t p2 = ggml_vdotq_s32(vzero, q2s.val[1], q8b.val[1]); - const int32x4_t p3 = ggml_vdotq_s32(vzero, q2s.val[2], q8b.val[2]); - const int32x4_t p4 = ggml_vdotq_s32(vzero, q2s.val[3], q8b.val[3]); - - sumi1 += vaddvq_s32(p1) * (1 + 2*(x[i].scales[ib32+0] & 0xf)); - sumi2 += vaddvq_s32(p2) * (1 + 2*(x[i].scales[ib32+0] >> 4)); - sumi1 += vaddvq_s32(p3) * (1 + 2*(x[i].scales[ib32+1] & 0xf)); - sumi2 += vaddvq_s32(p4) * (1 + 2*(x[i].scales[ib32+1] >> 4)); - } - sumf += d*(sumi1 + sumi2); - } - - *s = 0.125f * sumf; - -#elif defined(__AVX2__) - - static const uint8_t k_mask1[32] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, - 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03 - }; - - static const uint8_t k_mask2[32] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, - 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, - }; - - const __m128i m4 = _mm_set1_epi8(0xf); - const __m128i m1 = _mm_set1_epi8(1); - - const __m256i mask1 = _mm256_loadu_si256((const __m256i*)k_mask1); - const __m256i mask2 = _mm256_loadu_si256((const __m256i*)k_mask2); - - uint64_t aux64; - - __m256 accumf = _mm256_setzero_ps(); - for (int i = 0; i < nb; ++i) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; - const uint8_t * GGML_RESTRICT qs = x[i].qs; - const uint8_t * GGML_RESTRICT qh = x[i].qh; - const uint16_t * GGML_RESTRICT signs = (const uint16_t *)(x[i].qs + QK_K/8); - const int8_t * GGML_RESTRICT q8 = y[i].qs; - - memcpy(&aux64, x[i].scales, 8); - const __m128i scales8 = _mm_add_epi8(_mm_slli_epi16(_mm_and_si128(_mm_set_epi64x(aux64 >> 4, aux64), m4), 1), m1); - const __m256i scales16 = _mm256_cvtepi8_epi16(scales8); // 0 2 4 6 8 10 12 14 1 3 5 7 9 11 13 15 - - __m256i sumi1 = _mm256_setzero_si256(); - __m256i sumi2 = _mm256_setzero_si256(); - for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { - const __m256i q8_1 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32; - const __m256i q8_2 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32; - const __m256i q2_1 = 
_mm256_set_epi64x(iq2s_grid[qs[3] | ((qh[ib32+0] << 2) & 0x300)], - iq2s_grid[qs[2] | ((qh[ib32+0] << 4) & 0x300)], - iq2s_grid[qs[1] | ((qh[ib32+0] << 6) & 0x300)], - iq2s_grid[qs[0] | ((qh[ib32+0] << 8) & 0x300)]); - const __m256i q2_2 = _mm256_set_epi64x(iq2s_grid[qs[7] | ((qh[ib32+1] << 2) & 0x300)], - iq2s_grid[qs[6] | ((qh[ib32+1] << 4) & 0x300)], - iq2s_grid[qs[5] | ((qh[ib32+1] << 6) & 0x300)], - iq2s_grid[qs[4] | ((qh[ib32+1] << 8) & 0x300)]); - qs += 8; - - __m256i aux256 = _mm256_set1_epi32(signs[0] | ((uint32_t) signs[1] << 16)); - aux256 = _mm256_and_si256(_mm256_shuffle_epi8(aux256,mask1), mask2); - const __m256i s2_1 = _mm256_cmpeq_epi8(aux256, mask2); - const __m256i q8s_1 = _mm256_sub_epi8(_mm256_xor_si256(s2_1, q8_1), s2_1); - - aux256 = _mm256_set1_epi32(signs[2] | ((uint32_t) signs[3] << 16)); - aux256 = _mm256_and_si256(_mm256_shuffle_epi8(aux256,mask1), mask2); - const __m256i s2_2 = _mm256_cmpeq_epi8(aux256, mask2); - const __m256i q8s_2 = _mm256_sub_epi8(_mm256_xor_si256(s2_2, q8_2), s2_2); - - signs += 4; - - const __m256i dot1 = _mm256_maddubs_epi16(q2_1, q8s_1); // blocks 2*ib32+0, 2*ib32+1 - const __m256i dot2 = _mm256_maddubs_epi16(q2_2, q8s_2); // blocks 2*ib32+2, 2*ib32+3 - - const __m256i p1 = _mm256_madd_epi16(dot1, _mm256_shuffle_epi8(scales16, get_scale_shuffle_k4(ib32+0))); - const __m256i p2 = _mm256_madd_epi16(dot2, _mm256_shuffle_epi8(scales16, get_scale_shuffle_k4(ib32+1))); - sumi1 = _mm256_add_epi32(sumi1, p1); - sumi2 = _mm256_add_epi32(sumi2, p2); - } - - accumf = _mm256_fmadd_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(_mm256_add_epi32(sumi1, sumi2)), accumf); - - } - - *s = 0.125f * hsum_float_8(accumf); - -#elif defined(__AVX__) - static const uint8_t k_mask1[32] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, - 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03 - }; - - static const uint8_t k_mask2[32] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, - 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, - }; - - const __m128i m4 = _mm_set1_epi8(0xf); - const __m128i m1 = _mm_set1_epi8(1); - - const __m128i mask1_0 = _mm_loadu_si128((const __m128i*)k_mask1); - const __m128i mask1_1 = _mm_loadu_si128((const __m128i*)k_mask1 + 1); - const __m128i mask2_0 = _mm_loadu_si128((const __m128i*)k_mask2); - const __m128i mask2_1 = _mm_loadu_si128((const __m128i*)k_mask2 + 1); - - uint64_t aux64; - - __m256 accumf = _mm256_setzero_ps(); - for (int i = 0; i < nb; ++i) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; - const uint8_t * GGML_RESTRICT qs = x[i].qs; - const uint8_t * GGML_RESTRICT qh = x[i].qh; - const uint16_t * GGML_RESTRICT signs = (const uint16_t *)(x[i].qs + QK_K/8); - const int8_t * GGML_RESTRICT q8 = y[i].qs; - - memcpy(&aux64, x[i].scales, 8); - const __m128i scales8 = _mm_add_epi8(_mm_slli_epi16(_mm_and_si128(_mm_set_epi64x(aux64 >> 4, aux64), m4), 1), m1); - const __m128i scales16_0 = _mm_cvtepi8_epi16(scales8); - const __m128i scales16_1 = _mm_cvtepi8_epi16(_mm_srli_si128(scales8, 8)); - - __m128i sumi1_0 = _mm_setzero_si128(); - __m128i sumi1_1 = _mm_setzero_si128(); - __m128i sumi2_0 = _mm_setzero_si128(); - __m128i sumi2_1 = _mm_setzero_si128(); - for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { - const __m128i q8_1_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; - const __m128i q8_1_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; - 
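// A minimal scalar sketch of the 10-bit grid-index assembly used by the
// iq2s_grid lookups in these AVX2/SSE paths (and by the scalar fallback at
// the end of this function): the low 8 bits come from qs[l] and the two high
// bits from the block's shared qh byte, two bits per grid row. The helper
// name is illustrative, not an upstream symbol.
static inline uint16_t make_iq2s_index(const uint8_t * qs, uint8_t qh, int l) {
    return (uint16_t)(qs[l] | ((qh << (8 - 2*l)) & 0x300)); // 0..1023 into iq2s_grid
}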
const __m128i q8_2_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; - const __m128i q8_2_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; - const __m128i q2_1_0 = _mm_set_epi64x(iq2s_grid[qs[1] | ((qh[ib32+0] << 6) & 0x300)], - iq2s_grid[qs[0] | ((qh[ib32+0] << 8) & 0x300)]); - const __m128i q2_1_1 = _mm_set_epi64x(iq2s_grid[qs[3] | ((qh[ib32+0] << 2) & 0x300)], - iq2s_grid[qs[2] | ((qh[ib32+0] << 4) & 0x300)]); - const __m128i q2_2_0 = _mm_set_epi64x(iq2s_grid[qs[5] | ((qh[ib32+1] << 6) & 0x300)], - iq2s_grid[qs[4] | ((qh[ib32+1] << 8) & 0x300)]); - const __m128i q2_2_1 = _mm_set_epi64x(iq2s_grid[qs[7] | ((qh[ib32+1] << 2) & 0x300)], - iq2s_grid[qs[6] | ((qh[ib32+1] << 4) & 0x300)]); - qs += 8; - - __m128i aux128_0 = _mm_set1_epi32(signs[0] | ((uint32_t) signs[1] << 16)); - __m128i aux128_1 = aux128_0; - aux128_0 = _mm_and_si128(_mm_shuffle_epi8(aux128_0,mask1_0), mask2_0); - aux128_1 = _mm_and_si128(_mm_shuffle_epi8(aux128_1,mask1_1), mask2_1); - const __m128i s2_1_0 = _mm_cmpeq_epi8(aux128_0, mask2_0); - const __m128i s2_1_1 = _mm_cmpeq_epi8(aux128_1, mask2_1); - const __m128i q8s_1_0 = _mm_sub_epi8(_mm_xor_si128(s2_1_0, q8_1_0), s2_1_0); - const __m128i q8s_1_1 = _mm_sub_epi8(_mm_xor_si128(s2_1_1, q8_1_1), s2_1_1); - - aux128_0 = _mm_set1_epi32(signs[2] | ((uint32_t) signs[3] << 16)); - aux128_1 = aux128_0; - aux128_0 = _mm_and_si128(_mm_shuffle_epi8(aux128_0,mask1_0), mask2_0); - aux128_1 = _mm_and_si128(_mm_shuffle_epi8(aux128_1,mask1_1), mask2_1); - const __m128i s2_2_0 = _mm_cmpeq_epi8(aux128_0, mask2_0); - const __m128i s2_2_1 = _mm_cmpeq_epi8(aux128_1, mask2_1); - const __m128i q8s_2_0 = _mm_sub_epi8(_mm_xor_si128(s2_2_0, q8_2_0), s2_2_0); - const __m128i q8s_2_1 = _mm_sub_epi8(_mm_xor_si128(s2_2_1, q8_2_1), s2_2_1); - - signs += 4; - - const __m128i dot1_0 = _mm_maddubs_epi16(q2_1_0, q8s_1_0); - const __m128i dot1_1 = _mm_maddubs_epi16(q2_1_1, q8s_1_1); - const __m128i dot2_0 = _mm_maddubs_epi16(q2_2_0, q8s_2_0); - const __m128i dot2_1 = _mm_maddubs_epi16(q2_2_1, q8s_2_1); - - const __m128i p1_0 = _mm_madd_epi16(dot1_0, _mm_shuffle_epi8(scales16_0, _mm256_extractf128_si256(get_scale_shuffle_k4(ib32+0), 0))); - const __m128i p1_1 = _mm_madd_epi16(dot1_1, _mm_shuffle_epi8(scales16_1, _mm256_extractf128_si256(get_scale_shuffle_k4(ib32+0), 1))); - const __m128i p2_0 = _mm_madd_epi16(dot2_0, _mm_shuffle_epi8(scales16_0, _mm256_extractf128_si256(get_scale_shuffle_k4(ib32+1), 0))); - const __m128i p2_1 = _mm_madd_epi16(dot2_1, _mm_shuffle_epi8(scales16_1, _mm256_extractf128_si256(get_scale_shuffle_k4(ib32+1), 1))); - sumi1_0 = _mm_add_epi32(sumi1_0, p1_0); - sumi1_1 = _mm_add_epi32(sumi1_1, p1_1); - sumi2_0 = _mm_add_epi32(sumi2_0, p2_0); - sumi2_1 = _mm_add_epi32(sumi2_1, p2_1); - } - - accumf = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(MM256_SET_M128I(_mm_add_epi32(sumi1_1, sumi2_1), _mm_add_epi32(sumi1_0, sumi2_0)))), accumf); - - } - - *s = 0.125f * hsum_float_8(accumf); - -#elif defined(__POWER9_VECTOR__) - static const uint8_t k_mask1[32] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, - 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03 - }; - - static const uint8_t k_mask2[16] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,}; - - const vector int v0 = vec_splats((int32_t)0); - - vector float vsumf0 = vec_splats(0.0f); - vector float vsumf1 = vec_splats(0.0f); - vector float vsumf2 = vec_splats(0.0f); - 
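// The sign application in these paths (e.g. _mm_sub_epi8(_mm_xor_si128(s, q8), s)
// above, and the vec_sub(vec_xor(...)) form in the VSX loop below) is the
// branch-free conditional negation (x ^ m) - m, where each mask byte m is
// 0x00 (keep) or 0xFF (negate). A scalar sketch of the same identity, with an
// illustrative helper name:
static inline int8_t apply_sign_mask(int8_t x, int8_t m) {
    return (int8_t)(((int)x ^ (int)m) - (int)m); // m == 0 keeps x, m == -1 yields -x
}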
vector float vsumf3 = vec_splats(0.0f); - - const vector unsigned char mask0 = vec_xl( 0, k_mask1); - const vector unsigned char mask1 = vec_xl(16, k_mask1); - const vector signed char mask2 = (vector signed char)vec_xl( 0, k_mask2); - - for (int i = 0; i < nb; ++i) { - vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d)); - vector float vyd = vec_splats(y[i].d); - vector float vd = vec_mul(vxd, vyd); - - vector signed int vsumi0 = v0; - vector signed int vsumi1 = v0; - vector signed int vsumi2 = v0; - vector signed int vsumi3 = v0; - - const uint8_t * GGML_RESTRICT q2 = x[i].qs; - const uint8_t * GGML_RESTRICT qh = x[i].qh; - const uint16_t * GGML_RESTRICT signs = (const uint16_t *)(x[i].qs + QK_K/8); - const uint8_t * GGML_RESTRICT sc = x[i].scales; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - - for (int j = 0; j < QK_K/32; j += 2) { - __builtin_prefetch(q2, 0, 1); - __builtin_prefetch(q8, 0, 1); - - vector signed long long aux64x2_0 = {*(const int64_t *)(iq2s_grid + (q2[0] | ((qh[0] << 8) & 0x300))), *(const int64_t *)(iq2s_grid + (q2[1] | ((qh[0] << 6) & 0x300)))}; - vector signed long long aux64x2_1 = {*(const int64_t *)(iq2s_grid + (q2[2] | ((qh[0] << 4) & 0x300))), *(const int64_t *)(iq2s_grid + (q2[3] | ((qh[0] << 2) & 0x300)))}; - vector signed long long aux64x2_2 = {*(const int64_t *)(iq2s_grid + (q2[4] | ((qh[1] << 8) & 0x300))), *(const int64_t *)(iq2s_grid + (q2[5] | ((qh[1] << 6) & 0x300)))}; - vector signed long long aux64x2_3 = {*(const int64_t *)(iq2s_grid + (q2[6] | ((qh[1] << 4) & 0x300))), *(const int64_t *)(iq2s_grid + (q2[7] | ((qh[1] << 2) & 0x300)))}; - q2 += 8; - qh += 2; - - vector signed char vsigns01 = (vector signed char)vec_splats(*(const uint32_t *)&signs[0]); - vector signed char vsigns23 = (vector signed char)vec_splats(*(const uint32_t *)&signs[2]); - signs += 4; - - vector signed char vsigns0 = vec_perm(vsigns01, vsigns01, mask0); - vector signed char vsigns1 = vec_perm(vsigns01, vsigns01, mask1); - vector signed char vsigns2 = vec_perm(vsigns23, vsigns23, mask0); - vector signed char vsigns3 = vec_perm(vsigns23, vsigns23, mask1); - - vsigns0 = (vector signed char)vec_cmpeq(vec_and(vsigns0, mask2), mask2); - vsigns1 = (vector signed char)vec_cmpeq(vec_and(vsigns1, mask2), mask2); - vsigns2 = (vector signed char)vec_cmpeq(vec_and(vsigns2, mask2), mask2); - vsigns3 = (vector signed char)vec_cmpeq(vec_and(vsigns3, mask2), mask2); - - vector signed char q2x0 = vec_sub(vec_xor(vsigns0, (vector signed char)aux64x2_0), vsigns0); - vector signed char q2x1 = vec_sub(vec_xor(vsigns1, (vector signed char)aux64x2_1), vsigns1); - vector signed char q2x2 = vec_sub(vec_xor(vsigns2, (vector signed char)aux64x2_2), vsigns2); - vector signed char q2x3 = vec_sub(vec_xor(vsigns3, (vector signed char)aux64x2_3), vsigns3); - - vector signed char q8y0 = vec_xl( 0, q8); - vector signed char q8y1 = vec_xl(16, q8); - vector signed char q8y2 = vec_xl(32, q8); - vector signed char q8y3 = vec_xl(48, q8); - q8 += 64; - - vector signed short qv0 = vec_add(vec_mule(q2x0, q8y0), vec_mulo(q2x0, q8y0)); - vector signed short qv1 = vec_add(vec_mule(q2x1, q8y1), vec_mulo(q2x1, q8y1)); - vector signed short qv2 = vec_add(vec_mule(q2x2, q8y2), vec_mulo(q2x2, q8y2)); - vector signed short qv3 = vec_add(vec_mule(q2x3, q8y3), vec_mulo(q2x3, q8y3)); - - const uint16_t ls0 = (uint16_t)(sc[0] & 0xf); - const uint16_t ls1 = (uint16_t)(sc[0] >> 4); - const uint16_t ls2 = (uint16_t)(sc[1] & 0xf); - const uint16_t ls3 = (uint16_t)(sc[1] >> 4); - sc += 2; - - vector signed short vscales0 = 
vec_splats((int16_t)(2*ls0+1)); - vector signed short vscales1 = vec_splats((int16_t)(2*ls1+1)); - vector signed short vscales2 = vec_splats((int16_t)(2*ls2+1)); - vector signed short vscales3 = vec_splats((int16_t)(2*ls3+1)); - - vsumi0 = vec_msum(qv0, vscales0, vsumi0); - vsumi1 = vec_msum(qv1, vscales1, vsumi1); - vsumi2 = vec_msum(qv2, vscales2, vsumi2); - vsumi3 = vec_msum(qv3, vscales3, vsumi3); - } - - vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); - vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); - vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); - vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); - } - - vsumf0 = vec_add(vsumf0, vsumf2); - vsumf1 = vec_add(vsumf1, vsumf3); - - vsumf0 = vec_add(vsumf0, vsumf1); - - vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); - vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); - - *s = 0.125f * vec_extract(vsumf0, 0); - -#elif defined(__loongarch_asx) - - static const uint8_t k_mask1[32] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, - 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03 - }; - - static const uint8_t k_mask2[32] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, - 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, - }; - - - const __m128i m4 = __lsx_vreplgr2vr_b(0xf); - const __m128i m1 = __lsx_vreplgr2vr_b(1); - - const __m256i mask1 = __lasx_xvld((const __m256i*)k_mask1, 0); - const __m256i mask2 = __lasx_xvld((const __m256i*)k_mask2, 0); - uint64_t aux64; - - __m256 accumf = (__m256)__lasx_xvldi(0); - for (int i = 0; i < nb; ++i) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; - const uint8_t * GGML_RESTRICT qs = x[i].qs; - const uint8_t * GGML_RESTRICT qh = x[i].qh; - const uint16_t * GGML_RESTRICT signs = (const uint16_t *)(x[i].qs + QK_K/8); - const int8_t * GGML_RESTRICT q8 = y[i].qs; - - __m128i tmp1; - memcpy(&aux64, x[i].scales, 8); - tmp1 = __lsx_vinsgr2vr_d(tmp1, aux64, 0); - tmp1 = __lsx_vinsgr2vr_d(tmp1, aux64 >> 4, 1); - const __m128i scales8 = __lsx_vadd_b(__lsx_vslli_h(__lsx_vand_v(tmp1, m4), 1), m1); - const __m256i scales16 = lasx_ext8_16(scales8); // 0 2 4 6 8 10 12 14 1 3 5 7 9 11 13 15 - - __m256i sumi1 = __lasx_xvldi(0); - __m256i sumi2 = __lasx_xvldi(0); - for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { - const __m256i q8_1 = __lasx_xvld((const __m256i *)q8, 0); q8 += 32; - const __m256i q8_2 = __lasx_xvld((const __m256i *)q8, 0); q8 += 32; - const __m256i q2_1 = lasx_set_d(iq2s_grid[qs[3] | ((qh[ib32+0] << 2) & 0x300)], - iq2s_grid[qs[2] | ((qh[ib32+0] << 4) & 0x300)], - iq2s_grid[qs[1] | ((qh[ib32+0] << 6) & 0x300)], - iq2s_grid[qs[0] | ((qh[ib32+0] << 8) & 0x300)]); - const __m256i q2_2 = lasx_set_d(iq2s_grid[qs[7] | ((qh[ib32+1] << 2) & 0x300)], - iq2s_grid[qs[6] | ((qh[ib32+1] << 4) & 0x300)], - iq2s_grid[qs[5] | ((qh[ib32+1] << 6) & 0x300)], - iq2s_grid[qs[4] | ((qh[ib32+1] << 8) & 0x300)]); - qs += 8; - - __m256i aux256 = __lasx_xvreplgr2vr_w(signs[0] | ((uint32_t) signs[1] << 16)); - aux256 = __lasx_xvand_v(lasx_shuffle_b(aux256,mask1), mask2); - const __m256i s2_1 = __lasx_xvseq_b(aux256, mask2); - const __m256i q8s_1 = __lasx_xvsub_b(__lasx_xvxor_v(s2_1, q8_1), s2_1); - - aux256 = __lasx_xvreplgr2vr_w(signs[2] | ((uint32_t) signs[3] << 16)); - aux256 = __lasx_xvand_v(lasx_shuffle_b(aux256,mask1), mask2); - const __m256i s2_2 = __lasx_xvseq_b(aux256, mask2); - const __m256i q8s_2 = 
__lasx_xvsub_b(__lasx_xvxor_v(s2_2, q8_2), s2_2); - - signs += 4; - - const __m256i dot1 = lasx_maddubs_h(q2_1, q8s_1); // blocks 2*ib32+0, 2*ib32+1 - const __m256i dot2 = lasx_maddubs_h(q2_2, q8s_2); // blocks 2*ib32+2, 2*ib32+3 - - const __m256i p1 = lasx_madd_h(dot1, lasx_shuffle_b(scales16, get_scale_shuffle_k4(ib32+0))); - const __m256i p2 = lasx_madd_h(dot2, lasx_shuffle_b(scales16, get_scale_shuffle_k4(ib32+1))); - sumi1 = __lasx_xvadd_w(sumi1, p1); - sumi2 = __lasx_xvadd_w(sumi2, p2); - } - - accumf = __lasx_xvfmadd_s(__lasx_xvreplfr2vr_s(d), __lasx_xvffint_s_w(__lasx_xvadd_w(sumi1, sumi2)), accumf); - } - - *s = 0.125f * hsum_float_8(accumf); - -#else - - float sumf = 0; - for (int i = 0; i < nb; i++) { - - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; - const int8_t * q8 = y[i].qs; - const uint8_t * qs = x[i].qs; - const uint8_t * qh = x[i].qh; - const uint8_t * signs = qs + QK_K/8; - - int bsum = 0; - for (int ib32 = 0; ib32 < QK_K/32; ++ib32) { - int ls1 = 1 + 2*(x[i].scales[ib32] & 0xf); - int ls2 = 1 + 2*(x[i].scales[ib32] >> 4); - int sumi1 = 0, sumi2 = 0; - for (int l = 0; l < 2; ++l) { - const uint8_t * grid = (const uint8_t *)(iq2s_grid + (qs[l] | (qh[ib32] << (8-2*l) & 0x300))); - for (int j = 0; j < 8; ++j) { - sumi1 += q8[j] * grid[j] * (signs[l] & kmask_iq2xs[j] ? -1 : 1); - } - q8 += 8; - } - for (int l = 2; l < 4; ++l) { - const uint8_t * grid = (const uint8_t *)(iq2s_grid + (qs[l] | (qh[ib32] << (8-2*l) & 0x300))); - for (int j = 0; j < 8; ++j) { - sumi2 += q8[j] * grid[j] * (signs[l] & kmask_iq2xs[j] ? -1 : 1); - } - q8 += 8; - } - bsum += ls1 * sumi1 + ls2 * sumi2; - qs += 4; - signs += 4; - } - - sumf += d * bsum; - } - - *s = 0.125f * sumf; - -#endif - -} - -void ggml_vec_dot_iq3_xxs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { - assert(n % QK_K == 0); - assert(nrc == 1); - UNUSED(nrc); - UNUSED(bx); - UNUSED(by); - UNUSED(bs); - - const block_iq3_xxs * GGML_RESTRICT x = vx; - const block_q8_K * GGML_RESTRICT y = vy; - - const int nb = n / QK_K; - -#if defined(__ARM_NEON) - - const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs; - - uint32_t aux32[2]; - - ggml_int8x16x4_t q3s; - ggml_int8x16x4_t q8b; - - float sumf = 0; - for (int i = 0; i < nb; ++i) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; - const uint8_t * GGML_RESTRICT q3 = x[i].qs; - const uint8_t * GGML_RESTRICT gas = x[i].qs + QK_K/4; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - float sumf1 = 0, sumf2 = 0; - for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { - q8b = ggml_vld1q_s8_x4(q8); q8 += 64; - memcpy(aux32, gas, 2*sizeof(uint32_t)); gas += 2*sizeof(uint32_t); - const uint32x4_t aux32x4_0 = ggml_vld1q_u32(iq3xxs_grid[q3[ 0]], iq3xxs_grid[q3[ 1]], iq3xxs_grid[q3[ 2]], iq3xxs_grid[q3[ 3]]); - const uint32x4_t aux32x4_1 = ggml_vld1q_u32(iq3xxs_grid[q3[ 4]], iq3xxs_grid[q3[ 5]], iq3xxs_grid[q3[ 6]], iq3xxs_grid[q3[ 7]]); - const uint32x4_t aux32x4_2 = ggml_vld1q_u32(iq3xxs_grid[q3[ 8]], iq3xxs_grid[q3[ 9]], iq3xxs_grid[q3[10]], iq3xxs_grid[q3[11]]); - const uint32x4_t aux32x4_3 = ggml_vld1q_u32(iq3xxs_grid[q3[12]], iq3xxs_grid[q3[13]], iq3xxs_grid[q3[14]], iq3xxs_grid[q3[15]]); - q3 += 16; - q3s.val[0] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[0] >> 0) & 127))), vld1_s8((const void *)(signs64 + ((aux32[0] >> 7) & 127)))); - q3s.val[1] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[0] >> 14) & 127))), vld1_s8((const void *)(signs64 + ((aux32[0] >> 21) & 
127)))); - q3s.val[2] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[1] >> 0) & 127))), vld1_s8((const void *)(signs64 + ((aux32[1] >> 7) & 127)))); - q3s.val[3] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[1] >> 14) & 127))), vld1_s8((const void *)(signs64 + ((aux32[1] >> 21) & 127)))); - q3s.val[0] = vmulq_s8(q3s.val[0], vreinterpretq_s8_u32(aux32x4_0)); - q3s.val[1] = vmulq_s8(q3s.val[1], vreinterpretq_s8_u32(aux32x4_1)); - q3s.val[2] = vmulq_s8(q3s.val[2], vreinterpretq_s8_u32(aux32x4_2)); - q3s.val[3] = vmulq_s8(q3s.val[3], vreinterpretq_s8_u32(aux32x4_3)); - const int32x4_t p1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q3s.val[0], q8b.val[0]), q3s.val[1], q8b.val[1]); - const int32x4_t p2 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q3s.val[2], q8b.val[2]), q3s.val[3], q8b.val[3]); - sumf1 += vaddvq_s32(p1) * (0.5f + (aux32[0] >> 28)); - sumf2 += vaddvq_s32(p2) * (0.5f + (aux32[1] >> 28)); - } - sumf += d*(sumf1 + sumf2); - } - *s = 0.5f * sumf; - -#elif defined(__AVX2__) - - const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs; - - uint32_t aux32[2]; - - __m256 accumf = _mm256_setzero_ps(); - for (int i = 0; i < nb; ++i) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; - const uint8_t * GGML_RESTRICT q3 = x[i].qs; - const uint8_t * GGML_RESTRICT gas = x[i].qs + QK_K/4; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - __m256i sumi1 = _mm256_setzero_si256(); - __m256i sumi2 = _mm256_setzero_si256(); - for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { - const __m256i q8_1 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32; - const __m256i q8_2 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32; - const __m256i q2_1 = _mm256_set_epi32(iq3xxs_grid[q3[7]], iq3xxs_grid[q3[6]], iq3xxs_grid[q3[5]], iq3xxs_grid[q3[4]], - iq3xxs_grid[q3[3]], iq3xxs_grid[q3[2]], iq3xxs_grid[q3[1]], iq3xxs_grid[q3[0]]); - q3 += 8; - const __m256i q2_2 = _mm256_set_epi32(iq3xxs_grid[q3[7]], iq3xxs_grid[q3[6]], iq3xxs_grid[q3[5]], iq3xxs_grid[q3[4]], - iq3xxs_grid[q3[3]], iq3xxs_grid[q3[2]], iq3xxs_grid[q3[1]], iq3xxs_grid[q3[0]]); - q3 += 8; - memcpy(aux32, gas, 8); gas += 8; - const __m256i s2_1 = _mm256_set_epi64x(signs64[(aux32[0] >> 21) & 127], signs64[(aux32[0] >> 14) & 127], - signs64[(aux32[0] >> 7) & 127], signs64[(aux32[0] >> 0) & 127]); - const __m256i s2_2 = _mm256_set_epi64x(signs64[(aux32[1] >> 21) & 127], signs64[(aux32[1] >> 14) & 127], - signs64[(aux32[1] >> 7) & 127], signs64[(aux32[1] >> 0) & 127]); - const __m256i q8s_1 = _mm256_sign_epi8(q8_1, s2_1); - const __m256i q8s_2 = _mm256_sign_epi8(q8_2, s2_2); - const __m256i dot1 = _mm256_maddubs_epi16(q2_1, q8s_1); - const __m256i dot2 = _mm256_maddubs_epi16(q2_2, q8s_2); - const uint16_t ls1 = aux32[0] >> 28; - const uint16_t ls2 = aux32[1] >> 28; - const __m256i p1 = _mm256_madd_epi16(dot1, _mm256_set1_epi16(2*ls1+1)); - const __m256i p2 = _mm256_madd_epi16(dot2, _mm256_set1_epi16(2*ls2+1)); - sumi1 = _mm256_add_epi32(sumi1, p1); - sumi2 = _mm256_add_epi32(sumi2, p2); - } - - accumf = _mm256_fmadd_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(_mm256_add_epi32(sumi1, sumi2)), accumf); - - } - - *s = 0.25f * hsum_float_8(accumf); - -#elif defined(__AVX__) - const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs; - - uint32_t aux32[2]; - - __m256 accumf = _mm256_setzero_ps(); - for (int i = 0; i < nb; ++i) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; - const uint8_t * GGML_RESTRICT q3 = x[i].qs; - const uint8_t * GGML_RESTRICT gas = x[i].qs + QK_K/4; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - 
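// For reference, the per-32-weight decode all of these SIMD paths implement
// is the one in the scalar #else branch at the end of this function, restated
// here as a standalone helper (the name iq3xxs_dot_32 is illustrative): eight
// q3 bytes index 4-byte rows of iq3xxs_grid, and one packed aux32 word carries
// four 7-bit sign-pattern indices plus a 4-bit scale in its top nibble.
static inline int iq3xxs_dot_32(const uint8_t * q3, uint32_t aux32, const int8_t * q8) {
    int sumi = 0;
    for (int l = 0; l < 4; ++l) {
        const uint8_t * grid1 = (const uint8_t *)(iq3xxs_grid + q3[2*l+0]);
        const uint8_t * grid2 = (const uint8_t *)(iq3xxs_grid + q3[2*l+1]);
        const uint8_t signs = ksigns_iq2xs[(aux32 >> 7*l) & 127];
        for (int j = 0; j < 4; ++j) {
            sumi += grid1[j] * q8[j+0] * (signs & kmask_iq2xs[j+0] ? -1 : 1);
            sumi += grid2[j] * q8[j+4] * (signs & kmask_iq2xs[j+4] ? -1 : 1);
        }
        q8 += 8;
    }
    return sumi; // caller scales by (2*(aux32 >> 28) + 1), then by 0.25f * d
}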
__m128i sumi1_0 = _mm_setzero_si128(); - __m128i sumi1_1 = _mm_setzero_si128(); - __m128i sumi2_0 = _mm_setzero_si128(); - __m128i sumi2_1 = _mm_setzero_si128(); - for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { - const __m128i q8_1_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; - const __m128i q8_1_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; - const __m128i q8_2_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; - const __m128i q8_2_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; - const __m128i q2_1_0 = _mm_set_epi32(iq3xxs_grid[q3[3]], iq3xxs_grid[q3[2]], iq3xxs_grid[q3[1]], iq3xxs_grid[q3[0]]); - const __m128i q2_1_1 = _mm_set_epi32(iq3xxs_grid[q3[7]], iq3xxs_grid[q3[6]], iq3xxs_grid[q3[5]], iq3xxs_grid[q3[4]]); - q3 += 8; - const __m128i q2_2_0 = _mm_set_epi32(iq3xxs_grid[q3[3]], iq3xxs_grid[q3[2]], iq3xxs_grid[q3[1]], iq3xxs_grid[q3[0]]); - const __m128i q2_2_1 = _mm_set_epi32(iq3xxs_grid[q3[7]], iq3xxs_grid[q3[6]], iq3xxs_grid[q3[5]], iq3xxs_grid[q3[4]]); - q3 += 8; - memcpy(aux32, gas, 8); gas += 8; - const __m128i s2_1_0 = _mm_set_epi64x(signs64[(aux32[0] >> 7) & 127], signs64[(aux32[0] >> 0) & 127]); - const __m128i s2_1_1 = _mm_set_epi64x(signs64[(aux32[0] >> 21) & 127], signs64[(aux32[0] >> 14) & 127]); - const __m128i s2_2_0 = _mm_set_epi64x(signs64[(aux32[1] >> 7) & 127], signs64[(aux32[1] >> 0) & 127]); - const __m128i s2_2_1 = _mm_set_epi64x(signs64[(aux32[1] >> 21) & 127], signs64[(aux32[1] >> 14) & 127]); - const __m128i q8s_1_0 = _mm_sign_epi8(q8_1_0, s2_1_0); - const __m128i q8s_1_1 = _mm_sign_epi8(q8_1_1, s2_1_1); - const __m128i q8s_2_0 = _mm_sign_epi8(q8_2_0, s2_2_0); - const __m128i q8s_2_1 = _mm_sign_epi8(q8_2_1, s2_2_1); - const __m128i dot1_0 = _mm_maddubs_epi16(q2_1_0, q8s_1_0); - const __m128i dot1_1 = _mm_maddubs_epi16(q2_1_1, q8s_1_1); - const __m128i dot2_0 = _mm_maddubs_epi16(q2_2_0, q8s_2_0); - const __m128i dot2_1 = _mm_maddubs_epi16(q2_2_1, q8s_2_1); - const uint16_t ls1 = aux32[0] >> 28; - const uint16_t ls2 = aux32[1] >> 28; - const __m128i p1_0 = _mm_madd_epi16(dot1_0, _mm_set1_epi16(2*ls1+1)); - const __m128i p1_1 = _mm_madd_epi16(dot1_1, _mm_set1_epi16(2*ls1+1)); - const __m128i p2_0 = _mm_madd_epi16(dot2_0, _mm_set1_epi16(2*ls2+1)); - const __m128i p2_1 = _mm_madd_epi16(dot2_1, _mm_set1_epi16(2*ls2+1)); - sumi1_0 = _mm_add_epi32(sumi1_0, p1_0); - sumi1_1 = _mm_add_epi32(sumi1_1, p1_1); - sumi2_0 = _mm_add_epi32(sumi2_0, p2_0); - sumi2_1 = _mm_add_epi32(sumi2_1, p2_1); - } - - accumf = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(MM256_SET_M128I(_mm_add_epi32(sumi1_1, sumi2_1), _mm_add_epi32(sumi1_0, sumi2_0)))), accumf); - - } - - *s = 0.25f * hsum_float_8(accumf); - -#elif defined(__POWER9_VECTOR__) - const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs; - - const vector int v0 = vec_splats((int32_t)0); - - vector float vsumf0 = vec_splats(0.0f); - vector float vsumf1 = vec_splats(0.0f); - vector float vsumf2 = vec_splats(0.0f); - vector float vsumf3 = vec_splats(0.0f); - - for (int i = 0; i < nb; ++i) { - vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d)); - vector float vyd = vec_splats(y[i].d); - vector float vd = vec_mul(vxd, vyd); - - vector signed int vsumi0 = v0; - vector signed int vsumi1 = v0; - vector signed int vsumi2 = v0; - vector signed int vsumi3 = v0; - - const uint8_t * GGML_RESTRICT q3 = x[i].qs; - const uint32_t * GGML_RESTRICT signs = (const uint32_t *)(x[i].qs + QK_K/4); - const int8_t * GGML_RESTRICT q8 = y[i].qs; - -#pragma GCC unroll 1 - for (int j = 0; j < QK_K/32; j += 
2) { - __builtin_prefetch(q3, 0, 1); - __builtin_prefetch(q8, 0, 1); - - vector unsigned int aux32x4_0 = {iq3xxs_grid[q3[ 0]], iq3xxs_grid[q3[ 1]], iq3xxs_grid[q3[ 2]], iq3xxs_grid[q3[ 3]]}; - vector unsigned int aux32x4_1 = {iq3xxs_grid[q3[ 4]], iq3xxs_grid[q3[ 5]], iq3xxs_grid[q3[ 6]], iq3xxs_grid[q3[ 7]]}; - vector unsigned int aux32x4_2 = {iq3xxs_grid[q3[ 8]], iq3xxs_grid[q3[ 9]], iq3xxs_grid[q3[10]], iq3xxs_grid[q3[11]]}; - vector unsigned int aux32x4_3 = {iq3xxs_grid[q3[12]], iq3xxs_grid[q3[13]], iq3xxs_grid[q3[14]], iq3xxs_grid[q3[15]]}; - q3 += 16; - - vector unsigned long long aux64x2_0 = {(uint64_t)(signs64[(signs[0] >> 0) & 127]), (uint64_t)(signs64[(signs[0] >> 7) & 127])}; - vector unsigned long long aux64x2_1 = {(uint64_t)(signs64[(signs[0] >> 14) & 127]), (uint64_t)(signs64[(signs[0] >> 21) & 127])}; - vector unsigned long long aux64x2_2 = {(uint64_t)(signs64[(signs[1] >> 0) & 127]), (uint64_t)(signs64[(signs[1] >> 7) & 127])}; - vector unsigned long long aux64x2_3 = {(uint64_t)(signs64[(signs[1] >> 14) & 127]), (uint64_t)(signs64[(signs[1] >> 21) & 127])}; - - vector signed char q3x0 = vec_mul((vector signed char)aux64x2_0, (vector signed char)aux32x4_0); - vector signed char q3x1 = vec_mul((vector signed char)aux64x2_1, (vector signed char)aux32x4_1); - vector signed char q3x2 = vec_mul((vector signed char)aux64x2_2, (vector signed char)aux32x4_2); - vector signed char q3x3 = vec_mul((vector signed char)aux64x2_3, (vector signed char)aux32x4_3); - - vector signed char q8y0 = vec_xl( 0, q8); - vector signed char q8y1 = vec_xl(16, q8); - vector signed char q8y2 = vec_xl(32, q8); - vector signed char q8y3 = vec_xl(48, q8); - q8 += 64; - - vector signed short qv0 = vec_add(vec_mule(q3x0, q8y0), vec_mulo(q3x0, q8y0)); - vector signed short qv1 = vec_add(vec_mule(q3x1, q8y1), vec_mulo(q3x1, q8y1)); - vector signed short qv2 = vec_add(vec_mule(q3x2, q8y2), vec_mulo(q3x2, q8y2)); - vector signed short qv3 = vec_add(vec_mule(q3x3, q8y3), vec_mulo(q3x3, q8y3)); - - const uint16_t ls0 = (uint16_t)(signs[0] >> 28); - const uint16_t ls1 = (uint16_t)(signs[1] >> 28); - signs += 2; - - vector signed short vscales01 = (vector signed short)vec_splats((uint16_t)(2*ls0+1)); - vector signed short vscales23 = (vector signed short)vec_splats((uint16_t)(2*ls1+1)); - - vsumi0 = vec_msum(qv0, vscales01, vsumi0); - vsumi1 = vec_msum(qv1, vscales01, vsumi1); - vsumi2 = vec_msum(qv2, vscales23, vsumi2); - vsumi3 = vec_msum(qv3, vscales23, vsumi3); - } - - vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); - vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); - vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); - vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); - } - - vsumf0 = vec_add(vsumf0, vsumf2); - vsumf1 = vec_add(vsumf1, vsumf3); - - vsumf0 = vec_add(vsumf0, vsumf1); - - vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); - vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); - - *s = 0.25f * vec_extract(vsumf0, 0); - -#elif defined(__loongarch_asx) - - const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs; - - uint32_t aux32[2]; - - __m256 accumf = (__m256)__lasx_xvldi(0); - for (int i = 0; i < nb; ++i) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; - const uint8_t * GGML_RESTRICT q3 = x[i].qs; - const uint8_t * GGML_RESTRICT gas = x[i].qs + QK_K/4; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - __m256i sumi1 = __lasx_xvldi(0); - __m256i sumi2 = __lasx_xvldi(0); - for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { - const __m256i q8_1 = __lasx_xvld((const __m256i *)q8, 0); 
q8 += 32; - const __m256i q8_2 = __lasx_xvld((const __m256i *)q8, 0); q8 += 32; - const __m256i q2_1 = lasx_set_w(iq3xxs_grid[q3[7]], iq3xxs_grid[q3[6]], iq3xxs_grid[q3[5]], iq3xxs_grid[q3[4]], - iq3xxs_grid[q3[3]], iq3xxs_grid[q3[2]], iq3xxs_grid[q3[1]], iq3xxs_grid[q3[0]]); - q3 += 8; - const __m256i q2_2 = lasx_set_w(iq3xxs_grid[q3[7]], iq3xxs_grid[q3[6]], iq3xxs_grid[q3[5]], iq3xxs_grid[q3[4]], - iq3xxs_grid[q3[3]], iq3xxs_grid[q3[2]], iq3xxs_grid[q3[1]], iq3xxs_grid[q3[0]]); - q3 += 8; - memcpy(aux32, gas, 8); gas += 8; - - const __m256i s2_1 = lasx_set_d(signs64[(aux32[0] >> 21) & 127], signs64[(aux32[0] >> 14) & 127], - signs64[(aux32[0] >> 7) & 127], signs64[(aux32[0] >> 0) & 127]); - const __m256i s2_2 = lasx_set_d(signs64[(aux32[1] >> 21) & 127], signs64[(aux32[1] >> 14) & 127], - signs64[(aux32[1] >> 7) & 127], signs64[(aux32[1] >> 0) & 127]); - const __m256i q8s_1 = __lasx_xvsigncov_b(s2_1, q8_1); - const __m256i q8s_2 = __lasx_xvsigncov_b(s2_2, q8_2); - const __m256i dot1 = lasx_maddubs_h(q2_1, q8s_1); - const __m256i dot2 = lasx_maddubs_h(q2_2, q8s_2); - const uint16_t ls1 = aux32[0] >> 28; - const uint16_t ls2 = aux32[1] >> 28; - - const __m256i p1 = lasx_madd_h(dot1, __lasx_xvreplgr2vr_h(2*ls1+1)); - const __m256i p2 = lasx_madd_h(dot2, __lasx_xvreplgr2vr_h(2*ls2+1)); - sumi1 = __lasx_xvadd_w(sumi1, p1); - sumi2 = __lasx_xvadd_w(sumi2, p2); - } - - accumf = __lasx_xvfmadd_s(__lasx_xvreplfr2vr_s(d), __lasx_xvffint_s_w(__lasx_xvadd_w(sumi1, sumi2)), accumf); - } - - *s = 0.25f * hsum_float_8(accumf); - -#else - - uint32_t aux32; - - float sumf = 0.f; - for (int i = 0; i < nb; ++i) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; - const uint8_t * GGML_RESTRICT q3 = x[i].qs; - const uint8_t * GGML_RESTRICT gas = x[i].qs + QK_K/4; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - int32_t bsum = 0; - for (int ib32 = 0; ib32 < QK_K/32; ++ib32) { - memcpy(&aux32, gas, sizeof(uint32_t)); gas += sizeof(uint32_t); - const uint32_t ls = 2*(aux32 >> 28) + 1; - int32_t sumi = 0; - for (int l = 0; l < 4; ++l) { - const uint8_t * grid1 = (const uint8_t *)(iq3xxs_grid + q3[2*l+0]); - const uint8_t * grid2 = (const uint8_t *)(iq3xxs_grid + q3[2*l+1]); - const uint8_t signs = ksigns_iq2xs[(aux32 >> 7*l) & 127]; - for (int j = 0; j < 4; ++j) { - sumi += grid1[j] * q8[j+0] * (signs & kmask_iq2xs[j+0] ? -1 : 1); - sumi += grid2[j] * q8[j+4] * (signs & kmask_iq2xs[j+4] ? 
-1 : 1); - } - q8 += 8; - } - q3 += 8; - bsum += sumi * ls; - } - sumf += d * bsum; - } - *s = 0.25f * sumf; -#endif -} - -void ggml_vec_dot_iq3_s_q8_K (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { - assert(n % QK_K == 0); - assert(nrc == 1); - UNUSED(nrc); - UNUSED(bx); - UNUSED(by); - UNUSED(bs); - - const block_iq3_s * GGML_RESTRICT x = vx; - const block_q8_K * GGML_RESTRICT y = vy; - - const int nb = n / QK_K; - -#if defined(__ARM_NEON) - - typedef union { - uint16x8_t vec_index; - uint16_t index[8]; - } vec_index_t; - - static const uint8_t k_mask1[32] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, - 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03 - }; - - static const uint8_t k_mask2[16] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,}; - - static const int16_t k_shift[8] = {8, 7, 6, 5, 4, 3, 2, 1}; - - const ggml_uint8x16x2_t mask1 = ggml_vld1q_u8_x2(k_mask1); - const uint8x16_t mask2 = vld1q_u8(k_mask2); - - const int16x8_t hshift = vld1q_s16(k_shift); - const uint16x8_t m256 = vdupq_n_u16(256); - const uint8x16_t m1 = vdupq_n_u8(1); - - uint8x16x2_t vs; - ggml_int8x16x4_t q3s; - ggml_int8x16x4_t q8b; - vec_index_t idx; - - uint32_t scales32[2]; - const uint8_t * scales8 = (const uint8_t *)scales32; - - float sumf = 0; - for (int i = 0; i < nb; ++i) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; - const uint8_t * GGML_RESTRICT qs = x[i].qs; - const uint8_t * GGML_RESTRICT qh = x[i].qh; - const uint16_t * GGML_RESTRICT signs = (const uint16_t *)x[i].signs; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - - memcpy(scales32, x[i].scales, 4); - scales32[1] = (((scales32[0] >> 4) & 0x0f0f0f0f) << 1) | 0x01010101; - scales32[0] = ((scales32[0] & 0x0f0f0f0f) << 1) | 0x01010101; - - int sumi1 = 0, sumi2 = 0; - for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { - q8b = ggml_vld1q_s8_x4(q8); q8 += 64; - - const uint8x16_t idx_l = vld1q_u8(qs); qs += 16; - idx.vec_index = vorrq_u16(vmovl_u8(vget_low_u8 (idx_l)), vandq_u16(vshlq_u16(vdupq_n_u16(qh[ib32+0]), hshift), m256)); - const uint32x4_t aux32x4_0 = ggml_vld1q_u32(iq3s_grid[idx.index[0]], iq3s_grid[idx.index[1]], - iq3s_grid[idx.index[2]], iq3s_grid[idx.index[3]]); - const uint32x4_t aux32x4_1 = ggml_vld1q_u32(iq3s_grid[idx.index[4]], iq3s_grid[idx.index[5]], - iq3s_grid[idx.index[6]], iq3s_grid[idx.index[7]]); - idx.vec_index = vorrq_u16(vmovl_u8(vget_high_u8(idx_l)), vandq_u16(vshlq_u16(vdupq_n_u16(qh[ib32+1]), hshift), m256)); - const uint32x4_t aux32x4_2 = ggml_vld1q_u32(iq3s_grid[idx.index[0]], iq3s_grid[idx.index[1]], - iq3s_grid[idx.index[2]], iq3s_grid[idx.index[3]]); - const uint32x4_t aux32x4_3 = ggml_vld1q_u32(iq3s_grid[idx.index[4]], iq3s_grid[idx.index[5]], - iq3s_grid[idx.index[6]], iq3s_grid[idx.index[7]]); - - - vs.val[0] = vreinterpretq_u8_u32(vdupq_n_u32(signs[0] | ((uint32_t) signs[1] << 16))); - vs.val[1] = vandq_u8(ggml_vqtbl1q_u8(vs.val[0], mask1.val[1]), mask2); - vs.val[0] = vandq_u8(ggml_vqtbl1q_u8(vs.val[0], mask1.val[0]), mask2); - vs.val[0] = vorrq_u8(vceqq_u8(vs.val[0], mask2), m1); - vs.val[1] = vorrq_u8(vceqq_u8(vs.val[1], mask2), m1); - - q3s.val[0] = vmulq_s8(vreinterpretq_s8_u8(vs.val[0]), vreinterpretq_s8_u32(aux32x4_0)); - q3s.val[1] = vmulq_s8(vreinterpretq_s8_u8(vs.val[1]), vreinterpretq_s8_u32(aux32x4_1)); - - vs.val[0] = 
vreinterpretq_u8_u32(vdupq_n_u32(signs[2] | ((uint32_t) signs[3] << 16)));
-            vs.val[1] = vandq_u8(ggml_vqtbl1q_u8(vs.val[0], mask1.val[1]), mask2);
-            vs.val[0] = vandq_u8(ggml_vqtbl1q_u8(vs.val[0], mask1.val[0]), mask2);
-            vs.val[0] = vorrq_u8(vceqq_u8(vs.val[0], mask2), m1);
-            vs.val[1] = vorrq_u8(vceqq_u8(vs.val[1], mask2), m1);
-
-            signs += 4;
-
-            q3s.val[2] = vmulq_s8(vreinterpretq_s8_u8(vs.val[0]), vreinterpretq_s8_u32(aux32x4_2));
-            q3s.val[3] = vmulq_s8(vreinterpretq_s8_u8(vs.val[1]), vreinterpretq_s8_u32(aux32x4_3));
-
-            const int32x4_t p1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q3s.val[0], q8b.val[0]), q3s.val[1], q8b.val[1]);
-            const int32x4_t p2 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q3s.val[2], q8b.val[2]), q3s.val[3], q8b.val[3]);
-
-            sumi1 += vaddvq_s32(p1) * scales8[ib32/2+0];
-            sumi2 += vaddvq_s32(p2) * scales8[ib32/2+4];
-        }
-        sumf += d*(sumi1 + sumi2);
-    }
-    *s = sumf;
-
-#elif defined(__AVX2__)
-
-    static const uint8_t k_mask1[32] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
-                                        0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03
-    };
-
-    static const uint8_t k_mask2[32] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,
-                                        0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,
-    };
-
-    const __m256i mask1 = _mm256_loadu_si256((const __m256i*)k_mask1);
-    const __m256i mask2 = _mm256_loadu_si256((const __m256i*)k_mask2);
-
-    const __m256i idx_shift = _mm256_set_epi32(1, 2, 3, 4, 5, 6, 7, 8);
-    const __m256i idx_mask = _mm256_set1_epi32(256);
-
-    typedef union {
-        __m256i vec[2];
-        uint32_t index[16];
-    } index_t;
-
-    index_t idx;
-
-    __m256 accumf = _mm256_setzero_ps();
-    for (int i = 0; i < nb; ++i) {
-        const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
-        const uint8_t * GGML_RESTRICT qs = x[i].qs;
-        const uint8_t * GGML_RESTRICT qh = x[i].qh;
-        const uint16_t * GGML_RESTRICT signs = (const uint16_t *)x[i].signs;
-        const int8_t * GGML_RESTRICT q8 = y[i].qs;
-        __m256i sumi1 = _mm256_setzero_si256();
-        __m256i sumi2 = _mm256_setzero_si256();
-        for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) {
-            const __m256i q8_1 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32;
-            const __m256i q8_2 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32;
-            const __m256i idx_l = _mm256_cvtepu8_epi16(_mm_loadu_si128((const __m128i *)qs)); qs += 16;
-            idx.vec[0] = _mm256_set1_epi32(qh[ib32+0]);
-            idx.vec[1] = _mm256_set1_epi32(qh[ib32+1]);
-            idx.vec[0] = _mm256_and_si256(_mm256_sllv_epi32(idx.vec[0], idx_shift), idx_mask);
-            idx.vec[1] = _mm256_and_si256(_mm256_sllv_epi32(idx.vec[1], idx_shift), idx_mask);
-            idx.vec[0] = _mm256_or_si256(idx.vec[0], _mm256_cvtepi16_epi32(_mm256_castsi256_si128(idx_l)));
-            idx.vec[1] = _mm256_or_si256(idx.vec[1], _mm256_cvtepi16_epi32(_mm256_extractf128_si256(idx_l, 1)));
-
-            // At least on my CPU (Ryzen 7950X), using _mm256_i32gather_epi32 is slower than _mm256_set_epi32. Strange.
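The sign handling in this AVX2 path (and in the LASX mirror further down) never branches: the 32 sign bits are broadcast, shuffled so each byte lane sees its own bit, ANDed against a per-lane bit mask, compared for equality, and the resulting all-ones/all-zeros bytes drive a conditional negate via xor-and-subtract. A minimal scalar sketch of the same trick; apply_sign_bits is a hypothetical name, not part of this file:

    #include <stdint.h>

    /* Scalar model of the cmpeq/xor/sub sequence below:
       out[k] = -q8[k] where bit k of the sign bitmap is set, else q8[k]. */
    static void apply_sign_bits(int8_t out[32], const int8_t q8[32], uint32_t bits) {
        for (int k = 0; k < 32; ++k) {
            int8_t s = (bits >> k) & 1 ? -1 : 0;  /* cmpeq: all-ones byte when the bit is set */
            out[k] = (int8_t)((q8[k] ^ s) - s);   /* xor+sub: conditional two's-complement negate */
        }
    }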
- //const __m256i q2_1 = _mm256_i32gather_epi32((const int *)iq3s_grid, idx.vec[0], 4); - //const __m256i q2_2 = _mm256_i32gather_epi32((const int *)iq3s_grid, idx.vec[1], 4); - const __m256i q2_1 = _mm256_set_epi32( - iq3s_grid[idx.index[7]], iq3s_grid[idx.index[6]], iq3s_grid[idx.index[5]], iq3s_grid[idx.index[4]], - iq3s_grid[idx.index[3]], iq3s_grid[idx.index[2]], iq3s_grid[idx.index[1]], iq3s_grid[idx.index[0]] - ); - const __m256i q2_2 = _mm256_set_epi32( - iq3s_grid[idx.index[15]], iq3s_grid[idx.index[14]], iq3s_grid[idx.index[13]], iq3s_grid[idx.index[12]], - iq3s_grid[idx.index[11]], iq3s_grid[idx.index[10]], iq3s_grid[idx.index[ 9]], iq3s_grid[idx.index[ 8]] - ); - - __m256i aux256 = _mm256_set1_epi32(signs[0] | (signs[1] << 16)); - aux256 = _mm256_and_si256(_mm256_shuffle_epi8(aux256,mask1), mask2); - const __m256i s2_1 = _mm256_cmpeq_epi8(aux256, mask2); - const __m256i q8s_1 = _mm256_sub_epi8(_mm256_xor_si256(s2_1, q8_1), s2_1); - - aux256 = _mm256_set1_epi32(signs[2] | (signs[3] << 16)); - aux256 = _mm256_and_si256(_mm256_shuffle_epi8(aux256,mask1), mask2); - const __m256i s2_2 = _mm256_cmpeq_epi8(aux256, mask2); - const __m256i q8s_2 = _mm256_sub_epi8(_mm256_xor_si256(s2_2, q8_2), s2_2); - - signs += 4; - - const __m256i dot1 = _mm256_maddubs_epi16(q2_1, q8s_1); - const __m256i dot2 = _mm256_maddubs_epi16(q2_2, q8s_2); - const uint16_t ls1 = x[i].scales[ib32/2] & 0xf; - const uint16_t ls2 = x[i].scales[ib32/2] >> 4; - const __m256i p1 = _mm256_madd_epi16(dot1, _mm256_set1_epi16(2*ls1+1)); - const __m256i p2 = _mm256_madd_epi16(dot2, _mm256_set1_epi16(2*ls2+1)); - sumi1 = _mm256_add_epi32(sumi1, p1); - sumi2 = _mm256_add_epi32(sumi2, p2); - } - - accumf = _mm256_fmadd_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(_mm256_add_epi32(sumi1, sumi2)), accumf); - - } - - *s = hsum_float_8(accumf); - -#elif defined(__AVX__) - static const uint8_t k_mask1[32] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, - 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03 - }; - - static const uint8_t k_mask2[32] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, - 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, - }; - - const __m128i mask1_0 = _mm_loadu_si128((const __m128i*)k_mask1); - const __m128i mask1_1 = _mm_loadu_si128((const __m128i*)k_mask1 + 1); - const __m128i mask2_0 = _mm_loadu_si128((const __m128i*)k_mask2); - const __m128i mask2_1 = _mm_loadu_si128((const __m128i*)k_mask2 + 1); - - const __m128i idx_mul_0 = _mm_set_epi32(32, 64, 128, 256); - const __m128i idx_mul_1 = _mm_set_epi32(2, 4, 8, 16); - const __m128i idx_mask = _mm_set1_epi32(256); - - typedef union { - __m128i vec[4]; - uint32_t index[16]; - } index_t; - - index_t idx; - - __m256 accumf = _mm256_setzero_ps(); - for (int i = 0; i < nb; ++i) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; - const uint8_t * GGML_RESTRICT qs = x[i].qs; - const uint8_t * GGML_RESTRICT qh = x[i].qh; - const uint16_t * GGML_RESTRICT signs = (const uint16_t *)x[i].signs; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - __m128i sumi1_0 = _mm_setzero_si128(); - __m128i sumi1_1 = _mm_setzero_si128(); - __m128i sumi2_0 = _mm_setzero_si128(); - __m128i sumi2_1 = _mm_setzero_si128(); - for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { - const __m128i q8_1_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; - const __m128i q8_1_1 = _mm_loadu_si128((const __m128i 
*)q8); q8 += 16; - const __m128i q8_2_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; - const __m128i q8_2_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; - const __m128i qs_tmp = _mm_loadu_si128((const __m128i *)qs); - const __m128i idx_l_0 = _mm_cvtepu8_epi16(qs_tmp); - const __m128i idx_l_1 = _mm_cvtepu8_epi16(_mm_srli_si128(qs_tmp, 8)); qs += 16; - idx.vec[0] = _mm_set1_epi32(qh[ib32+0]); - idx.vec[1] = idx.vec[0]; - idx.vec[2] = _mm_set1_epi32(qh[ib32+1]); - idx.vec[3] = idx.vec[2]; - - idx.vec[0] = _mm_and_si128(_mm_mullo_epi32(idx.vec[0], idx_mul_0), idx_mask); - idx.vec[1] = _mm_and_si128(_mm_mullo_epi32(idx.vec[1], idx_mul_1), idx_mask); - idx.vec[2] = _mm_and_si128(_mm_mullo_epi32(idx.vec[2], idx_mul_0), idx_mask); - idx.vec[3] = _mm_and_si128(_mm_mullo_epi32(idx.vec[3], idx_mul_1), idx_mask); - - idx.vec[0] = _mm_or_si128(idx.vec[0], _mm_cvtepi16_epi32(idx_l_0)); - idx.vec[1] = _mm_or_si128(idx.vec[1], _mm_cvtepi16_epi32(_mm_srli_si128(idx_l_0, 8))); - idx.vec[2] = _mm_or_si128(idx.vec[2], _mm_cvtepi16_epi32(idx_l_1)); - idx.vec[3] = _mm_or_si128(idx.vec[3], _mm_cvtepi16_epi32(_mm_srli_si128(idx_l_1, 8))); - - const __m128i q2_1_0 = _mm_set_epi32(iq3s_grid[idx.index[3]], iq3s_grid[idx.index[2]], iq3s_grid[idx.index[1]], iq3s_grid[idx.index[0]]); - const __m128i q2_1_1 = _mm_set_epi32(iq3s_grid[idx.index[7]], iq3s_grid[idx.index[6]], iq3s_grid[idx.index[5]], iq3s_grid[idx.index[4]]); - const __m128i q2_2_0 = _mm_set_epi32(iq3s_grid[idx.index[11]], iq3s_grid[idx.index[10]], iq3s_grid[idx.index[9]], iq3s_grid[idx.index[8]]); - const __m128i q2_2_1 = _mm_set_epi32(iq3s_grid[idx.index[15]], iq3s_grid[idx.index[14]], iq3s_grid[idx.index[13]], iq3s_grid[idx.index[12]]); - - __m128i aux128_0 = _mm_set1_epi32(signs[0] | (signs[1] << 16)); - __m128i aux128_1 = aux128_0; - aux128_0 = _mm_and_si128(_mm_shuffle_epi8(aux128_0,mask1_0), mask2_0); - aux128_1 = _mm_and_si128(_mm_shuffle_epi8(aux128_1,mask1_1), mask2_1); - const __m128i s2_1_0 = _mm_cmpeq_epi8(aux128_0, mask2_0); - const __m128i s2_1_1 = _mm_cmpeq_epi8(aux128_1, mask2_1); - const __m128i q8s_1_0 = _mm_sub_epi8(_mm_xor_si128(s2_1_0, q8_1_0), s2_1_0); - const __m128i q8s_1_1 = _mm_sub_epi8(_mm_xor_si128(s2_1_1, q8_1_1), s2_1_1); - - aux128_0 = _mm_set1_epi32(signs[2] | (signs[3] << 16)); - aux128_1 = aux128_0; - aux128_0 = _mm_and_si128(_mm_shuffle_epi8(aux128_0,mask1_0), mask2_0); - aux128_1 = _mm_and_si128(_mm_shuffle_epi8(aux128_1,mask1_1), mask2_1); - const __m128i s2_2_0 = _mm_cmpeq_epi8(aux128_0, mask2_0); - const __m128i s2_2_1 = _mm_cmpeq_epi8(aux128_1, mask2_1); - const __m128i q8s_2_0 = _mm_sub_epi8(_mm_xor_si128(s2_2_0, q8_2_0), s2_2_0); - const __m128i q8s_2_1 = _mm_sub_epi8(_mm_xor_si128(s2_2_1, q8_2_1), s2_2_1); - - signs += 4; - - const __m128i dot1_0 = _mm_maddubs_epi16(q2_1_0, q8s_1_0); - const __m128i dot1_1 = _mm_maddubs_epi16(q2_1_1, q8s_1_1); - const __m128i dot2_0 = _mm_maddubs_epi16(q2_2_0, q8s_2_0); - const __m128i dot2_1 = _mm_maddubs_epi16(q2_2_1, q8s_2_1); - const uint16_t ls1 = x[i].scales[ib32/2] & 0xf; - const uint16_t ls2 = x[i].scales[ib32/2] >> 4; - const __m128i p1_0 = _mm_madd_epi16(dot1_0, _mm_set1_epi16(2*ls1+1)); - const __m128i p1_1 = _mm_madd_epi16(dot1_1, _mm_set1_epi16(2*ls1+1)); - const __m128i p2_0 = _mm_madd_epi16(dot2_0, _mm_set1_epi16(2*ls2+1)); - const __m128i p2_1 = _mm_madd_epi16(dot2_1, _mm_set1_epi16(2*ls2+1)); - sumi1_0 = _mm_add_epi32(sumi1_0, p1_0); - sumi1_1 = _mm_add_epi32(sumi1_1, p1_1); - sumi2_0 = _mm_add_epi32(sumi2_0, p2_0); - sumi2_1 = _mm_add_epi32(sumi2_1, 
p2_1); - } - - accumf = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(MM256_SET_M128I(_mm_add_epi32(sumi1_1, sumi2_1), _mm_add_epi32(sumi1_0, sumi2_0)))), accumf); - - } - - *s = hsum_float_8(accumf); - -#elif defined(__POWER9_VECTOR__) - static const uint8_t k_mask1[32] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, - 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03 - }; - - static const uint8_t k_mask2[16] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,}; - - const vector int v0 = vec_splats((int32_t)0); - - vector float vsumf0 = vec_splats(0.0f); - vector float vsumf1 = vec_splats(0.0f); - vector float vsumf2 = vec_splats(0.0f); - vector float vsumf3 = vec_splats(0.0f); - - const vector unsigned char mask0 = vec_xl( 0, k_mask1); - const vector unsigned char mask1 = vec_xl(16, k_mask1); - const vector signed char mask2 = (vector signed char)vec_xl( 0, k_mask2); - - for (int i = 0; i < nb; ++i) { - vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d)); - vector float vyd = vec_splats(y[i].d); - vector float vd = vec_mul(vxd, vyd); - - const uint8_t * GGML_RESTRICT q3 = x[i].qs; - const uint8_t * GGML_RESTRICT qh = x[i].qh; - const uint16_t * GGML_RESTRICT signs = (const uint16_t *)(x[i].signs); - const uint8_t * GGML_RESTRICT sc = x[i].scales; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - - vector signed int vsumi0 = v0; - vector signed int vsumi1 = v0; - vector signed int vsumi2 = v0; - vector signed int vsumi3 = v0; - - for (int j = 0; j < QK_K/32; j += 2) { - __builtin_prefetch(q3, 0, 1); - __builtin_prefetch(q8, 0, 1); - - vector unsigned int aux32x4_0 = {iq3s_grid[q3[ 0] | ((qh[0] << 8) & 256)], iq3s_grid[q3[ 1] | ((qh[0] << 7) & 256)], - iq3s_grid[q3[ 2] | ((qh[0] << 6) & 256)], iq3s_grid[q3[ 3] | ((qh[0] << 5) & 256)]}; - vector unsigned int aux32x4_1 = {iq3s_grid[q3[ 4] | ((qh[0] << 4) & 256)], iq3s_grid[q3[ 5] | ((qh[0] << 3) & 256)], - iq3s_grid[q3[ 6] | ((qh[0] << 2) & 256)], iq3s_grid[q3[ 7] | ((qh[0] << 1) & 256)]}; - vector unsigned int aux32x4_2 = {iq3s_grid[q3[ 8] | ((qh[1] << 8) & 256)], iq3s_grid[q3[ 9] | ((qh[1] << 7) & 256)], - iq3s_grid[q3[10] | ((qh[1] << 6) & 256)], iq3s_grid[q3[11] | ((qh[1] << 5) & 256)]}; - vector unsigned int aux32x4_3 = {iq3s_grid[q3[12] | ((qh[1] << 4) & 256)], iq3s_grid[q3[13] | ((qh[1] << 3) & 256)], - iq3s_grid[q3[14] | ((qh[1] << 2) & 256)], iq3s_grid[q3[15] | ((qh[1] << 1) & 256)]}; - q3 += 16; - qh += 2; - - vector signed char vsigns01 = (vector signed char)vec_splats(*(const uint32_t *)&signs[0]); - vector signed char vsigns02 = (vector signed char)vec_splats(*(const uint32_t *)&signs[2]); - signs += 4; - - vector signed char vsigns0 = vec_perm(vsigns01, vsigns01, mask0); - vector signed char vsigns1 = vec_perm(vsigns01, vsigns01, mask1); - vector signed char vsigns2 = vec_perm(vsigns02, vsigns02, mask0); - vector signed char vsigns3 = vec_perm(vsigns02, vsigns02, mask1); - - vsigns0 = (vector signed char)vec_cmpeq(vec_and(vsigns0, mask2), mask2); - vsigns1 = (vector signed char)vec_cmpeq(vec_and(vsigns1, mask2), mask2); - vsigns2 = (vector signed char)vec_cmpeq(vec_and(vsigns2, mask2), mask2); - vsigns3 = (vector signed char)vec_cmpeq(vec_and(vsigns3, mask2), mask2); - - vector signed char q3x0 = vec_sub(vec_xor(vsigns0, (vector signed char)aux32x4_0), vsigns0); - vector signed char q3x1 = vec_sub(vec_xor(vsigns1, (vector signed char)aux32x4_1), vsigns1); - vector 
signed char q3x2 = vec_sub(vec_xor(vsigns2, (vector signed char)aux32x4_2), vsigns2); - vector signed char q3x3 = vec_sub(vec_xor(vsigns3, (vector signed char)aux32x4_3), vsigns3); - - vector signed char q8y0 = vec_xl( 0, q8); - vector signed char q8y1 = vec_xl(16, q8); - vector signed char q8y2 = vec_xl(32, q8); - vector signed char q8y3 = vec_xl(48, q8); - q8 += 64; - - vector signed short qv0 = vec_add(vec_mule(q3x0, q8y0), vec_mulo(q3x0, q8y0)); - vector signed short qv1 = vec_add(vec_mule(q3x1, q8y1), vec_mulo(q3x1, q8y1)); - vector signed short qv2 = vec_add(vec_mule(q3x2, q8y2), vec_mulo(q3x2, q8y2)); - vector signed short qv3 = vec_add(vec_mule(q3x3, q8y3), vec_mulo(q3x3, q8y3)); - - const uint16_t ls0 = (uint16_t)(sc[0] & 0xf); - const uint16_t ls1 = (uint16_t)(sc[0] >> 4); - sc ++; - - vector signed short vscales01 = (vector signed short)vec_splats((uint16_t)(2*ls0+1)); - vector signed short vscales23 = (vector signed short)vec_splats((uint16_t)(2*ls1+1)); - - vsumi0 = vec_msum(qv0, vscales01, vsumi0); - vsumi1 = vec_msum(qv1, vscales01, vsumi1); - vsumi2 = vec_msum(qv2, vscales23, vsumi2); - vsumi3 = vec_msum(qv3, vscales23, vsumi3); - } - - vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); - vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); - vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); - vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); - } - - vsumf0 = vec_add(vsumf0, vsumf2); - vsumf1 = vec_add(vsumf1, vsumf3); - - vsumf0 = vec_add(vsumf0, vsumf1); - - vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); - vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); - - *s = vec_extract(vsumf0, 0); - -#elif defined(__loongarch_asx) - - static const uint8_t k_mask1[32] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, - 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03 - }; - - static const uint8_t k_mask2[32] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, - 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, - }; - - const __m256i mask1 = __lasx_xvld((const __m256i*)k_mask1, 0); - const __m256i mask2 = __lasx_xvld((const __m256i*)k_mask2, 0); - - __m256i idx_shift = lasx_set_w(1, 2, 3, 4, 5, 6, 7, 8); - const __m256i idx_mask = __lasx_xvreplgr2vr_w(256); - - typedef union { - __m256i vec[2]; - uint32_t index[16]; - } index_t; - - index_t idx; - - __m256 accumf = (__m256)__lasx_xvldi(0); - for (int i = 0; i < nb; ++i) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; - const uint8_t * GGML_RESTRICT qs = x[i].qs; - const uint8_t * GGML_RESTRICT qh = x[i].qh; - const uint16_t * GGML_RESTRICT signs = (const uint16_t *)x[i].signs; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - __m256i sumi1 = __lasx_xvldi(0); - __m256i sumi2 = __lasx_xvldi(0); - for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { - const __m256i q8_1 = __lasx_xvld((const __m256i *)q8, 0); q8 += 32; - const __m256i q8_2 = __lasx_xvld((const __m256i *)q8, 0); q8 += 32; - const __m256i idx_l = lasx_extu8_16(__lsx_vld(qs, 0)); qs += 16; - idx.vec[0] = __lasx_xvreplgr2vr_w(qh[ib32+0]); - idx.vec[1] = __lasx_xvreplgr2vr_w(qh[ib32+1]); - idx.vec[0] = __lasx_xvand_v(__lasx_xvsll_w(idx.vec[0], idx_shift), idx_mask); - idx.vec[1] = __lasx_xvand_v(__lasx_xvsll_w(idx.vec[1], idx_shift), idx_mask); - idx.vec[0] = __lasx_xvor_v(idx.vec[0], lasx_ext16_32(lasx_extracti128(idx_l, 0))); - idx.vec[1] = __lasx_xvor_v(idx.vec[1], 
lasx_ext16_32(lasx_extracti128(idx_l, 1))); - - // At leat on my CPU (Ryzen 7950X), using _mm256_i32gather_epi32 is slower than _mm256_set_epi32. Strange. - //const __m256i q2_1 = _mm256_i32gather_epi32((const int *)iq3s_grid, idx.vec[0], 4); - //const __m256i q2_2 = _mm256_i32gather_epi32((const int *)iq3s_grid, idx.vec[1], 4); - const __m256i q2_1 = lasx_set_w( - iq3s_grid[idx.index[7]], iq3s_grid[idx.index[6]], iq3s_grid[idx.index[5]], iq3s_grid[idx.index[4]], - iq3s_grid[idx.index[3]], iq3s_grid[idx.index[2]], iq3s_grid[idx.index[1]], iq3s_grid[idx.index[0]] - ); - const __m256i q2_2 = lasx_set_w( - iq3s_grid[idx.index[15]], iq3s_grid[idx.index[14]], iq3s_grid[idx.index[13]], iq3s_grid[idx.index[12]], - iq3s_grid[idx.index[11]], iq3s_grid[idx.index[10]], iq3s_grid[idx.index[ 9]], iq3s_grid[idx.index[ 8]] - ); - - __m256i aux256 = __lasx_xvreplgr2vr_w(signs[0] | (signs[1] << 16)); - aux256 = __lasx_xvand_v(lasx_shuffle_b(aux256,mask1), mask2); - const __m256i s2_1 = __lasx_xvseq_b(aux256, mask2); - const __m256i q8s_1 = __lasx_xvsub_b(__lasx_xvxor_v(s2_1, q8_1), s2_1); - - aux256 = __lasx_xvreplgr2vr_w(signs[2] | (signs[3] << 16)); - aux256 = __lasx_xvand_v(lasx_shuffle_b(aux256,mask1), mask2); - const __m256i s2_2 = __lasx_xvseq_b(aux256, mask2); - const __m256i q8s_2 = __lasx_xvsub_b(__lasx_xvxor_v(s2_2, q8_2), s2_2); - - signs += 4; - - const __m256i dot1 = lasx_maddubs_h(q2_1, q8s_1); - const __m256i dot2 = lasx_maddubs_h(q2_2, q8s_2); - const uint16_t ls1 = x[i].scales[ib32/2] & 0xf; - const uint16_t ls2 = x[i].scales[ib32/2] >> 4; - const __m256i p1 = lasx_madd_h(dot1, __lasx_xvreplgr2vr_h(2*ls1+1)); - const __m256i p2 = lasx_madd_h(dot2, __lasx_xvreplgr2vr_h(2*ls2+1)); - sumi1 = __lasx_xvadd_w(sumi1, p1); - sumi2 = __lasx_xvadd_w(sumi2, p2); - } - - accumf = __lasx_xvfmadd_s(__lasx_xvreplfr2vr_s(d), __lasx_xvffint_s_w(__lasx_xvadd_w(sumi1, sumi2)), accumf); - } - - *s = hsum_float_8(accumf); - -#else - - float sumf = 0.f; - for (int i = 0; i < nb; ++i) { - const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d; - const uint8_t * GGML_RESTRICT qs = x[i].qs; - const uint8_t * GGML_RESTRICT qh = x[i].qh; - const uint8_t * GGML_RESTRICT signs = x[i].signs; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - int32_t bsum = 0; - for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { - const uint32_t ls1 = 2*(x[i].scales[ib32/2] & 0xf) + 1; - const uint32_t ls2 = 2*(x[i].scales[ib32/2] >> 4) + 1; - int32_t sumi = 0; - for (int l = 0; l < 4; ++l) { - const uint8_t * grid1 = (const uint8_t *)(iq3s_grid + (qs[2*l+0] | ((qh[ib32+0] << (8-2*l)) & 256))); - const uint8_t * grid2 = (const uint8_t *)(iq3s_grid + (qs[2*l+1] | ((qh[ib32+0] << (7-2*l)) & 256))); - for (int j = 0; j < 4; ++j) { - sumi += grid1[j] * q8[j+0] * (signs[l] & kmask_iq2xs[j+0] ? -1 : 1); - sumi += grid2[j] * q8[j+4] * (signs[l] & kmask_iq2xs[j+4] ? -1 : 1); - } - q8 += 8; - } - qs += 8; - signs += 4; - bsum += sumi * ls1; - sumi = 0; - for (int l = 0; l < 4; ++l) { - const uint8_t * grid1 = (const uint8_t *)(iq3s_grid + (qs[2*l+0] | ((qh[ib32+1] << (8-2*l)) & 256))); - const uint8_t * grid2 = (const uint8_t *)(iq3s_grid + (qs[2*l+1] | ((qh[ib32+1] << (7-2*l)) & 256))); - for (int j = 0; j < 4; ++j) { - sumi += grid1[j] * q8[j+0] * (signs[l] & kmask_iq2xs[j+0] ? -1 : 1); - sumi += grid2[j] * q8[j+4] * (signs[l] & kmask_iq2xs[j+4] ? 
-1 : 1); - } - q8 += 8; - } - qs += 8; - signs += 4; - bsum += sumi * ls2; - } - sumf += d * bsum; - } - *s = sumf; -#endif -} - -#if defined(__AVX2__) -static inline __m256i mul_add_epi8(const __m256i x, const __m256i y) { - const __m256i ax = _mm256_sign_epi8(x, x); - const __m256i sy = _mm256_sign_epi8(y, x); - return _mm256_maddubs_epi16(ax, sy); -} -#elif defined(__loongarch_asx) -static inline __m256i mul_add_epi8(const __m256i x, const __m256i y) { - const __m256i a = __lasx_xvmulwev_h_b(x, y); - const __m256i b = __lasx_xvmulwod_h_b(x, y); - return __lasx_xvadd_h(a, b); -} -#endif - -void ggml_vec_dot_iq1_s_q8_K (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { - assert(n % QK_K == 0); - assert(nrc == 1); - UNUSED(nrc); - UNUSED(bx); - UNUSED(by); - UNUSED(bs); - - const block_iq1_s * GGML_RESTRICT x = vx; - const block_q8_K * GGML_RESTRICT y = vy; - - const int nb = n / QK_K; - -#if defined __ARM_NEON - - ggml_int8x16x4_t q1b; - ggml_int8x16x4_t q8b; - - float sumf = 0; - for (int i = 0; i < nb; ++i) { - - const int8_t * q8 = y[i].qs; - const uint8_t * qs = x[i].qs; - const uint16_t * qh = x[i].qh; - - int sumi1 = 0, sumi2 = 0, sumi3 = 0; - - for (int ib = 0; ib < QK_K/32; ib += 2) { - - q1b.val[0] = vcombine_s8(vld1_s8((const int8_t *)(iq1s_grid + (qs[0] | ((qh[ib+0] << 8) & 0x700)))), - vld1_s8((const int8_t *)(iq1s_grid + (qs[1] | ((qh[ib+0] << 5) & 0x700))))); - q1b.val[1] = vcombine_s8(vld1_s8((const int8_t *)(iq1s_grid + (qs[2] | ((qh[ib+0] << 2) & 0x700)))), - vld1_s8((const int8_t *)(iq1s_grid + (qs[3] | ((qh[ib+0] >> 1) & 0x700))))); - q1b.val[2] = vcombine_s8(vld1_s8((const int8_t *)(iq1s_grid + (qs[4] | ((qh[ib+1] << 8) & 0x700)))), - vld1_s8((const int8_t *)(iq1s_grid + (qs[5] | ((qh[ib+1] << 5) & 0x700))))); - q1b.val[3] = vcombine_s8(vld1_s8((const int8_t *)(iq1s_grid + (qs[6] | ((qh[ib+1] << 2) & 0x700)))), - vld1_s8((const int8_t *)(iq1s_grid + (qs[7] | ((qh[ib+1] >> 1) & 0x700))))); - qs += 8; - - q8b = ggml_vld1q_s8_x4(q8); q8 += 64; - - const int32x4_t p1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q1b.val[0], q8b.val[0]), q1b.val[1], q8b.val[1]); - const int32x4_t p2 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q1b.val[2], q8b.val[2]), q1b.val[3], q8b.val[3]); - - const int ls1 = 2*((qh[ib+0] >> 12) & 7) + 1; - const int ls2 = 2*((qh[ib+1] >> 12) & 7) + 1; - sumi1 += vaddvq_s32(p1) * ls1; - sumi2 += vaddvq_s32(p2) * ls2; - sumi3 += (y[i].bsums[2*ib+0] + y[i].bsums[2*ib+1]) * ls1 * (qh[ib+0] & 0x8000 ? -1 : 1) - + (y[i].bsums[2*ib+2] + y[i].bsums[2*ib+3]) * ls2 * (qh[ib+1] & 0x8000 ? 
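The mul_add_epi8 helper defined above leans on the fact that maddubs multiplies an unsigned byte by a signed byte: moving x's sign onto y first makes the signed-by-signed product come out unchanged. A small self-contained check of that identity, assuming the documented semantics of _mm256_sign_epi8 (negate where the sign operand is negative, zero where it is zero):

    #include <assert.h>
    #include <stdlib.h>

    /* Scalar model of _mm256_sign_epi8 for one lane. */
    static int sign8(int v, int s) { return s < 0 ? -v : (s == 0 ? 0 : v); }

    int main(void) {
        /* For every int8 pair, |x| * sign8(y, x) == x * y, which is what
           lets the unsigned-by-signed maddubs compute a signed dot product. */
        for (int x = -128; x < 128; ++x)
            for (int y = -128; y < 128; ++y)
                assert(abs(x) * sign8(y, x) == x * y);
        return 0;
    }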
-1 : 1); - - } - - sumf += y[i].d * GGML_FP16_TO_FP32(x[i].d) * (sumi1 + sumi2 + IQ1S_DELTA * sumi3); - } - - *s = sumf; - -#elif defined __AVX2__ - - __m256 accum = _mm256_setzero_ps(); - float accum1 = 0; - for (int i = 0; i < nb; ++i) { - - const int8_t * q8 = y[i].qs; - const uint8_t * qs = x[i].qs; - const uint16_t * qh = x[i].qh; - - __m256i sumi = _mm256_setzero_si256(); - int sumi1 = 0; - for (int ib = 0; ib < QK_K/32; ib += 2) { -#ifdef __BMI2__ - const uint64_t packed_idx1 = _pdep_u64(*(const uint32_t *)qs, 0x00ff00ff00ff00ffULL) | _pdep_u64(qh[ib], 0x700070007000700ULL); - const uint64_t packed_idx2 = _pdep_u64(*(const uint32_t *)(qs + 4), 0x00ff00ff00ff00ffULL) | _pdep_u64(qh[ib + 1], 0x700070007000700ULL); - const uint16_t *idx1 = (const uint16_t *)(&packed_idx1); - const uint16_t *idx2 = (const uint16_t *)(&packed_idx2); - const __m256i q1b_1 = _mm256_set_epi64x(iq1s_grid[idx1[3]], iq1s_grid[idx1[2]], iq1s_grid[idx1[1]], iq1s_grid[idx1[0]]); - const __m256i q1b_2 = _mm256_set_epi64x(iq1s_grid[idx2[3]], iq1s_grid[idx2[2]], iq1s_grid[idx2[1]], iq1s_grid[idx2[0]]); -#else - const __m256i q1b_1 = _mm256_set_epi64x(iq1s_grid[qs[3] | ((qh[ib+0] >> 1) & 0x700)], iq1s_grid[qs[2] | ((qh[ib+0] << 2) & 0x700)], - iq1s_grid[qs[1] | ((qh[ib+0] << 5) & 0x700)], iq1s_grid[qs[0] | ((qh[ib+0] << 8) & 0x700)]); - const __m256i q1b_2 = _mm256_set_epi64x(iq1s_grid[qs[7] | ((qh[ib+1] >> 1) & 0x700)], iq1s_grid[qs[6] | ((qh[ib+1] << 2) & 0x700)], - iq1s_grid[qs[5] | ((qh[ib+1] << 5) & 0x700)], iq1s_grid[qs[4] | ((qh[ib+1] << 8) & 0x700)]); -#endif - qs += 8; - const __m256i q8b_1 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; - const __m256i q8b_2 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; - - const __m256i dot1 = mul_add_epi8(q1b_1, q8b_1); - const __m256i dot2 = mul_add_epi8(q1b_2, q8b_2); - const int16_t ls1 = 2*((qh[ib+0] >> 12) & 7) + 1; - const int16_t ls2 = 2*((qh[ib+1] >> 12) & 7) + 1; - const __m256i p1 = _mm256_madd_epi16(dot1, _mm256_set1_epi16(ls1)); - const __m256i p2 = _mm256_madd_epi16(dot2, _mm256_set1_epi16(ls2)); - - sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p1, p2)); - sumi1 += (y[i].bsums[2*ib+0] + y[i].bsums[2*ib+1]) * (qh[ib+0] & 0x8000 ? -1 : 1) * ls1 - + (y[i].bsums[2*ib+2] + y[i].bsums[2*ib+3]) * (qh[ib+1] & 0x8000 ? 
-1 : 1) * ls2; - } - - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); - accum = _mm256_fmadd_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(sumi), accum); - accum1 += d * sumi1; - - } - - *s = hsum_float_8(accum) + IQ1S_DELTA * accum1; - -#elif defined __AVX__ - __m256 accum = _mm256_setzero_ps(); - float accum1 = 0; - for (int i = 0; i < nb; ++i) { - - const int8_t * q8 = y[i].qs; - const uint8_t * qs = x[i].qs; - const uint16_t * qh = x[i].qh; - - __m128i sumi1_0 = _mm_setzero_si128(); - __m128i sumi1_1 = _mm_setzero_si128(); - int sumi1 = 0; - for (int ib = 0; ib < QK_K/32; ib += 2) { - const __m128i q1b_1_0 = _mm_set_epi64x(iq1s_grid[qs[1] | ((qh[ib+0] << 5) & 0x700)], iq1s_grid[qs[0] | ((qh[ib+0] << 8) & 0x700)]); - const __m128i q1b_1_1 = _mm_set_epi64x(iq1s_grid[qs[3] | ((qh[ib+0] >> 1) & 0x700)], iq1s_grid[qs[2] | ((qh[ib+0] << 2) & 0x700)]); - const __m128i q1b_2_0 = _mm_set_epi64x(iq1s_grid[qs[5] | ((qh[ib+1] << 5) & 0x700)], iq1s_grid[qs[4] | ((qh[ib+1] << 8) & 0x700)]); - const __m128i q1b_2_1 = _mm_set_epi64x(iq1s_grid[qs[7] | ((qh[ib+1] >> 1) & 0x700)], iq1s_grid[qs[6] | ((qh[ib+1] << 2) & 0x700)]); - qs += 8; - const __m128i q8b_1_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; - const __m128i q8b_1_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; - const __m128i q8b_2_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; - const __m128i q8b_2_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; - - const __m128i dot1_0 = mul_add_epi8_sse(q1b_1_0, q8b_1_0); - const __m128i dot1_1 = mul_add_epi8_sse(q1b_1_1, q8b_1_1); - const __m128i dot2_0 = mul_add_epi8_sse(q1b_2_0, q8b_2_0); - const __m128i dot2_1 = mul_add_epi8_sse(q1b_2_1, q8b_2_1); - const int16_t ls1 = 2*((qh[ib+0] >> 12) & 7) + 1; - const int16_t ls2 = 2*((qh[ib+1] >> 12) & 7) + 1; - const __m128i p1_0 = _mm_madd_epi16(dot1_0, _mm_set1_epi16(ls1)); - const __m128i p1_1 = _mm_madd_epi16(dot1_1, _mm_set1_epi16(ls1)); - const __m128i p2_0 = _mm_madd_epi16(dot2_0, _mm_set1_epi16(ls2)); - const __m128i p2_1 = _mm_madd_epi16(dot2_1, _mm_set1_epi16(ls2)); - - sumi1_0 = _mm_add_epi32(sumi1_0, _mm_add_epi32(p1_0, p2_0)); - sumi1_1 = _mm_add_epi32(sumi1_1, _mm_add_epi32(p1_1, p2_1)); - sumi1 += (y[i].bsums[2*ib+0] + y[i].bsums[2*ib+1]) * (qh[ib+0] & 0x8000 ? -1 : 1) * ls1 - + (y[i].bsums[2*ib+2] + y[i].bsums[2*ib+3]) * (qh[ib+1] & 0x8000 ? 
-1 : 1) * ls2; - } - - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); - accum = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(MM256_SET_M128I(sumi1_1, sumi1_0))), accum); - accum1 += d * sumi1; - - } - - *s = hsum_float_8(accum) + IQ1S_DELTA * accum1; - -#elif defined(__POWER9_VECTOR__) - const vector unsigned char v0 = vec_splats((unsigned char)0x0); - const vector unsigned short vsign = vec_splats((unsigned short)0x8000); - - vector float vsumf0 = vec_splats(0.0f); - vector float vsumf1 = vec_splats(0.0f); - vector float vsumf2 = vec_splats(0.0f); - vector float vsumf3 = vec_splats(0.0f); - - for (int i = 0; i < nb; ++i) { - vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d)); - vector float vyd = vec_splats(y[i].d); - vector float vd = vec_mul(vxd, vyd); - - vector signed int vsumi0 = vec_splats((int32_t)0); - vector signed int vsumi1 = vec_splats((int32_t)0); - vector signed int vsumi2 = vec_splats((int32_t)0); - vector signed int vsumi3 = vec_splats((int32_t)0); - vector signed int vsumi8 = vec_splats((int32_t)0); - - const uint8_t * GGML_RESTRICT q1 = x[i].qs; - const uint16_t * GGML_RESTRICT qh = x[i].qh; - const int8_t * GGML_RESTRICT q8 = y[i].qs; - const int16_t * GGML_RESTRICT qs = y[i].bsums; - - for (int j = 0; j < QK_K/32; j += 2) { - __builtin_prefetch(q1, 0, 1); - __builtin_prefetch(qh, 0, 1); - __builtin_prefetch(q8, 0, 1); - - vector signed long long aux64x2_0 = {*(const int64_t *)(iq1s_grid + (q1[0] | ((qh[0] << 8) & 0x700))), *(const int64_t *)(iq1s_grid + (q1[1] | ((qh[0] << 5) & 0x700)))}; - vector signed long long aux64x2_1 = {*(const int64_t *)(iq1s_grid + (q1[2] | ((qh[0] << 2) & 0x700))), *(const int64_t *)(iq1s_grid + (q1[3] | ((qh[0] >> 1) & 0x700)))}; - vector signed long long aux64x2_2 = {*(const int64_t *)(iq1s_grid + (q1[4] | ((qh[1] << 8) & 0x700))), *(const int64_t *)(iq1s_grid + (q1[5] | ((qh[1] << 5) & 0x700)))}; - vector signed long long aux64x2_3 = {*(const int64_t *)(iq1s_grid + (q1[6] | ((qh[1] << 2) & 0x700))), *(const int64_t *)(iq1s_grid + (q1[7] | ((qh[1] >> 1) & 0x700)))}; - q1 += 8; - - vector signed char q1x0 = (vector signed char)aux64x2_0; - vector signed char q1x1 = (vector signed char)aux64x2_1; - vector signed char q1x2 = (vector signed char)aux64x2_2; - vector signed char q1x3 = (vector signed char)aux64x2_3; - - vector signed char q8y0 = vec_xl( 0, q8); - vector signed char q8y1 = vec_xl(16, q8); - vector signed char q8y2 = vec_xl(32, q8); - vector signed char q8y3 = vec_xl(48, q8); - q8 += 64; - - vector signed short qv0 = vec_add(vec_mule(q1x0, q8y0), vec_mulo(q1x0, q8y0)); - vector signed short qv1 = vec_add(vec_mule(q1x1, q8y1), vec_mulo(q1x1, q8y1)); - vector signed short qv2 = vec_add(vec_mule(q1x2, q8y2), vec_mulo(q1x2, q8y2)); - vector signed short qv3 = vec_add(vec_mule(q1x3, q8y3), vec_mulo(q1x3, q8y3)); - - const uint16_t ls0 = (uint16_t)((qh[0] >> 12) & 7); - const uint16_t ls1 = (uint16_t)((qh[1] >> 12) & 7); - - vector signed short vscales01 = (vector signed short)vec_splats((uint16_t)(2*ls0+1)); - vector signed short vscales23 = (vector signed short)vec_splats((uint16_t)(2*ls1+1)); - vector signed short vscales = vec_sld(vscales23, vscales01, 8); - - vsumi0 = vec_msum(qv0, vscales01, vsumi0); - vsumi1 = vec_msum(qv1, vscales01, vsumi1); - vsumi2 = vec_msum(qv2, vscales23, vsumi2); - vsumi3 = vec_msum(qv3, vscales23, vsumi3); - - vector signed short q8ysums = vec_xl_len(qs, 8); - qs += 4; - q8ysums = vec_mergeh(q8ysums, (vector signed short)v0); - - vector signed short qxh = (vector signed 
short)vec_sld(vec_splats(qh[1]), vec_splats(qh[0]), 8); - qh += 2; - vector __bool short vsel = vec_cmpge(qxh, (vector signed short)v0); - - vector signed short q8ysum = vec_sel((vector signed short)vec_xor((vector unsigned short)q8ysums, vsign), q8ysums, vsel); - - vsumi8 = vec_add(vec_mule(q8ysum, vscales), vsumi8); - } - - vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); - vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); - vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); - vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); - - vsumf0 = vec_madd(vec_ctf(vsumi8, 0), vec_mul(vd, vec_splats(IQ1S_DELTA)), vsumf0); - } - - vsumf0 = vec_add(vsumf0, vsumf2); - vsumf1 = vec_add(vsumf1, vsumf3); - - vsumf0 = vec_add(vsumf0, vsumf1); - - vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); - vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); - - *s = vec_extract(vsumf0, 0); - -#elif defined(__loongarch_asx) - - __m256 accum = (__m256)__lasx_xvldi(0); - float accum1 = 0; - for (int i = 0; i < nb; ++i) { - - const int8_t * q8 = y[i].qs; - const uint8_t * qs = x[i].qs; - const uint16_t * qh = x[i].qh; - - __m256i sumi = __lasx_xvldi(0); - int sumi1 = 0; - for (int ib = 0; ib < QK_K/32; ib += 2) { - __m256i q1b_1 = __lasx_xvinsgr2vr_d(q1b_1, iq1s_grid[qs[0] | ((qh[ib+0] << 8) & 0x700)], 0); - q1b_1 = __lasx_xvinsgr2vr_d(q1b_1, iq1s_grid[qs[1] | ((qh[ib+0] << 5) & 0x700)], 1); - q1b_1 = __lasx_xvinsgr2vr_d(q1b_1, iq1s_grid[qs[2] | ((qh[ib+0] << 2) & 0x700)], 2); - q1b_1 = __lasx_xvinsgr2vr_d(q1b_1, iq1s_grid[qs[3] | ((qh[ib+0] >> 1) & 0x700)], 3); - - __m256i q1b_2 = __lasx_xvinsgr2vr_d(q1b_2, iq1s_grid[qs[4] | ((qh[ib+1] << 8) & 0x700)], 0); - q1b_2 = __lasx_xvinsgr2vr_d(q1b_2, iq1s_grid[qs[5] | ((qh[ib+1] << 5) & 0x700)], 1); - q1b_2 = __lasx_xvinsgr2vr_d(q1b_2, iq1s_grid[qs[6] | ((qh[ib+1] << 2) & 0x700)], 2); - q1b_2 = __lasx_xvinsgr2vr_d(q1b_2, iq1s_grid[qs[7] | ((qh[ib+1] >> 1) & 0x700)], 3); - - qs += 8; - const __m256i q8b_1 = __lasx_xvld((const __m256i*)q8, 0); q8 += 32; - const __m256i q8b_2 = __lasx_xvld((const __m256i*)q8, 0); q8 += 32; - - const __m256i dot1 = mul_add_epi8(q1b_1, q8b_1); - const __m256i dot2 = mul_add_epi8(q1b_2, q8b_2); - const int16_t ls1 = 2*((qh[ib+0] >> 12) & 7) + 1; - const int16_t ls2 = 2*((qh[ib+1] >> 12) & 7) + 1; - - __m256i tmp1, tmp5, tmp6; - tmp1 = __lasx_xvreplgr2vr_h(ls1); - tmp5 = __lasx_xvmulwev_w_h(dot1, tmp1); - tmp6 = __lasx_xvmulwod_w_h(dot1, tmp1); - const __m256i p1 = __lasx_xvadd_w(tmp5, tmp6); - - tmp1 = __lasx_xvreplgr2vr_h(ls2); - tmp5 = __lasx_xvmulwev_w_h(dot2, tmp1); - tmp6 = __lasx_xvmulwod_w_h(dot2, tmp1); - const __m256i p2 = __lasx_xvadd_w(tmp5, tmp6); - - sumi = __lasx_xvadd_w(sumi, __lasx_xvadd_w(p1, p2)); - sumi1 += (y[i].bsums[2*ib+0] + y[i].bsums[2*ib+1]) * (qh[ib+0] & 0x8000 ? -1 : 1) * ls1 - + (y[i].bsums[2*ib+2] + y[i].bsums[2*ib+3]) * (qh[ib+1] & 0x8000 ? -1 : 1) * ls2; - } - - const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d); - accum = __lasx_xvfmadd_s(__lasx_xvreplfr2vr_s(d), __lasx_xvffint_s_w(sumi), accum); - accum1 += d * sumi1; - } - - *s = hsum_float_8(accum) + IQ1S_DELTA * accum1; - -#else - - float sumf = 0; - for (int i = 0; i < nb; i++) { - - const int8_t * q8 = y[i].qs; - const uint8_t * qs = x[i].qs; - const uint16_t * qh = x[i].qh; - - int sumi = 0, sumi1 = 0; - for (int ib = 0; ib < QK_K/32; ++ib) { - const int ls = 2*((qh[ib] >> 12) & 7) + 1; - const int delta = qh[ib] & 0x8000 ? 
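For iq1_s, each group of eight weights is one row of an 11-bit codebook: eight low index bits in qs plus three high bits packed into qh, with the 3-bit sub-block scale and the IQ1S_DELTA sign riding in the top bits of the same 16-bit qh word. A minimal decoder for one group, mirroring the scalar fallback below; iq1s_decode_group is a hypothetical helper, and the 2048-entry grid size is an assumption that follows from the 11-bit index:

    #include <stdint.h>

    extern const uint64_t iq1s_grid[2048];  /* assumed: 2^11 rows of 8 packed int8 values */

    /* Decode group l (0..3) of one 32-weight sub-block into raw grid values.
       The caller still applies ls = 2*((qh >> 12) & 7) + 1 and the IQ1S_DELTA
       shift whose sign sits in qh bit 15, exactly as in the scalar loop. */
    static void iq1s_decode_group(int8_t w[8], const uint8_t *qs, uint16_t qh, int l) {
        const int8_t *grid = (const int8_t *)(iq1s_grid + (qs[l] | (((qh >> (3*l)) & 7) << 8)));
        for (int j = 0; j < 8; ++j) {
            w[j] = grid[j];
        }
    }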
-1 : 1; - int lsum = 0; - for (int l = 0; l < 4; ++l) { - const int8_t * grid = (const int8_t *)(iq1s_grid + (qs[l] | (((qh[ib] >> 3*l) & 7) << 8))); - for (int j = 0; j < 8; ++j) { - lsum += q8[j] * grid[j]; - } - q8 += 8; - } - sumi += ls * lsum; - sumi1 += ls * delta * (y[i].bsums[2*ib+0] + y[i].bsums[2*ib+1]); - qs += 4; - } - - sumf += GGML_FP16_TO_FP32(x[i].d) * y[i].d * (sumi + IQ1S_DELTA * sumi1); - } - - *s = sumf; - -#endif -} - -void ggml_vec_dot_iq1_m_q8_K (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { - assert(n % QK_K == 0); - assert(nrc == 1); - UNUSED(nrc); - UNUSED(bx); - UNUSED(by); - UNUSED(bs); - - const block_iq1_m * GGML_RESTRICT x = vx; - const block_q8_K * GGML_RESTRICT y = vy; - - const int nb = n / QK_K; - - iq1m_scale_t scale; - -#if defined __ARM_NEON - const int32x4_t mask = vdupq_n_s32(0x7); - const int32x4_t mone = vdupq_n_s32(1); - const int32x4_t mzero = vdupq_n_s32(0); - - ggml_int8x16x4_t deltas; - deltas.val[0] = vcombine_s8(vdup_n_s8(+1), vdup_n_s8(+1)); - deltas.val[1] = vcombine_s8(vdup_n_s8(-1), vdup_n_s8(+1)); - deltas.val[2] = vcombine_s8(vdup_n_s8(+1), vdup_n_s8(-1)); - deltas.val[3] = vcombine_s8(vdup_n_s8(-1), vdup_n_s8(-1)); - - ggml_int8x16x4_t q1b; - ggml_int8x16x4_t q8b; - - uint32_t aux32; - const uint8_t * aux8 = (const uint8_t *)&aux32; - - float sumf = 0; - for (int i = 0; i < nb; ++i) { - - const int8_t * q8 = y[i].qs; - const uint8_t * qs = x[i].qs; - const uint8_t * qh = x[i].qh; - const uint16_t * sc = (const uint16_t *)x[i].scales; - - scale.u16 = (sc[0] >> 12) | ((sc[1] >> 8) & 0x00f0) | ((sc[2] >> 4) & 0x0f00) | (sc[3] & 0xf000); - - int32x4_t sumi1 = mzero; - int32x4_t sumi2 = mzero; - - for (int ib = 0; ib < QK_K/32; ib += 2) { - - q1b.val[0] = vcombine_s8(vld1_s8((const int8_t *)(iq1s_grid + (qs[0] | ((qh[0] << 8) & 0x700)))), - vld1_s8((const int8_t *)(iq1s_grid + (qs[1] | ((qh[0] << 4) & 0x700))))); - q1b.val[1] = vcombine_s8(vld1_s8((const int8_t *)(iq1s_grid + (qs[2] | ((qh[1] << 8) & 0x700)))), - vld1_s8((const int8_t *)(iq1s_grid + (qs[3] | ((qh[1] << 4) & 0x700))))); - q1b.val[2] = vcombine_s8(vld1_s8((const int8_t *)(iq1s_grid + (qs[4] | ((qh[2] << 8) & 0x700)))), - vld1_s8((const int8_t *)(iq1s_grid + (qs[5] | ((qh[2] << 4) & 0x700))))); - q1b.val[3] = vcombine_s8(vld1_s8((const int8_t *)(iq1s_grid + (qs[6] | ((qh[3] << 8) & 0x700)))), - vld1_s8((const int8_t *)(iq1s_grid + (qs[7] | ((qh[3] << 4) & 0x700))))); - - q8b = ggml_vld1q_s8_x4(q8); q8 += 64; - - const int32x4_t p1 = vpaddq_s32(ggml_vdotq_s32(mzero, q1b.val[0], q8b.val[0]), ggml_vdotq_s32(mzero, q1b.val[1], q8b.val[1])); - const int32x4_t p2 = vpaddq_s32(ggml_vdotq_s32(mzero, q1b.val[2], q8b.val[2]), ggml_vdotq_s32(mzero, q1b.val[3], q8b.val[3])); - const int32x4_t p12 = vpaddq_s32(p1, p2); - - const uint32_t * qh32 = (const uint32_t *)qh; // we are 4-byte aligned, so we can do that - aux32 = ((qh32[0] >> 3) & 0x01010101) | ((qh32[0] >> 6) & 0x02020202); - - const int32x4_t p3 = vpaddq_s32(ggml_vdotq_s32(mzero, deltas.val[aux8[0]], q8b.val[0]), ggml_vdotq_s32(mzero, deltas.val[aux8[1]], q8b.val[1])); - const int32x4_t p4 = vpaddq_s32(ggml_vdotq_s32(mzero, deltas.val[aux8[2]], q8b.val[2]), ggml_vdotq_s32(mzero, deltas.val[aux8[3]], q8b.val[3])); - const int32x4_t p34 = vpaddq_s32(p3, p4); - - int32x4_t scales_4 = ggml_vld1q_u32(sc[ib/2] >> 0, sc[ib/2] >> 3, sc[ib/2] >> 6, sc[ib/2] >> 9); - - scales_4 = vaddq_s32(vshlq_n_s32(vandq_s32(scales_4, mask), 1), mone); - - sumi1 
= vmlaq_s32(sumi1, scales_4, p12); - sumi2 = vmlaq_s32(sumi2, scales_4, p34); - - qs += 8; qh += 4; - - } - - sumf += y[i].d * GGML_FP16_TO_FP32(scale.f16) * (vaddvq_s32(sumi1) + IQ1M_DELTA * vaddvq_s32(sumi2)); - } - - *s = sumf; - -#elif defined __AVX2__ - - const __m256i mask = _mm256_set1_epi16(0x7); - const __m256i mone = _mm256_set1_epi16(1); - const __m256i mone8 = _mm256_set1_epi8(1); - const __m256i mtwo8 = _mm256_set1_epi8(2); - // VPSHUFB cannot cross 128-bit lanes so odd shifts go to upper half. - const __m256i scales_shift = _mm256_set_epi64x(9, 3, 6, 0); - - __m256 accum1 = _mm256_setzero_ps(); - __m256 accum2 = _mm256_setzero_ps(); - for (int i = 0; i < nb; ++i) { - - const int8_t * q8 = y[i].qs; - const uint8_t * qs = x[i].qs; - const uint8_t * qh = x[i].qh; - const uint16_t * sc = (const uint16_t *)x[i].scales; - - scale.u16 = (sc[0] >> 12) | ((sc[1] >> 8) & 0x00f0) | ((sc[2] >> 4) & 0x0f00) | (sc[3] & 0xf000); - // Extract 3-bit scales (16 values) - __m256i scales = _mm256_set1_epi64x(*(const uint64_t*)sc); - scales = _mm256_srlv_epi64(scales, scales_shift); - scales = _mm256_add_epi16(_mm256_slli_epi16(_mm256_and_si256(scales, mask), 1), mone); - - // Indices to repeat each scale 8 times. - __m256i scales_idx1 = _mm256_set1_epi16(0x0100); - __m256i scales_idx2 = _mm256_add_epi8(scales_idx1, _mm256_set1_epi8(8)); - - __m256i sumi1 = _mm256_setzero_si256(); - __m256i sumi2 = _mm256_setzero_si256(); - for (int ib = 0; ib < QK_K/32; ib += 2) { -#ifdef __BMI2__ - const uint64_t packed_idx1 = _pdep_u64(*(const uint32_t *)qs, 0x00ff00ff00ff00ffULL) - | _pdep_u64(*(const uint16_t*)(qh) & 0x7777, 0xf000f000f000f00ULL); - const uint64_t packed_idx2 = _pdep_u64(*(const uint32_t *)(qs + 4), 0x00ff00ff00ff00ffULL) - | _pdep_u64(*(const uint16_t*)(qh + 2) & 0x7777, 0xf000f000f000f00ULL); - const uint16_t *idx1 = (const uint16_t *)(&packed_idx1); - const uint16_t *idx2 = (const uint16_t *)(&packed_idx2); - const __m256i q1b_1 = _mm256_set_epi64x(iq1s_grid[idx1[3]], iq1s_grid[idx1[2]], iq1s_grid[idx1[1]], iq1s_grid[idx1[0]]); - const __m256i q1b_2 = _mm256_set_epi64x(iq1s_grid[idx2[3]], iq1s_grid[idx2[2]], iq1s_grid[idx2[1]], iq1s_grid[idx2[0]]); - - // Convert signs to bytes 0x81 (negative) or 0x01 (positive) - const uint64_t delta_sign = _pdep_u64(*(const uint32_t*)(qh) & 0x88888888, 0xf0f0f0f0f0f0f0f0ULL); - const __m256i delta1 = _mm256_or_si256(mone8, _mm256_cvtepi8_epi64(_mm_set1_epi32(delta_sign))); - const __m256i delta2 = _mm256_or_si256(mone8, _mm256_cvtepi8_epi64(_mm_set1_epi32(delta_sign >> 32))); -#else - const __m256i q1b_1 = _mm256_set_epi64x( - iq1s_grid[qs[3] | (((uint16_t)qh[1] << 4) & 0x700)], iq1s_grid[qs[2] | (((uint16_t)qh[1] << 8) & 0x700)], - iq1s_grid[qs[1] | (((uint16_t)qh[0] << 4) & 0x700)], iq1s_grid[qs[0] | (((uint16_t)qh[0] << 8) & 0x700)] - ); - const __m256i q1b_2 = _mm256_set_epi64x( - iq1s_grid[qs[7] | (((uint16_t)qh[3] << 4) & 0x700)], iq1s_grid[qs[6] | (((uint16_t)qh[3] << 8) & 0x700)], - iq1s_grid[qs[5] | (((uint16_t)qh[2] << 4) & 0x700)], iq1s_grid[qs[4] | (((uint16_t)qh[2] << 8) & 0x700)] - ); - - const __m256i delta1 = _mm256_set_epi64x(qh[1] & 0x80 ? 0xffffffffffffffff : 0x0101010101010101, - qh[1] & 0x08 ? 0xffffffffffffffff : 0x0101010101010101, - qh[0] & 0x80 ? 0xffffffffffffffff : 0x0101010101010101, - qh[0] & 0x08 ? 0xffffffffffffffff : 0x0101010101010101); - const __m256i delta2 = _mm256_set_epi64x(qh[3] & 0x80 ? 0xffffffffffffffff : 0x0101010101010101, - qh[3] & 0x08 ? 0xffffffffffffffff : 0x0101010101010101, - qh[2] & 0x80 ? 
0xffffffffffffffff : 0x0101010101010101, - qh[2] & 0x08 ? 0xffffffffffffffff : 0x0101010101010101); -#endif - const __m256i q8b_1 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; - const __m256i q8b_2 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32; - - const __m256i dot1 = mul_add_epi8(q1b_1, q8b_1); - const __m256i dot2 = mul_add_epi8(q1b_2, q8b_2); - const __m256i dot3 = _mm256_maddubs_epi16(mone8, _mm256_sign_epi8(q8b_1, delta1)); - const __m256i dot4 = _mm256_maddubs_epi16(mone8, _mm256_sign_epi8(q8b_2, delta2)); - - __m256i scale1 = _mm256_shuffle_epi8(scales, scales_idx1); - __m256i scale2 = _mm256_shuffle_epi8(scales, scales_idx2); - - scales_idx1 = _mm256_add_epi8(scales_idx1, mtwo8); - scales_idx2 = _mm256_add_epi8(scales_idx2, mtwo8); - - const __m256i p1 = _mm256_madd_epi16(dot1, scale1); - const __m256i p2 = _mm256_madd_epi16(dot2, scale2); - const __m256i p3 = _mm256_madd_epi16(dot3, scale1); - const __m256i p4 = _mm256_madd_epi16(dot4, scale2); - - sumi1 = _mm256_add_epi32(sumi1, _mm256_add_epi32(p1, p2)); - sumi2 = _mm256_add_epi32(sumi2, _mm256_add_epi32(p3, p4)); - - qs += 8; qh += 4; - } - - const __m256 d = _mm256_set1_ps(y[i].d * GGML_FP16_TO_FP32(scale.f16)); - - accum1 = _mm256_fmadd_ps(d, _mm256_cvtepi32_ps(sumi1), accum1); - accum2 = _mm256_fmadd_ps(d, _mm256_cvtepi32_ps(sumi2), accum2); - } - - *s = hsum_float_8(accum1) + IQ1M_DELTA * hsum_float_8(accum2); - -#elif defined __AVX__ - const __m128i mask = _mm_set1_epi16(0x7); - const __m128i mone = _mm_set1_epi16(1); - - __m256 accum1 = _mm256_setzero_ps(); - __m256 accum2 = _mm256_setzero_ps(); - for (int i = 0; i < nb; ++i) { - - const int8_t * q8 = y[i].qs; - const uint8_t * qs = x[i].qs; - const uint8_t * qh = x[i].qh; - const uint16_t * sc = (const uint16_t *)x[i].scales; - - scale.u16 = (sc[0] >> 12) | ((sc[1] >> 8) & 0x00f0) | ((sc[2] >> 4) & 0x0f00) | (sc[3] & 0xf000); - - __m128i sumi1_0 = _mm_setzero_si128(); - __m128i sumi1_1 = _mm_setzero_si128(); - __m128i sumi2_0 = _mm_setzero_si128(); - __m128i sumi2_1 = _mm_setzero_si128(); - for (int ib = 0; ib < QK_K/32; ib += 2) { - const __m128i q1b_1_0 = _mm_set_epi64x( - iq1s_grid[qs[1] | (((uint16_t)qh[0] << 4) & 0x700)], iq1s_grid[qs[0] | (((uint16_t)qh[0] << 8) & 0x700)]); - const __m128i q1b_1_1 = _mm_set_epi64x( - iq1s_grid[qs[3] | (((uint16_t)qh[1] << 4) & 0x700)], iq1s_grid[qs[2] | (((uint16_t)qh[1] << 8) & 0x700)]); - const __m128i q1b_2_0 = _mm_set_epi64x( - iq1s_grid[qs[5] | (((uint16_t)qh[2] << 4) & 0x700)], iq1s_grid[qs[4] | (((uint16_t)qh[2] << 8) & 0x700)]); - const __m128i q1b_2_1 = _mm_set_epi64x( - iq1s_grid[qs[7] | (((uint16_t)qh[3] << 4) & 0x700)], iq1s_grid[qs[6] | (((uint16_t)qh[3] << 8) & 0x700)]); - const __m128i q8b_1_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; - const __m128i q8b_1_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; - const __m128i q8b_2_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; - const __m128i q8b_2_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; - - const __m128i dot1_0 = mul_add_epi8_sse(q1b_1_0, q8b_1_0); - const __m128i dot1_1 = mul_add_epi8_sse(q1b_1_1, q8b_1_1); - const __m128i dot2_0 = mul_add_epi8_sse(q1b_2_0, q8b_2_0); - const __m128i dot2_1 = mul_add_epi8_sse(q1b_2_1, q8b_2_1); - - const __m128i delta1_0 = _mm_set_epi64x(qh[0] & 0x80 ? 0xffffffffffffffff : 0x0101010101010101, - qh[0] & 0x08 ? 0xffffffffffffffff : 0x0101010101010101); - const __m128i delta1_1 = _mm_set_epi64x(qh[1] & 0x80 ? 0xffffffffffffffff : 0x0101010101010101, - qh[1] & 0x08 ? 
0xffffffffffffffff : 0x0101010101010101); - const __m128i delta2_0 = _mm_set_epi64x(qh[2] & 0x80 ? 0xffffffffffffffff : 0x0101010101010101, - qh[2] & 0x08 ? 0xffffffffffffffff : 0x0101010101010101); - const __m128i delta2_1 = _mm_set_epi64x(qh[3] & 0x80 ? 0xffffffffffffffff : 0x0101010101010101, - qh[3] & 0x08 ? 0xffffffffffffffff : 0x0101010101010101); - - const __m128i dot3_0 = mul_add_epi8_sse(delta1_0, q8b_1_0); - const __m128i dot3_1 = mul_add_epi8_sse(delta1_1, q8b_1_1); - const __m128i dot4_0 = mul_add_epi8_sse(delta2_0, q8b_2_0); - const __m128i dot4_1 = mul_add_epi8_sse(delta2_1, q8b_2_1); - - __m128i scale1_0 = _mm_set1_epi16(sc[ib/2] >> 0); - __m128i scale1_1 = _mm_set1_epi16(sc[ib/2] >> 3); - __m128i scale2_0 = _mm_set1_epi16(sc[ib/2] >> 6); - __m128i scale2_1 = _mm_set1_epi16(sc[ib/2] >> 9); - - scale1_0 = _mm_add_epi16(_mm_slli_epi16(_mm_and_si128(scale1_0, mask), 1), mone); - scale1_1 = _mm_add_epi16(_mm_slli_epi16(_mm_and_si128(scale1_1, mask), 1), mone); - scale2_0 = _mm_add_epi16(_mm_slli_epi16(_mm_and_si128(scale2_0, mask), 1), mone); - scale2_1 = _mm_add_epi16(_mm_slli_epi16(_mm_and_si128(scale2_1, mask), 1), mone); - const __m128i p1_0 = _mm_madd_epi16(dot1_0, scale1_0); - const __m128i p1_1 = _mm_madd_epi16(dot1_1, scale1_1); - const __m128i p2_0 = _mm_madd_epi16(dot2_0, scale2_0); - const __m128i p2_1 = _mm_madd_epi16(dot2_1, scale2_1); - const __m128i p3_0 = _mm_madd_epi16(dot3_0, scale1_0); - const __m128i p3_1 = _mm_madd_epi16(dot3_1, scale1_1); - const __m128i p4_0 = _mm_madd_epi16(dot4_0, scale2_0); - const __m128i p4_1 = _mm_madd_epi16(dot4_1, scale2_1); - - sumi1_0 = _mm_add_epi32(sumi1_0, _mm_add_epi32(p1_0, p2_0)); - sumi1_1 = _mm_add_epi32(sumi1_1, _mm_add_epi32(p1_1, p2_1)); - sumi2_0 = _mm_add_epi32(sumi2_0, _mm_add_epi32(p3_0, p4_0)); - sumi2_1 = _mm_add_epi32(sumi2_1, _mm_add_epi32(p3_1, p4_1)); - - qs += 8; qh += 4; - } - - const __m256 d = _mm256_set1_ps(y[i].d * GGML_FP16_TO_FP32(scale.f16)); - - accum1 = _mm256_add_ps(_mm256_mul_ps(d, _mm256_cvtepi32_ps(MM256_SET_M128I(sumi1_1, sumi1_0))), accum1); - accum2 = _mm256_add_ps(_mm256_mul_ps(d, _mm256_cvtepi32_ps(MM256_SET_M128I(sumi2_1, sumi2_0))), accum2); - } - - *s = hsum_float_8(accum1) + IQ1M_DELTA * hsum_float_8(accum2); - -#else - - int sum1[2], sum2[2], delta[4]; - - float sumf = 0; - for (int i = 0; i < nb; i++) { - - const int8_t * q8 = y[i].qs; - const uint8_t * qs = x[i].qs; - const uint8_t * qh = x[i].qh; - const uint16_t * sc = (const uint16_t *)x[i].scales; - - scale.u16 = (sc[0] >> 12) | ((sc[1] >> 8) & 0x00f0) | ((sc[2] >> 4) & 0x0f00) | (sc[3] & 0xf000); - - int sumi1 = 0, sumi2 = 0; - for (int ib = 0; ib < QK_K/32; ++ib) { - delta[0] = qh[0] & 0x08 ? -1 : 1; - delta[1] = qh[0] & 0x80 ? -1 : 1; - delta[2] = qh[1] & 0x08 ? -1 : 1; - delta[3] = qh[1] & 0x80 ? 
-1 : 1; - sum1[0] = sum1[1] = sum2[0] = sum2[1] = 0; - for (int l = 0; l < 4; ++l) { - const int8_t * grid = (const int8_t *)(iq1s_grid + (qs[l] | (((uint16_t)qh[l/2] << (8 - 4*(l%2))) & 0x700))); - int lsum1 = 0, lsum2 = 0; - for (int j = 0; j < 8; ++j) { - lsum1 += q8[j] * grid[j]; - lsum2 += q8[j]; - } - q8 += 8; - sum1[l/2] += lsum1; - sum2[l/2] += lsum2*delta[l]; - } - - const int ls1 = 2*((sc[ib/2] >> (6*(ib%2)+0)) & 0x7) + 1; - const int ls2 = 2*((sc[ib/2] >> (6*(ib%2)+3)) & 0x7) + 1; - - sumi1 += sum1[0] * ls1 + sum1[1] * ls2; - sumi2 += sum2[0] * ls1 + sum2[1] * ls2; - qs += 4; - qh += 2; - } - - sumf += GGML_FP16_TO_FP32(scale.f16) * y[i].d * (sumi1 + IQ1M_DELTA * sumi2); - } - - *s = sumf; - -#endif -} - -void ggml_vec_dot_iq4_nl_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { - assert(nrc == 1); - UNUSED(nrc); - UNUSED(bx); - UNUSED(by); - UNUSED(bs); - assert(n % QK4_NL == 0); - static_assert(QK4_NL == QK8_0, "QK4_NL and QK8_0 must be the same"); - - const block_iq4_nl * GGML_RESTRICT x = vx; - const block_q8_0 * GGML_RESTRICT y = vy; - - const int nb = n / QK4_NL; - - int ib = 0; - float sumf = 0; - -#if defined __ARM_NEON - const int8x16_t values = vld1q_s8(kvalues_iq4nl); - const uint8x16_t m4b = vdupq_n_u8(0x0f); - uint8x16x2_t q4bits; - int8x16x4_t q4b; - int8x16x4_t q8b; - int32x4_t prod_1, prod_2; - - for (; ib + 1 < nb; ib += 2) { - - q4bits.val[0] = vld1q_u8(x[ib + 0].qs); - q4bits.val[1] = vld1q_u8(x[ib + 1].qs); - q8b.val[0] = vld1q_s8(y[ib + 0].qs); - q8b.val[1] = vld1q_s8(y[ib + 0].qs + 16); - q8b.val[2] = vld1q_s8(y[ib + 1].qs); - q8b.val[3] = vld1q_s8(y[ib + 1].qs + 16); - - q4b.val[0] = ggml_vqtbl1q_s8(values, vandq_u8 (q4bits.val[0], m4b)); - q4b.val[1] = ggml_vqtbl1q_s8(values, vshrq_n_u8(q4bits.val[0], 4)); - q4b.val[2] = ggml_vqtbl1q_s8(values, vandq_u8 (q4bits.val[1], m4b)); - q4b.val[3] = ggml_vqtbl1q_s8(values, vshrq_n_u8(q4bits.val[1], 4)); - - prod_1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q4b.val[0], q8b.val[0]), q4b.val[1], q8b.val[1]); - prod_2 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q4b.val[2], q8b.val[2]), q4b.val[3], q8b.val[3]); - - sumf += - GGML_FP16_TO_FP32(x[ib+0].d) * GGML_FP16_TO_FP32(y[ib + 0].d) * vaddvq_s32(prod_1) + - GGML_FP16_TO_FP32(x[ib+1].d) * GGML_FP16_TO_FP32(y[ib + 1].d) * vaddvq_s32(prod_2); - } - -#elif defined __AVX2__ - - const __m128i values128 = _mm_loadu_si128((const __m128i*)kvalues_iq4nl); - const __m128i m4b = _mm_set1_epi8(0x0f); - const __m256i mone = _mm256_set1_epi16(1); - - __m256 accum1 = _mm256_setzero_ps(); - __m256 accum2 = _mm256_setzero_ps(); - for (; ib + 1 < nb; ib += 2) { - const __m128i q4bits_1 = _mm_loadu_si128((const __m128i*)x[ib + 0].qs); - const __m128i q4bits_2 = _mm_loadu_si128((const __m128i*)x[ib + 1].qs); - const __m256i q8b_1 = _mm256_loadu_si256((const __m256i *)y[ib + 0].qs); - const __m256i q8b_2 = _mm256_loadu_si256((const __m256i *)y[ib + 1].qs); - const __m256i q4b_1 = MM256_SET_M128I(_mm_shuffle_epi8(values128, _mm_and_si128(_mm_srli_epi16(q4bits_1, 4), m4b)), - _mm_shuffle_epi8(values128, _mm_and_si128(q4bits_1, m4b))); - const __m256i q4b_2 = MM256_SET_M128I(_mm_shuffle_epi8(values128, _mm_and_si128(_mm_srli_epi16(q4bits_2, 4), m4b)), - _mm_shuffle_epi8(values128, _mm_and_si128(q4bits_2, m4b))); - const __m256i p16_1 = mul_add_epi8(q4b_1, q8b_1); - const __m256i p16_2 = mul_add_epi8(q4b_2, q8b_2); - const __m256i p_1 = _mm256_madd_epi16(p16_1, mone); - const __m256i p_2 = 
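iq1_m carries no fp16 scale field of its own: the four nibbles of the block's fp16 super-scale are smuggled into the top four bits of the four packed 16-bit scale words, and every path above reassembles them with the same shift-and-mask expression. As a standalone sketch of that line (iq1m_gather_scale is a hypothetical name):

    #include <stdint.h>

    /* Gather the fp16 super-scale of an iq1_m block from the top nibbles
       of the four packed 16-bit scale words. */
    static uint16_t iq1m_gather_scale(const uint16_t sc[4]) {
        return (uint16_t)((sc[0] >> 12)            /* result bits  0..3  */
                        | ((sc[1] >> 8) & 0x00f0)  /* result bits  4..7  */
                        | ((sc[2] >> 4) & 0x0f00)  /* result bits  8..11 */
                        |  (sc[3]       & 0xf000)); /* result bits 12..15 */
    }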
_mm256_madd_epi16(p16_2, mone); - accum1 = _mm256_fmadd_ps(_mm256_set1_ps(GGML_FP16_TO_FP32(y[ib + 0].d)*GGML_FP16_TO_FP32(x[ib + 0].d)), - _mm256_cvtepi32_ps(p_1), accum1); - accum2 = _mm256_fmadd_ps(_mm256_set1_ps(GGML_FP16_TO_FP32(y[ib + 1].d)*GGML_FP16_TO_FP32(x[ib + 1].d)), - _mm256_cvtepi32_ps(p_2), accum2); - } - - sumf = hsum_float_8(_mm256_add_ps(accum1, accum2)); - -#elif defined __AVX__ - const __m128i values128 = _mm_loadu_si128((const __m128i*)kvalues_iq4nl); - const __m128i m4b = _mm_set1_epi8(0x0f); - - __m256 accum = _mm256_setzero_ps(); - for (; ib + 1 < nb; ib += 2) { - const __m128i q4bits_1 = _mm_loadu_si128((const __m128i *)x[ib + 0].qs); - const __m128i q4bits_2 = _mm_loadu_si128((const __m128i *)x[ib + 1].qs); - const __m128i q8b_1_0 = _mm_loadu_si128((const __m128i *)y[ib + 0].qs); - const __m128i q8b_1_1 = _mm_loadu_si128((const __m128i *)y[ib + 0].qs + 1); - const __m128i q8b_2_0 = _mm_loadu_si128((const __m128i *)y[ib + 1].qs); - const __m128i q8b_2_1 = _mm_loadu_si128((const __m128i *)y[ib + 1].qs + 1); - - const __m128i q4b_1_0 = _mm_shuffle_epi8(values128, _mm_and_si128(q4bits_1, m4b)); - const __m128i q4b_1_1 = _mm_shuffle_epi8(values128, _mm_and_si128(_mm_srli_epi16(q4bits_1, 4), m4b)); - const __m128i q4b_2_0 = _mm_shuffle_epi8(values128, _mm_and_si128(q4bits_2, m4b)); - const __m128i q4b_2_1 = _mm_shuffle_epi8(values128, _mm_and_si128(_mm_srli_epi16(q4bits_2, 4), m4b)); - - const __m256 p = mul_sum_i8_quad_float(q4b_1_0, q4b_1_1, q4b_2_0, q4b_2_1, q8b_1_0, q8b_1_1, q8b_2_0, q8b_2_1); - const __m256 deltas = quad_fp16_delta_float(x[ib].d, y[ib].d, x[ib + 1].d, y[ib + 1].d); - accum = _mm256_add_ps(_mm256_mul_ps(deltas, p), accum); - } - - sumf = hsum_float_8(accum); - -#elif defined(__POWER9_VECTOR__) - const vector signed char lowMask = vec_splats((signed char)0xF); - const vector signed int v0 = vec_splats((int32_t)0); - const vector unsigned char v4 = vec_splats((unsigned char)0x4); - - vector float vsumf0 = vec_splats(0.0f); - vector float vsumf1 = vec_splats(0.0f); - - const vector signed char values = vec_xl( 0, kvalues_iq4nl); - -#pragma GCC unroll 4 - for (; ib < nb; ++ib) { - __builtin_prefetch(x[ib].qs, 0, 1); - __builtin_prefetch(y[ib].qs, 0, 1); - - - vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[ib].d)); - vector float vyd = vec_splats(GGML_FP16_TO_FP32(y[ib].d)); - vector float vd = vec_mul(vxd, vyd); - - vector signed char qxs = (vector signed char)vec_xl( 0, x[ib].qs); - vector signed char q4x0 = vec_and(qxs, lowMask); - vector signed char q4x1 = vec_sr(qxs, v4); - - q4x0 = vec_perm(values, values, (vector unsigned char)q4x0); - q4x1 = vec_perm(values, values, (vector unsigned char)q4x1); - - vector signed char q8y0 = vec_xl( 0, y[ib].qs); - vector signed char q8y1 = vec_xl(16, y[ib].qs); - - vector signed short qv0 = vec_add(vec_mule(q4x0, q8y0), vec_mulo(q4x0, q8y0)); - vector signed short qv1 = vec_add(vec_mule(q4x1, q8y1), vec_mulo(q4x1, q8y1)); - - vector signed int vsumi0 = v0; - vector signed int vsumi1 = v0; - - vsumi0 = vec_sum4s(qv0, vsumi0); - vsumi1 = vec_sum4s(qv1, vsumi1); - - vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); - vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); - } - - vsumf0 = vec_add(vsumf0, vsumf1); - - vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); - vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); - - sumf = vec_extract(vsumf0, 0); - -#elif defined (__loongarch_asx) - - const __m128i values128 = __lsx_vld((const __m128i*)kvalues_iq4nl, 0); - const __m128i m4b = __lsx_vreplgr2vr_b(0x0f); - 
const __m256i mone = __lasx_xvreplgr2vr_h(1); - - __m256 accum1 = (__m256)__lasx_xvldi(0); - __m256 accum2 = (__m256)__lasx_xvldi(0); - for (; ib + 1 < nb; ib += 2) { - const __m128i q4bits_1 = __lsx_vld((const __m128i*)x[ib + 0].qs, 0); - const __m128i q4bits_2 = __lsx_vld((const __m128i*)x[ib + 1].qs, 0); - const __m256i q8b_1 = __lasx_xvld((const __m256i *)y[ib + 0].qs, 0); - const __m256i q8b_2 = __lasx_xvld((const __m256i *)y[ib + 1].qs, 0); - const __m256i q4b_1 = lasx_insertf128(lsx_shuffle_b(values128, __lsx_vand_v(__lsx_vsrli_h(q4bits_1, 4), m4b)), - lsx_shuffle_b(values128, __lsx_vand_v(q4bits_1, m4b))); - const __m256i q4b_2 = lasx_insertf128(lsx_shuffle_b(values128, __lsx_vand_v(__lsx_vsrli_h(q4bits_2, 4), m4b)), - lsx_shuffle_b(values128, __lsx_vand_v(q4bits_2, m4b))); - const __m256i p16_1 = mul_add_epi8(q4b_1, q8b_1); - const __m256i p16_2 = mul_add_epi8(q4b_2, q8b_2); - const __m256i p_1 = lasx_madd_h(p16_1, mone); - const __m256i p_2 = lasx_madd_h(p16_2, mone); - accum1 = __lasx_xvfmadd_s(__lasx_xvreplfr2vr_s(GGML_FP16_TO_FP32(y[ib + 0].d)*GGML_FP16_TO_FP32(x[ib + 0].d)), - __lasx_xvffint_s_w(p_1), accum1); - accum2 = __lasx_xvfmadd_s(__lasx_xvreplfr2vr_s(GGML_FP16_TO_FP32(y[ib + 1].d)*GGML_FP16_TO_FP32(x[ib + 1].d)), - __lasx_xvffint_s_w(p_2), accum2); - } - - sumf = hsum_float_8(__lasx_xvfadd_s(accum1, accum2)); - -#elif defined(__VXE__) || defined(__VXE2__) - const int8x16_t v_k = vec_xl(0, kvalues_iq4nl); - const uint8x16_t v_m = vec_splat_u8(0x0F); - - for (; ib < nb; ++ib) { - const block_iq4_nl * GGML_RESTRICT x0 = &x[ib]; - const block_q8_0 * GGML_RESTRICT y0 = &y[ib]; - - const uint8x16_t v_x = vec_xl(0, x0->qs); - int8x16_t v_xl = (int8x16_t)vec_and(v_x, v_m); - int8x16_t v_xh = (int8x16_t)vec_sr(v_x, 4); - - v_xl = vec_perm(v_k, v_k, (uchar8x16_t)v_xl); - v_xh = vec_perm(v_k, v_k, (uchar8x16_t)v_xh); - - const int8x16_t v_yl = vec_xl(0 , y0->qs); - const int8x16_t v_yh = vec_xl(QK8_0/2, y0->qs); - const int32x4_t v_xy = ggml_vec_dot(ggml_vec_dot(vec_splats(0), v_xl, v_yl), v_xh, v_yh); - - sumf += GGML_FP16_TO_FP32(x0->d) * GGML_FP16_TO_FP32(y0->d) * (v_xy[0] + v_xy[1] + v_xy[2] + v_xy[3]); - } -#endif - for (; ib < nb; ++ib) { - const float d = GGML_FP16_TO_FP32(y[ib].d)*GGML_FP16_TO_FP32(x[ib].d); - int sumi1 = 0, sumi2 = 0; - for (int j = 0; j < QK4_NL/2; ++j) { - sumi1 += y[ib].qs[j+ 0] * kvalues_iq4nl[x[ib].qs[j] & 0xf]; - sumi2 += y[ib].qs[j+QK4_NL/2] * kvalues_iq4nl[x[ib].qs[j] >> 4]; - } - sumf += d * (sumi1 + sumi2); - } - *s = sumf; -} - -void ggml_vec_dot_iq4_xs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { - assert(nrc == 1); - UNUSED(nrc); - UNUSED(bx); - UNUSED(by); - UNUSED(bs); - assert(n % QK_K == 0); - - const block_iq4_xs * GGML_RESTRICT x = vx; - const block_q8_K * GGML_RESTRICT y = vy; - - const int nb = n / QK_K; - -#if defined __ARM_NEON - const int8x16_t values = vld1q_s8(kvalues_iq4nl); - const uint8x16_t m4b = vdupq_n_u8(0x0f); - ggml_uint8x16x2_t q4bits; - ggml_int8x16x4_t q4b; - ggml_int8x16x4_t q8b; - int32x4_t prod_1, prod_2; - - float sumf = 0; - - for (int ibl = 0; ibl < nb; ++ibl) { - - const int8_t * q8 = y[ibl].qs; - const uint8_t * q4 = x[ibl].qs; - uint16_t h = x[ibl].scales_h; - - int sumi1 = 0, sumi2 = 0; - for (int ib = 0; ib < QK_K/64; ++ib) { - - q4bits = ggml_vld1q_u8_x2(q4); q4 += 32; - q8b = ggml_vld1q_s8_x4(q8); q8 += 64; - - q4b.val[0] = ggml_vqtbl1q_s8(values, vandq_u8 (q4bits.val[0], m4b)); - q4b.val[1] = 
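iq4_nl replaces the affine 4-bit mapping of Q4_0 with a 16-entry non-uniform codebook, kvalues_iq4nl, so every nibble becomes a table lookup (shuffle/tbl/perm in the vector paths) before the dot product. A minimal dequantizer for one 32-weight block under that layout; iq4nl_dequant is a hypothetical helper, and d is assumed to be the block's fp16 scale already converted to float:

    #include <stdint.h>

    extern const int8_t kvalues_iq4nl[16];  /* non-uniform 4-bit codebook */

    /* Low nibbles are the first 16 weights, high nibbles the second 16,
       matching the scalar tail loop of ggml_vec_dot_iq4_nl_q8_0 above. */
    static void iq4nl_dequant(float *w, const uint8_t qs[16], float d) {
        for (int j = 0; j < 16; ++j) {
            w[j]      = d * kvalues_iq4nl[qs[j] & 0xf];
            w[j + 16] = d * kvalues_iq4nl[qs[j] >> 4];
        }
    }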
ggml_vqtbl1q_s8(values, vshrq_n_u8(q4bits.val[0], 4)); - q4b.val[2] = ggml_vqtbl1q_s8(values, vandq_u8 (q4bits.val[1], m4b)); - q4b.val[3] = ggml_vqtbl1q_s8(values, vshrq_n_u8(q4bits.val[1], 4)); - - prod_1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q4b.val[0], q8b.val[0]), q4b.val[1], q8b.val[1]); - prod_2 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q4b.val[2], q8b.val[2]), q4b.val[3], q8b.val[3]); - - int ls1 = ((x[ibl].scales_l[ib] & 0xf) | ((h << 4) & 0x30)) - 32; - int ls2 = ((x[ibl].scales_l[ib] >> 4) | ((h << 2) & 0x30)) - 32; - h >>= 4; - sumi1 += vaddvq_s32(prod_1) * ls1; - sumi2 += vaddvq_s32(prod_2) * ls2; - - } - - sumf += GGML_FP16_TO_FP32(x[ibl].d) * y[ibl].d * (sumi1 + sumi2); - } - - *s = sumf; - -#elif defined __AVX2__ - - const __m128i values128 = _mm_loadu_si128((const __m128i*)kvalues_iq4nl); - const __m128i m4b = _mm_set1_epi8(0x0f); - - __m256 accum = _mm256_setzero_ps(); - for (int ibl = 0; ibl < nb; ++ibl) { - const uint8_t * qs = x[ibl].qs; - const int8_t * q8 = y[ibl].qs; - uint16_t sh = x[ibl].scales_h; - __m256i sumi1 = _mm256_setzero_si256(); - __m256i sumi2 = _mm256_setzero_si256(); - for (int ib = 0; ib < QK_K/32; ib += 2) { - const __m128i q4bits_1 = _mm_loadu_si128((const __m128i*)qs); qs += 16; - const __m128i q4bits_2 = _mm_loadu_si128((const __m128i*)qs); qs += 16; - const __m256i q8b_1 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32; - const __m256i q8b_2 = _mm256_loadu_si256((const __m256i *)q8); q8 += 32; - const __m256i q4b_1 = MM256_SET_M128I(_mm_shuffle_epi8(values128, _mm_and_si128(_mm_srli_epi16(q4bits_1, 4), m4b)), - _mm_shuffle_epi8(values128, _mm_and_si128(q4bits_1, m4b))); - const __m256i q4b_2 = MM256_SET_M128I(_mm_shuffle_epi8(values128, _mm_and_si128(_mm_srli_epi16(q4bits_2, 4), m4b)), - _mm_shuffle_epi8(values128, _mm_and_si128(q4bits_2, m4b))); - const __m256i p16_1 = mul_add_epi8(q4b_1, q8b_1); - const __m256i p16_2 = mul_add_epi8(q4b_2, q8b_2); - const int16_t ls1 = ((x[ibl].scales_l[ib/2] & 0xf) | ((sh << 4) & 0x30)) - 32; - const int16_t ls2 = ((x[ibl].scales_l[ib/2] >> 4) | ((sh << 2) & 0x30)) - 32; - sh >>= 4; - const __m256i p_1 = _mm256_madd_epi16(p16_1, _mm256_set1_epi16(ls1)); - const __m256i p_2 = _mm256_madd_epi16(p16_2, _mm256_set1_epi16(ls2)); - sumi1 = _mm256_add_epi32(p_1, sumi1); - sumi2 = _mm256_add_epi32(p_2, sumi2); - } - accum = _mm256_fmadd_ps(_mm256_set1_ps(GGML_FP16_TO_FP32(x[ibl].d)*y[ibl].d), - _mm256_cvtepi32_ps(_mm256_add_epi32(sumi1, sumi2)), accum); - } - - *s = hsum_float_8(accum); - -#elif defined __AVX__ - const __m128i values128 = _mm_loadu_si128((const __m128i*)kvalues_iq4nl); - const __m128i m4b = _mm_set1_epi8(0x0f); - - __m256 accum = _mm256_setzero_ps(); - for (int ibl = 0; ibl < nb; ++ibl) { - const uint8_t * qs = x[ibl].qs; - const int8_t * q8 = y[ibl].qs; - uint16_t sh = x[ibl].scales_h; - __m128i sumi1_0 = _mm_setzero_si128(); - __m128i sumi1_1 = _mm_setzero_si128(); - __m128i sumi2_0 = _mm_setzero_si128(); - __m128i sumi2_1 = _mm_setzero_si128(); - for (int ib = 0; ib < QK_K/32; ib += 2) { - const __m128i q4bits_1 = _mm_loadu_si128((const __m128i *)qs); qs += 16; - const __m128i q4bits_2 = _mm_loadu_si128((const __m128i *)qs); qs += 16; - const __m128i q8b_1_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; - const __m128i q8b_1_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; - const __m128i q8b_2_0 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; - const __m128i q8b_2_1 = _mm_loadu_si128((const __m128i *)q8); q8 += 16; - const __m128i q4b_1_0 = _mm_shuffle_epi8(values128, 
_mm_and_si128(q4bits_1, m4b)); - const __m128i q4b_1_1 = _mm_shuffle_epi8(values128, _mm_and_si128(_mm_srli_epi16(q4bits_1, 4), m4b)); - const __m128i q4b_2_0 = _mm_shuffle_epi8(values128, _mm_and_si128(q4bits_2, m4b)); - const __m128i q4b_2_1 = _mm_shuffle_epi8(values128, _mm_and_si128(_mm_srli_epi16(q4bits_2, 4), m4b)); - const __m128i p16_1_0 = mul_add_epi8_sse(q4b_1_0, q8b_1_0); - const __m128i p16_1_1 = mul_add_epi8_sse(q4b_1_1, q8b_1_1); - const __m128i p16_2_0 = mul_add_epi8_sse(q4b_2_0, q8b_2_0); - const __m128i p16_2_1 = mul_add_epi8_sse(q4b_2_1, q8b_2_1); - const int16_t ls1 = ((x[ibl].scales_l[ib/2] & 0xf) | ((sh << 4) & 0x30)) - 32; - const int16_t ls2 = ((x[ibl].scales_l[ib/2] >> 4) | ((sh << 2) & 0x30)) - 32; - sh >>= 4; - const __m128i p_1_0 = _mm_madd_epi16(p16_1_0, _mm_set1_epi16(ls1)); - const __m128i p_1_1 = _mm_madd_epi16(p16_1_1, _mm_set1_epi16(ls1)); - const __m128i p_2_0 = _mm_madd_epi16(p16_2_0, _mm_set1_epi16(ls2)); - const __m128i p_2_1 = _mm_madd_epi16(p16_2_1, _mm_set1_epi16(ls2)); - sumi1_0 = _mm_add_epi32(p_1_0, sumi1_0); - sumi1_1 = _mm_add_epi32(p_1_1, sumi1_1); - sumi2_0 = _mm_add_epi32(p_2_0, sumi2_0); - sumi2_1 = _mm_add_epi32(p_2_1, sumi2_1); - } - __m128i sumi12_0 = _mm_add_epi32(sumi1_0, sumi2_0); - __m128i sumi12_1 = _mm_add_epi32(sumi1_1, sumi2_1); - accum = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(GGML_FP16_TO_FP32(x[ibl].d)*y[ibl].d), - _mm256_cvtepi32_ps(MM256_SET_M128I(sumi12_1, sumi12_0))), accum); - } - - *s = hsum_float_8(accum); - -#elif defined(__POWER9_VECTOR__) - const vector signed char lowMask = vec_splats((signed char)0xF); - const vector int v0 = vec_splats((int32_t)0); - const vector unsigned char v4 = vec_splats((unsigned char)0x4); - - vector float vsumf0 = vec_splats(0.0f); - vector float vsumf1 = vec_splats(0.0f); - vector float vsumf2 = vec_splats(0.0f); - vector float vsumf3 = vec_splats(0.0f); - - const vector signed char values = vec_xl( 0, kvalues_iq4nl); - - for (int ibl = 0; ibl < nb; ++ibl) { - - vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[ibl].d)); - vector float vyd = vec_splats(y[ibl].d); - vector float vd = vec_mul(vxd, vyd); - - vector signed int vsumi0 = v0; - vector signed int vsumi1 = v0; - vector signed int vsumi2 = v0; - vector signed int vsumi3 = v0; - - uint16_t h = x[ibl].scales_h; - - const uint8_t * GGML_RESTRICT q4 = x[ibl].qs; - const uint8_t * GGML_RESTRICT sc = x[ibl].scales_l; - const int8_t * GGML_RESTRICT q8 = y[ibl].qs; - - for (int ib = 0; ib < QK_K/64; ib ++ ) { - __builtin_prefetch(q4, 0, 1); - __builtin_prefetch(q8, 0, 1); - - vector signed char qxs0 = (vector signed char)vec_xl( 0, q4); - vector signed char qxs1 = (vector signed char)vec_xl(16, q4); - q4 += 32; - - vector signed char q4x00 = (vector signed char)vec_and(qxs0, lowMask); - vector signed char q4x01 = (vector signed char)vec_sr(qxs0, v4); - vector signed char q4x10 = (vector signed char)vec_and(qxs1, lowMask); - vector signed char q4x11 = (vector signed char)vec_sr(qxs1, v4); - - q4x00 = vec_perm(values, values, (vector unsigned char)q4x00); - q4x01 = vec_perm(values, values, (vector unsigned char)q4x01); - q4x10 = vec_perm(values, values, (vector unsigned char)q4x10); - q4x11 = vec_perm(values, values, (vector unsigned char)q4x11); - - vector signed char q8y0 = vec_xl( 0, q8); - vector signed char q8y1 = vec_xl(16, q8); - vector signed char q8y2 = vec_xl(32, q8); - vector signed char q8y3 = vec_xl(48, q8); - q8 += 64; - - vector signed short qv0 = vec_add(vec_mule(q4x00, q8y0), vec_mulo(q4x00, q8y0)); - vector signed short 
qv1 = vec_add(vec_mule(q4x01, q8y1), vec_mulo(q4x01, q8y1)); - vector signed short qv2 = vec_add(vec_mule(q4x10, q8y2), vec_mulo(q4x10, q8y2)); - vector signed short qv3 = vec_add(vec_mule(q4x11, q8y3), vec_mulo(q4x11, q8y3)); - - const uint16_t ls0 = (uint16_t)(((sc[0] & 0xf) | ((h << 4) & 0x30)) - 32); - const uint16_t ls1 = (uint16_t)(((sc[0] >> 4) | ((h << 2) & 0x30)) - 32); - h >>= 4; - sc ++; - - vector signed short vscales01 = vec_splats((int16_t)ls0); - vector signed short vscales23 = vec_splats((int16_t)ls1); - - vsumi0 = vec_msum(qv0, vscales01, vsumi0); - vsumi1 = vec_msum(qv1, vscales01, vsumi1); - vsumi2 = vec_msum(qv2, vscales23, vsumi2); - vsumi3 = vec_msum(qv3, vscales23, vsumi3); - } - - vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); - vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); - vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); - vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); - } - - vsumf0 = vec_add(vsumf0, vsumf2); - vsumf1 = vec_add(vsumf1, vsumf3); - - vsumf0 = vec_add(vsumf0, vsumf1); - - vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); - vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); - - *s = vec_extract(vsumf0, 0); - -#elif defined(__loongarch_asx) - - const __m128i values128 = __lsx_vld((const __m128i*)kvalues_iq4nl, 0); - - __m256 accum = (__m256)__lasx_xvldi(0); - - for (int ibl = 0; ibl < nb; ++ibl) { - const uint8_t * qs = x[ibl].qs; - const int8_t * q8 = y[ibl].qs; - uint16_t sh = x[ibl].scales_h; - __m256i sumi1 = __lasx_xvldi(0); - __m256i sumi2 = __lasx_xvldi(0); - for (int ib = 0; ib < QK_K/32; ib += 2) { - const __m128i q4bits_1 = __lsx_vld((const __m128i*)qs, 0); qs += 16; - const __m128i q4bits_2 = __lsx_vld((const __m128i*)qs, 0); qs += 16; - const __m256i q8b_1 = __lasx_xvld((const __m256i *)q8, 0); q8 += 32; - const __m256i q8b_2 = __lasx_xvld((const __m256i *)q8, 0); q8 += 32; - const __m256i q4b_1 = lasx_insertf128(__lsx_vshuf_b(values128, values128, __lsx_vsrli_b(q4bits_1, 4)), - __lsx_vshuf_b(values128, values128, __lsx_vandi_b(q4bits_1, 0xf))); - const __m256i q4b_2 = lasx_insertf128(__lsx_vshuf_b(values128, values128, __lsx_vsrli_b(q4bits_2, 4)), - __lsx_vshuf_b(values128, values128, __lsx_vandi_b(q4bits_2, 0xf))); - const __m256i p16_1 = mul_add_epi8(q4b_1, q8b_1); - const __m256i p16_2 = mul_add_epi8(q4b_2, q8b_2); - const int16_t ls1 = ((x[ibl].scales_l[ib/2] & 0xf) | ((sh << 4) & 0x30)) - 32; - const int16_t ls2 = ((x[ibl].scales_l[ib/2] >> 4) | ((sh << 2) & 0x30)) - 32; - sh >>= 4; - const __m256i p_1 = lasx_madd_h(p16_1, __lasx_xvreplgr2vr_h(ls1)); - const __m256i p_2 = lasx_madd_h(p16_2, __lasx_xvreplgr2vr_h(ls2)); - sumi1 = __lasx_xvadd_w(p_1, sumi1); - sumi2 = __lasx_xvadd_w(p_2, sumi2); - } - accum = __lasx_xvfmadd_s(__lasx_xvreplfr2vr_s(GGML_FP16_TO_FP32(x[ibl].d)*y[ibl].d), - __lasx_xvffint_s_w(__lasx_xvadd_w(sumi1, sumi2)), accum); - } - - *s = hsum_float_8(accum); -#elif defined(__VXE__) || defined(__VXE2__) - const int8x16_t v_k = vec_xl(0, kvalues_iq4nl); - const uint8x16_t v_m = vec_splat_u8(0x0F); - - float sumf = 0; - - for (int ibl = 0; ibl < nb; ++ibl) { - const uint8_t * GGML_RESTRICT q4 = x[ibl].qs; - const int8_t * GGML_RESTRICT q8 = y[ibl].qs; - - uint16_t h = x[ibl].scales_h; - - int sumi1 = 0, sumi2 = 0; - for (int ib = 0; ib < QK_K/64; ++ib) { - const uint8x16_t v_x0 = vec_xl(0 , q4); - const uint8x16_t v_x1 = vec_xl(QK4_NL/2, q4); - q4 += 32; - - int8x16_t v_x0l = (int8x16_t)vec_and(v_x0, v_m); - int8x16_t v_x0h = (int8x16_t)vec_sr(v_x0, 4); - int8x16_t v_x1l = (int8x16_t)vec_and(v_x1, 
v_m); - int8x16_t v_x1h = (int8x16_t)vec_sr(v_x1, 4); - - v_x0l = vec_perm(v_k, v_k, (uchar8x16_t)v_x0l); - v_x0h = vec_perm(v_k, v_k, (uchar8x16_t)v_x0h); - v_x1l = vec_perm(v_k, v_k, (uchar8x16_t)v_x1l); - v_x1h = vec_perm(v_k, v_k, (uchar8x16_t)v_x1h); - - const int8x16_t v_y0 = vec_xl( 0, q8); - const int8x16_t v_y1 = vec_xl(16, q8); - const int8x16_t v_y2 = vec_xl(32, q8); - const int8x16_t v_y3 = vec_xl(48, q8); - q8 += 64; - - int32x4_t vsumi0 = ggml_vec_dot(ggml_vec_dot(vec_splats(0), v_x0l, v_y0), v_x0h, v_y1); - int32x4_t vsumi1 = ggml_vec_dot(ggml_vec_dot(vec_splats(0), v_x1l, v_y2), v_x1h, v_y3); - - int ls1 = ((x[ibl].scales_l[ib] & 0xF) | ((h << 4) & 0x30)) - 32; - int ls2 = ((x[ibl].scales_l[ib] >> 4) | ((h << 2) & 0x30)) - 32; - - h >>= 4; - - sumi1 += (vsumi0[0] + vsumi0[1] + vsumi0[2] + vsumi0[3]) * ls1; - sumi2 += (vsumi1[0] + vsumi1[1] + vsumi1[2] + vsumi1[3]) * ls2; - } - - sumf += GGML_FP16_TO_FP32(x[ibl].d) * y[ibl].d * (sumi1 + sumi2); - } - - *s = sumf; - -#else - float sumf = 0; - for (int ibl = 0; ibl < nb; ++ibl) { - const float d4d8 = GGML_FP16_TO_FP32(x[ibl].d) * y[ibl].d; - uint16_t h = x[ibl].scales_h; - const uint8_t * qs = x[ibl].qs; - const int8_t * q8 = y[ibl].qs; - for (int ib = 0; ib < QK_K/32; ib += 2) { - const uint8_t ls1 = (x[ibl].scales_l[ib/2] & 0xf) | ((h << 4) & 0x30); - const uint8_t ls2 = (x[ibl].scales_l[ib/2] >> 4) | ((h << 2) & 0x30); - h >>= 4; - const float d1 = d4d8*(ls1 - 32); - const float d2 = d4d8*(ls2 - 32); - int sumi1 = 0, sumi2 = 0; - for (int j = 0; j < 16; ++j) { - sumi1 += q8[j+ 0] * kvalues_iq4nl[qs[j] & 0xf]; - sumi2 += q8[j+16] * kvalues_iq4nl[qs[j] >> 4]; - } - sumf += d1 * (sumi1 + sumi2); - qs += 16; - q8 += 32; - sumi1 = sumi2 = 0; - for (int j = 0; j < 16; ++j) { - sumi1 += q8[j+ 0] * kvalues_iq4nl[qs[j] & 0xf]; - sumi2 += q8[j+16] * kvalues_iq4nl[qs[j] >> 4]; - } - sumf += d2 * (sumi1 + sumi2); - qs += 16; - q8 += 32; - } - } - *s = sumf; -#endif -} - -// ============================ 4-bit non-linear quants - -void quantize_row_iq4_nl(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k) { - assert(k % QK4_NL == 0); - quantize_row_iq4_nl_ref(x, y, k); -} - -void quantize_row_iq4_xs(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k) { - assert(k % QK_K == 0); - quantize_iq4_xs(x, y, 1, k, NULL); -} diff --git a/ml/backend/ggml/ggml/src/ggml-cpu/ggml-cpu.c b/ml/backend/ggml/ggml/src/ggml-cpu/ggml-cpu.c index bff9c426e..85af19a37 100644 --- a/ml/backend/ggml/ggml/src/ggml-cpu/ggml-cpu.c +++ b/ml/backend/ggml/ggml/src/ggml-cpu/ggml-cpu.c @@ -3,11 +3,11 @@ #include "ggml-backend-impl.h" #include "ggml-backend.h" -#include "ggml-cpu-traits.h" +#include "traits.h" #include "ggml-cpu-impl.h" #include "ggml-cpu.h" #include "ggml-impl.h" -#include "ggml-cpu-quants.h" +#include "quants.h" #include "ggml-threading.h" #include "unary-ops.h" #include "binary-ops.h" @@ -74,15 +74,13 @@ #define UNUSED GGML_UNUSED #define SWAP(x, y, T) do { T SWAP = x; (x) = y; (y) = SWAP; } while (0) +// precomputed f32 table for f16 (256 KB) (simd-mappings.h) +float ggml_table_f32_f16[1 << 16]; + #if defined(__ARM_ARCH) struct ggml_arm_arch_features_type { - int has_neon; - int has_dotprod; - int has_i8mm; - int has_sve; int sve_cnt; - int has_sme; -} ggml_arm_arch_features = {-1, -1, -1, -1, 0, -1}; +} ggml_arm_arch_features = { 0 }; #endif @@ -199,6 +197,7 @@ typedef pthread_t ggml_thread_t; static const struct ggml_type_traits_cpu type_traits_cpu[GGML_TYPE_COUNT] = { [GGML_TYPE_F32] = { + .from_float = 
(ggml_from_float_t) ggml_cpu_fp32_to_fp32, .vec_dot = (ggml_vec_dot_t) ggml_vec_dot_f32, .vec_dot_type = GGML_TYPE_F32, .nrows = 1, @@ -256,6 +255,12 @@ static const struct ggml_type_traits_cpu type_traits_cpu[GGML_TYPE_COUNT] = { .vec_dot_type = GGML_TYPE_Q8_1, .nrows = 1, }, + [GGML_TYPE_MXFP4] = { + .from_float = quantize_row_mxfp4, + .vec_dot = ggml_vec_dot_mxfp4_q8_0, + .vec_dot_type = GGML_TYPE_Q8_0, + .nrows = 1, + }, [GGML_TYPE_Q2_K] = { .from_float = quantize_row_q2_K, .vec_dot = ggml_vec_dot_q2_K_q8_K, @@ -272,7 +277,11 @@ static const struct ggml_type_traits_cpu type_traits_cpu[GGML_TYPE_COUNT] = { .from_float = quantize_row_q4_K, .vec_dot = ggml_vec_dot_q4_K_q8_K, .vec_dot_type = GGML_TYPE_Q8_K, +#if defined (__ARM_FEATURE_MATMUL_INT8) + .nrows = 2, +#else .nrows = 1, +#endif }, [GGML_TYPE_Q5_K] = { .from_float = quantize_row_q5_K, @@ -284,7 +293,11 @@ static const struct ggml_type_traits_cpu type_traits_cpu[GGML_TYPE_COUNT] = { .from_float = quantize_row_q6_K, .vec_dot = ggml_vec_dot_q6_K_q8_K, .vec_dot_type = GGML_TYPE_Q8_K, +#if defined (__ARM_FEATURE_MATMUL_INT8) + .nrows = 2, +#else .nrows = 1, +#endif }, [GGML_TYPE_IQ2_XXS] = { .from_float = NULL, @@ -362,11 +375,6 @@ static const struct ggml_type_traits_cpu type_traits_cpu[GGML_TYPE_COUNT] = { .vec_dot_type = GGML_TYPE_Q8_K, .nrows = 1, }, - [GGML_TYPE_MXFP4] = { - .vec_dot = (ggml_vec_dot_t) ggml_vec_dot_mxfp4, - .vec_dot_type = GGML_TYPE_F32, - .nrows = 1, - }, }; const struct ggml_type_traits_cpu * ggml_get_type_traits_cpu(enum ggml_type type) { @@ -558,6 +566,14 @@ void ggml_barrier(struct ggml_threadpool * tp) { #endif } +void ggml_threadpool_chunk_set(struct ggml_threadpool * tp, int value) { + atomic_store_explicit(&tp->current_chunk, value, memory_order_relaxed); +} + +int ggml_threadpool_chunk_add(struct ggml_threadpool * tp, int value) { + return atomic_fetch_add_explicit(&tp->current_chunk, value, memory_order_relaxed); +} + #if defined(__gnu_linux__) static cpu_set_t ggml_get_numa_affinity(void) { cpu_set_t cpuset; @@ -669,87 +685,15 @@ bool ggml_is_numa(void) { #if defined(__linux__) && defined(__aarch64__) #include -#elif defined(__APPLE__) -#include -#endif - -#if !defined(HWCAP2_I8MM) -#define HWCAP2_I8MM (1 << 13) -#endif - -#if !defined(HWCAP2_SME) -#define HWCAP2_SME (1 << 23) #endif static void ggml_init_arm_arch_features(void) { -#if defined(__linux__) && defined(__aarch64__) - uint32_t hwcap = getauxval(AT_HWCAP); - uint32_t hwcap2 = getauxval(AT_HWCAP2); - - ggml_arm_arch_features.has_neon = !!(hwcap & HWCAP_ASIMD); - ggml_arm_arch_features.has_dotprod = !!(hwcap & HWCAP_ASIMDDP); - ggml_arm_arch_features.has_i8mm = !!(hwcap2 & HWCAP2_I8MM); - ggml_arm_arch_features.has_sve = !!(hwcap & HWCAP_SVE); - ggml_arm_arch_features.has_sme = !!(hwcap2 & HWCAP2_SME); - -#if defined(__ARM_FEATURE_SVE) +#if defined(__linux__) && defined(__aarch64__) && defined(__ARM_FEATURE_SVE) ggml_arm_arch_features.sve_cnt = PR_SVE_VL_LEN_MASK & prctl(PR_SVE_GET_VL); #endif -#elif defined(__APPLE__) - int oldp = 0; - size_t size = sizeof(oldp); - if (sysctlbyname("hw.optional.AdvSIMD", &oldp, &size, NULL, 0) != 0) { - oldp = 0; - } - ggml_arm_arch_features.has_neon = oldp; - - if (sysctlbyname("hw.optional.arm.FEAT_DotProd", &oldp, &size, NULL, 0) != 0) { - oldp = 0; - } - ggml_arm_arch_features.has_dotprod = oldp; - - if (sysctlbyname("hw.optional.arm.FEAT_I8MM", &oldp, &size, NULL, 0) != 0) { - oldp = 0; - } - ggml_arm_arch_features.has_i8mm = oldp; - - if (sysctlbyname("hw.optional.arm.FEAT_SME", &oldp, &size, NULL, 0) 
!= 0) { - oldp = 0; - } - ggml_arm_arch_features.has_sme = oldp; - - ggml_arm_arch_features.has_sve = 0; - ggml_arm_arch_features.sve_cnt = 0; -#else -// Run-time CPU feature detection not implemented for this platform, fallback to compile time -#if defined(__ARM_NEON) - ggml_arm_arch_features.has_neon = 1; -#else - ggml_arm_arch_features.has_neon = 0; -#endif - -#if defined(__ARM_FEATURE_MATMUL_INT8) - ggml_arm_arch_features.has_i8mm = 1; -#else - ggml_arm_arch_features.has_i8mm = 0; -#endif - -#if defined(__ARM_FEATURE_SVE) - ggml_arm_arch_features.has_sve = 1; - ggml_arm_arch_features.sve_cnt = 16; -#else - ggml_arm_arch_features.has_sve = 0; - ggml_arm_arch_features.sve_cnt = 0; -#endif - -#if defined(__ARM_FEATURE_SME) || defined(__ARM_FEATURE_SME2) - ggml_arm_arch_features.has_sme = 1; -#else - ggml_arm_arch_features.has_sme = 0; -#endif -#endif } -#endif + +#endif // __ARM_ARCH struct ggml_tensor * ggml_new_i32(struct ggml_context * ctx, int32_t value) { GGML_ASSERT(!ggml_get_no_alloc(ctx)); @@ -804,7 +748,7 @@ struct ggml_tensor * ggml_set_i32 (struct ggml_tensor * tensor, int32_t value) { { assert(tensor->nb[0] == sizeof(ggml_fp16_t)); for (int i = 0; i < n; i++) { - ggml_vec_set_f16(nc, (ggml_fp16_t *)(data + i*n1), GGML_FP32_TO_FP16(value)); + ggml_vec_set_f16(nc, (ggml_fp16_t *)(data + i*n1), GGML_CPU_FP32_TO_FP16(value)); } } break; case GGML_TYPE_BF16: @@ -863,7 +807,7 @@ struct ggml_tensor * ggml_set_f32(struct ggml_tensor * tensor, float value) { { assert(tensor->nb[0] == sizeof(ggml_fp16_t)); for (int i = 0; i < n; i++) { - ggml_vec_set_f16(nc, (ggml_fp16_t *)(data + i*n1), GGML_FP32_TO_FP16(value)); + ggml_vec_set_f16(nc, (ggml_fp16_t *)(data + i*n1), GGML_CPU_FP32_TO_FP16(value)); } } break; case GGML_TYPE_BF16: @@ -914,7 +858,7 @@ int32_t ggml_get_i32_1d(const struct ggml_tensor * tensor, int i) { case GGML_TYPE_F16: { GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t)); - return GGML_FP16_TO_FP32(((ggml_fp16_t *)(tensor->data))[i]); + return GGML_CPU_FP16_TO_FP32(((ggml_fp16_t *)(tensor->data))[i]); } case GGML_TYPE_BF16: { @@ -959,7 +903,7 @@ void ggml_set_i32_1d(const struct ggml_tensor * tensor, int i, int32_t value) { case GGML_TYPE_F16: { GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t)); - ((ggml_fp16_t *)(tensor->data))[i] = GGML_FP32_TO_FP16(value); + ((ggml_fp16_t *)(tensor->data))[i] = GGML_CPU_FP32_TO_FP16(value); } break; case GGML_TYPE_BF16: { @@ -988,7 +932,7 @@ int32_t ggml_get_i32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i case GGML_TYPE_I32: return ((int32_t *) data)[0]; case GGML_TYPE_F16: - return GGML_FP16_TO_FP32(((ggml_fp16_t *) data)[0]); + return GGML_CPU_FP16_TO_FP32(((ggml_fp16_t *) data)[0]); case GGML_TYPE_BF16: return GGML_BF16_TO_FP32(((ggml_bf16_t *) data)[0]); case GGML_TYPE_F32: @@ -1015,7 +959,7 @@ void ggml_set_i32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, } break; case GGML_TYPE_F16: { - ((ggml_fp16_t *)(data))[0] = GGML_FP32_TO_FP16(value); + ((ggml_fp16_t *)(data))[0] = GGML_CPU_FP32_TO_FP16(value); } break; case GGML_TYPE_BF16: { @@ -1053,7 +997,7 @@ float ggml_get_f32_1d(const struct ggml_tensor * tensor, int i) { } case GGML_TYPE_F16: { - return GGML_FP16_TO_FP32(((ggml_fp16_t *)(tensor->data))[i]); + return GGML_CPU_FP16_TO_FP32(((ggml_fp16_t *)(tensor->data))[i]); } case GGML_TYPE_BF16: { @@ -1092,7 +1036,7 @@ void ggml_set_f32_1d(const struct ggml_tensor * tensor, int i, float value) { } break; case GGML_TYPE_F16: { - ((ggml_fp16_t *)(tensor->data))[i] = GGML_FP32_TO_FP16(value); + 
((ggml_fp16_t *)(tensor->data))[i] = GGML_CPU_FP32_TO_FP16(value); } break; case GGML_TYPE_BF16: { @@ -1119,7 +1063,7 @@ float ggml_get_f32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, case GGML_TYPE_I32: return ((int32_t *) data)[0]; case GGML_TYPE_F16: - return GGML_FP16_TO_FP32(((ggml_fp16_t *) data)[0]); + return GGML_CPU_FP16_TO_FP32(((ggml_fp16_t *) data)[0]); case GGML_TYPE_BF16: return GGML_BF16_TO_FP32(((ggml_bf16_t *) data)[0]); case GGML_TYPE_F32: @@ -1146,7 +1090,7 @@ void ggml_set_f32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, } break; case GGML_TYPE_F16: { - ((ggml_fp16_t *)(data))[0] = GGML_FP32_TO_FP16(value); + ((ggml_fp16_t *)(data))[0] = GGML_CPU_FP32_TO_FP16(value); } break; case GGML_TYPE_BF16: { @@ -1257,7 +1201,7 @@ static void ggml_compute_forward_mul_mat_one_chunk( } } -static void ggml_compute_forward_mul_mat( +void ggml_compute_forward_mul_mat( const struct ggml_compute_params * params, struct ggml_tensor * dst) { @@ -1734,6 +1678,10 @@ static void ggml_compute_forward(struct ggml_compute_params * params, struct ggm { ggml_compute_forward_add(params, tensor); } break; + case GGML_OP_ADD_ID: + { + ggml_compute_forward_add_id(params, tensor); + } break; case GGML_OP_ADD1: { ggml_compute_forward_add1(params, tensor); @@ -1882,6 +1830,10 @@ static void ggml_compute_forward(struct ggml_compute_params * params, struct ggm { ggml_compute_forward_get_rows_back(params, tensor); } break; + case GGML_OP_SET_ROWS: + { + ggml_compute_forward_set_rows(params, tensor); + } break; case GGML_OP_DIAG: { ggml_compute_forward_diag(params, tensor); @@ -1926,6 +1878,10 @@ static void ggml_compute_forward(struct ggml_compute_params * params, struct ggm { ggml_compute_forward_im2col_back_f32(params, tensor); } break; + case GGML_OP_CONV_2D: + { + ggml_compute_forward_conv_2d(params, tensor); + } break; case GGML_OP_CONV_2D_DW: { ggml_compute_forward_conv_2d_dw(params, tensor); @@ -1958,6 +1914,10 @@ static void ggml_compute_forward(struct ggml_compute_params * params, struct ggm { ggml_compute_forward_pad_reflect_1d(params, tensor); } break; + case GGML_OP_ROLL: + { + ggml_compute_forward_roll(params, tensor); + } break; case GGML_OP_ARANGE: { ggml_compute_forward_arange(params, tensor); @@ -1976,7 +1936,7 @@ static void ggml_compute_forward(struct ggml_compute_params * params, struct ggm } break; case GGML_OP_FLASH_ATTN_EXT: { - ggml_compute_forward_flash_attn_ext(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor->src[3], tensor); + ggml_compute_forward_flash_attn_ext(params, tensor); } break; case GGML_OP_FLASH_ATTN_BACK: { @@ -2005,6 +1965,10 @@ static void ggml_compute_forward(struct ggml_compute_params * params, struct ggm { ggml_compute_forward_unary(params, tensor); } break; + case GGML_OP_GLU: + { + ggml_compute_forward_glu(params, tensor); + } break; case GGML_OP_GET_REL_POS: { ggml_compute_forward_get_rel_pos(params, tensor); @@ -2159,6 +2123,7 @@ static int ggml_get_n_tasks(struct ggml_tensor * node, int n_threads) { case GGML_OP_DUP: case GGML_OP_CONT: case GGML_OP_ADD: + case GGML_OP_ADD_ID: case GGML_OP_ADD1: case GGML_OP_ACC: { @@ -2205,6 +2170,7 @@ static int ggml_get_n_tasks(struct ggml_tensor * node, int n_threads) { } break; case GGML_UNARY_OP_GELU: + case GGML_UNARY_OP_GELU_ERF: case GGML_UNARY_OP_GELU_QUICK: case GGML_UNARY_OP_SILU: { @@ -2214,6 +2180,21 @@ static int ggml_get_n_tasks(struct ggml_tensor * node, int n_threads) { GGML_ABORT("fatal error"); } break; + case GGML_OP_GLU: + switch (ggml_get_glu_op(node)) { 
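+                // each GLU variant splits its input into two halves (a, b) and computes
+                // act(a) * b elementwise -- e.g. REGLU: relu(a) * b, GEGLU: gelu(a) * b,
+                // SWIGLU: silu(a) * b -- so rows are independent and, as with the unary
+                // activations above, every thread can participate: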
+ case GGML_GLU_OP_REGLU: + case GGML_GLU_OP_GEGLU: + case GGML_GLU_OP_SWIGLU: + case GGML_GLU_OP_SWIGLU_OAI: + case GGML_GLU_OP_GEGLU_ERF: + case GGML_GLU_OP_GEGLU_QUICK: + { + n_tasks = n_threads; + } break; + default: + GGML_ABORT("fatal error"); + } + break; case GGML_OP_SILU_BACK: case GGML_OP_MUL: case GGML_OP_DIV: @@ -2230,6 +2211,7 @@ static int ggml_get_n_tasks(struct ggml_tensor * node, int n_threads) { n_tasks = n_threads; } break; case GGML_OP_GET_ROWS: + case GGML_OP_SET_ROWS: { // FIXME: get_rows can use additional threads, but the cost of launching additional threads // decreases performance with GPU offloading @@ -2266,6 +2248,7 @@ static int ggml_get_n_tasks(struct ggml_tensor * node, int n_threads) { } break; case GGML_OP_IM2COL: case GGML_OP_IM2COL_BACK: + case GGML_OP_CONV_2D: case GGML_OP_CONV_2D_DW: case GGML_OP_CONV_TRANSPOSE_1D: case GGML_OP_CONV_TRANSPOSE_2D: @@ -2281,6 +2264,7 @@ static int ggml_get_n_tasks(struct ggml_tensor * node, int n_threads) { case GGML_OP_UPSCALE: case GGML_OP_PAD: case GGML_OP_PAD_REFLECT_1D: + case GGML_OP_ROLL: case GGML_OP_ARANGE: case GGML_OP_TIMESTEP_EMBEDDING: case GGML_OP_ARGSORT: @@ -2416,12 +2400,32 @@ static bool ggml_thread_apply_priority(int32_t prio) { // This is up to the applications. DWORD p = THREAD_PRIORITY_NORMAL; switch (prio) { + case GGML_SCHED_PRIO_LOW: p = THREAD_PRIORITY_BELOW_NORMAL; break; case GGML_SCHED_PRIO_NORMAL: p = THREAD_PRIORITY_NORMAL; break; case GGML_SCHED_PRIO_MEDIUM: p = THREAD_PRIORITY_ABOVE_NORMAL; break; case GGML_SCHED_PRIO_HIGH: p = THREAD_PRIORITY_HIGHEST; break; case GGML_SCHED_PRIO_REALTIME: p = THREAD_PRIORITY_TIME_CRITICAL; break; } + if (prio != GGML_SCHED_PRIO_LOW) { + // Tell Windows that this thread should not be throttled (needs its own CPU core). 
+ // Newer Windows 11 versions aggressively park (offline) CPU cores and often place + // all our threads onto the first 4 cores, which results in terrible performance with + // n_threads > 4 + #if (_WIN32_WINNT >= 0x0602) && !defined(__GNUC__) + THREAD_POWER_THROTTLING_STATE t; + ZeroMemory(&t, sizeof(t)); + t.Version = THREAD_POWER_THROTTLING_CURRENT_VERSION; + t.ControlMask = THREAD_POWER_THROTTLING_EXECUTION_SPEED; + t.StateMask = 0; + + if (!SetThreadInformation(GetCurrentThread(), ThreadPowerThrottling, &t, sizeof(t))) { + GGML_LOG_DEBUG("failed to disable thread power throttling %d : (%d)\n", prio, (int) GetLastError()); + return false; + } + #endif + } + if (prio == GGML_SCHED_PRIO_NORMAL) { // Keep inherited policy/priority return true; @@ -2449,6 +2453,8 @@ static bool ggml_thread_apply_priority(int32_t prio) { struct sched_param p; int32_t policy = SCHED_OTHER; switch (prio) { + // TODO: there seems to be no way to set lower prio on Apple platforms + case GGML_SCHED_PRIO_LOW: policy = SCHED_OTHER; p.sched_priority = 0; break; case GGML_SCHED_PRIO_NORMAL: policy = SCHED_OTHER; p.sched_priority = 0; break; case GGML_SCHED_PRIO_MEDIUM: policy = SCHED_FIFO; p.sched_priority = 40; break; case GGML_SCHED_PRIO_HIGH: policy = SCHED_FIFO; p.sched_priority = 80; break; @@ -2505,6 +2511,7 @@ static bool ggml_thread_apply_priority(int32_t prio) { struct sched_param p; int32_t policy = SCHED_OTHER; switch (prio) { + case GGML_SCHED_PRIO_LOW: policy = SCHED_BATCH; p.sched_priority = 0; break; case GGML_SCHED_PRIO_NORMAL: policy = SCHED_OTHER; p.sched_priority = 0; break; case GGML_SCHED_PRIO_MEDIUM: policy = SCHED_FIFO; p.sched_priority = 40; break; case GGML_SCHED_PRIO_HIGH: policy = SCHED_FIFO; p.sched_priority = 80; break; @@ -2680,6 +2687,7 @@ struct ggml_cplan ggml_graph_plan( } } break; case GGML_OP_ADD: + case GGML_OP_ADD_ID: case GGML_OP_ADD1: { if (ggml_is_quantized(node->src[0]->type)) { @@ -2760,6 +2768,10 @@ struct ggml_cplan ggml_graph_plan( GGML_ABORT("fatal error"); } } break; + case GGML_OP_CONV_2D: + { + cur = GGML_IM2COL_WORK_SIZE; + } break; case GGML_OP_CONV_TRANSPOSE_2D: { const int64_t ne00 = node->src[0]->ne[0]; // W @@ -3164,6 +3176,10 @@ enum ggml_status ggml_graph_compute_with_ctx(struct ggml_context * ctx, struct g return ggml_graph_compute(cgraph, &cplan); } +void ggml_cpu_fp32_to_fp32(const float * x, float * y, int64_t n) { + memcpy(y, x, n * sizeof(float)); +} + void ggml_cpu_fp32_to_fp16(const float * x, ggml_fp16_t * y, int64_t n) { int64_t i = 0; #if defined(__F16C__) @@ -3184,9 +3200,24 @@ void ggml_cpu_fp32_to_fp16(const float * x, ggml_fp16_t * y, int64_t n) { __m128i y_vec = _mm_cvtps_ph(x_vec, _MM_FROUND_TO_NEAREST_INT); _mm_storel_epi64((__m128i *)(y + i), y_vec); } +#elif defined(__NNPA__) + for (; i + 7 < n; i += 8) { + float32x4_t v_xh = vec_xl(0, (const float *)(x + i + 0)); + float32x4_t v_xl = vec_xl(0, (const float *)(x + i + 4)); + uint16x8_t v_yd = vec_round_from_fp32(v_xh, v_xl, 0); + uint16x8_t v_y = vec_convert_to_fp16(v_yd, 0); + vec_xst(v_y, 0, (ggml_fp16_t *)(y + i)); + } + for (; i + 3 < n; i += 4) { + float32x4_t v_x = vec_xl(0, (const float *)(x + i)); + float32x4_t v_zero = vec_splats(0.0f); + uint16x8_t v_yd = vec_round_from_fp32(v_x, v_zero, 0); + uint16x8_t v_y = vec_convert_to_fp16(v_yd, 0); + vec_xst(v_y, 0, (ggml_fp16_t *)(y + i)); + } #endif for (; i < n; ++i) { - y[i] = GGML_FP32_TO_FP16(x[i]); + y[i] = GGML_CPU_FP32_TO_FP16(x[i]); } } @@ -3210,9 +3241,25 @@ void ggml_cpu_fp16_to_fp32(const ggml_fp16_t * x, float * y, int64_t n) 
{ __m128 y_vec = _mm_cvtph_ps(x_vec); _mm_storeu_ps(y + i, y_vec); } +#elif defined(__NNPA__) + for (; i + 7 < n; i += 8) { + uint16x8_t v_x = vec_xl(0, (const ggml_fp16_t *)(x + i)); + uint16x8_t v_yd = vec_convert_from_fp16(v_x, 0); + float32x4_t v_yh = vec_extend_to_fp32_hi(v_yd, 0); + float32x4_t v_yl = vec_extend_to_fp32_lo(v_yd, 0); + vec_xst(v_yh, 0, (float *)(y + i + 0)); + vec_xst(v_yl, 0, (float *)(y + i + 4)); + } + for (; i + 3 < n; i += 4) { + uint16x8_t v_x = vec_xl(0, (const ggml_fp16_t *)(x + i)); + uint16x8_t v_yd = vec_convert_from_fp16(v_x, 0); + float32x4_t v_yh = vec_extend_to_fp32_hi(v_yd, 0); + vec_xst(v_yh, 0, (float *)(y + i)); + } #endif + for (; i < n; ++i) { - y[i] = GGML_FP16_TO_FP32(x[i]); + y[i] = GGML_CPU_FP16_TO_FP32(x[i]); } } @@ -3412,9 +3459,17 @@ int ggml_cpu_has_vxe(void) { #endif } +int ggml_cpu_has_nnpa(void) { +#if defined(GGML_NNPA) + return 1; +#else + return 0; +#endif +} + int ggml_cpu_has_neon(void) { #if defined(__ARM_ARCH) && defined(__ARM_NEON) - return ggml_arm_arch_features.has_neon; + return 1; #else return 0; #endif @@ -3422,7 +3477,7 @@ int ggml_cpu_has_neon(void) { int ggml_cpu_has_dotprod(void) { #if defined(__ARM_ARCH) && defined(__ARM_FEATURE_DOTPROD) - return ggml_arm_arch_features.has_dotprod; + return 1; #else return 0; #endif @@ -3430,7 +3485,7 @@ int ggml_cpu_has_dotprod(void) { int ggml_cpu_has_sve(void) { #if defined(__ARM_ARCH) && defined(__ARM_FEATURE_SVE) - return ggml_arm_arch_features.has_sve; + return 1; #else return 0; #endif @@ -3438,7 +3493,7 @@ int ggml_cpu_has_sve(void) { int ggml_cpu_has_matmul_int8(void) { #if defined(__ARM_ARCH) && defined(__ARM_FEATURE_MATMUL_INT8) - return ggml_arm_arch_features.has_i8mm; + return 1; #else return 0; #endif @@ -3454,14 +3509,14 @@ int ggml_cpu_get_sve_cnt(void) { int ggml_cpu_has_sme(void) { #if defined(__ARM_ARCH) && defined(__ARM_FEATURE_SME) - return ggml_arm_arch_features.has_sme; + return 1; #else return 0; #endif } void ggml_cpu_init(void) { - // needed to initialize f16 tables + // needed to initialize ggml_time { struct ggml_init_params params = { 0, NULL, false }; struct ggml_context * ctx = ggml_init(params); @@ -3482,14 +3537,28 @@ void ggml_cpu_init(void) { uint16_t u16; ggml_fp16_t fp16; } u = {i}; - float f = GGML_FP16_TO_FP32(u.fp16); - ggml_table_gelu_f16[i] = GGML_FP32_TO_FP16(ggml_gelu_f32(f)); - ggml_table_gelu_quick_f16[i] = GGML_FP32_TO_FP16(ggml_gelu_quick_f32(f)); + float f = GGML_COMPUTE_FP16_TO_FP32(u.fp16); + ggml_table_f32_f16[i] = f; + ggml_table_gelu_f16[i] = GGML_CPU_FP32_TO_FP16(ggml_gelu_f32(f)); + ggml_table_gelu_quick_f16[i] = GGML_CPU_FP32_TO_FP16(ggml_gelu_quick_f32(f)); } const uint64_t t_end = ggml_time_us(); UNUSED(t_end); GGML_PRINT_DEBUG("%s: GELU, Quick GELU, SILU and EXP tables initialized in %f ms\n", __func__, (t_end - t_start)/1000.0); + +#ifdef GGML_USE_OPENMP + //if (!getenv("OMP_WAIT_POLICY")) { + // // set the wait policy to active, so that OpenMP threads don't sleep + // putenv("OMP_WAIT_POLICY=active"); + //} + + if (!getenv("KMP_BLOCKTIME")) { + // set the time to wait before sleeping a thread + // this is less aggressive than setting the wait policy to active, but should achieve similar results in most cases + putenv("KMP_BLOCKTIME=200"); // 200ms + } +#endif } #if defined(__ARM_ARCH) diff --git a/ml/backend/ggml/ggml/src/ggml-cpu/ggml-cpu.cpp b/ml/backend/ggml/ggml/src/ggml-cpu/ggml-cpu.cpp index e013e8b41..8dacd3671 100644 --- a/ml/backend/ggml/ggml/src/ggml-cpu/ggml-cpu.cpp +++ 
b/ml/backend/ggml/ggml/src/ggml-cpu/ggml-cpu.cpp @@ -1,8 +1,8 @@ #include "ggml-backend.h" #include "ggml-backend-impl.h" #include "ggml-cpu.h" -#include "ggml-cpu-aarch64.h" -#include "ggml-cpu-traits.h" +#include "repack.h" +#include "traits.h" #include "ggml-impl.h" #include "amx/amx.h" @@ -11,7 +11,7 @@ #include #ifdef GGML_USE_CPU_HBM -# include "ggml-cpu-hbm.h" +# include "hbm.h" #endif #ifdef GGML_USE_CPU_KLEIDIAI @@ -35,7 +35,7 @@ // ggml-backend interface -std::vector& ggml_backend_cpu_get_extra_buffers_type() { +std::vector & ggml_backend_cpu_get_extra_buffer_types() { static std::vector bufts = []() { std::vector bufts; @@ -51,14 +51,12 @@ std::vector& ggml_backend_cpu_get_extra_buffers_type } #endif -#ifdef GGML_USE_CPU_AARCH64 - if (ggml_backend_cpu_aarch64_buffer_type()) { - bufts.push_back(ggml_backend_cpu_aarch64_buffer_type()); +#ifdef GGML_USE_CPU_REPACK + if (ggml_backend_cpu_repack_buffer_type()) { + bufts.push_back(ggml_backend_cpu_repack_buffer_type()); } #endif - bufts.push_back(NULL); - return bufts; }(); @@ -66,14 +64,20 @@ std::vector& ggml_backend_cpu_get_extra_buffers_type } static ggml_backend_buffer_type_t * ggml_backend_cpu_device_get_extra_buffers_type(ggml_backend_dev_t device) { - return ggml_backend_cpu_get_extra_buffers_type().data(); + static std::vector extra_bufts = [] { + std::vector bufts = ggml_backend_cpu_get_extra_buffer_types(); + bufts.push_back(nullptr); + return bufts; + }(); + + return extra_bufts.data(); GGML_UNUSED(device); } static bool ggml_backend_cpu_is_extra_buffer_type(ggml_backend_buffer_type_t buft) { - for (auto * extra : ggml_backend_cpu_get_extra_buffers_type()) { - if (extra && extra == buft) { + for (auto * extra : ggml_backend_cpu_get_extra_buffer_types()) { + if (extra == buft) { return true; } } @@ -210,10 +214,10 @@ ggml_backend_t ggml_backend_cpu_init(void) { ctx->abort_callback_data = NULL; ggml_backend_t cpu_backend = new ggml_backend { - /* .guid = */ ggml_backend_cpu_guid(), - /* .interface = */ ggml_backend_cpu_i, - /* .device = */ ggml_backend_reg_dev_get(ggml_backend_cpu_reg(), 0), - /* .context = */ ctx, + /* .guid = */ ggml_backend_cpu_guid(), + /* .iface = */ ggml_backend_cpu_i, + /* .device = */ ggml_backend_reg_dev_get(ggml_backend_cpu_reg(), 0), + /* .context = */ ctx, }; if (cpu_backend == NULL) { @@ -397,25 +401,19 @@ static bool ggml_backend_cpu_device_supports_op(ggml_backend_dev_t dev, const st return true; } - // extra_buffer_op? - for (auto extra : ggml_backend_cpu_get_extra_buffers_type()) { - if (extra) { - auto buf_extra = (ggml::cpu::extra_buffer_type*) extra->context; - if (buf_extra && buf_extra->supports_op(dev, op)) { - return true; - } - } - } - - // the other case need host buffer. 
- for (int i = 0; i < GGML_MAX_SRC; i++) { - if (op->src[i] && op->src[i]->buffer && !ggml_backend_buft_is_host(op->src[i]->buffer->buft)) { - return false; + // check extra buffer types + // note: only the first 4 sources are checked for extra buffer types to reduce overhead; increase if necessary + for (int i = 0; i < 4; i++) { + if (op->src[i] && op->src[i]->buffer && + ggml_backend_cpu_is_extra_buffer_type(op->src[i]->buffer->buft)) { + auto * buf_extra = (ggml::cpu::extra_buffer_type *) op->src[i]->buffer->buft->context; + return buf_extra->supports_op(dev, op); } } switch (op->op) { case GGML_OP_CPY: + case GGML_OP_SET_ROWS: return op->type != GGML_TYPE_IQ3_XXS && op->type != GGML_TYPE_IQ3_S && @@ -578,6 +576,9 @@ static ggml_backend_feature * ggml_backend_cpu_get_features(ggml_backend_reg_t r if (ggml_cpu_has_vxe()) { features.push_back({ "VXE", "1" }); } + if (ggml_cpu_has_nnpa()) { + features.push_back({ "NNPA", "1" }); + } if (ggml_cpu_has_wasm_simd()) { features.push_back({ "WASM_SIMD", "1" }); } @@ -596,8 +597,8 @@ static ggml_backend_feature * ggml_backend_cpu_get_features(ggml_backend_reg_t r #ifdef GGML_USE_CPU_KLEIDIAI features.push_back({ "KLEIDIAI", "1" }); #endif - #ifdef GGML_USE_CPU_AARCH64 - features.push_back({ "AARCH64_REPACK", "1" }); + #ifdef GGML_USE_CPU_REPACK + features.push_back({ "REPACK", "1" }); #endif features.push_back({ nullptr, nullptr }); diff --git a/ml/backend/ggml/ggml/src/ggml-cpu/ggml-cpu-hbm.cpp b/ml/backend/ggml/ggml/src/ggml-cpu/hbm.cpp similarity index 98% rename from ml/backend/ggml/ggml/src/ggml-cpu/ggml-cpu-hbm.cpp rename to ml/backend/ggml/ggml/src/ggml-cpu/hbm.cpp index fa8dea2af..a4073c15e 100644 --- a/ml/backend/ggml/ggml/src/ggml-cpu/ggml-cpu-hbm.cpp +++ b/ml/backend/ggml/ggml/src/ggml-cpu/hbm.cpp @@ -5,7 +5,7 @@ #include "ggml-cpu.h" #include "ggml-impl.h" -#include "ggml-cpu-hbm.h" +#include "hbm.h" // buffer type HBM diff --git a/ml/backend/ggml/ggml/src/ggml-cpu/ggml-cpu-hbm.h b/ml/backend/ggml/ggml/src/ggml-cpu/hbm.h similarity index 100% rename from ml/backend/ggml/ggml/src/ggml-cpu/ggml-cpu-hbm.h rename to ml/backend/ggml/ggml/src/ggml-cpu/hbm.h diff --git a/ml/backend/ggml/ggml/src/ggml-cpu/llamafile/sgemm.cpp b/ml/backend/ggml/ggml/src/ggml-cpu/llamafile/sgemm.cpp index 1d46158f9..2be54c31b 100644 --- a/ml/backend/ggml/ggml/src/ggml-cpu/llamafile/sgemm.cpp +++ b/ml/backend/ggml/ggml/src/ggml-cpu/llamafile/sgemm.cpp @@ -52,8 +52,8 @@ #include "ggml-impl.h" #include "ggml-cpu-impl.h" #include "ggml-quants.h" +#include "simd-mappings.h" -#include #include #include @@ -63,7 +63,7 @@ #define NOINLINE __attribute__((__noinline__)) #endif -#if defined(__ARM_NEON) || defined(__AVX512F__) +#if defined(__ARM_NEON) || defined(__AVX512F__) || defined(__VXE__) || defined(__VXE2__) #define VECTOR_REGISTERS 32 #else #define VECTOR_REGISTERS 16 #endif @@ -74,7 +74,7 @@ namespace { inline float unhalf(ggml_fp16_t d) { - return GGML_FP16_TO_FP32(d); + return GGML_CPU_FP16_TO_FP32(d); } //////////////////////////////////////////////////////////////////////////////////////////////////// @@ -110,6 +110,12 @@ inline float16x8_t sub(float16x8_t x, float16x8_t y) { return vsubq_f16(x, y); } inline float16x8_t mul(float16x8_t x, float16x8_t y) { return vmulq_f16(x, y); } #endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC +#if defined(__VXE__) || defined(__VXE2__) +inline float32x4_t add(float32x4_t x, float32x4_t y) { return vec_add(x, y); } +inline float32x4_t sub(float32x4_t x, float32x4_t y) { return vec_sub(x, y); } +inline float32x4_t mul(float32x4_t x, 
float32x4_t y) { return vec_mul(x, y); } +#endif + + #if defined(__MMA__) typedef vector unsigned char vec_t; typedef __vector_quad acc_t; @@ -163,6 +169,13 @@ inline float16x8_t madd(float16x8_t a, float16x8_t b, float16x8_t c) { #endif #endif +#if defined(__VXE__) || defined(__VXE2__) +template <> +inline float32x4_t madd(float32x4_t a, float32x4_t b, float32x4_t c) { + return vec_madd(a, b, c); +} +#endif + //////////////////////////////////////////////////////////////////////////////////////////////////// // VECTORIZED HORIZONTAL SUM @@ -179,6 +192,13 @@ inline float hsum(float16x8_t x) { } #endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC +#if defined(__VXE__) || defined(__VXE2__) +inline float hsum(float32x4_t x) { + float32x4_t tmp = x + vec_reve(x); + return tmp[0] + tmp[1]; +} +#endif + #if defined(__SSE__) || defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) inline float hsum(__m128 x) { #if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) @@ -228,6 +248,21 @@ template <> inline float32x4_t load(const ggml_fp16_t *p) { #endif // _MSC_VER #endif // __ARM_NEON +#if defined(__VXE__) || defined(__VXE2__) +template <> inline float32x4_t load(const ggml_fp16_t * p) { + float tmp[4]; + + for (int i = 0; i < 4; i++) { + tmp[i] = GGML_CPU_FP16_TO_FP32(p[i]); + } + + return vec_xl(0, (const float *)(tmp)); +} +template <> inline float32x4_t load(const float * p) { + return vec_xl(0, p); +} +#endif + #if defined(__SSE__) || defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) template <> inline __m128 load(const float *p) { return _mm_loadu_ps(p); @@ -394,8 +429,6 @@ class tinyBLAS { template <int RM, int RN, int BM> NOINLINE void gemm(int64_t m, int64_t n, int64_t BN) { - static std::atomic<int64_t> current_chunk; - GGML_ASSERT(m % (RM * BM) == 0); const int64_t ytiles = m / (RM * BM); const int64_t xtiles = (n + RN -1) / RN; @@ -410,7 +443,7 @@ class tinyBLAS { if (params->ith == 0) { GGML_ASSERT( jj_BN * SIZE_BN + (NB_BN - jj_BN) * (SIZE_BN - 1) == xtiles); // Every thread starts at ith, so the first unprocessed chunk is nth. This saves a bit of coordination right at the start. - std::atomic_store_explicit(&current_chunk, (int64_t)params->nth, std::memory_order_relaxed); + ggml_threadpool_chunk_set(params->threadpool, params->nth); } ggml_barrier(params->threadpool); @@ -439,8 +472,7 @@ class tinyBLAS { GGML_ASSERT(jj == jj2); } - // next step. 
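// The loop around this point is a dynamically scheduled GEMM driver: thread ith
// seeds itself with chunk ith, and every further chunk index is claimed with an
// atomic fetch-add, so faster threads simply process more chunks. A minimal
// sketch of the idiom using the ggml_threadpool_chunk_* helpers added in
// ggml-cpu.c (process_chunk is a hypothetical stand-in for the tile kernel):
//
//     int64_t job = params->ith;
//     while (job < total_chunks) {
//         process_chunk(job);  // hypothetical
//         job = ggml_threadpool_chunk_add(params->threadpool, 1);
//     }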
- job = std::atomic_fetch_add_explicit(&current_chunk, (int64_t)1, std::memory_order_relaxed); + job = ggml_threadpool_chunk_add(params->threadpool, 1); } ggml_barrier(params->threadpool); @@ -1509,7 +1541,7 @@ class tinyBLAS_BF16_PPC { } else if constexpr(RM == 8 && RN == 4) { KERNEL_8x4(ii,jj); } else { - static_assert(false, "RN/RM values not supported"); + assert(false && "RN/RM values not supported"); } } @@ -1541,13 +1573,13 @@ class tinyBLAS_BF16_PPC { const int nth; }; -template +template class tinyBLAS_Q0_PPC { public: tinyBLAS_Q0_PPC(int64_t k, const TA *A, int64_t lda, - const TB *B, int64_t ldb, - TC *C, int64_t ldc, + const block_q8_0 *B, int64_t ldb, + float *C, int64_t ldc, int ith, int nth) : A(A), B(B), C(C), k(k), lda(lda), ldb(ldb), ldc(ldc), ith(ith), nth(nth) { } @@ -1558,8 +1590,7 @@ class tinyBLAS_Q0_PPC { private: - template - inline void save_res(int ii, int jj, int idx, vector float* fin_res) { + inline void save_res(int ii, int jj, int idx, vector float* fin_res, int RM=4, int RN=4) { for (int I = 0; I < RM; I++) { for (int J = 0; J < RN; J++) { *((float*)(C+ii+((jj+J)*ldc)+I)) = *((float*)&fin_res[idx+I]+J); @@ -1579,29 +1610,67 @@ class tinyBLAS_Q0_PPC { fin_res[s_idx+i] = vec_madd(res[i], vs[s_idx+i], fin_res[s_idx+i]); } } - - template - void packNormalInt4(const TA* a, int64_t lda, int rows, int cols, VA* vec, std::array& comparray) { - int64_t i, j; - TA *aoffset = NULL; - VA *vecOffset = NULL; - TA *aoffset1 = NULL, *aoffset2 = NULL, *aoffset3 = NULL, *aoffset4 = NULL; - TA *aoffset5 = NULL, *aoffset6 = NULL, *aoffset7 = NULL, *aoffset8 = NULL; - VB c1[2] = {0}, c2[2] = {0}, c3[2] = {0}, c4[2] = {0}; - VB c5[2] = {0}, c6[2] = {0}, c7[2] = {0}, c8[2] = {0}; - VB t1, t2, t3, t4, t5, t6, t7, t8; + /* This function processes quantized data from block_q4_0 elements. + * First we extract the two int4 values packed in each int8_t into two separate int8 values. + * Then we subtract 8 from each of the resulting elements to convert the unsigned 4-bit values to signed int8. + * We also compute the row sum, which is required to compensate for the above conversion. 
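+ * As an illustrative scalar sketch of one packed byte qs[j] (not part of the SIMD path):
+ * lo = (qs[j] & 0xF) - 8; // low nibble -> signed [-8, 7]
+ * hi = (qs[j] >> 4) - 8; // high nibble -> signed [-8, 7]
+ * rowsum += lo + hi; // accumulated into comparray for the compensation term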
*/ + inline void process_q4_elements(vector signed char (&c)[2], int* ca) { const vector signed char lowMask = vec_splats((signed char)0xF); const vector unsigned char v4 = vec_splats((unsigned char)0x4); const vector signed char v8 = vec_splats((signed char)0x8); - aoffset = const_cast(a); - vecOffset = vec; + vector signed int vsum = {0}; + vector signed int vsum2 = {0}; + c[0] = vec_and(c[1], lowMask); + c[1] = vec_sr(c[1], v4); + c[0] = vec_sub(c[0], v8); + c[1] = vec_sub(c[1], v8); + vsum = vec_sum4s(c[0], vsum); + vsum2 = vec_sum4s(c[1], vsum2); + vsum = vec_add(vsum, vsum2); + *(ca) = vsum[0] + vsum[1] + vsum[2] + vsum[3]; + } + + template + inline void vector_permute_store(V2 &s1, V2 &s2, V2 &s3, V2 &s4, V1 *vecOffset, bool flip) { vector unsigned char swiz1 = {0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23}; vector unsigned char swiz2 = {8, 9, 10, 11, 12, 13, 14, 15, 24, 25, 26, 27, 28, 29, 30, 31}; vector unsigned char swiz3 = {0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27}; vector unsigned char swiz4 = {4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31}; - vector signed int vsum = {0}; - vector signed int vsum2 = {0}; + V2 t1, t2, t3, t4, t5, t6, t7, t8; + vector unsigned char xor_vector; + uint8_t flip_vec = 0x80; + xor_vector = vec_splats(flip_vec); + t1 = vec_perm(s1, s2, swiz1); + t2 = vec_perm(s1, s2, swiz2); + t3 = vec_perm(s3, s4, swiz1); + t4 = vec_perm(s3, s4, swiz2); + t5 = vec_perm(t1, t3, swiz3); + t6 = vec_perm(t1, t3, swiz4); + t7 = vec_perm(t2, t4, swiz3); + t8 = vec_perm(t2, t4, swiz4); + if (flip == true) { + t5 = vec_xor(t5, xor_vector); + t6 = vec_xor(t6, xor_vector); + t7 = vec_xor(t7, xor_vector); + t8 = vec_xor(t8, xor_vector); + } + vec_xst(t5, 0, vecOffset); + vec_xst(t6, 0, vecOffset+16); + vec_xst(t7, 0, vecOffset+32); + vec_xst(t8, 0, vecOffset+48); + } + template + void packNormalInt4(const TA* a, int64_t lda, int rows, int cols, int8_t* vec, std::array& comparray) { + int64_t i, j; + TA *aoffset = NULL; + int8_t *vecOffset = NULL; + TA *aoffset1 = NULL, *aoffset2 = NULL, *aoffset3 = NULL, *aoffset4 = NULL; + TA *aoffset5 = NULL, *aoffset6 = NULL, *aoffset7 = NULL, *aoffset8 = NULL; + vector signed char c1[2] = {0}, c2[2] = {0}, c3[2] = {0}, c4[2] = {0}; + vector signed char c5[2] = {0}, c6[2] = {0}, c7[2] = {0}, c8[2] = {0}; + aoffset = const_cast(a); + vecOffset = vec; j = (rows >> 3); if (j > 0) { do { @@ -1614,159 +1683,30 @@ class tinyBLAS_Q0_PPC { aoffset7 = aoffset6 + lda; aoffset8 = aoffset7 + lda; aoffset += 8 * lda; - i = (cols >> 2); if (i > 0) { do { - c1[1] = reinterpret_cast(vec_xl(0, aoffset1->qs)); - c2[1] = reinterpret_cast(vec_xl(0, aoffset2->qs)); - c3[1] = reinterpret_cast(vec_xl(0, aoffset3->qs)); - c4[1] = reinterpret_cast(vec_xl(0, aoffset4->qs)); - c5[1] = reinterpret_cast(vec_xl(0, aoffset5->qs)); - c6[1] = reinterpret_cast(vec_xl(0, aoffset6->qs)); - c7[1] = reinterpret_cast(vec_xl(0, aoffset7->qs)); - c8[1] = reinterpret_cast(vec_xl(0, aoffset8->qs)); - - c1[0] = vec_and(c1[1], lowMask); - c1[1] = vec_sr(c1[1], v4); - c1[0] = vec_sub(c1[0], v8); - c1[1] = vec_sub(c1[1], v8); - vsum = vec_sum4s(c1[0], vsum); - vsum2 = vec_sum4s(c1[1], vsum2); - vsum = vec_add(vsum, vsum2); - comparray[0] = vsum[0] + vsum[1] + vsum[2] + vsum[3]; - vsum = vec_splats(0); - vsum2 = vec_splats(0); - - c2[0] = vec_and(c2[1], lowMask); - c2[1] = vec_sr(c2[1], v4); - c2[0] = vec_sub(c2[0], v8); - c2[1] = vec_sub(c2[1], v8); - vsum = vec_sum4s(c2[0], vsum); - vsum2 = vec_sum4s(c2[1], vsum2); - vsum = vec_add(vsum, vsum2); - 
comparray[1] = vsum[0] + vsum[1] + vsum[2] + vsum[3]; - vsum = vec_splats(0); - vsum2 = vec_splats(0); - - c3[0] = vec_and(c3[1], lowMask); - c3[1] = vec_sr(c3[1], v4); - c3[0] = vec_sub(c3[0], v8); - c3[1] = vec_sub(c3[1], v8); - vsum = vec_sum4s(c3[0], vsum); - vsum2 = vec_sum4s(c3[1], vsum2); - vsum = vec_add(vsum, vsum2); - comparray[2] = vsum[0] + vsum[1] + vsum[2] + vsum[3]; - vsum = vec_splats(0); - vsum2 = vec_splats(0); - - c4[0] = vec_and(c4[1], lowMask); - c4[1] = vec_sr(c4[1], v4); - c4[0] = vec_sub(c4[0], v8); - c4[1] = vec_sub(c4[1], v8); - vsum = vec_sum4s(c4[0], vsum); - vsum2 = vec_sum4s(c4[1], vsum2); - vsum = vec_add(vsum, vsum2); - comparray[3] = vsum[0] + vsum[1] + vsum[2] + vsum[3]; - vsum = vec_splats(0); - vsum2 = vec_splats(0); - - c5[0] = vec_and(c5[1], lowMask); - c5[1] = vec_sr(c5[1], v4); - c5[0] = vec_sub(c5[0], v8); - c5[1] = vec_sub(c5[1], v8); - vsum = vec_sum4s(c5[0], vsum); - vsum2 = vec_sum4s(c5[1], vsum2); - vsum = vec_add(vsum, vsum2); - comparray[4] = vsum[0] + vsum[1] + vsum[2] + vsum[3]; - vsum = vec_splats(0); - vsum2 = vec_splats(0); - - c6[0] = vec_and(c6[1], lowMask); - c6[1] = vec_sr(c6[1], v4); - c6[0] = vec_sub(c6[0], v8); - c6[1] = vec_sub(c6[1], v8); - vsum = vec_sum4s(c6[0], vsum); - vsum2 = vec_sum4s(c6[1], vsum2); - vsum = vec_add(vsum, vsum2); - comparray[5] = vsum[0] + vsum[1] + vsum[2] + vsum[3]; - vsum = vec_splats(0); - vsum2 = vec_splats(0); - - c7[0] = vec_and(c7[1], lowMask); - c7[1] = vec_sr(c7[1], v4); - c7[0] = vec_sub(c7[0], v8); - c7[1] = vec_sub(c7[1], v8); - vsum = vec_sum4s(c7[0], vsum); - vsum2 = vec_sum4s(c7[1], vsum2); - vsum = vec_add(vsum, vsum2); - comparray[6] = vsum[0] + vsum[1] + vsum[2] + vsum[3]; - vsum = vec_splats(0); - vsum2 = vec_splats(0); - - c8[0] = vec_and(c8[1], lowMask); - c8[1] = vec_sr(c8[1], v4); - c8[0] = vec_sub(c8[0], v8); - c8[1] = vec_sub(c8[1], v8); - vsum = vec_sum4s(c8[0], vsum); - vsum2 = vec_sum4s(c8[1], vsum2); - vsum = vec_add(vsum, vsum2); - comparray[7] = vsum[0] + vsum[1] + vsum[2] + vsum[3]; - vsum = vec_splats(0); - vsum2 = vec_splats(0); - - t1 = vec_perm(c1[0], c2[0], swiz1); - t2 = vec_perm(c1[0], c2[0], swiz2); - t3 = vec_perm(c3[0], c4[0], swiz1); - t4 = vec_perm(c3[0], c4[0], swiz2); - t5 = vec_perm(t1, t3, swiz3); - t6 = vec_perm(t1, t3, swiz4); - t7 = vec_perm(t2, t4, swiz3); - t8 = vec_perm(t2, t4, swiz4); - vec_xst(t5, 0, vecOffset); - vec_xst(t6, 0, vecOffset+16); - vec_xst(t7, 0, vecOffset+32); - vec_xst(t8, 0, vecOffset+48); - - t1 = vec_perm(c1[1], c2[1], swiz1); - t2 = vec_perm(c1[1], c2[1], swiz2); - t3 = vec_perm(c3[1], c4[1], swiz1); - t4 = vec_perm(c3[1], c4[1], swiz2); - t5 = vec_perm(t1, t3, swiz3); - t6 = vec_perm(t1, t3, swiz4); - t7 = vec_perm(t2, t4, swiz3); - t8 = vec_perm(t2, t4, swiz4); - vec_xst(t5, 0, vecOffset+64); - vec_xst(t6, 0, vecOffset+80); - vec_xst(t7, 0, vecOffset+96); - vec_xst(t8, 0, vecOffset+112); - - t1 = vec_perm(c5[0], c6[0], swiz1); - t2 = vec_perm(c5[0], c6[0], swiz2); - t3 = vec_perm(c7[0], c8[0], swiz1); - t4 = vec_perm(c7[0], c8[0], swiz2); - t5 = vec_perm(t1, t3, swiz3); - t6 = vec_perm(t1, t3, swiz4); - t7 = vec_perm(t2, t4, swiz3); - t8 = vec_perm(t2, t4, swiz4); - vec_xst(t5, 0, vecOffset+128); - vec_xst(t6, 0, vecOffset+144); - vec_xst(t7, 0, vecOffset+160); - vec_xst(t8, 0, vecOffset+176); - - t1 = vec_perm(c5[1], c6[1], swiz1); - t2 = vec_perm(c5[1], c6[1], swiz2); - t3 = vec_perm(c7[1], c8[1], swiz1); - t4 = vec_perm(c7[1], c8[1], swiz2); - t5 = vec_perm(t1, t3, swiz3); - t6 = vec_perm(t1, t3, swiz4); - t7 = vec_perm(t2, 
t4, swiz3); - t8 = vec_perm(t2, t4, swiz4); - vec_xst(t5, 0, vecOffset+192); - vec_xst(t6, 0, vecOffset+208); - vec_xst(t7, 0, vecOffset+224); - vec_xst(t8, 0, vecOffset+240); + c1[1] = reinterpret_cast(vec_xl(0, aoffset1->qs)); + c2[1] = reinterpret_cast(vec_xl(0, aoffset2->qs)); + c3[1] = reinterpret_cast(vec_xl(0, aoffset3->qs)); + c4[1] = reinterpret_cast(vec_xl(0, aoffset4->qs)); + c5[1] = reinterpret_cast(vec_xl(0, aoffset5->qs)); + c6[1] = reinterpret_cast(vec_xl(0, aoffset6->qs)); + c7[1] = reinterpret_cast(vec_xl(0, aoffset7->qs)); + c8[1] = reinterpret_cast(vec_xl(0, aoffset8->qs)); + process_q4_elements(c1, &comparray[0]); + process_q4_elements(c2, &comparray[1]); + process_q4_elements(c3, &comparray[2]); + process_q4_elements(c4, &comparray[3]); + process_q4_elements(c5, &comparray[4]); + process_q4_elements(c6, &comparray[5]); + process_q4_elements(c7, &comparray[6]); + process_q4_elements(c8, &comparray[7]); + vector_permute_store(c1[0], c2[0], c3[0], c4[0], vecOffset, false); + vector_permute_store(c1[1], c2[1], c3[1], c4[1], vecOffset+64, false); + vector_permute_store(c5[0], c6[0], c7[0], c8[0], vecOffset+128, false); + vector_permute_store(c5[1], c6[1], c7[1], c8[1], vecOffset+192, false); aoffset1 += lda; aoffset2 += lda; aoffset3 += lda; @@ -1789,85 +1729,20 @@ class tinyBLAS_Q0_PPC { aoffset3 = aoffset2 + lda; aoffset4 = aoffset3 + lda; aoffset += 4 * lda; - i = (cols >> 2); if (i > 0) { do { - c1[1] = reinterpret_cast(vec_xl(0, aoffset1->qs)); - c2[1] = reinterpret_cast(vec_xl(0, aoffset2->qs)); - c3[1] = reinterpret_cast(vec_xl(0, aoffset3->qs)); - c4[1] = reinterpret_cast(vec_xl(0, aoffset4->qs)); - - c1[0] = vec_and(c1[1], lowMask); - c1[1] = vec_sr(c1[1], v4); - c1[0] = vec_sub(c1[0], v8); - c1[1] = vec_sub(c1[1], v8); - vsum = vec_sum4s(c1[0], vsum); - vsum2 = vec_sum4s(c1[1], vsum2); - vsum = vec_add(vsum, vsum2); - comparray[0] = vsum[0] + vsum[1] + vsum[2] + vsum[3]; - vsum = vec_splats(0); - vsum2 = vec_splats(0); - - c2[0] = vec_and(c2[1], lowMask); - c2[1] = vec_sr(c2[1], v4); - c2[0] = vec_sub(c2[0], v8); - c2[1] = vec_sub(c2[1], v8); - vsum = vec_sum4s(c2[0], vsum); - vsum2 = vec_sum4s(c2[1], vsum2); - vsum = vec_add(vsum, vsum2); - comparray[1] = vsum[0] + vsum[1] + vsum[2] + vsum[3]; - vsum = vec_splats(0); - vsum2 = vec_splats(0); - - c3[0] = vec_and(c3[1], lowMask); - c3[1] = vec_sr(c3[1], v4); - c3[0] = vec_sub(c3[0], v8); - c3[1] = vec_sub(c3[1], v8); - vsum = vec_sum4s(c3[0], vsum); - vsum2 = vec_sum4s(c3[1], vsum2); - vsum = vec_add(vsum, vsum2); - comparray[2] = vsum[0] + vsum[1] + vsum[2] + vsum[3]; - vsum = vec_splats(0); - vsum2 = vec_splats(0); - - c4[0] = vec_and(c4[1], lowMask); - c4[1] = vec_sr(c4[1], v4); - c4[0] = vec_sub(c4[0], v8); - c4[1] = vec_sub(c4[1], v8); - vsum = vec_sum4s(c4[0], vsum); - vsum2 = vec_sum4s(c4[1], vsum2); - vsum = vec_add(vsum, vsum2); - comparray[3] = vsum[0] + vsum[1] + vsum[2] + vsum[3]; - vsum = vec_splats(0); - vsum2 = vec_splats( 0); - - t1 = vec_perm(c1[0], c2[0], swiz1); - t2 = vec_perm(c1[0], c2[0], swiz2); - t3 = vec_perm(c3[0], c4[0], swiz1); - t4 = vec_perm(c3[0], c4[0], swiz2); - t5 = vec_perm(t1, t3, swiz3); - t6 = vec_perm(t1, t3, swiz4); - t7 = vec_perm(t2, t4, swiz3); - t8 = vec_perm(t2, t4, swiz4); - vec_xst(t5, 0, vecOffset); - vec_xst(t6, 0, vecOffset+16); - vec_xst(t7, 0, vecOffset+32); - vec_xst(t8, 0, vecOffset+48); - - t1 = vec_perm(c1[1], c2[1], swiz1); - t2 = vec_perm(c1[1], c2[1], swiz2); - t3 = vec_perm(c3[1], c4[1], swiz1); - t4 = vec_perm(c3[1], c4[1], swiz2); - t5 = vec_perm(t1, t3, 
swiz3); - t6 = vec_perm(t1, t3, swiz4); - t7 = vec_perm(t2, t4, swiz3); - t8 = vec_perm(t2, t4, swiz4); - vec_xst(t5, 0, vecOffset+64); - vec_xst(t6, 0, vecOffset+80); - vec_xst(t7, 0, vecOffset+96); - vec_xst(t8, 0, vecOffset+112); + c1[1] = reinterpret_cast(vec_xl(0, aoffset1->qs)); + c2[1] = reinterpret_cast(vec_xl(0, aoffset2->qs)); + c3[1] = reinterpret_cast(vec_xl(0, aoffset3->qs)); + c4[1] = reinterpret_cast(vec_xl(0, aoffset4->qs)); + process_q4_elements(c1, &comparray[0]); + process_q4_elements(c2, &comparray[1]); + process_q4_elements(c3, &comparray[2]); + process_q4_elements(c4, &comparray[3]); + vector_permute_store(c1[0], c2[0], c3[0], c4[0], vecOffset, false); + vector_permute_store(c1[1], c2[1], c3[1], c4[1], vecOffset+64, false); aoffset1 += lda; aoffset2 += lda; aoffset3 += lda; @@ -1886,80 +1761,17 @@ class tinyBLAS_Q0_PPC { if (i > 0) { do { switch(rows) { - case 3: c3[1] = reinterpret_cast(vec_xl(0, aoffset3->qs)); - case 2: c2[1] = reinterpret_cast(vec_xl(0, aoffset2->qs)); - case 1: c1[1] = reinterpret_cast(vec_xl(0, aoffset1->qs)); + case 3: c3[1] = reinterpret_cast(vec_xl(0, aoffset3->qs)); + case 2: c2[1] = reinterpret_cast(vec_xl(0, aoffset2->qs)); + case 1: c1[1] = reinterpret_cast(vec_xl(0, aoffset1->qs)); break; } - c1[0] = vec_and(c1[1], lowMask); - c1[1] = vec_sr(c1[1], v4); - c1[0] = vec_sub(c1[0], v8); - c1[1] = vec_sub(c1[1], v8); - vsum = vec_sum4s(c1[0], vsum); - vsum2 = vec_sum4s(c1[1], vsum2); - vsum = vec_add(vsum, vsum2); - comparray[0] = vsum[0] + vsum[1] + vsum[2] + vsum[3]; - vsum = vec_splats(0); - vsum2 = vec_splats(0); - - c2[0] = vec_and(c2[1], lowMask); - c2[1] = vec_sr(c2[1], v4); - c2[0] = vec_sub(c2[0], v8); - c2[1] = vec_sub(c2[1], v8); - vsum = vec_sum4s(c2[0], vsum); - vsum2 = vec_sum4s(c2[1], vsum2); - vsum = vec_add(vsum, vsum2); - comparray[1] = vsum[0] + vsum[1] + vsum[2] + vsum[3]; - vsum = vec_splats(0); - vsum2 = vec_splats(0); - - c3[0] = vec_and(c3[1], lowMask); - c3[1] = vec_sr(c3[1], v4); - c3[0] = vec_sub(c3[0], v8); - c3[1] = vec_sub(c3[1], v8); - vsum = vec_sum4s(c3[0], vsum); - vsum2 = vec_sum4s(c3[1], vsum2); - vsum = vec_add(vsum, vsum2); - comparray[2] = vsum[0] + vsum[1] + vsum[2] + vsum[3]; - vsum = vec_splats(0); - vsum2 = vec_splats(0); - - c4[0] = vec_and(c4[1], lowMask); - c4[1] = vec_sr(c4[1], v4); - c4[0] = vec_sub(c4[0], v8); - c4[1] = vec_sub(c4[1], v8); - vsum = vec_sum4s(c4[0], vsum); - vsum2 = vec_sum4s(c4[1], vsum2); - vsum = vec_add(vsum, vsum2); - comparray[3] = vsum[0] + vsum[1] + vsum[2] + vsum[3]; - vsum = vec_splats(0); - vsum2 = vec_splats(0); - - t1 = vec_perm(c1[0], c2[0], swiz1); - t2 = vec_perm(c1[0], c2[0], swiz2); - t3 = vec_perm(c3[0], c4[0], swiz1); - t4 = vec_perm(c3[0], c4[0], swiz2); - t5 = vec_perm(t1, t3, swiz3); - t6 = vec_perm(t1, t3, swiz4); - t7 = vec_perm(t2, t4, swiz3); - t8 = vec_perm(t2, t4, swiz4); - vec_xst(t5, 0, vecOffset); - vec_xst(t6, 0, vecOffset+16); - vec_xst(t7, 0, vecOffset+32); - vec_xst(t8, 0, vecOffset+48); - - t1 = vec_perm(c1[1], c2[1], swiz1); - t2 = vec_perm(c1[1], c2[1], swiz2); - t3 = vec_perm(c3[1], c4[1], swiz1); - t4 = vec_perm(c3[1], c4[1], swiz2); - t5 = vec_perm(t1, t3, swiz3); - t6 = vec_perm(t1, t3, swiz4); - t7 = vec_perm(t2, t4, swiz3); - t8 = vec_perm(t2, t4, swiz4); - vec_xst(t5, 0, vecOffset+64); - vec_xst(t6, 0, vecOffset+80); - vec_xst(t7, 0, vecOffset+96); - vec_xst(t8, 0, vecOffset+112); + process_q4_elements(c1, &comparray[0]); + process_q4_elements(c2, &comparray[1]); + process_q4_elements(c3, &comparray[2]); + process_q4_elements(c4, 
&comparray[3]); + vector_permute_store(c1[0], c2[0], c3[0], c4[0], vecOffset, false); + vector_permute_store(c1[1], c2[1], c3[1], c4[1], vecOffset+64, false); aoffset1 += lda; aoffset2 += lda; aoffset3 += lda; @@ -1969,146 +1781,40 @@ class tinyBLAS_Q0_PPC { } } } - template<typename VA, typename VB> - void packNormal(const TB* a, int64_t lda, int rows, int cols, VA* vec, bool flip) { + void packNormal(const block_q8_0* a, int64_t lda, int rows, int cols, VA* vec, bool flip) { int64_t i, j; - TB *aoffset = NULL; + block_q8_0 *aoffset = NULL; VA *vecOffset = NULL; - TB *aoffset1 = NULL, *aoffset2 = NULL, *aoffset3 = NULL, *aoffset4 = NULL; - TB *aoffset5 = NULL, *aoffset6 = NULL, *aoffset7 = NULL, *aoffset8 = NULL; - __vector_pair C1, C2, C3, C4, C5, C6, C7, C8; - VB c1[2] = {0}, c2[2] = {0}, c3[2] = {0}, c4[2]={0}; - VB c5[2] = {0}, c6[2] = {0}, c7[2] = {0}, c8[2]={0}; - VB t1, t2, t3, t4, t5, t6, t7, t8; - vector unsigned char xor_vector; - uint8_t flip_vec = 0x80; - xor_vector = vec_splats(flip_vec); - vector unsigned char swiz1 = {0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23}; - vector unsigned char swiz2 = {8, 9, 10, 11, 12, 13, 14, 15, 24, 25, 26, 27, 28, 29, 30, 31}; - vector unsigned char swiz3 = {0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27}; - vector unsigned char swiz4 = {4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31}; - - aoffset = const_cast<TB*>(a); + block_q8_0* aoffsets[8]; + __vector_pair arr[8]; + VB c[8][2] = {0}; + VB c1[8] = {0}; VB c2[8] = {0}; + aoffset = const_cast<block_q8_0*>(a); vecOffset = vec; j = (rows >> 3); if (j > 0) { do { - aoffset1 = aoffset; - aoffset2 = aoffset1 + lda; - aoffset3 = aoffset2 + lda; - aoffset4 = aoffset3 + lda; - aoffset5 = aoffset4 + lda; - aoffset6 = aoffset5 + lda; - aoffset7 = aoffset6 + lda; - aoffset8 = aoffset7 + lda; + aoffsets[0] = aoffset; + for (int it = 1; it < 8; it++) + aoffsets[it] = aoffsets[it-1] + lda; aoffset += 8 * lda; i = (cols >> 3); if (i > 0) { do { - C1 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset1->qs); - C2 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset2->qs); - C3 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset3->qs); - C4 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset4->qs); - C5 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset5->qs); - C6 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset6->qs); - C7 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset7->qs); - C8 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset8->qs); - - __builtin_vsx_disassemble_pair(c1, &C1); - __builtin_vsx_disassemble_pair(c2, &C2); - __builtin_vsx_disassemble_pair(c3, &C3); - __builtin_vsx_disassemble_pair(c4, &C4); - __builtin_vsx_disassemble_pair(c5, &C5); - __builtin_vsx_disassemble_pair(c6, &C6); - __builtin_vsx_disassemble_pair(c7, &C7); - __builtin_vsx_disassemble_pair(c8, &C8); - - t1 = vec_perm(c1[0], c2[0], swiz1); - t2 = vec_perm(c1[0], c2[0], swiz2); - t3 = vec_perm(c3[0], c4[0], swiz1); - t4 = vec_perm(c3[0], c4[0], swiz2); - t5 = vec_perm(t1, t3, swiz3); - t6 = vec_perm(t1, t3, swiz4); - t7 = vec_perm(t2, t4, swiz3); - t8 = vec_perm(t2, t4, swiz4); - if (flip == true) { - t5 = vec_xor(t5, xor_vector); - t6 = vec_xor(t6, xor_vector); - t7 = vec_xor(t7, xor_vector); - t8 = vec_xor(t8, xor_vector); + for (int it = 0; it < 8; it++) { + arr[it] = __builtin_vsx_lxvp(0, (__vector_pair*)aoffsets[it]->qs); + __builtin_vsx_disassemble_pair(c[it], &arr[it]); + c1[it] = c[it][0]; + c2[it] = c[it][1]; } - vec_xst(t5, 0, vecOffset); - vec_xst(t6, 0, vecOffset+16); - vec_xst(t7, 0, vecOffset+32); - vec_xst(t8, 0, vecOffset+48); - - t1 = 
vec_perm(c1[1], c2[1], swiz1); - t2 = vec_perm(c1[1], c2[1], swiz2); - t3 = vec_perm(c3[1], c4[1], swiz1); - t4 = vec_perm(c3[1], c4[1], swiz2); - t5 = vec_perm(t1, t3, swiz3); - t6 = vec_perm(t1, t3, swiz4); - t7 = vec_perm(t2, t4, swiz3); - t8 = vec_perm(t2, t4, swiz4); - if (flip == true) { - t5 = vec_xor(t5, xor_vector); - t6 = vec_xor(t6, xor_vector); - t7 = vec_xor(t7, xor_vector); - t8 = vec_xor(t8, xor_vector); - } - vec_xst(t5, 0, vecOffset+64); - vec_xst(t6, 0, vecOffset+80); - vec_xst(t7, 0, vecOffset+96); - vec_xst(t8, 0, vecOffset+112); - - t1 = vec_perm(c5[0], c6[0], swiz1); - t2 = vec_perm(c5[0], c6[0], swiz2); - t3 = vec_perm(c7[0], c8[0], swiz1); - t4 = vec_perm(c7[0], c8[0], swiz2); - t5 = vec_perm(t1, t3, swiz3); - t6 = vec_perm(t1, t3, swiz4); - t7 = vec_perm(t2, t4, swiz3); - t8 = vec_perm(t2, t4, swiz4); - if (flip == true) { - t5 = vec_xor(t5, xor_vector); - t6 = vec_xor(t6, xor_vector); - t7 = vec_xor(t7, xor_vector); - t8 = vec_xor(t8, xor_vector); - } - vec_xst(t5, 0, vecOffset+128); - vec_xst(t6, 0, vecOffset+144); - vec_xst(t7, 0, vecOffset+160); - vec_xst(t8, 0, vecOffset+176); - - t1 = vec_perm(c5[1], c6[1], swiz1); - t2 = vec_perm(c5[1], c6[1], swiz2); - t3 = vec_perm(c7[1], c8[1], swiz1); - t4 = vec_perm(c7[1], c8[1], swiz2); - t5 = vec_perm(t1, t3, swiz3); - t6 = vec_perm(t1, t3, swiz4); - t7 = vec_perm(t2, t4, swiz3); - t8 = vec_perm(t2, t4, swiz4); - if (flip == true) { - t5 = vec_xor(t5, xor_vector); - t6 = vec_xor(t6, xor_vector); - t7 = vec_xor(t7, xor_vector); - t8 = vec_xor(t8, xor_vector); - } - vec_xst(t5, 0, vecOffset+192); - vec_xst(t6, 0, vecOffset+208); - vec_xst(t7, 0, vecOffset+224); - vec_xst(t8, 0, vecOffset+240); - - aoffset1 += lda; - aoffset2 += lda; - aoffset3 += lda; - aoffset4 += lda; - aoffset5 += lda; - aoffset6 += lda; - aoffset7 += lda; - aoffset8 += lda; + vector_permute_store(c1[0], c1[1], c1[2], c1[3], vecOffset, flip); + vector_permute_store(c2[0], c2[1], c2[2], c2[3], vecOffset+64, flip); + vector_permute_store(c1[4], c1[5], c1[6], c1[7], vecOffset+128, flip); + vector_permute_store(c2[4], c2[5], c2[6], c2[7], vecOffset+192, flip); + for (int it = 0; it < 8; it++) + aoffsets[it] += lda; vecOffset += 256; i--; } while(i > 0); @@ -2118,129 +1824,53 @@ class tinyBLAS_Q0_PPC { } if (rows & 4) { - aoffset1 = aoffset; - aoffset2 = aoffset1 + lda; - aoffset3 = aoffset2 + lda; - aoffset4 = aoffset3 + lda; - aoffset += 4 * lda; - + aoffsets[0] = aoffset; + for (int it = 1; it < 4; it++ ) + aoffsets[it] = aoffsets[it-1] + lda; + aoffset += 4 * lda; i = (cols >> 3); if (i > 0) { do { - C1 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset1->qs); - C2 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset2->qs); - C3 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset3->qs); - C4 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset4->qs); - - __builtin_vsx_disassemble_pair(c1, &C1); - __builtin_vsx_disassemble_pair(c2, &C2); - __builtin_vsx_disassemble_pair(c3, &C3); - __builtin_vsx_disassemble_pair(c4, &C4); - - t1 = vec_perm(c1[0], c2[0], swiz1); - t2 = vec_perm(c1[0], c2[0], swiz2); - t3 = vec_perm(c3[0], c4[0], swiz1); - t4 = vec_perm(c3[0], c4[0], swiz2); - t5 = vec_perm(t1, t3, swiz3); - t6 = vec_perm(t1, t3, swiz4); - t7 = vec_perm(t2, t4, swiz3); - t8 = vec_perm(t2, t4, swiz4); - if (flip == true) { - t5 = vec_xor(t5, xor_vector); - t6 = vec_xor(t6, xor_vector); - t7 = vec_xor(t7, xor_vector); - t8 = vec_xor(t8, xor_vector); + for (int it = 0; it < 4; it++) { + arr[it] = __builtin_vsx_lxvp(0, (__vector_pair*)aoffsets[it]->qs); + 
__builtin_vsx_disassemble_pair(c[it], &arr[it]); + c1[it] = c[it][0]; + c2[it] = c[it][1]; } - vec_xst(t5, 0, vecOffset); - vec_xst(t6, 0, vecOffset+16); - vec_xst(t7, 0, vecOffset+32); - vec_xst(t8, 0, vecOffset+48); - - t1 = vec_perm(c1[1], c2[1], swiz1); - t2 = vec_perm(c1[1], c2[1], swiz2); - t3 = vec_perm(c3[1], c4[1], swiz1); - t4 = vec_perm(c3[1], c4[1], swiz2); - t5 = vec_perm(t1, t3, swiz3); - t6 = vec_perm(t1, t3, swiz4); - t7 = vec_perm(t2, t4, swiz3); - t8 = vec_perm(t2, t4, swiz4); - if (flip == true) { - t5 = vec_xor(t5, xor_vector); - t6 = vec_xor(t6, xor_vector); - t7 = vec_xor(t7, xor_vector); - t8 = vec_xor(t8, xor_vector); + vector_permute_store(c1[0], c1[1], c1[2], c1[3], vecOffset, flip); + vector_permute_store(c2[0], c2[1], c2[2], c2[3], vecOffset+64, flip); + for (int it = 0; it < 4; it++) { + aoffsets[it] += lda; } - vec_xst(t5, 0, vecOffset+64); - vec_xst(t6, 0, vecOffset+80); - vec_xst(t7, 0, vecOffset+96); - vec_xst(t8, 0, vecOffset+112); - - aoffset1 += lda; - aoffset2 += lda; - aoffset3 += lda; - aoffset4 += lda; vecOffset += 128; i--; } while(i > 0); } } + if (rows & 3) { - aoffset1 = aoffset; - aoffset2 = aoffset1 + lda; - aoffset3 = aoffset2 + lda; + aoffsets[0] = aoffset; + for (int it = 1; it < 3; it++ ) + aoffsets[it] = aoffsets[it-1] + lda; i = (cols >> 3); if (i > 0) { do { switch(rows) { - case 3: C3 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset3->qs); - __builtin_vsx_disassemble_pair(c3, &C3); - case 2: C2 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset2->qs); - __builtin_vsx_disassemble_pair(c2, &C2); - case 1: C1 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset1->qs); - __builtin_vsx_disassemble_pair(c1, &C1); + case 3: arr[2] = __builtin_vsx_lxvp(0, (__vector_pair*)aoffsets[2]->qs); + __builtin_vsx_disassemble_pair(c[2], &arr[2]); + c1[2] = c[2][0]; c2[2] = c[2][1]; + case 2: arr[1] = __builtin_vsx_lxvp(0, (__vector_pair*)aoffsets[1]->qs); + __builtin_vsx_disassemble_pair(c[1], &arr[1]); + c1[1] = c[1][0]; c2[1] = c[1][1]; + case 1: arr[0] = __builtin_vsx_lxvp(0, (__vector_pair*)aoffsets[0]->qs); + __builtin_vsx_disassemble_pair(c[0], &arr[0]); + c1[0] = c[0][0]; c2[0] = c[0][1]; break; } - t1 = vec_perm(c1[0], c2[0], swiz1); - t2 = vec_perm(c1[0], c2[0], swiz2); - t3 = vec_perm(c3[0], c4[0], swiz1); - t4 = vec_perm(c3[0], c4[0], swiz2); - t5 = vec_perm(t1, t3, swiz3); - t6 = vec_perm(t1, t3, swiz4); - t7 = vec_perm(t2, t4, swiz3); - t8 = vec_perm(t2, t4, swiz4); - if (flip == true) { - t5 = vec_xor(t5, xor_vector); - t6 = vec_xor(t6, xor_vector); - t7 = vec_xor(t7, xor_vector); - t8 = vec_xor(t8, xor_vector); - } - vec_xst(t5, 0, vecOffset); - vec_xst(t6, 0, vecOffset+16); - vec_xst(t7, 0, vecOffset+32); - vec_xst(t8, 0, vecOffset+48); - - t1 = vec_perm(c1[1], c2[1], swiz1); - t2 = vec_perm(c1[1], c2[1], swiz2); - t3 = vec_perm(c3[1], c4[1], swiz1); - t4 = vec_perm(c3[1], c4[1], swiz2); - t5 = vec_perm(t1, t3, swiz3); - t6 = vec_perm(t1, t3, swiz4); - t7 = vec_perm(t2, t4, swiz3); - t8 = vec_perm(t2, t4, swiz4); - if (flip == true) { - t5 = vec_xor(t5, xor_vector); - t6 = vec_xor(t6, xor_vector); - t7 = vec_xor(t7, xor_vector); - t8 = vec_xor(t8, xor_vector); - } - vec_xst(t5, 0, vecOffset+64); - vec_xst(t6, 0, vecOffset+80); - vec_xst(t7, 0, vecOffset+96); - vec_xst(t8, 0, vecOffset+112); - - aoffset1 += lda; - aoffset2 += lda; - aoffset3 += lda; + vector_permute_store(c1[0], c1[1], c1[2], c1[3], vecOffset, flip); + vector_permute_store(c2[0], c2[1], c2[2], c2[3], vecOffset+64, flip); + for (int it = 0; it < 3; it++) + aoffsets[it] += lda; 
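// A compact scalar model of what vector_permute_store(v0, v1, v2, v3, dst, flip)
// is assumed to do, inferred from the vec_perm/vec_xst sequences it replaces in
// these hunks: treat the four 16-byte inputs as the rows of a 4x4 matrix of
// 4-byte groups, transpose it, optionally XOR every byte with 0x80 (the 'flip'
// used when packing the uint8 B operand for the MMA kernels), and store the
// result as 64 contiguous bytes. The _ref name and plain-array view are
// illustrative only, not upstream API.
static void vector_permute_store_ref(const uint8_t v[4][16], uint8_t *dst, bool flip) {
    for (int col = 0; col < 4; ++col) {          // one 16-byte output chunk per 4-byte column group
        for (int row = 0; row < 4; ++row) {      // one source vector per row
            for (int b = 0; b < 4; ++b) {        // bytes inside the 4-byte group
                uint8_t x = v[row][4*col + b];
                dst[16*col + 4*row + b] = flip ? (uint8_t)(x ^ 0x80) : x; // mirrors vec_xor(..., xor_vector)
            }
        }
    }
}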
vecOffset += 128; i--; } while(i > 0); @@ -2249,159 +1879,42 @@ class tinyBLAS_Q0_PPC { } void mnpack(int64_t m0, int64_t m, int64_t n0, int64_t n) { - int64_t mc, nc, mp, np; - int m_rem = MIN(m - m0, 8); - int n_rem = MIN(n - n0, 8); - // TO-DO: KERNEL_16x8 and KERNEL_8x16 are having some performance - // issues. After resolving them, below code will be enabled. - /*if (m_rem >= 16 && n_rem >= 8) { - mc = 16; - nc = 8; - gemm<16,8>(m0, m, n0, n); - } else if(m_rem >= 8 && n_rem >= 16) { - mc = 8; - nc = 16; - gemm<8,16>(m0, m, n0, n); - }*/ + int m_rem = MIN(m - m0, 16); + int n_rem = MIN(n - n0, 16); + + int mc = 0, nc = 0; + if (m_rem >= 8 && n_rem >= 8) { - mc = 8; - nc = 8; - gemm<8,8>(m0, m, n0, n); + mc = 8; + nc = 8; + gemm<8, 8>(m0, m, n0, n); } else if (m_rem >= 4 && n_rem >= 8) { mc = 4; nc = 8; - gemm<4,8>(m0, m, n0, n); + gemm<4, 8>(m0, m, n0, n); } else if (m_rem >= 8 && n_rem >= 4) { mc = 8; nc = 4; - gemm<8,4>(m0, m, n0, n); + gemm<8, 4>(m0, m, n0, n); } else if (m_rem >= 4 && n_rem >= 4) { mc = 4; nc = 4; - gemm_small<4, 4>(m0, m, n0, n); - } else if ((m_rem < 4) && (n_rem > 4)) { - nc = 4; - switch(m_rem) { - case 1: - mc = 1; - gemm_small<1, 4>(m0, m, n0, n); - break; - case 2: - mc = 2; - gemm_small<2, 4>(m0, m, n0, n); - break; - case 3: - mc = 3; - gemm_small<3, 4>(m0, m, n0, n); - break; - default: - return; - } - } else if ((m_rem > 4) && (n_rem < 4)) { - mc = 4; - switch(n_rem) { - case 1: - nc = 1; - gemm_small<4, 1>(m0, m, n0, n); - break; - case 2: - nc = 2; - gemm_small<4, 2>(m0, m, n0, n); - break; - case 3: - nc = 3; - gemm_small<4, 3>(m0, m, n0, n); - break; - default: - return; - } + gemm_small(m0, m, n0, n, mc, nc); } else { - switch((m_rem << 4) | n_rem) { - case 0x43: - mc = 4; - nc = 3; - gemm_small<4, 3>(m0, m, n0, n); - break; - case 0x42: - mc = 4; - nc = 2; - gemm_small<4, 2>(m0, m, n0, n); - break; - case 0x41: - mc = 4; - nc = 1; - gemm_small<4, 1>(m0, m, n0, n); - break; - case 0x34: - mc = 3; - nc = 4; - gemm_small<3, 4>(m0, m, n0, n); - break; - case 0x33: - mc = 3; - nc = 3; - gemm_small<3, 3>(m0, m, n0, n); - break; - case 0x32: - mc = 3; - nc = 2; - gemm_small<3, 2>(m0, m, n0, n); - break; - case 0x31: - mc = 3; - nc = 1; - gemm_small<3, 1>(m0, m, n0, n); - break; - case 0x24: - mc = 2; - nc = 4; - gemm_small<2, 4>(m0, m, n0, n); - break; - case 0x23: - mc = 2; - nc = 3; - gemm_small<2, 3>(m0, m, n0, n); - break; - case 0x22: - mc = 2; - nc = 2; - gemm_small<2, 2>(m0, m, n0, n); - break; - case 0x21: - mc = 2; - nc = 1; - gemm_small<2, 1>(m0, m, n0, n); - break; - case 0x14: - mc = 1; - nc = 4; - gemm_small<1, 4>(m0, m, n0, n); - break; - case 0x13: - mc = 1; - nc = 3; - gemm_small<1, 3>(m0, m, n0, n); - break; - case 0x12: - mc = 1; - nc = 2; - gemm_small<1, 2>(m0, m, n0, n); - break; - case 0x11: - mc = 1; - nc = 1; - gemm_small<1, 1>(m0, m, n0, n); - break; - default: - return; - } + mc = (m_rem >= 4) ? 4 : m_rem; + nc = (n_rem >= 4) ? 
4 : n_rem; + if (mc == 0 || nc == 0) + return; + gemm_small(m0, m, n0, n, mc, nc); } - mp = m0 + (m - m0) / mc * mc; - np = n0 + (n - n0) / nc * nc; + + int64_t mp = m0 + ((m - m0) / mc) * mc; + int64_t np = n0 + ((n - n0) / nc) * nc; mnpack(mp, m, n0, np); mnpack(m0, m, np, n); } + void KERNEL_4x8(int64_t ii, int64_t jj) { vec_t vec_A[8], vec_B[16] = {0}; acc_t acc_0, acc_1; @@ -2413,9 +1926,9 @@ class tinyBLAS_Q0_PPC { __builtin_mma_xxsetaccz(&acc_0); __builtin_mma_xxsetaccz(&acc_1); if (std::is_same_v<TA, block_q4_0>) { - packNormalInt4<int8_t, vector signed char, 4>((A+(ii*lda)+l), lda, 4, 4, (int8_t*)vec_A, comparray); + packNormalInt4<4>((A+(ii*lda)+l), lda, 4, 4, (int8_t*)vec_A, comparray); } else { - packNormal<int8_t, vector signed char>((const TB*)(A+(ii*lda)+l), lda, 4, 8, (int8_t*)vec_A, false); + packNormal<int8_t, vector signed char>((const block_q8_0*)(A+(ii*lda)+l), lda, 4, 8, (int8_t*)vec_A, false); } packNormal<uint8_t, vector unsigned char>((B+(jj*ldb)+l), ldb, 8, 8, (uint8_t*)vec_B, true); for(int x = 0; x < 8; x++) { @@ -2443,8 +1956,8 @@ class tinyBLAS_Q0_PPC { compute<4>(&acc_0, 0, 0, comparray, vs, fin_res); compute<4>(&acc_1, 0, 4, comparray, vs, fin_res); } - save_res<4, 4>(ii, jj, 0, fin_res); - save_res<4, 4>(ii, jj+4, 4, fin_res); + save_res(ii, jj, 0, fin_res); + save_res(ii, jj+4, 4, fin_res); } void KERNEL_8x4(int64_t ii, int64_t jj) { @@ -2458,9 +1971,9 @@ class tinyBLAS_Q0_PPC { __builtin_mma_xxsetaccz(&acc_0); __builtin_mma_xxsetaccz(&acc_1); if (std::is_same_v<TA, block_q4_0>) { - packNormalInt4<int8_t, vector signed char, 8>((A+(ii*lda)+l), lda, 8, 4, (int8_t*)vec_A, comparray); + packNormalInt4<8>((A+(ii*lda)+l), lda, 8, 4, (int8_t*)vec_A, comparray); } else { - packNormal<int8_t, vector signed char>((const TB*)(A+(ii*lda)+l), lda, 8, 8, (int8_t*)vec_A, false); + packNormal<int8_t, vector signed char>((const block_q8_0*)(A+(ii*lda)+l), lda, 8, 8, (int8_t*)vec_A, false); } packNormal<uint8_t, vector unsigned char>((B+(jj*ldb)+l), ldb, 4, 8, (uint8_t*)vec_B, true); for(int x = 0; x < 8; x++) { @@ -2487,8 +2000,8 @@ class tinyBLAS_Q0_PPC { compute<8>(&acc_0, 0, 0, comparray, vs, fin_res); compute<8>(&acc_1, 4, 4, comparray, vs, fin_res); } - save_res<4, 4>(ii, jj, 0, fin_res); - save_res<4, 4>(ii+4, jj, 4, fin_res); + save_res(ii, jj, 0, fin_res); + save_res(ii+4, jj, 4, fin_res); } void KERNEL_8x8(int64_t ii, int64_t jj) { @@ -2504,9 +2017,9 @@ class tinyBLAS_Q0_PPC { __builtin_mma_xxsetaccz(&acc_2); __builtin_mma_xxsetaccz(&acc_3); if (std::is_same_v<TA, block_q4_0>) { - packNormalInt4<int8_t, vector signed char, 8>((A+(ii*lda)+l), lda, 8, 4, (int8_t*)vec_A, comparray); + packNormalInt4<8>((A+(ii*lda)+l), lda, 8, 4, (int8_t*)vec_A, comparray); } else { - packNormal<int8_t, vector signed char>((const TB*)(A+(ii*lda)+l), lda, 8, 8, (int8_t*)vec_A, false); + packNormal<int8_t, vector signed char>((const block_q8_0*)(A+(ii*lda)+l), lda, 8, 8, (int8_t*)vec_A, false); } packNormal<uint8_t, vector unsigned char>((B+(jj*ldb)+l), ldb, 8, 8, (uint8_t*)vec_B, true); for(int x = 0; x < 8; x++) { @@ -2538,14 +2051,13 @@ class tinyBLAS_Q0_PPC { compute<8>(&acc_2, 0, 8, comparray, vs, fin_res); compute<8>(&acc_3, 4, 12, comparray, vs, fin_res); } - save_res<4, 4>(ii, jj, 0, fin_res); - save_res<4, 4>(ii+4, jj, 4, fin_res); - save_res<4, 4>(ii, jj+4, 8, fin_res); - save_res<4, 4>(ii+4, jj+4, 12, fin_res); + save_res(ii, jj, 0, fin_res); + save_res(ii+4, jj, 4, fin_res); + save_res(ii, jj+4, 8, fin_res); + save_res(ii+4, jj+4, 12, fin_res); } - template<int RM, int RN> - void gemm_small(int64_t m0, int64_t m, int64_t n0, int64_t n) { + void gemm_small(int64_t m0, int64_t m, int64_t n0, int64_t n, int RM, int RN) { int64_t ytiles = (m - m0) / RM; int64_t xtiles = (n - n0) / RN; int64_t tiles = xtiles * ytiles; @@ -2574,9 +2086,9 @@ class tinyBLAS_Q0_PPC { __builtin_prefetch((B+(jj*ldb)+(l+1))->qs, 0, 1); // prefetch one loop ahead __builtin_mma_xxsetaccz(&acc_0); if (isAblock_q4) 
{ - packNormalInt4<int8_t, vector signed char, 4>((A+(ii*lda)+l), lda, RM, 4, (int8_t*)vec_A, comparray); + packNormalInt4<4>((A+(ii*lda)+l), lda, RM, 4, (int8_t*)vec_A, comparray); } else { - packNormal<int8_t, vector signed char>((const TB*)(A+(ii*lda)+l), lda, RM, 8, (int8_t*)vec_A, false); + packNormal<int8_t, vector signed char>((const block_q8_0*)(A+(ii*lda)+l), lda, RM, 8, (int8_t*)vec_A, false); } packNormal<uint8_t, vector unsigned char>((B+(jj*ldb)+l), ldb, RN, 8, (uint8_t*)vec_B, true); for(int x = 0; x < 8; x+=4) { @@ -2609,7 +2121,7 @@ class tinyBLAS_Q0_PPC { fin_res[i] = vec_madd(res[i], vs[i], fin_res[i]); } } - save_res<RM, RN>(ii, jj, 0, fin_res); + save_res(ii, jj, 0, fin_res, RM, RN); } } @@ -2622,7 +2134,7 @@ class tinyBLAS_Q0_PPC { } else if constexpr(RM == 8 && RN == 8) { KERNEL_8x8(ii,jj); } else { - static_assert(false, "RN/RM values not supported"); + assert(false && "RN/RM values not supported"); } } @@ -2644,10 +2156,8 @@ class tinyBLAS_Q0_PPC { } const TA *const A; - const TB *const B; - TC *C; - TA *At; - TB *Bt; + const block_q8_0 *const B; + float *C; const int64_t k; const int64_t lda; const int64_t ldb; @@ -2656,13 +2166,12 @@ class tinyBLAS_Q0_PPC { const int nth; }; -template <typename TA, typename TB, typename TC> class tinyBLAS_PPC { public: tinyBLAS_PPC(int64_t k, - const TA *A, int64_t lda, - const TB *B, int64_t ldb, - TC *C, int64_t ldc, + const float *A, int64_t lda, + const float *B, int64_t ldb, + float *C, int64_t ldc, int ith, int nth) : A(A), B(B), C(C), k(k), lda(lda), ldb(ldb), ldc(ldc), ith(ith), nth(nth) { } @@ -2675,247 +2184,139 @@ class tinyBLAS_PPC { void (tinyBLAS_PPC::*kernel)(int64_t, int64_t); - template<typename VA> - void packTranspose(const TA* a, int64_t lda, int rows, int cols, TA* vec) { + inline void vector_permute_store_4(vector float *src, float *vecOffset) { + vector float t1, t2, t3, t4, t5, t6, t7, t8; + t1 = vec_mergeh(src[0], src[1]); + t2 = vec_mergeh(src[2], src[3]); + t3 = vec_mergel(src[0], src[1]); + t4 = vec_mergel(src[2], src[3]); + + t5 = vec_xxpermdi(t1, t2, 0); + t6 = vec_xxpermdi(t1, t2, 3); + t7 = vec_xxpermdi(t3, t4, 0); + t8 = vec_xxpermdi(t3, t4, 3); + + vec_xst(t5, 0, vecOffset); + vec_xst(t6, 0, vecOffset + 4); + vec_xst(t7, 0, vecOffset + 8); + vec_xst(t8, 0, vecOffset + 12); + } + + inline void vector_permute_store_8(vector float *src, float *vecOffset) { + vector float t1, t2, t3, t4, t5, t6, t7, t8; + t1 = vec_mergeh(src[0], src[1]); + t2 = vec_mergeh(src[2], src[3]); + t3 = vec_mergeh(src[4], src[5]); + t4 = vec_mergeh(src[6], src[7]); + + t5 = vec_xxpermdi(t1, t2, 0); + t6 = vec_xxpermdi(t3, t4, 0); + t7 = vec_xxpermdi(t1, t2, 3); + t8 = vec_xxpermdi(t3, t4, 3); + + vec_xst(t5, 0, vecOffset); + vec_xst(t6, 0, vecOffset + 4); + vec_xst(t7, 0, vecOffset + 8); + vec_xst(t8, 0, vecOffset + 12); + + t1 = vec_mergel(src[0], src[1]); + t2 = vec_mergel(src[2], src[3]); + t3 = vec_mergel(src[4], src[5]); + t4 = vec_mergel(src[6], src[7]); + + t5 = vec_xxpermdi(t1, t2, 0); + t6 = vec_xxpermdi(t3, t4, 0); + t7 = vec_xxpermdi(t1, t2, 3); + t8 = vec_xxpermdi(t3, t4, 3); + + vec_xst(t5, 0, vecOffset + 16); + vec_xst(t6, 0, vecOffset + 20); + vec_xst(t7, 0, vecOffset + 24); + vec_xst(t8, 0, vecOffset + 28); + } + + void packTranspose(const float* a, int64_t lda, int rows, int cols, float* vec) { int64_t i, j; - TA *aoffset = NULL, *boffset = NULL; - TA *aoffset1 = NULL, *aoffset2 = NULL, *aoffset3 = NULL, *aoffset4 = NULL; - TA *aoffset5 = NULL, *aoffset6 = NULL, *aoffset7 = NULL, *aoffset8 = NULL; - __vector_pair C1, C2, C3, C4, C5, C6, C7, C8; - VA c1[2] = {0}, c2[2] = {0}, c3[2] = {0}, c4[2] = {0}; - VA c5[2] = {0}, c6[2] = {0}, c7[2] = {0}, c8[2] = {0}; - VA t1, t2, t3, t4, t5, t6, 
t7, t8; - aoffset = const_cast<TA*>(a); + float * aoffsets[8]; + float *aoffset = NULL, *boffset = NULL; + __vector_pair arr[8]; + vector float c[8][2] = {0}; + vector float c1[8] = {0}; + vector float c2[8] = {0}; + aoffset = const_cast<float*>(a); boffset = vec; j = (rows >> 3); if (j > 0) { do { - aoffset1 = aoffset; - aoffset2 = aoffset1 + lda; - aoffset3 = aoffset2 + lda; - aoffset4 = aoffset3 + lda; - aoffset5 = aoffset4 + lda; - aoffset6 = aoffset5 + lda; - aoffset7 = aoffset6 + lda; - aoffset8 = aoffset7 + lda; + aoffsets[0] = aoffset; + for (int it = 1; it< 8; it++) + aoffsets[it] = aoffsets[it-1] + lda; aoffset += 8 * lda; i = (cols >> 3); if (i > 0) { do { - C1 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset1); - C2 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset2); - C3 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset3); - C4 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset4); - C5 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset5); - C6 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset6); - C7 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset7); - C8 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset8); - __builtin_vsx_disassemble_pair(c1, &C1); - __builtin_vsx_disassemble_pair(c2, &C2); - __builtin_vsx_disassemble_pair(c3, &C3); - __builtin_vsx_disassemble_pair(c4, &C4); - __builtin_vsx_disassemble_pair(c5, &C5); - __builtin_vsx_disassemble_pair(c6, &C6); - __builtin_vsx_disassemble_pair(c7, &C7); - __builtin_vsx_disassemble_pair(c8, &C8); + for (int it = 0; it< 8; it++) { + arr[it] = __builtin_vsx_lxvp(0, (__vector_pair*)aoffsets[it]); + __builtin_vsx_disassemble_pair(c[it], &arr[it]); + c1[it] = c[it][0]; + c2[it] = c[it][1]; + } - t1 = vec_mergeh(c1[0], c2[0]); - t2 = vec_mergeh(c3[0], c4[0]); - t3 = vec_mergeh(c5[0], c6[0]); - t4 = vec_mergeh(c7[0], c8[0]); - t5 = vec_xxpermdi(t1, t2, 0); - t6 = vec_xxpermdi(t3, t4, 0); - t7 = vec_xxpermdi(t1, t2, 3); - t8 = vec_xxpermdi(t3, t4, 3); - vec_xst(t5, 0, boffset); - vec_xst(t6, 0, boffset+4); - vec_xst(t7, 0, boffset+8); - vec_xst(t8, 0, boffset+12); - - t1 = vec_mergel(c1[0], c2[0]); - t2 = vec_mergel(c3[0], c4[0]); - t3 = vec_mergel(c5[0], c6[0]); - t4 = vec_mergel(c7[0], c8[0]); - t5 = vec_xxpermdi(t1, t2, 0); - t6 = vec_xxpermdi(t3, t4, 0); - t7 = vec_xxpermdi(t1, t2, 3); - t8 = vec_xxpermdi(t3, t4, 3); - vec_xst(t5, 0, boffset+16); - vec_xst(t6, 0, boffset+20); - vec_xst(t7, 0, boffset+24); - vec_xst(t8, 0, boffset+28); - - t1 = vec_mergeh(c1[1], c2[1]); - t2 = vec_mergeh(c3[1], c4[1]); - t3 = vec_mergeh(c5[1], c6[1]); - t4 = vec_mergeh(c7[1], c8[1]); - t5 = vec_xxpermdi(t1, t2, 0); - t6 = vec_xxpermdi(t3, t4, 0); - t7 = vec_xxpermdi(t1, t2, 3); - t8 = vec_xxpermdi(t3, t4, 3); - vec_xst(t5, 0, boffset+32); - vec_xst(t6, 0, boffset+36); - vec_xst(t7, 0, boffset+40); - vec_xst(t8, 0, boffset+44); - - t1 = vec_mergel(c1[1], c2[1]); - t2 = vec_mergel(c3[1], c4[1]); - t3 = vec_mergel(c5[1], c6[1]); - t4 = vec_mergel(c7[1], c8[1]); - t5 = vec_xxpermdi(t1, t2, 0); - t6 = vec_xxpermdi(t3, t4, 0); - t7 = vec_xxpermdi(t1, t2, 3); - t8 = vec_xxpermdi(t3, t4, 3); - vec_xst(t5, 0, boffset+48); - vec_xst(t6, 0, boffset+52); - vec_xst(t7, 0, boffset+56); - vec_xst(t8, 0, boffset+60); - - aoffset1 += 8*lda; - aoffset2 += 8*lda; - aoffset3 += 8*lda; - aoffset4 += 8*lda; + vector_permute_store_8(c1, boffset); + vector_permute_store_8(c2, boffset+32); + for (int it = 0; it < 4; it++) + aoffsets[it] = aoffsets[it] + 8*lda; boffset += 64; i--; } while(i > 0); } if (cols & 4) { - c1[0] = vec_xl(0, aoffset1); - c2[0] = vec_xl(0, aoffset2); - c3[0] = vec_xl(0, 
aoffset3); - c4[0] = vec_xl(0, aoffset4); - c5[0] = vec_xl(0, aoffset5); - c6[0] = vec_xl(0, aoffset6); - c7[0] = vec_xl(0, aoffset7); - c8[0] = vec_xl(0, aoffset8); - - t1 = vec_mergeh(c1[0], c2[0]); - t2 = vec_mergeh(c3[0], c4[0]); - t3 = vec_mergeh(c5[0], c6[0]); - t4 = vec_mergeh(c7[0], c8[0]); - t5 = vec_xxpermdi(t1, t2, 0); - t6 = vec_xxpermdi(t3, t4, 0); - t7 = vec_xxpermdi(t1, t2, 3); - t8 = vec_xxpermdi(t3, t4, 3); - vec_xst(t5, 0, boffset); - vec_xst(t6, 0, boffset+4); - vec_xst(t7, 0, boffset+8); - vec_xst(t8, 0, boffset+12); - - t1 = vec_mergel(c1[0], c2[0]); - t2 = vec_mergel(c3[0], c4[0]); - t3 = vec_mergel(c5[0], c6[0]); - t4 = vec_mergel(c7[0], c8[0]); - t5 = vec_xxpermdi(t1, t2, 0); - t6 = vec_xxpermdi(t3, t4, 0); - t7 = vec_xxpermdi(t1, t2, 3); - t8 = vec_xxpermdi(t3, t4, 3); - vec_xst(t5, 0, boffset+16); - vec_xst(t6, 0, boffset+20); - vec_xst(t7, 0, boffset+24); - vec_xst(t8, 0, boffset+28); + for (int it = 0; it < 8 ; it++) + c1[it] = vec_xl(0, aoffsets[it]); + vector_permute_store_8(c1, boffset); } j--; } while(j > 0); } if (rows & 4) { - aoffset1 = aoffset; - aoffset2 = aoffset1 + lda; - aoffset3 = aoffset2 + lda; - aoffset4 = aoffset3 + lda; + aoffsets[0] = aoffset; + for (int it = 1; it < 4; it++) + aoffsets[it] = aoffsets[it-1] + lda; aoffset += 4 * lda; i = (cols >> 3); if (i > 0) { do { - C1 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset1); - C2 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset2); - C3 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset3); - C4 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset4); - __builtin_vsx_disassemble_pair(c1, &C1); - __builtin_vsx_disassemble_pair(c2, &C2); - __builtin_vsx_disassemble_pair(c3, &C3); - __builtin_vsx_disassemble_pair(c4, &C4); - - t1 = vec_mergeh(c1[0], c2[0]); - t2 = vec_mergeh(c3[0], c4[0]); - t3 = vec_mergel(c1[0], c2[0]); - t4 = vec_mergel(c3[0], c4[0]); - t5 = vec_xxpermdi(t1, t2, 0); - t6 = vec_xxpermdi(t1, t2, 3); - t7 = vec_xxpermdi(t3, t4, 0); - t8 = vec_xxpermdi(t3, t4, 3); - vec_xst(t5, 0, boffset); - vec_xst(t6, 0, boffset+4); - vec_xst(t7, 0, boffset+8); - vec_xst(t8, 0, boffset+12); - - t1 = vec_mergeh(c1[1], c2[1]); - t2 = vec_mergeh(c3[1], c4[1]); - t3 = vec_mergel(c1[1], c2[1]); - t4 = vec_mergel(c3[1], c4[1]); - t5 = vec_xxpermdi(t1, t2, 0); - t6 = vec_xxpermdi(t1, t2, 3); - t7 = vec_xxpermdi(t3, t4, 0); - t8 = vec_xxpermdi(t3, t4, 3); - vec_xst(t5, 0, boffset+16); - vec_xst(t6, 0, boffset+20); - vec_xst(t7, 0, boffset+24); - vec_xst(t8, 0, boffset+28); - - aoffset1 += 8*lda; - aoffset2 += 8*lda; - aoffset3 += 8*lda; - aoffset4 += 8*lda; + for (int it = 0; it < 4; it++) { + arr[it] = __builtin_vsx_lxvp(0, (__vector_pair*)aoffsets[it]); + __builtin_vsx_disassemble_pair(c[it], &arr[it]); + c1[it] = c[it][0]; + c2[it] = c[it][1]; + } + vector_permute_store_4(c1, boffset); + vector_permute_store_4(c2, boffset+16); + for (int it = 0; it < 4; it++) + aoffsets[it] += 8*lda; boffset += 32; i--; } while(i > 0); } if (cols & 4) { - c1[0] = vec_xl(0, aoffset1); - c2[0] = vec_xl(0, aoffset2); - c3[0] = vec_xl(0, aoffset3); - c4[0] = vec_xl(0, aoffset4); - - t1 = vec_mergeh(c1[0], c2[0]); - t2 = vec_mergeh(c3[0], c4[0]); - t3 = vec_xxpermdi(t1, t2, 0); - t4 = vec_xxpermdi(t1, t2, 3); - vec_xst(t3, 0, boffset); - vec_xst(t4, 0, boffset+4); - - t1 = vec_mergel(c1[0], c2[0]); - t2 = vec_mergel(c3[0], c4[0]); - t3 = vec_xxpermdi(t1, t2, 0); - t4 = vec_xxpermdi(t1, t2, 3); - vec_xst(t3, 0, boffset+8); - vec_xst(t4, 0, boffset+12); + for (int it = 0; it < 4; it++) + c1[it] = vec_xl(0, aoffsets[it]); + 
vector_permute_store_4(c1, boffset); } } if (rows & 3) { - aoffset1 = aoffset; - aoffset2 = aoffset1 + lda; - aoffset3 = aoffset2 + lda; + aoffsets[0] = aoffset; + for (int it = 1; it < 3; it++) + aoffsets[it] = aoffsets[it-1] + lda; if (cols & 4) { - c1[0] = vec_xl(0, aoffset1); - c2[0] = vec_xl(0, aoffset2); - c3[0] = vec_xl(0, aoffset3); - - t1 = vec_mergeh(c1[0], c2[0]); - t2 = vec_mergeh(c3[0], c4[0]); - t3 = vec_xxpermdi(t1, t2, 0); - t4 = vec_xxpermdi(t1, t2, 3); - vec_xst(t3, 0, boffset); - vec_xst(t4, 0, boffset+4); - - t1 = vec_mergel(c1[0], c2[0]); - t2 = vec_mergel(c3[0], c4[0]); - t3 = vec_xxpermdi(t1, t2, 0); - t4 = vec_xxpermdi(t1, t2, 3); - vec_xst(t3, 0, boffset+8); - vec_xst(t4, 0, boffset+12); + for (int it = 0; it < 3; it++) + c1[it] = vec_xl(0, aoffsets[it]); + vector_permute_store_4(c1, boffset); } } } @@ -2925,8 +2326,8 @@ class tinyBLAS_PPC { acc_t acc_0; __builtin_mma_xxsetaccz(&acc_0); for (int l = 0; l < k; l+=4) { - packTranspose<vector float>(A+(ii*lda)+l, lda, 4, 4, (TA*)vec_A); - packTranspose<vector float>(B+(jj*ldb)+l, ldb, 4, 4, (TA*)vec_B); + packTranspose(A+(ii*lda)+l, lda, 4, 4, (float*)vec_A); + packTranspose(B+(jj*ldb)+l, ldb, 4, 4, (float*)vec_B); __builtin_mma_xvf32gerpp(&acc_0, vec_A[0], vec_B[0]); __builtin_mma_xvf32gerpp(&acc_0, vec_A[1], vec_B[1]); __builtin_mma_xvf32gerpp(&acc_0, vec_A[2], vec_B[2]); @@ -2941,8 +2342,8 @@ class tinyBLAS_PPC { __builtin_mma_xxsetaccz(&acc_0); __builtin_mma_xxsetaccz(&acc_1); for (int64_t l = 0; l < k; l+=4) { - packTranspose<vector float>(A+(ii*lda)+l, lda, 4, 4, (TA*)vec_A); - packTranspose<vector float>(B+(jj*ldb)+l, ldb, 8, 4, (TA*)vec_B); + packTranspose(A+(ii*lda)+l, lda, 4, 4, (float*)vec_A); + packTranspose(B+(jj*ldb)+l, ldb, 8, 4, (float*)vec_B); __builtin_mma_xvf32gerpp(&acc_0, vec_A[0], (vec_t)vec_B[0]); __builtin_mma_xvf32gerpp(&acc_1, vec_A[0], (vec_t)vec_B[1]); __builtin_mma_xvf32gerpp(&acc_0, vec_A[1], (vec_t)vec_B[2]); @@ -2962,8 +2363,8 @@ class tinyBLAS_PPC { __builtin_mma_xxsetaccz(&acc_0); __builtin_mma_xxsetaccz(&acc_1); for (int64_t l = 0; l < k; l+=4) { - packTranspose<vector float>(A+(ii*lda)+l, lda, 8, 4, (TA*)vec_A); - packTranspose<vector float>(B+(jj*ldb)+l, ldb, 4, 4, (TA*)vec_B); + packTranspose(A+(ii*lda)+l, lda, 8, 4, (float*)vec_A); + packTranspose(B+(jj*ldb)+l, ldb, 4, 4, (float*)vec_B); __builtin_mma_xvf32gerpp(&acc_0, (vec_t)vec_A[0], vec_B[0]); __builtin_mma_xvf32gerpp(&acc_1, (vec_t)vec_A[1], vec_B[0]); __builtin_mma_xvf32gerpp(&acc_0, (vec_t)vec_A[2], vec_B[1]); @@ -2985,8 +2386,8 @@ class tinyBLAS_PPC { __builtin_mma_xxsetaccz(&acc_2); __builtin_mma_xxsetaccz(&acc_3); for (int l = 0; l < k; l+=8) { - packTranspose<vector float>(A+(ii*lda)+l, lda, 8, 8, (TA*)vec_A); - packTranspose<vector float>(B+(jj*ldb)+l, ldb, 8, 8, (TA*)vec_B); + packTranspose(A+(ii*lda)+l, lda, 8, 8, (float*)vec_A); + packTranspose(B+(jj*ldb)+l, ldb, 8, 8, (float*)vec_B); for(int x = 0; x < 16; x+=2) { __builtin_mma_xvf32gerpp(&acc_0, (vec_t)vec_A[x], vec_B[x]); __builtin_mma_xvf32gerpp(&acc_1, (vec_t)vec_A[x], vec_B[x+1]); @@ -3001,155 +2402,37 @@ } void mnpack(int64_t m0, int64_t m, int64_t n0, int64_t n) { - int64_t mc, nc, mp, np; - int m_rem = MIN(m - m0, 16); - int n_rem = MIN(n - n0, 16); - if (m_rem >= 16 && n_rem >= 8) { - mc = 8; - nc = 8; - gemm<8,8>(m0, m, n0, n); - } else if(m_rem >= 8 && n_rem >= 16) { - mc = 8; - nc = 8; - gemm<8,8>(m0, m, n0, n); - } else if (m_rem >= 8 && n_rem >= 8) { - mc = 8; - nc = 8; - gemm<8,8>(m0, m, n0, n); + int m_rem = MIN(m - m0, 8); + int n_rem = MIN(n - n0, 8); + int mc = 0, nc = 0; + if (m_rem >= 8 && n_rem >= 8) { + mc = 8; + nc = 8; + gemm<8, 
8>(m0, m, n0, n); } else if (m_rem >= 4 && n_rem >= 8) { - mc = 4; - nc = 8; - gemm<4,8>(m0, m, n0, n); + mc = 4; + nc = 8; + gemm<4, 8>(m0, m, n0, n); } else if (m_rem >= 8 && n_rem >= 4) { - mc = 8; - nc = 4; - gemm<8,4>(m0, m, n0, n); + mc = 8; + nc = 4; + gemm<8, 4>(m0, m, n0, n); } else if (m_rem >= 4 && n_rem >= 4) { - mc = 4; - nc = 4; - gemm<4,4>(m0, m, n0, n); - } else if ((m_rem < 4) && (n_rem > 4)) { - nc = 4; - switch(m_rem) { - case 1: - mc = 1; - gemm_small(m0, m, n0, n, mc, nc); - break; - case 2: - mc = 2; - gemm_small(m0, m, n0, n, mc, nc); - break; - case 3: - mc = 3; - gemm_small(m0, m, n0, n, mc, nc); - break; - default: - return; - } - } else if ((m_rem > 4) && (n_rem < 4)) { - mc = 4; - switch(n_rem) { - case 1: - nc = 1; - gemm_small(m0, m, n0, n, mc, nc); - break; - case 2: - nc = 2; - gemm_small(m0, m, n0, n, mc, nc); - break; - case 3: - nc = 3; - gemm_small(m0, m, n0, n, mc, nc); - break; - default: - return; - } + mc = 4; + nc = 4; + gemm<4, 4>(m0, m, n0, n); } else { - switch((m_rem << 4) | n_rem) { - case 0x43: - mc = 4; - nc = 3; - gemm_small(m0, m, n0, n, mc, nc); - break; - case 0x42: - mc = 4; - nc = 2; - gemm_small(m0, m, n0, n, mc, nc); - break; - case 0x41: - mc = 4; - nc = 1; - gemm_small(m0, m, n0, n, mc, nc); - break; - case 0x34: - mc = 3; - nc = 4; - gemm_small(m0, m, n0, n, mc, nc); - break; - case 0x33: - mc = 3; - nc = 3; - gemm_small(m0, m, n0, n, mc, nc); - break; - case 0x32: - mc = 3; - nc = 2; - gemm_small(m0, m, n0, n, mc, nc); - break; - case 0x31: - mc = 3; - nc = 1; - gemm_small(m0, m, n0, n, mc, nc); - break; - case 0x24: - mc = 2; - nc = 4; - gemm_small(m0, m, n0, n, mc, nc); - break; - case 0x23: - mc = 2; - nc = 3; - gemm_small(m0, m, n0, n, mc, nc); - break; - case 0x22: - mc = 2; - nc = 2; - gemm_small(m0, m, n0, n, mc, nc); - break; - case 0x21: - mc = 2; - nc = 1; - gemm_small(m0, m, n0, n, mc, nc); - break; - case 0x14: - mc = 1; - nc = 4; - gemm_small(m0, m, n0, n, mc, nc); - break; - case 0x13: - mc = 1; - nc = 3; - gemm_small(m0, m, n0, n, mc, nc); - break; - case 0x12: - mc = 1; - nc = 2; - gemm_small(m0, m, n0, n, mc, nc); - break; - case 0x11: - mc = 1; - nc = 1; - gemm_small(m0, m, n0, n, mc, nc); - break; - default: - return; - } + mc = (m_rem >= 4) ? 4 : m_rem; + nc = (n_rem >= 4) ? 4 : n_rem; + if (mc == 0 || nc == 0) + return; + gemm_small(m0, m, n0, n, mc, nc); } - mp = m0 + (m - m0) / mc * mc; - np = n0 + (n - n0) / nc * nc; + int64_t mp = m0 + ((m - m0) / mc) * mc; + int64_t np = n0 + ((n - n0) / nc) * nc; mnpack(mp, m, n0, np); mnpack(m0, m, np, n); - } + } void gemm_small(int64_t m0, int64_t m, int64_t n0, int64_t n, int RM, int RN) { int64_t ytiles = (m - m0) / RM; @@ -3174,22 +2457,22 @@ class tinyBLAS_PPC { * matrix elements. 
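 * For the edge tiles handled below, the RM == 1 and RN == 1 branches load the
 * four scalars of the lone row (or column) once and re-broadcast three of them
 * with vec_splats, so each of the four __builtin_mma_xvf32gerpp rank-1 updates
 * still receives a full 4-lane operand; after the accumulator is disassembled,
 * only the RM x RN valid lanes are copied back into C.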
*/ if (RM == 1) { - TA* a = const_cast<TA*>(A+(ii)*lda+l); - packTranspose<vector float>(B+(jj*ldb)+l, ldb, RN, 4, (TA*)vec_B); + float* a = const_cast<float*>(A+(ii)*lda+l); + packTranspose(B+(jj*ldb)+l, ldb, RN, 4, (float*)vec_B); vec_A[0] = (vec_t)vec_xl(0,a); - vec_A[1] = (vec_t)vec_splats(*((TA*)&vec_A+1)); - vec_A[2] = (vec_t)vec_splats(*((TA*)&vec_A+2)); - vec_A[3] = (vec_t)vec_splats(*((TA*)&vec_A+3)); + vec_A[1] = (vec_t)vec_splats(*((float*)&vec_A+1)); + vec_A[2] = (vec_t)vec_splats(*((float*)&vec_A+2)); + vec_A[3] = (vec_t)vec_splats(*((float*)&vec_A+3)); } else if (RN == 1) { - packTranspose<vector float>(A+(ii*lda)+l, lda, RM, 4, (TA*)vec_A); - TB* b = const_cast<TB*>(B+(jj)*ldb+l); + packTranspose(A+(ii*lda)+l, lda, RM, 4, (float*)vec_A); + float* b = const_cast<float*>(B+(jj)*ldb+l); vec_B[0] = (vec_t)vec_xl(0,b); - vec_B[1] = (vec_t)vec_splats(*((TB*)&vec_B+1)); - vec_B[2] = (vec_t)vec_splats(*((TB*)&vec_B+2)); - vec_B[3] = (vec_t)vec_splats(*((TB*)&vec_B+3)); + vec_B[1] = (vec_t)vec_splats(*((float*)&vec_B+1)); + vec_B[2] = (vec_t)vec_splats(*((float*)&vec_B+2)); + vec_B[3] = (vec_t)vec_splats(*((float*)&vec_B+3)); } else { - packTranspose<vector float>(A+(ii*lda)+l, lda, RM, 4, (TA*)vec_A); - packTranspose<vector float>(B+(jj*ldb)+l, ldb, RN, 4, (TA*)vec_B); + packTranspose(A+(ii*lda)+l, lda, RM, 4, (float*)vec_A); + packTranspose(B+(jj*ldb)+l, ldb, RN, 4, (float*)vec_B); } __builtin_mma_xvf32gerpp(&acc_0, vec_A[0], vec_B[0]); __builtin_mma_xvf32gerpp(&acc_0, vec_A[1], vec_B[1]); @@ -3199,7 +2482,7 @@ class tinyBLAS_PPC { __builtin_mma_disassemble_acc(vec_C, &acc_0); for (int I = 0; I < RM; I++) { for (int J = 0; J < RN; J++) { - *((TC*)(C+ii+((jj+J)*ldc)+I)) = *((TC*)&vec_C[I]+J); + *((float*)(C+ii+((jj+J)*ldc)+I)) = *((float*)&vec_C[I]+J); } } } @@ -3231,11 +2514,9 @@ class tinyBLAS_PPC { } } - const TA *const A; - const TB *const B; - TC *C; - TA *At; - TB *Bt; + const float *const A; + const float *const B; + float *C; const int64_t k; const int64_t lda; const int64_t ldb; @@ -3323,10 +2604,18 @@ bool llamafile_sgemm(const struct ggml_compute_params * params, int64_t m, int64 (const float *)B, ldb, (float *)C, ldc}; return tb.matmul(m, n); +#elif defined(__VXE__) || defined(__VXE2__) + if (n < 4) + return false; + tinyBLAS<4, float32x4_t, float32x4_t, float, float, float> tb{ params, + k, (const float *)A, lda, + (const float *)B, ldb, + (float *)C, ldc}; + return tb.matmul(m, n); #elif defined(__MMA__) if (k % 8) return false; - tinyBLAS_PPC<float, float, float> tb{ + tinyBLAS_PPC tb{ k, (const float *)A, lda, (const float *)B, ldb, (float *)C, ldc, @@ -3414,6 +2703,16 @@ bool llamafile_sgemm(const struct ggml_compute_params * params, int64_t m, int64 (float *)C, ldc}; return tb.matmul(m, n); } +#elif defined(__VXE__) || defined(__VXE2__) + if (n < 4) + return false; + if (Btype == GGML_TYPE_F16) { + tinyBLAS<4, float32x4_t, float32x4_t, ggml_fp16_t, ggml_fp16_t, float> tb{ params, + k, (const ggml_fp16_t *)A, lda, + (const ggml_fp16_t *)B, ldb, + (float *)C, ldc}; + return tb.matmul(m, n); + } #endif return false; } @@ -3443,7 +2742,7 @@ bool llamafile_sgemm(const struct ggml_compute_params * params, int64_t m, int64 return false; if (m < 8 && m != 4) return false; - tinyBLAS_Q0_PPC<block_q8_0, block_q8_0, float> tb{ + tinyBLAS_Q0_PPC<block_q8_0> tb{ k, (const block_q8_0 *)A, lda, (const block_q8_0 *)B, ldb, (float *)C, ldc, @@ -3480,7 +2779,7 @@ bool llamafile_sgemm(const struct ggml_compute_params * params, int64_t m, int64 return false; if (m < 8 && m != 4) return false; - tinyBLAS_Q0_PPC<block_q4_0, block_q8_0, float> tb{ + tinyBLAS_Q0_PPC<block_q4_0> tb{ k, (const block_q4_0 *)A, lda, (const block_q8_0 *)B, ldb, (float *)C, ldc, diff --git 
a/ml/backend/ggml/ggml/src/ggml-cpu/llamafile/sgemm.h b/ml/backend/ggml/ggml/src/ggml-cpu/llamafile/sgemm.h index 3d2909515..729e8853d 100644 --- a/ml/backend/ggml/ggml/src/ggml-cpu/llamafile/sgemm.h +++ b/ml/backend/ggml/ggml/src/ggml-cpu/llamafile/sgemm.h @@ -1,6 +1,11 @@ #pragma once #include <stdint.h> #include <stdbool.h> + +#if defined(__VXE__) || defined(__VXE2__) +#include <vecintrin.h> +#endif + #ifdef __cplusplus extern "C" { #endif diff --git a/ml/backend/ggml/ggml/src/ggml-cpu/ops.cpp b/ml/backend/ggml/ggml/src/ggml-cpu/ops.cpp index be0aa683b..a29247576 100644 --- a/ml/backend/ggml/ggml/src/ggml-cpu/ops.cpp +++ b/ml/backend/ggml/ggml/src/ggml-cpu/ops.cpp @@ -3,10 +3,12 @@ #include "ggml-cpu.h" #include "ggml-impl.h" #include "binary-ops.h" +#include "ggml.h" #include "unary-ops.h" #include "vec.h" #include <float.h> + #include <algorithm> // ggml_compute_forward_dup @@ -108,7 +110,7 @@ static void ggml_compute_forward_dup_f16( for (int i01 = ir0; i01 < ir1; i01++) { const ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03); for (int i00 = 0; i00 < ne00; i00++) { - dst_ptr[id] = GGML_FP16_TO_FP32(src0_ptr[i00]); + dst_ptr[id] = GGML_CPU_FP16_TO_FP32(src0_ptr[i00]); id++; } } @@ -130,7 +132,7 @@ static void ggml_compute_forward_dup_f16( const ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03); for (int i00 = 0; i00 < ne00; i00++) { - src0_f32[i00] = GGML_FP16_TO_FP32(src0_ptr[i00]); + src0_f32[i00] = GGML_CPU_FP16_TO_FP32(src0_ptr[i00]); } quantize_row_q(src0_f32, dst_ptr + id, ne00); @@ -156,7 +158,7 @@ static void ggml_compute_forward_dup_f16( for (int i00 = 0; i00 < ne00; i00++) { const ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03); - dst_ptr[id] = GGML_FP16_TO_FP32(*src0_ptr); + dst_ptr[id] = GGML_CPU_FP16_TO_FP32(*src0_ptr); id++; } } @@ -267,7 +269,7 @@ static void ggml_compute_forward_dup_f16( const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03); char * dst_ptr = ((char *) dst->data + i10*nb0 + i11*nb1 + i12*nb2 + i13*nb3); - *(float *) dst_ptr = GGML_FP16_TO_FP32(*(const ggml_fp16_t *) src0_ptr); + *(float *) dst_ptr = GGML_CPU_FP16_TO_FP32(*(const ggml_fp16_t *) src0_ptr); if (++i10 == ne0) { i10 = 0; @@ -372,7 +374,7 @@ static void ggml_compute_forward_dup_bf16( for (int i01 = ir0; i01 < ir1; i01++) { const ggml_bf16_t * src0_ptr = (ggml_bf16_t *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03); for (int i00 = 0; i00 < ne00; i00++) { - dst_ptr[id] = GGML_FP32_TO_FP16(GGML_BF16_TO_FP32(src0_ptr[i00])); + dst_ptr[id] = GGML_CPU_FP32_TO_FP16(GGML_BF16_TO_FP32(src0_ptr[i00])); id++; } } @@ -473,7 +475,7 @@ static void ggml_compute_forward_dup_bf16( for (int i00 = 0; i00 < ne00; i00++) { const ggml_bf16_t * src0_ptr = (ggml_bf16_t *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03); - dst_ptr[id] = GGML_FP32_TO_FP16(GGML_BF16_TO_FP32(*src0_ptr)); + dst_ptr[id] = GGML_CPU_FP32_TO_FP16(GGML_BF16_TO_FP32(*src0_ptr)); id++; } } @@ -566,7 +568,7 @@ static void ggml_compute_forward_dup_bf16( const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03); char * dst_ptr = ((char *) dst->data + i10*nb0 + i11*nb1 + i12*nb2 + i13*nb3); - *(ggml_fp16_t *) dst_ptr = GGML_CPU_FP32_TO_FP16(GGML_BF16_TO_FP32(*(const ggml_bf16_t *) src0_ptr)); + *(ggml_fp16_t *) dst_ptr = GGML_CPU_FP32_TO_FP16(GGML_BF16_TO_FP32(*(const ggml_bf16_t *) src0_ptr)); if (++i10 == ne0) { i10 = 0; @@ -696,24 +698,8 @@ static void 
ggml_compute_forward_dup_f32( if (ggml_is_contiguous(dst)) { // TODO: simplify if (nb00 == sizeof(float)) { - if (dst->type == GGML_TYPE_F32) { - size_t id = 0; - const size_t rs = ne00 * nb00; - char * dst_ptr = (char *) dst->data; - - for (int i03 = 0; i03 < ne03; i03++) { - for (int i02 = 0; i02 < ne02; i02++) { - id += rs * ir0; - for (int i01 = ir0; i01 < ir1; i01++) { - const char * src0_ptr = (char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03; - memcpy(dst_ptr + id, src0_ptr, rs); - id += rs; - } - id += rs * (ne01 - ir1); - } - } - } else if (ggml_get_type_traits_cpu(dst->type)->from_float) { - ggml_from_float_t const quantize_row_q = ggml_get_type_traits_cpu(dst->type)->from_float; + if (ggml_get_type_traits_cpu(dst->type)->from_float) { + ggml_from_float_t const from_float = ggml_get_type_traits_cpu(dst->type)->from_float; size_t id = 0; size_t rs = nb0 * (ne00 / ggml_blck_size(dst->type)); @@ -724,7 +710,7 @@ static void ggml_compute_forward_dup_f32( id += rs * ir0; for (int i01 = ir0; i01 < ir1; i01++) { const float * src0_ptr = (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03); - quantize_row_q(src0_ptr, dst_ptr + id, ne00); + from_float(src0_ptr, dst_ptr + id, ne00); id += rs; } id += rs * (ne01 - ir1); @@ -765,7 +751,7 @@ static void ggml_compute_forward_dup_f32( for (int i00 = 0; i00 < ne00; i00++) { const float * src0_ptr = (float *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03); - dst_ptr[id] = GGML_FP32_TO_FP16(*src0_ptr); + dst_ptr[id] = GGML_CPU_FP32_TO_FP16(*src0_ptr); id++; } } @@ -878,7 +864,7 @@ static void ggml_compute_forward_dup_f32( const char * src0_ptr = ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03); char * dst_ptr = ((char *) dst->data + i10*nb0 + i11*nb1 + i12*nb2 + i13*nb3); - *(ggml_fp16_t *) dst_ptr = GGML_FP32_TO_FP16(*(const float *) src0_ptr); + *(ggml_fp16_t *) dst_ptr = GGML_CPU_FP32_TO_FP16(*(const float *) src0_ptr); if (++i10 == ne0) { i10 = 0; @@ -1298,6 +1284,7 @@ void ggml_compute_forward_add( case GGML_TYPE_Q5_0: case GGML_TYPE_Q5_1: case GGML_TYPE_Q8_0: + case GGML_TYPE_MXFP4: case GGML_TYPE_Q2_K: case GGML_TYPE_Q3_K: case GGML_TYPE_Q4_K: @@ -1324,6 +1311,77 @@ void ggml_compute_forward_add( } } +// ggml_compute_forward_add_id + +static void ggml_compute_forward_add_id_f32( + const ggml_compute_params * params, + ggml_tensor * dst) { + + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + const ggml_tensor * src2 = dst->src[2]; + + GGML_ASSERT(dst->type == GGML_TYPE_F32); + GGML_ASSERT(src0->type == GGML_TYPE_F32); + GGML_ASSERT(src1->type == GGML_TYPE_F32); + GGML_ASSERT(src2->type == GGML_TYPE_I32); + + GGML_ASSERT(src0->nb[0] == sizeof(float)); + GGML_ASSERT(src1->nb[0] == sizeof(float)); + + const int ith = params->ith; + const int nth = params->nth; + + const int nr = ggml_nrows(src0); + + GGML_TENSOR_TERNARY_OP_LOCALS + + GGML_ASSERT( nb0 == sizeof(float)); + GGML_ASSERT(nb10 == sizeof(float)); + + // rows per thread + const int dr = (nr + nth - 1)/nth; + + // row range for this thread + const int ir0 = dr*ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int ir = ir0; ir < ir1; ++ir) { + // src0 indices + const int i3 = ir/(ne2*ne1); + const int i2 = (ir - i3*ne2*ne1)/ne1; + const int i1 = (ir - i3*ne2*ne1 - i2*ne1); + + // src1 indices + const int i11 = *(int32_t *) ((char *) src2->data + i1*nb20 + i2*nb21); + + GGML_ASSERT(i11 >= 0 && i11 < ne11); + + ggml_vec_add_f32(ne0, + (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 ), + (float 
*) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01), + (float *) ((char *) src1->data + i11*nb11)); + } +} + +void ggml_compute_forward_add_id( + const ggml_compute_params * params, + ggml_tensor * dst) { + + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_add_id_f32(params, dst); + } break; + default: + { + GGML_ABORT("unsupported type for ggml_compute_forward_add_id: %s", ggml_type_name(src0->type)); + } + } +} + // ggml_compute_forward_add1 static void ggml_compute_forward_add1_f32( @@ -1419,7 +1477,7 @@ static void ggml_compute_forward_add1_f16_f32( ggml_fp16_t * dst_ptr = (ggml_fp16_t *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 ); ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01); for (int i = 0; i < ne0; i++) { - dst_ptr[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(src0_ptr[i]) + v); + dst_ptr[i] = GGML_CPU_FP32_TO_FP16(GGML_CPU_FP16_TO_FP32(src0_ptr[i]) + v); } } } @@ -1435,7 +1493,7 @@ static void ggml_compute_forward_add1_f16_f16( GGML_ASSERT(ggml_is_scalar(src1)); // scalar to add - const float v = GGML_FP16_TO_FP32(*(ggml_fp16_t *) src1->data); + const float v = GGML_CPU_FP16_TO_FP32(*(ggml_fp16_t *) src1->data); const int ith = params->ith; const int nth = params->nth; @@ -1467,7 +1525,7 @@ static void ggml_compute_forward_add1_f16_f16( ggml_fp16_t * dst_ptr = (ggml_fp16_t *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 ); ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01); for (int i = 0; i < ne0; i++) { - dst_ptr[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(src0_ptr[i]) + v); + dst_ptr[i] = GGML_CPU_FP32_TO_FP16(GGML_CPU_FP16_TO_FP32(src0_ptr[i]) + v); } } } @@ -1675,6 +1733,7 @@ void ggml_compute_forward_add1( case GGML_TYPE_Q5_1: case GGML_TYPE_Q8_0: case GGML_TYPE_Q8_1: + case GGML_TYPE_MXFP4: case GGML_TYPE_Q2_K: case GGML_TYPE_Q3_K: case GGML_TYPE_Q4_K: @@ -1802,6 +1861,7 @@ void ggml_compute_forward_acc( case GGML_TYPE_Q5_1: case GGML_TYPE_Q8_0: case GGML_TYPE_Q8_1: + case GGML_TYPE_MXFP4: case GGML_TYPE_Q2_K: case GGML_TYPE_Q3_K: case GGML_TYPE_Q4_K: @@ -1889,7 +1949,7 @@ static void ggml_compute_forward_sum_f16( } } } - ((ggml_fp16_t *) dst->data)[0] = GGML_FP32_TO_FP16(sum); + ((ggml_fp16_t *) dst->data)[0] = GGML_CPU_FP32_TO_FP16(sum); } static void ggml_compute_forward_sum_bf16( @@ -2300,6 +2360,12 @@ void ggml_compute_forward_repeat( { ggml_compute_forward_repeat_f32(params, dst); } break; + // TODO: templateify the implemenation and support for I64 + // ref https://github.com/ggml-org/llama.cpp/pull/14274#discussion_r2169492225 + //case GGML_TYPE_I64: + // { + // ggml_compute_forward_repeat_i64(params, dst); + // } break; default: { GGML_ABORT("fatal error"); @@ -2660,7 +2726,7 @@ static void ggml_compute_forward_gelu_f16( #ifndef NDEBUG for (int k = 0; k < nc; k++) { const ggml_fp16_t x = ((ggml_fp16_t *) ((char *) dst->data + i1*( dst->nb[1])))[k]; - const float v = GGML_FP16_TO_FP32(x); + const float v = GGML_CPU_FP16_TO_FP32(x); GGML_UNUSED(v); assert(!isnan(v)); assert(!isinf(v)); @@ -2691,6 +2757,109 @@ static void ggml_compute_forward_gelu( } } +// ggml_compute_forward_gelu_erf + +static void ggml_compute_forward_gelu_erf_f32( + const ggml_compute_params * params, + ggml_tensor * dst) { + + const ggml_tensor * src0 = dst->src[0]; + + assert(ggml_is_contiguous_1(src0)); + assert(ggml_is_contiguous_1(dst)); + assert(ggml_are_same_shape(src0, dst)); + + const int ith = params->ith; + const int nth = 
params->nth; + + const int nc = src0->ne[0]; + const int nr = ggml_nrows(src0); + + // rows per thread + const int dr = (nr + nth - 1)/nth; + + // row range for this thread + const int ir0 = dr*ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + ggml_vec_gelu_erf_f32(nc, + (float *) ((char *) dst->data + i1*( dst->nb[1])), + (float *) ((char *) src0->data + i1*(src0->nb[1]))); + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const float x = ((float *) ((char *) dst->data + i1*( dst->nb[1])))[k]; + GGML_UNUSED(x); + assert(!isnan(x)); + assert(!isinf(x)); + } +#endif + } +} + +static void ggml_compute_forward_gelu_erf_f16( + const ggml_compute_params * params, + ggml_tensor * dst) { + + const ggml_tensor * src0 = dst->src[0]; + + assert(ggml_is_contiguous_1(src0)); + assert(ggml_is_contiguous_1(dst)); + assert(ggml_are_same_shape(src0, dst)); + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src0->ne[0]; + const int nr = ggml_nrows(src0); + + // rows per thread + const int dr = (nr + nth - 1)/nth; + + // row range for this thread + const int ir0 = dr*ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + ggml_vec_gelu_erf_f16(nc, + (ggml_fp16_t *) ((char *) dst->data + i1*( dst->nb[1])), + (ggml_fp16_t *) ((char *) src0->data + i1*(src0->nb[1]))); + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const ggml_fp16_t x = ((ggml_fp16_t *) ((char *) dst->data + i1*( dst->nb[1])))[k]; + const float v = GGML_CPU_FP16_TO_FP32(x); + GGML_UNUSED(v); + assert(!isnan(v)); + assert(!isinf(v)); + } +#endif + } +} + +static void ggml_compute_forward_gelu_erf( + const ggml_compute_params * params, + ggml_tensor * dst) { + + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_gelu_erf_f32(params, dst); + } break; + case GGML_TYPE_F16: + { + ggml_compute_forward_gelu_erf_f16(params, dst); + } break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + // ggml_compute_forward_gelu_quick static void ggml_compute_forward_gelu_quick_f32( @@ -2763,7 +2932,7 @@ static void ggml_compute_forward_gelu_quick_f16( #ifndef NDEBUG for (int k = 0; k < nc; k++) { const ggml_fp16_t x = ((ggml_fp16_t *) ((char *) dst->data + i1*( dst->nb[1])))[k]; - const float v = GGML_FP16_TO_FP32(x); + const float v = GGML_CPU_FP16_TO_FP32(x); GGML_UNUSED(v); assert(!isnan(v)); assert(!isinf(v)); @@ -2866,7 +3035,7 @@ static void ggml_compute_forward_silu_f16( #ifndef NDEBUG for (int k = 0; k < nc; k++) { const ggml_fp16_t x = ((ggml_fp16_t *) ((char *) dst->data + i1*(dst->nb[1])))[k]; - const float v = GGML_FP16_TO_FP32(x); + const float v = GGML_CPU_FP16_TO_FP32(x); GGML_UNUSED(v); assert(!isnan(v)); assert(!isinf(v)); @@ -3060,7 +3229,7 @@ static void ggml_compute_forward_silu_back_f16( #ifndef NDEBUG for (int k = 0; k < nc; k++) { const float x = ((ggml_fp16_t *) ((char *) dst->data + i1*( dst->nb[1])))[k]; - const float v = GGML_FP16_TO_FP32(x); + const float v = GGML_CPU_FP16_TO_FP32(x); GGML_UNUSED(v); assert(!isnan(v)); assert(!isinf(v)); @@ -3091,6 +3260,808 @@ void ggml_compute_forward_silu_back( } } +// ggml_compute_forward_reglu + +static void ggml_compute_forward_reglu_f32( + const ggml_compute_params * params, + ggml_tensor * dst) { + + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + char * src0_d = (char *) src0->data; + char * src1_d = (char *) (src1 ? 
src1->data : src0->data); + const size_t src0_o = src0->nb[1]; + const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; + + GGML_ASSERT(ggml_is_contiguous_1(src0)); + GGML_ASSERT(ggml_is_contiguous_1(dst)); + + if (src1) { + GGML_ASSERT(ggml_is_contiguous_1(src1)); + GGML_ASSERT(src0->type == src1->type); + } + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; + const int nr = ggml_nrows(src0); + + GGML_ASSERT(dst->ne[0] == nc); + GGML_ASSERT(ggml_nrows(dst) == nr); + + const int32_t swapped = ggml_get_op_params_i32(dst, 1); + + // rows per thread + const int dr = (nr + nth - 1)/nth; + + // row range for this thread + const int ir0 = dr*ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + float * src0_p = (float *) (src0_d + i1*src0_o); + float * src1_p = (float *) (src1_d + i1*src1_o); + + if (!src1) { + src0_p += swapped ? nc : 0; + src1_p += swapped ? 0 : nc; + } + + ggml_vec_reglu_f32(nc, (float *) ((char *) dst->data + i1*(dst->nb[1])), src0_p, src1_p); + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const float x = ((float *) ((char *) dst->data + i1*( dst->nb[1])))[k]; + GGML_UNUSED(x); + assert(!isnan(x)); + assert(!isinf(x)); + } +#endif + } +} + +static void ggml_compute_forward_reglu_f16( + const ggml_compute_params * params, + ggml_tensor * dst) { + + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + char * src0_d = (char *) src0->data; + char * src1_d = (char *) (src1 ? src1->data : src0->data); + const size_t src0_o = src0->nb[1]; + const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; + + GGML_ASSERT(ggml_is_contiguous_1(src0)); + GGML_ASSERT(ggml_is_contiguous_1(dst)); + + if (src1) { + GGML_ASSERT(ggml_is_contiguous_1(src1)); + GGML_ASSERT(src0->type == src1->type); + } + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; + const int nr = ggml_nrows(src0); + + GGML_ASSERT(dst->ne[0] == nc); + GGML_ASSERT(ggml_nrows(dst) == nr); + + const int32_t swapped = ggml_get_op_params_i32(dst, 1); + + // rows per thread + const int dr = (nr + nth - 1)/nth; + + // row range for this thread + const int ir0 = dr*ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + ggml_fp16_t * src0_p = (ggml_fp16_t *) (src0_d + i1*src0_o); + ggml_fp16_t * src1_p = (ggml_fp16_t *) (src1_d + i1*src1_o); + + if (!src1) { + src0_p += swapped ? nc : 0; + src1_p += swapped ? 
0 : nc; + } + + ggml_vec_reglu_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i1*(dst->nb[1])), src0_p, src1_p); + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const ggml_fp16_t x = ((ggml_fp16_t *) ((char *) dst->data + i1*( dst->nb[1])))[k]; + const float v = GGML_FP16_TO_FP32(x); + GGML_UNUSED(v); + assert(!isnan(v)); + assert(!isinf(v)); + } +#endif + } +} + +static void ggml_compute_forward_reglu( + const ggml_compute_params * params, + ggml_tensor * dst) { + + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_reglu_f32(params, dst); + } break; + case GGML_TYPE_F16: + { + ggml_compute_forward_reglu_f16(params, dst); + } break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_geglu + +static void ggml_compute_forward_geglu_f32( + const ggml_compute_params * params, + ggml_tensor * dst) { + + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + char * src0_d = (char *) src0->data; + char * src1_d = (char *) (src1 ? src1->data : src0->data); + const size_t src0_o = src0->nb[1]; + const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; + + GGML_ASSERT(ggml_is_contiguous_1(src0)); + GGML_ASSERT(ggml_is_contiguous_1(dst)); + + if (src1) { + GGML_ASSERT(ggml_is_contiguous_1(src1)); + GGML_ASSERT(src0->type == src1->type); + } + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; + const int nr = ggml_nrows(src0); + + GGML_ASSERT(dst->ne[0] == nc); + GGML_ASSERT(ggml_nrows(dst) == nr); + + const int32_t swapped = ggml_get_op_params_i32(dst, 1); + + // rows per thread + const int dr = (nr + nth - 1)/nth; + + // row range for this thread + const int ir0 = dr*ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + float * src0_p = (float *) (src0_d + i1*src0_o); + float * src1_p = (float *) (src1_d + i1*src1_o); + + if (!src1) { + src0_p += swapped ? nc : 0; + src1_p += swapped ? 0 : nc; + } + + ggml_vec_geglu_f32(nc, (float *) ((char *) dst->data + i1*(dst->nb[1])), src0_p, src1_p); + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const float x = ((float *) ((char *) dst->data + i1*( dst->nb[1])))[k]; + GGML_UNUSED(x); + assert(!isnan(x)); + assert(!isinf(x)); + } +#endif + } +} + +static void ggml_compute_forward_geglu_f16( + const ggml_compute_params * params, + ggml_tensor * dst) { + + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + char * src0_d = (char *) src0->data; + char * src1_d = (char *) (src1 ? src1->data : src0->data); + const size_t src0_o = src0->nb[1]; + const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; + + GGML_ASSERT(ggml_is_contiguous_1(src0)); + GGML_ASSERT(ggml_is_contiguous_1(dst)); + + if (src1) { + GGML_ASSERT(ggml_is_contiguous_1(src1)); + GGML_ASSERT(src0->type == src1->type); + } + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src1 ? 
src0->ne[0] : src0->ne[0] / 2; + const int nr = ggml_nrows(src0); + + GGML_ASSERT(dst->ne[0] == nc); + GGML_ASSERT(ggml_nrows(dst) == nr); + + const int32_t swapped = ggml_get_op_params_i32(dst, 1); + + // rows per thread + const int dr = (nr + nth - 1)/nth; + + // row range for this thread + const int ir0 = dr*ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + ggml_fp16_t * src0_p = (ggml_fp16_t *) (src0_d + i1*src0_o); + ggml_fp16_t * src1_p = (ggml_fp16_t *) (src1_d + i1*src1_o); + + if (!src1) { + src0_p += swapped ? nc : 0; + src1_p += swapped ? 0 : nc; + } + + ggml_vec_geglu_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i1*(dst->nb[1])), src0_p, src1_p); + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const ggml_fp16_t x = ((ggml_fp16_t *) ((char *) dst->data + i1*( dst->nb[1])))[k]; + const float v = GGML_FP16_TO_FP32(x); + GGML_UNUSED(v); + assert(!isnan(v)); + assert(!isinf(v)); + } +#endif + } +} + +static void ggml_compute_forward_geglu( + const ggml_compute_params * params, + ggml_tensor * dst) { + + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_geglu_f32(params, dst); + } break; + case GGML_TYPE_F16: + { + ggml_compute_forward_geglu_f16(params, dst); + } break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_swiglu + +static void ggml_compute_forward_swiglu_f32( + const ggml_compute_params * params, + ggml_tensor * dst) { + + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + char * src0_d = (char *) src0->data; + char * src1_d = (char *) (src1 ? src1->data : src0->data); + const size_t src0_o = src0->nb[1]; + const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; + + GGML_ASSERT(ggml_is_contiguous_1(src0)); + GGML_ASSERT(ggml_is_contiguous_1(dst)); + + if (src1) { + GGML_ASSERT(ggml_is_contiguous_1(src1)); + GGML_ASSERT(src0->type == src1->type); + } + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; + const int nr = ggml_nrows(src0); + + GGML_ASSERT(dst->ne[0] == nc); + GGML_ASSERT(ggml_nrows(dst) == nr); + + const int32_t swapped = ggml_get_op_params_i32(dst, 1); + + // rows per thread + const int dr = (nr + nth - 1)/nth; + + // row range for this thread + const int ir0 = dr*ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + float * src0_p = (float *) (src0_d + i1*src0_o); + float * src1_p = (float *) (src1_d + i1*src1_o); + + if (!src1) { + src0_p += swapped ? nc : 0; + src1_p += swapped ? 0 : nc; + } + + ggml_vec_swiglu_f32(nc, (float *) ((char *) dst->data + i1*(dst->nb[1])), src0_p, src1_p); + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const float x = ((float *) ((char *) dst->data + i1*( dst->nb[1])))[k]; + GGML_UNUSED(x); + assert(!isnan(x)); + assert(!isinf(x)); + } +#endif + } +} + +static void ggml_compute_forward_swiglu_f16( + const ggml_compute_params * params, + ggml_tensor * dst) { + + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + char * src0_d = (char *) src0->data; + char * src1_d = (char *) (src1 ? src1->data : src0->data); + const size_t src0_o = src0->nb[1]; + const size_t src1_o = src1 ? 
src1->nb[1] : src0->nb[1]; + + GGML_ASSERT(ggml_is_contiguous_1(src0)); + GGML_ASSERT(ggml_is_contiguous_1(dst)); + + if (src1) { + GGML_ASSERT(ggml_is_contiguous_1(src1)); + GGML_ASSERT(src0->type == src1->type); + } + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; + const int nr = ggml_nrows(src0); + + GGML_ASSERT(dst->ne[0] == nc); + GGML_ASSERT(ggml_nrows(dst) == nr); + + const int32_t swapped = ggml_get_op_params_i32(dst, 1); + + // rows per thread + const int dr = (nr + nth - 1)/nth; + + // row range for this thread + const int ir0 = dr*ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + ggml_fp16_t * src0_p = (ggml_fp16_t *) (src0_d + i1*src0_o); + ggml_fp16_t * src1_p = (ggml_fp16_t *) (src1_d + i1*src1_o); + + if (!src1) { + src0_p += swapped ? nc : 0; + src1_p += swapped ? 0 : nc; + } + + ggml_vec_swiglu_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i1*(dst->nb[1])), src0_p, src1_p); + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const ggml_fp16_t x = ((ggml_fp16_t *) ((char *) dst->data + i1*( dst->nb[1])))[k]; + const float v = GGML_FP16_TO_FP32(x); + GGML_UNUSED(v); + assert(!isnan(v)); + assert(!isinf(v)); + } +#endif + } +} + +static void ggml_compute_forward_swiglu( + const ggml_compute_params * params, + ggml_tensor * dst) { + + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_swiglu_f32(params, dst); + } break; + case GGML_TYPE_F16: + { + ggml_compute_forward_swiglu_f16(params, dst); + } break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_swiglu_oai + +static void ggml_compute_forward_swiglu_oai_f32( + const ggml_compute_params * params, + ggml_tensor * dst) { + + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + char * src0_d = (char *) src0->data; + char * src1_d = (char *) (src1 ? src1->data : src0->data); + const size_t src0_o = src0->nb[1]; + const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; + + GGML_ASSERT(ggml_is_contiguous_1(src0)); + GGML_ASSERT(ggml_is_contiguous_1(dst)); + + if (src1) { + GGML_ASSERT(ggml_is_contiguous_1(src1)); + GGML_ASSERT(src0->type == src1->type); + } + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; + const int nr = ggml_nrows(src0); + + GGML_ASSERT(dst->ne[0] == nc); + GGML_ASSERT(ggml_nrows(dst) == nr); + + const int32_t swapped = ggml_get_op_params_i32(dst, 1); + const float alpha = ggml_get_op_params_f32(dst, 2); + const float limit = ggml_get_op_params_f32(dst, 3); + + // rows per thread + const int dr = (nr + nth - 1)/nth; + + // row range for this thread + const int ir0 = dr*ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + float * src0_p = (float *) (src0_d + i1*src0_o); + float * src1_p = (float *) (src1_d + i1*src1_o); + float * dst_p = (float *) ((char *) dst->data + i1*(dst->nb[1])); + + if (!src1) { + src0_p += swapped ? nc : 0; + src1_p += swapped ? 
0 : nc; + } + + for (int k = 0; k < nc; k++) { + const float x = std::min(src0_p[k], limit); + const float y = std::clamp(src1_p[k], -limit, limit); + const float out_glu = x / (1.f + expf(alpha * (-x))); + dst_p[k] = out_glu * (y + 1.f); + } + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const float x = dst_p[k]; + GGML_UNUSED(x); + assert(!isnan(x)); + assert(!isinf(x)); + } +#endif + } +} + +static void ggml_compute_forward_swiglu_oai( + const ggml_compute_params * params, + ggml_tensor * dst) { + + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_swiglu_oai_f32(params, dst); + } break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_geglu_erf + +static void ggml_compute_forward_geglu_erf_f32( + const ggml_compute_params * params, + ggml_tensor * dst) { + + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + char * src0_d = (char *) src0->data; + char * src1_d = (char *) (src1 ? src1->data : src0->data); + const size_t src0_o = src0->nb[1]; + const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; + + GGML_ASSERT(ggml_is_contiguous_1(src0)); + GGML_ASSERT(ggml_is_contiguous_1(dst)); + + if (src1) { + GGML_ASSERT(ggml_is_contiguous_1(src1)); + GGML_ASSERT(src0->type == src1->type); + } + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; + const int nr = ggml_nrows(src0); + + GGML_ASSERT(dst->ne[0] == nc); + GGML_ASSERT(ggml_nrows(dst) == nr); + + const int32_t swapped = ggml_get_op_params_i32(dst, 1); + + // rows per thread + const int dr = (nr + nth - 1)/nth; + + // row range for this thread + const int ir0 = dr*ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + float * src0_p = (float *) (src0_d + i1*src0_o); + float * src1_p = (float *) (src1_d + i1*src1_o); + + if (!src1) { + src0_p += swapped ? nc : 0; + src1_p += swapped ? 0 : nc; + } + + ggml_vec_geglu_erf_f32(nc, (float *) ((char *) dst->data + i1*(dst->nb[1])), src0_p, src1_p); + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const float x = ((float *) ((char *) dst->data + i1*( dst->nb[1])))[k]; + GGML_UNUSED(x); + assert(!isnan(x)); + assert(!isinf(x)); + } +#endif + } +} + +static void ggml_compute_forward_geglu_erf_f16( + const ggml_compute_params * params, + ggml_tensor * dst) { + + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + char * src0_d = (char *) src0->data; + char * src1_d = (char *) (src1 ? src1->data : src0->data); + const size_t src0_o = src0->nb[1]; + const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; + + GGML_ASSERT(ggml_is_contiguous_1(src0)); + GGML_ASSERT(ggml_is_contiguous_1(dst)); + + if (src1) { + GGML_ASSERT(ggml_is_contiguous_1(src1)); + GGML_ASSERT(src0->type == src1->type); + } + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; + const int nr = ggml_nrows(src0); + + GGML_ASSERT(dst->ne[0] == nc); + GGML_ASSERT(ggml_nrows(dst) == nr); + + const int32_t swapped = ggml_get_op_params_i32(dst, 1); + + // rows per thread + const int dr = (nr + nth - 1)/nth; + + // row range for this thread + const int ir0 = dr*ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + ggml_fp16_t * src0_p = (ggml_fp16_t *) (src0_d + i1*src0_o); + ggml_fp16_t * src1_p = (ggml_fp16_t *) (src1_d + i1*src1_o); + + if (!src1) { + src0_p += swapped ? 
nc : 0; + src1_p += swapped ? 0 : nc; + } + + ggml_vec_geglu_erf_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i1*(dst->nb[1])), src0_p, src1_p); + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const ggml_fp16_t x = ((ggml_fp16_t *) ((char *) dst->data + i1*( dst->nb[1])))[k]; + const float v = GGML_FP16_TO_FP32(x); + GGML_UNUSED(v); + assert(!isnan(v)); + assert(!isinf(v)); + } +#endif + } +} + +static void ggml_compute_forward_geglu_erf( + const ggml_compute_params * params, + ggml_tensor * dst) { + + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_geglu_erf_f32(params, dst); + } break; + case GGML_TYPE_F16: + { + ggml_compute_forward_geglu_erf_f16(params, dst); + } break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_geglu_quick + +static void ggml_compute_forward_geglu_quick_f32( + const ggml_compute_params * params, + ggml_tensor * dst) { + + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + char * src0_d = (char *) src0->data; + char * src1_d = (char *) (src1 ? src1->data : src0->data); + const size_t src0_o = src0->nb[1]; + const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; + + GGML_ASSERT(ggml_is_contiguous_1(src0)); + GGML_ASSERT(ggml_is_contiguous_1(dst)); + + if (src1) { + GGML_ASSERT(ggml_is_contiguous_1(src1)); + GGML_ASSERT(src0->type == src1->type); + } + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; + const int nr = ggml_nrows(src0); + + GGML_ASSERT(dst->ne[0] == nc); + GGML_ASSERT(ggml_nrows(dst) == nr); + + const int32_t swapped = ggml_get_op_params_i32(dst, 1); + + // rows per thread + const int dr = (nr + nth - 1)/nth; + + // row range for this thread + const int ir0 = dr*ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + float * src0_p = (float *) (src0_d + i1*src0_o); + float * src1_p = (float *) (src1_d + i1*src1_o); + + if (!src1) { + src0_p += swapped ? nc : 0; + src1_p += swapped ? 0 : nc; + } + + ggml_vec_geglu_quick_f32(nc, (float *) ((char *) dst->data + i1*(dst->nb[1])), src0_p, src1_p); + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const float x = ((float *) ((char *) dst->data + i1*( dst->nb[1])))[k]; + GGML_UNUSED(x); + assert(!isnan(x)); + assert(!isinf(x)); + } +#endif + } +} + +static void ggml_compute_forward_geglu_quick_f16( + const ggml_compute_params * params, + ggml_tensor * dst) { + + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + char * src0_d = (char *) src0->data; + char * src1_d = (char *) (src1 ? src1->data : src0->data); + const size_t src0_o = src0->nb[1]; + const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; + + GGML_ASSERT(ggml_is_contiguous_1(src0)); + GGML_ASSERT(ggml_is_contiguous_1(dst)); + + if (src1) { + GGML_ASSERT(ggml_is_contiguous_1(src1)); + GGML_ASSERT(src0->type == src1->type); + } + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src1 ? 
src0->ne[0] : src0->ne[0] / 2; + const int nr = ggml_nrows(src0); + + GGML_ASSERT(dst->ne[0] == nc); + GGML_ASSERT(ggml_nrows(dst) == nr); + + const int32_t swapped = ggml_get_op_params_i32(dst, 1); + + // rows per thread + const int dr = (nr + nth - 1)/nth; + + // row range for this thread + const int ir0 = dr*ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + ggml_fp16_t * src0_p = (ggml_fp16_t *) (src0_d + i1*src0_o); + ggml_fp16_t * src1_p = (ggml_fp16_t *) (src1_d + i1*src1_o); + + if (!src1) { + src0_p += swapped ? nc : 0; + src1_p += swapped ? 0 : nc; + } + + ggml_vec_geglu_quick_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i1*(dst->nb[1])), src0_p, src1_p); + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const ggml_fp16_t x = ((ggml_fp16_t *) ((char *) dst->data + i1*( dst->nb[1])))[k]; + const float v = GGML_FP16_TO_FP32(x); + GGML_UNUSED(v); + assert(!isnan(v)); + assert(!isinf(v)); + } +#endif + } +} + +static void ggml_compute_forward_geglu_quick( + const ggml_compute_params * params, + ggml_tensor * dst) { + + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_geglu_quick_f32(params, dst); + } break; + case GGML_TYPE_F16: + { + ggml_compute_forward_geglu_quick_f16(params, dst); + } break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + // ggml_compute_forward_norm static void ggml_compute_forward_norm_f32( @@ -3206,6 +4177,9 @@ static void ggml_compute_forward_rms_norm_f32( const float scale = 1.0f/sqrtf(mean + eps); + // if you hit this, likely you got an inf somewhere earlier + assert(scale > 0.0f); + ggml_vec_scale_f32(ne00, y, scale); } } @@ -3787,6 +4761,7 @@ void ggml_compute_forward_out_prod( case GGML_TYPE_Q5_0: case GGML_TYPE_Q5_1: case GGML_TYPE_Q8_0: + case GGML_TYPE_MXFP4: case GGML_TYPE_Q2_K: case GGML_TYPE_Q3_K: case GGML_TYPE_Q4_K: @@ -3834,9 +4809,11 @@ static void ggml_compute_forward_scale_f32( GGML_ASSERT(ggml_is_contiguous(dst)); GGML_ASSERT(ggml_are_same_shape(src0, dst)); - // scale factor - float v; - memcpy(&v, dst->op_params, sizeof(float)); + float s; // scale factor + float b; // bias + + memcpy(&s, (float *) dst->op_params + 0, sizeof(float)); + memcpy(&b, (float *) dst->op_params + 1, sizeof(float)); const int ith = params->ith; const int nth = params->nth; @@ -3855,12 +4832,22 @@ static void ggml_compute_forward_scale_f32( const size_t nb1 = dst->nb[1]; - for (int i1 = ir0; i1 < ir1; i1++) { - if (dst->data != src0->data) { - // src0 is same shape as dst => same indices - memcpy((char *)dst->data + i1*nb1, (char *)src0->data + i1*nb01, nc * sizeof(float)); + if (b == 0.0f) { + for (int i1 = ir0; i1 < ir1; i1++) { + if (dst->data != src0->data) { + // src0 is same shape as dst => same indices + // TODO: add x parameter to ggml_vec_scale_f32 and remove this memcpy + memcpy((char *)dst->data + i1*nb1, (char *)src0->data + i1*nb01, nc * sizeof(float)); + } + ggml_vec_scale_f32(nc, (float *) ((char *) dst->data + i1*nb1), s); + } + } else { + for (int i1 = ir0; i1 < ir1; i1++) { + ggml_vec_mad1_f32(nc, + (float *) ((char *) dst->data + i1*nb1), + (float *) ((char *) src0->data + i1*nb1), + s, b); } - ggml_vec_scale_f32(nc, (float *) ((char *) dst->data + i1*nb1), v); } } @@ -4049,6 +5036,7 @@ void ggml_compute_forward_set( case GGML_TYPE_Q5_1: case GGML_TYPE_Q8_0: case GGML_TYPE_Q8_1: + case GGML_TYPE_MXFP4: case GGML_TYPE_Q2_K: case GGML_TYPE_Q3_K: case GGML_TYPE_Q4_K: @@ -4310,6 +5298,7 @@ void ggml_compute_forward_get_rows( case 
GGML_TYPE_Q5_1: case GGML_TYPE_Q8_0: case GGML_TYPE_Q8_1: + case GGML_TYPE_MXFP4: case GGML_TYPE_Q2_K: case GGML_TYPE_Q3_K: case GGML_TYPE_Q4_K: @@ -4367,6 +5356,74 @@ void ggml_compute_forward_get_rows( //} } +static void ggml_compute_forward_set_rows_f32( + const ggml_compute_params * params, + ggml_tensor * dst) { + + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_TENSOR_BINARY_OP_LOCALS + + const int64_t nc = ne00; + const int64_t nr = ne01; + + assert(ne0 == nc); + assert(ne2 == ne02); + assert(ne3 == ne03); + assert(src0->type == GGML_TYPE_F32); + assert(ne02 % ne11 == 0); + assert(ne03 % ne12 == 0); + + const int ith = params->ith; + const int nth = params->nth; + + // rows per thread + const int64_t dr = (nr + nth - 1)/nth; + + // row range for this thread + const int64_t ir0 = dr*ith; + const int64_t ir1 = std::min(ir0 + dr, nr); + + ggml_from_float_t const from_float = ggml_get_type_traits_cpu(dst->type)->from_float; + + for (int64_t i03 = 0; i03 < ne03; ++i03) { + for (int64_t i02 = 0; i02 < ne02; ++i02) { + for (int64_t i = ir0; i < ir1; ++i) { + const int64_t i12 = i03%ne12; + const int64_t i11 = i02%ne11; + const int64_t i10 = i; + + const int64_t i1 = *(int64_t *) ((char *) src1->data + i10*nb10 + i11*nb11 + i12*nb12); + + GGML_ASSERT(i1 >= 0 && i1 < ne1); + + from_float( + (const float *) ((char *) src0->data + i*nb01 + i02*nb02 + i03*nb03), + ((char *) dst->data + i1*nb1 + i02*nb2 + i03*nb3), nc); + } + } + } +} + +void ggml_compute_forward_set_rows( + const ggml_compute_params * params, + ggml_tensor * dst) { + + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_set_rows_f32(params, dst); + } break; + default: + { + GGML_ABORT("src0->type = %d (%s) not supported", src0->type, ggml_type_name(src0->type)); + } + } +} + // ggml_compute_forward_get_rows_back static void ggml_compute_forward_get_rows_back_f32_f16( @@ -4397,7 +5454,7 @@ static void ggml_compute_forward_get_rows_back_f32_f16( for (int j = 0; j < nc; ++j) { ggml_fp16_t v = ((ggml_fp16_t *) ((char *) src0->data + i*src0->nb[1]))[j]; - ((float *) ((char *) dst->data + r*dst->nb[1]))[j] += GGML_FP16_TO_FP32(v); + ((float *) ((char *) dst->data + r*dst->nb[1]))[j] += GGML_CPU_FP16_TO_FP32(v); } } } @@ -4631,6 +5688,7 @@ static void ggml_compute_forward_soft_max_f32( const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; + const ggml_tensor * src2 = dst->src[2]; assert(ggml_is_contiguous(dst)); assert(ggml_are_same_shape(src0, dst)); @@ -4641,14 +5699,17 @@ static void ggml_compute_forward_soft_max_f32( memcpy(&scale, (float *) dst->op_params + 0, sizeof(float)); memcpy(&max_bias, (float *) dst->op_params + 1, sizeof(float)); - // TODO: handle transposed/permuted matrices - const int ith = params->ith; const int nth = params->nth; GGML_TENSOR_UNARY_OP_LOCALS - //const int64_t ne11 = src1 ? src1->ne[1] : 1; + const int64_t nb11 = src1 ? src1->nb[1] : 1; + const int64_t nb12 = src1 ? src1->nb[2] : 1; + const int64_t nb13 = src1 ? src1->nb[3] : 1; + + const int64_t ne12 = src1 ? src1->ne[2] : 1; + const int64_t ne13 = src1 ? src1->ne[3] : 1; // TODO: is this supposed to be ceil instead of floor? 
// https://huggingface.co/mosaicml/mpt-7b/blob/main/attention.py#L370 @@ -4658,68 +5719,78 @@ static void ggml_compute_forward_soft_max_f32( const float m0 = powf(2.0f, -(max_bias ) / n_head_log2); const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2); - const int nc = src0->ne[0]; - const int nr = ggml_nrows(src0); - - // rows per thread - const int dr = (nr + nth - 1)/nth; - - // row range for this thread - const int ir0 = dr*ith; - const int ir1 = MIN(ir0 + dr, nr); - - float * wp = (float *) params->wdata + (nc + CACHE_LINE_SIZE_F32) * ith; + float * wp = (float *) params->wdata + (ne00 + CACHE_LINE_SIZE_F32) * ith; const bool use_f16 = (src1 && src1->type == GGML_TYPE_F16); - for (int i1 = ir0; i1 < ir1; i1++) { - // ALiBi - const uint32_t h = (i1/ne01)%ne02; // head - const float slope = (max_bias > 0.0f) ? h < n_head_log2 ? powf(m0, h + 1) : powf(m1, 2*(h - n_head_log2) + 1) : 1.0f; + // sinks + const float * sk = src2 ? (float *)((char *) src2->data) : nullptr; - float * sp = (float *)((char *) src0->data + i1*src0->nb[1]); - float * dp = (float *)((char *) dst->data + i1*dst->nb[1]); + for (int64_t i03 = 0; i03 < ne03; i03++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + for (int64_t i01 = ith; i01 < ne01; i01 += nth) { + const int64_t i11 = i01; + const int64_t i12 = i02%ne12; + const int64_t i13 = i03%ne13; - // broadcast the mask across rows - ggml_fp16_t * mp_f16 = src1 ? (ggml_fp16_t *)((char *) src1->data) + (i1%ne01)*ne00 : NULL; - float * mp_f32 = src1 ? (float *)((char *) src1->data) + (i1%ne01)*ne00 : NULL; + // ALiBi + const uint32_t h = i02; // head + const float slope = (max_bias > 0.0f) ? h < n_head_log2 ? powf(m0, h + 1) : powf(m1, 2*(h - n_head_log2) + 1) : 1.0f; - ggml_vec_cpy_f32 (nc, wp, sp); - ggml_vec_scale_f32(nc, wp, scale); - if (mp_f32) { - if (use_f16) { - for (int i = 0; i < nc; ++i) { - wp[i] += slope*GGML_FP16_TO_FP32(mp_f16[i]); + float * sp = (float *)((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03); + float * dp = (float *)((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3); + + // broadcast the mask across rows + ggml_fp16_t * mp_f16 = src1 ? (ggml_fp16_t *)((char *) src1->data + i11*nb11 + i12*nb12 + i13*nb13) : NULL; + float * mp_f32 = src1 ? 
(float *)((char *) src1->data + i11*nb11 + i12*nb12 + i13*nb13) : NULL; + + ggml_vec_cpy_f32 (ne00, wp, sp); + ggml_vec_scale_f32(ne00, wp, scale); + if (mp_f32) { + if (use_f16) { + for (int i = 0; i < ne00; ++i) { + wp[i] += slope*GGML_CPU_FP16_TO_FP32(mp_f16[i]); + } + } else { + for (int i = 0; i < ne00; ++i) { + wp[i] += slope*mp_f32[i]; + } + } } - } else { - for (int i = 0; i < nc; ++i) { - wp[i] += slope*mp_f32[i]; + +#ifndef NDEBUG + for (int i = 0; i < ne00; ++i) { + //printf("p[%d] = %f\n", i, p[i]); + assert(!isnan(wp[i])); } +#endif + + float max = -INFINITY; + ggml_vec_max_f32(ne00, &max, wp); + + // if we have sinks, make a correction as if they were included in the softmax + if (sk) { + max = MAX(max, sk[i02]); + } + + ggml_float sum = ggml_vec_soft_max_f32(ne00, dp, wp, max); + assert(sum > 0.0); + + if (sk) { + sum += (ggml_float) expf(sk[i02] - max); + } + + sum = 1.0/sum; + ggml_vec_scale_f32(ne00, dp, sum); + +#ifndef NDEBUG + for (int i = 0; i < ne00; ++i) { + assert(!isnan(dp[i])); + assert(!isinf(dp[i])); + } +#endif } } - -#ifndef NDEBUG - for (int i = 0; i < nc; ++i) { - //printf("p[%d] = %f\n", i, p[i]); - assert(!isnan(wp[i])); - } -#endif - - float max = -INFINITY; - ggml_vec_max_f32(nc, &max, wp); - - ggml_float sum = ggml_vec_soft_max_f32(nc, dp, wp, max); - assert(sum > 0.0); - - sum = 1.0/sum; - ggml_vec_scale_f32(nc, dp, sum); - -#ifndef NDEBUG - for (int i = 0; i < nc; ++i) { - assert(!isnan(dp[i])); - assert(!isinf(dp[i])); - } -#endif } } @@ -4915,8 +5986,8 @@ static void ggml_compute_forward_clamp_f16( ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + j*nb01); for (int i = 0; i < nc; i++) { - float v = GGML_FP16_TO_FP32(src0_ptr[i]); - dst_ptr[i] = GGML_FP32_TO_FP16(MAX(MIN(v, max), min)); + float v = GGML_CPU_FP16_TO_FP32(src0_ptr[i]); + dst_ptr[i] = GGML_CPU_FP32_TO_FP16(MAX(MIN(v, max), min)); } } } @@ -4943,6 +6014,7 @@ void ggml_compute_forward_clamp( case GGML_TYPE_Q5_1: case GGML_TYPE_Q8_0: case GGML_TYPE_Q8_1: + case GGML_TYPE_MXFP4: case GGML_TYPE_Q2_K: case GGML_TYPE_Q3_K: case GGML_TYPE_Q4_K: @@ -4965,7 +6037,6 @@ void ggml_compute_forward_clamp( case GGML_TYPE_I32: case GGML_TYPE_I64: case GGML_TYPE_F64: - case GGML_TYPE_MXFP4: case GGML_TYPE_COUNT: { GGML_ABORT("fatal error"); @@ -5374,11 +6445,11 @@ static void ggml_compute_forward_rope_f16( const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + ic*nb00); ggml_fp16_t * dst_data = (ggml_fp16_t *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + ic*nb0); - const float x0 = GGML_FP16_TO_FP32(src[0]); - const float x1 = GGML_FP16_TO_FP32(src[n_dims]); + const float x0 = GGML_CPU_FP16_TO_FP32(src[0]); + const float x1 = GGML_CPU_FP16_TO_FP32(src[n_dims]); - dst_data[0] = GGML_FP32_TO_FP16(x0*cos_theta - x1*sin_theta); - dst_data[n_dims] = GGML_FP32_TO_FP16(x0*sin_theta + x1*cos_theta); + dst_data[0] = GGML_CPU_FP32_TO_FP16(x0*cos_theta - x1*sin_theta); + dst_data[n_dims] = GGML_CPU_FP32_TO_FP16(x0*sin_theta + x1*cos_theta); } } else { for (int64_t i0 = 0; i0 < n_dims; i0 += 2) { @@ -5390,11 +6461,11 @@ static void ggml_compute_forward_rope_f16( const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + ic*nb00); ggml_fp16_t * dst_data = (ggml_fp16_t *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + ic*nb0); - const float x0 = GGML_FP16_TO_FP32(src[0]); - const float x1 = GGML_FP16_TO_FP32(src[n_dims/2]); + const float x0 = GGML_CPU_FP16_TO_FP32(src[0]); + const float x1 = 
GGML_CPU_FP16_TO_FP32(src[n_dims/2]); - dst_data[0] = GGML_FP32_TO_FP16(x0*cos_theta - x1*sin_theta); - dst_data[n_dims/2] = GGML_FP32_TO_FP16(x0*sin_theta + x1*cos_theta); + dst_data[0] = GGML_CPU_FP32_TO_FP16(x0*cos_theta - x1*sin_theta); + dst_data[n_dims/2] = GGML_CPU_FP32_TO_FP16(x0*sin_theta + x1*cos_theta); } } } else { @@ -5405,11 +6476,11 @@ static void ggml_compute_forward_rope_f16( const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00); ggml_fp16_t * dst_data = (ggml_fp16_t *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); - const float x0 = GGML_FP16_TO_FP32(src[0]); - const float x1 = GGML_FP16_TO_FP32(src[1]); + const float x0 = GGML_CPU_FP16_TO_FP32(src[0]); + const float x1 = GGML_CPU_FP16_TO_FP32(src[1]); - dst_data[0] = GGML_FP32_TO_FP16(x0*cos_theta - x1*sin_theta); - dst_data[1] = GGML_FP32_TO_FP16(x0*sin_theta + x1*cos_theta); + dst_data[0] = GGML_CPU_FP32_TO_FP16(x0*cos_theta - x1*sin_theta); + dst_data[1] = GGML_CPU_FP32_TO_FP16(x0*sin_theta + x1*cos_theta); } } @@ -5423,11 +6494,11 @@ static void ggml_compute_forward_rope_f16( const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + ic*nb00); ggml_fp16_t * dst_data = (ggml_fp16_t *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + ic*nb0); - const float x0 = GGML_FP16_TO_FP32(src[0]); - const float x1 = GGML_FP16_TO_FP32(src[n_dims]); + const float x0 = GGML_CPU_FP16_TO_FP32(src[0]); + const float x1 = GGML_CPU_FP16_TO_FP32(src[n_dims]); - dst_data[0] = GGML_FP32_TO_FP16(x0*cos_theta - x1*sin_theta); - dst_data[n_dims] = GGML_FP32_TO_FP16(x0*sin_theta + x1*cos_theta); + dst_data[0] = GGML_CPU_FP32_TO_FP16(x0*cos_theta - x1*sin_theta); + dst_data[n_dims] = GGML_CPU_FP32_TO_FP16(x0*sin_theta + x1*cos_theta); } } else { for (int64_t i0 = n_dims; i0 < ne0; i0 += 2) { @@ -5538,7 +6609,7 @@ static void ggml_compute_forward_conv_transpose_1d_f16_f32( for (int64_t i11 = 0; i11 < ne11; i11++) { const float * const src = (float *)((char *) src1->data + i11*nb11); for (int64_t i10 = 0; i10 < ne10; i10++) { - dst_data[i10*ne11 + i11] = GGML_FP32_TO_FP16(src[i10]); + dst_data[i10*ne11 + i11] = GGML_CPU_FP32_TO_FP16(src[i10]); } } } @@ -5831,7 +6902,7 @@ static void ggml_compute_forward_im2col_f16( if (iih < 0 || iih >= IH || iiw < 0 || iiw >= IW) { dst_data[iic*(KH*KW) + ikh*KW + ikw] = 0; } else { - dst_data[iic*(KH*KW) + ikh*KW + ikw] = GGML_FP32_TO_FP16(src_data[iih*IW + iiw]); + dst_data[iic*(KH*KW) + ikh*KW + ikw] = GGML_CPU_FP32_TO_FP16(src_data[iih*IW + iiw]); } } } @@ -5956,6 +7027,186 @@ void ggml_compute_forward_im2col_back_f32( } } +static void ggml_call_mul_mat(ggml_type type, const ggml_compute_params * params, int64_t m, int64_t n, int64_t k, + void * a, void * b, float * c) { + const ggml_type_traits * traits = ggml_get_type_traits(type); + struct ggml_tensor src1 = {}; + src1.type = type; + src1.ne[0] = k; + src1.ne[1] = m; + src1.ne[2] = 1; + src1.ne[3] = 1; + src1.nb[0] = traits->type_size; + src1.nb[1] = k * traits->type_size; + src1.nb[2] = src1.nb[1]; + src1.nb[3] = src1.nb[2]; + src1.data = a; + + struct ggml_tensor src0 = {}; + src0.type = type; + src0.ne[0] = k; + src0.ne[1] = n; + src0.ne[2] = 1; + src0.ne[3] = 1; + src0.nb[0] = traits->type_size; + src0.nb[1] = k * traits->type_size; + src0.nb[2] = src0.nb[1]; + src0.nb[3] = src0.nb[2]; + src0.data = b; + + struct ggml_tensor dst = {}; + dst.ne[0] = n; + dst.ne[1] = m; + dst.ne[2] = 1; + dst.ne[3] = 1; + dst.nb[0] = sizeof(float); + dst.nb[1] = n * 
sizeof(float); + dst.nb[2] = dst.nb[1]; + dst.nb[3] = dst.nb[2]; + dst.data = c; + dst.src[0] = &src0; + dst.src[1] = &src1; + + ggml_compute_forward_mul_mat(params, &dst); +} + +// ggml_compute_forward_conv_2d + +static void ggml_compute_forward_conv_2d_impl(const ggml_compute_params * params, + const ggml_tensor * kernel, // [KW, KH, IC, OC] + const ggml_tensor * src, // [W, H, C, N] + ggml_tensor * dst, // [OW, OH, OC, N] + ggml_type kernel_type) { + + GGML_ASSERT(ggml_is_contiguous(kernel)); + GGML_ASSERT(kernel_type == GGML_TYPE_F16 || kernel_type == GGML_TYPE_F32); + GGML_ASSERT(kernel->type == kernel_type); + + const ggml_type_traits * traits = ggml_get_type_traits(kernel_type); + + const int32_t stride_x = dst->op_params[0]; + const int32_t stride_y = dst->op_params[1]; + const int32_t pad_x = dst->op_params[2]; + const int32_t pad_y = dst->op_params[3]; + const int32_t dilation_x = dst->op_params[4]; + const int32_t dilation_y = dst->op_params[5]; + + const int64_t c_in = src->ne[2]; + const int64_t c_out = kernel->ne[3]; + GGML_ASSERT(c_in == kernel->ne[2]); + + const int64_t src_w = src->ne[0]; + const int64_t src_h = src->ne[1]; + const int64_t knl_w = kernel->ne[0]; + const int64_t knl_h = kernel->ne[1]; + const int64_t dst_w = dst->ne[0]; + const int64_t dst_h = dst->ne[1]; + + const float * src_data = (float *) src->data; + void * knl_data = kernel->data; + float * dst_data = (float *) dst->data; + + const int64_t knl_n = knl_w * knl_h * c_in; + const int64_t patch_total = dst->ne[3] * dst_w * dst_h; + + const int64_t space_per_patch = knl_n * traits->type_size + c_out * sizeof(float); + const int64_t batch_size = params->wsize / space_per_patch; + const int64_t patches_per_batch = batch_size > 8 ? (batch_size / 8) * 8 : batch_size; + const int64_t batch_n = (patch_total + patches_per_batch - 1) / patches_per_batch; + + GGML_ASSERT(patches_per_batch > 0 && batch_size >= 1); + + void * tmp = params->wdata; + + for (int64_t batch_i = 0; batch_i < batch_n; ++batch_i) { + + const int64_t patch_start_batch = batch_i * patches_per_batch; + const int64_t patch_end_batch = std::min(patch_start_batch + patches_per_batch, + patch_total); + const int64_t patch_n = patch_end_batch - patch_start_batch; + + const int64_t patch_per_thread = (patch_n + params->nth - 1) / params->nth; + const int64_t patch_start = patch_start_batch + params->ith * patch_per_thread; + const int64_t patch_end = std::min(patch_start + patch_per_thread, patch_end_batch); + + //im2col for a patch + for (int64_t p = patch_start; p < patch_end; ++p) { + const int64_t batch_n = p / (dst_w * dst_h); + const int64_t src_x = (p / dst_w) % dst_h; + const int64_t src_y = p % dst_w; + + const float * src_base = (const float *)((const char *)src_data + batch_n * src->nb[3]); + char * dst_row = (char *) tmp + (p % patches_per_batch) * knl_n * traits->type_size; + + for (int64_t ic = 0; ic < c_in; ++ic) { + for (int64_t ky = 0; ky < knl_h; ++ky) { + for (int64_t kx = 0; kx < knl_w; ++kx) { + const int64_t sy = src_x * stride_y + ky * dilation_y - pad_y; + const int64_t sx = src_y * stride_x + kx * dilation_x - pad_x; + + int64_t dst_idx = ic * (knl_h * knl_w) + ky * knl_w + kx; + + float src_val; + if (sy < 0 || sy >= src_h || sx < 0 || sx >= src_w) { + src_val = 0.0f; + } else { + const float * src_ptr = (const float *)((const char *)src_base + sx * src->nb[0] + sy * src->nb[1] + ic * src->nb[2]); + src_val = *src_ptr; + } + + char * element_ptr = dst_row + dst_idx * traits->type_size; + if (kernel_type == GGML_TYPE_F32) { 
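+                            // write the gathered element in the kernel's compute type:
+                            // F32 kernels take the value as-is, the F16 branch below
+                            // converts it, so the GEMM runs on homogeneous operands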
+ *(float *) element_ptr = src_val; + } else if (kernel_type == GGML_TYPE_F16) { + *(ggml_fp16_t *) element_ptr = GGML_CPU_FP32_TO_FP16(src_val); + } + } + } + } + } // patches handled by this thread + + ggml_barrier(params->threadpool); + + float * gemm_output = (float *) ((char *) tmp + patches_per_batch * knl_n * traits->type_size); + + GGML_ASSERT(gemm_output + patch_n * c_out <= (float*)tmp + params->wsize); + + // GEMM: patches[patch_n, knl_n] × kernel[knl_n, c_out] = output[patch_n, c_out] + ggml_call_mul_mat(kernel_type, params, patch_n, c_out, knl_n, tmp, knl_data, gemm_output); + + ggml_barrier(params->threadpool); + + + //permute back [OC, N, OH, OW] to [N, OC, OH, OW] + const int64_t permute_per_thread = (patch_n + params->nth - 1) / params->nth; + const int64_t permute_start = params->ith * permute_per_thread; + const int64_t permute_end = std::min(permute_start + permute_per_thread, patch_n); + + for (int64_t i = permute_start; i < permute_end; ++i) { + const int64_t p = patch_start_batch + i; + const int64_t batch_n = p / (dst_w * dst_h); + const int64_t dst_y = (p / dst_w) % dst_h; + const int64_t dst_x = p % dst_w; + + for (int64_t oc = 0; oc < c_out; ++oc) { + const float value = gemm_output[i * c_out + oc]; + float * dst_ptr = (float *)((char *)dst_data + dst_x * dst->nb[0] + dst_y * dst->nb[1] + oc * dst->nb[2] + batch_n * dst->nb[3]); + *dst_ptr = value; + } + } + } +} + +void ggml_compute_forward_conv_2d( + const ggml_compute_params * params, + ggml_tensor * dst) { + + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + ggml_compute_forward_conv_2d_impl(params, src0, src1, dst, src0->type); +} + // ggml_compute_forward_conv_transpose_2d void ggml_compute_forward_conv_transpose_2d( @@ -6007,7 +7258,7 @@ void ggml_compute_forward_conv_transpose_2d( const float * const src = (float *)((char *) src1->data + i12*nb12 + i11*nb11); ggml_fp16_t * dst_data = wdata + i11*ne10*ne12; for (int i10 = 0; i10 < ne10; i10++) { - dst_data[i10*ne12 + i12] = GGML_FP32_TO_FP16(src[i10]); + dst_data[i10*ne12 + i12] = GGML_CPU_FP32_TO_FP16(src[i10]); } } } @@ -6256,7 +7507,7 @@ static void ggml_compute_forward_pool_1d_sk_p0( case GGML_OP_POOL_COUNT: GGML_ABORT("fatal error"); } for (int ki = 0; ki < k; ++ki) { - const float srow_j = (src->type == GGML_TYPE_F32) ? ((const float*)srow)[j] : GGML_FP16_TO_FP32(((const ggml_fp16_t*)srow)[j]); + const float srow_j = (src->type == GGML_TYPE_F32) ? ((const float*)srow)[j] : GGML_CPU_FP16_TO_FP32(((const ggml_fp16_t*)srow)[j]); switch (op) { case GGML_OP_POOL_AVG: drow[i] += srow_j; break; case GGML_OP_POOL_MAX: if (srow_j > drow[i]) drow[i] = srow_j; break; @@ -6348,7 +7599,7 @@ void ggml_compute_forward_pool_2d( for (int kx = 0; kx < k0; ++kx) { int j = ix + kx; if (j < 0 || j >= src->ne[0]) continue; - const float srow_j = (src->type == GGML_TYPE_F32) ? ((const float*)srow)[j] : GGML_FP16_TO_FP32(((const ggml_fp16_t*)srow)[j]); + const float srow_j = (src->type == GGML_TYPE_F32) ? ((const float*)srow)[j] : GGML_CPU_FP16_TO_FP32(((const ggml_fp16_t*)srow)[j]); switch (op) { case GGML_OP_POOL_AVG: *out += srow_j; break; case GGML_OP_POOL_MAX: if (srow_j > *out) *out = srow_j; break; @@ -6436,7 +7687,7 @@ void ggml_compute_forward_pool_2d_back( } const float val = dst->type == GGML_TYPE_F32 ? 
- ((const float *) drowf)[j] : GGML_FP16_TO_FP32(((const ggml_fp16_t *) drowf)[j]); + ((const float *) drowf)[j] : GGML_CPU_FP16_TO_FP32(((const ggml_fp16_t *) drowf)[j]); if (val <= maxval) { continue; } @@ -6456,7 +7707,7 @@ void ggml_compute_forward_pool_2d_back( if (dst->type == GGML_TYPE_F32) { ((float *) drow)[j] += grad0; } else { - ((ggml_fp16_t *) drow)[j] = GGML_FP32_TO_FP16(grad0 + GGML_FP16_TO_FP32(((const ggml_fp16_t *) drow)[j])); + ((ggml_fp16_t *) drow)[j] = GGML_CPU_FP32_TO_FP16(grad0 + GGML_CPU_FP16_TO_FP32(((const ggml_fp16_t *) drow)[j])); } } else if (op == GGML_OP_POOL_AVG) { const float grad = grad0 / ka; @@ -6475,7 +7726,7 @@ void ggml_compute_forward_pool_2d_back( if (dst->type == GGML_TYPE_F32) { ((float *) drow)[j] += grad; } else { - ((ggml_fp16_t *) drow)[j] += GGML_FP32_TO_FP16(grad); + ((ggml_fp16_t *) drow)[j] += GGML_CPU_FP32_TO_FP16(grad); } } } @@ -6506,12 +7757,13 @@ static void ggml_compute_forward_upscale_f32( GGML_TENSOR_UNARY_OP_LOCALS - const float sf0 = (float)ne0/src0->ne[0]; - const float sf1 = (float)ne1/src0->ne[1]; - const float sf2 = (float)ne2/src0->ne[2]; - const float sf3 = (float)ne3/src0->ne[3]; + float sf0 = (float)ne0/src0->ne[0]; + float sf1 = (float)ne1/src0->ne[1]; + float sf2 = (float)ne2/src0->ne[2]; + float sf3 = (float)ne3/src0->ne[3]; - const ggml_scale_mode mode = (ggml_scale_mode) ggml_get_op_params_i32(dst, 0); + const int32_t mode_flags = ggml_get_op_params_i32(dst, 0); + const ggml_scale_mode mode = (ggml_scale_mode) (mode_flags & 0xFF); if (mode == GGML_SCALE_MODE_NEAREST) { for (int64_t i3 = 0; i3 < ne3; i3++) { @@ -6532,8 +7784,12 @@ static void ggml_compute_forward_upscale_f32( } } } else if (mode == GGML_SCALE_MODE_BILINEAR) { - // setting a pixel offset of 0 would replicate the behavior of pytorch interpolate with align_corners=True - const float pixel_offset = 0.5f; + float pixel_offset = 0.5f; + if (mode_flags & GGML_SCALE_FLAG_ALIGN_CORNERS) { + pixel_offset = 0.0f; + sf0 = (float)(ne0 - 1) / (src0->ne[0] - 1); + sf1 = (float)(ne1 - 1) / (src0->ne[1] - 1); + } for (int64_t i3 = 0; i3 < ne3; i3++) { const int64_t i03 = i3 / sf3; @@ -6691,6 +7947,73 @@ void ggml_compute_forward_pad_reflect_1d( } } +// ggml_compute_forward_roll + +static int64_t ggml_wrap_index(int64_t i, int64_t ne) { + if (i < 0) { + return i + ne; + } else if (i >= ne) { + return i - ne; + } + return i; +} + +static void ggml_compute_forward_roll_f32( + const ggml_compute_params * params, + ggml_tensor * dst) { + + const ggml_tensor * src0 = dst->src[0]; + const float * src_data = (const float *) src0->data; + float * dst_data = (float *) dst->data; + + GGML_TENSOR_UNARY_OP_LOCALS + + const int s0 = ggml_get_op_params_i32(dst, 0); + const int s1 = ggml_get_op_params_i32(dst, 1); + const int s2 = ggml_get_op_params_i32(dst, 2); + const int s3 = ggml_get_op_params_i32(dst, 3); + + const int64_t total = ne1 * ne2 * ne3; + const int64_t per_thread = (total + params->nth) / params->nth; + const int64_t start = params->ith * per_thread; + const int64_t end = std::min(start + per_thread, total); + + for (int64_t i = start; i < end; ++i) { + const int64_t i1 = i % ne1; + const int64_t i2 = (i / ne1) % ne2; + const int64_t i3 = i / (ne2 * ne1); + float * dst_row = dst_data + (i3*nb3 + i2*nb2 + i1*nb1) / sizeof(float); + + const int64_t i01 = ggml_wrap_index(i1 - s1, ne01); + const int64_t i02 = ggml_wrap_index(i2 - s2, ne02); + const int64_t i03 = ggml_wrap_index(i3 - s3, ne03); + const float * src_row = src_data + (i03*nb03 + i02*nb02 + i01*nb01) / 
sizeof(float); + + const int64_t s = ggml_wrap_index(-s0, ne00); + const int64_t n = ne00 - s; + ggml_vec_cpy_f32(n, dst_row, src_row + s); + ggml_vec_cpy_f32(s, dst_row + n, src_row); + } +} + +void ggml_compute_forward_roll( + const ggml_compute_params * params, + ggml_tensor * dst) { + + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_roll_f32(params, dst); + } break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + // ggml_compute_forward_arange static void ggml_compute_forward_arange_f32( @@ -6888,12 +8211,14 @@ void ggml_compute_forward_argsort( static void ggml_compute_forward_flash_attn_ext_f16( const ggml_compute_params * params, - const ggml_tensor * q, - const ggml_tensor * k, - const ggml_tensor * v, - const ggml_tensor * mask, ggml_tensor * dst) { + const ggml_tensor * q = dst->src[0]; + const ggml_tensor * k = dst->src[1]; + const ggml_tensor * v = dst->src[2]; + const ggml_tensor * mask = dst->src[3]; + const ggml_tensor * sinks = dst->src[4]; + GGML_TENSOR_LOCALS(int64_t, neq, q, ne) GGML_TENSOR_LOCALS(size_t, nbq, q, nb) GGML_TENSOR_LOCALS(int64_t, nek, k, ne) @@ -6967,7 +8292,7 @@ static void ggml_compute_forward_flash_attn_ext_f16( const float m0 = powf(2.0f, -(max_bias ) / n_head_log2); const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2); - ggml_type const k_vec_dot_type = ggml_get_type_traits_cpu(k->type)->vec_dot_type; + ggml_type const k_vec_dot_type = ggml_get_type_traits_cpu(k->type)->vec_dot_type; ggml_from_float_t const q_to_vec_dot = ggml_get_type_traits_cpu(k_vec_dot_type)->from_float; ggml_vec_dot_t const kq_vec_dot = ggml_get_type_traits_cpu(k->type)->vec_dot; ggml_to_float_t const v_to_float = ggml_get_type_traits(v->type)->to_float; @@ -6999,7 +8324,7 @@ static void ggml_compute_forward_flash_attn_ext_f16( memset(VKQ32, 0, DV*sizeof(float)); } - const ggml_fp16_t * mp = mask ? (ggml_fp16_t *)((char *) mask->data + iq1*mask->nb[1]) : NULL; + const ggml_fp16_t * mp = mask ? (ggml_fp16_t *)((char *) mask->data + iq1*mask->nb[1] + (iq2%mask->ne[2])*mask->nb[2] + (iq3%mask->ne[3])*mask->nb[3]) : NULL; // k indices const int ik3 = iq3 / rk3; @@ -7016,7 +8341,7 @@ static void ggml_compute_forward_flash_attn_ext_f16( // loop over n_kv and n_head_kv // ref: https://arxiv.org/pdf/2112.05682.pdf for (int64_t ic = 0; ic < nek1; ++ic) { - const float mv = mp ? slope*GGML_FP16_TO_FP32(mp[ic]) : 0.0f; + const float mv = mp ? 
slope*GGML_CPU_FP16_TO_FP32(mp[ic]) : 0.0f; if (mv == -INFINITY) { continue; } @@ -7084,10 +8409,27 @@ static void ggml_compute_forward_flash_attn_ext_f16( if (v->type == GGML_TYPE_F16) { for (int64_t d = 0; d < DV; ++d) { - VKQ32[d] = GGML_FP16_TO_FP32(VKQ16[d]); + VKQ32[d] = GGML_CPU_FP16_TO_FP32(VKQ16[d]); } } + // sinks + if (sinks) { + const float s = ((float *)((char *) sinks->data))[h]; + + float ms = 1.0f; + float vs = 1.0f; + + if (s > M) { + ms = expf(M - s); + ggml_vec_scale_f32(DV, VKQ32, ms); + } else { + vs = expf(s - M); + } + + S = S*ms + vs; + } + // V /= S const float S_inv = 1.0f/S; ggml_vec_scale_f32(DV, VKQ32, S_inv); @@ -7107,17 +8449,13 @@ static void ggml_compute_forward_flash_attn_ext_f16( void ggml_compute_forward_flash_attn_ext( const ggml_compute_params * params, - const ggml_tensor * q, - const ggml_tensor * k, - const ggml_tensor * v, - const ggml_tensor * mask, ggml_tensor * dst) { switch (dst->op_params[3]) { case GGML_PREC_DEFAULT: case GGML_PREC_F32: { // uses F32 accumulators - ggml_compute_forward_flash_attn_ext_f16(params, q, k, v, mask, dst); + ggml_compute_forward_flash_attn_ext_f16(params, dst); } break; default: { @@ -7537,74 +8875,208 @@ void ggml_compute_forward_ssm_conv( static void ggml_compute_forward_ssm_scan_f32( const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; // s - const ggml_tensor * src1 = dst->src[1]; // x - const ggml_tensor * src2 = dst->src[2]; // dt - const ggml_tensor * src3 = dst->src[3]; // A - const ggml_tensor * src4 = dst->src[4]; // B - const ggml_tensor * src5 = dst->src[5]; // C + const ggml_tensor * src0 = dst->src[0]; // s {d_state, dim, n_head, n_seqs+} + const ggml_tensor * src1 = dst->src[1]; // x {dim, n_head, n_seq_tokens, n_seqs} + const ggml_tensor * src2 = dst->src[2]; // dt {n_head, n_seq_tokens, n_seqs} + const ggml_tensor * src3 = dst->src[3]; // A {d_state, n_head} or {1, n_head} + const ggml_tensor * src4 = dst->src[4]; // B {d_state, n_group, n_seq_tokens, n_seqs} + const ggml_tensor * src5 = dst->src[5]; // C {d_state, n_group, n_seq_tokens, n_seqs} + const ggml_tensor * src6 = dst->src[6]; // ids {n_seqs} const int ith = params->ith; const int nth = params->nth; - const int64_t nc = src0->ne[0]; // d_state - const int64_t nr = src0->ne[1]; // d_inner - const int64_t n_t = src1->ne[1]; // number of tokens per sequence - const int64_t n_s = src0->ne[2]; // number of sequences in the batch + const int64_t nc = src0->ne[0]; // d_state + const int64_t nr = src0->ne[1]; // dim + const int64_t nh = src1->ne[1]; // n_head + const int64_t ng = src4->ne[1]; + const int64_t nt = src1->ne[2]; // number of tokens per sequence + const int64_t ns = src1->ne[3]; // number of sequences in the batch - GGML_ASSERT(ggml_nelements(src1) + ggml_nelements(src0) == ggml_nelements(dst)); + // can't use ggml_nbytes because src1 is not necessarily contiguous + const int64_t s_off = ggml_nelements(src1) * ggml_element_size(src1); + + GGML_ASSERT(ggml_nelements(src1) + nc*nr*nh*ns == ggml_nelements(dst)); GGML_ASSERT(src0->nb[0] == sizeof(float)); GGML_ASSERT(src1->nb[0] == sizeof(float)); GGML_ASSERT(src2->nb[0] == sizeof(float)); GGML_ASSERT(src3->nb[0] == sizeof(float)); GGML_ASSERT(src4->nb[0] == sizeof(float)); GGML_ASSERT(src5->nb[0] == sizeof(float)); - // required for the dot product between s and C - GGML_ASSERT(src0->nb[1] == src0->ne[0]*sizeof(float)); - // required for per-sequence offsets for states - GGML_ASSERT(src0->nb[2] == src0->ne[0]*src0->ne[1]*sizeof(float)); - // 
required to get correct offset for state destination (i.e. src1->nb[3]) - GGML_ASSERT(src1->nb[3] == src1->ne[0]*src1->ne[1]*src1->ne[2]*sizeof(float)); + GGML_ASSERT(src6->nb[0] == sizeof(int32_t)); + // allows optimizing the modulo since n_group should be a power of 2 + GGML_ASSERT((ng & -ng) == ng); - // rows per thread - const int dr = (nr + nth - 1)/nth; + // heads per thread + const int dh = (nh + nth - 1)/nth; - // row range for this thread - const int ir0 = dr*ith; - const int ir1 = MIN(ir0 + dr, nr); - const int ir = ir1 - ir0; + // head range for this thread + const int ih0 = dh*ith; + const int ih1 = MIN(ih0 + dh, nh); - for (int i3 = 0; i3 < n_s; ++i3) { - for (int i2 = 0; i2 < n_t; ++i2) { - const float * s0 = (const float *) ((const char *) src0->data + ir0*(src0->nb[1]) + i3*(src0->nb[2])); // {d_state, d_inner, n_s} - const float * x = (const float *) ((const char *) src1->data + ir0*(src1->nb[0]) + i2*(src1->nb[1]) + i3*(src1->nb[2])); // {d_inner, n_t, n_s} - const float * dt = (const float *) ((const char *) src2->data + ir0*(src2->nb[0]) + i2*(src2->nb[1]) + i3*(src2->nb[2])); // {d_inner, n_t, n_s} - const float * A = (const float *) ((const char *) src3->data + ir0*(src3->nb[1])); // {d_state, d_inner} - const float * B = (const float *) ((const char *) src4->data + i2*(src4->nb[1]) + i3*(src4->nb[2])); // {d_state, n_t, n_s} - const float * C = (const float *) ((const char *) src5->data + i2*(src5->nb[1]) + i3*(src5->nb[2])); // {d_state, n_t, n_s} - float * y = ( float *) (( char *) dst->data + ir0*(src1->nb[0]) + i2*(src1->nb[1]) + i3*(src1->nb[2])); // {d_inner, n_t, n_s} - float * s = ( float *) (( char *) dst->data + ir0*(src0->nb[1]) + i3*(src0->nb[2]) + src1->nb[3]); // {d_state, d_inner, n_s} + const int32_t * ids = (const int32_t *) src6->data; - // use the output as the source for the next token-wise iterations - if (i2 > 0) { s0 = s; } + for (int i3 = 0; i3 < ns; ++i3) { + const float * s0 = (const float *) ((const char *) src0->data + ids[i3]*(src0->nb[3])); // {d_state, dim, nh, ns} + float * s = ( float *) (( char *) dst->data + i3*(src0->nb[3]) + s_off); // {d_state, dim, nh, ns} - // d_inner - for (int i1 = 0; i1 < ir; ++i1) { - // ref: https://github.com/state-spaces/mamba/blob/34076d664838588a3c97727b263478ab9f621a07/mamba_ssm/ops/triton/selective_state_update.py#L78 - float dt_soft_plus = dt[i1] <= 20.0f ? 
log1pf(expf(dt[i1])) : dt[i1]; - float x_dt = x[i1] * dt_soft_plus; - float sumf = 0.0f; - // d_state - for (int i0 = 0; i0 < nc; ++i0) { - int i = i0 + i1*nc; - // state = prev_state * dA + dB * x - float state = (s0[i] * expf(dt_soft_plus * A[i])) + (B[i0] * x_dt); - // y = rowwise_dotprod(state, C) - sumf += state * C[i0]; - s[i] = state; + for (int i2 = 0; i2 < nt; ++i2) { + const float * x = (const float *) ((const char *) src1->data + i2*(src1->nb[2]) + i3*(src1->nb[3])); // {dim, nh, nt, ns} + const float * dt = (const float *) ((const char *) src2->data + i2*(src2->nb[1]) + i3*(src2->nb[2])); // {nh, nt, ns} + const float * A = (const float *) ((const char *) src3->data); // {d_state, nh} or {1, nh} + const float * B = (const float *) ((const char *) src4->data + i2*(src4->nb[2]) + i3*(src4->nb[3])); // {d_state, ng, nt, ns} + const float * C = (const float *) ((const char *) src5->data + i2*(src5->nb[2]) + i3*(src5->nb[3])); // {d_state, ng, nt, ns} + float * y = ( float *) (( char *) dst->data + i2*(nh*nr*sizeof(float)) + i3*(nt*nh*nr*sizeof(float))); // {dim, nh, nt, ns} + + if (src3->ne[0] == 1) { + // Mamba-2 has a scalar decay factor per head; dA can be outside the state-wise loop + + // n_head + for (int h = ih0; h < ih1; ++h) { + // ref: https://github.com/state-spaces/mamba/blob/62db608da60f6fc790b8ed9f4b3225e95ca15fde/mamba_ssm/ops/triton/softplus.py#L16 + const float dt_soft_plus = dt[h] <= 20.0f ? log1pf(expf(dt[h])) : dt[h]; + const float dA = expf(dt_soft_plus * A[h]); + + // dim + for (int i1 = 0; i1 < nr; ++i1) { + const int ii = i1 + h*nr; + const float x_dt = x[ii] * dt_soft_plus; + float sumf = 0.0f; +#if defined(GGML_SIMD) + #if defined(__ARM_FEATURE_SVE) + const int ggml_f32_epr = svcntw(); + const int ggml_f32_step = 1 * ggml_f32_epr; + + const int np = (nc & ~(ggml_f32_step - 1)); + + GGML_F32_VEC sum = GGML_F32_VEC_ZERO; + + GGML_F32_VEC adA = GGML_F32_VEC_SET1(dA); + GGML_F32_VEC axdt = GGML_F32_VEC_SET1(x_dt); + + for (int i = 0; i < np; i += ggml_f32_step) { + // TODO: maybe unroll more? 
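+                        // each SVE lane updates one state element and accumulates its
+                        // contribution to y; per element this matches the scalar tail below:
+                        //   state = s0*dA + B*x_dt;  sumf += state*C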
+ for (int j = 0; j < 1; j++) { + GGML_F32_VEC t0 = GGML_F32_VEC_LOAD(s0 + i + j*ggml_f32_epr + ii*nc); + GGML_F32_VEC t1 = GGML_F32_VEC_LOAD(B + i + j*ggml_f32_epr + (h & (ng - 1))*nc); + GGML_F32_VEC t2 = GGML_F32_VEC_LOAD(C + i + j*ggml_f32_epr + (h & (ng - 1))*nc); + + t0 = GGML_F32_VEC_MUL(t0, adA); + t1 = GGML_F32_VEC_MUL(t1, axdt); + + t0 = GGML_F32_VEC_ADD(t0, t1); + + sum = GGML_F32_VEC_FMA(sum, t0, t2); + + GGML_F32_VEC_STORE(s + i + j*ggml_f32_epr + ii*nc, t0); + } + } + + sumf = GGML_F32xt_REDUCE_ONE(sum); + #else + const int np = (nc & ~(GGML_F32_STEP - 1)); + + GGML_F32_VEC sum[GGML_F32_ARR] = { GGML_F32_VEC_ZERO }; + + GGML_F32_VEC adA = GGML_F32_VEC_SET1(dA); + GGML_F32_VEC axdt = GGML_F32_VEC_SET1(x_dt); + + GGML_F32_VEC ax[GGML_F32_ARR]; + GGML_F32_VEC ay[GGML_F32_ARR]; + GGML_F32_VEC az[GGML_F32_ARR]; + + for (int i = 0; i < np; i += GGML_F32_STEP) { + for (int j = 0; j < GGML_F32_ARR; j++) { + ax[j] = GGML_F32_VEC_LOAD(s0 + i + j*GGML_F32_EPR + ii*nc); + ay[j] = GGML_F32_VEC_LOAD(B + i + j*GGML_F32_EPR + (h & (ng - 1))*nc); + az[j] = GGML_F32_VEC_LOAD(C + i + j*GGML_F32_EPR + (h & (ng - 1))*nc); + + ax[j] = GGML_F32_VEC_MUL(ax[j], adA); + ay[j] = GGML_F32_VEC_MUL(ay[j], axdt); + + ax[j] = GGML_F32_VEC_ADD(ax[j], ay[j]); + + sum[j] = GGML_F32_VEC_FMA(sum[j], ax[j], az[j]); + + GGML_F32_VEC_STORE(s + i + j*GGML_F32_EPR + ii*nc, ax[j]); + } + } + + // reduce sum0..sum3 to sum0 + GGML_F32_VEC_REDUCE(sumf, sum); + #endif +#else + const int np = 0; +#endif + // d_state + for (int i0 = np; i0 < nc; ++i0) { + const int i = i0 + ii*nc; + const int ig = i0 + (h & (ng - 1))*nc; + // state = prev_state * dA + dB * x + const float state = (s0[i] * dA) + (B[ig] * x_dt); + // y = rowwise_dotprod(state, C) + sumf += state * C[ig]; + s[i] = state; + } + y[ii] = sumf; + } + } + } else { + // Mamba-1 has an element-wise decay factor for the states + + // n_head + for (int h = ih0; h < ih1; ++h) { + // ref: https://github.com/state-spaces/mamba/blob/62db608da60f6fc790b8ed9f4b3225e95ca15fde/mamba_ssm/ops/triton/softplus.py#L16 + const float dt_soft_plus = dt[h] <= 20.0f ? log1pf(expf(dt[h])) : dt[h]; + + // dim + for (int i1 = 0; i1 < nr; ++i1) { + const int ii = i1 + h*nr; + const float x_dt = x[ii] * dt_soft_plus; +#if defined(__ARM_FEATURE_SVE) + svfloat32_t vx_dt = GGML_F32_VEC_SET1(x_dt); + svfloat32_t vdt_soft_plus = GGML_F32_VEC_SET1(dt_soft_plus); + svfloat32_t r1_vector = GGML_F32_VEC_ZERO; + + // d_state + // TODO: what happens when (d_state % svcntw()) != 0? + for (int64_t k = 0; k < nc; k += svcntw()) { + svfloat32_t vA = GGML_F32_VEC_LOAD(&A[h*nc + k]); + svfloat32_t vB = GGML_F32_VEC_LOAD(&B[k + (h & (ng - 1))*nc]); + svfloat32_t vC = GGML_F32_VEC_LOAD(&C[k + (h & (ng - 1))*nc]); + svfloat32_t vs0 = GGML_F32_VEC_LOAD(&s0[ii*nc + k]); + + svfloat32_t t1 = GGML_F32_VEC_MUL(vdt_soft_plus, vA); + t1 = exp_ps_sve(svptrue_b32(), t1); + svfloat32_t t2 = GGML_F32_VEC_MUL(vx_dt, vB); + + vs0 = GGML_F32_VEC_FMA(t2, vs0, t1); + r1_vector = GGML_F32_VEC_ADD(GGML_F32_VEC_MUL(vs0, vC), r1_vector); + + GGML_F32_VEC_STORE(&s[ii*nc + k], vs0); + } + y[ii] = GGML_F32xt_REDUCE_ONE(r1_vector); +#else + float sumf = 0.0f; + // NOTE: can't really use GGML_SIMD here because d_state is usually 16 + // and also because expf is used within the loop. 
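+                    // per element: state = s0 * expf(dt_soft_plus * A) + B * x_dt,
+                    // then y[ii] = rowwise_dotprod(state, C), as in the SVE path above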
+ // d_state + for (int i0 = 0; i0 < nc; ++i0) { + const int i = i0 + ii*nc; + const int ig = i0 + (h & (ng - 1))*nc; + // state = prev_state * dA + dB * x + const float state = (s0[i] * expf(dt_soft_plus * A[i0 + h*nc])) + (B[ig] * x_dt); + // y = rowwise_dotprod(state, C) + sumf += state * C[ig]; + s[i] = state; + } + y[ii] = sumf; +#endif + } } - y[i1] = sumf; } + // use the output as the source when it's not the first token-wise iteration + s0 = s; } } } @@ -7793,6 +9265,10 @@ void ggml_compute_forward_unary( { ggml_compute_forward_gelu(params, dst); } break; + case GGML_UNARY_OP_GELU_ERF: + { + ggml_compute_forward_gelu_erf(params, dst); + } break; case GGML_UNARY_OP_GELU_QUICK: { ggml_compute_forward_gelu_quick(params, dst); @@ -7820,6 +9296,46 @@ void ggml_compute_forward_unary( } } +//ggml_compute_forward_glu + +void ggml_compute_forward_glu( + const ggml_compute_params * params, + ggml_tensor * dst) { + + const ggml_glu_op op = ggml_get_glu_op(dst); + + switch (op) { + case GGML_GLU_OP_REGLU: + { + ggml_compute_forward_reglu(params, dst); + } break; + case GGML_GLU_OP_GEGLU: + { + ggml_compute_forward_geglu(params, dst); + } break; + case GGML_GLU_OP_SWIGLU: + { + ggml_compute_forward_swiglu(params, dst); + } break; + case GGML_GLU_OP_SWIGLU_OAI: + { + ggml_compute_forward_swiglu_oai(params, dst); + } break; + case GGML_GLU_OP_GEGLU_ERF: + { + ggml_compute_forward_geglu_erf(params, dst); + } break; + case GGML_GLU_OP_GEGLU_QUICK: + { + ggml_compute_forward_geglu_quick(params, dst); + } break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + // ggml_compute_forward_get_rel_pos static void ggml_compute_forward_get_rel_pos_f16( @@ -8007,6 +9523,14 @@ static void ggml_compute_forward_rwkv_wkv6_f32( #define GGML_F32X_MUL GGML_F32x16_MUL #define GGML_F32X_FMA GGML_F32x16_FMA #define WKV_VECTOR_SIZE 16 + #elif defined(__ARM_FEATURE_SVE) && defined(__aarch64__) + #define GGML_F32X GGML_F32xt + #define GGML_F32X_SET1 GGML_F32xt_SET1 + #define GGML_F32X_LOAD GGML_F32xt_LOAD + #define GGML_F32X_STORE GGML_F32xt_STORE + #define GGML_F32X_MUL GGML_F32xt_MUL + #define GGML_F32X_FMA GGML_F32xt_FMA + #define WKV_VECTOR_SIZE 8 #elif defined(__ARM_NEON) && defined(__aarch64__) #define GGML_F32X GGML_F32x4 #define GGML_F32X_SET1 GGML_F32x4_SET1 @@ -8018,7 +9542,13 @@ static void ggml_compute_forward_rwkv_wkv6_f32( #endif #ifdef WKV_VECTOR_SIZE - const int64_t vec_count = head_size / WKV_VECTOR_SIZE; + int wkv_vector_size; + #if defined(__ARM_FEATURE_SVE) + wkv_vector_size = svcntw(); + #else + wkv_vector_size = WKV_VECTOR_SIZE; + #endif + const int64_t vec_count = head_size / wkv_vector_size; for (int64_t t = 0; t < T; t++) { size_t t_offset = t * t_stride; @@ -8048,7 +9578,7 @@ static void ggml_compute_forward_rwkv_wkv6_f32( GGML_F32X time_decay_vec = GGML_F32X_SET1(time_decay_val); for (int64_t j = 0; j < vec_count; j++) { - size_t base_j = j * WKV_VECTOR_SIZE; + size_t base_j = j * wkv_vector_size; size_t t_h_j_offset = t_h_offset + base_j; size_t h_2d_i_j_offset = h_2d_i_offset + base_j; @@ -8073,7 +9603,7 @@ static void ggml_compute_forward_rwkv_wkv6_f32( } // Handle remaining elements, this will not be used. 
- for (int64_t j = vec_count * WKV_VECTOR_SIZE; j < head_size; j++) { + for (int64_t j = vec_count * wkv_vector_size; j < head_size; j++) { size_t t_h_j_offset = t_h_offset + j; size_t h_2d_i_j_offset = h_2d_i_offset + j; float v_val = v[t_h_j_offset]; @@ -8209,6 +9739,14 @@ static void ggml_compute_forward_gla_f32( #define GGML_F32X_MUL GGML_F32x16_MUL #define GGML_F32X_FMA GGML_F32x16_FMA #define GLA_VECTOR_SIZE 16 + #elif defined(__ARM_FEATURE_SVE) && defined(__aarch64__) + #define GGML_F32X GGML_F32xt + #define GGML_F32X_SET1 GGML_F32xt_SET1 + #define GGML_F32X_LOAD GGML_F32xt_LOAD + #define GGML_F32X_STORE GGML_F32xt_STORE + #define GGML_F32X_MUL GGML_F32xt_MUL + #define GGML_F32X_FMA GGML_F32xt_FMA + #define GLA_VECTOR_SIZE 8 #elif defined(__ARM_NEON) && defined(__aarch64__) #define GGML_F32X GGML_F32x4 #define GGML_F32X_SET1 GGML_F32x4_SET1 @@ -8220,7 +9758,13 @@ static void ggml_compute_forward_gla_f32( #endif #ifdef GLA_VECTOR_SIZE - const int64_t vec_count = head_size / GLA_VECTOR_SIZE; + int gla_vector_size; + #if defined(__ARM_FEATURE_SVE) + gla_vector_size = svcntw(); + #else + gla_vector_size = GLA_VECTOR_SIZE; + #endif + const int64_t vec_count = head_size / gla_vector_size; for (int64_t t = 0; t < T; t++) { size_t t_offset = t * t_stride; @@ -8247,7 +9791,7 @@ static void ggml_compute_forward_gla_f32( GGML_F32X g_vec = GGML_F32X_SET1(g_val); for (int64_t j = 0; j < vec_count; j++) { - size_t base_j = j * GLA_VECTOR_SIZE; + size_t base_j = j * gla_vector_size; size_t t_h_j_offset = t_h_offset + base_j; size_t h_2d_i_j_offset = h_2d_i_offset + base_j; @@ -8271,7 +9815,7 @@ static void ggml_compute_forward_gla_f32( } // Handle remaining elements, this will not be used. - for (int64_t j = vec_count * GLA_VECTOR_SIZE; j < head_size; j++) { + for (int64_t j = vec_count * gla_vector_size; j < head_size; j++) { size_t t_h_j_offset = t_h_offset + j; size_t h_2d_i_j_offset = h_2d_i_offset + j; float v_val = v[t_h_j_offset]; @@ -8380,83 +9924,126 @@ static void ggml_compute_forward_rwkv_wkv7_f32( int64_t h_stride_2d = head_size * head_size; #if defined(GGML_SIMD) - for (int64_t t = 0; t < T; t++) { - int64_t t_offset = t * t_stride; - int64_t state_offset = head_size * C * (t / (T / n_seqs)); - float * state_cur = state + state_offset; - float * state_prev = t % (T / n_seqs) ? state_cur : (float*)dst->src[6]->data + state_offset; + #if defined(__ARM_FEATURE_SVE) + // route to the scalar implementation for now // TODO: write SVE code + for (int64_t t = 0; t < T; t++) { + int64_t t_offset = t * t_stride; + int64_t state_offset = head_size * C * (t / (T / n_seqs)); + float * state_cur = state + state_offset; + float * state_prev = t % (T / n_seqs) ?
state_cur : (float*)dst->src[6]->data + state_offset; - for (int64_t h = h_start; h < h_end; h++) { - int64_t h_offset = h * h_stride; - int64_t t_h_offset = t_offset + h_offset; - int64_t h_2d_offset = h * h_stride_2d; + for (int64_t h = h_start; h < h_end; h++) { + int64_t h_offset = h * h_stride; + int64_t t_h_offset = t_offset + h_offset; + int64_t h_2d_offset = h * h_stride_2d; - for (int64_t ii = 0; ii < head_size; ii++) { - int64_t t_h_i_offset = t_h_offset + ii; - int64_t h_2d_i_offset = h_2d_offset + ii * h_stride; + for (int64_t i = 0; i < head_size; i++) { + int64_t t_h_i_offset = t_h_offset + i; + int64_t h_2d_i_offset = h_2d_offset + i * h_stride; - GGML_F32_VEC v_vec = GGML_F32_VEC_SET1(v[t_h_i_offset]); + float v_val = v[t_h_i_offset]; - float sa = 0; - { - GGML_F32_VEC sum[GGML_F32_ARR] = { GGML_F32_VEC_ZERO }; - GGML_F32_VEC ax[GGML_F32_ARR]; - GGML_F32_VEC ay[GGML_F32_ARR]; - for (int64_t j = 0; j < head_size; j += GGML_F32_STEP) { - for (int64_t kk = 0; kk < GGML_F32_ARR; kk++) { - ax[kk] = GGML_F32_VEC_LOAD(&a[t_h_offset + j + kk * GGML_F32_EPR]); - ay[kk] = GGML_F32_VEC_LOAD(&state_prev[h_2d_i_offset + j + kk * GGML_F32_EPR]); - sum[kk] = GGML_F32_VEC_FMA(sum[kk], ax[kk], ay[kk]); - } + float sa = 0, result = 0; + for (int64_t j = 0; j < head_size; j++) { + sa += a[t_h_offset + j] * state_prev[h_2d_i_offset + j]; } - GGML_F32_VEC_REDUCE(sa, sum); - } - GGML_F32_VEC sa_vec = GGML_F32_VEC_SET1(sa); + for (int64_t j = 0; j < head_size; j++) { + int64_t t_h_j_offset = t_h_offset + j; + int64_t h_2d_i_j_offset = h_2d_i_offset + j; - int64_t j = 0; - GGML_F32_VEC result_vec[GGML_F32_ARR] = { GGML_F32_VEC_ZERO }; - for (; j < head_size; j += GGML_F32_STEP) { - for (int64_t kk = 0; kk < GGML_F32_ARR; kk++) { - int64_t t_h_j_offset = t_h_offset + j + kk * GGML_F32_EPR; - int64_t h_2d_i_j_offset = h_2d_i_offset + j + kk * GGML_F32_EPR; - - GGML_F32_VEC r_vec = GGML_F32_VEC_LOAD(&r[t_h_j_offset]); - GGML_F32_VEC w_vec = GGML_F32_VEC_LOAD(&w[t_h_j_offset]); - GGML_F32_VEC k_vec = GGML_F32_VEC_LOAD(&k[t_h_j_offset]); - GGML_F32_VEC b_vec = GGML_F32_VEC_LOAD(&b[t_h_j_offset]); - - k_vec = GGML_F32_VEC_MUL(v_vec, k_vec); - - GGML_F32_VEC state_vec = GGML_F32_VEC_LOAD(&state_prev[h_2d_i_j_offset]); - // kv + s * decay + sa * b - state_vec = GGML_F32_VEC_FMA(k_vec, state_vec, w_vec); - state_vec = GGML_F32_VEC_FMA(state_vec, sa_vec, b_vec); - GGML_F32_VEC_STORE(&state_cur[h_2d_i_j_offset], state_vec); - - result_vec[kk] = GGML_F32_VEC_FMA(result_vec[kk], state_vec, r_vec); + float r_val = r[t_h_j_offset]; + float w_val = w[t_h_j_offset]; + float k_val = k[t_h_j_offset]; + float b_val = b[t_h_j_offset]; + float kv_val = v_val * k_val; + float prev_state_val = state_prev[h_2d_i_j_offset]; + state_cur[h_2d_i_j_offset] = prev_state_val * w_val + kv_val + sa * b_val; + result += state_cur[h_2d_i_j_offset] * r_val; } - } - GGML_F32_VEC_REDUCE(dst_data[t_h_i_offset], result_vec); - - // There shouldn't be left-overs though. 
- for (; j < head_size; j++) { - int64_t t_h_j_offset = t_h_offset + j; - int64_t h_2d_i_j_offset = h_2d_i_offset + j; - - float r_val = r[t_h_j_offset]; - float w_val = w[t_h_j_offset]; - float k_val = k[t_h_j_offset]; - float b_val = b[t_h_j_offset]; - float kv_val = v[t_h_i_offset] * k_val; - - float prev_state_val = state_prev[h_2d_i_j_offset]; - state_cur[h_2d_i_j_offset] = prev_state_val * w_val + kv_val + sa * b_val; - dst_data[t_h_i_offset] += state_cur[h_2d_i_j_offset] * r_val; + dst_data[t_h_i_offset] = result; } } } - } + #else + for (int64_t t = 0; t < T; t++) { + int64_t t_offset = t * t_stride; + int64_t state_offset = head_size * C * (t / (T / n_seqs)); + float * state_cur = state + state_offset; + float * state_prev = t % (T / n_seqs) ? state_cur : (float*)dst->src[6]->data + state_offset; + + for (int64_t h = h_start; h < h_end; h++) { + int64_t h_offset = h * h_stride; + int64_t t_h_offset = t_offset + h_offset; + int64_t h_2d_offset = h * h_stride_2d; + + for (int64_t ii = 0; ii < head_size; ii++) { + int64_t t_h_i_offset = t_h_offset + ii; + int64_t h_2d_i_offset = h_2d_offset + ii * h_stride; + + GGML_F32_VEC v_vec = GGML_F32_VEC_SET1(v[t_h_i_offset]); + + float sa = 0; + { + GGML_F32_VEC sum[GGML_F32_ARR] = { GGML_F32_VEC_ZERO }; + GGML_F32_VEC ax[GGML_F32_ARR]; + GGML_F32_VEC ay[GGML_F32_ARR]; + for (int64_t j = 0; j < head_size; j += GGML_F32_STEP) { + for (int64_t kk = 0; kk < GGML_F32_ARR; kk++) { + ax[kk] = GGML_F32_VEC_LOAD(&a[t_h_offset + j + kk * GGML_F32_EPR]); + ay[kk] = GGML_F32_VEC_LOAD(&state_prev[h_2d_i_offset + j + kk * GGML_F32_EPR]); + sum[kk] = GGML_F32_VEC_FMA(sum[kk], ax[kk], ay[kk]); + } + } + GGML_F32_VEC_REDUCE(sa, sum); + } + + GGML_F32_VEC sa_vec = GGML_F32_VEC_SET1(sa); + + int64_t j = 0; + GGML_F32_VEC result_vec[GGML_F32_ARR] = { GGML_F32_VEC_ZERO }; + for (; j < head_size; j += GGML_F32_STEP) { + for (int64_t kk = 0; kk < GGML_F32_ARR; kk++) { + int64_t t_h_j_offset = t_h_offset + j + kk * GGML_F32_EPR; + int64_t h_2d_i_j_offset = h_2d_i_offset + j + kk * GGML_F32_EPR; + + GGML_F32_VEC r_vec = GGML_F32_VEC_LOAD(&r[t_h_j_offset]); + GGML_F32_VEC w_vec = GGML_F32_VEC_LOAD(&w[t_h_j_offset]); + GGML_F32_VEC k_vec = GGML_F32_VEC_LOAD(&k[t_h_j_offset]); + GGML_F32_VEC b_vec = GGML_F32_VEC_LOAD(&b[t_h_j_offset]); + + k_vec = GGML_F32_VEC_MUL(v_vec, k_vec); + + GGML_F32_VEC state_vec = GGML_F32_VEC_LOAD(&state_prev[h_2d_i_j_offset]); + // kv + s * decay + sa * b + state_vec = GGML_F32_VEC_FMA(k_vec, state_vec, w_vec); + state_vec = GGML_F32_VEC_FMA(state_vec, sa_vec, b_vec); + GGML_F32_VEC_STORE(&state_cur[h_2d_i_j_offset], state_vec); + + result_vec[kk] = GGML_F32_VEC_FMA(result_vec[kk], state_vec, r_vec); + } + } + GGML_F32_VEC_REDUCE(dst_data[t_h_i_offset], result_vec); + + // There shouldn't be left-overs though. 
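// The vector loop above relies on the ggml SIMD convention that
// GGML_F32_VEC_FMA(a, b, c) computes a + b*c, so the wkv7 update
// state' = kv + state*decay + sa*b costs two fused ops per lane.
// The same step in scalar form, for reference:
//
//     float t0 = kv + state * w; // GGML_F32_VEC_FMA(k_vec, state_vec, w_vec)
//     float s1 = t0 + sa * b;    // GGML_F32_VEC_FMA(state_vec, sa_vec, b_vec)
//     result += s1 * r;          // reduced into dst_data after the loop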
+ for (; j < head_size; j++) { + int64_t t_h_j_offset = t_h_offset + j; + int64_t h_2d_i_j_offset = h_2d_i_offset + j; + + float r_val = r[t_h_j_offset]; + float w_val = w[t_h_j_offset]; + float k_val = k[t_h_j_offset]; + float b_val = b[t_h_j_offset]; + float kv_val = v[t_h_i_offset] * k_val; + + float prev_state_val = state_prev[h_2d_i_j_offset]; + state_cur[h_2d_i_j_offset] = prev_state_val * w_val + kv_val + sa * b_val; + dst_data[t_h_i_offset] += state_cur[h_2d_i_j_offset] * r_val; + } + } + } + } + #endif #else for (int64_t t = 0; t < T; t++) { int64_t t_offset = t * t_stride; diff --git a/ml/backend/ggml/ggml/src/ggml-cpu/ops.h b/ml/backend/ggml/ggml/src/ggml-cpu/ops.h index dc081b9e6..f154afb46 100644 --- a/ml/backend/ggml/ggml/src/ggml-cpu/ops.h +++ b/ml/backend/ggml/ggml/src/ggml-cpu/ops.h @@ -20,12 +20,16 @@ static const size_t CACHE_LINE_SIZE_F32 = CACHE_LINE_SIZE/sizeof(float); +// Work buffer size for im2col operations in CONV2D +#define GGML_IM2COL_WORK_SIZE (16 * 1024 * 1024) + #ifdef __cplusplus extern "C" { #endif void ggml_compute_forward_dup(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_add(const struct ggml_compute_params * params, struct ggml_tensor * dst); +void ggml_compute_forward_add_id(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_add1(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_acc(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_sum(const struct ggml_compute_params * params, struct ggml_tensor * dst); @@ -53,6 +57,7 @@ void ggml_compute_forward_permute(const struct ggml_compute_params * params, str void ggml_compute_forward_transpose(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_get_rows(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_get_rows_back(const struct ggml_compute_params * params, struct ggml_tensor * dst); +void ggml_compute_forward_set_rows(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_diag(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_diag_mask_inf(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_diag_mask_zero(const struct ggml_compute_params * params, struct ggml_tensor * dst); @@ -64,6 +69,7 @@ void ggml_compute_forward_clamp(const struct ggml_compute_params * params, struc void ggml_compute_forward_conv_transpose_1d(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_im2col(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_im2col_back_f32(const struct ggml_compute_params * params, struct ggml_tensor * dst); +void ggml_compute_forward_conv_2d(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_conv_transpose_2d(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_conv_2d_dw(const struct ggml_compute_params * params, struct ggml_tensor * dst); void ggml_compute_forward_pool_1d(const struct ggml_compute_params * params, struct ggml_tensor * dst); @@ -72,17 +78,12 @@ void ggml_compute_forward_pool_2d_back(const struct ggml_compute_params * params void ggml_compute_forward_upscale(const struct ggml_compute_params * params, struct 
ggml_tensor * dst);
 void ggml_compute_forward_pad(const struct ggml_compute_params * params, struct ggml_tensor * dst);
 void ggml_compute_forward_pad_reflect_1d(const struct ggml_compute_params * params, struct ggml_tensor * dst);
+void ggml_compute_forward_roll(const struct ggml_compute_params * params, struct ggml_tensor * dst);
 void ggml_compute_forward_arange(const struct ggml_compute_params * params, struct ggml_tensor * dst);
 void ggml_compute_forward_timestep_embedding(const struct ggml_compute_params * params, struct ggml_tensor * dst);
 void ggml_compute_forward_argsort(const struct ggml_compute_params * params, struct ggml_tensor * dst);
 void ggml_compute_forward_leaky_relu(const struct ggml_compute_params * params, struct ggml_tensor * dst);
-void ggml_compute_forward_flash_attn_ext(
-        const struct ggml_compute_params * params,
-        const struct ggml_tensor * q,
-        const struct ggml_tensor * k,
-        const struct ggml_tensor * v,
-        const struct ggml_tensor * mask,
-        struct ggml_tensor * dst);
+void ggml_compute_forward_flash_attn_ext(const struct ggml_compute_params * params, struct ggml_tensor * dst);
 void ggml_compute_forward_flash_attn_back(
         const struct ggml_compute_params * params,
         const bool masked,
@@ -92,6 +93,7 @@ void ggml_compute_forward_ssm_scan(const struct ggml_compute_params * params, st
 void ggml_compute_forward_win_part(const struct ggml_compute_params * params, struct ggml_tensor * dst);
 void ggml_compute_forward_win_unpart(const struct ggml_compute_params * params, struct ggml_tensor * dst);
 void ggml_compute_forward_unary(const struct ggml_compute_params * params, struct ggml_tensor * dst);
+void ggml_compute_forward_glu(const struct ggml_compute_params * params, struct ggml_tensor * dst);
 void ggml_compute_forward_get_rel_pos(const struct ggml_compute_params * params, struct ggml_tensor * dst);
 void ggml_compute_forward_add_rel_pos(const struct ggml_compute_params * params, struct ggml_tensor * dst);
 void ggml_compute_forward_rwkv_wkv6(const struct ggml_compute_params * params, struct ggml_tensor * dst);
@@ -104,6 +106,7 @@ void ggml_compute_forward_custom(const struct ggml_compute_params * params, stru
 void ggml_compute_forward_cross_entropy_loss(const struct ggml_compute_params * params, struct ggml_tensor * dst);
 void ggml_compute_forward_cross_entropy_loss_back(const struct ggml_compute_params * params, struct ggml_tensor * dst);
 void ggml_compute_forward_opt_step_adamw(const struct ggml_compute_params * params, struct ggml_tensor * dst);
+void ggml_compute_forward_mul_mat(const struct ggml_compute_params * params, struct ggml_tensor * dst);

 #ifdef __cplusplus
 }
diff --git a/ml/backend/ggml/ggml/src/ggml-cpu/quants.c b/ml/backend/ggml/ggml/src/ggml-cpu/quants.c
new file mode 100644
index 000000000..365cb36d2
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-cpu/quants.c
@@ -0,0 +1,1193 @@
+#define GGML_COMMON_IMPL_C
+#include "ggml-common.h"
+
+#include "ggml-cpu-impl.h"
+#include "simd-mappings.h"
+#include "ggml-quants.h"
+#include "quants.h"
+
+#include "arch-fallback.h"
+
+#include <string.h>
+#include <assert.h>
+#include <float.h>
+#include <stdlib.h> // for qsort
+#include <stdio.h>  // for GGML_ASSERT
+
+#define GROUP_MAX_EPS 1e-15f
+#define GROUP_MAX_EPS_IQ3_XXS 1e-8f
+#define GROUP_MAX_EPS_IQ2_S 1e-8f
+#define GROUP_MAX_EPS_IQ1_M 1e-7f
+#define GROUP_MAX_EPS_IQ1_S 1e-12f
+
+#define UNUSED GGML_UNUSED
+
+void quantize_row_q4_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k) {
+    quantize_row_q4_0_ref(x, y, k);
+}
+
+void quantize_row_q4_1(const float * GGML_RESTRICT x, void * GGML_RESTRICT
y, int64_t k) { + quantize_row_q4_1_ref(x, y, k); +} + +void quantize_row_q5_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k) { + quantize_row_q5_0_ref(x, y, k); +} + +void quantize_row_q5_1(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k) { + quantize_row_q5_1_ref(x, y, k); +} + +void quantize_row_q8_0_generic(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k) { + quantize_row_q8_0_ref(x, y, k); +} + +void quantize_row_q8_1_generic(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k) { + quantize_row_q8_1_ref(x, y, k); +} + +void quantize_row_mxfp4(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k) { + quantize_row_mxfp4_ref(x, y, k); +} + +// +// 2-6 bit quantization in super-blocks +// + +//========================- 2-bit (de)-quantization + +void quantize_row_q2_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { + quantize_row_q2_K_ref(x, vy, k); +} + +//========================= 3-bit (de)-quantization + +void quantize_row_q3_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { + quantize_row_q3_K_ref(x, vy, k); +} + +// ====================== 4-bit (de)-quantization + +void quantize_row_q4_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { + assert(k % QK_K == 0); + block_q4_K * GGML_RESTRICT y = vy; + quantize_row_q4_K_ref(x, y, k); +} + +// ====================== 5-bit (de)-quantization + +void quantize_row_q5_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { + assert(k % QK_K == 0); + block_q5_K * GGML_RESTRICT y = vy; + quantize_row_q5_K_ref(x, y, k); +} + +// ====================== 6-bit (de)-quantization + +void quantize_row_q6_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { + assert(k % QK_K == 0); + block_q6_K * GGML_RESTRICT y = vy; + quantize_row_q6_K_ref(x, y, k); +} + +// ====================== Ternary (de)-quantization (BitNet b1.58 and TriLMs) + +void quantize_row_tq1_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { + assert(k % QK_K == 0); + block_tq1_0 * GGML_RESTRICT y = vy; + quantize_row_tq1_0_ref(x, y, k); +} + +void quantize_row_tq2_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { + assert(k % QK_K == 0); + block_tq2_0 * GGML_RESTRICT y = vy; + quantize_row_tq2_0_ref(x, y, k); +} + +//===================================== Q8_K ============================================== + +void quantize_row_q8_K_generic(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k) { + quantize_row_q8_K_ref(x, y, k); +} + +//===================================== Dot products ================================= + +void ggml_vec_dot_q4_0_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + const int qk = QK8_0; + const int nb = n / qk; + + assert(n % qk == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q4_0 * GGML_RESTRICT x = vx; + const block_q8_0 * GGML_RESTRICT y = vy; + + int ib = 0; + float sumf = 0; + + for (; ib < nb; ++ib) { + int sumi0 = 0; + int sumi1 = 0; + + for (int j = 0; j < qk/2; ++j) { + const int v0 = (x[ib].qs[j] & 0x0F) - 8; + const int v1 = (x[ib].qs[j] >> 4) - 8; + + sumi0 += (v0 * y[ib].qs[j]); + sumi1 += (v1 * y[ib].qs[j + qk/2]); + } + + int sumi = sumi0 + sumi1; + sumf += sumi*GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d); + } + + *s = sumf; +} + +// TODO: add WASM 
SIMD +void ggml_vec_dot_q4_1_q8_1_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + const int qk = QK8_1; + const int nb = n / qk; + + assert(n % qk == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q4_1 * GGML_RESTRICT x = vx; + const block_q8_1 * GGML_RESTRICT y = vy; + + int ib = 0; + float sumf = 0; + + for (; ib < nb; ++ib) { + int sumi0 = 0; + int sumi1 = 0; + + for (int j = 0; j < qk/2; ++j) { + const int v0 = (x[ib].qs[j] & 0x0F); + const int v1 = (x[ib].qs[j] >> 4); + + sumi0 += (v0 * y[ib].qs[j]); + sumi1 += (v1 * y[ib].qs[j + qk/2]); + } + + int sumi = sumi0 + sumi1; + sumf += (GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d))*sumi + GGML_CPU_FP16_TO_FP32(x[ib].m)*GGML_CPU_FP16_TO_FP32(y[ib].s); + } + + *s = sumf; +} + +void ggml_vec_dot_mxfp4_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + assert(n % QK_MXFP4 == 0); + static_assert(QK_MXFP4 == QK8_0, "QK_MXFP4 and QK8_0 must be the same"); + + const block_mxfp4 * GGML_RESTRICT x = vx; + const block_q8_0 * GGML_RESTRICT y = vy; + + const int nb = n / QK_MXFP4; + + int ib = 0; + float sumf = 0; + + for (; ib < nb; ++ib) { + const float d = GGML_CPU_FP16_TO_FP32(y[ib].d)*GGML_E8M0_TO_FP32_HALF(x[ib].e); + + int sumi1 = 0; + int sumi2 = 0; + for (int j = 0; j < QK_MXFP4/2; ++j) { + sumi1 += y[ib].qs[j + 0] * kvalues_mxfp4[x[ib].qs[j] & 0xf]; + sumi2 += y[ib].qs[j + QK_MXFP4/2] * kvalues_mxfp4[x[ib].qs[j] >> 4]; + } + sumf += d * (sumi1 + sumi2); + } + *s = sumf; +} + +void ggml_vec_dot_q5_0_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + const int qk = QK8_0; + const int nb = n / qk; + + int ib = 0; + float sumf = 0; + + assert(n % qk == 0); + assert(qk == QK5_0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q5_0 * GGML_RESTRICT x = vx; + const block_q8_0 * GGML_RESTRICT y = vy; + + for (; ib < nb; ++ib) { + uint32_t qh; + memcpy(&qh, x[ib].qh, sizeof(qh)); + + int sumi0 = 0; + int sumi1 = 0; + + for (int j = 0; j < qk/2; ++j) { + const uint8_t xh_0 = ((qh & (1u << (j + 0 ))) >> (j + 0 )) << 4; + const uint8_t xh_1 = ((qh & (1u << (j + 16))) >> (j + 12)); + + const int32_t x0 = (int8_t)(((x[ib].qs[j] & 0x0F) | xh_0) - 16); + const int32_t x1 = (int8_t)(((x[ib].qs[j] >> 4) | xh_1) - 16); + + sumi0 += (x0 * y[ib].qs[j]); + sumi1 += (x1 * y[ib].qs[j + qk/2]); + } + + int sumi = sumi0 + sumi1; + sumf += (GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d)) * sumi; + } + + *s = sumf; +} + +void ggml_vec_dot_q5_1_q8_1_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + const int qk = QK8_1; + const int nb = n / qk; + + int ib = 0; + float sumf = 0; + + assert(n % qk == 0); + assert(qk == QK5_1); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q5_1 * GGML_RESTRICT x = vx; + const block_q8_1 * GGML_RESTRICT y = vy; + + for (; ib < nb; ++ib) { + uint32_t qh; + memcpy(&qh, x[ib].qh, sizeof(qh)); + + int sumi0 = 0; + int sumi1 = 0; + + for (int j = 0; j < qk/2; ++j) { + const uint8_t xh_0 = ((qh >> (j + 0)) 
<< 4) & 0x10; + const uint8_t xh_1 = ((qh >> (j + 12)) ) & 0x10; + + const int32_t x0 = (x[ib].qs[j] & 0xF) | xh_0; + const int32_t x1 = (x[ib].qs[j] >> 4) | xh_1; + + sumi0 += (x0 * y[ib].qs[j]); + sumi1 += (x1 * y[ib].qs[j + qk/2]); + } + + int sumi = sumi0 + sumi1; + sumf += (GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d))*sumi + GGML_CPU_FP16_TO_FP32(x[ib].m)*GGML_CPU_FP16_TO_FP32(y[ib].s); + } + + *s = sumf; +} + +void ggml_vec_dot_q8_0_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + const int qk = QK8_0; + const int nb = n / qk; + + assert(n % qk == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q8_0 * GGML_RESTRICT x = vx; + const block_q8_0 * GGML_RESTRICT y = vy; + + int ib = 0; + float sumf = 0; + + for (; ib < nb; ++ib) { + int sumi = 0; + + for (int j = 0; j < qk; j++) { + sumi += x[ib].qs[j]*y[ib].qs[j]; + } + + sumf += sumi*(GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d)); + } + + *s = sumf; +} + +void ggml_vec_dot_tq1_0_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_tq1_0 * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + + const uint8_t pow3[6] = {1, 3, 9, 27, 81, 243}; + + float sumf = 0.0f; + + for (int i = 0; i < nb; ++i) { + int sum = 0; + + for (size_t j = 0; j < sizeof(x->qs) - sizeof(x->qs) % 32; j += 32) { + for (size_t l = 0; l < 5; ++l) { + for (size_t m = 0; m < 32; ++m) { + uint8_t q = x[i].qs[j + m] * pow3[l]; + uint16_t xi = ((uint16_t) q * 3) >> 8; + sum += (xi - 1) * y[i].qs[j*5 + l*32 + m]; + } + } + } + for (size_t j = sizeof(x->qs) - sizeof(x->qs) % 32; j < sizeof(x->qs); j += 16) { + for (size_t l = 0; l < 5; ++l) { + for (size_t m = 0; m < 16; ++m) { + uint8_t q = x[i].qs[j + m] * pow3[l]; + uint16_t xi = ((uint16_t) q * 3) >> 8; + sum += (xi - 1) * y[i].qs[j*5 + l*16 + m]; + } + } + } + + for (size_t l = 0; l < 4; ++l) { + for (size_t j = 0; j < sizeof(x->qh); ++j) { + uint8_t q = x[i].qh[j] * pow3[l]; + uint16_t xi = ((uint16_t) q * 3) >> 8; + sum += (xi - 1) * y[i].qs[sizeof(x->qs)*5 + l*sizeof(x->qh) + j]; + } + } + + sumf += (float) sum * (GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d); + } + + *s = sumf; +} + +void ggml_vec_dot_tq2_0_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_tq2_0 * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + float sumf = 0.0f; + + for (int i = 0; i < nb; ++i) { + int32_t sumi = 0; + + for (size_t j = 0; j < sizeof(x->qs); j += 32) { + for (size_t l = 0; l < 4; ++l) { + for (size_t k = 0; k < 32; ++k) { + sumi += y[i].qs[j*4 + l*32 + k] * (((x[i].qs[j + k] >> (l*2)) & 3) - 1); + } + } + } + + const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); + + sumf += (float) sumi * d; + } + + *s = sumf; +} + +void ggml_vec_dot_q2_K_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q2_K 
* GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + + float sumf = 0; + + for (int i = 0; i < nb; ++i) { + + const uint8_t * q2 = x[i].qs; + const int8_t * q8 = y[i].qs; + const uint8_t * sc = x[i].scales; + + int summs = 0; + for (int j = 0; j < 16; ++j) { + summs += y[i].bsums[j] * (sc[j] >> 4); + } + + const float dall = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d); + const float dmin = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin); + + int isum = 0; + int is = 0; + int d; + for (int k = 0; k < QK_K/128; ++k) { + int shift = 0; + for (int j = 0; j < 4; ++j) { + d = sc[is++] & 0xF; + int isuml = 0; + for (int l = 0; l < 16; ++l) isuml += q8[l] * ((q2[l] >> shift) & 3); + isum += d * isuml; + d = sc[is++] & 0xF; + isuml = 0; + for (int l = 16; l < 32; ++l) isuml += q8[l] * ((q2[l] >> shift) & 3); + isum += d * isuml; + shift += 2; + q8 += 32; + } + q2 += 32; + } + sumf += dall * isum - dmin * summs; + } + *s = sumf; +} + +void ggml_vec_dot_q3_K_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const uint32_t kmask1 = 0x03030303; + const uint32_t kmask2 = 0x0f0f0f0f; + + const block_q3_K * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + + // scalar version + // This function is written like this so the compiler can manage to vectorize most of it + // Using -Ofast, GCC and clang manage to produce code that is within a factor of 2 or so from the + // manually vectorized version above. Every other version I tried would run at least 4 times slower. + // The ideal situation would be if we could just write the code once, and the compiler would + // automatically produce the best possible set of machine instructions, instead of us having to manually + // write vectorized versions for AVX, ARM_NEON, etc. + + int8_t aux8[QK_K]; + int16_t aux16[8]; + float sums [8]; + int32_t aux32[8]; + memset(sums, 0, 8*sizeof(float)); + + uint32_t auxs[4]; + const int8_t * scales = (const int8_t*)auxs; + + float sumf = 0; + for (int i = 0; i < nb; ++i) { + const uint8_t * GGML_RESTRICT q3 = x[i].qs; + const uint8_t * GGML_RESTRICT hm = x[i].hmask; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + memset(aux32, 0, 8*sizeof(int32_t)); + int8_t * GGML_RESTRICT a = aux8; + uint8_t m = 1; + for (int j = 0; j < QK_K; j += 128) { + for (int l = 0; l < 32; ++l) a[l] = q3[l] & 3; + for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4); + a += 32; m <<= 1; + for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 2) & 3; + for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4); + a += 32; m <<= 1; + for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 4) & 3; + for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4); + a += 32; m <<= 1; + for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 6) & 3; + for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 
0 : 4); + a += 32; m <<= 1; + q3 += 32; + } + a = aux8; + + memcpy(auxs, x[i].scales, 12); + uint32_t tmp = auxs[2]; + auxs[2] = ((auxs[0] >> 4) & kmask2) | (((tmp >> 4) & kmask1) << 4); + auxs[3] = ((auxs[1] >> 4) & kmask2) | (((tmp >> 6) & kmask1) << 4); + auxs[0] = (auxs[0] & kmask2) | (((tmp >> 0) & kmask1) << 4); + auxs[1] = (auxs[1] & kmask2) | (((tmp >> 2) & kmask1) << 4); + for (int j = 0; j < QK_K/16; ++j) { + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l]; + q8 += 8; a += 8; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l]; + q8 += 8; a += 8; + } + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; + for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; + } + for (int l = 0; l < 8; ++l) sumf += sums[l]; + *s = sumf; +} + +void ggml_vec_dot_q4_K_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q4_K * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + + static const uint32_t kmask1 = 0x3f3f3f3f; + static const uint32_t kmask2 = 0x0f0f0f0f; + static const uint32_t kmask3 = 0x03030303; + + uint32_t utmp[4]; + + const uint8_t * scales = (const uint8_t*)&utmp[0]; + const uint8_t * mins = (const uint8_t*)&utmp[2]; + + int8_t aux8[QK_K]; + int16_t aux16[8]; + float sums [8]; + int32_t aux32[8]; + memset(sums, 0, 8*sizeof(float)); + + float sumf = 0; + for (int i = 0; i < nb; ++i) { + const uint8_t * GGML_RESTRICT q4 = x[i].qs; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + memset(aux32, 0, 8*sizeof(int32_t)); + int8_t * GGML_RESTRICT a = aux8; + for (int j = 0; j < QK_K/64; ++j) { + for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] & 0xF); + a += 32; + for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] >> 4); + a += 32; q4 += 32; + } + memcpy(utmp, x[i].scales, 12); + utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); + const uint32_t uaux = utmp[1] & kmask1; + utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); + utmp[2] = uaux; + utmp[0] &= kmask1; + + int sumi = 0; + for (int j = 0; j < QK_K/16; ++j) sumi += y[i].bsums[j] * mins[j/2]; + a = aux8; + int is = 0; + for (int j = 0; j < QK_K/32; ++j) { + int32_t scale = scales[is++]; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + } + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; + for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; + const float dmin = GGML_CPU_FP16_TO_FP32(x[i].dmin) * y[i].d; + sumf -= dmin * sumi; + } + for (int l = 0; l < 8; ++l) sumf += sums[l]; + *s = sumf; +} + +void ggml_vec_dot_q5_K_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); 
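// This generic q5_K kernel follows the same scheme as the q4_K one above:
// expand each 256-weight super-block into aux8 (4-bit quants plus a fifth
// bit pulled from qh), form 8-wide int16 partial products against q8,
// accumulate them in int32 lanes, and convert to float once per block.
// Schematically:
//
//     sumf = sum_b( d_x*d_y * sum_g( scale_g * sum_j( q8_j * q5_j ) ) )
//          - sum_b( dmin_x*d_y * sum_g( mins_g * bsums_g ) )
//
// The bsums terms are per-16 sums of the q8 side, so the mins correction
// never revisits individual quants.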
+ UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q5_K * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + + static const uint32_t kmask1 = 0x3f3f3f3f; + static const uint32_t kmask2 = 0x0f0f0f0f; + static const uint32_t kmask3 = 0x03030303; + + uint32_t utmp[4]; + + const uint8_t * scales = (const uint8_t*)&utmp[0]; + const uint8_t * mins = (const uint8_t*)&utmp[2]; + + int8_t aux8[QK_K]; + int16_t aux16[8]; + float sums [8]; + int32_t aux32[8]; + memset(sums, 0, 8*sizeof(float)); + + float sumf = 0; + for (int i = 0; i < nb; ++i) { + const uint8_t * GGML_RESTRICT q4 = x[i].qs; + const uint8_t * GGML_RESTRICT hm = x[i].qh; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + memset(aux32, 0, 8*sizeof(int32_t)); + int8_t * GGML_RESTRICT a = aux8; + uint8_t m = 1; + for (int j = 0; j < QK_K/64; ++j) { + for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] & 0xF); + for (int l = 0; l < 32; ++l) a[l] += (hm[l] & m ? 16 : 0); + a += 32; m <<= 1; + for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] >> 4); + for (int l = 0; l < 32; ++l) a[l] += (hm[l] & m ? 16 : 0); + a += 32; m <<= 1; + q4 += 32; + } + memcpy(utmp, x[i].scales, 12); + utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); + const uint32_t uaux = utmp[1] & kmask1; + utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); + utmp[2] = uaux; + utmp[0] &= kmask1; + + int sumi = 0; + for (int j = 0; j < QK_K/16; ++j) sumi += y[i].bsums[j] * mins[j/2]; + a = aux8; + int is = 0; + for (int j = 0; j < QK_K/32; ++j) { + int32_t scale = scales[is++]; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + } + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; + for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; + const float dmin = GGML_CPU_FP16_TO_FP32(x[i].dmin) * y[i].d; + sumf -= dmin * sumi; + } + for (int l = 0; l < 8; ++l) sumf += sums[l]; + *s = sumf; +} + +void ggml_vec_dot_q6_K_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_q6_K * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + + int8_t aux8[QK_K]; + int16_t aux16[8]; + float sums [8]; + int32_t aux32[8]; + memset(sums, 0, 8*sizeof(float)); + + float sumf = 0; + for (int i = 0; i < nb; ++i) { + const uint8_t * GGML_RESTRICT q4 = x[i].ql; + const uint8_t * GGML_RESTRICT qh = x[i].qh; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + memset(aux32, 0, 8*sizeof(int32_t)); + int8_t * GGML_RESTRICT a = aux8; + for (int j = 0; j < QK_K; j += 128) { + for (int l = 0; l < 32; ++l) { + a[l + 0] = (int8_t)((q4[l + 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32; + a[l + 32] = (int8_t)((q4[l + 32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32; + a[l + 64] = (int8_t)((q4[l + 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32; + a[l + 96] = (int8_t)((q4[l + 32] >> 4) | (((qh[l] >> 
6) & 3) << 4)) - 32; + } + a += 128; + q4 += 64; + qh += 32; + } + a = aux8; + int is = 0; + for (int j = 0; j < QK_K/16; ++j) { + int scale = x[i].scales[is++]; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l]; + for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l]; + q8 += 8; a += 8; + } + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; + for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l]; + } + for (int l = 0; l < 8; ++l) sumf += sums[l]; + *s = sumf; +} + +void ggml_vec_dot_iq2_xxs_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_iq2_xxs * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + + uint32_t aux32[2]; + const uint8_t * aux8 = (const uint8_t *)aux32; + + float sumf = 0.f; + for (int i = 0; i < nb; ++i) { + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; + const uint16_t * GGML_RESTRICT q2 = x[i].qs; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + int32_t bsum = 0; + for (int ib32 = 0; ib32 < QK_K/32; ++ib32) { + memcpy(aux32, q2, 2*sizeof(uint32_t)); + q2 += 4; + const uint32_t ls = 2*(aux32[1] >> 28) + 1; + int32_t sumi = 0; + for (int l = 0; l < 4; ++l) { + const uint8_t * grid = (const uint8_t *)(iq2xxs_grid + aux8[l]); + const uint8_t signs = ksigns_iq2xs[(aux32[1] >> 7*l) & 127]; + for (int j = 0; j < 8; ++j) { + sumi += grid[j] * q8[j] * (signs & kmask_iq2xs[j] ? -1 : 1); + } + q8 += 8; + } + bsum += sumi * ls; + } + sumf += d * bsum; + } + *s = 0.125f * sumf; +} + +void ggml_vec_dot_iq2_xs_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_iq2_xs * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + + float sumf = 0.f; + for (int i = 0; i < nb; ++i) { + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; + const uint16_t * GGML_RESTRICT q2 = x[i].qs; + const uint8_t * GGML_RESTRICT sc = x[i].scales; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + int32_t bsum = 0; + for (int ib32 = 0; ib32 < QK_K/32; ++ib32) { + const uint16_t ls1 = 2*(sc[ib32] & 0xf) + 1; + const uint16_t ls2 = 2*(sc[ib32] >> 4) + 1; + int32_t sumi = 0; + for (int l = 0; l < 2; ++l) { + const uint8_t * grid = (const uint8_t *)(iq2xs_grid + (q2[l] & 511)); + const uint8_t signs = ksigns_iq2xs[q2[l] >> 9]; + for (int j = 0; j < 8; ++j) { + sumi += grid[j] * q8[j] * (signs & kmask_iq2xs[j] ? -1 : 1); + } + q8 += 8; + } + bsum += sumi * ls1; + sumi = 0; + for (int l = 2; l < 4; ++l) { + const uint8_t * grid = (const uint8_t *)(iq2xs_grid + (q2[l] & 511)); + const uint8_t signs = ksigns_iq2xs[q2[l] >> 9]; + for (int j = 0; j < 8; ++j) { + sumi += grid[j] * q8[j] * (signs & kmask_iq2xs[j] ? 
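// (Sign handling in these iq2 kernels: each group of 8 weights carries a
// 7-bit index into ksigns_iq2xs, which expands to a byte whose bit j,
// tested with kmask_iq2xs[j], says whether weight j is negated. The grid
// tables store magnitudes only, so the ternary completed below applies
// the sign at dot-product time.)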
-1 : 1); + } + q8 += 8; + } + bsum += sumi * ls2; + q2 += 4; + } + sumf += d * bsum; + } + *s = 0.125f * sumf; +} + +void ggml_vec_dot_iq2_s_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_iq2_s * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + + float sumf = 0; + for (int i = 0; i < nb; i++) { + + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; + const int8_t * q8 = y[i].qs; + const uint8_t * qs = x[i].qs; + const uint8_t * qh = x[i].qh; + const uint8_t * signs = qs + QK_K/8; + + int bsum = 0; + for (int ib32 = 0; ib32 < QK_K/32; ++ib32) { + int ls1 = 1 + 2*(x[i].scales[ib32] & 0xf); + int ls2 = 1 + 2*(x[i].scales[ib32] >> 4); + int sumi1 = 0, sumi2 = 0; + for (int l = 0; l < 2; ++l) { + const uint8_t * grid = (const uint8_t *)(iq2s_grid + (qs[l] | (qh[ib32] << (8-2*l) & 0x300))); + for (int j = 0; j < 8; ++j) { + sumi1 += q8[j] * grid[j] * (signs[l] & kmask_iq2xs[j] ? -1 : 1); + } + q8 += 8; + } + for (int l = 2; l < 4; ++l) { + const uint8_t * grid = (const uint8_t *)(iq2s_grid + (qs[l] | (qh[ib32] << (8-2*l) & 0x300))); + for (int j = 0; j < 8; ++j) { + sumi2 += q8[j] * grid[j] * (signs[l] & kmask_iq2xs[j] ? -1 : 1); + } + q8 += 8; + } + bsum += ls1 * sumi1 + ls2 * sumi2; + qs += 4; + signs += 4; + } + + sumf += d * bsum; + } + + *s = 0.125f * sumf; +} + +void ggml_vec_dot_iq3_xxs_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_iq3_xxs * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + + uint32_t aux32; + + float sumf = 0.f; + for (int i = 0; i < nb; ++i) { + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; + const uint8_t * GGML_RESTRICT q3 = x[i].qs; + const uint8_t * GGML_RESTRICT gas = x[i].qs + QK_K/4; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + int32_t bsum = 0; + for (int ib32 = 0; ib32 < QK_K/32; ++ib32) { + memcpy(&aux32, gas, sizeof(uint32_t)); gas += sizeof(uint32_t); + const uint32_t ls = 2*(aux32 >> 28) + 1; + int32_t sumi = 0; + for (int l = 0; l < 4; ++l) { + const uint8_t * grid1 = (const uint8_t *)(iq3xxs_grid + q3[2*l+0]); + const uint8_t * grid2 = (const uint8_t *)(iq3xxs_grid + q3[2*l+1]); + const uint8_t signs = ksigns_iq2xs[(aux32 >> 7*l) & 127]; + for (int j = 0; j < 4; ++j) { + sumi += grid1[j] * q8[j+0] * (signs & kmask_iq2xs[j+0] ? -1 : 1); + sumi += grid2[j] * q8[j+4] * (signs & kmask_iq2xs[j+4] ? 
-1 : 1); + } + q8 += 8; + } + q3 += 8; + bsum += sumi * ls; + } + sumf += d * bsum; + } + *s = 0.25f * sumf; +} + +void ggml_vec_dot_iq3_s_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_iq3_s * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + + float sumf = 0.f; + for (int i = 0; i < nb; ++i) { + const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d; + const uint8_t * GGML_RESTRICT qs = x[i].qs; + const uint8_t * GGML_RESTRICT qh = x[i].qh; + const uint8_t * GGML_RESTRICT signs = x[i].signs; + const int8_t * GGML_RESTRICT q8 = y[i].qs; + int32_t bsum = 0; + for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) { + const uint32_t ls1 = 2*(x[i].scales[ib32/2] & 0xf) + 1; + const uint32_t ls2 = 2*(x[i].scales[ib32/2] >> 4) + 1; + int32_t sumi = 0; + for (int l = 0; l < 4; ++l) { + const uint8_t * grid1 = (const uint8_t *)(iq3s_grid + (qs[2*l+0] | ((qh[ib32+0] << (8-2*l)) & 256))); + const uint8_t * grid2 = (const uint8_t *)(iq3s_grid + (qs[2*l+1] | ((qh[ib32+0] << (7-2*l)) & 256))); + for (int j = 0; j < 4; ++j) { + sumi += grid1[j] * q8[j+0] * (signs[l] & kmask_iq2xs[j+0] ? -1 : 1); + sumi += grid2[j] * q8[j+4] * (signs[l] & kmask_iq2xs[j+4] ? -1 : 1); + } + q8 += 8; + } + qs += 8; + signs += 4; + bsum += sumi * ls1; + sumi = 0; + for (int l = 0; l < 4; ++l) { + const uint8_t * grid1 = (const uint8_t *)(iq3s_grid + (qs[2*l+0] | ((qh[ib32+1] << (8-2*l)) & 256))); + const uint8_t * grid2 = (const uint8_t *)(iq3s_grid + (qs[2*l+1] | ((qh[ib32+1] << (7-2*l)) & 256))); + for (int j = 0; j < 4; ++j) { + sumi += grid1[j] * q8[j+0] * (signs[l] & kmask_iq2xs[j+0] ? -1 : 1); + sumi += grid2[j] * q8[j+4] * (signs[l] & kmask_iq2xs[j+4] ? -1 : 1); + } + q8 += 8; + } + qs += 8; + signs += 4; + bsum += sumi * ls2; + } + sumf += d * bsum; + } + *s = sumf; +} + +void ggml_vec_dot_iq1_s_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_iq1_s * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + + float sumf = 0; + for (int i = 0; i < nb; i++) { + + const int8_t * q8 = y[i].qs; + const uint8_t * qs = x[i].qs; + const uint16_t * qh = x[i].qh; + + int sumi = 0, sumi1 = 0; + for (int ib = 0; ib < QK_K/32; ++ib) { + const int ls = 2*((qh[ib] >> 12) & 7) + 1; + const int delta = qh[ib] & 0x8000 ? 
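// (Bit 15 of qh picks, for the whole 32-weight block, the sign of the
// constant IQ1S_DELTA offset shared by every weight in it. Because the
// offset is uniform, its dot-product contribution reduces to
// delta * sum(q8), which the kernel reads off the precomputed bsums pair
// in the sumi1 accumulation below instead of touching individual quants.)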
-1 : 1; + int lsum = 0; + for (int l = 0; l < 4; ++l) { + const int8_t * grid = (const int8_t *)(iq1s_grid + (qs[l] | (((qh[ib] >> 3*l) & 7) << 8))); + for (int j = 0; j < 8; ++j) { + lsum += q8[j] * grid[j]; + } + q8 += 8; + } + sumi += ls * lsum; + sumi1 += ls * delta * (y[i].bsums[2*ib+0] + y[i].bsums[2*ib+1]); + qs += 4; + } + + sumf += GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d * (sumi + IQ1S_DELTA * sumi1); + } + + *s = sumf; +} + +void ggml_vec_dot_iq1_m_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(n % QK_K == 0); + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + + const block_iq1_m * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + + iq1m_scale_t scale; + + int sum1[2], sum2[2], delta[4]; + + float sumf = 0; + for (int i = 0; i < nb; i++) { + + const int8_t * q8 = y[i].qs; + const uint8_t * qs = x[i].qs; + const uint8_t * qh = x[i].qh; + const uint16_t * sc = (const uint16_t *)x[i].scales; + + scale.u16 = (sc[0] >> 12) | ((sc[1] >> 8) & 0x00f0) | ((sc[2] >> 4) & 0x0f00) | (sc[3] & 0xf000); + + int sumi1 = 0, sumi2 = 0; + for (int ib = 0; ib < QK_K/32; ++ib) { + delta[0] = qh[0] & 0x08 ? -1 : 1; + delta[1] = qh[0] & 0x80 ? -1 : 1; + delta[2] = qh[1] & 0x08 ? -1 : 1; + delta[3] = qh[1] & 0x80 ? -1 : 1; + sum1[0] = sum1[1] = sum2[0] = sum2[1] = 0; + for (int l = 0; l < 4; ++l) { + const int8_t * grid = (const int8_t *)(iq1s_grid + (qs[l] | (((uint16_t)qh[l/2] << (8 - 4*(l%2))) & 0x700))); + int lsum1 = 0, lsum2 = 0; + for (int j = 0; j < 8; ++j) { + lsum1 += q8[j] * grid[j]; + lsum2 += q8[j]; + } + q8 += 8; + sum1[l/2] += lsum1; + sum2[l/2] += lsum2*delta[l]; + } + + const int ls1 = 2*((sc[ib/2] >> (6*(ib%2)+0)) & 0x7) + 1; + const int ls2 = 2*((sc[ib/2] >> (6*(ib%2)+3)) & 0x7) + 1; + + sumi1 += sum1[0] * ls1 + sum1[1] * ls2; + sumi2 += sum2[0] * ls1 + sum2[1] * ls2; + qs += 4; + qh += 2; + } + + sumf += GGML_CPU_FP16_TO_FP32(scale.f16) * y[i].d * (sumi1 + IQ1M_DELTA * sumi2); + } + + *s = sumf; +} + +void ggml_vec_dot_iq4_nl_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + assert(n % QK4_NL == 0); + static_assert(QK4_NL == QK8_0, "QK4_NL and QK8_0 must be the same"); + + const block_iq4_nl * GGML_RESTRICT x = vx; + const block_q8_0 * GGML_RESTRICT y = vy; + + const int nb = n / QK4_NL; + + int ib = 0; + float sumf = 0; + + for (; ib < nb; ++ib) { + const float d = GGML_CPU_FP16_TO_FP32(y[ib].d)*GGML_CPU_FP16_TO_FP32(x[ib].d); + int sumi1 = 0, sumi2 = 0; + for (int j = 0; j < QK4_NL/2; ++j) { + sumi1 += y[ib].qs[j+ 0] * kvalues_iq4nl[x[ib].qs[j] & 0xf]; + sumi2 += y[ib].qs[j+QK4_NL/2] * kvalues_iq4nl[x[ib].qs[j] >> 4]; + } + sumf += d * (sumi1 + sumi2); + } + *s = sumf; +} + +void ggml_vec_dot_iq4_xs_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) { + assert(nrc == 1); + UNUSED(nrc); + UNUSED(bx); + UNUSED(by); + UNUSED(bs); + assert(n % QK_K == 0); + + const block_iq4_xs * GGML_RESTRICT x = vx; + const block_q8_K * GGML_RESTRICT y = vy; + + const int nb = n / QK_K; + + float sumf = 0; + for (int ibl = 0; ibl < nb; ++ibl) { + const float d4d8 = GGML_CPU_FP16_TO_FP32(x[ibl].d) * y[ibl].d; + uint16_t h = x[ibl].scales_h; + 
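// Each 32-weight sub-block of iq4_xs gets a 6-bit scale: the low 4 bits
// come from scales_l (two per byte) and the top 2 bits are peeled off the
// scales_h word consumed below, then re-centered by subtracting 32. The
// 4-bit quants are not linear values but indices into the kvalues_iq4nl
// codebook of non-uniformly spaced int8 levels, so one weight dequantizes
// roughly as:
//
//     int q = qs[j] & 0xf;        // 4-bit index, 0..15
//     int w = kvalues_iq4nl[q];   // non-linear int8 level
//     // x_hat = d * (ls - 32) * w, with d the super-block FP16 scale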
const uint8_t * qs = x[ibl].qs; + const int8_t * q8 = y[ibl].qs; + for (int ib = 0; ib < QK_K/32; ib += 2) { + const uint8_t ls1 = (x[ibl].scales_l[ib/2] & 0xf) | ((h << 4) & 0x30); + const uint8_t ls2 = (x[ibl].scales_l[ib/2] >> 4) | ((h << 2) & 0x30); + h >>= 4; + const float d1 = d4d8*(ls1 - 32); + const float d2 = d4d8*(ls2 - 32); + int sumi1 = 0, sumi2 = 0; + for (int j = 0; j < 16; ++j) { + sumi1 += q8[j+ 0] * kvalues_iq4nl[qs[j] & 0xf]; + sumi2 += q8[j+16] * kvalues_iq4nl[qs[j] >> 4]; + } + sumf += d1 * (sumi1 + sumi2); + qs += 16; + q8 += 32; + sumi1 = sumi2 = 0; + for (int j = 0; j < 16; ++j) { + sumi1 += q8[j+ 0] * kvalues_iq4nl[qs[j] & 0xf]; + sumi2 += q8[j+16] * kvalues_iq4nl[qs[j] >> 4]; + } + sumf += d2 * (sumi1 + sumi2); + qs += 16; + q8 += 32; + } + } + *s = sumf; +} + +// ============================ 4-bit non-linear quants + +void quantize_row_iq4_nl(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k) { + assert(k % QK4_NL == 0); + quantize_row_iq4_nl_ref(x, y, k); +} + +void quantize_row_iq4_xs(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k) { + assert(k % QK_K == 0); + quantize_iq4_xs(x, y, 1, k, NULL); +} diff --git a/ml/backend/ggml/ggml/src/ggml-cpu/ggml-cpu-quants.h b/ml/backend/ggml/ggml/src/ggml-cpu/quants.h similarity index 53% rename from ml/backend/ggml/ggml/src/ggml-cpu/ggml-cpu-quants.h rename to ml/backend/ggml/ggml/src/ggml-cpu/quants.h index 6a25d0626..d83eb1b14 100644 --- a/ml/backend/ggml/ggml/src/ggml-cpu/ggml-cpu-quants.h +++ b/ml/backend/ggml/ggml/src/ggml-cpu/quants.h @@ -19,6 +19,8 @@ void quantize_row_q5_1(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, in void quantize_row_q8_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); void quantize_row_q8_1(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); +void quantize_row_mxfp4(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); + void quantize_row_q2_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); void quantize_row_q3_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); void quantize_row_q4_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); @@ -39,6 +41,8 @@ void ggml_vec_dot_q5_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const voi void ggml_vec_dot_q5_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); void ggml_vec_dot_q8_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); +void ggml_vec_dot_mxfp4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); + void ggml_vec_dot_q2_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); void ggml_vec_dot_q3_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); void ggml_vec_dot_q4_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); @@ -58,7 +62,35 @@ void ggml_vec_dot_iq4_nl_q8_0 (int n, float * GGML_RESTRICT s, size_t bs, const void ggml_vec_dot_iq4_xs_q8_K (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t 
by, int nrc); void ggml_vec_dot_iq3_s_q8_K (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); -void ggml_vec_dot_mxfp4(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const float * GGML_RESTRICT y, size_t by, int nrc); +// Generic implementation +void quantize_row_q8_0_generic(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k); +void quantize_row_q8_1_generic(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k); +void quantize_row_q8_K_generic(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k); +void ggml_vec_dot_q4_0_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); +void ggml_vec_dot_q4_1_q8_1_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); +void ggml_vec_dot_q5_0_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); +void ggml_vec_dot_q5_1_q8_1_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); +void ggml_vec_dot_q8_0_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); + +void ggml_vec_dot_mxfp4_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); + +void ggml_vec_dot_tq1_0_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); +void ggml_vec_dot_tq2_0_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); + +void ggml_vec_dot_q2_K_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); +void ggml_vec_dot_q3_K_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); +void ggml_vec_dot_q4_K_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); +void ggml_vec_dot_q5_K_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); +void ggml_vec_dot_q6_K_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); +void ggml_vec_dot_iq2_xxs_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); +void ggml_vec_dot_iq2_xs_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); +void ggml_vec_dot_iq2_s_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc); +void ggml_vec_dot_iq3_xxs_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, 
const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc);
+void ggml_vec_dot_iq3_s_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc);
+void ggml_vec_dot_iq1_s_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc);
+void ggml_vec_dot_iq1_m_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc);
+void ggml_vec_dot_iq4_nl_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc);
+void ggml_vec_dot_iq4_xs_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc);

 #ifdef __cplusplus
 }
diff --git a/ml/backend/ggml/ggml/src/ggml-cpu/repack.cpp b/ml/backend/ggml/ggml/src/ggml-cpu/repack.cpp
new file mode 100644
index 000000000..2583aefae
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-cpu/repack.cpp
@@ -0,0 +1,1833 @@
+#define GGML_COMMON_IMPL_CPP
+#define GGML_COMMON_DECL_CPP
+#include "ggml-common.h"
+#include "ggml-backend-impl.h"
+
+#include "ggml-impl.h"
+#include "ggml-cpu.h"
+#include "ggml-cpu-impl.h"
+#include "simd-mappings.h"
+#include "traits.h"
+
+#include "arch-fallback.h"
+
+#include <cmath>
+#include <cstring>
+#include <cassert>
+#include <cstdio>  // for GGML_ASSERT
+
+#include "repack.h"
+
+#if defined(__GNUC__)
+#pragma GCC diagnostic ignored "-Woverlength-strings"
+#endif
+
+#define UNUSED GGML_UNUSED
+
+static inline int nearest_int(float fval) {
+    assert(fabsf(fval) <= 4194303.f);
+    float val = fval + 12582912.f;
+    int i; memcpy(&i, &val, sizeof(int));
+    return (i & 0x007fffff) - 0x00400000;
+}
+
+// Functions to create the interleaved data layout formats
+
+// interleave 4 block_q4_0s in blocks of blck_size_interleave
+// returns an interleaved block_q4_0x4
+// in the interleaved block_q4_0x4, place deltas for 4 block_q4_0 blocks
+// first, then interleave quants from 4 block_q4_0s in blocks of blck_size_interleave
+//
+// - in                   : an array of block_q4_0 pointers
+// - blck_size_interleave : the block_q4_0 quants bytes are interleaved in blocks of
+//                          blck_size_interleave bytes
+// - xor_mask             : the mask to convert the nibbles in block_q4_0 quants bytes
+//                          from bias offset form to pure sign form (this saves subtract
+//                          operations during unpacking)
+//
+
+extern "C" {
+
+void ggml_quantize_mat_q8_0_4x4_generic(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) {
+    assert(QK8_0 == 32);
+    assert(k % QK8_0 == 0);
+    const int nb = k / QK8_0;
+
+    block_q8_0x4 * GGML_RESTRICT y = (block_q8_0x4 *) vy;
+
+    // scalar
+    const int blck_size_interleave = 4;
+    float srcv[4][QK8_0];
+    float id[4];
+
+    for (int i = 0; i < nb; i++) {
+        for (int row_iter = 0; row_iter < 4; row_iter++) {
+            float amax = 0.0f; // absolute max
+
+            for (int j = 0; j < QK8_0; j++) {
+                srcv[row_iter][j] = x[row_iter * k + i * QK8_0 + j];
+                amax = MAX(amax, fabsf(srcv[row_iter][j]));
+            }
+
+            const float d = amax / ((1 << 7) - 1);
+            id[row_iter] = d ?
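// (The conditional finished below guards the all-zero row: amax == 0 makes
// d == 0, and a zero inverse scale quantizes every element to 0 rather
// than dividing by zero. Also worth a note for nearest_int() above:
// adding 12582912.0f, i.e. 1.5 * 2^23, lands the integer part of fval in
// the low mantissa bits of the float, so masking with 0x007fffff and
// subtracting 0x00400000 (2^22) yields round-to-nearest without an
// explicit conversion; hence the assert that |fval| <= 4194303 = 2^22 - 1.)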
1.0f / d : 0.0f; + + y[i].d[row_iter] = GGML_CPU_FP32_TO_FP16(d); + } + + for (int j = 0; j < QK8_0 * 4; j++) { + int src_offset = (j / (4 * blck_size_interleave)) * blck_size_interleave; + int src_id = (j % (4 * blck_size_interleave)) / blck_size_interleave; + src_offset += (j % blck_size_interleave); + + float x0 = srcv[src_id][src_offset] * id[src_id]; + y[i].qs[j] = roundf(x0); + } + } +} + +void ggml_quantize_mat_q8_0_4x8_generic(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { + assert(QK8_0 == 32); + assert(k % QK8_0 == 0); + const int nb = k / QK8_0; + + block_q8_0x4 * GGML_RESTRICT y = (block_q8_0x4 *) vy; + + // scalar + const int blck_size_interleave = 8; + float srcv[4][QK8_0]; + float id[4]; + + for (int i = 0; i < nb; i++) { + for (int row_iter = 0; row_iter < 4; row_iter++) { + float amax = 0.0f; // absolute max + + for (int j = 0; j < QK8_0; j++) { + srcv[row_iter][j] = x[row_iter * k + i * QK8_0 + j]; + amax = MAX(amax, fabsf(srcv[row_iter][j])); + } + + const float d = amax / ((1 << 7) - 1); + id[row_iter] = d ? 1.0f / d : 0.0f; + + y[i].d[row_iter] = GGML_CPU_FP32_TO_FP16(d); + } + + for (int j = 0; j < QK8_0 * 4; j++) { + int src_offset = (j / (4 * blck_size_interleave)) * blck_size_interleave; + int src_id = (j % (4 * blck_size_interleave)) / blck_size_interleave; + src_offset += (j % blck_size_interleave); + + float x0 = srcv[src_id][src_offset] * id[src_id]; + y[i].qs[j] = roundf(x0); + } + } +} + +void ggml_quantize_mat_q8_K_4x8_generic(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { + assert(QK_K == 256); + assert(k % QK_K == 0); + const int nb = k / QK_K; + + block_q8_Kx4 * GGML_RESTRICT y = (block_q8_Kx4 *) vy; + + // scalar + const int blck_size_interleave = 8; + float srcv[4][QK_K]; + float iscale[4]; + + for (int i = 0; i < nb; i++) { + for (int row_iter = 0; row_iter < 4; row_iter++) { + float amax = 0.0f; // absolute max + float max = 0; + + for (int j = 0; j < QK_K; j++) { + srcv[row_iter][j] = x[row_iter * k + i * QK_K + j]; + // Update the maximum value of the corresponding super block + if(amax < fabsf(srcv[row_iter][j])) { + amax = fabsf(srcv[row_iter][j]); + max = srcv[row_iter][j]; + } + } + + iscale[row_iter] = amax ? -127.f/max : 0; + + y[i].d[row_iter] = amax ? 
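// (Here iscale is -127.f/max with max the signed entry of largest
// magnitude, so that extreme value quantizes exactly to -127 and all
// others stay inside [-127, 127]; storing d = 1/iscale keeps
// dequantization a plain multiply. Toy example: max = 2.54 gives
// iscale = -50 and d = -0.02, so 2.54 maps to -127 and back to
// -0.02 * -127 = 2.54.)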
1/iscale[row_iter] : 0; + } + + for (int j = 0; j < QK_K / 4; j++) { + y[i].bsums[j] = 0; + } + + // Quants values are interleaved in sequence of eight bytes from corresponding super blocks + // Bsums values are interleaved in sequence of four bsums from each super block taken for interleaving + // i.e. first four bsums from the first super block, followed by first four bsums from second super block and so on + for (int j = 0; j < QK_K * 4; j++) { + int src_offset = (j / (4 * blck_size_interleave)) * blck_size_interleave; + int src_id = (j % (4 * blck_size_interleave)) / blck_size_interleave; + src_offset += (j % blck_size_interleave); + int index = (((j & 31) >> 3) << 2) + ((j >> 8) << 4) + ((j >> 6) & 3); + + float x0 = srcv[src_id][src_offset] * iscale[src_id]; + y[i].qs[j] = nearest_int(x0); + y[i].bsums[index] += y[i].qs[j]; + } + } +} + +} // extern "C" + +template <int64_t INTER_SIZE, ggml_type PARAM_TYPE> +void ggml_quantize_mat_t(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t nrow, int64_t n_per_row); + +template <> void ggml_quantize_mat_t<4, GGML_TYPE_Q8_0>(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t nrow, int64_t n_per_row) { + assert(nrow == 4); + UNUSED(nrow); + ggml_quantize_mat_q8_0_4x4(x, vy, n_per_row); +} + +template <> void ggml_quantize_mat_t<8, GGML_TYPE_Q8_0>(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t nrow, int64_t n_per_row) { + assert(nrow == 4); + UNUSED(nrow); + ggml_quantize_mat_q8_0_4x8(x, vy, n_per_row); +} + +template <> void ggml_quantize_mat_t<8, GGML_TYPE_Q8_K>(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t nrow, int64_t n_per_row) { + assert(nrow == 4); + UNUSED(nrow); + ggml_quantize_mat_q8_K_4x8(x, vy, n_per_row); +} + +extern "C" { + +void ggml_gemv_q4_0_4x4_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { + const int qk = QK8_0; + const int nb = n / qk; + const int ncols_interleaved = 4; + const int blocklen = 4; + + assert (n % qk == 0); + assert (nc % ncols_interleaved == 0); + + UNUSED(s); + UNUSED(bs); + UNUSED(vx); + UNUSED(vy); + UNUSED(nr); + UNUSED(nc); + UNUSED(nb); + UNUSED(ncols_interleaved); + UNUSED(blocklen); + + float sumf[4]; + int sumi; + + const block_q8_0 * a_ptr = (const block_q8_0 *) vy; + for (int x = 0; x < nc / ncols_interleaved; x++) { + const block_q4_0x4 * b_ptr = (const block_q4_0x4 *) vx + (x * nb); + + for (int j = 0; j < ncols_interleaved; j++) sumf[j] = 0.0; + for (int l = 0; l < nb; l++) { + for (int k = 0; k < (qk / (2 * blocklen)); k++) { + for (int j = 0; j < ncols_interleaved; j++) { + sumi = 0; + for (int i = 0; i < blocklen; ++i) { + const int v0 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] << 4); + const int v1 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0xF0); + sumi += ((v0 * a_ptr[l].qs[k * blocklen + i]) + (v1 * a_ptr[l].qs[k * blocklen + i + qk / 2])) >> 4; + } + sumf[j] += sumi * GGML_CPU_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_CPU_FP16_TO_FP32(a_ptr[l].d); + } + } + } + for (int j = 0; j < ncols_interleaved; j++) s[x * ncols_interleaved + j] = sumf[j]; + } +} + +void ggml_gemv_q4_0_4x8_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { + const int qk = QK8_0; + const int nb = n / qk; + const int ncols_interleaved = 4; + const int blocklen = 8; + + assert (n % qk == 0); + assert (nc % ncols_interleaved == 0); + + UNUSED(s); + UNUSED(bs); +
UNUSED(vx); + UNUSED(vy); + UNUSED(nr); + UNUSED(nc); + UNUSED(nb); + UNUSED(ncols_interleaved); + UNUSED(blocklen); + + float sumf[4]; + int sumi; + + const block_q8_0 * a_ptr = (const block_q8_0 *) vy; + for (int x = 0; x < nc / ncols_interleaved; x++) { + const block_q4_0x4 * b_ptr = (const block_q4_0x4 *) vx + (x * nb); + + for (int j = 0; j < ncols_interleaved; j++) sumf[j] = 0.0; + for (int l = 0; l < nb; l++) { + for (int k = 0; k < (qk / (2 * blocklen)); k++) { + for (int j = 0; j < ncols_interleaved; j++) { + sumi = 0; + for (int i = 0; i < blocklen; ++i) { + const int v0 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] << 4); + const int v1 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0xF0); + sumi += ((v0 * a_ptr[l].qs[k * blocklen + i]) + (v1 * a_ptr[l].qs[k * blocklen + i + qk / 2])) >> 4; + } + sumf[j] += sumi * GGML_CPU_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_CPU_FP16_TO_FP32(a_ptr[l].d); + } + } + } + for (int j = 0; j < ncols_interleaved; j++) s[x * ncols_interleaved + j] = sumf[j]; + } +} + +void ggml_gemv_q4_0_8x8_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { + const int qk = QK8_0; + const int nb = n / qk; + const int ncols_interleaved = 8; + const int blocklen = 8; + + assert (n % qk == 0); + assert (nc % ncols_interleaved == 0); + + UNUSED(s); + UNUSED(bs); + UNUSED(vx); + UNUSED(vy); + UNUSED(nr); + UNUSED(nc); + UNUSED(nb); + UNUSED(ncols_interleaved); + UNUSED(blocklen); + + { + float sumf[8]; + int sumi; + + const block_q8_0 * a_ptr = (const block_q8_0 *) vy; + for (int x = 0; x < nc / ncols_interleaved; x++) { + const block_q4_0x8 * b_ptr = (const block_q4_0x8 *) vx + (x * nb); + + for (int j = 0; j < ncols_interleaved; j++) sumf[j] = 0.0; + for (int l = 0; l < nb; l++) { + for (int k = 0; k < (qk / (2 * blocklen)); k++) { + for (int j = 0; j < ncols_interleaved; j++) { + sumi = 0; + for (int i = 0; i < blocklen; ++i) { + const int v0 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] << 4); + const int v1 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0xF0); + sumi += ((v0 * a_ptr[l].qs[k * blocklen + i]) + (v1 * a_ptr[l].qs[k * blocklen + i + qk / 2])) >> 4; + } + sumf[j] += sumi * GGML_CPU_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_CPU_FP16_TO_FP32(a_ptr[l].d); + } + } + } + for (int j = 0; j < ncols_interleaved; j++) s[x * ncols_interleaved + j] = sumf[j]; + } + } +} + +void ggml_gemv_q4_K_8x8_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { + const int qk = QK_K; + const int nb = n / qk; + const int ncols_interleaved = 8; + const int blocklen = 8; + static const uint32_t kmask1 = 0x3f3f3f3f; + static const uint32_t kmask2 = 0x0f0f0f0f; + static const uint32_t kmask3 = 0x03030303; + + assert (n % qk == 0); + assert (nc % ncols_interleaved == 0); + + UNUSED(s); + UNUSED(bs); + UNUSED(vx); + UNUSED(vy); + UNUSED(nr); + UNUSED(nc); + UNUSED(nb); + UNUSED(ncols_interleaved); + UNUSED(blocklen); + + float sumf[8]; + float sum_minf[8]; + uint32_t utmp[32]; + int sumi1; + int sumi2; + int sumi; + + const block_q8_K * a_ptr = (const block_q8_K *) vy; + for (int x = 0; x < nc / ncols_interleaved; x++) { + const block_q4_Kx8 * b_ptr = (const block_q4_Kx8 *) vx + (x * nb); + + for (int j = 0; j < ncols_interleaved; j++) { + sumf[j] = 0.0; + sum_minf[j] = 0.0; + } + for (int l = 0; l < nb; 
l++) { + for (int sb = 0; sb < 8; sb++) { + memcpy(utmp + sb * 4, b_ptr[l].scales + sb * 12, 12); + utmp[sb * 4 + 3] = ((utmp[sb * 4 + 2] >> 4) & kmask2) | (((utmp[sb * 4 + 1] >> 6) & kmask3) << 4); + const uint32_t uaux_0 = utmp[sb * 4 + 1] & kmask1; + utmp[sb * 4 + 1] = (utmp[sb * 4 + 2] & kmask2) | (((utmp[sb * 4 + 0] >> 6) & kmask3) << 4); + utmp[sb * 4 + 2] = uaux_0; + utmp[sb * 4 + 0] &= kmask1; + } + for (int k = 0; k < (qk / (2 * blocklen)); k++) { + uint8_t *scales_0 = (uint8_t*) utmp + (k / 4) * 32; + uint8_t *scales_1 = (uint8_t*) utmp + (k / 4) * 32 + 16; + for (int j = 0; j < ncols_interleaved; j++) { + sumi1 = 0; + sumi2 = 0; + sumi = 0; + for (int i = 0; i < blocklen; ++i) { + const int v0 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0xF); + const int v1 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] >> 4); + sumi1 = (v0 * a_ptr[l].qs[(k >> 2) * 64 + (k % 4) * blocklen + i]); + sumi2 = (v1 * a_ptr[l].qs[(k >> 2) * 64 + (k % 4) * blocklen + i + 32]); + sumi1 = sumi1 * scales_0[j]; + sumi2 = sumi2 * scales_1[j]; + sumi += sumi1 + sumi2; + } + sumf[j] += sumi * GGML_CPU_FP16_TO_FP32(b_ptr[l].d[j]) * a_ptr[l].d; + } + } + for (int sb = 0; sb < 8; sb++) { + uint8_t *mins = (uint8_t*) utmp + 8 + sb * 16; + for (int j = 0; j < ncols_interleaved; j++) { + sum_minf[j] += mins[j] * (a_ptr[l].bsums[sb * 2] + a_ptr[l].bsums[sb * 2 + 1]) * GGML_CPU_FP16_TO_FP32(b_ptr[l].dmin[j]) * a_ptr[l].d; + } + } + } + for (int j = 0; j < ncols_interleaved; j++) { + s[x * ncols_interleaved + j] = sumf[j] - sum_minf[j]; + } + } +} + +void ggml_gemv_q2_K_8x8_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { + const int qk = QK_K; + const int nb = n / qk; + const int ncols_interleaved = 8; + const int blocklen = 8; + + assert (n % qk == 0); + assert (nc % ncols_interleaved == 0); + + UNUSED(s); + UNUSED(bs); + UNUSED(vx); + UNUSED(vy); + UNUSED(nr); + UNUSED(nc); + UNUSED(nb); + UNUSED(ncols_interleaved); + UNUSED(blocklen); + + float sumf[8]; + float sum_minf[8]; + int sumi1,sumi2,sumi3,sumi4; + int sumi; + + const block_q8_K * a_ptr = (const block_q8_K *)vy; + for(int x = 0; x < nc / ncols_interleaved; x++) { + const block_q2_Kx8 * b_ptr = (const block_q2_Kx8 *) vx + (x * nb); + for (int j = 0; j < ncols_interleaved; j++) { + sumf[j] = 0.0; + sum_minf[j] = 0.0; + } + for (int l = 0; l < nb; l++) { + for (int k = 0; k < (qk / (4 * blocklen)); k++) { + const uint8_t *scales_0 = b_ptr[l].scales + (k / 4) * 64 ; + const uint8_t *scales_1 = b_ptr[l].scales + (k / 4) * 64 + 16; + const uint8_t *scales_2 = b_ptr[l].scales + (k / 4) * 64 + 32; + const uint8_t *scales_3 = b_ptr[l].scales + (k / 4) * 64 + 48; + for (int j = 0; j < ncols_interleaved; j++) { + sumi1 = 0; + sumi2 = 0; + sumi3 = 0; + sumi4 = 0; + sumi = 0; + int offset = ((k / 2) % 2) + j * 2; + for (int i = 0; i < blocklen; ++i){ + const int v0 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 3); + const int v1 = (int8_t) ((b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] >> 2 ) & 3); + const int v2 = (int8_t) ((b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] >> 4 ) & 3); + const int v3 = (int8_t) ((b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] >> 6 ) & 3); + sumi1 = (v0 * a_ptr[l].qs[(k >> 2) * 128 + (k % 4) * blocklen + i]); + sumi2 = (v1 * a_ptr[l].qs[(k >> 2) * 128 + (k % 4) * blocklen + i + 32]); + sumi3 = (v2 * 
a_ptr[l].qs[(k >> 2) * 128 + (k % 4) * blocklen + i + 64]); + sumi4 = (v3 * a_ptr[l].qs[(k >> 2) * 128 + (k % 4) * blocklen + i + 96]); + + sumi1 = sumi1 * (scales_0[offset] & 0xF); + sumi2 = sumi2 * (scales_1[offset] & 0xF); + sumi3 = sumi3 * (scales_2[offset] & 0xF); + sumi4 = sumi4 * (scales_3[offset] & 0xF); + sumi += sumi1 + sumi2 + sumi3 + sumi4; + } + sumf[j] += sumi * GGML_FP16_TO_FP32(b_ptr[l].d[j]) * a_ptr[l].d; + } + } + for(int sb = 0; sb < 8; sb++) { + const uint8_t *mins = b_ptr[l].scales + sb * 16; + for(int j = 0; j < ncols_interleaved; j++){ + sum_minf[j] += ((mins[j * 2] >> 4) * a_ptr[l].bsums[sb * 2] + (mins[(j * 2)+ 1] >> 4) * a_ptr[l].bsums[sb * 2 + 1]) * GGML_FP16_TO_FP32(b_ptr[l].dmin[j]) * a_ptr[l].d; + } + } + } + for (int j = 0; j < ncols_interleaved; j++) { + s[x * ncols_interleaved + j] = sumf[j] - sum_minf[j]; + } + } +} + +void ggml_gemv_iq4_nl_4x4_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { + const int qk = QK8_0; + const int nb = n / qk; + const int ncols_interleaved = 4; + const int blocklen = 4; + + assert (n % qk == 0); + assert (nc % ncols_interleaved == 0); + + UNUSED(s); + UNUSED(bs); + UNUSED(vx); + UNUSED(vy); + UNUSED(nr); + UNUSED(nc); + UNUSED(nb); + UNUSED(ncols_interleaved); + UNUSED(blocklen); + + { + float sumf[4]; + int sumi; + + const block_q8_0 * a_ptr = (const block_q8_0 *) vy; + for (int x = 0; x < nc / ncols_interleaved; x++) { + const block_iq4_nlx4 * b_ptr = (const block_iq4_nlx4 *) vx + (x * nb); + + for (int j = 0; j < ncols_interleaved; j++) sumf[j] = 0.0; + for (int l = 0; l < nb; l++) { + for (int k = 0; k < (qk / (2 * blocklen)); k++) { + for (int j = 0; j < ncols_interleaved; j++) { + sumi = 0; + for (int i = 0; i < blocklen; ++i) { + const int v0 = kvalues_iq4nl[b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0x0F]; + const int v1 = kvalues_iq4nl[b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] >> 4]; + sumi += ((v0 * a_ptr[l].qs[k * blocklen + i]) + (v1 * a_ptr[l].qs[k * blocklen + i + qk / 2])); + } + sumf[j] += sumi * GGML_CPU_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_CPU_FP16_TO_FP32(a_ptr[l].d); + } + } + } + for (int j = 0; j < ncols_interleaved; j++) s[x * ncols_interleaved + j] = sumf[j]; + } + } +} + +void ggml_gemm_q4_0_4x4_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { + const int qk = QK8_0; + const int nb = n / qk; + const int ncols_interleaved = 4; + const int blocklen = 4; + + assert (n % qk == 0); + assert (nr % 4 == 0); + assert (nc % ncols_interleaved == 0); + + UNUSED(s); + UNUSED(bs); + UNUSED(vx); + UNUSED(vy); + UNUSED(nr); + UNUSED(nc); + UNUSED(nb); + UNUSED(ncols_interleaved); + UNUSED(blocklen); + + { + float sumf[4][4]; + int sumi; + + for (int y = 0; y < nr / 4; y++) { + const block_q8_0x4 * a_ptr = (const block_q8_0x4 *) vy + (y * nb); + for (int x = 0; x < nc / ncols_interleaved; x++) { + const block_q4_0x4 * b_ptr = (const block_q4_0x4 *) vx + (x * nb); + for (int m = 0; m < 4; m++) { + for (int j = 0; j < ncols_interleaved; j++) sumf[m][j] = 0.0; + } + for (int l = 0; l < nb; l++) { + for (int k = 0; k < (qk / (2 * blocklen)); k++) { + for (int m = 0; m < 4; m++) { + for (int j = 0; j < ncols_interleaved; j++) { + sumi = 0; + for (int i = 0; i < blocklen; ++i) { + const int v0 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] << 4); + const int v1 = (int8_t) 
(b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0xF0); + sumi += ((v0 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i]) + + (v1 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i + qk / 2 * 4])) >> 4; + } + sumf[m][j] += sumi * GGML_CPU_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_CPU_FP16_TO_FP32(a_ptr[l].d[m]); + } + } + } + } + for (int m = 0; m < 4; m++) { + for (int j = 0; j < ncols_interleaved; j++) + s[(y * 4 + m) * bs + x * ncols_interleaved + j] = sumf[m][j]; + } + } + } + } +} + +void ggml_gemm_q4_0_4x8_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { + const int qk = QK8_0; + const int nb = n / qk; + const int ncols_interleaved = 4; + const int blocklen = 8; + + assert (n % qk == 0); + assert (nr % 4 == 0); + assert (nc % ncols_interleaved == 0); + + UNUSED(s); + UNUSED(bs); + UNUSED(vx); + UNUSED(vy); + UNUSED(nr); + UNUSED(nc); + UNUSED(nb); + UNUSED(ncols_interleaved); + UNUSED(blocklen); + + float sumf[4][4]; + int sumi; + + for (int y = 0; y < nr / 4; y++) { + const block_q8_0x4 * a_ptr = (const block_q8_0x4 *) vy + (y * nb); + for (int x = 0; x < nc / ncols_interleaved; x++) { + const block_q4_0x4 * b_ptr = (const block_q4_0x4 *) vx + (x * nb); + for (int m = 0; m < 4; m++) { + for (int j = 0; j < ncols_interleaved; j++) sumf[m][j] = 0.0; + } + for (int l = 0; l < nb; l++) { + for (int k = 0; k < (qk / (2 * blocklen)); k++) { + for (int m = 0; m < 4; m++) { + for (int j = 0; j < ncols_interleaved; j++) { + sumi = 0; + for (int i = 0; i < blocklen; ++i) { + const int v0 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] << 4); + const int v1 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0xF0); + sumi += ((v0 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i]) + + (v1 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i + qk / 2 * 4])) >> 4; + } + sumf[m][j] += sumi * GGML_CPU_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_CPU_FP16_TO_FP32(a_ptr[l].d[m]); + } + } + } + } + for (int m = 0; m < 4; m++) { + for (int j = 0; j < ncols_interleaved; j++) + s[(y * 4 + m) * bs + x * ncols_interleaved + j] = sumf[m][j]; + } + } + } +} + +void ggml_gemm_q4_0_8x8_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { + const int qk = QK8_0; + const int nb = n / qk; + const int ncols_interleaved = 8; + const int blocklen = 8; + + assert (n % qk == 0); + assert (nr % 4 == 0); + assert (nc % ncols_interleaved == 0); + + UNUSED(s); + UNUSED(bs); + UNUSED(vx); + UNUSED(vy); + UNUSED(nr); + UNUSED(nc); + UNUSED(nb); + UNUSED(ncols_interleaved); + UNUSED(blocklen); + + float sumf[4][8]; + int sumi; + + for (int y = 0; y < nr / 4; y++) { + const block_q8_0x4 * a_ptr = (const block_q8_0x4 *) vy + (y * nb); + for (int x = 0; x < nc / ncols_interleaved; x++) { + const block_q4_0x8 * b_ptr = (const block_q4_0x8 *) vx + (x * nb); + for (int m = 0; m < 4; m++) { + for (int j = 0; j < ncols_interleaved; j++) sumf[m][j] = 0.0; + } + for (int l = 0; l < nb; l++) { + for (int k = 0; k < (qk / (2 * blocklen)); k++) { + for (int m = 0; m < 4; m++) { + for (int j = 0; j < ncols_interleaved; j++) { + sumi = 0; + for (int i = 0; i < blocklen; ++i) { + const int v0 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] << 4); + const int v1 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0xF0); + sumi += ((v0 * a_ptr[l].qs[k * 4 * blocklen 
+ m * blocklen + i]) + + (v1 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i + qk / 2 * 4])) >> 4; + } + sumf[m][j] += sumi * GGML_CPU_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_CPU_FP16_TO_FP32(a_ptr[l].d[m]); + } + } + } + } + for (int m = 0; m < 4; m++) { + for (int j = 0; j < ncols_interleaved; j++) + s[(y * 4 + m) * bs + x * ncols_interleaved + j] = sumf[m][j]; + } + } + } +} + +void ggml_gemm_q4_K_8x8_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { + const int qk = QK_K; + const int nb = n / qk; + const int ncols_interleaved = 8; + const int blocklen = 8; + static const uint32_t kmask1 = 0x3f3f3f3f; + static const uint32_t kmask2 = 0x0f0f0f0f; + static const uint32_t kmask3 = 0x03030303; + + assert (n % qk == 0); + assert (nr % 4 == 0); + assert (nc % ncols_interleaved == 0); + + UNUSED(s); + UNUSED(bs); + UNUSED(vx); + UNUSED(vy); + UNUSED(nr); + UNUSED(nc); + UNUSED(nb); + UNUSED(ncols_interleaved); + UNUSED(blocklen); + + float sumf[4][8]; + float sum_minf[4][8]; + uint32_t utmp[32]; + int sumi1; + int sumi2; + int sumi; + + for (int y = 0; y < nr / 4; y++) { + const block_q8_Kx4 * a_ptr = (const block_q8_Kx4 *) vy + (y * nb); + for (int x = 0; x < nc / ncols_interleaved; x++) { + const block_q4_Kx8 * b_ptr = (const block_q4_Kx8 *) vx + (x * nb); + for (int m = 0; m < 4; m++) { + for (int j = 0; j < ncols_interleaved; j++) { + sumf[m][j] = 0.0; + sum_minf[m][j] = 0.0; + } + } + for (int l = 0; l < nb; l++) { + for (int sb = 0; sb < 8; sb++) { + memcpy(utmp + sb * 4, b_ptr[l].scales + sb * 12, 12); + utmp[sb * 4 + 3] = ((utmp[sb * 4 + 2] >> 4) & kmask2) | (((utmp[sb * 4 + 1] >> 6) & kmask3) << 4); + const uint32_t uaux_0 = utmp[sb * 4 + 1] & kmask1; + utmp[sb * 4 + 1] = (utmp[sb * 4 + 2] & kmask2) | (((utmp[sb * 4 + 0] >> 6) & kmask3) << 4); + utmp[sb * 4 + 2] = uaux_0; + utmp[sb * 4 + 0] &= kmask1; + } + for (int k = 0; k < (qk / (2 * blocklen)); k++) { + uint8_t *scales_0 = (uint8_t*) utmp + (k / 4) * 32; + uint8_t *scales_1 = (uint8_t*) utmp + (k / 4) * 32 + 16; + for (int m = 0; m < 4; m++) { + for (int j = 0; j < ncols_interleaved; j++) { + sumi1 = 0; + sumi2 = 0; + sumi = 0; + for (int i = 0; i < blocklen; ++i) { + const int v0 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0xF); + const int v1 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] >> 4); + sumi1 = (v0 * a_ptr[l].qs[(k >> 2) * 256 + (k % 4) * 4 * blocklen + m * blocklen + i]); + sumi2 = (v1 * a_ptr[l].qs[(k >> 2) * 256 + (k % 4) * 4 * blocklen + m * blocklen + i + 128]); + sumi1 = sumi1 * scales_0[j]; + sumi2 = sumi2 * scales_1[j]; + sumi += sumi1 + sumi2; + } + sumf[m][j] += sumi * GGML_CPU_FP16_TO_FP32(b_ptr[l].d[j]) * a_ptr[l].d[m]; + } + } + } + for (int sb = 0; sb < 8; sb++) { + uint8_t *mins = (uint8_t*) utmp + 8 + sb * 16; + for(int m = 0; m < 4; m++) { + const int16_t *bsums = a_ptr[l].bsums + (sb * 8) + (m * 4) - ((sb % 2) * 6); + for(int j = 0; j < ncols_interleaved; j++) { + sum_minf[m][j] += mins[j] * (bsums[0] + bsums[1]) * GGML_CPU_FP16_TO_FP32(b_ptr[l].dmin[j]) * a_ptr[l].d[m]; + } + } + } + } + for (int m = 0; m < 4; m++) { + for (int j = 0; j < ncols_interleaved; j++) { + s[(y * 4 + m) * bs + x * ncols_interleaved + j] = sumf[m][j] - sum_minf[m][j]; + } + } + } + } +} + +void ggml_gemm_q2_K_8x8_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { + const int qk = QK_K; + const 
int nb = n / qk; + const int ncols_interleaved = 8; + const int blocklen = 8; + + assert (n % qk == 0); + assert (nr % 4 == 0); + assert (nc % ncols_interleaved == 0); + + UNUSED(s); + UNUSED(bs); + UNUSED(vx); + UNUSED(vy); + UNUSED(nr); + UNUSED(nc); + UNUSED(nb); + UNUSED(ncols_interleaved); + UNUSED(blocklen); + + float sumf[4][8]; + float sum_minf[4][8]; + int sumi1, sumi2, sumi3, sumi4; + int sumi; + + for (int y = 0; y < nr / 4; y++) { + const block_q8_Kx4 * a_ptr = (const block_q8_Kx4 *) vy + (y * nb); + for (int x = 0; x < nc / ncols_interleaved; x++) { + const block_q2_Kx8 * b_ptr = (const block_q2_Kx8 *) vx + (x * nb); + for (int m = 0; m < 4; m++) { + for (int j = 0; j < ncols_interleaved; j++) { + sumf[m][j] = 0.0; + sum_minf[m][j] = 0.0; + } + } + for (int l = 0; l < nb; l++) { + for (int k = 0; k < (qk / (4 * blocklen)); k++) { + + const uint8_t *scales_0 = b_ptr[l].scales + (k / 4) * 64 ; + const uint8_t *scales_1 = b_ptr[l].scales + (k / 4) * 64 + 16; + const uint8_t *scales_2 = b_ptr[l].scales + (k / 4) * 64 + 32; + const uint8_t *scales_3 = b_ptr[l].scales + (k / 4) * 64 + 48; + for (int m = 0; m < 4; m++) { + for (int j = 0; j < ncols_interleaved; j++) { + sumi1 = 0; + sumi2 = 0; + sumi3 = 0; + sumi4 = 0; + sumi = 0; + int offset = ((k / 2) % 2) + j * 2; + for (int i = 0; i < blocklen; ++i){ + const int v0 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 3); + const int v1 = (int8_t) ((b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] >> 2 ) & 3); + const int v2 = (int8_t) ((b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] >> 4 ) & 3); + const int v3 = (int8_t) ((b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] >> 6 ) & 3); + sumi1 = (v0 * a_ptr[l].qs[(k >> 2) * 512 + (k % 4) * 4 * blocklen + m * blocklen + i]); + sumi2 = (v1 * a_ptr[l].qs[(k >> 2) * 512 + (k % 4) * 4 * blocklen + m * blocklen + i + 128]); + sumi3 = (v2 * a_ptr[l].qs[(k >> 2) * 512 + (k % 4) * 4 * blocklen + m * blocklen + i + 256]); + sumi4 = (v3 * a_ptr[l].qs[(k >> 2) * 512 + (k % 4) * 4 * blocklen + m * blocklen + i + 384]); + sumi1 = sumi1 * (scales_0[offset] & 0xF); + sumi2 = sumi2 * (scales_1[offset] & 0xF); + sumi3 = sumi3 * (scales_2[offset] & 0xF); + sumi4 = sumi4 * (scales_3[offset] & 0xF); + sumi += sumi1 + sumi2 + sumi3 + sumi4; + } + sumf[m][j] += sumi * GGML_FP16_TO_FP32(b_ptr[l].d[j]) * a_ptr[l].d[m]; + } + } + } + for(int sb = 0; sb < 8; sb++) { + const uint8_t *mins = b_ptr[l].scales + sb * 16; + for(int m = 0; m < 4; m++) { + const int16_t *bsums = a_ptr[l].bsums + (sb * 8) + (m * 4) - ((sb % 2) * 6); + for(int j = 0; j < ncols_interleaved; j++) { + int mins_prod = ((mins[j * 2] >> 4) * bsums[0] + (mins[(j * 2)+ 1] >> 4) * bsums[1]); + sum_minf[m][j] += (mins_prod) * GGML_FP16_TO_FP32(b_ptr[l].dmin[j]) * a_ptr[l].d[m]; + } + } + } + } + + for (int m = 0; m < 4; m++) { + for (int j = 0; j < ncols_interleaved; j++) { + s[(y * 4 + m) * bs + x * ncols_interleaved + j] = sumf[m][j] - sum_minf[m][j]; + } + } + } + } +} + + +void ggml_gemm_iq4_nl_4x4_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { + const int qk = QK8_0; + const int nb = n / qk; + const int ncols_interleaved = 4; + const int blocklen = 4; + + assert (n % qk == 0); + assert (nr % 4 == 0); + assert (nc % ncols_interleaved == 0); + + UNUSED(s); + UNUSED(bs); + UNUSED(vx); + UNUSED(vy); + UNUSED(nr); + UNUSED(nc); + UNUSED(nb); + UNUSED(ncols_interleaved); + 
UNUSED(blocklen); + + { + float sumf[4][4]; + int sumi; + + for (int y = 0; y < nr / 4; y++) { + const block_q8_0x4 * a_ptr = (const block_q8_0x4 *) vy + (y * nb); + for (int x = 0; x < nc / ncols_interleaved; x++) { + const block_iq4_nlx4 * b_ptr = (const block_iq4_nlx4 *) vx + (x * nb); + for (int m = 0; m < 4; m++) { + for (int j = 0; j < ncols_interleaved; j++) sumf[m][j] = 0.0; + } + for (int l = 0; l < nb; l++) { + for (int k = 0; k < (qk / (2 * blocklen)); k++) { + for (int m = 0; m < 4; m++) { + for (int j = 0; j < ncols_interleaved; j++) { + sumi = 0; + for (int i = 0; i < blocklen; ++i) { + const int v0 = kvalues_iq4nl[b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0x0F]; + const int v1 = kvalues_iq4nl[b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] >> 4]; + sumi += ((v0 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i]) + + (v1 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i + qk / 2 * 4])); + } + sumf[m][j] += sumi * GGML_CPU_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_CPU_FP16_TO_FP32(a_ptr[l].d[m]); + } + } + } + } + for (int m = 0; m < 4; m++) { + for (int j = 0; j < ncols_interleaved; j++) + s[(y * 4 + m) * bs + x * ncols_interleaved + j] = sumf[m][j]; + } + } + } + } +} + +} // extern "C" + +static block_q4_0x4 make_block_q4_0x4(block_q4_0 * in, unsigned int blck_size_interleave) { + block_q4_0x4 out; + + for (int i = 0; i < 4; i++) { + out.d[i] = in[i].d; + } + + const int end = QK4_0 * 2 / blck_size_interleave; + + if (blck_size_interleave == 8) { + const uint64_t xor_mask = 0x8888888888888888ULL; + for (int i = 0; i < end; ++i) { + int src_id = i % 4; + int src_offset = (i / 4) * blck_size_interleave; + int dst_offset = i * blck_size_interleave; + + uint64_t elems; + // Using memcpy to avoid unaligned memory accesses + memcpy(&elems, &in[src_id].qs[src_offset], sizeof(uint64_t)); + elems ^= xor_mask; + memcpy(&out.qs[dst_offset], &elems, sizeof(uint64_t)); + } + } else if (blck_size_interleave == 4) { + const uint32_t xor_mask = 0x88888888; + for (int i = 0; i < end; ++i) { + int src_id = i % 4; + int src_offset = (i / 4) * blck_size_interleave; + int dst_offset = i * blck_size_interleave; + + uint32_t elems; + memcpy(&elems, &in[src_id].qs[src_offset], sizeof(uint32_t)); + elems ^= xor_mask; + memcpy(&out.qs[dst_offset], &elems, sizeof(uint32_t)); + } + } else { + GGML_ASSERT(false); + } + + return out; +} + +// interleave 8 block_q4_0s in blocks of blck_size_interleave +// returns an interleaved block_q4_0x8 +// in the interleaved block_q4_0x8, place deltas for 8 block_q4_0 blocks +// first, then interleave quants from 8 block_q4_0s in blocks of blck_size_interleave +static block_q4_0x8 make_block_q4_0x8(block_q4_0 * in, unsigned int blck_size_interleave) { + block_q4_0x8 out; + + for (int i = 0; i < 8; i++) { + out.d[i] = in[i].d; + } + + const int end = QK4_0 * 4 / blck_size_interleave; + const uint64_t xor_mask = 0x8888888888888888ULL; + + for (int i = 0; i < end; ++i) { + int src_id = i % 8; + int src_offset = (i / 8) * blck_size_interleave; + int dst_offset = i * blck_size_interleave; + + uint64_t elems; + memcpy(&elems, &in[src_id].qs[src_offset], sizeof(uint64_t)); + elems ^= xor_mask; + memcpy(&out.qs[dst_offset], &elems, sizeof(uint64_t)); + } + + return out; +} + +static block_q4_Kx8 make_block_q4_Kx8(block_q4_K * in, unsigned int blck_size_interleave) { + block_q4_Kx8 out; + //Delta(scale) and dmin values of the eight Q4_K structures are copied onto the output interleaved structure + for (int i = 0; i < 8; i++) { + out.d[i] = 
in[i].GGML_COMMON_AGGR_U.GGML_COMMON_AGGR_S.d; + } + + for (int i = 0; i < 8; i++) { + out.dmin[i] = in[i].GGML_COMMON_AGGR_U.GGML_COMMON_AGGR_S.dmin; + } + + const int end = QK_K * 4 / blck_size_interleave; + + // Interleave Q4_K quants by taking 8 bytes at a time + for (int i = 0; i < end; ++i) { + int src_id = i % 8; + int src_offset = (i / 8) * blck_size_interleave; + int dst_offset = i * blck_size_interleave; + + uint64_t elems; + memcpy(&elems, &in[src_id].qs[src_offset], sizeof(uint64_t)); + memcpy(&out.qs[dst_offset], &elems, sizeof(uint64_t)); + } + + // The below logic is designed so as to unpack and rearrange scales and mins values in Q4_K + // Currently the Q4_K structure has 8 scales and 8 mins packed in 12 bytes ( 6 bits for each value) + // The output Q4_Kx8 structure has 96 bytes + // Every 12 byte is packed such that it contains scales and mins for corresponding sub blocks from Q4_K structure + // For eg - First 12 bytes contains 8 scales and 8 mins - each of first sub block from different Q4_K structures + uint8_t s[8], m[8]; + + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 8; j++) { + s[j] = in[j].scales[i] & 63; + m[j] = in[j].scales[i + 4] & 63; + } + + out.scales[i * 12] = (s[0] & 63) + ((s[4] & 48) << 2); + out.scales[i * 12 + 1] = (s[1] & 63) + ((s[5] & 48) << 2); + out.scales[i * 12 + 2] = (s[2] & 63) + ((s[6] & 48) << 2); + out.scales[i * 12 + 3] = (s[3] & 63) + ((s[7] & 48) << 2); + out.scales[i * 12 + 4] = (m[0] & 63) + ((m[4] & 48) << 2); + out.scales[i * 12 + 5] = (m[1] & 63) + ((m[5] & 48) << 2); + out.scales[i * 12 + 6] = (m[2] & 63) + ((m[6] & 48) << 2); + out.scales[i * 12 + 7] = (m[3] & 63) + ((m[7] & 48) << 2); + out.scales[i * 12 + 8] = (s[4] & 15) + ((m[4] & 15) << 4); + out.scales[i * 12 + 9] = (s[5] & 15) + ((m[5] & 15) << 4); + out.scales[i * 12 + 10] = (s[6] & 15) + ((m[6] & 15) << 4); + out.scales[i * 12 + 11] = (s[7] & 15) + ((m[7] & 15) << 4); + + } + + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 8; j++) { + s[j] = ((in[j].scales[i] & 192) >> 2) | (in[j].scales[i+8] & 15); + m[j] = ((in[j].scales[i + 4] & 192) >> 2) | ((in[j].scales[i+8] & 240) >> 4); + } + + out.scales[i * 12 + 48] = (s[0] & 63) + ((s[4] & 48) << 2); + out.scales[i * 12 + 49] = (s[1] & 63) + ((s[5] & 48) << 2); + out.scales[i * 12 + 50] = (s[2] & 63) + ((s[6] & 48) << 2); + out.scales[i * 12 + 51] = (s[3] & 63) + ((s[7] & 48) << 2); + out.scales[i * 12 + 52] = (m[0] & 63) + ((m[4] & 48) << 2); + out.scales[i * 12 + 53] = (m[1] & 63) + ((m[5] & 48) << 2); + out.scales[i * 12 + 54] = (m[2] & 63) + ((m[6] & 48) << 2); + out.scales[i * 12 + 55] = (m[3] & 63) + ((m[7] & 48) << 2); + out.scales[i * 12 + 56] = (s[4] & 15) + ((m[4] & 15) << 4); + out.scales[i * 12 + 57] = (s[5] & 15) + ((m[5] & 15) << 4); + out.scales[i * 12 + 58] = (s[6] & 15) + ((m[6] & 15) << 4); + out.scales[i * 12 + 59] = (s[7] & 15) + ((m[7] & 15) << 4); + + } + + return out; +} + +static block_q2_Kx8 make_block_q2_Kx8(block_q2_K * in, unsigned int blck_size_interleave) { + block_q2_Kx8 out; + + // Delta(scale) and dmin values of the eight Q2_K structures are copied onto the output interleaved structure + for (int i = 0; i < 8; i++) { + out.d[i] = in[i].GGML_COMMON_AGGR_U.GGML_COMMON_AGGR_S.d; + } + + for (int i = 0; i < 8; i++) { + out.dmin[i] = in[i].GGML_COMMON_AGGR_U.GGML_COMMON_AGGR_S.dmin; + } + + const int end = QK_K * 2 / blck_size_interleave; + + // Interleave Q2_K quants by taking 8 bytes at a time + for (int i = 0; i < end; ++i) { + int src_id = i % 8; + int src_offset = (i / 8) * 
blck_size_interleave; + int dst_offset = i * blck_size_interleave; + + uint64_t elems; + memcpy(&elems, &in[src_id].qs[src_offset], sizeof(uint64_t)); + memcpy(&out.qs[dst_offset], &elems, sizeof(uint64_t)); + } + + // The below logic is designed so as to unpack and rearrange scales and mins values in Q2_K + // Currently the Q2_K structure has 16 scales and 16 mins packed in 16 bytes ( 4 bits for each value) + // The output Q2_Kx8 structure has 128 bytes for storing scales and mins + // Every 16 byte is packed such that it contains scales and mins for corresponding sub blocks from Q2_K structure + // For eg - First 16 bytes contains 16 scales and 16 mins - each of first and second sub blocks from different Q2_K structures + + for(int i = 0; i < 128; i++){ + + // Index for selecting which q2k super block + int src1 = (i % 16) / 2; + // Index for selecting scale + int src2 = ((i / 16) * 2) + (i % 2); + + out.scales[i] = in[src1].scales[src2]; + } + return out; + +} + +static int repack_q4_0_to_q4_0_4_bl(struct ggml_tensor * t, int interleave_block, const void * GGML_RESTRICT data, size_t data_size) { + GGML_ASSERT(t->type == GGML_TYPE_Q4_0); + GGML_ASSERT(interleave_block == 4 || interleave_block == 8); + constexpr int nrows_interleaved = 4; + + block_q4_0x4 * dst = (block_q4_0x4 *)t->data; + const block_q4_0 * src = (const block_q4_0 *)data; + block_q4_0 dst_tmp[4]; + int nrow = ggml_nrows(t); + int nblocks = t->ne[0] / QK4_0; + + GGML_ASSERT(data_size == nrow * nblocks * sizeof(block_q4_0)); + + if (t->ne[1] % nrows_interleaved != 0 || t->ne[0] % 8 != 0) { + return -1; + } + + for (int b = 0; b < nrow; b += nrows_interleaved) { + for (int64_t x = 0; x < nblocks; x++) { + for (int i = 0; i < nrows_interleaved; i++) { + dst_tmp[i] = src[x + i * nblocks]; + } + *dst++ = make_block_q4_0x4(dst_tmp, interleave_block); + } + src += nrows_interleaved * nblocks; + } + return 0; + + GGML_UNUSED(data_size); +} +static int repack_q4_K_to_q4_K_8_bl(struct ggml_tensor * t, int interleave_block, const void * GGML_RESTRICT data, size_t data_size) { + GGML_ASSERT(t->type == GGML_TYPE_Q4_K); + GGML_ASSERT(interleave_block == 8); + constexpr int nrows_interleaved = 8; + + block_q4_Kx8 * dst = (block_q4_Kx8*)t->data; + const block_q4_K * src = (const block_q4_K*) data; + block_q4_K dst_tmp[8]; + int nrow = ggml_nrows(t); + int nblocks = t->ne[0] / QK_K; + + GGML_ASSERT(data_size == nrow * nblocks * sizeof(block_q4_K)); + + if (t->ne[1] % nrows_interleaved != 0 || t->ne[0] % 8 != 0) { + return -1; + } + + for (int b = 0; b < nrow; b += nrows_interleaved) { + for (int64_t x = 0; x < nblocks; x++) { + for (int i = 0; i < nrows_interleaved; i++ ) { + dst_tmp[i] = src[x + i * nblocks]; + } + *dst++ = make_block_q4_Kx8(dst_tmp, interleave_block); + } + src += nrows_interleaved * nblocks; + } + return 0; + + GGML_UNUSED(data_size); +} + +static int repack_q2_K_to_q2_K_8_bl(struct ggml_tensor * t, int interleave_block, const void * GGML_RESTRICT data, size_t data_size) { + GGML_ASSERT(t->type == GGML_TYPE_Q2_K); + GGML_ASSERT(interleave_block == 8); + constexpr int nrows_interleaved = 8; + + block_q2_Kx8 * dst = (block_q2_Kx8*)t->data; + const block_q2_K * src = (const block_q2_K*) data; + block_q2_K dst_tmp[8]; + int nrow = ggml_nrows(t); + int nblocks = t->ne[0] / QK_K; + + GGML_ASSERT(data_size == nrow * nblocks * sizeof(block_q2_K)); + + if (t->ne[1] % nrows_interleaved != 0 || t->ne[0] % 8 != 0) { + return -1; + } + + for (int b = 0; b < nrow; b += nrows_interleaved) { + for (int64_t x = 0; x < nblocks; x++) { + 
for (int i = 0; i < nrows_interleaved; i++ ) { + dst_tmp[i] = src[x + i * nblocks]; + } + *dst++ = make_block_q2_Kx8(dst_tmp, interleave_block); + } + src += nrows_interleaved * nblocks; + } + return 0; + + GGML_UNUSED(data_size); +} + +static int repack_q4_0_to_q4_0_8_bl(struct ggml_tensor * t, int interleave_block, const void * GGML_RESTRICT data, size_t data_size) { + GGML_ASSERT(t->type == GGML_TYPE_Q4_0); + GGML_ASSERT(interleave_block == 8); + constexpr int nrows_interleaved = 8; + + block_q4_0x8 * dst = (block_q4_0x8*)t->data; + const block_q4_0 * src = (const block_q4_0*) data; + block_q4_0 dst_tmp[8]; + int nrow = ggml_nrows(t); + int nblocks = t->ne[0] / QK4_0; + + GGML_ASSERT(data_size == nrow * nblocks * sizeof(block_q4_0)); + + if (t->ne[1] % nrows_interleaved != 0 || t->ne[0] % 8 != 0) { + return -1; + } + + for (int b = 0; b < nrow; b += nrows_interleaved) { + for (int64_t x = 0; x < nblocks; x++) { + for (int i = 0; i < nrows_interleaved; i++ ) { + dst_tmp[i] = src[x + i * nblocks]; + } + *dst++ = make_block_q4_0x8(dst_tmp, interleave_block); + } + src += nrows_interleaved * nblocks; + } + return 0; + + GGML_UNUSED(data_size); +} + +static block_iq4_nlx4 make_block_iq4_nlx4(block_iq4_nl * in, unsigned int blck_size_interleave) { + block_iq4_nlx4 out; + + for (int i = 0; i < 4; i++) { + out.d[i] = in[i].d; + } + + const int end = QK4_NL * 2 / blck_size_interleave; + + // TODO: this branch seems wrong + //if (blck_size_interleave == 8) { + // for (int i = 0; i < end; ++i) { + // int src_id = i % 4; + // int src_offset = (i / 4) * blck_size_interleave; + // int dst_offset = i * blck_size_interleave; + + // // Using memcpy to avoid unaligned memory accesses + // memcpy(&out.qs[dst_offset], &in[src_id].qs[src_offset], sizeof(uint64_t)); + // } + //} else + if (blck_size_interleave == 4) { + for (int i = 0; i < end; ++i) { + int src_id = i % 4; + int src_offset = (i / 4) * blck_size_interleave; + int dst_offset = i * blck_size_interleave; + + memcpy(&out.qs[dst_offset], &in[src_id].qs[src_offset], sizeof(uint32_t)); + } + } else { + GGML_ASSERT(false); + } + + return out; +} + +static int repack_iq4_nl_to_iq4_nl_4_bl(struct ggml_tensor * t, int interleave_block, const void * GGML_RESTRICT data, size_t data_size) { + GGML_ASSERT(t->type == GGML_TYPE_IQ4_NL); + //GGML_ASSERT(interleave_block == 4 || interleave_block == 8); + GGML_ASSERT(interleave_block == 4); + + block_iq4_nlx4 * dst = (block_iq4_nlx4 *)t->data; + const block_iq4_nl * src = (const block_iq4_nl *)data; + block_iq4_nl dst_tmp[4]; + int nrow = ggml_nrows(t); + int nrows_interleaved = 4; + int nblocks = t->ne[0] / QK4_0; + + GGML_ASSERT(data_size == nrow * nblocks * sizeof(block_iq4_nl)); + + if (t->ne[1] % nrows_interleaved != 0 || t->ne[0] % 8 != 0) { + return -1; + } + + for (int b = 0; b < nrow; b += nrows_interleaved) { + for (int64_t x = 0; x < nblocks; x++) { + for (int i = 0; i < nrows_interleaved; i++) { + dst_tmp[i] = src[x + i * nblocks]; + } + *dst++ = make_block_iq4_nlx4(dst_tmp, interleave_block); + } + src += nrows_interleaved * nblocks; + } + return 0; + + GGML_UNUSED(data_size); +} + +namespace ggml::cpu::repack { +// repack +template <typename BLOC_TYPE, int64_t INTER_SIZE, int64_t NB_COLS> +int repack(struct ggml_tensor *, const void *, size_t); + +// TODO: generalise.
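The primary templates here are declared but never defined: every supported (BLOC_TYPE, INTER_SIZE, NB_COLS) combination must appear as an explicit specialization below, so an unsupported combination fails at link time instead of silently running the wrong kernel. A minimal sketch of the same dispatch pattern (all names below are illustrative, not part of the vendored code):

    #include <cstdio>

    // primary template: declared only, never defined
    template <int NB_COLS, int INTER_SIZE>
    int repack_rows(const float * src, int n);

    // each supported layout is an explicit specialization
    template <>
    int repack_rows<4, 8>(const float * src, int n) {
        (void) src;
        std::printf("repacking %d values into a 4x8 interleaved layout\n", n);
        return 0;
    }

    int main() {
        float data[32] = {};
        return repack_rows<4, 8>(data, 32); // repack_rows<8, 8>(...) would not link
    }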
+template <> int repack<block_q4_0, 4, 4>(struct ggml_tensor * t, const void * data, size_t data_size) { + return repack_q4_0_to_q4_0_4_bl(t, 4, data, data_size); +} + +template <> int repack<block_q4_0, 8, 4>(struct ggml_tensor * t, const void * data, size_t data_size) { + return repack_q4_0_to_q4_0_4_bl(t, 8, data, data_size); +} + +template <> int repack<block_q4_0, 8, 8>(struct ggml_tensor * t, const void * data, size_t data_size) { + return repack_q4_0_to_q4_0_8_bl(t, 8, data, data_size); +} + +template <> int repack<block_q4_K, 8, 8>(struct ggml_tensor * t, const void * data, size_t data_size) { + return repack_q4_K_to_q4_K_8_bl(t, 8, data, data_size); +} + +template <> int repack<block_q2_K, 8, 8>(struct ggml_tensor * t, const void * data, size_t data_size) { + return repack_q2_K_to_q2_K_8_bl(t, 8, data, data_size); +} + +template <> int repack<block_iq4_nl, 4, 4>(struct ggml_tensor * t, const void * data, size_t data_size) { + return repack_iq4_nl_to_iq4_nl_4_bl(t, 4, data, data_size); +} + +// TODO: needs to be revisited +//template <> int repack<block_iq4_nl, 8, 4>(struct ggml_tensor * t, const void * data, size_t data_size) { +// return repack_iq4_nl_to_iq4_nl_4_bl(t, 8, data, data_size); +//} + +// gemv +template <typename BLOC_TYPE, int64_t INTER_SIZE, int64_t NB_COLS, ggml_type PARAM_TYPE> +void gemv(int, float *, size_t, const void *, const void *, int, int); + +template <> void gemv<block_q4_0, 4, 4, GGML_TYPE_Q8_0>(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) { + ggml_gemv_q4_0_4x4_q8_0(n, s, bs, vx, vy, nr, nc); +} + +template <> void gemv<block_q4_0, 8, 4, GGML_TYPE_Q8_0>(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) { + ggml_gemv_q4_0_4x8_q8_0(n, s, bs, vx, vy, nr, nc); +} + +template <> void gemv<block_q4_0, 8, 8, GGML_TYPE_Q8_0>(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) { + ggml_gemv_q4_0_8x8_q8_0(n, s, bs, vx, vy, nr, nc); +} + +template <> void gemv<block_q4_K, 8, 8, GGML_TYPE_Q8_K>(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) { + ggml_gemv_q4_K_8x8_q8_K(n, s, bs, vx, vy, nr, nc); +} + +template <> void gemv<block_q2_K, 8, 8, GGML_TYPE_Q8_K>(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) { + ggml_gemv_q2_K_8x8_q8_K(n, s, bs, vx, vy, nr, nc); +} + +template <> void gemv<block_iq4_nl, 4, 4, GGML_TYPE_Q8_0>(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) { + ggml_gemv_iq4_nl_4x4_q8_0(n, s, bs, vx, vy, nr, nc); +} + +// gemm +template <typename BLOC_TYPE, int64_t INTER_SIZE, int64_t NB_COLS, ggml_type PARAM_TYPE> +void gemm(int, float *, size_t, const void *, const void *, int, int); + +template <> void gemm<block_q4_0, 4, 4, GGML_TYPE_Q8_0>(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) { + ggml_gemm_q4_0_4x4_q8_0(n, s, bs, vx, vy, nr, nc); +} + +template <> void gemm<block_q4_0, 8, 4, GGML_TYPE_Q8_0>(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) { + ggml_gemm_q4_0_4x8_q8_0(n, s, bs, vx, vy, nr, nc); +} + +template <> void gemm<block_q4_0, 8, 8, GGML_TYPE_Q8_0>(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) { + ggml_gemm_q4_0_8x8_q8_0(n, s, bs, vx, vy, nr, nc); +} + +template <> void gemm<block_q4_K, 8, 8, GGML_TYPE_Q8_K>(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) { + ggml_gemm_q4_K_8x8_q8_K(n, s, bs, vx, vy, nr, nc); +} + +template <> void gemm<block_q2_K, 8, 8, GGML_TYPE_Q8_K>(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) { + ggml_gemm_q2_K_8x8_q8_K(n, s, bs, vx, vy, nr, nc); +} + +template <> void gemm<block_iq4_nl, 4, 4, GGML_TYPE_Q8_0>(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) { + ggml_gemm_iq4_nl_4x4_q8_0(n, s, bs, vx, vy, nr, nc); +} + +class tensor_traits_base : public ggml::cpu::tensor_traits { + public: + virtual int repack(struct ggml_tensor * t, const void * data, size_t data_size) = 0; +};
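The work_size hook that follows reserves scratch for quantizing the f32 activations (src1) into PARAM_TYPE before the kernels run, essentially one quantized row per src1 row. A rough sketch of that arithmetic for Q8_0, assuming the standard layout of one fp16 delta plus 32 int8 quants per 32-value block (constants renamed so they do not clash with the real headers):

    #include <cstdint>
    #include <cstdio>

    // bytes needed to hold ne float values quantized as Q8_0:
    // one 2-byte delta plus 32 int8 quants per 32-value block
    constexpr int64_t MY_QK8_0 = 32;
    constexpr int64_t my_q8_0_row_size(int64_t ne) {
        return (ne / MY_QK8_0) * (2 + MY_QK8_0);
    }

    int main() {
        // a 4096-wide activation row needs 4352 bytes of scratch
        std::printf("%lld\n", (long long) my_q8_0_row_size(4096));
        return 0;
    }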
+ +template <typename BLOC_TYPE, int64_t INTER_SIZE, int64_t NB_COLS, ggml_type PARAM_TYPE> class tensor_traits : public tensor_traits_base { + + bool work_size(int /* n_threads */, const struct ggml_tensor * op, size_t & size) override { + // not really a GGML_TYPE_Q8_0 but same size. + switch (op->op) { + case GGML_OP_MUL_MAT: + { + size = ggml_row_size(PARAM_TYPE, ggml_nelements(op->src[1])); + return true; + } + case GGML_OP_MUL_MAT_ID: + { + size = ggml_row_size(PARAM_TYPE, ggml_nelements(op->src[1])); + size = GGML_PAD(size, sizeof(int64_t)); // + padding for next block. + + const int64_t ne02 = op->src[0]->ne[2]; // n_as, n_expert + const int64_t ne12 = op->src[1]->ne[2]; // n_tokens + + const size_t sizeof_mmid_row_mapping = sizeof(int64_t); + + size += sizeof_mmid_row_mapping*ne02*(ne12 + 1); + + return true; + } + default: + // GGML_ABORT("fatal error"); + break; + } + return false; + } + + bool compute_forward(struct ggml_compute_params * params, struct ggml_tensor * op) override { + switch (op->op) { + case GGML_OP_MUL_MAT: + forward_mul_mat(params, op); + return true; + case GGML_OP_MUL_MAT_ID: + forward_mul_mat_id(params, op); + return true; + default: + // GGML_ABORT("fatal error"); + break; + } + return false; + } + + void forward_mul_mat(ggml_compute_params * params, ggml_tensor * op) { + const ggml_tensor * src0 = op->src[0]; + const ggml_tensor * src1 = op->src[1]; + ggml_tensor * dst = op; + + GGML_TENSOR_BINARY_OP_LOCALS + + const int ith = params->ith; + const int nth = params->nth; + + GGML_ASSERT(ne0 == ne01); + GGML_ASSERT(ne1 == ne11); + GGML_ASSERT(ne2 == ne12); + GGML_ASSERT(ne3 == ne13); + + // dst cannot be transposed or permuted + GGML_ASSERT(nb0 == sizeof(float)); + GGML_ASSERT(nb0 <= nb1); + GGML_ASSERT(nb1 <= nb2); + GGML_ASSERT(nb2 <= nb3); + + GGML_ASSERT(src1->type == GGML_TYPE_F32); + + GGML_ASSERT(ggml_n_dims(op->src[0]) == 2); + // GGML_ASSERT(ggml_n_dims(op->src[1]) == 2); + + char * wdata = static_cast<char *>(params->wdata); + const size_t nbw1 = ggml_row_size(PARAM_TYPE, ne10); + + assert(params->wsize >= nbw1 * ne11); + + const ggml_from_float_t from_float = ggml_get_type_traits_cpu(PARAM_TYPE)->from_float; + + int64_t i11_processed = 0; + for (int64_t i11 = ith * 4; i11 < ne11 - ne11 % 4; i11 += nth * 4) { + ggml_quantize_mat_t<INTER_SIZE, PARAM_TYPE>((float *) ((char *) src1->data + i11 * nb11), (void *) (wdata + i11 * nbw1), 4, ne10); + } + + i11_processed = ne11 - ne11 % 4; + for (int64_t i11 = i11_processed + ith; i11 < ne11; i11 += nth) { + from_float((float *) ((char *) src1->data + i11 * nb11), (void *) (wdata + i11 * nbw1), ne10); + } + + ggml_barrier(params->threadpool); + + const void * src1_wdata = params->wdata; + const size_t src1_col_stride = ggml_row_size(PARAM_TYPE, ne10); + int64_t src0_start = (ith * ne01) / nth; + int64_t src0_end = ((ith + 1) * ne01) / nth; + src0_start = (src0_start % NB_COLS) ? src0_start + NB_COLS - (src0_start % NB_COLS) : src0_start; + src0_end = (src0_end % NB_COLS) ? src0_end + NB_COLS - (src0_end % NB_COLS) : src0_end; + if (src0_start >= src0_end) { + return; + } + + // If there are more than three rows in src1, use gemm; otherwise, use gemv.
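The quantize loop above packed src1 rows in groups of four (block_q8_0x4 / block_q8_Kx4), so the gemm path can only consume a multiple of four rows; whatever is left over, and any matrix with three rows or fewer, goes through gemv one row at a time. A small sketch of the split, assuming ne11 = 10 (names illustrative):

    #include <cstdio>

    int main() {
        const int ne11   = 10;                // rows of src1
        const int packed = ne11 - ne11 % 4;   // 8 rows: gemm, four at a time
        std::printf("gemm handles rows 0..%d\n", packed - 1);
        for (int iter = packed; iter < ne11; iter++) {
            std::printf("gemv handles row %d\n", iter); // leftover rows, one by one
        }
        return 0;
    }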
+ if (ne11 > 3) { + gemm<BLOC_TYPE, INTER_SIZE, NB_COLS, PARAM_TYPE>(ne00, + (float *) ((char *) dst->data) + src0_start, ne01, + (const char *) src0->data + src0_start * nb01, + (const char *) src1_wdata, ne11 - ne11 % 4, src0_end - src0_start); + } + for (int iter = ne11 - ne11 % 4; iter < ne11; iter++) { + gemv<BLOC_TYPE, INTER_SIZE, NB_COLS, PARAM_TYPE>(ne00, + (float *) ((char *) dst->data + (iter * nb1)) + src0_start, ne01, + (const char *) src0->data + src0_start * nb01, + (const char *) src1_wdata + (src1_col_stride * iter), 1, + src0_end - src0_start); + } + }
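forward_mul_mat_id, which follows, handles MUL_MAT_ID (mixture-of-experts routing): thread 0 first groups src1 rows by the expert matrix that the ids tensor selects, then each group is multiplied with one gemv call per row. A toy sketch of the grouping step only (plain arrays instead of the ggml work buffer; all values invented):

    #include <cstdio>

    int main() {
        const int n_as  = 3;                    // number of experts
        const int n_tok = 5;                    // tokens in the batch
        const int ids[n_tok] = {2, 0, 2, 1, 2}; // expert chosen per token

        int counts[n_as] = {0};
        int rows[n_as][n_tok];                  // rows[e] lists tokens routed to expert e
        for (int t = 0; t < n_tok; t++) {
            rows[ids[t]][counts[ids[t]]++] = t;
        }
        for (int e = 0; e < n_as; e++) {
            std::printf("expert %d: %d row(s), first token %d\n",
                        e, counts[e], counts[e] ? rows[e][0] : -1);
        }
        return 0;
    }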
+ + void forward_mul_mat_id(ggml_compute_params * params, ggml_tensor * op) { + const ggml_tensor * src0 = op->src[0]; + const ggml_tensor * src1 = op->src[1]; + const ggml_tensor * ids = op->src[2]; + ggml_tensor * dst = op; + + GGML_TENSOR_BINARY_OP_LOCALS + + const int ith = params->ith; + const int nth = params->nth; + + const ggml_from_float_t from_float = ggml_get_type_traits_cpu(PARAM_TYPE)->from_float; + + // we don't support permuted src0 or src1 + GGML_ASSERT(nb00 == ggml_type_size(src0->type)); + GGML_ASSERT(nb10 == ggml_type_size(src1->type)); + + // dst cannot be transposed or permuted + GGML_ASSERT(nb0 == sizeof(float)); + GGML_ASSERT(nb0 <= nb1); + GGML_ASSERT(nb1 <= nb2); + GGML_ASSERT(nb2 <= nb3); + + GGML_ASSERT(ne03 == 1); + GGML_ASSERT(ne13 == 1); + GGML_ASSERT(ne3 == 1); + + GGML_ASSERT(src1->type == GGML_TYPE_F32); + + // row groups + const int n_ids = ids->ne[0]; // n_expert_used + const int n_as = ne02; // n_expert + + const size_t nbw1 = ggml_row_size(PARAM_TYPE, ne10); + const size_t nbw2 = nbw1*ne11; + const size_t nbw3 = nbw2*ne12; + + struct mmid_row_mapping { + int32_t i1; + int32_t i2; + }; + + GGML_ASSERT(params->wsize >= + (GGML_PAD(nbw3, sizeof(int64_t)) + + n_as*(ne12 + 1)*sizeof(mmid_row_mapping)) + ); + + auto * wdata = (char *)params->wdata; + auto * wdata_src1_end = (char *)wdata + GGML_PAD(nbw3, sizeof(int64_t)); + + // total of [n_as][ne12 + 1] elements of type mmid_row_mapping (2*int32_t = int64_t) + auto * matrix_row_counts = (int64_t *) (wdata_src1_end); // [n_as] + struct mmid_row_mapping * matrix_rows = (struct mmid_row_mapping *) (matrix_row_counts + n_as); // [n_as][ne12] + + // src1: float32 => param type + for (int64_t i12 = 0; i12 < ne12; ++i12) { + for (int64_t i11 = ith; i11 < ne11; i11 += nth) { + from_float((float *)((char *) src1->data + i12 * nb12 + i11 * nb11), + (void *) (wdata + i12 * nbw2 + i11 * nbw1), + ne10); + } + } + +#define MMID_MATRIX_ROW(row_id, i1) matrix_rows[(row_id) * ne12 + (i1)] + + if (ith == 0) { + // initialize matrix_row_counts + memset(matrix_row_counts, 0, n_as * sizeof(int64_t)); + + // group rows by src0 matrix + for (int32_t iid1 = 0; iid1 < ids->ne[1]; ++iid1) { + for (int32_t id = 0; id < n_ids; ++id) { + const int32_t i02 = + *(const int32_t *) ((const char *) ids->data + iid1 * ids->nb[1] + id * ids->nb[0]); + + GGML_ASSERT(i02 >= 0 && i02 < n_as); + + MMID_MATRIX_ROW(i02, matrix_row_counts[i02]) = { id, iid1 }; + matrix_row_counts[i02] += 1; + } + } + } + + ggml_barrier(params->threadpool); + + // compute each matrix multiplication in sequence + for (int cur_a = 0; cur_a < n_as; ++cur_a) { + const int64_t cne1 = matrix_row_counts[cur_a]; + + if (cne1 == 0) { + continue; + } + + const auto * src0_cur = (const char *) src0->data + cur_a*nb02; + + //const int64_t nr0 = ne01; // src0 rows + const int64_t nr1 = cne1; // src1 rows + + int64_t src0_cur_start = (ith * ne01) / nth; + int64_t src0_cur_end = ((ith + 1) * ne01) / nth; + + src0_cur_start = (src0_cur_start % NB_COLS) ? src0_cur_start + NB_COLS - (src0_cur_start % NB_COLS) : src0_cur_start; + src0_cur_end = (src0_cur_end % NB_COLS) ? src0_cur_end + NB_COLS - (src0_cur_end % NB_COLS) : src0_cur_end; + + if (src0_cur_start >= src0_cur_end) { + return; + } + + for (int ir1 = 0; ir1 < nr1; ir1++) { + struct mmid_row_mapping row_mapping = MMID_MATRIX_ROW(cur_a, ir1); + + const int id = row_mapping.i1; // selected expert index + + const int64_t i11 = id % ne11; + const int64_t i12 = row_mapping.i2; // row index in src1 + + const int64_t i1 = id; // selected expert index + const int64_t i2 = i12; // row + + const auto * src1_col = (const char *) wdata + (i11 * nbw1 + i12 * nbw2); + + gemv<BLOC_TYPE, INTER_SIZE, NB_COLS, PARAM_TYPE>(ne00, + (float *)((char *) dst->data + (i1 * nb1 + i2 * nb2)) + src0_cur_start, ne01, + src0_cur + src0_cur_start * nb01, + src1_col, 1, src0_cur_end - src0_cur_start); + } + } +#undef MMID_MATRIX_ROW + } + + int repack(struct ggml_tensor * t, const void * data, size_t data_size) override { + GGML_LOG_DEBUG("%s: repack tensor %s with %s_%dx%d\n", __func__, t->name, ggml_type_name(t->type), + (int) NB_COLS, (int) INTER_SIZE); + return ggml::cpu::repack::repack<BLOC_TYPE, INTER_SIZE, NB_COLS>(t, data, data_size); + } +}; + +} // namespace ggml::cpu::repack + +static const ggml::cpu::tensor_traits * ggml_repack_get_optimal_repack_type(const struct ggml_tensor * cur) { + + // instance for Q4 + static const ggml::cpu::repack::tensor_traits<block_q4_0, 4, 4, GGML_TYPE_Q8_0> q4_0_4x4_q8_0; + static const ggml::cpu::repack::tensor_traits<block_q4_0, 8, 4, GGML_TYPE_Q8_0> q4_0_4x8_q8_0; + static const ggml::cpu::repack::tensor_traits<block_q4_0, 8, 8, GGML_TYPE_Q8_0> q4_0_8x8_q8_0; + static const ggml::cpu::repack::tensor_traits<block_q4_K, 8, 8, GGML_TYPE_Q8_K> q4_K_8x8_q8_K; + + // instance for Q2 + static const ggml::cpu::repack::tensor_traits<block_q2_K, 8, 8, GGML_TYPE_Q8_K> q2_K_8x8_q8_K; + + // instance for IQ4 + static const ggml::cpu::repack::tensor_traits<block_iq4_nl, 4, 4, GGML_TYPE_Q8_0> iq4_nl_4x4_q8_0; + + if (cur->type == GGML_TYPE_Q4_0) { + if (ggml_cpu_has_avx2() || (ggml_cpu_has_sve() && ggml_cpu_has_matmul_int8() && ggml_cpu_get_sve_cnt() == QK8_0)) { + if (cur->ne[1] % 8 == 0) { + return &q4_0_8x8_q8_0; + } + } + if (ggml_cpu_has_neon() && ggml_cpu_has_matmul_int8()) { + if (cur->ne[1] % 4 == 0) { + return &q4_0_4x8_q8_0; + } + } + if (ggml_cpu_has_neon() && ggml_cpu_has_dotprod()) { + if (cur->ne[1] % 4 == 0) { + return &q4_0_4x4_q8_0; + } + } + } else if (cur->type == GGML_TYPE_Q4_K) { + if (ggml_cpu_has_avx2()) { + if (cur->ne[1] % 8 == 0) { + return &q4_K_8x8_q8_K; + } + } + } else if (cur->type == GGML_TYPE_Q2_K) { + if (ggml_cpu_has_avx512()) { + if (cur->ne[1] % 8 == 0) { + return &q2_K_8x8_q8_K; + } + } + } else if (cur->type == GGML_TYPE_IQ4_NL) { + if (ggml_cpu_has_neon() && ggml_cpu_has_dotprod()) { + if (cur->ne[1] % 4 == 0) { + return &iq4_nl_4x4_q8_0; + } + } + } + + return nullptr; +} + +static enum ggml_status ggml_backend_cpu_repack_buffer_init_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor) { + tensor->extra = (void *) const_cast<ggml::cpu::tensor_traits *>(ggml_repack_get_optimal_repack_type(tensor)); + + GGML_UNUSED(buffer); + return GGML_STATUS_SUCCESS; +} + +static void ggml_backend_cpu_repack_buffer_set_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, + const void * data, size_t offset, size_t size) { + GGML_ASSERT(offset == 0); + GGML_ASSERT(size == ggml_nbytes(tensor)); + + auto tensor_traits = (ggml::cpu::repack::tensor_traits_base *) tensor->extra; + auto OK = tensor_traits->repack(tensor, data, size); + + GGML_ASSERT(OK == 0); + GGML_UNUSED(buffer); +} + +static const char * ggml_backend_cpu_repack_buffer_type_get_name(ggml_backend_buffer_type_t buft) { + return "CPU_REPACK"; + + GGML_UNUSED(buft); +}
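The two buffer hooks above are the whole integration point with the backend API: init_tensor stashes the traits object chosen for the tensor in tensor->extra, and set_tensor intercepts the weight upload and repacks it into the interleaved layout instead of doing a plain copy. The same intercept pattern in miniature (simplified stand-ins, not the ggml types):

    #include <cstddef>
    #include <cstdio>

    struct Tensor { void * extra = nullptr; };

    struct Traits {
        virtual int repack(Tensor & t, const void * data, std::size_t n) = 0;
        virtual ~Traits() = default;
    };

    struct Q4Traits : Traits {
        int repack(Tensor &, const void *, std::size_t n) override {
            std::printf("repacked %zu bytes during upload\n", n);
            return 0;
        }
    };

    // stand-in for the set_tensor hook: repack instead of a plain memcpy
    void set_tensor(Tensor & t, const void * data, std::size_t n) {
        static_cast<Traits *>(t.extra)->repack(t, data, n);
    }

    int main() {
        Q4Traits traits;
        Tensor t;
        t.extra = static_cast<Traits *>(&traits); // what init_tensor does
        unsigned char weights[64] = {};
        set_tensor(t, weights, sizeof weights);
        return 0;
    }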
+ +static ggml_backend_buffer_t ggml_backend_cpu_repack_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) { + ggml_backend_buffer_t buffer = ggml_backend_buft_alloc_buffer(ggml_backend_cpu_buffer_type(), size); + + if (buffer == nullptr) { + return nullptr; + } + + buffer->buft = buft; + buffer->iface.init_tensor = ggml_backend_cpu_repack_buffer_init_tensor; + buffer->iface.set_tensor = ggml_backend_cpu_repack_buffer_set_tensor; + buffer->iface.get_tensor = nullptr; + buffer->iface.cpy_tensor = nullptr; + return buffer; +} + +static size_t ggml_backend_cpu_repack_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) { + return TENSOR_ALIGNMENT; + + GGML_UNUSED(buft); +} + +namespace ggml::cpu::repack { +class extra_buffer_type : ggml::cpu::extra_buffer_type { + bool supports_op(ggml_backend_dev_t, const struct ggml_tensor * op) override { + if ( op->op == GGML_OP_MUL_MAT && + op->src[0]->buffer && + (ggml_n_dims(op->src[0]) == 2) && + op->src[0]->buffer->buft == ggml_backend_cpu_repack_buffer_type() && + ggml_repack_get_optimal_repack_type(op->src[0]) + ) { + if (op->src[1]->buffer && !ggml_backend_buft_is_host(op->src[1]->buffer->buft)) { + return false; + } + if (op->src[1]->type == GGML_TYPE_F32) { + return true; + } + //if (op->src[1]->type == GGML_TYPE_Q8_0) { + // return true; + //} + // may be possible if Q8_0 packed... + } else if (op->op == GGML_OP_MUL_MAT_ID + && op->src[0]->buffer + && (ggml_n_dims(op->src[0]) == 3) + && op->src[0]->buffer->buft == ggml_backend_cpu_repack_buffer_type() + && ggml_repack_get_optimal_repack_type(op->src[0]) + ) { + if (op->src[1]->buffer && !ggml_backend_buft_is_host(op->src[1]->buffer->buft)) { + return false; + } + if (op->src[1]->type == GGML_TYPE_F32) { + return true; + } + //if (op->src[1]->type == GGML_TYPE_Q8_0) { + // return true; + //} + } + return false; + } + + ggml::cpu::tensor_traits * get_tensor_traits(const struct ggml_tensor * op) override { + if (op->op == GGML_OP_MUL_MAT || op->op == GGML_OP_MUL_MAT_ID) { + if (op->src[0]->buffer && op->src[0]->buffer->buft == ggml_backend_cpu_repack_buffer_type()) { + return (ggml::cpu::tensor_traits *) op->src[0]->extra; + } + } + return nullptr; + } +}; +} // namespace ggml::cpu::repack + +ggml_backend_buffer_type_t ggml_backend_cpu_repack_buffer_type(void) { + static struct ggml_backend_buffer_type ggml_backend_cpu_buffer_type_repack = { + /* .iface = */ { + /* .get_name = */ ggml_backend_cpu_repack_buffer_type_get_name, + /* .alloc_buffer = */ ggml_backend_cpu_repack_buffer_type_alloc_buffer, + /* .get_alignment = */ ggml_backend_cpu_repack_buffer_type_get_alignment, + /* .get_max_size = */ nullptr, // defaults to SIZE_MAX + /* .get_alloc_size = */ nullptr, // defaults to ggml_nbytes + /* .is_host = */ nullptr, + }, + /* .device = */ ggml_backend_reg_dev_get(ggml_backend_cpu_reg(), 0), + /* .context = */ new ggml::cpu::repack::extra_buffer_type(), + }; + + return &ggml_backend_cpu_buffer_type_repack; +}
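Taken together, supports_op only claims MUL_MAT over 2-D weights and MUL_MAT_ID over 3-D weights, and only when the weights live in the repack buffer, a repack kernel exists for their type on this CPU, and the f32 activations are in host memory. A condensed restatement of that gate (illustrative types, not the real API):

    #include <cstdio>

    enum Op { MUL_MAT, MUL_MAT_ID };

    // condensed restatement of the gating in supports_op (illustrative only)
    bool repack_supports(Op op, int ndims, bool weights_in_repack_buffer,
                         bool has_repack_kernel, bool src1_is_f32_on_host) {
        const bool shape_ok = (op == MUL_MAT && ndims == 2) ||
                              (op == MUL_MAT_ID && ndims == 3);
        return shape_ok && weights_in_repack_buffer &&
               has_repack_kernel && src1_is_f32_on_host;
    }

    int main() {
        std::printf("%d\n", repack_supports(MUL_MAT, 2, true, true, true));    // 1
        std::printf("%d\n", repack_supports(MUL_MAT_ID, 2, true, true, true)); // 0
        return 0;
    }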
diff --git a/ml/backend/ggml/ggml/src/ggml-cpu/repack.h b/ml/backend/ggml/ggml/src/ggml-cpu/repack.h new file mode 100644 index 000000000..cd322e743 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cpu/repack.h @@ -0,0 +1,109 @@ +#pragma once + +#define GGML_COMMON_DECL_CPP +#include "ggml-common.h" + +#include "traits.h" +#include "ggml.h" + +// GGML internal header + +ggml_backend_buffer_type_t ggml_backend_cpu_repack_buffer_type(void); + +template <int K> constexpr int QK_0() { + if constexpr (K == 4) { + return QK4_0; + } + if constexpr (K == 8) { + return QK8_0; + } + return -1; +} + +template <int K, int N> struct block { + ggml_half d[N]; // deltas for N qK_0 blocks + int8_t qs[(QK_0<K>() * N * K) / 8]; // quants for N qK_0 blocks +}; + +// control size +static_assert(sizeof(block<4, 4>) == 4 * sizeof(ggml_half) + QK8_0 * 2, "wrong block<4,4> size/padding"); +static_assert(sizeof(block<4, 8>) == 8 * sizeof(ggml_half) + QK8_0 * 4, "wrong block<4,8> size/padding"); +static_assert(sizeof(block<8, 4>) == 4 * sizeof(ggml_half) + QK8_0 * 4, "wrong block<8,4> size/padding"); +static_assert(sizeof(block<8, 8>) == 8 * sizeof(ggml_half) + QK8_0 * 8, "wrong block<8,8> size/padding"); + +using block_q4_0x4 = block<4, 4>; +using block_q4_0x8 = block<4, 8>; +using block_q8_0x4 = block<8, 4>; +using block_q8_0x8 = block<8, 8>; + +struct block_q4_Kx8 { + ggml_half d[8]; // super-block scale for quantized scales + ggml_half dmin[8]; // super-block scale for quantized mins + uint8_t scales[96]; // scales and mins, quantized with 6 bits + uint8_t qs[1024]; // 4-bit quants +}; + +static_assert(sizeof(block_q4_Kx8) == sizeof(ggml_half) * 16 + K_SCALE_SIZE * 8 + QK_K * 4, "wrong q4_K block size/padding"); +struct block_q2_Kx8 { + ggml_half d[8]; // super-block scale for quantized scales + ggml_half dmin[8]; // super-block scale for quantized mins + uint8_t scales[128]; // scales and mins, quantized with 4 bits + uint8_t qs[512]; // 2-bit quants +}; + +static_assert(sizeof(block_q2_Kx8) == sizeof(ggml_half) * 16 + QK_K/2 + QK_K * 2, "wrong q2_K block size/padding"); +struct block_q8_Kx4 { + float d[4]; // delta + int8_t qs[QK_K * 4]; // quants + int16_t bsums[QK_K / 4]; // sum of quants in groups of 16 +}; + +static_assert(sizeof(block_q8_Kx4) == sizeof(float) * 4 + QK_K * 4 + (QK_K / 4) * sizeof(int16_t), "wrong q8_K block size/padding"); + +struct block_iq4_nlx4 { + ggml_half d[4]; // deltas for 4 iq4_nl blocks + uint8_t qs[QK4_NL * 2]; // nibbles / quants for 4 iq4_nl blocks +}; + +static_assert(sizeof(block_iq4_nlx4) == 4 * sizeof(ggml_half) + QK4_NL * 2, "wrong iq4_nlx4 block size/padding");
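block<K, N> packs N half-precision deltas followed by the N blocks' quants interleaved, and the static_asserts pin down the byte layout that the repack code relies on when aliasing memory. The size arithmetic can be checked by hand, assuming QK4_0 = QK8_0 = 32:

    #include <cstdio>

    int main() {
        const int QK = 32; // QK4_0 == QK8_0 == 32
        // block<K, N>: N ggml_half deltas + (QK_0<K>() * N * K) / 8 quant bytes
        const int q4_0x4 = 4 * 2 + (QK * 4 * 4) / 8; // 8 + 64  = 72 bytes
        const int q8_0x4 = 4 * 2 + (QK * 4 * 8) / 8; // 8 + 128 = 136 bytes
        std::printf("block<4,4>: %d bytes, block<8,4>: %d bytes\n", q4_0x4, q8_0x4);
        return 0;
    }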
+void ggml_gemm_q4_0_8x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc);
+void ggml_gemm_q4_K_8x8_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc);
+void ggml_gemm_q2_K_8x8_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc);
+void ggml_gemm_iq4_nl_4x4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc);
+
+// Native implementations
+void ggml_quantize_mat_q8_0_4x4_generic(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k);
+void ggml_quantize_mat_q8_0_4x8_generic(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k);
+void ggml_quantize_mat_q8_K_4x8_generic(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k);
+void ggml_gemv_q4_0_4x4_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc);
+void ggml_gemv_q4_0_4x8_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc);
+void ggml_gemv_q4_0_8x8_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc);
+void ggml_gemv_q4_K_8x8_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc);
+void ggml_gemv_q2_K_8x8_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc);
+void ggml_gemv_iq4_nl_4x4_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc);
+void ggml_gemm_q4_0_4x4_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc);
+void ggml_gemm_q4_0_4x8_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc);
+void ggml_gemm_q4_0_8x8_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc);
+void ggml_gemm_q4_K_8x8_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc);
+void ggml_gemm_q2_K_8x8_q8_K_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc);
+void ggml_gemm_iq4_nl_4x4_q8_0_generic(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc);
+
+#if defined(__cplusplus)
+} // extern "C"
+#endif
diff --git a/ml/backend/ggml/ggml/src/ggml-cpu/simd-mappings.h b/ml/backend/ggml/ggml/src/ggml-cpu/simd-mappings.h
index 45c31cf1f..b4ad68c9f 100644
--- a/ml/backend/ggml/ggml/src/ggml-cpu/simd-mappings.h
+++ b/ml/backend/ggml/ggml/src/ggml-cpu/simd-mappings.h
@@ -2,10 +2,167 @@
 #include "ggml-cpu-impl.h"
+#ifdef __ARM_FEATURE_SVE
+#include <arm_sve.h>
+#endif // __ARM_FEATURE_SVE
+
+#if defined(__ARM_NEON) && !defined(__CUDACC__) && !defined(__MUSACC__)
+// if YCM cannot find <arm_neon.h>, make a symbolic link to it, for example:
+//
+// $ ln -sfn /Library/Developer/CommandLineTools/usr/lib/clang/13.1.6/include/arm_neon.h ./src/
+//
+#include <arm_neon.h>
+#endif
+
+#if defined(__F16C__)
+#include <immintrin.h>
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 //
 // simd mappings
 //
+// FP16 to FP32 conversion
+
+// 16-bit float
+// on Arm, we use __fp16
+// on x86, we use uint16_t
+//
+// for old CUDA compilers (<= 11), we use uint16_t: ref https://github.com/ggml-org/llama.cpp/pull/10616
+// for MUSA compilers, we use uint16_t: ref https://github.com/ggml-org/llama.cpp/pull/11843
+//
+#if defined(__ARM_NEON) && !(defined(__CUDACC__) && __CUDACC_VER_MAJOR__ <= 11) && !defined(__MUSACC__)
+    #define GGML_CPU_COMPUTE_FP16_TO_FP32(x) neon_compute_fp16_to_fp32(x)
+    #define GGML_CPU_COMPUTE_FP32_TO_FP16(x) neon_compute_fp32_to_fp16(x)
+
+    #define GGML_CPU_FP16_TO_FP32(x) GGML_CPU_COMPUTE_FP16_TO_FP32(x)
+
+    static inline float neon_compute_fp16_to_fp32(ggml_fp16_t h) {
+        __fp16 tmp;
+        memcpy(&tmp, &h, sizeof(ggml_fp16_t));
+        return (float)tmp;
+    }
+
+    static inline ggml_fp16_t neon_compute_fp32_to_fp16(float f) {
+        ggml_fp16_t res;
+        __fp16 tmp = f;
+        memcpy(&res, &tmp, sizeof(ggml_fp16_t));
+        return res;
+    }
+#elif defined(__F16C__)
+    #ifdef _MSC_VER
+        #define GGML_CPU_COMPUTE_FP16_TO_FP32(x) _mm_cvtss_f32(_mm_cvtph_ps(_mm_cvtsi32_si128(x)))
+        #define GGML_CPU_COMPUTE_FP32_TO_FP16(x) _mm_extract_epi16(_mm_cvtps_ph(_mm_set_ss(x), 0), 0)
+    #else
+        #define GGML_CPU_COMPUTE_FP16_TO_FP32(x) _cvtsh_ss(x)
+        #define GGML_CPU_COMPUTE_FP32_TO_FP16(x) _cvtss_sh(x, 0)
+    #endif
+#elif defined(__POWER9_VECTOR__)
+    #define GGML_CPU_COMPUTE_FP16_TO_FP32(x) power_compute_fp16_to_fp32(x)
+    #define GGML_CPU_COMPUTE_FP32_TO_FP16(x) power_compute_fp32_to_fp16(x)
+    /* the inline asm below is about 12% faster than the lookup method */
+    #define GGML_CPU_FP16_TO_FP32(x) GGML_CPU_COMPUTE_FP16_TO_FP32(x)
+    #define GGML_CPU_FP32_TO_FP16(x) GGML_CPU_COMPUTE_FP32_TO_FP16(x)
+
+    static inline float power_compute_fp16_to_fp32(ggml_fp16_t h) {
+        float f;
+        double d;
+        __asm__(
+            "mtfprd %0,%2\n"
+            "xscvhpdp %0,%0\n"
+            "frsp %1,%0\n" :
+            /* temp */ "=d"(d),
+            /* out */  "=f"(f):
+            /* in */   "r"(h));
+        return f;
+    }
+
+    static inline ggml_fp16_t power_compute_fp32_to_fp16(float f) {
+        double d;
+        ggml_fp16_t r;
+        __asm__( /* xscvdphp can work on double or single precision */
+            "xscvdphp %0,%2\n"
+            "mffprd %1,%0\n" :
+            /* temp */ "=d"(d),
+            /* out */  "=r"(r):
+            /* in */   "f"(f));
+        return r;
+    }
+#elif defined(__riscv) && defined(__riscv_zfhmin)
+    static inline float riscv_compute_fp16_to_fp32(ggml_fp16_t h) {
+        float f;
+        __asm__(
+            "fmv.h.x %[f], %[h]\n\t"
+            "fcvt.s.h %[f], %[f]"
+            : [f] "=&f" (f)
+            : [h] "r" (h)
+        );
+        return f;
+    }
+
+    static inline ggml_fp16_t riscv_compute_fp32_to_fp16(float f) {
+        ggml_fp16_t res;
+        __asm__(
+            "fcvt.h.s %[f], %[f]\n\t"
+            "fmv.x.h %[h], %[f]"
+            : [h] "=&r" (res)
+            : [f] "f" (f)
+        );
+        return res;
+    }
+
+    #define GGML_CPU_COMPUTE_FP16_TO_FP32(x) riscv_compute_fp16_to_fp32(x)
+    #define GGML_CPU_COMPUTE_FP32_TO_FP16(x) riscv_compute_fp32_to_fp16(x)
+    #define GGML_CPU_FP16_TO_FP32(x) GGML_CPU_COMPUTE_FP16_TO_FP32(x)
+    #define GGML_CPU_FP32_TO_FP16(x) GGML_CPU_COMPUTE_FP32_TO_FP16(x)
+#elif defined(__NNPA__)
+    #define GGML_CPU_COMPUTE_FP16_TO_FP32(x) nnpa_compute_fp16_to_fp32(x)
+    #define GGML_CPU_COMPUTE_FP32_TO_FP16(x) nnpa_compute_fp32_to_fp16(x)
+
+    #define GGML_CPU_FP16_TO_FP32(x) GGML_CPU_COMPUTE_FP16_TO_FP32(x)
+    #define GGML_CPU_FP32_TO_FP16(x) GGML_CPU_COMPUTE_FP32_TO_FP16(x)
+
+    static inline float nnpa_compute_fp16_to_fp32(ggml_fp16_t h) {
+        uint16x8_t v_h = vec_splats(h);
+        uint16x8_t v_hd = vec_convert_from_fp16(v_h, 0);
+        return vec_extend_to_fp32_hi(v_hd, 0)[0];
+    }
+
+    static inline ggml_fp16_t nnpa_compute_fp32_to_fp16(float f) {
+        float32x4_t v_f = vec_splats(f);
+        float32x4_t v_zero = vec_splats(0.0f);
+        uint16x8_t v_hd = vec_round_from_fp32(v_f, v_zero, 0);
+        uint16x8_t v_h = vec_convert_to_fp16(v_hd, 0);
+        return vec_extract(v_h, 0);
+    }
+#endif
+
+// precomputed f32 table for f16 (256 KB)
+// defined in ggml-cpu.c, initialized in ggml_cpu_init()
+extern float ggml_table_f32_f16[1 << 16];
+
+// On ARM NEON, it is quicker to convert FP16 -> FP32 directly in registers than to call into ggml_lookup_fp16_to_fp32,
+// so we define GGML_CPU_FP16_TO_FP32 and GGML_CPU_FP32_TO_FP16 elsewhere for NEON.
+// This is also true for POWER9.
+#if !defined(GGML_CPU_FP16_TO_FP32)
+inline static float ggml_lookup_fp16_to_fp32(ggml_fp16_t f) {
+    uint16_t s;
+    memcpy(&s, &f, sizeof(uint16_t));
+    return ggml_table_f32_f16[s];
+}
+
+#define GGML_CPU_FP16_TO_FP32(x) ggml_lookup_fp16_to_fp32(x)
+#endif
+
+#if !defined(GGML_CPU_FP32_TO_FP16)
+#define GGML_CPU_FP32_TO_FP16(x) GGML_COMPUTE_FP32_TO_FP16(x)
+#endif
+
+
 // we define a common set of C macros which map to specific intrinsics based on the current architecture
 // we then implement the fundamental computation operations below using only these macros
 // adding support for new architectures requires to define the corresponding SIMD macros
@@ -17,7 +174,123 @@
 // number of elements to fit in a single register
 //
-#if defined(__ARM_NEON) && defined(__ARM_FEATURE_FMA)
+#if defined(__ARM_FEATURE_SVE) && defined(__ARM_FEATURE_FMA)
+
+#define GGML_SIMD
+
+// F32 SVE
+#define GGML_F32_EPR 8
+#define DEFAULT_PG svptrue_b32()
+
+#define GGML_F32xt                        svfloat32_t
+#define GGML_F32xt_ZERO                   svdup_n_f32(0.0f)
+#define GGML_F32xt_SET1(x)                svdup_n_f32(x)
+#define GGML_F32xt_LOAD_IMPL(pg, a, ...)  svld1_f32(pg, a)
+#define GGML_F32xt_LOAD(...)              GGML_F32xt_LOAD_IMPL(DEFAULT_PG, __VA_ARGS__)
+#define GGML_F32xt_STORE_IMPL(pg,a,b)     svst1_f32(pg, a, b)
+#define GGML_F32xt_STORE(...)             GGML_F32xt_STORE_IMPL(DEFAULT_PG, __VA_ARGS__)
+#define GGML_F32xt_FMA_IMPL(pg, a, b, c)  svmad_f32_m(pg, b, c, a)
+#define GGML_F32xt_FMA(...)               GGML_F32xt_FMA_IMPL(DEFAULT_PG, __VA_ARGS__)
+#define GGML_F32xt_ADD_IMPL(pg, a, b)     svadd_f32_m(pg, a, b)
+#define GGML_F32xt_ADD(...)               GGML_F32xt_ADD_IMPL(DEFAULT_PG, __VA_ARGS__)
+#define GGML_F32xt_MUL_IMPL(pg, a, b)     svmul_f32_m(pg, a, b)
+#define GGML_F32xt_MUL(...)               GGML_F32xt_MUL_IMPL(DEFAULT_PG, __VA_ARGS__)
+#define GGML_F32xt_REDUCE_ONE_IMPL(pg, a) svaddv(pg, a)
+#define GGML_F32xt_REDUCE_ONE(...)        GGML_F32xt_REDUCE_ONE_IMPL(DEFAULT_PG, __VA_ARGS__)
+#define GGML_F32xt_REDUCE_IMPL(pg, res, sum1, sum2, sum3, sum4, sum5, sum6, sum7, sum8) \
+{                                                      \
+    sum1 = svadd_f32_m(DEFAULT_PG, sum1, sum2);        \
+    sum3 = svadd_f32_m(DEFAULT_PG, sum3, sum4);        \
+    sum5 = svadd_f32_m(DEFAULT_PG, sum5, sum6);        \
+    sum7 = svadd_f32_m(DEFAULT_PG, sum7, sum8);        \
+    sum1 = svadd_f32_m(DEFAULT_PG, sum1, sum3);        \
+    sum5 = svadd_f32_m(DEFAULT_PG, sum5, sum7);        \
+    sum1 = svadd_f32_m(DEFAULT_PG, sum1, sum5);        \
+    (res) = (ggml_float) GGML_F32xt_REDUCE_ONE(sum1);  \
+}
+#define GGML_F32xt_REDUCE(...)
GGML_F32xt_REDUCE_IMPL(DEFAULT_PG, __VA_ARGS__) + +#define GGML_F32_VEC GGML_F32xt +#define GGML_F32_VEC_ZERO GGML_F32xt_ZERO +#define GGML_F32_VEC_SET1 GGML_F32xt_SET1 +#define GGML_F32_VEC_LOAD GGML_F32xt_LOAD +#define GGML_F32_VEC_STORE GGML_F32xt_STORE +#define GGML_F32_VEC_FMA GGML_F32xt_FMA +#define GGML_F32_VEC_ADD GGML_F32xt_ADD +#define GGML_F32_VEC_MUL GGML_F32xt_MUL +#define GGML_F32_VEC_REDUCE GGML_F32xt_REDUCE + +// F16 NEON + +#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) + #define GGML_F16_STEP 32 + #define GGML_F16_EPR 8 + + #define GGML_F16x8 float16x8_t + #define GGML_F16x8_ZERO vdupq_n_f16(0.0f) + #define GGML_F16x8_SET1(x) vdupq_n_f16(x) + #define GGML_F16x8_LOAD(x) vld1q_f16((const __fp16 *)(x)) + #define GGML_F16x8_STORE vst1q_f16 + #define GGML_F16x8_FMA(a, b, c) vfmaq_f16(a, b, c) + #define GGML_F16x8_ADD vaddq_f16 + #define GGML_F16x8_MUL vmulq_f16 + #define GGML_F16x8_REDUCE(res, x) \ + do { \ + int offset = GGML_F16_ARR >> 1; \ + for (int i = 0; i < offset; ++i) { \ + (x)[i] = vaddq_f16((x)[i], (x)[offset+i]); \ + } \ + offset >>= 1; \ + for (int i = 0; i < offset; ++i) { \ + (x)[i] = vaddq_f16((x)[i], (x)[offset+i]); \ + } \ + offset >>= 1; \ + for (int i = 0; i < offset; ++i) { \ + (x)[i] = vaddq_f16((x)[i], (x)[offset+i]); \ + } \ + const float32x4_t t0 = vcvt_f32_f16(vget_low_f16 ((x)[0])); \ + const float32x4_t t1 = vcvt_f32_f16(vget_high_f16((x)[0])); \ + (res) = (ggml_float) vaddvq_f32(vaddq_f32(t0, t1)); \ + } while (0) + + #define GGML_F16_VEC GGML_F16x8 + #define GGML_F16_VEC_ZERO GGML_F16x8_ZERO + #define GGML_F16_VEC_SET1 GGML_F16x8_SET1 + #define GGML_F16_VEC_LOAD(p, i) GGML_F16x8_LOAD(p) + #define GGML_F16_VEC_STORE(p, r, i) GGML_F16x8_STORE((__fp16 *)(p), (r)[i]) + #define GGML_F16_VEC_FMA GGML_F16x8_FMA + #define GGML_F16_VEC_ADD GGML_F16x8_ADD + #define GGML_F16_VEC_MUL GGML_F16x8_MUL + #define GGML_F16_VEC_REDUCE GGML_F16x8_REDUCE +#else + // if FP16 vector arithmetic is not supported, we use FP32 instead + // and take advantage of the vcvt_ functions to convert to/from FP16 + + #define GGML_F16_STEP 16 + #define GGML_F16_EPR 4 + + #define GGML_F32Cx4 float32x4_t + #define GGML_F32Cx4_ZERO vdupq_n_f32(0.0f) + #define GGML_F32Cx4_SET1(x) vdupq_n_f32(x) + #define GGML_F32Cx4_LOAD(x) vcvt_f32_f16(vld1_f16((const __fp16 *)(x))) + #define GGML_F32Cx4_STORE(x, y) vst1_f16(x, vcvt_f16_f32(y)) + #define GGML_F32Cx4_FMA(a, b, c) vfmaq_f32(a, b, c) + #define GGML_F32Cx4_ADD vaddq_f32 + #define GGML_F32Cx4_MUL vmulq_f32 + #define GGML_F32Cx4_REDUCE GGML_F32x4_REDUCE + + #define GGML_F16_VEC GGML_F32Cx4 + #define GGML_F16_VEC_ZERO GGML_F32Cx4_ZERO + #define GGML_F16_VEC_SET1 GGML_F32Cx4_SET1 + #define GGML_F16_VEC_LOAD(p, i) GGML_F32Cx4_LOAD(p) + #define GGML_F16_VEC_STORE(p, r, i) GGML_F32Cx4_STORE((__fp16 *)(p), r[i]) + #define GGML_F16_VEC_FMA GGML_F32Cx4_FMA + #define GGML_F16_VEC_ADD GGML_F32Cx4_ADD + #define GGML_F16_VEC_MUL GGML_F32Cx4_MUL + #define GGML_F16_VEC_REDUCE GGML_F32Cx4_REDUCE +#endif + +#elif defined(__ARM_NEON) && defined(__ARM_FEATURE_FMA) #define GGML_SIMD @@ -299,7 +572,7 @@ static inline __m256 __avx_f32cx8_load(const ggml_fp16_t * x) { float tmp[8]; for (int i = 0; i < 8; i++) { - tmp[i] = GGML_FP16_TO_FP32(x[i]); + tmp[i] = GGML_CPU_FP16_TO_FP32(x[i]); } return _mm256_loadu_ps(tmp); @@ -310,7 +583,7 @@ static inline void __avx_f32cx8_store(ggml_fp16_t *x, __m256 y) { _mm256_storeu_ps(arr, y); for (int i = 0; i < 8; i++) - x[i] = GGML_FP32_TO_FP16(arr[i]); + x[i] = GGML_CPU_FP32_TO_FP16(arr[i]); } #define GGML_F32Cx8_LOAD(x) 
__avx_f32cx8_load(x) #define GGML_F32Cx8_STORE(x, y) __avx_f32cx8_store(x, y) @@ -458,10 +731,10 @@ static inline unsigned char ggml_endian_byte(int i) { inline static v128_t __wasm_f16x4_load(const ggml_fp16_t * p) { float tmp[4]; - tmp[0] = GGML_FP16_TO_FP32(p[0]); - tmp[1] = GGML_FP16_TO_FP32(p[1]); - tmp[2] = GGML_FP16_TO_FP32(p[2]); - tmp[3] = GGML_FP16_TO_FP32(p[3]); + tmp[0] = GGML_CPU_FP16_TO_FP32(p[0]); + tmp[1] = GGML_CPU_FP16_TO_FP32(p[1]); + tmp[2] = GGML_CPU_FP16_TO_FP32(p[2]); + tmp[3] = GGML_CPU_FP16_TO_FP32(p[3]); return wasm_v128_load(tmp); } @@ -471,10 +744,10 @@ inline static void __wasm_f16x4_store(ggml_fp16_t * p, v128_t x) { wasm_v128_store(tmp, x); - p[0] = GGML_FP32_TO_FP16(tmp[0]); - p[1] = GGML_FP32_TO_FP16(tmp[1]); - p[2] = GGML_FP32_TO_FP16(tmp[2]); - p[3] = GGML_FP32_TO_FP16(tmp[3]); + p[0] = GGML_CPU_FP32_TO_FP16(tmp[0]); + p[1] = GGML_CPU_FP32_TO_FP16(tmp[1]); + p[2] = GGML_CPU_FP32_TO_FP16(tmp[2]); + p[3] = GGML_CPU_FP32_TO_FP16(tmp[3]); } #define GGML_F16x4 v128_t @@ -574,10 +847,10 @@ inline static void __wasm_f16x4_store(ggml_fp16_t * p, v128_t x) { static inline __m128 __sse_f16x4_load(const ggml_fp16_t * x) { float tmp[4]; - tmp[0] = GGML_FP16_TO_FP32(x[0]); - tmp[1] = GGML_FP16_TO_FP32(x[1]); - tmp[2] = GGML_FP16_TO_FP32(x[2]); - tmp[3] = GGML_FP16_TO_FP32(x[3]); + tmp[0] = GGML_CPU_FP16_TO_FP32(x[0]); + tmp[1] = GGML_CPU_FP16_TO_FP32(x[1]); + tmp[2] = GGML_CPU_FP16_TO_FP32(x[2]); + tmp[3] = GGML_CPU_FP16_TO_FP32(x[3]); return _mm_loadu_ps(tmp); } @@ -587,10 +860,10 @@ static inline void __sse_f16x4_store(ggml_fp16_t * x, __m128 y) { _mm_storeu_ps(arr, y); - x[0] = GGML_FP32_TO_FP16(arr[0]); - x[1] = GGML_FP32_TO_FP16(arr[1]); - x[2] = GGML_FP32_TO_FP16(arr[2]); - x[3] = GGML_FP32_TO_FP16(arr[3]); + x[0] = GGML_CPU_FP32_TO_FP16(arr[0]); + x[1] = GGML_CPU_FP32_TO_FP16(arr[1]); + x[2] = GGML_CPU_FP32_TO_FP16(arr[2]); + x[3] = GGML_CPU_FP32_TO_FP16(arr[3]); } #define GGML_F32Cx4 __m128 @@ -712,7 +985,7 @@ static inline void __lasx_f32cx8_store(ggml_fp16_t * x, __m256 y) { #define GGML_F32x4_ZERO __lsx_vldi(0) #define GGML_F32x4_SET1(x) __lsx_vinsgr2vr_w(__lsx_vldi(0),(x), 0) #define GGML_F32x4_LOAD(x) __lsx_vld((x), 0) -#define GGML_F32x4_STORE((x),(y)) __lsx_vst((y), (x), 0) +#define GGML_F32x4_STORE(x, y) __lsx_vst(y, x, 0) #define GGML_F32x4_FMA(a, b, c) __lsx_vfmadd_s(b, c, a) #define GGML_F32x4_ADD __lsx_vfadd_s #define GGML_F32x4_MUL __lsx_vfmul_s @@ -758,10 +1031,10 @@ static inline void __lasx_f32cx8_store(ggml_fp16_t * x, __m256 y) { static inline __m128 __lsx_f16x4_load(const ggml_fp16_t * x) { float tmp[4]; - tmp[0] = GGML_FP16_TO_FP32(x[0]); - tmp[1] = GGML_FP16_TO_FP32(x[1]); - tmp[2] = GGML_FP16_TO_FP32(x[2]); - tmp[3] = GGML_FP16_TO_FP32(x[3]); + tmp[0] = GGML_CPU_FP16_TO_FP32(x[0]); + tmp[1] = GGML_CPU_FP16_TO_FP32(x[1]); + tmp[2] = GGML_CPU_FP16_TO_FP32(x[2]); + tmp[3] = GGML_CPU_FP16_TO_FP32(x[3]); return __lsx_vld(tmp, 0); } @@ -771,10 +1044,10 @@ static inline void __lsx_f16x4_store(ggml_fp16_t * x, __m128 y) { __lsx_vst(y, arr, 0); - x[0] = GGML_FP32_TO_FP16(arr[0]); - x[1] = GGML_FP32_TO_FP16(arr[1]); - x[2] = GGML_FP32_TO_FP16(arr[2]); - x[3] = GGML_FP32_TO_FP16(arr[3]); + x[0] = GGML_CPU_FP32_TO_FP16(arr[0]); + x[1] = GGML_CPU_FP32_TO_FP16(arr[1]); + x[2] = GGML_CPU_FP32_TO_FP16(arr[2]); + x[3] = GGML_CPU_FP32_TO_FP16(arr[3]); } #define GGML_F32Cx4 __m128 @@ -806,7 +1079,7 @@ static inline void __lsx_f16x4_store(ggml_fp16_t * x, __m128 y) { #define GGML_F32_STEP 32 #define GGML_F32_EPR 4 -#define GGML_F32x4 __vector float +#define 
GGML_F32x4 float32x4_t #define GGML_F32x4_ZERO vec_splats(0.0f) #define GGML_F32x4_SET1 vec_splats #define GGML_F32x4_LOAD(p) vec_xl(0, p) @@ -828,10 +1101,8 @@ static inline void __lsx_f16x4_store(ggml_fp16_t * x, __m128 y) { for (int i = 0; i < offset; ++i) { \ x[i] = vec_add(x[i], x[offset + i]); \ } \ - res = vec_extract(x[0], 0) + \ - vec_extract(x[0], 1) + \ - vec_extract(x[0], 2) + \ - vec_extract(x[0], 3); \ + float32x4_t tmp = x[0] + vec_reve(x[0]); \ + res = tmp[0] + tmp[1]; \ } #define GGML_F32_VEC GGML_F32x4 @@ -848,28 +1119,45 @@ static inline void __lsx_f16x4_store(ggml_fp16_t * x, __m128 y) { #define GGML_F16_STEP GGML_F32_STEP #define GGML_F16_EPR GGML_F32_EPR -static inline __vector float __lzs_f16cx4_load(const ggml_fp16_t * x) { +static inline float32x4_t __lzs_f16cx4_load(const ggml_fp16_t * x) { +#if defined(__NNPA__) + uint16x8_t v_x = vec_xl(0, (const ggml_fp16_t *)x); + uint16x8_t v_xd = vec_convert_from_fp16(v_x, 0); + return vec_extend_to_fp32_hi(v_xd, 0); +#else float tmp[4]; for (int i = 0; i < 4; i++) { - tmp[i] = GGML_FP16_TO_FP32(x[i]); + tmp[i] = GGML_CPU_FP16_TO_FP32(x[i]); } // note: keep type-cast here to prevent compiler bugs // see: https://github.com/ggml-org/llama.cpp/issues/12846 return vec_xl(0, (const float *)(tmp)); +#endif } -static inline void __lzs_f16cx4_store(ggml_fp16_t * x, __vector float y) { +static inline void __lzs_f16cx4_store(ggml_fp16_t * x, float32x4_t v_y) { +#if defined(__NNPA__) + float32x4_t v_zero = vec_splats(0.0f); + uint16x8_t v_xd = vec_round_from_fp32(v_y, v_zero, 0); + uint16x8_t v_x = vec_convert_to_fp16(v_xd, 0); + + x[0] = vec_extract(v_x, 0); + x[1] = vec_extract(v_x, 1); + x[2] = vec_extract(v_x, 2); + x[3] = vec_extract(v_x, 3); +#else float arr[4]; // note: keep type-cast here to prevent compiler bugs // see: https://github.com/ggml-org/llama.cpp/issues/12846 - vec_xst(y, 0, (float *)(arr)); + vec_xst(v_y, 0, (float *)(arr)); for (int i = 0; i < 4; i++) { - x[i] = GGML_FP32_TO_FP16(arr[i]); + x[i] = GGML_CPU_FP32_TO_FP16(arr[i]); } +#endif } #define GGML_F16_VEC GGML_F32x4 @@ -890,3 +1178,7 @@ static inline void __lzs_f16cx4_store(ggml_fp16_t * x, __vector float y) { #define GGML_F32_ARR (GGML_F32_STEP/GGML_F32_EPR) #define GGML_F16_ARR (GGML_F16_STEP/GGML_F16_EPR) #endif + +#ifdef __cplusplus +} +#endif diff --git a/ml/backend/ggml/ggml/src/ggml-cpu/ggml-cpu-traits.cpp b/ml/backend/ggml/ggml/src/ggml-cpu/traits.cpp similarity index 87% rename from ml/backend/ggml/ggml/src/ggml-cpu/ggml-cpu-traits.cpp rename to ml/backend/ggml/ggml/src/ggml-cpu/traits.cpp index 62a0712da..4f32f1025 100644 --- a/ml/backend/ggml/ggml/src/ggml-cpu/ggml-cpu-traits.cpp +++ b/ml/backend/ggml/ggml/src/ggml-cpu/traits.cpp @@ -1,4 +1,4 @@ -#include "ggml-cpu-traits.h" +#include "traits.h" #include "ggml-backend-impl.h" #include "ggml-backend.h" @@ -10,7 +10,7 @@ extra_buffer_type::~extra_buffer_type() {} } // namespace ggml::cpu bool ggml_cpu_extra_compute_forward(struct ggml_compute_params * params, struct ggml_tensor * op) { - for (auto extra : ggml_backend_cpu_get_extra_buffers_type()) { + for (auto extra : ggml_backend_cpu_get_extra_buffer_types()) { if (extra && extra->context) { auto buf_extra = (ggml::cpu::extra_buffer_type *) extra->context; auto tensor_traits = buf_extra->get_tensor_traits(op); @@ -23,7 +23,7 @@ bool ggml_cpu_extra_compute_forward(struct ggml_compute_params * params, struct } bool ggml_cpu_extra_work_size(int n_threads, const struct ggml_tensor * op, size_t * size) { - for (auto extra : 
ggml_backend_cpu_get_extra_buffers_type()) {
+    for (auto extra : ggml_backend_cpu_get_extra_buffer_types()) {
         if (extra && extra->context) {
             auto buf_extra = (ggml::cpu::extra_buffer_type *) extra->context;
             auto tensor_traits = buf_extra->get_tensor_traits(op);
diff --git a/ml/backend/ggml/ggml/src/ggml-cpu/ggml-cpu-traits.h b/ml/backend/ggml/ggml/src/ggml-cpu/traits.h
similarity index 98%
rename from ml/backend/ggml/ggml/src/ggml-cpu/ggml-cpu-traits.h
rename to ml/backend/ggml/ggml/src/ggml-cpu/traits.h
index 99a6186b1..f4e0990dd 100644
--- a/ml/backend/ggml/ggml/src/ggml-cpu/ggml-cpu-traits.h
+++ b/ml/backend/ggml/ggml/src/ggml-cpu/traits.h
@@ -33,6 +33,6 @@ class extra_buffer_type {
 }  // namespace ggml::cpu
 // implemented in ggml-cpu.cpp.
-std::vector<ggml_backend_buffer_type_t> & ggml_backend_cpu_get_extra_buffers_type();
+std::vector<ggml_backend_buffer_type_t> & ggml_backend_cpu_get_extra_buffer_types();
 #endif
diff --git a/ml/backend/ggml/ggml/src/ggml-cpu/vec.cpp b/ml/backend/ggml/ggml/src/ggml-cpu/vec.cpp
index ec3ec9b17..07b377bdd 100644
--- a/ml/backend/ggml/ggml/src/ggml-cpu/vec.cpp
+++ b/ml/backend/ggml/ggml/src/ggml-cpu/vec.cpp
@@ -17,29 +17,98 @@ void ggml_vec_dot_f32(int n, float * GGML_RESTRICT s, size_t bs, const float * G
 #if defined(GGML_SIMD)
     float sumf = 0.0f;
-    const int np = (n & ~(GGML_F32_STEP - 1));
-    GGML_F32_VEC sum[GGML_F32_ARR] = { GGML_F32_VEC_ZERO };
+    #if defined(__ARM_FEATURE_SVE)
+        const int sve_register_length = ggml_cpu_get_sve_cnt() * 8;
+        const int ggml_f32_epr = sve_register_length / 32;//8;//svcntw(); // SVE128:4, SVE256:8, SVE512:16
+        const int ggml_f32_step = 8 * ggml_f32_epr; // choose 8 SVE registers
-    GGML_F32_VEC ax[GGML_F32_ARR];
-    GGML_F32_VEC ay[GGML_F32_ARR];
+        const int np = (n & ~(ggml_f32_step - 1));
+        svfloat32_t sum1 = svdup_n_f32(0.0f);
+        svfloat32_t sum2 = svdup_n_f32(0.0f);
+        svfloat32_t sum3 = svdup_n_f32(0.0f);
+        svfloat32_t sum4 = svdup_n_f32(0.0f);
+        svfloat32_t sum5 = svdup_n_f32(0.0f);
+        svfloat32_t sum6 = svdup_n_f32(0.0f);
+        svfloat32_t sum7 = svdup_n_f32(0.0f);
+        svfloat32_t sum8 = svdup_n_f32(0.0f);
+        svfloat32_t ax1,ax2,ax3,ax4,ax5,ax6,ax7,ax8;
+        svfloat32_t ay1,ay2,ay3,ay4,ay5,ay6,ay7,ay8;
+        for (int i = 0; i < np; i += ggml_f32_step) {
+            ax1 = GGML_F32_VEC_LOAD(x + i);
+            ay1 = GGML_F32_VEC_LOAD(y + i);
+            sum1 = GGML_F32_VEC_FMA(sum1, ax1, ay1);
-    for (int i = 0; i < np; i += GGML_F32_STEP) {
-        for (int j = 0; j < GGML_F32_ARR; j++) {
-            ax[j] = GGML_F32_VEC_LOAD(x + i + j*GGML_F32_EPR);
-            ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR);
+            ax2 = GGML_F32_VEC_LOAD(x + i + 1*ggml_f32_epr);
+            ay2 = GGML_F32_VEC_LOAD(y + i + 1*ggml_f32_epr);
+            sum2 = GGML_F32_VEC_FMA(sum2, ax2, ay2);
-            sum[j] = GGML_F32_VEC_FMA(sum[j], ax[j], ay[j]);
+            ax3 = GGML_F32_VEC_LOAD(x + i + 2*ggml_f32_epr);
+            ay3 = GGML_F32_VEC_LOAD(y + i + 2*ggml_f32_epr);
+            sum3 = GGML_F32_VEC_FMA(sum3, ax3, ay3);
+
+            ax4 = GGML_F32_VEC_LOAD(x + i + 3*ggml_f32_epr);
+            ay4 = GGML_F32_VEC_LOAD(y + i + 3*ggml_f32_epr);
+            sum4 = GGML_F32_VEC_FMA(sum4, ax4, ay4);
+
+            ax5 = GGML_F32_VEC_LOAD(x + i + 4*ggml_f32_epr);
+            ay5 = GGML_F32_VEC_LOAD(y + i + 4*ggml_f32_epr);
+            sum5 = GGML_F32_VEC_FMA(sum5, ax5, ay5);
+
+            ax6 = GGML_F32_VEC_LOAD(x + i + 5*ggml_f32_epr);
+            ay6 = GGML_F32_VEC_LOAD(y + i + 5*ggml_f32_epr);
+            sum6 = GGML_F32_VEC_FMA(sum6, ax6, ay6);
+
+            ax7 = GGML_F32_VEC_LOAD(x + i + 6*ggml_f32_epr);
+            ay7 = GGML_F32_VEC_LOAD(y + i + 6*ggml_f32_epr);
+            sum7 = GGML_F32_VEC_FMA(sum7, ax7, ay7);
+
+            ax8 = GGML_F32_VEC_LOAD(x + i + 7*ggml_f32_epr);
+            ay8 = GGML_F32_VEC_LOAD(y + i + 7*ggml_f32_epr);
+            sum8 = GGML_F32_VEC_FMA(sum8, ax8, ay8);
        }
-    }
+        // leftovers
+        // Since the loop above is unrolled by 8, leftovers lie in the range [0, ggml_f32_step), handled by the loop below
+        const int np2 = (n & ~(ggml_f32_epr - 1));
+        for (int i = np; i < np2; i += ggml_f32_epr) {
+            ax1 = GGML_F32_VEC_LOAD(x + i);
+            ay1 = GGML_F32_VEC_LOAD(y + i);
+            sum1 = GGML_F32_VEC_FMA(sum1, ax1, ay1);
+        }
+        // the maximum number of leftover elements will be less than ggml_f32_epr. Apply predicated svmad on available elements only
+        if (np2 < n) {
+            svbool_t pg = svwhilelt_b32(np2, n);
+            ax1 = svld1_f32(pg, x + np2);
+            ay1 = svld1_f32(pg, y + np2);
+            sum1 = svmad_f32_m(pg, ax1, ay1, sum1);
+        }
+        // reduce sum1..sum8 to sum1
+        GGML_F32_VEC_REDUCE(sumf, sum1, sum2, sum3, sum4, sum5, sum6, sum7, sum8);
+    #else
+        const int np = (n & ~(GGML_F32_STEP - 1));
-    // reduce sum0..sum3 to sum0
-    GGML_F32_VEC_REDUCE(sumf, sum);
+        GGML_F32_VEC sum[GGML_F32_ARR] = { GGML_F32_VEC_ZERO };
-    // leftovers
-    for (int i = np; i < n; ++i) {
-        sumf += x[i]*y[i];
-    }
+        GGML_F32_VEC ax[GGML_F32_ARR];
+        GGML_F32_VEC ay[GGML_F32_ARR];
+
+        for (int i = 0; i < np; i += GGML_F32_STEP) {
+            for (int j = 0; j < GGML_F32_ARR; j++) {
+                ax[j] = GGML_F32_VEC_LOAD(x + i + j*GGML_F32_EPR);
+                ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR);
+
+                sum[j] = GGML_F32_VEC_FMA(sum[j], ax[j], ay[j]);
+            }
+        }
+
+        // reduce sum0..sum3 to sum0
+        GGML_F32_VEC_REDUCE(sumf, sum);
+
+        // leftovers
+        for (int i = np; i < n; ++i) {
+            sumf += x[i]*y[i];
+        }
+    #endif
 #else
     // scalar
     ggml_float sumf = 0.0;
@@ -150,11 +219,14 @@ void ggml_vec_dot_f16(int n, float * GGML_RESTRICT s, size_t bs, ggml_fp16_t * G
     // leftovers
     for (int i = np; i < n; ++i) {
-        sumf += (ggml_float)(GGML_FP16_TO_FP32(x[i])*GGML_FP16_TO_FP32(y[i]));
+        sumf += (ggml_float)(GGML_CPU_FP16_TO_FP32(x[i])*GGML_CPU_FP16_TO_FP32(y[i]));
     }
+
+    // if you hit this, you are likely running outside the FP range
+    assert(!isnan(sumf) && !isinf(sumf));
 #else
     for (int i = 0; i < n; ++i) {
-        sumf += (ggml_float)(GGML_FP16_TO_FP32(x[i])*GGML_FP16_TO_FP32(y[i]));
+        sumf += (ggml_float)(GGML_CPU_FP16_TO_FP32(x[i])*GGML_CPU_FP16_TO_FP32(y[i]));
     }
 #endif
@@ -185,6 +257,30 @@ void ggml_vec_silu_f32(const int n, float * y, const float * x) {
     }
 }
+void ggml_vec_swiglu_f32(const int n, float * y, const float * x, const float * g) {
+    int i = 0;
+#if defined(__AVX512F__) && defined(__AVX512DQ__)
+    for (; i + 15 < n; i += 16) {
+        _mm512_storeu_ps(y + i, _mm512_mul_ps(ggml_v_silu(_mm512_loadu_ps(x + i)), _mm512_loadu_ps(g + i)));
+    }
+#elif defined(__AVX2__) && defined(__FMA__)
+    for (; i + 7 < n; i += 8) {
+        _mm256_storeu_ps(y + i, _mm256_mul_ps(ggml_v_silu(_mm256_loadu_ps(x + i)), _mm256_loadu_ps(g + i)));
+    }
+#elif defined(__SSE2__)
+    for (; i + 3 < n; i += 4) {
+        _mm_storeu_ps(y + i, _mm_mul_ps(ggml_v_silu(_mm_loadu_ps(x + i)), _mm_loadu_ps(g + i)));
+    }
+#elif defined(__ARM_NEON) && defined(__aarch64__)
+    for (; i + 3 < n; i += 4) {
+        vst1q_f32(y + i, vmulq_f32(ggml_v_silu(vld1q_f32(x + i)), vld1q_f32(g + i)));
+    }
+#endif
+    for (; i < n; ++i) {
+        y[i] = ggml_silu_f32(x[i]) * g[i];
+    }
+}
+
 ggml_float ggml_vec_soft_max_f32(const int n, float * y, const float * x, float max) {
     int i = 0;
     ggml_float sum = 0;
@@ -250,93 +346,3 @@ ggml_float ggml_vec_log_soft_max_f32(const int n, float * y, const float * x, fl
     }
     return sum = (ggml_float)logf(sum);
 }
-
-#define MXFP4 32
-typedef struct {
-    uint8_t d;             // scale E8M0 float
-    uint8_t qs[MXFP4 / 2]; // (32) 4 bit elements E2M1 float
-} block_mxfp4;
-static_assert(sizeof(block_mxfp4) == sizeof(uint8_t) + MXFP4/2, "wrong mxfp4 block size/padding");
-#define MXFP4_VALS {0.0, 0.5, 1.0, 1.5, 2.0, 3.0, 4.0, 6.0, 0.0, -0.5, -1.0, -1.5, -2.0, -3.0, -4.0, -6.0}
-
-void ggml_vec_dot_mxfp4(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const float * GGML_RESTRICT y, size_t by, int nrc) {
-    assert(nrc == 1);
-    GGML_UNUSED(nrc);
-    GGML_UNUSED(bx);
-    GGML_UNUSED(by);
-    GGML_UNUSED(bs);
-    ggml_float mxfp4_table[] = MXFP4_VALS;
-
-#if defined(GGML_SIMD)
-    float sumf = 0.0f;
-    const int np = (n & ~(GGML_F32_STEP - 1));
-    const block_mxfp4 * GGML_RESTRICT xx = (const block_mxfp4 *) vx;
-    GGML_F32_VEC sum[GGML_F32_ARR] = { GGML_F32_VEC_ZERO };
-
-    GGML_F32_VEC scalev;
-    GGML_F32_VEC ax[GGML_F32_ARR];
-    GGML_F32_VEC ay[GGML_F32_ARR];
-    for (int i = 0; i < np; i += GGML_F32_STEP) { // ARM: +16  AVX512: +64
-        for (int j = 0; j < GGML_F32_ARR; j++) {  // ARM: 0 .. 4  AVX512: 0 .. 4
-            // convert GGML_F32_ARR X elements
-            const int ib = (i + j*GGML_F32_EPR) / MXFP4;
-            const block_mxfp4 * GGML_RESTRICT x = &xx[ib];
-            union {
-                uint32_t as_bits;
-                float as_value;
-            } scale;
-            scale.as_bits = (((uint32_t)x->d) << 23);
-            scalev = GGML_F32_VEC_SET1(scale.as_value);
-            float xf[GGML_F32_EPR]= {0.f};
-            assert(((i+j*GGML_F32_EPR) % MXFP4)+GGML_F32_ARR < MXFP4 && "block overrun");
-            for (int qi = 0; qi < GGML_F32_EPR/2 ; ++qi) {
-                xf[qi*2]   = mxfp4_table[(x->qs[((i+j*GGML_F32_EPR)%MXFP4)/2+qi] & 0xf)];
-                xf[qi*2+1] = mxfp4_table[(x->qs[((i+j*GGML_F32_EPR)%MXFP4)/2+qi] & 0xf0) >> 4];
-            }
-
-            ax[j] = GGML_F32_VEC_MUL(GGML_F32_VEC_LOAD(xf), scalev);
-            ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR);
-            sum[j] = GGML_F32_VEC_FMA(sum[j], ax[j], ay[j]);
-        }
-    }
-    GGML_F32_VEC_REDUCE(sumf, sum);
-
-    // leftovers
-    for (int i = np; i < n; i+=2) {
-        const int ib = i / MXFP4;
-        const block_mxfp4 * GGML_RESTRICT x = &xx[ib];
-        union {
-            uint32_t as_bits;
-            float as_value;
-        } scale;
-        scale.as_bits = (((uint32_t)x->d) << 23);
-        sumf += y[i]   * scale.as_value * mxfp4_table[(x->qs[(i%MXFP4)/2] & 0xf)];
-        sumf += y[i+1] * scale.as_value * mxfp4_table[(x->qs[(i%MXFP4)/2] & 0xf0) >> 4];
-    }
-
-
-#else // defined(GGML_SIMD)
-    const int nb = n / MXFP4;
-    assert(n % MXFP4 == 0);
-
-    int yi = 0;
-
-    const block_mxfp4 * GGML_RESTRICT xx = (const block_mxfp4 *) vx;
-
-    ggml_float sumf = 0.0;
-    for (int ib = 0; ib < nb; ++ib) {
-        const block_mxfp4 * GGML_RESTRICT x = &xx[ib + 0];
-        union {
-            uint32_t as_bits;
-            float as_value;
-        } scale;
-        scale.as_bits = (((uint32_t)x->d) << 23);
-        for (int i = 0; i < MXFP4/2; ++i) {
-            sumf += mxfp4_table[(x->qs[i] & 0xf)]       * (ggml_float)(scale.as_value) * (ggml_float)(y[ib*MXFP4 + i*2]);
-            sumf += mxfp4_table[(x->qs[i] & 0xf0) >> 4] * (ggml_float)(scale.as_value) * (ggml_float)(y[ib*MXFP4 + i*2+1]);
-        }
-    }
-#endif
-
-    *s = sumf;
-}
diff --git a/ml/backend/ggml/ggml/src/ggml-cpu/vec.h b/ml/backend/ggml/ggml/src/ggml-cpu/vec.h
index 7480ca089..2250d93cb 100644
--- a/ml/backend/ggml/ggml/src/ggml-cpu/vec.h
+++ b/ml/backend/ggml/ggml/src/ggml-cpu/vec.h
@@ -5,6 +5,7 @@
 #include "ggml-impl.h"
 #include "simd-mappings.h"
 #include "ggml.h"
+#include "ggml-cpu.h"
 #if defined(GGML_USE_ACCELERATE)
 #include <Accelerate/Accelerate.h>
@@ -42,8 +43,6 @@ void ggml_vec_dot_f32(int n, float * GGML_RESTRICT s, size_t bs, const float * G
 void ggml_vec_dot_bf16(int n, float * GGML_RESTRICT s, size_t bs, ggml_bf16_t * GGML_RESTRICT x, size_t bx, ggml_bf16_t * GGML_RESTRICT y, size_t by, int nrc);
 void ggml_vec_dot_f16(int n, float * GGML_RESTRICT s, size_t bs, ggml_fp16_t * GGML_RESTRICT x, size_t bx,
ggml_fp16_t * GGML_RESTRICT y, size_t by, int nrc); -void ggml_vec_dot_mxfp4(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const float * GGML_RESTRICT y, size_t by, int nrc); - void ggml_vec_silu_f32(const int n, float * y, const float * x); ggml_float ggml_vec_soft_max_f32(const int n, float * y, const float * x, float max); ggml_float ggml_vec_log_soft_max_f32(const int n, float * y, const float * x, float max); @@ -56,10 +55,25 @@ inline static void ggml_vec_cpy_i32(const int n, int32_t * y, const int32_t * x) inline static void ggml_vec_set_f16(const int n, ggml_fp16_t * x, const ggml_fp16_t v) { for (int i = 0; i < n; ++i) x[i] = v; } inline static void ggml_vec_set_bf16(const int n, ggml_bf16_t * x, const ggml_bf16_t v) { for (int i = 0; i < n; ++i) x[i] = v; } -inline static void ggml_vec_add_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i] + y[i]; } + +inline static void ggml_vec_add_f32 (const int n, float * z, const float * x, const float * y) { + int i = 0; +#if defined(__AVX2__) + for (; i + 7 < n; i += 8) { + __m256 vx = _mm256_loadu_ps(x + i); + __m256 vy = _mm256_loadu_ps(y + i); + __m256 vz = _mm256_add_ps(vx, vy); + _mm256_storeu_ps(z + i, vz); + } +#endif + for (; i < n; ++i) { + z[i] = x[i] + y[i]; + } +} + inline static void ggml_vec_add_f16 (const int n, ggml_fp16_t * z, const ggml_fp16_t * x, const ggml_fp16_t * y) { for (int i = 0; i < n; ++i) { - z[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(x[i]) + GGML_FP16_TO_FP32(y[i])); + z[i] = GGML_CPU_FP32_TO_FP16(GGML_CPU_FP16_TO_FP32(x[i]) + GGML_CPU_FP16_TO_FP32(y[i])); } } inline static void ggml_vec_add1_f32(const int n, float * z, const float * x, const float v) { for (int i = 0; i < n; ++i) z[i] = x[i] + v; } @@ -68,7 +82,7 @@ inline static void ggml_vec_acc1_f32(const int n, float * y, const float v) inline static void ggml_vec_sub_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i] - y[i]; } inline static void ggml_vec_sub_f16 (const int n, ggml_fp16_t * z, const ggml_fp16_t * x, const ggml_fp16_t * y) { for (int i = 0; i < n; ++i) { - z[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(x[i]) - GGML_FP16_TO_FP32(y[i])); + z[i] = GGML_CPU_FP32_TO_FP16(GGML_CPU_FP16_TO_FP32(x[i]) - GGML_CPU_FP16_TO_FP32(y[i])); } } inline static void ggml_vec_set_f32 (const int n, float * x, const float v) { for (int i = 0; i < n; ++i) x[i] = v; } @@ -76,20 +90,20 @@ inline static void ggml_vec_cpy_f32 (const int n, float * y, const float * x) inline static void ggml_vec_neg_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = -x[i]; } inline static void ggml_vec_neg_f16 (const int n, ggml_fp16_t * y, const ggml_fp16_t * x) { for (int i = 0; i < n; ++i) { - y[i] = GGML_FP32_TO_FP16(-GGML_FP16_TO_FP32(x[i])); + y[i] = GGML_CPU_FP32_TO_FP16(-GGML_CPU_FP16_TO_FP32(x[i])); } } inline static void ggml_vec_mul_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i]*y[i]; } inline static void ggml_vec_mul_f16 (const int n, ggml_fp16_t * z, const ggml_fp16_t * x, const ggml_fp16_t * y) { for (int i = 0; i < n; ++i) { - z[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(x[i]) * GGML_FP16_TO_FP32(y[i])); + z[i] = GGML_CPU_FP32_TO_FP16(GGML_CPU_FP16_TO_FP32(x[i]) * GGML_CPU_FP16_TO_FP32(y[i])); } } inline static void ggml_vec_div_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i]/y[i]; } 
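// A minimal, self-contained sketch of the idiom the new ggml_vec_add_f32 above
// relies on: a SIMD fast path guarded by a feature macro, then a scalar tail
// loop for the 0..7 elements that do not fill a full vector. The helper below
// is hypothetical and not part of this patch; it assumes an AVX2-capable build.

#include <immintrin.h>

static void vec_mul_f32_sketch(const int n, float * z, const float * x, const float * y) {
    int i = 0;
#if defined(__AVX2__)
    for (; i + 7 < n; i += 8) {                         // process 8 floats per iteration
        const __m256 vx = _mm256_loadu_ps(x + i);       // unaligned loads: tensor rows are
        const __m256 vy = _mm256_loadu_ps(y + i);       // not guaranteed 32-byte aligned
        _mm256_storeu_ps(z + i, _mm256_mul_ps(vx, vy));
    }
#endif
    for (; i < n; ++i) {                                // scalar tail
        z[i] = x[i] * y[i];
    }
}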
inline static void ggml_vec_div_f16 (const int n, ggml_fp16_t * z, const ggml_fp16_t * x, const ggml_fp16_t * y) { for (int i = 0; i < n; ++i) { - z[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(x[i]) / GGML_FP16_TO_FP32(y[i])); + z[i] = GGML_CPU_FP32_TO_FP16(GGML_CPU_FP16_TO_FP32(x[i]) / GGML_CPU_FP16_TO_FP32(y[i])); } } @@ -132,13 +146,13 @@ inline static void ggml_vec_dot_f16_unroll(const int n, const int xs, float * GG // leftovers for (int i = np; i < n; ++i) { for (int j = 0; j < GGML_VEC_DOT_UNROLL; ++j) { - sumf[j] += (ggml_float)(GGML_FP16_TO_FP32(x[j][i])*GGML_FP16_TO_FP32(y[i])); + sumf[j] += (ggml_float)(GGML_CPU_FP16_TO_FP32(x[j][i])*GGML_CPU_FP16_TO_FP32(y[i])); } } #else for (int i = 0; i < n; ++i) { for (int j = 0; j < GGML_VEC_DOT_UNROLL; ++j) { - sumf[j] += (ggml_float)(GGML_FP16_TO_FP32(x[j][i])*GGML_FP16_TO_FP32(y[i])); + sumf[j] += (ggml_float)(GGML_CPU_FP16_TO_FP32(x[j][i])*GGML_CPU_FP16_TO_FP32(y[i])); } } #endif @@ -150,27 +164,108 @@ inline static void ggml_vec_dot_f16_unroll(const int n, const int xs, float * GG inline static void ggml_vec_mad_f32(const int n, float * GGML_RESTRICT y, const float * GGML_RESTRICT x, const float v) { #if defined(GGML_SIMD) - const int np = (n & ~(GGML_F32_STEP - 1)); + #if defined(__ARM_FEATURE_SVE) - GGML_F32_VEC vx = GGML_F32_VEC_SET1(v); + const int sve_register_length = ggml_cpu_get_sve_cnt() * 8; + const int ggml_f32_epr = sve_register_length / 32;//8;//svcntw(); // SVE128:4, SVE256:8, SVE512:16 + const int ggml_f32_step = 8 * ggml_f32_epr; // choose 8 SVE registers + GGML_F32_VEC vx = GGML_F32_VEC_SET1(v); - GGML_F32_VEC ax[GGML_F32_ARR]; - GGML_F32_VEC ay[GGML_F32_ARR]; + const int np = (n & ~(ggml_f32_step - 1)); + svfloat32_t ax1, ax2, ax3, ax4, ax5, ax6, ax7, ax8; + svfloat32_t ay1, ay2, ay3, ay4, ay5, ay6, ay7, ay8; + for (int i = 0; i < np; i += ggml_f32_step) { - for (int i = 0; i < np; i += GGML_F32_STEP) { - for (int j = 0; j < GGML_F32_ARR; j++) { - ax[j] = GGML_F32_VEC_LOAD(x + i + j*GGML_F32_EPR); - ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR); - ay[j] = GGML_F32_VEC_FMA(ay[j], ax[j], vx); + ax1 = GGML_F32_VEC_LOAD(x + i); + ay1 = GGML_F32_VEC_LOAD(y + i); + ay1 = GGML_F32_VEC_FMA(ay1, ax1, vx); - GGML_F32_VEC_STORE(y + i + j*GGML_F32_EPR, ay[j]); + GGML_F32_VEC_STORE(y + i, ay1); + + ax2 = GGML_F32_VEC_LOAD(x + i + 1*ggml_f32_epr); + ay2 = GGML_F32_VEC_LOAD(y + i + 1*ggml_f32_epr); + ay2 = GGML_F32_VEC_FMA(ay2, ax2, vx); + + GGML_F32_VEC_STORE(y + i + 1*ggml_f32_epr, ay2); + + ax3 = GGML_F32_VEC_LOAD(x + i + 2*ggml_f32_epr); + ay3 = GGML_F32_VEC_LOAD(y + i + 2*ggml_f32_epr); + ay3 = GGML_F32_VEC_FMA(ay3, ax3, vx); + + GGML_F32_VEC_STORE(y + i + 2*ggml_f32_epr, ay3); + + ax4 = GGML_F32_VEC_LOAD(x + i + 3*ggml_f32_epr); + ay4 = GGML_F32_VEC_LOAD(y + i + 3*ggml_f32_epr); + ay4 = GGML_F32_VEC_FMA(ay4, ax4, vx); + + GGML_F32_VEC_STORE(y + i + 3*ggml_f32_epr, ay4); + + ax5 = GGML_F32_VEC_LOAD(x + i + 4*ggml_f32_epr); + ay5 = GGML_F32_VEC_LOAD(y + i + 4*ggml_f32_epr); + ay5 = GGML_F32_VEC_FMA(ay5, ax5, vx); + + GGML_F32_VEC_STORE(y + i + 4*ggml_f32_epr, ay5); + + ax6 = GGML_F32_VEC_LOAD(x + i + 5*ggml_f32_epr); + ay6 = GGML_F32_VEC_LOAD(y + i + 5*ggml_f32_epr); + ay6 = GGML_F32_VEC_FMA(ay6, ax6, vx); + + GGML_F32_VEC_STORE(y + i + 5*ggml_f32_epr, ay6); + + ax7 = GGML_F32_VEC_LOAD(x + i + 6*ggml_f32_epr); + ay7 = GGML_F32_VEC_LOAD(y + i + 6*ggml_f32_epr); + ay7 = GGML_F32_VEC_FMA(ay7, ax7, vx); + + GGML_F32_VEC_STORE(y + i + 6*ggml_f32_epr, ay7); + + ax8 = GGML_F32_VEC_LOAD(x + i + 7*ggml_f32_epr); + ay8 = 
GGML_F32_VEC_LOAD(y + i + 7*ggml_f32_epr);
+        ay8 = GGML_F32_VEC_FMA(ay8, ax8, vx);
+
+        GGML_F32_VEC_STORE(y + i + 7*ggml_f32_epr, ay8);
     }
-    }
+    // leftovers
+    // Since the loop above is unrolled by 8, leftovers lie in the range [0, ggml_f32_step), handled by the loop below
+    const int np2 = (n & ~(ggml_f32_epr - 1));
+    for (int i = np; i < np2; i += ggml_f32_epr) {
+        ax1 = GGML_F32_VEC_LOAD(x + i);
+        ay1 = GGML_F32_VEC_LOAD(y + i);
+        ay1 = GGML_F32_VEC_FMA(ay1, ax1, vx);
-    // leftovers
-    for (int i = np; i < n; ++i) {
-        y[i] += x[i]*v;
-    }
+        GGML_F32_VEC_STORE(y + i, ay1);
+    }
+    // the maximum number of leftover elements will be less than ggml_f32_epr. Apply predicated svmad on available elements only
+    if (np2 < n) {
+        svbool_t pg = svwhilelt_b32(np2, n);
+        ax1 = svld1_f32(pg, x + np2);
+        ay1 = svld1_f32(pg, y + np2);
+        ay1 = svmad_f32_m(pg, ax1, vx, ay1);
+
+        svst1_f32(pg, y + np2, ay1);
+    }
+    #else
+    const int np = (n & ~(GGML_F32_STEP - 1));
+
+    GGML_F32_VEC vx = GGML_F32_VEC_SET1(v);
+
+    GGML_F32_VEC ax[GGML_F32_ARR];
+    GGML_F32_VEC ay[GGML_F32_ARR];
+
+    for (int i = 0; i < np; i += GGML_F32_STEP) {
+        for (int j = 0; j < GGML_F32_ARR; j++) {
+            ax[j] = GGML_F32_VEC_LOAD(x + i + j*GGML_F32_EPR);
+            ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR);
+            ay[j] = GGML_F32_VEC_FMA(ay[j], ax[j], vx);
+
+            GGML_F32_VEC_STORE(y + i + j*GGML_F32_EPR, ay[j]);
+        }
+    }
+
+    // leftovers
+    for (int i = np; i < n; ++i) {
+        y[i] += x[i]*v;
+    }
+    #endif
 #else
     // scalar
     for (int i = 0; i < n; ++i) {
@@ -200,12 +295,12 @@ inline static void ggml_vec_mad_f16(const int n, ggml_fp16_t * GGML_RESTRICT y,
     // leftovers
     for (int i = np; i < n; ++i) {
-        y[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(y[i]) + GGML_FP16_TO_FP32(x[i])*v);
+        y[i] = GGML_CPU_FP32_TO_FP16(GGML_CPU_FP16_TO_FP32(y[i]) + GGML_CPU_FP16_TO_FP32(x[i])*v);
     }
 #else
     // scalar
     for (int i = 0; i < n; ++i) {
-        y[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(y[i]) + GGML_FP16_TO_FP32(x[i])*v);
+        y[i] = GGML_CPU_FP32_TO_FP16(GGML_CPU_FP16_TO_FP32(y[i]) + GGML_CPU_FP16_TO_FP32(x[i])*v);
     }
 #endif
 }
@@ -222,36 +317,45 @@
     }
 #if defined(GGML_SIMD)
-    const int np = (n & ~(GGML_F32_STEP - 1));
-
-    GGML_F32_VEC vx[GGML_VEC_MAD_UNROLL];
-
-    for (int k = 0; k < GGML_VEC_MAD_UNROLL; ++k) {
-        vx[k] = GGML_F32_VEC_SET1(v[k][0]);
-    }
-
-    GGML_F32_VEC ax[GGML_VEC_MAD_UNROLL][GGML_F32_ARR];
-    GGML_F32_VEC ay[GGML_F32_ARR];
-
-    for (int i = 0; i < np; i += GGML_F32_STEP) {
-        for (int j = 0; j < GGML_F32_ARR; j++) {
-            ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR);
-
-            for (int k = 0; k < GGML_VEC_MAD_UNROLL; ++k) {
-                ax[k][j] = GGML_F32_VEC_LOAD(x[k] + i + j*GGML_F32_EPR);
-                ay[j] = GGML_F32_VEC_FMA(ay[j], ax[k][j], vx[k]);
+    #if defined(__ARM_FEATURE_SVE)
+    // scalar: route to the scalar implementation // TODO: write SVE code
+    for (int k = 0; k < GGML_VEC_MAD_UNROLL; ++k) {
+        for (int i = 0; i < n; ++i) {
+            y[i] += x[k][i]*v[k][0];
             }
-
-            GGML_F32_VEC_STORE(y + i + j*GGML_F32_EPR, ay[j]);
         }
-    }
+    }
+    #else
+    const int np = (n & ~(GGML_F32_STEP - 1));
-    // leftovers
-    for (int k = 0; k < GGML_VEC_MAD_UNROLL; ++k) {
-        for (int i = np; i < n; ++i) {
-            y[i] += x[k][i]*v[k][0];
+    GGML_F32_VEC vx[GGML_VEC_MAD_UNROLL];
+
+    for (int k = 0; k < GGML_VEC_MAD_UNROLL; ++k) {
+        vx[k] = GGML_F32_VEC_SET1(v[k][0]);
         }
-    }
+
+    GGML_F32_VEC ax[GGML_VEC_MAD_UNROLL][GGML_F32_ARR];
+    GGML_F32_VEC ay[GGML_F32_ARR];
+
+    for (int i = 0; i < np; i += GGML_F32_STEP) {
+        for (int j = 0; j < GGML_F32_ARR; j++) {
+            ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR);
+
+            for (int k = 0; k < GGML_VEC_MAD_UNROLL; ++k) {
+                ax[k][j] = GGML_F32_VEC_LOAD(x[k] + i + j*GGML_F32_EPR);
+                ay[j] = GGML_F32_VEC_FMA(ay[j], ax[k][j], vx[k]);
+            }
+
+            GGML_F32_VEC_STORE(y + i + j*GGML_F32_EPR, ay[j]);
+        }
+    }
+
+    // leftovers
+    for (int k = 0; k < GGML_VEC_MAD_UNROLL; ++k) {
+        for (int i = np; i < n; ++i) {
+            y[i] += x[k][i]*v[k][0];
+        }
+    }
+    #endif
 #else
     // scalar
     for (int k = 0; k < GGML_VEC_MAD_UNROLL; ++k) {
@@ -262,30 +366,97 @@
 #endif
 }
+inline static void ggml_vec_mad1_f32(const int n, float * y, const float * x, const float s, const float b) {
+#if defined(GGML_USE_ACCELERATE)
+    vDSP_vsmsa(x, 1, &s, &b, y, 1, n);
+#elif defined(GGML_SIMD)
+    #if defined(__ARM_FEATURE_SVE)
+    // scalar; TODO: write SVE code
+    for (int i = 0; i < n; ++i) {
+        y[i] = x[i]*s + b;
+    }
+    #else
+    const int np = (n & ~(GGML_F32_STEP - 1));
+
+    GGML_F32_VEC vs = GGML_F32_VEC_SET1(s);
+    GGML_F32_VEC vb = GGML_F32_VEC_SET1(b);
+
+    GGML_F32_VEC ay[GGML_F32_ARR];
+
+    for (int i = 0; i < np; i += GGML_F32_STEP) {
+        for (int j = 0; j < GGML_F32_ARR; j++) {
+            ay[j] = GGML_F32_VEC_LOAD(x + i + j*GGML_F32_EPR);
+            ay[j] = GGML_F32_VEC_FMA(ay[j], vs, vb);
+
+            GGML_F32_VEC_STORE(y + i + j*GGML_F32_EPR, ay[j]);
+        }
+    }
+
+    // leftovers
+    for (int i = np; i < n; ++i) {
+        y[i] = x[i]*s + b;
+    }
+    #endif
+#else
+    // scalar
+    for (int i = 0; i < n; ++i) {
+        y[i] = x[i]*s + b;
+    }
+#endif
+}
+
 //inline static void ggml_vec_scale_f32(const int n, float * y, const float v) { for (int i = 0; i < n; ++i) y[i] *= v; }
 inline static void ggml_vec_scale_f32(const int n, float * y, const float v) {
 #if defined(GGML_USE_ACCELERATE)
     vDSP_vsmul(y, 1, &v, y, 1, n);
 #elif defined(GGML_SIMD)
-    const int np = (n & ~(GGML_F32_STEP - 1));
+    #if defined(__ARM_FEATURE_SVE)
+    const int sve_register_length = ggml_cpu_get_sve_cnt() * 8;
+    const int ggml_f32_epr = sve_register_length / 32;//8;//svcntw(); // SVE128:4, SVE256:8, SVE512:16
+    const int ggml_f32_step = 2 * ggml_f32_epr;
-    GGML_F32_VEC vx = GGML_F32_VEC_SET1(v);
+    GGML_F32_VEC vx = GGML_F32_VEC_SET1(v);
+    const int np = (n & ~(ggml_f32_step - 1));
+    svfloat32_t ay1;
+    svfloat32_t ay2;
+    for (int i = 0; i < np; i += ggml_f32_step) {
+        ay1 = GGML_F32_VEC_LOAD(y + i);
+        ay1 = GGML_F32_VEC_MUL(ay1, vx);
+        GGML_F32_VEC_STORE(y + i, ay1);
-    GGML_F32_VEC ay[GGML_F32_ARR];
-
-    for (int i = 0; i < np; i += GGML_F32_STEP) {
-        for (int j = 0; j < GGML_F32_ARR; j++) {
-            ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR);
-            ay[j] = GGML_F32_VEC_MUL(ay[j], vx);
-
-            GGML_F32_VEC_STORE(y + i + j*GGML_F32_EPR, ay[j]);
+        ay2 = GGML_F32_VEC_LOAD(y + i + 1*ggml_f32_epr);
+        ay2 = GGML_F32_VEC_MUL(ay2, vx);
+        GGML_F32_VEC_STORE(y + i + 1*ggml_f32_epr, ay2);
     }
-    }
+    // leftovers
+    // the maximum number of leftover elements will be less than ggml_f32_epr. Apply predicated svmul on available elements only
+    if (np < n) {
+        svbool_t pg = svwhilelt_b32(np, n);
+        ay1 = svld1_f32(pg, y + np);
+        ay1 = svmul_f32_m(pg, ay1, vx);
+        svst1_f32(pg, y + np, ay1);
+    }
+    #else
+    const int np = (n & ~(GGML_F32_STEP - 1));
-    // leftovers
-    for (int i = np; i < n; ++i) {
-        y[i] *= v;
-    }
+    GGML_F32_VEC vx = GGML_F32_VEC_SET1(v);
+
+    GGML_F32_VEC ay[GGML_F32_ARR];
+
+    for (int i = 0; i < np; i += GGML_F32_STEP) {
+        for (int j = 0; j < GGML_F32_ARR; j++) {
+            ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR);
+            ay[j] = GGML_F32_VEC_MUL(ay[j], vx);
+
+            GGML_F32_VEC_STORE(y + i + j*GGML_F32_EPR, ay[j]);
+        }
+    }
+
+    // leftovers
+    for (int i = np; i < n; ++i) {
+        y[i] *= v;
+    }
+    #endif
 #else
     // scalar
     for (int i = 0; i < n;
@@ -313,12 +484,12 @@ inline static void ggml_vec_scale_f16(const int n, ggml_fp16_t * y, const float
     // leftovers
     for (int i = np; i < n; ++i) {
-        y[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(y[i])*v);
+        y[i] = GGML_CPU_FP32_TO_FP16(GGML_CPU_FP16_TO_FP32(y[i])*v);
     }
 #else
     // scalar
     for (int i = 0; i < n; ++i) {
-        y[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(y[i])*v);
+        y[i] = GGML_CPU_FP32_TO_FP16(GGML_CPU_FP16_TO_FP32(y[i])*v);
     }
 #endif
 }
@@ -327,109 +498,110 @@ inline static void ggml_vec_norm_f32 (const int n, float * s, const float * x) {
 inline static void ggml_vec_sqr_f32  (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = x[i]*x[i];   }
 inline static void ggml_vec_sqr_f16 (const int n, ggml_fp16_t * y, const ggml_fp16_t * x) {
     for (int i = 0; i < n; ++i) {
-        float v = GGML_FP16_TO_FP32(x[i]);
-        y[i] = GGML_FP32_TO_FP16(v*v);
+        float v = GGML_CPU_FP16_TO_FP32(x[i]);
+        y[i] = GGML_CPU_FP32_TO_FP16(v*v);
     }
 }
 inline static void ggml_vec_sqrt_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = sqrtf(x[i]); }
 inline static void ggml_vec_sqrt_f16 (const int n, ggml_fp16_t * y, const ggml_fp16_t * x) {
     for (int i = 0; i < n; ++i) {
-        y[i] = GGML_FP32_TO_FP16(sqrtf(GGML_FP16_TO_FP32(x[i])));
+        y[i] = GGML_CPU_FP32_TO_FP16(sqrtf(GGML_CPU_FP16_TO_FP32(x[i])));
     }
 }
 inline static void ggml_vec_log_f32  (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = logf(x[i]); }
 inline static void ggml_vec_log_f16 (const int n, ggml_fp16_t * y, const ggml_fp16_t * x) {
     for (int i = 0; i < n; ++i) {
-        y[i] = GGML_FP32_TO_FP16(logf(GGML_FP16_TO_FP32(x[i])));
+        y[i] = GGML_CPU_FP32_TO_FP16(logf(GGML_CPU_FP16_TO_FP32(x[i])));
     }
 }
 inline static void ggml_vec_sin_f32  (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = sinf(x[i]);  }
 inline static void ggml_vec_sin_f16 (const int n, ggml_fp16_t * y, const ggml_fp16_t * x) {
     for (int i = 0; i < n; ++i) {
-        y[i] = GGML_FP32_TO_FP16(sinf(GGML_FP16_TO_FP32(x[i])));
+        y[i] = GGML_CPU_FP32_TO_FP16(sinf(GGML_CPU_FP16_TO_FP32(x[i])));
     }
 }
 inline static void ggml_vec_cos_f32  (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = cosf(x[i]);  }
 inline static void ggml_vec_cos_f16 (const int n, ggml_fp16_t * y, const ggml_fp16_t * x) {
     for (int i = 0; i < n; ++i) {
-        y[i] = GGML_FP32_TO_FP16(cosf(GGML_FP16_TO_FP32(x[i])));
+        y[i] = GGML_CPU_FP32_TO_FP16(cosf(GGML_CPU_FP16_TO_FP32(x[i])));
     }
 }
 inline static void ggml_vec_abs_f32  (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = fabsf(x[i]); }
 inline static void ggml_vec_abs_f16 (const int n, ggml_fp16_t * y, const ggml_fp16_t * x) {
     for (int i = 0; i < n; ++i) {
-        y[i] =
GGML_FP32_TO_FP16(fabsf(GGML_FP16_TO_FP32(x[i]))); + y[i] = GGML_CPU_FP32_TO_FP16(fabsf(GGML_CPU_FP16_TO_FP32(x[i]))); } } inline static void ggml_vec_sgn_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? 1.f : ((x[i] < 0.f) ? -1.f : 0.f); } inline static void ggml_vec_sgn_f16 (const int n, ggml_fp16_t * y, const ggml_fp16_t * x) { for (int i = 0; i < n; ++i) { - float v = GGML_FP16_TO_FP32(x[i]); - y[i] = GGML_FP32_TO_FP16((v > 0.f) ? 1.f : ((v < 0.f) ? -1.f : 0.f)); + float v = GGML_CPU_FP16_TO_FP32(x[i]); + y[i] = GGML_CPU_FP32_TO_FP16((v > 0.f) ? 1.f : ((v < 0.f) ? -1.f : 0.f)); } } inline static void ggml_vec_step_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? 1.f : 0.f; } inline static void ggml_vec_step_f16 (const int n, ggml_fp16_t * y, const ggml_fp16_t * x) { for (int i = 0; i < n; ++i) { - y[i] = GGML_FP32_TO_FP16((GGML_FP16_TO_FP32(x[i]) > 0.f) ? 1.f : 0.f); + y[i] = GGML_CPU_FP32_TO_FP16((GGML_CPU_FP16_TO_FP32(x[i]) > 0.f) ? 1.f : 0.f); } } inline static void ggml_vec_tanh_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = tanhf(x[i]); } inline static void ggml_vec_tanh_f16 (const int n, ggml_fp16_t * y, const ggml_fp16_t * x) { for (int i = 0; i < n; ++i) { - y[i] = GGML_FP32_TO_FP16(tanhf(GGML_FP16_TO_FP32(x[i]))); + y[i] = GGML_CPU_FP32_TO_FP16(tanhf(GGML_CPU_FP16_TO_FP32(x[i]))); } } inline static void ggml_vec_elu_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? x[i] : expm1f(x[i]); } inline static void ggml_vec_elu_f16 (const int n, ggml_fp16_t * y, const ggml_fp16_t * x) { for (int i = 0; i < n; ++i) { - y[i] = GGML_FP32_TO_FP16(expm1f(GGML_FP16_TO_FP32(x[i]))); + y[i] = GGML_CPU_FP32_TO_FP16(expm1f(GGML_CPU_FP16_TO_FP32(x[i]))); } } inline static void ggml_vec_relu_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? x[i] : 0.f; } inline static void ggml_vec_relu_f16 (const int n, ggml_fp16_t * y, const ggml_fp16_t * x) { for (int i = 0; i < n; ++i) { - float v = GGML_FP16_TO_FP32(x[i]); - y[i] = GGML_FP32_TO_FP16((v > 0.f) ? v : 0.f); + float v = GGML_CPU_FP16_TO_FP32(x[i]); + y[i] = GGML_CPU_FP32_TO_FP16((v > 0.f) ? v : 0.f); } } inline static void ggml_vec_leaky_relu_f32 (const int n, float * y, const float * x, const float ns) { for (int i = 0; i < n; ++i) y[i] = ((x[i] > 0.f) ? x[i] : 0.f) + ns * ((x[i] < 0.0f) ? x[i] : 0.f); } inline static void ggml_vec_leaky_relu_f16 (const int n, ggml_fp16_t * y, const ggml_fp16_t * x, const float ns) { for (int i = 0; i < n; ++i) { - float v = GGML_FP16_TO_FP32(x[i]); - y[i] = GGML_FP32_TO_FP16(((v > 0.f) ? v : 0.f) + ns * ((v < 0.0f) ? v : 0.f)); + float v = GGML_CPU_FP16_TO_FP32(x[i]); + y[i] = GGML_CPU_FP32_TO_FP16(((v > 0.f) ? v : 0.f) + ns * ((v < 0.0f) ? 
v : 0.f)); } } inline static void ggml_vec_sigmoid_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = 1.f / (1.f + expf(-x[i])); } inline static void ggml_vec_sigmoid_f16 (const int n, ggml_fp16_t * y, const ggml_fp16_t * x) { for (int i = 0; i < n; ++i) { - y[i] = GGML_FP32_TO_FP16(1.f / (1.f + expf(-GGML_FP16_TO_FP32(x[i])))); + y[i] = GGML_CPU_FP32_TO_FP16(1.f / (1.f + expf(-GGML_CPU_FP16_TO_FP32(x[i])))); } } // TODO: optimize performance inline static void ggml_vec_hardswish_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = x[i] * fminf(1.0f, fmaxf(0.0f, (x[i] + 3.0f) / 6.0f)); } inline static void ggml_vec_hardswish_f16 (const int n, ggml_fp16_t * y, const ggml_fp16_t * x) { for (int i = 0; i < n; ++i) { - float v = GGML_FP16_TO_FP32(x[i]); - y[i] = GGML_FP32_TO_FP16(v * fminf(1.0f, fmaxf(0.0f, (v + 3.0f) / 6.0f))); + float v = GGML_CPU_FP16_TO_FP32(x[i]); + y[i] = GGML_CPU_FP32_TO_FP16(v * fminf(1.0f, fmaxf(0.0f, (v + 3.0f) / 6.0f))); } } inline static void ggml_vec_hardsigmoid_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = fminf(1.0f, fmaxf(0.0f, (x[i] + 3.0f) / 6.0f)); } inline static void ggml_vec_hardsigmoid_f16 (const int n, ggml_fp16_t * y, const ggml_fp16_t * x) { for (int i = 0; i < n; ++i) { - y[i] = GGML_FP32_TO_FP16(fminf(1.0f, fmaxf(0.0f, (GGML_FP16_TO_FP32(x[i]) + 3.0f) / 6.0f))); + y[i] = GGML_CPU_FP32_TO_FP16(fminf(1.0f, fmaxf(0.0f, (GGML_CPU_FP16_TO_FP32(x[i]) + 3.0f) / 6.0f))); } } inline static void ggml_vec_exp_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = expf(x[i]); } inline static void ggml_vec_exp_f16 (const int n, ggml_fp16_t * y, const ggml_fp16_t * x) { for (int i = 0; i < n; ++i) { - y[i] = GGML_FP32_TO_FP16(expf(GGML_FP16_TO_FP32(x[i]))); + y[i] = GGML_CPU_FP32_TO_FP16(expf(GGML_CPU_FP16_TO_FP32(x[i]))); } } static const float GELU_COEF_A = 0.044715f; static const float GELU_QUICK_COEF = -1.702f; static const float SQRT_2_OVER_PI = 0.79788456080286535587989211986876f; +static const float SQRT_2_INV = 0.70710678118654752440084436210484f; inline static float ggml_gelu_f32(float x) { return 0.5f*x*(1.0f + tanhf(SQRT_2_OVER_PI*x*(1.0f + GELU_COEF_A*x*x))); @@ -442,6 +614,14 @@ inline static void ggml_vec_gelu_f16(const int n, ggml_fp16_t * y, const ggml_fp } } +inline static void ggml_vec_gelu_erf_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x) { + for (int i = 0; i < n; ++i) { + float xi = GGML_CPU_FP16_TO_FP32(x[i]); + float res = 0.5f*xi*(1.0f + erff(xi*SQRT_2_INV)); + y[i] = GGML_CPU_FP32_TO_FP16(res); + } +} + #ifdef GGML_GELU_FP16 inline static void ggml_vec_gelu_f32(const int n, float * y, const float * x) { uint16_t t; @@ -451,9 +631,9 @@ inline static void ggml_vec_gelu_f32(const int n, float * y, const float * x) { } else if (x[i] >= 10.0f) { y[i] = x[i]; } else { - ggml_fp16_t fp16 = GGML_FP32_TO_FP16(x[i]); + ggml_fp16_t fp16 = GGML_CPU_FP32_TO_FP16(x[i]); memcpy(&t, &fp16, sizeof(uint16_t)); - y[i] = GGML_FP16_TO_FP32(ggml_table_gelu_f16[t]); + y[i] = GGML_CPU_FP16_TO_FP32(ggml_table_gelu_f16[t]); } } } @@ -465,6 +645,13 @@ inline static void ggml_vec_gelu_f32(const int n, float * y, const float * x) { } #endif +inline static void ggml_vec_gelu_erf_f32(const int n, float * y, const float * x) { + for (int i = 0; i < n; ++i) { + float xi = x[i]; + y[i] = 0.5f*xi*(1.0f + erff(xi*SQRT_2_INV)); + } +} + inline static float ggml_gelu_quick_f32(float x) { return x*(1.0f/(1.0f+expf(GELU_QUICK_COEF*x))); } @@ 
-480,9 +667,9 @@ inline static float ggml_gelu_quick_f32(float x) {
 inline static void ggml_vec_gelu_quick_f32(const int n, float * y, const float * x) {
     uint16_t t;
     for (int i = 0; i < n; ++i) {
-        ggml_fp16_t fp16 = GGML_FP32_TO_FP16(x[i]);
+        ggml_fp16_t fp16 = GGML_CPU_FP32_TO_FP16(x[i]);
         memcpy(&t, &fp16, sizeof(uint16_t));
-        y[i] = GGML_FP16_TO_FP32(ggml_table_gelu_quick_f16[t]);
+        y[i] = GGML_CPU_FP16_TO_FP32(ggml_table_gelu_quick_f16[t]);
     }
 }
 #else
@@ -495,8 +682,8 @@ inline static void ggml_vec_gelu_quick_f32(const int n, float * y, const float *
 inline static void ggml_vec_gelu_quick_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x) {
     for (int i = 0; i < n; ++i) {
-        float v = GGML_FP16_TO_FP32(x[i]);
-        y[i] = GGML_FP32_TO_FP16(v*(1.0f/(1.0f+expf(GELU_QUICK_COEF*v))));
+        float v = GGML_CPU_FP16_TO_FP32(x[i]);
+        y[i] = GGML_CPU_FP32_TO_FP16(v*(1.0f/(1.0f+expf(GELU_QUICK_COEF*v))));
     }
 }
@@ -505,8 +692,8 @@ inline static float ggml_silu_f32(float x) {
     return x/(1.0f + expf(-x));
 }
 inline static ggml_fp16_t ggml_silu_f16(ggml_fp16_t x) {
-    float v = GGML_FP16_TO_FP32(x);
-    return GGML_FP32_TO_FP16(v/(1.0f + expf(-v)));
+    float v = GGML_CPU_FP16_TO_FP32(x);
+    return GGML_CPU_FP32_TO_FP16(v/(1.0f + expf(-v)));
 }
 #if __FINITE_MATH_ONLY__
@@ -514,6 +701,42 @@ inline static ggml_fp16_t ggml_silu_f16(ggml_fp16_t x) {
 #error "ref: https://github.com/ggml-org/llama.cpp/pull/7154#issuecomment-2143844461"
 #endif
+/* Below function was borrowed from the GitHub repository:
+https://github.com/openvinotoolkit/openvino/blob/master/src/plugins/intel_cpu/src/nodes/kernels/scaled_attn/common.hpp */
+#if defined(__ARM_FEATURE_SVE) && defined(__aarch64__)
+    inline static svfloat32_t exp_ps_sve(svbool_t pg, svfloat32_t src) {
+        // Constants
+        const svfloat32_t log2_e = svdup_n_f32(1.4426950409f);
+        const svfloat32_t ln2 = svdup_n_f32(0.6931473921f);
+        const svfloat32_t half_ln2_sq = svdup_n_f32(0.2413862043f);
+        const svuint32_t not_mask17 = svdup_n_u32(~((1u << 17) - 1));
+        const svfloat32_t one = svdup_n_f32(1.0f);
+        const svfloat32_t inactive1 = svdup_n_f32(0.0f);
+        const svint32_t inactive2 = svdup_n_s32(0);
+
+        // Algorithm starts here
+        svfloat32_t t0 = svmul_f32_m(pg, src, log2_e);  // y = x * log2(e)
+        svfloat32_t t1 = svrintm_f32_m(inactive1, pg, t0);  // round to int (float)
+        svint32_t t2 = svcvt_s32_f32_m(inactive2, pg, t1);  // n
+
+        t1 = svsub_f32_m(pg, t0, t1);  // a = y - floor(y)
+        t1 = svadd_f32_m(pg, t1, one);  // b = a + 1
+
+        svuint32_t t3 = svlsr_n_u32_m(pg, svreinterpret_u32_f32(t1), 17);  // v = b >> 17 (u32)
+        svfloat32_t t4 = svexpa_f32(t3);  // c = fexpa(v)
+        t4 = svscale_f32_m(pg, t4, t2);  // fexpa(v) * 2^(n)
+
+        // and_(t2.d, t1.d, not_mask17.d)
+        svfloat32_t t5 = svreinterpret_f32_u32(svand_u32_m(pg, svreinterpret_u32_f32(t1), not_mask17));
+        t5 = svsub_f32_m(pg, t1, t5);  // z
+        t0 = svmla_f32_m(pg, ln2, t5, half_ln2_sq);  // ln2 + half_ln2_sq * z
+        t0 = svmla_f32_m(pg, one, t5, t0);  // 1 + (ln2 * z) + (half_ln2_sq * z * z)
+        t0 = svmul_f32_m(pg, t0, t4);  // Final result
+
+        return t0;
+    }
+#endif
+
 #if defined(__ARM_NEON) && defined(__aarch64__)
 // adapted from arm limited optimized routine
@@ -719,9 +942,9 @@ inline static float ggml_silu_backward_f32(float x, float dy) {
 }
 inline static ggml_fp16_t ggml_silu_backward_f16(ggml_fp16_t x, ggml_fp16_t dy) {
-    const float v = GGML_FP16_TO_FP32(x);
+    const float v = GGML_CPU_FP16_TO_FP32(x);
     const float s = 1.0f/(1.0f + expf(-v));
-    return GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(dy)*s*(1.0f + v*(1.0f - s)));
+    return
 #if defined(__ARM_NEON) && defined(__aarch64__)
 
 // adapted from arm limited optimized routine
@@ -719,9 +942,9 @@ inline static float ggml_silu_backward_f32(float x, float dy) {
 }
 
 inline static ggml_fp16_t ggml_silu_backward_f16(ggml_fp16_t x, ggml_fp16_t dy) {
-    const float v = GGML_FP16_TO_FP32(x);
+    const float v = GGML_CPU_FP16_TO_FP32(x);
     const float s = 1.0f/(1.0f + expf(-v));
-    return GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(dy)*s*(1.0f + v*(1.0f - s)));
+    return GGML_CPU_FP32_TO_FP16(GGML_CPU_FP16_TO_FP32(dy)*s*(1.0f + v*(1.0f - s)));
 }
 
 inline static void ggml_vec_silu_backward_f32(const int n, float * dx, const float * x, const float * dy) {
@@ -736,6 +959,100 @@ inline static void ggml_vec_silu_backward_f16(const int n, ggml_fp16_t * dx, con
     }
 }
 
+inline static void ggml_vec_reglu_f32 (const int n, float * y, const float * x, const float * g) {
+    for (int i = 0; i < n; ++i) {
+        y[i] = (x[i] > 0.f) ? x[i] * g[i] : 0.f;
+    }
+}
+
+inline static void ggml_vec_reglu_f16 (const int n, ggml_fp16_t * y, const ggml_fp16_t * x, const ggml_fp16_t * g) {
+    for (int i = 0; i < n; ++i) {
+        float v = GGML_CPU_FP16_TO_FP32(x[i]);
+        y[i] = GGML_CPU_FP32_TO_FP16((v > 0.f) ? v * GGML_CPU_FP16_TO_FP32(g[i]) : 0.f);
+    }
+}
+
+#ifdef GGML_GELU_FP16
+inline static void ggml_vec_geglu_f32(const int n, float * y, const float * x, const float * g) {
+    uint16_t t;
+    for (int i = 0; i < n; ++i) {
+        if (x[i] <= -10.0f) {
+            y[i] = 0.0f;
+        } else if (x[i] >= 10.0f) {
+            y[i] = x[i] * g[i];
+        } else {
+            ggml_fp16_t fp16 = GGML_CPU_FP32_TO_FP16(x[i]);
+            memcpy(&t, &fp16, sizeof(uint16_t));
+            y[i] = GGML_CPU_FP16_TO_FP32(ggml_table_gelu_f16[t]) * g[i];
+        }
+    }
+}
+#else
+inline static void ggml_vec_geglu_f32(const int n, float * y, const float * x, const float * g) {
+    for (int i = 0; i < n; ++i) {
+        y[i] = ggml_gelu_f32(x[i]) * g[i];
+    }
+}
+#endif
+
+inline static void ggml_vec_geglu_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x, const ggml_fp16_t * g) {
+    const uint16_t * i16 = (const uint16_t *) x;
+    for (int i = 0; i < n; ++i) {
+        float v = GGML_CPU_FP16_TO_FP32(g[i]);
+        y[i] = GGML_CPU_FP32_TO_FP16(GGML_CPU_FP16_TO_FP32(ggml_table_gelu_f16[i16[i]]) * v);
+    }
+}
+
+void ggml_vec_swiglu_f32(const int n, float * y, const float * x, const float * g);
+
+inline static void ggml_vec_swiglu_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x, const ggml_fp16_t * g) {
+    for (int i = 0; i < n; ++i) {
+        float xi = GGML_CPU_FP16_TO_FP32(x[i]);
+        float gi = GGML_CPU_FP16_TO_FP32(g[i]);
+        y[i] = GGML_CPU_FP32_TO_FP16((xi/(1.0f + expf(-xi))) * gi);
+    }
+}
+
+inline static void ggml_vec_geglu_erf_f32(const int n, float * y, const float * x, const float * g) {
+    for (int i = 0; i < n; ++i) {
+        float xi = x[i];
+        y[i] = 0.5f * xi * (1.0f + erff(xi*SQRT_2_INV)) * g[i];
+    }
+}
+
+inline static void ggml_vec_geglu_erf_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x, const ggml_fp16_t * g) {
+    for (int i = 0; i < n; ++i) {
+        float xi = GGML_CPU_FP16_TO_FP32(x[i]);
+        float gi = GGML_CPU_FP16_TO_FP32(g[i]);
+        y[i] = GGML_CPU_FP32_TO_FP16(0.5f * xi * (1.0f + erff(xi*SQRT_2_INV)) * gi);
+    }
+}
+
+#ifdef GGML_GELU_QUICK_FP16
+inline static void ggml_vec_geglu_quick_f32(const int n, float * y, const float * x, const float * g) {
+    uint16_t t;
+    for (int i = 0; i < n; ++i) {
+        ggml_fp16_t fp16 = GGML_CPU_FP32_TO_FP16(x[i]);
+        memcpy(&t, &fp16, sizeof(uint16_t));
+        y[i] = GGML_CPU_FP16_TO_FP32(ggml_table_gelu_quick_f16[t]) * g[i];
+    }
+}
+#else
+inline static void ggml_vec_geglu_quick_f32(const int n, float * y, const float * x, const float * g) {
+    for (int i = 0; i < n; ++i) {
+        y[i] = ggml_gelu_quick_f32(x[i]) * g[i];
+    }
+}
+#endif
+
+inline static void ggml_vec_geglu_quick_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x, const ggml_fp16_t * g) {
+    const uint16_t * i16 = (const uint16_t *) x;
+    for (int i = 0; i < n; ++i) {
+        float v = GGML_CPU_FP16_TO_FP32(g[i]);
+        y[i] = GGML_CPU_FP32_TO_FP16(GGML_CPU_FP16_TO_FP32(ggml_table_gelu_quick_f16[i16[i]]) * v);
+    }
+}
+
 inline static void ggml_vec_sum_f32(const int n, float * s, const float * x) {
 #ifndef GGML_USE_ACCELERATE
     ggml_float sum = 0.0;
@@ -759,7 +1076,7 @@ inline static void ggml_vec_sum_f32_ggf(const int n, ggml_float * s, const float
 inline static void ggml_vec_sum_f16_ggf(const int n, float * s, const ggml_fp16_t * x) {
     float sum = 0.0f;
     for (int i = 0; i < n; ++i) {
-        sum += GGML_FP16_TO_FP32(x[i]);
+        sum += GGML_CPU_FP16_TO_FP32(x[i]);
     }
     *s = sum;
 }
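The ggml_vec_*glu_* helpers above all fuse a gated linear unit: the activation of one projection half is multiplied elementwise by the gate half g. A minimal C++ reference (mine, not from the patch) for the swiglu and reglu cases:

#include <cmath>
#include <cstdio>

static void vec_swiglu(int n, float * y, const float * x, const float * g) {
    for (int i = 0; i < n; ++i) {
        y[i] = (x[i] / (1.0f + expf(-x[i]))) * g[i];  // silu(x) * g
    }
}

static void vec_reglu(int n, float * y, const float * x, const float * g) {
    for (int i = 0; i < n; ++i) {
        y[i] = (x[i] > 0.0f ? x[i] : 0.0f) * g[i];    // relu(x) * g
    }
}

int main() {
    const float x[4] = {-1.0f, 0.5f, 1.0f, 2.0f};
    const float g[4] = { 1.0f, 1.0f, 2.0f, 0.5f};
    float y[4];
    vec_swiglu(4, y, x, g);
    printf("swiglu: %.4f %.4f %.4f %.4f\n", y[0], y[1], y[2], y[3]);
    vec_reglu(4, y, x, g);
    printf("reglu:  %.4f %.4f %.4f %.4f\n", y[0], y[1], y[2], y[3]);
}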
diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/CMakeLists.txt b/ml/backend/ggml/ggml/src/ggml-cuda/CMakeLists.txt
index c9ff4aa32..98ed29bc9 100644
--- a/ml/backend/ggml/ggml/src/ggml-cuda/CMakeLists.txt
+++ b/ml/backend/ggml/ggml/src/ggml-cuda/CMakeLists.txt
@@ -102,12 +102,12 @@ if (CUDAToolkit_FOUND)
     if (GGML_STATIC)
         if (WIN32)
             # As of 12.3.1 CUDA Toolkit for Windows does not offer a static cublas library
-            target_link_libraries(ggml-cuda PRIVATE CUDA::cudart_static CUDA::cublas CUDA::cublasLt)
+            target_link_libraries(ggml-cuda PRIVATE CUDA::cudart_static CUDA::cublas)
         else ()
-            target_link_libraries(ggml-cuda PRIVATE CUDA::cudart_static CUDA::cublas_static CUDA::cublasLt_static)
+            target_link_libraries(ggml-cuda PRIVATE CUDA::cudart_static CUDA::cublas_static)
         endif()
     else()
-        target_link_libraries(ggml-cuda PRIVATE CUDA::cudart CUDA::cublas CUDA::cublasLt)
+        target_link_libraries(ggml-cuda PRIVATE CUDA::cudart CUDA::cublas)
     endif()
 
     if (GGML_CUDA_NO_VMM)
diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/add-id.cu b/ml/backend/ggml/ggml/src/ggml-cuda/add-id.cu
new file mode 100644
index 000000000..8bed62ac9
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-cuda/add-id.cu
@@ -0,0 +1,58 @@
+#include "add-id.cuh"
+
+static __global__ void add_id_kernel(
+        const float * src0, const float * src1, const int32_t * src2, float * dst,
+        int64_t ne0, int64_t ne1,
+        size_t nb01, size_t nb02,
+        size_t nb11,
+        size_t nb21
+    ) {
+
+    const int64_t i1 = blockIdx.x;
+    const int64_t i2 = blockIdx.y;
+
+    const int i11 = *(int32_t *) ((char *) src2 + i1*sizeof(int32_t) + i2*nb21);
+
+    const size_t nb1 = ne0 * sizeof(float);
+    const size_t nb2 = ne1 * nb1;
+
+    float * dst_row = (float *)((char *)dst + i1*nb1 + i2*nb2);
+    const float * src0_row = (const float *)((char *)src0 + i1*nb01 + i2*nb02);
+    const float * src1_row = (const float *)((char *)src1 + i11*nb11);
+
+    for (int64_t i0 = threadIdx.x; i0 < ne0; i0 += blockDim.x) {
+        dst_row[i0] = src0_row[i0] + src1_row[i0];
+    }
+}
+
+void ggml_cuda_op_add_id(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
+    const ggml_tensor * src0 = dst->src[0];
+    const ggml_tensor * src1 = dst->src[1];
+    const ggml_tensor * src2 = dst->src[2];
+
+    GGML_TENSOR_TERNARY_OP_LOCALS
+
+    GGML_ASSERT(dst->type == GGML_TYPE_F32);
+    GGML_ASSERT(src0->type == GGML_TYPE_F32);
+    GGML_ASSERT(src1->type == GGML_TYPE_F32);
+    GGML_ASSERT(src2->type == GGML_TYPE_I32);
+
+    GGML_ASSERT(nb00 == sizeof(float));
+    GGML_ASSERT(nb10 == sizeof(float));
+    GGML_ASSERT(nb20 == sizeof(int32_t));
+
+    const float * src0_d = (const float *)src0->data;
+    const float * src1_d = (const float *)src1->data;
+    const int32_t * src2_d = (const int32_t *)src2->data;
+    float * dst_d = (float *)dst->data;
+
+    int threads = std::min((int)ne00, 768); // cols
+    dim3 blocks(ne01, ne02); // n_experts_used, n_tokens
+    add_id_kernel<<<blocks, threads, 0, ctx.stream()>>>(
+        src0_d, src1_d, src2_d, dst_d,
+        ne0, ne1,
+        nb01, nb02,
+        nb11,
+        nb21
+    );
+}
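add_id_kernel adds a bias row of src1, selected per (expert slot, token) by the ids stored in src2, to each row of src0 — one thread block per row, threads striding over columns. A host-side C++ reference of the same computation (my sketch; fully contiguous layout assumed):

#include <cstdint>

static void add_id_ref(const float * src0, const float * src1, const int32_t * src2,
                       float * dst, int64_t ne00, int64_t ne01, int64_t ne02) {
    for (int64_t i2 = 0; i2 < ne02; ++i2) {            // token
        for (int64_t i1 = 0; i1 < ne01; ++i1) {        // expert slot
            const int32_t id = src2[i2*ne01 + i1];     // selected expert row in src1
            for (int64_t i0 = 0; i0 < ne00; ++i0) {
                dst[(i2*ne01 + i1)*ne00 + i0] =
                    src0[(i2*ne01 + i1)*ne00 + i0] + src1[id*ne00 + i0];
            }
        }
    }
}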
diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/add-id.cuh b/ml/backend/ggml/ggml/src/ggml-cuda/add-id.cuh
new file mode 100644
index 000000000..30b1721ac
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-cuda/add-id.cuh
@@ -0,0 +1,3 @@
+#include "common.cuh"
+
+void ggml_cuda_op_add_id(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/common.cuh b/ml/backend/ggml/ggml/src/ggml-cuda/common.cuh
index 5b9a0fe3f..2e5d48797 100644
--- a/ml/backend/ggml/ggml/src/ggml-cuda/common.cuh
+++ b/ml/backend/ggml/ggml/src/ggml-cuda/common.cuh
@@ -1,6 +1,7 @@
 #pragma once
 
 #include "ggml.h"
+#include "ggml-impl.h"
 #include "ggml-cuda.h"
 
 #include <cstdint>
@@ -19,10 +20,10 @@
 #endif
 #include "ggml-common.h"
 
-#include <cstdio>
 #include <array>
 #include <cassert>
 #include <cfloat>
+#include <cstdio>
 #include <string>
 #include <vector>
 
@@ -56,7 +57,7 @@
 #define GGML_CUDA_CC_GCN4       (GGML_CUDA_CC_OFFSET_AMD + 0x803)  // Tonga, Fiji, Polaris, minimum for fast fp16
 #define GGML_CUDA_CC_VEGA       (GGML_CUDA_CC_OFFSET_AMD + 0x900)  // Vega56/64, minimum for fp16 dual issue
 #define GGML_CUDA_CC_VEGA20     (GGML_CUDA_CC_OFFSET_AMD + 0x906)  // MI50/Radeon VII, minimum for dp4a
-#define GGML_CUDA_CC_CDNA       (GGML_CUDA_CC_OFFSET_AMD + 0x908)  // MI100, minimum for MFMA, acc registers
+#define GGML_CUDA_CC_CDNA1      (GGML_CUDA_CC_OFFSET_AMD + 0x908)  // MI100, minimum for MFMA, acc registers
 #define GGML_CUDA_CC_CDNA2      (GGML_CUDA_CC_OFFSET_AMD + 0x910)  // MI210, minimum acc register renaming
 #define GGML_CUDA_CC_CDNA3      (GGML_CUDA_CC_OFFSET_AMD + 0x942)  // MI300
 
@@ -72,15 +73,14 @@
 #define GGML_CUDA_CC_IS_RDNA2(cc) (cc >= GGML_CUDA_CC_RDNA2 && cc < GGML_CUDA_CC_RDNA3)
 #define GGML_CUDA_CC_IS_RDNA3(cc) (cc >= GGML_CUDA_CC_RDNA3 && cc < GGML_CUDA_CC_RDNA4)
 #define GGML_CUDA_CC_IS_RDNA4(cc) (cc >= GGML_CUDA_CC_RDNA4)
-#define GGML_CUDA_CC_IS_GCN(cc)   (cc > GGML_CUDA_CC_OFFSET_AMD && cc < GGML_CUDA_CC_CDNA)
-#define GGML_CUDA_CC_IS_CDNA(cc)  (cc >= GGML_CUDA_CC_CDNA && cc < GGML_CUDA_CC_RDNA1)
+#define GGML_CUDA_CC_IS_GCN(cc)   (cc > GGML_CUDA_CC_OFFSET_AMD && cc < GGML_CUDA_CC_CDNA1)
+#define GGML_CUDA_CC_IS_CDNA(cc)  (cc >= GGML_CUDA_CC_CDNA1 && cc < GGML_CUDA_CC_RDNA1)
+#define GGML_CUDA_CC_IS_CDNA3(cc) (cc >= GGML_CUDA_CC_CDNA3 && cc < GGML_CUDA_CC_RDNA1)
 
 // Moore Threads
-#define GGML_CUDA_MUSA_ARCH_IS_QY1 (__MUSA_ARCH__ <= 210)
-
-#define GGML_CUDA_CC_QY1  (GGML_CUDA_CC_OFFSET_MTHREADS + 0x210) // MTT S80, MTT S3000
-#define GGML_CUDA_CC_QY2  (GGML_CUDA_CC_OFFSET_MTHREADS + 0x220) // MTT S4000
-#define GGML_CUDA_CC_NG   (GGML_CUDA_CC_OFFSET_MTHREADS + 0x310) // TBD
+#define GGML_CUDA_CC_QY1 (GGML_CUDA_CC_OFFSET_MTHREADS + 0x210) // MTT S80, MTT S3000
+#define GGML_CUDA_CC_QY2 (GGML_CUDA_CC_OFFSET_MTHREADS + 0x220) // MTT S4000
+#define GGML_CUDA_CC_NG  (GGML_CUDA_CC_OFFSET_MTHREADS + 0x310) // TBD
 
 #define GGML_CUDA_CC_IS_MTHREADS(cc) (cc >= GGML_CUDA_CC_OFFSET_MTHREADS && cc < GGML_CUDA_CC_OFFSET_AMD)
 #define GGML_CUDA_CC_IS_QY1(cc) (cc >= GGML_CUDA_CC_QY1 && cc < GGML_CUDA_CC_QY2)
@@ -168,7 +168,7 @@ void ggml_cuda_error(const char * stmt, const char * func, const char * file, in
 
 #define CUBLAS_CHECK(err) CUDA_CHECK_GEN(err, CUBLAS_STATUS_SUCCESS, cublas_get_error_str)
 
-#if !defined(GGML_USE_HIP)
+#if !defined(GGML_USE_HIP) && !defined(GGML_CUDA_NO_VMM)
 static const char * cu_get_error_str(CUresult err) {
     const char * err_str;
     cuGetErrorString(err, &err_str);
@@ -177,6 +177,23 @@ static const char * cu_get_error_str(CUresult err) {
 #define CU_CHECK(err) CUDA_CHECK_GEN(err, CUDA_SUCCESS, cu_get_error_str)
 #endif
 
+#if !defined(GGML_USE_HIP) && !defined(GGML_USE_MUSA)
+#   define CUDA_SET_SHARED_MEMORY_LIMIT(kernel, nbytes) \
+    do { \
+        static bool shared_memory_limit_raised[GGML_CUDA_MAX_DEVICES] = { false }; \
+        const int id = ggml_cuda_get_device(); \
+        if (!shared_memory_limit_raised[id]) { \
+            CUDA_CHECK(cudaFuncSetAttribute(kernel, cudaFuncAttributeMaxDynamicSharedMemorySize, nbytes)); \
+            shared_memory_limit_raised[id] = true; \
+        } \
+    } while (0)
+#else
+#   define CUDA_SET_SHARED_MEMORY_LIMIT(kernel, nbytes) \
+    do { \
+        GGML_UNUSED(nbytes); \
+    } while (0)
+#endif // !defined(GGML_USE_HIP) && !defined(GGML_USE_MUSA)
+
 #if CUDART_VERSION >= 11010 || defined(GGML_USE_MUSA)
 #define GGML_CUDA_ASSUME(x) __builtin_assume(x)
 #else
@@ -195,33 +212,41 @@ typedef float2 dfloat2;
 #define GGML_USE_VMM
 #endif // (!defined(GGML_USE_HIP) && !defined(GGML_CUDA_NO_VMM)) || (defined(GGML_USE_HIP) && !defined(GGML_HIP_NO_VMM))
 
-#if (defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) || __CUDA_ARCH__ >= GGML_CUDA_CC_PASCAL
+#if defined(GGML_USE_HIP) || __CUDA_ARCH__ >= GGML_CUDA_CC_PASCAL
 #define FP16_AVAILABLE
-#endif // (defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) || __CUDA_ARCH__ >= GGML_CUDA_CC_PASCAL
+#endif // defined(GGML_USE_HIP) || __CUDA_ARCH__ >= GGML_CUDA_CC_PASCAL
 
 #if defined(FP16_AVAILABLE) && __CUDA_ARCH__ != 610
 #define FAST_FP16_AVAILABLE
 #endif // defined(FP16_AVAILABLE) && __CUDA_ARCH__ != 610
 
-#if !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= GGML_CUDA_CC_VOLTA
+#if (!defined(GGML_USE_HIP) && __CUDA_ARCH__ >= GGML_CUDA_CC_VOLTA) || defined(GGML_USE_MUSA)
 #define FP16_MMA_AVAILABLE
-#endif // !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= GGML_CUDA_CC_VOLTA
+#endif // (!defined(GGML_USE_HIP) && __CUDA_ARCH__ >= GGML_CUDA_CC_VOLTA) || defined(GGML_USE_MUSA)
 
-#if defined(GGML_HIP_ROCWMMA_FATTN) && (defined(CDNA) || defined(RDNA3) || defined(RDNA4))
+#if defined(GGML_HIP_ROCWMMA_FATTN) && (defined(CDNA) || defined(RDNA3) || (defined(GGML_HIP_ROCWMMA_FATTN_GFX12) && defined(RDNA4)))
 #define FP16_MMA_AVAILABLE
-#endif // defined(GGML_HIP_ROCWMMA_FATTN) && (defined(CDNA) || defined(RDNA3) || defined(RDNA4))
+#endif // defined(GGML_HIP_ROCWMMA_FATTN) && (defined(CDNA) || defined(RDNA3) || (defined(GGML_HIP_ROCWMMA_FATTN_GFX12) && defined(RDNA4)))
 
-#if !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= GGML_CUDA_CC_TURING
-#define NEW_MMA_AVAILABLE
-#endif // !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= GGML_CUDA_CC_TURING
+#if defined(GGML_USE_HIP) && defined(CDNA) && !defined(GGML_HIP_NO_MMQ_MFMA)
+#define AMD_MFMA_AVAILABLE
+#endif // defined(GGML_USE_HIP) && defined(CDNA) && !defined(GGML_HIP_NO_MMQ_MFMA)
 
-#if !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= GGML_CUDA_CC_AMPERE
+#if !defined(GGML_USE_HIP) && __CUDA_ARCH__ >= GGML_CUDA_CC_TURING
+#define TURING_MMA_AVAILABLE
+#endif // !defined(GGML_USE_HIP) && __CUDA_ARCH__ >= GGML_CUDA_CC_TURING
+
+#if !defined(GGML_USE_HIP) && __CUDA_ARCH__ >= GGML_CUDA_CC_AMPERE
+#define AMPERE_MMA_AVAILABLE
+#endif // !defined(GGML_USE_HIP) && __CUDA_ARCH__ >= GGML_CUDA_CC_AMPERE
+
+#if !defined(GGML_USE_HIP) && __CUDA_ARCH__ >= GGML_CUDA_CC_AMPERE
 #define CP_ASYNC_AVAILABLE
-#endif // !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= GGML_CUDA_CC_AMPERE
+#endif // !defined(GGML_USE_HIP) && __CUDA_ARCH__ >= GGML_CUDA_CC_AMPERE
 
-#if !defined(GGML_CUDA_NO_FA) && !(defined(GGML_USE_MUSA) && GGML_CUDA_MUSA_ARCH_IS_QY1)
+#if !defined(GGML_CUDA_NO_FA) && !(defined(GGML_USE_MUSA) && __MUSA_ARCH__ < 220)
 #define FLASH_ATTN_AVAILABLE
-#endif // !defined(GGML_CUDA_NO_FA) && !(defined(GGML_USE_MUSA) && GGML_CUDA_MUSA_ARCH_IS_QY1)
+#endif // !defined(GGML_CUDA_NO_FA) && !(defined(GGML_USE_MUSA) && __MUSA_ARCH__ < 220)
 
 static bool fp16_available(const int cc) {
     return ggml_cuda_highest_compiled_arch(cc) >= GGML_CUDA_CC_PASCAL;
@@ -233,54 +258,87 @@ static bool fast_fp16_available(const int cc) {
 
 // To be used for feature selection of external libraries, e.g. cuBLAS.
 static bool fast_fp16_hardware_available(const int cc) {
-    return (GGML_CUDA_CC_IS_NVIDIA(cc) && cc >= GGML_CUDA_CC_PASCAL && cc != 610) || GGML_CUDA_CC_IS_AMD(cc);
+    return (GGML_CUDA_CC_IS_NVIDIA(cc) && cc >= GGML_CUDA_CC_PASCAL && cc != 610) || GGML_CUDA_CC_IS_AMD(cc) ||
+           (GGML_CUDA_CC_IS_MTHREADS(cc) && cc >= GGML_CUDA_CC_QY2);
 }
 
 // Any FP16 tensor core instructions are available for ggml code.
 static bool fp16_mma_available(const int cc) {
-#if defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__) && !defined(GGML_HIP_ROCWMMA_FATTN)
+#if defined(GGML_USE_HIP) && !defined(GGML_HIP_ROCWMMA_FATTN)
     return false;
 #else
-    return (GGML_CUDA_CC_IS_NVIDIA(cc) && ggml_cuda_highest_compiled_arch(cc) >= GGML_CUDA_CC_VOLTA) ||
-        GGML_CUDA_CC_IS_CDNA(cc) || GGML_CUDA_CC_IS_RDNA3(cc) || GGML_CUDA_CC_IS_RDNA4(cc);
-#endif // defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__) && !defined(GGML_HIP_ROCWMMA_FATTN)
+    if ((GGML_CUDA_CC_IS_NVIDIA(cc) && ggml_cuda_highest_compiled_arch(cc) >= GGML_CUDA_CC_VOLTA) ||
+        GGML_CUDA_CC_IS_CDNA(cc) || GGML_CUDA_CC_IS_RDNA3(cc) ||
+        GGML_CUDA_CC_IS_MTHREADS(cc)) {
+        return true;
+    } else if (GGML_CUDA_CC_IS_RDNA4(cc)) {
+#if defined(GGML_HIP_ROCWMMA_FATTN) && defined(GGML_HIP_ROCWMMA_FATTN_GFX12)
+        return true;
+#else
+        return false;
+#endif // defined(GGML_HIP_ROCWMMA_FATTN) && defined(GGML_HIP_ROCWMMA_FATTN_GFX12)
+    } else {
+        return false;
+    }
+#endif // defined(GGML_USE_HIP) && !defined(GGML_HIP_ROCWMMA_FATTN)
 }
 
 // To be used for feature selection of external libraries, e.g. cuBLAS.
 static bool fp16_mma_hardware_available(const int cc) {
     return (GGML_CUDA_CC_IS_NVIDIA(cc) && cc >= GGML_CUDA_CC_VOLTA) ||
-        GGML_CUDA_CC_IS_CDNA(cc) || GGML_CUDA_CC_IS_RDNA3(cc) || GGML_CUDA_CC_IS_RDNA4(cc);
+           GGML_CUDA_CC_IS_CDNA(cc) || GGML_CUDA_CC_IS_RDNA3(cc) || GGML_CUDA_CC_IS_RDNA4(cc) ||
+           (GGML_CUDA_CC_IS_MTHREADS(cc) && cc >= GGML_CUDA_CC_QY2);
+}
+
+static bool bf16_mma_hardware_available(const int cc) {
+    return (GGML_CUDA_CC_IS_NVIDIA(cc) && cc >= GGML_CUDA_CC_AMPERE) || GGML_CUDA_CC_IS_CDNA(cc) || cc >= GGML_CUDA_CC_RDNA3;
+}
+
+static bool fp32_mma_hardware_available(const int cc) {
+    return GGML_CUDA_CC_IS_CDNA(cc);
+}
+
+static bool amd_mfma_available(const int cc) {
+#if !defined(GGML_HIP_NO_MMQ_MFMA)
+    return GGML_CUDA_CC_IS_CDNA(cc);
+#else
+    return false;
+#endif //!defined(GGML_HIP_NO_MMQ_MFMA)
 }
 
 // Volta technically had FP16 tensor cores but they work very differently compared to Turing and later.
-static bool new_mma_available(const int cc) {
+static bool turing_mma_available(const int cc) {
     return GGML_CUDA_CC_IS_NVIDIA(cc) && ggml_cuda_highest_compiled_arch(cc) >= GGML_CUDA_CC_TURING;
 }
 
+static bool ampere_mma_available(const int cc) {
+    return cc < GGML_CUDA_CC_OFFSET_AMD && ggml_cuda_highest_compiled_arch(cc) >= GGML_CUDA_CC_AMPERE;
+}
+
 static bool cp_async_available(const int cc) {
     return cc < GGML_CUDA_CC_OFFSET_AMD && ggml_cuda_highest_compiled_arch(cc) >= GGML_CUDA_CC_AMPERE;
 }
 
 static constexpr __device__ int ggml_cuda_get_physical_warp_size() {
-#if defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)
-    return __AMDGCN_WAVEFRONT_SIZE;
+#if defined(GGML_USE_HIP) && (defined(__GFX9__) || defined(__GFX8__))
    return 64;
 #else
     return 32;
-#endif // defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)
+#endif // defined(GGML_USE_HIP) && (defined(__GFX9__) || defined(__GFX8__))
 }
 
 [[noreturn]]
 static __device__ void no_device_code(
     const char * file_name, const int line, const char * function_name, const int arch, const char * arch_list) {
 
-#if defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)
+#if defined(GGML_USE_HIP)
     printf("%s:%d: ERROR: HIP kernel %s has no device code compatible with HIP arch %d.\n",
            file_name, line, function_name, arch);
     GGML_UNUSED(arch_list);
 #else
     printf("%s:%d: ERROR: CUDA kernel %s has no device code compatible with CUDA arch %d. ggml-cuda.cu was compiled for: %s\n",
            file_name, line, function_name, arch, arch_list);
-#endif // defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)
+#endif // defined(GGML_USE_HIP)
     __trap();
 
     GGML_UNUSED(no_device_code); // suppress unused function warning
@@ -317,7 +375,7 @@ struct ggml_cuda_unroll<1> {
 
 template <int width = WARP_SIZE>
 static __device__ __forceinline__ int warp_reduce_sum(int x) {
-#if !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= GGML_CUDA_CC_AMPERE
+#if !defined(GGML_USE_HIP) && __CUDA_ARCH__ >= GGML_CUDA_CC_AMPERE
     return __reduce_add_sync(0xffffffff, x);
 #else
 #pragma unroll
@@ -325,7 +383,7 @@ static __device__ __forceinline__ int warp_reduce_sum(int x) {
         x += __shfl_xor_sync(0xffffffff, x, offset, width);
     }
     return x;
-#endif // !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= GGML_CUDA_CC_AMPERE
+#endif // !defined(GGML_USE_HIP) && __CUDA_ARCH__ >= GGML_CUDA_CC_AMPERE
 }
 
 template <int width = WARP_SIZE>
@@ -382,6 +440,20 @@ static __global__ void reduce_rows_f32(const float * x, float * dst, const int n
     dst[row] = norm ? sum / ncols : sum;
 }
 
+template <int width = WARP_SIZE>
+static __device__ __forceinline__ int warp_reduce_all(int x) {
+#ifdef GGML_USE_HIP
+#pragma unroll
+    for (int offset = width/2; offset > 0; offset >>= 1) {
+        x = x && __shfl_xor_sync(0xffffffff, x, offset, width);
+    }
+    return x;
+#else
+    static_assert(width == WARP_SIZE, "width != WARP_SIZE not implemented");
+    return __all_sync(0xffffffff, x);
+#endif // GGML_USE_HIP
+}
+
 template <int width = WARP_SIZE>
 static __device__ __forceinline__ float warp_reduce_max(float x) {
 #pragma unroll
@@ -394,11 +466,11 @@ static __device__ __forceinline__ float warp_reduce_max(float x) {
 
 static __device__ __forceinline__ half ggml_cuda_hmax(const half a, const half b) {
 #ifdef FP16_AVAILABLE
-#if !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) && CUDART_VERSION < CUDART_HMAX
+#if !defined(GGML_USE_HIP) && CUDART_VERSION < CUDART_HMAX
    return __float2half(fmaxf(__half2float(a), __half2float(b)));
 #else
    return __hmax(a, b);
-#endif // !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) && CUDART_VERSION < CUDART_HMAX
+#endif // !defined(GGML_USE_HIP) && CUDART_VERSION < CUDART_HMAX
 
 #else
    NO_DEVICE_CODE;
@@ -426,7 +498,7 @@ static __device__ __forceinline__ half2 ggml_cuda_hmax2(const half2 a, const hal
 
 template <int width = WARP_SIZE>
 static __device__ __forceinline__ half2 warp_reduce_max(half2 x) {
-#if !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= GGML_CUDA_CC_PASCAL || (defined(GGML_USE_HIP) && HIP_VERSION >= 50700000)
+#if !defined(GGML_USE_HIP) && __CUDA_ARCH__ >= GGML_CUDA_CC_PASCAL || (defined(GGML_USE_HIP) && HIP_VERSION >= 50700000)
 #pragma unroll
    for (int offset = width/2; offset > 0; offset >>= 1) {
        x = ggml_cuda_hmax2(x, __shfl_xor_sync(0xffffffff, x, offset, width));
@@ -435,7 +507,7 @@ static __device__ __forceinline__ half2 warp_reduce_max(half2 x) {
 #else
    GGML_UNUSED(x);
    NO_DEVICE_CODE;
-#endif // !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= GGML_CUDA_CC_PASCAL || (defined(GGML_USE_HIP) && HIP_VERSION >= 50700000)
+#endif // !defined(GGML_USE_HIP) && __CUDA_ARCH__ >= GGML_CUDA_CC_PASCAL || (defined(GGML_USE_HIP) && HIP_VERSION >= 50700000)
 }
 
 #if CUDART_VERSION < CUDART_HMASK
@@ -447,7 +519,7 @@ static __device__ __forceinline__ uint32_t __hgt2_mask(const half2 a, const half
 #endif // CUDART_VERSION < CUDART_HMASK
 
 static __device__ __forceinline__ int ggml_cuda_dp4a(const int a, const int b, int c) {
-#if defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)
+#if defined(GGML_USE_HIP)
 #if defined(CDNA) || defined(RDNA2) || defined(__gfx906__)
     c = __builtin_amdgcn_sdot4(a, b, c, false);
 #elif defined(RDNA3) || defined(RDNA4)
@@ -473,7 +545,7 @@ static __device__ __forceinline__ int ggml_cuda_dp4a(const int a, const int b, i
 #endif
     return c;
 
-#else // defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)
+#else // defined(GGML_USE_HIP)
 
 #if __CUDA_ARCH__ >= GGML_CUDA_CC_DP4A || defined(GGML_USE_MUSA)
     return __dp4a(a, b, c);
@@ -483,11 +555,26 @@ static __device__ __forceinline__ int ggml_cuda_dp4a(const int a, const int b, i
     return c + a8[0]*b8[0] + a8[1]*b8[1] + a8[2]*b8[2] + a8[3]*b8[3];
 #endif // __CUDA_ARCH__ >= GGML_CUDA_CC_DP4A || defined(GGML_USE_MUSA)
 
-#endif // defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)
+#endif // defined(GGML_USE_HIP)
 }
 
-// TODO: move to ggml-common.h
-static constexpr __device__ int8_t kvalues_iq4nl[16] = {-127, -104, -83, -65, -49, -35, -22, -10, 1, 13, 25, 38, 53, 69, 89, 113};
+static __device__ __forceinline__ float ggml_cuda_e8m0_to_fp32(uint8_t x) {
+#if CUDART_VERSION >= 12080
+    const nv_bfloat16 e = __nv_cvt_e8m0_to_bf16raw(x);
+    return (float) e;
+#else
+    uint32_t bits;
+    if (x == 0) {
+        bits = 0x00400000;
+    } else {
+        bits = (uint32_t) x << 23;
+    }
+
+    float result;
+    memcpy(&result, &bits, sizeof(float));
+    return result;
+#endif // CUDART_VERSION >= 12080
+}
 
 typedef void (*dequantize_kernel_t)(const void * vx, const int64_t ib, const int iqs, dfloat2 & v);
 
@@ -547,6 +634,13 @@ struct ggml_cuda_type_traits<GGML_TYPE_Q8_0> {
     static constexpr int qi = QI8_0;
 };
 
+template<>
+struct ggml_cuda_type_traits<GGML_TYPE_MXFP4> {
+    static constexpr int qk = QK_MXFP4;
+    static constexpr int qr = QR_MXFP4;
+    static constexpr int qi = QI_MXFP4;
+};
+
 template<>
 struct ggml_cuda_type_traits<GGML_TYPE_Q2_K> {
     static constexpr int qk = QK_K;
@@ -655,6 +749,7 @@ struct ggml_cuda_device_info {
         int     nsm;                // number of streaming multiprocessors
         size_t  smpb;               // max. shared memory per block
         size_t  smpbo;              // max. shared memory per block (with opt-in)
+        bool    integrated;         // Device is integrated as opposed to discrete
         bool    vmm;                // virtual memory support
        size_t  vmm_granularity;    // granularity of virtual memory
        size_t  total_vram;
@@ -731,7 +826,7 @@ struct ggml_tensor_extra_gpu {
 };
 
 
-#if (defined(GGML_CUDA_USE_GRAPHS) || defined(GGML_HIP_GRAPHS))
+#if (defined(GGML_CUDA_USE_GRAPHS) || defined(GGML_HIP_GRAPHS)) || defined(GGML_MUSA_GRAPHS)
 #define USE_CUDA_GRAPH
 #endif
 
@@ -789,21 +884,7 @@ struct ggml_backend_cuda_context {
         name(GGML_CUDA_NAME + std::to_string(device)) {
     }
 
-    ~ggml_backend_cuda_context() {
-        if (copy_event != nullptr) {
-            CUDA_CHECK(cudaEventDestroy(copy_event));
-        }
-        for (int i = 0; i < GGML_CUDA_MAX_DEVICES; ++i) {
-            for (int j = 0; j < GGML_CUDA_MAX_STREAMS; ++j) {
-                if (streams[i][j] != nullptr) {
-                    CUDA_CHECK(cudaStreamDestroy(streams[i][j]));
-                }
-            }
-            if (cublas_handles[i] != nullptr) {
-                CUBLAS_CHECK(cublasDestroy(cublas_handles[i]));
-            }
-        }
-    }
+    ~ggml_backend_cuda_context();
 
     cudaStream_t stream(int device, int stream) {
         if (streams[device][stream] == nullptr) {
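The E8M0 fallback above decodes an exponent-only scale: a value x maps to 2^(x - 127), with x == 0 mapped to 2^-127 via a subnormal bit pattern. A standalone C++ check (not from the patch) of the same bit manipulation:

#include <cstdint>
#include <cstdio>
#include <cstring>

static float e8m0_to_fp32(uint8_t x) {
    uint32_t bits = (x == 0) ? 0x00400000u          // subnormal equal to 2^-127
                             : (uint32_t) x << 23;  // exponent field only, mantissa 0
    float result;
    memcpy(&result, &bits, sizeof(float));
    return result;
}

int main() {
    printf("e8m0 126 -> %g (expect 0.5)\n", e8m0_to_fp32(126));
    printf("e8m0 127 -> %g (expect 1)\n",   e8m0_to_fp32(127));
    printf("e8m0 128 -> %g (expect 2)\n",   e8m0_to_fp32(128));
    printf("e8m0 0   -> %g\n",              e8m0_to_fp32(0));
}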
diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/conv2d-dw.cu b/ml/backend/ggml/ggml/src/ggml-cuda/conv2d-dw.cu
new file mode 100644
index 000000000..7583233b1
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-cuda/conv2d-dw.cu
@@ -0,0 +1,161 @@
+#include "conv2d-dw.cuh"
+
+struct conv_params {
+    int in_w, in_h;
+    int out_w, out_h;
+    int kernel_w, kernel_h;
+    int stride_x, stride_y;
+    int padding_x, padding_y;
+    int dilation_x, dilation_y;
+    int channels, batches;
+};
+
+struct kernel_bounds {
+    int y_min, y_max;
+    int x_min, x_max;
+};
+
+__device__ __forceinline__ kernel_bounds calculate_kernel_bounds(int out_x, int out_y, const conv_params & params) {
+    kernel_bounds bounds;
+    bounds.y_min = max(0, (params.padding_y - out_y * params.stride_y + params.dilation_y - 1) / params.dilation_y);
+    bounds.y_max =
+        min(params.kernel_h,
+            (params.in_h + params.padding_y - out_y * params.stride_y + params.dilation_y - 1) / params.dilation_y);
+    bounds.x_min = max(0, (params.padding_x - out_x * params.stride_x + params.dilation_x - 1) / params.dilation_x);
+    bounds.x_max =
+        min(params.kernel_w,
+            (params.in_w + params.padding_x - out_x * params.stride_x + params.dilation_x - 1) / params.dilation_x);
+    return bounds;
+}
+
+__device__ __forceinline__ int calculate_input_coord(int out_coord, int kern_coord, int stride, int dilation, int padding) {
+    return out_coord * stride + kern_coord * dilation - padding;
+}
+
+struct whcn_layout {
+    __device__ static int input_index(int n, int c, int y, int x, const conv_params & params) {
+        return n * (params.channels * params.in_w * params.in_h) + c * params.in_w * params.in_h + y * params.in_w + x;
+    }
+
+    __device__ static int kernel_index(int c, int ky, int kx, const conv_params & params) {
+        return c * params.kernel_h * params.kernel_w + ky * params.kernel_w + kx;
+    }
+
+    __device__ static int output_index(int n, int c, int y, int x, const conv_params & params) {
+        return n * (params.channels * params.out_w * params.out_h) + c * params.out_w * params.out_h +
+               y * params.out_w + x;
+    }
+
+    __device__ static void unpack_indices(int global_idx, const conv_params & params, int & n, int & c, int & out_y,
+                                          int & out_x) {
+        out_x = global_idx % params.out_w;
+        out_y = (global_idx / params.out_w) % params.out_h;
+        c     = (global_idx / (params.out_w * params.out_h)) % params.channels;
+        n     = global_idx / (params.out_w * params.out_h * params.channels);
+    }
+};
+
+struct cwhn_layout {
+    __device__ static int input_index(int n, int c, int y, int x, const conv_params & params) {
+        return n * (params.channels * params.in_w * params.in_h) + (y * params.in_w + x) * params.channels + c;
+    }
+
+    __device__ static int kernel_index(int c, int ky, int kx, const conv_params & params) {
+        return (ky * params.kernel_w + kx) * params.channels + c;
+    }
+
+    __device__ static int output_index(int n, int c, int y, int x, const conv_params & params) {
+        return n * (params.channels * params.out_w * params.out_h) + y * (params.out_w * params.channels) +
+               x * params.channels + c;
+    }
+
+    __device__ static void unpack_indices(int global_idx, const conv_params & params, int & n, int & c, int & out_y,
+                                          int & out_x) {
+        c     = global_idx % params.channels;
+        out_x = (global_idx / params.channels) % params.out_w;
+        out_y = (global_idx / (params.channels * params.out_w)) % params.out_h;
+        n     = global_idx / (params.channels * params.out_w * params.out_h);
+    }
+};
+
+template <typename T, typename Layout>
+__global__ void conv2d_dw_kernel(const T * __restrict__ input, const T * __restrict__ kernel, T * __restrict__ output,
+                                 const int in_w, const int in_h, const int out_w, const int out_h,
+                                 const int kernel_w, const int kernel_h, const int stride_x, const int stride_y,
+                                 const int padding_x, const int padding_y, const int dilation_x, const int dilation_y,
+                                 const int channels, const int batches) {
+    const int global_idx = blockIdx.x * blockDim.x + threadIdx.x;
+    const int total_elements = batches * channels * out_h * out_w;
+
+    if (global_idx >= total_elements) {
+        return;
+    }
+
+    conv_params params = { in_w,     in_h,      out_w,     out_h,      kernel_w,   kernel_h, stride_x,
+                           stride_y, padding_x, padding_y, dilation_x, dilation_y, channels, batches };
+
+    int batch_idx, channel_idx, out_y_idx, out_x_idx;
+    Layout::unpack_indices(global_idx, params, batch_idx, channel_idx, out_y_idx, out_x_idx);
+
+    T accumulator = 0;
+    kernel_bounds bounds = calculate_kernel_bounds(out_x_idx, out_y_idx, params);
+
+    for (int kern_y = bounds.y_min; kern_y < bounds.y_max; ++kern_y) {
+        int in_y_idx = calculate_input_coord(out_y_idx, kern_y, params.stride_y, params.dilation_y, params.padding_y);
+
+        for (int kern_x = bounds.x_min; kern_x < bounds.x_max; ++kern_x) {
+            int in_x_idx = calculate_input_coord(out_x_idx, kern_x, params.stride_x, params.dilation_x, params.padding_x);
+
+            const T input_val  = input[Layout::input_index(batch_idx, channel_idx, in_y_idx, in_x_idx, params)];
+            const T kernel_val = kernel[Layout::kernel_index(channel_idx, kern_y, kern_x, params)];
+
+            accumulator += input_val * kernel_val;
+        }
+    }
+
+    output[Layout::output_index(batch_idx, channel_idx, out_y_idx, out_x_idx, params)] = accumulator;
+}
+
+void ggml_cuda_op_conv2d_dw(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
+    const ggml_tensor * kernel = dst->src[0];
+    const ggml_tensor * input  = dst->src[1];
+
+    GGML_ASSERT(kernel->type == GGML_TYPE_F32 && input->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32);
+    const float * w_d = (const float *) kernel->data;
+    const float * x_d = (const float *) input->data;
+    float *       y_d = (float *) dst->data;
+
+    const int32_t * p = (const int32_t *) dst->op_params;
+    const int stride_x   = p[0];
+    const int stride_y   = p[1];
+    const int padding_x  = p[2];
+    const int padding_y  = p[3];
+    const int dilation_x = p[4];
+    const int dilation_y = p[5];
+
+    const int in_w     = input->ne[0];
+    const int in_h     = input->ne[1];
+    const int kernel_w = kernel->ne[0];
+    const int kernel_h = kernel->ne[1];
+    const int out_w    = dst->ne[0];
+    const int out_h    = dst->ne[1];
+    const int channels = dst->ne[2];
+    const int batches  = dst->ne[3];
+
+    cudaStream_t st = ctx.stream();
+
+    const int total  = batches * channels * out_h * out_w;
+    const int blocks = (total + CUDA_CONV2D_DW_BLOCK_SIZE - 1) / CUDA_CONV2D_DW_BLOCK_SIZE;
+
+    if (ggml_is_contiguous(input)) {
+        conv2d_dw_kernel<float, whcn_layout><<<blocks, CUDA_CONV2D_DW_BLOCK_SIZE, 0, st>>>(
+            x_d, w_d, y_d, in_w, in_h, out_w, out_h, kernel_w, kernel_h, stride_x, stride_y, padding_x, padding_y,
+            dilation_x, dilation_y, channels, batches);
+    } else if (ggml_is_contiguous_channels(input)) {
+        conv2d_dw_kernel<float, cwhn_layout><<<blocks, CUDA_CONV2D_DW_BLOCK_SIZE, 0, st>>>(
+            x_d, w_d, y_d, in_w, in_h, out_w, out_h, kernel_w, kernel_h, stride_x, stride_y, padding_x, padding_y,
+            dilation_x, dilation_y, channels, batches);
+    } else {
+        GGML_ABORT("Unsupported memory layout for conv_2d_dw");
+    }
+}
diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/conv2d-dw.cuh b/ml/backend/ggml/ggml/src/ggml-cuda/conv2d-dw.cuh
new file mode 100644
index 000000000..b5d5a69d3
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-cuda/conv2d-dw.cuh
@@ -0,0 +1,5 @@
+#pragma once
+#include "common.cuh"
+
+#define CUDA_CONV2D_DW_BLOCK_SIZE 256
+void ggml_cuda_op_conv2d_dw(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/conv2d-transpose.cu b/ml/backend/ggml/ggml/src/ggml-cuda/conv2d-transpose.cu
new file mode 100644
index 000000000..03224e404
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-cuda/conv2d-transpose.cu
@@ -0,0 +1,91 @@
+#include <algorithm>
+
+#include "conv2d-transpose.cuh"
+#include "ggml.h"
+
+__global__ void conv2d_transpose_kernel(const float * __restrict__ input, const half * __restrict__ kernel,
+                                        float * __restrict__ output, const int in_w, const int in_h, const int out_w,
+                                        const int out_h, const int kernel_w, const int kernel_h, const int stride,
+                                        const int c_in, const int c_out, const int batches) {
+    const int global_idx = blockIdx.x * blockDim.x + threadIdx.x;
+
+    const int total_elements = out_w * out_h * c_out * batches;
+
+    if (global_idx >= total_elements) {
+        return;
+    }
+
+    const int out_x_idx = global_idx % out_w;
+    const int out_y_idx = (global_idx / out_w) % out_h;
+    const int c_idx     = (global_idx / (out_w * out_h)) % c_out;
+    const int n_idx     = global_idx / (out_w * out_h * c_out);
+
+    float accumulator = 0;
+    // For each output idx, find the inputs that contribute to it by checking stride alignment and bounds
+
+    for (int c_in_idx = 0; c_in_idx < c_in; c_in_idx++) {
+        for (int kh = 0; kh < kernel_h; ++kh) {
+            int in_y = out_y_idx - kh;
+            if (in_y < 0 || in_y % stride) continue;
+            in_y /= stride;
+            if (in_y >= in_h) continue;
+
+            for (int kw = 0; kw < kernel_w; ++kw) {
+                int in_x = out_x_idx - kw;
+                if (in_x < 0 || in_x % stride) continue;
+                in_x /= stride;
+                if (in_x >= in_w) continue;
+
+                const int input_idx = (in_w * in_h * c_in) * n_idx + (in_w * in_h) * c_in_idx + (in_w) *in_y + in_x;
+                const int kernel_idx =
+                    (kernel_h * kernel_w * c_out) * c_in_idx + (kernel_h * kernel_w) * c_idx + (kernel_w) *kh + kw;
+
+                float input_val = input[input_idx];
+                half  kern_val  = kernel[kernel_idx];
+
+                accumulator += input_val * (float) kern_val;
+            }
+        }
+    }
+
+    output[(out_w * out_h * c_out) * n_idx + (out_w * out_h) * c_idx + (out_w) *out_y_idx + out_x_idx] = accumulator;
+}
+
+//input is (W, H, C_in, N), Kernel is (W, H, C_out, C_in)
+void ggml_cuda_conv_2d_transpose_p0(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
+    const ggml_tensor * kernel = dst->src[0];
+    const ggml_tensor * input  = dst->src[1];
+
+    GGML_ASSERT(kernel->type == GGML_TYPE_F16 && input->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32);
+
+    const float * input_data  = (const float *) input->data;
+    float *       output_data = (float *) dst->data;
+    const half *  kernel_data = (const half *) kernel->data;
+
+    const int input_w      = input->ne[0];
+    const int input_h      = input->ne[1];
+    const int output_w     = dst->ne[0];
+    const int output_h     = dst->ne[1];
+    const int channels_in  = input->ne[2];
+    const int channels_out = kernel->ne[2];
+    const int kernel_w     = kernel->ne[0];
+    const int kernel_h     = kernel->ne[1];
+    const int stride       = dst->op_params[0];
+    const int batches      = input->ne[3];
+
+    GGML_ASSERT(channels_in == kernel->ne[3]);
+    GGML_ASSERT(stride > 0);
+
+    cudaStream_t st = ctx.stream();
+
+    GGML_ASSERT(ggml_is_contiguous(input));
+    GGML_ASSERT(ggml_is_contiguous(kernel));
+    GGML_ASSERT(ggml_is_contiguous(dst));
+
+    const int total  = (output_w * output_h * channels_out * batches);
+    const int blocks = (total + CUDA_CONV2D_TRANSPOSE_BLOCK_SIZE - 1) / CUDA_CONV2D_TRANSPOSE_BLOCK_SIZE;
+
+    conv2d_transpose_kernel<<<blocks, CUDA_CONV2D_TRANSPOSE_BLOCK_SIZE, 0, st>>>(
+        input_data, kernel_data, output_data, input_w, input_h, output_w, output_h, kernel_w, kernel_h, stride,
+        channels_in, channels_out, batches);
+}
diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/conv2d-transpose.cuh b/ml/backend/ggml/ggml/src/ggml-cuda/conv2d-transpose.cuh
new file mode 100644
index 000000000..c9430b248
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-cuda/conv2d-transpose.cuh
@@ -0,0 +1,4 @@
+#include "common.cuh"
+
+#define CUDA_CONV2D_TRANSPOSE_BLOCK_SIZE 256
+void ggml_cuda_conv_2d_transpose_p0(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/convert.cu b/ml/backend/ggml/ggml/src/ggml-cuda/convert.cu
index 0e016ccc0..e3beddbc1 100644
--- a/ml/backend/ggml/ggml/src/ggml-cuda/convert.cu
+++ b/ml/backend/ggml/ggml/src/ggml-cuda/convert.cu
@@ -6,24 +6,33 @@
 #define CUDA_Q8_0_NE_ALIGN 2048
 
 template <int qk, int qr, dequantize_kernel_t dequantize_kernel, typename dst_t>
-static __global__ void dequantize_block(const void * __restrict__ vx, dst_t * __restrict__ y, const int64_t k) {
-    const int64_t i = (int64_t)2*(blockDim.x*blockIdx.x + threadIdx.x);
+static __global__ void dequantize_block(const void * __restrict__ vx, dst_t * __restrict__ y,
+        const int64_t ne00, const int64_t ne01, const int64_t ne02,
+        const int64_t s01, const int64_t s02, const int64_t s03) {
+    const int64_t i00 = 2 * (int64_t(blockDim.x)*blockIdx.x + threadIdx.x);
 
-    if (i >= k) {
+    if (i00 >= ne00) {
         return;
     }
 
-    const int64_t ib = i/qk; // block index
-    const int64_t iqs = (i%qk)/qr; // quant index
-    const int64_t iybs = i - i%qk; // y block start index
+    const int64_t i01 = blockIdx.y;
+    const int64_t i02 = blockIdx.z % ne02;
+    const int64_t i03 = blockIdx.z / ne02;
+
+    const int64_t ibx0 = i03*s03 + i02*s02 + i01*s01;
+
+    const int64_t ib = ibx0 + i00/qk; // block index
+    const int64_t iqs = (i00%qk)/qr; // quant index
+    const int64_t iybs = i00 - i00%qk; // y block start index
     const int64_t y_offset = qr == 1 ? 1 : qk/2;
 
     // dequantize
     dfloat2 v;
     dequantize_kernel(vx, ib, iqs, v);
 
-    y[iybs + iqs + 0]        = v.x;
-    y[iybs + iqs + y_offset] = v.y;
+    const int64_t iy0 = ((i03*ne02 + i02)*ne01 + i01)*ne00 + iybs + iqs;
+    y[iy0 + 0]        = float(v.x);
+    y[iy0 + y_offset] = float(v.y);
 }
 
 template <bool need_check>
@@ -456,10 +465,36 @@ static __global__ void dequantize_block_iq4_xs(const void * __restrict__ vx, dst
     }
 }
 
+template<typename dst_t>
+static __global__ void dequantize_block_mxfp4(const void * __restrict__ vx, dst_t * __restrict__ yy) {
+
+    const int64_t i = blockIdx.x;
+    const block_mxfp4 * x = (const block_mxfp4 *) vx + i*(QK_K/QK_MXFP4);
+
+    const int64_t tid = threadIdx.x;
+    const int64_t il  = tid/8; // 0...3
+    const int64_t ib  = tid%8; // 0...7
+    dst_t * y = yy + i*QK_K + 32*ib + 4*il;
+    const uint8_t * q4 = x[ib].qs + 4*il;
+    const float d = ggml_cuda_e8m0_to_fp32(x[ib].e);
+    for (int j = 0; j < 4; ++j) {
+        y[j+ 0] = d * kvalues_mxfp4[q4[j] & 0xf]*0.5f;
+        y[j+16] = d * kvalues_mxfp4[q4[j] >>  4]*0.5f;
+    }
+}
+
 template <int qk, int qr, dequantize_kernel_t dequantize_kernel, typename dst_t>
-static void dequantize_block_cuda(const void * __restrict__ vx, dst_t * __restrict__ y, const int64_t k, cudaStream_t stream) {
-    const int num_blocks = (k + 2*CUDA_DEQUANTIZE_BLOCK_SIZE - 1) / (2*CUDA_DEQUANTIZE_BLOCK_SIZE);
-    dequantize_block<qk, qr, dequantize_kernel><<<num_blocks, CUDA_DEQUANTIZE_BLOCK_SIZE, 0, stream>>>(vx, y, k);
+static void dequantize_block_cuda(const void * vx, dst_t * y,
+        const int64_t ne00, const int64_t ne01, const int64_t ne02, const int64_t ne03,
+        const int64_t s01, const int64_t s02, const int64_t s03, cudaStream_t stream) {
+    const dim3 num_blocks((ne00 + 2*CUDA_DEQUANTIZE_BLOCK_SIZE - 1) / (2*CUDA_DEQUANTIZE_BLOCK_SIZE), ne01, ne02*ne03);
+    dequantize_block<qk, qr, dequantize_kernel><<<num_blocks, CUDA_DEQUANTIZE_BLOCK_SIZE, 0, stream>>>
+        (vx, y, ne00, ne01, ne02, s01, s02, s03);
+}
+
+template <int qk, int qr, dequantize_kernel_t dequantize_kernel, typename dst_t>
+static void dequantize_block_cont_cuda(const void * __restrict__ vx, dst_t * __restrict__ y, const int64_t k, cudaStream_t stream) {
+    dequantize_block_cuda<qk, qr, dequantize_kernel, dst_t>(vx, y, k, 1, 1, 1, k/qk, k/qk, k/qk, stream);
 }
 
 static void dequantize_block_q8_0_f16_cuda(const void * __restrict__ vx, half * __restrict__ y, const int64_t k, cudaStream_t stream) {
@@ -571,80 +606,10 @@ static void dequantize_row_iq4_xs_cuda(const void * vx, dst_t * y, const int64_t
     dequantize_block_iq4_xs<<<nb, 32, 0, stream>>>(vx, y);
 }
 
-// MXFP4 dequantize derived from dequantize_block_q4_0
-template<typename dst_t>
-static __global__ void dequantize_block_mxfp4(const void * __restrict__ vx, dst_t * __restrict__ yy, int nb32) {
-    const uint16_t dst_bias = 15;
-    const uint16_t dst_0p5 = 0x3800;
-    const uint16_t dst_m_bits = 10;
-    const int64_t i = blockIdx.x;
-
-    // assume 32 threads
-    const int64_t tid = threadIdx.x;
-    const int64_t il = tid/8;
-    const int64_t ir = tid%8;
-    const int64_t ib = 8*i + ir;
-    if (ib >= nb32) {
-        return;
-    }
-
-    const uint64_t offset = 256*i + MXFP4*ir + 8*il;
-    dst_t * y = yy + offset;
-
-    const block_mxfp4 * x = (const block_mxfp4 *)vx + ib;
-    union {
-        uint32_t as_bits;
-        float as_value;
-    } scale;
-    scale.as_bits = (((uint32_t)x->d) << 23);
-
-    // offset within the block 1/4 chunks (8 items)
-    const uint8_t * q = x->qs + 4*il;
-
-    for (int l = 0; l < 4; ++l) {
-        uint16_t em0 = q[l] & 0x07;
-        uint16_t em1 = q[l] & 0x70;
-        // float16 values
-        iq1m_scale_t x0;
-        iq1m_scale_t x1;
-
-        x0.u16 = (em0 << (dst_m_bits - 1)) | ((q[l] & 0x08) << 12);
-        x1.u16 = (em1 << (dst_m_bits - 5)) | ((q[l] & 0x80) << 8);
-
-        // Three cases:
-        // x is normal and non-zero: Correct bias
-        if ((em0 & 0x06) != 0) {
-            x0.u16 = x0.u16 + ((dst_bias - 1) << dst_m_bits);
-        }
-        if ((em1 & 0x60) != 0) {
-            x1.u16 = x1.u16 + ((dst_bias - 1) << dst_m_bits);
-        }
-        // x is subnormal (x == 0bs001 where s is the sign): Map to +-0.5 in the dst type
-        if (em0 == 0x01) {
-            x0.u16 = dst_0p5 | (x0.u16 & 0x8000);
-        }
-        if (em1 == 0x10) {
-            x1.u16 = dst_0p5 | (x1.u16 & 0x8000);
-        }
-        // x is zero, do nothing
-
-        // XXX it looks correct here - but mulmat still gives bad results...
-        // printf("i:%lld ir:%lld il:%lld l:%d  y_offset:[%3lld +%d] = %f \n",
-        //     i, ir, il, l, 256*i + 32*ir + 4*il, l*2+ 0, scale * float(x0.f16));
-        // printf("i:%lld ir:%lld il:%lld l:%d  y_offset:[%3lld +%d] = %f \n",
-        //     i, ir, il, l, 256*i + 32*ir + 4*il, l*2+ 1, scale * float(x1.f16));
-
-        y[l*2]   = scale.as_value * float(x0.f16);
-        y[l*2+1] = scale.as_value * float(x1.f16);
-    }
-}
-
-// derived from dequantize_row_q4_0_cuda
 template<typename dst_t>
 static void dequantize_row_mxfp4_cuda(const void * vx, dst_t * y, const int64_t k, cudaStream_t stream) {
-    const int nb32 = k / 32;
-    const int nb = (k + 255) / 256;
-    dequantize_block_mxfp4<<<nb, 32, 0, stream>>>(vx, y, nb32);
+    const int nb = (k + QK_K - 1) / QK_K;
+    dequantize_block_mxfp4<<<nb, 32, 0, stream>>>(vx, y);
 }
 
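The replacement dequantize_block_mxfp4 decodes MXFP4 blocks: 32 FP4 (e2m1) codes sharing one E8M0 scale, looked up through kvalues_mxfp4 instead of the bit-twiddling the removed kernel attempted. A C++ sketch of one block's decode (mine, not the patch's code; the LUT below is my assumption of ggml-common.h's kvalues_mxfp4 — the e2m1 magnitudes 0, 0.5, 1, 1.5, 2, 3, 4, 6 doubled to integers with the high code bit as sign, which is why the kernel multiplies by 0.5f):

#include <cstdint>
#include <cstring>

static const int8_t kvalues_mxfp4_assumed[16] = {
    0, 1, 2, 3, 4, 6, 8, 12, 0, -1, -2, -3, -4, -6, -8, -12,
};

static float e8m0_to_fp32(uint8_t e) {  // same fallback decode as common.cuh
    uint32_t bits = e == 0 ? 0x00400000u : (uint32_t) e << 23;
    float f;
    memcpy(&f, &bits, sizeof(f));
    return f;
}

// decodes one block: 16 bytes of packed nibbles -> 32 floats
static void dequant_mxfp4_block(uint8_t e, const uint8_t qs[16], float out[32]) {
    const float d = e8m0_to_fp32(e);
    for (int j = 0; j < 16; ++j) {
        out[j]      = d * kvalues_mxfp4_assumed[qs[j] & 0xf] * 0.5f;  // low nibble
        out[j + 16] = d * kvalues_mxfp4_assumed[qs[j] >>  4] * 0.5f;  // high nibble
    }
}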
 template <typename src_t, typename dst_t>
@@ -700,14 +665,14 @@ to_fp16_cuda_t ggml_get_to_fp16_cuda(ggml_type type) {
         case GGML_TYPE_Q4_1:
             return dequantize_row_q4_1_cuda;
         case GGML_TYPE_Q5_0:
-            return dequantize_block_cuda<QK5_0, QR5_0, dequantize_q5_0>;
+            return dequantize_block_cont_cuda<QK5_0, QR5_0, dequantize_q5_0>;
         case GGML_TYPE_Q5_1:
-            return dequantize_block_cuda<QK5_1, QR5_1, dequantize_q5_1>;
+            return dequantize_block_cont_cuda<QK5_1, QR5_1, dequantize_q5_1>;
         case GGML_TYPE_Q8_0:
             if (fp16_available(ggml_cuda_info().devices[ggml_cuda_get_device()].cc)) {
                 return dequantize_block_q8_0_f16_cuda;
             }
-            return dequantize_block_cuda<QK8_0, QR8_0, dequantize_q8_0>;
+            return dequantize_block_cont_cuda<QK8_0, QR8_0, dequantize_q8_0>;
         case GGML_TYPE_Q2_K:
             return dequantize_row_q2_K_cuda;
         case GGML_TYPE_Q3_K:
@@ -736,12 +701,12 @@ to_fp16_cuda_t ggml_get_to_fp16_cuda(ggml_type type) {
             return dequantize_row_iq4_xs_cuda;
         case GGML_TYPE_IQ3_S:
             return dequantize_row_iq3_s_cuda;
+        case GGML_TYPE_MXFP4:
+            return dequantize_row_mxfp4_cuda;
         case GGML_TYPE_F32:
             return convert_unary_cont_cuda<float>;
         case GGML_TYPE_BF16:
             return convert_unary_cont_cuda<nv_bfloat16>;
-        case GGML_TYPE_MXFP4:
-            return dequantize_row_mxfp4_cuda;
         default:
             return nullptr;
     }
@@ -754,11 +719,11 @@ to_fp32_cuda_t ggml_get_to_fp32_cuda(ggml_type type) {
         case GGML_TYPE_Q4_1:
             return dequantize_row_q4_1_cuda;
         case GGML_TYPE_Q5_0:
-            return dequantize_block_cuda<QK5_0, QR5_0, dequantize_q5_0>;
+            return dequantize_block_cont_cuda<QK5_0, QR5_0, dequantize_q5_0>;
         case GGML_TYPE_Q5_1:
-            return dequantize_block_cuda<QK5_1, QR5_1, dequantize_q5_1>;
+            return dequantize_block_cont_cuda<QK5_1, QR5_1, dequantize_q5_1>;
         case GGML_TYPE_Q8_0:
-            return dequantize_block_cuda<QK8_0, QR8_0, dequantize_q8_0>;
+            return dequantize_block_cont_cuda<QK8_0, QR8_0, dequantize_q8_0>;
         case GGML_TYPE_Q2_K:
             return dequantize_row_q2_K_cuda;
         case GGML_TYPE_Q3_K:
@@ -787,12 +752,12 @@ to_fp32_cuda_t ggml_get_to_fp32_cuda(ggml_type type) {
             return dequantize_row_iq4_xs_cuda;
         case GGML_TYPE_IQ3_S:
             return dequantize_row_iq3_s_cuda;
+        case GGML_TYPE_MXFP4:
+            return dequantize_row_mxfp4_cuda;
         case GGML_TYPE_F16:
             return convert_unary_cont_cuda<half>;
         case GGML_TYPE_BF16:
             return convert_unary_cont_cuda<nv_bfloat16>;
-        case GGML_TYPE_MXFP4:
-            return dequantize_row_mxfp4_cuda;
         default:
             return nullptr;
     }
@@ -802,9 +767,61 @@ to_fp16_nc_cuda_t ggml_get_to_fp16_nc_cuda(ggml_type type) {
     switch (type) {
         case GGML_TYPE_F32:
             return convert_unary_cuda<float>;
+        case GGML_TYPE_Q4_0:
+            return dequantize_block_cuda<QK4_0, QR4_0, dequantize_q4_0>;
+        case GGML_TYPE_Q4_1:
+            return dequantize_block_cuda<QK4_1, QR4_1, dequantize_q4_1>;
+        case GGML_TYPE_Q5_0:
+            return dequantize_block_cuda<QK5_0, QR5_0, dequantize_q5_0>;
+        case GGML_TYPE_Q5_1:
+            return dequantize_block_cuda<QK5_1, QR5_1, dequantize_q5_1>;
+        case GGML_TYPE_Q8_0:
+            return dequantize_block_cuda<QK8_0, QR8_0, dequantize_q8_0>;
         case GGML_TYPE_BF16:
             return convert_unary_cuda<nv_bfloat16>;
         default:
             return nullptr;
     }
 }
+
+to_bf16_nc_cuda_t ggml_get_to_bf16_nc_cuda(ggml_type type) {
+    switch (type) {
+        case GGML_TYPE_F32:
+            return convert_unary_cuda<float>;
+        case GGML_TYPE_Q4_0:
+            return dequantize_block_cuda<QK4_0, QR4_0, dequantize_q4_0>;
+        case GGML_TYPE_Q4_1:
+            return dequantize_block_cuda<QK4_1, QR4_1, dequantize_q4_1>;
+        case GGML_TYPE_Q5_0:
+            return dequantize_block_cuda<QK5_0, QR5_0, dequantize_q5_0>;
+        case GGML_TYPE_Q5_1:
+            return dequantize_block_cuda<QK5_1, QR5_1, dequantize_q5_1>;
+        case GGML_TYPE_Q8_0:
+            return dequantize_block_cuda<QK8_0, QR8_0, dequantize_q8_0>;
+        case GGML_TYPE_F16:
+            return convert_unary_cuda<half>;
+        default:
+            return nullptr;
+    }
+}
+
+to_fp32_nc_cuda_t ggml_get_to_fp32_nc_cuda(ggml_type type) {
+    switch (type) {
+        case GGML_TYPE_F16:
+            return convert_unary_cuda<half>;
+        case GGML_TYPE_Q4_0:
+            return dequantize_block_cuda<QK4_0, QR4_0, dequantize_q4_0>;
+        case GGML_TYPE_Q4_1:
+            return dequantize_block_cuda<QK4_1, QR4_1, dequantize_q4_1>;
+        case GGML_TYPE_Q5_0:
+            return dequantize_block_cuda<QK5_0, QR5_0, dequantize_q5_0>;
+        case GGML_TYPE_Q5_1:
+            return dequantize_block_cuda<QK5_1, QR5_1, dequantize_q5_1>;
+        case GGML_TYPE_Q8_0:
+            return dequantize_block_cuda<QK8_0, QR8_0, dequantize_q8_0>;
+        case GGML_TYPE_BF16:
+            return convert_unary_cuda<nv_bfloat16>;
+        default:
+            return nullptr;
+    }
+}
diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/convert.cuh b/ml/backend/ggml/ggml/src/ggml-cuda/convert.cuh
index b65b98e08..f04214be1 100644
--- a/ml/backend/ggml/ggml/src/ggml-cuda/convert.cuh
+++ b/ml/backend/ggml/ggml/src/ggml-cuda/convert.cuh
@@ -22,5 +22,10 @@ using to_t_nc_cuda_t = void (*)(const void * x, T * y,
     int64_t ne00, int64_t ne01, int64_t ne02, int64_t ne03,
     int64_t s01, int64_t s02, int64_t s03, cudaStream_t stream);
 
+typedef to_t_nc_cuda_t<float>       to_fp32_nc_cuda_t;
 typedef to_t_nc_cuda_t<half>        to_fp16_nc_cuda_t;
+typedef to_t_nc_cuda_t<nv_bfloat16> to_bf16_nc_cuda_t;
+
+to_fp32_nc_cuda_t ggml_get_to_fp32_nc_cuda(ggml_type type);
 to_fp16_nc_cuda_t ggml_get_to_fp16_nc_cuda(ggml_type type);
+to_bf16_nc_cuda_t ggml_get_to_bf16_nc_cuda(ggml_type type);
diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/cpy-utils.cuh b/ml/backend/ggml/ggml/src/ggml-cuda/cpy-utils.cuh
new file mode 100644
index 000000000..b8e9e107f
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-cuda/cpy-utils.cuh
@@ -0,0 +1,231 @@
+#pragma once
+
+#include "ggml-common.h"
+
+template<typename src_t, typename dst_t>
+static __device__ __forceinline__ void convert_flt(const src_t * src, dst_t * dst) {
+    if constexpr (std::is_same_v<src_t, dst_t>) {
+        *dst = *src;
+    } else {
+        *dst = float(*src);
+    }
+}
+
+static __device__ __forceinline__ int best_index_int8(int n, const int8_t * val, float x) {
+    if (x <= val[0]) return 0;
+    if (x >= val[n-1]) return n-1;
+    int ml = 0, mu = n-1;
+    while (mu-ml > 1) {
+        int mav = (ml+mu)/2;
+        if (x < val[mav]) mu = mav; else ml = mav;
+    }
+    return x - val[mu-1] < val[mu] - x ? mu-1 : mu;
+}
+
+static __device__ void quantize_f32_q4_0_block(const float * __restrict__ x, block_q4_0 * __restrict__ y) {
+    float amax = 0.0f;
+    float vmax = 0.0f;
+
+    for (int j = 0; j < QK4_0; ++j) {
+        const float v = x[j];
+        if (amax < fabsf(v)) {
+            amax = fabsf(v);
+            vmax = v;
+        }
+    }
+
+    const float d  = vmax / -8;
+    const float id = d ? 1.0f/d : 0.0f;
+
+    y->d = d;
+
+    for (int j = 0; j < QK4_0/2; ++j) {
+        const float x0 = x[0 + j]*id;
+        const float x1 = x[QK4_0/2 + j]*id;
+
+        const uint8_t xi0 = min(15, (int8_t)(x0 + 8.5f));
+        const uint8_t xi1 = min(15, (int8_t)(x1 + 8.5f));
+
+        y->qs[j]  = xi0;
+        y->qs[j] |= xi1 << 4;
+    }
+}
+
+static __device__ void quantize_f32_q4_1_block(const float * __restrict__ x, block_q4_1 * __restrict__ y) {
+    float vmin = FLT_MAX;
+    float vmax = -FLT_MAX;
+
+    for (int j = 0; j < QK4_1; ++j) {
+        const float v = x[j];
+        if (v < vmin) vmin = v;
+        if (v > vmax) vmax = v;
+    }
+
+    const float d  = (vmax - vmin) / ((1 << 4) - 1);
+    const float id = d ? 1.0f/d : 0.0f;
+
+    y->dm.x = d;
+    y->dm.y = vmin;
+
+    for (int j = 0; j < QK4_1/2; ++j) {
+        const float x0 = (x[0 + j] - vmin)*id;
+        const float x1 = (x[QK4_1/2 + j] - vmin)*id;
+
+        const uint8_t xi0 = min(15, (int8_t)(x0 + 0.5f));
+        const uint8_t xi1 = min(15, (int8_t)(x1 + 0.5f));
+
+        y->qs[j]  = xi0;
+        y->qs[j] |= xi1 << 4;
+    }
+}
+
+static __device__ void quantize_f32_q5_0_block(const float * __restrict__ x, block_q5_0 * __restrict__ y) {
+    float amax = 0.0f;
+    float vmax = 0.0f;
+
+    for (int j = 0; j < QK5_0; ++j) {
+        const float v = x[j];
+        if (amax < fabsf(v)) {
+            amax = fabsf(v);
+            vmax = v;
+        }
+    }
+
+    const float d  = vmax / -16;
+    const float id = d ? 1.0f/d : 0.0f;
+
+    y->d = d;
+
+    uint32_t qh = 0;
+    for (int j = 0; j < QK5_0/2; ++j) {
+        const float x0 = x[0 + j]*id;
+        const float x1 = x[QK5_0/2 + j]*id;
+
+        const uint8_t xi0 = min(31, (int8_t)(x0 + 16.5f));
+        const uint8_t xi1 = min(31, (int8_t)(x1 + 16.5f));
+
+        y->qs[j] = (xi0 & 0xf) | ((xi1 & 0xf) << 4);
+        qh |= ((xi0 & 0x10u) >> 4) << (j + 0);
+        qh |= ((xi1 & 0x10u) >> 4) << (j + QK5_0/2);
+    }
+    memcpy(y->qh, &qh, sizeof(qh));
+}
+
+static __device__ void quantize_f32_q5_1_block(const float * __restrict__ x, block_q5_1 * __restrict__ y) {
+    float min = x[0];
+    float max = x[0];
+
+    for (int j = 1; j < QK5_1; ++j) {
+        const float v = x[j];
+        min = v < min ? v : min;
+        max = v > max ? v : max;
+    }
+
+    const float d  = (max - min) / 31;
+    const float id = d ? 1.0f/d : 0.0f;
+
+    y->dm.x = d;
+    y->dm.y = min;
+
+    uint32_t qh = 0;
+    for (int j = 0; j < QK5_1/2; ++j) {
+        const float x0 = (x[0 + j] - min)*id;
+        const float x1 = (x[QK5_1/2 + j] - min)*id;
+
+        const uint8_t xi0 = (uint8_t)(x0 + 0.5f);
+        const uint8_t xi1 = (uint8_t)(x1 + 0.5f);
+
+        y->qs[j] = (xi0 & 0xf) | ((xi1 & 0xf) << 4);
+        qh |= ((xi0 & 0x10u) >> 4) << (j + 0);
+        qh |= ((xi1 & 0x10u) >> 4) << (j + QK5_1/2);
+    }
+    memcpy(y->qh, &qh, sizeof(qh));
+}
+
+static __device__ void quantize_f32_q8_0_block(const float * __restrict__ x, block_q8_0 * __restrict__ y) {
+    float amax = 0.0f; // absolute max
+
+    for (int j = 0; j < QK8_0; j++) {
+        const float v = x[j];
+        amax = fmaxf(amax, fabsf(v));
+    }
+
+    const float d  = amax / ((1 << 7) - 1);
+    const float id = d ? 1.0f/d : 0.0f;
+
+    y->d = d;
+
+    for (int j = 0; j < QK8_0; ++j) {
+        const float x0 = x[j]*id;
+        y->qs[j] = roundf(x0);
+    }
+}
+
+static __device__ void quantize_f32_iq4_nl_block(const float * __restrict__ x, block_iq4_nl * __restrict__ y) {
+    float amax = 0.0f;
+    float vmax = 0.0f;
+
+    for (int j = 0; j < QK4_NL; ++j) {
+        const float v = x[j];
+        if (amax < fabsf(v)) {
+            amax = fabsf(v);
+            vmax = v;
+        }
+    }
+
+    float d = vmax / kvalues_iq4nl[0];
+    const float id = d ? 1.0f/d : 0.0f;
+
+    float sumqx = 0, sumq2 = 0;
+    for (int j = 0; j < QK4_NL/2; ++j) {
+        const float x0 = x[0 + j]*id;
+        const float x1 = x[QK4_NL/2 + j]*id;
+        const uint8_t xi0 = best_index_int8(16, kvalues_iq4nl, x0);
+        const uint8_t xi1 = best_index_int8(16, kvalues_iq4nl, x1);
+        y->qs[j] = xi0 | (xi1 << 4);
+        const float v0 = kvalues_iq4nl[xi0];
+        const float v1 = kvalues_iq4nl[xi1];
+        const float w0 = x[0 + j]*x[0 + j];
+        const float w1 = x[QK4_NL/2 + j]*x[QK4_NL/2 + j];
+        sumqx += w0*v0*x[j] + w1*v1*x[QK4_NL/2 + j];
+        sumq2 += w0*v0*v0 + w1*v1*v1;
+    }
+
+    y->d = sumq2 > 0 ? sumqx/sumq2 : d;
+}
+
+// Wrapper functions for cpy.cu compatibility
+static __device__ void cpy_blck_f32_q4_0(const char * cxi, char * cdsti) {
+    quantize_f32_q4_0_block((const float *)cxi, (block_q4_0 *)cdsti);
+}
+
+static __device__ void cpy_blck_f32_q4_1(const char * cxi, char * cdsti) {
+    quantize_f32_q4_1_block((const float *)cxi, (block_q4_1 *)cdsti);
+}
+
+static __device__ void cpy_blck_f32_q5_0(const char * cxi, char * cdsti) {
+    quantize_f32_q5_0_block((const float *)cxi, (block_q5_0 *)cdsti);
+}
+
+static __device__ void cpy_blck_f32_q5_1(const char * cxi, char * cdsti) {
+    quantize_f32_q5_1_block((const float *)cxi, (block_q5_1 *)cdsti);
+}
+
+static __device__ void cpy_blck_f32_q8_0(const char * cxi, char * cdsti) {
+    quantize_f32_q8_0_block((const float *)cxi, (block_q8_0 *)cdsti);
+}
+
+static __device__ void cpy_blck_f32_iq4_nl(const char * cxi, char * cdsti) {
+    quantize_f32_iq4_nl_block((const float *)cxi, (block_iq4_nl *)cdsti);
+}
+
+template<typename src_t, typename dst_t>
+static __device__ void cpy_1_flt(const char * cxi, char * cdsti) {
+    convert_flt((const src_t *)cxi, (dst_t *)cdsti);
+}
+
+static __device__ void cpy_1_i32_i32(const char * cxi, char * cdsti) {
+    const int32_t * src = (const int32_t *)cxi;
+    int32_t * dst = (int32_t *)cdsti;
+    *dst = *src;
+}
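quantize_f32_q8_0_block implements the q8_0 scheme: one float scale d = amax/127 per 32-value block, with int8 codes q = round(x/d). A host-side round-trip check in C++ (my sketch; QK8_0 = 32 as in ggml):

#include <cmath>
#include <cstdint>
#include <cstdio>

constexpr int QK8_0 = 32;

int main() {
    float x[QK8_0], amax = 0.0f;
    for (int j = 0; j < QK8_0; ++j) {
        x[j] = sinf(0.37f * j);                      // arbitrary test data
        amax = fmaxf(amax, fabsf(x[j]));
    }
    const float d  = amax / 127.0f;
    const float id = d ? 1.0f/d : 0.0f;

    float max_err = 0.0f;
    for (int j = 0; j < QK8_0; ++j) {
        const int8_t q = (int8_t) roundf(x[j]*id);   // quantize
        max_err = fmaxf(max_err, fabsf(x[j] - q*d)); // dequantize and compare
    }
    printf("max round-trip error: %g (bound ~ d/2 = %g)\n", max_err, d/2);
}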
diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/cpy.cu b/ml/backend/ggml/ggml/src/ggml-cuda/cpy.cu
index 4abd01d79..9c3774e5d 100644
--- a/ml/backend/ggml/ggml/src/ggml-cuda/cpy.cu
+++ b/ml/backend/ggml/ggml/src/ggml-cuda/cpy.cu
@@ -1,55 +1,17 @@
 #include "cpy.cuh"
 #include "dequantize.cuh"
+#include "cpy-utils.cuh"
+#if defined(GGML_USE_MUSA) && defined(GGML_MUSA_MUDNN_COPY)
+#include "ggml-musa/mudnn.cuh"
+#endif // GGML_USE_MUSA && GGML_MUSA_MUDNN_COPY
 
 typedef void (*cpy_kernel_t)(const char * cx, char * cdst);
 
-static __device__ void cpy_1_f32_f32(const char * cxi, char * cdsti) {
-    const float * xi = (const float *) cxi;
-    float * dsti = (float *) cdsti;
-
-    *dsti = *xi;
-}
-
-static __device__ void cpy_1_f32_bf16(const char * cxi, char * cdsti) {
-    const float * xi = (const float *) cxi;
-    nv_bfloat16 * dsti = (nv_bfloat16 *) cdsti;
-
-    *dsti = *xi;
-}
-
-static __device__ void cpy_1_f32_f16(const char * cxi, char * cdsti) {
-    const float * xi = (const float *) cxi;
-    half * dsti = (half *) cdsti;
-
-    *dsti = __float2half(*xi);
-}
-
-static __device__ void cpy_1_f16_f16(const char * cxi, char * cdsti) {
-    const half * xi = (const half *) cxi;
-    half * dsti = (half *) cdsti;
-
-    *dsti = *xi;
-}
-
-static __device__ void cpy_1_f16_f32(const char * cxi, char * cdsti) {
-    const half * xi = (const half *) cxi;
-    float * dsti = (float *) cdsti;
-
-    *dsti = *xi;
-}
-
-static __device__ void cpy_1_i32_i32(const char * cxi, char * cdsti) {
-    const int32_t * xi = (const int32_t *) cxi;
-    int32_t * dsti = (int32_t *) cdsti;
-
-    *dsti = *xi;
-}
-
 template <cpy_kernel_t cpy_1>
-static __global__ void cpy_f32_f16(const char * cx, char * cdst_direct, const int ne,
-                                   const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02,
-                                   const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11,
-                                   const int nb12, const int nb13, char ** cdst_indirect, int graph_cpynode_index) {
+static __global__ void cpy_flt(const char * cx, char * cdst_direct, const int ne,
+                               const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02,
+                               const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11,
+                               const int nb12, const int nb13, char ** cdst_indirect, int graph_cpynode_index) {
     const int64_t i = blockDim.x*blockIdx.x + threadIdx.x;
 
     if (i >= ne) {
@@ -75,67 +37,6 @@ static __global__ void cpy_f32_f16(const char * cx, char * cdst_direct, const in
     cpy_1(cx + x_offset, cdst + dst_offset);
 }
 
-// First, add this template function after the other template functions
-template <cpy_kernel_t cpy_1>
-static __global__ void cpy_i32_i32(const char * cx, char * cdst, const int ne,
-                                   const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02,
-                                   const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11,
-                                   const int nb12, const int nb13) {
-    const int64_t i = blockDim.x*blockIdx.x + threadIdx.x;
-
-    if (i >= ne) {
-        return;
-    }
-
-    const int64_t i03 = i/(ne00 * ne01 * ne02);
-    const int64_t i02 = (i - i03*ne00*ne01*ne02 )/ (ne00*ne01);
-    const int64_t i01 = (i - i03*ne00*ne01*ne02 - i02*ne01*ne00) / ne00;
-    const int64_t i00 = i - i03*ne00*ne01*ne02 - i02*ne01*ne00 - i01*ne00;
-    const int64_t x_offset = i00*nb00 + i01*nb01 + i02*nb02 + i03 * nb03;
-
-    const int64_t i13 = i/(ne10 * ne11 * ne12);
-    const int64_t i12 = (i - i13*ne10*ne11*ne12) / (ne10*ne11);
-    const int64_t i11 = (i - i13*ne10*ne11*ne12 - i12*ne10*ne11) / ne10;
-    const int64_t i10 = i - i13*ne10*ne11*ne12 - i12*ne10*ne11 - i11*ne10;
-    const int64_t dst_offset = i10*nb10 + i11*nb11 + i12*nb12 + i13 * nb13;
-
-    cpy_1(cx + x_offset, cdst + dst_offset);
-}
-
-// Then modify the ggml_cpy_i32_i32_cuda function to use the new template
-static void ggml_cpy_i32_i32_cuda(
-    const char * cx, char * cdst, const int ne,
-    const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02,
-    const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, cudaStream_t stream, char ** cdst_indirect, int graph_cpynode_index) {
-
-    const int num_blocks = (ne + CUDA_CPY_BLOCK_SIZE - 1) / CUDA_CPY_BLOCK_SIZE;
-    cpy_i32_i32<cpy_1_i32_i32><<<num_blocks, CUDA_CPY_BLOCK_SIZE, 0, stream>>>
-        (cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13);
-}
-
-static __device__ void cpy_blck_f32_q8_0(const char * cxi, char * cdsti) {
-    const float * xi = (const float *) cxi;
-    block_q8_0 * dsti = (block_q8_0 *) cdsti;
-
-    float amax = 0.0f; // absolute max
-
-    for (int j = 0; j < QK8_0; j++) {
-        const float v = xi[j];
-        amax = fmaxf(amax, fabsf(v));
-    }
-
-    const float d = amax / ((1 << 7) - 1);
-    const float id = d ? 1.0f/d : 0.0f;
-
-    dsti->d = d;
-
-    for (int j = 0; j < QK8_0; ++j) {
-        const float x0 = xi[j]*id;
-
-        dsti->qs[j] = roundf(x0);
-    }
-}
-
 static __device__ void cpy_blck_q8_0_f32(const char * cxi, char * cdsti) {
     float * cdstf = (float *)(cdsti);
 
@@ -148,139 +49,6 @@ static __device__ void cpy_blck_q8_0_f32(const char * cxi, char * cdsti) {
     }
 }
 
-static __device__ void cpy_blck_f32_q4_0(const char * cxi, char * cdsti) {
-    const float * xi = (const float *) cxi;
-    block_q4_0 * dsti = (block_q4_0 *) cdsti;
-
-    float amax = 0.0f;
-    float vmax = 0.0f;
-
-    for (int j = 0; j < QK4_0; ++j) {
-        const float v = xi[j];
-        if (amax < fabsf(v)) {
-            amax = fabsf(v);
-            vmax = v;
-        }
-    }
-
-    const float d = vmax / -8;
-    const float id = d ? 1.0f/d : 0.0f;
-
-    dsti->d = d;
-
-    for (int j = 0; j < QK4_0/2; ++j) {
-        const float x0 = xi[0 + j]*id;
-        const float x1 = xi[QK4_0/2 + j]*id;
-
-        const uint8_t xi0 = min(15, (int8_t)(x0 + 8.5f));
-        const uint8_t xi1 = min(15, (int8_t)(x1 + 8.5f));
-
-        dsti->qs[j]  = xi0;
-        dsti->qs[j] |= xi1 << 4;
-    }
-}
-
-static __device__ void cpy_blck_f32_q4_1(const char * cxi, char * cdsti) {
-    const float * xi = (const float *) cxi;
-    block_q4_1 * dsti = (block_q4_1 *) cdsti;
-
-    float vmin = FLT_MAX;
-    float vmax = -FLT_MAX;
-
-    for (int j = 0; j < QK4_1; ++j) {
-        const float v = xi[j];
-
-        if (v < vmin) vmin = v;
-        if (v > vmax) vmax = v;
-    }
-
-    const float d = (vmax - vmin) / ((1 << 4) - 1);
-    const float id = d ? 1.0f/d : 0.0f;
-
-    dsti->dm.x = d;
-    dsti->dm.y = vmin;
-
-    for (int j = 0; j < QK4_1/2; ++j) {
-        const float x0 = (xi[0 + j] - vmin)*id;
-        const float x1 = (xi[QK4_1/2 + j] - vmin)*id;
-
-        const uint8_t xi0 = min(15, (int8_t)(x0 + 0.5f));
-        const uint8_t xi1 = min(15, (int8_t)(x1 + 0.5f));
-
-        dsti->qs[j]  = xi0;
-        dsti->qs[j] |= xi1 << 4;
-    }
-}
-
-static __device__ void cpy_blck_f32_q5_0(const char * cxi, char * cdsti) {
-    const float * xi = (const float *) cxi;
-    block_q5_0 * dsti = (block_q5_0 *) cdsti;
-
-    float amax = 0.0f;
-    float vmax = 0.0f;
-
-    for (int j = 0; j < QK5_0; ++j) {
-        const float v = xi[j];
-        if (amax < fabsf(v)) {
-            amax = fabsf(v);
-            vmax = v;
-        }
-    }
-
-    const float d = vmax / -16;
-    const float id = d ? 1.0f/d : 0.0f;
-
-    dsti->d = d;
-
-    uint32_t qh = 0;
-    for (int j = 0; j < QK5_0/2; ++j) {
-        const float x0 = xi[0 + j]*id;
-        const float x1 = xi[QK5_0/2 + j]*id;
-
-        const uint8_t xi0 = min(31, (int8_t)(x0 + 16.5f));
-        const uint8_t xi1 = min(31, (int8_t)(x1 + 16.5f));
-
-        dsti->qs[j] = (xi0 & 0xf) | ((xi1 & 0xf) << 4);
-        qh |= ((xi0 & 0x10u) >> 4) << (j + 0);
-        qh |= ((xi1 & 0x10u) >> 4) << (j + QK5_0/2);
-    }
-    memcpy(dsti->qh, &qh, sizeof(qh));
-}
-
-static __device__ void cpy_blck_f32_q5_1(const char * cxi, char * cdsti) {
-    const float * xi = (const float *) cxi;
-    block_q5_1 * dsti = (block_q5_1 *) cdsti;
-
-    float min = xi[0];
-    float max = xi[0];
-
-    for (int j = 1; j < QK5_1; ++j) {
-        const float v = xi[j];
-        min = v < min ? v : min;
-        max = v > max ? v : max;
-    }
-
-    const float d = (max - min) / 31;
-    const float id = d ? 1.0f/d : 0.0f;
-
-    dsti->dm.x = d;
-    dsti->dm.y = min;
-
-    uint32_t qh = 0;
-    for (int j = 0; j < QK5_1/2; ++j) {
-        const float x0 = (xi[0 + j] - min)*id;
-        const float x1 = (xi[QK5_1/2 + j] - min)*id;
-
-        const uint8_t xi0 = (uint8_t)(x0 + 0.5f);
-        const uint8_t xi1 = (uint8_t)(x1 + 0.5f);
-
-        dsti->qs[j] = (xi0 & 0xf) | ((xi1 & 0xf) << 4);
-        qh |= ((xi0 & 0x10u) >> 4) << (j + 0);
-        qh |= ((xi1 & 0x10u) >> 4) << (j + QK5_1/2);
-    }
-    memcpy(dsti->qh, &qh, sizeof(qh));
-}
-
 template<dequantize_kernel_t dequant, int qk>
 static __device__ void cpy_blck_q_f32(const char * cxi, char * cdsti) {
     float * cdstf = (float *)(cdsti);
@@ -294,53 +62,6 @@ static __device__ void cpy_blck_q_f32(const char * cxi, char * cdsti) {
     }
 }
 
-static __device__ __forceinline__ int best_index_int8(int n, const int8_t * val, float x) {
-    if (x <= val[0]) return 0;
-    if (x >= val[n-1]) return n-1;
-    int ml = 0, mu = n-1;
-    while (mu-ml > 1) {
-        int mav = (ml+mu)/2;
-        if (x < val[mav]) mu = mav; else ml = mav;
-    }
-    return x - val[mu-1] < val[mu] - x ? mu-1 : mu;
-}
-
-static __device__ void cpy_blck_f32_iq4_nl(const char * cxi, char * cdsti) {
-    const float * xi = (const float *) cxi;
-    block_iq4_nl * dsti = (block_iq4_nl *) cdsti;
-
-    float amax = 0.0f;
-    float vmax = 0.0f;
-
-    for (int j = 0; j < QK4_NL; ++j) {
-        const float v = xi[j];
-        if (amax < fabsf(v)) {
-            amax = fabsf(v);
-            vmax = v;
-        }
-    }
-
-    float d = vmax / kvalues_iq4nl[0];
-    const float id = d ? 1.0f/d : 0.0f;
-
-    float sumqx = 0, sumq2 = 0;
-    for (int j = 0; j < QK4_NL/2; ++j) {
-        const float x0 = xi[0 + j]*id;
-        const float x1 = xi[QK4_NL/2 + j]*id;
-        const uint8_t xi0 = best_index_int8(16, kvalues_iq4nl, x0);
-        const uint8_t xi1 = best_index_int8(16, kvalues_iq4nl, x1);
-        dsti->qs[j] = xi0 | (xi1 << 4);
-        const float v0 = kvalues_iq4nl[xi0];
-        const float v1 = kvalues_iq4nl[xi1];
-        const float w0 = xi[0 + j]*xi[0 + j];
-        const float w1 = xi[QK4_NL/2 + j]*xi[QK4_NL/2 + j];
-        sumqx += w0*v0*xi[j] + w1*v1*xi[QK4_NL/2 + j];
-        sumq2 += w0*v0*v0 + w1*v1*v1;
-    }
-
-    dsti->d = sumq2 > 0 ?
sumqx/sumq2 : d; -} - template static __global__ void cpy_f32_q(const char * cx, char * cdst_direct, const int ne, const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02, @@ -400,7 +121,7 @@ static __global__ void cpy_q_f32(const char * cx, char * cdst_direct, const int // Copy destination pointers to GPU to be available when pointer indirection is in use void ggml_cuda_cpy_dest_ptrs_copy(ggml_cuda_graph * cuda_graph, char ** host_dest_ptrs, const int host_dest_ptrs_size, cudaStream_t stream) { -#if defined(GGML_CUDA_USE_GRAPHS) || defined(GGML_HIP_GRAPHS) +#if defined(GGML_CUDA_USE_GRAPHS) || defined(GGML_HIP_GRAPHS) || defined(GGML_MUSA_GRAPHS) if (cuda_graph->dest_ptrs_size < host_dest_ptrs_size) { // (re-)allocate GPU memory for destination pointers CUDA_CHECK(cudaStreamSynchronize(stream)); if (cuda_graph->dest_ptrs_d != nullptr) { @@ -418,43 +139,14 @@ void ggml_cuda_cpy_dest_ptrs_copy(ggml_cuda_graph * cuda_graph, char ** host_des #endif } -static void ggml_cpy_f16_f32_cuda( +template +static void ggml_cpy_flt_cuda( const char * cx, char * cdst, const int ne, const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02, const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, cudaStream_t stream, char ** cdst_indirect, int & graph_cpynode_index) { const int num_blocks = (ne + CUDA_CPY_BLOCK_SIZE - 1) / CUDA_CPY_BLOCK_SIZE; - cpy_f32_f16<<>> - (cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, cdst_indirect, graph_cpynode_index++); -} - -static void ggml_cpy_f32_f32_cuda( - const char * cx, char * cdst, const int ne, - const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02, - const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, cudaStream_t stream, char ** cdst_indirect, int & graph_cpynode_index) { - - const int num_blocks = (ne + CUDA_CPY_BLOCK_SIZE - 1) / CUDA_CPY_BLOCK_SIZE; - cpy_f32_f16<<>> - (cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, cdst_indirect, graph_cpynode_index++); -} - -static void ggml_cpy_f32_bf16_cuda( - const char * cx, char * cdst, const int ne, - const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02, - const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, cudaStream_t stream, char ** cdst_indirect, int & graph_cpynode_index) { - - const int num_blocks = (ne + CUDA_CPY_BLOCK_SIZE - 1) / CUDA_CPY_BLOCK_SIZE; - cpy_f32_f16<<>> - (cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, cdst_indirect, graph_cpynode_index++); -} - -static void ggml_cpy_f32_f16_cuda( - const char * cx, char * cdst, const int ne, - const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02, - const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, cudaStream_t stream, char ** cdst_indirect, int & graph_cpynode_index) { - - const int num_blocks = (ne + CUDA_CPY_BLOCK_SIZE - 1) / CUDA_CPY_BLOCK_SIZE; - cpy_f32_f16<<>> + cpy_flt><<>> (cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, cdst_indirect, graph_cpynode_index++); } @@ 
-586,14 +278,45 @@ static void ggml_cpy_f32_iq4_nl_cuda( (cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, cdst_indirect, graph_cpynode_index++); } -static void ggml_cpy_f16_f16_cuda( +template +static __global__ void cpy_i32_i32( + const char *cx, char *cdst, const int ne, + const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02, const int nb03, + const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, + cudaStream_t stream, char ** cdst_indirect, int & graph_cpynode_index) { + + const int64_t i = blockDim.x * blockIdx.x + threadIdx.x; + + if (i >= ne) { + return; + } + + const int64_t i03 = i / (ne00 * ne01 * ne02); + const int64_t i02 = (i - i03 * ne00 * ne01 * ne02) / (ne00 * ne01); + const int64_t i01 = (i - i03 * ne00 * ne01 * ne02 - i02 * ne01 * ne00) / ne00; + const int64_t i00 = i - i03 * ne00 * ne01 * ne02 - i02 * ne01 * ne00 - i01 * ne00; + const int64_t x_offset = i00 * nb00 + i01 * nb01 + i02 * nb02 + i03 * nb03; + + const int64_t i13 = i / (ne10 * ne11 * ne12); + const int64_t i12 = (i - i13 * ne10 * ne11 * ne12) / (ne10 * ne11); + const int64_t i11 = (i - i13 * ne10 * ne11 * ne12 - i12 * ne10 * ne11) / ne10; + const int64_t i10 = i - i13 * ne10 * ne11 * ne12 - i12 * ne10 * ne11 - i11 * ne10; + const int64_t dst_offset = i10 * nb10 + i11 * nb11 + i12 * nb12 + i13 * nb13; + + char * cdst_ptr = (cdst_indirect != nullptr) ? cdst_indirect[graph_cpynode_index] : cdst; + cpy_1(cx + x_offset, cdst_ptr + dst_offset); +} + + +static void ggml_cpy_i32_i32_cuda( const char * cx, char * cdst, const int ne, - const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02, - const int nb03, const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, cudaStream_t stream, char ** cdst_indirect, int & graph_cpynode_index) { + const int ne00, const int ne01, const int ne02, const int nb00, const int nb01, const int nb02, const int nb03, + const int ne10, const int ne11, const int ne12, const int nb10, const int nb11, const int nb12, const int nb13, + cudaStream_t stream, char ** cdst_indirect, int graph_cpynode_index) { const int num_blocks = (ne + CUDA_CPY_BLOCK_SIZE - 1) / CUDA_CPY_BLOCK_SIZE; - cpy_f32_f16<<>> - (cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, cdst_indirect, graph_cpynode_index++); + cpy_i32_i32<<>> + (cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, stream, cdst_indirect, graph_cpynode_index); } void ggml_cuda_cpy(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, ggml_tensor * src1, bool disable_indirection_for_this_node) { @@ -632,7 +355,7 @@ void ggml_cuda_cpy(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, gg char ** dest_ptrs_d = nullptr; int graph_cpynode_index = -1; -#if defined(GGML_CUDA_USE_GRAPHS) || defined(GGML_HIP_GRAPHS) +#if defined(GGML_CUDA_USE_GRAPHS) || defined(GGML_HIP_GRAPHS) || defined(GGML_MUSA_GRAPHS) if(ctx.cuda_graph->use_cpy_indirection && !disable_indirection_for_this_node) { dest_ptrs_d = ctx.cuda_graph->dest_ptrs_d; graph_cpynode_index = ctx.cuda_graph->graph_cpynode_index; @@ -642,13 +365,20 @@ void ggml_cuda_cpy(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, gg #endif if (src0->type == src1->type && ggml_is_contiguous(src0) && ggml_is_contiguous(src1)) { GGML_ASSERT(ggml_nbytes(src0) == 
ggml_nbytes(src1)); - CUDA_CHECK(cudaMemcpyAsync(src1_ddc, src0_ddc, ggml_nbytes(src0), cudaMemcpyDeviceToDevice, main_stream)); +#if defined(GGML_USE_MUSA) && defined(GGML_MUSA_MUDNN_COPY) + if (src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16) { + CUDA_CHECK(mudnnMemcpyAsync(ctx, src1, src0)); + } else +#endif // GGML_USE_MUSA && GGML_MUSA_MUDNN_COPY + { + CUDA_CHECK(cudaMemcpyAsync(src1_ddc, src0_ddc, ggml_nbytes(src0), cudaMemcpyDeviceToDevice, main_stream)); + } } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32) { - ggml_cpy_f32_f32_cuda (src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream, dest_ptrs_d, graph_cpynode_index); + ggml_cpy_flt_cuda (src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream, dest_ptrs_d, graph_cpynode_index); } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_BF16) { - ggml_cpy_f32_bf16_cuda(src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream, dest_ptrs_d, graph_cpynode_index); + ggml_cpy_flt_cuda (src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream, dest_ptrs_d, graph_cpynode_index); } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F16) { - ggml_cpy_f32_f16_cuda (src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream, dest_ptrs_d, graph_cpynode_index); + ggml_cpy_flt_cuda (src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream, dest_ptrs_d, graph_cpynode_index); } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_Q8_0) { ggml_cpy_f32_q8_0_cuda(src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream, dest_ptrs_d, graph_cpynode_index); } else if (src0->type == GGML_TYPE_Q8_0 && src1->type == GGML_TYPE_F32) { @@ -675,16 +405,24 @@ void ggml_cuda_cpy(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, gg } else if (src0->type == GGML_TYPE_Q5_1 && src1->type == GGML_TYPE_F32) { ggml_cpy_q5_1_f32_cuda(src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream, dest_ptrs_d, graph_cpynode_index); } else if (src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_F16) { - ggml_cpy_f16_f16_cuda (src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream, dest_ptrs_d, graph_cpynode_index); + ggml_cpy_flt_cuda (src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream, dest_ptrs_d, graph_cpynode_index); + } else if (src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_BF16) { + ggml_cpy_flt_cuda (src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream, dest_ptrs_d, graph_cpynode_index); } else if (src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_F32) { - ggml_cpy_f16_f32_cuda (src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream, dest_ptrs_d, graph_cpynode_index); + ggml_cpy_flt_cuda (src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream, dest_ptrs_d, 
graph_cpynode_index); } else if (src0->type == GGML_TYPE_I32 && src1->type == GGML_TYPE_I32) { ggml_cpy_i32_i32_cuda(src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream, dest_ptrs_d, graph_cpynode_index); + } else if (src0->type == GGML_TYPE_BF16 && src1->type == GGML_TYPE_BF16) { + ggml_cpy_flt_cuda (src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream, dest_ptrs_d, graph_cpynode_index); + } else if (src0->type == GGML_TYPE_BF16 && src1->type == GGML_TYPE_F16) { + ggml_cpy_flt_cuda (src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream, dest_ptrs_d, graph_cpynode_index); + } else if (src0->type == GGML_TYPE_BF16 && src1->type == GGML_TYPE_F32) { + ggml_cpy_flt_cuda (src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream, dest_ptrs_d, graph_cpynode_index); } else { GGML_ABORT("%s: unsupported type combination (%s to %s)\n", __func__, ggml_type_name(src0->type), ggml_type_name(src1->type)); } -#if defined(GGML_CUDA_USE_GRAPHS) || defined(GGML_HIP_GRAPHS) +#if defined(GGML_CUDA_USE_GRAPHS) || defined(GGML_HIP_GRAPHS) || defined(GGML_MUSA_GRAPHS) if(ctx.cuda_graph->use_cpy_indirection && !disable_indirection_for_this_node) { ctx.cuda_graph->graph_cpynode_index = graph_cpynode_index; } @@ -704,11 +442,11 @@ void* ggml_cuda_cpy_fn(const ggml_tensor * src0, ggml_tensor * src1) { if (src0->type == src1->type && ggml_is_contiguous(src0) && ggml_is_contiguous(src1)) { return nullptr; } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32) { - return (void*) cpy_f32_f16; + return (void*) cpy_flt>; } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_BF16) { - return (void*) cpy_f32_f16; + return (void*) cpy_flt>; } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F16) { - return (void*) cpy_f32_f16; + return (void*) cpy_flt>; } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_Q8_0) { return (void*) cpy_f32_q; } else if (src0->type == GGML_TYPE_Q8_0 && src1->type == GGML_TYPE_F32) { @@ -732,11 +470,17 @@ void* ggml_cuda_cpy_fn(const ggml_tensor * src0, ggml_tensor * src1) { } else if (src0->type == GGML_TYPE_Q5_1 && src1->type == GGML_TYPE_F32) { return (void*) cpy_q_f32, QK5_1>; } else if (src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_F16) { - return (void*) cpy_f32_f16; + return (void*) cpy_flt>; + } else if (src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_BF16) { + return (void*) cpy_flt>; } else if (src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_F32) { - return (void*) cpy_f32_f16; - } else if (src0->type == GGML_TYPE_I32 && src1->type == GGML_TYPE_I32) { - return (void*) cpy_i32_i32; + return (void*) cpy_flt>; + } else if (src0->type == GGML_TYPE_BF16 && src1->type == GGML_TYPE_F16) { + return (void*) cpy_flt>; + } else if (src0->type == GGML_TYPE_BF16 && src1->type == GGML_TYPE_BF16) { + return (void*) cpy_flt>; + } else if (src0->type == GGML_TYPE_BF16 && src1->type == GGML_TYPE_F32) { + return (void*) cpy_flt>; } else { GGML_ABORT("%s: unsupported type combination (%s to %s)\n", __func__, ggml_type_name(src0->type), ggml_type_name(src1->type)); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/cross-entropy-loss.cu b/ml/backend/ggml/ggml/src/ggml-cuda/cross-entropy-loss.cu index 0ce4afbb2..0c8b08197 100644 --- 
a/ml/backend/ggml/ggml/src/ggml-cuda/cross-entropy-loss.cu +++ b/ml/backend/ggml/ggml/src/ggml-cuda/cross-entropy-loss.cu @@ -123,13 +123,7 @@ void ggml_cuda_cross_entropy_loss(ggml_backend_cuda_context & ctx, ggml_tensor * ggml_cuda_pool_alloc dst_tmp(pool, blocks_num.x); if (nbytes_shared <= smpbo) { -#if !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) && !defined(GGML_USE_MUSA) - static bool shared_memory_limit_raised[GGML_CUDA_MAX_DEVICES] = {false}; - if (!shared_memory_limit_raised[id]) { - CUDA_CHECK(cudaFuncSetAttribute(cross_entropy_loss_f32, cudaFuncAttributeMaxDynamicSharedMemorySize, smpbo)); - shared_memory_limit_raised[id] = true; - } -#endif // !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) && !defined(GGML_USE_MUSA) + CUDA_SET_SHARED_MEMORY_LIMIT((cross_entropy_loss_f32), smpbo); cross_entropy_loss_f32<<>>(src0_d, src1_d, dst_tmp.ptr, ne00, nrows); } else { cross_entropy_loss_f32<<>>(src0_d, src1_d, dst_tmp.ptr, ne00, nrows); @@ -175,13 +169,7 @@ void ggml_cuda_cross_entropy_loss_back(ggml_backend_cuda_context & ctx, ggml_ten const size_t smpbo = ggml_cuda_info().devices[id].smpbo; if (nbytes_shared <= smpbo) { -#if !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) && !defined(GGML_USE_MUSA) - static bool shared_memory_limit_raised[GGML_CUDA_MAX_DEVICES] = {false}; - if (!shared_memory_limit_raised[id]) { - CUDA_CHECK(cudaFuncSetAttribute(cross_entropy_loss_back_f32, cudaFuncAttributeMaxDynamicSharedMemorySize, smpbo)); - shared_memory_limit_raised[id] = true; - } -#endif // !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) && !defined(GGML_USE_MUSA) + CUDA_SET_SHARED_MEMORY_LIMIT((cross_entropy_loss_back_f32), smpbo); cross_entropy_loss_back_f32<<>>(grad_d, src0f_d, src1f_d, dst_d, ne00); } else { cross_entropy_loss_back_f32<<>>(grad_d, src0f_d, src1f_d, dst_d, ne00); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/fattn-common.cuh b/ml/backend/ggml/ggml/src/ggml-cuda/fattn-common.cuh index b7180d595..e46f0e208 100644 --- a/ml/backend/ggml/ggml/src/ggml-cuda/fattn-common.cuh +++ b/ml/backend/ggml/ggml/src/ggml-cuda/fattn-common.cuh @@ -15,6 +15,8 @@ typedef void (* fattn_kernel_t)( const char * __restrict__ K, const char * __restrict__ V, const char * __restrict__ mask, + const char * __restrict__ sinks, + const int * __restrict__ KV_max, float * __restrict__ dst, float2 * __restrict__ dst_meta, const float scale, @@ -23,29 +25,13 @@ typedef void (* fattn_kernel_t)( const float m1, const uint32_t n_head_log2, const float logit_softcap, - const int ne00, - const int ne01, - const int ne02, - const int ne03, - const int ne10, - const int ne11, - const int ne12, - const int ne13, - const int ne31, - const int nb31, - const int nb01, - const int nb02, - const int nb03, - const int nb11, - const int nb12, - const int nb13, - const int nb21, - const int nb22, - const int nb23, - const int ne0, - const int ne1, - const int ne2, - const int ne3); + const int32_t ne00, const int32_t ne01, const int32_t ne02, const int32_t ne03, + const int32_t nb01, const int32_t nb02, const int32_t nb03, + const int32_t ne10, const int32_t ne11, const int32_t ne12, const int32_t ne13, + const int32_t nb11, const int32_t nb12, const int64_t nb13, + const int32_t nb21, const int32_t nb22, const int64_t nb23, + const int32_t ne31, const int32_t ne32, const int32_t ne33, + const int32_t nb31, const int32_t nb32, const int64_t nb33); typedef half (*vec_dot_KQ_f16_t)( const char * __restrict__ K_c, const void * __restrict__ Q_v, const int * __restrict__ Q_q8 , const 
void * __restrict__ Q_ds); @@ -516,10 +502,59 @@ constexpr __device__ dequantize_1_f32_t get_dequantize_1_f32(ggml_type type_V) { nullptr; } +template +__launch_bounds__(FATTN_KQ_STRIDE/2, 1) +static __global__ void flash_attn_mask_to_KV_max( + const half2 * __restrict__ mask, int * __restrict__ KV_max, const int ne30, const int s31, const int s33) { + const int ne31 = gridDim.x; + const int tid = threadIdx.x; + const int sequence = blockIdx.y; + const int jt = blockIdx.x; + + mask += sequence*s33 + jt*ncols1*s31; + + __shared__ int buf_iw[WARP_SIZE]; + if (tid < WARP_SIZE) { + buf_iw[tid] = 1; + } + __syncthreads(); + + int KV_max_sj = (ne30 - 1) * FATTN_KQ_STRIDE; + for (; KV_max_sj >= 0; KV_max_sj -= FATTN_KQ_STRIDE) { + int all_inf = 1; + +#pragma unroll + for (int j = 0; j < ncols1; ++j) { + const float2 tmp = __half22float2(mask[j*s31 + KV_max_sj/2 + tid]); + all_inf = all_inf && int(isinf(tmp.x)) && int(isinf(tmp.y)); + } + + all_inf = warp_reduce_all(all_inf); + if (tid % WARP_SIZE == 0) { + buf_iw[tid / WARP_SIZE] = all_inf; + } + __syncthreads(); + all_inf = buf_iw[tid % WARP_SIZE]; + __syncthreads(); + all_inf = warp_reduce_all(all_inf); + + if (!all_inf) { + KV_max_sj += FATTN_KQ_STRIDE; + break; + } + } + + if (threadIdx.x != 0) { + return; + } + + KV_max[sequence*ne31 + jt] = KV_max_sj; +} + template // D == head size __launch_bounds__(D, 1) static __global__ void flash_attn_stream_k_fixup( - float * __restrict__ dst, const float2 * __restrict__ dst_fixup, const int ne01, const int ne02, const int ne11) { + float * __restrict__ dst, const float2 * __restrict__ dst_fixup, const int ne01, const int ne02, const int ne03, const int ne11) { constexpr int ncols = ncols1*ncols2; const int bidx0 = blockIdx.x; @@ -533,8 +568,8 @@ static __global__ void flash_attn_stream_k_fixup( const int iter_k = ne11 / FATTN_KQ_STRIDE; const int iter_j = (ne01 + (ncols1 - 1)) / ncols1; - const int kbc0 = (bidx0 + 0)*iter_k*iter_j*(ne02/ncols2) / gridDim.x; - const int kbc0_stop = (bidx0 + 1)*iter_k*iter_j*(ne02/ncols2) / gridDim.x; + const int kbc0 = (bidx0 + 0)*(iter_k*iter_j*(ne02/ncols2)*ne03) / gridDim.x; + const int kbc0_stop = (bidx0 + 1)*(iter_k*iter_j*(ne02/ncols2)*ne03) / gridDim.x; const bool did_not_have_any_data = kbc0 == kbc0_stop; const bool wrote_beginning_of_tile = kbc0 % iter_k == 0; @@ -543,14 +578,15 @@ static __global__ void flash_attn_stream_k_fixup( return; } - const int channel = kbc0 / (iter_k*iter_j); - const int jt = (kbc0 - channel*iter_k*iter_j) / iter_k; + const int sequence = kbc0 / (iter_k*iter_j*(ne02/ncols2)); + const int head = (kbc0 - iter_k*iter_j*(ne02/ncols2)*sequence) / (iter_k*iter_j); + const int jt = (kbc0 - iter_k*iter_j*(ne02/ncols2)*sequence - iter_k*iter_j*head) / iter_k; // j index of current tile. if (jt*ncols1 + j >= ne01) { return; } - dst += jt*ne02*(ncols1*D) + channel*(ncols2*D) + (j*ne02 + c)*D + tid; + dst += sequence*ne02*ne01*D + jt*ne02*(ncols1*D) + head*(ncols2*D) + (j*ne02 + c)*D + tid; // Load the partial result that needs a fixup: float dst_val = 0.0f; @@ -569,7 +605,7 @@ static __global__ void flash_attn_stream_k_fixup( int bidx = bidx0 - 1; int kbc_stop = kbc0; while(true) { - const int kbc = bidx*iter_k*iter_j*(ne02/ncols2) / gridDim.x; + const int kbc = bidx*(iter_k*iter_j*(ne02/ncols2)*ne03) / gridDim.x; if (kbc == kbc_stop) { // Did not have any data. 
bidx--; kbc_stop = kbc; @@ -607,24 +643,39 @@ static __global__ void flash_attn_stream_k_fixup( } template // D == head size -#if !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) +#if !defined(GGML_USE_HIP) __launch_bounds__(D, 1) -#endif // !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) +#endif // !(defined(GGML_USE_HIP) static __global__ void flash_attn_combine_results( const float * __restrict__ VKQ_parts, const float2 * __restrict__ VKQ_meta, float * __restrict__ dst, const int parallel_blocks) { - VKQ_parts += parallel_blocks*D * gridDim.z*blockIdx.x; - VKQ_meta += parallel_blocks * gridDim.z*blockIdx.x; - dst += D * gridDim.z*blockIdx.x; + // Dimension 0: threadIdx.x + // Dimension 1: blockIdx.x + // Dimension 2: blockIdx.y + // Dimension 3: blockIdx.z + // Memory layout is permuted with [0, 2, 1, 3] + + const int ne01 = gridDim.x; + const int ne02 = gridDim.y; + + const int col = blockIdx.x; + const int head = blockIdx.y; + const int sequence = blockIdx.z; + + const int j_dst_unrolled = (sequence*ne01 + col)*ne02 + head; + + VKQ_parts += j_dst_unrolled * parallel_blocks*D; + VKQ_meta += j_dst_unrolled * parallel_blocks; + dst += j_dst_unrolled * D; const int tid = threadIdx.x; __builtin_assume(tid < D); extern __shared__ float2 meta[]; - if (tid < 2*parallel_blocks) { - ((float *) meta)[threadIdx.x] = ((const float *)VKQ_meta) [blockIdx.z*(2*parallel_blocks) + tid]; + for (int i = tid; i < 2*parallel_blocks; i += D) { + ((float *) meta)[i] = ((const float *)VKQ_meta) [i]; } __syncthreads(); @@ -642,11 +693,11 @@ static __global__ void flash_attn_combine_results( const uint32_t ftz_mask = 0xFFFFFFFF * (diff > SOFTMAX_FTZ_THRESHOLD); *((uint32_t *) &KQ_max_scale) &= ftz_mask; - VKQ_numerator += KQ_max_scale * VKQ_parts[l*gridDim.z*D + blockIdx.z*D + tid]; + VKQ_numerator += KQ_max_scale * VKQ_parts[l*D + tid]; VKQ_denominator += KQ_max_scale * meta[l].y; } - dst[blockIdx.z*D + tid] = VKQ_numerator / VKQ_denominator; + dst[tid] = VKQ_numerator / VKQ_denominator; } [[noreturn]] @@ -678,25 +729,32 @@ void launch_fattn( ) { constexpr int ncols = ncols1 * ncols2; + const bool is_mla = DV == 512; // TODO better parameterization + const ggml_tensor * Q = dst->src[0]; const ggml_tensor * K = dst->src[1]; const ggml_tensor * V = dst->src[2]; - const ggml_tensor * mask = dst->src[3]; + GGML_ASSERT(V || is_mla); + + const ggml_tensor * mask = dst->src[3]; + const ggml_tensor * sinks = dst->src[4]; ggml_tensor * KQV = dst; GGML_ASSERT(Q->type == GGML_TYPE_F32); GGML_ASSERT(KQV->type == GGML_TYPE_F32); + GGML_ASSERT( Q->nb[0] == ggml_element_size(Q)); + GGML_ASSERT( K->nb[0] == ggml_element_size(K)); + GGML_ASSERT(!V || V->nb[0] == ggml_element_size(V)); + GGML_ASSERT(!mask || mask->type == GGML_TYPE_F16); GGML_ASSERT(!mask || mask->ne[1] >= GGML_PAD(Q->ne[1], 16) && "the Flash-Attention CUDA kernel requires the mask to be padded to 16 and at least n_queries big"); GGML_ASSERT(K->ne[1] % FATTN_KQ_STRIDE == 0 && "Incorrect KV cache padding."); - GGML_ASSERT(Q->ne[3] == 1); - ggml_cuda_pool & pool = ctx.pool(); cudaStream_t main_stream = ctx.stream(); const int id = ggml_cuda_get_device(); @@ -705,6 +763,7 @@ void launch_fattn( ggml_cuda_pool_alloc K_f16(pool); ggml_cuda_pool_alloc V_f16(pool); + ggml_cuda_pool_alloc KV_max(pool); ggml_cuda_pool_alloc dst_tmp(pool); ggml_cuda_pool_alloc dst_tmp_meta(pool); @@ -713,46 +772,90 @@ void launch_fattn( size_t nb12 = K->nb[2]; size_t nb13 = K->nb[3]; - const char * V_data = (const char *) V->data; - size_t nb21 = V->nb[1]; - size_t nb22 
= V->nb[2]; - size_t nb23 = V->nb[3]; + const char * V_data = V ? (const char *) V->data : nullptr; + size_t nb21 = V ? V->nb[1] : nb11; + size_t nb22 = V ? V->nb[2] : nb12; + size_t nb23 = V ? V->nb[3] : nb13; if (need_f16_K && K->type != GGML_TYPE_F16) { - GGML_ASSERT(ggml_is_contiguously_allocated(K)); - K_f16.alloc(ggml_nelements(K)); - to_fp16_cuda_t to_fp16 = ggml_get_to_fp16_cuda(K->type); - to_fp16(K_data, K_f16.ptr, ggml_nelements(K), main_stream); - K_data = (char *) K_f16.ptr; - const size_t bs = ggml_blck_size(K->type); const size_t ts = ggml_type_size(K->type); - nb11 = nb11*bs*sizeof(half)/ts; - nb12 = nb12*bs*sizeof(half)/ts; - nb13 = nb13*bs*sizeof(half)/ts; + K_f16.alloc(ggml_nelements(K)); + if (ggml_is_contiguously_allocated(K)) { + to_fp16_cuda_t to_fp16 = ggml_get_to_fp16_cuda(K->type); + to_fp16(K_data, K_f16.ptr, ggml_nelements(K), main_stream); + + nb11 = nb11*bs*sizeof(half)/ts; + nb12 = nb12*bs*sizeof(half)/ts; + nb13 = nb13*bs*sizeof(half)/ts; + } else { + GGML_ASSERT(K->nb[0] == ts); + to_fp16_nc_cuda_t to_fp16 = ggml_get_to_fp16_nc_cuda(K->type); + const int64_t s01 = nb11 / ts; + const int64_t s02 = nb12 / ts; + const int64_t s03 = nb13 / ts; + to_fp16(K_data, K_f16.ptr, K->ne[0], K->ne[1], K->ne[2], K->ne[3], s01, s02, s03, main_stream); + + nb11 = K->ne[0] * sizeof(half); + nb12 = K->ne[1] * nb11; + nb13 = K->ne[2] * nb12; + } + K_data = (char *) K_f16.ptr; } - if (need_f16_V && V->type != GGML_TYPE_F16) { - GGML_ASSERT(ggml_is_contiguously_allocated(V)); - V_f16.alloc(ggml_nelements(V)); - to_fp16_cuda_t to_fp16 = ggml_get_to_fp16_cuda(V->type); - to_fp16(V_data, V_f16.ptr, ggml_nelements(V), main_stream); - V_data = (char *) V_f16.ptr; - + if (V && need_f16_V && V->type != GGML_TYPE_F16) { const size_t bs = ggml_blck_size(V->type); const size_t ts = ggml_type_size(V->type); - nb21 = nb21*bs*sizeof(half)/ts; - nb22 = nb22*bs*sizeof(half)/ts; - nb23 = nb23*bs*sizeof(half)/ts; - } + V_f16.alloc(ggml_nelements(V)); + if (ggml_is_contiguously_allocated(V)) { + to_fp16_cuda_t to_fp16 = ggml_get_to_fp16_cuda(V->type); + to_fp16(V_data, V_f16.ptr, ggml_nelements(V), main_stream); + V_data = (char *) V_f16.ptr; - int parallel_blocks = 1; + nb21 = nb21*bs*sizeof(half)/ts; + nb22 = nb22*bs*sizeof(half)/ts; + nb23 = nb23*bs*sizeof(half)/ts; + } else { + GGML_ASSERT(V->nb[0] == ts); + to_fp16_nc_cuda_t to_fp16 = ggml_get_to_fp16_nc_cuda(V->type); + const int64_t s01 = nb21 / ts; + const int64_t s02 = nb22 / ts; + const int64_t s03 = nb23 / ts; + to_fp16(V_data, V_f16.ptr, V->ne[0], V->ne[1], V->ne[2], V->ne[3], s01, s02, s03, main_stream); + + nb21 = V->ne[0] * sizeof(half); + nb22 = V->ne[1] * nb21; + nb23 = V->ne[2] * nb22; + } + V_data = (char *) V_f16.ptr; + } const int ntiles_x = ((Q->ne[1] + ncols1 - 1) / ncols1); const int ntiles_total = ntiles_x * (Q->ne[2] / ncols2) * Q->ne[3]; + // Optional optimization where the mask is scanned to determine whether part of the calculation can be skipped. + // Only worth the overhead if there is at least one FATTN_KQ_STRIDE x FATTN_KQ_STRIDE square to be skipped or + // multiple sequences of possibly different lengths.
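// [Editor's sketch, not part of the patch] A CPU reference for the KV_max scan that the
// flash_attn_mask_to_KV_max kernel launched below performs on the GPU: for one sequence and one
// tile of ncols1 query rows, find the shortest KV prefix (a multiple of FATTN_KQ_STRIDE) beyond
// which every mask value is -inf, so the attention kernel can stop iterating over KV blocks
// early. The helper name kv_max_reference is hypothetical; mask is assumed dense row-major
// [n_rows][n_kv] with n_kv a multiple of kq_stride, matching the kernel's padding assumptions.
#include <cmath>

static int kv_max_reference(const float * mask, int n_rows, int n_kv, int kq_stride) {
    int kv_max = n_kv;
    while (kv_max > 0) {
        // Check whether the trailing kq_stride-wide block is -inf for every query row:
        bool all_inf = true;
        for (int j = 0; j < n_rows && all_inf; ++j) {
            for (int k = kv_max - kq_stride; k < kv_max && all_inf; ++k) {
                all_inf = std::isinf(mask[j*n_kv + k]) && mask[j*n_kv + k] < 0.0f;
            }
        }
        if (!all_inf) {
            break;           // block still contains live entries -> it must be processed
        }
        kv_max -= kq_stride; // fully masked block -> the kernel may skip it entirely
    }
    return kv_max;
}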
+ if (mask && (Q->ne[1] >= 1024 || Q->ne[3] > 1)) { + const int s31 = mask->nb[1] / sizeof(half2); + const int s33 = mask->nb[3] / sizeof(half2); + + const dim3 blocks_num_KV_max(ntiles_x, Q->ne[3], 1); + const dim3 block_dim_KV_max(FATTN_KQ_STRIDE/2, 1, 1); + + const int ne_KV_max = blocks_num_KV_max.x*blocks_num_KV_max.y; + const int iter_k = K->ne[1] / FATTN_KQ_STRIDE; + + KV_max.alloc(ne_KV_max); + flash_attn_mask_to_KV_max<<>> + ((const half2 *) mask->data, KV_max.ptr, iter_k, s31, s33); + CUDA_CHECK(cudaGetLastError()); + } + + int parallel_blocks = 1; + const dim3 block_dim(warp_size, nwarps, 1); int max_blocks_per_sm = 1; // Max. number of active blocks limited by occupancy. CUDA_CHECK(cudaOccupancyMaxActiveBlocksPerMultiprocessor(&max_blocks_per_sm, fattn_kernel, block_dim.x * block_dim.y * block_dim.z, nbytes_shared)); @@ -839,15 +942,15 @@ void launch_fattn( K_data, V_data, mask ? ((const char *) mask->data) : nullptr, + sinks ? ((const char *) sinks->data) : nullptr, + KV_max.ptr, !stream_k && parallel_blocks > 1 ? dst_tmp.ptr : (float *) KQV->data, dst_tmp_meta.ptr, scale, max_bias, m0, m1, n_head_log2, logit_softcap, - Q->ne[0], Q->ne[1], Q->ne[2], Q->ne[3], - K->ne[0], K->ne[1], K->ne[2], K->ne[3], - mask ? mask->ne[1] : 0, mask ? mask->nb[1] : 0, - Q->nb[1], Q->nb[2], Q->nb[3], - nb11, nb12, nb13, + Q->ne[0], Q->ne[1], Q->ne[2], Q->ne[3], Q->nb[1], Q->nb[2], Q->nb[3], + K->ne[0], K->ne[1], K->ne[2], K->ne[3], nb11, nb12, nb13, nb21, nb22, nb23, - KQV->ne[0], KQV->ne[1], KQV->ne[2], KQV->ne[3] + mask ? mask->ne[1] : 0, mask ? mask->ne[2] : 0, mask ? mask->ne[3] : 0, + mask ? mask->nb[1] : 0, mask ? mask->nb[2] : 0, mask ? mask->nb[3] : 0 ); CUDA_CHECK(cudaGetLastError()); @@ -858,11 +961,11 @@ void launch_fattn( flash_attn_stream_k_fixup <<>> - ((float *) KQV->data, dst_tmp_meta.ptr, Q->ne[1], Q->ne[2], K->ne[1]); + ((float *) KQV->data, dst_tmp_meta.ptr, Q->ne[1], Q->ne[2], Q->ne[3], K->ne[1]); } } else if (parallel_blocks > 1) { const dim3 block_dim_combine(DV, 1, 1); - const dim3 blocks_num_combine(Q->ne[1], 1, blocks_num.z); + const dim3 blocks_num_combine(Q->ne[1], Q->ne[2], Q->ne[3]); const size_t nbytes_shared_combine = parallel_blocks*sizeof(float2); flash_attn_combine_results diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/fattn-mma-f16.cuh b/ml/backend/ggml/ggml/src/ggml-cuda/fattn-mma-f16.cuh index 491780abd..39731baae 100644 --- a/ml/backend/ggml/ggml/src/ggml-cuda/fattn-mma-f16.cuh +++ b/ml/backend/ggml/ggml/src/ggml-cuda/fattn-mma-f16.cuh @@ -33,9 +33,30 @@ struct fattn_mma_f16_config< 64, 64> { static constexpr int nwarps_max = 4; static constexpr bool Q_in_reg = true; static constexpr int nstages_target = 2; - static constexpr int nbatch_K2 = 32; - static constexpr int nbatch_V2 = 32; - static constexpr int nbatch_combine = 32; + + static int get_nbatch_K2_host(const int /*cc*/, const int /*ncols*/) { + return 32; + } + + static constexpr __device__ int get_nbatch_K2_device(int /*ncols*/) { + return 32; + } + + static int get_nbatch_V2_host(const int /*cc*/, const int /*ncols*/) { + return 32; + } + + static constexpr __device__ int get_nbatch_V2_device(int /*ncols*/) { + return 32; + } + + static int get_nbatch_combine_host(const int /*cc*/, const int /*ncols*/) { + return 32; + } + + static constexpr __device__ int get_nbatch_combine_device(int /*ncols*/) { + return 32; + } }; template <> @@ -44,9 +65,30 @@ struct fattn_mma_f16_config< 80, 80> { static constexpr int nwarps_max = 4; static constexpr bool Q_in_reg = true; static constexpr int nstages_target = 
2; - static constexpr int nbatch_K2 = 40; - static constexpr int nbatch_V2 = 40; - static constexpr int nbatch_combine = 40; + + static int get_nbatch_K2_host(const int /*cc*/, const int /*ncols*/) { + return 40; + } + + static constexpr __device__ int get_nbatch_K2_device(int /*ncols*/) { + return 40; + } + + static int get_nbatch_V2_host(const int /*cc*/, const int /*ncols*/) { + return 40; + } + + static constexpr __device__ int get_nbatch_V2_device(int /*ncols*/) { + return 40; + } + + static int get_nbatch_combine_host(const int /*cc*/, const int /*ncols*/) { + return 40; + } + + static constexpr __device__ int get_nbatch_combine_device(int /*ncols*/) { + return 40; + } }; template <> @@ -55,9 +97,30 @@ struct fattn_mma_f16_config< 96, 96> { static constexpr int nwarps_max = 4; static constexpr bool Q_in_reg = true; static constexpr int nstages_target = 2; - static constexpr int nbatch_K2 = 48; - static constexpr int nbatch_V2 = 48; - static constexpr int nbatch_combine = 48; + + static int get_nbatch_K2_host(const int /*cc*/, const int /*ncols*/) { + return 48; + } + + static constexpr __device__ int get_nbatch_K2_device(int /*ncols*/) { + return 48; + } + + static int get_nbatch_V2_host(const int /*cc*/, const int /*ncols*/) { + return 48; + } + + static constexpr __device__ int get_nbatch_V2_device(int /*ncols*/) { + return 48; + } + + static int get_nbatch_combine_host(const int /*cc*/, const int /*ncols*/) { + return 48; + } + + static constexpr __device__ int get_nbatch_combine_device(int /*ncols*/) { + return 48; + } }; template <> @@ -66,9 +129,30 @@ struct fattn_mma_f16_config<112, 112> { static constexpr int nwarps_max = 4; static constexpr bool Q_in_reg = true; static constexpr int nstages_target = 2; - static constexpr int nbatch_K2 = 56; - static constexpr int nbatch_V2 = 56; - static constexpr int nbatch_combine = 56; + + static int get_nbatch_K2_host(const int /*cc*/, const int /*ncols*/) { + return 56; + } + + static constexpr __device__ int get_nbatch_K2_device(int /*ncols*/) { + return 56; + } + + static int get_nbatch_V2_host(const int /*cc*/, const int /*ncols*/) { + return 56; + } + + static constexpr __device__ int get_nbatch_V2_device(int /*ncols*/) { + return 56; + } + + static int get_nbatch_combine_host(const int /*cc*/, const int /*ncols*/) { + return 56; + } + + static constexpr __device__ int get_nbatch_combine_device(int /*ncols*/) { + return 56; + } }; template <> @@ -77,9 +161,30 @@ struct fattn_mma_f16_config<128, 128> { static constexpr int nwarps_max = 4; static constexpr bool Q_in_reg = true; static constexpr int nstages_target = 2; - static constexpr int nbatch_K2 = 64; - static constexpr int nbatch_V2 = 64; - static constexpr int nbatch_combine = 64; + + static int get_nbatch_K2_host(const int /*cc*/, const int /*ncols*/) { + return 64; + } + + static constexpr __device__ int get_nbatch_K2_device(int /*ncols*/) { + return 64; + } + + static int get_nbatch_V2_host(const int /*cc*/, const int /*ncols*/) { + return 64; + } + + static constexpr __device__ int get_nbatch_V2_device(int /*ncols*/) { + return 64; + } + + static int get_nbatch_combine_host(const int /*cc*/, const int /*ncols*/) { + return 64; + } + + static constexpr __device__ int get_nbatch_combine_device(int /*ncols*/) { + return 64; + } }; template <> @@ -88,9 +193,38 @@ struct fattn_mma_f16_config<256, 256> { static constexpr int nwarps_max = 4; static constexpr bool Q_in_reg = true; static constexpr int nstages_target = 2; - static constexpr int nbatch_K2 = 128; - static constexpr 
int nbatch_V2 = 128; - static constexpr int nbatch_combine = 128; + + static int get_nbatch_K2_host(const int /*cc*/, const int /*ncols*/) { + return 128; + } + + static constexpr __device__ int get_nbatch_K2_device(int /*ncols*/) { + return 128; + } + + static int get_nbatch_V2_host(const int /*cc*/, const int /*ncols*/) { + return 128; + } + + static constexpr __device__ int get_nbatch_V2_device(int /*ncols*/) { + return 128; + } + + static int get_nbatch_combine_host(const int cc, const int ncols) { + if (ggml_cuda_highest_compiled_arch(cc) == GGML_CUDA_CC_TURING) { + return ncols <= 16 ? 128 : 64; + } + return 64; + } + + static constexpr __device__ int get_nbatch_combine_device(int ncols) { +#if __CUDA_ARCH__ == GGML_CUDA_CC_TURING + return ncols <= 16 ? 128 : 64; +#else + GGML_UNUSED(ncols); + return 128; +#endif // __CUDA_ARCH__ == GGML_CUDA_CC_TURING + } }; template <> @@ -99,9 +233,44 @@ struct fattn_mma_f16_config<576, 512> { static constexpr int nwarps_max = 8; static constexpr bool Q_in_reg = false; static constexpr int nstages_target = 1; - static constexpr int nbatch_K2 = 160; - static constexpr int nbatch_V2 = 128; - static constexpr int nbatch_combine = 128; + + static int get_nbatch_K2_host(const int cc, const int ncols) { + if (ggml_cuda_highest_compiled_arch(cc) == GGML_CUDA_CC_TURING) { + return ncols <= 16 ? 96 : 160; + } + return ncols <= 16 ? 288 : 160; + } + + static constexpr __device__ int get_nbatch_K2_device(int ncols) { +#if __CUDA_ARCH__ == GGML_CUDA_CC_TURING + return ncols <= 16 ? 96 : 160; +#else + return ncols <= 16 ? 288 : 160; +#endif // __CUDA_ARCH__ == GGML_CUDA_CC_TURING + } + + static int get_nbatch_V2_host(const int cc, const int ncols) { + if (ggml_cuda_highest_compiled_arch(cc) == GGML_CUDA_CC_TURING) { + return ncols <= 16 ? 64 : 128; + } + return ncols <= 16 ? 256 : 128; + } + + static constexpr __device__ int get_nbatch_V2_device(int ncols) { +#if __CUDA_ARCH__ == GGML_CUDA_CC_TURING + return ncols <= 16 ? 64 : 128; +#else + return ncols <= 16 ? 256 : 128; +#endif // __CUDA_ARCH__ == GGML_CUDA_CC_TURING + } + + static int get_nbatch_combine_host(const int /*cc*/, const int /*ncols*/) { + return 128; + } + + static constexpr __device__ int get_nbatch_combine_device(int /*ncols*/) { + return 128; + } }; // ------------------------------------------------------------------------------------------------------------------ @@ -120,7 +289,7 @@ static __device__ __forceinline__ void flash_attn_ext_f16_load_tile( const unsigned int tile_KV_32 = ggml_cuda_cvta_generic_to_shared(tile_KV); - auto load = [&] __device__ (const int n) { + auto load = [&] __device__ (auto n) { const int stride_k = WARP_SIZE >> n; const int k0_start = stride_k == WARP_SIZE ? 
0 : chunks_per_row - chunks_per_row % (2*stride_k); const int k0_stop = chunks_per_row - chunks_per_row % (1*stride_k); @@ -223,7 +392,8 @@ static __device__ __forceinline__ void flash_attn_ext_f16_load_mask( } } -template +template static __device__ __forceinline__ void flash_attn_ext_f16_iter( const float2 * const __restrict__ Q_f2, const half2 * const __restrict__ K_h2, @@ -239,7 +409,6 @@ static __device__ __forceinline__ void flash_attn_ext_f16_iter( const int stride_K, const int stride_V, const int stride_mask, - const int jt, half2 * const __restrict__ tile_Q, half2 * const __restrict__ tile_K, half2 * const __restrict__ tile_V, @@ -249,7 +418,7 @@ static __device__ __forceinline__ void flash_attn_ext_f16_iter( float * const __restrict__ KQ_max, float * const __restrict__ KQ_rowsum, const int kb0) { -#ifdef NEW_MMA_AVAILABLE +#ifdef TURING_MMA_AVAILABLE typedef fattn_mma_f16_config c; #ifdef CP_ASYNC_AVAILABLE @@ -261,10 +430,15 @@ static __device__ __forceinline__ void flash_attn_ext_f16_iter( constexpr int cols_per_warp = ntiles * tile_B::I; constexpr int cols_per_thread = ntiles == 1 ? 2 : ntiles; constexpr int np = nwarps * (cols_per_warp/ncols2) / ncols1; // Number of parallel CUDA warps per Q column. + constexpr int ncols = ncols1 * ncols2; + constexpr int nbatch_K2 = c::get_nbatch_K2_device(ncols); + constexpr int nbatch_V2 = c::get_nbatch_V2_device(ncols); - constexpr int stride_tile_Q = DKQ/2 + 4; - constexpr int stride_tile_K = c::nbatch_K2 + 4; - constexpr int stride_tile_V = c::nbatch_V2 + 4; + constexpr int stride_tile_Q = DKQ/2 + 4; + constexpr int stride_tile_K = nbatch_K2 + 4; + + static_assert(!mla || nbatch_K2 >= nbatch_V2, "bad nbatch_K2, nbatch_V2 for MLA"); + constexpr int stride_tile_V = mla ? stride_tile_K : nbatch_V2 + 4; const int k_VKQ_0 = kb0 * c::nbatch_fa; tile_C_KQ KQ_C[c::nbatch_fa/(np*tile_C_KQ::I) * ntiles]; @@ -275,12 +449,13 @@ static __device__ __forceinline__ void flash_attn_ext_f16_iter( tile_C_KQ_16 * KQ_C_16 = (tile_C_KQ_16 *) KQ_C; if constexpr (nstages > 1) { - static_assert(c::nbatch_K2 == DKQ/2, "batching not implemented for multi stage loading"); + static_assert(!mla, "multi-stage loading not implemented for MLA"); + static_assert(nbatch_K2 == DKQ/2, "batching not implemented for multi stage loading"); constexpr bool use_cp_async = true; cp_async_wait_all(); __syncthreads(); flash_attn_ext_f16_load_tile - (V_h2 + k_VKQ_0*stride_V, tile_V, c::nbatch_V2, stride_V); + (V_h2 + int64_t(k_VKQ_0)*stride_V, tile_V, nbatch_V2, stride_V); } else { constexpr bool use_cp_async = nstages == 1; if (ncols2 > 1 || mask_h2) { @@ -289,14 +464,14 @@ static __device__ __forceinline__ void flash_attn_ext_f16_iter( } #pragma unroll - for (int k0_start = 0; k0_start < DKQ/2; k0_start += c::nbatch_K2) { - const int k0_stop = k0_start + c::nbatch_K2 < DKQ/2 ? k0_start + c::nbatch_K2 : DKQ/2; + for (int k0_start = 0; k0_start < DKQ/2; k0_start += nbatch_K2) { + const int k0_stop = k0_start + nbatch_K2 < DKQ/2 ? 
k0_start + nbatch_K2 : DKQ/2; const int k0_diff = k0_stop - k0_start; if (nstages <= 1) { constexpr bool use_cp_async = nstages == 1; flash_attn_ext_f16_load_tile - (K_h2 + k_VKQ_0*stride_K + k0_start, tile_K, k0_diff, stride_K); + (K_h2 + int64_t(k_VKQ_0)*stride_K + k0_start, tile_K, k0_diff, stride_K); if (use_cp_async) { cp_async_wait_all(); } @@ -477,9 +652,12 @@ static __device__ __forceinline__ void flash_attn_ext_f16_iter( float KQ_max_scale[cols_per_thread]; #pragma unroll for (int col = 0; col < cols_per_thread; ++col) { - KQ_max_scale[col] = expf(KQ_max[col] - KQ_max_new[col]); + const float KQ_max_diff = KQ_max[col] - KQ_max_new[col]; + KQ_max_scale[col] = expf(KQ_max_diff); KQ_max[col] = KQ_max_new[col]; + *((uint32_t *) &KQ_max_scale[col]) *= KQ_max_diff >= SOFTMAX_FTZ_THRESHOLD; + // Scale previous KQ_rowsum to account for a potential increase in KQ_max: KQ_rowsum[col] = KQ_max_scale[col]*KQ_rowsum[col] + KQ_rowsum_add[col]; } @@ -537,24 +715,30 @@ static __device__ __forceinline__ void flash_attn_ext_f16_iter( (mask_h2 + (k_VKQ_0 + c::nbatch_fa)/2, tile_mask, stride_mask); } flash_attn_ext_f16_load_tile - (K_h2 + (k_VKQ_0 + c::nbatch_fa)*stride_K, tile_K, c::nbatch_K2, stride_K); + (K_h2 + int64_t(k_VKQ_0 + c::nbatch_fa)*stride_K, tile_K, nbatch_K2, stride_K); } } -#pragma unroll - for (int i0_start = 0; i0_start < DV; i0_start += 2*c::nbatch_V2) { - const int i0_stop = i0_start + 2*c::nbatch_V2 < DV ? i0_start + 2*c::nbatch_V2 : DV; - const int i0_diff = i0_stop - i0_start; - if (nstages <= 1) { + // For MLA K and V have the same data. + // Therefore, iterate over V in reverse and re-use the data if possible. + static_assert(!mla || nstages <= 1, "combination of MLA and multi-stage loading not implemented"); + constexpr int reusable_cutoff = mla ? (DKQ - 1) - (DKQ - 1) % (2*nbatch_K2) - (DKQ - DV) : DV; +#pragma unroll + for (int i0_stop = DV; i0_stop > 0; i0_stop -= 2*nbatch_V2) { + const int i0_start = i0_stop - 2*nbatch_V2 > 0 ? i0_stop - 2*nbatch_V2 : 0; + const int i0_diff = i0_stop - i0_start; + + if (nstages <= 1 && i0_start < reusable_cutoff) { constexpr bool use_cp_async = nstages == 1; flash_attn_ext_f16_load_tile - (V_h2 + k_VKQ_0*stride_V + i0_start/2, tile_V, i0_diff/2, stride_V); + (V_h2 + int64_t(k_VKQ_0)*stride_V + i0_start/2, tile_V, i0_diff/2, stride_V); if (use_cp_async) { cp_async_wait_all(); } __syncthreads(); } + const half2 * tile_V_i = i0_start < reusable_cutoff ? 
tile_V : tile_V + (i0_start - reusable_cutoff)/2; // Calculate VKQ tile: #pragma unroll @@ -565,7 +749,7 @@ static __device__ __forceinline__ void flash_attn_ext_f16_iter( const int k0 = k00 + (threadIdx.y % np)*tile_A::J; tile_A A; - load_ldmatrix_trans(A, tile_V + 2*k0*stride_tile_V + (i_VKQ_0 - i0_start)/2, stride_tile_V); + load_ldmatrix_trans(A, tile_V_i + 2*k0*stride_tile_V + (i_VKQ_0 - i0_start)/2, stride_tile_V); if (ntiles == 1) { mma(VKQ_C[i_VKQ_0/tile_C_VKQ::I], A, B[k00/(np*tile_A::J)]); } else { @@ -587,21 +771,21 @@ static __device__ __forceinline__ void flash_attn_ext_f16_iter( GGML_UNUSED(mask_h2); GGML_UNUSED(dstk); GGML_UNUSED(dstk_fixup); GGML_UNUSED(scale); GGML_UNUSED(slope); GGML_UNUSED(logit_softcap); GGML_UNUSED(ne01); GGML_UNUSED(ne02); GGML_UNUSED(stride_K); GGML_UNUSED(stride_V); - GGML_UNUSED(stride_mask); GGML_UNUSED(jt); GGML_UNUSED(tile_K); - GGML_UNUSED(stride_mask); GGML_UNUSED(jt); GGML_UNUSED(tile_K); + GGML_UNUSED(stride_mask); GGML_UNUSED(tile_K); GGML_UNUSED(tile_V); GGML_UNUSED(tile_mask); GGML_UNUSED(Q_B); GGML_UNUSED(VKQ_C); GGML_UNUSED(KQ_max); GGML_UNUSED(KQ_rowsum); - GGML_UNUSED(kb0); + GGML_UNUSED(kb0); GGML_UNUSED(tile_Q); NO_DEVICE_CODE; -#endif // NEW_MMA_AVAILABLE +#endif // TURING_MMA_AVAILABLE } -template +template static __device__ __forceinline__ void flash_attn_ext_f16_process_tile( const float2 * const __restrict__ Q_f2, const half2 * const __restrict__ K_h2, const half2 * const __restrict__ V_h2, const half2 * const __restrict__ mask_h2, + const float * const __restrict__ sinks_f, float2 * const __restrict__ dstk, float2 * const __restrict__ dstk_fixup, const float scale, @@ -617,7 +801,7 @@ static __device__ __forceinline__ void flash_attn_ext_f16_process_tile( const int jt, const int kb0_start, const int kb0_stop) { -#ifdef NEW_MMA_AVAILABLE +#ifdef TURING_MMA_AVAILABLE //In this kernel Q, K, V are matrices while i, j, k are matrix indices. typedef fattn_mma_f16_config c; @@ -632,13 +816,16 @@ static __device__ __forceinline__ void flash_attn_ext_f16_process_tile( constexpr int cols_per_warp = ntiles * tile_B::I; constexpr int cols_per_thread = ntiles == 1 ? 2 : ntiles; constexpr int np = nwarps * (cols_per_warp/ncols2) / ncols1; // Number of parallel CUDA warps per Q column. + constexpr int nbatch_K2 = c::get_nbatch_K2_device(ncols); + constexpr int nbatch_V2 = c::get_nbatch_V2_device(ncols); static_assert(nwarps * (cols_per_warp/ncols2) % ncols1 == 0, "bad nwarps"); - constexpr int stride_tile_Q = DKQ/2 + 4; - constexpr int stride_tile_K = c::nbatch_K2 + 4; - constexpr int stride_tile_V = c::nbatch_V2 + 4; + constexpr int stride_tile_Q = DKQ/2 + 4; + constexpr int stride_tile_K = nbatch_K2 + 4; + static_assert(!mla || nbatch_K2 >= nbatch_V2, "bad nbatch_K2, nbatch_V2 for MLA"); + constexpr int stride_tile_V = mla ? stride_tile_K : nbatch_V2 + 4; constexpr int stride_tile_KV_max = stride_tile_K > stride_tile_V ? 
stride_tile_K : stride_tile_V; extern __shared__ half2 tile_Q[]; @@ -726,28 +913,29 @@ static __device__ __forceinline__ void flash_attn_ext_f16_process_tile( // Preload mask and K data for first iteration when using cp_async with multiple stages: if constexpr (nstages > 1) { - static_assert(c::nbatch_K2 == DKQ/2, "batching not implemented for multi-stage pipeline"); + static_assert(nbatch_K2 == DKQ/2, "batching not implemented for multi-stage pipeline"); constexpr bool use_cp_async = true; if (ncols2 > 1 || mask_h2) { flash_attn_ext_f16_load_mask (mask_h2 + kb0_start*c::nbatch_fa/2, tile_mask, stride_mask); } flash_attn_ext_f16_load_tile - (K_h2 + kb0_start*c::nbatch_fa*stride_K, tile_K, c::nbatch_K2, stride_K); + (K_h2 + int64_t(kb0_start)*c::nbatch_fa*stride_K, tile_K, nbatch_K2, stride_K); } // Iterate over ne11 == previous tokens: - for (int kb0 = kb0_start; kb0 < kb0_stop-1; ++kb0) { + int kb0 = kb0_start; + for (; kb0 < kb0_stop-1; ++kb0) { constexpr bool last_iter = false; - flash_attn_ext_f16_iter<DKQ, DV, ncols1, ncols2, nwarps, ntiles, use_logit_softcap, needs_fixup, is_fixup, last_iter> + flash_attn_ext_f16_iter<DKQ, DV, ncols1, ncols2, nwarps, ntiles, use_logit_softcap, mla, needs_fixup, is_fixup, last_iter> (Q_f2, K_h2, V_h2, mask_h2, dstk, dstk_fixup, scale, slope, logit_softcap, - ne01, ne02, stride_K, stride_V, stride_mask, jt, tile_Q, tile_K, tile_V, tile_mask, Q_B, VKQ_C, KQ_max, KQ_rowsum, kb0); + ne01, ne02, stride_K, stride_V, stride_mask, tile_Q, tile_K, tile_V, tile_mask, Q_B, VKQ_C, KQ_max, KQ_rowsum, kb0); } { // kb0_start is always < kb0_stop so the last iter can be executed unconditionally. constexpr bool last_iter = true; - flash_attn_ext_f16_iter<DKQ, DV, ncols1, ncols2, nwarps, ntiles, use_logit_softcap, needs_fixup, is_fixup, last_iter> + flash_attn_ext_f16_iter<DKQ, DV, ncols1, ncols2, nwarps, ntiles, use_logit_softcap, mla, needs_fixup, is_fixup, last_iter> (Q_f2, K_h2, V_h2, mask_h2, dstk, dstk_fixup, scale, slope, logit_softcap, - ne01, ne02, stride_K, stride_V, stride_mask, jt, tile_Q, tile_K, tile_V, tile_mask, Q_B, VKQ_C, KQ_max, KQ_rowsum, kb0_stop-1); + ne01, ne02, stride_K, stride_V, stride_mask, tile_Q, tile_K, tile_V, tile_mask, Q_B, VKQ_C, KQ_max, KQ_rowsum, kb0); } // With multi-stage loading there is no __syncthreads at the end of the iter, @@ -770,11 +958,57 @@ static __device__ __forceinline__ void flash_attn_ext_f16_process_tile( } } + // If attention sinks are used, potentially re-scale if KQ_max is small. + // Also add the sink as a value to KQ_rowsum, this is done after synchronization of KQ_rowsum + // so it's being done unconditionally for every thread. + if (!is_fixup && (np == 1 || threadIdx.y % np == 0) && sinks_f) { + float KQ_max_scale[cols_per_thread]; +#pragma unroll + for (int col = 0; col < cols_per_thread; ++col) { + static_assert(ntiles == 1 || ntiles == 2, "ntiles > 2 not implemented"); + const int jc = ntiles == 1 ?
2*tile_C_VKQ::get_j(col/2) + col % 2 : tile_C_VKQ_16::get_i(col); + const float sink = sinks_f[jc % ncols2]; + + const float KQ_max_new = fmaxf(KQ_max[col], sink); + const float KQ_max_diff = KQ_max[col] - KQ_max_new; + KQ_max_scale[col] = expf(KQ_max_diff); + KQ_max[col] = KQ_max_new; + + *((uint32_t *) &KQ_max_scale[col]) *= KQ_max_diff >= SOFTMAX_FTZ_THRESHOLD; + + const float KQ_max_add = expf(sink - KQ_max_new); + KQ_rowsum[col] = KQ_max_scale[col]*KQ_rowsum[col] + KQ_max_add; + } + + if (ntiles == 1) { + const half2 KQ_max_scale_h2 = make_half2(KQ_max_scale[0], KQ_max_scale[1]); +#pragma unroll + for (int i = 0; i < DV/tile_C_VKQ::I; ++i) { +#pragma unroll + for (int l = 0; l < tile_C_VKQ::ne; ++l) { + VKQ_C[i].x[l] *= KQ_max_scale_h2; + } + } + } else { +#pragma unroll + for (int col = 0; col < cols_per_thread; ++col) { + const half2 KQ_max_scale_h2 = make_half2(KQ_max_scale[col], KQ_max_scale[col]); +#pragma unroll + for (int i = 0; i < DV/tile_C_VKQ_16::J; ++i) { +#pragma unroll + for (int l0 = 0; l0 < tile_C_VKQ_16::ne; l0 += 2) { + VKQ_C_16[i*ntiles/2 + col/2].x[l0 + col % 2] *= KQ_max_scale_h2; + } + } + } + } + } + // Combine VKQ accumulator values if np > 1. // It's also faster to do small writes to shared memory, then large write to VRAM than to do small writes to VRAM. // So also write VKQ accumulators to shared memory in column-major format if np == 1. - constexpr int nbatch_combine = c::Q_in_reg ? DV/2 : DV/4; + constexpr int nbatch_combine = c::get_nbatch_combine_device(ncols); constexpr int tile_stride = nbatch_combine + 4; static_assert((DV/2) % nbatch_combine == 0, "bad nbatch_combine"); @@ -1009,16 +1243,18 @@ static __device__ __forceinline__ void flash_attn_ext_f16_process_tile( GGML_UNUSED(stride_Q2); GGML_UNUSED(stride_K); GGML_UNUSED(stride_V); GGML_UNUSED(stride_mask); GGML_UNUSED(jt); GGML_UNUSED(kb0_start); GGML_UNUSED(kb0_stop); NO_DEVICE_CODE; -#endif // NEW_MMA_AVAILABLE +#endif // TURING_MMA_AVAILABLE } -template +template __launch_bounds__(nwarps*WARP_SIZE, 1) static __global__ void flash_attn_ext_f16( const char * __restrict__ Q, const char * __restrict__ K, const char * __restrict__ V, const char * __restrict__ mask, + const char * __restrict__ sinks, + const int * __restrict__ KV_max, float * __restrict__ dst, float2 * __restrict__ dst_meta, const float scale, @@ -1027,36 +1263,28 @@ static __global__ void flash_attn_ext_f16( const float m1, const uint32_t n_head_log2, const float logit_softcap, - const int ne00, - const int ne01, - const int ne02, - const int ne03, - const int ne10, - const int ne11, - const int ne12, - const int ne13, - const int ne31, - const int nb31, - const int nb01, - const int nb02, - const int nb03, - const int nb11, - const int nb12, - const int nb13, - const int nb21, - const int nb22, - const int nb23, - const int ne0, - const int ne1, - const int ne2, - const int ne3) { -#if defined(FLASH_ATTN_AVAILABLE) && defined(NEW_MMA_AVAILABLE) + const int32_t ne00, const int32_t ne01, const int32_t ne02, const int32_t ne03, + const int32_t nb01, const int32_t nb02, const int32_t nb03, + const int32_t ne10, const int32_t ne11, const int32_t ne12, const int32_t ne13, + const int32_t nb11, const int32_t nb12, const int64_t nb13, + const int32_t nb21, const int32_t nb22, const int64_t nb23, + const int32_t ne31, const int32_t ne32, const int32_t ne33, + const int32_t nb31, const int32_t nb32, const int64_t nb33) { +#if defined(FLASH_ATTN_AVAILABLE) && defined(TURING_MMA_AVAILABLE) // Skip unused kernel variants for faster compilation: 
if (use_logit_softcap && !(DKQ == 128 || DKQ == 256)) { NO_DEVICE_CODE; return; } +#if __CUDA_ARCH__ == GGML_CUDA_CC_TURING + if (ncols1*ncols2 > 32) { + NO_DEVICE_CODE; + return; + } +#endif // __CUDA_ARCH__ == GGML_CUDA_CC_TURING + + static_assert(!mla || DKQ >= DV, "MLA needs DKQ >= DV"); typedef fattn_mma_f16_config c; @@ -1067,17 +1295,18 @@ static __global__ void flash_attn_ext_f16( const int stride_Q1 = nb01 / sizeof(float2); const int stride_Q2 = nb02 / sizeof(float2); const int stride_K = nb11 / sizeof(half2); - const int stride_V = nb21 / sizeof(half2); const int stride_mask = nb31 / sizeof(half2); + const int stride_V = mla ? stride_K : nb21 / sizeof(half2); + const int iter_k = ne11 / FATTN_KQ_STRIDE; const int iter_j = (ne01 + (ncols1 - 1)) / ncols1; constexpr int kb_niter = FATTN_KQ_STRIDE / c::nbatch_fa; // Number of kernel iterations per assigned KQ slice. // kbc == k block continuous, current index in continuous ijk space. - int kbc = (blockIdx.x + 0)*iter_k*iter_j*(ne02/ncols2) / gridDim.x; - const int kbc_stop = (blockIdx.x + 1)*iter_k*iter_j*(ne02/ncols2) / gridDim.x; + int kbc = (blockIdx.x + 0)*(iter_k*iter_j*(ne02/ncols2)*ne03) / gridDim.x; + const int kbc_stop = (blockIdx.x + 1)*(iter_k*iter_j*(ne02/ncols2)*ne03) / gridDim.x; // If the seams of 2 CUDA blocks fall within an output tile their results need to be combined. // For this we need to track both the block that starts the tile (needs_fixup) and the block that finishes the tile (is_fixup). @@ -1086,31 +1315,42 @@ static __global__ void flash_attn_ext_f16( // kb0 == k start index when in the output tile. int kb0_start = kbc % iter_k; int kb0_stop = min(iter_k, kb0_start + kbc_stop - kbc); + while (kbc < kbc_stop && kb0_stop == iter_k) { - const int channel = kbc / (iter_k*iter_j); - const int jt = (kbc - channel*iter_k*iter_j) / iter_k; // j index of current tile. + const int sequence = kbc / (iter_k*iter_j*(ne02/ncols2)); + const int zt = (kbc - iter_k*iter_j*(ne02/ncols2)*sequence) / (iter_k*iter_j); // head in units of ncols2 + const int jt = (kbc - iter_k*iter_j*(ne02/ncols2)*sequence - iter_k*iter_j*zt) / iter_k; // j index of current tile. - const float2 * Q_f2 = (const float2 *) (Q + nb02* channel*ncols2); - const half2 * K_h2 = (const half2 *) (K + nb12*(channel*ncols2 / gqa_ratio)); - const half2 * V_h2 = (const half2 *) (V + nb22*(channel*ncols2 / gqa_ratio)); - const half2 * mask_h2 = ncols2 > 1 || mask ? (const half2 *) mask + (nb31/sizeof(half2))*jt*ncols1 : nullptr; - float2 * dstk = ((float2 *) dst) + channel*(ncols2 * DV/2); + const int head0 = zt * ncols2; - const float slope = ncols2 == 1 ? get_alibi_slope(max_bias, channel, n_head_log2, m0, m1) : 1.0f; + const float2 * Q_f2 = (const float2 *) (Q + nb03*sequence + nb02* head0); + const half2 * K_h2 = (const half2 *) (K + nb13*sequence + nb12*(head0 / gqa_ratio)); + const half2 * mask_h2 = ncols2 == 1 && !mask ? nullptr : + (const half2 *) (mask + nb33*(sequence % ne33) + nb31*jt*ncols1); + float2 * dstk = ((float2 *) dst) + (sequence*ne01*ne02 + head0) * (DV/2); + + const half2 * V_h2 = mla ? K_h2 + (DKQ/2 - DV/2) : (const half2 *) (V + nb23*sequence + nb22*(head0 / gqa_ratio)); + const float * sinks_f = sinks ? (const float *) sinks + head0 : nullptr; + + const float slope = ncols2 == 1 ? 
get_alibi_slope(max_bias, head0, n_head_log2, m0, m1) : 1.0f; const int kb0_start_kernel = kb0_start * kb_niter; - const int kb0_stop_kernel = kb0_stop * kb_niter; + int kb0_stop_kernel = kb0_stop * kb_niter; + + if (KV_max) { + kb0_stop_kernel = min(kb0_stop_kernel, KV_max[sequence*iter_j + jt] / c::nbatch_fa); + } constexpr bool is_fixup = false; // All but (potentially) the last iterations write their data to dst rather than the fixup buffer. if (kb0_start == 0) { constexpr bool needs_fixup = false; // CUDA block is working on an entire tile. - flash_attn_ext_f16_process_tile - (Q_f2, K_h2, V_h2, mask_h2, dstk, dst_meta, scale, slope, logit_softcap, + flash_attn_ext_f16_process_tile + (Q_f2, K_h2, V_h2, mask_h2, sinks_f, dstk, dst_meta, scale, slope, logit_softcap, ne01, ne02, stride_Q1, stride_Q2, stride_K, stride_V, stride_mask, jt, kb0_start_kernel, kb0_stop_kernel); } else { constexpr bool needs_fixup = true; // CUDA block is working on the beginning of a tile. - flash_attn_ext_f16_process_tile - (Q_f2, K_h2, V_h2, mask_h2, dstk, dst_meta, scale, slope, logit_softcap, + flash_attn_ext_f16_process_tile + (Q_f2, K_h2, V_h2, mask_h2, sinks_f, dstk, dst_meta, scale, slope, logit_softcap, ne01, ne02, stride_Q1, stride_Q2, stride_K, stride_V, stride_mask, jt, kb0_start_kernel, kb0_stop_kernel); } @@ -1125,38 +1365,49 @@ static __global__ void flash_attn_ext_f16( return; } - const int channel = kbc / (iter_k*iter_j); - const int jt = (kbc - channel*iter_k*iter_j) / iter_k; // j index of current tile. + const int sequence = kbc / (iter_k*iter_j*(ne02/ncols2)); + const int zt = (kbc - iter_k*iter_j*(ne02/ncols2)*sequence) / (iter_k*iter_j); // head in units of ncols2 + const int jt = (kbc - iter_k*iter_j*(ne02/ncols2)*sequence - iter_k*iter_j*zt) / iter_k; // j index of current tile. - const float2 * Q_f2 = (const float2 *) (Q + nb02* channel*ncols2); - const half2 * K_h2 = (const half2 *) (K + nb12*(channel*ncols2 / gqa_ratio)); - const half2 * V_h2 = (const half2 *) (V + nb22*(channel*ncols2 / gqa_ratio)); // K and V have same shape - const half2 * mask_h2 = ncols2 > 1 || mask ? (const half2 *) mask + (nb31/sizeof(half2))*jt*ncols1 : nullptr; - float2 * dstk = ((float2 *) dst) + channel*(ncols2 * DV/2); + const int head0 = zt * ncols2; - const float slope = ncols2 == 1 ? get_alibi_slope(max_bias, channel, n_head_log2, m0, m1) : 1.0f; + const float2 * Q_f2 = (const float2 *) (Q + nb03*sequence + nb02* head0); + const half2 * K_h2 = (const half2 *) (K + nb13*sequence + nb12*(head0 / gqa_ratio)); + const half2 * mask_h2 = ncols2 == 1 && !mask ? nullptr : + (const half2 *) (mask + nb33*(sequence % ne33) + nb31*jt*ncols1); + float2 * dstk = ((float2 *) dst) + (sequence*ne01*ne02 + head0) * (DV/2); + + const half2 * V_h2 = mla ? K_h2 + (DKQ/2 - DV/2) : (const half2 *) (V + nb23*sequence + nb22*(head0 / gqa_ratio)); + const float * sinks_f = sinks ? (const float *) sinks + head0 : nullptr; + + const float slope = ncols2 == 1 ? get_alibi_slope(max_bias, head0, n_head_log2, m0, m1) : 1.0f; const int kb0_start_kernel = kb0_start * kb_niter; - const int kb0_stop_kernel = kb0_stop * kb_niter; + int kb0_stop_kernel = kb0_stop * kb_niter; + + if (KV_max) { + kb0_stop_kernel = min(kb0_stop_kernel, KV_max[sequence*iter_j + jt] / c::nbatch_fa); + } constexpr bool is_fixup = true; // Last index writes its data to fixup buffer to avoid data races with other blocks. 
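// (Editor note, not part of the vendored patch: the continuous index used above
// decomposes, per the divisions in this kernel, as
//     kbc = ((sequence*(ne02/ncols2) + zt)*iter_j + jt)*iter_k + kb0
// A CUDA block whose kbc range ends mid-tile computes only a partial output, so it
// writes to the fixup buffer and a later combine pass merges the partial results.)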
constexpr bool needs_fixup = false; - flash_attn_ext_f16_process_tile - (Q_f2, K_h2, V_h2, mask_h2, dstk, dst_meta, scale, slope, logit_softcap, + flash_attn_ext_f16_process_tile + (Q_f2, K_h2, V_h2, mask_h2, sinks_f, dstk, dst_meta, scale, slope, logit_softcap, ne01, ne02, stride_Q1, stride_Q2, stride_K, stride_V, stride_mask, jt, kb0_start_kernel, kb0_stop_kernel); #else - GGML_UNUSED(Q); GGML_UNUSED(K); GGML_UNUSED(V); GGML_UNUSED(mask); - GGML_UNUSED(dst); GGML_UNUSED(dst_meta); GGML_UNUSED(scale); - GGML_UNUSED(max_bias); GGML_UNUSED(m0); GGML_UNUSED(m1); - GGML_UNUSED(n_head_log2); GGML_UNUSED(logit_softcap); GGML_UNUSED(ne00); - GGML_UNUSED(ne01); GGML_UNUSED(ne02); GGML_UNUSED(ne03); GGML_UNUSED(ne10); - GGML_UNUSED(ne11); GGML_UNUSED(ne12); GGML_UNUSED(ne13); GGML_UNUSED(ne31); - GGML_UNUSED(nb31); GGML_UNUSED(nb01); GGML_UNUSED(nb02); GGML_UNUSED(nb03); - GGML_UNUSED(nb11); GGML_UNUSED(nb12); GGML_UNUSED(nb13); GGML_UNUSED(nb21); - GGML_UNUSED(nb22); GGML_UNUSED(nb23); GGML_UNUSED(ne0); GGML_UNUSED(ne1); - GGML_UNUSED(ne2); GGML_UNUSED(ne3); + GGML_UNUSED(Q); GGML_UNUSED(K); GGML_UNUSED(V); GGML_UNUSED(mask); GGML_UNUSED(sinks); + GGML_UNUSED(dst); GGML_UNUSED(dst_meta); + GGML_UNUSED(scale); GGML_UNUSED(max_bias); GGML_UNUSED(m0); GGML_UNUSED(m1); + GGML_UNUSED(n_head_log2); GGML_UNUSED(logit_softcap); + GGML_UNUSED(ne00); GGML_UNUSED(ne01); GGML_UNUSED(ne02); GGML_UNUSED(ne03); + GGML_UNUSED(nb01); GGML_UNUSED(nb02); GGML_UNUSED(nb03); + GGML_UNUSED(ne10); GGML_UNUSED(ne11); GGML_UNUSED(ne12); GGML_UNUSED(ne13); + GGML_UNUSED(nb11); GGML_UNUSED(nb12); GGML_UNUSED(nb13); + GGML_UNUSED(nb21); GGML_UNUSED(nb22); GGML_UNUSED(nb23); + GGML_UNUSED(ne31); GGML_UNUSED(ne32); GGML_UNUSED(ne33); + GGML_UNUSED(nb31); GGML_UNUSED(nb32); GGML_UNUSED(nb33); NO_DEVICE_CODE; -#endif // defined(FLASH_ATTN_AVAILABLE) && defined(NEW_MMA_AVAILABLE) +#endif // defined(FLASH_ATTN_AVAILABLE) && defined(TURING_MMA_AVAILABLE) } template @@ -1167,10 +1418,6 @@ void ggml_cuda_flash_attn_ext_mma_f16_case(ggml_backend_cuda_context & ctx, ggml typedef fattn_mma_f16_config c; - constexpr int nbatch_K2 = c::nbatch_K2 < 1 ? DKQ/2 : c::nbatch_K2; - constexpr int nbatch_V2 = c::nbatch_V2 < 1 ? DV /2 : c::nbatch_V2; - constexpr int nbatch_combine = c::nbatch_combine < 1 ? DV /2 : c::nbatch_combine; - const int nstages = cp_async_available(cc) ? c::nstages_target : 0; constexpr int ncols = ncols1 * ncols2; @@ -1180,15 +1427,21 @@ void ggml_cuda_flash_attn_ext_mma_f16_case(ggml_backend_cuda_context & ctx, ggml constexpr int nwarps_max_y = c::nbatch_fa / tile_A::I; constexpr int nwarps = nwarps_max_x*nwarps_max_y <= c::nwarps_max ? 
nwarps_max_x*nwarps_max_y : c::nwarps_max;
+    constexpr bool mla = DKQ == 576;
+
+    const int nbatch_K2      = c::get_nbatch_K2_host     (cc, ncols);
+    const int nbatch_V2      = c::get_nbatch_V2_host     (cc, ncols);
+    const int nbatch_combine = c::get_nbatch_combine_host(cc, ncols);
+
     static_assert(DKQ % tile_B::J == 0, "bad DKQ");
     static_assert(DV % tile_A::J == 0, "bad DV");
     static_assert(ncols % cols_per_warp == 0, "bad ncols");
-    const size_t nbytes_shared_KV_1stage = c::nbatch_fa * std::max(c::nbatch_K2 + 4, c::nbatch_V2 + 4) * sizeof(half2);
-    const size_t nbytes_shared_KV_2stage = c::nbatch_fa * (c::nbatch_K2 + 4 + c::nbatch_V2 + 4) * sizeof(half2);
-    const size_t nbytes_shared_Q = ncols * (DKQ/2 + 4) * sizeof(half2);
-    const size_t nbytes_shared_mask = ncols1 * (c::nbatch_fa/2 + 4) * sizeof(half2);
-    const size_t nbytes_shared_combine = nwarps*cols_per_warp * (nbatch_combine + 4) * sizeof(half2);
+    const size_t nbytes_shared_KV_1stage = c::nbatch_fa * std::max(nbatch_K2 + 4, nbatch_V2 + 4) * sizeof(half2);
+    const size_t nbytes_shared_KV_2stage = c::nbatch_fa * (nbatch_K2 + 4 + nbatch_V2 + 4) * sizeof(half2);
+    const size_t nbytes_shared_Q = ncols * (DKQ/2 + 4) * sizeof(half2);
+    const size_t nbytes_shared_mask = ncols1 * (c::nbatch_fa/2 + 4) * sizeof(half2);
+    const size_t nbytes_shared_combine = nwarps*cols_per_warp * (nbatch_combine + 4) * sizeof(half2);
     const size_t nbytes_shared_KV = nstages <= 1 ? nbytes_shared_KV_1stage : nbytes_shared_KV_2stage;
@@ -1202,26 +1455,26 @@ void ggml_cuda_flash_attn_ext_mma_f16_case(ggml_backend_cuda_context & ctx, ggml
     fattn_kernel_t fattn_kernel;
     if (logit_softcap == 0.0f) {
         constexpr bool use_logit_softcap = false;
-        fattn_kernel = flash_attn_ext_f16;
+        fattn_kernel = flash_attn_ext_f16;
-#if !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) && !defined(GGML_USE_MUSA)
+#if !defined(GGML_USE_HIP) && !defined(GGML_USE_MUSA)
         static bool shared_memory_limit_raised[GGML_CUDA_MAX_DEVICES] = {false};
         if (!shared_memory_limit_raised[id]) {
             CUDA_CHECK(cudaFuncSetAttribute(fattn_kernel, cudaFuncAttributeMaxDynamicSharedMemorySize, nbytes_shared_total));
             shared_memory_limit_raised[id] = true;
         }
-#endif // !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) && !defined(GGML_USE_MUSA)
+#endif // !defined(GGML_USE_HIP) && !defined(GGML_USE_MUSA)
     } else {
         constexpr bool use_logit_softcap = true;
-        fattn_kernel = flash_attn_ext_f16;
+        fattn_kernel = flash_attn_ext_f16;
-#if !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) && !defined(GGML_USE_MUSA)
+#if !defined(GGML_USE_HIP) && !defined(GGML_USE_MUSA)
         static bool shared_memory_limit_raised[GGML_CUDA_MAX_DEVICES] = {false};
         if (!shared_memory_limit_raised[id]) {
             CUDA_CHECK(cudaFuncSetAttribute(fattn_kernel, cudaFuncAttributeMaxDynamicSharedMemorySize, nbytes_shared_total));
             shared_memory_limit_raised[id] = true;
         }
-#endif // !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) && !defined(GGML_USE_MUSA)
+#endif // !defined(GGML_USE_HIP) && !defined(GGML_USE_MUSA)
     }
     launch_fattn
diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/fattn-tile-f16.cu b/ml/backend/ggml/ggml/src/ggml-cuda/fattn-tile-f16.cu
index 9283560d5..0fcfaa32e 100644
--- a/ml/backend/ggml/ggml/src/ggml-cuda/fattn-tile-f16.cu
+++ b/ml/backend/ggml/ggml/src/ggml-cuda/fattn-tile-f16.cu
@@ -5,14 +5,16 @@
 #define FATTN_KQ_STRIDE_TILE_F16 64
 template // D == head size
-#if !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__))
-__launch_bounds__(nwarps*WARP_SIZE, 1)
-#endif // !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__))
+#if
!defined(GGML_USE_HIP) +__launch_bounds__(nwarps*WARP_SIZE, 2) +#endif // !defined(GGML_USE_HIP) static __global__ void flash_attn_tile_ext_f16( const char * __restrict__ Q, const char * __restrict__ K, const char * __restrict__ V, const char * __restrict__ mask, + const char * __restrict__ sinks, + const int * __restrict__ KV_max, float * __restrict__ dst, float2 * __restrict__ dst_meta, const float scale, @@ -21,29 +23,13 @@ static __global__ void flash_attn_tile_ext_f16( const float m1, const uint32_t n_head_log2, const float logit_softcap, - const int ne00, - const int ne01, - const int ne02, - const int ne03, - const int ne10, - const int ne11, - const int ne12, - const int ne13, - const int ne31, - const int nb31, - const int nb01, - const int nb02, - const int nb03, - const int nb11, - const int nb12, - const int nb13, - const int nb21, - const int nb22, - const int nb23, - const int ne0, - const int ne1, - const int ne2, - const int ne3) { + const int32_t ne00, const int32_t ne01, const int32_t ne02, const int32_t ne03, + const int32_t nb01, const int32_t nb02, const int32_t nb03, + const int32_t ne10, const int32_t ne11, const int32_t ne12, const int32_t ne13, + const int32_t nb11, const int32_t nb12, const int64_t nb13, + const int32_t nb21, const int32_t nb22, const int64_t nb23, + const int32_t ne31, const int32_t ne32, const int32_t ne33, + const int32_t nb31, const int32_t nb32, const int64_t nb33) { #if defined(FLASH_ATTN_AVAILABLE) && defined(FP16_AVAILABLE) // Skip unused kernel variants for faster compilation: @@ -60,15 +46,17 @@ static __global__ void flash_attn_tile_ext_f16( const int ic0 = blockIdx.x * ncols; // Index of the Q/QKV column to work on. + const int sequence = blockIdx.z / ne02; + const int head = blockIdx.z - sequence*ne02; const int gqa_ratio = ne02 / ne12; // With grouped query attention there are > 1 Q matrices per K, V matrix. - const float2 * Q_f2 = (const float2 *) (Q + nb02* blockIdx.z + nb01*ic0); - const half2 * K_h2 = (const half2 *) (K + nb12*(blockIdx.z / gqa_ratio)); - const half2 * V_h2 = (const half2 *) (V + nb12*(blockIdx.z / gqa_ratio)); // K and V have same shape - const half * maskh = (const half *) mask + ne11*ic0; + const float2 * Q_f2 = (const float2 *) (Q + nb03* sequence + nb02* head + nb01*ic0); + const half2 * K_h2 = (const half2 *) (K + nb13* sequence + nb12*(head / gqa_ratio)); + const half2 * V_h2 = (const half2 *) (V + nb13* sequence + nb12*(head / gqa_ratio)); // K and V have same shape + const half * maskh = (const half *) (mask + nb33*(sequence % ne33) + nb31*ic0); const int stride_KV2 = nb11 / sizeof(half2); - const float slopef = get_alibi_slope(max_bias, blockIdx.z, n_head_log2, m0, m1); + const float slopef = get_alibi_slope(max_bias, head, n_head_log2, m0, m1); const half slopeh = __float2half(slopef); static_assert(D % (2*WARP_SIZE) == 0, "D not divisible by 2*WARP_SIZE == 64."); @@ -104,7 +92,8 @@ static __global__ void flash_attn_tile_ext_f16( __syncthreads(); - for (int k_VKQ_0 = blockIdx.y*FATTN_KQ_STRIDE_TILE_F16; k_VKQ_0 < ne11; k_VKQ_0 += gridDim.y*FATTN_KQ_STRIDE_TILE_F16) { + const int k_VKQ_max = KV_max ? 
KV_max[sequence*gridDim.x + blockIdx.x] : ne11; + for (int k_VKQ_0 = blockIdx.y*FATTN_KQ_STRIDE_TILE_F16; k_VKQ_0 < k_VKQ_max; k_VKQ_0 += gridDim.y*FATTN_KQ_STRIDE_TILE_F16) { // Calculate KQ tile and keep track of new maximum KQ values: half kqmax_new[ncols/nwarps]; @@ -121,7 +110,7 @@ static __global__ void flash_attn_tile_ext_f16( for (int k_KQ_0 = 0; k_KQ_0 < D/2; k_KQ_0 += WARP_SIZE) { const int k_KQ = k_KQ_0 + threadIdx.x; - KV_tmp[i_KQ][k_KQ] = K_h2[(k_VKQ_0 + i_KQ)*stride_KV2 + k_KQ]; + KV_tmp[i_KQ][k_KQ] = K_h2[int64_t(k_VKQ_0 + i_KQ)*stride_KV2 + k_KQ]; } } @@ -215,7 +204,7 @@ static __global__ void flash_attn_tile_ext_f16( for (int i0 = 0; i0 < D/2; i0 += WARP_SIZE) { const int i = i0 + threadIdx.x; - KV_tmp[k][i] = V_h2[(k_VKQ_0 + k)*stride_KV2 + i]; + KV_tmp[k][i] = V_h2[int64_t(k_VKQ_0 + k)*stride_KV2 + i]; } } @@ -253,6 +242,8 @@ static __global__ void flash_attn_tile_ext_f16( __syncthreads(); } + float2 * dst2 = (float2 *) dst; + #pragma unroll for (int j_VKQ_0 = 0; j_VKQ_0 < ncols; j_VKQ_0 += nwarps) { const int j_VKQ = j_VKQ_0 + threadIdx.y; @@ -264,36 +255,35 @@ static __global__ void flash_attn_tile_ext_f16( half kqsum_j = __low2half(kqsum[j_VKQ_0/nwarps]) + __high2half(kqsum[j_VKQ_0/nwarps]); kqsum_j = warp_reduce_sum((float)kqsum_j); -#pragma unroll - for (int i00 = 0; i00 < D; i00 += 2*WARP_SIZE) { - const int i0 = i00 + 2*threadIdx.x; + const int j_dst_unrolled = ((sequence*ne01 + ic0 + j_VKQ)*ne02 + head)*gridDim.y + blockIdx.y; - half2 dst_val = VKQ[j_VKQ_0/nwarps][i0/(2*WARP_SIZE)]; +#pragma unroll + for (int i00 = 0; i00 < D/2; i00 += WARP_SIZE) { + const int i0 = i00 + threadIdx.x; + + half2 dst_val = VKQ[j_VKQ_0/nwarps][i0/WARP_SIZE]; if (gridDim.y == 1) { dst_val /= __half2half2(kqsum_j); } - const int j_dst = (ic0 + j_VKQ)*gridDim.y + blockIdx.y; - dst[j_dst*D*gridDim.z + D*blockIdx.z + i0 + 0] = __low2float(dst_val); - dst[j_dst*D*gridDim.z + D*blockIdx.z + i0 + 1] = __high2float(dst_val); + dst2[j_dst_unrolled*(D/2) + i0] = __half22float2(dst_val); } if (gridDim.y != 1 && threadIdx.x == 0) { - dst_meta[((ic0 + j_VKQ)*gridDim.z + blockIdx.z) * gridDim.y + blockIdx.y] = make_float2(kqmax[j_VKQ_0/nwarps], kqsum_j); + dst_meta[j_dst_unrolled] = make_float2(kqmax[j_VKQ_0/nwarps], kqsum_j); } } #else - GGML_UNUSED(Q); GGML_UNUSED(K); GGML_UNUSED(V); GGML_UNUSED(mask); + GGML_UNUSED(Q); GGML_UNUSED(K); GGML_UNUSED(V); GGML_UNUSED(mask); GGML_UNUSED(sinks); GGML_UNUSED(dst); GGML_UNUSED(dst_meta); GGML_UNUSED(scale); GGML_UNUSED(max_bias); GGML_UNUSED(m0); GGML_UNUSED(m1); GGML_UNUSED(n_head_log2); GGML_UNUSED(logit_softcap); GGML_UNUSED(ne00); GGML_UNUSED(ne01); GGML_UNUSED(ne02); GGML_UNUSED(ne03); GGML_UNUSED(ne10); GGML_UNUSED(ne11); - GGML_UNUSED(ne12); GGML_UNUSED(ne13); GGML_UNUSED(ne31); - GGML_UNUSED(nb31); GGML_UNUSED(nb01); GGML_UNUSED(nb02); + GGML_UNUSED(ne12); GGML_UNUSED(ne13); GGML_UNUSED(ne31); GGML_UNUSED(ne32); GGML_UNUSED(ne33); + GGML_UNUSED(nb31); GGML_UNUSED(nb32); GGML_UNUSED(nb33); GGML_UNUSED(nb01); GGML_UNUSED(nb02); GGML_UNUSED(nb03); GGML_UNUSED(nb11); GGML_UNUSED(nb12); GGML_UNUSED(nb13); GGML_UNUSED(nb21); GGML_UNUSED(nb22); - GGML_UNUSED(nb23); GGML_UNUSED(ne0); GGML_UNUSED(ne1); - GGML_UNUSED(ne2); GGML_UNUSED(ne3); + GGML_UNUSED(nb23); NO_DEVICE_CODE; #endif // defined(FLASH_ATTN_AVAILABLE) && defined(FP16_AVAILABLE) } diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/fattn-tile-f32.cu b/ml/backend/ggml/ggml/src/ggml-cuda/fattn-tile-f32.cu index 32673adb5..23550cbbd 100644 --- a/ml/backend/ggml/ggml/src/ggml-cuda/fattn-tile-f32.cu 
+++ b/ml/backend/ggml/ggml/src/ggml-cuda/fattn-tile-f32.cu @@ -5,14 +5,16 @@ #define FATTN_KQ_STRIDE_TILE_F32 32 template // D == head size -#if !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) -__launch_bounds__(nwarps*WARP_SIZE, 1) -#endif // !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) +#if !defined(GGML_USE_HIP) +__launch_bounds__(nwarps*WARP_SIZE, 2) +#endif // !defined(GGML_USE_HIP) static __global__ void flash_attn_tile_ext_f32( const char * __restrict__ Q, const char * __restrict__ K, const char * __restrict__ V, const char * __restrict__ mask, + const char * __restrict__ sinks, + const int * __restrict__ KV_max, float * __restrict__ dst, float2 * __restrict__ dst_meta, const float scale, @@ -21,29 +23,13 @@ static __global__ void flash_attn_tile_ext_f32( const float m1, const uint32_t n_head_log2, const float logit_softcap, - const int ne00, - const int ne01, - const int ne02, - const int ne03, - const int ne10, - const int ne11, - const int ne12, - const int ne13, - const int ne31, - const int nb31, - const int nb01, - const int nb02, - const int nb03, - const int nb11, - const int nb12, - const int nb13, - const int nb21, - const int nb22, - const int nb23, - const int ne0, - const int ne1, - const int ne2, - const int ne3) { + const int32_t ne00, const int32_t ne01, const int32_t ne02, const int32_t ne03, + const int32_t nb01, const int32_t nb02, const int32_t nb03, + const int32_t ne10, const int32_t ne11, const int32_t ne12, const int32_t ne13, + const int32_t nb11, const int32_t nb12, const int64_t nb13, + const int32_t nb21, const int32_t nb22, const int64_t nb23, + const int32_t ne31, const int32_t ne32, const int32_t ne33, + const int32_t nb31, const int32_t nb32, const int64_t nb33) { #ifdef FLASH_ATTN_AVAILABLE // Skip unused kernel variants for faster compilation: @@ -52,18 +38,17 @@ static __global__ void flash_attn_tile_ext_f32( return; #endif // FP16_MMA_AVAILABLE if (use_logit_softcap && !(D == 128 || D == 256)) { - GGML_UNUSED(Q); GGML_UNUSED(K); GGML_UNUSED(V); GGML_UNUSED(mask); - GGML_UNUSED(dst); GGML_UNUSED(dst_meta); GGML_UNUSED(scale); - GGML_UNUSED(max_bias); GGML_UNUSED(m0); GGML_UNUSED(m1); + GGML_UNUSED(Q); GGML_UNUSED(K); GGML_UNUSED(V); GGML_UNUSED(mask); GGML_UNUSED(sinks); + GGML_UNUSED(dst); GGML_UNUSED(dst_meta); + GGML_UNUSED(scale); GGML_UNUSED(max_bias); GGML_UNUSED(m0); GGML_UNUSED(m1); GGML_UNUSED(n_head_log2); GGML_UNUSED(logit_softcap); - GGML_UNUSED(ne00); GGML_UNUSED(ne01); GGML_UNUSED(ne02); - GGML_UNUSED(ne03); GGML_UNUSED(ne10); GGML_UNUSED(ne11); - GGML_UNUSED(ne12); GGML_UNUSED(ne13); GGML_UNUSED(ne31); - GGML_UNUSED(nb31); GGML_UNUSED(nb01); GGML_UNUSED(nb02); - GGML_UNUSED(nb03); GGML_UNUSED(nb11); GGML_UNUSED(nb12); - GGML_UNUSED(nb13); GGML_UNUSED(nb21); GGML_UNUSED(nb22); - GGML_UNUSED(nb23); GGML_UNUSED(ne0); GGML_UNUSED(ne1); - GGML_UNUSED(ne2); GGML_UNUSED(ne3); + GGML_UNUSED(ne00); GGML_UNUSED(ne01); GGML_UNUSED(ne02); GGML_UNUSED(ne03); + GGML_UNUSED(nb01); GGML_UNUSED(nb02); GGML_UNUSED(nb03); + GGML_UNUSED(ne10); GGML_UNUSED(ne11); GGML_UNUSED(ne12); GGML_UNUSED(ne13); + GGML_UNUSED(nb11); GGML_UNUSED(nb12); GGML_UNUSED(nb13); + GGML_UNUSED(nb21); GGML_UNUSED(nb22); GGML_UNUSED(nb23); + GGML_UNUSED(ne31); GGML_UNUSED(ne32); GGML_UNUSED(ne33); + GGML_UNUSED(nb31); GGML_UNUSED(nb32); GGML_UNUSED(nb33); NO_DEVICE_CODE; return; } @@ -72,15 +57,17 @@ static __global__ void flash_attn_tile_ext_f32( const int ic0 = blockIdx.x * ncols; // Index of the Q/QKV column to work on. 
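// (Editor note, illustration only: the patch folds sequence and head into blockIdx.z,
// so the split below is a plain div/mod. For example, with ne02 = 32 heads,
// blockIdx.z = 70 yields sequence = 2 and head = 6 — numbers are hypothetical.)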
+ const int sequence = blockIdx.z / ne02; + const int head = blockIdx.z - sequence*ne02; const int gqa_ratio = ne02 / ne12; // With grouped query attention there are > 1 Q matrices per K, V matrix. - const float2 * Q_f2 = (const float2 *) (Q + nb02* blockIdx.z + nb01*ic0); - const half2 * K_h2 = (const half2 *) (K + nb12*(blockIdx.z / gqa_ratio)); - const half2 * V_h2 = (const half2 *) (V + nb12*(blockIdx.z / gqa_ratio)); // K and V have same shape - const half * maskh = (const half *) mask + ne11*ic0; + const float2 * Q_f2 = (const float2 *) (Q + nb03* sequence + nb02* head + nb01*ic0); + const half2 * K_h2 = (const half2 *) (K + nb13* sequence + nb12*(head / gqa_ratio)); + const half2 * V_h2 = (const half2 *) (V + nb13* sequence + nb12*(head / gqa_ratio)); // K and V have same shape + const half * maskh = (const half *) (mask + nb33*(sequence % ne33) + nb31*ic0); const int stride_KV2 = nb11 / sizeof(half2); - const float slope = get_alibi_slope(max_bias, blockIdx.z, n_head_log2, m0, m1); + const float slope = get_alibi_slope(max_bias, head, n_head_log2, m0, m1); static_assert(D % (2*WARP_SIZE) == 0, "D not divisible by 2*WARP_SIZE == 64."); @@ -114,7 +101,8 @@ static __global__ void flash_attn_tile_ext_f32( __syncthreads(); - for (int k_VKQ_0 = blockIdx.y*FATTN_KQ_STRIDE_TILE_F32; k_VKQ_0 < ne11; k_VKQ_0 += gridDim.y*FATTN_KQ_STRIDE_TILE_F32) { + const int k_VKQ_max = KV_max ? KV_max[sequence*gridDim.x + blockIdx.x] : ne11; + for (int k_VKQ_0 = blockIdx.y*FATTN_KQ_STRIDE_TILE_F32; k_VKQ_0 < k_VKQ_max; k_VKQ_0 += gridDim.y*FATTN_KQ_STRIDE_TILE_F32) { // Calculate KQ tile and keep track of new maximum KQ values: float kqmax_new[ncols/nwarps]; @@ -129,7 +117,7 @@ static __global__ void flash_attn_tile_ext_f32( #pragma unroll for (int k_KQ_0 = 0; k_KQ_0 < D; k_KQ_0 += 2*WARP_SIZE) { - const half2 tmp = K_h2[(k_VKQ_0 + i_KQ)*stride_KV2 + k_KQ_0/2 + threadIdx.x]; + const half2 tmp = K_h2[int64_t(k_VKQ_0 + i_KQ)*stride_KV2 + k_KQ_0/2 + threadIdx.x]; KV_tmp[i_KQ][k_KQ_0 + 0*WARP_SIZE + threadIdx.x] = __low2float(tmp); KV_tmp[i_KQ][k_KQ_0 + 1*WARP_SIZE + threadIdx.x] = __high2float(tmp); } @@ -225,8 +213,9 @@ static __global__ void flash_attn_tile_ext_f32( for (int i0 = 0; i0 < D/2; i0 += WARP_SIZE) { const int i = i0 + threadIdx.x; - KV_tmp2[k*(D/2) + i].x = __low2float(V_h2[(k_VKQ_0 + k)*stride_KV2 + i]); - KV_tmp2[k*(D/2) + i].y = __high2float(V_h2[(k_VKQ_0 + k)*stride_KV2 + i]); + const half2 tmp = V_h2[int64_t(k_VKQ_0 + k)*stride_KV2 + i]; + KV_tmp2[k*(D/2) + i].x = __low2float(tmp); + KV_tmp2[k*(D/2) + i].y = __high2float(tmp); } } @@ -263,6 +252,8 @@ static __global__ void flash_attn_tile_ext_f32( __syncthreads(); } + float2 * dst2 = (float2 *) dst; + #pragma unroll for (int j_VKQ_0 = 0; j_VKQ_0 < ncols; j_VKQ_0 += nwarps) { const int j_VKQ = j_VKQ_0 + threadIdx.y; @@ -274,37 +265,36 @@ static __global__ void flash_attn_tile_ext_f32( float kqsum_j = kqsum[j_VKQ_0/nwarps]; kqsum_j = warp_reduce_sum(kqsum_j); -#pragma unroll - for (int i00 = 0; i00 < D; i00 += 2*WARP_SIZE) { - const int i0 = i00 + 2*threadIdx.x; + const int j_dst_unrolled = ((sequence*ne01 + ic0 + j_VKQ)*ne02 + head)*gridDim.y + blockIdx.y; - float2 dst_val = VKQ[j_VKQ_0/nwarps][i0/(2*WARP_SIZE)]; +#pragma unroll + for (int i00 = 0; i00 < D/2; i00 += WARP_SIZE) { + const int i0 = i00 + threadIdx.x; + + float2 dst_val = VKQ[j_VKQ_0/nwarps][i0/WARP_SIZE]; if (gridDim.y == 1) { dst_val.x /= kqsum_j; dst_val.y /= kqsum_j; } - const int j_dst = (ic0 + j_VKQ)*gridDim.y + blockIdx.y; - dst[j_dst*D*gridDim.z + D*blockIdx.z + i0 + 0] 
= dst_val.x;
-            dst[j_dst*D*gridDim.z + D*blockIdx.z + i0 + 1] = dst_val.y;
+            dst2[j_dst_unrolled*(D/2) + i0] = dst_val;
         }
         if (gridDim.y != 1 && threadIdx.x == 0) {
-            dst_meta[((ic0 + j_VKQ)*gridDim.z + blockIdx.z) * gridDim.y + blockIdx.y] = make_float2(kqmax[j_VKQ_0/nwarps], kqsum_j);
+            dst_meta[j_dst_unrolled] = make_float2(kqmax[j_VKQ_0/nwarps], kqsum_j);
         }
     }
 #else
     GGML_UNUSED(Q); GGML_UNUSED(K); GGML_UNUSED(V); GGML_UNUSED(mask);
-    GGML_UNUSED(dst); GGML_UNUSED(dst_meta); GGML_UNUSED(scale);
-    GGML_UNUSED(max_bias); GGML_UNUSED(m0); GGML_UNUSED(m1);
+    GGML_UNUSED(dst); GGML_UNUSED(dst_meta);
+    GGML_UNUSED(scale); GGML_UNUSED(max_bias); GGML_UNUSED(m0); GGML_UNUSED(m1);
     GGML_UNUSED(n_head_log2); GGML_UNUSED(logit_softcap);
-    GGML_UNUSED(ne00); GGML_UNUSED(ne01); GGML_UNUSED(ne02);
-    GGML_UNUSED(ne03); GGML_UNUSED(ne10); GGML_UNUSED(ne11);
-    GGML_UNUSED(ne12); GGML_UNUSED(ne13); GGML_UNUSED(ne31);
-    GGML_UNUSED(nb31); GGML_UNUSED(nb01); GGML_UNUSED(nb02);
-    GGML_UNUSED(nb03); GGML_UNUSED(nb11); GGML_UNUSED(nb12);
-    GGML_UNUSED(nb13); GGML_UNUSED(nb21); GGML_UNUSED(nb22);
-    GGML_UNUSED(nb23); GGML_UNUSED(ne0); GGML_UNUSED(ne1);
-    GGML_UNUSED(ne2); GGML_UNUSED(ne3);
+    GGML_UNUSED(ne00); GGML_UNUSED(ne01); GGML_UNUSED(ne02); GGML_UNUSED(ne03);
+    GGML_UNUSED(nb01); GGML_UNUSED(nb02); GGML_UNUSED(nb03);
+    GGML_UNUSED(ne10); GGML_UNUSED(ne11); GGML_UNUSED(ne12); GGML_UNUSED(ne13);
+    GGML_UNUSED(nb11); GGML_UNUSED(nb12); GGML_UNUSED(nb13);
+    GGML_UNUSED(nb21); GGML_UNUSED(nb22); GGML_UNUSED(nb23);
+    GGML_UNUSED(ne31); GGML_UNUSED(ne32); GGML_UNUSED(ne33);
+    GGML_UNUSED(nb31); GGML_UNUSED(nb32); GGML_UNUSED(nb33);
     NO_DEVICE_CODE;
 #endif // FLASH_ATTN_AVAILABLE
 }
diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/fattn-vec-f16.cuh b/ml/backend/ggml/ggml/src/ggml-cuda/fattn-vec-f16.cuh
index d96e39212..b05f682cd 100644
--- a/ml/backend/ggml/ggml/src/ggml-cuda/fattn-vec-f16.cuh
+++ b/ml/backend/ggml/ggml/src/ggml-cuda/fattn-vec-f16.cuh
@@ -1,15 +1,23 @@
 #include "common.cuh"
 #include "fattn-common.cuh"
+// Currently, LLVM with the amdgcn target does not support unrolling loops
+// that contain a break that cannot be resolved at compile time.
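// (Editor note, hypothetical sketch of the pattern the pragma below silences —
// an unroll request on a loop with a data-dependent break:
//     #pragma unroll
//     for (int k0 = 0; k0 < D; k0 += 2) {
//         if (k0 >= k_max) break; // not resolvable at compile time
//         ...
//     }
// clang emits -Wpass-failed when it has to abandon the requested unroll.)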
+#ifdef __clang__ +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wpass-failed" +#endif // __clang__ template // D == head size -#if !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) +#ifndef GGML_USE_HIP __launch_bounds__(D, 1) -#endif // !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) +#endif // GGML_USE_HIP static __global__ void flash_attn_vec_ext_f16( const char * __restrict__ Q, const char * __restrict__ K, const char * __restrict__ V, const char * __restrict__ mask, + const char * __restrict__ sinks, + const int * __restrict__ KV_max, float * __restrict__ dst, float2 * __restrict__ dst_meta, const float scale, @@ -18,29 +26,13 @@ static __global__ void flash_attn_vec_ext_f16( const float m1, const uint32_t n_head_log2, const float logit_softcap, - const int ne00, - const int ne01, - const int ne02, - const int ne03, - const int ne10, - const int ne11, - const int ne12, - const int ne13, - const int ne31, - const int nb31, - const int nb01, - const int nb02, - const int nb03, - const int nb11, - const int nb12, - const int nb13, - const int nb21, - const int nb22, - const int nb23, - const int ne0, - const int ne1, - const int ne2, - const int ne3) { + const int32_t ne00, const int32_t ne01, const int32_t ne02, const int32_t ne03, + const int32_t nb01, const int32_t nb02, const int32_t nb03, + const int32_t ne10, const int32_t ne11, const int32_t ne12, const int32_t ne13, + const int32_t nb11, const int32_t nb12, const int64_t nb13, + const int32_t nb21, const int32_t nb22, const int64_t nb23, + const int32_t ne31, const int32_t ne32, const int32_t ne33, + const int32_t nb31, const int32_t nb32, const int64_t nb33) { #if defined(FLASH_ATTN_AVAILABLE) && defined(FP16_AVAILABLE) // Skip unused kernel variants for faster compilation: @@ -48,6 +40,12 @@ static __global__ void flash_attn_vec_ext_f16( NO_DEVICE_CODE; return; } +#if !defined(GGML_USE_HIP) && !defined(GGML_USE_MUSA) + if (ncols > 1) { + NO_DEVICE_CODE; + return; + } +#endif // !defined(GGML_USE_HIP) && !defined(GGML_USE_MUSA) //In this kernel Q, K, V are matrices while i, j, k are matrix indices. @@ -57,14 +55,17 @@ static __global__ void flash_attn_vec_ext_f16( const int ic0 = blockIdx.x * ncols; // Index of the Q/QKV column to work on. + const int sequence = blockIdx.z / ne02; + const int head = blockIdx.z - sequence*ne02; const int gqa_ratio = ne02 / ne12; // With grouped query attention there are > 1 Q matrices per K, V matrix. 
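// (Editor note, worked example: head / gqa_ratio below maps each Q head to the KV
// head its group shares. With ne02 = 32 Q heads over ne12 = 8 KV heads, gqa_ratio = 4
// and Q heads 0..3 all read KV head 0 — numbers are illustrative, not from the patch.)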
- Q += nb02* blockIdx.z + nb01*ic0; - K += nb12*(blockIdx.z / gqa_ratio); - V += nb22*(blockIdx.z / gqa_ratio); + Q += nb03*sequence + nb02* head + nb01*ic0; + K += nb13*sequence + nb12*(head / gqa_ratio); + V += nb23*sequence + nb22*(head / gqa_ratio); - const half * maskh = (const half *) mask + ne11*ic0; + const half * maskh = (const half *) (mask + nb33*(sequence % ne33) + nb31*ic0); + const float * sinksf = (const float *) (sinks); - const float slopef = get_alibi_slope(max_bias, blockIdx.z, n_head_log2, m0, m1); + const float slopef = get_alibi_slope(max_bias, head, n_head_log2, m0, m1); const half slopeh = __float2half(slopef); static_assert(D % (2*WARP_SIZE) == 0, "D not divisible by 2*WARP_SIZE == 64."); @@ -76,11 +77,12 @@ static __global__ void flash_attn_vec_ext_f16( half2 * KQ2 = (half2 *) KQ; half kqmax[ncols]; + half kqsum[ncols]; #pragma unroll for (int j = 0; j < ncols; ++j) { kqmax[j] = -HALF_MAX_HALF; + kqsum[j] = 0.0f; } - half kqsum[ncols] = {0.0f}; __shared__ half kqmax_shared[ncols][WARP_SIZE]; __shared__ half kqsum_shared[ncols][WARP_SIZE]; @@ -91,6 +93,13 @@ static __global__ void flash_attn_vec_ext_f16( kqsum_shared[j][threadIdx.x] = 0.0f; } } + + __shared__ half maskh_shared[ncols*D]; +#pragma unroll + for (int j = 0; j < ncols; ++j) { + maskh_shared[j*D + tid] = 0.0f; + } + __syncthreads(); // Convert Q to half2 (f16 K) or q8_1 (quantized K) and store in registers: @@ -172,9 +181,24 @@ static __global__ void flash_attn_vec_ext_f16( half2 VKQ[ncols] = {{0.0f, 0.0f}}; - for (int k_VKQ_0 = blockIdx.y*D; k_VKQ_0 < ne11; k_VKQ_0 += gridDim.y*D) { + const int k_VKQ_max = KV_max ? KV_max[sequence*gridDim.x + blockIdx.x] : ne11; + K += blockIdx.y*D * nb11; + V += blockIdx.y*D * nb21; + maskh += blockIdx.y*D; + for (int k_VKQ_0 = blockIdx.y*D; k_VKQ_0 < k_VKQ_max; k_VKQ_0 += gridDim.y*D, + // Increment pointers after each loop: + K += gridDim.y*D*nb11, V += gridDim.y*D*nb21, maskh += gridDim.y*D) { + // Calculate KQ tile and keep track of new maximum KQ values: + if (mask) { +#pragma unroll + for (int j = 0; j < ncols; ++j) { + maskh_shared[j*D + tid] = slopeh*maskh[j*ne11 + tid]; + } + __syncthreads(); + } + // For unknown reasons using a half array of size 1 for kqmax_new causes a performance regression, // see https://github.com/ggerganov/llama.cpp/pull/7061 . // Therefore this variable is defined twice but only used once (so that the compiler can optimize out the unused variable). @@ -195,14 +219,14 @@ static __global__ void flash_attn_vec_ext_f16( #pragma unroll for (int j = 0; j < ncols; ++j) { - half sum = vec_dot_KQ(K + (k_VKQ_0 + i_KQ)*nb11, Q_h2[j], Q_i32[j], Q_ds[j]); + half sum = vec_dot_KQ(K + i_KQ*nb11, Q_h2[j], Q_i32[j], Q_ds[j]); sum = warp_reduce_sum((float)sum); if (use_logit_softcap) { sum = logit_softcap*tanhf(sum); } - sum += mask ? 
slopeh*maskh[j*ne11 + k_VKQ_0 + i_KQ] : __float2half(0.0f); + sum += maskh_shared[j*D + i_KQ]; if (ncols == 1) { kqmax_new = ggml_cuda_hmax(kqmax_new, sum); @@ -251,8 +275,8 @@ static __global__ void flash_attn_vec_ext_f16( } half2 V_k; - reinterpret_cast(V_k.x) = dequantize_1_v(V + (k_VKQ_0 + k0 + 0)*nb21, tid); - reinterpret_cast(V_k.y) = dequantize_1_v(V + (k_VKQ_0 + k0 + 1)*nb21, tid); + reinterpret_cast(V_k.x) = dequantize_1_v(V + (k0 + 0)*nb21, tid); + reinterpret_cast(V_k.y) = dequantize_1_v(V + (k0 + 1)*nb21, tid); #pragma unroll for (int j = 0; j < ncols; ++j) { VKQ[j] += V_k*KQ2[j*(D/2) + k0/2]; @@ -262,6 +286,39 @@ static __global__ void flash_attn_vec_ext_f16( __syncthreads(); } + if (sinksf && blockIdx.y == 0) { + const half sink = __float2half(sinksf[head]); + +#pragma unroll + for (int j = 0; j < ncols; ++j) { + if (threadIdx.x == 0) { + kqmax_shared[j][threadIdx.y] = fmaxf(kqmax[j], sink); + } + } + + __syncthreads(); + +#pragma unroll + for (int j = 0; j < ncols; ++j) { + half kqmax_new_j = kqmax_shared[j][threadIdx.x]; + kqmax_new_j = warp_reduce_max(kqmax_new_j); + + const half KQ_max_scale = hexp(kqmax[j] - kqmax_new_j); + kqmax[j] = kqmax_new_j; + + const half val = hexp(sink - kqmax[j]); + kqsum[j] = kqsum[j]*KQ_max_scale; + + if (tid == 0) { + kqsum[j] += val; + } + + VKQ[j] *= __half2half2(KQ_max_scale); + } + + __syncthreads(); + } + #pragma unroll for (int j = 0; j < ncols; ++j) { kqsum[j] = warp_reduce_sum((float)kqsum[j]); @@ -285,29 +342,30 @@ static __global__ void flash_attn_vec_ext_f16( if (gridDim.y == 1) { dst_val /= kqsum[j_VKQ]; } - const int j_dst = (ic0 + j_VKQ)*gridDim.y + blockIdx.y; - dst[j_dst*D*gridDim.z + D*blockIdx.z + tid] = dst_val; + dst[(((sequence*ne01 + ic0 + j_VKQ)*ne02 + head)*gridDim.y + blockIdx.y)*D + tid] = dst_val; } if (gridDim.y != 1 && tid < ncols && (ncols <= 2 || ic0 + tid < ne01)) { - dst_meta[((ic0 + tid)*gridDim.z + blockIdx.z) * gridDim.y + blockIdx.y] = make_float2(kqmax[tid], kqsum[tid]); + dst_meta[((sequence*ne01 + ic0 + tid)*ne02 + head)*gridDim.y + blockIdx.y] = make_float2(kqmax[tid], kqsum[tid]); } #else - GGML_UNUSED(Q); GGML_UNUSED(K); GGML_UNUSED(V); GGML_UNUSED(mask); - GGML_UNUSED(dst); GGML_UNUSED(dst_meta); GGML_UNUSED(scale); - GGML_UNUSED(max_bias); GGML_UNUSED(m0); GGML_UNUSED(m1); + GGML_UNUSED(Q); GGML_UNUSED(K); GGML_UNUSED(V); GGML_UNUSED(mask); GGML_UNUSED(sinks); + GGML_UNUSED(dst); GGML_UNUSED(dst_meta); + GGML_UNUSED(scale); GGML_UNUSED(max_bias); GGML_UNUSED(m0); GGML_UNUSED(m1); GGML_UNUSED(n_head_log2); GGML_UNUSED(logit_softcap); - GGML_UNUSED(ne00); GGML_UNUSED(ne01); GGML_UNUSED(ne02); - GGML_UNUSED(ne03); GGML_UNUSED(ne10); GGML_UNUSED(ne11); - GGML_UNUSED(ne12); GGML_UNUSED(ne13); GGML_UNUSED(ne31); - GGML_UNUSED(nb31); GGML_UNUSED(nb01); GGML_UNUSED(nb02); - GGML_UNUSED(nb03); GGML_UNUSED(nb11); GGML_UNUSED(nb12); - GGML_UNUSED(nb13); GGML_UNUSED(nb21); GGML_UNUSED(nb22); - GGML_UNUSED(nb23); GGML_UNUSED(ne0); GGML_UNUSED(ne1); - GGML_UNUSED(ne2); GGML_UNUSED(ne3); + GGML_UNUSED(ne00); GGML_UNUSED(ne01); GGML_UNUSED(ne02); GGML_UNUSED(ne03); + GGML_UNUSED(nb01); GGML_UNUSED(nb02); GGML_UNUSED(nb03); + GGML_UNUSED(ne10); GGML_UNUSED(ne11); GGML_UNUSED(ne12); GGML_UNUSED(ne13); + GGML_UNUSED(nb11); GGML_UNUSED(nb12); GGML_UNUSED(nb13); + GGML_UNUSED(nb21); GGML_UNUSED(nb22); GGML_UNUSED(nb23); + GGML_UNUSED(ne31); GGML_UNUSED(ne32); GGML_UNUSED(ne33); + GGML_UNUSED(nb31); GGML_UNUSED(nb32); GGML_UNUSED(nb33); NO_DEVICE_CODE; #endif // defined(FLASH_ATTN_AVAILABLE) && defined(FP16_AVAILABLE) 
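// (Editor note, scalar sketch of the attention-sink update in the kernel above,
// assuming the usual online-softmax state m = running max, s = running sum:
//     m' = max(m, sink);
//     s' = s*exp(m - m') + exp(sink - m');
//     acc *= exp(m - m');   // rescale the VKQ accumulator
// The sink only enlarges the softmax denominator; no V row is accumulated for it.)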
}
+#ifdef __clang__
+#pragma clang diagnostic pop
+#endif // __clang__
 template
 void ggml_cuda_flash_attn_ext_vec_f16_case_impl(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
@@ -335,7 +393,9 @@ void ggml_cuda_flash_attn_ext_vec_f16_case(ggml_backend_cuda_context & ctx, ggml
     float logit_softcap;
     memcpy(&logit_softcap, (const float *) KQV->op_params + 2, sizeof(float));
-    if (Q->ne[1] == 1) {
+    const int cc = ggml_cuda_info().devices[ggml_cuda_get_device()].cc;
+
+    if (Q->ne[1] == 1 || GGML_CUDA_CC_IS_NVIDIA(cc)) {
         constexpr int cols_per_block = 1;
         if (logit_softcap == 0.0f) {
             constexpr bool use_logit_softcap = false;
diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/fattn-vec-f32.cuh b/ml/backend/ggml/ggml/src/ggml-cuda/fattn-vec-f32.cuh
index 7064675d5..d6d0bfb74 100644
--- a/ml/backend/ggml/ggml/src/ggml-cuda/fattn-vec-f32.cuh
+++ b/ml/backend/ggml/ggml/src/ggml-cuda/fattn-vec-f32.cuh
@@ -1,15 +1,23 @@
 #include "common.cuh"
 #include "fattn-common.cuh"
+// Currently, LLVM with the amdgcn target does not support unrolling loops
+// that contain a break that cannot be resolved at compile time.
+#ifdef __clang__
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wpass-failed"
+#endif // __clang__
 template // D == head size
-#if !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__))
+#ifndef GGML_USE_HIP
 __launch_bounds__(D, 1)
-#endif // !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__))
+#endif // GGML_USE_HIP
 static __global__ void flash_attn_vec_ext_f32(
         const char * __restrict__ Q,
         const char * __restrict__ K,
         const char * __restrict__ V,
         const char * __restrict__ mask,
+        const char * __restrict__ sinks,
+        const int  * __restrict__ KV_max,
        float      * __restrict__ dst,
        float2     * __restrict__ dst_meta,
        const float scale,
@@ -18,29 +26,13 @@ static __global__ void flash_attn_vec_ext_f32(
         const float m1,
         const uint32_t n_head_log2,
         const float logit_softcap,
-        const int ne00,
-        const int ne01,
-        const int ne02,
-        const int ne03,
-        const int ne10,
-        const int ne11,
-        const int ne12,
-        const int ne13,
-        const int ne31,
-        const int nb31,
-        const int nb01,
-        const int nb02,
-        const int nb03,
-        const int nb11,
-        const int nb12,
-        const int nb13,
-        const int nb21,
-        const int nb22,
-        const int nb23,
-        const int ne0,
-        const int ne1,
-        const int ne2,
-        const int ne3) {
+        const int32_t ne00, const int32_t ne01, const int32_t ne02, const int32_t ne03,
+        const int32_t nb01, const int32_t nb02, const int32_t nb03,
+        const int32_t ne10, const int32_t ne11, const int32_t ne12, const int32_t ne13,
+        const int32_t nb11, const int32_t nb12, const int64_t nb13,
+        const int32_t nb21, const int32_t nb22, const int64_t nb23,
+        const int32_t ne31, const int32_t ne32, const int32_t ne33,
+        const int32_t nb31, const int32_t nb32, const int64_t nb33) {
 #ifdef FLASH_ATTN_AVAILABLE
 // Skip unused kernel variants for faster compilation:
@@ -51,15 +43,20 @@ static __global__ void flash_attn_vec_ext_f32(
         GGML_UNUSED(n_head_log2); GGML_UNUSED(logit_softcap);
         GGML_UNUSED(ne00); GGML_UNUSED(ne01); GGML_UNUSED(ne02);
         GGML_UNUSED(ne03); GGML_UNUSED(ne10); GGML_UNUSED(ne11);
-        GGML_UNUSED(ne12); GGML_UNUSED(ne13); GGML_UNUSED(ne31);
-        GGML_UNUSED(nb31); GGML_UNUSED(nb01); GGML_UNUSED(nb02);
+        GGML_UNUSED(ne12); GGML_UNUSED(ne13); GGML_UNUSED(ne31); GGML_UNUSED(ne32); GGML_UNUSED(ne33);
+        GGML_UNUSED(nb31); GGML_UNUSED(nb32); GGML_UNUSED(nb33); GGML_UNUSED(nb01); GGML_UNUSED(nb02);
         GGML_UNUSED(nb03); GGML_UNUSED(nb11); GGML_UNUSED(nb12);
         GGML_UNUSED(nb13); GGML_UNUSED(nb21); GGML_UNUSED(nb22);
-
GGML_UNUSED(nb23); GGML_UNUSED(ne0); GGML_UNUSED(ne1); - GGML_UNUSED(ne2); GGML_UNUSED(ne3); + GGML_UNUSED(nb23); NO_DEVICE_CODE; return; } +#if !defined(GGML_USE_HIP) && !defined(GGML_USE_MUSA) + if (ncols > 1) { + NO_DEVICE_CODE; + return; + } +#endif // !defined(GGML_USE_HIP) && !defined(GGML_USE_MUSA) //In this kernel Q, K, V are matrices while i, j, k are matrix indices. @@ -69,13 +66,17 @@ static __global__ void flash_attn_vec_ext_f32( const int ic0 = blockIdx.x * ncols; // Index of the Q/QKV column to work on. + const int sequence = blockIdx.z / ne02; + const int head = blockIdx.z - sequence*ne02; const int gqa_ratio = ne02 / ne12; // With grouped query attention there are > 1 Q matrices per K, V matrix. - Q += nb02* blockIdx.z + nb01*ic0; - K += nb12*(blockIdx.z / gqa_ratio); - V += nb22*(blockIdx.z / gqa_ratio); // K and V have same shape - const half * maskh = (const half *) mask + ne11*ic0; + Q += nb03*sequence + nb02* head + nb01*ic0; + K += nb13*sequence + nb12*(head / gqa_ratio); + V += nb23*sequence + nb22*(head / gqa_ratio); - const float slope = get_alibi_slope(max_bias, blockIdx.z, n_head_log2, m0, m1); + const half * maskh = (const half *) (mask + nb33*(sequence % ne33) + nb31*ic0); + const float * sinksf = (const float *) (sinks); + + const float slope = get_alibi_slope(max_bias, head, n_head_log2, m0, m1); static_assert(D % (2*WARP_SIZE) == 0, "D not divisible by 2*WARP_SIZE == 64."); constexpr int nwarps = D / WARP_SIZE; @@ -89,11 +90,12 @@ static __global__ void flash_attn_vec_ext_f32( } float kqmax[ncols]; + float kqsum[ncols]; #pragma unroll for (int j = 0; j < ncols; ++j) { kqmax[j] = -FLT_MAX/2.0f; + kqsum[j] = 0.0f; } - float kqsum[ncols] = {0.0f}; __shared__ float kqmax_shared[ncols][WARP_SIZE]; __shared__ float kqsum_shared[ncols][WARP_SIZE]; @@ -104,6 +106,13 @@ static __global__ void flash_attn_vec_ext_f32( kqsum_shared[j][threadIdx.x] = 0.0f; } } + + __shared__ float maskf_shared[ncols*D]; +#pragma unroll + for (int j = 0; j < ncols; ++j) { + maskf_shared[j*D + tid] = 0.0f; + } + __syncthreads(); // Convert Q to float2 (f16 K) or q8_1 (quantized K) and store in registers: @@ -178,9 +187,24 @@ static __global__ void flash_attn_vec_ext_f32( float VKQ[ncols] = {0.0f}; - for (int k_VKQ_0 = blockIdx.y*D; k_VKQ_0 < ne11; k_VKQ_0 += gridDim.y*D) { + const int k_VKQ_max = KV_max ? KV_max[sequence*gridDim.x + blockIdx.x] : ne11; + K += blockIdx.y*D * nb11; + V += blockIdx.y*D * nb21; + maskh += blockIdx.y*D; + for (int k_VKQ_0 = blockIdx.y*D; k_VKQ_0 < k_VKQ_max; k_VKQ_0 += gridDim.y*D, + // Increment pointers after each loop: + K += gridDim.y*D*nb11, V += gridDim.y*D*nb21, maskh += gridDim.y*D) { + // Calculate KQ tile and keep track of new maximum KQ values: + if (mask) { +#pragma unroll + for (int j = 0; j < ncols; ++j) { + maskf_shared[j*D + tid] = slope*__half2float(maskh[j*ne11 + tid]); + } + __syncthreads(); + } + float kqmax_new_arr[ncols]; #pragma unroll for (int j = 0; j < ncols; ++j) { @@ -197,14 +221,14 @@ static __global__ void flash_attn_vec_ext_f32( #pragma unroll for (int j = 0; j < ncols; ++j) { - float sum = vec_dot_KQ(K + (k_VKQ_0 + i_KQ)*nb11, Q_f2[j], Q_i32[j], Q_ds[j]); + float sum = vec_dot_KQ(K + i_KQ*nb11, Q_f2[j], Q_i32[j], Q_ds[j]); sum = warp_reduce_sum(sum); if (use_logit_softcap) { sum = logit_softcap*tanhf(sum); } - sum += mask ? 
slope*__half2float(maskh[j*ne11 + k_VKQ_0 + i_KQ]) : 0.0f; + sum += maskf_shared[j*D + i_KQ]; kqmax_new_arr[j] = fmaxf(kqmax_new_arr[j], sum); @@ -248,7 +272,7 @@ static __global__ void flash_attn_vec_ext_f32( break; } - const float V_ki = dequantize_1_v(V + (k_VKQ_0 + k)*nb21, tid); + const float V_ki = dequantize_1_v(V + k*nb21, tid); #pragma unroll for (int j = 0; j < ncols; ++j) { VKQ[j] += V_ki*KQ[j*D + k]; @@ -258,6 +282,39 @@ static __global__ void flash_attn_vec_ext_f32( __syncthreads(); } + if (sinksf && blockIdx.y == 0) { + const float sink = sinksf[head]; + +#pragma unroll + for (int j = 0; j < ncols; ++j) { + if (threadIdx.x == 0) { + kqmax_shared[j][threadIdx.y] = fmaxf(kqmax[j], sink); + } + } + + __syncthreads(); + +#pragma unroll + for (int j = 0; j < ncols; ++j) { + float kqmax_new_j = kqmax_shared[j][threadIdx.x]; + kqmax_new_j = warp_reduce_max(kqmax_new_j); + + const float KQ_max_scale = expf(kqmax[j] - kqmax_new_j); + kqmax[j] = kqmax_new_j; + + const float val = expf(sink - kqmax[j]); + kqsum[j] = kqsum[j]*KQ_max_scale; + + if (tid == 0) { + kqsum[j] += val; + } + + VKQ[j] *= KQ_max_scale; + } + + __syncthreads(); + } + #pragma unroll for (int j = 0; j < ncols; ++j) { kqsum[j] = warp_reduce_sum(kqsum[j]); @@ -281,27 +338,30 @@ static __global__ void flash_attn_vec_ext_f32( if (gridDim.y == 1) { dst_val /= kqsum[j_VKQ]; } - const int j_dst = (ic0 + j_VKQ)*gridDim.y + blockIdx.y; - dst[j_dst*D*gridDim.z + D*blockIdx.z + tid] = dst_val; + dst[(((sequence*ne01 + ic0 + j_VKQ)*ne02 + head)*gridDim.y + blockIdx.y)*D + tid] = dst_val; } if (gridDim.y != 1 && tid < ncols && (ncols <= 2 || ic0 + tid < ne01)) { - dst_meta[((ic0 + tid)*gridDim.z + blockIdx.z) * gridDim.y + blockIdx.y] = make_float2(kqmax[tid], kqsum[tid]); + dst_meta[((sequence*ne01 + ic0 + tid)*ne02 + head)*gridDim.y + blockIdx.y] = make_float2(kqmax[tid], kqsum[tid]); } #else GGML_UNUSED(Q); GGML_UNUSED(K); GGML_UNUSED(V); GGML_UNUSED(mask); GGML_UNUSED(dst); GGML_UNUSED(dst_meta); GGML_UNUSED(scale); GGML_UNUSED(max_bias); GGML_UNUSED(m0); GGML_UNUSED(m1); - GGML_UNUSED(n_head_log2); GGML_UNUSED(logit_softcap); GGML_UNUSED(ne00); - GGML_UNUSED(ne01); GGML_UNUSED(ne02); GGML_UNUSED(ne03); GGML_UNUSED(ne10); - GGML_UNUSED(ne11); GGML_UNUSED(ne12); GGML_UNUSED(ne13); GGML_UNUSED(ne31); - GGML_UNUSED(nb31); GGML_UNUSED(nb01); GGML_UNUSED(nb02); GGML_UNUSED(nb03); - GGML_UNUSED(nb11); GGML_UNUSED(nb12); GGML_UNUSED(nb13); GGML_UNUSED(nb21); - GGML_UNUSED(nb22); GGML_UNUSED(nb23); GGML_UNUSED(ne0); GGML_UNUSED(ne1); - GGML_UNUSED(ne2); GGML_UNUSED(ne3); + GGML_UNUSED(n_head_log2); GGML_UNUSED(logit_softcap); + GGML_UNUSED(ne00); GGML_UNUSED(ne01); GGML_UNUSED(ne02); GGML_UNUSED(ne03); + GGML_UNUSED(ne10); GGML_UNUSED(ne11); GGML_UNUSED(ne12); GGML_UNUSED(ne13); + GGML_UNUSED(ne31); GGML_UNUSED(ne32); GGML_UNUSED(ne33); + GGML_UNUSED(nb31); GGML_UNUSED(nb32); GGML_UNUSED(nb33); + GGML_UNUSED(nb01); GGML_UNUSED(nb02); GGML_UNUSED(nb03); + GGML_UNUSED(nb11); GGML_UNUSED(nb12); GGML_UNUSED(nb13); + GGML_UNUSED(nb21); GGML_UNUSED(nb22); GGML_UNUSED(nb23); NO_DEVICE_CODE; #endif // FLASH_ATTN_AVAILABLE } +#ifdef __clang__ +#pragma clang diagnostic pop +#endif // __clang__ template void ggml_cuda_flash_attn_ext_vec_f32_case_impl(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { @@ -326,7 +386,9 @@ void ggml_cuda_flash_attn_ext_vec_f32_case(ggml_backend_cuda_context & ctx, ggml float logit_softcap; memcpy(&logit_softcap, (const float *) KQV->op_params + 2, sizeof(float)); - if (Q->ne[1] == 1) { + const int cc = 
ggml_cuda_info().devices[ggml_cuda_get_device()].cc; + + if (Q->ne[1] == 1 || GGML_CUDA_CC_IS_NVIDIA(cc)) { constexpr int cols_per_block = 1; if (logit_softcap == 0.0f) { constexpr bool use_logit_softcap = false; diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/fattn-wmma-f16.cu b/ml/backend/ggml/ggml/src/ggml-cuda/fattn-wmma-f16.cu index c5668adb1..93d4d8102 100644 --- a/ml/backend/ggml/ggml/src/ggml-cuda/fattn-wmma-f16.cu +++ b/ml/backend/ggml/ggml/src/ggml-cuda/fattn-wmma-f16.cu @@ -7,14 +7,18 @@ #include "fattn-wmma-f16.cuh" #ifdef FP16_MMA_AVAILABLE -#if !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) +#if !defined(GGML_USE_HIP) #include +#ifdef GGML_USE_MUSA +namespace wmma = mtmusa::wmma; +#else // GGML_USE_MUSA namespace wmma = nvcuda::wmma; +#endif // GGML_USE_MUSA #elif defined(GGML_HIP_ROCWMMA_FATTN) && defined(FP16_MMA_AVAILABLE) #undef HIP_ENABLE_WARP_SYNC_BUILTINS // conflicts with rocWMMA headers #include namespace wmma = rocwmma; -#endif // !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) +#endif // !defined(GGML_USE_HIP) #endif // FP16_MMA_AVAILABLE // D == head size, VKQ_stride == num VKQ rows calculated in parallel: @@ -25,6 +29,8 @@ static __global__ void flash_attn_ext_f16( const char * __restrict__ K, const char * __restrict__ V, const char * __restrict__ mask, + const char * __restrict__ sinks, + const int * __restrict__ KV_max, float * __restrict__ dst, float2 * __restrict__ dst_meta, const float scale, @@ -33,29 +39,13 @@ static __global__ void flash_attn_ext_f16( const float m1, const uint32_t n_head_log2, const float logit_softcap, - const int ne00, - const int ne01, - const int ne02, - const int ne03, - const int ne10, - const int ne11, - const int ne12, - const int ne13, - const int ne31, - const int nb31, - const int nb01, - const int nb02, - const int nb03, - const int nb11, - const int nb12, - const int nb13, - const int nb21, - const int nb22, - const int nb23, - const int ne0, - const int ne1, - const int ne2, - const int ne3) { + const int32_t ne00, const int32_t ne01, const int32_t ne02, const int32_t ne03, + const int32_t nb01, const int32_t nb02, const int32_t nb03, + const int32_t ne10, const int32_t ne11, const int32_t ne12, const int32_t ne13, + const int32_t nb11, const int32_t nb12, const int64_t nb13, + const int32_t nb21, const int32_t nb22, const int64_t nb23, + const int32_t ne31, const int32_t ne32, const int32_t ne33, + const int32_t nb31, const int32_t nb32, const int64_t nb33) { #if defined(FLASH_ATTN_AVAILABLE) && (__CUDA_ARCH__ == GGML_CUDA_CC_VOLTA || (defined(GGML_HIP_ROCWMMA_FATTN) && defined(FP16_MMA_AVAILABLE))) // Skip unused kernel variants for faster compilation: if (use_logit_softcap && !(D == 128 || D == 256)) { @@ -89,17 +79,19 @@ static __global__ void flash_attn_ext_f16( constexpr int kqs_padded = FATTN_KQ_STRIDE + 8; constexpr int kqar = sizeof(KQ_acc_t)/sizeof(half); + const int sequence = blockIdx.z / ne02; + const int head = blockIdx.z - sequence*ne02; const int gqa_ratio = ne02 / ne12; // With grouped query attention there are > 1 Q matrices per K, V matrix. 
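// (Editor note: the int64_t casts added to the loads below — e.g.
// K_h + int64_t(k_VKQ_0 + i_KQ_0 + frag_m*threadIdx.y)*stride_KV — keep row-offset
// products from overflowing 32-bit arithmetic on long KV caches; widening
// nb13/nb23/nb33 to int64_t in the kernel signatures serves the same purpose.)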
- const float * Q_f = (const float *) (Q + nb02* blockIdx.z + nb01*ic0); - const half * K_h = (const half *) (K + nb12*(blockIdx.z / gqa_ratio)); - const half * V_h = (const half *) (V + nb12*(blockIdx.z / gqa_ratio)); // K and V have same shape - const half * maskh = (const half *) mask + (nb31/sizeof(half))* ic0; - const half2 * mask2 = (const half2 *) mask + (nb31/sizeof(half))*(ic0/2); + const float * Q_f = (const float *) (Q + nb03* sequence + nb02* head + nb01*ic0); + const half * K_h = (const half *) (K + nb13* sequence + nb12*(head / gqa_ratio)); + const half * V_h = (const half *) (V + nb13* sequence + nb12*(head / gqa_ratio)); // K and V have same shape + const half * maskh = (const half *) (mask + nb33*(sequence % ne33) + nb31*ic0); + const half2 * mask2 = (const half2 *) maskh; const int stride_Q = nb01 / sizeof(float); const int stride_KV = nb11 / sizeof(half); - const float slopef = get_alibi_slope(max_bias, blockIdx.z, n_head_log2, m0, m1); + const float slopef = get_alibi_slope(max_bias, head, n_head_log2, m0, m1); const half slopeh = __float2half(slopef); const half2 slope2 = make_half2(slopef, slopef); @@ -175,7 +167,8 @@ static __global__ void flash_attn_ext_f16( __syncthreads(); // Iterate over ne11 == previous tokens: - for (int k_VKQ_0 = blockIdx.y*FATTN_KQ_STRIDE; k_VKQ_0 < ne11; k_VKQ_0 += gridDim.y*FATTN_KQ_STRIDE) { + const int k_VKQ_max = KV_max ? KV_max[sequence*gridDim.x + blockIdx.x] : ne11; + for (int k_VKQ_0 = blockIdx.y*FATTN_KQ_STRIDE; k_VKQ_0 < k_VKQ_max; k_VKQ_0 += gridDim.y*FATTN_KQ_STRIDE) { // Calculate tile of KQ: #pragma unroll for (int i_KQ_0 = 0; i_KQ_0 < FATTN_KQ_STRIDE; i_KQ_0 += KQ_stride_tc) { @@ -187,7 +180,7 @@ static __global__ void flash_attn_ext_f16( #pragma unroll for (int k_KQ_0 = 0; k_KQ_0 < D; k_KQ_0 += 16) { frag_a_K K_a; - wmma::load_matrix_sync(K_a, K_h + (k_VKQ_0 + i_KQ_0 + frag_m*threadIdx.y)*stride_KV + k_KQ_0, stride_KV); + wmma::load_matrix_sync(K_a, K_h + int64_t(k_VKQ_0 + i_KQ_0 + frag_m*threadIdx.y)*stride_KV + k_KQ_0, stride_KV); #pragma unroll for (int j = 0; j < ncols/frag_n; ++j) { wmma::mma_sync(KQ_c[j], K_a, Q_b[k_KQ_0/16][j], KQ_c[j]); @@ -334,7 +327,7 @@ static __global__ void flash_attn_ext_f16( const int k = k0 + (threadIdx.y % VKQ_ratio)*16; frag_a_V v_a; - wmma::load_matrix_sync(v_a, V_h + (k_VKQ_0 + k)*stride_KV + i_VKQ_0 + frag_m*(threadIdx.y/VKQ_ratio), stride_KV); + wmma::load_matrix_sync(v_a, V_h + int64_t(k_VKQ_0 + k)*stride_KV + i_VKQ_0 + frag_m*(threadIdx.y/VKQ_ratio), stride_KV); #pragma unroll for (int j = 0; j < ncols/frag_n; ++j) { wmma::mma_sync(VKQ_c[i_VKQ_0/VKQ_stride][j], v_a, KQ_b[k0/(VKQ_ratio*16)][j], VKQ_c[i_VKQ_0/VKQ_stride][j]); @@ -394,7 +387,6 @@ static __global__ void flash_attn_ext_f16( if (ic0 + j_VKQ >= ne01) { return; } - const int j_dst = (ic0 + j_VKQ)*gridDim.y + blockIdx.y; float KQ_rowsum_j; if (std::is_same::value) { @@ -403,6 +395,8 @@ static __global__ void flash_attn_ext_f16( KQ_rowsum_j = __low2float(KQ_rowsum_h2[j0/nwarps]) + __high2float(KQ_rowsum_h2[j0/nwarps]); } + const int j_dst_unrolled = ((sequence*ne01 + ic0 + j_VKQ)*ne02 + head)*gridDim.y + blockIdx.y; + #pragma unroll for (int i0 = 0; i0 < D; i0 += warp_size) { const int i = i0 + threadIdx.x; @@ -413,7 +407,7 @@ static __global__ void flash_attn_ext_f16( if (gridDim.y == 1) { dst_val /= KQ_rowsum_j; } - dst[j_dst*gridDim.z*D + blockIdx.z*D + i] = dst_val; + dst[j_dst_unrolled*D + i] = dst_val; } if (gridDim.y == 1 || threadIdx.x != 0) { @@ -427,19 +421,19 @@ static __global__ void flash_attn_ext_f16( 
dst_meta_val.x = __low2float(KQ_max_h2[j0/nwarps]); } dst_meta_val.y = KQ_rowsum_j; - dst_meta[((ic0 + j_VKQ)*gridDim.z + blockIdx.z) * gridDim.y + blockIdx.y] = dst_meta_val; + dst_meta[j_dst_unrolled] = dst_meta_val; } #else - GGML_UNUSED(Q); GGML_UNUSED(K); GGML_UNUSED(V); GGML_UNUSED(mask); + GGML_UNUSED(Q); GGML_UNUSED(K); GGML_UNUSED(V); GGML_UNUSED(mask); GGML_UNUSED(sinks); GGML_UNUSED(dst); GGML_UNUSED(dst_meta); GGML_UNUSED(scale); GGML_UNUSED(max_bias); GGML_UNUSED(m0); GGML_UNUSED(m1); GGML_UNUSED(n_head_log2); GGML_UNUSED(logit_softcap); GGML_UNUSED(ne00); GGML_UNUSED(ne01); GGML_UNUSED(ne02); GGML_UNUSED(ne03); GGML_UNUSED(ne10); GGML_UNUSED(ne11); GGML_UNUSED(ne12); GGML_UNUSED(ne13); - GGML_UNUSED(ne31); GGML_UNUSED(nb31); GGML_UNUSED(nb01); GGML_UNUSED(nb02); + GGML_UNUSED(ne31); GGML_UNUSED(ne32); GGML_UNUSED(ne33); GGML_UNUSED(nb31); + GGML_UNUSED(nb32); GGML_UNUSED(nb33); GGML_UNUSED(nb01); GGML_UNUSED(nb02); GGML_UNUSED(nb03); GGML_UNUSED(nb11); GGML_UNUSED(nb12); GGML_UNUSED(nb13); GGML_UNUSED(nb21); GGML_UNUSED(nb22); GGML_UNUSED(nb23); - GGML_UNUSED(ne0); GGML_UNUSED(ne1); GGML_UNUSED(ne2); GGML_UNUSED(ne3); NO_DEVICE_CODE; #endif // defined(FLASH_ATTN_AVAILABLE) && (__CUDA_ARCH__ == GGML_CUDA_CC_VOLTA || (defined(GGML_HIP_ROCWMMA_FATTN) && defined(FP16_MMA_AVAILABLE))) } @@ -555,7 +549,7 @@ void ggml_cuda_flash_attn_ext_wmma_f16(ggml_backend_cuda_context & ctx, ggml_ten return; } -#if !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) +#if !defined(GGML_USE_HIP) if (Q->ne[1] <= 8 && Q->ne[0] % warp_size == 0) { constexpr int cols_per_block = 8; switch (Q->ne[0]) { @@ -577,7 +571,7 @@ void ggml_cuda_flash_attn_ext_wmma_f16(ggml_backend_cuda_context & ctx, ggml_ten } return; } -#endif // !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) +#endif // !defined(GGML_USE_HIP) if (Q->ne[1] <= 32) { constexpr int cols_per_block = 16; diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/fattn.cu b/ml/backend/ggml/ggml/src/ggml-cuda/fattn.cu index 9c5c803d0..6c1185dea 100644 --- a/ml/backend/ggml/ggml/src/ggml-cuda/fattn.cu +++ b/ml/backend/ggml/ggml/src/ggml-cuda/fattn.cu @@ -10,6 +10,7 @@ template static void ggml_cuda_flash_attn_ext_mma_f16_switch_ncols1(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + const int cc = ggml_cuda_info().devices[ggml_cuda_get_device()].cc; const ggml_tensor * Q = dst->src[0]; if constexpr (ncols2 <= 8) { @@ -24,7 +25,7 @@ static void ggml_cuda_flash_attn_ext_mma_f16_switch_ncols1(ggml_backend_cuda_con return; } - if (Q->ne[1] <= 32/ncols2) { + if (ggml_cuda_highest_compiled_arch(cc) == GGML_CUDA_CC_TURING || Q->ne[1] <= 32/ncols2) { ggml_cuda_flash_attn_ext_mma_f16_case(ctx, dst); return; } @@ -268,26 +269,20 @@ static void ggml_cuda_flash_attn_ext_vec_f32(ggml_backend_cuda_context & ctx, gg } void ggml_cuda_flash_attn_ext(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { - const ggml_tensor * KQV = dst; - const ggml_tensor * Q = dst->src[0]; - const ggml_tensor * K = dst->src[1]; - const ggml_tensor * V = dst->src[2]; - const ggml_tensor * mask = dst->src[3]; + const ggml_tensor * KQV = dst; + const ggml_tensor * Q = dst->src[0]; + const ggml_tensor * K = dst->src[1]; + const ggml_tensor * V = dst->src[2]; + const ggml_tensor * mask = dst->src[3]; + const ggml_tensor * sinks = dst->src[4]; ggml_cuda_set_device(ctx.device); const int cc = ggml_cuda_info().devices[ggml_cuda_get_device()].cc; const int warp_size = ggml_cuda_info().devices[ggml_cuda_get_device()].warp_size; const enum ggml_prec prec = 
ggml_flash_attn_ext_get_prec(KQV); - if (GGML_CUDA_CC_IS_AMD(cc)) { -#if defined(GGML_HIP_ROCWMMA_FATTN) - if (fp16_mma_available(cc)) { - ggml_cuda_flash_attn_ext_wmma_f16(ctx, dst); - return; - } -#endif // defined(GGML_HIP_ROCWMMA_FATTN) - - // On AMD the tile kernels perform poorly, use the vec kernel instead: + // TODO: currently only vec implementation for sinks is supported [TAG_ATTN_SINKS] + if (sinks && !fp16_mma_available(cc)) { if (prec == GGML_PREC_DEFAULT && fast_fp16_available(cc)) { ggml_cuda_flash_attn_ext_vec_f16(ctx, dst); } else { @@ -296,6 +291,13 @@ void ggml_cuda_flash_attn_ext(ggml_backend_cuda_context & ctx, ggml_tensor * dst return; } +#if defined(GGML_HIP_ROCWMMA_FATTN) + if (GGML_CUDA_CC_IS_AMD(cc) && fp16_mma_available(cc)) { + ggml_cuda_flash_attn_ext_wmma_f16(ctx, dst); + return; + } +#endif // defined(GGML_HIP_ROCWMMA_FATTN) + if (!fast_fp16_available(cc)) { if (Q->ne[1] <= 8 || Q->ne[0] == 256) { ggml_cuda_flash_attn_ext_vec_f32(ctx, dst); @@ -324,7 +326,9 @@ void ggml_cuda_flash_attn_ext(ggml_backend_cuda_context & ctx, ggml_tensor * dst const bool gqa_opt_applies = ((Q->ne[2] / K->ne[2]) % 2 == 0) && mask; // The mma-based kernels have GQA-specific optimizations const bool mma_needs_data_conversion = K->type != GGML_TYPE_F16 || V->type != GGML_TYPE_F16; - const bool mma_faster_for_bs1 = new_mma_available(cc) && gqa_opt_applies && cc < GGML_CUDA_CC_ADA_LOVELACE && !mma_needs_data_conversion; + const bool mma_faster_for_rtx4000 = Q->ne[3] > 1 || (Q->ne[2] > 4*K->ne[2] && K->ne[1] >= 8192); + const bool mma_faster_for_bs1 = turing_mma_available(cc) && gqa_opt_applies && !mma_needs_data_conversion && + (cc < GGML_CUDA_CC_ADA_LOVELACE || mma_faster_for_rtx4000); const bool can_use_vector_kernel = Q->ne[0] <= 256 && Q->ne[0] % (2*warp_size) == 0; if (Q->ne[1] == 1 && can_use_vector_kernel && !mma_faster_for_bs1) { if (prec == GGML_PREC_DEFAULT) { @@ -336,7 +340,7 @@ void ggml_cuda_flash_attn_ext(ggml_backend_cuda_context & ctx, ggml_tensor * dst } // The MMA implementation needs Turing or newer, use the old WMMA code for Volta: - if (fp16_mma_available(cc) && !new_mma_available(cc)) { + if (fp16_mma_available(cc) && !turing_mma_available(cc)) { ggml_cuda_flash_attn_ext_wmma_f16(ctx, dst); return; } diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/getrows.cu b/ml/backend/ggml/ggml/src/ggml-cuda/getrows.cu index 963e4d03d..f77b2629a 100644 --- a/ml/backend/ggml/ggml/src/ggml-cuda/getrows.cu +++ b/ml/backend/ggml/ggml/src/ggml-cuda/getrows.cu @@ -168,6 +168,10 @@ static void ggml_cuda_get_rows_switch_src0_type( get_rows_cuda_float((const float *) src0_d, src1_d, dst_d, ne00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb1, nb2, nb3, stream); break; + case GGML_TYPE_I32: + get_rows_cuda_float((const int32_t *) src0_d, src1_d, dst_d, + ne00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb1, nb2, nb3, stream); + break; case GGML_TYPE_BF16: get_rows_cuda_float((const nv_bfloat16 *) src0_d, src1_d, dst_d, ne00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb1, nb2, nb3, stream); @@ -210,6 +214,10 @@ void get_rows_cuda( ggml_cuda_get_rows_switch_src0_type(src0_d, src0_type, src1_d, (float *) dst_d, ne00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb1, nb2, nb3, stream); break; + case GGML_TYPE_I32: + ggml_cuda_get_rows_switch_src0_type(src0_d, src0_type, src1_d, (int32_t *) dst_d, + ne00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb1, nb2, nb3, stream); + break; case GGML_TYPE_F16: 
ggml_cuda_get_rows_switch_src0_type(src0_d, src0_type, src1_d, (half *) dst_d, ne00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb1, nb2, nb3, stream); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/ggml-cuda.cu b/ml/backend/ggml/ggml/src/ggml-cuda/ggml-cuda.cu index 496973adf..c7f9dc3a5 100644 --- a/ml/backend/ggml/ggml/src/ggml-cuda/ggml-cuda.cu +++ b/ml/backend/ggml/ggml/src/ggml-cuda/ggml-cuda.cu @@ -4,6 +4,7 @@ #include "ggml-cuda/common.cuh" #include "ggml-cuda/acc.cuh" +#include "ggml-cuda/add-id.cuh" #include "ggml-cuda/arange.cuh" #include "ggml-cuda/argmax.cuh" #include "ggml-cuda/argsort.cuh" @@ -11,6 +12,8 @@ #include "ggml-cuda/clamp.cuh" #include "ggml-cuda/concat.cuh" #include "ggml-cuda/conv-transpose-1d.cuh" +#include "ggml-cuda/conv2d-dw.cuh" +#include "ggml-cuda/conv2d-transpose.cuh" #include "ggml-cuda/convert.cuh" #include "ggml-cuda/count-equal.cuh" #include "ggml-cuda/cpy.cuh" @@ -19,9 +22,9 @@ #include "ggml-cuda/fattn.cuh" #include "ggml-cuda/getrows.cuh" #include "ggml-cuda/im2col.cuh" +#include "ggml-cuda/mmf.cuh" #include "ggml-cuda/mmq.cuh" -#include "ggml-cuda/mmv.cuh" -#include "ggml-cuda/mmvmxfp4.cuh" +#include "ggml-cuda/mmvf.cuh" #include "ggml-cuda/mmvq.cuh" #include "ggml-cuda/norm.cuh" #include "ggml-cuda/opt-step-adamw.cuh" @@ -30,7 +33,9 @@ #include "ggml-cuda/pool2d.cuh" #include "ggml-cuda/quantize.cuh" #include "ggml-cuda/rope.cuh" +#include "ggml-cuda/roll.cuh" #include "ggml-cuda/scale.cuh" +#include "ggml-cuda/softcap.cuh" #include "ggml-cuda/softmax.cuh" #include "ggml-cuda/ssm-conv.cuh" #include "ggml-cuda/ssm-scan.cuh" @@ -42,6 +47,7 @@ #include "ggml-cuda/upscale.cuh" #include "ggml-cuda/wkv.cuh" #include "ggml-cuda/gla.cuh" +#include "ggml-cuda/set-rows.cuh" #include "ggml.h" #include @@ -49,16 +55,17 @@ #include #include #include +#include #include #include #include +#include #include #include #include #include -#include -#include #include +#include #include #include #include @@ -99,8 +106,7 @@ int ggml_cuda_get_device() { static cudaError_t ggml_cuda_device_malloc(void ** ptr, size_t size, int device) { ggml_cuda_set_device(device); cudaError_t err; - if (getenv("GGML_CUDA_ENABLE_UNIFIED_MEMORY") != nullptr) - { + if (getenv("GGML_CUDA_ENABLE_UNIFIED_MEMORY") != nullptr) { err = cudaMallocManaged(ptr, size); #if defined(GGML_USE_HIP) if (err == hipSuccess) { @@ -118,15 +124,13 @@ static cudaError_t ggml_cuda_device_malloc(void ** ptr, size_t size, int device) err = cudaMalloc(ptr, size); } #endif // defined(GGML_USE_HIP) - } - else - { + } else { err = cudaMalloc(ptr, size); } return err; } -#if defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__) +#if defined(GGML_USE_HIP) static int ggml_cuda_parse_id(char devName[]) { // A list of possible Target IDs can be found under the rocclr/clr repo in device.cpp // these values are not stable so this is susceptible to breakage @@ -173,12 +177,12 @@ static int ggml_cuda_parse_id(char devName[]) { archNum += archMinor; return archNum; } -#endif // defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__) +#endif // defined(GGML_USE_HIP) static std::string ggml_cuda_parse_uuid(cudaDeviceProp prop, int device_num) { char id[64]; - #if !defined(GGML_USE_HIP) +#if !defined(GGML_USE_HIP) snprintf(id, sizeof(id), "GPU-%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x", (unsigned char)prop.uuid.bytes[0], @@ -198,10 +202,10 @@ static std::string ggml_cuda_parse_uuid(cudaDeviceProp prop, int device_num) { (unsigned char)prop.uuid.bytes[14], (unsigned char)prop.uuid.bytes[15] 
); - #else - #ifdef _WIN32 +#else +#ifdef _WIN32 snprintf(id, sizeof(id), "%d", device_num); - #else +#else try { std::string uuid = std::string(prop.uuid.bytes, 16); @@ -214,14 +218,14 @@ static std::string ggml_cuda_parse_uuid(cudaDeviceProp prop, int device_num) { } catch (const std::exception &e) { snprintf(id, sizeof(id), "%d", device_num); } - #endif - #endif +#endif +#endif return id; } static ggml_cuda_device_info ggml_cuda_init() { -#ifdef __HIP_PLATFORM_AMD__ +#if defined(GGML_USE_HIP) // Workaround for a rocBLAS bug when using multiple graphics cards: // https://github.com/ROCmSoftwarePlatform/rocBLAS/issues/1346 { @@ -290,11 +294,11 @@ static ggml_cuda_device_info ggml_cuda_init() { info.default_tensor_split[id] = total_vram; total_vram += prop.totalGlobalMem; - - info.devices[id].nsm = prop.multiProcessorCount; - info.devices[id].smpb = prop.sharedMemPerBlock; - info.devices[id].warp_size = prop.warpSize; -#if defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__) + info.devices[id].integrated = prop.integrated; + info.devices[id].nsm = prop.multiProcessorCount; + info.devices[id].smpb = prop.sharedMemPerBlock; + info.devices[id].warp_size = prop.warpSize; +#if defined(GGML_USE_HIP) info.devices[id].smpbo = prop.sharedMemPerBlock; info.devices[id].cc = ggml_cuda_parse_id(prop.gcnArchName); @@ -326,7 +330,7 @@ static ggml_cuda_device_info ggml_cuda_init() { GGML_LOG_INFO(" Device %d: %s, compute capability %d.%d, VMM: %s, ID: %s\n", id, prop.name, prop.major, prop.minor, device_vmm ? "yes" : "no", ggml_cuda_parse_uuid(prop, id).c_str()); -#endif // defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__) +#endif // defined(GGML_USE_HIP) } for (int id = 0; id < info.device_count; ++id) { @@ -563,6 +567,33 @@ std::unique_ptr ggml_backend_cuda_context::new_pool_for_device(i return std::unique_ptr(new ggml_cuda_pool_leg(device)); } +// destroying a cuBLAS handle while a graph is being captured in a different thread can result in a CUDA error +// this lock is used to ensure that no cuBLAS handle is destroyed while a graph is being captured + +static std::mutex ggml_cuda_lock; +static std::condition_variable ggml_cuda_lock_cv; +static std::atomic ggml_cuda_lock_counter; + +ggml_backend_cuda_context::~ggml_backend_cuda_context() { + std::unique_lock lock(ggml_cuda_lock); + ggml_cuda_lock_cv.wait(lock, []{ return ggml_cuda_lock_counter.load(std::memory_order_relaxed) == 0; }); + + if (copy_event != nullptr) { + CUDA_CHECK(cudaEventDestroy(copy_event)); + } + for (int i = 0; i < GGML_CUDA_MAX_DEVICES; ++i) { + for (int j = 0; j < GGML_CUDA_MAX_STREAMS; ++j) { + if (streams[i][j] != nullptr) { + CUDA_CHECK(cudaStreamDestroy(streams[i][j])); + } + } + if (cublas_handles[i] != nullptr) { + CUBLAS_CHECK(cublasDestroy(cublas_handles[i])); + } + } +} + + // cuda buffer struct ggml_backend_cuda_buffer_context { @@ -665,9 +696,8 @@ static void ggml_backend_cuda_buffer_clear(ggml_backend_buffer_t buffer, uint8_t ggml_backend_cuda_buffer_context * ctx = (ggml_backend_cuda_buffer_context *)buffer->context; ggml_cuda_set_device(ctx->device); - CUDA_CHECK(cudaDeviceSynchronize()); - CUDA_CHECK(cudaMemset(ctx->dev_ptr, value, buffer->size)); - CUDA_CHECK(cudaDeviceSynchronize()); + CUDA_CHECK(cudaMemsetAsync(ctx->dev_ptr, value, buffer->size, cudaStreamPerThread)); + CUDA_CHECK(cudaStreamSynchronize(cudaStreamPerThread)); } static const ggml_backend_buffer_i ggml_backend_cuda_buffer_interface = { @@ -1116,6 +1146,10 @@ static const char * ggml_backend_cuda_host_buffer_type_name(ggml_backend_buffer_ 
GGML_UNUSED(buft); } +static bool ggml_backend_buft_is_cuda_host(ggml_backend_buffer_type_t buft) { + return buft->iface.get_name == ggml_backend_cuda_host_buffer_type_name; +} + static void ggml_backend_cuda_host_buffer_free_buffer(ggml_backend_buffer_t buffer) { CUDA_CHECK(cudaFreeHost(buffer->context)); delete buffer; @@ -1192,7 +1226,6 @@ typedef void (*ggml_cuda_op_mul_mat_t)( static cudaError_t ggml_cuda_cpy_tensor_2d( void * dst, const struct ggml_tensor * src, int64_t i3, int64_t i2, int64_t i1_low, int64_t i1_high, cudaStream_t stream) { - GGML_ASSERT(ggml_backend_buffer_is_cuda(src->buffer)); const char * src_ptr = (const char *) src->data; char * dst_ptr = (char *) dst; @@ -1250,9 +1283,12 @@ static void ggml_cuda_op_mul_mat_cublas( const int cc = ggml_cuda_info().devices[id].cc; - const bool use_fp16 = (src0->type == GGML_TYPE_F16 || ggml_is_quantized(src0->type)) && ggml_is_contiguous(src0) && row_diff == src0->ne[1] && dst->op_params[0] == GGML_PREC_DEFAULT && src0->type != GGML_TYPE_MXFP4; + const bool supports_bf16 = GGML_CUDA_CC_IS_NVIDIA(cc) || GGML_CUDA_CC_IS_AMD(cc) || + (GGML_CUDA_CC_IS_MTHREADS(cc) && cc >= GGML_CUDA_CC_QY2); - if (src0->type == GGML_TYPE_BF16 && ggml_is_contiguous(src0) && row_diff == src0->ne[1]) { + const bool use_fp16 = (src0->type == GGML_TYPE_F16 || ggml_is_quantized(src0->type)) && ggml_is_contiguous(src0) && row_diff == src0->ne[1] && dst->op_params[0] == GGML_PREC_DEFAULT; + + if (supports_bf16 && src0->type == GGML_TYPE_BF16 && ggml_is_contiguous(src0) && row_diff == src0->ne[1]) { ggml_cuda_pool_alloc src1_as_bf16(ctx.pool(id)); if (src1->type != GGML_TYPE_BF16) { const to_bf16_cuda_t to_bf16_cuda = ggml_get_to_bf16_cuda(src1->type); @@ -1280,7 +1316,7 @@ static void ggml_cuda_op_mul_mat_cublas( const to_fp32_cuda_t to_fp32_cuda = ggml_get_to_fp32_cuda(GGML_TYPE_BF16); to_fp32_cuda(dst_bf16.get(), dst_dd_i, row_diff*src1_ncols, stream); - } else if (((GGML_CUDA_CC_IS_NVIDIA(cc) && cc >= GGML_CUDA_CC_VOLTA) || GGML_CUDA_CC_IS_AMD(cc)) && use_fp16) { + } else if (fast_fp16_hardware_available(cc) && use_fp16) { // convert src0 and src1 to fp16, multiply as fp16, convert dst to fp32 ggml_cuda_pool_alloc src0_as_f16(ctx.pool(id)); if (src0->type != GGML_TYPE_F16) { @@ -1475,8 +1511,6 @@ static void ggml_cuda_op_mul_mat( const int64_t nb2 = dst->nb[2]; const int64_t nb3 = dst->nb[3]; - GGML_ASSERT(ggml_backend_buffer_is_cuda(dst->buffer)); - GGML_ASSERT(ggml_backend_buffer_is_cuda(src1->buffer)); ggml_backend_cuda_buffer_context * src1_ctx = (ggml_backend_cuda_buffer_context *) src1->buffer->context; ggml_backend_cuda_buffer_context * dst_ctx = (ggml_backend_cuda_buffer_context *) dst->buffer->context; @@ -1771,7 +1805,7 @@ static void ggml_cuda_op_mul_mat( } static __global__ void k_compute_batched_ptrs( - const half * src0_as_f16, const half * src1_as_f16, char * dst, + const void * src0_as_f16, const void * src1_as_f16, char * dst, const void ** ptrs_src, void ** ptrs_dst, int64_t ne12, int64_t ne13, int64_t ne23, @@ -1794,83 +1828,136 @@ static __global__ void k_compute_batched_ptrs( ptrs_dst[0*ne23 + i12 + i13*ne12] = ( char *) dst + i12*nbd2 + i13*nbd3; } -static void ggml_cuda_mul_mat_batched_cublas(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { +// Type traits for mapping ggml types to CUDA/cuBLAS types +template +struct batched_mul_mat_traits; + +template<> +struct batched_mul_mat_traits { + using cuda_type = float; + static inline const cublasComputeType_t compute_type = 
CUBLAS_COMPUTE_32F; + static inline const cudaDataType_t data_type = CUDA_R_32F; + static inline const ggml_type ggml_type_val = GGML_TYPE_F32; + static inline const float alpha = 1.0f; + static inline const float beta = 0.0f; + static inline const void* get_alpha() { static const float val = alpha; return &val; } + static inline const void* get_beta() { static const float val = beta; return &val; } + static inline auto get_nc_converter(ggml_type src_type) { return ggml_get_to_fp32_nc_cuda(src_type); } +}; + +template<> +struct batched_mul_mat_traits { + using cuda_type = nv_bfloat16; + static inline const cublasComputeType_t compute_type = CUBLAS_COMPUTE_32F; + static inline const cudaDataType_t data_type = CUDA_R_16BF; + static inline const ggml_type ggml_type_val = GGML_TYPE_BF16; + static inline const float alpha = 1.0f; + static inline const float beta = 0.0f; + static inline const void* get_alpha() { static const float val = alpha; return &val; } + static inline const void* get_beta() { static const float val = beta; return &val; } + static inline auto get_nc_converter(ggml_type src_type) { return ggml_get_to_bf16_nc_cuda(src_type); } +}; + +template<> +struct batched_mul_mat_traits { + using cuda_type = half; + static inline const cublasComputeType_t compute_type = CUBLAS_COMPUTE_16F; + static inline const cudaDataType_t data_type = CUDA_R_16F; + static inline const ggml_type ggml_type_val = GGML_TYPE_F16; + static inline const half alpha = 1.0; + static inline const half beta = 0.0; + static inline const void* get_alpha() { static const half val = alpha; return &val; } + static inline const void* get_beta() { static const half val = beta; return &val; } + static inline auto get_nc_converter(ggml_type src_type) { return ggml_get_to_fp16_nc_cuda(src_type); } +}; + +template +static void ggml_cuda_mul_mat_batched_cublas_impl(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { + using traits = batched_mul_mat_traits; + using cuda_t = typename traits::cuda_type; + GGML_ASSERT(!ggml_is_transposed(src0)); GGML_ASSERT(!ggml_is_transposed(src1)); - - GGML_ASSERT(ggml_backend_buffer_is_cuda(src0->buffer)); - GGML_ASSERT(src0->type == GGML_TYPE_F16); + GGML_ASSERT(!ggml_backend_buft_is_cuda_split(src0->buffer->buft)); + GGML_ASSERT(src0->type == src0_type); + GGML_ASSERT(ggml_is_contiguous(dst)); // Byte offsets and tensor dimensions are currently used in an inconsistent way for dst. // As long as dst is contiguous this does not matter though. 
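The `batched_mul_mat_traits` specializations above collapse the F32/BF16/F16 batched-GEMM paths into a single templated implementation: each specialization carries the cuBLAS data/compute type, the alpha/beta constants, and the right "convert to this type" helper. A minimal self-contained sketch of that traits-dispatch pattern follows; the tag types, enum values, and printout are invented stand-ins for illustration, not the real cuBLAS or ggml symbols.

```cpp
#include <cstdio>

// Stand-in for cudaDataType_t; values are illustrative only.
enum class data_type { r32f, r16f };

template <typename T> struct gemm_traits;

template <> struct gemm_traits<float> {
    static constexpr data_type type = data_type::r32f; // CUDA_R_32F in the real code
    static constexpr float alpha = 1.0f, beta = 0.0f;
};

template <> struct gemm_traits<unsigned short> {       // host stand-in for half
    static constexpr data_type type = data_type::r16f; // CUDA_R_16F in the real code
    static constexpr float alpha = 1.0f, beta = 0.0f;
};

// One shared body, parameterized by the traits -- the real implementation
// feeds traits::data_type/compute_type and &alpha/&beta into cublasGemm*Ex.
template <typename T>
static void run_batched_gemm() {
    using traits = gemm_traits<T>;
    printf("elem size %zu, data_type %d, alpha %.1f\n",
           sizeof(T), (int) traits::type, traits::alpha);
}

int main() {
    run_batched_gemm<float>();          // F32 path
    run_batched_gemm<unsigned short>(); // F16 path
    return 0;
}
```

The payoff, visible in the dispatcher added further down, is that the per-type switch shrinks to three one-line `ggml_cuda_mul_mat_batched_cublas_impl` calls.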
- GGML_ASSERT(ggml_is_contiguous(dst)); GGML_TENSOR_BINARY_OP_LOCALS const int64_t ne_dst = ggml_nelements(dst); - cudaStream_t main_stream = ctx.stream(); - CUBLAS_CHECK(cublasSetStream(ctx.cublas_handle(), main_stream)); - const half * src0_f16 = (const half *) src0->data; float * dst_ddf = (float *) dst->data; - - const half * src1_f16 = (const half *) src1->data; const size_t ts_src1 = ggml_type_size(src1->type); GGML_ASSERT(nb10 == ts_src1); int64_t s11 = nb11 / ts_src1; int64_t s12 = nb12 / ts_src1; int64_t s13 = nb13 / ts_src1; - ggml_cuda_pool_alloc src1_f16_alloc(ctx.pool()); - // convert src1 to fp16 - if (src1->type != GGML_TYPE_F16) { - const to_fp16_nc_cuda_t to_fp16_cuda = ggml_get_to_fp16_nc_cuda(src1->type); + const cuda_t * src0_ptr = nullptr; + const cuda_t * src1_ptr = nullptr; + + ggml_cuda_pool_alloc src0_alloc(ctx.pool()); + ggml_cuda_pool_alloc src1_alloc(ctx.pool()); + + bool is_src0_cont_2 = ggml_is_contiguous_2(src0); + bool is_src1_cont_2 = ggml_is_contiguous_2(src1); + + // Handle src0 + src0_ptr = (const cuda_t *) src0->data; + + // Handle src1 - convert if necessary + if (src1->type == src0_type) { + src1_ptr = (const cuda_t *) src1->data; + } else { + // Convert src1 to target type using traits conversion functions const int64_t ne_src1 = ggml_nelements(src1); - src1_f16_alloc.alloc(ne_src1); - GGML_ASSERT(to_fp16_cuda != nullptr); + src1_alloc.alloc(ne_src1); - to_fp16_cuda(src1_f16, src1_f16_alloc.get(), ne10, ne11, ne12, ne13, s11, s12, s13, main_stream); - - src1_f16 = src1_f16_alloc.get(); + const auto convert_func = traits::get_nc_converter(src1->type); + GGML_ASSERT(convert_func != nullptr); + convert_func(src1->data, src1_alloc.get(), ne10, ne11, ne12, ne13, s11, s12, s13, main_stream); + src1_ptr = src1_alloc.get(); s11 = ne10; s12 = ne11*s11; s13 = ne12*s12; + + is_src1_cont_2 = true; } - ggml_cuda_pool_alloc dst_f16(ctx.pool()); + // Setup destination buffer + ggml_cuda_pool_alloc dst_temp(ctx.pool()); char * dst_t; - - cublasComputeType_t cu_compute_type = CUBLAS_COMPUTE_16F; - cudaDataType_t cu_data_type = CUDA_R_16F; - - // dst strides size_t nbd2 = dst->nb[2]; size_t nbd3 = dst->nb[3]; - const half alpha_f16 = 1.0f; - const half beta_f16 = 0.0f; - + cublasComputeType_t cu_compute_type = traits::compute_type; + cudaDataType_t cu_data_type = traits::data_type; + cudaDataType_t cu_data_type_a = traits::data_type; + cudaDataType_t cu_data_type_b = traits::data_type; + const void * alpha = traits::get_alpha(); + const void * beta = traits::get_beta(); const float alpha_f32 = 1.0f; - const float beta_f32 = 0.0f; - - const void * alpha = &alpha_f16; - const void * beta = &beta_f16; + const float beta_f32 = 0.0f; if (dst->op_params[0] == GGML_PREC_DEFAULT) { - dst_t = (char *) dst_f16.alloc(ne_dst); - - nbd2 /= sizeof(float) / sizeof(half); - nbd3 /= sizeof(float) / sizeof(half); + if constexpr (src0_type == GGML_TYPE_F32) { + dst_t = (char *) dst_ddf; // Direct F32 output + } else { + dst_t = (char *) dst_temp.alloc(ne_dst); + nbd2 /= sizeof(float) / sizeof(cuda_t); + nbd3 /= sizeof(float) / sizeof(cuda_t); + } } else { dst_t = (char *) dst_ddf; - cu_compute_type = CUBLAS_COMPUTE_32F; - cu_data_type = CUDA_R_32F; - + cu_data_type = CUDA_R_32F; alpha = &alpha_f32; - beta = &beta_f32; + beta = &beta_f32; } int id = ggml_cuda_get_device(); @@ -1878,7 +1965,7 @@ static void ggml_cuda_mul_mat_batched_cublas(ggml_backend_cuda_context & ctx, co if (GGML_CUDA_CC_IS_CDNA(cc) || GGML_CUDA_CC_IS_RDNA4(cc)) { cu_compute_type = CUBLAS_COMPUTE_32F; alpha = 
&alpha_f32; - beta = &beta_f32; + beta = &beta_f32; } GGML_ASSERT(ne12 % ne02 == 0); @@ -1888,35 +1975,19 @@ static void ggml_cuda_mul_mat_batched_cublas(ggml_backend_cuda_context & ctx, co const int64_t r2 = ne12/ne02; const int64_t r3 = ne13/ne03; -#if 0 - // use cublasGemmEx - { - for (int i13 = 0; i13 < ne13; ++i13) { - for (int i12 = 0; i12 < ne12; ++i12) { - int i03 = i13 / r3; - int i02 = i12 / r2; + if (r2 == 1 && r3 == 1 && is_src0_cont_2 && is_src1_cont_2) { + // with a [0, 2, 1, 3] perm. and ne02==1 the matrix strides need to be determined from dim 3: + const int64_t sma = ne02 == 1 ? nb03/nb00 : nb02/nb00; + const int64_t smb = ne12 == 1 ? s13 : s12; - CUBLAS_CHECK( - cublasGemmEx(ctx.cublas_handle(), CUBLAS_OP_T, CUBLAS_OP_N, - ne01, ne11, ne10, - alpha, (const char *) src0_f16 + i03*nb03 + i02*nb02, CUDA_R_16F, nb01/sizeof(half), - src1_f16 + i13*s13 + i12*s12, CUDA_R_16F, s11, - beta, ( char *) dst_t + i13*nbd3 + i12*nbd2, cu_data_type, ne0, - cu_compute_type, - CUBLAS_GEMM_DEFAULT_TENSOR_OP)); - } - } - } -#else - if (r2 == 1 && r3 == 1 && ggml_is_contiguous_2(src0) && ggml_is_contiguous_2(src1)) { // there is no broadcast and src0, src1 are contiguous across dims 2, 3 // use cublasGemmStridedBatchedEx CUBLAS_CHECK( cublasGemmStridedBatchedEx(ctx.cublas_handle(), CUBLAS_OP_T, CUBLAS_OP_N, ne01, ne11, ne10, - alpha, src0_f16, CUDA_R_16F, nb01/nb00, nb02/nb00, // strideA - src1_f16, CUDA_R_16F, s11, s12, // strideB - beta, dst_t, cu_data_type, ne0, ne1*ne0, // strideC + alpha, src0_ptr, cu_data_type_a, nb01/nb00, sma, // strideA + src1_ptr, cu_data_type_b, s11, smb, // strideB + beta, dst_t, cu_data_type, ne0, ne1*ne0, // strideC ne12*ne13, cu_compute_type, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); @@ -1927,34 +1998,55 @@ static void ggml_cuda_mul_mat_batched_cublas(ggml_backend_cuda_context & ctx, co ggml_cuda_pool_alloc ptrs_src(ctx.pool(), 2*ne23); ggml_cuda_pool_alloc< void *> ptrs_dst(ctx.pool(), 1*ne23); + size_t src1_stride_size = sizeof(cuda_t); + dim3 block_dims(ne13, ne12); k_compute_batched_ptrs<<<1, block_dims, 0, main_stream>>>( - src0_f16, src1_f16, dst_t, + src0_ptr, src1_ptr, dst_t, ptrs_src.get(), ptrs_dst.get(), ne12, ne13, ne23, nb02, nb03, - src1->type == GGML_TYPE_F16 ? nb12 : s12*sizeof(half), - src1->type == GGML_TYPE_F16 ? nb13 : s13*sizeof(half), + (src1->type == src0_type) ? nb12 : s12*src1_stride_size, + (src1->type == src0_type) ? 
nb13 : s13*src1_stride_size, nbd2, nbd3, r2, r3); + CUDA_CHECK(cudaGetLastError()); CUBLAS_CHECK( cublasGemmBatchedEx(ctx.cublas_handle(), CUBLAS_OP_T, CUBLAS_OP_N, ne01, ne11, ne10, - alpha, (const void **) (ptrs_src.get() + 0*ne23), CUDA_R_16F, nb01/nb00, - (const void **) (ptrs_src.get() + 1*ne23), CUDA_R_16F, s11, - beta, ( void **) (ptrs_dst.get() + 0*ne23), cu_data_type, ne0, + alpha, (const void **) (ptrs_src.get() + 0*ne23), cu_data_type_a, nb01/nb00, + (const void **) (ptrs_src.get() + 1*ne23), cu_data_type_b, s11, + beta, ( void **) (ptrs_dst.get() + 0*ne23), cu_data_type, ne0, ne23, cu_compute_type, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); } -#endif - if (dst->op_params[0] == GGML_PREC_DEFAULT && cu_data_type == CUDA_R_16F) { - const to_fp32_cuda_t to_fp32_cuda = ggml_get_to_fp32_cuda(GGML_TYPE_F16); - to_fp32_cuda(dst_f16.get(), dst_ddf, ne_dst, main_stream); + // Convert output back to F32 if needed + if (dst->op_params[0] == GGML_PREC_DEFAULT && cu_data_type != CUDA_R_32F) { + const to_fp32_cuda_t to_fp32_cuda = ggml_get_to_fp32_cuda(traits::ggml_type_val); + to_fp32_cuda(dst_temp.get(), dst_ddf, ne_dst, main_stream); + } +} + +static void ggml_cuda_mul_mat_batched_cublas(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { + GGML_ASSERT(src0->type == GGML_TYPE_F16 || src0->type == GGML_TYPE_BF16 || src0->type == GGML_TYPE_F32); + + switch (src0->type) { + case GGML_TYPE_F32: + ggml_cuda_mul_mat_batched_cublas_impl(ctx, src0, src1, dst); + break; + case GGML_TYPE_BF16: + ggml_cuda_mul_mat_batched_cublas_impl(ctx, src0, src1, dst); + break; + case GGML_TYPE_F16: + ggml_cuda_mul_mat_batched_cublas_impl(ctx, src0, src1, dst); + break; + default: + GGML_ABORT("Unsupported type"); } } @@ -1967,21 +2059,17 @@ static void ggml_cuda_mul_mat(ggml_backend_cuda_context & ctx, const ggml_tensor const bool bad_padding_clear = ggml_backend_buffer_get_usage(src0->buffer) == GGML_BACKEND_BUFFER_USAGE_COMPUTE && ggml_nbytes(src0) != ggml_backend_buffer_get_alloc_size(src0->buffer, src0) && src0->view_src; - bool use_mul_mat_vec = (src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16 || src0->type == GGML_TYPE_BF16) - && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32 - && src0->ne[0] % 2 == 0 && src1->ne[1] == 1; + bool use_mul_mat_vec_f = (src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16 || src0->type == GGML_TYPE_BF16) + && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32; + bool use_mul_mat_f = !ggml_is_quantized(src0->type) + && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32; bool use_mul_mat_vec_q = ggml_is_quantized(src0->type) && !bad_padding_clear && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32 - && src1->ne[1] <= MMVQ_MAX_BATCH_SIZE - && src0->type != GGML_TYPE_MXFP4; - bool use_mul_mat_vec_mxfp4 = src0->type == GGML_TYPE_MXFP4 - && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32 - && src0->ne[0] % 2 == 0 && src1->ne[1] == 1; + && src1->ne[1] <= MMVQ_MAX_BATCH_SIZE; bool use_mul_mat_q = ggml_is_quantized(src0->type) && !bad_padding_clear && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32; - bool any_gpus_with_slow_fp16 = false; - bool any_gpus_without_fp16_mma = false; + bool any_gpus_with_slow_fp16 = false; if (split) { ggml_backend_cuda_split_buffer_type_context * buft_ctx = (ggml_backend_cuda_split_buffer_type_context *) src0->buffer->buft->context; @@ -1992,16 +2080,20 @@ static void ggml_cuda_mul_mat(ggml_backend_cuda_context & ctx, const ggml_tensor 
continue; } - const int cc = ggml_cuda_info().devices[id].cc; - use_mul_mat_q = use_mul_mat_q && ggml_cuda_should_use_mmq(src0->type, cc, src1->ne[1]); - any_gpus_with_slow_fp16 = any_gpus_with_slow_fp16 || !fast_fp16_hardware_available(cc); - any_gpus_without_fp16_mma = any_gpus_without_fp16_mma || !fp16_mma_hardware_available(cc); + const int cc = ggml_cuda_info().devices[id].cc; + const int warp_size = ggml_cuda_info().devices[id].warp_size; + use_mul_mat_q = use_mul_mat_q && ggml_cuda_should_use_mmq(src0->type, cc, src1->ne[1]); + use_mul_mat_f = use_mul_mat_f && ggml_cuda_should_use_mmf(src0->type, cc, warp_size, src0->ne, src1->ne[1]); + use_mul_mat_vec_f = use_mul_mat_vec_f && ggml_cuda_should_use_mmvf(src0->type, cc, src0->ne, src1->ne[1]); + any_gpus_with_slow_fp16 = any_gpus_with_slow_fp16 || !fast_fp16_hardware_available(cc); } } else { - const int cc = ggml_cuda_info().devices[ctx.device].cc; - use_mul_mat_q = use_mul_mat_q && ggml_cuda_should_use_mmq(src0->type, cc, src1->ne[1]); - any_gpus_with_slow_fp16 = any_gpus_with_slow_fp16 || !fast_fp16_hardware_available(cc); - any_gpus_without_fp16_mma = any_gpus_without_fp16_mma || !fp16_mma_hardware_available(cc); + const int cc = ggml_cuda_info().devices[ctx.device].cc; + const int warp_size = ggml_cuda_info().devices[ctx.device].warp_size; + use_mul_mat_q = use_mul_mat_q && ggml_cuda_should_use_mmq(src0->type, cc, src1->ne[1]); + use_mul_mat_f = use_mul_mat_f && ggml_cuda_should_use_mmf(src0->type, cc, warp_size, src0->ne, src1->ne[1]); + use_mul_mat_vec_f = use_mul_mat_vec_f && ggml_cuda_should_use_mmvf(src0->type, cc, src0->ne, src1->ne[1]); + any_gpus_with_slow_fp16 = any_gpus_with_slow_fp16 || !fast_fp16_hardware_available(cc); } // debug helpers @@ -2012,26 +2104,32 @@ static void ggml_cuda_mul_mat(ggml_backend_cuda_context & ctx, const ggml_tensor //printf("src0 is contiguous %d, transposed %d, type = %s, name = %s\n", ggml_is_contiguous(src0), ggml_is_transposed(src0), ggml_type_name(src0->type), src0->name); //printf("src1 is contiguous %d, transposed %d, type = %s, name = %s\n", ggml_is_contiguous(src1), ggml_is_transposed(src1), ggml_type_name(src1->type), src1->name); - if (!split && use_mul_mat_vec && (src0->ne[1] <= MMV_MAX_ROWS || any_gpus_without_fp16_mma)) { + //TODO update for generic tensor parallelism + const int cc = ggml_cuda_info().devices[ggml_cuda_get_device()].cc; + bool use_batched_cublas_f16 = src0->type == GGML_TYPE_F16 && (src1->type == GGML_TYPE_F16 || !any_gpus_with_slow_fp16); + bool use_batched_cublas_bf16 = src0->type == GGML_TYPE_BF16 && bf16_mma_hardware_available(cc); + bool use_batched_cublas_f32 = src0->type == GGML_TYPE_F32; + + if (!split && use_mul_mat_vec_f) { // the custom F16 vector kernel can be used over batched cuBLAS GEMM // but this is only faster for GPUs without tensor cores or with a thin src0 matrix (particularly KQV in attention) - ggml_cuda_mul_mat_vec(ctx, src0, src1, nullptr, dst); + ggml_cuda_mul_mat_vec_f(ctx, src0, src1, nullptr, dst); + } else if (!split && use_mul_mat_f) { + ggml_cuda_mul_mat_f(ctx, src0, src1, nullptr, dst); } else if (!split && use_mul_mat_vec_q) { ggml_cuda_mul_mat_vec_q(ctx, src0, src1, nullptr, dst); } else if (!split && use_mul_mat_q) { ggml_cuda_mul_mat_q(ctx, src0, src1, nullptr, dst); - } else if (!split && src0->type == GGML_TYPE_F16 && (src1->type == GGML_TYPE_F16 || !any_gpus_with_slow_fp16) && - !ggml_is_transposed(src0) && !ggml_is_transposed(src1) && src1->ne[2]*src1->ne[3] > 1) { + } else if (!split && (use_batched_cublas_f16 || 
use_batched_cublas_bf16 || use_batched_cublas_f32) + && !ggml_is_transposed(src0) && !ggml_is_transposed(src1) && src1->ne[2]*src1->ne[3] > 1) { // general KQ + KQV multi-batch without FlashAttention ggml_cuda_mul_mat_batched_cublas(ctx, src0, src1, dst); - } else if (use_mul_mat_vec) { - ggml_cuda_op_mul_mat(ctx, src0, src1, dst, ggml_cuda_op_mul_mat_vec, nullptr); + } else if (use_mul_mat_vec_f) { + ggml_cuda_op_mul_mat(ctx, src0, src1, dst, ggml_cuda_op_mul_mat_vec_f, nullptr); } else if (use_mul_mat_vec_q) { ggml_cuda_op_mul_mat(ctx, src0, src1, dst, ggml_cuda_op_mul_mat_vec_q, quantize_row_q8_1_cuda); } else if (use_mul_mat_q) { ggml_cuda_op_mul_mat(ctx, src0, src1, dst, ggml_cuda_op_mul_mat_q, quantize_mmq_q8_1_cuda); - } else if (use_mul_mat_vec_mxfp4) { - ggml_cuda_op_mul_mat(ctx, src0, src1, dst, ggml_cuda_op_mul_mat_vec_mxfp4, nullptr); } else { ggml_cuda_op_mul_mat(ctx, src0, src1, dst, ggml_cuda_op_mul_mat_cublas, nullptr); } @@ -2051,15 +2149,11 @@ static void ggml_cuda_mul_mat_id(ggml_backend_cuda_context & ctx, ggml_tensor * const int cc = ggml_cuda_info().devices[ggml_cuda_get_device()].cc; if (src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) { - if (ne2 == 1 && src0->type == GGML_TYPE_MXFP4) { - ggml_cuda_mul_mat_vec_mxfp4(ctx, src0, src1, ids, dst); - return; - } if (ne2 == 1) { if (ggml_is_quantized(src0->type)) { ggml_cuda_mul_mat_vec_q(ctx, src0, src1, ids, dst); } else { - ggml_cuda_mul_mat_vec(ctx, src0, src1, ids, dst); + ggml_cuda_mul_mat_vec_f(ctx, src0, src1, ids, dst); } return; } @@ -2209,6 +2303,9 @@ static bool ggml_cuda_compute_forward(ggml_backend_cuda_context & ctx, struct gg case GGML_OP_GET_ROWS_BACK: ggml_cuda_op_get_rows_back(ctx, dst); break; + case GGML_OP_SET_ROWS: + ggml_cuda_op_set_rows(ctx, dst); + break; case GGML_OP_DUP: ggml_cuda_dup(ctx, dst); break; @@ -2222,6 +2319,9 @@ static bool ggml_cuda_compute_forward(ggml_backend_cuda_context & ctx, struct gg case GGML_OP_ADD1: // TODO: more efficient implementation ggml_cuda_op_add(ctx, dst); break; + case GGML_OP_ADD_ID: + ggml_cuda_op_add_id(ctx, dst); + break; case GGML_OP_SUB: ggml_cuda_op_sub(ctx, dst); break; @@ -2254,6 +2354,9 @@ static bool ggml_cuda_compute_forward(ggml_backend_cuda_context & ctx, struct gg case GGML_UNARY_OP_SILU: ggml_cuda_op_silu(ctx, dst); break; + case GGML_UNARY_OP_GELU_ERF: + ggml_cuda_op_gelu_erf(ctx, dst); + break; case GGML_UNARY_OP_GELU_QUICK: ggml_cuda_op_gelu_quick(ctx, dst); break; @@ -2275,6 +2378,33 @@ static bool ggml_cuda_compute_forward(ggml_backend_cuda_context & ctx, struct gg case GGML_UNARY_OP_EXP: ggml_cuda_op_exp(ctx, dst); break; + case GGML_UNARY_OP_ELU: + ggml_cuda_op_elu(ctx, dst); + break; + default: + return false; + } + break; + case GGML_OP_GLU: + switch (ggml_get_glu_op(dst)) { + case GGML_GLU_OP_REGLU: + ggml_cuda_op_reglu(ctx, dst); + break; + case GGML_GLU_OP_GEGLU: + ggml_cuda_op_geglu(ctx, dst); + break; + case GGML_GLU_OP_SWIGLU: + ggml_cuda_op_swiglu(ctx, dst); + break; + case GGML_GLU_OP_SWIGLU_OAI: + ggml_cuda_op_swiglu_oai(ctx, dst); + break; + case GGML_GLU_OP_GEGLU_ERF: + ggml_cuda_op_geglu_erf(ctx, dst); + break; + case GGML_GLU_OP_GEGLU_QUICK: + ggml_cuda_op_geglu_quick(ctx, dst); + break; default: return false; } @@ -2366,9 +2496,18 @@ static bool ggml_cuda_compute_forward(ggml_backend_cuda_context & ctx, struct gg case GGML_OP_ROPE_BACK: ggml_cuda_op_rope_back(ctx, dst); break; + case GGML_OP_ROLL: + ggml_cuda_op_roll(ctx, dst); + break; case GGML_OP_IM2COL: ggml_cuda_op_im2col(ctx, dst); break; + case 
GGML_OP_CONV_2D_DW: + ggml_cuda_op_conv2d_dw(ctx, dst); + break; + case GGML_OP_CONV_TRANSPOSE_2D: + ggml_cuda_conv_2d_transpose_p0(ctx, dst); + break; case GGML_OP_CONV_TRANSPOSE_1D: ggml_cuda_op_conv_transpose_1d(ctx,dst); break; @@ -2532,8 +2671,23 @@ static bool check_node_graph_compatibility_and_refresh_copy_ops(ggml_backend_cud // Loop over nodes in GGML graph to obtain info needed for CUDA graph cuda_ctx->cuda_graph->cpy_dest_ptrs.clear(); - const std::string gemma3n_per_layer_proj_src1_name = " (reshaped)"; - const std::string gemma3n_node_name = "node_"; + // This fix was added in llama.cpp and Ollama in parallel, but with + // different tensor names. + // llama.cpp: https://github.com/ggml-org/llama.cpp/pull/14741 + // ollama: https://github.com/ollama/ollama/pull/11525 + + const std::string gemma3n_per_layer_proj_src1_name_ollama = " (reshaped)"; + const std::string gemma3n_node_name_ollama = "node_"; + + const std::string gemma3n_per_layer_proj_src0_name = "inp_per_layer_selected"; + const std::string gemma3n_per_layer_proj_src1_name = "per_layer_proj"; + + const std::string ffn_moe_bias_suffix = "_exps.bias"; + + const std::string ffn_moe_gate_bias_prefix = "ffn_moe_gate_biased"; + const std::string ffn_moe_up_bias_prefix = "ffn_moe_up_biased"; + const std::string ffn_moe_down_bias_prefix = "ffn_moe_down_biased"; + for (int i = 0; i < cgraph->n_nodes; i++) { ggml_tensor * node = cgraph->nodes[i]; @@ -2556,6 +2710,30 @@ static bool check_node_graph_compatibility_and_refresh_copy_ops(ggml_backend_cud #endif } + if (node->op == GGML_OP_ADD && + node->src[1] && node->src[1]->ne[1] > 1 && + // ollama + // workarounds to exclude Gemma3n's `project_per_layer_input` operation from the batch-size heuristic, specific to ollama's implementation of gemma3n + // number of layers is different for per_layer_proj between gemma3n:2b and gemma3n:4b, which is why we don't check that value here + !(node->ne[0] == 256 && node->ne[2] == 1 && node->ne[3] == 1 && node->src[0] ? std::string(node->src[0]->name).find(gemma3n_node_name_ollama) != std::string::npos : false && node->src[1] ? node->src[1]->name == gemma3n_per_layer_proj_src1_name_ollama : false) && + node->src[1] ? std::string(node->src[1]->name).find(ffn_moe_bias_suffix) == std::string::npos : false && + // upstream + (node->src[0] ? node->src[0]->name != gemma3n_per_layer_proj_src0_name : true) && + (node->src[1] ? node->src[1]->name != gemma3n_per_layer_proj_src1_name : true) && + strncmp(node->name, ffn_moe_gate_bias_prefix.c_str(), ffn_moe_gate_bias_prefix.size()) != 0 && + strncmp(node->name, ffn_moe_up_bias_prefix.c_str(), ffn_moe_up_bias_prefix.size()) != 0 && + strncmp(node->name, ffn_moe_down_bias_prefix.c_str(), ffn_moe_down_bias_prefix.size()) != 0) { + // disable CUDA graphs for batch size > 1 for now while excluding the matrix-matrix addition as part of Gemma3n's `project_per_layer_input` operation + // by means of matching node names. See + // https://github.com/ggml-org/llama.cpp/blob/f9a31eea06a859e34cecb88b4d020c7f03d86cc4/src/llama-model.cpp#L10199-L10241 and + // https://github.com/huggingface/transformers/blob/bda75b4011239d065de84aa3e744b67ebfa7b245/src/transformers/models/gemma3n/modeling_gemma3n.py#L1773, + // Generally, changes in batch size or context size can cause changes to the grid size of some kernels. 
+ use_cuda_graph = false; +#ifndef NDEBUG + GGML_LOG_DEBUG("%s: disabling CUDA graphs due to batch size > 1 [%s] [%ld %ld %ld %ld]\n", __func__, node->name, node->ne[0], node->ne[1], node->ne[2], node->ne[3]); +#endif + } + if (node->op == GGML_OP_CPY) { // Store the pointers which are updated for each token, such that these can be sent @@ -2695,13 +2873,77 @@ } #endif +static bool ggml_cuda_can_fuse(const struct ggml_cgraph * cgraph, int node_idx, std::initializer_list<enum ggml_op> ops, std::initializer_list<enum ggml_unary_op> unary_ops) { +#ifndef NDEBUG + const size_t num_unary = std::count(ops.begin(), ops.end(), GGML_OP_UNARY); + GGML_ASSERT(unary_ops.size() == num_unary); +#endif + + if (!ggml_can_fuse(cgraph, node_idx, ops)) { + return false; + } + + if (ops.size() == 2 && ops.begin()[0] == GGML_OP_RMS_NORM && ops.begin()[1] == GGML_OP_MUL) { + const ggml_tensor *rms_norm = cgraph->nodes[node_idx]; + const ggml_tensor *mul = cgraph->nodes[node_idx+1]; + + GGML_ASSERT(rms_norm->src[0]->type == GGML_TYPE_F32); + GGML_ASSERT(rms_norm->type == GGML_TYPE_F32); + + //rms norm only supports F32 + if (mul->src[0]->type != GGML_TYPE_F32 || + mul->src[1]->type != GGML_TYPE_F32 || + mul->type != GGML_TYPE_F32) { + return false; + } + + //if rms norm is the B operand, then we don't handle broadcast + if (rms_norm == mul->src[1] && !ggml_are_same_shape(mul->src[0], rms_norm->src[1])) { + return false; + } + + //rms_norm kernel assumes contiguous rows + if (!ggml_is_contiguous_rows(mul->src[0]) || !ggml_is_contiguous_rows(mul->src[1])) { + return false; + } + + return true; + } + + if (ops.size() == 3 && ops.begin()[0] == GGML_OP_SCALE && ops.begin()[1] == GGML_OP_UNARY && ops.begin()[2] == GGML_OP_SCALE + && unary_ops.size() == 1 && unary_ops.begin()[0] == GGML_UNARY_OP_TANH) { + const ggml_tensor *scale = cgraph->nodes[node_idx]; + const ggml_tensor *tanh = cgraph->nodes[node_idx+1]; + const ggml_tensor *scale2 = cgraph->nodes[node_idx+2]; + + GGML_ASSERT(scale->src[0]->type == GGML_TYPE_F32); + GGML_ASSERT(scale->type == GGML_TYPE_F32); + + if (ggml_get_unary_op(tanh) != GGML_UNARY_OP_TANH) { + return false; + } + + // Check for bias + if (ggml_get_op_params_f32(scale, 1) != 0.0f || ggml_get_op_params_f32(scale2, 1) != 0.0f) { + return false; + } + + return true; + } + + return false; +} + static void evaluate_and_capture_cuda_graph(ggml_backend_cuda_context * cuda_ctx, ggml_cgraph * cgraph, bool & graph_evaluated_or_captured, bool & use_cuda_graph, bool & cuda_graph_update_required) { + // flag used to determine whether it is an integrated_gpu + const bool integrated = ggml_cuda_info().devices[cuda_ctx->device].integrated; while (!graph_evaluated_or_captured) { // Only perform the graph execution if CUDA graphs are not enabled, or we are capturing the graph. // With the use of CUDA graphs, the execution will be performed by the graph launch.
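For reference, the two node sequences admitted by `ggml_cuda_can_fuse` compute, in unfused form, `y = rms_norm(x) * w` and the softcap `y = s1 * tanh(s0 * x)` (SCALE, TANH, SCALE with zero bias on both scales). A plain CPU sketch of those semantics; the eps and scale values are illustrative:

```cpp
#include <cmath>
#include <cstdio>
#include <vector>

// RMS_NORM followed by MUL, collapsed into one pass as the fused kernel does:
// y[i] = x[i] / sqrt(mean(x^2) + eps) * w[i]
static void rms_norm_mul(const std::vector<float> & x, const std::vector<float> & w,
                         std::vector<float> & y, float eps) {
    float sumsq = 0.0f;
    for (float v : x) sumsq += v*v;
    const float inv_rms = 1.0f / sqrtf(sumsq / x.size() + eps);
    for (size_t i = 0; i < x.size(); ++i) y[i] = x[i] * inv_rms * w[i];
}

// SCALE -> TANH -> SCALE with zero bias on both scales collapses to this:
static float softcap(float x, float s0, float s1) {
    return s1 * tanhf(s0 * x);
}

int main() {
    std::vector<float> x = {1.0f, -2.0f, 3.0f}, w = {0.5f, 0.5f, 0.5f}, y(3);
    rms_norm_mul(x, w, y, 1e-6f);
    printf("fused rms_norm*mul: %f %f %f\n", y[0], y[1], y[2]);
    printf("softcap(10, 1/50, 50) = %f\n", softcap(10.0f, 1.0f/50.0f, 50.0f));
    return 0;
}
```

This is also why the bias check above matters: a nonzero bias on either SCALE would not match the `s1 * tanh(s0 * x)` form the fused softcap kernel implements.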
if (!use_cuda_graph || cuda_graph_update_required) { + for (int i = 0; i < cgraph->n_nodes; i++) { ggml_tensor * node = cgraph->nodes[i]; @@ -2709,16 +2951,32 @@ static void evaluate_and_capture_cuda_graph(ggml_backend_cuda_context * cuda_ctx continue; } + static bool disable_fusion = (getenv("GGML_CUDA_DISABLE_FUSION") != nullptr); + if (!disable_fusion) { + if (ggml_cuda_can_fuse(cgraph, i, { GGML_OP_RMS_NORM, GGML_OP_MUL }, {})) { + ggml_cuda_op_rms_norm_fused(*cuda_ctx, node, cgraph->nodes[i+1]); + i++; + continue; + } + + if (ggml_cuda_can_fuse(cgraph, i, { GGML_OP_SCALE, GGML_OP_UNARY, GGML_OP_SCALE }, { GGML_UNARY_OP_TANH })) { + i += 2; + ggml_cuda_op_softcap(*cuda_ctx, cgraph->nodes[i], node); + continue; + } + } #ifndef NDEBUG assert(node->buffer->buft == ggml_backend_cuda_buffer_type(cuda_ctx->device)); for (int j = 0; j < GGML_MAX_SRC; j++) { if (node->src[j] != nullptr) { assert(node->src[j]->buffer); assert(node->src[j]->buffer->buft == ggml_backend_cuda_buffer_type(cuda_ctx->device) || - ggml_backend_buft_is_cuda_split(node->src[j]->buffer->buft)); + ggml_backend_buft_is_cuda_split(node->src[j]->buffer->buft) || (integrated && ggml_backend_buft_is_cuda_host(node->src[j]->buffer->buft))); } } -#endif +#else + GGML_UNUSED(integrated); +#endif // NDEBUG bool ok = ggml_cuda_compute_forward(*cuda_ctx, node); if (!ok) { @@ -2737,6 +2995,11 @@ static void evaluate_and_capture_cuda_graph(ggml_backend_cuda_context * cuda_ctx CUDA_CHECK(cudaStreamEndCapture(cuda_ctx->stream(), &cuda_ctx->cuda_graph->graph)); graph_evaluated_or_captured = true; // CUDA graph has been captured + + std::lock_guard lock(ggml_cuda_lock); + if (ggml_cuda_lock_counter.fetch_sub(1, std::memory_order_relaxed) == 1) { + ggml_cuda_lock_cv.notify_all(); + } } else { graph_evaluated_or_captured = true; // ggml graph has been directly evaluated } @@ -2812,7 +3075,13 @@ static enum ggml_status ggml_backend_cuda_graph_compute(ggml_backend_t backend, } } - if (use_cuda_graph && cuda_graph_update_required) { // Start CUDA graph capture + if (use_cuda_graph && cuda_graph_update_required) { + // Start CUDA graph capture + { + std::lock_guard lock(ggml_cuda_lock); + ggml_cuda_lock_counter.fetch_add(1, std::memory_order_relaxed); + } + CUDA_CHECK(cudaStreamBeginCapture(cuda_ctx->stream(), cudaStreamCaptureModeRelaxed)); } @@ -3043,22 +3312,40 @@ static bool ggml_backend_cuda_device_supports_op(ggml_backend_dev_t dev, const g case GGML_UNARY_OP_SIGMOID: case GGML_UNARY_OP_HARDSIGMOID: case GGML_UNARY_OP_HARDSWISH: + case GGML_UNARY_OP_GELU_ERF: case GGML_UNARY_OP_GELU_QUICK: case GGML_UNARY_OP_TANH: case GGML_UNARY_OP_EXP: + case GGML_UNARY_OP_ELU: return ggml_is_contiguous(op->src[0]); default: return false; } break; + case GGML_OP_GLU: + switch (ggml_get_glu_op(op)) { + case GGML_GLU_OP_REGLU: + case GGML_GLU_OP_GEGLU: + case GGML_GLU_OP_SWIGLU: + case GGML_GLU_OP_SWIGLU_OAI: + case GGML_GLU_OP_GEGLU_ERF: + case GGML_GLU_OP_GEGLU_QUICK: + return ggml_is_contiguous_1(op->src[0]); + default: + return false; + } + break; case GGML_OP_MUL_MAT: case GGML_OP_MUL_MAT_ID: { struct ggml_tensor * a = op->src[0]; struct ggml_tensor * b = op->src[1]; - // for small weight matrices the active device can end up without any rows, don't use row split in those cases - // this avoids some edge cases (and the performance would not be good anyways) if (a->buffer && ggml_backend_buft_is_cuda_split(a->buffer->buft)) { + if (a->ne[2] > 1 || a->ne[3] > 1) { + return false; + } + // for small weight matrices the active device can end up without 
any rows, don't use row split in those cases + // this avoids some edge cases (and the performance would not be good anyways) ggml_backend_cuda_split_buffer_type_context * buft_ctx = (ggml_backend_cuda_split_buffer_type_context *) a->buffer->buft->context; int64_t row_low; int64_t row_high; @@ -3071,9 +3358,16 @@ static bool ggml_backend_cuda_device_supports_op(ggml_backend_dev_t dev, const g return false; } #ifdef GGML_USE_MUSA - if (b->type == GGML_TYPE_F16 && b->ne[2]*b->ne[3] > 1 && - !ggml_is_transposed(a) && !ggml_is_transposed(b)) { - return false; + const int cc = ggml_cuda_info().devices[dev_ctx->device].cc; + if (b->ne[2]*b->ne[3] > 1 && !ggml_is_transposed(a) && !ggml_is_transposed(b)) { + if (GGML_CUDA_CC_IS_QY1(cc) && op->op == GGML_OP_MUL_MAT && + a->type == GGML_TYPE_F16 && b->type == GGML_TYPE_F16) { + return false; + } + if (GGML_CUDA_CC_IS_QY2(cc) && op->op == GGML_OP_MUL_MAT_ID && + a->type == GGML_TYPE_Q2_K && b->type == GGML_TYPE_F32) { + return false; + } } #endif // GGML_USE_MUSA switch (a->type) { @@ -3084,6 +3378,7 @@ static bool ggml_backend_cuda_device_supports_op(ggml_backend_dev_t dev, const g case GGML_TYPE_Q5_0: case GGML_TYPE_Q5_1: case GGML_TYPE_Q8_0: + case GGML_TYPE_MXFP4: case GGML_TYPE_Q2_K: case GGML_TYPE_Q3_K: case GGML_TYPE_Q4_K: @@ -3100,12 +3395,6 @@ static bool ggml_backend_cuda_device_supports_op(ggml_backend_dev_t dev, const g case GGML_TYPE_IQ4_NL: case GGML_TYPE_IQ4_XS: case GGML_TYPE_BF16: - case GGML_TYPE_MXFP4: -#ifdef GGML_USE_MUSA - if (a->type == GGML_TYPE_Q3_K) { - return false; - } -#endif // GGML_USE_MUSA return true; default: return false; @@ -3118,6 +3407,8 @@ static bool ggml_backend_cuda_device_supports_op(ggml_backend_dev_t dev, const g switch (op->src[0]->type) { case GGML_TYPE_F16: case GGML_TYPE_F32: + case GGML_TYPE_BF16: + case GGML_TYPE_I32: case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_1: case GGML_TYPE_Q5_0: @@ -3132,17 +3423,21 @@ static bool ggml_backend_cuda_device_supports_op(ggml_backend_dev_t dev, const g { return op->type == GGML_TYPE_F32 && op->src[0]->type == GGML_TYPE_F32 && op->ne[2] == 1 && op->ne[3] == 1; } break; + case GGML_OP_SET_ROWS: + { + return (op->type == GGML_TYPE_F32 || op->type == GGML_TYPE_F16 || op->type == GGML_TYPE_BF16 || + op->type == GGML_TYPE_Q4_0 || op->type == GGML_TYPE_Q4_1 || op->type == GGML_TYPE_Q5_0 || + op->type == GGML_TYPE_Q5_1 || op->type == GGML_TYPE_Q8_0 || op->type == GGML_TYPE_IQ4_NL) && + op->src[0]->type == GGML_TYPE_F32 && + op->src[1]->type == GGML_TYPE_I64; + } break; case GGML_OP_CPY: { ggml_type src0_type = op->src[0]->type; ggml_type src1_type = op->src[1]->type; - if (src0_type == GGML_TYPE_F32 && src1_type == GGML_TYPE_F32) { - return true; - } - if (src0_type == GGML_TYPE_F32 && src1_type == GGML_TYPE_BF16) { - return true; - } - if (src0_type == GGML_TYPE_F32 && src1_type == GGML_TYPE_F16) { + if ((src0_type == GGML_TYPE_F32 || src0_type == GGML_TYPE_BF16 || src0_type == GGML_TYPE_F16) && + (src1_type == GGML_TYPE_F32 || src1_type == GGML_TYPE_BF16 || src1_type == GGML_TYPE_F16) + ) { return true; } if (src0_type == GGML_TYPE_F32 && src1_type == GGML_TYPE_Q8_0) { @@ -3178,12 +3473,6 @@ static bool ggml_backend_cuda_device_supports_op(ggml_backend_dev_t dev, const g if (src0_type == GGML_TYPE_F32 && src1_type == GGML_TYPE_IQ4_NL) { return true; } - if (src0_type == GGML_TYPE_F16 && src1_type == GGML_TYPE_F16) { - return true; - } - if (src0_type == GGML_TYPE_F16 && src1_type == GGML_TYPE_F32) { - return true; - } if (src0_type == src1_type && 
ggml_is_contiguous(op->src[0]) && ggml_is_contiguous(op->src[1])) { return true; } @@ -3236,6 +3525,7 @@ static bool ggml_backend_cuda_device_supports_op(ggml_backend_dev_t dev, const g case GGML_OP_PERMUTE: case GGML_OP_TRANSPOSE: case GGML_OP_ADD: + case GGML_OP_ADD_ID: case GGML_OP_ADD1: case GGML_OP_SUB: case GGML_OP_MUL: @@ -3247,12 +3537,26 @@ static bool ggml_backend_cuda_device_supports_op(ggml_backend_dev_t dev, const g case GGML_OP_COS: case GGML_OP_CLAMP: case GGML_OP_LOG: - case GGML_OP_SSM_SCAN: - case GGML_OP_SSM_CONV: return true; + case GGML_OP_SSM_SCAN: { + if (op->src[3]->ne[0] == 1) { + // Mamba2 + // (kernel only supports (d_state == 128 || d_state == 256) && d_head % 16 == 0) + return (op->src[0]->ne[0] == 128 || op->src[0]->ne[0] == 256) && op->src[0]->ne[1] % 16 == 0; + } else { + // Mamba + // (kernel only supports d_state == 16, d_head == 1, n_head % 128 == 0, n_group == 1) + return op->src[0]->ne[0] == 16 && op->src[0]->ne[1] == 1 && op->src[0]->ne[2] % 128 == 0 && op->src[4]->ne[1] == 1; + } + } + case GGML_OP_SSM_CONV: { + // assumes d_inner % threads == 0 + return op->src[0]->ne[1] % 128 == 0; + } case GGML_OP_CONT: - return op->src[0]->type != GGML_TYPE_BF16; + return true; case GGML_OP_DIAG_MASK_INF: + return true; case GGML_OP_SOFT_MAX: return true; case GGML_OP_SOFT_MAX_BACK: { @@ -3260,11 +3564,18 @@ static bool ggml_backend_cuda_device_supports_op(ggml_backend_dev_t dev, const g memcpy(&max_bias, (const float *) op->op_params + 1, sizeof(float)); return max_bias == 0.0f; } + case GGML_OP_ROLL: + if(op->src[0]->type == GGML_TYPE_F32) { + return true; + } + return false; case GGML_OP_ROPE: case GGML_OP_ROPE_BACK: { return op->src[0]->nb[0] == ggml_type_size(op->src[0]->type) && ggml_is_contiguous_2(op->src[0]); } case GGML_OP_IM2COL: + case GGML_OP_CONV_2D_DW: + case GGML_OP_CONV_TRANSPOSE_2D: case GGML_OP_POOL_2D: case GGML_OP_SUM: case GGML_OP_SUM_ROWS: @@ -3275,7 +3586,6 @@ static bool ggml_backend_cuda_device_supports_op(ggml_backend_dev_t dev, const g case GGML_OP_GROUP_NORM: return ggml_is_contiguous(op->src[0]); case GGML_OP_UPSCALE: - return op->src[0]->type == GGML_TYPE_F32 && op->op_params[0] == GGML_SCALE_MODE_NEAREST; case GGML_OP_PAD: case GGML_OP_ARANGE: case GGML_OP_TIMESTEP_EMBEDDING: @@ -3290,16 +3600,18 @@ static bool ggml_backend_cuda_device_supports_op(ggml_backend_dev_t dev, const g #endif // FLASH_ATTN_AVAILABLE if (op->src[1]->ne[0] != op->src[2]->ne[0]) { const int cc = ggml_cuda_info().devices[dev_ctx->device].cc; - if (!new_mma_available(cc) || cc < GGML_CUDA_CC_AMPERE) { + if (!turing_mma_available(cc)) { return false; } const int gqa_ratio = op->src[0]->ne[2] / op->src[1]->ne[2]; return op->src[1]->ne[0] == 576 && op->src[2]->ne[0] == 512 && op->src[3] && gqa_ratio % 16 == 0; } - if (op->src[0]->ne[0] == 192) { + // TODO: more general-purpose attention sink support [TAG_ATTN_SINKS] + if (op->src[4] && !fp16_mma_available(ggml_cuda_info().devices[dev_ctx->device].cc) + && op->src[0]->ne[0] != 64 && op->src[0]->ne[0] != 128) { return false; } - if (op->src[0]->ne[3] != 1) { + if (op->src[0]->ne[0] == 192) { return false; } if (op->src[1]->type == GGML_TYPE_BF16 || op->src[2]->type == GGML_TYPE_BF16) { @@ -3314,6 +3626,9 @@ static bool ggml_backend_cuda_device_supports_op(ggml_backend_dev_t dev, const g if (op->src[0]->ne[0] == 256 && op->src[1]->type == GGML_TYPE_F16 && op->src[2]->type == GGML_TYPE_F16) { return true; } + if (op->src[3] && op->src[3]->ne[2] != 1) { + return false; + } return 
fp16_mma_available(ggml_cuda_info().devices[dev_ctx->device].cc) && op->src[1]->type == GGML_TYPE_F16 && op->src[2]->type == GGML_TYPE_F16; } @@ -3327,7 +3642,9 @@ static bool ggml_backend_cuda_device_supports_op(ggml_backend_dev_t dev, const g } static bool ggml_backend_cuda_device_supports_buft(ggml_backend_dev_t dev, ggml_backend_buffer_type_t buft) { - return (ggml_backend_buft_is_cuda(buft) || ggml_backend_buft_is_cuda_split(buft)) && buft->device == dev; + ggml_backend_cuda_device_context * dev_ctx = (ggml_backend_cuda_device_context *) dev->context; + const bool integrated = ggml_cuda_info().devices[dev_ctx->device].integrated; + return (((ggml_backend_buft_is_cuda(buft) || ggml_backend_buft_is_cuda_split(buft)) && buft->device == dev) || (integrated && ggml_backend_buft_is_cuda_host(buft))); } static int64_t get_op_batch_size(const ggml_tensor * op) { @@ -3558,10 +3875,10 @@ ggml_backend_t ggml_backend_cuda_init(int device) { } ggml_backend_t cuda_backend = new ggml_backend { - /* .guid = */ ggml_backend_cuda_guid(), - /* .interface = */ ggml_backend_cuda_interface, - /* .device = */ ggml_backend_reg_dev_get(ggml_backend_cuda_reg(), device), - /* .context = */ ctx, + /* .guid = */ ggml_backend_cuda_guid(), + /* .iface = */ ggml_backend_cuda_interface, + /* .device = */ ggml_backend_reg_dev_get(ggml_backend_cuda_reg(), device), + /* .context = */ ctx, }; return cuda_backend; diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/im2col.cu b/ml/backend/ggml/ggml/src/ggml-cuda/im2col.cu index 86a54e42b..16bb9bec9 100644 --- a/ml/backend/ggml/ggml/src/ggml-cuda/im2col.cu +++ b/ml/backend/ggml/ggml/src/ggml-cuda/im2col.cu @@ -1,65 +1,76 @@ #include "im2col.cuh" +#define MAX_GRIDDIM_Z 65535 + template static __global__ void im2col_kernel( - const float * x, T * dst, int64_t batch_offset, - int64_t offset_delta, int64_t IC, int64_t IW, int64_t IH, int64_t OH, int64_t OW, int64_t KW, int64_t KH, int64_t pelements, int64_t CHW, + const float * x, T * dst, + int64_t IC, int64_t IW, int64_t IH, int64_t OH, int64_t OW, int64_t KW, int64_t KH, + int64_t IC_IH_IW, int64_t IH_IW, int64_t N_OH, int64_t KH_KW, int64_t IC_KH_KW, int s0, int s1, int p0, int p1, int d0, int d1) { const int64_t i = threadIdx.x + blockIdx.x * blockDim.x; - if (i >= pelements) { + if (i >= IC_KH_KW) { return; } - const int64_t ksize = OW * (KH > 1 ? 
KW : 1); - const int64_t kx = i / ksize; - const int64_t kd = kx * ksize; - const int64_t ky = (i - kd) / OW; - const int64_t ix = i % OW; + const int64_t iic = i / (KH_KW); + const int64_t rem = i - iic * KH_KW; + const int64_t ikh = rem / KW; + const int64_t ikw = rem - ikh * KW; - const int64_t oh = blockIdx.y; - const int64_t batch = blockIdx.z / IC; - const int64_t ic = blockIdx.z % IC; + const int64_t iow = blockIdx.y; + for (int64_t iz = blockIdx.z; iz < N_OH; iz+=MAX_GRIDDIM_Z) { + const int64_t in = iz / OH; + const int64_t ioh = iz - in * OH; - const int64_t iiw = ix * s0 + kx * d0 - p0; - const int64_t iih = oh * s1 + ky * d1 - p1; + const int64_t iiw = iow * s0 + ikw * d0 - p0; + const int64_t iih = ioh * s1 + ikh * d1 - p1; - const int64_t offset_dst = - ((batch * OH + oh) * OW + ix) * CHW + - (ic * (KW * KH) + ky * KW + kx); + const int64_t offset_dst = + ((in * OH + ioh) * OW + iow) * IC_KH_KW + iic * KH_KW + ikh * KW + ikw; - if (iih < 0 || iih >= IH || iiw < 0 || iiw >= IW) { - dst[offset_dst] = 0.0f; - } else { - const int64_t offset_src = ic * offset_delta + batch * batch_offset; - dst[offset_dst] = x[offset_src + iih * IW + iiw]; + if (iih < 0 || iih >= IH || iiw < 0 || iiw >= IW) { + dst[offset_dst] = 0.0f; + } else { + const int64_t offset_src = iic * IC_IH_IW + in * IH_IW; + dst[offset_dst] = x[offset_src + iih * IW + iiw]; + } } + + GGML_UNUSED(IC); + GGML_UNUSED(KH); } +// im2col: [N, IC, IH, IW] => [N, OH, OW, IC*KH*KW] template static void im2col_cuda(const float * x, T* dst, int64_t IW, int64_t IH, int64_t OW, int64_t OH, int64_t KW, int64_t KH, int64_t IC, - int64_t batch, int64_t batch_offset, int64_t offset_delta, + int64_t N, int64_t IC_IH_IW, int64_t IH_IW, int s0,int s1,int p0,int p1,int d0,int d1, cudaStream_t stream) { - const int parallel_elements = OW * KW * KH; - const int num_blocks = (parallel_elements + CUDA_IM2COL_BLOCK_SIZE - 1) / CUDA_IM2COL_BLOCK_SIZE; - dim3 block_nums(num_blocks, OH, batch * IC); - im2col_kernel<<>>(x, dst, batch_offset, offset_delta, IC, IW, IH, OH, OW, KW, KH, parallel_elements, (IC * KH * KW), s0, s1, p0, p1, d0, d1); + const int64_t IC_KH_KW = IC * KH * KW; + const int64_t num_blocks = (IC_KH_KW + CUDA_IM2COL_BLOCK_SIZE - 1) / CUDA_IM2COL_BLOCK_SIZE; + const int64_t N_OH = N * OH; + const int64_t KH_KW = KW*KH; + dim3 block_nums(num_blocks, OW, MIN(N_OH, MAX_GRIDDIM_Z)); + im2col_kernel<<>>(x, dst, IC, IW, IH, OH, OW, KW, KH, + IC_IH_IW, IH_IW, N_OH, KH_KW, IC_KH_KW, + s0, s1, p0, p1, d0, d1); } static void im2col_cuda_f16(const float * x, half * dst, int64_t IW, int64_t IH, int64_t OW, int64_t OH, int64_t KW, int64_t KH, int64_t IC, - int64_t batch, int64_t batch_offset, int64_t offset_delta, + int64_t N, int64_t IC_IH_IW, int64_t IH_IW, int s0,int s1,int p0,int p1,int d0,int d1, cudaStream_t stream) { - im2col_cuda(x, dst, IW, IH, OW, OH, KW, KH, IC, batch, batch_offset, offset_delta, s0, s1, p0, p1, d0, d1, stream); + im2col_cuda(x, dst, IW, IH, OW, OH, KW, KH, IC, N, IC_IH_IW, IH_IW, s0, s1, p0, p1, d0, d1, stream); } static void im2col_cuda_f32(const float * x, float * dst, int64_t IW, int64_t IH, int64_t OW, int64_t OH, int64_t KW, int64_t KH, int64_t IC, - int64_t batch, int64_t batch_offset, int64_t offset_delta, + int64_t N, int64_t IC_IH_IW, int64_t IH_IW, int s0,int s1,int p0,int p1,int d0,int d1, cudaStream_t stream) { - im2col_cuda(x, dst, IW, IH, OW, OH, KW, KH, IC, batch, batch_offset, offset_delta, s0, s1, p0, p1, d0, d1, stream); + im2col_cuda(x, dst, IW, IH, OW, OH, KW, KH, IC, N, IC_IH_IW, IH_IW, s0, 
s1, p0, p1, d0, d1, stream); } void ggml_cuda_op_im2col(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { @@ -91,13 +102,13 @@ void ggml_cuda_op_im2col(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const int64_t OH = is_2D ? dst->ne[2] : 1; const int64_t OW = dst->ne[1]; - const size_t delta_offset = src1->nb[is_2D ? 2 : 1] / 4; // nb is byte offset, src is type float32 - const int64_t batch = src1->ne[is_2D ? 3 : 2]; - const size_t batch_offset = src1->nb[is_2D ? 3 : 2] / 4; // nb is byte offset, src is type float32 + const int64_t IC_IH_IW = src1->nb[is_2D ? 2 : 1] / 4; // nb is byte offset, src is type float32 + const int64_t N = src1->ne[is_2D ? 3 : 2]; + const int64_t IH_IW = src1->nb[is_2D ? 3 : 2] / 4; // nb is byte offset, src is type float32 if(dst->type == GGML_TYPE_F16) { - im2col_cuda_f16(src1_d, (half *) dst_d, IW, IH, OW, OH, KW, KH, IC, batch, batch_offset, delta_offset, s0, s1, p0, p1, d0, d1, stream); + im2col_cuda_f16(src1_d, (half *) dst_d, IW, IH, OW, OH, KW, KH, IC, N, IC_IH_IW, IH_IW, s0, s1, p0, p1, d0, d1, stream); } else { - im2col_cuda_f32(src1_d, (float *) dst_d, IW, IH, OW, OH, KW, KH, IC, batch, batch_offset, delta_offset, s0, s1, p0, p1, d0, d1, stream); + im2col_cuda_f32(src1_d, (float *) dst_d, IW, IH, OW, OH, KW, KH, IC, N, IC_IH_IW, IH_IW, s0, s1, p0, p1, d0, d1, stream); } } diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/mma.cuh b/ml/backend/ggml/ggml/src/ggml-cuda/mma.cuh index 2af63355a..83ee16b27 100644 --- a/ml/backend/ggml/ggml/src/ggml-cuda/mma.cuh +++ b/ml/backend/ggml/ggml/src/ggml-cuda/mma.cuh @@ -12,7 +12,8 @@ // The methods get_i and get_j can be used to get the physical 32 bit index of the lth element of a thread within a tile. // All matrix tiles have ne physical 32 bit elements per warp. // -// As described in the documentation, all pointers for load_ldmatrix must be to shared memory and aligned to 16 bytes. +// As described in the PTX documentation, all pointers for load_ldmatrix must be to shared memory and aligned to 16 bytes. +// The API in this file also assumes that the pointers for load_generic are aligned to 16 bytes, unaligned pointers are considered undefined behavior. 
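The alignment contract documented here pairs with the tile layouts that follow. As a sanity check on the new bf16 tile below, its `I == 16 && J == 8` specialization gives each of the 32 threads in a warp `ne = 16*8/32 = 4` elements, and the quoted `get_i`/`get_j` expressions cover every (i, j) cell exactly once. A host-side verification sketch, using the same index formulas:

```cpp
#include <cstdio>

int main() {
    const int I = 16, J = 8, warp = 32, ne = I*J/warp; // ne == 4 per thread
    int hits[16][8] = {};
    for (int t = 0; t < warp; ++t) {       // t plays the role of threadIdx.x
        for (int l = 0; l < ne; ++l) {
            const int i = (l % 2) * 8 + t / 4; // get_i for I == 16 && J == 8
            const int j = (l / 2) * 4 + t % 4; // get_j for I == 16 && J == 8
            ++hits[i][j];
        }
    }
    int ok = 1;
    for (int i = 0; i < I; ++i)
        for (int j = 0; j < J; ++j)
            ok &= (hits[i][j] == 1);
    printf("all %d cells covered exactly once: %s\n", I*J, ok ? "yes" : "no");
    return 0;
}
```

The mapping is a bijection from (thread, l) to (i, j), which is what lets the MMA code address fragments without any per-thread bounds logic.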
#include "common.cuh" @@ -22,13 +23,13 @@ static __device__ __forceinline__ int ggml_cuda_movmatrix(const int x) { int ret = 0; -#ifdef NEW_MMA_AVAILABLE +#ifdef TURING_MMA_AVAILABLE asm("movmatrix.sync.aligned.m8n8.trans.b16 %0, %1;" : "=r"(ret) : "r"(x)); #else GGML_UNUSED(x); NO_DEVICE_CODE; -#endif // defined(NEW_MMA_AVAILABLE) +#endif // defined(TURING_MMA_AVAILABLE) return ret; } @@ -66,7 +67,44 @@ namespace ggml_cuda_mma { struct tile { static constexpr int I = I_; static constexpr int J = J_; - static constexpr int ne = I * J / WARP_SIZE; + +#if defined(GGML_USE_HIP) + static constexpr int ne = I * J / 64; + T x[ne] = {0}; + + static __device__ __forceinline__ int get_i(const int l) { + if constexpr (I == 64 && J == 2) { // Special tile size to load <16, 4> as <16, 8> + return threadIdx.x % 16; + } else if constexpr (I == 16 && J == 8) { + return threadIdx.x % 16; + } else if constexpr (I == 32 && J == 4) { + return threadIdx.x % 32; + } else if constexpr (I == 16 && J == 16) { + return 4 * (threadIdx.x / 16) + l; + } else if constexpr (I == 32 && J == 32) { + return 4 * (threadIdx.x / 32) + 8 * (l / 4) + (l % 4); + } else { + static_assert(I == -1 && J == -1, "template specialization not implemented"); + } + } + + static __device__ __forceinline__ int get_j(const int l) { + if constexpr (I == 64 && J == 2) { // Special tile size to load <16, 4> as <16, 8> + return (2 * ((threadIdx.x / 16) % 2) + l); + } else if constexpr (I == 16 && J == 8) { + return 2 * (threadIdx.x / 16) + l; + } else if constexpr (I == 32 && J == 4) { + return 2 * (threadIdx.x / 32) + l; + } else if constexpr (I == 16 && J == 16) { + return threadIdx.x % 16; + } else if constexpr (I == 32 && J == 32) { + return threadIdx.x % 32; + } else { + static_assert(I == -1 && J == -1, "template specialization not implemented"); + } + } +#else + static constexpr int ne = I * J / 32; T x[ne] = {0}; static __device__ __forceinline__ int get_i(const int l) { @@ -94,6 +132,7 @@ namespace ggml_cuda_mma { static_assert(I == -1 && J == -1, "template specialization not implemented"); } } +#endif // defined(GGML_USE_HIP) }; template @@ -128,6 +167,38 @@ namespace ggml_cuda_mma { } }; + template + struct tile { + static constexpr int I = I_; + static constexpr int J = J_; + static constexpr int ne = I * J / WARP_SIZE; + nv_bfloat162 x[ne] = {{0.0f, 0.0f}}; + + static __device__ __forceinline__ int get_i(const int l) { + if constexpr (I == 8 && J == 8) { + return threadIdx.x / 4; + } else if constexpr (I == 16 && J == 4) { + return l * 8 + threadIdx.x / 4; + } else if constexpr (I == 16 && J == 8) { + return (l % 2) * 8 + threadIdx.x / 4; + } else { + static_assert(I == -1 && J == -1, "template specialization not implemented"); + } + } + + static __device__ __forceinline__ int get_j(const int l) { + if constexpr (I == 8 && J == 8) { + return l * 4 + threadIdx.x % 4; + } else if constexpr (I == 16 && J == 4) { + return threadIdx.x % 4; + } else if constexpr (I == 16 && J == 8) { + return (l / 2) * 4 + threadIdx.x % 4; + } else { + static_assert(I == -1 && J == -1, "template specialization not implemented"); + } + } + }; + template static __device__ __forceinline__ tile get_half2(const tile & tile_float) { tile ret; @@ -148,16 +219,29 @@ namespace ggml_cuda_mma { template static __device__ __forceinline__ void load_generic(tile & t, const T * __restrict__ xs0, const int stride) { +#if defined(AMD_MFMA_AVAILABLE) + if constexpr (I == 64 && J == 2) { // Special tile size to load <16, 4> as <16, 8> +#pragma unroll + for (int l = 0; l < 
t.ne; ++l) { + t.x[l] = xs0[t.get_i(l)*stride + t.get_j(l)]; + } + } else { + int64_t * xi = (int64_t *) t.x; + const int64_t * xs = (int64_t *) ((const int *) xs0 + (threadIdx.x % t.I) * stride + 2 * (threadIdx.x / t.I)); + xi[0] = xs[0]; + } +#else #pragma unroll for (int l = 0; l < t.ne; ++l) { t.x[l] = xs0[t.get_i(l)*stride + t.get_j(l)]; } +#endif // defined(AMD_MFMA_AVAILABLE) } template static __device__ __forceinline__ void load_ldmatrix( tile<8, 8, T> & t, const T * __restrict__ xs0, const int stride) { -#ifdef NEW_MMA_AVAILABLE +#ifdef TURING_MMA_AVAILABLE int * xi = (int *) t.x; const int * xs = (const int *) xs0 + (threadIdx.x % t.I) * stride + ((threadIdx.x / t.I) * (t.J / 2)) % t.J; asm volatile("ldmatrix.sync.aligned.m8n8.x2.b16 {%0, %1}, [%2];" @@ -165,13 +249,13 @@ namespace ggml_cuda_mma { : "l"(xs)); #else load_generic(t, xs0, stride); -#endif // NEW_MMA_AVAILABLE +#endif // TURING_MMA_AVAILABLE } template static __device__ __forceinline__ void load_ldmatrix( tile<16, 4, T> & t, const T * __restrict__ xs0, const int stride) { -#ifdef NEW_MMA_AVAILABLE +#ifdef TURING_MMA_AVAILABLE int * xi = (int *) t.x; const int * xs = (const int *) xs0 + (threadIdx.x % t.I) * stride; asm volatile("ldmatrix.sync.aligned.m8n8.x2.b16 {%0, %1}, [%2];" @@ -180,13 +264,13 @@ namespace ggml_cuda_mma { #else load_generic(xs0, stride); GGML_UNUSED(t); -#endif // NEW_MMA_AVAILABLE +#endif // TURING_MMA_AVAILABLE } template static __device__ __forceinline__ void load_ldmatrix( tile<16, 8, T> & t, const T * __restrict__ xs0, const int stride) { -#ifdef NEW_MMA_AVAILABLE +#if defined(TURING_MMA_AVAILABLE) int * xi = (int * ) t.x; const int * xs = (const int *) xs0 + (threadIdx.x % t.I) * stride + (threadIdx.x / t.I) * (t.J / 2); asm volatile("ldmatrix.sync.aligned.m8n8.x4.b16 {%0, %1, %2, %3}, [%4];" @@ -194,13 +278,13 @@ namespace ggml_cuda_mma { : "l"(xs)); #else load_generic(t, xs0, stride); -#endif // NEW_MMA_AVAILABLE +#endif // TURING_MMA_AVAILABLE } template static __device__ __forceinline__ void load_ldmatrix_trans( tile<16, 8, T> & t, const T * __restrict__ xs0, const int stride) { -#ifdef NEW_MMA_AVAILABLE +#ifdef TURING_MMA_AVAILABLE int * xi = (int * ) t.x; const int * xs = (const int *) xs0 + (threadIdx.x % t.I) * stride + (threadIdx.x / t.I) * (t.J / 2); asm volatile("ldmatrix.sync.aligned.m8n8.x4.trans.b16 {%0, %1, %2, %3}, [%4];" @@ -211,12 +295,12 @@ namespace ggml_cuda_mma { GGML_UNUSED(xs0); GGML_UNUSED(stride); NO_DEVICE_CODE; -#endif // NEW_MMA_AVAILABLE +#endif // TURING_MMA_AVAILABLE } static __device__ __forceinline__ void mma( tile<16, 8, int> & D, const tile<16, 4, int> & A, const tile<8, 4, int> & B) { -#ifdef NEW_MMA_AVAILABLE +#ifdef TURING_MMA_AVAILABLE #if __CUDA_ARCH__ >= GGML_CUDA_CC_AMPERE asm("mma.sync.aligned.m16n8k16.row.col.s32.s8.s8.s32 {%0, %1, %2, %3}, {%4, %5}, {%6}, {%0, %1, %2, %3};" : "+r"(D.x[0]), "+r"(D.x[1]), "+r"(D.x[2]), "+r"(D.x[3]) @@ -235,12 +319,12 @@ namespace ggml_cuda_mma { GGML_UNUSED(A); GGML_UNUSED(B); NO_DEVICE_CODE; -#endif // NEW_MMA_AVAILABLE +#endif // TURING_MMA_AVAILABLE } static __device__ __forceinline__ void mma( tile<16, 8, int> & D, const tile<16, 8, int> & A, const tile<8, 8, int> & B) { -#ifdef NEW_MMA_AVAILABLE +#ifdef TURING_MMA_AVAILABLE #if __CUDA_ARCH__ >= GGML_CUDA_CC_AMPERE asm("mma.sync.aligned.m16n8k32.row.col.s32.s8.s8.s32 {%0, %1, %2, %3}, {%4, %5, %6, %7}, {%8, %9}, {%0, %1, %2, %3};" : "+r"(D.x[0]), "+r"(D.x[1]), "+r"(D.x[2]), "+r"(D.x[3]) @@ -265,12 +349,12 @@ namespace ggml_cuda_mma { GGML_UNUSED(A); 
GGML_UNUSED(B); NO_DEVICE_CODE; -#endif // NEW_MMA_AVAILABLE +#endif // TURING_MMA_AVAILABLE } static __device__ __forceinline__ void mma( tile<16, 4, half2> & D, const tile<16, 8, half2> & A, const tile<8, 8, half2> & B) { -#ifdef NEW_MMA_AVAILABLE +#ifdef TURING_MMA_AVAILABLE const int * Axi = (const int *) A.x; const int * Bxi = (const int *) B.x; int * Dxi = (int *) D.x; @@ -292,12 +376,12 @@ namespace ggml_cuda_mma { GGML_UNUSED(A); GGML_UNUSED(B); NO_DEVICE_CODE; -#endif // NEW_MMA_AVAILABLE +#endif // TURING_MMA_AVAILABLE } static __device__ __forceinline__ void mma( tile<16, 8, half2> & D, const tile<16, 8, half2> & A, const tile<16, 8, half2> & B) { -#ifdef NEW_MMA_AVAILABLE +#ifdef TURING_MMA_AVAILABLE const int * Axi = (const int *) A.x; const int * Bxi = (const int *) B.x; int * Dxi = (int *) D.x; @@ -328,12 +412,29 @@ namespace ggml_cuda_mma { GGML_UNUSED(A); GGML_UNUSED(B); NO_DEVICE_CODE; -#endif // NEW_MMA_AVAILABLE +#endif // TURING_MMA_AVAILABLE + } + + static __device__ __forceinline__ void mma( + tile<16, 8, float> & D, const tile<16, 8, float> & A, const tile<8, 8, float> & B) { +#ifdef AMPERE_MMA_AVAILABLE + const int * Axi = (const int *) A.x; + const int * Bxi = (const int *) B.x; + int * Dxi = (int *) D.x; + asm("mma.sync.aligned.m16n8k8.row.col.f32.tf32.tf32.f32 {%0, %1, %2, %3}, {%4, %5, %6, %7}, {%8, %9}, {%0, %1, %2, %3};" + : "+r"(Dxi[0]), "+r"(Dxi[1]), "+r"(Dxi[2]), "+r"(Dxi[3]) + : "r"(Axi[0]), "r"(Axi[1]), "r"(Axi[2]), "r"(Axi[3]), "r"(Bxi[0]), "r"(Bxi[1])); +#else + GGML_UNUSED(D); + GGML_UNUSED(A); + GGML_UNUSED(B); + NO_DEVICE_CODE; +#endif // AMPERE_MMA_AVAILABLE } static __device__ __forceinline__ void mma( tile<16, 8, float> & D, const tile<16, 8, half2> & A, const tile<8, 8, half2> & B) { -#ifdef NEW_MMA_AVAILABLE +#ifdef TURING_MMA_AVAILABLE const int * Axi = (const int *) A.x; const int * Bxi = (const int *) B.x; int * Dxi = (int *) D.x; @@ -355,12 +456,29 @@ namespace ggml_cuda_mma { GGML_UNUSED(A); GGML_UNUSED(B); NO_DEVICE_CODE; -#endif // NEW_MMA_AVAILABLE +#endif // TURING_MMA_AVAILABLE + } + + static __device__ __forceinline__ void mma( + tile<16, 8, float> & D, const tile<16, 8, nv_bfloat162> & A, const tile<8, 8, nv_bfloat162> & B) { +#ifdef AMPERE_MMA_AVAILABLE + const int * Axi = (const int *) A.x; + const int * Bxi = (const int *) B.x; + int * Dxi = (int *) D.x; + asm("mma.sync.aligned.m16n8k16.row.col.f32.bf16.bf16.f32 {%0, %1, %2, %3}, {%4, %5, %6, %7}, {%8, %9}, {%0, %1, %2, %3};" + : "+r"(Dxi[0]), "+r"(Dxi[1]), "+r"(Dxi[2]), "+r"(Dxi[3]) + : "r"(Axi[0]), "r"(Axi[1]), "r"(Axi[2]), "r"(Axi[3]), "r"(Bxi[0]), "r"(Bxi[1])); +#else + GGML_UNUSED(D); + GGML_UNUSED(A); + GGML_UNUSED(B); + NO_DEVICE_CODE; +#endif // AMPERE_MMA_AVAILABLE } static __device__ __forceinline__ void mma( tile<16, 16, float> & D, const tile<16, 8, half2> & A, const tile<16, 8, half2> & B) { -#ifdef NEW_MMA_AVAILABLE +#ifdef TURING_MMA_AVAILABLE const int * Axi = (const int *) A.x; const int * Bxi = (const int *) B.x; int * Dxi = (int *) D.x; @@ -391,6 +509,62 @@ namespace ggml_cuda_mma { GGML_UNUSED(A); GGML_UNUSED(B); NO_DEVICE_CODE; -#endif // NEW_MMA_AVAILABLE +#endif // TURING_MMA_AVAILABLE + } + + static __device__ __forceinline__ void mma( + tile<16, 16, int> & D, const tile<16, 8, int> & A, const tile<16, 8, int> & B) { +#if defined(AMD_MFMA_AVAILABLE) + using int32x4_t = __attribute__((__vector_size__(4 * sizeof(int)))) int; + int32x4_t * acc = (int32x4_t *) D.x; +#if defined(CDNA3) + acc[0] = __builtin_amdgcn_mfma_i32_16x16x32_i8(((int64_t *) A.x)[0], + 
((int64_t *) B.x)[0], + acc[0], + 0, 0, 0); +#elif defined(CDNA2) || defined(CDNA) + acc[0] = __builtin_amdgcn_mfma_i32_16x16x16i8(A.x[0], + B.x[0], + acc[0], + 0, 0, 0); + acc[0] = __builtin_amdgcn_mfma_i32_16x16x16i8(A.x[1], + B.x[1], + acc[0], + 0, 0, 0); +#endif // defined(CDNA3) +#else + GGML_UNUSED(D); + GGML_UNUSED(A); + GGML_UNUSED(B); + NO_DEVICE_CODE; +#endif // AMD_MFMA_AVAILABLE + } + + static __device__ __forceinline__ void mma( + tile<32, 32, int> & D, const tile<32, 4, int> & A, const tile<32, 4, int> & B) { +#if defined(AMD_MFMA_AVAILABLE) + using int32x16_t = __attribute__((__vector_size__(16 * sizeof(int)))) int; + int32x16_t * acc = (int32x16_t *) D.x; +#if defined(CDNA3) + acc[0] = __builtin_amdgcn_mfma_i32_32x32x16_i8(((int64_t *) A.x)[0], + ((int64_t *) B.x)[0], + acc[0], + 0, 0, 0); +#elif defined(CDNA2) || defined(CDNA) + acc[0] = __builtin_amdgcn_mfma_i32_32x32x8i8(A.x[0], + B.x[0], + acc[0], + 0, 0, 0); + acc[0] = __builtin_amdgcn_mfma_i32_32x32x8i8(A.x[1], + B.x[1], + acc[0], + 0, 0, 0); +#endif // defined(CDNA3) +#else + GGML_UNUSED(D); + GGML_UNUSED(A); + GGML_UNUSED(B); + NO_DEVICE_CODE; +#endif // AMD_MFMA_AVAILABLE } } diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/mmf.cu b/ml/backend/ggml/ggml/src/ggml-cuda/mmf.cu new file mode 100644 index 000000000..1437367e8 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/mmf.cu @@ -0,0 +1,431 @@ +#include "ggml.h" +#include "common.cuh" +#include "mma.cuh" +#include "mmf.cuh" + +using namespace ggml_cuda_mma; + +#define MMF_ROWS_PER_BLOCK 32 + +template +__launch_bounds__(ggml_cuda_get_physical_warp_size()*nwarps, 1) +static __global__ void mul_mat_f( + const T * __restrict__ x, const float * __restrict__ y, const int32_t * __restrict__ ids, float * __restrict__ dst, + const int ncols, const int nchannels_y, const int stride_row, const int stride_col_y, const int stride_col_dst, + const int channel_ratio, const int stride_channel_x, const int stride_channel_y, const int stride_channel_dst, + const int sample_ratio, const int stride_sample_x, const int stride_sample_y, const int stride_sample_dst) { +#if !defined(GGML_USE_HIP) && !defined(GGML_USE_MUSA) + typedef tile<16, 8, T> tile_A; + typedef tile< 8, 8, T> tile_B; + typedef tile<16, 8, float> tile_C; + + constexpr int warp_size = ggml_cuda_get_physical_warp_size(); + constexpr int tile_k_padded = warp_size + 4; + constexpr int ntA = rows_per_block / tile_A::I; + constexpr int ntB = (cols_per_block + tile_B::I - 1) / tile_B::I; + + const int row0 = blockIdx.x * rows_per_block; + const int channel_dst = blockIdx.y; + const int channel_x = channel_dst / channel_ratio; + const int channel_y = channel_dst; + const int sample_dst = blockIdx.z; + const int sample_x = sample_dst / sample_ratio; + const int sample_y = sample_dst; + + x += int64_t(sample_x) *stride_sample_x + channel_x *stride_channel_x + row0*stride_row ; + y += int64_t(sample_y) *stride_sample_y + channel_y *stride_channel_y; + dst += int64_t(sample_dst)*stride_sample_dst + channel_dst*stride_channel_dst; + + const float2 * y2 = (const float2 *) y; + + extern __shared__ char data_mmv[]; + + tile_C C[ntA][ntB]; + + T * tile_xy = (T *) data_mmv + threadIdx.y*(tile_A::I * tile_k_padded); + + for (int col = threadIdx.y*warp_size + threadIdx.x; col < ncols; col += nwarps*warp_size) { + tile_A A[ntA][warp_size / tile_A::J]; +#pragma unroll + for (int itA = 0; itA < ntA; ++itA) { +#pragma unroll + for (int i = 0; i < tile_A::I; ++i) { + tile_xy[i*tile_k_padded + threadIdx.x] = x[(itA*tile_A::I + 
i)*stride_row + col]; + } +#pragma unroll + for (int k0 = 0; k0 < warp_size; k0 += tile_A::J) { + load_ldmatrix(A[itA][k0/tile_A::J], tile_xy + k0, tile_k_padded); + } + } + +#pragma unroll + for (int itB = 0; itB < ntB; ++itB) { + if constexpr (std::is_same_v) { +#pragma unroll + for (int j0 = 0; j0 < tile_B::I; ++j0) { + const int j = j0 + itB*tile_B::I; + + tile_xy[j0*tile_k_padded + threadIdx.x] = j < cols_per_block ? y[j*stride_col_y + col] : 0.0f; + } + } else if constexpr (std::is_same_v || std::is_same_v) { +#pragma unroll + for (int j0 = 0; j0 < tile_B::I; ++j0) { + const int j = j0 + itB*tile_B::I; + + const float2 tmp = j < cols_per_block ? y2[j*stride_col_y + col] : make_float2(0.0f, 0.0f); + tile_xy[j0*tile_k_padded + threadIdx.x] = {tmp.x, tmp.y}; + } + } else { + static_assert(std::is_same_v, "unsupported type"); + } +#pragma unroll + for (int k0 = 0; k0 < warp_size; k0 += tile_B::J) { + tile_B B; + load_ldmatrix(B, tile_xy + k0, tile_k_padded); +#pragma unroll + for (int itA = 0; itA < ntA; ++itA) { + mma(C[itA][itB], A[itA][k0/tile_B::J], B); + } + } + } + } + + float * buf_iw = (float *) data_mmv; + constexpr int kiw = nwarps*rows_per_block + 4; + + if (nwarps > 1) { + __syncthreads(); + } +#pragma unroll + for (int itB = 0; itB < ntB; ++itB) { +#pragma unroll + for (int itA = 0; itA < ntA; ++itA) { +#pragma unroll + for (int l = 0; l < tile_C::ne; ++l) { + const int i = threadIdx.y*rows_per_block + itA*tile_C::I + tile_C::get_i(l); + const int j = itB*tile_C::J + tile_C::get_j(l); + buf_iw[j*kiw + i] = C[itA][itB].x[l]; + } + } + } + + if (nwarps > 1) { + __syncthreads(); + } + +#pragma unroll + for (int j0 = 0; j0 < cols_per_block; j0 += nwarps) { + const int j = j0 + threadIdx.y; + + if (j0 + nwarps > cols_per_block && j >= cols_per_block) { + return; + } + + float sum = 0.0f; + static_assert(rows_per_block == warp_size, "need loop/check"); +#pragma unroll + for (int i0 = 0; i0 < nwarps*rows_per_block; i0 += rows_per_block) { + const int i = i0 + threadIdx.x; + + sum += buf_iw[j*kiw + i]; + } + dst[j*stride_col_dst + row0 + threadIdx.x] = sum; + } +#else + NO_DEVICE_CODE; + GGML_UNUSED(x); GGML_UNUSED(y); GGML_UNUSED(ids); GGML_UNUSED(dst); + GGML_UNUSED(ncols); GGML_UNUSED(nchannels_y); GGML_UNUSED(stride_row); GGML_UNUSED(stride_col_y); GGML_UNUSED(stride_col_dst); + GGML_UNUSED(channel_ratio); GGML_UNUSED(stride_channel_x); GGML_UNUSED(stride_channel_y); GGML_UNUSED(stride_channel_dst); + GGML_UNUSED(sample_ratio); GGML_UNUSED(stride_sample_x); GGML_UNUSED(stride_sample_y); GGML_UNUSED(stride_sample_dst); +#endif // !defined(GGML_USE_HIP) && !defined(GGML_USE_MUSA) +} + +template +static void mul_mat_f_cuda( + const T * x, const float * y, const int32_t * ids, float * dst, + const int64_t ncols_x, const int64_t nrows_x, + const int64_t stride_row, const int64_t stride_col_y, const int64_t stride_col_dst, + const int64_t nchannels_x, const int64_t nchannels_y, const int64_t nchannels_dst, + const int64_t stride_channel_x, const int64_t stride_channel_y, const int64_t stride_channel_dst, const int64_t nsamples_x, + const int64_t nsamples_dst, const int64_t stride_sample_x, const int64_t stride_sample_y, const int64_t stride_sample_dst, + cudaStream_t stream) { + typedef tile<16, 8, T> tile_A; + typedef tile< 8, 8, T> tile_B; + typedef tile<16, 8, float> tile_C; + + GGML_ASSERT(!ids && "mul_mat_id not implemented"); + + GGML_ASSERT(ncols_x % 2 == 0); + GGML_ASSERT(stride_row % 2 == 0); + GGML_ASSERT(stride_col_y % 2 == 0); + GGML_ASSERT(ids || nchannels_dst % 
nchannels_x == 0); + GGML_ASSERT( nsamples_dst % nsamples_x == 0); + const int64_t channel_ratio = nchannels_dst / nchannels_x; + const int64_t sample_ratio = nsamples_dst / nsamples_x; + + const int device = ggml_cuda_get_device(); + const int warp_size = ggml_cuda_info().devices[device].warp_size; + + int64_t nwarps_best = 1; + int64_t niter_best = (ncols_x + warp_size*2 - 1) / (warp_size*2); + int64_t max_block_size = 256; + for (int64_t nwarps = 2; nwarps <= max_block_size/warp_size; nwarps++) { + const int64_t niter = (ncols_x + nwarps*warp_size*2 - 1) / (nwarps*warp_size*2); + if (niter < niter_best) { + niter_best = niter; + nwarps_best = nwarps; + } + } + + constexpr int rows_per_block = MMF_ROWS_PER_BLOCK; + const int nbytes_shared_iter = nwarps_best * tile_A::I * (warp_size + 4) * 4; + const int nbytes_shared_combine = GGML_PAD(cols_per_block, tile_B::I) * (nwarps_best*rows_per_block + 4) * 4; + const int nbytes_shared = std::max(nbytes_shared_iter, nbytes_shared_combine); + const dim3 block_nums(nrows_x/rows_per_block, nchannels_dst, nsamples_dst); + const dim3 block_dims(warp_size, nwarps_best, 1); + switch (nwarps_best) { + case 1: { + mul_mat_f<<>> + (x, y, ids, dst, ncols_x, nchannels_y, stride_row, stride_col_y, stride_col_dst, + channel_ratio, stride_channel_x, stride_channel_y, stride_channel_dst, + sample_ratio, stride_sample_x, stride_sample_y, stride_sample_dst); + } break; + case 2: { + mul_mat_f<<>> + (x, y, ids, dst, ncols_x, nchannels_y, stride_row, stride_col_y, stride_col_dst, + channel_ratio, stride_channel_x, stride_channel_y, stride_channel_dst, + sample_ratio, stride_sample_x, stride_sample_y, stride_sample_dst); + } break; + case 3: { + mul_mat_f<<>> + (x, y, ids, dst, ncols_x, nchannels_y, stride_row, stride_col_y, stride_col_dst, + channel_ratio, stride_channel_x, stride_channel_y, stride_channel_dst, + sample_ratio, stride_sample_x, stride_sample_y, stride_sample_dst); + } break; + case 4: { + mul_mat_f<<>> + (x, y, ids, dst, ncols_x, nchannels_y, stride_row, stride_col_y, stride_col_dst, + channel_ratio, stride_channel_x, stride_channel_y, stride_channel_dst, + sample_ratio, stride_sample_x, stride_sample_y, stride_sample_dst); + } break; + case 5: { + mul_mat_f<<>> + (x, y, ids, dst, ncols_x, nchannels_y, stride_row, stride_col_y, stride_col_dst, + channel_ratio, stride_channel_x, stride_channel_y, stride_channel_dst, + sample_ratio, stride_sample_x, stride_sample_y, stride_sample_dst); + } break; + case 6: { + mul_mat_f<<>> + (x, y, ids, dst, ncols_x, nchannels_y, stride_row, stride_col_y, stride_col_dst, + channel_ratio, stride_channel_x, stride_channel_y, stride_channel_dst, + sample_ratio, stride_sample_x, stride_sample_y, stride_sample_dst); + } break; + case 7: { + mul_mat_f<<>> + (x, y, ids, dst, ncols_x, nchannels_y, stride_row, stride_col_y, stride_col_dst, + channel_ratio, stride_channel_x, stride_channel_y, stride_channel_dst, + sample_ratio, stride_sample_x, stride_sample_y, stride_sample_dst); + } break; + case 8: { + mul_mat_f<<>> + (x, y, ids, dst, ncols_x, nchannels_y, stride_row, stride_col_y, stride_col_dst, + channel_ratio, stride_channel_x, stride_channel_y, stride_channel_dst, + sample_ratio, stride_sample_x, stride_sample_y, stride_sample_dst); + } break; + default: { + GGML_ABORT("fatal error"); + } break; + } +} + +template +static void mul_mat_f_switch_cols_per_block( + const T * x, const float * y, const int32_t * ids, float * dst, + const int64_t ncols_x, const int64_t nrows_x, const int64_t ncols_dst, + const int64_t 
stride_row, const int64_t stride_col_y, const int64_t stride_col_dst, + const int64_t nchannels_x, const int64_t nchannels_y, const int64_t nchannels_dst, + const int64_t stride_channel_x, const int64_t stride_channel_y, const int64_t stride_channel_dst, const int64_t nsamples_x, + const int64_t nsamples_dst, const int64_t stride_sample_x, const int64_t stride_sample_y, const int64_t stride_sample_dst, + cudaStream_t stream) { + switch (ncols_dst) { + case 1: { + mul_mat_f_cuda(x, y, ids, dst, ncols_x, nrows_x, stride_row, stride_col_y, stride_col_dst, + nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst, + nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream); + } break; + case 2: { + mul_mat_f_cuda(x, y, ids, dst, ncols_x, nrows_x, stride_row, stride_col_y, stride_col_dst, + nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst, + nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream); + } break; + case 3: { + mul_mat_f_cuda(x, y, ids, dst, ncols_x, nrows_x, stride_row, stride_col_y, stride_col_dst, + nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst, + nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream); + } break; + case 4: { + mul_mat_f_cuda(x, y, ids, dst, ncols_x, nrows_x, stride_row, stride_col_y, stride_col_dst, + nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst, + nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream); + } break; + case 5: { + mul_mat_f_cuda(x, y, ids, dst, ncols_x, nrows_x, stride_row, stride_col_y, stride_col_dst, + nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst, + nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream); + } break; + case 6: { + mul_mat_f_cuda(x, y, ids, dst, ncols_x, nrows_x, stride_row, stride_col_y, stride_col_dst, + nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst, + nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream); + } break; + case 7: { + mul_mat_f_cuda(x, y, ids, dst, ncols_x, nrows_x, stride_row, stride_col_y, stride_col_dst, + nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst, + nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream); + } break; + case 8: { + mul_mat_f_cuda(x, y, ids, dst, ncols_x, nrows_x, stride_row, stride_col_y, stride_col_dst, + nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst, + nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream); + } break; + case 9: { + mul_mat_f_cuda(x, y, ids, dst, ncols_x, nrows_x, stride_row, stride_col_y, stride_col_dst, + nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst, + nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream); + } break; + case 10: { + mul_mat_f_cuda(x, y, ids, dst, ncols_x, nrows_x, stride_row, stride_col_y, stride_col_dst, + nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst, + nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream); + } break; + case 11: { + mul_mat_f_cuda(x, y, ids, dst, 
ncols_x, nrows_x, stride_row, stride_col_y, stride_col_dst, + nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst, + nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream); + } break; + case 12: { + mul_mat_f_cuda(x, y, ids, dst, ncols_x, nrows_x, stride_row, stride_col_y, stride_col_dst, + nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst, + nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream); + } break; + case 13: { + mul_mat_f_cuda(x, y, ids, dst, ncols_x, nrows_x, stride_row, stride_col_y, stride_col_dst, + nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst, + nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream); + } break; + case 14: { + mul_mat_f_cuda(x, y, ids, dst, ncols_x, nrows_x, stride_row, stride_col_y, stride_col_dst, + nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst, + nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream); + } break; + case 15: { + mul_mat_f_cuda(x, y, ids, dst, ncols_x, nrows_x, stride_row, stride_col_y, stride_col_dst, + nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst, + nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream); + } break; + case 16: { + mul_mat_f_cuda(x, y, ids, dst, ncols_x, nrows_x, stride_row, stride_col_y, stride_col_dst, + nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst, + nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream); + } break; + default: { + GGML_ABORT("fatal error"); + } break; + } +} + +void ggml_cuda_mul_mat_f(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * ids, ggml_tensor * dst) { + GGML_ASSERT( src1->type == GGML_TYPE_F32); + GGML_ASSERT(!ids || ids->type == GGML_TYPE_I32); + GGML_ASSERT( dst->type == GGML_TYPE_F32); + + GGML_TENSOR_BINARY_OP_LOCALS; + + const size_t ts_src0 = ggml_type_size(src0->type); + const size_t ts_src1 = ggml_type_size(src1->type); + const size_t ts_dst = ggml_type_size(dst->type); + + GGML_ASSERT(ne13 == ne3); + + GGML_ASSERT( nb00 == ts_src0); + GGML_ASSERT( nb10 == ts_src1); + GGML_ASSERT(!ids || ids->nb[0] == ggml_type_size(ids->type)); + GGML_ASSERT( nb0 == ts_dst); + + const int cc = ggml_cuda_info().devices[ggml_cuda_get_device()].cc; + const enum ggml_prec prec = fast_fp16_available(cc) ? ggml_prec(dst->op_params[0]) : GGML_PREC_F32; + + const float * src1_d = (const float *) src1->data; + const int32_t * ids_d = ids ? (const int32_t *) ids->data : nullptr; + float * dst_d = (float *) dst->data; + + const int64_t s01 = src0->nb[1] / ts_src0; + const int64_t s11 = src1->nb[1] / ts_src1; + const int64_t s1 = dst->nb[1] / ts_dst; + const int64_t s02 = src0->nb[2] / ts_src0; + const int64_t s12 = src1->nb[2] / ts_src1; + const int64_t s2 = dst->nb[2] / ts_dst; + const int64_t s03 = src0->nb[3] / ts_src0; + const int64_t s13 = src1->nb[3] / ts_src1; + const int64_t s3 = dst->nb[3] / ts_dst; + + // For MUL_MAT_ID the memory layout is different than for MUL_MAT: + const int64_t ncols_dst = ids ? ne2 : ne1; + const int64_t nchannels_y = ids ? ne11 : ne12; + const int64_t nchannels_dst = ids ? ne1 : ne2; + const int64_t stride_channel_dst = ids ? 
s1 : s2;
+    const int64_t stride_channel_y   = ids ? s11 : s12;
+
+    GGML_ASSERT(!ids || ncols_dst == 1);
+
+    switch (src0->type) {
+        case GGML_TYPE_F32: {
+            const float * src0_d = (const float *) src0->data;
+            constexpr int vals_per_T = 1;
+            mul_mat_f_switch_cols_per_block(
+                src0_d, src1_d, ids_d, dst_d, ne00/vals_per_T, ne01, ncols_dst, s01/vals_per_T, s11/vals_per_T, s1,
+                ne02, nchannels_y, nchannels_dst, s02/vals_per_T, stride_channel_y, stride_channel_dst,
+                ne03, ne3, s03/vals_per_T, s13, s3, ctx.stream());
+        } break;
+        case GGML_TYPE_F16: {
+            const half2 * src0_d = (const half2 *) src0->data;
+            constexpr int vals_per_T = 2;
+            mul_mat_f_switch_cols_per_block(
+                src0_d, src1_d, ids_d, dst_d, ne00/vals_per_T, ne01, ncols_dst, s01/vals_per_T, s11/vals_per_T, s1,
+                ne02, nchannels_y, nchannels_dst, s02/vals_per_T, stride_channel_y, stride_channel_dst,
+                ne03, ne3, s03/vals_per_T, s13, s3, ctx.stream());
+        } break;
+        case GGML_TYPE_BF16: {
+            const nv_bfloat162 * src0_d = (const nv_bfloat162 *) src0->data;
+            constexpr int vals_per_T = 2;
+            mul_mat_f_switch_cols_per_block(
+                src0_d, src1_d, ids_d, dst_d, ne00/vals_per_T, ne01, ncols_dst, s01/vals_per_T, s11/vals_per_T, s1,
+                ne02, nchannels_y, nchannels_dst, s02/vals_per_T, stride_channel_y, stride_channel_dst,
+                ne03, ne3, s03/vals_per_T, s13, s3, ctx.stream());
+        } break;
+        default:
+            GGML_ABORT("unsupported type: %s", ggml_type_name(src0->type));
+    }
+}
+
+bool ggml_cuda_should_use_mmf(enum ggml_type type, int cc, int warp_size, const int64_t * src0_ne, int64_t ne11) {
+    if (src0_ne[0] % (warp_size * (4/ggml_type_size(type))) != 0) {
+        return false;
+    }
+    if (src0_ne[1] % MMF_ROWS_PER_BLOCK != 0) {
+        return false;
+    }
+    if (ne11 > 16) {
+        return false;
+    }
+    switch (type) {
+        case GGML_TYPE_F32:
+            return ampere_mma_available(cc);
+        case GGML_TYPE_F16:
+            return turing_mma_available(cc);
+        case GGML_TYPE_BF16:
+            return ampere_mma_available(cc);
+        default:
+            return false;
+    }
+}
diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/mmf.cuh b/ml/backend/ggml/ggml/src/ggml-cuda/mmf.cuh
new file mode 100644
index 000000000..785f9f211
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-cuda/mmf.cuh
@@ -0,0 +1,5 @@
+#include "common.cuh"
+
+void ggml_cuda_mul_mat_f(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * ids, ggml_tensor * dst);
+
+bool ggml_cuda_should_use_mmf(enum ggml_type type, int cc, int warp_size, const int64_t * src0_ne, int64_t ne11);
diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/mmq.cu b/ml/backend/ggml/ggml/src/ggml-cuda/mmq.cu
index e1cf843de..384ee7615 100644
--- a/ml/backend/ggml/ggml/src/ggml-cuda/mmq.cu
+++ b/ml/backend/ggml/ggml/src/ggml-cuda/mmq.cu
@@ -20,6 +20,9 @@ static void ggml_cuda_mul_mat_q_switch_type(ggml_backend_cuda_context & ctx, con
         case GGML_TYPE_Q8_0:
             mul_mat_q_case<GGML_TYPE_Q8_0>(ctx, args, stream);
             break;
+        case GGML_TYPE_MXFP4:
+            mul_mat_q_case<GGML_TYPE_MXFP4>(ctx, args, stream);
+            break;
         case GGML_TYPE_Q2_K:
             mul_mat_q_case<GGML_TYPE_Q2_K>(ctx, args, stream);
             break;
@@ -109,7 +112,8 @@ void ggml_cuda_mul_mat_q(
     const int64_t s03 = src0->nb[3] / ts_src0;
     const int64_t s3  =  dst->nb[3] / ts_dst;

-    const bool use_stream_k = GGML_CUDA_CC_IS_NVIDIA(cc) && ggml_cuda_highest_compiled_arch(cc) >= GGML_CUDA_CC_VOLTA;
+    const bool use_stream_k = (GGML_CUDA_CC_IS_NVIDIA(cc) && ggml_cuda_highest_compiled_arch(cc) >= GGML_CUDA_CC_VOLTA)
+        || GGML_CUDA_CC_IS_CDNA(cc);

     if (!ids) {
         const size_t nbytes_src1_q8_1 = ne13*ne12 * ne11*ne10_padded * sizeof(block_q8_1)/QK8_1 +
@@ -122,6 +126,7 @@ void ggml_cuda_mul_mat_q(
const int64_t s13 = src1->nb[3] / ts_src1; quantize_mmq_q8_1_cuda(src1_d, nullptr, src1_q8_1.get(), src0->type, ne10, s11, s12, s13, ne10_padded, ne11, ne12, ne13, stream); + CUDA_CHECK(cudaGetLastError()); } const int64_t s12 = ne11*ne10_padded * sizeof(block_q8_1)/(QK8_1*sizeof(int)); @@ -205,6 +210,7 @@ void ggml_cuda_mul_mat_q( const int64_t s13 = src1->nb[2] / ts_src1; quantize_mmq_q8_1_cuda(src1_d, ids_src1_dev, src1_q8_1.get(), src0->type, ne10, s11, s12, s13, ne10_padded, ne11_flat, ne12_flat, ne13_flat, stream); + CUDA_CHECK(cudaGetLastError()); } const int64_t s12 = ne11*ne10_padded * sizeof(block_q8_1)/(QK8_1*sizeof(int)); @@ -248,8 +254,9 @@ void ggml_cuda_op_mul_mat_q( // The stream-k decomposition is only faster for recent NVIDIA GPUs. // Also its fixup needs to allocate a temporary buffer in the memory pool. // There are multiple parallel CUDA streams for src1_ncols != ne11 which would introduce a race condition for this buffer. - const bool use_stream_k = GGML_CUDA_CC_IS_NVIDIA(cc) && - ggml_cuda_highest_compiled_arch(cc) >= GGML_CUDA_CC_VOLTA && src1_ncols == ne11; + const bool use_stream_k = ((GGML_CUDA_CC_IS_NVIDIA(cc) && ggml_cuda_highest_compiled_arch(cc) >= GGML_CUDA_CC_VOLTA) + || GGML_CUDA_CC_IS_CDNA(cc)) + && src1_ncols == ne11; const mmq_args args = { src0_dd_i, src0->type, (const int *) src1_ddq_i, nullptr, nullptr, dst_dd_i, ne00, row_diff, src1_ncols, stride01, ne11, nrows_dst, @@ -278,6 +285,7 @@ bool ggml_cuda_should_use_mmq(enum ggml_type type, int cc, int64_t ne11) { case GGML_TYPE_Q5_0: case GGML_TYPE_Q5_1: case GGML_TYPE_Q8_0: + case GGML_TYPE_MXFP4: case GGML_TYPE_Q2_K: case GGML_TYPE_Q3_K: case GGML_TYPE_Q4_K: @@ -302,7 +310,7 @@ bool ggml_cuda_should_use_mmq(enum ggml_type type, int cc, int64_t ne11) { return false; } - if (new_mma_available(cc)) { + if (turing_mma_available(cc)) { return true; } @@ -318,5 +326,21 @@ bool ggml_cuda_should_use_mmq(enum ggml_type type, int cc, int64_t ne11) { return !fp16_mma_hardware_available(cc) || ne11 < MMQ_DP4A_MAX_BATCH_SIZE; } + if (amd_mfma_available(cc)) { + // As of ROCM 7.0 rocblas/tensile performs very poorly on CDNA3 and hipblaslt (via ROCBLAS_USE_HIPBLASLT) + // performs better but is currently suffering from a crash on this architecture. + // TODO: Revisit when hipblaslt is fixed on CDNA3 + if (GGML_CUDA_CC_IS_CDNA3(cc)) { + return true; + } + if (ne11 <= 128 || type == GGML_TYPE_Q4_0 || type == GGML_TYPE_Q4_1 || type == GGML_TYPE_Q5_0 || type == GGML_TYPE_Q5_1) { + return true; + } + if (ne11 <= 256 && (type == GGML_TYPE_Q4_K || type == GGML_TYPE_Q5_K)) { + return true; + } + return false; + } + return (!GGML_CUDA_CC_IS_RDNA4(cc) && !GGML_CUDA_CC_IS_RDNA3(cc) && !GGML_CUDA_CC_IS_CDNA(cc)) || ne11 < MMQ_DP4A_MAX_BATCH_SIZE; } diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/mmq.cuh b/ml/backend/ggml/ggml/src/ggml-cuda/mmq.cuh index 80baf459c..96129bd83 100644 --- a/ml/backend/ggml/ggml/src/ggml-cuda/mmq.cuh +++ b/ml/backend/ggml/ggml/src/ggml-cuda/mmq.cuh @@ -58,6 +58,8 @@ static mmq_q8_1_ds_layout mmq_get_q8_1_ds_layout(const ggml_type type_x) { return MMQ_Q8_1_DS_LAYOUT_DS4; case GGML_TYPE_Q8_0: return MMQ_Q8_1_DS_LAYOUT_D4; + case GGML_TYPE_MXFP4: + return MMQ_Q8_1_DS_LAYOUT_D4; case GGML_TYPE_Q2_K: return MMQ_Q8_1_DS_LAYOUT_D2S6; case GGML_TYPE_Q3_K: @@ -90,7 +92,7 @@ struct tile_x_sizes { }; static int get_mmq_x_max_host(const int cc) { - return new_mma_available(cc) ? 128 : + return (amd_mfma_available(cc) || turing_mma_available(cc)) ? 
128 : GGML_CUDA_CC_IS_NVIDIA(cc) && ggml_cuda_highest_compiled_arch(cc) >= GGML_CUDA_CC_VOLTA ? #ifdef GGML_CUDA_FORCE_MMQ 128 : 64; @@ -100,13 +102,13 @@ static int get_mmq_x_max_host(const int cc) { } static constexpr __device__ int get_mmq_x_max_device() { -#ifdef NEW_MMA_AVAILABLE +#if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) return 128; -#else // NEW_MMA_AVAILABLE +#else // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) -#if defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__) - return 128; -#else // defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__) +#if defined(GGML_USE_HIP) + return 64; +#else // defined(GGML_USE_HIP) #if __CUDA_ARCH__ >= GGML_CUDA_CC_VOLTA #ifdef GGML_CUDA_FORCE_MMQ @@ -115,12 +117,11 @@ static constexpr __device__ int get_mmq_x_max_device() { return MMQ_DP4A_MAX_BATCH_SIZE; #endif // GGML_CUDA_FORCE_MMQ #else // __CUDA_ARCH__ >= GGML_CUDA_CC_VOLTA - return 64; #endif // __CUDA_ARCH__ >= GGML_CUDA_CC_VOLTA -#endif // defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__) -#endif // NEW_MMA_AVAILABLE +#endif // defined(GGML_USE_HIP) +#endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) } static int get_mmq_y_host(const int cc) { @@ -129,7 +130,7 @@ static int get_mmq_y_host(const int cc) { } static constexpr __device__ int get_mmq_y_device() { -#if defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__) +#if defined(GGML_USE_HIP) #if defined(RDNA1) return 64; #else @@ -141,19 +142,28 @@ static constexpr __device__ int get_mmq_y_device() { #else return 64; #endif // __CUDA_ARCH__ >= GGML_CUDA_CC_VOLTA -#endif // defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__) +#endif // defined(GGML_USE_HIP) } -#define MMQ_DP4A_TXS_Q4_0 tile_x_sizes{mmq_y*WARP_SIZE + mmq_y, mmq_y*WARP_SIZE/QI4_0 + mmq_y/QI4_0, 0} -#define MMQ_DP4A_TXS_Q4_1 tile_x_sizes{mmq_y*WARP_SIZE + mmq_y, mmq_y*WARP_SIZE/QI4_1 + mmq_y/QI4_1, 0} -#define MMQ_DP4A_TXS_Q8_0 tile_x_sizes{mmq_y*WARP_SIZE*2 + mmq_y, mmq_y*WARP_SIZE*2/QI8_0 + mmq_y/(QI8_0/2), 0} -#define MMQ_DP4A_TXS_Q8_0_16 tile_x_sizes{mmq_y*WARP_SIZE*2 + mmq_y, mmq_y*WARP_SIZE*4/QI8_0 + mmq_y/(QI8_0/4), 0} -#define MMQ_DP4A_TXS_Q8_1 tile_x_sizes{mmq_y*WARP_SIZE*2 + mmq_y, mmq_y*WARP_SIZE*2/QI8_1 + mmq_y/(QI8_1/2), 0} -#define MMQ_DP4A_TXS_Q2_K tile_x_sizes{mmq_y*WARP_SIZE*2 + mmq_y, mmq_y*WARP_SIZE + mmq_y, 0} -#define MMQ_DP4A_TXS_Q3_K tile_x_sizes{mmq_y*WARP_SIZE*2 + mmq_y, mmq_y, mmq_y*WARP_SIZE/8 + mmq_y/8} -#define MMQ_DP4A_TXS_Q4_K tile_x_sizes{mmq_y*WARP_SIZE + mmq_y, mmq_y*WARP_SIZE/QI4_K, mmq_y*WARP_SIZE/8 + mmq_y/8} -#define MMQ_DP4A_TXS_Q5_K tile_x_sizes{mmq_y*WARP_SIZE*2 + mmq_y, mmq_y*WARP_SIZE/QI5_K + mmq_y/QI5_K, mmq_y*WARP_SIZE/8 + mmq_y/8} -#define MMQ_DP4A_TXS_Q6_K tile_x_sizes{mmq_y*WARP_SIZE*2 + mmq_y, mmq_y*WARP_SIZE/QI6_K + mmq_y/QI6_K, mmq_y*WARP_SIZE/8 + mmq_y/8} +// Decouple shared memory tile sizes from WARP_SIZE to allow for different warp sizes. +// The K dimension of the tiles has either, +// 1*MMQ_TILE_NE_K==32 (always for TILE_Y_K) or 2*MMQ_TILE_NE_K==64 (typically for TILE_X_K), +// 32 bit elements for the quantized data (does not include scales). +// In other words, the size of the quantized data in the K dimension is a multiple of MMQ_TILE_NE_K. +// The final tile size in K direction is padded to avoid shared memory bank conflicts, +// in terms of 32 bit elements that means K % 2 == 1 for dp4a or K % 8 == 4 for mma. 
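To make the padding rule above concrete, here is a compile-time C++ sketch with the constants inlined (it assumes QI8_0 == 8, i.e. a Q8_0 block's 32 int8 quants viewed as 8 32-bit ints); it mirrors the "Wrong padding." static_asserts that follow:

// mma tile for Q8_0: 2*MMQ_TILE_NE_K + 2*MMQ_TILE_NE_K/QI8_0 + 4 = 64 + 8 + 4 = 76
constexpr int tile_ne_k         = 32;                              // MMQ_TILE_NE_K
constexpr int mma_tile_x_k_q8_0 = 2*tile_ne_k + 2*tile_ne_k/8 + 4; // == 76
static_assert(mma_tile_x_k_q8_0 % 8 == 4, "mma tiles: K % 8 == 4");

// dp4a tile for Q8_0: per-row stride 2*MMQ_TILE_NE_K + 1 = 65 (odd), matching
// the x_qs[i*(2*MMQ_TILE_NE_K + 1) + ...] indexing used below
constexpr int dp4a_stride_q8_0  = 2*tile_ne_k + 1;                 // == 65
static_assert(dp4a_stride_q8_0 % 2 == 1, "dp4a tiles: K % 2 == 1");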
+#define MMQ_TILE_NE_K 32 + +#define MMQ_DP4A_TXS_Q4_0 tile_x_sizes{mmq_y*MMQ_TILE_NE_K + mmq_y, mmq_y*MMQ_TILE_NE_K/QI4_0 + mmq_y/QI4_0, 0} +#define MMQ_DP4A_TXS_Q4_1 tile_x_sizes{mmq_y*MMQ_TILE_NE_K + mmq_y, mmq_y*MMQ_TILE_NE_K/QI4_1 + mmq_y/QI4_1, 0} +#define MMQ_DP4A_TXS_Q8_0 tile_x_sizes{mmq_y*MMQ_TILE_NE_K*2 + mmq_y, mmq_y*MMQ_TILE_NE_K*2/QI8_0 + mmq_y/(QI8_0/2), 0} +#define MMQ_DP4A_TXS_Q8_0_16 tile_x_sizes{mmq_y*MMQ_TILE_NE_K*2 + mmq_y, mmq_y*MMQ_TILE_NE_K*4/QI8_0 + mmq_y/(QI8_0/4), 0} +#define MMQ_DP4A_TXS_Q8_1 tile_x_sizes{mmq_y*MMQ_TILE_NE_K*2 + mmq_y, mmq_y*MMQ_TILE_NE_K*2/QI8_1 + mmq_y/(QI8_1/2), 0} +#define MMQ_DP4A_TXS_Q2_K tile_x_sizes{mmq_y*MMQ_TILE_NE_K*2 + mmq_y, mmq_y*MMQ_TILE_NE_K + mmq_y, 0} +#define MMQ_DP4A_TXS_Q3_K tile_x_sizes{mmq_y*MMQ_TILE_NE_K*2 + mmq_y, mmq_y, mmq_y*MMQ_TILE_NE_K/8 + mmq_y/8} +#define MMQ_DP4A_TXS_Q4_K tile_x_sizes{mmq_y*MMQ_TILE_NE_K + mmq_y, mmq_y*MMQ_TILE_NE_K/QI4_K, mmq_y*MMQ_TILE_NE_K/8 + mmq_y/8} +#define MMQ_DP4A_TXS_Q5_K tile_x_sizes{mmq_y*MMQ_TILE_NE_K*2 + mmq_y, mmq_y*MMQ_TILE_NE_K/QI5_K + mmq_y/QI5_K, mmq_y*MMQ_TILE_NE_K/8 + mmq_y/8} +#define MMQ_DP4A_TXS_Q6_K tile_x_sizes{mmq_y*MMQ_TILE_NE_K*2 + mmq_y, mmq_y*MMQ_TILE_NE_K/QI6_K + mmq_y/QI6_K, mmq_y*MMQ_TILE_NE_K/8 + mmq_y/8} static constexpr __host__ __device__ tile_x_sizes mmq_get_dp4a_tile_x_sizes(ggml_type type, int mmq_y) { switch (type) { @@ -162,6 +172,7 @@ static constexpr __host__ __device__ tile_x_sizes mmq_get_dp4a_tile_x_sizes(ggml case GGML_TYPE_Q5_0: return MMQ_DP4A_TXS_Q8_0; case GGML_TYPE_Q5_1: return MMQ_DP4A_TXS_Q8_1; case GGML_TYPE_Q8_0: return MMQ_DP4A_TXS_Q8_0; + case GGML_TYPE_MXFP4: return MMQ_DP4A_TXS_Q8_1; case GGML_TYPE_Q2_K: return MMQ_DP4A_TXS_Q2_K; case GGML_TYPE_Q3_K: return MMQ_DP4A_TXS_Q3_K; case GGML_TYPE_Q4_K: return MMQ_DP4A_TXS_Q4_K; @@ -179,11 +190,11 @@ static constexpr __host__ __device__ tile_x_sizes mmq_get_dp4a_tile_x_sizes(ggml } } -#define MMQ_MMA_TILE_X_K_Q8_0 (2*WARP_SIZE + 2*WARP_SIZE/QI8_0 + 4) -#define MMQ_MMA_TILE_X_K_Q8_1 (2*WARP_SIZE + 2*WARP_SIZE/QI8_0 + 4) -#define MMQ_MMA_TILE_X_K_Q2_K (2*WARP_SIZE + WARP_SIZE + 4) -#define MMQ_MMA_TILE_X_K_Q3_K (2*WARP_SIZE + WARP_SIZE/2 + 4) -#define MMQ_MMA_TILE_X_K_Q6_K (2*WARP_SIZE + WARP_SIZE/QI6_K + WARP_SIZE/8 + 7) +#define MMQ_MMA_TILE_X_K_Q8_0 (2*MMQ_TILE_NE_K + 2*MMQ_TILE_NE_K/QI8_0 + 4) +#define MMQ_MMA_TILE_X_K_Q8_1 (2*MMQ_TILE_NE_K + 2*MMQ_TILE_NE_K/QI8_0 + 4) +#define MMQ_MMA_TILE_X_K_Q2_K (2*MMQ_TILE_NE_K + MMQ_TILE_NE_K + 4) +#define MMQ_MMA_TILE_X_K_Q3_K (2*MMQ_TILE_NE_K + MMQ_TILE_NE_K/2 + 4) +#define MMQ_MMA_TILE_X_K_Q6_K (2*MMQ_TILE_NE_K + MMQ_TILE_NE_K/QI6_K + MMQ_TILE_NE_K/8 + 7) static_assert(MMQ_MMA_TILE_X_K_Q8_0 % 8 == 4, "Wrong padding."); static_assert(MMQ_MMA_TILE_X_K_Q8_1 % 8 == 4, "Wrong padding."); @@ -198,6 +209,7 @@ static constexpr __host__ __device__ int mmq_get_mma_tile_x_k(ggml_type type) { case GGML_TYPE_Q5_0: return MMQ_MMA_TILE_X_K_Q8_0; case GGML_TYPE_Q5_1: return MMQ_MMA_TILE_X_K_Q8_1; case GGML_TYPE_Q8_0: return MMQ_MMA_TILE_X_K_Q8_0; + case GGML_TYPE_MXFP4: return MMQ_MMA_TILE_X_K_Q8_1; case GGML_TYPE_Q2_K: return MMQ_MMA_TILE_X_K_Q2_K; case GGML_TYPE_Q3_K: return MMQ_MMA_TILE_X_K_Q3_K; case GGML_TYPE_Q4_K: return MMQ_MMA_TILE_X_K_Q8_1; @@ -215,42 +227,76 @@ static constexpr __host__ __device__ int mmq_get_mma_tile_x_k(ggml_type type) { } } -#define MMQ_TILE_Y_K (WARP_SIZE + WARP_SIZE/QI8_1) +// block_q8_1_mmq has (128 8-bit ints == 32 32-bit ints + 4 32-bit scales) +#define MMQ_TILE_Y_K (MMQ_TILE_NE_K + MMQ_TILE_NE_K/QI8_1) static int 
mmq_get_granularity_host(const int mmq_x, const int cc) { - return new_mma_available(cc) && mmq_x >= 48 ? 16 : 8; + if (amd_mfma_available(cc)) { + return mmq_x >= 128 ? 32 : 16; + } else if (turing_mma_available(cc) && mmq_x >= 48) { + return 16; + } else { + return 8; + } } -#ifdef NEW_MMA_AVAILABLE +#if defined(AMD_MFMA_AVAILABLE) +static constexpr __device__ int mmq_get_granularity_device(const int mmq_x) { + return mmq_x >= 128 ? 32 : 16; +} +#elif defined(TURING_MMA_AVAILABLE) static constexpr __device__ int mmq_get_granularity_device(const int mmq_x) { return mmq_x >= 48 ? 16 : 8; } #else -static constexpr __device__ int mmq_get_granularity_device(const int /* mmq_x */) { +static constexpr __device__ int mmq_get_granularity_device(const int /*mmq_x*/) { return 8; } -#endif // NEW_MMA_AVAILABLE +#endif // AMD_MFMA_AVAILABLE + +#if defined(GGML_USE_HIP) +static int mmq_get_nwarps_host(const int cc, const int warp_size) { + return amd_mfma_available(cc) ? 8 : 256/warp_size; +} +#else +static int mmq_get_nwarps_host(const int /*cc*/, const int warp_size) { + return 256/warp_size; +} +#endif // (GGML_USE_HIP) + +static constexpr __device__ int mmq_get_nwarps_device() { +#if defined(AMD_MFMA_AVAILABLE) + return 8; +#else + return 256/ggml_cuda_get_physical_warp_size(); +#endif // AMD_MFMA_AVAILABLE +} // ------------------------------------------------------------ -template static __device__ __forceinline__ void load_tiles_q4_0( +template static __device__ __forceinline__ void load_tiles_q4_0( const char * __restrict__ x, int * __restrict__ x_tile, const int kbx0, const int i_max, const int stride) { + constexpr int nwarps = mmq_get_nwarps_device(); + constexpr int warp_size = ggml_cuda_get_physical_warp_size(); -#ifdef NEW_MMA_AVAILABLE +#if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) int * x_qs = (int *) x_tile; - float * x_df = (float *) (x_qs + 2*WARP_SIZE); + float * x_df = (float *) (x_qs + 2*MMQ_TILE_NE_K); #else constexpr tile_x_sizes txs = mmq_get_dp4a_tile_x_sizes(GGML_TYPE_Q4_0, mmq_y); int * x_qs = (int *) x_tile; float * x_df = (float *) (x_qs + txs.qs); -#endif // NEW_MMA_AVAILABLE +#endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) - const int kbx = threadIdx.x / QI4_0; - const int kqsx = threadIdx.x % QI4_0; + constexpr int threads_per_row = MMQ_ITER_K / (4 * QR4_0); + constexpr int nrows = warp_size / threads_per_row; + const int txi = warp_size > threads_per_row ? threadIdx.x % threads_per_row : threadIdx.x; + const int kbx = txi / QI4_0; + const int kqsx = txi % QI4_0; #pragma unroll - for (int i0 = 0; i0 < mmq_y; i0 += nwarps) { - int i = i0 + threadIdx.y; + for (int i0 = 0; i0 < mmq_y; i0 += nrows*nwarps) { + int i = i0 + (nrows == 1 ? 
threadIdx.y : threadIdx.y*nrows + threadIdx.x/threads_per_row); if (need_check) { i = min(i, i_max); @@ -259,20 +305,21 @@ template static __device__ __forceinlin const block_q4_0 * bxi = (const block_q4_0 *) x + kbx0 + i*stride + kbx; const int qs0 = get_int_b2(bxi->qs, kqsx); -#ifdef NEW_MMA_AVAILABLE +#if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) x_qs[i*MMQ_MMA_TILE_X_K_Q8_0 + kbx*(2*QI4_0) + kqsx + 0] = __vsubss4((qs0 >> 0) & 0x0F0F0F0F, 0x08080808); x_qs[i*MMQ_MMA_TILE_X_K_Q8_0 + kbx*(2*QI4_0) + kqsx + QI4_0] = __vsubss4((qs0 >> 4) & 0x0F0F0F0F, 0x08080808); #else - x_qs[i*(WARP_SIZE + 1) + threadIdx.x] = qs0; -#endif // NEW_MMA_AVAILABLE + x_qs[i*(MMQ_TILE_NE_K + 1) + txi] = qs0; +#endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) } - const int blocks_per_tile_x_row = WARP_SIZE / QI4_0; + constexpr int blocks_per_tile_x_row = MMQ_TILE_NE_K / QI4_0; + constexpr int rows_per_warp = warp_size / blocks_per_tile_x_row; const int kbxd = threadIdx.x % blocks_per_tile_x_row; #pragma unroll - for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI4_0) { - int i = i0 + threadIdx.y * QI4_0 + threadIdx.x / blocks_per_tile_x_row; + for (int i0 = 0; i0 < mmq_y; i0 += nwarps * rows_per_warp) { + int i = i0 + threadIdx.y * rows_per_warp + threadIdx.x / blocks_per_tile_x_row; if (need_check) { i = min(i, i_max); @@ -280,17 +327,19 @@ template static __device__ __forceinlin const block_q4_0 * bxi = (const block_q4_0 *) x + kbx0 + i*stride + kbxd; -#ifdef NEW_MMA_AVAILABLE - x_df[i*MMQ_MMA_TILE_X_K_Q8_0 + kbxd] = bxi->d; +#if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) + x_df[i*MMQ_MMA_TILE_X_K_Q8_0 + kbxd] = bxi->d; #else - x_df[i*(WARP_SIZE/QI4_0) + i/QI4_0 + kbxd] = bxi->d; -#endif // NEW_MMA_AVAILABLE + x_df[i*(MMQ_TILE_NE_K/QI4_0) + i/QI4_0 + kbxd] = bxi->d; +#endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) } } -template +template static __device__ __forceinline__ void vec_dot_q4_0_q8_1_dp4a( const int * __restrict__ x, const int * __restrict__ y, float * __restrict__ sum, const int k00) { + constexpr int nwarps = mmq_get_nwarps_device(); + constexpr int warp_size = ggml_cuda_get_physical_warp_size(); constexpr tile_x_sizes txs = mmq_get_dp4a_tile_x_sizes(GGML_TYPE_Q4_0, mmq_y); const int * x_qs = (const int *) x; @@ -299,7 +348,7 @@ static __device__ __forceinline__ void vec_dot_q4_0_q8_1_dp4a( const half2 * y_ds = (const half2 *) y; // #pragma unroll - for (int k01 = 0; k01 < WARP_SIZE; k01 += QR4_0*VDR_Q4_0_Q8_1_MMQ) { + for (int k01 = 0; k01 < MMQ_TILE_NE_K; k01 += QR4_0*VDR_Q4_0_Q8_1_MMQ) { const int k0 = k00 + k01; #pragma unroll @@ -307,7 +356,7 @@ static __device__ __forceinline__ void vec_dot_q4_0_q8_1_dp4a( const int j = j0 + threadIdx.y; #pragma unroll - for (int i0 = 0; i0 < mmq_y; i0 += WARP_SIZE) { + for (int i0 = 0; i0 < mmq_y; i0 += warp_size) { const int i = i0 + threadIdx.x; const int kyqs = QI8_1 * ((k01/2) / (QI8_1/2)) + (k01/2) % (QI8_1/2); @@ -320,32 +369,37 @@ static __device__ __forceinline__ void vec_dot_q4_0_q8_1_dp4a( u[2*l+1] = y_qs[j*MMQ_TILE_Y_K + kyqs + (l + QI4_0)]; } - sum[j0/nwarps*mmq_y/WARP_SIZE + i0/WARP_SIZE] += vec_dot_q4_0_q8_1_impl - (&x_qs[i*(WARP_SIZE + 1) + k0/QR4_0], u, - x_df[i*(WARP_SIZE/QI4_0) + i/QI4_0 + k0/(QR4_0*QI4_0)], y_ds[j*MMQ_TILE_Y_K + k01/QI8_1]); + sum[j0/nwarps*mmq_y/warp_size + i0/warp_size] += vec_dot_q4_0_q8_1_impl + (&x_qs[i*(MMQ_TILE_NE_K + 1) + k0/QR4_0], u, + x_df[i*(MMQ_TILE_NE_K/QI4_0) + i/QI4_0 + k0/(QR4_0*QI4_0)], y_ds[j*MMQ_TILE_Y_K + k01/QI8_1]); } } } } 
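The row split used by the loader above generalizes over warp sizes: threads_per_row lanes cooperate on one tile row and any remaining lanes start on the next row, which is what the (nrows == 1 ? ... : ...) index expression computes. A small host-side C++ sketch of that arithmetic (assuming MMQ_ITER_K == 256, consistent with the Q8_0 loader's "MMQ_ITER_K / (4 * QR8_0) == 64" comment further down, and QR4_0 == 2, so threads_per_row == 32 for Q4_0):

#include <cstdio>
#include <initializer_list>

int main() {
    constexpr int threads_per_row = 256 / (4 * 2); // MMQ_ITER_K / (4 * QR4_0) == 32
    for (int warp_size : {32, 64}) {               // NVIDIA warp vs. CDNA wavefront
        const int nrows = warp_size / threads_per_row;
        printf("warp_size=%d -> nrows=%d\n", warp_size, nrows);
        for (int lane = 0; lane < warp_size; lane += threads_per_row) {
            // txi and the per-iteration row offset, as in load_tiles_q4_0
            const int txi = warp_size > threads_per_row ? lane % threads_per_row : lane;
            const int row = nrows == 1 ? 0 : lane / threads_per_row;
            printf("  lanes %d..%d -> row offset %d (txi from %d)\n",
                   lane, lane + threads_per_row - 1, row, txi);
        }
    }
    return 0;
}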
-template static __device__ __forceinline__ void load_tiles_q4_1( +template static __device__ __forceinline__ void load_tiles_q4_1( const char * __restrict__ x, int * __restrict__ x_tile, const int kbx0, const int i_max, const int stride) { + constexpr int nwarps = mmq_get_nwarps_device(); + constexpr int warp_size = ggml_cuda_get_physical_warp_size(); -#ifdef NEW_MMA_AVAILABLE +#if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) int * x_qs = (int *) x_tile; - half2 * x_dm = (half2 *) (x_qs + 2*WARP_SIZE); + half2 * x_dm = (half2 *) (x_qs + 2*MMQ_TILE_NE_K); #else constexpr tile_x_sizes txs = mmq_get_dp4a_tile_x_sizes(GGML_TYPE_Q4_1, mmq_y); int * x_qs = (int *) x_tile; half2 * x_dm = (half2 *) (x_qs + txs.qs); -#endif // NEW_MMA_AVAILABLE +#endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) - const int kbx = threadIdx.x / QI4_1; - const int kqsx = threadIdx.x % QI4_1; + constexpr int threads_per_row = MMQ_ITER_K / (4 * QR4_1); + constexpr int nrows = warp_size / threads_per_row; + const int txi = warp_size > threads_per_row ? threadIdx.x % threads_per_row : threadIdx.x; + const int kbx = txi / QI4_1; + const int kqsx = txi % QI4_1; #pragma unroll - for (int i0 = 0; i0 < mmq_y; i0 += nwarps) { - int i = i0 + threadIdx.y; + for (int i0 = 0; i0 < mmq_y; i0 += nrows*nwarps) { + int i = i0 + (nrows == 1 ? threadIdx.y : threadIdx.y*nrows + threadIdx.x/threads_per_row); if (need_check) { i = min(i, i_max); @@ -354,20 +408,21 @@ template static __device__ __forceinlin const block_q4_1 * bxi = (const block_q4_1 *) x + kbx0 + i*stride + kbx; const int qs0 = get_int_b4(bxi->qs, kqsx); -#ifdef NEW_MMA_AVAILABLE +#if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) x_qs[i*MMQ_MMA_TILE_X_K_Q8_1 + kbx*(2*QI4_1) + kqsx + 0] = (qs0 >> 0) & 0x0F0F0F0F; x_qs[i*MMQ_MMA_TILE_X_K_Q8_1 + kbx*(2*QI4_1) + kqsx + QI4_1] = (qs0 >> 4) & 0x0F0F0F0F; #else - x_qs[i*(WARP_SIZE + 1) + threadIdx.x] = qs0; -#endif // NEW_MMA_AVAILABLE + x_qs[i*(MMQ_TILE_NE_K + 1) + txi] = qs0; +#endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) } - const int blocks_per_tile_x_row = WARP_SIZE / QI4_1; + constexpr int blocks_per_tile_x_row = MMQ_TILE_NE_K / QI4_1; + constexpr int rows_per_warp = warp_size / blocks_per_tile_x_row; const int kbxd = threadIdx.x % blocks_per_tile_x_row; #pragma unroll - for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI4_1) { - int i = i0 + threadIdx.y * QI4_1 + threadIdx.x / blocks_per_tile_x_row; + for (int i0 = 0; i0 < mmq_y; i0 += nwarps * rows_per_warp) { + int i = i0 + threadIdx.y * rows_per_warp + threadIdx.x / blocks_per_tile_x_row; if (need_check) { i = min(i, i_max); @@ -375,17 +430,19 @@ template static __device__ __forceinlin const block_q4_1 * bxi = (const block_q4_1 *) x + kbx0 + i*stride + kbxd; -#ifdef NEW_MMA_AVAILABLE - x_dm[i*MMQ_MMA_TILE_X_K_Q8_1 + kbxd] = bxi->dm; +#if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) + x_dm[i*MMQ_MMA_TILE_X_K_Q8_1 + kbxd] = bxi->dm; #else - x_dm[i*(WARP_SIZE/QI4_1) + i/QI4_1 + kbxd] = bxi->dm; -#endif // NEW_MMA_AVAILABLE + x_dm[i*(MMQ_TILE_NE_K/QI4_1) + i/QI4_1 + kbxd] = bxi->dm; +#endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) } } -template +template static __device__ __forceinline__ void vec_dot_q4_1_q8_1_dp4a( const int * __restrict__ x, const int * __restrict__ y, float * __restrict__ sum, const int k00) { + constexpr int nwarps = mmq_get_nwarps_device(); + constexpr int warp_size = ggml_cuda_get_physical_warp_size(); constexpr tile_x_sizes txs = 
mmq_get_dp4a_tile_x_sizes(GGML_TYPE_Q4_1, mmq_y); const int * x_qs = (const int *) x; @@ -394,7 +451,7 @@ static __device__ __forceinline__ void vec_dot_q4_1_q8_1_dp4a( const half2 * y_ds = (const half2 *) y; // #pragma unroll - for (int k01 = 0; k01 < WARP_SIZE; k01 += QR4_1*VDR_Q4_1_Q8_1_MMQ) { + for (int k01 = 0; k01 < MMQ_TILE_NE_K; k01 += QR4_1*VDR_Q4_1_Q8_1_MMQ) { const int k0 = k00 + k01; #pragma unroll @@ -402,7 +459,7 @@ static __device__ __forceinline__ void vec_dot_q4_1_q8_1_dp4a( const int j = j0 + threadIdx.y; #pragma unroll - for (int i0 = 0; i0 < mmq_y; i0 += WARP_SIZE) { + for (int i0 = 0; i0 < mmq_y; i0 += warp_size) { const int i = i0 + threadIdx.x; const int kyqs = QI8_1 * ((k01/2) / (QI8_1/2)) + (k01/2) % (QI8_1/2); @@ -415,32 +472,37 @@ static __device__ __forceinline__ void vec_dot_q4_1_q8_1_dp4a( u[2*l+1] = y_qs[j*MMQ_TILE_Y_K + kyqs + (l + QI4_1)]; } - sum[j0/nwarps*mmq_y/WARP_SIZE + i0/WARP_SIZE] += vec_dot_q4_1_q8_1_impl - (&x_qs[i*(WARP_SIZE + 1) + k0/QR4_1], u, - x_dm[i*(WARP_SIZE/QI4_1) + i/QI4_1 + k0/(QR4_1*QI4_1)], y_ds[j*MMQ_TILE_Y_K + k01/QI8_1]); + sum[j0/nwarps*mmq_y/warp_size + i0/warp_size] += vec_dot_q4_1_q8_1_impl + (&x_qs[i*(MMQ_TILE_NE_K + 1) + k0/QR4_1], u, + x_dm[i*(MMQ_TILE_NE_K/QI4_1) + i/QI4_1 + k0/(QR4_1*QI4_1)], y_ds[j*MMQ_TILE_Y_K + k01/QI8_1]); } } } } -template static __device__ __forceinline__ void load_tiles_q5_0( +template static __device__ __forceinline__ void load_tiles_q5_0( const char * __restrict__ x, int * __restrict__ x_tile, const int kbx0, const int i_max, const int stride) { + constexpr int nwarps = mmq_get_nwarps_device(); + constexpr int warp_size = ggml_cuda_get_physical_warp_size(); -#ifdef NEW_MMA_AVAILABLE +#if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) int * x_qs = (int *) x_tile; - float * x_df = (float *) (x_qs + WARP_SIZE*2); + float * x_df = (float *) (x_qs + MMQ_TILE_NE_K*2); #else constexpr tile_x_sizes txs = mmq_get_dp4a_tile_x_sizes(GGML_TYPE_Q5_0, mmq_y); int * x_qs = (int *) x_tile; float * x_df = (float *) (x_qs + txs.qs); -#endif // NEW_MMA_AVAILABLE +#endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) - const int kbx = threadIdx.x / QI5_0; - const int kqsx = threadIdx.x % QI5_0; + constexpr int threads_per_row = MMQ_ITER_K / (4 * QR5_0); + constexpr int nrows = warp_size / threads_per_row; + const int txi = warp_size > threads_per_row ? threadIdx.x % threads_per_row : threadIdx.x; + const int kbx = txi / QI5_0; + const int kqsx = txi % QI5_0; #pragma unroll - for (int i0 = 0; i0 < mmq_y; i0 += nwarps) { - int i = i0 + threadIdx.y; + for (int i0 = 0; i0 < mmq_y; i0 += nrows*nwarps) { + int i = i0 + (nrows == 1 ? 
threadIdx.y : threadIdx.y*nrows + threadIdx.x/threads_per_row); if (need_check) { i = min(i, i_max); @@ -449,7 +511,7 @@ template static __device__ __forceinlin const block_q5_0 * bxi = (const block_q5_0 *) x + kbx0 + i*stride + kbx; const int ql = get_int_b2(bxi->qs, kqsx); - const int qh = get_int_b2(bxi->qh, 0) >> (4 * (threadIdx.x % QI5_0)); + const int qh = get_int_b2(bxi->qh, 0) >> (4 * kqsx); int qs0 = (ql >> 0) & 0x0F0F0F0F; qs0 |= (qh << 4) & 0x00000010; // 0 -> 4 @@ -465,21 +527,22 @@ template static __device__ __forceinlin qs1 |= (qh << 9) & 0x10000000; // 19 -> 28 qs1 = __vsubss4(qs1, 0x10101010); // subtract 16 -#ifdef NEW_MMA_AVAILABLE +#if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) x_qs[i*MMQ_MMA_TILE_X_K_Q8_0 + kbx*(2*QI5_0) + kqsx + 0] = qs0; x_qs[i*MMQ_MMA_TILE_X_K_Q8_0 + kbx*(2*QI5_0) + kqsx + QI5_0] = qs1; #else - x_qs[i*(2*WARP_SIZE + 1) + kbx*(2*QI5_0) + kqsx + 0] = qs0; - x_qs[i*(2*WARP_SIZE + 1) + kbx*(2*QI5_0) + kqsx + QI5_0] = qs1; -#endif // NEW_MMA_AVAILABLE + x_qs[i*(2*MMQ_TILE_NE_K + 1) + kbx*(2*QI5_0) + kqsx + 0] = qs0; + x_qs[i*(2*MMQ_TILE_NE_K + 1) + kbx*(2*QI5_0) + kqsx + QI5_0] = qs1; +#endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) } - const int blocks_per_tile_x_row = WARP_SIZE / QI5_0; + constexpr int blocks_per_tile_x_row = MMQ_TILE_NE_K / QI5_0; + constexpr int rows_per_warp = warp_size / blocks_per_tile_x_row; const int kbxd = threadIdx.x % blocks_per_tile_x_row; #pragma unroll - for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI5_0) { - int i = i0 + threadIdx.y * QI5_0 + threadIdx.x / blocks_per_tile_x_row; + for (int i0 = 0; i0 < mmq_y; i0 += nwarps * rows_per_warp) { + int i = i0 + threadIdx.y * rows_per_warp + threadIdx.x / blocks_per_tile_x_row; if (need_check) { i = min(i, i_max); @@ -487,32 +550,37 @@ template static __device__ __forceinlin const block_q5_0 * bxi = (const block_q5_0 *) x + kbx0 + i*stride + kbxd; -#ifdef NEW_MMA_AVAILABLE - x_df[i*MMQ_MMA_TILE_X_K_Q8_0 + kbxd] = bxi->d; +#if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) + x_df[i*MMQ_MMA_TILE_X_K_Q8_0 + kbxd] = bxi->d; #else - x_df[i*(WARP_SIZE/QI5_0) + i/QI5_0 + kbxd] = bxi->d; -#endif // NEW_MMA_AVAILABLE + x_df[i*(MMQ_TILE_NE_K/QI5_0) + i/QI5_0 + kbxd] = bxi->d; +#endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) } } -template static __device__ __forceinline__ void load_tiles_q5_1( +template static __device__ __forceinline__ void load_tiles_q5_1( const char * __restrict__ x, int * __restrict__ x_tile, const int kbx0, const int i_max, const int stride) { + constexpr int nwarps = mmq_get_nwarps_device(); + constexpr int warp_size = ggml_cuda_get_physical_warp_size(); -#ifdef NEW_MMA_AVAILABLE +#if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) int * x_qs = (int *) x_tile; - half2 * x_dm = (half2 *) (x_qs + 2*WARP_SIZE); + half2 * x_dm = (half2 *) (x_qs + 2*MMQ_TILE_NE_K); #else constexpr tile_x_sizes txs = mmq_get_dp4a_tile_x_sizes(GGML_TYPE_Q5_1, mmq_y); int * x_qs = (int *) x_tile; half2 * x_dm = (half2 *) (x_qs + txs.qs); -#endif // NEW_MMA_AVAILABLE +#endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) - const int kbx = threadIdx.x / QI5_1; - const int kqsx = threadIdx.x % QI5_1; + constexpr int threads_per_row = MMQ_ITER_K / (4 * QR5_1); + constexpr int nrows = warp_size / threads_per_row; + const int txi = warp_size > threads_per_row ? 
threadIdx.x % threads_per_row : threadIdx.x; + const int kbx = txi / QI5_1; + const int kqsx = txi % QI5_1; #pragma unroll - for (int i0 = 0; i0 < mmq_y; i0 += nwarps) { - int i = i0 + threadIdx.y; + for (int i0 = 0; i0 < mmq_y; i0 += nrows*nwarps) { + int i = i0 + (nrows == 1 ? threadIdx.y : threadIdx.y*nrows + threadIdx.x/threads_per_row); if (need_check) { i = min(i, i_max); @@ -521,7 +589,7 @@ template static __device__ __forceinlin const block_q5_1 * bxi = (const block_q5_1 *) x + kbx0 + i*stride + kbx; const int ql = get_int_b4(bxi->qs, kqsx); - const int qh = get_int_b4(bxi->qh, 0) >> (4 * (threadIdx.x % QI5_1)); + const int qh = get_int_b4(bxi->qh, 0) >> (4 * kqsx); int qs0 = (ql >> 0) & 0x0F0F0F0F; qs0 |= (qh << 4) & 0x00000010; // 0 -> 4 @@ -535,21 +603,22 @@ template static __device__ __forceinlin qs1 |= (qh << 2) & 0x00100000; // 18 -> 20 qs1 |= (qh << 9) & 0x10000000; // 19 -> 28 -#ifdef NEW_MMA_AVAILABLE +#if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) x_qs[i*MMQ_MMA_TILE_X_K_Q8_1 + kbx*(2*QI5_1) + kqsx + 0] = qs0; x_qs[i*MMQ_MMA_TILE_X_K_Q8_1 + kbx*(2*QI5_1) + kqsx + QI5_1] = qs1; #else - x_qs[i*(2*WARP_SIZE + 1) + kbx*(2*QI5_1) + kqsx + 0] = qs0; - x_qs[i*(2*WARP_SIZE + 1) + kbx*(2*QI5_1) + kqsx + QI5_1] = qs1; -#endif // NEW_MMA_AVAILABLE + x_qs[i*(2*MMQ_TILE_NE_K + 1) + kbx*(2*QI5_1) + kqsx + 0] = qs0; + x_qs[i*(2*MMQ_TILE_NE_K + 1) + kbx*(2*QI5_1) + kqsx + QI5_1] = qs1; +#endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) } - const int blocks_per_tile_x_row = WARP_SIZE / QI5_1; + constexpr int blocks_per_tile_x_row = MMQ_TILE_NE_K / QI5_1; + constexpr int rows_per_warp = warp_size / blocks_per_tile_x_row; const int kbxd = threadIdx.x % blocks_per_tile_x_row; #pragma unroll - for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI5_1) { - int i = i0 + threadIdx.y * QI5_1 + threadIdx.x / blocks_per_tile_x_row; + for (int i0 = 0; i0 < mmq_y; i0 += nwarps * rows_per_warp) { + int i = i0 + threadIdx.y * rows_per_warp + threadIdx.x / blocks_per_tile_x_row; if (need_check) { i = min(i, i_max); @@ -557,32 +626,38 @@ template static __device__ __forceinlin const block_q5_1 * bxi = (const block_q5_1 *) x + kbx0 + i*stride + kbxd; -#ifdef NEW_MMA_AVAILABLE - x_dm[i*MMQ_MMA_TILE_X_K_Q8_1 + kbxd] = bxi->dm; +#if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) + x_dm[i*MMQ_MMA_TILE_X_K_Q8_1 + kbxd] = bxi->dm; #else - x_dm[i*(WARP_SIZE/QI5_1) + i/QI5_1 + kbxd] = bxi->dm; -#endif // NEW_MMA_AVAILABLE + x_dm[i*(MMQ_TILE_NE_K/QI5_1) + i/QI5_1 + kbxd] = bxi->dm; +#endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) } } -template static __device__ __forceinline__ void load_tiles_q8_0( +template static __device__ __forceinline__ void load_tiles_q8_0( const char * __restrict__ x, int * __restrict__ x_tile, const int kbx0, const int i_max, const int stride) { + constexpr int nwarps = mmq_get_nwarps_device(); + constexpr int warp_size = ggml_cuda_get_physical_warp_size(); -#ifdef NEW_MMA_AVAILABLE +#if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) int * x_qs = (int *) x_tile; - float * x_df = (float *) (x_tile + 2*WARP_SIZE); + float * x_df = (float *) (x_tile + 2*MMQ_TILE_NE_K); #else constexpr tile_x_sizes txs = mmq_get_dp4a_tile_x_sizes(GGML_TYPE_Q8_0, mmq_y); int * x_qs = (int *) x_tile; float * x_df = (float *) (x_qs + txs.qs); -#endif // NEW_MMA_AVAILABLE +#endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) - const int kbx = threadIdx.x / QI8_0; - const int kqsx = threadIdx.x % QI8_0; + // 
MMQ_ITER_K / (4 * QR8_0) == 64 threads per row would be required, but NVIDIA warps have only 32 threads, so threads_per_row is capped at 32 + constexpr int threads_per_row = 32; + constexpr int nrows = warp_size / threads_per_row; + const int txi = warp_size > threads_per_row ? threadIdx.x % threads_per_row : threadIdx.x; + const int kbx = txi / QI8_0; + const int kqsx = txi % QI8_0; #pragma unroll - for (int i0 = 0; i0 < mmq_y; i0 += nwarps) { - int i = i0 + threadIdx.y; + for (int i0 = 0; i0 < mmq_y; i0 += nrows*nwarps) { + int i = i0 + (nrows == 1 ? threadIdx.y : threadIdx.y*nrows + threadIdx.x/threads_per_row); if (need_check) { i = min(i, i_max); @@ -590,21 +665,22 @@ template static __device__ __forceinlin const block_q8_0 * bxi = (const block_q8_0 *) x + kbx0 + i*stride + kbx; -#ifdef NEW_MMA_AVAILABLE - x_qs[i*MMQ_MMA_TILE_X_K_Q8_0 + 0 + threadIdx.x] = get_int_b2(bxi[0].qs, kqsx); - x_qs[i*MMQ_MMA_TILE_X_K_Q8_0 + WARP_SIZE + threadIdx.x] = get_int_b2(bxi[WARP_SIZE/QI8_0].qs, kqsx); +#if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) + x_qs[i*MMQ_MMA_TILE_X_K_Q8_0 + 0 + txi] = get_int_b2(bxi[0].qs, kqsx); + x_qs[i*MMQ_MMA_TILE_X_K_Q8_0 + MMQ_TILE_NE_K + txi] = get_int_b2(bxi[MMQ_TILE_NE_K/QI8_0].qs, kqsx); #else - x_qs[i*(2*WARP_SIZE + 1) + 0 + threadIdx.x] = get_int_b2(bxi[0].qs, kqsx); - x_qs[i*(2*WARP_SIZE + 1) + WARP_SIZE + threadIdx.x] = get_int_b2(bxi[WARP_SIZE/QI8_0].qs, kqsx); -#endif // NEW_MMA_AVAILABLE + x_qs[i*(2*MMQ_TILE_NE_K + 1) + 0 + txi] = get_int_b2(bxi[0].qs, kqsx); + x_qs[i*(2*MMQ_TILE_NE_K + 1) + MMQ_TILE_NE_K + txi] = get_int_b2(bxi[MMQ_TILE_NE_K/QI8_0].qs, kqsx); +#endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) } - const int blocks_per_tile_x_row = 2*WARP_SIZE / QI8_0; + constexpr int blocks_per_tile_x_row = 2*MMQ_TILE_NE_K / QI8_0; + constexpr int rows_per_warp = warp_size / blocks_per_tile_x_row; const int kbxd = threadIdx.x % blocks_per_tile_x_row; #pragma unroll - for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI8_0/2) { - int i = i0 + threadIdx.y * (QI8_0/2) + threadIdx.x / blocks_per_tile_x_row; + for (int i0 = 0; i0 < mmq_y; i0 += nwarps * rows_per_warp) { + int i = i0 + threadIdx.y * rows_per_warp + threadIdx.x / blocks_per_tile_x_row; if (need_check) { i = min(i, i_max); @@ -612,17 +688,84 @@ template static __device__ __forceinlin const block_q8_0 * bxi = (const block_q8_0 *) x + kbx0 + i*stride + kbxd; -#ifdef NEW_MMA_AVAILABLE - x_df[i*MMQ_MMA_TILE_X_K_Q8_0 + kbxd] = bxi->d; +#if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) + x_df[i*MMQ_MMA_TILE_X_K_Q8_0 + kbxd] = bxi->d; #else - x_df[i*(2*WARP_SIZE/QI8_0) + i/(QI8_0/2) + kbxd] = bxi->d; -#endif // NEW_MMA_AVAILABLE + x_df[i*(2*MMQ_TILE_NE_K/QI8_0) + i/(QI8_0/2) + kbxd] = bxi->d; +#endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) } } -template <int mmq_x, int mmq_y, int nwarps> +template <int mmq_y, bool need_check> static __device__ __forceinline__ void load_tiles_mxfp4( + const char * __restrict__ x, int * __restrict__ x_tile, const int kbx0, const int i_max, const int stride) { + constexpr int nwarps = mmq_get_nwarps_device(); + constexpr int warp_size = ggml_cuda_get_physical_warp_size(); + +#if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) + int * x_qs = (int *) x_tile; + float * x_df = (float *) (x_qs + MMQ_TILE_NE_K*2); +#else + constexpr tile_x_sizes txs = mmq_get_dp4a_tile_x_sizes(GGML_TYPE_MXFP4, mmq_y); + int * x_qs = (int *) x_tile; + float * x_df = (float *) (x_qs + txs.qs); +#endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) + + constexpr int threads_per_row = MMQ_ITER_K / (4 * QR_MXFP4); + constexpr int 
nrows = warp_size / threads_per_row; + const int txi = warp_size > threads_per_row ? threadIdx.x % threads_per_row : threadIdx.x; + const int kbx = txi / QI_MXFP4; + const int kqsx = txi % QI_MXFP4; + +#pragma unroll + for (int i0 = 0; i0 < mmq_y; i0 += nrows*nwarps) { + int i = i0 + (nrows == 1 ? threadIdx.y : threadIdx.y*nrows + threadIdx.x/threads_per_row); + + if (need_check) { + i = min(i, i_max); + } + + const block_mxfp4 * bxi = (const block_mxfp4 *) x + kbx0 + i*stride + kbx; + + const int aux_q4 = get_int_b1(bxi->qs, kqsx); + const int2 v = get_int_from_table_16(aux_q4, kvalues_mxfp4); + const int k0 = kbx * (2 * QI_MXFP4) + kqsx; + +#if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) + x_qs[i*MMQ_MMA_TILE_X_K_Q8_1 + k0 + 0] = v.x; + x_qs[i*MMQ_MMA_TILE_X_K_Q8_1 + k0 + QI_MXFP4] = v.y; +#else + x_qs[i*(2*MMQ_TILE_NE_K + 1) + k0 + 0] = v.x; + x_qs[i*(2*MMQ_TILE_NE_K + 1) + k0 + QI_MXFP4] = v.y; +#endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) + } + + constexpr int blocks_per_tile_x_row = MMQ_TILE_NE_K / QI_MXFP4; + constexpr int rows_per_warp = warp_size / blocks_per_tile_x_row; + const int kbxd = threadIdx.x % blocks_per_tile_x_row; + +#pragma unroll + for (int i0 = 0; i0 < mmq_y; i0 += nwarps * rows_per_warp) { + int i = i0 + threadIdx.y * rows_per_warp + threadIdx.x / blocks_per_tile_x_row; + + if (need_check) { + i = min(i, i_max); + } + + const block_mxfp4 * bxi = (const block_mxfp4 *) x + kbx0 + i*stride + kbxd; + +#if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) + x_df[i*MMQ_MMA_TILE_X_K_Q8_1 + kbxd] = ggml_cuda_e8m0_to_fp32(bxi->e)*0.5f; +#else + x_df[i*(MMQ_TILE_NE_K/QI_MXFP4) + i/QI_MXFP4 + kbxd] = ggml_cuda_e8m0_to_fp32(bxi->e)*0.5f; +#endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) + } +} + +template static __device__ __forceinline__ void vec_dot_q8_0_q8_1_dp4a( const int * __restrict__ x, const int * __restrict__ y, float * __restrict__ sum, const int k00) { + constexpr int nwarps = mmq_get_nwarps_device(); + constexpr int warp_size = ggml_cuda_get_physical_warp_size(); constexpr tile_x_sizes txs = mmq_get_dp4a_tile_x_sizes(GGML_TYPE_Q8_0, mmq_y); const int * x_qs = (const int *) x; @@ -631,7 +774,7 @@ static __device__ __forceinline__ void vec_dot_q8_0_q8_1_dp4a( const float * y_df = (const float *) y; // #pragma unroll - for (int k01 = 0; k01 < WARP_SIZE; k01 += VDR_Q8_0_Q8_1_MMQ) { + for (int k01 = 0; k01 < MMQ_TILE_NE_K; k01 += VDR_Q8_0_Q8_1_MMQ) { const int k0 = k00 + k01; #pragma unroll @@ -639,21 +782,76 @@ static __device__ __forceinline__ void vec_dot_q8_0_q8_1_dp4a( const int j = j0 + threadIdx.y; #pragma unroll - for (int i0 = 0; i0 < mmq_y; i0 += WARP_SIZE) { + for (int i0 = 0; i0 < mmq_y; i0 += warp_size) { const int i = i0 + threadIdx.x; - sum[j0/nwarps*mmq_y/WARP_SIZE + i0/WARP_SIZE] += vec_dot_q8_0_q8_1_impl - (&x_qs[i*(2*WARP_SIZE + 1) + k0], &y_qs[j*MMQ_TILE_Y_K + k0 % WARP_SIZE], - x_df[i*(2*WARP_SIZE/QI8_0) + i/(QI8_0/2) + k0/QI8_0], y_df[j*MMQ_TILE_Y_K + (k0/QI8_1) % (WARP_SIZE/QI8_1)]); + sum[j0/nwarps*mmq_y/warp_size + i0/warp_size] += vec_dot_q8_0_q8_1_impl + (&x_qs[i*(2*MMQ_TILE_NE_K + 1) + k0], &y_qs[j*MMQ_TILE_Y_K + k0 % MMQ_TILE_NE_K], + x_df[i*(2*MMQ_TILE_NE_K/QI8_0) + i/(QI8_0/2) + k0/QI8_0], y_df[j*MMQ_TILE_Y_K + (k0/QI8_1) % (MMQ_TILE_NE_K/QI8_1)]); } } } } -template +template static __device__ __forceinline__ void vec_dot_q8_0_q8_1_mma( const int * __restrict__ x, const int * __restrict__ y, float * __restrict__ sum, const int k00) { +#if 
defined(AMD_MFMA_AVAILABLE) + typedef tile<16, 8, int> tile_A; + typedef tile<16, 8, int> tile_B; + typedef tile<16, 16, int> tile_C; + constexpr int granularity = mmq_get_granularity_device(mmq_x); + constexpr int rows_per_warp = granularity; + constexpr int ntx = rows_per_warp/tile_C::I; // Number of x minitiles per warp. + + y += (threadIdx.y % ntx) * (tile_C::J*MMQ_TILE_Y_K); + + const int * x_qs = (const int *) x; + const float * x_df = (const float *) x_qs + 2*MMQ_TILE_NE_K; + const int * y_qs = (const int *) y + 4; + const float * y_df = (const float *) y; + const half2 * y_ds = (const half2 *) y; + + const int i0 = (threadIdx.y / ntx) * rows_per_warp; + + for (int k01 = 0; k01 < MMQ_TILE_NE_K; k01 += QI8_0) { + const int k0 = k00 + k01; + + tile_A A[ntx]; +#pragma unroll + for (int n = 0; n < ntx; ++n) { + load_generic(A[n], x_qs + (i0 + n*tile_A::I)*MMQ_MMA_TILE_X_K_Q8_0 + k0, MMQ_MMA_TILE_X_K_Q8_0); + } + +#pragma unroll + for (int j0 = 0; j0 < mmq_x; j0 += ntx*tile_C::J) { + tile_B B; + load_generic(B, y_qs + j0*MMQ_TILE_Y_K + k01, MMQ_TILE_Y_K); + + float dB; + const int j = j0 + tile_C::get_j(0); + if (ds_layout == MMQ_Q8_1_DS_LAYOUT_D4) { + dB = y_df[j*MMQ_TILE_Y_K + k01/QI8_1]; + } else { + dB = __low2float(y_ds[j*MMQ_TILE_Y_K + k01/QI8_1]); + } + +#pragma unroll + for (int n = 0; n < ntx; ++n) { + tile_C C; + mma(C, A[n], B); + +#pragma unroll + for (int l = 0; l < tile_C::ne; ++l) { + const int i = i0 + n*tile_A::I + tile_C::get_i(l); + const float dA = x_df[i*MMQ_MMA_TILE_X_K_Q8_0 + k0/QI8_0]; + sum[(j0/tile_C::J + n)*tile_C::ne + l] += C.x[l]*dA*dB; + } + } + } + } +#else typedef tile<16, 8, int> tile_A; typedef tile< 8, 8, int> tile_B; typedef tile<16, 8, int> tile_C; @@ -662,23 +860,23 @@ static __device__ __forceinline__ void vec_dot_q8_0_q8_1_mma( constexpr int rows_per_warp = 2 * granularity; constexpr int ntx = rows_per_warp/tile_C::I; // Number of x minitiles per warp. 
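// --- Editorial sketch, not part of the vendored patch ---
// The AMD MFMA branch above runs the integer MMA first and multiplies by the
// block scales afterwards (sum += C.x[l]*dA*dB). That is valid because a q8_0
// block stores x_i = d_x*q_i, so its dot with a q8_1 slice y_i = d_y*p_i
// factors as d_x*d_y*(q . p). A minimal dp4a rendering of the same identity,
// with illustrative names rather than the patch's identifiers:
static __device__ __forceinline__ float block_dot_q8_sketch(
        const int * qx, const int * qy, const float d_x, const float d_y) {
    int acc = 0;
#pragma unroll
    for (int k = 0; k < 8; ++k) {        // 8 ints == 32 packed int8 values
        acc = __dp4a(qx[k], qy[k], acc); // per-byte multiply-accumulate
    }
    return d_x*d_y*float(acc);           // scales factor out of the whole block
}
// --- End editorial sketch ---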
- y += (threadIdx.y % ntx) * (tile_B::I*MMQ_TILE_Y_K); + y += (threadIdx.y % ntx) * (tile_C::J*MMQ_TILE_Y_K); const int * x_qs = (const int *) x; - const float * x_df = (const float *) x_qs + 2*WARP_SIZE; + const float * x_df = (const float *) x_qs + 2*MMQ_TILE_NE_K; const int * y_qs = (const int *) y + 4; const float * y_df = (const float *) y; const half2 * y_ds = (const half2 *) y; - tile_A A[ntx][WARP_SIZE/QI8_0]; - float dA[ntx][tile_C::ne/2][WARP_SIZE/QI8_0]; + tile_A A[ntx][MMQ_TILE_NE_K/QI8_0]; + float dA[ntx][tile_C::ne/2][MMQ_TILE_NE_K/QI8_0]; const int i0 = (threadIdx.y/ntx)*rows_per_warp; #pragma unroll for (int n = 0; n < ntx; ++n) { #pragma unroll - for (int k01 = 0; k01 < WARP_SIZE; k01 += QI8_0) { + for (int k01 = 0; k01 < MMQ_TILE_NE_K; k01 += QI8_0) { const int k0 = k00 + k01; load_ldmatrix(A[n][k01/QI8_0], x_qs + (i0 + n*tile_A::I)*MMQ_MMA_TILE_X_K_Q8_0 + k0, MMQ_MMA_TILE_X_K_Q8_0); @@ -689,7 +887,7 @@ static __device__ __forceinline__ void vec_dot_q8_0_q8_1_mma( const int i = i0 + n*tile_A::I + tile_C::get_i(2*l); #pragma unroll - for (int k01 = 0; k01 < WARP_SIZE; k01 += QI8_0) { + for (int k01 = 0; k01 < MMQ_TILE_NE_K; k01 += QI8_0) { const int k0 = k00 + k01; dA[n][l][k01/QI8_0] = x_df[i*MMQ_MMA_TILE_X_K_Q8_0 + k0/QI8_0]; @@ -700,7 +898,7 @@ static __device__ __forceinline__ void vec_dot_q8_0_q8_1_mma( #pragma unroll for (int j0 = 0; j0 < mmq_x; j0 += ntx*tile_C::J) { #pragma unroll - for (int k01 = 0; k01 < WARP_SIZE; k01 += QI8_0) { + for (int k01 = 0; k01 < MMQ_TILE_NE_K; k01 += QI8_0) { tile_B B; float dB[tile_C::ne/2]; @@ -729,11 +927,14 @@ static __device__ __forceinline__ void vec_dot_q8_0_q8_1_mma( } } } +#endif // defined(AMD_MFMA_AVAILABLE) } -template +template static __device__ __forceinline__ void vec_dot_q8_1_q8_1_dp4a( const int * __restrict__ x, const int * __restrict__ y, float * __restrict__ sum, const int k00) { + constexpr int nwarps = mmq_get_nwarps_device(); + constexpr int warp_size = ggml_cuda_get_physical_warp_size(); constexpr tile_x_sizes txs = mmq_get_dp4a_tile_x_sizes(GGML_TYPE_Q5_1, mmq_y); const int * x_qs = (const int *) x; @@ -742,7 +943,7 @@ static __device__ __forceinline__ void vec_dot_q8_1_q8_1_dp4a( const half2 * y_ds = (const half2 *) y; // #pragma unroll - for (int k01 = 0; k01 < WARP_SIZE; k01 += VDR_Q8_0_Q8_1_MMQ) { + for (int k01 = 0; k01 < MMQ_TILE_NE_K; k01 += VDR_Q8_0_Q8_1_MMQ) { const int k0 = k00 + k01; #pragma unroll @@ -750,45 +951,95 @@ static __device__ __forceinline__ void vec_dot_q8_1_q8_1_dp4a( const int j = j0 + threadIdx.y; #pragma unroll - for (int i0 = 0; i0 < mmq_y; i0 += WARP_SIZE) { + for (int i0 = 0; i0 < mmq_y; i0 += warp_size) { const int i = i0 + threadIdx.x; - sum[j0/nwarps*mmq_y/WARP_SIZE + i0/WARP_SIZE] += vec_dot_q8_1_q8_1_impl - (&x_qs[i*(2*WARP_SIZE + 1) + k0], &y_qs[j*MMQ_TILE_Y_K + k01], - x_dm[i*(WARP_SIZE/QI5_1) + i/QI5_1 + k0/QI8_1], y_ds[j*MMQ_TILE_Y_K + k01/QI8_1]); + sum[j0/nwarps*mmq_y/warp_size + i0/warp_size] += vec_dot_q8_1_q8_1_impl + (&x_qs[i*(2*MMQ_TILE_NE_K + 1) + k0], &y_qs[j*MMQ_TILE_Y_K + k01], + x_dm[i*(MMQ_TILE_NE_K/QI5_1) + i/QI5_1 + k0/QI8_1], y_ds[j*MMQ_TILE_Y_K + k01/QI8_1]); } } } } -template +template static __device__ __forceinline__ void vec_dot_q8_1_q8_1_mma( const int * __restrict__ x, const int * __restrict__ y, float * __restrict__ sum, const int k00) { +#if defined(AMD_MFMA_AVAILABLE) + typedef tile<16, 8, int> tile_A; + typedef tile<16, 8, int> tile_B; + typedef tile<16, 16, int> tile_C; - typedef tile<16, 8, int> tile_A; - typedef tile< 8, 8, int> tile_B; - 
typedef tile<16, 8, int> tile_C; + constexpr int granularity = mmq_get_granularity_device(mmq_x); + constexpr int rows_per_warp = granularity; + constexpr int ntx = rows_per_warp/tile_C::I; // Number of x minitiles per warp. + + y += (threadIdx.y % ntx) * (tile_C::J*MMQ_TILE_Y_K); + + const int * x_qs = (const int *) x; + const half2 * x_dm = (const half2 *) x_qs + 2*MMQ_TILE_NE_K; + const int * y_qs = (const int *) y + 4; + const half2 * y_dm = (const half2 *) y; + + const int i0 = (threadIdx.y / ntx) * rows_per_warp; + + for (int k01 = 0; k01 < MMQ_TILE_NE_K; k01 += QI8_1) { + const int k0 = k00 + k01; + + tile_A A[ntx]; +#pragma unroll + for (int n = 0; n < ntx; ++n) { + load_generic(A[n], x_qs + (i0 + n*tile_A::I)*MMQ_MMA_TILE_X_K_Q8_1 + k0, MMQ_MMA_TILE_X_K_Q8_1); + } + +#pragma unroll + for (int j0 = 0; j0 < mmq_x; j0 += ntx*tile_C::J) { + tile_B B; + load_generic(B, y_qs + j0*MMQ_TILE_Y_K + k01, MMQ_TILE_Y_K); + + const int j = j0 + tile_C::get_j(0); + const float2 dsB = __half22float2(y_dm[j*MMQ_TILE_Y_K + k01/QI8_1]); + +#pragma unroll + for (int n = 0; n < ntx; ++n) { + tile_C C; + mma(C, A[n], B); + +#pragma unroll + for (int l = 0; l < tile_C::ne; ++l) { + const int i = i0 + n*tile_A::I + tile_C::get_i(l); + float2 dmA = __half22float2(x_dm[i*MMQ_MMA_TILE_X_K_Q8_1 + k0/QI8_1]); + sum[(j0/tile_C::J + n)*tile_C::ne + l] += dmA.x*dsB.x*C.x[l]; + sum[(j0/tile_C::J + n)*tile_C::ne + l] += dmA.y*dsB.y; + } + } + } + } +#else + typedef tile<16, 8, int> tile_A; + typedef tile< 8, 8, int> tile_B; + typedef tile<16, 8, int> tile_C; constexpr int granularity = mmq_get_granularity_device(mmq_x); constexpr int rows_per_warp = 2 * granularity; constexpr int ntx = rows_per_warp/tile_C::I; // Number of x minitiles per warp. - y += (threadIdx.y % ntx) * (tile_B::J*MMQ_TILE_Y_K); + y += (threadIdx.y % ntx) * (tile_C::J*MMQ_TILE_Y_K); const int * x_qs = (const int *) x; - const half2 * x_dm = (const half2 *) x_qs + 2*WARP_SIZE; + const half2 * x_dm = (const half2 *) x_qs + 2*MMQ_TILE_NE_K; const int * y_qs = (const int *) y + 4; const half2 * y_dm = (const half2 *) y; - tile_A A[ntx][WARP_SIZE/QI8_1]; - float2 dmA[ntx][tile_C::ne/2][WARP_SIZE/QI8_1]; + tile_A A[ntx][MMQ_TILE_NE_K/QI8_1]; + float2 dmA[ntx][tile_C::ne/2][MMQ_TILE_NE_K/QI8_1]; const int i0 = (threadIdx.y/ntx)*rows_per_warp; #pragma unroll for (int n = 0; n < ntx; ++n) { #pragma unroll - for (int k01 = 0; k01 < WARP_SIZE; k01 += QI8_1) { + for (int k01 = 0; k01 < MMQ_TILE_NE_K; k01 += QI8_1) { const int k0 = k00 + k01; load_ldmatrix(A[n][k01/QI8_1], x_qs + (i0 + n*tile_A::I)*MMQ_MMA_TILE_X_K_Q8_1 + k0, MMQ_MMA_TILE_X_K_Q8_1); @@ -799,7 +1050,7 @@ static __device__ __forceinline__ void vec_dot_q8_1_q8_1_mma( const int i = i0 + n*tile_A::I + tile_C::get_i(2*l); #pragma unroll - for (int k01 = 0; k01 < WARP_SIZE; k01 += QI8_1) { + for (int k01 = 0; k01 < MMQ_TILE_NE_K; k01 += QI8_1) { const int k0 = k00 + k01; dmA[n][l][k01/QI8_1] = __half22float2(x_dm[i*MMQ_MMA_TILE_X_K_Q8_1 + k0/QI8_1]); @@ -810,7 +1061,7 @@ static __device__ __forceinline__ void vec_dot_q8_1_q8_1_mma( #pragma unroll for (int j0 = 0; j0 < mmq_x; j0 += ntx*tile_C::J) { #pragma unroll - for (int k01 = 0; k01 < WARP_SIZE; k01 += QI8_1) { + for (int k01 = 0; k01 < MMQ_TILE_NE_K; k01 += QI8_1) { tile_B B; float2 dsB[tile_C::ne/2]; @@ -836,11 +1087,15 @@ static __device__ __forceinline__ void vec_dot_q8_1_q8_1_mma( } } } +#endif // defined(AMD_MFMA_AVAILABLE) } -template +// Used for Q3_K, IQ2_S, and IQ2_XS +template static __device__ __forceinline__ void 
vec_dot_q8_0_16_q8_1_dp4a( const int * __restrict__ x, const int * __restrict__ y, float * __restrict__ sum, const int k00) { + constexpr int nwarps = mmq_get_nwarps_device(); + constexpr int warp_size = ggml_cuda_get_physical_warp_size(); constexpr tile_x_sizes txs = MMQ_DP4A_TXS_Q8_0_16; const int * x_qs = (const int *) x; @@ -849,7 +1104,7 @@ static __device__ __forceinline__ void vec_dot_q8_0_16_q8_1_dp4a( const float * y_df = (const float *) y; // #pragma unroll - for (int k01 = 0; k01 < WARP_SIZE; k01 += QI8_0) { + for (int k01 = 0; k01 < MMQ_TILE_NE_K; k01 += QI8_0) { const int k0 = k00 + k01; #pragma unroll @@ -857,23 +1112,73 @@ static __device__ __forceinline__ void vec_dot_q8_0_16_q8_1_dp4a( const int j = j0 + threadIdx.y; #pragma unroll - for (int i0 = 0; i0 < mmq_y; i0 += WARP_SIZE) { + for (int i0 = 0; i0 < mmq_y; i0 += warp_size) { const int i = i0 + threadIdx.x; - sum[j0/nwarps*mmq_y/WARP_SIZE + i0/WARP_SIZE] += vec_dot_q8_0_16_q8_1_impl( - &x_qs[i*(2*WARP_SIZE + 1) + k0], + sum[j0/nwarps*mmq_y/warp_size + i0/warp_size] += vec_dot_q8_0_16_q8_1_impl( + &x_qs[i*(2*MMQ_TILE_NE_K + 1) + k0], &y_qs[j*MMQ_TILE_Y_K + k01], - &x_df[i*(2*WARP_SIZE*2/QI8_0) + i/(QI8_0/4) + k0/(QI8_0/2)], + &x_df[i*(2*MMQ_TILE_NE_K*2/QI8_0) + i/(QI8_0/4) + k0/(QI8_0/2)], y_df[j*MMQ_TILE_Y_K + k01/QI8_1]); } } } } -template +// Used for Q3_K, IQ2_S, and IQ2_XS: +template static __device__ __forceinline__ void vec_dot_q8_0_16_q8_1_mma( const int * __restrict__ x, const int * __restrict__ y, float * __restrict__ sum, const int k00) { -#ifdef NEW_MMA_AVAILABLE +#if defined(AMD_MFMA_AVAILABLE) + typedef tile<16, 8, int> tile_A; + typedef tile<16, 8, int> tile_B; + typedef tile<16, 16, int> tile_C; + typedef tile<64, 2, int> tile_load; + + constexpr int granularity = mmq_get_granularity_device(mmq_x); + constexpr int rows_per_warp = granularity; + constexpr int ntx = rows_per_warp/tile_C::I; // Number of x minitiles per warp. + + y += (threadIdx.y % ntx) * (tile_C::J*MMQ_TILE_Y_K); + + const int * x_qs = (const int *) x; + const float * x_df = (const float *) x_qs + MMQ_TILE_NE_K*2; + const int * y_qs = (const int *) y + 4; + const float * y_df = (const float *) y; + + const int i0 = (threadIdx.y / ntx) * rows_per_warp; + + for (int k01 = 0; k01 < MMQ_TILE_NE_K; k01 += 4) { + const int k0 = k00 + k01; + + tile_A A[ntx]; +#pragma unroll + for (int n = 0; n < ntx; ++n) { + load_generic(((tile_load *) A)[n], x_qs + (i0 + n*tile_A::I)*MMQ_MMA_TILE_X_K_Q3_K + k0, MMQ_MMA_TILE_X_K_Q3_K); + } + +#pragma unroll + for (int j0 = 0; j0 < mmq_x; j0 += ntx*tile_C::J) { + tile_B B[1]; + load_generic(((tile_load *) B)[0], y_qs + j0*MMQ_TILE_Y_K + k01, MMQ_TILE_Y_K); + + const int j = j0 + tile_C::get_j(0); + const float dB = y_df[j*MMQ_TILE_Y_K + k01/QI8_1] / 2; + +#pragma unroll + for (int n = 0; n < ntx; ++n) { + tile_C C; + mma(C, A[n], B[0]); + +#pragma unroll + for (int l = 0; l < tile_C::ne; ++l) { + const int i = i0 + n*tile_C::I + tile_C::get_i(l); + sum[(j0/tile_C::J + n)*tile_C::ne + l] += C.x[l] * x_df[i*MMQ_MMA_TILE_X_K_Q3_K + k0/4] * dB; + } + } + } + } +#elif defined(TURING_MMA_AVAILABLE) typedef tile<16, 4, int> tile_A; typedef tile<16, 8, int> tile_A_8; @@ -884,10 +1189,10 @@ static __device__ __forceinline__ void vec_dot_q8_0_16_q8_1_mma( constexpr int rows_per_warp = 2 * granularity; constexpr int ntx = rows_per_warp/tile_C::I; // Number of x minitiles per warp. 
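// --- Editorial sketch, not part of the vendored patch ---
// The q8_0_16 helpers above (shared by Q3_K, IQ2_S, and IQ2_XS) carry one
// float scale per 16 quantized values instead of one per 32, so every
// 16-value slice of the dot product gets its own x scale before the shared
// y scale is applied. A hedged rendering of that contract, with illustrative
// names rather than the exact vec_dot_q8_0_16_q8_1_impl signature:
static __device__ __forceinline__ float dot_q8_0_16_sketch(
        const int * qx, const int * qy, const float * d16, const float d_y) {
    float sumf = 0.0f;
#pragma unroll
    for (int s = 0; s < 2; ++s) {            // two 16-value sub-blocks
        int acc = 0;
#pragma unroll
        for (int k = 0; k < 4; ++k) {        // 4 ints * 4 bytes == 16 values
            acc = __dp4a(qx[4*s + k], qy[4*s + k], acc);
        }
        sumf += d16[s]*float(acc);           // per-16 x scale
    }
    return sumf*d_y;                         // one y scale for all 32 values
}
// --- End editorial sketch ---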
- y += (threadIdx.y % ntx) * (tile_B::I*MMQ_TILE_Y_K); + y += (threadIdx.y % ntx) * (tile_C::J*MMQ_TILE_Y_K); const int * x_qs = (const int *) x; - const float * x_df = (const float *) x_qs + WARP_SIZE*2; + const float * x_df = (const float *) x_qs + MMQ_TILE_NE_K*2; const int * y_qs = (const int *) y + 4; const float * y_df = (const float *) y; @@ -899,7 +1204,7 @@ static __device__ __forceinline__ void vec_dot_q8_0_16_q8_1_mma( #pragma unroll for (int n = 0; n < ntx; ++n) { #pragma unroll - for (int k01 = 0; k01 < WARP_SIZE; k01 += 8) { + for (int k01 = 0; k01 < MMQ_TILE_NE_K; k01 += 8) { const int k0 = k00 + k01; load_ldmatrix(((tile_A_8 *) A[n])[k01/8], x_qs + (i0 + n*tile_A::I)*MMQ_MMA_TILE_X_K_Q3_K + k0, MMQ_MMA_TILE_X_K_Q3_K); @@ -910,7 +1215,7 @@ static __device__ __forceinline__ void vec_dot_q8_0_16_q8_1_mma( const int i = i0 + n*tile_C::I + tile_C::get_i(2*l); #pragma unroll - for (int k01 = 0; k01 < WARP_SIZE; k01 += 4) { + for (int k01 = 0; k01 < MMQ_TILE_NE_K; k01 += 4) { const int k0 = k00 + k01; dA[n][l][k01/4] = x_df[i*MMQ_MMA_TILE_X_K_Q3_K + k0/4]; @@ -921,7 +1226,7 @@ static __device__ __forceinline__ void vec_dot_q8_0_16_q8_1_mma( #pragma unroll for (int j0 = 0; j0 < mmq_x; j0 += ntx*tile_C::J) { #pragma unroll - for (int k01 = 0; k01 < WARP_SIZE; k01 += QR3_K*VDR_Q3_K_Q8_1_MMQ) { + for (int k01 = 0; k01 < MMQ_TILE_NE_K; k01 += QR3_K*VDR_Q3_K_Q8_1_MMQ) { tile_B B[2]; float dB[tile_C::ne/2]; @@ -952,26 +1257,29 @@ static __device__ __forceinline__ void vec_dot_q8_0_16_q8_1_mma( #else GGML_UNUSED(x); GGML_UNUSED(y); GGML_UNUSED(sum); GGML_UNUSED(k00); NO_DEVICE_CODE; -#endif // NEW_MMA_AVAILABLE +#endif // AMD_MFMA_AVAILABLE } -template static __device__ __forceinline__ void load_tiles_q2_K( +template static __device__ __forceinline__ void load_tiles_q2_K( const char * __restrict__ x, int * __restrict__ x_tile, const int kbx0, const int i_max, const int stride) { + constexpr int nwarps = mmq_get_nwarps_device(); -#ifdef NEW_MMA_AVAILABLE +#if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) int * x_qs = (int *) x_tile; - half2 * x_dm = (half2 *) (x_qs + 2*WARP_SIZE); + half2 * x_dm = (half2 *) (x_qs + 2*MMQ_TILE_NE_K); #else constexpr tile_x_sizes txs = mmq_get_dp4a_tile_x_sizes(GGML_TYPE_Q2_K, mmq_y); int * x_qs = (int *) x_tile; half2 * x_dm = (half2 *) (x_qs + txs.qs); -#endif // NEW_MMA_AVAILABLE +#endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) - const int kqsx = threadIdx.x % QI2_K; + constexpr int threads_per_row = MMQ_ITER_K / (4 * QR2_K); + constexpr int nrows = ggml_cuda_get_physical_warp_size() / threads_per_row; + const int kqsx = threadIdx.x % threads_per_row; #pragma unroll - for (int i0 = 0; i0 < mmq_y; i0 += nwarps * WARP_SIZE/QI2_K) { - int i = i0 + threadIdx.y*(WARP_SIZE/QI2_K) + threadIdx.x/QI2_K; + for (int i0 = 0; i0 < mmq_y; i0 += nrows*nwarps) { + int i = i0 + threadIdx.y*nrows + threadIdx.x/threads_per_row; if (need_check) { i = min(i, i_max); @@ -987,11 +1295,11 @@ template static __device__ __forceinlin const int x_qs_k = (x_ql_0 >> (2*l)) & 0x03030303; -#ifdef NEW_MMA_AVAILABLE +#if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) x_qs[i*MMQ_MMA_TILE_X_K_Q2_K + k] = x_qs_k; #else - x_qs[i*(2*WARP_SIZE + 1) + k] = x_qs_k; -#endif // NEW_MMA_AVAILABLE + x_qs[i*(2*MMQ_TILE_NE_K + 1) + k] = x_qs_k; +#endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) } const int sc_m = bxi->scales[kqsx]; @@ -1002,17 +1310,19 @@ template static __device__ __forceinlin const half2 x_dm_ik = 
make_half2(bxi_dmf.x*(sc_m & 0x0F), bxi_dmf.y*(sc_m >> 4)); #endif // FAST_FP16_AVAILABLE -#ifdef NEW_MMA_AVAILABLE +#if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) x_dm[i*MMQ_MMA_TILE_X_K_Q2_K + kqsx] = x_dm_ik; #else - x_dm[i*(WARP_SIZE + 1) + kqsx] = x_dm_ik; -#endif // NEW_MMA_AVAILABLE + x_dm[i*(MMQ_TILE_NE_K + 1) + kqsx] = x_dm_ik; +#endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) } } -template +template static __device__ __forceinline__ void vec_dot_q2_K_q8_1_dp4a( const int * __restrict__ x, const int * __restrict__ y, float * __restrict__ sum, const int k00) { + constexpr int nwarps = mmq_get_nwarps_device(); + constexpr int warp_size = ggml_cuda_get_physical_warp_size(); constexpr tile_x_sizes txs = mmq_get_dp4a_tile_x_sizes(GGML_TYPE_Q2_K, mmq_y); const int * x_qs = (const int *) x; @@ -1029,7 +1339,7 @@ static __device__ __forceinline__ void vec_dot_q2_K_q8_1_dp4a( } #pragma unroll - for (int k01 = 0; k01 < WARP_SIZE/2; k01 += QR2_K*VDR_Q2_K_Q8_1_MMQ) { + for (int k01 = 0; k01 < MMQ_TILE_NE_K/2; k01 += QR2_K*VDR_Q2_K_Q8_1_MMQ) { const int k0 = k00 + k01; #pragma unroll @@ -1037,13 +1347,13 @@ static __device__ __forceinline__ void vec_dot_q2_K_q8_1_dp4a( const int j = j0 + threadIdx.y; #pragma unroll - for (int i0 = 0; i0 < mmq_y; i0 += WARP_SIZE) { + for (int i0 = 0; i0 < mmq_y; i0 += warp_size) { const int i = i0 + threadIdx.x; constexpr int ns = 2; - sum[j0/nwarps*mmq_y/WARP_SIZE + i0/WARP_SIZE] += vec_dot_q2_K_q8_1_impl_mmq( - &x_qs[i*(2*WARP_SIZE + 1) + k0], &y_qs[j*MMQ_TILE_Y_K + k01], - &x_dm[i*(WARP_SIZE + 1) + k0/4], k01 < WARP_SIZE/2 ? y_df[j0/nwarps].x : y_df[j0/nwarps].y, + sum[j0/nwarps*mmq_y/warp_size + i0/warp_size] += vec_dot_q2_K_q8_1_impl_mmq( + &x_qs[i*(2*MMQ_TILE_NE_K + 1) + k0], &y_qs[j*MMQ_TILE_Y_K + k01], + &x_dm[i*(MMQ_TILE_NE_K + 1) + k0/4], k01 < MMQ_TILE_NE_K/2 ? y_df[j0/nwarps].x : y_df[j0/nwarps].y, &y_ds[j*MMQ_TILE_Y_K + (1 + k01/QI8_1)]); } } @@ -1052,7 +1362,7 @@ static __device__ __forceinline__ void vec_dot_q2_K_q8_1_dp4a( // Some compilers fail to unroll the loop over k01 if there is a conditional statement for ns in the inner loop. // As a workaround 2 separate loops are used instead. #pragma unroll - for (int k01 = WARP_SIZE/2; k01 < WARP_SIZE; k01 += QR2_K*VDR_Q2_K_Q8_1_MMQ) { + for (int k01 = MMQ_TILE_NE_K/2; k01 < MMQ_TILE_NE_K; k01 += QR2_K*VDR_Q2_K_Q8_1_MMQ) { const int k0 = k00 + k01; #pragma unroll @@ -1060,23 +1370,89 @@ static __device__ __forceinline__ void vec_dot_q2_K_q8_1_dp4a( const int j = j0 + threadIdx.y; #pragma unroll - for (int i0 = 0; i0 < mmq_y; i0 += WARP_SIZE) { + for (int i0 = 0; i0 < mmq_y; i0 += warp_size) { const int i = i0 + threadIdx.x; constexpr int ns = 1; - sum[j0/nwarps*mmq_y/WARP_SIZE + i0/WARP_SIZE] += vec_dot_q2_K_q8_1_impl_mmq( - &x_qs[i*(2*WARP_SIZE + 1) + k0], &y_qs[j*MMQ_TILE_Y_K + k01], - &x_dm[i*(WARP_SIZE + 1) + k0/4], k01 < WARP_SIZE/2 ? y_df[j0/nwarps].x : y_df[j0/nwarps].y, + sum[j0/nwarps*mmq_y/warp_size + i0/warp_size] += vec_dot_q2_K_q8_1_impl_mmq( + &x_qs[i*(2*MMQ_TILE_NE_K + 1) + k0], &y_qs[j*MMQ_TILE_Y_K + k01], + &x_dm[i*(MMQ_TILE_NE_K + 1) + k0/4], k01 < MMQ_TILE_NE_K/2 ? 
y_df[j0/nwarps].x : y_df[j0/nwarps].y, &y_ds[j*MMQ_TILE_Y_K + (1 + k01/QI8_1)]); } } } } -template +template static __device__ __forceinline__ void vec_dot_q2_K_q8_1_mma( const int * __restrict__ x, const int * __restrict__ y, float * __restrict__ sum, const int k00) { -#ifdef NEW_MMA_AVAILABLE +#if defined(AMD_MFMA_AVAILABLE) + typedef tile<16, 8, int> tile_A; + typedef tile<16, 8, int> tile_B; + typedef tile<16, 16, int> tile_C; + typedef tile<64, 2, int> tile_load; + + constexpr int granularity = mmq_get_granularity_device(mmq_x); + constexpr int rows_per_warp = granularity; + constexpr int ntx = rows_per_warp/tile_C::I; // Number of x minitiles per warp. + + y += (threadIdx.y % ntx) * (tile_C::J*MMQ_TILE_Y_K); + + const int * x_qs = (const int *) x; + const half2 * x_dm = (const half2 *) x_qs + MMQ_TILE_NE_K*2; + const int * y_qs = (const int *) y + 4; + const half2 * y_ds = (const half2 *) y; + + const int i0 = (threadIdx.y / ntx) * rows_per_warp; + + for (int k01 = 0; k01 < MMQ_TILE_NE_K; k01 += 4) { + const int k0 = k00 + k01; + + tile_A A[ntx]; +#pragma unroll + for (int n = 0; n < ntx; ++n) { + load_generic(((tile_load *) A)[n], x_qs + (i0 + n*tile_A::I)*MMQ_MMA_TILE_X_K_Q2_K + k0, MMQ_MMA_TILE_X_K_Q2_K); + } + +#pragma unroll + for (int j0 = 0; j0 < mmq_x; j0 += ntx*tile_C::J) { + tile_B B[1]; + load_generic(((tile_load *) B)[0], y_qs + j0*MMQ_TILE_Y_K + k01, MMQ_TILE_Y_K); + + const int j = j0 + tile_C::get_j(0); + const float dB = (k01 < MMQ_TILE_NE_K/2) ? __half22float2(y_ds[j*MMQ_TILE_Y_K]).x/2 : __half22float2(y_ds[j*MMQ_TILE_Y_K]).y/2; + const float sB = (k01 >= MMQ_TILE_NE_K * 3/4) ? 0 + : (((k01/4)%2) ? __half22float2(y_ds[j*MMQ_TILE_Y_K + (1 + k01/QI8_1)]).y + : __half22float2(y_ds[j*MMQ_TILE_Y_K + (1 + k01/QI8_1)]).x); + + tile_C Cm; + if (k01 >= MMQ_TILE_NE_K * 3/4) { + tile_A A1; + A1.x[0] = 0x01010101; + A1.x[1] = 0x01010101; + mma(Cm, A1, B[0]); + } + +#pragma unroll + for (int n = 0; n < ntx; ++n) { + tile_C Cd; + mma(Cd, A[n], B[0]); + +#pragma unroll + for (int l = 0; l < tile_C::ne; ++l) { + const int i = i0 + n*tile_C::I + tile_C::get_i(l); + const float2 dm = __half22float2(x_dm[i*MMQ_MMA_TILE_X_K_Q2_K + k0/4]); + float tmp = Cd.x[l]*dm.x; + if (k01 >= MMQ_TILE_NE_K * 3/4) { + tmp -= Cm.x[l]*dm.y; + } + sum[(j0/tile_C::J + n)*tile_C::ne + l] += tmp*dB; + sum[(j0/tile_C::J + n)*tile_C::ne + l] -= dm.y*sB; + } + } + } + } +#elif defined(TURING_MMA_AVAILABLE) typedef tile<16, 4, int> tile_A; typedef tile<16, 8, int> tile_A_8; @@ -1087,10 +1463,10 @@ static __device__ __forceinline__ void vec_dot_q2_K_q8_1_mma( constexpr int rows_per_warp = 2 * granularity; constexpr int ntx = rows_per_warp/tile_C::I; // Number of x minitiles per warp. 
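// --- Editorial sketch, not part of the vendored patch ---
// The Q2_K MMA path above issues an extra MMA against an all-ones A tile
// (0x01010101 packs four int8 ones). With x_i = d*q_i - m, the dot against y
// is d*(q . y) - m*sum(y), so that extra MMA yields the per-column sum(y)
// needed for the m correction without unpacking anything. Scalar equivalent:
static __device__ __forceinline__ float dot_q2_corrected_sketch(
        const int qx, const int qy, const float d, const float m) {
    const int prod = __dp4a(qx, qy, 0);          // q . y over four values
    const int ysum = __dp4a(0x01010101, qy, 0);  // sum(y) over the same four
    return d*float(prod) - m*float(ysum);
}
// --- End editorial sketch ---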
- y += (threadIdx.y % ntx) * (tile_B::I*MMQ_TILE_Y_K); + y += (threadIdx.y % ntx) * (tile_C::J*MMQ_TILE_Y_K); const int * x_qs = (const int *) x; - const half2 * x_dm = (const half2 *) x_qs + WARP_SIZE*2; + const half2 * x_dm = (const half2 *) x_qs + MMQ_TILE_NE_K*2; const int * y_qs = (const int *) y + 4; const half2 * y_ds = (const half2 *) y; @@ -1103,7 +1479,7 @@ static __device__ __forceinline__ void vec_dot_q2_K_q8_1_mma( #pragma unroll for (int n = 0; n < ntx; ++n) { #pragma unroll - for (int k01 = 0; k01 < WARP_SIZE; k01 += QI8_1) { + for (int k01 = 0; k01 < MMQ_TILE_NE_K; k01 += QI8_1) { const int k0 = k00 + k01; load_ldmatrix(((tile_A_8 *) A[n])[k01/QI8_1], x_qs + (i0 + n*tile_A::I)*MMQ_MMA_TILE_X_K_Q2_K + k0, MMQ_MMA_TILE_X_K_Q2_K); @@ -1117,7 +1493,7 @@ static __device__ __forceinline__ void vec_dot_q2_K_q8_1_mma( const int i = i0 + n*tile_C::I + tile_C::get_i(2*l); #pragma unroll - for (int k01 = 0; k01 < WARP_SIZE; k01 += QI8_1/2) { + for (int k01 = 0; k01 < MMQ_TILE_NE_K; k01 += QI8_1/2) { const int k0 = k00 + k01; const float2 dm = __half22float2(x_dm[i*MMQ_MMA_TILE_X_K_Q2_K + k0/(QI8_1/2)]); @@ -1140,7 +1516,7 @@ static __device__ __forceinline__ void vec_dot_q2_K_q8_1_mma( } #pragma unroll - for (int k01 = 0; k01 < WARP_SIZE; k01 += QI8_1) { + for (int k01 = 0; k01 < MMQ_TILE_NE_K; k01 += QI8_1) { tile_B B[2]; // Here load_generic is faster than load_ldmatrix. @@ -1148,7 +1524,7 @@ static __device__ __forceinline__ void vec_dot_q2_K_q8_1_mma( load_generic(B[1], y_qs + j0*MMQ_TILE_Y_K + (k01 + tile_B::J), MMQ_TILE_Y_K); tile_C Cm[2]; - if (k01 >= WARP_SIZE * 3/4) { + if (k01 >= MMQ_TILE_NE_K * 3/4) { tile_A A1; A1.x[0] = 0x01010101; A1.x[1] = 0x01010101; @@ -1166,16 +1542,16 @@ static __device__ __forceinline__ void vec_dot_q2_K_q8_1_mma( #pragma unroll for (int l = 0; l < tile_C::ne; ++l) { float tmp = Cd[0].x[l]*dA[n][l/2][k01/4 + 0] + Cd[1].x[l]*dA[n][l/2][k01/4 + 1]; - if (k01 >= WARP_SIZE * 3/4) { + if (k01 >= MMQ_TILE_NE_K * 3/4) { tmp -= Cm[0].x[l]*mA[n][l/2][k01/4 + 0] + Cm[1].x[l]*mA[n][l/2][k01/4 + 1]; } - sum[(j0/tile_C::J + n)*tile_C::ne + l] += tmp*(k01 < WARP_SIZE/2 ? dB[l%2].x : dB[l%2].y); + sum[(j0/tile_C::J + n)*tile_C::ne + l] += tmp*(k01 < MMQ_TILE_NE_K/2 ? 
dB[l%2].x : dB[l%2].y); } } } #pragma unroll - for (int k01 = 0; k01 < WARP_SIZE * 3/4; k01 += QI8_1) { + for (int k01 = 0; k01 < MMQ_TILE_NE_K * 3/4; k01 += QI8_1) { float2 sB[tile_C::ne/2]; #pragma unroll @@ -1198,27 +1574,31 @@ static __device__ __forceinline__ void vec_dot_q2_K_q8_1_mma( #else GGML_UNUSED(x); GGML_UNUSED(y); GGML_UNUSED(sum); GGML_UNUSED(k00); NO_DEVICE_CODE; -#endif // NEW_MMA_AVAILABLE +#endif // AMD_MFMA_AVAILABLE } -template static __device__ __forceinline__ void load_tiles_q3_K( +template static __device__ __forceinline__ void load_tiles_q3_K( const char * __restrict__ x, int * __restrict__ x_tile, const int kbx0, const int i_max, const int stride) { + constexpr int nwarps = mmq_get_nwarps_device(); + constexpr int warp_size = ggml_cuda_get_physical_warp_size(); -#ifdef NEW_MMA_AVAILABLE +#if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) int * x_qs = (int *) x_tile; - float * x_df = (float *) (x_qs + WARP_SIZE*2); + float * x_df = (float *) (x_qs + MMQ_TILE_NE_K*2); #else constexpr tile_x_sizes txs = mmq_get_dp4a_tile_x_sizes(GGML_TYPE_Q3_K, mmq_y); int * x_qs = (int *) x_tile; float * x_df = (float *) (x_qs + txs.qs); int * x_sc = (int *) (x_df + txs.dm); -#endif // NEW_MMA_AVAILABLE +#endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) - const int kqsx = threadIdx.x % QI3_K; + constexpr int threads_per_row = MMQ_ITER_K / (4 * QR3_K); + constexpr int nrows = warp_size / threads_per_row; + const int kqsx = threadIdx.x % threads_per_row; #pragma unroll - for (int i0 = 0; i0 < mmq_y; i0 += nwarps * WARP_SIZE/QI3_K) { - int i = i0 + threadIdx.y * (WARP_SIZE/QI3_K) + threadIdx.x / QI3_K; + for (int i0 = 0; i0 < mmq_y; i0 += nrows*nwarps) { + int i = i0 + threadIdx.y*nrows + threadIdx.x/threads_per_row; if (need_check) { i = min(i, i_max); @@ -1238,17 +1618,18 @@ template static __device__ __forceinlin const int x_qs_k = __vsubss4(x_ql_k | x_qh_k, 0x04040404); -#ifdef NEW_MMA_AVAILABLE +#if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) x_qs[i*MMQ_MMA_TILE_X_K_Q3_K + k] = x_qs_k; #else - x_qs[i*(2*WARP_SIZE + 1) + k] = x_qs_k; -#endif // NEW_MMA_AVAILABLE + x_qs[i*(2*MMQ_TILE_NE_K + 1) + k] = x_qs_k; +#endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) } } + constexpr int rows_per_warp = warp_size / 4; #pragma unroll - for (int i0 = 0; i0 < mmq_y; i0 += nwarps*8) { - int i = i0 + threadIdx.y*8 + threadIdx.x/(WARP_SIZE/8); + for (int i0 = 0; i0 < mmq_y; i0 += nwarps*rows_per_warp) { + int i = i0 + threadIdx.y*rows_per_warp + threadIdx.x/4; if (need_check) { i = min(i, i_max); @@ -1256,7 +1637,7 @@ template static __device__ __forceinlin const block_q3_K * bxi = (const block_q3_K *) x + kbx0 + i*stride; - const int ksc = threadIdx.x % (WARP_SIZE/8); + const int ksc = threadIdx.x % 4; const int ksc_low = ksc % (QI3_K/8); const int shift_low = 4 * (ksc / (QI3_K/8)); @@ -1268,23 +1649,23 @@ template static __device__ __forceinlin const int sc = __vsubss4(sc_low | sc_high, 0x20202020); -#ifdef NEW_MMA_AVAILABLE +#if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) const int8_t * sc8 = (const int8_t *) ≻ const float d = bxi->d; #pragma unroll for (int l = 0; l < int(sizeof(int)); ++l) { - x_df[i*MMQ_MMA_TILE_X_K_Q3_K + sizeof(int)*(threadIdx.x % (WARP_SIZE/8)) + l] = d*sc8[l]; + x_df[i*MMQ_MMA_TILE_X_K_Q3_K + sizeof(int)*ksc + l] = d*sc8[l]; } #else - x_sc[i*(WARP_SIZE/8) + i/8 + threadIdx.x % (WARP_SIZE/8)] = sc; -#endif // NEW_MMA_AVAILABLE + x_sc[i*(MMQ_TILE_NE_K/8) + i/8 + ksc] = sc; +#endif // 
defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) } -#ifndef NEW_MMA_AVAILABLE +#if !(defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE)) #pragma unroll - for (int i0 = 0; i0 < mmq_y; i0 += nwarps*WARP_SIZE) { - int i = (i0 + threadIdx.y*WARP_SIZE + threadIdx.x) % mmq_y; + for (int i0 = 0; i0 < mmq_y; i0 += nwarps*warp_size) { + int i = (i0 + threadIdx.y*warp_size + threadIdx.x) % mmq_y; if (need_check) { i = min(i, i_max); @@ -1294,12 +1675,14 @@ template static __device__ __forceinlin x_df[i] = bxi->d; } -#endif // NEW_MMA_AVAILABLE +#endif // !(defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE)) } -template +template static __device__ __forceinline__ void vec_dot_q3_K_q8_1_dp4a( const int * __restrict__ x, const int * __restrict__ y, float * __restrict__ sum, const int k00) { + constexpr int nwarps = mmq_get_nwarps_device(); + constexpr int warp_size = ggml_cuda_get_physical_warp_size(); constexpr tile_x_sizes txs = mmq_get_dp4a_tile_x_sizes(GGML_TYPE_Q3_K, mmq_y); const int * x_qs = (const int *) x; @@ -1309,7 +1692,7 @@ static __device__ __forceinline__ void vec_dot_q3_K_q8_1_dp4a( const float * y_df = (const float *) y; // #pragma unroll - for (int k01 = 0; k01 < WARP_SIZE; k01 += QR3_K*VDR_Q3_K_Q8_1_MMQ) { + for (int k01 = 0; k01 < MMQ_TILE_NE_K; k01 += QR3_K*VDR_Q3_K_Q8_1_MMQ) { const int k0 = k00 + k01; #pragma unroll @@ -1317,13 +1700,13 @@ static __device__ __forceinline__ void vec_dot_q3_K_q8_1_dp4a( const int j = j0 + threadIdx.y; #pragma unroll - for (int i0 = 0; i0 < mmq_y; i0 += WARP_SIZE) { + for (int i0 = 0; i0 < mmq_y; i0 += warp_size) { const int i = i0 + threadIdx.x; - const int8_t * scales = ((const int8_t *) (x_sc + i*(WARP_SIZE/8) + i/8)) + k0/4; + const int8_t * scales = ((const int8_t *) (x_sc + i*(MMQ_TILE_NE_K/8) + i/8)) + k0/4; - sum[j0/nwarps*mmq_y/WARP_SIZE + i0/WARP_SIZE] += vec_dot_q3_K_q8_1_impl_mmq( - &x_qs[i*(2*WARP_SIZE + 1) + k0], &y_qs[j*MMQ_TILE_Y_K + k01], scales, + sum[j0/nwarps*mmq_y/warp_size + i0/warp_size] += vec_dot_q3_K_q8_1_impl_mmq( + &x_qs[i*(2*MMQ_TILE_NE_K + 1) + k0], &y_qs[j*MMQ_TILE_Y_K + k01], scales, x_df[i], y_df[j*MMQ_TILE_Y_K + k01/QI8_1]); } } @@ -1340,72 +1723,85 @@ static __device__ __forceinline__ int unpack_scales_q45_K(const int * scales, co ((scales[ksc/2] >> (2 * (ksc % 2))) & 0x30303030); // upper 2 bits } -template static __device__ __forceinline__ void load_tiles_q4_K( +template static __device__ __forceinline__ void load_tiles_q4_K( const char * __restrict__ x, int * __restrict__ x_tile, const int kbx0, const int i_max, const int stride) { + constexpr int nwarps = mmq_get_nwarps_device(); + constexpr int warp_size = ggml_cuda_get_physical_warp_size(); -#ifdef NEW_MMA_AVAILABLE +#if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) int * x_qs = (int *) x_tile; - half2 * x_dm = (half2 *) (x_qs + 2*WARP_SIZE); + half2 * x_dm = (half2 *) (x_qs + 2*MMQ_TILE_NE_K); #else constexpr tile_x_sizes txs = mmq_get_dp4a_tile_x_sizes(GGML_TYPE_Q4_K, mmq_y); int * x_qs = (int *) x_tile; half2 * x_dm = (half2 *) (x_qs + txs.qs); int * x_sc = (int *) (x_dm + txs.dm); -#endif // NEW_MMA_AVAILABLE +#endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) + + constexpr int threads_per_row = MMQ_ITER_K / (4 * QR4_K); + constexpr int nrows = warp_size / threads_per_row; + const int txi = warp_size > threads_per_row ? 
threadIdx.x % threads_per_row : threadIdx.x; #pragma unroll - for (int i0 = 0; i0 < mmq_y; i0 += nwarps) { - int i = i0 + threadIdx.y; + for (int i0 = 0; i0 < mmq_y; i0 += nrows*nwarps) { + int i = i0 + (nrows == 1 ? threadIdx.y : threadIdx.y*nrows + threadIdx.x/threads_per_row); if (need_check) { i = min(i, i_max); } const block_q4_K * bxi = (const block_q4_K *) x + kbx0 + i*stride; - const int qs0 = get_int_b4(bxi->qs, threadIdx.x); + const int qs0 = get_int_b4(bxi->qs, txi); -#ifdef NEW_MMA_AVAILABLE - x_qs[i*MMQ_MMA_TILE_X_K_Q8_1 + 16*(threadIdx.x/8) + threadIdx.x % 8 + 0] = (qs0 >> 0) & 0x0F0F0F0F; - x_qs[i*MMQ_MMA_TILE_X_K_Q8_1 + 16*(threadIdx.x/8) + threadIdx.x % 8 + 8] = (qs0 >> 4) & 0x0F0F0F0F; +#if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) + x_qs[i*MMQ_MMA_TILE_X_K_Q8_1 + 16*(txi/8) + txi % 8 + 0] = (qs0 >> 0) & 0x0F0F0F0F; + x_qs[i*MMQ_MMA_TILE_X_K_Q8_1 + 16*(txi/8) + txi % 8 + 8] = (qs0 >> 4) & 0x0F0F0F0F; #else - x_qs[i*(WARP_SIZE + 1) + threadIdx.x] = qs0; -#endif // NEW_MMA_AVAILABLE + x_qs[i*(MMQ_TILE_NE_K + 1) + txi] = qs0; +#endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) } -#ifdef NEW_MMA_AVAILABLE - +#if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) + constexpr int rows_per_warp = warp_size / 2; #pragma unroll - for (int i0 = 0; i0 < mmq_y; i0 += nwarps*16) { - int i = (i0 + threadIdx.y*16 + threadIdx.x/(WARP_SIZE/16)) % mmq_y; + for (int i0 = 0; i0 < mmq_y; i0 += nwarps*rows_per_warp) { +#if defined(AMD_MFMA_AVAILABLE) + // On AMD an 'if' bound is needed instead of a '%' wrap because warp_size == 64: + // wrapping with '%' would process some rows twice, i.e. double work and a throughput loss (MI300X), + // while on H100 the 'if' condition costs about 100 t/s compared to '%'. + int i = i0 + threadIdx.y*rows_per_warp + threadIdx.x/2; + if (i < mmq_y) { +#else + int i = (i0 + threadIdx.y*rows_per_warp + threadIdx.x/2) % mmq_y; + { +#endif // defined(AMD_MFMA_AVAILABLE) + if (need_check) { + i = min(i, i_max); + } - if (need_check) { - i = min(i, i_max); - } + const block_q4_K * bxi = (const block_q4_K *) x + kbx0 + i*stride; - const block_q4_K * bxi = (const block_q4_K *) x + kbx0 + i*stride; + const int * scales = (const int *) bxi->scales; + const int ksc = threadIdx.x % 2; - const int * scales = (const int *) bxi->scales; - const int ksc = threadIdx.x % (WARP_SIZE/16); + const int sc32 = unpack_scales_q45_K(scales, ksc + 0); + const int m32 = unpack_scales_q45_K(scales, ksc + 2); - const int sc32 = unpack_scales_q45_K(scales, ksc + 0); - const int m32 = unpack_scales_q45_K(scales, ksc + 2); + const uint8_t * sc8 = (const uint8_t *) &sc32; + const uint8_t * m8 = (const uint8_t *) &m32; - const uint8_t * sc8 = (const uint8_t *) &sc32; - const uint8_t * m8 = (const uint8_t *) &m32; + const half2 dm = bxi->dm * make_half2(1.0f, -1.0f); - const half2 dm = bxi->dm * make_half2(1.0f, -1.0f); - -#pragma unroll - for (int l = 0; l < int(sizeof(int)); ++l) { - x_dm[i*MMQ_MMA_TILE_X_K_Q8_1 + sizeof(int)*ksc + l] = dm*make_half2(sc8[l], m8[l]); + #pragma unroll + for (int l = 0; l < sizeof(int); ++l) { + x_dm[i*MMQ_MMA_TILE_X_K_Q8_1 + sizeof(int)*ksc + l] = dm*make_half2(sc8[l], m8[l]); + } } } - #else - #pragma unroll - for (int i0 = 0; i0 < mmq_y; i0 += nwarps*QI4_K) { - int i = (i0 + threadIdx.y*QI4_K + threadIdx.x) % mmq_y; + for (int i0 = 0; i0 < mmq_y; i0 += nwarps*warp_size) { + int i = (i0 + threadIdx.y*warp_size + threadIdx.x) % mmq_y; if (need_check) { i = min(i, i_max); @@ -1415,30 +1811,32 @@ template static __device__ __forceinlin x_dm[i] = bxi->dm; } - + constexpr int rows_per_warp = warp_size / 
4; #pragma unroll - for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 8) { - int i = (i0 + threadIdx.y * 8 + threadIdx.x / (WARP_SIZE/8)) % mmq_y; + for (int i0 = 0; i0 < mmq_y; i0 += nwarps*rows_per_warp) { + int i = (i0 + threadIdx.y*rows_per_warp + threadIdx.x/(MMQ_TILE_NE_K/8)) % mmq_y; if (need_check) { i = min(i, i_max); } - const block_q4_K * bxi = (const block_q4_K *) x + kbx0 + i*stride + (threadIdx.x % (WARP_SIZE/8)) / (QI4_K/8); + const block_q4_K * bxi = (const block_q4_K *) x + kbx0 + i*stride + (threadIdx.x % (MMQ_TILE_NE_K/8)) / (QI4_K/8); const int * scales = (const int *) bxi->scales; - const int ksc = threadIdx.x % (WARP_SIZE/8); + const int ksc = threadIdx.x % (MMQ_TILE_NE_K/8); const int scales8 = unpack_scales_q45_K(scales, ksc); - x_sc[i*(WARP_SIZE/8) + i/8 + ksc] = scales8; + x_sc[i*(MMQ_TILE_NE_K/8) + i/8 + ksc] = scales8; } -#endif // NEW_MMA_AVAILABLE +#endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) } -template +template static __device__ __forceinline__ void vec_dot_q4_K_q8_1_dp4a( const int * __restrict__ x, const int * __restrict__ y, float * __restrict__ sum, const int k00) { + constexpr int nwarps = mmq_get_nwarps_device(); + constexpr int warp_size = ggml_cuda_get_physical_warp_size(); constexpr tile_x_sizes txs = mmq_get_dp4a_tile_x_sizes(GGML_TYPE_Q4_K, mmq_y); const int * x_qs = (const int *) x; @@ -1448,7 +1846,7 @@ static __device__ __forceinline__ void vec_dot_q4_K_q8_1_dp4a( const half2 * y_ds = (const half2 *) y; // #pragma unroll - for (int k01 = 0; k01 < WARP_SIZE; k01 += QR4_K*VDR_Q4_K_Q8_1_MMQ) { + for (int k01 = 0; k01 < MMQ_TILE_NE_K; k01 += QR4_K*VDR_Q4_K_Q8_1_MMQ) { const int k0 = k00 + k01; #pragma unroll @@ -1456,97 +1854,110 @@ static __device__ __forceinline__ void vec_dot_q4_K_q8_1_dp4a( const int j = j0 + threadIdx.y; #pragma unroll - for (int i0 = 0; i0 < mmq_y; i0 += WARP_SIZE) { + for (int i0 = 0; i0 < mmq_y; i0 += warp_size) { const int i = i0 + threadIdx.x; - const uint8_t * sc = (const uint8_t *) &x_sc[i * (WARP_SIZE/8) + i/8 + k0/32] + 2*(k01/16); + const uint8_t * sc = (const uint8_t *) &x_sc[i * (MMQ_TILE_NE_K/8) + i/8 + k0/32] + 2*(k01/16); - sum[j0/nwarps*mmq_y/WARP_SIZE + i0/WARP_SIZE] += vec_dot_q4_K_q8_1_impl_mmq( - &x_qs[i*(WARP_SIZE + 1) + k0/2], &y_qs[j*MMQ_TILE_Y_K + k01], sc, sc+8, + sum[j0/nwarps*mmq_y/warp_size + i0/warp_size] += vec_dot_q4_K_q8_1_impl_mmq( + &x_qs[i*(MMQ_TILE_NE_K + 1) + k0/2], &y_qs[j*MMQ_TILE_Y_K + k01], sc, sc+8, x_dm[i], &y_ds[j*MMQ_TILE_Y_K + k01/QI8_1]); } } } } -template static __device__ __forceinline__ void load_tiles_q5_K( +template static __device__ __forceinline__ void load_tiles_q5_K( const char * __restrict__ x, int * __restrict__ x_tile, const int kbx0, const int i_max, const int stride) { + constexpr int nwarps = mmq_get_nwarps_device(); + constexpr int warp_size = ggml_cuda_get_physical_warp_size(); -#ifdef NEW_MMA_AVAILABLE +#if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) int * x_qs = (int *) x_tile; - half2 * x_dm = (half2 *) (x_qs + WARP_SIZE*2); + half2 * x_dm = (half2 *) (x_qs + MMQ_TILE_NE_K*2); #else constexpr tile_x_sizes txs = mmq_get_dp4a_tile_x_sizes(GGML_TYPE_Q5_K, mmq_y); int * x_qs = (int *) x_tile; half2 * x_dm = (half2 *) (x_qs + txs.qs); int * x_sc = (int *) (x_dm + txs.dm); -#endif // NEW_MMA_AVAILABLE +#endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) + + constexpr int threads_per_row = MMQ_ITER_K / (4 * QR5_K); + constexpr int nrows = warp_size / threads_per_row; + const int txi = warp_size > threads_per_row 
? threadIdx.x % threads_per_row : threadIdx.x; #pragma unroll - for (int i0 = 0; i0 < mmq_y; i0 += nwarps) { - int i = i0 + threadIdx.y; + for (int i0 = 0; i0 < mmq_y; i0 += nrows*nwarps) { + int i = i0 + (nrows == 1 ? threadIdx.y : threadIdx.y*nrows + threadIdx.x/threads_per_row); if (need_check) { i = min(i, i_max); } const block_q5_K * bxi = (const block_q5_K *) x + kbx0 + i*stride; - const int ky = QR5_K*threadIdx.x; + const int ky = QR5_K*txi; - const int ql = get_int_b4(bxi->qs, threadIdx.x); + const int ql = get_int_b4(bxi->qs, txi); const int ql0 = (ql >> 0) & 0x0F0F0F0F; const int ql1 = (ql >> 4) & 0x0F0F0F0F; - const int qh = get_int_b4(bxi->qh, threadIdx.x % (QI5_K/4)); - const int qh0 = ((qh >> (2 * (threadIdx.x / (QI5_K/4)) + 0)) << 4) & 0x10101010; - const int qh1 = ((qh >> (2 * (threadIdx.x / (QI5_K/4)) + 1)) << 4) & 0x10101010; + const int qh = get_int_b4(bxi->qh, txi % (QI5_K/4)); + const int qh0 = ((qh >> (2 * (txi / (QI5_K/4)) + 0)) << 4) & 0x10101010; + const int qh1 = ((qh >> (2 * (txi / (QI5_K/4)) + 1)) << 4) & 0x10101010; - const int kq0 = ky - ky % (QI5_K/2) + threadIdx.x % (QI5_K/4) + 0; - const int kq1 = ky - ky % (QI5_K/2) + threadIdx.x % (QI5_K/4) + QI5_K/4; + const int kq0 = ky - ky % (QI5_K/2) + txi % (QI5_K/4) + 0; + const int kq1 = ky - ky % (QI5_K/2) + txi % (QI5_K/4) + QI5_K/4; -#ifdef NEW_MMA_AVAILABLE +#if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) x_qs[i*MMQ_MMA_TILE_X_K_Q8_1 + kq0] = ql0 | qh0; x_qs[i*MMQ_MMA_TILE_X_K_Q8_1 + kq1] = ql1 | qh1; #else - x_qs[i*(2*WARP_SIZE + 1) + kq0] = ql0 | qh0; - x_qs[i*(2*WARP_SIZE + 1) + kq1] = ql1 | qh1; -#endif // NEW_MMA_AVAILABLE + x_qs[i*(2*MMQ_TILE_NE_K + 1) + kq0] = ql0 | qh0; + x_qs[i*(2*MMQ_TILE_NE_K + 1) + kq1] = ql1 | qh1; +#endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) } -#ifdef NEW_MMA_AVAILABLE - +#if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) + constexpr int rows_per_warp = warp_size / 2; #pragma unroll - for (int i0 = 0; i0 < mmq_y; i0 += nwarps*16) { - int i = (i0 + threadIdx.y*16 + threadIdx.x/(WARP_SIZE/16)) % mmq_y; - - if (need_check) { - i = min(i, i_max); - } - - const block_q5_K * bxi = (const block_q5_K *) x + kbx0 + i*stride; - - const int * scales = (const int *) bxi->scales; - const int ksc = threadIdx.x % (WARP_SIZE/16); - - const int sc32 = unpack_scales_q45_K(scales, ksc + 0); - const int m32 = unpack_scales_q45_K(scales, ksc + 2); - - const uint8_t * sc8 = (const uint8_t *) &sc32; - const uint8_t * m8 = (const uint8_t *) &m32; - - const half2 dm = bxi->dm * make_half2(1.0f, -1.0f); - -#pragma unroll - for (int l = 0; l < int(sizeof(int)); ++l) { - x_dm[i*MMQ_MMA_TILE_X_K_Q8_1 + sizeof(int)*ksc + l] = dm*make_half2(sc8[l], m8[l]); - } - } - + for (int i0 = 0; i0 < mmq_y; i0 += nwarps*rows_per_warp) { +#if defined(AMD_MFMA_AVAILABLE) + // On AMD an 'if' bound is needed instead of a '%' wrap because warp_size == 64: + // wrapping with '%' would process some rows twice, i.e. double work and a throughput loss (MI300X), + // while on H100 the 'if' condition costs about 100 t/s compared to '%'. + int i = i0 + threadIdx.y*rows_per_warp + threadIdx.x/2; + if (i < mmq_y) { #else + int i = (i0 + threadIdx.y*rows_per_warp + threadIdx.x/2) % mmq_y; + { +#endif // defined(AMD_MFMA_AVAILABLE) + if (need_check) { + i = min(i, i_max); + } + + const block_q5_K * bxi = (const block_q5_K *) x + kbx0 + i*stride; + + const int * scales = (const int *) bxi->scales; + const int ksc = threadIdx.x % 2; + + const int sc32 = unpack_scales_q45_K(scales, ksc + 0); + const int m32 = unpack_scales_q45_K(scales, ksc + 2); + + const uint8_t * 
sc8 = (const uint8_t *) &sc32; + const uint8_t * m8 = (const uint8_t *) &m32; + + const half2 dm = bxi->dm * make_half2(1.0f, -1.0f); #pragma unroll - for (int i0 = 0; i0 < mmq_y; i0 += nwarps*QI5_K) { - int i = (i0 + threadIdx.y*QI5_K + threadIdx.x) % mmq_y; + for (int l = 0; l < int(sizeof(int)); ++l) { + x_dm[i*MMQ_MMA_TILE_X_K_Q8_1 + sizeof(int)*ksc + l] = dm*make_half2(sc8[l], m8[l]); + } + } + } +#else +#pragma unroll + for (int i0 = 0; i0 < mmq_y; i0 += nwarps*warp_size) { + int i = (i0 + threadIdx.y*warp_size + threadIdx.x) % mmq_y; if (need_check) { i = min(i, i_max); @@ -1557,9 +1968,10 @@ template static __device__ __forceinlin x_dm[i] = bxi->dm; } + constexpr int rows_per_warp = warp_size / 4; #pragma unroll - for (int i0 = 0; i0 < mmq_y; i0 += nwarps*8) { - int i = (i0 + threadIdx.y*8 + threadIdx.x/(WARP_SIZE/8)) % mmq_y; + for (int i0 = 0; i0 < mmq_y; i0 += nwarps*rows_per_warp) { + int i = (i0 + threadIdx.y*rows_per_warp + threadIdx.x/(MMQ_TILE_NE_K/8)) % mmq_y; if (need_check) { i = min(i, i_max); @@ -1569,17 +1981,19 @@ template static __device__ __forceinlin const int * scales = (const int *) bxi->scales; - const int ksc = threadIdx.x % (WARP_SIZE/8); + const int ksc = threadIdx.x % (MMQ_TILE_NE_K/8); const int scales8 = unpack_scales_q45_K(scales, ksc); - x_sc[i*(WARP_SIZE/8) + i/8 + ksc] = scales8; + x_sc[i*(MMQ_TILE_NE_K/8) + i/8 + ksc] = scales8; } -#endif // NEW_MMA_AVAILABLE +#endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) } -template +template static __device__ __forceinline__ void vec_dot_q5_K_q8_1_dp4a( const int * __restrict__ x, const int * __restrict__ y, float * __restrict__ sum, const int k00) { + constexpr int nwarps = mmq_get_nwarps_device(); + constexpr int warp_size = ggml_cuda_get_physical_warp_size(); constexpr tile_x_sizes txs = mmq_get_dp4a_tile_x_sizes(GGML_TYPE_Q5_K, mmq_y); const int * x_qs = (const int *) x; @@ -1589,7 +2003,7 @@ static __device__ __forceinline__ void vec_dot_q5_K_q8_1_dp4a( const half2 * y_ds = (const half2 *) y; // #pragma unroll - for (int k01 = 0; k01 < WARP_SIZE; k01 += QR5_K*VDR_Q5_K_Q8_1_MMQ) { + for (int k01 = 0; k01 < MMQ_TILE_NE_K; k01 += QR5_K*VDR_Q5_K_Q8_1_MMQ) { const int k0 = k00 + k01; #pragma unroll @@ -1597,36 +2011,42 @@ static __device__ __forceinline__ void vec_dot_q5_K_q8_1_dp4a( const int j = j0 + threadIdx.y; #pragma unroll - for (int i0 = 0; i0 < mmq_y; i0 += WARP_SIZE) { + for (int i0 = 0; i0 < mmq_y; i0 += warp_size) { const int i = i0 + threadIdx.x; - const uint8_t * sc = ((const uint8_t *) &x_sc[i * (WARP_SIZE/8) + i/8 + k00/32]) + 2*(k01/16); + const uint8_t * sc = ((const uint8_t *) &x_sc[i * (MMQ_TILE_NE_K/8) + i/8 + k00/32]) + 2*(k01/16); - sum[j0/nwarps*mmq_y/WARP_SIZE + i0/WARP_SIZE] += vec_dot_q5_K_q8_1_impl_mmq( - &x_qs[i*(QR5_K*WARP_SIZE + 1) + k0], &y_qs[j*MMQ_TILE_Y_K + k01], sc, sc+8, + sum[j0/nwarps*mmq_y/warp_size + i0/warp_size] += vec_dot_q5_K_q8_1_impl_mmq( + &x_qs[i*(QR5_K*MMQ_TILE_NE_K + 1) + k0], &y_qs[j*MMQ_TILE_Y_K + k01], sc, sc+8, x_dm[i], &y_ds[j*MMQ_TILE_Y_K + k01/QI8_1]); } } } } -template static __device__ __forceinline__ void load_tiles_q6_K( +template static __device__ __forceinline__ void load_tiles_q6_K( const char * __restrict__ x, int * __restrict__ x_tile, const int kbx0, const int i_max, const int stride) { + constexpr int nwarps = mmq_get_nwarps_device(); + constexpr int warp_size = ggml_cuda_get_physical_warp_size(); -#ifdef NEW_MMA_AVAILABLE +#if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) int * x_qs = (int *) x_tile; - 
float * x_df = (float *) (x_qs + WARP_SIZE*2); - int * x_sc = (int *) (x_df + WARP_SIZE/QI6_K); + float * x_df = (float *) (x_qs + MMQ_TILE_NE_K*2); + int * x_sc = (int *) (x_df + MMQ_TILE_NE_K/QI6_K); #else constexpr tile_x_sizes txs = mmq_get_dp4a_tile_x_sizes(GGML_TYPE_Q6_K, mmq_y); int * x_qs = (int *) x_tile; float * x_df = (float *) (x_qs + txs.qs); int * x_sc = (int *) (x_df + txs.dm); -#endif // NEW_MMA_AVAILABLE +#endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) + + constexpr int threads_per_row = MMQ_ITER_K / (4 * QR6_K); + constexpr int nrows = warp_size / threads_per_row; + const int txi = warp_size > threads_per_row ? threadIdx.x % threads_per_row : threadIdx.x; #pragma unroll - for (int i0 = 0; i0 < mmq_y; i0 += nwarps) { - int i = i0 + threadIdx.y; + for (int i0 = 0; i0 < mmq_y; i0 += nrows*nwarps) { + int i = i0 + (nrows == 1 ? threadIdx.y : threadIdx.y*nrows + threadIdx.x/threads_per_row); if (need_check) { i = min(i, i_max); @@ -1634,67 +2054,67 @@ template static __device__ __forceinlin const block_q6_K * bxi = (const block_q6_K *) x + kbx0 + i*stride; - const int ql = get_int_b2(bxi->ql, threadIdx.x); + const int ql = get_int_b2(bxi->ql, txi); const int ql0 = (ql >> 0) & 0x0F0F0F0F; const int ql1 = (ql >> 4) & 0x0F0F0F0F; - const int qh = get_int_b2(bxi->qh, (QI6_K/4) * (threadIdx.x / (QI6_K/2)) + threadIdx.x % (QI6_K/4)); - const int qh0 = ((qh >> ((threadIdx.x & 0x08) >> 2)) << 4) & 0x30303030; - const int qh1 = (qh >> ((threadIdx.x & 0x08) >> 2)) & 0x30303030; + const int qh = get_int_b2(bxi->qh, (QI6_K/4) * (txi / (QI6_K/2)) + txi % (QI6_K/4)); + const int qh0 = ((qh >> ((txi & 0x08) >> 2)) << 4) & 0x30303030; + const int qh1 = (qh >> ((txi & 0x08) >> 2)) & 0x30303030; - const int kq0 = 2*threadIdx.x - threadIdx.x % (QI6_K/2) + 0; - const int kq1 = 2*threadIdx.x - threadIdx.x % (QI6_K/2) + QI6_K/2; + const int kq0 = 2*txi - txi % (QI6_K/2) + 0; + const int kq1 = 2*txi - txi % (QI6_K/2) + QI6_K/2; -#ifdef NEW_MMA_AVAILABLE +#if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) x_qs[i*MMQ_MMA_TILE_X_K_Q6_K + kq0] = __vsubss4(ql0 | qh0, 0x20202020); x_qs[i*MMQ_MMA_TILE_X_K_Q6_K + kq1] = __vsubss4(ql1 | qh1, 0x20202020); #else - x_qs[i*(2*WARP_SIZE + 1) + kq0] = __vsubss4(ql0 | qh0, 0x20202020); - x_qs[i*(2*WARP_SIZE + 1) + kq1] = __vsubss4(ql1 | qh1, 0x20202020); -#endif // NEW_MMA_AVAILABLE + x_qs[i*(2*MMQ_TILE_NE_K + 1) + kq0] = __vsubss4(ql0 | qh0, 0x20202020); + x_qs[i*(2*MMQ_TILE_NE_K + 1) + kq1] = __vsubss4(ql1 | qh1, 0x20202020); +#endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) } - const int blocks_per_tile_x_row = WARP_SIZE / QI6_K; // == 1 if QK_K == 256 - const int kbxd = threadIdx.x % blocks_per_tile_x_row; // == 0 if QK_K == 256 - #pragma unroll - for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI6_K) { - int i = (i0 + threadIdx.y * QI6_K + threadIdx.x / blocks_per_tile_x_row) % mmq_y; + for (int i0 = 0; i0 < mmq_y; i0 += nwarps*warp_size) { + int i = (i0 + threadIdx.y*warp_size + threadIdx.x) % mmq_y; if (need_check) { i = min(i, i_max); } - const block_q6_K * bxi = (const block_q6_K *) x + kbx0 + i*stride + kbxd; + const block_q6_K * bxi = (const block_q6_K *) x + kbx0 + i*stride; -#ifdef NEW_MMA_AVAILABLE - x_df[i*MMQ_MMA_TILE_X_K_Q6_K + kbxd] = bxi->d; +#if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE) + x_df[i*MMQ_MMA_TILE_X_K_Q6_K] = bxi->d; #else - x_df[i*(WARP_SIZE/QI6_K) + i/QI6_K + kbxd] = bxi->d; -#endif // NEW_MMA_AVAILABLE + x_df[i*(MMQ_TILE_NE_K/QI6_K) + i/QI6_K] = bxi->d; 
 
-template
+template
 static __device__ __forceinline__ void vec_dot_q6_K_q8_1_dp4a(
     const int * __restrict__ x, const int * __restrict__ y, float * __restrict__ sum, const int k00) {
+    constexpr int nwarps = mmq_get_nwarps_device();
+    constexpr int warp_size = ggml_cuda_get_physical_warp_size();
 
     constexpr tile_x_sizes txs = mmq_get_dp4a_tile_x_sizes(GGML_TYPE_Q6_K, mmq_y);
     const int * x_qs = (const int *) x;
@@ -1704,7 +2124,7 @@ static __device__ __forceinline__ void vec_dot_q6_K_q8_1_dp4a(
     const float * y_df = (const float *) y;
 
 // #pragma unroll
-    for (int k01 = 0; k01 < WARP_SIZE; k01 += QR6_K*VDR_Q6_K_Q8_1_MMQ) {
+    for (int k01 = 0; k01 < MMQ_TILE_NE_K; k01 += QR6_K*VDR_Q6_K_Q8_1_MMQ) {
         const int k0 = k00 + k01;
 
 #pragma unroll
@@ -1712,23 +2132,74 @@ static __device__ __forceinline__ void vec_dot_q6_K_q8_1_dp4a(
         const int j = j0 + threadIdx.y;
 
 #pragma unroll
-        for (int i0 = 0; i0 < mmq_y; i0 += WARP_SIZE) {
+        for (int i0 = 0; i0 < mmq_y; i0 += warp_size) {
             const int i = i0 + threadIdx.x;
 
-            const int8_t * sc = ((const int8_t *) &x_sc[i * (WARP_SIZE/8) + i/8 + k0/16]);
+            const int8_t * sc = ((const int8_t *) &x_sc[i * (MMQ_TILE_NE_K/8) + i/8 + k0/16]);
 
-            sum[j0/nwarps*mmq_y/WARP_SIZE + i0/WARP_SIZE] += vec_dot_q6_K_q8_1_impl_mmq(
-                &x_qs[i*(QR6_K*WARP_SIZE + 1) + k0], &y_qs[j*MMQ_TILE_Y_K + k01], sc,
-                x_df[i*(WARP_SIZE/QI6_K) + i/QI6_K], &y_df[j*MMQ_TILE_Y_K + k01/QI8_1]);
+            sum[j0/nwarps*mmq_y/warp_size + i0/warp_size] += vec_dot_q6_K_q8_1_impl_mmq(
+                &x_qs[i*(QR6_K*MMQ_TILE_NE_K + 1) + k0], &y_qs[j*MMQ_TILE_Y_K + k01], sc,
+                x_df[i*(MMQ_TILE_NE_K/QI6_K) + i/QI6_K], &y_df[j*MMQ_TILE_Y_K + k01/QI8_1]);
         }
     }
     }
 }
 
-template
+template
 static __device__ __forceinline__ void vec_dot_q6_K_q8_1_mma(
     const int * __restrict__ x, const int * __restrict__ y, float * __restrict__ sum, const int k00) {
-#ifdef NEW_MMA_AVAILABLE
+#if defined(AMD_MFMA_AVAILABLE)
+    typedef tile<16, 8, int>  tile_A;
+    typedef tile<16, 8, int>  tile_B;
+    typedef tile<16, 16, int> tile_C;
+    typedef tile<64, 2, int>  tile_load;
+
+    constexpr int granularity = mmq_get_granularity_device(mmq_x);
+    constexpr int rows_per_warp = granularity;
+    constexpr int ntx = rows_per_warp/tile_C::I; // Number of x minitiles per warp.
+
+    y += (threadIdx.y % ntx) * (tile_C::J*MMQ_TILE_Y_K);
+
+    const int   * x_qs = (const int   *) x;
+    const float * x_df = (const float *) x_qs + MMQ_TILE_NE_K*2;
+    const int   * x_sc = (const int   *) x_df + MMQ_TILE_NE_K/QI6_K;
+    const int   * y_qs = (const int   *) y + 4;
+    const float * y_df = (const float *) y;
+
+    const int i0 = (threadIdx.y / ntx) * rows_per_warp;
+
+    for (int k01 = 0; k01 < MMQ_TILE_NE_K; k01 += 4) {
+        const int k0 = k00 + k01;
+
+        tile_A A[ntx];
+#pragma unroll
+        for (int n = 0; n < ntx; ++n) {
+            load_generic(((tile_load *) A)[n], x_qs + (i0 + n*tile_A::I)*MMQ_MMA_TILE_X_K_Q6_K + k0, MMQ_MMA_TILE_X_K_Q6_K);
+        }
+
+#pragma unroll
+        for (int j0 = 0; j0 < mmq_x; j0 += ntx*tile_C::J) {
+            tile_B B[1];
+            load_generic(((tile_load *) B)[0], y_qs + j0*MMQ_TILE_Y_K + k01, MMQ_TILE_Y_K);
+
+            const int j = j0 + tile_C::get_j(0);
+            const float dB = y_df[j*MMQ_TILE_Y_K + k01/QI8_1] / 2;
+
+#pragma unroll
+            for (int n = 0; n < ntx; ++n) {
+                tile_C C;
+                mma(C, A[n], B[0]);
+
+#pragma unroll
+                for (int l = 0; l < tile_C::ne; ++l) {
+                    const int i = i0 + n*tile_C::I + tile_C::get_i(l);
+                    const int8_t * sc = (const int8_t *) (x_sc + i*MMQ_MMA_TILE_X_K_Q6_K + k00/16);
+                    sum[(j0/tile_C::J + n)*tile_C::ne + l] += C.x[l] * sc[k01/4] * x_df[i*MMQ_MMA_TILE_X_K_Q6_K] * dB;
+                }
+            }
+        }
+    }
+#elif defined(TURING_MMA_AVAILABLE)
 
     typedef tile<16, 4, int> tile_A;
     typedef tile< 8, 4, int> tile_B;
@@ -1738,11 +2209,11 @@ static __device__ __forceinline__ void vec_dot_q6_K_q8_1_mma(
     constexpr int rows_per_warp = 2 * granularity;
     constexpr int ntx = rows_per_warp/tile_C::I; // Number of x minitiles per warp.
 
-    y += (threadIdx.y % ntx) * (tile_B::I*MMQ_TILE_Y_K);
+    y += (threadIdx.y % ntx) * (tile_C::J*MMQ_TILE_Y_K);
 
     const int   * x_qs = (const int   *) x;
-    const float * x_df = (const float *) x_qs + WARP_SIZE*2;
-    const int   * x_sc = (const int   *) x_df + WARP_SIZE/QI6_K;
+    const float * x_df = (const float *) x_qs + MMQ_TILE_NE_K*2;
+    const int   * x_sc = (const int   *) x_df + MMQ_TILE_NE_K/QI6_K;
     const int   * y_qs = (const int   *) y + 4;
     const float * y_df = (const float *) y;
 
@@ -1755,7 +2226,7 @@ static __device__ __forceinline__ void vec_dot_q6_K_q8_1_mma(
 #pragma unroll
     for (int n = 0; n < ntx; ++n) {
 #pragma unroll
-        for (int k01 = 0; k01 < WARP_SIZE; k01 += 8) {
+        for (int k01 = 0; k01 < MMQ_TILE_NE_K; k01 += 8) {
             const int k0 = k00 + k01;
 
             load_ldmatrix(A[n][k01/4 + 0], x_qs + (i0 + n*tile_A::I)*MMQ_MMA_TILE_X_K_Q6_K + (k0 + 0), MMQ_MMA_TILE_X_K_Q6_K);
@@ -1763,7 +2234,7 @@ static __device__ __forceinline__ void vec_dot_q6_K_q8_1_mma(
         }
 
 #pragma unroll
-        for (int k01 = 0; k01 < WARP_SIZE; k01 += 16) {
+        for (int k01 = 0; k01 < MMQ_TILE_NE_K; k01 += 16) {
             const int k0 = k00 + k01;
 
 #pragma unroll
@@ -1793,7 +2264,7 @@ static __device__ __forceinline__ void vec_dot_q6_K_q8_1_mma(
         float tmp[ntx][tile_C::ne] = {{0.0f}};
 
 #pragma unroll
-        for (int k01 = 0; k01 < WARP_SIZE; k01 += 8) {
+        for (int k01 = 0; k01 < MMQ_TILE_NE_K; k01 += 8) {
             tile_B B[2];
             float dB[tile_C::ne/2];
 
@@ -1832,27 +2303,32 @@ static __device__ __forceinline__ void vec_dot_q6_K_q8_1_mma(
 #else
     GGML_UNUSED(x); GGML_UNUSED(y); GGML_UNUSED(sum); GGML_UNUSED(k00);
     NO_DEVICE_CODE;
-#endif // NEW_MMA_AVAILABLE
+#endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE)
 }
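Editor's note: on the CDNA (AMD_MFMA_AVAILABLE) path above the matrix cores accumulate raw int32 products, and all quantization scales are applied per element in the epilogue. A scalar sketch of that final scaling, modeled on the sum[...] line; the parameter names are illustrative, not the kernel's API:

    // One accumulator element of the Q6_K epilogue, modeled on
    //   sum[...] += C.x[l] * sc[k01/4] * x_df[i*MMQ_MMA_TILE_X_K_Q6_K] * dB;
    // c_int   : raw int32 MMA result for one 4-wide k slice
    // sc      : per-16-value Q6_K sub-scale (int8)
    // d       : per-block Q6_K super-scale
    // d8_half : Q8_1 column scale, already divided by 2 as dB is in the kernel
    float q6_k_epilogue_term(int c_int, signed char sc, float d, float d8_half) {
        return float(c_int) * float(sc) * d * d8_half;
    }

Keeping the epilogue scalar is what lets both the MFMA and the Turing ldmatrix paths share the same integer tile layout in shared memory.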
 
-template static __device__ __forceinline__ void load_tiles_iq4_nl(
+template static __device__ __forceinline__ void load_tiles_iq4_nl(
     const char * __restrict__ x, int * __restrict__ x_tile, const int kbx0, const int i_max, const int stride) {
+    constexpr int nwarps = mmq_get_nwarps_device();
+    constexpr int warp_size = ggml_cuda_get_physical_warp_size();
 
-#ifdef NEW_MMA_AVAILABLE
+#if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE)
     int * x_qs = (int *) x_tile;
-    float * x_df = (float *) (x_qs + WARP_SIZE*2);
+    float * x_df = (float *) (x_qs + MMQ_TILE_NE_K*2);
 #else
     constexpr tile_x_sizes txs = mmq_get_dp4a_tile_x_sizes(GGML_TYPE_IQ4_NL, mmq_y);
     int   * x_qs = (int   *) x_tile;
     float * x_df = (float *) (x_qs + txs.qs);
-#endif // NEW_MMA_AVAILABLE
+#endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE)
 
-    const int kbx  = threadIdx.x / QI4_NL;
-    const int kqsx = threadIdx.x % QI4_NL;
+    constexpr int threads_per_row = MMQ_ITER_K / (4 * QR4_NL);
+    constexpr int nrows = warp_size / threads_per_row;
+    const int txi = warp_size > threads_per_row ? threadIdx.x % threads_per_row : threadIdx.x;
+    const int kbx  = txi / QI4_NL;
+    const int kqsx = txi % QI4_NL;
 
 #pragma unroll
-    for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
-        int i = i0 + threadIdx.y;
+    for (int i0 = 0; i0 < mmq_y; i0 += nrows*nwarps) {
+        int i = i0 + (nrows == 1 ? threadIdx.y : threadIdx.y*nrows + threadIdx.x/threads_per_row);
 
         if (need_check) {
             i = min(i, i_max);
@@ -1861,23 +2337,25 @@ template static __device__ __forceinlin
         const block_iq4_nl * bxi = (const block_iq4_nl *) x + kbx0 + i*stride + kbx;
 
         const int aux_q4 = get_int_b2(bxi->qs, kqsx);
-        const int2 v = get_int_from_table_16(aux_q4);
-        const int k0 = 8 * (threadIdx.x / 4) + threadIdx.x % 4;
-#ifdef NEW_MMA_AVAILABLE
-        x_qs[i*MMQ_MMA_TILE_X_K_Q8_0 + k0 + 0] = v.x;
-        x_qs[i*MMQ_MMA_TILE_X_K_Q8_0 + k0 + 4] = v.y;
+        const int2 v = get_int_from_table_16(aux_q4, kvalues_iq4nl);
+        const int k0 = kbx * (2 * QI4_NL) + kqsx;
+
+#if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE)
+        x_qs[i*MMQ_MMA_TILE_X_K_Q8_0 + k0 + 0]      = v.x;
+        x_qs[i*MMQ_MMA_TILE_X_K_Q8_0 + k0 + QI4_NL] = v.y;
 #else
-        x_qs[i*(2*WARP_SIZE + 1) + k0 + 0] = v.x;
-        x_qs[i*(2*WARP_SIZE + 1) + k0 + 4] = v.y;
-#endif // NEW_MMA_AVAILABLE
+        x_qs[i*(2*MMQ_TILE_NE_K + 1) + k0 + 0]      = v.x;
+        x_qs[i*(2*MMQ_TILE_NE_K + 1) + k0 + QI4_NL] = v.y;
+#endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE)
     }
 
-    const int blocks_per_tile_x_row = WARP_SIZE / QI4_NL;
+    constexpr int blocks_per_tile_x_row = MMQ_TILE_NE_K / QI4_NL;
+    constexpr int rows_per_warp = warp_size / blocks_per_tile_x_row;
     const int kbxd = threadIdx.x % blocks_per_tile_x_row;
 
 #pragma unroll
-    for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI4_NL) {
-        int i = i0 + threadIdx.y * QI4_NL + threadIdx.x / blocks_per_tile_x_row;
+    for (int i0 = 0; i0 < mmq_y; i0 += nwarps * rows_per_warp) {
+        int i = i0 + threadIdx.y * rows_per_warp + threadIdx.x / blocks_per_tile_x_row;
 
         if (need_check) {
             i = min(i, i_max);
@@ -1885,31 +2363,35 @@ template static __device__ __forceinlin
 
         const block_iq4_nl * bxi = (const block_iq4_nl *) x + kbx0 + i*stride + kbxd;
 
-#ifdef NEW_MMA_AVAILABLE
-        x_df[i*MMQ_MMA_TILE_X_K_Q8_0 + kbxd] = __half2float(bxi->d);
+#if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE)
+        x_df[i*MMQ_MMA_TILE_X_K_Q8_0 + kbxd]             = __half2float(bxi->d);
 #else
-        x_df[i*(WARP_SIZE/4) + i/4 + kbxd] = __half2float(bxi->d);
-#endif // NEW_MMA_AVAILABLE
+        x_df[i*(MMQ_TILE_NE_K/QI4_NL) + i/QI4_NL + kbxd] = __half2float(bxi->d);
+#endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE)
     }
 }
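Editor's note: get_int_from_table_16 expands eight packed 4-bit indices into two ints of four int8 codebook values each (low nibbles, then high nibbles). A portable sketch of the same expansion, with the IQ4_NL codebook values copied from ggml; the function name here is hypothetical:

    #include <cstdint>
    #include <cstring>

    static const int8_t kvalues_iq4nl[16] = {
        -127, -104, -83, -65, -49, -35, -22, -10, 1, 13, 25, 38, 53, 69, 89, 113,
    };

    // lo receives the codebook values of the four low nibbles,
    // hi the values of the four high nibbles of q4.
    void int_from_table_16_ref(uint32_t q4, int32_t & lo, int32_t & hi) {
        int8_t l[4], h[4];
        for (int i = 0; i < 4; ++i) {
            const uint8_t byte = uint8_t(q4 >> (8*i));
            l[i] = kvalues_iq4nl[byte & 0x0F];
            h[i] = kvalues_iq4nl[byte >> 4];
        }
        std::memcpy(&lo, l, 4);
        std::memcpy(&hi, h, 4);
    }

After this expansion the tile is plain int8 data, which is why IQ4_NL can reuse the Q8_0 dot-product kernels in the traits table further down.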
 
-template static __device__ __forceinline__ void load_tiles_iq2_xxs(
+template static __device__ __forceinline__ void load_tiles_iq2_xxs(
     const char * __restrict__ x, int * __restrict__ x_tile, const int kbx0, const int i_max, const int stride) {
+    constexpr int nwarps = mmq_get_nwarps_device();
+    constexpr int warp_size = ggml_cuda_get_physical_warp_size();
 
-#ifdef NEW_MMA_AVAILABLE
+#if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE)
     int * x_qs = (int *) x_tile;
-    float * x_df = (float *) (x_qs + WARP_SIZE*2);
+    float * x_df = (float *) (x_qs + MMQ_TILE_NE_K*2);
 #else
     constexpr tile_x_sizes txs = mmq_get_dp4a_tile_x_sizes(GGML_TYPE_IQ2_XXS, mmq_y);
     int   * x_qs = (int   *) x_tile;
     float * x_df = (float *) (x_qs + txs.qs);
-#endif // NEW_MMA_AVAILABLE
+#endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE)
 
-    const int kqsx = threadIdx.x % (QI2_XXS/2);
+    constexpr int threads_per_row = (MMQ_ITER_K / (4 * QR2_XXS)) / 2;
+    constexpr int nrows = warp_size / threads_per_row;
+    const int kqsx = warp_size > threads_per_row ? threadIdx.x % threads_per_row : threadIdx.x;
 
 #pragma unroll
-    for (int i0 = 0; i0 < mmq_y; i0 += nwarps * WARP_SIZE/(QI2_XXS/2)) {
-        int i = i0 + threadIdx.y*(2*WARP_SIZE/QI2_XXS) + threadIdx.x/(QI2_XXS/2);
+    for (int i0 = 0; i0 < mmq_y; i0 += nwarps * nrows) {
+        int i = i0 + threadIdx.y*nrows + threadIdx.x/threads_per_row;
 
         if (need_check) {
             i = min(i, i_max);
@@ -1932,42 +2414,46 @@ template static __device__ __forceinlin
             const int signs1 = __vcmpne4(((signs_packed & 0x30) << 3) | ((signs_packed & 0xC0) << 17), 0x00000000);
             const int grid1 = __vsub4(grid_pos[1] ^ signs1, signs1);
 
-#ifdef NEW_MMA_AVAILABLE
+#if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE)
             x_qs[i*MMQ_MMA_TILE_X_K_Q8_0 + 8*kqsx + (2*l + 0)] = grid0;
             x_qs[i*MMQ_MMA_TILE_X_K_Q8_0 + 8*kqsx + (2*l + 1)] = grid1;
 #else
-            x_qs[i*(2*WARP_SIZE + 1) + 8*kqsx + (2*l + 0)] = grid0;
-            x_qs[i*(2*WARP_SIZE + 1) + 8*kqsx + (2*l + 1)] = grid1;
-#endif // NEW_MMA_AVAILABLE
+            x_qs[i*(2*MMQ_TILE_NE_K + 1) + 8*kqsx + (2*l + 0)] = grid0;
+            x_qs[i*(2*MMQ_TILE_NE_K + 1) + 8*kqsx + (2*l + 1)] = grid1;
+#endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE)
         }
 
         const int ls = aux32 >> 28;
         const float d = bxi->d;
-#ifdef NEW_MMA_AVAILABLE
-        x_df[i*MMQ_MMA_TILE_X_K_Q8_0 + kqsx] = (ls*d + d/2)/4;
+#if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE)
+        x_df[i*MMQ_MMA_TILE_X_K_Q8_0 + kqsx]   = (ls*d + d/2)/4;
 #else
-        x_df[i*(WARP_SIZE/4) + i/4 + kqsx] = (ls*d + d/2)/4;
-#endif // NEW_MMA_AVAILABLE
+        x_df[i*(MMQ_TILE_NE_K/4) + i/4 + kqsx] = (ls*d + d/2)/4;
+#endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE)
     }
 }
 
-template static __device__ __forceinline__ void load_tiles_iq2_xs(
+template static __device__ __forceinline__ void load_tiles_iq2_xs(
     const char * __restrict__ x, int * __restrict__ x_tile, const int kbx0, const int i_max, const int stride) {
+    constexpr int nwarps = mmq_get_nwarps_device();
+    constexpr int warp_size = ggml_cuda_get_physical_warp_size();
 
-#ifdef NEW_MMA_AVAILABLE
+#if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE)
     int * x_qs = (int *) x_tile;
-    float * x_df = (float *) (x_qs + WARP_SIZE*2);
+    float * x_df = (float *) (x_qs + MMQ_TILE_NE_K*2);
 #else
     constexpr tile_x_sizes txs = MMQ_DP4A_TXS_Q8_0_16;
     int   * x_qs = (int   *) x_tile;
     float * x_df = (float *) (x_qs + txs.qs);
-#endif // NEW_MMA_AVAILABLE
+#endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE)
 
-    const int kqsx = threadIdx.x % (QI2_XS/2);
+    constexpr int threads_per_row = (MMQ_ITER_K / (4 * QR2_XS)) / 2;
+    constexpr int nrows = warp_size / threads_per_row;
+    const int kqsx = threadIdx.x % threads_per_row;
 
 #pragma unroll
-    for (int i0 = 0; i0 < mmq_y; i0 += nwarps * WARP_SIZE/(QI2_XS/2)) {
-        int i = i0 + threadIdx.y*(2*WARP_SIZE/QI2_XS) + threadIdx.x/(QI2_XS/2);
+    for (int i0 = 0; i0 < mmq_y; i0 += nwarps * nrows) {
+        int i = i0 + threadIdx.y*nrows + threadIdx.x/threads_per_row;
 
         if (need_check) {
             i = min(i, i_max);
@@ -1986,44 +2472,48 @@ template static __device__ __forceinlin
             const int grid_l = __vsub4(grid_pos[0] ^ signs[0], signs[0]);
             const int grid_h = __vsub4(grid_pos[1] ^ signs[1], signs[1]);
 
-#ifdef NEW_MMA_AVAILABLE
+#if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE)
             x_qs[i*MMQ_MMA_TILE_X_K_Q3_K + 8*kqsx + (2*l + 0)] = grid_l;
             x_qs[i*MMQ_MMA_TILE_X_K_Q3_K + 8*kqsx + (2*l + 1)] = grid_h;
 #else
-            x_qs[i*(2*WARP_SIZE + 1) + 8*kqsx + (2*l + 0)] = grid_l;
-            x_qs[i*(2*WARP_SIZE + 1) + 8*kqsx + (2*l + 1)] = grid_h;
-#endif // NEW_MMA_AVAILABLE
+            x_qs[i*(2*MMQ_TILE_NE_K + 1) + 8*kqsx + (2*l + 0)] = grid_l;
+            x_qs[i*(2*MMQ_TILE_NE_K + 1) + 8*kqsx + (2*l + 1)] = grid_h;
+#endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE)
         }
 
         const int ls = bxi->scales[kqsx];
         const float d = bxi->d;
-#ifdef NEW_MMA_AVAILABLE
-        x_df[i*MMQ_MMA_TILE_X_K_Q3_K + 2*kqsx+0] = ((ls & 0x0F)*d + d/2)/4;
-        x_df[i*MMQ_MMA_TILE_X_K_Q3_K + 2*kqsx+1] = ((ls >>   4)*d + d/2)/4;
+#if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE)
+        x_df[i*MMQ_MMA_TILE_X_K_Q3_K + 2*kqsx+0] = ((ls & 0x0F)*d + d/2)/4;
+        x_df[i*MMQ_MMA_TILE_X_K_Q3_K + 2*kqsx+1] = ((ls >>   4)*d + d/2)/4;
 #else
-        x_df[i*(2*WARP_SIZE*2/QI8_0) + i/(QI8_0/4) + 2*kqsx+0] = ((ls & 0x0F)*d + d/2)/4;
-        x_df[i*(2*WARP_SIZE*2/QI8_0) + i/(QI8_0/4) + 2*kqsx+1] = ((ls >>   4)*d + d/2)/4;
-#endif // NEW_MMA_AVAILABLE
+        x_df[i*(2*MMQ_TILE_NE_K*2/QI8_0) + i/(QI8_0/4) + 2*kqsx+0] = ((ls & 0x0F)*d + d/2)/4;
+        x_df[i*(2*MMQ_TILE_NE_K*2/QI8_0) + i/(QI8_0/4) + 2*kqsx+1] = ((ls >>   4)*d + d/2)/4;
+#endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE)
    }
 }
 
-template static __device__ __forceinline__ void load_tiles_iq2_s(
+template static __device__ __forceinline__ void load_tiles_iq2_s(
     const char * __restrict__ x, int * __restrict__ x_tile, const int kbx0, const int i_max, const int stride) {
+    constexpr int nwarps = mmq_get_nwarps_device();
+    constexpr int warp_size = ggml_cuda_get_physical_warp_size();
 
-#ifdef NEW_MMA_AVAILABLE
+#if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE)
     int * x_qs = (int *) x_tile;
-    float * x_df = (float *) (x_qs + WARP_SIZE*2);
+    float * x_df = (float *) (x_qs + MMQ_TILE_NE_K*2);
 #else
     constexpr tile_x_sizes txs = mmq_get_dp4a_tile_x_sizes(GGML_TYPE_IQ2_S, mmq_y);
     int   * x_qs = (int   *) x_tile;
     float * x_df = (float *) (x_qs + txs.qs);
-#endif // NEW_MMA_AVAILABLE
+#endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE)
 
-    const int kqsx = threadIdx.x % (QI2_S/2);
+    constexpr int threads_per_row = (MMQ_ITER_K / (4 * QR2_S)) / 2;
+    constexpr int nrows = warp_size / threads_per_row;
+    const int kqsx = threadIdx.x % threads_per_row;
 
 #pragma unroll
-    for (int i0 = 0; i0 < mmq_y; i0 += nwarps * WARP_SIZE/(QI2_S/2)) {
-        int i = i0 + threadIdx.y*(2*WARP_SIZE/QI2_S) + threadIdx.x/(QI2_S/2);
+    for (int i0 = 0; i0 < mmq_y; i0 += nwarps * nrows) {
+        int i = i0 + threadIdx.y*nrows + threadIdx.x/threads_per_row;
 
         if (need_check) {
             i = min(i, i_max);
@@ -2049,44 +2539,48 @@ template static __device__ __forceinlin
             const int grid_l = __vsub4(grid_pos[0] ^ signs0, signs0);
             const int grid_h = __vsub4(grid_pos[1] ^ signs1, signs1);
 
-#ifdef NEW_MMA_AVAILABLE
+#if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE)
             x_qs[i*MMQ_MMA_TILE_X_K_Q3_K + 8*kqsx + (2*l + 0)] = grid_l;
             x_qs[i*MMQ_MMA_TILE_X_K_Q3_K + 8*kqsx + (2*l + 1)] = grid_h;
 #else
-            x_qs[i*(2*WARP_SIZE + 1) + 8*kqsx + (2*l + 0)] = grid_l;
-            x_qs[i*(2*WARP_SIZE + 1) + 8*kqsx + (2*l + 1)] = grid_h;
-#endif // NEW_MMA_AVAILABLE
+            x_qs[i*(2*MMQ_TILE_NE_K + 1) + 8*kqsx + (2*l + 0)] = grid_l;
+            x_qs[i*(2*MMQ_TILE_NE_K + 1) + 8*kqsx + (2*l + 1)] = grid_h;
+#endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE)
         }
 
         const int ls = bxi->scales[kqsx];
         const float d = bxi->d;
-#ifdef NEW_MMA_AVAILABLE
-        x_df[i*MMQ_MMA_TILE_X_K_Q3_K + 2*kqsx+0] = ((ls & 0x0F)*d + d/2)/4;
-        x_df[i*MMQ_MMA_TILE_X_K_Q3_K + 2*kqsx+1] = ((ls >>   4)*d + d/2)/4;
+#if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE)
+        x_df[i*MMQ_MMA_TILE_X_K_Q3_K + 2*kqsx+0] = ((ls & 0x0F)*d + d/2)/4;
+        x_df[i*MMQ_MMA_TILE_X_K_Q3_K + 2*kqsx+1] = ((ls >>   4)*d + d/2)/4;
 #else
-        x_df[i*(2*WARP_SIZE*2/QI8_0) + i/(QI8_0/4) + 2*kqsx+0] = ((ls & 0x0F)*d + d/2)/4;
-        x_df[i*(2*WARP_SIZE*2/QI8_0) + i/(QI8_0/4) + 2*kqsx+1] = ((ls >>   4)*d + d/2)/4;
-#endif // NEW_MMA_AVAILABLE
+        x_df[i*(2*MMQ_TILE_NE_K*2/QI8_0) + i/(QI8_0/4) + 2*kqsx+0] = ((ls & 0x0F)*d + d/2)/4;
+        x_df[i*(2*MMQ_TILE_NE_K*2/QI8_0) + i/(QI8_0/4) + 2*kqsx+1] = ((ls >>   4)*d + d/2)/4;
+#endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE)
     }
 }
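Editor's note: all of the iq2/iq3 loaders above use the same branchless sign trick: __vcmpne4 turns packed sign bits into a per-byte 0xFF/0x00 mask, and (x ^ mask) - mask negates exactly the masked bytes. A scalar model of one byte lane, assuming nothing beyond two's-complement int8:

    #include <cstdint>

    // Equivalent of one byte lane of __vsub4(grid ^ signs, signs):
    // mask is the per-byte output of __vcmpne4 (0xFF where the sign bit was set).
    int8_t apply_sign(int8_t grid_val, bool negative) {
        const int8_t mask = negative ? int8_t(0xFF) : int8_t(0);
        return int8_t((grid_val ^ mask) - mask);  // == negative ? -grid_val : grid_val
    }

For mask == 0xFF this is ~x + 1, i.e. two's-complement negation, so four grid bytes are conditionally negated in a single pair of SIMD-in-a-word instructions.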
 
-template static __device__ __forceinline__ void load_tiles_iq3_xxs(
+template static __device__ __forceinline__ void load_tiles_iq3_xxs(
     const char * __restrict__ x, int * __restrict__ x_tile, const int kbx0, const int i_max, const int stride) {
+    constexpr int nwarps = mmq_get_nwarps_device();
+    constexpr int warp_size = ggml_cuda_get_physical_warp_size();
 
-#ifdef NEW_MMA_AVAILABLE
+#if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE)
     int * x_qs = (int *) x_tile;
-    float * x_df = (float *) (x_qs + WARP_SIZE*2);
+    float * x_df = (float *) (x_qs + MMQ_TILE_NE_K*2);
 #else
     constexpr tile_x_sizes txs = mmq_get_dp4a_tile_x_sizes(GGML_TYPE_IQ3_XXS, mmq_y);
     int   * x_qs = (int   *) x_tile;
     float * x_df = (float *) (x_qs + txs.qs);
-#endif // NEW_MMA_AVAILABLE
+#endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE)
 
-    const int kqsx = threadIdx.x % (QI3_XXS/2);
+    constexpr int threads_per_row = (MMQ_ITER_K / (4 * QR3_XXS)) / 2;
+    constexpr int nrows = warp_size / threads_per_row;
+    const int kqsx = threadIdx.x % threads_per_row;
 
 #pragma unroll
-    for (int i0 = 0; i0 < mmq_y; i0 += nwarps * WARP_SIZE/(QI3_XXS/2)) {
-        int i = i0 + threadIdx.y*(2*WARP_SIZE/QI3_XXS) + threadIdx.x/(QI3_XXS/2);
+    for (int i0 = 0; i0 < mmq_y; i0 += nwarps * nrows) {
+        int i = i0 + threadIdx.y*nrows + threadIdx.x/threads_per_row;
 
         if (need_check) {
             i = min(i, i_max);
@@ -2107,42 +2601,46 @@ template static __device__ __forceinlin
             const int grid_l = __vsub4(grid_pos.x ^ signs[0], signs[0]);
             const int grid_h = __vsub4(grid_pos.y ^ signs[1], signs[1]);
 
-#ifdef NEW_MMA_AVAILABLE
+#if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE)
             x_qs[i*MMQ_MMA_TILE_X_K_Q8_0 + 8*kqsx + (2*l + 0)] = grid_l;
             x_qs[i*MMQ_MMA_TILE_X_K_Q8_0 + 8*kqsx + (2*l + 1)] = grid_h;
 #else
-            x_qs[i*(2*WARP_SIZE + 1) + 8*kqsx + (2*l + 0)] = grid_l;
-            x_qs[i*(2*WARP_SIZE + 1) + 8*kqsx + (2*l + 1)] = grid_h;
-#endif // NEW_MMA_AVAILABLE
+            x_qs[i*(2*MMQ_TILE_NE_K + 1) + 8*kqsx + (2*l + 0)] = grid_l;
+            x_qs[i*(2*MMQ_TILE_NE_K + 1) + 8*kqsx + (2*l + 1)] = grid_h;
+#endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE)
         }
 
         const int ls = aux32 >> 28;
         const float d = bxi->d;
-#ifdef NEW_MMA_AVAILABLE
-        x_df[i*MMQ_MMA_TILE_X_K_Q8_0 + kqsx] = (ls*d + d/2)/2;
+#if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE)
+        x_df[i*MMQ_MMA_TILE_X_K_Q8_0 + kqsx]   = (ls*d + d/2)/2;
 #else
-        x_df[i*(WARP_SIZE/4) + i/4 + kqsx] = (ls*d + d/2)/2;
-#endif // NEW_MMA_AVAILABLE
+        x_df[i*(MMQ_TILE_NE_K/4) + i/4 + kqsx] = (ls*d + d/2)/2;
+#endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE)
     }
 }
 
-template static __device__ __forceinline__ void load_tiles_iq3_s(
+template static __device__ __forceinline__ void load_tiles_iq3_s(
     const char * __restrict__ x, int * __restrict__ x_tile, const int kbx0, const int i_max, const int stride) {
+    constexpr int nwarps = mmq_get_nwarps_device();
+    constexpr int warp_size = ggml_cuda_get_physical_warp_size();
 
-#ifdef NEW_MMA_AVAILABLE
+#if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE)
     int * x_qs = (int *) x_tile;
-    float * x_df = (float *) (x_qs + WARP_SIZE*2);
+    float * x_df = (float *) (x_qs + MMQ_TILE_NE_K*2);
 #else
     constexpr tile_x_sizes txs = mmq_get_dp4a_tile_x_sizes(GGML_TYPE_IQ3_S, mmq_y);
     int   * x_qs = (int   *) x_tile;
     float * x_df = (float *) (x_qs + txs.qs);
-#endif // NEW_MMA_AVAILABLE
+#endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE)
 
-    const int kqsx = threadIdx.x % (QI3_S/2);
+    constexpr int threads_per_row = (MMQ_ITER_K / (4 * QR3_S)) / 2;
+    constexpr int nrows = warp_size / threads_per_row;
+    const int kqsx = threadIdx.x % threads_per_row;
 
 #pragma unroll
-    for (int i0 = 0; i0 < mmq_y; i0 += nwarps * WARP_SIZE/(QI3_S/2)) {
-        int i = i0 + threadIdx.y*(2*WARP_SIZE/QI3_S) + threadIdx.x/(QI3_S/2);
+    for (int i0 = 0; i0 < mmq_y; i0 += nwarps * nrows) {
+        int i = i0 + threadIdx.y*nrows + threadIdx.x/threads_per_row;
 
         if (need_check) {
             i = min(i, i_max);
@@ -2170,42 +2668,46 @@ template static __device__ __forceinlin
             const int grid_l = __vsub4(grid_pos.x ^ signs0, signs0);
             const int grid_h = __vsub4(grid_pos.y ^ signs1, signs1);
 
-#ifdef NEW_MMA_AVAILABLE
+#if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE)
             x_qs[i*MMQ_MMA_TILE_X_K_Q8_0 + 8*kqsx + (2*l+0)] = grid_l;
             x_qs[i*MMQ_MMA_TILE_X_K_Q8_0 + 8*kqsx + (2*l+1)] = grid_h;
 #else
-            x_qs[i*(2*WARP_SIZE + 1) + 8*kqsx + (2*l+0)] = grid_l;
-            x_qs[i*(2*WARP_SIZE + 1) + 8*kqsx + (2*l+1)] = grid_h;
-#endif // NEW_MMA_AVAILABLE
+            x_qs[i*(2*MMQ_TILE_NE_K + 1) + 8*kqsx + (2*l+0)] = grid_l;
+            x_qs[i*(2*MMQ_TILE_NE_K + 1) + 8*kqsx + (2*l+1)] = grid_h;
+#endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE)
         }
 
         const int ls = 1 + 2*((bxi->scales[kqsx/2] >> (((2*kqsx) << 1) & 0x04)) & 0x0F);
         const float d = bxi->d;
-#ifdef NEW_MMA_AVAILABLE
-        x_df[i*MMQ_MMA_TILE_X_K_Q8_0 + kqsx] = ls*d;
+#if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE)
+        x_df[i*MMQ_MMA_TILE_X_K_Q8_0 + kqsx]   = ls*d;
 #else
-        x_df[i*(WARP_SIZE/4) + i/4 + kqsx] = ls*d;
-#endif // NEW_MMA_AVAILABLE
+        x_df[i*(MMQ_TILE_NE_K/4) + i/4 + kqsx] = ls*d;
+#endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE)
     }
 }
 
-template static __device__ __forceinline__ void load_tiles_iq1_s(
+template static __device__ __forceinline__ void load_tiles_iq1_s(
     const char * __restrict__ x, int * __restrict__ x_tile, const int kbx0, const int i_max, const int stride) {
+    constexpr int nwarps = mmq_get_nwarps_device();
+    constexpr int warp_size = ggml_cuda_get_physical_warp_size();
 
-#ifdef NEW_MMA_AVAILABLE
+#if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE)
     int * x_qs = (int *) x_tile;
-    half2 * x_ds = (half2 *) (x_qs + WARP_SIZE*2);
+    half2 * x_ds = (half2 *) (x_qs + MMQ_TILE_NE_K*2);
 #else
     constexpr tile_x_sizes txs = mmq_get_dp4a_tile_x_sizes(GGML_TYPE_IQ3_S, mmq_y);
     int   * x_qs = (int   *) x_tile;
     half2 * x_ds = (half2 *) (x_qs + txs.qs);
-#endif // NEW_MMA_AVAILABLE
+#endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE)
 
-    const int kqsx = threadIdx.x % QI1_S;
+    constexpr int threads_per_row = MMQ_ITER_K / (4 * QR1_S);
+    constexpr int nrows = warp_size / threads_per_row;
+    const int kqsx = threadIdx.x % threads_per_row;
 
 #pragma unroll
-    for (int i0 = 0; i0 < mmq_y; i0 += nwarps * WARP_SIZE/QI1_S) {
-        int i = i0 + threadIdx.y*(WARP_SIZE/QI1_S) + threadIdx.x/QI1_S;
+    for (int i0 = 0; i0 < mmq_y; i0 += nwarps * nrows) {
+        int i = i0 + threadIdx.y*nrows + threadIdx.x/threads_per_row;
 
         if (need_check) {
             i = min(i, i_max);
@@ -2225,66 +2727,71 @@ template static __device__ __forceinlin
             const int grid0 = (grid >> 0) & 0x0F0F0F0F;
             const int grid1 = (grid >> 4) & 0x0F0F0F0F;
 
-#ifdef NEW_MMA_AVAILABLE
+#if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE)
             x_qs[i*MMQ_MMA_TILE_X_K_Q8_1 + 8*kqsx + (2*l+0)] = grid0;
             x_qs[i*MMQ_MMA_TILE_X_K_Q8_1 + 8*kqsx + (2*l+1)] = grid1;
 #else
-            x_qs[i*(2*WARP_SIZE + 1) + 8*kqsx + (2*l+0)] = grid0;
-            x_qs[i*(2*WARP_SIZE + 1) + 8*kqsx + (2*l+1)] = grid1;
-#endif // NEW_MMA_AVAILABLE
+            x_qs[i*(2*MMQ_TILE_NE_K + 1) + 8*kqsx + (2*l+0)] = grid0;
+            x_qs[i*(2*MMQ_TILE_NE_K + 1) + 8*kqsx + (2*l+1)] = grid1;
+#endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE)
         }
 
         const float  d1q   = __half2float(bxi->d) * (((qh >> 11) & 0x0E) + 1);
         const float  delta = -1.0f + IQ1S_DELTA - (qh & 0x8000) * (2.0f*IQ1S_DELTA/0x8000);
 
-#ifdef NEW_MMA_AVAILABLE
-        x_ds[i*MMQ_MMA_TILE_X_K_Q8_1 + kqsx] = make_half2(d1q, d1q*delta);
+#if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE)
+        x_ds[i*MMQ_MMA_TILE_X_K_Q8_1 + kqsx]   = make_half2(d1q, d1q*delta);
 #else
-        x_ds[i*(WARP_SIZE/4) + i/4 + kqsx] = make_half2(d1q, d1q*delta);
-#endif // NEW_MMA_AVAILABLE
+        x_ds[i*(MMQ_TILE_NE_K/4) + i/4 + kqsx] = make_half2(d1q, d1q*delta);
+#endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE)
     }
 }
 
-template static __device__ __forceinline__ void load_tiles_iq4_xs(
+template static __device__ __forceinline__ void load_tiles_iq4_xs(
     const char * __restrict__ x, int * __restrict__ x_tile, const int kbx0, const int i_max, const int stride) {
+    constexpr int nwarps = mmq_get_nwarps_device();
+    constexpr int warp_size = ggml_cuda_get_physical_warp_size();
 
-#ifdef NEW_MMA_AVAILABLE
+#if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE)
     int * x_qs = (int *) x_tile;
-    float * x_df = (float *) (x_qs + WARP_SIZE*2);
+    float * x_df = (float *) (x_qs + MMQ_TILE_NE_K*2);
 #else
     constexpr tile_x_sizes txs = mmq_get_dp4a_tile_x_sizes(GGML_TYPE_IQ4_XS, mmq_y);
     int   * x_qs = (int   *) x_tile;
     float * x_df = (float *) (x_qs + txs.qs);
-#endif // NEW_MMA_AVAILABLE
+#endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE)
 
-    const int kbx  = 0;           // threadIdx.x / QI4_XS
-    const int kqsx = threadIdx.x; // threadIdx.x % QI4_XS
+    constexpr int threads_per_row = MMQ_ITER_K / (4 * QR4_XS);
+    constexpr int nrows = warp_size / threads_per_row;
+    const int kqsx = threadIdx.x % threads_per_row;
 
 #pragma unroll
-    for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
-        int i = i0 + threadIdx.y;
+    for (int i0 = 0; i0 < mmq_y; i0 += nrows*nwarps) {
+        int i = i0 + (nrows == 1 ? threadIdx.y : threadIdx.y*nrows + threadIdx.x/threads_per_row);
 
         if (need_check) {
             i = min(i, i_max);
        }
 
-        const block_iq4_xs * bxi = (const block_iq4_xs *) x + kbx0 + i*stride + kbx;
+        const block_iq4_xs * bxi = (const block_iq4_xs *) x + kbx0 + i*stride;
 
         const int aux_q4 = get_int_b4(bxi->qs, kqsx);
-        const int2 v = get_int_from_table_16(aux_q4);
-        const int k0 = 8 * (threadIdx.x / 4) + threadIdx.x % 4;
-#ifdef NEW_MMA_AVAILABLE
+        const int2 v = get_int_from_table_16(aux_q4, kvalues_iq4nl);
+        const int k0 = 8 * (kqsx / 4) + kqsx % 4;
+
+#if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE)
         x_qs[i*MMQ_MMA_TILE_X_K_Q8_0 + k0 + 0] = v.x;
         x_qs[i*MMQ_MMA_TILE_X_K_Q8_0 + k0 + 4] = v.y;
 #else
-        x_qs[i*(2*WARP_SIZE + 1) + k0 + 0] = v.x;
-        x_qs[i*(2*WARP_SIZE + 1) + k0 + 4] = v.y;
-#endif // NEW_MMA_AVAILABLE
+        x_qs[i*(2*MMQ_TILE_NE_K + 1) + k0 + 0] = v.x;
+        x_qs[i*(2*MMQ_TILE_NE_K + 1) + k0 + 4] = v.y;
+#endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE)
     }
 
+    constexpr int rows_per_warp = warp_size / 8;
 #pragma unroll
-    for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 4) {
-        int i = i0 + threadIdx.y * 4 + threadIdx.x / (WARP_SIZE/4);
+    for (int i0 = 0; i0 < mmq_y; i0 += nwarps * rows_per_warp) {
+        int i = i0 + threadIdx.y * rows_per_warp + threadIdx.x / (MMQ_TILE_NE_K/4);
 
         if (need_check) {
             i = min(i, i_max);
@@ -2297,18 +2804,21 @@ template static __device__ __forceinlin
 
         const int ls = ((bxi->scales_l[(threadIdx.x % 8)/2] >> (4*(threadIdx.x % 2))) & 0x0F)
             | (((bxi->scales_h >> (2*(threadIdx.x % 8))) & 0x03) << 4);
 
-#ifdef NEW_MMA_AVAILABLE
-        x_df[i*MMQ_MMA_TILE_X_K_Q8_0 + threadIdx.x % 8] = d * (ls - 32);
+#if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE)
+        x_df[i*MMQ_MMA_TILE_X_K_Q8_0 + threadIdx.x % 8]   = d * (ls - 32);
 #else
-        x_df[i*(WARP_SIZE/4) + i/4 + threadIdx.x % 8] = d * (ls - 32);
-#endif // NEW_MMA_AVAILABLE
+        x_df[i*(MMQ_TILE_NE_K/4) + i/4 + threadIdx.x % 8] = d * (ls - 32);
+#endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE)
     }
 }
 
-template
+template
 static __device__ __forceinline__ void mmq_write_back_dp4a(
     const float * __restrict__ sum, const int32_t * __restrict__ ids_dst, float * __restrict__ dst,
     const int stride, const int i_max, const int j_max) {
+    constexpr int nwarps = mmq_get_nwarps_device();
+    constexpr int warp_size = ggml_cuda_get_physical_warp_size();
+
 #pragma unroll
     for (int j0 = 0; j0 < mmq_x; j0 += nwarps) {
         const int j = j0 + threadIdx.y;
@@ -2318,32 +2828,40 @@ static __device__ __forceinline__ void mmq_write_back_dp4a(
         }
 
 #pragma unroll
-        for (int i0 = 0; i0 < mmq_y; i0 += WARP_SIZE) {
+        for (int i0 = 0; i0 < mmq_y; i0 += warp_size) {
            const int i = i0 + threadIdx.x;
 
            if (need_check && i > i_max) {
                continue;
            }
 
-            dst[ids_dst[j]*stride + i] = sum[(j0/nwarps) * (mmq_y/WARP_SIZE) + i0/WARP_SIZE];
+            dst[ids_dst[j]*stride + i] = sum[(j0/nwarps) * (mmq_y/warp_size) + i0/warp_size];
         }
     }
 }
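Editor's note: the dp4a write-back above only touches registers the calling thread already owns; the flat index into sum[] encodes which output each register holds. A host-side sketch of that layout, with example tile sizes that are assumptions, not values the kernel mandates:

    // Per-thread accumulator layout of the dp4a path: an mmq_x x mmq_y tile is
    // strided over nwarps*warp_size threads, so each thread keeps
    // mmq_x*mmq_y/(nwarps*warp_size) partial results in registers.
    constexpr int mmq_x = 8, mmq_y = 64, nwarps = 8, warp_size = 32;  // example values
    constexpr int regs_per_thread = mmq_x*mmq_y / (nwarps*warp_size); // == 2 here

    // Register holding output (i0 + threadIdx.x, j0 + threadIdx.y):
    constexpr int reg_index(int i0, int j0) {
        return (j0/nwarps) * (mmq_y/warp_size) + i0/warp_size;
    }

Because j0 always advances in steps of nwarps and i0 in steps of warp_size, the division is exact and every register maps to exactly one output element.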
 
-template
+template
 static __device__ __forceinline__ void mmq_write_back_mma(
     const float * __restrict__ sum, const int * __restrict__ ids_dst, float * __restrict__ dst,
     const int stride, const int i_max, const int j_max) {
-    typedef tile<16, 8, int> tile_C;
 
     constexpr int granularity = mmq_get_granularity_device(mmq_x);
+    constexpr int nwarps = mmq_get_nwarps_device();
+
+#if defined(AMD_MFMA_AVAILABLE)
+    constexpr int tileC_IJ = mmq_get_granularity_device(0);
+    typedef tile tile_C;
+    constexpr int rows_per_warp = granularity;
+#else
+    typedef tile<16, 8, int> tile_C;
     constexpr int rows_per_warp = 2 * granularity;
+#endif
     constexpr int ntx = rows_per_warp/tile_C::I; // Number of x minitiles per warp.
 
     const int i0 = (threadIdx.y / ntx) * (ntx*tile_C::I);
-#ifdef NEW_MMA_AVAILABLE
+#if defined(TURING_MMA_AVAILABLE) || defined(AMD_MFMA_AVAILABLE)
     static_assert(nwarps*tile_C::I == mmq_y, "nwarps*tile_C::I != mmq_y");
-#endif // NEW_MMA_AVAILABLE
+#endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE)
 
 #pragma unroll
     for (int j0 = 0; j0 < mmq_x; j0 += ntx*tile_C::J) {
@@ -2371,179 +2889,189 @@ static __device__ __forceinline__ void mmq_write_back_mma(
 
 // -------------------------------------------------------------------------------------------------------------------------------------
 
-template
+template
 struct mmq_type_traits;
 
-template
-struct mmq_type_traits {
+template
+struct mmq_type_traits {
     static constexpr int              vdr          = VDR_Q4_0_Q8_1_MMQ;
-    static constexpr load_tiles_mmq_t load_tiles   = load_tiles_q4_0;
-    static constexpr vec_dot_mmq_t    vec_dot_mma  = vec_dot_q8_0_q8_1_mma;
-    static constexpr vec_dot_mmq_t    vec_dot_dp4a = vec_dot_q4_0_q8_1_dp4a;
+    static constexpr load_tiles_mmq_t load_tiles   = load_tiles_q4_0;
+    static constexpr vec_dot_mmq_t    vec_dot_mma  = vec_dot_q8_0_q8_1_mma;
+    static constexpr vec_dot_mmq_t    vec_dot_dp4a = vec_dot_q4_0_q8_1_dp4a;
 };
 
-template
-struct mmq_type_traits {
+template
+struct mmq_type_traits {
     static constexpr int              vdr          = VDR_Q4_1_Q8_1_MMQ;
-    static constexpr load_tiles_mmq_t load_tiles   = load_tiles_q4_1;
-    static constexpr vec_dot_mmq_t    vec_dot_mma  = vec_dot_q8_1_q8_1_mma;
-    static constexpr vec_dot_mmq_t    vec_dot_dp4a = vec_dot_q4_1_q8_1_dp4a;
+    static constexpr load_tiles_mmq_t load_tiles   = load_tiles_q4_1;
+    static constexpr vec_dot_mmq_t    vec_dot_mma  = vec_dot_q8_1_q8_1_mma;
+    static constexpr vec_dot_mmq_t    vec_dot_dp4a = vec_dot_q4_1_q8_1_dp4a;
 };
 
-template
-struct mmq_type_traits {
+template
+struct mmq_type_traits {
    static constexpr int              vdr          = VDR_Q5_0_Q8_1_MMQ;
-    static constexpr load_tiles_mmq_t load_tiles   = load_tiles_q5_0;
-    static constexpr vec_dot_mmq_t    vec_dot_mma  = vec_dot_q8_0_q8_1_mma;
-    static constexpr vec_dot_mmq_t    vec_dot_dp4a = vec_dot_q8_0_q8_1_dp4a;
+    static constexpr load_tiles_mmq_t load_tiles   = load_tiles_q5_0;
+    static constexpr vec_dot_mmq_t    vec_dot_mma  = vec_dot_q8_0_q8_1_mma;
+    static constexpr vec_dot_mmq_t    vec_dot_dp4a = vec_dot_q8_0_q8_1_dp4a;
 };
 
-template
-struct mmq_type_traits {
     static constexpr int              vdr          = VDR_Q5_1_Q8_1_MMQ;
-    static constexpr load_tiles_mmq_t load_tiles   = load_tiles_q5_1;
-    static constexpr vec_dot_mmq_t    vec_dot_mma  = vec_dot_q8_1_q8_1_mma;
-    static constexpr vec_dot_mmq_t    vec_dot_dp4a = vec_dot_q8_1_q8_1_dp4a;
+template
+struct mmq_type_traits {
+    static constexpr load_tiles_mmq_t load_tiles   = load_tiles_q5_1;
+    static constexpr vec_dot_mmq_t    vec_dot_mma  = vec_dot_q8_1_q8_1_mma;
+    static constexpr vec_dot_mmq_t    vec_dot_dp4a = vec_dot_q8_1_q8_1_dp4a;
 };
 
-template
-struct mmq_type_traits {
+template
+struct mmq_type_traits {
     static constexpr int              vdr          = VDR_Q8_0_Q8_1_MMQ;
-    static constexpr load_tiles_mmq_t load_tiles   = load_tiles_q8_0;
-    static constexpr vec_dot_mmq_t    vec_dot_mma  = vec_dot_q8_0_q8_1_mma;
-    static constexpr vec_dot_mmq_t    vec_dot_dp4a = vec_dot_q8_0_q8_1_dp4a;
+    static constexpr load_tiles_mmq_t load_tiles   = load_tiles_q8_0;
+    static constexpr vec_dot_mmq_t    vec_dot_mma  = vec_dot_q8_0_q8_1_mma;
+    static constexpr vec_dot_mmq_t    vec_dot_dp4a = vec_dot_q8_0_q8_1_dp4a;
 };
 
-template
-struct mmq_type_traits {
+template
+struct mmq_type_traits {
+    static constexpr int              vdr          = VDR_MXFP4_Q8_1_MMQ;
+    static constexpr load_tiles_mmq_t load_tiles   = load_tiles_mxfp4;
+    static constexpr vec_dot_mmq_t    vec_dot_mma  = vec_dot_q8_0_q8_1_mma;
+    static constexpr vec_dot_mmq_t    vec_dot_dp4a = vec_dot_q8_0_q8_1_dp4a;
+};
+
+template
+struct mmq_type_traits {
     static constexpr int              vdr          = VDR_Q2_K_Q8_1_MMQ;
-    static constexpr load_tiles_mmq_t load_tiles   = load_tiles_q2_K;
-    static constexpr vec_dot_mmq_t    vec_dot_mma  = vec_dot_q2_K_q8_1_mma;
-    static constexpr vec_dot_mmq_t    vec_dot_dp4a = vec_dot_q2_K_q8_1_dp4a;
+    static constexpr load_tiles_mmq_t load_tiles   = load_tiles_q2_K;
+    static constexpr vec_dot_mmq_t    vec_dot_mma  = vec_dot_q2_K_q8_1_mma;
+    static constexpr vec_dot_mmq_t    vec_dot_dp4a = vec_dot_q2_K_q8_1_dp4a;
 };
 
-template
-struct mmq_type_traits {
+template
+struct mmq_type_traits {
     static constexpr int              vdr          = VDR_Q3_K_Q8_1_MMQ;
-    static constexpr load_tiles_mmq_t load_tiles   = load_tiles_q3_K;
-    static constexpr vec_dot_mmq_t    vec_dot_mma  = vec_dot_q8_0_16_q8_1_mma;
-    static constexpr vec_dot_mmq_t    vec_dot_dp4a = vec_dot_q3_K_q8_1_dp4a;
+    static constexpr load_tiles_mmq_t load_tiles   = load_tiles_q3_K;
+    static constexpr vec_dot_mmq_t    vec_dot_mma  = vec_dot_q8_0_16_q8_1_mma;
+    static constexpr vec_dot_mmq_t    vec_dot_dp4a = vec_dot_q3_K_q8_1_dp4a;
 };
 
-template
-struct mmq_type_traits {
+template
+struct mmq_type_traits {
     static constexpr int              vdr          = VDR_Q4_K_Q8_1_MMQ;
-    static constexpr load_tiles_mmq_t load_tiles   = load_tiles_q4_K;
-    static constexpr vec_dot_mmq_t    vec_dot_mma  = vec_dot_q8_1_q8_1_mma;
-    static constexpr vec_dot_mmq_t    vec_dot_dp4a = vec_dot_q4_K_q8_1_dp4a;
+    static constexpr load_tiles_mmq_t load_tiles   = load_tiles_q4_K;
+    static constexpr vec_dot_mmq_t    vec_dot_mma  = vec_dot_q8_1_q8_1_mma;
+    static constexpr vec_dot_mmq_t    vec_dot_dp4a = vec_dot_q4_K_q8_1_dp4a;
 };
 
-template
-struct mmq_type_traits {
+template
+struct mmq_type_traits {
     static constexpr int              vdr          = VDR_Q5_K_Q8_1_MMQ;
-    static constexpr load_tiles_mmq_t load_tiles   = load_tiles_q5_K;
-    static constexpr vec_dot_mmq_t    vec_dot_mma  = vec_dot_q8_1_q8_1_mma;
-    static constexpr vec_dot_mmq_t    vec_dot_dp4a = vec_dot_q5_K_q8_1_dp4a;
+    static constexpr load_tiles_mmq_t load_tiles   = load_tiles_q5_K;
+    static constexpr vec_dot_mmq_t    vec_dot_mma  = vec_dot_q8_1_q8_1_mma;
+    static constexpr vec_dot_mmq_t    vec_dot_dp4a = vec_dot_q5_K_q8_1_dp4a;
 };
 
-template
-struct mmq_type_traits {
+template
+struct mmq_type_traits {
     static constexpr int              vdr          = VDR_Q6_K_Q8_1_MMQ;
-    static constexpr load_tiles_mmq_t load_tiles   = load_tiles_q6_K;
-    static constexpr vec_dot_mmq_t    vec_dot_mma  = vec_dot_q6_K_q8_1_mma;
-    static constexpr vec_dot_mmq_t    vec_dot_dp4a = vec_dot_q6_K_q8_1_dp4a;
+    static constexpr load_tiles_mmq_t load_tiles   = load_tiles_q6_K;
+    static constexpr vec_dot_mmq_t    vec_dot_mma  = vec_dot_q6_K_q8_1_mma;
+    static constexpr vec_dot_mmq_t    vec_dot_dp4a = vec_dot_q6_K_q8_1_dp4a;
 };
 
-template
-struct mmq_type_traits {
+template
+struct mmq_type_traits {
     static constexpr int              vdr          = VDR_IQ2_XXS_Q8_1_MMQ;
-    static constexpr load_tiles_mmq_t load_tiles   = load_tiles_iq2_xxs;
-    static constexpr vec_dot_mmq_t    vec_dot_mma  = vec_dot_q8_0_q8_1_mma;
-    static constexpr vec_dot_mmq_t    vec_dot_dp4a = vec_dot_q8_0_q8_1_dp4a;
+    static constexpr load_tiles_mmq_t load_tiles   = load_tiles_iq2_xxs;
+    static constexpr vec_dot_mmq_t    vec_dot_mma  = vec_dot_q8_0_q8_1_mma;
+    static constexpr vec_dot_mmq_t    vec_dot_dp4a = vec_dot_q8_0_q8_1_dp4a;
 };
 
-template
-struct mmq_type_traits {
+template
+struct mmq_type_traits {
     static constexpr int              vdr          = VDR_IQ2_XS_Q8_1_MMQ;
-    static constexpr load_tiles_mmq_t load_tiles   = load_tiles_iq2_xs;
-    static constexpr vec_dot_mmq_t    vec_dot_mma  = vec_dot_q8_0_16_q8_1_mma;
-    static constexpr vec_dot_mmq_t    vec_dot_dp4a = vec_dot_q8_0_16_q8_1_dp4a;
+    static constexpr load_tiles_mmq_t load_tiles   = load_tiles_iq2_xs;
+    static constexpr vec_dot_mmq_t    vec_dot_mma  = vec_dot_q8_0_16_q8_1_mma;
+    static constexpr vec_dot_mmq_t    vec_dot_dp4a = vec_dot_q8_0_16_q8_1_dp4a;
 };
 
-template
-struct mmq_type_traits {
+template
+struct mmq_type_traits {
     static constexpr int              vdr          = VDR_IQ2_S_Q8_1_MMQ;
-    static constexpr load_tiles_mmq_t load_tiles   = load_tiles_iq2_s;
-    static constexpr vec_dot_mmq_t    vec_dot_mma  = vec_dot_q8_0_16_q8_1_mma;
-    static constexpr vec_dot_mmq_t    vec_dot_dp4a = vec_dot_q8_0_16_q8_1_dp4a;
+    static constexpr load_tiles_mmq_t load_tiles   = load_tiles_iq2_s;
+    static constexpr vec_dot_mmq_t    vec_dot_mma  = vec_dot_q8_0_16_q8_1_mma;
+    static constexpr vec_dot_mmq_t    vec_dot_dp4a = vec_dot_q8_0_16_q8_1_dp4a;
 };
 
-template
-struct mmq_type_traits {
+template
+struct mmq_type_traits {
     static constexpr int              vdr          = VDR_IQ3_XXS_Q8_1_MMQ;
-    static constexpr load_tiles_mmq_t load_tiles   = load_tiles_iq3_xxs;
-    static constexpr vec_dot_mmq_t    vec_dot_mma  = vec_dot_q8_0_q8_1_mma;
-    static constexpr vec_dot_mmq_t    vec_dot_dp4a = vec_dot_q8_0_q8_1_dp4a;
+    static constexpr load_tiles_mmq_t load_tiles   = load_tiles_iq3_xxs;
+    static constexpr vec_dot_mmq_t    vec_dot_mma  = vec_dot_q8_0_q8_1_mma;
+    static constexpr vec_dot_mmq_t    vec_dot_dp4a = vec_dot_q8_0_q8_1_dp4a;
 };
 
-template
-struct mmq_type_traits {
+template
+struct mmq_type_traits {
     static constexpr int              vdr          = VDR_IQ3_S_Q8_1_MMQ;
-    static constexpr load_tiles_mmq_t load_tiles   = load_tiles_iq3_s;
-    static constexpr vec_dot_mmq_t    vec_dot_mma  = vec_dot_q8_0_q8_1_mma;
-    static constexpr vec_dot_mmq_t    vec_dot_dp4a = vec_dot_q8_0_q8_1_dp4a;
+    static constexpr load_tiles_mmq_t load_tiles   = load_tiles_iq3_s;
+    static constexpr vec_dot_mmq_t    vec_dot_mma  = vec_dot_q8_0_q8_1_mma;
+    static constexpr vec_dot_mmq_t    vec_dot_dp4a = vec_dot_q8_0_q8_1_dp4a;
 };
 
-template
-struct mmq_type_traits {
+template
+struct mmq_type_traits {
     static constexpr int              vdr          = VDR_IQ1_S_Q8_1_MMQ;
-    static constexpr load_tiles_mmq_t load_tiles   = load_tiles_iq1_s;
-    static constexpr vec_dot_mmq_t    vec_dot_mma  = vec_dot_q8_1_q8_1_mma;
-    static constexpr vec_dot_mmq_t    vec_dot_dp4a = vec_dot_q8_1_q8_1_dp4a;
+    static constexpr load_tiles_mmq_t load_tiles   = load_tiles_iq1_s;
+    static constexpr vec_dot_mmq_t    vec_dot_mma  = vec_dot_q8_1_q8_1_mma;
+    static constexpr vec_dot_mmq_t    vec_dot_dp4a = vec_dot_q8_1_q8_1_dp4a;
 };
 
-template
-struct mmq_type_traits {
+template
+struct mmq_type_traits {
     static constexpr int              vdr          = VDR_IQ4_NL_Q8_1_MMQ;
-    static constexpr load_tiles_mmq_t load_tiles   = load_tiles_iq4_nl;
-    static constexpr vec_dot_mmq_t    vec_dot_mma  = vec_dot_q8_0_q8_1_mma;
-    static constexpr vec_dot_mmq_t    vec_dot_dp4a = vec_dot_q8_0_q8_1_dp4a;
+    static constexpr load_tiles_mmq_t load_tiles   = load_tiles_iq4_nl;
+    static constexpr vec_dot_mmq_t    vec_dot_mma  = vec_dot_q8_0_q8_1_mma;
+    static constexpr vec_dot_mmq_t    vec_dot_dp4a = vec_dot_q8_0_q8_1_dp4a;
 };
 
-template
-struct mmq_type_traits {
+template
+struct mmq_type_traits {
     static constexpr int              vdr          = VDR_IQ4_XS_Q8_1_MMQ;
-    static constexpr load_tiles_mmq_t load_tiles   = load_tiles_iq4_xs;
-    static constexpr vec_dot_mmq_t    vec_dot_mma  = vec_dot_q8_0_q8_1_mma;
-    static constexpr vec_dot_mmq_t    vec_dot_dp4a = vec_dot_q8_0_q8_1_dp4a;
+    static constexpr load_tiles_mmq_t load_tiles   = load_tiles_iq4_xs;
+    static constexpr vec_dot_mmq_t    vec_dot_mma  = vec_dot_q8_0_q8_1_mma;
+    static constexpr vec_dot_mmq_t    vec_dot_dp4a = vec_dot_q8_0_q8_1_dp4a;
 };
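Editor's note: the table above is a compile-time dispatch: each quant type maps to its tile loader plus an MMA and a dp4a dot kernel, and the new MXFP4 entry simply reuses the Q8_0 dot kernels after its loader expands the FP4 values. A minimal, self-contained C++ model of the pattern (toy enum and functions, not the real ggml symbols):

    #include <cstdio>

    enum ggml_type { GGML_TYPE_Q4_0, GGML_TYPE_Q8_0 };

    template <ggml_type type> struct traits; // primary left undefined:
                                             // unsupported types fail to compile

    template <> struct traits<GGML_TYPE_Q4_0> {
        static void load_tiles() { std::puts("load q4_0 tile"); }
        static void vec_dot()    { std::puts("q4_0 x q8_1 dot"); }
    };
    template <> struct traits<GGML_TYPE_Q8_0> {
        static void load_tiles() { std::puts("load q8_0 tile"); }
        static void vec_dot()    { std::puts("q8_0 x q8_1 dot"); }
    };

    template <ggml_type type> void process_tile() { // mirrors mul_mat_q_process_tile
        traits<type>::load_tiles();
        traits<type>::vec_dot();
    }

    int main() { process_tile<GGML_TYPE_Q4_0>(); }

Since every member is constexpr, the selected loader and dot function inline into the kernel with no indirect calls.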
 
-template
+template
 static __device__ __forceinline__ void mul_mat_q_process_tile(
     const char * __restrict__ x, const int offset_x, const int * __restrict__ y,
     const int * __restrict__ ids_dst, float * __restrict__ dst, float * __restrict__ tmp_fixup,
     const int stride_row_x, const int ncols_y, const int stride_col_dst,
     const int tile_x_max_i, const int tile_y_max_j, const int kb0_start, const int kb0_stop) {
+    constexpr int warp_size = ggml_cuda_get_physical_warp_size();
+    constexpr int nwarps = mmq_get_nwarps_device();
 
     constexpr int qk    = ggml_cuda_type_traits::qk;
     constexpr int mmq_y = get_mmq_y_device();
 
-    constexpr load_tiles_mmq_t load_tiles = mmq_type_traits::load_tiles;
+    constexpr load_tiles_mmq_t load_tiles = mmq_type_traits::load_tiles;
 
     extern __shared__ int data_mul_mat_q[];
     int * tile_y = data_mul_mat_q + mmq_x;
-    int * tile_x = tile_y + GGML_PAD(mmq_x*(WARP_SIZE + WARP_SIZE/QI8_1), nwarps*WARP_SIZE);
+    int * tile_x = tile_y + GGML_PAD(mmq_x*MMQ_TILE_Y_K, nwarps*warp_size);
 
-#ifdef NEW_MMA_AVAILABLE
-    constexpr vec_dot_mmq_t    vec_dot    = mmq_type_traits::vec_dot_mma;
-    constexpr mmq_write_back_t write_back = mmq_write_back_mma;
+#if defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE)
    constexpr vec_dot_mmq_t    vec_dot    = mmq_type_traits::vec_dot_mma;
+    constexpr mmq_write_back_t write_back = mmq_write_back_mma;
 #else
-    constexpr vec_dot_mmq_t    vec_dot    = mmq_type_traits::vec_dot_dp4a;
-    constexpr mmq_write_back_t write_back = mmq_write_back_dp4a;
-#endif // NEW_MMA_AVAILABLE
+    constexpr vec_dot_mmq_t    vec_dot    = mmq_type_traits::vec_dot_dp4a;
+    constexpr mmq_write_back_t write_back = mmq_write_back_dp4a;
+#endif // defined(AMD_MFMA_AVAILABLE) || defined(TURING_MMA_AVAILABLE)
 
     constexpr int blocks_per_iter = MMQ_ITER_K / qk;
 
-    float sum[mmq_x*mmq_y / (nwarps*WARP_SIZE)] = {0.0f};
+    float sum[mmq_x*mmq_y / (nwarps*warp_size)] = {0.0f};
 
     for (int kb0 = kb0_start; kb0 < kb0_stop; kb0 += blocks_per_iter) {
         load_tiles(x, tile_x, offset_x + kb0, tile_x_max_i, stride_row_x);
@@ -2551,8 +3079,8 @@ static __device__ __forceinline__ void mul_mat_q_process_tile(
         {
             const int * by0 = y + ncols_y*(kb0*(qk*sizeof(block_q8_1_mmq) / (4*QK8_1*sizeof(int))) + 0*sizeof(block_q8_1_mmq)/sizeof(int));
 #pragma unroll
-            for (int l0 = 0; l0 < mmq_x*MMQ_TILE_Y_K; l0 += nwarps*WARP_SIZE) {
-                int l = l0 + threadIdx.y*WARP_SIZE + threadIdx.x;
+            for (int l0 = 0; l0 < mmq_x*MMQ_TILE_Y_K; l0 += nwarps*warp_size) {
+                int l = l0 + threadIdx.y*warp_size + threadIdx.x;
 
                 tile_y[l] = by0[l];
             }
@@ -2567,8 +3095,8 @@ static __device__ __forceinline__ void mul_mat_q_process_tile(
         {
            const int * by0 = y + ncols_y*(kb0*(qk*sizeof(block_q8_1_mmq) / (4*QK8_1*sizeof(int))) + 1*sizeof(block_q8_1_mmq)/sizeof(int));
 #pragma unroll
-            for (int l0 = 0; l0 < mmq_x*MMQ_TILE_Y_K; l0 += nwarps*WARP_SIZE) {
-                int l = l0 + threadIdx.y*WARP_SIZE + threadIdx.x;
+            for (int l0 = 0; l0 < mmq_x*MMQ_TILE_Y_K; l0 += nwarps*warp_size) {
+                int l = l0 + threadIdx.y*warp_size + threadIdx.x;
 
                 tile_y[l] = by0[l];
             }
@@ -2576,7 +3104,7 @@ static __device__ __forceinline__ void mul_mat_q_process_tile(
 
         __syncthreads();
 
-        vec_dot(tile_x, tile_y, sum, WARP_SIZE);
+        vec_dot(tile_x, tile_y, sum, MMQ_TILE_NE_K);
 
         __syncthreads();
     }
@@ -2591,18 +3119,18 @@
 
 // The mul_mat_q kernel implements "stream-k" work partitioning as described in https://arxiv.org/abs/2301.03598
 
-template
-#if defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)
+template
+#if defined(GGML_USE_HIP)
 #if defined(RDNA4) || defined(RDNA3) || defined(RDNA2) || defined(CDNA) || defined(GCN)
-    __launch_bounds__(WARP_SIZE*nwarps, 2)
+    __launch_bounds__(ggml_cuda_get_physical_warp_size()*mmq_get_nwarps_device(), 2)
 #endif // defined(RDNA4) || defined(RDNA3) || defined(RDNA2) || defined(CDNA) || defined(GCN)
 #else
 #if __CUDA_ARCH__ >= GGML_CUDA_CC_VOLTA
-    __launch_bounds__(WARP_SIZE*nwarps, 1)
+    __launch_bounds__(ggml_cuda_get_physical_warp_size()*mmq_get_nwarps_device(), 1)
 #else
-    __launch_bounds__(WARP_SIZE*nwarps, 2)
+    __launch_bounds__(ggml_cuda_get_physical_warp_size()*mmq_get_nwarps_device(), 2)
 #endif // __CUDA_ARCH__ >= GGML_CUDA_CC_VOLTA
-#endif // defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)
+#endif // defined(GGML_USE_HIP)
 static __global__ void mul_mat_q(
     const char * __restrict__ x, const int * __restrict__ y, const int32_t * __restrict__ ids_dst,
     const int32_t * __restrict__ expert_bounds, float * __restrict__ dst, float * __restrict__ tmp_fixup,
@@ -2616,6 +3144,9 @@ static __global__ void mul_mat_q(
         return;
     }
 
+    constexpr int nwarps = mmq_get_nwarps_device();
+    constexpr int warp_size = ggml_cuda_get_physical_warp_size();
+
     constexpr int qk    = ggml_cuda_type_traits::qk;
     constexpr int mmq_y = get_mmq_y_device();
 
@@ -2627,10 +3158,10 @@ static __global__ void mul_mat_q(
     // For MoE the correct indices are loaded from ids_dst.
     extern __shared__ int ids_dst_shared[]; // Stored at beginning of shared memory.
 #pragma unroll
-    for (int j0 = 0; j0 < mmq_x; j0 += nwarps*WARP_SIZE) {
-        const int j = j0 + threadIdx.y*WARP_SIZE + threadIdx.x;
+    for (int j0 = 0; j0 < mmq_x; j0 += nwarps*warp_size) {
+        const int j = j0 + threadIdx.y*warp_size + threadIdx.x;
 
-        if (j0 + nwarps*WARP_SIZE > mmq_x && j >= mmq_x) {
+        if (j0 + nwarps*warp_size > mmq_x && j >= mmq_x) {
             break;
         }
 
@@ -2638,8 +3169,8 @@ static __global__ void mul_mat_q(
     }
     __syncthreads();
 
-    // On AMD or old CUDA the performance with stream-k was worse, use conventional tiling instead:
-#if (defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) || __CUDA_ARCH__ < GGML_CUDA_CC_VOLTA
+    // On non-CDNA AMD or old CUDA the performance with stream-k was worse, use conventional tiling instead:
+#if (defined(GGML_USE_HIP) && !defined(CDNA)) || __CUDA_ARCH__ < GGML_CUDA_CC_VOLTA
     {
         const int wt = blockIdx.z / nchannels_y;
         const int zt = blockIdx.z - wt*nchannels_y;
@@ -2667,10 +3198,10 @@ static __global__ void mul_mat_q(
         // __syncthreads(); // There is no previous tile that could cause a race condition.
 #pragma unroll
-        for (int j0 = 0; j0 < mmq_x; j0 += nwarps*WARP_SIZE) {
-            const int j = j0 + threadIdx.y*WARP_SIZE + threadIdx.x;
+        for (int j0 = 0; j0 < mmq_x; j0 += nwarps*warp_size) {
+            const int j = j0 + threadIdx.y*warp_size + threadIdx.x;
 
-            if (j0 + nwarps*WARP_SIZE > mmq_x && j >= mmq_x) {
+            if (j0 + nwarps*warp_size > mmq_x && j >= mmq_x) {
                 break;
             }
 
@@ -2688,12 +3219,12 @@ static __global__ void mul_mat_q(
         const int offset_x = (wt/sample_ratio)*stride_sample_x + (zt/channel_ratio)*stride_channel_x + it*mmq_y*stride_row_x;
 
         constexpr bool fixup = false;
-        mul_mat_q_process_tile
+        mul_mat_q_process_tile
             (x, offset_x, y + offset_y, ids_dst_shared, dst + offset_dst, tmp_fixup, stride_row_x, ncols_y, stride_col_dst,
              tile_x_max_i, tile_y_max_j, 0, ncols_x/qk);
         return;
     }
-#endif // (defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) || __CUDA_ARCH__ < GGML_CUDA_CC_VOLTA
+#endif // (defined(GGML_USE_HIP) && !defined(CDNA)) || __CUDA_ARCH__ < GGML_CUDA_CC_VOLTA
 
     const int64_t blocks_per_ne00 = ncols_x / qk;
     constexpr int blocks_per_iter = MMQ_ITER_K / qk;
@@ -2745,10 +3276,10 @@ static __global__ void mul_mat_q(
     __syncthreads();
 
 #pragma unroll
-    for (int j0 = 0; j0 < mmq_x; j0 += nwarps*WARP_SIZE) {
-        const int j = j0 + threadIdx.y*WARP_SIZE + threadIdx.x;
+    for (int j0 = 0; j0 < mmq_x; j0 += nwarps*warp_size) {
+        const int j = j0 + threadIdx.y*warp_size + threadIdx.x;
 
-        if (j0 + nwarps*WARP_SIZE > mmq_x && j >= mmq_x) {
+        if (j0 + nwarps*warp_size > mmq_x && j >= mmq_x) {
             break;
         }
 
@@ -2766,7 +3297,7 @@ static __global__ void mul_mat_q(
         const int offset_x = (wt/sample_ratio)*stride_sample_x + (zt/channel_ratio)*stride_channel_x + it*mmq_y*stride_row_x;
 
         constexpr bool fixup = false; // All but (potentially) the last iterations write their data to dst rather than the fixup buffer.
-        mul_mat_q_process_tile
+        mul_mat_q_process_tile
             (x, offset_x, y + offset_y, ids_dst_shared, dst + offset_dst, tmp_fixup, stride_row_x, ncols_y, stride_col_dst,
              tile_x_max_i, tile_y_max_j, kb0_start, kb0_stop);
@@ -2812,10 +3343,10 @@ static __global__ void mul_mat_q(
     // The memory layout for the fixup buffer is always contiguous, therefore reset ids:
     __syncthreads();
 #pragma unroll
-    for (int j0 = 0; j0 < mmq_x; j0 += nwarps*WARP_SIZE) {
-        const int j = j0 + threadIdx.y*WARP_SIZE + threadIdx.x;
+    for (int j0 = 0; j0 < mmq_x; j0 += nwarps*warp_size) {
+        const int j = j0 + threadIdx.y*warp_size + threadIdx.x;
 
-        if (j0 + nwarps*WARP_SIZE > mmq_x && j >= mmq_x) {
+        if (j0 + nwarps*warp_size > mmq_x && j >= mmq_x) {
             break;
        }
 
@@ -2833,13 +3364,13 @@ static __global__ void mul_mat_q(
     const int offset_x = (wt/sample_ratio)*stride_sample_x + (zt/channel_ratio)*stride_channel_x + it*mmq_y*stride_row_x;
 
     constexpr bool fixup = true; // Last index writes its data to fixup buffer to avoid data races with other blocks.
-    mul_mat_q_process_tile
+    mul_mat_q_process_tile
         (x, offset_x, y + offset_y, ids_dst_shared, dst + offset_dst, tmp_fixup, stride_row_x, ncols_y, stride_col_dst,
          tile_x_max_i, tile_y_max_j, kb0_start, kb0_stop);
 }
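Editor's note: the stream-k scheme the kernel implements launches a fixed grid of persistent blocks and divides the flattened (tile, K-slice) work range evenly among them, instead of one block per output tile. A host-side sketch of the split with toy sizes (all numbers here are assumptions; the real kernel additionally rounds each boundary to a multiple of blocks_per_iter):

    #include <cstdio>

    int main() {
        const int ntiles = 7, blocks_per_ne00 = 12; // assumed problem shape
        const int nsm = 4;                          // persistent blocks (one per SM)
        const long total = (long) ntiles * blocks_per_ne00;
        for (int b = 0; b < nsm; ++b) {
            const long kbc0 = total *  b      / nsm; // first work item of block b
            const long kbc1 = total * (b + 1) / nsm; // one past its last work item
            std::printf("block %d: items [%ld, %ld) -> tiles %ld..%ld\n",
                        b, kbc0, kbc1, kbc0/blocks_per_ne00, (kbc1 - 1)/blocks_per_ne00);
        }
    }

A tile whose K range is split across two blocks gets its trailing partial sum written to the fixup buffer, which the mul_mat_q_stream_k_fixup kernel below folds back into dst.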
 
-template
+template
 static __global__ void mul_mat_q_stream_k_fixup(
     const int32_t * ids_dst, const int32_t * expert_bounds, float * __restrict__ dst,
     const float * __restrict__ tmp_last_tile, const int ncols_x, const int nrows_x, const int ncols_dst, const int stride_col_dst,
@@ -2849,7 +3380,10 @@ static __global__ void mul_mat_q_stream_k_fixup(
     constexpr int blocks_per_iter = MMQ_ITER_K / qk;
     const int64_t blocks_per_ne00 = ncols_x / qk;
 
-    float sum[mmq_x*mmq_y / (nwarps*WARP_SIZE)] = {0.0f};
+    constexpr int nwarps = mmq_get_nwarps_device();
+    constexpr int warp_size = ggml_cuda_get_physical_warp_size();
+
+    float sum[mmq_x*mmq_y / (nwarps*warp_size)] = {0.0f};
 
     const int ntx = (ncols_dst + mmq_x - 1) / mmq_x;
     const int nty = (nrows_x   + mmq_y - 1) / mmq_y;
@@ -2893,10 +3427,10 @@ static __global__ void mul_mat_q_stream_k_fixup(
         const int j = j0 + threadIdx.y;
 
 #pragma unroll
-        for (int i0 = 0; i0 < mmq_y; i0 += WARP_SIZE) {
+        for (int i0 = 0; i0 < mmq_y; i0 += warp_size) {
             const int i = i0 + threadIdx.x;
 
-            sum[(j0/nwarps) * (mmq_y/WARP_SIZE) + i0/WARP_SIZE] += tmp_last_tile[bidx*(mmq_x*mmq_y) + j*mmq_y + i];
+            sum[(j0/nwarps) * (mmq_y/warp_size) + i0/warp_size] += tmp_last_tile[bidx*(mmq_x*mmq_y) + j*mmq_y + i];
         }
     }
 
@@ -2937,14 +3471,14 @@ static __global__ void mul_mat_q_stream_k_fixup(
         }
 
 #pragma unroll
-        for (int i0 = 0; i0 < mmq_y; i0 += WARP_SIZE) {
+        for (int i0 = 0; i0 < mmq_y; i0 += warp_size) {
             const int i = i0 + threadIdx.x;
 
             if (need_check && i > i_max) {
                continue;
            }
 
-            dst[j*stride_col_dst + i] += sum[(j0/nwarps) * (mmq_y/WARP_SIZE) + i0/WARP_SIZE];
+            dst[j*stride_col_dst + i] += sum[(j0/nwarps) * (mmq_y/warp_size) + i0/warp_size];
         }
     }
     return;
@@ -2955,7 +3489,7 @@ static __global__ void mul_mat_q_stream_k_fixup(
     const int col_high = expert_bounds[zt + 1];
     const int col_diff = col_high - col_low;
 
-    for (int j = threadIdx.y*WARP_SIZE + threadIdx.x; j < mmq_x; j += nwarps*WARP_SIZE) {
+    for (int j = threadIdx.y*warp_size + threadIdx.x; j < mmq_x; j += nwarps*warp_size) {
         ids_dst_shared[j] = ids_dst[col_low + j];
     }
     __syncthreads();
@@ -2975,14 +3509,14 @@ static __global__ void mul_mat_q_stream_k_fixup(
         }
 
 #pragma unroll
-        for (int i0 = 0; i0 < mmq_y; i0 += WARP_SIZE) {
+        for (int i0 = 0; i0 < mmq_y; i0 += warp_size) {
             const int i = i0 + threadIdx.x;
 
             if (need_check && i > i_max) {
                 continue;
             }
 
-            dst[ids_dst_shared[j]*stride_col_dst + i] += sum[(j0/nwarps) * (mmq_y/WARP_SIZE) + i0/WARP_SIZE];
+            dst[ids_dst_shared[j]*stride_col_dst + i] += sum[(j0/nwarps) * (mmq_y/warp_size) + i0/warp_size];
         }
     }
 }
@@ -2996,13 +3530,13 @@ struct mmq_args {
 };
 
 template
-static size_t mmq_get_nbytes_shared(const int mmq_x, const int mmq_y, const int cc) {
+static size_t mmq_get_nbytes_shared(const int mmq_x, const int mmq_y, const int cc, const int warp_size, const int nwarps) {
     const tile_x_sizes txs = mmq_get_dp4a_tile_x_sizes(type, mmq_y);
     const int mmq_tile_x_k = mmq_get_mma_tile_x_k(type);
     const size_t nbs_ids = mmq_x*sizeof(int);
-    const size_t nbs_x = new_mma_available(cc) ? mmq_y*mmq_tile_x_k*sizeof(int) : txs.qs*sizeof(int) + txs.dm*sizeof(half2) + txs.sc*sizeof(int);
+    const size_t nbs_x = (turing_mma_available(cc) || amd_mfma_available(cc)) ? mmq_y*mmq_tile_x_k*sizeof(int) : txs.qs*sizeof(int) + txs.dm*sizeof(half2) + txs.sc*sizeof(int);
     const size_t nbs_y = mmq_x*sizeof(block_q8_1_mmq);
-    return nbs_ids + nbs_x + GGML_PAD(nbs_y, MMQ_NWARPS*WARP_SIZE*sizeof(int));
+    return nbs_ids + nbs_x + GGML_PAD(nbs_y, nwarps*warp_size*sizeof(int));
 }
 
 template
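Editor's note: mmq_get_nbytes_shared adds up three regions: the MoE index array, the x tile (whose layout depends on tensor-core availability) and the padded y tile. A back-of-envelope C++ model of the sum; the region sizes are inputs here because they come from the quant type:

    #include <cstddef>

    // nbs_ids/nbs_x/nbs_y are the three region sizes computed from the type;
    // the y region is padded to a multiple of nwarps*warp_size ints (GGML_PAD).
    size_t nbytes_shared(size_t nbs_ids, size_t nbs_x, size_t nbs_y,
                         int nwarps, int warp_size) {
        const size_t pad = (size_t) nwarps * warp_size * sizeof(int);
        const size_t nbs_y_padded = (nbs_y + pad - 1) / pad * pad;
        return nbs_ids + nbs_x + nbs_y_padded;
    }

Passing warp_size and nwarps in explicitly (the new signature above) is what removes the last compile-time WARP_SIZE/MMQ_NWARPS dependency from the host-side sizing logic.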
} else { constexpr bool need_check = true; - - mul_mat_q<<>> + mul_mat_q<<>> (args.x, args.y, args.ids_dst, args.expert_bounds, args.dst, tmp_fixup.ptr, args.ncols_x, args.nrows_x, args.ncols_dst, args.stride_row_x, args.ncols_y, args.nrows_dst, channel_ratio, args.nchannels_y, args.stride_channel_x, args.stride_channel_y, args.stride_channel_dst, @@ -3092,7 +3620,7 @@ static void launch_mul_mat_q(ggml_backend_cuda_context & ctx, const mmq_args & a return; } - mul_mat_q_stream_k_fixup<<>> + mul_mat_q_stream_k_fixup<<>> (args.ids_dst, args.expert_bounds, args.dst, tmp_fixup.ptr, args.ncols_x, args.nrows_x, args.ncols_dst, args.nrows_dst, args.nchannels_y, args.stride_channel_dst, args.nsamples_y, args.stride_sample_dst); } @@ -3100,9 +3628,11 @@ static void launch_mul_mat_q(ggml_backend_cuda_context & ctx, const mmq_args & a template void mul_mat_q_case(ggml_backend_cuda_context & ctx, const mmq_args & args, cudaStream_t stream) { - const int id = ggml_cuda_get_device(); - const int cc = ggml_cuda_info().devices[id].cc; - const size_t smpbo = ggml_cuda_info().devices[id].smpbo; + const int id = ggml_cuda_get_device(); + const int cc = ggml_cuda_info().devices[id].cc; + const size_t smpbo = ggml_cuda_info().devices[id].smpbo; + const int warp_size = ggml_cuda_info().devices[id].warp_size; + const int nwarps = mmq_get_nwarps_host(cc, warp_size); const int mmq_x_max = get_mmq_x_max_host(cc); const int mmq_y = get_mmq_y_host(cc); @@ -3113,7 +3643,7 @@ void mul_mat_q_case(ggml_backend_cuda_context & ctx, const mmq_args & args, cuda for (int mmq_x = 8; mmq_x <= mmq_x_max && ntiles_x_best > 1; mmq_x += 8) { const int granularity = mmq_get_granularity_host(mmq_x, cc); - if (mmq_x % granularity != 0 || mmq_get_nbytes_shared(mmq_x, mmq_y, cc) > smpbo) { + if (mmq_x % granularity != 0 || mmq_get_nbytes_shared(mmq_x, mmq_y, cc, warp_size, nwarps) > smpbo) { continue; } @@ -3189,6 +3719,7 @@ extern DECL_MMQ_CASE(GGML_TYPE_Q4_1); extern DECL_MMQ_CASE(GGML_TYPE_Q5_0); extern DECL_MMQ_CASE(GGML_TYPE_Q5_1); extern DECL_MMQ_CASE(GGML_TYPE_Q8_0); +extern DECL_MMQ_CASE(GGML_TYPE_MXFP4); extern DECL_MMQ_CASE(GGML_TYPE_Q2_K); extern DECL_MMQ_CASE(GGML_TYPE_Q3_K); extern DECL_MMQ_CASE(GGML_TYPE_Q4_K); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/mmv.cu b/ml/backend/ggml/ggml/src/ggml-cuda/mmv.cu deleted file mode 100644 index d8c385e23..000000000 --- a/ml/backend/ggml/ggml/src/ggml-cuda/mmv.cu +++ /dev/null @@ -1,336 +0,0 @@ -#include "ggml.h" -#include "common.cuh" -#include "mmv.cuh" - -template -static __global__ void mul_mat_vec( - const T * __restrict__ x, const float * __restrict__ y, const int32_t * __restrict__ ids, float * __restrict__ dst, - const int64_t ncols2, const int64_t nchannels_y, const int64_t stride_row, - const int64_t channel_ratio, const int64_t stride_channel_x, const int64_t stride_channel_y, const int64_t stride_channel_dst, - const int64_t sample_ratio, const int64_t stride_sample_x, const int64_t stride_sample_y, const int64_t stride_sample_dst) { - const int64_t row = blockIdx.x; - const int64_t channel_dst = blockIdx.y; - const int64_t channel_x = ids ? ids[channel_dst] : channel_dst / channel_ratio; - const int64_t channel_y = ids ? 
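
The two CUDA_SET_SHARED_MEMORY_LIMIT calls in the hunk above replace the per-device guard block that used to call cudaFuncSetAttribute directly. A minimal sketch of what such a wrapper has to do, assuming the CUDA_CHECK and GGML_CUDA_MAX_DEVICES helpers from common.cuh (the function name here is illustrative, not the macro's actual definition):

    // Opt a kernel into more dynamic shared memory than the 48 KiB default,
    // at most once per device; mirrors the deleted open-coded version.
    template <typename F>
    static void raise_smem_limit_once(int device, F * kernel, int nbytes_shared) {
        static bool raised[GGML_CUDA_MAX_DEVICES] = {false}; // one guard per kernel instantiation
        if (!raised[device]) {
            CUDA_CHECK(cudaFuncSetAttribute(kernel, cudaFuncAttributeMaxDynamicSharedMemorySize, nbytes_shared));
            raised[device] = true;
        }
    }
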
diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/mmv.cu b/ml/backend/ggml/ggml/src/ggml-cuda/mmv.cu
deleted file mode 100644
index d8c385e23..000000000
--- a/ml/backend/ggml/ggml/src/ggml-cuda/mmv.cu
+++ /dev/null
@@ -1,336 +0,0 @@
-#include "ggml.h"
-#include "common.cuh"
-#include "mmv.cuh"
-
-template <typename T, typename type_acc, int block_size>
-static __global__ void mul_mat_vec(
-        const T * __restrict__ x, const float * __restrict__ y, const int32_t * __restrict__ ids, float * __restrict__ dst,
-        const int64_t ncols2, const int64_t nchannels_y, const int64_t stride_row,
-        const int64_t channel_ratio, const int64_t stride_channel_x, const int64_t stride_channel_y, const int64_t stride_channel_dst,
-        const int64_t sample_ratio, const int64_t stride_sample_x, const int64_t stride_sample_y, const int64_t stride_sample_dst) {
-    const int64_t row         = blockIdx.x;
-    const int64_t channel_dst = blockIdx.y;
-    const int64_t channel_x   = ids ? ids[channel_dst] : channel_dst / channel_ratio;
-    const int64_t channel_y   = ids ? channel_dst % nchannels_y : channel_dst;
-    const int64_t sample_dst  = blockIdx.z;
-    const int64_t sample_x    = sample_dst / sample_ratio;
-    const int64_t sample_y    = sample_dst;
-    const int     tid         = threadIdx.x;
-    constexpr int warp_size   = ggml_cuda_get_physical_warp_size();
-
-    x   += sample_x *stride_sample_x + channel_x *stride_channel_x + row*stride_row;
-    y   += sample_y *stride_sample_y + channel_y *stride_channel_y;
-    dst += sample_dst*stride_sample_dst + channel_dst*stride_channel_dst;
-
-    const float2 * y2 = (const float2 *) y;
-
-    extern __shared__ char data_mmv[];
-    float * buf_iw = (float *) data_mmv;
-
-    if (block_size > warp_size) {
-        if (tid < warp_size) {
-            buf_iw[tid] = 0.0f;
-        }
-        __syncthreads();
-    }
-
-    float sumf = 0.0f;
-
-    if constexpr (std::is_same<T, float>::value) {
-        const float2 * x2 = (const float2 *) x;
-
-        for (int64_t col2 = tid; col2 < ncols2; col2 += block_size) {
-            const float2 tmpx = x2[col2];
-            const float2 tmpy = y2[col2];
-            sumf += tmpx.x*tmpy.x;
-            sumf += tmpx.y*tmpy.y;
-        }
-    } else if constexpr (std::is_same<T, half>::value) {
-        const half2 * x2 = (const half2 *) x;
-
-        if (std::is_same<type_acc, float>::value) {
-            for (int64_t col2 = tid; col2 < ncols2; col2 += block_size) {
-                const float2 tmpx = __half22float2(x2[col2]);
-                const float2 tmpy = y2[col2];
-                sumf += tmpx.x * tmpy.x;
-                sumf += tmpx.y * tmpy.y;
-            }
-        } else {
-#ifdef FP16_AVAILABLE
-            half2 sumh2 = make_half2(0.0f, 0.0f);
-
-            for (int64_t col2 = tid; col2 < ncols2; col2 += block_size) {
-                const float2 tmp = y2[col2];
-                sumh2 += x2[col2] * make_half2(tmp.x, tmp.y);
-            }
-
-            sumf = __low2float(sumh2) + __high2float(sumh2);
-#else
-            NO_DEVICE_CODE;
-#endif // FP16_AVAILABLE
-        }
-    } else if constexpr (std::is_same<T, nv_bfloat16>::value) {
-        const int * x2 = (const int *) x;
-        for (int64_t col2 = tid; col2 < ncols2; col2 += block_size) {
-            const int    tmpx = x2[col2];
-            const float2 tmpy = y2[col2];
-            sumf += float(reinterpret_cast<const nv_bfloat16 *>(&tmpx)[0]) * tmpy.x;
-            sumf += float(reinterpret_cast<const nv_bfloat16 *>(&tmpx)[1]) * tmpy.y;
-        }
-    } else {
-        static_assert(std::is_same<T, void>::value, "unsupported type");
-    }
-
-    sumf = warp_reduce_sum(sumf);
-
-    if (block_size > warp_size) {
-        buf_iw[tid/warp_size] = sumf;
-        __syncthreads();
-        if (tid >= warp_size) {
-            return;
-        }
-        sumf = buf_iw[tid];
-        sumf = warp_reduce_sum(sumf);
-    }
-
-    if (tid != 0) {
-        return;
-    }
-
-    dst[row] = sumf;
-}
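
The deleted kernel above ends with the two-stage reduction that all of these mat-vec kernels share: a shuffle reduction inside each warp, then a second pass over per-warp partials staged in shared memory. A self-contained sketch of the pattern for a 32-wide warp (warp_reduce_sum from common.cuh plays the first role in the real code):

    __device__ float warp_sum(float x) {
        for (int offset = 16; offset > 0; offset >>= 1) {
            x += __shfl_xor_sync(0xffffffff, x, offset, 32); // butterfly reduction
        }
        return x;
    }

    // buf holds one float per warp and, as in the kernels here, must be
    // zero-initialized up front so that warps the block lacks contribute 0.
    __device__ float block_sum(float x, float * buf) {
        x = warp_sum(x);              // stage 1: intra-warp
        buf[threadIdx.x / 32] = x;    // every lane of a warp writes the same value
        __syncthreads();
        if (threadIdx.x < 32) {
            x = warp_sum(buf[threadIdx.x]); // stage 2: first warp combines partials
        }
        return x;
    }
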
-
-template <typename T, typename type_acc>
-static void launch_mul_mat_vec_cuda(
-        const T * x, const float * y, const int32_t * ids, float * dst,
-        const int64_t ncols, const int64_t nrows, const int64_t stride_row, const int64_t nchannels_x, const int64_t nchannels_y, const int64_t nchannels_dst,
-        const int64_t stride_channel_x, const int64_t stride_channel_y, const int64_t stride_channel_dst, const int64_t nsamples_x,
-        const int64_t nsamples_dst, const int64_t stride_sample_x, const int64_t stride_sample_y, const int64_t stride_sample_dst,
-        cudaStream_t stream) {
-    GGML_ASSERT(ncols % 2 == 0);
-    GGML_ASSERT(stride_row % 2 == 0);
-    GGML_ASSERT(ids || nchannels_dst % nchannels_x == 0);
-    GGML_ASSERT( nsamples_dst % nsamples_x == 0);
-    const int64_t channel_ratio = nchannels_dst / nchannels_x;
-    const int64_t sample_ratio  = nsamples_dst  / nsamples_x;
-    int device;
-    int warp_size;
-
-    CUDA_CHECK(cudaGetDevice(&device));
-    warp_size = ggml_cuda_info().devices[device].warp_size;
-
-    int64_t block_size_best = warp_size;
-    int64_t niter_best      = (ncols + 2*warp_size - 1) / (2*warp_size);
-    int64_t max_block_size  = 256;
-    if(ggml_cuda_info().devices[device].cc > GGML_CUDA_CC_OFFSET_AMD && ggml_cuda_info().devices[device].cc < GGML_CUDA_CC_RDNA1) {
-        max_block_size = 128;
-    }
-    for (int64_t block_size = 2*warp_size; block_size <= max_block_size; block_size += warp_size) {
-        const int64_t niter = (ncols + 2*block_size - 1) / (2*block_size);
-        if (niter < niter_best) {
-            niter_best      = niter;
-            block_size_best = block_size;
-        }
-    }
-
-    const int smem = warp_size*sizeof(float);
-    const dim3 block_nums(nrows, nchannels_dst, nsamples_dst);
-    const dim3 block_dims(block_size_best, 1, 1);
-    switch (block_size_best) {
-        case 32: {
-            mul_mat_vec<T, type_acc, 32><<<block_nums, block_dims, smem, stream>>>
-                (x, y, ids, dst, ncols/2, nchannels_y, stride_row, channel_ratio, stride_channel_x, stride_channel_y,
-                 stride_channel_dst, sample_ratio, stride_sample_x, stride_sample_y, stride_sample_dst);
-        } break;
-        case 64: {
-            mul_mat_vec<T, type_acc, 64><<<block_nums, block_dims, smem, stream>>>
-                (x, y, ids, dst, ncols/2, nchannels_y, stride_row, channel_ratio, stride_channel_x, stride_channel_y,
-                 stride_channel_dst, sample_ratio, stride_sample_x, stride_sample_y, stride_sample_dst);
-        } break;
-        case 96: {
-            mul_mat_vec<T, type_acc, 96><<<block_nums, block_dims, smem, stream>>>
-                (x, y, ids, dst, ncols/2, nchannels_y, stride_row, channel_ratio, stride_channel_x, stride_channel_y,
-                 stride_channel_dst, sample_ratio, stride_sample_x, stride_sample_y, stride_sample_dst);
-        } break;
-        case 128: {
-            mul_mat_vec<T, type_acc, 128><<<block_nums, block_dims, smem, stream>>>
-                (x, y, ids, dst, ncols/2, nchannels_y, stride_row, channel_ratio, stride_channel_x, stride_channel_y,
-                 stride_channel_dst, sample_ratio, stride_sample_x, stride_sample_y, stride_sample_dst);
-        } break;
-        case 160: {
-            mul_mat_vec<T, type_acc, 160><<<block_nums, block_dims, smem, stream>>>
-                (x, y, ids, dst, ncols/2, nchannels_y, stride_row, channel_ratio, stride_channel_x, stride_channel_y,
-                 stride_channel_dst, sample_ratio, stride_sample_x, stride_sample_y, stride_sample_dst);
-        } break;
-        case 192: {
-            mul_mat_vec<T, type_acc, 192><<<block_nums, block_dims, smem, stream>>>
-                (x, y, ids, dst, ncols/2, nchannels_y, stride_row, channel_ratio, stride_channel_x, stride_channel_y,
-                 stride_channel_dst, sample_ratio, stride_sample_x, stride_sample_y, stride_sample_dst);
-        } break;
-        case 224: {
-            mul_mat_vec<T, type_acc, 224><<<block_nums, block_dims, smem, stream>>>
-                (x, y, ids, dst, ncols/2, nchannels_y, stride_row, channel_ratio, stride_channel_x, stride_channel_y,
-                 stride_channel_dst, sample_ratio, stride_sample_x, stride_sample_y, stride_sample_dst);
-        } break;
-        case 256: {
-            mul_mat_vec<T, type_acc, 256><<<block_nums, block_dims, smem, stream>>>
-                (x, y, ids, dst, ncols/2, nchannels_y, stride_row, channel_ratio, stride_channel_x, stride_channel_y,
-                 stride_channel_dst, sample_ratio, stride_sample_x, stride_sample_y, stride_sample_dst);
-        } break;
-        default: {
-            GGML_ABORT("fatal error");
-        } break;
-    }
-}
-
-template <typename T>
-static void mul_mat_vec_cuda(
-        const T * x, const float * y, const int32_t * ids, float * dst,
-        const int64_t ncols, const int64_t nrows, const int64_t stride_row, const int64_t nchannels_x, const int64_t nchannels_y, const int64_t nchannels_dst,
-        const int64_t stride_channel_x, const int64_t stride_channel_y, const int64_t stride_channel_dst, const int64_t nsamples_x,
-        const int64_t nsamples_dst, const int64_t stride_sample_x, const int64_t stride_sample_y, const int64_t stride_sample_dst,
-        enum ggml_prec prec, cudaStream_t stream) {
-    if constexpr(std::is_same<T, half>::value) {
-        if (prec == GGML_PREC_DEFAULT) {
-            launch_mul_mat_vec_cuda<T, half>
-                (x, y, ids, dst, ncols, nrows, stride_row, nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y,
-                 stride_channel_dst, nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream);
-            return;
-        }
-    }
-    launch_mul_mat_vec_cuda<T, float>
-        (x, y, ids, dst, ncols, nrows, stride_row, nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y,
-         stride_channel_dst, nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream);
-}
-
-void ggml_cuda_mul_mat_vec(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * ids, ggml_tensor * dst) {
-    GGML_ASSERT(        src1->type == GGML_TYPE_F32);
-    GGML_ASSERT(!ids ||  ids->type == GGML_TYPE_I32);
-    GGML_ASSERT(         dst->type == GGML_TYPE_F32);
-
-    GGML_TENSOR_BINARY_OP_LOCALS;
-
-    const size_t ts_src0 = ggml_type_size(src0->type);
-    const size_t ts_src1 = ggml_type_size(src1->type);
-    const size_t ts_dst  = ggml_type_size(dst->type);
-
-    GGML_ASSERT(!ids || ne12 == 1); // Implementation is only correct for batch size 1.
-    GGML_ASSERT(ne13 == ne3);
-
-    GGML_ASSERT(        nb00       == ts_src0);
-    GGML_ASSERT(        nb10       == ts_src1);
-    GGML_ASSERT(!ids || ids->nb[0] == ggml_type_size(ids->type));
-    GGML_ASSERT(        nb0        == ts_dst);
-
-    const int cc = ggml_cuda_info().devices[ggml_cuda_get_device()].cc;
-    const enum ggml_prec prec = fast_fp16_available(cc) ? ggml_prec(dst->op_params[0]) : GGML_PREC_F32;
-
-    const float   * src1_d =       (const float   *) src1->data;
-    const int32_t *  ids_d = ids ? (const int32_t *)  ids->data : nullptr;
-    float         *  dst_d =       (float         *)  dst->data;
-
-    const int64_t s01 = src0->nb[1] / ts_src0;
-    const int64_t s11 = src1->nb[1] / ts_src1;
-    const int64_t s1  =  dst->nb[1] / ts_dst;
-    const int64_t s02 = src0->nb[2] / ts_src0;
-    const int64_t s12 = src1->nb[2] / ts_src1;
-    const int64_t s2  =  dst->nb[2] / ts_dst;
-    const int64_t s03 = src0->nb[3] / ts_src0;
-    const int64_t s13 = src1->nb[3] / ts_src1;
-    const int64_t s3  =  dst->nb[3] / ts_dst;
-
-    // For MUL_MAT_ID the memory layout is different than for MUL_MAT:
-    const int64_t ncols_dst          = ids ? ne2  : ne1;
-    const int64_t nchannels_y        = ids ? ne11 : ne12;
-    const int64_t nchannels_dst      = ids ? ne1  : ne2;
-    const int64_t stride_channel_dst = ids ? s1   : s2;
-    const int64_t stride_channel_y   = ids ? s11  : s12;
-
-    GGML_ASSERT(ncols_dst == 1);
-
-    switch (src0->type) {
-        case GGML_TYPE_F32: {
-            const float * src0_d = (const float *) src0->data;
-            mul_mat_vec_cuda(src0_d, src1_d, ids_d, dst_d, ne00, ne01, s01,
-                ne02, nchannels_y, nchannels_dst, s02, stride_channel_y, stride_channel_dst,
-                ne03, ne3, s03, s13, s3, prec, ctx.stream());
-        } break;
-        case GGML_TYPE_F16: {
-            const half * src0_d = (const half *) src0->data;
-            mul_mat_vec_cuda(src0_d, src1_d, ids_d, dst_d, ne00, ne01, s01,
-                ne02, nchannels_y, nchannels_dst, s02, stride_channel_y, stride_channel_dst,
-                ne03, ne3, s03, s13, s3, prec, ctx.stream());
-        } break;
-        case GGML_TYPE_BF16: {
-            const nv_bfloat16 * src0_d = (const nv_bfloat16 *) src0->data;
-            mul_mat_vec_cuda(src0_d, src1_d, ids_d, dst_d, ne00, ne01, s01,
-                ne02, nchannels_y, nchannels_dst, s02, stride_channel_y, stride_channel_dst,
-                ne03, ne3, s03, s13, s3, prec, ctx.stream());
-        } break;
-        default:
-            GGML_ABORT("unsupported type: %s", ggml_type_name(src0->type));
-    }
-}
-
-void ggml_cuda_op_mul_mat_vec(
-    ggml_backend_cuda_context & ctx,
-    const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, const char * src0_dd_i, const float * src1_ddf_i,
-    const char * src1_ddq_i, float * dst_dd_i, const int64_t row_low, const int64_t row_high, const int64_t src1_ncols,
-    const int64_t src1_padded_row_size, cudaStream_t stream) {
-
-    GGML_ASSERT(src1->type == GGML_TYPE_F32);
-    GGML_ASSERT(dst->type  == GGML_TYPE_F32);
-
-    const int64_t ne00     = src0->ne[0];
-    const int64_t row_diff = row_high - row_low;
-
-    GGML_ASSERT(src1_ncols == 1);
-
-    const int cc = ggml_cuda_info().devices[ggml_cuda_get_device()].cc;
-    const enum ggml_prec prec = fast_fp16_available(cc) ? ggml_prec(dst->op_params[0]) : GGML_PREC_F32;
-
-
-    // ggml_cuda_op provides single, contiguous matrices
-    const int64_t stride_row         = ne00;
-    const int64_t nchannels_x        = 1;
-    const int64_t nchannels_y        = 1;
-    const int64_t nchannels_dst      = 1;
-    const int64_t stride_channel_x   = 0;
-    const int64_t stride_channel_y   = 0;
-    const int64_t stride_channel_dst = 0;
-    const int64_t nsamples_x         = 1;
-    const int64_t nsamples_dst       = 1;
-    const int64_t stride_sample_x    = 0;
-    const int64_t stride_sample_y    = 0;
-    const int64_t stride_sample_dst  = 0;
-
-    switch (src0->type) {
-        case GGML_TYPE_F32: {
-            const float * src0_d = (const float *) src0_dd_i;
-            mul_mat_vec_cuda(src0_d, src1_ddf_i, nullptr, dst_dd_i, ne00, row_diff, stride_row,
-                nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst,
-                nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, prec, stream);
-        } break;
-        case GGML_TYPE_F16: {
-            const half * src0_d = (const half *) src0_dd_i;
-            mul_mat_vec_cuda(src0_d, src1_ddf_i, nullptr, dst_dd_i, ne00, row_diff, stride_row,
-                nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst,
-                nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, prec, stream);
-        } break;
-        case GGML_TYPE_BF16: {
-            const nv_bfloat16 * src0_d = (const nv_bfloat16 *) src0_dd_i;
-            mul_mat_vec_cuda(src0_d, src1_ddf_i, nullptr, dst_dd_i, ne00, row_diff, stride_row,
-                nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst,
-                nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, prec, stream);
-        } break;
-        default:
-            GGML_ABORT("unsupported type: %s", ggml_type_name(src0->type));
-    }
-
-    GGML_UNUSED(ctx);
-    GGML_UNUSED(src1);
-    GGML_UNUSED(dst);
-    GGML_UNUSED(src1_ddq_i);
-    GGML_UNUSED(src1_ncols);
-    GGML_UNUSED(src1_padded_row_size);
-}
diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/mmv.cuh b/ml/backend/ggml/ggml/src/ggml-cuda/mmv.cuh
deleted file mode 100644
index 756e7e1cc..000000000
--- a/ml/backend/ggml/ggml/src/ggml-cuda/mmv.cuh
+++ /dev/null
@@ -1,12 +0,0 @@
-#include "common.cuh"
-
-// maximum number of src0 rows with which to use mul_mat_vec over cuBLAS if FP16 tensor cores are available
-#define MMV_MAX_ROWS 512
-
-void ggml_cuda_mul_mat_vec(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * ids, ggml_tensor * dst);
-
-void ggml_cuda_op_mul_mat_vec(
-    ggml_backend_cuda_context & ctx,
-    const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, const char * src0_dd_i, const float * src1_ddf_i,
-    const char * src1_ddq_i, float * dst_dd_i, const int64_t row_low, const int64_t row_high, const int64_t src1_ncols,
-    const int64_t src1_padded_row_size, cudaStream_t stream);
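
Both the deleted mmv.cu above and its mmvf.cu replacement below read BF16 weights through a plain int pointer: two bf16 values are fetched as one 32-bit word and each half is widened through nv_bfloat16. A standalone restatement of that trick (sketch only):

    #include <cuda_bf16.h>

    __device__ float2 load_bf16_pair(const int * x2, int i) {
        const int packed = x2[i];  // two adjacent bf16 weights in one load
        const nv_bfloat16 * h = reinterpret_cast<const nv_bfloat16 *>(&packed);
        return make_float2(float(h[0]), float(h[1]));
    }
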
diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/mmvf.cu b/ml/backend/ggml/ggml/src/ggml-cuda/mmvf.cu
new file mode 100644
index 000000000..1ad4bc75b
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-cuda/mmvf.cu
@@ -0,0 +1,510 @@
+#include "ggml.h"
+#include "common.cuh"
+#include "mmvf.cuh"
+
+template <typename T, typename type_acc, int ncols_dst, int block_size>
+static __global__ void mul_mat_vec_f(
+        const T * __restrict__ x, const float * __restrict__ y, const int32_t * __restrict__ ids, float * __restrict__ dst,
+        const int ncols2, const int nchannels_y, const int stride_row, const int stride_col_y2, const int stride_col_dst,
+        const int channel_ratio, const int stride_channel_x, const int stride_channel_y, const int stride_channel_dst,
+        const int sample_ratio, const int stride_sample_x, const int stride_sample_y, const int stride_sample_dst) {
+    const int row         = blockIdx.x;
+    const int channel_dst = blockIdx.y;
+    const int channel_x   = ids ? ids[channel_dst] : channel_dst / channel_ratio;
+    const int channel_y   = ids ? channel_dst % nchannels_y : channel_dst;
+    const int sample_dst  = blockIdx.z;
+    const int sample_x    = sample_dst / sample_ratio;
+    const int sample_y    = sample_dst;
+    const int tid         = threadIdx.x;
+
+    constexpr int warp_size = ggml_cuda_get_physical_warp_size();
+
+    x   += int64_t(sample_x) *stride_sample_x + channel_x *stride_channel_x + row*stride_row;
+    y   += int64_t(sample_y) *stride_sample_y + channel_y *stride_channel_y;
+    dst += int64_t(sample_dst)*stride_sample_dst + channel_dst*stride_channel_dst;
+
+    const float2 * y2 = (const float2 *) y;
+
+    extern __shared__ char data_mmv[];
+    float * buf_iw = (float *) data_mmv;
+
+    if (block_size > warp_size) {
+        if (tid < warp_size) {
+            buf_iw[tid] = 0.0f;
+        }
+        __syncthreads();
+    }
+
+    float sumf[ncols_dst] = {0.0f};
+
+    if constexpr (std::is_same_v<T, float>) {
+        const float2 * x2 = (const float2 *) x;
+
+        for (int col2 = tid; col2 < ncols2; col2 += block_size) {
+            const float2 tmpx = x2[col2];
+
+#pragma unroll
+            for (int j = 0; j < ncols_dst; ++j) {
+                const float2 tmpy = y2[j*stride_col_y2 + col2];
+                sumf[j] += tmpx.x*tmpy.x;
+                sumf[j] += tmpx.y*tmpy.y;
+            }
+        }
+    } else if constexpr (std::is_same_v<T, half>) {
+        const half2 * x2 = (const half2 *) x;
+
+        if (std::is_same_v<type_acc, float>) {
+            for (int col2 = tid; col2 < ncols2; col2 += block_size) {
+                const float2 tmpx = __half22float2(x2[col2]);
+
+#pragma unroll
+                for (int j = 0; j < ncols_dst; ++j) {
+                    const float2 tmpy = y2[j*stride_col_y2 + col2];
+                    sumf[j] += tmpx.x * tmpy.x;
+                    sumf[j] += tmpx.y * tmpy.y;
+                }
+            }
+        } else {
+#ifdef FP16_AVAILABLE
+            half2 sumh2[ncols_dst] = {{0.0f, 0.0f}};
+
+            for (int col2 = tid; col2 < ncols2; col2 += block_size) {
+                const half2 tmpx = x2[col2];
+
+#pragma unroll
+                for (int j = 0; j < ncols_dst; ++j) {
+                    const float2 tmpy = y2[j*stride_col_y2 + col2];
+                    sumh2[j] += tmpx * make_half2(tmpy.x, tmpy.y);
+                }
+            }
+
+#pragma unroll
+            for (int j = 0; j < ncols_dst; ++j) {
+                sumf[j] = __low2float(sumh2[j]) + __high2float(sumh2[j]);
+            }
+#else
+            NO_DEVICE_CODE;
+#endif // FP16_AVAILABLE
+        }
+    } else if constexpr (std::is_same_v<T, nv_bfloat16>) {
+        const int * x2 = (const int *) x;
+        for (int col2 = tid; col2 < ncols2; col2 += block_size) {
+            const int tmpx = x2[col2];
+#pragma unroll
+            for (int j = 0; j < ncols_dst; ++j) {
+                const float2 tmpy = y2[j*stride_col_y2 + col2];
+                sumf[j] += float(reinterpret_cast<const nv_bfloat16 *>(&tmpx)[0]) * tmpy.x;
+                sumf[j] += float(reinterpret_cast<const nv_bfloat16 *>(&tmpx)[1]) * tmpy.y;
+            }
+        }
+    } else {
+        static_assert(std::is_same_v<T, void>, "unsupported type");
+    }
+
+#pragma unroll
+    for (int j = 0; j < ncols_dst; ++j) {
+        sumf[j] = warp_reduce_sum(sumf[j]);
+
+        if (block_size > warp_size) {
+            buf_iw[tid/warp_size] = sumf[j];
+            __syncthreads();
+            if (tid < warp_size) {
+                sumf[j] = buf_iw[tid];
+                sumf[j] = warp_reduce_sum(sumf[j]);
+            }
+            if (j < ncols_dst) {
+                __syncthreads();
+            }
+        }
+    }
+
+    if (tid >= ncols_dst) {
+        return;
+    }
+
+    dst[tid*stride_col_dst + row] = sumf[tid];
+}
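
What the new kernel adds over the deleted one is the ncols_dst template dimension: each float2 of weights is loaded once and reused for every destination column, so x-traffic is amortized when a small batch of columns is computed per row. The inner loop in isolation (sketch):

    template <int ncols_dst>
    __device__ void accumulate_pairs(const float2 * x2, const float2 * y2,
                                     int ncols2, int stride_col_y2, int block_size, float * sumf) {
        for (int col2 = threadIdx.x; col2 < ncols2; col2 += block_size) {
            const float2 wx = x2[col2];              // one weight load ...
#pragma unroll
            for (int j = 0; j < ncols_dst; ++j) {    // ... feeds ncols_dst outputs
                const float2 wy = y2[j*stride_col_y2 + col2];
                sumf[j] += wx.x*wy.x + wx.y*wy.y;
            }
        }
    }
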
+
+template <typename T, typename type_acc, int ncols_dst>
+static void launch_mul_mat_vec_f_cuda(
+        const T * x, const float * y, const int32_t * ids, float * dst,
+        const int64_t ncols, const int64_t nrows,
+        const int64_t stride_row, const int64_t stride_col_y, const int64_t stride_col_dst,
+        const int64_t nchannels_x, const int64_t nchannels_y, const int64_t nchannels_dst,
+        const int64_t stride_channel_x, const int64_t stride_channel_y, const int64_t stride_channel_dst, const int64_t nsamples_x,
+        const int64_t nsamples_dst, const int64_t stride_sample_x, const int64_t stride_sample_y, const int64_t stride_sample_dst,
+        cudaStream_t stream) {
+    GGML_ASSERT(ncols % 2 == 0);
+    GGML_ASSERT(stride_row % 2 == 0);
+    GGML_ASSERT(stride_col_y % 2 == 0);
+    GGML_ASSERT(ids || nchannels_dst % nchannels_x == 0);
+    GGML_ASSERT( nsamples_dst % nsamples_x == 0);
+    const int64_t channel_ratio = nchannels_dst / nchannels_x;
+    const int64_t sample_ratio  = nsamples_dst  / nsamples_x;
+
+    const int device    = ggml_cuda_get_device();
+    const int warp_size = ggml_cuda_info().devices[device].warp_size;
+
+    int64_t block_size_best = warp_size;
+    int64_t niter_best      = (ncols + 2*warp_size - 1) / (2*warp_size);
+    int64_t max_block_size  = 256;
+    if(ggml_cuda_info().devices[device].cc > GGML_CUDA_CC_OFFSET_AMD && ggml_cuda_info().devices[device].cc < GGML_CUDA_CC_RDNA1) {
+        max_block_size = 128;
+    }
+    for (int64_t block_size = 2*warp_size; block_size <= max_block_size; block_size += warp_size) {
+        const int64_t niter = (ncols + 2*block_size - 1) / (2*block_size);
+        if (niter < niter_best) {
+            niter_best      = niter;
+            block_size_best = block_size;
+        }
+    }
+
+    const int nbytes_shared = warp_size*sizeof(float);
+    const dim3 block_nums(nrows, nchannels_dst, nsamples_dst);
+    const dim3 block_dims(block_size_best, 1, 1);
+    switch (block_size_best) {
+        case 32: {
+            mul_mat_vec_f<T, type_acc, ncols_dst, 32><<<block_nums, block_dims, nbytes_shared, stream>>>
+                (x, y, ids, dst, ncols/2, nchannels_y, stride_row, stride_col_y/2, stride_col_dst,
+                 channel_ratio, stride_channel_x, stride_channel_y, stride_channel_dst,
+                 sample_ratio, stride_sample_x, stride_sample_y, stride_sample_dst);
+        } break;
+        case 64: {
+            mul_mat_vec_f<T, type_acc, ncols_dst, 64><<<block_nums, block_dims, nbytes_shared, stream>>>
+                (x, y, ids, dst, ncols/2, nchannels_y, stride_row, stride_col_y/2, stride_col_dst,
+                 channel_ratio, stride_channel_x, stride_channel_y, stride_channel_dst,
+                 sample_ratio, stride_sample_x, stride_sample_y, stride_sample_dst);
+        } break;
+        case 96: {
+            mul_mat_vec_f<T, type_acc, ncols_dst, 96><<<block_nums, block_dims, nbytes_shared, stream>>>
+                (x, y, ids, dst, ncols/2, nchannels_y, stride_row, stride_col_y/2, stride_col_dst,
+                 channel_ratio, stride_channel_x, stride_channel_y, stride_channel_dst,
+                 sample_ratio, stride_sample_x, stride_sample_y, stride_sample_dst);
+        } break;
+        case 128: {
+            mul_mat_vec_f<T, type_acc, ncols_dst, 128><<<block_nums, block_dims, nbytes_shared, stream>>>
+                (x, y, ids, dst, ncols/2, nchannels_y, stride_row, stride_col_y/2, stride_col_dst,
+                 channel_ratio, stride_channel_x, stride_channel_y, stride_channel_dst,
+                 sample_ratio, stride_sample_x, stride_sample_y, stride_sample_dst);
+        } break;
+        case 160: {
+            mul_mat_vec_f<T, type_acc, ncols_dst, 160><<<block_nums, block_dims, nbytes_shared, stream>>>
+                (x, y, ids, dst, ncols/2, nchannels_y, stride_row, stride_col_y/2, stride_col_dst,
+                 channel_ratio, stride_channel_x, stride_channel_y, stride_channel_dst,
+                 sample_ratio, stride_sample_x, stride_sample_y, stride_sample_dst);
+        } break;
+        case 192: {
+            mul_mat_vec_f<T, type_acc, ncols_dst, 192><<<block_nums, block_dims, nbytes_shared, stream>>>
+                (x, y, ids, dst, ncols/2, nchannels_y, stride_row, stride_col_y/2, stride_col_dst,
+                 channel_ratio, stride_channel_x, stride_channel_y, stride_channel_dst,
+                 sample_ratio, stride_sample_x, stride_sample_y, stride_sample_dst);
+        } break;
+        case 224: {
+            mul_mat_vec_f<T, type_acc, ncols_dst, 224><<<block_nums, block_dims, nbytes_shared, stream>>>
+                (x, y, ids, dst, ncols/2, nchannels_y, stride_row, stride_col_y/2, stride_col_dst,
+                 channel_ratio, stride_channel_x, stride_channel_y, stride_channel_dst,
+                 sample_ratio, stride_sample_x, stride_sample_y, stride_sample_dst);
+        } break;
+        case 256: {
+            mul_mat_vec_f<T, type_acc, ncols_dst, 256><<<block_nums, block_dims, nbytes_shared, stream>>>
+                (x, y, ids, dst, ncols/2, nchannels_y, stride_row, stride_col_y/2, stride_col_dst,
+                 channel_ratio, stride_channel_x, stride_channel_y, stride_channel_dst,
+                 sample_ratio, stride_sample_x, stride_sample_y, stride_sample_dst);
+        } break;
+        default: {
+            GGML_ABORT("fatal error");
+        } break;
+    }
+}
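
The block-size search in launch_mul_mat_vec_f_cuda above has a simple shape once pulled out of the launcher: among multiples of the warp size up to a device-dependent cap, take the smallest block size that minimizes how many strides each thread makes across the column pairs. Host-side sketch:

    static int pick_block_size(int64_t ncols, int warp_size, int64_t max_block_size) {
        int64_t best      = warp_size;
        int64_t best_iter = (ncols + 2*warp_size - 1) / (2*warp_size);
        for (int64_t bs = 2*warp_size; bs <= max_block_size; bs += warp_size) {
            const int64_t iter = (ncols + 2*bs - 1) / (2*bs); // ceil(ncols / (2*bs))
            if (iter < best_iter) { // strict '<' keeps the smallest winning block
                best_iter = iter;
                best      = bs;
            }
        }
        return (int)best;
    }
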
+
+template <typename T, typename type_acc>
+static void mul_mat_vec_f_cuda_switch_ncols_dst(
+        const T * x, const float * y, const int32_t * ids, float * dst,
+        const int64_t ncols, const int64_t nrows, const int64_t ncols_dst,
+        const int64_t stride_row, const int64_t stride_col_y, const int64_t stride_col_dst,
+        const int64_t nchannels_x, const int64_t nchannels_y, const int64_t nchannels_dst,
+        const int64_t stride_channel_x, const int64_t stride_channel_y, const int64_t stride_channel_dst, const int64_t nsamples_x,
+        const int64_t nsamples_dst, const int64_t stride_sample_x, const int64_t stride_sample_y, const int64_t stride_sample_dst,
+        cudaStream_t stream) {
+    switch (ncols_dst) {
+        case 1:
+            launch_mul_mat_vec_f_cuda<T, type_acc, 1>
+                (x, y, ids, dst, ncols, nrows, stride_row, stride_col_y, stride_col_dst,
+                 nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y,
+                 stride_channel_dst, nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream);
+            break;
+        case 2:
+            launch_mul_mat_vec_f_cuda<T, type_acc, 2>
+                (x, y, ids, dst, ncols, nrows, stride_row, stride_col_y, stride_col_dst,
+                 nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y,
+                 stride_channel_dst, nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream);
+            break;
+        case 3:
+            launch_mul_mat_vec_f_cuda<T, type_acc, 3>
+                (x, y, ids, dst, ncols, nrows, stride_row, stride_col_y, stride_col_dst,
+                 nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y,
+                 stride_channel_dst, nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream);
+            break;
+        case 4:
+            launch_mul_mat_vec_f_cuda<T, type_acc, 4>
+                (x, y, ids, dst, ncols, nrows, stride_row, stride_col_y, stride_col_dst,
+                 nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y,
+                 stride_channel_dst, nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream);
+            break;
+        case 5:
+            launch_mul_mat_vec_f_cuda<T, type_acc, 5>
+                (x, y, ids, dst, ncols, nrows, stride_row, stride_col_y, stride_col_dst,
+                 nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y,
+                 stride_channel_dst, nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream);
+            break;
+        case 6:
+            launch_mul_mat_vec_f_cuda<T, type_acc, 6>
+                (x, y, ids, dst, ncols, nrows, stride_row, stride_col_y, stride_col_dst,
+                 nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y,
+                 stride_channel_dst, nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream);
+            break;
+        case 7:
+            launch_mul_mat_vec_f_cuda<T, type_acc, 7>
+                (x, y, ids, dst, ncols, nrows, stride_row, stride_col_y, stride_col_dst,
+                 nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y,
+                 stride_channel_dst, nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream);
+            break;
+        case 8:
+            launch_mul_mat_vec_f_cuda<T, type_acc, 8>
+                (x, y, ids, dst, ncols, nrows, stride_row, stride_col_y, stride_col_dst,
+                 nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y,
+                 stride_channel_dst, nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream);
+            break;
+        default:
+            GGML_ABORT("fatal error");
+            break;
+    }
+}
+
+template <typename T>
+static void mul_mat_vec_f_cuda(
+        const T * x, const float * y, const int32_t * ids, float * dst,
+        const int64_t ncols, const int64_t nrows, const int64_t ncols_dst,
+        const int64_t stride_row, const int64_t stride_col_y, const int stride_col_dst,
+        const int64_t nchannels_x, const int64_t nchannels_y, const int64_t nchannels_dst,
+        const int64_t stride_channel_x, const int64_t stride_channel_y, const int64_t stride_channel_dst, const int64_t nsamples_x,
+        const int64_t nsamples_dst, const int64_t stride_sample_x, const int64_t stride_sample_y, const int64_t stride_sample_dst,
+        enum ggml_prec prec, cudaStream_t stream) {
+    if constexpr(std::is_same_v<T, half>) {
+        if (prec == GGML_PREC_DEFAULT) {
+            mul_mat_vec_f_cuda_switch_ncols_dst<T, half>
+                (x, y, ids, dst, ncols, nrows, ncols_dst, stride_row, stride_col_y, stride_col_dst,
+                 nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y,
+                 stride_channel_dst, nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream);
+            return;
+        }
+    }
+    mul_mat_vec_f_cuda_switch_ncols_dst<T, float>
+        (x, y, ids, dst, ncols, nrows, ncols_dst, stride_row, stride_col_y, stride_col_dst,
+         nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y,
+         stride_channel_dst, nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream);
+}
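
mul_mat_vec_f_cuda above is where the accumulator precision is decided: F16 weights may accumulate in half2 only when the op requests default precision, everything else widens to float first. Restated outside the template machinery (a sketch, not a ggml API):

    enum class acc_t { f16, f32 };

    static acc_t pick_accumulator(ggml_type weight_type, ggml_prec prec) {
        if (weight_type == GGML_TYPE_F16 && prec == GGML_PREC_DEFAULT) {
            return acc_t::f16; // fast path: two half FMAs per instruction
        }
        return acc_t::f32;     // safe path: widen before accumulating
    }
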
+
+void ggml_cuda_mul_mat_vec_f(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * ids, ggml_tensor * dst) {
+    GGML_ASSERT(        src1->type == GGML_TYPE_F32);
+    GGML_ASSERT(!ids ||  ids->type == GGML_TYPE_I32);
+    GGML_ASSERT(         dst->type == GGML_TYPE_F32);
+
+    GGML_TENSOR_BINARY_OP_LOCALS;
+
+    const size_t ts_src0 = ggml_type_size(src0->type);
+    const size_t ts_src1 = ggml_type_size(src1->type);
+    const size_t ts_dst  = ggml_type_size(dst->type);
+
+    GGML_ASSERT(!ids || ne12 == 1); // Implementation is only correct for batch size 1.
+    GGML_ASSERT(ne13 == ne3);
+
+    GGML_ASSERT(        nb00       == ts_src0);
+    GGML_ASSERT(        nb10       == ts_src1);
+    GGML_ASSERT(!ids || ids->nb[0] == ggml_type_size(ids->type));
+    GGML_ASSERT(        nb0        == ts_dst);
+
+    const int cc = ggml_cuda_info().devices[ggml_cuda_get_device()].cc;
+    const enum ggml_prec prec = fast_fp16_available(cc) ? ggml_prec(dst->op_params[0]) : GGML_PREC_F32;
+
+    const float   * src1_d =       (const float   *) src1->data;
+    const int32_t *  ids_d = ids ? (const int32_t *)  ids->data : nullptr;
+    float         *  dst_d =       (float         *)  dst->data;
+
+    const int64_t s01 = src0->nb[1] / ts_src0;
+    const int64_t s11 = src1->nb[1] / ts_src1;
+    const int64_t s1  =  dst->nb[1] / ts_dst;
+    const int64_t s02 = src0->nb[2] / ts_src0;
+    const int64_t s12 = src1->nb[2] / ts_src1;
+    const int64_t s2  =  dst->nb[2] / ts_dst;
+    const int64_t s03 = src0->nb[3] / ts_src0;
+    const int64_t s13 = src1->nb[3] / ts_src1;
+    const int64_t s3  =  dst->nb[3] / ts_dst;
+
+    // For MUL_MAT_ID the memory layout is different than for MUL_MAT:
+    const int64_t ncols_dst          = ids ? ne2  : ne1;
+    const int64_t nchannels_y        = ids ? ne11 : ne12;
+    const int64_t nchannels_dst      = ids ? ne1  : ne2;
+    const int64_t stride_channel_dst = ids ? s1   : s2;
+    const int64_t stride_channel_y   = ids ? s11  : s12;
+
+    GGML_ASSERT(!ids || ncols_dst == 1);
+
+    switch (src0->type) {
+        case GGML_TYPE_F32: {
+            const float * src0_d = (const float *) src0->data;
+            mul_mat_vec_f_cuda(src0_d, src1_d, ids_d, dst_d, ne00, ne01, ncols_dst, s01, s11, s1,
+                ne02, nchannels_y, nchannels_dst, s02, stride_channel_y, stride_channel_dst,
+                ne03, ne3, s03, s13, s3, prec, ctx.stream());
+        } break;
+        case GGML_TYPE_F16: {
+            const half * src0_d = (const half *) src0->data;
+            mul_mat_vec_f_cuda(src0_d, src1_d, ids_d, dst_d, ne00, ne01, ncols_dst, s01, s11, s1,
+                ne02, nchannels_y, nchannels_dst, s02, stride_channel_y, stride_channel_dst,
+                ne03, ne3, s03, s13, s3, prec, ctx.stream());
+        } break;
+        case GGML_TYPE_BF16: {
+            const nv_bfloat16 * src0_d = (const nv_bfloat16 *) src0->data;
+            mul_mat_vec_f_cuda(src0_d, src1_d, ids_d, dst_d, ne00, ne01, ncols_dst, s01, s11, s1,
+                ne02, nchannels_y, nchannels_dst, s02, stride_channel_y, stride_channel_dst,
+                ne03, ne3, s03, s13, s3, prec, ctx.stream());
+        } break;
+        default:
+            GGML_ABORT("unsupported type: %s", ggml_type_name(src0->type));
+    }
+}
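
The "memory layout is different" comment above is the crux of MUL_MAT_ID support: with an ids tensor, the destination channel selects an expert matrix of x and a token column of y, while without it x channels are simply broadcast over y channels. The same mapping the kernels apply, in isolation (sketch):

    __device__ int2 map_channels(const int32_t * ids, int channel_dst,
                                 int channel_ratio, int nchannels_y) {
        const int channel_x = ids ? ids[channel_dst] : channel_dst / channel_ratio;
        const int channel_y = ids ? channel_dst % nchannels_y : channel_dst;
        return make_int2(channel_x, channel_y);
    }
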
+
+void ggml_cuda_op_mul_mat_vec_f(
+    ggml_backend_cuda_context & ctx,
+    const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, const char * src0_dd_i, const float * src1_ddf_i,
+    const char * src1_ddq_i, float * dst_dd_i, const int64_t row_low, const int64_t row_high, const int64_t src1_ncols,
+    const int64_t src1_padded_row_size, cudaStream_t stream) {
+
+    GGML_ASSERT(src1->type == GGML_TYPE_F32);
+    GGML_ASSERT(dst->type  == GGML_TYPE_F32);
+
+    const int64_t ne00     = src0->ne[0];
+    const int64_t ne10     = src1->ne[0];
+    const int64_t ne0      =  dst->ne[0];
+    const int64_t row_diff = row_high - row_low;
+
+    const int id = ggml_cuda_get_device();
+    const int cc = ggml_cuda_info().devices[id].cc;
+    const enum ggml_prec prec = fast_fp16_available(cc) ? ggml_prec(dst->op_params[0]) : GGML_PREC_F32;
+
+
+    // ggml_cuda_op provides single, contiguous matrices
+    const int64_t stride_row         = ne00;
+    const int64_t stride_col_y       = ne10;
+    const int64_t stride_col_dst     = id == ctx.device ? ne0 : row_diff; // main device has larger memory buffer
+    const int64_t nchannels_x        = 1;
+    const int64_t nchannels_y        = 1;
+    const int64_t nchannels_dst      = 1;
+    const int64_t stride_channel_x   = 0;
+    const int64_t stride_channel_y   = 0;
+    const int64_t stride_channel_dst = 0;
+    const int64_t nsamples_x         = 1;
+    const int64_t nsamples_dst       = 1;
+    const int64_t stride_sample_x    = 0;
+    const int64_t stride_sample_y    = 0;
+    const int64_t stride_sample_dst  = 0;
+
+    switch (src0->type) {
+        case GGML_TYPE_F32: {
+            const float * src0_d = (const float *) src0_dd_i;
+            mul_mat_vec_f_cuda(src0_d, src1_ddf_i, nullptr, dst_dd_i, ne00, row_diff, src1_ncols, stride_row, stride_col_y, stride_col_dst,
+                nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst,
+                nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, prec, stream);
+        } break;
+        case GGML_TYPE_F16: {
+            const half * src0_d = (const half *) src0_dd_i;
+            mul_mat_vec_f_cuda(src0_d, src1_ddf_i, nullptr, dst_dd_i, ne00, row_diff, src1_ncols, stride_row, stride_col_y, stride_col_dst,
+                nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst,
+                nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, prec, stream);
+        } break;
+        case GGML_TYPE_BF16: {
+            const nv_bfloat16 * src0_d = (const nv_bfloat16 *) src0_dd_i;
+            mul_mat_vec_f_cuda(src0_d, src1_ddf_i, nullptr, dst_dd_i, ne00, row_diff, src1_ncols, stride_row, stride_col_y, stride_col_dst,
+                nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst,
+                nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, prec, stream);
+        } break;
+        default:
+            GGML_ABORT("unsupported type: %s", ggml_type_name(src0->type));
+    }
+
+    GGML_UNUSED(ctx);
+    GGML_UNUSED(src1);
+    GGML_UNUSED(dst);
+    GGML_UNUSED(src1_ddq_i);
+    GGML_UNUSED(src1_ncols);
+    GGML_UNUSED(src1_padded_row_size);
+}
+
+bool ggml_cuda_should_use_mmvf(enum ggml_type type, int cc, const int64_t * src0_ne, int64_t ne11) {
+    if (src0_ne[0] % 2 != 0) {
+        return false;
+    }
+    switch (type) {
+        case GGML_TYPE_F32:
+            if (GGML_CUDA_CC_IS_NVIDIA(cc)) {
+                if (ampere_mma_available(cc)) {
+                    return ne11 <= 3;
+                }
+                if (cc >= GGML_CUDA_CC_TURING) {
+                    return ne11 <= 4;
+                }
+                return ne11 <= 3;
+            } else if (GGML_CUDA_CC_IS_AMD(cc)) {
+                if (fp32_mma_hardware_available(cc)) {
+                    return ne11 <= 3;
+                }
+                return ne11 <= 8;
+            }
+            return ne11 <= 8;
+        case GGML_TYPE_F16:
+            if (GGML_CUDA_CC_IS_NVIDIA(cc)) {
+                const bool src0_small = (src0_ne[1] <= 512 || src0_ne[2]*src0_ne[3] == 1);
+                if (ampere_mma_available(cc)) {
+                    return src0_small && ne11 == 1;
+                }
+                if (cc >= GGML_CUDA_CC_ADA_LOVELACE) {
+                    return src0_small && ne11 <= 4;
+                }
+                if (fp16_mma_hardware_available(cc)) {
+                    return src0_small && ne11 <= 3;
+                }
+                return ne11 <= 8;
+            } else if (GGML_CUDA_CC_IS_AMD(cc)) {
+                if (fp16_mma_hardware_available(cc)) {
+                    if (GGML_CUDA_CC_IS_RDNA3(cc) || GGML_CUDA_CC_IS_RDNA4(cc)) {
+                        return ne11 <= 5;
+                    }
+                    return ne11 <= 2;
+                }
+                return ne11 <= 8;
+            }
+            return ne11 <= 8;
+        case GGML_TYPE_BF16:
+            if (GGML_CUDA_CC_IS_NVIDIA(cc)) {
+                const bool src0_small = (src0_ne[1] <= 512 || src0_ne[2]*src0_ne[3] == 1);
+                if (ampere_mma_available(cc)) {
+                    return src0_small && ne11 == 1;
+                }
+                if (cc >= GGML_CUDA_CC_ADA_LOVELACE) {
+                    return src0_small && ne11 <= 4;
+                }
+                if (bf16_mma_hardware_available(cc)) {
+                    return src0_small && ne11 <= 3;
+                }
+                return ne11 <= 8;
+            } else if (GGML_CUDA_CC_IS_AMD(cc)) {
+                if (bf16_mma_hardware_available(cc)) {
+                    return ne11 <= 3;
+                }
+                return ne11 <= 8;
+            }
+            return ne11 <= 8;
+        default:
+            return false;
+    }
+}
diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/mmvmxfp4.cuh b/ml/backend/ggml/ggml/src/ggml-cuda/mmvf.cuh
similarity index 55%
rename from ml/backend/ggml/ggml/src/ggml-cuda/mmvmxfp4.cuh
rename to ml/backend/ggml/ggml/src/ggml-cuda/mmvf.cuh
index a08fc7800..1da460992 100644
--- a/ml/backend/ggml/ggml/src/ggml-cuda/mmvmxfp4.cuh
+++ b/ml/backend/ggml/ggml/src/ggml-cuda/mmvf.cuh
@@ -1,9 +1,11 @@
 #include "common.cuh"
 
-void ggml_cuda_mul_mat_vec_mxfp4(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * ids, ggml_tensor * dst);
+void ggml_cuda_mul_mat_vec_f(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * ids, ggml_tensor * dst);
 
-void ggml_cuda_op_mul_mat_vec_mxfp4(
+void ggml_cuda_op_mul_mat_vec_f(
     ggml_backend_cuda_context & ctx,
     const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, const char * src0_dd_i, const float * src1_ddf_i,
     const char * src1_ddq_i, float * dst_dd_i, const int64_t row_low, const int64_t row_high, const int64_t src1_ncols,
     const int64_t src1_padded_row_size, cudaStream_t stream);
+
+bool ggml_cuda_should_use_mmvf(enum ggml_type type, int cc, const int64_t * src0_ne, int64_t ne11);
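
ggml_cuda_should_use_mmvf above encodes, per weight type and GPU generation, up to how many destination columns (ne11) the custom kernel still beats the tensor-core/cuBLAS paths. A hypothetical call site, with the names of the alternatives purely illustrative (the real dispatcher lives in ggml-cuda.cu, outside this diff):

    if (ggml_cuda_should_use_mmvf(src0->type, cc, src0->ne, src1->ne[1])) {
        ggml_cuda_mul_mat_vec_f(ctx, src0, src1, nullptr, dst); // narrow batch: custom kernel
    } else {
        // fall through to the mmq / cuBLAS matrix-matrix paths
    }
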
diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/mmvmxfp4.cu b/ml/backend/ggml/ggml/src/ggml-cuda/mmvmxfp4.cu
deleted file mode 100644
index da62062b3..000000000
--- a/ml/backend/ggml/ggml/src/ggml-cuda/mmvmxfp4.cu
+++ /dev/null
@@ -1,307 +0,0 @@
-#include "ggml.h"
-#include "common.cuh"
-#include "mmvmxfp4.cuh"
-
-// MXFP4 implementation derived from mmv.cu float32 code paths
-typedef union {
-    half     f16;
-    uint16_t u16;
-} f16_t;
-
-template <typename type_acc, int block_size> // TODO type_acc unused - consider bf16 support
-static __global__ void mul_mat_vec_mxfp4(
-        const block_mxfp4 * __restrict__ x, const float * __restrict__ y, const int32_t * __restrict__ ids, float * __restrict__ dst,
-        const int64_t ncols2, const int64_t nchannels_y, const int64_t stride_row,
-        const int64_t channel_ratio, const int64_t stride_channel_x, const int64_t stride_channel_y, const int64_t stride_channel_dst,
-        const int64_t sample_ratio, const int64_t stride_sample_x, const int64_t stride_sample_y, const int64_t stride_sample_dst) {
-    const int64_t row         = blockIdx.x;
-    const int64_t channel_dst = blockIdx.y;
-    const int64_t channel_x   = ids ? ids[channel_dst] : channel_dst / channel_ratio;
-    const int64_t channel_y   = ids ? channel_dst % nchannels_y : channel_dst;
-    const int64_t sample_dst  = blockIdx.z;
-    const int64_t sample_x    = sample_dst / sample_ratio;
-    const int64_t sample_y    = sample_dst;
-    const int     tid         = threadIdx.x;
-    constexpr int warp_size   = ggml_cuda_get_physical_warp_size();
-
-    const uint16_t dst_bias   = 15;
-    const uint16_t dst_0p5    = 0x3800;
-    const uint16_t dst_m_bits = 10;
-
-    x   += sample_x *stride_sample_x + channel_x *stride_channel_x + row*stride_row;
-    y   += sample_y *stride_sample_y + channel_y *stride_channel_y;
-    dst += sample_dst*stride_sample_dst + channel_dst*stride_channel_dst;
-
-    const float2 * y2 = (const float2 *) y;
-
-    extern __shared__ char data_mmv[]; // allocated in GPU shared memory: warp_size*sizeof(float)
-    float * buf_iw = (float *) data_mmv;
-
-    if (block_size > warp_size) {
-        if (tid < warp_size) {
-            buf_iw[tid] = 0.0f;
-        }
-        __syncthreads();
-    }
-
-    float sumf = 0.0f;
-
-    for (int64_t col2 = tid; col2 < ncols2; col2 += block_size) {
-        int offset0 = col2 / (MXFP4/2);
-        int i       = col2 % (MXFP4/2);
-        const block_mxfp4 *x2 = x+offset0;
-
-        union {
-            uint32_t as_bits;
-            float    as_value;
-        } scale;
-        scale.as_bits = (((uint32_t)x2->d) << 23);
-        uint16_t em0 = x2->qs[i] & 0x07;
-        uint16_t em1 = x2->qs[i] & 0x70;
-        // float16 values
-        f16_t x0;
-        f16_t x1;
-        x0.u16 = (em0 << (dst_m_bits - 1)) | ((x2->qs[i] & 0x08) << 12);
-        x1.u16 = (em1 << (dst_m_bits - 5)) | ((x2->qs[i] & 0x80) << 8);
-
-        // Three cases:
-        // x is normal and non-zero: Correct bias
-        if ((em0 & 0x06) != 0) {
-            x0.u16 = x0.u16 + ((dst_bias - 1) << dst_m_bits);
-        }
-        if ((em1 & 0x60) != 0) {
-            x1.u16 = x1.u16 + ((dst_bias - 1) << dst_m_bits);
-        }
-        // x is subnormal (x == 0bs001 where s is the sign): Map to +-0.5 in the dst type
-        if (em0 == 0x01) {
-            x0.u16 = dst_0p5 | (x0.u16 & 0x8000);
-        }
-        if (em1 == 0x10) {
-            x1.u16 = dst_0p5 | (x1.u16 & 0x8000);
-        }
-        // x is zero, do nothing
-
-        if (isnan(scale.as_value)) {
-            sumf = scale.as_value;
-            break;
-        }
-
-        const float2 tmpx = {x0.f16, x1.f16};
-        const float2 tmpy = y2[col2];
-        sumf += tmpx.x*tmpy.x*scale.as_value;
-        sumf += tmpx.y*tmpy.y*scale.as_value;
-    }
-
-    sumf = warp_reduce_sum(sumf);
-
-    if (block_size > warp_size) {
-        buf_iw[tid/warp_size] = sumf;
-        __syncthreads();
-        if (tid >= warp_size) {
-            return;
-        }
-        sumf = buf_iw[tid];
-        sumf = warp_reduce_sum(sumf);
-    }
-
-    if (tid != 0) {
-        return;
-    }
-
-    dst[row] = sumf;
-}
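
Since the deleted kernel above is the only place this diff documents the MXFP4 bit layout, here is a standalone restatement of the decode it performs (a sketch, not a ggml API; the block layout is as used above: one E8M0 scale byte d plus packed 4-bit e2m1 values, two per byte, low nibble first):

    __device__ float mxfp4_scale_to_float(uint8_t d) {
        union { uint32_t u; float f; } s;
        s.u = uint32_t(d) << 23;  // E8M0: d lands in the float exponent field -> 2^(d-127)
        return s.f;
    }

    __device__ float mxfp4_nibble_to_float(uint8_t q) {
        // e2m1: 1 sign, 2 exponent, 1 mantissa bit; the eight magnitudes are
        // {0, 0.5, 1, 1.5, 2, 3, 4, 6}, matching the bias/subnormal fixups above.
        const float lut[8] = {0.0f, 0.5f, 1.0f, 1.5f, 2.0f, 3.0f, 4.0f, 6.0f};
        const float v = lut[q & 0x07];
        return (q & 0x08) ? -v : v;
    }
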
-
-template <typename type_acc>
-static void launch_mul_mat_vec_cuda_mxfp4(
-        const block_mxfp4 * x, const float * y, const int32_t * ids, float * dst,
-        const int64_t ncols, const int64_t nrows, const int64_t stride_row, const int64_t nchannels_x, const int64_t nchannels_y, const int64_t nchannels_dst,
-        const int64_t stride_channel_x, const int64_t stride_channel_y, const int64_t stride_channel_dst, const int64_t nsamples_x,
-        const int64_t nsamples_dst, const int64_t stride_sample_x, const int64_t stride_sample_y, const int64_t stride_sample_dst,
-        cudaStream_t stream) {
-    GGML_ASSERT(ncols % 2 == 0);
-    // GGML_ASSERT(stride_row % 2 == 0); // TODO
-    GGML_ASSERT(ids || nchannels_dst % nchannels_x == 0);
-    GGML_ASSERT( nsamples_dst % nsamples_x == 0);
-    const int64_t channel_ratio = nchannels_dst / nchannels_x;
-    const int64_t sample_ratio  = nsamples_dst  / nsamples_x;
-    int device;
-    int warp_size;
-
-    CUDA_CHECK(cudaGetDevice(&device));
-    warp_size = ggml_cuda_info().devices[device].warp_size;
-
-    int64_t block_size_best = warp_size;
-    int64_t niter_best      = (ncols + 2*warp_size - 1) / (2*warp_size);
-    int64_t max_block_size  = 256;
-    if(ggml_cuda_info().devices[device].cc > GGML_CUDA_CC_OFFSET_AMD && ggml_cuda_info().devices[device].cc < GGML_CUDA_CC_RDNA1) {
-        max_block_size = 128;
-    }
-    for (int64_t block_size = 2*warp_size; block_size <= max_block_size; block_size += warp_size) {
-        const int64_t niter = (ncols + 2*block_size - 1) / (2*block_size);
-        if (niter < niter_best) {
-            niter_best      = niter;
-            block_size_best = block_size;
-        }
-    }
-
-    const int smem = warp_size*sizeof(float);
-    const dim3 block_nums(nrows, nchannels_dst, nsamples_dst);
-    const dim3 block_dims(block_size_best, 1, 1);
-
-    switch (block_size_best) {
-        case 32: {
-            mul_mat_vec_mxfp4<type_acc, 32><<<block_nums, block_dims, smem, stream>>>
-                (x, y, ids, dst, ncols/2, nchannels_y, stride_row, channel_ratio, stride_channel_x, stride_channel_y,
-                 stride_channel_dst, sample_ratio, stride_sample_x, stride_sample_y, stride_sample_dst);
-        } break;
-        case 64: {
-            mul_mat_vec_mxfp4<type_acc, 64><<<block_nums, block_dims, smem, stream>>>
-                (x, y, ids, dst, ncols/2, nchannels_y, stride_row, channel_ratio, stride_channel_x, stride_channel_y,
-                 stride_channel_dst, sample_ratio, stride_sample_x, stride_sample_y, stride_sample_dst);
-        } break;
-        case 96: {
-            mul_mat_vec_mxfp4<type_acc, 96><<<block_nums, block_dims, smem, stream>>>
-                (x, y, ids, dst, ncols/2, nchannels_y, stride_row, channel_ratio, stride_channel_x, stride_channel_y,
-                 stride_channel_dst, sample_ratio, stride_sample_x, stride_sample_y, stride_sample_dst);
-        } break;
-        case 128: {
-            mul_mat_vec_mxfp4<type_acc, 128><<<block_nums, block_dims, smem, stream>>>
-                (x, y, ids, dst, ncols/2, nchannels_y, stride_row, channel_ratio, stride_channel_x, stride_channel_y,
-                 stride_channel_dst, sample_ratio, stride_sample_x, stride_sample_y, stride_sample_dst);
-        } break;
-        case 160: {
-            mul_mat_vec_mxfp4<type_acc, 160><<<block_nums, block_dims, smem, stream>>>
-                (x, y, ids, dst, ncols/2, nchannels_y, stride_row, channel_ratio, stride_channel_x, stride_channel_y,
-                 stride_channel_dst, sample_ratio, stride_sample_x, stride_sample_y, stride_sample_dst);
-        } break;
-        case 192: {
-            mul_mat_vec_mxfp4<type_acc, 192><<<block_nums, block_dims, smem, stream>>>
-                (x, y, ids, dst, ncols/2, nchannels_y, stride_row, channel_ratio, stride_channel_x, stride_channel_y,
-                 stride_channel_dst, sample_ratio, stride_sample_x, stride_sample_y, stride_sample_dst);
-        } break;
-        case 224: {
-            mul_mat_vec_mxfp4<type_acc, 224><<<block_nums, block_dims, smem, stream>>>
-                (x, y, ids, dst, ncols/2, nchannels_y, stride_row, channel_ratio, stride_channel_x, stride_channel_y,
-                 stride_channel_dst, sample_ratio, stride_sample_x, stride_sample_y, stride_sample_dst);
-        } break;
-        case 256: {
-            mul_mat_vec_mxfp4<type_acc, 256><<<block_nums, block_dims, smem, stream>>>
-                (x, y, ids, dst, ncols/2, nchannels_y, stride_row, channel_ratio, stride_channel_x, stride_channel_y,
-                 stride_channel_dst, sample_ratio, stride_sample_x, stride_sample_y, stride_sample_dst);
-        } break;
-        default: {
-            GGML_ABORT("fatal error");
-        } break;
-    }
-}
-
-static void mul_mat_vec_cuda_mxfp4(
-        const block_mxfp4 * x, const float * y, const int32_t * ids, float * dst,
-        const int64_t ncols, const int64_t nrows, const int64_t stride_row, const int64_t nchannels_x, const int64_t nchannels_y, const int64_t nchannels_dst,
-        const int64_t stride_channel_x, const int64_t stride_channel_y, const int64_t stride_channel_dst, const int64_t nsamples_x,
-        const int64_t nsamples_dst, const int64_t stride_sample_x, const int64_t stride_sample_y, const int64_t stride_sample_dst,
-        enum ggml_prec prec, cudaStream_t stream) {
-    launch_mul_mat_vec_cuda_mxfp4
-        (x, y, ids, dst, ncols, nrows, stride_row, nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y,
-         stride_channel_dst, nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, stream);
-}
-
-void ggml_cuda_mul_mat_vec_mxfp4(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * ids, ggml_tensor * dst) {
-    GGML_ASSERT(        src1->type == GGML_TYPE_F32);
-    GGML_ASSERT(!ids ||  ids->type == GGML_TYPE_I32);
-    GGML_ASSERT(         dst->type == GGML_TYPE_F32);
-
-    GGML_TENSOR_BINARY_OP_LOCALS;
-
-    const size_t ts_src0 = ggml_type_size(src0->type);
-    const size_t ts_src1 = ggml_type_size(src1->type);
-    const size_t ts_dst  = ggml_type_size(dst->type);
-
-    GGML_ASSERT(!ids || ne12 == 1); // Implementation is only correct for batch size 1.
-    GGML_ASSERT(ne13 == ne3);
-
-    // GGML_ASSERT( nb00 == ts_src0); // TODO adjust for block sizing logic
-    GGML_ASSERT(        nb10       == ts_src1);
-    GGML_ASSERT(!ids || ids->nb[0] == ggml_type_size(ids->type));
-    GGML_ASSERT(        nb0        == ts_dst);
-
-    const int cc = ggml_cuda_info().devices[ggml_cuda_get_device()].cc;
-    const enum ggml_prec prec = fast_fp16_available(cc) ? ggml_prec(dst->op_params[0]) : GGML_PREC_F32;
-
-    const float   * src1_d =       (const float   *) src1->data;
-    const int32_t *  ids_d = ids ? (const int32_t *)  ids->data : nullptr;
-    float         *  dst_d =       (float         *)  dst->data;
-
-    const int64_t stride_row        = src0->nb[1] / ts_src0;
-    const int64_t s11               = src1->nb[1] / ts_src1;
-    const int64_t s1                =  dst->nb[1] / ts_dst;
-    const int64_t stride_channel_x  = src0->nb[2] / ts_src0;
-    const int64_t s12               = src1->nb[2] / ts_src1;
-    const int64_t s2                =  dst->nb[2] / ts_dst;
-    const int64_t stride_sample_x   = src0->nb[3] / ts_src0;
-    const int64_t stride_sample_y   = src1->nb[3] / ts_src1;
-    const int64_t stride_sample_dst =  dst->nb[3] / ts_dst;
-    const int64_t nsamples_dst      = ne3;
-    const int64_t nsamples_x        = ne03;
-    const int64_t nchannels_x       = ne02;
-    const int64_t nrows             = ne01;
-    const int64_t ncols             = ne00;
-
-    // For MUL_MAT_ID the memory layout is different than for MUL_MAT:
-    const int64_t ncols_dst          = ids ? ne2  : ne1;
-    const int64_t nchannels_y        = ids ? ne11 : ne12;
-    const int64_t nchannels_dst      = ids ? ne1  : ne2;
-    const int64_t stride_channel_dst = ids ? s1   : s2;
-    const int64_t stride_channel_y   = ids ? s11  : s12;
-
-    GGML_ASSERT(ncols_dst == 1);
-
-    const block_mxfp4 * src0_d = (const block_mxfp4 *) src0->data;
-    mul_mat_vec_cuda_mxfp4(src0_d, src1_d, ids_d, dst_d, ncols, nrows, stride_row,
-        nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst,
-        nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, prec, ctx.stream());
-}
-
-void ggml_cuda_op_mul_mat_vec_mxfp4(
-    ggml_backend_cuda_context & ctx,
-    const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, const char * src0_dd_i, const float * src1_ddf_i,
-    const char * src1_ddq_i, float * dst_dd_i, const int64_t row_low, const int64_t row_high, const int64_t src1_ncols,
-    const int64_t src1_padded_row_size, cudaStream_t stream) {
-
-    GGML_ASSERT(src1->type == GGML_TYPE_F32);
-    GGML_ASSERT(dst->type  == GGML_TYPE_F32);
-
-    const int64_t ne00     = src0->ne[0];
-    const int64_t row_diff = row_high - row_low;
-
-    GGML_ASSERT(src1_ncols == 1);
-
-    const int cc = ggml_cuda_info().devices[ggml_cuda_get_device()].cc;
-    const enum ggml_prec prec = fast_fp16_available(cc) ? ggml_prec(dst->op_params[0]) : GGML_PREC_F32;
-
-    // ggml_cuda_op provides single, contiguous matrices
-    const int64_t stride_row         = ne00 / MXFP4;
-    const int64_t nchannels_x        = 1;
-    const int64_t nchannels_y        = 1;
-    const int64_t nchannels_dst      = 1;
-    const int64_t stride_channel_x   = 0;
-    const int64_t stride_channel_y   = 0;
-    const int64_t stride_channel_dst = 0;
-    const int64_t nsamples_x         = 1;
-    const int64_t nsamples_dst       = 1;
-    const int64_t stride_sample_x    = 0;
-    const int64_t stride_sample_y    = 0;
-    const int64_t stride_sample_dst  = 0;
-
-    const block_mxfp4 * src0_d = (const block_mxfp4 *) src0_dd_i;
-    mul_mat_vec_cuda_mxfp4(src0_d, src1_ddf_i, nullptr, dst_dd_i, ne00, row_diff, stride_row,
-        nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst,
-        nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst, prec, stream);
-
-    GGML_UNUSED(ctx);
-    GGML_UNUSED(src1);
-    GGML_UNUSED(dst);
-    GGML_UNUSED(src1_ddq_i);
-    GGML_UNUSED(src1_ncols);
-    GGML_UNUSED(src1_padded_row_size);
-}
diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/mmvq.cu b/ml/backend/ggml/ggml/src/ggml-cuda/mmvq.cu
index dc7adf509..5c8e5c4a7 100644
--- a/ml/backend/ggml/ggml/src/ggml-cuda/mmvq.cu
+++ b/ml/backend/ggml/ggml/src/ggml-cuda/mmvq.cu
@@ -13,6 +13,7 @@ static constexpr __device__ vec_dot_q_cuda_t get_vec_dot_q_cuda(ggml_type type)
         case GGML_TYPE_Q5_0:  return vec_dot_q5_0_q8_1;
         case GGML_TYPE_Q5_1:  return vec_dot_q5_1_q8_1;
         case GGML_TYPE_Q8_0:  return vec_dot_q8_0_q8_1;
+        case GGML_TYPE_MXFP4: return vec_dot_mxfp4_q8_1;
         case GGML_TYPE_Q2_K:  return vec_dot_q2_K_q8_1;
         case GGML_TYPE_Q3_K:  return vec_dot_q3_K_q8_1;
         case GGML_TYPE_Q4_K:  return vec_dot_q4_K_q8_1;
@@ -38,6 +39,7 @@ static constexpr __device__ int get_vdr_mmvq(ggml_type type) {
         case GGML_TYPE_Q5_0:  return VDR_Q5_0_Q8_1_MMVQ;
         case GGML_TYPE_Q5_1:  return VDR_Q5_1_Q8_1_MMVQ;
         case GGML_TYPE_Q8_0:  return VDR_Q8_0_Q8_1_MMVQ;
+        case GGML_TYPE_MXFP4: return VDR_MXFP4_Q8_1_MMVQ;
         case GGML_TYPE_Q2_K:  return VDR_Q2_K_Q8_1_MMVQ;
         case GGML_TYPE_Q3_K:  return VDR_Q3_K_Q8_1_MMVQ;
         case GGML_TYPE_Q4_K:  return VDR_Q4_K_Q8_1_MMVQ;
@@ -384,6 +386,13 @@ static void mul_mat_vec_q_switch_type(
                 nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst,
                 stream);
             break;
+        case GGML_TYPE_MXFP4:
+            mul_mat_vec_q_switch_ncols_dst<GGML_TYPE_MXFP4>
+                (vx, vy, ids, dst, ncols_x, nrows_x, ncols_dst, stride_row_x, stride_col_y, stride_col_dst,
+                 nchannels_x, nchannels_y, nchannels_dst, stride_channel_x, stride_channel_y, stride_channel_dst,
+                 nsamples_x, nsamples_dst, stride_sample_x, stride_sample_y, stride_sample_dst,
+                 stream);
+            break;
         case GGML_TYPE_Q2_K:
             mul_mat_vec_q_switch_ncols_dst<GGML_TYPE_Q2_K>
                 (vx, vy, ids, dst, ncols_x, nrows_x, ncols_dst, stride_row_x, stride_col_y, stride_col_dst,
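
Wiring MXFP4 into the MMVQ path above is table-driven: the type needs a vec-dot kernel, a VDR constant (how many quant values each dot-product iteration consumes), and a dispatch case; vec_dot_mxfp4_q8_1 and VDR_MXFP4_Q8_1_MMVQ themselves are defined in vecdotq.cuh, outside this diff. The compile-time lookup pattern, compressed to the new entry (sketch):

    static constexpr __device__ vec_dot_q_cuda_t vec_dot_for(ggml_type type) {
        switch (type) {
            case GGML_TYPE_MXFP4: return vec_dot_mxfp4_q8_1;
            default:              return nullptr; // the real table covers every supported type
        }
    }
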
diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/norm.cu b/ml/backend/ggml/ggml/src/ggml-cuda/norm.cu
index 0020dbcec..bddcca51b 100644
--- a/ml/backend/ggml/ggml/src/ggml-cuda/norm.cu
+++ b/ml/backend/ggml/ggml/src/ggml-cuda/norm.cu
@@ -104,10 +104,12 @@ static __global__ void group_norm_f32(const float * x, float * dst, const int gr
     }
 }
 
-template <int block_size>
+template <int block_size, bool do_multiply = false>
 static __global__ void rms_norm_f32(
         const float * x, float * dst, const int ncols, const int64_t stride_row, const int64_t stride_channel,
-        const int64_t stride_sample, const float eps) {
+        const int64_t stride_sample, const float eps, const float * mul = nullptr, const int64_t mul_stride_row = 0,
+        const int64_t mul_stride_channel = 0, const int64_t mul_stride_sample = 0, const int mul_ncols = 0,
+        const int mul_nrows = 0, const int mul_nchannels = 0, const int mul_nsamples = 0) {
     const int nrows     = gridDim.x;
     const int nchannels = gridDim.y;
@@ -119,6 +121,13 @@ static __global__ void rms_norm_f32(
     x   += sample*stride_sample + channel*stride_channel + row*stride_row;
     dst += ((sample*nchannels + channel)*nrows + row)*ncols;
 
+    if constexpr (do_multiply) {
+        const int mul_row     = row     % mul_nrows;
+        const int mul_channel = channel % mul_nchannels;
+        const int mul_sample  = sample  % mul_nsamples;
+        mul += mul_sample*mul_stride_sample + mul_channel*mul_stride_channel + mul_row*mul_stride_row;
+    }
+
     float tmp = 0.0f; // partial sum for thread in warp
 
     for (int col = tid; col < ncols; col += block_size) {
@@ -145,7 +154,12 @@ static __global__ void rms_norm_f32(
     const float scale = rsqrtf(mean + eps);
 
     for (int col = tid; col < ncols; col += block_size) {
-        dst[col] = scale * x[col];
+        if constexpr (do_multiply) {
+            const int mul_col = col % mul_ncols;
+            dst[col] = scale * x[col] * mul[mul_col];
+        } else {
+            dst[col] = scale * x[col];
+        }
     }
 }
 
@@ -310,10 +324,30 @@ static void rms_norm_f32_cuda(
     const dim3 blocks_num(nrows, nchannels, nsamples);
     if (ncols < 1024) {
         const dim3 block_dims(WARP_SIZE, 1, 1);
-        rms_norm_f32<WARP_SIZE><<<blocks_num, block_dims, 0, stream>>>(x, dst, ncols, stride_row, stride_channel, stride_sample, eps);
+        rms_norm_f32<WARP_SIZE, false><<<blocks_num, block_dims, 0, stream>>>(x, dst, ncols, stride_row, stride_channel, stride_sample, eps);
     } else {
         const dim3 block_dims(1024, 1, 1);
-        rms_norm_f32<1024><<<blocks_num, block_dims, 0, stream>>>(x, dst, ncols, stride_row, stride_channel, stride_sample, eps);
+        rms_norm_f32<1024, false><<<blocks_num, block_dims, 0, stream>>>(x, dst, ncols, stride_row, stride_channel, stride_sample, eps);
+    }
+}
+
+static void rms_norm_mul_f32_cuda(
+        const float * x, const float * mul, float * dst, const int ncols, const int nrows, const int nchannels, const int nsamples,
+        const int64_t stride_row, const int64_t stride_channel, const int64_t stride_sample,
+        const int64_t mul_stride_row, const int64_t mul_stride_channel, const int64_t mul_stride_sample,
+        const int mul_ncols, const int mul_nrows, const int mul_nchannels, const int mul_nsamples,
+        const float eps, cudaStream_t stream) {
+    const dim3 blocks_num(nrows, nchannels, nsamples);
+    if (mul == nullptr) {
+        rms_norm_f32_cuda(x, dst, ncols, nrows, nchannels, nsamples, stride_row, stride_channel, stride_sample, eps, stream);
+        return;
+    }
+    if (ncols < 1024) {
+        const dim3 block_dims(WARP_SIZE, 1, 1);
+        rms_norm_f32<WARP_SIZE, true><<<blocks_num, block_dims, 0, stream>>>(x, dst, ncols, stride_row, stride_channel, stride_sample, eps, mul, mul_stride_row, mul_stride_channel, mul_stride_sample, mul_ncols, mul_nrows, mul_nchannels, mul_nsamples);
+    } else {
+        const dim3 block_dims(1024, 1, 1);
+        rms_norm_f32<1024, true><<<blocks_num, block_dims, 0, stream>>>(x, dst, ncols, stride_row, stride_channel, stride_sample, eps, mul, mul_stride_row, mul_stride_channel, mul_stride_sample, mul_ncols, mul_nrows, mul_nchannels, mul_nsamples);
     }
 }
 
@@ -407,6 +441,59 @@ void ggml_cuda_op_rms_norm(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
     rms_norm_f32_cuda(src0_d, dst_d, ne00, ne01, ne02, ne03, s01, s02, s03, eps, stream);
 }
 
+void ggml_cuda_op_rms_norm_fused(ggml_backend_cuda_context & ctx, ggml_tensor * dst, ggml_tensor * mul_tensor) {
+    const ggml_tensor * rms_norm_src = (ggml_tensor *) dst->src[0];
+    float eps = 0.0f;
+
+    memcpy(&eps, dst->op_params, sizeof(float));
+
+    const float * src0_d = (const float *) rms_norm_src->data;
+    const float * mul_d = nullptr;
+    const ggml_tensor * mul_src = nullptr;
+
+    if (mul_tensor->src[0] == dst) {
+        mul_d = (float *) mul_tensor->src[1]->data;
+        mul_src = mul_tensor->src[1];
+    } else if(mul_tensor->src[1] == dst) {
+        mul_d = (float *) mul_tensor->src[0]->data;
+        mul_src = mul_tensor->src[0];
+    } else {
+        GGML_ASSERT(false);
+    }
+
+    float * dst_d = (float *) mul_tensor->data;
+    cudaStream_t stream = ctx.stream();
+
+    GGML_ASSERT(rms_norm_src->type == GGML_TYPE_F32);
+    GGML_ASSERT(dst->type        == GGML_TYPE_F32);
+    GGML_ASSERT(mul_tensor->type == GGML_TYPE_F32);
+    GGML_ASSERT(eps >= 0.0f);
+
+    const int64_t ne00 = rms_norm_src->ne[0];
+    const int64_t ne01 = rms_norm_src->ne[1];
+    const int64_t ne02 = rms_norm_src->ne[2];
+    const int64_t ne03 = rms_norm_src->ne[3];
+
+    const size_t ts0 = ggml_type_size(rms_norm_src->type);
+    GGML_ASSERT(rms_norm_src->nb[0] == ts0);
+    const int64_t s01 = rms_norm_src->nb[1] / ts0;
+    const int64_t s02 = rms_norm_src->nb[2] / ts0;
+    const int64_t s03 = rms_norm_src->nb[3] / ts0;
+
+    const size_t ts_mul = ggml_type_size(mul_src->type);
+    GGML_ASSERT(mul_src->nb[0] == ts_mul);
+    const int64_t mul_s01 = mul_src->nb[1] / ts_mul;
+    const int64_t mul_s02 = mul_src->nb[2] / ts_mul;
+    const int64_t mul_s03 = mul_src->nb[3] / ts_mul;
+
+    const int mul_ncols     = mul_src->ne[0];
+    const int mul_nrows     = mul_src->ne[1];
+    const int mul_nchannels = mul_src->ne[2];
+    const int mul_nsamples  = mul_src->ne[3];
+
+    rms_norm_mul_f32_cuda(src0_d, mul_d, dst_d, ne00, ne01, ne02, ne03, s01, s02, s03, mul_s01, mul_s02, mul_s03, mul_ncols, mul_nrows, mul_nchannels, mul_nsamples, eps, stream);
+}
+
 void ggml_cuda_op_rms_norm_back(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
     const ggml_tensor * grad  = dst->src[0]; // gradients
     const ggml_tensor * src0f = dst->src[1]; // src0 from forward pass
diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/norm.cuh b/ml/backend/ggml/ggml/src/ggml-cuda/norm.cuh
index 706a5660a..7ea7bd4df 100644
--- a/ml/backend/ggml/ggml/src/ggml-cuda/norm.cuh
+++ b/ml/backend/ggml/ggml/src/ggml-cuda/norm.cuh
@@ -6,6 +6,8 @@ void ggml_cuda_op_group_norm(ggml_backend_cuda_context & ctx, ggml_tensor * dst)
 
 void ggml_cuda_op_rms_norm(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
 
+void ggml_cuda_op_rms_norm_fused(ggml_backend_cuda_context & ctx, ggml_tensor * dst, ggml_tensor * mul_tensor);
+
 void ggml_cuda_op_rms_norm_back(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
 
 void ggml_cuda_op_l2_norm(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
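
The do_multiply specialization added to rms_norm_f32 above exists to fuse GGML_OP_RMS_NORM with a following GGML_OP_MUL: instead of writing the normalized row and re-reading it in a second kernel, the weight multiply happens in the normalization epilogue, with modulo indexing giving ggml's broadcast semantics. Per element (sketch; inv_rms is rsqrtf(mean + eps)):

    // unfused: two kernels, one extra round trip through global memory
    //   pass 1: tmp[col] = inv_rms * x[col];
    //   pass 2: dst[col] = tmp[col] * mul[col % mul_ncols];
    // fused epilogue, as in the do_multiply branch above:
    __device__ void fused_epilogue(float * dst, const float * x, const float * mul,
                                   float inv_rms, int col, int mul_ncols) {
        dst[col] = inv_rms * x[col] * mul[col % mul_ncols];
    }
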
16 : 32; - const int64_t i0 = ((int64_t)blockDim.x*blockIdx.x + threadIdx.x)*4; + const int64_t i0 = ((int64_t)blockDim.x*blockIdx.y + threadIdx.x)*4; if (i0 >= ne0) { return; } - const int64_t i1 = blockIdx.y; + const int64_t i1 = blockIdx.x; const int64_t i2 = blockIdx.z % ne2; const int64_t i3 = blockIdx.z / ne2; @@ -75,8 +75,8 @@ static __global__ void quantize_mmq_q8_1( block_q8_1_mmq * y = (block_q8_1_mmq *) vy; - const int64_t ib0 = blockIdx.z*((int64_t)gridDim.y*gridDim.x*blockDim.x/QK8_1); // first block of channel - const int64_t ib = ib0 + (i0 / (4*QK8_1))*ne1 + blockIdx.y; // block index in channel + const int64_t ib0 = blockIdx.z*((int64_t)gridDim.x*gridDim.y*blockDim.x/QK8_1); // first block of channel + const int64_t ib = ib0 + (i0 / (4*QK8_1))*ne1 + blockIdx.x; // block index in channel const int64_t iqs = i0 % (4*QK8_1); // quant index in block // Load 4 floats per thread and calculate max. abs. value between them: @@ -166,8 +166,9 @@ void quantize_mmq_q8_1_cuda( GGML_ASSERT(ne00 % 4 == 0); GGML_ASSERT(ne0 % (4*QK8_1) == 0); - const int64_t block_num_x = (ne0 + 4*CUDA_QUANTIZE_BLOCK_SIZE_MMQ - 1) / (4*CUDA_QUANTIZE_BLOCK_SIZE_MMQ); - const dim3 num_blocks(block_num_x, ne1, ne2*ne3); + // ne1 tends to assume the highest values, therefore use it as the "x" dimension of the CUDA grid: + const int64_t block_num_y = (ne0 + 4*CUDA_QUANTIZE_BLOCK_SIZE_MMQ - 1) / (4*CUDA_QUANTIZE_BLOCK_SIZE_MMQ); + const dim3 num_blocks(ne1, block_num_y, ne2*ne3); const dim3 block_size(CUDA_QUANTIZE_BLOCK_SIZE_MMQ, 1, 1); switch (mmq_get_q8_1_ds_layout(type_src0)) { case MMQ_Q8_1_DS_LAYOUT_D4: diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/roll.cu b/ml/backend/ggml/ggml/src/ggml-cuda/roll.cu new file mode 100644 index 000000000..a339dfc1a --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/roll.cu @@ -0,0 +1,67 @@ +#include "ggml-cuda/common.cuh" +#include "roll.cuh" + +static __forceinline__ __device__ int64_t wrap_index(const int64_t idx, const int64_t ne) { + if (idx < 0) { + return idx + ne; + } + if (idx >= ne) { + return idx - ne; + } + return idx; +} + +static __global__ void roll_f32_cuda(const float * __restrict__ src, + float * __restrict__ dst, + const int64_t ne00, + const int64_t ne01, + const int64_t ne02, + const int64_t ne03, + const int s0, + const int s1, + const int s2, + const int s3) { + const int64_t idx = int64_t(blockDim.x) * blockIdx.x + threadIdx.x; + const int64_t n_elements = ne00 * ne01 * ne02 * ne03; + + if (idx >= n_elements) { + return; + } + + const int64_t i0 = idx % ne00; + const int64_t i1 = (idx / ne00) % ne01; + const int64_t i2 = (idx / (ne00 * ne01)) % ne02; + const int64_t i3 = (idx / (ne00 * ne01 * ne02)) % ne03; + + const int64_t d0 = wrap_index(i0 - s0, ne00); + const int64_t d1 = wrap_index(i1 - s1, ne01); + const int64_t d2 = wrap_index(i2 - s2, ne02); + const int64_t d3 = wrap_index(i3 - s3, ne03); + + dst[i3 * (ne00 * ne01 * ne02) + i2 * (ne01 * ne00) + i1 * ne00 + i0] = + src[d3 * (ne00 * ne01 * ne02) + d2 * (ne01 * ne00) + d1 * ne00 + d0]; +} + +void ggml_cuda_op_roll(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + int s0 = dst->op_params[0]; + int s1 = dst->op_params[1]; + int s2 = dst->op_params[2]; + int s3 = dst->op_params[3]; + + const ggml_tensor * src0 = dst->src[0]; + const float * src0_d = (const float *) dst->src[0]->data; + float * dst_d = (float *) dst->data; + + GGML_TENSOR_UNARY_OP_LOCALS; + + GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32); + GGML_ASSERT(ggml_are_same_shape(dst->src[0], dst)); + + cudaStream_t stream = 
ctx.stream();
+
+    int64_t sz         = (ne00 * ne01 * ne02 * ne03);
+    int64_t num_blocks = (sz + CUDA_ROLL_BLOCK_SIZE - 1) / CUDA_ROLL_BLOCK_SIZE;
+
+    roll_f32_cuda<<<num_blocks, CUDA_ROLL_BLOCK_SIZE, 0, stream>>>(
+        src0_d, dst_d, ne00, ne01, ne02, ne03, s0, s1, s2, s3);
+}
diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/roll.cuh b/ml/backend/ggml/ggml/src/ggml-cuda/roll.cuh
new file mode 100644
index 000000000..322d55436
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-cuda/roll.cuh
@@ -0,0 +1,5 @@
+#include "common.cuh"
+
+#define CUDA_ROLL_BLOCK_SIZE 256
+
+void ggml_cuda_op_roll(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
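For reference, the roll op rotates each axis by the shift stored in op_params, with wrap-around. A 1-D CPU sketch of the semantics that wrap_index implements (illustrative, not part of the patch; the single add/sub correction assumes |s| < ne, as the kernel does):

    #include <cstdint>

    // dst[i] = src[(i - s) mod ne]
    static void roll_ref_1d(const float * src, float * dst, int64_t ne, int64_t s) {
        for (int64_t i = 0; i < ne; ++i) {
            int64_t j = i - s;
            j = j < 0 ? j + ne : (j >= ne ? j - ne : j);  // one correction suffices for |s| < ne
            dst[i] = src[j];
        }
    }
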
diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/rope.cu b/ml/backend/ggml/ggml/src/ggml-cuda/rope.cu
index 18f691b2d..d058504cd 100644
--- a/ml/backend/ggml/ggml/src/ggml-cuda/rope.cu
+++ b/ml/backend/ggml/ggml/src/ggml-cuda/rope.cu
@@ -50,21 +50,19 @@ static __global__ void rope_norm(
 
     const int row_dst = blockDim.x*blockIdx.x + threadIdx.x;
 
-    if (i0 >= n_dims) {
-        const int i = row_dst*ne0 + i0;
-
-        dst[i + 0] = x[i + 0];
-        dst[i + 1] = x[i + 1];
-
-        return;
-    }
-
     const int row_x     = row_dst % ne1;
     const int channel_x = row_dst / ne1;
 
     const int idst = row_dst*ne0 + i0;
     const int ix   = channel_x*s2 + row_x*s1 + i0;
 
+    if (i0 >= n_dims) {
+        dst[idst + 0] = x[ix + 0];
+        dst[idst + 1] = x[ix + 1];
+
+        return;
+    }
+
     const float theta_base = pos[channel_x]*powf(theta_scale, i0/2.0f);
 
     const float freq_factor = has_ff ? freq_factors[i0/2] : 1.0f;
@@ -94,21 +92,19 @@ static __global__ void rope_neox(
 
     const int row_dst = blockDim.x*blockIdx.x + threadIdx.x;
 
-    if (i0 >= n_dims) {
-        const int i = row_dst*ne0 + i0;
-
-        dst[i + 0] = x[i + 0];
-        dst[i + 1] = x[i + 1];
-
-        return;
-    }
-
     const int row_x     = row_dst % ne1;
     const int channel_x = row_dst / ne1;
 
     const int idst = row_dst*ne0 + i0/2;
     const int ix   = channel_x*s2 + row_x*s1 + i0/2;
 
+    if (i0 >= n_dims) {
+        dst[idst + i0/2 + 0] = x[ix + i0/2 + 0];
+        dst[idst + i0/2 + 1] = x[ix + i0/2 + 1];
+
+        return;
+    }
+
     const float theta_base = pos[channel_x]*powf(theta_scale, i0/2.0f);
 
     const float freq_factor = has_ff ? freq_factors[i0/2] : 1.0f;
@@ -138,21 +134,19 @@ static __global__ void rope_multi(
 
     const int row_dst = blockDim.x*blockIdx.x + threadIdx.x;
 
-    if (i0 >= n_dims) {
-        const int i = row_dst*ne0 + i0;
-
-        dst[i + 0] = x[i + 0];
-        dst[i + 1] = x[i + 1];
-
-        return;
-    }
-
     const int row_x     = row_dst % ne1;
     const int channel_x = row_dst / ne1;
 
     const int idst = row_dst*ne0 + i0/2;
     const int ix   = channel_x*s2 + row_x*s1 + i0/2;
 
+    if (i0 >= n_dims) {
+        dst[idst + i0/2 + 0] = x[ix + i0/2 + 0];
+        dst[idst + i0/2 + 1] = x[ix + i0/2 + 1];
+
+        return;
+    }
+
     const int sect_dims = sections.v[0] + sections.v[1] + sections.v[2] + sections.v[3];
     const int sec_w = sections.v[1] + sections.v[0];
     const int sector = (i0 / 2) % sect_dims;
diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/scale.cu b/ml/backend/ggml/ggml/src/ggml-cuda/scale.cu
index 1405e066e..2ee9e5889 100644
--- a/ml/backend/ggml/ggml/src/ggml-cuda/scale.cu
+++ b/ml/backend/ggml/ggml/src/ggml-cuda/scale.cu
@@ -1,18 +1,18 @@
 #include "scale.cuh"
 
-static __global__ void scale_f32(const float * x, float * dst, const float scale, const int k) {
+static __global__ void scale_f32(const float * x, float * dst, const float scale, const float bias, const int k) {
     const int i = blockDim.x*blockIdx.x + threadIdx.x;
 
     if (i >= k) {
         return;
     }
 
-    dst[i] = scale * x[i];
+    dst[i] = scale * x[i] + bias;
 }
 
-static void scale_f32_cuda(const float * x, float * dst, const float scale, const int k, cudaStream_t stream) {
+static void scale_f32_cuda(const float * x, float * dst, const float scale, const float bias, const int k, cudaStream_t stream) {
     const int num_blocks = (k + CUDA_SCALE_BLOCK_SIZE - 1) / CUDA_SCALE_BLOCK_SIZE;
-    scale_f32<<<num_blocks, CUDA_SCALE_BLOCK_SIZE, 0, stream>>>(x, dst, scale, k);
+    scale_f32<<<num_blocks, CUDA_SCALE_BLOCK_SIZE, 0, stream>>>(x, dst, scale, bias, k);
 }
 
 void ggml_cuda_op_scale(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
@@ -25,7 +25,9 @@ void ggml_cuda_op_scale(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
     GGML_ASSERT( dst->type == GGML_TYPE_F32);
 
     float scale;
-    memcpy(&scale, dst->op_params, sizeof(float));
+    float bias;
+    memcpy(&scale, (float *) dst->op_params + 0, sizeof(float));
+    memcpy(&bias,  (float *) dst->op_params + 1, sizeof(float));
 
-    scale_f32_cuda(src0_d, dst_d, scale, ggml_nelements(src0), stream);
+    scale_f32_cuda(src0_d, dst_d, scale, bias, ggml_nelements(src0), stream);
 }
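GGML_OP_SCALE thus becomes a fused multiply-add, y = scale*x + bias, with both scalars packed into the tensor's op_params. A sketch of the graph-building side, mirroring how the kernel reads the parameters back (illustrative helper, not part of the patch):

    #include <cstring>
    #include "ggml.h"

    // `t` stands in for the GGML_OP_SCALE tensor whose op_params the kernel reads.
    static void pack_scale_bias(struct ggml_tensor * t, float scale, float bias) {
        const float params[2] = { scale, bias };       // [0] = scale, [1] = bias
        memcpy(t->op_params, params, sizeof(params));  // kernel computes dst[i] = scale*x[i] + bias
    }
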
diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/set-rows.cu b/ml/backend/ggml/ggml/src/ggml-cuda/set-rows.cu
new file mode 100644
index 000000000..079834364
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-cuda/set-rows.cu
@@ -0,0 +1,275 @@
+#include "set-rows.cuh"
+#include "cpy-utils.cuh"
+
+typedef void (*set_rows_kernel_t)(const char * src, char * dst);
+
+template<typename src_t, typename dst_t>
+__device__ __forceinline__ void set_rows_1(const src_t * src_f, dst_t * dst_f) {
+    convert_flt(src_f, dst_f);
+}
+
+// Generic quantized set_rows kernel template
+template<typename block_type, int qk, void (*quantize_func)(const float*, block_type*)>
+static __global__ void k_set_rows_quant(
+        const float * __restrict__ src0, const int64_t * __restrict__ src1, block_type * __restrict__ dst,
+        const int64_t ne00, const int64_t ne01, const int64_t ne02, const int64_t ne03,
+        const int64_t ne10, const int64_t ne11, const int64_t ne12, const int64_t ne13,
+        const int64_t s01, const int64_t s02, const int64_t s03,
+        const int64_t s10, const int64_t s11, const int64_t s12,
+        const int64_t s1, const int64_t s2, const int64_t s3) {
+
+    const int64_t i = int64_t(blockDim.x) * blockIdx.x + threadIdx.x;
+    const int64_t ne_total = (ne00 * ne01 * ne02 * ne03) / qk;
+
+    if (i >= ne_total) {
+        return;
+    }
+
+    const int64_t i_base = i * qk;
+    const int64_t i03 = i_base / (ne00 * ne01 * ne02);
+    const int64_t i02 = (i_base - i03 * ne00 * ne01 * ne02) / (ne00 * ne01);
+    const int64_t i01 = (i_base - i03 * ne00 * ne01 * ne02 - i02 * ne00 * ne01) / ne00;
+    const int64_t i00 = i_base - i03 * ne00 * ne01 * ne02 - i02 * ne00 * ne01 - i01 * ne00;
+
+    const int64_t i12 = i03 % ne12;
+    const int64_t i11 = i02 % ne11;
+    const int64_t i10 = i01;
+
+    const int64_t dst_row = *(src1 + i10*s10 + i11*s11 + i12*s12);
+
+    const float * src0_row = src0 + i01*s01 + i02*s02 + i03*s03;
+    block_type * dst_row_ptr = dst + (dst_row*s1 + i02*s2 + i03*s3) / sizeof(block_type);
+
+    const float * src_block = src0_row + i00;
+    block_type * dst_block = dst_row_ptr + i00 / qk;
+
+    quantize_func(src_block, dst_block);
+
+    GGML_UNUSED(ne10);
+    GGML_UNUSED(ne13);
+}
+
+// Template dispatch function for quantized set_rows
+template<typename block_type, int qk, void (*quantize_func)(const float*, block_type*)>
+static void set_rows_cuda_quant(
+        const float * src0_d, const int64_t * src1_d, block_type * dst_d,
+        const int64_t ne00, const int64_t ne01, const int64_t ne02, const int64_t ne03,
+        const int64_t ne10, const int64_t ne11, const int64_t ne12, const int64_t ne13,
+        const size_t nb01, const size_t nb02, const size_t nb03,
+        const size_t nb10, const size_t nb11, const size_t nb12,
+        const size_t nb1, const size_t nb2, const size_t nb3,
+        cudaStream_t stream) {
+
+    GGML_ASSERT(ne00 % qk == 0);
+    const int64_t ne_total = (ne00 * ne01 * ne02 * ne03) / qk;
+    const int num_blocks = (ne_total + CUDA_SET_ROWS_BLOCK_SIZE - 1) / CUDA_SET_ROWS_BLOCK_SIZE;
+    const dim3 block_size(CUDA_SET_ROWS_BLOCK_SIZE);
+    const dim3 grid_size(num_blocks);
+
+    const int64_t s01 = nb01/sizeof(float);
+    const int64_t s02 = nb02/sizeof(float);
+    const int64_t s03 = nb03/sizeof(float);
+    const int64_t s10 = nb10/sizeof(int64_t);
+    const int64_t s11 = nb11/sizeof(int64_t);
+    const int64_t s12 = nb12/sizeof(int64_t);
+    const int64_t s1 = nb1;
+    const int64_t s2 = nb2;
+    const int64_t s3 = nb3;
+
+    if (ne_total > 0) {
+        k_set_rows_quant<block_type, qk, quantize_func><<<grid_size, block_size, 0, stream>>>(
+            src0_d, src1_d, dst_d,
+            ne00, ne01, ne02, ne03,
+            ne10, ne11, ne12, ne13,
+            s01, s02, s03,
+            s10, s11, s12,
+            s1, s2, s3);
+    }
+}
+
+template<typename src_t, typename dst_t>
+static __global__ void k_set_rows(
+        const src_t * __restrict__ src0, const int64_t * __restrict__ src1, dst_t * __restrict__ dst,
+        const int64_t ne00, const int64_t ne01, const int64_t ne02, const int64_t ne03,
+        const int64_t ne10, const int64_t ne11, const int64_t ne12, const int64_t ne13,
+        const int64_t s01, const int64_t s02, const int64_t s03,
+        const int64_t s10, const int64_t s11, const int64_t s12,
+        const int64_t s1, const int64_t s2, const int64_t s3) {
+
+    const int64_t i = int64_t(blockDim.x) * blockIdx.x + threadIdx.x;
+    const int64_t ne_total = ne00 * ne01 * ne02 * ne03;
+
+    if (i >= ne_total) {
+        return;
+    }
+
+    const int64_t i03 = i / (ne00 * ne01 * ne02);
+    const int64_t i02 = (i - i03 * ne00 * ne01 * ne02) / (ne00 * ne01);
+    const int64_t i01 = (i - i03 * ne00 * ne01 * ne02 - i02 * ne00 * ne01) / ne00;
+    const int64_t i00 = i - i03 * ne00 * ne01 * ne02 - i02 * ne00 * ne01 - i01 * ne00;
+
+    const int64_t i12 = i03 % ne12;
+    const int64_t i11 = i02 % ne11;
+    const int64_t i10 = i01;
+
+    const int64_t dst_row = *(src1 + i10*s10 + i11*s11 + i12*s12);
+
+    const src_t * src0_row = src0 + i01*s01 + i02*s02 + i03*s03;
+    dst_t * dst_row_ptr = dst + dst_row*s1 + i02*s2 + i03*s3;
+
+    const src_t* src_elem = src0_row + i00;
+    dst_t* dst_elem = dst_row_ptr + i00;
+    set_rows_1(src_elem, dst_elem);
+
+    GGML_UNUSED(ne10);
+    GGML_UNUSED(ne13);
+}
+
+template<typename src_t, typename dst_t>
+static void set_rows_cuda(
+        const src_t * src0_d, const int64_t * src1_d, dst_t * dst_d,
+        const int64_t ne00, const int64_t ne01, const int64_t ne02, const int64_t ne03,
+        const int64_t ne10, const int64_t ne11, const int64_t ne12, const int64_t ne13,
+        const size_t nb01, const size_t nb02, const size_t nb03,
+        const size_t nb10, const size_t nb11, const size_t nb12,
+        const size_t nb1, const size_t nb2, const size_t nb3,
+        cudaStream_t stream) {
+
+    const int64_t ne_total = ne00 * ne01 * ne02 * ne03;
+    const int num_blocks = (ne_total + CUDA_SET_ROWS_BLOCK_SIZE - 1) / CUDA_SET_ROWS_BLOCK_SIZE;
+    const dim3 block_size(CUDA_SET_ROWS_BLOCK_SIZE);
+    const dim3 grid_size(num_blocks);
+
+
+    const int64_t s01 = nb01/sizeof(src_t);
+    const int64_t s02 = nb02/sizeof(src_t);
+    const int64_t s03 = nb03/sizeof(src_t);
+    const int64_t s10 = nb10/sizeof(int64_t);
+    const int64_t s11 = nb11/sizeof(int64_t);
+    const int64_t s12 = nb12/sizeof(int64_t);
+    const int64_t s1 = nb1/sizeof(dst_t);
+    const int64_t s2 = nb2/sizeof(dst_t);
+    const int64_t s3 = nb3/sizeof(dst_t);
+
+    if (ne_total > 0) {
+        k_set_rows<<<grid_size, block_size, 0, stream>>>(
+            src0_d, src1_d, dst_d,
+            ne00, ne01, ne02, ne03,
+            ne10, ne11, ne12, ne13,
+            s01, s02, s03,
+            s10, s11, s12,
+            s1, s2, s3);
+    }
+}
+
+
+void ggml_cuda_op_set_rows(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
+    const ggml_tensor * src0 = dst->src[0];
+    const ggml_tensor * src1 = dst->src[1];
+
+    GGML_ASSERT(src0->type == GGML_TYPE_F32);
+    GGML_ASSERT(src1->type == GGML_TYPE_I64);
+
+    GGML_TENSOR_BINARY_OP_LOCALS
+
+    const float * src0_d = (const float *)src0->data;
+    const int64_t * src1_d = (const int64_t *)src1->data;
+
+    cudaStream_t stream = ctx.stream();
+
+
+
+    if (dst->type == GGML_TYPE_F32) {
+        set_rows_cuda(
+            src0_d, src1_d, (float*)dst->data,
+            ne00, ne01, ne02, ne03,
+            ne10, ne11, ne12, ne13,
+            nb01, nb02, nb03,
+            nb10, nb11, nb12,
+            nb1, nb2, nb3,
+            stream
+        );
+    } else if (dst->type == GGML_TYPE_F16) {
+        set_rows_cuda(
+            src0_d, src1_d, (half*)dst->data,
+            ne00, ne01, ne02, ne03,
+            ne10, ne11, ne12, ne13,
+            nb01, nb02, nb03,
+            nb10, nb11, nb12,
+            nb1, nb2, nb3,
+            stream
+        );
+    } else if (dst->type == GGML_TYPE_BF16) {
+        set_rows_cuda(
+            src0_d, src1_d, (nv_bfloat16*)dst->data,
+            ne00, ne01, ne02, ne03,
+            ne10, ne11, ne12, ne13,
+            nb01, nb02, nb03,
+            nb10, nb11, nb12,
+            nb1, nb2, nb3,
+            stream
+        );
+    } else if (dst->type == GGML_TYPE_Q4_0) {
+        set_rows_cuda_quant<block_q4_0, QK4_0, quantize_f32_q4_0_block>(
+            src0_d, src1_d, (block_q4_0*)dst->data,
+            ne00, ne01, ne02, ne03,
+            ne10, ne11, ne12, ne13,
+            nb01, nb02, nb03,
+            nb10, nb11, nb12,
+            nb1, nb2, nb3,
+            stream
+        );
+    } else if (dst->type == GGML_TYPE_Q4_1) {
+        set_rows_cuda_quant<block_q4_1, QK4_1, quantize_f32_q4_1_block>(
+            src0_d, src1_d, (block_q4_1*)dst->data,
+            ne00, ne01, ne02, ne03,
+            ne10, ne11, ne12, ne13,
+            nb01, nb02, nb03,
+            nb10, nb11, nb12,
+            nb1, nb2, nb3,
+            stream
+        );
+    } else if (dst->type == GGML_TYPE_Q5_0) {
+        set_rows_cuda_quant<block_q5_0, QK5_0, quantize_f32_q5_0_block>(
+            src0_d, src1_d, (block_q5_0*)dst->data,
+            ne00, ne01, ne02, ne03,
+            ne10, ne11, ne12, ne13,
+            nb01, nb02, nb03,
+            nb10, nb11, nb12,
+            nb1, nb2, nb3,
+            stream
+        );
+    } else if (dst->type == GGML_TYPE_Q5_1) {
+        set_rows_cuda_quant<block_q5_1, QK5_1, quantize_f32_q5_1_block>(
+            src0_d, src1_d, (block_q5_1*)dst->data,
+            ne00, ne01, ne02, ne03,
+            ne10, ne11, ne12, ne13,
+            nb01, nb02, nb03,
+            nb10, nb11, nb12,
+            nb1, nb2, nb3,
+            stream
+        );
+    } else if (dst->type == GGML_TYPE_Q8_0) {
+        set_rows_cuda_quant<block_q8_0, QK8_0, quantize_f32_q8_0_block>(
+            src0_d, src1_d, (block_q8_0*)dst->data,
+            ne00, ne01, ne02, ne03,
+            ne10, ne11, ne12, ne13,
+            nb01, nb02, nb03,
+            nb10, nb11, nb12,
+            nb1, nb2, nb3,
+            stream
+        );
+    } else if (dst->type == GGML_TYPE_IQ4_NL) {
+        set_rows_cuda_quant<block_iq4_nl, QK4_NL, quantize_f32_iq4_nl_block>(
+            src0_d, src1_d, (block_iq4_nl*)dst->data,
+            ne00, ne01, ne02, ne03,
+            ne10, ne11, ne12, ne13,
+            nb01, nb02, nb03,
+            nb10, nb11, nb12,
+            nb1, nb2, nb3,
+            stream
+        );
+    } else {
+        GGML_ABORT("unsupported type %s", ggml_type_name(dst->type));
+    }
+}
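ggml_cuda_op_set_rows implements a scatter: row r of src0 is converted (or block-quantized) into row ids[r] of dst, with the index tensor broadcast over the outer two dimensions — the pattern used, for example, to write new token states into a KV cache. A plain-C++ sketch of the F32 case (illustrative only, not part of the patch):

    #include <cstdint>

    static void set_rows_ref(const float * src, const int64_t * ids, float * dst,
                             int64_t nrows, int64_t ncols) {
        for (int64_t r = 0; r < nrows; ++r) {
            const int64_t dr = ids[r];               // destination row index
            for (int64_t c = 0; c < ncols; ++c) {
                dst[dr*ncols + c] = src[r*ncols + c];
            }
        }
    }
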
diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/set-rows.cuh b/ml/backend/ggml/ggml/src/ggml-cuda/set-rows.cuh
new file mode 100644
index 000000000..c140c0873
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-cuda/set-rows.cuh
@@ -0,0 +1,7 @@
+#pragma once
+
+#include "common.cuh"
+
+#define CUDA_SET_ROWS_BLOCK_SIZE 256
+
+void ggml_cuda_op_set_rows(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/softcap.cu b/ml/backend/ggml/ggml/src/ggml-cuda/softcap.cu
new file mode 100644
index 000000000..40dfe45d6
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-cuda/softcap.cu
@@ -0,0 +1,34 @@
+#include "softcap.cuh"
+
+static __global__ void softcap_f32(const float * x, float * dst, const float scale, const float softcap, const int k) {
+    const int i = blockDim.x*blockIdx.x + threadIdx.x;
+
+    if (i >= k) {
+        return;
+    }
+
+    dst[i] = tanhf(scale * x[i]) * softcap;
+}
+
+static void softcap_f32_cuda(const float * x, float * dst, const float scale, const float softcap, const int k, cudaStream_t stream) {
+    const int num_blocks = (k + CUDA_SOFTCAP_BLOCK_SIZE - 1) / CUDA_SOFTCAP_BLOCK_SIZE;
+    softcap_f32<<<num_blocks, CUDA_SOFTCAP_BLOCK_SIZE, 0, stream>>>(x, dst, scale, softcap, k);
+}
+
+// fused GGML_OP_SCALE + GGML_UNARY_OP_TANH + GGML_OP_SCALE
+void ggml_cuda_op_softcap(ggml_backend_cuda_context & ctx, ggml_tensor * dst, ggml_tensor * src) {
+    const ggml_tensor * src0 = src->src[0];
+    const float * src0_d = (const float *)src0->data;
+    float * dst_d = (float *)dst->data;
+    cudaStream_t stream = ctx.stream();
+
+    GGML_ASSERT(src0->type == GGML_TYPE_F32);
+    GGML_ASSERT( dst->type == GGML_TYPE_F32);
+
+    float scale;
+    float softcap;
+    memcpy(&scale,   (float *) src->op_params + 0, sizeof(float));
+    memcpy(&softcap, (float *) dst->op_params + 0, sizeof(float));
+
+    softcap_f32_cuda(src0_d, dst_d, scale, softcap, ggml_nelements(src0), stream);
+}
diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/softcap.cuh b/ml/backend/ggml/ggml/src/ggml-cuda/softcap.cuh
new file mode 100644
index 000000000..6d34fb2be
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-cuda/softcap.cuh
@@ -0,0 +1,5 @@
+#include "common.cuh"
+
+#define CUDA_SOFTCAP_BLOCK_SIZE 256
+
+void ggml_cuda_op_softcap(ggml_backend_cuda_context & ctx, ggml_tensor * dst, ggml_tensor * src);
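As the comment in softcap.cu notes, this op collapses the GGML_OP_SCALE → GGML_UNARY_OP_TANH → GGML_OP_SCALE chain used for logit soft-capping (as in Gemma-style models): y = softcap · tanh(scale · x), which is bounded in (−softcap, softcap) and approximately linear for small |x| when scale = 1/softcap. Reference (illustrative):

    #include <cmath>

    static float softcap_ref(float x, float scale, float softcap) {
        return tanhf(scale * x) * softcap;   // saturates at ±softcap
    }
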
diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/softmax.cu b/ml/backend/ggml/ggml/src/ggml-cuda/softmax.cu
index aac6e0999..eeacde0bd 100644
--- a/ml/backend/ggml/ggml/src/ggml-cuda/softmax.cu
+++ b/ml/backend/ggml/ggml/src/ggml-cuda/softmax.cu
@@ -2,6 +2,7 @@
 #include "ggml.h"
 #include "softmax.cuh"
 #include <cmath>
+#include <cstdint>
 
 template <typename T>
 static __device__ __forceinline__ float t2f32(T val) {
     return (float) val;
 }
@@ -13,6 +14,29 @@ __device__ float __forceinline__ t2f32<half>(half val) {
     return __half2float(val);
 }
 
+struct soft_max_params {
+
+    int64_t nheads;
+    uint32_t n_head_log2;
+    int64_t ncols;
+    int64_t nrows_x;
+    int64_t nrows_y;
+    int64_t ne00;
+    int64_t ne01;
+    int64_t ne02;
+    int64_t ne03;
+    int64_t nb11;
+    int64_t nb12;
+    int64_t nb13;
+
+    int64_t ne12;
+    int64_t ne13;
+    float scale;
+    float max_bias;
+    float m0;
+    float m1;
+};
+
 // When ncols_template == 0 the bounds for the loops in this function are not known and can't be unrolled.
 // As we want to keep pragma unroll for all other cases we suppress the clang transformation warning here.
 #ifdef __clang__
@@ -21,16 +45,24 @@ __device__ float __forceinline__ t2f32<half>(half val) {
 #endif // __clang__
 template <bool use_shared, int ncols_template, int block_size_template, typename T>
 static __global__ void soft_max_f32(
-        const float * x, const T * mask, float * dst, const int ncols_par, const int nrows_y,
-        const float scale, const float max_bias, const float m0, const float m1, uint32_t n_head_log2) {
-    const int ncols = ncols_template == 0 ? ncols_par : ncols_template;
+        const float * x, const T * mask, const float * sinks, float * dst, const soft_max_params p) {
+    const int ncols = ncols_template == 0 ? p.ncols : ncols_template;
 
     const int tid  = threadIdx.x;
-    const int rowx = blockIdx.x;
-    const int rowy = rowx % nrows_y; // broadcast the mask in the row dimension
+
+    const int64_t i03 = blockIdx.z;
+    const int64_t i02 = blockIdx.y;
+    const int64_t i01 = blockIdx.x;
+
+    //TODO: noncontiguous inputs/outputs
+    const int rowx = blockIdx.x + blockIdx.y * gridDim.x + blockIdx.z * gridDim.x * gridDim.y;
+
+    const int64_t i11 = i01;
+    const int64_t i12 = i02 % p.ne12;
+    const int64_t i13 = i03 % p.ne13;
 
     x    += int64_t(rowx)*ncols;
-    mask += int64_t(rowy)*ncols * (mask != nullptr);
+    mask += (i11*p.nb11 + i12*p.nb12 + i13*p.nb13) / sizeof(T) * (mask != nullptr);
     dst  += int64_t(rowx)*ncols;
 
     const int block_size = block_size_template == 0 ? blockDim.x : block_size_template;
@@ -38,14 +70,14 @@ static __global__ void soft_max_f32(
     const int warp_id = threadIdx.x / WARP_SIZE;
     const int lane_id = threadIdx.x % WARP_SIZE;
 
-    const float slope = get_alibi_slope(max_bias, rowx/nrows_y, n_head_log2, m0, m1);
+    const float slope = get_alibi_slope(p.max_bias, i02, p.n_head_log2, p.m0, p.m1);
 
     extern __shared__ float data_soft_max_f32[];
     float * buf_iw = data_soft_max_f32; // shared memory buffer for inter-warp communication
     // shared memory buffer to cache values between iterations:
     float * vals = use_shared ? buf_iw + WARP_SIZE : dst;
 
-    float max_val = -INFINITY;
+    float max_val = sinks ? sinks[i02] : -INFINITY;
 
 #pragma unroll
     for (int col0 = 0; col0 < ncols; col0 += block_size) {
@@ -55,7 +87,7 @@ static __global__ void soft_max_f32(
             break;
         }
 
-        const float val = x[col]*scale + (mask ? slope*t2f32(mask[col]) : 0.0f);
+        const float val = x[col]*p.scale + (mask ? slope*t2f32(mask[col]) : 0.0f);
 
         vals[col] = val;
         max_val = max(max_val, val);
@@ -111,6 +143,10 @@ static __global__ void soft_max_f32(
         tmp = warp_reduce_sum(tmp);
     }
 
+    if (sinks) {
+        tmp += expf(sinks[i02] - max_val);
+    }
+
     const float inv_sum = 1.0f / tmp;
 
 #pragma unroll
@@ -150,64 +186,58 @@ static __global__ void soft_max_back_f32(
     }
 }
 
+template<int... Ns, typename T>
+static void launch_soft_max_kernels(const float * x, const T * mask, const float * sinks, float * dst,
+                                    const soft_max_params & p, cudaStream_t stream, dim3 block_dims, dim3 block_nums, size_t nbytes_shared)
+{
+    const int id       = ggml_cuda_get_device();
+    const size_t smpbo = ggml_cuda_info().devices[id].smpbo;
+
+    auto launch_kernel = [=](auto I) -> bool {
+        constexpr int ncols = decltype(I)::value;
+        constexpr int block = (ncols > 1024 ?
1024 : ncols); + + if (p.ncols == ncols) { + CUDA_SET_SHARED_MEMORY_LIMIT((soft_max_f32), smpbo); + soft_max_f32<<>> + (x, mask, sinks, dst, p); + return true; + } + return false; + }; + + // unary fold over launch_kernel + if ((launch_kernel(std::integral_constant{}) || ...)) { + return; + } + + //default case + CUDA_SET_SHARED_MEMORY_LIMIT((soft_max_f32), smpbo); + soft_max_f32<<>>(x, mask, sinks, dst, p); +} + + template -static void soft_max_f32_cuda(const float * x, const T * mask, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const float scale, const float max_bias, cudaStream_t stream) { +static void soft_max_f32_cuda(const float * x, const T * mask, const float * sinks, float * dst, const soft_max_params & params, cudaStream_t stream) { int nth = WARP_SIZE; + const int64_t ncols_x = params.ncols; + while (nth < ncols_x && nth < CUDA_SOFT_MAX_BLOCK_SIZE) nth *= 2; const dim3 block_dims(nth, 1, 1); - const dim3 block_nums(nrows_x, 1, 1); + const dim3 block_nums(params.ne01, params.ne02, params.ne03); const size_t nbytes_shared = (GGML_PAD(ncols_x, WARP_SIZE) + WARP_SIZE)*sizeof(float); static_assert(CUDA_SOFT_MAX_BLOCK_SIZE == 1024, "These values need to be adjusted."); - const uint32_t n_head = nrows_x/nrows_y; - const uint32_t n_head_log2 = 1u << (uint32_t) floorf(log2f((float) n_head)); - const float m0 = powf(2.0f, -(max_bias ) / n_head_log2); - const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2); + const int id = ggml_cuda_get_device(); + const size_t smpbo = ggml_cuda_info().devices[id].smpbo; - // FIXME: this limit could be raised by ~2-4x on Ampere or newer - if (nbytes_shared < ggml_cuda_info().devices[ggml_cuda_get_device()].smpb) { - switch (ncols_x) { - case 32: - soft_max_f32<<>> - (x, mask, dst, ncols_x, nrows_y, scale, max_bias, m0, m1, n_head_log2); - break; - case 64: - soft_max_f32<<>> - (x, mask, dst, ncols_x, nrows_y, scale, max_bias, m0, m1, n_head_log2); - break; - case 128: - soft_max_f32<<>> - (x, mask, dst, ncols_x, nrows_y, scale, max_bias, m0, m1, n_head_log2); - break; - case 256: - soft_max_f32<<>> - (x, mask, dst, ncols_x, nrows_y, scale, max_bias, m0, m1, n_head_log2); - break; - case 512: - soft_max_f32<<>> - (x, mask, dst, ncols_x, nrows_y, scale, max_bias, m0, m1, n_head_log2); - break; - case 1024: - soft_max_f32<<>> - (x, mask, dst, ncols_x, nrows_y, scale, max_bias, m0, m1, n_head_log2); - break; - case 2048: - soft_max_f32<<>> - (x, mask, dst, ncols_x, nrows_y, scale, max_bias, m0, m1, n_head_log2); - break; - case 4096: - soft_max_f32<<>> - (x, mask, dst, ncols_x, nrows_y, scale, max_bias, m0, m1, n_head_log2); - break; - default: - soft_max_f32<<>> - (x, mask, dst, ncols_x, nrows_y, scale, max_bias, m0, m1, n_head_log2); - break; - } + + if (nbytes_shared <= smpbo) { + launch_soft_max_kernels<32, 64, 128, 256, 512, 1024, 2048, 4096>(x, mask, sinks, dst, params, stream, block_dims, block_nums, nbytes_shared); } else { const size_t nbytes_shared_low = WARP_SIZE*sizeof(float); - soft_max_f32<<>>(x, mask, dst, ncols_x, nrows_y, scale, max_bias, m0, m1, n_head_log2); + soft_max_f32<<>>(x, mask, sinks, dst, params); } } @@ -223,9 +253,11 @@ static void soft_max_back_f32_cuda( void ggml_cuda_op_soft_max(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; + const ggml_tensor * src2 = dst->src[2]; const float * src0_d = (const float *) src0->data; const void * src1_d = src1 ? 
(const void *) src1->data : nullptr; + const void * src2_d = src2 ? (const void *) src2->data : nullptr; float * dst_d = (float *) dst->data; cudaStream_t stream = ctx.stream(); @@ -235,10 +267,11 @@ void ggml_cuda_op_soft_max(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { GGML_ASSERT(!src1 || src1->type == GGML_TYPE_F16 || src1->type == GGML_TYPE_F32); // src1 contains mask and it is optional - const int64_t ne00 = src0->ne[0]; const int64_t nrows_x = ggml_nrows(src0); const int64_t nrows_y = src0->ne[1]; + const int64_t ne00 = src0->ne[0]; + float scale = 1.0f; float max_bias = 0.0f; @@ -247,10 +280,44 @@ void ggml_cuda_op_soft_max(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const bool use_f16 = (src1 && src1->type == GGML_TYPE_F16); + const int64_t nb11 = src1 ? src1->nb[1] : 1; + const int64_t nb12 = src1 ? src1->nb[2] : 1; + const int64_t nb13 = src1 ? src1->nb[3] : 1; + + const int64_t ne12 = src1 ? src1->ne[2] : 1; + const int64_t ne13 = src1 ? src1->ne[3] : 1; + + const uint32_t n_head = src0->ne[2]; + const uint32_t n_head_log2 = 1u << (uint32_t) floorf(log2f((float) n_head)); + + const float m0 = powf(2.0f, -(max_bias ) / n_head_log2); + const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2); + + + soft_max_params params = {}; + params.nheads = src0->ne[2]; + params.n_head_log2 = n_head_log2; + params.ncols = ne00; + params.nrows_x = nrows_x; + params.nrows_y = nrows_y; + params.ne00 = src0->ne[0]; + params.ne01 = src0->ne[1]; + params.ne02 = src0->ne[2]; + params.ne03 = src0->ne[3]; + params.nb11 = nb11; + params.nb12 = nb12; + params.nb13 = nb13; + params.ne12 = ne12; + params.ne13 = ne13; + params.scale = scale; + params.max_bias = max_bias; + params.m0 = m0; + params.m1 = m1; + if (use_f16) { - soft_max_f32_cuda(src0_d, (const half *) src1_d, dst_d, ne00, nrows_x, nrows_y, scale, max_bias, stream); + soft_max_f32_cuda(src0_d, (const half *) src1_d, (const float *) src2_d, dst_d, params, stream); } else { - soft_max_f32_cuda(src0_d, (const float *) src1_d, dst_d, ne00, nrows_x, nrows_y, scale, max_bias, stream); + soft_max_f32_cuda(src0_d, (const float *) src1_d, (const float *) src2_d, dst_d, params, stream); } } diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/ssm-conv.cu b/ml/backend/ggml/ggml/src/ggml-cuda/ssm-conv.cu index f63757196..419797336 100644 --- a/ml/backend/ggml/ggml/src/ggml-cuda/ssm-conv.cu +++ b/ml/backend/ggml/ggml/src/ggml-cuda/ssm-conv.cu @@ -107,8 +107,11 @@ static void ssm_conv_f32_cuda(const float * src0, const float * src1, const int if (nc == 4) { ssm_conv_f32<<>>(src0, src1, src0_nb0, src0_nb1, src0_nb2, src1_nb1, dst, dst_nb0, dst_nb1, dst_nb2, n_t); + } else if (nc == 3) { + ssm_conv_f32<<>>(src0, src1, src0_nb0, src0_nb1, src0_nb2, src1_nb1, + dst, dst_nb0, dst_nb1, dst_nb2, n_t); } else { - GGML_ABORT("Only support kernel size = 4 now."); + GGML_ABORT("Only support kernel size = 3 or size = 4 right now."); } } else { if (nc == 4) { @@ -116,8 +119,13 @@ static void ssm_conv_f32_cuda(const float * src0, const float * src1, const int dim3 blocks(n_s, (nr + threads - 1) / threads, (n_t + split_n_t - 1) / split_n_t); ssm_conv_long_token_f32<<>>( src0, src1, src0_nb0, src0_nb1, src0_nb2, src1_nb1, dst, dst_nb0, dst_nb1, dst_nb2, n_t); + } else if (nc == 3) { + const int64_t split_n_t = 32; + dim3 blocks(n_s, (nr + threads - 1) / threads, (n_t + split_n_t - 1) / split_n_t); + ssm_conv_long_token_f32<<>>( + src0, src1, src0_nb0, src0_nb1, src0_nb2, src1_nb1, dst, dst_nb0, dst_nb1, dst_nb2, n_t); } else { - GGML_ABORT("Only support 
kernel size = 4 right now."); + GGML_ABORT("Only support kernel size = 3 or size = 4 right now."); } } } diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/ssm-scan.cu b/ml/backend/ggml/ggml/src/ggml-cuda/ssm-scan.cu index 37ee208c0..c9184398b 100644 --- a/ml/backend/ggml/ggml/src/ggml-cuda/ssm-scan.cu +++ b/ml/backend/ggml/ggml/src/ggml-cuda/ssm-scan.cu @@ -4,14 +4,15 @@ template __global__ void __launch_bounds__(splitD, 2) ssm_scan_f32(const float * __restrict__ src0, const float * __restrict__ src1, const float * __restrict__ src2, const float * __restrict__ src3, const float * __restrict__ src4, const float * __restrict__ src5, - const int src0_nb1, const int src0_nb2, const int src1_nb0, const int src1_nb1, const int src1_nb2, - const int src1_nb3, const int src2_nb0, const int src2_nb1, const int src2_nb2, const int src3_nb1, - const int src4_nb1, const int src4_nb2, const int src5_nb1, const int src5_nb2, - float * __restrict__ dst, const int64_t L) { - GGML_UNUSED(src1_nb0); - GGML_UNUSED(src2_nb0); - const int bidx = blockIdx.x; // split along B - const int bidy = blockIdx.y; // split along D + const int32_t * __restrict__ src6, float * __restrict__ dst, + const int src0_nb2, const int src0_nb3, const int src1_nb2, const int src1_nb3, + const int src2_nb1, const int src2_nb2, const int src3_nb1, + const int src4_nb2, const int src4_nb3, const int src5_nb2, const int src5_nb3, + const int64_t s_off, const int64_t d_inner, const int64_t L) { + + constexpr int warp_size = ggml_cuda_get_physical_warp_size(); + const int bidx = blockIdx.x; // split along B (sequences) + const int bidy = blockIdx.y; // split along D (d_inner) const int tid = threadIdx.x; const int wid = tid / 32; const int wtid = tid % 32; @@ -22,38 +23,38 @@ __global__ void __launch_bounds__(splitD, 2) float * smem_A = smem; float * smem_s0 = smem_A + splitD * stride_sA; - const float * s0_block = (const float *) ((const char *) src0 + bidx * src0_nb2 + bidy * splitD * src0_nb1); - const float * x_block = (const float *) ((const char *) src1 + (bidx * src1_nb2) + bidy * splitD * sizeof(float)); + const float * s0_block = (const float *) ((const char *) src0 + src6[bidx] * src0_nb3 + bidy * splitD * src0_nb2); + const float * x_block = (const float *) ((const char *) src1 + (bidx * src1_nb3) + bidy * splitD * sizeof(float)); const float * dt_block = (const float *) ((const char *) src2 + (bidx * src2_nb2) + bidy * splitD * sizeof(float)); const float * A_block = (const float *) ((const char *) src3 + bidy * splitD * src3_nb1); - const float * B_block = (const float *) ((const char *) src4 + (bidx * src4_nb2)); - const float * C_block = (const float *) ((const char *) src5 + (bidx * src5_nb2)); - float * y_block = (float *) ((char *) dst + (bidx * src1_nb2) + bidy * splitD * sizeof(float)); - float * s_block = (float *) ((char *) dst + src1_nb3 + bidx * src0_nb2 + bidy * splitD * src0_nb1); + const float * B_block = (const float *) ((const char *) src4 + (bidx * src4_nb3)); + const float * C_block = (const float *) ((const char *) src5 + (bidx * src5_nb3)); + float * y_block = (float *) ((char *) dst + (bidx * d_inner * L * sizeof(float)) + bidy * splitD * sizeof(float)); + float * s_block = (float *) ((char *) dst + s_off + bidx * src0_nb3 + bidy * splitD * src0_nb2); - const int stride_s0 = src0_nb1 / sizeof(float); - const int stride_x = src1_nb1 / sizeof(float); + const int stride_s0 = src0_nb2 / sizeof(float); + const int stride_x = src1_nb2 / sizeof(float); const int stride_dt = src2_nb1 / sizeof(float); const int 
stride_A = src3_nb1 / sizeof(float); - const int stride_B = src4_nb1 / sizeof(float); - const int stride_C = src5_nb1 / sizeof(float); + const int stride_B = src4_nb2 / sizeof(float); + const int stride_C = src5_nb2 / sizeof(float); const int stride_s = stride_s0; - const int stride_y = stride_x; + const int stride_y = d_inner; // can N not be 16? for example 32? if (N == 16) { #pragma unroll for (size_t i = 0; i < splitD / 4; i += 2) { - float value = A_block[(wid * warpSize + i) * stride_A + wtid]; + float value = A_block[(wid * warp_size + i) * stride_A + wtid]; // todo: bank conflict // I am always confused with how to use the swizzling method to solve // bank conflit. Hoping somebody can tell me. - smem_A[(wid * warpSize + i) * stride_sA + wtid + ((wtid / 16) > 0 ? 1 : 0)] = value; + smem_A[(wid * warp_size + i) * stride_sA + wtid + ((wtid / 16) > 0 ? 1 : 0)] = value; } #pragma unroll for (size_t i = 0; i < splitD / 4; i += 2) { - float value = s0_block[(wid * warpSize + i) * stride_s0 + wtid]; - smem_s0[(wid * warpSize + i) * stride_ss0 + wtid + ((wtid / 16) > 0 ? 1 : 0)] = value; + float value = s0_block[(wid * warp_size + i) * stride_s0 + wtid]; + smem_s0[(wid * warp_size + i) * stride_ss0 + wtid + ((wtid / 16) > 0 ? 1 : 0)] = value; } } @@ -82,24 +83,167 @@ __global__ void __launch_bounds__(splitD, 2) } } +// assumes as many threads as d_state +template +__global__ void __launch_bounds__(d_state, 1) + ssm_scan_f32_group( + const float * __restrict__ src0, const float * __restrict__ src1, const float * __restrict__ src2, + const float * __restrict__ src3, const float * __restrict__ src4, const float * __restrict__ src5, + const int32_t * __restrict__ src6, float * __restrict__ dst, + const int src0_nb2, const int src0_nb3, const int src1_nb2, const int src1_nb3, + const int src2_nb1, const int src2_nb2, const int src3_nb1, + const int src4_nb2, const int src4_nb3, const int src5_nb2, const int src5_nb3, + const int64_t s_off, const int64_t n_head, const int64_t d_head, const int64_t n_group, const int64_t n_tok) { + + const int head_idx = (blockIdx.x * splitH) / d_head; + const int head_off = ((blockIdx.x * splitH) % d_head) * sizeof(float); + const int seq_idx = blockIdx.y; + + const int group_off = (head_idx & (n_group - 1)) * d_state * sizeof(float); + + const float * s0_block = (const float *) ((const char *) src0 + src6[seq_idx] * src0_nb3 + head_idx * src0_nb2 + head_off * d_state); + const float * x_block = (const float *) ((const char *) src1 + (seq_idx * src1_nb3) + blockIdx.x * splitH * sizeof(float)); + const float * dt_block = (const float *) ((const char *) src2 + (seq_idx * src2_nb2) + head_idx * sizeof(float)); + const float * A_block = (const float *) ((const char *) src3 + head_idx * src3_nb1); + const float * B_block = (const float *) ((const char *) src4 + (seq_idx * src4_nb3) + (group_off)); + const float * C_block = (const float *) ((const char *) src5 + (seq_idx * src5_nb3) + (group_off)); + float * y_block = dst + (seq_idx * n_tok * n_head * d_head) + blockIdx.x * splitH; + float * s_block = (float *) ((char *) dst + s_off + seq_idx * src0_nb3 + head_idx * src0_nb2 + head_off * d_state); + + // strides across n_seq_tokens + const int stride_x = src1_nb2 / sizeof(float); + const int stride_dt = src2_nb1 / sizeof(float); + const int stride_B = src4_nb2 / sizeof(float); + const int stride_C = src5_nb2 / sizeof(float); + const int stride_y = n_head * d_head; + + float state[splitH]; + // for the parallel accumulation + __shared__ float stateC[splitH * d_state]; + 
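+    // Each of the d_state threads owns one column of the state; the splitH
+    // registers in state[] hold that column for splitH consecutive elements
+    // of the head. Per token the scan below applies
+    //     state = state*expf(dt_sp*A) + B*(x*dt_sp)
+    // and stateC caches state*C so the block can reduce it into y.
+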
+#pragma unroll + for (int j = 0; j < splitH; j++) { + state[j] = s0_block[j * d_state + threadIdx.x]; + } + + for (int64_t i = 0; i < n_tok; i++) { + // TODO: only calculate dA and dt_soft_plus once per head instead of every splitH head elements + // TODO: only calculate B and C once per head group + // NOTE: dt_soft_plus, dA and x_dt have the same value across threads here. + float dt_soft_plus = dt_block[i * stride_dt]; + if (dt_soft_plus <= 20.0f) { + dt_soft_plus = log1pf(expf(dt_soft_plus)); + } + const float dA = expf(dt_soft_plus * A_block[0]); + const float B = B_block[i * stride_B + threadIdx.x]; + const float C = C_block[i * stride_C + threadIdx.x]; + + // across d_head +#pragma unroll + for (int j = 0; j < splitH; j++) { + const float x_dt = x_block[i * stride_x + j] * dt_soft_plus; + + state[j] = (state[j] * dA) + (B * x_dt); + + stateC[j * d_state + threadIdx.x] = state[j] * C; + } + + __syncthreads(); + + // parallel accumulation for stateC + // TODO: simplify + { + static_assert((d_state & -d_state) == d_state, "the state size has to be a power of 2"); + static_assert((splitH & -splitH) == splitH, "splitH has to be a power of 2"); + + // reduce until w matches the warp size + // TODO: does this work even when the physical warp size is 64? +#pragma unroll + for (int w = d_state; w > WARP_SIZE; w >>= 1) { + // (assuming there are d_state threads) +#pragma unroll + for (int j = 0; j < ((w >> 1) * splitH + d_state - 1) / d_state; j++) { + // TODO: check for bank conflicts + const int k = (threadIdx.x % (w >> 1)) + (d_state * (threadIdx.x / (w >> 1))) + j * d_state * (d_state / (w >> 1)); + stateC[k] += stateC[k + (w >> 1)]; + + } + __syncthreads(); + } + + static_assert(splitH >= d_state / WARP_SIZE); + +#pragma unroll + for (int j = 0; j < splitH / (d_state / WARP_SIZE); j++) { + float y = stateC[(threadIdx.x % WARP_SIZE) + d_state * (threadIdx.x / WARP_SIZE) + j * d_state * (d_state / WARP_SIZE)]; + y = warp_reduce_sum(y); + + // store the above accumulations + if (threadIdx.x % WARP_SIZE == 0) { + const int k = threadIdx.x / WARP_SIZE + j * (d_state / WARP_SIZE); + y_block[i * stride_y + k] = y; + } + } + } + } + + // write back the state +#pragma unroll + for (int j = 0; j < splitH; j++) { + s_block[j * d_state + threadIdx.x] = state[j]; + } +} + static void ssm_scan_f32_cuda(const float * src0, const float * src1, const float * src2, const float * src3, - const float * src4, const float * src5, const int src0_nb1, const int src0_nb2, - const int src1_nb0, const int src1_nb1, const int src1_nb2, const int src1_nb3, - const int src2_nb0, const int src2_nb1, const int src2_nb2, const int src3_nb1, - const int src4_nb1, const int src4_nb2, const int src5_nb1, const int src5_nb2, - float * dst, const int64_t N, const int64_t D, const int64_t L, const int64_t B, + const float * src4, const float * src5, const int32_t * src6, float * dst, + const int src0_nb2, const int src0_nb3, const int src1_nb2, const int src1_nb3, const int src2_nb1, + const int src2_nb2, const int src3_nb1, const int src4_nb2, const int src4_nb3, const int src5_nb2, + const int src5_nb3, const int64_t s_off, const int64_t d_state, const int64_t head_dim, + const int64_t n_head, const int64_t n_group, const int64_t n_tok, const int64_t n_seq, cudaStream_t stream) { - const int threads = 128; - // todo: consider D cannot be divided,does this situation exist? 
-    GGML_ASSERT(D % threads == 0);
-    const dim3 blocks(B, (D + threads - 1) / threads, 1);
-    const int smem_size = (threads * (N + 1) * 2) * sizeof(float);
-    if (N == 16) {
-        ssm_scan_f32<128, 16><<<blocks, threads, smem_size, stream>>>(
-            src0, src1, src2, src3, src4, src5, src0_nb1, src0_nb2, src1_nb0, src1_nb1, src1_nb2, src1_nb3, src2_nb0,
-            src2_nb1, src2_nb2, src3_nb1, src4_nb1, src4_nb2, src5_nb1, src5_nb2, dst, L);
+    // NOTE: if you change conditions here, be sure to update the corresponding supports_op condition!
+    if (src3_nb1 == sizeof(float)) {
+        // Mamba-2
+        if (d_state == 128) {
+            const int threads = 128;
+            GGML_ASSERT(d_state % threads == 0);
+            // NOTE: can be any power of two between 4 and 64
+            const int splitH = 16;
+            GGML_ASSERT(head_dim % splitH == 0);
+            const dim3 blocks((n_head * head_dim + (splitH - 1)) / splitH, n_seq, 1);
+            ssm_scan_f32_group<16, 128><<<blocks, threads, 0, stream>>>(
+                src0, src1, src2, src3, src4, src5, src6, dst,
+                src0_nb2, src0_nb3, src1_nb2, src1_nb3, src2_nb1, src2_nb2, src3_nb1,
+                src4_nb2, src4_nb3, src5_nb2, src5_nb3, s_off, n_head, head_dim, n_group, n_tok);
+        } else if (d_state == 256) { // Falcon-H1
+            const int threads = 256;
+            // NOTE: can be any power of two between 8 and 64
+            const int splitH = 16;
+            GGML_ASSERT(head_dim % splitH == 0);
+            const dim3 blocks((n_head * head_dim + (splitH - 1)) / splitH, n_seq, 1);
+            ssm_scan_f32_group<16, 256><<<blocks, threads, 0, stream>>>(
+                src0, src1, src2, src3, src4, src5, src6, dst,
+                src0_nb2, src0_nb3, src1_nb2, src1_nb3, src2_nb1, src2_nb2, src3_nb1,
+                src4_nb2, src4_nb3, src5_nb2, src5_nb3, s_off, n_head, head_dim, n_group, n_tok);
+        } else {
+            GGML_ABORT("doesn't support d_state!=(128 or 256).");
+        }
     } else {
-        GGML_ABORT("doesn't support N!=16.");
+        const int threads = 128;
+        // Mamba-1
+        GGML_ASSERT(n_head % threads == 0);
+        GGML_ASSERT(head_dim == 1);
+        GGML_ASSERT(n_group == 1);
+        const dim3 blocks(n_seq, (n_head + threads - 1) / threads, 1);
+        const int smem_size = (threads * (d_state + 1) * 2) * sizeof(float);
+        if (d_state == 16) {
+            ssm_scan_f32<128, 16><<<blocks, threads, smem_size, stream>>>(
+                src0, src1, src2, src3, src4, src5, src6, dst,
+                src0_nb2, src0_nb3, src1_nb2, src1_nb3, src2_nb1, src2_nb2,
+                src3_nb1, src4_nb2, src4_nb3, src5_nb2, src5_nb3, s_off, n_head, n_tok);
+        } else {
+            GGML_ABORT("doesn't support d_state!=16.");
+        }
     }
 }
 
@@ -110,30 +254,25 @@ void ggml_cuda_op_ssm_scan(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
     const struct ggml_tensor * src3 = dst->src[3]; // A
     const struct ggml_tensor * src4 = dst->src[4]; // B
     const struct ggml_tensor * src5 = dst->src[5]; // C
-
-    // const int64_t d_state = src0->ne[0];
-    // const int64_t d_inner = src0->ne[1];
-    // const int64_t l = src1->ne[1];
-    // const int64_t b = src0->ne[2];
+    const struct ggml_tensor * src6 = dst->src[6]; // ids
 
     const int64_t nc  = src0->ne[0]; // d_state
-    const int64_t nr  = src0->ne[1]; // d_inner
-    const int64_t n_t = src1->ne[1]; // number of tokens per sequence
-    const int64_t n_s = src0->ne[2]; // number of sequences in the batch
+    const int64_t nr  = src0->ne[1]; // head_dim or 1
+    const int64_t nh  = src1->ne[1]; // n_head
+    const int64_t ng  = src4->ne[1]; // n_group
+    const int64_t n_t = src1->ne[2]; // number of tokens per sequence
+    const int64_t n_s = src1->ne[3]; // number of sequences in the batch
 
-    GGML_ASSERT(ggml_nelements(src1) + ggml_nelements(src0) == ggml_nelements(dst));
+    const int64_t s_off = ggml_nelements(src1) * sizeof(float);
+
+    GGML_ASSERT(ggml_nelements(src1) + nc*nr*nh*n_s == ggml_nelements(dst));
     GGML_ASSERT(src0->nb[0] == sizeof(float));
     GGML_ASSERT(src1->nb[0] == sizeof(float));
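The s_off constant and the nelements assert just above encode the packed output layout the kernels rely on: dst holds the per-token outputs first, followed by the updated per-sequence states (sketch):

    // dst = [ y : ggml_nelements(src1) floats | states : nc*nr*nh*n_s floats ]
    // s_off = ggml_nelements(src1) * sizeof(float)  -- byte offset of the state region
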
GGML_ASSERT(src2->nb[0] == sizeof(float)); GGML_ASSERT(src3->nb[0] == sizeof(float)); GGML_ASSERT(src4->nb[0] == sizeof(float)); GGML_ASSERT(src5->nb[0] == sizeof(float)); - // required for the dot product between s and C - GGML_ASSERT(src0->nb[1] == src0->ne[0] * sizeof(float)); - // required for per-sequence offsets for states - GGML_ASSERT(src0->nb[2] == src0->ne[0] * src0->ne[1] * sizeof(float)); - // required to get correct offset for state destination (i.e. src1->nb[3]) - GGML_ASSERT(src1->nb[3] == src1->ne[0] * src1->ne[1] * src1->ne[2] * sizeof(float)); + GGML_ASSERT(src6->nb[0] == sizeof(int32_t)); const float * src0_d = (const float *) src0->data; const float * src1_d = (const float *) src1->data; @@ -141,13 +280,16 @@ void ggml_cuda_op_ssm_scan(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const float * src3_d = (const float *) src3->data; const float * src4_d = (const float *) src4->data; const float * src5_d = (const float *) src5->data; + const int32_t * src6_d = (const int32_t *) src6->data; float * dst_d = (float *) dst->data; cudaStream_t stream = ctx.stream(); GGML_ASSERT(src0->type == GGML_TYPE_F32); + GGML_ASSERT(src6->type == GGML_TYPE_I32); GGML_ASSERT(dst->type == GGML_TYPE_F32); - ssm_scan_f32_cuda(src0_d, src1_d, src2_d, src3_d, src4_d, src5_d, src0->nb[1], src0->nb[2], src1->nb[0], - src1->nb[1], src1->nb[2], src1->nb[3], src2->nb[0], src2->nb[1], src2->nb[2], src3->nb[1], - src4->nb[1], src4->nb[2], src5->nb[1], src5->nb[2], dst_d, nc, nr, n_t, n_s, stream); + ssm_scan_f32_cuda(src0_d, src1_d, src2_d, src3_d, src4_d, src5_d, src6_d, dst_d, + src0->nb[2], src0->nb[3], src1->nb[2], src1->nb[3], src2->nb[1], src2->nb[2], + src3->nb[1], src4->nb[2], src4->nb[3], src5->nb[2], src5->nb[3], + s_off, nc, nr, nh, ng, n_t, n_s, stream); } diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqfloat-cpb16.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqfloat-cpb16.cu deleted file mode 100644 index 2d94e65c2..000000000 --- a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqfloat-cpb16.cu +++ /dev/null @@ -1,10 +0,0 @@ -// This file has been autogenerated by generate_cu_files.py, do not edit manually. - -#include "../fattn-wmma-f16.cuh" - -DECL_FATTN_WMMA_F16_CASE(64, 16, float); -DECL_FATTN_WMMA_F16_CASE(80, 16, float); -DECL_FATTN_WMMA_F16_CASE(96, 16, float); -DECL_FATTN_WMMA_F16_CASE(112, 16, float); -DECL_FATTN_WMMA_F16_CASE(128, 16, float); -DECL_FATTN_WMMA_F16_CASE(256, 16, float); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqfloat-cpb32.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqfloat-cpb32.cu deleted file mode 100644 index c3d9df3c4..000000000 --- a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqfloat-cpb32.cu +++ /dev/null @@ -1,9 +0,0 @@ -// This file has been autogenerated by generate_cu_files.py, do not edit manually. 
- -#include "../fattn-wmma-f16.cuh" - -DECL_FATTN_WMMA_F16_CASE(64, 32, float); -DECL_FATTN_WMMA_F16_CASE(80, 32, float); -DECL_FATTN_WMMA_F16_CASE(96, 32, float); -DECL_FATTN_WMMA_F16_CASE(112, 32, float); -DECL_FATTN_WMMA_F16_CASE(128, 32, float); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqhalf-cpb16.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqhalf-cpb16.cu deleted file mode 100644 index bb680e401..000000000 --- a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqhalf-cpb16.cu +++ /dev/null @@ -1,10 +0,0 @@ -// This file has been autogenerated by generate_cu_files.py, do not edit manually. - -#include "../fattn-wmma-f16.cuh" - -DECL_FATTN_WMMA_F16_CASE(64, 16, half); -DECL_FATTN_WMMA_F16_CASE(80, 16, half); -DECL_FATTN_WMMA_F16_CASE(96, 16, half); -DECL_FATTN_WMMA_F16_CASE(112, 16, half); -DECL_FATTN_WMMA_F16_CASE(128, 16, half); -DECL_FATTN_WMMA_F16_CASE(256, 16, half); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqhalf-cpb32.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqhalf-cpb32.cu deleted file mode 100644 index 073f71b1f..000000000 --- a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqhalf-cpb32.cu +++ /dev/null @@ -1,10 +0,0 @@ -// This file has been autogenerated by generate_cu_files.py, do not edit manually. - -#include "../fattn-wmma-f16.cuh" - -DECL_FATTN_WMMA_F16_CASE(64, 32, half); -DECL_FATTN_WMMA_F16_CASE(80, 32, half); -DECL_FATTN_WMMA_F16_CASE(96, 32, half); -DECL_FATTN_WMMA_F16_CASE(112, 32, half); -DECL_FATTN_WMMA_F16_CASE(128, 32, half); -DECL_FATTN_WMMA_F16_CASE(256, 32, half); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqhalf-cpb8.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqhalf-cpb8.cu deleted file mode 100644 index d30710c5f..000000000 --- a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqhalf-cpb8.cu +++ /dev/null @@ -1,8 +0,0 @@ -// This file has been autogenerated by generate_cu_files.py, do not edit manually. - -#include "../fattn-wmma-f16.cuh" - -DECL_FATTN_WMMA_F16_CASE(64, 8, half); -DECL_FATTN_WMMA_F16_CASE(96, 8, half); -DECL_FATTN_WMMA_F16_CASE(128, 8, half); -DECL_FATTN_WMMA_F16_CASE(256, 8, half); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/mmq-instance-mxfp4.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/mmq-instance-mxfp4.cu new file mode 100644 index 000000000..c14624c52 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/mmq-instance-mxfp4.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. 
+ +#include "../mmq.cuh" + +DECL_MMQ_CASE(GGML_TYPE_MXFP4); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/unary.cu b/ml/backend/ggml/ggml/src/ggml-cuda/unary.cu index ec5773e01..5aff8a876 100644 --- a/ml/backend/ggml/ggml/src/ggml-cuda/unary.cu +++ b/ml/backend/ggml/ggml/src/ggml-cuda/unary.cu @@ -23,6 +23,12 @@ static __device__ __forceinline__ float op_gelu(float x) { return 0.5f*x*(1.0f + tanhf(SQRT_2_OVER_PI*x*(1.0f + GELU_COEF_A*x*x))); } +static __device__ __forceinline__ float op_gelu_erf(float x) { + const float SQRT_2_INV = 0.70710678118654752440084436210484f; + + return 0.5f*x*(1.0f + erff(x*SQRT_2_INV)); +} + static __device__ __forceinline__ float op_gelu_quick(float x) { const float GELU_QUICK_COEF = -1.702f; @@ -77,6 +83,10 @@ static __device__ __forceinline__ float op_log(float x) { return logf(x); } +static __device__ __forceinline__ float op_elu(float x) { + return (x > 0.f) ? x : expm1f(x); +} + template static __global__ void unary_op_kernel(const T * x, T * dst, const int k) { const int i = blockDim.x*blockIdx.x + threadIdx.x; @@ -134,6 +144,10 @@ void ggml_cuda_op_gelu(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { ggml_cuda_op_unary(ctx, dst); } +void ggml_cuda_op_gelu_erf(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + ggml_cuda_op_unary(ctx, dst); +} + void ggml_cuda_op_gelu_quick(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { ggml_cuda_op_unary(ctx, dst); } @@ -186,6 +200,181 @@ void ggml_cuda_op_log(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { ggml_cuda_op_unary(ctx, dst); } +void ggml_cuda_op_elu(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + ggml_cuda_op_unary(ctx, dst); +} +/* gated ops */ + +template +static __global__ void unary_gated_op_kernel(const T * x, const T * g, T * dst, const int64_t k, const int64_t n, const int64_t o0, const int64_t o1) { + const int64_t i = int64_t(blockDim.x)*blockIdx.x + threadIdx.x; + + if (i >= k) { + return; + } + + // perform base op and multiply with gate (either offset in same tensor or a separate one) + const int64_t j0 = (i / n) * o0 + (i % n); + const int64_t j1 = o0 == o1 ? j0 : (i / n) * o1 + (i % n); + + dst[i] = (T)(op((float)x[j0]) * (float)g[j1]); +} + +template +static void unary_gated_cuda(const T * x, const T * g, T * dst, const int64_t k, const int64_t n, const int64_t o0, const int64_t o1, cudaStream_t stream) { + const int64_t num_blocks = (k + CUDA_GLU_BLOCK_SIZE - 1) / CUDA_GLU_BLOCK_SIZE; + unary_gated_op_kernel<<>>(x, g, dst, k, n, o0, o1); +} + +template +void ggml_cuda_op_unary_gated(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + void * src0_d = src0->data; + void * src1_d = src1 ? src1->data : src0->data; + const int64_t src0_o = src0->nb[1]; + const int64_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; + void * dst_d = dst->data; + const int64_t nc = src1 ? 
src0->ne[0] : src0->ne[0] / 2; + cudaStream_t stream = ctx.stream(); + + GGML_ASSERT(ggml_is_contiguous_1(src0)); + GGML_ASSERT(src0->nb[0] == ggml_element_size(src0)); + GGML_ASSERT(ggml_is_contiguous(dst)); + + GGML_ASSERT(src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16); + GGML_ASSERT( dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16); + GGML_ASSERT(src0->type == dst->type); + GGML_ASSERT(dst->ne[0] == nc); + GGML_ASSERT(ggml_nrows(dst) == ggml_nrows(src0)); + + if (src1) { + GGML_ASSERT(ggml_is_contiguous_1(src1)); + GGML_ASSERT(src1->nb[0] == ggml_element_size(src1)); + GGML_ASSERT(src1->ne[0] == nc); + GGML_ASSERT(src0->type == src1->type); + } + + const int32_t swapped = ((const int32_t *) dst->op_params)[1]; + + if (src0->type == GGML_TYPE_F16) { + half * src0_p = (half *) src0_d; + half * src1_p = (half *) src1_d; + + if (!src1) { + src0_p += swapped ? nc : 0; + src1_p += swapped ? 0 : nc; + } + + unary_gated_cuda(src0_p, src1_p, (half *)dst_d, ggml_nelements(dst), nc, src0_o / sizeof(half), src1_o / sizeof(half), stream); + } else { + float * src0_p = (float *) src0_d; + float * src1_p = (float *) src1_d; + + if (!src1) { + src0_p += swapped ? nc : 0; + src1_p += swapped ? 0 : nc; + } + + unary_gated_cuda(src0_p, src1_p, (float *)dst_d, ggml_nelements(dst), nc, src0_o / sizeof(float), src1_o / sizeof(float), stream); + } +} + +void ggml_cuda_op_reglu(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + ggml_cuda_op_unary_gated(ctx, dst); +} + +void ggml_cuda_op_geglu(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + ggml_cuda_op_unary_gated(ctx, dst); +} + +void ggml_cuda_op_swiglu(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + ggml_cuda_op_unary_gated(ctx, dst); +} + +void ggml_cuda_op_geglu_erf(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + ggml_cuda_op_unary_gated(ctx, dst); +} + +void ggml_cuda_op_geglu_quick(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + ggml_cuda_op_unary_gated(ctx, dst); +} + +// swiglu_oai + +template +static __global__ void swiglu_oai_kernel(const T * x, const T * g, T * dst, const int64_t k, const int64_t n, const int64_t o0, const int64_t o1, float alpha, float limit) { + const int64_t i = int64_t(blockDim.x)*blockIdx.x + threadIdx.x; + + if (i >= k) { + return; + } + + // perform base op and multiply with gate (either offset in same tensor or a separate one) + const int64_t j0 = (i / n) * o0 + (i % n); + const int64_t j1 = o0 == o1 ? j0 : (i / n) * o1 + (i % n); + + float xi = x[j0]; + float gi = g[j1]; + xi = fminf(xi, limit); + gi = fmaxf(fminf(gi, limit), -limit); + + float out_glu = xi / (1.0f + expf(-xi * alpha)); + out_glu = out_glu * (1.0f + gi); + + dst[i] = out_glu; +} + +template +static void swiglu_oai_cuda(const T * x, const T * g, T * dst, const int64_t k, const int64_t n, const int64_t o0, const int64_t o1, const float alpha, const float limit, cudaStream_t stream) { + const int64_t num_blocks = (k + CUDA_GLU_BLOCK_SIZE - 1) / CUDA_GLU_BLOCK_SIZE; + swiglu_oai_kernel<<>>(x, g, dst, k, n, o0, o1, alpha, limit); +} + +void ggml_cuda_op_swiglu_oai(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + void * src0_d = src0->data; + void * src1_d = src1 ? src1->data : src0->data; + const int64_t src0_o = src0->nb[1]; + const int64_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; + void * dst_d = dst->data; + const int64_t nc = src1 ? 
src0->ne[0] : src0->ne[0] / 2; + cudaStream_t stream = ctx.stream(); + + GGML_ASSERT(ggml_is_contiguous_1(src0)); + GGML_ASSERT(src0->nb[0] == ggml_element_size(src0)); + GGML_ASSERT(ggml_is_contiguous(dst)); + + GGML_ASSERT(src0->type == GGML_TYPE_F32); + GGML_ASSERT( dst->type == GGML_TYPE_F32); + GGML_ASSERT(src0->type == dst->type); + GGML_ASSERT(dst->ne[0] == nc); + GGML_ASSERT(ggml_nrows(dst) == ggml_nrows(src0)); + + if (src1) { + GGML_ASSERT(ggml_is_contiguous_1(src1)); + GGML_ASSERT(src1->nb[0] == ggml_element_size(src1)); + GGML_ASSERT(src1->ne[0] == nc); + GGML_ASSERT(src0->type == src1->type); + } + + //const int32_t swapped = ((const int32_t *) dst->op_params)[1]; + const int32_t swapped = ggml_get_op_params_i32(dst, 1); + const float alpha = ggml_get_op_params_f32(dst, 2); + const float limit = ggml_get_op_params_f32(dst, 3); + + float * src0_p = (float *) src0_d; + float * src1_p = (float *) src1_d; + + if (!src1) { + src0_p += swapped ? nc : 0; + src1_p += swapped ? 0 : nc; + } + + swiglu_oai_cuda(src0_p, src1_p, (float *)dst_d, ggml_nelements(dst), nc, src0_o / sizeof(float), src1_o / sizeof(float), alpha, limit, stream); +} + /* silu_back */ static __device__ __forceinline__ float op_silu_back(float grad, float x) { diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/unary.cuh b/ml/backend/ggml/ggml/src/ggml-cuda/unary.cuh index 940a1feed..da3caf1d8 100644 --- a/ml/backend/ggml/ggml/src/ggml-cuda/unary.cuh +++ b/ml/backend/ggml/ggml/src/ggml-cuda/unary.cuh @@ -15,6 +15,7 @@ #define CUDA_SQRT_BLOCK_SIZE 256 #define CUDA_SIN_BLOCK_SIZE 256 #define CUDA_COS_BLOCK_SIZE 256 +#define CUDA_GLU_BLOCK_SIZE 256 void ggml_cuda_op_abs(ggml_backend_cuda_context & ctx, ggml_tensor * dst); @@ -30,6 +31,8 @@ void ggml_cuda_op_silu(ggml_backend_cuda_context & ctx, ggml_tensor * dst); void ggml_cuda_op_silu_back(ggml_backend_cuda_context & ctx, ggml_tensor * dst); +void ggml_cuda_op_gelu_erf(ggml_backend_cuda_context & ctx, ggml_tensor * dst); + void ggml_cuda_op_gelu_quick(ggml_backend_cuda_context & ctx, ggml_tensor * dst); void ggml_cuda_op_tanh(ggml_backend_cuda_context & ctx, ggml_tensor * dst); @@ -55,3 +58,17 @@ void ggml_cuda_op_sin(ggml_backend_cuda_context & ctx, ggml_tensor * dst); void ggml_cuda_op_cos(ggml_backend_cuda_context & ctx, ggml_tensor * dst); void ggml_cuda_op_log(ggml_backend_cuda_context & ctx, ggml_tensor * dst); + +void ggml_cuda_op_elu(ggml_backend_cuda_context & ctx, ggml_tensor * dst); + +void ggml_cuda_op_reglu(ggml_backend_cuda_context & ctx, ggml_tensor * dst); + +void ggml_cuda_op_geglu(ggml_backend_cuda_context & ctx, ggml_tensor * dst); + +void ggml_cuda_op_swiglu(ggml_backend_cuda_context & ctx, ggml_tensor * dst); + +void ggml_cuda_op_swiglu_oai(ggml_backend_cuda_context & ctx, ggml_tensor * dst); + +void ggml_cuda_op_geglu_erf(ggml_backend_cuda_context & ctx, ggml_tensor * dst); + +void ggml_cuda_op_geglu_quick(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/upscale.cu b/ml/backend/ggml/ggml/src/ggml-cuda/upscale.cu index 524e97957..ef48aa5f9 100644 --- a/ml/backend/ggml/ggml/src/ggml-cuda/upscale.cu +++ b/ml/backend/ggml/ggml/src/ggml-cuda/upscale.cu @@ -22,17 +22,88 @@ static __global__ void upscale_f32(const float * x, float * dst, dst[index] = *( (const float *)((const char *)x + i03 * nb03 + i02 * nb02 + i01 * nb01 + i00 * nb00) ); } +static __global__ void upscale_f32_bilinear(const float * x, float * dst, + const int nb00, const int nb01, const int nb02, const int nb03, + const int ne00_src, 
const int ne01_src,
+                                           const int ne10_dst, const int ne11_dst, const int ne12_dst, const int ne13_dst,
+                                           const float sf0, const float sf1, const float sf2, const float sf3,
+                                           const float pixel_offset) {
+    const int64_t index = threadIdx.x + blockIdx.x * blockDim.x;
+    const int64_t dst_total_elements = ne10_dst * ne11_dst * ne12_dst * ne13_dst;
+
+    if (index >= dst_total_elements) {
+        return;
+    }
+
+    const int i10_dst = index % ne10_dst;
+    const int i11_dst = (index / ne10_dst) % ne11_dst;
+    const int i12_dst = (index / (ne10_dst * ne11_dst)) % ne12_dst;
+    const int i13_dst = index / (ne10_dst * ne11_dst * ne12_dst);
+
+    const int i02_src = (int)(i12_dst / sf2);
+    const int i03_src = (int)(i13_dst / sf3);
+
+    const float y_src_f = ((float)i11_dst + pixel_offset) / sf1 - pixel_offset;
+    int y0_src = (int)floorf(y_src_f);
+    int y1_src = y0_src + 1;
+
+    y0_src = max(0, min(y0_src, ne01_src - 1));
+    y1_src = max(0, min(y1_src, ne01_src - 1));
+
+    float dy = y_src_f - (float)y0_src;
+    dy = max(0.0f, min(dy, 1.0f));
+
+    float x_src_f = ((float)i10_dst + pixel_offset) / sf0 - pixel_offset;
+    int x0_src = (int)floorf(x_src_f);
+    int x1_src = x0_src + 1;
+
+    x0_src = max(0, min(x0_src, ne00_src - 1));
+    x1_src = max(0, min(x1_src, ne00_src - 1));
+
+    float dx = x_src_f - (float)x0_src;
+    dx = max(0.0f, min(dx, 1.0f));
+
+    const float * p_a = (const float *)((const char *)x + (int64_t)x0_src * nb00 + (int64_t)y0_src * nb01 + (int64_t)i02_src * nb02 + (int64_t)i03_src * nb03);
+    const float * p_b = (const float *)((const char *)x + (int64_t)x1_src * nb00 + (int64_t)y0_src * nb01 + (int64_t)i02_src * nb02 + (int64_t)i03_src * nb03);
+    const float * p_c = (const float *)((const char *)x + (int64_t)x0_src * nb00 + (int64_t)y1_src * nb01 + (int64_t)i02_src * nb02 + (int64_t)i03_src * nb03);
+    const float * p_d = (const float *)((const char *)x + (int64_t)x1_src * nb00 + (int64_t)y1_src * nb01 + (int64_t)i02_src * nb02 + (int64_t)i03_src * nb03);
+
+    const float val_a = *p_a;
+    const float val_b = *p_b;
+    const float val_c = *p_c;
+    const float val_d = *p_d;
+
+    float result = val_a * (1.0f - dx) * (1.0f - dy) +
+                   val_b * dx * (1.0f - dy) +
+                   val_c * (1.0f - dx) * dy +
+                   val_d * dx * dy;
+
+    dst[index] = result;
+}
+
 static void upscale_f32_cuda(const float * x, float * dst,
                              const int nb00, const int nb01, const int nb02, const int nb03,
                              const int ne10, const int ne11, const int ne12, const int ne13,
                              const float sf0, const float sf1, const float sf2, const float sf3,
                              cudaStream_t stream) {
-    int dst_size = ne10 * ne11 * ne12 * ne13;
-    int num_blocks = (dst_size + CUDA_UPSCALE_BLOCK_SIZE - 1) / CUDA_UPSCALE_BLOCK_SIZE;
+    const int64_t dst_size = ne10 * ne11 * ne12 * ne13;
+    const int64_t num_blocks = (dst_size + CUDA_UPSCALE_BLOCK_SIZE - 1) / CUDA_UPSCALE_BLOCK_SIZE;
 
     upscale_f32<<<num_blocks, CUDA_UPSCALE_BLOCK_SIZE, 0, stream>>>(x, dst, nb00, nb01, nb02, nb03, ne10, ne11, ne12, ne13, sf0, sf1, sf2, sf3);
 }
 
+static void upscale_f32_bilinear_cuda(const float * x, float * dst,
+                                      const int nb00, const int nb01, const int nb02, const int nb03,
+                                      const int ne00_src, const int ne01_src,
+                                      const int ne10_dst, const int ne11_dst, const int ne12_dst, const int ne13_dst,
+                                      const float sf0, const float sf1, const float sf2, const float sf3,
+                                      const float pixel_offset, cudaStream_t stream) {
+    const int64_t dst_size = ne10_dst * ne11_dst * ne12_dst * ne13_dst;
+    const int64_t num_blocks = (dst_size + CUDA_UPSCALE_BLOCK_SIZE - 1) / CUDA_UPSCALE_BLOCK_SIZE;
+
+    upscale_f32_bilinear<<<num_blocks, CUDA_UPSCALE_BLOCK_SIZE, 0, stream>>>(x, dst, nb00, nb01, nb02, nb03, ne00_src, ne01_src, ne10_dst, ne11_dst, ne12_dst,
ne13_dst, sf0, sf1, sf2, sf3, pixel_offset); +} + void ggml_cuda_op_upscale(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const float * src0_d = (const float *)src0->data; @@ -42,10 +113,25 @@ void ggml_cuda_op_upscale(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { GGML_ASSERT(src0->type == GGML_TYPE_F32); GGML_ASSERT( dst->type == GGML_TYPE_F32); - const float sf0 = (float)dst->ne[0]/src0->ne[0]; - const float sf1 = (float)dst->ne[1]/src0->ne[1]; - const float sf2 = (float)dst->ne[2]/src0->ne[2]; + const int mode_flags = dst->op_params[0]; + const ggml_scale_mode mode = (ggml_scale_mode)(mode_flags & 0xFF); + + float sf0 = (float)dst->ne[0]/src0->ne[0]; + float sf1 = (float)dst->ne[1]/src0->ne[1]; + float sf2 = (float)dst->ne[2]/src0->ne[2]; const float sf3 = (float)dst->ne[3]/src0->ne[3]; - upscale_f32_cuda(src0_d, dst_d, src0->nb[0], src0->nb[1], src0->nb[2], src0->nb[3], dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], sf0, sf1, sf2, sf3, stream); + if (mode == GGML_SCALE_MODE_NEAREST) { + upscale_f32_cuda(src0_d, dst_d, src0->nb[0], src0->nb[1], src0->nb[2], src0->nb[3], dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], sf0, sf1, sf2, sf3, stream); + } else if (mode == GGML_SCALE_MODE_BILINEAR) { + float pixel_offset = 0.5f; + if (mode_flags & GGML_SCALE_FLAG_ALIGN_CORNERS) { + sf0 = (float)(dst->ne[0] - 1) / (src0->ne[0] - 1); + sf1 = (float)(dst->ne[1] - 1) / (src0->ne[1] - 1); + pixel_offset = 0.0f; + } + upscale_f32_bilinear_cuda(src0_d, dst_d, src0->nb[0], src0->nb[1], src0->nb[2], src0->nb[3], + src0->ne[0], src0->ne[1], dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], + sf0, sf1, sf2, sf3, pixel_offset, stream); + } } diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/vecdotq.cuh b/ml/backend/ggml/ggml/src/ggml-cuda/vecdotq.cuh index ba195e1d1..d8f9aa5ba 100644 --- a/ml/backend/ggml/ggml/src/ggml-cuda/vecdotq.cuh +++ b/ml/backend/ggml/ggml/src/ggml-cuda/vecdotq.cuh @@ -1,8 +1,20 @@ #pragma once #include "common.cuh" + #include +static __device__ __forceinline__ int get_int_b1(const void * x, const int & i32) { + const uint8_t * x8 = (const uint8_t *) x; + + int x32 = x8[4*i32 + 0] << 0; + x32 |= x8[4*i32 + 1] << 8; + x32 |= x8[4*i32 + 2] << 16; + x32 |= x8[4*i32 + 3] << 24; + + return x32; +} + static __device__ __forceinline__ int get_int_b2(const void * x, const int & i32) { const uint16_t * x16 = (const uint16_t *) x; // assume at least 2 byte alignment @@ -16,6 +28,20 @@ static __device__ __forceinline__ int get_int_b4(const void * x, const int & i32 return ((const int *) x)[i32]; // assume at least 4 byte alignment } +static __device__ __forceinline__ int2 get_int_from_table_16(const int & q4, const int8_t * table) { + const int q0_32 = (q4 >> 0) & 0x0F0F0F0F; + const int8_t * q0_8 = (const int8_t *) &q0_32; + const char4 val0_8 = make_char4( + table[q0_8[0]], table[q0_8[1]], table[q0_8[2]], table[q0_8[3]]); + + const int q1_32 = (q4 >> 4) & 0x0F0F0F0F; + const int8_t * q1_8 = (const int8_t *) &q1_32; + const char4 val1_8 = make_char4( + table[q1_8[0]], table[q1_8[1]], table[q1_8[2]], table[q1_8[3]]); + + return make_int2(*((const int *) &val0_8), *((const int *) &val1_8)); +} + // VDR = vec dot ratio, how many contiguous integers each thread processes when the vec dot kernel is called // MMVQ = mul_mat_vec_q, MMQ = mul_mat_q @@ -211,6 +237,30 @@ template static __device__ __forceinline__ float vec_dot_q8_0_16_q8_1_ return d8_1*sumf; } +#define VDR_MXFP4_Q8_1_MMVQ 2 +#define VDR_MXFP4_Q8_1_MMQ 4 + +static __device__ 
__forceinline__ float vec_dot_mxfp4_q8_1( + const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs) { + + const block_mxfp4 * bq4 = (const block_mxfp4 *) vbq + kbx; + + const int * q8 = (const int *) bq8_1->qs + iqs; + + int sumi = 0; +#pragma unroll + for (int l = 0; l < VDR_MXFP4_Q8_1_MMVQ; ++l) { + const int aux_q4 = get_int_b1(bq4->qs, iqs + l); + const int2 v = get_int_from_table_16(aux_q4, kvalues_mxfp4); + + sumi = ggml_cuda_dp4a(v.x, q8[l + 0], sumi); + sumi = ggml_cuda_dp4a(v.y, q8[l + 4], sumi); + } + + const float d = ggml_cuda_e8m0_to_fp32(bq4->e) * 0.5f * __low2float(bq8_1->ds); + return d * sumi; +} + #define VDR_Q2_K_Q8_1_MMVQ 1 #define VDR_Q2_K_Q8_1_MMQ 4 @@ -1068,20 +1118,6 @@ static __device__ __forceinline__ float vec_dot_iq1_m_q8_1( return d * ((sumi[0] + sumf[0]) * sc0 + (sumi[1] + sumf[1]) * sc1); } -static __device__ __forceinline__ int2 get_int_from_table_16(const int & q4) { - const int q0_32 = (q4 >> 0) & 0x0F0F0F0F; - const int8_t * q0_8 = (const int8_t *) &q0_32; - const char4 val0_8 = make_char4( - kvalues_iq4nl[q0_8[0]], kvalues_iq4nl[q0_8[1]], kvalues_iq4nl[q0_8[2]], kvalues_iq4nl[q0_8[3]]); - - const int q1_32 = (q4 >> 4) & 0x0F0F0F0F; - const int8_t * q1_8 = (const int8_t *) &q1_32; - const char4 val1_8 = make_char4( - kvalues_iq4nl[q1_8[0]], kvalues_iq4nl[q1_8[1]], kvalues_iq4nl[q1_8[2]], kvalues_iq4nl[q1_8[3]]); - - return make_int2(*((const int *) &val0_8), *((const int *) &val1_8)); -} - #define VDR_IQ4_NL_Q8_1_MMVQ 2 #define VDR_IQ4_NL_Q8_1_MMQ 4 @@ -1096,7 +1132,7 @@ static __device__ __forceinline__ float vec_dot_iq4_nl_q8_1( #pragma unroll for (int l = 0; l < VDR_Q4_0_Q8_1_MMVQ; ++l) { const int aux_q4 = get_int_b2(bq4->qs, iqs + l); - const int2 v = get_int_from_table_16(aux_q4); + const int2 v = get_int_from_table_16(aux_q4, kvalues_iq4nl); sumi = ggml_cuda_dp4a(v.x, q8[l + 0], sumi); sumi = ggml_cuda_dp4a(v.y, q8[l + 4], sumi); @@ -1118,7 +1154,7 @@ static __device__ __forceinline__ float vec_dot_iq4_xs_q8_1( #pragma unroll for (int j = 0; j < 4; ++j) { const int aux_q4 = get_int_b4(bq4->qs, iqs + j); - const int2 v = get_int_from_table_16(aux_q4); + const int2 v = get_int_from_table_16(aux_q4, kvalues_iq4nl); const int u0 = get_int_b4(bq8_1[iqs/4].qs, j + 0); const int u1 = get_int_b4(bq8_1[iqs/4].qs, j + 4); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/vendors/cuda.h b/ml/backend/ggml/ggml/src/ggml-cuda/vendors/cuda.h index 1746b0732..3b3086778 100644 --- a/ml/backend/ggml/ggml/src/ggml-cuda/vendors/cuda.h +++ b/ml/backend/ggml/ggml/src/ggml-cuda/vendors/cuda.h @@ -6,6 +6,10 @@ #include #include +#if CUDART_VERSION >= 12050 +#include +#endif // CUDART_VERSION >= 12050 + #if CUDART_VERSION < 11020 #define CU_DEVICE_ATTRIBUTE_VIRTUAL_MEMORY_MANAGEMENT_SUPPORTED CU_DEVICE_ATTRIBUTE_VIRTUAL_ADDRESS_MANAGEMENT_SUPPORTED #define CUBLAS_TF32_TENSOR_OP_MATH CUBLAS_TENSOR_OP_MATH diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/vendors/hip.h b/ml/backend/ggml/ggml/src/ggml-cuda/vendors/hip.h index 1a28831b7..c31f31923 100644 --- a/ml/backend/ggml/ggml/src/ggml-cuda/vendors/hip.h +++ b/ml/backend/ggml/ggml/src/ggml-cuda/vendors/hip.h @@ -5,14 +5,9 @@ #include #include #include -#ifdef __HIP_PLATFORM_AMD__ // for rocblas_initialize() #include "rocblas/rocblas.h" -#endif // __HIP_PLATFORM_AMD__ -#define CUBLAS_COMPUTE_16F HIPBLAS_R_16F -#define CUBLAS_COMPUTE_32F HIPBLAS_R_32F -#define CUBLAS_COMPUTE_32F_FAST_16F HIPBLAS_R_32F #define CUBLAS_GEMM_DEFAULT HIPBLAS_GEMM_DEFAULT #define 
CUBLAS_GEMM_DEFAULT_TENSOR_OP HIPBLAS_GEMM_DEFAULT #define CUBLAS_OP_N HIPBLAS_OP_N @@ -30,7 +25,6 @@ #define CU_CHECK(fn) {hipError_t err = fn; if(err != hipSuccess) { GGML_ABORT("HipVMM Failure: %s\n", hipGetErrorString(err)); }} #define __shfl_sync(mask, var, laneMask, width) __shfl(var, laneMask, width) #define __shfl_xor_sync(mask, var, laneMask, width) __shfl_xor(var, laneMask, width) -#define cublasComputeType_t hipblasDatatype_t //deprecated, new hipblasComputeType_t not in 5.6 #define cublasCreate hipblasCreate #define cublasDestroy hipblasDestroy #define cublasGemmEx hipblasGemmEx @@ -42,7 +36,6 @@ #define cublasSgemm hipblasSgemm #define cublasStatus_t hipblasStatus_t #define cublasOperation_t hipblasOperation_t -#define cudaDataType_t hipblasDatatype_t //deprecated, new hipblasDatatype not in 5.6 #define cudaDeviceCanAccessPeer hipDeviceCanAccessPeer #define cudaDeviceDisablePeerAccess hipDeviceDisablePeerAccess #define cudaDeviceEnablePeerAccess hipDeviceEnablePeerAccess @@ -144,6 +137,24 @@ #define CUBLAS_STATUS_INTERNAL_ERROR HIPBLAS_STATUS_INTERNAL_ERROR #define CUBLAS_STATUS_NOT_SUPPORTED HIPBLAS_STATUS_NOT_SUPPORTED +#if HIP_VERSION >= 70000000 +#define CUBLAS_COMPUTE_16F HIPBLAS_COMPUTE_16F +#define CUBLAS_COMPUTE_32F HIPBLAS_COMPUTE_32F +#define CUBLAS_COMPUTE_32F_FAST_16F HIPBLAS_COMPUTE_32F_FAST_16F +#define cublasComputeType_t hipblasComputeType_t +#define cudaDataType_t hipDataType +#else +#define CUBLAS_COMPUTE_16F HIPBLAS_R_16F +#define CUBLAS_COMPUTE_32F HIPBLAS_R_32F +#define CUBLAS_COMPUTE_32F_FAST_16F HIPBLAS_R_32F +#define cublasComputeType_t hipblasDatatype_t +#define cudaDataType_t hipblasDatatype_t +#endif // HIP_VERSION >= 7000000 + +#if !defined(__HIP_PLATFORM_AMD__) +#error "The HIP backend supports only AMD targets" +#endif // !defined(__HIP_PLATFORM_AMD__) + #define __CUDA_ARCH__ 1300 #if defined(__gfx803__) || defined(__gfx900__) || defined(__gfx906__) @@ -151,7 +162,19 @@ #endif #if defined(__gfx908__) || defined(__gfx90a__) || defined(__gfx942__) -#define CDNA +#define CDNA // For the entire family +#endif + +#if defined(__gfx942__) +#define CDNA3 +#endif + +#if defined(__gfx90a__) +#define CDNA2 +#endif + +#if defined(__gfx908__) +#define CDNA1 #endif #if defined(__GFX12__) @@ -177,6 +200,7 @@ #endif typedef hip_bfloat16 nv_bfloat16; +typedef short2 nv_bfloat162; // FIXME there is no 2x BF16 type being defined in bfloat16.h, ad-hoc compilation fix typedef int8_t int8x4_t __attribute__((ext_vector_type(4))); typedef uint8_t uint8x4_t __attribute__((ext_vector_type(4))); @@ -228,7 +252,7 @@ static __device__ __forceinline__ unsigned int __vcmpne4(unsigned int a, unsigne return c; } -#if defined(__HIP_PLATFORM_AMD__) && HIP_VERSION < 50600000 +#if HIP_VERSION < 50600000 // __shfl_xor() for half2 was added in ROCm 5.6 static __device__ __forceinline__ half2 __shfl_xor(half2 var, int laneMask, int width) { typedef union half2_b32 { @@ -240,4 +264,4 @@ static __device__ __forceinline__ half2 __shfl_xor(half2 var, int laneMask, int tmp.b32 = __shfl_xor(tmp.b32, laneMask, width); return tmp.val; } -#endif // defined(__HIP_PLATFORM_AMD__) && HIP_VERSION < 50600000 +#endif // HIP_VERSION < 50600000 diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/vendors/musa.h b/ml/backend/ggml/ggml/src/ggml-cuda/vendors/musa.h index 937779a90..8c55a2e4e 100644 --- a/ml/backend/ggml/ggml/src/ggml-cuda/vendors/musa.h +++ b/ml/backend/ggml/ggml/src/ggml-cuda/vendors/musa.h @@ -13,7 +13,7 @@ #define CUBLAS_OP_N MUBLAS_OP_N #define CUBLAS_OP_T MUBLAS_OP_T #define 
CUBLAS_STATUS_SUCCESS MUBLAS_STATUS_SUCCESS -#define CUBLAS_TF32_TENSOR_OP_MATH MUBLAS_MATH_MODE_DEFAULT +#define CUBLAS_TF32_TENSOR_OP_MATH MUBLAS_TENSOR_OP_MATH #define CUDA_R_16F MUSA_R_16F #define CUDA_R_16BF MUSA_R_16BF #define CUDA_R_32F MUSA_R_32F @@ -29,7 +29,7 @@ #define cublasSgemm mublasSgemm #define cublasStatus_t mublasStatus_t #define cublasOperation_t mublasOperation_t -#define cublasGetStatusString mublasStatus_to_string +#define cublasGetStatusString mublasGetStatusString #define cudaDataType_t musaDataType_t #define cudaDeviceCanAccessPeer musaDeviceCanAccessPeer #define cudaDeviceDisablePeerAccess musaDeviceDisablePeerAccess @@ -137,4 +137,5 @@ #define cudaStreamEndCapture musaStreamEndCapture #define cudaOccupancyMaxActiveBlocksPerMultiprocessor musaOccupancyMaxActiveBlocksPerMultiprocessor -typedef mt_bfloat16 nv_bfloat16; +typedef __mt_bfloat16 nv_bfloat16; +typedef __mt_bfloat162 nv_bfloat162; diff --git a/ml/backend/ggml/ggml/src/ggml-hip/CMakeLists.txt b/ml/backend/ggml/ggml/src/ggml-hip/CMakeLists.txt index 1fe8fe3b8..852de9734 100644 --- a/ml/backend/ggml/ggml/src/ggml-hip/CMakeLists.txt +++ b/ml/backend/ggml/ggml/src/ggml-hip/CMakeLists.txt @@ -113,6 +113,18 @@ if (GGML_HIP_ROCWMMA_FATTN) add_compile_definitions(GGML_HIP_ROCWMMA_FATTN) endif() +if (NOT GGML_HIP_MMQ_MFMA) + add_compile_definitions(GGML_HIP_NO_MMQ_MFMA) +endif() + +if (GGML_HIP_FORCE_ROCWMMA_FATTN_GFX12 OR ${hip_VERSION} VERSION_GREATER_EQUAL 7.0) + add_compile_definitions(GGML_HIP_ROCWMMA_FATTN_GFX12) +endif() + +if (GGML_HIP_EXPORT_METRICS) + set(CMAKE_HIP_FLAGS "${CMAKE_HIP_FLAGS} -Rpass-analysis=kernel-resource-usage --save-temps") +endif() + if (NOT GGML_CUDA_FA) add_compile_definitions(GGML_CUDA_NO_FA) endif() diff --git a/ml/backend/ggml/ggml/src/ggml-impl.h b/ml/backend/ggml/ggml/src/ggml-impl.h index a19cfb14e..19a7adb2d 100644 --- a/ml/backend/ggml/ggml/src/ggml-impl.h +++ b/ml/backend/ggml/ggml/src/ggml-impl.h @@ -32,6 +32,8 @@ extern "C" { #endif +void ggml_print_backtrace(void); + #ifndef MIN # define MIN(a, b) ((a) < (b) ? (a) : (b)) #endif @@ -71,6 +73,22 @@ static inline int ggml_up(int n, int m) { return (n + m - 1) & ~(m - 1); } +// TODO: move to ggml.h? 
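The bilinear mode added to upscale.cu above reduces to a clamp-and-lerp over four taps: by default coordinates are mapped with a half-pixel offset (pixel_offset = 0.5), and GGML_SCALE_FLAG_ALIGN_CORNERS switches to offset 0 with scale factors (dst-1)/(src-1). A minimal host-side C++ sketch of the same sampling math for a single 2D channel; the function and variable names here are ours, for illustration, not part of the patch:

```cpp
// Host-side reference for the CUDA bilinear upscale above (illustrative only).
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

// Upscale a single-channel w x h float image to w2 x h2.
// align_corners = false matches GGML_SCALE_MODE_BILINEAR (half-pixel offset);
// align_corners = true matches GGML_SCALE_FLAG_ALIGN_CORNERS.
static std::vector<float> upscale_bilinear(const std::vector<float> & src, int w, int h,
                                           int w2, int h2, bool align_corners) {
    const float sf0 = align_corners ? (float)(w2 - 1) / (w - 1) : (float)w2 / w;
    const float sf1 = align_corners ? (float)(h2 - 1) / (h - 1) : (float)h2 / h;
    const float off = align_corners ? 0.0f : 0.5f;

    std::vector<float> dst(w2 * h2);
    for (int y = 0; y < h2; ++y) {
        for (int x = 0; x < w2; ++x) {
            // map the destination pixel center back into source coordinates
            const float xs = ((float)x + off) / sf0 - off;
            const float ys = ((float)y + off) / sf1 - off;

            // clamp the four taps to the image border (same as the kernel)
            const int x0 = std::clamp((int)std::floor(xs), 0, w - 1);
            const int y0 = std::clamp((int)std::floor(ys), 0, h - 1);
            const int x1 = std::min(x0 + 1, w - 1);
            const int y1 = std::min(y0 + 1, h - 1);

            const float dx = std::clamp(xs - (float)x0, 0.0f, 1.0f);
            const float dy = std::clamp(ys - (float)y0, 0.0f, 1.0f);

            const float a = src[y0*w + x0], b = src[y0*w + x1];
            const float c = src[y1*w + x0], d = src[y1*w + x1];

            dst[y*w2 + x] = a*(1-dx)*(1-dy) + b*dx*(1-dy) + c*(1-dx)*dy + d*dx*dy;
        }
    }
    return dst;
}

int main() {
    const std::vector<float> src = {0.0f, 1.0f,
                                    2.0f, 3.0f};
    for (float v : upscale_bilinear(src, 2, 2, 4, 4, /*align_corners=*/false)) {
        printf("%.3f ", v);
    }
    printf("\n");
}
```
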
+static bool ggml_are_same_layout(const struct ggml_tensor * a, const struct ggml_tensor * b) { + if (a->type != b->type) { + return false; + } + for (int i = 0; i < GGML_MAX_DIMS; i++) { + if (a->ne[i] != b->ne[i]) { + return false; + } + if (a->nb[i] != b->nb[i]) { + return false; + } + } + return true; +} + // // logging // @@ -299,6 +317,7 @@ struct ggml_cgraph { struct ggml_tensor ** grads; // the outputs of these tensors are the gradients of the nodes struct ggml_tensor ** grad_accs; // accumulators for node gradients struct ggml_tensor ** leafs; // tensors with constant data + int32_t * use_counts;// number of uses of each tensor, indexed by hash table slot struct ggml_hash_set visited_hash_set; @@ -315,203 +334,142 @@ struct ggml_cgraph ggml_graph_view(struct ggml_cgraph * cgraph, int i0, int i1); GGML_API void * ggml_aligned_malloc(size_t size); GGML_API void ggml_aligned_free(void * ptr, size_t size); -// FP16 to FP32 conversion +// FP16 <-> FP32 +// ref: https://github.com/Maratyszcza/FP16 -// 16-bit float -// on Arm, we use __fp16 -// on x86, we use uint16_t -// -// for old CUDA compilers (<= 11), we use uint16_t: ref https://github.com/ggml-org/llama.cpp/pull/10616 -// for MUSA compilers , we use uint16_t: ref https://github.com/ggml-org/llama.cpp/pull/11843 -// -#if defined(__ARM_NEON) && !(defined(__CUDACC__) && __CUDACC_VER_MAJOR__ <= 11) && !defined(__MUSACC__) - #define GGML_COMPUTE_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x) - #define GGML_COMPUTE_FP32_TO_FP16(x) ggml_compute_fp32_to_fp16(x) - - #define GGML_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x) - - static inline float ggml_compute_fp16_to_fp32(ggml_fp16_t h) { - __fp16 tmp; - memcpy(&tmp, &h, sizeof(ggml_fp16_t)); - return (float)tmp; - } - - static inline ggml_fp16_t ggml_compute_fp32_to_fp16(float f) { - ggml_fp16_t res; - __fp16 tmp = f; - memcpy(&res, &tmp, sizeof(ggml_fp16_t)); - return res; - } - -#elif defined(__F16C__) - - #ifdef _MSC_VER - #define GGML_COMPUTE_FP16_TO_FP32(x) _mm_cvtss_f32(_mm_cvtph_ps(_mm_cvtsi32_si128(x))) - #define GGML_COMPUTE_FP32_TO_FP16(x) _mm_extract_epi16(_mm_cvtps_ph(_mm_set_ss(x), 0), 0) - #else - #define GGML_COMPUTE_FP16_TO_FP32(x) _cvtsh_ss(x) - #define GGML_COMPUTE_FP32_TO_FP16(x) _cvtss_sh(x, 0) - #endif - -#elif defined(__POWER9_VECTOR__) - - #define GGML_COMPUTE_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x) - #define GGML_COMPUTE_FP32_TO_FP16(x) ggml_compute_fp32_to_fp16(x) - /* the inline asm below is about 12% faster than the lookup method */ - #define GGML_FP16_TO_FP32(x) GGML_COMPUTE_FP16_TO_FP32(x) - #define GGML_FP32_TO_FP16(x) GGML_COMPUTE_FP32_TO_FP16(x) - - static inline float ggml_compute_fp16_to_fp32(ggml_fp16_t h) { - float f; - double d; - __asm__( - "mtfprd %0,%2\n" - "xscvhpdp %0,%0\n" - "frsp %1,%0\n" : - /* temp */ "=d"(d), - /* out */ "=f"(f): - /* in */ "r"(h)); - return f; - } - - static inline ggml_fp16_t ggml_compute_fp32_to_fp16(float f) { - double d; - ggml_fp16_t r; - __asm__( /* xscvdphp can work on double or single precision */ - "xscvdphp %0,%2\n" - "mffprd %1,%0\n" : - /* temp */ "=d"(d), - /* out */ "=r"(r): - /* in */ "f"(f)); - return r; - } - -#elif defined(__riscv) && defined(GGML_RV_ZFH) - - static inline float ggml_compute_fp16_to_fp32(ggml_fp16_t h) { - float f; - __asm__( - "fmv.h.x %[f], %[h]\n\t" - "fcvt.s.h %[f], %[f]" - : [f] "=&f" (f) - : [h] "r" (h) - ); - return f; - } - - static inline ggml_fp16_t ggml_compute_fp32_to_fp16(float f) { - ggml_fp16_t res; - __asm__( - "fcvt.h.s %[f], %[f]\n\t" - "fmv.x.h %[h], %[f]" - : [h] 
"=&r" (res) - : [f] "f" (f) - ); - return res; - } - - #define GGML_COMPUTE_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x) - #define GGML_COMPUTE_FP32_TO_FP16(x) ggml_compute_fp32_to_fp16(x) - #define GGML_FP16_TO_FP32(x) GGML_COMPUTE_FP16_TO_FP32(x) - #define GGML_FP32_TO_FP16(x) GGML_COMPUTE_FP32_TO_FP16(x) - -#else - - // FP16 <-> FP32 - // ref: https://github.com/Maratyszcza/FP16 - - static inline float fp32_from_bits(uint32_t w) { - union { - uint32_t as_bits; - float as_value; - } fp32; - fp32.as_bits = w; - return fp32.as_value; - } - - static inline uint32_t fp32_to_bits(float f) { - union { - float as_value; - uint32_t as_bits; - } fp32; - fp32.as_value = f; - return fp32.as_bits; - } - - static inline float ggml_compute_fp16_to_fp32(ggml_fp16_t h) { - const uint32_t w = (uint32_t) h << 16; - const uint32_t sign = w & UINT32_C(0x80000000); - const uint32_t two_w = w + w; - - const uint32_t exp_offset = UINT32_C(0xE0) << 23; - #if (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__)) && (!defined(__cplusplus) || __cplusplus >= 201703L) - const float exp_scale = 0x1.0p-112f; - #else - const float exp_scale = fp32_from_bits(UINT32_C(0x7800000)); - #endif - const float normalized_value = fp32_from_bits((two_w >> 4) + exp_offset) * exp_scale; - - const uint32_t magic_mask = UINT32_C(126) << 23; - const float magic_bias = 0.5f; - const float denormalized_value = fp32_from_bits((two_w >> 17) | magic_mask) - magic_bias; - - const uint32_t denormalized_cutoff = UINT32_C(1) << 27; - const uint32_t result = sign | - (two_w < denormalized_cutoff ? fp32_to_bits(denormalized_value) : fp32_to_bits(normalized_value)); - return fp32_from_bits(result); - } - - static inline ggml_fp16_t ggml_compute_fp32_to_fp16(float f) { - #if (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__)) && (!defined(__cplusplus) || __cplusplus >= 201703L) - const float scale_to_inf = 0x1.0p+112f; - const float scale_to_zero = 0x1.0p-110f; - #else - const float scale_to_inf = fp32_from_bits(UINT32_C(0x77800000)); - const float scale_to_zero = fp32_from_bits(UINT32_C(0x08800000)); - #endif - float base = (fabsf(f) * scale_to_inf) * scale_to_zero; - - const uint32_t w = fp32_to_bits(f); - const uint32_t shl1_w = w + w; - const uint32_t sign = w & UINT32_C(0x80000000); - uint32_t bias = shl1_w & UINT32_C(0xFF000000); - if (bias < UINT32_C(0x71000000)) { - bias = UINT32_C(0x71000000); - } - - base = fp32_from_bits((bias >> 1) + UINT32_C(0x07800000)) + base; - const uint32_t bits = fp32_to_bits(base); - const uint32_t exp_bits = (bits >> 13) & UINT32_C(0x00007C00); - const uint32_t mantissa_bits = bits & UINT32_C(0x00000FFF); - const uint32_t nonsign = exp_bits + mantissa_bits; - return (sign >> 16) | (shl1_w > UINT32_C(0xFF000000) ? UINT16_C(0x7E00) : nonsign); - } - - #define GGML_COMPUTE_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x) - #define GGML_COMPUTE_FP32_TO_FP16(x) ggml_compute_fp32_to_fp16(x) - -#endif // defined(__ARM_NEON) && !(defined(__CUDACC__) && __CUDACC_VER_MAJOR__ <= 11) && !defined(__MUSACC__) - -// precomputed f32 table for f16 (256 KB) -// defined in ggml.c, initialized in ggml_init() -GGML_API float ggml_table_f32_f16[1 << 16]; - -// On ARM NEON, it's quicker to directly convert x -> x instead of calling into ggml_lookup_fp16_to_fp32, -// so we define GGML_FP16_TO_FP32 and GGML_FP32_TO_FP16 elsewhere for NEON. -// This is also true for POWER9. 
-#if !defined(GGML_FP16_TO_FP32) -inline static float ggml_lookup_fp16_to_fp32(ggml_fp16_t f) { - uint16_t s; - memcpy(&s, &f, sizeof(uint16_t)); - return ggml_table_f32_f16[s]; +static inline float fp32_from_bits(uint32_t w) { + union { + uint32_t as_bits; + float as_value; + } fp32; + fp32.as_bits = w; + return fp32.as_value; } -#define GGML_FP16_TO_FP32(x) ggml_lookup_fp16_to_fp32(x) -#endif +static inline uint32_t fp32_to_bits(float f) { + union { + float as_value; + uint32_t as_bits; + } fp32; + fp32.as_value = f; + return fp32.as_bits; +} -#if !defined(GGML_FP32_TO_FP16) -#define GGML_FP32_TO_FP16(x) GGML_COMPUTE_FP32_TO_FP16(x) +static inline float ggml_compute_fp16_to_fp32(ggml_fp16_t h) { + const uint32_t w = (uint32_t) h << 16; + const uint32_t sign = w & UINT32_C(0x80000000); + const uint32_t two_w = w + w; + + const uint32_t exp_offset = UINT32_C(0xE0) << 23; +#if (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__)) && (!defined(__cplusplus) || __cplusplus >= 201703L) + const float exp_scale = 0x1.0p-112f; +#else + const float exp_scale = fp32_from_bits(UINT32_C(0x7800000)); #endif + const float normalized_value = fp32_from_bits((two_w >> 4) + exp_offset) * exp_scale; + + const uint32_t magic_mask = UINT32_C(126) << 23; + const float magic_bias = 0.5f; + const float denormalized_value = fp32_from_bits((two_w >> 17) | magic_mask) - magic_bias; + + const uint32_t denormalized_cutoff = UINT32_C(1) << 27; + const uint32_t result = sign | + (two_w < denormalized_cutoff ? fp32_to_bits(denormalized_value) : fp32_to_bits(normalized_value)); + return fp32_from_bits(result); +} + +static inline ggml_fp16_t ggml_compute_fp32_to_fp16(float f) { +#if (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__)) && (!defined(__cplusplus) || __cplusplus >= 201703L) + const float scale_to_inf = 0x1.0p+112f; + const float scale_to_zero = 0x1.0p-110f; +#else + const float scale_to_inf = fp32_from_bits(UINT32_C(0x77800000)); + const float scale_to_zero = fp32_from_bits(UINT32_C(0x08800000)); +#endif + float base = (fabsf(f) * scale_to_inf) * scale_to_zero; + + const uint32_t w = fp32_to_bits(f); + const uint32_t shl1_w = w + w; + const uint32_t sign = w & UINT32_C(0x80000000); + uint32_t bias = shl1_w & UINT32_C(0xFF000000); + if (bias < UINT32_C(0x71000000)) { + bias = UINT32_C(0x71000000); + } + + base = fp32_from_bits((bias >> 1) + UINT32_C(0x07800000)) + base; + const uint32_t bits = fp32_to_bits(base); + const uint32_t exp_bits = (bits >> 13) & UINT32_C(0x00007C00); + const uint32_t mantissa_bits = bits & UINT32_C(0x00000FFF); + const uint32_t nonsign = exp_bits + mantissa_bits; + return (sign >> 16) | (shl1_w > UINT32_C(0xFF000000) ? 
UINT16_C(0x7E00) : nonsign);
+}
+
+#define GGML_COMPUTE_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x)
+#define GGML_COMPUTE_FP32_TO_FP16(x) ggml_compute_fp32_to_fp16(x)
+
+#define GGML_FP16_TO_FP32(x) GGML_COMPUTE_FP16_TO_FP32(x)
+#define GGML_FP32_TO_FP16(x) GGML_COMPUTE_FP32_TO_FP16(x)
+
+static inline float ggml_e8m0_to_fp32(uint8_t x) {
+    uint32_t bits;  // Stores the raw bit representation of the float
+
+    // Handle special case for minimum exponent (denormalized float)
+    if (x == 0) {
+        // Bit pattern for 2^(-127):
+        // - Sign bit: 0 (positive)
+        // - Exponent: 0 (denormalized number)
+        // - Mantissa: 0x400000 (0.5 in fractional form)
+        // Value = 0.5 * 2^(-126) = 2^(-127)
+        bits = 0x00400000;
+    }
+    // note: disabled as we don't need to handle NaNs
+    //// Handle special case for NaN (all bits set)
+    //else if (x == 0xFF) {
+    //    // Standard quiet NaN pattern:
+    //    // - Sign bit: 0
+    //    // - Exponent: all 1s (0xFF)
+    //    // - Mantissa: 0x400000 (quiet NaN flag)
+    //    bits = 0x7FC00000;
+    //}
+    // Normalized values (most common case)
+    else {
+        // Construct normalized float by shifting exponent into position:
+        // - Exponent field: 8 bits (positions 30-23)
+        // - Mantissa: 0 (implicit leading 1)
+        // Value = 2^(x - 127)
+        bits = (uint32_t) x << 23;
+    }
+
+    float result;  // Final float value
+    // Safely reinterpret bit pattern as float without type-punning issues
+    memcpy(&result, &bits, sizeof(float));
+    return result;
+}
+
+// Equal to ggml_e8m0_to_fp32/2
+// Useful with MXFP4 quantization since the E2M1 values are doubled
+static inline float ggml_e8m0_to_fp32_half(uint8_t x) {
+    uint32_t bits;
+
+    // For x < 2: use precomputed denormal patterns
+    if (x < 2) {
+        // 0x00200000 = 2^(-128), 0x00400000 = 2^(-127)
+        bits = 0x00200000 << x;
+    }
+    // For x >= 2: normalized exponent adjustment
+    else {
+        // 0.5 * 2^(x-127) = 2^(x-128) = normalized with exponent (x-1)
+        bits = (uint32_t)(x - 1) << 23;
+    }
+    // Note: NaNs are not handled here
+
+    float result;
+    memcpy(&result, &bits, sizeof(float));
+    return result;
+}
+
+#define GGML_E8M0_TO_FP32(x) ggml_e8m0_to_fp32(x)
+#define GGML_E8M0_TO_FP32_HALF(x) ggml_e8m0_to_fp32_half(x)
 
 /**
  * Converts brain16 to float32.
@@ -587,13 +545,76 @@ static inline ggml_bf16_t ggml_compute_fp32_to_bf16(float s) {
 #define GGML_FP32_TO_BF16(x) ggml_compute_fp32_to_bf16(x)
 #define GGML_BF16_TO_FP32(x) ggml_compute_bf16_to_fp32(x)
 
+// return true if the node's results are only used by N other nodes
+// and can be fused into their calculations.
+static inline bool ggml_node_has_n_uses(const struct ggml_cgraph * cgraph, int node_idx, int32_t n_uses) {
+    const struct ggml_tensor * node = cgraph->nodes[node_idx];
+
+    // check the use count against how many we're replacing
+    size_t hash_pos = ggml_hash_find(&cgraph->visited_hash_set, node);
+    if (!ggml_bitset_get(cgraph->visited_hash_set.used, hash_pos) || cgraph->use_counts[hash_pos] != n_uses) {
+        return false;
+    }
+
+    // if node is a view, some other node might be using the intermediate result
+    // via the view source.
+    if (node->view_src) {
+        return false;
+    }
+
+    // If the user requested output for the node, can't fuse
+    if (node->flags & GGML_TENSOR_FLAG_OUTPUT) {
+        return false;
+    }
+
+    return true;
+}
+
+// Returns true if nodes [i, i+ops.size()) are the sequence of ggml_ops in ops[]
+// and are fusable. Nodes are considered fusable according to this function if:
+// - all nodes except the last have only one use and are not views/outputs (see ggml_node_has_n_uses).
+// - all nodes except the last are a src of the following node.
+// - all nodes are the same shape.
+// TODO: Consider allowing GGML_OP_NONE nodes in between
+static inline bool ggml_can_fuse(const struct ggml_cgraph * cgraph, int node_idx, const enum ggml_op * ops, int num_ops) {
+    if (node_idx + num_ops > cgraph->n_nodes) {
+        return false;
+    }
+
+    for (int i = 0; i < num_ops; ++i) {
+        struct ggml_tensor * node = cgraph->nodes[node_idx + i];
+        if (node->op != ops[i]) {
+            return false;
+        }
+        if (i < num_ops - 1 && !ggml_node_has_n_uses(cgraph, node_idx + i, 1)) {
+            return false;
+        }
+        if (i > 0) {
+            struct ggml_tensor * prev = cgraph->nodes[node_idx + i - 1];
+            if (node->src[0] != prev && node->src[1] != prev) {
+                return false;
+            }
+            if (!ggml_are_same_shape(node, prev)) {
+                return false;
+            }
+        }
+    }
+    return true;
+}
+
 #ifdef __cplusplus
 }
 #endif
 
 #ifdef __cplusplus
+#include <initializer_list>
 #include <vector>
 
+// nicer C++ syntax for ggml_can_fuse
+inline bool ggml_can_fuse(const struct ggml_cgraph * cgraph, int node_idx, std::initializer_list<enum ggml_op> ops) {
+    return ggml_can_fuse(cgraph, node_idx, ops.begin(), (int)ops.size());
+}
+
 // expose GGUF internals for test code
 GGML_API size_t gguf_type_size(enum gguf_type type);
 GGML_API struct gguf_context * gguf_init_from_file_impl(FILE * file, struct gguf_init_params params);
diff --git a/ml/backend/ggml/ggml/src/ggml-metal/CMakeLists.txt b/ml/backend/ggml/ggml/src/ggml-metal/CMakeLists.txt
index e22232780..0ca8a3c55 100644
--- a/ml/backend/ggml/ggml/src/ggml-metal/CMakeLists.txt
+++ b/ml/backend/ggml/ggml/src/ggml-metal/CMakeLists.txt
@@ -44,21 +44,22 @@ if (GGML_METAL_EMBED_LIBRARY)
     set(METALLIB_SOURCE_EMBED_TMP "${CMAKE_BINARY_DIR}/autogenerated/ggml-metal-embed.metal.tmp")
 
     add_custom_command(
-        OUTPUT ${METALLIB_EMBED_ASM}
+        OUTPUT "${METALLIB_EMBED_ASM}"
         COMMAND echo "Embedding Metal library"
-        COMMAND sed -e '/__embed_ggml-common.h__/r ${METALLIB_COMMON}' -e '/__embed_ggml-common.h__/d' < ${METALLIB_SOURCE} > ${METALLIB_SOURCE_EMBED_TMP}
-        COMMAND sed -e '/\#include \"ggml-metal-impl.h\"/r ${METALLIB_IMPL}' -e '/\#include \"ggml-metal-impl.h\"/d' < ${METALLIB_SOURCE_EMBED_TMP} > ${METALLIB_SOURCE_EMBED}
-        COMMAND echo ".section __DATA,__ggml_metallib" > ${METALLIB_EMBED_ASM}
-        COMMAND echo ".globl _ggml_metallib_start" >> ${METALLIB_EMBED_ASM}
-        COMMAND echo "_ggml_metallib_start:" >> ${METALLIB_EMBED_ASM}
-        COMMAND echo ".incbin \\\"${METALLIB_SOURCE_EMBED}\\\"" >> ${METALLIB_EMBED_ASM}
-        COMMAND echo ".globl _ggml_metallib_end" >> ${METALLIB_EMBED_ASM}
-        COMMAND echo "_ggml_metallib_end:" >> ${METALLIB_EMBED_ASM}
+        COMMAND sed -e "/__embed_ggml-common.h__/r ${METALLIB_COMMON}" -e "/__embed_ggml-common.h__/d" < "${METALLIB_SOURCE}" > "${METALLIB_SOURCE_EMBED_TMP}"
+        COMMAND sed -e "/\#include \"ggml-metal-impl.h\"/r ${METALLIB_IMPL}" -e "/\#include \"ggml-metal-impl.h\"/d" < "${METALLIB_SOURCE_EMBED_TMP}" > "${METALLIB_SOURCE_EMBED}"
+        COMMAND echo ".section __DATA,__ggml_metallib" > "${METALLIB_EMBED_ASM}"
+        COMMAND echo ".globl _ggml_metallib_start" >> "${METALLIB_EMBED_ASM}"
+        COMMAND echo "_ggml_metallib_start:" >> "${METALLIB_EMBED_ASM}"
+        COMMAND echo .incbin "\"${METALLIB_SOURCE_EMBED}\"" >> "${METALLIB_EMBED_ASM}"
+        COMMAND echo ".globl _ggml_metallib_end" >> "${METALLIB_EMBED_ASM}"
+        COMMAND echo "_ggml_metallib_end:" >> "${METALLIB_EMBED_ASM}"
         DEPENDS ../ggml-common.h ggml-metal.metal ggml-metal-impl.h
         COMMENT "Generate assembly for embedded Metal library"
+        VERBATIM
     )
 
-    target_sources(ggml-metal PRIVATE ${METALLIB_EMBED_ASM})
+    target_sources(ggml-metal
PRIVATE "${METALLIB_EMBED_ASM}") else() if (GGML_METAL_SHADER_DEBUG) # custom command to do the following: @@ -70,7 +71,9 @@ else() # note: adding -fno-inline fixes the tests when using MTL_SHADER_VALIDATION=1 # note: unfortunately, we have to call it default.metallib instead of ggml.metallib # ref: https://github.com/ggerganov/whisper.cpp/issues/1720 - set(XC_FLAGS -fno-fast-math -fno-inline -g) + # note: adding -g causes segmentation fault during compile + #set(XC_FLAGS -fno-fast-math -fno-inline -g) + set(XC_FLAGS -fno-fast-math -fno-inline) else() set(XC_FLAGS -O3) endif() @@ -89,7 +92,7 @@ else() add_custom_command( OUTPUT ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/default.metallib COMMAND xcrun -sdk macosx metal ${XC_FLAGS} -c ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-metal.metal -o - | - xcrun -sdk macosx metallib - -o ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/default.metallib + xcrun -sdk macosx metallib - -o ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/default.metallib COMMAND rm -f ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-common.h COMMAND rm -f ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-metal.metal DEPENDS ggml-metal.metal ${METALLIB_COMMON} diff --git a/ml/backend/ggml/ggml/src/ggml-metal/ggml-metal-embed.metal b/ml/backend/ggml/ggml/src/ggml-metal/ggml-metal-embed.metal index 5eba1dafc..dbd955ecd 100644 --- a/ml/backend/ggml/ggml/src/ggml-metal/ggml-metal-embed.metal +++ b/ml/backend/ggml/ggml/src/ggml-metal/ggml-metal-embed.metal @@ -103,6 +103,9 @@ typedef sycl::half2 ggml_half2; #define QI4_1 (QK4_1 / (4 * QR4_1)) #define QR4_1 2 +#define QI_MXFP4 (QK_MXFP4 / (4 * QR_MXFP4)) +#define QR_MXFP4 2 + #define QI5_0 (QK5_0 / (4 * QR5_0)) #define QR5_0 2 @@ -188,6 +191,13 @@ typedef struct { } block_q4_1; static_assert(sizeof(block_q4_1) == 2 * sizeof(ggml_half) + QK4_1 / 2, "wrong q4_1 block size/padding"); +#define QK_MXFP4 32 +typedef struct { + uint8_t e; // E8M0 + uint8_t qs[QK_MXFP4/2]; +} block_mxfp4; +static_assert(sizeof(block_mxfp4) == sizeof(uint8_t) + QK_MXFP4/2, "wrong mxfp4 block size/padding"); + #define QK5_0 32 typedef struct { ggml_half d; // delta @@ -421,13 +431,6 @@ typedef struct { } block_iq4_xs; static_assert(sizeof(block_iq4_xs) == sizeof(ggml_half) + sizeof(uint16_t) + QK_K/64 + QK_K/2, "wrong iq4_xs block size/padding"); -#define MXFP4 32 -typedef struct { - uint8_t d; // scale E8M0 float - uint8_t qs[MXFP4 / 2]; // (32) 4 bit elements E2M1 float -} block_mxfp4; -static_assert(sizeof(block_mxfp4) == sizeof(uint8_t) + MXFP4/2, "wrong mxfp4 block size/padding"); - #endif // GGML_COMMON_DECL #endif // GGML_COMMON_DECL @@ -1085,6 +1088,17 @@ GGML_TABLE_BEGIN(uint32_t, iq3s_grid, 512) 0x0f090307, 0x0f090501, 0x0f090b01, 0x0f0b0505, 0x0f0b0905, 0x0f0d0105, 0x0f0d0703, 0x0f0f0101, GGML_TABLE_END() +// TODO: fix name to kvalues_iq4_nl +GGML_TABLE_BEGIN(int8_t, kvalues_iq4nl, 16) + -127, -104, -83, -65, -49, -35, -22, -10, 1, 13, 25, 38, 53, 69, 89, 113, +GGML_TABLE_END() + +// e2m1 values (doubled) +// ref: https://www.opencompute.org/documents/ocp-microscaling-formats-mx-v1-0-spec-final-pdf +GGML_TABLE_BEGIN(int8_t, kvalues_mxfp4, 16) + 0, 1, 2, 3, 4, 6, 8, 12, 0, -1, -2, -3, -4, -6, -8, -12, +GGML_TABLE_END() + #define NGRID_IQ1S 2048 #define IQ1S_DELTA 0.125f #define IQ1M_DELTA 0.125f @@ -1894,6 +1908,9 @@ GGML_TABLE_END() #define N_R0_Q8_0 4 #define N_SG_Q8_0 2 +#define N_R0_MXFP4 2 +#define N_SG_MXFP4 2 + #define N_R0_Q2_K 4 #define N_SG_Q2_K 2 @@ -1936,9 +1953,6 @@ GGML_TABLE_END() #define N_R0_IQ4_XS 2 #define N_SG_IQ4_XS 2 -#define N_R0_MXFP4 4 -#define N_SG_MXFP4 2 - // kernel argument 
structs // // - element counters (e.g. ne00) typically use int32_t to reduce register usage @@ -2000,8 +2014,18 @@ typedef struct { uint64_t nb2; uint64_t nb3; uint64_t offs; + uint64_t o1[8]; } ggml_metal_kargs_bin; +typedef struct { + int64_t ne0; + int64_t ne1; + size_t nb01; + size_t nb02; + size_t nb11; + size_t nb21; +} ggml_metal_kargs_add_id; + typedef struct { int32_t ne00; int32_t ne01; @@ -2103,14 +2127,18 @@ typedef struct { uint64_t nb21; uint64_t nb22; uint64_t nb23; + int32_t ne32; + int32_t ne33; uint64_t nb31; + uint64_t nb32; + uint64_t nb33; int32_t ne1; int32_t ne2; float scale; float max_bias; float m0; float m1; - uint16_t n_head_log2; + int32_t n_head_log2; float logit_softcap; } ggml_metal_kargs_flash_attn_ext; @@ -2247,8 +2275,16 @@ typedef struct { typedef struct { int32_t ne00; int32_t ne00_4; - uint64_t nb01; + uint64_t nb1; + uint64_t nb2; + uint64_t nb3; float eps; + int32_t nef1[3]; + int32_t nef2[3]; + int32_t nef3[3]; + uint64_t nbf1[3]; + uint64_t nbf2[3]; + uint64_t nbf3[3]; } ggml_metal_kargs_rms_norm; typedef struct { @@ -2296,6 +2332,19 @@ typedef struct { int32_t KHW; // KH * KW, pre-computed on CPU to save GPU resources } ggml_metal_kargs_im2col; +typedef struct{ + int32_t ne00; + uint64_t nb01; + int32_t ne10; + uint64_t nb11; + int32_t ne0; + uint64_t nb1; + int32_t i00; + int32_t i10; + float alpha; + float limit; +} ggml_metal_kargs_glu; + typedef struct { int64_t ne00; int64_t ne01; @@ -2324,14 +2373,26 @@ typedef struct { } ggml_metal_kargs_sum_rows; typedef struct { - int64_t ne00; - int64_t ne01; - int64_t ne02; + int32_t ne00; + int32_t ne01; + int32_t ne02; + uint64_t nb01; + uint64_t nb02; + uint64_t nb03; + int32_t ne11; + int32_t ne12; + int32_t ne13; + uint64_t nb11; + uint64_t nb12; + uint64_t nb13; + uint64_t nb1; + uint64_t nb2; + uint64_t nb3; float scale; float max_bias; float m0; float m1; - uint32_t n_head_log2; + int32_t n_head_log2; } ggml_metal_kargs_soft_max; typedef struct { @@ -2362,26 +2423,26 @@ typedef struct { typedef struct { int64_t d_state; int64_t d_inner; + int64_t n_head; + int64_t n_group; int64_t n_seq_tokens; int64_t n_seqs; - uint64_t nb00; + int64_t s_off; uint64_t nb01; uint64_t nb02; - uint64_t nb10; + uint64_t nb03; uint64_t nb11; uint64_t nb12; uint64_t nb13; - uint64_t nb20; uint64_t nb21; uint64_t nb22; - uint64_t nb30; uint64_t nb31; - uint64_t nb40; uint64_t nb41; uint64_t nb42; - uint64_t nb50; + uint64_t nb43; uint64_t nb51; uint64_t nb52; + uint64_t nb53; } ggml_metal_kargs_ssm_scan; typedef struct { @@ -2395,6 +2456,22 @@ typedef struct { uint64_t nb2; } ggml_metal_kargs_get_rows; +typedef struct { + int32_t nk0; + int32_t ne01; + uint64_t nb01; + uint64_t nb02; + uint64_t nb03; + int32_t ne11; + int32_t ne12; + uint64_t nb10; + uint64_t nb11; + uint64_t nb12; + uint64_t nb1; + uint64_t nb2; + uint64_t nb3; +} ggml_metal_kargs_set_rows; + typedef struct { int64_t ne00; int64_t ne01; @@ -2523,6 +2600,33 @@ constexpr constant static float kvalues_iq4nl_f[16] = { -127.f, -104.f, -83.f, -65.f, -49.f, -35.f, -22.f, -10.f, 1.f, 13.f, 25.f, 38.f, 53.f, 69.f, 89.f, 113.f }; +constexpr constant static float kvalues_mxfp4_f[16] = { + 0, .5f, 1.f, 1.5f, 2.f, 3.f, 4.f, 6.f, -0, -.5f, -1.f, -1.5f, -2.f, -3.f, -4.f, -6.f +}; + +static inline int best_index_int8(int n, constant float * val, float x) { + if (x <= val[0]) return 0; + if (x >= val[n-1]) return n-1; + int ml = 0, mu = n-1; + while (mu-ml > 1) { + int mav = (ml+mu)/2; + if (x < val[mav]) mu = mav; else ml = mav; + } + return x - val[mu-1] < val[mu] - 
x ? mu-1 : mu; +} + +static inline float e8m0_to_fp32(uint8_t x) { + uint32_t bits; + + if (x == 0) { + bits = 0x00400000; + } else { + bits = (uint32_t) x << 23; + } + + return as_type(bits); +} + // NOTE: this is not dequantizing - we are simply fitting the template template void dequantize_f32(device const float4x4 * src, short il, thread type4x4 & reg) { @@ -2585,6 +2689,199 @@ void dequantize_q4_0_t4(device const block_q4_0 * xb, short il, thread type4 & r } } +void quantize_q4_0(device const float * src, device block_q4_0 & dst) { +#pragma METAL fp math_mode(safe) + float amax = 0.0f; // absolute max + float max = 0.0f; + + for (int j = 0; j < QK4_0; j++) { + const float v = src[j]; + if (amax < fabs(v)) { + amax = fabs(v); + max = v; + } + } + + const float d = max / -8; + const float id = d ? 1.0f/d : 0.0f; + + dst.d = d; + + for (int j = 0; j < QK4_0/2; ++j) { + const float x0 = src[0 + j]*id; + const float x1 = src[QK4_0/2 + j]*id; + + const uint8_t xi0 = MIN(15, (int8_t)(x0 + 8.5f)); + const uint8_t xi1 = MIN(15, (int8_t)(x1 + 8.5f)); + + dst.qs[j] = xi0; + dst.qs[j] |= xi1 << 4; + } +} + +void quantize_q4_1(device const float * src, device block_q4_1 & dst) { +#pragma METAL fp math_mode(safe) + float min = FLT_MAX; + float max = -FLT_MAX; + + for (int j = 0; j < QK4_1; j++) { + const float v = src[j]; + if (min > v) min = v; + if (max < v) max = v; + } + + const float d = (max - min) / ((1 << 4) - 1); + const float id = d ? 1.0f/d : 0.0f; + + dst.d = d; + dst.m = min; + + for (int j = 0; j < QK4_1/2; ++j) { + const float x0 = (src[0 + j] - min)*id; + const float x1 = (src[QK4_1/2 + j] - min)*id; + + const uint8_t xi0 = MIN(15, (int8_t)(x0 + 0.5f)); + const uint8_t xi1 = MIN(15, (int8_t)(x1 + 0.5f)); + + dst.qs[j] = xi0; + dst.qs[j] |= xi1 << 4; + } +} + +void quantize_q5_0(device const float * src, device block_q5_0 & dst) { +#pragma METAL fp math_mode(safe) + float amax = 0.0f; // absolute max + float max = 0.0f; + + for (int j = 0; j < QK5_0; j++) { + const float v = src[j]; + if (amax < fabs(v)) { + amax = fabs(v); + max = v; + } + } + + const float d = max / -16; + const float id = d ? 1.0f/d : 0.0f; + + dst.d = d; + + uint32_t qh = 0; + for (int j = 0; j < QK5_0/2; ++j) { + const float x0 = src[0 + j]*id; + const float x1 = src[QK5_0/2 + j]*id; + + const uint8_t xi0 = MIN(31, (int8_t)(x0 + 16.5f)); + const uint8_t xi1 = MIN(31, (int8_t)(x1 + 16.5f)); + + dst.qs[j] = (xi0 & 0xf) | ((xi1 & 0xf) << 4); + qh |= ((xi0 & 0x10u) >> 4) << (j + 0); + qh |= ((xi1 & 0x10u) >> 4) << (j + QK5_0/2); + } + + thread const uint8_t * qh8 = (thread const uint8_t *)&qh; + + for (int j = 0; j < 4; ++j) { + dst.qh[j] = qh8[j]; + } +} + +void quantize_q5_1(device const float * src, device block_q5_1 & dst) { +#pragma METAL fp math_mode(safe) + float max = src[0]; + float min = src[0]; + + for (int j = 1; j < QK5_1; j++) { + const float v = src[j]; + min = v < min ? v : min; + max = v > max ? v : max; + } + + const float d = (max - min) / 31; + const float id = d ? 
1.0f/d : 0.0f; + + dst.d = d; + dst.m = min; + + uint32_t qh = 0; + for (int j = 0; j < QK5_1/2; ++j) { + const float x0 = (src[0 + j] - min)*id; + const float x1 = (src[QK5_1/2 + j] - min)*id; + + const uint8_t xi0 = (uint8_t)(x0 + 0.5f); + const uint8_t xi1 = (uint8_t)(x1 + 0.5f); + + dst.qs[j] = (xi0 & 0xf) | ((xi1 & 0xf) << 4); + qh |= ((xi0 & 0x10u) >> 4) << (j + 0); + qh |= ((xi1 & 0x10u) >> 4) << (j + QK5_1/2); + } + + thread const uint8_t * qh8 = (thread const uint8_t *)&qh; + + for (int j = 0; j < 4; ++j) { + dst.qh[j] = qh8[j]; + } +} + +void quantize_q8_0(device const float * src, device block_q8_0 & dst) { +#pragma METAL fp math_mode(safe) + float amax = 0.0f; // absolute max + + for (int j = 0; j < QK8_0; j++) { + const float v = src[j]; + amax = MAX(amax, fabs(v)); + } + + const float d = amax / ((1 << 7) - 1); + const float id = d ? 1.0f/d : 0.0f; + + dst.d = d; + + for (int j = 0; j < QK8_0; ++j) { + const float x0 = src[j]*id; + + dst.qs[j] = round(x0); + } +} + +void quantize_iq4_nl(device const float * src, device block_iq4_nl & dst) { +#pragma METAL fp math_mode(safe) + float amax = 0.0f; // absolute max + float max = 0.0f; + + for (int j = 0; j < QK4_NL; j++) { + const float v = src[j]; + if (amax < fabs(v)) { + amax = fabs(v); + max = v; + } + } + + const float d = max / kvalues_iq4nl_f[0]; + const float id = d ? 1.0f/d : 0.0f; + + float sumqx = 0, sumq2 = 0; + for (int j = 0; j < QK4_NL/2; ++j) { + const float x0 = src[0 + j]*id; + const float x1 = src[QK4_NL/2 + j]*id; + + const uint8_t xi0 = best_index_int8(16, kvalues_iq4nl_f, x0); + const uint8_t xi1 = best_index_int8(16, kvalues_iq4nl_f, x1); + + dst.qs[j] = xi0 | (xi1 << 4); + + const float v0 = kvalues_iq4nl_f[xi0]; + const float v1 = kvalues_iq4nl_f[xi1]; + const float w0 = src[0 + j]*src[0 + j]; + const float w1 = src[QK4_NL/2 + j]*src[QK4_NL/2 + j]; + sumqx += w0*v0*src[j] + w1*v1*src[QK4_NL/2 + j]; + sumq2 += w0*v0*v0 + w1*v1*v1; + + } + + dst.d = sumq2 > 0 ? sumqx/sumq2 : d; +} + template void dequantize_q4_1(device const block_q4_1 * xb, short il, thread type4x4 & reg) { device const uint16_t * qs = ((device const uint16_t *)xb + 2); @@ -2767,6 +3064,36 @@ void dequantize_q8_0_t4(device const block_q8_0 *xb, short il, thread type4 & re } } +template +void dequantize_mxfp4(device const block_mxfp4 * xb, short il, thread type4x4 & reg) { + device const uint8_t * q2 = (device const uint8_t *)xb->qs; + + const float d = e8m0_to_fp32(xb->e); + const uint8_t shr = il >= 1 ? 4 : 0; + + for (int i = 0; i < 4; ++i) { + reg[i][0] = d * kvalues_mxfp4_f[(q2[4*i + 0] >> shr) & 0x0F]; + reg[i][1] = d * kvalues_mxfp4_f[(q2[4*i + 1] >> shr) & 0x0F]; + reg[i][2] = d * kvalues_mxfp4_f[(q2[4*i + 2] >> shr) & 0x0F]; + reg[i][3] = d * kvalues_mxfp4_f[(q2[4*i + 3] >> shr) & 0x0F]; + } +} + +template +void dequantize_mxfp4_t4(device const block_mxfp4 * xb, short il, thread type4 & reg) { + device const uint8_t * q2 = (device const uint8_t *)xb->qs; + + const float d = e8m0_to_fp32(xb->e); + const short il4 = il%4; + + const uint8_t shr = il >= 4 ? 
4 : 0; + + reg[0] = d * kvalues_mxfp4_f[(q2[4*il4 + 0] >> shr) & 0x0F]; + reg[1] = d * kvalues_mxfp4_f[(q2[4*il4 + 1] >> shr) & 0x0F]; + reg[2] = d * kvalues_mxfp4_f[(q2[4*il4 + 2] >> shr) & 0x0F]; + reg[3] = d * kvalues_mxfp4_f[(q2[4*il4 + 3] >> shr) & 0x0F]; +} + template void dequantize_q2_K(device const block_q2_K *xb, short il, thread type4x4 & reg) { const float d = xb->d; @@ -3116,7 +3443,8 @@ enum ggml_sort_order { // general-purpose kernel for addition, subtraction, multiplication and division of two tensors // pros: works for non-contiguous tensors, supports broadcast across all dims // cons: not very efficient -kernel void kernel_add( +template +kernel void kernel_add_fuse_impl( constant ggml_metal_kargs_bin & args, device const char * src0, device const char * src1, @@ -3132,16 +3460,39 @@ kernel void kernel_add( const int i12 = i02%args.ne12; const int i11 = i01%args.ne11; - device const char * src0_ptr = src0 + i03*args.nb03 + i02*args.nb02 + i01*args.nb01 + args.offs; - device const char * src1_ptr = src1 + i13*args.nb13 + i12*args.nb12 + i11*args.nb11; - device char * dst_ptr = dst + i03*args.nb3 + i02*args.nb2 + i01*args.nb1 + args.offs; + device const float * src0_ptr = (device const float *) (src0 + i03*args.nb03 + i02*args.nb02 + i01*args.nb01 + args.offs); + device float * dst_ptr = (device float *) (dst + i03*args.nb3 + i02*args.nb2 + i01*args.nb1 + args.offs); + + device const float * src1_ptr[F]; + for (short j = 0; j < F; ++j) { + src1_ptr[j] = (device const float *) (src1 + args.o1[j] + i13*args.nb13 + i12*args.nb12 + i11*args.nb11); + } for (int i0 = tpitg.x; i0 < args.ne0; i0 += ntg.x) { const int i10 = i0%args.ne10; - *((device float *)(dst_ptr + i0*args.nb0)) = *((device float *)(src0_ptr + i0*args.nb00)) + *((device float *)(src1_ptr + i10*args.nb10)); + + float res = src0_ptr[i0]; + +#pragma unroll + for (short j = 0; j < F; ++j) { + res += src1_ptr[j][i10]; + } + + dst_ptr[i0] = res; } } +typedef decltype(kernel_add_fuse_impl<2>) kernel_add_fuse_t; + +template [[host_name("kernel_add")]] kernel kernel_add_fuse_t kernel_add_fuse_impl<1>; +template [[host_name("kernel_add_fuse_2")]] kernel kernel_add_fuse_t kernel_add_fuse_impl<2>; +template [[host_name("kernel_add_fuse_3")]] kernel kernel_add_fuse_t kernel_add_fuse_impl<3>; +template [[host_name("kernel_add_fuse_4")]] kernel kernel_add_fuse_t kernel_add_fuse_impl<4>; +template [[host_name("kernel_add_fuse_5")]] kernel kernel_add_fuse_t kernel_add_fuse_impl<5>; +template [[host_name("kernel_add_fuse_6")]] kernel kernel_add_fuse_t kernel_add_fuse_impl<6>; +template [[host_name("kernel_add_fuse_7")]] kernel kernel_add_fuse_t kernel_add_fuse_impl<7>; +template [[host_name("kernel_add_fuse_8")]] kernel kernel_add_fuse_t kernel_add_fuse_impl<8>; + kernel void kernel_sub( constant ggml_metal_kargs_bin & args, device const char * src0, @@ -3159,7 +3510,7 @@ kernel void kernel_sub( const int i11 = i01%args.ne11; device const char * src0_ptr = src0 + i03*args.nb03 + i02*args.nb02 + i01*args.nb01 + args.offs; - device const char * src1_ptr = src1 + i13*args.nb13 + i12*args.nb12 + i11*args.nb11; + device const char * src1_ptr = src1 + i13*args.nb13 + i12*args.nb12 + i11*args.nb11 + args.o1[0]; device char * dst_ptr = dst + i03*args.nb3 + i02*args.nb2 + i01*args.nb1 + args.offs; for (int i0 = tpitg.x; i0 < args.ne0; i0 += ntg.x) { @@ -3184,9 +3535,9 @@ kernel void kernel_mul( const int i12 = i02%args.ne12; const int i11 = i01%args.ne11; - device const char * src0_ptr = src0 + i03*args.nb03 + i02*args.nb02 + 
i01*args.nb01; - device const char * src1_ptr = src1 + i13*args.nb13 + i12*args.nb12 + i11*args.nb11; - device char * dst_ptr = dst + i03*args.nb3 + i02*args.nb2 + i01*args.nb1; + device const char * src0_ptr = src0 + i03*args.nb03 + i02*args.nb02 + i01*args.nb01 + args.offs; + device const char * src1_ptr = src1 + i13*args.nb13 + i12*args.nb12 + i11*args.nb11 + args.o1[0]; + device char * dst_ptr = dst + i03*args.nb3 + i02*args.nb2 + i01*args.nb1 + args.offs; for (int i0 = tpitg.x; i0 < args.ne0; i0 += ntg.x) { const int i10 = i0%args.ne10; @@ -3210,9 +3561,9 @@ kernel void kernel_div( const int i12 = i02%args.ne12; const int i11 = i01%args.ne11; - device const char * src0_ptr = src0 + i03*args.nb03 + i02*args.nb02 + i01*args.nb01; - device const char * src1_ptr = src1 + i13*args.nb13 + i12*args.nb12 + i11*args.nb11; - device char * dst_ptr = dst + i03*args.nb3 + i02*args.nb2 + i01*args.nb1; + device const char * src0_ptr = src0 + i03*args.nb03 + i02*args.nb02 + i01*args.nb01 + args.offs; + device const char * src1_ptr = src1 + i13*args.nb13 + i12*args.nb12 + i11*args.nb11 + args.o1[0]; + device char * dst_ptr = dst + i03*args.nb3 + i02*args.nb2 + i01*args.nb1 + args.offs; for (int i0 = tpitg.x; i0 < args.ne0; i0 += ntg.x) { const int i10 = i0%args.ne10; @@ -3220,6 +3571,32 @@ kernel void kernel_div( } } +kernel void kernel_add_id( + constant ggml_metal_kargs_add_id & args, + device const char * src0, + device const char * src1, + device const char * src2, + device char * dst, + uint3 tgpig[[threadgroup_position_in_grid]], + ushort3 tpitg[[thread_position_in_threadgroup]], + ushort3 ntg[[threads_per_threadgroup]]) { + const int i1 = tgpig.x; + const int i2 = tgpig.y; + + const int i11 = *((device const int32_t *) (src2 + i1*sizeof(int32_t) + i2*args.nb21)); + + const size_t nb1 = args.ne0 * sizeof(float); + const size_t nb2 = args.ne1 * nb1; + + device float * dst_row = (device float *)((device char *)dst + i1*nb1 + i2*nb2); + device const float * src0_row = (device const float *)((device char *)src0 + i1*args.nb01 + i2*args.nb02); + device const float * src1_row = (device const float *)((device char *)src1 + i11*args.nb11); + + for (int i0 = tpitg.x; i0 < args.ne0; i0 += ntg.x) { + dst_row[i0] = src0_row[i0] + src1_row[i0]; + } +} + template kernel void kernel_repeat( constant ggml_metal_kargs_repeat & args, @@ -3254,60 +3631,161 @@ template [[host_name("kernel_repeat_i16")]] kernel kernel_repeat_t kernel_repeat // assumption: src1 is a row // broadcast src1 into src0 -kernel void kernel_add_row( +template +kernel void kernel_add_row_c4_fuse_impl( constant ggml_metal_kargs_bin & args, - device const float4 * src0, - device const float4 * src1, - device float4 * dst, + device const char * src0, + device const char * src1, + device char * dst, uint tpig[[thread_position_in_grid]]) { + const uint nb = args.ne00/4; - dst[tpig] = src0[tpig] + src1[tpig % nb]; + const uint i = tpig % nb; + + device const float4 * src0_row = (device const float4 *) (src0); + device float4 * dst_row = (device float4 *) (dst); + + device const float4 * src1_row[F]; + for (short j = 0; j < F; ++j) { + src1_row[j] = (device const float4 *) (src1 + args.o1[j]); + } + + float4 res = src0_row[tpig]; + +#pragma unroll(F) + for (short j = 0; j < F; ++j) { + res += src1_row[j][i]; + } + + dst_row[tpig] = res; } -kernel void kernel_sub_row( +typedef decltype(kernel_add_row_c4_fuse_impl<1>) kernel_add_row_c4_fuse_t; + +template [[host_name("kernel_add_row_c4")]] kernel kernel_add_row_c4_fuse_t 
kernel_add_row_c4_fuse_impl<1>; +template [[host_name("kernel_add_row_c4_fuse_2")]] kernel kernel_add_row_c4_fuse_t kernel_add_row_c4_fuse_impl<2>; +template [[host_name("kernel_add_row_c4_fuse_3")]] kernel kernel_add_row_c4_fuse_t kernel_add_row_c4_fuse_impl<3>; +template [[host_name("kernel_add_row_c4_fuse_4")]] kernel kernel_add_row_c4_fuse_t kernel_add_row_c4_fuse_impl<4>; +template [[host_name("kernel_add_row_c4_fuse_5")]] kernel kernel_add_row_c4_fuse_t kernel_add_row_c4_fuse_impl<5>; +template [[host_name("kernel_add_row_c4_fuse_6")]] kernel kernel_add_row_c4_fuse_t kernel_add_row_c4_fuse_impl<6>; +template [[host_name("kernel_add_row_c4_fuse_7")]] kernel kernel_add_row_c4_fuse_t kernel_add_row_c4_fuse_impl<7>; +template [[host_name("kernel_add_row_c4_fuse_8")]] kernel kernel_add_row_c4_fuse_t kernel_add_row_c4_fuse_impl<8>; + +template +kernel void kernel_sub_row_c4_fuse_impl( constant ggml_metal_kargs_bin & args, - device const float4 * src0, - device const float4 * src1, - device float4 * dst, + device const char * src0, + device const char * src1, + device char * dst, uint tpig[[thread_position_in_grid]]) { + const uint nb = args.ne00/4; - dst[tpig] = src0[tpig] - src1[tpig % nb]; + const uint i = tpig % nb; + + device const float4 * src0_row = (device const float4 *) (src0); + device float4 * dst_row = (device float4 *) (dst); + + device const float4 * src1_row[F]; + for (short j = 0; j < F; ++j) { + src1_row[j] = (device const float4 *) (src1 + args.o1[j]); + } + + float4 res = src0_row[tpig]; + +#pragma unroll(F) + for (short j = 0; j < F; ++j) { + res -= src1_row[j][i]; + } + + dst_row[tpig] = res; } -kernel void kernel_mul_row( +typedef decltype(kernel_sub_row_c4_fuse_impl<1>) kernel_sub_row_c4_fuse_t; + +template [[host_name("kernel_sub_row_c4")]] kernel kernel_sub_row_c4_fuse_t kernel_sub_row_c4_fuse_impl<1>; + +template +kernel void kernel_mul_row_c4_fuse_impl( constant ggml_metal_kargs_bin & args, - device const float4 * src0, - device const float4 * src1, - device float4 * dst, + device const char * src0, + device const char * src1, + device char * dst, uint tpig[[thread_position_in_grid]]) { + const uint nb = args.ne00/4; - dst[tpig] = src0[tpig] * src1[tpig % nb]; + const uint i = tpig % nb; + + device const float4 * src0_row = (device const float4 *) (src0); + device float4 * dst_row = (device float4 *) (dst); + + device const float4 * src1_row[F]; + for (short j = 0; j < F; ++j) { + src1_row[j] = (device const float4 *) (src1 + args.o1[j]); + } + + float4 res = src0_row[tpig]; + +#pragma unroll(F) + for (short j = 0; j < F; ++j) { + res *= src1_row[j][i]; + } + + dst_row[tpig] = res; } -kernel void kernel_div_row( +typedef decltype(kernel_mul_row_c4_fuse_impl<1>) kernel_mul_row_c4_fuse_t; + +template [[host_name("kernel_mul_row_c4")]] kernel kernel_mul_row_c4_fuse_t kernel_mul_row_c4_fuse_impl<1>; + +template +kernel void kernel_div_row_c4_fuse_impl( constant ggml_metal_kargs_bin & args, - device const float4 * src0, - device const float4 * src1, - device float4 * dst, + device const char * src0, + device const char * src1, + device char * dst, uint tpig[[thread_position_in_grid]]) { + const uint nb = args.ne00/4; - dst[tpig] = src0[tpig] / src1[tpig % nb]; + const uint i = tpig % nb; + + device const float4 * src0_row = (device const float4 *) (src0); + device float4 * dst_row = (device float4 *) (dst); + + device const float4 * src1_row[F]; + for (short j = 0; j < F; ++j) { + src1_row[j] = (device const float4 *) (src1 + args.o1[j]); + } + + float4 res = 
src0_row[tpig]; + +#pragma unroll(F) + for (short j = 0; j < F; ++j) { + res /= src1_row[j][i]; + } + + dst_row[tpig] = res; } +typedef decltype(kernel_div_row_c4_fuse_impl<1>) kernel_div_row_c4_fuse_t; + +template [[host_name("kernel_div_row_c4")]] kernel kernel_div_row_c4_fuse_t kernel_div_row_c4_fuse_impl<1>; + kernel void kernel_scale( device const float * src0, device float * dst, constant float & scale, + constant float & bias, uint tpig[[thread_position_in_grid]]) { - dst[tpig] = src0[tpig] * scale; + dst[tpig] = src0[tpig] * scale + bias; } kernel void kernel_scale_4( device const float4 * src0, device float4 * dst, constant float & scale, + constant float & bias, uint tpig[[thread_position_in_grid]]) { - dst[tpig] = src0[tpig] * scale; + dst[tpig] = src0[tpig] * scale + bias; } kernel void kernel_clamp( @@ -3344,6 +3822,7 @@ kernel void kernel_tanh( constant float GELU_COEF_A = 0.044715f; constant float GELU_QUICK_COEF = -1.702f; constant float SQRT_2_OVER_PI = 0.79788456080286535587989211986876f; +constant float SQRT_2_INV = 0.70710678118654752440084436210484f; kernel void kernel_gelu( device const float * src0, @@ -3385,6 +3864,42 @@ kernel void kernel_gelu_quick_4( dst[tpig] = x*(1.0f/(1.0f+exp(GELU_QUICK_COEF*x))); } +// based on Abramowitz and Stegun formula 7.1.26 or similar Hastings' approximation +// ref: https://www.johndcook.com/blog/python_erf/ +constant float p_erf = 0.3275911f; +constant float a1_erf = 0.254829592f; +constant float a2_erf = -0.284496736f; +constant float a3_erf = 1.421413741f; +constant float a4_erf = -1.453152027f; +constant float a5_erf = 1.061405429f; + +template +T erf_approx(T x) { + T sign_x = sign(x); + x = fabs(x); + T t = 1.0f / (1.0f + p_erf * x); + T y = 1.0f - (((((a5_erf * t + a4_erf) * t) + a3_erf) * t + a2_erf) * t + a1_erf) * t * exp(-x * x); + return sign_x * y; +} + +kernel void kernel_gelu_erf( + device const float * src0, + device float * dst, + uint tpig[[thread_position_in_grid]]) { + device const float & x = src0[tpig]; + + dst[tpig] = 0.5f*x*(1.0f+erf_approx(x*SQRT_2_INV)); +} + +kernel void kernel_gelu_erf_4( + device const float4 * src0, + device float4 * dst, + uint tpig[[thread_position_in_grid]]) { + device const float4 & x = src0[tpig]; + + dst[tpig] = 0.5f*x*(1.0f+erf_approx(x*SQRT_2_INV)); +} + kernel void kernel_silu( device const float * src0, device float * dst, @@ -3444,6 +3959,185 @@ kernel void kernel_neg( dst[tpig] = -src0[tpig]; } +kernel void kernel_abs( + device const float * src0, + device float * dst, + uint tpig[[thread_position_in_grid]]) { + dst[tpig] = fabs(src0[tpig]); +} + +kernel void kernel_sgn( + device const float * src0, + device float * dst, + uint tpig[[thread_position_in_grid]]) { + device const float & x = src0[tpig]; + dst[tpig] = (x > 0.0f) ? 1.0f : ((x < 0.0f) ? -1.0f : 0.0f); +} + +kernel void kernel_step( + device const float * src0, + device float * dst, + uint tpig[[thread_position_in_grid]]) { + dst[tpig] = src0[tpig] > 0.0f ? 
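[note] kernel_gelu_erf computes 0.5*x*(1 + erf(x/sqrt(2))) with the Abramowitz & Stegun 7.1.26 polynomial named in the comment, since no erf builtin is available in the shading language; the approximation's maximum absolute error is about 1.5e-7. A host-side C++ check of the same constants against std::erf (a sketch; the sampled points are arbitrary):

    #include <cmath>
    #include <cstdio>

    // Abramowitz & Stegun 7.1.26 rational approximation to erf(x),
    // using the same constants as the kernel.
    static float erf_approx(float x) {
        const float p  = 0.3275911f;
        const float a1 = 0.254829592f, a2 = -0.284496736f, a3 = 1.421413741f;
        const float a4 = -1.453152027f, a5 = 1.061405429f;
        const float s = x < 0.0f ? -1.0f : 1.0f;
        x = std::fabs(x);
        const float t = 1.0f / (1.0f + p*x);
        const float y = 1.0f - (((((a5*t + a4)*t) + a3)*t + a2)*t + a1)*t*std::exp(-x*x);
        return s*y;
    }

    int main() {
        const float SQRT_2_INV = 0.70710678118654752440f;
        for (float x : {-2.0f, -0.5f, 0.0f, 1.0f, 3.0f}) {
            const float gelu = 0.5f*x*(1.0f + erf_approx(x*SQRT_2_INV));
            printf("x=%5.2f gelu_erf=%9.6f (erf err=%g)\n",
                   x, gelu, erf_approx(x) - std::erf(x));
        }
    }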
1.0f : 0.0f; +} + +kernel void kernel_hardswish( + device const float * src0, + device float * dst, + uint tpig[[thread_position_in_grid]]) { + device const float & x = src0[tpig]; + dst[tpig] = x * fmin(1.0f, fmax(0.0f, (x + 3.0f) / 6.0f)); +} + +kernel void kernel_hardsigmoid( + device const float * src0, + device float * dst, + uint tpig[[thread_position_in_grid]]) { + device const float & x = src0[tpig]; + dst[tpig] = fmin(1.0f, fmax(0.0f, (x + 3.0f) / 6.0f)); +} + +kernel void kernel_exp( + device const float * src0, + device float * dst, + uint tpig[[thread_position_in_grid]]) { + dst[tpig] = exp(src0[tpig]); +} + +kernel void kernel_reglu( + device const char * src0, + device const char * src1, + device char * dst, + constant ggml_metal_kargs_glu & args, + uint tgpig[[threadgroup_position_in_grid]], + uint tpitg[[thread_position_in_threadgroup]], + uint ntg[[threads_per_threadgroup]]) { + device const float * src0_row = (device const float *) ((device const char *) src0 + tgpig*args.nb01) + args.i00; + device const float * src1_row = (device const float *) ((device const char *) src1 + tgpig*args.nb11) + args.i10; + device float * dst_row = (device float *) ((device char *) dst + tgpig*args.nb1); + + for (int i0 = tpitg; i0 < args.ne0; i0 += ntg) { + const float x0 = src0_row[i0]; + const float x1 = src1_row[i0]; + + dst_row[i0] = x0*x1*(x0 > 0.0f); + } +} + +kernel void kernel_geglu( + device const char * src0, + device const char * src1, + device char * dst, + constant ggml_metal_kargs_glu & args, + uint tgpig[[threadgroup_position_in_grid]], + uint tpitg[[thread_position_in_threadgroup]], + uint ntg[[threads_per_threadgroup]]) { + device const float * src0_row = (device const float *) ((device const char *) src0 + tgpig*args.nb01) + args.i00; + device const float * src1_row = (device const float *) ((device const char *) src1 + tgpig*args.nb11) + args.i10; + device float * dst_row = (device float *) ((device char *) dst + tgpig*args.nb1); + + for (int i0 = tpitg; i0 < args.ne0; i0 += ntg) { + const float x0 = src0_row[i0]; + const float x1 = src1_row[i0]; + + const float gelu = 0.5f*x0*(1.0f + precise::tanh(SQRT_2_OVER_PI*x0*(1.0f + GELU_COEF_A*x0*x0))); + + dst_row[i0] = gelu*x1; + } +} + +kernel void kernel_swiglu( + device const char * src0, + device const char * src1, + device char * dst, + constant ggml_metal_kargs_glu & args, + uint tgpig[[threadgroup_position_in_grid]], + uint tpitg[[thread_position_in_threadgroup]], + uint ntg[[threads_per_threadgroup]]) { + device const float * src0_row = (device const float *) ((device const char *) src0 + tgpig*args.nb01) + args.i00; + device const float * src1_row = (device const float *) ((device const char *) src1 + tgpig*args.nb11) + args.i10; + device float * dst_row = (device float *) ((device char *) dst + tgpig*args.nb1); + + for (int i0 = tpitg; i0 < args.ne0; i0 += ntg) { + const float x0 = src0_row[i0]; + const float x1 = src1_row[i0]; + + const float silu = x0 / (1.0f + exp(-x0)); + + dst_row[i0] = silu*x1; + } +} + +kernel void kernel_swiglu_oai( + device const char * src0, + device const char * src1, + device char * dst, + constant ggml_metal_kargs_glu & args, + uint tgpig[[threadgroup_position_in_grid]], + uint tpitg[[thread_position_in_threadgroup]], + uint ntg[[threads_per_threadgroup]]) { + device const float * src0_row = (device const float *) ((device const char *) src0 + tgpig*args.nb01) + args.i00; + device const float * src1_row = (device const float *) ((device const char *) src1 + tgpig*args.nb11) + args.i10; + 
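[note] The GLU kernels share one shape: a gate row read at offset args.i00 and a value row at args.i10 (halves of a split last dimension when src1 aliases src0), combined as activation(gate) * value; kernel_swiglu_oai, which continues just below, additionally clamps both operands and scales the sigmoid by args.alpha. A scalar C++ reference of the variants (function names are illustrative):

    #include <algorithm>
    #include <cmath>

    // Scalar reference for the GLU family above: out = act(gate) * value.
    float reglu (float g, float v) { return (g > 0.0f ? g : 0.0f) * v; }
    float swiglu(float g, float v) { return (g / (1.0f + std::exp(-g))) * v; }

    // swiglu_oai: clamp gate and value, alpha-scaled sigmoid, value biased by 1
    // (limit and alpha mirror args.limit / args.alpha in the kernel).
    float swiglu_oai(float g, float v, float limit, float alpha) {
        g = std::min(g, limit);
        v = std::max(std::min(v, limit), -limit);
        const float glu = g / (1.0f + std::exp(-g*alpha));
        return glu * (1.0f + v);
    }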
device float * dst_row = (device float *) ((device char *) dst + tgpig*args.nb1); + + for (int i0 = tpitg; i0 < args.ne0; i0 += ntg) { + float x0 = src0_row[i0]; + float x1 = src1_row[i0]; + + x0 = min(x0, args.limit); + x1 = max(min(x1, args.limit), -args.limit); + + float out_glu = x0 / (1.0f + exp(-x0 * args.alpha)); + out_glu = out_glu * (1.0f + x1); + + dst_row[i0] = out_glu; + } +} + +kernel void kernel_geglu_erf( + device const char * src0, + device const char * src1, + device char * dst, + constant ggml_metal_kargs_glu & args, + uint tgpig[[threadgroup_position_in_grid]], + uint tpitg[[thread_position_in_threadgroup]], + uint ntg[[threads_per_threadgroup]]) { + device const float * src0_row = (device const float *) ((device const char *) src0 + tgpig*args.nb01) + args.i00; + device const float * src1_row = (device const float *) ((device const char *) src1 + tgpig*args.nb11) + args.i10; + device float * dst_row = (device float *) ((device char *) dst + tgpig*args.nb1); + + for (int i0 = tpitg; i0 < args.ne0; i0 += ntg) { + const float x0 = src0_row[i0]; + const float x1 = src1_row[i0]; + + const float gelu_erf = 0.5f*x0*(1.0f+erf_approx(x0*SQRT_2_INV)); + + dst_row[i0] = gelu_erf*x1; + } +} + +kernel void kernel_geglu_quick( + device const char * src0, + device const char * src1, + device char * dst, + constant ggml_metal_kargs_glu & args, + uint tgpig[[threadgroup_position_in_grid]], + uint tpitg[[thread_position_in_threadgroup]], + uint ntg[[threads_per_threadgroup]]) { + device const float * src0_row = (device const float *) ((device const char *) src0 + tgpig*args.nb01) + args.i00; + device const float * src1_row = (device const float *) ((device const char *) src1 + tgpig*args.nb11) + args.i10; + device float * dst_row = (device float *) ((device char *) dst + tgpig*args.nb1); + + for (int i0 = tpitg; i0 < args.ne0; i0 += ntg) { + const float x0 = src0_row[i0]; + const float x1 = src1_row[i0]; + + const float gelu_quick = x0*(1.0f/(1.0f+exp(GELU_QUICK_COEF*x0))); + + dst_row[i0] = gelu_quick*x1; + } +} + template kernel void kernel_sum_rows( constant ggml_metal_kargs_sum_rows & args, @@ -3503,27 +4197,33 @@ template kernel void kernel_soft_max( device const char * src0, device const char * src1, + device const char * src2, device char * dst, constant ggml_metal_kargs_soft_max & args, threadgroup float * buf [[threadgroup(0)]], - uint tgpig[[threadgroup_position_in_grid]], - uint tpitg[[thread_position_in_threadgroup]], + uint3 tgpig[[threadgroup_position_in_grid]], + uint3 tpitg[[thread_position_in_threadgroup]], uint sgitg[[simdgroup_index_in_threadgroup]], uint tiisg[[thread_index_in_simdgroup]], - uint ntg[[threads_per_threadgroup]]) { - const int64_t i03 = (tgpig) / (args.ne02*args.ne01); - const int64_t i02 = (tgpig - i03*args.ne02*args.ne01) / args.ne01; - const int64_t i01 = (tgpig - i03*args.ne02*args.ne01 - i02*args.ne01); + uint3 tptg[[threads_per_threadgroup]]) { + const int32_t i03 = tgpig.z; + const int32_t i02 = tgpig.y; + const int32_t i01 = tgpig.x; - device const float * psrc0 = (device const float *) src0 + (i03*args.ne02*args.ne01*args.ne00 + i02*args.ne01*args.ne00 + i01*args.ne00); - device const T * pmask = src1 != src0 ? 
(device const T *) src1 + i01*args.ne00 : nullptr; - device float * pdst = (device float *) dst + (i03*args.ne02*args.ne01*args.ne00 + i02*args.ne01*args.ne00 + i01*args.ne00); + const int32_t i13 = i03%args.ne13; + const int32_t i12 = i02%args.ne12; + const int32_t i11 = i01; + + device const float * psrc0 = (device const float *) (src0 + i01*args.nb01 + i02*args.nb02 + i03*args.nb03); + device const T * pmask = src1 != src0 ? (device const T * ) (src1 + i11*args.nb11 + i12*args.nb12 + i13*args.nb13) : nullptr; + device const float * psrc2 = src2 != src0 ? (device const float *) (src2) : nullptr; + device float * pdst = (device float *) (dst + i01*args.nb1 + i02*args.nb2 + i03*args.nb3); float slope = 1.0f; // ALiBi if (args.max_bias > 0.0f) { - const int64_t h = i02; + const int32_t h = i02; const float base = h < args.n_head_log2 ? args.m0 : args.m1; const int exp = h < args.n_head_log2 ? h + 1 : 2*(h - args.n_head_log2) + 1; @@ -3532,15 +4232,15 @@ kernel void kernel_soft_max( } // parallel max - float lmax = -INFINITY; + float lmax = psrc2 ? psrc2[i02] : -INFINITY; - for (int i00 = tpitg; i00 < args.ne00; i00 += ntg) { + for (int i00 = tpitg.x; i00 < args.ne00; i00 += tptg.x) { lmax = MAX(lmax, psrc0[i00]*args.scale + (pmask ? slope*pmask[i00] : 0.0f)); } // find the max value in the block float max_val = simd_max(lmax); - if (ntg > N_SIMDWIDTH) { + if (tptg.x > N_SIMDWIDTH) { if (sgitg == 0) { buf[tiisg] = -INFINITY; } @@ -3559,7 +4259,7 @@ kernel void kernel_soft_max( // parallel sum float lsum = 0.0f; - for (int i00 = tpitg; i00 < args.ne00; i00 += ntg) { + for (int i00 = tpitg.x; i00 < args.ne00; i00 += tptg.x) { const float exp_psrc0 = exp((psrc0[i00]*args.scale + (pmask ? slope*pmask[i00] : 0.0f)) - max_val); lsum += exp_psrc0; pdst[i00] = exp_psrc0; @@ -3571,7 +4271,7 @@ kernel void kernel_soft_max( float sum = simd_sum(lsum); - if (ntg > N_SIMDWIDTH) { + if (tptg.x > N_SIMDWIDTH) { if (sgitg == 0) { buf[tiisg] = 0.0f; } @@ -3588,9 +4288,13 @@ kernel void kernel_soft_max( sum = simd_sum(sum); } + if (psrc2) { + sum += exp(psrc2[i02] - max_val); + } + const float inv_sum = 1.0f/sum; - for (int i00 = tpitg; i00 < args.ne00; i00 += ntg) { + for (int i00 = tpitg.x; i00 < args.ne00; i00 += tptg.x) { pdst[i00] *= inv_sum; } } @@ -3599,26 +4303,32 @@ template kernel void kernel_soft_max_4( device const char * src0, device const char * src1, + device const char * src2, device char * dst, constant ggml_metal_kargs_soft_max & args, threadgroup float * buf [[threadgroup(0)]], - uint tgpig[[threadgroup_position_in_grid]], - uint tpitg[[thread_position_in_threadgroup]], + uint3 tgpig[[threadgroup_position_in_grid]], + uint3 tpitg[[thread_position_in_threadgroup]], uint sgitg[[simdgroup_index_in_threadgroup]], uint tiisg[[thread_index_in_simdgroup]], - uint ntg[[threads_per_threadgroup]]) { - const int64_t i03 = (tgpig) / (args.ne02*args.ne01); - const int64_t i02 = (tgpig - i03*args.ne02*args.ne01) / args.ne01; - const int64_t i01 = (tgpig - i03*args.ne02*args.ne01 - i02*args.ne01); + uint3 tptg[[threads_per_threadgroup]]) { + const int32_t i03 = tgpig.z; + const int32_t i02 = tgpig.y; + const int32_t i01 = tgpig.x; - device const float4 * psrc4 = (device const float4 *) src0 + (i03*args.ne02*args.ne01*args.ne00 + i02*args.ne01*args.ne00 + i01*args.ne00)/4; - device const T * pmask = src1 != src0 ? 
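[note] The new src2 operand threads per-head attention "sinks" into the softmax: the running max is seeded with psrc2[i02] and exp(sink - max) is added to the denominator after the reduction, so the sink soaks up probability mass without producing an output element. A scalar sketch of the math (assuming one sink logit per head, which is what the i02 indexing suggests):

    #include <algorithm>
    #include <cmath>
    #include <vector>

    // Softmax over one row with an extra "sink" logit that joins the
    // normalization but yields no output element (mirrors psrc2[i02]).
    void softmax_with_sink(std::vector<float> &row, float sink) {
        float m = sink;                       // max seeded with the sink logit
        for (float v : row) m = std::max(m, v);

        float sum = std::exp(sink - m);       // sink joins the denominator
        for (float &v : row) {
            v = std::exp(v - m);
            sum += v;
        }
        for (float &v : row) v /= sum;        // row now sums to < 1 for finite sinks
    }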
(device const T *) src1 + i01*args.ne00/4 : nullptr; - device float4 * pdst4 = (device float4 *) dst + (i03*args.ne02*args.ne01*args.ne00 + i02*args.ne01*args.ne00 + i01*args.ne00)/4; + const int32_t i13 = i03%args.ne13; + const int32_t i12 = i02%args.ne12; + const int32_t i11 = i01; + + device const float4 * psrc4 = (device const float4 *) (src0 + i01*args.nb01 + i02*args.nb02 + i03*args.nb03); + device const T * pmask = src1 != src0 ? (device const T * ) (src1 + i11*args.nb11 + i12*args.nb12 + i13*args.nb13) : nullptr; + device const float * psrc2 = src2 != src0 ? (device const float * ) (src2) : nullptr; + device float4 * pdst4 = (device float4 *) (dst + i01*args.nb1 + i02*args.nb2 + i03*args.nb3); float slope = 1.0f; if (args.max_bias > 0.0f) { - const int64_t h = i02; + const int32_t h = i02; const float base = h < args.n_head_log2 ? args.m0 : args.m1; const int exp = h < args.n_head_log2 ? h + 1 : 2*(h - args.n_head_log2) + 1; @@ -3627,16 +4337,16 @@ kernel void kernel_soft_max_4( } // parallel max - float4 lmax4 = -INFINITY; + float4 lmax4 = psrc2 ? psrc2[i02] : -INFINITY; - for (int i00 = tpitg; i00 < args.ne00/4; i00 += ntg) { + for (int i00 = tpitg.x; i00 < args.ne00/4; i00 += tptg.x) { lmax4 = fmax(lmax4, psrc4[i00]*args.scale + (float4)((pmask ? slope*pmask[i00] : 0.0f))); } const float lmax = MAX(MAX(lmax4[0], lmax4[1]), MAX(lmax4[2], lmax4[3])); float max_val = simd_max(lmax); - if (ntg > N_SIMDWIDTH) { + if (tptg.x > N_SIMDWIDTH) { if (sgitg == 0) { buf[tiisg] = -INFINITY; } @@ -3655,7 +4365,7 @@ kernel void kernel_soft_max_4( // parallel sum float4 lsum4 = 0.0f; - for (int i00 = tpitg; i00 < args.ne00/4; i00 += ntg) { + for (int i00 = tpitg.x; i00 < args.ne00/4; i00 += tptg.x) { const float4 exp_psrc4 = exp((psrc4[i00]*args.scale + (float4)((pmask ? 
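[note] The ALiBi branch reproduces the usual per-head slope schedule: slope = m0^(h+1) for heads below n_head_log2, and m1^(2*(h - n_head_log2)+1) above it. The kernel receives m0/m1/n_head_log2 precomputed via args; the derivation below uses the conventional m0 = 2^(-max_bias/n) and m1 = 2^(-(max_bias/2)/n) with max_bias = 8, which is an assumption about the host side, not shown in this patch:

    #include <cmath>
    #include <cstdio>

    int main() {
        // Conventional ALiBi parameters for 8 heads; the kernel only
        // evaluates pow(base, exp) from the precomputed m0/m1.
        const int   n_head_log2 = 8;
        const float m0 = std::pow(2.0f, -8.0f / n_head_log2);
        const float m1 = std::pow(2.0f, -4.0f / n_head_log2);

        for (int h = 0; h < 12; ++h) {
            const float base = h < n_head_log2 ? m0 : m1;
            const int   e    = h < n_head_log2 ? h + 1 : 2*(h - n_head_log2) + 1;
            printf("head %2d: slope = %g\n", h, std::pow(base, e));
        }
    }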
slope*pmask[i00] : 0.0f))) - max_val); lsum4 += exp_psrc4; pdst4[i00] = exp_psrc4; @@ -3669,7 +4379,7 @@ kernel void kernel_soft_max_4( float sum = simd_sum(lsum); - if (ntg > N_SIMDWIDTH) { + if (tptg.x > N_SIMDWIDTH) { if (sgitg == 0) { buf[tiisg] = 0.0f; } @@ -3686,9 +4396,13 @@ kernel void kernel_soft_max_4( sum = simd_sum(sum); } + if (psrc2) { + sum += exp(psrc2[i02] - max_val); + } + const float inv_sum = 1.0f/sum; - for (int i00 = tpitg; i00 < args.ne00/4; i00 += ntg) { + for (int i00 = tpitg.x; i00 < args.ne00/4; i00 += tptg.x) { pdst4[i00] *= inv_sum; } } @@ -3774,7 +4488,7 @@ kernel void kernel_ssm_conv_f32( x[0] = sumf; } -// ref: ggml.c:ggml_compute_forward_ssm_scan_f32 +// ref: ggml.c:ggml_compute_forward_ssm_scan_f32, Mamba-1 part kernel void kernel_ssm_scan_f32( device const void * src0, device const void * src1, @@ -3782,47 +4496,222 @@ kernel void kernel_ssm_scan_f32( device const void * src3, device const void * src4, device const void * src5, + device const void * src6, device float * dst, + threadgroup float * shared [[threadgroup(0)]], constant ggml_metal_kargs_ssm_scan & args, - uint3 tgpig[[threadgroup_position_in_grid]], - uint3 tpitg[[thread_position_in_threadgroup]], - uint3 ntg[[threads_per_threadgroup]]) { - const int64_t ir = tgpig.x; - const int64_t i3 = tgpig.y; + uint3 tgpig[[threadgroup_position_in_grid]], + uint3 tpitg[[thread_position_in_threadgroup]], + ushort sgitg[[simdgroup_index_in_threadgroup]], + ushort tiisg[[thread_index_in_simdgroup]], + ushort sgptg[[simdgroups_per_threadgroup]], + uint3 tgpg[[threadgroups_per_grid]]) { + + const int64_t i0 = tpitg.x; + const int64_t i1 = 0; + const int64_t ir = tgpig.x; // current head + const int64_t i3 = tgpig.y; // current seq + + const uint64_t nb00 = sizeof(float); + const uint64_t nb10 = sizeof(float); + const uint64_t nb20 = sizeof(float); const int64_t nc = args.d_state; - // const int64_t nr = args.d_inner; + const int64_t nr = args.d_inner; + const int64_t nh = args.n_head; + const int64_t ng = args.n_group; const int64_t n_t = args.n_seq_tokens; - // const int64_t n_s = args.n_seqs; + + const int64_t s_off = args.s_off; + + device const int32_t * ids = (device const int32_t *) src6; + + device const float * s0_buff = (device const float *) ((device const char *) src0 + ir*args.nb02 + ids[i3]*args.nb03); + device float * s_buff = (device float *) ((device char *) dst + ir*args.nb02 + i3*args.nb03 + s_off); + const int64_t i = i0 + i1*nc; + float s0 = s0_buff[i]; + float s = s_buff[i]; + + device const float * A = (device const float *) ((device const char *) src3 + ir*args.nb31); + device const float * x_block = (device const float *) ((device const char *) src1 + i1*nb10 + ir*args.nb11 + i3*args.nb13); + device const float * dt_block = (device const float *) ((device const char *) src2 + ir*nb20 + i3*args.nb22); + device const float * B_block = (device const float *) ((device const char *) src4 + (ir & (ng - 1))*args.nb41 + i3*args.nb43); + device const float * C_block = (device const float *) ((device const char *) src5 + (ir & (ng - 1))*args.nb51 + i3*args.nb53); + device float * y_block = (device float *) ((device char *) dst + (i1 + ir*(nr) + i3*(n_t*nh*nr))*nb00); for (int64_t i2 = 0; i2 < n_t; ++i2) { - device const float * s0 = (device const float *) ((device const char *) src0 + ir*args.nb01 + i3*args.nb02); - device const float * x = (device const float *) ((device const char *) src1 + ir*args.nb10 + i2*args.nb11 + i3*args.nb12); - device const float * dt = (device const float *) ((device 
const char *) src2 + ir*args.nb20 + i2*args.nb21 + i3*args.nb22); - device const float * A = (device const float *) ((device const char *) src3 + ir*args.nb31); - device const float * B = (device const float *) ((device const char *) src4 + i2*args.nb41 + i3*args.nb42); - device const float * C = (device const float *) ((device const char *) src5 + i2*args.nb51 + i3*args.nb52); - device float * y = (device float *) ((device char *) dst + ir*args.nb10 + i2*args.nb11 + i3*args.nb12); // TODO: do not use src1 strides - device float * s = (device float *) ((device char *) dst + ir*args.nb01 + i3*args.nb02 + args.nb13); + device const float * x = (device const float *) ((device const char *) x_block + i2*args.nb12); // {dim, nh, nt, ns} + device const float * dt = (device const float *) ((device const char *) dt_block + i2*args.nb21); // {nh, nt, ns} + device const float * B = (device const float *) ((device const char *) B_block + i2*args.nb42); // {d_state, ng, nt, ns} + device const float * C = (device const float *) ((device const char *) C_block + i2*args.nb52); // {d_state, ng, nt, ns} + device float * y = (device float *) ((device char *) y_block + i2*(nh*nr*nb00)); // {dim, nh, nt, ns} - if (i2 > 0) { - s0 = s; + const float dt_soft_plus = dt[0] <= 20.0f ? log(1.0f + exp(dt[0])) : dt[0]; + const float x_dt = x[0] * dt_soft_plus; + + const float state = (s0 * exp(dt_soft_plus * A[i0])) + (B[i0] * x_dt); + s = state; + + // Parallel sum: This relies on the fact that this kernel will be + // dispatched with each threadgroup having (d_state, 1, 1) threads which + // are subdivided into SIMD groups of size `sgptg`. The goal is to + // compute y = sum({state * C[i] for i in range(d_state)}). + // To parallelize this effectively, we first use simd_sum over each SIMD + // group to compute the sum of each SIMD group, then place the result in + // the SIMD group's indexed bucket in the shared memory. We then sum + // over the individual group sums to compute the final sum. + + // Computed for each thread + float sumf = state * C[i0]; + + // Sum the threads in the simd group => simd sum + sumf = simd_sum(sumf); + + if (sgptg > 1) { + + // Once per simd group, place the group sum into the shared buffer + if (tiisg == 0) { + shared[sgitg] = sumf; + } + + // Wait for all threads in the threadgroup to reach this point. This + // ensures that all elements of the shared buffer are populated with the + // sum of the individual simd groups. + threadgroup_barrier(mem_flags::mem_threadgroup); + + // For simd group 0 at indices < num simd groups, extract the shared + // simd sum + sumf = 0.0f; + if (sgitg == 0) { + if (tiisg < sgptg) { + sumf = shared[tiisg]; + } + sumf = simd_sum(sumf); + if (tiisg == 0) { + y[0] = sumf; + } + } + } else if (tiisg == 0) { + y[0] = sumf; } - // i1 == 0 - float dt_soft_plus = dt[0] <= 20.0f ? 
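[note] The rewritten scan assigns one thread per (state element, head) pair for the whole token loop instead of re-deriving all pointers per token: dt goes through a guarded softplus (identity above 20, where exp would overflow), the state decays by exp(dt*A) while integrating B*x*dt, and y is the C-weighted sum over the state dimension. One step of the per-element recurrence in scalar C++ (a sketch; the threadgroup layout is omitted):

    #include <cmath>

    // Guarded softplus used for dt: log(1+e^x), falling back to x for large
    // inputs where exp(x) would overflow and softplus(x) ~= x anyway.
    float softplus(float x) { return x <= 20.0f ? std::log(1.0f + std::exp(x)) : x; }

    // One selective-scan step for a single state element i0 of one head:
    //   s' = s * exp(dt * A[i0]) + B[i0] * (x * dt);   y += s' * C[i0]
    float ssm_step(float &s, float A_i, float B_i, float C_i, float x, float dt_raw) {
        const float dt   = softplus(dt_raw);
        const float x_dt = x * dt;
        s = s * std::exp(dt * A_i) + B_i * x_dt;
        return s * C_i;   // caller accumulates this over i0 = 0..d_state-1
    }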
log(1.0f + exp(dt[0])) : dt[0]; - float x_dt = x[0] * dt_soft_plus; - float sumf = 0.0f; - - for (int64_t i0 = 0; i0 < nc; ++i0) { - int64_t i = i0; - float state = (s0[i] * exp(dt_soft_plus * A[i])) + (B[i0] * x_dt); - sumf += state * C[i0]; - s[i] = state; - } - - y[0] = sumf; + // recurse + s0 = s; } + + // Assign the final state to the output buffer + s_buff[i] = s; +} + +// ref: ggml.c:ggml_compute_forward_ssm_scan_f32, Mamba-2 part +kernel void kernel_ssm_scan_f32_group( + device const void * src0, + device const void * src1, + device const void * src2, + device const void * src3, + device const void * src4, + device const void * src5, + device const void * src6, + device float * dst, + threadgroup float * shared [[threadgroup(0)]], + constant ggml_metal_kargs_ssm_scan & args, + uint3 tgpig[[threadgroup_position_in_grid]], + uint3 tpitg[[thread_position_in_threadgroup]], + ushort sgitg[[simdgroup_index_in_threadgroup]], + ushort tiisg[[thread_index_in_simdgroup]], + ushort sgptg[[simdgroups_per_threadgroup]], + uint3 tgpg[[threadgroups_per_grid]]) { + + const int64_t i0 = tpitg.x; + const int64_t i1 = tgpig.x; + const int64_t ir = tgpig.y; // current head + const int64_t i3 = tgpig.z; // current seq + + const uint64_t nb00 = sizeof(float); + const uint64_t nb10 = sizeof(float); + const uint64_t nb20 = sizeof(float); + + const int64_t nc = args.d_state; + const int64_t nr = args.d_inner; + const int64_t nh = args.n_head; + const int64_t ng = args.n_group; + const int64_t n_t = args.n_seq_tokens; + + const int64_t s_off = args.s_off; + + device const int32_t * ids = (device const int32_t *) src6; + + device const float * s0_buff = (device const float *) ((device const char *) src0 + ir*args.nb02 + ids[i3]*args.nb03); + device float * s_buff = (device float *) ((device char *) dst + ir*args.nb02 + i3*args.nb03 + s_off); + const int64_t i = i0 + i1*nc; + float s0 = s0_buff[i]; + float s = s_buff[i]; + + device const float * A = (device const float *) ((device const char *) src3 + ir*args.nb31); // {1, nh} + device const float * x_block = (device const float *) ((device const char *) src1 + i1*nb10 + ir*args.nb11 + i3*args.nb13); + device const float * dt_block = (device const float *) ((device const char *) src2 + ir*nb20 + i3*args.nb22); + device const float * B_block = (device const float *) ((device const char *) src4 + (ir & (ng - 1))*args.nb41 + i3*args.nb43); + device const float * C_block = (device const float *) ((device const char *) src5 + (ir & (ng - 1))*args.nb51 + i3*args.nb53); + device float * y_block = (device float *) ((device char *) dst + (i1 + ir*(nr) + i3*(n_t*nh*nr))*nb00); + + for (int64_t i2 = 0; i2 < n_t; ++i2) { + device const float * x = (device const float *) ((device const char *) x_block + i2*args.nb12); // {dim, nh, nt, ns} + device const float * dt = (device const float *) ((device const char *) dt_block + i2*args.nb21); // {nh, nt, ns} + device const float * B = (device const float *) ((device const char *) B_block + i2*args.nb42); // {d_state, ng, nt, ns} + device const float * C = (device const float *) ((device const char *) C_block + i2*args.nb52); // {d_state, ng, nt, ns} + device float * y = (device float *) ((device char *) y_block + i2*(nh*nr*nb00)); // {dim, nh, nt, ns} + + const float dt_soft_plus = dt[0] <= 20.0f ? 
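[note] The y reduction described in the "Parallel sum" comments is the standard two-level pattern: simd_sum collapses each SIMD group, lane 0 of each group parks its partial in threadgroup memory, and after a barrier the first SIMD group sums the partials with one more simd_sum. A CPU emulation of the same shape (the width 32 mirrors N_SIMDWIDTH but is otherwise illustrative):

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    // Two-level reduction as in the kernel: per-SIMD-group partial sums,
    // then a final sum over the partials.
    float two_level_sum(const std::vector<float> &vals) {
        const std::size_t W = 32;                    // emulated SIMD width
        std::vector<float> partials;                 // plays the role of `shared`
        for (std::size_t base = 0; base < vals.size(); base += W) {
            float s = 0.0f;                          // simd_sum within one group
            for (std::size_t i = base; i < std::min(base + W, vals.size()); ++i)
                s += vals[i];
            partials.push_back(s);                   // shared[sgitg] = sumf
        }
        float total = 0.0f;                          // group 0 reduces the partials
        for (float p : partials) total += p;
        return total;
    }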
log(1.0f + exp(dt[0])) : dt[0]; + const float x_dt = x[0] * dt_soft_plus; + const float dA = exp(dt_soft_plus * A[0]); + + const float state = (s0 * dA) + (B[i0] * x_dt); + s = state; + + // Parallel sum: This relies on the fact that this kernel will be + // dispatched with each threadgroup having (d_state, 1, 1) threads which + // are subdivided into SIMD groups of size `sgptg`. The goal is to + // compute y = sum({state * C[i] for i in range(d_state)}). + // To parallelize this effectively, we first use simd_sum over each SIMD + // group to compute the sum of each SIMD group, then place the result in + // the SIMD group's indexed bucket in the shared memory. We then sum + // over the individual group sums to compute the final sum. + + // Computed for each thread + float sumf = state * C[i0]; + + // Sum the threads in the simd group => simd sum + sumf = simd_sum(sumf); + + // Once per simd group, place the group sum into the shared buffer + if (tiisg == 0) { + shared[sgitg] = sumf; + } + + // Wait for all threads in the threadgroup to reach this point. This + // ensures that all elements of the shared buffer are populated with the + // sum of the individual simd groups. + threadgroup_barrier(mem_flags::mem_threadgroup); + + // For simd group 0 at indices < num simd groups, extract the shared + // simd sum + sumf = 0.0f; + if (sgitg == 0) { + if (tiisg < sgptg) { + sumf = shared[tiisg]; + } + sumf = simd_sum(sumf); + if (tiisg == 0) { + y[0] = sumf; + } + } + + // recurse + s0 = s; + } + + // Assign the final state to the output buffer + s_buff[i] = s; } kernel void kernel_rwkv_wkv6_f32( @@ -4127,26 +5016,39 @@ kernel void kernel_norm( } } -kernel void kernel_rms_norm( +// F == 1 : rms_norm (no fuse) +// F == 2 : rms_norm + mul +// F == 3 : rms_norm + mul + add +template +kernel void kernel_rms_norm_fuse_impl( constant ggml_metal_kargs_rms_norm & args, device const char * src0, + device const char * src1_0, + device const char * src1_1, device char * dst, threadgroup float * shmem_f32 [[threadgroup(0)]], - uint tgpig[[threadgroup_position_in_grid]], - ushort tpitg[[thread_position_in_threadgroup]], - ushort sgitg[[simdgroup_index_in_threadgroup]], - ushort tiisg[[thread_index_in_simdgroup]], - ushort ntg[[threads_per_threadgroup]]) { + uint3 tgpig[[threadgroup_position_in_grid]], + ushort3 tpitg[[thread_position_in_threadgroup]], + ushort sgitg[[simdgroup_index_in_threadgroup]], + ushort tiisg[[thread_index_in_simdgroup]], + ushort3 ntg[[threads_per_threadgroup]]) { if (sgitg == 0) { shmem_f32[tiisg] = 0.0f; } - device const float4 * x = (device const float4 *) (src0 + tgpig*args.nb01); + const int i01 = tgpig.x; + const int i02 = tgpig.y; + const int i03 = tgpig.z; + + device const float4 * x = (device const float4 *) (src0 + i03*args.nbf3[0] + i02*args.nbf2[0] + i01*args.nbf1[0]); + + device const float4 * f0 = (device const float4 *) (src1_0 + (i03%args.nef3[1])*args.nbf3[1] + (i02%args.nef2[1])*args.nbf2[1] + (i01%args.nef1[1])*args.nbf1[1]); + device const float4 * f1 = (device const float4 *) (src1_1 + (i03%args.nef3[2])*args.nbf3[2] + (i02%args.nef2[2])*args.nbf2[2] + (i01%args.nef1[2])*args.nbf1[2]); float sumf = 0.0f; // parallel sum - for (int i00 = tpitg; i00 < args.ne00_4; i00 += ntg) { + for (int i00 = tpitg.x; i00 < args.ne00_4; i00 += ntg.x) { sumf += dot(x[i00], x[i00]); } sumf = simd_sum(sumf); @@ -4165,12 +5067,26 @@ kernel void kernel_rms_norm( const float mean = sumf/args.ne00; const float scale = 1.0f/sqrt(mean + args.eps); - device float4 * y = (device float4 *) dst + 
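[note] kernel_rms_norm_fuse_impl folds the elementwise ops that typically follow an RMS norm into the same pass: F=1 is the bare norm, F=2 multiplies by a broadcastable weight (src1_0), F=3 also adds a bias (src1_1), with the nef/nbf arrays carrying per-operand shapes and byte strides for the broadcast. Scalar reference of the fused math (a sketch; broadcasting is reduced to full rows here):

    #include <cmath>
    #include <cstddef>

    // Fused RMS norm: y = normalize(x) [* w [+ b]], matching F = 1/2/3.
    void rms_norm_fused(const float *x, const float *w, const float *b,
                        float *y, size_t n, float eps, int F) {
        float ss = 0.0f;
        for (size_t i = 0; i < n; ++i) ss += x[i]*x[i];
        const float scale = 1.0f / std::sqrt(ss/n + eps);

        for (size_t i = 0; i < n; ++i) {
            float v = x[i] * scale;
            if (F >= 2) v *= w[i];
            if (F == 3) v += b[i];
            y[i] = v;
        }
    }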
tgpig*args.ne00_4; - for (int i00 = tpitg; i00 < args.ne00_4; i00 += ntg) { - y[i00] = x[i00] * scale; + device float4 * y = (device float4 *) (dst + i03*args.nb3 + i02*args.nb2 + i01*args.nb1); + for (int i00 = tpitg.x; i00 < args.ne00_4; i00 += ntg.x) { + if (F == 1) { + y[i00] = (x[i00]*scale); + } + if (F == 2) { + y[i00] = (x[i00]*scale)*f0[i00]; + } + if (F == 3) { + y[i00] = (x[i00]*scale)*f0[i00] + f1[i00]; + } } } +typedef decltype(kernel_rms_norm_fuse_impl<1>) kernel_rms_norm_fuse_t; + +template [[host_name("kernel_rms_norm")]] kernel kernel_rms_norm_fuse_t kernel_rms_norm_fuse_impl<1>; +template [[host_name("kernel_rms_norm_mul")]] kernel kernel_rms_norm_fuse_t kernel_rms_norm_fuse_impl<2>; +template [[host_name("kernel_rms_norm_mul_add")]] kernel kernel_rms_norm_fuse_t kernel_rms_norm_fuse_impl<3>; + kernel void kernel_l2_norm( constant ggml_metal_kargs_l2_norm & args, device const char * src0, @@ -4390,16 +5306,16 @@ void mul_vec_q_n_f32_impl( device const char * src1, device char * dst, threadgroup char * shmem, - uint3 tgpig, // Threadgroup Position in Grid - ushort tiisg, // Thread Index in SIMD Group - ushort sgitg) { // SIMD Group Index in ThreadGroup - const int nb = args.ne00/QK4_0; // src0->ne[0] / 32 + uint3 tgpig, + ushort tiisg, + ushort sgitg) { + const int nb = args.ne00/QK4_0; const int r0 = tgpig.x; const int r1 = tgpig.y; const int im = tgpig.z; - const int first_row = (r0 * nsg + sgitg) * nr0; // nsg=2 nr0=4 + const int first_row = (r0 * nsg + sgitg) * nr0; const uint i12 = im%args.ne12; const uint i13 = im/args.ne12; @@ -4865,6 +5781,11 @@ template [[host_name("kernel_mul_mv_ext_q8_0_f32_r1_3")]] kernel mul_mv_ext_q4 template [[host_name("kernel_mul_mv_ext_q8_0_f32_r1_4")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<4, block_q8_0, 32, dequantize_q8_0_t4>; template [[host_name("kernel_mul_mv_ext_q8_0_f32_r1_5")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<5, block_q8_0, 32, dequantize_q8_0_t4>; +template [[host_name("kernel_mul_mv_ext_mxfp4_f32_r1_2")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<2, block_mxfp4, 32, dequantize_mxfp4_t4>; +template [[host_name("kernel_mul_mv_ext_mxfp4_f32_r1_3")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<3, block_mxfp4, 32, dequantize_mxfp4_t4>; +template [[host_name("kernel_mul_mv_ext_mxfp4_f32_r1_4")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<4, block_mxfp4, 32, dequantize_mxfp4_t4>; +template [[host_name("kernel_mul_mv_ext_mxfp4_f32_r1_5")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<5, block_mxfp4, 32, dequantize_mxfp4_t4>; + template [[host_name("kernel_mul_mv_ext_iq4_nl_f32_r1_2")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<2, block_iq4_nl, 32, dequantize_iq4_nl_t4>; template [[host_name("kernel_mul_mv_ext_iq4_nl_f32_r1_3")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<3, block_iq4_nl, 32, dequantize_iq4_nl_t4>; template [[host_name("kernel_mul_mv_ext_iq4_nl_f32_r1_4")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<4, block_iq4_nl, 32, dequantize_iq4_nl_t4>; @@ -4983,6 +5904,70 @@ template [[host_name("kernel_mul_mv_bf16_f32")]] kernel mul_mv_t kernel_mul_mv< template [[host_name("kernel_mul_mv_bf16_bf16")]] kernel mul_mv_t kernel_mul_mv; #endif +template +void kernel_mul_mv_c4_impl( + args_t args, + device const char * src0, + device const char * src1, + device char * dst, + uint3 tgpig, + ushort tiisg) { + const int r0 = tgpig.x*32 + tiisg; + const int rb = tgpig.y*N_MV_T_T; + const int im = 
tgpig.z; + + if (r0 >= args.ne01) { + return; + } + + const uint i12 = im%args.ne12; + const uint i13 = im/args.ne12; + + const uint64_t offset0 = r0*args.nb01 + (i12/args.r2)*args.nb02 + (i13/args.r3)*args.nb03; + + device const T04 * x = (device const T04 *) (src0 + offset0); + + device float * dst_f32 = (device float *) dst + (uint64_t)im*args.ne0*args.ne1; + + for (int row = 0; row < N_MV_T_T; ++row) { + int r1 = rb + row; + if (r1 >= args.ne11) { + break; + } + + const uint64_t offset1 = r1*args.nb11 + (i12 )*args.nb12 + (i13 )*args.nb13; + + device const T14 * y = (device const T14 *) (src1 + offset1); + + dst_f32[(uint64_t)r1*args.ne0 + r0] = dot((float4) x[0], (float4) y[0]); + } +} + +template +kernel void kernel_mul_mv_c4( + constant ggml_metal_kargs_mul_mv & args, + device const char * src0, + device const char * src1, + device char * dst, + uint3 tgpig[[threadgroup_position_in_grid]], + ushort tiisg[[thread_index_in_simdgroup]]) { + kernel_mul_mv_c4_impl( + args, + src0, + src1, + dst, + tgpig, + tiisg); +} + +typedef decltype(kernel_mul_mv_c4) mul_mv_c4_t; + +template [[host_name("kernel_mul_mv_f32_f32_c4")]] kernel mul_mv_c4_t kernel_mul_mv_c4; +template [[host_name("kernel_mul_mv_f16_f32_c4")]] kernel mul_mv_c4_t kernel_mul_mv_c4; +#if defined(GGML_METAL_USE_BF16) +template [[host_name("kernel_mul_mv_bf16_f32_c4")]] kernel mul_mv_c4_t kernel_mul_mv_c4; +#endif + template kernel void kernel_mul_mv_1row( constant ggml_metal_kargs_mul_mv & args, @@ -5773,7 +6758,7 @@ template< typename kd4x4_t, // key type in device memory short nl_k, void (*deq_k)(device const kd4x4_t *, short, thread k4x4_t &), - typename vd4x4_t, // key type in device memory + typename vd4x4_t, // value type in device memory short nl_v, void (*deq_v)(device const vd4x4_t *, short, thread v4x4_t &), short DK, // K head size @@ -5787,6 +6772,7 @@ kernel void kernel_flash_attn_ext( device const char * k, device const char * v, device const char * mask, + device const char * sinks, device char * dst, threadgroup half * shmem_f16 [[threadgroup(0)]], uint3 tgpig[[threadgroup_position_in_grid]], @@ -5809,14 +6795,12 @@ kernel void kernel_flash_attn_ext( constexpr short NW = N_SIMDWIDTH; constexpr short SH = (2*C + Q); // shared memory per simdgroup (s_t == float) - const short TS = nsg*SH; // shared memory size per query in (s_t == float) - const short T = DK + 2*TS; // shared memory size per query in (half) + const short TS = nsg*SH; // shared memory size per query in (s_t == float) + const short T = 2*DK + 2*TS; // shared memory size per query in (half) - threadgroup q_t * sq = (threadgroup q_t *) (shmem_f16 + 0*DK); // holds the query data - threadgroup q4_t * sq4 = (threadgroup q4_t *) (shmem_f16 + 0*DK); // same as above but in q4_t - threadgroup o_t * so = (threadgroup o_t *) (shmem_f16 + 0*DK); // reuse query data for accumulation - threadgroup o4_t * so4 = (threadgroup o4_t *) (shmem_f16 + 0*DK); // same as above but in o4_t - threadgroup s_t * ss = (threadgroup s_t *) (shmem_f16 + 2*sgitg*SH + Q*DK); // scratch buffer for attention, mask and diagonal matrix + threadgroup q_t * sq = (threadgroup q_t *) (shmem_f16 + 0*DK); // holds the query data + threadgroup q4_t * sq4 = (threadgroup q4_t *) (shmem_f16 + 0*DK); // same as above but in q4_t + threadgroup s_t * ss = (threadgroup s_t *) (shmem_f16 + 2*sgitg*SH + 2*Q*DK); // scratch buffer for attention, mask and diagonal matrix threadgroup k_t * sk = (threadgroup k_t *) (shmem_f16 + sgitg*(4*16*KV) + Q*T); // scratch buffer to load K in shared memory 
threadgroup k4x4_t * sk4x4 = (threadgroup k4x4_t *) (shmem_f16 + sgitg*(4*16*KV) + Q*T); // same as above but in k4x4_t @@ -5835,7 +6819,7 @@ kernel void kernel_flash_attn_ext( if (iq1 + j < args.ne01) { sq4[j*DK4 + i] = (q4_t) q4[i]; } else { - sq4[j*DK4 + i] = (q4_t) 0.0f; + sq4[j*DK4 + i] = 0; } } } @@ -5900,7 +6884,7 @@ kernel void kernel_flash_attn_ext( // load the mask in shared memory #pragma unroll(Q) for (short j = 0; j < Q; ++j) { - device const half * pm = (device const half *) ((device const char *) mask + (iq1 + j)*args.nb31); + device const half * pm = (device const half *) ((device const char *) mask + (iq1 + j)*args.nb31 + (iq2%args.ne32)*args.nb32 + (iq3%args.ne33)*args.nb33); const float m = pm[ic + tiisg]; @@ -6029,20 +7013,20 @@ kernel void kernel_flash_attn_ext( // O = diag(ms)*O { - s8x8_t mm; - simdgroup_load(mm, ss + 2*C, TS, 0, false); + s8x8_t ms; + simdgroup_load(ms, ss + 2*C, TS, 0, false); #pragma unroll(DV8) for (short i = 0; i < DV8; ++i) { - simdgroup_multiply(lo[i], mm, lo[i]); + simdgroup_multiply(lo[i], ms, lo[i]); } } // O = O + (Q*K^T)*V { for (short cc = 0; cc < C/8; ++cc) { - s8x8_t ms; - simdgroup_load(ms, ss + 8*cc, TS, 0, false); + s8x8_t vs; + simdgroup_load(vs, ss + 8*cc, TS, 0, false); if (is_same::value) { // we can read directly from global memory @@ -6053,7 +7037,7 @@ kernel void kernel_flash_attn_ext( v8x8_t mv; simdgroup_load(mv, pv + i*8, args.nb21/sizeof(v_t), 0, false); // TODO: use ne20 - simdgroup_multiply_accumulate(lo[i], ms, mv, lo[i]); + simdgroup_multiply_accumulate(lo[i], vs, mv, lo[i]); } } else { for (short ii = 0; ii < DV16; ii += 4) { @@ -6074,10 +7058,10 @@ kernel void kernel_flash_attn_ext( v8x8_t mv; simdgroup_load(mv, sv + 16*k + 0*8, 4*16, 0, false); - simdgroup_multiply_accumulate(lo[2*(ii + k) + 0], ms, mv, lo[2*(ii + k) + 0]); + simdgroup_multiply_accumulate(lo[2*(ii + k) + 0], vs, mv, lo[2*(ii + k) + 0]); simdgroup_load(mv, sv + 16*k + 1*8, 4*16, 0, false); - simdgroup_multiply_accumulate(lo[2*(ii + k) + 1], ms, mv, lo[2*(ii + k) + 1]); + simdgroup_multiply_accumulate(lo[2*(ii + k) + 1], vs, mv, lo[2*(ii + k) + 1]); } } else { if (ii + tx < DV16) { @@ -6092,10 +7076,10 @@ kernel void kernel_flash_attn_ext( v8x8_t mv; simdgroup_load(mv, sv + 16*k + 0*8, 4*16, 0, false); - simdgroup_multiply_accumulate(lo[2*(ii + k) + 0], ms, mv, lo[2*(ii + k) + 0]); + simdgroup_multiply_accumulate(lo[2*(ii + k) + 0], vs, mv, lo[2*(ii + k) + 0]); simdgroup_load(mv, sv + 16*k + 1*8, 4*16, 0, false); - simdgroup_multiply_accumulate(lo[2*(ii + k) + 1], ms, mv, lo[2*(ii + k) + 1]); + simdgroup_multiply_accumulate(lo[2*(ii + k) + 1], vs, mv, lo[2*(ii + k) + 1]); } } } @@ -6104,94 +7088,119 @@ kernel void kernel_flash_attn_ext( } } - // these are needed for reducing the results from the simdgroups (reuse the ss buffer) - for (short j = 0; j < Q; ++j) { - if (tiisg == 0) { - ss[j*TS + 0] = S[j]; - ss[j*TS + 1] = M[j]; + if (sinks != q && sgitg == 0) { + for (ushort j = 0; j < Q; ++j) { + const float m = M[j]; + const float s = tiisg == 0 ? 
((device const float *) sinks)[iq2] : -FLT_MAX/2; + + M[j] = simd_max(max(M[j], s)); + + const float ms = exp(m - M[j]); + const float vs = exp(s - M[j]); + + S[j] = S[j]*ms + simd_sum(vs); + + if (tiisg == j) { + ss[j*TS + 2*C + j] = ms; + } } + + // O = diag(ms)*O + { + s8x8_t ms; + simdgroup_load(ms, ss + 2*C, TS, 0, false); + + #pragma unroll(DV8) + for (short i = 0; i < DV8; ++i) { + simdgroup_multiply(lo[i], ms, lo[i]); + } + } + } + + // these are needed for reducing the results from the simdgroups (reuse the ss buffer) + for (short j = tiisg; j < Q; j += NW) { + ss[j*TS + 0] = S[j]; + ss[j*TS + 1] = M[j]; } } + threadgroup_barrier(mem_flags::mem_threadgroup); + + threadgroup float * so = (threadgroup float *) (shmem_f16 + 0*DK); // reuse query data for accumulation + threadgroup float4 * so4 = (threadgroup float4 *) (shmem_f16 + 0*DK); + + // store result to shared memory in F32 + if (sgitg == 0) { + for (short i = 0; i < DV8; ++i) { + //simdgroup_store(lo[i], so + i*8, DV, 0, false); + simdgroup_float8x8 t(1.0f); + simdgroup_multiply(t, lo[i], t); + simdgroup_store(t, so + i*8, DV, 0, false); + } + } + + threadgroup_barrier(mem_flags::mem_threadgroup); + // reduce the warps sequentially for (ushort sg = 1; sg < nsg; ++sg) { - float S = { 0.0f }; - float M = { -__FLT_MAX__/2 }; - - threadgroup_barrier(mem_flags::mem_threadgroup); - - // each simdgroup stores its output to shared memory, reusing sq if (sgitg == sg) { - for (short i = 0; i < DV8; ++i) { - simdgroup_store(lo[i], so + i*8, DV, 0, false); + for (short j = tiisg; j < Q; j += NW) { + const float S0 = ss[j*TS - 1*SH + 0]; + const float S1 = ss[j*TS + 0]; + + const float M0 = ss[j*TS - 1*SH + 1]; + const float M1 = ss[j*TS + 1]; + + const float M = max(M0, M1); + + float ms0 = exp(M0 - M); + float ms1 = exp(M1 - M); + + const float S = S0*ms0 + S1*ms1; + + ss[j*TS + 0] = S; + ss[j*TS + 1] = M; + + ss[j*TS + 2*C + j - 1*SH] = ms0; + ss[j*TS + 2*C + j ] = ms1; } - } - threadgroup_barrier(mem_flags::mem_threadgroup); - - // the first simdgroup accumulates the results from the other simdgroups - if (sgitg == 0) { - for (short j = 0; j < Q; ++j) { - const float S0 = ss[j*TS + 0]; - const float S1 = ss[j*TS + sg*SH + 0]; - - const float M0 = ss[j*TS + 1]; - const float M1 = ss[j*TS + sg*SH + 1]; - - M = max(M0, M1); - - const float ms0 = exp(M0 - M); - const float ms1 = exp(M1 - M); - - S = S0*ms0 + S1*ms1; - - if (tiisg == 0) { - ss[j*TS + 0] = S; - ss[j*TS + 1] = M; - - ss[j*TS + 2*C + j ] = ms0; - ss[j*TS + 2*C + j + sg*SH] = ms1; - } - } + //simdgroup_barrier(mem_flags::mem_threadgroup); // O_0 = diag(ms0)*O_0 + diag(ms1)*O_1 { s8x8_t ms0; s8x8_t ms1; - simdgroup_load(ms0, ss + 2*C, TS, 0, false); - simdgroup_load(ms1, ss + 2*C + sg*SH, TS, 0, false); + simdgroup_load(ms0, ss + 2*C - 1*SH, TS, 0, false); + simdgroup_load(ms1, ss + 2*C, TS, 0, false); #pragma unroll(DV8) for (short i = 0; i < DV8; ++i) { - o8x8_t t; + simdgroup_float8x8 t; simdgroup_load (t, so + i*8, DV, 0, false); - simdgroup_multiply(t, ms1, t); + simdgroup_multiply(t, ms0, t); - simdgroup_multiply_accumulate(lo[i], ms0, lo[i], t); + simdgroup_multiply_accumulate(t, ms1, lo[i], t); + simdgroup_store(t, so + i*8, DV, 0, false); } } } + + threadgroup_barrier(mem_flags::mem_threadgroup); } - // store result to shared memory (reuse sq) - if (sgitg == 0) { - for (short i = 0; i < DV8; ++i) { - simdgroup_store(lo[i], so + i*8, DV, 0, false); - } - } - - device float4 * dst4 = (device float4 *) dst; + threadgroup s_t * sf = (threadgroup s_t *) (shmem_f16 + 
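[note] The sinks block merges one extra logit per query into the streaming-softmax state: M rises to max(M, s), the prior sum S and the accumulated output O are rescaled by ms = exp(M_old - M_new) (via the diag(ms) simdgroup multiply), and exp(s - M_new) joins S only, so the sink dilutes the normalization without contributing a value row. One such merge in scalar C++ (values illustrative):

    #include <algorithm>
    #include <cmath>
    #include <cstdio>

    int main() {
        // Running streaming-softmax state for one query row.
        float M = 1.2f, S = 3.4f, O = 0.9f;   // O stands in for the whole output row
        const float s = 2.0f;                 // per-head sink logit

        const float M_new = std::max(M, s);
        const float ms = std::exp(M - M_new); // rescales prior contributions
        const float vs = std::exp(s - M_new); // the sink's own weight

        S = S*ms + vs;                        // sink joins the denominator...
        O = O*ms;                             // ...but adds nothing to the output
        M = M_new;

        printf("M=%f S=%f O=%f (final output would be O/S)\n", M, S, O);
    }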
2*(nsg-1)*SH + 2*Q*DK); // final rescale with 1/S and store to global memory - if (sgitg == 0) { - for (short j = 0; j < Q && iq1 + j < args.ne01; ++j) { - const float S = ss[j*TS + 0]; + for (short j = sgitg; j < Q && iq1 + j < args.ne01; j += nsg) { + const float S = 1.0f/sf[j*TS + 0]; - for (short i = tiisg; i < DV4; i += NW) { - dst4[((uint64_t)iq3*args.ne2*args.ne1 + iq2 + (uint64_t)(iq1 + j)*args.ne1)*DV4 + i] = (float4) so4[j*DV4 + i]/S; - } + device float4 * dst4 = (device float4 *) dst + ((uint64_t)iq3*args.ne2*args.ne1 + iq2 + (uint64_t)(iq1 + j)*args.ne1)*DV4; + + for (short i = tiisg; i < DV4; i += NW) { + dst4[i] = (float4) so4[j*DV4 + i]*S; } } } @@ -6200,12 +7209,22 @@ kernel void kernel_flash_attn_ext( // template to be able to explore different combinations // #define FA_TYPES \ - half, half4, simdgroup_half8x8, \ - half, half4x4, simdgroup_half8x8, \ - half, half4x4, simdgroup_half8x8, \ - float, simdgroup_float8x8, \ - float, simdgroup_float8x8, \ - half, half4, simdgroup_half8x8 + float, float4, simdgroup_float8x8, \ + half, half4x4, simdgroup_half8x8, \ + half, half4x4, simdgroup_half8x8, \ + float, simdgroup_float8x8, \ + float, simdgroup_float8x8, \ + half, half4, simdgroup_half8x8 + //float, float4, simdgroup_float8x8 + +#define FA_TYPES_BF \ + bfloat, bfloat4, simdgroup_bfloat8x8, \ + bfloat, bfloat4x4, simdgroup_bfloat8x8, \ + bfloat, bfloat4x4, simdgroup_bfloat8x8, \ + float, simdgroup_float8x8, \ + float, simdgroup_float8x8, \ + half, half4, simdgroup_half8x8 + //float, float4, simdgroup_float8x8 typedef decltype(kernel_flash_attn_ext) flash_attn_ext_t; @@ -6220,15 +7239,15 @@ template [[host_name("kernel_flash_attn_ext_f16_h256")]] kernel flash_at template [[host_name("kernel_flash_attn_ext_f16_hk576_hv512")]] kernel flash_attn_ext_t kernel_flash_attn_ext; #if defined(GGML_METAL_USE_BF16) -template [[host_name("kernel_flash_attn_ext_bf16_h64" )]] kernel flash_attn_ext_t kernel_flash_attn_ext; -template [[host_name("kernel_flash_attn_ext_bf16_h80" )]] kernel flash_attn_ext_t kernel_flash_attn_ext; -template [[host_name("kernel_flash_attn_ext_bf16_h96" )]] kernel flash_attn_ext_t kernel_flash_attn_ext; -template [[host_name("kernel_flash_attn_ext_bf16_h112")]] kernel flash_attn_ext_t kernel_flash_attn_ext; -template [[host_name("kernel_flash_attn_ext_bf16_h128")]] kernel flash_attn_ext_t kernel_flash_attn_ext; -template [[host_name("kernel_flash_attn_ext_bf16_h192")]] kernel flash_attn_ext_t kernel_flash_attn_ext; -template [[host_name("kernel_flash_attn_ext_bf16_hk192_hv128")]] kernel flash_attn_ext_t kernel_flash_attn_ext; -template [[host_name("kernel_flash_attn_ext_bf16_h256")]] kernel flash_attn_ext_t kernel_flash_attn_ext; -template [[host_name("kernel_flash_attn_ext_bf16_hk576_hv512")]] kernel flash_attn_ext_t kernel_flash_attn_ext; +template [[host_name("kernel_flash_attn_ext_bf16_h64" )]] kernel flash_attn_ext_t kernel_flash_attn_ext; +template [[host_name("kernel_flash_attn_ext_bf16_h80" )]] kernel flash_attn_ext_t kernel_flash_attn_ext; +template [[host_name("kernel_flash_attn_ext_bf16_h96" )]] kernel flash_attn_ext_t kernel_flash_attn_ext; +template [[host_name("kernel_flash_attn_ext_bf16_h112")]] kernel flash_attn_ext_t kernel_flash_attn_ext; +template [[host_name("kernel_flash_attn_ext_bf16_h128")]] kernel flash_attn_ext_t kernel_flash_attn_ext; +template [[host_name("kernel_flash_attn_ext_bf16_h192")]] kernel flash_attn_ext_t kernel_flash_attn_ext; +template [[host_name("kernel_flash_attn_ext_bf16_hk192_hv128")]] kernel flash_attn_ext_t 
kernel_flash_attn_ext; +template [[host_name("kernel_flash_attn_ext_bf16_h256")]] kernel flash_attn_ext_t kernel_flash_attn_ext; +template [[host_name("kernel_flash_attn_ext_bf16_hk576_hv512")]] kernel flash_attn_ext_t kernel_flash_attn_ext; #endif template [[host_name("kernel_flash_attn_ext_q4_0_h64" )]] kernel flash_attn_ext_t kernel_flash_attn_ext; @@ -6282,6 +7301,7 @@ template [[host_name("kernel_flash_attn_ext_q8_0_h256")]] kernel flash_at template [[host_name("kernel_flash_attn_ext_q8_0_hk576_hv512")]] kernel flash_attn_ext_t kernel_flash_attn_ext; #undef FA_TYPES +#undef FA_TYPES_BF template< typename q4_t, // query types in shared memory @@ -6294,7 +7314,7 @@ template< typename kd4_t, // key type in device memory short nl_k, void (*deq_k_t4)(device const kd4_t *, short, thread k4_t &), - typename vd4_t, // key type in device memory + typename vd4_t, // value type in device memory short nl_v, void (*deq_v_t4)(device const vd4_t *, short, thread v4_t &), short DK, // K head size @@ -6308,6 +7328,7 @@ kernel void kernel_flash_attn_ext_vec( device const char * k, device const char * v, device const char * mask, + device const char * sinks, device char * dst, threadgroup half * shmem_f16 [[threadgroup(0)]], uint3 tgpig[[threadgroup_position_in_grid]], @@ -6328,12 +7349,12 @@ kernel void kernel_flash_attn_ext_vec( const short T = DK + nsg*SH; // shared memory size per query in (half) - //threadgroup q_t * sq = (threadgroup q_t *) (shmem_f16 + 0*DK); // holds the query data - threadgroup q4_t * sq4 = (threadgroup q4_t *) (shmem_f16 + 0*DK); // same as above but in q4_t - threadgroup s_t * ss = (threadgroup s_t *) (shmem_f16 + sgitg*SH + Q*DK); // scratch buffer for attention - threadgroup s4_t * ss4 = (threadgroup s4_t *) (shmem_f16 + sgitg*SH + Q*DK); // same as above but in s4_t - threadgroup float * sm = (threadgroup float *) (shmem_f16 + sgitg*SH + 2*C + Q*DK); // scratch buffer for mask - threadgroup o4_t * sr4 = (threadgroup o4_t *) (shmem_f16 + sgitg*DV + Q*T); // scratch buffer for the results + //threadgroup q_t * sq = (threadgroup q_t *) (shmem_f16 + 0*DK); // holds the query data + threadgroup q4_t * sq4 = (threadgroup q4_t *) (shmem_f16 + 0*DK); // same as above but in q4_t + threadgroup s_t * ss = (threadgroup s_t *) (shmem_f16 + sgitg*SH + Q*DK); // scratch buffer for attention + threadgroup s4_t * ss4 = (threadgroup s4_t *) (shmem_f16 + sgitg*SH + Q*DK); // same as above but in s4_t + threadgroup float * sm = (threadgroup float *) (shmem_f16 + sgitg*SH + 2*C + Q*DK); // scratch buffer for mask + threadgroup o4_t * sr4 = (threadgroup o4_t *) (shmem_f16 + 2*sgitg*DV + Q*T); // scratch buffer for the results // store the result for all queries in local memory (the O matrix from the paper) o4_t lo[DV4/NL]; @@ -6379,7 +7400,7 @@ kernel void kernel_flash_attn_ext_vec( const bool has_mask = mask != q; // pointer to the mask - device const half * pm = (device const half *) (mask + iq1*args.nb31); + device const half * pm = (device const half *) (mask + iq1*args.nb31 + (iq2%args.ne32)*args.nb32 + (iq3%args.ne33)*args.nb33); float slope = 1.0f; @@ -6405,6 +7426,11 @@ kernel void kernel_flash_attn_ext_vec( sm[tiisg] = pm[ic + tiisg]; } + // skip -INF blocks + if (simd_max(sm[tiisg]) == -INFINITY) { + continue; + } + // Q*K^T { // each simdgroup processes 1 query and NE (NW/NL) head elements @@ -6520,6 +7546,23 @@ kernel void kernel_flash_attn_ext_vec( } } + if (sinks != q && sgitg == 0) { + const float m = M; + const float s = tiisg == 0 ? 
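[note] Two vec-path changes land here: KV blocks whose cached mask tile is entirely -INF are now skipped before any Q*K^T work (one simd_max over the tile detects this), and the same sink merge as in the block kernel follows just below. The skip condition in scalar form (a sketch):

    #include <cmath>
    #include <vector>

    // True when a cached mask tile is all -inf, i.e. every position in this
    // KV block is masked out and the block can be skipped entirely.
    bool block_fully_masked(const std::vector<float> &mask_tile) {
        float m = -INFINITY;                  // plays the role of simd_max
        for (float v : mask_tile) m = std::fmax(m, v);
        return m == -INFINITY;
    }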
((device const float *) sinks)[iq2] : -FLT_MAX/2; + + M = simd_max(max(M, s)); + + const float ms = exp(m - M); + const float vs = exp(s - M); + + S = S*ms + simd_sum(vs); + +#pragma unroll(DV4/NL) + for (short ii = 0; ii < DV4; ii += NL) { + lo[ii/NL] *= ms; + } + } + // these are needed for reducing the results from the simdgroups (reuse the ss buffer) if (tiisg == 0) { ss[0] = (s_t) S; @@ -6633,10 +7676,20 @@ kernel void kernel_flash_attn_ext_vec( half4, \ float, \ float, float4, \ - half4 + float4 typedef decltype(kernel_flash_attn_ext_vec) flash_attn_ext_vec_t; +template [[host_name("kernel_flash_attn_ext_vec_f16_h64")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; +#if defined(GGML_METAL_USE_BF16) +template [[host_name("kernel_flash_attn_ext_vec_bf16_h64")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; +#endif +template [[host_name("kernel_flash_attn_ext_vec_q4_0_h64")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; +template [[host_name("kernel_flash_attn_ext_vec_q4_1_h64")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; +template [[host_name("kernel_flash_attn_ext_vec_q5_0_h64")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; +template [[host_name("kernel_flash_attn_ext_vec_q5_1_h64")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; +template [[host_name("kernel_flash_attn_ext_vec_q8_0_h64")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; + template [[host_name("kernel_flash_attn_ext_vec_f16_h96")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; #if defined(GGML_METAL_USE_BF16) template [[host_name("kernel_flash_attn_ext_vec_bf16_h96")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; @@ -6737,11 +7790,16 @@ kernel void kernel_cpy( device const char * src0, device char * dst, uint3 tgpig[[threadgroup_position_in_grid]], + uint tiitg[[thread_index_in_threadgroup]], ushort3 tpitg[[thread_position_in_threadgroup]], - ushort3 ntg[[threads_per_threadgroup]]) { + ushort3 tptg[[threads_per_threadgroup]]) { const int i03 = tgpig[2]; const int i02 = tgpig[1]; - const int i01 = tgpig[0]; + const int i01 = tgpig[0]*tptg.y + tiitg/tptg.x; + + if (i01 >= args.ne01) { + return; + } const int64_t n = i03*args.ne02*args.ne01*args.ne00 + i02*args.ne01*args.ne00 + i01*args.ne00; @@ -6752,7 +7810,7 @@ kernel void kernel_cpy( device T1 * dst_data = (device T1 *) (dst + i3*args.nb3 + i2*args.nb2 + i1*args.nb1 + i0*args.nb0); - for (int64_t i00 = tpitg.x; i00 < args.ne00; i00 += ntg.x) { + for (int64_t i00 = tiitg%tptg.x; i00 < args.ne00; i00 += tptg.x) { device const T0 * src = (device T0 *)(src0 + i03*args.nb03 + i02*args.nb02 + i01*args.nb01 + i00*args.nb00); dst_data[i00] = (T1) src[0]; } @@ -6772,6 +7830,7 @@ template [[host_name("kernel_cpy_bf16_f32")]] kernel kernel_cpy_t kernel_cpy; #endif +// TODO: templetify these kernels kernel void kernel_cpy_f32_q8_0( constant ggml_metal_kargs_cpy & args, device const char * src0, @@ -6795,23 +7854,7 @@ kernel void kernel_cpy_f32_q8_0( for (int64_t i00 = tpitg.x*QK8_0; i00 < args.ne00; i00 += ntg.x*QK8_0) { device const float * src = (device float *)(src0 + i03*args.nb03 + i02*args.nb02 + i01*args.nb01 + i00*args.nb00); - float amax = 0.0f; // absolute max - - for (int j = 0; j < QK8_0; j++) { - const float v = src[j]; - amax = MAX(amax, fabs(v)); - } - - const float d = amax / ((1 << 7) - 1); - const float id = d ? 
1.0f/d : 0.0f; - - dst_data[i00/QK8_0].d = d; - - for (int j = 0; j < QK8_0; ++j) { - const float x0 = src[j]*id; - - dst_data[i00/QK8_0].qs[j] = round(x0); - } + quantize_q8_0(src, dst_data[i00/QK8_0]); } } @@ -6838,32 +7881,7 @@ kernel void kernel_cpy_f32_q4_0( for (int64_t i00 = tpitg.x*QK4_0; i00 < args.ne00; i00 += ntg.x*QK4_0) { device const float * src = (device float *)(src0 + i03*args.nb03 + i02*args.nb02 + i01*args.nb01 + i00*args.nb00); - float amax = 0.0f; // absolute max - float max = 0.0f; - - for (int j = 0; j < QK4_0; j++) { - const float v = src[j]; - if (amax < fabs(v)) { - amax = fabs(v); - max = v; - } - } - - const float d = max / -8; - const float id = d ? 1.0f/d : 0.0f; - - dst_data[i00/QK4_0].d = d; - - for (int j = 0; j < QK4_0/2; ++j) { - const float x0 = src[0 + j]*id; - const float x1 = src[QK4_0/2 + j]*id; - - const uint8_t xi0 = MIN(15, (int8_t)(x0 + 8.5f)); - const uint8_t xi1 = MIN(15, (int8_t)(x1 + 8.5f)); - - dst_data[i00/QK4_0].qs[j] = xi0; - dst_data[i00/QK4_0].qs[j] |= xi1 << 4; - } + quantize_q4_0(src, dst_data[i00/QK4_0]); } } @@ -6890,31 +7908,7 @@ kernel void kernel_cpy_f32_q4_1( for (int64_t i00 = tpitg.x*QK4_1; i00 < args.ne00; i00 += ntg.x*QK4_1) { device const float * src = (device float *)(src0 + i03*args.nb03 + i02*args.nb02 + i01*args.nb01 + i00*args.nb00); - float min = FLT_MAX; - float max = -FLT_MAX; - - for (int j = 0; j < QK4_1; j++) { - const float v = src[j]; - if (min > v) min = v; - if (max < v) max = v; - } - - const float d = (max - min) / ((1 << 4) - 1); - const float id = d ? 1.0f/d : 0.0f; - - dst_data[i00/QK4_1].d = d; - dst_data[i00/QK4_1].m = min; - - for (int j = 0; j < QK4_1/2; ++j) { - const float x0 = (src[0 + j] - min)*id; - const float x1 = (src[QK4_1/2 + j] - min)*id; - - const uint8_t xi0 = MIN(15, (int8_t)(x0 + 0.5f)); - const uint8_t xi1 = MIN(15, (int8_t)(x1 + 0.5f)); - - dst_data[i00/QK4_1].qs[j] = xi0; - dst_data[i00/QK4_1].qs[j] |= xi1 << 4; - } + quantize_q4_1(src, dst_data[i00/QK4_1]); } } @@ -6941,38 +7935,7 @@ kernel void kernel_cpy_f32_q5_0( for (int64_t i00 = tpitg.x*QK5_0; i00 < args.ne00; i00 += ntg.x*QK5_0) { device const float * src = (device float *)(src0 + i03*args.nb03 + i02*args.nb02 + i01*args.nb01 + i00*args.nb00); - float amax = 0.0f; // absolute max - float max = 0.0f; - - for (int j = 0; j < QK5_0; j++) { - const float v = src[j]; - if (amax < fabs(v)) { - amax = fabs(v); - max = v; - } - } - - const float d = max / -16; - const float id = d ? 1.0f/d : 0.0f; - - dst_data[i00/QK5_0].d = d; - - uint32_t qh = 0; - for (int j = 0; j < QK5_0/2; ++j) { - const float x0 = src[0 + j]*id; - const float x1 = src[QK5_0/2 + j]*id; - - const uint8_t xi0 = MIN(31, (int8_t)(x0 + 16.5f)); - const uint8_t xi1 = MIN(31, (int8_t)(x1 + 16.5f)); - - dst_data[i00/QK5_0].qs[j] = (xi0 & 0xf) | ((xi1 & 0xf) << 4); - qh |= ((xi0 & 0x10u) >> 4) << (j + 0); - qh |= ((xi1 & 0x10u) >> 4) << (j + QK5_0/2); - } - thread const uint8_t * qh8 = (thread const uint8_t *)&qh; - for (int j = 0; j < 4; ++j) { - dst_data[i00/QK5_0].qh[j] = qh8[j]; - } + quantize_q5_0(src, dst_data[i00/QK5_0]); } } @@ -6999,51 +7962,10 @@ kernel void kernel_cpy_f32_q5_1( for (int64_t i00 = tpitg.x*QK5_1; i00 < args.ne00; i00 += ntg.x*QK5_1) { device const float * src = (device float *)(src0 + i03*args.nb03 + i02*args.nb02 + i01*args.nb01 + i00*args.nb00); - float max = src[0]; - float min = src[0]; - - for (int j = 1; j < QK5_1; j++) { - const float v = src[j]; - min = v < min ? v : min; - max = v > max ? 
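[note] Each kernel_cpy_f32_q* body is reduced to a call into a shared per-block quantizer (quantize_q8_0, quantize_q4_0, quantize_q4_1, quantize_q5_0, ...), deduplicating the arithmetic the removed lines spelled out inline. For reference, the Q8_0 math that quantize_q8_0 replaces, in scalar C++ (the block struct is flattened to plain outputs here):

    #include <cmath>
    #include <cstdint>

    // Q8_0: one scale per 32-value block, values rounded to int8.
    // d = amax/127 so the largest-magnitude input maps to +/-127.
    void quantize_q8_0_ref(const float *src, float &d_out, int8_t *qs) {
        const int QK8_0 = 32;
        float amax = 0.0f;
        for (int j = 0; j < QK8_0; ++j) amax = std::fmax(amax, std::fabs(src[j]));

        const float d  = amax / 127.0f;       // (1 << 7) - 1 in the original
        const float id = d != 0.0f ? 1.0f/d : 0.0f;

        d_out = d;
        for (int j = 0; j < QK8_0; ++j) qs[j] = (int8_t) std::round(src[j]*id);
    }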
v : max; - } - - const float d = (max - min) / 31; - const float id = d ? 1.0f/d : 0.0f; - - dst_data[i00/QK5_1].d = d; - dst_data[i00/QK5_1].m = min; - - uint32_t qh = 0; - for (int j = 0; j < QK5_1/2; ++j) { - const float x0 = (src[0 + j] - min)*id; - const float x1 = (src[QK5_1/2 + j] - min)*id; - - const uint8_t xi0 = (uint8_t)(x0 + 0.5f); - const uint8_t xi1 = (uint8_t)(x1 + 0.5f); - - dst_data[i00/QK5_1].qs[j] = (xi0 & 0xf) | ((xi1 & 0xf) << 4); - qh |= ((xi0 & 0x10u) >> 4) << (j + 0); - qh |= ((xi1 & 0x10u) >> 4) << (j + QK5_1/2); - } - thread const uint8_t * qh8 = (thread const uint8_t *)&qh; - for (int j = 0; j < 4; ++j) { - dst_data[i00/QK5_1].qh[j] = qh8[j]; - } + quantize_q5_1(src, dst_data[i00/QK5_1]); } } -static inline int best_index_int8(int n, constant float * val, float x) { - if (x <= val[0]) return 0; - if (x >= val[n-1]) return n-1; - int ml = 0, mu = n-1; - while (mu-ml > 1) { - int mav = (ml+mu)/2; - if (x < val[mav]) mu = mav; else ml = mav; - } - return x - val[mu-1] < val[mu] - x ? mu-1 : mu; -} - kernel void kernel_cpy_f32_iq4_nl( constant ggml_metal_kargs_cpy & args, device const char * src0, @@ -7067,40 +7989,7 @@ kernel void kernel_cpy_f32_iq4_nl( for (int64_t i00 = tpitg.x*QK4_NL; i00 < args.ne00; i00 += ntg.x*QK4_NL) { device const float * src = (device float *)(src0 + i03*args.nb03 + i02*args.nb02 + i01*args.nb01 + i00*args.nb00); - float amax = 0.0f; // absolute max - float max = 0.0f; - - for (int j = 0; j < QK4_NL; j++) { - const float v = src[j]; - if (amax < fabs(v)) { - amax = fabs(v); - max = v; - } - } - - const float d = max / kvalues_iq4nl_f[0]; - const float id = d ? 1.0f/d : 0.0f; - - float sumqx = 0, sumq2 = 0; - for (int j = 0; j < QK4_NL/2; ++j) { - const float x0 = src[0 + j]*id; - const float x1 = src[QK4_NL/2 + j]*id; - - const uint8_t xi0 = best_index_int8(16, kvalues_iq4nl_f, x0); - const uint8_t xi1 = best_index_int8(16, kvalues_iq4nl_f, x1); - - dst_data[i00/QK4_NL].qs[j] = xi0 | (xi1 << 4); - - const float v0 = kvalues_iq4nl_f[xi0]; - const float v1 = kvalues_iq4nl_f[xi1]; - const float w0 = src[0 + j]*src[0 + j]; - const float w1 = src[QK4_NL/2 + j]*src[QK4_NL/2 + j]; - sumqx += w0*v0*src[j] + w1*v1*src[QK4_NL/2 + j]; - sumq2 += w0*v0*v0 + w1*v1*v1; - - } - - dst_data[i00/QK4_NL].d = sumq2 > 0 ? 
sumqx/sumq2 : d; + quantize_iq4_nl(src, dst_data[i00/QK4_NL]); } } @@ -8779,12 +9668,101 @@ kernel void kernel_mul_mv_iq4_xs_f32( kernel_mul_mv_iq4_xs_f32_impl(args, src0, src1, dst, shmem, tgpig, tiisg, sgitg); } +template +void kernel_mul_mv_mxfp4_f32_impl( + args_t args, + device const char * src0, + device const char * src1, + device char * dst, + threadgroup char * shmem, + uint3 tgpig, + ushort tiisg, + ushort sgitg) { + + threadgroup float * shmem_f32 = (threadgroup float *) shmem; + const int nb = args.ne00/QK_MXFP4; + + const int r0 = tgpig.x; + const int r1 = tgpig.y; + const int im = tgpig.z; + + const int first_row = (r0 * nsg + sgitg) * nr0; + + const uint i12 = im%args.ne12; + const uint i13 = im/args.ne12; + + const uint64_t offset0 = first_row*args.nb01 + (i12/args.r2)*args.nb02 + (i13/args.r3)*args.nb03; + const uint64_t offset1 = r1*args.nb11 + (i12 )*args.nb12 + (i13 )*args.nb13; + + device const block_mxfp4 * x = (device const block_mxfp4 *) (src0 + offset0); + device const float * y = (device const float *) (src1 + offset1); + + const short ix = tiisg/2; // 0...15 + const short it = tiisg%2; // 0 or 1 + + shmem_f32[tiisg] = kvalues_mxfp4_f[tiisg%16]; + threadgroup_barrier(mem_flags::mem_threadgroup); + + float4 yl[4]; + float sumf[nr0]={0.f}; + + device const float * yb = y + ix * QK_MXFP4 + it * 8; + + for (int ib = ix; ib < nb; ib += 16) { + device const float4 * y4 = (device const float4 *)yb; + yl[0] = y4[0]; + yl[1] = y4[4]; + yl[2] = y4[1]; + yl[3] = y4[5]; + +#pragma unroll(nr0) + for (short row = 0; row < nr0; row++) { + device const block_mxfp4 & xb = x[row*nb + ib]; + device const uint8_t * q2 = (device const uint8_t *)(xb.qs + 8*it); + + float4 acc1 = yl[0]*float4(shmem_f32[q2[0] & 0x0F], shmem_f32[q2[1] & 0x0F], shmem_f32[q2[2] & 0x0F], shmem_f32[q2[3] & 0x0F]); + float4 acc2 = yl[1]*float4(shmem_f32[q2[0] >> 4 ], shmem_f32[q2[1] >> 4 ], shmem_f32[q2[2] >> 4 ], shmem_f32[q2[3] >> 4 ]); + float4 acc3 = yl[2]*float4(shmem_f32[q2[4] & 0x0F], shmem_f32[q2[5] & 0x0F], shmem_f32[q2[6] & 0x0F], shmem_f32[q2[7] & 0x0F]); + float4 acc4 = yl[3]*float4(shmem_f32[q2[4] >> 4 ], shmem_f32[q2[5] >> 4 ], shmem_f32[q2[6] >> 4 ], shmem_f32[q2[7] >> 4 ]); + + acc1 = (acc1 + acc3) + (acc2 + acc4); + + sumf[row] += e8m0_to_fp32(xb.e) * ((acc1[0] + acc1[1]) + (acc1[2] + acc1[3])); + } + + yb += 16 * QK_MXFP4; + } + + device float * dst_f32 = (device float *) dst + (uint64_t)im*args.ne0*args.ne1 + (uint64_t)r1*args.ne0; + + for (int row = 0; row < nr0 && first_row + row < args.ne0; ++row) { + float sum_all = simd_sum(sumf[row]); + if (tiisg == 0) { + dst_f32[first_row + row] = sum_all; + } + } +} + +[[host_name("kernel_mul_mv_mxfp4_f32")]] +kernel void kernel_mul_mv_mxfp4_f32( + constant ggml_metal_kargs_mul_mv & args, + device const char * src0, + device const char * src1, + device char * dst, + threadgroup char * shmem [[threadgroup(0)]], + uint3 tgpig[[threadgroup_position_in_grid]], + ushort tiisg[[thread_index_in_simdgroup]], + ushort sgitg[[simdgroup_index_in_threadgroup]]) { + + kernel_mul_mv_mxfp4_f32_impl(args, src0, src1, dst, shmem, tgpig, tiisg, sgitg); +} + template kernel void kernel_get_rows_q( + constant ggml_metal_kargs_get_rows & args, device const void * src0, device const void * src1, device float * dst, - constant ggml_metal_kargs_get_rows & args, uint3 tgpig[[threadgroup_position_in_grid]], uint tiitg[[thread_index_in_threadgroup]], uint3 tptg [[threads_per_threadgroup]]) { @@ -8804,10 +9782,10 @@ kernel void kernel_get_rows_q( template kernel void 
kernel_get_rows_f( + constant ggml_metal_kargs_get_rows & args, device const void * src0, device const void * src1, device float * dst, - constant ggml_metal_kargs_get_rows & args, uint3 tgpig[[threadgroup_position_in_grid]], uint tiitg[[thread_index_in_threadgroup]], uint3 tptg [[threads_per_threadgroup]]) { @@ -8825,10 +9803,10 @@ kernel void kernel_get_rows_f( } kernel void kernel_get_rows_i32( + constant ggml_metal_kargs_get_rows & args, device const void * src0, device const void * src1, device int32_t * dst, - constant ggml_metal_kargs_get_rows & args, uint3 tgpig[[threadgroup_position_in_grid]], uint tiitg[[thread_index_in_threadgroup]], uint3 tptg [[threads_per_threadgroup]]) { @@ -8845,6 +9823,67 @@ kernel void kernel_get_rows_i32( } } +template +kernel void kernel_set_rows_q32( + constant ggml_metal_kargs_set_rows & args, + device const void * src0, + device const void * src1, + device float * dst, + uint3 tgpig[[threadgroup_position_in_grid]], + uint tiitg[[thread_index_in_threadgroup]], + uint3 tptg [[threads_per_threadgroup]]) { + const int32_t i03 = tgpig.z; + const int32_t i02 = tgpig.y; + + const int32_t i12 = i03%args.ne12; + const int32_t i11 = i02%args.ne11; + + const int32_t i01 = tgpig.x*tptg.y + tiitg/tptg.x; + if (i01 >= args.ne01) { + return; + } + + const int32_t i10 = i01; + const int64_t i1 = ((const device int64_t *) ((const device char *) src1 + i10*args.nb10 + i11*args.nb11 + i12*args.nb12))[0]; + + device block_q * dst_row = ( device block_q *) (( device char *) dst + i1*args.nb1 + i02*args.nb2 + i03*args.nb3); + const device float * src_row = (const device float *) ((const device char *) src0 + i01*args.nb01 + i02*args.nb02 + i03*args.nb03); + + for (int ind = tiitg%tptg.x; ind < args.nk0; ind += tptg.x) { + quantize_func(src_row + 32*ind, dst_row[ind]); + } +} + +template +kernel void kernel_set_rows_f( + constant ggml_metal_kargs_set_rows & args, + device const void * src0, + device const void * src1, + device float * dst, + uint3 tgpig[[threadgroup_position_in_grid]], + uint tiitg[[thread_index_in_threadgroup]], + uint3 tptg [[threads_per_threadgroup]]) { + const int32_t i03 = tgpig.z; + const int32_t i02 = tgpig.y; + + const int32_t i12 = i03%args.ne12; + const int32_t i11 = i02%args.ne11; + + const int32_t i01 = tgpig.x*tptg.y + tiitg/tptg.x; + if (i01 >= args.ne01) { + return; + } + + const int32_t i10 = i01; + const int64_t i1 = ((const device int64_t *) ((const device char *) src1 + i10*args.nb10 + i11*args.nb11 + i12*args.nb12))[0]; + + device T * dst_row = ( device T *) (( device char *) dst + i1*args.nb1 + i02*args.nb2 + i03*args.nb3); + const device float * src_row = (const device float *) ((const device char *) src0 + i01*args.nb01 + i02*args.nb02 + i03*args.nb03); + + for (int ind = tiitg%tptg.x; ind < args.nk0; ind += tptg.x) { + dst_row[ind] = (T) src_row[ind]; + } +} #define BLOCK_SIZE_M 64 // 8 simdgroup matrices from matrix A #define BLOCK_SIZE_N 32 // 4 simdgroup matrices from matrix B @@ -9232,49 +10271,6 @@ kernel void kernel_mul_mm_id( } } -template -void dequantize_mxfp4(device const block_mxfp4 * xb, short il, thread type4x4 & reg) { - float4x4 reg_f; - const ushort dst_bias = 15; - const ushort dst_0p5 = 0x3800; - const ushort dst_m_bits = 10; - const half scale = (half)(as_type(((uint32_t)xb->d) << 23)); - // il:0 first 16, il:1 last 16 - for (int i = 0; i < 8; i++) { - ushort em0 = xb->qs[il*8 + i] & 0x07; - ushort em1 = xb->qs[il*8 + i] & 0x70; - // float16 values - ushort x0 = (em0 << (dst_m_bits - 1)) | ((xb->qs[il*8 + i] & 
0x08) << 12); - ushort x1 = (em1 << (dst_m_bits - 5)) | ((xb->qs[il*8 + i] & 0x80) << 8); - - // Three cases: - // x is normal and non-zero: Correct bias - if ((em0 & 0x06) != 0) { - x0 = x0 + ((dst_bias - 1) << dst_m_bits); - } - if ((em1 & 0x60) != 0) { - x1 = x1 + ((dst_bias - 1) << dst_m_bits); - } - // x is subnormal (x == 0bs001 where s is the sign): Map to +-0.5 in the dst type - if (em0 == 0x01) { - x0 = dst_0p5 | (x0 & 0x8000); - } - if (em1 == 0x10) { - x1 = dst_0p5 | (x1 & 0x8000); - } - // x is zero, do nothing - - if (isnan(scale)) { - reg_f[i/2][2*(i%2) + 0] = scale; - reg_f[i/2][2*(i%2) + 1] = scale; - } else { - reg_f[i/2][2*(i%2) + 0] = scale * as_type(x0); - reg_f[i/2][2*(i%2) + 1] = scale * as_type(x1); - } - } - reg = (type4x4) reg_f; -} - #define QK_NL 16 // @@ -9296,6 +10292,7 @@ template [[host_name("kernel_get_rows_q4_1")]] kernel get_rows_q_t kernel_get template [[host_name("kernel_get_rows_q5_0")]] kernel get_rows_q_t kernel_get_rows_q; template [[host_name("kernel_get_rows_q5_1")]] kernel get_rows_q_t kernel_get_rows_q; template [[host_name("kernel_get_rows_q8_0")]] kernel get_rows_q_t kernel_get_rows_q; +template [[host_name("kernel_get_rows_mxfp4")]] kernel get_rows_q_t kernel_get_rows_q; template [[host_name("kernel_get_rows_q2_K")]] kernel get_rows_q_t kernel_get_rows_q; template [[host_name("kernel_get_rows_q3_K")]] kernel get_rows_q_t kernel_get_rows_q; template [[host_name("kernel_get_rows_q4_K")]] kernel get_rows_q_t kernel_get_rows_q; @@ -9311,6 +10308,27 @@ template [[host_name("kernel_get_rows_iq1_m")]] kernel get_rows_q_t kernel_get template [[host_name("kernel_get_rows_iq4_nl")]] kernel get_rows_q_t kernel_get_rows_q; template [[host_name("kernel_get_rows_iq4_xs")]] kernel get_rows_q_t kernel_get_rows_q; +// +// set rows +// + +typedef decltype(kernel_set_rows_f) set_rows_f_t; + +template [[host_name("kernel_set_rows_f32")]] kernel set_rows_f_t kernel_set_rows_f; +template [[host_name("kernel_set_rows_f16")]] kernel set_rows_f_t kernel_set_rows_f; +#if defined(GGML_METAL_USE_BF16) +template [[host_name("kernel_set_rows_bf16")]] kernel set_rows_f_t kernel_set_rows_f; +#endif + +typedef decltype(kernel_set_rows_q32) set_rows_q32_t; + +template [[host_name("kernel_set_rows_q8_0")]] kernel set_rows_q32_t kernel_set_rows_q32; +template [[host_name("kernel_set_rows_q4_0")]] kernel set_rows_q32_t kernel_set_rows_q32; +template [[host_name("kernel_set_rows_q4_1")]] kernel set_rows_q32_t kernel_set_rows_q32; +template [[host_name("kernel_set_rows_q5_0")]] kernel set_rows_q32_t kernel_set_rows_q32; +template [[host_name("kernel_set_rows_q5_1")]] kernel set_rows_q32_t kernel_set_rows_q32; +template [[host_name("kernel_set_rows_iq4_nl")]] kernel set_rows_q32_t kernel_set_rows_q32; + // // matrix-matrix multiplication // @@ -9327,6 +10345,7 @@ template [[host_name("kernel_mul_mm_q4_1_f32")]] kernel mul_mm_t kernel_mul_m template [[host_name("kernel_mul_mm_q5_0_f32")]] kernel mul_mm_t kernel_mul_mm; template [[host_name("kernel_mul_mm_q5_1_f32")]] kernel mul_mm_t kernel_mul_mm; template [[host_name("kernel_mul_mm_q8_0_f32")]] kernel mul_mm_t kernel_mul_mm; +template [[host_name("kernel_mul_mm_mxfp4_f32")]] kernel mul_mm_t kernel_mul_mm; template [[host_name("kernel_mul_mm_q2_K_f32")]] kernel mul_mm_t kernel_mul_mm; template [[host_name("kernel_mul_mm_q3_K_f32")]] kernel mul_mm_t kernel_mul_mm; template [[host_name("kernel_mul_mm_q4_K_f32")]] kernel mul_mm_t kernel_mul_mm; @@ -9342,8 +10361,6 @@ template [[host_name("kernel_mul_mm_iq1_m_f32")]] kernel mul_mm_t 
kernel_mul_m template [[host_name("kernel_mul_mm_iq4_nl_f32")]] kernel mul_mm_t kernel_mul_mm; template [[host_name("kernel_mul_mm_iq4_xs_f32")]] kernel mul_mm_t kernel_mul_mm; -template [[host_name("kernel_mul_mm_mxfp4_f32")]] kernel mul_mm_t kernel_mul_mm; - // // indirect matrix-matrix multiplication // @@ -9360,6 +10377,7 @@ template [[host_name("kernel_mul_mm_id_q4_1_f16")]] kernel mul_mm_id kernel_m template [[host_name("kernel_mul_mm_id_q5_0_f16")]] kernel mul_mm_id kernel_mul_mm_id; template [[host_name("kernel_mul_mm_id_q5_1_f16")]] kernel mul_mm_id kernel_mul_mm_id; template [[host_name("kernel_mul_mm_id_q8_0_f16")]] kernel mul_mm_id kernel_mul_mm_id; +template [[host_name("kernel_mul_mm_id_mxfp4_f16")]] kernel mul_mm_id kernel_mul_mm_id; template [[host_name("kernel_mul_mm_id_q2_K_f16")]] kernel mul_mm_id kernel_mul_mm_id; template [[host_name("kernel_mul_mm_id_q3_K_f16")]] kernel mul_mm_id kernel_mul_mm_id; template [[host_name("kernel_mul_mm_id_q4_K_f16")]] kernel mul_mm_id kernel_mul_mm_id; @@ -9375,8 +10393,6 @@ template [[host_name("kernel_mul_mm_id_iq1_m_f16")]] kernel mul_mm_id kernel_m template [[host_name("kernel_mul_mm_id_iq4_nl_f16")]] kernel mul_mm_id kernel_mul_mm_id; template [[host_name("kernel_mul_mm_id_iq4_xs_f16")]] kernel mul_mm_id kernel_mul_mm_id; -template [[host_name("kernel_mul_mm_id_mxfp4_f16")]] kernel mul_mm_id kernel_mul_mm_id; - // // matrix-vector multiplication @@ -9493,120 +10509,6 @@ kernel void kernel_mul_mv_id( sgitg); } -// MXFP32 implementation derived from mul_vec_q_n_f32_impl and block_q_n_dot_y -void mul_mv_mxfp4_f32_impl( - ggml_metal_kargs_mul_mv args, - device const char * src0, - device const char * src1, - device char * dst, - threadgroup char * shmem, - uint3 tgpig, - ushort tiisg, - ushort sgitg) { - const ushort dst_bias = 15; - const ushort dst_0p5 = 0x3800; - const ushort dst_m_bits = 10; - const int nr0 = N_R0_MXFP4; - const int nsg = N_SG_MXFP4; - const int nw = N_SIMDWIDTH; - const int nb = args.ne00/MXFP4; - - const int r0 = tgpig.x; - const int r1 = tgpig.y; - const int im = tgpig.z; - - const int first_row = (r0 * nsg + sgitg) * nr0; - - const uint i12 = im%args.ne12; - const uint i13 = im/args.ne12; - - const uint64_t offset1 = r1*args.nb11 + (i12 )*args.nb12 + (i13 )*args.nb13; - - device const float * y = (device const float *) (src1 + offset1); - - // pointers to src0 rows - device const block_mxfp4 * ax[nr0]; - for (int row = 0; row < nr0; ++row) { - const uint64_t offset0 = (first_row + row)*args.nb01 + (i12/args.r2)*args.nb02 + (i13/args.r3)*args.nb03; - - ax[row] = (device const block_mxfp4 *) ((device char *) src0 + offset0); - } - - float yl[16]; // src1 vector cache - float sumf[nr0] = {0.f}; - - const short ix = (tiisg/2); - const short il = (tiisg%2)*16; - - device const float * yb = y + ix*MXFP4 + il; - - // each thread in a SIMD group deals with half a block. 
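
// Editorial reference, plain C++ (hypothetical names, not identifiers from this patch):
// the per-block arithmetic behind kernel_mul_mv_mxfp4_f32_impl above. An MXFP4 block is
// assumed to be one E8M0 scale byte plus 16 bytes packing 32 FP4 (E2M1) codes, element j
// in the low nibble of qs[j] and element j+16 in the high nibble, following the
// table-lookup kernel above and the OCP MX spec.
#include <cstdint>
#include <cstring>

// the 16 representable FP4 (E2M1) values, indexed by the 4-bit code (top bit = sign)
static const float fp4_values[16] = {
     0.0f,  0.5f,  1.0f,  1.5f,  2.0f,  3.0f,  4.0f,  6.0f,
    -0.0f, -0.5f, -1.0f, -1.5f, -2.0f, -3.0f, -4.0f, -6.0f,
};

struct block_mxfp4_ref {
    uint8_t e;      // shared E8M0 scale, decodes to 2^(e - 127)
    uint8_t qs[16]; // 32 packed FP4 codes
};

static float e8m0_to_fp32_ref(uint8_t e) {
    // same trick as as_type<float>(((uint32_t) d) << 23) in the removed Metal code:
    // drop e straight into the exponent field of an IEEE-754 single
    uint32_t bits = (uint32_t) e << 23;
    float f;
    std::memcpy(&f, &bits, sizeof f);
    return f;
}

// dot product of one quantized block with 32 float activations
static float mxfp4_block_dot_ref(const block_mxfp4_ref & x, const float y[32]) {
    float sum = 0.0f;
    for (int j = 0; j < 16; ++j) {
        sum += y[j +  0] * fp4_values[x.qs[j] & 0x0F]; // low nibble:  element j
        sum += y[j + 16] * fp4_values[x.qs[j] >> 4];   // high nibble: element j + 16
    }
    return e8m0_to_fp32_ref(x.e) * sum;
}
// Design note: the removed path here expanded each code into an FP16 bit pattern with
// explicit exponent and sign fixups; the replacement keeps the 16 candidate values in
// threadgroup memory (shmem_f32) so decoding becomes one table lookup per nibble.
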
- for (int ib = ix; ib < nb; ib += nw/2) { - -#pragma unroll - for (short row = 0; row < nr0; row++) { - // Processes 16 items - device const block_mxfp4 * qb_curr = ax[row] + ib; - float d = as_type(((uint32_t)(ax[row] + ib)->d) << 23); - // il = 0 or 16 - device const uint8_t *qs = ((device const uint8_t *) qb_curr + 1 + il/2); - for (int i = 0; i < 8; ++i) { - ushort em0 = qs[i] & 0x07; - ushort em1 = qs[i] & 0x70; - ushort x0 = (em0 << (dst_m_bits - 1)) | ((qs[i] & 0x08) << 12); - ushort x1 = (em1 << (dst_m_bits - 5)) | ((qs[i] & 0x80) << 8); - // Three cases: - // x is normal and non-zero: Correct bias - if ((em0 & 0x06) != 0) { - x0 = x0 + ((dst_bias - 1) << dst_m_bits); - } - if ((em1 & 0x60) != 0) { - x1 = x1 + ((dst_bias - 1) << dst_m_bits); - } - // x is subnormal (x == 0bs001 where s is the sign): Map to +-0.5 in the dst type - if (em0 == 0x01) { - x0 = dst_0p5 | (x0 & 0x8000); - } - if (em1 == 0x10) { - x1 = dst_0p5 | (x1 & 0x8000); - } - // x is zero, do nothing - if (!isnan(d)) { - sumf[row] += yb[i*2] * as_type(x0) * d - + yb[i*2+1] * as_type(x1) * d; - } else { - sumf[row] = d; - } - } - } - - yb += MXFP4 * 16; - } - - device float * dst_f32 = (device float *) dst + im*args.ne0*args.ne1 + r1*args.ne0; - - for (int row = 0; row < nr0; ++row) { - const float tot = simd_sum(sumf[row]); - - if (tiisg == 0 && first_row + row < args.ne01) { - dst_f32[first_row + row] = tot; - } - } -} - -[[host_name("kernel_mul_mv_mxfp4_f32")]] -kernel void kernel_mul_mv_mxfp4_f32( - constant ggml_metal_kargs_mul_mv & args, - device const char * src0, - device const char * src1, - device char * dst, - threadgroup char * shmem [[threadgroup(0)]], - uint3 tgpig[[threadgroup_position_in_grid]], - ushort tiisg[[thread_index_in_simdgroup]], - ushort sgitg[[simdgroup_index_in_threadgroup]]) { - mul_mv_mxfp4_f32_impl(args, src0, src1, dst, shmem, tgpig, tiisg, sgitg); -} - typedef decltype(kernel_mul_mv_id>>) kernel_mul_mv_id_t; template [[host_name("kernel_mul_mv_id_f32_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id>>; @@ -9621,6 +10523,8 @@ template [[host_name("kernel_mul_mv_id_q4_1_f32")]] kernel kernel_mul_mv_id_t template [[host_name("kernel_mul_mv_id_q5_0_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id>>; template [[host_name("kernel_mul_mv_id_q5_1_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id>>; +template [[host_name("kernel_mul_mv_id_mxfp4_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id>>; + template [[host_name("kernel_mul_mv_id_q2_K_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id>>; template [[host_name("kernel_mul_mv_id_q3_K_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id>>; template [[host_name("kernel_mul_mv_id_q4_K_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id>>; @@ -9636,8 +10540,6 @@ template [[host_name("kernel_mul_mv_id_iq2_s_f32")]] kernel kernel_mul_mv_id_t template [[host_name("kernel_mul_mv_id_iq4_nl_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id>>; template [[host_name("kernel_mul_mv_id_iq4_xs_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id>>; -template [[host_name("kernel_mul_mv_id_mxfp4_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id>; - kernel void kernel_pool_2d_max_f32( device const float * src0, device float * dst, diff --git a/ml/backend/ggml/ggml/src/ggml-metal/ggml-metal-impl.h b/ml/backend/ggml/ggml/src/ggml-metal/ggml-metal-impl.h index 938386ba8..fc6526d6d 100644 --- a/ml/backend/ggml/ggml/src/ggml-metal/ggml-metal-impl.h +++ b/ml/backend/ggml/ggml/src/ggml-metal/ggml-metal-impl.h @@ -23,6 +23,9 @@ #define N_R0_Q8_0 4 #define 
N_SG_Q8_0 2 +#define N_R0_MXFP4 2 +#define N_SG_MXFP4 2 + #define N_R0_Q2_K 4 #define N_SG_Q2_K 2 @@ -65,9 +68,6 @@ #define N_R0_IQ4_XS 2 #define N_SG_IQ4_XS 2 -#define N_R0_MXFP4 4 -#define N_SG_MXFP4 2 - // kernel argument structs // // - element counters (e.g. ne00) typically use int32_t to reduce register usage @@ -129,8 +129,18 @@ typedef struct { uint64_t nb2; uint64_t nb3; uint64_t offs; + uint64_t o1[8]; } ggml_metal_kargs_bin; +typedef struct { + int64_t ne0; + int64_t ne1; + size_t nb01; + size_t nb02; + size_t nb11; + size_t nb21; +} ggml_metal_kargs_add_id; + typedef struct { int32_t ne00; int32_t ne01; @@ -232,14 +242,18 @@ typedef struct { uint64_t nb21; uint64_t nb22; uint64_t nb23; + int32_t ne32; + int32_t ne33; uint64_t nb31; + uint64_t nb32; + uint64_t nb33; int32_t ne1; int32_t ne2; float scale; float max_bias; float m0; float m1; - uint16_t n_head_log2; + int32_t n_head_log2; float logit_softcap; } ggml_metal_kargs_flash_attn_ext; @@ -376,8 +390,16 @@ typedef struct { typedef struct { int32_t ne00; int32_t ne00_4; - uint64_t nb01; + uint64_t nb1; + uint64_t nb2; + uint64_t nb3; float eps; + int32_t nef1[3]; + int32_t nef2[3]; + int32_t nef3[3]; + uint64_t nbf1[3]; + uint64_t nbf2[3]; + uint64_t nbf3[3]; } ggml_metal_kargs_rms_norm; typedef struct { @@ -425,6 +447,19 @@ typedef struct { int32_t KHW; // KH * KW, pre-computed on CPU to save GPU resources } ggml_metal_kargs_im2col; +typedef struct{ + int32_t ne00; + uint64_t nb01; + int32_t ne10; + uint64_t nb11; + int32_t ne0; + uint64_t nb1; + int32_t i00; + int32_t i10; + float alpha; + float limit; +} ggml_metal_kargs_glu; + typedef struct { int64_t ne00; int64_t ne01; @@ -453,14 +488,26 @@ typedef struct { } ggml_metal_kargs_sum_rows; typedef struct { - int64_t ne00; - int64_t ne01; - int64_t ne02; + int32_t ne00; + int32_t ne01; + int32_t ne02; + uint64_t nb01; + uint64_t nb02; + uint64_t nb03; + int32_t ne11; + int32_t ne12; + int32_t ne13; + uint64_t nb11; + uint64_t nb12; + uint64_t nb13; + uint64_t nb1; + uint64_t nb2; + uint64_t nb3; float scale; float max_bias; float m0; float m1; - uint32_t n_head_log2; + int32_t n_head_log2; } ggml_metal_kargs_soft_max; typedef struct { @@ -491,26 +538,26 @@ typedef struct { typedef struct { int64_t d_state; int64_t d_inner; + int64_t n_head; + int64_t n_group; int64_t n_seq_tokens; int64_t n_seqs; - uint64_t nb00; + int64_t s_off; uint64_t nb01; uint64_t nb02; - uint64_t nb10; + uint64_t nb03; uint64_t nb11; uint64_t nb12; uint64_t nb13; - uint64_t nb20; uint64_t nb21; uint64_t nb22; - uint64_t nb30; uint64_t nb31; - uint64_t nb40; uint64_t nb41; uint64_t nb42; - uint64_t nb50; + uint64_t nb43; uint64_t nb51; uint64_t nb52; + uint64_t nb53; } ggml_metal_kargs_ssm_scan; typedef struct { @@ -524,6 +571,22 @@ typedef struct { uint64_t nb2; } ggml_metal_kargs_get_rows; +typedef struct { + int32_t nk0; + int32_t ne01; + uint64_t nb01; + uint64_t nb02; + uint64_t nb03; + int32_t ne11; + int32_t ne12; + uint64_t nb10; + uint64_t nb11; + uint64_t nb12; + uint64_t nb1; + uint64_t nb2; + uint64_t nb3; +} ggml_metal_kargs_set_rows; + typedef struct { int64_t ne00; int64_t ne01; diff --git a/ml/backend/ggml/ggml/src/ggml-metal/ggml-metal.m b/ml/backend/ggml/ggml/src/ggml-metal/ggml-metal.m index d8e05a21b..e4c31268f 100644 --- a/ml/backend/ggml/ggml/src/ggml-metal/ggml-metal.m +++ b/ml/backend/ggml/ggml/src/ggml-metal/ggml-metal.m @@ -40,7 +40,6 @@ static const NSInteger MTLGPUFamilyMetal3_GGML = 5001; static struct ggml_backend_reg g_ggml_backend_metal_reg; static struct 
ggml_backend_device g_ggml_backend_metal_device; - // information about a Metal device // note: assumes single GPU device - the default one // TODO: support multiple GPU devices @@ -49,22 +48,37 @@ static struct ggml_backend_metal_device_context { int mtl_device_ref_count; id mtl_library; + NSLock * mtl_lock; + bool has_simdgroup_reduction; bool has_simdgroup_mm; bool has_residency_sets; bool has_bfloat; bool use_bfloat; + bool use_fusion; + + int debug_fusion; + + // how many times a given op was fused + uint64_t fuse_cnt[GGML_OP_COUNT]; + + size_t max_size; char name[128]; } g_ggml_ctx_dev_main = { /*.mtl_device =*/ nil, /*.mtl_device_ref_count =*/ 0, /*.mtl_library =*/ nil, + /*.mtl_lock =*/ nil, /*.has_simdgroup_reduction =*/ false, /*.has_simdgroup_mm =*/ false, /*.has_residency_sets =*/ false, /*.has_bfloat =*/ false, /*.use_bfloat =*/ false, + /*.use_fusion =*/ true, + /*.debug_fusion =*/ 0, + /*.fuse_cnt =*/ { 0 }, + /*.max_size =*/ 0, /*.name =*/ "", }; @@ -72,18 +86,20 @@ static struct ggml_backend_metal_device_context { static id ggml_backend_metal_device_acq(struct ggml_backend_metal_device_context * ctx) { assert(ctx != NULL); - if (ctx->mtl_device == nil) { - ctx->mtl_device = MTLCreateSystemDefaultDevice(); + if (ctx->mtl_lock == nil) { + ctx->mtl_lock = [[NSLock alloc] init]; } - if (ctx->mtl_device) { + if (ctx->mtl_device == nil) { + ctx->mtl_device = MTLCreateSystemDefaultDevice(); + ctx->has_simdgroup_reduction = [ctx->mtl_device supportsFamily:MTLGPUFamilyApple7]; ctx->has_simdgroup_reduction |= [ctx->mtl_device supportsFamily:MTLGPUFamilyMetal3_GGML]; ctx->has_simdgroup_mm = [ctx->mtl_device supportsFamily:MTLGPUFamilyApple7]; #if defined(GGML_METAL_HAS_RESIDENCY_SETS) - ctx->has_residency_sets = getenv("GGML_METAL_NO_RESIDENCY") == NULL; + ctx->has_residency_sets = getenv("GGML_METAL_NO_RESIDENCY") == nil; #endif ctx->has_bfloat = [ctx->mtl_device supportsFamily:MTLGPUFamilyMetal3_GGML]; @@ -98,6 +114,16 @@ static id ggml_backend_metal_device_acq(struct ggml_backend_metal_dev #else ctx->use_bfloat = false; #endif + ctx->use_fusion = getenv("GGML_METAL_FUSION_DISABLE") == nil; + + { + const char * val = getenv("GGML_METAL_FUSION_DEBUG"); + ctx->debug_fusion = val ? 
atoi(val) : 0; + } + + memset(ctx->fuse_cnt, 0, sizeof(ctx->fuse_cnt)); + + ctx->max_size = ctx->mtl_device.maxBufferLength; strncpy(ctx->name, [[ctx->mtl_device name] UTF8String], sizeof(ctx->name) - 1); } @@ -115,6 +141,23 @@ static void ggml_backend_metal_device_rel(struct ggml_backend_metal_device_conte ctx->mtl_device_ref_count--; if (ctx->mtl_device_ref_count == 0) { + if (ctx->debug_fusion > 0) { + fprintf(stderr, "%s: fusion stats:\n", __func__); + for (int i = 0; i < GGML_OP_COUNT; i++) { + if (ctx->fuse_cnt[i] == 0) { + continue; + } + + // note: cannot use ggml_log here + fprintf(stderr, "%s: - %s: %" PRIu64 "\n", __func__, ggml_op_name((enum ggml_op) i), ctx->fuse_cnt[i]); + } + } + + if (ctx->mtl_lock) { + [ctx->mtl_lock release]; + ctx->mtl_lock = nil; + } + if (ctx->mtl_library) { [ctx->mtl_library release]; ctx->mtl_library = nil; @@ -135,13 +178,28 @@ struct ggml_metal_kernel { enum ggml_metal_kernel_type { GGML_METAL_KERNEL_TYPE_ADD, - GGML_METAL_KERNEL_TYPE_ADD_ROW, + GGML_METAL_KERNEL_TYPE_ADD_FUSE_2, + GGML_METAL_KERNEL_TYPE_ADD_FUSE_3, + GGML_METAL_KERNEL_TYPE_ADD_FUSE_4, + GGML_METAL_KERNEL_TYPE_ADD_FUSE_5, + GGML_METAL_KERNEL_TYPE_ADD_FUSE_6, + GGML_METAL_KERNEL_TYPE_ADD_FUSE_7, + GGML_METAL_KERNEL_TYPE_ADD_FUSE_8, + GGML_METAL_KERNEL_TYPE_ADD_ROW_C4, + GGML_METAL_KERNEL_TYPE_ADD_ROW_C4_FUSE_2, + GGML_METAL_KERNEL_TYPE_ADD_ROW_C4_FUSE_3, + GGML_METAL_KERNEL_TYPE_ADD_ROW_C4_FUSE_4, + GGML_METAL_KERNEL_TYPE_ADD_ROW_C4_FUSE_5, + GGML_METAL_KERNEL_TYPE_ADD_ROW_C4_FUSE_6, + GGML_METAL_KERNEL_TYPE_ADD_ROW_C4_FUSE_7, + GGML_METAL_KERNEL_TYPE_ADD_ROW_C4_FUSE_8, GGML_METAL_KERNEL_TYPE_SUB, - GGML_METAL_KERNEL_TYPE_SUB_ROW, + GGML_METAL_KERNEL_TYPE_SUB_ROW_C4, GGML_METAL_KERNEL_TYPE_MUL, - GGML_METAL_KERNEL_TYPE_MUL_ROW, + GGML_METAL_KERNEL_TYPE_MUL_ROW_C4, GGML_METAL_KERNEL_TYPE_DIV, - GGML_METAL_KERNEL_TYPE_DIV_ROW, + GGML_METAL_KERNEL_TYPE_DIV_ROW_C4, + GGML_METAL_KERNEL_TYPE_ADD_ID, GGML_METAL_KERNEL_TYPE_REPEAT_F32, GGML_METAL_KERNEL_TYPE_REPEAT_F16, GGML_METAL_KERNEL_TYPE_REPEAT_I32, @@ -154,11 +212,19 @@ enum ggml_metal_kernel_type { GGML_METAL_KERNEL_TYPE_SIGMOID, GGML_METAL_KERNEL_TYPE_GELU, GGML_METAL_KERNEL_TYPE_GELU_4, + GGML_METAL_KERNEL_TYPE_GELU_ERF, + GGML_METAL_KERNEL_TYPE_GELU_ERF_4, GGML_METAL_KERNEL_TYPE_GELU_QUICK, GGML_METAL_KERNEL_TYPE_GELU_QUICK_4, GGML_METAL_KERNEL_TYPE_SILU, GGML_METAL_KERNEL_TYPE_SILU_4, GGML_METAL_KERNEL_TYPE_ELU, + GGML_METAL_KERNEL_TYPE_ABS, + GGML_METAL_KERNEL_TYPE_SGN, + GGML_METAL_KERNEL_TYPE_STEP, + GGML_METAL_KERNEL_TYPE_HARDSWISH, + GGML_METAL_KERNEL_TYPE_HARDSIGMOID, + GGML_METAL_KERNEL_TYPE_EXP, GGML_METAL_KERNEL_TYPE_SOFT_MAX_F16, GGML_METAL_KERNEL_TYPE_SOFT_MAX_F16_4, GGML_METAL_KERNEL_TYPE_SOFT_MAX_F32, @@ -173,6 +239,7 @@ enum ggml_metal_kernel_type { GGML_METAL_KERNEL_TYPE_GET_ROWS_Q5_0, GGML_METAL_KERNEL_TYPE_GET_ROWS_Q5_1, GGML_METAL_KERNEL_TYPE_GET_ROWS_Q8_0, + GGML_METAL_KERNEL_TYPE_GET_ROWS_MXFP4, GGML_METAL_KERNEL_TYPE_GET_ROWS_Q2_K, GGML_METAL_KERNEL_TYPE_GET_ROWS_Q3_K, GGML_METAL_KERNEL_TYPE_GET_ROWS_Q4_K, @@ -188,20 +255,35 @@ enum ggml_metal_kernel_type { GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ4_NL, GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ4_XS, GGML_METAL_KERNEL_TYPE_GET_ROWS_I32, + GGML_METAL_KERNEL_TYPE_SET_ROWS_F32, + GGML_METAL_KERNEL_TYPE_SET_ROWS_F16, + GGML_METAL_KERNEL_TYPE_SET_ROWS_BF16, + GGML_METAL_KERNEL_TYPE_SET_ROWS_Q8_0, + GGML_METAL_KERNEL_TYPE_SET_ROWS_Q4_0, + GGML_METAL_KERNEL_TYPE_SET_ROWS_Q4_1, + GGML_METAL_KERNEL_TYPE_SET_ROWS_Q5_0, + GGML_METAL_KERNEL_TYPE_SET_ROWS_Q5_1, + 
GGML_METAL_KERNEL_TYPE_SET_ROWS_IQ4_NL, GGML_METAL_KERNEL_TYPE_RMS_NORM, + GGML_METAL_KERNEL_TYPE_RMS_NORM_MUL, + GGML_METAL_KERNEL_TYPE_RMS_NORM_MUL_ADD, GGML_METAL_KERNEL_TYPE_L2_NORM, GGML_METAL_KERNEL_TYPE_GROUP_NORM, GGML_METAL_KERNEL_TYPE_NORM, GGML_METAL_KERNEL_TYPE_SSM_CONV_F32, GGML_METAL_KERNEL_TYPE_SSM_SCAN_F32, + GGML_METAL_KERNEL_TYPE_SSM_SCAN_F32_GROUP, GGML_METAL_KERNEL_TYPE_RWKV_WKV6_F32, GGML_METAL_KERNEL_TYPE_RWKV_WKV7_F32, GGML_METAL_KERNEL_TYPE_MUL_MV_F32_F32, + GGML_METAL_KERNEL_TYPE_MUL_MV_F32_F32_C4, GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32, + GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32_C4, GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32_1ROW, GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32_L4, GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F16, GGML_METAL_KERNEL_TYPE_MUL_MV_BF16_F32, + GGML_METAL_KERNEL_TYPE_MUL_MV_BF16_F32_C4, GGML_METAL_KERNEL_TYPE_MUL_MV_BF16_F32_1ROW, GGML_METAL_KERNEL_TYPE_MUL_MV_BF16_F32_L4, GGML_METAL_KERNEL_TYPE_MUL_MV_BF16_BF16, @@ -235,6 +317,10 @@ enum ggml_metal_kernel_type { GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q8_0_F32_R1_3, GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q8_0_F32_R1_4, GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q8_0_F32_R1_5, + GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_MXFP4_F32_R1_2, + GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_MXFP4_F32_R1_3, + GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_MXFP4_F32_R1_4, + GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_MXFP4_F32_R1_5, GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q4_K_F32_R1_2, GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q4_K_F32_R1_3, GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q4_K_F32_R1_4, @@ -276,6 +362,7 @@ enum ggml_metal_kernel_type { GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q5_0_F32, GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q5_1_F32, GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q8_0_F32, + GGML_METAL_KERNEL_TYPE_MUL_MV_ID_MXFP4_F32, GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q2_K_F32, GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q3_K_F32, GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q4_K_F32, @@ -290,7 +377,6 @@ enum ggml_metal_kernel_type { GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ1_M_F32, GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ4_NL_F32, GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ4_XS_F32, - GGML_METAL_KERNEL_TYPE_MUL_MV_ID_MXFP4_F32, GGML_METAL_KERNEL_TYPE_MUL_MM_F32_F32, GGML_METAL_KERNEL_TYPE_MUL_MM_F16_F32, GGML_METAL_KERNEL_TYPE_MUL_MM_BF16_F32, @@ -299,6 +385,7 @@ enum ggml_metal_kernel_type { GGML_METAL_KERNEL_TYPE_MUL_MM_Q5_0_F32, GGML_METAL_KERNEL_TYPE_MUL_MM_Q5_1_F32, GGML_METAL_KERNEL_TYPE_MUL_MM_Q8_0_F32, + GGML_METAL_KERNEL_TYPE_MUL_MM_MXFP4_F32, GGML_METAL_KERNEL_TYPE_MUL_MM_Q2_K_F32, GGML_METAL_KERNEL_TYPE_MUL_MM_Q3_K_F32, GGML_METAL_KERNEL_TYPE_MUL_MM_Q4_K_F32, @@ -313,7 +400,6 @@ enum ggml_metal_kernel_type { GGML_METAL_KERNEL_TYPE_MUL_MM_IQ1_M_F32, GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_NL_F32, GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_XS_F32, - GGML_METAL_KERNEL_TYPE_MUL_MM_MXFP4_F32, GGML_METAL_KERNEL_TYPE_MUL_MM_ID_MAP0_F16, GGML_METAL_KERNEL_TYPE_MUL_MM_ID_MAP1_F32, GGML_METAL_KERNEL_TYPE_MUL_MM_ID_F32_F16, @@ -324,6 +410,7 @@ enum ggml_metal_kernel_type { GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q5_0_F16, GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q5_1_F16, GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q8_0_F16, + GGML_METAL_KERNEL_TYPE_MUL_MM_ID_MXFP4_F16, GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q2_K_F16, GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q3_K_F16, GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q4_K_F16, @@ -338,7 +425,6 @@ enum ggml_metal_kernel_type { GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ1_M_F16, GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_NL_F16, GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_XS_F16, - GGML_METAL_KERNEL_TYPE_MUL_MM_ID_MXFP4_F16, GGML_METAL_KERNEL_TYPE_ROPE_NORM_F32, GGML_METAL_KERNEL_TYPE_ROPE_NORM_F16, 
GGML_METAL_KERNEL_TYPE_ROPE_MULTI_F32, @@ -424,6 +510,13 @@ enum ggml_metal_kernel_type { GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q8_0_HK192_HV128, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q8_0_H256, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q8_0_HK576_HV512, + GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_F16_H64, + GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_BF16_H64, + GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q4_0_H64, + GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q4_1_H64, + GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q5_0_H64, + GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q5_1_H64, + GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q8_0_H64, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_F16_H96, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_BF16_H96, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q4_0_H96, @@ -497,6 +590,12 @@ enum ggml_metal_kernel_type { GGML_METAL_KERNEL_TYPE_SIN, GGML_METAL_KERNEL_TYPE_COS, GGML_METAL_KERNEL_TYPE_NEG, + GGML_METAL_KERNEL_TYPE_REGLU, + GGML_METAL_KERNEL_TYPE_GEGLU, + GGML_METAL_KERNEL_TYPE_SWIGLU, + GGML_METAL_KERNEL_TYPE_SWIGLU_OAI, + GGML_METAL_KERNEL_TYPE_GEGLU_ERF, + GGML_METAL_KERNEL_TYPE_GEGLU_QUICK, GGML_METAL_KERNEL_TYPE_SUM_ROWS, GGML_METAL_KERNEL_TYPE_MEAN, GGML_METAL_KERNEL_TYPE_POOL_2D_AVG_F32, @@ -939,7 +1038,7 @@ static id ggml_metal_load_library(id device, bool use_bfl MTLCompileOptions * options = [MTLCompileOptions new]; options.preprocessorMacros = prep; - + //[options setFastMathEnabled:false]; metal_library = [device newLibraryWithSource:src options:options error:&error]; @@ -977,7 +1076,7 @@ static struct ggml_backend_metal_context * ggml_metal_init(ggml_backend_dev_t de struct ggml_backend_metal_context * ctx = calloc(1, sizeof(struct ggml_backend_metal_context)); struct ggml_backend_metal_device_context * ctx_dev = dev->context; - id device = ggml_backend_metal_device_acq(ctx_dev); + id device = ctx_dev->mtl_device; GGML_LOG_INFO("%s: picking default device: %s\n", __func__, [[device name] UTF8String]); @@ -991,9 +1090,16 @@ static struct ggml_backend_metal_context * ggml_metal_init(ggml_backend_dev_t de ctx->d_queue = dispatch_queue_create("ggml-metal", DISPATCH_QUEUE_CONCURRENT); // load library - if (ctx_dev->mtl_library == nil) { - ctx_dev->mtl_library = ggml_metal_load_library(device, ctx_dev->use_bfloat); + { + [ctx_dev->mtl_lock lock]; + + if (ctx_dev->mtl_library == nil) { + ctx_dev->mtl_library = ggml_metal_load_library(device, ctx_dev->use_bfloat); + } + + [ctx_dev->mtl_lock unlock]; } + id metal_library = ctx_dev->mtl_library; if (metal_library == nil) { GGML_LOG_ERROR("%s: error: metal library is nil\n", __func__); @@ -1087,13 +1193,28 @@ static struct ggml_backend_metal_context * ggml_metal_init(ggml_backend_dev_t de // simd_sum and simd_max requires MTLGPUFamilyApple7 GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ADD, add, true); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ADD_ROW, add_row, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ADD_FUSE_2, add_fuse_2, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ADD_FUSE_3, add_fuse_3, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ADD_FUSE_4, add_fuse_4, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ADD_FUSE_5, add_fuse_5, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ADD_FUSE_6, add_fuse_6, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ADD_FUSE_7, add_fuse_7, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ADD_FUSE_8, add_fuse_8, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ADD_ROW_C4, add_row_c4, true); + 
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ADD_ROW_C4_FUSE_2, add_row_c4_fuse_2, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ADD_ROW_C4_FUSE_3, add_row_c4_fuse_3, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ADD_ROW_C4_FUSE_4, add_row_c4_fuse_4, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ADD_ROW_C4_FUSE_5, add_row_c4_fuse_5, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ADD_ROW_C4_FUSE_6, add_row_c4_fuse_6, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ADD_ROW_C4_FUSE_7, add_row_c4_fuse_7, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ADD_ROW_C4_FUSE_8, add_row_c4_fuse_8, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SUB, sub, true); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SUB_ROW, sub_row, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SUB_ROW_C4, sub_row_c4, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL, mul, true); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_ROW, mul_row, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_ROW_C4, mul_row_c4, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_DIV, div, true); - GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_DIV_ROW, div_row, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_DIV_ROW_C4, div_row_c4, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ADD_ID, add_id, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_REPEAT_F32, repeat_f32, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_REPEAT_F16, repeat_f16, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_REPEAT_I32, repeat_i32, true); @@ -1106,11 +1227,19 @@ static struct ggml_backend_metal_context * ggml_metal_init(ggml_backend_dev_t de GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SIGMOID, sigmoid, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GELU, gelu, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GELU_4, gelu_4, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GELU_ERF, gelu_erf, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GELU_ERF_4, gelu_erf_4, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GELU_QUICK, gelu_quick, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GELU_QUICK_4, gelu_quick_4, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SILU, silu, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SILU_4, silu_4, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ELU, elu, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ABS, abs, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SGN, sgn, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_STEP, step, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_HARDSWISH, hardswish, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_HARDSIGMOID, hardsigmoid, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_EXP, exp, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SOFT_MAX_F16, soft_max_f16, has_simdgroup_reduction); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SOFT_MAX_F16_4, soft_max_f16_4, has_simdgroup_reduction); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SOFT_MAX_F32, soft_max_f32, has_simdgroup_reduction); @@ -1125,6 +1254,7 @@ static struct ggml_backend_metal_context * ggml_metal_init(ggml_backend_dev_t de GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q5_0, get_rows_q5_0, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q5_1, get_rows_q5_1, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q8_0, get_rows_q8_0, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_MXFP4, get_rows_mxfp4, true); 
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q2_K, get_rows_q2_K, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q3_K, get_rows_q3_K, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q4_K, get_rows_q4_K, true); @@ -1140,20 +1270,35 @@ static struct ggml_backend_metal_context * ggml_metal_init(ggml_backend_dev_t de GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ4_NL, get_rows_iq4_nl, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ4_XS, get_rows_iq4_xs, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_I32, get_rows_i32, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SET_ROWS_F32, set_rows_f32, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SET_ROWS_F16, set_rows_f16, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SET_ROWS_BF16, set_rows_bf16, use_bfloat); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SET_ROWS_Q8_0, set_rows_q8_0, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SET_ROWS_Q4_0, set_rows_q4_0, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SET_ROWS_Q4_1, set_rows_q4_1, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SET_ROWS_Q5_0, set_rows_q5_0, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SET_ROWS_Q5_1, set_rows_q5_1, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SET_ROWS_IQ4_NL, set_rows_iq4_nl, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_RMS_NORM, rms_norm, has_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_RMS_NORM_MUL, rms_norm_mul, has_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_RMS_NORM_MUL_ADD, rms_norm_mul_add, has_simdgroup_reduction); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_L2_NORM, l2_norm, has_simdgroup_reduction); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GROUP_NORM, group_norm, has_simdgroup_reduction); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_NORM, norm, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SSM_CONV_F32, ssm_conv_f32, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SSM_SCAN_F32, ssm_scan_f32, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SSM_SCAN_F32_GROUP, ssm_scan_f32_group, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_RWKV_WKV6_F32, rwkv_wkv6_f32, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_RWKV_WKV7_F32, rwkv_wkv7_f32, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_F32_F32, mul_mv_f32_f32, has_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_F32_F32_C4, mul_mv_f32_f32_c4, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_BF16_F32, mul_mv_bf16_f32, has_simdgroup_reduction && use_bfloat); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_BF16_F32_C4, mul_mv_bf16_f32_c4, use_bfloat); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_BF16_F32_1ROW, mul_mv_bf16_f32_1row, has_simdgroup_reduction && use_bfloat); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_BF16_F32_L4, mul_mv_bf16_f32_l4, has_simdgroup_reduction && use_bfloat); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_BF16_BF16, mul_mv_bf16_bf16, has_simdgroup_reduction && use_bfloat); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32, mul_mv_f16_f32, has_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32_C4, mul_mv_f16_f32_c4, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32_1ROW, mul_mv_f16_f32_1row, has_simdgroup_reduction); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32_L4, mul_mv_f16_f32_l4, has_simdgroup_reduction); 
 GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F16, mul_mv_f16_f16, has_simdgroup_reduction);
@@ -1187,6 +1332,10 @@ static struct ggml_backend_metal_context * ggml_metal_init(ggml_backend_dev_t de
 GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q8_0_F32_R1_3, mul_mv_ext_q8_0_f32_r1_3, has_simdgroup_reduction);
 GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q8_0_F32_R1_4, mul_mv_ext_q8_0_f32_r1_4, has_simdgroup_reduction);
 GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q8_0_F32_R1_5, mul_mv_ext_q8_0_f32_r1_5, has_simdgroup_reduction);
+ GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_MXFP4_F32_R1_2, mul_mv_ext_mxfp4_f32_r1_2, has_simdgroup_reduction);
+ GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_MXFP4_F32_R1_3, mul_mv_ext_mxfp4_f32_r1_3, has_simdgroup_reduction);
+ GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_MXFP4_F32_R1_4, mul_mv_ext_mxfp4_f32_r1_4, has_simdgroup_reduction);
+ GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_MXFP4_F32_R1_5, mul_mv_ext_mxfp4_f32_r1_5, has_simdgroup_reduction);
 GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q4_K_F32_R1_2, mul_mv_ext_q4_K_f32_r1_2, has_simdgroup_reduction);
 GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q4_K_F32_R1_3, mul_mv_ext_q4_K_f32_r1_3, has_simdgroup_reduction);
 GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q4_K_F32_R1_4, mul_mv_ext_q4_K_f32_r1_4, has_simdgroup_reduction);
@@ -1228,6 +1377,7 @@ static struct ggml_backend_metal_context * ggml_metal_init(ggml_backend_dev_t de
 GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q5_0_F32, mul_mv_id_q5_0_f32, has_simdgroup_reduction);
 GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q5_1_F32, mul_mv_id_q5_1_f32, has_simdgroup_reduction);
 GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q8_0_F32, mul_mv_id_q8_0_f32, has_simdgroup_reduction);
+ GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_MXFP4_F32, mul_mv_id_mxfp4_f32, has_simdgroup_reduction);
 GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q2_K_F32, mul_mv_id_q2_K_f32, has_simdgroup_reduction);
 GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q3_K_F32, mul_mv_id_q3_K_f32, has_simdgroup_reduction);
 GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q4_K_F32, mul_mv_id_q4_K_f32, has_simdgroup_reduction);
@@ -1242,7 +1392,6 @@ static struct ggml_backend_metal_context * ggml_metal_init(ggml_backend_dev_t de
 GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ1_M_F32, mul_mv_id_iq1_m_f32, has_simdgroup_reduction);
 GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ4_NL_F32, mul_mv_id_iq4_nl_f32, has_simdgroup_reduction);
 GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ4_XS_F32, mul_mv_id_iq4_xs_f32, has_simdgroup_reduction);
- GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_MXFP4_F32, mul_mv_id_mxfp4_f32, has_simdgroup_reduction);
 GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_F32_F32, mul_mm_f32_f32, has_simdgroup_mm);
 GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_F16_F32, mul_mm_f16_f32, has_simdgroup_mm);
 GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_BF16_F32, mul_mm_bf16_f32, has_simdgroup_mm && use_bfloat);
@@ -1251,6 +1400,7 @@ static struct ggml_backend_metal_context * ggml_metal_init(ggml_backend_dev_t de
 GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q5_0_F32, mul_mm_q5_0_f32, has_simdgroup_mm);
 GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q5_1_F32, mul_mm_q5_1_f32, has_simdgroup_mm);
 GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q8_0_F32, mul_mm_q8_0_f32, has_simdgroup_mm);
+ GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_MXFP4_F32, mul_mm_mxfp4_f32, has_simdgroup_mm);
 GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q2_K_F32, mul_mm_q2_K_f32, has_simdgroup_mm);
 GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q3_K_F32, mul_mm_q3_K_f32, has_simdgroup_mm);
 GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q4_K_F32, mul_mm_q4_K_f32, has_simdgroup_mm);
@@ -1265,7 +1416,6 @@ static struct ggml_backend_metal_context * ggml_metal_init(ggml_backend_dev_t de
 GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ1_M_F32, mul_mm_iq1_m_f32, has_simdgroup_mm);
 GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_NL_F32, mul_mm_iq4_nl_f32, has_simdgroup_mm);
 GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_XS_F32, mul_mm_iq4_xs_f32, has_simdgroup_mm);
- GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_MXFP4_F32, mul_mm_mxfp4_f32, has_simdgroup_mm);
 GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_MAP0_F16, mul_mm_id_map0_f16, has_simdgroup_mm);
 GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_MAP1_F32, mul_mm_id_map1_f32, has_simdgroup_mm);
 GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_F32_F16, mul_mm_id_f32_f16, has_simdgroup_mm);
@@ -1276,6 +1426,7 @@ static struct ggml_backend_metal_context * ggml_metal_init(ggml_backend_dev_t de
 GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q5_0_F16, mul_mm_id_q5_0_f16, has_simdgroup_mm);
 GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q5_1_F16, mul_mm_id_q5_1_f16, has_simdgroup_mm);
 GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q8_0_F16, mul_mm_id_q8_0_f16, has_simdgroup_mm);
+ GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_MXFP4_F16, mul_mm_id_mxfp4_f16, has_simdgroup_mm);
 GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q2_K_F16, mul_mm_id_q2_K_f16, has_simdgroup_mm);
 GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q3_K_F16, mul_mm_id_q3_K_f16, has_simdgroup_mm);
 GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q4_K_F16, mul_mm_id_q4_K_f16, has_simdgroup_mm);
@@ -1290,7 +1441,6 @@ static struct ggml_backend_metal_context * ggml_metal_init(ggml_backend_dev_t de
 GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ1_M_F16, mul_mm_id_iq1_m_f16, has_simdgroup_mm);
 GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_NL_F16, mul_mm_id_iq4_nl_f16, has_simdgroup_mm);
 GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_XS_F16, mul_mm_id_iq4_xs_f16, has_simdgroup_mm);
- GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_MXFP4_F16, mul_mm_id_mxfp4_f16, has_simdgroup_mm);
 GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ROPE_NORM_F32, rope_norm_f32, true);
 GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ROPE_NORM_F16, rope_norm_f16, true);
 GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ROPE_MULTI_F32, rope_multi_f32, true);
@@ -1376,6 +1526,13 @@ static struct ggml_backend_metal_context * ggml_metal_init(ggml_backend_dev_t de
 GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q8_0_HK192_HV128, flash_attn_ext_q8_0_hk192_hv128, has_simdgroup_mm);
 GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q8_0_H256, flash_attn_ext_q8_0_h256, has_simdgroup_mm);
 GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_Q8_0_HK576_HV512, flash_attn_ext_q8_0_hk576_hv512, has_simdgroup_mm);
+
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_F16_H64, flash_attn_ext_vec_f16_h64, has_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_BF16_H64, flash_attn_ext_vec_bf16_h64, has_simdgroup_reduction && use_bfloat); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q4_0_H64, flash_attn_ext_vec_q4_0_h64, has_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q4_1_H64, flash_attn_ext_vec_q4_1_h64, has_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q5_0_H64, flash_attn_ext_vec_q5_0_h64, has_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q5_1_H64, flash_attn_ext_vec_q5_1_h64, has_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q8_0_H64, flash_attn_ext_vec_q8_0_h64, has_simdgroup_reduction); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_F16_H96, flash_attn_ext_vec_f16_h96, has_simdgroup_reduction); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_BF16_H96, flash_attn_ext_vec_bf16_h96, has_simdgroup_reduction && use_bfloat); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q4_0_H96, flash_attn_ext_vec_q4_0_h96, has_simdgroup_reduction); @@ -1449,6 +1606,12 @@ static struct ggml_backend_metal_context * ggml_metal_init(ggml_backend_dev_t de GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SIN, sin, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_COS, cos, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_NEG, neg, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_REGLU, reglu, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GEGLU, geglu, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SWIGLU, swiglu, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SWIGLU_OAI, swiglu_oai, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GEGLU_ERF, geglu_erf, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GEGLU_QUICK, geglu_quick, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SUM_ROWS, sum_rows, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MEAN, mean, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ARGMAX, argmax, true); @@ -1600,6 +1763,10 @@ static bool ggml_metal_supports_op(const struct ggml_backend_metal_device_contex const bool use_bfloat = ctx_dev->use_bfloat; if (!use_bfloat) { + if (op->type == GGML_TYPE_BF16) { + return false; + } + for (size_t i = 0, n = 3; i < n; ++i) { if (op->src[i] != NULL && op->src[i]->type == GGML_TYPE_BF16) { return false; @@ -1614,14 +1781,33 @@ static bool ggml_metal_supports_op(const struct ggml_backend_metal_device_contex case GGML_UNARY_OP_RELU: case GGML_UNARY_OP_SIGMOID: case GGML_UNARY_OP_GELU: + case GGML_UNARY_OP_GELU_ERF: case GGML_UNARY_OP_GELU_QUICK: case GGML_UNARY_OP_SILU: case GGML_UNARY_OP_ELU: case GGML_UNARY_OP_NEG: + case GGML_UNARY_OP_ABS: + case GGML_UNARY_OP_SGN: + case GGML_UNARY_OP_STEP: + case GGML_UNARY_OP_HARDSWISH: + case GGML_UNARY_OP_HARDSIGMOID: + case GGML_UNARY_OP_EXP: return ggml_is_contiguous(op->src[0]) && op->src[0]->type == GGML_TYPE_F32; default: return false; } + case GGML_OP_GLU: + switch (ggml_get_glu_op(op)) { + case GGML_GLU_OP_REGLU: + case GGML_GLU_OP_GEGLU: + case GGML_GLU_OP_SWIGLU: + case GGML_GLU_OP_SWIGLU_OAI: + case GGML_GLU_OP_GEGLU_ERF: + case GGML_GLU_OP_GEGLU_QUICK: + return ggml_is_contiguous_1(op->src[0]) && op->src[0]->type == GGML_TYPE_F32; + default: + return false; + } case GGML_OP_NONE: case 
GGML_OP_RESHAPE: case GGML_OP_VIEW: @@ -1633,6 +1819,7 @@ static bool ggml_metal_supports_op(const struct ggml_backend_metal_device_contex case GGML_OP_SUB: case GGML_OP_MUL: case GGML_OP_DIV: + case GGML_OP_ADD_ID: return op->src[0]->type == GGML_TYPE_F32; case GGML_OP_ACC: case GGML_OP_REPEAT: @@ -1652,7 +1839,7 @@ static bool ggml_metal_supports_op(const struct ggml_backend_metal_device_contex case GGML_OP_MEAN: case GGML_OP_SOFT_MAX: case GGML_OP_GROUP_NORM: - return has_simdgroup_reduction && ggml_is_contiguous(op->src[0]); + return has_simdgroup_reduction && ggml_is_contiguous_rows(op->src[0]); case GGML_OP_RMS_NORM: case GGML_OP_L2_NORM: return has_simdgroup_reduction && (op->ne[0] % 4 == 0 && ggml_is_contiguous_1(op->src[0])); @@ -1768,14 +1955,36 @@ static bool ggml_metal_supports_op(const struct ggml_backend_metal_device_contex { return op->ne[3] == 1; } + case GGML_OP_SET_ROWS: + { + if (op->src[0]->type != GGML_TYPE_F32) { + return false; + } + + switch (op->type) { + case GGML_TYPE_F32: + case GGML_TYPE_F16: + case GGML_TYPE_BF16: + case GGML_TYPE_Q8_0: + case GGML_TYPE_Q4_0: + case GGML_TYPE_Q4_1: + case GGML_TYPE_Q5_0: + case GGML_TYPE_Q5_1: + case GGML_TYPE_IQ4_NL: + return true; + default: + return false; + }; + } default: return false; } } -static bool ggml_metal_encode_node( +static int ggml_metal_encode_node( ggml_backend_t backend, int idx, + int idx_end, id encoder, struct ggml_metal_mem_pool * mem_pool) { struct ggml_backend_metal_context * ctx = backend->context; @@ -1783,7 +1992,10 @@ static bool ggml_metal_encode_node( struct ggml_cgraph * gf = ctx->gf; - struct ggml_tensor * node = ggml_graph_node(gf, idx); + enum ggml_op ops[8]; + + struct ggml_tensor ** nodes = ggml_graph_nodes(gf) + idx; + struct ggml_tensor * node = nodes[0]; //GGML_LOG_INFO("%s: encoding node %3d, op = %8s\n", __func__, idx, ggml_op_name(node->op)); @@ -1793,7 +2005,7 @@ static bool ggml_metal_encode_node( struct ggml_tensor * dst = node; if (ggml_is_empty(dst)) { - return true; + return 1; } switch (dst->op) { @@ -1804,7 +2016,7 @@ static bool ggml_metal_encode_node( case GGML_OP_PERMUTE: { // noop -> next node - } return true; + } return 1; default: { } break; @@ -1859,6 +2071,7 @@ static bool ggml_metal_encode_node( const enum ggml_type src0t = src0 ? src0->type : GGML_TYPE_COUNT; const enum ggml_type src1t = src1 ? src1->type : GGML_TYPE_COUNT; + const enum ggml_type src2t = src2 ? src2->type : GGML_TYPE_COUNT; const enum ggml_type dstt = dst ? dst->type : GGML_TYPE_COUNT; size_t offs_src0 = 0; @@ -1871,6 +2084,8 @@ static bool ggml_metal_encode_node( id id_src2 = src2 ? ggml_metal_get_buffer(src2, &offs_src2) : nil; id id_dst = dst ? 
ggml_metal_get_buffer(dst, &offs_dst) : nil; + int n_fuse = 1; + #if 0 GGML_LOG_INFO("%s: op - %s\n", __func__, ggml_op_name(dst->op)); if (src0) { @@ -1942,37 +2157,15 @@ static bool ggml_metal_encode_node( GGML_ASSERT(src0t == GGML_TYPE_F32); GGML_ASSERT(src1t == GGML_TYPE_F32); + GGML_ASSERT(ggml_is_contiguous_rows(src0)); + GGML_ASSERT(ggml_is_contiguous_rows(src1)); + const size_t offs = 0; bool bcast_row = false; id pipeline = nil; - if (ggml_nelements(src1) == ne10 && ggml_is_contiguous(src1) && ne00 % 4 == 0 && ne10 % 4 == 0) { - GGML_ASSERT(ggml_is_contiguous(src0)); - - // src1 is a row - GGML_ASSERT(ne11 == 1); - - switch (dst->op) { - case GGML_OP_ADD: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ADD_ROW].pipeline; break; - case GGML_OP_SUB: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SUB_ROW].pipeline; break; - case GGML_OP_MUL: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_ROW].pipeline; break; - case GGML_OP_DIV: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_DIV_ROW].pipeline; break; - default: GGML_ABORT("fatal error"); - } - - bcast_row = true; - } else { - switch (dst->op) { - case GGML_OP_ADD: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ADD].pipeline; break; - case GGML_OP_SUB: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SUB].pipeline; break; - case GGML_OP_MUL: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL].pipeline; break; - case GGML_OP_DIV: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_DIV].pipeline; break; - default: GGML_ABORT("fatal error"); - } - } - ggml_metal_kargs_bin args = { /*.ne00 =*/ ne00, /*.ne01 =*/ ne01, @@ -1999,12 +2192,119 @@ static bool ggml_metal_encode_node( /*.nb2 =*/ nb2, /*.nb3 =*/ nb3, /*.offs =*/ offs, + /*.o1 =*/ { offs_src1 }, }; + // c[0] = add(a, b[0]) + // c[1] = add(c[0], b[1]) + // c[2] = add(c[1], b[2]) + // ... + if (ctx_dev->use_fusion) { + ops[0] = GGML_OP_ADD; + ops[1] = GGML_OP_ADD; + ops[2] = GGML_OP_ADD; + ops[3] = GGML_OP_ADD; + ops[4] = GGML_OP_ADD; + ops[5] = GGML_OP_ADD; + ops[6] = GGML_OP_ADD; + ops[7] = GGML_OP_ADD; + + size_t offs_fuse; + id id_fuse; + + // note: in metal, we sometimes encode the graph in parallel so we have to avoid fusing nodes + // across splits. idx_end indicates the last node in the current split + for (n_fuse = 0; n_fuse <= 6 && idx + n_fuse + 1 < idx_end; ++n_fuse) { + if (!ggml_can_fuse(gf, idx + n_fuse, ops + n_fuse, 2)) { + break; + } + + if (nodes[n_fuse] != nodes[n_fuse + 1]->src[0]) { + break; + } + + // b[0] === b[1] === ... 
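+ // fusing is only safe when every chained b[i] has the same shape and strides and
+ // lives in the same Metal buffer: the fused kernel binds src1 once (offset 0) and
+ // addresses each operand purely through the byte offsets collected in args.o1[]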
+ if (!ggml_are_same_layout(nodes[n_fuse]->src[1], nodes[n_fuse + 1]->src[1])) { + break; + } + + // only fuse nodes if src1 is in the same Metal buffer + id_fuse = ggml_metal_get_buffer(nodes[n_fuse + 1]->src[1], &offs_fuse); + if (id_fuse != id_src1) { + break; + } + + ctx_dev->fuse_cnt[nodes[n_fuse + 1]->op]++; + + args.o1[n_fuse + 1] = offs_fuse; + } + + ++n_fuse; + + if (ctx_dev->debug_fusion > 1 && n_fuse > 1) { + GGML_LOG_DEBUG("%s: fuse: ADD x %d\n", __func__, n_fuse); + } + } + + if (ggml_nelements(src1) == ne10 && ggml_is_contiguous(src1) && ne00 % 4 == 0 && ne10 % 4 == 0) { + GGML_ASSERT(ggml_is_contiguous(src0)); + + // src1 is a row + GGML_ASSERT(ne11 == 1); + + switch (dst->op) { + case GGML_OP_ADD: + { + switch (n_fuse) { + case 1: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ADD_ROW_C4 ].pipeline; break; + case 2: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ADD_ROW_C4_FUSE_2].pipeline; break; + case 3: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ADD_ROW_C4_FUSE_3].pipeline; break; + case 4: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ADD_ROW_C4_FUSE_4].pipeline; break; + case 5: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ADD_ROW_C4_FUSE_5].pipeline; break; + case 6: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ADD_ROW_C4_FUSE_6].pipeline; break; + case 7: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ADD_ROW_C4_FUSE_7].pipeline; break; + case 8: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ADD_ROW_C4_FUSE_8].pipeline; break; + default: GGML_ABORT("fatal error"); + } + } break; + case GGML_OP_SUB: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SUB_ROW_C4].pipeline; break; + case GGML_OP_MUL: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_ROW_C4].pipeline; break; + case GGML_OP_DIV: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_DIV_ROW_C4].pipeline; break; + default: GGML_ABORT("fatal error"); + } + + bcast_row = true; + } else { + switch (dst->op) { + case GGML_OP_ADD: + { + switch (n_fuse) { + case 1: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ADD ].pipeline; break; + case 2: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ADD_FUSE_2].pipeline; break; + case 3: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ADD_FUSE_3].pipeline; break; + case 4: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ADD_FUSE_4].pipeline; break; + case 5: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ADD_FUSE_5].pipeline; break; + case 6: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ADD_FUSE_6].pipeline; break; + case 7: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ADD_FUSE_7].pipeline; break; + case 8: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ADD_FUSE_8].pipeline; break; + default: GGML_ABORT("fatal error"); + } + } break; + case GGML_OP_SUB: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SUB].pipeline; break; + case GGML_OP_MUL: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL].pipeline; break; + case GGML_OP_DIV: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_DIV].pipeline; break; + default: GGML_ABORT("fatal error"); + } + } + + if (n_fuse > 1) { + id_dst = ggml_metal_get_buffer(nodes[n_fuse - 1], &offs_dst); + } + [encoder setComputePipelineState:pipeline]; [encoder setBytes:&args length:sizeof(args) atIndex:0]; [encoder setBuffer:id_src0 offset:offs_src0 atIndex:1]; - [encoder setBuffer:id_src1 offset:offs_src1 atIndex:2]; + [encoder setBuffer:id_src1 offset:0 atIndex:2]; [encoder setBuffer:id_dst offset:offs_dst atIndex:3]; if (bcast_row) { @@ -2012,11 +2312,47 @@ static bool ggml_metal_encode_node( [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) 
threadsPerThreadgroup:MTLSizeMake(1, 1, 1)]; } else { - const int nth = MIN((int) pipeline.maxTotalThreadsPerThreadgroup, ne0); + int nth = 32; + + while (16*nth < ne0 && nth < (int) pipeline.maxTotalThreadsPerThreadgroup) { + nth *= 2; + } [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)]; } } break; + case GGML_OP_ADD_ID: + { + GGML_ASSERT(src0t == GGML_TYPE_F32); + GGML_ASSERT(src1t == GGML_TYPE_F32); + GGML_ASSERT(src2t == GGML_TYPE_I32); + GGML_ASSERT(dstt == GGML_TYPE_F32); + + GGML_ASSERT(ggml_is_contiguous_rows(src0)); + + id pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ADD_ID].pipeline; + + ggml_metal_kargs_add_id args = { + /*.ne0 =*/ ne0, + /*.ne1 =*/ ne1, + /*.nb01 =*/ nb01, + /*.nb02 =*/ nb02, + /*.nb11 =*/ nb11, + /*.nb21 =*/ nb21, + + }; + + [encoder setComputePipelineState:pipeline]; + [encoder setBytes:&args length:sizeof(args) atIndex:0]; + [encoder setBuffer:id_src0 offset:offs_src0 atIndex:1]; + [encoder setBuffer:id_src1 offset:offs_src1 atIndex:2]; + [encoder setBuffer:id_src2 offset:offs_src2 atIndex:3]; + [encoder setBuffer:id_dst offset:offs_dst atIndex:4]; + + const int nth = MIN((int) pipeline.maxTotalThreadsPerThreadgroup, ne00); + + [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, 1) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)]; + } break; case GGML_OP_REPEAT: { id pipeline; @@ -2137,12 +2473,13 @@ static bool ggml_metal_encode_node( /*.nb2 =*/ pnb2, /*.nb3 =*/ pnb3, /*.offs =*/ offs, + /*.o1 =*/ { offs_src1}, }; [encoder setComputePipelineState:pipeline]; [encoder setBytes:&args length:sizeof(args) atIndex:0]; [encoder setBuffer:id_src0 offset:offs_src0 atIndex:1]; - [encoder setBuffer:id_src1 offset:offs_src1 atIndex:2]; + [encoder setBuffer:id_src1 offset:0 atIndex:2]; [encoder setBuffer:id_dst offset:offs_dst atIndex:3]; const int nth = MIN((int) pipeline.maxTotalThreadsPerThreadgroup, ne00); @@ -2154,7 +2491,9 @@ static bool ggml_metal_encode_node( GGML_ASSERT(ggml_is_contiguous(src0)); float scale; - memcpy(&scale, dst->op_params, sizeof(scale)); + float bias; + memcpy(&scale, ((const int32_t *) dst->op_params) + 0, sizeof(float)); + memcpy(&bias, ((const int32_t *) dst->op_params) + 1, sizeof(float)); int64_t n = ggml_nelements(dst); @@ -2171,6 +2510,7 @@ static bool ggml_metal_encode_node( [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0]; [encoder setBuffer:id_dst offset:offs_dst atIndex:1]; [encoder setBytes:&scale length:sizeof(scale) atIndex:2]; + [encoder setBytes:&bias length:sizeof(bias) atIndex:3]; [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)]; } break; @@ -2253,6 +2593,25 @@ static bool ggml_metal_encode_node( [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)]; } break; + case GGML_UNARY_OP_GELU_ERF: + { + int64_t n = ggml_nelements(dst); + + id pipeline = nil; + + if (n % 4 == 0) { + pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GELU_ERF_4].pipeline; + n /= 4; + } else { + pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GELU_ERF].pipeline; + } + + [encoder setComputePipelineState:pipeline]; + [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0]; + [encoder setBuffer:id_dst offset:offs_dst atIndex:1]; + + [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)]; + } break; case GGML_UNARY_OP_GELU_QUICK: { int64_t n = ggml_nelements(dst); @@ -2315,12 +2674,153 @@ static bool ggml_metal_encode_node( [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 
1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)]; } break; + case GGML_UNARY_OP_ABS: + { + id pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ABS].pipeline; + + [encoder setComputePipelineState:pipeline]; + [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0]; + [encoder setBuffer:id_dst offset:offs_dst atIndex:1]; + + const int64_t n = ggml_nelements(dst); + + [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)]; + } break; + case GGML_UNARY_OP_SGN: + { + id pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SGN].pipeline; + + [encoder setComputePipelineState:pipeline]; + [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0]; + [encoder setBuffer:id_dst offset:offs_dst atIndex:1]; + + const int64_t n = ggml_nelements(dst); + + [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)]; + } break; + case GGML_UNARY_OP_STEP: + { + id pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_STEP].pipeline; + + [encoder setComputePipelineState:pipeline]; + [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0]; + [encoder setBuffer:id_dst offset:offs_dst atIndex:1]; + + const int64_t n = ggml_nelements(dst); + + [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)]; + } break; + case GGML_UNARY_OP_HARDSWISH: + { + id pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_HARDSWISH].pipeline; + + [encoder setComputePipelineState:pipeline]; + [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0]; + [encoder setBuffer:id_dst offset:offs_dst atIndex:1]; + + const int64_t n = ggml_nelements(dst); + + [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)]; + } break; + case GGML_UNARY_OP_HARDSIGMOID: + { + id pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_HARDSIGMOID].pipeline; + + [encoder setComputePipelineState:pipeline]; + [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0]; + [encoder setBuffer:id_dst offset:offs_dst atIndex:1]; + + const int64_t n = ggml_nelements(dst); + + [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)]; + } break; + case GGML_UNARY_OP_EXP: + { + id pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_EXP].pipeline; + + [encoder setComputePipelineState:pipeline]; + [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0]; + [encoder setBuffer:id_dst offset:offs_dst atIndex:1]; + + const int64_t n = ggml_nelements(dst); + + [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)]; + } break; default: { GGML_LOG_WARN("%s: node %3d, op = %8s not implemented\n", __func__, idx, ggml_op_name(dst->op)); GGML_ABORT("fatal error"); } } break; + case GGML_OP_GLU: + { + GGML_ASSERT(ggml_is_contiguous_1(src0)); + + if (src1) { + GGML_ASSERT(ggml_are_same_shape(src0, src1)); + } + + id pipeline = nil; + + switch (ggml_get_glu_op(node)) { + case GGML_GLU_OP_REGLU: + pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_REGLU].pipeline; + break; + case GGML_GLU_OP_GEGLU: + pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GEGLU].pipeline; + break; + case GGML_GLU_OP_SWIGLU: + pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SWIGLU].pipeline; + break; + case GGML_GLU_OP_SWIGLU_OAI: + pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SWIGLU_OAI].pipeline; + break; + case GGML_GLU_OP_GEGLU_ERF: + pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GEGLU_ERF].pipeline; + break; + case GGML_GLU_OP_GEGLU_QUICK: + pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GEGLU_QUICK].pipeline; + break; + default: 
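+                            // no Metal pipeline exists for other GGML_GLU_OP values; abort rather
+                            // than encode with a wrong or nil pipeline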
+ GGML_ABORT("fatal error"); + } + + const int32_t swp = ggml_get_op_params_i32(dst, 1); + const float alpha = ggml_get_op_params_f32(dst, 2); + const float limit = ggml_get_op_params_f32(dst, 3); + + const int32_t i00 = swp ? ne0 : 0; + const int32_t i10 = swp ? 0 : ne0; + + ggml_metal_kargs_glu args = { + /*.ne00 =*/ ne00, + /*.nb01 =*/ nb01, + /*.ne10 =*/ src1 ? ne10 : ne00, + /*.nb11 =*/ src1 ? nb11 : nb01, + /*.ne0 =*/ ne0, + /*.nb1 =*/ nb1, + /*.i00 =*/ src1 ? 0 : i00, + /*.i10 =*/ src1 ? 0 : i10, + /*.alpha=*/ alpha, + /*.limit=*/ limit + }; + + [encoder setComputePipelineState:pipeline]; + [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0]; + if (src1) { + [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1]; + } else { + [encoder setBuffer:id_src0 offset:offs_src0 atIndex:1]; + } + [encoder setBuffer:id_dst offset:offs_dst atIndex:2]; + [encoder setBytes:&args length:sizeof(args) atIndex:3]; + + const int64_t nrows = ggml_nrows(src0); + + const int32_t nth = MIN((int) pipeline.maxTotalThreadsPerThreadgroup, ne00/2); + + [encoder dispatchThreadgroups:MTLSizeMake(nrows, 1, 1) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)]; + } break; case GGML_OP_SQR: { GGML_ASSERT(ggml_is_contiguous(src0)); @@ -2401,6 +2901,7 @@ static bool ggml_metal_encode_node( nth *= 2; } + nth = MIN(nth, (int) pipeline.maxTotalThreadsPerThreadgroup); nth = MIN(nth, ne00); ggml_metal_kargs_sum_rows args = { @@ -2474,10 +2975,7 @@ static bool ggml_metal_encode_node( memcpy(&scale, ((const int32_t *) dst->op_params) + 0, sizeof(scale)); memcpy(&max_bias, ((const int32_t *) dst->op_params) + 1, sizeof(max_bias)); - const int64_t nrows_x = ggml_nrows(src0); - const int64_t nrows_y = src0->ne[1]; - - const uint32_t n_head = nrows_x/nrows_y; + const uint32_t n_head = src0->ne[2]; const uint32_t n_head_log2 = 1u << (uint32_t) floorf(log2f((float) n_head)); const float m0 = powf(2.0f, -(max_bias ) / n_head_log2); @@ -2490,7 +2988,7 @@ static bool ggml_metal_encode_node( id h_src0 = h_src0 = ggml_metal_mem_pool_alloc(mem_pool, ggml_nbytes(src0)); if (!h_src0) { GGML_LOG_ERROR("%s: failed to allocate buffer from memory pool, size = %zu\n", __func__, ggml_nbytes(src0)); - return false; + return 0; } offs_src0 = 0; @@ -2537,6 +3035,18 @@ static bool ggml_metal_encode_node( /*.ne00 =*/ ne00, /*.ne01 =*/ ne01, /*.ne02 =*/ ne02, + /*.nb01 =*/ nb01, + /*.nb02 =*/ nb02, + /*.nb03 =*/ nb03, + /*.ne11 =*/ ne11, + /*.ne12 =*/ ne12, + /*.ne13 =*/ ne13, + /*.nb11 =*/ nb11, + /*.nb12 =*/ nb12, + /*.nb13 =*/ nb13, + /*.nb1 =*/ nb1, + /*.nb2 =*/ nb2, + /*.nb3 =*/ nb3, /*.scale =*/ scale, /*.max_bias =*/ max_bias, /*.m0 =*/ m0, @@ -2551,12 +3061,17 @@ static bool ggml_metal_encode_node( } else { [encoder setBuffer:h_src0 offset:offs_src0 atIndex:1]; } - [encoder setBuffer:id_dst offset:offs_dst atIndex:2]; - [encoder setBytes:&args length:sizeof(args) atIndex:3]; + if (id_src2) { + [encoder setBuffer:id_src2 offset:offs_src2 atIndex:2]; + } else { + [encoder setBuffer:h_src0 offset:offs_src0 atIndex:2]; + } + [encoder setBuffer:id_dst offset:offs_dst atIndex:3]; + [encoder setBytes:&args length:sizeof(args) atIndex:4]; [encoder setThreadgroupMemoryLength:32*sizeof(float) atIndex:0]; - [encoder dispatchThreadgroups:MTLSizeMake(ne01*ne02*ne03, 1, 1) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)]; + [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)]; } break; case GGML_OP_DIAG_MASK_INF: { @@ -2630,71 +3145,92 @@ static bool ggml_metal_encode_node( struct ggml_tensor * src3 = 
node->src[3]; struct ggml_tensor * src4 = node->src[4]; struct ggml_tensor * src5 = node->src[5]; + struct ggml_tensor * src6 = node->src[6]; GGML_ASSERT(src3); GGML_ASSERT(src4); GGML_ASSERT(src5); + GGML_ASSERT(src6); size_t offs_src3 = 0; size_t offs_src4 = 0; size_t offs_src5 = 0; + size_t offs_src6 = 0; id id_src3 = src3 ? ggml_metal_get_buffer(src3, &offs_src3) : nil; id id_src4 = src4 ? ggml_metal_get_buffer(src4, &offs_src4) : nil; id id_src5 = src5 ? ggml_metal_get_buffer(src5, &offs_src5) : nil; + id id_src6 = src6 ? ggml_metal_get_buffer(src6, &offs_src6) : nil; - const int64_t ne30 = src3->ne[0]; GGML_UNUSED(ne30); + const int64_t ne30 = src3->ne[0]; const int64_t ne31 = src3->ne[1]; GGML_UNUSED(ne31); - const uint64_t nb30 = src3->nb[0]; + const uint64_t nb30 = src3->nb[0]; GGML_UNUSED(nb30); const uint64_t nb31 = src3->nb[1]; const int64_t ne40 = src4->ne[0]; GGML_UNUSED(ne40); - const int64_t ne41 = src4->ne[1]; GGML_UNUSED(ne41); + const int64_t ne41 = src4->ne[1]; const int64_t ne42 = src4->ne[2]; GGML_UNUSED(ne42); + const int64_t ne43 = src4->ne[3]; GGML_UNUSED(ne43); - const uint64_t nb40 = src4->nb[0]; + const uint64_t nb40 = src4->nb[0]; GGML_UNUSED(nb40); const uint64_t nb41 = src4->nb[1]; const uint64_t nb42 = src4->nb[2]; + const uint64_t nb43 = src4->nb[3]; const int64_t ne50 = src5->ne[0]; GGML_UNUSED(ne50); const int64_t ne51 = src5->ne[1]; GGML_UNUSED(ne51); const int64_t ne52 = src5->ne[2]; GGML_UNUSED(ne52); + const int64_t ne53 = src5->ne[3]; GGML_UNUSED(ne53); - const uint64_t nb50 = src5->nb[0]; + const uint64_t nb50 = src5->nb[0]; GGML_UNUSED(nb50); const uint64_t nb51 = src5->nb[1]; const uint64_t nb52 = src5->nb[2]; + const uint64_t nb53 = src5->nb[3]; + + const int64_t ne60 = src6->ne[0]; GGML_UNUSED(ne60); + + const uint64_t nb60 = src6->nb[0]; GGML_UNUSED(nb60); const int64_t d_state = ne00; const int64_t d_inner = ne01; - const int64_t n_seq_tokens = ne11; - const int64_t n_seqs = ne02; + const int64_t n_head = ne02; + const int64_t n_group = ne41; + const int64_t n_seq_tokens = ne12; + const int64_t n_seqs = ne13; - id pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SSM_SCAN_F32].pipeline; + id pipeline = nil; + + if (ne30 == 1) { + // Mamba-2 + pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SSM_SCAN_F32_GROUP].pipeline; + } else { + pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SSM_SCAN_F32].pipeline; + } ggml_metal_kargs_ssm_scan args = { - /*.d_state =*/ d_state, - /*.d_inner =*/ d_inner, + /*.d_state =*/ d_state, + /*.d_inner =*/ d_inner, + /*.n_head =*/ n_head, + /*.n_group =*/ n_group, /*.n_seq_tokens =*/ n_seq_tokens, - /*.n_seqs =*/ n_seqs, - /*.nb00 =*/ nb00, - /*.nb01 =*/ nb01, - /*.nb02 =*/ nb02, - /*.nb10 =*/ nb10, - /*.nb11 =*/ nb11, - /*.nb12 =*/ nb12, - /*.nb13 =*/ nb13, - /*.nb20 =*/ nb20, - /*.nb21 =*/ nb21, - /*.nb22 =*/ nb22, - /*.nb30 =*/ nb30, - /*.nb31 =*/ nb31, - /*.nb40 =*/ nb40, - /*.nb41 =*/ nb41, - /*.nb42 =*/ nb42, - /*.nb50 =*/ nb50, - /*.nb51 =*/ nb51, - /*.nb52 =*/ nb52, + /*.n_seqs =*/ n_seqs, + /*.s_off =*/ ggml_nelements(src1) * sizeof(float), + /*.nb01 =*/ nb01, + /*.nb02 =*/ nb02, + /*.nb03 =*/ nb03, + /*.nb11 =*/ nb11, + /*.nb12 =*/ nb12, + /*.nb13 =*/ nb13, + /*.nb21 =*/ nb21, + /*.nb22 =*/ nb22, + /*.nb31 =*/ nb31, + /*.nb41 =*/ nb41, + /*.nb42 =*/ nb42, + /*.nb43 =*/ nb43, + /*.nb51 =*/ nb51, + /*.nb52 =*/ nb52, + /*.nb53 =*/ nb53, }; [encoder setComputePipelineState:pipeline]; @@ -2704,10 +3240,27 @@ static bool ggml_metal_encode_node( [encoder setBuffer:id_src3 offset:offs_src3 atIndex:3]; [encoder 
setBuffer:id_src4 offset:offs_src4 atIndex:4]; [encoder setBuffer:id_src5 offset:offs_src5 atIndex:5]; - [encoder setBuffer:id_dst offset:offs_dst atIndex:6]; - [encoder setBytes:&args length:sizeof(args) atIndex:7]; + [encoder setBuffer:id_src6 offset:offs_src6 atIndex:6]; + [encoder setBuffer:id_dst offset:offs_dst atIndex:7]; + [encoder setBytes:&args length:sizeof(args) atIndex:8]; - [encoder dispatchThreadgroups:MTLSizeMake(d_inner, n_seqs, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)]; + // One shared memory bucket for each simd group in the threadgroup + // NOTE: Metal kernels require the buffer size to be multiple of 16 bytes + // https://developer.apple.com/documentation/metal/mtlcomputecommandencoder/1443142-setthreadgroupmemorylength + if (d_state >= 32) { + GGML_ASSERT((int64_t)(d_state / 32) <= 32); + const int64_t shmem_size = 32; + GGML_ASSERT(d_state <= (int64_t)pipeline.maxTotalThreadsPerThreadgroup); + [encoder setThreadgroupMemoryLength:(shmem_size)*sizeof(float) atIndex:0]; + } + + if (ne30 == 1) { + // Mamba-2 + [encoder dispatchThreadgroups:MTLSizeMake(d_inner, n_head, n_seqs) threadsPerThreadgroup:MTLSizeMake(d_state, 1, 1)]; + } else { + GGML_ASSERT(d_inner == 1); + [encoder dispatchThreadgroups:MTLSizeMake(n_head, n_seqs, 1) threadsPerThreadgroup:MTLSizeMake(d_state, 1, 1)]; + } } break; case GGML_OP_RWKV_WKV6: { @@ -2812,6 +3365,7 @@ static bool ggml_metal_encode_node( src0t == GGML_TYPE_Q5_0 || src0t == GGML_TYPE_Q5_1 || src0t == GGML_TYPE_Q8_0 || + src0t == GGML_TYPE_MXFP4 || src0t == GGML_TYPE_IQ4_NL || false) && (ne11 >= 2 && ne11 <= 8) ) || @@ -2904,6 +3458,14 @@ static bool ggml_metal_encode_node( case 5: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q8_0_F32_R1_5].pipeline; break; default: GGML_ABORT("not implemented"); } break; + case GGML_TYPE_MXFP4: + switch (r1ptg) { + case 2: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_MXFP4_F32_R1_2].pipeline; break; + case 3: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_MXFP4_F32_R1_3].pipeline; break; + case 4: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_MXFP4_F32_R1_4].pipeline; break; + case 5: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_MXFP4_F32_R1_5].pipeline; break; + default: GGML_ABORT("not implemented"); + } break; case GGML_TYPE_Q4_K: switch (r1ptg) { case 2: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q4_K_F32_R1_2].pipeline; break; @@ -3002,6 +3564,7 @@ static bool ggml_metal_encode_node( case GGML_TYPE_Q5_0: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_Q5_0_F32 ].pipeline; break; case GGML_TYPE_Q5_1: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_Q5_1_F32 ].pipeline; break; case GGML_TYPE_Q8_0: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_Q8_0_F32 ].pipeline; break; + case GGML_TYPE_MXFP4: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_MXFP4_F32 ].pipeline; break; case GGML_TYPE_Q2_K: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_Q2_K_F32 ].pipeline; break; case GGML_TYPE_Q3_K: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_Q3_K_F32 ].pipeline; break; case GGML_TYPE_Q4_K: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_Q4_K_F32 ].pipeline; break; @@ -3016,7 +3579,6 @@ static bool ggml_metal_encode_node( case GGML_TYPE_IQ1_M: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ1_M_F32 ].pipeline; break; case GGML_TYPE_IQ4_NL: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_NL_F32 ].pipeline; break; case GGML_TYPE_IQ4_XS: pipeline = 
ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_XS_F32 ].pipeline; break; - case GGML_TYPE_MXFP4: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_MXFP4_F32 ].pipeline; break; default: GGML_ABORT("MUL MAT-MAT not implemented"); } @@ -3062,14 +3624,23 @@ static bool ggml_metal_encode_node( nsg = 1; nr0 = 1; nr1 = 4; - pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_F32_F32].pipeline; + if (ne00 == 4) { + nr0 = 32; + pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_F32_F32_C4].pipeline; + } else { + pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_F32_F32].pipeline; + } } break; case GGML_TYPE_F16: { nsg = 1; nr0 = 1; if (src1t == GGML_TYPE_F32) { - if (ne11 * ne12 < 4) { + if (ne00 == 4) { + nr0 = 32; + nr1 = 4; + pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32_C4].pipeline; + } else if (ne11 * ne12 < 4) { pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32_1ROW].pipeline; } else if (ne00 >= 128 && ne01 >= 8 && ne00%4 == 0) { pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32_L4].pipeline; @@ -3088,7 +3659,11 @@ static bool ggml_metal_encode_node( nsg = 1; nr0 = 1; if (src1t == GGML_TYPE_F32) { - if (ne11 * ne12 < 4) { + if (ne00 == 4) { + nr0 = 32; + nr1 = 4; + pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_BF16_F32_C4].pipeline; + } else if (ne11 * ne12 < 4) { pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_BF16_F32_1ROW].pipeline; } else if (ne00 >= 128 && ne01 >= 8 && ne00%4 == 0) { pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_BF16_F32_L4].pipeline; @@ -3132,6 +3707,13 @@ static bool ggml_metal_encode_node( nr0 = N_R0_Q8_0; pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_Q8_0_F32].pipeline; } break; + case GGML_TYPE_MXFP4: + { + nsg = N_SG_MXFP4; + nr0 = N_R0_MXFP4; + smem = 32*sizeof(float); + pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_MXFP4_F32].pipeline; + } break; case GGML_TYPE_Q2_K: { nsg = N_SG_Q2_K; @@ -3222,12 +3804,6 @@ static bool ggml_metal_encode_node( smem = 32*sizeof(float); pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_IQ4_XS_F32].pipeline; } break; - case GGML_TYPE_MXFP4: - { - nsg = N_SG_MXFP4; - nr0 = N_R0_MXFP4; - pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_MXFP4_F32].pipeline; - } break; default: { GGML_LOG_ERROR("Asserting on type %d\n", (int)src0t); @@ -3271,8 +3847,6 @@ static bool ggml_metal_encode_node( case GGML_OP_MUL_MAT_ID: { // src2 = ids - const enum ggml_type src2t = src2->type; GGML_UNUSED(src2t); - GGML_ASSERT(src2t == GGML_TYPE_I32); GGML_ASSERT(!ggml_is_transposed(src0)); @@ -3321,7 +3895,7 @@ static bool ggml_metal_encode_node( id h_src1 = ggml_metal_mem_pool_alloc(mem_pool, s_src1); if (!h_src1) { GGML_LOG_ERROR("%s: failed to allocate buffer from memory pool, size = %zu\n", __func__, s_src1); - return false; + return 0; } const int64_t neh0 = ne0; @@ -3337,7 +3911,7 @@ static bool ggml_metal_encode_node( id h_dst = ggml_metal_mem_pool_alloc(mem_pool, s_dst); if (!h_dst) { GGML_LOG_ERROR("%s: failed to allocate buffer from memory pool, size = %zu\n", __func__, s_dst); - return false; + return 0; } // tokens per expert @@ -3345,7 +3919,7 @@ static bool ggml_metal_encode_node( id h_tpe = ggml_metal_mem_pool_alloc(mem_pool, s_tpe); if (!h_tpe) { GGML_LOG_ERROR("%s: failed to allocate buffer from memory pool, size = %zu\n", __func__, s_tpe); - return false; + return 0; } // id map @@ -3354,7 +3928,7 @@ static bool ggml_metal_encode_node( id h_ids = ggml_metal_mem_pool_alloc(mem_pool, s_ids); if (!h_ids) { GGML_LOG_ERROR("%s: failed to 
allocate buffer from memory pool, size = %zu\n", __func__, s_ids); - return false; + return 0; } { @@ -3398,6 +3972,7 @@ static bool ggml_metal_encode_node( case GGML_TYPE_Q5_0: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q5_0_F16 ].pipeline; break; case GGML_TYPE_Q5_1: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q5_1_F16 ].pipeline; break; case GGML_TYPE_Q8_0: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q8_0_F16 ].pipeline; break; + case GGML_TYPE_MXFP4: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_MXFP4_F16 ].pipeline; break; case GGML_TYPE_Q2_K: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q2_K_F16 ].pipeline; break; case GGML_TYPE_Q3_K: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q3_K_F16 ].pipeline; break; case GGML_TYPE_Q4_K: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q4_K_F16 ].pipeline; break; @@ -3412,7 +3987,6 @@ static bool ggml_metal_encode_node( case GGML_TYPE_IQ1_M: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ1_M_F16 ].pipeline; break; case GGML_TYPE_IQ4_NL: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_NL_F16 ].pipeline; break; case GGML_TYPE_IQ4_XS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_XS_F16 ].pipeline; break; - case GGML_TYPE_MXFP4: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_MXFP4_F16 ].pipeline; break; default: GGML_ABORT("MUL_MAT_ID not implemented"); } @@ -3534,6 +4108,13 @@ static bool ggml_metal_encode_node( nr0 = N_R0_Q8_0; pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q8_0_F32].pipeline; } break; + case GGML_TYPE_MXFP4: + { + nsg = N_SG_MXFP4; + nr0 = N_R0_MXFP4; + smem = 32*sizeof(float); + pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_MXFP4_F32].pipeline; + } break; case GGML_TYPE_Q2_K: { nsg = N_SG_Q2_K; @@ -3624,12 +4205,6 @@ static bool ggml_metal_encode_node( smem = 32*sizeof(float); pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ4_XS_F32].pipeline; } break; - case GGML_TYPE_MXFP4: - { - nsg = N_SG_MXFP4; - nr0 = N_R0_MXFP4; - pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_MXFP4_F32].pipeline; - } break; default: { GGML_LOG_ERROR("Asserting on type %d\n", (int)src2t); @@ -3692,6 +4267,7 @@ static bool ggml_metal_encode_node( case GGML_TYPE_Q5_0: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_Q5_0 ].pipeline; break; case GGML_TYPE_Q5_1: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_Q5_1 ].pipeline; break; case GGML_TYPE_Q8_0: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_Q8_0 ].pipeline; break; + case GGML_TYPE_MXFP4: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_MXFP4 ].pipeline; break; case GGML_TYPE_Q2_K: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_Q2_K ].pipeline; break; case GGML_TYPE_Q3_K: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_Q3_K ].pipeline; break; case GGML_TYPE_Q4_K: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_Q4_K ].pipeline; break; @@ -3722,22 +4298,166 @@ static bool ggml_metal_encode_node( }; [encoder setComputePipelineState:pipeline]; - [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0]; - [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1]; - [encoder setBuffer:id_dst offset:offs_dst atIndex:2]; - [encoder setBytes:&args length:sizeof(args) atIndex:3]; + [encoder setBytes:&args length:sizeof(args) atIndex:0]; + [encoder setBuffer:id_src0 offset:offs_src0 atIndex:1]; + [encoder setBuffer:id_src1 offset:offs_src1 atIndex:2]; + [encoder setBuffer:id_dst 
offset:offs_dst atIndex:3]; [encoder dispatchThreadgroups:MTLSizeMake(ne10, ne11, 1) threadsPerThreadgroup:MTLSizeMake(32, 1, 1)]; } break; + case GGML_OP_SET_ROWS: + { + id pipeline = nil; + + switch (dst->type) { + case GGML_TYPE_F32: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SET_ROWS_F32 ].pipeline; break; + case GGML_TYPE_F16: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SET_ROWS_F16 ].pipeline; break; + case GGML_TYPE_BF16: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SET_ROWS_BF16 ].pipeline; break; + case GGML_TYPE_Q8_0: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SET_ROWS_Q8_0 ].pipeline; break; + case GGML_TYPE_Q4_0: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SET_ROWS_Q4_0 ].pipeline; break; + case GGML_TYPE_Q4_1: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SET_ROWS_Q4_1 ].pipeline; break; + case GGML_TYPE_Q5_0: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SET_ROWS_Q5_0 ].pipeline; break; + case GGML_TYPE_Q5_1: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SET_ROWS_Q5_1 ].pipeline; break; + case GGML_TYPE_IQ4_NL: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SET_ROWS_IQ4_NL].pipeline; break; + default: GGML_ABORT("not implemented"); + } + + const int32_t nk0 = ne0/ggml_blck_size(dst->type); + + int nth = 32; // SIMD width + + while (nth < nk0 && nth < (int) pipeline.maxTotalThreadsPerThreadgroup) { + nth *= 2; + } + + int nrptg = 1; + if (nth > nk0) { + nrptg = (nth + nk0 - 1)/nk0; + nth = nk0; + + if (nrptg*nth > (int) pipeline.maxTotalThreadsPerThreadgroup) { + nrptg--; + } + } + + nth = MIN(nth, nk0); + + ggml_metal_kargs_set_rows args = { + /*.nk0 =*/ nk0, + /*.ne01 =*/ ne01, + /*.nb01 =*/ nb01, + /*.nb02 =*/ nb02, + /*.nb03 =*/ nb03, + /*.ne11 =*/ ne11, + /*.ne12 =*/ ne12, + /*.nb10 =*/ nb10, + /*.nb11 =*/ nb11, + /*.nb12 =*/ nb12, + /*.nb1 =*/ nb1, + /*.nb2 =*/ nb2, + /*.nb3 =*/ nb3, + }; + + [encoder setComputePipelineState:pipeline]; + [encoder setBytes:&args length:sizeof(args) atIndex:0]; + [encoder setBuffer:id_src0 offset:offs_src0 atIndex:1]; + [encoder setBuffer:id_src1 offset:offs_src1 atIndex:2]; + [encoder setBuffer:id_dst offset:offs_dst atIndex:3]; + + [encoder dispatchThreadgroups:MTLSizeMake((ne01 + nrptg - 1)/nrptg, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(nth, nrptg, 1)]; + } break; case GGML_OP_RMS_NORM: { GGML_ASSERT(ne00 % 4 == 0); - GGML_ASSERT(ggml_is_contiguous_1(src0)); + GGML_ASSERT(ggml_is_contiguous_rows(src0)); float eps; memcpy(&eps, dst->op_params, sizeof(float)); - id pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_RMS_NORM].pipeline; + ggml_metal_kargs_rms_norm args = { + /*.ne00 =*/ ne00, + /*.ne00_4 =*/ ne00/4, + /*.nb1 =*/ nb1, + /*.nb2 =*/ nb2, + /*.nb3 =*/ nb3, + /*.eps =*/ eps, + /*.nef1 =*/ { ne01 }, + /*.nef2 =*/ { ne02 }, + /*.nef3 =*/ { ne03 }, + /*.nbf1 =*/ { nb01 }, + /*.nbf2 =*/ { nb02 }, + /*.nbf3 =*/ { nb03 }, + }; + + size_t offs_fuse[2] = { 0, 0 }; + id id_fuse[2] = { id_src0, id_src0 }; + + // d[0] = rms_norm(a) + // d[1] = mul(d[0], b) + // d[2] = add(d[1], c) + if (ctx_dev->use_fusion) { + ops[0] = GGML_OP_RMS_NORM; + ops[1] = GGML_OP_MUL; + ops[2] = GGML_OP_ADD; + + for (n_fuse = 0; n_fuse <= 1 && idx + n_fuse + 1 < idx_end; ++n_fuse) { + if (!ggml_can_fuse(gf, idx + n_fuse, ops + n_fuse, 2)) { + break; + } + + if (nodes[n_fuse] != nodes[n_fuse + 1]->src[0]) { + break; + } + + if (nodes[n_fuse + 1]->src[1]->ne[0] != node->ne[0]) { + break; + } + + if (!ggml_is_contiguous_rows(nodes[n_fuse + 1]->src[1])) { + break; + } + + if (nodes[n_fuse + 1]->type != GGML_TYPE_F32) { + break; + } + + 
ctx_dev->fuse_cnt[nodes[n_fuse + 1]->op]++; + + id_fuse[n_fuse] = ggml_metal_get_buffer(nodes[n_fuse + 1]->src[1], &offs_fuse[n_fuse]); + + args.nef1[n_fuse + 1] = nodes[n_fuse + 1]->src[1]->ne[1]; + args.nef2[n_fuse + 1] = nodes[n_fuse + 1]->src[1]->ne[2]; + args.nef3[n_fuse + 1] = nodes[n_fuse + 1]->src[1]->ne[3]; + + args.nbf1[n_fuse + 1] = nodes[n_fuse + 1]->src[1]->nb[1]; + args.nbf2[n_fuse + 1] = nodes[n_fuse + 1]->src[1]->nb[2]; + args.nbf3[n_fuse + 1] = nodes[n_fuse + 1]->src[1]->nb[3]; + } + + ++n_fuse; + + if (ctx_dev->debug_fusion > 1 && n_fuse > 1) { + if (n_fuse == 2) { + GGML_LOG_DEBUG("%s: fuse: RMS_NORM + MUL\n", __func__); + } + if (n_fuse == 3) { + GGML_LOG_DEBUG("%s: fuse: RMS_NORM + MUL + ADD\n", __func__); + } + } + } + + if (n_fuse > 1) { + id_dst = ggml_metal_get_buffer(nodes[n_fuse - 1], &offs_dst); + } + + id pipeline; + + switch (n_fuse) { + case 1: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_RMS_NORM ].pipeline; break; + case 2: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_RMS_NORM_MUL ].pipeline; break; + case 3: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_RMS_NORM_MUL_ADD].pipeline; break; + default: GGML_ABORT("unsupported n_fuse = %d\n", n_fuse); + } int nth = 32; // SIMD width @@ -3745,25 +4465,19 @@ static bool ggml_metal_encode_node( nth *= 2; } + nth = MIN(nth, (int) pipeline.maxTotalThreadsPerThreadgroup); nth = MIN(nth, ne00/4); - ggml_metal_kargs_rms_norm args = { - /*.ne00 =*/ ne00, - /*.ne00_4 =*/ ne00/4, - /*.nb01 =*/ nb01, - /*.eps =*/ eps, - }; - [encoder setComputePipelineState:pipeline]; - [encoder setBytes:&args length:sizeof(args) atIndex:0]; - [encoder setBuffer:id_src0 offset:offs_src0 atIndex:1]; - [encoder setBuffer:id_dst offset:offs_dst atIndex:2]; + [encoder setBytes:&args length:sizeof(args) atIndex:0]; + [encoder setBuffer:id_src0 offset:offs_src0 atIndex:1]; + [encoder setBuffer:id_fuse[0] offset:offs_fuse[0] atIndex:2]; + [encoder setBuffer:id_fuse[1] offset:offs_fuse[1] atIndex:3]; + [encoder setBuffer:id_dst offset:offs_dst atIndex:4]; [encoder setThreadgroupMemoryLength:32*sizeof(float) atIndex:0]; - const int64_t nrows = ggml_nrows(src0); - - [encoder dispatchThreadgroups:MTLSizeMake(nrows, 1, 1) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)]; + [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)]; } break; case GGML_OP_L2_NORM: { @@ -3781,6 +4495,7 @@ static bool ggml_metal_encode_node( nth *= 2; } + nth = MIN(nth, (int) pipeline.maxTotalThreadsPerThreadgroup); nth = MIN(nth, ne00/4); ggml_metal_kargs_l2_norm args = { @@ -3853,6 +4568,7 @@ static bool ggml_metal_encode_node( nth *= 2; } + nth = MIN(nth, (int) pipeline.maxTotalThreadsPerThreadgroup); nth = MIN(nth, ne00/4); ggml_metal_kargs_norm args = { @@ -4362,11 +5078,14 @@ static bool ggml_metal_encode_node( GGML_ASSERT(ne11 == ne21); GGML_ASSERT(ne12 == ne22); - struct ggml_tensor * src3 = node->src[3]; + struct ggml_tensor * src3 = node->src[3]; // mask + struct ggml_tensor * src4 = node->src[4]; // sinks size_t offs_src3 = 0; + size_t offs_src4 = 0; id id_src3 = src3 ? ggml_metal_get_buffer(src3, &offs_src3) : nil; + id id_src4 = src4 ? ggml_metal_get_buffer(src4, &offs_src4) : nil; GGML_ASSERT(!src3 || src3->type == GGML_TYPE_F16); GGML_ASSERT(!src3 || src3->ne[1] >= GGML_PAD(src0->ne[1], 8) && @@ -4382,8 +5101,6 @@ static bool ggml_metal_encode_node( const uint64_t nb32 = src3 ? src3->nb[2] : 0; GGML_UNUSED(nb32); const uint64_t nb33 = src3 ? 
src3->nb[3] : 0; GGML_UNUSED(nb33); - const enum ggml_type src2t = src2 ? src2->type : GGML_TYPE_COUNT; GGML_UNUSED(src2t); - float scale; float max_bias; float logit_softcap; @@ -4408,7 +5125,7 @@ static bool ggml_metal_encode_node( // TODO: add vec kernels for (ne00%64 == 0) and maybe also for (ne00%32 == 0) // for now avoiding mainly to keep the number of templates/kernels a bit lower // these are now trivial to add after: https://github.com/ggml-org/llama.cpp/pull/12612 - if (ne01 >= 4 || (ne00%128 != 0 && ne00 != 96 && ne00 != 192 && ne00 != 576)) { + if (ne01 >= 20 || (ne00%128 != 0 && ne00 != 64 && ne00 != 96 && ne00 != 192 && ne00 != 576)) { switch (src1->type) { case GGML_TYPE_F16: { @@ -4589,6 +5306,24 @@ static bool ggml_metal_encode_node( use_vec_kernel = true; switch (ne00) { + case 64: + { + switch (src1->type) { + case GGML_TYPE_F16: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_F16_H64].pipeline; break; + case GGML_TYPE_BF16: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_BF16_H64].pipeline; break; + case GGML_TYPE_Q4_0: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q4_0_H64].pipeline; break; + case GGML_TYPE_Q4_1: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q4_1_H64].pipeline; break; + case GGML_TYPE_Q5_0: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q5_0_H64].pipeline; break; + case GGML_TYPE_Q5_1: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q5_1_H64].pipeline; break; + case GGML_TYPE_Q8_0: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q8_0_H64].pipeline; break; + default: + { + GGML_LOG_ERROR("unsupported type: %d\n", src1->type); + GGML_LOG_ERROR("add template specialization for this type\n"); + GGML_ABORT("add template specialization for this type"); + } + } + } break; case 96: { switch (src1->type) { @@ -4728,7 +5463,11 @@ static bool ggml_metal_encode_node( /*.nb21 =*/ nb21, /*.nb22 =*/ nb22, /*.nb23 =*/ nb23, + /*.ne32 =*/ ne32, + /*.ne33 =*/ ne33, /*.nb31 =*/ nb31, + /*.nb32 =*/ nb32, + /*.nb33 =*/ nb33, /*.ne1 =*/ ne1, /*.ne2 =*/ ne2, /*.scale =*/ scale, @@ -4749,7 +5488,12 @@ static bool ggml_metal_encode_node( } else { [encoder setBuffer:id_src0 offset:offs_src0 atIndex:4]; } - [encoder setBuffer:id_dst offset:offs_dst atIndex:5]; + if (id_src4) { + [encoder setBuffer:id_src4 offset:offs_src4 atIndex:5]; + } else { + [encoder setBuffer:id_src0 offset:offs_src0 atIndex:5]; + } + [encoder setBuffer:id_dst offset:offs_dst atIndex:6]; if (!use_vec_kernel) { // half8x8 kernel @@ -4760,6 +5504,8 @@ static bool ggml_metal_encode_node( GGML_ASSERT(nqptg % 8 == 0); GGML_ASSERT(ncpsg % 32 == 0); + const int is_q = ggml_is_quantized(src1->type) ? 
1 : 0; + // 2*(2*ncpsg + nqptg)*(nsg) // ncpsg soft_max values + ncpsg mask values + a diagonal scaling matrix (in float) // @@ -4767,7 +5513,7 @@ static bool ggml_metal_encode_node( // the shared memory needed for the simdgroups to load the KV cache // each thread loads (dequantizes) 16 head elements, there are 32 threads in th SG // -#define FATTN_SMEM(nsg) (GGML_PAD((nqptg*(ne00 + 2*(2*ncpsg + nqptg)*(nsg)) + 16*32*(nsg))*(sizeof(float)/2), 16)) +#define FATTN_SMEM(nsg) (GGML_PAD((nqptg*(2*ne00 + 2*(2*ncpsg + nqptg)*(nsg)) + is_q*(16*32*(nsg)))*(sizeof(float)/2), 16)) int64_t nsgmax = 2; @@ -4804,9 +5550,9 @@ static bool ggml_metal_encode_node( // and store the soft_max values and the mask // // ne00*(nsg) - // each simdgroup has a full f16 head vector in shared mem to accumulate results + // each simdgroup has a full f32 head vector in shared mem to accumulate results // -#define FATTN_SMEM(nsg) (GGML_PAD((nqptg*(GGML_PAD(ne00, 128) + 4*ncpsg*(nsg)) + ne20*(nsg))*(sizeof(float)/2), 16)) +#define FATTN_SMEM(nsg) (GGML_PAD((nqptg*(GGML_PAD(ne00, 128) + 4*ncpsg*(nsg)) + 2*ne20*(nsg))*(sizeof(float)/2), 16)) int64_t nsgmax = 2; while (true) { @@ -4919,8 +5665,39 @@ static bool ggml_metal_encode_node( default: GGML_ABORT("not implemented"); } + GGML_ASSERT(ne00 % ggml_blck_size(src0->type) == 0); + + // TODO: support + //const int32_t nk00 = ne00/ggml_blck_size(dst->type); + const int32_t nk00 = ne00; + + int nth = 32; // SIMD width + + while (nth < nk00 && nth < (int) pipeline.maxTotalThreadsPerThreadgroup) { + nth *= 2; + } + + nth = MIN(nth, (int) pipeline.maxTotalThreadsPerThreadgroup); + + // when rows are small, we can batch them together in a single threadgroup + int nrptg = 1; + + // TODO: relax this constraint in the future + if (ggml_blck_size(src0->type) == 1 && ggml_blck_size(dst->type) == 1) { + if (nth > nk00) { + nrptg = (nth + nk00 - 1)/nk00; + nth = nk00; + + if (nrptg*nth > (int) pipeline.maxTotalThreadsPerThreadgroup) { + nrptg--; + } + } + } + + nth = MIN(nth, nk00); + ggml_metal_kargs_cpy args = { - /*.ne00 =*/ ne00, + /*.ne00 =*/ nk00, /*.ne01 =*/ ne01, /*.ne02 =*/ ne02, /*.ne03 =*/ ne03, @@ -4943,11 +5720,7 @@ static bool ggml_metal_encode_node( [encoder setBuffer:id_src0 offset:offs_src0 atIndex:1]; [encoder setBuffer:id_dst offset:offs_dst atIndex:2]; - GGML_ASSERT(ne00 % ggml_blck_size(src0->type) == 0); - int nth = MIN(1024, ne00/ggml_blck_size(src0->type)); - - [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)]; - + [encoder dispatchThreadgroups:MTLSizeMake((ne01 + nrptg - 1)/nrptg, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(nth, nrptg, 1)]; } break; case GGML_OP_SET: { @@ -5105,7 +5878,7 @@ static bool ggml_metal_encode_node( } } - return true; + return n_fuse; } static enum ggml_status ggml_metal_graph_compute( @@ -5253,7 +6026,6 @@ static void ggml_backend_metal_buffer_free_buffer(ggml_backend_buffer_t buffer) } ggml_backend_metal_buffer_rset_free(ctx); - ggml_backend_metal_device_rel(buffer->buft->device->context); if (ctx->owned) { #if TARGET_OS_OSX @@ -5363,7 +6135,10 @@ static ggml_backend_buffer_t ggml_backend_metal_buffer_type_alloc_buffer(ggml_ba } struct ggml_backend_metal_device_context * ctx_dev = (struct ggml_backend_metal_device_context *)buft->device->context; - id device = ggml_backend_metal_device_acq(ctx_dev); + + GGML_ASSERT(ctx_dev->mtl_device != nil); + + id device = ctx_dev->mtl_device; ctx->all_data = ggml_metal_host_malloc(size_aligned); ctx->all_size = size_aligned; @@ -5386,14 
+6161,12 @@ static ggml_backend_buffer_t ggml_backend_metal_buffer_type_alloc_buffer(ggml_ba if (size_aligned > 0 && (ctx->all_data == NULL || ctx->buffers[0].metal == nil)) { GGML_LOG_ERROR("%s: error: failed to allocate buffer, size = %8.2f MiB\n", __func__, size_aligned / 1024.0 / 1024.0); free(ctx); - ggml_backend_metal_device_rel(ctx_dev); return NULL; } if (!ggml_backend_metal_buffer_rset_init(ctx, ctx_dev, device)) { GGML_LOG_ERROR("%s: error: failed to initialize residency set\n", __func__); free(ctx); - ggml_backend_metal_device_rel(ctx_dev); return NULL; } @@ -5404,17 +6177,14 @@ static ggml_backend_buffer_t ggml_backend_metal_buffer_type_alloc_buffer(ggml_ba static size_t ggml_backend_metal_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) { return 32; + GGML_UNUSED(buft); } static size_t ggml_backend_metal_buffer_type_get_max_size(ggml_backend_buffer_type_t buft) { - id device = ggml_backend_metal_device_acq(buft->device->context); - const size_t max_size = device.maxBufferLength; - ggml_backend_metal_device_rel(buft->device->context); + const size_t max_size = ((struct ggml_backend_metal_device_context *)buft->device->context)->max_size; return max_size; - - GGML_UNUSED(buft); } static bool ggml_backend_metal_buffer_type_is_host(ggml_backend_buffer_type_t buft) { @@ -5487,7 +6257,10 @@ ggml_backend_buffer_t ggml_backend_metal_buffer_from_ptr(void * data, size_t siz } struct ggml_backend_metal_device_context * ctx_dev = &g_ggml_ctx_dev_main; - id device = ggml_backend_metal_device_acq(ctx_dev); + + GGML_ASSERT(ctx_dev->mtl_device != nil); + + id device = ctx_dev->mtl_device; // the buffer fits into the max buffer size allowed by the device if (size_aligned <= device.maxBufferLength) { @@ -5543,7 +6316,6 @@ ggml_backend_buffer_t ggml_backend_metal_buffer_from_ptr(void * data, size_t siz if (!ggml_backend_metal_buffer_rset_init(ctx, ctx_dev, device)) { GGML_LOG_ERROR("%s: error: failed to initialize residency set\n", __func__); free(ctx); - ggml_backend_metal_device_rel(ctx_dev); return NULL; } @@ -5559,10 +6331,8 @@ static const char * ggml_backend_metal_name(ggml_backend_t backend) { } static void ggml_backend_metal_free(ggml_backend_t backend) { - struct ggml_backend_metal_context * ctx = backend->context; - struct ggml_backend_metal_device_context * ctx_dev = backend->device->context; + struct ggml_backend_metal_context * ctx = backend->context; - ggml_backend_metal_device_rel(ctx_dev); ggml_metal_free(ctx); free(backend); @@ -5615,20 +6385,26 @@ static void ggml_backend_metal_set_n_cb(ggml_backend_t backend, int n_cb) { struct ggml_metal_mem_pool * mem_pool = ctx->cmd_bufs[cb_idx].mem_pool; ggml_metal_mem_pool_reset(mem_pool); - for (int idx = node_start; idx < node_end; ++idx) { + for (int idx = node_start; idx < node_end;) { if (should_capture) { [encoder pushDebugGroup:[NSString stringWithCString:ggml_op_desc(ggml_graph_node(ctx->gf, idx)) encoding:NSUTF8StringEncoding]]; } - const bool res = ggml_metal_encode_node(backend, idx, encoder, mem_pool); + const int res = ggml_metal_encode_node(backend, idx, node_end, encoder, mem_pool); + if (idx + res > node_end) { + GGML_ABORT("fusion error: nodes spanning multiple encoders have been fused. 
this indicates a bug in the fusion logic %s", + "https://github.com/ggml-org/llama.cpp/pull/14849"); + } if (should_capture) { [encoder popDebugGroup]; } - if (!res) { + if (res == 0) { break; } + + idx += res; } [encoder endEncoding]; @@ -5702,6 +6478,8 @@ bool ggml_backend_metal_supports_family(ggml_backend_t backend, int family) { struct ggml_backend_metal_device_context * ctx_dev = backend->device->context; + GGML_ASSERT(ctx_dev->mtl_device != nil); + return [ctx_dev->mtl_device supportsFamily:(MTLGPUFamilyApple1 + family - 1)]; } @@ -5721,10 +6499,7 @@ static const char * ggml_backend_metal_device_get_name(ggml_backend_dev_t dev) { } static const char * ggml_backend_metal_device_get_description(ggml_backend_dev_t dev) { - // acq/rel just to populate ctx->name in case it hasn't been done yet struct ggml_backend_metal_device_context * ctx_dev = (struct ggml_backend_metal_device_context *)dev->context; - ggml_backend_metal_device_acq(ctx_dev); - ggml_backend_metal_device_rel(ctx_dev); return ctx_dev->name; } @@ -5732,12 +6507,10 @@ static const char * ggml_backend_metal_device_get_description(ggml_backend_dev_t static void ggml_backend_metal_device_get_memory(ggml_backend_dev_t dev, size_t * free, size_t * total) { if (@available(macOS 10.12, iOS 16.0, *)) { struct ggml_backend_metal_device_context * ctx_dev = (struct ggml_backend_metal_device_context *)dev->context; - id device = ggml_backend_metal_device_acq(ctx_dev); + id device = ctx_dev->mtl_device; *total = device.recommendedMaxWorkingSetSize; *free = *total - device.currentAllocatedSize; - - ggml_backend_metal_device_rel(ctx_dev); } else { *free = 1; *total = 1; @@ -5816,7 +6589,10 @@ static ggml_backend_buffer_t ggml_backend_metal_device_buffer_from_ptr(ggml_back } struct ggml_backend_metal_device_context * ctx_dev = (struct ggml_backend_metal_device_context *)dev->context; - id device = ggml_backend_metal_device_acq(ctx_dev); + + GGML_ASSERT(ctx_dev->mtl_device != nil); + + id device = ctx_dev->mtl_device; // the buffer fits into the max buffer size allowed by the device if (size_aligned <= device.maxBufferLength) { @@ -5872,7 +6648,6 @@ static ggml_backend_buffer_t ggml_backend_metal_device_buffer_from_ptr(ggml_back if (!ggml_backend_metal_buffer_rset_init(ctx, ctx_dev, device)) { GGML_LOG_ERROR("%s: error: failed to initialize residency set\n", __func__); free(ctx); - ggml_backend_metal_device_rel(ctx_dev); return NULL; } @@ -5886,8 +6661,9 @@ static bool ggml_backend_metal_device_supports_op(ggml_backend_dev_t dev, const } static bool ggml_backend_metal_device_supports_buft(ggml_backend_dev_t dev, ggml_backend_buffer_type_t buft) { - return buft->iface.get_name == ggml_backend_metal_buffer_type_get_name || - buft->iface.get_name == ggml_backend_metal_buffer_from_ptr_type_get_name; + return + buft->iface.get_name == ggml_backend_metal_buffer_type_get_name || + buft->iface.get_name == ggml_backend_metal_buffer_from_ptr_type_get_name; GGML_UNUSED(dev); } @@ -5972,8 +6748,19 @@ static struct ggml_backend_reg_i ggml_backend_metal_reg_i = { /* .get_proc_address = */ ggml_backend_metal_get_proc_address, }; +// called upon program exit +static void ggml_metal_cleanup(void) { + ggml_backend_metal_device_rel(&g_ggml_ctx_dev_main); +} + +// TODO: make thread-safe ggml_backend_reg_t ggml_backend_metal_reg(void) { - // TODO: make this thread-safe somehow? 
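+    // acquire the shared device context once for the lifetime of the process; the
+    // matching release now happens in ggml_metal_cleanup() via the atexit() hook
+    // registered below, replacing the per-call acq/rel pairs removed above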
+ ggml_backend_metal_device_acq(&g_ggml_ctx_dev_main); + + // register cleanup callback + // TODO: not ideal, but not sure if there is a better way to do this in Objective-C + atexit(ggml_metal_cleanup); + { g_ggml_backend_metal_reg = (struct ggml_backend_reg) { /* .api_version = */ GGML_BACKEND_API_VERSION, diff --git a/ml/backend/ggml/ggml/src/ggml-metal/ggml-metal.metal b/ml/backend/ggml/ggml/src/ggml-metal/ggml-metal.metal index 69fa17de3..b35a3bbdc 100644 --- a/ml/backend/ggml/ggml/src/ggml-metal/ggml-metal.metal +++ b/ml/backend/ggml/ggml/src/ggml-metal/ggml-metal.metal @@ -35,6 +35,33 @@ constexpr constant static float kvalues_iq4nl_f[16] = { -127.f, -104.f, -83.f, -65.f, -49.f, -35.f, -22.f, -10.f, 1.f, 13.f, 25.f, 38.f, 53.f, 69.f, 89.f, 113.f }; +constexpr constant static float kvalues_mxfp4_f[16] = { + 0, .5f, 1.f, 1.5f, 2.f, 3.f, 4.f, 6.f, -0, -.5f, -1.f, -1.5f, -2.f, -3.f, -4.f, -6.f +}; + +static inline int best_index_int8(int n, constant float * val, float x) { + if (x <= val[0]) return 0; + if (x >= val[n-1]) return n-1; + int ml = 0, mu = n-1; + while (mu-ml > 1) { + int mav = (ml+mu)/2; + if (x < val[mav]) mu = mav; else ml = mav; + } + return x - val[mu-1] < val[mu] - x ? mu-1 : mu; +} + +static inline float e8m0_to_fp32(uint8_t x) { + uint32_t bits; + + if (x == 0) { + bits = 0x00400000; + } else { + bits = (uint32_t) x << 23; + } + + return as_type(bits); +} + // NOTE: this is not dequantizing - we are simply fitting the template template void dequantize_f32(device const float4x4 * src, short il, thread type4x4 & reg) { @@ -97,6 +124,199 @@ void dequantize_q4_0_t4(device const block_q4_0 * xb, short il, thread type4 & r } } +void quantize_q4_0(device const float * src, device block_q4_0 & dst) { +#pragma METAL fp math_mode(safe) + float amax = 0.0f; // absolute max + float max = 0.0f; + + for (int j = 0; j < QK4_0; j++) { + const float v = src[j]; + if (amax < fabs(v)) { + amax = fabs(v); + max = v; + } + } + + const float d = max / -8; + const float id = d ? 1.0f/d : 0.0f; + + dst.d = d; + + for (int j = 0; j < QK4_0/2; ++j) { + const float x0 = src[0 + j]*id; + const float x1 = src[QK4_0/2 + j]*id; + + const uint8_t xi0 = MIN(15, (int8_t)(x0 + 8.5f)); + const uint8_t xi1 = MIN(15, (int8_t)(x1 + 8.5f)); + + dst.qs[j] = xi0; + dst.qs[j] |= xi1 << 4; + } +} + +void quantize_q4_1(device const float * src, device block_q4_1 & dst) { +#pragma METAL fp math_mode(safe) + float min = FLT_MAX; + float max = -FLT_MAX; + + for (int j = 0; j < QK4_1; j++) { + const float v = src[j]; + if (min > v) min = v; + if (max < v) max = v; + } + + const float d = (max - min) / ((1 << 4) - 1); + const float id = d ? 1.0f/d : 0.0f; + + dst.d = d; + dst.m = min; + + for (int j = 0; j < QK4_1/2; ++j) { + const float x0 = (src[0 + j] - min)*id; + const float x1 = (src[QK4_1/2 + j] - min)*id; + + const uint8_t xi0 = MIN(15, (int8_t)(x0 + 0.5f)); + const uint8_t xi1 = MIN(15, (int8_t)(x1 + 0.5f)); + + dst.qs[j] = xi0; + dst.qs[j] |= xi1 << 4; + } +} + +void quantize_q5_0(device const float * src, device block_q5_0 & dst) { +#pragma METAL fp math_mode(safe) + float amax = 0.0f; // absolute max + float max = 0.0f; + + for (int j = 0; j < QK5_0; j++) { + const float v = src[j]; + if (amax < fabs(v)) { + amax = fabs(v); + max = v; + } + } + + const float d = max / -16; + const float id = d ? 
1.0f/d : 0.0f; + + dst.d = d; + + uint32_t qh = 0; + for (int j = 0; j < QK5_0/2; ++j) { + const float x0 = src[0 + j]*id; + const float x1 = src[QK5_0/2 + j]*id; + + const uint8_t xi0 = MIN(31, (int8_t)(x0 + 16.5f)); + const uint8_t xi1 = MIN(31, (int8_t)(x1 + 16.5f)); + + dst.qs[j] = (xi0 & 0xf) | ((xi1 & 0xf) << 4); + qh |= ((xi0 & 0x10u) >> 4) << (j + 0); + qh |= ((xi1 & 0x10u) >> 4) << (j + QK5_0/2); + } + + thread const uint8_t * qh8 = (thread const uint8_t *)&qh; + + for (int j = 0; j < 4; ++j) { + dst.qh[j] = qh8[j]; + } +} + +void quantize_q5_1(device const float * src, device block_q5_1 & dst) { +#pragma METAL fp math_mode(safe) + float max = src[0]; + float min = src[0]; + + for (int j = 1; j < QK5_1; j++) { + const float v = src[j]; + min = v < min ? v : min; + max = v > max ? v : max; + } + + const float d = (max - min) / 31; + const float id = d ? 1.0f/d : 0.0f; + + dst.d = d; + dst.m = min; + + uint32_t qh = 0; + for (int j = 0; j < QK5_1/2; ++j) { + const float x0 = (src[0 + j] - min)*id; + const float x1 = (src[QK5_1/2 + j] - min)*id; + + const uint8_t xi0 = (uint8_t)(x0 + 0.5f); + const uint8_t xi1 = (uint8_t)(x1 + 0.5f); + + dst.qs[j] = (xi0 & 0xf) | ((xi1 & 0xf) << 4); + qh |= ((xi0 & 0x10u) >> 4) << (j + 0); + qh |= ((xi1 & 0x10u) >> 4) << (j + QK5_1/2); + } + + thread const uint8_t * qh8 = (thread const uint8_t *)&qh; + + for (int j = 0; j < 4; ++j) { + dst.qh[j] = qh8[j]; + } +} + +void quantize_q8_0(device const float * src, device block_q8_0 & dst) { +#pragma METAL fp math_mode(safe) + float amax = 0.0f; // absolute max + + for (int j = 0; j < QK8_0; j++) { + const float v = src[j]; + amax = MAX(amax, fabs(v)); + } + + const float d = amax / ((1 << 7) - 1); + const float id = d ? 1.0f/d : 0.0f; + + dst.d = d; + + for (int j = 0; j < QK8_0; ++j) { + const float x0 = src[j]*id; + + dst.qs[j] = round(x0); + } +} + +void quantize_iq4_nl(device const float * src, device block_iq4_nl & dst) { +#pragma METAL fp math_mode(safe) + float amax = 0.0f; // absolute max + float max = 0.0f; + + for (int j = 0; j < QK4_NL; j++) { + const float v = src[j]; + if (amax < fabs(v)) { + amax = fabs(v); + max = v; + } + } + + const float d = max / kvalues_iq4nl_f[0]; + const float id = d ? 1.0f/d : 0.0f; + + float sumqx = 0, sumq2 = 0; + for (int j = 0; j < QK4_NL/2; ++j) { + const float x0 = src[0 + j]*id; + const float x1 = src[QK4_NL/2 + j]*id; + + const uint8_t xi0 = best_index_int8(16, kvalues_iq4nl_f, x0); + const uint8_t xi1 = best_index_int8(16, kvalues_iq4nl_f, x1); + + dst.qs[j] = xi0 | (xi1 << 4); + + const float v0 = kvalues_iq4nl_f[xi0]; + const float v1 = kvalues_iq4nl_f[xi1]; + const float w0 = src[0 + j]*src[0 + j]; + const float w1 = src[QK4_NL/2 + j]*src[QK4_NL/2 + j]; + sumqx += w0*v0*src[j] + w1*v1*src[QK4_NL/2 + j]; + sumq2 += w0*v0*v0 + w1*v1*v1; + + } + + dst.d = sumq2 > 0 ? sumqx/sumq2 : d; +} + template void dequantize_q4_1(device const block_q4_1 * xb, short il, thread type4x4 & reg) { device const uint16_t * qs = ((device const uint16_t *)xb + 2); @@ -279,6 +499,36 @@ void dequantize_q8_0_t4(device const block_q8_0 *xb, short il, thread type4 & re } } +template +void dequantize_mxfp4(device const block_mxfp4 * xb, short il, thread type4x4 & reg) { + device const uint8_t * q2 = (device const uint8_t *)xb->qs; + + const float d = e8m0_to_fp32(xb->e); + const uint8_t shr = il >= 1 ? 
4 : 0; + + for (int i = 0; i < 4; ++i) { + reg[i][0] = d * kvalues_mxfp4_f[(q2[4*i + 0] >> shr) & 0x0F]; + reg[i][1] = d * kvalues_mxfp4_f[(q2[4*i + 1] >> shr) & 0x0F]; + reg[i][2] = d * kvalues_mxfp4_f[(q2[4*i + 2] >> shr) & 0x0F]; + reg[i][3] = d * kvalues_mxfp4_f[(q2[4*i + 3] >> shr) & 0x0F]; + } +} + +template +void dequantize_mxfp4_t4(device const block_mxfp4 * xb, short il, thread type4 & reg) { + device const uint8_t * q2 = (device const uint8_t *)xb->qs; + + const float d = e8m0_to_fp32(xb->e); + const short il4 = il%4; + + const uint8_t shr = il >= 4 ? 4 : 0; + + reg[0] = d * kvalues_mxfp4_f[(q2[4*il4 + 0] >> shr) & 0x0F]; + reg[1] = d * kvalues_mxfp4_f[(q2[4*il4 + 1] >> shr) & 0x0F]; + reg[2] = d * kvalues_mxfp4_f[(q2[4*il4 + 2] >> shr) & 0x0F]; + reg[3] = d * kvalues_mxfp4_f[(q2[4*il4 + 3] >> shr) & 0x0F]; +} + template void dequantize_q2_K(device const block_q2_K *xb, short il, thread type4x4 & reg) { const float d = xb->d; @@ -628,7 +878,8 @@ enum ggml_sort_order { // general-purpose kernel for addition, subtraction, multiplication and division of two tensors // pros: works for non-contiguous tensors, supports broadcast across all dims // cons: not very efficient -kernel void kernel_add( +template +kernel void kernel_add_fuse_impl( constant ggml_metal_kargs_bin & args, device const char * src0, device const char * src1, @@ -644,16 +895,39 @@ kernel void kernel_add( const int i12 = i02%args.ne12; const int i11 = i01%args.ne11; - device const char * src0_ptr = src0 + i03*args.nb03 + i02*args.nb02 + i01*args.nb01 + args.offs; - device const char * src1_ptr = src1 + i13*args.nb13 + i12*args.nb12 + i11*args.nb11; - device char * dst_ptr = dst + i03*args.nb3 + i02*args.nb2 + i01*args.nb1 + args.offs; + device const float * src0_ptr = (device const float *) (src0 + i03*args.nb03 + i02*args.nb02 + i01*args.nb01 + args.offs); + device float * dst_ptr = (device float *) (dst + i03*args.nb3 + i02*args.nb2 + i01*args.nb1 + args.offs); + + device const float * src1_ptr[F]; + for (short j = 0; j < F; ++j) { + src1_ptr[j] = (device const float *) (src1 + args.o1[j] + i13*args.nb13 + i12*args.nb12 + i11*args.nb11); + } for (int i0 = tpitg.x; i0 < args.ne0; i0 += ntg.x) { const int i10 = i0%args.ne10; - *((device float *)(dst_ptr + i0*args.nb0)) = *((device float *)(src0_ptr + i0*args.nb00)) + *((device float *)(src1_ptr + i10*args.nb10)); + + float res = src0_ptr[i0]; + +#pragma unroll + for (short j = 0; j < F; ++j) { + res += src1_ptr[j][i10]; + } + + dst_ptr[i0] = res; } } +typedef decltype(kernel_add_fuse_impl<2>) kernel_add_fuse_t; + +template [[host_name("kernel_add")]] kernel kernel_add_fuse_t kernel_add_fuse_impl<1>; +template [[host_name("kernel_add_fuse_2")]] kernel kernel_add_fuse_t kernel_add_fuse_impl<2>; +template [[host_name("kernel_add_fuse_3")]] kernel kernel_add_fuse_t kernel_add_fuse_impl<3>; +template [[host_name("kernel_add_fuse_4")]] kernel kernel_add_fuse_t kernel_add_fuse_impl<4>; +template [[host_name("kernel_add_fuse_5")]] kernel kernel_add_fuse_t kernel_add_fuse_impl<5>; +template [[host_name("kernel_add_fuse_6")]] kernel kernel_add_fuse_t kernel_add_fuse_impl<6>; +template [[host_name("kernel_add_fuse_7")]] kernel kernel_add_fuse_t kernel_add_fuse_impl<7>; +template [[host_name("kernel_add_fuse_8")]] kernel kernel_add_fuse_t kernel_add_fuse_impl<8>; + kernel void kernel_sub( constant ggml_metal_kargs_bin & args, device const char * src0, @@ -671,7 +945,7 @@ kernel void kernel_sub( const int i11 = i01%args.ne11; device const char * src0_ptr = src0 + i03*args.nb03 + 
i02*args.nb02 + i01*args.nb01 + args.offs; - device const char * src1_ptr = src1 + i13*args.nb13 + i12*args.nb12 + i11*args.nb11; + device const char * src1_ptr = src1 + i13*args.nb13 + i12*args.nb12 + i11*args.nb11 + args.o1[0]; device char * dst_ptr = dst + i03*args.nb3 + i02*args.nb2 + i01*args.nb1 + args.offs; for (int i0 = tpitg.x; i0 < args.ne0; i0 += ntg.x) { @@ -696,9 +970,9 @@ kernel void kernel_mul( const int i12 = i02%args.ne12; const int i11 = i01%args.ne11; - device const char * src0_ptr = src0 + i03*args.nb03 + i02*args.nb02 + i01*args.nb01; - device const char * src1_ptr = src1 + i13*args.nb13 + i12*args.nb12 + i11*args.nb11; - device char * dst_ptr = dst + i03*args.nb3 + i02*args.nb2 + i01*args.nb1; + device const char * src0_ptr = src0 + i03*args.nb03 + i02*args.nb02 + i01*args.nb01 + args.offs; + device const char * src1_ptr = src1 + i13*args.nb13 + i12*args.nb12 + i11*args.nb11 + args.o1[0]; + device char * dst_ptr = dst + i03*args.nb3 + i02*args.nb2 + i01*args.nb1 + args.offs; for (int i0 = tpitg.x; i0 < args.ne0; i0 += ntg.x) { const int i10 = i0%args.ne10; @@ -722,9 +996,9 @@ kernel void kernel_div( const int i12 = i02%args.ne12; const int i11 = i01%args.ne11; - device const char * src0_ptr = src0 + i03*args.nb03 + i02*args.nb02 + i01*args.nb01; - device const char * src1_ptr = src1 + i13*args.nb13 + i12*args.nb12 + i11*args.nb11; - device char * dst_ptr = dst + i03*args.nb3 + i02*args.nb2 + i01*args.nb1; + device const char * src0_ptr = src0 + i03*args.nb03 + i02*args.nb02 + i01*args.nb01 + args.offs; + device const char * src1_ptr = src1 + i13*args.nb13 + i12*args.nb12 + i11*args.nb11 + args.o1[0]; + device char * dst_ptr = dst + i03*args.nb3 + i02*args.nb2 + i01*args.nb1 + args.offs; for (int i0 = tpitg.x; i0 < args.ne0; i0 += ntg.x) { const int i10 = i0%args.ne10; @@ -732,6 +1006,32 @@ kernel void kernel_div( } } +kernel void kernel_add_id( + constant ggml_metal_kargs_add_id & args, + device const char * src0, + device const char * src1, + device const char * src2, + device char * dst, + uint3 tgpig[[threadgroup_position_in_grid]], + ushort3 tpitg[[thread_position_in_threadgroup]], + ushort3 ntg[[threads_per_threadgroup]]) { + const int i1 = tgpig.x; + const int i2 = tgpig.y; + + const int i11 = *((device const int32_t *) (src2 + i1*sizeof(int32_t) + i2*args.nb21)); + + const size_t nb1 = args.ne0 * sizeof(float); + const size_t nb2 = args.ne1 * nb1; + + device float * dst_row = (device float *)((device char *)dst + i1*nb1 + i2*nb2); + device const float * src0_row = (device const float *)((device char *)src0 + i1*args.nb01 + i2*args.nb02); + device const float * src1_row = (device const float *)((device char *)src1 + i11*args.nb11); + + for (int i0 = tpitg.x; i0 < args.ne0; i0 += ntg.x) { + dst_row[i0] = src0_row[i0] + src1_row[i0]; + } +} + template kernel void kernel_repeat( constant ggml_metal_kargs_repeat & args, @@ -766,60 +1066,161 @@ template [[host_name("kernel_repeat_i16")]] kernel kernel_repeat_t kernel_repeat // assumption: src1 is a row // broadcast src1 into src0 -kernel void kernel_add_row( +template +kernel void kernel_add_row_c4_fuse_impl( constant ggml_metal_kargs_bin & args, - device const float4 * src0, - device const float4 * src1, - device float4 * dst, + device const char * src0, + device const char * src1, + device char * dst, uint tpig[[thread_position_in_grid]]) { + const uint nb = args.ne00/4; - dst[tpig] = src0[tpig] + src1[tpig % nb]; + const uint i = tpig % nb; + + device const float4 * src0_row = (device const float4 *) (src0); + 
device float4 * dst_row = (device float4 *) (dst); + + device const float4 * src1_row[F]; + for (short j = 0; j < F; ++j) { + src1_row[j] = (device const float4 *) (src1 + args.o1[j]); + } + + float4 res = src0_row[tpig]; + +#pragma unroll(F) + for (short j = 0; j < F; ++j) { + res += src1_row[j][i]; + } + + dst_row[tpig] = res; } -kernel void kernel_sub_row( +typedef decltype(kernel_add_row_c4_fuse_impl<1>) kernel_add_row_c4_fuse_t; + +template [[host_name("kernel_add_row_c4")]] kernel kernel_add_row_c4_fuse_t kernel_add_row_c4_fuse_impl<1>; +template [[host_name("kernel_add_row_c4_fuse_2")]] kernel kernel_add_row_c4_fuse_t kernel_add_row_c4_fuse_impl<2>; +template [[host_name("kernel_add_row_c4_fuse_3")]] kernel kernel_add_row_c4_fuse_t kernel_add_row_c4_fuse_impl<3>; +template [[host_name("kernel_add_row_c4_fuse_4")]] kernel kernel_add_row_c4_fuse_t kernel_add_row_c4_fuse_impl<4>; +template [[host_name("kernel_add_row_c4_fuse_5")]] kernel kernel_add_row_c4_fuse_t kernel_add_row_c4_fuse_impl<5>; +template [[host_name("kernel_add_row_c4_fuse_6")]] kernel kernel_add_row_c4_fuse_t kernel_add_row_c4_fuse_impl<6>; +template [[host_name("kernel_add_row_c4_fuse_7")]] kernel kernel_add_row_c4_fuse_t kernel_add_row_c4_fuse_impl<7>; +template [[host_name("kernel_add_row_c4_fuse_8")]] kernel kernel_add_row_c4_fuse_t kernel_add_row_c4_fuse_impl<8>; + +template +kernel void kernel_sub_row_c4_fuse_impl( constant ggml_metal_kargs_bin & args, - device const float4 * src0, - device const float4 * src1, - device float4 * dst, + device const char * src0, + device const char * src1, + device char * dst, uint tpig[[thread_position_in_grid]]) { + const uint nb = args.ne00/4; - dst[tpig] = src0[tpig] - src1[tpig % nb]; + const uint i = tpig % nb; + + device const float4 * src0_row = (device const float4 *) (src0); + device float4 * dst_row = (device float4 *) (dst); + + device const float4 * src1_row[F]; + for (short j = 0; j < F; ++j) { + src1_row[j] = (device const float4 *) (src1 + args.o1[j]); + } + + float4 res = src0_row[tpig]; + +#pragma unroll(F) + for (short j = 0; j < F; ++j) { + res -= src1_row[j][i]; + } + + dst_row[tpig] = res; } -kernel void kernel_mul_row( +typedef decltype(kernel_sub_row_c4_fuse_impl<1>) kernel_sub_row_c4_fuse_t; + +template [[host_name("kernel_sub_row_c4")]] kernel kernel_sub_row_c4_fuse_t kernel_sub_row_c4_fuse_impl<1>; + +template +kernel void kernel_mul_row_c4_fuse_impl( constant ggml_metal_kargs_bin & args, - device const float4 * src0, - device const float4 * src1, - device float4 * dst, + device const char * src0, + device const char * src1, + device char * dst, uint tpig[[thread_position_in_grid]]) { + const uint nb = args.ne00/4; - dst[tpig] = src0[tpig] * src1[tpig % nb]; + const uint i = tpig % nb; + + device const float4 * src0_row = (device const float4 *) (src0); + device float4 * dst_row = (device float4 *) (dst); + + device const float4 * src1_row[F]; + for (short j = 0; j < F; ++j) { + src1_row[j] = (device const float4 *) (src1 + args.o1[j]); + } + + float4 res = src0_row[tpig]; + +#pragma unroll(F) + for (short j = 0; j < F; ++j) { + res *= src1_row[j][i]; + } + + dst_row[tpig] = res; } -kernel void kernel_div_row( +typedef decltype(kernel_mul_row_c4_fuse_impl<1>) kernel_mul_row_c4_fuse_t; + +template [[host_name("kernel_mul_row_c4")]] kernel kernel_mul_row_c4_fuse_t kernel_mul_row_c4_fuse_impl<1>; + +template +kernel void kernel_div_row_c4_fuse_impl( constant ggml_metal_kargs_bin & args, - device const float4 * src0, - device const float4 * src1, - device 
float4 * dst, + device const char * src0, + device const char * src1, + device char * dst, uint tpig[[thread_position_in_grid]]) { + const uint nb = args.ne00/4; - dst[tpig] = src0[tpig] / src1[tpig % nb]; + const uint i = tpig % nb; + + device const float4 * src0_row = (device const float4 *) (src0); + device float4 * dst_row = (device float4 *) (dst); + + device const float4 * src1_row[F]; + for (short j = 0; j < F; ++j) { + src1_row[j] = (device const float4 *) (src1 + args.o1[j]); + } + + float4 res = src0_row[tpig]; + +#pragma unroll(F) + for (short j = 0; j < F; ++j) { + res /= src1_row[j][i]; + } + + dst_row[tpig] = res; } +typedef decltype(kernel_div_row_c4_fuse_impl<1>) kernel_div_row_c4_fuse_t; + +template [[host_name("kernel_div_row_c4")]] kernel kernel_div_row_c4_fuse_t kernel_div_row_c4_fuse_impl<1>; + kernel void kernel_scale( device const float * src0, device float * dst, constant float & scale, + constant float & bias, uint tpig[[thread_position_in_grid]]) { - dst[tpig] = src0[tpig] * scale; + dst[tpig] = src0[tpig] * scale + bias; } kernel void kernel_scale_4( device const float4 * src0, device float4 * dst, constant float & scale, + constant float & bias, uint tpig[[thread_position_in_grid]]) { - dst[tpig] = src0[tpig] * scale; + dst[tpig] = src0[tpig] * scale + bias; } kernel void kernel_clamp( @@ -856,6 +1257,7 @@ kernel void kernel_tanh( constant float GELU_COEF_A = 0.044715f; constant float GELU_QUICK_COEF = -1.702f; constant float SQRT_2_OVER_PI = 0.79788456080286535587989211986876f; +constant float SQRT_2_INV = 0.70710678118654752440084436210484f; kernel void kernel_gelu( device const float * src0, @@ -897,6 +1299,42 @@ kernel void kernel_gelu_quick_4( dst[tpig] = x*(1.0f/(1.0f+exp(GELU_QUICK_COEF*x))); } +// based on Abramowitz and Stegun formula 7.1.26 or similar Hastings' approximation +// ref: https://www.johndcook.com/blog/python_erf/ +constant float p_erf = 0.3275911f; +constant float a1_erf = 0.254829592f; +constant float a2_erf = -0.284496736f; +constant float a3_erf = 1.421413741f; +constant float a4_erf = -1.453152027f; +constant float a5_erf = 1.061405429f; + +template +T erf_approx(T x) { + T sign_x = sign(x); + x = fabs(x); + T t = 1.0f / (1.0f + p_erf * x); + T y = 1.0f - (((((a5_erf * t + a4_erf) * t) + a3_erf) * t + a2_erf) * t + a1_erf) * t * exp(-x * x); + return sign_x * y; +} + +kernel void kernel_gelu_erf( + device const float * src0, + device float * dst, + uint tpig[[thread_position_in_grid]]) { + device const float & x = src0[tpig]; + + dst[tpig] = 0.5f*x*(1.0f+erf_approx(x*SQRT_2_INV)); +} + +kernel void kernel_gelu_erf_4( + device const float4 * src0, + device float4 * dst, + uint tpig[[thread_position_in_grid]]) { + device const float4 & x = src0[tpig]; + + dst[tpig] = 0.5f*x*(1.0f+erf_approx(x*SQRT_2_INV)); +} + kernel void kernel_silu( device const float * src0, device float * dst, @@ -956,6 +1394,185 @@ kernel void kernel_neg( dst[tpig] = -src0[tpig]; } +kernel void kernel_abs( + device const float * src0, + device float * dst, + uint tpig[[thread_position_in_grid]]) { + dst[tpig] = fabs(src0[tpig]); +} + +kernel void kernel_sgn( + device const float * src0, + device float * dst, + uint tpig[[thread_position_in_grid]]) { + device const float & x = src0[tpig]; + dst[tpig] = (x > 0.0f) ? 1.0f : ((x < 0.0f) ? -1.0f : 0.0f); +} + +kernel void kernel_step( + device const float * src0, + device float * dst, + uint tpig[[thread_position_in_grid]]) { + dst[tpig] = src0[tpig] > 0.0f ? 
1.0f : 0.0f; +} + +kernel void kernel_hardswish( + device const float * src0, + device float * dst, + uint tpig[[thread_position_in_grid]]) { + device const float & x = src0[tpig]; + dst[tpig] = x * fmin(1.0f, fmax(0.0f, (x + 3.0f) / 6.0f)); +} + +kernel void kernel_hardsigmoid( + device const float * src0, + device float * dst, + uint tpig[[thread_position_in_grid]]) { + device const float & x = src0[tpig]; + dst[tpig] = fmin(1.0f, fmax(0.0f, (x + 3.0f) / 6.0f)); +} + +kernel void kernel_exp( + device const float * src0, + device float * dst, + uint tpig[[thread_position_in_grid]]) { + dst[tpig] = exp(src0[tpig]); +} + +kernel void kernel_reglu( + device const char * src0, + device const char * src1, + device char * dst, + constant ggml_metal_kargs_glu & args, + uint tgpig[[threadgroup_position_in_grid]], + uint tpitg[[thread_position_in_threadgroup]], + uint ntg[[threads_per_threadgroup]]) { + device const float * src0_row = (device const float *) ((device const char *) src0 + tgpig*args.nb01) + args.i00; + device const float * src1_row = (device const float *) ((device const char *) src1 + tgpig*args.nb11) + args.i10; + device float * dst_row = (device float *) ((device char *) dst + tgpig*args.nb1); + + for (int i0 = tpitg; i0 < args.ne0; i0 += ntg) { + const float x0 = src0_row[i0]; + const float x1 = src1_row[i0]; + + dst_row[i0] = x0*x1*(x0 > 0.0f); + } +} + +kernel void kernel_geglu( + device const char * src0, + device const char * src1, + device char * dst, + constant ggml_metal_kargs_glu & args, + uint tgpig[[threadgroup_position_in_grid]], + uint tpitg[[thread_position_in_threadgroup]], + uint ntg[[threads_per_threadgroup]]) { + device const float * src0_row = (device const float *) ((device const char *) src0 + tgpig*args.nb01) + args.i00; + device const float * src1_row = (device const float *) ((device const char *) src1 + tgpig*args.nb11) + args.i10; + device float * dst_row = (device float *) ((device char *) dst + tgpig*args.nb1); + + for (int i0 = tpitg; i0 < args.ne0; i0 += ntg) { + const float x0 = src0_row[i0]; + const float x1 = src1_row[i0]; + + const float gelu = 0.5f*x0*(1.0f + precise::tanh(SQRT_2_OVER_PI*x0*(1.0f + GELU_COEF_A*x0*x0))); + + dst_row[i0] = gelu*x1; + } +} + +kernel void kernel_swiglu( + device const char * src0, + device const char * src1, + device char * dst, + constant ggml_metal_kargs_glu & args, + uint tgpig[[threadgroup_position_in_grid]], + uint tpitg[[thread_position_in_threadgroup]], + uint ntg[[threads_per_threadgroup]]) { + device const float * src0_row = (device const float *) ((device const char *) src0 + tgpig*args.nb01) + args.i00; + device const float * src1_row = (device const float *) ((device const char *) src1 + tgpig*args.nb11) + args.i10; + device float * dst_row = (device float *) ((device char *) dst + tgpig*args.nb1); + + for (int i0 = tpitg; i0 < args.ne0; i0 += ntg) { + const float x0 = src0_row[i0]; + const float x1 = src1_row[i0]; + + const float silu = x0 / (1.0f + exp(-x0)); + + dst_row[i0] = silu*x1; + } +} + +kernel void kernel_swiglu_oai( + device const char * src0, + device const char * src1, + device char * dst, + constant ggml_metal_kargs_glu & args, + uint tgpig[[threadgroup_position_in_grid]], + uint tpitg[[thread_position_in_threadgroup]], + uint ntg[[threads_per_threadgroup]]) { + device const float * src0_row = (device const float *) ((device const char *) src0 + tgpig*args.nb01) + args.i00; + device const float * src1_row = (device const float *) ((device const char *) src1 + tgpig*args.nb11) + args.i10; + 
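// swiglu_oai, as computed in the loop below: clamp the gate x0 from above
// by args.limit and the linear part x1 into [-limit, limit], then
//   out = (x0 * sigmoid(alpha * x0)) * (1 + x1)
// the (1 + x1) term is what distinguishes this variant from plain swiglu;
// with alpha = 1 and an infinite limit it reduces to silu(x0) * (1 + x1).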
device float * dst_row = (device float *) ((device char *) dst + tgpig*args.nb1); + + for (int i0 = tpitg; i0 < args.ne0; i0 += ntg) { + float x0 = src0_row[i0]; + float x1 = src1_row[i0]; + + x0 = min(x0, args.limit); + x1 = max(min(x1, args.limit), -args.limit); + + float out_glu = x0 / (1.0f + exp(-x0 * args.alpha)); + out_glu = out_glu * (1.0f + x1); + + dst_row[i0] = out_glu; + } +} + +kernel void kernel_geglu_erf( + device const char * src0, + device const char * src1, + device char * dst, + constant ggml_metal_kargs_glu & args, + uint tgpig[[threadgroup_position_in_grid]], + uint tpitg[[thread_position_in_threadgroup]], + uint ntg[[threads_per_threadgroup]]) { + device const float * src0_row = (device const float *) ((device const char *) src0 + tgpig*args.nb01) + args.i00; + device const float * src1_row = (device const float *) ((device const char *) src1 + tgpig*args.nb11) + args.i10; + device float * dst_row = (device float *) ((device char *) dst + tgpig*args.nb1); + + for (int i0 = tpitg; i0 < args.ne0; i0 += ntg) { + const float x0 = src0_row[i0]; + const float x1 = src1_row[i0]; + + const float gelu_erf = 0.5f*x0*(1.0f+erf_approx(x0*SQRT_2_INV)); + + dst_row[i0] = gelu_erf*x1; + } +} + +kernel void kernel_geglu_quick( + device const char * src0, + device const char * src1, + device char * dst, + constant ggml_metal_kargs_glu & args, + uint tgpig[[threadgroup_position_in_grid]], + uint tpitg[[thread_position_in_threadgroup]], + uint ntg[[threads_per_threadgroup]]) { + device const float * src0_row = (device const float *) ((device const char *) src0 + tgpig*args.nb01) + args.i00; + device const float * src1_row = (device const float *) ((device const char *) src1 + tgpig*args.nb11) + args.i10; + device float * dst_row = (device float *) ((device char *) dst + tgpig*args.nb1); + + for (int i0 = tpitg; i0 < args.ne0; i0 += ntg) { + const float x0 = src0_row[i0]; + const float x1 = src1_row[i0]; + + const float gelu_quick = x0*(1.0f/(1.0f+exp(GELU_QUICK_COEF*x0))); + + dst_row[i0] = gelu_quick*x1; + } +} + template kernel void kernel_sum_rows( constant ggml_metal_kargs_sum_rows & args, @@ -1015,27 +1632,33 @@ template kernel void kernel_soft_max( device const char * src0, device const char * src1, + device const char * src2, device char * dst, constant ggml_metal_kargs_soft_max & args, threadgroup float * buf [[threadgroup(0)]], - uint tgpig[[threadgroup_position_in_grid]], - uint tpitg[[thread_position_in_threadgroup]], + uint3 tgpig[[threadgroup_position_in_grid]], + uint3 tpitg[[thread_position_in_threadgroup]], uint sgitg[[simdgroup_index_in_threadgroup]], uint tiisg[[thread_index_in_simdgroup]], - uint ntg[[threads_per_threadgroup]]) { - const int64_t i03 = (tgpig) / (args.ne02*args.ne01); - const int64_t i02 = (tgpig - i03*args.ne02*args.ne01) / args.ne01; - const int64_t i01 = (tgpig - i03*args.ne02*args.ne01 - i02*args.ne01); + uint3 tptg[[threads_per_threadgroup]]) { + const int32_t i03 = tgpig.z; + const int32_t i02 = tgpig.y; + const int32_t i01 = tgpig.x; - device const float * psrc0 = (device const float *) src0 + (i03*args.ne02*args.ne01*args.ne00 + i02*args.ne01*args.ne00 + i01*args.ne00); - device const T * pmask = src1 != src0 ? 
(device const T *) src1 + i01*args.ne00 : nullptr; - device float * pdst = (device float *) dst + (i03*args.ne02*args.ne01*args.ne00 + i02*args.ne01*args.ne00 + i01*args.ne00); + const int32_t i13 = i03%args.ne13; + const int32_t i12 = i02%args.ne12; + const int32_t i11 = i01; + + device const float * psrc0 = (device const float *) (src0 + i01*args.nb01 + i02*args.nb02 + i03*args.nb03); + device const T * pmask = src1 != src0 ? (device const T * ) (src1 + i11*args.nb11 + i12*args.nb12 + i13*args.nb13) : nullptr; + device const float * psrc2 = src2 != src0 ? (device const float *) (src2) : nullptr; + device float * pdst = (device float *) (dst + i01*args.nb1 + i02*args.nb2 + i03*args.nb3); float slope = 1.0f; // ALiBi if (args.max_bias > 0.0f) { - const int64_t h = i02; + const int32_t h = i02; const float base = h < args.n_head_log2 ? args.m0 : args.m1; const int exp = h < args.n_head_log2 ? h + 1 : 2*(h - args.n_head_log2) + 1; @@ -1044,15 +1667,15 @@ kernel void kernel_soft_max( } // parallel max - float lmax = -INFINITY; + float lmax = psrc2 ? psrc2[i02] : -INFINITY; - for (int i00 = tpitg; i00 < args.ne00; i00 += ntg) { + for (int i00 = tpitg.x; i00 < args.ne00; i00 += tptg.x) { lmax = MAX(lmax, psrc0[i00]*args.scale + (pmask ? slope*pmask[i00] : 0.0f)); } // find the max value in the block float max_val = simd_max(lmax); - if (ntg > N_SIMDWIDTH) { + if (tptg.x > N_SIMDWIDTH) { if (sgitg == 0) { buf[tiisg] = -INFINITY; } @@ -1071,7 +1694,7 @@ kernel void kernel_soft_max( // parallel sum float lsum = 0.0f; - for (int i00 = tpitg; i00 < args.ne00; i00 += ntg) { + for (int i00 = tpitg.x; i00 < args.ne00; i00 += tptg.x) { const float exp_psrc0 = exp((psrc0[i00]*args.scale + (pmask ? slope*pmask[i00] : 0.0f)) - max_val); lsum += exp_psrc0; pdst[i00] = exp_psrc0; @@ -1083,7 +1706,7 @@ kernel void kernel_soft_max( float sum = simd_sum(lsum); - if (ntg > N_SIMDWIDTH) { + if (tptg.x > N_SIMDWIDTH) { if (sgitg == 0) { buf[tiisg] = 0.0f; } @@ -1100,9 +1723,13 @@ kernel void kernel_soft_max( sum = simd_sum(sum); } + if (psrc2) { + sum += exp(psrc2[i02] - max_val); + } + const float inv_sum = 1.0f/sum; - for (int i00 = tpitg; i00 < args.ne00; i00 += ntg) { + for (int i00 = tpitg.x; i00 < args.ne00; i00 += tptg.x) { pdst[i00] *= inv_sum; } } @@ -1111,26 +1738,32 @@ template kernel void kernel_soft_max_4( device const char * src0, device const char * src1, + device const char * src2, device char * dst, constant ggml_metal_kargs_soft_max & args, threadgroup float * buf [[threadgroup(0)]], - uint tgpig[[threadgroup_position_in_grid]], - uint tpitg[[thread_position_in_threadgroup]], + uint3 tgpig[[threadgroup_position_in_grid]], + uint3 tpitg[[thread_position_in_threadgroup]], uint sgitg[[simdgroup_index_in_threadgroup]], uint tiisg[[thread_index_in_simdgroup]], - uint ntg[[threads_per_threadgroup]]) { - const int64_t i03 = (tgpig) / (args.ne02*args.ne01); - const int64_t i02 = (tgpig - i03*args.ne02*args.ne01) / args.ne01; - const int64_t i01 = (tgpig - i03*args.ne02*args.ne01 - i02*args.ne01); + uint3 tptg[[threads_per_threadgroup]]) { + const int32_t i03 = tgpig.z; + const int32_t i02 = tgpig.y; + const int32_t i01 = tgpig.x; - device const float4 * psrc4 = (device const float4 *) src0 + (i03*args.ne02*args.ne01*args.ne00 + i02*args.ne01*args.ne00 + i01*args.ne00)/4; - device const T * pmask = src1 != src0 ? 
(device const T *) src1 + i01*args.ne00/4 : nullptr; - device float4 * pdst4 = (device float4 *) dst + (i03*args.ne02*args.ne01*args.ne00 + i02*args.ne01*args.ne00 + i01*args.ne00)/4; + const int32_t i13 = i03%args.ne13; + const int32_t i12 = i02%args.ne12; + const int32_t i11 = i01; + + device const float4 * psrc4 = (device const float4 *) (src0 + i01*args.nb01 + i02*args.nb02 + i03*args.nb03); + device const T * pmask = src1 != src0 ? (device const T * ) (src1 + i11*args.nb11 + i12*args.nb12 + i13*args.nb13) : nullptr; + device const float * psrc2 = src2 != src0 ? (device const float * ) (src2) : nullptr; + device float4 * pdst4 = (device float4 *) (dst + i01*args.nb1 + i02*args.nb2 + i03*args.nb3); float slope = 1.0f; if (args.max_bias > 0.0f) { - const int64_t h = i02; + const int32_t h = i02; const float base = h < args.n_head_log2 ? args.m0 : args.m1; const int exp = h < args.n_head_log2 ? h + 1 : 2*(h - args.n_head_log2) + 1; @@ -1139,16 +1772,16 @@ kernel void kernel_soft_max_4( } // parallel max - float4 lmax4 = -INFINITY; + float4 lmax4 = psrc2 ? psrc2[i02] : -INFINITY; - for (int i00 = tpitg; i00 < args.ne00/4; i00 += ntg) { + for (int i00 = tpitg.x; i00 < args.ne00/4; i00 += tptg.x) { lmax4 = fmax(lmax4, psrc4[i00]*args.scale + (float4)((pmask ? slope*pmask[i00] : 0.0f))); } const float lmax = MAX(MAX(lmax4[0], lmax4[1]), MAX(lmax4[2], lmax4[3])); float max_val = simd_max(lmax); - if (ntg > N_SIMDWIDTH) { + if (tptg.x > N_SIMDWIDTH) { if (sgitg == 0) { buf[tiisg] = -INFINITY; } @@ -1167,7 +1800,7 @@ kernel void kernel_soft_max_4( // parallel sum float4 lsum4 = 0.0f; - for (int i00 = tpitg; i00 < args.ne00/4; i00 += ntg) { + for (int i00 = tpitg.x; i00 < args.ne00/4; i00 += tptg.x) { const float4 exp_psrc4 = exp((psrc4[i00]*args.scale + (float4)((pmask ? 
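// subtracting max_val before exp() keeps every exponential in [0, 1]
// (the standard max-shift trick for numerically stable softmax); the
// unnormalized exponentials are stored to pdst4 and rescaled by 1/sum
// in the final pass.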
slope*pmask[i00] : 0.0f))) - max_val); lsum4 += exp_psrc4; pdst4[i00] = exp_psrc4; @@ -1181,7 +1814,7 @@ kernel void kernel_soft_max_4( float sum = simd_sum(lsum); - if (ntg > N_SIMDWIDTH) { + if (tptg.x > N_SIMDWIDTH) { if (sgitg == 0) { buf[tiisg] = 0.0f; } @@ -1198,9 +1831,13 @@ kernel void kernel_soft_max_4( sum = simd_sum(sum); } + if (psrc2) { + sum += exp(psrc2[i02] - max_val); + } + const float inv_sum = 1.0f/sum; - for (int i00 = tpitg; i00 < args.ne00/4; i00 += ntg) { + for (int i00 = tpitg.x; i00 < args.ne00/4; i00 += tptg.x) { pdst4[i00] *= inv_sum; } } @@ -1286,7 +1923,7 @@ kernel void kernel_ssm_conv_f32( x[0] = sumf; } -// ref: ggml.c:ggml_compute_forward_ssm_scan_f32 +// ref: ggml.c:ggml_compute_forward_ssm_scan_f32, Mamba-1 part kernel void kernel_ssm_scan_f32( device const void * src0, device const void * src1, @@ -1294,47 +1931,222 @@ kernel void kernel_ssm_scan_f32( device const void * src3, device const void * src4, device const void * src5, + device const void * src6, device float * dst, + threadgroup float * shared [[threadgroup(0)]], constant ggml_metal_kargs_ssm_scan & args, - uint3 tgpig[[threadgroup_position_in_grid]], - uint3 tpitg[[thread_position_in_threadgroup]], - uint3 ntg[[threads_per_threadgroup]]) { - const int64_t ir = tgpig.x; - const int64_t i3 = tgpig.y; + uint3 tgpig[[threadgroup_position_in_grid]], + uint3 tpitg[[thread_position_in_threadgroup]], + ushort sgitg[[simdgroup_index_in_threadgroup]], + ushort tiisg[[thread_index_in_simdgroup]], + ushort sgptg[[simdgroups_per_threadgroup]], + uint3 tgpg[[threadgroups_per_grid]]) { + + const int64_t i0 = tpitg.x; + const int64_t i1 = 0; + const int64_t ir = tgpig.x; // current head + const int64_t i3 = tgpig.y; // current seq + + const uint64_t nb00 = sizeof(float); + const uint64_t nb10 = sizeof(float); + const uint64_t nb20 = sizeof(float); const int64_t nc = args.d_state; - // const int64_t nr = args.d_inner; + const int64_t nr = args.d_inner; + const int64_t nh = args.n_head; + const int64_t ng = args.n_group; const int64_t n_t = args.n_seq_tokens; - // const int64_t n_s = args.n_seqs; + + const int64_t s_off = args.s_off; + + device const int32_t * ids = (device const int32_t *) src6; + + device const float * s0_buff = (device const float *) ((device const char *) src0 + ir*args.nb02 + ids[i3]*args.nb03); + device float * s_buff = (device float *) ((device char *) dst + ir*args.nb02 + i3*args.nb03 + s_off); + const int64_t i = i0 + i1*nc; + float s0 = s0_buff[i]; + float s = s_buff[i]; + + device const float * A = (device const float *) ((device const char *) src3 + ir*args.nb31); + device const float * x_block = (device const float *) ((device const char *) src1 + i1*nb10 + ir*args.nb11 + i3*args.nb13); + device const float * dt_block = (device const float *) ((device const char *) src2 + ir*nb20 + i3*args.nb22); + device const float * B_block = (device const float *) ((device const char *) src4 + (ir & (ng - 1))*args.nb41 + i3*args.nb43); + device const float * C_block = (device const float *) ((device const char *) src5 + (ir & (ng - 1))*args.nb51 + i3*args.nb53); + device float * y_block = (device float *) ((device char *) dst + (i1 + ir*(nr) + i3*(n_t*nh*nr))*nb00); for (int64_t i2 = 0; i2 < n_t; ++i2) { - device const float * s0 = (device const float *) ((device const char *) src0 + ir*args.nb01 + i3*args.nb02); - device const float * x = (device const float *) ((device const char *) src1 + ir*args.nb10 + i2*args.nb11 + i3*args.nb12); - device const float * dt = (device const float *) ((device 
const char *) src2 + ir*args.nb20 + i2*args.nb21 + i3*args.nb22); - device const float * A = (device const float *) ((device const char *) src3 + ir*args.nb31); - device const float * B = (device const float *) ((device const char *) src4 + i2*args.nb41 + i3*args.nb42); - device const float * C = (device const float *) ((device const char *) src5 + i2*args.nb51 + i3*args.nb52); - device float * y = (device float *) ((device char *) dst + ir*args.nb10 + i2*args.nb11 + i3*args.nb12); // TODO: do not use src1 strides - device float * s = (device float *) ((device char *) dst + ir*args.nb01 + i3*args.nb02 + args.nb13); + device const float * x = (device const float *) ((device const char *) x_block + i2*args.nb12); // {dim, nh, nt, ns} + device const float * dt = (device const float *) ((device const char *) dt_block + i2*args.nb21); // {nh, nt, ns} + device const float * B = (device const float *) ((device const char *) B_block + i2*args.nb42); // {d_state, ng, nt, ns} + device const float * C = (device const float *) ((device const char *) C_block + i2*args.nb52); // {d_state, ng, nt, ns} + device float * y = (device float *) ((device char *) y_block + i2*(nh*nr*nb00)); // {dim, nh, nt, ns} - if (i2 > 0) { - s0 = s; + const float dt_soft_plus = dt[0] <= 20.0f ? log(1.0f + exp(dt[0])) : dt[0]; + const float x_dt = x[0] * dt_soft_plus; + + const float state = (s0 * exp(dt_soft_plus * A[i0])) + (B[i0] * x_dt); + s = state; + + // Parallel sum: This relies on the fact that this kernel will be + // dispatched with each threadgroup having (d_state, 1, 1) threads which + // are subdivided into SIMD groups of size `sgptg`. The goal is to + // compute y = sum({state * C[i] for i in range(d_state)}). + // To parallelize this effectively, we first use simd_sum over each SIMD + // group to compute the sum of each SIMD group, then place the result in + // the SIMD group's indexed bucket in the shared memory. We then sum + // over the individual group sums to compute the final sum. + + // Computed for each thread + float sumf = state * C[i0]; + + // Sum the threads in the simd group => simd sum + sumf = simd_sum(sumf); + + if (sgptg > 1) { + + // Once per simd group, place the group sum into the shared buffer + if (tiisg == 0) { + shared[sgitg] = sumf; + } + + // Wait for all threads in the threadgroup to reach this point. This + // ensures that all elements of the shared buffer are populated with the + // sum of the individual simd groups. + threadgroup_barrier(mem_flags::mem_threadgroup); + + // For simd group 0 at indices < num simd groups, extract the shared + // simd sum + sumf = 0.0f; + if (sgitg == 0) { + if (tiisg < sgptg) { + sumf = shared[tiisg]; + } + sumf = simd_sum(sumf); + if (tiisg == 0) { + y[0] = sumf; + } + } + } else if (tiisg == 0) { + y[0] = sumf; } - // i1 == 0 - float dt_soft_plus = dt[0] <= 20.0f ? 
log(1.0f + exp(dt[0])) : dt[0]; - float x_dt = x[0] * dt_soft_plus; - float sumf = 0.0f; - - for (int64_t i0 = 0; i0 < nc; ++i0) { - int64_t i = i0; - float state = (s0[i] * exp(dt_soft_plus * A[i])) + (B[i0] * x_dt); - sumf += state * C[i0]; - s[i] = state; - } - - y[0] = sumf; + // recurse + s0 = s; } + + // Assign the final state to the output buffer + s_buff[i] = s; +} + +// ref: ggml.c:ggml_compute_forward_ssm_scan_f32, Mamba-2 part +kernel void kernel_ssm_scan_f32_group( + device const void * src0, + device const void * src1, + device const void * src2, + device const void * src3, + device const void * src4, + device const void * src5, + device const void * src6, + device float * dst, + threadgroup float * shared [[threadgroup(0)]], + constant ggml_metal_kargs_ssm_scan & args, + uint3 tgpig[[threadgroup_position_in_grid]], + uint3 tpitg[[thread_position_in_threadgroup]], + ushort sgitg[[simdgroup_index_in_threadgroup]], + ushort tiisg[[thread_index_in_simdgroup]], + ushort sgptg[[simdgroups_per_threadgroup]], + uint3 tgpg[[threadgroups_per_grid]]) { + + const int64_t i0 = tpitg.x; + const int64_t i1 = tgpig.x; + const int64_t ir = tgpig.y; // current head + const int64_t i3 = tgpig.z; // current seq + + const uint64_t nb00 = sizeof(float); + const uint64_t nb10 = sizeof(float); + const uint64_t nb20 = sizeof(float); + + const int64_t nc = args.d_state; + const int64_t nr = args.d_inner; + const int64_t nh = args.n_head; + const int64_t ng = args.n_group; + const int64_t n_t = args.n_seq_tokens; + + const int64_t s_off = args.s_off; + + device const int32_t * ids = (device const int32_t *) src6; + + device const float * s0_buff = (device const float *) ((device const char *) src0 + ir*args.nb02 + ids[i3]*args.nb03); + device float * s_buff = (device float *) ((device char *) dst + ir*args.nb02 + i3*args.nb03 + s_off); + const int64_t i = i0 + i1*nc; + float s0 = s0_buff[i]; + float s = s_buff[i]; + + device const float * A = (device const float *) ((device const char *) src3 + ir*args.nb31); // {1, nh} + device const float * x_block = (device const float *) ((device const char *) src1 + i1*nb10 + ir*args.nb11 + i3*args.nb13); + device const float * dt_block = (device const float *) ((device const char *) src2 + ir*nb20 + i3*args.nb22); + device const float * B_block = (device const float *) ((device const char *) src4 + (ir & (ng - 1))*args.nb41 + i3*args.nb43); + device const float * C_block = (device const float *) ((device const char *) src5 + (ir & (ng - 1))*args.nb51 + i3*args.nb53); + device float * y_block = (device float *) ((device char *) dst + (i1 + ir*(nr) + i3*(n_t*nh*nr))*nb00); + + for (int64_t i2 = 0; i2 < n_t; ++i2) { + device const float * x = (device const float *) ((device const char *) x_block + i2*args.nb12); // {dim, nh, nt, ns} + device const float * dt = (device const float *) ((device const char *) dt_block + i2*args.nb21); // {nh, nt, ns} + device const float * B = (device const float *) ((device const char *) B_block + i2*args.nb42); // {d_state, ng, nt, ns} + device const float * C = (device const float *) ((device const char *) C_block + i2*args.nb52); // {d_state, ng, nt, ns} + device float * y = (device float *) ((device char *) y_block + i2*(nh*nr*nb00)); // {dim, nh, nt, ns} + + const float dt_soft_plus = dt[0] <= 20.0f ? 
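// softplus(dt) = log(1 + exp(dt)); for dt > 20 the difference between
// softplus(dt) and dt is about 2e-9, far below f32 resolution at that
// magnitude, so the identity branch is exact in practice and skips a
// needless exp().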
log(1.0f + exp(dt[0])) : dt[0]; + const float x_dt = x[0] * dt_soft_plus; + const float dA = exp(dt_soft_plus * A[0]); + + const float state = (s0 * dA) + (B[i0] * x_dt); + s = state; + + // Parallel sum: This relies on the fact that this kernel will be + // dispatched with each threadgroup having (d_state, 1, 1) threads which + // are subdivided into SIMD groups of size `sgptg`. The goal is to + // compute y = sum({state * C[i] for i in range(d_state)}). + // To parallelize this effectively, we first use simd_sum over each SIMD + // group to compute the sum of each SIMD group, then place the result in + // the SIMD group's indexed bucket in the shared memory. We then sum + // over the individual group sums to compute the final sum. + + // Computed for each thread + float sumf = state * C[i0]; + + // Sum the threads in the simd group => simd sum + sumf = simd_sum(sumf); + + // Once per simd group, place the group sum into the shared buffer + if (tiisg == 0) { + shared[sgitg] = sumf; + } + + // Wait for all threads in the threadgroup to reach this point. This + // ensures that all elements of the shared buffer are populated with the + // sum of the individual simd groups. + threadgroup_barrier(mem_flags::mem_threadgroup); + + // For simd group 0 at indices < num simd groups, extract the shared + // simd sum + sumf = 0.0f; + if (sgitg == 0) { + if (tiisg < sgptg) { + sumf = shared[tiisg]; + } + sumf = simd_sum(sumf); + if (tiisg == 0) { + y[0] = sumf; + } + } + + // recurse + s0 = s; + } + + // Assign the final state to the output buffer + s_buff[i] = s; } kernel void kernel_rwkv_wkv6_f32( @@ -1639,26 +2451,39 @@ kernel void kernel_norm( } } -kernel void kernel_rms_norm( +// F == 1 : rms_norm (no fuse) +// F == 2 : rms_norm + mul +// F == 3 : rms_norm + mul + add +template +kernel void kernel_rms_norm_fuse_impl( constant ggml_metal_kargs_rms_norm & args, device const char * src0, + device const char * src1_0, + device const char * src1_1, device char * dst, threadgroup float * shmem_f32 [[threadgroup(0)]], - uint tgpig[[threadgroup_position_in_grid]], - ushort tpitg[[thread_position_in_threadgroup]], - ushort sgitg[[simdgroup_index_in_threadgroup]], - ushort tiisg[[thread_index_in_simdgroup]], - ushort ntg[[threads_per_threadgroup]]) { + uint3 tgpig[[threadgroup_position_in_grid]], + ushort3 tpitg[[thread_position_in_threadgroup]], + ushort sgitg[[simdgroup_index_in_threadgroup]], + ushort tiisg[[thread_index_in_simdgroup]], + ushort3 ntg[[threads_per_threadgroup]]) { if (sgitg == 0) { shmem_f32[tiisg] = 0.0f; } - device const float4 * x = (device const float4 *) (src0 + tgpig*args.nb01); + const int i01 = tgpig.x; + const int i02 = tgpig.y; + const int i03 = tgpig.z; + + device const float4 * x = (device const float4 *) (src0 + i03*args.nbf3[0] + i02*args.nbf2[0] + i01*args.nbf1[0]); + + device const float4 * f0 = (device const float4 *) (src1_0 + (i03%args.nef3[1])*args.nbf3[1] + (i02%args.nef2[1])*args.nbf2[1] + (i01%args.nef1[1])*args.nbf1[1]); + device const float4 * f1 = (device const float4 *) (src1_1 + (i03%args.nef3[2])*args.nbf3[2] + (i02%args.nef2[2])*args.nbf2[2] + (i01%args.nef1[2])*args.nbf1[2]); float sumf = 0.0f; // parallel sum - for (int i00 = tpitg; i00 < args.ne00_4; i00 += ntg) { + for (int i00 = tpitg.x; i00 < args.ne00_4; i00 += ntg.x) { sumf += dot(x[i00], x[i00]); } sumf = simd_sum(sumf); @@ -1677,12 +2502,26 @@ kernel void kernel_rms_norm( const float mean = sumf/args.ne00; const float scale = 1.0f/sqrt(mean + args.eps); - device float4 * y = (device float4 *) dst + 
tgpig*args.ne00_4; - for (int i00 = tpitg; i00 < args.ne00_4; i00 += ntg) { - y[i00] = x[i00] * scale; + device float4 * y = (device float4 *) (dst + i03*args.nb3 + i02*args.nb2 + i01*args.nb1); + for (int i00 = tpitg.x; i00 < args.ne00_4; i00 += ntg.x) { + if (F == 1) { + y[i00] = (x[i00]*scale); + } + if (F == 2) { + y[i00] = (x[i00]*scale)*f0[i00]; + } + if (F == 3) { + y[i00] = (x[i00]*scale)*f0[i00] + f1[i00]; + } } } +typedef decltype(kernel_rms_norm_fuse_impl<1>) kernel_rms_norm_fuse_t; + +template [[host_name("kernel_rms_norm")]] kernel kernel_rms_norm_fuse_t kernel_rms_norm_fuse_impl<1>; +template [[host_name("kernel_rms_norm_mul")]] kernel kernel_rms_norm_fuse_t kernel_rms_norm_fuse_impl<2>; +template [[host_name("kernel_rms_norm_mul_add")]] kernel kernel_rms_norm_fuse_t kernel_rms_norm_fuse_impl<3>; + kernel void kernel_l2_norm( constant ggml_metal_kargs_l2_norm & args, device const char * src0, @@ -1902,16 +2741,16 @@ void mul_vec_q_n_f32_impl( device const char * src1, device char * dst, threadgroup char * shmem, - uint3 tgpig, // Threadgroup Position in Grid - ushort tiisg, // Thread Index in SIMD Group - ushort sgitg) { // SIMD Group Index in ThreadGroup - const int nb = args.ne00/QK4_0; // src0->ne[0] / 32 + uint3 tgpig, + ushort tiisg, + ushort sgitg) { + const int nb = args.ne00/QK4_0; const int r0 = tgpig.x; const int r1 = tgpig.y; const int im = tgpig.z; - const int first_row = (r0 * nsg + sgitg) * nr0; // nsg=2 nr0=4 + const int first_row = (r0 * nsg + sgitg) * nr0; const uint i12 = im%args.ne12; const uint i13 = im/args.ne12; @@ -2377,6 +3216,11 @@ template [[host_name("kernel_mul_mv_ext_q8_0_f32_r1_3")]] kernel mul_mv_ext_q4 template [[host_name("kernel_mul_mv_ext_q8_0_f32_r1_4")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<4, block_q8_0, 32, dequantize_q8_0_t4>; template [[host_name("kernel_mul_mv_ext_q8_0_f32_r1_5")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<5, block_q8_0, 32, dequantize_q8_0_t4>; +template [[host_name("kernel_mul_mv_ext_mxfp4_f32_r1_2")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<2, block_mxfp4, 32, dequantize_mxfp4_t4>; +template [[host_name("kernel_mul_mv_ext_mxfp4_f32_r1_3")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<3, block_mxfp4, 32, dequantize_mxfp4_t4>; +template [[host_name("kernel_mul_mv_ext_mxfp4_f32_r1_4")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<4, block_mxfp4, 32, dequantize_mxfp4_t4>; +template [[host_name("kernel_mul_mv_ext_mxfp4_f32_r1_5")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<5, block_mxfp4, 32, dequantize_mxfp4_t4>; + template [[host_name("kernel_mul_mv_ext_iq4_nl_f32_r1_2")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<2, block_iq4_nl, 32, dequantize_iq4_nl_t4>; template [[host_name("kernel_mul_mv_ext_iq4_nl_f32_r1_3")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<3, block_iq4_nl, 32, dequantize_iq4_nl_t4>; template [[host_name("kernel_mul_mv_ext_iq4_nl_f32_r1_4")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<4, block_iq4_nl, 32, dequantize_iq4_nl_t4>; @@ -2495,6 +3339,70 @@ template [[host_name("kernel_mul_mv_bf16_f32")]] kernel mul_mv_t kernel_mul_mv< template [[host_name("kernel_mul_mv_bf16_bf16")]] kernel mul_mv_t kernel_mul_mv; #endif +template +void kernel_mul_mv_c4_impl( + args_t args, + device const char * src0, + device const char * src1, + device char * dst, + uint3 tgpig, + ushort tiisg) { + const int r0 = tgpig.x*32 + tiisg; + const int rb = tgpig.y*N_MV_T_T; + const int im = 
tgpig.z; + + if (r0 >= args.ne01) { + return; + } + + const uint i12 = im%args.ne12; + const uint i13 = im/args.ne12; + + const uint64_t offset0 = r0*args.nb01 + (i12/args.r2)*args.nb02 + (i13/args.r3)*args.nb03; + + device const T04 * x = (device const T04 *) (src0 + offset0); + + device float * dst_f32 = (device float *) dst + (uint64_t)im*args.ne0*args.ne1; + + for (int row = 0; row < N_MV_T_T; ++row) { + int r1 = rb + row; + if (r1 >= args.ne11) { + break; + } + + const uint64_t offset1 = r1*args.nb11 + (i12 )*args.nb12 + (i13 )*args.nb13; + + device const T14 * y = (device const T14 *) (src1 + offset1); + + dst_f32[(uint64_t)r1*args.ne0 + r0] = dot((float4) x[0], (float4) y[0]); + } +} + +template +kernel void kernel_mul_mv_c4( + constant ggml_metal_kargs_mul_mv & args, + device const char * src0, + device const char * src1, + device char * dst, + uint3 tgpig[[threadgroup_position_in_grid]], + ushort tiisg[[thread_index_in_simdgroup]]) { + kernel_mul_mv_c4_impl( + args, + src0, + src1, + dst, + tgpig, + tiisg); +} + +typedef decltype(kernel_mul_mv_c4) mul_mv_c4_t; + +template [[host_name("kernel_mul_mv_f32_f32_c4")]] kernel mul_mv_c4_t kernel_mul_mv_c4; +template [[host_name("kernel_mul_mv_f16_f32_c4")]] kernel mul_mv_c4_t kernel_mul_mv_c4; +#if defined(GGML_METAL_USE_BF16) +template [[host_name("kernel_mul_mv_bf16_f32_c4")]] kernel mul_mv_c4_t kernel_mul_mv_c4; +#endif + template kernel void kernel_mul_mv_1row( constant ggml_metal_kargs_mul_mv & args, @@ -3285,7 +4193,7 @@ template< typename kd4x4_t, // key type in device memory short nl_k, void (*deq_k)(device const kd4x4_t *, short, thread k4x4_t &), - typename vd4x4_t, // key type in device memory + typename vd4x4_t, // value type in device memory short nl_v, void (*deq_v)(device const vd4x4_t *, short, thread v4x4_t &), short DK, // K head size @@ -3299,6 +4207,7 @@ kernel void kernel_flash_attn_ext( device const char * k, device const char * v, device const char * mask, + device const char * sinks, device char * dst, threadgroup half * shmem_f16 [[threadgroup(0)]], uint3 tgpig[[threadgroup_position_in_grid]], @@ -3321,14 +4230,12 @@ kernel void kernel_flash_attn_ext( constexpr short NW = N_SIMDWIDTH; constexpr short SH = (2*C + Q); // shared memory per simdgroup (s_t == float) - const short TS = nsg*SH; // shared memory size per query in (s_t == float) - const short T = DK + 2*TS; // shared memory size per query in (half) + const short TS = nsg*SH; // shared memory size per query in (s_t == float) + const short T = 2*DK + 2*TS; // shared memory size per query in (half) - threadgroup q_t * sq = (threadgroup q_t *) (shmem_f16 + 0*DK); // holds the query data - threadgroup q4_t * sq4 = (threadgroup q4_t *) (shmem_f16 + 0*DK); // same as above but in q4_t - threadgroup o_t * so = (threadgroup o_t *) (shmem_f16 + 0*DK); // reuse query data for accumulation - threadgroup o4_t * so4 = (threadgroup o4_t *) (shmem_f16 + 0*DK); // same as above but in o4_t - threadgroup s_t * ss = (threadgroup s_t *) (shmem_f16 + 2*sgitg*SH + Q*DK); // scratch buffer for attention, mask and diagonal matrix + threadgroup q_t * sq = (threadgroup q_t *) (shmem_f16 + 0*DK); // holds the query data + threadgroup q4_t * sq4 = (threadgroup q4_t *) (shmem_f16 + 0*DK); // same as above but in q4_t + threadgroup s_t * ss = (threadgroup s_t *) (shmem_f16 + 2*sgitg*SH + 2*Q*DK); // scratch buffer for attention, mask and diagonal matrix threadgroup k_t * sk = (threadgroup k_t *) (shmem_f16 + sgitg*(4*16*KV) + Q*T); // scratch buffer to load K in shared memory 
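// shared-memory layout, per my reading of this revision: the leading
// 2*Q*DK halves hold the query and are later reinterpreted as Q*DV floats
// ('so' below) so the output can be accumulated in f32 -- hence
// T = 2*DK + 2*TS above, doubling the old per-query DK budget, and the
// 'ss' scratch region now starts at 2*Q*DK instead of Q*DK.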
threadgroup k4x4_t * sk4x4 = (threadgroup k4x4_t *) (shmem_f16 + sgitg*(4*16*KV) + Q*T); // same as above but in k4x4_t @@ -3347,7 +4254,7 @@ kernel void kernel_flash_attn_ext( if (iq1 + j < args.ne01) { sq4[j*DK4 + i] = (q4_t) q4[i]; } else { - sq4[j*DK4 + i] = (q4_t) 0.0f; + sq4[j*DK4 + i] = 0; } } } @@ -3412,7 +4319,7 @@ kernel void kernel_flash_attn_ext( // load the mask in shared memory #pragma unroll(Q) for (short j = 0; j < Q; ++j) { - device const half * pm = (device const half *) ((device const char *) mask + (iq1 + j)*args.nb31); + device const half * pm = (device const half *) ((device const char *) mask + (iq1 + j)*args.nb31 + (iq2%args.ne32)*args.nb32 + (iq3%args.ne33)*args.nb33); const float m = pm[ic + tiisg]; @@ -3541,20 +4448,20 @@ kernel void kernel_flash_attn_ext( // O = diag(ms)*O { - s8x8_t mm; - simdgroup_load(mm, ss + 2*C, TS, 0, false); + s8x8_t ms; + simdgroup_load(ms, ss + 2*C, TS, 0, false); #pragma unroll(DV8) for (short i = 0; i < DV8; ++i) { - simdgroup_multiply(lo[i], mm, lo[i]); + simdgroup_multiply(lo[i], ms, lo[i]); } } // O = O + (Q*K^T)*V { for (short cc = 0; cc < C/8; ++cc) { - s8x8_t ms; - simdgroup_load(ms, ss + 8*cc, TS, 0, false); + s8x8_t vs; + simdgroup_load(vs, ss + 8*cc, TS, 0, false); if (is_same::value) { // we can read directly from global memory @@ -3565,7 +4472,7 @@ kernel void kernel_flash_attn_ext( v8x8_t mv; simdgroup_load(mv, pv + i*8, args.nb21/sizeof(v_t), 0, false); // TODO: use ne20 - simdgroup_multiply_accumulate(lo[i], ms, mv, lo[i]); + simdgroup_multiply_accumulate(lo[i], vs, mv, lo[i]); } } else { for (short ii = 0; ii < DV16; ii += 4) { @@ -3586,10 +4493,10 @@ kernel void kernel_flash_attn_ext( v8x8_t mv; simdgroup_load(mv, sv + 16*k + 0*8, 4*16, 0, false); - simdgroup_multiply_accumulate(lo[2*(ii + k) + 0], ms, mv, lo[2*(ii + k) + 0]); + simdgroup_multiply_accumulate(lo[2*(ii + k) + 0], vs, mv, lo[2*(ii + k) + 0]); simdgroup_load(mv, sv + 16*k + 1*8, 4*16, 0, false); - simdgroup_multiply_accumulate(lo[2*(ii + k) + 1], ms, mv, lo[2*(ii + k) + 1]); + simdgroup_multiply_accumulate(lo[2*(ii + k) + 1], vs, mv, lo[2*(ii + k) + 1]); } } else { if (ii + tx < DV16) { @@ -3604,10 +4511,10 @@ kernel void kernel_flash_attn_ext( v8x8_t mv; simdgroup_load(mv, sv + 16*k + 0*8, 4*16, 0, false); - simdgroup_multiply_accumulate(lo[2*(ii + k) + 0], ms, mv, lo[2*(ii + k) + 0]); + simdgroup_multiply_accumulate(lo[2*(ii + k) + 0], vs, mv, lo[2*(ii + k) + 0]); simdgroup_load(mv, sv + 16*k + 1*8, 4*16, 0, false); - simdgroup_multiply_accumulate(lo[2*(ii + k) + 1], ms, mv, lo[2*(ii + k) + 1]); + simdgroup_multiply_accumulate(lo[2*(ii + k) + 1], vs, mv, lo[2*(ii + k) + 1]); } } } @@ -3616,94 +4523,119 @@ kernel void kernel_flash_attn_ext( } } - // these are needed for reducing the results from the simdgroups (reuse the ss buffer) - for (short j = 0; j < Q; ++j) { - if (tiisg == 0) { - ss[j*TS + 0] = S[j]; - ss[j*TS + 1] = M[j]; + if (sinks != q && sgitg == 0) { + for (ushort j = 0; j < Q; ++j) { + const float m = M[j]; + const float s = tiisg == 0 ? 
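// attention sinks: lane 0 injects the per-head sink logit as one extra
// virtual score, while the other lanes contribute -FLT_MAX/2 so that
// after simd_max/simd_sum the update is M' = max(M, s) and
// S' = S*exp(M - M') + exp(s - M') -- a single extra term folded into
// the online softmax; the output accumulators are rescaled by ms below.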
((device const float *) sinks)[iq2] : -FLT_MAX/2; + + M[j] = simd_max(max(M[j], s)); + + const float ms = exp(m - M[j]); + const float vs = exp(s - M[j]); + + S[j] = S[j]*ms + simd_sum(vs); + + if (tiisg == j) { + ss[j*TS + 2*C + j] = ms; + } } + + // O = diag(ms)*O + { + s8x8_t ms; + simdgroup_load(ms, ss + 2*C, TS, 0, false); + + #pragma unroll(DV8) + for (short i = 0; i < DV8; ++i) { + simdgroup_multiply(lo[i], ms, lo[i]); + } + } + } + + // these are needed for reducing the results from the simdgroups (reuse the ss buffer) + for (short j = tiisg; j < Q; j += NW) { + ss[j*TS + 0] = S[j]; + ss[j*TS + 1] = M[j]; } } + threadgroup_barrier(mem_flags::mem_threadgroup); + + threadgroup float * so = (threadgroup float *) (shmem_f16 + 0*DK); // reuse query data for accumulation + threadgroup float4 * so4 = (threadgroup float4 *) (shmem_f16 + 0*DK); + + // store result to shared memory in F32 + if (sgitg == 0) { + for (short i = 0; i < DV8; ++i) { + //simdgroup_store(lo[i], so + i*8, DV, 0, false); + simdgroup_float8x8 t(1.0f); + simdgroup_multiply(t, lo[i], t); + simdgroup_store(t, so + i*8, DV, 0, false); + } + } + + threadgroup_barrier(mem_flags::mem_threadgroup); + // reduce the warps sequentially for (ushort sg = 1; sg < nsg; ++sg) { - float S = { 0.0f }; - float M = { -__FLT_MAX__/2 }; - - threadgroup_barrier(mem_flags::mem_threadgroup); - - // each simdgroup stores its output to shared memory, reusing sq if (sgitg == sg) { - for (short i = 0; i < DV8; ++i) { - simdgroup_store(lo[i], so + i*8, DV, 0, false); + for (short j = tiisg; j < Q; j += NW) { + const float S0 = ss[j*TS - 1*SH + 0]; + const float S1 = ss[j*TS + 0]; + + const float M0 = ss[j*TS - 1*SH + 1]; + const float M1 = ss[j*TS + 1]; + + const float M = max(M0, M1); + + float ms0 = exp(M0 - M); + float ms1 = exp(M1 - M); + + const float S = S0*ms0 + S1*ms1; + + ss[j*TS + 0] = S; + ss[j*TS + 1] = M; + + ss[j*TS + 2*C + j - 1*SH] = ms0; + ss[j*TS + 2*C + j ] = ms1; } - } - threadgroup_barrier(mem_flags::mem_threadgroup); - - // the first simdgroup accumulates the results from the other simdgroups - if (sgitg == 0) { - for (short j = 0; j < Q; ++j) { - const float S0 = ss[j*TS + 0]; - const float S1 = ss[j*TS + sg*SH + 0]; - - const float M0 = ss[j*TS + 1]; - const float M1 = ss[j*TS + sg*SH + 1]; - - M = max(M0, M1); - - const float ms0 = exp(M0 - M); - const float ms1 = exp(M1 - M); - - S = S0*ms0 + S1*ms1; - - if (tiisg == 0) { - ss[j*TS + 0] = S; - ss[j*TS + 1] = M; - - ss[j*TS + 2*C + j ] = ms0; - ss[j*TS + 2*C + j + sg*SH] = ms1; - } - } + //simdgroup_barrier(mem_flags::mem_threadgroup); // O_0 = diag(ms0)*O_0 + diag(ms1)*O_1 { s8x8_t ms0; s8x8_t ms1; - simdgroup_load(ms0, ss + 2*C, TS, 0, false); - simdgroup_load(ms1, ss + 2*C + sg*SH, TS, 0, false); + simdgroup_load(ms0, ss + 2*C - 1*SH, TS, 0, false); + simdgroup_load(ms1, ss + 2*C, TS, 0, false); #pragma unroll(DV8) for (short i = 0; i < DV8; ++i) { - o8x8_t t; + simdgroup_float8x8 t; simdgroup_load (t, so + i*8, DV, 0, false); - simdgroup_multiply(t, ms1, t); + simdgroup_multiply(t, ms0, t); - simdgroup_multiply_accumulate(lo[i], ms0, lo[i], t); + simdgroup_multiply_accumulate(t, ms1, lo[i], t); + simdgroup_store(t, so + i*8, DV, 0, false); } } } + + threadgroup_barrier(mem_flags::mem_threadgroup); } - // store result to shared memory (reuse sq) - if (sgitg == 0) { - for (short i = 0; i < DV8; ++i) { - simdgroup_store(lo[i], so + i*8, DV, 0, false); - } - } - - device float4 * dst4 = (device float4 *) dst; + threadgroup s_t * sf = (threadgroup s_t *) (shmem_f16 + 
2*(nsg-1)*SH + 2*Q*DK); // final rescale with 1/S and store to global memory - if (sgitg == 0) { - for (short j = 0; j < Q && iq1 + j < args.ne01; ++j) { - const float S = ss[j*TS + 0]; + for (short j = sgitg; j < Q && iq1 + j < args.ne01; j += nsg) { + const float S = 1.0f/sf[j*TS + 0]; - for (short i = tiisg; i < DV4; i += NW) { - dst4[((uint64_t)iq3*args.ne2*args.ne1 + iq2 + (uint64_t)(iq1 + j)*args.ne1)*DV4 + i] = (float4) so4[j*DV4 + i]/S; - } + device float4 * dst4 = (device float4 *) dst + ((uint64_t)iq3*args.ne2*args.ne1 + iq2 + (uint64_t)(iq1 + j)*args.ne1)*DV4; + + for (short i = tiisg; i < DV4; i += NW) { + dst4[i] = (float4) so4[j*DV4 + i]*S; } } } @@ -3712,12 +4644,22 @@ kernel void kernel_flash_attn_ext( // template to be able to explore different combinations // #define FA_TYPES \ - half, half4, simdgroup_half8x8, \ - half, half4x4, simdgroup_half8x8, \ - half, half4x4, simdgroup_half8x8, \ - float, simdgroup_float8x8, \ - float, simdgroup_float8x8, \ - half, half4, simdgroup_half8x8 + float, float4, simdgroup_float8x8, \ + half, half4x4, simdgroup_half8x8, \ + half, half4x4, simdgroup_half8x8, \ + float, simdgroup_float8x8, \ + float, simdgroup_float8x8, \ + half, half4, simdgroup_half8x8 + //float, float4, simdgroup_float8x8 + +#define FA_TYPES_BF \ + bfloat, bfloat4, simdgroup_bfloat8x8, \ + bfloat, bfloat4x4, simdgroup_bfloat8x8, \ + bfloat, bfloat4x4, simdgroup_bfloat8x8, \ + float, simdgroup_float8x8, \ + float, simdgroup_float8x8, \ + half, half4, simdgroup_half8x8 + //float, float4, simdgroup_float8x8 typedef decltype(kernel_flash_attn_ext) flash_attn_ext_t; @@ -3732,15 +4674,15 @@ template [[host_name("kernel_flash_attn_ext_f16_h256")]] kernel flash_at template [[host_name("kernel_flash_attn_ext_f16_hk576_hv512")]] kernel flash_attn_ext_t kernel_flash_attn_ext; #if defined(GGML_METAL_USE_BF16) -template [[host_name("kernel_flash_attn_ext_bf16_h64" )]] kernel flash_attn_ext_t kernel_flash_attn_ext; -template [[host_name("kernel_flash_attn_ext_bf16_h80" )]] kernel flash_attn_ext_t kernel_flash_attn_ext; -template [[host_name("kernel_flash_attn_ext_bf16_h96" )]] kernel flash_attn_ext_t kernel_flash_attn_ext; -template [[host_name("kernel_flash_attn_ext_bf16_h112")]] kernel flash_attn_ext_t kernel_flash_attn_ext; -template [[host_name("kernel_flash_attn_ext_bf16_h128")]] kernel flash_attn_ext_t kernel_flash_attn_ext; -template [[host_name("kernel_flash_attn_ext_bf16_h192")]] kernel flash_attn_ext_t kernel_flash_attn_ext; -template [[host_name("kernel_flash_attn_ext_bf16_hk192_hv128")]] kernel flash_attn_ext_t kernel_flash_attn_ext; -template [[host_name("kernel_flash_attn_ext_bf16_h256")]] kernel flash_attn_ext_t kernel_flash_attn_ext; -template [[host_name("kernel_flash_attn_ext_bf16_hk576_hv512")]] kernel flash_attn_ext_t kernel_flash_attn_ext; +template [[host_name("kernel_flash_attn_ext_bf16_h64" )]] kernel flash_attn_ext_t kernel_flash_attn_ext; +template [[host_name("kernel_flash_attn_ext_bf16_h80" )]] kernel flash_attn_ext_t kernel_flash_attn_ext; +template [[host_name("kernel_flash_attn_ext_bf16_h96" )]] kernel flash_attn_ext_t kernel_flash_attn_ext; +template [[host_name("kernel_flash_attn_ext_bf16_h112")]] kernel flash_attn_ext_t kernel_flash_attn_ext; +template [[host_name("kernel_flash_attn_ext_bf16_h128")]] kernel flash_attn_ext_t kernel_flash_attn_ext; +template [[host_name("kernel_flash_attn_ext_bf16_h192")]] kernel flash_attn_ext_t kernel_flash_attn_ext; +template [[host_name("kernel_flash_attn_ext_bf16_hk192_hv128")]] kernel flash_attn_ext_t 
kernel_flash_attn_ext; +template [[host_name("kernel_flash_attn_ext_bf16_h256")]] kernel flash_attn_ext_t kernel_flash_attn_ext; +template [[host_name("kernel_flash_attn_ext_bf16_hk576_hv512")]] kernel flash_attn_ext_t kernel_flash_attn_ext; #endif template [[host_name("kernel_flash_attn_ext_q4_0_h64" )]] kernel flash_attn_ext_t kernel_flash_attn_ext; @@ -3794,6 +4736,7 @@ template [[host_name("kernel_flash_attn_ext_q8_0_h256")]] kernel flash_at template [[host_name("kernel_flash_attn_ext_q8_0_hk576_hv512")]] kernel flash_attn_ext_t kernel_flash_attn_ext; #undef FA_TYPES +#undef FA_TYPES_BF template< typename q4_t, // query types in shared memory @@ -3806,7 +4749,7 @@ template< typename kd4_t, // key type in device memory short nl_k, void (*deq_k_t4)(device const kd4_t *, short, thread k4_t &), - typename vd4_t, // key type in device memory + typename vd4_t, // value type in device memory short nl_v, void (*deq_v_t4)(device const vd4_t *, short, thread v4_t &), short DK, // K head size @@ -3820,6 +4763,7 @@ kernel void kernel_flash_attn_ext_vec( device const char * k, device const char * v, device const char * mask, + device const char * sinks, device char * dst, threadgroup half * shmem_f16 [[threadgroup(0)]], uint3 tgpig[[threadgroup_position_in_grid]], @@ -3840,12 +4784,12 @@ kernel void kernel_flash_attn_ext_vec( const short T = DK + nsg*SH; // shared memory size per query in (half) - //threadgroup q_t * sq = (threadgroup q_t *) (shmem_f16 + 0*DK); // holds the query data - threadgroup q4_t * sq4 = (threadgroup q4_t *) (shmem_f16 + 0*DK); // same as above but in q4_t - threadgroup s_t * ss = (threadgroup s_t *) (shmem_f16 + sgitg*SH + Q*DK); // scratch buffer for attention - threadgroup s4_t * ss4 = (threadgroup s4_t *) (shmem_f16 + sgitg*SH + Q*DK); // same as above but in s4_t - threadgroup float * sm = (threadgroup float *) (shmem_f16 + sgitg*SH + 2*C + Q*DK); // scratch buffer for mask - threadgroup o4_t * sr4 = (threadgroup o4_t *) (shmem_f16 + sgitg*DV + Q*T); // scratch buffer for the results + //threadgroup q_t * sq = (threadgroup q_t *) (shmem_f16 + 0*DK); // holds the query data + threadgroup q4_t * sq4 = (threadgroup q4_t *) (shmem_f16 + 0*DK); // same as above but in q4_t + threadgroup s_t * ss = (threadgroup s_t *) (shmem_f16 + sgitg*SH + Q*DK); // scratch buffer for attention + threadgroup s4_t * ss4 = (threadgroup s4_t *) (shmem_f16 + sgitg*SH + Q*DK); // same as above but in s4_t + threadgroup float * sm = (threadgroup float *) (shmem_f16 + sgitg*SH + 2*C + Q*DK); // scratch buffer for mask + threadgroup o4_t * sr4 = (threadgroup o4_t *) (shmem_f16 + 2*sgitg*DV + Q*T); // scratch buffer for the results // store the result for all queries in local memory (the O matrix from the paper) o4_t lo[DV4/NL]; @@ -3891,7 +4835,7 @@ kernel void kernel_flash_attn_ext_vec( const bool has_mask = mask != q; // pointer to the mask - device const half * pm = (device const half *) (mask + iq1*args.nb31); + device const half * pm = (device const half *) (mask + iq1*args.nb31 + (iq2%args.ne32)*args.nb32 + (iq3%args.ne33)*args.nb33); float slope = 1.0f; @@ -3917,6 +4861,11 @@ kernel void kernel_flash_attn_ext_vec( sm[tiisg] = pm[ic + tiisg]; } + // skip -INF blocks + if (simd_max(sm[tiisg]) == -INFINITY) { + continue; + } + // Q*K^T { // each simdgroup processes 1 query and NE (NW/NL) head elements @@ -4032,6 +4981,23 @@ kernel void kernel_flash_attn_ext_vec( } } + if (sinks != q && sgitg == 0) { + const float m = M; + const float s = tiisg == 0 ? 
((device const float *) sinks)[iq2] : -FLT_MAX/2; + + M = simd_max(max(M, s)); + + const float ms = exp(m - M); + const float vs = exp(s - M); + + S = S*ms + simd_sum(vs); + +#pragma unroll(DV4/NL) + for (short ii = 0; ii < DV4; ii += NL) { + lo[ii/NL] *= ms; + } + } + // these are needed for reducing the results from the simdgroups (reuse the ss buffer) if (tiisg == 0) { ss[0] = (s_t) S; @@ -4145,10 +5111,20 @@ kernel void kernel_flash_attn_ext_vec( half4, \ float, \ float, float4, \ - half4 + float4 typedef decltype(kernel_flash_attn_ext_vec) flash_attn_ext_vec_t; +template [[host_name("kernel_flash_attn_ext_vec_f16_h64")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; +#if defined(GGML_METAL_USE_BF16) +template [[host_name("kernel_flash_attn_ext_vec_bf16_h64")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; +#endif +template [[host_name("kernel_flash_attn_ext_vec_q4_0_h64")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; +template [[host_name("kernel_flash_attn_ext_vec_q4_1_h64")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; +template [[host_name("kernel_flash_attn_ext_vec_q5_0_h64")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; +template [[host_name("kernel_flash_attn_ext_vec_q5_1_h64")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; +template [[host_name("kernel_flash_attn_ext_vec_q8_0_h64")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; + template [[host_name("kernel_flash_attn_ext_vec_f16_h96")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; #if defined(GGML_METAL_USE_BF16) template [[host_name("kernel_flash_attn_ext_vec_bf16_h96")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec; @@ -4249,11 +5225,16 @@ kernel void kernel_cpy( device const char * src0, device char * dst, uint3 tgpig[[threadgroup_position_in_grid]], + uint tiitg[[thread_index_in_threadgroup]], ushort3 tpitg[[thread_position_in_threadgroup]], - ushort3 ntg[[threads_per_threadgroup]]) { + ushort3 tptg[[threads_per_threadgroup]]) { const int i03 = tgpig[2]; const int i02 = tgpig[1]; - const int i01 = tgpig[0]; + const int i01 = tgpig[0]*tptg.y + tiitg/tptg.x; + + if (i01 >= args.ne01) { + return; + } const int64_t n = i03*args.ne02*args.ne01*args.ne00 + i02*args.ne01*args.ne00 + i01*args.ne00; @@ -4264,7 +5245,7 @@ kernel void kernel_cpy( device T1 * dst_data = (device T1 *) (dst + i3*args.nb3 + i2*args.nb2 + i1*args.nb1 + i0*args.nb0); - for (int64_t i00 = tpitg.x; i00 < args.ne00; i00 += ntg.x) { + for (int64_t i00 = tiitg%tptg.x; i00 < args.ne00; i00 += tptg.x) { device const T0 * src = (device T0 *)(src0 + i03*args.nb03 + i02*args.nb02 + i01*args.nb01 + i00*args.nb00); dst_data[i00] = (T1) src[0]; } @@ -4284,6 +5265,7 @@ template [[host_name("kernel_cpy_bf16_f32")]] kernel kernel_cpy_t kernel_cpy; #endif +// TODO: templetify these kernels kernel void kernel_cpy_f32_q8_0( constant ggml_metal_kargs_cpy & args, device const char * src0, @@ -4307,23 +5289,7 @@ kernel void kernel_cpy_f32_q8_0( for (int64_t i00 = tpitg.x*QK8_0; i00 < args.ne00; i00 += ntg.x*QK8_0) { device const float * src = (device float *)(src0 + i03*args.nb03 + i02*args.nb02 + i01*args.nb01 + i00*args.nb00); - float amax = 0.0f; // absolute max - - for (int j = 0; j < QK8_0; j++) { - const float v = src[j]; - amax = MAX(amax, fabs(v)); - } - - const float d = amax / ((1 << 7) - 1); - const float id = d ? 
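// (old inline Q4_0 quantizer, superseded by the quantize_q4_0 call below)
// the 'd ? 1/d : 0' guard maps an all-zero block to id = 0, so every
// quantized index collapses to 8 -- the zero point, since dequantization
// is d*(q - 8) -- instead of dividing by zero.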
1.0f/d : 0.0f; - - dst_data[i00/QK8_0].d = d; - - for (int j = 0; j < QK8_0; ++j) { - const float x0 = src[j]*id; - - dst_data[i00/QK8_0].qs[j] = round(x0); - } + quantize_q8_0(src, dst_data[i00/QK8_0]); } } @@ -4350,32 +5316,7 @@ kernel void kernel_cpy_f32_q4_0( for (int64_t i00 = tpitg.x*QK4_0; i00 < args.ne00; i00 += ntg.x*QK4_0) { device const float * src = (device float *)(src0 + i03*args.nb03 + i02*args.nb02 + i01*args.nb01 + i00*args.nb00); - float amax = 0.0f; // absolute max - float max = 0.0f; - - for (int j = 0; j < QK4_0; j++) { - const float v = src[j]; - if (amax < fabs(v)) { - amax = fabs(v); - max = v; - } - } - - const float d = max / -8; - const float id = d ? 1.0f/d : 0.0f; - - dst_data[i00/QK4_0].d = d; - - for (int j = 0; j < QK4_0/2; ++j) { - const float x0 = src[0 + j]*id; - const float x1 = src[QK4_0/2 + j]*id; - - const uint8_t xi0 = MIN(15, (int8_t)(x0 + 8.5f)); - const uint8_t xi1 = MIN(15, (int8_t)(x1 + 8.5f)); - - dst_data[i00/QK4_0].qs[j] = xi0; - dst_data[i00/QK4_0].qs[j] |= xi1 << 4; - } + quantize_q4_0(src, dst_data[i00/QK4_0]); } } @@ -4402,31 +5343,7 @@ kernel void kernel_cpy_f32_q4_1( for (int64_t i00 = tpitg.x*QK4_1; i00 < args.ne00; i00 += ntg.x*QK4_1) { device const float * src = (device float *)(src0 + i03*args.nb03 + i02*args.nb02 + i01*args.nb01 + i00*args.nb00); - float min = FLT_MAX; - float max = -FLT_MAX; - - for (int j = 0; j < QK4_1; j++) { - const float v = src[j]; - if (min > v) min = v; - if (max < v) max = v; - } - - const float d = (max - min) / ((1 << 4) - 1); - const float id = d ? 1.0f/d : 0.0f; - - dst_data[i00/QK4_1].d = d; - dst_data[i00/QK4_1].m = min; - - for (int j = 0; j < QK4_1/2; ++j) { - const float x0 = (src[0 + j] - min)*id; - const float x1 = (src[QK4_1/2 + j] - min)*id; - - const uint8_t xi0 = MIN(15, (int8_t)(x0 + 0.5f)); - const uint8_t xi1 = MIN(15, (int8_t)(x1 + 0.5f)); - - dst_data[i00/QK4_1].qs[j] = xi0; - dst_data[i00/QK4_1].qs[j] |= xi1 << 4; - } + quantize_q4_1(src, dst_data[i00/QK4_1]); } } @@ -4453,38 +5370,7 @@ kernel void kernel_cpy_f32_q5_0( for (int64_t i00 = tpitg.x*QK5_0; i00 < args.ne00; i00 += ntg.x*QK5_0) { device const float * src = (device float *)(src0 + i03*args.nb03 + i02*args.nb02 + i01*args.nb01 + i00*args.nb00); - float amax = 0.0f; // absolute max - float max = 0.0f; - - for (int j = 0; j < QK5_0; j++) { - const float v = src[j]; - if (amax < fabs(v)) { - amax = fabs(v); - max = v; - } - } - - const float d = max / -16; - const float id = d ? 1.0f/d : 0.0f; - - dst_data[i00/QK5_0].d = d; - - uint32_t qh = 0; - for (int j = 0; j < QK5_0/2; ++j) { - const float x0 = src[0 + j]*id; - const float x1 = src[QK5_0/2 + j]*id; - - const uint8_t xi0 = MIN(31, (int8_t)(x0 + 16.5f)); - const uint8_t xi1 = MIN(31, (int8_t)(x1 + 16.5f)); - - dst_data[i00/QK5_0].qs[j] = (xi0 & 0xf) | ((xi1 & 0xf) << 4); - qh |= ((xi0 & 0x10u) >> 4) << (j + 0); - qh |= ((xi1 & 0x10u) >> 4) << (j + QK5_0/2); - } - thread const uint8_t * qh8 = (thread const uint8_t *)&qh; - for (int j = 0; j < 4; ++j) { - dst_data[i00/QK5_0].qh[j] = qh8[j]; - } + quantize_q5_0(src, dst_data[i00/QK5_0]); } } @@ -4511,51 +5397,10 @@ kernel void kernel_cpy_f32_q5_1( for (int64_t i00 = tpitg.x*QK5_1; i00 < args.ne00; i00 += ntg.x*QK5_1) { device const float * src = (device float *)(src0 + i03*args.nb03 + i02*args.nb02 + i01*args.nb01 + i00*args.nb00); - float max = src[0]; - float min = src[0]; - - for (int j = 1; j < QK5_1; j++) { - const float v = src[j]; - min = v < min ? v : min; - max = v > max ? 
v : max; - } - - const float d = (max - min) / 31; - const float id = d ? 1.0f/d : 0.0f; - - dst_data[i00/QK5_1].d = d; - dst_data[i00/QK5_1].m = min; - - uint32_t qh = 0; - for (int j = 0; j < QK5_1/2; ++j) { - const float x0 = (src[0 + j] - min)*id; - const float x1 = (src[QK5_1/2 + j] - min)*id; - - const uint8_t xi0 = (uint8_t)(x0 + 0.5f); - const uint8_t xi1 = (uint8_t)(x1 + 0.5f); - - dst_data[i00/QK5_1].qs[j] = (xi0 & 0xf) | ((xi1 & 0xf) << 4); - qh |= ((xi0 & 0x10u) >> 4) << (j + 0); - qh |= ((xi1 & 0x10u) >> 4) << (j + QK5_1/2); - } - thread const uint8_t * qh8 = (thread const uint8_t *)&qh; - for (int j = 0; j < 4; ++j) { - dst_data[i00/QK5_1].qh[j] = qh8[j]; - } + quantize_q5_1(src, dst_data[i00/QK5_1]); } } -static inline int best_index_int8(int n, constant float * val, float x) { - if (x <= val[0]) return 0; - if (x >= val[n-1]) return n-1; - int ml = 0, mu = n-1; - while (mu-ml > 1) { - int mav = (ml+mu)/2; - if (x < val[mav]) mu = mav; else ml = mav; - } - return x - val[mu-1] < val[mu] - x ? mu-1 : mu; -} - kernel void kernel_cpy_f32_iq4_nl( constant ggml_metal_kargs_cpy & args, device const char * src0, @@ -4579,40 +5424,7 @@ kernel void kernel_cpy_f32_iq4_nl( for (int64_t i00 = tpitg.x*QK4_NL; i00 < args.ne00; i00 += ntg.x*QK4_NL) { device const float * src = (device float *)(src0 + i03*args.nb03 + i02*args.nb02 + i01*args.nb01 + i00*args.nb00); - float amax = 0.0f; // absolute max - float max = 0.0f; - - for (int j = 0; j < QK4_NL; j++) { - const float v = src[j]; - if (amax < fabs(v)) { - amax = fabs(v); - max = v; - } - } - - const float d = max / kvalues_iq4nl_f[0]; - const float id = d ? 1.0f/d : 0.0f; - - float sumqx = 0, sumq2 = 0; - for (int j = 0; j < QK4_NL/2; ++j) { - const float x0 = src[0 + j]*id; - const float x1 = src[QK4_NL/2 + j]*id; - - const uint8_t xi0 = best_index_int8(16, kvalues_iq4nl_f, x0); - const uint8_t xi1 = best_index_int8(16, kvalues_iq4nl_f, x1); - - dst_data[i00/QK4_NL].qs[j] = xi0 | (xi1 << 4); - - const float v0 = kvalues_iq4nl_f[xi0]; - const float v1 = kvalues_iq4nl_f[xi1]; - const float w0 = src[0 + j]*src[0 + j]; - const float w1 = src[QK4_NL/2 + j]*src[QK4_NL/2 + j]; - sumqx += w0*v0*src[j] + w1*v1*src[QK4_NL/2 + j]; - sumq2 += w0*v0*v0 + w1*v1*v1; - - } - - dst_data[i00/QK4_NL].d = sumq2 > 0 ? 
sumqx/sumq2 : d; + quantize_iq4_nl(src, dst_data[i00/QK4_NL]); } } @@ -6291,12 +7103,101 @@ kernel void kernel_mul_mv_iq4_xs_f32( kernel_mul_mv_iq4_xs_f32_impl(args, src0, src1, dst, shmem, tgpig, tiisg, sgitg); } +template +void kernel_mul_mv_mxfp4_f32_impl( + args_t args, + device const char * src0, + device const char * src1, + device char * dst, + threadgroup char * shmem, + uint3 tgpig, + ushort tiisg, + ushort sgitg) { + + threadgroup float * shmem_f32 = (threadgroup float *) shmem; + const int nb = args.ne00/QK_MXFP4; + + const int r0 = tgpig.x; + const int r1 = tgpig.y; + const int im = tgpig.z; + + const int first_row = (r0 * nsg + sgitg) * nr0; + + const uint i12 = im%args.ne12; + const uint i13 = im/args.ne12; + + const uint64_t offset0 = first_row*args.nb01 + (i12/args.r2)*args.nb02 + (i13/args.r3)*args.nb03; + const uint64_t offset1 = r1*args.nb11 + (i12 )*args.nb12 + (i13 )*args.nb13; + + device const block_mxfp4 * x = (device const block_mxfp4 *) (src0 + offset0); + device const float * y = (device const float *) (src1 + offset1); + + const short ix = tiisg/2; // 0...15 + const short it = tiisg%2; // 0 or 1 + + shmem_f32[tiisg] = kvalues_mxfp4_f[tiisg%16]; + threadgroup_barrier(mem_flags::mem_threadgroup); + + float4 yl[4]; + float sumf[nr0]={0.f}; + + device const float * yb = y + ix * QK_MXFP4 + it * 8; + + for (int ib = ix; ib < nb; ib += 16) { + device const float4 * y4 = (device const float4 *)yb; + yl[0] = y4[0]; + yl[1] = y4[4]; + yl[2] = y4[1]; + yl[3] = y4[5]; + +#pragma unroll(nr0) + for (short row = 0; row < nr0; row++) { + device const block_mxfp4 & xb = x[row*nb + ib]; + device const uint8_t * q2 = (device const uint8_t *)(xb.qs + 8*it); + + float4 acc1 = yl[0]*float4(shmem_f32[q2[0] & 0x0F], shmem_f32[q2[1] & 0x0F], shmem_f32[q2[2] & 0x0F], shmem_f32[q2[3] & 0x0F]); + float4 acc2 = yl[1]*float4(shmem_f32[q2[0] >> 4 ], shmem_f32[q2[1] >> 4 ], shmem_f32[q2[2] >> 4 ], shmem_f32[q2[3] >> 4 ]); + float4 acc3 = yl[2]*float4(shmem_f32[q2[4] & 0x0F], shmem_f32[q2[5] & 0x0F], shmem_f32[q2[6] & 0x0F], shmem_f32[q2[7] & 0x0F]); + float4 acc4 = yl[3]*float4(shmem_f32[q2[4] >> 4 ], shmem_f32[q2[5] >> 4 ], shmem_f32[q2[6] >> 4 ], shmem_f32[q2[7] >> 4 ]); + + acc1 = (acc1 + acc3) + (acc2 + acc4); + + sumf[row] += e8m0_to_fp32(xb.e) * ((acc1[0] + acc1[1]) + (acc1[2] + acc1[3])); + } + + yb += 16 * QK_MXFP4; + } + + device float * dst_f32 = (device float *) dst + (uint64_t)im*args.ne0*args.ne1 + (uint64_t)r1*args.ne0; + + for (int row = 0; row < nr0 && first_row + row < args.ne0; ++row) { + float sum_all = simd_sum(sumf[row]); + if (tiisg == 0) { + dst_f32[first_row + row] = sum_all; + } + } +} + +[[host_name("kernel_mul_mv_mxfp4_f32")]] +kernel void kernel_mul_mv_mxfp4_f32( + constant ggml_metal_kargs_mul_mv & args, + device const char * src0, + device const char * src1, + device char * dst, + threadgroup char * shmem [[threadgroup(0)]], + uint3 tgpig[[threadgroup_position_in_grid]], + ushort tiisg[[thread_index_in_simdgroup]], + ushort sgitg[[simdgroup_index_in_threadgroup]]) { + + kernel_mul_mv_mxfp4_f32_impl(args, src0, src1, dst, shmem, tgpig, tiisg, sgitg); +} + template kernel void kernel_get_rows_q( + constant ggml_metal_kargs_get_rows & args, device const void * src0, device const void * src1, device float * dst, - constant ggml_metal_kargs_get_rows & args, uint3 tgpig[[threadgroup_position_in_grid]], uint tiitg[[thread_index_in_threadgroup]], uint3 tptg [[threads_per_threadgroup]]) { @@ -6316,10 +7217,10 @@ kernel void kernel_get_rows_q( template kernel void 
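The new kernel_mul_mv_mxfp4_f32_impl above avoids per-element bit manipulation entirely: it broadcasts the 16-entry FP4 value table into threadgroup memory once (shmem_f32[tiisg] = kvalues_mxfp4_f[tiisg%16]), decodes each nibble by table lookup, and applies the block's shared E8M0 scale a single time per 32-value block. A self-contained C sketch of the per-block dot product it computes; the level table and scale decode below are assumptions standing in for ggml's kvalues_mxfp4_f and e8m0_to_fp32:

    #include <math.h>
    #include <stdint.h>

    #define QK_MXFP4 32

    typedef struct {
        uint8_t e;                  // shared E8M0 exponent for the block
        uint8_t qs[QK_MXFP4/2];     // 32 4-bit indices, two per byte
    } block_mxfp4_sketch;

    // Assumed FP4 (E2M1) level table, sign in the top bit of each nibble.
    static const float fp4_levels[16] = {
         0.0f,  0.5f,  1.0f,  1.5f,  2.0f,  3.0f,  4.0f,  6.0f,
        -0.0f, -0.5f, -1.0f, -1.5f, -2.0f, -3.0f, -4.0f, -6.0f,
    };

    static float e8m0_scale(uint8_t e) {
        return ldexpf(1.0f, (int) e - 127);  // 2^(e-127), assumed decode
    }

    // Low nibbles cover the first half of the block, high nibbles the
    // second, matching dequantize_row_mxfp4 further down in this diff.
    static float mxfp4_dot_block(const block_mxfp4_sketch * x, const float * y) {
        float sum = 0.0f;
        for (int j = 0; j < QK_MXFP4/2; j++) {
            sum += y[j]              * fp4_levels[x->qs[j] & 0x0F];
            sum += y[j + QK_MXFP4/2] * fp4_levels[x->qs[j] >> 4];
        }
        return e8m0_scale(x->e) * sum;  // one scale per block, as in the kernel
    }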
kernel_get_rows_f( + constant ggml_metal_kargs_get_rows & args, device const void * src0, device const void * src1, device float * dst, - constant ggml_metal_kargs_get_rows & args, uint3 tgpig[[threadgroup_position_in_grid]], uint tiitg[[thread_index_in_threadgroup]], uint3 tptg [[threads_per_threadgroup]]) { @@ -6337,10 +7238,10 @@ kernel void kernel_get_rows_f( } kernel void kernel_get_rows_i32( + constant ggml_metal_kargs_get_rows & args, device const void * src0, device const void * src1, device int32_t * dst, - constant ggml_metal_kargs_get_rows & args, uint3 tgpig[[threadgroup_position_in_grid]], uint tiitg[[thread_index_in_threadgroup]], uint3 tptg [[threads_per_threadgroup]]) { @@ -6357,6 +7258,67 @@ kernel void kernel_get_rows_i32( } } +template +kernel void kernel_set_rows_q32( + constant ggml_metal_kargs_set_rows & args, + device const void * src0, + device const void * src1, + device float * dst, + uint3 tgpig[[threadgroup_position_in_grid]], + uint tiitg[[thread_index_in_threadgroup]], + uint3 tptg [[threads_per_threadgroup]]) { + const int32_t i03 = tgpig.z; + const int32_t i02 = tgpig.y; + + const int32_t i12 = i03%args.ne12; + const int32_t i11 = i02%args.ne11; + + const int32_t i01 = tgpig.x*tptg.y + tiitg/tptg.x; + if (i01 >= args.ne01) { + return; + } + + const int32_t i10 = i01; + const int64_t i1 = ((const device int64_t *) ((const device char *) src1 + i10*args.nb10 + i11*args.nb11 + i12*args.nb12))[0]; + + device block_q * dst_row = ( device block_q *) (( device char *) dst + i1*args.nb1 + i02*args.nb2 + i03*args.nb3); + const device float * src_row = (const device float *) ((const device char *) src0 + i01*args.nb01 + i02*args.nb02 + i03*args.nb03); + + for (int ind = tiitg%tptg.x; ind < args.nk0; ind += tptg.x) { + quantize_func(src_row + 32*ind, dst_row[ind]); + } +} + +template +kernel void kernel_set_rows_f( + constant ggml_metal_kargs_set_rows & args, + device const void * src0, + device const void * src1, + device float * dst, + uint3 tgpig[[threadgroup_position_in_grid]], + uint tiitg[[thread_index_in_threadgroup]], + uint3 tptg [[threads_per_threadgroup]]) { + const int32_t i03 = tgpig.z; + const int32_t i02 = tgpig.y; + + const int32_t i12 = i03%args.ne12; + const int32_t i11 = i02%args.ne11; + + const int32_t i01 = tgpig.x*tptg.y + tiitg/tptg.x; + if (i01 >= args.ne01) { + return; + } + + const int32_t i10 = i01; + const int64_t i1 = ((const device int64_t *) ((const device char *) src1 + i10*args.nb10 + i11*args.nb11 + i12*args.nb12))[0]; + + device T * dst_row = ( device T *) (( device char *) dst + i1*args.nb1 + i02*args.nb2 + i03*args.nb3); + const device float * src_row = (const device float *) ((const device char *) src0 + i01*args.nb01 + i02*args.nb02 + i03*args.nb03); + + for (int ind = tiitg%tptg.x; ind < args.nk0; ind += tptg.x) { + dst_row[ind] = (T) src_row[ind]; + } +} #define BLOCK_SIZE_M 64 // 8 simdgroup matrices from matrix A #define BLOCK_SIZE_N 32 // 4 simdgroup matrices from matrix B @@ -6744,49 +7706,6 @@ kernel void kernel_mul_mm_id( } } -template -void dequantize_mxfp4(device const block_mxfp4 * xb, short il, thread type4x4 & reg) { - float4x4 reg_f; - const ushort dst_bias = 15; - const ushort dst_0p5 = 0x3800; - const ushort dst_m_bits = 10; - const half scale = (half)(as_type(((uint32_t)xb->d) << 23)); - // il:0 first 16, il:1 last 16 - for (int i = 0; i < 8; i++) { - ushort em0 = xb->qs[il*8 + i] & 0x07; - ushort em1 = xb->qs[il*8 + i] & 0x70; - // float16 values - ushort x0 = (em0 << (dst_m_bits - 1)) | ((xb->qs[il*8 + i] & 
0x08) << 12); - ushort x1 = (em1 << (dst_m_bits - 5)) | ((xb->qs[il*8 + i] & 0x80) << 8); - - // Three cases: - // x is normal and non-zero: Correct bias - if ((em0 & 0x06) != 0) { - x0 = x0 + ((dst_bias - 1) << dst_m_bits); - } - if ((em1 & 0x60) != 0) { - x1 = x1 + ((dst_bias - 1) << dst_m_bits); - } - // x is subnormal (x == 0bs001 where s is the sign): Map to +-0.5 in the dst type - if (em0 == 0x01) { - x0 = dst_0p5 | (x0 & 0x8000); - } - if (em1 == 0x10) { - x1 = dst_0p5 | (x1 & 0x8000); - } - // x is zero, do nothing - - if (isnan(scale)) { - reg_f[i/2][2*(i%2) + 0] = scale; - reg_f[i/2][2*(i%2) + 1] = scale; - } else { - reg_f[i/2][2*(i%2) + 0] = scale * as_type(x0); - reg_f[i/2][2*(i%2) + 1] = scale * as_type(x1); - } - } - reg = (type4x4) reg_f; -} - #define QK_NL 16 // @@ -6808,6 +7727,7 @@ template [[host_name("kernel_get_rows_q4_1")]] kernel get_rows_q_t kernel_get template [[host_name("kernel_get_rows_q5_0")]] kernel get_rows_q_t kernel_get_rows_q; template [[host_name("kernel_get_rows_q5_1")]] kernel get_rows_q_t kernel_get_rows_q; template [[host_name("kernel_get_rows_q8_0")]] kernel get_rows_q_t kernel_get_rows_q; +template [[host_name("kernel_get_rows_mxfp4")]] kernel get_rows_q_t kernel_get_rows_q; template [[host_name("kernel_get_rows_q2_K")]] kernel get_rows_q_t kernel_get_rows_q; template [[host_name("kernel_get_rows_q3_K")]] kernel get_rows_q_t kernel_get_rows_q; template [[host_name("kernel_get_rows_q4_K")]] kernel get_rows_q_t kernel_get_rows_q; @@ -6823,6 +7743,27 @@ template [[host_name("kernel_get_rows_iq1_m")]] kernel get_rows_q_t kernel_get template [[host_name("kernel_get_rows_iq4_nl")]] kernel get_rows_q_t kernel_get_rows_q; template [[host_name("kernel_get_rows_iq4_xs")]] kernel get_rows_q_t kernel_get_rows_q; +// +// set rows +// + +typedef decltype(kernel_set_rows_f) set_rows_f_t; + +template [[host_name("kernel_set_rows_f32")]] kernel set_rows_f_t kernel_set_rows_f; +template [[host_name("kernel_set_rows_f16")]] kernel set_rows_f_t kernel_set_rows_f; +#if defined(GGML_METAL_USE_BF16) +template [[host_name("kernel_set_rows_bf16")]] kernel set_rows_f_t kernel_set_rows_f; +#endif + +typedef decltype(kernel_set_rows_q32) set_rows_q32_t; + +template [[host_name("kernel_set_rows_q8_0")]] kernel set_rows_q32_t kernel_set_rows_q32; +template [[host_name("kernel_set_rows_q4_0")]] kernel set_rows_q32_t kernel_set_rows_q32; +template [[host_name("kernel_set_rows_q4_1")]] kernel set_rows_q32_t kernel_set_rows_q32; +template [[host_name("kernel_set_rows_q5_0")]] kernel set_rows_q32_t kernel_set_rows_q32; +template [[host_name("kernel_set_rows_q5_1")]] kernel set_rows_q32_t kernel_set_rows_q32; +template [[host_name("kernel_set_rows_iq4_nl")]] kernel set_rows_q32_t kernel_set_rows_q32; + // // matrix-matrix multiplication // @@ -6839,6 +7780,7 @@ template [[host_name("kernel_mul_mm_q4_1_f32")]] kernel mul_mm_t kernel_mul_m template [[host_name("kernel_mul_mm_q5_0_f32")]] kernel mul_mm_t kernel_mul_mm; template [[host_name("kernel_mul_mm_q5_1_f32")]] kernel mul_mm_t kernel_mul_mm; template [[host_name("kernel_mul_mm_q8_0_f32")]] kernel mul_mm_t kernel_mul_mm; +template [[host_name("kernel_mul_mm_mxfp4_f32")]] kernel mul_mm_t kernel_mul_mm; template [[host_name("kernel_mul_mm_q2_K_f32")]] kernel mul_mm_t kernel_mul_mm; template [[host_name("kernel_mul_mm_q3_K_f32")]] kernel mul_mm_t kernel_mul_mm; template [[host_name("kernel_mul_mm_q4_K_f32")]] kernel mul_mm_t kernel_mul_mm; @@ -6854,8 +7796,6 @@ template [[host_name("kernel_mul_mm_iq1_m_f32")]] kernel mul_mm_t 
kernel_mul_m template [[host_name("kernel_mul_mm_iq4_nl_f32")]] kernel mul_mm_t kernel_mul_mm; template [[host_name("kernel_mul_mm_iq4_xs_f32")]] kernel mul_mm_t kernel_mul_mm; -template [[host_name("kernel_mul_mm_mxfp4_f32")]] kernel mul_mm_t kernel_mul_mm; - // // indirect matrix-matrix multiplication // @@ -6872,6 +7812,7 @@ template [[host_name("kernel_mul_mm_id_q4_1_f16")]] kernel mul_mm_id kernel_m template [[host_name("kernel_mul_mm_id_q5_0_f16")]] kernel mul_mm_id kernel_mul_mm_id; template [[host_name("kernel_mul_mm_id_q5_1_f16")]] kernel mul_mm_id kernel_mul_mm_id; template [[host_name("kernel_mul_mm_id_q8_0_f16")]] kernel mul_mm_id kernel_mul_mm_id; +template [[host_name("kernel_mul_mm_id_mxfp4_f16")]] kernel mul_mm_id kernel_mul_mm_id; template [[host_name("kernel_mul_mm_id_q2_K_f16")]] kernel mul_mm_id kernel_mul_mm_id; template [[host_name("kernel_mul_mm_id_q3_K_f16")]] kernel mul_mm_id kernel_mul_mm_id; template [[host_name("kernel_mul_mm_id_q4_K_f16")]] kernel mul_mm_id kernel_mul_mm_id; @@ -6887,8 +7828,6 @@ template [[host_name("kernel_mul_mm_id_iq1_m_f16")]] kernel mul_mm_id kernel_m template [[host_name("kernel_mul_mm_id_iq4_nl_f16")]] kernel mul_mm_id kernel_mul_mm_id; template [[host_name("kernel_mul_mm_id_iq4_xs_f16")]] kernel mul_mm_id kernel_mul_mm_id; -template [[host_name("kernel_mul_mm_id_mxfp4_f16")]] kernel mul_mm_id kernel_mul_mm_id; - // // matrix-vector multiplication @@ -7005,120 +7944,6 @@ kernel void kernel_mul_mv_id( sgitg); } -// MXFP32 implementation derived from mul_vec_q_n_f32_impl and block_q_n_dot_y -void mul_mv_mxfp4_f32_impl( - ggml_metal_kargs_mul_mv args, - device const char * src0, - device const char * src1, - device char * dst, - threadgroup char * shmem, - uint3 tgpig, - ushort tiisg, - ushort sgitg) { - const ushort dst_bias = 15; - const ushort dst_0p5 = 0x3800; - const ushort dst_m_bits = 10; - const int nr0 = N_R0_MXFP4; - const int nsg = N_SG_MXFP4; - const int nw = N_SIMDWIDTH; - const int nb = args.ne00/MXFP4; - - const int r0 = tgpig.x; - const int r1 = tgpig.y; - const int im = tgpig.z; - - const int first_row = (r0 * nsg + sgitg) * nr0; - - const uint i12 = im%args.ne12; - const uint i13 = im/args.ne12; - - const uint64_t offset1 = r1*args.nb11 + (i12 )*args.nb12 + (i13 )*args.nb13; - - device const float * y = (device const float *) (src1 + offset1); - - // pointers to src0 rows - device const block_mxfp4 * ax[nr0]; - for (int row = 0; row < nr0; ++row) { - const uint64_t offset0 = (first_row + row)*args.nb01 + (i12/args.r2)*args.nb02 + (i13/args.r3)*args.nb03; - - ax[row] = (device const block_mxfp4 *) ((device char *) src0 + offset0); - } - - float yl[16]; // src1 vector cache - float sumf[nr0] = {0.f}; - - const short ix = (tiisg/2); - const short il = (tiisg%2)*16; - - device const float * yb = y + ix*MXFP4 + il; - - // each thread in a SIMD group deals with half a block. 
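For contrast, the removed implementation that follows decoded each FP4 (E2M1) nibble by hand: shift the one mantissa bit into fp16 position, re-bias normal exponents from FP4's bias 1 to fp16's bias 15, map the single subnormal encoding (0bs001) to +-0.5, and pass zero through. E2M1 has only eight magnitudes, so the decode is easy to verify against a table:

    // FP4 E2M1: 1 sign bit, 2 exponent bits (bias 1), 1 mantissa bit.
    // Enumerating the eight non-negative encodings reproduces the MXFP4 levels.
    #include <stdio.h>

    int main(void) {
        static const float levels[8] = {
            0.0f, 0.5f,   // e=0: zero and the lone subnormal (0b001 -> 0.5)
            1.0f, 1.5f,   // e=1: 1.m * 2^0
            2.0f, 3.0f,   // e=2: 1.m * 2^1
            4.0f, 6.0f,   // e=3: 1.m * 2^2
        };
        for (int i = 0; i < 8; i++) {
            printf("0b%d%d%d -> %g\n", (i >> 2) & 1, (i >> 1) & 1, i & 1, levels[i]);
        }
        return 0;
    }

The E8M0 byte 0xFF is reserved as NaN in the microscaling format, which is why this removed code special-cases a NaN scale and why the validate_e_e8m0 check later in this diff rejects 0xFF.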
- for (int ib = ix; ib < nb; ib += nw/2) { - -#pragma unroll - for (short row = 0; row < nr0; row++) { - // Processes 16 items - device const block_mxfp4 * qb_curr = ax[row] + ib; - float d = as_type(((uint32_t)(ax[row] + ib)->d) << 23); - // il = 0 or 16 - device const uint8_t *qs = ((device const uint8_t *) qb_curr + 1 + il/2); - for (int i = 0; i < 8; ++i) { - ushort em0 = qs[i] & 0x07; - ushort em1 = qs[i] & 0x70; - ushort x0 = (em0 << (dst_m_bits - 1)) | ((qs[i] & 0x08) << 12); - ushort x1 = (em1 << (dst_m_bits - 5)) | ((qs[i] & 0x80) << 8); - // Three cases: - // x is normal and non-zero: Correct bias - if ((em0 & 0x06) != 0) { - x0 = x0 + ((dst_bias - 1) << dst_m_bits); - } - if ((em1 & 0x60) != 0) { - x1 = x1 + ((dst_bias - 1) << dst_m_bits); - } - // x is subnormal (x == 0bs001 where s is the sign): Map to +-0.5 in the dst type - if (em0 == 0x01) { - x0 = dst_0p5 | (x0 & 0x8000); - } - if (em1 == 0x10) { - x1 = dst_0p5 | (x1 & 0x8000); - } - // x is zero, do nothing - if (!isnan(d)) { - sumf[row] += yb[i*2] * as_type(x0) * d - + yb[i*2+1] * as_type(x1) * d; - } else { - sumf[row] = d; - } - } - } - - yb += MXFP4 * 16; - } - - device float * dst_f32 = (device float *) dst + im*args.ne0*args.ne1 + r1*args.ne0; - - for (int row = 0; row < nr0; ++row) { - const float tot = simd_sum(sumf[row]); - - if (tiisg == 0 && first_row + row < args.ne01) { - dst_f32[first_row + row] = tot; - } - } -} - -[[host_name("kernel_mul_mv_mxfp4_f32")]] -kernel void kernel_mul_mv_mxfp4_f32( - constant ggml_metal_kargs_mul_mv & args, - device const char * src0, - device const char * src1, - device char * dst, - threadgroup char * shmem [[threadgroup(0)]], - uint3 tgpig[[threadgroup_position_in_grid]], - ushort tiisg[[thread_index_in_simdgroup]], - ushort sgitg[[simdgroup_index_in_threadgroup]]) { - mul_mv_mxfp4_f32_impl(args, src0, src1, dst, shmem, tgpig, tiisg, sgitg); -} - typedef decltype(kernel_mul_mv_id>>) kernel_mul_mv_id_t; template [[host_name("kernel_mul_mv_id_f32_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id>>; @@ -7133,6 +7958,8 @@ template [[host_name("kernel_mul_mv_id_q4_1_f32")]] kernel kernel_mul_mv_id_t template [[host_name("kernel_mul_mv_id_q5_0_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id>>; template [[host_name("kernel_mul_mv_id_q5_1_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id>>; +template [[host_name("kernel_mul_mv_id_mxfp4_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id>>; + template [[host_name("kernel_mul_mv_id_q2_K_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id>>; template [[host_name("kernel_mul_mv_id_q3_K_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id>>; template [[host_name("kernel_mul_mv_id_q4_K_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id>>; @@ -7148,8 +7975,6 @@ template [[host_name("kernel_mul_mv_id_iq2_s_f32")]] kernel kernel_mul_mv_id_t template [[host_name("kernel_mul_mv_id_iq4_nl_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id>>; template [[host_name("kernel_mul_mv_id_iq4_xs_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id>>; -template [[host_name("kernel_mul_mv_id_mxfp4_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id>; - kernel void kernel_pool_2d_max_f32( device const float * src0, device float * dst, diff --git a/ml/backend/ggml/ggml/src/ggml-opt.cpp b/ml/backend/ggml/ggml/src/ggml-opt.cpp index 58d77578f..a3c82d675 100644 --- a/ml/backend/ggml/ggml/src/ggml-opt.cpp +++ b/ml/backend/ggml/ggml/src/ggml-opt.cpp @@ -576,6 +576,10 @@ void ggml_opt_reset(ggml_opt_context_t opt_ctx, bool optimizer) { } } +bool 
ggml_opt_static_graphs(ggml_opt_context_t opt_ctx) { + return opt_ctx->static_graphs; +} + struct ggml_tensor * ggml_opt_inputs(ggml_opt_context_t opt_ctx) { return opt_ctx->inputs; } @@ -842,6 +846,7 @@ void ggml_opt_epoch( int64_t idata_split, ggml_opt_epoch_callback callback_train, ggml_opt_epoch_callback callback_eval) { + GGML_ASSERT(ggml_opt_static_graphs(opt_ctx) && "ggml_opt_epoch requires static graphs"); struct ggml_tensor * inputs = ggml_opt_inputs(opt_ctx); struct ggml_tensor * labels = ggml_opt_labels(opt_ctx); struct ggml_tensor * data = ggml_opt_dataset_data(dataset); diff --git a/ml/backend/ggml/ggml/src/ggml-quants.c b/ml/backend/ggml/ggml/src/ggml-quants.c index 17c308aae..94f6405ca 100644 --- a/ml/backend/ggml/ggml/src/ggml-quants.c +++ b/ml/backend/ggml/ggml/src/ggml-quants.c @@ -21,6 +21,17 @@ #define UNUSED GGML_UNUSED +static inline int best_index_int8(int n, const int8_t * val, float x) { + if (x <= val[0]) return 0; + if (x >= val[n-1]) return n-1; + int ml = 0, mu = n-1; + while (mu-ml > 1) { + int mav = (ml+mu)/2; + if (x < val[mav]) mu = mav; else ml = mav; + } + return x - val[mu-1] < val[mu] - x ? mu-1 : mu; +} + // reference implementation for deterministic creation of model files void quantize_row_q4_0_ref(const float * GGML_RESTRICT x, block_q4_0 * GGML_RESTRICT y, int64_t k) { static const int qk = QK4_0; @@ -246,6 +257,53 @@ void quantize_row_q8_1_ref(const float * GGML_RESTRICT x, block_q8_1 * GGML_REST } } +static inline int best_index_mxfp4(float x, float e) { + int best_index = 0; + float best_err = fabsf(kvalues_mxfp4[0]*e - x); + for (int i = 1; i < 16; i++) { + float err = fabsf(kvalues_mxfp4[i]*e - x); + if (err < best_err) { + best_index = i; + best_err = err; + } + } + return best_index; +} + +void quantize_row_mxfp4_ref(const float * GGML_RESTRICT x, block_mxfp4 * GGML_RESTRICT y, int64_t k) { + static const int qk = QK_MXFP4; + + assert(k % qk == 0); + + const int nb = k / qk; + + for (int i = 0; i < nb; i++) { + float amax = 0.0f; // absolute max + + for (int j = 0; j < qk; j++) { + const float v = x[i*qk + j]; + + if (amax < fabsf(v)) { + amax = fabsf(v); + } + } + + const uint8_t e = amax > 0.0f ? 
(uint8_t) (floorf(log2f(amax)) - 2 + 127) : 0; + + const float d = GGML_E8M0_TO_FP32_HALF(e); + + y[i].e = e; + + for (int j = 0; j < qk/2; ++j) { + const uint8_t x0 = best_index_mxfp4(x[i*qk + 0 + j], d); + const uint8_t x1 = best_index_mxfp4(x[i*qk + qk/2 + j], d); + + y[i].qs[j] = x0; + y[i].qs[j] |= x1 << 4; + } + } +} + void dequantize_row_q4_0(const block_q4_0 * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k) { static const int qk = QK4_0; @@ -356,6 +414,26 @@ void dequantize_row_q8_0(const block_q8_0 * GGML_RESTRICT x, float * GGML_RESTRI } } +void dequantize_row_mxfp4(const block_mxfp4 * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k) { + static const int qk = QK_MXFP4; + + assert(k % qk == 0); + + const int nb = k / qk; + + for (int i = 0; i < nb; i++) { + const float d = GGML_E8M0_TO_FP32_HALF(x[i].e); + + for (int j = 0; j < qk/2; ++j) { + const int8_t x0 = kvalues_mxfp4[x[i].qs[j] & 0x0F]; + const int8_t x1 = kvalues_mxfp4[x[i].qs[j] >> 4]; + + y[i*qk + j + 0 ] = x0*d; + y[i*qk + j + qk/2] = x1*d; + } + } +} + // // 2-6 bit quantization in super-blocks // @@ -568,14 +646,14 @@ static float make_qkx2_quants(int n, int nmax, const float * GGML_RESTRICT x, co } float iscale = nmax/(max - min); float scale = 1/iscale; - float best_mad = 0; + float best_error = 0; for (int i = 0; i < n; ++i) { int l = nearest_int(iscale*(x[i] - min)); L[i] = MAX(0, MIN(nmax, l)); float diff = scale * L[i] + min - x[i]; diff = use_mad ? fabsf(diff) : diff * diff; float w = weights[i]; - best_mad += w * diff; + best_error += w * diff; } if (nstep < 1) { *the_min = -min; @@ -601,18 +679,18 @@ static float make_qkx2_quants(int n, int nmax, const float * GGML_RESTRICT x, co this_min = 0; this_scale = sum_xl / sum_l2; } - float mad = 0; + float cur_error = 0; for (int i = 0; i < n; ++i) { float diff = this_scale * Laux[i] + this_min - x[i]; diff = use_mad ? 
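In the new quantize_row_mxfp4_ref above, the exponent choice e = floor(log2(amax)) - 2 + 127 pins the effective block scale to d = 2^(floor(log2 amax) - 2), so the largest FP4 magnitude, 6, reaches 6*d = 1.5 * 2^floor(log2 amax): the block's extreme values land on or near the top level, and anything in the upper third of an octave saturates to it. With only 16 candidate levels, best_index_mxfp4's linear scan is as cheap as the binary search best_index_int8 performs on the sorted iq4_nl table. A worked example of the scale choice; the plain 2^(e-127) decode is an assumption here, since ggml stores doubled integer levels and compensates inside its GGML_E8M0_TO_FP32_HALF macro:

    #include <math.h>
    #include <stdio.h>

    int main(void) {
        const float amax = 3.0f;                                // block's absolute max
        const int   e    = (int) floorf(log2f(amax)) - 2 + 127; // as in the ref quantizer
        const float d    = ldexpf(1.0f, e - 127);               // assumed effective scale
        printf("e = %d, d = %g, top level 6*d = %g\n", e, d, 6.0f*d); // 126, 0.5, 3
        return 0;
    }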
fabsf(diff) : diff * diff; float w = weights[i]; - mad += w * diff; + cur_error += w * diff; } - if (mad < best_mad) { + if (cur_error < best_error) { for (int i = 0; i < n; ++i) { L[i] = Laux[i]; } - best_mad = mad; + best_error = cur_error; scale = this_scale; min = this_min; } @@ -2014,6 +2092,12 @@ size_t quantize_q8_0(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, return nrow * row_size; } +size_t quantize_mxfp4(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrow, int64_t n_per_row, const float * quant_weights) { + GGML_UNUSED(quant_weights); + quantize_row_mxfp4_ref(src, dst, (int64_t)nrow*n_per_row); + return nrow * ggml_row_size(GGML_TYPE_MXFP4, n_per_row); +} + // ====================== Ternary (de)-quantization (BitNet b1.58 and TriLMs) void quantize_row_tq1_0_ref(const float * GGML_RESTRICT x, block_tq1_0 * GGML_RESTRICT y, int64_t k) { @@ -2425,8 +2509,6 @@ void dequantize_row_iq1_m(const block_iq1_m * GGML_RESTRICT x, float * GGML_REST } } -static const int8_t kvalues_iq4nl[16] = {-127, -104, -83, -65, -49, -35, -22, -10, 1, 13, 25, 38, 53, 69, 89, 113}; - void dequantize_row_iq4_nl(const block_iq4_nl * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k) { assert(k % QK4_NL == 0); const int64_t nb = k / QK4_NL; @@ -4553,17 +4635,6 @@ size_t quantize_iq1_m(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, // ============================ 4-bit non-linear quants -static inline int best_index_int8(int n, const int8_t * val, float x) { - if (x <= val[0]) return 0; - if (x >= val[n-1]) return n-1; - int ml = 0, mu = n-1; - while (mu-ml > 1) { - int mav = (ml+mu)/2; - if (x < val[mav]) mu = mav; else ml = mav; - } - return x - val[mu-1] < val[mu] - x ? mu-1 : mu; -} - static void quantize_row_iq4_nl_impl(const int super_block_size, const int block_size, const float * GGML_RESTRICT x, ggml_fp16_t * dh, uint8_t * q4, uint16_t * scales_h, uint8_t * scales_l, float * scales, float * weight, uint8_t * L, @@ -4925,144 +4996,6 @@ void quantize_row_iq2_s_ref(const float * GGML_RESTRICT x, block_iq2_s * GGML_RE quantize_iq2_s(x, y, 1, k, NULL); } -// =============================== mxfp4 (de)-quantization - -void quantize_row_mxfp4_ref(const float * GGML_RESTRICT x, block_mxfp4 * GGML_RESTRICT y, int64_t k) { - static const int qk = MXFP4; - static const uint32_t E8_BIAS = 127; - static const uint32_t E2_BIAS = 1; - - assert(k % qk == 0); - - const int nb = k / qk; - - for (int i = 0; i < nb; i++) { - float amax = 0.0f; // absolute max - - for (int j = 0; j < qk; j++) { - const float v = x[i*qk + j]; - if (amax < fabsf(v)) { - amax = fabsf(v); - } - } - - const float dequant_scale = amax / 6.0f; - uint32_t dequant_scale_exponent = 0; - memcpy(&dequant_scale_exponent, &dequant_scale, sizeof(dequant_scale_exponent)); - - // Rounding up - dequant_scale_exponent = (dequant_scale_exponent + 0x007FFFFF) & 0x7F800000; - // Rounding down - // dequant_scale_exponent = dequant_scale_exponent & 0x7F800000; - - float dequant_scale_rounded = 0.0f; - memcpy(&dequant_scale_rounded, &dequant_scale_exponent, sizeof(dequant_scale_rounded)); - float quant_scale = 0.0f; - if (dequant_scale_rounded != 0.0f) { - quant_scale = 1.0f / dequant_scale_rounded; - } - - y[i].d = (uint8_t)(dequant_scale_exponent >> 23); - - for (int j = 0; j < qk/2; ++j) { - const float x0 = x[i*qk + j*2]*quant_scale; - const float x1 = x[i*qk + j*2+1]*quant_scale; - - uint32_t xi0 = 0; - uint32_t xi1 = 0; - memcpy(&xi0, &x0, sizeof(xi0)); - memcpy(&xi1, &x1, sizeof(xi1)); - - uint32_t s0 = xi0 & 
0x80000000; - uint32_t s1 = xi1 & 0x80000000; - uint32_t e0 = (xi0 >> 23) & 0xFF; - uint32_t e1 = (xi1 >> 23) & 0xFF; - uint32_t m0 = (xi0 & 0x7FFFFF); - uint32_t m1 = (xi1 & 0x7FFFFF); - - // 0.25 <= x < 0.75 maps to 0.5, a denormal number - // Move implicit bit 1 at the beginning to mantissa for denormals - // adjusted_exponents - uint32_t ae0 = E8_BIAS - (e0 + 1); - uint32_t ae1 = E8_BIAS - (e1 + 1); - if (e0 < E8_BIAS) { - m0 = (0x400000 | (m0 >> 1)) >> ae0; - } - if (e1 < E8_BIAS) { - m1 = (0x400000 | (m1 >> 1)) >> ae1; - } - - // For normal numbers, we change the bias from 127 to 1, and for subnormals, we keep exponent as 0. - e0 = MAX(e0, E8_BIAS - E2_BIAS) - (E8_BIAS - E2_BIAS); - e1 = MAX(e1, E8_BIAS - E2_BIAS) - (E8_BIAS - E2_BIAS); - - // Combine sign, exponent, and mantissa, while saturating - // rounding nearest with tie breaking up by adding +1 to one bit right of the LSB, then shift right - uint32_t tmp0 = MIN((((e0 << 2) | (m0 >> 21)) + 1) >> 1, 0x7); - uint32_t tmp1 = MIN((((e1 << 2) | (m1 >> 21)) + 1) >> 1, 0x7); - uint8_t v0 = (uint8_t)((s0 >> 28) | tmp0); - uint8_t v1 = (uint8_t)((s1 >> 28) | tmp1); - y[i].qs[j] = v0; - y[i].qs[j] |= v1 << 4; - } - } -} - -void dequantize_row_mxfp4(const block_mxfp4 * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k) { - assert(k % MXFP4 == 0); - - const int nb = k / MXFP4; - const uint16_t dst_bias = 15; - const uint16_t dst_0p5 = 0x3800; - const uint16_t dst_m_bits = 10; - - for (int i = 0; i < nb; i++) { - union { - uint32_t as_bits; - float as_value; - } scale; - scale.as_bits = (((uint32_t)x[i].d) << 23); - for (int j = 0; j < MXFP4/2; ++j) { - uint16_t em0 = x[i].qs[j] & 0x07; - uint16_t em1 = x[i].qs[j] & 0x70; - // float16 values - uint16_t x0 = (em0 << (dst_m_bits - 1)) | ((x[i].qs[j] & 0x08) << 12); - uint16_t x1 = (em1 << (dst_m_bits - 5)) | ((x[i].qs[j] & 0x80) << 8); - - // Three cases: - // x is normal and non-zero: Correct bias - if ((em0 & 0x06) != 0) { - x0 = x0 + ((dst_bias - 1) << dst_m_bits); - } - if ((em1 & 0x60) != 0) { - x1 = x1 + ((dst_bias - 1) << dst_m_bits); - } - // x is subnormal (x == 0bs001 where s is the sign): Map to +-0.5 in the dst type - if (em0 == 0x01) { - x0 = dst_0p5 | (x0 & 0x8000); - } - if (em1 == 0x10) { - x1 = dst_0p5 | (x1 & 0x8000); - } - // x is zero, do nothing - - if (isnan(scale.as_value)) { - y[i*MXFP4 + j*2] = scale.as_value; - y[i*MXFP4 + j*2+1] = scale.as_value; - } else { - y[i*MXFP4 + j*2] = GGML_FP16_TO_FP32(x0)*scale.as_value; - y[i*MXFP4 + j*2+1] = GGML_FP16_TO_FP32(x1)*scale.as_value; - } - } - } -} - - -size_t quantize_mxfp4(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrow, int64_t n_per_row, const float * quant_weights) { - quantize_row_mxfp4_ref(src, dst, (int64_t)nrow*n_per_row); - return nrow * ggml_row_size(GGML_TYPE_MXFP4, n_per_row); -} - // =============================== data validation static bool validate_float(float f, size_t i) { @@ -5101,6 +5034,15 @@ static bool validate_fp16(ggml_fp16_t f, size_t i) { return true; } +static bool validate_e_e8m0(uint8_t e, size_t i) { + if (e == 0xff) { + fprintf(stderr, "ggml_validate_row_data: found invalid e value %d at block %zu\n", e, i); + return false; + } + + return true; +} + #define VALIDATE_ROW_DATA_D_F16_IMPL(type, data, nb) \ const type * q = (const type *) (data); \ for (size_t i = 0; i < (nb); ++i) { \ @@ -5117,6 +5059,14 @@ static bool validate_fp16(ggml_fp16_t f, size_t i) { } \ } +#define VALIDATE_ROW_DATA_E_E8M0_IMPL(type, data, nb) \ + const type * q = (const type *) (data); \ + 
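The validate_e_e8m0 helper added below closes the earlier "TODO - anything to validate?" gap for MXFP4: an E8M0 scale is a bare biased-exponent byte, and every encoding except 0xFF (reserved as NaN in the OCP microscaling spec) names a finite power of two, so rejecting 0xFF is the entire validity rule. A sketch of the decode this protects, using the plain 2^(e-127) reading (ggml's mxfp4 path uses a half-scaled variant, but the validity rule is identical):

    #include <math.h>
    #include <stdbool.h>
    #include <stdint.h>

    static bool e8m0_valid(uint8_t e) {
        return e != 0xFF;                    // 0xFF is the NaN encoding
    }

    static float e8m0_value(uint8_t e) {
        return ldexpf(1.0f, (int) e - 127);  // e=0 -> 2^-127, e=254 -> 2^127
    }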
for (size_t i = 0; i < (nb); ++i) { \ + if (!validate_e_e8m0(q[i].e, i)) { \ + return false; \ + } \ + } + #define VALIDATE_ROW_DATA_DVEC_F16_IMPL(type, data, nb, nr) \ const type * q = (const type *) (data); \ for (size_t i = 0; i < (nb); ++i) { \ @@ -5270,6 +5220,10 @@ bool ggml_validate_row_data(enum ggml_type type, const void * data, size_t nbyte { VALIDATE_ROW_DATA_D_F16_IMPL(block_q8_0, data, nb); } break; + case GGML_TYPE_MXFP4: + { + VALIDATE_ROW_DATA_E_E8M0_IMPL(block_mxfp4, data, nb); + } break; case GGML_TYPE_Q2_K: { VALIDATE_ROW_DATA_DM_F16_IMPL(block_q2_K, data, nb, d, dmin); @@ -5352,9 +5306,7 @@ bool ggml_validate_row_data(enum ggml_type type, const void * data, size_t nbyte { VALIDATE_ROW_DATA_D_F16_IMPL(block_iq4_nl, data, nb); } break; - case GGML_TYPE_MXFP4: - // TODO - anything to validate? - break; + case GGML_TYPE_I8: case GGML_TYPE_I16: case GGML_TYPE_I32: diff --git a/ml/backend/ggml/ggml/src/ggml-quants.h b/ml/backend/ggml/ggml/src/ggml-quants.h index 2fc40f754..3b688f31c 100644 --- a/ml/backend/ggml/ggml/src/ggml-quants.h +++ b/ml/backend/ggml/ggml/src/ggml-quants.h @@ -21,6 +21,8 @@ GGML_API void quantize_row_q5_1_ref(const float * GGML_RESTRICT x, block_q5_1 * GGML_API void quantize_row_q8_0_ref(const float * GGML_RESTRICT x, block_q8_0 * GGML_RESTRICT y, int64_t k); GGML_API void quantize_row_q8_1_ref(const float * GGML_RESTRICT x, block_q8_1 * GGML_RESTRICT y, int64_t k); +GGML_API void quantize_row_mxfp4_ref(const float * GGML_RESTRICT x, block_mxfp4 * GGML_RESTRICT y, int64_t k); + GGML_API void quantize_row_q2_K_ref(const float * GGML_RESTRICT x, block_q2_K * GGML_RESTRICT y, int64_t k); GGML_API void quantize_row_q3_K_ref(const float * GGML_RESTRICT x, block_q3_K * GGML_RESTRICT y, int64_t k); GGML_API void quantize_row_q4_K_ref(const float * GGML_RESTRICT x, block_q4_K * GGML_RESTRICT y, int64_t k); @@ -37,8 +39,6 @@ GGML_API void quantize_row_iq4_xs_ref (const float * GGML_RESTRICT x, block_iq4_ GGML_API void quantize_row_iq3_s_ref (const float * GGML_RESTRICT x, block_iq3_s * GGML_RESTRICT y, int64_t k); GGML_API void quantize_row_iq2_s_ref (const float * GGML_RESTRICT x, block_iq2_s * GGML_RESTRICT y, int64_t k); -GGML_API void quantize_row_mxfp4_ref(const float * GGML_RESTRICT x, block_mxfp4 * GGML_RESTRICT y, int64_t k); - // Dequantization GGML_API void dequantize_row_q4_0(const block_q4_0 * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); GGML_API void dequantize_row_q4_1(const block_q4_1 * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); @@ -47,6 +47,8 @@ GGML_API void dequantize_row_q5_1(const block_q5_1 * GGML_RESTRICT x, float * GG GGML_API void dequantize_row_q8_0(const block_q8_0 * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); //GGML_API void dequantize_row_q8_1(const block_q8_1 * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); +GGML_API void dequantize_row_mxfp4(const block_mxfp4 * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); + GGML_API void dequantize_row_q2_K(const block_q2_K * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); GGML_API void dequantize_row_q3_K(const block_q3_K * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); GGML_API void dequantize_row_q4_K(const block_q4_K * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); @@ -67,8 +69,6 @@ GGML_API void dequantize_row_iq4_nl (const block_iq4_nl * GGML_RESTRICT x, floa GGML_API void dequantize_row_iq4_xs (const block_iq4_xs * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); GGML_API void dequantize_row_iq3_s (const block_iq3_s * 
GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); -GGML_API void dequantize_row_mxfp4(const block_mxfp4 * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k); - // Quantization utilizing an importance matrix (a.k.a. "Activation aWare Quantization") GGML_API size_t quantize_iq2_xxs(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix); GGML_API size_t quantize_iq2_xs (const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix); diff --git a/ml/backend/ggml/ggml/src/ggml.c b/ml/backend/ggml/ggml/src/ggml.c index 0f3c98340..55a76f824 100644 --- a/ml/backend/ggml/ggml/src/ggml.c +++ b/ml/backend/ggml/ggml/src/ggml.c @@ -61,15 +61,17 @@ #define m512i(p) (__m512i)(p) #endif -// precomputed f32 table for f16 (256 KB) (ggml-impl.h) -float ggml_table_f32_f16[1 << 16]; +#if defined(__linux__) || \ + defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) || \ + (defined(__APPLE__) && !TARGET_OS_TV && !TARGET_OS_WATCH) -#if (defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__)) && \ - (!defined(TARGET_OS_TV) && !defined(TARGET_OS_WATCH)) #include #include #include #include +#if defined(__linux__) +#include +#endif #if defined(__ANDROID__) #include @@ -128,15 +130,46 @@ static void ggml_print_backtrace_symbols(void) { } #endif -static void ggml_print_backtrace(void) { +void ggml_print_backtrace(void) { const char * GGML_NO_BACKTRACE = getenv("GGML_NO_BACKTRACE"); if (GGML_NO_BACKTRACE) { return; } - char attach[32]; - snprintf(attach, sizeof(attach), "attach %d", getpid()); - int pid = fork(); - if (pid == 0) { +#if defined(__linux__) + FILE * f = fopen("/proc/self/status", "r"); + size_t size = 0; + char * line = NULL; + ssize_t length = 0; + while ((length = getline(&line, &size, f)) > 0) { + if (!strncmp(line, "TracerPid:", sizeof("TracerPid:") - 1) && + (length != sizeof("TracerPid:\t0\n") - 1 || line[length - 2] != '0')) { + // Already being debugged, and the breakpoint is the later abort() + free(line); + fclose(f); + return; + } + } + free(line); + fclose(f); + int lock[2] = { -1, -1 }; + (void) !pipe(lock); // Don't start gdb until after PR_SET_PTRACER +#endif + const int parent_pid = getpid(); + const int child_pid = fork(); + if (child_pid < 0) { // error +#if defined(__linux__) + close(lock[1]); + close(lock[0]); +#endif + return; + } else if (child_pid == 0) { // child + char attach[32]; + snprintf(attach, sizeof(attach), "attach %d", parent_pid); +#if defined(__linux__) + close(lock[1]); + (void) !read(lock[0], lock, 1); + close(lock[0]); +#endif // try gdb execlp("gdb", "gdb", "--batch", "-ex", "set style enabled on", @@ -149,42 +182,59 @@ static void ggml_print_backtrace(void) { execlp("lldb", "lldb", "--batch", "-o", "bt", "-o", "quit", - "-p", attach, + "-p", &attach[sizeof("attach ") - 1], (char *) NULL); - exit(EXIT_FAILURE); - } else { - int wstatus; - waitpid(pid, &wstatus, 0); - if (WIFEXITED(wstatus)) { - if (WEXITSTATUS(wstatus) == EXIT_FAILURE) { - // gdb failed, fallback to backtrace_symbols - ggml_print_backtrace_symbols(); - } - } + // gdb failed, fallback to backtrace_symbols + ggml_print_backtrace_symbols(); + _Exit(0); + } else { // parent +#if defined(__linux__) + prctl(PR_SET_PTRACER, child_pid); + close(lock[1]); + close(lock[0]); +#endif + waitpid(child_pid, NULL, 0); } } #else -static void ggml_print_backtrace(void) { +void ggml_print_backtrace(void) { // platform not 
supported } #endif +static ggml_abort_callback_t g_abort_callback = NULL; + +// Set the abort callback (passing null will restore original abort functionality: printing a message to stdout) +GGML_API ggml_abort_callback_t ggml_set_abort_callback(ggml_abort_callback_t callback) { + ggml_abort_callback_t ret_val = g_abort_callback; + g_abort_callback = callback; + return ret_val; +} + void ggml_abort(const char * file, int line, const char * fmt, ...) { fflush(stdout); - fprintf(stderr, "%s:%d: ", file, line); + char message[2048]; + int offset = snprintf(message, sizeof(message), "%s:%d: ", file, line); va_list args; va_start(args, fmt); - vfprintf(stderr, fmt, args); + vsnprintf(message + offset, sizeof(message) - offset, fmt, args); va_end(args); - fprintf(stderr, "\n"); + if (g_abort_callback) { + g_abort_callback(message); + } else { + // default: print error and backtrace to stderr + fprintf(stderr, "%s\n", message); + ggml_print_backtrace(); + } - ggml_print_backtrace(); abort(); } +// ggml_print_backtrace is registered with std::set_terminate by ggml.cpp + // // logging // @@ -423,6 +473,14 @@ bool ggml_guid_matches(ggml_guid_t guid_a, ggml_guid_t guid_b) { return memcmp(guid_a, guid_b, sizeof(ggml_guid)) == 0; } +const char * ggml_version(void) { + return GGML_VERSION; +} + +const char * ggml_commit(void) { + return GGML_COMMIT; +} + // // timing // @@ -524,9 +582,6 @@ FILE * ggml_fopen(const char * fname, const char * mode) { #endif } -static void ggml_vec_dot_f32(int n, float * GGML_RESTRICT s, size_t bs, const float * GGML_RESTRICT x, size_t bx, const float * GGML_RESTRICT y, size_t by, int nrc); -static void ggml_vec_dot_f16(int n, float * GGML_RESTRICT s, size_t bs, ggml_fp16_t * GGML_RESTRICT x, size_t bx, ggml_fp16_t * GGML_RESTRICT y, size_t by, int nrc); -static void ggml_vec_dot_bf16(int n, float * GGML_RESTRICT s, size_t bs, ggml_bf16_t * GGML_RESTRICT x, size_t bx, ggml_bf16_t * GGML_RESTRICT y, size_t by, int nrc); static const struct ggml_type_traits type_traits[GGML_TYPE_COUNT] = { [GGML_TYPE_I8] = { @@ -589,13 +644,11 @@ static const struct ggml_type_traits type_traits[GGML_TYPE_COUNT] = { .to_float = (ggml_to_float_t) dequantize_row_q4_1, .from_float_ref = (ggml_from_float_t) quantize_row_q4_1_ref, }, - [GGML_TYPE_MXFP4] = { // formerly deprecated GGML_TYPE_Q4_2 - .type_name = "mxfp4", - .blck_size = MXFP4, - .type_size = sizeof(block_mxfp4), - .is_quantized = true, - .to_float = (ggml_to_float_t) dequantize_row_mxfp4, - .from_float_ref = (ggml_from_float_t) quantize_row_mxfp4_ref, + [4] = { // GGML_TYPE_Q4_2 + .type_name = "DEPRECATED", + .blck_size = 0, + .type_size = 0, + .is_quantized = false, }, [5] = { // GGML_TYPE_Q4_3 .type_name = "DEPRECATED", @@ -634,6 +687,14 @@ static const struct ggml_type_traits type_traits[GGML_TYPE_COUNT] = { .is_quantized = true, .from_float_ref = (ggml_from_float_t) quantize_row_q8_1_ref, }, + [GGML_TYPE_MXFP4] = { + .type_name = "mxfp4", + .blck_size = QK_MXFP4, + .type_size = sizeof(block_mxfp4), + .is_quantized = true, + .to_float = (ggml_to_float_t) dequantize_row_mxfp4, + .from_float_ref = (ggml_from_float_t)quantize_row_mxfp4_ref, + }, [GGML_TYPE_Q2_K] = { .type_name = "q2_K", .blck_size = QK_K, @@ -852,12 +913,6 @@ struct ggml_context { struct ggml_object * objects_end; }; -struct ggml_context_container { - bool used; - - struct ggml_context context; -}; - // // data types // @@ -867,6 +922,7 @@ static const char * GGML_OP_NAME[GGML_OP_COUNT] = { "DUP", "ADD", + "ADD_ID", "ADD1", "ACC", "SUB", @@ -906,6 +962,7 @@ static 
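The abort-callback hook above lets an embedding application capture ggml's fatal errors (the formatted file:line message) instead of having them printed to stderr; per the comment in the diff, passing NULL restores the default behavior, and abort() is still called after the callback returns. A usage sketch, with the callback typedef assumed to mirror how g_abort_callback is invoked:

    #include <stdio.h>

    // Assumed to mirror ggml.h; the setter is the one added in this diff.
    typedef void (*ggml_abort_callback_t)(const char * error_message);
    extern ggml_abort_callback_t ggml_set_abort_callback(ggml_abort_callback_t callback);

    static void my_abort_logger(const char * msg) {
        fprintf(stderr, "[ggml fatal] %s\n", msg);
        fflush(stderr);  // abort() follows immediately, so flush now
    }

    // during initialization:
    //   ggml_abort_callback_t prev = ggml_set_abort_callback(my_abort_logger);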
const char * GGML_OP_NAME[GGML_OP_COUNT] = { "TRANSPOSE", "GET_ROWS", "GET_ROWS_BACK", + "SET_ROWS", "DIAG", "DIAG_MASK_INF", "DIAG_MASK_ZERO", @@ -917,6 +974,7 @@ static const char * GGML_OP_NAME[GGML_OP_COUNT] = { "CONV_TRANSPOSE_1D", "IM2COL", "IM2COL_BACK", + "CONV_2D", "CONV_2D_DW", "CONV_TRANSPOSE_2D", "POOL_1D", @@ -925,6 +983,7 @@ static const char * GGML_OP_NAME[GGML_OP_COUNT] = { "UPSCALE", "PAD", "PAD_REFLECT_1D", + "ROLL", "ARANGE", "TIMESTEP_EMBEDDING", "ARGSORT", @@ -953,15 +1012,18 @@ static const char * GGML_OP_NAME[GGML_OP_COUNT] = { "CROSS_ENTROPY_LOSS", "CROSS_ENTROPY_LOSS_BACK", "OPT_STEP_ADAMW", + + "GLU", }; -static_assert(GGML_OP_COUNT == 82, "GGML_OP_COUNT != 82"); +static_assert(GGML_OP_COUNT == 87, "GGML_OP_COUNT != 87"); static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = { "none", "x", "x+y", + "x[i]+y", "x+y", "view(x,nb,offset)+=y->x", "x-y", @@ -1001,6 +1063,7 @@ static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = { "transpose(x)", "get_rows(x)", "get_rows_back(x)", + "set_rows(x)", "diag(x)", "diag_mask_inf(x)", "diag_mask_zero(x)", @@ -1012,6 +1075,7 @@ static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = { "conv_transpose_1d(x)", "im2col(x)", "im2col_back(x)", + "conv_2d(x)", "conv_2d_dw(x)", "conv_transpose_2d(x)", "pool_1d(x)", @@ -1020,6 +1084,7 @@ static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = { "upscale(x)", "pad(x)", "pad_reflect_1d(x)", + "roll(x)", "arange(start, stop, step)", "timestep_embedding(timesteps, dim, max_period)", "argsort(x)", @@ -1048,9 +1113,11 @@ static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = { "cross_entropy_loss(x,y)", "cross_entropy_loss_back(x,y)", "adamw(x)", + + "glu(x)", }; -static_assert(GGML_OP_COUNT == 82, "GGML_OP_COUNT != 82"); +static_assert(GGML_OP_COUNT == 87, "GGML_OP_COUNT != 87"); static_assert(GGML_OP_POOL_COUNT == 2, "GGML_OP_POOL_COUNT != 2"); @@ -1070,9 +1137,22 @@ static const char * GGML_UNARY_OP_NAME[GGML_UNARY_OP_COUNT] = { "HARDSWISH", "HARDSIGMOID", "EXP", + "GELU_ERF", }; -static_assert(GGML_UNARY_OP_COUNT == 14, "GGML_UNARY_OP_COUNT != 14"); +static_assert(GGML_UNARY_OP_COUNT == 15, "GGML_UNARY_OP_COUNT != 15"); + + +static const char * GGML_GLU_OP_NAME[GGML_GLU_OP_COUNT] = { + "REGLU", + "GEGLU", + "SWIGLU", + "SWIGLU_OAI", + "GEGLU_ERF", + "GEGLU_QUICK", +}; + +static_assert(GGML_GLU_OP_COUNT == 6, "GGML_GLU_OP_COUNT != 6"); static_assert(sizeof(struct ggml_object)%GGML_MEM_ALIGN == 0, "ggml_object size must be a multiple of GGML_MEM_ALIGN"); @@ -1177,11 +1257,19 @@ const char * ggml_unary_op_name(enum ggml_unary_op op) { return GGML_UNARY_OP_NAME[op]; } +const char * ggml_glu_op_name(enum ggml_glu_op op) { + return GGML_GLU_OP_NAME[op]; +} + const char * ggml_op_desc(const struct ggml_tensor * t) { if (t->op == GGML_OP_UNARY) { enum ggml_unary_op uop = ggml_get_unary_op(t); return ggml_unary_op_name(uop); } + if (t->op == GGML_OP_GLU) { + enum ggml_glu_op gop = ggml_get_glu_op(t); + return ggml_glu_op_name(gop); + } return ggml_op_name(t->op); } @@ -1232,6 +1320,7 @@ enum ggml_type ggml_ftype_to_ggml_type(enum ggml_ftype ftype) { case GGML_FTYPE_MOSTLY_Q5_0: wtype = GGML_TYPE_Q5_0; break; case GGML_FTYPE_MOSTLY_Q5_1: wtype = GGML_TYPE_Q5_1; break; case GGML_FTYPE_MOSTLY_Q8_0: wtype = GGML_TYPE_Q8_0; break; + case GGML_FTYPE_MOSTLY_MXFP4: wtype = GGML_TYPE_MXFP4; break; case GGML_FTYPE_MOSTLY_Q2_K: wtype = GGML_TYPE_Q2_K; break; case GGML_FTYPE_MOSTLY_Q3_K: wtype = GGML_TYPE_Q3_K; break; case GGML_FTYPE_MOSTLY_Q4_K: wtype = GGML_TYPE_Q4_K; break; @@ -1318,6 +1407,12 @@ bool 
ggml_is_contiguous_channels(const struct ggml_tensor * tensor) { tensor->nb[2] == ggml_type_size(tensor->type); } +bool ggml_is_contiguous_rows(const struct ggml_tensor * tensor) { + return + tensor->ne[0] == ggml_blck_size(tensor->type) || + tensor->nb[0] == ggml_type_size(tensor->type); +} + static inline bool ggml_is_padded_1d(const struct ggml_tensor * tensor) { static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function"); @@ -1389,14 +1484,6 @@ struct ggml_context * ggml_init(struct ggml_init_params params) { // initialize time system (required on Windows) ggml_time_init(); - for (int i = 0; i < (1 << 16); ++i) { - union { - uint16_t u16; - ggml_fp16_t fp16; - } u = {i}; - ggml_table_f32_f16[i] = GGML_COMPUTE_FP16_TO_FP32(u.fp16); - } - is_first_call = false; } @@ -1700,6 +1787,11 @@ enum ggml_unary_op ggml_get_unary_op(const struct ggml_tensor * tensor) { return (enum ggml_unary_op) ggml_get_op_params_i32(tensor, 0); } +enum ggml_glu_op ggml_get_glu_op(const struct ggml_tensor * tensor) { + GGML_ASSERT(tensor->op == GGML_OP_GLU); + return (enum ggml_glu_op) ggml_get_op_params_i32(tensor, 0); +} + const char * ggml_get_name(const struct ggml_tensor * tensor) { return tensor->name; } @@ -1879,6 +1971,27 @@ struct ggml_tensor * ggml_add_cast( return ggml_add_cast_impl(ctx, a, b, type); } +struct ggml_tensor * ggml_add_id( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b, + struct ggml_tensor * ids) { + + GGML_ASSERT(a->ne[0] == b->ne[0]); + GGML_ASSERT(a->ne[1] == ids->ne[0]); + GGML_ASSERT(a->ne[2] == ids->ne[1]); + GGML_ASSERT(ids->type == GGML_TYPE_I32); + + struct ggml_tensor * result = ggml_dup_tensor(ctx, a); + + result->op = GGML_OP_ADD_ID; + result->src[0] = a; + result->src[1] = b; + result->src[2] = ids; + + return result; +} + // ggml_add1 static struct ggml_tensor * ggml_add1_impl( @@ -2282,6 +2395,26 @@ struct ggml_tensor * ggml_repeat( return result; } +struct ggml_tensor * ggml_repeat_4d( + struct ggml_context * ctx, + struct ggml_tensor * a, + int64_t ne0, int64_t ne1, int64_t ne2, int64_t ne3) { + const bool can_repeat = ggml_is_empty(a) || ( + (ne0 % a->ne[0] == 0) && + (ne1 % a->ne[1] == 0) && + (ne2 % a->ne[2] == 0) && + (ne3 % a->ne[3] == 0) + ); + GGML_ASSERT(can_repeat); + + struct ggml_tensor * result = ggml_new_tensor_4d(ctx, a->type, ne0, ne1, ne2, ne3); + + result->op = GGML_OP_REPEAT; + result->src[0] = a; + + return result; +} + // ggml_repeat_back struct ggml_tensor * ggml_repeat_back( @@ -2472,6 +2605,20 @@ struct ggml_tensor * ggml_gelu_inplace( return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_GELU); } +// ggml_gelu_erf + +struct ggml_tensor * ggml_gelu_erf( + struct ggml_context * ctx, + struct ggml_tensor * a) { + return ggml_unary(ctx, a, GGML_UNARY_OP_GELU_ERF); +} + +struct ggml_tensor * ggml_gelu_erf_inplace( + struct ggml_context * ctx, + struct ggml_tensor * a) { + return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_GELU_ERF); +} + // ggml_gelu_quick struct ggml_tensor * ggml_gelu_quick( @@ -2545,6 +2692,169 @@ struct ggml_tensor * ggml_exp_inplace( return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_EXP); } +// ggml_glu + +static struct ggml_tensor * ggml_glu_impl( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b, + enum ggml_glu_op op, + bool swapped) { + GGML_ASSERT(ggml_is_contiguous_1(a)); + + if (b) { + GGML_ASSERT(ggml_is_contiguous_1(b)); + GGML_ASSERT(ggml_are_same_shape(a, b)); + GGML_ASSERT(a->type == b->type); + } + + int64_t ne[GGML_MAX_DIMS] = { a->ne[0] / 2 
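The ggml_glu_impl constructor here supports two input layouts: fused, where one tensor carries both GLU halves along dimension 0 and the result keeps ne[0]/2, and split (b != NULL), where gate and value arrive as two same-shape tensors and the result keeps the full shape. A reference for the fused SwiGLU row semantics; which half plays which role is an assumption here, and the *_swapped variants exist precisely to flip it:

    #include <math.h>

    // One row of 2*n floats: first half taken as the silu input x,
    // second half as the linear input g; the output row has n floats.
    static void swiglu_row_sketch(const float * a, float * out, int n) {
        for (int i = 0; i < n; i++) {
            const float x = a[i];
            const float g = a[i + n];
            out[i] = (x / (1.0f + expf(-x))) * g;  // silu(x) * g
        }
    }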
}; for (int i = 1; i < GGML_MAX_DIMS; i++) ne[i] = a->ne[i]; + struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, GGML_MAX_DIMS, b ? a->ne : ne, NULL, 0); + + ggml_set_op_params_i32(result, 0, (int32_t) op); + ggml_set_op_params_i32(result, 1, (int32_t) swapped); + + result->op = GGML_OP_GLU; + result->src[0] = a; + result->src[1] = b; + + return result; +} + +struct ggml_tensor * ggml_glu( + struct ggml_context * ctx, + struct ggml_tensor * a, + enum ggml_glu_op op, + bool swapped) { + return ggml_glu_impl(ctx, a, NULL, op, swapped); +} + +struct ggml_tensor * ggml_glu_split( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b, + enum ggml_glu_op op) { + return ggml_glu_impl(ctx, a, b, op, false); +} + +// ggml_reglu + +struct ggml_tensor * ggml_reglu( + struct ggml_context * ctx, + struct ggml_tensor * a) { + return ggml_glu_impl(ctx, a, NULL, GGML_GLU_OP_REGLU, false); +} + +struct ggml_tensor * ggml_reglu_swapped( + struct ggml_context * ctx, + struct ggml_tensor * a) { + return ggml_glu_impl(ctx, a, NULL, GGML_GLU_OP_REGLU, true); +} + +struct ggml_tensor * ggml_reglu_split( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b) { + return ggml_glu_impl(ctx, a, b, GGML_GLU_OP_REGLU, false); +} + +// ggml_geglu + +struct ggml_tensor * ggml_geglu( + struct ggml_context * ctx, + struct ggml_tensor * a) { + return ggml_glu_impl(ctx, a, NULL, GGML_GLU_OP_GEGLU, false); +} + +struct ggml_tensor * ggml_geglu_swapped( + struct ggml_context * ctx, + struct ggml_tensor * a) { + return ggml_glu_impl(ctx, a, NULL, GGML_GLU_OP_GEGLU, true); +} + +struct ggml_tensor * ggml_geglu_split( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b) { + return ggml_glu_impl(ctx, a, b, GGML_GLU_OP_GEGLU, false); +} + +// ggml_swiglu + +struct ggml_tensor * ggml_swiglu( + struct ggml_context * ctx, + struct ggml_tensor * a) { + return ggml_glu_impl(ctx, a, NULL, GGML_GLU_OP_SWIGLU, false); +} + +struct ggml_tensor * ggml_swiglu_swapped( + struct ggml_context * ctx, + struct ggml_tensor * a) { + return ggml_glu_impl(ctx, a, NULL, GGML_GLU_OP_SWIGLU, true); +} + +struct ggml_tensor * ggml_swiglu_split( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b) { + return ggml_glu_impl(ctx, a, b, GGML_GLU_OP_SWIGLU, false); +} + +// ggml_geglu_erf + +struct ggml_tensor * ggml_geglu_erf( + struct ggml_context * ctx, + struct ggml_tensor * a) { + return ggml_glu_impl(ctx, a, NULL, GGML_GLU_OP_GEGLU_ERF, false); +} + +struct ggml_tensor * ggml_geglu_erf_swapped( + struct ggml_context * ctx, + struct ggml_tensor * a) { + return ggml_glu_impl(ctx, a, NULL, GGML_GLU_OP_GEGLU_ERF, true); +} + +struct ggml_tensor * ggml_geglu_erf_split( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b) { + return ggml_glu_impl(ctx, a, b, GGML_GLU_OP_GEGLU_ERF, false); +} + +// ggml_geglu_quick + +struct ggml_tensor * ggml_geglu_quick( + struct ggml_context * ctx, + struct ggml_tensor * a) { + return ggml_glu_impl(ctx, a, NULL, GGML_GLU_OP_GEGLU_QUICK, false); +} + +struct ggml_tensor * ggml_geglu_quick_swapped( + struct ggml_context * ctx, + struct ggml_tensor * a) { + return ggml_glu_impl(ctx, a, NULL, GGML_GLU_OP_GEGLU_QUICK, true); +} + +struct ggml_tensor * ggml_geglu_quick_split( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b) { + return ggml_glu_impl(ctx, a, b, GGML_GLU_OP_GEGLU_QUICK, false); +} + +struct ggml_tensor * ggml_swiglu_oai( + struct 
ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b, + float alpha, + float limit) { + struct ggml_tensor * result = ggml_glu_impl(ctx, a, b, GGML_GLU_OP_SWIGLU_OAI, false); + ggml_set_op_params_f32(result, 2, alpha); + ggml_set_op_params_f32(result, 3, limit); + + return result; +} + // ggml_norm static struct ggml_tensor * ggml_norm_impl( @@ -2802,12 +3112,14 @@ static struct ggml_tensor * ggml_scale_impl( struct ggml_context * ctx, struct ggml_tensor * a, float s, + float b, bool inplace) { GGML_ASSERT(ggml_is_padded_1d(a)); struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); - ggml_set_op_params(result, &s, sizeof(s)); + float params[2] = { s, b }; + ggml_set_op_params(result, ¶ms, sizeof(params)); result->op = GGML_OP_SCALE; result->src[0] = a; @@ -2819,14 +3131,30 @@ struct ggml_tensor * ggml_scale( struct ggml_context * ctx, struct ggml_tensor * a, float s) { - return ggml_scale_impl(ctx, a, s, false); + return ggml_scale_impl(ctx, a, s, 0.0, false); } struct ggml_tensor * ggml_scale_inplace( struct ggml_context * ctx, struct ggml_tensor * a, float s) { - return ggml_scale_impl(ctx, a, s, true); + return ggml_scale_impl(ctx, a, s, 0.0, true); +} + +struct ggml_tensor * ggml_scale_bias( + struct ggml_context * ctx, + struct ggml_tensor * a, + float s, + float b) { + return ggml_scale_impl(ctx, a, s, b, false); +} + +struct ggml_tensor * ggml_scale_bias_inplace( + struct ggml_context * ctx, + struct ggml_tensor * a, + float s, + float b) { + return ggml_scale_impl(ctx, a, s, b, true); } // ggml_set @@ -3328,6 +3656,35 @@ struct ggml_tensor * ggml_get_rows_back( return result; } +// ggml_set_rows + +struct ggml_tensor * ggml_set_rows( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b, + struct ggml_tensor * c) { + GGML_ASSERT(a->ne[0] == b->ne[0]); + GGML_ASSERT(a->ne[2] == b->ne[2]); + GGML_ASSERT(a->ne[3] == b->ne[3]); + GGML_ASSERT(b->ne[1] == c->ne[0]); + GGML_ASSERT(b->ne[2] % c->ne[1] == 0); + GGML_ASSERT(b->ne[3] % c->ne[2] == 0); + GGML_ASSERT(c->ne[3] == 1); + GGML_ASSERT(b->type == GGML_TYPE_F32); + GGML_ASSERT(c->type == GGML_TYPE_I64); + + GGML_ASSERT(ggml_is_contiguous_rows(a)); + GGML_ASSERT(ggml_is_contiguous_rows(b)); + + struct ggml_tensor * result = ggml_view_tensor(ctx, a); + + result->op = GGML_OP_SET_ROWS; + result->src[0] = b; + result->src[1] = c; + + return result; +} + // ggml_diag struct ggml_tensor * ggml_diag( @@ -3422,9 +3779,10 @@ static struct ggml_tensor * ggml_soft_max_impl( if (mask) { GGML_ASSERT(mask->type == GGML_TYPE_F16 || mask->type == GGML_TYPE_F32); GGML_ASSERT(ggml_is_contiguous(mask)); - GGML_ASSERT(ggml_is_matrix(mask)); GGML_ASSERT(mask->ne[0] == a->ne[0]); GGML_ASSERT(mask->ne[1] >= a->ne[1]); + GGML_ASSERT(a->ne[2]%mask->ne[2] == 0); + GGML_ASSERT(a->ne[3]%mask->ne[3] == 0); } if (max_bias > 0.0f) { @@ -3464,6 +3822,22 @@ struct ggml_tensor * ggml_soft_max_ext( return ggml_soft_max_impl(ctx, a, mask, scale, max_bias, false); } +void ggml_soft_max_add_sinks( + struct ggml_tensor * a, + struct ggml_tensor * sinks) { + if (!sinks) { + a->src[2] = NULL; + return; + } + + GGML_ASSERT(a->op == GGML_OP_SOFT_MAX); + GGML_ASSERT(a->src[2] == NULL); + GGML_ASSERT(a->src[0]->ne[2] == sinks->ne[0]); + GGML_ASSERT(sinks->type == GGML_TYPE_F32); + + a->src[2] = sinks; +} + // ggml_soft_max_ext_back static struct ggml_tensor * ggml_soft_max_ext_back_impl( @@ -4064,6 +4438,44 @@ struct ggml_tensor * ggml_conv_2d_dw_direct( return result; } +// ggml_conv_2d_direct + 
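ggml_conv_2d_direct below sizes its result with ggml_calc_conv_output_size, the usual dilated-convolution extent per spatial dimension:

    // Output extent for one spatial dimension, matching what the im2col
    // paths compute as well: input size, kernel size, stride, padding, dilation.
    static long conv_out_size(long in, long k, int s, int p, int d) {
        return (in + 2*p - d*(k - 1) - 1) / s + 1;
    }
    // e.g. a 224-wide input with a 7-wide kernel, stride 2, padding 3,
    // dilation 1 gives (224 + 6 - 6 - 1)/2 + 1 = 112.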
+struct ggml_tensor * ggml_conv_2d_direct( + struct ggml_context * ctx, + struct ggml_tensor * a, // convolution kernel [KW, KH, IC, OC] + struct ggml_tensor * b, // input data [W, H, C, N] + int s0, // stride dimension 0 + int s1, // stride dimension 1 + int p0, // padding dimension 0 + int p1, // padding dimension 1 + int d0, // dilation dimension 0 + int d1) {// dilation dimension 1 + + GGML_ASSERT(a->ne[2] == b->ne[2]); + //GGML_ASSERT(a->type == b->type); + + int64_t ne[4]; + ne[0] = ggml_calc_conv_output_size(b->ne[0], a->ne[0], s0, p0, d0); + ne[1] = ggml_calc_conv_output_size(b->ne[1], a->ne[1], s1, p1, d1); + ne[2] = a->ne[3]; + ne[3] = b->ne[3]; + + struct ggml_tensor * result = ggml_new_tensor(ctx, b->type, 4, ne); + + ggml_set_op_params_i32(result, 0, s0); + ggml_set_op_params_i32(result, 1, s1); + ggml_set_op_params_i32(result, 2, p0); + ggml_set_op_params_i32(result, 3, p1); + ggml_set_op_params_i32(result, 4, d0); + ggml_set_op_params_i32(result, 5, d1); + + result->op = GGML_OP_CONV_2D; + result->src[0] = a; + result->src[1] = b; + + return result; +} + // ggml_conv_transpose_2d_p0 static int64_t ggml_calc_conv_transpose_output_size(int64_t ins, int64_t ks, int s, int p) { @@ -4180,24 +4592,21 @@ struct ggml_tensor * ggml_pool_2d_back( return result; } -// ggml_upscale +// ggml_upscale / ggml_interpolate -static struct ggml_tensor * ggml_upscale_impl( +static struct ggml_tensor * ggml_interpolate_impl( struct ggml_context * ctx, struct ggml_tensor * a, - int ne0, - int ne1, - int ne2, - int ne3, - enum ggml_scale_mode mode) { - GGML_ASSERT(a->ne[0] <= ne0); - GGML_ASSERT(a->ne[1] <= ne1); - GGML_ASSERT(a->ne[2] <= ne2); - GGML_ASSERT(a->ne[3] <= ne3); + int64_t ne0, + int64_t ne1, + int64_t ne2, + int64_t ne3, + uint32_t mode) { + GGML_ASSERT((mode & 0xFF) < GGML_SCALE_MODE_COUNT); struct ggml_tensor * result = ggml_new_tensor_4d(ctx, a->type, ne0, ne1, ne2, ne3); - ggml_set_op_params_i32(result, 0, mode); + ggml_set_op_params_i32(result, 0, (int32_t)mode); result->op = GGML_OP_UPSCALE; result->src[0] = a; @@ -4210,7 +4619,8 @@ struct ggml_tensor * ggml_upscale( struct ggml_tensor * a, int scale_factor, enum ggml_scale_mode mode) { - return ggml_upscale_impl(ctx, a, a->ne[0] * scale_factor, a->ne[1] * scale_factor, a->ne[2], a->ne[3], mode); + GGML_ASSERT(scale_factor > 1); + return ggml_interpolate_impl(ctx, a, a->ne[0] * scale_factor, a->ne[1] * scale_factor, a->ne[2], a->ne[3], mode); } struct ggml_tensor * ggml_upscale_ext( @@ -4221,7 +4631,18 @@ struct ggml_tensor * ggml_upscale_ext( int ne2, int ne3, enum ggml_scale_mode mode) { - return ggml_upscale_impl(ctx, a, ne0, ne1, ne2, ne3, mode); + return ggml_interpolate_impl(ctx, a, ne0, ne1, ne2, ne3, mode); +} + +struct ggml_tensor * ggml_interpolate( + struct ggml_context * ctx, + struct ggml_tensor * a, + int64_t ne0, + int64_t ne1, + int64_t ne2, + int64_t ne3, + uint32_t mode) { + return ggml_interpolate_impl(ctx, a, ne0, ne1, ne2, ne3, mode); } // ggml_pad @@ -4276,6 +4697,34 @@ struct ggml_tensor * ggml_pad_reflect_1d( return result; } +// ggml_roll + +struct ggml_tensor * ggml_roll( + struct ggml_context * ctx, + struct ggml_tensor * a, + int shift0, + int shift1, + int shift2, + int shift3) { + GGML_ASSERT(a->nb[0] == ggml_type_size(a->type)); + GGML_ASSERT(abs(shift0) < a->ne[0]); + GGML_ASSERT(abs(shift1) < a->ne[1]); + GGML_ASSERT(abs(shift2) < a->ne[2]); + GGML_ASSERT(abs(shift3) < a->ne[3]); + + struct ggml_tensor * result = ggml_dup_tensor(ctx, a); + + ggml_set_op_params_i32(result, 0, shift0); + 
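GGML_OP_ROLL, whose shift parameters are being packed in the surrounding lines, is a circular shift along each of the four dimensions; the asserts bound |shift| < ne, so one wrap correction per index suffices. A one-dimensional sketch of the assumed semantics (torch.roll-style direction, with a positive shift moving data toward higher indices):

    static void roll_1d_sketch(const float * src, float * dst, int ne, int shift) {
        for (int i = 0; i < ne; i++) {
            int j = i - shift;            // source index before wrapping
            if (j < 0)   j += ne;
            if (j >= ne) j -= ne;
            dst[i] = src[j];
        }
    }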
ggml_set_op_params_i32(result, 1, shift1); + ggml_set_op_params_i32(result, 2, shift2); + ggml_set_op_params_i32(result, 3, shift3); + + result->op = GGML_OP_ROLL; + result->src[0] = a; + + return result; +} + // ggml_arange struct ggml_tensor * ggml_arange( @@ -4370,13 +4819,17 @@ struct ggml_tensor * ggml_flash_attn_ext( GGML_ASSERT(ggml_can_mul_mat(k, q)); // TODO: check if vT can be multiplied by (k*qT) + GGML_ASSERT(q->ne[3] == k->ne[3]); + GGML_ASSERT(q->ne[3] == v->ne[3]); + if (mask) { GGML_ASSERT(ggml_is_contiguous(mask)); - GGML_ASSERT(mask->ne[2] == 1); - GGML_ASSERT(mask->ne[3] == 1); GGML_ASSERT(mask->ne[1] >= GGML_PAD(q->ne[1], GGML_KQ_MASK_PAD) && "the Flash-Attention kernel requires the mask to be padded to GGML_KQ_MASK_PAD and at least n_queries big"); //GGML_ASSERT(ggml_can_repeat_rows(mask, qk)); + + GGML_ASSERT(q->ne[2] % mask->ne[2] == 0); + GGML_ASSERT(q->ne[3] % mask->ne[3] == 0); } if (max_bias > 0.0f) { @@ -4418,6 +4871,22 @@ enum ggml_prec ggml_flash_attn_ext_get_prec( return (enum ggml_prec) prec_i32; } +void ggml_flash_attn_ext_add_sinks( + struct ggml_tensor * a, + struct ggml_tensor * sinks) { + if (!sinks) { + a->src[4] = NULL; + return; + } + + GGML_ASSERT(a->op == GGML_OP_FLASH_ATTN_EXT); + GGML_ASSERT(a->src[4] == NULL); + GGML_ASSERT(a->src[0]->ne[2] == sinks->ne[0]); + GGML_ASSERT(sinks->type == GGML_TYPE_F32); + + a->src[4] = sinks; +} + // ggml_flash_attn_back struct ggml_tensor * ggml_flash_attn_back( @@ -4504,7 +4973,6 @@ struct ggml_tensor * ggml_ssm_conv( const int64_t n_s = sx->ne[2]; // TODO: maybe support other strides than 1? - // FIXME: this is always true? GGML_ASSERT(sx->ne[0] == d_conv - 1 + n_t); GGML_ASSERT(sx->ne[1] == d_inner); GGML_ASSERT(n_t >= 0); @@ -4527,36 +4995,49 @@ struct ggml_tensor * ggml_ssm_scan( struct ggml_tensor * dt, struct ggml_tensor * A, struct ggml_tensor * B, - struct ggml_tensor * C) { + struct ggml_tensor * C, + struct ggml_tensor * ids) { GGML_ASSERT(ggml_is_contiguous(s)); - GGML_ASSERT(ggml_is_contiguous(x)); GGML_ASSERT(ggml_is_contiguous(dt)); GGML_ASSERT(ggml_is_contiguous(A)); - GGML_ASSERT(ggml_is_matrix(A)); - GGML_ASSERT(ggml_is_3d(B)); - GGML_ASSERT(ggml_is_3d(s)); + GGML_ASSERT(x->nb[0] == ggml_type_size(x->type)); GGML_ASSERT(B->nb[0] == ggml_type_size(B->type)); GGML_ASSERT(C->nb[0] == ggml_type_size(C->type)); - GGML_ASSERT(ggml_are_same_shape(x, dt)); + GGML_ASSERT(x->nb[1] == x->ne[0]*x->nb[0]); + GGML_ASSERT(B->nb[1] == B->ne[0]*B->nb[0]); + GGML_ASSERT(C->nb[1] == C->ne[0]*C->nb[0]); GGML_ASSERT(ggml_are_same_shape(B, C)); + GGML_ASSERT(ids->type == GGML_TYPE_I32); { const int64_t d_state = s->ne[0]; - const int64_t d_inner = s->ne[1]; - const int64_t n_seq_tokens = x->ne[1]; - const int64_t n_seqs = x->ne[2]; + const int64_t head_dim = x->ne[0]; + const int64_t n_head = x->ne[1]; + const int64_t n_seq_tokens = x->ne[2]; + const int64_t n_seqs = x->ne[3]; - GGML_ASSERT(s->ne[2] == n_seqs); - GGML_ASSERT(x->ne[0] == d_inner); - GGML_ASSERT(A->ne[0] == d_state); - GGML_ASSERT(A->ne[1] == d_inner); + GGML_ASSERT(dt->ne[0] == n_head); + GGML_ASSERT(dt->ne[1] == n_seq_tokens); + GGML_ASSERT(dt->ne[2] == n_seqs); + GGML_ASSERT(ggml_is_3d(dt)); + GGML_ASSERT(s->ne[1] == head_dim); + GGML_ASSERT(s->ne[2] == n_head); GGML_ASSERT(B->ne[0] == d_state); - GGML_ASSERT(B->ne[1] == n_seq_tokens); - GGML_ASSERT(B->ne[2] == n_seqs); + GGML_ASSERT(B->ne[2] == n_seq_tokens); + GGML_ASSERT(B->ne[3] == n_seqs); + GGML_ASSERT(ids->ne[0] == n_seqs); + GGML_ASSERT(ggml_is_vector(ids)); + GGML_ASSERT(A->ne[1] == 
n_head); + GGML_ASSERT(ggml_is_matrix(A)); + + if (A->ne[0] != 1) { + // Mamba-1 has more granular decay factors + GGML_ASSERT(A->ne[0] == d_state); + } } // concatenated y + ssm_states - struct ggml_tensor * result = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, ggml_nelements(x) + ggml_nelements(s)); + struct ggml_tensor * result = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, ggml_nelements(x) + s->ne[0]*s->ne[1]*s->ne[2]*ids->ne[0]); result->op = GGML_OP_SSM_SCAN; result->src[0] = s; @@ -4565,6 +5046,7 @@ struct ggml_tensor * ggml_ssm_scan( result->src[3] = A; result->src[4] = B; result->src[5] = C; + result->src[6] = ids; return result; } @@ -5388,7 +5870,7 @@ static void ggml_compute_backward( } break; case GGML_OP_MEAN: { if (src0_needs_grads) { - ggml_add1_or_set(ctx, cgraph, isrc0, ggml_scale_impl(ctx, grad, 1.0f/src0->ne[0], false)); + ggml_add1_or_set(ctx, cgraph, isrc0, ggml_scale_impl(ctx, grad, 1.0f/src0->ne[0], 0.0, false)); } } break; case GGML_OP_REPEAT: { @@ -5465,7 +5947,7 @@ static void ggml_compute_backward( if (src0_needs_grads) { float s; memcpy(&s, tensor->op_params, sizeof(float)); - ggml_add_or_set(ctx, cgraph, isrc0, ggml_scale_impl(ctx, grad, s, false)); + ggml_add_or_set(ctx, cgraph, isrc0, ggml_scale_impl(ctx, grad, s, 0.0, false)); } } break; case GGML_OP_SET: { @@ -5705,13 +6187,28 @@ static void ggml_compute_backward( } GGML_ASSERT(!src1_needs_grads && "backward pass for labels not implemented"); } break; + case GGML_OP_GLU: { + switch (ggml_get_glu_op(tensor)) { + case GGML_GLU_OP_SWIGLU: { + if (src0_needs_grads) { + GGML_ASSERT(src1 && "backward pass only implemented for split swiglu"); + ggml_add_or_set(ctx, cgraph, isrc0, ggml_silu_back(ctx, ggml_mul(ctx, grad, src1), src0)); + } + if (src1_needs_grads) { + ggml_add_or_set(ctx, cgraph, isrc1, ggml_mul(ctx, ggml_silu(ctx, src0), grad)); + } + } break; + default: { + GGML_ABORT("unsupported glu op for backward pass: %s", ggml_glu_op_name(ggml_get_glu_op(tensor))); + } //break; + } + } break; case GGML_OP_NONE: { // noop } break; case GGML_OP_COUNT: default: { - fprintf(stderr, "%s: unsupported ggml op for backward pass: %s\n", __func__, ggml_op_name(tensor->op)); - GGML_ABORT("fatal error"); + GGML_ABORT("%s: unsupported ggml op for backward pass: %s\n", __func__, ggml_op_name(tensor->op)); } //break; } @@ -5720,19 +6217,32 @@ static void ggml_compute_backward( GGML_ASSERT(!src2_needs_grads || ggml_are_same_shape(src2, cgraph->grads[isrc2])); } -static void ggml_visit_parents(struct ggml_cgraph * cgraph, struct ggml_tensor * node) { +static size_t ggml_visit_parents(struct ggml_cgraph * cgraph, struct ggml_tensor * node) { // check if already visited - if (ggml_hash_insert(&cgraph->visited_hash_set, node) == GGML_HASHSET_ALREADY_EXISTS) { - return; + size_t node_hash_pos = ggml_hash_find(&cgraph->visited_hash_set, node); + GGML_ASSERT(node_hash_pos != GGML_HASHSET_FULL); + if (!ggml_bitset_get(cgraph->visited_hash_set.used, node_hash_pos)) { + // This is the first time we see this node in the current graph. + cgraph->visited_hash_set.keys[node_hash_pos] = node; + ggml_bitset_set(cgraph->visited_hash_set.used, node_hash_pos); + cgraph->use_counts[node_hash_pos] = 0; + } else { + // already visited + return node_hash_pos; } for (int i = 0; i < GGML_MAX_SRC; ++i) { const int k = (cgraph->order == GGML_CGRAPH_EVAL_ORDER_LEFT_TO_RIGHT) ? i : (cgraph->order == GGML_CGRAPH_EVAL_ORDER_RIGHT_TO_LEFT) ? 
(GGML_MAX_SRC-1-i) : - /* unknown order, just fall back to using i*/ i; - if (node->src[k]) { - ggml_visit_parents(cgraph, node->src[k]); + /* unknown order, just fall back to using i */ i; + + struct ggml_tensor * src = node->src[k]; + if (src) { + size_t src_hash_pos = ggml_visit_parents(cgraph, src); + + // Update the use count for this operand. + cgraph->use_counts[src_hash_pos]++; } } @@ -5756,6 +6266,8 @@ static void ggml_visit_parents(struct ggml_cgraph * cgraph, struct ggml_tensor * cgraph->nodes[cgraph->n_nodes] = node; cgraph->n_nodes++; } + + return node_hash_pos; } static void ggml_build_forward_impl(struct ggml_cgraph * cgraph, struct ggml_tensor * tensor, bool expand) { @@ -5893,6 +6405,7 @@ static size_t ggml_graph_nbytes(size_t size, bool grads) { incr_ptr_aligned(&p, sizeof(struct ggml_cgraph), 1); incr_ptr_aligned(&p, size * sizeof(struct ggml_tensor *), sizeof(struct ggml_tensor *)); // nodes incr_ptr_aligned(&p, size * sizeof(struct ggml_tensor *), sizeof(struct ggml_tensor *)); // leafs + incr_ptr_aligned(&p, hash_size * sizeof(int32_t), sizeof(int32_t)); // use_counts incr_ptr_aligned(&p, hash_size * sizeof(struct ggml_tensor *), sizeof(struct ggml_tensor *)); // hash keys if (grads) { incr_ptr_aligned(&p, hash_size * sizeof(struct ggml_tensor *), sizeof(struct ggml_tensor *)); // grads @@ -5922,11 +6435,12 @@ struct ggml_cgraph * ggml_new_graph_custom(struct ggml_context * ctx, size_t siz void * p = cgraph + 1; - struct ggml_tensor ** nodes_ptr = incr_ptr_aligned(&p, size * sizeof(struct ggml_tensor *), sizeof(struct ggml_tensor *)); - struct ggml_tensor ** leafs_ptr = incr_ptr_aligned(&p, size * sizeof(struct ggml_tensor *), sizeof(struct ggml_tensor *)); - struct ggml_tensor ** hash_keys_ptr = incr_ptr_aligned(&p, hash_size * sizeof(struct ggml_tensor *), sizeof(struct ggml_tensor *)); - struct ggml_tensor ** grads_ptr = grads ? incr_ptr_aligned(&p, hash_size * sizeof(struct ggml_tensor *), sizeof(struct ggml_tensor *)) : NULL; - struct ggml_tensor ** grad_accs_ptr = grads ? incr_ptr_aligned(&p, hash_size * sizeof(struct ggml_tensor *), sizeof(struct ggml_tensor *)) : NULL; + struct ggml_tensor ** nodes_ptr = incr_ptr_aligned(&p, size * sizeof(struct ggml_tensor *), sizeof(struct ggml_tensor *)); + struct ggml_tensor ** leafs_ptr = incr_ptr_aligned(&p, size * sizeof(struct ggml_tensor *), sizeof(struct ggml_tensor *)); + int32_t * use_counts_ptr = incr_ptr_aligned(&p, hash_size * sizeof(int32_t), sizeof(int32_t)); + struct ggml_tensor ** hash_keys_ptr = incr_ptr_aligned(&p, hash_size * sizeof(struct ggml_tensor *), sizeof(struct ggml_tensor *)); + struct ggml_tensor ** grads_ptr = grads ? incr_ptr_aligned(&p, hash_size * sizeof(struct ggml_tensor *), sizeof(struct ggml_tensor *)) : NULL; + struct ggml_tensor ** grad_accs_ptr = grads ? 
incr_ptr_aligned(&p, hash_size * sizeof(struct ggml_tensor *), sizeof(struct ggml_tensor *)) : NULL; ggml_bitset_t * hash_used = incr_ptr_aligned(&p, ggml_bitset_size(hash_size) * sizeof(ggml_bitset_t), sizeof(ggml_bitset_t)); @@ -5941,6 +6455,7 @@ struct ggml_cgraph * ggml_new_graph_custom(struct ggml_context * ctx, size_t siz /*.grads =*/ grads_ptr, /*.grad_accs =*/ grad_accs_ptr, /*.leafs =*/ leafs_ptr, + /*.use_counts =*/ use_counts_ptr, /*.hash_table =*/ { hash_size, hash_used, hash_keys_ptr }, /*.order =*/ GGML_CGRAPH_EVAL_ORDER_LEFT_TO_RIGHT, }; @@ -5967,7 +6482,8 @@ struct ggml_cgraph ggml_graph_view(struct ggml_cgraph * cgraph0, int i0, int i1) /*.grads =*/ NULL, // gradients would need visited_hash_set /*.grad_accs =*/ NULL, /*.leafs =*/ NULL, - /*.visited_hash_set =*/ { 0, NULL, NULL }, + /*.use_counts =*/ cgraph0->use_counts, + /*.visited_hash_set =*/ cgraph0->visited_hash_set, /*.order =*/ cgraph0->order, }; @@ -5994,7 +6510,8 @@ void ggml_graph_cpy(struct ggml_cgraph * src, struct ggml_cgraph * dst) { for (size_t i = 0; i < src->visited_hash_set.size; ++i) { // copy all hashset keys (tensors) that are in use if (ggml_bitset_get(src->visited_hash_set.used, i)) { - ggml_hash_insert(&dst->visited_hash_set, src->visited_hash_set.keys[i]); + size_t new_hash_pos = ggml_hash_insert(&dst->visited_hash_set, src->visited_hash_set.keys[i]); + dst->use_counts[new_hash_pos] = src->use_counts[i]; } } @@ -6198,20 +6715,18 @@ static struct ggml_tensor * ggml_graph_get_parent(const struct ggml_cgraph * cgr static void ggml_graph_dump_dot_node_edge(FILE * fp, const struct ggml_cgraph * gb, struct ggml_tensor * node, struct ggml_tensor * parent, const char * label) { struct ggml_tensor * gparent = ggml_graph_get_parent(gb, node); struct ggml_tensor * gparent0 = ggml_graph_get_parent(gb, parent); - fprintf(fp, " \"%p\":%s -> \"%p\":%s [ arrowhead = %s; style = %s; label = \"%s\"; ]\n", + fprintf(fp, " \"%p\" -> \"%p\" [ arrowhead = %s; style = %s; label = \"%s\"; ]\n", gparent0 ? (void *) gparent0 : (void *) parent, - gparent0 ? "g" : "x", gparent ? (void *) gparent : (void *) node, - gparent ? "g" : "x", gparent ? "empty" : "vee", gparent ?
"dashed" : "solid", label); } static void ggml_graph_dump_dot_leaf_edge(FILE * fp, struct ggml_tensor * node, struct ggml_tensor * parent, const char * label) { - fprintf(fp, " \"%p\":%s -> \"%p\":%s [ label = \"%s\"; ]\n", - (void *) parent, "x", - (void *) node, "x", + fprintf(fp, " \"%p\" -> \"%p\" [ label = \"%s\"; ]\n", + (void *) parent, + (void *) node, label); } @@ -6432,6 +6947,7 @@ size_t ggml_quantize_chunk( case GGML_TYPE_Q5_0: result = quantize_q5_0(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break; case GGML_TYPE_Q5_1: result = quantize_q5_1(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break; case GGML_TYPE_Q8_0: result = quantize_q8_0(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break; + case GGML_TYPE_MXFP4: result = quantize_mxfp4(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break; case GGML_TYPE_Q2_K: result = quantize_q2_K(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break; case GGML_TYPE_Q3_K: result = quantize_q3_K(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break; case GGML_TYPE_Q4_K: result = quantize_q4_K(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break; @@ -6448,7 +6964,6 @@ size_t ggml_quantize_chunk( case GGML_TYPE_IQ1_M: result = quantize_iq1_m (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break; case GGML_TYPE_IQ4_NL: result = quantize_iq4_nl (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break; case GGML_TYPE_IQ4_XS: result = quantize_iq4_xs (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break; - case GGML_TYPE_MXFP4: result = quantize_mxfp4 (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break; case GGML_TYPE_F16: { size_t elemsize = sizeof(ggml_fp16_t); diff --git a/ml/backend/ggml/ggml/src/ggml.cpp b/ml/backend/ggml/ggml/src/ggml.cpp new file mode 100644 index 000000000..0d388d455 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml.cpp @@ -0,0 +1,26 @@ +#include "ggml-impl.h" + +#include <cstdlib> +#include <exception> + +static std::terminate_handler previous_terminate_handler; + +GGML_NORETURN static void ggml_uncaught_exception() { + ggml_print_backtrace(); + if (previous_terminate_handler) { + previous_terminate_handler(); + } + abort(); // unreachable unless previous_terminate_handler was nullptr +} + +static bool ggml_uncaught_exception_init = []{ + const char * GGML_NO_BACKTRACE = getenv("GGML_NO_BACKTRACE"); + if (GGML_NO_BACKTRACE) { + return false; + } + const auto prev{std::get_terminate()}; + GGML_ASSERT(prev != ggml_uncaught_exception); + previous_terminate_handler = prev; + std::set_terminate(ggml_uncaught_exception); + return true; +}(); diff --git a/ml/backend/ggml/ggml/src/ggml.go b/ml/backend/ggml/ggml/src/ggml.go index 91f1f1ada..37347807d 100644 --- a/ml/backend/ggml/ggml/src/ggml.go +++ b/ml/backend/ggml/ggml/src/ggml.go @@ -1,7 +1,7 @@ package ggml // #cgo CXXFLAGS: -std=c++17 -// #cgo CPPFLAGS: -DNDEBUG -DGGML_USE_CPU +// #cgo CPPFLAGS: -DNDEBUG -DGGML_USE_CPU -DGGML_VERSION=0x0 -DGGML_COMMIT=0x0 // #cgo CPPFLAGS: -I${SRCDIR}/../include -I${SRCDIR}/ggml-cpu // #cgo windows CFLAGS: -Wno-dll-attribute-on-redeclaration // #cgo windows LDFLAGS: -lmsvcrt -static -static-libgcc -static-libstdc++ diff --git a/ml/backend/ggml/ggml/src/gguf.cpp b/ml/backend/ggml/ggml/src/gguf.cpp index e45b453de..0f71d5f33
100644 --- a/ml/backend/ggml/ggml/src/gguf.cpp +++ b/ml/backend/ggml/ggml/src/gguf.cpp @@ -299,10 +299,10 @@ bool gguf_read_emplace_helper(const struct gguf_reader & gr, std::vector<struct gguf_kv> & kv, const std::string & key, const bool is_array, const size_t n) { if (ok && gr.read(ctx->version)) { - if (ctx->version == 1) { - fprintf(stderr, "%s: GGUFv1 is no longer supported, please use a more up-to-date version\n", __func__); + if (ok && ctx->version == 0) { + GGML_LOG_ERROR("%s: bad GGUF version: %" PRIu32 "\n", __func__, ctx->version); ok = false; } - if (ctx->version > GGUF_VERSION) { - fprintf(stderr, "%s: this GGUF file is version %" PRIu32 " but this software only supports up to version %d\n", + + /* + * bit layout is different when reading non-native endian models. + * assuming that the GGUF version is 3, the non-native endian model + * would read it as 0x30000000. we can use the AND operation against + * the last 4 hexadecimal digits to check if the model is the same + * endianness as the host system. + */ + if (ok && (ctx->version & 0x0000FFFF) == 0x00000000) { + GGML_LOG_ERROR("%s: failed to load model: this GGUF file version %" PRIu32 " is extremely large, is there a mismatch between the host and model endianness?\n", __func__, ctx->version); + ok = false; + } + + if (ok && ctx->version == 1) { + GGML_LOG_ERROR("%s: GGUFv1 is no longer supported, please use a more up-to-date version\n", __func__); + ok = false; + } + if (ok && ctx->version > GGUF_VERSION) { + GGML_LOG_ERROR("%s: this GGUF file is version %" PRIu32 " but this software only supports up to version %d\n", __func__, ctx->version, GGUF_VERSION); ok = false; } @@ -363,7 +384,7 @@ struct gguf_context * gguf_init_from_file_impl(FILE * file, struct gguf_init_par if (ok && gr.read(n_tensors)) { static_assert(sizeof(size_t) <= 8 && sizeof(gguf_tensor_info) >= 2, "int64_t insufficient for indexing"); if (n_tensors < 0 || n_tensors > int64_t(SIZE_MAX/sizeof(gguf_tensor_info))) { - fprintf(stderr, "%s: number of tensors is %" PRIi64 " but must be in [0, %zu]\n", + GGML_LOG_ERROR("%s: number of tensors is %" PRIi64 " but must be in [0, %zu]\n", __func__, n_tensors, SIZE_MAX/sizeof(gguf_tensor_info)); ok = false; } @@ -374,7 +395,7 @@ struct gguf_context * gguf_init_from_file_impl(FILE * file, struct gguf_init_par if (ok && gr.read(n_kv)) { static_assert(sizeof(size_t) <= 8 && sizeof(gguf_tensor_info) >= 2, "int64_t insufficient for indexing"); if (n_kv < 0 || n_kv > int64_t(SIZE_MAX/sizeof(gguf_kv))) { - fprintf(stderr, "%s: number of key value pairs is %" PRIi64 " but must be in [0, %zu]\n", + GGML_LOG_ERROR("%s: number of key value pairs is %" PRIi64 " but must be in [0, %zu]\n", __func__, n_kv, SIZE_MAX/sizeof(gguf_kv)); ok = false; } @@ -383,7 +404,7 @@ struct gguf_context * gguf_init_from_file_impl(FILE * file, struct gguf_init_par } if (!ok) { - fprintf(stderr, "%s: failed to read header\n", __func__); + GGML_LOG_ERROR("%s: failed to read header\n", __func__); gguf_free(ctx); return nullptr; } @@ -399,15 +420,15 @@ struct gguf_context * gguf_init_from_file_impl(FILE * file, struct gguf_init_par try { ok = ok && gr.read(key); } catch (std::length_error &) { - fprintf(stderr, "%s: encountered length_error while reading key %" PRIi64 "\n", __func__, i); + GGML_LOG_ERROR("%s: encountered length_error while reading key %" PRIi64 "\n", __func__, i); ok = false; } catch (std::bad_alloc &) { - fprintf(stderr, "%s: encountered bad_alloc error while reading key %" PRIi64 "\n", __func__, i); + GGML_LOG_ERROR("%s: encountered bad_alloc error while reading key %" PRIi64 "\n", __func__, i); ok = false; } for (size_t j = 0; ok && j <
ctx->kv.size(); ++j) { if (key == ctx->kv[j].key) { - fprintf(stderr, "%s: duplicate key '%s' for tensors %zu and %" PRIi64 " \n", __func__, key.c_str(), j, i); + GGML_LOG_ERROR("%s: duplicate key '%s' for tensors %zu and %" PRIi64 " \n", __func__, key.c_str(), j, i); ok = false; } } @@ -441,14 +462,14 @@ struct gguf_context * gguf_init_from_file_impl(FILE * file, struct gguf_init_par case GGUF_TYPE_ARRAY: default: { - fprintf(stderr, "%s: key '%s' has invalid GGUF type %d\n", __func__, key.c_str(), type); + GGML_LOG_ERROR("%s: key '%s' has invalid GGUF type %d\n", __func__, key.c_str(), type); ok = false; } break; } } if (!ok) { - fprintf(stderr, "%s: failed to read key-value pairs\n", __func__); + GGML_LOG_ERROR("%s: failed to read key-value pairs\n", __func__); gguf_free(ctx); return nullptr; } @@ -458,7 +479,7 @@ struct gguf_context * gguf_init_from_file_impl(FILE * file, struct gguf_init_par ctx->alignment = alignment_idx == -1 ? GGUF_DEFAULT_ALIGNMENT : gguf_get_val_u32(ctx, alignment_idx); if (ctx->alignment == 0 || (ctx->alignment & (ctx->alignment - 1)) != 0) { - fprintf(stderr, "%s: alignment %zu is not a power of 2\n", __func__, ctx->alignment); + GGML_LOG_ERROR("%s: alignment %zu is not a power of 2\n", __func__, ctx->alignment); gguf_free(ctx); return nullptr; } @@ -474,14 +495,14 @@ struct gguf_context * gguf_init_from_file_impl(FILE * file, struct gguf_init_par try { ok = ok && gr.read(name); } catch (std::length_error &) { - fprintf(stderr, "%s: encountered length_error while reading tensor name %" PRIi64 "\n", __func__, i); + GGML_LOG_ERROR("%s: encountered length_error while reading tensor name %" PRIi64 "\n", __func__, i); ok = false; } catch (std::bad_alloc &) { - fprintf(stderr, "%s: encountered bad_alloc error while reading tensor name %" PRIi64 "\n", __func__, i); + GGML_LOG_ERROR("%s: encountered bad_alloc error while reading tensor name %" PRIi64 "\n", __func__, i); ok = false; } if (name.length() >= GGML_MAX_NAME) { - fprintf(stderr, "%s: tensor name %" PRIi64 " is too long: %zu >= %d\n", __func__, i, name.length(), GGML_MAX_NAME); + GGML_LOG_ERROR("%s: tensor name %" PRIi64 " is too long: %zu >= %d\n", __func__, i, name.length(), GGML_MAX_NAME); ok = false; break; } @@ -490,7 +511,7 @@ struct gguf_context * gguf_init_from_file_impl(FILE * file, struct gguf_init_par // make sure there are no duplicate tensor names for (int64_t j = 0; ok && j < i; ++j) { if (strcmp(info.t.name, ctx->info[j].t.name) == 0) { - fprintf(stderr, "%s: duplicate tensor name '%s' for tensors %" PRIi64 " and %" PRIi64 "\n", __func__, info.t.name, j, i); + GGML_LOG_ERROR("%s: duplicate tensor name '%s' for tensors %" PRIi64 " and %" PRIi64 "\n", __func__, info.t.name, j, i); ok = false; break; } @@ -505,7 +526,7 @@ struct gguf_context * gguf_init_from_file_impl(FILE * file, struct gguf_init_par uint32_t n_dims = -1; ok = ok && gr.read(n_dims); if (n_dims > GGML_MAX_DIMS) { - fprintf(stderr, "%s: tensor '%s' has invalid number of dimensions: %" PRIu32 " > %" PRIu32 "\n", + GGML_LOG_ERROR("%s: tensor '%s' has invalid number of dimensions: %" PRIu32 " > %" PRIu32 "\n", __func__, info.t.name, n_dims, GGML_MAX_DIMS); ok = false; break; @@ -518,7 +539,7 @@ struct gguf_context * gguf_init_from_file_impl(FILE * file, struct gguf_init_par // check that all ne are non-negative if (info.t.ne[j] < 0) { - fprintf(stderr, "%s: tensor '%s' dimension %" PRIu32 " has invalid number of elements: %" PRIi64 " < 0\n", + GGML_LOG_ERROR("%s: tensor '%s' dimension %" PRIu32 " has invalid number of elements: %" 
PRIi64 " < 0\n", __func__, info.t.name, j, info.t.ne[j]); ok = false; break; @@ -530,7 +551,7 @@ struct gguf_context * gguf_init_from_file_impl(FILE * file, struct gguf_init_par (INT64_MAX/info.t.ne[2] <= info.t.ne[0]*info.t.ne[1]) || (INT64_MAX/info.t.ne[3] <= info.t.ne[0]*info.t.ne[1]*info.t.ne[2]))) { - fprintf(stderr, "%s: total number of elements in tensor '%s' with shape " + GGML_LOG_ERROR("%s: total number of elements in tensor '%s' with shape " "(%" PRIi64 ", %" PRIi64 ", %" PRIi64 ", %" PRIi64 ") is >= %" PRIi64 "\n", __func__, info.t.name, info.t.ne[0], info.t.ne[1], info.t.ne[2], info.t.ne[3], INT64_MAX); ok = false; @@ -547,7 +568,7 @@ struct gguf_context * gguf_init_from_file_impl(FILE * file, struct gguf_init_par // check that tensor type is within defined range if (info.t.type < 0 || info.t.type >= GGML_TYPE_COUNT) { - fprintf(stderr, "%s: tensor '%s' has invalid ggml type %d (%s)\n", + GGML_LOG_ERROR("%s: tensor '%s' has invalid ggml type %d (%s)\n", __func__, info.t.name, info.t.type, ggml_type_name(info.t.type)); ok = false; break; @@ -557,7 +578,7 @@ struct gguf_context * gguf_init_from_file_impl(FILE * file, struct gguf_init_par // check that row size is divisible by block size if (blck_size == 0 || info.t.ne[0] % blck_size != 0) { - fprintf(stderr, "%s: tensor '%s' of type %d (%s) has %" PRId64 " elements per row, " + GGML_LOG_ERROR("%s: tensor '%s' of type %d (%s) has %" PRId64 " elements per row, " "not a multiple of block size (%" PRId64 ")\n", __func__, info.t.name, (int) info.t.type, ggml_type_name(info.t.type), info.t.ne[0], blck_size); ok = false; @@ -582,7 +603,7 @@ struct gguf_context * gguf_init_from_file_impl(FILE * file, struct gguf_init_par } if (!ok) { - fprintf(stderr, "%s: failed to read tensor info\n", __func__); + GGML_LOG_ERROR("%s: failed to read tensor info\n", __func__); gguf_free(ctx); return nullptr; } @@ -590,7 +611,7 @@ struct gguf_context * gguf_init_from_file_impl(FILE * file, struct gguf_init_par // we require the data section to be aligned, so take into account any padding if (fseek(file, GGML_PAD(ftell(file), ctx->alignment), SEEK_SET) != 0) { - fprintf(stderr, "%s: failed to seek to beginning of data section\n", __func__); + GGML_LOG_ERROR("%s: failed to seek to beginning of data section\n", __func__); gguf_free(ctx); return nullptr; } @@ -604,13 +625,20 @@ struct gguf_context * gguf_init_from_file_impl(FILE * file, struct gguf_init_par for (size_t i = 0; i < ctx->info.size(); ++i) { const gguf_tensor_info & ti = ctx->info[i]; if (ti.offset != ctx->size) { - fprintf(stderr, "%s: tensor '%s' has offset %" PRIu64 ", expected %zu\n", + GGML_LOG_ERROR("%s: tensor '%s' has offset %" PRIu64 ", expected %zu\n", __func__, ti.t.name, ti.offset, ctx->size); - fprintf(stderr, "%s: failed to read tensor data\n", __func__); + GGML_LOG_ERROR("%s: failed to read tensor data\n", __func__); gguf_free(ctx); return nullptr; } - ctx->size += GGML_PAD(ggml_nbytes(&ti.t), ctx->alignment); + size_t padded_size = GGML_PAD(ggml_nbytes(&ti.t), ctx->alignment); + if (SIZE_MAX - ctx->size < padded_size) { + GGML_LOG_ERROR("%s: tensor '%s' size overflow, cannot accumulate size %zu + %zu\n", + __func__, ti.t.name, ctx->size, padded_size); + gguf_free(ctx); + return nullptr; + } + ctx->size += padded_size; } } @@ -634,7 +662,7 @@ struct gguf_context * gguf_init_from_file_impl(FILE * file, struct gguf_init_par *params.ctx = ggml_init(pdata); if (*params.ctx == nullptr) { - fprintf(stderr, "%s: failed to initialize ggml context for storing tensors\n", __func__); + 
GGML_LOG_ERROR("%s: failed to initialize ggml context for storing tensors\n", __func__); gguf_free(ctx); return nullptr; } @@ -656,7 +684,7 @@ struct gguf_context * gguf_init_from_file_impl(FILE * file, struct gguf_init_par ok = ok && gr.read(data->data, ctx->size); if (!ok) { - fprintf(stderr, "%s: failed to read tensor data binary blob\n", __func__); + GGML_LOG_ERROR("%s: failed to read tensor data binary blob\n", __func__); ggml_free(ctx_data); *params.ctx = nullptr; gguf_free(ctx); @@ -689,7 +717,7 @@ struct gguf_context * gguf_init_from_file_impl(FILE * file, struct gguf_init_par } if (!ok) { - fprintf(stderr, "%s: failed to create tensors\n", __func__); + GGML_LOG_ERROR("%s: failed to create tensors\n", __func__); ggml_free(ctx_data); *params.ctx = nullptr; gguf_free(ctx); @@ -706,7 +734,7 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p FILE * file = ggml_fopen(fname, "rb"); if (!file) { - fprintf(stderr, "%s: failed to open GGUF file '%s'\n", __func__, fname); + GGML_LOG_ERROR("%s: failed to open GGUF file '%s'\n", __func__, fname); return nullptr; } @@ -1308,7 +1336,7 @@ bool gguf_write_to_file(const struct gguf_context * ctx, const char * fname, boo FILE * file = ggml_fopen(fname, "wb"); if (!file) { - fprintf(stderr, "%s: failed to open file '%s' for writing GGUF data\n", __func__, fname); + GGML_LOG_ERROR("%s: failed to open file '%s' for writing GGUF data\n", __func__, fname); return false; } diff --git a/ml/backend/ggml/ggml_test.go b/ml/backend/ggml/ggml_test.go deleted file mode 100644 index 70ebb9df4..000000000 --- a/ml/backend/ggml/ggml_test.go +++ /dev/null @@ -1,60 +0,0 @@ -package ggml - -import ( - "bytes" - "log/slog" - "os" - "slices" - "testing" - - "github.com/ollama/ollama/envconfig" - "github.com/ollama/ollama/fs/ggml" - "github.com/ollama/ollama/logutil" - "github.com/ollama/ollama/ml" -) - -func TestMain(m *testing.M) { - slog.SetDefault(logutil.NewLogger(os.Stderr, envconfig.LogLevel())) - os.Exit(m.Run()) -} - -func setup(tb testing.TB) ml.Backend { - tb.Helper() - - f, err := os.CreateTemp(tb.TempDir(), "*.bin") - if err != nil { - tb.Fatal(err) - } - defer f.Close() - - if err := ggml.WriteGGUF(f, ggml.KV{ - "general.architecture": "test", - "test.block_count": uint32(1), - }, []*ggml.Tensor{ - {Name: "blk.0.weight", Shape: []uint64{1}, WriterTo: bytes.NewBuffer(slices.Repeat([]byte{0}, 4))}, - }); err != nil { - tb.Fatal(err) - } - - b, err := New(f.Name(), ml.BackendParams{NumGPULayers: 1}) - if err != nil { - tb.Fatal(err) - } - - return b -} - -// initContextOrSkip takes a testing.T and true for GPU -// If GPUs are not available, the current test is skipped -// gpu=false will always succed -func initContextOrSkip(t *testing.T, b ml.Backend, gpu bool) ml.Context { - if gpu && len(b.(*Backend).schedBackends) == 1 { - t.Skip("No GPU detected, skipping GPU test case") - } - ctx := b.NewContext() - t.Cleanup(func() { ctx.Close() }) - if gpu { - return ctx.Layer(0) - } - return ctx.Input() -} diff --git a/ml/backend/ggml/mxfp4_test.go b/ml/backend/ggml/mxfp4_test.go deleted file mode 100644 index 3c17eb8aa..000000000 --- a/ml/backend/ggml/mxfp4_test.go +++ /dev/null @@ -1,795 +0,0 @@ -package ggml - -import ( - "math" - "math/rand" - "os" - "testing" - - "github.com/ollama/ollama/ml" - - fsggml "github.com/ollama/ollama/fs/ggml" -) - -/* - To get GPUs loading in these tests on windows... - - $env:OLLAMA_LIBRARY_PATH="$(pwd)\build\lib\ollama" - $env:PATH="$(pwd)\build\lib\ollama;$env:PATH" - - go test .\ml\backend\ggml\... 
-run TestMXFP4 -*/ - -// MXFP4 reference: https://www.opencompute.org/documents/ocp-microscaling-formats-mx-v1-0-spec-final-pdf - -// E2M1 values -var mxfp4_vals = []float32{ - 0.0, // 0 00 0 = 0x0 - 0.5, // 0 00 1 = 0x1 - 1.0, // 0 01 0 = 0x2 - 1.5, // 0 01 1 = 0x3 - 2.0, // 0 10 0 = 0x4 - 3.0, // 0 10 1 = 0x5 - 4.0, // 0 11 0 = 0x6 - 6.0, // 0 11 1 = 0x7 - 0.0, // 1 00 0 = 0x8 - -0.5, // 1 00 1 = 0x9 - -1.0, // 1 01 0 = 0xa - -1.5, // 1 01 1 = 0xb - -2.0, // 1 10 0 = 0xc - -3.0, // 1 10 1 = 0xd - -4.0, // 1 11 0 = 0xe - -6.0, // 1 11 1 = 0xf -} - -func TestMXFP4Ops(t *testing.T) { - b := setup(t) - for _, useGPU := range []bool{false, true} { - useGPU := useGPU - var label string - if useGPU { - label = "gpu" - } else { - label = "cpu" - } - t.Run(label, func(t *testing.T) { - t.Run("mulmatid", func(t *testing.T) { - // Use exact values that are supported without scaling so we can compare against an fp32 tensor - t.Run("exact", func(t *testing.T) { - r := rand.New(rand.NewSource(0)) - ctx := initContextOrSkip(t, b, useGPU) - const s00 = 64 - const s01 = 1 - const s02 = 2 - const s10 = s00 - const s11 = 1 - const s12 = 1 - // const s00 = 2880 - // const s01 = 5760 - // const s02 = 32 - // const s10 = s00 - // const s11 = 1 - // const s12 = 64 - - data := [s00 * s01 * s02]float32{} - for i := range data { - data[i] = mxfp4_vals[r.Int()%len(mxfp4_vals)] - } - mxData := Quantize(fsggml.TensorTypeMXFP4, data[:], []uint64{uint64(len(data))}) - dtype := ml.DTypeMXFP4 - t1 := ctx.(*Context).FromBytes(dtype, mxData, s00, s01, s02) - t1f := ctx.(*Context).FromFloatSlice(data[:], s00, s01, s02) - // for i := range len(data) / 32 { // MXFP4 block size - // vals := [32]string{} - // for j := range vals { - // vals[j] = fmt.Sprintf("%0.2f", data[i*32+j]) - // } - // t.Logf(" t1[%s]\n", strings.Join(vals[:], ", ")) - // } - - // random 0-1 float - d2 := [s10 * s11 * s12]float32{} - for i := range d2 { - d2[i] = float32(r.Float32()) - } - // for i := range len(d2) / s10 { - // vals := [s10]string{} - // for j := range vals { - // vals[j] = fmt.Sprintf("%0.2f", d2[i*s10+j]) - // } - // t.Logf(" t2[%s]\n", strings.Join(vals[:], ", ")) - // } - t2 := ctx.(*Context).FromFloatSlice(d2[:], s10, s11, s12) - - d3 := [4 * s12]int32{} - for i := range d3 { - d3[i] = int32(i) % s02 - } - t3 := ctx.(*Context).FromIntSlice(d3[:], 4, s12) - - // t.Log("calling MulmatID") - t4 := t1.MulmatID(ctx, t2, t3) - t4f := t1f.MulmatID(ctx, t2, t3) - d4 := ml.Dump(ctx, t4, ml.DumpWithPrecision(2)) // lower precision for CPU accuracy - d4f := ml.Dump(ctx, t4f, ml.DumpWithPrecision(2)) - if d4 != d4f { - t.Fatalf("expected (f32): \n%s\n\n but got (mxfp4): \n%s", d4f, d4) - } - // t.Logf("MulmatID results matched:\n%s", d4) - }) - - t.Run("range", func(t *testing.T) { - r := rand.New(rand.NewSource(0)) - ctx := initContextOrSkip(t, b, useGPU) - const s0 = 64 - const s1 = 2 - const s2 = 4 - const idlen = 4 - data := [s0 * s1 * s2]float32{} - inTotal := float32(0) - for i := range data { - data[i] = float32(i) - inTotal += float32(i) - } - mxData := Quantize(fsggml.TensorTypeMXFP4, data[:], []uint64{uint64(len(data))}) - // Reconvert back to floats to remove the quantization fidelity loss for comparison - dataf := ConvertToF32(mxData, uint32(fsggml.TensorTypeMXFP4), uint64(len(data))) - dtype := ml.DTypeMXFP4 - t1 := ctx.(*Context).FromBytes(dtype, mxData, s0, s1, s2) - t1f := ctx.(*Context).FromFloatSlice(dataf, s0, s1, s2) - // for i := range len(data) / 32 { - // vals := [32]string{} - // for j := range vals { - // vals[j] = 
fmt.Sprintf("%0.2f", dataf[i*32+j]) - // } - // t.Logf(" t1[%s]\n", strings.Join(vals[:], ", ")) - // } - - d2 := [s0]float32{} - for i := range d2 { - // d2[i] = float32(i) - d2[i] = float32(r.Float32()) - } - // for i := range len(d2) / s0 { - // vals := [s0]string{} - // for j := range vals { - // vals[j] = fmt.Sprintf("%0.2f", d2[i*s0+j]) - // } - // t.Logf(" t2[%s]\n", strings.Join(vals[:], ", ")) - // } - t2 := ctx.(*Context).FromFloatSlice(d2[:], s0) - - // TODO - there might be a CUDA bug here... - d3 := [idlen]int32{1, 1, 2, 3} - // for i := range d3 { - // d3[i] = int32(i) % s2 - // t.Logf("%d] %d", i, d3[i]) - // } - t3 := ctx.(*Context).FromIntSlice(d3[:], idlen) - - // t.Log("calling Mulmat") - t4 := t1.MulmatID(ctx, t2, t3) - t4f := t1f.MulmatID(ctx, t2, t3) - // Metal has some drift so use reduced precision for dump comparisons - d4 := ml.Dump(ctx, t4, ml.DumpWithPrecision(2)) - d4f := ml.Dump(ctx, t4f, ml.DumpWithPrecision(2)) - r4 := t4.Floats() - r4f := t4f.Floats() - sim := cosineSimilarity(r4, r4f) - if sim < 0.99 { - t.Logf("expected (f32): \n%s\n\n but got (mxfp4): \n%s", d4f, d4) - t.Fatalf("failed similarity test: %f", sim) - } - t.Logf("similarity: %f", sim) - - if d4 != d4f { - t.Fatalf("expected (f32): \n%s\n\n but got (mxfp4): \n%s", d4f, d4) - } - // t.Logf("mxfp4 result\n%s", d4) - }) - t.Run("random", func(t *testing.T) { - r := rand.New(rand.NewSource(0)) - ctx := initContextOrSkip(t, b, useGPU) - const s00 = 2880 - const s01 = 5760 - const s02 = 32 - const s10 = s00 - const s11 = 1 - const s12 = 64 - const idlen = 4 - - data := [s00 * s01 * s02]float32{} - for i := range data { - data[i] = float32(r.Float32() * 10.0) - } - mxData := Quantize(fsggml.TensorTypeMXFP4, data[:], []uint64{uint64(len(data))}) - // Reconvert back to floats to remove the quantization fidelity loss for comparison - dataf := ConvertToF32(mxData, uint32(fsggml.TensorTypeMXFP4), uint64(len(data))) - dtype := ml.DTypeMXFP4 - t1 := ctx.(*Context).FromBytes(dtype, mxData, s00, s01, s02) - t1f := ctx.(*Context).FromFloatSlice(dataf, s00, s01, s02) - // for i := range len(data) / 32 { - // vals := [32]string{} - // for j := range vals { - // vals[j] = fmt.Sprintf("%0.2f", dataf[i*32+j]) - // } - // t.Logf(" t1[%s]\n", strings.Join(vals[:], ", ")) - // } - - d2 := [s10 * s11 * s12]float32{} - for i := range d2 { - // d2[i] = float32(i) - d2[i] = float32(r.Float32()) - } - // for i := range len(d2) / s0 { - // vals := [s0]string{} - // for j := range vals { - // vals[j] = fmt.Sprintf("%0.2f", d2[i*s0+j]) - // } - // t.Logf(" t2[%s]\n", strings.Join(vals[:], ", ")) - // } - t2 := ctx.(*Context).FromFloatSlice(d2[:], s10, s11, s12) - - // arange equiv - d3 := [idlen * s12]int32{} - for i := range d3 { - d3[i] = int32(i) % s02 - } - t3 := ctx.(*Context).FromIntSlice(d3[:], idlen, s12) - - // t.Log("calling Mulmat") - // t3 := t1.Mulmat(ctx, t2) - // t3f := t1f.Mulmat(ctx, t2) - t4 := t1.MulmatID(ctx, t2, t3) - t4f := t1f.MulmatID(ctx, t2, t3) - // Metal and CPU have some drift so use reduced precision for dump comparisons - d4 := ml.Dump(ctx, t4, ml.DumpWithPrecision(1)) - d4f := ml.Dump(ctx, t4f, ml.DumpWithPrecision(1)) - // t.Logf("mxfp4 data: \n%s", d4) - r4 := t4.Floats() - r4f := t4f.Floats() - sim := cosineSimilarity(r4, r4f) - if sim < 0.99 { - t.Logf("expected (f32): \n%s\n\n but got (mxfp4): \n%s", d4f, d4) - t.Fatalf("failed similarity test: %f", sim) - } - t.Logf("similarity: %f", sim) - - if d4 != d4f { - t.Fatalf("expected (f32): \n%s\n\n but got (mxfp4): \n%s", d4f, d4) - } - }) - 
- // Use data file(s) with real data - t.Run("example_7", func(t *testing.T) { - ctx := initContextOrSkip(t, b, useGPU) - data0, err := os.ReadFile("mlp-gateup.bin") - if err != nil { - t.Skip("missing mlp-gateup.bin file, skipping test") - } - data1, err := os.ReadFile("hidden-states-7.bin") - if err != nil { - t.Skip("missing hidden-states.bin file, skipping test") - } - data2, err := os.ReadFile("selected-experts-7.bin") - if err != nil { - t.Skip("missing selected-experts.bin file, skipping test") - } - - dtype := ml.DTypeMXFP4 - data0f := ConvertToF32(data0, uint32(fsggml.TensorTypeMXFP4), 2880*5760*32) - t1 := ctx.(*Context).FromBytes(dtype, data0, 2880, 5760, 32) - t1f := ctx.(*Context).FromFloatSlice(data0f, 2880, 5760, 32) - - // t.Logf("f32: \n%s", ml.Dump(ctx, t1f)) - - t2 := ctx.(*Context).FromBytes(ml.DTypeF32, data1, 2880, 1, 7) - // t.Logf("hidden-state: \n%s", ml.Dump(ctx, t2)) - - t3 := ctx.(*Context).FromBytes(ml.DTypeI32, data2, 4, 7) - // t.Logf("experts: \n%s", ml.Dump(ctx, t3)) - - // t.Log("calling MulmatID") - t4 := t1.MulmatID(ctx, t2, t3) - t4f := t1f.MulmatID(ctx, t2, t3) - - d4 := ml.Dump(ctx, t4) - d4f := ml.Dump(ctx, t4f) - - r4 := t4.Floats() - r4f := t4f.Floats() - sim := cosineSimilarity(r4, r4f) - if sim < 0.99 { - t.Fatalf("failed similarity test: %f", sim) - } - t.Logf("similarity: %f", sim) - - if d4 != d4f { - t.Fatalf("expected (f32): \n%s\n\n but got (mxfp4): \n%s", d4f, d4) - } - // t.Logf("MulmatID results matched:\n%s", d4) - }) - - // Use data file(s) with real data - t.Run("example_384", func(t *testing.T) { - ctx := initContextOrSkip(t, b, useGPU) - data0, err := os.ReadFile("mlp-gateup.bin") - if err != nil { - t.Skip("missing mlp-gateup.bin file, skipping test") - } - data1, err := os.ReadFile("hidden-states-384.bin") - if err != nil { - t.Skip("missing hidden-states.bin file, skipping test") - } - data2, err := os.ReadFile("selected-experts-384.bin") - if err != nil { - t.Skip("missing selected-experts.bin file, skipping test") - } - - dtype := ml.DTypeMXFP4 - data0f := ConvertToF32(data0, uint32(fsggml.TensorTypeMXFP4), 2880*5760*32) - t1 := ctx.(*Context).FromBytes(dtype, data0, 2880, 5760, 32) - t1f := ctx.(*Context).FromFloatSlice(data0f, 2880, 5760, 32) - - // t.Logf("f32: \n%s", ml.Dump(ctx, t1f)) - - t2 := ctx.(*Context).FromBytes(ml.DTypeF32, data1, 2880, 1, 384) - // t.Logf("hidden-state: \n%s", ml.Dump(ctx, t2)) - - t3 := ctx.(*Context).FromBytes(ml.DTypeI32, data2, 4, 384) - // t.Logf("experts: \n%s", ml.Dump(ctx, t3)) - - // t.Log("calling MulmatID") - t4 := t1.MulmatID(ctx, t2, t3) - t4f := t1f.MulmatID(ctx, t2, t3) - - d4 := ml.Dump(ctx, t4, ml.DumpWithPrecision(3)) - d4f := ml.Dump(ctx, t4f, ml.DumpWithPrecision(3)) - - r4 := t4.Floats() - r4f := t4f.Floats() - sim := cosineSimilarity(r4, r4f) - if sim < 0.99 { - t.Fatalf("failed similarity test: %f", sim) - } - t.Logf("similarity: %f", sim) - - if d4 != d4f { - t.Fatalf("expected (f32): \n%s\n\n but got (mxfp4): \n%s", d4f, d4) - } - // t.Logf("MulmatID results matched:\n%s", d4) - }) - - // Use data file(s) with real data - t.Run("example_1d", func(t *testing.T) { - r := rand.New(rand.NewSource(0)) - ctx := initContextOrSkip(t, b, useGPU) - data0, err := os.ReadFile("mlp-gateup.bin") - if err != nil { - t.Skip("missing mlp-gateup.bin file, skipping test") - } - - dtype := ml.DTypeMXFP4 - data0f := ConvertToF32(data0, uint32(fsggml.TensorTypeMXFP4), 2880*5760*32) - t1 := ctx.(*Context).FromBytes(dtype, data0, 2880, 5760, 32) - t1f := ctx.(*Context).FromFloatSlice(data0f, 
2880, 5760, 32) - - // t.Logf("f32: \n%s", ml.Dump(ctx, t1f)) - data1 := [2880]float32{} - for i := range data1 { - data1[i] = float32(r.Float32()) - } - - t2 := ctx.(*Context).FromFloatSlice(data1[:], 2880) - // t.Logf("hidden-state: \n%s", ml.Dump(ctx, t2)) - data2 := [4]int32{ - 12, 30, 17, 7, - // 7, 17, 12, 30, - } - - t3 := ctx.(*Context).FromIntSlice(data2[:], 4) - // t.Logf("experts: \n%s", ml.Dump(ctx, t3)) - - // t.Log("calling MulmatID") - t4 := t1.MulmatID(ctx, t2, t3) - t4f := t1f.MulmatID(ctx, t2, t3) - - d4 := ml.Dump(ctx, t4) - d4f := ml.Dump(ctx, t4f) - - r4 := t4.Floats() - r4f := t4f.Floats() - sim := cosineSimilarity(r4, r4f) - if sim < 0.99 { - t.Fatalf("failed similarity test: %f", sim) - } - t.Logf("similarity: %f", sim) - - if d4 != d4f { - t.Fatalf("expected (f32): \n%s\n\n but got (mxfp4): \n%s", d4f, d4) - } - // t.Logf("MulmatID results matched:\n%s", d4) - }) - }) - - t.Run("mm", func(t *testing.T) { - t.Run("example", func(t *testing.T) { - r := rand.New(rand.NewSource(0)) - ctx := initContextOrSkip(t, b, useGPU) - data0, err := os.ReadFile("mlp-gateup.bin") - if err != nil { - t.Skip("missing mlp-gateup.bin file, skipping test") - } - data1 := [2880 * 1 * 32]float32{} - for i := range data1 { - data1[i] = float32(r.Float32()) - } - - dtype := ml.DTypeMXFP4 - data0f := ConvertToF32(data0, uint32(fsggml.TensorTypeMXFP4), 2880*5760*32) - t1 := ctx.(*Context).FromBytes(dtype, data0, 2880, 5760, 32) - t1f := ctx.(*Context).FromFloatSlice(data0f, 2880, 5760, 32) - - // t.Logf("f32: \n%s", ml.Dump(ctx, t1f)) - - t2 := ctx.(*Context).FromFloatSlice(data1[:], 2880, 1, 32) - - t4 := t1.Mulmat(ctx, t2) - t4f := t1f.Mulmat(ctx, t2) - - d4 := ml.Dump(ctx, t4, ml.DumpWithPrecision(3)) - d4f := ml.Dump(ctx, t4f, ml.DumpWithPrecision(3)) - - r4 := t4.Floats() - r4f := t4f.Floats() - sim := cosineSimilarity(r4, r4f) - if sim < 0.99 { - t.Fatalf("failed similarity test: %f", sim) - } - t.Logf("similarity: %f", sim) - - if d4 != d4f { - t.Fatalf("expected (f32): \n%s\n\n but got (mxfp4): \n%s", d4f, d4) - } - // t.Logf("Mulmat results matched:\n%s", d4) - }) - - t.Run("exact/3x3", func(t *testing.T) { - r := rand.New(rand.NewSource(0)) - ctx := initContextOrSkip(t, b, useGPU) - const s10 = 64 - const s11 = 1 - const s12 = 2 - const s20 = s10 - const s21 = 1 - const s22 = 2 - - data := [s10 * s11 * s12]float32{} - for i := range data { - data[i] = mxfp4_vals[r.Int()%len(mxfp4_vals)] - } - // for i := range len(data) / 32 { - // vals := [32]string{} - // for j := range vals { - // vals[j] = fmt.Sprintf("%0.2f", data[i*32+j]) - // } - // t.Logf(" [%s]\n", strings.Join(vals[:], ", ")) - // } - mxData := Quantize(fsggml.TensorTypeMXFP4, data[:], []uint64{uint64(len(data))}) - // for i := range len(mxData) / 17 { - // vals := [17]string{} - // for j := range vals { - // vals[j] = fmt.Sprintf("%0.2x", mxData[i*17+j]) - // } - // t.Logf(" %s\n", strings.Join(vals[:], ", ")) - // } - dtype := ml.DTypeMXFP4 - t1 := ctx.(*Context).FromBytes(dtype, mxData, s10, s11, s12) - t1f := ctx.(*Context).FromFloatSlice(data[:], s10, s11, s12) - - d2 := [s20 * s21 * s22]float32{} - for i := range d2 { - d2[i] = float32(r.Float32()) - } - t2 := ctx.(*Context).FromFloatSlice(d2[:], s20, s21, s22) - - t3f := t1f.Mulmat(ctx, t2) - t3 := t1.Mulmat(ctx, t2) - d3 := ml.Dump(ctx, t3) - d3f := ml.Dump(ctx, t3f) - if d3 != d3f { - t.Fatalf("expected (f32): \n%s\n\n but got (mxfp4): \n%s", d3f, d3) - } - }) - - t.Run("exact/2x2", func(t *testing.T) { - r := rand.New(rand.NewSource(0)) - ctx := 
initContextOrSkip(t, b, useGPU) - const s0 = 32 - const s1 = 64 - - data := [s0 * s1]float32{} - for i := range data { - data[i] = mxfp4_vals[r.Int()%len(mxfp4_vals)] - } - // for i := range 4 { - // vals := [32]string{} - // for j := range vals { - // vals[j] = fmt.Sprintf("%0.2f", data[i*32+j]) - // } - // t.Logf(" [%s]\n", strings.Join(vals[:], ", ")) - // } - mxData := Quantize(fsggml.TensorTypeMXFP4, data[:], []uint64{uint64(len(data))}) - // for i := range len(mxData) / 17 { - // vals := [17]string{} - // for j := range vals { - // vals[j] = fmt.Sprintf("%0.2x", mxData[i*17+j]) - // } - // t.Logf(" %s\n", strings.Join(vals[:], ", ")) - // } - dtype := ml.DTypeMXFP4 - t1 := ctx.(*Context).FromBytes(dtype, mxData, s0, s1) - t1f := ctx.(*Context).FromFloatSlice(data[:], s0, s1) - - d2 := [s0 * s1]float32{} - for i := range d2 { - d2[i] = float32(r.Float32()) - } - t2 := ctx.(*Context).FromFloatSlice(d2[:], s0, s1) - - t3f := t1f.Mulmat(ctx, t2) - t3 := t1.Mulmat(ctx, t2) - d3 := ml.Dump(ctx, t3) - d3f := ml.Dump(ctx, t3f) - if d3 != d3f { - t.Fatalf("expected (f32): \n%s\n\n but got (mxfp4): \n%s", d3f, d3) - } - }) - t.Run("exact/2x1", func(t *testing.T) { - r := rand.New(rand.NewSource(0)) - ctx := initContextOrSkip(t, b, useGPU) - const s0 = 64 - const s1 = 4 - - data := [s0 * s1]float32{} - for i := range data { - data[i] = mxfp4_vals[r.Int()%len(mxfp4_vals)] - } - // for i := range len(data) / 32 { - // vals := [32]string{} - // for j := range vals { - // vals[j] = fmt.Sprintf("%0.2f", data[i*32+j]) - // } - // t.Logf(" t1[%s]\n", strings.Join(vals[:], ", ")) - // } - mxData := Quantize(fsggml.TensorTypeMXFP4, data[:], []uint64{uint64(len(data))}) - // for i := range len(mxData) / 17 { - // vals := [17]string{} - // for j := range vals { - // vals[j] = fmt.Sprintf("%0.2x", mxData[i*17+j]) - // } - // t.Logf(" %s\n", strings.Join(vals[:], ", ")) - // } - dtype := ml.DTypeMXFP4 - t1 := ctx.(*Context).FromBytes(dtype, mxData, s0, s1) - t1f := ctx.(*Context).FromFloatSlice(data[:], s0, s1) - - d2 := [s0]float32{} - for i := range d2 { - d2[i] = float32(r.Float32()) - } - // for i := range len(d2) / 32 { - // vals := [32]string{} - // for j := range vals { - // vals[j] = fmt.Sprintf("%0.2f", d2[i*32+j]) - // } - // t.Logf(" t2[%s]\n", strings.Join(vals[:], ", ")) - // } - - t2 := ctx.(*Context).FromFloatSlice(d2[:], s0) - - t3f := t1f.Mulmat(ctx, t2) - t3 := t1.Mulmat(ctx, t2) - d3 := ml.Dump(ctx, t3, ml.DumpWithPrecision(3)) - d3f := ml.Dump(ctx, t3f, ml.DumpWithPrecision(3)) - if d3 != d3f { - t.Fatalf("expected (f32): \n%s\n\n but got (mxfp4): \n%s", d3f, d3) - } - }) - - t.Run("range/2d", func(t *testing.T) { - r := rand.New(rand.NewSource(0)) - ctx := initContextOrSkip(t, b, useGPU) - const s0 = 32 - const s1 = 4 - data := [s0 * s1]float32{} - inTotal := float32(0) - for i := range data { - data[i] = float32(i) - inTotal += float32(i) - } - mxData := Quantize(fsggml.TensorTypeMXFP4, data[:], []uint64{uint64(len(data))}) - // Reconvert back to floats to remove the quantization fidelity loss for comparison - dataf := ConvertToF32(mxData, uint32(fsggml.TensorTypeMXFP4), uint64(len(data))) - dtype := ml.DTypeMXFP4 - t1 := ctx.(*Context).FromBytes(dtype, mxData, s0, s1) - t1f := ctx.(*Context).FromFloatSlice(dataf, s0, s1) - // for i := range len(data) / 32 { - // vals := [32]string{} - // for j := range vals { - // vals[j] = fmt.Sprintf("%0.2f", dataf[i*32+j]) - // } - // t.Logf(" t1[%s]\n", strings.Join(vals[:], ", ")) - // } - - d2 := [s0 * s1]float32{} - for i := range d2 { - // 
d2[i] = float32(i) - d2[i] = float32(r.Float32()) - } - // for i := range len(d2) / s0 { - // vals := [s0]string{} - // for j := range vals { - // vals[j] = fmt.Sprintf("%0.2f", d2[i*s0+j]) - // } - // t.Logf(" t2[%s]\n", strings.Join(vals[:], ", ")) - // } - - t2 := ctx.(*Context).FromFloatSlice(d2[:], s0, s1) - - // t.Log("calling Mulmat") - t3 := t1.Mulmat(ctx, t2) - t3f := t1f.Mulmat(ctx, t2) - d3 := ml.Dump(ctx, t3, ml.DumpWithPrecision(2)) - d3f := ml.Dump(ctx, t3f, ml.DumpWithPrecision(2)) - r3 := t3.Floats() - r3f := t3f.Floats() - sim := cosineSimilarity(r3, r3f) - if sim < 0.99 { - t.Logf("expected (f32): \n%s\n\n but got (mxfp4): \n%s", d3f, d3) - t.Fatalf("failed similarity test: %f", sim) - } - t.Logf("similarity: %f", sim) - if d3 != d3f { - t.Fatalf("expected (f32): \n%s\n\n but got (mxfp4): \n%s", d3f, d3) - } - }) - - t.Run("range/3d", func(t *testing.T) { - ctx := initContextOrSkip(t, b, useGPU) - data := [32 * 4 * 2]float32{} - inTotal := float32(0) - for i := range data { - data[i] = float32(i) - inTotal += float32(i) - } - mxData := Quantize(fsggml.TensorTypeMXFP4, data[:], []uint64{uint64(len(data))}) - dtype := ml.DTypeMXFP4 - // Reconvert back to floats to remove the quantization fidelity loss for comparison - dataf := ConvertToF32(mxData, uint32(fsggml.TensorTypeMXFP4), uint64(len(data))) - t1 := ctx.(*Context).FromBytes(dtype, mxData, 32, 4, 2) - t1f := ctx.(*Context).FromFloatSlice(dataf, 32, 4, 2) - - d2 := [32 * 4 * 2]float32{} - for i := range d2 { - d2[i] = 2.0 - } - t2 := ctx.(*Context).FromFloatSlice(d2[:], 32, 4, 2) - - // t.Log("calling Mulmat") - t3 := t1.Mulmat(ctx, t2) - t3f := t1f.Mulmat(ctx, t2) - d3 := ml.Dump(ctx, t3) - d3f := ml.Dump(ctx, t3f) - r3 := t3.Floats() - r3f := t3f.Floats() - sim := cosineSimilarity(r3, r3f) - if sim < 0.99 { - t.Logf("expected (f32): \n%s\n\n but got (mxfp4): \n%s", d3f, d3) - t.Fatalf("failed similarity test: %f", sim) - } - t.Logf("similarity: %f", sim) - if d3 != d3f { - t.Fatalf("expected (f32): \n%s\n\n but got (mxfp4): \n%s", d3f, d3) - } - }) - }) - }) - } -} - -func TestMXFP4Simple(t *testing.T) { - b := setup(t) - - t.Run("fixed", func(t *testing.T) { - ctx := initContextOrSkip(t, b, false) - data := [32 * 2]float32{ - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - } - mxData := Quantize(fsggml.TensorTypeMXFP4, data[:], []uint64{uint64(len(data))}) - dtype := ml.DTypeMXFP4 - // Reconvert back to floats to remove the quantization fidelity loss for comparison - dataf := ConvertToF32(mxData, uint32(fsggml.TensorTypeMXFP4), uint64(len(data))) - t1 := ctx.(*Context).FromBytes(dtype, mxData, 32, 2) - t1f := ctx.(*Context).FromFloatSlice(dataf, 32, 2) - - d2 := [32 * 2]float32{ - // 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - } - t2 := ctx.(*Context).FromFloatSlice(d2[:], 32, 2) - - t.Log("calling Mulmat") - t3f := t1f.Mulmat(ctx, t2) - t3 := t1.Mulmat(ctx, t2) - d3 := ml.Dump(ctx, t3) - d3f := ml.Dump(ctx, t3f) - if d3 != d3f { - t.Fatalf("expected (f32): \n%s\n\n but got (mxfp4): \n%s", d3f, d3) - } - t.Logf("result (mxfp4): \n%s", d3) - }) -} - -func TestMXFP4Conversion(t *testing.T) { - t.Run("quantize/exact", func(t 
*testing.T) { - r := rand.New(rand.NewSource(0)) - - data := [32 * 4]float32{} - for i := range data { - data[i] = mxfp4_vals[r.Int()%len(mxfp4_vals)] - } - mxData := Quantize(fsggml.TensorTypeMXFP4, data[:], []uint64{uint64(len(data))}) - newData := ConvertToF32(mxData, uint32(fsggml.TensorTypeMXFP4), uint64(len(data))) - - if len(data) != len(newData) { - t.Fatalf("length mismatch. started with %d but got %d", len(data), len(newData)) - } - for i := range data { - if data[i] != newData[i] { - t.Logf("started with: %v", data) - t.Logf("got : %v", newData) - t.Fatalf("mismatched data starting at offset %d started with %f but got %f", i, data[i], newData[i]) - } - } - }) - t.Run("quantize/arange", func(t *testing.T) { - data := [32 * 8]float32{} - for i := range data { - data[i] = float32(i) // / float32(6.0) - } - mxData := Quantize(fsggml.TensorTypeMXFP4, data[:], []uint64{uint64(len(data))}) - newData := ConvertToF32(mxData, uint32(fsggml.TensorTypeMXFP4), uint64(len(data))) - - if len(data) != len(newData) { - t.Fatalf("length mismatch. started with %d but got %d", len(data), len(newData)) - } - sim := cosineSimilarity(data[:], newData) - if sim < 0.99 { - t.Fatalf("failed similarity test: %f", sim) - } - t.Logf("similarity: %f", sim) - }) -} - -func dotProduct[V float32 | float64](v1, v2 []V) V { - var result V = 0 - for i := range v1 { - result += v1[i] * v2[i] - } - return result -} - -func magnitude[V float32 | float64](v []V) V { - var result V = 0 - for _, val := range v { - result += val * val - } - return V(math.Sqrt(float64(result))) -} - -func cosineSimilarity[V float32 | float64](v1, v2 []V) V { - return dotProduct(v1, v2) / (magnitude(v1) * magnitude(v2)) -} diff --git a/ml/nn/attention.go b/ml/nn/attention.go index a3f43a1ea..21b4a28ae 100644 --- a/ml/nn/attention.go +++ b/ml/nn/attention.go @@ -22,6 +22,10 @@ import ( // // Attention output with shape [d_v, heads, seq_len_q] func Attention(ctx ml.Context, query, key, value ml.Tensor, scale float64, cache kvcache.Cache) ml.Tensor { + return AttentionWithSinks(ctx, query, key, value, nil, scale, cache) +} + +func AttentionWithSinks(ctx ml.Context, query, key, value, sinks ml.Tensor, scale float64, cache kvcache.Cache) ml.Tensor { if key != nil && value != nil { if query.Dim(0) != key.Dim(0) { panic(fmt.Errorf("d_k in attention operation does not match between query(%v) and key(%v)", query.Dim(0), key.Dim(0))) @@ -50,7 +54,7 @@ func Attention(ctx ml.Context, query, key, value ml.Tensor, scale float64, cache // Only use the fast SDPA implementation if we have a cache, since that's what // will do any expected backend-specific transformations for us if sdpa, ok := query.(ml.ScaledDotProductAttention); ok && cache != nil { - return sdpa.ScaledDotProductAttention(ctx, key, value, mask, scale) + return sdpa.ScaledDotProductAttention(ctx, key, value, mask, sinks, scale) } else { query = query.Permute(ctx, 0, 2, 1, 3) key = key.Permute(ctx, 0, 2, 1, 3) diff --git a/ml/nn/linear.go b/ml/nn/linear.go index 5bcde84de..c1f331635 100644 --- a/ml/nn/linear.go +++ b/ml/nn/linear.go @@ -24,16 +24,7 @@ type LinearBatch struct { func (m *LinearBatch) Forward(ctx ml.Context, t, indices ml.Tensor) ml.Tensor { t = m.Weight.MulmatID(ctx, t, indices) if m.Bias != nil { - var bias ml.Tensor - if len(indices.Shape()) > 1 { - // FIXME: Rows does not support 2D indices for a 2D input tensor so reshape indices to 1D. - bias = m.Bias.Rows(ctx, indices.Contiguous(ctx, indices.Dim(0)*indices.Dim(1))). - Duplicate(ctx). 
-				Reshape(ctx, m.Bias.Dim(0), indices.Dim(0), indices.Dim(1))
-		} else {
-			bias = m.Bias.Rows(ctx, indices)
-		}
-		t = t.Add(ctx, bias)
+		t = t.AddID(ctx, m.Bias, indices)
 	}
 
 	return t
diff --git a/model/model.go b/model/model.go
index 25097e010..d0fe26d7e 100644
--- a/model/model.go
+++ b/model/model.go
@@ -174,25 +174,25 @@ func populateFields(base Base, v reflect.Value, tags ...Tag) reflect.Value {
 			vv.Set(reflect.ValueOf(base))
 		} else if tt == reflect.TypeOf((*ml.Tensor)(nil)).Elem() {
 			var fn func([]Tag) [][]string
-			fn = func(tags []Tag) (values [][]string) {
-				if len(tags) < 1 {
-					return nil
-				}
+			fn = func(tags []Tag) (names [][]string) {
+				if len(tags) > 0 {
+					localNames := []string{tags[0].Name}
+					localNames = append(localNames, tags[0].Alternate...)
 
-				values = [][]string{{tags[0].Name}}
-				for _, alt := range tags[0].Alternate {
-					values = append(values, []string{alt})
-				}
-
-				for i, value := range values {
-					for _, rest := range fn(tags[1:]) {
-						value = append(value, rest...)
+					for _, localName := range localNames {
+						fullName := []string{localName}
+						nested := fn(tags[1:])
+						if len(nested) > 0 {
+							for _, rest := range nested {
+								names = append(names, append(fullName, rest...))
+							}
+						} else {
+							names = append(names, fullName)
+						}
 					}
-
-					values[i] = value
 				}
-				return values
+				return names
 			}
 
 			names := fn(tagsCopy)
diff --git a/model/model_test.go b/model/model_test.go
index 717c425e2..020f9ffbd 100644
--- a/model/model_test.go
+++ b/model/model_test.go
@@ -118,22 +118,31 @@ func TestPopulateFields(t *testing.T) {
 }
 
 func TestPopulateFieldsAlternateName(t *testing.T) {
+	type nested struct {
+		Weight *nn.Linear `gguf:"a,alt:b"`
+	}
+
 	type fakeModel struct {
 		Input  *nn.Embedding `gguf:"input"`
 		Output *nn.Linear    `gguf:"output,alt:input"`
+		Nested *nested       `gguf:"nested"`
 	}
-	m := fakeModel{}
+
+	var m fakeModel
 	v := reflect.ValueOf(&m)
 	v.Elem().Set(populateFields(Base{b: &fakeBackend{
 		names: []string{
 			"input.weight",
+			"nested.b.weight",
 		},
 	}}, v.Elem()))
 
 	if diff := cmp.Diff(fakeModel{
 		Input:  &nn.Embedding{Weight: &fakeTensor{Name: "input.weight"}},
 		Output: &nn.Linear{Weight: &fakeTensor{Name: "input.weight"}},
+		Nested: &nested{
+			Weight: &nn.Linear{Weight: &fakeTensor{Name: "nested.b.weight"}},
+		},
 	}, m); diff != "" {
 		t.Errorf("populateFields() set incorrect values (-want +got):\n%s", diff)
 	}
diff --git a/model/models/gptoss/model.go b/model/models/gptoss/model.go
index 22b3e0794..3ef078095 100644
--- a/model/models/gptoss/model.go
+++ b/model/models/gptoss/model.go
@@ -101,10 +101,16 @@ func (d *TransformerBlock) Forward(ctx ml.Context, hiddenStates, positions, outp
 }
 
 type AttentionBlock struct {
-	Norm   *nn.RMSNorm `gguf:"attn_norm"`
-	QKV    *nn.Linear  `gguf:"attn_qkv"`
-	Output *nn.Linear  `gguf:"attn_out"`
-	Sinks  ml.Tensor   `gguf:"attn_sinks"`
+	Norm *nn.RMSNorm `gguf:"attn_norm"`
+
+	QKV *nn.Linear `gguf:"attn_qkv"`
+
+	Query *nn.Linear `gguf:"attn_q"`
+	Key   *nn.Linear `gguf:"attn_k"`
+	Value *nn.Linear `gguf:"attn_v"`
+
+	Output *nn.Linear `gguf:"attn_out,alt:attn_output"`
+	Sinks  ml.Tensor  `gguf:"attn_sinks,alt:attn_sinks.weight"`
 }
 
 func (attn *AttentionBlock) Forward(ctx ml.Context, hiddenStates, positions ml.Tensor, cache kvcache.Cache, opts *Options) ml.Tensor {
@@ -113,60 +119,62 @@ func (attn *AttentionBlock) Forward(ctx ml.Context, hiddenStates, positions ml.T
 	residual := hiddenStates
 	hiddenStates = attn.Norm.Forward(ctx, hiddenStates, opts.eps)
 
-	qkv := attn.QKV.Forward(ctx, hiddenStates)
+	var query, key, value ml.Tensor
+	if attn.QKV != nil {
+		qkv := attn.QKV.Forward(ctx, hiddenStates)
+
+		// query = qkv[..., : num_attention_heads * head_dim].reshape(batch_size, num_attention_heads, head_dim)
+		query = qkv.View(ctx,
+			0,
+			opts.headDim(), qkv.Stride(0)*opts.headDim(),
+			opts.numHeads, qkv.Stride(1),
+			batchSize,
+		)
+
+		// key = qkv[..., num_attention_heads * head_dim:(num_attention_heads + num_key_value_heads) * head_dim].reshape(batch_size, num_key_value_heads, head_dim)
+		key = qkv.View(ctx,
+			qkv.Stride(0)*opts.headDim()*opts.numHeads,
+			opts.headDim(), qkv.Stride(0)*opts.headDim(),
+			opts.numKVHeads, qkv.Stride(1),
+			batchSize,
+		)
+
+		// value = qkv[..., (num_attention_heads + num_key_value_heads) * head_dim:].reshape(batch_size, num_key_value_heads, head_dim)
+		value = qkv.View(ctx,
+			qkv.Stride(0)*opts.headDim()*(opts.numHeads+opts.numKVHeads),
+			opts.headDim(), qkv.Stride(0)*opts.headDim(),
+			opts.numKVHeads, qkv.Stride(1),
+			batchSize,
+		)
+	} else {
+		query = attn.Query.Forward(ctx, hiddenStates)
+		query = query.Reshape(ctx, opts.headDim(), opts.numHeads, batchSize)
+
+		key = attn.Key.Forward(ctx, hiddenStates)
+		key = key.Reshape(ctx, opts.headDim(), opts.numKVHeads, batchSize)
+
+		value = attn.Value.Forward(ctx, hiddenStates)
+		value = value.Reshape(ctx, opts.headDim(), opts.numKVHeads, batchSize)
+	}
 
-	// query = qkv[..., : num_attention_heads * head_dim].reshape(batch_size, num_attention_heads, head_dim)
-	query := qkv.View(ctx,
-		0,
-		opts.headDim(), qkv.Stride(0)*opts.headDim(),
-		opts.numHeads, qkv.Stride(1),
-		batchSize,
-	)
 	query = fast.RoPE(ctx, query, positions, opts.headDim(), opts.ropeBase, 1./opts.ropeScale, opts.RoPEOptions()...)
-
-	// key = qkv[..., num_attention_heads * head_dim:(num_attention_heads + num_key_value_heads) * head_dim].reshape(batch_size, num_key_value_heads, head_dim)
-	key := qkv.View(ctx,
-		qkv.Stride(0)*opts.headDim()*opts.numHeads,
-		opts.headDim(), qkv.Stride(0)*opts.headDim(),
-		opts.numKVHeads, qkv.Stride(1),
-		batchSize,
-	)
 	key = fast.RoPE(ctx, key, positions, opts.headDim(), opts.ropeBase, 1./opts.ropeScale, opts.RoPEOptions()...)
- // value = qkv[..., (num_attention_heads + num_key_value_heads) * head_dim:].reshape(batch_size, num_key_value_heads, head_dim) - value := qkv.View(ctx, - qkv.Stride(0)*opts.headDim()*(opts.numHeads+opts.numKVHeads), - opts.headDim(), qkv.Stride(0)*opts.headDim(), - opts.numKVHeads, qkv.Stride(1), - batchSize, - ) - - cache.Put(ctx, key, value) - key, value, mask := cache.Get(ctx) - - query = query.Permute(ctx, 0, 2, 1, 3) - key = key.Permute(ctx, 0, 2, 1, 3) - - scores := key.MulmatFullPrec(ctx, query) - scores = scores.Scale(ctx, 1./math.Sqrt(float64(opts.headDim()))) - scores = scores.Add(ctx, mask) - - scores = scores.Concat(ctx, attn.Sinks.Reshape(ctx, 1, 1, opts.numHeads, 1).Repeat(ctx, 1, batchSize), 0) - scores = scores.Softmax(ctx) - scores = scores.Pad(ctx, -1, 0, 0, 0) - - attention := value.Mulmat(ctx, scores) - attention = attention.Permute(ctx, 0, 2, 1, 3).Contiguous(ctx) + attention := nn.AttentionWithSinks(ctx, query, key, value, attn.Sinks, 1/math.Sqrt(float64(opts.headDim())), cache) attention = attention.Reshape(ctx, attention.Dim(0)*attention.Dim(1), batchSize) - return attn.Output.Forward(ctx, attention).Add(ctx, residual) } type MLPBlock struct { - Norm *nn.RMSNorm `gguf:"ffn_norm"` - Router *nn.Linear `gguf:"ffn_gate_inp"` + Norm *nn.RMSNorm `gguf:"ffn_norm,alt:post_attention_norm"` + Router *nn.Linear `gguf:"ffn_gate_inp"` + GateUp *nn.LinearBatch `gguf:"ffn_gate_up_exps"` - Down *nn.LinearBatch `gguf:"ffn_down_exps"` + + Gate *nn.LinearBatch `gguf:"ffn_gate_exps"` + Up *nn.LinearBatch `gguf:"ffn_up_exps"` + + Down *nn.LinearBatch `gguf:"ffn_down_exps"` } func (mlp *MLPBlock) Forward(ctx ml.Context, hiddenStates, one ml.Tensor, opts *Options) ml.Tensor { @@ -185,21 +193,24 @@ func (mlp *MLPBlock) Forward(ctx ml.Context, hiddenStates, one ml.Tensor, opts * hiddenStates = hiddenStates.Reshape(ctx, hiddenStates.Dim(0), 1, hiddenStates.Dim(1)) - hiddenStates = mlp.GateUp.Forward(ctx, hiddenStates, selectedExperts) - hiddenStates = hiddenStates.Reshape(ctx, 2, hiddenStates.Dim(0)/2, hiddenStates.Dim(1), hiddenStates.Dim(2)) + var gate, up ml.Tensor + if mlp.GateUp != nil { + hiddenStates = mlp.GateUp.Forward(ctx, hiddenStates, selectedExperts) + hiddenStates = hiddenStates.Reshape(ctx, 2, hiddenStates.Dim(0)/2, hiddenStates.Dim(1), hiddenStates.Dim(2)) - dimStride := []int{hiddenStates.Dim(0) / 2, hiddenStates.Stride(1), hiddenStates.Dim(1), hiddenStates.Stride(2), hiddenStates.Dim(2), hiddenStates.Stride(3), hiddenStates.Dim(3)} + dimStride := []int{hiddenStates.Dim(0) / 2, hiddenStates.Stride(1), hiddenStates.Dim(1), hiddenStates.Stride(2), hiddenStates.Dim(2), hiddenStates.Stride(3), hiddenStates.Dim(3)} - glu := hiddenStates.View(ctx, 0, dimStride...) - glu = glu.Contiguous(ctx) - glu = glu.Clamp(ctx, float32(math.Inf(-1)), 7.0) - glu = glu.QuickGELU(ctx) + gate = hiddenStates.View(ctx, 0, dimStride...) + gate = gate.Contiguous(ctx, gate.Dim(0)*gate.Dim(1), gate.Dim(2), gate.Dim(3)) - linear := hiddenStates.View(ctx, hiddenStates.Stride(0), dimStride...) - linear = linear.Clamp(ctx, -7.0, 7.0) + up = hiddenStates.View(ctx, hiddenStates.Stride(0), dimStride...) 
+		up = up.Contiguous(ctx, up.Dim(0)*up.Dim(1), up.Dim(2), up.Dim(3))
+	} else {
+		gate = mlp.Gate.Forward(ctx, hiddenStates, selectedExperts)
+		up = mlp.Up.Forward(ctx, hiddenStates, selectedExperts)
+	}
 
-	hiddenStates = glu.Mul(ctx, linear.Add(ctx, one))
-	hiddenStates = hiddenStates.Reshape(ctx, hiddenStates.Dim(0)*hiddenStates.Dim(1), hiddenStates.Dim(2), hiddenStates.Dim(3))
+	hiddenStates = gate.SwiGLU(ctx, up, 1.702, 7)
 
 	experts := mlp.Down.Forward(ctx, hiddenStates, selectedExperts)
 	experts = experts.Mul(ctx, routingWeights)
@@ -259,10 +270,10 @@ func New(c fs.Config) (model.Model, error) {
 			kvcache.NewSWAMemCache(int32(c.Uint("attention.sliding_window")), 4096, m.Shift),
 			kvcache.NewCausalCache(m.Shift),
 		)
-	m.Cache.SetConfig(ml.CacheConfig{CachePadding: 32, PermutedV: true})
 
 	return &m, nil
 }
 
 func init() {
 	model.Register("gptoss", New)
+	model.Register("gpt-oss", New)
 }
diff --git a/runner/llamarunner/image.go b/runner/llamarunner/image.go
index 1d0c1a4f5..cc0153aea 100644
--- a/runner/llamarunner/image.go
+++ b/runner/llamarunner/image.go
@@ -17,7 +17,7 @@ type ImageContext struct {
 	// mu is required to be held when generating embeddings or accessing the cache
 	mu sync.Mutex
 
-	clip *llama.ClipContext
+	mtmd *llama.MtmdContext
 
 	// cache of images to embeddings
 	images []imageCache
@@ -32,7 +32,7 @@ func NewImageContext(llamaContext *llama.Context, modelPath string) (*ImageConte
 
 	var c ImageContext
 	if arch == "clip" {
-		c.clip, err = llama.NewClipContext(llamaContext, modelPath)
+		c.mtmd, err = llama.NewMtmdContext(llamaContext, modelPath)
 	} else {
 		return nil, fmt.Errorf("unknown vision model architecture: %s", arch)
 	}
@@ -51,8 +51,8 @@ func (c *ImageContext) Free(modelPath string) {
 		return
 	}
 
-	if c.clip != nil {
-		c.clip.Free()
+	if c.mtmd != nil {
+		c.mtmd.Free()
 	}
 }
@@ -72,8 +72,8 @@ func (c *ImageContext) NewEmbed(llamaContext *llama.Context, data []byte) ([][]f
 
 	embed, err := c.findImage(hash)
 	if err != nil {
-		if c.clip != nil {
-			embed, err = c.clip.NewEmbed(llamaContext, data)
+		if c.mtmd != nil {
+			embed, err = c.mtmd.NewEmbed(llamaContext, data)
 			if err != nil {
 				return nil, err
 			}
diff --git a/server/harmonyparser.go b/server/harmonyparser.go
index fd6c64e73..86dcf66e3 100644
--- a/server/harmonyparser.go
+++ b/server/harmonyparser.go
@@ -3,6 +3,7 @@ package server
 import (
 	"context"
 	"log/slog"
+	"slices"
 	"strings"
 	"unicode"
 
@@ -19,7 +20,7 @@ const (
 )
 
 func shouldUseHarmony(model Model) bool {
-	if model.Config.ModelFamily == "gptoss" {
+	if slices.Contains([]string{"gptoss", "gpt-oss"}, model.Config.ModelFamily) {
 		// heuristic to check whether the template expects to be parsed via harmony:
 		// search for harmony tags that are nearly always used
 		if model.Template.Contains("<|start|>") && model.Template.Contains("<|end|>") {
diff --git a/server/images.go b/server/images.go
index 0c16dd435..504eb95cf 100644
--- a/server/images.go
+++ b/server/images.go
@@ -112,7 +112,7 @@ func (m *Model) Capabilities() []model.Capability {
 	// Check for thinking capability
 	openingTag, closingTag := thinking.InferTags(m.Template.Template)
 	hasTags := openingTag != "" && closingTag != ""
-	if hasTags || m.Config.ModelFamily == "gptoss" {
+	if hasTags || slices.Contains([]string{"gptoss", "gpt-oss"}, m.Config.ModelFamily) {
 		capabilities = append(capabilities, model.CapabilityThinking)
 	}
 
diff --git a/server/routes.go b/server/routes.go
index 3c044cd00..5d37c83a9 100644
--- a/server/routes.go
+++ b/server/routes.go
@@ -120,7 +120,7 @@ func (s *Server) scheduleRunner(ctx context.Context, name string, caps []model.C
 	// This model is much more capable with a larger context, so set that
 	// unless it would penalize performance too much
-	if !s.lowVRAM && slices.Contains(model.Config.ModelFamilies, "gptoss") {
+	if !s.lowVRAM && slices.Contains([]string{"gptoss", "gpt-oss"}, model.Config.ModelFamily) {
 		opts.NumCtx = max(opts.NumCtx, 8192)
 	}
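
A few notes on the hunks above. The hand-rolled gptoss attention (concatenate the sink column onto the scores, softmax, then pad the sink weight away) is replaced by nn.AttentionWithSinks, which hands the sinks tensor down to the backend's fused SDPA. A minimal sketch of what the sink contributes numerically, on plain slices rather than ml.Tensor; softmaxWithSink is illustrative and not part of the ml package:

package main

import (
	"fmt"
	"math"
)

// softmaxWithSink mirrors the removed gptoss code path: the per-head sink
// logit joins the softmax denominator and soaks up probability mass, but
// its own weight is dropped afterwards (the Pad step), so it never
// selects a value row.
func softmaxWithSink(scores []float32, sink float32) []float32 {
	maxv := float64(sink)
	for _, s := range scores {
		maxv = math.Max(maxv, float64(s))
	}
	denom := math.Exp(float64(sink) - maxv)
	exps := make([]float64, len(scores))
	for i, s := range scores {
		exps[i] = math.Exp(float64(s) - maxv)
		denom += exps[i]
	}
	out := make([]float32, len(scores))
	for i := range exps {
		out[i] = float32(exps[i] / denom)
	}
	return out
}

func main() {
	// The weights no longer sum to one; the missing mass went to the sink.
	fmt.Println(softmaxWithSink([]float32{1, 2, 3}, 5))
}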
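
In ml/nn/linear.go, LinearBatch.Forward now applies the per-expert bias with t.AddID(ctx, m.Bias, indices), replacing the Rows/Duplicate/Reshape workaround for 2D indices. The intended semantics, sketched on flat slices; addID is illustrative, since AddID's exact contract is a backend op not shown in this diff:

package sketch

// addID adds the bias row selected by indices[i] to column i of t:
// out[:, i] = t[:, i] + bias[indices[i]] - an indexed, batched bias add.
// t holds d contiguous elements per column.
func addID(t []float32, d int, bias [][]float32, indices []int32) {
	for i, idx := range indices {
		for j := 0; j < d; j++ {
			t[i*d+j] += bias[idx][j]
		}
	}
}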
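
The populateFields rewrite in model/model.go changes how gguf tag alternates combine: every nesting level now fans out to its primary name plus alternates, and the levels combine as a cross product, which is why the updated test resolves Nested.Weight through "nested.b.weight". A standalone sketch of that expansion (expandNames is illustrative):

package sketch

// expandNames fans out one candidate list per tag level and combines the
// levels as a cross product: [{nested} {a b}] yields nested.a and nested.b.
func expandNames(levels [][]string) [][]string {
	if len(levels) == 0 {
		return nil
	}
	var names [][]string
	for _, local := range levels[0] {
		rest := expandNames(levels[1:])
		if len(rest) == 0 {
			names = append(names, []string{local})
			continue
		}
		for _, r := range rest {
			names = append(names, append([]string{local}, r...))
		}
	}
	return names
}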
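
The fused-QKV branch of the gptoss AttentionBlock carves query, key and value out of one projection with strided views. The offsets follow from the per-token layout [numHeads*headDim | numKVHeads*headDim | numKVHeads*headDim]; the same split on a flat slice, with splitQKV as a hypothetical helper:

package sketch

// splitQKV mirrors the View offsets used in the diff for one token row:
// query starts at element 0, key after numHeads*headDim elements, and
// value after a further numKVHeads*headDim elements.
func splitQKV(qkv []float32, headDim, numHeads, numKVHeads int) (q, k, v []float32) {
	q = qkv[:numHeads*headDim]
	k = qkv[numHeads*headDim : (numHeads+numKVHeads)*headDim]
	v = qkv[(numHeads+numKVHeads)*headDim:]
	return q, k, v
}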
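
The MLP hunk folds the explicit Clamp/QuickGELU/Mul sequence into a single gate.SwiGLU(ctx, up, 1.702, 7) call. Judging from the removed lines, the fused op presumably computes the following per element; swiglu below is a reference sketch under that assumption, not the backend kernel:

package sketch

import "math"

// swiglu reproduces the removed gptoss MLP math: clamp the gate above at
// limit, activate with x*sigmoid(alpha*x) (QuickGELU, alpha=1.702), clamp
// the up projection to [-limit, limit], then multiply by (up + 1).
func swiglu(gate, up []float32, alpha, limit float32) []float32 {
	out := make([]float32, len(gate))
	for i := range gate {
		g := math.Min(float64(gate[i]), float64(limit))
		u := math.Min(math.Max(float64(up[i]), -float64(limit)), float64(limit))
		act := g / (1 + math.Exp(-float64(alpha)*g))
		out[i] = float32(act * (u + 1))
	}
	return out
}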