#include "llama-hparams.h"

#include "ggml.h"

#include <algorithm>
#include <cassert>
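
// marks each layer as SWA (sliding-window attention) or dense (full attention),
// repeating every n_pattern layers:
//   dense_first == true  -> the first layer of each group is dense, the rest are SWA
//   dense_first == false -> the last layer of each group is dense, the rest are SWA
// example with n_pattern = 3, dense_first = false:
//   il     : 0   1   2   3   4   5   ...
//   il % 3 : 0   1   2   0   1   2   ...
//   swa    : yes yes no  yes yes no  ...
// (with n_pattern == 0 the expression short-circuits before the modulo)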
void llama_hparams::set_swa_pattern(uint32_t n_pattern, bool dense_first) {
    if (dense_first) {
        for (uint32_t il = 0; il < n_layer; ++il) {
            swa_layers[il] = n_pattern == 0 || (il % n_pattern != 0);
        }
    } else {
        for (uint32_t il = 0; il < n_layer; ++il) {
            swa_layers[il] = n_pattern == 0 || (il % n_pattern < (n_pattern - 1));
        }
    }
}

bool llama_hparams::is_swa_any() const {
    for (uint32_t il = 0; il < n_layer; ++il) {
        if (swa_layers[il]) {
            return true;
        }
    }

    return false;
}

uint32_t llama_hparams::n_head(uint32_t il) const {
    if (il < n_layer) {
        return n_head_arr[il];
    }

    GGML_ABORT("fatal error");
}

uint32_t llama_hparams::n_head_kv(uint32_t il) const {
    if (il < n_layer) {
        return n_head_kv_arr[il];
    }

    GGML_ABORT("fatal error");
}

uint32_t llama_hparams::n_ff(uint32_t il) const {
    if (il < n_layer) {
        return n_ff_arr[il];
    }

    GGML_ABORT("fatal error");
}
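
// grouped-query attention ratio: the number of query heads that share one KV head in layer il
// example: n_head(il) = 32, n_head_kv(il) = 8 -> n_gqa(il) = 4
// returns 0 for layers that have no KV heads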
uint32_t llama_hparams::n_gqa(uint32_t il) const {
    const uint32_t n_head    = this->n_head(il);
    const uint32_t n_head_kv = this->n_head_kv(il);

    if (n_head_kv == 0) {
        return 0;
    }

    return n_head/n_head_kv;
}
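
// width of the input embeddings fed to the graph; models with deepstack layers concatenate
// n_deepstack_layers extra n_embd-sized slices onto the token embedding, giving
// n_embd * (1 + n_deepstack_layers) in total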
uint32_t llama_hparams::n_embd_inp() const {
    uint32_t n_embd_inp = n_embd;

    if (n_deepstack_layers > 0) {
        n_embd_inp += n_embd * n_deepstack_layers;
    }

    return n_embd_inp;
}

uint32_t llama_hparams::n_embd_out() const {
    return n_embd_out_impl > 0 ? n_embd_out_impl : n_embd;
}
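
// dimension of the key/value embeddings across all KV heads of layer il,
// i.e. the per-token row size of the K and V caches for that layer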
uint32_t llama_hparams::n_embd_k_gqa(uint32_t il) const {
    const uint32_t n_head_kv = this->n_head_kv(il);

    return n_embd_head_k * n_head_kv;
}

uint32_t llama_hparams::n_embd_v_gqa(uint32_t il) const {
    const uint32_t n_head_kv = this->n_head_kv(il);

    return n_embd_head_v * n_head_kv;
}
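
// n_head_kv can differ between layers, so the K/V row sizes are not necessarily uniform;
// the *_variable helpers detect this and the *_max helpers give an upper bound across all layers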
bool llama_hparams::is_n_embd_k_gqa_variable() const {
    const uint32_t val = n_embd_k_gqa();
    for (uint32_t il = 0; il < n_layer; ++il) {
        if (val != n_embd_k_gqa(il)) {
            return true;
        }
    }

    return false;
}

bool llama_hparams::is_n_embd_v_gqa_variable() const {
    const uint32_t val = n_embd_v_gqa();
    for (uint32_t il = 0; il < n_layer; ++il) {
        if (val != n_embd_v_gqa(il)) {
            return true;
        }
    }

    return false;
}

uint32_t llama_hparams::n_embd_k_gqa_max() const {
    uint32_t val = n_embd_k_gqa();
    for (uint32_t il = 0; il < n_layer; ++il) {
        val = std::max(val, n_embd_k_gqa(il));
    }

    return val;
}

uint32_t llama_hparams::n_embd_v_gqa_max() const {
    uint32_t val = n_embd_v_gqa();
    for (uint32_t il = 0; il < n_layer; ++il) {
        val = std::max(val, n_embd_v_gqa(il));
    }

    return val;
}
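
// number of elements kept per sequence for the rolling (shift/convolution) state of
// recurrent layers; the layout depends on the architecture, see the cases below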
uint32_t llama_hparams::n_embd_r() const {
    if (wkv_head_size != 0) {
        // for RWKV models
        return token_shift_count * n_embd;
    }

    if (n_shortconv_l_cache != 0) {
        // for LFM2 models
        return n_embd * (n_shortconv_l_cache - 1);
    }

    if (n_embd_head_kda != 0) {
        // for Kimi KDA layers
        // Conv state for Q, K, V: 3 * (d_conv - 1) * n_head * head_dim
        const uint32_t d_inner = n_head() * n_embd_head_kda; // 32 * 128 = 4096
        return 3 * (ssm_d_conv > 0 ? ssm_d_conv - 1 : 3) * d_inner;
    }

    // TODO: maybe support other convolution strides than 1
    // NOTE: since the first column of the conv_state is shifted out each time, it's not actually needed
    // Corresponds to Mamba's conv_states size
    return (ssm_d_conv > 0 ? ssm_d_conv - 1 : 0) * (ssm_d_inner + 2*ssm_n_group*ssm_d_state);
}
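
// number of elements kept per sequence for the full recurrent state
// (RWKV wkv state, Kimi delta-attention state, or Mamba SSM state)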
uint32_t llama_hparams::n_embd_s() const {
    if (wkv_head_size != 0) {
        // corresponds to RWKV's wkv_states size
        return n_embd * wkv_head_size;
    }

    if (n_embd_head_kda != 0) {
        // for Kimi KDA layers
        // Full recurrent state: head_dim * head_dim * n_head
        // h tensor shape for delta attention: [head_dim, head_dim, n_head]
        return n_embd_head_kda * n_embd_head_kda * n_head(); // 128 * 128 * 32 = 524288
    }

    // corresponds to Mamba's ssm_states size
    return ssm_d_state * ssm_d_inner;
}

bool llama_hparams::is_recurrent(uint32_t il) const {
    if (il < n_layer) {
        return recurrent_layer_arr[il];
    }

    GGML_ABORT("%s: il (%u) out of bounds (n_layer: %u)\n", __func__, il, n_layer);
}
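
// M-RoPE and interleaved M-RoPE track 4 position components per token
// (e.g. temporal/spatial indices for multimodal inputs), regular RoPE uses a single position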
uint32_t llama_hparams::n_pos_per_embd() const {
    return rope_type == LLAMA_ROPE_TYPE_MROPE || rope_type == LLAMA_ROPE_TYPE_IMROPE ? 4 : 1;
}

bool llama_hparams::is_swa(uint32_t il) const {
    if (il < n_layer) {
        return swa_layers[il];
    }

    GGML_ABORT("fatal error");
}
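
// MLA (multi-head latent attention) models use dedicated K/V head dimensions;
// either both *_mla_impl values are set or neither (see the assert), and the
// accessors below fall back to the regular head dimensions for non-MLA models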
bool llama_hparams::is_mla() const {
    assert((n_embd_head_k_mla_impl == 0 && n_embd_head_v_mla_impl == 0) ||
           (n_embd_head_k_mla_impl != 0 && n_embd_head_v_mla_impl != 0));

    return n_embd_head_k_mla_impl != 0 && n_embd_head_v_mla_impl != 0;
}

uint32_t llama_hparams::n_embd_head_k_mla() const {
    return is_mla() ? n_embd_head_k_mla_impl : n_embd_head_k;
}

uint32_t llama_hparams::n_embd_head_v_mla() const {
    return is_mla() ? n_embd_head_v_mla_impl : n_embd_head_v;
}
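
// n_layer_kv_from_start >= 0 restricts the KV cache to the first n_layer_kv_from_start layers;
// a negative value means every layer has a KV cache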
bool llama_hparams::has_kv(uint32_t il) const {
    if (n_layer_kv_from_start >= 0) {
        if (il < (uint32_t) n_layer_kv_from_start) {
            return true;
        }

        return false;
    }

    // by default, all layers have kv
    return true;
}

uint32_t llama_hparams::n_layer_kv() const {
    uint32_t res = 0;

    for (uint32_t il = 0; il < n_layer; ++il) {
        if (has_kv(il)) {
            res++;
        }
    }

    return res;
}

bool llama_hparams::use_mrope() const {
    return rope_sections[0] > 0 && rope_sections[1] > 0;
}