llama-cpp-turboquant/tests/test-gguf-model-data.cpp

#include "gguf-model-data.h"
#include <cstdio>
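
// Minimal assertion helper: on failure, print the failing condition, the
// source line and a message, then make main() return non-zero. The
// do { ... } while (0) wrapper makes the macro expand to a single statement,
// so it is safe to use in unbraced if/else bodies.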
#define TEST_ASSERT(cond, msg) \
    do { \
        if (!(cond)) { \
            fprintf(stderr, "FAIL: %s (line %d): %s\n", #cond, __LINE__, msg); \
            return 1; \
        } \
    } while (0)
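
// The two helpers below are local to this test and are not part of
// gguf-model-data.h. They are written as templates so they do not have to
// assume the names of the metadata/tensor structs declared in that header,
// only the fields this test already relies on.

// Dump the key metadata fields of a fetched model to stderr.
template <typename Meta>
static void print_model_meta(const Meta & model) {
    fprintf(stderr, "Architecture: %s\n", model.architecture.c_str());
    fprintf(stderr, "n_embd: %u\n", model.n_embd);
    fprintf(stderr, "n_ff: %u\n", model.n_ff);
    fprintf(stderr, "n_vocab: %u\n", model.n_vocab);
    fprintf(stderr, "n_layer: %u\n", model.n_layer);
    fprintf(stderr, "n_head: %u\n", model.n_head);
    fprintf(stderr, "n_head_kv: %u\n", model.n_head_kv);
    fprintf(stderr, "n_expert: %u\n", model.n_expert);
    fprintf(stderr, "n_embd_head_k: %u\n", model.n_embd_head_k);
    fprintf(stderr, "n_embd_head_v: %u\n", model.n_embd_head_v);
    fprintf(stderr, "tensors: %zu\n", model.tensors.size());
}

// Look up a tensor entry by name; returns nullptr when no such tensor exists.
template <typename Tensors>
static const typename Tensors::value_type * find_tensor(const Tensors & tensors, const char * name) {
    for (const auto & t : tensors) {
        if (t.name == name) {
            return &t;
        }
    }
    return nullptr;
}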
int main() {
    fprintf(stderr, "=== test-gguf-model-data ===\n");

    // Fetch Qwen3-0.6B Q8_0 metadata
    auto result = gguf_fetch_model_meta("ggml-org/Qwen3-0.6B-GGUF", "Q8_0");
    if (!result.has_value()) {
        fprintf(stderr, "SKIP: could not fetch model metadata (no network or HTTP disabled)\n");
        return 0;
    }
    const auto & model = result.value();
fprintf(stderr, "Architecture: %s\n", model.architecture.c_str());
fprintf(stderr, "n_embd: %u\n", model.n_embd);
fprintf(stderr, "n_ff: %u\n", model.n_ff);
fprintf(stderr, "n_vocab: %u\n", model.n_vocab);
fprintf(stderr, "n_layer: %u\n", model.n_layer);
fprintf(stderr, "n_head: %u\n", model.n_head);
fprintf(stderr, "n_head_kv: %u\n", model.n_head_kv);
fprintf(stderr, "n_expert: %u\n", model.n_expert);
fprintf(stderr, "n_embd_head_k: %u\n", model.n_embd_head_k);
fprintf(stderr, "n_embd_head_v: %u\n", model.n_embd_head_v);
fprintf(stderr, "tensors: %zu\n", model.tensors.size());
    // Verify architecture
    TEST_ASSERT(model.architecture == "qwen3", "expected architecture 'qwen3'");

    // Verify key dimensions (Qwen3-0.6B)
    TEST_ASSERT(model.n_layer == 28, "expected n_layer == 28");
    TEST_ASSERT(model.n_embd == 1024, "expected n_embd == 1024");
    TEST_ASSERT(model.n_head == 16, "expected n_head == 16");
    TEST_ASSERT(model.n_head_kv == 8, "expected n_head_kv == 8");
    TEST_ASSERT(model.n_expert == 0, "expected n_expert == 0 (not MoE)");
    TEST_ASSERT(model.n_vocab == 151936, "expected n_vocab == 151936");

    // Verify tensor count (311 = 28 layers x 11 per-layer tensors plus
    // token_embd, output_norm and output, assuming the usual qwen3 layout)
    TEST_ASSERT(model.tensors.size() == 311, "expected tensor count == 311");
    // Verify known tensor names exist
    const auto * attn_q      = find_tensor(model.tensors, "blk.0.attn_q.weight");
    const auto * token_embd  = find_tensor(model.tensors, "token_embd.weight");
    const auto * output_norm = find_tensor(model.tensors, "output_norm.weight");
    TEST_ASSERT(attn_q      != nullptr, "expected tensor 'blk.0.attn_q.weight'");
    TEST_ASSERT(token_embd  != nullptr, "expected tensor 'token_embd.weight'");
    TEST_ASSERT(output_norm != nullptr, "expected tensor 'output_norm.weight'");

    // Verify token_embd.weight shape
    TEST_ASSERT(token_embd->ne[0] == 1024, "expected token_embd.weight ne[0] == 1024");
    TEST_ASSERT(token_embd->n_dims == 2, "expected token_embd.weight to be 2D");

    // Fetch the same model again; the second call should be served from the cache
    auto result2 = gguf_fetch_model_meta("ggml-org/Qwen3-0.6B-GGUF", "Q8_0");
    TEST_ASSERT(result2.has_value(), "cached fetch should succeed");
    TEST_ASSERT(result2->tensors.size() == model.tensors.size(), "cached result should match");

    // Test a split MoE model without specifying a quant (should default to Q8_0)
    auto result3 = gguf_fetch_model_meta("ggml-org/GLM-4.6V-GGUF");
    if (!result3.has_value()) {
        fprintf(stderr, "SKIP: could not fetch GLM-4.6V metadata (no network?)\n");
        return 0;
    }
    const auto & model3 = result3.value();
    print_model_meta(model3);
    // Verify architecture
    TEST_ASSERT(model3.architecture == "glm4moe", "expected architecture 'glm4moe'");

    // Verify key dimensions (GLM-4.6V)
    TEST_ASSERT(model3.n_layer == 46, "expected n_layer == 46");
    TEST_ASSERT(model3.n_embd == 4096, "expected n_embd == 4096");
    TEST_ASSERT(model3.n_head == 96, "expected n_head == 96");
    TEST_ASSERT(model3.n_head_kv == 8, "expected n_head_kv == 8");
    TEST_ASSERT(model3.n_expert == 128, "expected n_expert == 128 (MoE)");
    TEST_ASSERT(model3.n_vocab == 151552, "expected n_vocab == 151552");

    // Verify tensor count
    TEST_ASSERT(model3.tensors.size() == 780, "expected tensor count == 780");

    // Test a hybrid-attention model whose head counts are stored as per-layer
    // arrays. Network availability was already proven by the successful fetches
    // above, so a failure here is treated as a real error rather than a skip.
    auto result4 = gguf_fetch_model_meta("ggml-org/Step-3.5-Flash-GGUF", "Q4_K");
    if (!result4.has_value()) {
        fprintf(stderr, "FAIL: could not fetch Step-3.5-Flash metadata\n");
        return 1;
    }
    const auto & model4 = result4.value();
    print_model_meta(model4);
    TEST_ASSERT(model4.architecture == "step35", "expected architecture 'step35'");
    TEST_ASSERT(model4.n_layer == 45, "expected n_layer == 45");
    TEST_ASSERT(model4.n_embd == 4096, "expected n_embd == 4096");
    TEST_ASSERT(model4.n_ff == 11264, "expected n_ff == 11264");
    TEST_ASSERT(model4.n_head == 64, "expected n_head == 64 (first element of per-layer array)");
    TEST_ASSERT(model4.n_head_kv == 8, "expected n_head_kv == 8 (first element of per-layer array)");
    TEST_ASSERT(model4.n_expert == 288, "expected n_expert == 288");
    TEST_ASSERT(model4.n_embd_head_k == 128, "expected n_embd_head_k == 128");
    TEST_ASSERT(model4.n_embd_head_v == 128, "expected n_embd_head_v == 128");
    TEST_ASSERT(model4.n_vocab == 128896, "expected n_vocab == 128896");
    TEST_ASSERT(model4.tensors.size() == 754, "expected tensor count == 754");

    fprintf(stderr, "=== ALL TESTS PASSED ===\n");
    return 0;
}