Ministral 8B is an 8B parameter model featuring a unique interleaved sliding-window attention pattern for faster, memory-efficient inference. Designed for edge use cases, it supports up to 128k context length and excels in knowledge and reasoning tasks.
Tags: tools · 8b
324 pulls · Updated 2 months ago
bbe8d91c1964 · 4.9GB
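To try this build locally, a minimal sketch with the ollama Python client might look like the following. The model tag `ministral-8b` is an assumption for illustration only; substitute the exact tag used for this page when pulling.

```python
# Minimal sketch using the ollama Python client (pip install ollama).
# "ministral-8b" is a hypothetical tag; use the tag shown for this model page.
import ollama

MODEL = "ministral-8b"

# Download the weights (the ~4.9GB Q4_K_M GGUF listed above).
ollama.pull(MODEL)

# Ask a single question; the client applies the chat template for us.
response = ollama.chat(
    model=MODEL,
    messages=[{"role": "user", "content": "Summarize the idea of sliding-window attention in two sentences."}],
)
print(response["message"]["content"])
```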
Metadata

- general.architecture: llama
- general.file_type: Q4_K_M
- llama.attention.head_count: 32
- llama.attention.head_count_kv: 8
- llama.attention.key_length: 128
- llama.attention.layer_norm_rms_epsilon: 1e-05
- llama.attention.value_length: 128
- llama.block_count: 36
- llama.context_length: 32768
- llama.embedding_length: 4096
- llama.feed_forward_length: 12288
- llama.rope.dimension_count: 128
- llama.rope.freq_base: 1e+08
- llama.vocab_size: 131072
- quantize.imatrix.chunks_count: 128
- quantize.imatrix.dataset: /training_dir/calibration_datav3.txt
- quantize.imatrix.entries_count: 252
- quantize.imatrix.file: /models_out/Ministral-8B-Instruct-2410-TEST-GGUF/Ministral-8B-Instruct-2410-TEST.imatrix
- tokenizer.ggml.add_bos_token: true
- tokenizer.ggml.add_eos_token: false
- tokenizer.ggml.add_space_prefix: false
- tokenizer.ggml.bos_token_id: 1
- tokenizer.ggml.eos_token_id: 2
- tokenizer.ggml.merges: [Ġ Ġ, Ġ t, e r, i n, Ġ ĠĠĠ, ...]
- tokenizer.ggml.model: gpt2
- tokenizer.ggml.pre: tekken
- tokenizer.ggml.token_type: [3, 3, 3, 3, 3, ...]
- tokenizer.ggml.tokens: [<unk>, <s>, </s>, [INST], [/INST], ...]
- tokenizer.ggml.unknown_token_id: 0
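The attention settings above (36 blocks, 32 query heads but only 8 KV heads of dimension 128) describe a grouped-query attention layout, which is what keeps the KV cache small on edge hardware. A back-of-envelope estimate, assuming an unquantized fp16 cache:

```python
# Back-of-envelope KV cache size from the GGUF metadata above,
# assuming fp16 (2 bytes) K and V entries and no cache quantization.
n_blocks = 36        # llama.block_count
n_kv_heads = 8       # llama.attention.head_count_kv
head_dim = 128       # llama.attention.key_length / value_length
bytes_per_elem = 2   # fp16
ctx = 32768          # llama.context_length

per_token = 2 * n_blocks * n_kv_heads * head_dim * bytes_per_elem  # K and V
print(per_token)                 # 147456 bytes ≈ 144 KiB per token
print(per_token * ctx / 2**30)   # ≈ 4.5 GiB at the full 32k context

# With full multi-head attention (32 KV heads) the cache would be 4x larger.
```

Since general.architecture is llama, this conversion is handled as a standard dense-attention model, so treat the full-context figure as the conservative bound rather than what the interleaved sliding-window pattern would require natively.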
Tensors

- token_embd.weight: Q4_K [4096, 131072]
- blk.0.attn_k.weight: Q4_K [4096, 1024]
- blk.0.attn_norm.weight: F32 [4096]
- blk.0.attn_output.weight: Q4_K [4096, 4096]
- blk.0.attn_q.weight: Q4_K [4096, 4096]
- blk.0.attn_v.weight: Q6_K [4096, 1024]
- blk.0.ffn_down.weight: Q6_K [12288, 4096]
- blk.0.ffn_gate.weight: Q4_K [4096, 12288]
- blk.0.ffn_norm.weight: F32 [4096]
- blk.0.ffn_up.weight: Q4_K [4096, 12288]
- blk.1.attn_k.weight: Q4_K [4096, 1024]
- blk.1.attn_norm.weight: F32 [4096]
- blk.1.attn_output.weight: Q4_K [4096, 4096]
- blk.1.attn_q.weight: Q4_K [4096, 4096]
- blk.1.attn_v.weight: Q6_K [4096, 1024]
- blk.1.ffn_down.weight: Q6_K [12288, 4096]
- blk.1.ffn_gate.weight: Q4_K [4096, 12288]
- blk.1.ffn_norm.weight: F32 [4096]
- blk.1.ffn_up.weight: Q4_K [4096, 12288]
- blk.2.attn_k.weight: Q4_K [4096, 1024]
- blk.2.attn_norm.weight: F32 [4096]
- blk.2.attn_output.weight: Q4_K [4096, 4096]
- blk.2.attn_q.weight: Q4_K [4096, 4096]
- blk.2.attn_v.weight: Q6_K [4096, 1024]
- blk.2.ffn_down.weight: Q6_K [12288, 4096]
- blk.2.ffn_gate.weight: Q4_K [4096, 12288]
- blk.2.ffn_norm.weight: F32 [4096]
- blk.2.ffn_up.weight: Q4_K [4096, 12288]
- blk.3.attn_k.weight: Q4_K [4096, 1024]
- blk.3.attn_norm.weight: F32 [4096]
- blk.3.attn_output.weight: Q4_K [4096, 4096]
- blk.3.attn_q.weight: Q4_K [4096, 4096]
- blk.3.attn_v.weight: Q4_K [4096, 1024]
- blk.3.ffn_down.weight: Q6_K [12288, 4096]
- blk.3.ffn_gate.weight: Q4_K [4096, 12288]
- blk.3.ffn_norm.weight: F32 [4096]
- blk.3.ffn_up.weight: Q4_K [4096, 12288]
- blk.4.attn_k.weight: Q4_K [4096, 1024]
- blk.4.attn_norm.weight: F32 [4096]
- blk.4.attn_output.weight: Q4_K [4096, 4096]
- blk.4.attn_q.weight: Q4_K [4096, 4096]
- blk.4.attn_v.weight: Q4_K [4096, 1024]
- blk.4.ffn_down.weight: Q4_K [12288, 4096]
- blk.4.ffn_gate.weight: Q4_K [4096, 12288]
- blk.4.ffn_norm.weight: F32 [4096]
- blk.4.ffn_up.weight: Q4_K [4096, 12288]
- blk.5.attn_k.weight: Q4_K [4096, 1024]
- blk.5.attn_norm.weight: F32 [4096]
- blk.5.attn_output.weight: Q4_K [4096, 4096]
- blk.5.attn_q.weight: Q4_K [4096, 4096]
- blk.5.attn_v.weight: Q6_K [4096, 1024]
- blk.5.ffn_down.weight: Q4_K [12288, 4096]
- blk.5.ffn_gate.weight: Q4_K [4096, 12288]
- blk.5.ffn_norm.weight: F32 [4096]
- blk.5.ffn_up.weight: Q4_K [4096, 12288]
- blk.6.attn_k.weight: Q4_K [4096, 1024]
- blk.6.attn_norm.weight: F32 [4096]
- blk.6.attn_output.weight: Q4_K [4096, 4096]
- blk.6.attn_q.weight: Q4_K [4096, 4096]
- blk.6.attn_v.weight: Q4_K [4096, 1024]
- blk.6.ffn_down.weight: Q6_K [12288, 4096]
- blk.6.ffn_gate.weight: Q4_K [4096, 12288]
- blk.6.ffn_norm.weight: F32 [4096]
- blk.6.ffn_up.weight: Q4_K [4096, 12288]
- blk.7.attn_k.weight: Q4_K [4096, 1024]
- blk.7.attn_norm.weight: F32 [4096]
- blk.7.attn_output.weight: Q4_K [4096, 4096]
- blk.7.attn_q.weight: Q4_K [4096, 4096]
- blk.7.attn_v.weight: Q4_K [4096, 1024]
- blk.7.ffn_down.weight: Q4_K [12288, 4096]
- blk.7.ffn_gate.weight: Q4_K [4096, 12288]
- blk.7.ffn_norm.weight: F32 [4096]
- blk.7.ffn_up.weight: Q4_K [4096, 12288]
- blk.8.attn_k.weight: Q4_K [4096, 1024]
- blk.8.attn_norm.weight: F32 [4096]
- blk.8.attn_output.weight: Q4_K [4096, 4096]
- blk.8.attn_q.weight: Q4_K [4096, 4096]
- blk.8.attn_v.weight: Q6_K [4096, 1024]
- blk.8.ffn_down.weight: Q4_K [12288, 4096]
- blk.8.ffn_gate.weight: Q4_K [4096, 12288]
- blk.8.ffn_norm.weight: F32 [4096]
- blk.8.ffn_up.weight: Q4_K [4096, 12288]
- blk.9.attn_k.weight: Q4_K [4096, 1024]
- blk.9.attn_norm.weight: F32 [4096]
- blk.9.attn_output.weight: Q4_K [4096, 4096]
- blk.9.attn_q.weight: Q4_K [4096, 4096]
- blk.9.attn_v.weight: Q4_K [4096, 1024]
- blk.9.ffn_down.weight: Q6_K [12288, 4096]
- blk.9.ffn_gate.weight: Q4_K [4096, 12288]
- blk.9.ffn_norm.weight: F32 [4096]
- blk.9.ffn_up.weight: Q4_K [4096, 12288]
- blk.10.attn_k.weight: Q4_K [4096, 1024]
- blk.10.attn_norm.weight: F32 [4096]
- blk.10.attn_output.weight: Q4_K [4096, 4096]
- blk.10.attn_q.weight: Q4_K [4096, 4096]
- blk.10.attn_v.weight: Q6_K [4096, 1024]
- blk.10.ffn_down.weight: Q4_K [12288, 4096]
- blk.10.ffn_gate.weight: Q4_K [4096, 12288]
- blk.10.ffn_norm.weight: F32 [4096]
- blk.10.ffn_up.weight: Q4_K [4096, 12288]
- blk.11.attn_k.weight: Q4_K [4096, 1024]
- blk.11.attn_norm.weight: F32 [4096]
- blk.11.attn_output.weight: Q4_K [4096, 4096]
- blk.11.attn_q.weight: Q4_K [4096, 4096]
- blk.11.attn_v.weight: Q4_K [4096, 1024]
- blk.11.ffn_down.weight: Q4_K [12288, 4096]
- blk.11.ffn_gate.weight: Q4_K [4096, 12288]
- blk.11.ffn_norm.weight: F32 [4096]
- blk.11.ffn_up.weight: Q4_K [4096, 12288]
- blk.12.attn_k.weight: Q4_K [4096, 1024]
- blk.12.attn_norm.weight: F32 [4096]
- blk.12.attn_output.weight: Q4_K [4096, 4096]
- blk.12.attn_q.weight: Q4_K [4096, 4096]
- blk.12.attn_v.weight: Q6_K [4096, 1024]
- blk.12.ffn_down.weight: Q6_K [12288, 4096]
- blk.12.ffn_gate.weight: Q4_K [4096, 12288]
- blk.12.ffn_norm.weight: F32 [4096]
- blk.12.ffn_up.weight: Q4_K [4096, 12288]
- blk.13.attn_k.weight: Q4_K [4096, 1024]
- blk.13.attn_norm.weight: F32 [4096]
- blk.13.attn_output.weight: Q4_K [4096, 4096]
- blk.13.attn_q.weight: Q4_K [4096, 4096]
- blk.13.attn_v.weight: Q4_K [4096, 1024]
- blk.13.ffn_down.weight: Q4_K [12288, 4096]
- blk.13.ffn_gate.weight: Q4_K [4096, 12288]
- blk.13.ffn_norm.weight: F32 [4096]
- blk.13.ffn_up.weight: Q4_K [4096, 12288]
- blk.14.attn_k.weight: Q4_K [4096, 1024]
- blk.14.attn_norm.weight: F32 [4096]
- blk.14.attn_output.weight: Q4_K [4096, 4096]
- blk.14.attn_q.weight: Q4_K [4096, 4096]
- blk.14.attn_v.weight: Q4_K [4096, 1024]
- blk.14.ffn_down.weight: Q4_K [12288, 4096]
- blk.14.ffn_gate.weight: Q4_K [4096, 12288]
- blk.14.ffn_norm.weight: F32 [4096]
- blk.14.ffn_up.weight: Q4_K [4096, 12288]
- blk.15.attn_k.weight: Q4_K [4096, 1024]
- blk.15.attn_norm.weight: F32 [4096]
- blk.15.attn_output.weight: Q4_K [4096, 4096]
- blk.15.attn_q.weight: Q4_K [4096, 4096]
- blk.15.attn_v.weight: Q6_K [4096, 1024]
- blk.15.ffn_down.weight: Q6_K [12288, 4096]
- blk.15.ffn_gate.weight: Q4_K [4096, 12288]
- blk.15.ffn_norm.weight: F32 [4096]
- blk.15.ffn_up.weight: Q4_K [4096, 12288]
- blk.16.attn_k.weight: Q4_K [4096, 1024]
- blk.16.attn_norm.weight: F32 [4096]
- blk.16.attn_output.weight: Q4_K [4096, 4096]
- blk.16.attn_q.weight: Q4_K [4096, 4096]
- blk.16.attn_v.weight: Q4_K [4096, 1024]
- blk.16.ffn_down.weight: Q4_K [12288, 4096]
- blk.16.ffn_gate.weight: Q4_K [4096, 12288]
- blk.16.ffn_norm.weight: F32 [4096]
- blk.16.ffn_up.weight: Q4_K [4096, 12288]
- blk.17.attn_k.weight: Q4_K [4096, 1024]
- blk.17.attn_norm.weight: F32 [4096]
- blk.17.attn_output.weight: Q4_K [4096, 4096]
- blk.17.attn_q.weight: Q4_K [4096, 4096]
- blk.17.attn_v.weight: Q4_K [4096, 1024]
- blk.17.ffn_down.weight: Q4_K [12288, 4096]
- blk.17.ffn_gate.weight: Q4_K [4096, 12288]
- blk.17.ffn_norm.weight: F32 [4096]
- blk.17.ffn_up.weight: Q4_K [4096, 12288]
- blk.18.attn_k.weight: Q4_K [4096, 1024]
- blk.18.attn_norm.weight: F32 [4096]
- blk.18.attn_output.weight: Q4_K [4096, 4096]
- blk.18.attn_q.weight: Q4_K [4096, 4096]
- blk.18.attn_v.weight: Q6_K [4096, 1024]
- blk.18.ffn_down.weight: Q6_K [12288, 4096]
- blk.18.ffn_gate.weight: Q4_K [4096, 12288]
- blk.18.ffn_norm.weight: F32 [4096]
- blk.18.ffn_up.weight: Q4_K [4096, 12288]
- blk.19.attn_k.weight: Q4_K [4096, 1024]
- blk.19.attn_norm.weight: F32 [4096]
- blk.19.attn_output.weight: Q4_K [4096, 4096]
- blk.19.attn_q.weight: Q4_K [4096, 4096]
- blk.19.attn_v.weight: Q4_K [4096, 1024]
- blk.19.ffn_down.weight: Q4_K [12288, 4096]
- blk.19.ffn_gate.weight: Q4_K [4096, 12288]
- blk.19.ffn_norm.weight: F32 [4096]
- blk.19.ffn_up.weight: Q4_K [4096, 12288]
- blk.20.attn_k.weight: Q4_K [4096, 1024]
- blk.20.attn_norm.weight: F32 [4096]
- blk.20.attn_output.weight: Q4_K [4096, 4096]
- blk.20.attn_q.weight: Q4_K [4096, 4096]
- blk.20.attn_v.weight: Q4_K [4096, 1024]
- blk.20.ffn_down.weight: Q4_K [12288, 4096]
- blk.20.ffn_gate.weight: Q4_K [4096, 12288]
- blk.20.ffn_norm.weight: F32 [4096]
- blk.20.ffn_up.weight: Q4_K [4096, 12288]
- blk.21.attn_k.weight: Q4_K [4096, 1024]
- blk.21.attn_norm.weight: F32 [4096]
- blk.21.attn_output.weight: Q4_K [4096, 4096]
- blk.21.attn_q.weight: Q4_K [4096, 4096]
- blk.21.attn_v.weight: Q6_K [4096, 1024]
- blk.21.ffn_down.weight: Q6_K [12288, 4096]
- blk.21.ffn_gate.weight: Q4_K [4096, 12288]
- blk.21.ffn_norm.weight: F32 [4096]
- blk.21.ffn_up.weight: Q4_K [4096, 12288]
- blk.22.attn_k.weight: Q4_K [4096, 1024]
- blk.22.attn_norm.weight: F32 [4096]
- blk.22.attn_output.weight: Q4_K [4096, 4096]
- blk.22.attn_q.weight: Q4_K [4096, 4096]
- blk.22.attn_v.weight: Q4_K [4096, 1024]
- blk.22.ffn_down.weight: Q4_K [12288, 4096]
- blk.22.ffn_gate.weight: Q4_K [4096, 12288]
- blk.22.ffn_norm.weight: F32 [4096]
- blk.22.ffn_up.weight: Q4_K [4096, 12288]
- blk.23.attn_k.weight: Q4_K [4096, 1024]
- blk.23.attn_norm.weight: F32 [4096]
- blk.23.attn_output.weight: Q4_K [4096, 4096]
- blk.23.attn_q.weight: Q4_K [4096, 4096]
- blk.23.attn_v.weight: Q4_K [4096, 1024]
- blk.23.ffn_down.weight: Q4_K [12288, 4096]
- blk.23.ffn_gate.weight: Q4_K [4096, 12288]
- blk.23.ffn_norm.weight: F32 [4096]
- blk.23.ffn_up.weight: Q4_K [4096, 12288]
- blk.24.attn_k.weight: Q4_K [4096, 1024]
- blk.24.attn_norm.weight: F32 [4096]
- blk.24.attn_output.weight: Q4_K [4096, 4096]
- blk.24.attn_q.weight: Q4_K [4096, 4096]
- blk.24.attn_v.weight: Q6_K [4096, 1024]
- blk.24.ffn_down.weight: Q6_K [12288, 4096]
- blk.24.ffn_gate.weight: Q4_K [4096, 12288]
- blk.24.ffn_norm.weight: F32 [4096]
- blk.24.ffn_up.weight: Q4_K [4096, 12288]
- blk.25.attn_k.weight: Q4_K [4096, 1024]
- blk.25.attn_norm.weight: F32 [4096]
- blk.25.attn_output.weight: Q4_K [4096, 4096]
- blk.25.attn_q.weight: Q4_K [4096, 4096]
- blk.25.attn_v.weight: Q4_K [4096, 1024]
- blk.25.ffn_down.weight: Q4_K [12288, 4096]
- blk.25.ffn_gate.weight: Q4_K [4096, 12288]
- blk.25.ffn_norm.weight: F32 [4096]
- blk.25.ffn_up.weight: Q4_K [4096, 12288]
- blk.26.attn_k.weight: Q4_K [4096, 1024]
- blk.26.attn_norm.weight: F32 [4096]
- blk.26.attn_output.weight: Q4_K [4096, 4096]
- blk.26.attn_q.weight: Q4_K [4096, 4096]
- blk.26.attn_v.weight: Q4_K [4096, 1024]
- blk.26.ffn_down.weight: Q4_K [12288, 4096]
- blk.26.ffn_gate.weight: Q4_K [4096, 12288]
- blk.26.ffn_norm.weight: F32 [4096]
- blk.26.ffn_up.weight: Q4_K [4096, 12288]
- blk.27.attn_k.weight: Q4_K [4096, 1024]
- blk.27.attn_norm.weight: F32 [4096]
- blk.27.attn_output.weight: Q4_K [4096, 4096]
- blk.27.attn_q.weight: Q4_K [4096, 4096]
- blk.27.attn_v.weight: Q6_K [4096, 1024]
- blk.27.ffn_down.weight: Q6_K [12288, 4096]
- blk.27.ffn_gate.weight: Q4_K [4096, 12288]
- blk.27.ffn_norm.weight: F32 [4096]
- blk.27.ffn_up.weight: Q4_K [4096, 12288]
- blk.28.attn_k.weight: Q4_K [4096, 1024]
- blk.28.attn_norm.weight: F32 [4096]
- blk.28.attn_output.weight: Q4_K [4096, 4096]
- blk.28.attn_q.weight: Q4_K [4096, 4096]
- blk.28.attn_v.weight: Q4_K [4096, 1024]
- blk.28.ffn_down.weight: Q4_K [12288, 4096]
- blk.28.ffn_gate.weight: Q4_K [4096, 12288]
- blk.28.ffn_norm.weight: F32 [4096]
- blk.28.ffn_up.weight: Q4_K [4096, 12288]
- blk.29.attn_k.weight: Q4_K [4096, 1024]
- blk.29.attn_norm.weight: F32 [4096]
- blk.29.attn_output.weight: Q4_K [4096, 4096]
- blk.29.attn_q.weight: Q4_K [4096, 4096]
- blk.29.attn_v.weight: Q4_K [4096, 1024]
- blk.29.ffn_down.weight: Q4_K [12288, 4096]
- blk.29.ffn_gate.weight: Q4_K [4096, 12288]
- blk.29.ffn_norm.weight: F32 [4096]
- blk.29.ffn_up.weight: Q4_K [4096, 12288]
- blk.30.attn_k.weight: Q4_K [4096, 1024]
- blk.30.attn_norm.weight: F32 [4096]
- blk.30.attn_output.weight: Q4_K [4096, 4096]
- blk.30.attn_q.weight: Q4_K [4096, 4096]
- blk.30.attn_v.weight: Q6_K [4096, 1024]
- blk.30.ffn_down.weight: Q6_K [12288, 4096]
- blk.30.ffn_gate.weight: Q4_K [4096, 12288]
- blk.30.ffn_norm.weight: F32 [4096]
- blk.30.ffn_up.weight: Q4_K [4096, 12288]
- blk.31.attn_k.weight: Q4_K [4096, 1024]
- blk.31.attn_norm.weight: F32 [4096]
- blk.31.attn_output.weight: Q4_K [4096, 4096]
- blk.31.attn_q.weight: Q4_K [4096, 4096]
- blk.31.attn_v.weight: Q6_K [4096, 1024]
- blk.31.ffn_down.weight: Q6_K [12288, 4096]
- blk.31.ffn_gate.weight: Q4_K [4096, 12288]
- blk.31.ffn_norm.weight: F32 [4096]
- blk.31.ffn_up.weight: Q4_K [4096, 12288]
- blk.32.attn_k.weight: Q4_K [4096, 1024]
- blk.32.attn_norm.weight: F32 [4096]
- blk.32.attn_output.weight: Q4_K [4096, 4096]
- blk.32.attn_q.weight: Q4_K [4096, 4096]
- blk.32.attn_v.weight: Q6_K [4096, 1024]
- blk.32.ffn_down.weight: Q6_K [12288, 4096]
- blk.32.ffn_gate.weight: Q4_K [4096, 12288]
- blk.32.ffn_norm.weight: F32 [4096]
- blk.32.ffn_up.weight: Q4_K [4096, 12288]
- blk.33.attn_k.weight: Q4_K [4096, 1024]
- blk.33.attn_norm.weight: F32 [4096]
- blk.33.attn_output.weight: Q4_K [4096, 4096]
- blk.33.attn_q.weight: Q4_K [4096, 4096]
- blk.33.attn_v.weight: Q6_K [4096, 1024]
- blk.33.ffn_down.weight: Q6_K [12288, 4096]
- blk.33.ffn_gate.weight: Q4_K [4096, 12288]
- blk.33.ffn_norm.weight: F32 [4096]
- blk.33.ffn_up.weight: Q4_K [4096, 12288]
- blk.34.attn_k.weight: Q4_K [4096, 1024]
- blk.34.attn_norm.weight: F32 [4096]
- blk.34.attn_output.weight: Q4_K [4096, 4096]
- blk.34.attn_q.weight: Q4_K [4096, 4096]
- blk.34.attn_v.weight: Q6_K [4096, 1024]
- blk.34.ffn_down.weight: Q6_K [12288, 4096]
- blk.34.ffn_gate.weight: Q4_K [4096, 12288]
- blk.34.ffn_norm.weight: F32 [4096]
- blk.34.ffn_up.weight: Q4_K [4096, 12288]
- blk.35.attn_k.weight: Q4_K [4096, 1024]
- blk.35.attn_norm.weight: F32 [4096]
- blk.35.attn_output.weight: Q4_K [4096, 4096]
- blk.35.attn_q.weight: Q4_K [4096, 4096]
- blk.35.attn_v.weight: Q6_K [4096, 1024]
- blk.35.ffn_down.weight: Q6_K [12288, 4096]
- blk.35.ffn_gate.weight: Q4_K [4096, 12288]
- blk.35.ffn_norm.weight: F32 [4096]
- blk.35.ffn_up.weight: Q4_K [4096, 12288]
- output.weight: Q6_K [4096, 131072]
- output_norm.weight: F32 [4096]
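As a sanity check on the 8B label, the tensor shapes listed above are enough to tally the parameter count (the output weight is counted separately, since it is not tied to token_embd here):

```python
# Tally parameters from the tensor shapes listed above.
embed = 4096 * 131072                 # token_embd.weight
output = 4096 * 131072 + 4096         # output.weight + output_norm.weight

per_block = (
    2 * (4096 * 4096)     # attn_q, attn_output
    + 2 * (4096 * 1024)   # attn_k, attn_v
    + 3 * (4096 * 12288)  # ffn_gate, ffn_up, ffn_down
    + 2 * 4096            # attn_norm, ffn_norm
)

total = embed + 36 * per_block + output
print(total)         # 8019808256 -> ~8.0B parameters
print(total / 1e9)   # ≈ 8.02
```

At Q4_K_M's roughly 4.8 to 5 effective bits per weight, that total is consistent with the 4.9GB file size shown at the top of the page.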