huihui_ai/microthinker:1b-preview-fp16

530 Downloads · Updated 7 months ago
MicroThinker is an experimental research model focused on advancing AI reasoning capabilities.
Tags: tools · 1b · 3b · 8b
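For context, here is a minimal sketch of chatting with this tag through Ollama's local HTTP API. It assumes a default Ollama install listening on localhost:11434 and that the model has already been pulled (for example with `ollama pull huihui_ai/microthinker:1b-preview-fp16`); the prompt is just an illustrative placeholder.

    import json
    import urllib.request

    # Chat with the model through the local Ollama server (default port 11434).
    payload = {
        "model": "huihui_ai/microthinker:1b-preview-fp16",
        "messages": [
            {"role": "user", "content": "How many prime numbers are there below 30?"}
        ],
        "stream": False,  # return one complete JSON response instead of a stream
    }
    req = urllib.request.Request(
        "http://localhost:11434/api/chat",
        data=json.dumps(payload).encode("utf-8"),
        headers={"Content-Type": "application/json"},
    )
    with urllib.request.urlopen(req) as resp:
        body = json.load(resp)
    print(body["message"]["content"])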
microthinker:1b-preview-fp16

model · d9bd20c79754 · 2.5GB
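The metadata and tensor listing below are baked into this GGUF blob. As a sketch, the same key/value pairs can also be read from a running Ollama server via its /api/show endpoint; the exact response field names (here assumed to be "model_info") can vary between Ollama versions.

    import json
    import urllib.request

    # Fetch model details, including GGUF metadata, from the local Ollama server.
    req = urllib.request.Request(
        "http://localhost:11434/api/show",
        data=json.dumps({"model": "huihui_ai/microthinker:1b-preview-fp16"}).encode(),
        headers={"Content-Type": "application/json"},
    )
    with urllib.request.urlopen(req) as resp:
        info = json.load(resp)
    # "model_info" (assumed field name) holds keys such as general.architecture
    # and llama.context_length, matching the table below.
    for key, value in sorted(info.get("model_info", {}).items()):
        print(f"{key}: {value}")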
Metadata

general.architecture                    llama
general.file_type                       F16
llama.attention.head_count              32
llama.attention.head_count_kv           8
llama.attention.key_length              64
llama.attention.layer_norm_rms_epsilon  1e-05
llama.attention.value_length            64
llama.block_count                       16
llama.context_length                    131072
llama.embedding_length                  2048
llama.feed_forward_length               8192
llama.rope.dimension_count              64
llama.rope.freq_base                    500000
llama.vocab_size                        128256
tokenizer.ggml.bos_token_id             128000
tokenizer.ggml.eos_token_id             128009
tokenizer.ggml.merges                   [Ġ Ġ, Ġ ĠĠĠ, ĠĠ ĠĠ, ĠĠĠ Ġ, i n, ...]
tokenizer.ggml.model                    gpt2
tokenizer.ggml.padding_token_id         128009
tokenizer.ggml.pre                      llama-bpe
tokenizer.ggml.token_type               [1, 1, 1, 1, 1, ...]
tokenizer.ggml.tokens                   [!, ", #, $, %, ...]
Tensor

Name                        Type    Shape
token_embd.weight           F16     [2048, 128256]

Per-block tensors, identical in name, type, and shape for blk.0 through blk.15:

blk.N.attn_k.weight         F16     [2048, 512]
blk.N.attn_norm.weight      F32     [2048]
blk.N.attn_output.weight    F16     [2048, 2048]
blk.N.attn_q.weight         F16     [2048, 2048]
blk.N.attn_v.weight         F16     [2048, 512]
blk.N.ffn_down.weight       F16     [8192, 2048]
blk.N.ffn_gate.weight       F16     [2048, 8192]
blk.N.ffn_norm.weight       F32     [2048]
blk.N.ffn_up.weight         F16     [2048, 8192]

rope_freqs.weight           F32     [32]
output_norm.weight          F32     [2048]
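These shapes also account for the 2.5GB blob size. Summing every listed tensor gives roughly 1.24B parameters (note there is no separate output projection tensor, so the LM head presumably shares token_embd.weight), and at F16's two bytes per parameter that comes to about 2.47GB. A sketch of the arithmetic:

    # Estimate parameter count and F16 size from the tensor shapes listed above.
    embed = 2048 * 128256        # token_embd.weight
    per_block = (
        2048 * 2048              # attn_q
        + 2048 * 512             # attn_k (GQA: 8 KV heads * 64)
        + 2048 * 512             # attn_v
        + 2048 * 2048            # attn_output
        + 8192 * 2048            # ffn_down
        + 2048 * 8192            # ffn_gate
        + 2048 * 8192            # ffn_up
        + 2048 + 2048            # attn_norm + ffn_norm
    )
    total = embed + 16 * per_block + 32 + 2048  # + rope_freqs + output_norm
    print(f"{total:,} parameters")              # -> 1,235,814,432
    # The few F32 norm vectors (4 bytes each) are negligible at this scale.
    print(f"~{total * 2 / 1e9:.2f} GB at 2 bytes/param (F16)")  # -> ~2.47 GB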