Meta's Llama 3.2 goes small with 1B and 3B models.
Capabilities: tools
Sizes: 1b, 3b
2.2M pulls · Updated 5 weeks ago
a963ab51d30c · 691MB
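To try the model, here is a minimal sketch against Ollama's REST API. It assumes a local Ollama server on its default port 11434 and that the 1B tag has already been pulled (`ollama pull llama3.2:1b`):

```python
# Minimal sketch: one completion request to a local Ollama server.
# Assumes Ollama is running on the default port (11434) and that
# `ollama pull llama3.2:1b` has already been run.
import json
import urllib.request

payload = {
    "model": "llama3.2:1b",
    "prompt": "Summarize what a 1B-parameter model is good for.",
    "stream": False,  # return one JSON object instead of a token stream
}
req = urllib.request.Request(
    "http://localhost:11434/api/generate",
    data=json.dumps(payload).encode("utf-8"),
    headers={"Content-Type": "application/json"},
)
with urllib.request.urlopen(req) as resp:
    print(json.load(resp)["response"])
```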
Metadata

general.architecture                      llama
general.basename                          Llama-3.2
general.file_type                         12
general.languages                         [en, de, fr, it, pt, ...]
general.license                           llama3.2
general.name                              Llama 3.2 1B
general.quantization_version              2
general.size_label                        1B
general.tags                              [facebook, meta, pytorch, llama, llama-3, ...]
general.type                              model
llama.attention.head_count                32
llama.attention.head_count_kv             8
llama.attention.key_length                64
llama.attention.layer_norm_rms_epsilon    1e-05
llama.attention.value_length              64
llama.block_count                         16
llama.context_length                      131072
llama.embedding_length                    2048
llama.feed_forward_length                 8192
llama.rope.dimension_count                64
llama.rope.freq_base                      500000
llama.vocab_size                          128256
tokenizer.ggml.bos_token_id               128000
tokenizer.ggml.eos_token_id               128001
tokenizer.ggml.merges                     [Ġ Ġ, Ġ ĠĠĠ, ĠĠ ĠĠ, ĠĠĠ Ġ, i n, ...]
tokenizer.ggml.model                      gpt2
tokenizer.ggml.pre                        llama-bpe
tokenizer.ggml.token_type                 [1, 1, 1, 1, 1, ...]
tokenizer.ggml.tokens                     [!, ", #, $, %, ...]
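The attention fields above fix the shapes in the tensor table below: 32 query heads at 64 dimensions each span the full 2048-wide embedding, while the 8 key/value heads (grouped-query attention) project down to 512. A quick consistency check, using only numbers from the metadata:

```python
# Consistency check for the attention geometry declared in the metadata.
# Every number below comes from the llama.* fields listed above.
embedding_length = 2048   # llama.embedding_length
head_count       = 32     # llama.attention.head_count (query heads)
head_count_kv    = 8      # llama.attention.head_count_kv (shared K/V heads)
head_dim         = 64     # llama.attention.key_length / value_length

assert head_count * head_dim == embedding_length        # attn_q.weight: [2048, 2048]
print("kv width:", head_count_kv * head_dim)            # 512 -> attn_k/attn_v: [2048, 512]
print("queries per kv head:", head_count // head_count_kv)  # 4-way grouping
```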
Tensor

Name                       Type  Shape
token_embd.weight          Q6_K  [2048, 128256]
blk.0.attn_k.weight        Q3_K  [2048, 512]
blk.0.attn_norm.weight     F32   [2048]
blk.0.attn_output.weight   Q4_K  [2048, 2048]
blk.0.attn_q.weight        Q3_K  [2048, 2048]
blk.0.attn_v.weight        Q5_K  [2048, 512]
blk.0.ffn_down.weight      Q5_K  [8192, 2048]
blk.0.ffn_gate.weight      Q3_K  [2048, 8192]
blk.0.ffn_norm.weight      F32   [2048]
blk.0.ffn_up.weight        Q3_K  [2048, 8192]
blk.1.attn_k.weight        Q3_K  [2048, 512]
blk.1.attn_norm.weight     F32   [2048]
blk.1.attn_output.weight   Q4_K  [2048, 2048]
blk.1.attn_q.weight        Q3_K  [2048, 2048]
blk.1.attn_v.weight        Q5_K  [2048, 512]
blk.1.ffn_down.weight      Q4_K  [8192, 2048]
blk.1.ffn_gate.weight      Q3_K  [2048, 8192]
blk.1.ffn_norm.weight      F32   [2048]
blk.1.ffn_up.weight        Q3_K  [2048, 8192]
blk.2.attn_k.weight        Q3_K  [2048, 512]
blk.2.attn_norm.weight     F32   [2048]
blk.2.attn_output.weight   Q4_K  [2048, 2048]
blk.2.attn_q.weight        Q3_K  [2048, 2048]
blk.2.attn_v.weight        Q4_K  [2048, 512]
blk.2.ffn_down.weight      Q4_K  [8192, 2048]
blk.2.ffn_gate.weight      Q3_K  [2048, 8192]
blk.2.ffn_norm.weight      F32   [2048]
blk.2.ffn_up.weight        Q3_K  [2048, 8192]
blk.3.attn_k.weight        Q3_K  [2048, 512]
blk.3.attn_norm.weight     F32   [2048]
blk.3.attn_output.weight   Q4_K  [2048, 2048]
blk.3.attn_q.weight        Q3_K  [2048, 2048]
blk.3.attn_v.weight        Q4_K  [2048, 512]
blk.3.ffn_down.weight      Q4_K  [8192, 2048]
blk.3.ffn_gate.weight      Q3_K  [2048, 8192]
blk.3.ffn_norm.weight      F32   [2048]
blk.3.ffn_up.weight        Q3_K  [2048, 8192]
blk.4.attn_k.weight        Q3_K  [2048, 512]
blk.4.attn_norm.weight     F32   [2048]
blk.4.attn_output.weight   Q4_K  [2048, 2048]
blk.4.attn_q.weight        Q3_K  [2048, 2048]
blk.4.attn_v.weight        Q4_K  [2048, 512]
blk.4.ffn_down.weight      Q4_K  [8192, 2048]
blk.4.ffn_gate.weight      Q3_K  [2048, 8192]
blk.4.ffn_norm.weight      F32   [2048]
blk.4.ffn_up.weight        Q3_K  [2048, 8192]
blk.5.attn_k.weight        Q3_K  [2048, 512]
blk.5.attn_norm.weight     F32   [2048]
blk.5.attn_output.weight   Q4_K  [2048, 2048]
blk.5.attn_q.weight        Q3_K  [2048, 2048]
blk.5.attn_v.weight        Q4_K  [2048, 512]
blk.5.ffn_down.weight      Q4_K  [8192, 2048]
blk.5.ffn_gate.weight      Q3_K  [2048, 8192]
blk.5.ffn_norm.weight      F32   [2048]
blk.5.ffn_up.weight        Q3_K  [2048, 8192]
blk.6.attn_k.weight        Q3_K  [2048, 512]
blk.6.attn_norm.weight     F32   [2048]
blk.6.attn_output.weight   Q4_K  [2048, 2048]
blk.6.attn_q.weight        Q3_K  [2048, 2048]
blk.6.attn_v.weight        Q4_K  [2048, 512]
blk.6.ffn_down.weight      Q4_K  [8192, 2048]
blk.6.ffn_gate.weight      Q3_K  [2048, 8192]
blk.6.ffn_norm.weight      F32   [2048]
blk.6.ffn_up.weight        Q3_K  [2048, 8192]
blk.7.attn_k.weight        Q3_K  [2048, 512]
blk.7.attn_norm.weight     F32   [2048]
blk.7.attn_output.weight   Q4_K  [2048, 2048]
blk.7.attn_q.weight        Q3_K  [2048, 2048]
blk.7.attn_v.weight        Q4_K  [2048, 512]
blk.7.ffn_down.weight      Q4_K  [8192, 2048]
blk.7.ffn_gate.weight      Q3_K  [2048, 8192]
blk.7.ffn_norm.weight      F32   [2048]
blk.7.ffn_up.weight        Q3_K  [2048, 8192]
blk.8.attn_k.weight        Q3_K  [2048, 512]
blk.8.attn_norm.weight     F32   [2048]
blk.8.attn_output.weight   Q4_K  [2048, 2048]
blk.8.attn_q.weight        Q3_K  [2048, 2048]
blk.8.attn_v.weight        Q4_K  [2048, 512]
blk.8.ffn_down.weight      Q4_K  [8192, 2048]
blk.8.ffn_gate.weight      Q3_K  [2048, 8192]
blk.8.ffn_norm.weight      F32   [2048]
blk.8.ffn_up.weight        Q3_K  [2048, 8192]
blk.9.attn_k.weight        Q3_K  [2048, 512]
blk.9.attn_norm.weight     F32   [2048]
blk.9.attn_output.weight   Q4_K  [2048, 2048]
blk.9.attn_q.weight        Q3_K  [2048, 2048]
blk.9.attn_v.weight        Q4_K  [2048, 512]
blk.9.ffn_down.weight      Q4_K  [8192, 2048]
blk.9.ffn_gate.weight      Q3_K  [2048, 8192]
blk.9.ffn_norm.weight      F32   [2048]
blk.9.ffn_up.weight        Q3_K  [2048, 8192]
blk.10.attn_k.weight       Q3_K  [2048, 512]
blk.10.attn_norm.weight    F32   [2048]
blk.10.attn_output.weight  Q4_K  [2048, 2048]
blk.10.attn_q.weight       Q3_K  [2048, 2048]
blk.10.attn_v.weight       Q4_K  [2048, 512]
blk.10.ffn_down.weight     Q4_K  [8192, 2048]
blk.10.ffn_gate.weight     Q3_K  [2048, 8192]
blk.10.ffn_norm.weight     F32   [2048]
blk.10.ffn_up.weight       Q3_K  [2048, 8192]
blk.11.attn_k.weight       Q3_K  [2048, 512]
blk.11.attn_norm.weight    F32   [2048]
blk.11.attn_output.weight  Q4_K  [2048, 2048]
blk.11.attn_q.weight       Q3_K  [2048, 2048]
blk.11.attn_v.weight       Q4_K  [2048, 512]
blk.11.ffn_down.weight     Q4_K  [8192, 2048]
blk.11.ffn_gate.weight     Q3_K  [2048, 8192]
blk.11.ffn_norm.weight     F32   [2048]
blk.11.ffn_up.weight       Q3_K  [2048, 8192]
blk.12.attn_k.weight       Q3_K  [2048, 512]
blk.12.attn_norm.weight    F32   [2048]
blk.12.attn_output.weight  Q4_K  [2048, 2048]
blk.12.attn_q.weight       Q3_K  [2048, 2048]
blk.12.attn_v.weight       Q4_K  [2048, 512]
blk.12.ffn_down.weight     Q4_K  [8192, 2048]
blk.12.ffn_gate.weight     Q3_K  [2048, 8192]
blk.12.ffn_norm.weight     F32   [2048]
blk.12.ffn_up.weight       Q3_K  [2048, 8192]
blk.13.attn_k.weight       Q3_K  [2048, 512]
blk.13.attn_norm.weight    F32   [2048]
blk.13.attn_output.weight  Q4_K  [2048, 2048]
blk.13.attn_q.weight       Q3_K  [2048, 2048]
blk.13.attn_v.weight       Q4_K  [2048, 512]
blk.13.ffn_down.weight     Q4_K  [8192, 2048]
blk.13.ffn_gate.weight     Q3_K  [2048, 8192]
blk.13.ffn_norm.weight     F32   [2048]
blk.13.ffn_up.weight       Q3_K  [2048, 8192]
blk.14.attn_k.weight       Q3_K  [2048, 512]
blk.14.attn_norm.weight    F32   [2048]
blk.14.attn_output.weight  Q4_K  [2048, 2048]
blk.14.attn_q.weight       Q3_K  [2048, 2048]
blk.14.attn_v.weight       Q4_K  [2048, 512]
blk.14.ffn_down.weight     Q4_K  [8192, 2048]
blk.14.ffn_gate.weight     Q3_K  [2048, 8192]
blk.14.ffn_norm.weight     F32   [2048]
blk.14.ffn_up.weight       Q3_K  [2048, 8192]
blk.15.attn_k.weight       Q3_K  [2048, 512]
blk.15.attn_norm.weight    F32   [2048]
blk.15.attn_output.weight  Q4_K  [2048, 2048]
blk.15.attn_q.weight       Q3_K  [2048, 2048]
blk.15.attn_v.weight       Q4_K  [2048, 512]
blk.15.ffn_down.weight     Q4_K  [8192, 2048]
blk.15.ffn_gate.weight     Q3_K  [2048, 8192]
blk.15.ffn_norm.weight     F32   [2048]
blk.15.ffn_up.weight       Q3_K  [2048, 8192]
rope_freqs.weight          F32   [32]
output_norm.weight         F32   [2048]
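This listing can be reproduced locally with llama.cpp's gguf-py package. The sketch below rests on two assumptions: the package is installed (pip install gguf), and the blob sits where Ollama usually stores layers, under ~/.ollama/models/blobs/, named by its full sha256 digest (a963ab51d30c above is only a short prefix):

```python
# Sketch: list GGUF metadata keys and tensor shapes with llama.cpp's
# gguf-py package (pip install gguf). The blob path is an assumption:
# substitute the full sha256 digest of the layer you downloaded.
from pathlib import Path
from gguf import GGUFReader

blob = Path.home() / ".ollama/models/blobs" / "sha256-<full digest here>"
reader = GGUFReader(blob)

for name in reader.fields:            # metadata keys, e.g. llama.block_count
    print(name)

total = 0
for t in reader.tensors:              # name, quant type, shape
    print(t.name, t.tensor_type.name, list(t.shape))
    total += int(t.n_elements)
print(f"~{total / 1e9:.2f}B parameters")
```

Summing the element counts over the shapes in the table gives about 1.24B parameters, which is where the 1B size label comes from; note there is no separate output projection in the list, since the output head is tied to token_embd.weight.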