Pretrained large language models based on Gemma, built by URA.

Tags: 2b, 7b
122 pulls · Updated 8 months ago

The listing below is for the 2b variant (5b4169ed7782 · 5.0GB).
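Once a tag is pulled, the model can be exercised through Ollama's local HTTP API. A minimal sketch, assuming a hypothetical tag `gemsura:2b` (substitute the actual tag shown on this page) and the default server address:

```python
import json
import urllib.request

MODEL = "gemsura:2b"  # hypothetical tag; replace with the tag this page uses

# Ollama's local server listens on http://localhost:11434 by default.
req = urllib.request.Request(
    "http://localhost:11434/api/generate",
    data=json.dumps({
        "model": MODEL,
        "prompt": "Summarize what a context length of 8192 tokens means.",
        "stream": False,  # request one JSON reply instead of a token stream
    }).encode("utf-8"),
    headers={"Content-Type": "application/json"},
)
with urllib.request.urlopen(req) as resp:
    print(json.loads(resp.read())["response"])
```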
Metadata

gemma.attention.head_count               8
gemma.attention.head_count_kv            1
gemma.attention.key_length               256
gemma.attention.layer_norm_rms_epsilon   1e-06
gemma.attention.value_length             256
gemma.block_count                        18
gemma.context_length                     8192
gemma.embedding_length                   2048
gemma.feed_forward_length                16384
general.architecture                     gemma
general.file_type                        1
general.name                             GemSUra-2B
tokenizer.ggml.add_bos_token             true
tokenizer.ggml.add_eos_token             false
tokenizer.ggml.bos_token_id              2
tokenizer.ggml.eos_token_id              1
tokenizer.ggml.model                     llama
tokenizer.ggml.padding_token_id          0
tokenizer.ggml.scores                    [0, 0, 0, 0, 0, ...]
tokenizer.ggml.token_type                [3, 3, 3, 2, 1, ...]
tokenizer.ggml.tokens                    [<pad>, <eos>, <bos>, <unk>, <mask>, ...]
tokenizer.ggml.unknown_token_id          3
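These keys live in the GGUF header of the weight file itself and can be read back programmatically. A minimal sketch using the `gguf` Python package from the llama.cpp project; the file path is illustrative (Ollama stores the actual blob under a sha256-named file in ~/.ollama/models/blobs):

```python
from gguf import GGUFReader  # pip install gguf

# Illustrative path; point this at the real GGUF blob on disk.
reader = GGUFReader("GemSUra-2B.gguf")

# Each metadata key stores raw value parts plus indices into them.
for name, field in reader.fields.items():
    part = field.parts[field.data[0]]
    # Strings arrive as uint8 arrays, scalars as one-element arrays.
    print(f"{name}: {part[:6]}")

# Tensor records expose the same name/type/shape triplets listed below.
for t in reader.tensors[:3]:
    print(t.name, t.tensor_type.name, list(t.shape))
```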
Tensor

Name                      Type  Shape
token_embd.weight         F16   [2048, 256000]
blk.0.attn_k.weight       F16   [2048, 256]
blk.0.attn_norm.weight    F32   [2048]
blk.0.attn_output.weight  F16   [2048, 2048]
blk.0.attn_q.weight       F16   [2048, 2048]
blk.0.attn_v.weight       F16   [2048, 256]
blk.0.ffn_down.weight     F16   [16384, 2048]
blk.0.ffn_gate.weight     F16   [2048, 16384]
blk.0.ffn_norm.weight     F32   [2048]
blk.0.ffn_up.weight       F16   [2048, 16384]
(blk.1 through blk.17 each repeat the same nine tensors with identical types and shapes)
output_norm.weight        F32   [2048]
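The two tables are mutually consistent, which is worth a quick arithmetic check: 8 query heads × key length 256 = 2048 explains the [2048, 2048] attn_q shape, while the single KV head (head_count_kv = 1, i.e. multi-query attention) explains the narrow [2048, 256] attn_k and attn_v shapes. Summing every tensor also reproduces the listed file size, as in this sketch:

```python
# Recompute the parameter count and file size from the metadata above,
# as a sanity check on the 5.0GB figure (general.file_type 1 = mostly F16).

embed, vocab = 2048, 256000            # embedding_length, tokenizer vocab size
blocks, ffn = 18, 16384                # block_count, feed_forward_length
heads, heads_kv, head_dim = 8, 1, 256  # MQA: 8 query heads share 1 KV head

token_embd = embed * vocab
attn_q_out = 2 * embed * (heads * head_dim)  # attn_q + attn_output: [2048, 2048]
attn_kv = 2 * embed * (heads_kv * head_dim)  # attn_k + attn_v: [2048, 256]
ffn_mats = 3 * embed * ffn                   # ffn_gate, ffn_up, ffn_down
norms = 2 * embed                            # attn_norm + ffn_norm per block

per_block = attn_q_out + attn_kv + ffn_mats + norms
total = token_embd + blocks * per_block + embed  # + output_norm

print(f"{total:,} parameters")              # 2,506,172,416 (~2.5B)
print(f"~{total * 2 / 1e9:.1f} GB at F16")  # norm tensors are F32 but negligible
```

Note there is no separate output.weight tensor: the gemma architecture ties the output projection to token_embd.weight, so the ~2.5B total above already accounts for the full model.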