Less than a megabyte; it generates stories, but not very coherently!
290 Pulls · Updated 6 months ago
e06e68395e91 · 749kB
## Metadata

| Key | Value |
| --- | --- |
| general.architecture | llama |
| general.file_type | 7 |
| general.name | TinyStories-656K |
| general.quantization_version | 2 |
| llama.attention.head_count | 8 |
| llama.attention.head_count_kv | 4 |
| llama.attention.layer_norm_rms_epsilon | 1e-06 |
| llama.block_count | 2 |
| llama.context_length | 512 |
| llama.embedding_length | 128 |
| llama.feed_forward_length | 384 |
| llama.rope.dimension_count | 16 |
| llama.rope.freq_base | 10000 |
| llama.vocab_size | 2048 |
| tokenizer.ggml.add_bos_token | true |
| tokenizer.ggml.add_eos_token | false |
| tokenizer.ggml.bos_token_id | 1 |
| tokenizer.ggml.eos_token_id | 2 |
| tokenizer.ggml.model | llama |
| tokenizer.ggml.padding_token_id | 0 |
| tokenizer.ggml.pre | default |
| tokenizer.ggml.scores | [-1000, -1000, -1000, -1000, -1000, ...] |
| tokenizer.ggml.token_type | [3, 3, 3, 1, 1, ...] |
| tokenizer.ggml.tokens | [<unk>, <\|start_story\|>, <\|end_story\|>, , !, ...] |
| tokenizer.ggml.unknown_token_id | 0 |
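Several of the attention hyperparameters above determine each other, which is worth making explicit for a model this small. A minimal Python sketch, with the values hardcoded from the metadata table (nothing is read from the GGUF file):

```python
# Values copied from the metadata table above.
embedding_length = 128  # llama.embedding_length
head_count = 8          # llama.attention.head_count (query heads)
head_count_kv = 4       # llama.attention.head_count_kv (grouped-query attention)

# Per-head dimension: matches llama.rope.dimension_count = 16,
# so RoPE rotates the full width of every head.
head_dim = embedding_length // head_count
assert head_dim == 16

# Width of the K and V projections: only head_count_kv heads are produced,
# which is why attn_k.weight and attn_v.weight below are [128, 64].
kv_width = head_count_kv * head_dim
assert kv_width == 64

# Each KV head is shared by two query heads.
print(head_count // head_count_kv)  # -> 2
```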
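The tokenizer rows also show how generation is framed: add_bos_token is true and the BOS id is 1, so every prompt implicitly starts with <|start_story|>, and sampling stops once <|end_story|> (id 2) is produced. As a hedged usage sketch, the 749 kB GGUF blob should run under llama-cpp-python; the file name below is a placeholder, not the actual blob name:

```python
from llama_cpp import Llama  # pip install llama-cpp-python

# Placeholder path: point this at the downloaded GGUF blob.
llm = Llama(model_path="tinystories-656k-q8_0.gguf", n_ctx=512)

# <|start_story|> (BOS, id 1) is prepended automatically because
# tokenizer.ggml.add_bos_token is true; generation ends at <|end_story|> (id 2).
out = llm("Once upon a time", max_tokens=128)
print(out["choices"][0]["text"])
```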
## Tensor

| Name | Type | Shape |
| --- | --- | --- |
| token_embd.weight | Q8_0 | [128, 2048] |
| blk.0.attn_k.weight | Q8_0 | [128, 64] |
| blk.0.attn_norm.weight | F32 | [128] |
| blk.0.attn_output.weight | Q8_0 | [128, 128] |
| blk.0.attn_q.weight | Q8_0 | [128, 128] |
| blk.0.attn_v.weight | Q8_0 | [128, 64] |
| blk.0.ffn_down.weight | Q8_0 | [384, 128] |
| blk.0.ffn_gate.weight | Q8_0 | [128, 384] |
| blk.0.ffn_norm.weight | F32 | [128] |
| blk.0.ffn_up.weight | Q8_0 | [128, 384] |
| blk.1.attn_k.weight | Q8_0 | [128, 64] |
| blk.1.attn_norm.weight | F32 | [128] |
| blk.1.attn_output.weight | Q8_0 | [128, 128] |
| blk.1.attn_q.weight | Q8_0 | [128, 128] |
| blk.1.attn_v.weight | Q8_0 | [128, 64] |
| blk.1.ffn_down.weight | Q8_0 | [384, 128] |
| blk.1.ffn_gate.weight | Q8_0 | [128, 384] |
| blk.1.ffn_norm.weight | F32 | [128] |
| blk.1.ffn_up.weight | Q8_0 | [128, 384] |
| output_norm.weight | F32 | [128] |
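As a sanity check, the name TinyStories-656K can be recovered from this table: summing the element counts of every tensor gives exactly 656,000 parameters. (There is no separate output projection in the list, which suggests the output head reuses token_embd.weight.) A small Python sketch, with the shapes hardcoded from the table above:

```python
import math

# (name, shape) pairs copied from the tensor table above.
tensors = [("token_embd.weight", (128, 2048)), ("output_norm.weight", (128,))]
for b in (0, 1):
    tensors += [
        (f"blk.{b}.attn_q.weight",      (128, 128)),
        (f"blk.{b}.attn_k.weight",      (128, 64)),
        (f"blk.{b}.attn_v.weight",      (128, 64)),
        (f"blk.{b}.attn_output.weight", (128, 128)),
        (f"blk.{b}.attn_norm.weight",   (128,)),
        (f"blk.{b}.ffn_gate.weight",    (128, 384)),
        (f"blk.{b}.ffn_up.weight",      (128, 384)),
        (f"blk.{b}.ffn_down.weight",    (384, 128)),
        (f"blk.{b}.ffn_norm.weight",    (128,)),
    ]

total = sum(math.prod(shape) for _, shape in tensors)
print(total)  # 656000 -- hence "TinyStories-656K"
```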