Less than a megabyte; it generates stories, though not very coherently!
379 pulls · Updated 9 months ago
e06e68395e91 · 749kB
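You can try it from the official `ollama` Python client. A minimal sketch, assuming the model has been pulled under a tag like `tinystories-656k` (a placeholder, not confirmed by this page):

```python
# Minimal sketch using the official `ollama` Python client (pip install ollama).
# The model tag is a placeholder; substitute whatever tag you pulled this model as.
import ollama

result = ollama.generate(
    model="tinystories-656k",  # placeholder tag
    prompt="Once upon a time",
)
print(result["response"])
```

Given the 512-token context window and roughly 656K parameters, expect short, loosely connected sentences rather than coherent plots.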
Metadata

- general.architecture: llama
- general.file_type: 7
- general.name: TinyStories-656K
- general.quantization_version: 2
- llama.attention.head_count: 8
- llama.attention.head_count_kv: 4
- llama.attention.layer_norm_rms_epsilon: 1e-06
- llama.block_count: 2
- llama.context_length: 512
- llama.embedding_length: 128
- llama.feed_forward_length: 384
- llama.rope.dimension_count: 16
- llama.rope.freq_base: 10000
- llama.vocab_size: 2048
- tokenizer.ggml.add_bos_token: true
- tokenizer.ggml.add_eos_token: false
- tokenizer.ggml.bos_token_id: 1
- tokenizer.ggml.eos_token_id: 2
- tokenizer.ggml.model: llama
- tokenizer.ggml.padding_token_id: 0
- tokenizer.ggml.pre: default
- tokenizer.ggml.scores: [-1000, -1000, -1000, -1000, -1000, ...]
- tokenizer.ggml.token_type: [3, 3, 3, 1, 1, ...]
- tokenizer.ggml.tokens: [<unk>, <|start_story|>, <|end_story|>, , !, ...]
- tokenizer.ggml.unknown_token_id: 0
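All of these key/value pairs live in the GGUF file header, so they can be read back programmatically. A minimal sketch using the `gguf` Python package maintained alongside llama.cpp (the local file path is an assumption):

```python
# Sketch: list GGUF header keys and tensor info (pip install gguf).
from gguf import GGUFReader

reader = GGUFReader("tinystories-656k.gguf")  # assumed local path to the downloaded blob

for key in reader.fields:       # e.g. llama.block_count, tokenizer.ggml.tokens, ...
    print(key)

for t in reader.tensors:        # name, quantization type, shape
    print(t.name, t.tensor_type.name, list(t.shape))
```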
Tensors

- token_embd.weight: Q8_0 [128, 2048]
- blk.0.attn_k.weight: Q8_0 [128, 64]
- blk.0.attn_norm.weight: F32 [128]
- blk.0.attn_output.weight: Q8_0 [128, 128]
- blk.0.attn_q.weight: Q8_0 [128, 128]
- blk.0.attn_v.weight: Q8_0 [128, 64]
- blk.0.ffn_down.weight: Q8_0 [384, 128]
- blk.0.ffn_gate.weight: Q8_0 [128, 384]
- blk.0.ffn_norm.weight: F32 [128]
- blk.0.ffn_up.weight: Q8_0 [128, 384]
- blk.1.attn_k.weight: Q8_0 [128, 64]
- blk.1.attn_norm.weight: F32 [128]
- blk.1.attn_output.weight: Q8_0 [128, 128]
- blk.1.attn_q.weight: Q8_0 [128, 128]
- blk.1.attn_v.weight: Q8_0 [128, 64]
- blk.1.ffn_down.weight: Q8_0 [384, 128]
- blk.1.ffn_gate.weight: Q8_0 [128, 384]
- blk.1.ffn_norm.weight: F32 [128]
- blk.1.ffn_up.weight: Q8_0 [128, 384]
- output_norm.weight: F32 [128]
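These shapes account for the "656K" in the model's name: summing every tensor above, and noting that no separate output.weight is listed (the output head is presumably tied to token_embd.weight), gives exactly 656,000 parameters. A quick check:

```python
# Parameter count from the tensor shapes listed above.
embed = 128 * 2048        # token_embd.weight (presumably also the tied output head)
per_block = (
    128 * 128 +           # attn_q
    128 * 64 +            # attn_k
    128 * 64 +            # attn_v
    128 * 128 +           # attn_output
    128 * 384 +           # ffn_gate
    128 * 384 +           # ffn_up
    384 * 128 +           # ffn_down
    128 + 128             # attn_norm + ffn_norm
)
total = embed + 2 * per_block + 128   # 2 blocks + output_norm
print(total)                          # 656000
```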