Hudson/pythia-instruct:70m

70 Downloads · Updated 11 months ago

Tag: 70m

model f719f9ed21c3 · 143MB
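A minimal sketch of querying this model through Ollama's local REST API, assuming a server is running on the default port 11434 and the model has been pulled as Hudson/pythia-instruct:70m; the prompt is an arbitrary example:

```python
import json
import urllib.request

# Generate a single (non-streamed) completion from the local Ollama server.
payload = {
    "model": "Hudson/pythia-instruct:70m",
    "prompt": "Explain what a language model is in one sentence.",
    "stream": False,  # return one JSON object instead of a token stream
}
req = urllib.request.Request(
    "http://localhost:11434/api/generate",
    data=json.dumps(payload).encode("utf-8"),
    headers={"Content-Type": "application/json"},
)
with urllib.request.urlopen(req) as resp:
    print(json.load(resp)["response"])
```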
Metadata

| Key | Value |
| --- | --- |
| general.architecture | gptneox |
| gptneox.attention.head_count | 8 |
| gptneox.attention.layer_norm_epsilon | 1e-05 |
| gptneox.block_count | 6 |
| gptneox.context_length | 2048 |
| gptneox.embedding_length | 512 |
| gptneox.feed_forward_length | 2048 |
| gptneox.rope.dimension_count | 16 |
| gptneox.use_parallel_residual | true |
| tokenizer.ggml.bos_token_id | 0 |
| tokenizer.ggml.eos_token_id | 0 |
| tokenizer.ggml.merges | [Ġ Ġ, Ġ t, Ġ a, h e, i n, ...] |
| tokenizer.ggml.model | gpt2 |
| tokenizer.ggml.pre | olmo |
| tokenizer.ggml.token_type | [3, 3, 1, 1, 1, ...] |
| tokenizer.ggml.tokens | [<\|endoftext\|>, <\|padding\|>, !, ", #, ...] |
| tokenizer.ggml.unknown_token_id | 0 |
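The 70m in the tag can be checked against these values. A back-of-the-envelope sketch, using embedding_length, block_count, and feed_forward_length from the table above and the vocabulary size 50279 from the token_embd.weight shape below (bias and norm vectors are omitted as negligible):

```python
# Rough parameter count from the Metadata and Tensor tables on this page.
d, layers, ffn, vocab = 512, 6, 2048, 50279

embed = d * vocab              # token_embd.weight
unembed = d * vocab            # output.weight (not tied to the embedding here)
per_layer = (
    d * 3 * d                  # attn_qkv.weight (fused Q, K, V)
    + d * d                    # attn_output.weight
    + d * ffn + ffn * d        # ffn_up.weight + ffn_down.weight
)
total = embed + unembed + layers * per_layer
print(f"~{total / 1e6:.1f}M parameters")   # ~70.4M
print(f"~{total * 2 / 1e6:.0f}MB at F16")  # ~141MB
```

The ~141MB at two bytes per F16 weight is consistent with the 143MB layer size above; the remainder is the F32 biases and norms plus metadata.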
Tensor

| Name | Type | Shape |
| --- | --- | --- |
| token_embd.weight | F16 | [512, 50279] |
| blk.0.attn_norm.bias | F32 | [512] |
| blk.0.attn_norm.weight | F32 | [512] |
| blk.0.attn_output.bias | F32 | [512] |
| blk.0.attn_output.weight | F16 | [512, 512] |
| blk.0.attn_qkv.bias | F32 | [1536] |
| blk.0.attn_qkv.weight | F16 | [512, 1536] |
| blk.0.ffn_down.bias | F32 | [512] |
| blk.0.ffn_down.weight | F16 | [2048, 512] |
| blk.0.ffn_norm.bias | F32 | [512] |
| blk.0.ffn_norm.weight | F32 | [512] |
| blk.0.ffn_up.bias | F32 | [2048] |
| blk.0.ffn_up.weight | F16 | [512, 2048] |
| blk.1.attn_norm.bias | F32 | [512] |
| blk.1.attn_norm.weight | F32 | [512] |
| blk.1.attn_output.bias | F32 | [512] |
| blk.1.attn_output.weight | F16 | [512, 512] |
| blk.1.attn_qkv.bias | F32 | [1536] |
| blk.1.attn_qkv.weight | F16 | [512, 1536] |
| blk.1.ffn_down.bias | F32 | [512] |
| blk.1.ffn_down.weight | F16 | [2048, 512] |
| blk.1.ffn_norm.bias | F32 | [512] |
| blk.1.ffn_norm.weight | F32 | [512] |
| blk.1.ffn_up.bias | F32 | [2048] |
| blk.1.ffn_up.weight | F16 | [512, 2048] |
| blk.2.attn_norm.bias | F32 | [512] |
| blk.2.attn_norm.weight | F32 | [512] |
| blk.2.attn_output.bias | F32 | [512] |
| blk.2.attn_output.weight | F16 | [512, 512] |
| blk.2.attn_qkv.bias | F32 | [1536] |
| blk.2.attn_qkv.weight | F16 | [512, 1536] |
| blk.2.ffn_down.bias | F32 | [512] |
| blk.2.ffn_down.weight | F16 | [2048, 512] |
| blk.2.ffn_norm.bias | F32 | [512] |
| blk.2.ffn_norm.weight | F32 | [512] |
| blk.2.ffn_up.bias | F32 | [2048] |
| blk.2.ffn_up.weight | F16 | [512, 2048] |
| blk.3.attn_norm.bias | F32 | [512] |
| blk.3.attn_norm.weight | F32 | [512] |
| blk.3.attn_output.bias | F32 | [512] |
| blk.3.attn_output.weight | F16 | [512, 512] |
| blk.3.attn_qkv.bias | F32 | [1536] |
| blk.3.attn_qkv.weight | F16 | [512, 1536] |
| blk.3.ffn_down.bias | F32 | [512] |
| blk.3.ffn_down.weight | F16 | [2048, 512] |
| blk.3.ffn_norm.bias | F32 | [512] |
| blk.3.ffn_norm.weight | F32 | [512] |
| blk.3.ffn_up.bias | F32 | [2048] |
| blk.3.ffn_up.weight | F16 | [512, 2048] |
| blk.4.attn_norm.bias | F32 | [512] |
| blk.4.attn_norm.weight | F32 | [512] |
| blk.4.attn_output.bias | F32 | [512] |
| blk.4.attn_output.weight | F16 | [512, 512] |
| blk.4.attn_qkv.bias | F32 | [1536] |
| blk.4.attn_qkv.weight | F16 | [512, 1536] |
| blk.4.ffn_down.bias | F32 | [512] |
| blk.4.ffn_down.weight | F16 | [2048, 512] |
| blk.4.ffn_norm.bias | F32 | [512] |
| blk.4.ffn_norm.weight | F32 | [512] |
| blk.4.ffn_up.bias | F32 | [2048] |
| blk.4.ffn_up.weight | F16 | [512, 2048] |
| blk.5.attn_norm.bias | F32 | [512] |
| blk.5.attn_norm.weight | F32 | [512] |
| blk.5.attn_output.bias | F32 | [512] |
| blk.5.attn_output.weight | F16 | [512, 512] |
| blk.5.attn_qkv.bias | F32 | [1536] |
| blk.5.attn_qkv.weight | F16 | [512, 1536] |
| blk.5.ffn_down.bias | F32 | [512] |
| blk.5.ffn_down.weight | F16 | [2048, 512] |
| blk.5.ffn_norm.bias | F32 | [512] |
| blk.5.ffn_norm.weight | F32 | [512] |
| blk.5.ffn_up.bias | F32 | [2048] |
| blk.5.ffn_up.weight | F16 | [512, 2048] |
| output.weight | F16 | [512, 50279] |
| output_norm.bias | F32 | [512] |
| output_norm.weight | F32 | [512] |
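The listing above can be reproduced offline with the gguf Python package maintained in the llama.cpp repository (a sketch; the path is a placeholder, since Ollama stores layers as content-addressed blobs whose location varies by install):

```python
from gguf import GGUFReader  # pip install gguf

# Placeholder path: substitute the local blob for this model's
# "model" layer (digest f719f9ed21c3 above).
reader = GGUFReader("model.gguf")

# Key-value metadata, as in the Metadata table above.
for field in reader.fields.values():
    print(field.name)

# Tensor names, types, and shapes, as in the Tensor table above.
for tensor in reader.tensors:
    print(tensor.name, tensor.tensor_type.name, list(tensor.shape))
```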