M_INC/DACMini-IT:latest
12 Downloads · Updated 1 week ago
DACMini is an ultra-lightweight language model (109M parameters) optimized for Italian dialogue. It delivers smooth, natural responses despite its small size, making it ideal for local, embedded, or low-power applications via Ollama.
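To try the model locally, it can be queried through the official `ollama` Python client. A minimal sketch, assuming a local Ollama server is running and the model has already been pulled (e.g. with `ollama pull M_INC/DACMini-IT:latest`); the Italian prompt is an arbitrary example, not taken from the model card:

```python
# Minimal sketch: one chat turn with DACMini-IT via a local Ollama server.
# Assumes the `ollama` Python package is installed and the model is pulled.
from ollama import chat

response = chat(
    model="M_INC/DACMini-IT:latest",
    messages=[{"role": "user", "content": "Ciao! Come stai?"}],  # "Hi! How are you?"
)
print(response["message"]["content"])
```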
DACMini-IT:latest
model    f0c0840affff · 119MB
Metadata
general.architecture                 gpt2
general.file_type                    Q8_0
gpt2.attention.head_count            12
gpt2.attention.layer_norm_epsilon    1e-05
gpt2.block_count                     12
gpt2.context_length                  1024
gpt2.embedding_length                768
gpt2.feed_forward_length             3072
tokenizer.ggml.add_bos_token         false
tokenizer.ggml.bos_token_id          30000
tokenizer.ggml.eos_token_id          0
tokenizer.ggml.merges                [Ġ d, o n, Ġ c, e r, Ġ s, ...]
tokenizer.ggml.model                 gpt2
tokenizer.ggml.padding_token_id      30001
tokenizer.ggml.pre                   gpt2
tokenizer.ggml.token_type            [3, 1, 1, 1, 1, ...]
tokenizer.ggml.tokens                [<|endoftext|>, !, ", #, $, ...]
tokenizer.ggml.unknown_token_id      0
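The gpt2.* keys above, together with the vocabulary size implied by the token_embd shape in the tensor list below (30002), are enough to sanity-check the advertised ~109M parameter count, assuming the standard GPT-2 layout reflected in that list (learned position embeddings, fused QKV projection, biased linear layers):

```python
# Back-of-the-envelope GPT-2 parameter count from the metadata above.
# Vocab size (30002) comes from the token_embd shape; the rest from gpt2.* keys.
n_vocab, n_ctx, n_embd, n_ff, n_layer = 30002, 1024, 768, 3072, 12

embeddings = n_vocab * n_embd + n_ctx * n_embd   # token + position embeddings
per_block = (
    n_embd * 3 * n_embd + 3 * n_embd             # fused QKV weight + bias
    + n_embd * n_embd + n_embd                   # attention output weight + bias
    + n_embd * n_ff + n_ff                       # FFN up weight + bias
    + n_ff * n_embd + n_embd                     # FFN down weight + bias
    + 2 * 2 * n_embd                             # two LayerNorms (weight + bias)
)
final_norm = 2 * n_embd                          # output_norm weight + bias

total = embeddings + n_layer * per_block + final_norm
print(f"{total:,}")  # 108,883,968 ≈ 109M, matching the model card
```

At Q8_0 (34 bytes per 32-weight block, i.e. ~1.06 bytes per quantized weight) with the norms and biases kept in F32, ~109M parameters is roughly consistent with the 119MB blob size listed above.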
Tensor
Name                         Type  Shape
token_embd.weight            Q8_0  [768, 30002]
blk.0
blk.0.attn_norm.bias         F32   [768]
blk.0.attn_norm.weight       F32   [768]
blk.0.attn_output.bias       F32   [768]
blk.0.attn_output.weight     Q8_0  [768, 768]
blk.0.attn_qkv.bias          F32   [2304]
blk.0.attn_qkv.weight        Q8_0  [768, 2304]
blk.0.ffn_down.bias          F32   [768]
blk.0.ffn_down.weight        Q8_0  [3072, 768]
blk.0.ffn_norm.bias          F32   [768]
blk.0.ffn_norm.weight        F32   [768]
blk.0.ffn_up.bias            F32   [3072]
blk.0.ffn_up.weight          Q8_0  [768, 3072]
blk.1
blk.1.attn_norm.bias         F32   [768]
blk.1.attn_norm.weight       F32   [768]
blk.1.attn_output.bias       F32   [768]
blk.1.attn_output.weight     Q8_0  [768, 768]
blk.1.attn_qkv.bias          F32   [2304]
blk.1.attn_qkv.weight        Q8_0  [768, 2304]
blk.1.ffn_down.bias          F32   [768]
blk.1.ffn_down.weight        Q8_0  [3072, 768]
blk.1.ffn_norm.bias          F32   [768]
blk.1.ffn_norm.weight        F32   [768]
blk.1.ffn_up.bias            F32   [3072]
blk.1.ffn_up.weight          Q8_0  [768, 3072]
blk.2
blk.2.attn_norm.bias         F32   [768]
blk.2.attn_norm.weight       F32   [768]
blk.2.attn_output.bias       F32   [768]
blk.2.attn_output.weight     Q8_0  [768, 768]
blk.2.attn_qkv.bias          F32   [2304]
blk.2.attn_qkv.weight        Q8_0  [768, 2304]
blk.2.ffn_down.bias          F32   [768]
blk.2.ffn_down.weight        Q8_0  [3072, 768]
blk.2.ffn_norm.bias          F32   [768]
blk.2.ffn_norm.weight        F32   [768]
blk.2.ffn_up.bias            F32   [3072]
blk.2.ffn_up.weight          Q8_0  [768, 3072]
blk.3
blk.3.attn_norm.bias         F32   [768]
blk.3.attn_norm.weight       F32   [768]
blk.3.attn_output.bias       F32   [768]
blk.3.attn_output.weight     Q8_0  [768, 768]
blk.3.attn_qkv.bias          F32   [2304]
blk.3.attn_qkv.weight        Q8_0  [768, 2304]
blk.3.ffn_down.bias          F32   [768]
blk.3.ffn_down.weight        Q8_0  [3072, 768]
blk.3.ffn_norm.bias          F32   [768]
blk.3.ffn_norm.weight        F32   [768]
blk.3.ffn_up.bias            F32   [3072]
blk.3.ffn_up.weight          Q8_0  [768, 3072]
blk.4
blk.4.attn_norm.bias         F32   [768]
blk.4.attn_norm.weight       F32   [768]
blk.4.attn_output.bias       F32   [768]
blk.4.attn_output.weight     Q8_0  [768, 768]
blk.4.attn_qkv.bias          F32   [2304]
blk.4.attn_qkv.weight        Q8_0  [768, 2304]
blk.4.ffn_down.bias          F32   [768]
blk.4.ffn_down.weight        Q8_0  [3072, 768]
blk.4.ffn_norm.bias          F32   [768]
blk.4.ffn_norm.weight        F32   [768]
blk.4.ffn_up.bias            F32   [3072]
blk.4.ffn_up.weight          Q8_0  [768, 3072]
blk.5
blk.5.attn_norm.bias         F32   [768]
blk.5.attn_norm.weight       F32   [768]
blk.5.attn_output.bias       F32   [768]
blk.5.attn_output.weight     Q8_0  [768, 768]
blk.5.attn_qkv.bias          F32   [2304]
blk.5.attn_qkv.weight        Q8_0  [768, 2304]
blk.5.ffn_down.bias          F32   [768]
blk.5.ffn_down.weight        Q8_0  [3072, 768]
blk.5.ffn_norm.bias          F32   [768]
blk.5.ffn_norm.weight        F32   [768]
blk.5.ffn_up.bias            F32   [3072]
blk.5.ffn_up.weight          Q8_0  [768, 3072]
blk.6
blk.6.attn_norm.bias         F32   [768]
blk.6.attn_norm.weight       F32   [768]
blk.6.attn_output.bias       F32   [768]
blk.6.attn_output.weight     Q8_0  [768, 768]
blk.6.attn_qkv.bias          F32   [2304]
blk.6.attn_qkv.weight        Q8_0  [768, 2304]
blk.6.ffn_down.bias          F32   [768]
blk.6.ffn_down.weight        Q8_0  [3072, 768]
blk.6.ffn_norm.bias          F32   [768]
blk.6.ffn_norm.weight        F32   [768]
blk.6.ffn_up.bias            F32   [3072]
blk.6.ffn_up.weight          Q8_0  [768, 3072]
blk.7
blk.7.attn_norm.bias         F32   [768]
blk.7.attn_norm.weight       F32   [768]
blk.7.attn_output.bias       F32   [768]
blk.7.attn_output.weight     Q8_0  [768, 768]
blk.7.attn_qkv.bias          F32   [2304]
blk.7.attn_qkv.weight        Q8_0  [768, 2304]
blk.7.ffn_down.bias          F32   [768]
blk.7.ffn_down.weight        Q8_0  [3072, 768]
blk.7.ffn_norm.bias          F32   [768]
blk.7.ffn_norm.weight        F32   [768]
blk.7.ffn_up.bias            F32   [3072]
blk.7.ffn_up.weight          Q8_0  [768, 3072]
blk.8
blk.8.attn_norm.bias         F32   [768]
blk.8.attn_norm.weight       F32   [768]
blk.8.attn_output.bias       F32   [768]
blk.8.attn_output.weight     Q8_0  [768, 768]
blk.8.attn_qkv.bias          F32   [2304]
blk.8.attn_qkv.weight        Q8_0  [768, 2304]
blk.8.ffn_down.bias          F32   [768]
blk.8.ffn_down.weight        Q8_0  [3072, 768]
blk.8.ffn_norm.bias          F32   [768]
blk.8.ffn_norm.weight        F32   [768]
blk.8.ffn_up.bias            F32   [3072]
blk.8.ffn_up.weight          Q8_0  [768, 3072]
blk.9
blk.9.attn_norm.bias         F32   [768]
blk.9.attn_norm.weight       F32   [768]
blk.9.attn_output.bias       F32   [768]
blk.9.attn_output.weight     Q8_0  [768, 768]
blk.9.attn_qkv.bias          F32   [2304]
blk.9.attn_qkv.weight        Q8_0  [768, 2304]
blk.9.ffn_down.bias          F32   [768]
blk.9.ffn_down.weight        Q8_0  [3072, 768]
blk.9.ffn_norm.bias          F32   [768]
blk.9.ffn_norm.weight        F32   [768]
blk.9.ffn_up.bias            F32   [3072]
blk.9.ffn_up.weight          Q8_0  [768, 3072]
blk.10
blk.10.attn_norm.bias        F32   [768]
blk.10.attn_norm.weight      F32   [768]
blk.10.attn_output.bias      F32   [768]
blk.10.attn_output.weight    Q8_0  [768, 768]
blk.10.attn_qkv.bias         F32   [2304]
blk.10.attn_qkv.weight       Q8_0  [768, 2304]
blk.10.ffn_down.bias         F32   [768]
blk.10.ffn_down.weight       Q8_0  [3072, 768]
blk.10.ffn_norm.bias         F32   [768]
blk.10.ffn_norm.weight       F32   [768]
blk.10.ffn_up.bias           F32   [3072]
blk.10.ffn_up.weight         Q8_0  [768, 3072]
blk.11
blk.11.attn_norm.bias        F32   [768]
blk.11.attn_norm.weight      F32   [768]
blk.11.attn_output.bias      F32   [768]
blk.11.attn_output.weight    Q8_0  [768, 768]
blk.11.attn_qkv.bias         F32   [2304]
blk.11.attn_qkv.weight       Q8_0  [768, 2304]
blk.11.ffn_down.bias         F32   [768]
blk.11.ffn_down.weight       Q8_0  [3072, 768]
blk.11.ffn_norm.bias         F32   [768]
blk.11.ffn_norm.weight       F32   [768]
blk.11.ffn_up.bias           F32   [3072]
blk.11.ffn_up.weight         Q8_0  [768, 3072]
output_norm.bias             F32   [768]
position_embd.weight         F32   [768, 1024]
output_norm.weight           F32   [768]
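The model can also be queried over Ollama's local REST API (default port 11434) via the /api/generate endpoint. A minimal standard-library sketch, assuming the server is running and the model is pulled; the Italian prompt is again an arbitrary example:

```python
# Sketch: one-shot generation against a locally running Ollama server
# using the /api/generate endpoint with streaming disabled.
import json
import urllib.request

payload = json.dumps({
    "model": "M_INC/DACMini-IT:latest",
    "prompt": "Raccontami una breve storia.",  # "Tell me a short story."
    "stream": False,
}).encode("utf-8")

req = urllib.request.Request(
    "http://localhost:11434/api/generate",
    data=payload,
    headers={"Content-Type": "application/json"},
)
with urllib.request.urlopen(req) as resp:
    print(json.load(resp)["response"])  # full completion in one JSON object
```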