granite-embedding:latest

92.4K Downloads · Updated 8 months ago
The IBM Granite Embedding 30M and 278M models are text-only dense biencoder embedding models; the 30M model is English-only, while the 278M model serves multilingual use cases.
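Since this is an embedding model, it is queried through Ollama's embeddings API rather than the generate or chat endpoints. Below is a minimal sketch, assuming a local Ollama server on the default port 11434 and the /api/embeddings route with a "prompt" field; route and response field names may differ between Ollama versions.

```python
# Minimal sketch: request an embedding from a local Ollama server.
# Assumes Ollama is running on the default port (11434) and that the model
# has already been pulled (`ollama pull granite-embedding`).
import requests

resp = requests.post(
    "http://localhost:11434/api/embeddings",
    json={
        "model": "granite-embedding",  # resolves to granite-embedding:latest
        "prompt": "Granite Embedding is a dense biencoder model.",
    },
    timeout=60,
)
resp.raise_for_status()

embedding = resp.json()["embedding"]  # one vector per request on this route
print(len(embedding))                 # expected: 384 (bert.embedding_length)
```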
Tags: embedding · 30m · 278m
granite-embedding:latest

model    27d24c87a53d · 63MB
Metadata

general.architecture                 bert
general.file_type                    F16
bert.attention.causal                false
bert.attention.head_count            12
bert.attention.layer_norm_epsilon    1e-12
bert.block_count                     6
bert.context_length                  512
bert.embedding_length                384
bert.feed_forward_length             1536
bert.pooling_type                    CLS
tokenizer.ggml.add_bos_token         true
tokenizer.ggml.add_eos_token         true
tokenizer.ggml.bos_token_id          0
tokenizer.ggml.cls_token_id          0
tokenizer.ggml.eos_token_id          2
tokenizer.ggml.mask_token_id         50264
tokenizer.ggml.merges                [Ġ t, Ġ a, h e, i n, r e, ...]
tokenizer.ggml.model                 gpt2
tokenizer.ggml.padding_token_id      1
tokenizer.ggml.pre                   gpt-2
tokenizer.ggml.seperator_token_id    2
tokenizer.ggml.token_type            [3, 3, 3, 3, 1, ...]
tokenizer.ggml.token_type_count      2
tokenizer.ggml.tokens                [<s>, <pad>, </s>, <unk>, ., ...]
tokenizer.ggml.unknown_token_id      3
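Two of the metadata fields above matter most at inference time: bert.context_length (inputs longer than 512 tokens are truncated) and bert.embedding_length (each output is a 384-dimensional vector, pooled from the CLS token). The sketch below compares two such vectors with cosine similarity; the /api/embeddings route, the "embedding" response field, and the embed helper are assumptions, not part of this page.

```python
# Sketch: cosine similarity between two granite-embedding vectors.
# Assumes a local Ollama server on the default port; the route and the
# "embedding" response field depend on the installed Ollama version.
import math
import requests

def embed(text: str) -> list[float]:
    r = requests.post(
        "http://localhost:11434/api/embeddings",
        json={"model": "granite-embedding", "prompt": text},
        timeout=60,
    )
    r.raise_for_status()
    return r.json()["embedding"]  # 384 floats (bert.embedding_length)

def cosine(a: list[float], b: list[float]) -> float:
    dot = sum(x * y for x, y in zip(a, b))
    norm_a = math.sqrt(sum(x * x for x in a))
    norm_b = math.sqrt(sum(x * x for x in b))
    return dot / (norm_a * norm_b)

query = embed("What is a dense biencoder?")
doc = embed("A biencoder embeds queries and documents independently.")
print(f"cosine similarity: {cosine(query, doc):.3f}")
```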
Tensor

Name                               Type    Shape
token_embd.weight                  F16     [384, 50265]

blk.0
blk.0.attn_k.bias                  F32     [384]
blk.0.attn_k.weight                F16     [384, 384]
blk.0.attn_output.bias             F32     [384]
blk.0.attn_output.weight           F16     [384, 384]
blk.0.attn_output_norm.bias        F32     [384]
blk.0.attn_output_norm.weight      F32     [384]
blk.0.attn_q.bias                  F32     [384]
blk.0.attn_q.weight                F16     [384, 384]
blk.0.attn_v.bias                  F32     [384]
blk.0.attn_v.weight                F16     [384, 384]
blk.0.ffn_down.bias                F32     [384]
blk.0.ffn_down.weight              F16     [1536, 384]
blk.0.ffn_up.bias                  F32     [1536]
blk.0.ffn_up.weight                F16     [384, 1536]
blk.0.layer_output_norm.bias       F32     [384]
blk.0.layer_output_norm.weight     F32     [384]

blk.1
blk.1.attn_k.bias                  F32     [384]
blk.1.attn_k.weight                F16     [384, 384]
blk.1.attn_output.bias             F32     [384]
blk.1.attn_output.weight           F16     [384, 384]
blk.1.attn_output_norm.bias        F32     [384]
blk.1.attn_output_norm.weight      F32     [384]
blk.1.attn_q.bias                  F32     [384]
blk.1.attn_q.weight                F16     [384, 384]
blk.1.attn_v.bias                  F32     [384]
blk.1.attn_v.weight                F16     [384, 384]
blk.1.ffn_down.bias                F32     [384]
blk.1.ffn_down.weight              F16     [1536, 384]
blk.1.ffn_up.bias                  F32     [1536]
blk.1.ffn_up.weight                F16     [384, 1536]
blk.1.layer_output_norm.bias       F32     [384]
blk.1.layer_output_norm.weight     F32     [384]

blk.2
blk.2.attn_k.bias                  F32     [384]
blk.2.attn_k.weight                F16     [384, 384]
blk.2.attn_output.bias             F32     [384]
blk.2.attn_output.weight           F16     [384, 384]
blk.2.attn_output_norm.bias        F32     [384]
blk.2.attn_output_norm.weight      F32     [384]
blk.2.attn_q.bias                  F32     [384]
blk.2.attn_q.weight                F16     [384, 384]
blk.2.attn_v.bias                  F32     [384]
blk.2.attn_v.weight                F16     [384, 384]
blk.2.ffn_down.bias                F32     [384]
blk.2.ffn_down.weight              F16     [1536, 384]
blk.2.ffn_up.bias                  F32     [1536]
blk.2.ffn_up.weight                F16     [384, 1536]
blk.2.layer_output_norm.bias       F32     [384]
blk.2.layer_output_norm.weight     F32     [384]

blk.3
blk.3.attn_k.bias                  F32     [384]
blk.3.attn_k.weight                F16     [384, 384]
blk.3.attn_output.bias             F32     [384]
blk.3.attn_output.weight           F16     [384, 384]
blk.3.attn_output_norm.bias        F32     [384]
blk.3.attn_output_norm.weight      F32     [384]
blk.3.attn_q.bias                  F32     [384]
blk.3.attn_q.weight                F16     [384, 384]
blk.3.attn_v.bias                  F32     [384]
blk.3.attn_v.weight                F16     [384, 384]
blk.3.ffn_down.bias                F32     [384]
blk.3.ffn_down.weight              F16     [1536, 384]
blk.3.ffn_up.bias                  F32     [1536]
blk.3.ffn_up.weight                F16     [384, 1536]
blk.3.layer_output_norm.bias       F32     [384]
blk.3.layer_output_norm.weight     F32     [384]

blk.4
blk.4.attn_k.bias                  F32     [384]
blk.4.attn_k.weight                F16     [384, 384]
blk.4.attn_output.bias             F32     [384]
blk.4.attn_output.weight           F16     [384, 384]
blk.4.attn_output_norm.bias        F32     [384]
blk.4.attn_output_norm.weight      F32     [384]
blk.4.attn_q.bias                  F32     [384]
blk.4.attn_q.weight                F16     [384, 384]
blk.4.attn_v.bias                  F32     [384]
blk.4.attn_v.weight                F16     [384, 384]
blk.4.ffn_down.bias                F32     [384]
blk.4.ffn_down.weight              F16     [1536, 384]
blk.4.ffn_up.bias                  F32     [1536]
blk.4.ffn_up.weight                F16     [384, 1536]
blk.4.layer_output_norm.bias       F32     [384]
blk.4.layer_output_norm.weight     F32     [384]

blk.5
blk.5.attn_k.bias                  F32     [384]
blk.5.attn_k.weight                F16     [384, 384]
blk.5.attn_output.bias             F32     [384]
blk.5.attn_output.weight           F16     [384, 384]
blk.5.attn_output_norm.bias        F32     [384]
blk.5.attn_output_norm.weight      F32     [384]
blk.5.attn_q.bias                  F32     [384]
blk.5.attn_q.weight                F16     [384, 384]
blk.5.attn_v.bias                  F32     [384]
blk.5.attn_v.weight                F16     [384, 384]
blk.5.ffn_down.bias                F32     [384]
blk.5.ffn_down.weight              F16     [1536, 384]
blk.5.ffn_up.bias                  F32     [1536]
blk.5.ffn_up.weight                F16     [384, 1536]
blk.5.layer_output_norm.bias       F32     [384]
blk.5.layer_output_norm.weight     F32     [384]

position_embd.weight               F32     [384, 512]
token_embd_norm.bias               F32     [384]
token_embd_norm.weight             F32     [384]
token_types.weight                 F32     [384, 2]
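The tensor listing is consistent with the "30M" name and the 63MB model blob: summing the shapes above gives roughly 30M parameters, which at F16 (2 bytes each) comes to about 60 MB, with the remainder being GGUF metadata. A rough back-of-the-envelope sketch, using approximate per-block bias and norm counts taken from the table:

```python
# Sketch: approximate parameter count from the tensor shapes listed above,
# to sanity-check the "30M" name and the ~63MB F16 file size.
embed_dim, vocab, ctx, ffn, blocks = 384, 50265, 512, 1536, 6

per_block = (
    4 * embed_dim * embed_dim   # attn q/k/v/output projection weights
    + 2 * embed_dim * ffn       # ffn_up and ffn_down weights
    + 9 * embed_dim + ffn       # per-block biases and layer-norm parameters
)
total = (
    vocab * embed_dim           # token_embd.weight [384, 50265]
    + ctx * embed_dim           # position_embd.weight [384, 512]
    + 2 * embed_dim             # token_types.weight [384, 2]
    + 2 * embed_dim             # token_embd_norm weight + bias
    + blocks * per_block
)
print(f"~{total / 1e6:.1f}M parameters, ~{total * 2 / 1e6:.0f} MB at F16")
# -> ~30.1M parameters, ~60 MB
```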