jina/jina-embeddings-v2-small-en:latest

2,603 Downloads · Updated 1 year ago
Text embedding model (small) for input of size up to 8192 tokens
embedding

jina-embeddings-v2-small-en:latest
model    465b654b2a5a · 66MB
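
The page itself shows no usage, but since this is an embedding model served through Ollama, a minimal sketch of fetching a vector from a locally running server might look like the following. The endpoint and request shape are Ollama's standard embeddings API; the sample sentence is made up:

    # Minimal sketch: fetch an embedding from a local Ollama server.
    # Assumes Ollama is running on its default port (11434) and the model
    # has been pulled, e.g. `ollama pull jina/jina-embeddings-v2-small-en`.
    import requests

    resp = requests.post(
        "http://localhost:11434/api/embeddings",
        json={
            "model": "jina/jina-embeddings-v2-small-en:latest",
            "prompt": "Ollama runs embedding models locally.",
        },
    )
    resp.raise_for_status()
    embedding = resp.json()["embedding"]
    print(len(embedding))  # expected: 512, the model's embedding_length
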
Metadata
general.architecture                       jina-bert-v2
general.file_type                          F16
jina-bert-v2.attention.causal              false
jina-bert-v2.attention.head_count          8
jina-bert-v2.attention.layer_norm_epsilon  1e-12
jina-bert-v2.block_count                   4
jina-bert-v2.context_length                8192
jina-bert-v2.embedding_length              512
jina-bert-v2.feed_forward_length           2048
jina-bert-v2.pooling_type                  Mean
tokenizer.ggml.add_bos_token               true
tokenizer.ggml.add_eos_token               true
tokenizer.ggml.cls_token_id                101
tokenizer.ggml.mask_token_id               103
tokenizer.ggml.model                       bert
tokenizer.ggml.padding_token_id            0
tokenizer.ggml.pre                         jina-v2-en
tokenizer.ggml.seperator_token_id          102
tokenizer.ggml.token_type                  [3, 1, 1, 1, 1, ...]
tokenizer.ggml.token_type_count            2
tokenizer.ggml.tokens                      [[PAD], [unused0], [unused1], [unused2], [unused3], ...]
tokenizer.ggml.unknown_token_id            100
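
These keys live in the GGUF blob itself, so they can also be read offline. A minimal sketch using the `gguf` Python package from the llama.cpp project (`pip install gguf`); the file path is a placeholder, since Ollama stores layers under ~/.ollama/models/blobs/ keyed by sha256 digest:

    # Minimal sketch: inspect GGUF metadata and tensors offline.
    from gguf import GGUFReader

    reader = GGUFReader("model.gguf")  # placeholder path to the GGUF blob

    # Metadata keys, e.g. general.architecture, jina-bert-v2.context_length
    for key in reader.fields:
        print(key)

    # Tensor names, dtypes, and shapes, matching the table below
    for tensor in reader.tensors:
        print(tensor.name, tensor.tensor_type.name, list(tensor.shape))
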
Tensor
Name                             Type  Shape
token_embd.weight                F16   [512, 30528]

blk.0
blk.0.attn_k.bias                F32   [512]
blk.0.attn_k.weight              F16   [512, 512]
blk.0.attn_output.bias           F32   [512]
blk.0.attn_output.weight         F16   [512, 512]
blk.0.attn_output_norm.bias      F32   [512]
blk.0.attn_output_norm.weight    F32   [512]
blk.0.attn_q.bias                F32   [512]
blk.0.attn_q.weight              F16   [512, 512]
blk.0.attn_v.bias                F32   [512]
blk.0.attn_v.weight              F16   [512, 512]
blk.0.ffn_down.bias              F32   [512]
blk.0.ffn_down.weight            F16   [2048, 512]
blk.0.ffn_gate.weight            F16   [512, 2048]
blk.0.ffn_up.weight              F16   [512, 2048]
blk.0.layer_output_norm.bias     F32   [512]
blk.0.layer_output_norm.weight   F32   [512]

blk.1
blk.1.attn_k.bias                F32   [512]
blk.1.attn_k.weight              F16   [512, 512]
blk.1.attn_output.bias           F32   [512]
blk.1.attn_output.weight         F16   [512, 512]
blk.1.attn_output_norm.bias      F32   [512]
blk.1.attn_output_norm.weight    F32   [512]
blk.1.attn_q.bias                F32   [512]
blk.1.attn_q.weight              F16   [512, 512]
blk.1.attn_v.bias                F32   [512]
blk.1.attn_v.weight              F16   [512, 512]
blk.1.ffn_down.bias              F32   [512]
blk.1.ffn_down.weight            F16   [2048, 512]
blk.1.ffn_gate.weight            F16   [512, 2048]
blk.1.ffn_up.weight              F16   [512, 2048]
blk.1.layer_output_norm.bias     F32   [512]
blk.1.layer_output_norm.weight   F32   [512]

blk.2
blk.2.attn_k.bias                F32   [512]
blk.2.attn_k.weight              F16   [512, 512]
blk.2.attn_output.bias           F32   [512]
blk.2.attn_output.weight         F16   [512, 512]
blk.2.attn_output_norm.bias      F32   [512]
blk.2.attn_output_norm.weight    F32   [512]
blk.2.attn_q.bias                F32   [512]
blk.2.attn_q.weight              F16   [512, 512]
blk.2.attn_v.bias                F32   [512]
blk.2.attn_v.weight              F16   [512, 512]
blk.2.ffn_down.bias              F32   [512]
blk.2.ffn_down.weight            F16   [2048, 512]
blk.2.ffn_gate.weight            F16   [512, 2048]
blk.2.ffn_up.weight              F16   [512, 2048]
blk.2.layer_output_norm.bias     F32   [512]
blk.2.layer_output_norm.weight   F32   [512]

blk.3
blk.3.attn_k.bias                F32   [512]
blk.3.attn_k.weight              F16   [512, 512]
blk.3.attn_output.bias           F32   [512]
blk.3.attn_output.weight         F16   [512, 512]
blk.3.attn_output_norm.bias      F32   [512]
blk.3.attn_output_norm.weight    F32   [512]
blk.3.attn_q.bias                F32   [512]
blk.3.attn_q.weight              F16   [512, 512]
blk.3.attn_v.bias                F32   [512]
blk.3.attn_v.weight              F16   [512, 512]
blk.3.ffn_down.bias              F32   [512]
blk.3.ffn_down.weight            F16   [2048, 512]
blk.3.ffn_gate.weight            F16   [512, 2048]
blk.3.ffn_up.weight              F16   [512, 2048]
blk.3.layer_output_norm.bias     F32   [512]
blk.3.layer_output_norm.weight   F32   [512]

token_embd_norm.bias             F32   [512]
token_embd_norm.weight           F32   [512]
token_types.weight               F32   [512, 2]
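
As a back-of-envelope check (not shown on the page), the shapes above can be totalled against the 66MB blob size, with F16 tensors costing 2 bytes per parameter and F32 tensors 4:

    # Sanity check: do the tensor shapes account for the 66MB blob?
    f16 = 512 * 30528                  # token_embd.weight
    f16 += 4 * (4 * 512 * 512          # attn q/k/v/output weights, 4 blocks
                + 3 * 2048 * 512)      # ffn down/gate/up weights, 4 blocks
    f32 = 512 * 2                      # token_types.weight
    f32 += 2 * 512                     # token_embd_norm weight + bias
    f32 += 4 * 9 * 512                 # nine [512] bias/norm tensors per block
    print(f"{f16 + f32:,} parameters")            # 32,428,032
    print(f"{(2 * f16 + 4 * f32) / 1e6:.1f} MB")  # ~64.9 MB

That lands within about a megabyte of the 66MB shown above; the remainder is plausibly GGUF header and metadata overhead plus rounding in the displayed size.
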