qllama / bge-small-zh-v1.5:q4_k_m

655 Downloads · Updated 6 months ago
Quantized from https://huggingface.co/BAAI/bge-small-zh-v1.5 to f16 / q8_0 (latest) / q4_k_m.
Capability: embedding

bge-small-zh-v1.5:q4_k_m/model · f1b50deed303 · 18MB
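This tag carries the `embedding` capability, so it is served through Ollama's embeddings endpoint rather than chat. Below is a minimal usage sketch, assuming the tag has been pulled (`ollama pull qllama/bge-small-zh-v1.5:q4_k_m`), a server is running on the default `http://localhost:11434`, and the current `/api/embed` route; the two Chinese sentences are placeholder inputs:

```python
# Minimal sketch: embed two sentences with this model via a local Ollama
# server and compare them with cosine similarity. Assumes the default
# host/port and that the tag has already been pulled.
import math

import requests

MODEL = "qllama/bge-small-zh-v1.5:q4_k_m"
EMBED_URL = "http://localhost:11434/api/embed"


def embed(texts: list[str]) -> list[list[float]]:
    """Request one embedding vector per input text from the local server."""
    resp = requests.post(EMBED_URL, json={"model": MODEL, "input": texts})
    resp.raise_for_status()
    return resp.json()["embeddings"]


def cosine(a: list[float], b: list[float]) -> float:
    dot = sum(x * y for x, y in zip(a, b))
    norm_a = math.sqrt(sum(x * x for x in a))
    norm_b = math.sqrt(sum(y * y for y in b))
    return dot / (norm_a * norm_b)


va, vb = embed(["今天天气很好", "今天天气不错"])  # placeholder sentences
print(f"dim={len(va)}, cosine={cosine(va, vb):.4f}")
```

The returned vectors should be 512-dimensional, matching `bert.embedding_length` in the metadata below.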
Metadata

| Key | Value |
|---|---|
| general.architecture | bert |
| general.file_type | Q4_K_M |
| bert.attention.causal | false |
| bert.attention.head_count | 8 |
| bert.attention.layer_norm_epsilon | 1e-12 |
| bert.block_count | 4 |
| bert.context_length | 512 |
| bert.embedding_length | 512 |
| bert.feed_forward_length | 2048 |
| bert.pooling_type | CLS |
| tokenizer.ggml.cls_token_id | 101 |
| tokenizer.ggml.mask_token_id | 103 |
| tokenizer.ggml.model | bert |
| tokenizer.ggml.padding_token_id | 0 |
| tokenizer.ggml.seperator_token_id | 102 |
| tokenizer.ggml.token_type | [3, 1, 1, 1, 1, ...] |
| tokenizer.ggml.token_type_count | 2 |
| tokenizer.ggml.tokens | [[PAD], [unused1], [unused2], [unused3], [unused4], ...] |
| tokenizer.ggml.unknown_token_id | 100 |
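These keys describe a compact 4-layer BERT encoder: 8 attention heads over 512-dim embeddings (64 dims per head), a 2048-dim feed-forward layer, a 512-token context window, and CLS pooling. To dump the same keys from a local copy, here is a sketch using the `gguf` Python package that ships with llama.cpp; the file path is a placeholder (not the blob name Ollama stores on disk), and since field-value decoding varies between package versions, this only lists key names:

```python
# Sketch: list the GGUF metadata keys of a local model file.
# Assumes `pip install gguf`; the path below is a placeholder.
from gguf import GGUFReader

reader = GGUFReader("bge-small-zh-v1.5-q4_k_m.gguf")
for key in reader.fields:
    print(key)  # e.g. general.architecture, bert.block_count, ...
```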
Tensor

| Name | Type | Shape |
|---|---|---|
| token_embd.weight | Q6_K | [512, 21128] |
| blk.0.attn_k.bias | F32 | [512] |
| blk.0.attn_k.weight | Q4_K | [512, 512] |
| blk.0.attn_output.bias | F32 | [512] |
| blk.0.attn_output.weight | Q4_K | [512, 512] |
| blk.0.attn_output_norm.bias | F32 | [512] |
| blk.0.attn_output_norm.weight | F32 | [512] |
| blk.0.attn_q.bias | F32 | [512] |
| blk.0.attn_q.weight | Q4_K | [512, 512] |
| blk.0.attn_v.bias | F32 | [512] |
| blk.0.attn_v.weight | Q4_K | [512, 512] |
| blk.0.ffn_down.bias | F32 | [512] |
| blk.0.ffn_down.weight | Q4_K | [2048, 512] |
| blk.0.ffn_up.bias | F32 | [2048] |
| blk.0.ffn_up.weight | Q4_K | [512, 2048] |
| blk.0.layer_output_norm.bias | F32 | [512] |
| blk.0.layer_output_norm.weight | F32 | [512] |
| blk.1.attn_k.bias | F32 | [512] |
| blk.1.attn_k.weight | Q4_K | [512, 512] |
| blk.1.attn_output.bias | F32 | [512] |
| blk.1.attn_output.weight | Q4_K | [512, 512] |
| blk.1.attn_output_norm.bias | F32 | [512] |
| blk.1.attn_output_norm.weight | F32 | [512] |
| blk.1.attn_q.bias | F32 | [512] |
| blk.1.attn_q.weight | Q4_K | [512, 512] |
| blk.1.attn_v.bias | F32 | [512] |
| blk.1.attn_v.weight | Q4_K | [512, 512] |
| blk.1.ffn_down.bias | F32 | [512] |
| blk.1.ffn_down.weight | Q4_K | [2048, 512] |
| blk.1.ffn_up.bias | F32 | [2048] |
| blk.1.ffn_up.weight | Q4_K | [512, 2048] |
| blk.1.layer_output_norm.bias | F32 | [512] |
| blk.1.layer_output_norm.weight | F32 | [512] |
| blk.2.attn_k.bias | F32 | [512] |
| blk.2.attn_k.weight | Q4_K | [512, 512] |
| blk.2.attn_output.bias | F32 | [512] |
| blk.2.attn_output.weight | Q4_K | [512, 512] |
| blk.2.attn_output_norm.bias | F32 | [512] |
| blk.2.attn_output_norm.weight | F32 | [512] |
| blk.2.attn_q.bias | F32 | [512] |
| blk.2.attn_q.weight | Q4_K | [512, 512] |
| blk.2.attn_v.bias | F32 | [512] |
| blk.2.attn_v.weight | Q6_K | [512, 512] |
| blk.2.ffn_down.bias | F32 | [512] |
| blk.2.ffn_down.weight | Q6_K | [2048, 512] |
| blk.2.ffn_up.bias | F32 | [2048] |
| blk.2.ffn_up.weight | Q4_K | [512, 2048] |
| blk.2.layer_output_norm.bias | F32 | [512] |
| blk.2.layer_output_norm.weight | F32 | [512] |
| blk.3.attn_k.bias | F32 | [512] |
| blk.3.attn_k.weight | Q4_K | [512, 512] |
| blk.3.attn_output.bias | F32 | [512] |
| blk.3.attn_output.weight | Q4_K | [512, 512] |
| blk.3.attn_output_norm.bias | F32 | [512] |
| blk.3.attn_output_norm.weight | F32 | [512] |
| blk.3.attn_q.bias | F32 | [512] |
| blk.3.attn_q.weight | Q4_K | [512, 512] |
| blk.3.attn_v.bias | F32 | [512] |
| blk.3.attn_v.weight | Q6_K | [512, 512] |
| blk.3.ffn_down.bias | F32 | [512] |
| blk.3.ffn_down.weight | Q6_K | [2048, 512] |
| blk.3.ffn_up.bias | F32 | [2048] |
| blk.3.ffn_up.weight | Q4_K | [512, 2048] |
| blk.3.layer_output_norm.bias | F32 | [512] |
| blk.3.layer_output_norm.weight | F32 | [512] |
| position_embd.weight | F16 | [512, 512] |
| token_embd_norm.bias | F32 | [512] |
| token_embd_norm.weight | F32 | [512] |
| token_types.weight | F32 | [512, 2] |
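Note the quantization mix that q4_k_m produces here: most weight matrices are Q4_K, while `token_embd.weight` and the `attn_v.weight` / `ffn_down.weight` tensors of the later blocks are kept at the higher-precision Q6_K, and all biases and norms stay at F32. The same `GGUFReader` can reproduce this listing (placeholder path again):

```python
# Sketch: print each tensor's name, quantization type, and shape,
# mirroring the table above. The path is a placeholder.
from gguf import GGUFReader

reader = GGUFReader("bge-small-zh-v1.5-q4_k_m.gguf")
for tensor in reader.tensors:
    print(tensor.name, tensor.tensor_type.name, list(tensor.shape))
```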