4ad866d91636 · 14MB · updated 4 months ago
Metadata

| Key | Value |
| --- | --- |
| adapter.lora.alpha | 32 |
| adapter.type | lora |
| general.architecture | llama |
| general.file_type | 1 |
| general.type | adapter |
| general.version | v0.2 |
| llama.attention.head_count | 32 |
| llama.attention.head_count_kv | 8 |
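Read together, these fields describe a LoRA adapter for a llama-architecture base model. The tensor table below implies a rank of r = 32 (the shared inner dimension of every lora_a/lora_b pair), and with adapter.lora.alpha = 32 the usual scaling factor alpha/r works out to 1.0. Below is a minimal sketch of how one such pair would be merged into a frozen base weight, assuming the common ΔW = (alpha/r)·A·B convention that matches these shapes; the exact storage layout inside the GGUF file may differ, and the random tensors are stand-ins for the real weights.

```python
import numpy as np

# Hypothetical illustration of merging one low-rank pair from this adapter
# into a frozen base weight. Shapes are taken from the tensor table below.
alpha, r = 32.0, 32          # adapter.lora.alpha; rank inferred from the inner dim
scale = alpha / r            # 1.0 here, so the update is applied at full strength

rng = np.random.default_rng(0)
lora_a = rng.standard_normal((4096, 32), dtype=np.float32)    # e.g. blk.16.attn_q.weight.lora_a
lora_b = rng.standard_normal((32, 4096), dtype=np.float32)    # e.g. blk.16.attn_q.weight.lora_b
base_w = rng.standard_normal((4096, 4096), dtype=np.float32)  # stand-in for the frozen q projection

# The low-rank pair stores 2 * 4096 * 32 parameters instead of 4096^2,
# yet its product perturbs the full weight matrix.
merged_w = base_w + scale * (lora_a @ lora_b)
print(merged_w.shape)  # (4096, 4096)
```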
Tensor

| Name | Type | Shape |
| --- | --- | --- |
| blk.16.attn_q.weight.lora_a | F16 | [4096, 32] |
| blk.16.attn_q.weight.lora_b | F16 | [32, 4096] |
| blk.16.attn_v.weight.lora_a | F16 | [4096, 32] |
| blk.16.attn_v.weight.lora_b | F16 | [32, 1024] |
| blk.17.attn_q.weight.lora_a | F16 | [4096, 32] |
| blk.17.attn_q.weight.lora_b | F16 | [32, 4096] |
| blk.17.attn_v.weight.lora_a | F16 | [4096, 32] |
| blk.17.attn_v.weight.lora_b | F16 | [32, 1024] |
| blk.18.attn_q.weight.lora_a | F16 | [4096, 32] |
| blk.18.attn_q.weight.lora_b | F16 | [32, 4096] |
| blk.18.attn_v.weight.lora_a | F16 | [4096, 32] |
| blk.18.attn_v.weight.lora_b | F16 | [32, 1024] |
| blk.19.attn_q.weight.lora_a | F16 | [4096, 32] |
| blk.19.attn_q.weight.lora_b | F16 | [32, 4096] |
| blk.19.attn_v.weight.lora_a | F16 | [4096, 32] |
| blk.19.attn_v.weight.lora_b | F16 | [32, 1024] |
| blk.20.attn_q.weight.lora_a | F16 | [4096, 32] |
| blk.20.attn_q.weight.lora_b | F16 | [32, 4096] |
| blk.20.attn_v.weight.lora_a | F16 | [4096, 32] |
| blk.20.attn_v.weight.lora_b | F16 | [32, 1024] |
| blk.21.attn_q.weight.lora_a | F16 | [4096, 32] |
| blk.21.attn_q.weight.lora_b | F16 | [32, 4096] |
| blk.21.attn_v.weight.lora_a | F16 | [4096, 32] |
| blk.21.attn_v.weight.lora_b | F16 | [32, 1024] |
| blk.22.attn_q.weight.lora_a | F16 | [4096, 32] |
| blk.22.attn_q.weight.lora_b | F16 | [32, 4096] |
| blk.22.attn_v.weight.lora_a | F16 | [4096, 32] |
| blk.22.attn_v.weight.lora_b | F16 | [32, 1024] |
| blk.23.attn_q.weight.lora_a | F16 | [4096, 32] |
| blk.23.attn_q.weight.lora_b | F16 | [32, 4096] |
| blk.23.attn_v.weight.lora_a | F16 | [4096, 32] |
| blk.23.attn_v.weight.lora_b | F16 | [32, 1024] |
| blk.24.attn_q.weight.lora_a | F16 | [4096, 32] |
| blk.24.attn_q.weight.lora_b | F16 | [32, 4096] |
| blk.24.attn_v.weight.lora_a | F16 | [4096, 32] |
| blk.24.attn_v.weight.lora_b | F16 | [32, 1024] |
| blk.25.attn_q.weight.lora_a | F16 | [4096, 32] |
| blk.25.attn_q.weight.lora_b | F16 | [32, 4096] |
| blk.25.attn_v.weight.lora_a | F16 | [4096, 32] |
| blk.25.attn_v.weight.lora_b | F16 | [32, 1024] |
| blk.26.attn_q.weight.lora_a | F16 | [4096, 32] |
| blk.26.attn_q.weight.lora_b | F16 | [32, 4096] |
| blk.26.attn_v.weight.lora_a | F16 | [4096, 32] |
| blk.26.attn_v.weight.lora_b | F16 | [32, 1024] |
| blk.27.attn_q.weight.lora_a | F16 | [4096, 32] |
| blk.27.attn_q.weight.lora_b | F16 | [32, 4096] |
| blk.27.attn_v.weight.lora_a | F16 | [4096, 32] |
| blk.27.attn_v.weight.lora_b | F16 | [32, 1024] |
| blk.28.attn_q.weight.lora_a | F16 | [4096, 32] |
| blk.28.attn_q.weight.lora_b | F16 | [32, 4096] |
| blk.28.attn_v.weight.lora_a | F16 | [4096, 32] |
| blk.28.attn_v.weight.lora_b | F16 | [32, 1024] |
| blk.29.attn_q.weight.lora_a | F16 | [4096, 32] |
| blk.29.attn_q.weight.lora_b | F16 | [32, 4096] |
| blk.29.attn_v.weight.lora_a | F16 | [4096, 32] |
| blk.29.attn_v.weight.lora_b | F16 | [32, 1024] |
| blk.30.attn_q.weight.lora_a | F16 | [4096, 32] |
| blk.30.attn_q.weight.lora_b | F16 | [32, 4096] |
| blk.30.attn_v.weight.lora_a | F16 | [4096, 32] |
| blk.30.attn_v.weight.lora_b | F16 | [32, 1024] |
| blk.31.attn_q.weight.lora_a | F16 | [4096, 32] |
| blk.31.attn_q.weight.lora_b | F16 | [32, 4096] |
| blk.31.attn_v.weight.lora_a | F16 | [4096, 32] |
| blk.31.attn_v.weight.lora_b | F16 | [32, 1024] |
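Two things stand out in the table. First, only blocks 16 through 31 carry adapter tensors, so only the upper layers of the base model are adapted. Second, the attn_v lora_b tensors end at 1024 columns rather than 4096, which follows from grouped-query attention: with head_count = 32 over a 4096-wide hidden state each head is 128-dimensional, and head_count_kv = 8 gives the v projection an output width of 8 × 128 = 1024. The quick arithmetic check below, using only numbers from the two tables above, also recovers the 14MB file size:

```python
# Sanity checks on the shapes and file size, using only values
# from the metadata and tensor tables.
hidden = 4096
n_heads = 32                 # llama.attention.head_count
n_kv_heads = 8               # llama.attention.head_count_kv
head_dim = hidden // n_heads           # 128
kv_dim = n_kv_heads * head_dim         # 1024 -> why attn_v lora_b is [32, 1024]

rank = 32
per_block = hidden * rank + rank * hidden    # attn_q lora_a + lora_b
per_block += hidden * rank + rank * kv_dim   # attn_v lora_a + lora_b
total_params = per_block * 16                # blocks 16..31

print(kv_dim)                     # 1024
print(total_params * 2 / 1e6)     # F16 = 2 bytes/param -> ~13.6 MB, matching 14MB
```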