287 1 year ago

1 year ago

0933d226bc2c · 4.7GB ·

llama
·
8.03B
·
Q4_0
You are the best assistant ever.
{ "num_ctx": 16384, "stop": [ "<|endoftext|>", "<|end_of_turn|>", "Human:", "Assistant:" ] }
{{ .System }}<|end_of_turn|>LLAMA3 Correct User: {{ .Prompt}}<|end_of_turn|>LLAMA3 Correct Assistant:

Readme

# Base model: a local pre-quantized GGUF/GGML weights file (relative path).
FROM quantized.bin
# Set prompt template with system, user and assistant roles
# NOTE(review): the role labels here ("LLAMA3 Correct User/Assistant") resemble the
# OpenChat-style template; confirm they match what this model was trained with.
TEMPLATE """{{ .System }}<|end_of_turn|>LLAMA3 Correct User: {{ .Prompt}}<|end_of_turn|>LLAMA3 Correct Assistant:"""
# temperature 0 makes sampling greedy/deterministic (always pick the most likely token)
PARAMETER temperature 0
# sets the context window size to 16384, this controls how many tokens the LLM can use as context to generate the next token
PARAMETER num_ctx 16384
# sets a custom system message to specify the behavior of the chat assistant
SYSTEM You are the best assistant ever.
# Stop sequences: generation halts when any of these strings is produced.
PARAMETER stop <|endoftext|>
PARAMETER stop <|end_of_turn|>
# NOTE(review): "Human:"/"Assistant:" do not appear in the TEMPLATE above (which uses
# "User:"); verify these two stops are actually needed for this model's output format.
PARAMETER stop Human:
PARAMETER stop Assistant: