"""Load a causal language model and its tokenizer via Hugging Face transformers.

Note: loading requires the model weights to be available locally or
downloadable from the Hugging Face Hub.
"""
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch  # kept: likely used later (e.g. device placement / inference); not used in this visible fragment

# NOTE(review): "llama3.2" is not a valid Hub repo id on its own — presumably
# a placeholder for something like "meta-llama/Llama-3.2-1B"; confirm.
model_name = "llama3.2"  # Replace with your actual model path or name

# Load model and tokenizer from the same checkpoint so vocab/config match.
model = AutoModelForCausalLM.from_pretrained(model_name)
# Fix: original was truncated at `tokenizer = AutoTokenizer`, which would bind
# the class itself rather than a tokenizer instance.
tokenizer = AutoTokenizer.from_pretrained(model_name)
No models have been pushed.
Readme
No readme has been provided for this model.