Vijayendra committed (verified)
Commit 61e164b · Parent: 24a0b7d

Update README.md

Files changed (1):
  README.md (+1, -1)
README.md CHANGED

```diff
@@ -32,7 +32,7 @@ model = AutoModelForCausalLM.from_pretrained(
 ).to("cuda" if torch.cuda.is_available() else "cpu")  # Send model to GPU if available
 
 # 🛠 **Define Inference Function**
-def generate_response(model, tokenizer, prompt, max_new_tokens=2048, temperature=0.7):
+def generate_response(model, tokenizer, prompt, max_new_tokens=3200, temperature=0.7):
 
     # Tokenize input
     inputs = tokenizer(prompt, return_tensors="pt", padding=True, truncation=True).to(model.device)
```
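
For context, the hunk above only shows the function signature and the tokenization step. Below is a minimal sketch of how `generate_response` plausibly continues, assuming a standard `model.generate` / `tokenizer.decode` flow that is not shown in this diff:

```python
import torch

def generate_response(model, tokenizer, prompt, max_new_tokens=3200, temperature=0.7):
    # Tokenize input and move the tensors to the model's device
    inputs = tokenizer(prompt, return_tensors="pt", padding=True, truncation=True).to(model.device)

    # Generation settings below are an assumption; the diff only shows the signature change
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=max_new_tokens,
            temperature=temperature,
            do_sample=True,
            pad_token_id=tokenizer.eos_token_id,
        )

    # Decode only the newly generated tokens, skipping the echoed prompt
    new_tokens = outputs[0][inputs["input_ids"].shape[-1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)
```

The change itself simply raises the default cap on generated tokens per call from 2048 to 3200; longer completions cost proportionally more compute and memory.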