muhammad-mujtaba-ai committed on
Commit
e3c4ffa
·
verified ·
1 Parent(s): c407764

Upload folder using huggingface_hub

Browse files
Files changed (1) hide show
  1. README.md +3 -3
README.md CHANGED
@@ -113,10 +113,10 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
113
  modelpath = "Chain-GPT/Solidity-LLM"
114
 
115
  tokenizer = AutoTokenizer.from_pretrained(modelpath)
116
- model = AutoModelForCausalLM.from_pretrained(modelpath)
117
 
118
  prompt = "Write a Solidity function to transfer tokens."
119
- inputs = tokenizer(prompt, return_tensors="pt")
120
 
121
  outputs = model.generate(**inputs, max_new_tokens=1400, pad_token_id=tokenizer.eos_token_id)
122
  generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
@@ -135,7 +135,7 @@ from threading import Thread
135
  from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
136
 
137
  model = AutoModelForCausalLM.from_pretrained(
138
- "ChainGPT/SolidityLLM",
139
  torch_dtype=torch.bfloat16,
140
  device_map="cuda"
141
  )
 
113
  modelpath = "Chain-GPT/Solidity-LLM"
114
 
115
  tokenizer = AutoTokenizer.from_pretrained(modelpath)
116
+ model = AutoModelForCausalLM.from_pretrained(modelpath).cuda()
117
 
118
  prompt = "Write a Solidity function to transfer tokens."
119
+ inputs = tokenizer(prompt, return_tensors="pt").to("cuda")
120
 
121
  outputs = model.generate(**inputs, max_new_tokens=1400, pad_token_id=tokenizer.eos_token_id)
122
  generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
 
135
  from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
136
 
137
  model = AutoModelForCausalLM.from_pretrained(
138
+ "Chain-GPT/Solidity-LLM",
139
  torch_dtype=torch.bfloat16,
140
  device_map="cuda"
141
  )