# demo.py - Quick demo of the raihan-js/medllm-10m model
from transformers import AutoTokenizer, AutoModelForCausalLM

model_name = "raihan-js/medllm-10m"

print("Loading MedLLM...")
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
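
# Optional: fix the random seed so repeated runs sample the same text.
# set_seed is the stock transformers helper; the seed value 0 is an
# arbitrary choice, not something the original demo sets.
from transformers import set_seed
set_seed(0)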

prompts = [
    "Symptoms of diabetes include",
    "Treatment for high blood pressure",
    "The patient presents with",
]
print("\nGenerating medical text:") | |
for prompt in prompts: | |
inputs = tokenizer(prompt, return_tensors="pt") | |
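    # Sampling (do_sample=True) at temperature 0.7 gives varied but still
    # focused text; pad_token_id is set explicitly because GPT-style
    # tokenizers often define no pad token, which otherwise emits a warning.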
    outputs = model.generate(
        **inputs,
        max_new_tokens=50,  # cap on generated tokens; max_length would count the prompt too
        do_sample=True,
        temperature=0.7,
        pad_token_id=tokenizer.eos_token_id,
    )
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    print(f"\nPrompt: {prompt}")
    print(f"Response: {response}")