import streamlit as st
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
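
# Assumption: this script is saved as app.py; launch the UI with
#   streamlit run app.py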

st.set_page_config(
    page_title="Natural Reasoning Bot",
    page_icon="🤖",
    layout="centered",
)

st.title("🤖 Natural Reasoning Bot")
st.markdown("Ask science questions and get answers from your fine-tuned model.")
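
# Sampling controls. As a rough guide: temperature sharpens (<1) or flattens
# (>1) the next-token distribution, top-k keeps only the k most likely tokens
# (0 disables the filter in transformers), and top-p keeps the smallest token
# set whose cumulative probability reaches p.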
st.sidebar.header("⚙️ Generation Settings")
# Minimum of 0.1 rather than 0.0: transformers rejects temperature=0.0
# when do_sample=True.
temperature = st.sidebar.slider("Temperature", 0.1, 1.5, 1.0, 0.1)
top_k = st.sidebar.slider("Top-k", 0, 100, 50, 5)
top_p = st.sidebar.slider("Top-p", 0.0, 1.0, 0.95, 0.05)


@st.cache_resource(show_spinner=False)
def load_model():
    # Assumption: ./my_bot_model is a checkpoint directory written by
    # save_pretrained() after fine-tuning; adjust the path if yours differs.
    model = AutoModelForCausalLM.from_pretrained("./my_bot_model")
    tokenizer = AutoTokenizer.from_pretrained("./my_bot_model")
    return model, tokenizer


# cache_resource keeps the loaded model in memory across Streamlit reruns,
# so it is loaded only once per process rather than on every interaction.
model, tokenizer = load_model()
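
# Optional sketch (assumption: a CUDA GPU is present): move the model to the
# GPU for faster generation. The tokenized inputs below follow model.device
# either way.
# if torch.cuda.is_available():
#     model = model.to("cuda")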

question = st.text_area("🧠 Enter your science question:", height=100)
generate_btn = st.button("🔍 Generate Answer")

if generate_btn and question:
    # Recreate the prompt template the model saw during fine-tuning.
    input_text = f"### Question: {question}\n### Answer:"
    inputs = tokenizer(input_text, return_tensors="pt").to(model.device)

    model.eval()
    with torch.no_grad():
        output = model.generate(
            **inputs,
            # max_length counts the prompt tokens too; raise it if answers
            # come back truncated.
            max_length=256,
            do_sample=True,
            top_p=top_p,
            top_k=top_k,
            temperature=temperature,
            pad_token_id=tokenizer.eos_token_id,
        )

    # Decode the full sequence, then strip the echoed prompt so only the
    # generated answer remains.
    response = tokenizer.decode(output[0], skip_special_tokens=True)
    answer = response.replace(input_text, "").strip()

    st.markdown("---")
    st.subheader("📤 Model Answer")
    st.success(answer)
elif generate_btn:
    st.warning("Please enter a question to get an answer.")