import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

# --- Fix Streamlit config issue ---
st.set_page_config(
    page_title="Natural Reasoning Bot",
    page_icon="🤖",
    layout="centered"
)

st.title("🤖 Natural Reasoning Bot")
st.markdown("Ask science questions and get answers from your fine-tuned model.")

# --- Sidebar for parameters ---
st.sidebar.header("⚙️ Generation Settings")
temperature = st.sidebar.slider("Temperature", 0.1, 1.5, 1.0, 0.1)  # min > 0: sampling requires a strictly positive temperature
top_k = st.sidebar.slider("Top-k", 0, 100, 50, 5)
top_p = st.sidebar.slider("Top-p", 0.0, 1.0, 0.95, 0.05)

# --- Load model and tokenizer ---
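# st.cache_resource keeps the loaded model and tokenizer in memory across Streamlit reruns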
@st.cache_resource(show_spinner=False)
def load_model():
    # Load the fine-tuned checkpoint from the local directory
    model = AutoModelForCausalLM.from_pretrained("./my_bot_model")
    tokenizer = AutoTokenizer.from_pretrained("./my_bot_model")
    # Move the model to GPU when one is available, otherwise stay on CPU
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)
    return model, tokenizer

model, tokenizer = load_model()

# --- Text Input ---
question = st.text_area("🧠 Enter your science question:", height=100)

generate_btn = st.button("🔍 Generate Answer")

# --- Inference Logic ---
if generate_btn and question:
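    # Prompt template; assumed to match the instruction format used during fine-tuning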
    input_text = f"### Question: {question}\n### Answer:"
    inputs = tokenizer(input_text, return_tensors="pt").to(model.device)

    model.eval()
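    # no_grad skips gradient tracking to cut memory use during generation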
    with torch.no_grad():
        output = model.generate(
            **inputs,
            max_new_tokens=256,  # cap only the newly generated tokens so long questions are not truncated
            do_sample=True,
            top_p=top_p,
            top_k=top_k,
            temperature=temperature,
            pad_token_id=tokenizer.eos_token_id
        )

    # Decode only the newly generated tokens so the prompt is not echoed back in the answer
    generated_tokens = output[0][inputs["input_ids"].shape[-1]:]
    answer = tokenizer.decode(generated_tokens, skip_special_tokens=True).strip()

    st.markdown("---")
    st.subheader("📤 Model Answer")
    st.success(answer)

elif generate_btn:
    st.warning("Please enter a question to get an answer.")