File size: 3,261 Bytes
9a7b741
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
import streamlit as st
from huggingface_hub import InferenceClient
import os

# Page header
st.title("πŸ’¬ GPT-OSS-20B Chatbot")
st.markdown("*A clean, multi-turn chatbot powered by GPT-OSS-20B*")

# Initialize the Hugging Face Inference client.
# The token is read from the environment. Previously the missing-token check
# was commented out entirely; surface it as a warning (not a hard stop) so the
# app still loads and anonymous/rate-limited access can be attempted, but
# failures are explainable to the user.
hf_token = os.getenv("HF_TOKEN")
if not hf_token:
    st.warning(
        "HF_TOKEN environment variable is not set — requests may fail or be rate-limited."
    )

client = InferenceClient(
    model="openai/gpt-oss-20b",
    token=hf_token,  # None falls back to unauthenticated access
)

# Conversation history lives in session state so it survives Streamlit reruns.
if "messages" not in st.session_state:
    st.session_state["messages"] = []

# Sidebar: assistant persona, sampling parameters, and chat management.
with st.sidebar:
    st.header("βš™οΈ Configuration")

    # Persona text, prepended as the system message on every API call.
    system_prompt = st.text_area(
        "System message",
        value="You are a helpful assistant.",
        help="Define the AI assistant's behavior and personality",
    )

    st.subheader("Generation Parameters")
    # Forwarded verbatim to the chat-completion request below.
    max_tokens = st.slider("Max new tokens", 1, 2048, 512)
    temperature = st.slider("Temperature", 0.1, 2.0, 0.7)
    top_p = st.slider("Top-p", 0.1, 1.0, 0.95)

    st.divider()
    # Wipe the stored transcript and force an immediate redraw.
    if st.button("πŸ—‘οΈ Clear Chat History", use_container_width=True):
        st.session_state.messages = []
        st.rerun()

    # Running message count, shown only once the conversation has started.
    if st.session_state.messages:
        st.caption(f"Messages in conversation: {len(st.session_state.messages)}")

# Main chat area: replay the stored transcript on every rerun.
st.subheader("Chat")

for msg in st.session_state.messages:
    with st.chat_message(msg["role"]):
        st.markdown(msg["content"])

# Chat input: record the user's turn, query the model, display the reply.
if prompt := st.chat_input("Type your message here..."):
    # Record and echo the user's message.
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    # Generate and display the assistant's response.
    with st.chat_message("assistant"):
        with st.spinner("Thinking..."):
            try:
                # Prepend the system prompt on every call so sidebar edits
                # take effect immediately on the next turn.
                messages_for_api = [{"role": "system", "content": system_prompt}]
                messages_for_api.extend(st.session_state.messages)

                response = client.chat_completion(
                    messages=messages_for_api,
                    max_tokens=max_tokens,
                    temperature=temperature,
                    top_p=top_p,
                    stream=False,
                )

                # Use the documented attribute access on the output object
                # (not dict subscripting), and coerce a null content field to
                # "" so None is never rendered or stored in history.
                reply = response.choices[0].message.content or ""
                st.markdown(reply)

                st.session_state.messages.append(
                    {"role": "assistant", "content": reply}
                )

            except Exception as e:
                st.error(f"Error generating response: {str(e)}")
                # Roll back the just-appended user turn so stored history
                # stays consistent with what the model actually answered.
                st.session_state.messages.pop()

# Footer: static attribution line rendered below the chat on every rerun.
st.divider()
st.caption("Built with Streamlit β€’ Powered by GPT-OSS-20B β€’ Styled with Anthropic theme")