|
import streamlit as st
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
|
|
|
|
|
@st.cache_resource
def load_model():
    """Load and cache the Replit code model and its tokenizer.

    Decorated with ``st.cache_resource`` so the (multi-GB) model is loaded
    once per Streamlit server process instead of on every script rerun.

    Returns:
        tuple: ``(model, tokenizer)`` ready for text generation.
    """
    model_name = "replit/replit-code-v1-3b"
    # BUG FIX: the replit repo ships a *custom* tokenizer class, so the
    # tokenizer load also requires trust_remote_code=True (previously only
    # the model had it, and the tokenizer load failed for this checkpoint).
    tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
    model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True)
    model.eval()  # inference only: disable dropout / training-mode layers
    return model, tokenizer
|
|
|
# One-time (cached) load; reruns of the script reuse the cached resources.
model, tokenizer = load_model()

# --- Main page header ------------------------------------------------------
st.title("Replit-code-v1-3b Code Assistant 📊")

INTRO_MD = """

This application allows you to interact with the **Replit-code-v1-3b** large language model.

You can use it to generate, debug, or optimize code snippets.

Simply provide a prompt, and the model will respond with suggestions!

"""
st.markdown(INTRO_MD)

# --- Prompt input ----------------------------------------------------------
st.header("Enter Your Prompt")
prompt = st.text_area("Describe your coding task or paste your code for debugging:")

# --- Generation settings (rendered in the sidebar) -------------------------
with st.sidebar:
    st.header("Model Settings")
    temperature = st.slider("Temperature (Creativity)", 0.0, 1.0, 0.7)
    max_length = st.slider("Max Response Length", 50, 500, 200)
|
|
|
|
|
if st.button("Generate Response"):
    if prompt.strip():
        with st.spinner("Generating response..."):
            inputs = tokenizer(prompt, return_tensors="pt", truncation=True)

            gen_kwargs = {
                # BUG FIX: max_length counts the *prompt* tokens too, so a long
                # prompt left little or no room for the answer. max_new_tokens
                # matches what the "Max Response Length" slider promises.
                "max_new_tokens": max_length,
                "pad_token_id": tokenizer.eos_token_id,
            }
            # BUG FIX: temperature is ignored unless sampling is enabled —
            # without do_sample=True generation is greedy and the "Creativity"
            # slider had no effect. At temperature 0 we stay greedy, since
            # sampling with temperature=0 is invalid.
            if temperature > 0:
                gen_kwargs["do_sample"] = True
                gen_kwargs["temperature"] = temperature

            with torch.no_grad():  # inference only: skip autograd bookkeeping
                # Pass **inputs (not just input_ids) so the attention mask is
                # forwarded as well; omitting it triggers a transformers
                # warning and can misalign padding handling.
                outputs = model.generate(**inputs, **gen_kwargs)

            response = tokenizer.decode(outputs[0], skip_special_tokens=True)

        st.subheader("Generated Code/Response")
        st.code(response, language="python")
    else:
        st.warning("Please enter a prompt to generate a response.")
|
|
|
|
|
# --- Footer ----------------------------------------------------------------
st.markdown("---")

FOOTER_MD = """

**Replit-code-v1-3b Code Assistant**

Built with [Streamlit](https://streamlit.io/) and the [Hugging Face Transformers Library](https://huggingface.co/docs/transformers).

"""
st.markdown(FOOTER_MD)