acecalisto3 committed
Commit 263da2f · verified · 1 Parent(s): dcbeecc

Update app.py

Files changed (1): app.py +188 -282
app.py CHANGED
@@ -1,16 +1,25 @@
- import streamlit as st
  import os
  import subprocess
- from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
  import black
  from pylint import lint
  from io import StringIO
- import openai
  import sys
-
- # Set your OpenAI API key here
- openai.api_key = "YOUR_OPENAI_API_KEY"
-
  PROJECT_ROOT = "projects"
  AGENT_DIRECTORY = "agents"
@@ -23,165 +32,175 @@ if 'workspace_projects' not in st.session_state:
      st.session_state.workspace_projects = {}
  if 'available_agents' not in st.session_state:
      st.session_state.available_agents = []

  class AIAgent:
-     def __init__(self, name, description, skills):
          self.name = name
          self.description = description
          self.skills = skills

-     def create_agent_prompt(self):
-         skills_str = '\n'.join([f"* {skill}" for skill in self.skills])
-         agent_prompt = f"""
- As an elite expert developer, my name is {self.name}. I possess a comprehensive understanding of the following areas:
- {skills_str}
-
- I am confident that I can leverage my expertise to assist you in developing and deploying cutting-edge web applications. Please feel free to ask any questions or present any challenges you may encounter.
- """
-         return agent_prompt
-
-     def autonomous_build(self, chat_history, workspace_projects):
-         """
-         Autonomous build logic that continues based on the state of chat history and workspace projects.
-         """
-         # Example logic: Generate a summary of chat history and workspace state
          summary = "Chat History:\n" + "\n".join([f"User: {u}\nAgent: {a}" for u, a in chat_history])
          summary += "\n\nWorkspace Projects:\n" + "\n".join([f"{p}: {details}" for p, details in workspace_projects.items()])

-         # Example: Generate the next logical step in the project
          next_step = "Based on the current state, the next logical step is to implement the main application logic."

-         return summary, next_step
-
- def save_agent_to_file(agent):
-     """Saves the agent's prompt to a file."""
-     if not os.path.exists(AGENT_DIRECTORY):
-         os.makedirs(AGENT_DIRECTORY)
-     file_path = os.path.join(AGENT_DIRECTORY, f"{agent.name}.txt")
-     with open(file_path, "w") as file:
-         file.write(agent.create_agent_prompt())
-     st.session_state.available_agents.append(agent.name)
-
- def load_agent_prompt(agent_name):
-     """Loads an agent prompt from a file."""
-     file_path = os.path.join(AGENT_DIRECTORY, f"{agent_name}.txt")
-     if os.path.exists(file_path):
-         with open(file_path, "r") as file:
-             agent_prompt = file.read()
-         return agent_prompt
-     else:
-         return None
-
- def create_agent_from_text(name, text):
-     skills = text.split('\n')
-     agent = AIAgent(name, "AI agent created from text input.", skills)
-     save_agent_to_file(agent)
-     return agent.create_agent_prompt()
-
- # Chat interface using a selected agent
- def chat_interface_with_agent(input_text, agent_name):
-     agent_prompt = load_agent_prompt(agent_name)
-     if agent_prompt is None:
-         return f"Agent {agent_name} not found."
-
-     # Load the GPT-2 model which is compatible with AutoModelForCausalLM
-     model_name = "gpt2"
-     try:
-         model = AutoModelForCausalLM.from_pretrained(model_name)
-         tokenizer = AutoTokenizer.from_pretrained(model_name)
-         generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
-     except EnvironmentError as e:
-         return f"Error loading model: {e}"
-
-     # Combine the agent prompt with user input
-     combined_input = f"{agent_prompt}\n\nUser: {input_text}\nAgent:"
-
-     # Truncate input text to avoid exceeding the model's maximum length
-     max_input_length = 900
-     input_ids = tokenizer.encode(combined_input, return_tensors="pt")
-     if input_ids.shape[1] > max_input_length:
-         input_ids = input_ids[:, :max_input_length]
-
-     # Generate chatbot response
-     outputs = model.generate(
-         input_ids, max_new_tokens=50, num_return_sequences=1, do_sample=True
-     )
-     response = tokenizer.decode(outputs[0], skip_special_tokens=True)
-     return response
-
- # Terminal interface
- def terminal_interface(command, project_name=None):
-     if project_name:
          project_path = os.path.join(PROJECT_ROOT, project_name)
-         result = subprocess.run(command, shell=True, capture_output=True, text=True, cwd=project_path)
-     else:
-         result = subprocess.run(command, shell=True, capture_output=True, text=True)
-     return result.stdout
-
- # Code editor interface
- def code_editor_interface(code):
-     formatted_code = black.format_str(code, mode=black.FileMode())
-     pylint_output = lint.Run([formatted_code], do_exit=False)
-     pylint_output_str = StringIO()
-     pylint_output.linter.reporter.write_messages(pylint_output_str)
-     return formatted_code, pylint_output_str.getvalue()
-
- # Text summarization tool
- def summarize_text(text):
-     summarizer = pipeline("summarization")
-     summary = summarizer(text, max_length=130, min_length=30, do_sample=False)
-     return summary[0]['summary_text']
-
- # Sentiment analysis tool
- def sentiment_analysis(text):
-     analyzer = pipeline("sentiment-analysis")
-     result = analyzer(text)
-     return result[0]['label']
-
- # Text translation tool (code translation)
- def translate_code(code, source_language, target_language):
-     # Placeholder for translation logic
-     return f"Translated {source_language} code to {target_language}."
-
- # Code generation tool
- def generate_code(idea):
-     response = openai.Completion.create(
-         engine="davinci-codex",
-         prompt=idea,
-         max_tokens=150
-     )
-     return response.choices[0].text.strip()
-
- # Workspace interface
- def workspace_interface(project_name):
-     project_path = os.path.join(PROJECT_ROOT, project_name)
-     if not os.path.exists(project_path):
-         os.makedirs(project_path)
-         st.session_state.workspace_projects[project_name] = {'files': []}
-         return f"Project '{project_name}' created successfully."
-     else:
-         return f"Project '{project_name}' already exists."
-
- # Add code to workspace
- def add_code_to_workspace(project_name, code, file_name):
-     project_path = os.path.join(PROJECT_ROOT, project_name)
-     if not os.path.exists(project_path):
-         return f"Project '{project_name}' does not exist."

-     file_path = os.path.join(project_path, file_name)
-     with open(file_path, "w") as file:
-         file.write(code)
-     st.session_state.workspace_projects[project_name]['files'].append(file_name)
-     return f"Code added to '{file_name}' in project '{project_name}'."
-
- # Chat interface
- def chat_interface(input_text):
-     response = openai.Completion.create(
-         engine="davinci-codex",
-         prompt=input_text,
-         max_tokens=150
-     )
-     return response.choices[0].text.strip()

  # Streamlit App
  st.title("AI Agent Creator")
@@ -190,6 +209,8 @@ st.title("AI Agent Creator")
  st.sidebar.title("Navigation")
  app_mode = st.sidebar.selectbox("Choose the app mode", ["AI Agent Creator", "Tool Box", "Workspace Chat App"])

  if app_mode == "AI Agent Creator":
      # AI Agent Creator
      st.header("Create an AI Agent from Text")
@@ -202,132 +223,17 @@ if app_mode == "AI Agent Creator":
      st.success(f"Agent '{agent_name}' created and saved successfully.")
      st.session_state.available_agents.append(agent_name)

- elif app_mode == "Tool Box":
-     # Tool Box
-     st.header("AI-Powered Tools")
-
-     # Chat Interface
-     st.subheader("Chat with CodeCraft")
-     chat_input = st.text_area("Enter your message:")
-     if st.button("Send"):
-         chat_response = chat_interface(chat_input)
-         st.session_state.chat_history.append((chat_input, chat_response))
-         st.write(f"CodeCraft: {chat_response}")
-
-     # Terminal Interface
-     st.subheader("Terminal")
-     terminal_input = st.text_input("Enter a command:")
-     if st.button("Run"):
-         terminal_output = terminal_interface(terminal_input)
-         st.session_state.terminal_history.append((terminal_input, terminal_output))
-         st.code(terminal_output, language="bash")
-
-     # Code Editor Interface
-     st.subheader("Code Editor")
-     code_editor = st.text_area("Write your code:", height=300)
-     if st.button("Format & Lint"):
-         formatted_code, lint_message = code_editor_interface(code_editor)
-         st.code(formatted_code, language="python")
-         st.info(lint_message)
-
-     # Text Summarization Tool
-     st.subheader("Summarize Text")
-     text_to_summarize = st.text_area("Enter text to summarize:")
-     if st.button("Summarize"):
-         summary = summarize_text(text_to_summarize)
-         st.write(f"Summary: {summary}")
-
-     # Sentiment Analysis Tool
-     st.subheader("Sentiment Analysis")
-     sentiment_text = st.text_area("Enter text for sentiment analysis:")
-     if st.button("Analyze Sentiment"):
-         sentiment = sentiment_analysis(sentiment_text)
-         st.write(f"Sentiment: {sentiment}")
-
-     # Text Translation Tool (Code Translation)
-     st.subheader("Translate Code")
-     code_to_translate = st.text_area("Enter code to translate:")
-     source_language = st.text_input("Enter source language (e.g., 'Python'):")
-     target_language = st.text_input("Enter target language (e.g., 'JavaScript'):")
-     if st.button("Translate Code"):
-         translated_code = translate_code(code_to_translate, source_language, target_language)
-         st.code(translated_code, language=target_language.lower())
-
-     # Code Generation
-     st.subheader("Code Generation")
-     code_idea = st.text_input("Enter your code idea:")
-     if st.button("Generate Code"):
-         generated_code = generate_code(code_idea)
-         st.code(generated_code, language="python")
-
- elif app_mode == "Workspace Chat App":
-     # Workspace Chat App
-     st.header("Workspace Chat App")
-
-     # Project Workspace Creation
-     st.subheader("Create a New Project")
-     project_name = st.text_input("Enter project name:")
-     if st.button("Create Project"):
-         workspace_status = workspace_interface(project_name)
-         st.success(workspace_status)
-
-     # Add Code to Workspace
-     st.subheader("Add Code to Workspace")
-     code_to_add = st.text_area("Enter code to add to workspace:")
-     file_name = st.text_input("Enter file name (e.g., 'app.py'):")
-     if st.button("Add Code"):
-         add_code_status = add_code_to_workspace(project_name, code_to_add, file_name)
-         st.success(add_code_status)
-
-     # Terminal Interface with Project Context
-     st.subheader("Terminal (Workspace Context)")
-     terminal_input = st.text_input("Enter a command within the workspace:")
-     if st.button("Run Command"):
-         terminal_output = terminal_interface(terminal_input, project_name)
-         st.code(terminal_output, language="bash")
-
-     # Chat Interface for Guidance
-     st.subheader("Chat with CodeCraft for Guidance")
-     chat_input = st.text_area("Enter your message for guidance:")
-     if st.button("Get Guidance"):
-         chat_response = chat_interface(chat_input)
-         st.session_state.chat_history.append((chat_input, chat_response))
-         st.write(f"CodeCraft: {chat_response}")
-
-     # Display Chat History
-     st.subheader("Chat History")
-     for user_input, response in st.session_state.chat_history:
-         st.write(f"User: {user_input}")
-         st.write(f"CodeCraft: {response}")
-
-     # Display Terminal History
-     st.subheader("Terminal History")
-     for command, output in st.session_state.terminal_history:
-         st.write(f"Command: {command}")
-         st.code(output, language="bash")
-
-     # Display Projects and Files
-     st.subheader("Workspace Projects")
-     for project, details in st.session_state.workspace_projects.items():
-         st.write(f"Project: {project}")
-         for file in details['files']:
-             st.write(f"  - {file}")
-
-     # Chat with AI Agents
-     st.subheader("Chat with AI Agents")
-     selected_agent = st.selectbox("Select an AI agent", st.session_state.available_agents)
-     agent_chat_input = st.text_area("Enter your message for the agent:")
-     if st.button("Send to Agent"):
-         agent_chat_response = chat_interface_with_agent(agent_chat_input, selected_agent)
-         st.session_state.chat_history.append((agent_chat_input, agent_chat_response))
-         st.write(f"{selected_agent}: {agent_chat_response}")
-
-     # Automate Build Process
-     st.subheader("Automate Build Process")
-     if st.button("Automate"):
-         agent = AIAgent(selected_agent, "", [])  # Load the agent without skills for now
-         summary, next_step = agent.autonomous_build(st.session_state.chat_history, st.session_state.workspace_projects)
-         st.write("Autonomous Build Summary:")
-         st.write(summary)
-         st.write("Next Step:")
-         st.write(next_step)
@@ -1,16 +1,25 @@
  import os
  import subprocess
+ import streamlit as st
+ from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer, RagRetriever, RagTokenizer, RagTokenForGeneration
  import black
  from pylint import lint
  from io import StringIO
  import sys
+ import torch
+ from huggingface_hub import HfApi
+ from datetime import datetime
+
+ # Get the Hugging Face token from .streamlit/secrets.toml rather than hard-coding an API key
+ hf_token = st.secrets["huggingface"]["hf_token"]
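+ # A minimal sketch (an assumption, not part of this commit) of the matching
+ # .streamlit/secrets.toml entry that st.secrets["huggingface"]["hf_token"] expects:
+ #
+ #     [huggingface]
+ #     hf_token = "hf_xxxxxxxxxxxxxxxx"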
+
+ HUGGING_FACE_REPO_URL = "https://huggingface.co/spaces/acecalisto3/DevToolKit"
  PROJECT_ROOT = "projects"
  AGENT_DIRECTORY = "agents"
@@ -23,165 +32,175 @@ if 'workspace_projects' not in st.session_state:
      st.session_state.workspace_projects = {}
  if 'available_agents' not in st.session_state:
      st.session_state.available_agents = []
+ if 'current_state' not in st.session_state:
+     st.session_state.current_state = {
+         'toolbox': {},
+         'workspace_chat': {}
+     }
+
+ # Free code-generation models available on the Hugging Face Hub
+ AVAILABLE_CODE_GENERATIVE_MODELS = [
+     "bigcode/starcoder",             # Large and capable
+     "Salesforce/codegen-350M-mono",  # Smaller, good for quick tasks
+     "microsoft/CodeGPT-small-py",    # Smaller, good for quick tasks
+     "google/flan-t5-xl",             # General instruction-following model
+     "facebook/bart-large-cnn",       # Summarization model, not code-specific
+ ]
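+ # A hedged sketch (an assumption, not part of this commit) of how one of these
+ # models could back generate_code() through the transformers pipeline API:
+ #
+ #     generator = pipeline("text-generation", model="Salesforce/codegen-350M-mono")
+ #     print(generator("# Python function that reverses a string\n", max_new_tokens=64)[0]["generated_text"])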
+
+ # Load a pre-trained RAG retriever plus the generator that uses it
+ # (the retriever alone cannot generate text)
+ rag_tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-base")
+ rag_retriever = RagRetriever.from_pretrained(
+     "facebook/rag-token-base", index_name="exact", use_dummy_dataset=True
+ )
+ rag_model = RagTokenForGeneration.from_pretrained(
+     "facebook/rag-token-base", retriever=rag_retriever
+ )
+
+ # Load a pre-trained chat model (DialoGPT is a causal LM, not a seq2seq model)
+ chat_model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-medium")
+
+ # Load the matching tokenizer
+ tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-medium")
+
+ def process_input(user_input):
+     # RAG stage: retrieve supporting passages and draft an answer
+     rag_inputs = rag_tokenizer(user_input, return_tensors="pt")
+     with torch.no_grad():
+         rag_output_ids = rag_model.generate(input_ids=rag_inputs["input_ids"])
+     draft = rag_tokenizer.batch_decode(rag_output_ids, skip_special_tokens=True)[0]
+
+     # Chat stage: feed the draft through the dialogue model to refine it
+     chat_inputs = tokenizer(draft + tokenizer.eos_token, return_tensors="pt")
+     with torch.no_grad():
+         chat_output_ids = chat_model.generate(
+             chat_inputs["input_ids"],
+             attention_mask=chat_inputs["attention_mask"],
+             max_new_tokens=100,
+             pad_token_id=tokenizer.eos_token_id,
+         )
+
+     # Output stage: decode only the newly generated refinement
+     return tokenizer.decode(
+         chat_output_ids[0][chat_inputs["input_ids"].shape[-1]:],
+         skip_special_tokens=True,
+     )
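+ # Example usage (an assumption, not part of this commit):
+ #
+ #     print(process_input("What is a Streamlit Space?"))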

  class AIAgent:
+     def __init__(self, name, description, skills, hf_api=None):
          self.name = name
          self.description = description
          self.skills = skills
+         self._hf_api = hf_api
+         self._hf_token = hf_token  # Store the module-level token on the agent

+     @property
+     def hf_api(self):
+         # Lazily create the HfApi client once a valid token is available
+         if not self._hf_api and self.has_valid_hf_token():
+             self._hf_api = HfApi(token=self._hf_token)
+         return self._hf_api
+
+     def has_valid_hf_token(self):
+         return bool(self._hf_token)
+
+     def autonomous_build(self, chat_history, workspace_projects, project_name, selected_model, hf_token):
+         self._hf_token = hf_token
          summary = "Chat History:\n" + "\n".join([f"User: {u}\nAgent: {a}" for u, a in chat_history])
          summary += "\n\nWorkspace Projects:\n" + "\n".join([f"{p}: {details}" for p, details in workspace_projects.items()])

+         # Analyze chat history and workspace projects to suggest actions, e.g.:
+         # - Check if the user has requested to create a new file
+         # - Check if the user has requested to install a package
+         # - Check if the user has requested to run a command
+         # - Check if the user has requested to generate code
+         # - Check if the user has requested to translate code
+         # - Check if the user has requested to summarize text
+         # - Check if the user has requested to analyze sentiment
+         # (a sketch of such an intent check follows below)
+
+         # Generate a response based on the analysis
          next_step = "Based on the current state, the next logical step is to implement the main application logic."
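+         # A hedged sketch (an assumption, not part of this commit) of the keyword-based
+         # intent analysis described above; _suggest_next_step is a hypothetical helper
+         # that could replace the hard-coded next_step string.
+         def _suggest_next_step(summary_text):
+             triggers = {
+                 "create a new file": "Create the requested file in the workspace.",
+                 "install": "Append the package to requirements.txt and install it.",
+                 "run a command": "Execute the command via terminal_interface().",
+                 "generate code": "Call generate_code() with the selected model.",
+             }
+             lowered = summary_text.lower()
+             for phrase, action in triggers.items():
+                 if phrase in lowered:
+                     return action
+             return "Implement the main application logic."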

+         # Ensure project folder exists
          project_path = os.path.join(PROJECT_ROOT, project_name)
+         if not os.path.exists(project_path):
+             os.makedirs(project_path)
+
+         # Create requirements.txt if it doesn't exist
+         requirements_file = os.path.join(project_path, "requirements.txt")
+         if not os.path.exists(requirements_file):
+             with open(requirements_file, "w") as f:
+                 f.write("# Add your project's dependencies here\n")
+
+         # Create app.py if it doesn't exist
+         app_file = os.path.join(project_path, "app.py")
+         if not os.path.exists(app_file):
+             with open(app_file, "w") as f:
+                 f.write("# Your project's main application logic goes here\n")
+
+         # Generate GUI code for app.py if requested
+         if "create a gui" in summary.lower():
+             gui_code = generate_code("Create a simple GUI for this application", selected_model)
+             with open(app_file, "a") as f:
+                 f.write(gui_code)
+
+         # Run the default build process
+         build_command = "pip install -r requirements.txt && python app.py"
+         try:
+             result = subprocess.run(build_command, shell=True, capture_output=True, text=True, cwd=project_path)
+             st.write(f"Build Output:\n{result.stdout}")
+             if result.stderr:
+                 st.error(f"Build Errors:\n{result.stderr}")
+         except Exception as e:
+             st.error(f"Build Error: {e}")
+
+         return summary, next_step

+     def deploy_built_space_to_hf(self):
+         if not self.hf_api or not self.has_valid_hf_token():
+             raise ValueError("Cannot deploy the Space: no valid Hugging Face API connection was established.")
+
+         # Gather the files that make up the Space
+         repository_name = f"my-awesome-space_{datetime.now().timestamp()}"
+         files = get_built_space_files()
+
+         # Create the Space
+         create_space(self.hf_api, repository_name, "Description", True, files)
+
+         st.markdown("## Congratulations! Successfully deployed Space 🚀 ##")
+         st.markdown(f"[Check out your new Space here](https://huggingface.co/spaces/{repository_name})")
+
+
+ def get_built_space_files():
+     # Placeholder: replace with logic that gathers the files you want to deploy
+     return {
+         "app.py": "# Your Streamlit app code here",
+         "requirements.txt": "streamlit\ntransformers",
+         # Add other files as needed
+     }
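+ # A hedged sketch (an assumption, not part of this commit) of a version that gathers
+ # the real files from a project folder under PROJECT_ROOT instead of stub contents:
+ #
+ #     def get_built_space_files(project_name="my-project"):
+ #         project_path = os.path.join(PROJECT_ROOT, project_name)
+ #         return {
+ #             fname: open(os.path.join(project_path, fname), encoding="utf-8").read()
+ #             for fname in os.listdir(project_path)
+ #             if os.path.isfile(os.path.join(project_path, fname))
+ #         }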
+
+ # ... (Rest of your existing functions: save_agent_to_file, load_agent_prompt,
+ # create_agent_from_text, chat_interface_with_agent, terminal_interface,
+ # code_editor_interface, summarize_text, sentiment_analysis, translate_code,
+ # generate_code, chat_interface, workspace_interface, add_code_to_workspace)
+
+ def create_space(api, name, description, public, files):
+     # Create the Space repo, then upload each file through the official HfApi
+     # methods (this replaces a hand-rolled HTTP call to a nonexistent endpoint)
+     api.create_repo(repo_id=name, repo_type="space", space_sdk="streamlit", private=not public)
+     for filename, contents in files.items():
+         api.upload_file(
+             path_or_fileobj=contents.encode("utf-8"),
+             path_in_repo=filename,
+             repo_id=name,
+             repo_type="space",
+         )
+     return name
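+ # Example usage (an assumption, not part of this commit):
+ #
+ #     api = HfApi(token=hf_token)
+ #     create_space(api, "my-demo-space", "Demo Space", True, get_built_space_files())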

  # Streamlit App
  st.title("AI Agent Creator")

@@ -190,6 +209,8 @@
  st.sidebar.title("Navigation")
  app_mode = st.sidebar.selectbox("Choose the app mode", ["AI Agent Creator", "Tool Box", "Workspace Chat App"])

+ # ... (Rest of your Streamlit app logic, including the 'Automate' button callback)
+
  if app_mode == "AI Agent Creator":
      # AI Agent Creator
      st.header("Create an AI Agent from Text")

@@ -202,132 +223,17 @@
      st.success(f"Agent '{agent_name}' created and saved successfully.")
      st.session_state.available_agents.append(agent_name)

+ # ... (Rest of your Streamlit app logic for other app modes)
+
+ # Using the extended class and functions, the 'Automate' button callback becomes:
+ # (selected_agent, project_name, and selected_model are assumed to be defined in
+ # the elided Workspace Chat App section above)
+ if st.button("Automate"):
+     agent = AIAgent(selected_agent, "", [])  # Load the agent without skills for now
+     summary, next_step = agent.autonomous_build(st.session_state.chat_history, st.session_state.workspace_projects, project_name, selected_model, hf_token)
+     st.write("Autonomous Build Summary:")
+     st.write(summary)
+     st.write("Next Step:")
+     st.write(next_step)
+
+     # If everything went well, proceed to deploy the Space
+     if agent.hf_api and agent.has_valid_hf_token():
+         agent.deploy_built_space_to_hf()