connpy v6.0.0b4: AI Stability, Remote Sync & UI Polish (Clean Commit)

This commit is contained in: [branch/tag list lost in extraction]
Committed: 2026-05-01 18:55:25 -03:00
parent c81f6e049f
commit a192bd1912
18 changed files with 717 additions and 292 deletions
+128 -25
View File
@@ -1182,9 +1182,12 @@ class ai:
if not debug and not chunk_callback:
if not is_streaming_text:
# Stop spinner before starting live display
# Stop spinner definitively
if status:
status.stop()
try:
status.stop()
except Exception:
pass
live_display = Live(
Panel(Markdown(full_content), title=title, border_style=border, expand=False),
console=self.console,
@@ -1372,7 +1375,7 @@ class ai:
tail_limit = int(final_limit * 0.4)
return (text[:head_limit] + f"\n\n[... OUTPUT TRUNCATED ...]\n\n" + text[-tail_limit:])
def _print_debug_observation(self, fn, obs):
def _print_debug_observation(self, fn, obs, status=None):
"""Prints a tool observation in a readable way during debug mode."""
# Try to parse as JSON if it's a string
if isinstance(obs, str):
@@ -1396,6 +1399,7 @@ class ai:
content = Text("Empty data set")
else:
# Add a small spacer instead of a Rule for cleaner look
from rich.console import Group
content = Group(*elements)
elif isinstance(obs_data, list):
content = Text("\n".join(f"• {item}" for item in obs_data))
@@ -1403,7 +1407,18 @@ class ai:
content = Text(str(obs_data))
title = f"[bold]{fn}[/bold]"
# Stop status before printing panel to avoid ghosting
if status:
try: status.stop()
except: pass
self.console.print(Panel(content, title=title, border_style="ai_status"))
# Resume status
if status:
try: status.start()
except: pass
def manage_memory_tool(self, content, action="append"):
"""Save or update long-term memory. Only use when user explicitly requests it."""
@@ -1604,7 +1619,7 @@ class ai:
elif fn in self.tool_status_formatters: status.update(self.tool_status_formatters[fn](args))
if debug:
self._print_debug_observation(f"Decision: {fn}", args)
self._print_debug_observation(f"Decision: {fn}", args, status=status)
if fn == "list_nodes": obs = self.list_nodes_tool(**args)
elif fn == "run_commands": obs = self.run_commands_tool(**args, status=status)
@@ -1613,7 +1628,7 @@ class ai:
else: obs = f"Error: Unknown tool '{fn}'."
if debug:
self._print_debug_observation(f"Observation: {fn}", obs)
self._print_debug_observation(f"Observation: {fn}", obs, status=status)
# Ensure observation is a string and truncated for the LLM
obs_str = obs if isinstance(obs, str) else json.dumps(obs)
@@ -1883,7 +1898,7 @@ class ai:
streamed_response = False
try:
safe_messages = self._sanitize_messages(messages)
if stream and (not debug or chunk_callback):
if stream and chunk_callback:
response, streamed_response = self._stream_completion(
model=model, messages=safe_messages, tools=tools, api_key=key,
status=status, label=label, debug=debug, num_retries=3,
@@ -1926,7 +1941,13 @@ class ai:
# In CLI debug mode, only print intermediate reasoning if there are tool calls.
# If there are no tool calls, this content is the final answer and will be printed by the caller.
if resp_msg.tool_calls:
if status:
try: status.stop()
except: pass
self.console.print(Panel(Markdown(resp_msg.content), title=f"[{current_brain}][bold]{label} Reasoning[/bold][/{current_brain}]", border_style="architect" if current_brain == "architect" else "engineer"))
if status:
try: status.start()
except: pass
if not resp_msg.tool_calls: break
@@ -1947,7 +1968,7 @@ class ai:
elif fn == "manage_memory_tool": status.update(f"[architect]Architect: [UPDATING MEMORY]")
if debug:
self._print_debug_observation(f"Decision: {fn}", args)
self._print_debug_observation(f"Decision: {fn}", args, status=status)
if fn == "delegate_to_engineer":
obs, eng_usage = self._engineer_loop(args["task"], status=status, debug=debug, chat_history=messages[:-1])
@@ -1966,7 +1987,14 @@ class ai:
num_retries=3
)
obs = claude_resp.choices[0].message.content
if debug: self.console.print(Panel(Markdown(obs), title="[architect]Architect Consultation[/architect]", border_style="architect"))
if debug:
if status:
try: status.stop()
except: pass
self.console.print(Panel(Markdown(obs), title="[architect]Architect Consultation[/architect]", border_style="architect"))
if status:
try: status.start()
except: pass
except Exception as e:
if status: status.update("[unavailable]Architect unavailable! Engineer continuing alone...")
obs = f"Architect unavailable ({str(e)}). Proceeding with your best technical judgment."
@@ -1983,7 +2011,14 @@ class ai:
handover_msg = f"HANDOVER FROM EXECUTION ENGINE\n\nReason: {args['reason']}\n\nContext: {args['context']}\n\nYou are now in control of this conversation."
pending_user_message = handover_msg
obs = "Control transferred to Architect. Handover context will be provided."
if debug: self.console.print(Panel(Text(handover_msg), title="[architect]Escalation to Architect[/architect]", border_style="architect"))
if debug:
if status:
try: status.stop()
except: pass
self.console.print(Panel(Text(handover_msg), title="[architect]Escalation to Architect[/architect]", border_style="architect"))
if status:
try: status.start()
except: pass
elif fn == "return_to_engineer":
if status: status.update("[engineer]Transferring control back to Engineer...")
@@ -1997,7 +2032,14 @@ class ai:
handover_msg = f"HANDOVER FROM ARCHITECT\n\nSummary: {args['summary']}\n\nYou are now back in control. Continue handling the user's requests."
pending_user_message = handover_msg
obs = "Control returned to Engineer. Handover summary will be provided."
if debug: self.console.print(Panel(Text(handover_msg), title="[engineer]Return to Engineer[/engineer]", border_style="engineer"))
if debug:
if status:
try: status.stop()
except: pass
self.console.print(Panel(Text(handover_msg), title="[engineer]Return to Engineer[/engineer]", border_style="engineer"))
if status:
try: status.start()
except: pass
elif fn == "list_nodes": obs = self.list_nodes_tool(**args)
elif fn == "run_commands": obs = self.run_commands_tool(**args, status=status)
@@ -2007,7 +2049,7 @@ class ai:
else: obs = f"Error: {fn} unknown."
if debug and fn not in ["delegate_to_engineer", "consult_architect", "escalate_to_architect", "return_to_engineer"]:
self._print_debug_observation(f"Observation: {fn}", obs)
self._print_debug_observation(f"Observation: {fn}", obs, status=status)
# Ensure observation is a string and truncated for the LLM
obs_str = obs if isinstance(obs, str) else json.dumps(obs)
@@ -2229,7 +2271,7 @@ def ask(self, user_input, dryrun=False, chat_history=None, status=None, debug=Fa
streamed_response = False
try:
safe_messages = self._sanitize_messages(messages)
if stream and (not debug or chunk_callback):
if stream and chunk_callback:
response, streamed_response = self._stream_completion(
model=model, messages=safe_messages, tools=tools, api_key=key,
status=status, label=label, debug=debug, num_retries=3,
@@ -2272,7 +2314,13 @@ def ask(self, user_input, dryrun=False, chat_history=None, status=None, debug=Fa
# In CLI debug mode, only print intermediate reasoning if there are tool calls.
# If there are no tool calls, this content is the final answer and will be printed by the caller.
if resp_msg.tool_calls:
if status:
try: status.stop()
except: pass
self.console.print(Panel(Markdown(resp_msg.content), title=f"[{current_brain}][bold]{label} Reasoning[/bold][/{current_brain}]", border_style="architect" if current_brain == "architect" else "engineer"))
if status:
try: status.start()
except: pass
if not resp_msg.tool_calls: break
@@ -2293,7 +2341,7 @@ def ask(self, user_input, dryrun=False, chat_history=None, status=None, debug=Fa
elif fn == "manage_memory_tool": status.update(f"[architect]Architect: [UPDATING MEMORY]")
if debug:
self._print_debug_observation(f"Decision: {fn}", args)
self._print_debug_observation(f"Decision: {fn}", args, status=status)
if fn == "delegate_to_engineer":
obs, eng_usage = self._engineer_loop(args["task"], status=status, debug=debug, chat_history=messages[:-1])
@@ -2312,7 +2360,14 @@ def ask(self, user_input, dryrun=False, chat_history=None, status=None, debug=Fa
num_retries=3
)
obs = claude_resp.choices[0].message.content
if debug: self.console.print(Panel(Markdown(obs), title="[architect]Architect Consultation[/architect]", border_style="architect"))
if debug:
if status:
try: status.stop()
except: pass
self.console.print(Panel(Markdown(obs), title="[architect]Architect Consultation[/architect]", border_style="architect"))
if status:
try: status.start()
except: pass
except Exception as e:
if status: status.update("[unavailable]Architect unavailable! Engineer continuing alone...")
obs = f"Architect unavailable ({str(e)}). Proceeding with your best technical judgment."
@@ -2329,7 +2384,14 @@ def ask(self, user_input, dryrun=False, chat_history=None, status=None, debug=Fa
handover_msg = f"HANDOVER FROM EXECUTION ENGINE\n\nReason: {args['reason']}\n\nContext: {args['context']}\n\nYou are now in control of this conversation."
pending_user_message = handover_msg
obs = "Control transferred to Architect. Handover context will be provided."
if debug: self.console.print(Panel(Text(handover_msg), title="[architect]Escalation to Architect[/architect]", border_style="architect"))
if debug:
if status:
try: status.stop()
except: pass
self.console.print(Panel(Text(handover_msg), title="[architect]Escalation to Architect[/architect]", border_style="architect"))
if status:
try: status.start()
except: pass
elif fn == "return_to_engineer":
if status: status.update("[engineer]Transferring control back to Engineer...")
@@ -2343,7 +2405,14 @@ def ask(self, user_input, dryrun=False, chat_history=None, status=None, debug=Fa
handover_msg = f"HANDOVER FROM ARCHITECT\n\nSummary: {args['summary']}\n\nYou are now back in control. Continue handling the user's requests."
pending_user_message = handover_msg
obs = "Control returned to Engineer. Handover summary will be provided."
if debug: self.console.print(Panel(Text(handover_msg), title="[engineer]Return to Engineer[/engineer]", border_style="engineer"))
if debug:
if status:
try: status.stop()
except: pass
self.console.print(Panel(Text(handover_msg), title="[engineer]Return to Engineer[/engineer]", border_style="engineer"))
if status:
try: status.start()
except: pass
elif fn == "list_nodes": obs = self.list_nodes_tool(**args)
elif fn == "run_commands": obs = self.run_commands_tool(**args, status=status)
@@ -2353,7 +2422,7 @@ def ask(self, user_input, dryrun=False, chat_history=None, status=None, debug=Fa
else: obs = f"Error: {fn} unknown."
if debug and fn not in ["delegate_to_engineer", "consult_architect", "escalate_to_architect", "return_to_engineer"]:
self._print_debug_observation(f"Observation: {fn}", obs)
self._print_debug_observation(f"Observation: {fn}", obs, status=status)
# Ensure observation is a string and truncated for the LLM
obs_str = obs if isinstance(obs, str) else json.dumps(obs)
@@ -3748,6 +3817,42 @@ class node:
else:
jumphost_cmd = jumphost_cmd + " {}".format("@".join([self.jumphost["user"],self.jumphost["host"]]))
self.jumphost = f"-o ProxyCommand=\"{jumphost_cmd}\""
elif self.jumphost["protocol"] == "ssm":
ssm_target = self.jumphost["host"]
ssm_cmd = f"aws ssm start-session --target {ssm_target} --document-name AWS-StartSSHSession --parameters 'portNumber=22'"
if isinstance(self.jumphost.get("tags"), dict):
if "profile" in self.jumphost["tags"]:
ssm_cmd += f" --profile {self.jumphost['tags']['profile']}"
if "region" in self.jumphost["tags"]:
ssm_cmd += f" --region {self.jumphost['tags']['region']}"
if self.jumphost["options"] != '':
ssm_cmd += f" {self.jumphost['options']}"
bastion_user_part = f"{self.jumphost['user']}@{ssm_target}" if self.jumphost['user'] else ssm_target
ssh_opts = ""
if isinstance(self.jumphost.get("tags"), dict) and "ssh_options" in self.jumphost["tags"]:
ssh_opts = f" {self.jumphost['tags']['ssh_options']}"
inner_ssh = f"ssh{ssh_opts} -o ProxyCommand='{ssm_cmd}' -W %h:%p {bastion_user_part}"
self.jumphost = f"-o ProxyCommand=\"{inner_ssh}\""
elif self.jumphost["protocol"] in ["kubectl", "docker"]:
nc_cmd = "nc"
if isinstance(self.jumphost.get("tags"), dict) and "nc_command" in self.jumphost["tags"]:
nc_cmd = self.jumphost["tags"]["nc_command"]
if self.jumphost["protocol"] == "kubectl":
proxy_cmd = f"kubectl exec "
if self.jumphost["options"] != '':
proxy_cmd += f"{self.jumphost['options']} "
proxy_cmd += f"{self.jumphost['host']} -i -- {nc_cmd} %h %p"
else:
proxy_cmd = f"docker "
if self.jumphost["options"] != '':
proxy_cmd += f"{self.jumphost['options']} "
proxy_cmd += f"exec -i {self.jumphost['host']} {nc_cmd} %h %p"
self.jumphost = f"-o ProxyCommand=\"{proxy_cmd}\""
else:
self.jumphost = ""
@@ -4003,8 +4108,6 @@ class node:
self.mylog.write(data)
async def keepalive_task():
if self.idletime <= 0:
return
while True:
await asyncio.sleep(1)
if time() - self.lastinput >= self.idletime:
@@ -4015,8 +4118,6 @@ class node:
pass
async def savelog_task():
if not hasattr(self, 'logfile') or not hasattr(self, 'mylog'):
return
prev_size = 0
while True:
await asyncio.sleep(5)
@@ -4035,10 +4136,12 @@ class node:
# We want to exit if either happens, so return_exceptions=False, but we need to cancel the others.
tasks = [
asyncio.create_task(ingress_task()),
asyncio.create_task(egress_task()),
asyncio.create_task(keepalive_task()),
asyncio.create_task(savelog_task())
asyncio.create_task(egress_task())
]
if self.idletime > 0:
tasks.append(asyncio.create_task(keepalive_task()))
if hasattr(self, 'logfile') and hasattr(self, 'mylog'):
tasks.append(asyncio.create_task(savelog_task()))
done, pending = await asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED)
for p in pending:
p.cancel()
@@ -4404,7 +4507,7 @@ class node:
"telnet": ['[u|U]sername:', 'refused', 'supported', 'invalid|unrecognized option', 'ssh-keygen.*\"', 'timeout|timed.out', 'unavailable', 'closed', password_prompt, prompt, 'suspend', pexpect.EOF, pexpect.TIMEOUT, "No route to host", "resolve hostname", "no matching", "[b|B]ad (owner|permissions)"],
"kubectl": ['[u|U]sername:', '[r|R]efused', '[E|e]rror', 'DEPRECATED', pexpect.TIMEOUT, password_prompt, prompt, pexpect.EOF, "expired|invalid"],
"docker": ['[u|U]sername:', 'Cannot', '[E|e]rror', 'failed', 'not a docker command', 'unknown', 'unable to resolve', pexpect.TIMEOUT, password_prompt, prompt, pexpect.EOF],
"ssm": ['[u|U]sername:', 'Cannot', '[E|e]rror', 'failed', 'SessionManagerPlugin', 'unknown', 'unable to resolve', pexpect.TIMEOUT, password_prompt, prompt, pexpect.EOF]
"ssm": ['[u|U]sername:', 'Cannot', '[E|e]rror', 'failed', 'SessionManagerPlugin', '[u|U]nknown', 'unable to resolve', pexpect.TIMEOUT, password_prompt, prompt, pexpect.EOF]
}
error_indices = {