add stream

This commit is contained in:
2026-05-07 16:43:32 -03:00
parent c4d704f473
commit 61552cffa9
3 changed files with 96 additions and 60 deletions
+72 -54
View File
@@ -1212,18 +1212,20 @@ class ai:
} }
@MethodHook @MethodHook
def ask_copilot(self, terminal_buffer, user_question, node_info=None): def ask_copilot(self, terminal_buffer, user_question, node_info=None, chunk_callback=None):
"""Single-shot copilot for augmented terminal sessions. """Single-shot copilot for augmented terminal sessions.
Args: Args:
terminal_buffer: Sanitized terminal screen content (últimas N líneas). terminal_buffer: Sanitized terminal screen content (últimas N líneas).
user_question: Pregunta del usuario sobre la sesión activa. user_question: Pregunta del usuario sobre la sesión activa.
node_info: Optional dict con metadata del nodo (os, name, etc.) node_info: Optional dict con metadata del nodo (os, name, etc.)
chunk_callback: Optional callable for streaming the guide.
Returns: Returns:
dict: {commands: list[str], guide: str, risk_level: str, error: str|None} dict: {commands: list[str], guide: str, risk_level: str, error: str|None}
""" """
import json import json
import re
node_info = node_info or {} node_info = node_info or {}
os_info = node_info.get("os", "unknown") os_info = node_info.get("os", "unknown")
@@ -1232,11 +1234,21 @@ class ai:
system_prompt = f"""Role: TERMINAL COPILOT. You assist a network engineer during a live SSH session. system_prompt = f"""Role: TERMINAL COPILOT. You assist a network engineer during a live SSH session.
Rules: Rules:
1. Answer the user's question directly based on the Terminal Context. 1. Answer the user's question directly based on the Terminal Context.
2. If the user asks you to analyze, parse, or extract data from the Terminal Context, DO IT directly in the 'guide' section (you can use markdown tables or lists). Do NOT just give them a command to do it themselves. 2. If the user asks you to analyze, parse, or extract data from the Terminal Context, DO IT directly in the <guide> section (you can use markdown tables or lists). Do NOT just give them a command to do it themselves.
3. If the user wants to execute an action, provide the required CLI commands in the 'commands' array. If no commands are needed, leave it empty. 3. If the user wants to execute an action, provide the required CLI commands inside a <commands> block, one command per line. If no commands are needed, leave it empty or omit the block.
4. ULTRA-CONCISE. Keep your guide to the point. 4. ULTRA-CONCISE. Keep your guide to the point.
5. You MUST call provide_copilot_assistance with your response. 5. You MUST output your response in the following strict format:
6. risk_level: "low" for read-only/no commands, "high" for config changes, "destructive" for potentially dangerous ops. <guide>
Your brief tactical guide in markdown. 3-4 sentences max.
</guide>
<commands>
command 1
command 2
</commands>
<risk>
low, high, or destructive
</risk>
6. Risk level: "low" for read-only/no commands, "high" for config changes, "destructive" for potentially dangerous ops.
Terminal Context: Terminal Context:
{terminal_buffer} {terminal_buffer}
@@ -1244,34 +1256,6 @@ Terminal Context:
Device OS: {os_info} Device OS: {os_info}
Node: {node_name}""" Node: {node_name}"""
tools = [{
"type": "function",
"function": {
"name": "provide_copilot_assistance",
"description": "Provide terminal copilot assistance with suggested commands and a brief guide.",
"parameters": {
"type": "object",
"properties": {
"commands": {
"type": "array",
"items": {"type": "string"},
"description": "Ordered list of CLI commands. Each item is one command line."
},
"guide": {
"type": "string",
"description": "Brief tactical guide in markdown. 3-4 sentences max."
},
"risk_level": {
"type": "string",
"enum": ["low", "high", "destructive"],
"description": "Risk level: low=read-only, high=config change, destructive=dangerous."
}
},
"required": ["commands", "guide", "risk_level"]
}
}
}]
messages = [ messages = [
{"role": "system", "content": system_prompt}, {"role": "system", "content": system_prompt},
{"role": "user", "content": user_question} {"role": "user", "content": user_question}
@@ -1281,32 +1265,66 @@ Node: {node_name}"""
response = completion( response = completion(
model=self.engineer_model, model=self.engineer_model,
messages=messages, messages=messages,
tools=tools,
tool_choice={"type": "function", "function": {"name": "provide_copilot_assistance"}},
api_key=self.engineer_key, api_key=self.engineer_key,
stream=False stream=True
) )
message = response.choices[0].message full_content = ""
if hasattr(message, "tool_calls") and message.tool_calls: streamed_guide = ""
for tool_call in message.tool_calls:
if tool_call.function.name == "provide_copilot_assistance": for chunk in response:
try: delta = chunk.choices[0].delta
args = json.loads(tool_call.function.arguments) if hasattr(delta, 'content') and delta.content:
return { full_content += delta.content
"commands": args.get("commands", []),
"guide": args.get("guide", ""), if chunk_callback:
"risk_level": args.get("risk_level", "low"), start_idx = full_content.find("<guide>")
"error": None if start_idx != -1:
} after_start = full_content[start_idx + 7:]
except json.JSONDecodeError: end_idx = after_start.find("</guide>")
pass
# Fallback if no tool called or decode error if end_idx != -1:
current_guide = after_start[:end_idx]
else:
current_guide = after_start
if current_guide.endswith("<"): current_guide = current_guide[:-1]
elif current_guide.endswith("</"): current_guide = current_guide[:-2]
elif current_guide.endswith("</g"): current_guide = current_guide[:-3]
elif current_guide.endswith("</gu"): current_guide = current_guide[:-4]
elif current_guide.endswith("</gui"): current_guide = current_guide[:-5]
elif current_guide.endswith("</guid"): current_guide = current_guide[:-6]
elif current_guide.endswith("</guide"): current_guide = current_guide[:-7]
new_text = current_guide[len(streamed_guide):]
if new_text:
chunk_callback(new_text)
streamed_guide += new_text
guide = ""
commands = []
risk_level = "low"
guide_match = re.search(r"<guide>(.*?)</guide>", full_content, re.DOTALL)
if guide_match:
guide = guide_match.group(1).strip()
cmd_match = re.search(r"<commands>(.*?)</commands>", full_content, re.DOTALL)
if cmd_match:
cmds_raw = cmd_match.group(1).strip()
if cmds_raw:
commands = [c.strip() for c in cmds_raw.split('\n') if c.strip()]
risk_match = re.search(r"<risk>(.*?)</risk>", full_content, re.DOTALL)
if risk_match:
risk_level = risk_match.group(1).strip().lower()
if not guide and full_content and not ("<guide>" in full_content):
guide = full_content.strip()
return { return {
"commands": [], "commands": commands,
"guide": getattr(message, "content", "") or "Could not parse response.", "guide": guide,
"risk_level": "low", "risk_level": risk_level,
"error": None "error": None
} }
+22 -4
View File
@@ -664,15 +664,33 @@ class node:
else: else:
enriched_question = question enriched_question = question
with console.status("[bold cyan]Thinking...[/bold cyan]", spinner="dots"): from rich.live import Live
result = await asyncio.to_thread(service.ask_copilot, active_buffer, enriched_question, node_info)
live_text = "Thinking..."
panel = Panel(live_text, title="[bold cyan]Copilot Guide[/bold cyan]", border_style="cyan")
def on_chunk(text):
nonlocal live_text
if live_text == "Thinking...":
live_text = ""
live_text += text
try:
# Use call_soon_threadsafe if possible, but rich Live is thread-safe enough
loop.call_soon_threadsafe(
lambda: live.update(Panel(Markdown(live_text), title="[bold cyan]Copilot Guide[/bold cyan]", border_style="cyan"))
)
except Exception:
live.update(Panel(Markdown(live_text), title="[bold cyan]Copilot Guide[/bold cyan]", border_style="cyan"))
with Live(panel, console=console, refresh_per_second=10) as live:
result = await asyncio.to_thread(service.ask_copilot, active_buffer, enriched_question, node_info, chunk_callback=on_chunk)
if result.get("error"): if result.get("error"):
console.print(f"[red]Error: {result['error']}[/red]") console.print(f"[red]Error: {result['error']}[/red]")
return return
# 4. Renderizar respuesta # If nothing was streamed (fallback), or to ensure final state
if result.get("guide"): if live_text == "Thinking..." and result.get("guide"):
console.print(Panel( console.print(Panel(
Markdown(result["guide"]), Markdown(result["guide"]),
title="[bold cyan]Copilot Guide[/bold cyan]", title="[bold cyan]Copilot Guide[/bold cyan]",
+2 -2
View File
@@ -17,11 +17,11 @@ class AIService(BaseService):
agent = ai(self.config, console=console) agent = ai(self.config, console=console)
return agent.confirm(input_text) return agent.confirm(input_text)
def ask_copilot(self, terminal_buffer, user_question, node_info=None, chunk_callback=None):
    """Ask the AI copilot for terminal assistance.

    Thin delegation wrapper: builds a connpy ``ai`` agent from this
    service's config and forwards the call, passing the optional
    streaming ``chunk_callback`` through unchanged.
    """
    from connpy.ai import ai

    copilot = ai(self.config)
    return copilot.ask_copilot(
        terminal_buffer,
        user_question,
        node_info,
        chunk_callback=chunk_callback,
    )
def list_sessions(self): def list_sessions(self):