diff --git a/connpy/_version.py b/connpy/_version.py index 290678f..c73ef42 100644 --- a/connpy/_version.py +++ b/connpy/_version.py @@ -1 +1 @@ -__version__ = "5.0b5" +__version__ = "5.0b6" diff --git a/connpy/ai.py b/connpy/ai.py index 8854ddc..e0759f3 100755 --- a/connpy/ai.py +++ b/connpy/ai.py @@ -13,11 +13,11 @@ litellm.set_verbose = False from .hooks import ClassHook, MethodHook from . import printer from rich.markdown import Markdown -from rich.console import Console from rich.panel import Panel from rich.text import Text -console = Console() +console = printer.console + @ClassHook class ai: @@ -62,7 +62,7 @@ class ai: self.architect_prompt_extensions = [] # Extra text for architect prompt # Long-term memory - self.memory_path = os.path.expanduser("~/.config/conn/ai_memory.md") + self.memory_path = os.path.join(self.config.defaultdir, "ai_memory.md") self.long_term_memory = "" if os.path.exists(self.memory_path): try: @@ -75,6 +75,12 @@ class ai: except Exception as e: console.print(f"[yellow]Warning: Failed to load AI memory: {e}[/yellow]") + # Session Management + self.sessions_dir = os.path.join(self.config.defaultdir, "ai_sessions") + os.makedirs(self.sessions_dir, exist_ok=True) + self.session_id = None + self.session_path = None + # Prompts base agnósticos self._engineer_base_prompt = dedent(f""" Role: TECHNICAL EXECUTION ENGINE. @@ -190,7 +196,7 @@ class ai: # Determine styling based on current brain role_label = "Network Architect" if "architect" in label.lower() else "Network Engineer" - border = "purple" if "architect" in label.lower() else "blue" + border = "medium_purple" if "architect" in label.lower() else "blue" title = f"[bold {border}]{role_label}[/bold {border}]" try: @@ -290,14 +296,34 @@ class ai: 2. No user/system messages appear between tool_calls and tool responses 3. Orphaned tool_calls at the end are removed 4. Orphaned tool responses without a preceding tool_call are removed + 5. 
Incompatible metadata like cache_control is stripped for non-Anthropic models """ if not messages: return messages + # Pre-process messages to pull text from list contents (Anthropic cache format) + # and remove explicit cache keys. + pre_sanitized = [] + for msg in messages: + m = msg.copy() if isinstance(msg, dict) else msg.model_dump(exclude_none=True) + + # Convert content list to plain string if it's a system message with caching metadata + if m.get('role') == 'system' and isinstance(m.get('content'), list): + # Extraer texto de [{"type": "text", "text": "...", "cache_control": ...}] + m['content'] = m['content'][0]['text'] if m['content'] else "" + + # Remove any explicit cache_control key anywhere + if 'cache_control' in m: del m['cache_control'] + if isinstance(m.get('content'), list): + for item in m['content']: + if isinstance(item, dict) and 'cache_control' in item: del item['cache_control'] + + pre_sanitized.append(m) + sanitized = [] i = 0 - while i < len(messages): - msg = messages[i] + while i < len(pre_sanitized): + msg = pre_sanitized[i] role = msg.get('role', '') if role == 'assistant' and msg.get('tool_calls'): @@ -311,8 +337,8 @@ class ai: # Look ahead for matching tool responses tool_responses = [] j = i + 1 - while j < len(messages): - next_msg = messages[j] + while j < len(pre_sanitized): + next_msg = pre_sanitized[j] if next_msg.get('role') == 'tool': tool_responses.append(next_msg) j += 1 @@ -470,23 +496,16 @@ class ai: def _engineer_loop(self, task, status=None, debug=False, chat_history=None): """Internal loop where the Engineer executes technical tasks for the Architect.""" - # Optimización de caché para el Ingeniero - if "claude" in self.engineer_model.lower(): + # Optimización de caché para el Ingeniero (Solo para Anthropic directo, Vertex tiene reglas distintas) + if "claude" in self.engineer_model.lower() and "vertex" not in self.engineer_model.lower(): messages = [{"role": "system", "content": [{"type": "text", "text": 
self.engineer_system_prompt, "cache_control": {"type": "ephemeral"}}]}] else: messages = [{"role": "system", "content": self.engineer_system_prompt}] if chat_history: - # Clean chat history from caching metadata if engineer is not Claude - if "claude" not in self.engineer_model.lower(): - cleaned_history = [] - for msg in chat_history[-5:]: - m = msg if isinstance(msg, dict) else msg.model_dump(exclude_none=True) - # Remove cache_control from system messages - if m.get('role') == 'system' and isinstance(m.get('content'), list): - m['content'] = m['content'][0]['text'] if m['content'] else "" - cleaned_history.append(m) - messages.extend(cleaned_history) + # Clean chat history from caching metadata if engineer is not a compatible Claude model + if "claude" not in self.engineer_model.lower() or "vertex" in self.engineer_model.lower(): + messages.extend(self._sanitize_messages(chat_history[-5:])) else: messages.extend(chat_history[-5:]) @@ -582,9 +601,125 @@ class ai: tools.extend(self.external_architect_tools) return tools + def _get_sessions(self): + """Returns a list of session metadata sorted by date.""" + sessions = [] + if not os.path.exists(self.sessions_dir): + return [] + for f in os.listdir(self.sessions_dir): + if f.endswith(".json"): + path = os.path.join(self.sessions_dir, f) + try: + with open(path, "r") as fs: + data = json.load(fs) + sessions.append({ + "id": f[:-5], + "title": data.get("title", "Untitled Session"), + "created_at": data.get("created_at", "Unknown"), + "model": data.get("model", "Unknown"), + "path": path + }) + except Exception: + continue + return sorted(sessions, key=lambda x: x["created_at"], reverse=True) + + def list_sessions(self): + """Prints a list of sessions using printer.table.""" + sessions = self._get_sessions() + if not sessions: + printer.info("No saved AI sessions found.") + return + + columns = ["ID", "Title", "Created At", "Model"] + rows = [[s["id"], s["title"], s["created_at"], s["model"]] for s in sessions] + 
printer.table("AI Persisted Sessions", columns, rows) + + def load_session_data(self, session_id): + """Loads a session's raw data by ID.""" + path = os.path.join(self.sessions_dir, f"{session_id}.json") + if os.path.exists(path): + try: + with open(path, "r") as f: + data = json.load(f) + self.session_id = session_id + self.session_path = path + return data + except Exception as e: + printer.error(f"Failed to load session {session_id}: {e}") + return None + + def delete_session(self, session_id): + """Deletes a session by ID.""" + path = os.path.join(self.sessions_dir, f"{session_id}.json") + if os.path.exists(path): + os.remove(path) + printer.success(f"Session {session_id} deleted.") + else: + printer.error(f"Session {session_id} not found.") + + def get_last_session_id(self): + """Returns the ID of the most recent session.""" + sessions = self._get_sessions() + return sessions[0]["id"] if sessions else None + + def _generate_session_id(self, query): + """Generates a unique session ID based on timestamp.""" + return datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + + def save_session(self, history, title=None, model=None): + """Saves current history to the session file.""" + if not self.session_id: + # Generate ID from first user query if available + first_user_msg = next((m["content"] for m in history if m["role"] == "user"), "new-session") + self.session_id = self._generate_session_id(first_user_msg) + self.session_path = os.path.join(self.sessions_dir, f"{self.session_id}.json") + + # If it's a new file, we might want to set a better title + if not os.path.exists(self.session_path) and not title: + raw_title = next((m["content"] for m in history if m["role"] == "user"), "New Session") + # Clean title: remove newlines, multiple spaces + clean_title = " ".join(raw_title.split()) + if len(clean_title) > 40: + title = clean_title[:37].strip() + "..." 
+            else:
+                title = clean_title
+
+        try:
+            # Read existing metadata if it exists
+            metadata = {}
+            if os.path.exists(self.session_path):
+                with open(self.session_path, "r") as f:
+                    metadata = json.load(f)
+
+            metadata.update({
+                "id": self.session_id,
+                "title": title or metadata.get("title", "New Session"),
+                "created_at": metadata.get("created_at", datetime.datetime.now().isoformat()),
+                "updated_at": datetime.datetime.now().isoformat(),
+                "model": model or metadata.get("model", self.engineer_model),
+                "history": history
+            })
+
+            with open(self.session_path, "w") as f:
+                json.dump(metadata, f, indent=4)
+        except Exception as e:
+            printer.error(f"Failed to save session: {e}")
+
     @MethodHook
-    def ask(self, user_input, dryrun=False, chat_history=None, status=None, debug=False, stream=True):
+    def ask(self, user_input, dryrun=False, chat_history=None, status=None, debug=False, stream=True, session_id=None):
         if chat_history is None: chat_history = []
+
+        # Load session if provided and history is empty
+        if session_id and not chat_history:
+            session_data = self.load_session_data(session_id)
+            if session_data:
+                chat_history = session_data.get("history", [])
+                # If we loaded history, the caller might need it back
+                # But typically ask() is called in a loop with an external history object
+
         usage = {"input": 0, "output": 0, "total": 0}

         # 1.
Selector de Rol inicial (Sticky Brain) @@ -618,15 +753,20 @@ class ai: model = self.architect_model if current_brain == "architect" else self.engineer_model key = self.architect_key if current_brain == "architect" else self.engineer_key - # Estructura optimizada para Prompt Caching - if "claude" in model.lower(): + # Estructura optimizada para Prompt Caching (Solo para Anthropic directo, Vertex tiene reglas distintas) + if "claude" in model.lower() and "vertex" not in model.lower(): messages = [{"role": "system", "content": [{"type": "text", "text": system_prompt, "cache_control": {"type": "ephemeral"}}]}] else: messages = [{"role": "system", "content": system_prompt}] # Interleaving de historial last_role = "system" - for msg in chat_history[-self.max_history:]: + # Sanitize history if the current target model is not compatible with cache_control + history_to_process = chat_history[-self.max_history:] + if "claude" not in model.lower() or "vertex" in model.lower(): + history_to_process = self._sanitize_messages(history_to_process) + + for msg in history_to_process: m = msg if isinstance(msg, dict) else msg.model_dump(exclude_none=True) role = m.get('role') if role == last_role and role == 'user': @@ -654,7 +794,7 @@ class ai: console.print(f"[yellow] You can press Ctrl+C to interrupt and get a summary of progress.[/yellow]") soft_limit_warned = True - label = "[bold purple]Architect" if current_brain == "architect" else "[bold blue]Engineer" + label = "[bold medium_purple]Architect" if current_brain == "architect" else "[bold blue]Engineer" if status: status.update(f"{label} is thinking... 
(step {iteration})") streamed_response = False @@ -699,7 +839,7 @@ class ai: messages.append(msg_dict) if debug and resp_msg.content: - console.print(Panel(Markdown(resp_msg.content), title=f"{label} Reasoning", border_style="purple" if current_brain == "architect" else "blue")) + console.print(Panel(Markdown(resp_msg.content), title=f"{label} Reasoning", border_style="medium_purple" if current_brain == "architect" else "blue")) if not resp_msg.tool_calls: break @@ -716,8 +856,8 @@ class ai: continue if status: - if fn == "delegate_to_engineer": status.update(f"[bold purple]Architect: [DELEGATING MISSION] {args.get('task','')[:40]}...") - elif fn == "manage_memory_tool": status.update(f"[bold purple]Architect: [UPDATING MEMORY]") + if fn == "delegate_to_engineer": status.update(f"[bold medium_purple]Architect: [DELEGATING MISSION] {args.get('task','')[:40]}...") + elif fn == "manage_memory_tool": status.update(f"[bold medium_purple]Architect: [UPDATING MEMORY]") if debug: console.print(Panel(Text(json.dumps(args, indent=2)), title=f"{label} Decision: {fn}", border_style="white")) @@ -725,7 +865,7 @@ class ai: obs, eng_usage = self._engineer_loop(args["task"], status=status, debug=debug, chat_history=messages[:-1]) usage["input"] += eng_usage["input"]; usage["output"] += eng_usage["output"]; usage["total"] += eng_usage["total"] elif fn == "consult_architect": - if status: status.update("[bold purple]Engineer consulting Architect...") + if status: status.update("[bold medium_purple]Engineer consulting Architect...") try: # Consultation only - Engineer stays in control claude_resp = completion( @@ -738,13 +878,13 @@ class ai: num_retries=3 ) obs = claude_resp.choices[0].message.content - if debug: console.print(Panel(Markdown(obs), title="[bold purple]Architect Consultation[/bold purple]", border_style="purple")) + if debug: console.print(Panel(Markdown(obs), title="[bold medium_purple]Architect Consultation[/bold medium_purple]", border_style="medium_purple")) except 
Exception as e: if status: status.update("[bold orange3]Architect unavailable! Engineer continuing alone...") obs = f"Architect unavailable ({str(e)}). Proceeding with your best technical judgment." elif fn == "escalate_to_architect": - if status: status.update("[bold purple]Transferring control to Architect...") + if status: status.update("[bold medium_purple]Transferring control to Architect...") # Full escalation - Architect takes over current_brain = "architect" model = self.architect_model @@ -755,7 +895,7 @@ class ai: handover_msg = f"HANDOVER FROM EXECUTION ENGINE\n\nReason: {args['reason']}\n\nContext: {args['context']}\n\nYou are now in control of this conversation." pending_user_message = handover_msg obs = "Control transferred to Architect. Handover context will be provided." - if debug: console.print(Panel(Text(handover_msg), title="[bold purple]Escalation to Architect[/bold purple]", border_style="purple")) + if debug: console.print(Panel(Text(handover_msg), title="[bold medium_purple]Escalation to Architect[/bold medium_purple]", border_style="medium_purple")) elif fn == "return_to_engineer": if status: status.update("[bold blue]Transferring control back to Engineer...") @@ -813,19 +953,8 @@ class ai: messages.append(resp_msg.model_dump(exclude_none=True)) except Exception: pass finally: - try: - log_dir = self.config.defaultdir - os.makedirs(log_dir, exist_ok=True) - log_path = os.path.join(log_dir, "ai_debug.json") - hist = [] - if os.path.exists(log_path): - try: - with open(log_path, "r") as f: hist = json.load(f) - except (IOError, json.JSONDecodeError): hist = [] - hist.append({"timestamp": datetime.datetime.now().isoformat(), "roles": {"strategic_engine": self.architect_model, "execution_engine": self.engineer_model}, "session": messages}) - with open(log_path, "w") as f: json.dump(hist[-10:], f, indent=4) - except Exception as e: - if debug: console.print(f"[dim red]Debug log failed: {e}[/dim red]") + # Auto-save session + 
self.save_session(messages, model=model) return { "response": messages[-1].get("content"), diff --git a/connpy/api.py b/connpy/api.py index 3436439..d8bf2ae 100755 --- a/connpy/api.py +++ b/connpy/api.py @@ -8,7 +8,7 @@ import signal app = Flask(__name__) CORS(app) -conf = configfile() +# conf = configfile() # REMOVED: Item #1 in Roadmap -> Don't instantiate globally PID_FILE1 = "/run/connpy.pid" PID_FILE2 = "/tmp/connpy.pid" @@ -156,23 +156,23 @@ def stop_api(): return port @hooks.MethodHook -def debug_api(port=8048): - app.custom_config = configfile() +def debug_api(port=8048, config=None): + app.custom_config = config or configfile() app.run(debug=True, port=port) @hooks.MethodHook -def start_server(port=8048): - app.custom_config = configfile() +def start_server(port=8048, config=None): + app.custom_config = config or configfile() serve(app, host='0.0.0.0', port=port) @hooks.MethodHook -def start_api(port=8048): +def start_api(port=8048, config=None): if os.path.exists(PID_FILE1) or os.path.exists(PID_FILE2): printer.warning("Connpy server is already running.") return pid = os.fork() if pid == 0: - start_server(port) + start_server(port, config=config) else: try: with open(PID_FILE1, "w") as f: diff --git a/connpy/configfile.py b/connpy/configfile.py index 5a6b292..c38d2e0 100755 --- a/connpy/configfile.py +++ b/connpy/configfile.py @@ -56,27 +56,31 @@ class configfile: ''' home = os.path.expanduser("~") defaultdir = home + '/.config/conn' - self.defaultdir = defaultdir - Path(defaultdir).mkdir(parents=True, exist_ok=True) - Path(f"{defaultdir}/plugins").mkdir(parents=True, exist_ok=True) - pathfile = defaultdir + '/.folder' - try: - with open(pathfile, "r") as f: - configdir = f.read().strip() - except (FileNotFoundError, IOError): - with open(pathfile, "w") as f: - f.write(str(defaultdir)) - configdir = defaultdir - defaultfile = configdir + '/config.yaml' - self.cachefile = configdir + '/.config.cache.json' - self.fzf_cachefile = configdir + 
'/.fzf_nodes_cache.txt' - self.folders_cachefile = configdir + '/.folders_cache.txt' - self.profiles_cachefile = configdir + '/.profiles_cache.txt' - defaultkey = configdir + '/.osk' - if conf == None: - self.file = defaultfile + + if conf is None: + # Standard path: use ~/.config/conn and respect .folder redirection + self.anchor_path = defaultdir + self.defaultdir = defaultdir + Path(defaultdir).mkdir(parents=True, exist_ok=True) - # Backwards compatibility: Migrate from JSON to YAML + pathfile = defaultdir + '/.folder' + try: + with open(pathfile, "r") as f: + configdir = f.read().strip() + except (FileNotFoundError, IOError): + with open(pathfile, "w") as f: + f.write(str(defaultdir)) + configdir = defaultdir + + self.defaultdir = configdir + self.file = configdir + '/config.yaml' + self.key = key or (configdir + '/.osk') + + # Ensure redirected directories exist + Path(configdir).mkdir(parents=True, exist_ok=True) + Path(f"{configdir}/plugins").mkdir(parents=True, exist_ok=True) + + # Backwards compatibility: Migrate from JSON to YAML only for default path legacy_json = configdir + '/config.json' legacy_noext = configdir + '/config' legacy_file = None @@ -99,38 +103,44 @@ class configfile: os.remove(self.file) printer.warning("YAML verification failed after migration, keeping legacy config.") else: - with open(self.cachefile, 'w') as f: + # Note: cachefile is derived later, we use temp one for migration sync + temp_cache = configdir + '/.config.cache.json' + with open(temp_cache, 'w') as f: json.dump(old_data, f) shutil.move(legacy_file, legacy_file + ".backup") printer.success(f"Migrated legacy config ({len(old_data.get('connections',{}))} folders/nodes) into YAML and Cache successfully!") except Exception as e: - # Clean up partial YAML if it was created if os.path.exists(self.file): - try: - os.remove(self.file) - except OSError: - pass + try: os.remove(self.file) + except OSError: pass printer.warning(f"Failed to migrate legacy config: {e}") else: - 
self.file = conf - - if key == None: - self.key = defaultkey - else: - self.key = key + # Custom path (common in tests): isolate everything to the conf parent directory + self.file = os.path.abspath(conf) + configdir = os.path.dirname(self.file) + self.anchor_path = configdir + self.defaultdir = configdir + self.key = os.path.abspath(key) if key else (configdir + '/.osk') + + # Sidecar files always live next to the config file (or in the redirected configdir) + self.cachefile = configdir + '/.config.cache.json' + self.fzf_cachefile = configdir + '/.fzf_nodes_cache.txt' + self.folders_cachefile = configdir + '/.folders_cache.txt' + self.profiles_cachefile = configdir + '/.profiles_cache.txt' if os.path.exists(self.file): config = self._loadconfig(self.file) else: config = self._createconfig(self.file) + self.config = config["config"] self.connections = config["connections"] self.profiles = config["profiles"] + if not os.path.exists(self.key): self._createkey(self.key) with open(self.key) as f: self.privatekey = RSA.import_key(f.read()) - f.close() self.publickey = self.privatekey.publickey() # Self-heal text caches if they are missing diff --git a/connpy/connapp.py b/connpy/connapp.py index c097ac5..7e2a70f 100755 --- a/connpy/connapp.py +++ b/connpy/connapp.py @@ -18,14 +18,15 @@ class NoAliasDumper(yaml.SafeDumper): def ignore_aliases(self, data): return True from rich.markdown import Markdown -from rich.console import Console, Group +from rich.markdown import Markdown from rich.panel import Panel from rich.text import Text from rich.rule import Rule from rich.style import Style from rich.prompt import Prompt -mdprint = Console().print -console = Console() +mdprint = printer.console.print +console = printer.console + try: from pyfzf.pyfzf import FzfPrompt except ImportError: @@ -135,6 +136,10 @@ class connapp: aiparser.add_argument("--architect-model", nargs=1, help="Override architect model") aiparser.add_argument("--architect-api-key", nargs=1, help="Override 
architect api key") aiparser.add_argument("--debug", action="store_true", help="Show AI reasoning and tool calls") + aiparser.add_argument("--list", "--list-sessions", dest="list_sessions", action="store_true", help="List saved AI sessions") + aiparser.add_argument("--session", nargs=1, help="Resume a specific AI session by ID") + aiparser.add_argument("--resume", action="store_true", help="Resume the most recent AI session") + aiparser.add_argument("--delete", "--delete-session", dest="delete_session", nargs=1, help="Delete an AI session by ID") aiparser.set_defaults(func=self._func_ai) #RUNPARSER runparser = subparsers.add_parser("run", description="Run scripts or commands on nodes", formatter_class=argparse.RawTextHelpFormatter) @@ -188,8 +193,10 @@ class connapp: for preload in self.plugins.preloads.values(): preload.Preload(self) - if not os.path.exists(self.config.fzf_cachefile): - self.config._generate_nodes_cache() + # Update internal state and force cache generation after all preloads + self.nodes_list = self.config._getallnodes() + self.folders = self.config._getallfolders() + self.config._generate_nodes_cache() #Generate helps nodeparser.usage = self._help("usage", subparsers) @@ -656,7 +663,7 @@ class connapp: if not os.path.isdir(args.data[0]): raise argparse.ArgumentTypeError(f"readable_dir:{args.data[0]} is not a valid path") else: - pathfile = self.config.defaultdir + "/.folder" + pathfile = self.config.anchor_path + "/.folder" folder = os.path.abspath(args.data[0]).rstrip('/') with open(pathfile, "w") as f: f.write(str(folder)) @@ -803,13 +810,15 @@ class connapp: plugins = {} # Iterate over all files in the specified folder - for file in os.listdir(self.config.defaultdir + "/plugins"): - # Check if the file is a Python file - if file.endswith('.py'): - enabled_files.append(os.path.splitext(file)[0]) - # Check if the file is a Python backup file - elif file.endswith('.py.bkp'): - disabled_files.append(os.path.splitext(os.path.splitext(file)[0])[0]) 
+ plugins_dir = self.config.defaultdir + "/plugins" + if os.path.exists(plugins_dir): + for file in os.listdir(plugins_dir): + # Check if the file is a Python file + if file.endswith('.py'): + enabled_files.append(os.path.splitext(file)[0]) + # Check if the file is a Python backup file + elif file.endswith('.py.bkp'): + disabled_files.append(os.path.splitext(os.path.splitext(file)[0])[0]) if enabled_files: plugins["Enabled"] = enabled_files if disabled_files: @@ -899,17 +908,35 @@ class connapp: self.myai = self.ai(self.config, **arguments) + # 1. Gestionar comandos de sesión (Listar/Borrar) + if args.list_sessions: + self.myai.list_sessions() + return + + if args.delete_session: + self.myai.delete_session(args.delete_session[0]) + return + + # 2. Determinar session_id para retomar + session_id = None + if args.resume: + session_id = self.myai.get_last_session_id() + if not session_id: + printer.warning("No previous session found to resume.") + elif args.session: + session_id = args.session[0] + if args.ask: # Single question mode query = " ".join(args.ask) with console.status("[bold green]Agent is thinking and analyzing...") as status: - result = self.myai.ask(query, status=status, debug=args.debug) + result = self.myai.ask(query, status=status, debug=args.debug, session_id=session_id) # Determine title and color based on responder responder = result.get("responder", "engineer") if responder == "architect": - title = "[bold purple]Network Architect[/bold purple]" - border_style = "purple" + title = "[bold medium_purple]Network Architect[/bold medium_purple]" + border_style = "medium_purple" else: title = "[bold blue]Network Engineer[/bold blue]" border_style = "blue" @@ -927,9 +954,20 @@ class connapp: else: # Interactive chat mode history = None - mdprint(Rule(style="bold blue")) - mdprint(Markdown("**Networking Expert Agent**: Hi! I'm your assistant. 
I can help you diagnose issues, run commands, and manage your nodes.\nType 'exit' to quit.\n"))
-            mdprint(Rule(style="bold blue"))
+            if session_id:
+                session_data = self.myai.load_session_data(session_id)
+                if session_data:
+                    history = session_data.get("history", [])
+                    mdprint(Rule(title=f"[bold cyan] Resuming Session: {session_data.get('title')} [/bold cyan]", style="cyan"))
+                else:
+                    printer.error(f"Could not load session {session_id}. Starting clean.")
+
+            if not history:
+                mdprint(Rule(style="bold blue"))
+                mdprint(Markdown("**Networking Expert Agent**: Hi! I'm your assistant. I can help you diagnose issues, run commands, and manage your nodes.\nType 'exit' to quit.\n"))
+                mdprint(Rule(style="bold blue"))
+            else:
+                mdprint(f"[dim]Analyzing {len(history)} previous messages...[/dim]\n")

             while True:
                 try:
@@ -984,18 +1022,18 @@
             return True

     def _func_api(self, args):
         if args.command == "stop" or args.command == "restart":
             args.data = self.stop_api()
         if args.command == "start" or args.command == "restart":
             if args.data:
-                self.start_api(args.data)
+                self.start_api(args.data, config=self.config)
             else:
-                self.start_api()
+                self.start_api(config=self.config)
         if args.command == "debug":
             if args.data:
-                self.debug_api(args.data)
+                self.debug_api(args.data, config=self.config)
             else:
-                self.debug_api()
+                self.debug_api(config=self.config)
         return

     def _node_run(self, args):
@@ -1577,8 +1615,9 @@
 compdef _conn connpy
 connpy() {
     if [ $# -eq 0 ]; then
         local selected
-        if [ -f ~/.config/conn/.fzf_nodes_cache.txt ]; then
-            selected=$(cat ~/.config/conn/.fzf_nodes_cache.txt | fzf-tmux -d 25% --reverse)
+        local configdir=$(cat ~/.config/conn/.folder 2>/dev/null || echo ~/.config/conn)
+        if [ -s "$configdir/.fzf_nodes_cache.txt" ]; then
+            selected=$(cat "$configdir/.fzf_nodes_cache.txt" | fzf-tmux -i -d 25%)
         else
             command connpy
             return
@@ -1598,8 +1637,9 @@
 alias c="connpy"
 connpy() {
if [ $# -eq 0 ]; then local selected - if [ -f ~/.config/conn/.fzf_nodes_cache.txt ]; then - selected=$(cat ~/.config/conn/.fzf_nodes_cache.txt | fzf-tmux -d 25% --reverse) + local configdir=$(cat ~/.config/conn/.folder 2>/dev/null || echo ~/.config/conn) + if [ -s "$configdir/.fzf_nodes_cache.txt" ]; then + selected=$(cat "$configdir/.fzf_nodes_cache.txt" | fzf-tmux -i -d 25%) else command connpy return diff --git a/connpy/core_plugins/context.py b/connpy/core_plugins/context.py index fc22647..09f4bc9 100644 --- a/connpy/core_plugins/context.py +++ b/connpy/core_plugins/context.py @@ -117,15 +117,18 @@ class context_manager: class Preload: def __init__(self, connapp): - #define contexts if doesn't exist - connapp.config.modify(context_manager.add_default_context) - #filter nodes using context cm = context_manager(connapp) - connapp.nodes_list = [node for node in connapp.nodes_list if cm.match_any_regex(node, cm.regex)] - connapp.folders = [node for node in connapp.folders if cm.match_any_regex(node, cm.regex)] + # Register hooks first so that any save triggers a filtered cache generation connapp.config._getallnodes.register_post_hook(cm.modify_node_list) connapp.config._getallfolders.register_post_hook(cm.modify_node_list) connapp.config._getallnodesfull.register_post_hook(cm.modify_node_dict) + + # Define contexts if doesn't exist (triggers save/cache generation) + connapp.config.modify(context_manager.add_default_context) + + # Filter in-memory nodes using current context + connapp.nodes_list = [node for node in connapp.nodes_list if cm.match_any_regex(node, cm.regex)] + connapp.folders = [node for node in connapp.folders if cm.match_any_regex(node, cm.regex)] class Parser: def __init__(self): diff --git a/connpy/plugins.py b/connpy/plugins.py index 81a95a2..8f6fdfb 100755 --- a/connpy/plugins.py +++ b/connpy/plugins.py @@ -115,6 +115,8 @@ class Plugins: return module def _import_plugins_to_argparse(self, directory, subparsers): + if not 
os.path.exists(directory): + return for filename in os.listdir(directory): commands = subparsers.choices.keys() if filename.endswith(".py"): diff --git a/connpy/printer.py b/connpy/printer.py index 68a6a8c..e95e811 100644 --- a/connpy/printer.py +++ b/connpy/printer.py @@ -1,33 +1,51 @@ import sys +from rich.console import Console +from rich.table import Table +from rich.live import Live + +console = Console() +err_console = Console(stderr=True) + def _format_multiline(tag, message): + message = str(message) lines = message.splitlines() if not lines: - return f"[{tag}]" - formatted = [f"[{tag}] {lines[0]}"] + return f"\\[{tag}]" + formatted = [f"\\[{tag}] {lines[0]}"] indent = " " * (len(tag) + 3) for line in lines[1:]: formatted.append(f"{indent}{line}") return "\n".join(formatted) def info(message): - print(_format_multiline("i", message)) + console.print(_format_multiline("i", message)) def success(message): - print(_format_multiline("✓", message)) + console.print(_format_multiline("✓", message)) def start(message): - print(_format_multiline("+", message)) + console.print(_format_multiline("+", message)) def warning(message): - print(_format_multiline("!", message)) + console.print(_format_multiline("!", message)) def error(message): - print(_format_multiline("✗", message), file=sys.stderr) + # For error, we can create a temporary stderr console or just use the current one + # err_console handles styles better than standard print and outputs to stderr. 
+ err_console.print(_format_multiline("✗", message), style="red") def debug(message): - print(_format_multiline("d", message)) + console.print(_format_multiline("d", message)) def custom(tag, message): - print(_format_multiline(tag, message)) + console.print(_format_multiline(tag, message)) + +def table(title, columns, rows, header_style="bold cyan", box=None): + t = Table(title=title, header_style=header_style, box=box) + for col in columns: + t.add_column(col) + for row in rows: + t.add_row(*[str(item) for item in row]) + console.print(t) diff --git a/connpy/tests/test_ai.py b/connpy/tests/test_ai.py index 17e39e0..c387c71 100644 --- a/connpy/tests/test_ai.py +++ b/connpy/tests/test_ai.py @@ -42,7 +42,7 @@ class TestAIInit: def test_init_loads_memory(self, ai_config, tmp_path, mock_litellm): """Loads long-term memory from file if it exists.""" - memory_path = os.path.expanduser("~/.config/conn/ai_memory.md") + memory_path = os.path.join(ai_config.defaultdir, "ai_memory.md") from connpy.ai import ai with patch("os.path.exists", side_effect=lambda p: True if p == memory_path else os.path.exists(p)): @@ -210,6 +210,17 @@ class TestSanitizeMessages: result = myai._sanitize_messages(messages) assert len(result) == 4 + def test_sanitize_strips_cache_control(self, myai): + """_sanitize_messages should convert list-based content (with cache_control) back to strings.""" + messages = [ + {"role": "system", "content": [{"type": "text", "text": "system prompt", "cache_control": {"type": "ephemeral"}}]}, + {"role": "user", "content": "hello"} + ] + result = myai._sanitize_messages(messages) + assert result[0]["role"] == "system" + assert isinstance(result[0]["content"], str) + assert result[0]["content"] == "system prompt" + # ========================================================================= # _truncate tests @@ -395,3 +406,76 @@ class TestToolDefinitions: tools = myai._get_architect_tools() names = [t["function"]["name"] for t in tools] assert "arch_tool" in names + 
+ +# ========================================================================= +# AI Session Management tests +# ========================================================================= + +class TestAISessions: + @pytest.fixture + def myai(self, ai_config, mock_litellm, tmp_path): + from connpy.ai import ai + ai_config.defaultdir = str(tmp_path) + return ai(ai_config) + + def test_sessions_dir_initialization(self, myai, tmp_path): + assert os.path.exists(os.path.join(tmp_path, "ai_sessions")) + assert myai.sessions_dir == str(tmp_path / "ai_sessions") + + def test_generate_session_id(self, myai): + session_id = myai._generate_session_id("Any query") + # Format: YYYYMMDD-HHMMSS + assert len(session_id) == 15 + assert "-" in session_id + parts = session_id.split("-") + assert len(parts[0]) == 8 # YYYYMMDD + assert len(parts[1]) == 6 # HHMMSS + + def test_save_and_load_session(self, myai): + history = [ + {"role": "user", "content": "Hello"}, + {"role": "assistant", "content": "Hi"} + ] + myai.save_session(history, title="Test Session") + session_id = myai.session_id + + # Load it back + loaded = myai.load_session_data(session_id) + assert loaded["title"] == "Test Session" + assert loaded["history"] == history + assert loaded["model"] == myai.engineer_model + + def test_list_sessions(self, myai, capsys): + history = [{"role": "user", "content": "Query 1"}] + myai.save_session(history, title="Session 1") + + # Use a second instance to list + myai.list_sessions() + captured = capsys.readouterr() + assert "Session 1" in captured.out + assert "AI Persisted Sessions" in captured.out + + def test_get_last_session_id(self, myai): + # Save two sessions + myai.session_id = None # Force new + myai.save_session([{"role": "user", "content": "First"}]) + first_id = myai.session_id + import time + time.sleep(1.1) # Ensure different timestamp + + myai.session_id = None # Force new + myai.save_session([{"role": "user", "content": "Second"}]) + second_id = myai.session_id + + last_id 
= myai.get_last_session_id() + assert last_id == second_id + assert last_id != first_id + + def test_delete_session(self, myai): + myai.save_session([{"role": "user", "content": "To be deleted"}]) + session_id = myai.session_id + assert os.path.exists(myai.session_path) + + myai.delete_session(session_id) + assert not os.path.exists(myai.session_path) diff --git a/docs/connpy/index.html b/docs/connpy/index.html index fced62f..32aa48e 100644 --- a/docs/connpy/index.html +++ b/docs/connpy/index.html @@ -3,7 +3,7 @@ - + connpy API documentation @@ -683,6 +683,8 @@ class Preload: return module def _import_plugins_to_argparse(self, directory, subparsers): + if not os.path.exists(directory): + return for filename in os.listdir(directory): commands = subparsers.choices.keys() if filename.endswith(".py"): @@ -890,7 +892,7 @@ class ai: self.architect_prompt_extensions = [] # Extra text for architect prompt # Long-term memory - self.memory_path = os.path.expanduser("~/.config/conn/ai_memory.md") + self.memory_path = os.path.join(self.config.defaultdir, "ai_memory.md") self.long_term_memory = "" if os.path.exists(self.memory_path): try: @@ -903,6 +905,12 @@ class ai: except Exception as e: console.print(f"[yellow]Warning: Failed to load AI memory: {e}[/yellow]") + # Session Management + self.sessions_dir = os.path.join(self.config.defaultdir, "ai_sessions") + os.makedirs(self.sessions_dir, exist_ok=True) + self.session_id = None + self.session_path = None + # Prompts base agnósticos self._engineer_base_prompt = dedent(f""" Role: TECHNICAL EXECUTION ENGINE. @@ -1018,7 +1026,7 @@ class ai: # Determine styling based on current brain role_label = "Network Architect" if "architect" in label.lower() else "Network Engineer" - border = "purple" if "architect" in label.lower() else "blue" + border = "medium_purple" if "architect" in label.lower() else "blue" title = f"[bold {border}]{role_label}[/bold {border}]" try: @@ -1118,14 +1126,34 @@ class ai: 2. 
No user/system messages appear between tool_calls and tool responses 3. Orphaned tool_calls at the end are removed 4. Orphaned tool responses without a preceding tool_call are removed + 5. Incompatible metadata like cache_control is stripped for non-Anthropic models """ if not messages: return messages + # Pre-process messages to pull text from list contents (Anthropic cache format) + # and remove explicit cache keys. + pre_sanitized = [] + for msg in messages: + m = msg.copy() if isinstance(msg, dict) else msg.model_dump(exclude_none=True) + + # Convert content list to plain string if it's a system message with caching metadata + if m.get('role') == 'system' and isinstance(m.get('content'), list): + # Extraer texto de [{"type": "text", "text": "...", "cache_control": ...}] + m['content'] = m['content'][0]['text'] if m['content'] else "" + + # Remove any explicit cache_control key anywhere + if 'cache_control' in m: del m['cache_control'] + if isinstance(m.get('content'), list): + for item in m['content']: + if isinstance(item, dict) and 'cache_control' in item: del item['cache_control'] + + pre_sanitized.append(m) + sanitized = [] i = 0 - while i < len(messages): - msg = messages[i] + while i < len(pre_sanitized): + msg = pre_sanitized[i] role = msg.get('role', '') if role == 'assistant' and msg.get('tool_calls'): @@ -1139,8 +1167,8 @@ class ai: # Look ahead for matching tool responses tool_responses = [] j = i + 1 - while j < len(messages): - next_msg = messages[j] + while j < len(pre_sanitized): + next_msg = pre_sanitized[j] if next_msg.get('role') == 'tool': tool_responses.append(next_msg) j += 1 @@ -1298,23 +1326,16 @@ class ai: def _engineer_loop(self, task, status=None, debug=False, chat_history=None): """Internal loop where the Engineer executes technical tasks for the Architect.""" - # Optimización de caché para el Ingeniero - if "claude" in self.engineer_model.lower(): + # Optimización de caché para el Ingeniero (Solo para Anthropic directo, Vertex tiene 
reglas distintas) + if "claude" in self.engineer_model.lower() and "vertex" not in self.engineer_model.lower(): messages = [{"role": "system", "content": [{"type": "text", "text": self.engineer_system_prompt, "cache_control": {"type": "ephemeral"}}]}] else: messages = [{"role": "system", "content": self.engineer_system_prompt}] if chat_history: - # Clean chat history from caching metadata if engineer is not Claude - if "claude" not in self.engineer_model.lower(): - cleaned_history = [] - for msg in chat_history[-5:]: - m = msg if isinstance(msg, dict) else msg.model_dump(exclude_none=True) - # Remove cache_control from system messages - if m.get('role') == 'system' and isinstance(m.get('content'), list): - m['content'] = m['content'][0]['text'] if m['content'] else "" - cleaned_history.append(m) - messages.extend(cleaned_history) + # Clean chat history from caching metadata if engineer is not a compatible Claude model + if "claude" not in self.engineer_model.lower() or "vertex" in self.engineer_model.lower(): + messages.extend(self._sanitize_messages(chat_history[-5:])) else: messages.extend(chat_history[-5:]) @@ -1410,9 +1431,125 @@ class ai: tools.extend(self.external_architect_tools) return tools + def _get_sessions(self): + """Returns a list of session metadata sorted by date.""" + sessions = [] + if not os.path.exists(self.sessions_dir): + return [] + for f in os.listdir(self.sessions_dir): + if f.endswith(".json"): + path = os.path.join(self.sessions_dir, f) + try: + with open(path, "r") as fs: + data = json.load(fs) + sessions.append({ + "id": f[:-5], + "title": data.get("title", "Untitled Session"), + "created_at": data.get("created_at", "Unknown"), + "model": data.get("model", "Unknown"), + "path": path + }) + except Exception: + continue + return sorted(sessions, key=lambda x: x["created_at"], reverse=True) + + def list_sessions(self): + """Prints a list of sessions using printer.table.""" + sessions = self._get_sessions() + if not sessions: + 
printer.info("No saved AI sessions found.") + return + + columns = ["ID", "Title", "Created At", "Model"] + rows = [[s["id"], s["title"], s["created_at"], s["model"]] for s in sessions] + printer.table("AI Persisted Sessions", columns, rows) + + def load_session_data(self, session_id): + """Loads a session's raw data by ID.""" + path = os.path.join(self.sessions_dir, f"{session_id}.json") + if os.path.exists(path): + try: + with open(path, "r") as f: + data = json.load(f) + self.session_id = session_id + self.session_path = path + return data + except Exception as e: + printer.error(f"Failed to load session {session_id}: {e}") + return None + + def delete_session(self, session_id): + """Deletes a session by ID.""" + path = os.path.join(self.sessions_dir, f"{session_id}.json") + if os.path.exists(path): + os.remove(path) + printer.success(f"Session {session_id} deleted.") + else: + printer.error(f"Session {session_id} not found.") + + def get_last_session_id(self): + """Returns the ID of the most recent session.""" + sessions = self._get_sessions() + return sessions[0]["id"] if sessions else None + + def _generate_session_id(self, query): + """Generates a unique session ID based on timestamp.""" + return datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + + def save_session(self, history, title=None, model=None): + """Saves current history to the session file.""" + if not self.session_id: + # Generate ID from first user query if available + first_user_msg = next((m["content"] for m in history if m["role"] == "user"), "new-session") + self.session_id = self._generate_session_id(first_user_msg) + self.session_path = os.path.join(self.sessions_dir, f"{self.session_id}.json") + + # If it's a new file, we might want to set a better title + if not os.path.exists(self.session_path) and not title: + raw_title = next((m["content"] for m in history if m["role"] == "user"), "New Session") + # Clean title: remove newlines, multiple spaces + clean_title = " 
".join(raw_title.split()) + if len(clean_title) > 40: + title = clean_title[:37].strip() + "..." + else: + title = clean_title + + try: + # Read existing metadata if it exists + metadata = {} + if os.path.exists(self.session_path): + with open(self.session_path, "r") as f: + metadata = json.load(f) + + metadata.update({ + "id": self.session_id, + "title": title or metadata.get("title", "New Session"), + "created_at": metadata.get("created_at", datetime.datetime.now().isoformat()), + "updated_at": datetime.datetime.now().isoformat(), + "model": model or metadata.get("model", self.engineer_model), + "history": history + }) + + with open(self.session_path, "w") as f: + json.dump(metadata, f, indent=4) + except Exception as e: + printer.error(f"Failed to save session: {e}") + + except Exception as e: + printer.error(f"Failed to save session: {e}") + @MethodHook - def ask(self, user_input, dryrun=False, chat_history=None, status=None, debug=False, stream=True): + def ask(self, user_input, dryrun=False, chat_history=None, status=None, debug=False, stream=True, session_id=None): if chat_history is None: chat_history = [] + + # Load session if provided and history is empty + if session_id and not chat_history: + session_data = self.load_session_data(session_id) + if session_data: + chat_history = session_data.get("history", []) + # If we loaded history, the caller might need it back + # But typically ask() is called in a loop with an external history object + usage = {"input": 0, "output": 0, "total": 0} # 1. 
Selector de Rol inicial (Sticky Brain) @@ -1446,15 +1583,20 @@ class ai: model = self.architect_model if current_brain == "architect" else self.engineer_model key = self.architect_key if current_brain == "architect" else self.engineer_key - # Estructura optimizada para Prompt Caching - if "claude" in model.lower(): + # Estructura optimizada para Prompt Caching (Solo para Anthropic directo, Vertex tiene reglas distintas) + if "claude" in model.lower() and "vertex" not in model.lower(): messages = [{"role": "system", "content": [{"type": "text", "text": system_prompt, "cache_control": {"type": "ephemeral"}}]}] else: messages = [{"role": "system", "content": system_prompt}] # Interleaving de historial last_role = "system" - for msg in chat_history[-self.max_history:]: + # Sanitize history if the current target model is not compatible with cache_control + history_to_process = chat_history[-self.max_history:] + if "claude" not in model.lower() or "vertex" in model.lower(): + history_to_process = self._sanitize_messages(history_to_process) + + for msg in history_to_process: m = msg if isinstance(msg, dict) else msg.model_dump(exclude_none=True) role = m.get('role') if role == last_role and role == 'user': @@ -1482,7 +1624,7 @@ class ai: console.print(f"[yellow] You can press Ctrl+C to interrupt and get a summary of progress.[/yellow]") soft_limit_warned = True - label = "[bold purple]Architect" if current_brain == "architect" else "[bold blue]Engineer" + label = "[bold medium_purple]Architect" if current_brain == "architect" else "[bold blue]Engineer" if status: status.update(f"{label} is thinking... 
(step {iteration})") streamed_response = False @@ -1527,7 +1669,7 @@ class ai: messages.append(msg_dict) if debug and resp_msg.content: - console.print(Panel(Markdown(resp_msg.content), title=f"{label} Reasoning", border_style="purple" if current_brain == "architect" else "blue")) + console.print(Panel(Markdown(resp_msg.content), title=f"{label} Reasoning", border_style="medium_purple" if current_brain == "architect" else "blue")) if not resp_msg.tool_calls: break @@ -1544,8 +1686,8 @@ class ai: continue if status: - if fn == "delegate_to_engineer": status.update(f"[bold purple]Architect: [DELEGATING MISSION] {args.get('task','')[:40]}...") - elif fn == "manage_memory_tool": status.update(f"[bold purple]Architect: [UPDATING MEMORY]") + if fn == "delegate_to_engineer": status.update(f"[bold medium_purple]Architect: [DELEGATING MISSION] {args.get('task','')[:40]}...") + elif fn == "manage_memory_tool": status.update(f"[bold medium_purple]Architect: [UPDATING MEMORY]") if debug: console.print(Panel(Text(json.dumps(args, indent=2)), title=f"{label} Decision: {fn}", border_style="white")) @@ -1553,7 +1695,7 @@ class ai: obs, eng_usage = self._engineer_loop(args["task"], status=status, debug=debug, chat_history=messages[:-1]) usage["input"] += eng_usage["input"]; usage["output"] += eng_usage["output"]; usage["total"] += eng_usage["total"] elif fn == "consult_architect": - if status: status.update("[bold purple]Engineer consulting Architect...") + if status: status.update("[bold medium_purple]Engineer consulting Architect...") try: # Consultation only - Engineer stays in control claude_resp = completion( @@ -1566,13 +1708,13 @@ class ai: num_retries=3 ) obs = claude_resp.choices[0].message.content - if debug: console.print(Panel(Markdown(obs), title="[bold purple]Architect Consultation[/bold purple]", border_style="purple")) + if debug: console.print(Panel(Markdown(obs), title="[bold medium_purple]Architect Consultation[/bold medium_purple]", 
border_style="medium_purple")) except Exception as e: if status: status.update("[bold orange3]Architect unavailable! Engineer continuing alone...") obs = f"Architect unavailable ({str(e)}). Proceeding with your best technical judgment." elif fn == "escalate_to_architect": - if status: status.update("[bold purple]Transferring control to Architect...") + if status: status.update("[bold medium_purple]Transferring control to Architect...") # Full escalation - Architect takes over current_brain = "architect" model = self.architect_model @@ -1583,7 +1725,7 @@ class ai: handover_msg = f"HANDOVER FROM EXECUTION ENGINE\n\nReason: {args['reason']}\n\nContext: {args['context']}\n\nYou are now in control of this conversation." pending_user_message = handover_msg obs = "Control transferred to Architect. Handover context will be provided." - if debug: console.print(Panel(Text(handover_msg), title="[bold purple]Escalation to Architect[/bold purple]", border_style="purple")) + if debug: console.print(Panel(Text(handover_msg), title="[bold medium_purple]Escalation to Architect[/bold medium_purple]", border_style="medium_purple")) elif fn == "return_to_engineer": if status: status.update("[bold blue]Transferring control back to Engineer...") @@ -1641,19 +1783,8 @@ class ai: messages.append(resp_msg.model_dump(exclude_none=True)) except Exception: pass finally: - try: - log_dir = self.config.defaultdir - os.makedirs(log_dir, exist_ok=True) - log_path = os.path.join(log_dir, "ai_debug.json") - hist = [] - if os.path.exists(log_path): - try: - with open(log_path, "r") as f: hist = json.load(f) - except (IOError, json.JSONDecodeError): hist = [] - hist.append({"timestamp": datetime.datetime.now().isoformat(), "roles": {"strategic_engine": self.architect_model, "execution_engine": self.engineer_model}, "session": messages}) - with open(log_path, "w") as f: json.dump(hist[-10:], f, indent=4) - except Exception as e: - if debug: console.print(f"[dim red]Debug log failed: {e}[/dim red]") + 
# Auto-save session + self.save_session(messages, model=model) return { "response": messages[-1].get("content"), @@ -1672,7 +1803,7 @@ class ai:
var SAFE_COMMANDS
-

The type of the None singleton.

+

Instance variables

@@ -1713,7 +1844,7 @@ def engineer_system_prompt(self):

Methods

-def ask(self,
user_input,
dryrun=False,
chat_history=None,
status=None,
debug=False,
stream=True)
+def ask(self,
user_input,
dryrun=False,
chat_history=None,
status=None,
debug=False,
stream=True,
session_id=None)
@@ -1721,8 +1852,17 @@ def engineer_system_prompt(self): Expand source code
@MethodHook
-def ask(self, user_input, dryrun=False, chat_history=None, status=None, debug=False, stream=True):
+def ask(self, user_input, dryrun=False, chat_history=None, status=None, debug=False, stream=True, session_id=None):
     if chat_history is None: chat_history = []
+    
+    # Load session if provided and history is empty
+    if session_id and not chat_history:
+        session_data = self.load_session_data(session_id)
+        if session_data:
+            chat_history = session_data.get("history", [])
+            # If we loaded history, the caller might need it back
+            # But typically ask() is called in a loop with an external history object
+
     usage = {"input": 0, "output": 0, "total": 0}
     
     # 1. Selector de Rol inicial (Sticky Brain)
@@ -1756,15 +1896,20 @@ def ask(self, user_input, dryrun=False, chat_history=None, status=None, debug=Fa
     model = self.architect_model if current_brain == "architect" else self.engineer_model
     key = self.architect_key if current_brain == "architect" else self.engineer_key
 
-    # Estructura optimizada para Prompt Caching
-    if "claude" in model.lower():
+    # Estructura optimizada para Prompt Caching (Solo para Anthropic directo, Vertex tiene reglas distintas)
+    if "claude" in model.lower() and "vertex" not in model.lower():
         messages = [{"role": "system", "content": [{"type": "text", "text": system_prompt, "cache_control": {"type": "ephemeral"}}]}]
     else:
         messages = [{"role": "system", "content": system_prompt}]
     
     # Interleaving de historial
     last_role = "system"
-    for msg in chat_history[-self.max_history:]:
+    # Sanitize history if the current target model is not compatible with cache_control
+    history_to_process = chat_history[-self.max_history:]
+    if "claude" not in model.lower() or "vertex" in model.lower():
+        history_to_process = self._sanitize_messages(history_to_process)
+
+    for msg in history_to_process:
         m = msg if isinstance(msg, dict) else msg.model_dump(exclude_none=True)
         role = m.get('role')
         if role == last_role and role == 'user':
@@ -1792,7 +1937,7 @@ def ask(self, user_input, dryrun=False, chat_history=None, status=None, debug=Fa
                 console.print(f"[yellow]  You can press Ctrl+C to interrupt and get a summary of progress.[/yellow]")
                 soft_limit_warned = True
             
-            label = "[bold purple]Architect" if current_brain == "architect" else "[bold blue]Engineer"
+            label = "[bold medium_purple]Architect" if current_brain == "architect" else "[bold blue]Engineer"
             if status: status.update(f"{label} is thinking... (step {iteration})")
             
             streamed_response = False
@@ -1837,7 +1982,7 @@ def ask(self, user_input, dryrun=False, chat_history=None, status=None, debug=Fa
             messages.append(msg_dict)
 
             if debug and resp_msg.content:
-                console.print(Panel(Markdown(resp_msg.content), title=f"{label} Reasoning", border_style="purple" if current_brain == "architect" else "blue"))
+                console.print(Panel(Markdown(resp_msg.content), title=f"{label} Reasoning", border_style="medium_purple" if current_brain == "architect" else "blue"))
 
             if not resp_msg.tool_calls: break
             
@@ -1854,8 +1999,8 @@ def ask(self, user_input, dryrun=False, chat_history=None, status=None, debug=Fa
                     continue
                 
                 if status:
-                    if fn == "delegate_to_engineer": status.update(f"[bold purple]Architect: [DELEGATING MISSION] {args.get('task','')[:40]}...")
-                    elif fn == "manage_memory_tool": status.update(f"[bold purple]Architect: [UPDATING MEMORY]")
+                    if fn == "delegate_to_engineer": status.update(f"[bold medium_purple]Architect: [DELEGATING MISSION] {args.get('task','')[:40]}...")
+                    elif fn == "manage_memory_tool": status.update(f"[bold medium_purple]Architect: [UPDATING MEMORY]")
 
                 if debug: console.print(Panel(Text(json.dumps(args, indent=2)), title=f"{label} Decision: {fn}", border_style="white"))
 
@@ -1863,7 +2008,7 @@ def ask(self, user_input, dryrun=False, chat_history=None, status=None, debug=Fa
                     obs, eng_usage = self._engineer_loop(args["task"], status=status, debug=debug, chat_history=messages[:-1])
                     usage["input"] += eng_usage["input"]; usage["output"] += eng_usage["output"]; usage["total"] += eng_usage["total"]
                 elif fn == "consult_architect":
-                    if status: status.update("[bold purple]Engineer consulting Architect...")
+                    if status: status.update("[bold medium_purple]Engineer consulting Architect...")
                     try:
                         # Consultation only - Engineer stays in control
                         claude_resp = completion(
@@ -1876,13 +2021,13 @@ def ask(self, user_input, dryrun=False, chat_history=None, status=None, debug=Fa
                             num_retries=3
                         )
                         obs = claude_resp.choices[0].message.content
-                        if debug: console.print(Panel(Markdown(obs), title="[bold purple]Architect Consultation[/bold purple]", border_style="purple"))
+                        if debug: console.print(Panel(Markdown(obs), title="[bold medium_purple]Architect Consultation[/bold medium_purple]", border_style="medium_purple"))
                     except Exception as e:
                         if status: status.update("[bold orange3]Architect unavailable! Engineer continuing alone...")
                         obs = f"Architect unavailable ({str(e)}). Proceeding with your best technical judgment."
                 
                 elif fn == "escalate_to_architect":
-                    if status: status.update("[bold purple]Transferring control to Architect...")
+                    if status: status.update("[bold medium_purple]Transferring control to Architect...")
                     # Full escalation - Architect takes over
                     current_brain = "architect"
                     model = self.architect_model
@@ -1893,7 +2038,7 @@ def ask(self, user_input, dryrun=False, chat_history=None, status=None, debug=Fa
                     handover_msg = f"HANDOVER FROM EXECUTION ENGINE\n\nReason: {args['reason']}\n\nContext: {args['context']}\n\nYou are now in control of this conversation."
                     pending_user_message = handover_msg
                     obs = "Control transferred to Architect. Handover context will be provided."
-                    if debug: console.print(Panel(Text(handover_msg), title="[bold purple]Escalation to Architect[/bold purple]", border_style="purple"))
+                    if debug: console.print(Panel(Text(handover_msg), title="[bold medium_purple]Escalation to Architect[/bold medium_purple]", border_style="medium_purple"))
                 
                 elif fn == "return_to_engineer":
                     if status: status.update("[bold blue]Transferring control back to Engineer...")
@@ -1951,19 +2096,8 @@ def ask(self, user_input, dryrun=False, chat_history=None, status=None, debug=Fa
             messages.append(resp_msg.model_dump(exclude_none=True))
         except Exception: pass
     finally:
-        try:
-            log_dir = self.config.defaultdir
-            os.makedirs(log_dir, exist_ok=True)
-            log_path = os.path.join(log_dir, "ai_debug.json")
-            hist = []
-            if os.path.exists(log_path):
-                try:
-                    with open(log_path, "r") as f: hist = json.load(f)
-                except (IOError, json.JSONDecodeError): hist = []
-            hist.append({"timestamp": datetime.datetime.now().isoformat(), "roles": {"strategic_engine": self.architect_model, "execution_engine": self.engineer_model}, "session": messages})
-            with open(log_path, "w") as f: json.dump(hist[-10:], f, indent=4)
-        except Exception as e:
-            if debug: console.print(f"[dim red]Debug log failed: {e}[/dim red]")
+        # Auto-save session
+        self.save_session(messages, model=model)
 
     return {
         "response": messages[-1].get("content"), 
@@ -1989,6 +2123,40 @@ def confirm(self, user_input): return True
+
+def delete_session(self, session_id) +
+
+
+ +Expand source code + +
def delete_session(self, session_id):
    """Remove a persisted AI session file identified by its ID.

    Prints a success message when the file is removed, or an error
    message when no session with that ID exists on disk.
    """
    target = os.path.join(self.sessions_dir, f"{session_id}.json")
    if not os.path.exists(target):
        printer.error(f"Session {session_id} not found.")
        return
    os.remove(target)
    printer.success(f"Session {session_id} deleted.")
+
+

Deletes a session by ID.

+
+
+def get_last_session_id(self) +
+
+
+ +Expand source code + +
def get_last_session_id(self):
    """Return the ID of the most recent session, or None when there are none.

    Relies on _get_sessions() returning sessions sorted newest-first.
    """
    sessions = self._get_sessions()
    if not sessions:
        return None
    return sessions[0]["id"]
+
+

Returns the ID of the most recent session.

+
def get_node_info_tool(self, node_name)
@@ -2037,6 +2205,51 @@ def confirm(self, user_input): return True

List nodes matching the filter pattern. Returns metadata for <=5 nodes, names only for more.

+
+def list_sessions(self) +
+
+
+ +Expand source code + +
def list_sessions(self):
    """Render all persisted sessions in a table via printer.table.

    Prints an informational message instead when no sessions exist.
    """
    sessions = self._get_sessions()
    if not sessions:
        printer.info("No saved AI sessions found.")
        return

    headers = ["ID", "Title", "Created At", "Model"]
    body = []
    for entry in sessions:
        body.append([entry["id"], entry["title"], entry["created_at"], entry["model"]])
    printer.table("AI Persisted Sessions", headers, body)
+
+

Prints a list of sessions using printer.table.

+
+
+def load_session_data(self, session_id) +
+
+
+ +Expand source code + +
def load_session_data(self, session_id):
    """Load a session's raw JSON payload by ID and mark it active.

    On success, records the session as the current one (session_id /
    session_path) and returns the parsed dict. Returns None when the
    file is missing or unreadable (logging an error in the latter case).
    """
    candidate = os.path.join(self.sessions_dir, f"{session_id}.json")
    if not os.path.exists(candidate):
        return None
    try:
        with open(candidate, "r") as fh:
            payload = json.load(fh)
    except Exception as e:
        printer.error(f"Failed to load session {session_id}: {e}")
        return None
    self.session_id = session_id
    self.session_path = candidate
    return payload
+
+

Loads a session's raw data by ID.

+
def manage_memory_tool(self, content, action='append')
@@ -2197,6 +2410,58 @@ def confirm(self, user_input): return True

Execute commands on nodes matching the filter. Native interactive confirmation for unsafe commands.

+
+def save_session(self, history, title=None, model=None) +
+
+
+ +Expand source code + +
def save_session(self, history, title=None, model=None):
    """Persist the conversation history to this session's JSON file.

    On the first save of a conversation a new session ID and path are
    minted; for a brand-new file without an explicit title, a short
    title is derived from the first user message. Existing metadata is
    merged so created_at and title survive subsequent saves.

    Args:
        history: List of chat message dicts to store.
        title: Optional explicit session title; otherwise derived or kept.
        model: Optional model name; falls back to the stored value, then
            to self.engineer_model.
    """
    if not self.session_id:
        # First save for this conversation: mint a timestamp-based ID.
        first_user_msg = next((m["content"] for m in history if m["role"] == "user"), "new-session")
        self.session_id = self._generate_session_id(first_user_msg)
        self.session_path = os.path.join(self.sessions_dir, f"{self.session_id}.json")

    # For a new file without an explicit title, derive one from the first
    # user message: collapse whitespace and truncate to ~40 characters.
    if not os.path.exists(self.session_path) and not title:
        raw_title = next((m["content"] for m in history if m["role"] == "user"), "New Session")
        clean_title = " ".join(raw_title.split())
        if len(clean_title) > 40:
            title = clean_title[:37].strip() + "..."
        else:
            title = clean_title

    try:
        # Merge with any existing metadata so created_at/title are preserved.
        metadata = {}
        if os.path.exists(self.session_path):
            with open(self.session_path, "r") as f:
                metadata = json.load(f)

        metadata.update({
            "id": self.session_id,
            "title": title or metadata.get("title", "New Session"),
            "created_at": metadata.get("created_at", datetime.datetime.now().isoformat()),
            "updated_at": datetime.datetime.now().isoformat(),
            "model": model or metadata.get("model", self.engineer_model),
            "history": history
        })

        with open(self.session_path, "w") as f:
            json.dump(metadata, f, indent=4)
    except Exception as e:
        # NOTE: the original had a second, orphaned `except Exception` clause
        # duplicated after this one (a SyntaxError) — removed.
        printer.error(f"Failed to save session: {e}")
+
+

Saves current history to the session file.

+
@@ -2248,27 +2513,31 @@ class configfile: ''' home = os.path.expanduser("~") defaultdir = home + '/.config/conn' - self.defaultdir = defaultdir - Path(defaultdir).mkdir(parents=True, exist_ok=True) - Path(f"{defaultdir}/plugins").mkdir(parents=True, exist_ok=True) - pathfile = defaultdir + '/.folder' - try: - with open(pathfile, "r") as f: - configdir = f.read().strip() - except (FileNotFoundError, IOError): - with open(pathfile, "w") as f: - f.write(str(defaultdir)) - configdir = defaultdir - defaultfile = configdir + '/config.yaml' - self.cachefile = configdir + '/.config.cache.json' - self.fzf_cachefile = configdir + '/.fzf_nodes_cache.txt' - self.folders_cachefile = configdir + '/.folders_cache.txt' - self.profiles_cachefile = configdir + '/.profiles_cache.txt' - defaultkey = configdir + '/.osk' - if conf == None: - self.file = defaultfile + + if conf is None: + # Standard path: use ~/.config/conn and respect .folder redirection + self.anchor_path = defaultdir + self.defaultdir = defaultdir + Path(defaultdir).mkdir(parents=True, exist_ok=True) - # Backwards compatibility: Migrate from JSON to YAML + pathfile = defaultdir + '/.folder' + try: + with open(pathfile, "r") as f: + configdir = f.read().strip() + except (FileNotFoundError, IOError): + with open(pathfile, "w") as f: + f.write(str(defaultdir)) + configdir = defaultdir + + self.defaultdir = configdir + self.file = configdir + '/config.yaml' + self.key = key or (configdir + '/.osk') + + # Ensure redirected directories exist + Path(configdir).mkdir(parents=True, exist_ok=True) + Path(f"{configdir}/plugins").mkdir(parents=True, exist_ok=True) + + # Backwards compatibility: Migrate from JSON to YAML only for default path legacy_json = configdir + '/config.json' legacy_noext = configdir + '/config' legacy_file = None @@ -2291,38 +2560,44 @@ class configfile: os.remove(self.file) printer.warning("YAML verification failed after migration, keeping legacy config.") else: - with open(self.cachefile, 'w') as f: + # 
Note: cachefile is derived later, we use temp one for migration sync + temp_cache = configdir + '/.config.cache.json' + with open(temp_cache, 'w') as f: json.dump(old_data, f) shutil.move(legacy_file, legacy_file + ".backup") printer.success(f"Migrated legacy config ({len(old_data.get('connections',{}))} folders/nodes) into YAML and Cache successfully!") except Exception as e: - # Clean up partial YAML if it was created if os.path.exists(self.file): - try: - os.remove(self.file) - except OSError: - pass + try: os.remove(self.file) + except OSError: pass printer.warning(f"Failed to migrate legacy config: {e}") else: - self.file = conf - - if key == None: - self.key = defaultkey - else: - self.key = key + # Custom path (common in tests): isolate everything to the conf parent directory + self.file = os.path.abspath(conf) + configdir = os.path.dirname(self.file) + self.anchor_path = configdir + self.defaultdir = configdir + self.key = os.path.abspath(key) if key else (configdir + '/.osk') + + # Sidecar files always live next to the config file (or in the redirected configdir) + self.cachefile = configdir + '/.config.cache.json' + self.fzf_cachefile = configdir + '/.fzf_nodes_cache.txt' + self.folders_cachefile = configdir + '/.folders_cache.txt' + self.profiles_cachefile = configdir + '/.profiles_cache.txt' if os.path.exists(self.file): config = self._loadconfig(self.file) else: config = self._createconfig(self.file) + self.config = config["config"] self.connections = config["connections"] self.profiles = config["profiles"] + if not os.path.exists(self.key): self._createkey(self.key) with open(self.key) as f: self.privatekey = RSA.import_key(f.read()) - f.close() self.publickey = self.privatekey.publickey() # Self-heal text caches if they are missing @@ -4724,12 +4999,17 @@ def test(self, commands, expected, vars = None,*, prompt = None, parallel = 10,
  • architect_system_prompt
  • ask
  • confirm
  • +
  • delete_session
  • engineer_system_prompt
  • +
  • get_last_session_id
  • get_node_info_tool
  • list_nodes_tool
  • +
  • list_sessions
  • +
  • load_session_data
  • manage_memory_tool
  • register_ai_tool
  • run_commands_tool
  • +
  • save_session
  • @@ -4761,7 +5041,7 @@ def test(self, commands, expected, vars = None,*, prompt = None, parallel = 10, diff --git a/docs/connpy/tests/conftest.html b/docs/connpy/tests/conftest.html index 560bf76..bc7eb23 100644 --- a/docs/connpy/tests/conftest.html +++ b/docs/connpy/tests/conftest.html @@ -3,7 +3,7 @@ - + connpy.tests.conftest API documentation @@ -258,7 +258,7 @@ def tmp_config_dir(tmp_path): diff --git a/docs/connpy/tests/index.html b/docs/connpy/tests/index.html index 5b37649..3643176 100644 --- a/docs/connpy/tests/index.html +++ b/docs/connpy/tests/index.html @@ -3,7 +3,7 @@ - + connpy.tests API documentation @@ -127,7 +127,7 @@ el.replaceWith(d); diff --git a/docs/connpy/tests/test_ai.html b/docs/connpy/tests/test_ai.html index 086f7ee..ef77abd 100644 --- a/docs/connpy/tests/test_ai.html +++ b/docs/connpy/tests/test_ai.html @@ -3,7 +3,7 @@ - + connpy.tests.test_ai API documentation @@ -88,7 +88,7 @@ el.replaceWith(d); def test_init_loads_memory(self, ai_config, tmp_path, mock_litellm): """Loads long-term memory from file if it exists.""" - memory_path = os.path.expanduser("~/.config/conn/ai_memory.md") + memory_path = os.path.join(ai_config.defaultdir, "ai_memory.md") from connpy.ai import ai with patch("os.path.exists", side_effect=lambda p: True if p == memory_path else os.path.exists(p)): @@ -132,7 +132,7 @@ el.replaceWith(d);
    def test_init_loads_memory(self, ai_config, tmp_path, mock_litellm):
         """Loads long-term memory from file if it exists."""
    -    memory_path = os.path.expanduser("~/.config/conn/ai_memory.md")
    +    memory_path = os.path.join(ai_config.defaultdir, "ai_memory.md")
         from connpy.ai import ai
     
         with patch("os.path.exists", side_effect=lambda p: True if p == memory_path else os.path.exists(p)):
    @@ -201,6 +201,224 @@ el.replaceWith(d);
     
     
     
    +
    +class TestAISessions +
    +
    +
    + +Expand source code + +
    class TestAISessions:
    +    @pytest.fixture
    +    def myai(self, ai_config, mock_litellm, tmp_path):
    +        from connpy.ai import ai
    +        ai_config.defaultdir = str(tmp_path)
    +        return ai(ai_config)
    +
    +    def test_sessions_dir_initialization(self, myai, tmp_path):
    +        assert os.path.exists(os.path.join(tmp_path, "ai_sessions"))
    +        assert myai.sessions_dir == str(tmp_path / "ai_sessions")
    +
    +    def test_generate_session_id(self, myai):
    +        session_id = myai._generate_session_id("Any query")
    +        # Format: YYYYMMDD-HHMMSS
    +        assert len(session_id) == 15
    +        assert "-" in session_id
    +        parts = session_id.split("-")
    +        assert len(parts[0]) == 8 # YYYYMMDD
    +        assert len(parts[1]) == 6 # HHMMSS
    +
    +    def test_save_and_load_session(self, myai):
    +        history = [
    +            {"role": "user", "content": "Hello"},
    +            {"role": "assistant", "content": "Hi"}
    +        ]
    +        myai.save_session(history, title="Test Session")
    +        session_id = myai.session_id
    +        
    +        # Load it back
    +        loaded = myai.load_session_data(session_id)
    +        assert loaded["title"] == "Test Session"
    +        assert loaded["history"] == history
    +        assert loaded["model"] == myai.engineer_model
    +
    +    def test_list_sessions(self, myai, capsys):
    +        history = [{"role": "user", "content": "Query 1"}]
    +        myai.save_session(history, title="Session 1")
    +        
    +        # Use a second instance to list
    +        myai.list_sessions()
    +        captured = capsys.readouterr()
    +        assert "Session 1" in captured.out
    +        assert "AI Persisted Sessions" in captured.out
    +
    +    def test_get_last_session_id(self, myai):
    +        # Save two sessions
    +        myai.session_id = None # Force new
    +        myai.save_session([{"role": "user", "content": "First"}])
    +        first_id = myai.session_id
    +        import time
    +        time.sleep(1.1) # Ensure different timestamp
    +        
    +        myai.session_id = None # Force new
    +        myai.save_session([{"role": "user", "content": "Second"}])
    +        second_id = myai.session_id
    +        
    +        last_id = myai.get_last_session_id()
    +        assert last_id == second_id
    +        assert last_id != first_id
    +
    +    def test_delete_session(self, myai):
    +        myai.save_session([{"role": "user", "content": "To be deleted"}])
    +        session_id = myai.session_id
    +        assert os.path.exists(myai.session_path)
    +        
    +        myai.delete_session(session_id)
    +        assert not os.path.exists(myai.session_path)
    +
    +
    +

    Methods

    +
    +
    +def myai(self, ai_config, mock_litellm, tmp_path) +
    +
    +
    + +Expand source code + +
    @pytest.fixture
    +def myai(self, ai_config, mock_litellm, tmp_path):
    +    from connpy.ai import ai
    +    ai_config.defaultdir = str(tmp_path)
    +    return ai(ai_config)
    +
    +
    +
    +
    +def test_delete_session(self, myai) +
    +
    +
    + +Expand source code + +
    def test_delete_session(self, myai):
    +    myai.save_session([{"role": "user", "content": "To be deleted"}])
    +    session_id = myai.session_id
    +    assert os.path.exists(myai.session_path)
    +    
    +    myai.delete_session(session_id)
    +    assert not os.path.exists(myai.session_path)
    +
    +
    +
    +
    +def test_generate_session_id(self, myai) +
    +
    +
    + +Expand source code + +
    def test_generate_session_id(self, myai):
    +    session_id = myai._generate_session_id("Any query")
    +    # Format: YYYYMMDD-HHMMSS
    +    assert len(session_id) == 15
    +    assert "-" in session_id
    +    parts = session_id.split("-")
    +    assert len(parts[0]) == 8 # YYYYMMDD
    +    assert len(parts[1]) == 6 # HHMMSS
    +
    +
    +
    +
    +def test_get_last_session_id(self, myai) +
    +
    +
    + +Expand source code + +
    def test_get_last_session_id(self, myai):
    +    # Save two sessions
    +    myai.session_id = None # Force new
    +    myai.save_session([{"role": "user", "content": "First"}])
    +    first_id = myai.session_id
    +    import time
    +    time.sleep(1.1) # Ensure different timestamp
    +    
    +    myai.session_id = None # Force new
    +    myai.save_session([{"role": "user", "content": "Second"}])
    +    second_id = myai.session_id
    +    
    +    last_id = myai.get_last_session_id()
    +    assert last_id == second_id
    +    assert last_id != first_id
    +
    +
    +
    +
    +def test_list_sessions(self, myai, capsys) +
    +
    +
    + +Expand source code + +
    def test_list_sessions(self, myai, capsys):
    +    history = [{"role": "user", "content": "Query 1"}]
    +    myai.save_session(history, title="Session 1")
    +    
    +    # Use a second instance to list
    +    myai.list_sessions()
    +    captured = capsys.readouterr()
    +    assert "Session 1" in captured.out
    +    assert "AI Persisted Sessions" in captured.out
    +
    +
    +
    +
    +def test_save_and_load_session(self, myai) +
    +
    +
    + +Expand source code + +
    def test_save_and_load_session(self, myai):
    +    history = [
    +        {"role": "user", "content": "Hello"},
    +        {"role": "assistant", "content": "Hi"}
    +    ]
    +    myai.save_session(history, title="Test Session")
    +    session_id = myai.session_id
    +    
    +    # Load it back
    +    loaded = myai.load_session_data(session_id)
    +    assert loaded["title"] == "Test Session"
    +    assert loaded["history"] == history
    +    assert loaded["model"] == myai.engineer_model
    +
    +
    +
    +
    +def test_sessions_dir_initialization(self, myai, tmp_path) +
    +
    +
    + +Expand source code + +
    def test_sessions_dir_initialization(self, myai, tmp_path):
    +    assert os.path.exists(os.path.join(tmp_path, "ai_sessions"))
    +    assert myai.sessions_dir == str(tmp_path / "ai_sessions")
    +
    +
    +
    +
    +
    class TestAsk
    @@ -807,7 +1025,18 @@ def myai(self, ai_config, mock_litellm): {"role": "assistant", "content": "Found r1"} ] result = myai._sanitize_messages(messages) - assert len(result) == 4
    + assert len(result) == 4 + + def test_sanitize_strips_cache_control(self, myai): + """_sanitize_messages should convert list-based content (with cache_control) back to strings.""" + messages = [ + {"role": "system", "content": [{"type": "text", "text": "system prompt", "cache_control": {"type": "ephemeral"}}]}, + {"role": "user", "content": "hello"} + ] + result = myai._sanitize_messages(messages) + assert result[0]["role"] == "system" + assert isinstance(result[0]["content"], str) + assert result[0]["content"] == "system prompt"

    Methods

    @@ -925,6 +1154,27 @@ def myai(self, ai_config, mock_litellm):

    Tool responses without preceding tool_calls are removed.

    +
    +def test_sanitize_strips_cache_control(self, myai) +
    +
    +
    + +Expand source code + +
    def test_sanitize_strips_cache_control(self, myai):
    +    """_sanitize_messages should convert list-based content (with cache_control) back to strings."""
    +    messages = [
    +        {"role": "system", "content": [{"type": "text", "text": "system prompt", "cache_control": {"type": "ephemeral"}}]},
    +        {"role": "user", "content": "hello"}
    +    ]
    +    result = myai._sanitize_messages(messages)
    +    assert result[0]["role"] == "system"
    +    assert isinstance(result[0]["content"], str)
    +    assert result[0]["content"] == "system prompt"
    +
    +

    _sanitize_messages should convert list-based content (with cache_control) back to strings.

    +
    @@ -1373,6 +1623,18 @@ def myai(self, ai_config, mock_litellm):
  • +

    TestAISessions

    + +
  • +
  • TestAsk

  • @@ -1464,7 +1727,7 @@ def myai(self, ai_config, mock_litellm): diff --git a/docs/connpy/tests/test_api.html b/docs/connpy/tests/test_api.html index 436b83a..7e8b0dd 100644 --- a/docs/connpy/tests/test_api.html +++ b/docs/connpy/tests/test_api.html @@ -3,7 +3,7 @@ - + connpy.tests.test_api API documentation @@ -876,7 +876,7 @@ def test_test_action(self, mock_nodes_cls, api_client): diff --git a/docs/connpy/tests/test_capture.html b/docs/connpy/tests/test_capture.html index db68f63..5b38b11 100644 --- a/docs/connpy/tests/test_capture.html +++ b/docs/connpy/tests/test_capture.html @@ -3,7 +3,7 @@ - + connpy.tests.test_capture API documentation @@ -229,7 +229,7 @@ def test_is_port_in_use(self, mock_socket, mock_connapp): diff --git a/docs/connpy/tests/test_completion.html b/docs/connpy/tests/test_completion.html index 7f6162f..587ffdd 100644 --- a/docs/connpy/tests/test_completion.html +++ b/docs/connpy/tests/test_completion.html @@ -3,7 +3,7 @@ - + connpy.tests.test_completion API documentation @@ -433,7 +433,7 @@ el.replaceWith(d); diff --git a/docs/connpy/tests/test_configfile.html b/docs/connpy/tests/test_configfile.html index 7134fb4..56f3118 100644 --- a/docs/connpy/tests/test_configfile.html +++ b/docs/connpy/tests/test_configfile.html @@ -3,7 +3,7 @@ - + connpy.tests.test_configfile API documentation @@ -2003,7 +2003,7 @@ el.replaceWith(d); diff --git a/docs/connpy/tests/test_context.html b/docs/connpy/tests/test_context.html index bc4ca50..46368fc 100644 --- a/docs/connpy/tests/test_context.html +++ b/docs/connpy/tests/test_context.html @@ -3,7 +3,7 @@ - + connpy.tests.test_context API documentation @@ -469,7 +469,7 @@ def mock_connapp(): diff --git a/docs/connpy/tests/test_core.html b/docs/connpy/tests/test_core.html index 9635d93..75b2a6f 100644 --- a/docs/connpy/tests/test_core.html +++ b/docs/connpy/tests/test_core.html @@ -3,7 +3,7 @@ - + connpy.tests.test_core API documentation @@ -1300,7 +1300,7 @@ el.replaceWith(d); diff --git 
a/docs/connpy/tests/test_hooks.html b/docs/connpy/tests/test_hooks.html index 32ce5b0..d953297 100644 --- a/docs/connpy/tests/test_hooks.html +++ b/docs/connpy/tests/test_hooks.html @@ -3,7 +3,7 @@ - + connpy.tests.test_hooks API documentation @@ -673,7 +673,7 @@ el.replaceWith(d); diff --git a/docs/connpy/tests/test_plugins.html b/docs/connpy/tests/test_plugins.html index 9417e56..50e72d6 100644 --- a/docs/connpy/tests/test_plugins.html +++ b/docs/connpy/tests/test_plugins.html @@ -3,7 +3,7 @@ - + connpy.tests.test_plugins API documentation @@ -917,7 +917,7 @@ el.replaceWith(d); diff --git a/docs/connpy/tests/test_printer.html b/docs/connpy/tests/test_printer.html index b3f4aa8..e3fcefc 100644 --- a/docs/connpy/tests/test_printer.html +++ b/docs/connpy/tests/test_printer.html @@ -3,7 +3,7 @@ - + connpy.tests.test_printer API documentation @@ -263,7 +263,7 @@ el.replaceWith(d); diff --git a/docs/connpy/tests/test_sync.html b/docs/connpy/tests/test_sync.html index d1e0b08..96e2f56 100644 --- a/docs/connpy/tests/test_sync.html +++ b/docs/connpy/tests/test_sync.html @@ -3,7 +3,7 @@ - + connpy.tests.test_sync API documentation @@ -390,7 +390,7 @@ def test_get_credentials_success(self, MockCreds, mock_exists, mock_connapp):