feat: implement AI session management, fix UI rendering, and release 5.0b6
- Bump version to 5.0b6 and regenerate HTML documentation via pdoc3. - Add persistent AI chat sessions (list, resume, delete) stored locally. - Fix 'rich' library console rendering and routing 'error()' to stderr. - Update Architect UI color theme to medium_purple. - Sanitize caching metadata (cache_control) for compatibility with non-Anthropic models. - Fix .folder config path redirection mapping and fzf-wrapper compatibility. - Ensure context plugin correctly filters node lists upon load. - Inject config instance directly into API components instead of instantiating globally. - Fix edge-case in plugin loading preventing startup when folder is missing. - Add comprehensive test coverage for printer module and AI sessions.
This commit is contained in:
+1
-1
@@ -1 +1 @@
|
||||
__version__ = "5.0b5"
|
||||
__version__ = "5.0b6"
|
||||
|
||||
+174
-45
@@ -13,11 +13,11 @@ litellm.set_verbose = False
|
||||
from .hooks import ClassHook, MethodHook
|
||||
from . import printer
|
||||
from rich.markdown import Markdown
|
||||
from rich.console import Console
|
||||
from rich.panel import Panel
|
||||
from rich.text import Text
|
||||
|
||||
console = Console()
|
||||
console = printer.console
|
||||
|
||||
|
||||
@ClassHook
|
||||
class ai:
|
||||
@@ -62,7 +62,7 @@ class ai:
|
||||
self.architect_prompt_extensions = [] # Extra text for architect prompt
|
||||
|
||||
# Long-term memory
|
||||
self.memory_path = os.path.expanduser("~/.config/conn/ai_memory.md")
|
||||
self.memory_path = os.path.join(self.config.defaultdir, "ai_memory.md")
|
||||
self.long_term_memory = ""
|
||||
if os.path.exists(self.memory_path):
|
||||
try:
|
||||
@@ -75,6 +75,12 @@ class ai:
|
||||
except Exception as e:
|
||||
console.print(f"[yellow]Warning: Failed to load AI memory: {e}[/yellow]")
|
||||
|
||||
# Session Management
|
||||
self.sessions_dir = os.path.join(self.config.defaultdir, "ai_sessions")
|
||||
os.makedirs(self.sessions_dir, exist_ok=True)
|
||||
self.session_id = None
|
||||
self.session_path = None
|
||||
|
||||
# Prompts base agnósticos
|
||||
self._engineer_base_prompt = dedent(f"""
|
||||
Role: TECHNICAL EXECUTION ENGINE.
|
||||
@@ -190,7 +196,7 @@ class ai:
|
||||
|
||||
# Determine styling based on current brain
|
||||
role_label = "Network Architect" if "architect" in label.lower() else "Network Engineer"
|
||||
border = "purple" if "architect" in label.lower() else "blue"
|
||||
border = "medium_purple" if "architect" in label.lower() else "blue"
|
||||
title = f"[bold {border}]{role_label}[/bold {border}]"
|
||||
|
||||
try:
|
||||
@@ -290,14 +296,34 @@ class ai:
|
||||
2. No user/system messages appear between tool_calls and tool responses
|
||||
3. Orphaned tool_calls at the end are removed
|
||||
4. Orphaned tool responses without a preceding tool_call are removed
|
||||
5. Incompatible metadata like cache_control is stripped for non-Anthropic models
|
||||
"""
|
||||
if not messages:
|
||||
return messages
|
||||
|
||||
# Pre-process messages to pull text from list contents (Anthropic cache format)
|
||||
# and remove explicit cache keys.
|
||||
pre_sanitized = []
|
||||
for msg in messages:
|
||||
m = msg.copy() if isinstance(msg, dict) else msg.model_dump(exclude_none=True)
|
||||
|
||||
# Convert content list to plain string if it's a system message with caching metadata
|
||||
if m.get('role') == 'system' and isinstance(m.get('content'), list):
|
||||
# Extraer texto de [{"type": "text", "text": "...", "cache_control": ...}]
|
||||
m['content'] = m['content'][0]['text'] if m['content'] else ""
|
||||
|
||||
# Remove any explicit cache_control key anywhere
|
||||
if 'cache_control' in m: del m['cache_control']
|
||||
if isinstance(m.get('content'), list):
|
||||
for item in m['content']:
|
||||
if isinstance(item, dict) and 'cache_control' in item: del item['cache_control']
|
||||
|
||||
pre_sanitized.append(m)
|
||||
|
||||
sanitized = []
|
||||
i = 0
|
||||
while i < len(messages):
|
||||
msg = messages[i]
|
||||
while i < len(pre_sanitized):
|
||||
msg = pre_sanitized[i]
|
||||
role = msg.get('role', '')
|
||||
|
||||
if role == 'assistant' and msg.get('tool_calls'):
|
||||
@@ -311,8 +337,8 @@ class ai:
|
||||
# Look ahead for matching tool responses
|
||||
tool_responses = []
|
||||
j = i + 1
|
||||
while j < len(messages):
|
||||
next_msg = messages[j]
|
||||
while j < len(pre_sanitized):
|
||||
next_msg = pre_sanitized[j]
|
||||
if next_msg.get('role') == 'tool':
|
||||
tool_responses.append(next_msg)
|
||||
j += 1
|
||||
@@ -470,23 +496,16 @@ class ai:
|
||||
|
||||
def _engineer_loop(self, task, status=None, debug=False, chat_history=None):
|
||||
"""Internal loop where the Engineer executes technical tasks for the Architect."""
|
||||
# Optimización de caché para el Ingeniero
|
||||
if "claude" in self.engineer_model.lower():
|
||||
# Optimización de caché para el Ingeniero (Solo para Anthropic directo, Vertex tiene reglas distintas)
|
||||
if "claude" in self.engineer_model.lower() and "vertex" not in self.engineer_model.lower():
|
||||
messages = [{"role": "system", "content": [{"type": "text", "text": self.engineer_system_prompt, "cache_control": {"type": "ephemeral"}}]}]
|
||||
else:
|
||||
messages = [{"role": "system", "content": self.engineer_system_prompt}]
|
||||
|
||||
if chat_history:
|
||||
# Clean chat history from caching metadata if engineer is not Claude
|
||||
if "claude" not in self.engineer_model.lower():
|
||||
cleaned_history = []
|
||||
for msg in chat_history[-5:]:
|
||||
m = msg if isinstance(msg, dict) else msg.model_dump(exclude_none=True)
|
||||
# Remove cache_control from system messages
|
||||
if m.get('role') == 'system' and isinstance(m.get('content'), list):
|
||||
m['content'] = m['content'][0]['text'] if m['content'] else ""
|
||||
cleaned_history.append(m)
|
||||
messages.extend(cleaned_history)
|
||||
# Clean chat history from caching metadata if engineer is not a compatible Claude model
|
||||
if "claude" not in self.engineer_model.lower() or "vertex" in self.engineer_model.lower():
|
||||
messages.extend(self._sanitize_messages(chat_history[-5:]))
|
||||
else:
|
||||
messages.extend(chat_history[-5:])
|
||||
|
||||
@@ -582,9 +601,125 @@ class ai:
|
||||
tools.extend(self.external_architect_tools)
|
||||
return tools
|
||||
|
||||
def _get_sessions(self):
|
||||
"""Returns a list of session metadata sorted by date."""
|
||||
sessions = []
|
||||
if not os.path.exists(self.sessions_dir):
|
||||
return []
|
||||
for f in os.listdir(self.sessions_dir):
|
||||
if f.endswith(".json"):
|
||||
path = os.path.join(self.sessions_dir, f)
|
||||
try:
|
||||
with open(path, "r") as fs:
|
||||
data = json.load(fs)
|
||||
sessions.append({
|
||||
"id": f[:-5],
|
||||
"title": data.get("title", "Untitled Session"),
|
||||
"created_at": data.get("created_at", "Unknown"),
|
||||
"model": data.get("model", "Unknown"),
|
||||
"path": path
|
||||
})
|
||||
except Exception:
|
||||
continue
|
||||
return sorted(sessions, key=lambda x: x["created_at"], reverse=True)
|
||||
|
||||
def list_sessions(self):
|
||||
"""Prints a list of sessions using printer.table."""
|
||||
sessions = self._get_sessions()
|
||||
if not sessions:
|
||||
printer.info("No saved AI sessions found.")
|
||||
return
|
||||
|
||||
columns = ["ID", "Title", "Created At", "Model"]
|
||||
rows = [[s["id"], s["title"], s["created_at"], s["model"]] for s in sessions]
|
||||
printer.table("AI Persisted Sessions", columns, rows)
|
||||
|
||||
def load_session_data(self, session_id):
|
||||
"""Loads a session's raw data by ID."""
|
||||
path = os.path.join(self.sessions_dir, f"{session_id}.json")
|
||||
if os.path.exists(path):
|
||||
try:
|
||||
with open(path, "r") as f:
|
||||
data = json.load(f)
|
||||
self.session_id = session_id
|
||||
self.session_path = path
|
||||
return data
|
||||
except Exception as e:
|
||||
printer.error(f"Failed to load session {session_id}: {e}")
|
||||
return None
|
||||
|
||||
def delete_session(self, session_id):
|
||||
"""Deletes a session by ID."""
|
||||
path = os.path.join(self.sessions_dir, f"{session_id}.json")
|
||||
if os.path.exists(path):
|
||||
os.remove(path)
|
||||
printer.success(f"Session {session_id} deleted.")
|
||||
else:
|
||||
printer.error(f"Session {session_id} not found.")
|
||||
|
||||
def get_last_session_id(self):
|
||||
"""Returns the ID of the most recent session."""
|
||||
sessions = self._get_sessions()
|
||||
return sessions[0]["id"] if sessions else None
|
||||
|
||||
def _generate_session_id(self, query):
|
||||
"""Generates a unique session ID based on timestamp."""
|
||||
return datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
|
||||
|
||||
def save_session(self, history, title=None, model=None):
|
||||
"""Saves current history to the session file."""
|
||||
if not self.session_id:
|
||||
# Generate ID from first user query if available
|
||||
first_user_msg = next((m["content"] for m in history if m["role"] == "user"), "new-session")
|
||||
self.session_id = self._generate_session_id(first_user_msg)
|
||||
self.session_path = os.path.join(self.sessions_dir, f"{self.session_id}.json")
|
||||
|
||||
# If it's a new file, we might want to set a better title
|
||||
if not os.path.exists(self.session_path) and not title:
|
||||
raw_title = next((m["content"] for m in history if m["role"] == "user"), "New Session")
|
||||
# Clean title: remove newlines, multiple spaces
|
||||
clean_title = " ".join(raw_title.split())
|
||||
if len(clean_title) > 40:
|
||||
title = clean_title[:37].strip() + "..."
|
||||
else:
|
||||
title = clean_title
|
||||
|
||||
try:
|
||||
# Read existing metadata if it exists
|
||||
metadata = {}
|
||||
if os.path.exists(self.session_path):
|
||||
with open(self.session_path, "r") as f:
|
||||
metadata = json.load(f)
|
||||
|
||||
metadata.update({
|
||||
"id": self.session_id,
|
||||
"title": title or metadata.get("title", "New Session"),
|
||||
"created_at": metadata.get("created_at", datetime.datetime.now().isoformat()),
|
||||
"updated_at": datetime.datetime.now().isoformat(),
|
||||
"model": model or metadata.get("model", self.engineer_model),
|
||||
"history": history
|
||||
})
|
||||
|
||||
with open(self.session_path, "w") as f:
|
||||
json.dump(metadata, f, indent=4)
|
||||
except Exception as e:
|
||||
printer.error(f"Failed to save session: {e}")
|
||||
|
||||
except Exception as e:
|
||||
printer.error(f"Failed to save session: {e}")
|
||||
|
||||
@MethodHook
|
||||
def ask(self, user_input, dryrun=False, chat_history=None, status=None, debug=False, stream=True):
|
||||
def ask(self, user_input, dryrun=False, chat_history=None, status=None, debug=False, stream=True, session_id=None):
|
||||
if chat_history is None: chat_history = []
|
||||
|
||||
# Load session if provided and history is empty
|
||||
if session_id and not chat_history:
|
||||
session_data = self.load_session_data(session_id)
|
||||
if session_data:
|
||||
chat_history = session_data.get("history", [])
|
||||
# If we loaded history, the caller might need it back
|
||||
# But typically ask() is called in a loop with an external history object
|
||||
|
||||
usage = {"input": 0, "output": 0, "total": 0}
|
||||
|
||||
# 1. Selector de Rol inicial (Sticky Brain)
|
||||
@@ -618,15 +753,20 @@ class ai:
|
||||
model = self.architect_model if current_brain == "architect" else self.engineer_model
|
||||
key = self.architect_key if current_brain == "architect" else self.engineer_key
|
||||
|
||||
# Estructura optimizada para Prompt Caching
|
||||
if "claude" in model.lower():
|
||||
# Estructura optimizada para Prompt Caching (Solo para Anthropic directo, Vertex tiene reglas distintas)
|
||||
if "claude" in model.lower() and "vertex" not in model.lower():
|
||||
messages = [{"role": "system", "content": [{"type": "text", "text": system_prompt, "cache_control": {"type": "ephemeral"}}]}]
|
||||
else:
|
||||
messages = [{"role": "system", "content": system_prompt}]
|
||||
|
||||
# Interleaving de historial
|
||||
last_role = "system"
|
||||
for msg in chat_history[-self.max_history:]:
|
||||
# Sanitize history if the current target model is not compatible with cache_control
|
||||
history_to_process = chat_history[-self.max_history:]
|
||||
if "claude" not in model.lower() or "vertex" in model.lower():
|
||||
history_to_process = self._sanitize_messages(history_to_process)
|
||||
|
||||
for msg in history_to_process:
|
||||
m = msg if isinstance(msg, dict) else msg.model_dump(exclude_none=True)
|
||||
role = m.get('role')
|
||||
if role == last_role and role == 'user':
|
||||
@@ -654,7 +794,7 @@ class ai:
|
||||
console.print(f"[yellow] You can press Ctrl+C to interrupt and get a summary of progress.[/yellow]")
|
||||
soft_limit_warned = True
|
||||
|
||||
label = "[bold purple]Architect" if current_brain == "architect" else "[bold blue]Engineer"
|
||||
label = "[bold medium_purple]Architect" if current_brain == "architect" else "[bold blue]Engineer"
|
||||
if status: status.update(f"{label} is thinking... (step {iteration})")
|
||||
|
||||
streamed_response = False
|
||||
@@ -699,7 +839,7 @@ class ai:
|
||||
messages.append(msg_dict)
|
||||
|
||||
if debug and resp_msg.content:
|
||||
console.print(Panel(Markdown(resp_msg.content), title=f"{label} Reasoning", border_style="purple" if current_brain == "architect" else "blue"))
|
||||
console.print(Panel(Markdown(resp_msg.content), title=f"{label} Reasoning", border_style="medium_purple" if current_brain == "architect" else "blue"))
|
||||
|
||||
if not resp_msg.tool_calls: break
|
||||
|
||||
@@ -716,8 +856,8 @@ class ai:
|
||||
continue
|
||||
|
||||
if status:
|
||||
if fn == "delegate_to_engineer": status.update(f"[bold purple]Architect: [DELEGATING MISSION] {args.get('task','')[:40]}...")
|
||||
elif fn == "manage_memory_tool": status.update(f"[bold purple]Architect: [UPDATING MEMORY]")
|
||||
if fn == "delegate_to_engineer": status.update(f"[bold medium_purple]Architect: [DELEGATING MISSION] {args.get('task','')[:40]}...")
|
||||
elif fn == "manage_memory_tool": status.update(f"[bold medium_purple]Architect: [UPDATING MEMORY]")
|
||||
|
||||
if debug: console.print(Panel(Text(json.dumps(args, indent=2)), title=f"{label} Decision: {fn}", border_style="white"))
|
||||
|
||||
@@ -725,7 +865,7 @@ class ai:
|
||||
obs, eng_usage = self._engineer_loop(args["task"], status=status, debug=debug, chat_history=messages[:-1])
|
||||
usage["input"] += eng_usage["input"]; usage["output"] += eng_usage["output"]; usage["total"] += eng_usage["total"]
|
||||
elif fn == "consult_architect":
|
||||
if status: status.update("[bold purple]Engineer consulting Architect...")
|
||||
if status: status.update("[bold medium_purple]Engineer consulting Architect...")
|
||||
try:
|
||||
# Consultation only - Engineer stays in control
|
||||
claude_resp = completion(
|
||||
@@ -738,13 +878,13 @@ class ai:
|
||||
num_retries=3
|
||||
)
|
||||
obs = claude_resp.choices[0].message.content
|
||||
if debug: console.print(Panel(Markdown(obs), title="[bold purple]Architect Consultation[/bold purple]", border_style="purple"))
|
||||
if debug: console.print(Panel(Markdown(obs), title="[bold medium_purple]Architect Consultation[/bold medium_purple]", border_style="medium_purple"))
|
||||
except Exception as e:
|
||||
if status: status.update("[bold orange3]Architect unavailable! Engineer continuing alone...")
|
||||
obs = f"Architect unavailable ({str(e)}). Proceeding with your best technical judgment."
|
||||
|
||||
elif fn == "escalate_to_architect":
|
||||
if status: status.update("[bold purple]Transferring control to Architect...")
|
||||
if status: status.update("[bold medium_purple]Transferring control to Architect...")
|
||||
# Full escalation - Architect takes over
|
||||
current_brain = "architect"
|
||||
model = self.architect_model
|
||||
@@ -755,7 +895,7 @@ class ai:
|
||||
handover_msg = f"HANDOVER FROM EXECUTION ENGINE\n\nReason: {args['reason']}\n\nContext: {args['context']}\n\nYou are now in control of this conversation."
|
||||
pending_user_message = handover_msg
|
||||
obs = "Control transferred to Architect. Handover context will be provided."
|
||||
if debug: console.print(Panel(Text(handover_msg), title="[bold purple]Escalation to Architect[/bold purple]", border_style="purple"))
|
||||
if debug: console.print(Panel(Text(handover_msg), title="[bold medium_purple]Escalation to Architect[/bold medium_purple]", border_style="medium_purple"))
|
||||
|
||||
elif fn == "return_to_engineer":
|
||||
if status: status.update("[bold blue]Transferring control back to Engineer...")
|
||||
@@ -813,19 +953,8 @@ class ai:
|
||||
messages.append(resp_msg.model_dump(exclude_none=True))
|
||||
except Exception: pass
|
||||
finally:
|
||||
try:
|
||||
log_dir = self.config.defaultdir
|
||||
os.makedirs(log_dir, exist_ok=True)
|
||||
log_path = os.path.join(log_dir, "ai_debug.json")
|
||||
hist = []
|
||||
if os.path.exists(log_path):
|
||||
try:
|
||||
with open(log_path, "r") as f: hist = json.load(f)
|
||||
except (IOError, json.JSONDecodeError): hist = []
|
||||
hist.append({"timestamp": datetime.datetime.now().isoformat(), "roles": {"strategic_engine": self.architect_model, "execution_engine": self.engineer_model}, "session": messages})
|
||||
with open(log_path, "w") as f: json.dump(hist[-10:], f, indent=4)
|
||||
except Exception as e:
|
||||
if debug: console.print(f"[dim red]Debug log failed: {e}[/dim red]")
|
||||
# Auto-save session
|
||||
self.save_session(messages, model=model)
|
||||
|
||||
return {
|
||||
"response": messages[-1].get("content"),
|
||||
|
||||
+7
-7
@@ -8,7 +8,7 @@ import signal
|
||||
|
||||
app = Flask(__name__)
|
||||
CORS(app)
|
||||
conf = configfile()
|
||||
# conf = configfile() # REMOVED: Item #1 in Roadmap -> Don't instantiate globally
|
||||
|
||||
PID_FILE1 = "/run/connpy.pid"
|
||||
PID_FILE2 = "/tmp/connpy.pid"
|
||||
@@ -156,23 +156,23 @@ def stop_api():
|
||||
return port
|
||||
|
||||
@hooks.MethodHook
|
||||
def debug_api(port=8048):
|
||||
app.custom_config = configfile()
|
||||
def debug_api(port=8048, config=None):
|
||||
app.custom_config = config or configfile()
|
||||
app.run(debug=True, port=port)
|
||||
|
||||
@hooks.MethodHook
|
||||
def start_server(port=8048):
|
||||
app.custom_config = configfile()
|
||||
def start_server(port=8048, config=None):
|
||||
app.custom_config = config or configfile()
|
||||
serve(app, host='0.0.0.0', port=port)
|
||||
|
||||
@hooks.MethodHook
|
||||
def start_api(port=8048):
|
||||
def start_api(port=8048, config=None):
|
||||
if os.path.exists(PID_FILE1) or os.path.exists(PID_FILE2):
|
||||
printer.warning("Connpy server is already running.")
|
||||
return
|
||||
pid = os.fork()
|
||||
if pid == 0:
|
||||
start_server(port)
|
||||
start_server(port, config=config)
|
||||
else:
|
||||
try:
|
||||
with open(PID_FILE1, "w") as f:
|
||||
|
||||
+42
-32
@@ -56,27 +56,31 @@ class configfile:
|
||||
'''
|
||||
home = os.path.expanduser("~")
|
||||
defaultdir = home + '/.config/conn'
|
||||
self.defaultdir = defaultdir
|
||||
Path(defaultdir).mkdir(parents=True, exist_ok=True)
|
||||
Path(f"{defaultdir}/plugins").mkdir(parents=True, exist_ok=True)
|
||||
pathfile = defaultdir + '/.folder'
|
||||
try:
|
||||
with open(pathfile, "r") as f:
|
||||
configdir = f.read().strip()
|
||||
except (FileNotFoundError, IOError):
|
||||
with open(pathfile, "w") as f:
|
||||
f.write(str(defaultdir))
|
||||
configdir = defaultdir
|
||||
defaultfile = configdir + '/config.yaml'
|
||||
self.cachefile = configdir + '/.config.cache.json'
|
||||
self.fzf_cachefile = configdir + '/.fzf_nodes_cache.txt'
|
||||
self.folders_cachefile = configdir + '/.folders_cache.txt'
|
||||
self.profiles_cachefile = configdir + '/.profiles_cache.txt'
|
||||
defaultkey = configdir + '/.osk'
|
||||
if conf == None:
|
||||
self.file = defaultfile
|
||||
|
||||
# Backwards compatibility: Migrate from JSON to YAML
|
||||
if conf is None:
|
||||
# Standard path: use ~/.config/conn and respect .folder redirection
|
||||
self.anchor_path = defaultdir
|
||||
self.defaultdir = defaultdir
|
||||
Path(defaultdir).mkdir(parents=True, exist_ok=True)
|
||||
|
||||
pathfile = defaultdir + '/.folder'
|
||||
try:
|
||||
with open(pathfile, "r") as f:
|
||||
configdir = f.read().strip()
|
||||
except (FileNotFoundError, IOError):
|
||||
with open(pathfile, "w") as f:
|
||||
f.write(str(defaultdir))
|
||||
configdir = defaultdir
|
||||
|
||||
self.defaultdir = configdir
|
||||
self.file = configdir + '/config.yaml'
|
||||
self.key = key or (configdir + '/.osk')
|
||||
|
||||
# Ensure redirected directories exist
|
||||
Path(configdir).mkdir(parents=True, exist_ok=True)
|
||||
Path(f"{configdir}/plugins").mkdir(parents=True, exist_ok=True)
|
||||
|
||||
# Backwards compatibility: Migrate from JSON to YAML only for default path
|
||||
legacy_json = configdir + '/config.json'
|
||||
legacy_noext = configdir + '/config'
|
||||
legacy_file = None
|
||||
@@ -99,38 +103,44 @@ class configfile:
|
||||
os.remove(self.file)
|
||||
printer.warning("YAML verification failed after migration, keeping legacy config.")
|
||||
else:
|
||||
with open(self.cachefile, 'w') as f:
|
||||
# Note: cachefile is derived later, we use temp one for migration sync
|
||||
temp_cache = configdir + '/.config.cache.json'
|
||||
with open(temp_cache, 'w') as f:
|
||||
json.dump(old_data, f)
|
||||
shutil.move(legacy_file, legacy_file + ".backup")
|
||||
printer.success(f"Migrated legacy config ({len(old_data.get('connections',{}))} folders/nodes) into YAML and Cache successfully!")
|
||||
except Exception as e:
|
||||
# Clean up partial YAML if it was created
|
||||
if os.path.exists(self.file):
|
||||
try:
|
||||
os.remove(self.file)
|
||||
except OSError:
|
||||
pass
|
||||
try: os.remove(self.file)
|
||||
except OSError: pass
|
||||
printer.warning(f"Failed to migrate legacy config: {e}")
|
||||
else:
|
||||
self.file = conf
|
||||
# Custom path (common in tests): isolate everything to the conf parent directory
|
||||
self.file = os.path.abspath(conf)
|
||||
configdir = os.path.dirname(self.file)
|
||||
self.anchor_path = configdir
|
||||
self.defaultdir = configdir
|
||||
self.key = os.path.abspath(key) if key else (configdir + '/.osk')
|
||||
|
||||
if key == None:
|
||||
self.key = defaultkey
|
||||
else:
|
||||
self.key = key
|
||||
# Sidecar files always live next to the config file (or in the redirected configdir)
|
||||
self.cachefile = configdir + '/.config.cache.json'
|
||||
self.fzf_cachefile = configdir + '/.fzf_nodes_cache.txt'
|
||||
self.folders_cachefile = configdir + '/.folders_cache.txt'
|
||||
self.profiles_cachefile = configdir + '/.profiles_cache.txt'
|
||||
|
||||
if os.path.exists(self.file):
|
||||
config = self._loadconfig(self.file)
|
||||
else:
|
||||
config = self._createconfig(self.file)
|
||||
|
||||
self.config = config["config"]
|
||||
self.connections = config["connections"]
|
||||
self.profiles = config["profiles"]
|
||||
|
||||
if not os.path.exists(self.key):
|
||||
self._createkey(self.key)
|
||||
with open(self.key) as f:
|
||||
self.privatekey = RSA.import_key(f.read())
|
||||
f.close()
|
||||
self.publickey = self.privatekey.publickey()
|
||||
|
||||
# Self-heal text caches if they are missing
|
||||
|
||||
+68
-28
@@ -18,14 +18,15 @@ class NoAliasDumper(yaml.SafeDumper):
|
||||
def ignore_aliases(self, data):
|
||||
return True
|
||||
from rich.markdown import Markdown
|
||||
from rich.console import Console, Group
|
||||
from rich.markdown import Markdown
|
||||
from rich.panel import Panel
|
||||
from rich.text import Text
|
||||
from rich.rule import Rule
|
||||
from rich.style import Style
|
||||
from rich.prompt import Prompt
|
||||
mdprint = Console().print
|
||||
console = Console()
|
||||
mdprint = printer.console.print
|
||||
console = printer.console
|
||||
|
||||
try:
|
||||
from pyfzf.pyfzf import FzfPrompt
|
||||
except ImportError:
|
||||
@@ -135,6 +136,10 @@ class connapp:
|
||||
aiparser.add_argument("--architect-model", nargs=1, help="Override architect model")
|
||||
aiparser.add_argument("--architect-api-key", nargs=1, help="Override architect api key")
|
||||
aiparser.add_argument("--debug", action="store_true", help="Show AI reasoning and tool calls")
|
||||
aiparser.add_argument("--list", "--list-sessions", dest="list_sessions", action="store_true", help="List saved AI sessions")
|
||||
aiparser.add_argument("--session", nargs=1, help="Resume a specific AI session by ID")
|
||||
aiparser.add_argument("--resume", action="store_true", help="Resume the most recent AI session")
|
||||
aiparser.add_argument("--delete", "--delete-session", dest="delete_session", nargs=1, help="Delete an AI session by ID")
|
||||
aiparser.set_defaults(func=self._func_ai)
|
||||
#RUNPARSER
|
||||
runparser = subparsers.add_parser("run", description="Run scripts or commands on nodes", formatter_class=argparse.RawTextHelpFormatter)
|
||||
@@ -188,8 +193,10 @@ class connapp:
|
||||
for preload in self.plugins.preloads.values():
|
||||
preload.Preload(self)
|
||||
|
||||
if not os.path.exists(self.config.fzf_cachefile):
|
||||
self.config._generate_nodes_cache()
|
||||
# Update internal state and force cache generation after all preloads
|
||||
self.nodes_list = self.config._getallnodes()
|
||||
self.folders = self.config._getallfolders()
|
||||
self.config._generate_nodes_cache()
|
||||
|
||||
#Generate helps
|
||||
nodeparser.usage = self._help("usage", subparsers)
|
||||
@@ -656,7 +663,7 @@ class connapp:
|
||||
if not os.path.isdir(args.data[0]):
|
||||
raise argparse.ArgumentTypeError(f"readable_dir:{args.data[0]} is not a valid path")
|
||||
else:
|
||||
pathfile = self.config.defaultdir + "/.folder"
|
||||
pathfile = self.config.anchor_path + "/.folder"
|
||||
folder = os.path.abspath(args.data[0]).rstrip('/')
|
||||
with open(pathfile, "w") as f:
|
||||
f.write(str(folder))
|
||||
@@ -803,13 +810,15 @@ class connapp:
|
||||
plugins = {}
|
||||
|
||||
# Iterate over all files in the specified folder
|
||||
for file in os.listdir(self.config.defaultdir + "/plugins"):
|
||||
# Check if the file is a Python file
|
||||
if file.endswith('.py'):
|
||||
enabled_files.append(os.path.splitext(file)[0])
|
||||
# Check if the file is a Python backup file
|
||||
elif file.endswith('.py.bkp'):
|
||||
disabled_files.append(os.path.splitext(os.path.splitext(file)[0])[0])
|
||||
plugins_dir = self.config.defaultdir + "/plugins"
|
||||
if os.path.exists(plugins_dir):
|
||||
for file in os.listdir(plugins_dir):
|
||||
# Check if the file is a Python file
|
||||
if file.endswith('.py'):
|
||||
enabled_files.append(os.path.splitext(file)[0])
|
||||
# Check if the file is a Python backup file
|
||||
elif file.endswith('.py.bkp'):
|
||||
disabled_files.append(os.path.splitext(os.path.splitext(file)[0])[0])
|
||||
if enabled_files:
|
||||
plugins["Enabled"] = enabled_files
|
||||
if disabled_files:
|
||||
@@ -899,17 +908,35 @@ class connapp:
|
||||
|
||||
self.myai = self.ai(self.config, **arguments)
|
||||
|
||||
# 1. Gestionar comandos de sesión (Listar/Borrar)
|
||||
if args.list_sessions:
|
||||
self.myai.list_sessions()
|
||||
return
|
||||
|
||||
if args.delete_session:
|
||||
self.myai.delete_session(args.delete_session[0])
|
||||
return
|
||||
|
||||
# 2. Determinar session_id para retomar
|
||||
session_id = None
|
||||
if args.resume:
|
||||
session_id = self.myai.get_last_session_id()
|
||||
if not session_id:
|
||||
printer.warning("No previous session found to resume.")
|
||||
elif args.session:
|
||||
session_id = args.session[0]
|
||||
|
||||
if args.ask:
|
||||
# Single question mode
|
||||
query = " ".join(args.ask)
|
||||
with console.status("[bold green]Agent is thinking and analyzing...") as status:
|
||||
result = self.myai.ask(query, status=status, debug=args.debug)
|
||||
result = self.myai.ask(query, status=status, debug=args.debug, session_id=session_id)
|
||||
|
||||
# Determine title and color based on responder
|
||||
responder = result.get("responder", "engineer")
|
||||
if responder == "architect":
|
||||
title = "[bold purple]Network Architect[/bold purple]"
|
||||
border_style = "purple"
|
||||
title = "[bold medium_purple]Network Architect[/bold medium_purple]"
|
||||
border_style = "medium_purple"
|
||||
else:
|
||||
title = "[bold blue]Network Engineer[/bold blue]"
|
||||
border_style = "blue"
|
||||
@@ -927,9 +954,20 @@ class connapp:
|
||||
else:
|
||||
# Interactive chat mode
|
||||
history = None
|
||||
mdprint(Rule(style="bold blue"))
|
||||
mdprint(Markdown("**Networking Expert Agent**: Hi! I'm your assistant. I can help you diagnose issues, run commands, and manage your nodes.\nType 'exit' to quit.\n"))
|
||||
mdprint(Rule(style="bold blue"))
|
||||
if session_id:
|
||||
session_data = self.myai.load_session_data(session_id)
|
||||
if session_data:
|
||||
history = session_data.get("history", [])
|
||||
mdprint(Rule(title=f"[bold cyan] Resuming Session: {session_data.get('title')} [/bold cyan]", style="cyan"))
|
||||
else:
|
||||
printer.error(f"Could not load session {session_id}. Starting clean.")
|
||||
|
||||
if not history:
|
||||
mdprint(Rule(style="bold blue"))
|
||||
mdprint(Markdown("**Networking Expert Agent**: Hi! I'm your assistant. I can help you diagnose issues, run commands, and manage your nodes.\nType 'exit' to quit.\n"))
|
||||
mdprint(Rule(style="bold blue"))
|
||||
else:
|
||||
mdprint(f"[dim]Analyzing {len(history)} previous messages...[/dim]\n")
|
||||
|
||||
while True:
|
||||
try:
|
||||
@@ -984,18 +1022,18 @@ class connapp:
|
||||
return True
|
||||
|
||||
def _func_api(self, args):
|
||||
if args.command == "stop" or args.command == "restart":
|
||||
if args.command == "stop" or args.command == "restart" or args.command == "stop":
|
||||
args.data = self.stop_api()
|
||||
if args.command == "start" or args.command == "restart":
|
||||
if args.data:
|
||||
self.start_api(args.data)
|
||||
self.start_api(args.data, config=self.config)
|
||||
else:
|
||||
self.start_api()
|
||||
self.start_api(config=self.config)
|
||||
if args.command == "debug":
|
||||
if args.data:
|
||||
self.debug_api(args.data)
|
||||
self.debug_api(args.data, config=self.config)
|
||||
else:
|
||||
self.debug_api()
|
||||
self.debug_api(config=self.config)
|
||||
return
|
||||
|
||||
def _node_run(self, args):
|
||||
@@ -1577,8 +1615,9 @@ compdef _conn connpy
|
||||
connpy() {
|
||||
if [ $# -eq 0 ]; then
|
||||
local selected
|
||||
if [ -f ~/.config/conn/.fzf_nodes_cache.txt ]; then
|
||||
selected=$(cat ~/.config/conn/.fzf_nodes_cache.txt | fzf-tmux -d 25% --reverse)
|
||||
local configdir=$(cat ~/.config/conn/.folder 2>/dev/null || echo ~/.config/conn)
|
||||
if [ -s "$configdir/.fzf_nodes_cache.txt" ]; then
|
||||
selected=$(cat "$configdir/.fzf_nodes_cache.txt" | fzf-tmux -i -d 25%)
|
||||
else
|
||||
command connpy
|
||||
return
|
||||
@@ -1598,8 +1637,9 @@ alias c="connpy"
|
||||
connpy() {
|
||||
if [ $# -eq 0 ]; then
|
||||
local selected
|
||||
if [ -f ~/.config/conn/.fzf_nodes_cache.txt ]; then
|
||||
selected=$(cat ~/.config/conn/.fzf_nodes_cache.txt | fzf-tmux -d 25% --reverse)
|
||||
local configdir=$(cat ~/.config/conn/.folder 2>/dev/null || echo ~/.config/conn)
|
||||
if [ -s "$configdir/.fzf_nodes_cache.txt" ]; then
|
||||
selected=$(cat "$configdir/.fzf_nodes_cache.txt" | fzf-tmux -i -d 25%)
|
||||
else
|
||||
command connpy
|
||||
return
|
||||
|
||||
@@ -117,16 +117,19 @@ class context_manager:
|
||||
|
||||
class Preload:
|
||||
def __init__(self, connapp):
|
||||
#define contexts if doesn't exist
|
||||
connapp.config.modify(context_manager.add_default_context)
|
||||
#filter nodes using context
|
||||
cm = context_manager(connapp)
|
||||
connapp.nodes_list = [node for node in connapp.nodes_list if cm.match_any_regex(node, cm.regex)]
|
||||
connapp.folders = [node for node in connapp.folders if cm.match_any_regex(node, cm.regex)]
|
||||
# Register hooks first so that any save triggers a filtered cache generation
|
||||
connapp.config._getallnodes.register_post_hook(cm.modify_node_list)
|
||||
connapp.config._getallfolders.register_post_hook(cm.modify_node_list)
|
||||
connapp.config._getallnodesfull.register_post_hook(cm.modify_node_dict)
|
||||
|
||||
# Define contexts if doesn't exist (triggers save/cache generation)
|
||||
connapp.config.modify(context_manager.add_default_context)
|
||||
|
||||
# Filter in-memory nodes using current context
|
||||
connapp.nodes_list = [node for node in connapp.nodes_list if cm.match_any_regex(node, cm.regex)]
|
||||
connapp.folders = [node for node in connapp.folders if cm.match_any_regex(node, cm.regex)]
|
||||
|
||||
class Parser:
|
||||
def __init__(self):
|
||||
self.parser = argparse.ArgumentParser(description="Manage contexts with regex matching", formatter_class=argparse.RawTextHelpFormatter)
|
||||
|
||||
@@ -115,6 +115,8 @@ class Plugins:
|
||||
return module
|
||||
|
||||
def _import_plugins_to_argparse(self, directory, subparsers):
|
||||
if not os.path.exists(directory):
|
||||
return
|
||||
for filename in os.listdir(directory):
|
||||
commands = subparsers.choices.keys()
|
||||
if filename.endswith(".py"):
|
||||
|
||||
+27
-9
@@ -1,33 +1,51 @@
|
||||
import sys
|
||||
from rich.console import Console
|
||||
from rich.table import Table
|
||||
from rich.live import Live
|
||||
|
||||
console = Console()
|
||||
err_console = Console(stderr=True)
|
||||
|
||||
|
||||
def _format_multiline(tag, message):
|
||||
message = str(message)
|
||||
lines = message.splitlines()
|
||||
if not lines:
|
||||
return f"[{tag}]"
|
||||
formatted = [f"[{tag}] {lines[0]}"]
|
||||
return f"\\[{tag}]"
|
||||
formatted = [f"\\[{tag}] {lines[0]}"]
|
||||
indent = " " * (len(tag) + 3)
|
||||
for line in lines[1:]:
|
||||
formatted.append(f"{indent}{line}")
|
||||
return "\n".join(formatted)
|
||||
|
||||
def info(message):
|
||||
print(_format_multiline("i", message))
|
||||
console.print(_format_multiline("i", message))
|
||||
|
||||
def success(message):
|
||||
print(_format_multiline("✓", message))
|
||||
console.print(_format_multiline("✓", message))
|
||||
|
||||
def start(message):
|
||||
print(_format_multiline("+", message))
|
||||
console.print(_format_multiline("+", message))
|
||||
|
||||
def warning(message):
|
||||
print(_format_multiline("!", message))
|
||||
console.print(_format_multiline("!", message))
|
||||
|
||||
def error(message):
|
||||
print(_format_multiline("✗", message), file=sys.stderr)
|
||||
# For error, we can create a temporary stderr console or just use the current one
|
||||
# err_console handles styles better than standard print and outputs to stderr.
|
||||
err_console.print(_format_multiline("✗", message), style="red")
|
||||
|
||||
def debug(message):
|
||||
print(_format_multiline("d", message))
|
||||
console.print(_format_multiline("d", message))
|
||||
|
||||
def custom(tag, message):
|
||||
print(_format_multiline(tag, message))
|
||||
console.print(_format_multiline(tag, message))
|
||||
|
||||
def table(title, columns, rows, header_style="bold cyan", box=None):
|
||||
t = Table(title=title, header_style=header_style, box=box)
|
||||
for col in columns:
|
||||
t.add_column(col)
|
||||
for row in rows:
|
||||
t.add_row(*[str(item) for item in row])
|
||||
console.print(t)
|
||||
|
||||
|
||||
+85
-1
@@ -42,7 +42,7 @@ class TestAIInit:
|
||||
|
||||
def test_init_loads_memory(self, ai_config, tmp_path, mock_litellm):
|
||||
"""Loads long-term memory from file if it exists."""
|
||||
memory_path = os.path.expanduser("~/.config/conn/ai_memory.md")
|
||||
memory_path = os.path.join(ai_config.defaultdir, "ai_memory.md")
|
||||
from connpy.ai import ai
|
||||
|
||||
with patch("os.path.exists", side_effect=lambda p: True if p == memory_path else os.path.exists(p)):
|
||||
@@ -210,6 +210,17 @@ class TestSanitizeMessages:
|
||||
result = myai._sanitize_messages(messages)
|
||||
assert len(result) == 4
|
||||
|
||||
def test_sanitize_strips_cache_control(self, myai):
|
||||
"""_sanitize_messages should convert list-based content (with cache_control) back to strings."""
|
||||
messages = [
|
||||
{"role": "system", "content": [{"type": "text", "text": "system prompt", "cache_control": {"type": "ephemeral"}}]},
|
||||
{"role": "user", "content": "hello"}
|
||||
]
|
||||
result = myai._sanitize_messages(messages)
|
||||
assert result[0]["role"] == "system"
|
||||
assert isinstance(result[0]["content"], str)
|
||||
assert result[0]["content"] == "system prompt"
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# _truncate tests
|
||||
@@ -395,3 +406,76 @@ class TestToolDefinitions:
|
||||
tools = myai._get_architect_tools()
|
||||
names = [t["function"]["name"] for t in tools]
|
||||
assert "arch_tool" in names
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# AI Session Management tests
|
||||
# =========================================================================
|
||||
|
||||
class TestAISessions:
|
||||
@pytest.fixture
|
||||
def myai(self, ai_config, mock_litellm, tmp_path):
|
||||
from connpy.ai import ai
|
||||
ai_config.defaultdir = str(tmp_path)
|
||||
return ai(ai_config)
|
||||
|
||||
def test_sessions_dir_initialization(self, myai, tmp_path):
|
||||
assert os.path.exists(os.path.join(tmp_path, "ai_sessions"))
|
||||
assert myai.sessions_dir == str(tmp_path / "ai_sessions")
|
||||
|
||||
def test_generate_session_id(self, myai):
|
||||
session_id = myai._generate_session_id("Any query")
|
||||
# Format: YYYYMMDD-HHMMSS
|
||||
assert len(session_id) == 15
|
||||
assert "-" in session_id
|
||||
parts = session_id.split("-")
|
||||
assert len(parts[0]) == 8 # YYYYMMDD
|
||||
assert len(parts[1]) == 6 # HHMMSS
|
||||
|
||||
def test_save_and_load_session(self, myai):
|
||||
history = [
|
||||
{"role": "user", "content": "Hello"},
|
||||
{"role": "assistant", "content": "Hi"}
|
||||
]
|
||||
myai.save_session(history, title="Test Session")
|
||||
session_id = myai.session_id
|
||||
|
||||
# Load it back
|
||||
loaded = myai.load_session_data(session_id)
|
||||
assert loaded["title"] == "Test Session"
|
||||
assert loaded["history"] == history
|
||||
assert loaded["model"] == myai.engineer_model
|
||||
|
||||
def test_list_sessions(self, myai, capsys):
|
||||
history = [{"role": "user", "content": "Query 1"}]
|
||||
myai.save_session(history, title="Session 1")
|
||||
|
||||
# Use a second instance to list
|
||||
myai.list_sessions()
|
||||
captured = capsys.readouterr()
|
||||
assert "Session 1" in captured.out
|
||||
assert "AI Persisted Sessions" in captured.out
|
||||
|
||||
def test_get_last_session_id(self, myai):
|
||||
# Save two sessions
|
||||
myai.session_id = None # Force new
|
||||
myai.save_session([{"role": "user", "content": "First"}])
|
||||
first_id = myai.session_id
|
||||
import time
|
||||
time.sleep(1.1) # Ensure different timestamp
|
||||
|
||||
myai.session_id = None # Force new
|
||||
myai.save_session([{"role": "user", "content": "Second"}])
|
||||
second_id = myai.session_id
|
||||
|
||||
last_id = myai.get_last_session_id()
|
||||
assert last_id == second_id
|
||||
assert last_id != first_id
|
||||
|
||||
def test_delete_session(self, myai):
|
||||
myai.save_session([{"role": "user", "content": "To be deleted"}])
|
||||
session_id = myai.session_id
|
||||
assert os.path.exists(myai.session_path)
|
||||
|
||||
myai.delete_session(session_id)
|
||||
assert not os.path.exists(myai.session_path)
|
||||
|
||||
+384
-104
@@ -3,7 +3,7 @@
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1">
|
||||
<meta name="generator" content="pdoc3 0.11.6">
|
||||
<meta name="generator" content="pdoc3 0.11.5">
|
||||
<title>connpy API documentation</title>
|
||||
<meta name="description" content="Connection manager …">
|
||||
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/10up-sanitize.css/13.0.0/sanitize.min.css" integrity="sha512-y1dtMcuvtTMJc1yPgEqF0ZjQbhnc/bFhyvIyVNb9Zk5mIGtqVaAB1Ttl28su8AvFMOY0EwRbAe+HCLqj6W7/KA==" crossorigin>
|
||||
@@ -683,6 +683,8 @@ class Preload:
|
||||
return module
|
||||
|
||||
def _import_plugins_to_argparse(self, directory, subparsers):
|
||||
if not os.path.exists(directory):
|
||||
return
|
||||
for filename in os.listdir(directory):
|
||||
commands = subparsers.choices.keys()
|
||||
if filename.endswith(".py"):
|
||||
@@ -890,7 +892,7 @@ class ai:
|
||||
self.architect_prompt_extensions = [] # Extra text for architect prompt
|
||||
|
||||
# Long-term memory
|
||||
self.memory_path = os.path.expanduser("~/.config/conn/ai_memory.md")
|
||||
self.memory_path = os.path.join(self.config.defaultdir, "ai_memory.md")
|
||||
self.long_term_memory = ""
|
||||
if os.path.exists(self.memory_path):
|
||||
try:
|
||||
@@ -903,6 +905,12 @@ class ai:
|
||||
except Exception as e:
|
||||
console.print(f"[yellow]Warning: Failed to load AI memory: {e}[/yellow]")
|
||||
|
||||
# Session Management
|
||||
self.sessions_dir = os.path.join(self.config.defaultdir, "ai_sessions")
|
||||
os.makedirs(self.sessions_dir, exist_ok=True)
|
||||
self.session_id = None
|
||||
self.session_path = None
|
||||
|
||||
# Prompts base agnósticos
|
||||
self._engineer_base_prompt = dedent(f"""
|
||||
Role: TECHNICAL EXECUTION ENGINE.
|
||||
@@ -1018,7 +1026,7 @@ class ai:
|
||||
|
||||
# Determine styling based on current brain
|
||||
role_label = "Network Architect" if "architect" in label.lower() else "Network Engineer"
|
||||
border = "purple" if "architect" in label.lower() else "blue"
|
||||
border = "medium_purple" if "architect" in label.lower() else "blue"
|
||||
title = f"[bold {border}]{role_label}[/bold {border}]"
|
||||
|
||||
try:
|
||||
@@ -1118,14 +1126,34 @@ class ai:
|
||||
2. No user/system messages appear between tool_calls and tool responses
|
||||
3. Orphaned tool_calls at the end are removed
|
||||
4. Orphaned tool responses without a preceding tool_call are removed
|
||||
5. Incompatible metadata like cache_control is stripped for non-Anthropic models
|
||||
"""
|
||||
if not messages:
|
||||
return messages
|
||||
|
||||
# Pre-process messages to pull text from list contents (Anthropic cache format)
|
||||
# and remove explicit cache keys.
|
||||
pre_sanitized = []
|
||||
for msg in messages:
|
||||
m = msg.copy() if isinstance(msg, dict) else msg.model_dump(exclude_none=True)
|
||||
|
||||
# Convert content list to plain string if it's a system message with caching metadata
|
||||
if m.get('role') == 'system' and isinstance(m.get('content'), list):
|
||||
# Extraer texto de [{"type": "text", "text": "...", "cache_control": ...}]
|
||||
m['content'] = m['content'][0]['text'] if m['content'] else ""
|
||||
|
||||
# Remove any explicit cache_control key anywhere
|
||||
if 'cache_control' in m: del m['cache_control']
|
||||
if isinstance(m.get('content'), list):
|
||||
for item in m['content']:
|
||||
if isinstance(item, dict) and 'cache_control' in item: del item['cache_control']
|
||||
|
||||
pre_sanitized.append(m)
|
||||
|
||||
sanitized = []
|
||||
i = 0
|
||||
while i < len(messages):
|
||||
msg = messages[i]
|
||||
while i < len(pre_sanitized):
|
||||
msg = pre_sanitized[i]
|
||||
role = msg.get('role', '')
|
||||
|
||||
if role == 'assistant' and msg.get('tool_calls'):
|
||||
@@ -1139,8 +1167,8 @@ class ai:
|
||||
# Look ahead for matching tool responses
|
||||
tool_responses = []
|
||||
j = i + 1
|
||||
while j < len(messages):
|
||||
next_msg = messages[j]
|
||||
while j < len(pre_sanitized):
|
||||
next_msg = pre_sanitized[j]
|
||||
if next_msg.get('role') == 'tool':
|
||||
tool_responses.append(next_msg)
|
||||
j += 1
|
||||
@@ -1298,23 +1326,16 @@ class ai:
|
||||
|
||||
def _engineer_loop(self, task, status=None, debug=False, chat_history=None):
|
||||
"""Internal loop where the Engineer executes technical tasks for the Architect."""
|
||||
# Optimización de caché para el Ingeniero
|
||||
if "claude" in self.engineer_model.lower():
|
||||
# Optimización de caché para el Ingeniero (Solo para Anthropic directo, Vertex tiene reglas distintas)
|
||||
if "claude" in self.engineer_model.lower() and "vertex" not in self.engineer_model.lower():
|
||||
messages = [{"role": "system", "content": [{"type": "text", "text": self.engineer_system_prompt, "cache_control": {"type": "ephemeral"}}]}]
|
||||
else:
|
||||
messages = [{"role": "system", "content": self.engineer_system_prompt}]
|
||||
|
||||
if chat_history:
|
||||
# Clean chat history from caching metadata if engineer is not Claude
|
||||
if "claude" not in self.engineer_model.lower():
|
||||
cleaned_history = []
|
||||
for msg in chat_history[-5:]:
|
||||
m = msg if isinstance(msg, dict) else msg.model_dump(exclude_none=True)
|
||||
# Remove cache_control from system messages
|
||||
if m.get('role') == 'system' and isinstance(m.get('content'), list):
|
||||
m['content'] = m['content'][0]['text'] if m['content'] else ""
|
||||
cleaned_history.append(m)
|
||||
messages.extend(cleaned_history)
|
||||
# Clean chat history from caching metadata if engineer is not a compatible Claude model
|
||||
if "claude" not in self.engineer_model.lower() or "vertex" in self.engineer_model.lower():
|
||||
messages.extend(self._sanitize_messages(chat_history[-5:]))
|
||||
else:
|
||||
messages.extend(chat_history[-5:])
|
||||
|
||||
@@ -1410,9 +1431,125 @@ class ai:
|
||||
tools.extend(self.external_architect_tools)
|
||||
return tools
|
||||
|
||||
def _get_sessions(self):
|
||||
"""Returns a list of session metadata sorted by date."""
|
||||
sessions = []
|
||||
if not os.path.exists(self.sessions_dir):
|
||||
return []
|
||||
for f in os.listdir(self.sessions_dir):
|
||||
if f.endswith(".json"):
|
||||
path = os.path.join(self.sessions_dir, f)
|
||||
try:
|
||||
with open(path, "r") as fs:
|
||||
data = json.load(fs)
|
||||
sessions.append({
|
||||
"id": f[:-5],
|
||||
"title": data.get("title", "Untitled Session"),
|
||||
"created_at": data.get("created_at", "Unknown"),
|
||||
"model": data.get("model", "Unknown"),
|
||||
"path": path
|
||||
})
|
||||
except Exception:
|
||||
continue
|
||||
return sorted(sessions, key=lambda x: x["created_at"], reverse=True)
|
||||
|
||||
def list_sessions(self):
|
||||
"""Prints a list of sessions using printer.table."""
|
||||
sessions = self._get_sessions()
|
||||
if not sessions:
|
||||
printer.info("No saved AI sessions found.")
|
||||
return
|
||||
|
||||
columns = ["ID", "Title", "Created At", "Model"]
|
||||
rows = [[s["id"], s["title"], s["created_at"], s["model"]] for s in sessions]
|
||||
printer.table("AI Persisted Sessions", columns, rows)
|
||||
|
||||
def load_session_data(self, session_id):
|
||||
"""Loads a session's raw data by ID."""
|
||||
path = os.path.join(self.sessions_dir, f"{session_id}.json")
|
||||
if os.path.exists(path):
|
||||
try:
|
||||
with open(path, "r") as f:
|
||||
data = json.load(f)
|
||||
self.session_id = session_id
|
||||
self.session_path = path
|
||||
return data
|
||||
except Exception as e:
|
||||
printer.error(f"Failed to load session {session_id}: {e}")
|
||||
return None
|
||||
|
||||
def delete_session(self, session_id):
|
||||
"""Deletes a session by ID."""
|
||||
path = os.path.join(self.sessions_dir, f"{session_id}.json")
|
||||
if os.path.exists(path):
|
||||
os.remove(path)
|
||||
printer.success(f"Session {session_id} deleted.")
|
||||
else:
|
||||
printer.error(f"Session {session_id} not found.")
|
||||
|
||||
def get_last_session_id(self):
|
||||
"""Returns the ID of the most recent session."""
|
||||
sessions = self._get_sessions()
|
||||
return sessions[0]["id"] if sessions else None
|
||||
|
||||
def _generate_session_id(self, query):
|
||||
"""Generates a unique session ID based on timestamp."""
|
||||
return datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
|
||||
|
||||
def save_session(self, history, title=None, model=None):
|
||||
"""Saves current history to the session file."""
|
||||
if not self.session_id:
|
||||
# Generate ID from first user query if available
|
||||
first_user_msg = next((m["content"] for m in history if m["role"] == "user"), "new-session")
|
||||
self.session_id = self._generate_session_id(first_user_msg)
|
||||
self.session_path = os.path.join(self.sessions_dir, f"{self.session_id}.json")
|
||||
|
||||
# If it's a new file, we might want to set a better title
|
||||
if not os.path.exists(self.session_path) and not title:
|
||||
raw_title = next((m["content"] for m in history if m["role"] == "user"), "New Session")
|
||||
# Clean title: remove newlines, multiple spaces
|
||||
clean_title = " ".join(raw_title.split())
|
||||
if len(clean_title) > 40:
|
||||
title = clean_title[:37].strip() + "..."
|
||||
else:
|
||||
title = clean_title
|
||||
|
||||
try:
|
||||
# Read existing metadata if it exists
|
||||
metadata = {}
|
||||
if os.path.exists(self.session_path):
|
||||
with open(self.session_path, "r") as f:
|
||||
metadata = json.load(f)
|
||||
|
||||
metadata.update({
|
||||
"id": self.session_id,
|
||||
"title": title or metadata.get("title", "New Session"),
|
||||
"created_at": metadata.get("created_at", datetime.datetime.now().isoformat()),
|
||||
"updated_at": datetime.datetime.now().isoformat(),
|
||||
"model": model or metadata.get("model", self.engineer_model),
|
||||
"history": history
|
||||
})
|
||||
|
||||
with open(self.session_path, "w") as f:
|
||||
json.dump(metadata, f, indent=4)
|
||||
except Exception as e:
|
||||
printer.error(f"Failed to save session: {e}")
|
||||
|
||||
except Exception as e:
|
||||
printer.error(f"Failed to save session: {e}")
|
||||
|
||||
@MethodHook
|
||||
def ask(self, user_input, dryrun=False, chat_history=None, status=None, debug=False, stream=True):
|
||||
def ask(self, user_input, dryrun=False, chat_history=None, status=None, debug=False, stream=True, session_id=None):
|
||||
if chat_history is None: chat_history = []
|
||||
|
||||
# Load session if provided and history is empty
|
||||
if session_id and not chat_history:
|
||||
session_data = self.load_session_data(session_id)
|
||||
if session_data:
|
||||
chat_history = session_data.get("history", [])
|
||||
# If we loaded history, the caller might need it back
|
||||
# But typically ask() is called in a loop with an external history object
|
||||
|
||||
usage = {"input": 0, "output": 0, "total": 0}
|
||||
|
||||
# 1. Selector de Rol inicial (Sticky Brain)
|
||||
@@ -1446,15 +1583,20 @@ class ai:
|
||||
model = self.architect_model if current_brain == "architect" else self.engineer_model
|
||||
key = self.architect_key if current_brain == "architect" else self.engineer_key
|
||||
|
||||
# Estructura optimizada para Prompt Caching
|
||||
if "claude" in model.lower():
|
||||
# Estructura optimizada para Prompt Caching (Solo para Anthropic directo, Vertex tiene reglas distintas)
|
||||
if "claude" in model.lower() and "vertex" not in model.lower():
|
||||
messages = [{"role": "system", "content": [{"type": "text", "text": system_prompt, "cache_control": {"type": "ephemeral"}}]}]
|
||||
else:
|
||||
messages = [{"role": "system", "content": system_prompt}]
|
||||
|
||||
# Interleaving de historial
|
||||
last_role = "system"
|
||||
for msg in chat_history[-self.max_history:]:
|
||||
# Sanitize history if the current target model is not compatible with cache_control
|
||||
history_to_process = chat_history[-self.max_history:]
|
||||
if "claude" not in model.lower() or "vertex" in model.lower():
|
||||
history_to_process = self._sanitize_messages(history_to_process)
|
||||
|
||||
for msg in history_to_process:
|
||||
m = msg if isinstance(msg, dict) else msg.model_dump(exclude_none=True)
|
||||
role = m.get('role')
|
||||
if role == last_role and role == 'user':
|
||||
@@ -1482,7 +1624,7 @@ class ai:
|
||||
console.print(f"[yellow] You can press Ctrl+C to interrupt and get a summary of progress.[/yellow]")
|
||||
soft_limit_warned = True
|
||||
|
||||
label = "[bold purple]Architect" if current_brain == "architect" else "[bold blue]Engineer"
|
||||
label = "[bold medium_purple]Architect" if current_brain == "architect" else "[bold blue]Engineer"
|
||||
if status: status.update(f"{label} is thinking... (step {iteration})")
|
||||
|
||||
streamed_response = False
|
||||
@@ -1527,7 +1669,7 @@ class ai:
|
||||
messages.append(msg_dict)
|
||||
|
||||
if debug and resp_msg.content:
|
||||
console.print(Panel(Markdown(resp_msg.content), title=f"{label} Reasoning", border_style="purple" if current_brain == "architect" else "blue"))
|
||||
console.print(Panel(Markdown(resp_msg.content), title=f"{label} Reasoning", border_style="medium_purple" if current_brain == "architect" else "blue"))
|
||||
|
||||
if not resp_msg.tool_calls: break
|
||||
|
||||
@@ -1544,8 +1686,8 @@ class ai:
|
||||
continue
|
||||
|
||||
if status:
|
||||
if fn == "delegate_to_engineer": status.update(f"[bold purple]Architect: [DELEGATING MISSION] {args.get('task','')[:40]}...")
|
||||
elif fn == "manage_memory_tool": status.update(f"[bold purple]Architect: [UPDATING MEMORY]")
|
||||
if fn == "delegate_to_engineer": status.update(f"[bold medium_purple]Architect: [DELEGATING MISSION] {args.get('task','')[:40]}...")
|
||||
elif fn == "manage_memory_tool": status.update(f"[bold medium_purple]Architect: [UPDATING MEMORY]")
|
||||
|
||||
if debug: console.print(Panel(Text(json.dumps(args, indent=2)), title=f"{label} Decision: {fn}", border_style="white"))
|
||||
|
||||
@@ -1553,7 +1695,7 @@ class ai:
|
||||
obs, eng_usage = self._engineer_loop(args["task"], status=status, debug=debug, chat_history=messages[:-1])
|
||||
usage["input"] += eng_usage["input"]; usage["output"] += eng_usage["output"]; usage["total"] += eng_usage["total"]
|
||||
elif fn == "consult_architect":
|
||||
if status: status.update("[bold purple]Engineer consulting Architect...")
|
||||
if status: status.update("[bold medium_purple]Engineer consulting Architect...")
|
||||
try:
|
||||
# Consultation only - Engineer stays in control
|
||||
claude_resp = completion(
|
||||
@@ -1566,13 +1708,13 @@ class ai:
|
||||
num_retries=3
|
||||
)
|
||||
obs = claude_resp.choices[0].message.content
|
||||
if debug: console.print(Panel(Markdown(obs), title="[bold purple]Architect Consultation[/bold purple]", border_style="purple"))
|
||||
if debug: console.print(Panel(Markdown(obs), title="[bold medium_purple]Architect Consultation[/bold medium_purple]", border_style="medium_purple"))
|
||||
except Exception as e:
|
||||
if status: status.update("[bold orange3]Architect unavailable! Engineer continuing alone...")
|
||||
obs = f"Architect unavailable ({str(e)}). Proceeding with your best technical judgment."
|
||||
|
||||
elif fn == "escalate_to_architect":
|
||||
if status: status.update("[bold purple]Transferring control to Architect...")
|
||||
if status: status.update("[bold medium_purple]Transferring control to Architect...")
|
||||
# Full escalation - Architect takes over
|
||||
current_brain = "architect"
|
||||
model = self.architect_model
|
||||
@@ -1583,7 +1725,7 @@ class ai:
|
||||
handover_msg = f"HANDOVER FROM EXECUTION ENGINE\n\nReason: {args['reason']}\n\nContext: {args['context']}\n\nYou are now in control of this conversation."
|
||||
pending_user_message = handover_msg
|
||||
obs = "Control transferred to Architect. Handover context will be provided."
|
||||
if debug: console.print(Panel(Text(handover_msg), title="[bold purple]Escalation to Architect[/bold purple]", border_style="purple"))
|
||||
if debug: console.print(Panel(Text(handover_msg), title="[bold medium_purple]Escalation to Architect[/bold medium_purple]", border_style="medium_purple"))
|
||||
|
||||
elif fn == "return_to_engineer":
|
||||
if status: status.update("[bold blue]Transferring control back to Engineer...")
|
||||
@@ -1641,19 +1783,8 @@ class ai:
|
||||
messages.append(resp_msg.model_dump(exclude_none=True))
|
||||
except Exception: pass
|
||||
finally:
|
||||
try:
|
||||
log_dir = self.config.defaultdir
|
||||
os.makedirs(log_dir, exist_ok=True)
|
||||
log_path = os.path.join(log_dir, "ai_debug.json")
|
||||
hist = []
|
||||
if os.path.exists(log_path):
|
||||
try:
|
||||
with open(log_path, "r") as f: hist = json.load(f)
|
||||
except (IOError, json.JSONDecodeError): hist = []
|
||||
hist.append({"timestamp": datetime.datetime.now().isoformat(), "roles": {"strategic_engine": self.architect_model, "execution_engine": self.engineer_model}, "session": messages})
|
||||
with open(log_path, "w") as f: json.dump(hist[-10:], f, indent=4)
|
||||
except Exception as e:
|
||||
if debug: console.print(f"[dim red]Debug log failed: {e}[/dim red]")
|
||||
# Auto-save session
|
||||
self.save_session(messages, model=model)
|
||||
|
||||
return {
|
||||
"response": messages[-1].get("content"),
|
||||
@@ -1672,7 +1803,7 @@ class ai:
|
||||
<dl>
|
||||
<dt id="connpy.ai.SAFE_COMMANDS"><code class="name">var <span class="ident">SAFE_COMMANDS</span></code></dt>
|
||||
<dd>
|
||||
<div class="desc"><p>The type of the None singleton.</p></div>
|
||||
<div class="desc"></div>
|
||||
</dd>
|
||||
</dl>
|
||||
<h3>Instance variables</h3>
|
||||
@@ -1713,7 +1844,7 @@ def engineer_system_prompt(self):
|
||||
<h3>Methods</h3>
|
||||
<dl>
|
||||
<dt id="connpy.ai.ask"><code class="name flex">
|
||||
<span>def <span class="ident">ask</span></span>(<span>self,<br>user_input,<br>dryrun=False,<br>chat_history=None,<br>status=None,<br>debug=False,<br>stream=True)</span>
|
||||
<span>def <span class="ident">ask</span></span>(<span>self,<br>user_input,<br>dryrun=False,<br>chat_history=None,<br>status=None,<br>debug=False,<br>stream=True,<br>session_id=None)</span>
|
||||
</code></dt>
|
||||
<dd>
|
||||
<details class="source">
|
||||
@@ -1721,8 +1852,17 @@ def engineer_system_prompt(self):
|
||||
<span>Expand source code</span>
|
||||
</summary>
|
||||
<pre><code class="python">@MethodHook
|
||||
def ask(self, user_input, dryrun=False, chat_history=None, status=None, debug=False, stream=True):
|
||||
def ask(self, user_input, dryrun=False, chat_history=None, status=None, debug=False, stream=True, session_id=None):
|
||||
if chat_history is None: chat_history = []
|
||||
|
||||
# Load session if provided and history is empty
|
||||
if session_id and not chat_history:
|
||||
session_data = self.load_session_data(session_id)
|
||||
if session_data:
|
||||
chat_history = session_data.get("history", [])
|
||||
# If we loaded history, the caller might need it back
|
||||
# But typically ask() is called in a loop with an external history object
|
||||
|
||||
usage = {"input": 0, "output": 0, "total": 0}
|
||||
|
||||
# 1. Initial role selector (Sticky Brain)
|
||||
@@ -1756,15 +1896,20 @@ def ask(self, user_input, dryrun=False, chat_history=None, status=None, debug=Fa
|
||||
model = self.architect_model if current_brain == "architect" else self.engineer_model
|
||||
key = self.architect_key if current_brain == "architect" else self.engineer_key
|
||||
|
||||
# Structure optimized for Prompt Caching
|
||||
if "claude" in model.lower():
|
||||
# Structure optimized for Prompt Caching (direct Anthropic only; Vertex has different rules)
|
||||
if "claude" in model.lower() and "vertex" not in model.lower():
|
||||
messages = [{"role": "system", "content": [{"type": "text", "text": system_prompt, "cache_control": {"type": "ephemeral"}}]}]
|
||||
else:
|
||||
messages = [{"role": "system", "content": system_prompt}]
|
||||
|
||||
# Interleave the history
|
||||
last_role = "system"
|
||||
for msg in chat_history[-self.max_history:]:
|
||||
# Sanitize history if the current target model is not compatible with cache_control
|
||||
history_to_process = chat_history[-self.max_history:]
|
||||
if "claude" not in model.lower() or "vertex" in model.lower():
|
||||
history_to_process = self._sanitize_messages(history_to_process)
|
||||
|
||||
for msg in history_to_process:
|
||||
m = msg if isinstance(msg, dict) else msg.model_dump(exclude_none=True)
|
||||
role = m.get('role')
|
||||
if role == last_role and role == 'user':
|
||||
@@ -1792,7 +1937,7 @@ def ask(self, user_input, dryrun=False, chat_history=None, status=None, debug=Fa
|
||||
console.print(f"[yellow] You can press Ctrl+C to interrupt and get a summary of progress.[/yellow]")
|
||||
soft_limit_warned = True
|
||||
|
||||
label = "[bold purple]Architect" if current_brain == "architect" else "[bold blue]Engineer"
|
||||
label = "[bold medium_purple]Architect" if current_brain == "architect" else "[bold blue]Engineer"
|
||||
if status: status.update(f"{label} is thinking... (step {iteration})")
|
||||
|
||||
streamed_response = False
|
||||
@@ -1837,7 +1982,7 @@ def ask(self, user_input, dryrun=False, chat_history=None, status=None, debug=Fa
|
||||
messages.append(msg_dict)
|
||||
|
||||
if debug and resp_msg.content:
|
||||
console.print(Panel(Markdown(resp_msg.content), title=f"{label} Reasoning", border_style="purple" if current_brain == "architect" else "blue"))
|
||||
console.print(Panel(Markdown(resp_msg.content), title=f"{label} Reasoning", border_style="medium_purple" if current_brain == "architect" else "blue"))
|
||||
|
||||
if not resp_msg.tool_calls: break
|
||||
|
||||
@@ -1854,8 +1999,8 @@ def ask(self, user_input, dryrun=False, chat_history=None, status=None, debug=Fa
|
||||
continue
|
||||
|
||||
if status:
|
||||
if fn == "delegate_to_engineer": status.update(f"[bold purple]Architect: [DELEGATING MISSION] {args.get('task','')[:40]}...")
|
||||
elif fn == "manage_memory_tool": status.update(f"[bold purple]Architect: [UPDATING MEMORY]")
|
||||
if fn == "delegate_to_engineer": status.update(f"[bold medium_purple]Architect: [DELEGATING MISSION] {args.get('task','')[:40]}...")
|
||||
elif fn == "manage_memory_tool": status.update(f"[bold medium_purple]Architect: [UPDATING MEMORY]")
|
||||
|
||||
if debug: console.print(Panel(Text(json.dumps(args, indent=2)), title=f"{label} Decision: {fn}", border_style="white"))
|
||||
|
||||
@@ -1863,7 +2008,7 @@ def ask(self, user_input, dryrun=False, chat_history=None, status=None, debug=Fa
|
||||
obs, eng_usage = self._engineer_loop(args["task"], status=status, debug=debug, chat_history=messages[:-1])
|
||||
usage["input"] += eng_usage["input"]; usage["output"] += eng_usage["output"]; usage["total"] += eng_usage["total"]
|
||||
elif fn == "consult_architect":
|
||||
if status: status.update("[bold purple]Engineer consulting Architect...")
|
||||
if status: status.update("[bold medium_purple]Engineer consulting Architect...")
|
||||
try:
|
||||
# Consultation only - Engineer stays in control
|
||||
claude_resp = completion(
|
||||
@@ -1876,13 +2021,13 @@ def ask(self, user_input, dryrun=False, chat_history=None, status=None, debug=Fa
|
||||
num_retries=3
|
||||
)
|
||||
obs = claude_resp.choices[0].message.content
|
||||
if debug: console.print(Panel(Markdown(obs), title="[bold purple]Architect Consultation[/bold purple]", border_style="purple"))
|
||||
if debug: console.print(Panel(Markdown(obs), title="[bold medium_purple]Architect Consultation[/bold medium_purple]", border_style="medium_purple"))
|
||||
except Exception as e:
|
||||
if status: status.update("[bold orange3]Architect unavailable! Engineer continuing alone...")
|
||||
obs = f"Architect unavailable ({str(e)}). Proceeding with your best technical judgment."
|
||||
|
||||
elif fn == "escalate_to_architect":
|
||||
if status: status.update("[bold purple]Transferring control to Architect...")
|
||||
if status: status.update("[bold medium_purple]Transferring control to Architect...")
|
||||
# Full escalation - Architect takes over
|
||||
current_brain = "architect"
|
||||
model = self.architect_model
|
||||
@@ -1893,7 +2038,7 @@ def ask(self, user_input, dryrun=False, chat_history=None, status=None, debug=Fa
|
||||
handover_msg = f"HANDOVER FROM EXECUTION ENGINE\n\nReason: {args['reason']}\n\nContext: {args['context']}\n\nYou are now in control of this conversation."
|
||||
pending_user_message = handover_msg
|
||||
obs = "Control transferred to Architect. Handover context will be provided."
|
||||
if debug: console.print(Panel(Text(handover_msg), title="[bold purple]Escalation to Architect[/bold purple]", border_style="purple"))
|
||||
if debug: console.print(Panel(Text(handover_msg), title="[bold medium_purple]Escalation to Architect[/bold medium_purple]", border_style="medium_purple"))
|
||||
|
||||
elif fn == "return_to_engineer":
|
||||
if status: status.update("[bold blue]Transferring control back to Engineer...")
|
||||
@@ -1951,19 +2096,8 @@ def ask(self, user_input, dryrun=False, chat_history=None, status=None, debug=Fa
|
||||
messages.append(resp_msg.model_dump(exclude_none=True))
|
||||
except Exception: pass
|
||||
finally:
|
||||
try:
|
||||
log_dir = self.config.defaultdir
|
||||
os.makedirs(log_dir, exist_ok=True)
|
||||
log_path = os.path.join(log_dir, "ai_debug.json")
|
||||
hist = []
|
||||
if os.path.exists(log_path):
|
||||
try:
|
||||
with open(log_path, "r") as f: hist = json.load(f)
|
||||
except (IOError, json.JSONDecodeError): hist = []
|
||||
hist.append({"timestamp": datetime.datetime.now().isoformat(), "roles": {"strategic_engine": self.architect_model, "execution_engine": self.engineer_model}, "session": messages})
|
||||
with open(log_path, "w") as f: json.dump(hist[-10:], f, indent=4)
|
||||
except Exception as e:
|
||||
if debug: console.print(f"[dim red]Debug log failed: {e}[/dim red]")
|
||||
# Auto-save session
|
||||
self.save_session(messages, model=model)
|
||||
|
||||
return {
|
||||
"response": messages[-1].get("content"),
|
||||
@@ -1989,6 +2123,40 @@ def confirm(self, user_input): return True</code></pre>
|
||||
</details>
|
||||
<div class="desc"></div>
|
||||
</dd>
|
||||
<dt id="connpy.ai.delete_session"><code class="name flex">
|
||||
<span>def <span class="ident">delete_session</span></span>(<span>self, session_id)</span>
|
||||
</code></dt>
|
||||
<dd>
|
||||
<details class="source">
|
||||
<summary>
|
||||
<span>Expand source code</span>
|
||||
</summary>
|
||||
<pre><code class="python">def delete_session(self, session_id):
    """Deletes a session by ID."""
    path = os.path.join(self.sessions_dir, f"{session_id}.json")
    if os.path.exists(path):
        os.remove(path)
        printer.success(f"Session {session_id} deleted.")
    else:
        printer.error(f"Session {session_id} not found.")</code></pre>
|
||||
</details>
|
||||
<div class="desc"><p>Deletes a session by ID.</p></div>
|
||||
</dd>
|
||||
<dt id="connpy.ai.get_last_session_id"><code class="name flex">
|
||||
<span>def <span class="ident">get_last_session_id</span></span>(<span>self)</span>
|
||||
</code></dt>
|
||||
<dd>
|
||||
<details class="source">
|
||||
<summary>
|
||||
<span>Expand source code</span>
|
||||
</summary>
|
||||
<pre><code class="python">def get_last_session_id(self):
    """Returns the ID of the most recent session."""
    sessions = self._get_sessions()
    return sessions[0]["id"] if sessions else None</code></pre>
|
||||
</details>
|
||||
<div class="desc"><p>Returns the ID of the most recent session.</p></div>
|
||||
</dd>
|
||||
<dt id="connpy.ai.get_node_info_tool"><code class="name flex">
|
||||
<span>def <span class="ident">get_node_info_tool</span></span>(<span>self, node_name)</span>
|
||||
</code></dt>
|
||||
@@ -2037,6 +2205,51 @@ def confirm(self, user_input): return True</code></pre>
|
||||
</details>
|
||||
<div class="desc"><p>List nodes matching the filter pattern. Returns metadata for <=5 nodes, names only for more.</p></div>
|
||||
</dd>
|
||||
<dt id="connpy.ai.list_sessions"><code class="name flex">
|
||||
<span>def <span class="ident">list_sessions</span></span>(<span>self)</span>
|
||||
</code></dt>
|
||||
<dd>
|
||||
<details class="source">
|
||||
<summary>
|
||||
<span>Expand source code</span>
|
||||
</summary>
|
||||
<pre><code class="python">def list_sessions(self):
    """Prints a list of sessions using printer.table."""
    sessions = self._get_sessions()
    if not sessions:
        printer.info("No saved AI sessions found.")
        return

    columns = ["ID", "Title", "Created At", "Model"]
    rows = [[s["id"], s["title"], s["created_at"], s["model"]] for s in sessions]
    printer.table("AI Persisted Sessions", columns, rows)</code></pre>
|
||||
</details>
|
||||
<div class="desc"><p>Prints a list of sessions using printer.table.</p></div>
|
||||
</dd>
|
||||
<dt id="connpy.ai.load_session_data"><code class="name flex">
|
||||
<span>def <span class="ident">load_session_data</span></span>(<span>self, session_id)</span>
|
||||
</code></dt>
|
||||
<dd>
|
||||
<details class="source">
|
||||
<summary>
|
||||
<span>Expand source code</span>
|
||||
</summary>
|
||||
<pre><code class="python">def load_session_data(self, session_id):
    """Loads a session's raw data by ID."""
    path = os.path.join(self.sessions_dir, f"{session_id}.json")
    if os.path.exists(path):
        try:
            with open(path, "r") as f:
                data = json.load(f)
            self.session_id = session_id
            self.session_path = path
            return data
        except Exception as e:
            printer.error(f"Failed to load session {session_id}: {e}")
    return None</code></pre>
|
||||
</details>
|
||||
<div class="desc"><p>Loads a session's raw data by ID.</p></div>
|
||||
</dd>
|
||||
<dt id="connpy.ai.manage_memory_tool"><code class="name flex">
|
||||
<span>def <span class="ident">manage_memory_tool</span></span>(<span>self, content, action='append')</span>
|
||||
</code></dt>
|
||||
@@ -2197,6 +2410,58 @@ def confirm(self, user_input): return True</code></pre>
|
||||
</details>
|
||||
<div class="desc"><p>Execute commands on nodes matching the filter. Native interactive confirmation for unsafe commands.</p></div>
|
||||
</dd>
|
||||
<dt id="connpy.ai.save_session"><code class="name flex">
|
||||
<span>def <span class="ident">save_session</span></span>(<span>self, history, title=None, model=None)</span>
|
||||
</code></dt>
|
||||
<dd>
|
||||
<details class="source">
|
||||
<summary>
|
||||
<span>Expand source code</span>
|
||||
</summary>
|
||||
<pre><code class="python">def save_session(self, history, title=None, model=None):
    """Saves current history to the session file."""
    if not self.session_id:
        # Generate ID from first user query if available
        first_user_msg = next((m["content"] for m in history if m["role"] == "user"), "new-session")
        self.session_id = self._generate_session_id(first_user_msg)
        self.session_path = os.path.join(self.sessions_dir, f"{self.session_id}.json")

    # If it's a new file, we might want to set a better title
    if not os.path.exists(self.session_path) and not title:
        raw_title = next((m["content"] for m in history if m["role"] == "user"), "New Session")
        # Clean title: remove newlines, multiple spaces
        clean_title = " ".join(raw_title.split())
        if len(clean_title) > 40:
            title = clean_title[:37].strip() + "..."
        else:
            title = clean_title

    try:
        # Read existing metadata if it exists
        metadata = {}
        if os.path.exists(self.session_path):
            with open(self.session_path, "r") as f:
                metadata = json.load(f)

        metadata.update({
            "id": self.session_id,
            "title": title or metadata.get("title", "New Session"),
            "created_at": metadata.get("created_at", datetime.datetime.now().isoformat()),
            "updated_at": datetime.datetime.now().isoformat(),
            "model": model or metadata.get("model", self.engineer_model),
            "history": history
        })

        with open(self.session_path, "w") as f:
            json.dump(metadata, f, indent=4)
    except Exception as e:
        printer.error(f"Failed to save session: {e}")</code></pre>
|
||||
</details>
|
||||
<div class="desc"><p>Saves current history to the session file.</p></div>
|
||||
</dd>
|
||||
</dl>
|
||||
</dd>
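<p>A minimal end-to-end sketch of the new session workflow (not part of this commit; the import paths are inferred from the module ids above, and valid AI credentials in <code>config.yaml</code> are assumed):</p>
<pre><code class="python">from connpy import configfile
from connpy.ai import ai

conf = configfile()
assistant = ai(conf)

# New conversation: ask() auto-saves it under &lt;defaultdir&gt;/ai_sessions/
assistant.ask("Check BGP status on the core routers", stream=False)

# Later run: resume the most recent conversation by id
last_id = assistant.get_last_session_id()
if last_id:
    assistant.ask("And the OSPF neighbors?", session_id=last_id, stream=False)

# Inspect and clean up persisted sessions
assistant.list_sessions()
if last_id:
    assistant.delete_session(last_id)</code></pre>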
|
||||
<dt id="connpy.configfile"><code class="flex name class">
|
||||
@@ -2248,27 +2513,31 @@ class configfile:
|
||||
'''
|
||||
home = os.path.expanduser("~")
|
||||
defaultdir = home + '/.config/conn'
|
||||
self.defaultdir = defaultdir
|
||||
Path(defaultdir).mkdir(parents=True, exist_ok=True)
|
||||
Path(f"{defaultdir}/plugins").mkdir(parents=True, exist_ok=True)
|
||||
pathfile = defaultdir + '/.folder'
|
||||
try:
|
||||
with open(pathfile, "r") as f:
|
||||
configdir = f.read().strip()
|
||||
except (FileNotFoundError, IOError):
|
||||
with open(pathfile, "w") as f:
|
||||
f.write(str(defaultdir))
|
||||
configdir = defaultdir
|
||||
defaultfile = configdir + '/config.yaml'
|
||||
self.cachefile = configdir + '/.config.cache.json'
|
||||
self.fzf_cachefile = configdir + '/.fzf_nodes_cache.txt'
|
||||
self.folders_cachefile = configdir + '/.folders_cache.txt'
|
||||
self.profiles_cachefile = configdir + '/.profiles_cache.txt'
|
||||
defaultkey = configdir + '/.osk'
|
||||
if conf == None:
|
||||
self.file = defaultfile
|
||||
|
||||
# Backwards compatibility: Migrate from JSON to YAML
|
||||
if conf is None:
|
||||
# Standard path: use ~/.config/conn and respect .folder redirection
|
||||
self.anchor_path = defaultdir
|
||||
self.defaultdir = defaultdir
|
||||
Path(defaultdir).mkdir(parents=True, exist_ok=True)
|
||||
|
||||
pathfile = defaultdir + '/.folder'
|
||||
try:
|
||||
with open(pathfile, "r") as f:
|
||||
configdir = f.read().strip()
|
||||
except (FileNotFoundError, IOError):
|
||||
with open(pathfile, "w") as f:
|
||||
f.write(str(defaultdir))
|
||||
configdir = defaultdir
|
||||
|
||||
self.defaultdir = configdir
|
||||
self.file = configdir + '/config.yaml'
|
||||
self.key = key or (configdir + '/.osk')
|
||||
|
||||
# Ensure redirected directories exist
|
||||
Path(configdir).mkdir(parents=True, exist_ok=True)
|
||||
Path(f"{configdir}/plugins").mkdir(parents=True, exist_ok=True)
|
||||
|
||||
# Backwards compatibility: Migrate from JSON to YAML only for default path
|
||||
legacy_json = configdir + '/config.json'
|
||||
legacy_noext = configdir + '/config'
|
||||
legacy_file = None
|
||||
@@ -2291,38 +2560,44 @@ class configfile:
|
||||
os.remove(self.file)
|
||||
printer.warning("YAML verification failed after migration, keeping legacy config.")
|
||||
else:
|
||||
with open(self.cachefile, 'w') as f:
|
||||
# Note: cachefile is derived later, we use temp one for migration sync
|
||||
temp_cache = configdir + '/.config.cache.json'
|
||||
with open(temp_cache, 'w') as f:
|
||||
json.dump(old_data, f)
|
||||
shutil.move(legacy_file, legacy_file + ".backup")
|
||||
printer.success(f"Migrated legacy config ({len(old_data.get('connections',{}))} folders/nodes) into YAML and Cache successfully!")
|
||||
except Exception as e:
|
||||
# Clean up partial YAML if it was created
|
||||
if os.path.exists(self.file):
|
||||
try:
|
||||
os.remove(self.file)
|
||||
except OSError:
|
||||
pass
|
||||
try: os.remove(self.file)
|
||||
except OSError: pass
|
||||
printer.warning(f"Failed to migrate legacy config: {e}")
|
||||
else:
|
||||
self.file = conf
|
||||
# Custom path (common in tests): isolate everything to the conf parent directory
|
||||
self.file = os.path.abspath(conf)
|
||||
configdir = os.path.dirname(self.file)
|
||||
self.anchor_path = configdir
|
||||
self.defaultdir = configdir
|
||||
self.key = os.path.abspath(key) if key else (configdir + '/.osk')
|
||||
|
||||
if key == None:
|
||||
self.key = defaultkey
|
||||
else:
|
||||
self.key = key
|
||||
# Sidecar files always live next to the config file (or in the redirected configdir)
|
||||
self.cachefile = configdir + '/.config.cache.json'
|
||||
self.fzf_cachefile = configdir + '/.fzf_nodes_cache.txt'
|
||||
self.folders_cachefile = configdir + '/.folders_cache.txt'
|
||||
self.profiles_cachefile = configdir + '/.profiles_cache.txt'
|
||||
|
||||
if os.path.exists(self.file):
|
||||
config = self._loadconfig(self.file)
|
||||
else:
|
||||
config = self._createconfig(self.file)
|
||||
|
||||
self.config = config["config"]
|
||||
self.connections = config["connections"]
|
||||
self.profiles = config["profiles"]
|
||||
|
||||
if not os.path.exists(self.key):
|
||||
self._createkey(self.key)
|
||||
with open(self.key) as f:
|
||||
self.privatekey = RSA.import_key(f.read())
|
||||
f.close()
|
||||
self.publickey = self.privatekey.publickey()
|
||||
|
||||
# Self-heal text caches if they are missing
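# --- Illustrative sketch, not part of this commit: what the .folder redirection
# --- above means in practice. Assumes `from connpy import configfile` is the
# --- correct import and that the target directory is writable.
import os
from connpy import configfile

redirect = os.path.expanduser("~/conn-shared")      # hypothetical redirected directory
os.makedirs(redirect, exist_ok=True)
with open(os.path.expanduser("~/.config/conn/.folder"), "w") as f:
    f.write(redirect)

conf = configfile()
# config.yaml, .osk and every sidecar cache are now anchored in the redirected directory,
# while anchor_path keeps pointing at ~/.config/conn, where .folder itself lives.
assert conf.defaultdir == redirect
assert conf.file == redirect + "/config.yaml"
assert conf.cachefile == redirect + "/.config.cache.json"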
|
||||
@@ -4724,12 +4999,17 @@ def test(self, commands, expected, vars = None,*, prompt = None, parallel = 10,
|
||||
<li><code><a title="connpy.ai.architect_system_prompt" href="#connpy.ai.architect_system_prompt">architect_system_prompt</a></code></li>
|
||||
<li><code><a title="connpy.ai.ask" href="#connpy.ai.ask">ask</a></code></li>
|
||||
<li><code><a title="connpy.ai.confirm" href="#connpy.ai.confirm">confirm</a></code></li>
|
||||
<li><code><a title="connpy.ai.delete_session" href="#connpy.ai.delete_session">delete_session</a></code></li>
|
||||
<li><code><a title="connpy.ai.engineer_system_prompt" href="#connpy.ai.engineer_system_prompt">engineer_system_prompt</a></code></li>
|
||||
<li><code><a title="connpy.ai.get_last_session_id" href="#connpy.ai.get_last_session_id">get_last_session_id</a></code></li>
|
||||
<li><code><a title="connpy.ai.get_node_info_tool" href="#connpy.ai.get_node_info_tool">get_node_info_tool</a></code></li>
|
||||
<li><code><a title="connpy.ai.list_nodes_tool" href="#connpy.ai.list_nodes_tool">list_nodes_tool</a></code></li>
|
||||
<li><code><a title="connpy.ai.list_sessions" href="#connpy.ai.list_sessions">list_sessions</a></code></li>
|
||||
<li><code><a title="connpy.ai.load_session_data" href="#connpy.ai.load_session_data">load_session_data</a></code></li>
|
||||
<li><code><a title="connpy.ai.manage_memory_tool" href="#connpy.ai.manage_memory_tool">manage_memory_tool</a></code></li>
|
||||
<li><code><a title="connpy.ai.register_ai_tool" href="#connpy.ai.register_ai_tool">register_ai_tool</a></code></li>
|
||||
<li><code><a title="connpy.ai.run_commands_tool" href="#connpy.ai.run_commands_tool">run_commands_tool</a></code></li>
|
||||
<li><code><a title="connpy.ai.save_session" href="#connpy.ai.save_session">save_session</a></code></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
@@ -4761,7 +5041,7 @@ def test(self, commands, expected, vars = None,*, prompt = None, parallel = 10,
|
||||
</nav>
|
||||
</main>
|
||||
<footer id="footer">
|
||||
<p>Generated by <a href="https://pdoc3.github.io/pdoc" title="pdoc: Python API documentation generator"><cite>pdoc</cite> 0.11.6</a>.</p>
|
||||
<p>Generated by <a href="https://pdoc3.github.io/pdoc" title="pdoc: Python API documentation generator"><cite>pdoc</cite> 0.11.5</a>.</p>
|
||||
</footer>
|
||||
</body>
|
||||
</html>
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1">
|
||||
<meta name="generator" content="pdoc3 0.11.6">
|
||||
<meta name="generator" content="pdoc3 0.11.5">
|
||||
<title>connpy.tests.conftest API documentation</title>
|
||||
<meta name="description" content="Shared fixtures for connpy tests …">
|
||||
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/10up-sanitize.css/13.0.0/sanitize.min.css" integrity="sha512-y1dtMcuvtTMJc1yPgEqF0ZjQbhnc/bFhyvIyVNb9Zk5mIGtqVaAB1Ttl28su8AvFMOY0EwRbAe+HCLqj6W7/KA==" crossorigin>
|
||||
@@ -258,7 +258,7 @@ def tmp_config_dir(tmp_path):
|
||||
</nav>
|
||||
</main>
|
||||
<footer id="footer">
|
||||
<p>Generated by <a href="https://pdoc3.github.io/pdoc" title="pdoc: Python API documentation generator"><cite>pdoc</cite> 0.11.6</a>.</p>
|
||||
<p>Generated by <a href="https://pdoc3.github.io/pdoc" title="pdoc: Python API documentation generator"><cite>pdoc</cite> 0.11.5</a>.</p>
|
||||
</footer>
|
||||
</body>
|
||||
</html>
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1">
|
||||
<meta name="generator" content="pdoc3 0.11.6">
|
||||
<meta name="generator" content="pdoc3 0.11.5">
|
||||
<title>connpy.tests API documentation</title>
|
||||
<meta name="description" content="">
|
||||
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/10up-sanitize.css/13.0.0/sanitize.min.css" integrity="sha512-y1dtMcuvtTMJc1yPgEqF0ZjQbhnc/bFhyvIyVNb9Zk5mIGtqVaAB1Ttl28su8AvFMOY0EwRbAe+HCLqj6W7/KA==" crossorigin>
|
||||
@@ -127,7 +127,7 @@ el.replaceWith(d);
|
||||
</nav>
|
||||
</main>
|
||||
<footer id="footer">
|
||||
<p>Generated by <a href="https://pdoc3.github.io/pdoc" title="pdoc: Python API documentation generator"><cite>pdoc</cite> 0.11.6</a>.</p>
|
||||
<p>Generated by <a href="https://pdoc3.github.io/pdoc" title="pdoc: Python API documentation generator"><cite>pdoc</cite> 0.11.5</a>.</p>
|
||||
</footer>
|
||||
</body>
|
||||
</html>
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1">
|
||||
<meta name="generator" content="pdoc3 0.11.6">
|
||||
<meta name="generator" content="pdoc3 0.11.5">
|
||||
<title>connpy.tests.test_ai API documentation</title>
|
||||
<meta name="description" content="Tests for connpy.ai module.">
|
||||
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/10up-sanitize.css/13.0.0/sanitize.min.css" integrity="sha512-y1dtMcuvtTMJc1yPgEqF0ZjQbhnc/bFhyvIyVNb9Zk5mIGtqVaAB1Ttl28su8AvFMOY0EwRbAe+HCLqj6W7/KA==" crossorigin>
|
||||
@@ -88,7 +88,7 @@ el.replaceWith(d);
|
||||
|
||||
def test_init_loads_memory(self, ai_config, tmp_path, mock_litellm):
|
||||
"""Loads long-term memory from file if it exists."""
|
||||
memory_path = os.path.expanduser("~/.config/conn/ai_memory.md")
|
||||
memory_path = os.path.join(ai_config.defaultdir, "ai_memory.md")
|
||||
from connpy.ai import ai
|
||||
|
||||
with patch("os.path.exists", side_effect=lambda p: True if p == memory_path else os.path.exists(p)):
|
||||
@@ -132,7 +132,7 @@ el.replaceWith(d);
|
||||
</summary>
|
||||
<pre><code class="python">def test_init_loads_memory(self, ai_config, tmp_path, mock_litellm):
|
||||
"""Loads long-term memory from file if it exists."""
|
||||
memory_path = os.path.expanduser("~/.config/conn/ai_memory.md")
|
||||
memory_path = os.path.join(ai_config.defaultdir, "ai_memory.md")
|
||||
from connpy.ai import ai
|
||||
|
||||
with patch("os.path.exists", side_effect=lambda p: True if p == memory_path else os.path.exists(p)):
|
||||
@@ -201,6 +201,224 @@ el.replaceWith(d);
|
||||
</dd>
|
||||
</dl>
|
||||
</dd>
|
||||
<dt id="connpy.tests.test_ai.TestAISessions"><code class="flex name class">
|
||||
<span>class <span class="ident">TestAISessions</span></span>
|
||||
</code></dt>
|
||||
<dd>
|
||||
<details class="source">
|
||||
<summary>
|
||||
<span>Expand source code</span>
|
||||
</summary>
|
||||
<pre><code class="python">class TestAISessions:
|
||||
@pytest.fixture
|
||||
def myai(self, ai_config, mock_litellm, tmp_path):
|
||||
from connpy.ai import ai
|
||||
ai_config.defaultdir = str(tmp_path)
|
||||
return ai(ai_config)
|
||||
|
||||
def test_sessions_dir_initialization(self, myai, tmp_path):
|
||||
assert os.path.exists(os.path.join(tmp_path, "ai_sessions"))
|
||||
assert myai.sessions_dir == str(tmp_path / "ai_sessions")
|
||||
|
||||
def test_generate_session_id(self, myai):
|
||||
session_id = myai._generate_session_id("Any query")
|
||||
# Format: YYYYMMDD-HHMMSS
|
||||
assert len(session_id) == 15
|
||||
assert "-" in session_id
|
||||
parts = session_id.split("-")
|
||||
assert len(parts[0]) == 8 # YYYYMMDD
|
||||
assert len(parts[1]) == 6 # HHMMSS
|
||||
|
||||
def test_save_and_load_session(self, myai):
|
||||
history = [
|
||||
{"role": "user", "content": "Hello"},
|
||||
{"role": "assistant", "content": "Hi"}
|
||||
]
|
||||
myai.save_session(history, title="Test Session")
|
||||
session_id = myai.session_id
|
||||
|
||||
# Load it back
|
||||
loaded = myai.load_session_data(session_id)
|
||||
assert loaded["title"] == "Test Session"
|
||||
assert loaded["history"] == history
|
||||
assert loaded["model"] == myai.engineer_model
|
||||
|
||||
def test_list_sessions(self, myai, capsys):
|
||||
history = [{"role": "user", "content": "Query 1"}]
|
||||
myai.save_session(history, title="Session 1")
|
||||
|
||||
# Use a second instance to list
|
||||
myai.list_sessions()
|
||||
captured = capsys.readouterr()
|
||||
assert "Session 1" in captured.out
|
||||
assert "AI Persisted Sessions" in captured.out
|
||||
|
||||
def test_get_last_session_id(self, myai):
|
||||
# Save two sessions
|
||||
myai.session_id = None # Force new
|
||||
myai.save_session([{"role": "user", "content": "First"}])
|
||||
first_id = myai.session_id
|
||||
import time
|
||||
time.sleep(1.1) # Ensure different timestamp
|
||||
|
||||
myai.session_id = None # Force new
|
||||
myai.save_session([{"role": "user", "content": "Second"}])
|
||||
second_id = myai.session_id
|
||||
|
||||
last_id = myai.get_last_session_id()
|
||||
assert last_id == second_id
|
||||
assert last_id != first_id
|
||||
|
||||
def test_delete_session(self, myai):
|
||||
myai.save_session([{"role": "user", "content": "To be deleted"}])
|
||||
session_id = myai.session_id
|
||||
assert os.path.exists(myai.session_path)
|
||||
|
||||
myai.delete_session(session_id)
|
||||
assert not os.path.exists(myai.session_path)</code></pre>
|
||||
</details>
|
||||
<div class="desc"></div>
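<p>The assertions above only pin the identifier format (<code>YYYYMMDD-HHMMSS</code>, 15 characters); the <code>_generate_session_id</code> implementation itself is not included in this diff. A minimal sketch that would satisfy the test:</p>
<pre><code class="python">import datetime

def _generate_session_id(query: str) -> str:
    # Sketch only: ignores the query and derives the id from the current timestamp,
    # yielding the 8-digit date, a dash, and a 6-digit time the test checks for.
    return datetime.datetime.now().strftime("%Y%m%d-%H%M%S")</code></pre>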
|
||||
<h3>Methods</h3>
|
||||
<dl>
|
||||
<dt id="connpy.tests.test_ai.TestAISessions.myai"><code class="name flex">
|
||||
<span>def <span class="ident">myai</span></span>(<span>self, ai_config, mock_litellm, tmp_path)</span>
|
||||
</code></dt>
|
||||
<dd>
|
||||
<details class="source">
|
||||
<summary>
|
||||
<span>Expand source code</span>
|
||||
</summary>
|
||||
<pre><code class="python">@pytest.fixture
|
||||
def myai(self, ai_config, mock_litellm, tmp_path):
|
||||
from connpy.ai import ai
|
||||
ai_config.defaultdir = str(tmp_path)
|
||||
return ai(ai_config)</code></pre>
|
||||
</details>
|
||||
<div class="desc"></div>
|
||||
</dd>
|
||||
<dt id="connpy.tests.test_ai.TestAISessions.test_delete_session"><code class="name flex">
|
||||
<span>def <span class="ident">test_delete_session</span></span>(<span>self, myai)</span>
|
||||
</code></dt>
|
||||
<dd>
|
||||
<details class="source">
|
||||
<summary>
|
||||
<span>Expand source code</span>
|
||||
</summary>
|
||||
<pre><code class="python">def test_delete_session(self, myai):
|
||||
myai.save_session([{"role": "user", "content": "To be deleted"}])
|
||||
session_id = myai.session_id
|
||||
assert os.path.exists(myai.session_path)
|
||||
|
||||
myai.delete_session(session_id)
|
||||
assert not os.path.exists(myai.session_path)</code></pre>
|
||||
</details>
|
||||
<div class="desc"></div>
|
||||
</dd>
|
||||
<dt id="connpy.tests.test_ai.TestAISessions.test_generate_session_id"><code class="name flex">
|
||||
<span>def <span class="ident">test_generate_session_id</span></span>(<span>self, myai)</span>
|
||||
</code></dt>
|
||||
<dd>
|
||||
<details class="source">
|
||||
<summary>
|
||||
<span>Expand source code</span>
|
||||
</summary>
|
||||
<pre><code class="python">def test_generate_session_id(self, myai):
|
||||
session_id = myai._generate_session_id("Any query")
|
||||
# Format: YYYYMMDD-HHMMSS
|
||||
assert len(session_id) == 15
|
||||
assert "-" in session_id
|
||||
parts = session_id.split("-")
|
||||
assert len(parts[0]) == 8 # YYYYMMDD
|
||||
assert len(parts[1]) == 6 # HHMMSS</code></pre>
|
||||
</details>
|
||||
<div class="desc"></div>
|
||||
</dd>
|
||||
<dt id="connpy.tests.test_ai.TestAISessions.test_get_last_session_id"><code class="name flex">
|
||||
<span>def <span class="ident">test_get_last_session_id</span></span>(<span>self, myai)</span>
|
||||
</code></dt>
|
||||
<dd>
|
||||
<details class="source">
|
||||
<summary>
|
||||
<span>Expand source code</span>
|
||||
</summary>
|
||||
<pre><code class="python">def test_get_last_session_id(self, myai):
|
||||
# Save two sessions
|
||||
myai.session_id = None # Force new
|
||||
myai.save_session([{"role": "user", "content": "First"}])
|
||||
first_id = myai.session_id
|
||||
import time
|
||||
time.sleep(1.1) # Ensure different timestamp
|
||||
|
||||
myai.session_id = None # Force new
|
||||
myai.save_session([{"role": "user", "content": "Second"}])
|
||||
second_id = myai.session_id
|
||||
|
||||
last_id = myai.get_last_session_id()
|
||||
assert last_id == second_id
|
||||
assert last_id != first_id</code></pre>
|
||||
</details>
|
||||
<div class="desc"></div>
|
||||
</dd>
|
||||
<dt id="connpy.tests.test_ai.TestAISessions.test_list_sessions"><code class="name flex">
|
||||
<span>def <span class="ident">test_list_sessions</span></span>(<span>self, myai, capsys)</span>
|
||||
</code></dt>
|
||||
<dd>
|
||||
<details class="source">
|
||||
<summary>
|
||||
<span>Expand source code</span>
|
||||
</summary>
|
||||
<pre><code class="python">def test_list_sessions(self, myai, capsys):
|
||||
history = [{"role": "user", "content": "Query 1"}]
|
||||
myai.save_session(history, title="Session 1")
|
||||
|
||||
# Use a second instance to list
|
||||
myai.list_sessions()
|
||||
captured = capsys.readouterr()
|
||||
assert "Session 1" in captured.out
|
||||
assert "AI Persisted Sessions" in captured.out</code></pre>
|
||||
</details>
|
||||
<div class="desc"></div>
|
||||
</dd>
|
||||
<dt id="connpy.tests.test_ai.TestAISessions.test_save_and_load_session"><code class="name flex">
|
||||
<span>def <span class="ident">test_save_and_load_session</span></span>(<span>self, myai)</span>
|
||||
</code></dt>
|
||||
<dd>
|
||||
<details class="source">
|
||||
<summary>
|
||||
<span>Expand source code</span>
|
||||
</summary>
|
||||
<pre><code class="python">def test_save_and_load_session(self, myai):
|
||||
history = [
|
||||
{"role": "user", "content": "Hello"},
|
||||
{"role": "assistant", "content": "Hi"}
|
||||
]
|
||||
myai.save_session(history, title="Test Session")
|
||||
session_id = myai.session_id
|
||||
|
||||
# Load it back
|
||||
loaded = myai.load_session_data(session_id)
|
||||
assert loaded["title"] == "Test Session"
|
||||
assert loaded["history"] == history
|
||||
assert loaded["model"] == myai.engineer_model</code></pre>
|
||||
</details>
|
||||
<div class="desc"></div>
|
||||
</dd>
|
||||
<dt id="connpy.tests.test_ai.TestAISessions.test_sessions_dir_initialization"><code class="name flex">
|
||||
<span>def <span class="ident">test_sessions_dir_initialization</span></span>(<span>self, myai, tmp_path)</span>
|
||||
</code></dt>
|
||||
<dd>
|
||||
<details class="source">
|
||||
<summary>
|
||||
<span>Expand source code</span>
|
||||
</summary>
|
||||
<pre><code class="python">def test_sessions_dir_initialization(self, myai, tmp_path):
|
||||
assert os.path.exists(os.path.join(tmp_path, "ai_sessions"))
|
||||
assert myai.sessions_dir == str(tmp_path / "ai_sessions")</code></pre>
|
||||
</details>
|
||||
<div class="desc"></div>
|
||||
</dd>
|
||||
</dl>
|
||||
</dd>
|
||||
<dt id="connpy.tests.test_ai.TestAsk"><code class="flex name class">
|
||||
<span>class <span class="ident">TestAsk</span></span>
|
||||
</code></dt>
|
||||
@@ -807,7 +1025,18 @@ def myai(self, ai_config, mock_litellm):
|
||||
{"role": "assistant", "content": "Found r1"}
|
||||
]
|
||||
result = myai._sanitize_messages(messages)
|
||||
assert len(result) == 4</code></pre>
|
||||
assert len(result) == 4
|
||||
|
||||
def test_sanitize_strips_cache_control(self, myai):
|
||||
"""_sanitize_messages should convert list-based content (with cache_control) back to strings."""
|
||||
messages = [
|
||||
{"role": "system", "content": [{"type": "text", "text": "system prompt", "cache_control": {"type": "ephemeral"}}]},
|
||||
{"role": "user", "content": "hello"}
|
||||
]
|
||||
result = myai._sanitize_messages(messages)
|
||||
assert result[0]["role"] == "system"
|
||||
assert isinstance(result[0]["content"], str)
|
||||
assert result[0]["content"] == "system prompt"</code></pre>
|
||||
</details>
|
||||
<div class="desc"></div>
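<p>For context on what the new test covers: Anthropic prompt caching wraps the system prompt in a content list that carries <code>cache_control</code> metadata, which other providers (and Vertex) reject. A before/after sketch of the shapes involved, taken from the test above:</p>
<pre><code class="python"># Cache-optimized form kept in the history for direct Anthropic models:
cached = {
    "role": "system",
    "content": [
        {"type": "text", "text": "system prompt", "cache_control": {"type": "ephemeral"}}
    ],
}

# What _sanitize_messages() produces for every other target model:
flattened = {"role": "system", "content": "system prompt"}</code></pre>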
|
||||
<h3>Methods</h3>
|
||||
@@ -925,6 +1154,27 @@ def myai(self, ai_config, mock_litellm):
|
||||
</details>
|
||||
<div class="desc"><p>Tool responses without preceding tool_calls are removed.</p></div>
|
||||
</dd>
|
||||
<dt id="connpy.tests.test_ai.TestSanitizeMessages.test_sanitize_strips_cache_control"><code class="name flex">
|
||||
<span>def <span class="ident">test_sanitize_strips_cache_control</span></span>(<span>self, myai)</span>
|
||||
</code></dt>
|
||||
<dd>
|
||||
<details class="source">
|
||||
<summary>
|
||||
<span>Expand source code</span>
|
||||
</summary>
|
||||
<pre><code class="python">def test_sanitize_strips_cache_control(self, myai):
|
||||
"""_sanitize_messages should convert list-based content (with cache_control) back to strings."""
|
||||
messages = [
|
||||
{"role": "system", "content": [{"type": "text", "text": "system prompt", "cache_control": {"type": "ephemeral"}}]},
|
||||
{"role": "user", "content": "hello"}
|
||||
]
|
||||
result = myai._sanitize_messages(messages)
|
||||
assert result[0]["role"] == "system"
|
||||
assert isinstance(result[0]["content"], str)
|
||||
assert result[0]["content"] == "system prompt"</code></pre>
|
||||
</details>
|
||||
<div class="desc"><p>_sanitize_messages should convert list-based content (with cache_control) back to strings.</p></div>
|
||||
</dd>
|
||||
</dl>
|
||||
</dd>
|
||||
<dt id="connpy.tests.test_ai.TestToolDefinitions"><code class="flex name class">
|
||||
@@ -1373,6 +1623,18 @@ def myai(self, ai_config, mock_litellm):
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<h4><code><a title="connpy.tests.test_ai.TestAISessions" href="#connpy.tests.test_ai.TestAISessions">TestAISessions</a></code></h4>
|
||||
<ul class="">
|
||||
<li><code><a title="connpy.tests.test_ai.TestAISessions.myai" href="#connpy.tests.test_ai.TestAISessions.myai">myai</a></code></li>
|
||||
<li><code><a title="connpy.tests.test_ai.TestAISessions.test_delete_session" href="#connpy.tests.test_ai.TestAISessions.test_delete_session">test_delete_session</a></code></li>
|
||||
<li><code><a title="connpy.tests.test_ai.TestAISessions.test_generate_session_id" href="#connpy.tests.test_ai.TestAISessions.test_generate_session_id">test_generate_session_id</a></code></li>
|
||||
<li><code><a title="connpy.tests.test_ai.TestAISessions.test_get_last_session_id" href="#connpy.tests.test_ai.TestAISessions.test_get_last_session_id">test_get_last_session_id</a></code></li>
|
||||
<li><code><a title="connpy.tests.test_ai.TestAISessions.test_list_sessions" href="#connpy.tests.test_ai.TestAISessions.test_list_sessions">test_list_sessions</a></code></li>
|
||||
<li><code><a title="connpy.tests.test_ai.TestAISessions.test_save_and_load_session" href="#connpy.tests.test_ai.TestAISessions.test_save_and_load_session">test_save_and_load_session</a></code></li>
|
||||
<li><code><a title="connpy.tests.test_ai.TestAISessions.test_sessions_dir_initialization" href="#connpy.tests.test_ai.TestAISessions.test_sessions_dir_initialization">test_sessions_dir_initialization</a></code></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
<h4><code><a title="connpy.tests.test_ai.TestAsk" href="#connpy.tests.test_ai.TestAsk">TestAsk</a></code></h4>
|
||||
<ul class="">
|
||||
<li><code><a title="connpy.tests.test_ai.TestAsk.myai" href="#connpy.tests.test_ai.TestAsk.myai">myai</a></code></li>
|
||||
@@ -1423,6 +1685,7 @@ def myai(self, ai_config, mock_litellm):
|
||||
<li><code><a title="connpy.tests.test_ai.TestSanitizeMessages.test_sanitize_preserves_valid_tool_pairs" href="#connpy.tests.test_ai.TestSanitizeMessages.test_sanitize_preserves_valid_tool_pairs">test_sanitize_preserves_valid_tool_pairs</a></code></li>
|
||||
<li><code><a title="connpy.tests.test_ai.TestSanitizeMessages.test_sanitize_removes_orphan_tool_calls" href="#connpy.tests.test_ai.TestSanitizeMessages.test_sanitize_removes_orphan_tool_calls">test_sanitize_removes_orphan_tool_calls</a></code></li>
|
||||
<li><code><a title="connpy.tests.test_ai.TestSanitizeMessages.test_sanitize_removes_orphan_tool_responses" href="#connpy.tests.test_ai.TestSanitizeMessages.test_sanitize_removes_orphan_tool_responses">test_sanitize_removes_orphan_tool_responses</a></code></li>
|
||||
<li><code><a title="connpy.tests.test_ai.TestSanitizeMessages.test_sanitize_strips_cache_control" href="#connpy.tests.test_ai.TestSanitizeMessages.test_sanitize_strips_cache_control">test_sanitize_strips_cache_control</a></code></li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>
|
||||
@@ -1464,7 +1727,7 @@ def myai(self, ai_config, mock_litellm):
|
||||
</nav>
|
||||
</main>
|
||||
<footer id="footer">
|
||||
<p>Generated by <a href="https://pdoc3.github.io/pdoc" title="pdoc: Python API documentation generator"><cite>pdoc</cite> 0.11.6</a>.</p>
|
||||
<p>Generated by <a href="https://pdoc3.github.io/pdoc" title="pdoc: Python API documentation generator"><cite>pdoc</cite> 0.11.5</a>.</p>
|
||||
</footer>
|
||||
</body>
|
||||
</html>
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1">
|
||||
<meta name="generator" content="pdoc3 0.11.6">
|
||||
<meta name="generator" content="pdoc3 0.11.5">
|
||||
<title>connpy.tests.test_api API documentation</title>
|
||||
<meta name="description" content="Tests for connpy.api module — Flask routes.">
|
||||
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/10up-sanitize.css/13.0.0/sanitize.min.css" integrity="sha512-y1dtMcuvtTMJc1yPgEqF0ZjQbhnc/bFhyvIyVNb9Zk5mIGtqVaAB1Ttl28su8AvFMOY0EwRbAe+HCLqj6W7/KA==" crossorigin>
|
||||
@@ -876,7 +876,7 @@ def test_test_action(self, mock_nodes_cls, api_client):
|
||||
</nav>
|
||||
</main>
|
||||
<footer id="footer">
|
||||
<p>Generated by <a href="https://pdoc3.github.io/pdoc" title="pdoc: Python API documentation generator"><cite>pdoc</cite> 0.11.6</a>.</p>
|
||||
<p>Generated by <a href="https://pdoc3.github.io/pdoc" title="pdoc: Python API documentation generator"><cite>pdoc</cite> 0.11.5</a>.</p>
|
||||
</footer>
|
||||
</body>
|
||||
</html>
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1">
|
||||
<meta name="generator" content="pdoc3 0.11.6">
|
||||
<meta name="generator" content="pdoc3 0.11.5">
|
||||
<title>connpy.tests.test_capture API documentation</title>
|
||||
<meta name="description" content="Tests for connpy.core_plugins.capture">
|
||||
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/10up-sanitize.css/13.0.0/sanitize.min.css" integrity="sha512-y1dtMcuvtTMJc1yPgEqF0ZjQbhnc/bFhyvIyVNb9Zk5mIGtqVaAB1Ttl28su8AvFMOY0EwRbAe+HCLqj6W7/KA==" crossorigin>
|
||||
@@ -229,7 +229,7 @@ def test_is_port_in_use(self, mock_socket, mock_connapp):
|
||||
</nav>
|
||||
</main>
|
||||
<footer id="footer">
|
||||
<p>Generated by <a href="https://pdoc3.github.io/pdoc" title="pdoc: Python API documentation generator"><cite>pdoc</cite> 0.11.6</a>.</p>
|
||||
<p>Generated by <a href="https://pdoc3.github.io/pdoc" title="pdoc: Python API documentation generator"><cite>pdoc</cite> 0.11.5</a>.</p>
|
||||
</footer>
|
||||
</body>
|
||||
</html>
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1">
|
||||
<meta name="generator" content="pdoc3 0.11.6">
|
||||
<meta name="generator" content="pdoc3 0.11.5">
|
||||
<title>connpy.tests.test_completion API documentation</title>
|
||||
<meta name="description" content="Tests for connpy.completion module.">
|
||||
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/10up-sanitize.css/13.0.0/sanitize.min.css" integrity="sha512-y1dtMcuvtTMJc1yPgEqF0ZjQbhnc/bFhyvIyVNb9Zk5mIGtqVaAB1Ttl28su8AvFMOY0EwRbAe+HCLqj6W7/KA==" crossorigin>
|
||||
@@ -433,7 +433,7 @@ el.replaceWith(d);
|
||||
</nav>
|
||||
</main>
|
||||
<footer id="footer">
|
||||
<p>Generated by <a href="https://pdoc3.github.io/pdoc" title="pdoc: Python API documentation generator"><cite>pdoc</cite> 0.11.6</a>.</p>
|
||||
<p>Generated by <a href="https://pdoc3.github.io/pdoc" title="pdoc: Python API documentation generator"><cite>pdoc</cite> 0.11.5</a>.</p>
|
||||
</footer>
|
||||
</body>
|
||||
</html>
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1">
|
||||
<meta name="generator" content="pdoc3 0.11.6">
|
||||
<meta name="generator" content="pdoc3 0.11.5">
|
||||
<title>connpy.tests.test_configfile API documentation</title>
|
||||
<meta name="description" content="Tests for connpy.configfile module.">
|
||||
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/10up-sanitize.css/13.0.0/sanitize.min.css" integrity="sha512-y1dtMcuvtTMJc1yPgEqF0ZjQbhnc/bFhyvIyVNb9Zk5mIGtqVaAB1Ttl28su8AvFMOY0EwRbAe+HCLqj6W7/KA==" crossorigin>
|
||||
@@ -2003,7 +2003,7 @@ el.replaceWith(d);
|
||||
</nav>
|
||||
</main>
|
||||
<footer id="footer">
|
||||
<p>Generated by <a href="https://pdoc3.github.io/pdoc" title="pdoc: Python API documentation generator"><cite>pdoc</cite> 0.11.6</a>.</p>
|
||||
<p>Generated by <a href="https://pdoc3.github.io/pdoc" title="pdoc: Python API documentation generator"><cite>pdoc</cite> 0.11.5</a>.</p>
|
||||
</footer>
|
||||
</body>
|
||||
</html>
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1">
|
||||
<meta name="generator" content="pdoc3 0.11.6">
|
||||
<meta name="generator" content="pdoc3 0.11.5">
|
||||
<title>connpy.tests.test_context API documentation</title>
|
||||
<meta name="description" content="Tests for connpy.core_plugins.context">
|
||||
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/10up-sanitize.css/13.0.0/sanitize.min.css" integrity="sha512-y1dtMcuvtTMJc1yPgEqF0ZjQbhnc/bFhyvIyVNb9Zk5mIGtqVaAB1Ttl28su8AvFMOY0EwRbAe+HCLqj6W7/KA==" crossorigin>
|
||||
@@ -469,7 +469,7 @@ def mock_connapp():
|
||||
</nav>
|
||||
</main>
|
||||
<footer id="footer">
|
||||
<p>Generated by <a href="https://pdoc3.github.io/pdoc" title="pdoc: Python API documentation generator"><cite>pdoc</cite> 0.11.6</a>.</p>
|
||||
<p>Generated by <a href="https://pdoc3.github.io/pdoc" title="pdoc: Python API documentation generator"><cite>pdoc</cite> 0.11.5</a>.</p>
|
||||
</footer>
|
||||
</body>
|
||||
</html>
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1">
|
||||
<meta name="generator" content="pdoc3 0.11.6">
|
||||
<meta name="generator" content="pdoc3 0.11.5">
|
||||
<title>connpy.tests.test_core API documentation</title>
|
||||
<meta name="description" content="Tests for connpy.core module — node and nodes classes.">
|
||||
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/10up-sanitize.css/13.0.0/sanitize.min.css" integrity="sha512-y1dtMcuvtTMJc1yPgEqF0ZjQbhnc/bFhyvIyVNb9Zk5mIGtqVaAB1Ttl28su8AvFMOY0EwRbAe+HCLqj6W7/KA==" crossorigin>
|
||||
@@ -1300,7 +1300,7 @@ el.replaceWith(d);
|
||||
</nav>
|
||||
</main>
|
||||
<footer id="footer">
|
||||
<p>Generated by <a href="https://pdoc3.github.io/pdoc" title="pdoc: Python API documentation generator"><cite>pdoc</cite> 0.11.6</a>.</p>
|
||||
<p>Generated by <a href="https://pdoc3.github.io/pdoc" title="pdoc: Python API documentation generator"><cite>pdoc</cite> 0.11.5</a>.</p>
|
||||
</footer>
|
||||
</body>
|
||||
</html>
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1">
|
||||
<meta name="generator" content="pdoc3 0.11.6">
|
||||
<meta name="generator" content="pdoc3 0.11.5">
|
||||
<title>connpy.tests.test_hooks API documentation</title>
|
||||
<meta name="description" content="Tests for connpy.hooks module — MethodHook and ClassHook.">
|
||||
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/10up-sanitize.css/13.0.0/sanitize.min.css" integrity="sha512-y1dtMcuvtTMJc1yPgEqF0ZjQbhnc/bFhyvIyVNb9Zk5mIGtqVaAB1Ttl28su8AvFMOY0EwRbAe+HCLqj6W7/KA==" crossorigin>
|
||||
@@ -673,7 +673,7 @@ el.replaceWith(d);
|
||||
</nav>
|
||||
</main>
|
||||
<footer id="footer">
|
||||
<p>Generated by <a href="https://pdoc3.github.io/pdoc" title="pdoc: Python API documentation generator"><cite>pdoc</cite> 0.11.6</a>.</p>
|
||||
<p>Generated by <a href="https://pdoc3.github.io/pdoc" title="pdoc: Python API documentation generator"><cite>pdoc</cite> 0.11.5</a>.</p>
|
||||
</footer>
|
||||
</body>
|
||||
</html>
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1">
|
||||
<meta name="generator" content="pdoc3 0.11.6">
|
||||
<meta name="generator" content="pdoc3 0.11.5">
|
||||
<title>connpy.tests.test_plugins API documentation</title>
|
||||
<meta name="description" content="Tests for connpy.plugins module.">
|
||||
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/10up-sanitize.css/13.0.0/sanitize.min.css" integrity="sha512-y1dtMcuvtTMJc1yPgEqF0ZjQbhnc/bFhyvIyVNb9Zk5mIGtqVaAB1Ttl28su8AvFMOY0EwRbAe+HCLqj6W7/KA==" crossorigin>
|
||||
@@ -917,7 +917,7 @@ el.replaceWith(d);
|
||||
</nav>
|
||||
</main>
|
||||
<footer id="footer">
|
||||
<p>Generated by <a href="https://pdoc3.github.io/pdoc" title="pdoc: Python API documentation generator"><cite>pdoc</cite> 0.11.6</a>.</p>
|
||||
<p>Generated by <a href="https://pdoc3.github.io/pdoc" title="pdoc: Python API documentation generator"><cite>pdoc</cite> 0.11.5</a>.</p>
|
||||
</footer>
|
||||
</body>
|
||||
</html>
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1">
|
||||
<meta name="generator" content="pdoc3 0.11.6">
|
||||
<meta name="generator" content="pdoc3 0.11.5">
|
||||
<title>connpy.tests.test_printer API documentation</title>
|
||||
<meta name="description" content="Tests for connpy.printer module.">
|
||||
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/10up-sanitize.css/13.0.0/sanitize.min.css" integrity="sha512-y1dtMcuvtTMJc1yPgEqF0ZjQbhnc/bFhyvIyVNb9Zk5mIGtqVaAB1Ttl28su8AvFMOY0EwRbAe+HCLqj6W7/KA==" crossorigin>
|
||||
@@ -263,7 +263,7 @@ el.replaceWith(d);
|
||||
</nav>
|
||||
</main>
|
||||
<footer id="footer">
|
||||
<p>Generated by <a href="https://pdoc3.github.io/pdoc" title="pdoc: Python API documentation generator"><cite>pdoc</cite> 0.11.6</a>.</p>
|
||||
<p>Generated by <a href="https://pdoc3.github.io/pdoc" title="pdoc: Python API documentation generator"><cite>pdoc</cite> 0.11.5</a>.</p>
|
||||
</footer>
|
||||
</body>
|
||||
</html>
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1">
|
||||
<meta name="generator" content="pdoc3 0.11.6">
|
||||
<meta name="generator" content="pdoc3 0.11.5">
|
||||
<title>connpy.tests.test_sync API documentation</title>
|
||||
<meta name="description" content="Tests for connpy.core_plugins.sync">
|
||||
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/10up-sanitize.css/13.0.0/sanitize.min.css" integrity="sha512-y1dtMcuvtTMJc1yPgEqF0ZjQbhnc/bFhyvIyVNb9Zk5mIGtqVaAB1Ttl28su8AvFMOY0EwRbAe+HCLqj6W7/KA==" crossorigin>
|
||||
@@ -390,7 +390,7 @@ def test_get_credentials_success(self, MockCreds, mock_exists, mock_connapp):
|
||||
</nav>
|
||||
</main>
|
||||
<footer id="footer">
|
||||
<p>Generated by <a href="https://pdoc3.github.io/pdoc" title="pdoc: Python API documentation generator"><cite>pdoc</cite> 0.11.6</a>.</p>
|
||||
<p>Generated by <a href="https://pdoc3.github.io/pdoc" title="pdoc: Python API documentation generator"><cite>pdoc</cite> 0.11.5</a>.</p>
|
||||
</footer>
|
||||
</body>
|
||||
</html>
|
||||
|
||||